gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from segwit import send_to_witness
from test_framework.test_framework import BitcoinTestFramework
from test_framework import blocktools
from test_framework.mininode import CTransaction
from test_framework.util import *
import io
# Sequence number that is BIP 125 opt-in and BIP 68-compliant
BIP125_SEQUENCE_NUMBER = 0xfffffffd

# Passphrase used to encrypt node 1's wallet so test_locked_wallet_fails
# can exercise bumpfee against a locked wallet.
WALLET_PASSPHRASE = "test"
# Seconds the wallet stays unlocked after walletpassphrase is called.
WALLET_PASSPHRASE_TIMEOUT = 3600
class BumpFeeTest(BitcoinTestFramework):
    """Functional test for the bumpfee RPC (BIP 125 replace-by-fee bumping)."""

    def __init__(self):
        super().__init__()
        self.num_nodes = 2
        self.setup_clean_chain = True

    def setup_network(self, split=False):
        # -walletrbf={i}: node 0 (peer_node) gets walletrbf=0, node 1
        # (rbf_node) gets walletrbf=1, so only node 1 opts in to RBF.
        extra_args = [["-debug", "-prematurewitness", "-walletprematurewitness", "-walletrbf={}".format(i)]
                      for i in range(self.num_nodes)]
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)

        # Encrypt wallet for test_locked_wallet_fails test
        self.nodes[1].encryptwallet(WALLET_PASSPHRASE)
        # encryptwallet shuts the node down; wait for it to exit, restart it,
        # then unlock so the remaining tests can sign transactions.
        bitcoind_processes[1].wait()
        self.nodes[1] = start_node(1, self.options.tmpdir, extra_args[1])
        self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)

        connect_nodes_bi(self.nodes, 0, 1)
        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        peer_node, rbf_node = self.nodes
        rbf_node_address = rbf_node.getnewaddress()

        # fund rbf node with 25 coins of 0.001 btc (100,000 satoshis each)
        print("Mining blocks...")
        peer_node.generate(110)
        self.sync_all()
        for i in range(25):
            peer_node.sendtoaddress(rbf_node_address, 0.001)
        self.sync_all()
        peer_node.generate(1)
        self.sync_all()
        assert_equal(rbf_node.getbalance(), Decimal("0.025"))

        print("Running tests")
        dest_address = peer_node.getnewaddress()
        test_small_output_fails(rbf_node, dest_address)
        test_dust_to_fee(rbf_node, dest_address)
        test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address)
        test_segwit_bumpfee_succeeds(rbf_node, dest_address)
        test_nonrbf_bumpfee_fails(peer_node, dest_address)
        test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
        test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
        test_settxfee(rbf_node, dest_address)
        test_rebumping(rbf_node, dest_address)
        test_rebumping_not_replaceable(rbf_node, dest_address)
        test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
        test_bumpfee_metadata(rbf_node, dest_address)
        test_locked_wallet_fails(rbf_node, dest_address)
        print("Success")
def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address):
    """Bump a plain opt-in RBF tx and verify mempool eviction and wallet links."""
    rbfid = create_fund_sign_send(rbf_node, {dest_address: 0.00090000})
    rbftx = rbf_node.gettransaction(rbfid)
    sync_mempools((rbf_node, peer_node))
    assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
    bumped_tx = rbf_node.bumpfee(rbfid)
    # gettransaction reports wallet fees as negative; the bump must pay more
    assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0
    # check that bumped_tx propagates, original tx was evicted and has a wallet conflict
    sync_mempools((rbf_node, peer_node))
    assert bumped_tx["txid"] in rbf_node.getrawmempool()
    assert bumped_tx["txid"] in peer_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
    assert rbfid not in peer_node.getrawmempool()
    oldwtx = rbf_node.gettransaction(rbfid)
    assert len(oldwtx["walletconflicts"]) > 0
    # check wallet transaction replaces and replaced_by values
    bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
    assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
    assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
    # Create a transaction with segwit output, then create an RBF transaction
    # which spends it, and make sure bumpfee can be called on it.

    # pick a funding coin of exactly 0.001 BTC set up by run_test
    segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001"))
    segwit_out = rbf_node.validateaddress(rbf_node.getnewaddress())
    rbf_node.addwitnessaddress(segwit_out["address"])
    segwitid = send_to_witness(
        version=0,
        node=rbf_node,
        utxo=segwit_in,
        pubkey=segwit_out["pubkey"],
        encode_p2sh=False,
        amount=Decimal("0.0009"),
        sign=True)

    # hand-build the RBF spend (opt-in via sequence) with an explicit
    # change output, since fundrawtransaction is not used here
    rbfraw = rbf_node.createrawtransaction([{
        'txid': segwitid,
        'vout': 0,
        "sequence": BIP125_SEQUENCE_NUMBER
    }], {dest_address: Decimal("0.0005"),
         get_change_address(rbf_node): Decimal("0.0003")})
    rbfsigned = rbf_node.signrawtransaction(rbfraw)
    rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
    assert rbfid in rbf_node.getrawmempool()

    bumped_tx = rbf_node.bumpfee(rbfid)
    assert bumped_tx["txid"] in rbf_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
def test_nonrbf_bumpfee_fails(peer_node, dest_address):
    # cannot replace a non RBF transaction (from node which did not enable RBF)
    not_rbfid = create_fund_sign_send(peer_node, {dest_address: 0.00090000})
    assert_raises_message(JSONRPCException, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
    # cannot bump fee unless the tx has only inputs that we own.
    # here, the rbftx has a peer_node coin and then adds a rbf_node input
    # Note that this test depends upon the RPC code checking input ownership prior to change outputs
    # (since it can't use fundrawtransaction, it lacks a proper change output)
    utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
    inputs = [{
        "txid": utxo["txid"],
        "vout": utxo["vout"],
        "address": utxo["address"],
        "sequence": BIP125_SEQUENCE_NUMBER
    } for utxo in utxos]
    # spend both coins minus a 0.001 fee to a single output
    output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.001")
    rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
    # each wallet signs only its own input, so sign sequentially
    signedtx = rbf_node.signrawtransaction(rawtx)
    signedtx = peer_node.signrawtransaction(signedtx["hex"])
    rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
    assert_raises_message(JSONRPCException, "Transaction contains inputs that don't belong to this wallet",
                          rbf_node.bumpfee, rbfid)
def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
    # cannot bump fee if the transaction has a descendant
    # parent is send-to-self, so we don't have to check which output is change when creating the child tx
    parent_id = create_fund_sign_send(rbf_node, {rbf_node_address: 0.00050000})
    tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
    tx = rbf_node.signrawtransaction(tx)
    # broadcast the child; its txid is not needed afterwards
    txid = rbf_node.sendrawtransaction(tx["hex"])
    assert_raises_message(JSONRPCException, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
def test_small_output_fails(rbf_node, dest_address):
    # cannot bump fee with a too-small output
    # tx spends a 100,000-satoshi coin: 80,000 to dest, 10,000 change,
    # 10,000 original fee. totalFee=20000 consumes the change exactly, OK:
    rbfid = spend_one_input(rbf_node,
                            Decimal("0.00100000"),
                            {dest_address: 0.00080000,
                             get_change_address(rbf_node): Decimal("0.00010000")})
    rbf_node.bumpfee(rbfid, {"totalFee": 20000})

    # one satoshi more than the change can cover must fail:
    rbfid = spend_one_input(rbf_node,
                            Decimal("0.00100000"),
                            {dest_address: 0.00080000,
                             get_change_address(rbf_node): Decimal("0.00010000")})
    assert_raises_message(JSONRPCException, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 20001})
def test_dust_to_fee(rbf_node, dest_address):
    # check that if output is reduced to dust, it will be converted to fee
    # the bumped tx asks for fee=19900, which would leave 100 satoshis of
    # change (dust), so the change output is dropped and the fee is 20,000
    rbfid = spend_one_input(rbf_node,
                            Decimal("0.00100000"),
                            {dest_address: 0.00080000,
                             get_change_address(rbf_node): Decimal("0.00010000")})
    fulltx = rbf_node.getrawtransaction(rbfid, 1)
    bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 19900})
    full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
    assert_equal(bumped_tx["fee"], Decimal("0.00020000"))
    assert_equal(len(fulltx["vout"]), 2)
    assert_equal(len(full_bumped_tx["vout"]), 1)  # change output is eliminated
def test_settxfee(rbf_node, dest_address):
    # check that bumpfee reacts correctly to the use of settxfee (paytxfee)
    # increase feerate by 2.5x, test that fee increased at least 2x
    rbf_node.settxfee(Decimal("0.00001000"))
    rbfid = create_fund_sign_send(rbf_node, {dest_address: 0.00090000})
    rbftx = rbf_node.gettransaction(rbfid)
    rbf_node.settxfee(Decimal("0.00002500"))
    bumped_tx = rbf_node.bumpfee(rbfid)
    # wallet fee is reported negative, hence abs()
    assert bumped_tx["fee"] > 2 * abs(rbftx["fee"])
    rbf_node.settxfee(Decimal("0.00000000"))  # unset paytxfee
def test_rebumping(rbf_node, dest_address):
    # check that re-bumping the original tx fails, but bumping the bumper succeeds
    rbf_node.settxfee(Decimal("0.00001000"))
    rbfid = create_fund_sign_send(rbf_node, {dest_address: 0.00090000})
    bumped = rbf_node.bumpfee(rbfid, {"totalFee": 1000})
    assert_raises_message(JSONRPCException, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 2000})
    rbf_node.bumpfee(bumped["txid"], {"totalFee": 2000})
def test_rebumping_not_replaceable(rbf_node, dest_address):
    # check that re-bumping a non-replaceable bump tx fails
    rbfid = create_fund_sign_send(rbf_node, {dest_address: 0.00090000})
    # "replaceable": False makes the bump transaction itself opt OUT of RBF
    bumped = rbf_node.bumpfee(rbfid, {"totalFee": 10000, "replaceable": False})
    assert_raises_message(JSONRPCException, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
                          {"totalFee": 20000})
def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
    # check that unconfirmed outputs from bumped transactions are not spendable
    rbfid = create_fund_sign_send(rbf_node, {rbf_node_address: 0.00090000})
    rbftx = rbf_node.gettransaction(rbfid)["hex"]
    assert rbfid in rbf_node.getrawmempool()
    bumpid = rbf_node.bumpfee(rbfid)["txid"]
    assert bumpid in rbf_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()

    # check that outputs from the bump transaction are not spendable
    # due to the replaces_txid check in CWallet::AvailableCoins
    assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])

    # submit a block with the rbf tx to clear the bump tx out of the mempool,
    # then call abandon to make sure the wallet doesn't attempt to resubmit the
    # bump tx, then invalidate the block so the rbf tx will be put back in the
    # mempool. this makes it possible to check whether the rbf tx outputs are
    # spendable before the rbf tx is confirmed.
    block = submit_block_with_tx(rbf_node, rbftx)
    rbf_node.abandontransaction(bumpid)
    rbf_node.invalidateblock(block.hash)
    assert bumpid not in rbf_node.getrawmempool()
    assert rbfid in rbf_node.getrawmempool()

    # check that outputs from the rbf tx are not spendable before the
    # transaction is confirmed, due to the replaced_by_txid check in
    # CWallet::AvailableCoins
    assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])

    # check that the main output from the rbf tx is spendable after confirmed
    rbf_node.generate(1)
    assert_equal(
        sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
            if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
def test_bumpfee_metadata(rbf_node, dest_address):
    # check that the bump transaction inherits the wallet metadata
    # (comment/to fields) of the transaction it replaces
    rbfid = rbf_node.sendtoaddress(dest_address, 0.00090000, "comment value", "to value")
    bumped_tx = rbf_node.bumpfee(rbfid)
    bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
    assert_equal(bumped_wtx["comment"], "comment value")
    assert_equal(bumped_wtx["to"], "to value")
def test_locked_wallet_fails(rbf_node, dest_address):
    # bumpfee needs to sign a replacement, so it must fail on a locked wallet
    rbfid = create_fund_sign_send(rbf_node, {dest_address: 0.00090000})
    rbf_node.walletlock()
    assert_raises_message(JSONRPCException, "Please enter the wallet passphrase with walletpassphrase first.",
                          rbf_node.bumpfee, rbfid)
def create_fund_sign_send(node, outputs):
    """Create, fund, sign and broadcast a transaction paying *outputs*.

    Returns the txid of the broadcast transaction.
    """
    unfunded = node.createrawtransaction([], outputs)
    funded = node.fundrawtransaction(unfunded)
    signed = node.signrawtransaction(funded["hex"])
    return node.sendrawtransaction(signed["hex"])
def spend_one_input(node, input_amount, outputs):
    """Spend a single wallet UTXO of exactly *input_amount* to *outputs*.

    The input is marked BIP 125 opt-in via its sequence number. Returns the
    txid of the broadcast transaction. Raises StopIteration if the wallet has
    no unspent output of the requested amount.
    """
    # Renamed from ``input`` to avoid shadowing the builtin of the same name.
    tx_input = dict(sequence=BIP125_SEQUENCE_NUMBER,
                    **next(u for u in node.listunspent() if u["amount"] == input_amount))
    rawtx = node.createrawtransaction([tx_input], outputs)
    signedtx = node.signrawtransaction(rawtx)
    txid = node.sendrawtransaction(signedtx["hex"])
    return txid
def get_change_address(node):
    """Get a wallet change address.

    There is no wallet RPC to access unused change addresses, so this creates
    a dummy transaction, calls fundrawtransaction to add an input and change
    output, then returns the change address.
    """
    dummy_address = node.getnewaddress()
    dummy_amount = Decimal("0.00012345")
    unfunded = node.createrawtransaction([], {dummy_address: dummy_amount})
    funded = node.fundrawtransaction(unfunded)
    decoded = node.decoderawtransaction(funded["hex"])
    # The change output is the one whose value differs from the dummy amount.
    change_outputs = (out for out in decoded["vout"] if out["value"] != dummy_amount)
    return next(addr for out in change_outputs
                for addr in out["scriptPubKey"]["addresses"])
def submit_block_with_tx(node, tx):
    """Mine a block containing only the coinbase and *tx* (a hex string).

    Returns the submitted block object. Raises Exception if submitblock
    reports an error.
    """
    ctx = CTransaction()
    ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))

    tip = node.getbestblockhash()
    height = node.getblockcount() + 1
    # mediantime + 1 keeps the block valid under the median-time-past rule
    block_time = node.getblockheader(tip)["mediantime"] + 1
    block = blocktools.create_block(int(tip, 16), blocktools.create_coinbase(height), block_time)
    block.vtx.append(ctx)
    block.rehash()
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()
    # serialize with witness data (True) so segwit txs survive the round trip
    error = node.submitblock(bytes_to_hex_str(block.serialize(True)))
    if error is not None:
        raise Exception(error)
    return block
# Entry point when executed directly by the functional-test runner.
if __name__ == "__main__":
    BumpFeeTest().main()
| |
import os
import shutil
import unittest
import tempfile
import struct
from pyoram.storage.block_storage import \
BlockStorageTypeFactory
from pyoram.storage.block_storage_file import \
BlockStorageFile
from pyoram.storage.block_storage_mmap import \
BlockStorageMMap
from pyoram.storage.block_storage_ram import \
BlockStorageRAM
from pyoram.storage.block_storage_sftp import \
BlockStorageSFTP
from pyoram.storage.block_storage_s3 import \
BlockStorageS3
from pyoram.storage.boto3_s3_wrapper import \
(Boto3S3Wrapper,
MockBoto3S3Wrapper)
import six
from six.moves import xrange
from six import BytesIO
# Directory containing this test module; used to locate baseline files.
thisdir = os.path.dirname(os.path.abspath(__file__))

# Probe for boto3 so S3-backed tests can be skipped when it is absent.
# Catch only ImportError: a bare except would also hide real errors
# raised during boto3's own import.
try:
    import boto3
    has_boto3 = True
except ImportError:                                    # pragma: no cover
    has_boto3 = False                                  # pragma: no cover
class TestBlockStorageTypeFactory(unittest.TestCase):
    """Verify BlockStorageTypeFactory's name-to-class registry behavior."""

    def test_file(self):
        """'file' resolves to the file-backed device class."""
        self.assertIs(BlockStorageTypeFactory('file'), BlockStorageFile)

    def test_mmap(self):
        """'mmap' resolves to the memory-mapped device class."""
        self.assertIs(BlockStorageTypeFactory('mmap'), BlockStorageMMap)

    def test_ram(self):
        """'ram' resolves to the in-memory device class."""
        self.assertIs(BlockStorageTypeFactory('ram'), BlockStorageRAM)

    def test_sftp(self):
        """'sftp' resolves to the SFTP-backed device class."""
        self.assertIs(BlockStorageTypeFactory('sftp'), BlockStorageSFTP)

    def test_s3(self):
        """'s3' resolves to the S3-backed device class."""
        self.assertIs(BlockStorageTypeFactory('s3'), BlockStorageS3)

    def test_invalid(self):
        """An unregistered name raises ValueError."""
        with self.assertRaises(ValueError):
            BlockStorageTypeFactory(None)

    def test_register_invalid_name(self):
        """Re-registering an existing name raises ValueError."""
        with self.assertRaises(ValueError):
            BlockStorageTypeFactory.register_device('s3', BlockStorageFile)

    def test_register_invalid_type(self):
        """Registering a non-storage class raises TypeError."""
        with self.assertRaises(TypeError):
            BlockStorageTypeFactory.register_device('new_str_type', str)
class _TestBlockStorage(object):
    """Mixin exercising the common block-storage device interface.

    Concrete subclasses provide ``_type`` (the storage class under test)
    and ``_type_kwds`` (keyword arguments used when opening the device).
    A shared test file of ``_block_count`` blocks of ``_block_size`` bytes
    is created once per class in setUpClass.
    """

    # Storage class under test; set by concrete subclasses.
    _type = None
    # Extra keyword arguments passed when constructing/opening the device.
    _type_kwds = None

    @classmethod
    def _read_storage(cls, storage):
        """Return the raw bytes backing *storage* (file-based default)."""
        with open(storage.storage_name, 'rb') as f:
            return f.read()

    @classmethod
    def _remove_storage(cls, name):
        """Delete the file or directory *name* if it exists."""
        if os.path.exists(name):
            if os.path.isdir(name):
                shutil.rmtree(name, ignore_errors=True)
            else:
                os.remove(name)

    @classmethod
    def _check_exists(cls, name):
        """Return True if *name* exists on disk."""
        return os.path.exists(name)

    @classmethod
    def _get_empty_existing(cls):
        # A pre-existing, empty baseline file: setup() must refuse to
        # overwrite it unless ignore_existing is passed.
        return os.path.join(thisdir,
                            "baselines",
                            "exists.empty")

    @classmethod
    def _get_dummy_noexist(cls):
        # Reserve a unique filename in the working directory. The file it
        # creates is removed by setUpClass, leaving a name that is known
        # not to exist.
        fd, name = tempfile.mkstemp(dir=os.getcwd())
        os.close(fd)
        return name

    def _open_teststorage(self, **kwds):
        """Open the shared class test storage, merging in subclass kwargs."""
        kwds.update(self._type_kwds)
        return self._type(self._testfname, **kwds)

    def _reopen_storage(self, storage):
        """Re-open *storage* by its storage_name with subclass kwargs."""
        return self._type(storage.storage_name, **self._type_kwds)

    @classmethod
    def setUpClass(cls):
        """Create the shared test storage file and expected block contents."""
        assert cls._type is not None
        assert cls._type_kwds is not None
        cls._dummy_name = cls._get_dummy_noexist()
        # mkstemp created the file; remove it so the name does not exist
        if cls._check_exists(cls._dummy_name):
            cls._remove_storage(cls._dummy_name)
        if os.path.exists(cls._dummy_name):
            _TestBlockStorage.\
                _remove_storage(cls._dummy_name)       # pragma: no cover
        cls._block_size = 25
        cls._block_count = 5
        cls._testfname = cls.__name__ + "_testfile.bin"
        cls._blocks = []
        # block i is initialized to block_size copies of the byte i
        f = cls._type.setup(
            cls._testfname,
            block_size=cls._block_size,
            block_count=cls._block_count,
            initialize=lambda i: bytes(bytearray([i])*cls._block_size),
            ignore_existing=True,
            **cls._type_kwds)
        f.close()
        # keep the closed handle around so tests can read its backing bytes
        cls._original_f = f
        for i in range(cls._block_count):
            data = bytearray([i])*cls._block_size
            cls._blocks.append(data)

    @classmethod
    def tearDownClass(cls):
        """Remove the shared storage and the reserved dummy name."""
        cls._remove_storage(cls._testfname)
        cls._remove_storage(cls._dummy_name)

    def test_setup_fails(self):
        """setup() must reject existing targets and invalid parameters."""
        self.assertEqual(self._check_exists(self._dummy_name), False)
        # refuses to clobber an existing file by default...
        with self.assertRaises(IOError):
            self._type.setup(
                self._get_empty_existing(),
                block_size=10,
                block_count=10,
                **self._type_kwds)
        self.assertEqual(self._check_exists(self._dummy_name), False)
        # ...and when ignore_existing is explicitly False
        with self.assertRaises(IOError):
            self._type.setup(
                self._get_empty_existing(),
                block_size=10,
                block_count=10,
                ignore_existing=False,
                **self._type_kwds)
        self.assertEqual(self._check_exists(self._dummy_name), False)
        # block_size must be positive
        with self.assertRaises(ValueError):
            self._type.setup(self._dummy_name,
                             block_size=0,
                             block_count=1,
                             **self._type_kwds)
        self.assertEqual(self._check_exists(self._dummy_name), False)
        # block_count must be positive
        with self.assertRaises(ValueError):
            self._type.setup(self._dummy_name,
                             block_size=1,
                             block_count=0,
                             **self._type_kwds)
        self.assertEqual(self._check_exists(self._dummy_name), False)
        # header_data must be a bytes-like object
        with self.assertRaises(TypeError):
            self._type.setup(self._dummy_name,
                             block_size=1,
                             block_count=1,
                             header_data=2,
                             **self._type_kwds)
        self.assertEqual(self._check_exists(self._dummy_name), False)
        # TODO: The multiprocessing module is bad
        #       about handling exceptions raised on the
        #       thread's stack.
        #with self.assertRaises(ValueError):
        #    def _init(i):
        #        raise ValueError
        #    self._type.setup(self._dummy_name,
        #                     block_size=1,
        #                     block_count=1,
        #                     initialize=_init,
        #                     **self._type_kwds)
        #self.assertEqual(self._check_exists(self._dummy_name), False)

    def test_setup(self):
        """setup() creates storage of the computed size with an empty header."""
        fname = ".".join(self.id().split(".")[1:])
        fname += ".bin"
        fname = os.path.join(thisdir, fname)
        self._remove_storage(fname)
        bsize = 10
        bcount = 11
        fsetup = self._type.setup(fname, bsize, bcount, **self._type_kwds)
        fsetup.close()
        flen = len(self._read_storage(fsetup))
        self.assertEqual(
            flen,
            self._type.compute_storage_size(bsize,
                                            bcount))
        # storage carries a header, so it is strictly larger than the payload
        self.assertEqual(
            flen >
            self._type.compute_storage_size(bsize,
                                            bcount,
                                            ignore_header=True),
            True)
        with self._reopen_storage(fsetup) as f:
            self.assertEqual(f.header_data, bytes())
            self.assertEqual(fsetup.header_data, bytes())
            self.assertEqual(f.block_size, bsize)
            self.assertEqual(fsetup.block_size, bsize)
            self.assertEqual(f.block_count, bcount)
            self.assertEqual(fsetup.block_count, bcount)
            self.assertEqual(f.storage_name, fsetup.storage_name)
            self.assertEqual(fsetup.storage_name, fsetup.storage_name)
        # RAM-backed devices have no on-disk name
        if self._type is not BlockStorageRAM:
            self.assertEqual(fsetup.storage_name, fname)
        else:
            self.assertEqual(fsetup.storage_name, None)
        self._remove_storage(fname)

    def test_setup_withdata(self):
        """setup() stores user header data and sizes the storage accordingly."""
        fname = ".".join(self.id().split(".")[1:])
        fname += ".bin"
        fname = os.path.join(thisdir, fname)
        self._remove_storage(fname)
        bsize = 10
        bcount = 11
        header_data = bytes(bytearray([0,1,2]))
        fsetup = self._type.setup(fname,
                                  bsize,
                                  bcount,
                                  header_data=header_data,
                                  **self._type_kwds)
        fsetup.close()
        flen = len(self._read_storage(fsetup))
        self.assertEqual(
            flen,
            self._type.compute_storage_size(bsize,
                                            bcount,
                                            header_data=header_data))
        self.assertTrue(len(header_data) > 0)
        # non-empty header data strictly increases the storage size
        self.assertEqual(
            self._type.compute_storage_size(bsize,
                                            bcount) <
            self._type.compute_storage_size(bsize,
                                            bcount,
                                            header_data=header_data),
            True)
        self.assertEqual(
            flen >
            self._type.compute_storage_size(bsize,
                                            bcount,
                                            header_data=header_data,
                                            ignore_header=True),
            True)
        with self._reopen_storage(fsetup) as f:
            self.assertEqual(f.header_data, header_data)
            self.assertEqual(fsetup.header_data, header_data)
            self.assertEqual(f.block_size, bsize)
            self.assertEqual(fsetup.block_size, bsize)
            self.assertEqual(f.block_count, bcount)
            self.assertEqual(fsetup.block_count, bcount)
            self.assertEqual(f.storage_name, fsetup.storage_name)
            self.assertEqual(fsetup.storage_name, fsetup.storage_name)
        if self._type is not BlockStorageRAM:
            self.assertEqual(fsetup.storage_name, fname)
        else:
            self.assertEqual(fsetup.storage_name, None)
        self._remove_storage(fname)

    def test_init_noexists(self):
        """Opening a non-existent storage name raises IOError."""
        self.assertEqual(self._check_exists(self._dummy_name), False)
        with self.assertRaises(IOError):
            with self._type(self._dummy_name, **self._type_kwds) as f:
                pass                                   # pragma: no cover

    def test_init_exists(self):
        """Opening existing storage exposes its metadata without modifying it."""
        self.assertEqual(self._check_exists(self._testfname), True)
        databefore = self._read_storage(self._original_f)
        with self._open_teststorage() as f:
            self.assertEqual(f.block_size, self._block_size)
            self.assertEqual(f.block_count, self._block_count)
            self.assertEqual(f.storage_name, self._testfname)
            self.assertEqual(f.header_data, bytes())
        self.assertEqual(self._check_exists(self._testfname), True)
        dataafter = self._read_storage(self._original_f)
        self.assertEqual(databefore, dataafter)

    def test_read_block(self):
        """read_block returns block contents and tallies bytes_received."""
        with self._open_teststorage() as f:
            self.assertEqual(f.bytes_sent, 0)
            self.assertEqual(f.bytes_received, 0)
            # four full passes (two forward, two reversed) over all blocks
            for i, data in enumerate(self._blocks):
                self.assertEqual(list(bytearray(f.read_block(i))),
                                 list(self._blocks[i]))
            for i, data in enumerate(self._blocks):
                self.assertEqual(list(bytearray(f.read_block(i))),
                                 list(self._blocks[i]))
            for i, data in reversed(list(enumerate(self._blocks))):
                self.assertEqual(list(bytearray(f.read_block(i))),
                                 list(self._blocks[i]))
            for i, data in reversed(list(enumerate(self._blocks))):
                self.assertEqual(list(bytearray(f.read_block(i))),
                                 list(self._blocks[i]))
            self.assertEqual(f.bytes_sent, 0)
            self.assertEqual(f.bytes_received,
                             self._block_count*self._block_size*4)
        # counters reset on a fresh open; read only first and last blocks
        with self._open_teststorage() as f:
            self.assertEqual(f.bytes_sent, 0)
            self.assertEqual(f.bytes_received, 0)
            self.assertEqual(list(bytearray(f.read_block(0))),
                             list(self._blocks[0]))
            self.assertEqual(list(bytearray(f.read_block(self._block_count-1))),
                             list(self._blocks[-1]))
            self.assertEqual(f.bytes_sent, 0)
            self.assertEqual(f.bytes_received,
                             self._block_size*2)

    def test_write_block(self):
        """write_block overwrites single blocks and tallies bytes_sent."""
        data = bytearray([self._block_count])*self._block_size
        self.assertEqual(len(data) > 0, True)
        with self._open_teststorage() as f:
            self.assertEqual(f.bytes_sent, 0)
            self.assertEqual(f.bytes_received, 0)
            for i in xrange(self._block_count):
                self.assertNotEqual(list(bytearray(f.read_block(i))),
                                    list(data))
            for i in xrange(self._block_count):
                f.write_block(i, bytes(data))
            for i in xrange(self._block_count):
                self.assertEqual(list(bytearray(f.read_block(i))),
                                 list(data))
            # restore the original contents for subsequent tests
            for i, block in enumerate(self._blocks):
                f.write_block(i, bytes(block))
            self.assertEqual(f.bytes_sent,
                             self._block_count*self._block_size*2)
            self.assertEqual(f.bytes_received,
                             self._block_count*self._block_size*2)

    def test_read_blocks(self):
        """read_blocks honors arbitrary index order, including repeats."""
        with self._open_teststorage() as f:
            self.assertEqual(f.bytes_sent, 0)
            self.assertEqual(f.bytes_received, 0)
            data = f.read_blocks(list(xrange(self._block_count)))
            self.assertEqual(len(data), self._block_count)
            for i, block in enumerate(data):
                self.assertEqual(list(bytearray(block)),
                                 list(self._blocks[i]))
            data = f.read_blocks([0])
            self.assertEqual(len(data), 1)
            self.assertEqual(list(bytearray(data[0])),
                             list(self._blocks[0]))
            self.assertEqual(len(self._blocks) > 1, True)
            # rotated order: blocks 1..N-1 followed by block 0
            data = f.read_blocks(list(xrange(1, self._block_count)) + [0])
            self.assertEqual(len(data), self._block_count)
            for i, block in enumerate(data[:-1], 1):
                self.assertEqual(list(bytearray(block)),
                                 list(self._blocks[i]))
            self.assertEqual(list(bytearray(data[-1])),
                             list(self._blocks[0]))
            self.assertEqual(f.bytes_sent, 0)
            self.assertEqual(f.bytes_received,
                             (2*self._block_count+1)*self._block_size)

    def test_yield_blocks(self):
        """yield_blocks streams the same results as read_blocks."""
        with self._open_teststorage() as f:
            self.assertEqual(f.bytes_sent, 0)
            self.assertEqual(f.bytes_received, 0)
            data = list(f.yield_blocks(list(xrange(self._block_count))))
            self.assertEqual(len(data), self._block_count)
            for i, block in enumerate(data):
                self.assertEqual(list(bytearray(block)),
                                 list(self._blocks[i]))
            data = list(f.yield_blocks([0]))
            self.assertEqual(len(data), 1)
            self.assertEqual(list(bytearray(data[0])),
                             list(self._blocks[0]))
            self.assertEqual(len(self._blocks) > 1, True)
            # rotated order: blocks 1..N-1 followed by block 0
            data = list(f.yield_blocks(list(xrange(1, self._block_count)) + [0]))
            self.assertEqual(len(data), self._block_count)
            for i, block in enumerate(data[:-1], 1):
                self.assertEqual(list(bytearray(block)),
                                 list(self._blocks[i]))
            self.assertEqual(list(bytearray(data[-1])),
                             list(self._blocks[0]))
            self.assertEqual(f.bytes_sent, 0)
            self.assertEqual(f.bytes_received,
                             (2*self._block_count+1)*self._block_size)

    def test_write_blocks(self):
        """write_blocks performs bulk overwrite and restore with counters."""
        data = [bytearray([self._block_count])*self._block_size
                for i in xrange(self._block_count)]
        with self._open_teststorage() as f:
            self.assertEqual(f.bytes_sent, 0)
            self.assertEqual(f.bytes_received, 0)
            orig = f.read_blocks(list(xrange(self._block_count)))
            self.assertEqual(len(orig), self._block_count)
            for i, block in enumerate(orig):
                self.assertEqual(list(bytearray(block)),
                                 list(self._blocks[i]))
            f.write_blocks(list(xrange(self._block_count)),
                           [bytes(b) for b in data])
            new = f.read_blocks(list(xrange(self._block_count)))
            self.assertEqual(len(new), self._block_count)
            for i, block in enumerate(new):
                self.assertEqual(list(bytearray(block)),
                                 list(data[i]))
            # restore the original contents for subsequent tests
            f.write_blocks(list(xrange(self._block_count)),
                           [bytes(b) for b in self._blocks])
            orig = f.read_blocks(list(xrange(self._block_count)))
            self.assertEqual(len(orig), self._block_count)
            for i, block in enumerate(orig):
                self.assertEqual(list(bytearray(block)),
                                 list(self._blocks[i]))
            self.assertEqual(f.bytes_sent,
                             self._block_count*self._block_size*2)
            self.assertEqual(f.bytes_received,
                             self._block_count*self._block_size*3)

    def test_update_header_data(self):
        """Header data can be replaced in place, but only with equal length."""
        fname = ".".join(self.id().split(".")[1:])
        fname += ".bin"
        fname = os.path.join(thisdir, fname)
        self._remove_storage(fname)
        bsize = 10
        bcount = 11
        header_data = bytes(bytearray([0,1,2]))
        fsetup = self._type.setup(fname,
                                  block_size=bsize,
                                  block_count=bcount,
                                  header_data=header_data,
                                  **self._type_kwds)
        fsetup.close()
        new_header_data = bytes(bytearray([1,1,1]))
        with self._reopen_storage(fsetup) as f:
            self.assertEqual(f.header_data, header_data)
            f.update_header_data(new_header_data)
            self.assertEqual(f.header_data, new_header_data)
        # the update persists across a reopen
        with self._reopen_storage(fsetup) as f:
            self.assertEqual(f.header_data, new_header_data)
        # shorter replacement is rejected
        with self.assertRaises(ValueError):
            with self._reopen_storage(fsetup) as f:
                f.update_header_data(bytes(bytearray([1,1])))
        # longer replacement is rejected
        with self.assertRaises(ValueError):
            with self._reopen_storage(fsetup) as f:
                f.update_header_data(bytes(bytearray([1,1,1,1])))
        with self._reopen_storage(fsetup) as f:
            self.assertEqual(f.header_data, new_header_data)
        self._remove_storage(fname)

    def test_locked_flag(self):
        """A second open fails while locked unless ignore_lock is passed."""
        with self._open_teststorage() as f:
            with self.assertRaises(IOError):
                with self._open_teststorage() as f1:
                    pass                               # pragma: no cover
            with self.assertRaises(IOError):
                with self._open_teststorage() as f1:
                    pass                               # pragma: no cover
            with self._open_teststorage(ignore_lock=True) as f1:
                pass
            with self.assertRaises(IOError):
                with self._open_teststorage() as f1:
                    pass                               # pragma: no cover
            with self._open_teststorage(ignore_lock=True) as f1:
                pass
            with self._open_teststorage(ignore_lock=True) as f1:
                pass
        # after the locking handle closes, ignore_lock still opens cleanly
        with self._open_teststorage(ignore_lock=True) as f:
            pass

    def test_read_block_cloned(self):
        """clone_device reads independently; parent counters stay at zero."""
        with self._open_teststorage() as forig:
            self.assertEqual(forig.bytes_sent, 0)
            self.assertEqual(forig.bytes_received, 0)
            with forig.clone_device() as f:
                self.assertEqual(forig.bytes_sent, 0)
                self.assertEqual(forig.bytes_received, 0)
                self.assertEqual(f.bytes_sent, 0)
                self.assertEqual(f.bytes_received, 0)
                # four full passes (two forward, two reversed) over all blocks
                for i, data in enumerate(self._blocks):
                    self.assertEqual(list(bytearray(f.read_block(i))),
                                     list(self._blocks[i]))
                for i, data in enumerate(self._blocks):
                    self.assertEqual(list(bytearray(f.read_block(i))),
                                     list(self._blocks[i]))
                for i, data in reversed(list(enumerate(self._blocks))):
                    self.assertEqual(list(bytearray(f.read_block(i))),
                                     list(self._blocks[i]))
                for i, data in reversed(list(enumerate(self._blocks))):
                    self.assertEqual(list(bytearray(f.read_block(i))),
                                     list(self._blocks[i]))
                self.assertEqual(f.bytes_sent, 0)
                self.assertEqual(f.bytes_received,
                                 self._block_count*self._block_size*4)
            with forig.clone_device() as f:
                self.assertEqual(forig.bytes_sent, 0)
                self.assertEqual(forig.bytes_received, 0)
                self.assertEqual(f.bytes_sent, 0)
                self.assertEqual(f.bytes_received, 0)
                self.assertEqual(list(bytearray(f.read_block(0))),
                                 list(self._blocks[0]))
                self.assertEqual(list(bytearray(f.read_block(self._block_count-1))),
                                 list(self._blocks[-1]))
                self.assertEqual(f.bytes_sent, 0)
                self.assertEqual(f.bytes_received,
                                 self._block_size*2)
            self.assertEqual(forig.bytes_sent, 0)
            self.assertEqual(forig.bytes_received, 0)

    def test_write_block_cloned(self):
        """clone_device writes independently; parent counters stay at zero."""
        data = bytearray([self._block_count])*self._block_size
        self.assertEqual(len(data) > 0, True)
        with self._open_teststorage() as forig:
            self.assertEqual(forig.bytes_sent, 0)
            self.assertEqual(forig.bytes_received, 0)
            with forig.clone_device() as f:
                self.assertEqual(forig.bytes_sent, 0)
                self.assertEqual(forig.bytes_received, 0)
                self.assertEqual(f.bytes_sent, 0)
                self.assertEqual(f.bytes_received, 0)
                for i in xrange(self._block_count):
                    self.assertNotEqual(list(bytearray(f.read_block(i))),
                                        list(data))
                for i in xrange(self._block_count):
                    f.write_block(i, bytes(data))
                for i in xrange(self._block_count):
                    self.assertEqual(list(bytearray(f.read_block(i))),
                                     list(data))
                # restore the original contents for subsequent tests
                for i, block in enumerate(self._blocks):
                    f.write_block(i, bytes(block))
                self.assertEqual(f.bytes_sent,
                                 self._block_count*self._block_size*2)
                self.assertEqual(f.bytes_received,
                                 self._block_count*self._block_size*2)
            self.assertEqual(forig.bytes_sent, 0)
            self.assertEqual(forig.bytes_received, 0)

    def test_read_blocks_cloned(self):
        """read_blocks on a clone mirrors test_read_blocks semantics."""
        with self._open_teststorage() as forig:
            self.assertEqual(forig.bytes_sent, 0)
            self.assertEqual(forig.bytes_received, 0)
            with forig.clone_device() as f:
                self.assertEqual(forig.bytes_sent, 0)
                self.assertEqual(forig.bytes_received, 0)
                self.assertEqual(f.bytes_sent, 0)
                self.assertEqual(f.bytes_received, 0)
                data = f.read_blocks(list(xrange(self._block_count)))
                self.assertEqual(len(data), self._block_count)
                for i, block in enumerate(data):
                    self.assertEqual(list(bytearray(block)),
                                     list(self._blocks[i]))
                data = f.read_blocks([0])
                self.assertEqual(len(data), 1)
                self.assertEqual(list(bytearray(data[0])),
                                 list(self._blocks[0]))
                self.assertEqual(len(self._blocks) > 1, True)
                # rotated order: blocks 1..N-1 followed by block 0
                data = f.read_blocks(list(xrange(1, self._block_count)) + [0])
                self.assertEqual(len(data), self._block_count)
                for i, block in enumerate(data[:-1], 1):
                    self.assertEqual(list(bytearray(block)),
                                     list(self._blocks[i]))
                self.assertEqual(list(bytearray(data[-1])),
                                 list(self._blocks[0]))
                self.assertEqual(f.bytes_sent, 0)
                self.assertEqual(f.bytes_received,
                                 (2*self._block_count + 1)*self._block_size)
            self.assertEqual(forig.bytes_sent, 0)
            self.assertEqual(forig.bytes_received, 0)

    def test_yield_blocks_cloned(self):
        """yield_blocks on a clone mirrors test_yield_blocks semantics."""
        with self._open_teststorage() as forig:
            self.assertEqual(forig.bytes_sent, 0)
            self.assertEqual(forig.bytes_received, 0)
            with forig.clone_device() as f:
                self.assertEqual(forig.bytes_sent, 0)
                self.assertEqual(forig.bytes_received, 0)
                self.assertEqual(f.bytes_sent, 0)
                self.assertEqual(f.bytes_received, 0)
                data = list(f.yield_blocks(list(xrange(self._block_count))))
                self.assertEqual(len(data), self._block_count)
                for i, block in enumerate(data):
                    self.assertEqual(list(bytearray(block)),
                                     list(self._blocks[i]))
                data = list(f.yield_blocks([0]))
                self.assertEqual(len(data), 1)
                self.assertEqual(list(bytearray(data[0])),
                                 list(self._blocks[0]))
                self.assertEqual(len(self._blocks) > 1, True)
                # rotated order: blocks 1..N-1 followed by block 0
                data = list(f.yield_blocks(list(xrange(1, self._block_count)) + [0]))
                self.assertEqual(len(data), self._block_count)
                for i, block in enumerate(data[:-1], 1):
                    self.assertEqual(list(bytearray(block)),
                                     list(self._blocks[i]))
                self.assertEqual(list(bytearray(data[-1])),
                                 list(self._blocks[0]))
                self.assertEqual(f.bytes_sent, 0)
                self.assertEqual(f.bytes_received,
                                 (2*self._block_count + 1)*self._block_size)
            self.assertEqual(forig.bytes_sent, 0)
            self.assertEqual(forig.bytes_received, 0)

    def test_write_blocks_cloned(self):
        """write_blocks on a clone mirrors test_write_blocks semantics."""
        data = [bytearray([self._block_count])*self._block_size
                for i in xrange(self._block_count)]
        with self._open_teststorage() as forig:
            self.assertEqual(forig.bytes_sent, 0)
            self.assertEqual(forig.bytes_received, 0)
            with forig.clone_device() as f:
                self.assertEqual(forig.bytes_sent, 0)
                self.assertEqual(forig.bytes_received, 0)
                self.assertEqual(f.bytes_sent, 0)
                self.assertEqual(f.bytes_received, 0)
                orig = f.read_blocks(list(xrange(self._block_count)))
                self.assertEqual(len(orig), self._block_count)
                for i, block in enumerate(orig):
                    self.assertEqual(list(bytearray(block)),
                                     list(self._blocks[i]))
                f.write_blocks(list(xrange(self._block_count)),
                               [bytes(b) for b in data])
                new = f.read_blocks(list(xrange(self._block_count)))
                self.assertEqual(len(new), self._block_count)
                for i, block in enumerate(new):
                    self.assertEqual(list(bytearray(block)),
                                     list(data[i]))
                # restore the original contents for subsequent tests
                f.write_blocks(list(xrange(self._block_count)),
                               [bytes(b) for b in self._blocks])
                orig = f.read_blocks(list(xrange(self._block_count)))
                self.assertEqual(len(orig), self._block_count)
                for i, block in enumerate(orig):
                    self.assertEqual(list(bytearray(block)),
                                     list(self._blocks[i]))
                self.assertEqual(f.bytes_sent,
                                 self._block_count*self._block_size*2)
                self.assertEqual(f.bytes_received,
                                 self._block_count*self._block_size*3)
            self.assertEqual(forig.bytes_sent, 0)
            self.assertEqual(forig.bytes_received, 0)
class TestBlockStorageFile(_TestBlockStorage,
                           unittest.TestCase):
    """Run the shared block-storage tests against the file backend with
    default construction keywords."""
    _type = BlockStorageFile
    _type_kwds = {}
class TestBlockStorageFileNoThreadPool(_TestBlockStorage,
                                       unittest.TestCase):
    """File backend with the thread pool disabled (synchronous I/O)."""
    _type = BlockStorageFile
    _type_kwds = {'threadpool_size': 0}
class TestBlockStorageFileThreadPool(_TestBlockStorage,
                                     unittest.TestCase):
    """File backend with a single worker thread handling I/O."""
    _type = BlockStorageFile
    _type_kwds = {'threadpool_size': 1}
class TestBlockStorageMMap(_TestBlockStorage,
                           unittest.TestCase):
    """Run the shared block-storage tests against the mmap backend."""
    _type = BlockStorageMMap
    _type_kwds = {}
class _TestBlockStorageRAM(_TestBlockStorage):
    """Variant of the shared block-storage tests for RAM-backed storage.

    For this backend the 'storage' is an in-memory bytearray rather than
    a named external resource, so the helper hooks and a few tests that
    assume a filesystem name are overridden below.
    """

    @classmethod
    def _read_storage(cls, storage):
        # RAM storage exposes its full raw contents directly.
        return storage.data
    @classmethod
    def _remove_storage(cls, name):
        # Nothing external to clean up for RAM storage.
        pass
    @classmethod
    def _check_exists(cls, name):
        # There is no external resource, so "exists" is always true.
        return True
    def _open_teststorage(self, **kwds):
        # Open a new storage object over the original raw bytes.
        kwds.update(self._type_kwds)
        return self._type(self._original_f.data, **kwds)
    def _reopen_storage(self, storage):
        return self._type(storage.data, **self._type_kwds)

    #
    # Override some of the test methods
    #

    def test_setup_fails(self):
        """setup() must reject invalid sizes/counts and bad header types."""
        with self.assertRaises(ValueError):
            self._type.setup(self._dummy_name,
                             block_size=0,
                             block_count=1,
                             **self._type_kwds)
        with self.assertRaises(ValueError):
            self._type.setup(self._dummy_name,
                             block_size=1,
                             block_count=0,
                             **self._type_kwds)
        with self.assertRaises(TypeError):
            self._type.setup(self._dummy_name,
                             block_size=1,
                             block_count=1,
                             header_data=2,
                             **self._type_kwds)
    def test_init_noexists(self):
        """Constructing from anything but valid raw bytes must fail."""
        with self.assertRaises(TypeError):
            with self._type(2, **self._type_kwds) as f:
                pass                               # pragma: no cover
        with self.assertRaises(TypeError):
            with self._type(None, **self._type_kwds) as f:
                pass                               # pragma: no cover
        # An empty bytearray is too short to even hold the header.
        with self.assertRaises(struct.error):
            with self._type(bytearray(), **self._type_kwds) as f:
                pass                               # pragma: no cover
    def test_init_exists(self):
        """Opening existing RAM storage exposes its geometry and leaves
        the underlying bytes unchanged."""
        databefore = self._read_storage(self._original_f)
        with self._open_teststorage() as f:
            self.assertEqual(f.block_size, self._block_size)
            self.assertEqual(f.block_count, self._block_count)
            self.assertEqual(f.storage_name, self._original_f.storage_name)
            # RAM storage has no backing name.
            self.assertEqual(f.storage_name, None)
            self.assertEqual(f.header_data, bytes())
        dataafter = self._read_storage(self._original_f)
        self.assertEqual(databefore, dataafter)
    def test_tofile_fromfile_fileobj(self):
        """Round-trip the storage through a file object."""
        out1 = BytesIO()
        self._original_f.tofile(out1)
        out1.seek(0)
        self.assertEqual(len(self._original_f.data) > 0, True)
        self.assertEqual(self._original_f.data, out1.read())
        out1.seek(0)
        in1 = self._type.fromfile(out1)
        # NOTE(review): the loaded copy differs from the original until
        # close() is called — presumably an in-use/lock marker byte is
        # flipped while open; confirm against BlockStorageRAM internals.
        self.assertNotEqual(self._original_f.data, in1.data)
        out2 = BytesIO()
        in1.tofile(out2)
        self.assertNotEqual(self._original_f.data, in1.data)
        in1.close()
        self.assertEqual(self._original_f.data, in1.data)
        out2.seek(0)
        # A dump taken while the storage was open is locked...
        with self.assertRaises(IOError):
            with self._type.fromfile(out2) as in2:
                pass                               # pragma: no cover
        out2.seek(0)
        # ...unless the caller explicitly ignores the lock.
        with self._type.fromfile(out2, ignore_lock=True) as in2:
            self.assertEqual(self._original_f.data, in1.data)
            self.assertNotEqual(self._original_f.data, in2.data)
        self.assertEqual(self._original_f.data, in1.data)
        self.assertNotEqual(self._original_f.data, in2.data)
    def test_tofile_fromfile_filename(self):
        """Round-trip the storage through named files on disk."""
        def _create():
            fd, out = tempfile.mkstemp()
            os.close(fd)
            return out
        def _read(name):
            with open(name, 'rb') as f:
                return f.read()
        out1 = _create()
        self._original_f.tofile(out1)
        self.assertEqual(len(self._original_f.data) > 0, True)
        self.assertEqual(self._original_f.data, _read(out1))
        in1 = self._type.fromfile(out1)
        # See the note in test_tofile_fromfile_fileobj: contents differ
        # until close().
        self.assertNotEqual(self._original_f.data, in1.data)
        out2 = _create()
        in1.tofile(out2)
        self.assertNotEqual(self._original_f.data, in1.data)
        in1.close()
        self.assertEqual(self._original_f.data, in1.data)
        # A dump taken while the storage was open is locked...
        with self.assertRaises(IOError):
            with self._type.fromfile(out2) as in2:
                pass                               # pragma: no cover
        # ...unless the caller explicitly ignores the lock.
        with self._type.fromfile(out2, ignore_lock=True) as in2:
            self.assertEqual(self._original_f.data, in1.data)
            self.assertNotEqual(self._original_f.data, in2.data)
        self.assertEqual(self._original_f.data, in1.data)
        self.assertNotEqual(self._original_f.data, in2.data)
class TestBlockStorageRAM(_TestBlockStorageRAM,
                          unittest.TestCase):
    """Run the RAM-specific block-storage tests with default keywords."""
    _type = BlockStorageRAM
    _type_kwds = {}
class _dummy_sftp_file(object):
    """Minimal stand-in for a paramiko SFTPFile, backed by a local file.

    Supports the small subset of the SFTPFile API that the SFTP block
    storage backend exercises: context management, scatter reads via
    ``readv``, the no-op ``set_pipelined``, and attribute fall-through
    to the underlying file object for everything else.
    """

    def __init__(self, *args, **kwds):
        # All real work is delegated to an ordinary local file.
        self._f = open(*args, **kwds)

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self._f.close()

    def readv(self, chunks):
        """Return the data for each ``(offset, size)`` pair in chunks."""
        pieces = []
        for (start, length) in chunks:
            self._f.seek(start)
            pieces.append(self._f.read(length))
        return pieces

    def __getattr__(self, key):
        # Anything not defined here behaves like the wrapped file.
        return getattr(self._f, key)

    def set_pipelined(self):
        # Pipelining is meaningless for a local file; accept and ignore.
        pass
class dummy_sftp(object):
    """Stand-in for a paramiko SFTP client that maps every operation
    onto the local filesystem."""
    # Path operations delegate straight to the os module.
    remove = os.remove
    stat = os.stat
    @staticmethod
    def open(*args, **kwds):
        # Hand back a local-file object that mimics an SFTPFile.
        return _dummy_sftp_file(*args, **kwds)
    @staticmethod
    def close():
        # No connection to tear down.
        pass
class dummy_sshclient(object):
    """Stand-in for a paramiko SSHClient that serves the dummy SFTP
    client above instead of opening a real connection."""
    @staticmethod
    def open_sftp():
        return dummy_sftp
class TestBlockStorageSFTP(_TestBlockStorage,
                           unittest.TestCase):
    """Run the shared block-storage tests over the SFTP backend, using
    the dummy sshclient/sftp pair that operates on the local filesystem."""
    _type = BlockStorageSFTP
    _type_kwds = {'sshclient': dummy_sshclient}

    def test_setup_fails_no_sshclient(self):
        """setup() must require an sshclient keyword."""
        self.assertEqual(self._check_exists(self._dummy_name), False)
        kwds = dict(self._type_kwds)
        del kwds['sshclient']
        with self.assertRaises(ValueError):
            self._type.setup(self._dummy_name,
                             block_size=1,
                             block_count=1,
                             **kwds)
        # The failed setup must not leave partial storage behind.
        self.assertEqual(self._check_exists(self._dummy_name), False)
    def test_init_exists_no_sshclient(self):
        """Opening existing storage must also require an sshclient."""
        self.assertEqual(self._check_exists(self._testfname), True)
        kwds = dict(self._type_kwds)
        del kwds['sshclient']
        with self.assertRaises(ValueError):
            with self._type(self._testfname, **kwds) as f:
                pass                               # pragma: no cover
        # With the sshclient supplied, opening works and is read-only
        # with respect to the stored bytes.
        databefore = self._read_storage(self._original_f)
        with self._open_teststorage() as f:
            self.assertEqual(f.block_size, self._block_size)
            self.assertEqual(f.block_count, self._block_count)
            self.assertEqual(f.storage_name, self._testfname)
            self.assertEqual(f.header_data, bytes())
        self.assertEqual(self._check_exists(self._testfname), True)
        dataafter = self._read_storage(self._original_f)
        self.assertEqual(databefore, dataafter)
class _TestBlockStorageS3Mock(_TestBlockStorage):
    """Shared block-storage tests for the S3 backend, run against a mock
    boto3 wrapper that stores 'objects' as local files under a directory
    named after the storage."""
    _type = BlockStorageS3
    _type_kwds = {}
    @classmethod
    def _read_storage(cls, storage):
        # Mock S3 layout: a directory holding the index file plus one
        # file per block named "b<i>"; concatenate index then blocks.
        import glob
        data = bytearray()
        name = storage.storage_name
        # NOTE(review): prefix uses os.path.join (backslash on Windows)
        # while the glob pattern hard-codes "/" — these disagree on
        # Windows; confirm this suite only runs on POSIX paths.
        prefix_len = len(os.path.join(name,"b"))
        nblocks = max(int(bfile[prefix_len:]) for bfile in glob.glob(name+"/b*")) + 1
        with open(os.path.join(name, BlockStorageS3._index_name), 'rb') as f:
            data.extend(f.read())
        for i in range(nblocks):
            with open(os.path.join(name, "b"+str(i)), 'rb') as f:
                data.extend(f.read())
        return data
    def test_init_exists_no_bucket(self):
        """Opening existing storage requires a bucket_name keyword."""
        self.assertEqual(self._check_exists(self._testfname), True)
        databefore = self._read_storage(self._original_f)
        with self._open_teststorage() as f:
            self.assertEqual(f.block_size, self._block_size)
            self.assertEqual(f.block_count, self._block_count)
            self.assertEqual(f.storage_name, self._testfname)
            self.assertEqual(f.header_data, bytes())
        self.assertEqual(self._check_exists(self._testfname), True)
        dataafter = self._read_storage(self._original_f)
        self.assertEqual(databefore, dataafter)
        kwds = dict(self._type_kwds)
        del kwds['bucket_name']
        with self.assertRaises(ValueError):
            with self._type(self._testfname, **kwds) as f:
                pass                               # pragma: no cover
        # The failed open must not have modified the stored bytes.
        dataafter = self._read_storage(self._original_f)
        self.assertEqual(databefore, dataafter)
    def test_setup_fails_no_bucket(self):
        """setup() must require a bucket_name keyword."""
        self.assertEqual(self._check_exists(self._dummy_name), False)
        kwds = dict(self._type_kwds)
        del kwds['bucket_name']
        with self.assertRaises(ValueError):
            self._type.setup(self._dummy_name,
                             block_size=1,
                             block_count=1,
                             **kwds)
        self.assertEqual(self._check_exists(self._dummy_name), False)
    def test_setup_ignore_existing(self):
        """setup() refuses to clobber existing storage unless the
        ignore_existing flag is passed."""
        self.assertEqual(self._check_exists(self._dummy_name), False)
        with self._type.setup(self._dummy_name,
                              block_size=1,
                              block_count=1,
                              **self._type_kwds) as f:
            pass
        self.assertEqual(self._check_exists(self._dummy_name), True)
        # A second setup over the same name fails by default...
        with self.assertRaises(IOError):
            with self._type.setup(self._dummy_name,
                                  block_size=1,
                                  block_count=1,
                                  **self._type_kwds) as f:
                pass                               # pragma: no cover
        self.assertEqual(self._check_exists(self._dummy_name), True)
        # ...but succeeds when explicitly told to overwrite.
        with self._type.setup(self._dummy_name,
                              block_size=1,
                              block_count=1,
                              ignore_existing=True,
                              **self._type_kwds) as f:
            pass
        self.assertEqual(self._check_exists(self._dummy_name), True)
        self._remove_storage(self._dummy_name)
class TestBlockStorageS3Mock(_TestBlockStorageS3Mock,
                             unittest.TestCase):
    """Mock-S3 backend tests with default thread-pool settings."""
    _type_kwds = {'s3_wrapper': MockBoto3S3Wrapper,
                  'bucket_name': '.'}
class TestBlockStorageS3MockNoThreadPool(_TestBlockStorageS3Mock,
                                         unittest.TestCase):
    """Mock-S3 backend tests with the thread pool disabled."""
    _type_kwds = {'s3_wrapper': MockBoto3S3Wrapper,
                  'bucket_name': '.',
                  'threadpool_size': 0}
class TestBlockStorageS3MockThreadPool(_TestBlockStorageS3Mock,
                                       unittest.TestCase):
    """Mock-S3 backend tests with four worker threads."""
    _type_kwds = {'s3_wrapper': MockBoto3S3Wrapper,
                  'bucket_name': '.',
                  'threadpool_size': 4}
@unittest.skipIf((os.environ.get('PYORAM_AWS_TEST_BUCKET') is None) or \
                 (not has_boto3),
                 "No PYORAM_AWS_TEST_BUCKET defined in environment or "
                 "boto3 is not available")
class TestBlockStorageS3(_TestBlockStorage,
                         unittest.TestCase):
    """Run the shared block-storage tests against a real S3 bucket.

    Only enabled when the PYORAM_AWS_TEST_BUCKET environment variable
    names a bucket and boto3 is importable.
    """
    _type = BlockStorageS3
    _type_kwds = {'bucket_name': os.environ.get('PYORAM_AWS_TEST_BUCKET')}
    @classmethod
    def _read_storage(cls, storage):
        # Concatenate the index object followed by every block object
        # ("<name>/b<i>") stored under the storage prefix.
        data = bytearray()
        name = storage.storage_name
        s3 = Boto3S3Wrapper(cls._type_kwds['bucket_name'])
        prefix_len = len(name+"/b")
        nblocks = 1 + max(int(obj.key[prefix_len:]) for obj
                          in s3._bucket.objects.filter(Prefix=name+"/b"))
        data.extend(s3.download(name+"/"+BlockStorageS3._index_name))
        for i in range(nblocks):
            data.extend(s3.download(name+"/b"+str(i)))
        return data
    @classmethod
    def _remove_storage(cls, name):
        # Delete every object under the storage prefix.
        Boto3S3Wrapper(cls._type_kwds['bucket_name']).clear(name)
    @classmethod
    def _check_exists(cls, name):
        return Boto3S3Wrapper(cls._type_kwds['bucket_name']).exists(name)
    @classmethod
    def _get_empty_existing(cls):
        # A pre-existing, empty prefix expected in the test bucket.
        return "exists.empty"
    @classmethod
    def _get_dummy_noexist(cls):
        # Generate a unique name that exists neither locally nor in the
        # bucket; the local temp file is removed immediately and only
        # its unique name is kept.
        s3 = Boto3S3Wrapper(cls._type_kwds['bucket_name'])
        fd, name = tempfile.mkstemp(dir=os.getcwd())
        os.close(fd)
        os.remove(name)
        while s3.exists(name):
            fd, name = tempfile.mkstemp(dir=os.getcwd())
            os.close(fd)
            os.remove(name)
        return name
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main() # pragma: no cover
| |
# --------------------------------------------------------------------------------- #
# MULTIDIRDIALOG wxPython IMPLEMENTATION
#
# Andrea Gavana, @ 07 October 2008
# Latest Revision: 28 Sep 2012, 21.00 GMT
#
#
# TODO List
#
# 1) Implement a meaningful action for the "Make New Folder" button, but this
# requires a strong integration with Explorer, at least on Windows;
#
# 2) Be more user-friendly with special folders as the Desktop, My Documents etc...
#
#
# For all kind of problems, requests of enhancements and bug reports, please
# write to me at:
#
# andrea.gavana@gmail.com
# andrea.gavana@maerskoil.com
#
# Or, obviously, to the wxPython mailing list!!!
#
#
# End Of Comments
# --------------------------------------------------------------------------------- #
"""
This class represents a possible replacement for :class:`DirDialog`, with the additional
ability of selecting multiple folders at once.
Description
===========
This class represents a possible replacement for :class:`DirDialog`, with the additional
ability of selecting multiple folders at once. It may be useful when you wish to
present to the user a directory browser which allows multiple folder selections.
:class:`MultiDirDialog` sports the following features:
* Ability to select a single or multiple folders, depending on the style passed;
* More colourful and eye-catching buttons;
* Good old Python code :-D .
And a lot more. Check the demo for an almost complete review of the functionalities.
Usage
=====
Usage example::
import os
import wx
import wx.lib.agw.multidirdialog as MDD
# Our normal wxApp-derived class, as usual
app = wx.App(0)
dlg = MDD.MultiDirDialog(None, title="Custom MultiDirDialog", defaultPath=os.getcwd(),
agwStyle=MDD.DD_MULTIPLE|MDD.DD_DIR_MUST_EXIST)
if dlg.ShowModal() != wx.ID_OK:
print "You Cancelled The Dialog!"
dlg.Destroy()
return
paths = dlg.GetPaths()
for indx, path in enumerate(paths):
print "Path %d: %s"%(indx+1, path)
dlg.Destroy()
app.MainLoop()
Supported Platforms
===================
:class:`MultiDirDialog` has been tested on the following platforms:
* Windows (Windows XP).
Window Styles
=============
This class supports the following window styles:
===================== =========== ==================================================
Window Styles Hex Value Description
===================== =========== ==================================================
``DD_NEW_DIR_BUTTON`` 0x000 Enable/disable the "Make new folder" button
``DD_DIR_MUST_EXIST`` 0x200 The dialog will allow the user to choose only an existing folder. When this style is not given, a "Create new directory" button is added to the dialog (on Windows) or some other way is provided to the user to type the name of a new folder.
``DD_MULTIPLE`` 0x400 Allows the selection of multiple folders.
===================== =========== ==================================================
Events Processing
=================
`No custom events are available for this class.`
License And Version
===================
:class:`MultiDirDialog` is distributed under the wxPython license.
Latest Revision: Andrea Gavana @ 28 Sep 2012, 21.00 GMT
Version 0.3
"""
import os
import wx
import wx.lib.buttons as buttons
from wx.lib.embeddedimage import PyEmbeddedImage
# MultiDirDialog styles
DD_MULTIPLE = 1024
""" Allows the selection of multiple folders. """

DD_DEFAULT_STYLE = wx.DD_DEFAULT_STYLE
""" Equivalent to a combination of ``wx.DEFAULT_DIALOG_STYLE`` and ``wx.RESIZE_BORDER``. """

DD_DIR_MUST_EXIST = wx.DD_DIR_MUST_EXIST
""" The dialog will allow the user to choose only an existing folder. When this style is not given, a "Create new directory" button is added to the dialog (on Windows) or some other way is provided to the user to type the name of a new folder. """

DD_NEW_DIR_BUTTON = wx.DD_NEW_DIR_BUTTON
""" The `Make New Folder` button will be displayed. """

# Shorthand for wxPython's translation lookup function.
_ = wx.GetTranslation
_cancel = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAA1dJ"
"REFUOI11019oEwccB/Dv3eUuyZ2XpfljsmJ7JY01KZYWty6bdMwnp1X34JNS5sPAsmYruOnL"
"3kTGcPg6Bdkexqql4EPdBuKbVG0xLmpoWjbW0D+S1Jg24RJzuSR3l58PtpsI/l5/fB5+3x9f"
"AEDc7VauhMP3prq7q9+1t5/AW+aiLB+ZDocrU6HQk4tAFAC4s8Gg0uVyXTsZiw190Nsr6JnM"
"kZAkrd6rVtOv4wuyfLS/rW3y6Oioq2tgILiRyXy4v1yexU979yaKIyNEiQRRsUjG2Bjddrtr"
"532+k9v4B1kevu33l+vnzhFtbBAtL9OLS5douq9v0eZ1OPo8Xi8gSUClAls8jk+qVad148bP"
"33s8TcY0K32mOTV07JhsP3UKKJUAy8IORYF3584erodopaGqh7qzWYEJBgGGgW3fPrQ/eyY0"
"5uePewzjxIGDB0U5HgcsC1BV0MOH+GtiojF/9+433P1qNd1pGCvs5uawUijwbDAIWBZsAwPw"
"5nJsRyBgc8fjYLZwK5lE6uZN88Hc3LdfmeYVDgDu12oLXUSrxvPnw8r6uo3z+cAQwRGJQOzv"
"B0sEKhZhJRJI3rplJlKpM+OWdRkAuO2gZnQ93UO02CgUjr9bLHKCzweGZcGYJqhchp5I4NGd"
"O9bjpaUvxol+2Xa211/FAKolSa0XySSq+TzYYBAAYGkaUKnA5LgWA6hvmP//PKgokx9tbspq"
"Pg8NgL61c0gSJL8f73R04O9KRV9Mp0+PtlrX/zvhgigO749GJ4dKJVc9l0MTgAVAZBg4BQEk"
"SeCcTjAAOhWF5/3+w7FsdvkPogXuR7f7s/d6eycPqKqrubKC+hZ28DxydnurzHFWwG5niefB"
"CALYVgu7wmGe2toOfby2lrVFIpFrn9brcmNpCU0ALIAdooiMw9FI1etfkmGUbaY5EXY4JIth"
"YAIw1tcxODgoEcddZeua9rQqCGB5HgwA0e3GmsdjPtH1s1/Xar+ON5vTi6p6+qmm6U5JAksE"
"VhBQbzahl0p57n1Nm9kQxVhXINAucxzSLpeZLBTOxHX98nbAfxItxMrlVV4UD+/q7OTJ58Pc"
"7Ow/uVTq81c1FYTo76HQo5k9expXnc6xt9X5OsuOPIhGtZndu//9DYgBwEt1gHq0YITgmAAA"
"AABJRU5ErkJggg==")
#----------------------------------------------------------------------
_ok = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAjdJ"
"REFUOI2tksFLk3Ecxp+97975vmuve1dWuiUTNIy1JlsLpkZG0aXLbv0B0aVDUMfVQTp0jJpF"
"EHl5LxUZgZcuQjAID4KUyWwyEU3d9m7O5d733dze97dfB1siJSn1nJ/P5+ELX+Afwx6YuAMB"
"AVgwjcaBBdIovP2eyKMLPYNdM+7kNKZA9i3gR+ENCeF4Hx+8VigVBgrKWrXKGp/2JeCfwhsW"
"Q/HTQiCaVTOYUiZtDuoMQqefrc1S9+uOEGNSRzqd+4j72/c1l4OOQNwn+aOFWg5TdBJEIKbH"
"dI9zHLMt6H3lHrjScfU5x3DSmOXNrVUUxwFQ6S3vDdh9cZ/zTHSz8R0pMguGMKaRMuX5peQ9"
"ZULPW8+PnB286L78zH/M76/DwCYtjSTefaAOQZjpEDofn5J8UR0qViqLoCpLql+IXFzS72IC"
"eQCwssR2NFfOtNXsFZx09SLkDnfSlsYTluUy7a3Hz6mWMrLGKswiJaV0WS6Uyr9gAGC7It0L"
"WrWYm99K9VdcqugSD8Pd6nG6RNeJCq9ZstwqNL1CMl/z8npdiRkPd2AAYJcTy41FcSVZt+lK"
"na9FaLspCg4ehDew3qJgs6qStUxerhItlr+h74KB5iPNgVZuGkm6QpQWmy3i8AoiY7dA1XTy"
"LZuVGYHGZi8t/gbvCABgDFS7vpVEgSgS29bv5CR7XtmQjxxyxt77En+Edwt+Svpua3MbRT5T"
"a9QXPGL7gxc9L/eE98wwHWaG6JD1783/kB9qTvueLt8LjwAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
_cdrom = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAArRJ"
"REFUOI11kc9rm3Ucx1/f5/eTLV2aJ2vqVseGWzeYDAbCCq2THQqiuB3mP+DBQ3ss3rysILLb"
"2I5FhKHkNFmFHkrFoVDQDautI02ZWqGJ7WzWEkzy/M73u1NKbNj79Dl8Xi8+P+BQhoeHj09N"
"Td1aWlr6s1qtNjY3N/dLpdIvExMTHwPG4f7/ZWRk5M35+fmnqidSSqWUUlEUqdnZ2W+B3Kv4"
"wbm5uaeNRkO1220VRZEKw1D5vq/CMDwQTk9PfwVoffTk5ORMpVJR5XJZ1Wo1FYahCoJAtVot"
"laapSpJEBUGgNjY2VLFYvNblDkzj4+PvJ0kCgJSSvb09tv7eiuv/1tMgDGg2m+zu7mKaJmNj"
"Yx90uYOj5PP5k2ma4jgOuqbT/K/JvYd3n4+eOu9cH7s+lMiE/f19hBAUCoUzfYIkSYJ8Po+u"
"69i2TZIk3Hz3w1MqUtT36iRpgu/7ZDIZfN+P+1ZYXV39bWBgANd1OZo9ilfwuDB0gYunL+K4"
"Dq1WCyEEcRyztra22idYWFj4srxW9j3PQ0pJo9EADWzHxvM8juWO4doZln9c3llfX/+my+nd"
"IgzDrUpceeftS1ffcHSX+os6Ukosy8I0THJHBnn87Cduf/H5/dZO++s+AcA/V2sfbYa/nmFb"
"QwYamjJACWrbVVY2HvMDiyxXnvzMXyz2HRGw8ifJ+6N/sNi+QzE4jbd9Auu5g3Jh6OxrjGZP"
"4HgUgh6oV2B++tZngxOXr2AbBpZpYGomujIR0kTFOqmQ/P56NVfiQb8gm80640fey9nPLKI4"
"IkKhAKk6CDocHyqQcVyuFK8NlnhgAOnhCag36k6pdJ92u43ruliWhRACgWDmkxl27G2anVam"
"93uih9dv3Lh569y5s5fjuCMNQ6BpIIROp9NB13XQ0R599+j7lZUnd7rQS0kMSYjIJmZ4AAAA"
"AElFTkSuQmCC")
#----------------------------------------------------------------------
_computer = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAshJ"
"REFUOI1dk91PXGUQxn/ve979YkHXmhZ325oUa9Wlxg9q8caoCRde9Hb/Bv4KQ8J/AXdceFF6"
"YYJXNCYWE0xITAxt1cjXKpiYsnQpH3v2sOfMTC8WAZ2rmWSeZ56ZJ+MAzKwEXIHEQ5ELYedp"
"QpKcV8Vise2cOwwAnU63sdFsTx0cnpiJoipkqkiaIaa0Wh2etQ4tyxRVo1xy0eefXf0G+DoA"
"ZJlea/7VGRksF1B1iICIQwUyEb79boMfl/8khDy4wLVamdF3X33LzHwAUJQ47k82U1QVkX7e"
"3u+y2XyB9w7THkZGlnkUNYDw705HHeXgqIdZH6wqmCq/r7XZPzBCroRKSvDKrZsVIt/HnREc"
"x8bRcYaZoCKICCIZf2wcY65IFAIQeOdWhfdH30D1PwSeuOvYfS5wqkBEiOMeu3t6Oj2jXC4x"
"+l6NblfO7Al9OMSJp9V2YJwe0XhxIPSyHFEAH2Vcvz5AL4vY2z85RV1YodM1dp8bDodI34nj"
"Y4+LSkQuUCwYUcjz9z8ppYLiLipQlLiT0NpLCCEHOFQDIuCDxzRgTtnd6zt1+RL4KLqgQDP9"
"6oscI28mmPVwPiKKgoUw4CLvyLLURFKX9nqc9E4oBCUfsnMbvfff3/lgoHK50vLLPy3zbLcV"
"jdy48eHdjz75slAouidPnj7+7denj1wUpXc+HrPq1ZqrDlcfOec0AFQqlZ8bjcYvW1tbgyJy"
"d3x8/F6pOHBlsPyKS9MeWZq+liS9oZWVlYcP7j/4YWJiYn92djY9e4xGoxEBQ8Db09PTC5ub"
"m7a+vmZLS0u2uLhoq6urtr29bXNzc4+HhoY+BS6NjY3lgLNjMj8/Hy0sLBTb7fbtarV6r16v"
"387n86+LiHfOHTabzfW1tbWHuVxueXh4uDMzM5M55+yM4GJMTU35OI7LOzs7AyLiarVaUq/X"
"O5OTk+n/e18CKWqFGqiV9Q4AAAAASUVORK5CYII=")
#----------------------------------------------------------------------
_folder_close = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAcBJ"
"REFUOI2Nk0FrE1EQx3+zL7vbmFKRQkE/gyfpISL4WYJHsQfPvXkQe6go+AnEi6dcBBEkhYgS"
"oQfFeqo0VJRobaspm2R339sdD9u0IclC/zAwzLz5zbzhPeFUnU5HuYDq9brMBB+/+KQXVavV"
"+jBZWxk79zffl3Z9dO8GQbCAiAAEM4DvvyI21m4SpyCiaK6ogqqiwN2nWzxbu0W1WpuBn00w"
"ih3H/YwkcbgMnFOcU5K0yKdJTLVawzk3H3D8z9GPHKqKy3QGYK0Fiqkm5Y2do77l3ec+mQhD"
"q+eWFgXjzr1ebzgXgBG2u0O+7A/JRYhzCjttqJrTbDb3G43G7blXyEQIlkI+dmNiPK5dqeBE"
"sJoXO7CGdru9VbrEXDzCyyEisPPH8XOgrCwaFgysXl/lwcttLqWjmUd0BnCeh78UYgQQiJxy"
"cpJj8gxcUZdbVw5Q47FYM1REESBTSJ0htYZkVCxChXKAXxGWq4bAnAPiDAbWMPCFEbD7bfew"
"FPD34Hfa3TuKlmth4PsVycWTQYoeDp39EXnpVVh522pvTgPGI4V3Nt7E08lJvXr+ZP3g6+uH"
"ZYB5EsCn+DwhEAHJ9KH/LOgEF+oT+k8AAAAASUVORK5CYII=")
#----------------------------------------------------------------------
_folder_open = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAvdJ"
"REFUOI1tU01o23UYft7/75ekqXVJWsO6HaZWzDyIih4UCkJBGIroUATxWPEyLx6EDZzz5Dzr"
"xB3FkygoCA7ZR0A3g60zWdutbrZLTNOvJI1pmvyT/8fv4/WQFFzwgZf36zm8HzyEeyEAEAAD"
"gAexM/B6iEsDAwak4eZwzXk48/gTr7w5+04qlUoMkwEgnnnyuekPz372xbGXjr8GAKPp9OSL"
"L796/PQnn57/5Y/lv9q+tp3A8KEHM48BqcT4o888CwA49taJ04vFeqPta22s5Wtz+cL5r77/"
"YW3HdY213Pa1Xd5w+WK+Yr/NremzX17MLZVqrQu5m/MEgM6c+yb70btvzLR6RlmtEInGhKvg"
"lDcb2Nr1jR+yTSbvc6YeGHGOpOMYjZAlInEhm7sm3/7g8+/em319ZqPR061OV5Z3Amq4yhoW"
"eiI1JqYOJZ3JiTFxIMasNEMb0FbHU2OjI6LWaAdyYXHpTk8T1qpN+unmHo4emUT64LhzOCGd"
"iNM/T3VPoWZCsk6Eo8KBUobageev77Q6cnE++2O+2DwVeBCuZ3g8GYfraax4/a9FRP8ZQgDM"
"ISJSQAeKtN/tblabDal3ioW529XVp6fSmZqr+M62j0AzAYAjJKKOQlT2J2FmRIQBQE6n6QWl"
"zXpTAvAvfX3u16MnP85U29r+vNIT9a5BIi4BADEBjEQIsQgBzBBEDCK5sFT5Z+VWZUsCwFpp"
"9cr2bm+27QGtPUZUAnuDFYwBg4jAzIBmrZQlZlHZ5UTY9RwJAI3SQm61VPZk9P54CHCfvA/D"
"VimjtCUCRGiYAKJY2Eg6brEuARD83crq8o1C9KEXpgOC0caQ0YqtNk5oyQGYwm6LwvpK26tc"
"z7ul61ld//MytHdDDgSky7fmsgcPPz/t6Q4TWABA0Nwgv3Z7y/v7t5y/XrikGsWrAO4CsPvz"
"yf1k++7vl+mp2hkVdE2wkV/2y/NXe+uFK/Ba8wDqw8IaePvf4gE58cj7ELEZAKP/o859qd+D"
"fwFu65tDy5tMTAAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
_hd = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAxlJ"
"REFUOI11ks1vG1UUxc+b98YzyfM4gxzSpHHttCFRbQto7YQgxRKIRRYIZRepEkQVzYJ/gAUi"
"i/wlSLBhFSEWZMUiQkLCSmUk0o6rNiExcvNRR/Z0xh6P5/OxqFwlfNzVWZz7u0dXh+B/Zn19"
"Pd1qtQpxHBeiKCoKId4aHR39dGdnx7zsY0Oxurq6FIbhZ4yxwgeVcnHuVuaarmvwPB/nL9r4"
"/Y+neHrQ+BLA5mUAHYp8Pr8uSdLXD+7fu/nRh5XktckpKOoIONcwNzeP998r483x8bvKSPKb"
"/f19Z7gnDYUsyz+nUiksLi4ioWqIBcFfBw/R/LOK1nkTCVVDpVJJLpbvfPWfCQzDON/e3v7k"
"9szk9Z7VwUuzA4WnoaXS6LQ7CAYD2C/bODlr3c3O3Pq2Vqt1ryQghKDb7X7XPG9BH5/ExNQN"
"DPo9nJ2+wMxsEfr4JPhYGkEYqrIsb/4rAQBwzp+HUfRF5no6MX1jFlOZmxAihtVuYpSnYDyq"
"QdUm0O323i2Xy99Xq1XzCiCZTPqcp/K247192jxA4DmI4wDPT88xMZXF7NxtPDaeIZfLUdd1"
"39jd3f2RXAYIIcjS0tLHy8vLP42NjUGWZTDGIEkS4jiGruuglIIQAtd1o5OTk3fYZQAhRGia"
"Vi0Wi0/m5+fzhFzhAwBc14VlWbAsi5qmeZ/901AqlazDw8MfSqXSZiKRgOM4sG0bpmmi0+mg"
"3++DUtpWFOWR53m/vT6xtbUl1et1cnR0JDUajTsrKyu/+L4/4nleGIZhw/O8x0EQPLQs69fj"
"4+Mnuq73NjY2PLK2tkYNw6CmaTLP85jv+wnf99O5XO7zKIrMs7OzZ77vdwkhPiHEppSaiqLY"
"09PTjmEYASkUCgnbtqnruiwIAjkMQyWKIkUIoQohZACyEIK+ehEJCCEOY8zmnPey2azHisVi"
"ZBjGq15LkqCURmEY+nEc94UQVAgxLJuQJClkjAWqqvrpdDqo1WohrdfrotVqxbZtR47jRJzz"
"kDEWaJrmqao64Jy7nHNX1/V+JpNxFxYWBnt7e/7FxUUEAH8DenV0NY9j5foAAAAASUVORK5C"
"YII=")
#----------------------------------------------------------------------
_new = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAqpJ"
"REFUOI1t0c1rXGUUx/Hvc+/czGtmEk1NTCYWY1BERAJVRMW4cCEIunHTnYjgQrduRHDTrX+F"
"u+ZvEF2IRVCRRkpt1VSTmqQ1rzNz733uc855XDSRkfHsz+ec3zmO/6mrVz9vPjHTm52e6c23"
"mq2lmpPFVE/6qrYUcXMON+2y7NP5Zy9/Wxtv/Gx9vXb5ynsfLczPvZnV0kfT+uycq6WdmO/V"
"82GaNtsPucx5NAyqoDYD8B+gc2m53mk13pluZy9DgptK8b5kZ/sPkqzH4xdmiMUeopJU4jKA"
"ZBwYDo4j0cRUiDESo3B8uMfmjV85Hea4GIgmqIRExJoTwGFd1LBgKpgZJp4qP6YoSqIJ0c4A"
"DS5xNjURwfv7Fk28acC5Gi6MsGqIqUA0iIKZYKYuOEsngKOjFZMgXlVIkgBSIOIxOwMsoBIQ"
"FSwGNxFhY2MjqkpQC2jwiB+gUiEqBA1UVYlIhYgSQmBiAwAViaqCaSCGHJGKO+6EnYMf+ObH"
"67zYW2C50aXSB701wAEZ0HzjlbWLVfArKlOgHvTBNO1FwsIBh6OK1aLNQtImRmmdAy2gD3Sf"
"ear/em/+ybWg+0g+4Pt7f7IzOkVmhXovoJmwuXeXraMDsE7jHPBAClwog8yS9ZJQ3qUoCm76"
"Q/J+TqsraDPH0iF3yl2G96B2uvxvBDmL8fAoL6crVVxZEipBNDCo/qYq95HkmMoLeQVVaNKN"
"uPEjeqCd+9C9+VfOonkyNS5al/Yu3J4qOJ3bJamarBw8x5R0bt0oTr4eB7aAzbIIa8lop4hp"
"WSPJ9p+fX71tMf3p59+3Xy1j+kISUh5L+5tvP73+Qf+196+NAwp8d+u37ft+/5evWquLmrSa"
"17uN/vbSpbfylz5Z+bg7eoQsNtIv/daVD9/94tr52/8BSS2agPSymFoAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
_removable = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAldJ"
"REFUOI2lkbFOXFcQhr+Zc+7uXRZvFi2GOFHiSInA7CLFCnaRCokij0DjyElFh9K6sfwQeQEX"
"rngAvwBNsCyniNCChCKFSCYOkWADd/fuvfeccUEgrCU3yS+NNNN88/8z8D8l14d+v38jTdv3"
"fD3tqEpdRJxE1AzDYYCFoswGg8HL5eXPDwH8dYBzzR8/aM9855x778ZQC2TZ+TEwB6ATAK8r"
"SZLgvefP316SZed47zl902fw+hXOOZxTfKKzz5//1Jpw8OSJaZIcfyFykeqPw1+QX7f5aOEb"
"pP+Iqv4p1YdfUlUF3omUbtwB/r5ysLa2cztNG+nl3P36W5qzPaZ2HzL67BH15ceMxxnjYkiM"
"FXOt5mSEznxn0aliZoSqRFXofNzjIHnA9M0F1HvG4yFVOQag0UhnJiIkkixEixTFiBgqQqg4"
"G/xFdfY7+eicNG0g6nBaQ6SiVivmJgDiZKEsc8CIoSLGQNqc4cbS9zSmpvEuQZ1D1CFS4BJ/"
"cwJQFKPFIRGwC6Aq6mp0Zm9hplQR1AzRSFUFLFhnAvBi5+e7z549Jc9zzGzy+WYYsHinS7N7"
"wnyyxNIn9+evAKurqx5ke3Pzh68smkRMMDCLglz2iHOJjeKJNXw7HB0dvwCQlZWV5ODgoFkU"
"RTOE0DSzGpCYmf9ngQfUzFREKqAQkaH3/qTb7b5xGxsb7O3tWVmWAPGyRCQApYgUIpKLyFBV"
"z1X1zHs/aLfbp/v7+4X8G9NkfX1dd3d3XZZlmue5izFKjFFU1er1emi1WqHX64Wtra0oIu8c"
"6j/qLUda/yKP2243AAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
class MultiDirDialog(wx.Dialog):
"""
A different implementation of :class:`DirDialog` which allows multiple
folders to be selected at once.
"""
def __init__(self, parent, message=_("Choose one or more folders:"), title=_("Browse For Folders"),
defaultPath="", style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER, agwStyle=DD_MULTIPLE, pos=wx.DefaultPosition,
size=wx.DefaultSize, name="multidirdialog"):
"""
Default class constructor.
:param `parent`: the dialog parent widget;
:param `message`: the message to show on the dialog;
:param `title`: the dialog title;
:param `defaultPath`: the default path, or the empty string;
:param `style`: the underlying :class:`Dialog` window style;
:param `agwStyle`: the AGW-specific dialog style; this can be a combination of the
following bits:
===================== =========== ==================================================
Window Styles Hex Value Description
===================== =========== ==================================================
``DD_NEW_DIR_BUTTON`` 0x000 Enable/disable the "Make new folder" button
``DD_DIR_MUST_EXIST`` 0x200 The dialog will allow the user to choose only an existing folder. When this style is not given, a "Create new directory" button is added to the dialog (on Windows) or some other way is provided to the user to type the name of a new folder.
``DD_MULTIPLE`` 0x400 Allows the selection of multiple folders.
===================== =========== ==================================================
:param `pos`: the dialog position;
:param `size`: the dialog size;
:param `name`: the dialog name.
"""
wx.Dialog.__init__(self, parent, pos=pos, size=size, style=style, name=name)
self.agwStyle = agwStyle
self.dirCtrl = wx.GenericDirCtrl(self, size=(300, 300), style=wx.DIRCTRL_3D_INTERNAL|wx.DIRCTRL_DIR_ONLY)
self.folderText = wx.TextCtrl(self, -1, defaultPath, style=wx.TE_PROCESS_ENTER)
self.CreateButtons()
self.SetProperties(title)
# Setup the layout and frame properties
self.SetupDirCtrl(defaultPath)
self.LayoutItems(message)
self.BindEvents()
if parent and pos == wx.DefaultPosition:
self.CenterOnParent()
def SetupDirCtrl(self, defaultPath):
"""
Setup the internal :class:`GenericDirCtrl` (icons, labels, etc...).
:param `defaultPath`: the default path for :class:`MultiDirDialog`, can be an
empty string.
"""
il = wx.ImageList(16, 16)
# Add images to list. You need to keep the same order in order for
# this to work!
# closed folder:
il.Add(_folder_close.GetBitmap())
# open folder:
il.Add(_folder_open.GetBitmap())
# root of filesystem (linux):
il.Add(_computer.GetBitmap())
# drive letter (windows):
il.Add(_hd.GetBitmap())
# cdrom drive:
il.Add(_cdrom.GetBitmap())
# removable drive on win98:
il.Add(_removable.GetBitmap())
# removable drive (floppy, flash, etc):
il.Add(_removable.GetBitmap())
# assign image list:
treeCtrl = self.dirCtrl.GetTreeCtrl()
treeCtrl.AssignImageList(il)
if self.agwStyle & DD_MULTIPLE:
treeCtrl.SetWindowStyle(treeCtrl.GetWindowStyle() | wx.TR_MULTIPLE)
if not defaultPath.strip():
return
# Set the wx.GenericDirCtrl default path
self.dirCtrl.ExpandPath(defaultPath)
self.dirCtrl.SetDefaultPath(defaultPath)
self.dirCtrl.SetPath(defaultPath)
self.folderText.SetValue(treeCtrl.GetItemText(treeCtrl.GetSelections()[0]))
def SetProperties(self, title):
"""
Sets few properties for the dialog.
:param `title`: the dialog title.
"""
self.SetTitle(title)
self.okButton.SetDefault()
if self.agwStyle & wx.DD_DIR_MUST_EXIST:
self.newButton.Enable(False)
def LayoutItems(self, message):
""" Layout the widgets using sizers. """
mainSizer = wx.BoxSizer(wx.VERTICAL)
textSizer = wx.BoxSizer(wx.HORIZONTAL)
bottomSizer = wx.BoxSizer(wx.HORIZONTAL)
staticText = wx.StaticText(self, -1, message)
f = staticText.GetFont()
f.SetWeight(wx.BOLD)
staticText.SetFont(f)
# Add the main wx.GenericDirCtrl
mainSizer.Add(staticText, 0, wx.EXPAND|wx.ALL, 10)
mainSizer.Add(self.dirCtrl, 1, wx.EXPAND|wx.ALL, 10)
label = wx.StaticText(self, -1, _("Folder:"))
textSizer.Add(label, 0, wx.LEFT|wx.RIGHT|wx.ALIGN_CENTER_VERTICAL, 10)
textSizer.Add(self.folderText, 1, wx.RIGHT|wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, 10)
mainSizer.Add(textSizer, 0, wx.EXPAND|wx.BOTTOM, 10)
# Add the fancy buttons
bottomSizer.Add(self.newButton, 0, wx.ALL, 10)
bottomSizer.Add((0, 0), 1, wx.EXPAND)
bottomSizer.Add(self.okButton, 0, wx.TOP|wx.BOTTOM, 10)
bottomSizer.Add(self.cancelButton, 0, wx.TOP|wx.BOTTOM|wx.RIGHT, 10)
mainSizer.Add(bottomSizer, 0, wx.EXPAND)
# Layout the dialog
self.SetSizer(mainSizer)
mainSizer.Layout()
mainSizer.Fit(self)
mainSizer.SetSizeHints(self)
def GetPaths(self):
""" Returns the folders selected by the user, or the default path. """
# Retrieve the tree control and the selections the
# user has made
treeCtrl = self.dirCtrl.GetTreeCtrl()
selections = treeCtrl.GetSelections()
folders = []
# Loop recursively over the selected folder and its sub-direcories
for select in selections:
itemText = treeCtrl.GetItemText(select)
# Recurse on it.
folder = self.RecurseTopDir(treeCtrl, select, itemText)
folders.append(os.path.normpath(folder))
return folders
def RecurseTopDir(self, treeCtrl, item, itemText):
"""
Recurse a directory tree to include the parent-folder.
:param `treeCtrl`: the tree control associated with teh internal :class:`GenericDirCtrl`;
:param `item`: the selected tree control item;
:param `itemText`: the selected tree control item text.
"""
# Get the item parent
parent = treeCtrl.GetItemParent(item)
if parent != treeCtrl.GetRootItem():
# Not the root item, recurse again on it
itemText = treeCtrl.GetItemText(parent) + "/" + itemText
itemText = self.RecurseTopDir(treeCtrl, parent, itemText)
return itemText
def BindEvents(self):
""" Binds the events to specific methods. """
self.Bind(wx.EVT_BUTTON, self.OnOk, self.okButton)
self.Bind(wx.EVT_BUTTON, self.OnCancel, self.cancelButton)
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.Bind(wx.EVT_CHAR_HOOK, self.OnKeyUp)
self.dirCtrl.GetTreeCtrl().Bind(wx.EVT_TREE_SEL_CHANGED, self.OnSelChanged)
def CreateButtons(self):
""" Creates the ``OK``, ``Cancel`` and ``Make New Folder`` bitmap buttons. """
# Build a couple of fancy buttons
self.newButton = buttons.ThemedGenBitmapTextButton(self, wx.ID_NEW, _new.GetBitmap(),
_("Make New Folder"), size=(-1, 28))
self.okButton = buttons.ThemedGenBitmapTextButton(self, wx.ID_OK, _ok.GetBitmap(),
_("OK"), size=(-1, 28))
self.cancelButton = buttons.ThemedGenBitmapTextButton(self, wx.ID_CANCEL, _cancel.GetBitmap(),
_("Cancel"), size=(-1, 28))
def OnOk(self, event):
    """
    Handles the ``wx.EVT_BUTTON`` event for the dialog.

    :param `event`: a :class:`CommandEvent` event to be processed.

    :note: Invoked when the user presses the ``OK`` button.
    """

    # Dismiss the modal loop, reporting a successful confirmation.
    self.EndModal(wx.ID_OK)
def OnCancel(self, event):
    """
    Handles the ``wx.EVT_BUTTON`` event for the dialog.

    :param `event`: a :class:`CommandEvent` event to be processed.

    :note: Invoked when the user presses the ``Cancel`` button.
    """

    # Cancelling is equivalent to closing the dialog.
    self.OnClose(event)
def OnClose(self, event):
    """
    Handles the ``wx.EVT_CLOSE`` event for the dialog.

    :param `event`: a :class:`CloseEvent` event to be processed.
    """

    # Dismiss the modal loop, reporting that the user aborted.
    self.EndModal(wx.ID_CANCEL)
def OnKeyUp(self, event):
    """
    Handles the ``wx.EVT_CHAR_HOOK`` event for the dialog.

    :param `event`: a :class:`KeyEvent` event to be processed.
    """

    key = event.GetKeyCode()

    if key == wx.WXK_ESCAPE:
        # Escape dismisses the dialog without taking any action.
        self.OnClose(event)
    elif key in (wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER):
        # Enter confirms the current selection.
        self.OnOk(event)

    event.Skip()
def OnSelChanged(self, event):
    """
    Handles the ``wx.EVT_TREE_SEL_CHANGED`` event for the tree control associated
    with :class:`MultiDirDialog`.

    :param `event`: a :class:`TreeEvent` event to be processed.
    """

    # Nothing to do while the dialog is shutting down.
    if self.IsBeingDeleted():
        event.Skip()
        return

    item = event.GetItem()

    # Ignore bogus tree items.
    if not item.IsOk():
        event.Skip()
        return

    # Mirror the newly selected folder name into the text control.
    label = self.dirCtrl.GetTreeCtrl().GetItemText(item)
    self.folderText.SetValue(label)
    self.folderText.Refresh()

    event.Skip()
| |
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""High-level code for creating and running FIVO-related Tensorflow graphs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import tensorflow as tf
from fivo import bounds
from fivo import smc
from fivo.data import datasets
from fivo.models import base
from fivo.models import srnn
from fivo.models import vrnn
def create_dataset_and_model(config, split, shuffle, repeat):
  """Creates the dataset and model for a given config.

  Args:
    config: A configuration object with config values accessible as properties.
      Most likely a FLAGS object. This function expects the properties
      batch_size, dataset_path, dataset_type, and latent_size to be defined.
    split: The dataset split to load.
    shuffle: If true, shuffle the dataset randomly.
    repeat: If true, repeat the dataset endlessly.

  Returns:
    inputs: A batch of input sequences represented as a dense Tensor of shape
      [time, batch_size, data_dimension].
    targets: A batch of target sequences represented as a dense Tensor of
      shape [time, batch_size, data_dimension].
    lengths: An int Tensor of shape [batch_size] representing the lengths of
      each sequence in the batch.
    model: A vrnn or srnn model object, depending on config.model.
    mean: For pianoroll data, the mean of the training set; None otherwise.

  Raises:
    ValueError: if config.dataset_type or config.model is unrecognized.
  """
  sigma_min = 0.0
  if config.dataset_type == "pianoroll":
    inputs, targets, lengths, mean = datasets.create_pianoroll_dataset(
        config.dataset_path, split, config.batch_size, shuffle=shuffle,
        repeat=repeat)
    # Convert the mean of the training set to logit space so it can be used to
    # initialize the bias of the generative distribution.
    emission_bias_init = -tf.log(
        1. / tf.clip_by_value(mean, 0.0001, 0.9999) - 1)
    emission_distribution_class = base.ConditionalBernoulliDistribution
  elif config.dataset_type == "speech":
    # NOTE(review): the speech loader ignores the shuffle/repeat arguments
    # and always loads unshuffled, un-repeated data -- confirm intended.
    inputs, targets, lengths = datasets.create_speech_dataset(
        config.dataset_path, config.batch_size,
        samples_per_timestep=config.data_dimension, prefetch_buffer_size=1,
        shuffle=False, repeat=False)
    # There is no bias for the generative distribution because the test set
    # is assumed to be already standardized with the training set statistics.
    mean = None
    emission_bias_init = None
    emission_distribution_class = base.ConditionalNormalDistribution
  else:
    # Previously an unrecognized dataset_type fell through and crashed later
    # with an UnboundLocalError; fail fast with the documented ValueError.
    raise ValueError("dataset_type flag: %s is unrecognized" %
                     config.dataset_type)
  if config.model == "vrnn":
    model = vrnn.create_vrnn(inputs.get_shape().as_list()[2],
                             config.latent_size,
                             emission_distribution_class,
                             emission_bias_init=emission_bias_init,
                             proposal_type=config.proposal_type,
                             sigma_min=sigma_min,
                             raw_sigma_bias=0.5,
                             use_tilt=(config.bound == "fivo-aux"))
  elif config.model == "srnn":
    model = srnn.create_srnn(inputs.get_shape().as_list()[2],
                             config.latent_size,
                             emission_distribution_class,
                             emission_bias_init=emission_bias_init,
                             proposal_type=config.proposal_type,
                             sigma_min=sigma_min,
                             raw_sigma_bias=0.5,
                             use_tilt=(config.bound == "fivo-aux"))
  else:
    raise ValueError("model flag: %s is unrecognized" % config.model)
  return inputs, targets, lengths, model, mean
def restore_checkpoint_if_exists(saver, sess, logdir):
  """Looks for a checkpoint and restores the session from it if found.

  Args:
    saver: A tf.train.Saver for restoring the session.
    sess: A TensorFlow session.
    logdir: The directory to look for checkpoints in.

  Returns:
    True if a checkpoint was found and restored, False otherwise.
  """
  state = tf.train.get_checkpoint_state(logdir)
  if not state:
    return False
  # Re-anchor the checkpoint file name under logdir in case the stored path
  # was written on a different machine.
  ckpt_path = os.path.join(logdir,
                           os.path.basename(state.model_checkpoint_path))
  saver.restore(sess, ckpt_path)
  return True
def wait_for_checkpoint(saver, sess, logdir):
  """Loops until the session is restored from a checkpoint in logdir.

  Args:
    saver: A tf.train.Saver for restoring the session.
    sess: A TensorFlow session.
    logdir: The directory to look for checkpoints in.
  """
  # Poll once a minute until a restorable checkpoint shows up.
  while True:
    if restore_checkpoint_if_exists(saver, sess, logdir):
      return
    tf.logging.info("Checkpoint not found in %s, sleeping for 60 seconds."
                    % logdir)
    time.sleep(60)
def run_train(config, create_dataset_and_model_fn=create_dataset_and_model):
  """Runs training for a sequential latent variable model.

  Args:
    config: A configuration object with config values accessible as properties.
      Most likely a FLAGS object. For a list of expected properties and their
      meaning see the flags defined in fivo.py.
    create_dataset_and_model_fn: If present, calls this function to create a
      dataset and model instead of create_dataset_and_model() above. The
      signature must be the same.
  """

  def create_logging_hook(step, bound_value):
    """Creates a logging hook that prints the bound value periodically."""
    # The label reflects both the bound being optimized and its
    # normalization, e.g. "fivo bound per timestep".
    bound_label = config.bound + " bound"
    if config.normalize_by_seq_len:
      bound_label += " per timestep"
    else:
      bound_label += " per sequence"

    def summary_formatter(log_dict):
      # Renders one console log line from the logged tensors.
      return "Step %d, %s: %f" % (
          log_dict["step"], bound_label, log_dict["bound_value"])

    logging_hook = tf.train.LoggingTensorHook(
        {"step": step, "bound_value": bound_value},
        every_n_iter=config.summarize_every,
        formatter=summary_formatter)
    return logging_hook

  def create_loss():
    """Creates the loss to be optimized.

    Returns:
      bound: A float Tensor containing the value of the bound that is
        being optimized.
      loss: A float Tensor that when differentiated yields the gradients
        to apply to the model. Should be optimized via gradient descent.
    """
    inputs, targets, lengths, model, _ = create_dataset_and_model_fn(
        config, split="train", shuffle=True, repeat=True)
    # Compute lower bounds on the log likelihood.
    if config.bound == "elbo":
      # The ELBO is computed as the IWAE bound with a single sample.
      ll_per_seq, _, _ = bounds.iwae(
          model, (inputs, targets), lengths, num_samples=1,
          parallel_iterations=config.parallel_iterations
      )
    elif config.bound == "iwae":
      ll_per_seq, _, _ = bounds.iwae(
          model, (inputs, targets), lengths, num_samples=config.num_samples,
          parallel_iterations=config.parallel_iterations
      )
    elif config.bound in ("fivo", "fivo-aux"):
      if config.resampling_type == "relaxed":
        # Relaxed resampling takes an extra temperature hyperparameter.
        ll_per_seq, _, _, _ = bounds.fivo(
            model, (inputs, targets),
            lengths,
            num_samples=config.num_samples,
            resampling_criterion=smc.ess_criterion,
            resampling_type=config.resampling_type,
            random_seed=config.random_seed,
            relaxed_resampling_temperature=config.
            relaxed_resampling_temperature,
            parallel_iterations=config.parallel_iterations
        )
      else:
        ll_per_seq, _, _, _ = bounds.fivo(
            model, (inputs, targets), lengths, num_samples=config.num_samples,
            resampling_criterion=smc.ess_criterion,
            resampling_type=config.resampling_type,
            random_seed=config.random_seed,
            parallel_iterations=config.parallel_iterations
        )
    # Compute loss scaled by number of timesteps.
    ll_per_t = tf.reduce_mean(ll_per_seq / tf.to_float(lengths))
    ll_per_seq = tf.reduce_mean(ll_per_seq)

    tf.summary.scalar("train_ll_per_seq", ll_per_seq)
    tf.summary.scalar("train_ll_per_t", ll_per_t)

    # The loss is the negated bound because the optimizer minimizes.
    if config.normalize_by_seq_len:
      return ll_per_t, -ll_per_t
    else:
      return ll_per_seq, -ll_per_seq

  def create_graph():
    """Creates the training graph."""
    global_step = tf.train.get_or_create_global_step()
    bound, loss = create_loss()
    opt = tf.train.AdamOptimizer(config.learning_rate)
    grads = opt.compute_gradients(loss, var_list=tf.trainable_variables())
    train_op = opt.apply_gradients(grads, global_step=global_step)
    return bound, train_op, global_step

  # Place variables on parameter servers when running distributed.
  device = tf.train.replica_device_setter(ps_tasks=config.ps_tasks)
  with tf.Graph().as_default():
    if config.random_seed: tf.set_random_seed(config.random_seed)
    with tf.device(device):
      bound, train_op, global_step = create_graph()
      log_hook = create_logging_hook(global_step, bound)
      # With stagger_workers set, non-chief workers idle until the global
      # step reaches task * 1000, staggering worker startup.
      start_training = not config.stagger_workers
      with tf.train.MonitoredTrainingSession(
          master=config.master,
          is_chief=config.task == 0,
          hooks=[log_hook],
          checkpoint_dir=config.logdir,
          save_checkpoint_secs=120,
          save_summaries_steps=config.summarize_every,
          log_step_count_steps=config.summarize_every) as sess:
        cur_step = -1
        while not sess.should_stop() and cur_step <= config.max_steps:
          if config.task > 0 and not start_training:
            # Staggered worker: poll the global step and wait for our turn.
            cur_step = sess.run(global_step)
            tf.logging.info("task %d not active yet, sleeping at step %d" %
                            (config.task, cur_step))
            time.sleep(30)
            if cur_step >= config.task * 1000:
              start_training = True
          else:
            _, cur_step = sess.run([train_op, global_step])
def run_eval(config, create_dataset_and_model_fn=create_dataset_and_model):
  """Runs evaluation for a sequential latent variable model.

  This method runs only one evaluation over the dataset, writes summaries to
  disk, and then terminates. It does not loop indefinitely.

  Args:
    config: A configuration object with config values accessible as properties.
      Most likely a FLAGS object. For a list of expected properties and their
      meaning see the flags defined in fivo.py.
    create_dataset_and_model_fn: If present, calls this function to create a
      dataset and model instead of create_dataset_and_model() above. The
      signature must be the same.
  """

  def create_graph():
    """Creates the evaluation graph.

    Returns:
      lower_bounds: A tuple of float Tensors containing the values of the 3
        evidence lower bounds, summed across the batch.
      total_batch_length: The total number of timesteps in the batch, summed
        across batch examples.
      batch_size: The batch size.
      global_step: The global step the checkpoint was loaded from.
    """
    global_step = tf.train.get_or_create_global_step()
    inputs, targets, lengths, model, _ = create_dataset_and_model_fn(
        config, split=config.split, shuffle=False, repeat=False)
    # Compute lower bounds on the log likelihood.
    elbo_ll_per_seq, _, _ = bounds.iwae(
        model, (inputs, targets), lengths, num_samples=1,
        parallel_iterations=config.parallel_iterations
    )
    iwae_ll_per_seq, _, _ = bounds.iwae(
        model, (inputs, targets), lengths, num_samples=config.num_samples,
        parallel_iterations=config.parallel_iterations
    )
    # The resampling type should only be used for training, so we ignore it.
    fivo_ll_per_seq, _, _, _ = bounds.fivo(
        model, (inputs, targets), lengths, num_samples=config.num_samples,
        resampling_criterion=smc.ess_criterion, random_seed=config.random_seed,
        parallel_iterations=config.parallel_iterations
    )
    # Sum over the batch; normalization happens later once the dataset-wide
    # totals are known.
    elbo_ll = tf.reduce_sum(elbo_ll_per_seq)
    iwae_ll = tf.reduce_sum(iwae_ll_per_seq)
    fivo_ll = tf.reduce_sum(fivo_ll_per_seq)
    batch_size = tf.shape(lengths)[0]
    total_batch_length = tf.reduce_sum(lengths)
    return ((elbo_ll, iwae_ll, fivo_ll), total_batch_length, batch_size,
            global_step)

  def average_bounds_over_dataset(lower_bounds, total_batch_length, batch_size,
                                  sess):
    """Computes the values of the bounds, averaged over the dataset.

    Args:
      lower_bounds: Tuple of float Tensors containing the values of the bounds
        evaluated on a single batch.
      total_batch_length: Integer Tensor that represents the total number of
        timesteps in the current batch.
      batch_size: Integer Tensor containing the batch size. This can vary if the
        requested batch_size does not evenly divide the size of the dataset.
      sess: A TensorFlow Session object.

    Returns:
      ll_per_t: A length 3 numpy array of floats containing each bound's average
        value, normalized by the total number of timesteps in the dataset. Can
        be interpreted as a lower bound on the average log likelihood per
        timestep in the dataset.
      ll_per_seq: A length 3 numpy array of floats containing each bound's
        average value, normalized by the number of sequences in the dataset.
        Can be interpreted as a lower bound on the average log likelihood per
        sequence in the dataset.
    """
    total_ll = np.zeros(3, dtype=np.float64)
    total_n_elems = 0.0
    total_length = 0.0
    # Drain the (non-repeating) dataset; the input pipeline signals
    # completion by raising OutOfRangeError.
    while True:
      try:
        outs = sess.run([lower_bounds, batch_size, total_batch_length])
      except tf.errors.OutOfRangeError:
        break
      total_ll += outs[0]
      total_n_elems += outs[1]
      total_length += outs[2]
    ll_per_t = total_ll / total_length
    ll_per_seq = total_ll / total_n_elems
    return ll_per_t, ll_per_seq

  def summarize_lls(lls_per_t, lls_per_seq, summary_writer, step):
    """Creates log-likelihood lower bound summaries and writes them to disk.

    Args:
      lls_per_t: An array of 3 python floats, contains the values of the
        evaluated bounds normalized by the number of timesteps.
      lls_per_seq: An array of 3 python floats, contains the values of the
        evaluated bounds normalized by the number of sequences.
      summary_writer: A tf.SummaryWriter.
      step: The current global step.
    """
    def scalar_summary(name, value):
      # Builds a single-value Summary proto directly (no graph ops needed).
      value = tf.Summary.Value(tag=name, simple_value=value)
      return tf.Summary(value=[value])

    for i, bound in enumerate(["elbo", "iwae", "fivo"]):
      per_t_summary = scalar_summary("%s/%s_ll_per_t" % (config.split, bound),
                                     lls_per_t[i])
      per_seq_summary = scalar_summary("%s/%s_ll_per_seq" %
                                       (config.split, bound),
                                       lls_per_seq[i])
      summary_writer.add_summary(per_t_summary, global_step=step)
      summary_writer.add_summary(per_seq_summary, global_step=step)
    summary_writer.flush()

  with tf.Graph().as_default():
    if config.random_seed: tf.set_random_seed(config.random_seed)
    lower_bounds, total_batch_length, batch_size, global_step = create_graph()
    summary_dir = config.logdir + "/" + config.split
    summary_writer = tf.summary.FileWriter(
        summary_dir, flush_secs=15, max_queue=100)
    saver = tf.train.Saver()
    with tf.train.SingularMonitoredSession() as sess:
      # Block until a trained checkpoint is available, then evaluate it once.
      wait_for_checkpoint(saver, sess, config.logdir)
      step = sess.run(global_step)
      tf.logging.info("Model restored from step %d, evaluating." % step)
      ll_per_t, ll_per_seq = average_bounds_over_dataset(
          lower_bounds, total_batch_length, batch_size, sess)
      summarize_lls(ll_per_t, ll_per_seq, summary_writer, step)
      tf.logging.info("%s elbo ll/t: %f, iwae ll/t: %f fivo ll/t: %f",
                      config.split, ll_per_t[0], ll_per_t[1], ll_per_t[2])
      tf.logging.info("%s elbo ll/seq: %f, iwae ll/seq: %f fivo ll/seq: %f",
                      config.split, ll_per_seq[0], ll_per_seq[1], ll_per_seq[2])
def run_sample(config, create_dataset_and_model_fn=create_dataset_and_model):
  """Sample from the model. Only pianorolls and pose datasets are supported.

  Conditions the model on a data prefix using the configured bound, then
  samples continuations and writes them to "samples.npz" under
  config.sample_out_dir (or config.logdir when unset).

  Args:
    config: A configuration object with config values accessible as properties.
      For a list of expected properties and their meaning see the flags
      defined in fivo.py.
    create_dataset_and_model_fn: If present, calls this function to create a
      dataset and model instead of create_dataset_and_model() above. The
      signature must be the same.
  """

  def sample_from_model(model, initial_state, initial_inputs, mean):
    """Samples a sequence of outputs from the model.

    The mean must be supplied -- if it isn't the results will be incorrect.

    Args:
      model: A model with sample_step implemented. See models/vrnn.py for an
        example.
      initial_state: The initial state of the model.
      initial_inputs: The initial inputs to feed into the model.
      mean: The mean of the training set, a Tensor of shape [data_dimension].

    Returns:
      samples: A Tensor of shape [sample_length, batch_size, num_timesteps,
        data_dimension] containing the samples from the model.
    """
    initial_state, initial_output = model.sample_step(initial_state,
                                                      initial_inputs, 0)
    output_ta = tf.TensorArray(size=config.sample_length,
                               dtype=tf.float32,
                               dynamic_size=False,
                               clear_after_read=True)
    output_ta = output_ta.write(0, initial_output)
    t0 = tf.constant(1, dtype=tf.int32)

    def sample_step(t, state, prev_outputs, output_ta):
      state, output = model.sample_step(state, prev_outputs, t)
      output_ta = output_ta.write(t, output)
      # Center the outputs fed back into the model using the training mean.
      centered_output = output - mean[tf.newaxis, :]
      return t+1, state, centered_output, output_ta

    def sample_predicate(t, *unused_args):
      return t < config.sample_length

    _, _, _, output_ta = tf.while_loop(
        sample_predicate,
        sample_step,
        loop_vars=(t0, initial_state, initial_output, output_ta),
        parallel_iterations=config.parallel_iterations
    )
    samples = output_ta.stack()
    samples = tf.reshape(samples, [config.sample_length, config.batch_size,
                                   config.num_samples, config.data_dimension])
    return samples

  def create_graph():
    """Creates the graph to sample from the model.

    First, the model is conditioned on a prefix by sampling a batch of data
    and trimming it to prefix_length. The configured bound is used to do the
    conditioning. Then the final state from the conditioning is used to sample
    from the model.

    Returns:
      samples: A Tensor of shape [sample_length, batch_size,
        num_samples, data_dimension] representing samples from the model.
      prefixes: A Tensor of shape [prefix_length, batch_size, data_dimension]
        representing the prefixes the model was conditioned on.

    Raises:
      ValueError: if config.bound is not supported for sampling.
    """
    inputs, targets, lengths, model, mean = create_dataset_and_model_fn(
        config, split=config.split, shuffle=True, repeat=True)
    input_prefixes = inputs[:config.prefix_length]
    target_prefixes = targets[:config.prefix_length]
    prefix_lengths = tf.ones_like(lengths) * config.prefix_length
    if config.bound == "elbo":
      _, _, state = bounds.iwae(
          model, (input_prefixes, target_prefixes),
          prefix_lengths, num_samples=1)
    elif config.bound == "iwae":
      _, _, state = bounds.iwae(
          model, (input_prefixes, target_prefixes),
          prefix_lengths, num_samples=config.num_samples)
    elif config.bound == "fivo":
      _, _, _, state = bounds.fivo(
          model, (input_prefixes, target_prefixes), prefix_lengths,
          num_samples=config.num_samples,
          resampling_criterion=smc.ess_criterion,
          random_seed=config.random_seed)
    else:
      # Previously an unsupported bound (e.g. "fivo-aux") fell through and
      # crashed later with an UnboundLocalError on `state`.
      raise ValueError("bound flag: %s is not supported for sampling" %
                       config.bound)
    sample_inputs = tf.tile(inputs[config.prefix_length],
                            [config.num_samples, 1])
    samples = sample_from_model(model, state, sample_inputs, mean)
    return samples, target_prefixes

  with tf.Graph().as_default():
    if config.random_seed:
      tf.set_random_seed(config.random_seed)
    samples, prefixes = create_graph()
    # Fixed: this previously read the misspelled attribute 'sample_our_dir',
    # raising AttributeError whenever sample_out_dir was set.
    if config.sample_out_dir:
      out_dir = config.sample_out_dir
    else:
      out_dir = config.logdir
    if not tf.gfile.Exists(out_dir):
      tf.gfile.MakeDirs(out_dir)
    with tf.train.SingularMonitoredSession(
        checkpoint_dir=config.logdir) as sess:
      samples_out, prefixes_out = sess.run([samples, prefixes])
      with tf.gfile.Open(os.path.join(out_dir, "samples.npz"), "w") as fout:
        np.save(fout, {"prefixes": prefixes_out, "samples": samples_out})
| |
"""Common Shell Utilities."""
import os
from subprocess import Popen, PIPE
from multiprocessing import Process
from threading import Thread
from ..core.meta import MetaMixin
from ..core.exc import FrameworkError
def cmd(command, capture=True, *args, **kwargs):
    """
    Wrapper around ``exec_cmd`` and ``exec_cmd2`` depending on whether
    capturing output is desired. Defaults to setting the Popen ``shell``
    keyword argument to ``True`` (string command rather than list of command
    and arguments).

    Arguments:
        command (str): The command (and arguments) to run.
        capture (bool): Whether or not to capture output.

    Other Parameters:
        args: Additional arguments are passed to ``Popen()``.
        kwargs: Additional keyword arguments are passed to ``Popen()``.

    Returns:
        tuple: When ``capture==True``, returns the ``(stdout, stderror,
        return_code)`` of the command.

        int: When ``capture==False``, returns only the ``exitcode`` of the
        command.

    Example:

        .. code-block:: python

            from cement.utils import shell

            # execute a command and capture output
            stdout, stderr, exitcode = shell.cmd('echo helloworld')

            # execute a command but do not capture output
            exitcode = shell.cmd('echo helloworld', capture=False)

    """
    # Default to shell execution so plain command strings work out of the box.
    kwargs.setdefault('shell', True)

    # Dispatch to the capturing or non-capturing runner.
    runner = exec_cmd if capture is True else exec_cmd2
    return runner(command, *args, **kwargs)
def exec_cmd(cmd_args, *args, **kwargs):
    """
    Execute a shell call using Subprocess. All additional ``*args`` and
    ``**kwargs`` are passed directly to ``subprocess.Popen``. See
    `Subprocess
    <http://docs.python.org/library/subprocess.html>`_ for more information
    on the features of ``Popen()``.

    Args:
        cmd_args (list): List of command line arguments.

    Other Parameters:
        args: Additional arguments are passed to ``Popen()``.
        kwargs: Additional keyword arguments are passed to ``Popen()``.

    Returns:
        tuple: The ``(stdout, stderror, return_code)`` of the command.

    Example:

        .. code-block:: python

            from cement.utils import shell

            stdout, stderr, exitcode = shell.exec_cmd(['echo', 'helloworld'])

    """
    # Capture output unless the caller explicitly wired up other pipes.
    kwargs.setdefault('stdout', PIPE)
    kwargs.setdefault('stderr', PIPE)

    proc = Popen(cmd_args, *args, **kwargs)
    stdout, stderr = proc.communicate()
    # communicate() already waited for the child; wait() just makes the
    # returncode collection explicit (as in the original implementation).
    proc.wait()
    return (stdout, stderr, proc.returncode)
def exec_cmd2(cmd_args, *args, **kwargs):
    """
    Similar to ``exec_cmd``, however does not capture stdout, stderr (therefore
    allowing it to print to console). All additional ``*args`` and
    ``**kwargs`` are passed directly to ``subprocess.Popen``. See `Subprocess
    <http://docs.python.org/library/subprocess.html>`_ for more information
    on the features of ``Popen()``.

    Args:
        cmd_args (list): List of command line arguments

    Other Parameters:
        args: Additional arguments are passed to ``Popen()``
        kwargs: Additional keyword arguments are passed to ``Popen()``

    Returns:
        int: The integer return code of the command.

    Example:

        .. code-block:: python

            from cement.utils import shell

            exitcode = shell.exec_cmd2(['echo', 'helloworld'])

    """
    # Popen.wait() blocks until the child exits and returns its exit code.
    child = Popen(cmd_args, *args, **kwargs)
    return child.wait()
def spawn(target, start=True, join=False, thread=False, *args, **kwargs):
    """
    Wrapper around ``spawn_process`` and ``spawn_thread`` depending on
    desired execution model.

    Args:
        target (function): The target function to execute in the sub-process.

    Keyword Args:
        start (bool): Call ``start()`` on the process before returning the
            process object.
        join (bool): Call ``join()`` on the process before returning the
            process object.  Only called if ``start == True``.
        thread (bool): Whether to spawn as thread instead of process.

    Other Parameters:
        args: Additional arguments are passed to ``Process()``
        kwargs: Additional keyword arguments are passed to ``Process()``.

    Returns:
        object: The process object returned by Process().

    Example:

        .. code-block:: python

            from cement.utils import shell

            def add(a, b):
                print(a + b)

            p = shell.spawn(add, args=(12, 27))
            p.join()

    """
    # Pick the concurrency primitive, then delegate everything else.
    spawner = spawn_thread if thread is True else spawn_process
    return spawner(target, start, join, *args, **kwargs)
def spawn_process(target, start=True, join=False, *args, **kwargs):
    """
    A quick wrapper around ``multiprocessing.Process()``. By default the
    ``start()`` function will be called before the spawned process object is
    returned. See `MultiProcessing
    <https://docs.python.org/3/library/multiprocessing.html>`_ for more
    information on the features of ``Process()``.

    Args:
        target (function): The target function to execute in the sub-process.

    Keyword Args:
        start (bool): Call ``start()`` on the process before returning the
            process object.
        join (bool): Call ``join()`` on the process before returning the
            process object.  Only called if ``start == True``.

    Other Parameters:
        args: Additional arguments are passed to ``Process()``
        kwargs: Additional keyword arguments are passed to ``Process()``.

    Returns:
        object: The process object returned by Process().

    Example:

        .. code-block:: python

            from cement.utils import shell

            def add(a, b):
                print(a + b)

            p = shell.spawn_process(add, args=(12, 27))
            p.join()

    """
    child = Process(target=target, *args, **kwargs)

    # join() is only meaningful once the process has been started.
    if start:
        child.start()
        if join:
            child.join()

    return child
def spawn_thread(target, start=True, join=False, *args, **kwargs):
    """
    A quick wrapper around ``threading.Thread()``.  By default the ``start()``
    function will be called before the spawned thread object is returned
    See `Threading
    <https://docs.python.org/3/library/threading.html>`_ for more
    information on the features of ``Thread()``.

    Args:
        target (function): The target function to execute in the thread.

    Keyword Args:
        start (bool): Call ``start()`` on the thread before returning the
            thread object.
        join (bool): Call ``join()`` on the thread before returning the thread
            object.  Only called if ``start == True``.

    Other Parameters:
        args: Additional arguments are passed to ``Thread()``.
        kwargs: Additional keyword arguments are passed to ``Thread()``.

    Returns:
        object: The thread object returned by ``Thread()``.

    Example:

        .. code-block:: python

            from cement.utils import shell

            def add(a, b):
                print(a + b)

            t = shell.spawn_thread(add, args=(12, 27))
            t.join()

    """
    worker = Thread(target=target, *args, **kwargs)

    # join() is only meaningful once the thread has been started.
    if start:
        worker.start()
        if join:
            worker.join()

    return worker
class Prompt(MetaMixin):
    """
    A wrapper around ``input`` whose purpose is to limit the redundant tasks
    of gathering user input. Can be used in several ways depending on the use
    case (simple input, options, and numbered selection).

    Args:
        text (str): The text displayed at the input prompt.

    Example:

        Simple prompt to halt operations and wait for user to hit enter:

        .. code-block:: python

            p = shell.Prompt("Press Enter To Continue", default='ENTER')

        .. code-block:: text

            $ python myapp.py
            Press Enter To Continue

            $

        Provide a numbered list for longer selections:

        .. code-block:: python

            p = Prompt("Where do you live?",
                       options=[
                           'San Antonio, TX',
                           'Austin, TX',
                           'Dallas, TX',
                           'Houston, TX',
                       ],
                       numbered = True,
                       )

        .. code-block:: text

            Where do you live?

            1: San Antonio, TX
            2: Austin, TX
            3: Dallas, TX
            4: Houston, TX

            Enter the number for your selection:

        Create a more complex prompt, and process the input from the user:

        .. code-block:: python

            class MyPrompt(Prompt):
                class Meta:
                    text = "Do you agree to the terms?"
                    options = ['Yes', 'no', 'maybe-so']
                    options_separator = '|'
                    default = 'no'
                    clear = True
                    max_attempts = 99

                def process_input(self):
                    if self.input.lower() == 'yes':
                        # do something crazy
                        pass
                    else:
                        # don't do anything... maybe exit?
                        print("User doesn't agree! I'm outa here")
                        sys.exit(1)

            MyPrompt()

        .. code-block:: text

            $ python myapp.py
            [TERMINAL CLEAR]

            Do you agree to the terms? [Yes|no|maybe-so] no
            User doesn't agree! I'm outa here

            $ echo $?

            $ 1

    """

    class Meta:
        """
        Optional meta-data (can also be passed as keyword arguments to the
        parent class).
        """

        #: The text that is displayed to prompt the user
        #: (fixed typo: was "Tell me someting interesting:")
        text = "Tell me something interesting:"

        #: A default value to use if the user doesn't provide any input
        default = None

        #: Options to provide to the user.  If set, the input must match one
        #: of the items in the options selection.
        options = None

        #: Separator to use within the option selection (non-numbered)
        options_separator = ','

        #: Display options in a numbered list, where the user can enter a
        #: number.  Useful for long selections.
        numbered = False

        #: The text to display along with the numbered selection for user
        #: input.
        selection_text = "Enter the number for your selection:"

        #: Whether or not to automatically prompt() the user once the class
        #: is instantiated.
        auto = True

        #: Whether to treat user input as case insensitive (only used to
        #: compare user input with available options).
        case_insensitive = True

        #: Whether or not to clear the terminal when prompting the user.
        clear = False

        #: Command to issue when clearing the terminal.
        clear_command = 'clear'

        #: Max attempts to get proper input from the user before giving up.
        max_attempts = 10

        #: Raise an exception when max_attempts is hit?  If not, Prompt
        #: passes the input through as `None`.
        max_attempts_exception = True

    def __init__(self, text=None, *args, **kw):
        # Allow the prompt text to be given positionally; it is forwarded to
        # MetaMixin as the 'text' meta keyword.
        if text is not None:
            kw['text'] = text
        super(Prompt, self).__init__(*args, **kw)
        self.input = None
        if self._meta.auto:
            self.prompt()

    def _prompt(self):
        """Render the prompt text and collect one line of user input."""
        if self._meta.clear:
            os.system(self._meta.clear_command)

        text = ""
        if self._meta.options is not None:
            if self._meta.numbered is True:
                # Render each option on its own numbered line followed by
                # the selection instruction.
                text = text + self._meta.text + "\n\n"
                count = 1
                for option in self._meta.options:
                    text = text + "%s: %s\n" % (count, option)
                    count += 1
                text = text + "\n"
                text = text + self._meta.selection_text
            else:
                # Render the options inline, e.g. "Continue? [yes,no]".
                sep = self._meta.options_separator
                text = "%s [%s]" % (self._meta.text,
                                    sep.join(self._meta.options))
        else:
            text = self._meta.text

        self.input = input("%s " % text)

        # Fall back on the default (if any) when the user just hits enter.
        if self.input == '' and self._meta.default is not None:
            self.input = self._meta.default
        elif self.input == '':
            self.input = None

    def prompt(self):
        """
        Prompt the user, and store their input as ``self.input``.

        Re-prompts on empty or invalid input up to ``Meta.max_attempts``
        times; then either raises :class:`FrameworkError` (when
        ``Meta.max_attempts_exception`` is True) or returns ``None``.
        """
        attempt = 0
        while self.input is None:
            if attempt >= int(self._meta.max_attempts):
                if self._meta.max_attempts_exception is True:
                    raise FrameworkError("Maximum attempts exceeded getting "
                                         "valid user input")
                else:
                    return self.input

            attempt += 1
            self._prompt()

            if self.input is None:
                continue
            elif self._meta.options is not None:
                if self._meta.numbered:
                    # Translate the typed number back into the option value;
                    # out-of-range or non-numeric input triggers a re-prompt.
                    try:
                        self.input = self._meta.options[int(self.input) - 1]
                    except (IndexError, ValueError):
                        self.input = None
                        continue
                else:
                    # Validate free-form input against the allowed options.
                    if self._meta.case_insensitive is True:
                        lower_options = [x.lower()
                                         for x in self._meta.options]
                        if not self.input.lower() in lower_options:
                            self.input = None
                            continue
                    else:
                        if self.input not in self._meta.options:
                            self.input = None
                            continue
        self.process_input()
        return self.input

    def process_input(self):
        """
        Does not do anything.  Is intended to be used in a sub-class to handle
        user input after it is prompted.
        """
        pass
| |
# -*- coding: utf-8 -*-
"""Algorithms for directed acyclic graphs (DAGs)."""
# Copyright (C) 2006-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from fractions import gcd
import networkx as nx
from networkx.utils.decorators import *
from ..utils import arbitrary_element
# Module authorship metadata (preserved from the original NetworkX source).
__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>',
                            'Dan Schult (dschult@colgate.edu)',
                            'Ben Edwards (bedwards@cs.unm.edu)'])

# Names exported via `from <module> import *`.
__all__ = ['descendants',
           'ancestors',
           'topological_sort',
           'topological_sort_recursive',
           'is_directed_acyclic_graph',
           'is_aperiodic',
           'transitive_closure',
           'antichains',
           'dag_longest_path',
           'dag_longest_path_length']
def descendants(G, source):
    """Return all nodes reachable from `source` in G.

    Parameters
    ----------
    G : NetworkX DiGraph
    source : node in G

    Returns
    -------
    des : set()
        The descendants of source in G
    """
    if not G.has_node(source):
        raise nx.NetworkXError("The node %s is not in the graph." % source)
    # Nodes with a finite shortest-path distance from `source` are exactly
    # the reachable ones; the source itself is excluded.
    reachable = set(nx.shortest_path_length(G, source=source).keys())
    reachable.discard(source)
    return reachable
def ancestors(G, source):
    """Return all nodes having a path to `source` in G.

    Parameters
    ----------
    G : NetworkX DiGraph
    source : node in G

    Returns
    -------
    ancestors : set()
        The ancestors of source in G
    """
    if not G.has_node(source):
        raise nx.NetworkXError("The node %s is not in the graph." % source)
    # Nodes with a finite shortest-path distance *to* `source` are exactly
    # its ancestors; the source itself is excluded.
    reaching = set(nx.shortest_path_length(G, target=source).keys())
    reaching.discard(source)
    return reaching
def is_directed_acyclic_graph(G):
    """Return True if the graph G is a directed acyclic graph (DAG) or
    False if not.

    Parameters
    ----------
    G : NetworkX graph
        A graph

    Returns
    -------
    is_dag : bool
        True if G is a DAG, false otherwise
    """
    # Undirected graphs are never DAGs by definition.
    if not G.is_directed():
        return False
    # A topological order exists exactly when the digraph is acyclic.
    try:
        topological_sort(G, reverse=True)
    except nx.NetworkXUnfeasible:
        return False
    return True
def topological_sort(G, nbunch=None, reverse=False):
    """Return a list of nodes in topological sort order.
    A topological sort is a nonunique permutation of the nodes
    such that an edge from u to v implies that u appears before v in the
    topological sort order.
    Parameters
    ----------
    G : NetworkX digraph
       A directed graph
    nbunch : container of nodes (optional)
       Explore graph in specified order given in nbunch
    reverse : bool, optional
       Return postorder instead of preorder if True.
       Reverse mode is a bit more efficient.
    Raises
    ------
    NetworkXError
       Topological sort is defined for directed graphs only. If the
       graph G is undirected, a NetworkXError is raised.
    NetworkXUnfeasible
       If G is not a directed acyclic graph (DAG) no topological sort
       exists and a NetworkXUnfeasible exception is raised.
    Notes
    -----
    This algorithm is based on a description and proof in
    The Algorithm Design Manual [1]_ .
    See also
    --------
    is_directed_acyclic_graph
    References
    ----------
    .. [1] Skiena, S. S. The Algorithm Design Manual  (Springer-Verlag, 1998).
        http://www.amazon.com/exec/obidos/ASIN/0387948600/ref=ase_thealgorithmrepo/
    """
    if not G.is_directed():
        raise nx.NetworkXError(
            "Topological sort not defined on undirected graphs.")
    # nonrecursive version
    # `seen` holds nodes on the DFS path currently being expanded (used for
    # cycle detection); `explored` holds nodes whose entire subtree is done.
    seen = set()
    order = []
    explored = set()
    if nbunch is None:
        nbunch = G.nodes()
    for v in nbunch:     # process all vertices in G
        if v in explored:
            continue
        # Iterative DFS: `fringe` acts as the explicit recursion stack.
        fringe = [v]   # nodes yet to look at
        while fringe:
            w = fringe[-1]  # depth first search
            if w in explored:  # already looked down this branch
                fringe.pop()
                continue
            seen.add(w)     # mark as seen
            # Check successors for cycles and for new nodes
            new_nodes = []
            for n in G[w]:
                if n not in explored:
                    if n in seen:  # CYCLE !!
                        raise nx.NetworkXUnfeasible("Graph contains a cycle.")
                    new_nodes.append(n)
            if new_nodes:   # Add new_nodes to fringe
                fringe.extend(new_nodes)
            else:           # No new nodes so w is fully explored
                # Postorder append: `order` ends up in reverse topological
                # order, hence the reversal below for the default mode.
                explored.add(w)
                order.append(w)
                fringe.pop()    # done considering this node
    if reverse:
        return order
    else:
        return list(reversed(order))
def topological_sort_recursive(G, nbunch=None, reverse=False):
    """Return a list of nodes in topological sort order.
    A topological sort is a nonunique permutation of the nodes such
    that an edge from u to v implies that u appears before v in the
    topological sort order.
    Parameters
    ----------
    G : NetworkX digraph
    nbunch : container of nodes (optional)
       Explore graph in specified order given in nbunch
    reverse : bool, optional
       Return postorder instead of preorder if True.
       Reverse mode is a bit more efficient.
    Raises
    ------
    NetworkXError
       Topological sort is defined for directed graphs only. If the
       graph G is undirected, a NetworkXError is raised.
    NetworkXUnfeasible
       If G is not a directed acyclic graph (DAG) no topological sort
       exists and a NetworkXUnfeasible exception is raised.
    Notes
    -----
    This is a recursive version of topological sort.
    See also
    --------
    topological_sort
    is_directed_acyclic_graph
    """
    if not G.is_directed():
        raise nx.NetworkXError(
            "Topological sort not defined on undirected graphs.")
    # Closure over `ancestors` (nodes on the current recursion path, for
    # cycle detection), `explored` (finished nodes) and `order` (postorder).
    # NOTE(review): deep graphs can hit Python's recursion limit here; the
    # iterative `topological_sort` avoids that.
    def _dfs(v):
        ancestors.add(v)
        for w in G[v]:
            if w in ancestors:
                raise nx.NetworkXUnfeasible("Graph contains a cycle.")
            if w not in explored:
                _dfs(w)
        ancestors.remove(v)
        explored.add(v)
        order.append(v)
    ancestors = set()
    explored = set()
    order = []
    if nbunch is None:
        nbunch = G.nodes()
    for v in nbunch:
        if v not in explored:
            _dfs(v)
    # `order` is built in postorder, i.e. reverse topological order.
    if reverse:
        return order
    else:
        return list(reversed(order))
def is_aperiodic(G):
    """Return True if G is aperiodic.

    A directed graph is aperiodic if there is no integer k > 1 that
    divides the length of every cycle in the graph.

    Parameters
    ----------
    G : NetworkX DiGraph
        Graph

    Returns
    -------
    aperiodic : boolean
        True if the graph is aperiodic False otherwise

    Raises
    ------
    NetworkXError
        If G is not directed

    Notes
    -----
    This uses the method outlined in [1]_, which runs in O(m) time
    given m edges in G. Note that a graph is not aperiodic if it is
    acyclic as every integer trivially divides length 0 cycles.

    References
    ----------
    .. [1] Jarvis, J. P.; Shier, D. R. (1996),
       Graph-theoretic analysis of finite Markov chains,
       in Shier, D. R.; Wallenius, K. T., Applied Mathematical Modeling:
       A Multidisciplinary Approach, CRC Press.
    """
    if not G.is_directed():
        raise nx.NetworkXError(
            "is_aperiodic not defined for undirected graphs")
    # BUG FIX: ``fractions.gcd`` (imported at module level) was deprecated in
    # Python 3.5 and removed in 3.9.  Prefer ``math.gcd`` and fall back to the
    # module-level name only on very old interpreters.
    try:
        from math import gcd as _gcd
    except ImportError:  # pragma: no cover - Python < 3.5 only
        _gcd = gcd
    s = arbitrary_element(G)
    # BFS from an arbitrary node; ``levels`` records the BFS depth of each
    # discovered node.  Every non-tree edge closes a (virtual) cycle whose
    # length contributes to the running gcd ``g``.
    levels = {s: 0}
    this_level = [s]
    g = 0
    level = 1  # renamed from 'l' (ambiguous single-letter name, PEP 8 E741)
    while this_level:
        next_level = []
        for u in this_level:
            for v in G[u]:
                if v in levels:  # Non-Tree Edge
                    g = _gcd(g, levels[u] - levels[v] + 1)
                else:  # Tree Edge
                    next_level.append(v)
                    levels[v] = level
        this_level = next_level
        level += 1
    if len(levels) == len(G):  # All nodes in tree
        return g == 1
    else:
        # Nodes unreachable from ``s`` must be checked independently.
        return g == 1 and nx.is_aperiodic(G.subgraph(set(G) - set(levels)))
@not_implemented_for('undirected')
def transitive_closure(G):
    """ Returns transitive closure of a directed graph

    The transitive closure of G = (V,E) is a graph G+ = (V,E+) such that
    for all v,w in V there is an edge (v,w) in E+ if and only if there
    is a non-null path from v to w in G.

    Parameters
    ----------
    G : NetworkX DiGraph
        Graph

    Returns
    -------
    TC : NetworkX DiGraph
        Graph

    Raises
    ------
    NetworkXNotImplemented
        If G is not directed

    References
    ----------
    .. [1] http://www.ics.uci.edu/~eppstein/PADS/PartialOrder.py
    """
    closure = nx.DiGraph()
    closure.add_nodes_from(G.nodes())
    closure.add_edges_from(G.edges())
    # A DFS from each node enumerates everything it can reach; add an edge
    # for every such reachable node (excluding the trivial self pair).
    for node in G:
        reachable = nx.dfs_preorder_nodes(G, source=node)
        closure.add_edges_from((node, other)
                               for other in reachable
                               if other != node)
    return closure
@not_implemented_for('undirected')
def antichains(G):
    """Generates antichains from a DAG.
    An antichain is a subset of a partially ordered set such that any
    two elements in the subset are incomparable.
    Parameters
    ----------
    G : NetworkX DiGraph
        Graph
    Returns
    -------
    antichain : generator object
    Raises
    ------
    NetworkXNotImplemented
        If G is not directed
    NetworkXUnfeasible
        If G contains a cycle
    Notes
    -----
    This function was originally developed by Peter Jipsen and Franco Saliola
    for the SAGE project. It's included in NetworkX with permission from the
    authors. Original SAGE code at:
    https://sage.informatik.uni-goettingen.de/src/combinat/posets/hasse_diagram.py
    References
    ----------
    .. [1] Free Lattices, by R. Freese, J. Jezek and J. B. Nation,
       AMS, Vol 42, 1995, p. 226.
    """
    # The transitive closure answers "is x comparable to t?" in O(1);
    # computing it raises NetworkXUnfeasible if G has a cycle (via the
    # topological sort below).
    TC = nx.transitive_closure(G)
    # Each work item pairs an antichain with the candidate nodes that can
    # still extend it; nodes are consumed in reverse topological order.
    antichains_stacks = [([], nx.topological_sort(G, reverse=True))]
    while antichains_stacks:
        (antichain, stack) = antichains_stacks.pop()
        # Invariant:
        #  - the elements of antichain are independent
        #  - the elements of stack are independent from those of antichain
        yield antichain
        while stack:
            x = stack.pop()
            new_antichain = antichain + [x]
            # Keep only candidates incomparable with x (not in either
            # direction of the transitive closure).
            new_stack = [
                t for t in stack if not ((t in TC[x]) or (x in TC[t]))]
            antichains_stacks.append((new_antichain, new_stack))
@not_implemented_for('undirected')
def dag_longest_path(G, weight='weight', default_weight=1):
    """Returns the longest path in a DAG
    If G has edges with 'weight' attribute the edge data are used as weight values.
    Parameters
    ----------
    G : NetworkX DiGraph
        Graph
    weight : string (default 'weight')
        Edge data key to use for weight
    default_weight : integer (default 1)
        The weight of edges that do not have a weight attribute
    Returns
    -------
    path : list
        Longest path
    Raises
    ------
    NetworkXNotImplemented
        If G is not directed
    See also
    --------
    dag_longest_path_length
    """
    # Dynamic programming over a topological order: dist[v] is the best
    # (length, predecessor) pair ending at v.  Tuples compare
    # lexicographically, so max() breaks length ties by predecessor.
    dist = {}  # stores {v : (length, u)}
    for v in nx.topological_sort(G):
        us = [(dist[u][0] + data.get(weight, default_weight), u)
              for u, data in G.pred[v].items()]
        # Use the best predecessor if there is one and its distance is non-negative, otherwise terminate.
        # A path start is encoded as (0, v), i.e. a node that is its own
        # predecessor -- the backtracking loop below stops on that fixpoint.
        maxu = max(us) if us else (0, v)
        dist[v] = maxu if maxu[0] >= 0 else (0, v)
    u = None
    v = max(dist, key=dist.get)
    # Walk the predecessor chain back from the best endpoint until a node
    # points at itself (path start), then reverse into forward order.
    path = []
    while u != v:
        path.append(v)
        u = v
        v = dist[v][1]
    path.reverse()
    return path
@not_implemented_for('undirected')
def dag_longest_path_length(G):
    """Returns the longest path length in a DAG

    Parameters
    ----------
    G : NetworkX DiGraph
        Graph

    Returns
    -------
    path_length : int
        Longest path length

    Raises
    ------
    NetworkXNotImplemented
        If G is not directed

    See also
    --------
    dag_longest_path
    """
    # The length of a path is its edge count: one less than its node count.
    longest = nx.dag_longest_path(G)
    return len(longest) - 1
| |
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from boto.ec2 import networkinterface
import netaddr
import tempest.cloudscaling.thirdparty.scenario.aws_compat.base as aws_base
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest import test
import logging
logging.getLogger('boto').setLevel(logging.CRITICAL)
class VPC_NAT_Scenario(aws_base.BaseAWSTest):
    """
    Based on 'VPC with Public and Private Subnets' scenario
    (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Scenario2.html)
    Adapted to work with OpenStack with the following differences:
    1. DNS is set up via DHCP options to 8.8.8.8 (boto has a bug, see Note).
    2. Opened DNS ports (53) in DB and NAT security groups.
    3. NAT instance is created with 2 interfaces in different subnets.
    4. SourceDestCheck is disabled for second interface of NAT instance.
    5. Default route in main route table is set to point to this interface.
    As a result, DB instance's default route goes through the second interface
    of NAT instance which is in the same subnet as the DB instance.
    To allow several private subnets to work through the same NAT, more
    secondary interfaces should be added to NAT instance for all of that
    subnets, and separate route tables should be created for each of the
    subnets.
    """
    # NOTE(review): the numbered test_NNN methods below form an ordered
    # pipeline -- each one consumes resources stored in ``cls.ctx`` by the
    # previous ones, so they must run in method-name order.
    # NOTE(Alex) At the moment of this test's creation, boto has a bug with
    # parsing result of setting up DHCP options. Order of the Key-Values
    # returned in OpenStack for the Dictionaries is different from AWS's.
    # Potential fix should be done in boto/vpc/dhcpoptions.py:
    # DhcpConfigSet can be changed to:
    #
    # class DhcpConfigSet(dict):
    #
    #     def startElement(self, name, attrs, connection):
    #         if name == 'valueSet':
    #             if not hasattr(self, '_value'):
    #                 self._value = DhcpValueSet()
    #             return self._value
    #
    #     def endElement(self, name, value, connection):
    #         if name == 'valueSet':
    #             if hasattr(self, '_name'):
    #                 self[self._name] = self._value
    #         if name == 'key':
    #             self._name = value
    #             if hasattr(self, '_value'):
    #                 self[self._name] = self._value
    # Mutable class-level bag of resources shared by the ordered test
    # methods; populated progressively as the scenario is built up.
    class Context(object):
        vpc = None
        internet_gateway = None
        web_subnet = None
        db_subnet = None
        main_route_table = None
        custom_route_table = None
        web_security_group = None
        nat_security_group = None
        db_security_group = None
        web_instance = None
        db_instance = None
        nat_instance = None
    @classmethod
    @test.safe_setup
    def setUpClass(cls):
        """Prepare shared fixtures: CIDRs, image and keypair."""
        super(VPC_NAT_Scenario, cls).setUpClass()
        cls.ctx = cls.Context()
        cls.zone = cls.config.boto.aws_zone
        cfg = cls.config.cloudscaling
        cls.ssh_user = cfg.general_ssh_user_name
        cls.vpc_cidr = netaddr.IPNetwork(cfg.vpc_cidr)
        # Split the VPC CIDR into two equal subnets: one public (web), one
        # private (db).
        cls.web_subnet, cls.db_subnet = cls.vpc_cidr.subnet(
            cfg.vpc_subnet_prefix, 2)
        cls.test_client_cidr = netaddr.IPNetwork(cfg.test_client_cidr)
        cls.image_id = cls._prepare_image_id(cfg.general_image_name)
        cls.keypair = cls._prepare_key_pair()
    @classmethod
    def tearDownClass(cls):
        """Drop cross-referencing security-group rules before cleanup.

        Groups that reference each other cannot be deleted until the
        referencing rules are revoked, so do that (best-effort) first.
        """
        if cls.ctx is not None:
            for group in [cls.ctx.web_security_group,
                          cls.ctx.nat_security_group,
                          cls.ctx.db_security_group]:
                if not group:
                    continue
                try:
                    cls._revoke_security_group_linked_rules(group)
                except Exception:
                    # Best-effort cleanup: never mask the real teardown.
                    pass
        super(VPC_NAT_Scenario, cls).tearDownClass()
    @classmethod
    def _revoke_security_group_linked_rules(cls, group):
        """Revoke all of *group*'s rules that reference other groups."""
        groups = cls.vpc_client.get_all_security_groups(group_ids=[group.id])
        if len(groups) == 0:
            return
        sg = groups[0]
        # Only group-to-group grants (no cidr_ip) block group deletion.
        for rule in sg.rules:
            for grant in rule.grants:
                if not grant.cidr_ip:
                    cls.vpc_client.revoke_security_group(
                        group_id=sg.id,
                        ip_protocol=rule.ip_protocol,
                        from_port=rule.from_port,
                        to_port=rule.to_port,
                        src_security_group_group_id=grant.groupId)
        for rule in sg.rules_egress:
            for grant in rule.grants:
                if not grant.cidr_ip:
                    cls.vpc_client.revoke_security_group_egress(
                        sg.id,
                        rule.ip_protocol,
                        from_port=rule.from_port,
                        to_port=rule.to_port,
                        src_group_id=grant.groupId)
    def test_000_create_vpc(self):
        """Create VPC"""
        # DHCP options pin DNS to 8.8.8.8 (see class NOTE about the boto
        # parsing bug this works around).
        dhcp_opts = self.vpc_client.create_dhcp_options(
            domain_name_servers=['8.8.8.8'])
        self.assertIsNotNone(dhcp_opts)
        self.assertTrue(dhcp_opts.id)
        self.addResourceCleanUp(self.vpc_client.delete_dhcp_options,
                                dhcp_opts.id)
        vpc = self.vpc_client.create_vpc(str(self.vpc_cidr))
        self.assertIsNotNone(vpc)
        self.assertTrue(vpc.id)
        self.addResourceCleanUp(self.vpc_client.delete_vpc, vpc.id)
        self.assertTrue(self.vpc_client.associate_dhcp_options(dhcp_opts.id,
                                                               vpc.id))
        self.ctx.vpc = vpc
    def test_001_create_internet_gateway(self):
        """Create internet gateway"""
        ig = self.vpc_client.create_internet_gateway()
        self.assertIsNotNone(ig)
        self.assertTrue(ig.id)
        self.addResourceCleanUp(self._destroy_internet_gateway, ig)
        status = self.vpc_client.attach_internet_gateway(ig.id,
                                                         self.ctx.vpc.id)
        self.assertTrue(status)
        self.ctx.internet_gateway = ig
    def test_010_create_subnets(self):
        """Create subnets"""
        # Public (web) subnet first, then private (db) subnet, both in the
        # same availability zone.
        sn = self.vpc_client.create_subnet(self.ctx.vpc.id,
                                           str(self.web_subnet),
                                           self.zone)
        self.assertIsNotNone(sn)
        self.assertTrue(sn.id)
        self.addResourceCleanUp(self.vpc_client.delete_subnet, sn.id)
        self.ctx.web_subnet = sn
        sn = self.vpc_client.create_subnet(self.ctx.vpc.id,
                                           str(self.db_subnet),
                                           self.zone)
        self.assertIsNotNone(sn)
        self.assertTrue(sn.id)
        self.addResourceCleanUp(self.vpc_client.delete_subnet, sn.id)
        self.ctx.db_subnet = sn
    def test_020_get_main_route_table(self):
        """Describe auto created route table"""
        rtables = self.vpc_client.get_all_route_tables(
            filters=[("vpc-id", self.ctx.vpc.id)])
        self.assertIsNotNone(rtables)
        self.assertEqual(1, len(rtables))
        self.ctx.main_route_table = rtables[0]
    def test_025_create_custom_route_table(self):
        """Create route table for web servers"""
        rtable = self.vpc_client.create_route_table(self.ctx.vpc.id)
        self.assertIsNotNone(rtable)
        self.assertTrue(rtable.id)
        self.addResourceCleanUp(self.vpc_client.delete_route_table, rtable.id)
        ig = self.ctx.internet_gateway
        # The web subnet's default route goes straight to the internet
        # gateway (public subnet).
        status = self.vpc_client.create_route(rtable.id, "0.0.0.0/0",
                                              gateway_id=ig.id)
        self.assertTrue(status)
        association_id = self.vpc_client.associate_route_table(
            rtable.id, self.ctx.web_subnet.id)
        self.assertTrue(association_id)
        self.addResourceCleanUp(self.vpc_client.disassociate_route_table,
                                association_id)
        self.ctx.custom_route_table = rtable
    def test_050_create_security_groups(self):
        """Create and tune security groups"""
        sg = self.vpc_client.create_security_group(
            data_utils.rand_name("WebServerSG-"),
            data_utils.rand_name("description "),
            self.ctx.vpc.id)
        self.assertIsNotNone(sg)
        self.assertTrue(sg.id)
        self.addResourceCleanUp(self._destroy_security_group_wait, sg)
        self.ctx.web_security_group = sg
        sg = self.vpc_client.create_security_group(
            data_utils.rand_name("NATSG-"),
            data_utils.rand_name("description "),
            self.ctx.vpc.id)
        self.assertIsNotNone(sg)
        self.assertTrue(sg.id)
        self.addResourceCleanUp(self._destroy_security_group_wait, sg)
        self.ctx.nat_security_group = sg
        sg = self.vpc_client.create_security_group(
            data_utils.rand_name("DBServerSG-"),
            data_utils.rand_name("description "),
            self.ctx.vpc.id)
        self.assertIsNotNone(sg)
        self.assertTrue(sg.id)
        self.addResourceCleanUp(self._destroy_security_group_wait, sg)
        self.ctx.db_security_group = sg
        # Web servers: drop the default allow-all egress, then open only
        # what the scenario needs.
        sg = self.ctx.web_security_group
        status = self.vpc_client.revoke_security_group_egress(
            sg.id, "-1", cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 1433, 1433,
            src_group_id=self.ctx.db_security_group.id)
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 3306, 3306,
            src_group_id=self.ctx.db_security_group.id)
        self.assertTrue(status)
        # NOTE(ft): especially for connectivity test
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 80, 80, cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        # NOTE(ft): especially for connectivity test
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 22, 22,
            src_group_id=self.ctx.db_security_group.id)
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp", from_port=80, to_port=80,
            cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp", from_port=443, to_port=443,
            cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp", from_port=22, to_port=22,
            cidr_ip=str(self.test_client_cidr))
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp", from_port=3389,
            to_port=3389, cidr_ip=str(self.test_client_cidr))
        self.assertTrue(status)
        # NAT instance: allow web/DNS egress and forwardable traffic from
        # the private (db) subnet; DNS ports opened per class docstring.
        sg = self.ctx.nat_security_group
        status = self.vpc_client.revoke_security_group_egress(
            sg.id, "-1", cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 80, 80, cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 443, 443, cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 53, 53, cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "udp", 53, 53, cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp", from_port=53,
            to_port=53, cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="udp", from_port=53,
            to_port=53, cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp", from_port=80, to_port=80,
            cidr_ip=str(self.db_subnet))
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp", from_port=443, to_port=443,
            cidr_ip=str(self.db_subnet))
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp", from_port=22, to_port=22,
            cidr_ip=str(self.test_client_cidr))
        self.assertTrue(status)
        # DB servers: outbound web/DNS (via NAT) plus inbound DB/SSH ports
        # from the web security group only.
        sg = self.ctx.db_security_group
        status = self.vpc_client.revoke_security_group_egress(
            sg.id, "-1", cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 80, 80, cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 443, 443, cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "tcp", 53, 53, cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group_egress(
            sg.id, "udp", 53, 53, cidr_ip="0.0.0.0/0")
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp",
            from_port=1433,
            to_port=1433,
            src_security_group_group_id=self.ctx.web_security_group.id)
        self.assertTrue(status)
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp",
            from_port=3306,
            to_port=3306,
            src_security_group_group_id=self.ctx.web_security_group.id)
        self.assertTrue(status)
        # NOTE(ft): especially for connectivity test
        status = self.vpc_client.authorize_security_group(
            group_id=sg.id, ip_protocol="tcp",
            from_port=22,
            to_port=22,
            src_security_group_group_id=self.ctx.web_security_group.id)
        self.assertTrue(status)
    def test_100_launch_nat_instance(self):
        """Launch instances for NAT server"""
        # Two NICs: eth0 in the public (web) subnet, eth1 in the private
        # (db) subnet -- see item 3 of the class docstring.
        interface_web = networkinterface.NetworkInterfaceSpecification(
            subnet_id=self.ctx.web_subnet.id,
            groups=[self.ctx.nat_security_group.id])
        interface_db = networkinterface.NetworkInterfaceSpecification(
            subnet_id=self.ctx.db_subnet.id,
            groups=[self.ctx.nat_security_group.id])
        reservation = self.vpc_client.run_instances(
            self.image_id,
            key_name=self.keypair.name,
            # security_group_ids=[self.ctx.nat_security_group.id],
            instance_type=self.instance_type,
            placement=self.zone,
            # subnet_id=self.ctx.web_subnet.id
            network_interfaces=(
                networkinterface.NetworkInterfaceCollection(
                    interface_web, interface_db))
        )
        self.assertIsNotNone(reservation)
        self.addResourceCleanUp(self.destroy_reservation, reservation)
        self.assertEqual(1, len(reservation.instances))
        instance = reservation.instances[0]
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")
        instance.ip_address = self._prepare_public_ip(
            instance,
            instance.interfaces[0].id)
        # Disable source/dest check on the private-side NIC so it may
        # forward traffic that is not addressed to itself (item 4).
        internal_interface_id = instance.interfaces[1].id
        status = self.vpc_client.modify_network_interface_attribute(
            internal_interface_id,
            attr='sourceDestCheck',
            value=False)
        self.assertTrue(status)
        # Point the main (private subnet) default route at that NIC
        # (item 5 of the class docstring).
        rtable = self.ctx.main_route_table
        status = self.vpc_client.create_route(
            rtable.id, "0.0.0.0/0",
            interface_id=internal_interface_id)
        self.assertTrue(status)
        self.ctx.nat_instance = instance
    def test_101_launch_instances(self):
        """Launch instances for web server and db server"""
        reservation = self.vpc_client.run_instances(
            self.image_id,
            key_name=self.keypair.name,
            security_group_ids=[self.ctx.web_security_group.id],
            instance_type=self.instance_type,
            placement=self.zone,
            subnet_id=self.ctx.web_subnet.id)
        self.assertIsNotNone(reservation)
        self.addResourceCleanUp(self.destroy_reservation, reservation)
        self.assertEqual(1, len(reservation.instances))
        instance = reservation.instances[0]
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")
        instance.ip_address = self._prepare_public_ip(instance)
        self.ctx.web_instance = instance
        # The DB instance gets no public IP: it is only reachable through
        # the web instance and goes out through the NAT.
        reservation = self.vpc_client.run_instances(
            self.image_id,
            key_name=self.keypair.name,
            security_group_ids=[self.ctx.db_security_group.id],
            instance_type=self.instance_type,
            placement=self.zone,
            subnet_id=self.ctx.db_subnet.id)
        self.assertIsNotNone(reservation)
        self.addResourceCleanUp(self.destroy_reservation, reservation)
        self.assertEqual(1, len(reservation.instances))
        instance = reservation.instances[0]
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")
        self.ctx.db_instance = instance
    def test_102_tune_nat_instance(self):
        """Tune NAT in NAT instance"""
        # Configure masquerading and IP forwarding over SSH, then bring up
        # the private-side interface (eth1).
        instance = self.ctx.nat_instance
        address = instance.ip_address
        ssh = remote_client.RemoteClient(address,
                                         self.ssh_user,
                                         pkey=self.keypair.material)
        ssh.exec_command("sudo iptables -t nat -A POSTROUTING -s %s "
                         "-o eth0 -j MASQUERADE" % str(self.vpc_cidr))
        ssh.exec_command("sudo sysctl -w net.ipv4.ip_forward=1")
        ssh.exec_command("echo $'auto eth1\niface eth1 inet dhcp\n' "
                         "| sudo tee -a /etc/network/interfaces.d/eth1.cfg")
        ssh.exec_command("sudo ifup eth1")
    def test_200_check_connectivity(self):
        """Check inside and outside connectivities"""
        web_ip = self.ctx.web_instance.ip_address
        db_ip = self.ctx.db_instance.private_ip_address
        ssh = remote_client.RemoteClient(web_ip,
                                         self.ssh_user,
                                         pkey=self.keypair.material)
        # Copy the private key onto the web instance so it can SSH on to
        # the DB instance (which has no public address).
        ssh_conn = ssh.ssh_client._get_ssh_connection()
        sftp = ssh_conn.open_sftp()
        fr = sftp.file("key.pem", 'wb')
        fr.set_pipelined(True)
        fr.write(self.keypair.material)
        fr.close()
        ssh_conn.close()
        ssh.exec_command('chmod 400 key.pem')
        # Hop web -> db, then curl out to the internet through the NAT;
        # a failure anywhere in the chain raises from exec_command.
        ssh.exec_command(
            "ssh -i key.pem -o UserKnownHostsFile=/dev/null "
            "-o StrictHostKeyChecking=no %(user)s@%(ip)s "
            "curl -s http://google.com" %
            {"user": self.ssh_user, "ip": db_ip})
| |
# Copyright 2015-2016 Dietrich Epp.
#
# This file is part of Kitten Teleporter. The Kitten Teleporter source
# code is distributed under the terms of the MIT license.
# See LICENSE.txt for details.
import base64
import collections
import hashlib
import json
import os
import pipes
import shlex
import subprocess
import sys
class BuildFailure(Exception):
    """Raised when an external build command or packaging step fails."""
    pass
def nbin(name):
    """Get the path to a binary installed with NPM."""
    bin_dir = './node_modules/.bin'
    return os.path.join(bin_dir, name)
def all_files(root, *, exts=None):
    """Yield every non-hidden file below *root*.

    If *exts* is given, only files whose extension is in *exts* are
    yielded; an empty *exts* yields nothing at all.
    """
    if exts is not None:
        if not exts:
            return
        exts = set(exts)
    for dirpath, dirnames, filenames in os.walk(root):
        # Prune hidden directories in place so os.walk skips them.
        dirnames[:] = [d for d in dirnames if not d.startswith('.')]
        for fname in filenames:
            if fname.startswith('.'):
                continue
            if exts and os.path.splitext(fname)[1] not in exts:
                continue
            yield os.path.join(dirpath, fname)
def latest_mtime(files):
    """Get the latest modification timestamp of the given files.

    Returns -1 when *files* is empty.
    """
    return max((os.stat(path).st_mtime for path in files), default=-1)
def format_cmd(cmd, *, cwd=None):
    """Format a command line for log output, shell-quoting each argument.

    Only the basename of the executable is shown.  If *cwd* is given, a
    leading ``cd <cwd>;`` is prepended.
    """
    # FIX: use shlex.quote -- pipes.quote is undocumented, deprecated since
    # Python 3.11 and removed in 3.13; shlex.quote is its supported spelling.
    parts = []
    if cwd is not None:
        parts.append('cd {};'.format(shlex.quote(cwd)))
    parts.append(shlex.quote(os.path.basename(cmd[0])))
    for arg in cmd[1:]:
        parts.append(shlex.quote(arg))
    return ' '.join(parts)
def run_cmd(cmd, *, cwd=None):
    """Run a simple command, raising BuildFailure on a nonzero exit."""
    print(' ' + format_cmd(cmd, cwd=cwd), file=sys.stderr)
    # subprocess.call is Popen + wait in one step.
    status = subprocess.call(cmd, cwd=cwd)
    if status != 0:
        raise BuildFailure('Command failed: {}'.format(cmd[0]))
def run_pipe(cmd, data=None, *, cwd=None):
    """Pipe *data* through a single command and return its stdout bytes.

    *data* (if not None) is fed to the child's stdin.  Raises
    BuildFailure when the command exits with a nonzero status.
    """
    print(' ' + format_cmd(cmd, cwd=cwd), file=sys.stderr)
    proc = subprocess.Popen(
        cmd,
        stdin=None if data is None else subprocess.PIPE,
        stdout=subprocess.PIPE,
        cwd=cwd)  # BUG FIX: was hard-coded cwd=None, ignoring the cwd argument
    stdout, _ = proc.communicate(data)
    if proc.returncode != 0:
        raise BuildFailure('Command failed: {}'.format(cmd[0]))
    return stdout
CachedFile = collections.namedtuple('CachedFile', 'path fhash key intermediate')
def sort_key(path):
    """Sort key that groups paths by extension, then by base name."""
    stem, extension = os.path.splitext(path)
    return (extension, stem)
class BuildSystem(object):
    """The application build system.

    Tracks built outputs in an in-memory cache keyed by their requested
    path, so unchanged inputs are not rebuilt, and optionally
    cache-busts output filenames with a content-hash fragment.
    """
    __slots__ = [
        # Map of all build files.
        'cache',
        # Version of most recent build.
        'version',
    ]
    def __init__(self):
        self.cache = {}
        self.version = None
    def copy(self, path, src, *, bust=False):
        """Copy a file and return the path."""
        def get_data():
            with open(src, 'rb') as fp:
                return fp.read()
        return self.build(path, get_data, deps=[src], bust=bust)
    def write(self, path, data, *, bust=False):
        """Write data and return the path."""
        # The identity builder simply returns the supplied bytes.
        return self.build(path, lambda x: x, args=[data], bust=bust)
    def build(self, path, builder, *,
              deps=[], args=(), kw={}, bust=False, intermediate=False):
        """Build a file and return the corrected path.

        builder(*args, **kw) must return the output bytes.  The result is
        cached first by (dep mtime, args, kw), then by content hash; if
        *bust* is true the content hash is embedded in the filename.
        NOTE(review): the mutable defaults (deps=[], kw={}) are never
        mutated here, so they are harmless in practice.
        """
        mtime = latest_mtime(deps)
        key = mtime, args, kw
        try:
            cached = self.cache[path]
        except KeyError:
            cached = None
        else:
            # Cheap check: same inputs as last time -> reuse without
            # invoking the builder at all.
            if key == cached.key:
                return cached.path
        print('Rebuilding {}'.format(path), file=sys.stderr)
        data = builder(*args, **kw)
        obj = hashlib.new('SHA256')
        obj.update(data)
        fhash = obj.digest()
        # Second-level check: inputs changed but output bytes did not.
        if cached is not None and cached.fhash == fhash:
            return cached.path
        dirname, basename = os.path.split(path)
        if bust:
            # Embed the first 8 hex chars of the content hash before the
            # extension, e.g. app.js -> app.1a2b3c4d.js
            out_name = '{0[0]}.{1}{0[1]}'.format(
                os.path.splitext(basename),
                base64.b16encode(fhash)[:8].lower().decode('UTF-8'))
            out_path = os.path.join(dirname, out_name)
        else:
            out_path = path
        cached = CachedFile(out_path, fhash, key, intermediate)
        if dirname:
            os.makedirs(dirname, exist_ok=True)
        with open(out_path, 'wb') as fp:
            fp.write(data)
        self.cache[path] = cached
        return out_path
    def build_module(self, path, name, builder, *, intermediate=False):
        """Build a file from an NPM module.

        The module's package.json version is embedded in the output
        filename, so an existing file for that version is reused as-is.
        """
        with open(os.path.join('node_modules', name, 'package.json')) as fp:
            data = json.load(fp)
        version = data['version']
        dirname, basename = os.path.split(path)
        out_name = '{0[0]}-{1}{0[1]}'.format(
            os.path.splitext(basename), version)
        out_path = os.path.join(dirname, out_name)
        if not os.path.isfile(out_path):
            data = builder()
            if dirname:
                os.makedirs(dirname, exist_ok=True)
            with open(out_path, 'wb') as fp:
                fp.write(data)
        self.cache[out_path] = CachedFile(out_path, None, None, intermediate)
        return out_path
    def files(self, root):
        """Get a list of all files in the build system below the given root."""
        if root and not root.endswith('/'):
            root += '/'
        files = []
        for c in self.cache.values():
            # Intermediate outputs are excluded; paths are returned
            # relative to root.
            if not c.intermediate and c.path.startswith(root):
                files.append(c.path[len(root):])
        return files
    def package(self, out_path, root):
        """Create a package for all of the files below the given root"""
        files = self.files(root)
        if not files:
            raise BuildFailure('No files')
        files.sort(key=sort_key)
        try:
            with open(out_path, 'wb') as fp:
                subprocess.check_call(['tar', 'cvz'] + files,
                                      stdout=fp, cwd=root)
        # NOTE(review): bare except is deliberate here -- on any failure
        # (including KeyboardInterrupt) the partial archive is removed
        # before the exception propagates.
        except:
            try:
                os.unlink(out_path)
            except FileNotFoundError:
                pass
            raise
    def mark_intermediate(self, paths):
        # Flag already-built outputs as intermediate so files()/package()
        # skip them.
        for path in paths:
            self.cache[path] = self.cache[path]._replace(intermediate=True)
def compile_ts(config, tsconfig):
    """Compile TypeScript files using the given tsconfig project file."""
    run_cmd([nbin('tsc'), '-p', tsconfig])
def browserify(config, output, modules, env):
    """Bundle a JavaScript application using browserify.

    In debug builds the bundle is emitted with source maps and then
    post-processed with sorcery to inline them as data URIs.
    """
    dirname, outname = os.path.split(output)
    dirname = dirname or '.'
    cmd = [os.path.abspath(nbin('browserify')), '-o', outname]
    if config.debug:
        cmd.append('--debug')
    if env:
        # Inject environment variables via the envify transform.
        cmd.extend(('-t', '[', 'envify'))
        for key, value in env.items():
            cmd.extend(('--' + key.upper(), value))
        cmd.append(']')
    cmd.extend(os.path.relpath(module, dirname) for module in modules)
    run_cmd(cmd, cwd=dirname)
    if config.debug:
        sorcery_cmd = [nbin('sorcery'), '--datauri', '--input', output]
        run_cmd(sorcery_cmd)
def minify_js(config, data):
    """Minify a JavaScript document (pass-through in debug builds)."""
    if config.debug:
        return data
    return run_pipe([nbin('uglifyjs'), '--mangle', '--compress'], data)
def minify_css(config, data):
    """Minify a CSS document (pass-through in debug builds)."""
    if config.debug:
        return data
    return run_pipe([nbin('cleancss')], data)
def minify_html(config, data):
    """Minify an HTML document (pass-through in debug builds)."""
    if config.debug:
        return data
    return run_pipe([nbin('html-minifier'), '--collapse-whitespace'], data)
def minify_json(config, path):
    """Minify the JSON document at *path*, returning compact UTF-8 bytes."""
    with open(path) as fp:
        obj = json.load(fp)
    compact = json.dumps(obj, separators=(',', ':'), sort_keys=True)
    return compact.encode('UTF-8')
def dump_json(config, obj, *, pretty=False):
    """Dump *obj* as UTF-8 JSON bytes in the configured format.

    Output is indented when the build is in debug mode or when *pretty*
    is true; otherwise it is emitted in the most compact form.
    """
    # BUG FIX: *pretty* was accepted but never consulted, so callers asking
    # for pretty output silently got the compact form in release builds.
    if config.debug or pretty:
        indent = 2
        separators = ', ', ': '
    else:
        indent = None
        separators = ',', ':'
    s = json.dumps(obj, indent=indent, separators=separators, sort_keys=True)
    return s.encode('UTF-8')
| |
"""
unmatcher :: Regular expression reverser for Python
"""
__version__ = "0.1.4-dev"
__author__ = "Karol Kuczmarski"
__license__ = "Simplified BSD"
import random
import re
import string
import sys
# Python 2/3 compatibility shims
IS_PY3 = sys.version[0] == '3'
if IS_PY3:
imap = map
unichr = chr
xrange = range
else:
from itertools import imap
__all__ = ['reverse', 'ReversalError']
def reverse(pattern, *args, **kwargs):
    """Reverse the regular expression, returning a string that would match it.

    :param pattern: Regular expression pattern, either compiled one or a string

    Additional arguments (positional and keyword) supply predefined string
    matches for capture groups present in the ``pattern``.

    :return: String that matches ``pattern``
    """
    flags = None
    if not is_string(pattern):
        # assuming a compiled regex object
        flags = pattern.flags
        pattern = pattern.pattern
    sre_subpattern = re.sre_parse.parse(pattern)
    # Merge positional and keyword capture-group values into one mapping,
    # rejecting a group supplied both ways.
    groupvals = kwargs or {}
    for index, value in enumerate(args, 1):
        if index in groupvals:
            raise ReversalError(
                pattern,
                "reverse() got multiple values for capture group '%s'" % index)
        groupvals[index] = value
    try:
        groups = resolve_groupvals(sre_subpattern.pattern, groupvals)
    except ValueError as e:
        raise ReversalError(pattern, str(e))
    # Perform the reversal using the expression's AST and the resolved
    # capture group values.
    reversal = Reversal(sre_subpattern.data, flags=flags, groups=groups,
                        string_class=type(pattern))
    try:
        return reversal.perform()
    except ValueError as e:
        raise ReversalError(pattern, str(e))
class ReversalError(ValueError):
    """Signals that reversing a regular expression has failed.

    The offending pattern is retained on the ``pattern`` attribute.

    :param pattern: the pattern whose reversal failed
    :param message: optional human-readable description; a generic one is
                    synthesized when omitted
    """
    def __init__(self, pattern, message=None):
        self.pattern = pattern
        msg = ("unknown error while reversing pattern: %s" % (pattern,)
               if message is None else message)
        super(ReversalError, self).__init__(msg)
# Implementation
is_string = lambda x: isinstance(x, (str if IS_PY3 else basestring))
def resolve_groupvals(sre_pattern, groupvals):
    """Resolve a dictionary of capture group values (mapped from either
    their names or indices), returning an array of those values ("mapped"
    only from capture group indices).

    :param sre_pattern: A ``sre_parse.Pattern`` object
    :param groupvals: Dictionary mapping capture group names **or** indices
                      into string values for those groups
    """
    name_to_index = sre_pattern.groupdict
    resolved = [None] * sre_pattern.groups
    for ref, value in groupvals.items():
        try:
            # Names are looked up in the pattern's group dict;
            # numeric references index the result array directly.
            resolved[name_to_index[ref] if is_string(ref) else ref] = value
        except (LookupError, TypeError):
            raise ValueError("invalid capture group reference: %s" % ref)
    return resolved
class Reversal(object):
    """Encapsulates the reversal process of a single regular expression."""
    # TODO: choose among Unicode characters if using Unicode
    BUILTIN_CHARSETS = {
        'word': string.ascii_letters + string.digits + '_',
        'digit': string.digits,
        'space': string.whitespace,
    }
    # Cap applied to unbounded quantifiers (*, +, {n,}) so that generated
    # strings stay reasonably short.
    MAX_REPEAT = 64

    def __init__(self, regex_ast, flags=None, groups=None, string_class=None):
        """Constructor.
        Use keywords to pass arguments other than ``regex_ast``.

        :param regex_ast: ``sre_parse`` node list of the parsed expression
        :param flags: ``re`` flags the pattern was compiled with, if any
        :param groups: pre-seeded capture group values (1-based; slot 0 unused)
        :param string_class: class (``str`` or ``unicode``) of the source pattern
        """
        self.regex_ast = regex_ast
        self.flags = flags or 0
        self.groups = groups or [None]
        # use correct string class depending on Python version or argument
        if string_class is None:
            string_class = str if IS_PY3 else unicode
        self._str = string_class
        # choose the char constructor matching the string class
        self._chr = unichr if string_class.__name__ == 'unicode' else chr

    def perform(self):
        # Entry point: reverse the whole AST into one matching string.
        return self._reverse_nodes(self.regex_ast)

    # Reversing regex AST nodes

    def _reverse_nodes(self, nodes):
        """Generates string matching given sequence of nodes
        from regular expressions' abstract syntax tree (AST).
        """
        return self._str().join(imap(self._reverse_node, nodes))

    def _reverse_node(self, node):
        """Generates string matching given node from regular expression AST."""
        # Dispatch on the sre_parse opcode carried by the node.
        type_, data = node
        if type_ == re.sre_parse.LITERAL:
            return self._reverse_literal_node(data)
        if type_ == re.sre_parse.NOT_LITERAL:
            return self._reverse_not_literal_node(data)
        if type_ == re.sre_parse.ANY:
            return random.choice(self._charset('any'))
        if type_ == re.sre_parse.IN:
            return self._reverse_in_node(data)
        if type_ == re.sre_parse.BRANCH:
            return self._reverse_branch_node(data)
        if type_ in (re.sre_parse.MIN_REPEAT, re.sre_parse.MAX_REPEAT):
            return self._reverse_repeat_node(data)
        if type_ == re.sre_parse.SUBPATTERN:
            return self._reverse_subpattern_node(data)
        if type_ == re.sre_parse.GROUPREF:
            return self._reverse_groupref_node(data)
        if type_ == re.sre_parse.GROUPREF_EXISTS:
            return self._reverse_groupref_exists_node(data)
        if type_ in (re.sre_parse.ASSERT, re.sre_parse.ASSERT_NOT):
            # TODO: see whether these are in any way relevant
            # to string generation and support them if so
            raise NotImplementedError(
                "lookahead/behind assertion are not supported")
        if type_ == re.sre_parse.AT:
            # match-beginning (^) or match-end ($);
            # irrelevant for string generation
            return ''
        raise NotImplementedError(
            "unsupported regular expression element: %s" % type_)

    def _reverse_literal_node(self, node_data):
        """Generates string matching the ``sre_parse.LITERAL`` node
        from regexp. AST.
        This node matches a literal character, a behavior which may optionally
        be modified by certain regular expressions flags.
        """
        char = self._chr(node_data)
        if self.flags & re.IGNORECASE:
            # randomize case so IGNORECASE patterns get varied output
            case_func = random.choice((self._str.lower, self._str.upper))
            char = case_func(char)
        return char

    def _reverse_not_literal_node(self, node_data):
        """Generates string matching the ``sre_parse.NOT_LITERAL`` node
        from regexp. AST.
        This node matches characters *except* for given one, which corresponds
        to ``[^X]`` syntax, where ``X`` is a character.
        """
        excluded = self._chr(node_data)
        if self.flags & re.IGNORECASE:
            # both cases of the character must be excluded
            excluded = (excluded.lower(), excluded.upper())
        return random.choice(self._negate(excluded))

    def _reverse_in_node(self, node_data):
        """Generates string matching the ``sre_parse.IN`` node
        from regular expr. AST.
        This node matches a specified set of characters. Typically,
        it is expressed using the ``[...]`` notation, but it can also arise
        from simple uses of ``|`` operator, where all branches match
        just one, literal character (e.g. ``a|b|c``).
        """
        # a leading NEGATE marker means the whole class is complemented ([^...])
        negate = str(node_data[0][0]).lower() == 'negate'
        if negate:
            node_data = node_data[1:]

        # accumulate the candidate characters from every alternative
        charset = set()
        for type_, data in node_data:
            if type_ == re.sre_parse.LITERAL:
                charset.add(self._chr(data))
            elif type_ == re.sre_parse.RANGE:
                # e.g. a-z: endpoints are inclusive
                min_char, max_char = data
                charset.update(imap(self._chr, xrange(min_char, max_char + 1)))
            elif type_ == re.sre_parse.CATEGORY:
                data = str(data).lower()  # for Python 3.5+
                _, what = data.rsplit('_', 1)  # category(_not)?_(digit|etc.)
                category_chars = self._charset(what)
                if '_not_' in data:
                    category_chars = self._negate(category_chars)
                charset.update(category_chars)
            else:
                raise ValueError("invalid charset alternative: %s" % type_)

        if negate:
            charset = self._negate(charset)
        return random.choice(list(charset))

    def _reverse_repeat_node(self, node_data):
        """Generates string matching ``sre_parse.MIN_REPEAT``
        or ``sre_parse.MAX_REPEAT`` node from regular expression AST.
        This node matches a repetition of pattern matched by its child node.
        """
        # ``[what]`` is always a 1-element list due to quirk in ``sre_parse``;
        # for reference, see `sre_parse.py` (lines 503-514) in Python's stdlib
        min_count, max_count, [what] = node_data
        max_count = min(max_count, self.MAX_REPEAT)
        count = random.randint(min_count, max_count)
        return self._reverse_nodes([what] * count)

    def _reverse_branch_node(self, node_data):
        """Generates string matching the ``sre_parse.BRANCH`` node
        in regular expr. AST.
        This node is similar to 'in', in a sense that it's also an alternative
        between several variants. However, each variant here can consist
        of more then one node.
        """
        # first value is always ``None`` due to quirk in ``sre_parse`` module;
        # for reference, see `sre_parse.py` (line 357) in Python's stdlib
        _, variants = node_data
        nodes = random.choice(variants)
        return self._reverse_nodes(nodes)

    def _reverse_subpattern_node(self, node_data):
        """Generates string matching the ``sre_parse.SUBPATTERN`` node
        in regular expr. AST.
        This node corresponds to parenthesised group inside the expression.
        If this is a capture group, the reversed result is memorized,
        so that it can be used when referring back to the capture through
        ``\1``, etc.
        """
        # NOTE: node_data layout differs across Python versions;
        # index is always first and the node list always last.
        index = node_data[0]
        nodes = node_data[-1]
        if index is None:
            return self._reverse_nodes(nodes)  # non-capture group

        # reuse a predefined/previously generated value when available
        result = self.groups[index]
        if result is None:
            result = self.groups[index] = self._reverse_nodes(nodes)
        return result

    def _reverse_groupref_node(self, node_data):
        """Generates string matching the ``sre_parse.GROUPREF`` node
        in regular expr. AST.
        This node is a backreference to previously matched capture group.
        """
        # AST always refers to capture groups by index,
        # and detects circular/forward references at parse time,
        # so handling of this node can be indeed very simple
        index = node_data
        return self.groups[index]

    def _reverse_groupref_exists_node(self, node_data):
        """Generates string matching the ``sre_parse.GROUPREF_EXISTS`` node
        in regexp. AST.
        This node is a conditional test for one of the previously matched
        capture groups. Depending on whether group was matched or not,
        different subexpressions are matched next.
        """
        index, yes_pattern, no_pattern = node_data
        if self.groups[index] is not None:
            return self._reverse_nodes(yes_pattern)
        else:
            # the "no" branch is optional in the (?(id)yes|no) syntax
            return self._reverse_nodes(no_pattern) if no_pattern else ""

    # Handling character sets

    def _charset(self, name, flags=None):
        """Return chars belonging to charset of given name.

        :param flags: Optional flags override
        """
        # FIXME: take re.LOCALE and re.UNICODE flags into account
        flags = self.flags if flags is None else flags
        if name == 'any':
            all_chars = string.printable
            if not (flags & re.DOTALL):
                # without DOTALL, '.' never matches a newline
                all_chars = all_chars.replace("\n", "")
            return all_chars
        if name in self.BUILTIN_CHARSETS:
            return self.BUILTIN_CHARSETS[name]
        raise ValueError("invalid charset name '%s'" % name)

    def _negate(self, charset):
        """Returns negated version of given charset."""
        all_chars = self._charset('any')
        return list(set(all_chars) - set(charset))
| |
import re
import sys
from decimal import Decimal
from django.contrib.gis.db.backends.base import BaseSpatialOperations
from django.contrib.gis.db.backends.utils import SpatialOperation, SpatialFunction
from django.contrib.gis.db.backends.spatialite.adapter import SpatiaLiteAdapter
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.base import DatabaseOperations
from django.db.utils import DatabaseError
from django.utils import six
from django.utils.functional import cached_property
class SpatiaLiteOperator(SpatialOperation):
    """Wraps a raw SpatiaLite SQL operator such as ``&&`` or ``~``."""

    def __init__(self, operator):
        super(SpatiaLiteOperator, self).__init__(operator=operator)
class SpatiaLiteFunction(SpatialFunction):
    """Wraps a call to a SpatiaLite SQL function."""

    def __init__(self, function, **kwargs):
        super(SpatiaLiteFunction, self).__init__(function, **kwargs)
class SpatiaLiteFunctionParam(SpatiaLiteFunction):
    "For SpatiaLite functions that take another parameter."
    # The trailing %%s placeholder receives the extra parameter at query time.
    sql_template = '%(function)s(%(geo_col)s, %(geometry)s, %%s)'
class SpatiaLiteDistance(SpatiaLiteFunction):
    """Distance comparison: ``Distance(col, geom) <op> %s``."""

    dist_func = 'Distance'
    sql_template = '%(function)s(%(geo_col)s, %(geometry)s) %(operator)s %%s'

    def __init__(self, operator):
        # Always the Distance() function; only the comparison operator varies.
        parent = super(SpatiaLiteDistance, self)
        parent.__init__(self.dist_func, operator=operator)
class SpatiaLiteRelate(SpatiaLiteFunctionParam):
    """``Relate(<geom>, <pattern>)`` with a validated DE-9IM matrix pattern."""

    # Exactly nine characters drawn from 0/1/2/T/F/*.
    pattern_regex = re.compile(r'^[012TF\*]{9}$')

    def __init__(self, pattern):
        if self.pattern_regex.match(pattern) is None:
            raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
        super(SpatiaLiteRelate, self).__init__('Relate')
# Valid distance types and substitutions
# (the Python types accepted as the numeric argument of distance lookups)
dtypes = (Decimal, Distance, float) + six.integer_types
def get_dist_ops(operator):
    """Return the tuple of distance operations for *operator*.

    Only planar (cartesian) distances are produced; spherical distances
    are not currently supported on SpatiaLite.
    """
    return (SpatiaLiteDistance(operator),)
class SpatiaLiteOperations(DatabaseOperations, BaseSpatialOperations):
    """SQLite/SpatiaLite implementation of GeoDjango's spatial operations."""
    compiler_module = 'django.contrib.gis.db.models.sql.compiler'
    name = 'spatialite'
    spatialite = True
    # Parses version strings like "3.0.1" into major/minor1/minor2 groups.
    version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')
    valid_aggregates = {'Extent', 'Union'}

    Adapter = SpatiaLiteAdapter
    Adaptor = Adapter  # Backwards-compatibility alias.

    # SpatiaLite names for the spatial SQL functions.
    area = 'Area'
    centroid = 'Centroid'
    contained = 'MbrWithin'
    difference = 'Difference'
    distance = 'Distance'
    envelope = 'Envelope'
    intersection = 'Intersection'
    length = 'GLength'  # OpenGis defines Length, but this conflicts with an SQLite reserved keyword
    num_geom = 'NumGeometries'
    num_points = 'NumPoints'
    point_on_surface = 'PointOnSurface'
    scale = 'ScaleCoords'
    svg = 'AsSVG'
    sym_difference = 'SymDifference'
    transform = 'Transform'
    translate = 'ShiftCoords'
    union = 'GUnion'  # OpenGis defines Union, but this conflicts with an SQLite reserved keyword
    unionagg = 'GUnion'

    from_text = 'GeomFromText'
    from_wkb = 'GeomFromWKB'
    select = 'AsText(%s)'

    # Maps Django lookup names to the SpatiaLite operation that implements
    # them; tuple values carry the expected type of an extra parameter.
    geometry_functions = {
        'equals': SpatiaLiteFunction('Equals'),
        'disjoint': SpatiaLiteFunction('Disjoint'),
        'touches': SpatiaLiteFunction('Touches'),
        'crosses': SpatiaLiteFunction('Crosses'),
        'within': SpatiaLiteFunction('Within'),
        'overlaps': SpatiaLiteFunction('Overlaps'),
        'contains': SpatiaLiteFunction('Contains'),
        'intersects': SpatiaLiteFunction('Intersects'),
        'relate': (SpatiaLiteRelate, six.string_types),
        # Returns true if B's bounding box completely contains A's bounding box.
        'contained': SpatiaLiteFunction('MbrWithin'),
        # Returns true if A's bounding box completely contains B's bounding box.
        'bbcontains': SpatiaLiteFunction('MbrContains'),
        # Returns true if A's bounding box overlaps B's bounding box.
        'bboverlaps': SpatiaLiteFunction('MbrOverlaps'),
        # These are implemented here as synonyms for Equals
        'same_as': SpatiaLiteFunction('Equals'),
        'exact': SpatiaLiteFunction('Equals'),
    }

    distance_functions = {
        'distance_gt': (get_dist_ops('>'), dtypes),
        'distance_gte': (get_dist_ops('>='), dtypes),
        'distance_lt': (get_dist_ops('<'), dtypes),
        'distance_lte': (get_dist_ops('<='), dtypes),
    }
    geometry_functions.update(distance_functions)

    def __init__(self, connection):
        # NOTE(review): super() is anchored at DatabaseOperations (not
        # SpatiaLiteOperations), which skips DatabaseOperations.__init__
        # in the MRO — looks deliberate; confirm before "fixing".
        super(DatabaseOperations, self).__init__(connection)
        # Creating the GIS terms dictionary.
        self.gis_terms = set(['isnull'])
        self.gis_terms.update(self.geometry_functions)

    @cached_property
    def spatial_version(self):
        """Determine the version of the SpatiaLite library."""
        try:
            # spatialite_version_tuple() -> (version string, major, minor1,
            # minor2); drop the leading string, keep the numeric triple.
            version = self.spatialite_version_tuple()[1:]
        except Exception as msg:
            new_msg = (
                'Cannot determine the SpatiaLite version for the "%s" '
                'database (error was "%s"). Was the SpatiaLite initialization '
                'SQL loaded on this database?') % (self.connection.settings_dict['NAME'], msg)
            six.reraise(ImproperlyConfigured, ImproperlyConfigured(new_msg), sys.exc_info()[2])
        if version < (2, 3, 0):
            raise ImproperlyConfigured('GeoDjango only supports SpatiaLite versions '
                                       '2.3.0 and above')
        return version

    @property
    def _version_greater_2_4_0_rc4(self):
        if self.spatial_version >= (2, 4, 1):
            return True
        elif self.spatial_version < (2, 4, 0):
            return False
        else:
            # Spatialite 2.4.0-RC4 added AsGML and AsKML, however both
            # RC2 (shipped in popular Debian/Ubuntu packages) and RC4
            # report version as '2.4.0', so we fall back to feature detection
            try:
                self._get_spatialite_func("AsGML(GeomFromText('POINT(1 1)'))")
            except DatabaseError:
                return False
            return True

    @cached_property
    def gml(self):
        # GML output only exists from 2.4.0-RC4 onwards.
        return 'AsGML' if self._version_greater_2_4_0_rc4 else None

    @cached_property
    def kml(self):
        # KML output only exists from 2.4.0-RC4 onwards.
        return 'AsKML' if self._version_greater_2_4_0_rc4 else None

    @cached_property
    def geojson(self):
        # GeoJSON output requires SpatiaLite 3.0+.
        return 'AsGeoJSON' if self.spatial_version >= (3, 0, 0) else None

    def check_aggregate_support(self, aggregate):
        """
        Checks if the given aggregate name is supported (that is, if it's
        in `self.valid_aggregates`).
        """
        super(SpatiaLiteOperations, self).check_aggregate_support(aggregate)
        agg_name = aggregate.__class__.__name__
        return agg_name in self.valid_aggregates

    def convert_geom(self, wkt, geo_field):
        """
        Converts geometry WKT returned from a SpatiaLite aggregate.
        """
        if wkt:
            return Geometry(wkt, geo_field.srid)
        else:
            return None

    def geo_db_type(self, f):
        """
        Returns None because geometry columns are added via the
        `AddGeometryColumn` stored procedure on SpatiaLite.
        """
        return None

    def get_distance(self, f, value, lookup_type):
        """
        Returns the distance parameters for the given geometry field,
        lookup value, and lookup type. SpatiaLite only supports regular
        cartesian-based queries (no spheroid/sphere calculations for point
        geometries like PostGIS).
        """
        if not value:
            return []
        value = value[0]
        if isinstance(value, Distance):
            if f.geodetic(self.connection):
                raise ValueError('SpatiaLite does not support distance queries on '
                                 'geometry fields with a geodetic coordinate system. '
                                 'Distance objects; use a numeric value of your '
                                 'distance in degrees instead.')
            else:
                # Convert the Distance object into the field's native units.
                dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
        else:
            dist_param = value
        return [dist_param]

    def get_geom_placeholder(self, f, value):
        """
        Provides a proper substitution value for Geometries that are not in the
        SRID of the field. Specifically, this routine will substitute in the
        Transform() and GeomFromText() function call(s).
        """
        def transform_value(value, srid):
            # True when the geometry's SRID differs from the field's.
            return not (value is None or value.srid == srid)
        if hasattr(value, 'expression'):
            if transform_value(value, f.srid):
                placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
            else:
                placeholder = '%s'
            # No geometry value used for F expression, substitute in
            # the column name instead.
            return placeholder % self.get_expression_column(value)
        else:
            if transform_value(value, f.srid):
                # Adding Transform() to the SQL placeholder.
                return '%s(%s(%%s,%s), %s)' % (self.transform, self.from_text, value.srid, f.srid)
            else:
                return '%s(%%s,%s)' % (self.from_text, f.srid)

    def _get_spatialite_func(self, func):
        """
        Helper routine for calling SpatiaLite functions and returning
        their result.
        Any error occurring in this method should be handled by the caller.
        """
        cursor = self.connection._cursor()
        try:
            cursor.execute('SELECT %s' % func)
            row = cursor.fetchone()
        finally:
            # Always release the cursor, even on failure.
            cursor.close()
        return row[0]

    def geos_version(self):
        "Returns the version of GEOS used by SpatiaLite as a string."
        return self._get_spatialite_func('geos_version()')

    def proj4_version(self):
        "Returns the version of the PROJ.4 library used by SpatiaLite."
        return self._get_spatialite_func('proj4_version()')

    def spatialite_version(self):
        "Returns the SpatiaLite library version as a string."
        return self._get_spatialite_func('spatialite_version()')

    def spatialite_version_tuple(self):
        """
        Returns the SpatiaLite version as a tuple (version string, major,
        minor, subminor).
        """
        # Getting the SpatiaLite version.
        try:
            version = self.spatialite_version()
        except DatabaseError:
            # The `spatialite_version` function first appeared in version 2.3.1
            # of SpatiaLite, so doing a fallback test for 2.3.0 (which is
            # used by popular Debian/Ubuntu packages).
            version = None
            try:
                tmp = self._get_spatialite_func("X(GeomFromText('POINT(1 1)'))")
                if tmp == 1.0:
                    version = '2.3.0'
            except DatabaseError:
                pass
            # If no version string defined, then just re-raise the original
            # exception.
            if version is None:
                raise
        m = self.version_regex.match(version)
        if m:
            major = int(m.group('major'))
            minor1 = int(m.group('minor1'))
            minor2 = int(m.group('minor2'))
        else:
            raise Exception('Could not parse SpatiaLite version string: %s' % version)
        return (version, major, minor1, minor2)

    def spatial_aggregate_sql(self, agg):
        """
        Returns the spatial aggregate SQL template and function for the
        given Aggregate instance.
        """
        agg_name = agg.__class__.__name__
        if not self.check_aggregate_support(agg):
            # NOTE(review): "implmented" typo is in the runtime message;
            # left unchanged here to keep output byte-identical.
            raise NotImplementedError('%s spatial aggregate is not implmented for this backend.' % agg_name)
        agg_name = agg_name.lower()
        if agg_name == 'union':
            # Union maps to the GUnion aggregate alias.
            agg_name += 'agg'
        sql_template = self.select % '%(function)s(%(field)s)'
        sql_function = getattr(self, agg_name)
        return sql_template, sql_function

    def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
        """
        Returns the SpatiaLite-specific SQL for the given lookup value
        [a tuple of (alias, column, db_type)], lookup type, lookup
        value, the model field, and the quoting function.
        """
        alias, col, db_type = lvalue

        # Getting the quoted field as `geo_col`.
        geo_col = '%s.%s' % (qn(alias), qn(col))

        if lookup_type in self.geometry_functions:
            # See if a SpatiaLite geometry function matches the lookup type.
            tmp = self.geometry_functions[lookup_type]

            # Lookup types that are tuples take tuple arguments, e.g., 'relate' and
            # distance lookups.
            if isinstance(tmp, tuple):
                # First element of tuple is the SpatiaLiteOperation instance, and the
                # second element is either the type or a tuple of acceptable types
                # that may be passed in as further parameters for the lookup type.
                op, arg_type = tmp

                # Ensuring that a tuple _value_ was passed in from the user
                if not isinstance(value, (tuple, list)):
                    raise ValueError('Tuple required for `%s` lookup type.' % lookup_type)

                # Geometry is first element of lookup tuple.
                geom = value[0]

                # Number of valid tuple parameters depends on the lookup type.
                if len(value) != 2:
                    raise ValueError('Incorrect number of parameters given for `%s` lookup type.' % lookup_type)

                # Ensuring the argument type matches what we expect.
                if not isinstance(value[1], arg_type):
                    raise ValueError('Argument type should be %s, got %s instead.' % (arg_type, type(value[1])))

                # For lookup type `relate`, the op instance is not yet created (has
                # to be instantiated here to check the pattern parameter).
                if lookup_type == 'relate':
                    op = op(value[1])
                elif lookup_type in self.distance_functions:
                    op = op[0]
            else:
                op = tmp
                geom = value
            # Calling the `as_sql` function on the operation instance.
            return op.as_sql(geo_col, self.get_geom_placeholder(field, geom))
        elif lookup_type == 'isnull':
            # Handling 'isnull' lookup type
            return "%s IS %sNULL" % (geo_col, ('' if value else 'NOT ')), []

        raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))

    # Routines for getting the OGC-compliant models.
    def geometry_columns(self):
        from django.contrib.gis.db.backends.spatialite.models import GeometryColumns
        return GeometryColumns

    def spatial_ref_sys(self):
        from django.contrib.gis.db.backends.spatialite.models import SpatialRefSys
        return SpatialRefSys
| |
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
from iptest.assert_util import *
skiptest("win32")  # these tests drive ipy.exe; presumably not runnable on win32 CPython — TODO confirm
from iptest.console_util import IronPythonInstance
remove_ironpython_dlls(testpath.public_testdir)
from sys import executable
from System import Environment
from sys import exec_prefix

# Propagate -X:LightweightScopes to every spawned console so the child
# interpreter matches the host's configuration.
extraArgs = ""
if "-X:LightweightScopes" in Environment.GetCommandLineArgs():
    extraArgs += " -X:LightweightScopes"
def test_strings():
    # Console handling of string exceptions and multi-line string literals.
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)

    # String exception: raising a str is rejected with a TypeError.
    response = ipi.ExecuteLine("raise 'foo'", True)
    AreEqual(response.replace("\r\r\n", "\n").replace("\r", ""),
             """Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
TypeError: exceptions must be classes, or instances, not str""")

    # Multi-line string literal: embedded blank lines are preserved.
    ipi.ExecutePartialLine("\"\"\"Hello")
    ipi.ExecutePartialLine("")
    ipi.ExecutePartialLine("")
    AreEqual("'Hello\\n\\n\\nWorld'", ipi.ExecuteLine("World\"\"\""))

    # if/else spanning partial lines (Python 2 print statements).
    ipi.ExecutePartialLine("if False: print 3")
    ipi.ExecutePartialLine("else: print 'hello'")
    AreEqual(r'hello', ipi.ExecuteLine(""))

    # Empty line
    AreEqual("", ipi.ExecuteLine(""))

    ipi.End()
def test_exceptions():
    # A bare "raise Exception" prints a traceback with no message text.
    console = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(console.Start(), True)
    # The console emits \r\r\n line endings plus a trailing \r.
    expected = '''Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
Exception'''.replace("\n", "\r\r\n") + "\r"
    AreEqual(console.ExecuteLine("raise Exception", True), expected)
    console.End()
def test_exceptions_nested():
    # Tracebacks from nested user-defined functions must list every frame.
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecutePartialLine("def a(): return b()")
    ipi.ExecuteLine("")
    ipi.ExecutePartialLine("def b(): return 1/0")
    ipi.ExecuteLine("")
    response = ipi.ExecuteLine("a()", True)
    response = response.replace("\r\r\n", "\n").strip()
    # Expect frames for <module>, a and b, ending in ZeroDivisionError.
    Assert(response.startswith('''Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "<stdin>", line 1, in a
  File "<stdin>", line 1, in b
ZeroDivisionError:'''), response)
    ipi.End()
###############################################################################
# Test "ipy.exe -i script.py"
def test_interactive_mode():
    # "ipy.exe -i script.py" must drop into an interactive session with the
    # script's globals available, for each of several script outcomes.

    # Plain script: its global x must be visible interactively.
    inputScript = testpath.test_inputs_dir + "\\simpleCommand.py"
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -i \"" + inputScript + "\"")
    AreEqual(ipi.Start(), True)
    ipi.EnsureInteractive()
    AreEqual("1", ipi.ExecuteLine("x"))
    ipi.End()

    # Script that raises: still interactive after the error is reported.
    inputScript = testpath.test_inputs_dir + "\\raise.py"
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -i \"" + inputScript + "\"")
    AreEqual(ipi.Start(), True)
    ipi.ReadError()
    ipi.EnsureInteractive()
    AreEqual("1", ipi.ExecuteLine("x"))
    ipi.End()

    # Script with a syntax error: nothing ran, so x is undefined.
    inputScript = testpath.test_inputs_dir + "\\syntaxError.py"
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -i \"" + inputScript + "\"")
    AreEqual(ipi.Start(), True)
    # ipi.EnsureInteractive()
    AssertContains(ipi.ExecuteLine("x", True), "NameError")
    ipi.End()

    # Script that calls sys.exit: -i must not keep the process alive.
    inputScript = testpath.test_inputs_dir + "\\exit.py"
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -i \"" + inputScript + "\"")
    (result, output, output2, exitCode) = ipi.StartAndRunToCompletion()
    AreEqual(exitCode, 0)
    ipi.End()

    # interactive + -c
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -i -c x=2")
    AreEqual(ipi.Start(), True)
    ipi.EnsureInteractive()
    Assert(ipi.ExecuteLine("x", True).find("2") != -1)
    ipi.End()
###############################################################################
# Test sys.exitfunc
def test_sys_exitfunc():
    # sys.exitfunc must run at shutdown, and sys.exit's argument must map
    # to the process exit code (False -> 0, True -> 1).
    import clr

    # exitFuncRuns.py registers an exit function that prints 'hello world'.
    inputScript = testpath.test_inputs_dir + "\\exitFuncRuns.py"
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " \"" + inputScript + "\"")
    (result, output, output2, exitCode) = ipi.StartAndRunToCompletion()
    AreEqual(exitCode, 0)
    AreEqual(output.find('hello world') > -1, True)
    ipi.End()

    args = extraArgs
    if clr.GetCurrentRuntime().Configuration.DebugMode:
        args = "-D " + args

    # exitFuncRaises.py: an exit function that raises is reported on stderr,
    # but does not change the process exit code.
    inputScript = testpath.test_inputs_dir + "\\exitFuncRaises.py"
    ipi = IronPythonInstance(executable, exec_prefix, args + " \"" + inputScript + "\"")
    (result, output, output2, exitCode) = ipi.StartAndRunToCompletion()
    AreEqual(exitCode, 0)
    AreEqual(output2.find('Error in sys.exitfunc:') > -1, True)
    AreEqual(output2.find('exitFuncRaises.py", line 19, in foo') > -1, True)
    ipi.End()

    # verify sys.exit(True) and sys.exit(False) return 1 and 0
    ipi = IronPythonInstance(executable, exec_prefix, '-c "import sys; sys.exit(False)"')
    res = ipi.StartAndRunToCompletion()
    AreEqual(res[0], True)  # should have started
    AreEqual(res[1], '')    # no std out
    AreEqual(res[2], '')    # no std err
    AreEqual(res[3], 0)     # should return 0

    ipi = IronPythonInstance(executable, exec_prefix, '-c "import sys; sys.exit(True)"')
    res = ipi.StartAndRunToCompletion()
    AreEqual(res[0], True)  # should have started
    AreEqual(res[1], '')    # no std out
    AreEqual(res[2], '')    # no std err
    AreEqual(res[3], 1)     # should return 1 (comment fixed: previously said 0)

    # and verify it works at the interactive console as well
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecuteLine("import sys")
    AreEqual(ipi.ExecuteAndExit("sys.exit(False)"), 0)

    # same check for the truthy exit code
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecuteLine("import sys")
    AreEqual(ipi.ExecuteAndExit("sys.exit(True)"), 1)
#############################################################################
# verify we need to dedent to a previous valid indentation level
def test_indentation():
    # Dedenting to a column that never opened a block must be rejected.
    console = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(console.Start(), True)
    console.ExecutePartialLine("if False:")
    console.ExecutePartialLine(" print 'hello'")
    reply = console.ExecuteLine(" print 'goodbye'", True)
    AreEqual(reply.find('IndentationError') > 1, True)
    console.End()
#############################################################################
# verify we dump exception details
def test_dump_exception():
    # With -X:ExceptionDetail the console prints CLR exception details.
    console = IronPythonInstance(executable, exec_prefix,
                                 extraArgs + " -X:ExceptionDetail")
    AreEqual(console.Start(), True)
    reply = console.ExecuteLine("raise 'goodbye'", True)
    AreEqual(reply.count("IronPython.Hosting") >= 1, True)
    console.End()
#############################################################################
# make sure we can enter try/except blocks
def test_try_except():
    # try/except blocks can be entered and executed interactively
    # (Python 2 "except Exception, e" syntax).
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecutePartialLine("try:")
    ipi.ExecutePartialLine(" raise Exception('foo')")
    ipi.ExecutePartialLine("except Exception, e:")
    ipi.ExecutePartialLine(" if e.message=='foo':")
    ipi.ExecutePartialLine(" print 'okay'")
    response = ipi.ExecuteLine("")
    Assert(response.find('okay') > -1)
    ipi.End()
###########################################################
# Throw on "complete" incomplete syntax bug #864
def test_incomplate_syntax():
    # A class header followed immediately by a blank line must raise
    # an IndentationError (bug #864).
    # NOTE(review): "incomplate" typo kept; renaming would change discovery.
    console = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(console.Start(), True)
    console.ExecutePartialLine("class K:")
    reply = console.ExecuteLine("", True)
    Assert("IndentationError:" in reply)
    console.End()
def test_incomplate_syntax_backslash():
    # Backslash continuations keep the console in partial-input mode;
    # "1 + \" ... "2" must still evaluate to 3 with 0-3 extra continuations
    # before and after.
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    for i in xrange(4):
        for j in xrange(i):
            ipi.ExecutePartialLine("\\")
        ipi.ExecutePartialLine("1 + \\")
        for j in xrange(i):
            ipi.ExecutePartialLine("\\")
        response = ipi.ExecuteLine("2", True)
        Assert("3" in response)
    ipi.End()
###########################################################
# if , while, try, for and then EOF.
def test_missing_test():
    # Each compound-statement keyword alone on a line is a SyntaxError.
    for keyword in ('if', 'while', 'for', 'try'):
        console = IronPythonInstance(executable, exec_prefix, extraArgs)
        AreEqual(console.Start(), True)
        reply = console.ExecuteLine(keyword, True)
        Assert("SyntaxError:" in reply)
        console.End()
##########################################################
# Support multiple-levels of indentation
def test_indentation_levels():
    # The console must accept several nested indentation levels.
    console = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(console.Start(), True)
    console.ExecutePartialLine("class K:")
    console.ExecutePartialLine(" def M(self):")
    console.ExecutePartialLine(" if 1:")
    console.ExecutePartialLine(" pass")
    reply = console.ExecuteLine("")
    console.End()
##########################################################
# Support partial lists
def test_partial_lists():
    # List literals may span multiple input lines.
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecutePartialLine("[1")
    ipi.ExecutePartialLine(" ,")
    ipi.ExecutePartialLine(" 2")
    response = ipi.ExecuteLine("]")
    Assert("[1, 2]" in response)
    # Blank lines inside an open bracket do not terminate the input.
    ipi.ExecutePartialLine("[")
    ipi.ExecutePartialLine("")
    ipi.ExecutePartialLine("")
    response = ipi.ExecuteLine("]")
    Assert("[]" in response)
    ipi.End()
def test_partial_lists_cp3530():
    # Regression test for CodePlex 3530: dict literals inside partial lists.
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    try:
        ipi.ExecutePartialLine("[{'a':None},")
        response = ipi.ExecuteLine("]")
        Assert("[{'a': None}]" in response, response)

        # Dict split mid-literal across partial lines.
        ipi.ExecutePartialLine("[{'a'")
        response = ipi.ExecutePartialLine(":None},")
        response = ipi.ExecuteLine("]")
        Assert("[{'a': None}]" in response, response)

        # Dict followed by further list elements.
        ipi.ExecutePartialLine("[{'a':None},")
        ipi.ExecutePartialLine("1,")
        response = ipi.ExecuteLine("2]")
        Assert("[{'a': None}, 1, 2]" in response, response)
    finally:
        ipi.End()
##########################################################
# Support partial tuples
def test_partial_tuples():
    # Tuple literals may span multiple input lines.
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecutePartialLine("(2")
    ipi.ExecutePartialLine(" ,")
    ipi.ExecutePartialLine(" 3")
    response = ipi.ExecuteLine(")")
    Assert("(2, 3)" in response)

    # Empty tuple across two lines.
    ipi.ExecutePartialLine("(")
    response = ipi.ExecuteLine(")")
    Assert("()" in response)

    # %-formatting whose argument tuple spans many lines (with blanks).
    ipi.ExecutePartialLine("'abc %s %s %s %s %s' % (")
    ipi.ExecutePartialLine(" 'def'")
    ipi.ExecutePartialLine(" ,'qrt',")
    ipi.ExecutePartialLine(" 'jkl'")
    ipi.ExecutePartialLine(",'jkl'")
    ipi.ExecutePartialLine("")
    ipi.ExecutePartialLine(",")
    ipi.ExecutePartialLine("")
    ipi.ExecutePartialLine("")
    ipi.ExecutePartialLine("'123'")
    response = ipi.ExecuteLine(")")
    Assert("'abc def qrt jkl jkl 123'" in response)

    # Single-element tuple with trailing comma.
    ipi.ExecutePartialLine("a = (")
    ipi.ExecutePartialLine(" 1")
    ipi.ExecutePartialLine(" , ")
    ipi.ExecuteLine(")")
    response = ipi.ExecuteLine("a")
    Assert("(1,)" in response)

    # Comments inside an open tuple are ignored.
    ipi.ExecutePartialLine("(")
    ipi.ExecutePartialLine("'joe'")
    ipi.ExecutePartialLine(" ")
    ipi.ExecutePartialLine(" #")
    ipi.ExecutePartialLine(",")
    ipi.ExecutePartialLine("2")
    response = ipi.ExecuteLine(")")
    Assert("('joe', 2)" in response)

    # Blank lines inside an open tuple do not terminate the input.
    ipi.ExecutePartialLine("(")
    ipi.ExecutePartialLine("")
    ipi.ExecutePartialLine("")
    response = ipi.ExecuteLine(")")
    Assert("()" in response)

    ipi.End()
##########################################################
# Support partial dicts
def test_partial_dicts():
    """Dict literals may be entered across multiple interactive lines."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    # duplicate keys collapse to a single entry
    ipi.ExecutePartialLine("{2:2")
    ipi.ExecutePartialLine(" ,")
    ipi.ExecutePartialLine(" 2:2")
    response = ipi.ExecuteLine("}")
    Assert("{2: 2}" in response)
    # the empty dict
    ipi.ExecutePartialLine("{")
    response = ipi.ExecuteLine("}")
    Assert("{}" in response)
    # trailing comma, assigned to a name
    ipi.ExecutePartialLine("a = {")
    ipi.ExecutePartialLine("    None:2")
    ipi.ExecutePartialLine("    , ")
    ipi.ExecuteLine("}")
    response = ipi.ExecuteLine("a")
    Assert("{None: 2}" in response)
    # key, colon and value split across separate lines
    ipi.ExecutePartialLine("{")
    ipi.ExecutePartialLine("'joe'")
    ipi.ExecutePartialLine(": ")
    ipi.ExecutePartialLine("       42")
    ipi.ExecutePartialLine(",")
    ipi.ExecutePartialLine("3:45")
    response = ipi.ExecuteLine("}")
    Assert(repr({'joe':42, 3:45}) in response)
    # blank continuation lines inside an empty dict
    ipi.ExecutePartialLine("{")
    ipi.ExecutePartialLine("")
    ipi.ExecutePartialLine("")
    response = ipi.ExecuteLine("}")
    Assert("{}" in response)
    ipi.End()
###########################################################
# Some whitespace wackiness
def test_whitespace():
    """Whitespace-only lines at the prompt: ignored alone, but a SyntaxError
    when followed by unexpectedly-indented code."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecuteLine("  ")
    response = ipi.ExecuteLine("")
    ipi.End()
    # a whitespace line does not affect the following statement
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecuteLine("  ")
    response = ipi.ExecuteLine("2")
    Assert("2" in response)
    ipi.End()
    # an indented statement at top level is a SyntaxError
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecuteLine("  ")
    response = ipi.ExecuteLine("  2", True)
    Assert("SyntaxError:" in response)
    ipi.End()
###########################################################
# test the indentation error in the interactive mode
def test_indentation_interactive():
    """A compound-statement header with no indented body raises IndentationError."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    # a one-line class statement completes cleanly
    ipi.ExecutePartialLine("class C:pass")
    response = ipi.ExecuteLine("")
    AreEqual(response, "")
    # a class header immediately followed by a blank line has no body
    ipi.ExecutePartialLine("class D(C):")
    response = ipi.ExecuteLine("", True)
    Assert("IndentationError:" in response)
    ipi.End()
###########################################################
# test /mta w/ no other args
def test_mta_indentation():
    """IndentationError reporting also works when the console runs in MTA mode.

    Renamed from test_mta: a second ``def test_mta`` later in this file
    shadowed this definition, so this test was dead code and never ran
    under the test driver.
    """
    ipi = IronPythonInstance(executable, exec_prefix, '-X:MTA')
    AreEqual(ipi.Start(), True)
    ipi.ExecutePartialLine("class C:pass")
    response = ipi.ExecuteLine("")
    AreEqual(response, "")
    ipi.ExecutePartialLine("class D(C):")
    response = ipi.ExecuteLine("", True)
    Assert("IndentationError:" in response)
    ipi.End()
###########################################################
# test for comments in interactive input
def test_comments():
    """Comment-only lines are accepted at top level and do not close a suite."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    response = ipi.ExecuteLine("# this is some comment line")
    AreEqual(response, "")
    response = ipi.ExecuteLine("    # this is some comment line")
    AreEqual(response, "")
    response = ipi.ExecuteLine("# this is some more comment line")
    AreEqual(response, "")
    # comment lines inside an if-suite, indented or not, keep the suite open
    ipi.ExecutePartialLine("if 100:")
    ipi.ExecutePartialLine("    print 100")
    ipi.ExecutePartialLine("# this is some more comment line inside if")
    ipi.ExecutePartialLine("    # this is some indented comment line inside if")
    ipi.ExecutePartialLine("    print 200")
    response = ipi.ExecuteLine("")
    AreEqual(response, "100" + newline + "200")
    ipi.End()
def test_global_values():
    """globals() behaves like a real dict: values() iterates, fromkeys() works."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecuteLine("import clr")
    response = ipi.ExecuteLine("[x for x in globals().values()]")
    Assert(response.startswith('['))
    d = eval(ipi.ExecuteLine("globals().fromkeys(['a', 'b'], 'c')"))
    AreEqual(d, {'a':'c', 'b':'c'})
def test_globals8961():
    """Regression test (work item 8961): globals() tracks newly bound names."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    response = ipi.ExecuteLine("print globals().keys()")
    res = set(eval(response))
    AreEqual(res, set(['__builtins__', '__name__', '__doc__']))
    ipi.ExecuteLine("a = None")
    response = ipi.ExecuteLine("print globals().keys()")
    res = set(eval(response))
    AreEqual(res, set(['__builtins__', '__name__', '__doc__', 'a']))
    response = ipi.ExecuteLine("print globals().values()")
    # the builtin module's repr is not eval-able; substitute a plain string
    l = eval(response.replace("<module '__builtin__' (built-in)>", '"builtin"'))
    res = set(l)
    AreEqual(len(l), 4)
    AreEqual(res, set(['builtin', '__main__', None]))
    ipi.ExecuteLine("b = None")
    response = ipi.ExecuteLine("print globals().values()")
    l = eval(response.replace("<module '__builtin__' (built-in)>", '"builtin"'))
    res = set(l)
    # a and b are both None, so the value set is unchanged but the count grows
    AreEqual(len(l), 5)
    AreEqual(res, set(['builtin', '__main__', None]))
def test_console_input_output():
    """Drive the console with assorted input/newline combinations and check
    the echoed output; each case runs in a freshly started session."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    cases = [
        ("x=100",""),
        ("x=200\n",""),
        ("\nx=300",""),
        ("\nx=400\n",""),
        ("500","500"),
        ("600\n\n\n\n\n\n\n\n\n\n\n","600"),
        ("valid=3;more_valid=4;valid","3"),
        ("valid=5;more_valid=6;more_valid\n\n\n\n\n","6"),
        ("valid=7;more_valid=8;#valid",""),
        ("valid=9;valid;# more_valid\n","9"),
        ("valid=11;more_valid=12;more_valid# should be valid input\n\n\n\n","12"),
    ]
    for command, expected in cases:
        AreEqual(ipi.Start(), True)
        AreEqual(ipi.ExecuteLine(command), expected)
        ipi.End()
# expect a clean exception message/stack from thread
def test_thrown_from_thread():
    """An exception raised on a worker thread prints a clean Python traceback,
    with no IronPython implementation frames leaking into the output."""
    inputScript = path_combine(testpath.temporary_dir, "throwingfromthread.py")
    write_to_file(inputScript, '''
def f(): raise AssertionError, 'hello'
import thread, time
thread.start_new_thread(f, tuple())
time.sleep(2)
''')
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " " + inputScript)
    (result, output, output2, exitCode) = ipi.StartAndRunToCompletion()
    AreEqual(exitCode, 0)
    Assert("AssertionError: hello" in output2)
    Assert("IronPython." not in output2) # '.' is necessary here
    ipi.End()
def test_aform_feeds():
    """Form-feed (\\f) characters are treated as whitespace and reset the
    current indentation level to 0."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    response = ipi.ExecuteLine("\fprint 'hello'")
    AreEqual(response, "hello")
    response = ipi.ExecuteLine("    \fprint 'hello'")
    AreEqual(response, "hello")
    # \f before an indented body line
    ipi.ExecutePartialLine("def f():")
    ipi.ExecutePartialLine("\f    print 'hello'")
    ipi.ExecuteLine('')
    response = ipi.ExecuteLine('f()')
    AreEqual(response, "hello")
    # \f resets indent to 0
    ipi.ExecutePartialLine("def f():")
    ipi.ExecutePartialLine("    \f    x = 'hello'")
    ipi.ExecutePartialLine("\f    print x")
    ipi.ExecuteLine('')
    response = ipi.ExecuteLine('f()')
    AreEqual(response, "hello")
    # \f resets indent to 0
    ipi.ExecutePartialLine("def f():")
    ipi.ExecutePartialLine("    \f    x = 'hello'")
    ipi.ExecutePartialLine("    print x")
    ipi.ExecuteLine('')
    response = ipi.ExecuteLine('f()')
    AreEqual(response, "hello")
def test_ipy_dash_S():
    """ipy -S should still install Lib into sys.path"""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -S")
    AreEqual(ipi.Start(), True)
    response = ipi.ExecuteLine("import sys")
    # even with -S (no site import), the Lib directory must be on sys.path
    response = ipi.ExecuteLine("print sys.path")
    Assert(response.find('Lib') != -1)
def test_startup_dir():
    """A fresh interactive session exposes only the default module globals."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    listing = ipi.ExecuteLine("print dir()")
    expected = ['__builtins__', '__doc__', '__name__']
    AreEqual(sorted(eval(listing)), sorted(expected))
def test_ipy_dash_m():
    """ipy -m runs a module found on sys.path, forwards extra arguments in
    sys.argv, and propagates the module's sys.exit() status."""
    import sys
    # drop a throw-away module into the Lib directory already on sys.path
    for path in sys.path:
        if path.find('Lib') != -1:
            filename = System.IO.Path.Combine(path, 'somemodule.py')
            break
    try:
        f = file(filename, 'w')
        f.write('print "hello"\n')
        f.write('import sys\n')
        f.write('print sys.argv')
        f.close()
        # need to run these tests where we have access to runpy.py
        path = System.IO.FileInfo(__file__).DirectoryName
        # simple case works
        ipi = IronPythonInstance(executable, path, extraArgs + " -m somemodule")
        res, output, err, exit = ipi.StartAndRunToCompletion()
        AreEqual(res, True) # run should have worked
        AreEqual(exit, 0) # should have returned 0
        output = output.replace('\r\n', '\n')
        lines = output.split('\n')
        AreEqual(lines[0], 'hello')
        # sys.argv[0] must be the module's own file
        Assert(samefile(eval(lines[1])[0],
                        filename))
        # we receive any arguments in sys.argv
        ipi = IronPythonInstance(executable, path, extraArgs + " -m somemodule foo bar")
        res, output, err, exit = ipi.StartAndRunToCompletion()
        AreEqual(res, True) # run should have worked
        AreEqual(exit, 0) # should have returned 0
        output = output.replace('\r\n', '\n')
        lines = output.split('\n')
        AreEqual(lines[0], 'hello')
        AreEqual(eval(lines[1]), [filename, 'foo', 'bar'])
        f = file(filename, 'w')
        f.write('print "hello"\n')
        f.write('import sys\n')
        f.write('sys.exit(1)')
        f.close()
        # sys.exit works
        ipi = IronPythonInstance(executable, path, extraArgs + " -m somemodule")
        res, output, err, exit = ipi.StartAndRunToCompletion()
        AreEqual(res, True) # run should have worked
        AreEqual(exit, 1) # the module's sys.exit(1) becomes the process exit code
        output = output.replace('\r\n', '\n')
        lines = output.split('\n')
        AreEqual(lines[0], 'hello')
    finally:
        nt.unlink(filename)
@disabled("CodePlex Work Item 10925")
def test_ipy_dash_m_negative():
    """ipy -m should reject builtin modules and dotted package paths."""
    # builtin modules should not work
    for modname in [ "sys", "datetime" ]:
        ipi = IronPythonInstance(executable, exec_prefix,
                                 extraArgs + " -m " + modname)
        res, output, err, exit = ipi.StartAndRunToCompletion()
        AreEqual(exit, -1)
    # Modules within packages should not work
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -m testpkg1.mod1")
    res, output, err, exit = ipi.StartAndRunToCompletion()
    AreEqual(res, True) # run should have worked
    AreEqual(exit, 1) # non-zero exit: the dotted name is rejected
    Assert("SyntaxError: invalid syntax" in err,
           "stderr is:" + str(err))
def test_ipy_dash_m_pkgs():
    """ipy -m locates packages via IRONPYTHONPATH and fails cleanly on
    unknown module names."""
    # Python packages work
    import nt
    Assert("testpkg1" in [x.lower() for x in nt.listdir(nt.getcwd())], nt.getcwd())
    old_ipy_path = get_environ_variable("IRONPYTHONPATH")
    try:
        nt.environ["IRONPYTHONPATH"] = nt.getcwd()
        ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -m testpkg1")
        res, output, err, exit = ipi.StartAndRunToCompletion()
        AreEqual(res, True) # run should have worked
        AreEqual(exit, 0) # should have returned 0
        AreEqual(output, "")
        # Bad module names should not work
        ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -m libxyz")
        res, output, err, exit = ipi.StartAndRunToCompletion()
        AreEqual(res, True) # run should have worked
        AreEqual(exit, 1) # import failure yields exit status 1
        Assert("ImportError: No module named libxyz" in err,
               "stderr is:" + str(err))
    finally:
        # restore whatever IRONPYTHONPATH held before the test
        nt.environ["IRONPYTHONPATH"] = old_ipy_path
def test_ipy_dash_c():
    """verify ipy -c cmd doesn't print expression statements"""
    ipi = IronPythonInstance(executable, exec_prefix, "-c True;False")
    started, stdout_text, stderr_text, exit_code = ipi.StartAndRunToCompletion()
    AreEqual(started, True)     # should have started
    AreEqual(stdout_text, '')   # no std out
    AreEqual(stderr_text, '')   # no std err
    AreEqual(exit_code, 0)      # should return 0
#############################################################################
# CP11924 - verify 'from __future__ import division' works
def test_future_division():
    """CP11924: 'from __future__ import division' makes / true division."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecuteLine("from __future__ import division")
    response = ipi.ExecuteLine("11/4")
    AreEqual(response, "2.75")
    ipi.End()
#############################################################################
# CP2206
def test_future_with():
    """CP2206: the with-statement works interactively across partial lines."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    # define a minimal context manager
    ipi.ExecutePartialLine("class K(object):")
    ipi.ExecutePartialLine("    def __enter__(self): return 3.14")
    ipi.ExecutePartialLine("    def __exit__(self, type, value, tb): return False")
    ipi.ExecuteLine("")
    # the as-target receives __enter__'s return value
    ipi.ExecutePartialLine("with K() as d:")
    ipi.ExecutePartialLine("    print d")
    response = ipi.ExecuteLine("")
    AreEqual(response, "3.14")
    ipi.End()
#############################################################################
# Merlin 148481
def test_ipy_dash():
    """Merlin 148481: a lone '-' argument starts an interactive session."""
    ipi = IronPythonInstance(executable, exec_prefix, "-")
    AreEqual(ipi.Start(), True)
    response = ipi.ExecuteLine("42")
    AreEqual(response, "42")
    ipi.End()
#############################################################################
def test_mta():
    """With -X:MTA the console thread is and stays in the MTA apartment,
    including across multi-line statements."""
    ipi = IronPythonInstance(executable, exec_prefix, '-X:MTA')
    AreEqual(ipi.Start(), True)
    ipi.ExecuteLine("import System")
    response = ipi.ExecuteLine("str(System.Threading.Thread.CurrentThread.ApartmentState)")
    AreEqual(response, "'MTA'")
    ipi.ExecutePartialLine("class C:pass")
    response = ipi.ExecuteLine("")
    AreEqual(response, "")
    # still MTA after executing a compound statement
    response = ipi.ExecuteLine("str(System.Threading.Thread.CurrentThread.ApartmentState)")
    AreEqual(response, "'MTA'")
    ipi.End()
def test_displayhook():
    """A user-assigned sys.displayhook receives interactive expression values."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecuteLine("import sys")
    # custom hook that prefixes the displayed value
    ipi.ExecutePartialLine("def f(x): print 'foo', x")
    ipi.ExecuteLine("")
    response = ipi.ExecuteLine("sys.displayhook = f")
    response = ipi.ExecuteLine("42")
    AreEqual(response, "foo 42")
def test_excepthook():
    """A user-assigned sys.excepthook receives (type, value, traceback) for
    uncaught interactive exceptions."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    # parameterless exception
    ipi.ExecuteLine("import sys")
    ipi.ExecutePartialLine("def f(*args): print 'foo', args")
    ipi.ExecuteLine("")
    response = ipi.ExecuteLine("sys.excepthook = f")
    response = ipi.ExecuteLine("raise Exception", True)
    AssertContains(response, "foo (<type 'exceptions.Exception'>, Exception(), <traceback object at")
def test_last_exception():
    """sys.last_type/last_value/last_traceback are populated after an
    uncaught interactive exception, and absent before one."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecuteLine("import sys")
    # not set before any exception has occurred
    response = ipi.ExecuteLine("hasattr(sys, 'last_value')")
    AreEqual(response, 'False')
    # trigger a NameError
    AssertContains(ipi.ExecuteLine("x", True), "NameError")
    response = ipi.ExecuteLine("sys.last_value")
    AreEqual(response, "NameError(\"name 'x' is not defined\",)")
    response = ipi.ExecuteLine("sys.last_type")
    AreEqual(response, "<type 'exceptions.NameError'>")
    response = ipi.ExecuteLine("sys.last_traceback")
    AssertContains(response, "<traceback object at ")
def test_sta_sleep_Warning():
    """Thread.Sleep on the STA console thread emits a RuntimeWarning on stderr."""
    ipi = IronPythonInstance(executable, exec_prefix, '-c "from System.Threading import Thread;Thread.Sleep(100)"')
    retval, stdouttext, stderrtext, exitcode = ipi.StartAndRunToCompletion()
    Assert(stderrtext.endswith("RuntimeWarning: Calling Thread.Sleep on an STA thread doesn't pump messages. Use Thread.CurrentThread.Join instead.\r\n"))
def test_newline():
    """Console output uses \\r\\n line endings, never a doubled \\r\\r\\n."""
    ipi = IronPythonInstance(executable, exec_prefix, "")
    # start the raw process without the usual Start() wrapper so we can
    # inspect the unprocessed stdout stream
    ipi.proc.Start()
    ipi.reader = ipi.proc.StandardOutput
    output = ipi.EatToPrompt()
    Assert('\r\r\n' not in output)
    Assert('\r\n' in output)
#############################################################################
# Remote console tests
from System.Diagnostics import Process
def get_process_ids(ipi):
    """Return (console process id, remote runtime process id) for `ipi`.

    The remote id is obtained by evaluating Process.GetCurrentProcess().Id
    inside the remote runtime itself.
    """
    ipi.EnsureInteractiveRemote()
    ipi.proc.Refresh()
    consoleProcessId = ipi.proc.Id
    ipi.ExecuteLine("import System")
    remoteRuntimeProcessId = ipi.ExecuteLineRemote("System.Diagnostics.Process.GetCurrentProcess().Id")
    Assert(remoteRuntimeProcessId.isdigit(), "remoteRuntimeProcessId is '%s'" % remoteRuntimeProcessId)
    return consoleProcessId, int(remoteRuntimeProcessId)
def start_remote_console(args = ""):
inputScript = testpath.test_inputs_dir + "\\RemoteConsole.py"
ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " \"" + inputScript + "\" -X:ExceptionDetail " + args)
AreEqual(ipi.Start(), True)
return ipi
# Basic check that the remote console actually uses two processes
def test_remote_console_processes():
    """Basic check that the remote console actually uses two processes."""
    # First check that a simple local console uses a single process
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    consoleProcessId, remoteRuntimeProcessId = get_process_ids(ipi)
    AreEqual(consoleProcessId, remoteRuntimeProcessId)
    ipi.End()
    # Now use the remote console
    ipi = start_remote_console()
    consoleProcessId, remoteRuntimeProcessId = get_process_ids(ipi)
    AreNotEqual(consoleProcessId, remoteRuntimeProcessId)
    ipi.End()
# The remote runtime should terminate when the console terminates
def test_remote_runtime_normal_exit():
    """The remote runtime should terminate when the console terminates."""
    ipi = start_remote_console()
    consoleProcessId, remoteRuntimeProcessId = get_process_ids(ipi)
    runtimeProcess = Process.GetProcessById(remoteRuntimeProcessId)
    Assert(not runtimeProcess.HasExited)
    ipi.End()
    runtimeProcess.WaitForExit() # The test is that this wait succeeds
# Stress the input-output streams
def test_remote_io():
    """Stress the remote console's input-output channel with many round trips."""
    ipi = start_remote_console()
    for _ in xrange(100):
        AreEqual(ipi.ExecuteLineRemote("2+2"), "4")
    ipi.End()
# Kill the remote runtime and ensure that another process starts up again
def test_remote_server_restart():
    """Kill the remote runtime and ensure a fresh runtime process starts up again."""
    ipi = start_remote_console()
    consoleProcessId, remoteRuntimeProcessId = get_process_ids(ipi)
    runtimeProcess = Process.GetProcessById(remoteRuntimeProcessId)
    AreNotEqual(runtimeProcess, consoleProcessId)
    runtimeProcess.Kill()
    runtimeProcess.WaitForExit()
    # The Process.Exited event is fired asynchronously, and might take sometime to fire.
    # Hence, we need to block for a known marker
    ipi.EatToMarker("Remote runtime terminated")
    # We need to press Enter to nudge the old console out of the ReadLine...
    restartMessage = ipi.ExecuteLine("", True)
    ipi.ReadError()
    consoleProcessId2, remoteRuntimeProcessId2 = get_process_ids(ipi)
    # same console, new runtime
    AreEqual(consoleProcessId, consoleProcessId2)
    # This is technically not a 100% correct as there is a small chance that the process id might get reused
    AreNotEqual(remoteRuntimeProcessId, remoteRuntimeProcessId2)
    ipi.End()
# Check that an exception can be remoted back over the reverse channel
# Note that exceptions are not written to stdout by the remote process
def test_remote_console_exception():
    """An exception in the remote runtime is remoted back over the reverse
    channel (exceptions are not written to stdout by the remote process)."""
    ipi = start_remote_console()
    zeroDivisionErrorOutput = ipi.ExecuteLine("1/0", True)
    AssertContains(zeroDivisionErrorOutput, "ZeroDivisionError")
    ipi.End()
def test_remote_startup_script():
    """A -i startup script runs in the remote runtime; its globals are visible."""
    ipi = start_remote_console("-i " + testpath.test_inputs_dir + "\\simpleCommand.py")
    AreEqual(ipi.ExecuteLine("x"), "1")
    ipi.End()
def get_abort_command_output():
    """Define and run a command that hangs the remote runtime, have the
    console abort it (triggered by the 'ABORT ME!!!' marker), and return
    the resulting output."""
    ipi = start_remote_console()
    ipi.ExecuteLine("import System")
    ipi.ExecutePartialLine ("def Hang():")
    ipi.ExecutePartialLine ("    print 'ABORT ME!!!' # This string token should trigger an abort...")
    ipi.ExecutePartialLine ("    infinite = System.Threading.Timeout.Infinite")
    ipi.ExecutePartialLine ("    System.Threading.Thread.CurrentThread.Join(infinite)")
    ipi.ExecuteLine ("")
    result = ipi.ExecuteLine("Hang()", True)
    ipi.End()
    return result
def test_remote_abort_command():
    """Aborting a hung remote command must surface as KeyboardInterrupt.

    Under stress a raw ThreadAbortException occasionally leaks through
    instead; retry up to 10 times until KeyboardInterrupt is observed.
    """
    for i in xrange(10):
        output = get_abort_command_output()
        if "KeyboardInterrupt" in output:
            # the abort must not leak through as a raw ThreadAbortException
            AssertDoesNotContain(output, "Thread was being aborted.") # ThreadAbortException
            return
        # Rarely, under stress conditions, ThreadAbortException leaks through.
        # Keep retrying until we actually get KeyboardInterrupt
        AssertContains(output, "Thread was being aborted.") # ThreadAbortException
    # BUG FIX: the failure message used to read "Only KeyboardInterrupt was
    # thrown", which contradicted the condition being reported.
    Assert(False, "KeyboardInterrupt not thrown. Only ThreadAbortException was thrown")
def test_exception_slicing_warning():
    """Indexing an exception works; with -3 it emits a DeprecationWarning."""
    ipi = IronPythonInstance(executable, exec_prefix, '-c "print Exception(*range(2))[1]"')
    res = ipi.StartAndRunToCompletion()
    AreEqual(res[0], True) # should have started
    AreEqual(res[1], '1\r\n') # some std out
    AreEqual(res[2], '') # no std err
    AreEqual(res[3], 0) # should return 0
    # with py3k warnings enabled (-3), the same indexing warns on stderr
    ipi = IronPythonInstance(executable, exec_prefix,
                             '-3 -c "import warnings;'
                             'warnings.filters.reverse();'
                             'warnings.filters.pop();'
                             'print Exception(*range(2))[1]"')
    res = ipi.StartAndRunToCompletion()
    AreEqual(res[0], True) # should have started
    AreEqual(res[1], '1\r\n') # std out
    Assert(res[2].endswith('DeprecationWarning: __getitem__ not supported for exception classes in 3.x; use args attribute\r\n')) #std err
    AreEqual(res[3], 0) # should return 0
#------------------------------------------------------------------------------
run_test(__name__)  # execute every test_* function in this module via the shared runner
| |
#! /usr/bin/env python3
'''
pyngstats
==============================================================================
Author: Ferdinand Saufler <mail@saufler.de>
Version: 0.27.0
Date: 21.12.2014
For documentation please visit https://github.com/derwilly/pyngstats
==============================================================================
'''
import os
import re
import sys
import time
from subprocess import check_output, CalledProcessError
import platform
# program version
version = '0.27.0'
# host to ping (overridable via --host=)
host = 'example.com' # or 192.168.0.1
# name of the machine running this script (used in report titles)
hostname = platform.uname()[1]
# ping timeout in seconds; kept as a string because it is passed to the
# ping command line (overridable via --timeout=)
timeout = '3'
# stop after sending this many requests; 0 means ping forever (--count=)
count = 1
# wait this many seconds until the next request (--interval=)
interval = 1
# directory this script lives in; all other paths are relative to it
path = os.path.dirname(os.path.realpath(__file__))
# path for html reports (--report_dir=)
report_dir = path + '/reports'
# path for measured data (--stat_dir=)
stat_dir = path + '/stats'
# path for html-templates
template_dir = path + '/templates'
# create report? (--report)
do_report = False
# do a ping? (--ping)
do_ping = False
# show debug infos? (--debug)
debug = False
# Hex color (FFFFFF) to RGB tuple (255,255,255)
def hex_to_rgb(value):
    """Convert a hex color string ('FFFFFF' or '#FFFFFF') to an (r, g, b) tuple.

    Works for any hex string whose length divides into three equal groups
    (e.g. 'FFF' as well as 'FFFFFF').
    """
    value = value.lstrip('#')
    lv = len(value)
    # BUG FIX: use integer division; under Python 3 `lv / 3` is a float,
    # which made both the slice bounds and range() step raise TypeError.
    step = lv // 3
    return tuple(int(value[i:i + step], 16) for i in range(0, lv, step))
# RGB tubple (255,255,255) to hex value (FFFFFF)
def rgb_to_hex(rgb):
    """Convert an (r, g, b) tuple such as (255, 255, 255) to a lowercase
    hex string such as 'ffffff' (no leading '#')."""
    red, green, blue = rgb
    return '{:02x}{:02x}{:02x}'.format(red, green, blue)
# a formatted, colored output
def out(msg, ctype='info'):
    """Print `msg` prefixed with a colored level tag and a timestamp.

    ctype selects the tag/color: 'warn' (orange), 'ok' (green),
    'fail' (red); anything else falls back to 'info' (blue).
    Returns an error string if msg or ctype is empty, else None.
    """
    if not msg or not ctype:
        return "error: No message or info"
    # 256-color terminal codes for each message level
    levels = {
        'warn': ('warn', 166),  # orange
        'ok':   (' ok ', 34),   # green
        'fail': ('fail', 160),  # red
    }
    srt, color = levels.get(ctype, ('info', 33))  # default: blue "info"
    stamp = time.strftime('%d.%m.%y %H:%M')
    print('[\033[38;5;' + str(color) + 'm' + srt + '\033[0m] ' + stamp + ' ' + msg)
# create report directory, if not exists
def create_report_dir(p):
    """Ensure the report directory `p` exists.

    Returns True if it already exists or was created, False on failure.
    """
    if os.path.exists(p):
        return True
    try:
        os.makedirs(p)
        out('report_dir ' + p + ' created.', 'ok')
        return True
    except:
        out('cant create report_dir.', 'fail')
        return False
# create stat directory, if not exists
def create_stat_dir(p):
    """Ensure the statistics directory `p` exists.

    Returns True if it already exists or was created, False on failure.
    """
    if os.path.exists(p):
        return True
    try:
        os.makedirs(p)
        out('stat_dir ' + p + ' created.', 'ok')
        return True
    except:
        out('cant create stats_dir.', 'fail')
        return False
def print_version():
    """Print the program name, its version, and bug-report/project info."""
    program = os.path.basename(__file__)
    print(program + ' version ' + version + '\n'
          'please report bugs to <mail@saufler.de>\n'
          'https://github.com/derwilly/pyngstats')
def print_help():
    """Print the command-line usage text.

    BUG FIX: two occurrences of the typo "secounds" in the user-facing
    help text were corrected to "seconds".
    """
    print('Commandline interface:\n' +
          '\033[1m --report \033[22m\n' +
          '    Generates a html-report. \n' +
          '\033[1m --ping \033[22m\n' +
          '    Do a ping to the given host. \n' +
          '\033[1m --report_dir \033[22m\n' +
          '    Specify the directory for html-reports. \n' +
          '\033[1m --stat_dir \033[22m\n' +
          '    Specify the directory for statistics. \n' +
          '\033[1m --host \033[22m\n' +
          '    Take that host for ping. \n' +
          '\033[1m --timeout \033[22m\n' +
          '    Timeout in seconds (1-30).\n' +
          '\033[1m --count \033[22m\n' +
          '    stop after sending "count" requests.\n' +
          '\033[1m --interval \033[22m\n' +
          '    waiting x seconds until next request.\n' +
          '\033[1m --debug \033[22m\n' +
          '    print additional debug information.\n' +
          '\033[1m --version \033[22m\n' +
          '    print version info.\n')
def ping(loops=0):
    """Send one ping to `host`, append the latency to today's stat file,
    then wait `interval` seconds and recurse.

    Uses the module globals host/timeout/count/interval/stat_dir.
    count == 0 means ping forever; otherwise stop after `count` requests.
    A lost ping is recorded as an empty latency value.
    """
    latency_str = ''
    pinfo = b''
    loops += 1
    if not create_stat_dir(stat_dir):
        # BUG FIX: this line used to build a bare tuple instead of calling
        # out(), so the failure message was silently discarded.
        out('failed to create stat_dir in do_ping procedure.', 'fail')
        raise SystemExit
    try:
        pinfo = str(check_output(['ping', '-c', '1', '-w', timeout, host]))
    except CalledProcessError as err:
        if int(err.returncode) == 1:
            print(err)
            out('ping returned exit status "1", no reply from host.', 'fail')
        elif int(err.returncode) == 2:
            out('ping returned exit status "2", unknown error.', 'fail')
        else:
            out('ping returned an unknown error.', 'fail')
    except Exception as err:
        # BUG FIX: Exception has no .message attribute on Python 3; the old
        # `print(str(err.message))` raised AttributeError and hid the error.
        print(str(err))
    try:
        # extract e.g. "time=12.3" from the ping output
        latency_str = str(re.findall(r"time=[0-9]{1,4}.[0-9]{1,4}", pinfo)[0])
    except IndexError:
        out('Index error in ping procedure.', 'fail')
    except TypeError:
        out('Type error in ping procedure', 'fail')
    latency = latency_str[5:]  # strip the "time=" prefix
    try:
        with open(stat_dir + '/' + time.strftime('%y%m%d'), 'a') as f:
            f.write(time.strftime('%H:%M:%S') + ' ' + latency + '\n')
    except IOError:
        out('cant write to file ' + stat_dir + '/' +
            time.strftime('%y%m%d'), 'fail')
    if count == 0:
        # count == 0: ping forever
        time.sleep(interval)
        ping()
    elif loops < count:
        time.sleep(interval)
        ping(loops)
# command line options
# Each recognised flag mutates the module-level configuration globals above.
if '--debug' in sys.argv:
    debug = True
for i in sys.argv:
    if '--report' in i:
        do_report = True
    if '--ping' in i:
        do_ping = True
    if '--report_dir=' in i:
        # strip the '--report_dir=' prefix (13 characters)
        rdir = str(re.findall(r"^--report_dir=.*", i)[0])
        rdir = rdir[13:]
        if create_report_dir(rdir):
            report_dir = rdir
            if debug:
                out('using report directory ' + report_dir, 'info')
        else:
            out('cant use report_dir. please check the path.', 'fail')
            raise SystemExit
    if '--stat_dir=' in i:
        # strip the '--stat_dir=' prefix (11 characters)
        sdir = str(re.findall(r"^--stat_dir=.*", i)[0])
        sdir = sdir[11:]
        if create_stat_dir(sdir):
            stat_dir = sdir
            if debug:
                out('using stats directory ' + stat_dir, 'info')
        else:
            out('cant use stat_dir. please check the path.', 'fail')
            raise SystemExit
    if '--host=' in i:
        ho = str(re.findall(r"^--host=.*", i)[0])
        host = ho[7:]
        if debug:
            out('using host ' + host + ' for ping.', 'info')
    if '--timeout=' in i:
        ti = str(re.findall(r"^--timeout=.*", i)[0])
        ti = ti[10:]
        try:
            if int(ti) > 0 and int(ti) <= 30:
                timeout = ti
                if debug:
                    out('using timeout ' + str(timeout) + ' secounds', 'info')
            else:
                # NOTE(review): the fallback assigns an int while valid values
                # stay strings; ping() passes timeout to check_output, which
                # requires a string — confirm this path is exercised.
                timeout = 3
                out('timeout must be an integer between 1 and 30. setting timeout = 3', 'warn')
        except ValueError:
            timeout = 3
            out('timeout must be an integer between 1 and 30. setting timeout = 3', 'warn')
    if '--count=' in i:
        # '--count=' prefix is 8 characters
        tmp = str(re.findall(r"^--count=.*", i)[0])
        try:
            tmp = int(tmp[8:])
            if tmp > 0:
                count = tmp
                if debug:
                    out('using count = ' + str(count) + '.', 'info')
            else:
                count = 1
                out('count must be an integer > 0. setting count = 1', 'warn')
        except ValueError:
            count = 1
            out('count must be an integer > 0. setting count = 1', 'warn')
    if '--interval=' in i:
        # '--interval=' prefix is 11 characters
        tmp = str(re.findall(r"^--interval=.*", i)[0])
        try:
            tmp = int(tmp[11:])
            if tmp > 0:
                interval = tmp
                if debug:
                    out('using interval = ' + str(interval) + '.', 'info')
            else:
                interval = 1
                out('interval must be an integer > 0. setting interval = 1', 'warn')
        except ValueError:
            interval = 1
            out('interval must be an integer > 0. setting interval = 1', 'warn')
    if '--version' in i:
        print_version()
    if '--help' in i:
        print_help()
# if --ping was given, go on and ping the host
if do_ping:
    ping()
# if --report was given, generate the html reports
if do_report:
    report_list = {}
    # create report directory if not exists
    if not create_report_dir(report_dir):
        out('failed to create report_dir in report procedure.', 'fail')
        raise SystemExit
    # collect one stat file per day, sorted by date (filenames are yymmdd)
    file_list = []
    for stat_file in os.listdir(stat_dir):
        file_list.append(stat_file)
    file_list = sorted(file_list)
    # load the per-day report template
    try:
        with open(template_dir + '/daily.html', 'r') as f:
            template = f.read()
            f.close()
    except IOError:
        out('cant read file ' + template_dir + '/daily.html', 'fail')
        raise SystemExit
    # render one report page per stat file
    for stat_file in file_list:
        current_template = template
        data_counts = 0
        latency = 0
        latency_int = 0
        latency_float = 0.0
        highest_latency = 0.0
        lowest_latency = 100.0
        average_latency = 0.0
        sum_latency = 0.0
        packages_lost = 0
        try:
            with open(stat_dir + '/' + stat_file, 'r') as fi:
                chart_data = ''
                # each line is "HH:MM:SS <latency>"; an empty latency means
                # the ping got no reply
                for line in fi:
                    date = line[:8]
                    latency = line[9:]
                    latency = latency.replace('\n', '')
                    latency = latency.replace(' ', '')
                    if not latency:
                        latency = 0
                        packages_lost += 1
                    try:
                        latency_float = float(latency)
                        latency_int = int(latency_float)
                    except:
                        continue
                    # pick a chart color along a green->yellow->red->violet->blue
                    # gradient depending on the latency bucket
                    if latency_int >= 0 and latency_int <= 50:
                        val = (latency_int - 0) * 5
                        color = '#' + str(rgb_to_hex((val, 255, 0)))
                    elif latency_int >= 51 and latency_int <= 75:
                        val = (latency_int - 50) * 10
                        color = '#' + str(rgb_to_hex((255, 255-val, 0)))
                    elif latency_int >= 76 and latency_int <= 100:
                        val = (latency_int - 75) * 10
                        color = '#' + str(rgb_to_hex((255, 0, val)))
                    elif latency_int >= 101 and latency_int <= 125:
                        val = (latency_int - 100) * 10
                        color = '#' + str(rgb_to_hex((255-val, 0, 255)))
                    elif latency_int >= 126 and latency_int <= 150:
                        val = (latency_int - 125) * 10
                        color = '#' + str(rgb_to_hex((0, val, 255)))
                    elif latency_int > 150:
                        color = '#' + str(rgb_to_hex((0, 255, 255)))
                    else:
                        color = '#000000'
                    data_counts = data_counts + 1
                    chart_data+="['"+str(date)+"', "+str(data_counts)+", "+str(latency)+", 'color: "+color+";'],\n              "
                    # lost pings (latency 0) are excluded from min/max/avg
                    if(latency_float > 0):
                        if latency_float > highest_latency:
                            highest_latency = latency_float
                        if latency_float < lowest_latency:
                            lowest_latency = latency_float
                        sum_latency += latency_float
            if data_counts > 0:
                average_latency = sum_latency / data_counts
            # remember this day's aggregates for the overview/menu pages
            report_list[stat_file] = { 'name': stat_file,
                                       'data_counts': data_counts,
                                       'latency': latency,
                                       'latency_int': latency_int,
                                       'latency_float': latency_float,
                                       'highest_latency': highest_latency,
                                       'lowest_latency': lowest_latency,
                                       'average_latency': average_latency,
                                       'sum_latency': sum_latency,
                                       'packages_lost': packages_lost }
            # drop the trailing record separator (",\n" plus indentation)
            chart_data = chart_data[0:len(chart_data)-16]
            current_template = current_template.replace('%chart_data%', chart_data)
        except ValueError as err:
            out(str(err), 'fail')
        except IOError as err:
            out(str(err), 'fail')
        except TypeError as err:
            out(str(err), 'fail')
        except:
            out('Unexpected error:' + str(sys.exc_info()[0]), 'fail')
        # stat filenames are yymmdd; render the title as dd.mm.yy
        chart_title="title: 'Ping Statistics for "+stat_file[4:6]+"."+stat_file[2:4]+"."+stat_file[0:2]+" on "+hostname+"',"
        current_template = current_template.replace('%chart_title%', chart_title)
        footer = '<b>number of records</b>: ' + str(data_counts) + '<br>\n\t'
        footer += '<b>lowest latency</b>: ' + str(round(lowest_latency,2)) + ' ms<br>\n\t'
        footer += '<b>highest latency</b>: ' + str(round(highest_latency,2)) + ' ms<br>\n\t'
        footer += '<b>average latency</b>: ' + str(round(average_latency,2)) + ' ms<br><br>\n\t'
        footer += 'powered by <b><a href="https://github.com/derwilly/pyngstats" target="_blank">pyngstats</a></b> version: ' + version + '<br><br>'
        current_template = current_template.replace('%footer%', footer)
        try:
            with open(report_dir + '/' + stat_file + '.html', 'w+') as f:
                f.write(current_template)
        except IOError:
            out('cant write file ' + report_dir + '/' + stat_file + '.html', 'fail')
    # Generate the frameset (index.html)
    try:
        with open(template_dir + '/index.html', 'r') as f:
            template = f.read()
            f.close()
    except IOError:
        out('cant read file ' + template_dir + '/index.html', 'fail')
        raise SystemExit
    try:
        with open(report_dir + '/index.html', 'w+') as f:
            f.write(template)
    except IOError:
        out('cant write file ' + report_dir + '/index.html', 'fail')
    # Generate overview.html
    try:
        with open(template_dir + '/overview.html', 'r') as f:
            template = f.read()
            f.close()
    except IOError:
        out('cant read file ' + template_dir + '/overview.html', 'fail')
        raise SystemExit
    c = 0
    chart_data = ''
    # one overview row per day: max, min, avg latency and lost packages
    for i in file_list:
        chart_data+="["+str(c)+", '"+report_list[i]['name'][4:6]+'.'+report_list[i]['name'][2:4]+'.'+report_list[i]['name'][0:2]+"', "+str(report_list[i]['highest_latency'])+", "+str(report_list[i]['lowest_latency'])+", "+str(round(report_list[i]['average_latency'],3))+", "+str(report_list[i]['packages_lost'])+"],\n              "
        c += 1
    # drop the trailing record separator
    chart_data = chart_data[:len(chart_data)-16]
    template = template.replace('%chart_data%', chart_data)
    chart_title="title: 'Ping Overview on "+hostname+"',"
    template = template.replace('%chart_title%', chart_title)
    powered_by = 'powered by <b><a href="https://github.com/derwilly/pyngstats" target="_blank">pyngstats</a></b> version: ' + version + '<br><br>'
    template = template.replace('%powered_by%', powered_by)
    try:
        with open(report_dir + '/overview.html', 'w+') as f:
            f.write(template)
    except IOError:
        out('cant write file ' + report_dir + '/overview.html', 'fail')
    # Generate menu.html
    try:
        with open(template_dir + '/menu.html', 'r') as f:
            template = f.read()
            f.close()
    except IOError:
        out('cant read file ' + template_dir + '/menu.html', 'fail')
        raise SystemExit
    # newest day first in the navigation menu
    file_list = reversed(file_list)
    links = ''
    for j in file_list:
        links+='<a href="'+report_list[j]['name']+'.html" target="frame_content">'+report_list[j]['name'][4:6]+'.'+report_list[j]['name'][2:4]+'.'+report_list[j]['name'][0:2]+'</a><br>\n\t'
    template = template.replace('%links%', links)
    try:
        with open(report_dir + '/menu.html', 'w+') as f:
            f.write(template)
    except IOError:
        out('cant write file ' + report_dir + '/menu.html', 'fail')
| |
from django.test import TestCase
from django.contrib.auth.models import Group
from hs_access_control.models import UserResourceProvenance, UserResourcePrivilege, \
GroupResourceProvenance, GroupResourcePrivilege, \
UserGroupProvenance, UserGroupPrivilege, \
PrivilegeCodes
from hs_core import hydroshare
from hs_core.testing import MockIRODSTestCaseMixin
from hs_access_control.tests.utilities import global_reset, \
is_equal_to_as_set, check_provenance_synchronization
__author__ = 'Alva'
class UnitTests(MockIRODSTestCaseMixin, TestCase):
""" test basic behavior of each routine """
def setUp(self):
super(UnitTests, self).setUp()
global_reset()
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
self.alva = hydroshare.create_account(
'alva@gmail.com',
username='alva',
first_name='alva',
last_name='couch',
superuser=False,
groups=[]
)
self.george = hydroshare.create_account(
'george@gmail.com',
username='george',
first_name='george',
last_name='miller',
superuser=False,
groups=[]
)
self.john = hydroshare.create_account(
'john@gmail.com',
username='john',
first_name='john',
last_name='miller',
superuser=False,
groups=[]
)
self.admin = hydroshare.create_account(
'admin@gmail.com',
username='admin',
first_name='first_name_admin',
last_name='last_name_admin',
superuser=True,
groups=[]
)
# george creates a entity 'bikes'
self.bikes = hydroshare.create_resource(
resource_type='GenericResource',
owner=self.george,
title='Bikes',
metadata=[],
)
# george creates a entity 'bikers'
self.bikers = self.george.uaccess.create_group('Bikers', 'Of the human powered kind')
# george creates a entity 'harps'
self.harps = hydroshare.create_resource(
resource_type='GenericResource',
owner=self.george,
title='Harps',
metadata=[],
)
# george creates a entity 'harpers'
self.harpers = self.george.uaccess.create_group('Harpers', 'Without any ferries')
def test_user_resource_provenance_crosstalk(self):
george = self.george
alva = self.alva
bikes = self.bikes
harps = self.harps
john = self.john
# George grants Alva view privilege
UserResourcePrivilege.share(
resource=bikes,
user=alva,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.VIEW)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=george),
[alva]))
record = UserResourceProvenance.get_current_record(
resource=bikes, user=alva)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# George grants Alva privilege
UserResourcePrivilege.share(
resource=bikes,
user=alva,
privilege=PrivilegeCodes.CHANGE,
grantor=george)
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=george),
[alva]))
record = UserResourceProvenance.get_current_record(
resource=bikes, user=alva)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# Alva grants John privilege
UserResourcePrivilege.share(
resource=bikes,
user=john,
privilege=PrivilegeCodes.CHANGE,
grantor=alva)
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=george),
[alva]))
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=alva),
[john]))
record = UserResourceProvenance.get_current_record(
resource=bikes, user=john)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, alva)
# now George overrides Alva on John's privilege
UserResourcePrivilege.share(
resource=bikes,
user=john,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.VIEW)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes, grantor=george), [
alva, john]))
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=alva),
[]))
record = UserResourceProvenance.get_current_record(
resource=bikes, user=john)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# Crosstalk test: George grants Alva privilege over harps
UserResourcePrivilege.share(
resource=harps,
user=alva,
privilege=PrivilegeCodes.VIEW,
grantor=george)
# old privileges didn't change
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes, grantor=george), [
alva, john]))
record = UserResourceProvenance.get_current_record(
resource=bikes, user=alva)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# check new privileges: should be independent.
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=harps,
user=alva),
PrivilegeCodes.VIEW)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=harps,
user=alva),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=harps,
grantor=george),
[alva]))
record = UserResourceProvenance.get_current_record(
resource=harps, user=alva)
self.assertEqual(record.resource, harps)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
check_provenance_synchronization(self)
def test_user_group_provenance_crosstalk(self):
george = self.george
alva = self.alva
bikers = self.bikers
harpers = self.harpers
john = self.john
# George grants Alva view privilege
UserGroupPrivilege.share(
group=bikers,
user=alva,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.VIEW)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=george),
[alva]))
record = UserGroupProvenance.get_current_record(
group=bikers, user=alva)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# George grants Alva privilege
UserGroupPrivilege.share(
group=bikers,
user=alva,
privilege=PrivilegeCodes.CHANGE,
grantor=george)
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=george),
[alva]))
record = UserGroupProvenance.get_current_record(
group=bikers, user=alva)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# Alva grants John privilege
UserGroupPrivilege.share(
group=bikers,
user=john,
privilege=PrivilegeCodes.CHANGE,
grantor=alva)
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=george),
[alva]))
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=alva),
[john]))
record = UserGroupProvenance.get_current_record(
group=bikers, user=john)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, alva)
# now George overrides Alva on John's privilege
UserGroupPrivilege.share(
group=bikers,
user=john,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.VIEW)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers, grantor=george), [
alva, john]))
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=alva),
[]))
record = UserGroupProvenance.get_current_record(
group=bikers, user=john)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# Crosstalk test: George grants Alva privilege over harpers
UserGroupPrivilege.share(
group=harpers,
user=alva,
privilege=PrivilegeCodes.VIEW,
grantor=george)
# old privileges didn't change
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers, grantor=george), [
alva, john]))
record = UserGroupProvenance.get_current_record(
group=bikers, user=alva)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# check new privileges: should be independent of old privileges
self.assertEqual(
UserGroupProvenance.get_privilege(
group=harpers,
user=alva),
PrivilegeCodes.VIEW)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=harpers,
user=alva),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=harpers,
grantor=george),
[alva]))
record = UserGroupProvenance.get_current_record(
group=harpers, user=alva)
self.assertEqual(record.group, harpers)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
check_provenance_synchronization(self)
def test_group_resource_provenance_crosstalk(self):
george = self.george
bikes = self.bikes
bikers = self.bikers
harps = self.harps
harpers = self.harpers
alva = self.alva
# George grants Bikers view privilege
GroupResourcePrivilege.share(
resource=bikes,
group=bikers,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
GroupResourceProvenance.get_privilege(
resource=bikes,
group=bikers),
PrivilegeCodes.VIEW)
self.assertEqual(
GroupResourcePrivilege.get_privilege(
resource=bikes,
group=bikers),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes,
grantor=george),
[bikers]))
record = GroupResourceProvenance.get_current_record(
resource=bikes, group=bikers)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.group, bikers)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# George grants Harpers change privilege
GroupResourcePrivilege.share(
resource=bikes,
group=harpers,
privilege=PrivilegeCodes.CHANGE,
grantor=george)
self.assertEqual(
GroupResourceProvenance.get_privilege(
resource=bikes,
group=harpers),
PrivilegeCodes.CHANGE)
self.assertEqual(
GroupResourcePrivilege.get_privilege(
resource=bikes,
group=harpers),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes, grantor=george), [
bikers, harpers]))
record = GroupResourceProvenance.get_current_record(
resource=bikes, group=harpers)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.group, harpers)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# Alva downgrades Harpers privilege
GroupResourcePrivilege.share(
resource=bikes,
group=harpers,
privilege=PrivilegeCodes.VIEW,
grantor=alva)
self.assertEqual(
GroupResourceProvenance.get_privilege(
resource=bikes,
group=harpers),
PrivilegeCodes.VIEW)
self.assertEqual(
GroupResourcePrivilege.get_privilege(
resource=bikes,
group=harpers),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes,
grantor=george),
[bikers]))
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes,
grantor=alva),
[harpers]))
record = GroupResourceProvenance.get_current_record(
resource=bikes, group=harpers)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.group, harpers)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, alva)
# now George overrides Alva on Harpers privilege
GroupResourcePrivilege.share(
resource=bikes,
group=harpers,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
GroupResourceProvenance.get_privilege(
resource=bikes,
group=harpers),
PrivilegeCodes.VIEW)
self.assertEqual(
GroupResourcePrivilege.get_privilege(
resource=bikes,
group=harpers),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes, grantor=george), [
bikers, harpers]))
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes,
grantor=alva),
[]))
record = GroupResourceProvenance.get_current_record(
resource=bikes, group=harpers)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.group, harpers)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# Crosstalk test: George grants bikers privilege over harps
GroupResourcePrivilege.share(
resource=harps,
group=bikers,
privilege=PrivilegeCodes.CHANGE,
grantor=george)
# old privileges didn't change
self.assertEqual(
GroupResourceProvenance.get_privilege(
resource=bikes,
group=bikers),
PrivilegeCodes.VIEW)
self.assertEqual(
GroupResourcePrivilege.get_privilege(
resource=bikes,
group=bikers),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes, grantor=george), [
bikers, harpers]))
record = GroupResourceProvenance.get_current_record(
resource=bikes, group=bikers)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.group, bikers)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# check new privileges: should be independent.
self.assertEqual(
GroupResourceProvenance.get_privilege(
resource=harps,
group=bikers),
PrivilegeCodes.CHANGE)
self.assertEqual(
GroupResourcePrivilege.get_privilege(
resource=harps,
group=bikers),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=harps,
grantor=george),
[bikers]))
record = GroupResourceProvenance.get_current_record(
resource=harps, group=bikers)
self.assertEqual(record.resource, harps)
self.assertEqual(record.group, bikers)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
check_provenance_synchronization(self)
def test_user_resource_provenance_undo_share(self):
george = self.george
alva = self.alva
bikes = self.bikes
harps = self.harps
john = self.john
# initial state: no undo to do.
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.NONE)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.NONE)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=george),
[]))
record = UserResourceProvenance.get_current_record(
resource=bikes, user=alva) # no record
self.assertTrue(record is None)
# George grants Alva view privilege
UserResourcePrivilege.share(
resource=bikes,
user=alva,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.VIEW)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=george),
[alva]))
# update creates a record
record = UserResourceProvenance.get_current_record(
resource=bikes, user=alva)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# Roll back alva's privilege
UserResourcePrivilege.undo_share(resource=bikes, user=alva, grantor=george)
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.NONE)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.NONE)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=george),
[]))
# there is now a record
record = UserResourceProvenance.get_current_record(
resource=bikes, user=alva)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.NONE)
self.assertEqual(record.undone, True)
self.assertEqual(record.grantor, None)
# George grants Alva privilege
UserResourcePrivilege.share(
resource=bikes,
user=alva,
privilege=PrivilegeCodes.CHANGE,
grantor=george)
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=george),
[alva]))
record = UserResourceProvenance.get_current_record(
resource=bikes, user=alva)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# Alva grants John privilege
UserResourcePrivilege.share(
resource=bikes,
user=john,
privilege=PrivilegeCodes.CHANGE,
grantor=alva)
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=george),
[alva]))
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=alva),
[john]))
record = UserResourceProvenance.get_current_record(
resource=bikes, user=john)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, alva)
# now George overrides Alva on John's privilege
UserResourcePrivilege.share(
resource=bikes,
user=john,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.VIEW)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes, grantor=george), [
alva, john]))
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=alva),
[]))
record = UserResourceProvenance.get_current_record(
resource=bikes, user=john)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# George changes mind and rolls back change
UserResourcePrivilege.undo_share(resource=bikes, user=john, grantor=george)
# privilege has been rolled back
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=george),
[alva]))
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=alva),
[]))
record = UserResourceProvenance.get_current_record(
resource=bikes, user=john)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, True)
self.assertEqual(record.grantor, alva)
# Crosstalk test: George grants Alva privilege over harps
UserResourcePrivilege.share(
resource=harps,
user=alva,
privilege=PrivilegeCodes.VIEW,
grantor=george)
# old privileges didn't change
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=george),
[alva]))
record = UserResourceProvenance.get_current_record(
resource=bikes, user=john)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, True)
self.assertEqual(record.grantor, alva)
# check new privileges: should be independent.
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=harps,
user=alva),
PrivilegeCodes.VIEW)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=harps,
user=alva),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=harps,
grantor=george),
[alva]))
record = UserResourceProvenance.get_current_record(
resource=harps, user=alva)
self.assertEqual(record.resource, harps)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# now roll back privilege over harps
UserResourcePrivilege.undo_share(resource=harps, user=alva, grantor=george)
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=harps,
user=alva),
PrivilegeCodes.NONE)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=harps,
user=alva),
PrivilegeCodes.NONE)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=harps,
grantor=george),
[]))
record = UserResourceProvenance.get_current_record(
resource=harps, user=alva)
self.assertEqual(record.resource, harps)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.NONE)
self.assertEqual(record.undone, True)
self.assertEqual(record.grantor, None)
check_provenance_synchronization(self)
def test_user_group_provenance_undo_share(self):
george = self.george
alva = self.alva
bikers = self.bikers
harpers = self.harpers
john = self.john
# initial state: no undo to do.
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.NONE)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.NONE)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=george),
[]))
record = UserGroupProvenance.get_current_record(
group=bikers, user=alva) # no record
self.assertTrue(record is None)
# George grants Alva view privilege
UserGroupPrivilege.share(
group=bikers,
user=alva,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.VIEW)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=george),
[alva]))
# update creates a record
record = UserGroupProvenance.get_current_record(
group=bikers, user=alva)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# Roll back alva's privilege
UserGroupPrivilege.undo_share(group=bikers, user=alva, grantor=george)
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.NONE)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.NONE)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=george),
[]))
# there is now a record
record = UserGroupProvenance.get_current_record(
group=bikers, user=alva)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.NONE)
self.assertEqual(record.undone, True)
self.assertEqual(record.grantor, None)
# George grants Alva privilege
UserGroupPrivilege.share(
group=bikers,
user=alva,
privilege=PrivilegeCodes.CHANGE,
grantor=george)
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=george),
[alva]))
record = UserGroupProvenance.get_current_record(
group=bikers, user=alva)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# Alva grants John privilege
UserGroupPrivilege.share(
group=bikers,
user=john,
privilege=PrivilegeCodes.CHANGE,
grantor=alva)
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=george),
[alva]))
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=alva),
[john]))
record = UserGroupProvenance.get_current_record(
group=bikers, user=john)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, alva)
# now George overrides Alva on John's privilege
UserGroupPrivilege.share(
group=bikers,
user=john,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.VIEW)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers, grantor=george), [
alva, john]))
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=alva),
[]))
record = UserGroupProvenance.get_current_record(
group=bikers, user=john)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# George changes mind and rolls back change
UserGroupPrivilege.undo_share(group=bikers, user=john, grantor=george)
# privilege has been rolled back
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=george),
[alva]))
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=alva),
[]))
record = UserGroupProvenance.get_current_record(
group=bikers, user=john)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, True)
self.assertEqual(record.grantor, alva)
# Crosstalk test: George grants Alva privilege over harpers
UserGroupPrivilege.share(
group=harpers,
user=alva,
privilege=PrivilegeCodes.VIEW,
grantor=george)
# old privileges didn't change
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=george),
[alva]))
record = UserGroupProvenance.get_current_record(
group=bikers, user=john)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, True)
self.assertEqual(record.grantor, alva)
# check new privileges: should be independent.
self.assertEqual(
UserGroupProvenance.get_privilege(
group=harpers,
user=alva),
PrivilegeCodes.VIEW)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=harpers,
user=alva),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=harpers,
grantor=george),
[alva]))
record = UserGroupProvenance.get_current_record(
group=harpers, user=alva)
self.assertEqual(record.group, harpers)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# now roll back privilege over harpers
UserGroupPrivilege.undo_share(group=harpers, user=alva, grantor=george)
self.assertEqual(
UserGroupProvenance.get_privilege(
group=harpers,
user=alva),
PrivilegeCodes.NONE)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=harpers,
user=alva),
PrivilegeCodes.NONE)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=harpers,
grantor=george),
[]))
record = UserGroupProvenance.get_current_record(
group=harpers, user=alva)
self.assertEqual(record.group, harpers)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.NONE)
self.assertEqual(record.undone, True)
self.assertEqual(record.grantor, None)
check_provenance_synchronization(self)
    def test_group_resource_provenance_undo_share(self):
        """
        Exercise GroupResourcePrivilege.undo_share end to end.

        Walks one resource/group pair through grant -> undo -> regrant,
        checks that undo restores the *previous* provenance state (not
        simply NONE), that each grantor can only undo their own grants,
        and that grants on an unrelated resource do not cross-talk.
        After every step the provenance log (GroupResourceProvenance)
        and the effective privilege (GroupResourcePrivilege) must agree.
        """
        george = self.george
        alva = self.alva
        bikers = self.bikers
        bikes = self.bikes
        harps = self.harps
        harpers = self.harpers
        # initial state: no undo to do.
        self.assertEqual(
            GroupResourceProvenance.get_privilege(
                resource=bikes,
                group=bikers),
            PrivilegeCodes.NONE)
        self.assertEqual(
            GroupResourcePrivilege.get_privilege(
                resource=bikes,
                group=bikers),
            PrivilegeCodes.NONE)
        self.assertTrue(
            is_equal_to_as_set(
                GroupResourcePrivilege.get_undo_groups(
                    resource=bikes,
                    grantor=george),
                []))
        record = GroupResourceProvenance.get_current_record(
            resource=bikes, group=bikers)  # no record
        self.assertTrue(record is None)
        # George grants bikers view privilege
        GroupResourcePrivilege.share(
            resource=bikes,
            group=bikers,
            privilege=PrivilegeCodes.VIEW,
            grantor=george)
        self.assertEqual(
            GroupResourceProvenance.get_privilege(
                resource=bikes,
                group=bikers),
            PrivilegeCodes.VIEW)
        self.assertEqual(
            GroupResourcePrivilege.get_privilege(
                resource=bikes,
                group=bikers),
            PrivilegeCodes.VIEW)
        self.assertTrue(
            is_equal_to_as_set(
                GroupResourcePrivilege.get_undo_groups(
                    resource=bikes,
                    grantor=george),
                [bikers]))
        record = GroupResourceProvenance.get_current_record(
            resource=bikes, group=bikers)  # update creates a record
        self.assertEqual(record.resource, bikes)
        self.assertEqual(record.group, bikers)
        self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
        self.assertEqual(record.undone, False)
        self.assertEqual(record.grantor, george)
        # Roll back bikers's privilege
        GroupResourcePrivilege.undo_share(resource=bikes, group=bikers, grantor=george)
        self.assertEqual(
            GroupResourceProvenance.get_privilege(
                resource=bikes,
                group=bikers),
            PrivilegeCodes.NONE)
        self.assertEqual(
            GroupResourcePrivilege.get_privilege(
                resource=bikes,
                group=bikers),
            PrivilegeCodes.NONE)
        self.assertTrue(
            is_equal_to_as_set(
                GroupResourcePrivilege.get_undo_groups(
                    resource=bikes,
                    grantor=george),
                []))
        record = GroupResourceProvenance.get_current_record(
            resource=bikes, group=bikers)  # there is now a record that is initial
        self.assertEqual(record.resource, bikes)
        self.assertEqual(record.group, bikers)
        self.assertEqual(record.privilege, PrivilegeCodes.NONE)
        self.assertEqual(record.undone, True)
        self.assertEqual(record.grantor, None)
        # George grants bikers privilege
        GroupResourcePrivilege.share(
            resource=bikes,
            group=bikers,
            privilege=PrivilegeCodes.CHANGE,
            grantor=george)
        self.assertEqual(
            GroupResourceProvenance.get_privilege(
                resource=bikes,
                group=bikers),
            PrivilegeCodes.CHANGE)
        self.assertEqual(
            GroupResourcePrivilege.get_privilege(
                resource=bikes,
                group=bikers),
            PrivilegeCodes.CHANGE)
        self.assertTrue(
            is_equal_to_as_set(
                GroupResourcePrivilege.get_undo_groups(
                    resource=bikes,
                    grantor=george),
                [bikers]))
        record = GroupResourceProvenance.get_current_record(
            resource=bikes, group=bikers)
        self.assertEqual(record.resource, bikes)
        self.assertEqual(record.group, bikers)
        self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
        self.assertEqual(record.undone, False)
        self.assertEqual(record.grantor, george)
        # Alva grants harpers privilege
        GroupResourcePrivilege.share(
            resource=bikes,
            group=harpers,
            privilege=PrivilegeCodes.CHANGE,
            grantor=alva)
        self.assertEqual(
            GroupResourceProvenance.get_privilege(
                resource=bikes,
                group=harpers),
            PrivilegeCodes.CHANGE)
        self.assertEqual(
            GroupResourcePrivilege.get_privilege(
                resource=bikes,
                group=harpers),
            PrivilegeCodes.CHANGE)
        # each grantor can only undo their own grants
        self.assertTrue(
            is_equal_to_as_set(
                GroupResourcePrivilege.get_undo_groups(
                    resource=bikes,
                    grantor=george),
                [bikers]))
        self.assertTrue(
            is_equal_to_as_set(
                GroupResourcePrivilege.get_undo_groups(
                    resource=bikes,
                    grantor=alva),
                [harpers]))
        record = GroupResourceProvenance.get_current_record(
            resource=bikes, group=harpers)
        self.assertEqual(record.resource, bikes)
        self.assertEqual(record.group, harpers)
        self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
        self.assertEqual(record.undone, False)
        self.assertEqual(record.grantor, alva)
        # now George overrides Alva on harpers' privilege
        GroupResourcePrivilege.share(
            resource=bikes,
            group=harpers,
            privilege=PrivilegeCodes.VIEW,
            grantor=george)
        self.assertEqual(
            GroupResourceProvenance.get_privilege(
                resource=bikes,
                group=harpers),
            PrivilegeCodes.VIEW)
        self.assertEqual(
            GroupResourcePrivilege.get_privilege(
                resource=bikes,
                group=harpers),
            PrivilegeCodes.VIEW)
        self.assertTrue(
            is_equal_to_as_set(
                GroupResourcePrivilege.get_undo_groups(
                    resource=bikes, grantor=george), [
                    bikers, harpers]))
        self.assertTrue(
            is_equal_to_as_set(
                GroupResourcePrivilege.get_undo_groups(
                    resource=bikes,
                    grantor=alva),
                []))
        record = GroupResourceProvenance.get_current_record(
            resource=bikes, group=harpers)
        self.assertEqual(record.resource, bikes)
        self.assertEqual(record.group, harpers)
        self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
        self.assertEqual(record.undone, False)
        self.assertEqual(record.grantor, george)
        # George changes mind and rolls back change
        GroupResourcePrivilege.undo_share(resource=bikes, group=harpers, grantor=george)
        # privilege has been rolled back (to Alva's prior CHANGE grant)
        self.assertEqual(
            GroupResourceProvenance.get_privilege(
                resource=bikes,
                group=harpers),
            PrivilegeCodes.CHANGE)
        self.assertEqual(
            GroupResourcePrivilege.get_privilege(
                resource=bikes,
                group=harpers),
            PrivilegeCodes.CHANGE)
        self.assertTrue(
            is_equal_to_as_set(
                GroupResourcePrivilege.get_undo_groups(
                    resource=bikes,
                    grantor=george),
                [bikers]))
        self.assertTrue(
            is_equal_to_as_set(
                GroupResourcePrivilege.get_undo_groups(
                    resource=bikes,
                    grantor=alva),
                []))
        record = GroupResourceProvenance.get_current_record(
            resource=bikes, group=harpers)
        self.assertEqual(record.resource, bikes)
        self.assertEqual(record.group, harpers)
        self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
        self.assertEqual(record.undone, True)
        self.assertEqual(record.grantor, alva)
        # Crosstalk test: George grants bikers privilege over harps
        GroupResourcePrivilege.share(
            resource=harps,
            group=bikers,
            privilege=PrivilegeCodes.VIEW,
            grantor=george)
        # old privileges didn't change
        self.assertEqual(
            GroupResourceProvenance.get_privilege(
                resource=bikes,
                group=harpers),
            PrivilegeCodes.CHANGE)
        self.assertEqual(
            GroupResourcePrivilege.get_privilege(
                resource=bikes,
                group=harpers),
            PrivilegeCodes.CHANGE)
        self.assertTrue(
            is_equal_to_as_set(
                GroupResourcePrivilege.get_undo_groups(
                    resource=bikes,
                    grantor=george),
                [bikers]))
        record = GroupResourceProvenance.get_current_record(
            resource=bikes, group=harpers)
        self.assertEqual(record.resource, bikes)
        self.assertEqual(record.group, harpers)
        self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
        self.assertEqual(record.undone, True)
        self.assertEqual(record.grantor, alva)
        # check new privileges: should be independent.
        self.assertEqual(
            GroupResourceProvenance.get_privilege(
                resource=harps,
                group=bikers),
            PrivilegeCodes.VIEW)
        self.assertEqual(
            GroupResourcePrivilege.get_privilege(
                resource=harps,
                group=bikers),
            PrivilegeCodes.VIEW)
        self.assertTrue(
            is_equal_to_as_set(
                GroupResourcePrivilege.get_undo_groups(
                    resource=harps,
                    grantor=george),
                [bikers]))
        record = GroupResourceProvenance.get_current_record(
            resource=harps, group=bikers)
        self.assertEqual(record.resource, harps)
        self.assertEqual(record.group, bikers)
        self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
        self.assertEqual(record.undone, False)
        self.assertEqual(record.grantor, george)
        # now roll back privilege over harps
        GroupResourcePrivilege.undo_share(resource=harps, group=bikers, grantor=george)
        self.assertEqual(
            GroupResourceProvenance.get_privilege(
                resource=harps,
                group=bikers),
            PrivilegeCodes.NONE)
        self.assertEqual(
            GroupResourcePrivilege.get_privilege(
                resource=harps,
                group=bikers),
            PrivilegeCodes.NONE)
        self.assertTrue(
            is_equal_to_as_set(
                GroupResourcePrivilege.get_undo_groups(
                    resource=harps,
                    grantor=george),
                []))
        record = GroupResourceProvenance.get_current_record(
            resource=harps, group=bikers)
        self.assertEqual(record.resource, harps)
        self.assertEqual(record.group, bikers)
        self.assertEqual(record.privilege, PrivilegeCodes.NONE)
        self.assertEqual(record.undone, True)
        self.assertEqual(record.grantor, None)
        check_provenance_synchronization(self)
| |
"""
SimpleConfigParser
Simple configuration file parser: Python module to parse configuration files
without sections. Based on ConfigParser from the standard library.
Author: Philippe Lagadec
Project website: http://www.decalage.info/python/configparser
Inspired from an idea posted by Fredrik Lundh:
http://mail.python.org/pipermail/python-dev/2002-November/029987.html
Usage: see end of source code and http://docs.python.org/library/configparser.html
"""
# Module metadata (see the docstring above for project links).
__author__ = 'Philippe Lagadec'
__version__ = '0.02'
#--- LICENSE ------------------------------------------------------------------
# The SimpleConfigParser Python module is copyright (c) Philippe Lagadec 2009-2010
#
# By obtaining, using, and/or copying this software and/or its associated
# documentation, you agree that you have read, understood, and will comply with
# the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and its
# associated documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appears in all copies, and that both
# that copyright notice and this permission notice appear in supporting
# documentation, and that the name of the author not be used in advertising or
# publicity pertaining to distribution of the software without specific,
# written prior permission.
#
# THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#-------------------------------------------------------------------------
# CHANGELOG
# 2009-02-12 v0.01 PL: - initial version
# 2010-03-15 v0.02 PL: - updated tests and comments
#-------------------------------------------------------------------------
# TODO:
# - implement read() using the base class code
#=== IMPORTS ==================================================================
import ConfigParser
import StringIO
import logging
import os
import sys
# Module-level logger; the NullHandler keeps "no handlers could be found"
# warnings away when the host application has not configured logging.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
#=== CONSTANTS ================================================================
# section name for options without section:
NOSECTION = 'NOSECTION'
# Templates for fatal error messages; {0} = parameter name, {1} = file name.
DEFAULT_MESSAGE_NO_VALUE = "Required parameter '{0}' does not have a"\
    " value in {1}."
DEFAULT_MESSAGE = "\nPlease set it with the appropriate value. Refer to "\
    "config-example/settings.ini for assistance.\nProgram "\
    "will now terminate..."
# Dictionary containing required file-related parameters along with custom
# message to be displayed in case of error
required_files_dict = {
    "raw_xml_file": "\nIt should specify the name of the file containing raw"\
        " data. ",
    "translation_table_file": "\nIt should specify the name of the required "\
        "xml file containing translation table. ",
    "form_events_file": "\nIt should specify the name of the required xml "\
        "file containing empty form events. ",
    "research_id_to_redcap_id": "\nIt should specify the name of the xml "\
        "file containing mapping of research ids to redcap ids. ",
    "component_to_loinc_code_xml": "\nIt should specify the name of the "\
        "required xml file containing a mapping of clinical component ids to "\
        "loinc codes. "
}
# Required REDCap / SMTP / EMR-sftp parameters; these share the generic
# DEFAULT_MESSAGE instead of a per-parameter message.
required_server_parameters_list = [
    'redcap_uri',
    'token',
    'redcap_support_receiver_email',
    'smtp_host_for_outbound_mail',
    'smtp_port_for_outbound_mail',
    'emr_sftp_server_hostname',
    'emr_sftp_server_username',
    'emr_sftp_project_name',
    'emr_data_file']
# Dictionary containing optional parameters along with their default values
optional_parameters_dict = {
    "report_file_path": "report.xml",
    "input_date_format": "%Y-%m-%d %H:%M:%S",
    "output_date_format": "%Y-%m-%d",
    "report_file_path2": "report.html",
    "sender_email": "please-do-not-reply@example.com",
    "project": "DEFAULT_PROJECT",
    "rules": {},
    "batch_warning_days": 13,
    "rate_limiter_value_in_redcap": 600,
    "batch_info_database": "redi.db",
    "send_email": 'N',
    "receiver_email": "test@example.com",
    "verify_ssl": True,
    "replace_fields_in_raw_data_xml": None,
    "include_rule_errors_in_report": False,
    "redcap_support_sender_email": 'please-do-not-reply@example.com',
    "emr_sftp_server_port": 22,
    "emr_sftp_server_password": None,
    "emr_sftp_server_private_key": None,
    "emr_sftp_server_private_key_pass": None,
    "is_sort_by_lab_id": True,
}
class ConfigurationError(Exception):
    """Error type for configuration problems (not raised in this module)."""
    pass
#=== CLASSES ==================================================================
class SimpleConfigParser(ConfigParser.RawConfigParser):
    """
    Simple configuration file parser: based on ConfigParser from the standard
    library, slightly modified to parse configuration files without sections.

    Inspired from an idea posted by Fredrik Lundh:
    http://mail.python.org/pipermail/python-dev/2002-November/029987.html
    """

    def read(self, filename):
        """
        Parse `filename`, a configuration file without section headers.

        A synthetic [NOSECTION] header is prepended so that the base
        RawConfigParser accepts the file.  Terminates the program when
        the file does not exist.
        """
        if not os.path.exists(filename):
            # logger.error, not logger.exception: there is no active
            # exception here, so there is no traceback to attach.
            logger.error("Cannot find settings file: {0}. Program will "\
                "now terminate...".format(filename))
            sys.exit()
        self.filename = filename
        # 'with' guarantees the file handle is closed even on read errors
        with open(filename) as config_file:
            text = config_file.read()
        f = StringIO.StringIO("[%s]\n" % NOSECTION + text)
        self.readfp(f, filename)

    def getoption(self, option):
        """
        Return the value of `option`: a bool when the raw string looks
        boolean (see to_bool), otherwise the raw string itself.
        """
        opt_as_string = self.get(NOSECTION, option)
        try:
            # if the conversion to boolean fails we keep the string value
            return to_bool(opt_as_string)
        except ValueError:
            pass
        return opt_as_string

    def getoptionslist(self):
        """Return the list of available option names."""
        return self.options(NOSECTION)

    def hasoption(self, option):
        """
        return True if an option is available, False otherwise.
        (NOTE: do not confuse with the original has_option)
        """
        return self.has_option(NOSECTION, option)

    def set_attributes(self):
        """
        Validate the parsed file and mirror every parameter as an
        instance attribute.  Terminates the program on an empty file.
        """
        # Check if configuration file is empty
        if not self.getoptionslist():
            message = "ERROR: Configuration file '{0}' is empty! Program "\
                "will now terminate...".format(self.filename)
            logger.error(message)
            sys.exit()
        else:
            self.check_parameters()

    def check_parameters(self):
        """
        handle required and default optional_parameters_dict

        Required parameters without a value terminate the program with a
        descriptive message; optional parameters fall back to the
        defaults declared in `optional_parameters_dict`.
        """
        # check for required file parameters
        # handled separately as these need a custom message to be displayed
        for option in required_files_dict:
            if not self.hasoption(option) or self.getoption(option) == "":
                message = DEFAULT_MESSAGE_NO_VALUE.format(option, \
                    self.filename) + required_files_dict[option] +\
                    DEFAULT_MESSAGE
                logger.error(message)
                sys.exit()
            else:
                setattr(self, option, self.getoption(option))
        # check for required server and emr parameters
        for option in required_server_parameters_list:
            if not self.hasoption(option) or self.getoption(option) == "":
                message = DEFAULT_MESSAGE_NO_VALUE.format(option, \
                    self.filename) + DEFAULT_MESSAGE
                logger.error(message)
                sys.exit()
            else:
                logger.debug("Setting required parameter {} = {} "\
                    .format(option, self.getoption(option)))
                setattr(self, option, self.getoption(option))
        # check for receiver email if send_email = 'Y'
        if self.hasoption('send_email') and self.getoption('send_email'):
            if not self.hasoption('receiver_email') or \
                    self.getoption('receiver_email') == "":
                # BUG FIX: the message previously interpolated the stale
                # loop variable `option` instead of the parameter that is
                # actually missing a value.
                message = DEFAULT_MESSAGE_NO_VALUE.format('receiver_email', \
                    self.filename) + DEFAULT_MESSAGE
                logger.error(message)
                sys.exit()
        # set optional parameters with default values if missing
        for option in optional_parameters_dict:
            if not self.hasoption(option) or self.getoption(option) == "":
                # logger.warning: logger.warn is a deprecated alias
                logger.warning("Parameter '{0}' in {1} does not have"\
                    " a value. Default value '{2}' applied.".format(option, \
                    self.filename, optional_parameters_dict[option]))
                setattr(self, option, optional_parameters_dict[option])
            else:
                setattr(self, option, self.getoption(option))
#=== End class ================================================================
def to_bool(value):
    """
    Helper function for translating strings into booleans
    @see test/TestReadConfig.py
    """
    if not isinstance(value, str):
        raise ValueError('Cannot check boolean value. Not a string.')
    normalized = value.lower()
    # accepted spellings, case-insensitive
    if normalized in ('true', 't', '1', 'y'):
        return True
    if normalized in ('false', 'f', '0', 'n'):
        return False
    raise ValueError('Not a boolean string: "%s"' % value)
#=== MAIN =====================================================================
# NOTE: this demo block uses Python 2 print statements, like the rest of
# this module (ConfigParser/StringIO imports are Python-2-only names).
if __name__ == '__main__':
    # simple tests when launched as a script instead of imported as module:
    ##cp = ConfigParser.ConfigParser()
    # this raises an exception:
    # ConfigParser.MissingSectionHeaderError: File contains no section headers.
    # cp.read('config_without_section.ini')
    print 'SimpleConfigParser tests:'
    filename = 'sample_config_no_section.ini'
    cp = SimpleConfigParser()
    print 'Parsing %s...' % filename
    cp.read(filename)
    print 'Sections:', cp.sections()
    # print cp.items(NOSECTION)
    print 'getoptionslist():', cp.getoptionslist()
    for option in cp.getoptionslist():
        print "getoption('%s') = '%s'" % (option, cp.getoption(option))
    print "hasoption('wrongname') =", cp.hasoption('wrongname')
    print
    print "Print out options by attribute instead of recursing the list"
    # set_attributes() copies every parameter onto the parser instance
    cp.set_attributes()
    print cp.option1
    print cp.option2
| |
import io
import threading
import synapse.link as s_link
import synapse.async as s_async
import synapse.daemon as s_daemon
import synapse.neuron as s_neuron
import synapse.common as s_common
import synapse.telepath as s_telepath
import synapse.lib.session as s_session
from synapse.common import *
from synapse.tests.common import *
# some syntax sugar
class Net:
    """Syntax sugar: exposes the keys of a dict as object attributes."""

    def __init__(self, info):
        self.info = info

    def __getattr__(self, attr):
        # Unknown attributes fall through to the wrapped dict; a missing
        # key raises KeyError (not AttributeError), same as before.
        return self.info[attr]
class FooBar:
    """Trivial object shared over telepath/neuron links in the tests."""

    def __init__(self):
        pass

    def foo(self, x):
        """Return x offset by a fixed constant (lets callers assert results)."""
        return 20 + x

    def bar(self, x):
        """Always raise, to exercise remote exception propagation."""
        raise Exception('hi')

    def getsess(self, prop):
        """Read prop from the current synapse session."""
        return s_session.current().get(prop)

    def setsess(self, prop, valu):
        """Write prop into the current synapse session."""
        s_session.current().set(prop, valu)
import unittest
@unittest.skip('neuron tests temp disabled')
class TestNeuron(SynTest):
    # NOTE: methods prefixed 'newp_' are additionally hidden from test
    # discovery even when the class-level skip is lifted.
    def initNeuNet(self, usepki=False):
        '''
        Construct a neuron mesh making sure to wait for all
        link events ( so the mesh is "settled" before the test )

        Topology: neu0 connects to neu1 and neu2; neu2 connects to neu3.
        When usepki is True, a shared root token signs a cert for every
        neuron and usepki is enabled on each before linking.
        Returns a Net wrapping locals() so tests can use net.neu0 etc.
        '''
        neu0 = s_neuron.Neuron()
        neu1 = s_neuron.Neuron()
        neu2 = s_neuron.Neuron()
        neu3 = s_neuron.Neuron()
        if usepki:
            root = neu0.pki.genRootToken(bits=512)
            neu1.pki.setTokenTufo(root)
            neu2.pki.setTokenTufo(root)
            neu3.pki.setTokenTufo(root)
            tokn0 = neu0.pki.genUserToken(neu0.iden, can=('mesh:join',), bits=512)
            tokn1 = neu1.pki.genUserToken(neu1.iden, can=('mesh:join',), bits=512)
            tokn2 = neu2.pki.genUserToken(neu2.iden, can=('mesh:join',), bits=512)
            tokn3 = neu3.pki.genUserToken(neu3.iden, can=('mesh:join',), bits=512)
            cert0 = neu0.pki.genTokenCert(tokn0, signas=root[0])
            cert1 = neu0.pki.genTokenCert(tokn1, signas=root[0])
            cert2 = neu0.pki.genTokenCert(tokn2, signas=root[0])
            cert3 = neu0.pki.genTokenCert(tokn3, signas=root[0])
            neu0.pki.setTokenCert(neu0.iden, cert0)
            neu1.pki.setTokenCert(neu1.iden, cert1)
            neu2.pki.setTokenCert(neu2.iden, cert2)
            neu3.pki.setTokenCert(neu3.iden, cert3)
            neu0.setNeuProp('usepki',1)
            neu1.setNeuProp('usepki',1)
            neu2.setNeuProp('usepki',1)
            neu3.setNeuProp('usepki',1)
        #print('NEU0: %r' % (neu0.iden,))
        #print('NEU1: %r' % (neu1.iden,))
        #print('NEU2: %r' % (neu2.iden,))
        #print('NEU3: %r' % (neu3.iden,))
        link0 = neu0.listen('tcp://127.0.0.1:0/neuron')
        link1 = neu1.listen('tcp://127.0.0.1:0/neuron')
        link2 = neu2.listen('tcp://127.0.0.1:0/neuron')
        link3 = neu3.listen('tcp://127.0.0.1:0/neuron')
        # "full" waiters cover the total neu:link:init count each neuron
        # will eventually see once the whole mesh is up
        full0 = self.getTestWait(neu0,6,'neu:link:init')
        full1 = self.getTestWait(neu1,6,'neu:link:init')
        full2 = self.getTestWait(neu2,4,'neu:link:init')
        full3 = self.getTestWait(neu3,2,'neu:link:init')
        # connect neu0->neu1
        wait0 = self.getTestWait(neu0,2,'neu:link:init')
        wait1 = self.getTestWait(neu1,2,'neu:link:init')
        neu0.connect('tcp://127.0.0.1:0/', port=link1[1]['port'] )
        wait0.wait()
        wait1.wait()
        # connect neu0->neu2
        wait0 = self.getTestWait(neu0,2,'neu:link:init')
        wait2 = self.getTestWait(neu2,2,'neu:link:init')
        neu0.connect('tcp://127.0.0.1:0/', port=link2[1]['port'] )
        wait0.wait()
        wait2.wait()
        # connect neu2->neu3
        wait2 = self.getTestWait(neu2,2,'neu:link:init')
        wait3 = self.getTestWait(neu3,2,'neu:link:init')
        neu2.connect('tcp://127.0.0.1:0/', port=link3[1]['port'] )
        wait2.wait()
        wait3.wait()
        # make sure all neu:link:init mesgs have been consumed
        full0.wait()
        full1.wait()
        full2.wait()
        full3.wait()
        return Net(locals())
    def finiNeuNet(self, net):
        '''
        Tear down a mesh from initNeuNet: storm a test:fini message,
        wait until every neuron has seen it, then fini all four.
        '''
        w0 = self.getTestWait(net.neu0,1,'test:fini')
        w1 = self.getTestWait(net.neu1,1,'test:fini')
        w2 = self.getTestWait(net.neu2,1,'test:fini')
        w3 = self.getTestWait(net.neu3,1,'test:fini')
        net.neu0.storm('test:fini')
        w0.wait()
        w1.wait()
        w2.wait()
        w3.wait()
        net.neu0.fini()
        net.neu1.fini()
        net.neu2.fini()
        net.neu3.fini()
    def newp_neuron_basics(self):
        # share a FooBar on neu3 and call it via a dendrite on neu0's link,
        # both synchronously and with an ondone callback
        net = self.initNeuNet()
        net.neu3.share('foo',FooBar())
        dend = s_neuron.openlink(net.link0)
        path = '%s/foo' % (net.neu3.iden,)
        prox = dend.open(path)
        task = ('foo',(30,),{})
        job = dend.call( net.neu3.iden, 'foo', task )
        self.assertIsNotNone(job)
        self.assertEqual( dend.sync(job), 50)
        self.assertEqual( prox.foo(11), 31 )
        data = {}
        def ondone(j):
            data['ret'] = s_async.jobret(j)
        job = prox.foo(12, ondone=ondone)
        self.assertEqual( dend.sync(job), 32 )
        self.assertEqual( data.get('ret'), 32 )
        self.finiNeuNet(net)
    def test_neuron_tree(self):
        net = self.initNeuNet()
        def flat(x):
            # flatten a list of (iden, children) trees into a set of idens
            ret = set()
            todo = [ t for t in x ]
            while todo:
                n = todo.pop()
                ret.add(n[0])
                for k in n[1]:
                    todo.append(k)
            return ret
        # ensure each can build a tree to
        # all the others...
        iall = set([
            net.neu0.iden,
            net.neu1.iden,
            net.neu2.iden,
            net.neu3.iden,
        ])
        set0 = flat( net.neu0.getPathTrees() )
        set1 = flat( net.neu1.getPathTrees() )
        set2 = flat( net.neu2.getPathTrees() )
        set3 = flat( net.neu3.getPathTrees() )
        # each tree reaches the 3 *other* neurons
        self.assertEqual( len( iall & set0 ), 3 )
        self.assertEqual( len( iall & set1 ), 3 )
        self.assertEqual( len( iall & set2 ), 3 )
        self.assertEqual( len( iall & set3 ), 3 )
        self.finiNeuNet(net)
    def test_neuron_storm(self):
        # a storm from neu1 must reach non-adjacent neurons (neu2, neu3)
        net = self.initNeuNet()
        w2 = self.getTestWait(net.neu2, 1, 'woot:baz')
        w3 = self.getTestWait(net.neu3, 1, 'woot:baz')
        net.neu1.storm('woot:baz', faz=30)
        w2.wait()
        w3.wait()
        self.finiNeuNet(net)
    def newp_neuron_ping(self):
        # ping the remote neuron endpoint and check the reported iden
        net = self.initNeuNet()
        dend = s_neuron.openlink(net.link0)
        path = '%s/neuron' % (net.neu2.iden,)
        prox1 = dend.open(path)
        pong = prox1.ping()
        self.assertIsNotNone(pong)
        self.assertEqual( pong.get('iden'), net.neu2.iden )
        self.finiNeuNet(net)
    def newp_dendrite_share(self):
        # shares published on one dendrite become visible (by iden and by
        # tag prefix) on another dendrite elsewhere in the mesh
        net = self.initNeuNet()
        dend0 = s_neuron.openlink(net.link0)
        dend3 = s_neuron.openlink(net.link3)
        w0 = self.getTestWait(net.neu0, 2, 'neu:dend:init')
        dend3.share('foobar0', FooBar(), tags=('foo.bar.0',))
        dend3.share('foobar1', FooBar(), tags=('foo.bar.1',))
        w0.wait()
        self.assertIsNotNone( dend0.getDendByIden('foobar0') )
        self.assertIsNotNone( dend0.getDendByIden('foobar1') )
        bytag = dend0.getDendsByTag('foo.bar')
        self.assertEqual( len(bytag), 2 )
        bytag = dend0.getDendsByTag('foo.bar.0')
        self.assertEqual( len(bytag), 1 )
        # NOTE(review): no finiNeuNet() here, unlike the other tests --
        # presumably an oversight; confirm before re-enabling.
    def test_neuron_usepki_basics(self):
        # same flow as newp_neuron_basics but over a PKI-enabled mesh
        net = self.initNeuNet(usepki=True)
        net.neu3.share('foo',FooBar())
        dend = s_neuron.openlink(net.link0)
        path = '%s/foo' % (net.neu3.iden,)
        prox = dend.open(path)
        task = ('foo',(30,),{})
        job = dend.call( net.neu3.iden, 'foo', task )
        self.assertIsNotNone(job)
        self.assertEqual( dend.sync(job), 50)
        self.assertEqual( prox.foo(11), 31 )
        data = {}
        def ondone(j):
            data['ret'] = s_async.jobret(j)
        job = prox.foo(12, ondone=ondone)
        self.assertEqual( dend.sync(job), 32 )
        self.assertEqual( data.get('ret'), 32 )
        self.finiNeuNet(net)
class TempDisabled:
    """
    Legacy neuron tests parked outside the unittest collector (the class
    inherits from nothing test-related, so nothing here is discovered).
    Kept for reference; re-validate against current neuron APIs before
    re-enabling.
    """

    def getNeuNet(self, usepki=False):
        """
        Build a three-neuron star (neu0 linked to neu1 and neu2) shared
        through a single daemon, and return the TestEnv that owns every
        piece (callers are responsible for env.fini()).

        Args:
            usepki: accepted so test_neuron_usepki_call can call this
                without a TypeError; PKI configuration is NOT applied by
                this disabled harness.  TODO: wire up PKI on re-enable.
        """
        env = TestEnv()
        dmon = s_daemon.Daemon()
        env.add('dmon', dmon, fini=True)
        link = dmon.listen('tcp://127.0.0.1:0/')
        # the neurons live in the TestEnv so env.fini() tears them down
        env.add('neu0', s_neuron.Neuron(), fini=True)
        env.add('neu1', s_neuron.Neuron(), fini=True)
        env.add('neu2', s_neuron.Neuron(), fini=True)
        env.dmon.share('neu0', env.neu0)
        env.dmon.share('neu1', env.neu1)
        env.dmon.share('neu2', env.neu2)
        port = link[1].get('port')
        # telepath proxies for each shared neuron
        env.add('neup0', s_telepath.openurl('tcp://127.0.0.1/neu0', port=port), fini=True)
        env.add('neup1', s_telepath.openurl('tcp://127.0.0.1/neu1', port=port), fini=True)
        env.add('neup2', s_telepath.openurl('tcp://127.0.0.1/neu2', port=port), fini=True)
        wai0 = TestWaiter(env.neu1, 1, 'neu:link:init')
        env.neu0.link(env.neup1)
        wai0.wait()
        # NOTE(review): this waiter watches neu1 while the link below
        # targets neu2 -- it presumably should watch env.neu2; confirm
        # before re-enabling these tests.
        wai0 = TestWaiter(env.neu1, 1, 'neu:link:init')
        env.neu0.link(env.neup2)
        wai0.wait()
        return env

    def test_neuron_route(self):
        """Route a message neu2 -> neu0 -> neu1 and check the payload."""
        neu0 = s_neuron.Neuron()
        neu1 = s_neuron.Neuron()
        neu2 = s_neuron.Neuron()
        neu0.link(neu1)
        neu0.link(neu2)
        wait0 = TestWaiter(neu1, 1, 'woot')
        neu2.route(neu1.getIden(), 'woot', x=10)
        events = wait0.wait()
        self.assertEqual(events[0][1].get('x'), 10)
        neu0.fini()
        neu1.fini()
        neu2.fini()

    def test_neuron_storm(self):
        """Storm a broadcast message and check it reaches a linked peer."""
        neu0 = s_neuron.Neuron()
        neu1 = s_neuron.Neuron()
        neu2 = s_neuron.Neuron()
        neu0.link(neu1)
        neu0.link(neu2)
        wai0 = TestWaiter(neu1, 1, 'woot')
        neu2.storm('woot', x=10)
        events = wai0.wait()
        self.assertEqual(events[0][1].get('x'), 10)
        neu0.fini()
        neu1.fini()
        neu2.fini()

    def test_neuron_ping(self):
        """Ping a remote neuron through a dendrite and inspect the reply."""
        env = self.getNeuNet()
        dend = s_neuron.Dendrite(env.neup1)
        info = dend.ping(env.neu2.getIden())
        self.assertIsNotNone(info.get('shared'))
        env.fini()

    def test_neuron_call(self):
        """Call a shared object synchronously and via an onfini callback."""
        env = self.getNeuNet()
        env.neu2.share('foo', FooBar())
        dend = s_neuron.Dendrite(env.neup1)
        path = '%s/foo' % (env.neup2.getIden(),)
        foo = dend.open(path)
        self.assertEqual(foo.foo(10), 30)
        e = threading.Event()
        data = {}
        def jobdone(job):
            data['ret'] = s_async.jobret(job)
            e.set()
        foo.foo(20, onfini=jobdone)
        e.wait(timeout=3)
        self.assertEqual(data.get('ret'), 40)
        # BUG FIX: was `envi.fini()` (NameError) -- the env must be torn down
        env.fini()

    def test_neuron_sess(self):
        """Session state must be per-client, not shared across dendrites."""
        env = self.getNeuNet()
        iden2 = env.neu2.getIden()
        env.neu2.share('foo', FooBar())
        dend0 = s_neuron.Client(env.neup1)
        dend1 = s_neuron.Client(env.neup1)
        self.assertIsNotNone(dend0.getSidByIden(iden2))
        foo0 = dend0.open((iden2, None), 'foo')
        foo1 = dend1.open((iden2, None), 'foo')
        self.assertIsNone(foo0.getsess('hehe'))
        self.assertIsNone(foo1.getsess('hehe'))
        foo0.setsess('hehe', 'lulz')
        # the value is visible on the client that set it, and only there
        self.assertIsNone(foo1.getsess('hehe'))
        self.assertEqual(foo0.getsess('hehe'), 'lulz')
        env.fini()

    def test_neuron_dend_find(self):
        """Placeholder: builds a client against the mesh and tears down."""
        env = self.getNeuNet()
        foo = FooBar()
        dend0 = s_neuron.Client(env.neup1)
        env.fini()

    def test_neuron_usepki_call(self):
        """Same flow as test_neuron_call, requesting the PKI-enabled mesh."""
        env = self.getNeuNet(usepki=True)
        env.neu2.share('foo', FooBar())
        dend = s_neuron.Dendrite(env.neup1)
        path = '%s/foo' % (env.neup2.getIden(),)
        foo = dend.open(path)
        self.assertEqual(foo.foo(10), 30)
        e = threading.Event()
        data = {}
        def jobdone(job):
            data['ret'] = s_async.jobret(job)
            e.set()
        foo.foo(20, onfini=jobdone)
        e.wait(timeout=3)
        self.assertEqual(data.get('ret'), 40)
        # BUG FIX: was `envi.fini()` (NameError)
        env.fini()
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for generating random numbers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_stateful_random_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util.tf_export import tf_export
# A seed for random ops (stateful and stateless) will always be 1024
# bits, all of which will be sent to the C++ code. The actual C++
# implementation of some algorithms may only use a lower part of the bits.
MAX_INT64 = 2**63 - 1
MIN_INT64 = -(2**63)
UINT64_SPAN = 2**64
# 'Variable' doesn't support uint32 or uint64 yet (due to reasons explained in
# b/111604096 and cl/171681867), so I use signed int here. I choose int64
# instead of int32 here because `VarHandleOp` doesn't support int32 on GPU.
SEED_TYPE = "int64"
SEED_MIN = MIN_INT64
SEED_MAX = MAX_INT64
SEED_UINT_SPAN = UINT64_SPAN
SEED_TYPE_BITS = 64
SEED_BIT_MASK = 0xFFFFFFFFFFFFFFFF
SEED_SIZE = 16  # in units of SEED_TYPE
# RNG state is stored with the same element type as seeds.
STATE_TYPE = SEED_TYPE
ALGORITHM_TYPE = STATE_TYPE
# Supported RNG algorithm identifiers.
RNG_ALG_PHILOX = 1
RNG_ALG_THREEFRY = 2
DEFAULT_ALGORITHM = RNG_ALG_PHILOX
# Per-algorithm state lengths, in units of STATE_TYPE
# (see _get_state_size below).
PHILOX_STATE_SIZE = 3
THREEFRY_STATE_SIZE = 2
def non_deterministic_ints(shape, dtype=dtypes.int64):
  """Non-deterministically generates some integers.

  This op may use some OS-provided source of non-determinism (e.g. an RNG), so
  each execution will give different results.

  Args:
    shape: the shape of the result.
    dtype: (optional) the dtype of the result.

  Returns:
    a tensor whose element values are non-deterministically chosen.
  """
  # Thin wrapper: delegates directly to the generated op.
  return gen_stateful_random_ops.non_deterministic_ints(
      shape=shape, dtype=dtype)
def _uint_to_int(n):
  """Reinterpret a uint64-range Python int as a signed int64-range value."""
  return n - SEED_UINT_SPAN if n > SEED_MAX else n
def _make_1d_state(state_size, seed):
  """Makes a 1-D RNG state.

  Args:
    state_size: an integer.
    seed: an integer or 1-D tensor.

  Returns:
    a 1-D tensor of shape [state_size] and dtype STATE_TYPE.
  """
  if sys.version_info >= (3, 0):
    int_types = (int,)
  else:
    int_types = (int, long)  # `long` exists only on Python 2
  if isinstance(seed, int_types):
    # Chop the Python integer (infinite precision) into SEED_TYPE_BITS-wide
    # chunks, least significant first.
    chunks = []
    remaining = seed
    for _ in range(state_size):
      chunks.append(remaining & SEED_BIT_MASK)
      remaining >>= SEED_TYPE_BITS
    # map uint64-range chunks into int64 range to avoid overflow error
    # from np.asarray
    seed = [_uint_to_int(chunk) for chunk in chunks]
  seed = np.asarray(seed, dtype=STATE_TYPE)
  if len(seed.shape) != 1:
    raise ValueError(
        "seed should only have one dimension; got shape: %s" % seed.shape)
  seed = seed[0:state_size]
  # Padding with zeros on the *left* if too short. Padding on the right would
  # cause a small seed to be used as the "counter" while the "key" is always
  # zero (for counter-based RNG algorithms), because in the current memory
  # layout counter is stored before key. In such a situation two RNGs with
  # two different small seeds may generate overlapping outputs.
  missing = state_size - seed.shape[0]
  if missing > 0:
    seed = np.pad(
        seed, [(missing, 0)],
        mode="constant",
        constant_values=0)
  assert seed.shape == (state_size,), "Wrong seed.shape: %s" % seed.shape
  return seed
def _get_state_size(alg):
  """Return the RNG state length (in STATE_TYPE units) for algorithm `alg`."""
  state_sizes = {
      RNG_ALG_PHILOX: PHILOX_STATE_SIZE,
      RNG_ALG_THREEFRY: THREEFRY_STATE_SIZE,
  }
  if alg not in state_sizes:
    raise ValueError("Unsupported algorithm id: %s" % alg)
  return state_sizes[alg]
def _make_state_from_seed(seed, alg):
  """Build the 1-D state tensor for algorithm `alg` from `seed`."""
  state_size = _get_state_size(alg)
  return _make_1d_state(state_size, seed)
@tf_export("random.experimental.create_rng_state")
def create_rng_state(seed, algorithm):
  """Creates a RNG state.

  Args:
    seed: an integer or 1-D tensor.
    algorithm: an integer representing the RNG algorithm.

  Returns:
    a 1-D tensor whose size depends on the algorithm.
  """
  # Equivalent to _make_state_from_seed(seed, algorithm).
  return _make_1d_state(_get_state_size(algorithm), seed)
def _shape_tensor(shape):
  """Convert to an int32 or int64 tensor, defaulting to int64 if empty."""
  # An empty tuple/list gives convert_to_tensor no elements to infer a
  # dtype from, so pin int64 explicitly in that case only.
  is_empty_sequence = isinstance(shape, (tuple, list)) and not shape
  dtype = dtypes.int64 if is_empty_sequence else None
  return ops.convert_to_tensor(shape, dtype=dtype, name="shape")
def _convert_to_state_tensor(t):
  """Converts `t` to a tensor of dtype STATE_TYPE."""
  if isinstance(t, list):
    # Mapping through _uint_to_int first avoids an out-of-range error from
    # ops.convert_to_tensor for values that overflow the signed state dtype.
    t = [_uint_to_int(v) for v in t]
  return ops.convert_to_tensor(t, dtype=STATE_TYPE)
@tf_export("random.experimental.Generator")
class Generator(tracking.AutoTrackable):
  """Random-number generator.

  It uses Variable to manage its internal state, and allows choosing a
  Random-Number-Generation (RNG) algorithm.

  CPU, GPU and TPU with the same algorithm and seed will generate the same
  integer random numbers. Float-point results (such as the output of `normal`)
  may have small numerical discrepancies between CPU and GPU.
  """

  def __init__(self, copy_from=None, state=None, alg=None):
    """Creates a generator.

    The new generator will be initialized by one of the following ways, with
    decreasing precedence:
    (1) If `copy_from` is not None, the new generator is initialized by copying
        information from another generator.
    (2) If `state` and `alg` are not None (they must be set together), the new
        generator is initialized by a state.

    Args:
      copy_from: a generator to be copied from.
      state: a vector of dtype STATE_TYPE representing the initial state of the
        RNG, whose length and semantics are algorithm-specific.
      alg: the RNG algorithm. Possible values are RNG_ALG_PHILOX for the
        Philox algorithm and RNG_ALG_THREEFRY for the ThreeFry
        algorithm (see paper 'Parallel Random Numbers: As Easy as 1, 2, 3'
        [https://www.thesalmons.org/john/random123/papers/random123sc11.pdf]).
    """
    if copy_from is not None:
      # All other arguments should be None
      assert (alg or state) is None
      self._state_var = variables.Variable(copy_from.state, dtype=STATE_TYPE,
                                           trainable=False)
      self._alg_var = copy_from.algorithm
    else:
      assert alg is not None and state is not None
      state = _convert_to_state_tensor(state)
      state.shape.assert_is_compatible_with([_get_state_size(alg)])
      self._state_var = variables.Variable(state, dtype=STATE_TYPE,
                                           trainable=False)
      self._alg_var = alg

  @classmethod
  def from_state(cls, state, alg):
    """Creates a generator from a state.

    See `__init__` for description of `state` and `alg`.

    Args:
      state: the new state.
      alg: the RNG algorithm.

    Returns:
      The new generator.
    """
    return cls(alg=alg, state=state)

  @classmethod
  def from_seed(cls, seed, alg=None):
    """Creates a generator from a seed.

    A seed is a 1024-bit unsigned integer represented either as a Python
    integer or a vector of integers. Seeds shorter than 1024-bit will be
    padded. The padding, the internal structure of a seed and the way a seed
    is converted to a state are all opaque (unspecified). The only semantics
    specification of seeds is that two different seeds are likely to produce
    two independent generators (but no guarantee).

    Args:
      seed: the seed for the RNG.
      alg: (optional) the RNG algorithm. If None, it will be auto-selected. See
        `__init__` for its possible values.

    Returns:
      The new generator.
    """
    if alg is None:
      # TODO(wangpeng): more sophisticated algorithm selection
      alg = DEFAULT_ALGORITHM
    state = create_rng_state(seed, alg)
    return cls(state=state, alg=alg)

  @classmethod
  def from_non_deterministic_state(cls, alg=None):
    """Creates a generator by non-deterministically initializing its state.

    The source of the non-determinism will be platform- and time-dependent.

    Args:
      alg: (optional) the RNG algorithm. If None, it will be auto-selected. See
        `__init__` for its possible values.

    Returns:
      The new generator.
    """
    if alg is None:
      # TODO(wangpeng): more sophisticated algorithm selection
      alg = DEFAULT_ALGORITHM
    # NOTE(review): the ints are drawn with dtype=SEED_TYPE; __init__ converts
    # the state to STATE_TYPE via _convert_to_state_tensor — confirm intended.
    state = non_deterministic_ints(shape=[_get_state_size(alg)],
                                   dtype=SEED_TYPE)
    return cls(state=state, alg=alg)

  @classmethod
  def from_key_counter(cls, key, counter, alg):
    """Creates a generator from a key and a counter.

    This constructor only applies if the algorithm is a counter-based algorithm.
    See method `key` for the meaning of "key" and "counter".

    Args:
      key: the key for the RNG, a scalar of type STATE_TYPE.
      counter: a vector of dtype STATE_TYPE representing the initial counter for
        the RNG, whose length is algorithm-specific.
      alg: the RNG algorithm. If None, it will be auto-selected. See
        `__init__` for its possible values.

    Returns:
      The new generator.
    """
    counter = _convert_to_state_tensor(counter)
    key = _convert_to_state_tensor(key)
    counter.shape.assert_is_compatible_with([_get_state_size(alg) - 1])
    key.shape.assert_is_compatible_with([])
    # State layout for counter-based RNGs is [counter..., key]: the counter is
    # stored before the key.
    key = array_ops.reshape(key, [1])
    state = array_ops.concat([counter, key], 0)
    return cls(state=state, alg=alg)

  def reset(self, state):
    """Resets the generator by a new state.

    See `__init__` for the meaning of "state".

    Args:
      state: the new state.
    """
    state = _convert_to_state_tensor(state)
    state.shape.assert_is_compatible_with([_get_state_size(self.algorithm)])
    self._state_var.assign(state)

  def reset_from_seed(self, seed):
    """Resets the generator by a new seed.

    See `from_seed` for the meaning of "seed".

    Args:
      seed: the new seed.
    """
    state = create_rng_state(seed, self.algorithm)
    self._state_var.assign(state)

  def reset_from_key_counter(self, key, counter):
    """Resets the generator by a new key-counter pair.

    See `from_key_counter` for the meaning of "key" and "counter".

    Args:
      key: the new key.
      counter: the new counter.
    """
    counter = _convert_to_state_tensor(counter)
    key = _convert_to_state_tensor(key)
    counter.shape.assert_is_compatible_with(
        [_get_state_size(self.algorithm) - 1])
    key.shape.assert_is_compatible_with([])
    # Same [counter..., key] layout as in `from_key_counter`.
    key = array_ops.reshape(key, [1])
    state = array_ops.concat([counter, key], 0)
    self._state_var.assign(state)

  @property
  def state(self):
    """The internal state of the RNG."""
    return self._state_var

  @property
  def algorithm(self):
    """The RNG algorithm."""
    return self._alg_var

  def _standard_normal(self, shape, dtype):
    # Raw stateful standard-normal kernel, parameterized by this generator's
    # state variable and algorithm.
    return gen_stateful_random_ops.stateful_standard_normal_v2(
        self.state.handle, self.algorithm, shape, dtype=dtype)

  @property
  def key(self):
    """The 'key' part of the state of a counter-based RNG.

    For a counter-base RNG algorithm such as Philox and ThreeFry (as
    described in paper 'Parallel Random Numbers: As Easy as 1, 2, 3'
    [https://www.thesalmons.org/john/random123/papers/random123sc11.pdf]),
    the RNG state consists of two parts: counter and key. The output is
    generated via the formula: output=hash(key, counter), i.e. a hashing of
    the counter parametrized by the key. Two RNGs with two different keys can
    be thought as generating two independent random-number streams (a stream
    is formed by increasing the counter).

    Returns:
      A scalar which is the 'key' part of the state, if the RNG algorithm is
      counter-based; otherwise it raises a ValueError.
    """
    alg = self.algorithm
    if alg == RNG_ALG_PHILOX or alg == RNG_ALG_THREEFRY:
      # The key is the last element of the [counter..., key] state vector.
      return self._state_var[-1]
    else:
      raise ValueError("Unsupported algorithm id: %s" % alg)

  def skip(self, delta):
    """Advance the counter of a counter-based RNG.

    Args:
      delta: the amount of advancement. The state of the RNG after
        `skip(n)` will be the same as that after `normal([n])`
        (or any other distribution). The actual increment added to the
        counter is an unspecified implementation detail.
    """
    gen_stateful_random_ops.rng_skip(self.state.handle, self.algorithm, delta)

  # The following functions return a tensor and as a side effect update
  # self._state_var.
  def normal(self, shape, mean=0.0, stddev=1.0, dtype=dtypes.float32,
             name=None):
    """Outputs random values from a normal distribution.

    Args:
      shape: A 1-D integer Tensor or Python array. The shape of the output
        tensor.
      mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal
        distribution.
      stddev: A 0-D Tensor or Python value of type `dtype`. The standard
        deviation of the normal distribution.
      dtype: The type of the output.
      name: A name for the operation (optional).

    Returns:
      A tensor of the specified shape filled with random normal values.
    """
    with ops.name_scope(name, "stateful_normal", [shape, mean, stddev]) as name:
      shape = _shape_tensor(shape)
      mean = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
      stddev = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
      rnd = self._standard_normal(shape, dtype=dtype)
      return math_ops.add(rnd * stddev, mean, name=name)

  def _truncated_normal(self, shape, dtype):
    # Raw stateful truncated-normal kernel for this generator.
    return gen_stateful_random_ops.stateful_truncated_normal(
        self.state.handle, self.algorithm, shape, dtype=dtype)

  def truncated_normal(self, shape,
                       mean=0.0,
                       stddev=1.0,
                       dtype=dtypes.float32,
                       name=None):
    """Outputs random values from a truncated normal distribution.

    The generated values follow a normal distribution with specified mean and
    standard deviation, except that values whose magnitude is more than
    2 standard deviations from the mean are dropped and re-picked.

    Args:
      shape: A 1-D integer Tensor or Python array. The shape of the output
        tensor.
      mean: A 0-D Tensor or Python value of type `dtype`. The mean of the
        truncated normal distribution.
      stddev: A 0-D Tensor or Python value of type `dtype`. The standard
        deviation of the normal distribution, before truncation.
      dtype: The type of the output.
      name: A name for the operation (optional).

    Returns:
      A tensor of the specified shape filled with random truncated normal
      values.
    """
    with ops.name_scope(
        name, "truncated_normal", [shape, mean, stddev]) as name:
      shape_tensor = _shape_tensor(shape)
      mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
      stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
      rnd = self._truncated_normal(shape_tensor, dtype=dtype)
      mul = rnd * stddev_tensor
      return math_ops.add(mul, mean_tensor, name=name)

  def _uniform(self, shape, dtype):
    # Raw stateful uniform kernel (floating-point path) for this generator.
    return gen_stateful_random_ops.stateful_uniform(
        self.state.handle, self.algorithm, shape=shape, dtype=dtype)

  def uniform(self, shape, minval=0, maxval=None,
              dtype=dtypes.float32, name=None):
    """Outputs random values from a uniform distribution.

    The generated values follow a uniform distribution in the range
    `[minval, maxval)`. The lower bound `minval` is included in the range, while
    the upper bound `maxval` is excluded. (For float numbers especially
    low-precision types like bfloat16, because of
    rounding, the result may sometimes include `maxval`.)

    For floats, the default range is `[0, 1)`. For ints, at least `maxval` must
    be specified explicitly.

    In the integer case, the random integers are slightly biased unless
    `maxval - minval` is an exact power of two. The bias is small for values of
    `maxval - minval` significantly smaller than the range of the output (either
    `2**32` or `2**64`).

    Args:
      shape: A 1-D integer Tensor or Python array. The shape of the output
        tensor.
      minval: A 0-D Tensor or Python value of type `dtype`. The lower bound on
        the range of random values to generate. Defaults to 0.
      maxval: A 0-D Tensor or Python value of type `dtype`. The upper bound on
        the range of random values to generate. Defaults to 1 if `dtype` is
        floating point.
      dtype: The type of the output.
      name: A name for the operation (optional).

    Returns:
      A tensor of the specified shape filled with random uniform values.

    Raises:
      ValueError: If `dtype` is integral and `maxval` is not specified.
    """
    dtype = dtypes.as_dtype(dtype)
    if maxval is None:
      if dtype.is_integer:
        raise ValueError("Must specify maxval for integer dtype %r" % dtype)
      maxval = 1
    with ops.name_scope(name, "stateful_uniform",
                        [shape, minval, maxval]) as name:
      shape = _shape_tensor(shape)
      minval = ops.convert_to_tensor(minval, dtype=dtype, name="min")
      maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max")
      if dtype.is_integer:
        # Integers use a dedicated kernel that takes the bounds directly.
        return gen_stateful_random_ops.stateful_uniform_int(
            self.state.handle, self.algorithm, shape=shape,
            minval=minval, maxval=maxval, name=name)
      else:
        # Floats are drawn in [0, 1) and rescaled to [minval, maxval).
        rnd = self._uniform(shape=shape, dtype=dtype)
        return math_ops.add(rnd * (maxval - minval), minval, name=name)

  def uniform_full_int(self, shape, dtype=dtypes.uint64, name=None):
    """Uniform distribution on an integer type's entire range.

    The other method `uniform` only covers the range [minval, maxval), which
    cannot be `dtype`'s full range because `maxval` is of type `dtype`.

    Args:
      shape: the shape of the output.
      dtype: (optional) the integer type, default to uint64.
      name: (optional) the name of the node.

    Returns:
      A tensor of random numbers of the required shape.
    """
    dtype = dtypes.as_dtype(dtype)
    with ops.name_scope(name, "stateful_uniform_full_int",
                        [shape]) as name:
      shape = _shape_tensor(shape)
      return gen_stateful_random_ops.stateful_uniform_full_int(
          self.state.handle, self.algorithm, shape=shape,
          dtype=dtype, name=name)

  def binomial(self, shape, counts, probs, dtype=dtypes.int32, name=None):
    """Outputs random values from a binomial distribution.

    The generated values follow a binomial distribution with specified count and
    probability of success parameters.

    Example:

    ```python
    counts = [10., 20.]
    # Probability of success.
    probs = [0.8, 0.9]
    rng = tf.random.experimental.Generator.from_seed(234)
    binomial_samples = rng.binomial(shape=[2], counts=counts, probs=probs)
    ```

    Args:
      shape: A 1-D integer Tensor or Python array. The shape of the output
        tensor.
      counts: A 0/1-D Tensor or Python value. The counts of the binomial
        distribution.
      probs: A 0/1-D Tensor or Python value. The probability of success for the
        binomial distribution.
      dtype: The type of the output. Default: tf.int32
      name: A name for the operation (optional).

    Returns:
      A tensor of the specified shape filled with random binomial values.
    """
    dtype = dtypes.as_dtype(dtype)
    with ops.name_scope(name, "binomial", [shape, counts, probs]) as name:
      counts = ops.convert_to_tensor(counts, name="counts")
      probs = ops.convert_to_tensor(probs, name="probs")
      shape_tensor = _shape_tensor(shape)
      return gen_stateful_random_ops.stateful_random_binomial(
          self.state.handle,
          self.algorithm,
          shape=shape_tensor,
          counts=counts,
          probs=probs,
          dtype=dtype,
          name=name)

  # TODO(wangpeng): implement other distributions

  def _make_int64_keys(self, shape=()):
    # New independent keys are generated via
    # `new_key[i] = hash(old_key, counter+i)`, which is exactly what
    # `uniform_full_int(dtype=int64)` does for PhiloxRandom_64_128_128 and
    # ThreeFry_64_64_64.
    return self.uniform_full_int(shape=shape, dtype=dtypes.int64)

  def make_seeds(self, count=1):
    """Generates seeds for stateless random ops.

    For example:

    ```python
    seeds = get_global_generator().make_seeds(count=10)
    for i in range(10):
      seed = seeds[:, i]
      numbers = stateless_random_normal(shape=[2, 3], seed=seed)
      ...
    ```

    Args:
      count: the number of seed pairs (note that stateless random ops need a
        pair of seeds to invoke).

    Returns:
      A tensor of shape [2, count] and dtype int64.
    """
    alg = self.algorithm
    if alg == RNG_ALG_PHILOX or alg == RNG_ALG_THREEFRY:
      keys = self._make_int64_keys(shape=[count])
      # The two seeds for stateless random ops don't have individual semantics
      # and are scrambled together, so setting one to zero is fine.
      zeros = array_ops.zeros_like(keys)
      return array_ops.stack([keys, zeros])
    else:
      raise ValueError("Unsupported algorithm id: %s" % alg)

  def split(self, count=1):
    """Returns a list of independent `Generator` objects.

    Two generators are independent of each other in the sense that the
    random-number streams they generate don't have statistically detectable
    correlations. The new generators are also independent of the old one.
    The old generator's state will be changed (like other random-number
    generating methods), so two calls of `split` will return different
    new generators.

    For example:

    ```python
    gens = get_global_generator().split(count=10)
    for gen in gens:
      numbers = gen.normal(shape=[2, 3])
      # ...
    gens2 = get_global_generator().split(count=10)
    # gens2 will be different from gens
    ```

    The new generators will be put on the current device (possible different
    from the old generator's), for example:

    ```python
    with tf.device("/device:CPU:0"):
      gen = Generator.from_seed(1234)  # gen is on CPU
    with tf.device("/device:GPU:0"):
      gens = gen.split(count=10)  # gens are on GPU
    ```

    Args:
      count: the number of generators to return.

    Returns:
      A list (length `count`) of `Generator` objects independent of each other.
      The new generators have the same RNG algorithm as the old one.
    """
    def _key_to_state(alg, key):
      # Padding with zeros on the left. The zeros will be the counter.
      return [0] * (_get_state_size(alg) - 1) + [key]

    alg = self.algorithm
    if alg == RNG_ALG_PHILOX or alg == RNG_ALG_THREEFRY:
      keys = self._make_int64_keys(shape=[count])
      return [Generator(state=_key_to_state(alg, key), alg=alg)
              for key in keys.numpy()]
    else:
      raise ValueError("Unsupported algorithm id: %s" % alg)
# It's not safe to create TF ops before `init_google` is called, so this is
# initialized to None and gets a value the first time `get_global_generator`
# is called.
global_generator = None
@tf_export("random.experimental.get_global_generator")
def get_global_generator():
  """Returns the global generator, creating it lazily on first access."""
  global global_generator
  if global_generator is not None:
    return global_generator
  global_generator = Generator.from_non_deterministic_state()
  return global_generator
@tf_export("random.experimental.set_global_generator")
def set_global_generator(generator):
  """Replaces the global generator with another `Generator` object.

  Note that swapping in a replacement generator interacts poorly with
  `tf.function`: (1) `tf.function` restricts Variable creation, so a new
  Generator (and the Variable inside it) can't be freely created within a
  traced function; (2) an already-traced function may have captured the old
  generator and keep using it, and since traced functions hold only weak
  references to variables, dropping the old generator and calling such a
  function again may raise an error, as demonstrated by
  random_test.py/RandomTest.testResetGlobalGeneratorBadWithDefun .

  Args:
    generator: the new `Generator` object.
  """
  global global_generator
  global_generator = generator
| |
#!/usr/bin/env python2
'''
Unit tests for yedit
'''
import unittest
import os
# Removing invalid variable names for tests so that I can
# keep them brief
# pylint: disable=invalid-name,no-name-in-module
# Disable import-error b/c our libraries aren't loaded in jenkins
# pylint: disable=import-error
from yedit import Yedit
from yedit import YeditException
# pylint: disable=too-many-public-methods
# Silly pylint, moar tests!
class YeditTest(unittest.TestCase):
    '''
    Test class for yedit
    '''
    # Baseline document written to disk by setUp before every test.
    data = {'a': 'a',
            'b': {'c': {'d': [{'e': 'x'}, 'f', 'g']}},
           }
    filename = 'yedit_test.yml'

    def setUp(self):
        ''' setup method will create a file and set to known configuration '''
        yed = Yedit(YeditTest.filename)
        yed.yaml_dict = YeditTest.data
        yed.write()

    def test_load(self):
        ''' Testing a get '''
        yed = Yedit('yedit_test.yml')
        self.assertEqual(yed.yaml_dict, self.data)

    def test_write(self):
        ''' Testing a simple write '''
        yed = Yedit('yedit_test.yml')
        yed.put('key1', 1)
        yed.write()
        # dict.has_key was removed in python3; `in` works on both 2 and 3.
        self.assertIn('key1', yed.yaml_dict)
        self.assertEqual(yed.yaml_dict['key1'], 1)

    def test_write_x_y_z(self):
        '''Testing a write of multilayer key'''
        yed = Yedit('yedit_test.yml')
        yed.put('x.y.z', 'modified')
        yed.write()
        yed.load()
        self.assertEqual(yed.get('x.y.z'), 'modified')

    def test_delete_a(self):
        '''Testing a simple delete '''
        yed = Yedit('yedit_test.yml')
        yed.delete('a')
        yed.write()
        yed.load()
        self.assertNotIn('a', yed.yaml_dict)

    def test_delete_b_c(self):
        '''Testing delete of layered key '''
        yed = Yedit('yedit_test.yml', separator=':')
        yed.delete('b:c')
        yed.write()
        yed.load()
        self.assertIn('b', yed.yaml_dict)
        self.assertNotIn('c', yed.yaml_dict['b'])

    def test_create(self):
        '''Testing a create '''
        os.unlink(YeditTest.filename)
        yed = Yedit('yedit_test.yml')
        yed.create('foo', 'bar')
        yed.write()
        yed.load()
        self.assertIn('foo', yed.yaml_dict)
        self.assertEqual(yed.yaml_dict['foo'], 'bar')

    def test_create_content(self):
        '''Testing a create with content '''
        content = {"foo": "bar"}
        yed = Yedit("yedit_test.yml", content)
        yed.write()
        yed.load()
        self.assertIn('foo', yed.yaml_dict)
        # Was assertTrue(value, msg), which always passed; assertEqual is the
        # intended check.
        self.assertEqual(yed.yaml_dict['foo'], 'bar')

    def test_array_insert(self):
        '''Testing an insert into an array '''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', 'inject')
        self.assertEqual(yed.get('b:c:d[0]'), 'inject')

    def test_array_insert_first_index(self):
        '''Testing that an array insert shifts the first element '''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', 'inject')
        self.assertEqual(yed.get('b:c:d[1]'), 'f')

    def test_array_insert_second_index(self):
        '''Testing that an array insert shifts the second element '''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', 'inject')
        self.assertEqual(yed.get('b:c:d[2]'), 'g')

    def test_dict_array_dict_access(self):
        '''Testing access through dict/array/dict nesting'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
        self.assertEqual(yed.get('b:c:d[0]:[0]:x:y'), 'inject')

    def test_dict_array_dict_replace(self):
        '''Testing multilevel replace'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
        yed.put('b:c:d[0]:[0]:x:y', 'testing')
        self.assertIn('b', yed.yaml_dict)
        self.assertIn('c', yed.yaml_dict['b'])
        self.assertIn('d', yed.yaml_dict['b']['c'])
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list))
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list))
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict))
        self.assertIn('y', yed.yaml_dict['b']['c']['d'][0][0]['x'])
        # Was assertTrue(value, msg), which always passed; assertEqual is the
        # intended check.
        self.assertEqual(yed.yaml_dict['b']['c']['d'][0][0]['x']['y'],
                         'testing')

    def test_dict_array_dict_remove(self):
        '''Testing multilevel delete'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
        yed.delete('b:c:d[0]:[0]:x:y')
        self.assertIn('b', yed.yaml_dict)
        self.assertIn('c', yed.yaml_dict['b'])
        self.assertIn('d', yed.yaml_dict['b']['c'])
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list))
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list))
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict))
        self.assertNotIn('y', yed.yaml_dict['b']['c']['d'][0][0]['x'])

    def test_key_exists_in_dict(self):
        '''Testing exist in dict'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
        self.assertTrue(yed.exists('b:c', 'd'))

    def test_key_exists_in_list(self):
        '''Testing exist in list'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
        self.assertTrue(yed.exists('b:c:d', [{'x': {'y': 'inject'}}]))
        self.assertFalse(yed.exists('b:c:d', [{'x': {'y': 'test'}}]))

    def test_update_to_list_with_index(self):
        '''Testing update to list with index'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('x:y:z', [1, 2, 3])
        yed.update('x:y:z', [5, 6], index=2)
        self.assertEqual(yed.get('x:y:z'), [1, 2, [5, 6]])
        self.assertTrue(yed.exists('x:y:z', [5, 6]))
        self.assertFalse(yed.exists('x:y:z', 4))

    def test_update_to_list_with_curr_value(self):
        '''Testing update to list with curr_value'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('x:y:z', [1, 2, 3])
        yed.update('x:y:z', [5, 6], curr_value=3)
        self.assertEqual(yed.get('x:y:z'), [1, 2, [5, 6]])
        self.assertTrue(yed.exists('x:y:z', [5, 6]))
        self.assertFalse(yed.exists('x:y:z', 4))

    def test_update_to_list(self):
        '''Testing update to list'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('x:y:z', [1, 2, 3])
        yed.update('x:y:z', [5, 6])
        self.assertEqual(yed.get('x:y:z'), [1, 2, 3, [5, 6]])
        self.assertTrue(yed.exists('x:y:z', [5, 6]))
        self.assertFalse(yed.exists('x:y:z', 4))

    def test_append_twice_to_list(self):
        '''Testing append to list'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('x:y:z', [1, 2, 3])
        yed.append('x:y:z', [5, 6])
        yed.append('x:y:z', [5, 6])
        self.assertEqual(yed.get('x:y:z'), [1, 2, 3, [5, 6], [5, 6]])
        # pylint: disable=maybe-no-member
        self.assertEqual(2, yed.get('x:y:z').count([5, 6]))
        self.assertFalse(yed.exists('x:y:z', 4))

    def test_add_item_to_dict(self):
        '''Testing update to dict'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('x:y:z', {'a': 1, 'b': 2})
        yed.update('x:y:z', {'c': 3, 'd': 4})
        self.assertEqual(yed.get('x:y:z'), {'a': 1, 'b': 2, 'c': 3, 'd': 4})
        self.assertTrue(yed.exists('x:y:z', {'c': 3}))

    def test_first_level_dict_with_none_value(self):
        '''test dict value with none value'''
        yed = Yedit(content={'a': None}, separator=":")
        yed.put('a:b:c', 'test')
        self.assertEqual(yed.get('a:b:c'), 'test')
        # Was assertTrue(value, msg), which always passed; assertEqual is the
        # intended check.
        self.assertEqual(yed.get('a:b'), {'c': 'test'})

    def test_adding_yaml_variable(self):
        '''test storing a jinja-style yaml variable'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('z:y', '{{test}}')
        self.assertEqual(yed.get('z:y'), '{{test}}')

    def test_keys_with_underscore(self):
        '''test keys containing underscores'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('z_:y_y', {'test': '{{test}}'})
        self.assertEqual(yed.get('z_:y_y'), {'test': '{{test}}'})

    def test_first_level_array_update(self):
        '''test update on top level array'''
        yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}], separator=':')
        yed.update('', {'c': 4})
        self.assertIn({'c': 4}, yed.get(''))

    def test_first_level_array_delete(self):
        '''test remove top level key'''
        yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}])
        yed.delete('')
        self.assertNotIn({'b': 3}, yed.get(''))

    def test_first_level_array_get(self):
        '''test get on top level array'''
        yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}])
        yed.get('')
        self.assertEqual([{'a': 1}, {'b': 2}, {'b': 3}], yed.yaml_dict)

    def test_pop_list_item(self):
        '''test popping a list item by value'''
        yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}], separator=':')
        yed.pop('', {'b': 2})
        self.assertEqual([{'a': 1}, {'b': 3}], yed.yaml_dict)

    def test_pop_list_item_2(self):
        '''test popping a list item by index'''
        # list(...) is required on python3, where range() is not a list and
        # has no pop() method; it is a no-op copy on python2.
        z = list(range(10))
        yed = Yedit(content=z, separator=':')
        yed.pop('', 5)
        z.pop(5)
        self.assertEqual(z, yed.yaml_dict)

    def test_pop_dict_key(self):
        '''test popping a dict key'''
        yed = Yedit(content={'a': {'b': {'c': 1, 'd': 2}}}, separator='#')
        yed.pop('a#b', 'c')
        self.assertEqual({'a': {'b': {'d': 2}}}, yed.yaml_dict)

    def test_accessing_path_with_unexpected_objects(self):
        '''test providing source path objects that differ from current object state'''
        yed = Yedit(content={'a': {'b': {'c': ['d', 'e']}}})
        with self.assertRaises(YeditException):
            yed.put('a.b.c.d', 'x')

    def test_creating_new_objects_with_embedded_list(self):
        '''test creating new objects with an embedded list in the creation path'''
        yed = Yedit(content={'a': {'b': 12}})
        with self.assertRaises(YeditException):
            yed.put('new.stuff[0].here', 'value')

    def test_creating_new_objects_with_trailing_list(self):
        '''test creating new object(s) where the final piece is a list'''
        yed = Yedit(content={'a': {'b': 12}})
        with self.assertRaises(YeditException):
            yed.put('new.stuff.here[0]', 'item')

    def tearDown(self):
        '''TearDown method'''
        os.unlink(YeditTest.filename)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| |
"""
Functions for acting on a axis of an array.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
def axis_slice(a, start=None, stop=None, step=None, axis=-1):
    """Take a slice along axis 'axis' from 'a'.

    Parameters
    ----------
    a : numpy.ndarray
        The array to be sliced.
    start, stop, step : int or None
        The slice parameters.
    axis : int, optional
        The axis of `a` to be sliced.

    Examples
    --------
    >>> a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    >>> axis_slice(a, start=0, stop=1, axis=1)
    array([[1],
           [4],
           [7]])
    >>> axis_slice(a, start=1, axis=0)
    array([[4, 5, 6],
           [7, 8, 9]])

    Notes
    -----
    The keyword arguments start, stop and step are used by calling
    slice(start, stop, step). This implies axis_slice() does not
    handle its arguments the exactly the same as indexing. To select
    a single index k, for example, use
        axis_slice(a, start=k, stop=k+1)
    In this case, the length of the axis 'axis' in the result will
    be 1; the trivial dimension is not removed. (Use numpy.squeeze()
    to remove trivial axes.)
    """
    # Full slices everywhere except the requested axis.
    indexer = [slice(None)] * a.ndim
    indexer[axis] = slice(start, stop, step)
    return a[tuple(indexer)]
def axis_reverse(a, axis=-1):
    """Reverse the 1-D slices of `a` along axis `axis`.

    Returns axis_slice(a, step=-1, axis=axis).
    """
    # Equivalent to axis_slice(a, step=-1, axis=axis), written inline.
    rev = [slice(None)] * a.ndim
    rev[axis] = slice(None, None, -1)
    return a[tuple(rev)]
def odd_ext(x, n, axis=-1):
    """
    Odd extension at the boundaries of an array

    Generate a new ndarray by making an odd extension of `x` along an axis.

    Parameters
    ----------
    x : ndarray
        The array to be extended.
    n : int
        The number of elements by which to extend `x` at each end of the axis.
    axis : int, optional
        The axis along which to extend `x`. Default is -1.

    Examples
    --------
    >>> from scipy.signal._arraytools import odd_ext
    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
    >>> odd_ext(a, 2)
    array([[-1,  0,  1,  2,  3,  4,  5,  6,  7],
           [-4, -1,  0,  1,  4,  9, 16, 23, 28]])
    """
    if n < 1:
        return x
    if n > x.shape[axis] - 1:
        raise ValueError(("The extension length n (%d) is too big. " +
                          "It must not exceed x.shape[axis]-1, which is %d.")
                         % (n, x.shape[axis] - 1))

    def take(start, stop=None, step=None):
        # Slice `x` along `axis` only; full slices on every other axis.
        sl = [slice(None)] * x.ndim
        sl[axis] = slice(start, stop, step)
        return x[tuple(sl)]

    # "180 degree rotation" about each endpoint: 2*end - mirrored interior.
    left_end = take(0, 1)
    left_ext = take(n, 0, -1)
    right_end = take(-1)
    right_ext = take(-2, -(n + 2), -1)
    return np.concatenate((2 * left_end - left_ext,
                           x,
                           2 * right_end - right_ext),
                          axis=axis)
def even_ext(x, n, axis=-1):
    """
    Even extension at the boundaries of an array

    Generate a new ndarray by making an even extension of `x` along an axis.

    Parameters
    ----------
    x : ndarray
        The array to be extended.
    n : int
        The number of elements by which to extend `x` at each end of the axis.
    axis : int, optional
        The axis along which to extend `x`. Default is -1.

    Examples
    --------
    >>> from scipy.signal._arraytools import even_ext
    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
    >>> even_ext(a, 2)
    array([[ 3,  2,  1,  2,  3,  4,  5,  4,  3],
           [ 4,  1,  0,  1,  4,  9, 16,  9,  4]])
    """
    if n < 1:
        return x
    if n > x.shape[axis] - 1:
        raise ValueError(("The extension length n (%d) is too big. " +
                          "It must not exceed x.shape[axis]-1, which is %d.")
                         % (n, x.shape[axis] - 1))

    def take(start, stop=None, step=None):
        # Slice `x` along `axis` only; full slices on every other axis.
        sl = [slice(None)] * x.ndim
        sl[axis] = slice(start, stop, step)
        return x[tuple(sl)]

    # "Mirror image" at each boundary (endpoint not repeated).
    left_ext = take(n, 0, -1)
    right_ext = take(-2, -(n + 2), -1)
    return np.concatenate((left_ext, x, right_ext), axis=axis)
def const_ext(x, n, axis=-1):
    """
    Constant extension at the boundaries of an array

    Generate a new ndarray that is a constant extension of `x` along an axis.
    The extension repeats the values at the first and last element of
    the axis.

    Parameters
    ----------
    x : ndarray
        The array to be extended.
    n : int
        The number of elements by which to extend `x` at each end of the axis.
    axis : int, optional
        The axis along which to extend `x`. Default is -1.

    Examples
    --------
    >>> from scipy.signal._arraytools import const_ext
    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
    >>> const_ext(a, 2)
    array([[ 1,  1,  1,  2,  3,  4,  5,  5,  5],
           [ 0,  0,  0,  1,  4,  9, 16, 16, 16]])
    """
    if n < 1:
        return x

    def take(start, stop=None):
        # Slice `x` along `axis` only; full slices on every other axis.
        sl = [slice(None)] * x.ndim
        sl[axis] = slice(start, stop)
        return x[tuple(sl)]

    # Broadcasting a ones-array of length n along `axis` replicates the
    # endpoint values n times while preserving dtype.
    tile_shape = [1] * x.ndim
    tile_shape[axis] = n
    tile = np.ones(tile_shape, dtype=x.dtype)
    return np.concatenate((tile * take(0, 1),
                           x,
                           tile * take(-1)),
                          axis=axis)
def zero_ext(x, n, axis=-1):
    """
    Zero padding at the boundaries of an array.

    Generate a new ndarray equal to `x` with `n` zeros prepended and
    `n` zeros appended along `axis`.

    Parameters
    ----------
    x : ndarray
        The array to be extended.
    n : int
        The number of zeros added at each end of the axis.
    axis : int, optional
        The axis along which to extend `x`.  Default is -1.

    Examples
    --------
    >>> from scipy.signal._arraytools import zero_ext
    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
    >>> zero_ext(a, 2)
    array([[ 0,  0,  1,  2,  3,  4,  5,  0,  0],
           [ 0,  0,  0,  1,  4,  9, 16,  0,  0]])
    """
    if n < 1:
        return x
    pad_shape = list(x.shape)
    pad_shape[axis] = n
    pad = np.zeros(pad_shape, dtype=x.dtype)
    return np.concatenate((pad, x, pad), axis=axis)
| |
# +--------------------------------------------------------------------------+
# | Licensed Materials - Property of IBM |
# | |
# | (C) Copyright IBM Corporation 2009-2013. |
# +--------------------------------------------------------------------------+
# | This module complies with Django 1.0 and is |
# | Licensed under the Apache License, Version 2.0 (the "License"); |
# | you may not use this file except in compliance with the License. |
# | You may obtain a copy of the License at |
# | http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable |
# | law or agreed to in writing, software distributed under the License is |
# | distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
# | KIND, either express or implied. See the License for the specific |
# | language governing permissions and limitations under the License. |
# +--------------------------------------------------------------------------+
# | Authors: Ambrish Bhargava, Tarun Pasrija, Rahul Priyadarshi |
# +--------------------------------------------------------------------------+
"""
Informix database backend for Django.
Requires: ibm_db_dbi (http://pypi.python.org/pypi/ibm_db) for python
"""
import sys
_IS_JYTHON = sys.platform.startswith('java')
from django.core.exceptions import ImproperlyConfigured
# Importing class from base module of django.db.backends
from django.db.backends import BaseDatabaseFeatures
from django.db.backends import BaseDatabaseWrapper
from django.db.backends import BaseDatabaseValidation
from django.db.backends.signals import connection_created
# Importing internal classes from ibm_db_django package.
from ibm_db_django.client import DatabaseClient
from ibm_db_django.creation import DatabaseCreation
from ibm_db_django.introspection import DatabaseIntrospection
from ibm_db_django.operations import DatabaseOperations
if not _IS_JYTHON:
import ibm_db_django.pybase as Base
import ibm_db_dbi as Database
else:
import ibm_db_django.jybase as Base
from com.ziclix.python.sql import zxJDBC as Database
# For checking django's version
from django import VERSION as djangoVersion
# Django >= 1.7 introduced the schema-editor API; import our implementation
# only on versions that can use it.
if djangoVersion[0:2] >= (1, 7):
    from ibm_db_django.schemaEditor import DB2SchemaEditor
# Re-export the driver's PEP 249 exception classes at module level, as the
# Django backend contract requires.
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
if djangoVersion[0:2] >= ( 1, 6 ):
    # Django 1.6+ expects the full PEP 249 exception hierarchy on the backend.
    Error = Database.Error
    InterfaceError = Database.InterfaceError
    DataError = Database.DataError
    OperationalError = Database.OperationalError
    InternalError = Database.InternalError
    ProgrammingError = Database.ProgrammingError
    NotSupportedError = Database.NotSupportedError
# The connection attribute that holds the server product name differs between
# the zxJDBC (Jython) driver and ibm_db_dbi.
if _IS_JYTHON:
    dbms_name = 'dbname'
else:
    dbms_name = 'dbms_name'
class DatabaseFeatures(BaseDatabaseFeatures):
    """Feature flags describing the capabilities of this backend."""
    can_use_chunked_reads = True
    # Save point is supported by DB2.
    uses_savepoints = True
    # Custom query class has been implemented
    # django.db.backends.db2.query.query_class.DB2QueryClass
    uses_custom_query_class = True
    # transaction is supported by DB2
    supports_transactions = True
    supports_tablespaces = True
    uppercases_column_names = True
    interprets_empty_strings_as_nulls = False
    allows_primary_key_0 = True
    can_defer_constraint_checks = False
    supports_forward_references = False
    requires_rollback_on_dirty_transaction = True
    supports_regex_backreferencing = True
    supports_timezones = False
    # NOTE: toggled per-connection in DatabaseWrapper.get_new_connection()
    # depending on the server product reported by the driver.
    has_bulk_insert = False
    has_select_for_update = True
    supports_long_model_names = False
    can_distinct_on_fields = False
    supports_paramstyle_pyformat = False
    supports_sequence_reset = True
    #DB2 doesn't take default values as parameter
    requires_literal_defaults = True
    has_case_insensitive_like = True
    # Introspection capabilities of this backend.
    can_introspect_big_integer_field = True
    can_introspect_boolean_field = False
    can_introspect_positive_integer_field = False
    can_introspect_small_integer_field = True
    can_introspect_null = True
    can_introspect_max_length = True
    can_introspect_ip_address_field = False
    can_introspect_time_field = True
class DatabaseValidation(BaseDatabaseValidation):
    # Need to do validation for DB2 and ibm_db version
    def validate_field(self, errors, opts, f):
        """No backend-specific field validation is performed yet."""
        pass
class DatabaseWrapper(BaseDatabaseWrapper):
    """
    This is the base class for Informix backend support for Django. The underlying
    wrapper is IBM_DB_DBI (latest version can be downloaded from http://code.google.com/p/ibm-db/ or
    http://pypi.python.org/pypi/ibm_db).
    """
    vendor = 'Informix'
    # SQL fragments used for Django field lookups; LIKE-based lookups use
    # backslash as the pattern escape character.
    operators = {
        "exact": "= %s",
        "iexact": "LIKE %s ESCAPE '\\'",
        "contains": "LIKE %s ESCAPE '\\'",
        "icontains": "LIKE %s ESCAPE '\\'",
        "gt": "> %s",
        "gte": ">= %s",
        "lt": "< %s",
        "lte": "<= %s",
        "startswith": "LIKE %s ESCAPE '\\'",
        "endswith": "LIKE %s ESCAPE '\\'",
        "istartswith": "LIKE %s ESCAPE '\\'",
        "iendswith": "LIKE %s ESCAPE '\\'",
    }
    if djangoVersion[0:2] >= ( 1, 6 ):
        # Django >= 1.6 expects the driver module to be exposed on the wrapper.
        Database = Database
    # Constructor of DB2 backend support. Initializing all other classes.
    def __init__(self, *args):
        super(DatabaseWrapper, self).__init__(*args)
        self.ops = DatabaseOperations(self)
        # The component-class constructors changed signature across Django
        # releases, so pick the right call for the installed version.
        if djangoVersion[0:2] <= ( 1, 0 ):
            self.client = DatabaseClient()
        else:
            self.client = DatabaseClient(self)
        if djangoVersion[0:2] <= ( 1, 2 ):
            self.features = DatabaseFeatures()
        else:
            self.features = DatabaseFeatures(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        if djangoVersion[0:2] <= ( 1, 1 ):
            self.validation = DatabaseValidation()
        else:
            self.validation = DatabaseValidation(self)
        # Driver-specific helper (ibm_db_dbi on CPython, zxJDBC on Jython).
        self.databaseWrapper = Base.DatabaseWrapper()
    # Method to check if connection is live or not.
    def __is_connection(self):
        return self.connection is not None
    # To get dict of connection parameters
    def get_connection_params(self):
        """Build the keyword arguments used to open a driver connection.

        Reads the database settings in the format appropriate for the
        installed Django version and returns them as a dict. Raises
        ImproperlyConfigured when no database name is given.
        """
        kwargs = {}
        if djangoVersion[0:2] <= ( 1, 0 ):
            database_name = self.settings.DATABASE_NAME
            database_user = self.settings.DATABASE_USER
            database_pass = self.settings.DATABASE_PASSWORD
            database_host = self.settings.DATABASE_HOST
            database_port = self.settings.DATABASE_PORT
            database_options = self.settings.DATABASE_OPTIONS
        elif djangoVersion[0:2] <= ( 1, 1 ):
            settings_dict = self.settings_dict
            database_name = settings_dict['DATABASE_NAME']
            database_user = settings_dict['DATABASE_USER']
            database_pass = settings_dict['DATABASE_PASSWORD']
            database_host = settings_dict['DATABASE_HOST']
            database_port = settings_dict['DATABASE_PORT']
            database_options = settings_dict['DATABASE_OPTIONS']
        else:
            settings_dict = self.settings_dict
            database_name = settings_dict['NAME']
            database_user = settings_dict['USER']
            database_pass = settings_dict['PASSWORD']
            database_host = settings_dict['HOST']
            database_port = settings_dict['PORT']
            database_options = settings_dict['OPTIONS']
        if database_name != '' and isinstance(database_name, str):
            kwargs['database'] = database_name
        else:
            raise ImproperlyConfigured("Please specify the valid database Name to connect to")
        if isinstance(database_user, str):
            kwargs['user'] = database_user
        if isinstance(database_pass, str):
            kwargs['password'] = database_pass
        if isinstance(database_host, str):
            kwargs['host'] = database_host
        if isinstance(database_port, str):
            kwargs['port'] = database_port
        if isinstance(database_options, dict):
            kwargs['options'] = database_options
        if djangoVersion[0:2] <= ( 1, 0 ):
            if hasattr(self.settings, 'PCONNECT'):
                kwargs['PCONNECT'] = self.settings.PCONNECT
        else:
            if 'PCONNECT' in settings_dict:
                kwargs['PCONNECT'] = settings_dict['PCONNECT']
        return kwargs
    # To get new connection from Database
    def get_new_connection(self, conn_params):
        connection = self.databaseWrapper.get_new_connection(conn_params)
        # Bulk insert is not available when talking to a DB2 server.
        if getattr(connection, dbms_name) == 'DB2':
            self.features.has_bulk_insert = False
        else:
            self.features.has_bulk_insert = True
        return connection
    # Over-riding _cursor method to return DB2 cursor.
    if djangoVersion[0:2] < ( 1, 6 ):
        def _cursor(self, settings=None):
            if not self.__is_connection():
                if djangoVersion[0:2] <= ( 1, 0 ):
                    self.settings = settings
                self.connection = self.get_new_connection(self.get_connection_params())
                cursor = self.databaseWrapper._cursor(self.connection)
                # The connection_created signal grew a `connection` kwarg
                # after Django 1.2.2.
                if djangoVersion[0:3] <= ( 1, 2, 2 ):
                    connection_created.send(sender=self.__class__)
                else:
                    connection_created.send(sender=self.__class__, connection=self)
            else:
                cursor = self.databaseWrapper._cursor(self.connection)
            return cursor
    else:
        def create_cursor(self):
            return self.databaseWrapper._cursor(self.connection)
        def init_connection_state(self):
            pass
        def is_usable(self):
            if self.databaseWrapper.is_active(self.connection):
                return True
            else:
                return False
        def _set_autocommit(self, autocommit):
            self.connection.set_autocommit(autocommit)
    def close(self):
        if djangoVersion[0:2] >= ( 1, 5 ):
            self.validate_thread_sharing()
        if self.connection is not None:
            self.databaseWrapper.close(self.connection)
            self.connection = None
    def get_server_version(self):
        # Opening a cursor establishes the connection if necessary.
        if not self.connection:
            self.cursor()
        return self.databaseWrapper.get_server_version(self.connection)
    def schema_editor(self, *args, **kwargs):
        return DB2SchemaEditor(self, *args, **kwargs)
| |
"""
Utility classes and functions for the polynomial modules.
This module provides: error and warning objects; a polynomial base class;
and some routines used in both the `polynomial` and `chebyshev` modules.
Error objects
-------------
.. autosummary::
:toctree: generated/
PolyError base class for this sub-package's errors.
PolyDomainError raised when domains are mismatched.
Warning objects
---------------
.. autosummary::
:toctree: generated/
RankWarning raised in least-squares fit for rank-deficient matrix.
Base class
----------
.. autosummary::
:toctree: generated/
PolyBase Obsolete base class for the polynomial classes. Do not use.
Functions
---------
.. autosummary::
:toctree: generated/
as_series convert list of array_likes into 1-D arrays of common type.
trimseq remove trailing zeros.
trimcoef remove small trailing coefficients.
getdomain return the domain appropriate for a given set of abscissae.
mapdomain maps points between domains.
mapparms parameters of the linear map between domains.
"""
from __future__ import division, absolute_import, print_function
import operator
import warnings
import numpy as np
__all__ = [
'RankWarning', 'PolyError', 'PolyDomainError', 'as_series', 'trimseq',
'trimcoef', 'getdomain', 'mapdomain', 'mapparms', 'PolyBase']
#
# Warnings and Exceptions
#
class RankWarning(UserWarning):
    """Issued by chebfit when the design matrix is rank deficient.

    Also emitted by the generic least-squares helper ``_fit`` when the
    computed rank is smaller than the requested order.
    """
    pass
class PolyError(Exception):
    """Base class for errors in this module.

    All exceptions specific to this sub-package derive from this class
    (e.g. `PolyDomainError`).
    """
    pass
class PolyDomainError(PolyError):
    """Issued by the generic Poly class when two domains don't match.

    This is raised when a binary operation is passed Poly objects with
    different domains.
    """
    pass
#
# Base class for all polynomial types
#
class PolyBase(object):
    """
    Base class for all polynomial types.

    Deprecated in numpy 1.9.0, use the abstract
    ABCPolyBase class instead. Note that the latter
    requires a number of virtual functions to be
    implemented.
    """
    # Kept only for backward compatibility; carries no behavior.
    pass
#
# Helper functions to convert inputs to 1-D arrays
#
def trimseq(seq):
    """Remove trailing zeros from a sequence of Poly series coefficients.

    Parameters
    ----------
    seq : sequence
        Sequence of Poly series coefficients.

    Returns
    -------
    series : sequence
        Subsequence of `seq` with trailing zeros removed. If every element
        compares equal to zero, the single-element subsequence holding the
        first element is returned; an empty input is returned unchanged.
        The returned sequence may or may not be a view.

    Notes
    -----
    Do not lose the type info if the sequence contains unknown objects.
    """
    if len(seq) == 0:
        return seq
    last = len(seq) - 1
    # Walk backwards past trailing zeros, but never past the first element.
    while last > 0 and seq[last] == 0:
        last -= 1
    return seq[:last + 1]
def as_series(alist, trim=True):
"""
Return argument as a list of 1-d arrays.
The returned list contains array(s) of dtype double, complex double, or
object. A 1-d argument of shape ``(N,)`` is parsed into ``N`` arrays of
size one; a 2-d argument of shape ``(M,N)`` is parsed into ``M`` arrays
of size ``N`` (i.e., is "parsed by row"); and a higher dimensional array
raises a Value Error if it is not first reshaped into either a 1-d or 2-d
array.
Parameters
----------
alist : array_like
A 1- or 2-d array_like
trim : boolean, optional
When True, trailing zeros are removed from the inputs.
When False, the inputs are passed through intact.
Returns
-------
[a1, a2,...] : list of 1-D arrays
A copy of the input data as a list of 1-d arrays.
Raises
------
ValueError
Raised when `as_series` cannot convert its input to 1-d arrays, or at
least one of the resulting arrays is empty.
Examples
--------
>>> from numpy.polynomial import polyutils as pu
>>> a = np.arange(4)
>>> pu.as_series(a)
[array([0.]), array([1.]), array([2.]), array([3.])]
>>> b = np.arange(6).reshape((2,3))
>>> pu.as_series(b)
[array([0., 1., 2.]), array([3., 4., 5.])]
>>> pu.as_series((1, np.arange(3), np.arange(2, dtype=np.float16)))
[array([1.]), array([0., 1., 2.]), array([0., 1.])]
>>> pu.as_series([2, [1.1, 0.]])
[array([2.]), array([1.1])]
>>> pu.as_series([2, [1.1, 0.]], trim=False)
[array([2.]), array([1.1, 0. ])]
"""
arrays = [np.array(a, ndmin=1, copy=0) for a in alist]
if min([a.size for a in arrays]) == 0:
raise ValueError("Coefficient array is empty")
if any([a.ndim != 1 for a in arrays]):
raise ValueError("Coefficient array is not 1-d")
if trim:
arrays = [trimseq(a) for a in arrays]
if any([a.dtype == np.dtype(object) for a in arrays]):
ret = []
for a in arrays:
if a.dtype != np.dtype(object):
tmp = np.empty(len(a), dtype=np.dtype(object))
tmp[:] = a[:]
ret.append(tmp)
else:
ret.append(a.copy())
else:
try:
dtype = np.common_type(*arrays)
except Exception:
raise ValueError("Coefficient arrays have no common type")
ret = [np.array(a, copy=1, dtype=dtype) for a in arrays]
return ret
def trimcoef(c, tol=0):
    """
    Remove "small" "trailing" coefficients from a polynomial.

    "Small" means "small in absolute value" and is controlled by the
    parameter `tol`; "trailing" means highest order coefficient(s), e.g., in
    ``[0, 1, 1, 0, 0]`` (which represents ``0 + x + x**2 + 0*x**3 + 0*x**4``)
    both the 3-rd and 4-th order coefficients would be "trimmed."

    Parameters
    ----------
    c : array_like
        1-d array of coefficients, ordered from lowest order to highest.
    tol : number, optional
        Trailing (i.e., highest order) elements with absolute value less
        than or equal to `tol` (default value is zero) are removed.

    Returns
    -------
    trimmed : ndarray
        1-d array with trailing zeros removed. If the resulting series
        would be empty, a series containing a single zero is returned.

    Raises
    ------
    ValueError
        If `tol` < 0

    See Also
    --------
    trimseq
    """
    if tol < 0:
        raise ValueError("tol must be non-negative")
    [c] = as_series([c])
    # Indices of coefficients that survive the tolerance test.
    [keep] = np.nonzero(np.abs(c) > tol)
    if keep.size == 0:
        # Everything was trimmed: return a single zero of the same dtype.
        return c[:1] * 0
    return c[:keep[-1] + 1].copy()
def getdomain(x):
    """
    Return a domain suitable for given abscissae.

    Find a domain suitable for a polynomial or Chebyshev series
    defined at the values supplied.

    Parameters
    ----------
    x : array_like
        1-d array of abscissae whose domain will be determined.

    Returns
    -------
    domain : ndarray
        1-d array containing two values. If the inputs are complex, then
        the two returned points are the lower left and upper right corners
        of the smallest rectangle (aligned with the axes) in the complex
        plane containing the points `x`. If the inputs are real, then the
        two points are the ends of the smallest interval containing the
        points `x`.

    See Also
    --------
    mapparms, mapdomain
    """
    [x] = as_series([x], trim=False)
    if x.dtype.char not in np.typecodes['Complex']:
        # Real data: smallest closed interval containing the points.
        return np.array((x.min(), x.max()))
    # Complex data: corners of the smallest axis-aligned bounding rectangle.
    re_lo, re_hi = x.real.min(), x.real.max()
    im_lo, im_hi = x.imag.min(), x.imag.max()
    return np.array((complex(re_lo, im_lo), complex(re_hi, im_hi)))
def mapparms(old, new):
    """
    Linear map parameters between domains.

    Return the parameters of the linear map ``offset + scale*x`` that maps
    `old` to `new` such that ``old[i] -> new[i]``, ``i = 0, 1``.

    Parameters
    ----------
    old, new : array_like
        Domains. Each domain must (successfully) convert to a 1-d array
        containing precisely two values.

    Returns
    -------
    offset, scale : scalars
        The map ``L(x) = offset + scale*x`` maps the first domain to the
        second.

    See Also
    --------
    getdomain, mapdomain

    Notes
    -----
    Also works for complex numbers, and thus can be used to calculate the
    parameters required to map any line in the complex plane to any other
    line therein.

    Examples
    --------
    >>> from numpy.polynomial import polyutils as pu
    >>> pu.mapparms((-1,1),(-1,1))
    (0.0, 1.0)
    >>> pu.mapparms((1,-1),(-1,1))
    (-0.0, -1.0)
    """
    old_span = old[1] - old[0]
    new_span = new[1] - new[0]
    offset = (old[1]*new[0] - old[0]*new[1])/old_span
    return offset, new_span/old_span
def mapdomain(x, old, new):
    """
    Apply linear map to input points.

    The linear map ``offset + scale*x`` that maps the domain `old` to
    the domain `new` is applied to the points `x`.

    Parameters
    ----------
    x : array_like
        Points to be mapped. If `x` is a subtype of ndarray the subtype
        will be preserved.
    old, new : array_like
        The two domains that determine the map. Each must (successfully)
        convert to 1-d arrays containing precisely two values.

    Returns
    -------
    x_out : ndarray
        Array of points of the same shape as `x`, after application of the
        linear map between the two domains.

    See Also
    --------
    getdomain, mapparms

    Notes
    -----
    Effectively, this implements:

    .. math ::
        x\\_out = new[0] + m(x - old[0])

    where

    .. math ::
        m = \\frac{new[1]-new[0]}{old[1]-old[0]}

    Also works for complex numbers, and thus can be used to map any line
    in the complex plane to any other line therein.
    """
    # asanyarray keeps ndarray subclasses intact.
    pts = np.asanyarray(x)
    offset, scale = mapparms(old, new)
    return offset + scale*pts
def _vander2d(vander_f, x, y, deg):
    """
    Helper function used to implement the ``<type>vander2d`` functions.

    Parameters
    ----------
    vander_f : function(array_like, int) -> ndarray
        The 1d vander function, such as ``polyvander``
    x, y, deg :
        See the ``<type>vander2d`` functions for more detail
    """
    degx, degy = [
        _deprecate_as_int(d, "degrees")
        for d in deg
    ]
    # ``copy=None`` copies only when needed; NumPy >= 2.0 raises for the
    # old ``copy=0`` spelling whenever a copy is unavoidable (e.g. list
    # inputs).  Adding 0.0 promotes to at least float.
    x, y = np.array((x, y), copy=None) + 0.0
    vx = vander_f(x, degx)
    vy = vander_f(y, degy)
    # Outer product of the 1-d Vandermonde matrices over their coefficient
    # axes, flattened to a single trailing axis.
    v = vx[..., None]*vy[..., None,:]
    return v.reshape(v.shape[:-2] + (-1,))
def _vander3d(vander_f, x, y, z, deg):
    """
    Helper function used to implement the ``<type>vander3d`` functions.

    Parameters
    ----------
    vander_f : function(array_like, int) -> ndarray
        The 1d vander function, such as ``polyvander``
    x, y, z, deg :
        See the ``<type>vander3d`` functions for more detail
    """
    degx, degy, degz = [
        _deprecate_as_int(d, "degrees")
        for d in deg
    ]
    # ``copy=None`` copies only when needed; NumPy >= 2.0 raises for the
    # old ``copy=0`` spelling whenever a copy is unavoidable (e.g. list
    # inputs).  Adding 0.0 promotes to at least float.
    x, y, z = np.array((x, y, z), copy=None) + 0.0
    vx = vander_f(x, degx)
    vy = vander_f(y, degy)
    vz = vander_f(z, degz)
    # Triple outer product over the coefficient axes, flattened to a single
    # trailing axis.
    v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
    return v.reshape(v.shape[:-3] + (-1,))
def _fromroots(line_f, mul_f, roots):
    """
    Helper function used to implement the ``<type>fromroots`` functions.

    Parameters
    ----------
    line_f : function(float, float) -> ndarray
        The ``<type>line`` function, such as ``polyline``
    mul_f : function(array_like, array_like) -> ndarray
        The ``<type>mul`` function, such as ``polymul``
    roots :
        See the ``<type>fromroots`` functions for more detail
    """
    if len(roots) == 0:
        return np.ones(1)
    [roots] = as_series([roots], trim=False)
    roots.sort()
    # One linear factor per root, then multiply pairwise until one remains
    # (balances the degrees of the intermediate products).
    factors = [line_f(-r, 1) for r in roots]
    n = len(factors)
    while n > 1:
        m, odd = divmod(n, 2)
        paired = [mul_f(factors[i], factors[i + m]) for i in range(m)]
        if odd:
            paired[0] = mul_f(paired[0], factors[-1])
        factors = paired
        n = m
    return factors[0]
def _valnd(val_f, c, *args):
"""
Helper function used to implement the ``<type>val<n>d`` functions.
Parameters
----------
val_f : function(array_like, array_like, tensor: bool) -> array_like
The ``<type>val`` function, such as ``polyval``
c, args :
See the ``<type>val<n>d`` functions for more detail
"""
try:
args = tuple(np.array(args, copy=False))
except Exception:
# preserve the old error message
if len(args) == 2:
raise ValueError('x, y, z are incompatible')
elif len(args) == 3:
raise ValueError('x, y are incompatible')
else:
raise ValueError('ordinates are incompatible')
it = iter(args)
x0 = next(it)
# use tensor on only the first
c = val_f(x0, c)
for xi in it:
c = val_f(xi, c, tensor=False)
return c
def _gridnd(val_f, c, *args):
"""
Helper function used to implement the ``<type>grid<n>d`` functions.
Parameters
----------
val_f : function(array_like, array_like, tensor: bool) -> array_like
The ``<type>val`` function, such as ``polyval``
c, args :
See the ``<type>grid<n>d`` functions for more detail
"""
for xi in args:
c = val_f(xi, c)
return c
def _div(mul_f, c1, c2):
    """
    Helper function used to implement the ``<type>div`` functions.

    Division proceeds by repeatedly subtracting `c2` shifted (via
    multiplication by a basis element) to the current leading degree.  For
    some polynomial types, a more efficient approach may be possible.

    Parameters
    ----------
    mul_f : function(array_like, array_like) -> array_like
        The ``<type>mul`` function, such as ``polymul``
    c1, c2 :
        See the ``<type>div`` functions for more detail
    """
    # c1, c2 are trimmed copies
    [c1, c2] = as_series([c1, c2])
    if c2[-1] == 0:
        raise ZeroDivisionError()
    len1, len2 = len(c1), len(c2)
    if len1 < len2:
        return c1[:1]*0, c1
    if len2 == 1:
        return c1/c2[-1], c1[:1]*0
    quo = np.empty(len1 - len2 + 1, dtype=c1.dtype)
    rem = c1
    for k in range(len1 - len2, -1, -1):
        # c2 shifted to degree k in the series basis.
        shifted = mul_f([0]*k + [1], c2)
        coef = rem[-1]/shifted[-1]
        rem = rem[:-1] - coef*shifted[:-1]
        quo[k] = coef
    return quo, trimseq(rem)
def _add(c1, c2):
    """ Helper function used to implement the ``<type>add`` functions. """
    # c1, c2 are trimmed copies; accumulate into the longer of the two.
    [c1, c2] = as_series([c1, c2])
    if len(c2) >= len(c1):
        c2[:c1.size] += c1
        result = c2
    else:
        c1[:c2.size] += c2
        result = c1
    return trimseq(result)
def _sub(c1, c2):
    """ Helper function used to implement the ``<type>sub`` functions. """
    # c1, c2 are trimmed copies
    [c1, c2] = as_series([c1, c2])
    if len(c1) > len(c2):
        c1[:c2.size] -= c2
        return trimseq(c1)
    # c2 is at least as long: negate it and add c1 into the prefix.
    neg = -c2
    neg[:c1.size] += c1
    return trimseq(neg)
def _fit(vander_f, x, y, deg, rcond=None, full=False, w=None):
    """
    Helper function used to implement the ``<type>fit`` functions.

    Parameters
    ----------
    vander_f : function(array_like, int) -> ndarray
        The 1d vander function, such as ``polyvander``
    x, y, deg, rcond, full, w :
        See the ``<type>fit`` functions for more detail
    """
    x = np.asarray(x) + 0.0
    y = np.asarray(y) + 0.0
    deg = np.asarray(deg)
    # check arguments.
    if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
        raise TypeError("deg must be an int or non-empty 1-D array of int")
    if deg.min() < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if len(x) != len(y):
        raise TypeError("expected x and y to have same length")
    if deg.ndim == 0:
        # Scalar degree: fit all terms 0..deg.
        lmax = deg
        order = lmax + 1
        van = vander_f(x, lmax)
    else:
        # 1-D degree array: fit only the selected terms (columns).
        deg = np.sort(deg)
        lmax = deg[-1]
        order = len(deg)
        van = vander_f(x, lmax)[:, deg]
    # set up the least squares matrices in transposed form
    lhs = van.T
    rhs = y.T
    if w is not None:
        w = np.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected 1D vector for w")
        if len(x) != len(w):
            raise TypeError("expected x and w to have same length")
        # apply weights. Don't use inplace operations as they
        # can cause problems with NA.
        lhs = lhs * w
        rhs = rhs * w
    # set rcond
    if rcond is None:
        rcond = len(x)*np.finfo(x.dtype).eps
    # Determine the norms of the design matrix columns.
    if issubclass(lhs.dtype.type, np.complexfloating):
        scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
    else:
        scl = np.sqrt(np.square(lhs).sum(1))
    # Zero-norm columns cannot be scaled; use 1 to avoid division by zero.
    scl[scl == 0] = 1
    # Solve the least squares problem.
    c, resids, rank, s = np.linalg.lstsq(lhs.T/scl, rhs.T, rcond)
    c = (c.T/scl).T
    # Expand c to include non-fitted coefficients which are set to zero
    if deg.ndim > 0:
        if c.ndim == 2:
            cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)
        else:
            cc = np.zeros(lmax+1, dtype=c.dtype)
        cc[deg] = c
        c = cc
    # warn on rank reduction
    if rank != order and not full:
        msg = "The fit may be poorly conditioned"
        warnings.warn(msg, RankWarning, stacklevel=2)
    if full:
        return c, [resids, rank, s, rcond]
    else:
        return c
def _pow(mul_f, c, pow, maxpower):
    """
    Helper function used to implement the ``<type>pow`` functions.

    Parameters
    ----------
    mul_f : function(array_like, array_like) -> ndarray
        The ``<type>mul`` function, such as ``polymul``
    c : array_like
        1-d array of series coefficients.
    pow, maxpower :
        See the ``<type>pow`` functions for more detail
    """
    # c is a trimmed copy
    [c] = as_series([c])
    power = int(pow)
    if power != pow or power < 0:
        raise ValueError("Power must be a non-negative integer.")
    if maxpower is not None and power > maxpower:
        raise ValueError("Power is too large")
    if power == 0:
        return np.array([1], dtype=c.dtype)
    if power == 1:
        return c
    # Repeated multiplication; binary exponentiation would be faster but
    # the powers used in practice are small.
    result = c
    for _ in range(2, power + 1):
        result = mul_f(result, c)
    return result
def _deprecate_as_int(x, desc):
"""
Like `operator.index`, but emits a deprecation warning when passed a float
Parameters
----------
x : int-like, or float with integral value
Value to interpret as an integer
desc : str
description to include in any error message
Raises
------
TypeError : if x is a non-integral float or non-numeric
DeprecationWarning : if x is an integral float
"""
try:
return operator.index(x)
except TypeError:
# Numpy 1.17.0, 2019-03-11
try:
ix = int(x)
except TypeError:
pass
else:
if ix == x:
warnings.warn(
"In future, this will raise TypeError, as {} will need to "
"be an integer not just an integral float."
.format(desc),
DeprecationWarning,
stacklevel=3
)
return ix
raise TypeError("{} must be an integer".format(desc))
| |
import pymc
import numpy as np
from numpy import exp, log
import moments, jacobians, defaults, istat
import warnings
"""
Bayesian Distribution Selection
===============================
:author: David Huard
:date: May 7, 2008
:institution: McGill University, Montreal, Qc, Canada
Introduction
------------
This module implements a function that, given a dataset, selects the `best`
statistical distributions among a set.
Finding the statistical distribution best suited for describing a particular
dataset is part art, part science. Decision trees exist that can help identify
the appropriate distribution by answering some elementary questions.
.. image:: figs/decision_tree.jpg
Chi square.
Of course, simply fitting the Maximum Likelihood parameters and compare the
likelihoods of different distributions will lead to spurious results since
the likelihood is very much dependent on the whole shape of the distribution
and the number of parameters.
Concept
-------
Here, the idea is to apply Bayesian model selection to select the best model
(the statistical distribution). That is, we compute
.. math::
    W_i \propto p(M_i | data, I)
where `W_i` stands for the weight of each distribution, :math:`M_i` the
different models, and `I` the prior information on the data. The way we solve
this equation is by inserting the distribution parameters as latent variables:
.. math::
p(M_i |data, I) & = \int p(M_i, \theta_i | data, I) d\theta_i \\
& = \int p(data | M_i, \theta_i) \pi(\theta_i | I).
Now :math:`p(data | M_i, \theta_i)` is simply the likelihood, but we still
need to define :math:`\pi(\theta | I)`.
The way this is done here is by defining a prior distribution on
X :math:`I : \pi(x)`. The problem now boils down to finding `\pi(\theta_i | I)`
such that
.. math::
\int p( x | M_i, \theta_i) \pi(\theta_i | I) d\theta_i = \pi(x).
This is problematic because there won't, in general, exist a unique solution for
`\pi(\theta_i | I)`. We need another constraint to define a unique solution.
One such constraint is entropy. We will hence define `\pi(\theta_i | I)` as the
distribution that satisfies the previous equation and maximizes the entropy.
The question now is: how to draw samples `\theta_i^j` that satisfy those
constraints ?
Here is a potential solution, I don't really know if it works.
1. N random values (r) are drawn from distribution :math:`M_i` using
:math:`\theta_i`.
2. The likelihood of `r` is evaluated using :math:`\pi(r)` and assigned to
the parameters :math:`\theta_i`.
In nature, systems tend to increase their entropy. So I kinda expect the same
thing to happen here. With time, the parameter distribution should increase its
entropy. Magical thinking !
The whole procedure can be implemented easily using pymc's MCMC algorithms.
The MCMC sampler will explore the parameter space and return a trace that
corresponds to a random sample drawn from :math:`\pi(\theta_i | I)`. This sample
can then be used to compute :math:`E(p(data | M_i, \theta_i)`, which provides
the weight of model `i`.
Usage
-----
"""
def builder(data, distributions, xprior, initial_params=None):
    """Return a MCMC model selection sampler.

    Parameters
    ----------
    data : array
        Data set used to select the distribution.
    distributions : sequence
        A collection of Stochastic instances.
        For each given function, there has to be an entry in moments, jacobians
        and defaults with the same __name__ as the distribution.
        Basically, all we really need is a random method, a log probability
        and default initial values. We should eventually let users define objects
        that have those attributes.
    xprior : function(x)
        Function returning the log probability density of x. This is a prior
        estimate of the shape of the distribution.
    initial_params : dict, optional
        Initial parameters for the distributions, keyed by distribution
        name (lower case). Overrides the guessed/default values.
    """
    # Avoid the shared mutable default argument pitfall.
    if initial_params is None:
        initial_params = {}

    # 1. Extract the relevant information about each distribution:
    # name, random generating function, log probability and default parameters.
    names = []
    random_f = {}
    logp_f = {}
    init_val = {}
    for d in distributions:
        name = d.__name__.lower()
        if d.__module__ == 'pymc.distributions':
            random = getattr(pymc, 'r%s' % name)
            logp = getattr(pymc, name + '_like')
            initial_values = guess_params_from_sample(data, name)
        elif d.__module__ == 'pymc.ScipyDistributions':
            raise ValueError('Scipy distributions not yet supported.')
        else:
            try:
                random = d.random
                logp = d.logp
                initial_values = d.defaults
            except AttributeError:
                # BUG FIX: was a bare ``except`` that hid unrelated errors.
                raise ValueError('Unrecognized distribution %s' % d.__str__())
        if initial_values is None:
            raise ValueError('Distribution %s not supported. Skipping.' % name)
        names.append(name)
        random_f[name] = random
        logp_f[name] = logp
        init_val[name] = initial_values
    init_val.update(initial_params)

    # 2. Define the various latent variables and their priors.
    nr = 10  # number of random draws used to evaluate the parameter prior
    latent = {}
    for name in names:
        # BUG FIX: bind ``name`` as a default argument. The original lambda
        # closed over the loop variable, so every distribution evaluated the
        # prior of the *last* name in ``names``.
        def prior_distribution(value, name=name):
            """Prior density for the parameters.

            This function draws random values from the distribution
            parameterized with values. The probability of these random values
            is then computed using xprior."""
            return xprior(random_f[name](size=nr, *value))

        latent['%s_params' % name] = pymc.Stochastic(
            logp=prior_distribution,
            doc='Prior for the parameters of the %s distribution' % name,
            name='%s_params' % name,
            parents={},
            value=np.atleast_1d(init_val[name]),
        )

    # 3. Compute the probability for each model.
    lprob = {}
    for name in names:
        # Same late-binding fix as above: ``name`` is bound per iteration.
        def logp(params, name=name):
            # Likelihood of the data given this model's parameters.
            return logp_f[name](data, *params)

        lprob['%s_logp' % name] = pymc.Deterministic(
            eval=logp,
            doc='Likelihood of the dataset given the distribution and the parameters.',
            name='%s_logp' % name,
            parents={'params': latent['%s_params' % name]})

    # ``input`` renamed: it shadowed the builtin.
    model_input = latent
    model_input.update(lprob)
    model_input['names'] = names
    return pymc.MCMC(input=model_input)
def guess_params_from_sample(r, dist):
    """Guess initial parameters for distribution ``dist`` from sample ``r``.

    Uses the moment-based estimator from ``istat`` when one exists,
    otherwise falls back to the pymc defaults for that distribution.
    """
    sample_stats = istat.describe(r)
    try:
        return np.atleast_1d(getattr(istat, dist)(**sample_stats))
    except (NotImplementedError, AttributeError):
        # No estimator for this distribution: use the canned defaults.
        return defaults.pymc_default_list(dist)
def select_distribution(data, distributions, xprior, weights=None, initial_params=None):
    """Return the posterior weight of each candidate distribution for ``data``.

    Parameters mirror :func:`builder`; ``weights`` is the prior probability
    assigned to each distribution (defaults to uniform).

    Returns
    -------
    dict mapping distribution name -> mean likelihood over the MCMC trace.
    """
    if initial_params is None:
        # Avoid the shared mutable default argument pitfall.
        initial_params = {}
    # 1. Define the prior for the distributions.
    N = len(distributions)
    if weights is None:
        weights = np.ones(N) / N
    else:
        assert np.sum(weights) == 1.
    # 2. Create the MCMC sampler and sample.
    M = builder(data, distributions, xprior, initial_params)
    # BUG FIX: a stray debugging ``return M`` here made everything below
    # unreachable; ``names`` and ``W`` were also never defined.
    n_iter = 10000 * N  # renamed: ``iter`` shadowed the builtin
    tune = n_iter // 5
    M.sample(n_iter, tune)
    # 3. Compute the weights from the likelihood traces.
    W = {}
    for name in [d.__name__.lower() for d in distributions]:
        dtrm = getattr(M, '%s_logp' % name)
        W[name] = np.mean(np.exp(dtrm.trace()))
    return W
def test_builder():
    """Smoke test: build a sampler for a small normal sample."""
    sample = np.random.normal(3, .1, 20)
    uniform_prior = lambda x: pymc.uniform_like(x, 0, 100)
    return builder(sample, [pymc.Lognormal, pymc.Normal], uniform_prior,
                   initial_params={'normal': np.array([50., 1.])})
def test_selection():
    """Smoke test: select among candidate distributions for exponential data."""
    N = 40
    r = pymc.rexponential(2.0, size=N)

    def prior_x(x):
        """Triangle distribution on [0, L].

        The area is A = h*L/2 = 1.
        """
        L = 5
        h = 2. / L
        if np.all(0. < x) and np.all(x < L):
            # BUG FIX: ``log`` was an undefined name; use np.log.
            return np.sum(np.log(h - h / L * x))
        else:
            return -np.inf

    W = select_distribution(r, [pymc.Exponweib, pymc.Exponential,
                                pymc.Weibull, pymc.Chi2], prior_x)
    return W
| |
#!/usr/bin/env python
'''
/**************************************************************************
*
* Copyright 2009 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
'''
# Channel data types.
VOID, UNSIGNED, SIGNED, FIXED, FLOAT = range(5)

# Swizzle sources: the four channels, the constants 0/1, or no mapping.
SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_0, SWIZZLE_1, SWIZZLE_NONE, = range(7)

# Format layouts / colorspaces.
PLAIN = 'plain'
RGB = 'rgb'
SRGB = 'srgb'
YUV = 'yuv'
ZS = 'zs'
def is_pot(x):
    '''Return True when x is a power of two (0 also passes).'''
    # A power of two has exactly one bit set, so clearing the lowest set
    # bit leaves zero.
    return not (x & (x - 1))
# Sentinel magnitude used as the "representable range" of floating point
# channels (effectively unbounded).
VERY_LARGE = 99999999999999999999999
class Channel:
    '''Describe the channel of a color channel.'''

    def __init__(self, type, norm, pure, size, name = ''):
        self.type = type
        self.norm = norm
        self.pure = pure
        self.size = size
        # Signed-ness follows from the data type.
        self.sign = type in (SIGNED, FIXED, FLOAT)
        self.name = name

    def __str__(self):
        # Compact encoding of type/norm/pure/size.
        pieces = [str(self.type)]
        if self.norm:
            pieces.append('n')
        if self.pure:
            pieces.append('p')
        pieces.append(str(self.size))
        return ''.join(pieces)

    def __eq__(self, other):
        # Channels compare equal on their encoding; the name is ignored.
        if self.type != other.type:
            return False
        if self.norm != other.norm:
            return False
        if self.pure != other.pure:
            return False
        return self.size == other.size

    def max(self):
        '''Maximum representable number.'''
        if self.type == FLOAT:
            return VERY_LARGE
        if self.type == FIXED:
            # Fixed point devotes half of the bits to the integer part.
            return (1 << (self.size // 2)) - 1
        if self.norm:
            return 1
        if self.type == UNSIGNED:
            return (1 << self.size) - 1
        if self.type == SIGNED:
            return (1 << (self.size - 1)) - 1
        assert False

    def min(self):
        '''Minimum representable number.'''
        if self.type == FLOAT:
            return -VERY_LARGE
        if self.type == FIXED:
            return -(1 << (self.size // 2))
        if self.type == UNSIGNED:
            return 0
        if self.norm:
            return -1
        if self.type == SIGNED:
            return -(1 << (self.size - 1))
        assert False
class Format:
    '''Describe a pixel format.'''

    def __init__(self, name, layout, block_width, block_height, le_channels, le_swizzles, be_channels, be_swizzles, colorspace):
        self.name = name
        self.layout = layout
        self.block_width = block_width
        self.block_height = block_height
        self.le_channels = le_channels
        self.le_swizzles = le_swizzles
        self.be_channels = be_channels
        self.be_swizzles = be_swizzles
        # BUG FIX: ``self.name`` was redundantly assigned a second time here.
        self.colorspace = colorspace

    def __str__(self):
        return self.name

    def short_name(self):
        '''Make up a short norm for a format, suitable to be used as suffix in
        function names.'''
        name = self.name
        if name.startswith('PIPE_FORMAT_'):
            name = name[len('PIPE_FORMAT_'):]
        name = name.lower()
        return name

    def block_size(self):
        '''Size of one pixel block in bits (sum of the channel sizes).'''
        size = 0
        for channel in self.le_channels:
            size += channel.size
        return size

    def nr_channels(self):
        '''Number of channels with a non-zero size.'''
        nr_channels = 0
        for channel in self.le_channels:
            if channel.size:
                nr_channels += 1
        return nr_channels

    def array_element(self):
        '''Return the reference channel if the format is a plain array of
        identical, whole-byte channels; otherwise None.'''
        if self.layout != PLAIN:
            return None
        ref_channel = self.le_channels[0]
        # Skip a leading padding channel when picking the reference.
        if ref_channel.type == VOID:
            ref_channel = self.le_channels[1]
        for channel in self.le_channels:
            # Every used channel must be byte-aligned and match the reference.
            if channel.size and (channel.size != ref_channel.size or channel.size % 8):
                return None
            if channel.type != VOID:
                if channel.type != ref_channel.type:
                    return None
                if channel.norm != ref_channel.norm:
                    return None
                if channel.pure != ref_channel.pure:
                    return None
        return ref_channel

    def is_array(self):
        # Idiom fix: identity comparison against None.
        return self.array_element() is not None

    def is_mixed(self):
        '''True when the used channels do not all share type/norm/pure.'''
        if self.layout != PLAIN:
            return False
        ref_channel = self.le_channels[0]
        if ref_channel.type == VOID:
            ref_channel = self.le_channels[1]
        for channel in self.le_channels[1:]:
            if channel.type != VOID:
                if channel.type != ref_channel.type:
                    return True
                if channel.norm != ref_channel.norm:
                    return True
                if channel.pure != ref_channel.pure:
                    return True
        return False

    def is_pot(self):
        return is_pot(self.block_size())

    def is_int(self):
        if self.layout != PLAIN:
            return False
        for channel in self.le_channels:
            if channel.type not in (VOID, UNSIGNED, SIGNED):
                return False
        return True

    def is_float(self):
        if self.layout != PLAIN:
            return False
        for channel in self.le_channels:
            if channel.type not in (VOID, FLOAT):
                return False
        return True

    def is_bitmask(self):
        '''True for plain integer formats that fit in a machine word.'''
        if self.layout != PLAIN:
            return False
        if self.block_size() not in (8, 16, 32):
            return False
        for channel in self.le_channels:
            if channel.type not in (VOID, UNSIGNED, SIGNED):
                return False
        return True

    def is_pure_color(self):
        if self.layout != PLAIN or self.colorspace == ZS:
            return False
        pures = [channel.pure
                 for channel in self.le_channels
                 if channel.type != VOID]
        # All used channels must agree on pure-ness.
        for x in pures:
            assert x == pures[0]
        return pures[0]

    def channel_type(self):
        '''Common type of the used channels (asserts they all agree).'''
        types = [channel.type
                 for channel in self.le_channels
                 if channel.type != VOID]
        for x in types:
            assert x == types[0]
        return types[0]

    def is_pure_signed(self):
        return self.is_pure_color() and self.channel_type() == SIGNED

    def is_pure_unsigned(self):
        return self.is_pure_color() and self.channel_type() == UNSIGNED

    def has_channel(self, id):
        return self.le_swizzles[id] != SWIZZLE_NONE

    def has_depth(self):
        return self.colorspace == ZS and self.has_channel(0)

    def has_stencil(self):
        return self.colorspace == ZS and self.has_channel(1)

    def stride(self):
        # Bits to bytes; block sizes are always a whole number of bytes here.
        return self.block_size() // 8
# Maps the leading letter of a channel field in the CSV to its type.
_type_parse_map = {
    '': VOID,
    'x': VOID,
    'u': UNSIGNED,
    's': SIGNED,
    'h': FIXED,
    'f': FLOAT,
}

# Maps a swizzle letter from the CSV to the corresponding SWIZZLE_* value.
_swizzle_parse_map = {
    'x': SWIZZLE_X,
    'y': SWIZZLE_Y,
    'z': SWIZZLE_Z,
    'w': SWIZZLE_W,
    '0': SWIZZLE_0,
    '1': SWIZZLE_1,
    '_': SWIZZLE_NONE,
}
def _parse_channels(fields, layout, colorspace, swizzles):
    '''Build the list of four Channel objects described by ``fields``.

    For PLAIN layouts a descriptive name (letters from "rgba" or "zs") is
    derived from the swizzles; other layouts simply use x/y/z/w.
    '''
    if layout == PLAIN:
        if colorspace in (RGB, SRGB):
            letters = 'rgba'
        elif colorspace == ZS:
            letters = 'zs'
        else:
            assert False
        names = ['']*4
        for i in range(4):
            swizzle = swizzles[i]
            if swizzle < 4:
                names[swizzle] += letters[i]
        # Unreferenced channels get the placeholder name 'x'.
        names = [name or 'x' for name in names]
    else:
        names = ['x', 'y', 'z', 'w']

    channels = []
    for i in range(4):
        field = fields[i]
        if not field:
            # Empty field: unused channel slot.
            type = VOID
            norm = False
            pure = False
            size = 0
        else:
            # Field syntax: <type letter>[n|p]<size in bits>.
            type = _type_parse_map[field[0]]
            norm = field[1] == 'n'
            pure = field[1] == 'p'
            if norm or pure:
                size = int(field[2:])
            else:
                size = int(field[1:])
        channels.append(Channel(type, norm, pure, size, names[i]))
    return channels
def parse(filename):
    '''Parse the format description in CSV format in terms of the
    Channel and Format classes above.'''
    formats = []
    # BUG FIX: the file handle was never closed; use a context manager.
    with open(filename) as stream:
        for line in stream:
            # Strip trailing comments and surrounding whitespace.
            try:
                comment = line.index('#')
            except ValueError:
                pass
            else:
                line = line[:comment]
            line = line.strip()
            if not line:
                continue

            fields = [field.strip() for field in line.split(',')]

            name = fields[0]
            layout = fields[1]
            block_width, block_height = map(int, fields[2:4])
            colorspace = fields[9]

            le_swizzles = [_swizzle_parse_map[swizzle] for swizzle in fields[8]]
            le_channels = _parse_channels(fields[4:8], layout, colorspace, le_swizzles)

            be_swizzles = [_swizzle_parse_map[swizzle] for swizzle in fields[8]]
            be_channels = _parse_channels(fields[4:8], layout, colorspace, be_swizzles)

            # Compute per-channel bit offsets: little endian packs from bit 0
            # upwards, big endian from the opposite end.
            le_shift = 0
            for channel in le_channels:
                channel.shift = le_shift
                le_shift += channel.size

            be_shift = 0
            for channel in be_channels[3::-1]:
                channel.shift = be_shift
                be_shift += channel.size

            assert le_shift == be_shift

            format = Format(name, layout, block_width, block_height, le_channels, le_swizzles, be_channels, be_swizzles, colorspace)
            formats.append(format)
    return formats
| |
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology
#
# Note: the L1 Cache latency is only used by the sequencer on fast path hits
#
class L1Cache(RubyCache):
    # Hit latency in cycles; consulted by the sequencer on fast-path hits.
    latency = 2
#
# Note: the L2 Cache latency is not currently used
#
class L2Cache(RubyCache):
    # Hit latency in cycles; not currently used by the protocol.
    latency = 10
def define_options(parser):
    """Register the Token_CMP-specific command line options on ``parser``."""
    parser.add_option("--l1-retries", type="int", default=1,
                      help="Token_CMP: # of l1 retries before going persistent")
    # BUG FIX: dropped a stray trailing semicolon on the next call.
    parser.add_option("--timeout-latency", type="int", default=300,
                      help="Token_CMP: cycles until issuing again")
    # BUG FIX: corrected "dyanimc" typo in the user-facing help text.
    parser.add_option("--disable-dyn-timeouts", action="store_true",
                      help="Token_CMP: disable dynamic timeouts, use fixed latency instead")
    parser.add_option("--allow-atomic-migration", action="store_true",
                      help="allow migratory sharing for atomic only accessed blocks")
def create_vsystem(options, systems, ruby_system, total_num_cpus, total_mem_size, vm_cpus, vm_mems):
    """Create the MOESI_CMP_token Ruby controllers for several virtualized
    guest systems sharing one Ruby network.

    Each guest ``systems[j]`` gets ``vm_cpus[j]`` L1/sequencer pairs and a
    contiguous slice of physical memory of size ``vm_mems[j]``.

    Returns (cpu_sequencers, dir_cntrl_nodes, topology).
    """
    if buildEnv['PROTOCOL'] != 'MOESI_CMP_token':
        panic("This script requires the MOESI_CMP_token protocol to be built.")

    #
    # number of tokens that the owner passes to requests so that shared blocks can
    # respond to read requests
    #
    n_tokens = options.num_cpus + 1

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list. Therefore the l1 controller nodes must be
    # listed before the directory nodes and directory nodes before dma nodes, etc.
    #
    l1_cntrl_nodes = []
    l2_cntrl_nodes = []
    dir_cntrl_nodes = []
    dma_cntrl_nodes = []

    #
    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    #
    l2_bits = int(math.log(options.num_l2caches, 2))
    block_size_bits = int(math.log(options.cacheline_size, 2))

    cntrl_count = 0
    start_address = MemorySize("0B")
    for (j, vm) in enumerate(systems):
        for i in xrange(int(vm_cpus[j])):
            #
            # First create the Ruby objects associated with this cpu
            #
            l1i_cache = L1Cache(size = options.l1i_size,
                                assoc = options.l1i_assoc,
                                start_index_bit = block_size_bits)
            l1d_cache = L1Cache(size = options.l1d_size,
                                assoc = options.l1d_assoc,
                                start_index_bit = block_size_bits)

            l1_cntrl = L1Cache_Controller(version = len(l1_cntrl_nodes),
                                          cntrl_id = cntrl_count,
                                          L1Icache = l1i_cache,
                                          L1Dcache = l1d_cache,
                                          l2_select_num_bits = l2_bits,
                                          N_tokens = n_tokens,
                                          retry_threshold = \
                                            options.l1_retries,
                                          fixed_timeout_latency = \
                                            options.timeout_latency,
                                          dynamic_timeout_enabled = \
                                            not options.disable_dyn_timeouts,
                                          no_mig_atomic = not \
                                            options.allow_atomic_migration,
                                          send_evictions = (
                                              options.cpu_type == "detailed"),
                                          transitions_per_cycle = options.ports,
                                          ruby_system = ruby_system)

            # The sequencer translates this guest's addresses into the
            # [start_address, start_address + vm_mems[j]) physical slice.
            cpu_seq = RubySequencer(version = len(l1_cntrl_nodes),
                                    icache = l1i_cache,
                                    dcache = l1d_cache,
                                    ruby_system = ruby_system,
                                    virtualization_support = True,
                                    real_address_range = AddrRange(start_address,start_address.value+MemorySize(vm_mems[j]).value))

            l1_cntrl.sequencer = cpu_seq

            # Idiom fix: identity comparison against None.
            if vm.piobus is not None:
                cpu_seq.pio_port = vm.piobus.slave

            exec("ruby_system.l1_cntrl%d = l1_cntrl" % len(l1_cntrl_nodes))

            #
            # Add controllers and sequencers to the appropriate lists
            #
            cpu_sequencers.append(cpu_seq)
            l1_cntrl_nodes.append(l1_cntrl)

            cntrl_count += 1
        # The next guest's memory slice starts where this one ends.
        start_address.value = start_address.value + MemorySize(vm_mems[j]).value

    l2_index_start = block_size_bits + l2_bits

    for i in xrange(options.num_l2caches):
        #
        # First create the Ruby objects associated with this cpu
        #
        l2_cache = L2Cache(size = options.l2_size,
                           assoc = options.l2_assoc,
                           start_index_bit = l2_index_start)

        l2_cntrl = L2Cache_Controller(version = i,
                                      cntrl_id = cntrl_count,
                                      L2cache = l2_cache,
                                      N_tokens = n_tokens,
                                      transitions_per_cycle = options.ports,
                                      ruby_system = ruby_system)

        exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
        l2_cntrl_nodes.append(l2_cntrl)
        cntrl_count += 1

    #TODO: take care of phys_mem_size
    phys_mem_size = total_mem_size
    #assert(phys_mem_size % options.num_dirs == 0)
    mem_module_size = phys_mem_size / options.num_dirs

    # Run each of the ruby memory controllers at a ratio of the frequency of
    # the ruby system
    # clk_divider value is a fix to pass regression.
    ruby_system.memctrl_clk_domain = DerivedClockDomain(
        clk_domain=ruby_system.clk_domain,
        clk_divider=3)

    for i in xrange(options.num_dirs):
        #
        # Create the Ruby objects associated with the directory controller
        #
        mem_cntrl = RubyMemoryControl(
            clk_domain = ruby_system.memctrl_clk_domain,
            version = i,
            ruby_system = ruby_system)

        dir_size = MemorySize('0B')
        dir_size.value = mem_module_size

        dir_cntrl = Directory_Controller(version = i,
                                         cntrl_id = cntrl_count,
                                         directory = \
                                         RubyDirectoryMemory(version = i,
                                                             use_map = options.use_map,
                                                             size = dir_size),
                                         memBuffer = mem_cntrl,
                                         l2_select_num_bits = l2_bits,
                                         transitions_per_cycle = options.ports,
                                         ruby_system = ruby_system)

        exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
        dir_cntrl_nodes.append(dir_cntrl)
        cntrl_count += 1

    for (j, vm) in enumerate(systems):
        for i, dma_port in enumerate(vm._dma_ports):
            #
            # Create the Ruby objects associated with the dma controller
            #
            dma_seq = DMASequencer(version = len(dma_cntrl_nodes),
                                   ruby_system = ruby_system)

            dma_cntrl = DMA_Controller(version = len(dma_cntrl_nodes),
                                       cntrl_id = cntrl_count,
                                       dma_sequencer = dma_seq,
                                       transitions_per_cycle = options.ports,
                                       ruby_system = ruby_system)

            exec("ruby_system.dma_cntrl%d = dma_cntrl" % len(dma_cntrl_nodes))
            exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % len(dma_cntrl_nodes))
            dma_cntrl_nodes.append(dma_cntrl)
            cntrl_count += 1

    all_cntrls = l1_cntrl_nodes + \
                 l2_cntrl_nodes + \
                 dir_cntrl_nodes + \
                 dma_cntrl_nodes

    topology = create_topology(all_cntrls, options)

    return (cpu_sequencers, dir_cntrl_nodes, topology)
def create_system(options, system, piobus, dma_ports, ruby_system):
    """Create the MOESI_CMP_token Ruby controllers for a single system.

    Returns (cpu_sequencers, dir_cntrl_nodes, topology).
    """
    if buildEnv['PROTOCOL'] != 'MOESI_CMP_token':
        panic("This script requires the MOESI_CMP_token protocol to be built.")

    #
    # number of tokens that the owner passes to requests so that shared blocks can
    # respond to read requests
    #
    n_tokens = options.num_cpus + 1

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list. Therefore the l1 controller nodes must be
    # listed before the directory nodes and directory nodes before dma nodes, etc.
    #
    l1_cntrl_nodes = []
    l2_cntrl_nodes = []
    dir_cntrl_nodes = []
    dma_cntrl_nodes = []

    #
    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    #
    l2_bits = int(math.log(options.num_l2caches, 2))
    block_size_bits = int(math.log(options.cacheline_size, 2))

    cntrl_count = 0
    for i in xrange(options.num_cpus):
        #
        # First create the Ruby objects associated with this cpu
        #
        l1i_cache = L1Cache(size = options.l1i_size,
                            assoc = options.l1i_assoc,
                            start_index_bit = block_size_bits)
        l1d_cache = L1Cache(size = options.l1d_size,
                            assoc = options.l1d_assoc,
                            start_index_bit = block_size_bits)

        l1_cntrl = L1Cache_Controller(version = i,
                                      cntrl_id = cntrl_count,
                                      L1Icache = l1i_cache,
                                      L1Dcache = l1d_cache,
                                      l2_select_num_bits = l2_bits,
                                      N_tokens = n_tokens,
                                      retry_threshold = \
                                        options.l1_retries,
                                      fixed_timeout_latency = \
                                        options.timeout_latency,
                                      dynamic_timeout_enabled = \
                                        not options.disable_dyn_timeouts,
                                      no_mig_atomic = not \
                                        options.allow_atomic_migration,
                                      send_evictions = (
                                          options.cpu_type == "detailed"),
                                      transitions_per_cycle = options.ports,
                                      ruby_system = ruby_system)

        cpu_seq = RubySequencer(version = i,
                                icache = l1i_cache,
                                dcache = l1d_cache,
                                ruby_system = ruby_system)

        l1_cntrl.sequencer = cpu_seq

        # Idiom fix: identity comparison against None.
        if piobus is not None:
            cpu_seq.pio_port = piobus.slave

        exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)

        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.append(cpu_seq)
        l1_cntrl_nodes.append(l1_cntrl)

        cntrl_count += 1

    l2_index_start = block_size_bits + l2_bits

    for i in xrange(options.num_l2caches):
        #
        # First create the Ruby objects associated with this cpu
        #
        l2_cache = L2Cache(size = options.l2_size,
                           assoc = options.l2_assoc,
                           start_index_bit = l2_index_start)

        l2_cntrl = L2Cache_Controller(version = i,
                                      cntrl_id = cntrl_count,
                                      L2cache = l2_cache,
                                      N_tokens = n_tokens,
                                      transitions_per_cycle = options.ports,
                                      ruby_system = ruby_system)

        exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
        l2_cntrl_nodes.append(l2_cntrl)
        cntrl_count += 1

    phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
    assert(phys_mem_size % options.num_dirs == 0)
    mem_module_size = phys_mem_size / options.num_dirs

    # Run each of the ruby memory controllers at a ratio of the frequency of
    # the ruby system
    # clk_divider value is a fix to pass regression.
    ruby_system.memctrl_clk_domain = DerivedClockDomain(
        clk_domain=ruby_system.clk_domain,
        clk_divider=3)

    for i in xrange(options.num_dirs):
        #
        # Create the Ruby objects associated with the directory controller
        #
        mem_cntrl = RubyMemoryControl(
            clk_domain = ruby_system.memctrl_clk_domain,
            version = i,
            ruby_system = ruby_system)

        dir_size = MemorySize('0B')
        dir_size.value = mem_module_size

        dir_cntrl = Directory_Controller(version = i,
                                         cntrl_id = cntrl_count,
                                         directory = \
                                         RubyDirectoryMemory(version = i,
                                                             use_map = options.use_map,
                                                             size = dir_size),
                                         memBuffer = mem_cntrl,
                                         l2_select_num_bits = l2_bits,
                                         transitions_per_cycle = options.ports,
                                         ruby_system = ruby_system)

        exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
        dir_cntrl_nodes.append(dir_cntrl)
        cntrl_count += 1

    for i, dma_port in enumerate(dma_ports):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system)

        dma_cntrl = DMA_Controller(version = i,
                                   cntrl_id = cntrl_count,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,
                                   ruby_system = ruby_system)

        exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
        exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
        dma_cntrl_nodes.append(dma_cntrl)
        cntrl_count += 1

    all_cntrls = l1_cntrl_nodes + \
                 l2_cntrl_nodes + \
                 dir_cntrl_nodes + \
                 dma_cntrl_nodes

    topology = create_topology(all_cntrls, options)

    return (cpu_sequencers, dir_cntrl_nodes, topology)
| |
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn import linear_model, datasets
# Module-level fixture shared by all the tests below.
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target

# TODO: use another dataset that has multiple drops
def test_simple():
    """
    Principle of Lars is to keep covariances tied and decreasing
    """
    alphas_, active, coef_path_ = linear_model.lars_path(
        diabetes.data, diabetes.target, method="lar")
    n_features = X.shape[1]
    eps = 1e-3
    for step, coef_ in enumerate(coef_path_.T):
        residual = y - np.dot(X, coef_)
        abs_cov = np.abs(np.dot(X.T, residual))
        peak = np.max(abs_cov)
        # count how many covariances are tied with the maximum
        n_tied = len(abs_cov[peak - eps < abs_cov])
        if step < n_features:
            assert n_tied == step + 1
        else:
            # no more variables than features can enter the active set
            assert n_tied == n_features
def test_simple_precomputed():
    """
    The same, with precomputed Gram matrix
    """
    gram = np.dot(diabetes.data.T, diabetes.data)
    alphas_, active, coef_path_ = linear_model.lars_path(
        diabetes.data, diabetes.target, Gram=gram, method="lar")
    n_features = X.shape[1]
    eps = 1e-3
    for step, coef_ in enumerate(coef_path_.T):
        abs_cov = np.abs(np.dot(X.T, y - np.dot(X, coef_)))
        peak = np.max(abs_cov)
        # count how many covariances are tied with the maximum
        n_tied = len(abs_cov[peak - eps < abs_cov])
        if step < n_features:
            assert n_tied == step + 1
        else:
            # no more variables than features can enter the active set
            assert n_tied == n_features
def test_lars_lstsq():
    """
    Test that Lars gives least square solution at the end
    of the path
    """
    # un-normalized dataset
    X_scaled = 3 * diabetes.data
    model = linear_model.LassoLars(alpha=0.)
    model.fit(X_scaled, y)
    expected = np.linalg.lstsq(X_scaled, y)[0]
    assert_array_almost_equal(model.coef_, expected)
def test_lasso_gives_lstsq_solution():
    """
    Test that Lars Lasso gives least square solution at the end
    of the path
    """
    _, _, coef_path_ = linear_model.lars_path(X, y, method="lasso")
    lstsq_solution = np.linalg.lstsq(X, y)[0]
    # the last point of the path is the unregularized solution
    assert_array_almost_equal(lstsq_solution, coef_path_[:, -1])
def test_collinearity():
    """Check that lars_path is robust to collinearity in input"""
    X_coll = np.array([[3., 3., 1.],
                       [2., 2., 0.],
                       [1., 1., 0]])
    y_coll = np.array([1., 0., 0])

    _, _, coef_path_ = linear_model.lars_path(X_coll, y_coll)
    # the path must stay finite and reproduce the targets at its end
    assert not np.isnan(coef_path_).any()
    assert_array_almost_equal(np.dot(X_coll, coef_path_[:, -1]), y_coll)
def test_singular_matrix():
    """
    Test when input is a singular matrix
    """
    X_singular = np.array([[1, 1.], [1., 1.]])
    y_singular = np.array([1, 1])
    _, _, coef_path = linear_model.lars_path(X_singular, y_singular)
    # only one of the two identical columns may become active
    assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0], [1, 0]])
def test_lasso_lars_vs_lasso_cd(verbose=False):
    """
    Test that LassoLars and Lasso using coordinate descent give the
    same results
    """
    X_scaled = 3 * diabetes.data

    # compare the two paths point by point
    alphas, _, path = linear_model.lars_path(X_scaled, y, method='lasso')
    cd_model = linear_model.Lasso(fit_intercept=False, tol=1e-8)
    for lars_coef, alpha in zip(path.T, alphas):
        cd_model.alpha = alpha
        cd_model.fit(X_scaled, y)
        assert np.linalg.norm(lars_coef - cd_model.coef_) < 0.01

    # similar test, with the classifiers
    for alpha in np.linspace(1e-2, 1 - 1e-2):
        clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X_scaled, y)
        clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
                                  normalize=False).fit(X_scaled, y)
        assert np.linalg.norm(clf1.coef_ - clf2.coef_) < 1e-3

    # same test, with normalized data
    alphas, _, path = linear_model.lars_path(diabetes.data, y, method='lasso')
    cd_model = linear_model.Lasso(fit_intercept=False, normalize=True,
                                  tol=1e-8)
    for lars_coef, alpha in zip(path.T, alphas):
        cd_model.alpha = alpha
        cd_model.fit(diabetes.data, y)
        assert np.linalg.norm(lars_coef - cd_model.coef_) < 0.01
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
    """
    Test that LassoLars and Lasso using coordinate descent give the
    same results when early stopping is used.
    (test : before, in the middle, and in the last part of the path)
    """
    # BUG FIX: the original loop shadowed the list ``alphas_min`` with its
    # own loop variable and always passed the hard-coded ``alpha_min=0.9``,
    # so only one stopping point was ever tested.
    for alpha_min in [10, 0.9, 1e-4]:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
        lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = np.linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert error < 0.01

    # same test, with normalization
    for alpha_min in [10, 0.9, 1e-4]:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
        lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
                                      tol=1e-8)
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = np.linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert error < 0.01
def test_lars_add_features(verbose=False):
    """
    assure that at least some features get added if necessary

    Regression test for commit 6d2b4c: fitting this fixture must not hang
    or fail to extend the active set.
    """
    # Hand-crafted 12x6 design matrix known to trigger the regression.
    linear_model.Lars(verbose=verbose, fit_intercept=True).fit(
        np.array([[ 0.02863763,  0.88144085, -0.02052429, -0.10648066, -0.06396584, -0.18338974],
                  [ 0.02038287,  0.51463335, -0.31734681, -0.12830467,  0.16870657,  0.02169503],
                  [ 0.14411476,  0.37666599,  0.2764702 ,  0.0723859 , -0.03812009,  0.03663579],
                  [-0.29411448,  0.33321005,  0.09429278, -0.10635334,  0.02827505, -0.07307312],
                  [-0.40929514,  0.57692643, -0.12559217,  0.19001991,  0.07381565, -0.0072319 ],
                  [-0.01763028,  1.        ,  0.04437242,  0.11870747,  0.1235008 , -0.27375014],
                  [-0.06482493,  0.1233536 ,  0.15686536,  0.02059646, -0.31723546,  0.42050836],
                  [-0.18806577,  0.01970053,  0.02258482, -0.03216307,  0.17196751,  0.34123213],
                  [ 0.11277307,  0.15590351,  0.11231502,  0.22009306,  0.1811108 ,  0.51456405],
                  [ 0.03228484, -0.12317732, -0.34223564,  0.08323492, -0.15770904,  0.39392212],
                  [-0.00586796,  0.04902901,  0.18020746,  0.04370165, -0.06686751,  0.50099547],
                  [-0.12951744,  0.21978613, -0.04762174, -0.27227304, -0.02722684,  0.57449581]]),
        np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]))
def test_lars_n_nonzero_coefs(verbose=False):
    """The n_nonzero_coefs constraint must be honoured exactly."""
    model = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
    model.fit(X, y)
    assert np.count_nonzero(model.coef_) == 6
def test_lars_cv():
    """ Test the LassoLarsCV object by checking that the optimal alpha
    increases as the number of samples increases.

    This property is not actually guaranteed in general; it merely holds
    for this particular dataset with the chosen sample sizes.
    """
    old_alpha = 0
    lars_cv = linear_model.LassoLarsCV()
    for length in (400, 200, 100):
        X = diabetes.data[:length]
        y = diabetes.target[:length]
        lars_cv.fit(X, y)
        # alpha must strictly exceed the one found with more samples
        np.testing.assert_array_less(old_alpha, lars_cv.alpha)
        old_alpha = lars_cv.alpha
def test_lasso_lars_ic():
    """ Test the LassoLarsIC object by checking that
    - some good features are selected.
    - alpha_bic > alpha_aic
    - n_nonzero_bic < n_nonzero_aic
    """
    model_bic = linear_model.LassoLarsIC('bic')
    model_aic = linear_model.LassoLarsIC('aic')
    rng = np.random.RandomState(42)
    # append 4 pure-noise columns to the diabetes features
    X_noisy = np.c_[diabetes.data, rng.randn(diabetes.data.shape[0], 4)]
    target = diabetes.target
    model_bic.fit(X_noisy, target)
    model_aic.fit(X_noisy, target)
    nonzero_bic = np.flatnonzero(model_bic.coef_)
    nonzero_aic = np.flatnonzero(model_aic.coef_)
    # BIC penalizes complexity harder: larger alpha, sparser model
    assert model_bic.alpha_ > model_aic.alpha_
    assert len(nonzero_bic) < len(nonzero_aic)
    # none of the noise features should survive the BIC selection
    assert np.max(nonzero_bic) < diabetes.data.shape[1]
if __name__ == '__main__':
    # Allow running this test module directly with the nose runner.
    import nose
    nose.runmodule()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import, print_function
import subprocess
import sys
import pytest
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from mock import patch, Mock, MagicMock
except ImportError:
from unittest.mock import patch, Mock
from pygmount.core.samba import (MountCifsWrapper, MountSmbShares,
InstallRequiredPackageError, run_command)
class FakeLockFailedException(Exception):
    """Stand-in for apt's LockFailedException used by the fakes below."""
    pass
class FakeCache(object):
    """Minimal stand-in for an apt cache.

    Packages are registered via item assignment with a tuple whose first
    element states whether the package is already installed.  ``commit``
    can be primed to return a value or raise an exception.
    """

    def __init__(self, commit=True):
        # package name -> Mock mimicking an apt package object
        self.packages = {}
        # value returned (or exception raised) by commit()
        self._commit = commit

    def __getitem__(self, item):
        return self.packages[item]

    def __setitem__(self, key, value):
        """
        key = package_name
        value = tuple(boolean for is_installed, ...)
        """
        package = Mock()
        package.is_installed = value[0]
        package.mark_install.return_value = True
        self.packages[key] = package

    def commit(self):
        # Raising instead of returning lets tests simulate commit failures.
        if isinstance(self._commit, Exception):
            raise self._commit
        return self._commit
# [(package, True, Exception),..]
def get_fake_apt_cache(list_packages_data=None, commit=True):
    """Build a mock apt module whose Cache() returns a FakeCache.

    list_packages_data is a list of tuples whose first element is the
    package name and whose remaining elements configure the fake package
    (see FakeCache.__setitem__).
    """
    fake_apt = Mock()
    fake_apt.LockFailedException = FakeLockFailedException
    if not list_packages_data:
        return fake_apt
    cache = FakeCache(commit=commit)
    fake_apt._cache = cache
    for entry in list_packages_data:
        cache[entry[0]] = entry[1:]
    fake_apt.cache.Cache.return_value = cache
    return fake_apt
def get_fake_configparser(config_data):
    """Return a ConfigParser-like class serving sections from config_data.

    config_data is an iterable of (section_name, options_dict) pairs.
    """
    class FakeConfigParser(object):
        def __init__(self):
            self._data = list(config_data)
        def read(self, filename):
            # Reading from disk is a no-op for the fake.
            pass
        def sections(self):
            return [section for section, _ in self._data]
        def items(self, section):
            for name, options in self._data:
                if name == section:
                    return options.items()
    return FakeConfigParser
class MountCifsWrapperTest(unittest.TestCase):
    """Unit tests for MountCifsWrapper option handling and command building."""
    def setUp(self):
        # Shared fixture values used by every wrapper under test.
        self.server = 'server.example.com'
        self.share = 'share_1'
        self.mountpoint = '/mnt/mountpoint'
    def _make_wrapper(self, **options):
        # Build a wrapper from the shared fixture values.
        return MountCifsWrapper(self.server, self.share, self.mountpoint,
                                **options)
    def _check_options(self, text_options, **kwargs):
        """Assert text_options is a '-o' clause listing every given option."""
        self.assertTrue(text_options.startswith('-o'))
        rendered = text_options.split('-o')[1].strip().split(',')
        for name in kwargs:
            # Valued options render as 'key=value'; valueless as bare 'key'.
            expected = ('{0}={1}'.format(name, kwargs[name])
                        if kwargs[name] else name)
            self.assertIn(expected, rendered)
    def test_options_setter(self):
        wrapper = self._make_wrapper()
        new_options = {'foo': 'bar'}
        wrapper.options = new_options
        self.assertEqual(wrapper._options, new_options)
    def test_options_getter_with_option_that_is_none(self):
        valueless = {'foo': None}
        wrapper = self._make_wrapper()
        wrapper.options = valueless
        self._check_options(wrapper.options, **valueless)
    def test_options_getter_with_option_with_value(self):
        valued = {'foo': 'bar'}
        wrapper = self._make_wrapper()
        wrapper.options = valued
        self._check_options(wrapper.options, **valued)
    def test_options_getter_with_option_with_value_and_none_option(self):
        mixed = {'foo': 'bar', 'foo1': None}
        wrapper = self._make_wrapper()
        wrapper.options = mixed
        self._check_options(wrapper.options, **mixed)
    def test_init_with_empty_kwargs(self):
        wrapper = self._make_wrapper()
        self.assertEqual(wrapper.command_name, 'mount')
        self.assertEqual(wrapper.filesystem_type, 'cifs')
        self.assertEqual(wrapper.server, self.server)
        self.assertEqual(wrapper.share, self.share)
        self.assertEqual(wrapper.mountpoint, self.mountpoint)
        self.assertEqual(wrapper._options, {})
    def test_service_getter(self):
        wrapper = self._make_wrapper()
        self.assertEqual(wrapper.service,
                         '//{self.server}/{self.share}'.format(self=self))
    def test_command_getter_without_options(self):
        wrapper = self._make_wrapper()
        self.assertTrue(
            wrapper.command,
            'mount -t cifs {wrapper.service} {wrapper.mountpoint}'.format(
                wrapper=wrapper))
    def test_command_getter_with_options(self):
        valueless = {'foo': None}
        wrapper = self._make_wrapper(**valueless)
        base_command = (
            'mount -t cifs {wrapper.service} {wrapper.mountpoint}'.format(
                wrapper=wrapper))
        # Whatever remains after stripping the base command must be the
        # rendered '-o' options clause.
        self._check_options(
            wrapper.command.replace(base_command, '').strip(),
            **valueless)
    def test_contains_return_true(self):
        wrapper = self._make_wrapper(foo=None)
        self.assertTrue('foo' in wrapper)
    def test_contains_return_false(self):
        wrapper = self._make_wrapper()
        self.assertFalse('fake' in wrapper)
    def test_getitem_return_option(self):
        wrapper = self._make_wrapper(foo='bar')
        self.assertEqual(wrapper['foo'], 'bar')
    def test_setitem_set_option(self):
        wrapper = self._make_wrapper()
        wrapper['foo'] = 'bar'
        self.assertIn(('foo', 'bar'), wrapper._options.items())
class RunCommandTest(unittest.TestCase):
    """Tests for run_command's dispatch between subprocess.check_output and
    subprocess.check_call, and for its (returncode, output) result tuple."""
    def setUp(self):
        self.command = 'ln -s'
    @patch('pygmount.core.samba.subprocess')
    def test_run_command_with_check_output_function(
            self, mock_subprocess):
        check_output = Mock()
        check_output.__name__ = 'check_output'
        mock_subprocess.check_output = check_output
        run_command(self.command)
        check_output.assert_called_once_with(
            self.command, stderr=mock_subprocess.STDOUT, shell=True)
    @patch('pygmount.core.samba.subprocess', autospec=True)
    def test_run_command_with_check_call_function(
            self, mock_subprocess):
        mock_subprocess.check_call.__name__ = 'check_call'
        if 'check_output' in dir(mock_subprocess):
            mock_subprocess.check_output = mock_subprocess.check_call
        run_command(self.command)
        mock_subprocess.check_call.assert_called_once_with(
            self.command, stderr=mock_subprocess.STDOUT, shell=True)
    @patch('pygmount.core.samba.subprocess')
    def test_run_command_check_output_return_tuple(
            self, mock_subprocess):
        check_output = Mock()
        check_output.__name__ = 'check_output'
        check_output.return_value = 'command output'
        mock_subprocess.check_output = check_output
        result = run_command(self.command)
        self.assertEqual(result[0], 0)
        self.assertEqual(result[1], check_output.return_value)
    @patch('pygmount.core.samba.subprocess', autospec=True)
    def test_run_command_check_call_return_tuple(
            self, mock_subprocess):
        mock_subprocess.check_call.__name__ = 'check_call'
        mock_subprocess.check_call.return_value = 0
        if 'check_output' in dir(mock_subprocess):
            mock_subprocess.check_output = mock_subprocess.check_call
        result = run_command(self.command)
        self.assertEqual(result[0], 0)
        self.assertIsNone(result[1])
    @patch('pygmount.core.samba.subprocess')
    def test_run_command_in_check_output_occurs_exception(
            self, mock_subprocess):
        # Fixed: the mock must be attached to the patched module (and carry
        # a __name__, as in the tests above), otherwise the configured
        # side_effect never fires and this method silently tested nothing.
        check_output = Mock()
        check_output.__name__ = 'check_output'
        mock_subprocess.CalledProcessError = subprocess.CalledProcessError
        check_output.side_effect = subprocess.CalledProcessError(1,
                                                                 self.command)
        mock_subprocess.check_output = check_output
        result = run_command(self.command)
        self.assertEqual(result[0], 1)
        self.assertIsNone(result[1])
    @patch('pygmount.core.samba.subprocess', autospec=True)
    def test_run_command_in_check_call_occurs_exception(
            self, mock_subprocess):
        # Renamed: this method previously had the same name as the one
        # above, so the first definition was shadowed and never ran.
        mock_subprocess.CalledProcessError = subprocess.CalledProcessError
        mock_subprocess.check_call.side_effect = subprocess.CalledProcessError(
            1, self.command)
        if 'check_output' in dir(mock_subprocess):
            mock_subprocess.check_output = mock_subprocess.check_call
        result = run_command(self.command)
        self.assertEqual(result[0], 1)
        self.assertIsNone(result[1])
class MountSmbSharesTest(unittest.TestCase):
    """Tests for MountSmbShares: required-package installation via apt and
    parsing of share definitions from a ConfigParser-style config file.

    Each parsed share is asserted to be a tuple whose elements are
    (section_name, wrapper, hook_pre_command, hook_post_command) based on
    the index accesses below — hooks are None when absent.
    """
    def test_apt_pkg_requirements_setter(self):
        mss = MountSmbShares()
        packages = ['package1', 'package2']
        mss.required_packages = packages
        self.assertIsInstance(mss._required_packages, list)
        self.assertListEqual(mss._required_packages, packages)
    def test_apt_pkg_requirements_getter_setter(self):
        mss = MountSmbShares()
        packages = ['package1', 'package2']
        mss.required_packages = packages
        self.assertListEqual(mss.required_packages, packages)
    # NOTE: the @patch replacement objects below are built once, at class
    # definition time, by calling get_fake_apt_cache(...).
    @patch('pygmount.core.samba.apt',
           get_fake_apt_cache([('package1', True)]))
    def test_install_apt_package_with_package_not_in_cache(self):
        mss = MountSmbShares()
        package = 'fake_package'
        self.assertRaises(InstallRequiredPackageError,
                          mss.install_apt_package, package)
    @patch('pygmount.core.samba.apt',
           get_fake_apt_cache([('package1', True)]))
    def test_install_apt_package_with_package_already_installed(self):
        mss = MountSmbShares()
        package = 'package1'
        self.assertIsNone(mss.install_apt_package(package))
    @patch('pygmount.core.samba.apt', get_fake_apt_cache(
        [('package1', False)], commit=FakeLockFailedException()))
    def test_install_apt_package_raise_exception_lock_failed(self):
        # A LockFailedException from apt must be wrapped in an
        # InstallRequiredPackageError carrying the original as .source.
        mss = MountSmbShares()
        package = 'package1'
        with pytest.raises(InstallRequiredPackageError) as e:
            mss.install_apt_package(package)
        self.assertIsInstance(e.value.source, FakeLockFailedException)
        self.assertEqual(str(e.value),
                         'Impossibile installare i pacchetti richiesti con un '
                         ' utente che non ha diritti amministrativi.')
    @patch('pygmount.core.samba.apt', get_fake_apt_cache(
        [('package1', False)], commit=Exception()))
    def test_install_apt_package_raise_generic_exception(self):
        mss = MountSmbShares()
        package = 'package1'
        with pytest.raises(InstallRequiredPackageError) as e:
            mss.install_apt_package(package)
        self.assertIsInstance(e.value.source, Exception)
        self.assertEqual(str(e.value),
                         'Errore genrico nell\'installazione del pacchetto'
                         ' "{package}".'.format(package=package))
    @patch('pygmount.core.samba.apt')
    @patch('pygmount.core.samba.MountSmbShares.install_apt_package')
    def test_run_failed_for_apt_lock_failed(self, mock_install_apt, mock_apt):
        """
        Test that run return 1 if into an install_apt_package method
        occurs an apt.LockFailedException exception
        """
        mss = MountSmbShares()
        mss.required_packages = ['package1']
        mock_apt.LockFailedException = FakeLockFailedException
        mock_install_apt.side_effect = (
            InstallRequiredPackageError(
                "IRPE", source=FakeLockFailedException("FLFE")))
        self.assertEqual(mss.run(), 1)
    @patch('pygmount.core.samba.MountCifsWrapper')
    def test_read_config_parser_simple(self, mock_wrapper):
        # Plain section: wrapper built from hostname/share/mountpoint,
        # no credentials, no hooks.
        data = [('absoluthe_share', {'hostname': 'server_windows.example',
                                     'share': 'condivisione',
                                     'mountpoint': '/mnt/mountpoint'})]
        with patch('pygmount.core.samba.ConfigParser',
                   get_fake_configparser(data)):
            mss = MountSmbShares()
            mss.set_shares()
        mock_wrapper.assert_called_once_with(data[0][1]['hostname'],
                                             data[0][1]['share'],
                                             data[0][1]['mountpoint'])
        self.assertEqual(len(mss.shares), 1)
        self.assertEqual(mss.shares[0][0], data[0][0])
        self.assertIsNone(mss.shares[0][2])
        self.assertIsNone(mss.shares[0][3])
    @patch('pygmount.core.samba.MountCifsWrapper')
    def test_read_config_parser_with_only_username(self, mock_wrapper):
        # 'user@host' in the hostname field becomes a username kwarg.
        username = 'user1'
        hostname = 'server_windows.example'
        data = [('absoluthe_share',
                 {'hostname': username + '@' + hostname,
                  'share': 'condivisione',
                  'mountpoint': '/mnt/mountpoint'})]
        with patch('pygmount.core.samba.ConfigParser',
                   get_fake_configparser(data)):
            mss = MountSmbShares()
            mss.set_shares()
        mock_wrapper.assert_called_once_with(hostname,
                                             data[0][1]['share'],
                                             data[0][1]['mountpoint'],
                                             username=username)
        self.assertEqual(len(mss.shares), 1)
        self.assertEqual(mss.shares[0][0], data[0][0])
    @patch('pygmount.core.samba.MountCifsWrapper')
    def test_read_config_parser_with_only_username_and_password(
            self, mock_wrapper):
        # 'user:pass@host' yields both username and password kwargs.
        username = 'user1'
        password = 'password'
        hostname = 'server_windows.example'
        data = [('absoluthe_share',
                 {'hostname': username + ':' + password + '@' + hostname,
                  'share': 'condivisione',
                  'mountpoint': '/mnt/mountpoint'})]
        with patch('pygmount.core.samba.ConfigParser',
                   get_fake_configparser(data)):
            mss = MountSmbShares()
            mss.set_shares()
        mock_wrapper.assert_called_once_with(hostname,
                                             data[0][1]['share'],
                                             data[0][1][
                                                 'mountpoint'],
                                             username=username,
                                             password=password)
        self.assertEqual(len(mss.shares), 1)
        self.assertEqual(mss.shares[0][0], data[0][0])
    @patch('pygmount.core.samba.MountCifsWrapper')
    def test_read_config_parser_with_options(self, mock_wrapper):
        # Extra config keys are forwarded to the wrapper as mount options.
        options = {'option_1': 'option_1', 'option_2': 'option_2'}
        data = [
            ('absoluthe_share', {'hostname': 'server_windows.example',
                                 'share': 'condivisione',
                                 'mountpoint': '/mnt/mountpoint'})]
        data[0][1].update(options)
        with patch('pygmount.core.samba.ConfigParser',
                   get_fake_configparser(data)):
            mss = MountSmbShares()
            mss.set_shares()
        mock_wrapper.assert_called_once_with(
            data[0][1]['hostname'],
            data[0][1]['share'],
            data[0][1]['mountpoint'],
            **options)
        self.assertEqual(len(mss.shares), 1)
        self.assertEqual(mss.shares[0][0], data[0][0])
    @patch('pygmount.core.samba.MountCifsWrapper')
    def test_read_config_parser_with_hook_pre_command(self, mock_wrapper):
        # hook_pre_command is not passed to the wrapper; it is stored at
        # index 2 of the share tuple.
        hook = {'hook_pre_command': 'ls -l'}
        data = [('absoluthe_share', {'hostname': 'server_windows.example',
                                     'share': 'condivisione',
                                     'mountpoint': '/mnt/mountpoint'})]
        data[0][1].update(hook)
        with patch('pygmount.core.samba.ConfigParser',
                   get_fake_configparser(data)):
            mss = MountSmbShares()
            mss.set_shares()
        mock_wrapper.assert_called_once_with(data[0][1]['hostname'],
                                             data[0][1]['share'],
                                             data[0][1]['mountpoint'])
        self.assertEqual(len(mss.shares), 1)
        self.assertEqual(mss.shares[0][2], list(hook.values())[0])
    @patch('pygmount.core.samba.MountCifsWrapper')
    def test_read_config_parser_with_hook_post_command(self, mock_wrapper):
        # hook_post_command is stored at index 3 of the share tuple.
        hook = {'hook_post_command': 'ls -l'}
        data = [
            ('absoluthe_share', {'hostname': 'server_windows.example',
                                 'share': 'condivisione',
                                 'mountpoint': '/mnt/mountpoint'})]
        data[0][1].update(hook)
        with patch('pygmount.core.samba.ConfigParser',
                   get_fake_configparser(data)):
            mss = MountSmbShares()
            mss.set_shares()
        mock_wrapper.assert_called_once_with(
            data[0][1]['hostname'],
            data[0][1]['share'],
            data[0][1]['mountpoint'])
        self.assertEqual(len(mss.shares), 1)
        self.assertEqual(mss.shares[0][3], list(hook.values())[0])
| |
from urllib.parse import unquote
import re
import os
import glob
import time
import shutil
import tempfile
import logging
from math import floor
import lxml.etree
import collections
from indra.databases import go_client, mesh_client
from indra.statements import *
from indra.databases.chebi_client import get_chebi_id_from_cas, \
get_chebi_name_from_id
from indra.databases.hgnc_client import get_hgnc_from_entrez, get_uniprot_id, \
get_hgnc_name
from indra.util import read_unicode_csv
from indra.sources.reach.processor import ReachProcessor, Site
from .fix_csxml_character_encoding import fix_character_encoding
logger = logging.getLogger(__name__)
# A tagged entity extracted from a Medscan sentence: its matched text,
# Medscan URN, type string, a dict of properties, and the character
# start/end coordinates of the mention within the sentence.
MedscanEntity = collections.namedtuple('MedscanEntity', ['name', 'urn', 'type',
                                                         'properties',
                                                         'ch_start', 'ch_end'])
# A property attached to a MedscanEntity (e.g. a Protein or Mutation record).
MedscanProperty = collections.namedtuple('MedscanProperty',
                                         ['type', 'name', 'urn'])
def _read_famplex_map():
    """Load the FamPlex grounding map from the bundled TSV resource.

    Returns
    -------
    dict
        Mapping of (source_ns, source_id) tuples to FamPlex identifiers.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    fname = os.path.join(here, '../../resources/famplex_map.tsv')
    mapping = {}
    for row in read_unicode_csv(fname, delimiter='\t'):
        mapping[(row[0], row[1])] = row[2]
    return mapping
# Module-level FamPlex grounding map, loaded once at import time.
famplex_map = _read_famplex_map()
def _fix_different_refs(a1, a2, ref_key):
if all(ref_key in a.db_refs for a in [a1, a2]) \
and a1.db_refs[ref_key] != a2.db_refs[ref_key]:
a1.name = a1.db_refs[ref_key]
a2.name = a2.db_refs[ref_key]
return True
return False
def _is_statement_in_list(new_stmt, old_stmt_list):
    """Return True if given statement is equivalent to one in a list

    Determines whether the statement is equivalent to any statement in the
    given list of statements, with equivalency determined by Statement's
    equals method.  As a side effect, an existing statement's agents may be
    enriched with db_refs from the new statement, or renamed when two
    different CHEBI/UMLS groundings collide.

    Parameters
    ----------
    new_stmt : indra.statements.Statement
        The statement to compare with
    old_stmt_list : list[indra.statements.Statement]
        The statement list whose entries we compare with statement

    Returns
    -------
    in_list : bool
        True if statement is equivalent to any statements in the list
    """
    for old_stmt in old_stmt_list:
        if old_stmt.equals(new_stmt):
            return True
        elif old_stmt.evidence_equals(new_stmt) and old_stmt.matches(new_stmt):
            # If we're comparing a complex, make sure the agents are sorted.
            if isinstance(new_stmt, Complex):
                agent_pairs = zip(old_stmt.sorted_members(),
                                  new_stmt.sorted_members())
            else:
                agent_pairs = zip(old_stmt.agent_list(), new_stmt.agent_list())
            # Compare agent-by-agent.
            for ag_old, ag_new in agent_pairs:
                s_old = set(ag_old.db_refs.items())
                s_new = set(ag_new.db_refs.items())
                # If they're equal this isn't the one we're interested in.
                if s_old == s_new:
                    continue
                # If the new statement has nothing new to offer, just ignore it
                if s_old > s_new:
                    return True
                # If the new statement does have something new, add it to the
                # existing statement. And then ignore it.
                if s_new > s_old:
                    ag_old.db_refs.update(ag_new.db_refs)
                    return True
                # If this is a case where different CHEBI ids were mapped to
                # the same entity, set the agent name to the CHEBI id.
                if _fix_different_refs(ag_old, ag_new, 'CHEBI'):
                    # Check to make sure the newly described statement does
                    # not match anything.  (Recursion: renaming may have
                    # changed whether new_stmt matches other entries.)
                    return _is_statement_in_list(new_stmt, old_stmt_list)
                # If this is a case, like above, but with UMLS IDs, do the same
                # thing as above. This will likely never be improved.
                if _fix_different_refs(ag_old, ag_new, 'UMLS'):
                    # Check to make sure the newly described statement does
                    # not match anything.
                    return _is_statement_in_list(new_stmt, old_stmt_list)
                logger.warning("Found an unexpected kind of duplicate. "
                               "Ignoring it.")
                return True
            # This means all the agents matched, which can happen if the
            # original issue was the ordering of agents in a Complex.
            return True
        elif old_stmt.get_hash(True, True) == new_stmt.get_hash(True, True):
            # Check to see if we can improve the annotation of the existing
            # statement.
            e_old = old_stmt.evidence[0]
            e_new = new_stmt.evidence[0]
            if e_old.annotations['last_verb'] is None:
                e_old.annotations['last_verb'] = e_new.annotations['last_verb']
            # If the evidence is "the same", modulo annotations, just ignore it
            if e_old.get_source_hash(True) == e_new.get_source_hash(True):
                return True
    return False
class ProteinSiteInfo(object):
    """Represent a site on a protein, extracted from a StateEffect event.

    Parameters
    ----------
    site_text : str
        The site as a string (ex. S22)
    object_text : str
        The protein being modified, as the string that appeared in the
        original sentence
    """
    def __init__(self, site_text, object_text):
        self.site_text = site_text
        self.object_text = object_text

    def get_sites(self):
        """Parse the site-text string and return a list of sites.

        Returns
        -------
        sites : list[Site]
            A list of position-residue pairs corresponding to the site-text
        """
        text = self.site_text
        # Strip at most one occurrence of each trailing decoration, in
        # this fixed order.
        for trailing in (' residue', ' residues', ',', '/'):
            if text.endswith(trailing):
                text = text[:-len(trailing)]
        assert(not text.endswith(','))
        # Drop parentheses and normalize 'or' lists to 'and' lists so a
        # single split covers both conjunctions.
        text = text.replace('(', '').replace(')', '')
        text = text.replace(' or ', ' and ')
        sites = []
        for chunk in text.split(' and '):
            if chunk.endswith(','):
                chunk = chunk[:-1]
            chunk = chunk.strip()
            if chunk:
                sites.extend(ReachProcessor._parse_site_text(chunk))
        return sites
# These normalized verbs are mapped to IncreaseAmount statements
INCREASE_AMOUNT_VERBS = ['ExpressionControl-positive',
                         'MolSynthesis-positive',
                         'CellExpression',
                         'QuantitativeChange-positive',
                         'PromoterBinding']
# These normalized verbs are mapped to DecreaseAmount statements
DECREASE_AMOUNT_VERBS = ['ExpressionControl-negative',
                         'MolSynthesis-negative',
                         'miRNAEffect-negative',
                         'QuantitativeChange-negative']
# These normalized verbs are mapped to Activation statements (indirect)
ACTIVATION_VERBS = ['UnknownRegulation-positive',
                    'Regulation-positive']
# These normalized verbs are mapped to Activation statements (direct)
D_ACTIVATION_VERBS = ['DirectRegulation-positive',
                      'DirectRegulation-positive--direct interaction']
# All activation verbs
ALL_ACTIVATION_VERBS = ACTIVATION_VERBS + D_ACTIVATION_VERBS
# These normalized verbs are mapped to Inhibition statements (indirect)
INHIBITION_VERBS = ['UnknownRegulation-negative',
                    'Regulation-negative']
# These normalized verbs are mapped to Inhibition statements (direct)
D_INHIBITION_VERBS = ['DirectRegulation-negative',
                      'DirectRegulation-negative--direct interaction']
# All inhibition verbs
ALL_INHIBITION_VERBS = INHIBITION_VERBS + D_INHIBITION_VERBS
# Extracts the numeric PMID from a document URI like 'info:pmid/12345'.
# Raw string: '\d' in a plain literal is an invalid escape sequence.
PMID_PATT = re.compile(r'info:pmid/(\d+)')
class MedscanProcessor(object):
    """Processes Medscan data into INDRA statements.

    The special StateEffect event conveys information about the binding
    site of a protein modification. Sometimes this is paired with additional
    event information in a separate SVO. When we encounter a StateEffect, we
    don't process into an INDRA statement right away, but instead store
    the site information and use it if we encounter a ProtModification
    event within the same sentence.

    Attributes
    ----------
    statements : list[indra.statements.Statement]
        A list of extracted INDRA statements
    sentence_statements : list[indra.statements.Statement]
        A list of statements for the sentence we are currently processing.
        Deduplicated and added to the main statement list when we finish
        processing a sentence.
    num_entities : int
        The total number of subject or object entities the processor attempted
        to resolve
    num_entities_not_found : int
        The number of subject or object IDs which could not be resolved by
        looking in the list of entities or tagged phrases.
    last_site_info_in_sentence : ProteinSiteInfo
        Stored protein site info from the last StateEffect event within the
        sentence, allowing us to combine information from StateEffect and
        ProtModification events within a single sentence in a single INDRA
        statement. This is reset at the end of each sentence
    """
def __init__(self):
self.statements = []
self.sentence_statements = []
self.num_entities_not_found = 0
self.num_entities = 0
self.last_site_info_in_sentence = None
self.files_processed = 0
self._gen = None
self._tmp_dir = None
self._pmids_handled = set()
self._sentences_handled = set()
self.__f = None
return
def iter_statements(self, populate=True):
if self._gen is None and not self.statements:
raise InputError("No generator has been initialized. Use "
"`process_directory` or `process_file` first.")
if self.statements and not self._gen:
for stmt in self.statements:
yield stmt
else:
for stmt in self._gen:
if populate:
self.statements.append(stmt)
yield stmt
def process_directory(self, directory_name, lazy=False):
# Process each file
glob_pattern = os.path.join(directory_name, '*.csxml')
files = glob.glob(glob_pattern)
self._gen = self._iter_over_files(files)
if not lazy:
for stmt in self._gen:
self.statements.append(stmt)
return
    def _iter_over_files(self, files):
        """Generator yielding statements from each csxml file in `files`.

        Each file is first rewritten with normalized character encoding
        into a shared temp file, then parsed; the temp directory is removed
        only once the generator is exhausted.

        NOTE(review): this method assigns the name-mangled self.__tmp_dir,
        while __init__ initializes self._tmp_dir — confirm which attribute
        name is intended.
        """
        # Create temporary directory into which to put the csxml files with
        # normalized character encodings
        self.__tmp_dir = tempfile.mkdtemp('indra_medscan_processor')
        tmp_file = os.path.join(self.__tmp_dir, 'fixed_char_encoding')
        num_files = float(len(files))
        percent_done = 0
        start_time_s = time.time()
        logger.info("%d files to read" % int(num_files))
        for filename in files:
            logger.info('Processing %s' % filename)
            fix_character_encoding(filename, tmp_file)
            # The open handle is stored on self so that the per-file parser
            # can read from (and report the name of) the current file.
            with open(tmp_file, 'rb') as self.__f:
                for stmt in self._iter_through_csxml_file_from_handle():
                    yield stmt
            # Log progress whenever the completed percentage advances.
            percent_done_now = floor(100.0 * self.files_processed / num_files)
            if percent_done_now > percent_done:
                percent_done = percent_done_now
                ellapsed_s = time.time() - start_time_s
                ellapsed_min = ellapsed_s / 60.0
                msg = 'Processed %d of %d files (%f%% complete, %f minutes)' % \
                      (self.files_processed, num_files, percent_done,
                       ellapsed_min)
                logger.info(msg)
        # Delete the temporary directory
        shutil.rmtree(self.__tmp_dir)
        return
    def process_csxml_file(self, filename, interval=None, lazy=False):
        """Processes a filehandle to MedScan csxml input into INDRA
        statements.

        The CSXML format consists of a top-level `<batch>` root element
        containing a series of `<doc>` (document) elements, in turn containing
        `<sec>` (section) elements, and in turn containing `<sent>` (sentence)
        elements.

        Within the `<sent>` element, a series of additional elements appear in
        the following order:

        * `<toks>`, which contains a tokenized form of the sentence in its text
          attribute
        * `<textmods>`, which describes any preprocessing/normalization done to
          the underlying text
        * `<match>` elements, each of which contains one of more `<entity>`
          elements, describing entities in the text with their identifiers.
          The local IDs of each entities are given in the `msid` attribute of
          this element; these IDs are then referenced in any subsequent SVO
          elements.
        * `<svo>` elements, representing subject-verb-object triples. SVO
          elements with a `type` attribute of `CONTROL` represent normalized
          regulation relationships; they often represent the normalized
          extraction of the immediately preceding (but unnormalized SVO
          element). However, in some cases there can be a "CONTROL" SVO
          element without its parent immediately preceding it.

        Parameters
        ----------
        filename : string
            The path to a Medscan csxml file.
        interval : (start, end) or None
            Select the interval of documents to read, starting with the
            `start`th document and ending before the `end`th document. If
            either is None, the value is considered undefined. If the value
            exceeds the bounds of available documents, it will simply be
            ignored.
        lazy : bool
            If True, only create a generator which can be used by the
            `get_statements` method. If False, populate the statements
            list now.
        """
        if interval is None:
            interval = (None, None)
        # NOTE(review): tempfile.mktemp is deprecated and race-prone;
        # tempfile.mkstemp would be safer — confirm before changing.
        tmp_fname = tempfile.mktemp(os.path.basename(filename))
        fix_character_encoding(filename, tmp_fname)
        self.__f = open(tmp_fname, 'rb')
        self._gen = self._iter_through_csxml_file_from_handle(*interval)
        if not lazy:
            for stmt in self._gen:
                self.statements.append(stmt)
        return
def _iter_through_csxml_file_from_handle(self, start=None, stop=None):
pmid = None
sec = None
tagged_sent = None
doc_idx = 0
entities = {}
match_text = None
in_prop = False
last_relation = None
property_entities = []
property_name = None
# Go through the document again and extract statements
good_relations = []
skipping_doc = False
skipping_sent = False
for event, elem in lxml.etree.iterparse(self.__f,
events=('start', 'end'),
encoding='utf-8',
recover=True):
if elem.tag in ['attr', 'toks']:
continue
# If opening up a new doc, set the PMID
if event == 'start' and elem.tag == 'doc':
if start is not None and doc_idx < start:
logger.info("Skipping document number %d." % doc_idx)
skipping_doc = True
continue
if stop is not None and doc_idx >= stop:
logger.info("Reach the end of the allocated docs.")
break
uri = elem.attrib.get('uri')
re_pmid = PMID_PATT.match(uri)
if re_pmid is None:
logger.warning("Could not extract pmid from: %s." % uri)
skipping_doc = True
pmid = re_pmid.group(1)
pmid_num = int(pmid)
if pmid_num in self._pmids_handled:
logger.warning("Skipping repeated pmid: %s from %s."
% (pmid, self.__f.name))
skipping_doc = True
# If getting a section, set the section type
elif event == 'start' and elem.tag == 'sec' and not skipping_doc:
sec = elem.attrib.get('type')
# Set the sentence context
elif event == 'start' and elem.tag == 'sent' and not skipping_doc:
tagged_sent = elem.attrib.get('msrc')
h = hash(tagged_sent)
if h in self._sentences_handled:
skipping_sent = True
continue
skipping_sent = False
# Reset last_relation between sentences, since we will only be
# interested in the relation immediately preceding a CONTROL
# statement but within the same sentence.
last_relation = None
entities = {}
elif event == 'end' and elem.tag == 'sent' and not skipping_doc \
and not skipping_sent:
# End of sentence; deduplicate and copy statements from this
# sentence to the main statements list
for s in self.sentence_statements:
yield s
self.sentence_statements = []
self._sentences_handled.add(h)
good_relations = []
# Reset site info
self.last_site_info_in_sentence = None
elif event == 'start' and elem.tag == 'match' and not skipping_doc\
and not skipping_sent:
match_text = elem.attrib.get('chars')
match_start = int(elem.attrib.get('coff'))
match_end = int(elem.attrib.get('clen')) + match_start
elif event == 'start' and elem.tag == 'entity' \
and not skipping_doc and not skipping_sent:
if not in_prop:
ent_id = elem.attrib['msid']
ent_urn = elem.attrib.get('urn')
ent_type = elem.attrib['type']
entities[ent_id] = MedscanEntity(match_text, ent_urn,
ent_type, {},
match_start, match_end)
else:
ent_type = elem.attrib['type']
ent_urn = elem.attrib['urn']
ent_name = elem.attrib['name']
property_entities.append(MedscanEntity(ent_name, ent_urn,
ent_type, None,
None, None))
elif event == 'start' and elem.tag == 'svo' and not skipping_doc \
and not skipping_sent:
subj = elem.attrib.get('subj')
verb = elem.attrib.get('verb')
obj = elem.attrib.get('obj')
svo_type = elem.attrib.get('type')
# Aggregate information about the relation
relation = MedscanRelation(pmid=pmid, sec=sec, uri=uri,
tagged_sentence=tagged_sent,
entities=entities, subj=subj,
verb=verb, obj=obj,
svo_type=svo_type)
if svo_type == 'CONTROL':
good_relations.append(relation)
self.process_relation(relation, last_relation)
else:
# Sometimes a CONTROL SVO can be after an unnormalized SVO
# that is a more specific but less uniform version of the
# same extracted statement.
last_relation = relation
elif event == 'start' and elem.tag == 'prop' and not skipping_doc \
and not skipping_sent:
in_prop = True
property_name = elem.attrib.get('name')
property_entities = []
elif event == 'end' and elem.tag == 'prop' and not skipping_doc \
and not skipping_sent:
in_prop = False
entities[ent_id].properties[property_name] = property_entities
elif event == 'end' and elem.tag == 'doc':
doc_idx += 1
# Give a status update
if doc_idx % 100 == 0:
logger.info("Processed %d documents" % doc_idx)
self._pmids_handled.add(pmid_num)
self._sentences_handled = set()
# Solution for memory leak found here:
# https://stackoverflow.com/questions/12160418/why-is-lxml-etree-iterparse-eating-up-all-my-memory?lq=1
elem.clear()
self.files_processed += 1
self.__f.close()
return
def _add_statement(self, stmt):
if not _is_statement_in_list(stmt, self.sentence_statements):
self.sentence_statements.append(stmt)
return
def process_relation(self, relation, last_relation):
"""Process a relation into an INDRA statement.
Parameters
----------
relation : MedscanRelation
The relation to process (a CONTROL svo with normalized verb)
last_relation : MedscanRelation
The relation immediately proceding the relation to process within
the same sentence, or None if there are no preceding relations
within the same sentence. This proceeding relation, if available,
will refer to the same interaction but with an unnormalized
(potentially more specific) verb, and is used when processing
protein modification events.
"""
subj_res = self.agent_from_entity(relation, relation.subj)
obj_res = self.agent_from_entity(relation, relation.obj)
if subj_res is None or obj_res is None:
# Don't extract a statement if the subject or object cannot
# be resolved
return
subj, subj_bounds = subj_res
obj, obj_bounds = obj_res
# Make evidence object
untagged_sentence = _untag_sentence(relation.tagged_sentence)
if last_relation:
last_verb = last_relation.verb
else:
last_verb = None
# Get the entity information with the character coordinates
annotations = {'verb': relation.verb, 'last_verb': last_verb,
'agents': {'coords': [subj_bounds, obj_bounds]}}
epistemics = dict()
epistemics['direct'] = False # Overridden later if needed
ev = [Evidence(source_api='medscan', source_id=relation.uri,
pmid=relation.pmid, text=untagged_sentence,
annotations=annotations, epistemics=epistemics)]
if relation.verb in INCREASE_AMOUNT_VERBS:
# If the normalized verb corresponds to an IncreaseAmount statement
# then make one
self._add_statement(IncreaseAmount(subj, obj, evidence=ev))
elif relation.verb in DECREASE_AMOUNT_VERBS:
# If the normalized verb corresponds to a DecreaseAmount statement
# then make one
self._add_statement(DecreaseAmount(subj, obj, evidence=ev))
elif relation.verb in ALL_ACTIVATION_VERBS:
# If the normalized verb corresponds to an Activation statement,
# then make one
if relation.verb in D_ACTIVATION_VERBS:
ev[0].epistemics['direction'] = True
self._add_statement(Activation(subj, obj, evidence=ev))
elif relation.verb in ALL_INHIBITION_VERBS:
# If the normalized verb corresponds to an Inhibition statement,
# then make one
if relation.verb in D_INHIBITION_VERBS:
ev[0].epistemics['direct'] = True
self._add_statement(Inhibition(subj, obj, evidence=ev))
elif relation.verb == 'ProtModification':
# The normalized verb 'ProtModification' is too vague to make
# an INDRA statement. We look at the unnormalized verb in the
# previous svo element, if available, to decide what type of
# INDRA statement to construct.
if last_relation is None:
# We cannot make a statement unless we have more fine-grained
# information on the relation type from a preceding
# unnormalized SVO
return
# Map the unnormalized verb to an INDRA statement type
if last_relation.verb == 'TK{phosphorylate}':
statement_type = Phosphorylation
elif last_relation.verb == 'TK{dephosphorylate}':
statement_type = Dephosphorylation
elif last_relation.verb == 'TK{ubiquitinate}':
statement_type = Ubiquitination
elif last_relation.verb == 'TK{acetylate}':
statement_type = Acetylation
elif last_relation.verb == 'TK{methylate}':
statement_type = Methylation
elif last_relation.verb == 'TK{deacetylate}':
statement_type = Deacetylation
elif last_relation.verb == 'TK{demethylate}':
statement_type = Demethylation
elif last_relation.verb == 'TK{hyperphosphorylate}':
statement_type = Phosphorylation
elif last_relation.verb == 'TK{hydroxylate}':
statement_type = Hydroxylation
elif last_relation.verb == 'TK{sumoylate}':
statement_type = Sumoylation
elif last_relation.verb == 'TK{palmitoylate}':
statement_type = Palmitoylation
elif last_relation.verb == 'TK{glycosylate}':
statement_type = Glycosylation
elif last_relation.verb == 'TK{ribosylate}':
statement_type = Ribosylation
elif last_relation.verb == 'TK{deglycosylate}':
statement_type = Deglycosylation
elif last_relation.verb == 'TK{myristylate}':
statement_type = Myristoylation
elif last_relation.verb == 'TK{farnesylate}':
statement_type = Farnesylation
elif last_relation.verb == 'TK{desumoylate}':
statement_type = Desumoylation
elif last_relation.verb == 'TK{geranylgeranylate}':
statement_type = Geranylgeranylation
elif last_relation.verb == 'TK{deacylate}':
statement_type = Deacetylation
else:
# This unnormalized verb is not handled, do not extract an
# INDRA statement
return
obj_text = obj.db_refs['TEXT']
last_info = self.last_site_info_in_sentence
if last_info is not None and obj_text == last_info.object_text:
for site in self.last_site_info_in_sentence.get_sites():
r = site.residue
p = site.position
s = statement_type(subj, obj, residue=r, position=p,
evidence=ev)
self._add_statement(s)
else:
self._add_statement(statement_type(subj, obj, evidence=ev))
elif relation.verb == 'Binding':
# The Binding normalized verb corresponds to the INDRA Complex
# statement.
self._add_statement(
Complex([subj, obj], evidence=ev)
)
elif relation.verb == 'ProtModification-negative':
pass # TODO? These occur so infrequently so maybe not worth it
elif relation.verb == 'Regulation-unknown':
pass # TODO? These occur so infrequently so maybe not worth it
elif relation.verb == 'StateEffect-positive':
pass
# self._add_statement(
# ActiveForm(subj, obj, evidence=ev)
# )
# TODO: disabling for now, since not sure whether we should set
# the is_active flag
elif relation.verb == 'StateEffect':
self.last_site_info_in_sentence = \
ProteinSiteInfo(site_text=subj.name,
object_text=obj.db_refs['TEXT'])
return
def agent_from_entity(self, relation, entity_id):
    """Create a (potentially grounded) INDRA Agent object from a given
    Medscan entity describing the subject or object.

    Uses helper functions to convert a Medscan URN to an INDRA db_refs
    grounding dictionary.

    If the entity has properties indicating that it is a protein with
    a mutation or modification, then constructs the needed ModCondition
    or MutCondition.

    Parameters
    ----------
    relation : MedscanRelation
        The current relation being processed
    entity_id : str
        The ID of the entity to process

    Returns
    -------
    tuple or None
        A tuple (agent, bounds) where agent is a potentially grounded
        indra.statements.Agent and bounds is a (start, stop) character
        range in the untagged sentence; None if the entity cannot be
        processed.
    """
    if entity_id is None:
        return None
    # Extract sentence tags mapping ids to the text. We refer to this
    # mapping only if the entity doesn't appear in the grounded entity
    # list. (Done after the None check so we don't parse the sentence
    # for entities we will not process.)
    tags = _extract_sentence_tags(relation.tagged_sentence)
    self.num_entities += 1
    entity_id = _extract_id(entity_id)
    if entity_id not in relation.entities and \
            entity_id not in tags:
        # Could not find the entity in either the list of grounded
        # entities or the items tagged in the sentence. Happens for
        # a very small percentage of the dataset.
        self.num_entities_not_found += 1
        return None
    if entity_id not in relation.entities:
        # The entity is not in the grounded entity list.
        # Instead, make an ungrounded entity, with TEXT corresponding to
        # the words with the given entity id tagged in the sentence.
        entity_data = tags[entity_id]
        db_refs = {'TEXT': entity_data['text']}
        ag = Agent(normalize_medscan_name(db_refs['TEXT']),
                   db_refs=db_refs)
        return ag, entity_data['bounds']
    entity = relation.entities[entity_id]
    bounds = (entity.ch_start, entity.ch_end)
    prop = entity.properties
    if len(prop) == 2 and 'Protein' in prop and 'Mutation' in prop:
        # Handle the special case where the entity is a protein
        # with a mutation or modification, with those details
        # described in the entity properties
        protein = prop['Protein']
        assert len(protein) == 1
        protein = protein[0]
        mutation = prop['Mutation']
        assert len(mutation) == 1
        mutation = mutation[0]
        db_refs, db_name = _urn_to_db_refs(protein.urn)
        if db_refs is None:
            return None
        db_refs['TEXT'] = protein.name
        agent_name = db_refs['TEXT'] if db_name is None else db_name
        # Check mutation.type. Only some types correspond to situations
        # that can be represented in INDRA; return None if we cannot
        # map to an INDRA statement (which will block processing of
        # the statement in process_relation).
        if mutation.type == 'AASite':
            # Do not handle this
            # Example:
            # MedscanEntity(name='D1', urn='urn:agi-aa:D1',
            #               type='AASite', properties=None)
            return None
        elif mutation.type == 'Mutation':
            # Convert mutation properties to an INDRA MutCondition
            r_old, pos, r_new = _parse_mut_string(mutation.name)
            if r_old is None:
                logger.warning('Could not parse mutation string: ' +
                               mutation.name)
                # Don't create an agent
                return None
            try:
                cond = MutCondition(pos, r_old, r_new)
                ag = Agent(normalize_medscan_name(agent_name),
                           db_refs=db_refs, mutations=[cond])
                return ag, bounds
            # Narrowed from BaseException so that KeyboardInterrupt
            # and SystemExit still propagate.
            except Exception:
                logger.warning('Could not parse mutation ' +
                               'string: ' + mutation.name)
                return None
        elif mutation.type == 'MethSite':
            # Convert methylation site information to an INDRA
            # ModCondition
            # Example:
            # MedscanEntity(name='R457',
            #               urn='urn:agi-s-llid:R457-2185', type='MethSite',
            #               properties=None)
            res, pos = _parse_mod_string(mutation.name)
            if res is None:
                return None
            cond = ModCondition('methylation', res, pos)
            ag = Agent(normalize_medscan_name(agent_name),
                       db_refs=db_refs, mods=[cond])
            return ag, bounds
        elif mutation.type == 'PhosphoSite':
            # Convert phosphorylation site information to an INDRA
            # ModCondition
            # Example:
            # MedscanEntity(name='S455',
            #               urn='urn:agi-s-llid:S455-47', type='PhosphoSite',
            #               properties=None)
            res, pos = _parse_mod_string(mutation.name)
            if res is None:
                return None
            cond = ModCondition('phosphorylation', res, pos)
            ag = Agent(normalize_medscan_name(agent_name),
                       db_refs=db_refs, mods=[cond])
            return ag, bounds
        elif mutation.type == 'Lysine':
            # Ambiguous whether this is a methylation or
            # demethylation; skip
            # Example:
            # MedscanEntity(name='K150',
            #               urn='urn:agi-s-llid:K150-5624', type='Lysine',
            #               properties=None)
            return None
        else:
            logger.warning('Processor currently cannot process ' +
                           'mutations of type ' + mutation.type)
            # Explicitly signal that no agent could be constructed
            # (previously fell off the end of the function).
            return None
    else:
        # Handle the more common case where we just ground the entity
        # without mutation or modification information
        db_refs, db_name = _urn_to_db_refs(entity.urn)
        if db_refs is None:
            return None
        db_refs['TEXT'] = entity.name
        agent_name = db_refs['TEXT'] if db_name is None else db_name
        ag = Agent(normalize_medscan_name(agent_name),
                   db_refs=db_refs)
        return ag, bounds
class MedscanRelation(object):
    """Container for one Medscan SVO (subject-verb-object) extraction.

    Bundles the relation triple together with the entities and the tagged
    sentence required to interpret it.

    Attributes
    ----------
    pmid : str
        The URI of the current document (such as a PMID)
    uri : str
        URI of the source document -- TODO confirm exact semantics vs pmid
    sec : str
        The section of the document the relation occurs in
    entities : dict
        A dictionary mapping entity IDs from the same sentence to
        MedscanEntity objects.
    tagged_sentence : str
        The sentence from which the relation was extracted, with some tagged
        phrases and annotations.
    subj : str
        The entity ID of the subject
    verb : str
        The verb in the relationship between the subject and the object
    obj : str
        The entity ID of the object
    svo_type : str
        The type of SVO relationship (for example, CONTROL indicates
        that the verb is normalized)
    """
    def __init__(self, pmid, uri, sec, entities, tagged_sentence, subj, verb, obj,
                 svo_type):
        # Document-level provenance
        self.pmid = pmid
        self.uri = uri
        self.sec = sec
        # Sentence-level context
        self.entities = entities
        self.tagged_sentence = tagged_sentence
        # The relation triple itself
        self.subj = subj
        self.verb = verb
        self.obj = obj
        self.svo_type = svo_type
def normalize_medscan_name(name):
    """Strip trailing "complex" markers from a Medscan agent name.

    Removes the " complex" suffix at most twice (covering both "X complex"
    and "X complex complex") so that the name better corresponds with the
    grounding map.

    Parameters
    ----------
    name: str
        The Medscan agent name

    Returns
    -------
    norm_name: str
        The name with up to two trailing " complex" suffixes removed.
    """
    trimmed = name
    marker = ' complex'
    # At most two rounds: "complex complex" is the deepest nesting seen.
    for _attempt in (1, 2):
        if trimmed.endswith(marker):
            trimmed = trimmed[:-len(marker)]
    return trimmed
MOD_PATT = re.compile('([A-Za-z])+([0-9]+)')
def _parse_mod_string(s):
"""Parses a string referring to a protein modification of the form
(residue)(position), such as T47.
Parameters
----------
s : str
A string representation of a protein residue and position being
modified
Returns
-------
residue : str
The residue being modified (example: T)
position : str
The position at which the modification is happening (example: 47)
"""
m = MOD_PATT.match(s)
assert m is not None
return m.groups()
MUT_PATT = re.compile('([A-Za-z]+)([0-9]+)([A-Za-z]+)')
def _parse_mut_string(s):
"""
A string representation of a protein mutation of the form
(old residue)(position)(new residue). Example: T34U.
Parameters
----------
s : str
The string representation of the protein mutation
Returns
-------
old_residue : str
The old residue, or None of the mutation string cannot be parsed
position : str
The position at which the mutation occurs, or None if the mutation
string cannot be parsed
new_residue : str
The new residue, or None if the mutation string cannot be parsed
"""
m = MUT_PATT.match(s)
if m is None:
# Mutation string does not fit this pattern, other patterns not
# currently supported
return None, None, None
else:
return m.groups()
# Medscan URNs look like urn:<type>:<id>, e.g. urn:agi-cas:50-00-0
URN_PATT = re.compile('urn:([^:]+):([^:]+)')

def _urn_to_db_refs(urn):
    """Converts a Medscan URN to an INDRA db_refs dictionary with grounding
    information.

    Parameters
    ----------
    urn : str
        A Medscan URN

    Returns
    -------
    db_refs : dict
        A dictionary with grounding information, mapping databases to database
        identifiers. If the Medscan URN is not recognized, returns an empty
        dictionary. Returns None if the URN is malformed.
    db_name : str
        The Famplex name, if available; otherwise the HGNC name if available;
        otherwise None
    """
    # Convert a urn to a db_refs dictionary
    if urn is None:
        # No URN at all: ungrounded but not an error -> empty db_refs
        return {}, None
    m = URN_PATT.match(urn)
    if m is None:
        # Malformed URN: signal failure with (None, None), distinct from
        # the ({}, None) ungrounded case above.
        return None, None
    urn_type, urn_id = m.groups()
    db_refs = {}
    db_name = None
    # TODO: support more types of URNs
    if urn_type == 'agi-cas':
        # Identifier is CAS, convert to CHEBI
        chebi_id = get_chebi_id_from_cas(urn_id)
        if chebi_id:
            db_refs['CHEBI'] = 'CHEBI:%s' % chebi_id
            db_name = get_chebi_name_from_id(chebi_id)
    elif urn_type == 'agi-llid':
        # This is an Entrez ID, convert to HGNC
        hgnc_id = get_hgnc_from_entrez(urn_id)
        if hgnc_id is not None:
            db_refs['HGNC'] = hgnc_id
            # Convert the HGNC ID to a Uniprot ID
            uniprot_id = get_uniprot_id(hgnc_id)
            if uniprot_id is not None:
                db_refs['UP'] = uniprot_id
            # Try to lookup HGNC name; if it's available, set it to the
            # agent name
            db_name = get_hgnc_name(hgnc_id)
    elif urn_type in ['agi-meshdis', 'agi-ncimorgan', 'agi-ncimtissue',
                      'agi-ncimcelltype']:
        if urn_id.startswith('C') and urn_id[1:].isdigit():
            # Identifier is probably UMLS
            db_refs['UMLS'] = urn_id
        else:
            # Identifier is MESH; the id is a URL-quoted MESH term name
            urn_mesh_name = unquote(urn_id)
            mesh_id, mesh_name = mesh_client.get_mesh_id_name(urn_mesh_name)
            if mesh_id:
                db_refs['MESH'] = mesh_id
                db_name = mesh_name
            else:
                db_name = urn_mesh_name
    elif urn_type == 'agi-gocomplex':
        # Identifier is GO
        db_refs['GO'] = 'GO:%s' % urn_id
    elif urn_type == 'agi-go':
        # Identifier is GO
        db_refs['GO'] = 'GO:%s' % urn_id

    # If we have a GO or MESH grounding, see if there is a corresponding
    # Famplex grounding
    db_sometimes_maps_to_famplex = ['GO', 'MESH']
    for db in db_sometimes_maps_to_famplex:
        if db in db_refs:
            key = (db, db_refs[db])
            if key in famplex_map:
                db_refs['FPLX'] = famplex_map[key]

    # If the urn corresponds to an eccode, ground to famplex if that eccode
    # is in the Famplex equivalences table
    if urn.startswith('urn:agi-enz'):
        tokens = urn.split(':')
        eccode = tokens[2]
        key = ('ECCODE', eccode)
        if key in famplex_map:
            db_refs['FPLX'] = famplex_map[key]

    # If the Medscan URN itself maps to a Famplex id, add a Famplex grounding
    key = ('MEDSCAN', urn)
    if key in famplex_map:
        db_refs['FPLX'] = famplex_map[key]

    # If there is a Famplex grounding, use Famplex for entity name
    if 'FPLX' in db_refs:
        db_name = db_refs['FPLX']
    elif 'GO' in db_refs:
        db_name = go_client.get_go_label(db_refs['GO'])

    return db_refs, db_name
TAG_PATT = re.compile('ID{([0-9,]+)=([^}]+)}')
JUNK_PATT = re.compile('(CONTEXT|GLOSSARY){[^}]+}+')
ID_PATT = re.compile('ID\\{([0-9]+)\\}')
def _extract_id(id_string):
"""Extracts the numeric ID from the representation of the subject or
object ID that appears as an attribute of the svo element in the Medscan
XML document.
Parameters
----------
id_string : str
The ID representation that appears in the svo element in the XML
document (example: ID{123})
Returns
-------
id : str
The numeric ID, extracted from the svo element's attribute
(example: 123)
"""
matches = ID_PATT.match(id_string)
assert matches is not None
return matches.group(1)
def _untag_sentence(tagged_sentence):
"""Removes all tags in the sentence, returning the original sentence
without Medscan annotations.
Parameters
----------
tagged_sentence : str
The tagged sentence
Returns
-------
untagged_sentence : str
Sentence with tags and annotations stripped out
"""
untagged_sentence = TAG_PATT.sub('\\2', tagged_sentence)
clean_sentence = JUNK_PATT.sub('', untagged_sentence)
return clean_sentence.strip()
def _extract_sentence_tags(tagged_sentence):
    """Given a tagged sentence, extracts a dictionary mapping tags to the words
    or phrases that they tag.

    Parameters
    ----------
    tagged_sentence : str
        The sentence with Medscan annotations and tags

    Returns
    -------
    tags : dict
        A dictionary mapping tags to dicts with the tagged 'text' and its
        'bounds' (start, stop) character offsets in the untagged sentence.
    """
    untagged_sentence = _untag_sentence(tagged_sentence)
    # Remove CONTEXT/GLOSSARY blocks but keep the ID{...} tags so each
    # tagged phrase can be located in the untagged sentence below.
    decluttered_sentence = JUNK_PATT.sub('', tagged_sentence)
    tags = {}

    # Iteratively look for all matches of this pattern
    endpos = 0
    while True:
        match = TAG_PATT.search(decluttered_sentence, pos=endpos)
        if not match:
            break
        endpos = match.end()

        text = match.group(2)
        text = text.replace('CONTEXT', '')
        text = text.replace('GLOSSARY', '')
        text = text.strip()
        # NOTE(review): index() locates the FIRST occurrence of the text,
        # so repeated phrases all map to the earliest bounds -- confirm
        # this is acceptable for downstream consumers.
        start = untagged_sentence.index(text)
        stop = start + len(text)
        tag_key = match.group(1)
        if ',' in tag_key:
            # A comma-separated key tags one phrase with several entity
            # ids; the id '0' appears to be a placeholder and is skipped.
            for sub_key in tag_key.split(','):
                if sub_key == '0':
                    continue
                tags[sub_key] = {'text': text, 'bounds': (start, stop)}
        else:
            tags[tag_key] = {'text': text, 'bounds': (start, stop)}
    return tags
| |
#!/usr/bin/env python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parameters to control Mapreduce."""
__all__ = ["CONFIG_NAMESPACE",
"config"]
import pickle
# To break circular dependency and more.
# pylint: disable=g-import-not-at-top
# For the mapreduce in python 25 runtime, this import will fail.
# TODO(user): Remove all pipeline import protections after 25 mr defunct.
try:
from pipeline import util as pipeline_util
except ImportError:
pipeline_util = None
from google.appengine.api import lib_config
CONFIG_NAMESPACE = "mapreduce"
# pylint: disable=protected-access
# pylint: disable=invalid-name
class _JobConfigMeta(type):
  """Metaclass that controls class creation.

  Collects _Option descriptors declared on a _Config subclass body into
  the class-level _options dict and _required set, removing the
  descriptors themselves from the class namespace.
  """

  # Names of the synthesized class attributes.
  _OPTIONS = "_options"
  _REQUIRED = "_required"

  def __new__(mcs, classname, bases, class_dict):
    """Creates a _Config class and modifies its class dict.

    Args:
      classname: name of the class.
      bases: a list of base classes.
      class_dict: original class dict.

    Returns:
      A new _Config class. The modified class will have two fields.
      _options field is a dict from option name to _Option objects.
      _required field is a set of required option names.
    """
    options = {}
    required = set()
    # Python 2 dict iteration (iteritems); collect first, pop after, so
    # the dict is not mutated while being iterated.
    for name, option in class_dict.iteritems():
      if isinstance(option, _Option):
        options[name] = option
        if option.required:
          required.add(name)
    for name in options:
      class_dict.pop(name)
    class_dict[mcs._OPTIONS] = options
    class_dict[mcs._REQUIRED] = required
    cls = type.__new__(mcs, classname, bases, class_dict)

    # Handle inheritance of _Config.
    if object not in bases:
      parent_options = {}
      # Update options from the root down.
      for c in reversed(cls.__mro__):
        if mcs._OPTIONS in c.__dict__:
          # Children override parent.
          parent_options.update(c.__dict__[mcs._OPTIONS])
        if mcs._REQUIRED in c.__dict__:
          required.update(c.__dict__[mcs._REQUIRED])
      for k, v in parent_options.iteritems():
        if k not in options:
          options[k] = v
    return cls
class _Option(object):
"""An option for _Config."""
def __init__(self, kind, required=False, default_factory=None,
can_be_none=False):
"""Init.
Args:
kind: type of the option.
required: whether user is required to supply a value.
default_factory: a factory, when called, returns the default value.
can_be_none: whether value can be None.
Raises:
ValueError: if arguments aren't compatible.
"""
if required and default_factory is not None:
raise ValueError("No default_factory value when option is required.")
self.kind = kind
self.required = required
self.default_factory = default_factory
self.can_be_none = can_be_none
class _Config(object):
  """Root class for all per job configuration."""

  # Python 2 metaclass declaration; _JobConfigMeta populates the
  # class-level _options dict and _required set from _Option descriptors.
  __metaclass__ = _JobConfigMeta

  def __init__(self, _lenient=False, **kwds):
    """Init.

    Args:
      _lenient: When true, no option is required.
      **kwds: keyword arguments for options and their values.
    """
    self._verify_keys(kwds, _lenient)
    self._set_values(kwds, _lenient)

  def _verify_keys(self, kwds, _lenient):
    # Reject unknown option names; unless lenient, also require that
    # every required option was supplied.
    keys = set()
    for k in kwds:
      if k not in self._options:
        raise ValueError("Option %s is not supported." % (k))
      keys.add(k)
    if not _lenient:
      missing = self._required - keys
      if missing:
        raise ValueError("Options %s are required." % tuple(missing))

  def _set_values(self, kwds, _lenient):
    # Fill in defaults and type-check each declared option.
    for k, option in self._options.iteritems():
      v = kwds.get(k)
      if v is None and option.default_factory:
        v = option.default_factory()
      setattr(self, k, v)
      if _lenient:
        continue
      if v is None and option.can_be_none:
        continue
      # A class-valued option must be a subclass of option.kind;
      # an instance-valued option must be an instance of it.
      if isinstance(v, type) and not issubclass(v, option.kind):
        raise TypeError(
            "Expect subclass of %r for option %s. Got %r" % (
                option.kind, k, v))
      if not isinstance(v, type) and not isinstance(v, option.kind):
        raise TypeError("Expect type %r for option %s. Got %r" % (
            option.kind, k, v))

  def __eq__(self, other):
    if not isinstance(other, self.__class__):
      return False
    return other.__dict__ == self.__dict__

  def __repr__(self):
    return str(self.__dict__)

  def to_json(self):
    # Pickle keeps arbitrary option values (classes, callables) intact.
    return {"config": pickle.dumps(self)}

  @classmethod
  def from_json(cls, json):
    return pickle.loads(json["config"])
# TODO(user): Make more of these private.
class _ConfigDefaults(object):
  """Default configs.

  Do not change parameters whose names begin with _.

  SHARD_MAX_ATTEMPTS: Max attempts to execute a shard before giving up.

  TASK_MAX_ATTEMPTS: Max attempts to execute a task before dropping it. Task
    is any taskqueue task created by MR framework. A task is dropped
    when its X-AppEngine-TaskExecutionCount is bigger than this number.
    Dropping a task will cause abort on the entire MR job.

  TASK_MAX_DATA_PROCESSING_ATTEMPTS:
    Max times to execute a task when previous task attempts failed during
    data processing stage. An MR work task has three major stages:
    initial setup, data processing, and final checkpoint.
    Setup stage should be allowed to be retried more times than data processing
    stage: setup failures are caused by unavailable GAE services while
    data processing failures are mostly due to user function error out on
    certain input data. Thus, set TASK_MAX_ATTEMPTS higher than this parameter.

  QUEUE_NAME: Default queue for MR.

  SHARD_COUNT: Default shard count.

  PROCESSING_RATE_PER_SEC: Default rate of processed entities per second.

  BASE_PATH : Base path of mapreduce and pipeline handlers.
  """

  SHARD_MAX_ATTEMPTS = 4

  # Arbitrary big number so an MR job is not aborted by transient failures.
  TASK_MAX_ATTEMPTS = 31

  TASK_MAX_DATA_PROCESSING_ATTEMPTS = 11

  QUEUE_NAME = "default"

  SHARD_COUNT = 8

  # Maximum number of mapper calls per second.
  # This parameter is useful for testing to force short slices.
  # Maybe make this a private constant instead.
  # If people want to rate limit their jobs, they can reduce shard count.
  PROCESSING_RATE_PER_SEC = 1000000

  # This path will be changed by build process when this is a part of SDK.
  BASE_PATH = "/mapreduce"

  # TODO(user): find a proper value for this.
  # The amount of time to perform scanning in one slice. New slice will be
  # scheduled as soon as current one takes this long.
  _SLICE_DURATION_SEC = 15

  # Delay between consecutive controller callback invocations.
  _CONTROLLER_PERIOD_SEC = 2
# TODO(user): changes this name to app_config
# Resolve user overrides of _ConfigDefaults via appengine_config.py.
config = lib_config.register(CONFIG_NAMESPACE, _ConfigDefaults.__dict__)

# The following are constants that depends on the value of _config.
# They are constants because _config is completely initialized on the first
# request of an instance and will never change until user deploy a new version.
_DEFAULT_PIPELINE_BASE_PATH = config.BASE_PATH + "/pipeline"
# See b/11341023 for context.
_GCS_URLFETCH_TIMEOUT_SEC = 30

# If a lock has been held longer than this value, mapreduce will start to use
# logs API to check if the request has ended.
_LEASE_DURATION_SEC = config._SLICE_DURATION_SEC * 1.1

# In rare occasions, Logs API misses log entries. Thus
# if a lock has been held longer than this timeout, mapreduce assumes the
# request holding the lock has died, regardless of Logs API.
# 10 mins is taskqueue task timeout on a frontend.
_MAX_LEASE_DURATION_SEC = max(10 * 60 + 30, config._SLICE_DURATION_SEC * 1.5)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Contains the normalization layer classes and their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.training import moving_averages
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import variables
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
class BatchNormalization(base._Layer):  # pylint: disable=protected-access
  """Batch Normalization layer from http://arxiv.org/abs/1502.03167.

  "Batch Normalization: Accelerating Deep Network Training by Reducing
  Internal Covariate Shift"

  Sergey Ioffe, Christian Szegedy

  Arguments:
    axis: Integer, the axis that should be normalized (typically the features
      axis). For instance, after a `Convolution2D` layer with
      `data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
    momentum: Momentum for the moving average.
    epsilon: Small float added to variance to avoid dividing by zero.
    center: If True, add offset of `beta` to normalized tensor. If False, `beta`
      is ignored.
    scale: If True, multiply by `gamma`. If False, `gamma` is
      not used. When the next layer is linear (also e.g. `nn.relu`), this can be
      disabled since the scaling can be done by the next layer.
    beta_initializer: Initializer for the beta weight.
    gamma_initializer: Initializer for the gamma weight.
    moving_mean_initializer: Initializer for the moving mean.
    moving_variance_initializer: Initializer for the moving variance.
    beta_regularizer: Optional regularizer for the beta weight.
    gamma_regularizer: Optional regularizer for the gamma weight.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    name: A string, the name of the layer.
  """

  def __init__(self,
               axis=-1,
               momentum=0.99,
               epsilon=1e-3,
               center=True,
               scale=True,
               beta_initializer=init_ops.zeros_initializer(),
               gamma_initializer=init_ops.ones_initializer(),
               moving_mean_initializer=init_ops.zeros_initializer(),
               moving_variance_initializer=init_ops.ones_initializer(),
               beta_regularizer=None,
               gamma_regularizer=None,
               trainable=True,
               name=None,
               **kwargs):
    super(BatchNormalization, self).__init__(
        name=name, trainable=trainable, **kwargs)
    self.axis = axis
    self.momentum = momentum
    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = beta_initializer
    self.gamma_initializer = gamma_initializer
    self.moving_mean_initializer = moving_mean_initializer
    self.moving_variance_initializer = moving_variance_initializer
    self.beta_regularizer = beta_regularizer
    self.gamma_regularizer = gamma_regularizer

  def build(self, input_shape):
    # Creates beta/gamma (if enabled) plus the non-trainable moving
    # statistics, sized by the dimension of the normalized axis.
    input_shape = tensor_shape.TensorShape(input_shape)
    if not input_shape.ndims:
      raise ValueError('Input has undefined rank:', input_shape)
    ndim = len(input_shape)
    # Resolve a possibly-negative axis to a positive index.
    if self.axis < 0:
      axis = ndim + self.axis
    else:
      axis = self.axis
    if axis < 0 or axis >= ndim:
      raise ValueError('Value of `axis` argument ' + str(self.axis) +
                       ' is out of range for input with rank ' + str(ndim))
    param_dim = input_shape[axis]
    if not param_dim.value:
      raise ValueError('Input has undefined `axis` dimension. Input shape: ',
                       input_shape)

    if self.center:
      self.beta = vs.get_variable('beta',
                                  shape=(param_dim,),
                                  initializer=self.beta_initializer,
                                  regularizer=self.beta_regularizer,
                                  trainable=True)
    else:
      self.beta = None
    if self.scale:
      self.gamma = vs.get_variable('gamma',
                                   shape=(param_dim,),
                                   initializer=self.gamma_initializer,
                                   regularizer=self.gamma_regularizer,
                                   trainable=True)
    else:
      self.gamma = None

    # Disable variable partitioning when creating the moving mean and variance
    partitioner = vs.get_variable_scope().partitioner
    try:
      vs.get_variable_scope().set_partitioner(None)
      self.moving_mean = vs.get_variable(
          'moving_mean',
          shape=(param_dim,),
          initializer=self.moving_mean_initializer,
          trainable=False)
      self.moving_variance = vs.get_variable(
          'moving_variance',
          shape=(param_dim,),
          initializer=self.moving_variance_initializer,
          trainable=False)
    finally:
      # Always restore the original partitioner, even if creation raised.
      vs.get_variable_scope().set_partitioner(partitioner)

  def call(self, inputs, training=False):
    # First, compute the axes along which to reduce the mean / variance,
    # as well as the broadcast shape to be used for all parameters.
    input_shape = inputs.get_shape()
    ndim = len(input_shape)
    reduction_axes = list(range(len(input_shape)))
    del reduction_axes[self.axis]
    broadcast_shape = [1] * len(input_shape)
    broadcast_shape[self.axis] = input_shape[self.axis].value

    # Determines whether broadcasting is needed.
    # NOTE(review): under Python 3, `range(ndim)[:-1]` is a range object and
    # `list != range` is always True, which would force the broadcasting
    # path unconditionally. The file imports xrange, so this presumably
    # targets Python 2 -- confirm before porting.
    needs_broadcasting = (sorted(reduction_axes) != range(ndim)[:-1])

    # Determine a boolean value for `training`: could be True, False, or None.
    training_value = utils.constant_value(training)

    if needs_broadcasting:
      # In this case we must explictly broadcast all parameters.
      if self.center:
        broadcast_beta = array_ops.reshape(self.beta, broadcast_shape)
      else:
        broadcast_beta = None
      if self.scale:
        broadcast_gamma = array_ops.reshape(self.gamma, broadcast_shape)
      else:
        broadcast_gamma = None

    if training_value is not False:
      # Batch statistics are needed (training mode, or mode decided at
      # graph-execution time).
      if needs_broadcasting:
        broadcast_mean, broadcast_variance = nn.moments(
            inputs, reduction_axes, keep_dims=True)
        mean = array_ops.reshape(broadcast_mean, [-1])
        variance = array_ops.reshape(broadcast_variance, [-1])
      else:
        mean, variance = nn.moments(inputs, reduction_axes)

      # Prepare updates if necessary. Only created once per layer instance.
      if not self.updates:
        mean_update = moving_averages.assign_moving_average(
            self.moving_mean, mean, self.momentum, zero_debias=False)
        variance_update = moving_averages.assign_moving_average(
            self.moving_variance, variance, self.momentum, zero_debias=False)
        # In the future this should be refactored into a self.add_update
        # methods in order to allow for instance-based BN layer sharing
        # across unrelated input streams (e.g. like in Keras).
        self.updates.append(mean_update)
        self.updates.append(variance_update)

    # Normalize batch. We do this inside separate functions for training
    # and inference so as to avoid evaluating both branches.
    def normalize_in_test():
      # Inference path: normalize with the stored moving statistics.
      if needs_broadcasting:
        broadcast_moving_mean = array_ops.reshape(self.moving_mean,
                                                  broadcast_shape)
        broadcast_moving_variance = array_ops.reshape(self.moving_variance,
                                                      broadcast_shape)
        return nn.batch_normalization(inputs,
                                      broadcast_moving_mean,
                                      broadcast_moving_variance,
                                      broadcast_beta,
                                      broadcast_gamma,
                                      self.epsilon)
      else:
        return nn.batch_normalization(inputs,
                                      self.moving_mean,
                                      self.moving_variance,
                                      self.beta if self.center else None,
                                      self.gamma if self.scale else None,
                                      self.epsilon)

    def normalize_in_training():
      # Training path: normalize with the current batch statistics.
      if needs_broadcasting:
        return nn.batch_normalization(inputs,
                                      broadcast_mean,
                                      broadcast_variance,
                                      broadcast_beta,
                                      broadcast_gamma,
                                      self.epsilon)
      else:
        return nn.batch_normalization(inputs,
                                      mean,
                                      variance,
                                      self.beta if self.center else None,
                                      self.gamma if self.scale else None,
                                      self.epsilon)

    return utils.smart_cond(training,
                            normalize_in_training,
                            normalize_in_test)
def batch_normalization(inputs,
                        axis=-1,
                        momentum=0.99,
                        epsilon=1e-3,
                        center=True,
                        scale=True,
                        beta_initializer=init_ops.zeros_initializer(),
                        gamma_initializer=init_ops.ones_initializer(),
                        moving_mean_initializer=init_ops.zeros_initializer(),
                        moving_variance_initializer=init_ops.ones_initializer(),
                        beta_regularizer=None,
                        gamma_regularizer=None,
                        training=False,
                        trainable=True,
                        name=None,
                        reuse=None):
  """Functional interface for the batch normalization layer.

  Reference: http://arxiv.org/abs/1502.03167

  "Batch Normalization: Accelerating Deep Network Training by Reducing
  Internal Covariate Shift"

  Sergey Ioffe, Christian Szegedy

  Arguments:
    inputs: Tensor input.
    axis: Integer, the axis that should be normalized (typically the features
      axis). For instance, after a `Convolution2D` layer with
      `data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
    momentum: Momentum for the moving average.
    epsilon: Small float added to variance to avoid dividing by zero.
    center: If True, add offset of `beta` to normalized tensor. If False, `beta`
      is ignored.
    scale: If True, multiply by `gamma`. If False, `gamma` is
      not used. When the next layer is linear (also e.g. `nn.relu`), this can be
      disabled since the scaling can be done by the next layer.
    beta_initializer: Initializer for the beta weight.
    gamma_initializer: Initializer for the gamma weight.
    moving_mean_initializer: Initializer for the moving mean.
    moving_variance_initializer: Initializer for the moving variance.
    beta_regularizer: Optional regularizer for the beta weight.
    gamma_regularizer: Optional regularizer for the gamma weight.
    training: Either a Python boolean, or a TensorFlow boolean scalar tensor
      (e.g. a placeholder). Whether to return the output in training mode
      (normalized with statistics of the current batch) or in inference mode
      (normalized with moving statistics).
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    name: String, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.

  Returns:
    Output tensor.
  """
  # Thin wrapper: gather the configuration into a keyword dict, build the
  # object-oriented layer with it, and apply the layer to the input tensor.
  layer_kwargs = dict(
      axis=axis,
      momentum=momentum,
      epsilon=epsilon,
      center=center,
      scale=scale,
      beta_initializer=beta_initializer,
      gamma_initializer=gamma_initializer,
      moving_mean_initializer=moving_mean_initializer,
      moving_variance_initializer=moving_variance_initializer,
      beta_regularizer=beta_regularizer,
      gamma_regularizer=gamma_regularizer,
      trainable=trainable,
      name=name,
      _reuse=reuse,
      _scope=name)
  bn_layer = BatchNormalization(**layer_kwargs)
  return bn_layer.apply(inputs, training=training)
# Aliases: short-form names kept for callers using the abbreviated API.
BatchNorm = BatchNormalization
batch_norm = batch_normalization
| |
"""Module otsun.materials for treating materials
The module relies on a basic class `Material` with two subclasses
`VolumeMaterial` and `SurfaceMaterial`, and several subclasses of them
for specific materials.
"""
import json
import zipfile
from FreeCAD import Base
from .optics import Phenomenon, OpticalState, reflection, refraction, matrix_reflectance, \
calculate_reflectance, simple_polarization_reflection, simple_polarization_refraction, \
simple_reflection, shure_refraction, lambertian_reflection
from .math import arccos, parallel_orthogonal_components, rad_to_deg, myrandom, normalize, \
constant_function, correct_normal, tabulated_function
from numpy import sqrt
import numpy as np
from autologging import traced
from .logging_unit import logger
import pandas as pd
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that serializes numpy arrays as (nested) lists."""
    def default(self, obj):
        # Convert ndarrays to plain lists; defer everything else to the
        # base class, which raises TypeError for unsupported objects.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)
def load_from_txt_or_csv(file):
    """Load tabular data from a delimited text/CSV file (or buffer).

    The first column becomes the index, the delimiter is auto-detected,
    '#' lines are comments, missing values are interpolated along the
    index, and the index is re-inserted as the first output column.

    Parameters
    ----------
    file : str or file-like
        Path or buffer readable by pandas.

    Returns
    -------
    numpy.ndarray
        Array whose first column is the index values.
    """
    table = pd.read_table(
        file,
        sep=None,
        index_col=0,
        header=None,
        comment='#',
        engine='python'
    )
    table = table.interpolate(method='index')
    table.insert(0, 0, table.index)
    return table.values
@traced(logger)
class Material(object):
    """
    Class used to represent materials and their physical properties

    The `properties` dictionary holds the physical properties of the material.
    Its contents are specific to the kind of material.

    Parameters
    ----------
    name : str
        Name of the material
    properties : dict
        Dictionary with physical properties of the material

    Attributes
    ----------
    classname : str
        String identifying the class
    """
    by_name = {}
    """
    Dict that associates the name of each created material with the material itself
    """

    def __init__(self, name, properties=None):
        # Register the instance so it can later be retrieved by name
        # (see `get_from_label`).
        self.by_name[name] = self
        self.name = name
        self.classname = ""
        if properties is None:
            properties = {}
        self.properties = properties

    @staticmethod
    def plain_properties_to_properties(plain_properties):
        """
        Converts properties of a material in plain format (json) to internal format

        Each entry of `plain_properties` is a dict with a 'type'
        ('scalar', 'constant', 'tabulated' or 'matrix') and a 'value';
        non-scalar values are wrapped in callables.

        Parameters
        ----------
        plain_properties : dict

        Returns
        -------
        dict
        """
        properties = {}
        for key in plain_properties:
            plain_property = plain_properties[key]
            prop_type = plain_property['type']
            prop_value = plain_property['value']
            # The four types are mutually exclusive, hence elif
            if prop_type == 'scalar':
                properties[key] = prop_value
            elif prop_type == 'constant':
                properties[key] = constant_function(prop_value)
            elif prop_type == 'tabulated':
                properties[key] = tabulated_function(
                    np.array(prop_value[0]), np.array(prop_value[1]))
            elif prop_type == 'matrix':
                properties[key] = matrix_reflectance(np.array(prop_value))
        # Keep the original plain data so the material can be re-serialized
        properties['plain_properties'] = plain_properties
        return properties

    @staticmethod
    def properties_to_plain_properties(properties):
        """
        Converts properties of a material in internal format to plain (json ready) format

        Since the plain properties are stored in the internal format,
        no need for conversions

        Parameters
        ----------
        properties : dict

        Returns
        -------
        dict
        """
        return properties.get('plain_properties', None)

    @classmethod
    def get_from_label(cls, label):
        """
        Returns the material given its label

        Given a `label` of an object (from a FreeCad document) of the form
        XXX(MatYYY,OTHER_INFO), returns the material whose name is MatYYY

        Parameters
        ----------
        label : str

        Returns
        -------
        Material
            The registered material, or None if the label has no
            parenthesized name or the name is unknown
        """
        if ("(" not in label) or (")" not in label):
            return None
        start = label.find("(")
        end = label.find(")")
        string = label[start + 1:end]
        name = string.split(',')[0]
        return cls.by_name.get(name, None)

    @classmethod
    def create(cls, name, properties):
        """Wrapper to create (and register) a material"""
        _ = cls(name, properties)

    @classmethod
    def load_from_json(cls, info):
        """
        Load materials from a json-decoded specification

        Parameters
        ----------
        info : dict or list of dict
            Each dict describes one material ('classname', 'name', and
            either the two layer names or 'plain_properties')

        Returns
        -------
        str or list of str
            The name of the imported material if there is only one,
            otherwise the list of all imported names
        """
        # Was `type(info).__name__ == 'dict'`; isinstance is the idiomatic
        # (and equivalent for json.load output) check.
        if isinstance(info, dict):
            info = [info]
        names = []
        for mat_spec in info:
            classname = mat_spec['classname']
            logger.debug(classname)
            the_class = globals()[classname]
            name = mat_spec['name']
            names.append(name)
            if issubclass(the_class, TwoLayerMaterial):
                name_front_layer = mat_spec['name_front_layer']
                name_back_layer = mat_spec['name_back_layer']
                mat = TwoLayerMaterial(name, name_front_layer, name_back_layer)
            else:
                plain_properties = mat_spec['plain_properties']
                properties = the_class.plain_properties_to_properties(plain_properties)
                mat = Material(name, properties)
            # Instances are built as the base class and then re-typed
            mat.__class__ = the_class
        if len(names) == 1:
            return names[0]
        else:
            return names

    @classmethod
    def load_from_json_fileobject(cls, f):
        """
        Load materials from a json fileobject

        If the file contains a single dict, then it means that it contains a
        single material. Otherwise it contains an array, where each entry is a dict
        representing a material.

        Parameters
        ----------
        f : BinaryIO
            File object

        Returns
        -------
        str or list of str
            Name(s) of the materials imported from the file
        """
        info = json.load(f)
        return cls.load_from_json(info)

    @classmethod
    def load_from_json_file(cls, filename):
        """
        Load materials from a json file

        Parameters
        ----------
        filename : str
            Name of the file

        Returns
        -------
        str or list of str
            Name(s) of the materials imported from the file
            (None if the file could not be read)
        """
        try:
            with open(filename, 'rb') as f:
                return cls.load_from_json_fileobject(f)
        except IOError:
            logger.exception("error in processing file %s", filename)

    @classmethod
    def load_from_json_zip(cls, filename):
        """
        Load all materials from a zip file

        Files inside the zip that fail to parse are logged and skipped.

        Parameters
        ----------
        filename : str
            Name of the file
        """
        try:
            with zipfile.ZipFile(filename) as z:
                for matfile in z.namelist():
                    try:
                        with z.open(matfile) as f:
                            cls.load_from_json_fileobject(f)
                    except Exception:
                        # Was a bare `except:`; keep loading the remaining
                        # files but no longer swallow SystemExit /
                        # KeyboardInterrupt.
                        logger.exception("File %s in zip contains errors", matfile)
        except IOError:
            logger.exception("error in processing file %s", filename)

    def get_n(self, wavelength):
        """
        Returns the (complex) refractive index at a certain wavelength

        Parameters
        ----------
        wavelength : float

        Returns
        -------
        complex
            n + 1j*kappa when an extinction coefficient is available,
            otherwise just the real index n
        """
        n = self.properties['index_of_refraction'](wavelength)
        if 'extinction_coefficient' in self.properties:
            kappaf = self.properties['extinction_coefficient']
            kappa = kappaf(wavelength)
            return n + 1j * kappa
        else:
            return n

    def change_of_optical_state(self, *args):
        """
        Computes how a ray behaves when interacting with the material.
        MUST be subclassed

        The material where the ray is actually located is held in
        ray.current_material.

        Parameters
        ----------
        args
            Variable length argument list

        Returns
        -------
        OpticalState
        """
        pass

    def to_json(self):
        """Converts material to json. MUST be subclassed"""
        return ""

    def save_to_json_file(self, filename):
        """
        Save material to json file

        Parameters
        ----------
        filename : str
            Name of the file
        """
        with open(filename, 'w') as f:
            f.write(self.to_json())

    @staticmethod
    def all_to_json():
        """
        Convert all materials to json

        Simple materials are listed before composite (two-layer) ones —
        presumably so layers are deserialized before the composites that
        reference them; TODO confirm against the loader.

        Returns
        -------
        list of str
        """
        materials = Material.by_name.values()
        simple_mats = [material for material in materials if
                       not isinstance(material, TwoLayerMaterial)]
        composite_mats = [material for material in materials if
                          isinstance(material, TwoLayerMaterial)]
        all_mats = simple_mats + composite_mats
        return [mat.to_json() for mat in all_mats]

    @staticmethod
    def save_all_to_json_file(filename):
        """
        Saves all materials to text file (as a json array)

        Parameters
        ----------
        filename : str
            Name of the file
        """
        with open(filename, 'w') as f:
            f.write('[\n')
            f.write(',\n'.join(Material.all_to_json()))
            f.write('\n]')
@traced(logger)
class VolumeMaterial(Material):
    """
    Subclass of Material for materials with volume

    The `properties` parameter must be a dict with the physical properties
    describing the material. At least, the following must be provided:
    'index_of_refraction': float (index of refraction of the material, as a function
    of its wavelength, only real part)
    'extinction_coefficient': float (imaginary part of the index of refraction
    of the material as a function of its wavelength)
    """

    def __init__(self, name, properties=None):
        super(VolumeMaterial, self).__init__(name, properties)

    def change_of_optical_state(self, ray, normal_vector):
        """
        Compute the change of optical state

        Computes the new optical state when `ray` hits the material
        at a point with given `normal_vector`

        Parameters
        ----------
        ray : Ray
        normal_vector : Base.Vector

        Returns
        -------
        OpticalState
        """
        current = ray.current_medium()
        if isinstance(current, PolarizedThinFilm):
            # Coming out of a thin film: direction and polarization were
            # already settled by the film, so just refract "as is".
            return OpticalState(ray.current_polarization(),
                                ray.current_direction(),
                                Phenomenon.REFRACTION, self)  # TODO: Set solid
        n1 = current.get_n(ray.wavelength)
        n2 = self.get_n(ray.wavelength)
        state = refraction(ray.current_direction(), normal_vector, n1, n2,
                           ray.current_polarization())
        if state.phenomenon == Phenomenon.REFRACTION:
            state.material = self  # TODO: Set solid
        else:
            state.material = current  # TODO: Set solid
        return state

    def to_json(self):
        """
        Dumps the material in json format.
        """
        payload = {
            'name': self.name,
            'classname': self.__class__.__name__,
            'plain_properties': self.properties.get('plain_properties', None),
        }
        return json.dumps(payload, cls=NumpyEncoder, indent=4)
class SimpleVolumeMaterial(VolumeMaterial):
    """
    Subclass of `VolumeMaterial` for those materials with constant properties.
    """

    def __init__(self, name, index_of_refraction, attenuation_coefficient=None):
        # Both properties are wavelength-independent, hence type 'constant'.
        plain = {
            'index_of_refraction': {
                'type': 'constant',
                'value': index_of_refraction
            },
            'attenuation_coefficient': {
                'type': 'constant',
                'value': attenuation_coefficient
            },
        }
        super(SimpleVolumeMaterial, self).__init__(name, {})
        self.properties = Material.plain_properties_to_properties(plain)
class WavelengthVolumeMaterial(VolumeMaterial):
    """
    Subclass of `VolumeMaterial` for materials with tabulated index of refraction.
    """

    def __init__(self, name, file_index_of_refraction):
        # Expected columns: wavelength (nm), n (real part), kappa (imag part)
        table = load_from_txt_or_csv(file_index_of_refraction)
        wavelengths = table[:, 0]
        plain = {
            'index_of_refraction': {
                'type': 'tabulated',
                'value': [wavelengths, table[:, 1]]
            },
            'extinction_coefficient': {
                'type': 'tabulated',
                'value': [wavelengths, table[:, 2]]
            },
        }
        super(WavelengthVolumeMaterial, self).__init__(name)
        self.properties = Material.plain_properties_to_properties(plain)
class PVMaterial(VolumeMaterial):
    """
    Subclass of `VolumeMaterial` for photovoltaic materials.
    """

    def __init__(self, name, file_index_of_refraction):
        """
        Parameters
        ----------
        name : str
        file_index_of_refraction : str
            File with three columns: wavelength (nm), real part and
            imaginary part of the index of refraction
        """
        table = load_from_txt_or_csv(file_index_of_refraction)
        wavelengths = table[:, 0]
        plain = {
            'index_of_refraction': {
                'type': 'tabulated',
                'value': [wavelengths, table[:, 1]]
            },
            'extinction_coefficient': {
                'type': 'tabulated',
                'value': [wavelengths, table[:, 2]]
            },
            # Flag identifying this as a photovoltaic material
            'PV_material': {
                'type': 'scalar',
                'value': True
            }
        }
        super(PVMaterial, self).__init__(name)
        self.properties = Material.plain_properties_to_properties(plain)

    def get_PV_data(self, ray, energy_before):
        """
        Computes the photovoltaic data stored in a ray.

        Parameters
        ----------
        ray : Ray
            Ray that has passed through the PV material
        energy_before : float
            Energy of the ray before passing through the PV material

        Returns
        -------
        float, tuple of floats
            Energy absorbed by the material, plus a record with the segment
            endpoints, energies, wavelength, absorption coefficient,
            incidence angle and material name
        """
        wl = ray.wavelength
        # Absorption coefficient alpha = 4*pi*kappa/lambda in mm^-1
        # (wavelength comes in nm, hence the 1E6 factor).
        alpha = self.properties['extinction_coefficient'](wl) * 4 * np.pi / (wl / 1E6)
        # TODO: @Ramon: Revisar angle (sona raro)
        angle_incident = arccos(
            - ray.last_normal.dot(ray.current_direction())) * 180.0 / np.pi
        p1 = ray.points[-1]
        p2 = ray.points[-2]
        absorbed = energy_before - ray.energy
        return (absorbed,
                (p2.x, p2.y, p2.z, p1.x, p1.y, p1.z,
                 energy_before, ray.energy, wl, alpha, angle_incident, self.name)
                )
class PolarizedThinFilm(VolumeMaterial):
    """
    Subclass of `VolumeMaterial` for polarized thin film materials.

    Reflectance and transmittance of the film are tabulated (TMM-computed)
    as functions of incidence angle and wavelength, separately for s- and
    p-polarized light.
    """
    def __init__(self, name, file_thin_film, file_front, file_back):
        """
        Build the thin film material from its data files.

        `file_front` / `file_back` are either index-of-refraction data files
        or the literal string 'Vacuum' (n = 1, kappa = 0).
        """
        # thin film material calculated by TMM method, six columns:
        # wavelenth in nm,
        # angle in deg.,
        # reflectance s-polarized (perpendicular),
        # reflectance p-polarized (parallel),
        # transmittance s-polarized,
        # transmittance p-polarized
        # the values in coating_material should be in the corresponding
        # order columns
        ### data = np.loadtxt(file_thin_film)
        data = load_from_txt_or_csv(file_thin_film)
        data_reflectance = data[:, [0, 1, 2, 3]]
        data_transmittance = data[:, [0, 1, 4, 5]]
        if file_front != 'Vacuum':
            data_refraction_front = load_from_txt_or_csv(file_front)
            # data_refraction_front = np.loadtxt(file_front, usecols=(0, 1, 2))
            wavelength_values_front = data_refraction_front[:, 0]
            n_values_front = data_refraction_front[:, 1]
            k_values_front = data_refraction_front[:, 2]
            index_of_refraction_front = {
                'type': 'tabulated',
                'value': [wavelength_values_front, n_values_front]
            }
            extinction_coefficient_front = {
                'type': 'tabulated',
                'value': [wavelength_values_front, k_values_front]
            }
        else:
            # Vacuum on the front side: n = 1, kappa = 0
            index_of_refraction_front = {
                'type': 'constant',
                'value': 1.0
            }
            extinction_coefficient_front = {
                'type': 'constant',
                'value': 0.0
            }
        if file_back != 'Vacuum':
            data_refraction_back = load_from_txt_or_csv(file_back)
            # data_refraction_back = np.loadtxt(file_back, usecols=(0, 1, 2))
            wavelength_values_back = data_refraction_back[:, 0]
            n_values_back = data_refraction_back[:, 1]
            k_values_back = data_refraction_back[:, 2]
            index_of_refraction_back = {
                'type': 'tabulated',
                'value': [wavelength_values_back, n_values_back]
            }
            extinction_coefficient_back = {
                'type': 'tabulated',
                'value': [wavelength_values_back, k_values_back]
            }
        else:
            # Vacuum on the back side: n = 1, kappa = 0
            index_of_refraction_back = {
                'type': 'constant',
                'value': 1.0
            }
            extinction_coefficient_back = {
                'type': 'constant',
                'value': 0.0
            }
        plain_properties = {
            'Matrix_reflectance_thin_film': {
                'type': 'matrix',
                'value': data_reflectance
            },
            'Matrix_transmittance_thin_film': {
                'type': 'matrix',
                'value': data_transmittance
            },
            'index_of_refraction_front': index_of_refraction_front,
            'extinction_coefficient_front': extinction_coefficient_front,
            'index_of_refraction_back': index_of_refraction_back,
            'extinction_coefficient_back': extinction_coefficient_back,
            # Flag marking this material as a thin film
            'thin_film': {
                'type': 'scalar',
                'value': True
            },
        }
        super(PolarizedThinFilm, self).__init__(name)
        self.properties = Material.plain_properties_to_properties(plain_properties)
    @staticmethod
    def calculate_state_thin_film(incident, normal_vector, n1, n2, polarization_vector,
                                  properties, wavelength):
        """
        Helper function for the computation of the optical state once the ray has passed through the film

        Returns a pair (factor of energy absorbed in the film, new OpticalState).
        The reflected-vs-refracted outcome and the s-vs-p polarization
        projection are both decided by random draws (Monte Carlo).
        """
        # returns optical state of the ray in thin film material
        normal = correct_normal(normal_vector, incident)
        backside = False
        if normal != normal_vector:
            backside = True
        r = n1 / n2
        c1 = - normal.dot(incident)
        # cos (incident_angle)
        c2sq = 1.0 - r * r * (1.0 - c1 * c1)
        # cos (refracted_angle) ** 2
        if c2sq.real < 0:
            # total internal reflection
            state = reflection(incident, normal, polarization_vector)
            return 0.0, state
            # no energy is absorbed in the thinfilm
        c2 = sqrt(c2sq)
        # cos (refracted_angle)
        if c2.real > 1:
            # avoiding invalid solutions
            c2 = 1
        parallel_v, perpendicular_v, normal_parallel_plane = \
            parallel_orthogonal_components(polarization_vector, incident, normal)
        # parallel and perpendicular components of polarization vector
        # and orthogonal vector of the parallel plane
        ref_per = perpendicular_v.Length ** 2.0 / polarization_vector.Length ** 2.0
        # weight of perpendicular component: 0 < ref_per < 1
        if backside:
            # Ray intercepted on the backside of the transparent surface
            inc_angle = rad_to_deg(arccos(c2.real))
        else:
            inc_angle = rad_to_deg(arccos(c1))
        reflectance_matrix = properties['Matrix_reflectance_thin_film']
        r_matrix = reflectance_matrix(inc_angle, wavelength)
        # reflectance dependent of incidence angle and wavelength
        # We decide the polarization projection onto the parallel / perpendicular plane
        if myrandom() < ref_per:
            reflectance = calculate_reflectance(r_matrix, inc_angle, wavelength)[0]
            # reflectance for s-polarized (perpendicular) light
            perpendicular_polarized = True
            polarization_vector = normalize(perpendicular_v)
        else:
            reflectance = calculate_reflectance(r_matrix, inc_angle, wavelength)[1]
            # reflectance for p-polarized (parallel) light
            perpendicular_polarized = False
            polarization_vector = normalize(parallel_v)
        if myrandom() < reflectance:
            # ray reflected
            reflected_direction = simple_reflection(incident, normal).normalize()
            if not perpendicular_polarized:
                # reflection changes the parallel component of incident polarization
                polarization_vector = simple_polarization_reflection(
                    incident, normal, normal_parallel_plane, polarization_vector)
            return 0.0, OpticalState(polarization_vector, reflected_direction,
                                     Phenomenon.REFLEXION)  # TODO: Set solid
        else:
            # ray refracted: computing the refracted direction and energy absorbed in the thinfilm
            transmittance_matrix = properties['Matrix_transmittance_thin_film']
            t_matrix = transmittance_matrix(inc_angle, wavelength)
            # transmittance dependent of incidence angle and wavelength
            if perpendicular_polarized:
                transmittance = calculate_reflectance(t_matrix, inc_angle, wavelength)[0]
            else:
                transmittance = calculate_reflectance(t_matrix, inc_angle, wavelength)[1]
            # energy not reflected nor transmitted is absorbed in the film
            factor_energy_absorbed = (1 - reflectance - transmittance) / (1 - reflectance)
            refracted_direction = incident * r.real + normal * (r.real * c1 - c2.real)
            refracted_direction.normalize()
            if not perpendicular_polarized:
                # refraction changes the parallel component of incident polarization
                polarization_vector = \
                    simple_polarization_refraction(
                        incident, normal, normal_parallel_plane, c2, polarization_vector)
            return (factor_energy_absorbed,
                    OpticalState(polarization_vector, refracted_direction,
                                 Phenomenon.REFRACTION))  # TODO: Set solid
    def change_of_optical_state(self, ray, normal_vector):
        """
        Compute the new optical state when `ray` impacts the thin film.

        Also stores the fraction of energy absorbed in the film in
        `optical_state.extra_data['factor_energy_absorbed']`.
        """
        # the ray impacts on thin film material
        n1 = ray.current_medium().get_n(ray.wavelength)
        n_front = self.properties['index_of_refraction_front'](ray.wavelength)
        k_front = self.properties['extinction_coefficient_front'](ray.wavelength)
        n_back = self.properties['index_of_refraction_back'](ray.wavelength)
        k_back = self.properties['extinction_coefficient_back'](ray.wavelength)
        properties = self.properties
        # If the current medium matches the front-side index, the ray is
        # travelling front-to-back, so the destination index is the back one
        # (and vice versa).
        if n1 == n_front + 1j * k_front:
            n2 = n_back + 1j * k_back
        else:
            n2 = n_front + 1j * k_front
        factor_energy_absorbed, optical_state = (
            self.calculate_state_thin_film(
                ray.current_direction(), normal_vector, n1, n2,
                ray.current_polarization(),
                properties, ray.wavelength))
        optical_state.extra_data['factor_energy_absorbed'] = \
            factor_energy_absorbed
        if optical_state.phenomenon == Phenomenon.REFRACTION:
            optical_state.material = self  # TODO: Set solid
        else:
            optical_state.material = ray.current_medium()  # TODO: Set solid
        return optical_state
# Default surrounding medium: vacuum (n = 1.0, no attenuation).
vacuum_medium = SimpleVolumeMaterial("Vacuum", 1.0, 0.0)
@traced(logger)
class SurfaceMaterial(Material):
    """
    Subclass of Material for 2-dimensional materials (without volume)

    The `properties` parameter must be a dict with the physical properties
    describing the material. At least, the following must be provided:
    'probability_of_reflection': probability that a photon gets reflected,
    as a function of its wavelength.
    'probability_of_absorption': probability that a photon gets absorbed,
    as a function of its wavelength.
    'probability_of_transmittance': probability that a photon passes through
    the material, as a function of its wavelength.
    """

    def __init__(self, name, properties):
        # Fix: the base class __init__ already stores `properties`;
        # the previous duplicate `self.properties = properties` is removed.
        super(SurfaceMaterial, self).__init__(name, properties)

    @classmethod
    def create(cls, name, properties):
        """Wrapper to create (and register) a material"""
        _ = cls(name, properties)

    def compute_probabilities(self, ray):
        """
        Computes the tuple of probabilities that a ray hitting the surface gets reflected, absorbed or transmitted

        Missing entries default to: reflection 1.0, absorption 1 - reflection,
        transmittance 0.0, so the three values sum to 1.

        Returns
        -------
        list of float
            [p_reflection, p_absorption, p_transmittance]
        """
        properties = self.properties
        try:
            por = properties['probability_of_reflection'](ray.wavelength)
        except KeyError:
            por = 1.0
        try:
            poa = properties['probability_of_absorption'](ray.wavelength)
        except KeyError:
            poa = 1 - por
        try:
            pot = properties['probability_of_transmittance'](ray.wavelength)
        except KeyError:
            pot = 0.0
        return [por, poa, pot]

    def decide_phenomenon(self, ray):
        """
        Decides which phenomenon will take place when a ray hits the surface.

        The phenomenon is drawn at random with the probabilities given by
        `compute_probabilities` (Monte Carlo).
        """
        phenomena = [
            Phenomenon.REFLEXION,
            Phenomenon.ABSORPTION,
            Phenomenon.TRANSMITTANCE]
        probabilities = self.compute_probabilities(ray)
        phenomenon = np.random.choice(phenomena, 1, p=probabilities)[0]
        return phenomenon

    def change_of_optical_state(self, ray, normal_vector, nearby_material):
        """
        Computes the new optical state when `ray` hits the surface.

        Parameters
        ----------
        ray : Ray
        normal_vector : Base.Vector
        nearby_material : Material
            Material on the other side of the surface (target of refraction)

        Returns
        -------
        OpticalState
        """
        phenomenon = self.decide_phenomenon(ray)
        properties = self.properties
        if phenomenon == Phenomenon.REFLEXION:
            polarization_vector = ray.current_polarization()
            incident = ray.current_direction()
            if self.properties.get('lambertian_material', False):
                state = lambertian_reflection(ray.current_direction(), normal_vector)
            else:
                state = reflection(incident, normal_vector, polarization_vector, False)
            state.material = ray.current_medium()  # TODO: Set solid
            state.apply_dispersion(properties, normal_vector)
            return state
        if phenomenon == Phenomenon.ABSORPTION:
            # Thermal materials collect the energy; others just kill the ray
            if self.properties.get('thermal_material', False):
                return (OpticalState(Base.Vector(0.0, 0.0, 0.0),
                                     Base.Vector(0.0, 0.0, 0.0),
                                     Phenomenon.ENERGY_ABSORBED,
                                     self))  # TODO: Set solid
            else:
                return (OpticalState(Base.Vector(0.0, 0.0, 0.0),
                                     Base.Vector(0.0, 0.0, 0.0),
                                     Phenomenon.ABSORPTION,
                                     self))  # TODO: Set solid
        if phenomenon == Phenomenon.TRANSMITTANCE:
            # refraction in transparent layer
            n1 = ray.current_medium().get_n(ray.wavelength)
            n2 = nearby_material.get_n(ray.wavelength)
            if n1 == n2:  # transparent_simple_layer
                state = OpticalState(ray.current_polarization(),
                                     ray.current_direction(),
                                     Phenomenon.REFRACTION,
                                     nearby_material)  # TODO: Set solid
            else:
                state = shure_refraction(ray.current_direction(), normal_vector,
                                         n1, n2, ray.current_polarization(),
                                         self.properties.get('lambertian_material', False))
                state.material = nearby_material  # TODO: Set solid
            return state

    @classmethod
    def from_plain_properties(cls, name, plain_properties):
        """
        Loads a material from its properties stored in a simple dictionary

        Parameters
        ----------
        name : str
        plain_properties : dict

        Returns
        -------
        Material
        """
        properties = Material.plain_properties_to_properties(plain_properties)
        material = cls(name, properties)
        return material

    def to_json(self):
        """Dumps the material in json format."""
        return json.dumps(
            {
                'name': self.name,
                'classname': self.__class__.__name__,
                'plain_properties': self.properties.get(
                    'plain_properties', None),
            }, cls=NumpyEncoder, indent=4
        )
@traced(logger)
class OpaqueSimpleLayer(SurfaceMaterial):
    """
    Subclass of `SurfaceMaterial` for completely opaque layers.

    Every photon is absorbed (and not collected as thermal energy).
    """

    def __init__(self, name):
        plain = {
            'probability_of_reflection': {'type': 'constant', 'value': 0.0},
            'probability_of_absorption': {'type': 'constant', 'value': 1.0},
            'probability_of_transmittance': {'type': 'constant', 'value': 0.0},
            'thermal_material': {'type': 'scalar', 'value': False},
        }
        super(OpaqueSimpleLayer, self).__init__(
            name, Material.plain_properties_to_properties(plain))
@traced(logger)
class TransparentSimpleLayer(SurfaceMaterial):
    """
    Subclass of `SurfaceMaterial` for transparent layers.

    Transmits with probability `pot`, reflects (specularly) otherwise;
    nothing is absorbed.
    """

    def __init__(self, name, pot):
        plain = {
            'probability_of_reflection': {'type': 'constant', 'value': 1.0 - pot},
            'probability_of_absorption': {'type': 'constant', 'value': 0.0},
            'probability_of_transmittance': {'type': 'constant', 'value': pot},
            'thermal_material': {'type': 'scalar', 'value': False},
            'specular_material': {'type': 'scalar', 'value': True},
        }
        super(TransparentSimpleLayer, self).__init__(
            name, Material.plain_properties_to_properties(plain))
@traced(logger)
class AbsorberSimpleLayer(SurfaceMaterial):
    """
    Subclass of `SurfaceMaterial` for absorber layers with behaviour independent of the wavelength.

    Absorbs (thermally) with probability `poa`, reflects specularly
    otherwise; nothing is transmitted.
    """

    def __init__(self, name, poa):
        plain = {
            'probability_of_reflection': {'type': 'constant', 'value': 1.0 - poa},
            'probability_of_absorption': {'type': 'constant', 'value': poa},
            'probability_of_transmittance': {'type': 'constant', 'value': 0.0},
            'thermal_material': {'type': 'scalar', 'value': True},
            'specular_material': {'type': 'scalar', 'value': True},
        }
        super(AbsorberSimpleLayer, self).__init__(
            name, Material.plain_properties_to_properties(plain))
@traced(logger)
class AbsorberLambertianLayer(SurfaceMaterial):
    """
    Subclass of `SurfaceMaterial` for absorber layers with lambertian behaviour when reflecting rays.

    Absorbs (thermally) with probability `poa`; otherwise reflects
    diffusely (lambertian). Nothing is transmitted.
    """

    def __init__(self, name, poa):
        plain = {
            'probability_of_reflection': {'type': 'constant', 'value': 1.0 - poa},
            'probability_of_absorption': {'type': 'constant', 'value': poa},
            'probability_of_transmittance': {'type': 'constant', 'value': 0.0},
            'thermal_material': {'type': 'scalar', 'value': True},
            'lambertian_material': {'type': 'scalar', 'value': True},
        }
        super(AbsorberLambertianLayer, self).__init__(
            name, Material.plain_properties_to_properties(plain))
@traced(logger)
class ReflectorSpecularLayer(SurfaceMaterial):
    """
    Subclass of `SurfaceMaterial` for reflector specular layers.

    Reflects with probability `por`, absorbs otherwise; never transmits.
    `sigma_1`, `sigma_2` and `k` are optional scalars — presumably the
    parameters of the dispersion applied to reflected rays (see
    `apply_dispersion`); confirm in the optics module.
    """

    def __init__(self, name, por, sigma_1=None, sigma_2=None, k=None):
        plain = {
            'probability_of_reflection': {'type': 'constant', 'value': por},
            'probability_of_absorption': {'type': 'constant', 'value': 1.0 - por},
            'probability_of_transmittance': {'type': 'constant', 'value': 0.0},
            'thermal_material': {'type': 'scalar', 'value': False},
            'specular_material': {'type': 'scalar', 'value': True},
            'sigma_1': {'type': 'scalar', 'value': sigma_1},
            'sigma_2': {'type': 'scalar', 'value': sigma_2},
            'k': {'type': 'scalar', 'value': k},
        }
        super(ReflectorSpecularLayer, self).__init__(
            name, Material.plain_properties_to_properties(plain))
@traced(logger)
class ReflectorLambertianLayer(SurfaceMaterial):
    """
    Subclass of `SurfaceMaterial` for reflector layers with lambertian behaviour.

    Reflects diffusely with probability `por`, absorbs otherwise;
    never transmits.
    """

    def __init__(self, name, por):
        plain = {
            'probability_of_reflection': {'type': 'constant', 'value': por},
            'probability_of_absorption': {'type': 'constant', 'value': 1.0 - por},
            'probability_of_transmittance': {'type': 'constant', 'value': 0.0},
            'thermal_material': {'type': 'scalar', 'value': False},
            'lambertian_material': {'type': 'scalar', 'value': True},
        }
        super(ReflectorLambertianLayer, self).__init__(
            name, Material.plain_properties_to_properties(plain))
@traced(logger)
class AbsorberTWModelLayer(SurfaceMaterial):
    """
    Subclass of `SurfaceMaterial` for materials using Tesfamichael-Wackelgard model.

    The nominal absorption probability `poa` is modulated by an
    angle-dependent absorptance ratio (see `tw_absorptance_ratio`).
    """

    def __init__(self, name, poa, b_constant, c_constant):
        plain = {
            'probability_of_reflection': {'type': 'constant', 'value': 1.0 - poa},
            'probability_of_absorption': {'type': 'constant', 'value': poa},
            'probability_of_transmittance': {'type': 'constant', 'value': 0.0},
            'thermal_material': {'type': 'scalar', 'value': True},
            'TW_model': {'type': 'scalar', 'value': True},
            'b_constant': {'type': 'scalar', 'value': b_constant},
            'c_constant': {'type': 'scalar', 'value': c_constant},
        }
        super(AbsorberTWModelLayer, self).__init__(
            name, Material.plain_properties_to_properties(plain))

    @staticmethod
    def tw_absorptance_ratio(normal_vector, b_constant, c_constant, incident):
        """Angular Solar Absorptance model for selective absorber material.

        Given by the formula 1 - b * (1/cos - 1) ** c, based on:
        Tesfamichael, T., and Wackelgard, E., 2000, "Angular Solar Absorptance and
        Incident Angle Modifier of Selective Absorbers for Solar Thermal Collectors,"
        Sol. Energy, 68, pp. 335-341.

        Parameters
        ----------
        normal_vector : Base.Vector
            normal vector of the surface at the point of incidence
        b_constant : float
        c_constant : float
        incident : Base.Vector
            direction vector of the incident ray

        Returns
        -------
        float
        """
        # We assume the normal is normalized.
        normal = correct_normal(normal_vector, incident)
        cos_inc = - normal.dot(incident)
        inc_angle = rad_to_deg(arccos(cos_inc))
        if inc_angle < 80.0:
            return 1.0 - b_constant * abs((1.0 / cos_inc - 1.0)) ** c_constant
        # Beyond 80 degrees the model is extrapolated linearly down to 0 at 90.
        y0 = 1.0 - b_constant * (1.0 / np.cos(80.0 * np.pi / 180.0) - 1.0) ** c_constant
        slope = y0 / 10.0
        return y0 - slope * (inc_angle - 80.0)

    def change_of_optical_state(self, ray, normal_vector, nearby_material):
        """
        Reflect or (thermally) absorb the ray, with the absorption
        probability modulated by the TW angular model.
        """
        props = self.properties
        ratio = self.tw_absorptance_ratio(
            normal_vector, props['b_constant'], props['c_constant'],
            ray.current_direction())
        absorption = props['probability_of_absorption'](ray.wavelength) * ratio
        if myrandom() < 1.0 - absorption:
            # Ray reflected
            state = reflection(ray.current_direction(), normal_vector,
                               ray.current_polarization(), False)
            state.material = ray.current_medium()  # TODO: Set solid
            return state
        # Absorption in absorber material: the ray is killed
        return OpticalState(Base.Vector(0.0, 0.0, 0.0),
                            Base.Vector(0.0, 0.0, 0.0),
                            Phenomenon.ENERGY_ABSORBED, self)  # TODO: Set solid
@traced(logger)
class MetallicLayer(SurfaceMaterial):
    """
    Subclass of `SurfaceMaterial` for metallic layers.

    Base class only; concrete variants (specular / lambertian) implement
    `change_of_optical_state`.
    """
@traced(logger)
class MetallicSpecularLayer(MetallicLayer):
    """
    Subclass of `MetallicLayer` for metallic layers with specular properties.
    """

    def __init__(self, name, file_index_of_refraction,
                 sigma_1=None, sigma_2=None, k=None):
        """
        Parameters
        ----------
        name : str
        file_index_of_refraction : str
            File with three columns: wavelength (nm), real part and
            imaginary part of the index of refraction
        sigma_1, sigma_2, k : float, optional
            Scalars kept in the properties — presumably dispersion-model
            parameters used by `apply_dispersion`; confirm in the optics module
        """
        table = load_from_txt_or_csv(file_index_of_refraction)
        wavelengths = table[:, 0]
        plain = {
            'index_of_refraction': {  # TODO: @Ramon Crec que aixo no s'empra per res
                'type': 'tabulated',
                'value': [wavelengths, table[:, 1]]
            },
            'extinction_coefficient': {
                'type': 'tabulated',
                'value': [wavelengths, table[:, 2]]
            },
            'sigma_1': {'type': 'scalar', 'value': sigma_1},
            'sigma_2': {'type': 'scalar', 'value': sigma_2},
            'k': {'type': 'scalar', 'value': k},
        }
        super(MetallicSpecularLayer, self).__init__(
            name, Material.plain_properties_to_properties(plain))

    def change_of_optical_state(self, ray, normal_vector, nearby_material):
        """
        Reflect (with dispersion) or absorb the ray at the metallic surface.
        """
        n1 = ray.current_medium().get_n(ray.wavelength)
        n2 = self.get_n(ray.wavelength)
        state = refraction(ray.current_direction(), normal_vector, n1, n2,
                           ray.current_polarization())
        if state.phenomenon == Phenomenon.REFLEXION:
            state.material = ray.current_medium()  # TODO: Set solid
            state.apply_dispersion(self.properties, normal_vector)
            return state
        if state.phenomenon == Phenomenon.REFRACTION:
            # Refraction into the metal: the ray is killed (absorbed)
            return OpticalState(Base.Vector(0.0, 0.0, 0.0),
                                Base.Vector(0.0, 0.0, 0.0),
                                Phenomenon.ABSORPTION, self)  # TODO: Set solid
@traced(logger)
class MetallicLambertianLayer(MetallicLayer):
    """
    Subclass of `MetallicLayer` for metallic layers with lambertian behaviour.
    """

    def __init__(self, name, file_index_of_refraction):
        """
        Parameters
        ----------
        name : str
        file_index_of_refraction : str
            File with three columns: wavelength (nm), real part and
            imaginary part of the index of refraction
        """
        table = load_from_txt_or_csv(file_index_of_refraction)
        wavelengths = table[:, 0]
        plain = {
            'index_of_refraction': {
                'type': 'tabulated',
                'value': [wavelengths, table[:, 1]]
            },
            'extinction_coefficient': {
                'type': 'tabulated',
                'value': [wavelengths, table[:, 2]]
            },
        }
        super(MetallicLambertianLayer, self).__init__(
            name, Material.plain_properties_to_properties(plain))

    def change_of_optical_state(self, ray, normal_vector, nearby_material):
        """
        Reflect diffusely or absorb the ray at the metallic surface.
        """
        n1 = ray.current_medium().get_n(ray.wavelength)
        n2 = self.get_n(ray.wavelength)
        # Final `True` flag — presumably requests lambertian scattering
        # from `refraction`; confirm against its signature in the optics module.
        state = refraction(ray.current_direction(), normal_vector, n1, n2,
                           ray.current_polarization(), True)
        if state.phenomenon == Phenomenon.REFLEXION:
            state.material = ray.current_medium()  # TODO: Set solid
            return state
        if state.phenomenon == Phenomenon.REFRACTION:
            # Refraction into the metal: the ray is killed (absorbed)
            return OpticalState(Base.Vector(0.0, 0.0, 0.0),
                                Base.Vector(0.0, 0.0, 0.0),
                                Phenomenon.ABSORPTION, self)  # TODO: Set solid
@traced(logger)
class PolarizedCoatingLayer(SurfaceMaterial):
    """
    Subclass of `SurfaceMaterial` for polarized coating layers

    Uses a tabulated reflectance matrix ('Matrix_reflectance_coating')
    indexed by incidence angle and wavelength, with separate values for
    s- and p-polarized light.
    """
    def __init__(self, *args):
        super(PolarizedCoatingLayer, self).__init__(*args)
    def precompute_change_of_optical_state(self, ray, normal_vector):
        """
        Common first step of the interaction of a ray with a coating.

        Projects the polarization onto the s/p planes (random draw weighted
        by the perpendicular component) and decides reflection by a second
        random draw against the tabulated reflectance.

        Returns
        -------
        OpticalState
            If the ray is reflected (dispersion already applied), or
        tuple
            (inc_angle, incident, perpendicular_polarized, reflectance,
            normal_parallel_plane) for the subclass to finish the
            non-reflected case.
        """
        polarization_vector = ray.current_polarization()
        incident = ray.current_direction()
        wavelength = ray.wavelength
        properties = self.properties
        normal = correct_normal(normal_vector, incident)
        c1 = - normal.dot(incident)
        inc_angle = rad_to_deg(arccos(c1))
        # incidence angle
        parallel_v, perpendicular_v, normal_parallel_plane = \
            parallel_orthogonal_components(polarization_vector, incident, normal)
        # weight of the perpendicular (s) component: 0 < ref_per < 1
        ref_per = perpendicular_v.Length ** 2.0 / polarization_vector.Length ** 2.0
        reflectance_matrix = properties['Matrix_reflectance_coating']
        r_matrix = reflectance_matrix(inc_angle, wavelength)
        # reflectance dependent of incidence angle and wavelength
        # We decide the polarization projection onto the parallel / perpendicular plane
        if myrandom() < ref_per:
            reflectance = calculate_reflectance(r_matrix, inc_angle, wavelength)[0]
            # reflectance for s-polarized (perpendicular) light
            perpendicular_polarized = True
            polarization_vector = normalize(perpendicular_v)
        else:
            reflectance = calculate_reflectance(r_matrix, inc_angle, wavelength)[1]
            # reflectance for p-polarized (parallel) light
            perpendicular_polarized = False
            polarization_vector = normalize(parallel_v)
        if myrandom() < reflectance:
            # ray reflected
            reflected = simple_reflection(incident, normal).normalize()
            if not perpendicular_polarized:
                # reflection changes the parallel component of incident polarization
                polarization_vector = simple_polarization_reflection(
                    incident, normal, normal_parallel_plane, polarization_vector)
            state = OpticalState(polarization_vector, reflected, Phenomenon.REFLEXION, self)
            state.material = ray.current_medium()  # TODO: Set solid
            state.apply_dispersion(properties, normal_vector)
            return state
        else:
            # not reflected: hand the precomputed data to the subclass
            return (inc_angle, incident, perpendicular_polarized,
                    reflectance, normal_parallel_plane)
@traced(logger)
class PolarizedCoatingReflectorLayer(PolarizedCoatingLayer):
    """
    Subclass of `PolarizedCoatingLayer` for reflector layers.

    Parameters
    ----------
    name : str
        Name of the material layer.
    coating_file : str
        Data file with four columns: wavelength in nm, angle in degrees,
        reflectance for s-polarized (perpendicular) light, reflectance for
        p-polarized (parallel) light, in that column order.
    sigma_1, sigma_2, k :
        Optional dispersion parameters of the surface.
    """

    def __init__(self, name, coating_file, sigma_1=None, sigma_2=None, k=None):
        data_material = load_from_txt_or_csv(coating_file)
        plain_properties = {
            'Matrix_reflectance_coating': {
                'type': 'matrix',
                'value': data_material
            },
            'sigma_1': {
                'type': 'scalar',
                'value': sigma_1
            },
            'sigma_2': {
                'type': 'scalar',
                'value': sigma_2
            },
            'k': {
                'type': 'scalar',
                'value': k
            }
        }
        properties = Material.plain_properties_to_properties(plain_properties)
        super(PolarizedCoatingReflectorLayer, self).__init__(name, properties)

    def change_of_optical_state(self, ray, normal_vector, nearby_material):
        """Return the state of `ray` after hitting the reflector coating.

        The ray is either reflected (state computed by the superclass) or
        killed by absorption in the coating.
        """
        new_state = self.precompute_change_of_optical_state(ray, normal_vector)
        if isinstance(new_state, OpticalState):
            return new_state
        # Ray is killed in the coating reflector.
        # Fix: pass `self` as the state's material, consistently with the
        # sibling coating layers (absorber and metallic variants).
        return OpticalState(Base.Vector(0.0, 0.0, 0.0),
                            Base.Vector(0.0, 0.0, 0.0),
                            Phenomenon.ABSORPTION, self)  # TODO: Set solid
@traced(logger)
class PolarizedCoatingAbsorberLayer(PolarizedCoatingLayer):
    """
    Subclass of `PolarizedCoatingLayer` for absorber layers.
    """

    def __init__(self, name, coating_file):
        """Create the absorber layer from a four column data file.

        Columns (in order): wavelength in nm, angle in degrees,
        reflectance for s-polarized (perpendicular) light, reflectance
        for p-polarized (parallel) light.
        """
        reflectance_data = load_from_txt_or_csv(coating_file)
        plain_properties = {
            'Matrix_reflectance_coating': {
                'type': 'matrix',
                'value': reflectance_data
            },
            'probability_of_transmittance': {
                'type': 'constant',
                'value': 0
            },
        }
        super(PolarizedCoatingAbsorberLayer, self).__init__(
            name, Material.plain_properties_to_properties(plain_properties))

    def change_of_optical_state(self, ray, normal_vector, nearby_material):
        """Return the state of `ray` after hitting the absorber coating."""
        outcome = self.precompute_change_of_optical_state(ray, normal_vector)
        if not isinstance(outcome, OpticalState):
            # Ray refracted: its energy is absorbed in the coating absorber.
            outcome = OpticalState(Base.Vector(0.0, 0.0, 0.0),
                                   Base.Vector(0.0, 0.0, 0.0),
                                   Phenomenon.ENERGY_ABSORBED, self)  # TODO: Set solid
        return outcome
@traced(logger)
class PolarizedCoatingTransparentLayer(PolarizedCoatingLayer):
    """
    Subclass of `PolarizedCoatingLayer` for transparent layers.
    """
    def __init__(self, name, coating_file):
        # coatingmaterial calculated by TMM method, six columns:
        # wavelength in nm,
        # angle in deg.,
        # reflectance s-polarized (perpendicular),
        # reflectance p-polarized (parallel),
        # transmittance s-polarized,
        # transmittance p-polarized
        # the values in coating_material should be in the corresponding
        # order columns
        data = load_from_txt_or_csv(coating_file)
        # Split the six columns into two four-column matrices sharing the
        # wavelength/angle columns.
        data_reflectance = data[:, [0, 1, 2, 3]]
        data_transmittance = data[:, [0, 1, 4, 5]]
        plain_properties = {
            'Matrix_reflectance_coating': {
                'type': 'matrix',
                'value': data_reflectance
            },
            'Matrix_transmittance_coating': {
                'type': 'matrix',
                'value': data_transmittance
            },
        }
        properties = Material.plain_properties_to_properties(plain_properties)
        super(PolarizedCoatingTransparentLayer, self).__init__(name, properties)
    def change_of_optical_state(self, ray, normal_vector, nearby_material):
        # Compute the state of `ray` after hitting the transparent coating:
        # total internal reflection, reflection (via the superclass), or
        # refraction into `nearby_material` with part of the energy
        # absorbed in the coating.
        polarization_vector = ray.current_polarization()
        incident = ray.current_direction()
        wavelength = ray.wavelength
        properties = self.properties
        # Orient the normal towards the side the ray comes from.
        normal = correct_normal(normal_vector, incident)
        n1 = ray.current_medium().get_n(ray.wavelength)
        n2 = nearby_material.get_n(ray.wavelength)
        r = n1 / n2
        c1 = - normal.dot(incident)
        # cos (incident_angle)
        c2sq = 1.0 - r * r * (1.0 - c1 * c1)
        # cos (refracted_angle) ** 2
        # NOTE: indices may be complex; decisions use the real part.
        if c2sq.real < 0:
            # total internal reflection
            state = reflection(incident, normal, polarization_vector)
            state.material = ray.current_medium() # TODO: Set solid
            return state
        c2 = sqrt(c2sq)
        if c2.real > 1:
            # avoiding invalid solutions
            c2 = 1
        new_state = self.precompute_change_of_optical_state(ray, normal_vector)
        if isinstance(new_state, OpticalState):
            # Ray reflected by the coating.
            return new_state
        else:
            (inc_angle, incident, perpendicular_polarized,
             reflectance, normal_parallel_plane) = new_state
            # ray refracted: computing the refracted direction and energy absorbed in coating
            transmittance_matrix = properties['Matrix_transmittance_coating']
            t_matrix = transmittance_matrix(inc_angle, wavelength)
            # transmittance dependent of incidence angle and wavelength
            if perpendicular_polarized:
                transmittance = calculate_reflectance(t_matrix, inc_angle, wavelength)[0]
            else:
                transmittance = calculate_reflectance(t_matrix, inc_angle, wavelength)[1]
            # Fraction of the non-reflected energy absorbed by the coating.
            factor_energy_absorbed = (1 - reflectance - transmittance) / (1 - reflectance)
            # Snell refraction direction (vector form).
            refracted_direction = incident * r.real + normal * (r.real * c1 - c2.real)
            refracted_direction.normalize()
            if not perpendicular_polarized:
                # refraction changes the parallel component of incident polarization
                polarization_vector = \
                    simple_polarization_refraction(
                        incident, normal, normal_parallel_plane, c2, polarization_vector)
            optical_state = OpticalState(polarization_vector, refracted_direction,
                                         Phenomenon.REFRACTION, nearby_material) # TODO: Set solid
            optical_state.extra_data['factor_energy_absorbed'] = \
                factor_energy_absorbed
            return optical_state
@traced(logger)
class TwoLayerMaterial(Material):
    """
    Subclass of `Material` for surface materials formed by two layers (back and front)
    """

    def __init__(self, name, name_front_layer, name_back_layer):
        """Build the material from the names of two registered layers."""
        super(TwoLayerMaterial, self).__init__(name, {})
        self.name_front_layer = name_front_layer
        self.name_back_layer = name_back_layer
        # Resolve the actual layer materials from the global registry.
        self.front_material = Material.by_name[name_front_layer]
        self.back_material = Material.by_name[name_back_layer]

    def to_json(self):
        """Serialize the material description as a JSON string."""
        description = {
            'name': self.name,
            'classname': 'TwoLayerMaterial',
            'name_front_layer': self.name_front_layer,
            'name_back_layer': self.name_back_layer
        }
        return json.dumps(description, cls=NumpyEncoder, indent=4)

    def change_of_optical_state(self, ray, normal_vector, nearby_material):
        """Delegate to the front or back layer depending on the hit side."""
        if ray.current_direction().dot(normal_vector) < 0:
            # Ray intercepted on the frontside of the surface.
            layer = self.front_material
        else:
            # Ray intercepted on the backside of the surface.
            layer = self.back_material
        return layer.change_of_optical_state(ray, normal_vector, nearby_material)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2010 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from __future__ import with_statement
import cmd
import locale
import os.path
import pkg_resources
from shlex import shlex
import StringIO
import sys
import traceback
from trac import __version__ as VERSION
from trac.admin import AdminCommandError, AdminCommandManager
from trac.core import TracError
from trac.env import Environment
from trac.ticket.model import *
from trac.util import translation
from trac.util.html import html
from trac.util.text import console_print, exception_to_unicode, printout, \
printerr, raw_input, to_unicode, \
getpreferredencoding
from trac.util.translation import _, ngettext, get_negotiated_locale, \
has_babel, cleandoc_
from trac.versioncontrol.api import RepositoryManager
from trac.wiki.admin import WikiAdmin
from trac.wiki.macros import WikiMacroBase
# Version string of the installed Trac distribution, shown in help output.
TRAC_VERSION = pkg_resources.get_distribution('Trac').version
# Will hold libreadline's `rl_completion_suppress_append` int when the
# workaround in TracAdmin.__init__ succeeds; stays None otherwise.
rl_completion_suppress_append = None
# User's locale setting, used to negotiate the console language.
LANG = os.environ.get('LANG')
def find_readline_lib():
    """Return the name (and possibly the full path) of the readline library
    linked to the readline module.

    Returns None when the module file contains no libreadline reference or
    when the search fails.
    """
    import readline
    with open(readline.__file__, "rb") as f:
        data = f.read()
    import re
    # Fix: the file is read in binary mode, so search with a *bytes*
    # pattern -- identical behaviour on Python 2 (str == bytes) and
    # correct on Python 3, where mixing str patterns with bytes raises.
    m = re.search(b'\0([^\0]*libreadline[^\0]*)\0', data)
    if m:
        return m.group(1)
    return None
class TracAdmin(cmd.Cmd):
    """Interactive command-line console for administering Trac
    environments (the `trac-admin` tool).
    """
    # No intro banner, custom prompt, no ruler under the doc header.
    intro = ''
    doc_header = 'Trac Admin Console %(version)s\n' \
                 'Available Commands:\n' \
                 % {'version': TRAC_VERSION}
    ruler = ''
    prompt = "Trac> "
    # Path of the environment being administered (set via env_set()).
    envname = None
    # Lazily opened Environment instance (see the `env` property).
    __env = None
    # Tri-state: None = not yet checked, else result of needs_upgrade().
    needs_upgrade = None
    def __init__(self, envdir=None):
        """Initialize the console, optionally bound to environment `envdir`."""
        cmd.Cmd.__init__(self)
        try:
            import readline
            delims = readline.get_completer_delims()
            # Keep '-', '/', ':', '(', ')' and '\' inside completion words.
            for c in '-/:()\\':
                delims = delims.replace(c, '')
            readline.set_completer_delims(delims)
            # Work around trailing space automatically inserted by libreadline
            # until Python gets fixed, see http://bugs.python.org/issue5833
            import ctypes
            lib_name = find_readline_lib()
            if lib_name is not None:
                lib = ctypes.cdll.LoadLibrary(lib_name)
                global rl_completion_suppress_append
                rl_completion_suppress_append = ctypes.c_int.in_dll(lib,
                    "rl_completion_suppress_append")
        except Exception:
            # readline/ctypes are optional niceties; ignore any failure.
            pass
        self.interactive = False
        if envdir:
            self.env_set(os.path.abspath(envdir))
    def emptyline(self):
        """Do nothing on an empty line (cmd.Cmd would repeat the last command)."""
        pass
    def onecmd(self, line):
        """Run a single command; `line` may be a `str` or an `unicode` object.

        In non-interactive mode, return the command's exit code
        (0 on success, 2 on error).
        """
        try:
            if isinstance(line, str):
                if self.interactive:
                    encoding = sys.stdin.encoding
                else:
                    encoding = getpreferredencoding() # sys.argv
                line = to_unicode(line, encoding)
            if self.interactive:
                # Double backslashes so arg_tokenize's shlex keeps them.
                line = line.replace('\\', '\\\\')
            rv = cmd.Cmd.onecmd(self, line) or 0
        except SystemExit:
            raise
        except AdminCommandError, e:
            printerr(_("Error: %(msg)s", msg=to_unicode(e)))
            if e.show_usage:
                print
                self.do_help(e.cmd or self.arg_tokenize(line)[0])
            rv = 2
        except TracError, e:
            printerr(exception_to_unicode(e))
            rv = 2
        except Exception, e:
            printerr(exception_to_unicode(e))
            rv = 2
            if self.env_check():
                # Log the full traceback in the environment's log.
                self.env.log.error("Exception in trac-admin command: %s",
                                   exception_to_unicode(e, traceback=True))
        if not self.interactive:
            return rv
    def run(self):
        """Enter the interactive command loop of the console."""
        self.interactive = True
        printout(_("""Welcome to trac-admin %(version)s
Interactive Trac administration console.
Copyright (C) 2003-2013 Edgewall Software
Type: '?' or 'help' for help on commands.
""", version=TRAC_VERSION))
        self.cmdloop()
    ##
    ## Environment methods
    ##
    def env_set(self, envname, env=None):
        """Bind the console to environment `envname` (optionally pre-opened)."""
        self.envname = envname
        self.prompt = "Trac [%s]> " % self.envname
        if env is not None:
            self.__env = env
    def env_check(self):
        """Return True if the environment can be opened (opening it lazily)."""
        if not self.__env:
            try:
                self._init_env()
            except Exception:
                # Missing or broken environment: report unavailable.
                return False
        return True
    @property
    def env(self):
        # The Environment instance, opened on first access; exits the
        # process with status 1 when the environment cannot be opened.
        try:
            if not self.__env:
                self._init_env()
            return self.__env
        except Exception, e:
            printerr(_("Failed to open environment: %(err)s",
                       err=exception_to_unicode(e, traceback=True)))
            sys.exit(1)
    def _init_env(self):
        """Open the environment at `self.envname` and set up localization."""
        self.__env = env = Environment(self.envname)
        # fixup language according to env settings
        if has_babel:
            default = env.config.get('trac', 'default_language', '')
            negotiated = get_negotiated_locale([LANG, default])
            if negotiated:
                translation.activate(negotiated)
    ##
    ## Utility methods
    ##
    def arg_tokenize(self, argstr):
        """`argstr` is an `unicode` string
        ... but shlex is not unicode friendly, so tokenize UTF-8 bytes
        and decode the tokens back.  Never returns an empty list.
        """
        lex = shlex(argstr.encode('utf-8'), posix=True)
        lex.whitespace_split = True
        lex.commenters = ''
        if os.name == 'nt':
            # On Windows '\' is a path separator, not an escape character.
            lex.escape = ''
        return [unicode(token, 'utf-8') for token in lex] or ['']
def word_complete(self, text, words):
words = list(set(a for a in words if a.startswith(text)))
if len(words) == 1:
words[0] += ' ' # Only one choice, skip to next arg
return words
@staticmethod
def split_help_text(text):
import re
paragraphs = re.split(r'(?m)(?:^[ \t]*\n){1,}', text)
return [re.sub(r'(?m)\s+', ' ', each.strip())
for each in paragraphs]
    @classmethod
    def print_doc(cls, docs, stream=None, short=False, long=False):
        """Print command documentation entries to `stream`.

        `docs` is a list of (command, args, doc) triples; entries with an
        empty doc are skipped.  With `short`, print one summary line per
        command; otherwise print usage plus the first paragraph, and all
        paragraphs when `long` is set or only one command is shown.
        """
        if stream is None:
            stream = sys.stdout
        docs = [doc for doc in docs if doc[2]]
        if not docs:
            return
        if short:
            # Align the summaries after the longest command name.
            max_len = max(len(doc[0]) for doc in docs)
            for (cmd, args, doc) in docs:
                paragraphs = cls.split_help_text(doc)
                console_print(stream, '%s %s' % (cmd.ljust(max_len),
                                                 paragraphs[0]))
        else:
            import textwrap
            for (cmd, args, doc) in docs:
                paragraphs = cls.split_help_text(doc)
                console_print(stream, '%s %s\n' % (cmd, args))
                console_print(stream, ' %s\n' % paragraphs[0])
                if (long or len(docs) == 1) and len(paragraphs) > 1:
                    for paragraph in paragraphs[1:]:
                        console_print(stream, textwrap.fill(paragraph, 79,
                            initial_indent=' ', subsequent_indent=' ')
                            + '\n')
##
## Command dispatcher
##
def complete_line(self, text, line, cmd_only=False):
if rl_completion_suppress_append is not None:
rl_completion_suppress_append.value = 1
args = self.arg_tokenize(line)
if line and line[-1] == ' ': # Space starts new argument
args.append('')
if self.env_check():
cmd_mgr = AdminCommandManager(self.env)
try:
comp = cmd_mgr.complete_command(args, cmd_only)
except Exception, e:
printerr()
printerr(_('Completion error: %(err)s',
err=exception_to_unicode(e)))
self.env.log.error("trac-admin completion error: %s",
exception_to_unicode(e, traceback=True))
comp = []
if len(args) == 1:
comp.extend(name[3:] for name in self.get_names()
if name.startswith('do_'))
try:
return comp.complete(text)
except AttributeError:
return self.word_complete(text, comp)
    def completenames(self, text, line, begidx, endidx):
        # Completion for the first token (the command name) only.
        return self.complete_line(text, line, True)
    def completedefault(self, text, line, begidx, endidx):
        # Completion for any later token (command arguments).
        return self.complete_line(text, line)
    def default(self, line):
        """Dispatch an unrecognized line to the environment's admin commands."""
        try:
            if not self.__env:
                self._init_env()
            if self.needs_upgrade is None:
                self.needs_upgrade = self.__env.needs_upgrade()
        except TracError, e:
            raise AdminCommandError(to_unicode(e))
        except Exception, e:
            raise AdminCommandError(exception_to_unicode(e))
        args = self.arg_tokenize(line)
        if args[0] == 'upgrade':
            # Always let `upgrade` run; force a recheck afterwards.
            self.needs_upgrade = None
        elif self.needs_upgrade:
            raise TracError(_('The Trac Environment needs to be upgraded.\n\n'
                              'Run "trac-admin %(path)s upgrade"',
                              path=self.envname))
        cmd_mgr = AdminCommandManager(self.env)
        return cmd_mgr.execute_command(*args)
    ##
    ## Available Commands
    ##
    ## Help
    _help_help = [('help', '', 'Show documentation')]
    @classmethod
    def all_docs(cls, env=None):
        # Doc triples for the builtin commands plus, when an environment
        # is available, the commands it provides.
        docs = (cls._help_help + cls._help_initenv)
        if env is not None:
            docs.extend(AdminCommandManager(env).get_command_help())
        return docs
    def complete_help(self, text, line, begidx, endidx):
        # Complete "help <command...>": drop the leading 'help ' prefix.
        return self.complete_line(text, line[5:], True)
def do_help(self, line=None):
arg = self.arg_tokenize(line)
if arg[0]:
doc = getattr(self, "_help_" + arg[0], None)
if doc is None and self.env_check():
cmd_mgr = AdminCommandManager(self.env)
doc = cmd_mgr.get_command_help(arg)
if doc:
self.print_doc(doc)
else:
printerr(_("No documentation found for '%(cmd)s'."
" Use 'help' to see the list of commands.",
cmd=' '.join(arg)))
cmds = cmd_mgr.get_similar_commands(arg[0])
if cmds:
printout('')
printout(ngettext("Did you mean this?",
"Did you mean one of these?",
len(cmds)))
for cmd in cmds:
printout(' ' + cmd)
else:
printout(_("trac-admin - The Trac Administration Console "
"%(version)s", version=TRAC_VERSION))
if not self.interactive:
print
printout(_("Usage: trac-admin </path/to/projenv> "
"[command [subcommand] [option ...]]\n")
)
printout(_("Invoking trac-admin without command starts "
"interactive mode.\n"))
env = self.env if self.env_check() else None
self.print_doc(self.all_docs(env), short=True)
    ## Quit / EOF
    _help_quit = [('quit', '', 'Exit the program')]
    _help_exit = _help_quit
    _help_EOF = _help_quit
    def do_quit(self, line):
        # Print a final newline, then terminate the process.
        print
        sys.exit()
    do_exit = do_quit # Alias
    do_EOF = do_quit # Alias
## Initenv
_help_initenv = [
('initenv', '[<projectname> <db> [<repostype> <repospath>]]',
"""Create and initialize a new environment
If no arguments are given, then the required parameters are requested
interactively.
One or more optional arguments --inherit=PATH can be used to specify
the "[inherit] file" option at environment creation time, so that only
the options not already specified in one of the global configuration
files are written to the conf/trac.ini file of the newly created
environment. Relative paths are resolved relative to the "conf"
directory of the new environment.
""")]
def do_initdb(self, line):
self.do_initenv(line)
    def get_initenv_args(self):
        """Interactively prompt for the new environment's parameters.

        Returns [project_name, database_connection_string].
        """
        returnvals = []
        printout(_("Creating a new Trac environment at %(envname)s",
                   envname=self.envname))
        printout(_("""
Trac will first ask a few questions about your environment
in order to initialize and prepare the project database.
Please enter the name of your project.
This name will be used in page titles and descriptions.
"""))
        dp = 'My Project'
        returnvals.append(raw_input(_("Project Name [%(default)s]> ",
                                      default=dp)).strip() or dp)
        printout(_("""
Please specify the connection string for the database to use.
By default, a local SQLite database is created in the environment
directory. It is also possible to use an already existing
PostgreSQL database (check the Trac documentation for the exact
connection string syntax).
"""))
        ddb = 'sqlite:db/trac.db'
        prompt = _("Database connection string [%(default)s]> ", default=ddb)
        returnvals.append(raw_input(prompt).strip() or ddb)
        print
        return returnvals
    def do_initenv(self, line):
        """Create and initialize a new environment (the `initenv` command).

        Returns 2 on a usage/precondition error; may call sys.exit(1) when
        environment creation itself fails.
        """
        def initenv_error(msg):
            # Uniform error prefix for all failure paths below.
            printerr(_("Initenv for '%(env)s' failed.", env=self.envname),
                     "\n" + msg)
        if self.env_check():
            initenv_error(_("Does an environment already exist?"))
            return 2
        if os.path.exists(self.envname) and os.listdir(self.envname):
            initenv_error(_("Directory exists and is not empty."))
            return 2
        if not os.path.exists(os.path.dirname(self.envname)):
            initenv_error(_("Base directory '%(env)s' does not exist. Please "
                            "create it manually and retry.",
                            env=os.path.dirname(self.envname)))
            return 2
        arg = self.arg_tokenize(line)
        inherit_paths = []
        # Extract all --inherit=PATH options from the argument list.
        i = 0
        while i < len(arg):
            item = arg[i]
            if item.startswith('--inherit='):
                inherit_paths.append(arg.pop(i)[10:])
            else:
                i += 1
        arg = arg or [''] # Reset to usual empty in case we popped the only one
        project_name = None
        db_str = None
        repository_type = None
        repository_dir = None
        if len(arg) == 1 and not arg[0]:
            # No positional arguments: ask interactively.
            project_name, db_str = self.get_initenv_args()
        elif len(arg) == 2:
            project_name, db_str = arg
        elif len(arg) == 4:
            project_name, db_str, repository_type, repository_dir = arg
        else:
            initenv_error('Wrong number of arguments: %d' % len(arg))
            return 2
        try:
            printout(_("Creating and Initializing Project"))
            options = [
                ('project', 'name', project_name),
                ('trac', 'database', db_str),
            ]
            if repository_dir:
                options.extend([
                    ('trac', 'repository_type', repository_type),
                    ('trac', 'repository_dir', repository_dir),
                ])
            if inherit_paths:
                options.append(('inherit', 'file',
                                ",\n ".join(inherit_paths)))
            try:
                self.__env = Environment(self.envname, create=True,
                                         options=options)
            except Exception, e:
                initenv_error(_('Failed to create environment.'))
                printerr(e)
                traceback.print_exc()
                sys.exit(1)
            # Add a few default wiki pages
            printout(_(" Installing default wiki pages"))
            pages_dir = pkg_resources.resource_filename('trac.wiki',
                                                        'default-pages')
            WikiAdmin(self.__env).load_pages(pages_dir)
            if repository_dir:
                try:
                    repos = RepositoryManager(self.__env).get_repository('')
                    if repos:
                        printout(_(" Indexing default repository"))
                        repos.sync(self._resync_feedback)
                except TracError, e:
                    # Indexing failures are only a warning; the environment
                    # itself was created successfully.
                    printerr(_("""
---------------------------------------------------------------------
Warning: couldn't index the default repository.
This can happen for a variety of reasons: wrong repository type,
no appropriate third party library for this repository type,
no actual repository at the specified repository path...
You can nevertheless start using your Trac environment, but
you'll need to check again your trac.ini file and the [trac]
repository_type and repository_path settings.
"""))
        except Exception, e:
            initenv_error(to_unicode(e))
            traceback.print_exc()
            return 2
        printout(_("""
---------------------------------------------------------------------
Project environment for '%(project_name)s' created.
You may now configure the environment by editing the file:
%(config_path)s
If you'd like to take this new project environment for a test drive,
try running the Trac standalone web server `tracd`:
tracd --port 8000 %(project_path)s
Then point your browser to http://localhost:8000/%(project_dir)s.
There you can also browse the documentation for your installed
version of Trac, including information on further setup (such as
deploying Trac to a real web server).
The latest documentation can also always be found on the project
website:
http://trac.edgewall.org/
Congratulations!
""", project_name=project_name, project_path=self.envname,
     project_dir=os.path.basename(self.envname),
     config_path=os.path.join(self.envname, 'conf', 'trac.ini')))
def _resync_feedback(self, rev):
sys.stdout.write(' [%s]\r' % rev)
sys.stdout.flush()
class TracAdminHelpMacro(WikiMacroBase):
    # Wiki macro rendering trac-admin command help inside wiki pages.
    _domain = 'messages'
    _description = cleandoc_(
    """Display help for trac-admin commands.
    Examples:
    {{{
    [[TracAdminHelp]] # all commands
    [[TracAdminHelp(wiki)]] # all wiki commands
    [[TracAdminHelp(wiki export)]] # the "wiki export" command
    [[TracAdminHelp(upgrade)]] # the upgrade command
    }}}
    """)
    def expand_macro(self, formatter, name, content):
        # Render the requested command help as a preformatted wiki block.
        if content:
            arg = content.strip().split()
            doc = getattr(TracAdmin, "_help_" + arg[0], None)
            if doc is None:
                cmd_mgr = AdminCommandManager(self.env)
                doc = cmd_mgr.get_command_help(arg)
            if not doc:
                raise TracError('Unknown trac-admin command "%s"' % content)
        else:
            doc = TracAdmin.all_docs(self.env)
        buf = StringIO.StringIO()
        TracAdmin.print_doc(doc, buf, long=True)
        return html.PRE(buf.getvalue(), class_='wiki')
def run(args=None):
    """Main entry point."""
    if args is None:
        args = sys.argv[1:]
    locale = None
    if has_babel:
        import babel
        try:
            locale = get_negotiated_locale([LANG]) or babel.Locale.default()
        except babel.UnknownLocaleError:
            pass
        translation.activate(locale)
    admin = TracAdmin()
    if len(args) > 0:
        if args[0] in ('-h', '--help', 'help'):
            return admin.onecmd(' '.join(['help'] + args[1:]))
        elif args[0] in ('-v','--version'):
            printout(os.path.basename(sys.argv[0]), TRAC_VERSION)
        else:
            env_path = os.path.abspath(args[0])
            # Reject non-ASCII environment paths early.
            try:
                unicode(env_path, 'ascii')
            except UnicodeDecodeError:
                printerr(_("Non-ascii environment path '%(path)s' not "
                           "supported.", path=to_unicode(env_path)))
                sys.exit(2)
            admin.env_set(env_path)
            if len(args) > 1:
                # Quote each extra argument so arg_tokenize re-splits them.
                s_args = ' '.join(["'%s'" % c for c in args[2:]])
                command = args[1] + ' ' + s_args
                return admin.onecmd(command)
            else:
                # Interactive mode: Ctrl-C returns to the prompt loop.
                while True:
                    try:
                        admin.run()
                    except KeyboardInterrupt:
                        admin.do_quit('')
    else:
        return admin.onecmd("help")
if __name__ == '__main__':
    # Make sure the matching Trac distribution is importable, then run.
    pkg_resources.require('Trac==%s' % VERSION)
    sys.exit(run())
| |
"""
Functions for identifying peaks in signals.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.six import xrange
from scipy.signal.wavelets import cwt, ricker
from scipy.stats import scoreatpercentile
__all__ = ['argrelmin', 'argrelmax', 'argrelextrema', 'find_peaks_cwt']
def _boolrelextrema(data, comparator, axis=0, order=1, mode='clip'):
"""
Calculate the relative extrema of `data`.
Relative extrema are calculated by finding locations where
``comparator(data[n], data[n+1:n+order+1])`` is True.
Parameters
----------
data : ndarray
Array in which to find the relative extrema.
comparator : callable
Function to use to compare two data points.
Should take 2 numbers as arguments.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n,n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated. 'wrap' (wrap around) or
'clip' (treat overflow as the same as the last (or first) element).
Default 'clip'. See numpy.take
Returns
-------
extrema : ndarray
Boolean array of the same shape as `data` that is True at an extrema,
False otherwise.
See also
--------
argrelmax, argrelmin
Examples
--------
>>> testdata = np.array([1,2,3,2,1])
>>> _boolrelextrema(testdata, np.greater, axis=0)
array([False, False, True, False, False], dtype=bool)
"""
if((int(order) != order) or (order < 1)):
raise ValueError('Order must be an int >= 1')
datalen = data.shape[axis]
locs = np.arange(0, datalen)
results = np.ones(data.shape, dtype=bool)
main = data.take(locs, axis=axis, mode=mode)
for shift in xrange(1, order + 1):
plus = data.take(locs + shift, axis=axis, mode=mode)
minus = data.take(locs - shift, axis=axis, mode=mode)
results &= comparator(main, plus)
results &= comparator(main, minus)
if(~results.any()):
return results
return results
def argrelmin(data, axis=0, order=1, mode='clip'):
    """
    Calculate the relative minima of `data`.

    Parameters
    ----------
    data : ndarray
        Array in which to find the relative minima.
    axis : int, optional
        Axis over which to select from `data`.  Default is 0.
    order : int, optional
        How many points on each side to use for the comparison
        to consider ``comparator(n, n+x)`` to be True.
    mode : str, optional
        How the edges of the vector are treated: 'wrap' (wrap around) or
        'clip' (treat overflow as the last/first element).
        Default 'clip'.  See `numpy.take`.

    Returns
    -------
    extrema : tuple of ndarrays
        Indices of the minima, one integer array per axis of `data`.
        The return value is a tuple even when `data` is one-dimensional.

    See Also
    --------
    argrelextrema, argrelmax

    Notes
    -----
    Equivalent to calling `argrelextrema` with ``np.less`` as comparator.

    .. versionadded:: 0.11.0

    Examples
    --------
    >>> from scipy.signal import argrelmin
    >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
    >>> argrelmin(x)
    (array([1, 5]),)
    >>> y = np.array([[1, 2, 1, 2],
    ...               [2, 2, 0, 0],
    ...               [5, 3, 4, 4]])
    ...
    >>> argrelmin(y, axis=1)
    (array([0, 2]), array([2, 1]))
    """
    return argrelextrema(data, np.less, axis=axis, order=order, mode=mode)
def argrelmax(data, axis=0, order=1, mode='clip'):
    """
    Calculate the relative maxima of `data`.

    Parameters
    ----------
    data : ndarray
        Array in which to find the relative maxima.
    axis : int, optional
        Axis over which to select from `data`.  Default is 0.
    order : int, optional
        How many points on each side to use for the comparison
        to consider ``comparator(n, n+x)`` to be True.
    mode : str, optional
        How the edges of the vector are treated: 'wrap' (wrap around) or
        'clip' (treat overflow as the last/first element).
        Default 'clip'.  See `numpy.take`.

    Returns
    -------
    extrema : tuple of ndarrays
        Indices of the maxima, one integer array per axis of `data`.
        The return value is a tuple even when `data` is one-dimensional.

    See Also
    --------
    argrelextrema, argrelmin

    Notes
    -----
    Equivalent to calling `argrelextrema` with ``np.greater`` as comparator.

    .. versionadded:: 0.11.0

    Examples
    --------
    >>> from scipy.signal import argrelmax
    >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
    >>> argrelmax(x)
    (array([3, 6]),)
    >>> y = np.array([[1, 2, 1, 2],
    ...               [2, 2, 0, 0],
    ...               [5, 3, 4, 4]])
    ...
    >>> argrelmax(y, axis=1)
    (array([0]), array([1]))
    """
    return argrelextrema(data, np.greater, axis=axis, order=order, mode=mode)
def argrelextrema(data, comparator, axis=0, order=1, mode='clip'):
    """
    Calculate the relative extrema of `data`.

    Parameters
    ----------
    data : ndarray
        Array in which to find the relative extrema.
    comparator : callable
        Function used to compare two data points; takes 2 numbers
        as arguments (e.g. ``np.greater`` or ``np.less``).
    axis : int, optional
        Axis over which to select from `data`.  Default is 0.
    order : int, optional
        How many points on each side to use for the comparison
        to consider ``comparator(n, n+x)`` to be True.
    mode : str, optional
        How the edges of the vector are treated: 'wrap' (wrap around) or
        'clip' (treat overflow as the last/first element).
        Default is 'clip'.  See `numpy.take`.

    Returns
    -------
    extrema : tuple of ndarrays
        Indices of the extrema, one integer array per axis of `data`.
        The return value is a tuple even when `data` is one-dimensional.

    See Also
    --------
    argrelmin, argrelmax

    Notes
    -----
    .. versionadded:: 0.11.0

    Examples
    --------
    >>> from scipy.signal import argrelextrema
    >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
    >>> argrelextrema(x, np.greater)
    (array([3, 6]),)
    >>> y = np.array([[1, 2, 1, 2],
    ...               [2, 2, 0, 0],
    ...               [5, 3, 4, 4]])
    ...
    >>> argrelextrema(y, np.less, axis=1)
    (array([0, 2]), array([2, 1]))
    """
    # Boolean mask of extrema, then convert to per-axis index arrays.
    extrema_mask = _boolrelextrema(data, comparator, axis, order, mode)
    return np.where(extrema_mask)
def _identify_ridge_lines(matr, max_distances, gap_thresh):
    """
    Identify ridges in the 2-D matrix.

    Expect that the width of the wavelet feature increases with increasing row
    number.

    Parameters
    ----------
    matr : 2-D ndarray
        Matrix in which to identify ridge lines.
    max_distances : 1-D sequence
        At each row, a ridge line is only connected
        if the relative max at row[n] is within
        `max_distances`[n] from the relative max at row[n+1].
    gap_thresh : int
        If a relative maximum is not found within `max_distances`,
        there will be a gap. A ridge line is discontinued if
        there are more than `gap_thresh` points without connecting
        a new relative maximum.

    Returns
    -------
    ridge_lines : tuple
        Tuple of 2 1-D sequences. `ridge_lines`[ii][0] are the rows of the
        ii-th ridge-line, `ridge_lines`[ii][1] are the columns. Empty if none
        found. Each ridge-line will be sorted by row (increasing), but the
        order of the ridge lines is not specified.

    References
    ----------
    Bioinformatics (2006) 22 (17): 2059-2065.
    doi: 10.1093/bioinformatics/btl355
    http://bioinformatics.oxfordjournals.org/content/22/17/2059.long

    Examples
    --------
    >>> data = np.random.rand(5,5)
    >>> ridge_lines = _identify_ridge_lines(data, 1, 1)

    Notes
    -----
    This function is intended to be used in conjunction with `cwt`
    as part of `find_peaks_cwt`.
    """
    if len(max_distances) < matr.shape[0]:
        raise ValueError('Max_distances must have at least as many rows '
                         'as matr')
    all_max_cols = _boolrelextrema(matr, np.greater, axis=1, order=1)
    # Highest row for which there are any relative maxima
    has_relmax = np.where(all_max_cols.any(axis=1))[0]
    if len(has_relmax) == 0:
        return []
    start_row = has_relmax[-1]
    # Each ridge line is a 3-tuple: rows, cols, gap number
    ridge_lines = [[[start_row],
                    [col],
                    0] for col in np.where(all_max_cols[start_row])[0]]
    final_lines = []
    rows = np.arange(start_row - 1, -1, -1)
    cols = np.arange(0, matr.shape[1])
    for row in rows:
        this_max_cols = cols[all_max_cols[row]]
        # Increment gap number of each line,
        # set it to zero later if appropriate
        for line in ridge_lines:
            line[2] += 1
        # XXX These should always be all_max_cols[row]
        # But the order might be different. Might be an efficiency gain
        # to make sure the order is the same and avoid this iteration
        prev_ridge_cols = np.array([line[1][-1] for line in ridge_lines])
        # Look through every relative maximum found at current row
        # Attempt to connect them with existing ridge lines.
        for ind, col in enumerate(this_max_cols):
            # If there is a previous ridge line within
            # the max_distance to connect to, do so.
            # Otherwise start a new one.
            line = None
            if len(prev_ridge_cols) > 0:
                diffs = np.abs(col - prev_ridge_cols)
                closest = np.argmin(diffs)
                if diffs[closest] <= max_distances[row]:
                    line = ridge_lines[closest]
            if line is not None:
                # Found a point close enough, extend current ridge line
                line[1].append(col)
                line[0].append(row)
                line[2] = 0
            else:
                new_line = [[row],
                            [col],
                            0]
                ridge_lines.append(new_line)
        # Remove the ridge lines with gap_number too high.
        # Iterating backwards keeps pending indices valid while deleting.
        # BUG FIX: was ``xrange`` (Python 2 only) -> NameError on Python 3;
        # ``range`` has identical semantics here.
        for ind in range(len(ridge_lines) - 1, -1, -1):
            line = ridge_lines[ind]
            if line[2] > gap_thresh:
                final_lines.append(line)
                del ridge_lines[ind]
    out_lines = []
    for line in (final_lines + ridge_lines):
        sortargs = np.array(np.argsort(line[0]))
        rows, cols = np.zeros_like(sortargs), np.zeros_like(sortargs)
        rows[sortargs] = line[0]
        cols[sortargs] = line[1]
        out_lines.append([rows, cols])
    return out_lines
def _filter_ridge_lines(cwt, ridge_lines, window_size=None, min_length=None,
min_snr=1, noise_perc=10):
"""
Filter ridge lines according to prescribed criteria. Intended
to be used for finding relative maxima.
Parameters
----------
cwt : 2-D ndarray
Continuous wavelet transform from which the `ridge_lines` were defined.
ridge_lines : 1-D sequence
Each element should contain 2 sequences, the rows and columns
of the ridge line (respectively).
window_size : int, optional
Size of window to use to calculate noise floor.
Default is ``cwt.shape[1] / 20``.
min_length : int, optional
Minimum length a ridge line needs to be acceptable.
Default is ``cwt.shape[0] / 4``, ie 1/4-th the number of widths.
min_snr : float, optional
Minimum SNR ratio. Default 1. The signal is the value of
the cwt matrix at the shortest length scale (``cwt[0, loc]``), the
noise is the `noise_perc`th percentile of datapoints contained within a
window of `window_size` around ``cwt[0, loc]``.
noise_perc : float, optional
When calculating the noise floor, percentile of data points
examined below which to consider noise. Calculated using
scipy.stats.scoreatpercentile.
References
----------
Bioinformatics (2006) 22 (17): 2059-2065. doi: 10.1093/bioinformatics/btl355
http://bioinformatics.oxfordjournals.org/content/22/17/2059.long
"""
num_points = cwt.shape[1]
if min_length is None:
min_length = np.ceil(cwt.shape[0] / 4)
if window_size is None:
window_size = np.ceil(num_points / 20)
window_size = int(window_size)
hf_window, odd = divmod(window_size, 2)
# Filter based on SNR
row_one = cwt[0, :]
noises = np.zeros_like(row_one)
for ind, val in enumerate(row_one):
window_start = max(ind - hf_window, 0)
window_end = min(ind + hf_window + odd, num_points)
noises[ind] = scoreatpercentile(row_one[window_start:window_end],
per=noise_perc)
def filt_func(line):
if len(line[0]) < min_length:
return False
snr = abs(cwt[line[0][0], line[1][0]] / noises[line[1][0]])
if snr < min_snr:
return False
return True
return list(filter(filt_func, ridge_lines))
def find_peaks_cwt(vector, widths, wavelet=None, max_distances=None,
                   gap_thresh=None, min_length=None, min_snr=1, noise_perc=10):
    """
    Attempt to find the peaks in a 1-D array.

    The general approach is to smooth `vector` by convolving it with
    `wavelet(width)` for each width in `widths`. Relative maxima which
    appear at enough length scales, and with sufficiently high SNR, are
    accepted.

    Parameters
    ----------
    vector : ndarray
        1-D array in which to find the peaks.
    widths : sequence
        1-D array of widths to use for calculating the CWT matrix. In general,
        this range should cover the expected width of peaks of interest.
    wavelet : callable, optional
        Should take a single variable and return a 1-D array to convolve
        with `vector`. Should be normalized to unit area.
        Default is the ricker wavelet.
    max_distances : ndarray, optional
        At each row, a ridge line is only connected if the relative max at
        row[n] is within ``max_distances[n]`` from the relative max at
        ``row[n+1]``. Default value is ``widths/4``.
    gap_thresh : float, optional
        If a relative maximum is not found within `max_distances`,
        there will be a gap. A ridge line is discontinued if there are more
        than `gap_thresh` points without connecting a new relative maximum.
        Default is 2.
    min_length : int, optional
        Minimum length a ridge line needs to be acceptable.
        Default is ``cwt.shape[0] / 4``, ie 1/4-th the number of widths.
    min_snr : float, optional
        Minimum SNR ratio. Default 1. The signal is the value of
        the cwt matrix at the shortest length scale (``cwt[0, loc]``), the
        noise is the `noise_perc`th percentile of datapoints contained within a
        window of `window_size` around ``cwt[0, loc]``.
    noise_perc : float, optional
        When calculating the noise floor, percentile of data points
        examined below which to consider noise. Calculated using
        `stats.scoreatpercentile`. Default is 10.

    Returns
    -------
    peaks_indices : list
        Indices of the locations in the `vector` where peaks were found.
        The list is sorted.

    See Also
    --------
    cwt

    Notes
    -----
    This approach was designed for finding sharp peaks among noisy data,
    however with proper parameter selection it should function well for
    different peak shapes.

    The algorithm is as follows:
     1. Perform a continuous wavelet transform on `vector`, for the supplied
        `widths`. This is a convolution of `vector` with `wavelet(width)` for
        each width in `widths`. See `cwt`
     2. Identify "ridge lines" in the cwt matrix. These are relative maxima
        at each row, connected across adjacent rows. See identify_ridge_lines
     3. Filter the ridge_lines using filter_ridge_lines.

    .. versionadded:: 0.11.0

    References
    ----------
    .. [1] Bioinformatics (2006) 22 (17): 2059-2065.
        doi: 10.1093/bioinformatics/btl355
        http://bioinformatics.oxfordjournals.org/content/22/17/2059.long

    Examples
    --------
    >>> from scipy import signal
    >>> xs = np.arange(0, np.pi, 0.05)
    >>> data = np.sin(xs)
    >>> peakind = signal.find_peaks_cwt(data, np.arange(1,10))
    >>> peakind, xs[peakind], data[peakind]
    ([32], array([ 1.6]), array([ 0.9995736]))
    """
    # Resolve the documented defaults.
    if wavelet is None:
        wavelet = ricker
    if gap_thresh is None:
        gap_thresh = np.ceil(widths[0])
    if max_distances is None:
        max_distances = widths / 4.0

    # Steps 1-3 of the algorithm described above.
    cwt_matrix = cwt(vector, wavelet, widths)
    ridge_lines = _identify_ridge_lines(cwt_matrix, max_distances, gap_thresh)
    filtered = _filter_ridge_lines(cwt_matrix, ridge_lines,
                                   min_length=min_length,
                                   min_snr=min_snr, noise_perc=noise_perc)
    # A peak's location is the column where its ridge line starts.
    return sorted(line[1][0] for line in filtered)
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import unittest
import pytest
from mock import Mock
from pants.backend.core.targets.dependencies import Dependencies
from pants.backend.jvm.artifact import Artifact
from pants.backend.jvm.repository import Repository
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.tasks.jar_publish import JarPublish
from pants.base.build_file_aliases import BuildFileAliases
from pants.base.exceptions import TaskError
from pants.scm.scm import Scm
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_mkdir, safe_walk
from pants_test.tasks.test_base import TaskTest
class JarPublishTest(TaskTest):
  """Unit tests for the JarPublish task.

  Builds a small synthetic graph of java_library targets with `provides`
  artifacts and exercises dry-run, local, and remote publishing with the
  scm/jar machinery mocked out.
  """

  @classmethod
  def task_type(cls):
    # Task class under test, required by the TaskTest harness.
    return JarPublish

  def test_smoke_publish(self):
    # Publishing with no targets should run cleanly and do nothing.
    with temporary_dir() as publish_dir:
      task = self.prepare_task(args=['--test-local=%s' % publish_dir],
                               build_graph=self.build_graph,
                               build_file_parser=self.build_file_parser)
      task.scm = Mock()
      task.execute()

  @property
  def alias_groups(self):
    # BUILD-file aliases available to the synthetic targets created below.
    # Also sets up the pushdb directory used to observe publish side effects.
    self.push_db_basedir = os.path.join(self.build_root, "pushdb")
    safe_mkdir(self.push_db_basedir)
    return BuildFileAliases.create(
      targets={
        'jar_library': JarLibrary,
        'java_library': JavaLibrary,
        'target': Dependencies,
      },
      objects={
        'artifact': Artifact,
        'internal': Repository(name='internal', url='http://example.com',
                               push_db_basedir=self.push_db_basedir),
      },
    )

  def _prepare_for_publishing(self, with_alias=False):
    """Create a publishable dependency chain c -> (z ->)? b -> a.

    When `with_alias` is True, a non-publishable alias target `z` sits
    between `c` and `b`; it should be skipped by publishing.
    """
    targets = {}
    targets['a'] = self.create_library('a', 'java_library', 'a', ['A.java'],
                                       provides="""artifact(org='com.example', name='nail', repo=internal)""")
    targets['b'] = self.create_library('b', 'java_library', 'b', ['B.java'],
                                       provides="""artifact(org='com.example', name='shoe', repo=internal)""",
                                       dependencies=['a'])
    if with_alias:
      # add an alias target between c and b
      targets['z'] = self.create_library('z', 'target', 'z', dependencies=['b'])
      c_deps = ['z']
    else:
      c_deps = ['b']
    targets['c'] = self.create_library('c', 'java_library', 'c', ['C.java'],
                                       provides="""artifact(org='com.example', name='horse', repo=internal)""",
                                       dependencies=c_deps)
    return targets.values()

  def _get_config(self):
    # Minimal jar-publish config declaring the 'internal' repo that the
    # artifacts in _prepare_for_publishing publish to.
    return """
[jar-publish]
repos: {
  'internal': {
    'resolver': 'example.com',
    'confs': ['default', 'sources', 'docs', 'changelog'],
  }
}
"""

  def _prepare_mocks(self, task):
    # Stub out everything with external side effects so tests can assert
    # on call counts instead of performing real scm/jar operations.
    task.scm = Mock()
    task.scm.changed_files = Mock(return_value=[])
    task._copy_artifact = Mock()
    task.create_source_jar = Mock()
    task.create_doc_jar = Mock()
    task.changelog = Mock(return_value="Many changes")
    task.publish = Mock()
    task.confirm_push = Mock(return_value=True)

  def test_publish_unlisted_repo(self):
    # Note that we set a different config here, so repos:internal has no config
    config = """
[jar-publish]
repos: {
  'another-repo': {
    'resolver': 'example.org',
    'confs': ['default', 'sources', 'docs', 'changelog'],
  }
}
"""
    targets = self._prepare_for_publishing()
    with temporary_dir():
      task = self.prepare_task(config=config,
                               args=['--no-test-dryrun'],
                               build_graph=self.build_graph,
                               build_file_parser=self.build_file_parser,
                               targets=targets)
      self._prepare_mocks(task)
      # Publishing to a repo absent from config must fail with a clear
      # error message naming the missing repo.
      with self.assertRaises(TaskError):
        try:
          task.execute()
        except TaskError as e:
          assert "Repository internal has no" in str(e)
          raise e

  def test_publish_local_dryrun(self):
    targets = self._prepare_for_publishing()
    with temporary_dir() as publish_dir:
      task = self.prepare_task(args=['--test-local=%s' % publish_dir],
                               build_graph=self.build_graph,
                               build_file_parser=self.build_file_parser,
                               targets=targets)
      self._prepare_mocks(task)
      task.execute()

      # Nothing is written to the pushdb during a dryrun publish
      # (maybe some directories are created, but git will ignore them)
      files = []
      for _, _, filenames in safe_walk(self.push_db_basedir):
        files.extend(filenames)
      self.assertEquals(0, len(files),
                        "Nothing should be written to the pushdb during a dryrun publish")
      self.assertEquals(0, task.confirm_push.call_count,
                        "Expected confirm_push not to be called")
      self.assertEquals(0, task.publish.call_count,
                        "Expected publish not to be called")

  def test_publish_local(self):
    # Run with and without an alias in the chain; the alias itself is not
    # a publishable artifact and must be excluded from the counts.
    for with_alias in [True, False]:
      targets = self._prepare_for_publishing(with_alias=with_alias)

      with temporary_dir() as publish_dir:
        task = self.prepare_task(args=['--test-local=%s' % publish_dir,
                                       '--no-test-dryrun'],
                                 build_graph=self.build_graph,
                                 build_file_parser=self.build_file_parser,
                                 targets=targets)
        self._prepare_mocks(task)
        task.execute()

        # Nothing is written to the pushdb during a local publish
        # (maybe some directories are created, but git will ignore them)
        files = []
        for _, _, filenames in safe_walk(self.push_db_basedir):
          files.extend(filenames)
        self.assertEquals(0, len(files),
                          "Nothing should be written to the pushdb during a local publish")

        publishable_count = len(targets) - (1 if with_alias else 0)
        self.assertEquals(publishable_count, task.confirm_push.call_count,
                          "Expected one call to confirm_push per artifact")
        self.assertEquals(publishable_count, task.publish.call_count,
                          "Expected one call to publish per artifact")

  def test_publish_remote(self):
    targets = self._prepare_for_publishing()
    task = self.prepare_task(config=self._get_config(),
                             args=['--no-test-dryrun'],
                             build_graph=self.build_graph,
                             build_file_parser=self.build_file_parser,
                             targets=targets)
    self._prepare_mocks(task)
    task.execute()

    # One file per task is written to the pushdb during a local publish
    files = []
    for _, _, filenames in safe_walk(self.push_db_basedir):
      files.extend(filenames)
    self.assertEquals(len(targets), len(files),
                      "During a remote publish, one pushdb should be written per target")
    self.assertEquals(len(targets), task.confirm_push.call_count,
                      "Expected one call to confirm_push per artifact")
    self.assertEquals(len(targets), task.publish.call_count,
                      "Expected one call to publish per artifact")
    self.assertEquals(len(targets), task.scm.tag.call_count,
                      "Expected one call to scm.tag per artifact")

  def test_publish_retry_works(self):
    targets = self._prepare_for_publishing()
    task = self.prepare_task(config=self._get_config(),
                             args=['--no-test-dryrun',
                                   '--test-scm-push-attempts=3'],
                             build_graph=self.build_graph,
                             build_file_parser=self.build_file_parser,
                             targets=[targets[0]])
    self._prepare_mocks(task)

    # First two pushes fail transiently; the third succeeds within the
    # configured attempt budget.
    task.scm.push = Mock()
    task.scm.push.side_effect = FailNTimes(2, Scm.RemoteException)

    task.execute()
    # Two failures, one success
    self.assertEquals(2 + 1, task.scm.push.call_count)

  def test_publish_retry_eventually_fails(self):
    targets = self._prepare_for_publishing()

    #confirm that we fail if we have too many failed push attempts
    task = self.prepare_task(config=self._get_config(),
                             args=['--no-test-dryrun',
                                   '--test-scm-push-attempts=3'],
                             build_graph=self.build_graph,
                             build_file_parser=self.build_file_parser,
                             targets=[targets[0]])
    self._prepare_mocks(task)
    task.scm.push = Mock()
    task.scm.push.side_effect = FailNTimes(3, Scm.RemoteException)

    with self.assertRaises(Scm.RemoteException):
      task.execute()

  def test_publish_local_only(self):
    # Without any repo/local configuration, task setup itself should fail.
    with pytest.raises(TaskError) as exc:
      self.prepare_task()
class FailNTimes(object):
  """Callable that raises `exc_type` for its first `tries` invocations,
  then returns `success` on every later call.

  Intended for use as a mock ``side_effect`` to simulate transient
  failures (e.g. flaky scm pushes).

  Note: made a new-style class (explicit ``object`` base) for consistent
  behavior under Python 2; a no-op under Python 3.
  """

  def __init__(self, tries, exc_type, success=None):
    self.tries = tries          # number of calls that will still fail
    self.exc_type = exc_type    # exception class raised on failure
    self.success = success      # value returned once failures are exhausted

  def __call__(self, *args, **kwargs):
    self.tries -= 1
    if self.tries >= 0:
      raise self.exc_type()
    else:
      return self.success
class FailNTimesTest(unittest.TestCase):
  """Sanity-checks FailNTimes when wired up as a mock side_effect."""

  def test_fail_n_times(self):
    foo = Mock()
    foo.bar.side_effect = FailNTimes(1, ValueError)
    # The single configured failure surfaces on the first call.
    with self.assertRaises(ValueError):
      foo.bar()
    # BUG FIX: this call previously sat inside the assertRaises block, so
    # it never executed (the first call raised and exited the block).
    # Calling it here actually exercises the post-failure success path.
    foo.bar()
| |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
Implementation of a GL Program object.
This class parses the source code to obtain the names and types of
uniforms, attributes, varyings and constants. This information is used
to provide the user with a natural way to set variables.
Gloo vs GLIR
------------
Done in this class:
* Check the data shape given for uniforms and attributes
* Convert uniform data to array of the correct type
* Check whether any variables are set that are not present in source code
Done by GLIR:
* Check whether a set uniform/attribute is not active (a warning is given)
* Check whether an active attribute or uniform is not set (a warning is given)
"""
import re
import numpy as np
from .globject import GLObject
from .buffer import VertexBuffer, IndexBuffer, DataBuffer
from .texture import BaseTexture, Texture2D, Texture3D, Texture1D
from ..util import logger
from .util import check_enum
from ..ext.six import string_types
from .context import get_current_canvas
from .preprocessor import preprocess
# ----------------------------------------------------------- Program class ---
class Program(GLObject):
    """ Shader program object

    A Program is an object to which shaders can be attached and linked to
    create the final program.

    Uniforms and attributes can be set using indexing: e.g.
    ``program['a_pos'] = pos_data`` and ``program['u_color'] = (1, 0, 0)``.

    Parameters
    ----------
    vert : str
        The vertex shader to be used by this program
    frag : str
        The fragment shader to be used by this program
    count : int (optional)
        The program will prepare a structured vertex buffer of count
        vertices. All attributes set using ``prog['attr'] = X`` will
        be combined into a structured vbo with interleaved elements, which
        is more efficient than having one vbo per attribute.

    Notes
    -----
    If several shaders are specified, only one can contain the main
    function. OpenGL ES 2.0 does not support a list of shaders.
    """

    _GLIR_TYPE = 'Program'

    _gtypes = {  # DTYPE, NUMEL
        'float': (np.float32, 1),
        'vec2': (np.float32, 2),
        'vec3': (np.float32, 3),
        'vec4': (np.float32, 4),
        'int': (np.int32, 1),
        'ivec2': (np.int32, 2),
        'ivec3': (np.int32, 3),
        'ivec4': (np.int32, 4),
        'bool': (np.int32, 1),
        # BUG FIX: ``np.bool`` was a deprecated alias of the builtin bool and
        # was removed in NumPy 1.24; ``np.bool_`` yields the same dtype.
        'bvec2': (np.bool_, 2),
        'bvec3': (np.bool_, 3),
        'bvec4': (np.bool_, 4),
        'mat2': (np.float32, 4),
        'mat3': (np.float32, 9),
        'mat4': (np.float32, 16),
        'sampler1D': (np.uint32, 1),
        'sampler2D': (np.uint32, 1),
        'sampler3D': (np.uint32, 1),
    }

    # ---------------------------------
    def __init__(self, vert=None, frag=None, count=0):
        GLObject.__init__(self)

        # Init source code for vertex and fragment shader
        self._shaders = '', ''

        # Init description of variables obtained from source code
        self._code_variables = {}  # name -> (kind, type_, name, size)
        # Init user-defined data for attributes and uniforms
        self._user_variables = {}  # name -> data / buffer / texture
        # Init pending user-defined data
        self._pending_variables = {}  # name -> data

        # NOTE: we *could* allow vert and frag to be a tuple/list of shaders,
        # but that would complicate the GLIR implementation, and it seems
        # unnecessary

        # Check and set shaders
        if isinstance(vert, string_types) and isinstance(frag, string_types):
            self.set_shaders(vert, frag)
        elif not (vert is None and frag is None):
            raise ValueError('Vert and frag must either both be str or None')

        # Build associated structured vertex buffer if count is given.
        # This makes it easy to create a structured vertex buffer
        # without having to create a numpy array with structured dtype.
        # All assignments must be done before the GLIR commands are
        # sent away for parsing (in draw) though.
        self._count = count
        self._buffer = None  # Set to None in draw()
        if self._count > 0:
            dtype = []
            for kind, type_, name, size in self._code_variables.values():
                if kind == 'attribute':
                    dt, numel = self._gtypes[type_]
                    dtype.append((name, dt, numel))
            self._buffer = np.zeros(self._count, dtype=dtype)
            self.bind(VertexBuffer(self._buffer))

    def set_shaders(self, vert, frag):
        """ Set the vertex and fragment shaders.

        Parameters
        ----------
        vert : str
            Source code for vertex shader.
        frag : str
            Source code for fragment shaders.
        """
        if not vert or not frag:
            raise ValueError('Vertex and fragment code must both be non-empty')
        # pre-process shader code for #include directives
        vert, frag = preprocess(vert), preprocess(frag)
        # Store source code, send it to glir, parse the code for variables
        self._shaders = vert, frag
        self._glir.command('SHADERS', self._id, vert, frag)
        # All current variables become pending variables again
        for key, val in self._user_variables.items():
            self._pending_variables[key] = val
        self._user_variables = {}
        # Parse code (and process pending variables)
        self._parse_variables_from_code()

    @property
    def shaders(self):
        """ Source code for vertex and fragment shader
        """
        return self._shaders

    @property
    def variables(self):
        """ A list of the variables in use by the current program

        The list is obtained by parsing the GLSL source code.

        Returns
        -------
        variables : list
            Each variable is represented as a tuple (kind, type, name),
            where `kind` is 'attribute', 'uniform', 'uniform_array',
            'varying' or 'const'.
        """
        # Note that internally the variables are stored as a dict
        # that maps names -> tuples, for easy looking up by name.
        return [x[:3] for x in self._code_variables.values()]

    def _parse_variables_from_code(self):
        """ Parse uniforms, attributes and varyings from the source code.
        """

        # Get one string of code with comments removed
        code = '\n\n'.join(self._shaders)
        # BUG FIX: the fourth positional argument of re.sub is *count*, not
        # *flags*; passing ``re.M`` (== 8) silently limited comment removal
        # to the first 8 occurrences. Pass it as ``flags=`` instead.
        code = re.sub(r'(.*)(//.*)', r'\1', code, flags=re.M)

        # Regexp to look for variable names
        var_regexp = (r"\s*VARIABLE\s+"  # kind of variable
                      r"((highp|mediump|lowp)\s+)?"  # Precision (optional)
                      r"(?P<type>\w+)\s+"  # type
                      r"(?P<name>\w+)\s*"  # name
                      r"(\[(?P<size>\d+)\])?"  # size (optional)
                      r"(\s*\=\s*[0-9.]+)?"  # default value (optional)
                      r"\s*;"  # end
                      )

        # Parse uniforms, attributes and varyings
        self._code_variables = {}
        for kind in ('uniform', 'attribute', 'varying', 'const'):
            regex = re.compile(var_regexp.replace('VARIABLE', kind),
                               flags=re.MULTILINE)
            for m in re.finditer(regex, code):
                gtype = m.group('type')
                size = int(m.group('size')) if m.group('size') else -1
                this_kind = kind
                if size >= 1:
                    # uniform arrays get added both as individuals and full
                    for i in range(size):
                        name = '%s[%d]' % (m.group('name'), i)
                        self._code_variables[name] = kind, gtype, name, -1
                    this_kind = 'uniform_array'

                name = m.group('name')
                self._code_variables[name] = this_kind, gtype, name, size

        # Now that our code variables are up-to date, we can process
        # the variables that were set but yet unknown.
        self._process_pending_variables()

    def bind(self, data):
        """ Bind a VertexBuffer that has structured data

        Parameters
        ----------
        data : VertexBuffer
            The vertex buffer to bind. The field names of the array
            are mapped to attribute names in GLSL.
        """
        # Check
        if not isinstance(data, VertexBuffer):
            raise ValueError('Program.bind() requires a VertexBuffer.')
        # Apply
        for name in data.dtype.names:
            self[name] = data[name]

    def _process_pending_variables(self):
        """ Try to apply the variables that were set but not known yet.
        """
        # Clear our list of pending variables
        self._pending_variables, pending = {}, self._pending_variables
        # Try to apply it. On failure, it will be added again
        for name, data in pending.items():
            self[name] = data

    def __setitem__(self, name, data):
        """ Setting uniform or attribute data

        This method requires the information about the variable that we
        know from parsing the source code. If this information is not
        yet available, the data is stored in a list of pending data,
        and we attempt to set it once new shading code has been set.

        For uniforms, the data can represent a plain uniform or a
        sampler. In the latter case, this method accepts a Texture
        object or a numpy array which is used to update the existing
        texture. A new texture is created if necessary.

        For attributes, the data can be a tuple/float which GLSL will
        use for the value of all vertices. This method also accepts VBO
        data as a VertexBuffer object or a numpy array which is used
        to update the existing VertexBuffer. A new VertexBuffer is
        created if necessary.

        By passing None as data, the uniform or attribute can be
        "unregistered". This can be useful to get rid of variables that
        are no longer present or active in the new source code that is
        about to be set.
        """
        # Deal with local buffer storage (see count argument in __init__)
        if (self._buffer is not None) and not isinstance(data, DataBuffer):
            if name in self._buffer.dtype.names:
                self._buffer[name] = data
                return

        # Delete?
        if data is None:
            self._user_variables.pop(name, None)
            self._pending_variables.pop(name, None)
            return

        if name in self._code_variables:
            kind, type_, name, size = self._code_variables[name]

            if kind == 'uniform':
                if type_.startswith('sampler'):
                    # Texture data; overwrite or update
                    tex = self._user_variables.get(name, None)
                    if isinstance(data, BaseTexture):
                        pass
                    elif tex and hasattr(tex, 'set_data'):
                        tex.set_data(data)
                        return
                    elif type_ == 'sampler1D':
                        data = Texture1D(data)
                    elif type_ == 'sampler2D':
                        data = Texture2D(data)
                    elif type_ == 'sampler3D':
                        data = Texture3D(data)
                    else:
                        # This should not happen
                        raise RuntimeError('Unknown type %s' % type_)
                    # Store and send GLIR command
                    self._user_variables[name] = data
                    self.glir.associate(data.glir)
                    self._glir.command('TEXTURE', self._id, name, data.id)
                else:
                    # Normal uniform; convert to np array and check size
                    dtype, numel = self._gtypes[type_]
                    data = np.array(data, dtype=dtype).ravel()
                    if data.size != numel:
                        raise ValueError('Uniform %r needs %i elements, '
                                         'not %i.' % (name, numel, data.size))
                    # Store and send GLIR command
                    self._user_variables[name] = data
                    self._glir.command('UNIFORM', self._id, name, type_, data)

            elif kind == 'uniform_array':
                # Normal uniform; convert to np array and check size
                dtype, numel = self._gtypes[type_]
                data = np.atleast_2d(data).astype(dtype)
                need_shape = (size, numel)
                if data.shape != need_shape:
                    raise ValueError('Uniform array %r needs shape %s not %s'
                                     % (name, need_shape, data.shape))
                data = data.ravel()
                # Store and send GLIR command
                self._user_variables[name] = data
                self._glir.command('UNIFORM', self._id, name, type_, data)

            elif kind == 'attribute':
                # Is this a constant value per vertex
                is_constant = False

                def isscalar(x):
                    return isinstance(x, (float, int))
                if isscalar(data):
                    is_constant = True
                elif isinstance(data, (tuple, list)):
                    is_constant = all([isscalar(e) for e in data])

                if not is_constant:
                    # VBO data; overwrite or update
                    vbo = self._user_variables.get(name, None)
                    if isinstance(data, DataBuffer):
                        pass
                    elif vbo is not None and hasattr(vbo, 'set_data'):
                        vbo.set_data(data)
                        return
                    else:
                        data = VertexBuffer(data)
                    # Store and send GLIR command
                    if data.dtype is not None:
                        numel = self._gtypes[type_][1]
                        if data._last_dim and data._last_dim != numel:
                            raise ValueError('data.shape[-1] must be %s '
                                             'not %s for %s'
                                             % (numel, data._last_dim, name))
                    self._user_variables[name] = data
                    value = (data.id, data.stride, data.offset)
                    self.glir.associate(data.glir)
                    self._glir.command('ATTRIBUTE', self._id,
                                       name, type_, value)
                else:
                    # Single-value attribute; convert to array and check size
                    dtype, numel = self._gtypes[type_]
                    data = np.array(data, dtype=dtype)
                    if data.ndim == 0:
                        data.shape = data.size
                    if data.size != numel:
                        raise ValueError('Attribute %r needs %i elements, '
                                         'not %i.' % (name, numel, data.size))
                    # Store and send GLIR command
                    self._user_variables[name] = data
                    value = tuple([0] + [i for i in data])
                    self._glir.command('ATTRIBUTE', self._id,
                                       name, type_, value)
            else:
                raise KeyError('Cannot set data for a %s.' % kind)
        else:
            # This variable is not defined in the current source code,
            # so we cannot establish whether this is a uniform or
            # attribute, nor check its type. Try again later.
            self._pending_variables[name] = data

    def __contains__(self, key):
        return key in self._code_variables

    def __getitem__(self, name):
        """ Get user-defined data for attributes and uniforms.
        """
        if name in self._user_variables:
            return self._user_variables[name]
        elif name in self._pending_variables:
            return self._pending_variables[name]
        else:
            raise KeyError("Unknown uniform or attribute %s" % name)

    def draw(self, mode='triangles', indices=None, check_error=True):
        """ Draw the attribute arrays in the specified mode.

        Parameters
        ----------
        mode : str | GL_ENUM
            'points', 'lines', 'line_strip', 'line_loop', 'triangles',
            'triangle_strip', or 'triangle_fan'.
        indices : array
            Array of indices to draw.
        check_error:
            Check error after draw.
        """
        # Invalidate buffer (data has already been sent)
        self._buffer = None
        # Check if mode is valid
        mode = check_enum(mode)
        if mode not in ['points', 'lines', 'line_strip', 'line_loop',
                        'triangles', 'triangle_strip', 'triangle_fan']:
            raise ValueError('Invalid draw mode: %r' % mode)
        # Check leftover variables, warn, discard them
        # In GLIR we check whether all attributes are indeed set
        for name in self._pending_variables:
            # logger.warn is a deprecated alias of logger.warning
            logger.warning('Variable %r is given but not known.' % name)
        self._pending_variables = {}

        # Check attribute sizes
        attributes = [vbo for vbo in self._user_variables.values()
                      if isinstance(vbo, DataBuffer)]
        sizes = [a.size for a in attributes]
        if len(attributes) < 1:
            raise RuntimeError('Must have at least one attribute')
        if not all(s == sizes[0] for s in sizes[1:]):
            msg = '\n'.join(['%s: %s' % (str(a), a.size) for a in attributes])
            raise RuntimeError('All attributes must have the same size, got:\n'
                               '%s' % msg)

        # Get the glir queue that we need now
        canvas = get_current_canvas()
        assert canvas is not None
        # Associate canvas
        canvas.context.glir.associate(self.glir)

        # Indexbuffer
        if isinstance(indices, IndexBuffer):
            canvas.context.glir.associate(indices.glir)
            logger.debug("Program drawing %r with index buffer" % mode)
            gltypes = {np.dtype(np.uint8): 'UNSIGNED_BYTE',
                       np.dtype(np.uint16): 'UNSIGNED_SHORT',
                       np.dtype(np.uint32): 'UNSIGNED_INT'}
            selection = indices.id, gltypes[indices.dtype], indices.size
            canvas.context.glir.command('DRAW', self._id, mode, selection)
        elif indices is None:
            selection = 0, attributes[0].size
            logger.debug("Program drawing %r with %r" % (mode, selection))
            canvas.context.glir.command('DRAW', self._id, mode, selection)
        else:
            raise TypeError("Invalid index: %r (must be IndexBuffer)" %
                            indices)

        # Process GLIR commands
        canvas.context.flush_commands()
| |
import datetime
import os
import django.utils.copycompat as copy
from django.conf import settings
from django.db.models.fields import Field
from django.core.files.base import File, ContentFile
from django.core.files.storage import default_storage
from django.core.files.images import ImageFile, get_image_dimensions
from django.core.files.uploadedfile import UploadedFile
from django.utils.functional import curry
from django.db.models import signals
from django.utils.encoding import force_unicode, smart_str
from django.utils.translation import ugettext_lazy, ugettext as _
from django import forms
from django.db.models.loading import cache
class FieldFile(File):
    def __init__(self, instance, field, name):
        # Pass None as the file object: the underlying file is opened
        # lazily from storage on first access (see _get_file below).
        super(FieldFile, self).__init__(None, name)
        self.instance = instance  # model instance this file belongs to
        self.field = field        # the FileField describing this attribute
        self.storage = field.storage
        # True once the content exists in storage (i.e. not a pending save).
        self._committed = True
def __eq__(self, other):
# Older code may be expecting FileField values to be simple strings.
# By overriding the == operator, it can remain backwards compatibility.
if hasattr(other, 'name'):
return self.name == other.name
return self.name == other
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
# Required because we defined a custom __eq__.
return hash(self.name)
# The standard File contains most of the necessary properties, but
# FieldFiles can be instantiated without a name, so that needs to
# be checked for here.
def _require_file(self):
if not self:
raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)
def _get_file(self):
self._require_file()
if not hasattr(self, '_file') or self._file is None:
self._file = self.storage.open(self.name, 'rb')
return self._file
def _set_file(self, file):
self._file = file
def _del_file(self):
del self._file
file = property(_get_file, _set_file, _del_file)
def _get_path(self):
self._require_file()
return self.storage.path(self.name)
path = property(_get_path)
def _get_url(self):
self._require_file()
return self.storage.url(self.name)
url = property(_get_url)
def _get_size(self):
self._require_file()
if not self._committed:
return len(self.file)
return self.storage.size(self.name)
size = property(_get_size)
def open(self, mode='rb'):
self._require_file()
self.file.open(mode)
# open() doesn't alter the file's contents, but it does reset the pointer
open.alters_data = True
# In addition to the standard File API, FieldFiles have extra methods
# to further manipulate the underlying file, as well as update the
# associated model instance.
def save(self, name, content, save=True):
name = self.field.generate_filename(self.instance, name)
self.name = self.storage.save(name, content)
setattr(self.instance, self.field.name, self.name)
# Update the filesize cache
self._size = len(content)
self._committed = True
# Save the object because it has changed, unless save is False
if save:
self.instance.save()
save.alters_data = True
def delete(self, save=True):
# Only close the file if it's already open, which we know by the
# presence of self._file
if hasattr(self, '_file'):
self.close()
del self.file
self.storage.delete(self.name)
self.name = None
setattr(self.instance, self.field.name, self.name)
# Delete the filesize cache
if hasattr(self, '_size'):
del self._size
self._committed = False
if save:
self.instance.save()
delete.alters_data = True
def _get_closed(self):
file = getattr(self, '_file', None)
return file is None or file.closed
closed = property(_get_closed)
def close(self):
file = getattr(self, '_file', None)
if file is not None:
file.close()
def __getstate__(self):
# FieldFile needs access to its associated model field and an instance
# it's attached to in order to work properly, but the only necessary
# data to be pickled is the file's name itself. Everything else will
# be restored later, by FileDescriptor below.
return {'name': self.name, 'closed': False, '_committed': True, '_file': None}
class FileDescriptor(object):
    """
    The descriptor for the file attribute on the model instance. Returns a
    FieldFile when accessed so you can do stuff like::
    >>> instance.file.size
    Assigns a file object on assignment so you can do::
    >>> instance.file = File(...)
    """
    def __init__(self, field):
        self.field = field
    def __get__(self, instance=None, owner=None):
        # Descriptor access through the class (no instance) is not supported.
        if instance is None:
            raise AttributeError(
                "The '%s' attribute can only be accessed from %s instances."
                % (self.field.name, owner.__name__))
        # This is slightly complicated, so worth an explanation.
        # ``instance.file`` needs to ultimately return some instance of `File`,
        # probably a subclass. Additionally, this returned object needs to have
        # the FieldFile API so that accounts can easily do things like
        # instance.file.path and have that delegated to the file storage engine.
        # Easy enough if we're strict about assignment in __set__, but if you
        # peek below you can see that we're not. So depending on the current
        # value of the field we have to dynamically construct some sort of
        # "thing" to return.
        # The instance dict contains whatever was originally assigned
        # in __set__.
        file = instance.__dict__[self.field.name]
        # If this value is a string (instance.file = "path/to/file") or None
        # then we simply wrap it with the appropriate attribute class according
        # to the file field. [This is FieldFile for FileFields and
        # ImageFieldFile for ImageFields; it's also conceivable that user
        # subclasses might also want to subclass the attribute class]. This
        # object understands how to convert a path to a file, and also how to
        # handle None.
        if isinstance(file, basestring) or file is None:
            attr = self.field.attr_class(instance, self.field, file)
            instance.__dict__[self.field.name] = attr
        # Other types of files may be assigned as well, but they need to have
        # the FieldFile interface added to them. Thus, we wrap any other type
        # of File inside a FieldFile (well, the field's attr_class, which is
        # usually FieldFile).
        elif isinstance(file, File) and not isinstance(file, FieldFile):
            file_copy = self.field.attr_class(instance, self.field, file.name)
            file_copy.file = file
            file_copy._committed = False
            instance.__dict__[self.field.name] = file_copy
        # Finally, because of the (some would say boneheaded) way pickle works,
        # the underlying FieldFile might not actually itself have an associated
        # file. So we need to reset the details of the FieldFile in those cases.
        elif isinstance(file, FieldFile) and not hasattr(file, 'field'):
            file.instance = instance
            file.field = self.field
            file.storage = self.field.storage
        # That was fun, wasn't it?
        return instance.__dict__[self.field.name]
    def __set__(self, instance, value):
        # Store the raw value; __get__ normalizes it to attr_class on access.
        instance.__dict__[self.field.name] = value
class FileField(Field):
    """
    Model field that stores a file name in the database and delegates the
    actual file contents to a storage backend.
    """
    # The class to wrap instance attributes in. Accessing the file object off
    # the instance will always return an instance of attr_class.
    attr_class = FieldFile
    # The descriptor to use for accessing the attribute off of the class.
    descriptor_class = FileDescriptor
    description = ugettext_lazy("File path")
    def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs):
        # FileFields cannot be primary keys or unique: the stored value is a
        # storage-generated name, not a user-controlled identifier.
        for arg in ('primary_key', 'unique'):
            if arg in kwargs:
                raise TypeError("'%s' is not a valid argument for %s." % (arg, self.__class__))
        self.storage = storage or default_storage
        self.upload_to = upload_to
        # A callable upload_to replaces the default filename generation.
        if callable(upload_to):
            self.generate_filename = upload_to
        kwargs['max_length'] = kwargs.get('max_length', 100)
        super(FileField, self).__init__(verbose_name, name, **kwargs)
    def get_internal_type(self):
        return "FileField"
    def get_prep_lookup(self, lookup_type, value):
        # Allow filtering by File-like objects by comparing on their name.
        if hasattr(value, 'name'):
            value = value.name
        return super(FileField, self).get_prep_lookup(lookup_type, value)
    def get_prep_value(self, value):
        "Returns field's value prepared for saving into a database."
        # Need to convert File objects provided via a form to unicode for database insertion
        if value is None:
            return None
        return unicode(value)
    def pre_save(self, model_instance, add):
        "Returns field's value just before saving."
        file = super(FileField, self).pre_save(model_instance, add)
        if file and not file._committed:
            # Commit the file to storage prior to saving the model
            file.save(file.name, file, save=False)
        return file
    def contribute_to_class(self, cls, name):
        # Install the descriptor and hook file cleanup to object deletion.
        super(FileField, self).contribute_to_class(cls, name)
        setattr(cls, self.name, self.descriptor_class(self))
        signals.post_delete.connect(self.delete_file, sender=cls)
    def delete_file(self, instance, sender, **kwargs):
        """post_delete signal handler: remove the stored file if unused."""
        file = getattr(instance, self.attname)
        # If no other object of this type references the file,
        # and it's not the default value for future objects,
        # delete it from the backend.
        if file and file.name != self.default and \
            not sender._default_manager.filter(**{self.name: file.name}):
                file.delete(save=False)
        elif file:
            # Otherwise, just close the file, so it doesn't tie up resources.
            file.close()
    def get_directory_name(self):
        # upload_to may contain strftime() patterns, expanded against "now".
        return os.path.normpath(force_unicode(datetime.datetime.now().strftime(smart_str(self.upload_to))))
    def get_filename(self, filename):
        # Strip directories and let the storage sanitize the base name.
        return os.path.normpath(self.storage.get_valid_name(os.path.basename(filename)))
    def generate_filename(self, instance, filename):
        return os.path.join(self.get_directory_name(), self.get_filename(filename))
    def save_form_data(self, instance, data):
        # Important: None means "no change", other false value means "clear"
        # This subtle distinction (rather than a more explicit marker) is
        # needed because we need to consume values that are also sane for a
        # regular (non Model-) Form to find in its cleaned_data dictionary.
        if data is not None:
            # This value will be converted to unicode and stored in the
            # database, so leaving False as-is is not acceptable.
            if not data:
                data = ''
            setattr(instance, self.name, data)
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.FileField, 'max_length': self.max_length}
        # If a file has been provided previously, then the form doesn't require
        # that a new file is provided this time.
        # The code to mark the form field as not required is used by
        # form_for_instance, but can probably be removed once form_for_instance
        # is gone. ModelForm uses a different method to check for an existing file.
        if 'initial' in kwargs:
            defaults['required'] = False
        defaults.update(kwargs)
        return super(FileField, self).formfield(**defaults)
class ImageFileDescriptor(FileDescriptor):
    """
    FileDescriptor specialized for ImageFields: after the regular assignment
    it also refreshes the configured width_field/height_field on the
    instance.
    """
    def __set__(self, instance, value):
        prior = instance.__dict__.get(self.field.name)
        super(ImageFileDescriptor, self).__set__(instance, value)
        # Recomputing image dimensions is expensive, so avoid doing it when
        # the model is being instantiated from the database (bug #11084).
        # During Model.__init__() the attribute has not been assigned yet, so
        # `prior` is None there; that case is handled separately by
        # ImageField.update_dimension_fields via the post_init signal. Any
        # later assignment lands here with a non-None prior value and
        # triggers an immediate, forced dimension update.
        if prior is not None:
            self.field.update_dimension_fields(instance, force=True)
class ImageFieldFile(ImageFile, FieldFile):
    """FieldFile for ImageFields; drops cached dimensions on delete."""
    def delete(self, save=True):
        # Discard any cached (width, height) so stale dimensions are never
        # reported for a file that no longer exists.
        try:
            del self._dimensions_cache
        except AttributeError:
            pass
        super(ImageFieldFile, self).delete(save)
class ImageField(FileField):
    """FileField variant for images, optionally mirroring the image's
    width/height into two companion model fields."""
    attr_class = ImageFieldFile
    descriptor_class = ImageFileDescriptor
    description = ugettext_lazy("File path")
    def __init__(self, verbose_name=None, name=None, width_field=None, height_field=None, **kwargs):
        self.width_field, self.height_field = width_field, height_field
        super(ImageField, self).__init__(verbose_name, name, **kwargs)
    def contribute_to_class(self, cls, name):
        super(ImageField, self).contribute_to_class(cls, name)
        # Hook up update_dimension_fields on post_init so that dimension
        # fields declared *after* their image field are not left cleared by
        # Model.__init__ (bug #11196).
        signals.post_init.connect(self.update_dimension_fields, sender=cls)
    def update_dimension_fields(self, instance, force=False, *args, **kwargs):
        """
        Refreshes the width/height companion fields, when configured.

        Runs on the model's post_init signal. Unless force=True (as passed
        by ImageFileDescriptor.__set__ on assignment), dimensions that are
        already populated are left untouched so that loading from the
        database does not trigger a costly recalculation.
        """
        # Bail out early when there are no dimension fields to maintain.
        if not (self.width_field or self.height_field):
            return
        # getattr goes through ImageFileDescriptor.__get__, which coerces
        # whatever was assigned into an instance of self.attr_class
        # (ImageFieldFile here).
        field_file = getattr(instance, self.attname)
        # No file and no forcing: nothing to do.
        if not field_file and not force:
            return
        width_missing = self.width_field and not getattr(instance, self.width_field)
        height_missing = self.height_field and not getattr(instance, self.height_field)
        # Both dimensions already populated means we are most likely loading
        # from the database (values come straight from there) or re-assigning
        # over an existing image. Only the latter should refresh, and it
        # arrives with force=True from ImageFileDescriptor.__set__.
        if not (width_missing or height_missing) and not force:
            return
        # field_file is an ImageFieldFile, or falsy when there is no file.
        if field_file:
            width = field_file.width
            height = field_file.height
        else:
            # No file: clear the dimension fields.
            width = None
            height = None
        # Push the (possibly None) dimensions onto the instance.
        if self.width_field:
            setattr(instance, self.width_field, width)
        if self.height_field:
            setattr(instance, self.height_field, height)
    def formfield(self, **kwargs):
        config = {'form_class': forms.ImageField}
        config.update(kwargs)
        return super(ImageField, self).formfield(**config)
| |
"""Unit tests for tftpy."""
# vim: ts=4 sw=4 et ai:
# -*- coding: utf8 -*-
import logging
import os
import threading
import time
import unittest
from contextlib import contextmanager
from errno import EINTR
from multiprocessing import Queue
from shutil import rmtree
from tempfile import mkdtemp
import tftpy
# Route tftpy's debug output to the console while the tests run, with a
# format that includes level, logger name, and line number for debugging.
log = logging.getLogger("tftpy")
log.setLevel(logging.DEBUG)
# console handler
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(levelname)s [%(name)s:%(lineno)s] %(message)s")
handler.setFormatter(formatter)
log.addHandler(handler)
class TestTftpyClasses(unittest.TestCase):
    """Encode/decode round-trip tests for the tftpy packet classes."""
    def testTftpPacketRRQ(self):
        log.debug("===> Running testcase testTftpPacketRRQ")
        opts = {}
        pkt = tftpy.TftpPacketTypes.TftpPacketRRQ()
        pkt.mode = "octet"
        pkt.filename = "myfilename"
        pkt.options = opts
        pkt.encode()
        self.assertIsNotNone(pkt.buffer, "Buffer populated")
        # Decoding the freshly encoded buffer must reproduce every field.
        pkt.decode()
        self.assertEqual(pkt.filename, "myfilename", "Filename correct")
        self.assertEqual(pkt.mode, "octet", "Mode correct")
        self.assertEqual(pkt.options, opts, "Options correct")
        # Round-trip again, this time carrying a blksize option.
        pkt.options = {"blksize": "1024"}
        pkt.mode = "octet"
        pkt.filename = "myfilename"
        pkt.encode()
        self.assertIsNotNone(pkt.buffer, "Buffer populated")
        pkt.decode()
        self.assertEqual(pkt.filename, "myfilename", "Filename correct")
        self.assertEqual(pkt.mode, "octet", "Mode correct")
        self.assertEqual(pkt.options["blksize"], "1024", "blksize correct")
    def testTftpPacketWRQ(self):
        log.debug("===> Running test case testTftpPacketWRQ")
        opts = {}
        pkt = tftpy.TftpPacketTypes.TftpPacketWRQ()
        pkt.mode = "octet"
        pkt.filename = "myfilename"
        pkt.options = opts
        pkt.encode()
        self.assertIsNotNone(pkt.buffer, "Buffer populated")
        pkt.decode()
        self.assertEqual(pkt.opcode, 2, "Opcode correct")
        self.assertEqual(pkt.filename, "myfilename", "Filename correct")
        self.assertEqual(pkt.mode, "octet", "Mode correct")
        self.assertEqual(pkt.options, opts, "Options correct")
        # Round-trip again, this time carrying a blksize option.
        pkt.options = {"blksize": "1024"}
        pkt.mode = "octet"
        pkt.filename = "myfilename"
        pkt.encode()
        self.assertIsNotNone(pkt.buffer, "Buffer populated")
        pkt.decode()
        self.assertEqual(pkt.opcode, 2, "Opcode correct")
        self.assertEqual(pkt.filename, "myfilename", "Filename correct")
        self.assertEqual(pkt.mode, "octet", "Mode correct")
        self.assertEqual(pkt.options["blksize"], "1024", "Blksize correct")
    def testTftpPacketDAT(self):
        log.debug("===> Running testcase testTftpPacketDAT")
        pkt = tftpy.TftpPacketTypes.TftpPacketDAT()
        pkt.blocknumber = 5
        payload = b"this is some data"
        pkt.data = payload
        pkt.encode()
        self.assertIsNotNone(pkt.buffer, "Buffer populated")
        pkt.decode()
        self.assertEqual(pkt.opcode, 3, "DAT opcode is correct")
        self.assertEqual(pkt.blocknumber, 5, "Block number is correct")
        self.assertEqual(pkt.data, payload, "DAT data is correct")
    def testTftpPacketACK(self):
        log.debug("===> Running testcase testTftpPacketACK")
        pkt = tftpy.TftpPacketTypes.TftpPacketACK()
        pkt.blocknumber = 6
        pkt.encode()
        self.assertIsNotNone(pkt.buffer, "Buffer populated")
        pkt.decode()
        self.assertEqual(pkt.opcode, 4, "ACK opcode is correct")
        self.assertEqual(pkt.blocknumber, 6, "ACK blocknumber correct")
    def testTftpPacketERR(self):
        log.debug("===> Running testcase testTftpPacketERR")
        pkt = tftpy.TftpPacketTypes.TftpPacketERR()
        pkt.errorcode = 4
        pkt.encode()
        self.assertIsNotNone(pkt.buffer, "Buffer populated")
        pkt.decode()
        self.assertEqual(pkt.opcode, 5, "ERR opcode is correct")
        self.assertEqual(pkt.errorcode, 4, "ERR errorcode is correct")
    def testTftpPacketOACK(self):
        log.debug("===> Running testcase testTftpPacketOACK")
        pkt = tftpy.TftpPacketTypes.TftpPacketOACK()
        # An integer blksize must come back as a string after the round trip.
        pkt.options = {"blksize": 2048}
        pkt.encode()
        self.assertIsNotNone(pkt.buffer, "Buffer populated")
        pkt.decode()
        self.assertEqual(pkt.opcode, 6, "OACK opcode is correct")
        self.assertEqual(
            pkt.options["blksize"], "2048", "OACK blksize option is correct"
        )
        # A string blksize stays a string.
        pkt.options = {"blksize": "4096"}
        pkt.encode()
        self.assertIsNotNone(pkt.buffer, "Buffer populated")
        pkt.decode()
        self.assertEqual(pkt.opcode, 6, "OACK opcode is correct")
        self.assertEqual(
            pkt.options["blksize"], "4096", "OACK blksize option is correct"
        )
    def testTftpPacketFactory(self):
        log.debug("===> Running testcase testTftpPacketFactory")
        # The factory must map each opcode to its packet class.
        classes = {
            1: tftpy.TftpPacketTypes.TftpPacketRRQ,
            2: tftpy.TftpPacketTypes.TftpPacketWRQ,
            3: tftpy.TftpPacketTypes.TftpPacketDAT,
            4: tftpy.TftpPacketTypes.TftpPacketACK,
            5: tftpy.TftpPacketTypes.TftpPacketERR,
            6: tftpy.TftpPacketTypes.TftpPacketOACK,
        }
        factory = tftpy.TftpPacketFactory.TftpPacketFactory()
        for opcode, cls in classes.items():
            self.assertTrue(
                isinstance(factory._TftpPacketFactory__create(opcode), cls),
                "opcode %d returns the correct class" % opcode,
            )
class TestTftpyState(unittest.TestCase):
    """End-to-end and state-machine tests for tftpy.

    The client/server tests fork: the child process runs a blocking server
    while the parent drives a client against it, then the parent kills and
    reaps the child in a finally block.
    """
    def clientServerUploadOptions(
        self, options, input=None, transmitname=None, server_kwargs=None
    ):
        """Fire up a client and a server and do an upload."""
        root = "/tmp"
        home = os.path.dirname(os.path.abspath(__file__))
        filename = "640KBFILE"
        input_path = os.path.join(home, filename)
        if not input:
            input = input_path
        if transmitname:
            filename = transmitname
        server_kwargs = server_kwargs or {}
        server = tftpy.TftpServer(root, **server_kwargs)
        client = tftpy.TftpClient("localhost", 20001, options)
        # Fork a server and run the client in this process.
        child_pid = os.fork()
        if child_pid:
            # parent - let the server start
            try:
                time.sleep(1)
                client.upload(filename, input)
            finally:
                os.kill(child_pid, 15)
                os.waitpid(child_pid, 0)
        else:
            server.listen("localhost", 20001)
    def clientServerDownloadOptions(
        self,
        options,
        output="/tmp/out",
        cretries=tftpy.DEF_TIMEOUT_RETRIES,
        sretries=tftpy.DEF_TIMEOUT_RETRIES,
    ):
        """Fire up a client and a server and do a download."""
        root = os.path.dirname(os.path.abspath(__file__))
        server = tftpy.TftpServer(root)
        client = tftpy.TftpClient("localhost", 20001, options)
        # Fork a server and run the client in this process.
        child_pid = os.fork()
        if child_pid:
            # parent - let the server start
            try:
                time.sleep(1)
                client.download("640KBFILE", output, retries=cretries)
            finally:
                os.kill(child_pid, 15)
                os.waitpid(child_pid, 0)
        else:
            server.listen("localhost", 20001, retries=sretries)
    @contextmanager
    def dummyServerDir(self):
        """Yield a temp dir with foo/, foo-private/ and other/, each holding
        a small 'bar' file; removed again on exit."""
        tmpdir = mkdtemp()
        for dirname in ("foo", "foo-private", "other"):
            os.mkdir(os.path.join(tmpdir, dirname))
            with open(os.path.join(tmpdir, dirname, "bar"), "w") as w:
                w.write("baz")
        try:
            yield tmpdir
        finally:
            rmtree(tmpdir)
    def testClientServerNoOptions(self):
        self.clientServerDownloadOptions({})
    def testClientServerNoOptionsRetries(self):
        self.clientServerDownloadOptions({}, cretries=5, sretries=5)
    def testClientServerTsizeOptions(self):
        self.clientServerDownloadOptions({"tsize": 64 * 1024})
    def testClientFileObject(self):
        output = open("/tmp/out", "wb")
        self.clientServerDownloadOptions({}, output)
    def testClientServerBlksize(self):
        for blksize in [512, 1024, 2048, 4096]:
            self.clientServerDownloadOptions({"blksize": blksize})
    def testClientServerUploadNoOptions(self):
        self.clientServerUploadOptions({})
    def testClientServerUploadFileObj(self):
        fileobj = open("t/640KBFILE", "rb")
        self.clientServerUploadOptions({}, input=fileobj)
    def testClientServerUploadWithSubdirs(self):
        self.clientServerUploadOptions({}, transmitname="foo/bar/640KBFILE")
    def testClientServerUploadStartingSlash(self):
        self.clientServerUploadOptions({}, transmitname="/foo/bar/640KBFILE")
    def testClientServerUploadOptions(self):
        for blksize in [512, 1024, 2048, 4096]:
            self.clientServerUploadOptions({"blksize": blksize})
    def customUploadHelper(self, return_func):
        """Run an upload through a custom upload_open hook and verify the
        hook was actually invoked."""
        q = Queue()
        def upload_open(path, context):
            q.put("called")
            return return_func(path)
        self.clientServerUploadOptions({}, server_kwargs={"upload_open": upload_open})
        self.assertEqual(q.get(True, 1), "called")
    def testClientServerUploadCustomOpen(self):
        self.customUploadHelper(lambda p: open(p, "wb"))
    def testClientServerUploadCustomOpenForbids(self):
        # Returning None from upload_open must be treated as a denial.
        with self.assertRaisesRegex(tftpy.TftpException, "Access violation"):
            self.customUploadHelper(lambda p: None)
    def testClientServerUploadTsize(self):
        self.clientServerUploadOptions(
            {"tsize": 64 * 1024}, transmitname="/foo/bar/640KBFILE"
        )
    def testClientServerNoOptionsDelay(self):
        # Artificially delay one block to exercise the retransmit path.
        tftpy.TftpStates.DELAY_BLOCK = 10
        self.clientServerDownloadOptions({})
        tftpy.TftpStates.DELAY_BLOCK = 0
    def testServerNoOptions(self):
        """Drive the server state machine directly through a full download."""
        raddress = "127.0.0.2"
        rport = 10000
        timeout = 5
        root = os.path.dirname(os.path.abspath(__file__))
        # Testing without the dyn_func_file set.
        serverstate = tftpy.TftpContexts.TftpContextServer(
            raddress, rport, timeout, root
        )
        self.assertTrue(isinstance(serverstate, tftpy.TftpContexts.TftpContextServer))
        rrq = tftpy.TftpPacketTypes.TftpPacketRRQ()
        rrq.filename = "640KBFILE"
        rrq.mode = "octet"
        rrq.options = {}
        # Start the download.
        serverstate.start(rrq.encode().buffer)
        # At a 512 byte blocksize, this should be 1280 packets exactly.
        for block in range(1, 1281):
            # Should be in expectack state.
            self.assertTrue(
                isinstance(serverstate.state, tftpy.TftpStates.TftpStateExpectACK)
            )
            ack = tftpy.TftpPacketTypes.TftpPacketACK()
            ack.blocknumber = block % 65536
            serverstate.state = serverstate.state.handle(ack, raddress, rport)
        # The last DAT packet should be empty, indicating a completed
        # transfer.
        ack = tftpy.TftpPacketTypes.TftpPacketACK()
        ack.blocknumber = 1281 % 65536
        finalstate = serverstate.state.handle(ack, raddress, rport)
        self.assertTrue(finalstate is None)
    def testServerNoOptionsSubdir(self):
        raddress = "127.0.0.2"
        rport = 10000
        timeout = 5
        root = os.path.dirname(os.path.abspath(__file__))
        # Testing without the dyn_func_file set.
        serverstate = tftpy.TftpContexts.TftpContextServer(
            raddress, rport, timeout, root
        )
        self.assertTrue(isinstance(serverstate, tftpy.TftpContexts.TftpContextServer))
        rrq = tftpy.TftpPacketTypes.TftpPacketRRQ()
        rrq.filename = "640KBFILE"
        rrq.mode = "octet"
        rrq.options = {}
        # Start the download.
        serverstate.start(rrq.encode().buffer)
        # At a 512 byte blocksize, this should be 1280 packets exactly.
        for block in range(1, 1281):
            # Should be in expectack state, or None
            self.assertTrue(
                isinstance(serverstate.state, tftpy.TftpStates.TftpStateExpectACK)
            )
            ack = tftpy.TftpPacketTypes.TftpPacketACK()
            ack.blocknumber = block % 65536
            serverstate.state = serverstate.state.handle(ack, raddress, rport)
        # The last DAT packet should be empty, indicating a completed
        # transfer.
        ack = tftpy.TftpPacketTypes.TftpPacketACK()
        ack.blocknumber = 1281 % 65536
        finalstate = serverstate.state.handle(ack, raddress, rport)
        self.assertTrue(finalstate is None)
    def testServerInsecurePathAbsolute(self):
        # Absolute paths escaping the root must be rejected.
        raddress = "127.0.0.2"
        rport = 10000
        timeout = 5
        with self.dummyServerDir() as d:
            root = os.path.join(os.path.abspath(d), "foo")
            serverstate = tftpy.TftpContexts.TftpContextServer(
                raddress, rport, timeout, root
            )
            rrq = tftpy.TftpPacketTypes.TftpPacketRRQ()
            rrq.filename = os.path.join(os.path.abspath(d), "other/bar")
            rrq.mode = "octet"
            rrq.options = {}
            # Start the download.
            self.assertRaises(
                tftpy.TftpException, serverstate.start, rrq.encode().buffer
            )
    def testServerInsecurePathRelative(self):
        # Relative ".." traversal out of the root must be rejected.
        raddress = "127.0.0.2"
        rport = 10000
        timeout = 5
        with self.dummyServerDir() as d:
            root = os.path.join(os.path.abspath(d), "foo")
            serverstate = tftpy.TftpContexts.TftpContextServer(
                raddress, rport, timeout, root
            )
            rrq = tftpy.TftpPacketTypes.TftpPacketRRQ()
            rrq.filename = "../other/bar"
            rrq.mode = "octet"
            rrq.options = {}
            # Start the download.
            self.assertRaises(
                tftpy.TftpException, serverstate.start, rrq.encode().buffer
            )
    def testServerInsecurePathRootSibling(self):
        # A sibling dir sharing the root's name prefix must be rejected too.
        raddress = "127.0.0.2"
        rport = 10000
        timeout = 5
        with self.dummyServerDir() as d:
            root = os.path.join(os.path.abspath(d), "foo")
            serverstate = tftpy.TftpContexts.TftpContextServer(
                raddress, rport, timeout, root
            )
            rrq = tftpy.TftpPacketTypes.TftpPacketRRQ()
            rrq.filename = root + "-private/bar"
            rrq.mode = "octet"
            rrq.options = {}
            # Start the download.
            self.assertRaises(
                tftpy.TftpException, serverstate.start, rrq.encode().buffer
            )
    def testServerSecurePathAbsolute(self):
        raddress = "127.0.0.2"
        rport = 10000
        timeout = 5
        with self.dummyServerDir() as d:
            root = os.path.join(os.path.abspath(d), "foo")
            serverstate = tftpy.TftpContexts.TftpContextServer(
                raddress, rport, timeout, root
            )
            rrq = tftpy.TftpPacketTypes.TftpPacketRRQ()
            rrq.filename = os.path.join(root, "bar")
            rrq.mode = "octet"
            rrq.options = {}
            # Start the download.
            serverstate.start(rrq.encode().buffer)
            # Should be in expectack state.
            self.assertTrue(
                isinstance(serverstate.state, tftpy.TftpStates.TftpStateExpectACK)
            )
    def testServerSecurePathRelative(self):
        raddress = "127.0.0.2"
        rport = 10000
        timeout = 5
        with self.dummyServerDir() as d:
            root = os.path.join(os.path.abspath(d), "foo")
            serverstate = tftpy.TftpContexts.TftpContextServer(
                raddress, rport, timeout, root
            )
            rrq = tftpy.TftpPacketTypes.TftpPacketRRQ()
            rrq.filename = "bar"
            rrq.mode = "octet"
            rrq.options = {}
            # Start the download.
            serverstate.start(rrq.encode().buffer)
            # Should be in expectack state.
            self.assertTrue(
                isinstance(serverstate.state, tftpy.TftpStates.TftpStateExpectACK)
            )
    def testServerDownloadWithStopNow(self, output="/tmp/out"):
        """A server stopped with now=True should abort the client mid-transfer."""
        log.debug("===> Running testcase testServerDownloadWithStopNow")
        root = os.path.dirname(os.path.abspath(__file__))
        server = tftpy.TftpServer(root)
        client = tftpy.TftpClient("localhost", 20001, {})
        # Fork a server and run the client in this process.
        child_pid = os.fork()
        if child_pid:
            try:
                # parent - let the server start
                stopped_early = False
                time.sleep(1)
                def delay_hook(pkt):
                    time.sleep(0.005)  # 5ms
                client.download("640KBFILE", output, delay_hook)
            # Catch Exception rather than a bare except so SystemExit and
            # KeyboardInterrupt still propagate out of the test.
            except Exception:
                log.warning("client threw exception as expected")
                stopped_early = True
            finally:
                os.kill(child_pid, 15)
                os.waitpid(child_pid, 0)
            # The abrupt server stop must have interrupted the download.
            self.assertTrue(stopped_early == True, "Client should have stopped early")
        else:
            import signal
            def handlealarm(signum, frame):
                server.stop(now=True)
            signal.signal(signal.SIGALRM, handlealarm)
            signal.alarm(2)
            try:
                server.listen("localhost", 20001)
                log.error("server didn't throw exception")
            except Exception as err:
                log.error("server got unexpected exception %s" % err)
            # Wait until parent kills us
            while True:
                time.sleep(1)
    def testServerDownloadWithStopNotNow(self, output="/tmp/out"):
        """A server stopped with now=False should let the transfer finish."""
        log.debug("===> Running testcase testServerDownloadWithStopNotNow")
        root = os.path.dirname(os.path.abspath(__file__))
        server = tftpy.TftpServer(root)
        client = tftpy.TftpClient("localhost", 20001, {})
        # Fork a server and run the client in this process.
        child_pid = os.fork()
        if child_pid:
            try:
                stopped_early = True
                # parent - let the server start
                time.sleep(1)
                def delay_hook(pkt):
                    time.sleep(0.005)  # 5ms
                client.download("640KBFILE", output, delay_hook)
                stopped_early = False
            # Catch Exception rather than a bare except so SystemExit and
            # KeyboardInterrupt still propagate out of the test.
            except Exception:
                log.warning("client threw exception as expected")
            finally:
                os.kill(child_pid, 15)
                os.waitpid(child_pid, 0)
            self.assertTrue(stopped_early == False, "Server should not exit early")
        else:
            import signal
            def handlealarm(signum, frame):
                server.stop(now=False)
            signal.signal(signal.SIGALRM, handlealarm)
            signal.alarm(2)
            try:
                server.listen("localhost", 20001)
            except Exception as err:
                log.error("server threw exception %s" % err)
            # Wait until parent kills us
            while True:
                time.sleep(1)
    def testServerDownloadWithDynamicPort(self, output="/tmp/out"):
        """Listening on port 0 should bind an OS-chosen port the client can use."""
        log.debug("===> Running testcase testServerDownloadWithDynamicPort")
        root = os.path.dirname(os.path.abspath(__file__))
        server = tftpy.TftpServer(root)
        server_thread = threading.Thread(
            target=server.listen, kwargs={"listenip": "localhost", "listenport": 0}
        )
        server_thread.start()
        try:
            server.is_running.wait()
            client = tftpy.TftpClient("localhost", server.listenport, {})
            time.sleep(1)
            client.download("640KBFILE", output)
        finally:
            server.stop(now=False)
            server_thread.join()
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| |
# Copyright 2018 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Utility functions for google.protobuf.field_mask_pb2.FieldMask.
Supports advanced field mask semantics:
- Refer to fields and map keys using . literals:
- Supported map key types: string, integer types, bool.
- Floating point (including double and float), enum, and bytes keys are not
supported by protobuf or this implementation.
- Fields: 'publisher.name' means field name of field publisher
- string map keys: 'metadata.year' means string key 'year' of map field
metadata
- integer map keys (e.g. int32): 'year_ratings.0' means integer key 0 of a map
field year_ratings
- bool map keys: 'access_text.true' means boolean key true of a map field
access_text
- String map keys that cannot be represented as an unquoted string literal,
must be quoted using backticks: metadata.`year.published`, metadata.`17`,
metadata.``. Backtick can be escaped with ``: a.`b``c` means map key "b`c"
of map field a.
- Refer to all map keys using a * literal: "topics.*.archived" means field
  "archived" of all map values of map field "topics".
- Refer to all elements of a repeated field using a * literal: authors.*.name
- Refer to all fields of a message using * literal: publisher.*.
- Prohibit addressing a single element in repeated fields: authors.0.name
FieldMask.paths string grammar:
path = segment {'.' segment}
segment = literal | '*' | quoted_string;
literal = string | integer | bool
string = (letter | '_') {letter | '_' | digit}
integer = ['-'] digit {digit};
bool = 'true' | 'false';
quoted_string = '`' { utf8-no-backtick | '``' } '`'
TODO(nodir): replace spec above with a link to a spec when it is available.
"""
from google import protobuf
from google.protobuf import descriptor
# Public API of this module.
__all__ = [
    'EXCLUDE',
    'INCLUDE_ENTIRELY',
    'INCLUDE_PARTIALLY',
    'Mask',
    'STAR',
]
# Used in a parsed path to represent a star segment.
# See Mask docstring.
STAR = object()
# Inclusion levels returned by Mask.includes(), from "not included at all"
# through "some subfields included" to "included entirely".
EXCLUDE = 0
INCLUDE_PARTIALLY = 1
INCLUDE_ENTIRELY = 2
class Mask(object):
  """A tree representation of a field mask. Serves as a tree node too.

  Each node represents a segment of a paths string, e.g. 'bar' in 'foo.bar.qux'.
  A Field mask with paths ['a', 'b.c'] is parsed as
    <root>
      a
      b
        c

  Attrs:
    desc: a descriptor of the message of the field this node represents.
      If the field type is not a message, then desc is None and the node
      must be a leaf.
    repeated: True means that the segment represents a repeated field, and not
      one of the elements. Children of the node are the field elements.
    children: a dict that maps a segment to its node, e.g. children of the root
      in the example above has keys 'a' and 'b', and values are Mask objects. A
      segment can be of type str, int, bool or it can be the value of
      field_masks.STAR for '*' segments.
  """

  def __init__(self, desc=None, repeated=False, children=None):
    """Initializes the mask.

    The arguments initialize attributes of the same names, see Mask docstring.
    """
    self.desc = desc
    self.repeated = repeated
    self.children = children or {}

  def trim(self, message):
    """Clears message fields that are not in the mask.

    The message must be a google.protobuf.message.Message.
    Uses self._includes to decide what to trim, see includes() docstring.
    If self is a leaf, this is a noop.
    """
    for f, v in message.ListFields():
      incl = self._includes((f.name,))
      if incl == INCLUDE_ENTIRELY:
        continue

      if incl == EXCLUDE:
        message.ClearField(f.name)
        continue

      assert incl == INCLUDE_PARTIALLY

      # Child for this field must exist because INCLUDE_PARTIALLY.
      child = self.children[f.name]

      if not f.message_type:
        # The field is scalar, but the field mask does not specify to
        # include it entirely. Skip it because scalars do not have
        # subfields. Note that from_field_mask would fail on such a mask
        # because a scalar field cannot be followed by other fields.
        message.ClearField(f.name)
        continue

      # Trim the field value.
      if f.message_type.GetOptions().map_entry:
        # Snapshot the entries before iterating: v.pop(mk) below mutates the
        # map while we walk it, which invalidates a live items() view.
        for mk, mv in list(v.items()):
          incl = self._includes((f.name, mk))
          if incl == INCLUDE_ENTIRELY:
            pass
          elif incl == EXCLUDE:
            v.pop(mk)
          elif isinstance(mv, protobuf.message.Message):
            assert incl == INCLUDE_PARTIALLY
            # Child for mk must exist because INCLUDE_PARTIALLY.
            # NOTE(review): if mk matched only via a STAR child, this raises
            # KeyError — confirm partial masks over star map keys are rejected
            # upstream.
            child.children[mk].trim(mv)
          else:
            # The field is scalar, see the comment above.
            v.pop(mk)
      elif f.label == descriptor.FieldDescriptor.LABEL_REPEATED:
        # For repeated message fields the parser forces a '*' segment, so the
        # star child is the per-element mask.
        star_child = child.children[STAR]
        for rv in v:
          star_child.trim(rv)
      else:
        child.trim(v)

  def includes(self, path):
    """Tells if a field value at the given path must be included.

    Args:
      path: a path string. Must use canonical field names, i.e. not json names.

    Returns:
      EXCLUDE if the field value must be excluded.
      INCLUDE_PARTIALLY if some subfields of the field value must be included.
      INCLUDE_ENTIRELY if the field value must be included entirely.

    Raises:
      ValueError: path is a string and it is invalid according to
        self.desc and self.repeated.
    """
    assert path
    return self._includes(
        _parse_path(path, self.desc, repeated=self.repeated)[0]
    )

  def _includes(self, path, start_at=0):
    """Implements includes() for an already-parsed segment tuple."""
    if not self.children:
      # A leaf mask includes its entire subtree.
      return INCLUDE_ENTIRELY

    if start_at == len(path):
      # This node is intermediate and we've exhausted the path.
      # Some of the value's subfields are included, so includes this value
      # partially.
      return INCLUDE_PARTIALLY

    # Find children that match current segment.
    seg = path[start_at]
    children = [self.children.get(seg)]
    if seg != STAR:
      # self might have a star child
      # e.g. self is {'a': {'b': {}}, STAR: {'c': {}}}
      # If seg is 'x', we should check the star child.
      children.append(self.children.get(STAR))
    children = [c for c in children if c is not None]
    if not children:
      # Nothing matched.
      return EXCLUDE
    # Inclusion levels are ordered by strength; take the strongest match.
    return max(c._includes(path, start_at + 1) for c in children)

  def merge(self, src, dest):
    """Merges masked fields from src to dest.

    Merges even empty/unset fields, as long as they are present in the mask.
    Overwrites repeated/map fields entirely. Does not support partial updates
    of such fields.
    """
    assert isinstance(src, protobuf.message.Message)
    assert type(src) == type(dest)  # pylint: disable=unidiomatic-typecheck
    for f_name, submask in self.children.items():
      include_partially = bool(submask.children)

      dest_value = getattr(dest, f_name)
      src_value = getattr(src, f_name)

      f_desc = dest.DESCRIPTOR.fields_by_name[f_name]
      is_repeated = f_desc.label == descriptor.FieldDescriptor.LABEL_REPEATED
      is_message = f_desc.type == descriptor.FieldDescriptor.TYPE_MESSAGE

      # Only non-repeated submessages can be merged partially.
      if include_partially and is_message and not is_repeated:
        submask.merge(src_value, dest_value)
      # Otherwise overwrite entirely.
      elif is_repeated:
        dest.ClearField(f_name)
        dest_value = getattr(dest, f_name)  # restore after ClearField.
        dest_value.extend(src_value)
      elif is_message:
        dest_value.CopyFrom(src_value)
      else:
        # Scalar value.
        setattr(dest, f_name, src_value)

  def submask(self, path):
    """Returns a sub-mask given a path from self to it.

    For example, for a mask ["a.b.c"], mask.submask('a.b') will return a mask
    ["c"].

    If self includes the path entirely, returns a Mask that includes
    everything. For example, for mask ["a"], mask.submask("a.b") returns a
    mask without children.

    Args:
      path: a path string. Must use canonical field names, i.e. not json names.

    Returns:
      A Mask or None.

    Raises:
      ValueError: path is a string and it is invalid according to
        self.desc and self.repeated.
    """
    assert path
    parsed_path, desc, repeated = _parse_path(
        path, self.desc, repeated=self.repeated
    )
    if self._includes(parsed_path) == INCLUDE_ENTIRELY:
      # Return a mask that includes everything.
      return Mask(desc=desc, repeated=repeated)
    return self._submask(parsed_path)

  def _submask(self, path, start_at=0):
    """Implements submask() for an already-parsed segment tuple."""
    if start_at == len(path):
      return self
    child = self.children.get(path[start_at])
    return child and child._submask(path, start_at + 1)

  @classmethod
  def from_field_mask(
      cls, field_mask, desc, json_names=False, update_mask=False):
    """Parses a field mask to a Mask.

    Removes trailing stars, e.g. parses ['a.*'] as ['a'].
    Removes redundant paths, e.g. parses ['a', 'a.b'] as ['a'].

    Args:
      field_mask: a google.protobuf.field_mask_pb2.FieldMask instance.
      desc: a google.protobuf.descriptor.Descriptor for the target message.
      json_names: True if field_mask uses json field names for field names,
        e.g. "fooBar" instead of "foo_bar".
        Field names will be parsed in the canonical form.
      update_mask: if True, the field_mask is treated as an update mask.
        In an update mask, a repeated field is allowed only as the last
        field in a paths string.

    Returns:
      A new Mask rooted at desc.

    Raises:
      ValueError if a field path is invalid.
    """
    parsed_paths = []
    for p in field_mask.paths:
      try:
        parsed_paths.append(_parse_path(p, desc, json_names=json_names)[0])
      except ValueError as ex:
        raise ValueError('invalid path "%s": %s' % (p, ex))
    parsed_paths = _normalize_paths(parsed_paths)

    root = cls(desc)
    # NOTE(review): parsed_paths is a set here, so field_mask.paths[i] below
    # may not correspond to p; the path quoted in the error message can be
    # wrong. Left as-is to preserve behavior.
    for i, p in enumerate(parsed_paths):
      node = root
      node_name = ''
      for seg in p:
        if node.repeated and update_mask:
          raise ValueError(
              ('update mask allows a repeated field only at the last '
               'position; field "%s" in "%s" is not last')
              % (node_name, field_mask.paths[i]))
        if seg not in node.children:
          if node.desc.GetOptions().map_entry:
            # Map value: child describes the value message type (or None).
            child = cls(node.desc.fields_by_name['value'].message_type)
          elif node.repeated:
            # Repeated element: same descriptor, no longer repeated.
            child = cls(node.desc)
          else:
            field = node.desc.fields_by_name[seg]
            repeated = field.label == descriptor.FieldDescriptor.LABEL_REPEATED
            child = cls(field.message_type, repeated=repeated)
          node.children[seg] = child
        node = node.children[seg]
        node_name = seg
    return root

  def __eq__(self, other):
    """Returns True if other is equivalent to self."""
    return (
        self.desc == other.desc and
        self.repeated == other.repeated and
        self.children == other.children)

  def __ne__(self, other):
    """Returns False if other is equivalent to self."""
    return not (self == other)

  def __repr__(self):
    """Returns a string representation of the Mask."""
    return 'Mask(%r, %r, %r)' % (self.desc, self.repeated, self.children)
def _normalize_paths(paths):
  """Normalizes field paths. Returns a new set of paths.

  paths must be parsed, see _parse_path.

  Removes trailing stars, e.g. convertes ('a', STAR) to ('a',).
  Removes paths that have a segment prefix already present in paths,
  e.g. removes ('a', 'b') from [('a', 'b'), ('a',)].
  """
  paths = _remove_trailing_stars(paths)
  normalized = set()
  for p in paths:
    # Drop p if any strict prefix of it is itself a path in the set.
    covered_by_prefix = any(p[:i] in paths for i in range(len(p)))
    if not covered_by_prefix:
      normalized.add(p)
  return normalized
def _remove_trailing_stars(paths):
  """Strips a trailing STAR segment from each parsed path; returns a set."""
  result = set()
  for path in paths:
    assert isinstance(path, tuple), path
    result.add(path[:-1] if path[-1] == STAR else path)
  return result
# Token types.
_STAR, _PERIOD, _LITERAL, _STRING, _INTEGER, _UNKNOWN, _EOF = range(7)

# Protobuf field types whose map keys are parsed as integers.
_INTEGER_FIELD_TYPES = {
    descriptor.FieldDescriptor.TYPE_INT64,
    descriptor.FieldDescriptor.TYPE_INT32,
    descriptor.FieldDescriptor.TYPE_UINT32,
    descriptor.FieldDescriptor.TYPE_UINT64,
    descriptor.FieldDescriptor.TYPE_FIXED64,
    descriptor.FieldDescriptor.TYPE_FIXED32,
    descriptor.FieldDescriptor.TYPE_SFIXED64,
    descriptor.FieldDescriptor.TYPE_SFIXED32,
}
# Field types that may be addressed as map keys in a field path.
_SUPPORTED_MAP_KEY_TYPES = _INTEGER_FIELD_TYPES | {
    descriptor.FieldDescriptor.TYPE_STRING,
    descriptor.FieldDescriptor.TYPE_BOOL,
}
def _parse_path(path, desc, repeated=False, json_names=False):
  """Parses a field path to a tuple of segments.

  See grammar in the module docstring.

  Args:
    path: a field path.
    desc: a google.protobuf.descriptor.Descriptor of the target message.
    repeated: True means that desc is a repeated field. For example,
      the target field is a repeated message field and path starts with an
      index.
    json_names: True if path uses json field names for field names,
      e.g. "fooBar" instead of "foo_bar".
      Field names will be parsed in the canonical form.

  Returns:
    A tuple (segments, desc, repeated), where segments is a a tuple of segments.
    A star is returned as STAR object.

  Raises:
    ValueError if path is invalid.
  """
  # Recursive-descent parser over the token stream; the cursor and the
  # "current field" state are shared through ctx.
  tokens = list(_tokenize(path))
  ctx = _ParseContext(desc, repeated)
  peek = lambda: tokens[ctx.i]

  def read():
    # Returns the current token and advances the cursor past it.
    tok = peek()
    ctx.i += 1
    return tok

  def read_path():
    # Top-level rule: segment {'.' segment} EOF.
    segs = []
    while True:
      seg, must_be_last = read_segment()
      segs.append(seg)
      tok_type, tok = read()
      if tok_type == _EOF:
        break
      if must_be_last:
        raise ValueError('unexpected token "%s"; expected end of string' % tok)
      if tok_type != _PERIOD:
        raise ValueError('unexpected token "%s"; expected a period' % tok)
    return tuple(segs), ctx.desc, ctx.repeated

  def read_segment():
    """Returns (segment, must_be_last) tuple."""
    tok_type, tok = peek()
    assert tok
    if tok_type == _PERIOD:
      raise ValueError('a segment cannot start with a period')
    if tok_type == _EOF:
      raise ValueError('unexpected end')
    is_map_key = ctx.desc and ctx.desc.GetOptions().map_entry
    if ctx.repeated and not is_map_key:
      # Inside a repeated (non-map) field the only valid segment is '*'.
      if tok_type != _STAR:
        raise ValueError('unexpected token "%s", expected a star' % tok)
      read()  # Swallow star.
      ctx.repeated = False
      return STAR, False

    if ctx.desc is None:
      raise ValueError(
          'scalar field "%s" cannot have subfields' % ctx.field_path
      )

    if is_map_key:
      # The segment addresses a map key ('*' for all keys); parse it
      # according to the map's key field type.
      key_type = ctx.desc.fields_by_name['key'].type
      if key_type not in _SUPPORTED_MAP_KEY_TYPES:
        raise ValueError('unsupported key type of field "%s"' % ctx.field_path)
      if tok_type == _STAR:
        read()  # Swallow star.
        seg = STAR
      elif key_type == descriptor.FieldDescriptor.TYPE_BOOL:
        seg = read_bool()
      elif key_type in _INTEGER_FIELD_TYPES:
        seg = read_integer()
      else:
        assert key_type == descriptor.FieldDescriptor.TYPE_STRING
        seg = read_string()
      # Descend into the map's value field.
      ctx.advance_to_field(ctx.desc.fields_by_name['value'])
      return seg, False

    if tok_type == _STAR:
      # Include all fields.
      read()  # Swallow star.
      # A STAR field cannot be followed by subfields.
      return STAR, True

    if tok_type != _LITERAL:
      raise ValueError('unexpected token "%s"; expected a field name' % tok)
    read()  # Swallow field name.
    field = _find_field(ctx.desc, tok, json_names)
    if field is None:
      raise ValueError(
          'field "%s" does not exist in message %s' % (tok, ctx.desc.full_name)
      )
    ctx.advance_to_field(field)
    return field.name, False

  def read_bool():
    # Parses a 'true'/'false' literal into a Python bool.
    tok_type, tok = read()
    if tok_type != _LITERAL or tok not in ('true', 'false'):
      raise ValueError('unexpected token "%s", expected true or false' % tok)
    return tok == 'true'

  def read_integer():
    tok_type, tok = read()
    if tok_type != _INTEGER:
      raise ValueError('unexpected token "%s"; expected an integer' % tok)
    return int(tok)

  def read_string():
    # Both bare literals and backtick-quoted strings are accepted as keys.
    tok_type, tok = read()
    if tok_type not in (_LITERAL, _STRING):
      raise ValueError('unexpected token "%s"; expected a string' % tok)
    return tok

  return read_path()
def _find_field(desc, name, json_name):
if not json_name:
return desc.fields_by_name.get(name)
for f in desc.fields:
if f.json_name == name:
return f
return None
class _ParseContext(object):
"""Context of parsing in _parse_path."""
def __init__(self, desc, repeated):
self.i = 0
self.desc = desc
self.repeated = repeated
self._field_path = [] # full path of the current field
def advance_to_field(self, field):
"""Advances the context to the next message field.
Args:
field: a google.protobuf.descriptor.FieldDescriptor to move to.
"""
self.desc = field.message_type
self.repeated = field.label == descriptor.FieldDescriptor.LABEL_REPEATED
self._field_path.append(field.name)
@property
def field_path(self):
return '.'.join(self._field_path)
def _tokenize(path):
  """Transforms path to an iterator of (token_type, string) tuples.

  Emits one (_STAR/_PERIOD/_INTEGER/_LITERAL/_STRING/_UNKNOWN, text) tuple
  per lexical token and a final (_EOF, '<eof>') sentinel.

  Raises:
    ValueError if a quoted string is not closed.
  """
  # NOTE(review): basestring is Python 2 only; this module targets py2.
  assert isinstance(path, basestring), path
  i = 0
  while i < len(path):
    start = i
    c = path[i]
    i += 1
    if c == '`':
      # Backtick-quoted string; a doubled backtick is an escaped backtick.
      quoted_string = []  # Parsed quoted string as list of string parts.
      while True:
        next_backtick = path.find('`', i)
        if next_backtick == -1:
          raise ValueError('a quoted string is not closed')
        quoted_string.append(path[i:next_backtick])
        i = next_backtick + 1  # Swallow the discovered backtick.
        escaped_backtick = i < len(path) and path[i] == '`'
        if not escaped_backtick:
          break
        quoted_string.append('`')
        i += 1  # Swallow second backtick.
      yield (_STRING, ''.join(quoted_string))
    elif c == '*':
      yield (_STAR, c)
    elif c == '.':
      yield (_PERIOD, c)
    elif c == '-' or c.isdigit():
      # Integer: optional leading '-', then a maximal run of digits.
      while i < len(path) and path[i].isdigit():
        i += 1
      yield (_INTEGER, path[start:i])
    elif c == '_' or c.isalpha():
      # Bare literal: identifier-like run of letters, digits, underscores.
      while i < len(path) and (path[i].isalnum() or path[i] == '_'):
        i += 1
      yield (_LITERAL, path[start:i])
    else:
      yield (_UNKNOWN, c)
  yield (_EOF, '<eof>')
| |
import numpy as np
import torch
from torch import nn
import os
import os.path as osp
from collections import OrderedDict
from torch.autograd import Variable
import itertools
from ..util import util as util
from .base_model import BaseModel
from . import networks_basic as networks
from scipy.ndimage import zoom
import fractions
import functools
import skimage.transform
#from IPython import embed
class DistModel(BaseModel):

    def name(self):
        """Returns the human-readable model name, e.g. 'net-lin [alex]'."""
        return self.model_name

    def initialize(self, model='net-lin', net='alex', model_path=None, colorspace='Lab', use_gpu=True, printNet=False, spatial=False, spatial_shape=None, spatial_order=1, spatial_factor=None, is_train=False, lr=.0001, beta1=0.5):
        '''
        INPUTS
            model - ['net-lin'] for linearly calibrated network
                    ['net'] for off-the-shelf network
                    ['L2'] for L2 distance in Lab colorspace
                    ['SSIM'] for ssim in RGB colorspace
            net - ['squeeze','alex','vgg']
            model_path - if None, will look in weights/[NET_NAME].pth
            colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM
            use_gpu - bool - whether or not to use a GPU
            printNet - bool - whether or not to print network architecture out
            spatial - bool - whether to output an array containing varying distances across spatial dimensions
            spatial_shape - if given, output spatial shape. if None then spatial shape is determined automatically via spatial_factor (see below).
            spatial_factor - if given, specifies upsampling factor relative to the largest spatial extent of a convolutional layer. if None then resized to size of input images.
            spatial_order - spline order of filter for upsampling in spatial mode, by default 1 (bilinear).
            is_train - bool - [True] for training mode
            lr - float - initial learning rate
            beta1 - float - initial momentum term for adam
        '''
        BaseModel.initialize(self, use_gpu=use_gpu)

        self.model = model
        self.net = net
        self.use_gpu = use_gpu
        self.is_train = is_train
        self.spatial = spatial
        self.spatial_shape = spatial_shape
        self.spatial_order = spatial_order
        self.spatial_factor = spatial_factor

        self.model_name = '%s [%s]' % (model, net)
        if(self.model == 'net-lin'):  # pretrained net + linear layer
            self.net = networks.PNetLin(use_gpu=use_gpu, pnet_type=net, use_dropout=True, spatial=spatial)
            kw = {}
            if not use_gpu:
                kw['map_location'] = 'cpu'
            if(model_path is None):
                base_dir = osp.dirname(osp.dirname(osp.abspath(__file__)))
                model_path = osp.join(base_dir, 'weights/%s.pth' % net)
            self.net.load_state_dict(torch.load(model_path, **kw))
        elif(self.model == 'net'):  # pretrained network
            assert not self.spatial, 'spatial argument not supported yet for uncalibrated networks'
            self.net = networks.PNet(use_gpu=use_gpu, pnet_type=net)
            self.is_fake_net = True
        elif(self.model in ['L2', 'l2']):
            self.net = networks.L2(use_gpu=use_gpu, colorspace=colorspace)  # not really a network, only for testing
            self.model_name = 'L2'
        elif(self.model in ['DSSIM', 'dssim', 'SSIM', 'ssim']):
            self.net = networks.DSSIM(use_gpu=use_gpu, colorspace=colorspace)
            self.model_name = 'SSIM'
        else:
            raise ValueError("Model [%s] not recognized." % self.model)

        self.parameters = list(self.net.parameters())

        if self.is_train:  # training mode
            # extra network on top to go from distances (d0,d1) => predicted human judgment (h*)
            self.rankLoss = networks.BCERankingLoss(use_gpu=use_gpu)
            self.parameters += self.rankLoss.parameters
            self.lr = lr
            self.old_lr = lr
            self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, betas=(beta1, 0.999))
        else:  # test mode
            self.net.eval()

        if(printNet):
            print('---------- Networks initialized -------------')
            networks.print_network(self.net)
            print('-----------------------------------------------')

    def forward_pair(self, in1, in2, retPerLayer=False):
        """Runs the underlying distance network on a pair of image batches."""
        if(retPerLayer):
            return self.net.forward(in1, in2, retPerLayer=True)
        else:
            return self.net.forward(in1, in2)

    def forward(self, in0, in1, retNumpy=True):
        ''' Function computes the distance between image patches in0 and in1
        INPUTS
            in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1]
            retNumpy - [False] to return as torch.Tensor, [True] to return as numpy array
        OUTPUT
            computed distances between in0 and in1
        '''
        self.input_ref = in0
        self.input_p0 = in1

        if(self.use_gpu):
            self.input_ref = self.input_ref.cuda()
            self.input_p0 = self.input_p0.cuda()

        self.var_ref = Variable(self.input_ref, requires_grad=True)
        self.var_p0 = Variable(self.input_p0, requires_grad=True)

        self.d0 = self.forward_pair(self.var_ref, self.var_p0)
        self.loss_total = self.d0

        def convert_output(d0):
            # Converts a network output to numpy (flattened, or kept as an
            # HxWxC map in spatial mode) or returns the tensor unchanged.
            if(retNumpy):
                ans = d0.cpu().data.numpy()
                if not self.spatial:
                    ans = ans.flatten()
                else:
                    assert(ans.shape[0] == 1 and len(ans.shape) == 4)
                    return ans[0, ...].transpose([1, 2, 0])  # Reshape to usual numpy image format: (height, width, channels)
                return ans
            else:
                return d0

        if self.spatial:
            L = [convert_output(x) for x in self.d0]

            spatial_shape = self.spatial_shape
            if spatial_shape is None:
                if(self.spatial_factor is None):
                    spatial_shape = (in0.size()[2], in0.size()[3])
                else:
                    spatial_shape = (max([x.shape[0] for x in L]) * self.spatial_factor, max([x.shape[1] for x in L]) * self.spatial_factor)

            L = [skimage.transform.resize(x, spatial_shape, order=self.spatial_order, mode='edge') for x in L]

            L = np.mean(np.concatenate(L, 2) * len(L), 2)
            return L
        else:
            return convert_output(self.d0)

    # ***** TRAINING FUNCTIONS *****
    def optimize_parameters(self):
        """Runs one training step: forward, backward, optimizer step, clamp."""
        self.forward_train()
        self.optimizer_net.zero_grad()
        self.backward_train()
        self.optimizer_net.step()
        self.clamp_weights()

    def clamp_weights(self):
        """Clamps the 1x1 linear-calibration conv weights to be non-negative."""
        for module in self.net.modules():
            if(hasattr(module, 'weight') and module.kernel_size == (1, 1)):
                module.weight.data = torch.clamp(module.weight.data, min=0)

    def set_input(self, data):
        """Loads a training triplet (ref, p0, p1, judge) onto the device."""
        self.input_ref = data['ref']
        self.input_p0 = data['p0']
        self.input_p1 = data['p1']
        self.input_judge = data['judge']

        if(self.use_gpu):
            self.input_ref = self.input_ref.cuda()
            self.input_p0 = self.input_p0.cuda()
            self.input_p1 = self.input_p1.cuda()
            self.input_judge = self.input_judge.cuda()

        self.var_ref = Variable(self.input_ref, requires_grad=True)
        self.var_p0 = Variable(self.input_p0, requires_grad=True)
        self.var_p1 = Variable(self.input_p1, requires_grad=True)

    def forward_train(self):  # run forward pass
        """Computes d0, d1, the accuracy and the ranking loss for one batch."""
        self.d0 = self.forward_pair(self.var_ref, self.var_p0)
        self.d1 = self.forward_pair(self.var_ref, self.var_p1)
        self.acc_r = self.compute_accuracy(self.d0, self.d1, self.input_judge)

        # var_judge
        self.var_judge = Variable(1. * self.input_judge).view(self.d0.size())

        # judge is mapped from [0,1] to [-1,1] for the ranking loss.
        self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge * 2. - 1.)
        return self.loss_total

    def backward_train(self):
        """Backpropagates the mean ranking loss."""
        torch.mean(self.loss_total).backward()

    def compute_accuracy(self, d0, d1, judge):
        ''' d0, d1 are Variables, judge is a Tensor '''
        d1_lt_d0 = (d1 < d0).cpu().data.numpy().flatten()
        judge_per = judge.cpu().numpy().flatten()
        # Fraction of the time the network's ordering agrees with humans.
        return d1_lt_d0 * judge_per + (1 - d1_lt_d0) * (1 - judge_per)

    def get_current_errors(self):
        """Returns an OrderedDict of scalar training metrics."""
        retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()),
                               ('acc_r', self.acc_r)])

        for key in retDict.keys():
            retDict[key] = np.mean(retDict[key])

        return retDict

    def get_current_visuals(self):
        """Returns the current (ref, p0, p1) patches as zoomed numpy images."""
        zoom_factor = 256 / self.var_ref.data.size()[2]

        ref_img = util.tensor2im(self.var_ref.data)
        p0_img = util.tensor2im(self.var_p0.data)
        p1_img = util.tensor2im(self.var_p1.data)

        ref_img_vis = zoom(ref_img, [zoom_factor, zoom_factor, 1], order=0)
        p0_img_vis = zoom(p0_img, [zoom_factor, zoom_factor, 1], order=0)
        p1_img_vis = zoom(p1_img, [zoom_factor, zoom_factor, 1], order=0)

        return OrderedDict([('ref', ref_img_vis),
                            ('p0', p0_img_vis),
                            ('p1', p1_img_vis)])

    def save(self, path, label):
        """Saves the distance net and (training-only) ranking net weights."""
        self.save_network(self.net, path, '', label)
        self.save_network(self.rankLoss.net, path, 'rank', label)

    def update_learning_rate(self, nepoch_decay):
        """Linearly decays the learning rate by self.lr/nepoch_decay per call."""
        lrd = self.lr / nepoch_decay
        lr = self.old_lr - lrd

        for param_group in self.optimizer_net.param_groups:
            param_group['lr'] = lr

        # Fixed: the original formatted the builtin `type` into this message,
        # printing "<type 'type'>" instead of a model label.
        print('update lr [%s] decay: %f -> %f' % (self.model_name, self.old_lr, lr))
        self.old_lr = lr
def score_2afc_dataset(data_loader, func):
    ''' Function computes Two Alternative Forced Choice (2AFC) score using
        distance function 'func' in dataset 'data_loader'
    INPUTS
        data_loader - CustomDatasetDataLoader object - contains a TwoAFCDataset inside
        func - callable distance function - calling d=func(in0,in1) should take 2
            pytorch tensors with shape Nx3xXxY, and return numpy array of length N
    OUTPUTS
        [0] - 2AFC score in [0,1], fraction of time func agrees with human evaluators
        [1] - dictionary with following elements
            d0s,d1s - N arrays containing distances between reference patch to perturbed patches
            gts - N array in [0,1], preferred patch selected by human evaluators
                (closer to "0" for left patch p0, "1" for right patch p1,
                "0.6" means 60pct people preferred right patch, 40pct preferred left)
            scores - N array in [0,1], corresponding to what percentage function agreed with humans
    CONSTS
        N - number of test triplets in data_loader
    '''
    ref_p0_dists = []
    ref_p1_dists = []
    human_prefs = []

    for data in data_loader.load_data():
        ref_p0_dists.extend(func(data['ref'], data['p0']).tolist())
        ref_p1_dists.extend(func(data['ref'], data['p1']).tolist())
        human_prefs.extend(data['judge'].cpu().numpy().flatten().tolist())

    d0s = np.array(ref_p0_dists)
    d1s = np.array(ref_p1_dists)
    gts = np.array(human_prefs)

    # Agreement per triplet: credit (1-gts) when p0 is judged closer,
    # gts when p1 is, and half credit on ties.
    scores = (d0s < d1s) * (1. - gts) + (d1s < d0s) * gts + (d1s == d0s) * .5

    return (np.mean(scores), dict(d0s=d0s, d1s=d1s, gts=gts, scores=scores))
def score_jnd_dataset(data_loader, func):
    ''' Function computes JND score using distance function 'func' in dataset 'data_loader'
    INPUTS
        data_loader - CustomDatasetDataLoader object - contains a JNDDataset inside
        func - callable distance function - calling d=func(in0,in1) should take 2
            pytorch tensors with shape Nx3xXxY, and return numpy array of length N
    OUTPUTS
        [0] - JND score in [0,1], mAP score (area under precision-recall curve)
        [1] - dictionary with following elements
            ds - N array containing distances between two patches shown to human evaluator
            sames - N array containing fraction of people who thought the two patches were identical
    CONSTS
        N - number of test triplets in data_loader
    '''
    distances = []
    same_fracs = []

    for data in data_loader.load_data():
        distances.extend(func(data['p0'], data['p1']).tolist())
        same_fracs.extend(data['same'].cpu().numpy().flatten().tolist())

    ds = np.array(distances)
    sames = np.array(same_fracs)

    # Sweep a threshold over the sorted distances, treating "same" judgements
    # as positives, and accumulate the precision/recall curve.
    order = np.argsort(ds)
    sames_sorted = sames[order]

    TPs = np.cumsum(sames_sorted)
    FPs = np.cumsum(1 - sames_sorted)
    FNs = np.sum(sames_sorted) - TPs

    precs = TPs / (TPs + FPs)
    recs = TPs / (TPs + FNs)
    score = util.voc_ap(recs, precs)

    return (score, dict(ds=ds, sames=sames))
| |
from itertools import izip_longest, islice
import re
from letters import is_vowell, to_tipa, to_order_tuple
class WordParseError(Exception):
    """Base error for failures while parsing a word's text, gloss or tones."""
    pass
class Word(object):
    """An immutable word: a tuple of morphemes, a tuple of syllables and a
    grammatical category."""

    def __init__(self, morphemes, syllables, category):
        self._morphemes = tuple(morphemes)
        self._syllables = tuple(syllables)
        self._category = category

    def __eq__(self, other):
        return (
            self._morphemes == other._morphemes and
            self._syllables == other._syllables and
            self._category == other._category
        )

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        # Fixed: the original referenced an undefined name `syllables`,
        # raising NameError whenever repr() was taken.
        return '<Word(%s, %s, %s)>' % (
            repr(self._morphemes), repr(self._syllables), repr(self._category))

    def __hash__(self):
        return hash((self._morphemes, self._syllables, self._category))

    @property
    def category(self):
        return self._category

    def iter_morphemes(self):
        """Yields the morphemes in order."""
        for m in self._morphemes:
            yield m

    def iter_syllables(self):
        """Yields the syllables in order."""
        for s in self._syllables:
            yield s

    def iter_complete_morphemes(self):
        """
        Iterates through "complete" morphemes. That is, morphemes who only contain
        whole syllables.

        Yields (morpheme, syllables_tuple) pairs. Morphemes whose boundaries do
        not line up with syllable boundaries are consumed but never yielded.
        """
        consumed_morpheme_letters = 0
        consumed_syllable_letters = 0
        syllable_iter = self.iter_syllables()
        morpheme_iter = self.iter_morphemes()
        # Fixed: the original initialized `pending_morphemes` (plural), a name
        # the loop below never reads; the loop uses `pending_morpheme`.
        pending_morpheme = None
        pending_syllables = []
        # PEP 479: iterator exhaustion must end the generator with an explicit
        # return; letting StopIteration escape raises RuntimeError on py3.7+.
        while True:
            if consumed_morpheme_letters == consumed_syllable_letters:
                # If we have both syllables and morphemes pending, we should yield
                # them.
                if pending_syllables and pending_morpheme:
                    yield pending_morpheme, tuple(pending_syllables)
                # Either way we should reset the pending variables.
                try:
                    pending_morpheme = next(morpheme_iter)
                except StopIteration:
                    return
                consumed_morpheme_letters += pending_morpheme.letter_count()
                try:
                    s = next(syllable_iter)
                except StopIteration:
                    return
                consumed_syllable_letters += s.letter_count()
                pending_syllables = [s]
            elif consumed_syllable_letters < consumed_morpheme_letters:
                # There are more syllables in this morpheme.
                try:
                    s = next(syllable_iter)
                except StopIteration:
                    return
                consumed_syllable_letters += s.letter_count()
                pending_syllables.append(s)
            elif consumed_morpheme_letters < consumed_syllable_letters:
                # Well, both the current pending morpheme and the next morpheme
                # must be invalid. Consume them, but indicate they are invalid by
                # setting the pending morpheme to None.
                try:
                    m = next(morpheme_iter)
                except StopIteration:
                    return
                consumed_morpheme_letters += m.letter_count()
                pending_morpheme = None
class Morpheme(object):
    """An immutable sequence of letters paired with a gloss.

    Flags record whether the morpheme is a particle and/or a suffix.
    """

    def __init__(self, letters, gloss, is_particle=False, is_suffix=False):
        self._letters = tuple(letters)
        self._gloss = gloss
        self._is_particle = is_particle
        self._is_suffix = is_suffix

    def _key(self):
        # Identity tuple shared by __eq__ and __hash__.
        return (self._letters, self._gloss, self._is_particle, self._is_suffix)

    def __eq__(self, other):
        return self._key() == other._key()

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self._key())

    def __repr__(self):
        flags = []
        if self._is_suffix:
            flags.append('suffix')
        if self._is_particle:
            flags.append('particle')
        return u'<Morpheme(%s, %s)>' % (repr(flags), repr(self._letters))

    def iter_letters(self):
        return iter(self._letters)

    def letter_count(self):
        return len(self._letters)

    def text(self):
        return ''.join(letter.text() for letter in self.iter_letters())

    @property
    def gloss(self):
        return self._gloss
class SyllablesMustHaveVowells(WordParseError):
    """Raised by Syllable.__init__ when a syllable contains no vowell."""
    pass
class Syllable(object):
    """An immutable group of letters carrying a single tone.

    At least one letter must be a vowell, otherwise construction fails with
    SyllablesMustHaveVowells.
    """

    def __init__(self, letters, tone):
        self._letters = letters
        self._tone = tone
        if not self.has_vowell():
            # NOTE: "Sylable" spelling preserved from the original message.
            raise SyllablesMustHaveVowells(
                'Sylable %s with tone %s does not have a vowell' % (
                    self._letters, self._tone))

    def letters(self):
        """
        Returns a tuple of the letters in this syllable.
        """
        return tuple(self._letters)

    @property
    def tone(self):
        return self._tone

    def has_vowell(self):
        return any(letter.is_vowell() for letter in self._letters)

    def iter_letters(self):
        return iter(self._letters)

    def letter_count(self):
        return len(self._letters)

    def text(self):
        return ''.join(letter.text() for letter in self.iter_letters())

    def __eq__(self, other):
        return self._letters == other._letters and self._tone == other._tone

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash((self.letters(), self._tone))

    def __repr__(self):
        return u'<Syllable(%s, %s)>' % (repr(self._letters), repr(self._tone))
class InvalidLetter(WordParseError):
    """Raised when a Letter has an impossible diacritic combination or text."""
    pass
class Letter(object):
    """
    Represents a single IPA letter.

    Has the logic built-in to determine if it is or is not a vowell.
    This is tracked as a single unicode character, and some bits that mark
    nasal, labialized, and long-vowell diacritics.
    """

    def __init__(self, text, is_nasal, is_labialized, is_long):
        if is_nasal and is_labialized:
            raise InvalidLetter('Cannot be both nasal and have a raised w')
        # 'r' is normalized to 'l'; the two are not distinguished here.
        self._text = 'l' if text == 'r' else text
        self._is_nasal = is_nasal
        self._is_labialized = is_labialized
        self._is_long = is_long
        # The short-circuits below matter: is_vowell() is only consulted when
        # a diacritic is actually present.
        if (is_nasal or is_long) and not self.is_vowell():
            raise InvalidLetter('Nasal letters must be vowells (%s)' % repr(self))
        if is_labialized and self.is_vowell():
            raise InvalidLetter('Letters with a w must be consonants (%s)' %
                                repr(self))

    @property
    def is_nasal(self):
        return self._is_nasal

    def is_vowell(self):
        try:
            return is_vowell(self._text)
        except IndexError as e:
            raise InvalidLetter(
                "Unknown letter when determining if something was a vowell: '%s'" %
                e.args[0]
            )

    def to_tipa(self):
        return '%s%s%s' % (
            self._nasal_prefix(), to_tipa(self._text), self._suffix())

    def __eq__(self, other):
        if type(self) != type(other):
            return NotImplemented
        return (
            self._text == other._text and
            self._is_nasal == other._is_nasal and
            self._is_labialized == other._is_labialized and
            self._is_long == other._is_long
        )

    def _to_tuple(self):
        return (
            to_order_tuple(self._text),
            self._is_nasal,
            self._is_labialized,
            self._is_long
        )

    def __lt__(self, other):
        return self._to_tuple() < other._to_tuple()

    def __le__(self, other):
        return self == other or self < other

    def __gt__(self, other):
        return not self <= other

    def __ge__(self, other):
        return not self < other

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash((
            self._text, self._is_labialized, self._is_nasal, self._is_long
        ))

    def _nasal_prefix(self):
        return '\\~' if self._is_nasal else ''

    def _suffix(self):
        if self._is_long:
            return ':'
        if self._is_labialized:
            return '^{w}'
        return ''

    def text(self):
        return u'%s%s%s' % (self._nasal_prefix(), self._text, self._suffix())

    def __repr__(self):
        return u'<Letter %s>' % repr(self.text())
# Diacritic markers recognized in the raw transcription text.
_NASAL = '\~'  # prefix form of the nasal marker
_LONG = ':'  # long-vowell suffix
_LABIALIZED = '^{w}'  # raised-w (labialization) suffix
# Alternative suffix spellings of the nasal marker.
_NASAL_SUFFIX_1 = '^~'
_NASAL_SUFFIX_2 = '^{~}'
_NASAL_SUFFIX_3 = '~'
# Two-character sequences treated as a single letter.
_DIGRAPHS = ['kp', 'gb']
def make_letter(text):
    """Builds a Letter from one raw letter string, stripping its diacritics.

    Recognizes the nasal prefix, the labialized and long suffixes, and the
    three alternative nasal suffix spellings.
    """
    is_labialized = False
    is_nasal = False
    is_long = False

    if text.startswith(_NASAL):
        text = text[len(_NASAL):]
        is_nasal = True
    if text.endswith(_LABIALIZED):
        text = text[:-len(_LABIALIZED)]
        is_labialized = True
    if text.endswith(_LONG):
        text = text[:-len(_LONG)]
        is_long = True
    # Each nasal suffix spelling is tested once, in this order (matches the
    # original sequence of independent ifs).
    for nasal_suffix in (_NASAL_SUFFIX_1, _NASAL_SUFFIX_2, _NASAL_SUFFIX_3):
        if text.endswith(nasal_suffix):
            text = text[:-len(nasal_suffix)]
            is_nasal = True

    return Letter(text, is_nasal, is_labialized, is_long)
def _clean_text(text):
processed_text = text
for removed_letter in u'() \xa0':
processed_text = processed_text.replace(removed_letter, '')
return processed_text
def make_letters(text):
    """Parses a raw transcription string into a list of Letter instances.

    Cleans the text first (_clean_text), then repeatedly peels one letter off
    the front. A letter is normally one character, but a leading nasal prefix
    is kept together with the character it modifies, and the digraphs in
    _DIGRAPHS ('kp', 'gb') are consumed as a single letter. Any recognized
    suffix diacritics immediately following the letter are attached to it
    before it is handed to make_letter().
    """
    result = []
    processed_text = _clean_text(text)
    while processed_text:
        split = 1
        if processed_text.startswith(_NASAL):
            # Nasal prefix plus the single character it modifies.
            split = len(_NASAL)+1
        elif any(processed_text.startswith(x) for x in _DIGRAPHS):
            split = 2
        letter = processed_text[:split]
        rest = processed_text[split:]
        # Attach suffix diacritics. Each suffix is tested once, in this exact
        # order, so combinations (e.g. nasal followed by long) are collected;
        # order matters — do not convert these to elif.
        for suffix in [
            _NASAL_SUFFIX_1,
            _NASAL_SUFFIX_2,
            _NASAL_SUFFIX_3,
            _LABIALIZED,
            _LONG
        ]:
            if rest.startswith(suffix):
                letter += rest[:len(suffix)]
                rest = rest[len(suffix):]
        result.append(make_letter(letter))
        processed_text = rest
    return result
def make_syllable(text, tone):
    """Builds a single Syllable from raw text plus its tone marking."""
    letters = make_letters(text)
    return Syllable(letters, unicode(tone))
def _syllable_letter_grouper(letters):
    """
    Takes a list of letters, and generates sub-lists of letters that are
    valid syllables.
    The algorithm is:
    1) If there is only 1 vowell left in the remaining letters, return all
    remaining letters.
    2) Otherwise return all remaining letters until the first vowell.
    :param letters: A list of Letter instances.
    :yields: Lists of letters, 1 per syllable.
    """
    vowell_count = sum(1 for l in letters if l.is_vowell())
    letter_iterator = iter(letters)
    while vowell_count > 1:
        next_chunk = []
        # Consume letters up to and including the next vowel.
        while True:
            c = next(letter_iterator)
            next_chunk.append(c)
            if c.is_vowell():
                break
        vowell_count -= 1
        yield next_chunk
    # Everything left (including any trailing consonants) is the last group.
    yield list(x for x in letter_iterator)
class ToneTextSyllableMismatch(WordParseError):
    """Raised when the syllable count of a text disagrees with its tones."""
    pass
def _raise(ex):
    """Raise *ex*; lets an expression (e.g. inside a comprehension's
    conditional) trigger an exception."""
    raise ex
def _count_syllables(text):
    """Return the number of syllables in *text* (0 when it has no vowel)."""
    groups = list(_syllable_letter_grouper(make_letters(text)))
    if len(groups) != 1:
        return len(groups)
    # A single group counts as a syllable only if it contains a vowel.
    return 1 if any(letter.is_vowell() for letter in groups[0]) else 0
def make_syllables(text, tones):
    """Build Syllables by pairing letter groups with '.'-separated tones.

    Raises ToneTextSyllableMismatch when the counts differ: izip_longest
    pads the shorter side with None, which trips the _raise branch.
    """
    letters = make_letters(text)
    # NOTE: the loop variable 'letters' deliberately shadows the full list.
    return list(
        Syllable(letters, unicode(tone))
        if letters is not None and tone is not None
        else _raise(ToneTextSyllableMismatch(
            'Unequal number of syllables in text(%s) and tone(%s).' % (repr(text),
            repr(tones))
        ))
        for letters, tone in izip_longest(
            _syllable_letter_grouper(letters),
            tones.split('.')
        )
    )
def make_morpheme(text, gloss, is_particle=False, is_suffix=False):
    """Build a Morpheme, parsing *text* into Letters."""
    return Morpheme(
        make_letters(text),
        gloss,
        is_particle=is_particle,
        is_suffix=is_suffix
    )
class MorphemeMismatch(WordParseError):
    """Raised when morpheme texts and glosses cannot be paired one-to-one."""
    pass
def make_morphemes(texts, glosses):
    """Pair '-'-separated morpheme texts with their glosses.

    The first non-particle morpheme is the root; later non-particles are
    marked as suffixes. A "PART" gloss marks a particle (stored with a None
    gloss). Raises MorphemeMismatch when the counts differ.
    """
    results = []
    have_root = False
    for text, gloss in izip_longest(
            texts.split('-'), glosses.split('-')):
        # izip_longest pads the shorter sequence with None.
        if text is None or gloss is None:
            raise MorphemeMismatch(
                'Could not divide into morphemes text(%s), gloss(%s)' % (
                    texts, glosses)
            )
        part = (gloss=="PART")
        # Anything after the root (other than particles) is a suffix.
        suffix = have_root and not part
        morpheme_gloss = gloss
        if part:
            morpheme_gloss = None
        results.append(make_morpheme(
            text, morpheme_gloss, is_particle=part, is_suffix=suffix
        ))
        have_root = have_root or not part
    return results
class BadIPATone(WordParseError):
    """Raised when an IPA string lacks a parseable trailing ^{tone} marker."""
    pass
def make_word(ipa, gloss, category):
    """Build a Word from an IPA transcription ending in a ^{tone} marker.

    *ipa* must end with '^{...}' containing only digits, '.' and '-';
    '-' tone separators are normalized to '.'. Raises BadIPATone otherwise.
    """
    if not re.match(r'.*\^\{[0-9.-]+\}$', ipa):
        raise BadIPATone("Could not extract tone from ipa(%s)" % repr(ipa))
    breakpoint = ipa.rfind('^')
    text = ipa[:breakpoint]
    # Skip the '^{' and drop the trailing '}'.
    tone = ipa[breakpoint+2:-1].replace('-', '.')
    return Word(
        make_morphemes(text, gloss),
        # Syllabification ignores morpheme boundaries.
        make_syllables(text.replace('-', ''), tone),
        category
    )
# ---- boundary between unrelated modules (concatenation artifact) ----
from django.core import validators
from django.core.exceptions import PermissionDenied
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.conf import settings
from django.utils.translation import ugettext, ungettext
from django.utils.encoding import smart_unicode, force_unicode
from django.utils.maxlength import LegacyMaxlength
FORM_FIELD_ID_PREFIX = 'id_'
class EmptyValue(Exception):
    "This is raised when empty data is provided"
    pass
class Manipulator(object):
    """
    Base class for objects that validate and save a set of FormFields.

    Subclasses populate ``self.fields`` with FormField instances and
    implement save(). An optional ``validate_<field_name>`` method supplies a
    manipulator-level validator on top of that field's own validator_list.
    """
    # List of permission strings. User must have at least one to manipulate.
    # None means everybody has permission.
    required_permission = ''
    def __init__(self):
        # List of FormField objects
        self.fields = []
    def __getitem__(self, field_name):
        "Looks up field by field name; raises KeyError on failure"
        for field in self.fields:
            if field.field_name == field_name:
                return field
        raise KeyError, "Field %s not found\n%s" % (field_name, repr(self.fields))
    def __delitem__(self, field_name):
        "Deletes the field with the given field name; raises KeyError on failure"
        for i, field in enumerate(self.fields):
            if field.field_name == field_name:
                del self.fields[i]
                return
        raise KeyError, "Field %s not found" % field_name
    def check_permissions(self, user):
        """Confirms user has required permissions to use this manipulator; raises
        PermissionDenied on failure."""
        if self.required_permission is None:
            return
        if user.has_perm(self.required_permission):
            return
        raise PermissionDenied
    def prepare(self, new_data):
        """
        Makes any necessary preparations to new_data, in place, before data has
        been validated.
        """
        for field in self.fields:
            field.prepare(new_data)
    def get_validation_errors(self, new_data):
        "Returns dictionary mapping field_names to error-message lists"
        errors = {}
        self.prepare(new_data)
        for field in self.fields:
            # Each field first runs its own validator_list...
            errors.update(field.get_validation_errors(new_data))
            # ...then the optional manipulator-level validate_<name> hook.
            val_name = 'validate_%s' % field.field_name
            if hasattr(self, val_name):
                val = getattr(self, val_name)
                try:
                    field.run_validator(new_data, val)
                except (validators.ValidationError, validators.CriticalValidationError), e:
                    errors.setdefault(field.field_name, []).extend(e.messages)
        # NOTE(review): superseded inline implementation kept for reference;
        # the logic now lives in FormField.get_validation_errors/run_validator.
#            if field.is_required and not new_data.get(field.field_name, False):
#                errors.setdefault(field.field_name, []).append(ugettext_lazy('This field is required.'))
#                continue
#            try:
#                validator_list = field.validator_list
#                if hasattr(self, 'validate_%s' % field.field_name):
#                    validator_list.append(getattr(self, 'validate_%s' % field.field_name))
#                for validator in validator_list:
#                    if field.is_required or new_data.get(field.field_name, False) or hasattr(validator, 'always_test'):
#                        try:
#                            if hasattr(field, 'requires_data_list'):
#                                validator(new_data.getlist(field.field_name), new_data)
#                            else:
#                                validator(new_data.get(field.field_name, ''), new_data)
#                        except validators.ValidationError, e:
#                            errors.setdefault(field.field_name, []).extend(e.messages)
#            # If a CriticalValidationError is raised, ignore any other ValidationErrors
#            # for this particular field
#            except validators.CriticalValidationError, e:
#                errors.setdefault(field.field_name, []).extend(e.messages)
        return errors
    def save(self, new_data):
        "Saves the changes and returns the new object"
        # changes is a dictionary-like object keyed by field_name
        raise NotImplementedError
    def do_html2python(self, new_data):
        """
        Convert the data from HTML data types to Python datatypes, changing the
        object in place. This happens after validation but before storage. This
        must happen after validation because html2python functions aren't
        expected to deal with invalid input.
        """
        for field in self.fields:
            field.convert_post_data(new_data)
class FormWrapper(object):
    """
    A wrapper linking a Manipulator to the template system.
    This allows dictionary-style lookups of formfields. It also handles feeding
    prepopulated data and validation error messages to the formfield objects.
    """
    def __init__(self, manipulator, data=None, error_dict=None, edit_inline=True):
        self.manipulator = manipulator
        if data is None:
            data = {}
        if error_dict is None:
            error_dict = {}
        self.data = data
        self.error_dict = error_dict
        # Built lazily by fill_inline_collections().
        self._inline_collections = None
        self.edit_inline = edit_inline
    def __repr__(self):
        return repr(self.__dict__)
    def __getitem__(self, key):
        # First try the manipulator's own fields, then inline collections.
        for field in self.manipulator.fields:
            if field.field_name == key:
                data = field.extract_data(self.data)
                return FormFieldWrapper(field, data, self.error_dict.get(field.field_name, []))
        if self.edit_inline:
            self.fill_inline_collections()
            for inline_collection in self._inline_collections:
                # The 'orig_name' comparison is for backwards compatibility
                # with hand-crafted forms.
                if inline_collection.name == key or (':' not in key and inline_collection.orig_name == key):
                    return inline_collection
        raise KeyError, "Could not find Formfield or InlineObjectCollection named %r" % key
    def fill_inline_collections(self):
        # Builds once; an empty result is rebuilt on each call (falsy check),
        # which is harmless but slightly wasteful.
        if not self._inline_collections:
            ic = []
            related_objects = self.manipulator.get_related_objects()
            for rel_obj in related_objects:
                data = rel_obj.extract_data(self.data)
                inline_collection = InlineObjectCollection(self.manipulator, rel_obj, data, self.error_dict)
                ic.append(inline_collection)
            self._inline_collections = ic
    def has_errors(self):
        return self.error_dict != {}
    def _get_fields(self):
        # Cached list of FormFieldWrappers, one per manipulator field.
        try:
            return self._fields
        except AttributeError:
            self._fields = [self.__getitem__(field.field_name) for field in self.manipulator.fields]
            return self._fields
    fields = property(_get_fields)
class FormFieldWrapper(object):
    "A bridge between the template system and an individual form field. Used by FormWrapper."
    def __init__(self, formfield, data, error_list):
        self.formfield, self.data, self.error_list = formfield, data, error_list
        self.field_name = self.formfield.field_name # for convenience in templates
    def __str__(self):
        "Renders the field"
        return unicode(self).encode('utf-8')
    def __unicode__(self):
        "Renders the field"
        return force_unicode(self.formfield.render(self.data))
    def __repr__(self):
        return '<FormFieldWrapper for "%s">' % self.formfield.field_name
    def field_list(self):
        """
        Like __str__(), but returns a list. Use this when the field's render()
        method returns a list.
        """
        return self.formfield.render(self.data)
    def errors(self):
        # Validation error messages for this field (possibly empty).
        return self.error_list
    def html_error_list(self):
        # Renders the errors as a <ul class="errorlist">, or '' when clean.
        if self.errors():
            return mark_safe('<ul class="errorlist"><li>%s</li></ul>' % '</li><li>'.join([escape(e) for e in self.errors()]))
        else:
            return mark_safe('')
    def get_id(self):
        return self.formfield.get_id()
class FormFieldCollection(FormFieldWrapper):
    "A utility class that gives the template access to a dict of FormFieldWrappers"
    def __init__(self, formfield_dict):
        # Deliberately does NOT call FormFieldWrapper.__init__; this subclass
        # only reuses its template-facing interface.
        self.formfield_dict = formfield_dict
    def __str__(self):
        return unicode(self).encode('utf-8')
    def __unicode__(self):
        return unicode(self.formfield_dict)
    def __getitem__(self, template_key):
        "Look up field by template key; raise KeyError on failure"
        return self.formfield_dict[template_key]
    def __repr__(self):
        return "<FormFieldCollection: %s>" % self.formfield_dict
    def errors(self):
        "Returns list of all errors in this collection's formfields"
        errors = []
        for field in self.formfield_dict.values():
            # Skip non-field entries (e.g. the 'original' instance).
            if hasattr(field, 'errors'):
                errors.extend(field.errors())
        return errors
    def has_errors(self):
        return bool(len(self.errors()))
    def html_combined_error_list(self):
        return mark_safe(''.join([field.html_error_list() for field in self.formfield_dict.values() if hasattr(field, 'errors')]))
class InlineObjectCollection(object):
    "An object that acts like a sparse list of form field collections."
    def __init__(self, parent_manipulator, rel_obj, data, errors):
        self.parent_manipulator = parent_manipulator
        self.rel_obj = rel_obj
        self.data = data
        self.errors = errors
        # Built lazily by fill(): dict of index -> FormFieldCollection.
        self._collections = None
        self.name = rel_obj.name
        # This is the name used prior to fixing #1839. Needs for backwards
        # compatibility.
        self.orig_name = rel_obj.opts.module_name
    def __len__(self):
        self.fill()
        return self._collections.__len__()
    def __getitem__(self, k):
        self.fill()
        return self._collections.__getitem__(k)
    def __setitem__(self, k, v):
        self.fill()
        return self._collections.__setitem__(k,v)
    def __delitem__(self, k):
        self.fill()
        return self._collections.__delitem__(k)
    def __iter__(self):
        self.fill()
        return iter(self._collections.values())
    def items(self):
        self.fill()
        return self._collections.items()
    def fill(self):
        # Build the per-instance FormFieldCollections once.
        if self._collections:
            return
        else:
            var_name = self.rel_obj.opts.object_name.lower()
            collections = {}
            orig = None
            if hasattr(self.parent_manipulator, 'original_object'):
                orig = self.parent_manipulator.original_object
            orig_list = self.rel_obj.get_list(orig)
            for i, instance in enumerate(orig_list):
                collection = {'original': instance}
                for f in self.rel_obj.editable_fields():
                    for field_name in f.get_manipulator_field_names(''):
                        # Manipulator field names look like "book.0.title".
                        full_field_name = '%s.%d.%s' % (var_name, i, field_name)
                        field = self.parent_manipulator[full_field_name]
                        data = field.extract_data(self.data)
                        errors = self.errors.get(full_field_name, [])
                        collection[field_name] = FormFieldWrapper(field, data, errors)
                collections[i] = FormFieldCollection(collection)
            self._collections = collections
class FormField(object):
    """Abstract class representing a form field.

    Classes that extend FormField should define the following attributes:
        field_name
            The field's name for use by programs.
        validator_list
            A list of validation tests (callback functions) that the data for
            this field must pass in order to be added or changed.
        is_required
            A Boolean. Is it a required field?
    Subclasses should also implement a render(data) method, which is responsible
    for rending the form field in XHTML.
    """
    # Provide backwards compatibility for the maxlength attribute and
    # argument for this class and all subclasses.
    __metaclass__ = LegacyMaxlength
    def __str__(self):
        return unicode(self).encode('utf-8')
    def __unicode__(self):
        # Default rendering with no data.
        return self.render(u'')
    def __repr__(self):
        return 'FormField "%s"' % self.field_name
    def prepare(self, new_data):
        "Hook for doing something to new_data (in place) before validation."
        pass
    def html2python(data):
        "Hook for converting an HTML datatype (e.g. 'on' for checkboxes) to a Python type"
        return data
    html2python = staticmethod(html2python)
    def render(self, data):
        raise NotImplementedError
    def get_member_name(self):
        # member_name, when set, overrides field_name for data extraction.
        if hasattr(self, 'member_name'):
            return self.member_name
        else:
            return self.field_name
    def extract_data(self, data_dict):
        "Pulls this field's raw value(s) out of a (MultiValue)dict of POST data."
        if hasattr(self, 'requires_data_list') and hasattr(data_dict, 'getlist'):
            data = data_dict.getlist(self.get_member_name())
        else:
            data = data_dict.get(self.get_member_name(), None)
        if data is None:
            data = ''
        return data
    def convert_post_data(self, new_data):
        "Converts this field's values in new_data (in place) via html2python."
        name = self.get_member_name()
        if self.field_name in new_data:
            d = new_data.getlist(self.field_name)
            try:
                converted_data = [self.__class__.html2python(data) for data in d]
            except ValueError:
                # Leave the raw strings in place if conversion fails.
                converted_data = d
            new_data.setlist(name, converted_data)
        else:
            try:
                #individual fields deal with None values themselves
                new_data.setlist(name, [self.__class__.html2python(None)])
            except EmptyValue:
                new_data.setlist(name, [])
    def run_validator(self, new_data, validator):
        # Optional fields with no submitted data are skipped unless the
        # validator is flagged with 'always_test'.
        if self.is_required or new_data.get(self.field_name, False) or hasattr(validator, 'always_test'):
            if hasattr(self, 'requires_data_list'):
                validator(new_data.getlist(self.field_name), new_data)
            else:
                validator(new_data.get(self.field_name, ''), new_data)
    def get_validation_errors(self, new_data):
        "Runs all validators; returns {field_name: [messages]} (empty if clean)."
        errors = {}
        if self.is_required and not new_data.get(self.field_name, False):
            errors.setdefault(self.field_name, []).append(ugettext('This field is required.'))
            return errors
        try:
            for validator in self.validator_list:
                try:
                    self.run_validator(new_data, validator)
                except validators.ValidationError, e:
                    errors.setdefault(self.field_name, []).extend(e.messages)
        # If a CriticalValidationError is raised, ignore any other ValidationErrors
        # for this particular field
        except validators.CriticalValidationError, e:
            errors.setdefault(self.field_name, []).extend(e.messages)
        return errors
    def get_id(self):
        "Returns the HTML 'id' attribute for this form field."
        return FORM_FIELD_ID_PREFIX + self.field_name
####################
# GENERIC WIDGETS #
####################
class TextField(FormField):
    'A single-line <input type="text"> field with length validation.'
    input_type = "text"
    def __init__(self, field_name, length=30, max_length=None, is_required=False, validator_list=None, member_name=None):
        if validator_list is None: validator_list = []
        self.field_name = field_name
        # length is the rendered size=; max_length is the validation limit.
        self.length, self.max_length = length, max_length
        self.is_required = is_required
        self.validator_list = [self.isValidLength, self.hasNoNewlines] + validator_list
        if member_name != None:
            self.member_name = member_name
    def isValidLength(self, data, form):
        if data and self.max_length and len(smart_unicode(data)) > self.max_length:
            raise validators.ValidationError, ungettext("Ensure your text is less than %s character.",
                "Ensure your text is less than %s characters.", self.max_length) % self.max_length
    def hasNoNewlines(self, data, form):
        if data and '\n' in data:
            raise validators.ValidationError, ugettext("Line breaks are not allowed here.")
    def render(self, data):
        if data is None:
            data = u''
        max_length = u''
        if self.max_length:
            max_length = u'maxlength="%s" ' % self.max_length
        return mark_safe(u'<input type="%s" id="%s" class="v%s%s" name="%s" size="%s" value="%s" %s/>' % \
            (self.input_type, self.get_id(), self.__class__.__name__, self.is_required and u' required' or '',
            self.field_name, self.length, escape(data), max_length))
    def html2python(data):
        # Text passes through unconverted.
        return data
    html2python = staticmethod(html2python)
class PasswordField(TextField):
    'A TextField rendered as <input type="password">.'
    input_type = "password"
class LargeTextField(TextField):
    "A multi-line <textarea> field; length validation only when max_length is given."
    def __init__(self, field_name, rows=10, cols=40, is_required=False, validator_list=None, max_length=None):
        if validator_list is None: validator_list = []
        self.field_name = field_name
        self.rows, self.cols, self.is_required = rows, cols, is_required
        # Unlike TextField, newlines are allowed, so hasNoNewlines is omitted.
        self.validator_list = validator_list[:]
        if max_length:
            self.validator_list.append(self.isValidLength)
            self.max_length = max_length
    def render(self, data):
        if data is None:
            data = ''
        return mark_safe(u'<textarea id="%s" class="v%s%s" name="%s" rows="%s" cols="%s">%s</textarea>' % \
            (self.get_id(), self.__class__.__name__, self.is_required and u' required' or u'',
            self.field_name, self.rows, self.cols, escape(data)))
class HiddenField(FormField):
    'An <input type="hidden"> field.'
    # NOTE(review): max_length is accepted but never stored or used here --
    # presumably kept for signature compatibility; confirm.
    def __init__(self, field_name, is_required=False, validator_list=None, max_length=None):
        if validator_list is None: validator_list = []
        self.field_name, self.is_required = field_name, is_required
        self.validator_list = validator_list[:]
    def render(self, data):
        return mark_safe(u'<input type="hidden" id="%s" name="%s" value="%s" />' % \
            (self.get_id(), self.field_name, escape(data)))
class CheckboxField(FormField):
    'A single <input type="checkbox"> field mapping to a boolean.'
    def __init__(self, field_name, checked_by_default=False, validator_list=None, is_required=False):
        if validator_list is None: validator_list = []
        self.field_name = field_name
        self.checked_by_default = checked_by_default
        self.is_required = is_required
        self.validator_list = validator_list[:]
    def render(self, data):
        checked_html = ''
        # BUG FIX: the original used ``data is ''`` -- an identity test that
        # only works because CPython happens to intern the empty string (and
        # raises SyntaxWarning on modern Pythons). Use equality instead.
        # Empty-string data means "no submitted value", so fall back to the
        # checked_by_default setting.
        if data or (data == '' and self.checked_by_default):
            checked_html = ' checked="checked"'
        return mark_safe(u'<input type="checkbox" id="%s" class="v%s" name="%s"%s />' % \
            (self.get_id(), self.__class__.__name__,
            self.field_name, checked_html))
    def html2python(data):
        "Convert value from browser ('on' or '') to a Python boolean"
        if data == 'on':
            return True
        return False
    html2python = staticmethod(html2python)
class SelectField(FormField):
    "A single-valued <select> field; validates the value against choices."
    def __init__(self, field_name, choices=None, size=1, is_required=False, validator_list=None, member_name=None):
        if validator_list is None: validator_list = []
        if choices is None: choices = []
        choices = [(k, smart_unicode(v, strings_only=True)) for k, v in choices]
        self.field_name = field_name
        # choices is a list of (value, human-readable key) tuples because order matters
        self.choices, self.size, self.is_required = choices, size, is_required
        self.validator_list = [self.isValidChoice] + validator_list
        if member_name != None:
            self.member_name = member_name
    def render(self, data):
        output = [u'<select id="%s" class="v%s%s" name="%s" size="%s">' % \
            (self.get_id(), self.__class__.__name__,
            self.is_required and u' required' or u'', self.field_name, self.size)]
        str_data = smart_unicode(data) # normalize to string
        for value, display_name in self.choices:
            selected_html = u''
            if smart_unicode(value) == str_data:
                selected_html = u' selected="selected"'
            output.append(u'    <option value="%s"%s>%s</option>' % (escape(value), selected_html, force_unicode(escape(display_name))))
        output.append(u'  </select>')
        return mark_safe(u'\n'.join(output))
    def isValidChoice(self, data, form):
        # Comparison is done on the stringified values, matching render().
        str_data = smart_unicode(data)
        str_choices = [smart_unicode(item[0]) for item in self.choices]
        if str_data not in str_choices:
            raise validators.ValidationError, ugettext("Select a valid choice; '%(data)s' is not in %(choices)s.") % {'data': str_data, 'choices': str_choices}
class NullSelectField(SelectField):
    "This SelectField converts blank fields to None"
    def html2python(data):
        # Falsy submitted data ('' or None) becomes None; anything else
        # passes through unchanged.
        return data or None
    html2python = staticmethod(html2python)
class RadioSelectField(FormField):
    "A group of <input type=\"radio\"> buttons, one per choice."
    def __init__(self, field_name, choices=None, ul_class='', is_required=False, validator_list=None, member_name=None):
        if validator_list is None: validator_list = []
        if choices is None: choices = []
        choices = [(k, smart_unicode(v)) for k, v in choices]
        self.field_name = field_name
        # choices is a list of (value, human-readable key) tuples because order matters
        self.choices, self.is_required = choices, is_required
        self.validator_list = [self.isValidChoice] + validator_list
        self.ul_class = ul_class
        if member_name != None:
            self.member_name = member_name
    def render(self, data):
        """
        Returns a special object, RadioFieldRenderer, that is iterable *and*
        has a default unicode() rendered output.
        This allows for flexible use in templates. You can just use the default
        rendering:
            {{ field_name }}
        ...which will output the radio buttons in an unordered list.
        Or, you can manually traverse each radio option for special layout:
            {% for option in field_name.field_list %}
                {{ option.field }} {{ option.label }}<br />
            {% endfor %}
        """
        class RadioFieldRenderer:
            # Local helper class so each render() call carries its own data.
            def __init__(self, datalist, ul_class):
                self.datalist, self.ul_class = datalist, ul_class
            def __unicode__(self):
                "Default unicode() output for this radio field -- a <ul>"
                output = [u'<ul%s>' % (self.ul_class and u' class="%s"' % self.ul_class or u'')]
                output.extend([u'<li>%s %s</li>' % (d['field'], d['label']) for d in self.datalist])
                output.append(u'</ul>')
                return mark_safe(u''.join(output))
            def __iter__(self):
                for d in self.datalist:
                    yield d
            def __len__(self):
                return len(self.datalist)
        datalist = []
        str_data = smart_unicode(data) # normalize to string
        for i, (value, display_name) in enumerate(self.choices):
            selected_html = ''
            if smart_unicode(value) == str_data:
                selected_html = u' checked="checked"'
            datalist.append({
                'value': value,
                'name': display_name,
                'field': mark_safe(u'<input type="radio" id="%s" name="%s" value="%s"%s/>' % \
                    (self.get_id() + u'_' + unicode(i), self.field_name, value, selected_html)),
                'label': mark_safe(u'<label for="%s">%s</label>' % \
                    (self.get_id() + u'_' + unicode(i), display_name),
                )})
        return RadioFieldRenderer(datalist, self.ul_class)
    def isValidChoice(self, data, form):
        str_data = smart_unicode(data)
        str_choices = [smart_unicode(item[0]) for item in self.choices]
        if str_data not in str_choices:
            raise validators.ValidationError, ugettext("Select a valid choice; '%(data)s' is not in %(choices)s.") % {'data':str_data, 'choices':str_choices}
class NullBooleanField(SelectField):
    "This SelectField provides 'Yes', 'No' and 'Unknown', mapping results to True, False or None"
    def __init__(self, field_name, is_required=False, validator_list=None):
        if validator_list is None: validator_list = []
        SelectField.__init__(self, field_name, choices=[('1', ugettext('Unknown')), ('2', ugettext('Yes')), ('3', ugettext('No'))],
            is_required=is_required, validator_list=validator_list)
    def render(self, data):
        # Map Python values onto the '1'/'2'/'3' choice keys before rendering.
        if data is None: data = '1'
        elif data == True: data = '2'
        elif data == False: data = '3'
        return SelectField.render(self, data)
    def html2python(data):
        return {None: None, '1': None, '2': True, '3': False}[data]
    html2python = staticmethod(html2python)
class SelectMultipleField(SelectField):
    "A <select multiple> field; its data is a *list* of selected values."
    requires_data_list = True
    def render(self, data):
        output = [u'<select id="%s" class="v%s%s" name="%s" size="%s" multiple="multiple">' % \
            (self.get_id(), self.__class__.__name__, self.is_required and u' required' or u'',
            self.field_name, self.size)]
        str_data_list = map(smart_unicode, data) # normalize to strings
        for value, choice in self.choices:
            selected_html = u''
            if smart_unicode(value) in str_data_list:
                selected_html = u' selected="selected"'
            output.append(u'    <option value="%s"%s>%s</option>' % (escape(value), selected_html, force_unicode(escape(choice))))
        output.append(u'  </select>')
        return mark_safe(u'\n'.join(output))
    def isValidChoice(self, field_data, all_data):
        # data is something like ['1', '2', '3']
        str_choices = [smart_unicode(item[0]) for item in self.choices]
        for val in map(smart_unicode, field_data):
            if val not in str_choices:
                raise validators.ValidationError, ugettext("Select a valid choice; '%(data)s' is not in %(choices)s.") % {'data':val, 'choices':str_choices}
    def html2python(data):
        # None (field absent from POST) means "no value at all".
        if data is None:
            raise EmptyValue
        return data
    html2python = staticmethod(html2python)
class CheckboxSelectMultipleField(SelectMultipleField):
    """
    This has an identical interface to SelectMultipleField, except the rendered
    widget is different. Instead of a <select multiple>, this widget outputs a
    <ul> of <input type="checkbox">es.
    Of course, that results in multiple form elements for the same "single"
    field, so this class's prepare() method flattens the split data elements
    back into the single list that validators, renderers and save() expect.
    """
    requires_data_list = True
    def __init__(self, field_name, choices=None, ul_class='', validator_list=None):
        if validator_list is None: validator_list = []
        if choices is None: choices = []
        self.ul_class = ul_class
        SelectMultipleField.__init__(self, field_name, choices, size=1, is_required=False, validator_list=validator_list)
    def prepare(self, new_data):
        # new_data has "split" this field into several fields, so flatten it
        # back into a single list.
        data_list = []
        for value, readable_value in self.choices:
            # Each checkbox posts as "<field_name><value>" = "on".
            if new_data.get('%s%s' % (self.field_name, value), '') == 'on':
                data_list.append(value)
        new_data.setlist(self.field_name, data_list)
    def render(self, data):
        output = [u'<ul%s>' % (self.ul_class and u' class="%s"' % self.ul_class or u'')]
        str_data_list = map(smart_unicode, data) # normalize to strings
        for value, choice in self.choices:
            checked_html = u''
            if smart_unicode(value) in str_data_list:
                checked_html = u' checked="checked"'
            field_name = u'%s%s' % (self.field_name, value)
            output.append(u'<li><input type="checkbox" id="%s" class="v%s" name="%s"%s value="on" /> <label for="%s">%s</label></li>' % \
                (self.get_id() + escape(value), self.__class__.__name__, field_name, checked_html,
                self.get_id() + escape(value), choice))
        output.append(u'</ul>')
        return mark_safe(u'\n'.join(output))
####################
# FILE UPLOADS #
####################
class FileUploadField(FormField):
    'An <input type="file"> field; rejects empty or failed uploads.'
    # NOTE(review): max_length is accepted but never stored or used here --
    # presumably kept for signature compatibility; confirm.
    def __init__(self, field_name, is_required=False, validator_list=None, max_length=None):
        if validator_list is None: validator_list = []
        self.field_name, self.is_required = field_name, is_required
        self.validator_list = [self.isNonEmptyFile] + validator_list
    def isNonEmptyFile(self, new_data, all_data):
        # Surface upload errors (recorded by prepare()) before size checks.
        if hasattr(new_data, 'upload_errors'):
            upload_errors = new_data.upload_errors()
            if upload_errors:
                raise validators.CriticalValidationError, upload_errors
        try:
            file_size = new_data.file_size
        except AttributeError:
            # Older upload dicts carry the raw bytes under 'content'.
            file_size = len(new_data['content'])
        if not file_size:
            raise validators.CriticalValidationError, ugettext("The submitted file is empty.")
    def render(self, data):
        return mark_safe(u'<input type="file" id="%s" class="v%s" name="%s" />' % \
            (self.get_id(), self.__class__.__name__, self.field_name))
    def prepare(self, new_data):
        # Stash any upload error so validation can report it.
        if hasattr(new_data, 'upload_errors'):
            upload_errors = new_data.upload_errors()
            new_data[self.field_name] = { '_file_upload_error': upload_errors }
    def html2python(data):
        if data is None:
            raise EmptyValue
        return data
    html2python = staticmethod(html2python)
class ImageUploadField(FileUploadField):
    "A FileUploadField that raises CriticalValidationError if the uploaded file isn't an image."
    def __init__(self, *args, **kwargs):
        FileUploadField.__init__(self, *args, **kwargs)
        # Run the image check before the base class's non-empty check.
        self.validator_list.insert(0, self.isValidImage)
    def isValidImage(self, field_data, all_data):
        try:
            validators.isValidImage(field_data, all_data)
        except validators.ValidationError, e:
            # Critical: skip remaining validators for this field.
            raise validators.CriticalValidationError, e.messages
####################
# INTEGERS/FLOATS #
####################
class IntegerField(TextField):
    "A TextField whose submitted value converts to an int (or None)."
    def __init__(self, field_name, length=10, max_length=None, is_required=False, validator_list=None, member_name=None):
        if validator_list is None: validator_list = []
        validator_list = [self.isInteger] + validator_list
        if member_name is not None:
            self.member_name = member_name
        TextField.__init__(self, field_name, length, max_length, is_required, validator_list)
    def isInteger(self, field_data, all_data):
        try:
            validators.isInteger(field_data, all_data)
        except validators.ValidationError, e:
            # Critical: skip remaining validators for this field.
            raise validators.CriticalValidationError, e.messages
    def html2python(data):
        if data == '' or data is None:
            return None
        return int(data)
    html2python = staticmethod(html2python)
class SmallIntegerField(IntegerField):
    "An IntegerField restricted to the signed 16-bit range."
    def __init__(self, field_name, length=5, max_length=5, is_required=False, validator_list=None):
        if validator_list is None: validator_list = []
        validator_list = [self.isSmallInteger] + validator_list
        IntegerField.__init__(self, field_name, length, max_length, is_required, validator_list)
    def isSmallInteger(self, field_data, all_data):
        if not -32768 <= int(field_data) <= 32767:
            raise validators.CriticalValidationError, ugettext("Enter a whole number between -32,768 and 32,767.")
class PositiveIntegerField(IntegerField):
    "An IntegerField restricted to values >= 0."
    def __init__(self, field_name, length=10, max_length=None, is_required=False, validator_list=None):
        if validator_list is None: validator_list = []
        validator_list = [self.isPositive] + validator_list
        IntegerField.__init__(self, field_name, length, max_length, is_required, validator_list)
    def isPositive(self, field_data, all_data):
        if int(field_data) < 0:
            raise validators.CriticalValidationError, ugettext("Enter a positive number.")
class PositiveSmallIntegerField(IntegerField):
    "An IntegerField restricted to the unsigned-ish 0..32767 range."
    def __init__(self, field_name, length=5, max_length=None, is_required=False, validator_list=None):
        if validator_list is None: validator_list = []
        validator_list = [self.isPositiveSmall] + validator_list
        IntegerField.__init__(self, field_name, length, max_length, is_required, validator_list)
    def isPositiveSmall(self, field_data, all_data):
        if not 0 <= int(field_data) <= 32767:
            raise validators.CriticalValidationError, ugettext("Enter a whole number between 0 and 32,767.")
class FloatField(TextField):
    "A TextField whose submitted value converts to a float (or None)."
    def __init__(self, field_name, is_required=False, validator_list=None):
        extra_validators = validator_list if validator_list is not None else []
        TextField.__init__(self, field_name, is_required=is_required,
                           validator_list=[validators.isValidFloat] + extra_validators)
    def html2python(data):
        # Empty/missing input means "no value", not 0.0.
        return None if (data == '' or data is None) else float(data)
    html2python = staticmethod(html2python)
class DecimalField(TextField):
    "A TextField whose submitted value converts to a decimal.Decimal (or None)."
    def __init__(self, field_name, max_digits, decimal_places, is_required=False, validator_list=None):
        if validator_list is None: validator_list = []
        self.max_digits, self.decimal_places = max_digits, decimal_places
        validator_list = [self.isValidDecimal] + validator_list
        # Initialise the TextField, making sure it's large enough to fit the number with a - sign and a decimal point.
        super(DecimalField, self).__init__(field_name, max_digits+2, max_digits+2, is_required, validator_list)
    def isValidDecimal(self, field_data, all_data):
        v = validators.IsValidDecimal(self.max_digits, self.decimal_places)
        try:
            v(field_data, all_data)
        except validators.ValidationError, e:
            # Critical: skip remaining validators for this field.
            raise validators.CriticalValidationError, e.messages
    def html2python(data):
        if data == '' or data is None:
            return None
        try:
            import decimal
        except ImportError:
            # Python < 2.4 fallback shipped with Django.
            from django.utils import _decimal as decimal
        try:
            return decimal.Decimal(data)
        except decimal.InvalidOperation, e:
            # Re-raise as ValueError so convert_post_data keeps the raw string.
            raise ValueError, e
    html2python = staticmethod(html2python)
####################
# DATES AND TIMES #
####################
class DatetimeField(TextField):
    """A FormField that automatically converts its data to a datetime.datetime object.
    The data should be in the format YYYY-MM-DD HH:MM:SS."""
    def __init__(self, field_name, length=30, max_length=None, is_required=False, validator_list=None):
        extra_validators = validator_list if validator_list is not None else []
        self.field_name = field_name
        self.length = length
        self.max_length = max_length
        self.is_required = is_required
        self.validator_list = [validators.isValidANSIDatetime] + extra_validators
    def html2python(data):
        "Converts the field into a datetime.datetime object"
        import datetime
        try:
            date_part, time_part = data.split()
            year, month, day = date_part.split('-')
            clock = time_part.split(':')
            hour, minute = clock[:2]
            # Seconds are optional in the submitted value.
            second = int(clock[2]) if len(clock) > 2 else 0
            return datetime.datetime(int(year), int(month), int(day),
                                     int(hour), int(minute), second)
        except ValueError:
            # Any malformed component yields None rather than an exception.
            return None
    html2python = staticmethod(html2python)
class DateField(TextField):
    """A FormField that automatically converts its data to a datetime.date object.
    The data should be in the format YYYY-MM-DD."""
    def __init__(self, field_name, is_required=False, validator_list=None):
        if validator_list is None: validator_list = []
        validator_list = [self.isValidDate] + validator_list
        TextField.__init__(self, field_name, length=10, max_length=10,
            is_required=is_required, validator_list=validator_list)
    def isValidDate(self, field_data, all_data):
        try:
            validators.isValidANSIDate(field_data, all_data)
        except validators.ValidationError, e:
            # Critical: skip remaining validators for this field.
            raise validators.CriticalValidationError, e.messages
    def html2python(data):
        "Converts the field into a datetime.date object"
        import time, datetime
        try:
            time_tuple = time.strptime(data, '%Y-%m-%d')
            return datetime.date(*time_tuple[0:3])
        except (ValueError, TypeError):
            # Malformed or missing input yields None.
            return None
    html2python = staticmethod(html2python)
class TimeField(TextField):
"""A FormField that automatically converts its data to a datetime.time object.
The data should be in the format HH:MM:SS or HH:MM:SS.mmmmmm."""
def __init__(self, field_name, is_required=False, validator_list=None):
if validator_list is None: validator_list = []
validator_list = [self.isValidTime] + validator_list
TextField.__init__(self, field_name, length=8, max_length=8,
is_required=is_required, validator_list=validator_list)
def isValidTime(self, field_data, all_data):
try:
validators.isValidANSITime(field_data, all_data)
except validators.ValidationError, e:
raise validators.CriticalValidationError, e.messages
def html2python(data):
"Converts the field into a datetime.time object"
import time, datetime
try:
part_list = data.split('.')
try:
time_tuple = time.strptime(part_list[0], '%H:%M:%S')
except ValueError: # seconds weren't provided
time_tuple = time.strptime(part_list[0], '%H:%M')
t = datetime.time(*time_tuple[3:6])
if (len(part_list) == 2):
t = t.replace(microsecond=int(part_list[1]))
return t
except (ValueError, TypeError, AttributeError):
return None
html2python = staticmethod(html2python)
####################
# INTERNET-RELATED #
####################
class EmailField(TextField):
"A convenience FormField for validating e-mail addresses"
def __init__(self, field_name, length=50, max_length=75, is_required=False, validator_list=None):
if validator_list is None: validator_list = []
validator_list = [self.isValidEmail] + validator_list
TextField.__init__(self, field_name, length, max_length=max_length,
is_required=is_required, validator_list=validator_list)
def isValidEmail(self, field_data, all_data):
try:
validators.isValidEmail(field_data, all_data)
except validators.ValidationError, e:
raise validators.CriticalValidationError, e.messages
class URLField(TextField):
    "A convenience FormField for validating URLs"
    def __init__(self, field_name, length=50, max_length=200, is_required=False, validator_list=None):
        if validator_list is None: validator_list = []
        validator_list = [self.isValidURL] + validator_list
        TextField.__init__(self, field_name, length=length, max_length=max_length,
            is_required=is_required, validator_list=validator_list)
    def isValidURL(self, field_data, all_data):
        "Validator: escalate an invalid URL to a critical error."
        try:
            validators.isValidURL(field_data, all_data)
        except validators.ValidationError, e:
            raise validators.CriticalValidationError, e.messages
class IPAddressField(TextField):
    # A convenience FormField for validating IPv4 addresses (15 chars max:
    # "255.255.255.255").
    def __init__(self, field_name, length=15, max_length=15, is_required=False, validator_list=None):
        if validator_list is None: validator_list = []
        validator_list = [self.isValidIPAddress] + validator_list
        TextField.__init__(self, field_name, length=length, max_length=max_length,
            is_required=is_required, validator_list=validator_list)
    def isValidIPAddress(self, field_data, all_data):
        "Validator: escalate an invalid IPv4 address to a critical error."
        try:
            validators.isValidIPAddress4(field_data, all_data)
        except validators.ValidationError, e:
            raise validators.CriticalValidationError, e.messages
    def html2python(data):
        # Normalize the empty string to None.
        return data or None
    html2python = staticmethod(html2python)
####################
# MISCELLANEOUS #
####################
class FilePathField(SelectField):
    "A SelectField whose choices are the files in a given directory."
    def __init__(self, field_name, path, match=None, recursive=False, is_required=False, validator_list=None, max_length=None):
        import os
        from django.db.models import BLANK_CHOICE_DASH
        if match is not None:
            import re
            match_re = re.compile(match)
        # Optional fields get a leading blank "dash" choice.
        choices = not is_required and BLANK_CHOICE_DASH[:] or []
        if recursive:
            # Walk the whole tree; store the full path, display it relative
            # to the base path.
            for root, dirs, files in os.walk(path):
                for f in files:
                    if match is None or match_re.search(f):
                        f = os.path.join(root, f)
                        choices.append((f, f.replace(path, "", 1)))
        else:
            try:
                # Only plain files directly inside path (directories skipped).
                for f in os.listdir(path):
                    full_file = os.path.join(path, f)
                    if os.path.isfile(full_file) and (match is None or match_re.search(f)):
                        choices.append((full_file, f))
            except OSError:
                # Unreadable or missing directory simply yields no choices.
                pass
        SelectField.__init__(self, field_name, choices, 1, is_required, validator_list)
class PhoneNumberField(TextField):
    "A convenience FormField for validating phone numbers (e.g. '630-555-1234')"
    def __init__(self, field_name, is_required=False, validator_list=None):
        if validator_list is None: validator_list = []
        validator_list = [self.isValidPhone] + validator_list
        # Sized for the 12-character XXX-XXX-XXXX format.
        TextField.__init__(self, field_name, length=12, max_length=12,
            is_required=is_required, validator_list=validator_list)
    def isValidPhone(self, field_data, all_data):
        "Validator: escalate an invalid phone number to a critical error."
        try:
            validators.isValidPhone(field_data, all_data)
        except validators.ValidationError, e:
            raise validators.CriticalValidationError, e.messages
class USStateField(TextField):
    "A convenience FormField for validating U.S. states (e.g. 'IL')"
    def __init__(self, field_name, is_required=False, validator_list=None):
        if validator_list is None: validator_list = []
        validator_list = [self.isValidUSState] + validator_list
        TextField.__init__(self, field_name, length=2, max_length=2,
            is_required=is_required, validator_list=validator_list)
    def isValidUSState(self, field_data, all_data):
        "Validator: escalate an invalid state abbreviation to a critical error."
        try:
            validators.isValidUSState(field_data, all_data)
        except validators.ValidationError, e:
            raise validators.CriticalValidationError, e.messages
    def html2python(data):
        if data:
            return data.upper() # Should always be stored in upper case
        return data
    html2python = staticmethod(html2python)
class CommaSeparatedIntegerField(TextField):
    "A convenience FormField for validating comma-separated integer fields"
    def __init__(self, field_name, max_length=None, is_required=False, validator_list=None):
        if validator_list is None: validator_list = []
        validator_list = [self.isCommaSeparatedIntegerList] + validator_list
        TextField.__init__(self, field_name, length=20, max_length=max_length,
            is_required=is_required, validator_list=validator_list)
    def isCommaSeparatedIntegerList(self, field_data, all_data):
        "Validator: escalate a malformed integer list to a critical error."
        try:
            validators.isCommaSeparatedIntegerList(field_data, all_data)
        except validators.ValidationError, e:
            raise validators.CriticalValidationError, e.messages
    def render(self, data):
        # Accept a list/tuple of strings and flatten it back to the
        # comma-separated text representation before rendering.
        if data is None:
            data = u''
        elif isinstance(data, (list, tuple)):
            data = u','.join(data)
        return super(CommaSeparatedIntegerField, self).render(data)
class RawIdAdminField(CommaSeparatedIntegerField):
    "Admin raw-id field: converts the comma-separated value to a list of ids."
    def html2python(data):
        # Guard first: empty/None input becomes an empty list, not [''].
        if not data:
            return []
        return data.split(',')
    html2python = staticmethod(html2python)
class XMLLargeTextField(LargeTextField):
    """
    A LargeTextField with an XML validator. The schema_path argument is the
    full path to a Relax NG compact schema to validate against.
    """
    def __init__(self, field_name, schema_path, **kwargs):
        self.schema_path = schema_path
        # Run schema validation before any caller-supplied validators.
        kwargs.setdefault('validator_list', []).insert(0, self.isValidXML)
        LargeTextField.__init__(self, field_name, **kwargs)
    def isValidXML(self, field_data, all_data):
        "Validator: escalate a schema violation to a critical error."
        v = validators.RelaxNGCompact(self.schema_path)
        try:
            v(field_data, all_data)
        except validators.ValidationError, e:
            raise validators.CriticalValidationError, e.messages
| |
import time
import random
import hashlib
import six
from social.utils import setting_name, module_member
from social.store import OpenIdStore, OpenIdSessionWrapper
from social.pipeline import DEFAULT_AUTH_PIPELINE, DEFAULT_DISCONNECT_PIPELINE
class BaseTemplateStrategy(object):
    """Pluggable renderer a strategy uses to produce HTML, either from a
    named template or from a raw HTML string."""
    def __init__(self, strategy):
        self.strategy = strategy
    def render(self, tpl=None, html=None, context=None):
        """Render tpl (preferred) or html with the given context dict.
        Raises ValueError when neither source is supplied."""
        if not (tpl or html):
            raise ValueError('Missing template or html parameters')
        ctx = context or {}
        if tpl:
            return self.render_template(tpl, ctx)
        return self.render_string(html, ctx)
    def render_template(self, tpl, context):
        """Render a template by name; framework subclasses implement this."""
        raise NotImplementedError('Implement in subclass')
    def render_string(self, html, context):
        """Render a raw template string; framework subclasses implement this."""
        raise NotImplementedError('Implement in subclass')
class BaseStrategy(object):
    """Framework-agnostic glue between an auth backend, its storage layer and
    the current HTTP request. The framework-specific operations are the
    abstract methods at the bottom of the class."""
    # Alphabet used by random_string().
    ALLOWED_CHARS = 'abcdefghijklmnopqrstuvwxyz' \
                    'ABCDEFGHIJKLMNOPQRSTUVWXYZ' \
                    '0123456789'
    def __init__(self, backend=None, storage=None, request=None,
                 tpl=BaseTemplateStrategy, backends=None, *args, **kwargs):
        # tpl and backend are classes; both get instantiated bound to self.
        self.tpl = tpl(self)
        self.request = request
        self.storage = storage
        self.backends = backends
        self.backend = backend(strategy=self, *args, **kwargs) \
            if backend else None
    def setting(self, name, default=None, backend=None):
        """Return the first defined setting value, trying the
        backend-qualified name, then the SOCIAL_AUTH-prefixed name, then the
        bare name; fall back to default."""
        names = [setting_name(name), name]
        backend = backend or getattr(self, 'backend', None)
        if backend:
            names.insert(0, setting_name(backend.name, name))
        for name in names:
            try:
                return self.get_setting(name)
            except (AttributeError, KeyError):
                pass
        return default
    def start(self):
        """Begin the auth process: redirect to the provider, or render its
        auth HTML for backends that POST instead of redirecting."""
        # Clean any partial pipeline info before starting the process
        self.clean_partial_pipeline()
        if self.backend.uses_redirect():
            return self.redirect(self.backend.auth_url())
        else:
            return self.html(self.backend.auth_html())
    def complete(self, *args, **kwargs):
        """Finish the auth process (provider callback)."""
        return self.backend.auth_complete(*args, **kwargs)
    def continue_pipeline(self, *args, **kwargs):
        """Resume a previously interrupted (partial) pipeline."""
        return self.backend.continue_pipeline(*args, **kwargs)
    def disconnect(self, user, association_id=None, *args, **kwargs):
        """Disconnect a social account from the given user."""
        return self.backend.disconnect(
            user=user, association_id=association_id,
            *args, **kwargs
        )
    def authenticate(self, *args, **kwargs):
        """Authenticate via the backend, injecting strategy/storage/backend."""
        kwargs['strategy'] = self
        kwargs['storage'] = self.storage
        kwargs['backend'] = self.backend
        return self.backend.authenticate(*args, **kwargs)
    def create_user(self, *args, **kwargs):
        """Delegate user creation to the storage layer."""
        return self.storage.user.create_user(*args, **kwargs)
    def get_user(self, *args, **kwargs):
        """Delegate user lookup to the storage layer."""
        return self.storage.user.get_user(*args, **kwargs)
    def session_setdefault(self, name, value):
        # NOTE(review): unlike dict.setdefault this always overwrites the
        # stored value before reading it back -- confirm the session
        # backends rely on this behavior before changing it.
        self.session_set(name, value)
        return self.session_get(name)
    def openid_session_dict(self, name):
        # Many frameworks are switching the session serialization from Pickle
        # to JSON to avoid code execution risks. Flask did this from Flask
        # 0.10, Django is switching to JSON by default from version 1.6.
        #
        # Sadly python-openid stores classes instances in the session which
        # fails the JSON serialization, the classes are:
        #
        #   openid.yadis.manager.YadisServiceManager
        #   openid.consumer.discover.OpenIDServiceEndpoint
        #
        # This method will return a wrapper over the session value used with
        # openid (a dict) which will automatically keep a pickled value for the
        # mentioned classes.
        return OpenIdSessionWrapper(self.session_setdefault(name, {}))
    def to_session_value(self, val):
        """Hook: convert a value for session storage (identity by default)."""
        return val
    def from_session_value(self, val):
        """Hook: convert a value read from the session (identity by default)."""
        return val
    def partial_to_session(self, next, backend, request=None, *args, **kwargs):
        """Serialize the state of a partial pipeline into a session-safe dict
        (user/social are reduced to ids so they can be re-fetched later)."""
        user = kwargs.get('user')
        social = kwargs.get('social')
        clean_kwargs = {
            'response': kwargs.get('response') or {},
            'details': kwargs.get('details') or {},
            'username': kwargs.get('username'),
            'uid': kwargs.get('uid'),
            'is_new': kwargs.get('is_new') or False,
            'new_association': kwargs.get('new_association') or False,
            'user': user and user.id or None,
            'social': social and {
                'provider': social.provider,
                'uid': social.uid
            } or None
        }
        # Only allow well-known serializable types
        types = (dict, list, tuple, set) + six.integer_types + \
                six.string_types + (six.text_type,) + (six.binary_type,)
        clean_kwargs.update((name, value) for name, value in kwargs.items()
                            if isinstance(value, types))
        # Clean any MergeDict data type from the values
        clean_kwargs.update((name, dict(value))
                            for name, value in clean_kwargs.items()
                            if isinstance(value, dict))
        return {
            'next': next,
            'backend': backend.name,
            'args': tuple(map(self.to_session_value, args)),
            'kwargs': dict((key, self.to_session_value(val))
                           for key, val in clean_kwargs.items())
        }
    def partial_from_session(self, session):
        """Inverse of partial_to_session: rebuild (next, backend, args,
        kwargs), re-fetching the user and social-auth objects by id."""
        kwargs = session['kwargs'].copy()
        user = kwargs.get('user')
        social = kwargs.get('social')
        if isinstance(social, dict):
            kwargs['social'] = self.storage.user.get_social_auth(**social)
        if user:
            kwargs['user'] = self.storage.user.get_user(user)
        return (
            session['next'],
            session['backend'],
            list(map(self.from_session_value, session['args'])),
            dict((key, self.from_session_value(val))
                 for key, val in kwargs.items())
        )
    def clean_partial_pipeline(self, name='partial_pipeline'):
        """Drop any stored partial pipeline state from the session."""
        self.session_pop(name)
    def openid_store(self):
        """Return the OpenID store bound to this strategy."""
        return OpenIdStore(self)
    def get_pipeline(self):
        """Return the configured auth pipeline (or the default one)."""
        return self.setting('PIPELINE', DEFAULT_AUTH_PIPELINE)
    def get_disconnect_pipeline(self):
        """Return the configured disconnect pipeline (or the default one)."""
        return self.setting('DISCONNECT_PIPELINE', DEFAULT_DISCONNECT_PIPELINE)
    def random_string(self, length=12, chars=ALLOWED_CHARS):
        # Implementation borrowed from django 1.4
        # NOTE(review): SystemRandom() is only probed for availability here;
        # random.choice below still draws from the default (re-seeded) PRNG.
        # Confirm this is acceptable wherever these strings act as tokens.
        try:
            random.SystemRandom()
        except NotImplementedError:
            key = self.setting('SECRET_KEY', '')
            seed = '{0}{1}{2}'.format(random.getstate(), time.time(), key)
            random.seed(hashlib.sha256(seed.encode()).digest())
        return ''.join([random.choice(chars) for i in range(length)])
    def absolute_uri(self, path=None):
        """Absolute URI for path, upgraded to https when ON_HTTPS is set."""
        uri = self.build_absolute_uri(path)
        if self.setting('ON_HTTPS'):
            uri = uri.replace('http://', 'https://')
        return uri
    def get_language(self):
        """Return current language"""
        return ''
    def send_email_validation(self, email):
        """Create a validation code for email and send it via the configured
        EMAIL_VALIDATION_FUNCTION; return the code object."""
        email_validation = self.setting('EMAIL_VALIDATION_FUNCTION')
        send_email = module_member(email_validation)
        code = self.storage.code.make_code(email)
        send_email(self, code)
        return code
    def validate_email(self, email, code):
        """Mark the verification code as used; return False for unknown or
        mismatching codes."""
        verification_code = self.storage.code.get_code(code)
        if not verification_code or verification_code.code != code:
            return False
        else:
            verification_code.verify()
            return True
    def render_html(self, tpl=None, html=None, context=None):
        """Render given template or raw html with given context"""
        return self.tpl.render(tpl, html, context)
    # Implement the following methods on strategies sub-classes
    def redirect(self, url):
        """Return a response redirect to the given URL"""
        raise NotImplementedError('Implement in subclass')
    def get_setting(self, name):
        """Return value for given setting name"""
        raise NotImplementedError('Implement in subclass')
    def html(self, content):
        """Return HTTP response with given content"""
        raise NotImplementedError('Implement in subclass')
    def request_data(self, merge=True):
        """Return current request data (POST or GET)"""
        raise NotImplementedError('Implement in subclass')
    def request_host(self):
        """Return current host value"""
        raise NotImplementedError('Implement in subclass')
    def session_get(self, name, default=None):
        """Return session value for given key"""
        raise NotImplementedError('Implement in subclass')
    def session_set(self, name, value):
        """Set session value for given key"""
        raise NotImplementedError('Implement in subclass')
    def session_pop(self, name):
        """Pop session value for given key"""
        raise NotImplementedError('Implement in subclass')
    def build_absolute_uri(self, path=None):
        """Build absolute URI with given (optional) path"""
        raise NotImplementedError('Implement in subclass')
| |
'''
Given input of a fixed k, checks whether the cop number of a graph,
G, is less than or equal to k.
'''
import networkx as nx
from copy import deepcopy
def kproduct(graph):
    '''
    Applies the tensor product to graph k-1 times.
    INPUT:
        graph: A networkx graph
    OUTPUT
        tensor: The graph on which the tensor product has been applied k-1 times.
    '''
    # k is a module-level global set by copk() or the CLI entry point.
    # For k == 1 the loop does not run and a copy of graph is returned.
    tensor = graph.copy()
    for i in range(0, k-1):
        tensor = nx.tensor_product(tensor, graph)
    return tensor
def isOriginal(vertex):
    '''
    Goes through all the vertices of the original graph G,
    and checks if vertex is one of them.
    INPUT
        vertex: A hashable object representing a vertex
    OUTPUT
        original: A boolean value on whether vertex is one of the originals
    '''
    # vertex is "original" exactly when it equals some node of the global G.
    return any(node == vertex for node in G.nodes())
def extractVertices(vertex):
    '''
    Given a vertex resulting from a tensor product,
    extracts each component of the vertex.
    INPUT
        vertex: A nest of 2-tuples
    OUTPUT
        components: A list of of the vertices nested in vertex
    '''
    #There are k - 1 nests because the tensor product is applied k-1 times
    levels = k - 1
    #In this case the tensor product was not applied and vertex is not a tuple
    if levels == 0:
        return [vertex]
    # The nesting is left-associated: ((..(v1, v2), v3), ..., vk).
    components = [vertex[1]]
    curNest = vertex[0]
    #Go through each nest and take the vertices
    for i in range(0, levels):
        #If the current nest is not a tuple we have hit the last layer
        if isOriginal(curNest):
            components.append(curNest)
            break
        components.append(curNest[1])
        curNest = curNest[0]
    return components
def Gneighborhood(vertex):
    '''
    Given a vertex from a tensor product,
    returns the neighborhood of each component
    vertex in G.
    INPUT
        vertex: A vertex from a tensor product
    OUTPUT
        neighborhood: A set of all vertices in the G neighborhood
        of the components of vertex
    '''
    neighborhood = set()
    #Check if we are dealing with just one vertex
    if isOriginal(vertex):
        subvertices = [vertex]
    else:
        #We want to get every component of vertex. It is a nest of two tuples
        subvertices = extractVertices(vertex)
    #Go through each component and collect its neighbors. (The loop variable
    #is named subvertex so it no longer shadows the vertex parameter.)
    for subvertex in subvertices:
        #The ego graph returns the neighbors of subvertex in G
        neighbors = nx.ego_graph(G, subvertex).nodes()
        neighborhood.update(neighbors)
    return neighborhood
def NGSet(vertexSet):
    '''
    Given a set of vertices in P, returns the union of
    their G neighborhoods.
    INPUT
        vertexSet: A set of vertices in P
    OUTPUT
        setNeighborhood: The G neighborhood of vertexSet
    '''
    # Union the precomputed neighborhoods (the global NG dict is keyed by
    # the string form of each vertex).
    setNeighborhood = set()
    for member in vertexSet:
        setNeighborhood |= NG[str(member)]
    return setNeighborhood
def checkChange(fOld, fNew):
    '''
    Given an old set for f, and a new one,
    checks whether they have any values that are different.
    INPUT
        fOld: f before modification
        fNew: f after modification
    OUTPUT
        dif: Boolean value whether fOld = fNew
    '''
    # Two sets differ exactly when their symmetric difference is non-empty.
    return any(len(fOld[vertex] ^ fNew[vertex]) != 0 for vertex in fOld)
def getCopNumber():
    '''
    Finds the cop number of G by the algorithm on pg. 122
    of The Game of Cops and Robbers on Graphs.
    OUTPUT
        greater: True is the cop number is greater than k and false otherwise
    '''
    #Now we want to check update f according to property 2 on page 120
    #We want to keep updating until f stops changing or f(u) = {}
    changing = True
    empty = False
    fOld = deepcopy(f)
    while changing:
        #print 'Starting new f update'
        #Check if f has any extra values that don't fit prop 2
        for edge in P.edges():
            source = str(edge[0])
            target = str(edge[1])
            #Update f
            # NOTE(review): the second assignment sees f[source] already
            # shrunk by the first (in-place, Gauss-Seidel style iteration);
            # confirm the algorithm intends this rather than a simultaneous
            # update.
            f[source] = f[source] & NGSet(f[target])
            f[target] = f[target] & NGSet(f[source])
            #Stop if we have made an empty set
            if len(f[source]) == 0 or len(f[target]) == 0:
                changing = False
                empty = True
                break
        #If we haven't already found an empty f(u) check if f has changed
        if not empty:
            #Stop if f hasn't changed
            if not checkChange(fOld, f):
                changing = False
            else:
                fOld = deepcopy(f)
    #Now check if there are any empty values for f
    if not empty:
        for vertex in f:
            if len(f[vertex]) == 0:
                empty = True
    #The existence of empty values for f means the cop number <= k
    if empty:
        return False
    else:
        return True
# Module-level state shared between initGraph(), getCopNumber() and copk().
Gnodes = set()  # vertices of the input graph G
P = nx.Graph()  # the (k-1)-fold tensor product of G
Pnodes = set()  # vertices of P
NGP = {}        # str(P-vertex) -> G-neighborhood of its component vertices
NG = {}         # str(G-vertex) -> its neighborhood in G (via nx.ego_graph)
f = {}          # str(P-vertex) -> candidate robber positions (see initGraph)
def initGraph():
    '''
    Makes sure the initial graph is reflexive and creates the tensor
    products, neighborhoods, and initializes f.
    '''
    #Let us use the globals for Gnodes, P, and Pnodes
    global Gnodes, P, Pnodes, NGP, NG, f
    Gnodes = set(G.nodes())
    #Add self loops at each vertex
    # (note: this mutates the global G in place)
    for node in G.nodes():
        G.add_edge(node, node)
    #Get the kth tensor product
    P = kproduct(G)
    Pnodes = set(P.nodes())
    #Make sure our globals are reset
    NGP = {}
    NG = {}
    f = {}
    #Make a dict where the keys are vertices of P and entries are their G neighbors
    for node in Pnodes:
        NGP[str(node)] = Gneighborhood(node)
    #Now make a dict for the neighborhood of all the vertices of G
    for node in Gnodes:
        NG[str(node)] = Gneighborhood(node)
    #Create f given on page 120 of The Game of Cops and Robbers on Graphs
    for node in Pnodes:
        f[str(node)] = Gnodes - NGP[str(node)]
#If we are running from the cmd line use arguments
if __name__ == '__main__':
    import sys
    if len(sys.argv) != 3:
        print('The format for the arguments is: graphFileLocation k')
        exit()
    k = int(sys.argv[2])
    G = nx.read_multiline_adjlist(sys.argv[1])
    initGraph()
    numK = getCopNumber()
    if numK:
        print('The cop number is >', k)
    else:
        print('The cop number is <=', k)
    # Reset the globals so later module-style use starts from a clean state.
    G = 0
    k = 0
#Otherwise we are using a module
def copk(graph, kVal):
    "Return True when the cop number of graph is > kVal, False when it is <= kVal."
    global G, k
    #Set globals for the module
    # (copy so initGraph's self-loop mutation does not touch the caller's graph)
    G = graph.copy()
    k = kVal
    #Create the initial values and find whether it fits
    initGraph()
    numK = getCopNumber()
    return numK
def cop_num(graph):
    '''
    Given a graph, calculates it's cop number by repeatedly calling
    copk until it returns false.
    INPUT
        graph: A networkx graph
    OUTPUT
        copNum: The cop number of graph
    '''
    # The cop number is the smallest candidate k for which copk says the
    # true cop number is no longer greater than k.
    candidate = 1
    while copk(graph, candidate):
        candidate += 1
    return candidate
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from collections import OrderedDict
import cherrypy
from ..constants import SettingDefault, SettingKey
from .model_base import Model, ValidationException
from girder.utility import camelcase, plugin_utilities
class Setting(Model):
    """
    This model represents server-wide configuration settings as key/value pairs.
    """
    def initialize(self):
        self.name = 'setting'
        self.ensureIndices(['key'])
        # Parsed CORS settings, rebuilt lazily by corsSettingsDict().
        self._corsSettingsCache = None
    def validate(self, doc):
        """
        This method is in charge of validating that the setting key is a valid
        key, and that for that key, the provided value is valid. It first
        allows plugins to validate the setting, but if none of them can, it
        assumes it is a core setting and does the validation here.
        """
        key = doc['key']
        # Dispatch to validate<CamelCasedKey> if such a method exists.
        funcName = 'validate'+camelcase(key)
        if callable(getattr(self, funcName, None)):
            getattr(self, funcName)(doc)
        else:
            raise ValidationException(
                'Invalid setting key "{}".'.format(key), 'key')
        return doc
    def validateCorePluginsEnabled(self, doc):
        """
        Ensures that the set of plugins passed in is a list of valid plugin
        names. Removes any invalid plugin names, removes duplicates, and adds
        all transitive dependencies to the enabled list.
        """
        # isinstance rather than an exact type() check: list subclasses are
        # just as acceptable here.
        if not isinstance(doc['value'], list):
            raise ValidationException(
                'Plugins enabled setting must be a list.', 'value')
        allPlugins = plugin_utilities.findAllPlugins()
        doc['value'] = set(doc['value'])
        def addDeps(plugin):
            # Recursively add transitive dependencies; the membership guard
            # also protects against dependency cycles.
            for dep in allPlugins[plugin]['dependencies']:
                if dep not in doc['value']:
                    doc['value'].add(dep)
                    addDeps(dep)
        for enabled in list(doc['value']):
            if enabled in allPlugins:
                addDeps(enabled)
            else:
                # Silently drop unknown plugin names.
                doc['value'].remove(enabled)
        doc['value'] = list(doc['value'])
    def validateCoreAddToGroupPolicy(self, doc):
        doc['value'] = doc['value'].lower()
        if doc['value'] not in ('never', 'noadmin', 'nomod', 'yesadmin',
                                'yesmod', ''):
            raise ValidationException(
                'Add to group policy must be one of "never", "noadmin", '
                '"nomod", "yesadmin", or "yesmod".', 'value')
    def validateCoreCookieLifetime(self, doc):
        try:
            doc['value'] = int(doc['value'])
            if doc['value'] > 0:
                return
        except (TypeError, ValueError):
            # int() raises TypeError for None/containers and ValueError for
            # malformed strings; both mean the value is invalid.
            pass  # We want to raise the ValidationException
        raise ValidationException(
            'Cookie lifetime must be an integer > 0.', 'value')
    def validateCoreCorsAllowMethods(self, doc):
        if isinstance(doc['value'], basestring):
            methods = doc['value'].replace(",", " ").strip().upper().split()
            # remove duplicates, preserving order
            methods = list(OrderedDict.fromkeys(methods))
            doc['value'] = ", ".join(methods)
            self.corsSettingsCacheClear()
            return
        raise ValidationException(
            'Allowed methods must be a comma-separated list or an empty '
            'string.', 'value')
    def validateCoreCorsAllowHeaders(self, doc):
        if isinstance(doc['value'], basestring):
            headers = doc['value'].replace(",", " ").strip().split()
            # remove duplicates, preserving order
            headers = list(OrderedDict.fromkeys(headers))
            doc['value'] = ", ".join(headers)
            self.corsSettingsCacheClear()
            return
        raise ValidationException(
            'Allowed headers must be a comma-separated list or an empty '
            'string.', 'value')
    def validateCoreCorsAllowOrigin(self, doc):
        if isinstance(doc['value'], basestring):
            origins = doc['value'].replace(",", " ").strip().split()
            # normalize away trailing slashes so origins compare cleanly
            origins = [origin.rstrip('/') for origin in origins]
            # remove duplicates, preserving order
            origins = list(OrderedDict.fromkeys(origins))
            doc['value'] = ", ".join(origins)
            self.corsSettingsCacheClear()
            return
        raise ValidationException(
            'Allowed origin must be a comma-separated list of base urls or * '
            'or an empty string.', 'value')
    def validateCoreEmailFromAddress(self, doc):
        if not doc['value']:
            raise ValidationException(
                'Email from address must not be blank.', 'value')
    def validateCoreEmailHost(self, doc):
        if isinstance(doc['value'], basestring):
            doc['value'] = doc['value'].strip()
            return
        raise ValidationException(
            'Email host must be a string.', 'value')
    def defaultCoreEmailHost(self):
        """Derive the email host from the current request, when there is one.
        Returns None outside of a request context."""
        if (cherrypy.request and cherrypy.request.local and
                cherrypy.request.local.name):
            host = '://'.join((cherrypy.request.scheme,
                               cherrypy.request.local.name))
            if cherrypy.request.local.port != 80:
                host += ':{}'.format(cherrypy.request.local.port)
            return host
    def validateCoreRegistrationPolicy(self, doc):
        doc['value'] = doc['value'].lower()
        if doc['value'] not in ('open', 'closed'):
            raise ValidationException(
                'Registration policy must be either "open" or "closed".',
                'value')
    def validateCoreSmtpHost(self, doc):
        if not doc['value']:
            raise ValidationException(
                'SMTP host must not be blank.', 'value')
    def validateCoreUploadMinimumChunkSize(self, doc):
        try:
            doc['value'] = int(doc['value'])
            if doc['value'] >= 0:
                return
        except (TypeError, ValueError):
            # Non-numeric types raise TypeError; both cases are invalid.
            pass  # We want to raise the ValidationException
        raise ValidationException(
            'Upload minimum chunk size must be an integer >= 0.',
            'value')
    def get(self, key, default='__default__'):
        """
        Retrieve a setting by its key.
        :param key: The key identifying the setting.
        :type key: str
        :param default: If no such setting exists, returns this value instead.
        :returns: The value, or the default value if the key is not found.
        """
        setting = self.findOne({'key': key})
        if setting is None:
            # Compare with == rather than "is": identity of equal string
            # literals is an interpreter implementation detail.
            if default == '__default__':
                default = self.getDefault(key)
            return default
        else:
            return setting['value']
    def set(self, key, value):
        """
        Save a setting. If a setting for this key already exists, this will
        replace the existing value.
        :param key: The key identifying the setting.
        :type key: str
        :param value: The object to store for this setting.
        :returns: The document representing the saved Setting.
        """
        setting = self.findOne({'key': key})
        if setting is None:
            setting = {
                'key': key,
                'value': value
            }
        else:
            setting['value'] = value
        return self.save(setting)
    def unset(self, key):
        """
        Remove the setting for this key. If no such setting exists, this is
        a no-op.
        :param key: The key identifying the setting to be removed.
        :type key: str
        """
        for setting in self.find({'key': key}):
            self.remove(setting)
    def getDefault(self, key):
        """
        Retrieve the system default for a value.
        :param key: The key identifying the setting.
        :type key: str
        :returns: The default value if the key is present in both SettingKey
                  and referenced in SettingDefault; otherwise None.
        """
        default = None
        if key in SettingDefault.defaults:
            default = SettingDefault.defaults[key]
        else:
            # Fall back to a default<CamelCasedKey> method, if one exists.
            funcName = 'default'+camelcase(key)
            if callable(getattr(self, funcName, None)):
                default = getattr(self, funcName)()
        return default
    def corsSettingsCacheClear(self):
        """
        Clear the CORS information we have stored for quick use, forcing it to
        be reloaded the next time it is requested.
        """
        self._corsSettingsCache = None
    def corsSettingsDict(self):
        """Return a dictionary of CORS settings.  This parses the user settings
        into a format that is useful for the REST api.  The dictionary
        contains:
          * allowOrigin: None if no CORS settings are present, or a list of
            allowed origins.  If the list contains '*', all origins are allowed.
          * allowMethods: None if all methods allowed, or a list of allowed
            methods.  Note that regardless of this list, GET, HEAD, and some POST
            methods are always allowed.  These are always upper case.
          * allowHeaders: a set of allowed headers.  This includes the headers
            which are always allowed by CORS.  There are always all lower case.
        :returns: a dictionary as described above.
        """
        if self._corsSettingsCache is None:
            cors = {}
            allowOrigin = self.get(SettingKey.CORS_ALLOW_ORIGIN)
            if not allowOrigin:
                cors['allowOrigin'] = None
            else:
                cors['allowOrigin'] = allowOrigin.replace(",", " ").strip(). \
                    split()
            methods = self.get(SettingKey.CORS_ALLOW_METHODS)
            if not methods:
                cors['allowMethods'] = None
            else:
                cors['allowMethods'] = methods.replace(",", " ").strip(). \
                    upper().split()
            headers = set(self.get(SettingKey.CORS_ALLOW_HEADERS).replace(
                ",", " ").strip().lower().split())
            headers = {header.lower() for header in headers.union({
                'Accept',
                # in defaults: Accept-Encoding
                'Accept-Language',
                # in defaults: Authorization
                # in defaults: Content-Dispostion
                'Connection',
                'Content-Language',
                'Content-Length',
                # Content-Type is in the defaults besides being listed here,
                # because some CORS requests don't have to permit it.  We side
                # on always allowing it, though it may need to be in the
                # allowed headers that are sent to the browser for the browser
                # to be willing to send the CORS request
                'Content-Type',
                # in defaults: Cookie
                # in defaults: Girder-Token
                'Host',
                'Origin',
                'Referer',
                'User-Agent'})}
            cors['allowHeaders'] = headers
            self._corsSettingsCache = cors
        return self._corsSettingsCache
| |
import datetime
import requests
class Forecast():
    """Wrapper around a forecast.io JSON response.

    Sections that were excluded from the original request
    (currently/minutely/hourly/daily) are fetched lazily with a follow-up
    request and cached in self.json.
    """
    def __init__(self, data, url, headers):
        self.url = url
        self.http_headers = headers
        self.json = data
    def update(self):
        """Re-fetch the forecast and replace the cached JSON payload."""
        r = requests.get(self.url)
        # Must assign to self.json (not self.data): every accessor below
        # reads self.json, so storing the refresh anywhere else would leave
        # the forecast permanently stale.
        self.json = r.json()
        self.http_headers = r.headers
    def _fetch_block(self, key, exclude):
        """Return self.json[key], requesting and caching it on demand when it
        was excluded from the original response."""
        if key not in self.json:
            url = "%s&exclude=%s" % (self.url.split('&')[0], exclude)
            response = requests.get(url).json()
            self.json[key] = response[key]
        return self.json[key]
    def currently(self):
        try:
            return ForecastioDataPoint(
                self._fetch_block('currently',
                                  'minutely,hourly,daily,alerts,flags'))
        except Exception:
            # Network or payload problems degrade to an empty data point.
            return ForecastioDataPoint()
    def minutely(self):
        try:
            return ForecastioDataBlock(
                self._fetch_block('minutely',
                                  'currently,hourly,daily,alerts,flags'))
        except Exception:
            return ForecastioDataBlock()
    def hourly(self):
        try:
            return ForecastioDataBlock(
                self._fetch_block('hourly',
                                  'minutely,currently,daily,alerts,flags'))
        except Exception:
            return ForecastioDataBlock()
    def daily(self):
        try:
            return ForecastioDataBlock(
                self._fetch_block('daily',
                                  'minutely,currently,hourly,alerts,flags'))
        except Exception:
            return ForecastioDataBlock()
class ForecastioDataBlock():
    # One time-span section of the response: an optional summary/icon plus a
    # list of ForecastioDataPoint objects built from d['data'].
    def __init__(self, d=None):
        try:
            self.summary = d['summary']
        except:
            # Missing key (or d being None) simply yields None.
            self.summary = None
        try:
            self.icon = d['icon']
        except:
            self.icon = None
        self.data = []
        if d is not None:
            for datapoint in d['data']:
                self.data.append(ForecastioDataPoint(datapoint))
    def __unicode__(self):
        return '<ForecastioDataBlock instance: ' \
               '%s with %d ForecastioDataPoints>' % (self.summary,
                                                     len(self.data),)
    def __str__(self):
        # Python 2 convention: delegate to __unicode__ and UTF-8 encode.
        return unicode(self).encode('utf-8')
class ForecastioDataPoint():
    """
    Weather conditions at a single point in time.

    Every attribute defaults to None when the field is missing from the
    raw dict; the three *Time fields are converted from unix timestamps
    to datetime objects.
    """

    # attribute name -> key in the raw forecast.io JSON dict.
    # NOTE(review): 'windspeed', 'windbaring', 'cloudcover' and 'visbility'
    # keep their original (misspelled) attribute names for backward
    # compatibility; 'visbility' also reads the misspelled JSON key exactly
    # as before -- the upstream API key is presumably 'visibility', confirm
    # before changing.
    _FIELDS = (
        ('icon', 'icon'),
        ('summary', 'summary'),
        ('precipIntensity', 'precipIntensity'),
        ('precipIntensityMax', 'precipIntensityMax'),
        ('precipIntensityMaxTime', 'precipIntensityMaxTime'),
        ('precipProbability', 'precipProbability'),
        ('precipType', 'precipType'),
        ('precipAccumulation', 'precipAccumulation'),
        ('temperature', 'temperature'),
        ('temperatureMin', 'temperatureMin'),
        ('temperatureMinTime', 'temperatureMinTime'),
        ('temperatureMax', 'temperatureMax'),
        ('temperatureMaxTime', 'temperatureMaxTime'),
        ('dewPoint', 'dewPoint'),
        ('windspeed', 'windSpeed'),
        ('windbaring', 'windBearing'),
        ('cloudcover', 'cloudCover'),
        ('humidity', 'humidity'),
        ('pressure', 'pressure'),
        ('visbility', 'visbility'),
        ('ozone', 'ozone'),
    )
    # attributes holding unix timestamps, converted to datetime on load
    _TIME_FIELDS = (
        ('time', 'time'),
        ('sunriseTime', 'sunriseTime'),
        ('sunsetTime', 'sunsetTime'),
    )

    def __init__(self, d=None):
        # Treat None (and any non-dict) as an empty data point.
        data = d if isinstance(d, dict) else {}
        for attr, key in self._FIELDS:
            setattr(self, attr, data.get(key))
        for attr, key in self._TIME_FIELDS:
            setattr(self, attr, self._to_datetime(data.get(key)))

    @staticmethod
    def _to_datetime(value):
        """Convert a unix timestamp to datetime, or None if absent/invalid."""
        try:
            return datetime.datetime.fromtimestamp(int(value))
        except (TypeError, ValueError):
            return None

    def __unicode__(self):
        return '<ForecastioDataPoint instance: ' \
               '%s at %s>' % (self.summary, self.time,)

    def __str__(self):
        # BUG FIX: `unicode()` does not exist on Python 3; return the
        # formatted string directly.
        return self.__unicode__()
| |
r"""
SANS Resolution Simulator
=========================
Propagate a neutron from an isotropic source through a source and sample
pinhole and onto a detector. For each pixel on the detector, compute the
effective resolution.
Usage
=====
Modify instrument geometry, target pixels and number of neutrons at the
bottom of the script and run using::
python res.py
You can look at the various stages of the simulation by uncommenting
the intermediate "plot" function calls.
Theory
======
The first step is to generate a set of neutrons at small angle $\theta$
along the direction of the beam, and uniform phi, with starting position
$(x,y)$ in the source aperture. Each neutron is given an independent
wavelength $\lambda$ from a triangular distribution resulting from the
upstream velocity selector. The $\theta$ range is determined by the
distance between the source aperture and the sample aperture, with
extra radius to account for finite aperture size and gravity effects.
The $(\theta, \phi)$ spherical coordinates used to generate the initial
neutron population are converted to $(\theta_{\rm az}, \theta_{\rm el})$
to make gravity effects easier to calculate.
The sample aperture is shifted slightly upward from $(0,0)$ so that
a beam of neutrons of the alignment wavelength will be centered on
the detector. The aperture is not readjusted when changing wavelengths,
which will result in a main beam that is slightly above $(0,0)$ for
shorter wavelengths, or below for longer wavelengths. At 14m, changing
from 8 A to 16 A will drop the beam by 10 pixels or so. Since data
reduction will recenter $(q_x,q_y)$ on the detector position, the
detector is shifted so that the center pixel is at $q=0$.
After filtering through the sample aperture, we are left with a
selection of neutrons at position $(s_x, s_y)$ and angle
$(s_{\rm az}, s_{\rm el})$ incident on the sample. For each
source neutron, we generate a random position $(p_x, p_y)$ within
the target pixel on the detector, and determine the direction
$q_{\rm az}$ and throwing angle $q_{\rm el}$ required to reach that
detector position.
To determine the $(\theta,\phi)$ angle of scattering, we compare
the position $D$ on the detector of the incident neutron travelling
in a straight line without gravity (this is the beam center), to
the position $P$ on the detector of the scattered neutron travelling
in a straight line without gravity (this is the relative $(q_x,q_y)$
of the scattered neutron). Given the position $S$ of the sample
$\theta = \tan^{-1}(||P-D||/||D-S||)$ and
$\phi = \tan^{-1}((pn_y-d_y)/(pn_x-d_x))$.
The scattering intensity $I(q)$ which we are using to compute the
resolution effects is only a function of angle and wavelength.
The gravity effect is accounted for in determining the throwing
angle required to reach the pixel.
We can estimate the resolution of the pixel $(i,j)$ by looking
at the histograms of our populations of $\theta$, $\phi$,
$Q_\parallel = \frac{4 \pi}{\lambda} \sin(\theta/2)$
and $Q_\perp = Q_\parallel (\phi - \bar\phi)$ where $\bar\phi$
is the nominal scattering angle of the pixel.
The above $(\theta,\phi)$ calculation is slightly incorrect since
the point $P$ is in the plane of the detector, which is not quite
normal to the direction of the beam $(s_{\rm az}, s_{\rm el})$
incident on the sample. This effect is insignificant so it is
not active in the code, but it is calculated as follows.
Let $S = (s_x, s_y, z_s)$ be a point in the sample where we have a
simulated neutron travelling in direction
$n = (s_\rm{az}, s_\rm{el}) = (s_\theta \cos s_\phi, s_\theta \sin s_\phi)$,
and let $P = (p_x, p_y, z_d)$ be a point on the detector which receives the
scattered neutron. We calculate the point $D = (d_x, d_y, z_d)
= (s_x + (z_d-z_s)*\tan s_\rm{az}, s_y + (z_d-z_s)*\tan s_\rm{el}, z_d)$
where the neutron travelling along $n$ would intercept the detector. We then
take the plane through $D$ normal to $n$ and intersect it with the line
$\bar{SP}$ as follows:
.. math::
Pn = S + { (D-S) \cdot n \over I \cdot n } I
= S + { ||D-S|| \over I \cdot n } I
where the $n = (D-S) / ||D-S||$ is the plane normal to the incident neutron
where it would intercept the detector, and $I = (P-S) / ||P-S||$ is the
direction of the scattered neutron which would intercept the detector at $P$.
Given the small angles used in SAS, $Pn \approx P$.
"""
from __future__ import division, print_function
from numpy import (
sqrt, exp, log, pi, sin, cos, tan,
arccos, arcsin, arctan, arctan2, degrees)
from scipy.stats import circmean, circstd
from numpy.random import rand
import numpy as np
import matplotlib.pyplot as plt
# pylint: disable=invalid-name
earth_gravity = 9.80665 # m/s^2
neutron_mass = 1.00866491597 #(43) u
plancks_constant = 4.13566733e-15 #(10) eV s
electron_volt = 1.602176487e-19 #(40) J / eV
atomic_mass_constant = 1.660538782e-27 #(83) kg / u
# h/m_n scaled so that velocity [m/s] = VELOCITY_FACTOR / wavelength [A]
VELOCITY_FACTOR = (plancks_constant*electron_volt
                   / (neutron_mass * atomic_mass_constant)) * 1e10
def to_velocity(wavelength): # m/s
    """convert wavelength in Angstroms to velocity in m/s"""
    # BUG FIX: the docstrings of to_velocity and to_wavelength were swapped.
    return VELOCITY_FACTOR / wavelength
def to_wavelength(velocity): # A
    """convert velocity in m/s to wavelength in Angstroms"""
    return VELOCITY_FACTOR / velocity
def plot3(x, y, z):
    """Draw a 3D scatter plot of the points (x, y, z) inside a unit box."""
    # Importing Axes3D registers the '3d' projection with matplotlib.
    from mpl_toolkits.mplot3d import Axes3D
    figure = plt.figure()
    axes = figure.add_subplot(111, projection='3d')
    axes.scatter(x, y, z)
    for set_limits in (axes.set_xlim3d, axes.set_ylim3d, axes.set_zlim3d):
        set_limits(-1, 1)
    figure.canvas.draw()
def plot(x, y, title):
    """Scatter plot of y against x with equal axis scaling and a title."""
    plt.plot(x, y, '.')
    plt.title(title)
    plt.axis('equal')
    plt.grid(True)
def plot_angles(theta, phi, bins=50):
    """Histogram theta and phi (in degrees) and scatter one against the other."""
    theta_deg, phi_deg = degrees(theta), degrees(phi)
    # side-by-side histograms of each angle
    for position, values, label in ((131, theta_deg, "theta (degrees)"),
                                    (132, phi_deg, "phi (degrees)")):
        plt.subplot(position)
        plt.hist(values, bins=bins)
        plt.xlabel(label)
        plt.grid(True)
    # joint distribution
    plt.subplot(133)
    plt.plot(theta_deg, phi_deg, '.', ms=1)
    plt.grid(True)
    plt.xlabel('theta (degrees)')
    plt.ylabel('phi (degrees)')
def plot_q(q, phi, title, plot_phi=True):
    """
    Histogram the q values with a matching gaussian overlay and dashed
    markers at mu-2.5*sigma and mu+3*sigma; if plot_phi, also histogram
    phi (degrees) and scatter q against phi.
    """
    plt.suptitle(title)
    ax = plt.subplot(131 if plot_phi else 111)
    n, bins, patches = plt.hist(q, bins=50, density=True)
    mean, std = np.mean(q), np.std(q, ddof=1)
    # overlay the gaussian with the sample mean and std
    plt.plot(bins, exp(-0.5*((bins-mean)/std)**2)/sqrt(2*pi*std**2))
    q_low, q_high = mean-2.5*std, mean+3*std
    #q_low, q_high = mean**2/(mean + 3*std), mean + 3*std
    # x in data coordinates, y in axes coordinates for full-height lines
    trans = ax.get_xaxis_transform()
    ax.vlines([q_low, q_high], 0, 1, transform=trans, linestyle='dashed')
    plt.text(q_low, 1, "\n $\\mu - 2.5\\sigma$",
             transform=trans, va='top', ha='left')
    plt.text(q_high, 1, "\n$\\mu + 3\\sigma$ ",
             transform=trans, va='top', ha='right')
    plt.grid(True)
    plt.xlabel("Q (1/A)")
    if not plot_phi:
        return
    ax = plt.subplot(132)
    n, bins, patches = plt.hist(degrees(phi), bins=50, density=True)
    mean, std = np.mean(degrees(phi)), np.std(degrees(phi), ddof=1)
    plt.plot(bins, exp(-0.5*((bins-mean)/std)**2)/sqrt(2*pi*std**2))
    plt.grid(True)
    plt.xlabel("phi (degrees)")
    plt.subplot(133)
    plt.plot(q, degrees(phi), '.', ms=1)
    plt.grid(True)
    plt.xlabel('Q (1/A)')
    plt.ylabel('phi (degrees)')
def plot_qperp(q, qperp, title):
    """Histogram Q_parallel and Q_perp, then scatter one against the other."""
    for position, values, label in ((131, q, r"$Q_\parallel (1/A)$"),
                                    (132, qperp, r"$Q_\perp (1/A)$")):
        plt.subplot(position)
        plt.hist(values, bins=50)
        plt.grid(True)
        plt.xlabel(label)
    plt.subplot(133)
    plt.plot(q, qperp, '.')
    plt.grid(True)
    plt.xlabel(r'$Q_\parallel (1/A)$')
    plt.ylabel(r'$Q_\perp (1/A)$')
    plt.suptitle(title)
def triangle(N, a=-1, b=1, c=0):
    """
    Draw N samples from the triangular distribution on [a, b] whose
    density peaks at c (with a <= c <= b), using inverse-CDF sampling.
    """
    u = rand(N)
    samples = np.empty_like(u)
    # CDF value at the peak separates the rising and falling branches.
    split = (c - a) / (b - a)
    upper = u > split
    lower = ~upper
    samples[lower] = a + sqrt(u[lower] * (b - a) * (c - a))
    samples[upper] = b - sqrt((1 - u[upper]) * (b - a) * (b - c))
    return samples
def ballistics(az, el, L, x, y, d, a=earth_gravity):
    """
    Fly neutrons a horizontal distance *d* (mm) under gravity.

    az, el : azimuth and elevation of travel (radians)
    L : wavelength (A), which sets the speed
    x, y : starting position (mm)
    d : horizontal flight distance (mm)
    a : gravitational acceleration (m/s^2); pass 0 to disable gravity

    Returns the updated (az, el, L, x, y) at the end of the flight:
    azimuth is unchanged, elevation and vertical velocity are reduced
    by the gravity drop, and the wavelength is recomputed from the new
    speed.
    """
    # speed (m/s) for this wavelength
    v = to_velocity(L)
    # horizontal and vertical velocity components
    vx, vy = v*cos(el), v*sin(el)
    # distance (m) between source and sample in the direction of travel
    daz = 0.001*d/cos(az)
    # position on sample (mm); the y drop is 0.5*a*t^2 with t = daz/vx,
    # converted back to mm by the 1000 factor
    x = x + 1000*daz*(sin(az))
    y = y + 1000*daz*(tan(el) - 0.5*a*daz/vx**2)
    # velocity, wavelength and elevation on sample
    vy = vy - a*daz/vx
    v = sqrt(vx**2 + vy**2)
    el = arctan2(vy, vx)
    L = to_wavelength(v)
    return az, el, L, x, y
def throwing_angle(v, x, y, a=earth_gravity):
    """
    Return the elevation angle (radians) at which a projectile launched
    at speed *v* (m/s) from the origin passes through the point (x, y) (m).

    v, x, y may be numpy arrays (broadcast together). Of the two
    ballistic solutions, the lower ("minus") angle is returned; targets
    unreachable at speed v (negative discriminant) are clamped to the
    maximum-range angle. With a == 0 the flight is a straight line and
    there is a single solution.
    """
    if a == 0:
        # BUG FIX: this branch previously returned a 3-tuple
        # (idx, angle, angle) while the gravity branch -- and the caller
        # in pinhole() -- returns/expects a single angle array.
        return arctan2(y, x)
    # discriminant of the ballistic equation; negative means unreachable
    radical = v**4 - a*(a*x**2 + 2*y*v**2)
    radical[radical < 0] = 0
    # the "plus" root arctan2(v**2 + sqrt(radical), a*x) is the high lob;
    # we use the flatter "minus" trajectory
    return arctan2(v**2 - sqrt(radical), a*x)
def aperture_alignment(wavelength, aligned_wavelength, Dsource, Ddetector):
    """
    Gravity-alignment geometry for the instrument.

    Dsource and Ddetector are the source-sample and sample-detector
    distances (mm); wavelengths are in Angstroms. aligned_wavelength is
    the wavelength the instrument was aligned at (None means align at
    *wavelength* itself).

    Returns (el, y, p):
      el -- launch elevation (radians) that centers a neutron of the
            alignment wavelength on the detector
      y  -- height (mm) of that ballistic trajectory at the sample
            aperture (the aperture offset)
      p  -- height (mm) where the direct beam of *wavelength* actually
            meets the detector
    """
    # SANS instruments are approximately horizontal, so they will experience
    # a gravity drop proportional to the distance travelled. We can set
    # the elevation required to hit the target based on this distance. We
    # will add this correction to all elevations.
    Ddetector += Dsource
    if aligned_wavelength is None:
        aligned_wavelength = wavelength
    aligned_velocity = to_velocity(aligned_wavelength) # m/s
    el = 0.5*arcsin(earth_gravity*0.001*(Ddetector)/aligned_velocity**2)
    velocity = to_velocity(wavelength) # m/s
    # We need to shift the sample aperture into the ballistic trajectory by
    # computing the height of the ball at the source to sample distance
    y = Dsource*tan(el) \
        - 1000*0.5*earth_gravity*(0.001*Dsource/(velocity*cos(el)))**2
    # We need to compute the position p where the direct beam will encounter
    # the detector.
    p = Ddetector*tan(el) \
        - 1000*0.5*earth_gravity*(0.001*Ddetector/(velocity*cos(el)))**2
    return el, y, p
def nominal_q(sx, sy, az_in, el_in, az_out, el_out, dz):
    """
    Convert incident (az_in, el_in) and scattered (az_out, el_out)
    directions from the sample at (sx, sy) into scattering angles
    (theta, phi), using straight-line (gravity-free) flight to a
    detector a distance dz away.
    """
    # Straight-flight beam center on the detector for the incident direction.
    bx = sx + dz * tan(az_in)
    by = sy + dz * tan(el_in)
    beam_dist = sqrt((bx - sx)**2 + (by - sy)**2 + dz**2)
    # Straight-flight position of the scattered neutron on the detector.
    px = sx + dz * tan(az_out)
    py = sy + dz * tan(el_out)
    # A correction projecting (px, py) onto the plane through the beam
    # center normal to the incident beam is deliberately omitted: it is
    # insignificant for small-angle scattering (see module docstring).
    # The detector offsets relative to the beam center give the angles;
    # these are detector coordinates, not the (qx, qy) inverse coordinates.
    offset = sqrt((px - bx)**2 + (py - by)**2)
    theta = arctan2(offset, beam_dist) / 2
    phi = arctan2(py - by, px - bx)
    return theta, phi
def resolution(R1, R2, D1, D2, dx, dy, L, dL):
    # NOTE(review): this looks like an unfinished stub -- it computes a
    # single dQx term and implicitly returns None, and nothing in this
    # module calls it. Confirm intent before completing or removing.
    dQx = sqrt((2*pi/(L*D2))**2)
class Spectrum:
    """
    Empirical wavelength distribution defined by intensities I measured
    at wavelengths L, sampled by inverse-CDF interpolation.
    """
    def __init__(self, L, I):
        self.L = L
        self.I = I
        total = np.sum(I)
        # Cumulative probability with a leading zero: [0, p1, p1+p2, ..., 1]
        self._cdf = np.hstack((0., np.cumsum(I)/total))
        # Bin edges: half a step beyond each end, midpoints in between.
        first_edge = (3*L[0] - L[1])/2
        last_edge = (3*L[-1] - L[-2])/2
        self._edges = np.hstack((first_edge, (L[:-1] + L[1:])/2, last_edge))
        # Intensity-weighted mean wavelength.
        self.center = np.sum(L*I)/total
    def rand(self, n=1):
        """Draw n wavelengths from the distribution."""
        return np.interp(np.random.rand(n), self._cdf, self._edges)
class Triangle:
    """
    Triangular wavelength distribution centered on *wavelength* with
    fwhm fractional resolution dL/L = *resolution*.
    """
    def __init__(self, wavelength, resolution):
        self._dL = wavelength * resolution
        self.center = wavelength
    def rand(self, n=1):
        """Draw n wavelengths from the triangular distribution."""
        # triangle() yields values in [-1, 1]; scale by dL and recenter.
        return triangle(n) * self._dL + self.center
def neutrons_on_sample(Rsource, Rsample, Rbeamstop, Dsource, Ddetector,
                       wavelength, wavelength_resolution, aligned_wavelength,
                       N):
    """
    Generate a population of N neutrons incident on the sample.

    Radii and distances are in mm, wavelengths in Angstroms.
    wavelength_resolution is either a fractional fwhm (triangular
    distribution) or an (L, I) tuple giving a measured spectrum.
    Rbeamstop is accepted for interface symmetry but unused here.

    Returns (azimuth, elevation, wavelength, x, y) arrays for the
    neutrons which made it through the sample aperture.
    """
    # ===== Random initial state ====
    # theta is pi/2 minus latitude, phi is longitude, z is the rotational axis
    # The z-axis connects the center of the apertures and detector.
    # Limit the source angles to those that can make it from one side of the
    # source aperture to the other side of the sample aperture.
    #print("v_min=%.2f m/s, gravity_drop=%.2f mm"%(min_velocity, gravity_drop))
    limit = 1 - 1/sqrt(1+((Rsource+Rsample)/Dsource)**2)
    theta, phi = arccos(1-rand(N)*limit), 2*pi*rand(N)
    #plot3(sin(theta)*cos(phi), sin(theta)*sin(phi), cos(theta)); return
    #print("limit", limit)
    #plot(degrees(theta), degrees(phi), "polar vs equatorial angles"); return
    if isinstance(wavelength_resolution, tuple):
        dist = Spectrum(*wavelength_resolution)
    else:
        dist = Triangle(wavelength, wavelength_resolution)
    L = dist.rand(len(theta))
    #plt.figure(); plt.hist(L, bins=50); # plt.show()
    # source position: x, y is the isotropic incident location
    alpha, r = 2*pi*rand(N), Rsource*arccos(rand(N))*2/pi
    x, y = r*cos(alpha), r*sin(alpha)
    #plot(x, y, "neutron position in source aperture"); return
    # ==== Gravity correction ====
    # gravity calculations work better with azimuth and elevation
    az, el = theta*cos(phi), theta*sin(phi)
    delta_el, delta_y, delta_p = aperture_alignment(
        wavelength, aligned_wavelength, Dsource, Ddetector)
    el += delta_el
    # ===== Compute image on sample =====
    #plot(degrees(az), degrees(el), "azimuthal angle vs elevation"); return
    s_az, s_el, s_L, s_x, s_y = ballistics(az, el, L, x, y, Dsource)
    #plt.hist(s_L, bins=50); return # updated wavelengths
    #plot(s_x, s_y, "G: neutron position on sample aperture"); return
    #plot(s_az, s_el, "G: sample aperture azimuthal angle vs elevation"); return
    # filter through sample aperture
    idx = (s_x**2 + (s_y-delta_y)**2 < Rsample**2)
    s_az, s_el, s_L, s_x, s_y = [w[idx] for w in (s_az, s_el, s_L, s_x, s_y)]
    #plt.hist(s_L, bins=50); plt.title("G: sample wavelength"); return
    #plot(az[idx], el[idx], "G: sample azimuthal angle vs elevation"); return
    #plot(s_az, s_el, "G: sample azimuthal angle vs elevation"); return
    #plot(s_x, s_y, "G: neutron position in sample"); return
    return s_az, s_el, s_L, s_x, s_y
# All lengths are in millimeters
def pinhole(pixel_i, pixel_j, pixel_width=5, pixel_height=5,
            source_aperture=50.8, sample_aperture=12.7,
            source_distance=8500, detector_distance=4000,
            beamstop=50.8,
            wavelength=8, wavelength_resolution=0.12, aligned_wavelength=None,
            N=5000, phi_mask=7.1,
            Iq=None):
    """
    Monte Carlo resolution estimate for a pinhole-collimation SANS
    instrument.

    pixel_i, pixel_j are sequences of detector pixel indices (relative to
    the beam center); lengths are in mm, wavelengths in Angstroms, and N
    is the number of simulated neutrons. Iq, if given, is a callable
    I(q) used to weight the statistics. Depending on how many pixels are
    requested, prints per-pixel statistics (one pixel), plots resolution
    vs Q (a line of pixels), or plots resolution maps (a 2D region).
    """
    PI, PJ = np.meshgrid(pixel_i, pixel_j)
    # ===== Generate a population of neutrons at the sample position =====
    Rsource = source_aperture/2
    Rsample = sample_aperture/2
    Rbeamstop = beamstop/2
    Dsource = source_distance
    Ddetector = detector_distance
    delta_el, delta_y, delta_p = aperture_alignment(
        wavelength, aligned_wavelength, Dsource, Ddetector)
    s_az, s_el, s_L, s_x, s_y = neutrons_on_sample(
        Rsource, Rsample, Rbeamstop, Dsource, Ddetector,
        wavelength, wavelength_resolution, aligned_wavelength,
        N)
    # ==== Compute image on detector without sample ====
    #
    #d_az, d_el, d_L, d_x, d_y = ballistics(s_az, s_el, s_L, s_x, s_y, Ddetector)
    ### filter through beam stop
    ##idx = (d_x**2 + (d_y-delta_y)**2 < Rbeamstop**2)
    ##s_az, s_el, s_L, s_x, s_y = [w[idx] for w in (s_az, s_el, s_L, s_x, s_y)]
    ##d_az, d_el, d_L, d_x, d_y = [w[idx] for w in (d_az, d_el, d_L, d_x, d_y)]
    #plot(d_x/pixel_width, d_y/pixel_height, "G: neutron detector pixel"); return
    # ==== Scatter off sample ====
    mode = None
    #mode = 'sum'
    #mode = 'scatter'
    if mode == 'sum' and Iq is not None:
        # For each pixel, compute the scattering angle between the neutron
        # on a direct path to the detector vs the pixel center, and compute
        # I(q) based on that. Seems to underestimate the dQ/Q resolution
        # for the pixels, so don't use this without figuring out what's wrong.
        raise NotImplementedError("experimental code; see source")
        # pixel centers relative to beam center
        cx, cy = PI*pixel_width, PJ*pixel_height
        pixel_r = sqrt(cx**2 + cy**2)
        pixel_theta = arctan2(pixel_r, Ddetector)/2
        #pixel_phi = arctan2(cy, cx)
        pixel_nominal_q = 4*pi * sin(pixel_theta)/wavelength
        # find neutron position on the detector without scattering
        d_az, d_el, d_L, d_x, d_y = ballistics(s_az, s_el, s_L, s_x, s_y, Ddetector)
        # find scattering angle from each neutron to each pixel
        r = sqrt(((d_x-s_x)[:, None] - cx.flatten()[None, :])**2
                 + ((d_y-s_y)[:, None] - (cy+delta_p).flatten()[None, :])**2)
        theta = arctan2(r, Ddetector)/2
        # find q value for each neutron at each pixel
        q = 4*pi*sin(theta)/d_L[:, None]
        # accumulate scattering patterns across all neutrons
        I = Iq(q)
        pixel_Iq = np.sum(I, axis=0).reshape(PI.shape)
        pixel_dIq = pixel_Iq/sqrt(len(s_x))
        pixel_q = np.mean(q, axis=0).reshape(PI.shape)
        pixel_dq = np.std(q, axis=0, ddof=1).reshape(PI.shape)
        #print("pixel_Iq", pixel_q.shape, pixel_Iq.shape)
    if mode == 'scatter' and Iq is not None:
        # For each neutron figure out the relative probability of the neutron
        # arriving in each individual pixel, then choose one to add it to.
        # The result is way off, probably because it doesn't include the
        # probability that the neutron goes to none of the pixels.
        raise NotImplementedError("experimental code; see source")
        # pixel centers relative to beam center
        cx, cy = PI*pixel_width, PJ*pixel_height
        pixel_r = sqrt(cx**2 + cy**2)
        pixel_theta = arctan2(pixel_r, Ddetector)/2
        #pixel_phi = arctan2(cy, cx)
        pixel_q = 4*pi * sin(pixel_theta)/wavelength
        # find neutron position on the detector without scattering
        d_az, d_el, d_L, d_x, d_y = ballistics(s_az, s_el, s_L, s_x, s_y, Ddetector)
        # find scattering angle from each neutron to each pixel
        # For each neutron generate the probability distribution corresponding
        # to the various pixels that the neutron might land in and pick one.
        counts = np.zeros(pixel_q.size, 'i')
        counts_q = np.zeros(pixel_q.size, 'd')
        for xk, yk, Lk in zip(d_x-s_x, d_y-s_y, d_L):
            r = sqrt((xk - cx)**2 + (yk-delta_p - cy)**2)
            theta = arctan2(r, Ddetector)/2
            # find q value for each neutron at each pixel
            q = (4*pi*sin(theta)/Lk).flatten()
            # accumulate scattering patterns across all neutrons
            invcdf = np.cumsum(Iq(q))
            U = np.random.uniform(0, invcdf[-1])
            index = np.searchsorted(invcdf, U)
            counts[index] += 1
            counts_q[index] += q[index]
        counts_q /= counts + (counts==0)
        counts.reshape(cx.shape)
        counts_q.reshape(cx.shape)
    # stats rows are:
    #  [0] theta_nominal  [1] theta_mean  [2] theta_std
    #  [3] phi_nominal    [4] phi_mean    [5] phi_std
    #  [6] q_nominal      [7] q_mean      [8] q_std
    #  [9] qperp_nominal [10] qperp_mean [11] qperp_std
    # [12] I             [13] dI
    stats = []
    current_j = 1000001 # arbitrary unlikely number
    for p_i, p_j in zip(PI.flat, PJ.flat):
        if current_j != p_j:
            print("pixel j=%d"%p_j)
            current_j = p_j
        ## Generate a new set of points on the sample for each pixel
        #s_az, s_el, s_L, s_x, s_y = neutrons_on_sample(
        #    Rsource, Rsample, Rbeamstop, Dsource, Ddetector,
        #    wavelength, wavelength_resolution, aligned_wavelength,
        #    N)
        # ==== Compute scattering theta, phi for pixel ====
        # find random point in pixel i, j to scatter to
        xl, xu = (p_i-0.5)*pixel_width, (p_i+0.5)*pixel_width
        yl, yu = delta_p+(p_j-0.5)*pixel_height, delta_p+(p_j+0.5)*pixel_height
        p_x, p_y = rand(len(s_x))*(xu-xl)+xl, rand(len(s_x))*(yu-yl)+yl
        #plot(px, py, "px,py pixel locations"); return
        # find the scattering angle necessary to reach point P on the detector
        q_az = arctan2(p_x-s_x, np.ones_like(s_x)*Ddetector)
        q_el = throwing_angle(to_velocity(s_L), 0.001*Ddetector/cos(q_az),
                              0.001*(p_y-s_y))
        #q_theta = arccos(sin(s_el)*sin(q_el) + cos(s_el)*cos(q_el)*cos(q_az-s_az))
        #q_theta_2 = arctan2(sqrt((d_x-p_x)**2+(d_y-p_y)**2)), Ddetector)
        #q_phi = arctan2(q_el, q_az)
        # Note that q scattering calculations look at positions on the detector
        # assuming neutrons travel in a straight line, and not the positions
        # according to ballistics. The ballistics are taken into account by the
        # choice of initial angle such that the neutron will hit the target
        # position. The scattering function operates solely on incident and
        # scattered angle with no hint of gravity, and so the resolution
        # function which mixes the scattering theory must reflect this.
        q_theta, q_phi = nominal_q(s_x, s_y, s_az, s_el, q_az, q_el, Ddetector)
        q = 4*pi*sin(q_theta)/s_L
        #return
        # filter through beam stop, corrected for gravity alignment
        #print(Rbeamstop**2, xu**2 + (yu-delta_p)**2, xl**2 + (yl-delta_p)**2)
        idx = (p_x**2 + (p_y-delta_p)**2 > Rbeamstop**2)
        q_theta, q_phi, q = [w[idx] for w in (q_theta, q_phi, q)]
        # ==== calculate stats ====
        cx, cy = p_i*pixel_width, p_j*pixel_height
        theta_nominal = arctan2(sqrt(cx**2+cy**2), Ddetector)/2
        phi_nominal = arctan2(cy, cx)
        q_nominal = 4*pi*sin(theta_nominal)/wavelength
        qperp_nominal = 0
        # Approximate q_perp as arc length between nominal phi and actual phi
        # at radius q.
        qperp = q*(q_phi-phi_nominal)
        if len(q) > 1:
            theta_mean, theta_std = np.mean(q_theta), np.std(q_theta)
            phi_mean, phi_std = circmean(q_phi, -pi, pi), circstd(q_phi, -pi, pi)
            q_mean, q_std = np.mean(q), np.std(q)
            qperp_mean, qperp_std = np.mean(qperp), np.std(qperp)
            # weight each neutron by the sample scattering
            I = np.sum(Iq(q))/len(q) if Iq is not None else 0
            dI = I/sqrt(len(q))
        else:
            print("no q values for (%d, %d)"%(p_i, p_j))
            theta_mean, theta_std = theta_nominal, 0.
            phi_mean, phi_std = phi_nominal, 0.
            q_mean, q_std = q_nominal, 0.
            qperp_mean, qperp_std = qperp_nominal, 0.
            # NOTE(review): the success branch stores scalars for I, dI but
            # this branch stores empty lists, which makes np.array(stats)
            # ragged when any pixel receives no neutrons -- confirm intent.
            I, dI = [], []
        stats.append([
            theta_nominal, theta_mean, theta_std,
            phi_nominal, phi_mean, phi_std,
            q_nominal, q_mean, q_std,
            qperp_nominal, qperp_mean, qperp_std,
            I, dI,
            ])
    config = "src-ap:%.1fcm samp-ap:%.1fcm src-dist:%.1fm det-dist:%.1fm L:%.1fA" % (
        source_aperture/10, sample_aperture/10,
        Dsource/1000, Ddetector/1000, wavelength)
    if len(stats) == 0:
        pass # No samples fell in detector region
    elif len(stats) == 1:
        # print stats
        pixel_config = "%s pixel:%d,%d (%dX%d mm^2)" %(
            config, p_i, p_j, pixel_width, pixel_height)
        print(pixel_config)
        #print(" nominal lambda: %.4f actual lambda: %.4f +/- %.4f (Ang)"
        #      % (wavelength, np.mean(s_L), np.std(s_L)))
        print(" nominal 1/lambda: %.4f actual 1/lambda: %.4f +/- %.4f (1/Ang)"
              % (1./wavelength, np.mean(1./s_L), np.std(1./s_L)))
        print(" nominal theta: %.4f actual theta: %.4f +/- %.4f (degrees)"
              % (degrees(theta_nominal), degrees(theta_mean), degrees(theta_std)))
        #print(" nominal phi: %.4f actual phi: %.4f +/- %.4f (degrees)"
        #      % (degrees(phi_nominal), degrees(phi_mean), degrees(phi_std)))
        print(" nominal q: %.4f actual q: %.4f +/- %.4f (1/Ang)"
              % (q_nominal, q_mean, q_std))
        #plt.hist(degrees(q_az), bins=50); plt.title("G: scattered rotation"); plt.figure()
        #plt.hist(degrees(q_el), bins=50); plt.title("G: scattered elevation"); plt.figure()
        #plt.hist(degrees(q_theta), bins=50); plt.title("G: Q theta"); plt.figure()
        #plt.hist(q, bins=50, density=True); plt.title("G: Q"); plt.figure()
        # plot resolution
        qual = "for pixel %d,%d"%(p_i, p_j)
        #plot_angles(q_theta, q_phi); plt.figure()
        plot_q(q, q_phi, "Q %s"%qual, plot_phi=True)
        #plot_q(q, q_phi, "Q %s"%qual, plot_phi=False)
        #plot_q(np.log10(q), q_phi, "Q %s"%qual, plot_phi=False)
        #plot_qperp(q, qperp, "Q %s"%qual)
        plt.suptitle(pixel_config)
    elif len(pixel_i) == 1 or len(pixel_j) == 1:
        # a single line of pixels: plot resolution terms against Q
        stats = np.array(stats)
        plt.suptitle(config)
        plt.subplot(221)
        plt.plot(stats[:, 6], degrees(stats[:, 2]), '.')
        plt.grid(True)
        plt.xlabel(r'$Q (1/A)$')
        plt.ylabel(r'$\Delta\theta (\degree)$')
        plt.subplot(222)
        plt.plot(stats[:, 6], degrees(stats[:, 5]), '.')
        plt.grid(True)
        plt.xlabel(r'$Q (1/A)$')
        plt.ylabel(r'$\Delta\phi (\degree)$')
        if Iq is not None:
            q, dq, I, dI = stats[:, 7], stats[:, 8], stats[:, 12], stats[:, 13]
            plt.subplot(223)
            plt.plot(q, 100*dq/q, '.')
            plt.grid(True)
            plt.xlabel(r'$Q (1/A)$')
            plt.ylabel(r'$\Delta Q/Q (\%)$')
            plt.subplot(224)
            plt.errorbar(q, I, dI, fmt='.')
            plt.xscale('log')
            plt.yscale('log')
            if mode == 'sum':
                pixel_r, pixel_q, pixel_Iq, pixel_dIq, pixel_dq = (
                    v.flatten() for v in (pixel_r, pixel_q, pixel_Iq, pixel_dIq, pixel_dq)
                    )
                mask = pixel_r >= Rbeamstop
                #plt.loglog(pixel_q[mask], pixel_Iq[mask], '.')
                plt.loglog(pixel_q, pixel_Iq, '.')
                np.savetxt("res_sum.dat", np.array([pixel_q, pixel_Iq, pixel_dIq, pixel_dq]).T)
            if mode == 'scatter':
                qp, Ip = pixel_q.flatten(), counts.flatten()
                qp = counts_q.flatten()
                mask = (pixel_r.flatten() >= Rbeamstop) & (qp > 0)
                qp, Ip = qp[mask], Ip[mask]
                plt.loglog(qp, Ip, '.')
                coeff = np.polyfit(log(qp), log(Ip), 1)
                plt.loglog(qp, exp(np.polyval(coeff, log(qp))), '-')
                print("fit to line", coeff)
            if False: # add fit to line slope (for power law and fractal)
                coeff = np.polyfit(log(q[1:-1]), log(I[1:-1]), 1)
                plt.loglog(q, exp(np.polyval(coeff, log(q))), '-')
                print("fit to line", coeff)
            plt.grid(True)
            plt.xlabel(r'$Q (1/A)$')
            plt.ylabel(r'$I (1/cm)$')
            np.savetxt("res_Iq.dat", np.array([q, I, dI, dq]).T)
            if 1:
                plt.figure()
                plt.plot(stats[:, 6], stats[:, 7], '.')
                plt.xlabel("Q nominal")
                plt.ylabel("Q mean")
        else:
            plt.subplot(223)
            plt.plot(stats[:, 6], stats[:, 8], '.')
            plt.grid(True)
            plt.xlabel(r'$Q (1/A)$')
            plt.ylabel(r'$\Delta Q_\parallel (1/A)$')
            plt.subplot(224)
            plt.plot(stats[:, 6], stats[:, 11], '.')
            plt.grid(True)
            plt.xlabel(r'$Q (1/A)$')
            plt.ylabel(r'$\Delta Q_\perp (1/A)$')
    else:
        # a 2D block of pixels: plot resolution maps over the detector
        stats = np.array(stats)
        plt.suptitle(config)
        plt.subplot(131)
        data, title = degrees(stats[:, 2]), r"$\Delta\theta$"
        data = np.ma.array(data, mask=(stats[:, 2] == 0))
        data = data.reshape(len(pixel_i), len(pixel_j))
        #mask = (PI**2 + PJ**2 < phi_mask**2)
        #data = np.ma.array(data, mask=mask)
        #data, title = stats[:, 1]-stats[:, 0], r"$\theta - \hat\theta$"
        #data = np.clip(stats[:, 1]-stats[:, 0], 0, 0.02)
        plt.pcolormesh(pixel_i, pixel_j, data)
        plt.grid(True)
        plt.axis('equal')
        plt.title(title)
        plt.colorbar()
        plt.subplot(132)
        data, title = degrees(stats[:, 5]), r"$\Delta\phi$"
        #data, title = stats[:, 4]-stats[:, 3], r"$\phi - \hat\phi$"
        data = np.ma.array(data, mask=(stats[:, 5] == 0))
        data = data.reshape(len(pixel_i), len(pixel_j))
        #mask = (PI < phi_mask) & (abs(PJ) < phi_mask)
        plt.pcolormesh(pixel_i, pixel_j, data)
        plt.grid(True)
        plt.axis('equal')
        plt.title(title)
        plt.colorbar()
        plt.subplot(133)
        #data, title = stats[:, 8], r"$\Delta q$"
        data, title = stats[:, 8]/stats[:, 6], r"$\Delta q/q$"
        data = np.ma.array(data, mask=(stats[:, 8] == 0))
        data = data.reshape(len(pixel_i), len(pixel_j))
        #mask = (PI**2+PJ**2 < phi_mask**2)
        #data = np.ma.array(data, mask=mask)
        #data, title = stats[:, 7]-stats[:, 6], r"$q - \hat q$"
        #data = np.clip(stats[:, 7]-stats[:, 6], 0, 0.0005)
        plt.pcolormesh(pixel_i, pixel_j, data)
        plt.grid(True)
        plt.axis('equal')
        plt.title(title)
        plt.colorbar()
def sphere(L, theta, phi, radius, contrast):
    """
    Scattering intensity I(q) (1/cm) for monodisperse spheres.

    L : wavelength (A)
    theta : scattering half-angle (radians), as produced by nominal_q
    phi : azimuthal angle (radians); spheres are isotropic so it is unused
    radius : sphere radius (A)
    contrast : scattering length density contrast
    """
    # BUG FIX: q was computed from phi (the azimuthal angle), which left
    # theta unused; momentum transfer depends on the scattering half-angle,
    # matching q = 4*pi*sin(q_theta)/L used elsewhere in this module.
    q = 4*pi*sin(theta)/L
    qr = q*radius
    # sphere form factor 3(sin x - x cos x)/x^3, with the x -> 0 limit of 1
    bes = np.ones_like(qr)
    idx = qr != 0
    qr = qr[idx]
    sn, cn = sin(qr), cos(qr)
    bes[idx] = 3.0*(sn-qr*cn)/(qr*qr*qr)
    fq = (bes * contrast * 4/3*pi*radius**3)
    # 1e-4 scale factor presumably converts to 1/cm -- TODO confirm units
    Iq = 1e-4*fq**2
    return Iq
def sphere_form(q, r):
qr = q*r
sn, cn = sin(qr), cos(qr)
F = 3.0*(sn-qr*cn)/(qr*qr*qr)
return F**2
def fractal(q, fractal_dim, radius, cor_length):
    """
    Mass-fractal structure factor S(q) for aggregates of building blocks
    of size *radius* with fractal dimension *fractal_dim* and correlation
    length *cor_length*.
    """
    from scipy.special import gamma
    dim = fractal_dim
    qc = q * cor_length
    # Note: for large dim-1, sin((dim-1)*arctan(qc)) can go negative
    angular = sin((dim - 1) * arctan(qc))
    prefactor = gamma(dim + 1.) / (dim - 1)
    power_law = (q * radius) ** (-dim)
    damping = (1.0 + qc**-2) ** (-0.5 * (dim - 1))
    return 1.0 + prefactor * angular * power_law * damping
if __name__ == "__main__":
    # ==== select Q range
    # Instrument geometry presets: distances/apertures in mm, wavelength
    # in Angstroms, wavelength_resolution as fractional dL/L. Uncomment
    # exactly one row of values.
    fields = ("source_distance", "detector_distance",
              "source_aperture", "sample_aperture",
              "beamstop",
              "wavelength", "wavelength_resolution")
    values = (
        #16270, 13170, 28.6, 25.4, 50.8, 13, 0.109 # 13m @ 13A max resolution
        #16270, 13170, 28.6, 25.4, 50.8, 13, 0.25  # 13m @ 13A 25% dL/L
        16270, 13170, 50.0, 25.4, 50.8, 13, 0.25  # 13m @ 13A 25% dL/L divergent
        #15727, 14547, 76.0, 25.4, 50.8, 6, 0.124  # 14.5m @ 6A low Q
        #6959, 4000, 50.8, 9.5, 50.8, 6, 0.145   # 4m @ 6A on NG7
        #13125, 13000, 50.8, 49.5, 101.6, 6, 0.14   # 13m @ 6A on NG7
        #10070, 4050, 100.0, 25.4, 50.8, 8, 0.125  # 4m @ 8A
        #10070, 4050, 100.0, 50.9, 87.5, 8, 0.125  # 4m @ 8A; very bad res
        #3870, 1380, 100.0, 25.4, 50.8, 6, 0.236  # 1.3m @ 6A max flux
        #3870, 1380, 100.0, 50.9, 50.8, 6, 0.236  # 1.3m @ 6A max flux; very bad res
        #3914, 566.7, 64.22, 50, 50.8, 6, 0.124 # sasview dataloader test jan08002.abs
        #3914, 1000, 64.22, 50, 0, 6, 0.124 # sasview dataloader test jan08002.abs
        )
    # Parameters from NCNR VAX format files
    #    resolution.ap12dis*1000, det.dis*1000
    #    resolution.ap1, resolution.ap2
    #    det.bstop
    #    resolution.lmda, resolution.dlmda
    geom = dict(zip(fields, values))
    # sample scattering model used to weight the resolution statistics
    #geom['Iq'] = lambda q: 1e-7*q**-4  # Power law
    geom['Iq'] = lambda q, r=80: sphere_form(q, r)
    #geom['Iq'] = lambda q, r=8, D=2.5, xi=200.: sphere_form(q, r) * fractal(q, D, r, xi)
    # ==== remove gravity
    # a tiny wavelength makes neutrons so fast that gravity is negligible
    #geom["aligned_wavelength"] = geom["wavelength"] = 0.001
    # ==== select precision
    N = 10000000  # high precision
    #N = 1000000  # mid precision
    #N = 100000   # low precision
    # ==== select detector portion
    if 0:
        # various detector regions
        #i = j = np.arange(-63.5, 64)  # full detector SLOW!!!
        i = j = np.arange(-63.5, 64, 8)  # down sampled
        #i, j = np.arange(6, 64), [0]  # horizontal line
        #i, j = [0], np.arange(3.5, 64)  # vertical line
        #i, j = [6], [6]   # low Q point
        #i, j = [45], [45] # high Q point
        plt.figure(); pinhole(i, j, N=N,
                              #pixel_width=0.5, pixel_height=0.5,
                              **geom)
    else:
        # variety of single point distributions
        #geom['beamstop'] = 0.
        # first pixel after the beam stop, assuming 5 mm per pixel
        p_min = (geom['beamstop']+10)//10
        plt.figure(); pinhole([p_min], [0], N=N, **geom)
        #plt.figure(); pinhole([0], [0], N=N, **geom)
        #plt.figure(); pinhole([1], [0], N=N, **geom)
        #plt.figure(); pinhole([2], [0], N=N, **geom)
        #plt.figure(); pinhole([3], [0], N=N, **geom)
        #plt.figure(); pinhole([4], [0], N=N, **geom)
        #plt.figure(); pinhole([6], [0], N=N, **geom)
        #plt.figure(); pinhole([9], [0], N=N, **geom)
        #plt.figure(); pinhole([10], [0], N=N, **geom)
        #plt.figure(); pinhole([20], [0], N=N, **geom)
        #plt.figure(); pinhole([40], [0], N=N, **geom)
        #plt.figure(); pinhole([60], [0], N=N, **geom)
        #plt.figure(); pinhole([0], [p_min], N=N, **geom)
        #plt.figure(); pinhole([0], [20], N=N, **geom)
        #plt.figure(); pinhole([0], [60], N=N, **geom)
        #plt.figure(); pinhole([0], [-60], N=N, **geom)
    plt.show()
| |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras text category_encoding preprocessing layer."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.keras import backend
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers.preprocessing import category_encoding
from tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class CategoryEncodingInputTest(keras_parameterized.TestCase,
                                preprocessing_test_utils.PreprocessingLayerTest
                               ):
  """Exercises CategoryEncoding against dense, sparse and ragged inputs.

  Each test wraps the layer in a small functional model and checks the
  predicted encoding; sparse=True cases also verify that the sparse output
  densifies to the same values as the equivalent dense-output layer.
  """

  def test_dense_input_sparse_output(self):
    """COUNT mode with sparse=True on a dense input tensor."""
    input_array = constant_op.constant([[1, 2, 3], [3, 3, 0]])
    # The expected output should be (X for missing value):
    # [[X, 1, 1, 1, X, X]
    # [1, X, X, 2, X, X]]
    expected_indices = [[0, 1], [0, 2], [0, 3], [1, 0], [1, 3]]
    expected_values = [1, 1, 1, 1, 2]
    num_tokens = 6
    input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
    layer = category_encoding.CategoryEncoding(
        num_tokens=num_tokens, output_mode=category_encoding.COUNT, sparse=True)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    sp_output_dataset = model.predict(input_array, steps=1)
    self.assertAllEqual(expected_values, sp_output_dataset.values)
    self.assertAllEqual(expected_indices, sp_output_dataset.indices)
    # Assert sparse output is same as dense output.
    layer = category_encoding.CategoryEncoding(
        num_tokens=num_tokens,
        output_mode=category_encoding.COUNT,
        sparse=False)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array, steps=1)
    self.assertAllEqual(
        sparse_ops.sparse_tensor_to_dense(sp_output_dataset, default_value=0),
        output_dataset)

  def test_sparse_input(self):
    """MULTI_HOT encoding of a SparseTensor input."""
    input_array = np.array([[1, 2, 3, 0], [0, 3, 1, 0]], dtype=np.int64)
    sparse_tensor_data = sparse_ops.from_dense(input_array)
    # pyformat: disable
    expected_output = [[0, 1, 1, 1, 0, 0],
                       [0, 1, 0, 1, 0, 0]]
    # pyformat: enable
    num_tokens = 6
    expected_output_shape = [None, num_tokens]
    input_data = keras.Input(shape=(None,), dtype=dtypes.int64, sparse=True)
    layer = category_encoding.CategoryEncoding(
        num_tokens=num_tokens, output_mode=category_encoding.MULTI_HOT)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(sparse_tensor_data, steps=1)
    self.assertAllEqual(expected_output, output_dataset)

  def test_sparse_input_with_weights(self):
    """COUNT mode with per-value count_weights, both inputs sparse."""
    input_array = np.array([[1, 2, 3, 4], [4, 3, 1, 4]], dtype=np.int64)
    weights_array = np.array([[.1, .2, .3, .4], [.2, .1, .4, .3]])
    sparse_tensor_data = sparse_ops.from_dense(input_array)
    sparse_weight_data = sparse_ops.from_dense(weights_array)
    # pyformat: disable
    expected_output = [[0, .1, .2, .3, .4, 0],
                       [0, .4, 0, .1, .5, 0]]
    # pyformat: enable
    num_tokens = 6
    expected_output_shape = [None, num_tokens]
    input_data = keras.Input(shape=(None,), dtype=dtypes.int64, sparse=True)
    weight_data = keras.Input(shape=(None,), dtype=dtypes.float32, sparse=True)
    layer = category_encoding.CategoryEncoding(
        num_tokens=num_tokens, output_mode=category_encoding.COUNT)
    int_data = layer(input_data, count_weights=weight_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
    model = keras.Model(inputs=[input_data, weight_data], outputs=int_data)
    output_dataset = model.predict([sparse_tensor_data, sparse_weight_data],
                                   steps=1)
    self.assertAllClose(expected_output, output_dataset)

  def test_sparse_input_sparse_output(self):
    """COUNT mode, sparse in and sparse out; compared against dense out."""
    sp_inp = sparse_tensor.SparseTensor(
        indices=[[0, 0], [1, 1], [2, 0], [2, 1], [3, 1]],
        values=[0, 2, 1, 1, 0],
        dense_shape=[4, 2])
    input_data = keras.Input(shape=(None,), dtype=dtypes.int64, sparse=True)
    # The expected output should be (X for missing value):
    # [[1, X, X, X]
    # [X, X, 1, X]
    # [X, 2, X, X]
    # [1, X, X, X]]
    expected_indices = [[0, 0], [1, 2], [2, 1], [3, 0]]
    expected_values = [1, 1, 2, 1]
    num_tokens = 6
    layer = category_encoding.CategoryEncoding(
        num_tokens=num_tokens, output_mode=category_encoding.COUNT, sparse=True)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    sp_output_dataset = model.predict(sp_inp, steps=1)
    self.assertAllEqual(expected_values, sp_output_dataset.values)
    self.assertAllEqual(expected_indices, sp_output_dataset.indices)
    # Assert sparse output is same as dense output.
    layer = category_encoding.CategoryEncoding(
        num_tokens=num_tokens,
        output_mode=category_encoding.COUNT,
        sparse=False)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(sp_inp, steps=1)
    self.assertAllEqual(
        sparse_ops.sparse_tensor_to_dense(sp_output_dataset, default_value=0),
        output_dataset)

  def test_sparse_input_sparse_output_with_weights(self):
    """COUNT mode with weights, sparse in and sparse out."""
    indices = [[0, 0], [1, 1], [2, 0], [2, 1], [3, 1]]
    sp_inp = sparse_tensor.SparseTensor(
        indices=indices, values=[0, 2, 1, 1, 0], dense_shape=[4, 2])
    input_data = keras.Input(shape=(None,), dtype=dtypes.int64, sparse=True)
    sp_weight = sparse_tensor.SparseTensor(
        indices=indices, values=[.1, .2, .4, .3, .2], dense_shape=[4, 2])
    weight_data = keras.Input(shape=(None,), dtype=dtypes.float32, sparse=True)
    # The expected output should be (X for missing value):
    # [[1, X, X, X]
    # [X, X, 1, X]
    # [X, 2, X, X]
    # [1, X, X, X]]
    expected_indices = [[0, 0], [1, 2], [2, 1], [3, 0]]
    expected_values = [.1, .2, .7, .2]
    num_tokens = 6
    layer = category_encoding.CategoryEncoding(
        num_tokens=num_tokens, output_mode=category_encoding.COUNT, sparse=True)
    int_data = layer(input_data, count_weights=weight_data)
    model = keras.Model(inputs=[input_data, weight_data], outputs=int_data)
    sp_output_dataset = model.predict([sp_inp, sp_weight], steps=1)
    self.assertAllClose(expected_values, sp_output_dataset.values)
    self.assertAllEqual(expected_indices, sp_output_dataset.indices)

  def test_ragged_input(self):
    """MULTI_HOT encoding of a RaggedTensor input."""
    input_array = ragged_factory_ops.constant([[1, 2, 3], [3, 1]])
    # pyformat: disable
    expected_output = [[0, 1, 1, 1, 0, 0],
                       [0, 1, 0, 1, 0, 0]]
    # pyformat: enable
    num_tokens = 6
    expected_output_shape = [None, num_tokens]
    input_data = keras.Input(shape=(None,), dtype=dtypes.int32, ragged=True)
    layer = category_encoding.CategoryEncoding(
        num_tokens=num_tokens, output_mode=category_encoding.MULTI_HOT)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array, steps=1)
    self.assertAllEqual(expected_output, output_dataset)

  def test_ragged_input_sparse_output(self):
    """COUNT mode with sparse=True on ragged input; compared against dense."""
    input_array = ragged_factory_ops.constant([[1, 2, 3], [3, 3]])
    # The expected output should be (X for missing value):
    # [[X, 1, 1, 1]
    # [X, X, X, 2]]
    expected_indices = [[0, 1], [0, 2], [0, 3], [1, 3]]
    expected_values = [1, 1, 1, 2]
    num_tokens = 6
    input_data = keras.Input(shape=(None,), dtype=dtypes.int32, ragged=True)
    layer = category_encoding.CategoryEncoding(
        num_tokens=num_tokens, output_mode=category_encoding.COUNT, sparse=True)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    sp_output_dataset = model.predict(input_array, steps=1)
    self.assertAllEqual(expected_values, sp_output_dataset.values)
    self.assertAllEqual(expected_indices, sp_output_dataset.indices)
    # Assert sparse output is same as dense output.
    layer = category_encoding.CategoryEncoding(
        num_tokens=num_tokens,
        output_mode=category_encoding.COUNT,
        sparse=False)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array, steps=1)
    self.assertAllEqual(
        sparse_ops.sparse_tensor_to_dense(sp_output_dataset, default_value=0),
        output_dataset)

  def test_sparse_output_and_dense_layer(self):
    """Sparse encoder output must be consumable by a downstream Dense layer."""
    input_array = constant_op.constant([[1, 2, 3], [3, 3, 0]])
    num_tokens = 4
    input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
    encoding_layer = category_encoding.CategoryEncoding(
        num_tokens=num_tokens, output_mode=category_encoding.COUNT, sparse=True)
    int_data = encoding_layer(input_data)
    dense_layer = keras.layers.Dense(units=1)
    output_data = dense_layer(int_data)
    model = keras.Model(inputs=input_data, outputs=output_data)
    # Only checks that predict runs; the Dense output value is irrelevant.
    _ = model.predict(input_array, steps=1)

  def test_dense_oov_input(self):
    """Values >= num_tokens must raise InvalidArgumentError at predict time."""
    valid_array = constant_op.constant([[0, 1, 2], [0, 1, 2]])
    invalid_array = constant_op.constant([[0, 1, 2], [2, 3, 1]])
    num_tokens = 3
    expected_output_shape = [None, num_tokens]
    encoder_layer = category_encoding.CategoryEncoding(num_tokens)
    input_data = keras.Input(shape=(3,), dtype=dtypes.int32)
    int_data = encoder_layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
    model = keras.Model(inputs=input_data, outputs=int_data)
    # Call predict once on valid input to compile a graph and test control flow.
    _ = model.predict(valid_array, steps=1)
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        ".*must be in the range 0 <= values < num_tokens.*"):
      _ = model.predict(invalid_array, steps=1)

  def test_dense_negative(self):
    """Negative token values must raise InvalidArgumentError at predict time."""
    valid_array = constant_op.constant([[0, 1, 2], [0, 1, 2]])
    invalid_array = constant_op.constant([[1, 2, 0], [2, 2, -1]])
    num_tokens = 3
    expected_output_shape = [None, num_tokens]
    encoder_layer = category_encoding.CategoryEncoding(num_tokens)
    input_data = keras.Input(shape=(3,), dtype=dtypes.int32)
    int_data = encoder_layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
    model = keras.Model(inputs=input_data, outputs=int_data)
    # Call predict once on valid input to compile a graph and test control flow.
    _ = model.predict(valid_array, steps=1)
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        ".*must be in the range 0 <= values < num_tokens.*"):
      _ = model.predict(invalid_array, steps=1)

  def test_legacy_max_tokens_arg(self):
    """The deprecated max_tokens kwarg still behaves like num_tokens."""
    input_array = np.array([[1, 2, 3, 1]])
    expected_output = [[0, 1, 1, 1, 0, 0]]
    num_tokens = 6
    expected_output_shape = [None, num_tokens]
    input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
    layer = category_encoding.CategoryEncoding(
        max_tokens=num_tokens, output_mode=category_encoding.MULTI_HOT)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
@keras_parameterized.run_all_keras_modes
class CategoryEncodingOutputTest(keras_parameterized.TestCase,
                                 preprocessing_test_utils.PreprocessingLayerTest
                                ):
  """Covers the ONE_HOT, MULTI_HOT and COUNT output modes of the layer."""

  def test_one_hot_output(self):
    """Rank-2 one-value-per-row input one-hot encodes to (batch, num_tokens)."""
    input_data = np.array([[3], [2], [0], [1]])
    expected_output = [
        [0, 0, 0, 1],
        [0, 0, 1, 0],
        [1, 0, 0, 0],
        [0, 1, 0, 0],
    ]
    num_tokens = 4
    expected_output_shape = [None, num_tokens]
    layer = category_encoding.CategoryEncoding(
        num_tokens=num_tokens, output_mode=category_encoding.ONE_HOT)
    inputs = keras.Input(shape=(1,), dtype=dtypes.int32)
    outputs = layer(inputs)
    model = keras.Model(inputs=inputs, outputs=outputs)
    output_dataset = model(input_data)
    self.assertAllEqual(expected_output_shape, outputs.shape.as_list())
    self.assertAllEqual(expected_output, output_dataset)

  def test_one_hot_output_rank_one_input(self):
    """Rank-1 input is treated as a batch of scalars, both on layer and model."""
    input_data = np.array([3, 2, 0, 1])
    expected_output = [
        [0, 0, 0, 1],
        [0, 0, 1, 0],
        [1, 0, 0, 0],
        [0, 1, 0, 0],
    ]
    num_tokens = 4
    expected_output_shape = [None, num_tokens]
    # Test call on layer directly.
    layer = category_encoding.CategoryEncoding(
        num_tokens=num_tokens, output_mode=category_encoding.ONE_HOT)
    output_data = layer(input_data)
    self.assertAllEqual(expected_output, output_data)
    # Test call on model.
    inputs = keras.Input(shape=(1,), dtype=dtypes.int32)
    outputs = layer(inputs)
    model = keras.Model(inputs=inputs, outputs=outputs)
    output_data = model(input_data)
    self.assertAllEqual(expected_output_shape, outputs.shape.as_list())
    self.assertAllEqual(expected_output, output_data)

  def test_one_hot_output_rank_zero_input(self):
    """A scalar input one-hot encodes to a single num_tokens vector."""
    input_data = np.array(3)
    expected_output = [0, 0, 0, 1]
    num_tokens = 4
    expected_output_shape = [None, num_tokens]
    # Test call on layer directly.
    layer = category_encoding.CategoryEncoding(
        num_tokens=num_tokens, output_mode=category_encoding.ONE_HOT)
    output_data = layer(input_data)
    self.assertAllEqual(expected_output, output_data)
    # Test call on model.
    inputs = keras.Input(shape=(1,), dtype=dtypes.int32)
    outputs = layer(inputs)
    model = keras.Model(inputs=inputs, outputs=outputs)
    output_data = model(input_data)
    self.assertAllEqual(expected_output_shape, outputs.shape.as_list())
    self.assertAllEqual(expected_output, output_data)

  def test_one_hot_rank_3_output_fails(self):
    """ONE_HOT with multi-element rows would need a rank-3 output: rejected."""
    layer = category_encoding.CategoryEncoding(
        num_tokens=4, output_mode=category_encoding.ONE_HOT)
    with self.assertRaisesRegex(ValueError, "only outputs up to rank 2"):
      _ = layer(keras.Input(shape=(4,), dtype=dtypes.int32))
    with self.assertRaisesRegex(ValueError, "only outputs up to rank 2"):
      _ = layer(np.array([[3, 2, 0, 1], [3, 2, 0, 1]]))

  def test_multi_hot_output(self):
    """Rank-2 input multi-hot encodes, collapsing duplicates to 1."""
    input_data = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
    expected_output = [
        [0, 1, 1, 1, 0, 0],
        [1, 1, 0, 1, 0, 0],
    ]
    num_tokens = 6
    expected_output_shape = [None, num_tokens]
    layer = category_encoding.CategoryEncoding(
        num_tokens=num_tokens, output_mode=category_encoding.MULTI_HOT)
    inputs = keras.Input(shape=(None,), dtype=dtypes.int32)
    outputs = layer(inputs)
    model = keras.Model(inputs=inputs, outputs=outputs)
    output_data = model.predict(input_data)
    self.assertAllEqual(expected_output_shape, outputs.shape.as_list())
    self.assertAllEqual(expected_output, output_data)

  def test_multi_hot_output_rank_one_input(self):
    """Rank-1 input is treated as a single example when called directly."""
    input_data = np.array([3, 2, 0, 1])
    expected_output = [1, 1, 1, 1, 0, 0]
    num_tokens = 6
    expected_output_shape = [None, num_tokens]
    # Test call on layer directly.
    layer = category_encoding.CategoryEncoding(
        num_tokens=num_tokens, output_mode=category_encoding.MULTI_HOT)
    output_data = layer(input_data)
    self.assertAllEqual(expected_output, output_data)
    # Test call on model.
    inputs = keras.Input(shape=(4,), dtype=dtypes.int32)
    outputs = layer(inputs)
    model = keras.Model(inputs=inputs, outputs=outputs)
    output_data = model(input_data)
    self.assertAllEqual(expected_output_shape, outputs.shape.as_list())
    self.assertAllEqual(expected_output, output_data)

  def test_multi_hot_output_rank_zero_input(self):
    """A scalar input multi-hot encodes to a single num_tokens vector."""
    input_data = np.array(3)
    expected_output = [0, 0, 0, 1, 0, 0]
    num_tokens = 6
    expected_output_shape = [None, num_tokens]
    # Test call on layer directly.
    layer = category_encoding.CategoryEncoding(
        num_tokens=num_tokens, output_mode=category_encoding.MULTI_HOT)
    output_data = layer(input_data)
    self.assertAllEqual(expected_output, output_data)
    # Test call on model.
    inputs = keras.Input(shape=(4,), dtype=dtypes.int32)
    outputs = layer(inputs)
    model = keras.Model(inputs=inputs, outputs=outputs)
    output_data = model(input_data)
    self.assertAllEqual(expected_output_shape, outputs.shape.as_list())
    self.assertAllEqual(expected_output, output_data)

  def test_multi_hot_rank_3_output_fails(self):
    """Inputs that would produce rank-3 output are rejected in MULTI_HOT mode."""
    # Fix: this test previously constructed the layer with ONE_HOT (a copy of
    # the one_hot test above), so the multi-hot path was never exercised.
    # Use MULTI_HOT as the test name promises.
    # NOTE(review): assumes the same "only outputs up to rank 2" error fires
    # for MULTI_HOT — confirm against category_encoding.CategoryEncoding.
    layer = category_encoding.CategoryEncoding(
        num_tokens=4, output_mode=category_encoding.MULTI_HOT)
    with self.assertRaisesRegex(ValueError, "only outputs up to rank 2"):
      _ = layer(keras.Input(shape=(3, 4,), dtype=dtypes.int32))
    with self.assertRaisesRegex(ValueError, "only outputs up to rank 2"):
      _ = layer(np.array([[[3, 2, 0, 1], [3, 2, 0, 1]]]))

  def test_count_output(self):
    """COUNT mode reports per-token occurrence counts."""
    input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
    # pyformat: disable
    expected_output = [[0, 2, 1, 1, 0, 0],
                       [2, 1, 0, 1, 0, 0]]
    # pyformat: enable
    num_tokens = 6
    expected_output_shape = [None, num_tokens]
    input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
    # Consistency: use the num_tokens local rather than a second literal 6.
    layer = category_encoding.CategoryEncoding(
        num_tokens=num_tokens, output_mode=category_encoding.COUNT)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)
class CategoryEncodingModelBuildingTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """End-to-end model construction checks for CategoryEncoding."""

  @parameterized.named_parameters(
      {
          "testcase_name": "count_output",
          "num_tokens": 5,
          "output_mode": category_encoding.COUNT
      }, {
          "testcase_name": "multi_hot_output",
          "num_tokens": 5,
          "output_mode": category_encoding.MULTI_HOT
      })
  def test_end_to_end_bagged_modeling(self, output_mode, num_tokens):
    """Builds encode -> cast -> Dense and runs predict on a small batch."""
    batch = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
    inputs = keras.Input(shape=(None,), dtype=dtypes.int32)
    encoder = category_encoding.CategoryEncoding(
        num_tokens=num_tokens, output_mode=output_mode)
    if num_tokens is None:
      encoder.set_num_elements(5)
    # The layer carries no trainable weights in these modes.
    encoder.set_weights([])
    encoded = encoder(inputs)
    # Dense needs floats, so cast the integer encoding first.
    dense_out = core.Dense(64)(backend.cast(encoded, dtype="float32"))
    model = keras.Model(inputs=inputs, outputs=dense_out)
    # Only verifies the pipeline executes; output values are not asserted.
    _ = model.predict(batch)
# Run the test suite when this file is executed as a script.
if __name__ == "__main__":
  test.main()
| |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from wagtail.wagtailcore.models import Page
from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailsnippets.models import register_snippet
from wagtail.wagtailsearch import index
from taggit.models import TaggedItemBase, Tag as TaggitTag
from modelcluster.fields import ParentalKey
from .abstracts import EntryAbstract
from .utils import import_model
from .routes import BlogRoutes
from .managers import TagManager, CategoryManager
# Resolve the concrete entry mixin: honour the PUPUT_ENTRY_MODEL setting when
# present, otherwise fall back to the bundled EntryAbstract implementation.
Entry = import_model(getattr(settings, 'PUPUT_ENTRY_MODEL', EntryAbstract))
class BlogPage(BlogRoutes, Page):
    """Blog index page: holds per-blog display settings and owns the entries.

    Routing (feeds, archives, search) is provided by the BlogRoutes mixin,
    which also sets ``self.entries`` / ``self.search_*`` before rendering.
    """
    description = models.CharField(verbose_name=_('Description'), max_length=255, blank=True,
                                   help_text=_("The blog description that will appear under the title."))
    header_image = models.ForeignKey('wagtailimages.Image', verbose_name=_('Header image'), null=True, blank=True,
                                     on_delete=models.SET_NULL, related_name='+')
    # Sidebar/widget visibility toggles.
    display_comments = models.BooleanField(default=False, verbose_name=_('Display comments'))
    display_categories = models.BooleanField(default=True, verbose_name=_('Display categories'))
    display_tags = models.BooleanField(default=True, verbose_name=_('Display tags'))
    display_popular_entries = models.BooleanField(default=True, verbose_name=_('Display popular entries'))
    display_last_entries = models.BooleanField(default=True, verbose_name=_('Display last entries'))
    display_archive = models.BooleanField(default=True, verbose_name=_('Display archive'))
    # Disqus integration (optional).
    disqus_api_secret = models.TextField(blank=True)
    disqus_shortname = models.CharField(max_length=128, blank=True)
    # Pagination / listing limits.
    num_entries_page = models.IntegerField(default=5, verbose_name=_('Entries per page'))
    num_last_entries = models.IntegerField(default=3, verbose_name=_('Last entries limit'))
    num_popular_entries = models.IntegerField(default=3, verbose_name=_('Popular entries limit'))
    num_tags_entry_header = models.IntegerField(default=5, verbose_name=_('Tags limit entry header'))

    content_panels = Page.content_panels + [
        FieldPanel('description', classname="full"),
        ImageChooserPanel('header_image'),
    ]
    settings_panels = Page.settings_panels + [
        MultiFieldPanel([
            FieldPanel('display_categories'),
            FieldPanel('display_tags'),
            FieldPanel('display_popular_entries'),
            FieldPanel('display_last_entries'),
            FieldPanel('display_archive'),
        ], heading=_("Widgets")),
        MultiFieldPanel([
            FieldPanel('num_entries_page'),
            FieldPanel('num_last_entries'),
            FieldPanel('num_popular_entries'),
            FieldPanel('num_tags_entry_header'),
        ], heading=_("Parameters")),
        MultiFieldPanel([
            FieldPanel('display_comments'),
            FieldPanel('disqus_api_secret'),
            FieldPanel('disqus_shortname'),
        ], heading=_("Comments")),
    ]
    subpage_types = ['puput.EntryPage']

    def get_entries(self):
        """Return this blog's live entries, newest first.

        Fix: ``select_related`` takes relations, not plain fields —
        ``'owner__username'`` names a field and raises FieldError on
        Django >= 1.8. Following the ``owner`` FK still avoids the
        per-entry user query.
        """
        return EntryPage.objects.descendant_of(self).live().order_by('-date').select_related('owner')

    def get_context(self, request, *args, **kwargs):
        """Add the entry queryset and search state to the template context."""
        context = super(BlogPage, self).get_context(request, *args, **kwargs)
        # self.entries is populated by BlogRoutes before rendering; the
        # search attributes may be absent, hence the "" defaults.
        context['entries'] = self.entries
        context['blog_page'] = self
        context['search_type'] = getattr(self, 'search_type', "")
        context['search_term'] = getattr(self, 'search_term', "")
        return context

    class Meta:
        verbose_name = _('Blog')
@register_snippet
@python_2_unicode_compatible
class Category(models.Model):
    """Hierarchical blog category, editable as a Wagtail snippet."""
    name = models.CharField(max_length=80, unique=True, verbose_name=_('Category name'))
    slug = models.SlugField(unique=True, max_length=80)
    parent = models.ForeignKey('self', blank=True, null=True, related_name="children",
                               verbose_name=_('Parent category'))
    description = models.CharField(max_length=500, blank=True, verbose_name=_('Description'))

    objects = CategoryManager()

    panels = [
        FieldPanel('name'),
        FieldPanel('parent'),
        FieldPanel('description'),
    ]

    def __str__(self):
        return self.name

    def clean(self):
        """Reject self-parenting and two-level parent cycles."""
        parent = self.parent
        if not parent:
            return
        if parent == self:
            raise ValidationError(_('Parent category cannot be self.'))
        if parent.parent and parent.parent == self:
            raise ValidationError(_('Cannot have circular Parents.'))

    def save(self, *args, **kwargs):
        # Auto-populate the slug from the name the first time only.
        self.slug = self.slug or slugify(self.name)
        return super(Category, self).save(*args, **kwargs)

    class Meta:
        ordering = ['name']
        verbose_name = _("Category")
        verbose_name_plural = _("Categories")
class CategoryEntryPage(models.Model):
    """Through-model attaching a Category to an EntryPage."""
    # related_name='+' disables the reverse accessor from Category.
    category = models.ForeignKey(Category, related_name="+", verbose_name=_('Category'))
    # ParentalKey (modelcluster) so categories are edited inline with the entry.
    page = ParentalKey('EntryPage', related_name='entry_categories')
    panels = [
        FieldPanel('category')
    ]
class TagEntryPage(TaggedItemBase):
    """taggit through-model linking tags to EntryPage instances."""
    content_object = ParentalKey('EntryPage', related_name='entry_tags')
@register_snippet
class Tag(TaggitTag):
    """Proxy over taggit's Tag so it can be managed as a Wagtail snippet."""
    # Custom manager with blog-aware queries (see managers.TagManager).
    objects = TagManager()

    class Meta:
        proxy = True
class EntryPageRelated(models.Model):
    """Directed entry-to-entry link used for "related entries" listings."""
    entrypage_from = ParentalKey('EntryPage', verbose_name=_("Entry"), related_name='related_entrypage_from')
    entrypage_to = ParentalKey('EntryPage', verbose_name=_("Entry"), related_name='related_entrypage_to')
class EntryPage(Page, Entry):
    """A single blog entry; the content fields come from the Entry mixin."""
    # Search
    search_fields = Page.search_fields + (
        index.SearchField('body'),
        index.SearchField('excerpt'),
        index.FilterField('page_ptr_id')
    )
    # Panels
    content_panels = [
        MultiFieldPanel([
            FieldPanel('title', classname="title"),
            ImageChooserPanel('header_image'),
            FieldPanel('body', classname="full"),
            FieldPanel('excerpt', classname="full"),
        ], heading=_("Content")),
        MultiFieldPanel([
            FieldPanel('tags'),
            InlinePanel('entry_categories', label=_("Categories")),
            InlinePanel('related_entrypage_from', label=_("Related Entries")),
        ], heading=_("Metadata")),
    ] + getattr(Entry, 'content_panels', [])

    promote_panels = Page.promote_panels + getattr(Entry, 'promote_panels', [])

    settings_panels = Page.settings_panels + [
        FieldPanel('date'),
        FieldPanel('owner'),
    ] + getattr(Entry, 'settings_panels', [])

    # Parent and child settings
    parent_page_types = ['puput.BlogPage']
    subpage_types = []

    @property
    def blog_page(self):
        """The closest BlogPage ancestor that owns this entry."""
        return BlogPage.objects.ancestor_of(self).first()

    @property
    def related(self):
        """Entries linked from this one via EntryPageRelated."""
        return [related.entrypage_to for related in self.related_entrypage_from.all()]

    @property
    def has_related(self):
        # exists() issues a cheap EXISTS query instead of COUNT(*) just to
        # test for emptiness.
        return self.related_entrypage_from.exists()

    def get_context(self, request, *args, **kwargs):
        """Expose the owning blog page to the entry template."""
        context = super(EntryPage, self).get_context(request, *args, **kwargs)
        context['blog_page'] = self.blog_page
        return context

    class Meta:
        verbose_name = _('Entry')
        verbose_name_plural = _('Entries')
# Wagtail declares Page.owner as non-editable; flip it so the FieldPanel for
# 'owner' in EntryPage.settings_panels actually renders in the admin.
EntryPage._meta.get_field('owner').editable = True
| |
# Copyright (c) 2011 Sam Rushing
#
# key.py - OpenSSL wrapper
#
# This file is modified from python-bitcoinlib.
#
"""ECC secp256k1 crypto routines
WARNING: This module does not mlock() secrets; your private keys may end up on
disk in swap! Use with caution!
"""
import ctypes
import ctypes.util
import hashlib
import sys
# Load OpenSSL's libssl; on Windows fall back to the 'libeay32' DLL name.
ssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library ('ssl') or 'libeay32')

# Declare restype/argtypes for every OpenSSL entry point used below so ctypes
# marshals pointers correctly (the default c_int return truncates 64-bit
# pointers).

# Bignum allocation and conversion.
ssl.BN_new.restype = ctypes.c_void_p
ssl.BN_new.argtypes = []
ssl.BN_bin2bn.restype = ctypes.c_void_p
ssl.BN_bin2bn.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_void_p]
ssl.BN_CTX_free.restype = None
ssl.BN_CTX_free.argtypes = [ctypes.c_void_p]
ssl.BN_CTX_new.restype = ctypes.c_void_p
ssl.BN_CTX_new.argtypes = []

# ECDH shared-secret derivation and ECDSA sign/verify.
ssl.ECDH_compute_key.restype = ctypes.c_int
ssl.ECDH_compute_key.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_sign.restype = ctypes.c_int
ssl.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_verify.restype = ctypes.c_int
ssl.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]

# EC_KEY lifecycle, accessors and serialization.
ssl.EC_KEY_free.restype = None
ssl.EC_KEY_free.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int]
ssl.EC_KEY_get0_group.restype = ctypes.c_void_p
ssl.EC_KEY_get0_group.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_get0_public_key.restype = ctypes.c_void_p
ssl.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_set_private_key.restype = ctypes.c_int
ssl.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_KEY_set_conv_form.restype = None
ssl.EC_KEY_set_conv_form.argtypes = [ctypes.c_void_p, ctypes.c_int]
ssl.EC_KEY_set_public_key.restype = ctypes.c_int
ssl.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.i2o_ECPublicKey.restype = ctypes.c_void_p
ssl.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p]

# EC_POINT allocation and scalar multiplication.
ssl.EC_POINT_new.restype = ctypes.c_void_p
ssl.EC_POINT_new.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_free.restype = None
ssl.EC_POINT_free.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_mul.restype = ctypes.c_int
ssl.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]

# this specifies the curve used with ECDSA.
NID_secp256k1 = 714 # from openssl/obj_mac.h

# Thx to Sam Devlin for the ctypes magic 64-bit fix.
def _check_result(val, func, args):
if val == 0:
raise ValueError
else:
return ctypes.c_void_p (val)
# Route EC_KEY_new_by_curve_name through _check_result so a NULL key comes
# back as a ValueError instead of a silently-null pointer.
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.errcheck = _check_result
class CECKey(object):
    """Wrapper around OpenSSL's EC_KEY"""

    # EC_KEY_set_conv_form point-conversion constants (openssl/ec.h).
    POINT_CONVERSION_COMPRESSED = 2
    POINT_CONVERSION_UNCOMPRESSED = 4

    def __init__(self):
        self.k = ssl.EC_KEY_new_by_curve_name(NID_secp256k1)

    def __del__(self):
        # 'ssl' may already be torn down during interpreter shutdown.
        if ssl:
            ssl.EC_KEY_free(self.k)
        self.k = None

    def set_secretbytes(self, secret):
        """Install the key pair derived from a 32-byte big-endian secret.

        Raises ValueError when the public point cannot be computed.
        """
        priv_key = ssl.BN_bin2bn(secret, 32, ssl.BN_new())
        group = ssl.EC_KEY_get0_group(self.k)
        pub_key = ssl.EC_POINT_new(group)
        ctx = ssl.BN_CTX_new()
        # pub = priv * G; EC_POINT_mul returns 0 on failure.
        if not ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx):
            raise ValueError("Could not derive public key from the supplied secret.")
        # Fix: the original issued a second, redundant EC_POINT_mul call here;
        # the public point was already computed by the checked call above.
        ssl.EC_KEY_set_private_key(self.k, priv_key)
        ssl.EC_KEY_set_public_key(self.k, pub_key)
        ssl.EC_POINT_free(pub_key)
        ssl.BN_CTX_free(ctx)
        return self.k

    def set_privkey(self, key):
        """Load a DER-encoded private key; returns OpenSSL's status result."""
        self.mb = ctypes.create_string_buffer(key)
        return ssl.d2i_ECPrivateKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))

    def set_pubkey(self, key):
        """Load a SEC-encoded public key; returns OpenSSL's status result."""
        self.mb = ctypes.create_string_buffer(key)
        return ssl.o2i_ECPublicKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))

    def get_privkey(self):
        """Return the private key serialized as DER bytes."""
        # First call with NULL buffer just sizes the output.
        size = ssl.i2d_ECPrivateKey(self.k, 0)
        mb_pri = ctypes.create_string_buffer(size)
        ssl.i2d_ECPrivateKey(self.k, ctypes.byref(ctypes.pointer(mb_pri)))
        return mb_pri.raw

    def get_pubkey(self):
        """Return the public key serialized in SEC format."""
        size = ssl.i2o_ECPublicKey(self.k, 0)
        mb = ctypes.create_string_buffer(size)
        ssl.i2o_ECPublicKey(self.k, ctypes.byref(ctypes.pointer(mb)))
        return mb.raw

    def get_raw_ecdh_key(self, other_pubkey):
        """Return the raw 32-byte ECDH shared secret with other_pubkey."""
        ecdh_keybuffer = ctypes.create_string_buffer(32)
        r = ssl.ECDH_compute_key(ctypes.pointer(ecdh_keybuffer), 32,
                                 ssl.EC_KEY_get0_public_key(other_pubkey.k),
                                 self.k, 0)
        if r != 32:
            raise Exception('CKey.get_ecdh_key(): ECDH_compute_key() failed')
        return ecdh_keybuffer.raw

    def get_ecdh_key(self, other_pubkey, kdf=lambda k: hashlib.sha256(k).digest()):
        # FIXME: be warned it's not clear what the kdf should be as a default
        r = self.get_raw_ecdh_key(other_pubkey)
        return kdf(r)

    def sign(self, hash):
        """Return a DER ECDSA signature over a 32-byte message hash."""
        # FIXME: need unit tests for below cases
        if not isinstance(hash, bytes):
            raise TypeError('Hash must be bytes instance; got %r' % hash.__class__)
        if len(hash) != 32:
            raise ValueError('Hash must be exactly 32 bytes long')

        sig_size0 = ctypes.c_uint32()
        sig_size0.value = ssl.ECDSA_size(self.k)
        mb_sig = ctypes.create_string_buffer(sig_size0.value)
        result = ssl.ECDSA_sign(0, hash, len(hash), mb_sig, ctypes.byref(sig_size0), self.k)
        assert 1 == result
        # ECDSA_sign updates sig_size0 with the actual signature length.
        return mb_sig.raw[:sig_size0.value]

    def verify(self, hash, sig):
        """Verify a DER signature"""
        return ssl.ECDSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1

    def set_compressed(self, compressed):
        """Choose compressed or uncompressed SEC serialization."""
        if compressed:
            form = self.POINT_CONVERSION_COMPRESSED
        else:
            form = self.POINT_CONVERSION_UNCOMPRESSED
        ssl.EC_KEY_set_conv_form(self.k, form)
class CPubKey(bytes):
    """An encapsulated public key

    Attributes:

    is_valid      - Corresponds to CPubKey.IsValid()
    is_fullyvalid - Corresponds to CPubKey.IsFullyValid()
    is_compressed - Corresponds to CPubKey.IsCompressed()
    """

    def __new__(cls, buf, _cec_key=None):
        self = super(CPubKey, cls).__new__(cls, buf)
        if _cec_key is None:
            _cec_key = CECKey()
        self._cec_key = _cec_key
        # set_pubkey returns 0 when OpenSSL rejects the encoded key.
        self.is_fullyvalid = _cec_key.set_pubkey(self) != 0
        return self

    @property
    def is_valid(self):
        # Any non-empty byte string is "valid" in the IsValid() sense.
        return len(self) > 0

    @property
    def is_compressed(self):
        # Compressed SEC encodings are exactly 33 bytes.
        return len(self) == 33

    def verify(self, hash, sig):
        """Verify a DER ECDSA signature over *hash* with this key."""
        return self._cec_key.verify(hash, sig)

    def __str__(self):
        return repr(self)

    def __repr__(self):
        # Always have represent as b'<secret>' so test cases don't have to
        # change for py2/3
        # Fix: compare interpreter versions numerically; the old lexical
        # comparison (sys.version > '3') depended on string ordering of the
        # full version banner.
        if sys.version_info[0] >= 3:
            return '%s(%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
        else:
            return '%s(b%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add a unique (user_id, type) constraint to auth_authenticator."""
        # Adding unique constraint on 'Authenticator', fields ['user', 'type']
        db.create_unique('auth_authenticator', ['user_id', 'type'])
def backwards(self, orm):
# Removing unique constraint on 'Authenticator', fields ['user', 'type']
db.delete_unique('auth_authenticator', ['user_id', 'type'])
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True'}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"},
'config': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2016, 9, 29, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dsymbundle': {
'Meta': {'object_name': 'DSymBundle'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'sdk': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymSDK']"})
},
'sentry.dsymobject': {
'Meta': {'object_name': 'DSymObject'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_path': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'db_index': 'True'}),
'vmaddr': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'vmsize': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'})
},
'sentry.dsymsdk': {
'Meta': {'object_name': 'DSymSDK', 'index_together': "[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"},
'dsym_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'sdk_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'version_build': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {'unique_together': "[('object', 'address')]", 'object_name': 'DSymSymbol'},
'address': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.environment': {
'Meta': {'unique_together': "(('project_id', 'name'),)", 'object_name': 'Environment'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project', 'ident'), ('project', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {'object_name': 'GlobalDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project', 'key', 'value'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdsymfile': {
'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('project_id', 'release_id', 'environment_id'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
}
}
complete_apps = ['sentry']
| |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting online file editing."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import base64
import cgi
import os
import urllib
import messages
import yaml
import appengine_config
from common import schema_fields
from controllers.utils import ApplicationHandler
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import courses
from models import roles
from models import transforms
from models import vfs
from modules.oeditor import oeditor
from google.appengine.api import users
# Set of string. The relative, normalized path bases we allow uploading of
# binary data into.
ALLOWED_ASSET_BINARY_BASES = frozenset([
    'assets/img',
])
# Set of string. The relative, normalized path bases we allow uploading of text
# data into.
ALLOWED_ASSET_TEXT_BASES = frozenset([
    'assets/css',
    'assets/lib',
    'views'
])
# Set of string. The relative, normalized path bases we allow uploading into.
# Union of the binary and text whitelists above.
ALLOWED_ASSET_UPLOAD_BASES = ALLOWED_ASSET_BINARY_BASES.union(
    ALLOWED_ASSET_TEXT_BASES)
# Int. Maximum allowed size of an uploaded asset, in kilobytes.
MAX_ASSET_UPLOAD_SIZE_K = 500
def is_editable_fs(app_context):
    """Returns True iff the context's file system is the datastore-backed one."""
    fs_class = app_context.fs.impl.__class__
    # Deliberate exact-class comparison (not isinstance): only a
    # DatastoreBackedFileSystem itself is treated as editable.
    return fs_class == vfs.DatastoreBackedFileSystem
def is_text_payload(payload):
    """Returns True when payload is serializable as text by transforms.dumps."""
    try:
        transforms.dumps(payload)
    except:  # All errors are equivalently bad. pylint: disable=bare-except
        return False
    return True
def is_readonly_asset(asset):
    """An asset with no (truthy) metadata attribute is considered read-only."""
    metadata = getattr(asset, 'metadata', None)
    return not metadata
def strip_leading_and_trailing_slashes(path_base):
    """Given a path base string of the form '/foo/bar/', return 'foo/bar'.

    Args:
        path_base: str. A relative path, possibly carrying leading and/or
            trailing '/' characters. Interior slashes are preserved.

    Returns:
        str. path_base with all leading and trailing '/' removed.
    """
    # str.strip(chars) removes the characters from both ends in one pass;
    # it is exactly equivalent to the former lstrip('/').rstrip('/') chain.
    return path_base.strip('/')
class FilesRights(object):
    """Manages view/edit rights for files."""

    @classmethod
    def _is_course_admin(cls, handler):
        # Every file permission in this module reduces to course-admin status.
        return roles.Roles.is_course_admin(handler.app_context)

    @classmethod
    def can_view(cls, handler):
        """Course admins may view files."""
        return cls._is_course_admin(handler)

    @classmethod
    def can_edit(cls, handler):
        """Course admins may edit files."""
        return cls._is_course_admin(handler)

    @classmethod
    def can_delete(cls, handler):
        """Deletion requires the same rights as editing."""
        return cls.can_edit(handler)

    @classmethod
    def can_add(cls, handler):
        """Adding requires the same rights as editing."""
        return cls.can_edit(handler)
class FileManagerAndEditor(ApplicationHandler):
    """An editor for editing and managing files."""
    # Read-only view of the files deployed with the app, rooted at '/'.
    # Used to check whether a datastore asset shadows a bundled original.
    local_fs = vfs.LocalReadOnlyFileSystem(logical_home_folder='/')
    def _get_delete_url(self, base_url, key, xsrf_token_name):
        """Returns base_url?key=...&xsrf_token=... with a fresh XSRF token."""
        return '%s?%s' % (
            self.canonicalize_url(base_url),
            urllib.urlencode({
                'key': key,
                # cgi.escape guards the token when it is echoed back into
                # generated HTML.
                'xsrf_token': cgi.escape(
                    self.create_xsrf_token(xsrf_token_name)),
            }))
    def _get_normalized_base(self):
        """Gets base arg from URL and normalizes it for membership checks."""
        base = self.request.get('base')
        assert base
        base = strip_leading_and_trailing_slashes(base)
        # Only whitelisted asset directories may be uploaded into.
        assert base in ALLOWED_ASSET_UPLOAD_BASES
        return base
    def post_create_or_edit_settings(self):
        """Handles creation or/and editing of course.yaml."""
        assert is_editable_fs(self.app_context)
        # Check if course.yaml exists; create if not.
        fs = self.app_context.fs.impl
        course_yaml = fs.physical_to_logical('/course.yaml')
        if not fs.isfile(course_yaml):
            fs.put(course_yaml, vfs.string_to_stream(
                courses.EMPTY_COURSE_YAML % users.get_current_user().email()))
        self.redirect(self.get_action_url('edit_settings', key='/course.yaml'))
    def get_edit_settings(self):
        """Shows editor for course.yaml."""
        key = self.request.get('key')
        exit_url = self.canonicalize_url('/dashboard?action=settings')
        rest_url = self.canonicalize_url('/rest/files/item')
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            FilesItemRESTHandler.SCHEMA_JSON,
            FilesItemRESTHandler.SCHEMA_ANNOTATIONS_DICT,
            key, rest_url, exit_url,
            required_modules=FilesItemRESTHandler.REQUIRED_MODULES)
        template_values = {}
        template_values['page_title'] = self.format_title('Edit Settings')
        template_values['page_description'] = messages.EDIT_SETTINGS_DESCRIPTION
        template_values['main_content'] = form_html
        self.render_page(template_values)
    def get_add_asset(self):
        """Show an upload dialog for assets."""
        key = self._get_normalized_base()
        exit_url = self.canonicalize_url('/dashboard?action=assets')
        rest_url = self.canonicalize_url(
            AssetItemRESTHandler.URI)
        # save_method='upload' makes the editor submit a multipart form;
        # auto_return=True sends the user back to exit_url afterwards.
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            AssetItemRESTHandler.SCHEMA_JSON,
            AssetItemRESTHandler.SCHEMA_ANNOTATIONS_DICT,
            key, rest_url, exit_url, save_method='upload', auto_return=True,
            required_modules=AssetItemRESTHandler.REQUIRED_MODULES,
            save_button_caption='Upload')
        template_values = {}
        template_values['page_title'] = self.format_title('Upload Asset')
        template_values['page_description'] = messages.UPLOAD_ASSET_DESCRIPTION
        template_values['main_content'] = form_html
        self.render_page(template_values)
    def get_delete_asset(self):
        """Show an review/delete page for assets."""
        uri = self.request.get('uri')
        exit_url = self.canonicalize_url('/dashboard?action=assets')
        rest_url = self.canonicalize_url(
            AssetUriRESTHandler.URI)
        delete_url = self._get_delete_url(
            FilesItemRESTHandler.URI, uri, 'delete-asset')
        # save_method='' disables the save button: this page is review-only.
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            AssetUriRESTHandler.SCHEMA_JSON,
            AssetUriRESTHandler.SCHEMA_ANNOTATIONS_DICT,
            uri, rest_url, exit_url, save_method='',
            delete_url=delete_url, delete_method='delete')
        template_values = {}
        template_values['page_title'] = self.format_title('View Asset')
        template_values['main_content'] = form_html
        self.render_page(template_values)
    def get_manage_text_asset(self):
        """Show an edit/save/delete/revert form for a text asset."""
        assert is_editable_fs(self.app_context)
        uri = self.request.get('uri')
        assert uri
        asset = self.app_context.fs.impl.get(
            os.path.join(appengine_config.BUNDLE_ROOT, uri))
        assert asset
        # A datastore-backed asset carries metadata; a purely bundled one
        # does not (see is_readonly_asset).
        asset_in_datastore_fs = not is_readonly_asset(asset)
        try:
            asset_in_local_fs = bool(self.local_fs.get(uri))
        except IOError:
            # Asset is not shipped with the app bundle.
            asset_in_local_fs = False
        exit_url = self.canonicalize_url('/dashboard?action=assets')
        rest_url = self.canonicalize_url(TextAssetRESTHandler.URI)
        delete_button_caption = 'Delete'
        delete_message = None
        delete_url = None
        if asset_in_datastore_fs:
            delete_message = 'Are you sure you want to delete %s?' % uri
            delete_url = self._get_delete_url(
                TextAssetRESTHandler.URI, uri,
                TextAssetRESTHandler.XSRF_TOKEN_NAME)
        # If the asset also exists in the bundled local FS, deleting the
        # datastore copy merely reverts to the original, so present the
        # action as a restore rather than a delete.
        if asset_in_local_fs:
            delete_message = (
                'Are you sure you want to restore %s to the original version? '
                'All your customizations will be lost.' % uri)
            delete_button_caption = 'Restore original'
        # Disable the save button if the payload is not text by setting method
        # to ''.
        save_method = 'put' if is_text_payload(asset.read()) else ''
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            TextAssetRESTHandler.SCHEMA.get_json_schema(),
            TextAssetRESTHandler.SCHEMA.get_schema_dict(),
            uri,
            rest_url,
            exit_url,
            delete_button_caption=delete_button_caption,
            delete_method='delete',
            delete_message=delete_message,
            delete_url=delete_url,
            required_modules=TextAssetRESTHandler.REQUIRED_MODULES,
            save_method=save_method,
        )
        self.render_page({
            'page_title': self.format_title('Edit ' + uri),
            'main_content': form_html,
        })
class TextAssetRESTHandler(BaseRESTHandler):
    """REST endpoints for text assets."""
    # Message shown (and saved as payload) when contents are binary.
    ERROR_MESSAGE_UNEDITABLE = (
        'Error: contents are not text and cannot be edited.')
    # inputex modules the oeditor form needs to render this schema.
    REQUIRED_MODULES = [
        'inputex-hidden',
        'inputex-textarea',
    ]
    SCHEMA = schema_fields.FieldRegistry('Edit asset', description='Text Asset')
    SCHEMA.add_property(schema_fields.SchemaField(
        'contents', 'Contents', 'text',
    ))
    SCHEMA.add_property(schema_fields.SchemaField(
        'is_text', 'Is Text', 'boolean', hidden=True,
    ))
    SCHEMA.add_property(schema_fields.SchemaField(
        'readonly', 'ReadOnly', 'boolean', hidden=True,
    ))
    URI = '/rest/assets/text'
    XSRF_TOKEN_NAME = 'manage-text-asset'
    def _check_asset_in_allowed_bases(self, filename):
        # Refuse to touch files outside the whitelisted asset directories.
        assert os.path.dirname(filename) in ALLOWED_ASSET_UPLOAD_BASES
    def delete(self):
        """Handles the delete verb."""
        assert is_editable_fs(self.app_context)
        filename = self.request.get('key')
        # XSRF check first; assert_xsrf_token_or_fail sends its own response.
        if not (filename and self.assert_xsrf_token_or_fail(
                self.request, self.XSRF_TOKEN_NAME, {'key': filename})):
            return
        if not FilesRights.can_delete(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': filename})
            return
        self._check_asset_in_allowed_bases(filename)
        self.app_context.fs.impl.delete(
            os.path.join(appengine_config.BUNDLE_ROOT, filename))
        transforms.send_json_response(self, 200, 'Done.')
    def get(self):
        """Handles the get verb."""
        assert FilesRights.can_edit(self)
        filename = self.request.get('key')
        assert filename
        asset = self.app_context.fs.impl.get(
            os.path.join(appengine_config.BUNDLE_ROOT, filename))
        assert asset
        contents = asset.read()
        is_text = is_text_payload(contents)
        # Binary contents are replaced by an explanatory message so the
        # editor still renders something sensible.
        if not is_text:
            contents = self.ERROR_MESSAGE_UNEDITABLE
        json_message = 'Success.' if is_text else self.ERROR_MESSAGE_UNEDITABLE
        json_payload = {
            'contents': contents,
            'is_text': is_text,
            'readonly': is_readonly_asset(asset),
        }
        transforms.send_json_response(
            self, 200, json_message, payload_dict=json_payload,
            xsrf_token=XsrfTokenManager.create_xsrf_token(self.XSRF_TOKEN_NAME))
    def put(self):
        """Handles the put verb."""
        assert is_editable_fs(self.app_context)
        request = self.request.get('request')
        assert request
        request = transforms.loads(request)
        payload = transforms.loads(request.get('payload'))
        filename = request.get('key')
        if not (filename and self.assert_xsrf_token_or_fail(
                request, self.XSRF_TOKEN_NAME, {'key': filename})):
            return
        if not FilesRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': filename})
            return
        self._check_asset_in_allowed_bases(filename)
        self.app_context.fs.impl.put(
            os.path.join(appengine_config.BUNDLE_ROOT, filename),
            vfs.string_to_stream(unicode(payload.get('contents'))))
        transforms.send_json_response(self, 200, 'Saved.')
class FilesItemRESTHandler(BaseRESTHandler):
    """Provides REST API for a file."""
    SCHEMA_JSON = """
    {
        "id": "Text File",
        "type": "object",
        "description": "Text File",
        "properties": {
            "key" : {"type": "string"},
            "encoding" : {"type": "string"},
            "content": {"type": "text"}
            }
    }
    """
    SCHEMA_DICT = transforms.loads(SCHEMA_JSON)
    SCHEMA_ANNOTATIONS_DICT = [
        (['title'], 'Text File'),
        (['properties', 'key', '_inputex'], {
            'label': 'ID', '_type': 'uneditable'}),
        (['properties', 'encoding', '_inputex'], {
            'label': 'Encoding', '_type': 'uneditable'}),
        (['properties', 'content', '_inputex'], {
            'label': 'Content', '_type': 'text'})]
    REQUIRED_MODULES = [
        'inputex-string', 'inputex-textarea', 'inputex-select',
        'inputex-uneditable']
    URI = '/rest/files/item'
    FILE_ENCODING_TEXT = 'text/utf-8'
    FILE_ENCODING_BINARY = 'binary/base64'
    # NOTE: 'EXTENTION' is a long-standing misspelling, but the name is part
    # of the class's public surface, so it is kept for compatibility.
    FILE_EXTENTION_TEXT = ['.js', '.css', '.yaml', '.html', '.csv']

    @classmethod
    def is_text_file(cls, filename):
        """Returns True iff filename ends with a known text extension."""
        # str.endswith accepts a tuple of suffixes, replacing the former
        # hand-written linear search with a single built-in call.
        return filename.endswith(tuple(cls.FILE_EXTENTION_TEXT))

    @classmethod
    def validate_content(cls, filename, content):
        """Parses content for validity; raises on malformed YAML."""
        # TODO(psimakov): handle more file types here
        if filename.endswith('.yaml'):
            yaml.safe_load(content)

    def get(self):
        """Handles REST GET verb and returns an object as JSON payload."""
        assert is_editable_fs(self.app_context)
        key = self.request.get('key')
        if not FilesRights.can_view(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return
        # Load data if possible.
        fs = self.app_context.fs.impl
        filename = fs.physical_to_logical(key)
        try:
            stream = fs.get(filename)
        except:  # pylint: disable=bare-except
            stream = None
        if not stream:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return
        # Prepare data: text files travel as UTF-8, everything else base64.
        entity = {'key': key}
        if self.is_text_file(key):
            entity['encoding'] = self.FILE_ENCODING_TEXT
            entity['content'] = vfs.stream_to_string(stream)
        else:
            entity['encoding'] = self.FILE_ENCODING_BINARY
            entity['content'] = base64.b64encode(stream.read())
        # Render JSON response; token named 'file-put' pairs with put() below.
        json_payload = transforms.dict_to_json(
            entity,
            FilesItemRESTHandler.SCHEMA_DICT)
        transforms.send_json_response(
            self, 200, 'Success.',
            payload_dict=json_payload,
            xsrf_token=XsrfTokenManager.create_xsrf_token(
                'file-put'))

    def put(self):
        """Handles REST PUT verb with JSON payload."""
        assert is_editable_fs(self.app_context)
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')
        if not self.assert_xsrf_token_or_fail(
                request, 'file-put', {'key': key}):
            return
        # TODO(psimakov): we don't allow editing of all files; restrict further
        if not FilesRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return
        payload = request.get('payload')
        entity = transforms.loads(payload)
        encoding = entity['encoding']
        content = entity['content']
        # Validate the file content; any parse/decode failure is reported
        # back as a 412 rather than raised.
        errors = []
        try:
            if encoding == self.FILE_ENCODING_TEXT:
                content_stream = vfs.string_to_stream(content)
            elif encoding == self.FILE_ENCODING_BINARY:
                content_stream = base64.b64decode(content)
            else:
                errors.append('Unknown encoding: %s.' % encoding)
            self.validate_content(key, content)
        except Exception as e:  # pylint: disable=W0703
            errors.append('Validation error: %s' % e)
        if errors:
            transforms.send_json_response(self, 412, ''.join(errors))
            return
        # Store new file content.
        fs = self.app_context.fs.impl
        filename = fs.physical_to_logical(key)
        fs.put(filename, content_stream)
        # Send reply.
        transforms.send_json_response(self, 200, 'Saved.')

    def delete(self):
        """Handles REST DELETE verb."""
        key = self.request.get('key')
        if not self.assert_xsrf_token_or_fail(
                self.request, 'delete-asset', {'key': key}):
            return
        if not FilesRights.can_delete(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return
        fs = self.app_context.fs.impl
        path = fs.physical_to_logical(key)
        if not fs.isfile(path):
            transforms.send_json_response(
                self, 403, 'File does not exist.', None)
            return
        fs.delete(path)
        transforms.send_json_response(self, 200, 'Deleted.')
class AssetItemRESTHandler(BaseRESTHandler):
    """Provides REST API for managing assets.

    GET returns the initial, empty payload for the asset-upload editor;
    POST accepts a multipart file upload and stores it under the selected
    base directory.
    """
    URI = '/rest/assets/item'
    # Editor schema: the (fixed) target 'base' directory plus the uploaded
    # 'file' itself.
    SCHEMA_JSON = """
    {
        "id": "Asset",
        "type": "object",
        "description": "Asset",
        "properties": {
            "base": {"type": "string"},
            "file": {"type": "string", "optional": true}
            }
    }
    """
    SCHEMA_ANNOTATIONS_DICT = [
        (['title'], 'Upload Asset'),
        (['properties', 'base', '_inputex'], {
            'label': 'Base', '_type': 'uneditable'}),
        (['properties', 'file', '_inputex'], {
            'label': 'File', '_type': 'file'})]
    # InputEx YUI modules needed to render the upload form.
    REQUIRED_MODULES = [
        'inputex-string', 'inputex-uneditable', 'inputex-file',
        'io-upload-iframe']
    def _can_write_payload_to_base(self, payload, base):
        """Determine if a given payload type can be put in a base directory."""
        # Binary data can go in images; text data can go anywhere else.
        if base in ALLOWED_ASSET_BINARY_BASES:
            return True
        else:
            return is_text_payload(payload) and base in ALLOWED_ASSET_TEXT_BASES
    def get(self):
        """Provides empty initial content for asset upload editor."""
        # TODO(jorr): Pass base URI through as request param when generalized.
        base = self.request.get('key')
        assert base in ALLOWED_ASSET_UPLOAD_BASES
        json_payload = {'file': '', 'base': base}
        transforms.send_json_response(
            self, 200, 'Success.', payload_dict=json_payload,
            xsrf_token=XsrfTokenManager.create_xsrf_token('asset-upload'))
    def post(self):
        """Handles asset uploads.

        Checks, in order: rights, XSRF token, allowed base directory,
        presence of a file part, no-overwrite, payload type, and size
        limit — only then writes the file.
        """
        assert is_editable_fs(self.app_context)
        if not FilesRights.can_add(self):
            transforms.send_file_upload_response(
                self, 401, 'Access denied.')
            return
        request = transforms.loads(self.request.get('request'))
        if not self.assert_xsrf_token_or_fail(request, 'asset-upload', None):
            return
        payload = transforms.loads(request['payload'])
        base = payload['base']
        assert base in ALLOWED_ASSET_UPLOAD_BASES
        upload = self.request.POST['file']
        # When no file was chosen the form field arrives as a plain string.
        if not isinstance(upload, cgi.FieldStorage):
            transforms.send_file_upload_response(
                self, 403, 'No file specified.')
            return
        # Keep only the basename; discard any client-supplied directories.
        filename = os.path.split(upload.filename)[1]
        assert filename
        physical_path = os.path.join(base, filename)
        fs = self.app_context.fs.impl
        path = fs.physical_to_logical(physical_path)
        if fs.isfile(path):
            transforms.send_file_upload_response(
                self, 403, 'Cannot overwrite existing file.')
            return
        # NOTE(review): the entire upload is read into memory to check its
        # type; oversized uploads are rejected only after this read.
        content = upload.file.read()
        if not self._can_write_payload_to_base(content, base):
            transforms.send_file_upload_response(
                self, 403, 'Cannot write binary data to %s.' % base)
            return
        # Rewind so fs.put() stores the file from the start.
        upload.file.seek(0)
        if len(content) > MAX_ASSET_UPLOAD_SIZE_K * 1024:
            transforms.send_file_upload_response(
                self, 403,
                'Max allowed file upload size is %dK' % MAX_ASSET_UPLOAD_SIZE_K)
            return
        fs.put(path, upload.file)
        transforms.send_file_upload_response(self, 200, 'Saved.')
class AssetUriRESTHandler(BaseRESTHandler):
    """Provides REST API for managing assets by means of their URIs."""
    # TODO(jorr): Refactor the asset management classes to have more meaningful
    # REST URI's and class names
    URI = '/rest/assets/uri'
    SCHEMA_JSON = """
    {
        "id": "Asset",
        "type": "object",
        "description": "Asset",
        "properties": {
            "uri": {"type": "string"}
            }
    }
    """
    SCHEMA_ANNOTATIONS_DICT = [
        (['title'], 'Image or Document'),
        (['properties', 'uri', '_inputex'], {
            'label': 'Asset',
            '_type': 'uneditable',
            'visu': {
                'visuType': 'funcName',
                'funcName': 'renderAsset'}})]
    def get(self):
        """Echoes the asset URI given in 'key' back to the caller as JSON."""
        asset_uri = self.request.get('key')
        if not FilesRights.can_view(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': asset_uri})
            return
        # The accompanying XSRF token is issued for the 'asset-delete' action.
        delete_token = XsrfTokenManager.create_xsrf_token('asset-delete')
        transforms.send_json_response(
            self, 200, 'Success.', payload_dict={'uri': asset_uri},
            xsrf_token=delete_token)
| |
# Libraries
import sys
import os
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from sqlalchemy import *
from sqlalchemy.orm import *
# My Imports
from databaseschema import *
from genericdelegates import *
from functions import *
import modelsandviews
import ui_forms.ui_prodprepform
# TODO: if 'pack' or 'volume' are changed here, ask whether they should also
# be updated in the database.
localTITLE = 'Production Prep'
# Column indices shared by the two table models below.  Columns 1-2 and 4-6
# mean different things in each model, hence the chained assignments:
#   BaseAssemblyModel: PD_ID, BATCH_ID, BASE, DESC, PERCENT, INFL,   WEIGHT
#   PrepDetailModel:   PD_ID, ITEM,     QTY,  DESC, PACK,    VOLUME, TOTAL
(PD_ID) = 0 #// brackets are only for looks here
(BATCH_ID, BASE) = (ITEM, QTY) = range(1, 3)
(DESC) = 3
(PERCENT, INFL) = (PACK, VOLUME) = range(4, 6)
(TOTAL) = WEIGHT = 6
### BaseAssembly Model =======================
class BaseAssemblyModel(QAbstractTableModel):
def __init__(self, session, parent=None):
super(BaseAssemblyModel, self).__init__(parent)
self.records = []
self.records.append(PrepAssembly())
self.session = session
def rowCount(self, index=QModelIndex()):
return len(self.records)
def columnCount(self, index=QModelIndex()):
return 7
def headerData(self, section, orientation, role):
if role != Qt.DisplayRole:
return QVariant()
if orientation == Qt.Horizontal:
if section == PD_ID:
return QVariant('PD ID')
elif section == BATCH_ID:
return QVariant('Batch ID')
elif section == BASE:
return QVariant('Base No')
elif section == DESC:
return QVariant('Description')
elif section == PERCENT:
return QVariant('Percent')
elif section == INFL:
return QVariant('Inflation')
elif section == WEIGHT:
return QVariant('Weight')
return QVariant(section + 1)
def flags(self, index):
flag = QAbstractTableModel.flags(self, index)
if index.column() not in (PD_ID, BATCH_ID, DESC, WEIGHT):
flag |=Qt.ItemIsEditable
return flag
def data(self, index, role=Qt.DisplayRole):
if not index.isValid() or not (0 <= index.row() < len(self.records)):
return QVariant()
record = self.records[index.row()]
column = index.column()
if role == Qt.DisplayRole:
if column == PD_ID:
return QVariant(record.pd_id)
elif column == BATCH_ID:
return QVariant(record.batch_id)
elif column == BASE:
base_no = dLookup(BaseHeader.base_no, BaseHeader.base_id == str(record.base_id))
return QVariant(base_no)
elif column == PERCENT:
if not record.percentage:
return QVariant(record.percentage)
return QVariant(round(float(getType(record.percentage)), 4))
elif column == DESC:
base_desc = dLookup(BaseHeader.base_desc, BaseHeader.base_id==record.base_id)
return QVariant(base_desc)
elif column == INFL:
if not record.inflation:
return QVariant(record.inflation)
return QVariant(round(float(getType(record.inflation)), 4))
elif column == WEIGHT:
if not record.weight:
return QVariant(record.weight)
return QVariant(round(float(getType(record.weight)), 4))
return QVariant()
def setData(self, index, value, role=Qt.EditRole):
if index.isValid() and role == Qt.EditRole:
record = self.records[index.row()]
column = index.column()
if column == PD_ID:
record.pd_id = value.toInt()[0]
elif column == BATCH_ID:
record.batch_id = value.toInt()[0]
elif column == BASE:
base_id = value.toInt()[0]
record.base_id = base_id
record.inflation = float(getType(dLookup(BaseHeader.inflation_factor, BaseHeader.base_id==base_id)))
elif column == PERCENT:
percent, ok = value.toFloat()
if ok:
percent = (percent / 100) if percent > 1 else percent
record.percentage = percent
elif column == INFL:
record.inflation = value.toFloat()[0]
elif column == WEIGHT:
record.weight = value.toFloat()[0]
self.emit(SIGNAL("dataChanged(QModelIndex, QModelIndex)"), index, index)
return True
return False
def insertRows(self, position, rows=1, index=QModelIndex(), item_id=None):
self.beginInsertRows(QModelIndex(), position, position + rows - 1)
for row in range(rows):
self.records.insert(position + row + 1, PrepAssembly(item_id))
self.endInsertRows()
return True
def removeRows(self, position, rows=1, index=QModelIndex()):
self.beginRemoveRows(QModelIndex(), position, position + rows - 1)
self.records = self.records[:position] + self.records[position + rows:]
self.endRemoveRows()
return True
### Data and Calculations =============
def getWeight(self, baseId):
""" takes a base id to use for the criteria, and returns a sum of the weight for the base. """
weight_query = self.session.query(func.sum(BaseDetail.bom_qty).label('sumQty')).join(BaseHeader).filter(BaseHeader.base_id==baseId)
for i in weight_query:
weight = float(getType(i.sumQty))
return weight
def calcIndWeight(self, literNeeded, pdId):
""" Create an array that looks like this:
BaseAssembly:
Base ID Base % factor Value |infl Weight
---------------------------------------- |-------------
3 BSRB 50 1.7 1364.5 |850 500 1000 for 1lt
53 FORG 100 1.7 6.8225 |4.25 2.5 2.5 for 500lt
1 BCRM 50 1.95 1364.5 |975 500 1000 for 1lt
23 FRVN 100 1.95 4.0935 |2.925 1.5 .75 for 250lt
---------------------------------------------------------
2739.92 1832.175 2008 - > if we multiply the value column by the inflation factor column we get 5000
liters needed 5000
multiplier 2.729
then we post the value column to the model
"""
calc_array = {}
base_id_list = []
row_index = 0
#// construct a list of bases to work with, add an index so we know where to post the resulting value
for i in self.records:
if i.pd_id == pdId:
base_id_list += [(row_index, i.base_id, i.percentage, i.inflation)]
row_index += 1
#// construct the above array
for line in base_id_list:
row_index, base_id, percent, inflation = line
percent = 1 if not percent else float(getType(percent))
inflation = 1 if not inflation else float(getType(inflation))
if base_id:
where_cond = BaseHeader.base_id==base_id
calc_array.setdefault(row_index, {})
internal = calc_array[row_index]
base_no = dLookup(BaseHeader.base_no, where_cond)
internal['base_id'] = base_id
internal['base'] = base_no
internal['percent'] = percent
internal['inflation'] = inflation
#// lets make the base a default arbitrary value of 1000 so we know how much flavour to use.
weight = 1000 * percent
#// get base weight for flavour
weight_flavor = self.getWeight(base_id)
#// lets check to see if base is a flavour base, in that case w'ill calculate the weight as follows:
#// the actual weight times the intended weight divided by the base weight which is 1000 * percent
#// Ex. actual weight = 2.5 and its intended for 250lt of mix but we r doing it for a 1000lt so = 2.5 * (1000/250)
base_type = dLookup(BaseHeader.base_type, where_cond)
if base_type == 'Flavor':
base_volume = dLookup(BaseHeader.base_volume, where_cond)
weight = weight_flavor * (weight / float(getType(base_volume)))
inf_weight = weight * inflation
internal['weight'] = weight
internal['inf_weight'] = inf_weight
print row_index, internal
liter_got = sum(calc_array[i]['inf_weight'] for i in calc_array.keys())
multiplier = literNeeded / nonZero(liter_got, 1)
#// now that we have the multiplier, lets post the value to model
for key in calc_array.keys():
weight_needed = round(calc_array[key]['weight'] * multiplier, 4)
index = self.index(key, WEIGHT)
self.setData(index, QVariant(weight_needed))
print 'Result: ', liter_got, multiplier
def createBatches(self, date, prep_id):
""" returns a list of batch headers and details to record to database. """
batches = details = []
to_create_list = []
got_it = []
#// condense the list into a single base and sum of weight
#// have a list where u store the bases already in the new list, for reference not take it again
for i in self.records:
if i.base_id and i.base_id not in got_it:
total_volume = sum(r.weight for r in self.records if r.base_id==i.base_id)
inflation = i.inflation
to_create_list += [(i.base_id, total_volume, inflation)]
got_it += [i.base_id]
#// now that we have the list of what we need to create, lets create batches.
id_increment = 1
for base in to_create_list:
#// create header instance
base_id = base[0]
where_cond = BaseHeader.base_id==base_id
batch_id = dMax(BatchHeader.batch_id) + id_increment
batch_date = date
base_volume = dLookup(BaseHeader.base_volume, where_cond)
multiple = 1
inflation_factor = base[2]
batch_memo = None
batches += [BatchHeader(batch_id, base_id, batch_date, unicode(base_volume), unicode(multiple),
unicode(inflation_factor), batch_memo, None, prep_id)]
#// create detail instance, first get the details from base table, then transfer it to batch table
#// but first lets get a multiplier to use to multiply qty
base_weight = self.getWeight(base_id)
weight_needed = base[1]
multiplier = weight_needed / nonZero(base_weight, 1)
detail_query = self.session.query(BaseDetail).filter(BaseDetail.base_id==base_id)
for item in detail_query:
bom_id = item.bom_id
cost = None
bom_qty = float(getType(item.bom_qty)) * multiplier
details += [BatchDetail(batch_id, bom_id, cost, unicode(bom_qty))]
#// update the model with batch id
base_no = dLookup(BaseHeader.base_no, BaseHeader.base_id==base_id)
begin_index = self.index(0, BASE)
index_list = self.match(begin_index, Qt.DisplayRole, QVariant(base_no), hits=-1)
for i in index_list:
index = self.index(i.row(), BATCH_ID)
self.setData(index, QVariant(batch_id))
id_increment += 1
return (batches, details)
### Operations ============
def sort(self):
self.records.sort(key=lambda item: item.pd_id, reverse=True)
def load(self, itemList, clear=True, itemId=None):
""" loads data in BaseAssembly Model.
Takes 'ItemList' a list of data to load
Takes 'Clear' a boolean if True will clear all data already in the model, Default is True
Takes 'ItemId' to load into item_id column """
#// Prepare variables for later use
item_id = itemId
add = added = False
#// clear is True then clear the model properly.
if clear:
self.beginResetModel()
self.records = []
self.endResetModel()
#// if the list to load comes with an itemId,
#// it means that its a detial id for instances pd_id. in that case delete all records that don't have an id
#// as not to accumulate empty rows, looks ugly on table view
if itemId:
for rec in self.records:
if not rec.pd_id:
i = self.records.index(rec)
self.records.pop(i)
#// now lets load up the model
for item in itemList:
#// if no itemId was passed
if not itemId:
#// then check if the item_id was changed
if item_id != item.pd_id:
#// if it changed add a new empty line
add = True
#// keep track of the item_id for verification later if change
item_id = item.pd_id
#// finally lets get the list elements and get them ready for importing into the model
#// if the list comes when a new item is added to detail, then it will be a list from base assembly,
#// then there wont be a batch id
try:
batch_id = item.batch_id
except AttributeError:
batch_id = None
#// if the list is being passed when doing a recall, then it will come from prepAssembly table,
#// then it wont have a base id, and w'ill need to look it up from the batch_header table
try:
base_id = item.base_id
except AttributeError:
if batch_id:
base_id = dLookup(BatchHeader.base_id, BatchHeader.batch_id==batch_id)
else:
base_id = None
try:
infl = item.inflation
except AttributeError:
if base_id:
infl = float(getType(dLookup(BaseHeader.inflation_factor, BaseHeader.base_id==base_id)))
else:
infl = None
percentage = item.percentage
self.records.append(PrepAssembly(item_id, batch_id, percentage, infl, base_id))
#// we said before to add an empty line, here is where we actually listen
if add:
self.records.append(PrepAssembly(item_id)) # add an empty line, every time the item_id changes
add = False
#// we tell him that we have already added a line, so he doesnt need to do it later
added = True
#// there are no individual empty lines for each item_id, add one empty line at the end
if not added:
self.records.append(PrepAssembly(item_id))
self.sort()
def save(self):
records_ = []
for rec in self.records:
if rec.base_id:
records_ += [PrepAssembly(rec.pd_id, rec.batch_id, unicode(rec.percentage), unicode(rec.inflation))]
return records_
def clear(self):
self.beginResetModel()
self.records = []
self.records.append(PrepAssembly())
self.endResetModel()
def copy(self, indexList):
clipboard = QApplication.clipboard()
clipText = QString()
indexList.sort()
previous = indexList[0]
for current in indexList:
text = self.data(current, Qt.DisplayRole).toString()
if current.row() != previous.row():
clipText.append('\n')
else:
clipText.append('\t')
clipText.append(text)
previous = current
clipText.remove(0, 1)
clipboard.setText(clipText)
### Detail Model Setup ===========================================
class PrepDetailModel(QAbstractTableModel):
    """Qt table model for production-prep detail lines (items and quantities).

    Columns are PD_ID, ITEM, QTY, DESC, PACK, VOLUME and TOTAL; rows are
    PrepDetail records.  'assemModel' is the BaseAssemblyModel whose batch
    weights are recalculated from these lines.
    """

    def __init__(self, assemModel, parent=None):
        super(PrepDetailModel, self).__init__(parent)
        self.records = []
        # Start with a single blank row so the view always shows an entry line.
        self.records.append(PrepDetail())
        self.assemblyModel = assemModel

    def rowCount(self, index=QModelIndex()):
        return len(self.records)

    def columnCount(self, index=QModelIndex()):
        # PD_ID, ITEM, QTY, DESC, PACK, VOLUME, TOTAL
        return 7

    def headerData(self, section, orientation, role):
        """Column titles horizontally; 1-based row numbers vertically."""
        if role != Qt.DisplayRole:
            return QVariant()
        if orientation == Qt.Horizontal:
            if section == PD_ID:
                return QVariant('PD ID')
            elif section == ITEM:
                return QVariant('Item')
            elif section == QTY:
                return QVariant('Qty')
            elif section == DESC:
                return QVariant('Description')
            elif section == PACK:
                return QVariant('Pack')
            elif section == VOLUME:
                return QVariant('Volume')
            elif section == TOTAL:
                return QVariant('Total')
        return QVariant(section + 1)

    def flags(self, index):
        # Identifier and derived columns stay read-only.
        flag = QAbstractTableModel.flags(self, index)
        if index.column() not in (PD_ID, DESC, TOTAL):
            flag |= Qt.ItemIsEditable
        return flag

    def data(self, index, role=Qt.DisplayRole):
        """Display values; ITEM and DESC are resolved from the Items table."""
        if not index.isValid() or not (0 <= index.row() < len(self.records)):
            return QVariant()
        record = self.records[index.row()]
        column = index.column()
        if role == Qt.DisplayRole:
            if column == PD_ID:
                return QVariant(record.pd_id)
            elif column == ITEM:
                item_id = record.item_id
                item_no = dLookup(Items.item_no, Items.item_id==item_id)
                return QVariant(item_no)
            elif column == QTY:
                if not record.qty:
                    return QVariant(record.qty)
                return QVariant(round(record.qty, 2))
            elif column == DESC:
                item_desc = dLookup(Items.item_desc, Items.item_id==record.item_id)
                return QVariant(item_desc)
            elif column == PACK:
                if not record.pack:
                    return QVariant()
                return QVariant(round(record.pack, 2))
            elif column == VOLUME:
                if not record.volume:
                    return QVariant()
                return QVariant(round(record.volume, 2))
            elif column == TOTAL:
                if not record.total:
                    return QVariant()
                return QVariant(round(record.total, 2))
        elif role == Qt.EditRole:
            # Editors receive the raw item id rather than the display number.
            if column == ITEM:
                return QVariant(record.item_id)
        return QVariant()

    def setData(self, index, value, role=Qt.EditRole):
        """Writes an edited cell back; QTY/VOLUME edits refresh the total."""
        if index.isValid() and role==Qt.EditRole:
            record = self.records[index.row()]
            column = index.column()
            if column == PD_ID:
                record.pd_id = value.toInt()[0]
            elif column == ITEM:
                if not record.pd_id:
                    # PERF FIX: fetch a fresh id only when the row needs one;
                    # previously dMax() queried the database on *every* edit
                    # of every column.
                    record.pd_id = dMax(PrepDetail.pd_id) + 1 + index.row()
                item_id = value.toInt()[0]
                record.item_id = item_id
                # Selecting an item pulls in its master pack and volume.
                record.pack = float(getType(dLookup(Items.pack, Items.item_id==item_id)))
                record.volume = float(getType(dLookup(Items.volume, Items.item_id==item_id)))
            elif column == QTY:
                qty, ok = value.toFloat()
                if ok:
                    record.qty = qty
                    record.total = self.calcTotal(record)
            elif column == PACK:
                record.pack = value.toFloat()[0]
            elif column == VOLUME:
                record.volume = value.toFloat()[0]
                record.total = self.calcTotal(record)
            self.emit(SIGNAL("dataChanged(QModelIndex, QModelIndex)"), index, index)
            return True
        return False

    def insertRows(self, position, rows=1, index=QModelIndex()):
        # NOTE(review): records are inserted at position + row + 1 (below the
        # current row) while beginInsertRows announces 'position'; the UI
        # relies on this insert-below behaviour, so it is kept as-is.
        self.beginInsertRows(QModelIndex(), position, position + rows - 1)
        for row in range(rows):
            self.records.insert(position + row + 1, PrepDetail())
        self.endInsertRows()
        return True

    def removeRows(self, position, rows=1, index=QModelIndex()):
        self.beginRemoveRows(QModelIndex(), position, position + rows - 1)
        self.records = self.records[:position] + self.records[position + rows:]
        self.endRemoveRows()
        return True

    ### Data and Calculations =============
    def calcTotal(self, record):
        """Returns qty * volume for a record (values converted via getType)."""
        qty = float(getType(record.qty))
        volume = float(getType(record.volume))
        total = qty * volume
        return total

    def recalcModel(self):
        """Recomputes every row's total; signalled as a full model reset."""
        self.beginResetModel()
        for rec in self.records:
            rec.total = self.calcTotal(rec)
        self.endResetModel()

    def getTotals(self):
        """Returns (sum of qty, sum of qty * volume) over all rows."""
        sum_qty = sum(nonZero(i.qty) for i in self.records)
        sum_total = sum(nonZero(i.qty) * nonZero(i.volume) for i in self.records)
        return (sum_qty, sum_total)

    ### Model Operations ==================
    def calcAssemblyWeight(self):
        """Pushes each line's total liters into the assembly model's weights."""
        for record in self.records:
            if record.item_id:
                pdId = record.pd_id
                literNeeded = record.total if record.total else 1
                self.assemblyModel.calcIndWeight(literNeeded, pdId)

    def copy(self, indexList):
        """Copies selected cells to the clipboard, tab/newline separated.

        Assumes indexList is non-empty (callers guard against empty models).
        """
        clipboard = QApplication.clipboard()
        clipText = QString()
        indexList.sort()
        previous = indexList[0]
        for current in indexList:
            text = self.data(current, Qt.DisplayRole).toString()
            if current.row() != previous.row():
                clipText.append('\n')
            else:
                clipText.append('\t')
            clipText.append(text)
            previous = current
        # Drop the separator emitted before the very first cell.
        clipText.remove(0, 1)
        clipboard.setText(clipText)

    def paste(self, position, index=QModelIndex()):
        """Inserts clipboard rows (item_no, qty, pack, volume columns,
        tab-separated) at 'position'.  Returns the number of rows inserted.
        """
        myList = []
        clipboard = QApplication.clipboard()
        text = clipboard.text()
        rows = text.split('\n')
        id_count = position
        for rec in rows:
            col = rec.split('\t')
            id_count += 1
            # NOTE(review): the id is seeded from PrepAssembly.pd_id, not
            # PrepDetail.pd_id — verify this is intended.
            pd_id = dMax(PrepAssembly.pd_id) + id_count
            item_id = dLookup(Items.item_id, Items.item_no==str(col[0]))
            qty = pack = volume = None
            var_list = [qty, pack, volume]
            if item_id:
                for i in range(len(var_list)):
                    try:
                        var_list[i] = float(getType(col[i + 1]))
                    except ValueError:
                        var_list[i] = None
                    except IndexError:
                        continue
                qty, pack, volume = var_list
                # Fall back to the item's master pack/volume when absent.
                pack = float(getType(dLookup(Items.pack, Items.item_id==item_id))) if not pack else pack
                volume = float(getType(dLookup(Items.volume, Items.item_id==item_id))) if not volume else volume
                myList += [PrepDetail(pd_id, item_id, qty, pack, volume)]
        rowCount = len(myList)
        self.beginInsertRows(QModelIndex(), position, position + rowCount - 1)
        for row in range(rowCount):
            self.records.insert(position + row, myList[row])
        self.endInsertRows()
        self.recalcModel()
        return rowCount

    def load(self, recordList):
        """Replaces the model contents with PrepDetail copies of recordList."""
        self.beginResetModel()
        self.records = []
        self.endResetModel()
        for record in recordList:
            self.records.append(PrepDetail(record.pd_id, record.item_id, float(getType(record.qty)),
                                           float(getType(record.pack)), float(getType(record.volume))))
        self.recalcModel()

    def save(self, header_id):
        """Returns persistable PrepDetail copies (rows with an item only)."""
        records_ = []
        for record in self.records:
            if record.item_id:
                records_ += [PrepDetail(record.pd_id, record.item_id, unicode(record.qty),
                                        unicode(record.pack), unicode(record.volume), header_id)]
        return records_

    def clear(self):
        """Resets the model back to a single blank row."""
        self.beginResetModel()
        self.records = []
        self.records.append(PrepDetail())
        self.endResetModel()
### Form Setup ===================================================
class ProductionPrep(QDialog, ui_forms.ui_prodprepform.Ui_ProdPrep):
def __init__(self, itemModel, baseModel, parent=None):
super(ProductionPrep, self).__init__(parent)
self.setupUi(self)
self.my_parent = parent
self.record_id = None
self.dirty = False
self.editing = False
self.session = Session()
if self.my_parent:
self.date_dateEdit.setDate(self.my_parent.getDate())
self.v_prepNo_label.setText(str(dMax(PrepHeader.prep_id) + 1))
self.assemblyModel = BaseAssemblyModel(self.session)
self.detailModel = PrepDetailModel(self.assemblyModel)
self.detail_tableView.setModel(self.detailModel)
delegate = GenericDelegate(self)
delegate.insertDelegate(ITEM, ComboDelegate(itemModel, True))
delegate.insertDelegate(QTY, NumberDelegate())
delegate.insertDelegate(PACK, NumberDelegate())
delegate.insertDelegate(VOLUME, NumberDelegate())
self.detail_tableView.setItemDelegate(delegate)
self.detail_tableView.setColumnHidden(PD_ID, True)
self.detail_tableView.setColumnWidth(ITEM, 75)
self.detail_tableView.setColumnWidth(QTY, 75)
self.detail_tableView.setColumnWidth(DESC, 250)
self.detail_tableView.setColumnWidth(PACK, 50)
self.detail_tableView.setColumnWidth(VOLUME, 50)
self.detail_tableView.horizontalHeader().setStretchLastSection(True)
self.assemblyProxyModel = QSortFilterProxyModel()
self.assemblyProxyModel.setFilterKeyColumn(0)
self.assemblyProxyModel.setSourceModel(self.assemblyModel)
self.batch_tableView.setModel(self.assemblyProxyModel)
batch_dlg = GenericDelegate(self)
batch_dlg.insertDelegate(BASE, ComboDelegate(baseModel, True))
batch_dlg.insertDelegate(PERCENT, NumberDelegate())
batch_dlg.insertDelegate(INFL, NumberDelegate())
self.batch_tableView.setItemDelegate(batch_dlg)
self.batch_tableView.verticalHeader().setVisible(False)
self.batch_tableView.setColumnHidden(PD_ID, True)
self.batch_tableView.setColumnHidden(BATCH_ID, True)
self.batch_tableView.setColumnWidth(BASE, 75)
self.batch_tableView.setColumnWidth(DESC, 300)
self.batch_tableView.setColumnWidth(PERCENT, 50)
self.batch_tableView.setColumnWidth(INFL, 50)
self.batch_tableView.horizontalHeader().setStretchLastSection(True)
self.detailModel.dataChanged.connect(self.addAssemblies)
self.detailModel.dataChanged.connect(lambda: self.autoAddRow(self.detail_tableView, self.detailModel))
self.detail_tableView.selectionModel().currentRowChanged.connect(self.setFilter)
self.detail_tableView.doubleClicked.connect(self.editItem)
self.date_dateEdit.dateChanged.connect(self.setParentDate)
self.newButton.clicked.connect(self.clear)
self.calcButton.clicked.connect(self.recalc)
self.saveButton.clicked.connect(self.save)
self.deleteButton.clicked.connect(lambda: self.delete(header=True))
self.closeButton.clicked.connect(self.accept)
### Form Behaviour ============================================
def setDirty(self):
self.updateSumTotals()
self.dirty = True
self.setWindowTitle("%s - Editing..." % localTITLE)
def setParentDate(self):
date = self.date_dateEdit.date().toPyDate()
self.my_parent.setDate(date)
def contextMenuEvent(self, event):
menu = QMenu(self)
if self.detail_tableView.hasFocus():
view = self.detail_tableView
model = self.detailModel
copyAction = menu.addAction('Copy', QObject, 'Ctrl+C')
pasteAction = menu.addAction('Paste', QObject, 'Ctrl+V')
insertAction = menu.addAction("Insert Line", QObject, "Ctrl+I")
deleteAction = menu.addAction("Delete Line", QObject, "Ctrl+D")
copyAction.triggered.connect(lambda: self.copy(view, model))
pasteAction.triggered.connect(lambda: self.paste(view, model))
insertAction.triggered.connect(lambda: self.insertRow(view, model))
deleteAction.triggered.connect(lambda: self.removeRow(view, model))
addActions(self, view, (insertAction, deleteAction))
elif self.batch_tableView.hasFocus():
view = self.batch_tableView
model = self.assemblyModel
copyAction = menu.addAction('Copy', QObject, 'Ctrl+C')
insertAction = menu.addAction("Insert Line", QObject, "Ctrl+I")
deleteAction = menu.addAction("Delete Line", QObject, "Ctrl+D")
copyAction.triggered.connect(lambda: self.copy(view, model))
insertAction.triggered.connect(lambda: self.insertRow(view, model))
deleteAction.triggered.connect(lambda: self.removeRow(view, model))
menu.exec_(event.globalPos())
def autoAddRow(self, view, model):
self.setDirty()
row = view.currentIndex().row()
if model.rowCount() == row + 1:
self.insertRow(view, model)
def insertRow(self, view, model):
if view is not None:
index = view.currentIndex()
row = index.row() + 1
if model == self.assemblyModel:
pd_id, ok = self.fgd_id()
if ok:
model.insertRows(row, item_id=pd_id)
self.setFilter()
else:
model.insertRows(row)
view.setFocus()
view.setCurrentIndex(index)
def removeRow(self, view, model):
rowsSelected = view.selectionModel().selectedRows()
if not rowsSelected:
row = view.currentIndex().row()
rows = 1
else:
for i in rowsSelected:
row = i.row()
rows = len(rowsSelected)
row = row - rows + 1
if model == self.assemblyModel:
proxy_index = self.assemblyProxyModel.index(view.currentIndex().row(), 0)
row = self.assemblyProxyModel.mapToSource(proxy_index).row()
rows = 1
model.removeRows(row, rows)
if model.rowCount() < 1:
self.insertRow(view, model)
self.setDirty()
self.updateSumTotals()
def copy(self, view, model):
if model.rowCount() <= 1:
return
selectedItems = view.selectionModel().selectedIndexes()
model.copy(selectedItems)
def paste(self, view, model):
row = view.currentIndex().row()
rows = model.paste(row)
for i in range(rows):
index = view.model().index(i + row, 1)
view.setCurrentIndex(index)
self.addAssemblies(recall=False)
self.updateSumTotals()
def fgd_id(self):
index = self.detail_tableView.currentIndex()
row = index.row()
idIndex = self.detailModel.index(row, 0)
fgd_id, ok = self.detailModel.data(idIndex, Qt.DisplayRole).toInt()
return (fgd_id, ok)
def deleteAssemblies(self, fgd_id):
beginIndex = self.assemblyModel.index(0, 0)
baseIndexList = self.assemblyModel.match(beginIndex, Qt.DisplayRole, QVariant(fgd_id), hits=-1)
if not baseIndexList:
return
while baseIndexList:
position = baseIndexList[0].row()
self.assemblyModel.removeRows(position)
baseIndexList = self.assemblyModel.match(beginIndex, Qt.DisplayRole, QVariant(fgd_id))
def addAssemblies(self, recall=False):
if recall == True:
baseList = self.session.query(PrepAssembly).join(PrepDetail).filter(PrepDetail.header_id==self.record_id)
clear = True
fgd_id = None
else:
index = self.detail_tableView.currentIndex()
if not index.column() == 1:
return
row = index.row()
myIndex = self.detailModel.index(row, 1)
fgd_id = self.fgd_id()[0]
fg_num = str(self.detailModel.data(myIndex, Qt.DisplayRole).toString())
fg_id = dLookup(Items.item_id, Items.item_no==fg_num)
self.deleteAssemblies(fgd_id)
baseList = self.session.query(BaseAssembly).filter(BaseAssembly.item_id==fg_id)
clear = False
self.assemblyModel.load(baseList, clear, fgd_id)
self.assemblyProxyModel.reset()
self.setFilter()
def setFilter(self):
fgd_id = self.fgd_id()[0]
self.assemblyProxyModel.setFilterFixedString(str(fgd_id))
def editItem(self):
row = self.detail_tableView.currentIndex().row()
index = self.detailModel.index(row, ITEM)
item_id = self.detailModel.data(index, Qt.EditRole).toString()
if not item_id:
return
form = self.my_parent.itemForm()
form.recall(1, str(item_id))
### Data and calculations ==================================
def updateModel(self):
self.detailModel.recalcModel()
def updateSumTotals(self):
sum_qty, sum_total = self.detailModel.getTotals()
self.v_totalQty_label.setText(str(sum_qty))
self.v_totalLiters_label.setText(str(sum_total))
def recalc(self):
self.detailModel.calcAssemblyWeight()
### Form Operations ========================================
def recall(self, prep_id):
if self.dirty:
answer = QMessageBox.question(self, "Editing - %s" % localTITLE, "Would you like to save your data?",
QMessageBox.Yes| QMessageBox.Discard| QMessageBox.Cancel)
if answer == QMessageBox.Cancel:
return
elif answer == QMessageBox.Yes:
self.save()
self.record_id = prep_id
self.v_prepNo_label.setText(str(prep_id))
records = self.session.query(PrepHeader).filter(PrepHeader.prep_id==prep_id)
for record in records:
self.date_dateEdit.setDate(record.prep_date)
self.note_lineEdit.setText(str(record.prep_memo))
details = self.session.query(PrepDetail).filter(PrepDetail.header_id==prep_id)
self.detailModel.load(details)
self.addAssemblies(recall=True)
self.editing = True
self.updateSumTotals()
def save(self):
self.recalc()
if not self.assemblyModel.save():
QMessageBox.information(self, 'Saving - %s' % localTITLE, 'No assemblies found', QMessageBox.Ok)
self.dirty = False
return
prep_id = self.record_id
prep_date = self.date_dateEdit.date().toPyDate()
prep_memo = str(self.note_lineEdit.text())
if self.editing:
self.delete(header=False)
self.session.query(PrepHeader).filter(PrepHeader.prep_id==prep_id).update({'prep_date': prep_date, 'prep_memo': prep_memo})
else:
prep_id = dMax(PrepHeader.prep_id) + 1
self.session.add(PrepHeader(prep_id, prep_date, prep_memo))
batches, details = self.assemblyModel.createBatches(prep_date, prep_id)
prep_details = self.detailModel.save(prep_id)
assembly_details = self.assemblyModel.save()
self.session.add_all(prep_details)
self.session.add_all(assembly_details)
self.session.add_all(batches)
self.session.add_all(details)
self.sendToDB()
self.dirty = False
self.editing = True
self.my_parent.refreshModels()
self.setWindowTitle('%s - (Data Saved)' % localTITLE)
    def delete(self, header=True):
        """Delete the current prep's child rows and, optionally, its header.

        header=True  : user-initiated delete; confirm, then remove everything
                       and refresh/clear the form.
        header=False : internal use from save(); clears child rows only so the
                       header can be updated and children re-inserted.
        """
        if not self.record_id:
            return
        prep_id = self.record_id
        #// if function was called by user, header is probably true, ask if user is sure you never know.
        if header:
            # A prep already consumed by a production may not be deleted.
            prod_id = dLookup(PrepHeader.prod_id, PrepHeader.prep_id==prep_id)
            if prod_id != 'None':
                QMessageBox.information(self, "Delete - %s" % localTITLE, "This preparation is already used for a production \n Can't Delete",
                                        QMessageBox.Ok)
                return
            answer = QMessageBox.question(self, "Delete - %s" % localTITLE, "Are you sure you " \
                                          "want to delete Prep: %s:, %s" % (self.v_prepNo_label.text(),
                                                                            self.date_dateEdit.date().toPyDate()),
                                          QMessageBox.Yes| QMessageBox.No, QMessageBox.NoButton)
            if answer == QMessageBox.No:
                return
            self.session.query(PrepHeader).filter(PrepHeader.prep_id==prep_id).delete()
        #// delete the assemblies
        dtl_qry = self.session.query(PrepDetail.pd_id).filter(PrepDetail.header_id==prep_id)
        self.session.query(PrepAssembly).filter(PrepAssembly.pd_id.in_(dtl_qry)).delete('fetch')
        #// lets delete the details
        self.session.query(PrepDetail).filter(PrepDetail.header_id==prep_id).delete()
        #// lets delete the batches that was created by this prep
        btch_qry = self.session.query(BatchHeader.batch_id).filter(BatchHeader.prep_id==prep_id).subquery()
        self.session.query(BatchDetail).filter(BatchDetail.base_id.in_(btch_qry)).delete('fetch')
        self.session.query(BatchHeader).filter(BatchHeader.prep_id==prep_id).delete()
        if header:
            # Only commit and reset the form for a user-initiated delete;
            # save() handles its own commit when header=False.
            self.sendToDB()
            self.my_parent.refreshModels()
            self.clear()
def sendToDB(self):
try:
self.session.flush
self.session.commit()
except Exception, e:
self.session.rollback()
raise e
    def clear(self):
        """Reset the form to a blank 'new preparation' state."""
        # Show the prep number that save() would assign next.
        self.v_prepNo_label.setText(str(dMax(PrepHeader.prep_id) + 1))
        self.v_totalLiters_label.clear()
        self.v_totalQty_label.clear()
        self.note_lineEdit.clear()
        self.detailModel.clear()
        self.assemblyModel.clear()
        if defaultDate() == 'current':
            self.date_dateEdit.setDate(QDate.currentDate())
        self.dirty = False
        self.editing = False
        self.setWindowTitle(localTITLE)
    def reject(self):
        # Route Esc/window-close through accept() so the unsaved-changes
        # prompt always runs before the dialog closes.
        self.accept()
    def accept(self):
        """Close the dialog, prompting to save if there are unsaved changes."""
        if self.dirty:
            answer = QMessageBox.question(self, "Editing - %s" % localTITLE, "Would you like to save your data?",
                                          QMessageBox.Yes| QMessageBox.No| QMessageBox.Cancel)
            if answer == QMessageBox.Cancel:
                # User aborted the close; keep the dialog open.
                return
            elif answer == QMessageBox.No:
                QDialog.accept(self)
            elif answer == QMessageBox.Yes:
                self.save()
                QDialog.accept(self)
        # NOTE(review): when the form is not dirty, QDialog.accept(self) does
        # not appear to be called before formClosed() — confirm this is the
        # intended close path.
        self.my_parent.formClosed()
if __name__ == '__main__':
    # Manual test harness: run the preparation form standalone against the
    # local production database.
    app = QApplication(sys.argv)
    setupDatabase("Production.sqlite")
    item = modelsandviews.ItemModel('Items')
    base = modelsandviews.BaseListModel()
    form = ProductionPrep(item, base)
    form.show()
    app.exec_()
| |
import hashlib
import random
import string
import transaction
from cryptacular.bcrypt import BCRYPTPasswordManager
from pyramid.threadlocal import get_current_request
from pyramid.util import DottedNameResolver
from sqlalchemy import (Column,
ForeignKey,
Index,
Table,
types,
Unicode)
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import (relationship,
scoped_session,
sessionmaker,
synonym)
from sqlalchemy.sql.expression import func
from zope.sqlalchemy import register
from apex.lib.db import get_or_create
# Thread-local session shared by the whole package; registered with
# zope.sqlalchemy so it participates in the `transaction` machinery.
DBSession = scoped_session(sessionmaker())
register(DBSession)

Base = declarative_base()

# Association table linking auth identities (auth_id) to groups (auth_groups).
auth_group_table = Table('auth_auth_groups', Base.metadata,
    Column('auth_id', types.Integer(), \
        ForeignKey('auth_id.id', onupdate='CASCADE', ondelete='CASCADE')),
    Column('group_id', types.Integer(), \
        ForeignKey('auth_groups.id', onupdate='CASCADE', ondelete='CASCADE'))
)

# need to create Unique index on (auth_id,group_id)
Index('auth_group', auth_group_table.c.auth_id, auth_group_table.c.group_id)
class AuthGroup(Base):
    """ Table name: auth_groups
    ::
        id = Column(types.Integer(), primary_key=True)
        name = Column(Unicode(80), unique=True, nullable=False)
        description = Column(Unicode(255), default=u'')
    """
    __tablename__ = 'auth_groups'
    __table_args__ = {'sqlite_autoincrement': True}

    id = Column(types.Integer(), primary_key=True)
    name = Column(Unicode(80), unique=True, nullable=False)
    description = Column(Unicode(255), default=u'')

    # Members of this group (AuthID rows), via the auth_auth_groups table.
    users = relationship('AuthID', secondary=auth_group_table, \
        backref='auth_groups')

    def __repr__(self):
        return u'%s' % self.name

    def __unicode__(self):
        return self.name
class AuthID(Base):
    """ Table name: auth_id
    ::
        id = Column(types.Integer(), primary_key=True)
        display_name = Column(Unicode(80), default=u'')
        active = Column(types.Enum(u'Y',u'N',u'D', name=u'active'), default=u'Y')
        created = Column(types.DateTime(), default=func.now())
    """
    __tablename__ = 'auth_id'
    __table_args__ = {'sqlite_autoincrement': True}

    id = Column(types.Integer(), primary_key=True)
    display_name = Column(Unicode(80), default=u'')
    active = Column(types.Enum(u'Y',u'N',u'D', name=u'active'), default=u'Y')
    created = Column(types.DateTime(), default=func.now())

    # Many-to-many to AuthGroup through the association table; AuthUser rows
    # are the per-provider login identities attached to this identity.
    groups = relationship('AuthGroup', secondary=auth_group_table, \
        backref='auth_users')

    users = relationship('AuthUser')

    """
    Fix this to use association_proxy
    groups = association_proxy('auth_group_table', 'authgroup')
    """

    # Most recent login event only (descending order + uselist=False).
    last_login = relationship('AuthUserLog', \
        order_by='AuthUserLog.id.desc()', uselist=False)
    # Full login history, oldest first.
    login_log = relationship('AuthUserLog', \
        order_by='AuthUserLog.id')

    def in_group(self, group):
        """
        Returns True or False if the user is or isn't in the group.
        """
        return group in [g.name for g in self.groups]

    @classmethod
    def get_by_id(cls, id):
        """
        Returns AuthID object or None by id

        .. code-block:: python

           from apex.models import AuthID

           user = AuthID.get_by_id(1)
        """
        return DBSession.query(cls).filter(cls.id==id).first()

    def get_profile(self, request=None):
        """
        Returns AuthUser.profile object, creates record if it doesn't exist.

        .. code-block:: python

           from apex.models import AuthUser

           user = AuthUser.get_by_id(1)
           profile = user.get_profile(request)

        in **development.ini**

        .. code-block:: python

           apex.auth_profile =
        """
        if not request:
            request = get_current_request()

        # apex.auth_profile is a dotted path to the profile class; it is
        # resolved lazily so apps without profiles pay no cost.
        auth_profile = request.registry.settings.get('apex.auth_profile')
        if auth_profile:
            resolver = DottedNameResolver(auth_profile.split('.')[0])
            profile_cls = resolver.resolve(auth_profile)
            return get_or_create(DBSession, profile_cls, auth_id=self.id)

    @property
    def group_list(self):
        # Comma-joined group names, e.g. "users,admin".
        group_list = []
        if self.groups:
            for group in self.groups:
                group_list.append(group.name)
        return ','.join( map( str, group_list ) )
class AuthUser(Base):
    """ Table name: auth_users
    ::
        id = Column(types.Integer(), primary_key=True)
        login = Column(Unicode(80), default=u'', index=True)
        _password = Column('password', Unicode(80), default=u'')
        email = Column(Unicode(80), default=u'', index=True)
        active = Column(types.Enum(u'Y',u'N',u'D'), default=u'Y')
    """
    __tablename__ = 'auth_users'
    __table_args__ = {'sqlite_autoincrement': True}

    id = Column(types.Integer(), primary_key=True)
    auth_id = Column(types.Integer, ForeignKey(AuthID.id), index=True)
    provider = Column(Unicode(80), default=u'local', index=True)
    login = Column(Unicode(80), default=u'', index=True)
    salt = Column(Unicode(24))
    _password = Column('password', Unicode(80), default=u'')
    email = Column(Unicode(80), default=u'', index=True)
    created = Column(types.DateTime(), default=func.now())
    active = Column(types.Enum(u'Y',u'N',u'D', name=u'active'), default=u'Y')

    # need unique index on auth_id, provider, login
    # create unique index ilp on auth_users (auth_id,login,provider);
    # how do we handle same auth on multiple ids?

    def _set_password(self, password):
        # A fresh application-level salt is generated on every password
        # change; bcrypt additionally salts internally.
        self.salt = self.get_salt(24)
        password = password + self.salt
        self._password = BCRYPTPasswordManager().encode(password, rounds=12)

    def _get_password(self):
        return self._password

    password = synonym('_password', descriptor=property(_get_password, \
                       _set_password))

    def get_salt(self, length):
        """Return a *length*-character hex salt derived from random letters.

        NOTE(review): this uses the non-cryptographic `random` module to
        build the salt; consider random.SystemRandom / the secrets module.
        """
        m = hashlib.sha256()
        word = ''

        for i in list(range(length)):
            word += random.choice(string.ascii_letters)

        m.update(word.encode('utf-8'))

        return str(m.hexdigest()[:length])

    @classmethod
    def get_by_id(cls, id):
        """
        Returns AuthUser object or None by id

        .. code-block:: python

           from apex.models import AuthID

           user = AuthID.get_by_id(1)
        """
        return DBSession.query(cls).filter(cls.id==id).first()

    @classmethod
    def get_by_login(cls, login):
        """
        Returns AuthUser object or None by login

        .. code-block:: python

           from apex.models import AuthUser

           user = AuthUser.get_by_login('login')
        """
        return DBSession.query(cls).filter(cls.login==login).first()

    @classmethod
    def get_by_email(cls, email):
        """
        Returns AuthUser object or None by email

        .. code-block:: python

           from apex.models import AuthUser

           user = AuthUser.get_by_email('email@address.com')
        """
        return DBSession.query(cls).filter(cls.email==email).first()

    @classmethod
    def check_password(cls, **kwargs):
        """Validate a password for the user identified by `id` or `login`.

        Expects `password` plus one of `id` / `login` in kwargs. Falls back
        to the configured `apex.fallback_auth` checker when the bcrypt
        comparison fails. Returns True or False.
        """
        # Bug fix: `user` was previously unbound (NameError) when the caller
        # supplied neither 'id' nor 'login'.
        user = None
        if 'id' in kwargs:
            user = cls.get_by_id(kwargs['id'])
        if 'login' in kwargs:
            user = cls.get_by_login(kwargs['login'])

        if not user:
            return False
        try:
            if BCRYPTPasswordManager().check(user.password,
                '%s%s' % (kwargs['password'], user.salt)):
                return True
        except TypeError:
            # Stored password/salt is None or malformed; fall through to
            # the fallback authenticator if one is configured.
            pass

        request = get_current_request()
        fallback_auth = request.registry.settings.get('apex.fallback_auth')
        if fallback_auth:
            resolver = DottedNameResolver(fallback_auth.split('.', 1)[0])
            fallback = resolver.resolve(fallback_auth)
            return fallback().check(DBSession, request, user, \
                kwargs['password'])
        return False
class AuthUserLog(Base):
    """
    event:
      L - Login
      R - Register
      P - Password
      F - Forgot
    """
    __tablename__ = 'auth_user_log'
    __table_args__ = {'sqlite_autoincrement': True}

    id = Column(types.Integer, primary_key=True)
    auth_id = Column(types.Integer, ForeignKey(AuthID.id), index=True)
    user_id = Column(types.Integer, ForeignKey(AuthUser.id), index=True)
    time = Column(types.DateTime(), default=func.now())
    # Wide enough for a full IPv6 address (39 chars).
    ip_addr = Column(Unicode(39), nullable=False)
    event = Column(types.Enum(u'L',u'R',u'P',u'F', name=u'event'), default=u'L')
def populate(settings):
    """Seed the auth_groups table with configured (or default) groups."""
    session = DBSession()
    if 'apex.default_groups' in settings:
        # Comma-separated group names from config; no descriptions supplied.
        default_groups = [(str(raw_name.strip()), u'')
                          for raw_name in settings['apex.default_groups'].split(',')]
    else:
        default_groups = [(u'users', u'User Group'),
                          (u'admin', u'Admin Group')]
    for group_name, group_desc in default_groups:
        session.add(AuthGroup(name=group_name, description=group_desc))
        session.flush()
    transaction.commit()
def initialize_sql(engine, settings):
    """Bind the session and metadata to *engine*, create tables, seed groups."""
    DBSession.configure(bind=engine)
    Base.metadata.bind = engine
    Base.metadata.create_all(engine)
    if 'apex.velruse_providers' in settings:
        pass
        #SQLBase.metadata.bind = engine
        #SQLBase.metadata.create_all(engine)
    try:
        populate(settings)
    except IntegrityError:
        # Groups already exist (unique name constraint) — seeding has
        # effectively run before; abort the transaction and carry on.
        transaction.abort()
| |
import json
import uuid
import jsonschema
import anchore_engine.configuration.localconfig
from anchore_engine.apis.context import ApiRequestContextProxy
from anchore_engine.clients.services import http, internal_client_for
from anchore_engine.clients.services.simplequeue import SimpleQueueClient
from anchore_engine.db.entities.common import anchore_now
from anchore_engine.subsys import logger
# Maps a subscription/notification type to the swagger schema definition name
# used by validate_schema().
NOTIFICATION_MAPPING = {
    "policy_eval": "PolicyEvalNotification",
    "tag_update": "TagUpdateNotification",
    "vuln_update": "VulnUpdateNotification",
    "analysis_update": "AnalysisUpdateNotification",
}
class NotificationValidationError(Exception):
    """Raised when a notification payload fails schema validation."""

    def __init__(self, user_id, notification_id, subscription_type):
        message = (
            "Notification Payload failed schema validation, cannot deliver. "
            "user_id={}, notification_id={}, subscription_type={}".format(
                user_id, notification_id, subscription_type
            )
        )
        super().__init__(message)
        # Retain the identifiers so handlers can log/route on them.
        self.user_id = user_id
        self.notification_id = notification_id
        self.subscription_type = subscription_type
class Notification(object):
    """Envelope for a test notification placed on a queue.

    Mirrors the QueueItem dict shape consumed by notify().
    """

    def __init__(self, queue_id, user_id, user_email):
        self.queue_id = queue_id
        self.user_id = user_id
        self.data_id = str(uuid.uuid4())
        self.last_updated = anchore_now()
        self.created_at = anchore_now()
        self.record_state_key = "active"
        self.tries = 0
        # max_tries appears to be a deadline timestamp (created_at + 1h)
        # rather than a retry count — matches notify()'s docstring example.
        self.max_tries = self.created_at + 3600
        # queue_id doubles as the subscription/notification type here.
        self.data = BaseNotificationData(user_id, user_email, queue_id)

    def to_dict(self):
        # Wire-format key names (camelCase for ids, snake_case for the rest).
        n_dict = dict()
        n_dict["queueId"] = self.queue_id
        n_dict["userId"] = self.user_id
        n_dict["dataId"] = self.data_id
        n_dict["created_at"] = self.created_at
        n_dict["last_updated"] = self.last_updated
        n_dict["record_state_key"] = self.record_state_key
        n_dict["tries"] = self.tries
        n_dict["max_tries"] = self.max_tries
        n_dict["data"] = self.data.to_dict()
        return n_dict

    def to_json(self):
        return json.dumps(self.to_dict())
class BaseNotificationData(object):
    """The `data` portion of a test notification: user info + typed payload."""

    def __init__(self, user_id, user_email, notification_type):
        self.notification_user = user_id
        self.notification_user_email = user_email
        self.notification_type = notification_type
        self.notification_payload = self.build_payload(notification_type)

    def build_payload(self, notification_type):
        """Build the type-specific test payload.

        Returns None (implicitly) for unrecognized types, which will make
        to_dict() fail — callers are expected to pass a known type.
        """
        if notification_type == "policy_eval":
            return TestPolicyEvalNotificationPayload(
                self.notification_user, notification_type
            )
        elif notification_type == "tag_update":
            return TestTagUpdateNotificationPayload(
                self.notification_user, notification_type
            )
        elif notification_type == "vuln_update":
            return TestVulnUpdateNotificationPayload(
                self.notification_user, notification_type
            )
        elif notification_type == "analysis_update":
            return TestAnalysisUpdateNotificationPayload(
                self.notification_user, notification_type
            )

    def to_dict(self):
        data = dict()
        data["notification_user"] = self.notification_user
        data["notification_user_email"] = self.notification_user_email
        data["notification_type"] = self.notification_type
        data["notification_payload"] = self.notification_payload.to_dict()
        return data
class BaseNotificationPayload(object):
    """Common fields shared by every test notification payload."""

    def __init__(self, user_id, subscription_type):
        self.user_id = user_id
        # Fresh random identifiers for each payload instance.
        self.subscription_key = str(uuid.uuid4())
        self.subscription_type = subscription_type
        self.notification_id = str(uuid.uuid4())

    def to_dict(self):
        """Serialize to the wire-format key names."""
        return {
            "userId": self.user_id,
            "subscription_key": self.subscription_key,
            "subscription_type": self.subscription_type,
            "notificationId": self.notification_id,
        }
class TestPolicyEvalNotificationPayload(BaseNotificationPayload):
    """Canned policy_eval payload used to exercise notification delivery."""

    class Eval(object):
        """Minimal stand-in for one policy evaluation result."""

        def __init__(self):
            self.image_digest = "test_image_digest"
            self.status = "pass"

        def to_dict(self):
            return {
                "image_digest": self.image_digest,
                "status": self.status,
            }

    def __init__(self, user_id, subscription_type):
        super().__init__(user_id, subscription_type)
        self.curr_eval = self.Eval()
        self.last_eval = self.Eval()
        self.annotations = ["test"]

    def to_dict(self):
        payload = super().to_dict()
        payload["curr_eval"] = self.curr_eval.to_dict()
        payload["last_eval"] = self.last_eval.to_dict()
        payload["annotations"] = list(self.annotations)
        return payload
class TestTagUpdateNotificationPayload(BaseNotificationPayload):
    """Canned tag_update payload used to exercise notification delivery."""

    def __init__(self, user_id, subscription_type):
        super().__init__(user_id, subscription_type)
        self.curr_eval = ["test_image_digest"]
        self.last_eval = ["test_image_digest"]
        self.annotations = ["test"]

    def to_dict(self):
        payload = super().to_dict()
        # Evals are plain digest lists for tag updates (no nested objects).
        for field in ("curr_eval", "last_eval", "annotations"):
            payload[field] = getattr(self, field)
        return payload
class TestVulnUpdateNotificationPayload(BaseNotificationPayload):
    """Canned vuln_update payload used to exercise notification delivery."""

    class VulnDiffResult(object):
        """Minimal stand-in for a vulnerability diff (added/updated/removed)."""

        def __init__(self):
            self.added = ["test1"]
            self.updated = ["test2"]
            self.removed = ["test3"]

        def to_dict(self):
            diff = dict()
            diff["added"] = self.added
            diff["updated"] = self.updated
            diff["removed"] = self.removed
            return diff

    def __init__(self, user_id, subscription_type):
        super().__init__(user_id, subscription_type)
        self.diff_vulnerability_result = self.VulnDiffResult()
        self.image_digest = "test_image_digest"
        self.annotations = ["test"]

    def to_dict(self):
        payload = super().to_dict()
        payload["diff_vulnerability_result"] = self.diff_vulnerability_result.to_dict()
        payload["image_digest"] = self.image_digest
        payload["annotations"] = self.annotations
        return payload
class TestAnalysisUpdateNotificationPayload(BaseNotificationPayload):
    """Canned analysis_update payload used to exercise notification delivery."""

    class AnalysisUpdateEval:
        """Minimal stand-in for one analysis state (e.g. analyzing/analyzed)."""

        def __init__(self, status):
            self.analysis_status = status
            self.annotations = ["test"]
            self.image_digest = "test_image_digest"

        def to_dict(self):
            aue = dict()
            aue["analysis_status"] = self.analysis_status
            aue["annotations"] = self.annotations
            aue["image_digest"] = self.image_digest
            return aue

    def __init__(self, user_id, subscription_type):
        super().__init__(user_id, subscription_type)
        # Simulate a transition from "analyzing" to "analyzed".
        self.curr_eval = self.AnalysisUpdateEval("analyzed")
        self.last_eval = self.AnalysisUpdateEval("analyzing")
        self.annotations = ["test"]

    def to_dict(self):
        payload = super().to_dict()
        payload["curr_eval"] = self.curr_eval.to_dict()
        payload["last_eval"] = self.last_eval.to_dict()
        payload["annotations"] = self.annotations
        return payload
def queue_notification(userId, subscription_key, subscription_type, payload):
    """
    Put a Notification in the Queue!
    """
    q_client = internal_client_for(SimpleQueueClient, None)
    rc = False
    try:
        nobj = {
            "userId": userId,
            "subscription_key": subscription_key,
            "notificationId": str(uuid.uuid4()),
        }
        if payload:
            # Merge caller-supplied fields; payload keys can override the
            # defaults above.
            nobj.update(payload)
        # Deduplicate: only enqueue when an equal object is not already queued.
        if not q_client.is_inqueue(subscription_type, nobj):
            rc = q_client.enqueue(subscription_type, nobj)
    except Exception as err:
        logger.warn("failed to create/enqueue notification")
        raise err
    return rc
def make_notification(user_record, subscription_type, notification):
    """Wrap a raw notification payload with account and type metadata.

    Raises Exception when the assembled payload is not JSON-serializable
    (or when user_record lacks the expected keys).
    """
    try:
        wrapped = {
            "notification_user": user_record["name"],
            "notification_user_email": user_record["email"],
            "notification_type": subscription_type,
            "notification_payload": notification,
        }
        # Serialize purely to verify the payload is JSON-safe; the result
        # of json.dumps is intentionally discarded.
        json.dumps(wrapped)
    except Exception as err:
        raise Exception("cannot prepare notification - exception: " + str(err))
    return wrapped
def notify(user_record, notification):
    """
    Notifications are sent periodically based on polling a queue for a particular type of subscription
    (anchore_engine.common.subscription_types + [event_log_type])

    This method is responsible for actually distributing notifications according to the notification_modes defined
    below (currently only webhook supported)

    Note: The notification passed in is not coming from make_notification method above, but rather from
    db_queues.get_all, which passes a QueueItem (see anchore_engine/subsys/catalog.py) serialized as a dict
    (data field is a json)

    :param user_record: the account sending the notification
    :param notification: a dict loaded from db_queues.get_all. Ex:
        {
          "queueId": "subscription type actual",
          "userId": "acct name",
          "queueName": "string",
          "dataId": "notificationId",
          "created_at": 981173106,
          "last_updated": 981173106,
          "record_state_key": "active",
          "record_state_val": "",
          "tries": 0,
          "max_tries": 981173206,
          "data": {
            "notification_user": "account name",
            "notification_user_email": "account email",
            "notification_type": "same as subscription type",
            "notification_payload": {
              "userId": "from original notification",
              "notificationId": "from original notification",
              "subscription_type": " from event details",
              "subscription_key": "from event resource id"
            }
          }
        }
    :return: boolean (True if successful)
    """
    notification_modes = ["webhook"]
    logger.debug("sending notification: " + json.dumps(notification, indent=4))
    for notification_mode in notification_modes:
        if notification_mode == "webhook":
            # NOTE(review): rc is captured but ignored — the function returns
            # True regardless of delivery outcome; confirm whether callers
            # rely on this before changing it.
            rc = do_notify_webhook(user_record, notification)
    return True
# TODO: this is not currently invoked anywhere and needs and update to not require a specific API schema but load
# the notification schemas from another location
def validate_schema(notification):
    """
    Check if the notification conforms to the Schema outlined in the Swagger Spec.
    Also only do this for the types we know (policy_eval, vuln_update, tag_update, analysis_update)

    :param notification: notification object to deliver
    :return: True when the notification validates against its schema, else False
    """
    ret = False
    notification_type = notification.get("data", {}).get("notification_type", None)

    # Bug fix: check for a missing/empty type FIRST. Previously the
    # "not in NOTIFICATION_MAPPING" branch also matched None, making the
    # "Notification Type not resolved" warning unreachable.
    if not notification_type:
        logger.warn("Notification Type not resolved: {}".format(notification))
        return ret
    if notification_type not in NOTIFICATION_MAPPING:
        logger.debug(
            "Not doing Schema validation for Notification Type: {}".format(
                notification_type
            )
        )
        return ret

    notification_schema_definition = NOTIFICATION_MAPPING.get(
        notification_type, "NotificationBase"
    )
    # Pull the matching schema definition out of the service's swagger spec.
    spec = ApiRequestContextProxy.get_service().api_spec
    schema = spec.get("definitions", {}).get(notification_schema_definition)
    try:
        jsonschema.validate(notification, schema)
        ret = True
    except jsonschema.ValidationError as e:
        # Deliberately non-fatal: log and deliver anyway for backwards
        # compatibility with older consumers.
        logger.error(
            "Notification does not pass validation, still delivering for backwards compatibility: {}".format(
                e
            )
        )
        ret = False
    return ret
def do_notify_webhook(user_record, notification):
    """Deliver *notification* to the configured webhook endpoint.

    A type-specific webhook config ("policy_eval", etc.) is preferred over
    the "general" entry. Returns the delivery result from
    do_notify_webhook_type, or False when no webhook is configured.
    """
    notification_type = notification["data"]["notification_type"]

    # Template variables substituted into the configured webhook URL.
    subvars = [
        ("<userId>", user_record["name"]),
        ("<notification_type>", notification_type),
    ]

    try:
        payload = json.dumps(notification)
    except Exception as err:
        raise Exception(
            "could not prepare notification as JSON - exception: " + str(err)
        )

    webhooks = {}
    localconfig = anchore_engine.configuration.localconfig.get_config()
    if "webhooks" in localconfig:
        webhooks.update(localconfig["webhooks"])

    if webhooks:
        # Top-level credentials/TLS settings act as defaults that each
        # webhook entry may override.
        rootuser = webhooks.pop("webhook_user", None)
        rootpw = webhooks.pop("webhook_pass", None)
        rootverify = webhooks.pop("ssl_verify", None)

        for ntype in [notification_type, "general"]:
            if ntype in webhooks:
                webhook = webhooks[ntype]
                rc = do_notify_webhook_type(
                    webhook=webhook,
                    user=webhook.pop("webhook_user", rootuser),
                    pw=webhook.pop("webhook_pass", rootpw),
                    verify=webhook.pop("ssl_verify", rootverify),
                    subvars=subvars,
                    payload=payload,
                )
                # Bug fix: return the delivery result here. Previously
                # execution fell through to the "no matching webhook" warning
                # below and returned False even after a successful delivery.
                return rc

    logger.debug(
        "warning: notification generated, but no matching webhook could be found in config to send it to - dropping notification"
    )
    return False
def do_notify_webhook_type(**kwargs):
    """POST *payload* to a single configured webhook.

    Expected kwargs: webhook (config dict with 'url'), user/pw (basic-auth
    credentials or None), verify (TLS verification flag), subvars (list of
    (placeholder, value) URL substitutions), payload (JSON string).

    Returns the HTTP client result; raises on missing URL or POST failure.
    """
    webhook = kwargs["webhook"]
    user = kwargs["user"]
    pw = kwargs["pw"]
    verify = kwargs["verify"]
    subvars = kwargs["subvars"]
    payload = kwargs["payload"]

    # Basic auth only when at least one credential is present.
    if not user and not pw:
        auth = None
    else:
        auth = (user, pw)

    url = webhook["url"]
    if not url:
        raise Exception("Cannot send webhook, no URL configured")
    # Substitute template variables (e.g. <userId>) into the URL.
    for subkey, subval in subvars:
        url = url.replace(subkey, subval)

    try:
        logger.info("webhook post: " + str(url) + " : " + payload)
        headers = {"Content-Type": "application/json"}
        result = http.anchy_post(
            url, data=payload, auth=auth, timeout=2.0, verify=verify, headers=headers
        )
        logger.info("webhook response: " + str(result))
        return result
    except Exception as err:
        raise Exception(
            "failed to post notification to webhook - exception: " + str(err)
        )
| |
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic linux scsi subsystem and Multipath utilities.
Note, this is not iSCSI.
"""
import os
import re
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from cinder.brick import exception
from cinder.brick import executor
from cinder.i18n import _LW, _LE
from cinder.openstack.common import loopingcall
LOG = logging.getLogger(__name__)

# Raw strings: "\w"/"\d"/"\(" in plain literals are invalid escape sequences
# (DeprecationWarning, later SyntaxWarning, on modern Python). Patterns are
# unchanged, only the literal form is fixed.
# Matches timestamped error lines interleaved in `multipath -l` output.
MULTIPATH_ERROR_REGEX = re.compile(r"\w{3} \d+ \d\d:\d\d:\d\d \|.*$")
# Captures the WWID shown in parentheses on the multipath map header line.
MULTIPATH_WWID_REGEX = re.compile(r"\((?P<wwid>.+)\)")
class LinuxSCSI(executor.Executor):
    """Helpers for the Linux SCSI subsystem and multipath devices.

    All commands run as root through the configured root helper; most
    failures are logged and tolerated rather than raised.
    """

    def __init__(self, root_helper, execute=putils.execute,
                 *args, **kwargs):
        super(LinuxSCSI, self).__init__(root_helper, execute,
                                        *args, **kwargs)

    def echo_scsi_command(self, path, content):
        """Used to echo strings to scsi subsystem."""
        # `tee -a` as root, since sysfs files need elevated privileges.
        args = ["-a", path]
        kwargs = dict(process_input=content,
                      run_as_root=True,
                      root_helper=self._root_helper)
        self._execute('tee', *args, **kwargs)

    def get_name_from_path(self, path):
        """Translates /dev/disk/by-path/ entry to /dev/sdX."""
        name = os.path.realpath(path)
        if name.startswith("/dev/"):
            return name
        else:
            return None

    def remove_scsi_device(self, device):
        """Removes a scsi device based upon /dev/sdX name."""
        path = "/sys/block/%s/device/delete" % device.replace("/dev/", "")
        if os.path.exists(path):
            # flush any outstanding IO first
            self.flush_device_io(device)

            LOG.debug("Remove SCSI device(%(dev)s) with %(path)s",
                      {'dev': device, 'path': path})
            # Writing "1" to the sysfs delete node detaches the device.
            self.echo_scsi_command(path, "1")

    def wait_for_volume_removal(self, volume_path):
        """This is used to ensure that volumes are gone.

        Polls every 2 seconds, up to scan_attempts times, then raises
        VolumePathNotRemoved.
        """
        def _wait_for_volume_removal(volume_path):
            LOG.debug("Waiting for SCSI mount point %s to be removed.",
                      volume_path)
            if os.path.exists(volume_path):
                if self.tries >= self.scan_attempts:
                    LOG.error(_LE("Exceeded the number of attempts to detect "
                                  "volume removal."))
                    raise exception.VolumePathNotRemoved(
                        volume_path=volume_path)

                LOG.debug("%(path)s still exists, rescanning. Try number: "
                          "%(tries)s",
                          {'path': volume_path, 'tries': self.tries})
                self.tries = self.tries + 1
            else:
                LOG.debug("SCSI mount point %s has been removed.", volume_path)
                raise loopingcall.LoopingCallDone()

        # Setup a loop here to give the kernel time
        # to remove the volume from /dev/disk/by-path/
        # NOTE(review): tries/scan_attempts are stored on the instance, so
        # concurrent calls on one LinuxSCSI object would interfere.
        self.tries = 0
        self.scan_attempts = 3
        timer = loopingcall.FixedIntervalLoopingCall(
            _wait_for_volume_removal, volume_path)
        timer.start(interval=2).wait()

    def get_device_info(self, device):
        """Parse `sg_scan` output into a host/channel/id/lun dict.

        Missing fields remain None when sg_scan produces no output.
        """
        (out, _err) = self._execute('sg_scan', device, run_as_root=True,
                                    root_helper=self._root_helper)
        dev_info = {'device': device, 'host': None,
                    'channel': None, 'id': None, 'lun': None}
        if out:
            line = out.strip()
            line = line.replace(device + ": ", "")
            info = line.split(" ")

            for item in info:
                if '=' in item:
                    pair = item.split('=')
                    dev_info[pair[0]] = pair[1]
                elif 'scsi' in item:
                    dev_info['host'] = item.replace('scsi', '')

        return dev_info

    def remove_multipath_device(self, multipath_name):
        """This removes LUNs associated with a multipath device
        and the multipath device itself.
        """
        LOG.debug("remove multipath device %s", multipath_name)
        mpath_dev = self.find_multipath_device(multipath_name)
        if mpath_dev:
            devices = mpath_dev['devices']
            LOG.debug("multipath LUNs to remove %s", devices)
            for device in devices:
                self.remove_scsi_device(device['device'])
            self.flush_multipath_device(mpath_dev['id'])

    def flush_device_io(self, device):
        """This is used to flush any remaining IO in the buffers."""
        try:
            LOG.debug("Flushing IO for device %s", device)
            self._execute('blockdev', '--flushbufs', device, run_as_root=True,
                          root_helper=self._root_helper)
        except putils.ProcessExecutionError as exc:
            # Best-effort: log and continue so device removal can proceed.
            LOG.warning(_LW("Failed to flush IO buffers prior to removing"
                            " device: (%(code)s)"),
                        {'code': exc.exit_code})

    def flush_multipath_device(self, device):
        """Flush (remove) a single multipath map via `multipath -f`."""
        try:
            LOG.debug("Flush multipath device %s", device)
            self._execute('multipath', '-f', device, run_as_root=True,
                          root_helper=self._root_helper)
        except putils.ProcessExecutionError as exc:
            LOG.warning(_LW("multipath call failed exit (%(code)s)"),
                        {'code': exc.exit_code})

    def flush_multipath_devices(self):
        """Flush all unused multipath maps via `multipath -F`."""
        try:
            self._execute('multipath', '-F', run_as_root=True,
                          root_helper=self._root_helper)
        except putils.ProcessExecutionError as exc:
            LOG.warning(_LW("multipath call failed exit (%(code)s)"),
                        {'code': exc.exit_code})

    def find_multipath_device(self, device):
        """Find a multipath device associated with a LUN device name.

        device can be either a /dev/sdX entry or a multipath id.

        Returns a dict with the map path, WWID, name and member LUNs, or
        None when no map is found / the map node is absent.
        """
        mdev = None
        devices = []
        out = None
        try:
            (out, _err) = self._execute('multipath', '-l', device,
                                        run_as_root=True,
                                        root_helper=self._root_helper)
        except putils.ProcessExecutionError as exc:
            LOG.warning(_LW("multipath call failed exit (%(code)s)"),
                        {'code': exc.exit_code})
            return None

        if out:
            lines = out.strip()
            lines = lines.split("\n")
            # Drop timestamped error lines interleaved in multipath output.
            lines = [line for line in lines
                     if not re.match(MULTIPATH_ERROR_REGEX, line)]
            if lines:
                # Use the device name, be it the WWID, mpathN or custom alias
                # of a device to build the device path. This should be the
                # first item on the first line of output from `multipath -l
                # ${path}` or `multipath -l ${wwid}`..
                mdev_name = lines[0].split(" ")[0]
                mdev = '/dev/mapper/%s' % mdev_name

                # Find the WWID for the LUN if we are using mpathN or aliases.
                wwid_search = MULTIPATH_WWID_REGEX.search(lines[0])
                if wwid_search is not None:
                    mdev_id = wwid_search.group('wwid')
                else:
                    mdev_id = mdev_name

                # Confirm that the device is present.
                try:
                    os.stat(mdev)
                except OSError:
                    LOG.warning(_LW("Couldn't find multipath device %s"), mdev)
                    return None

                LOG.debug("Found multipath device = %(mdev)s",
                          {'mdev': mdev})
                # Skip the 3-line map header; the remaining lines describe
                # member paths (h:c:t:l sdX ...).
                device_lines = lines[3:]
                for dev_line in device_lines:
                    if dev_line.find("policy") != -1:
                        continue

                    # Strip the ASCII tree decorations multipath prints.
                    dev_line = dev_line.lstrip(' |-`')
                    dev_info = dev_line.split()
                    address = dev_info[0].split(":")
                    dev = {'device': '/dev/%s' % dev_info[1],
                           'host': address[0], 'channel': address[1],
                           'id': address[2], 'lun': address[3]
                           }

                    devices.append(dev)

        if mdev is not None:
            info = {"device": mdev,
                    "id": mdev_id,
                    "name": mdev_name,
                    "devices": devices}
            return info
        return None
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
# Signature of the optional `cls` response-transform callback used by the SDK.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Module-level serializer for URL/query/header formatting only; client-side
# model validation is disabled for request building.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
    subscription_id: str,
    resource_group_name: str,
    restore_point_collection_name: str,
    vm_restore_point_name: str,
    disk_restore_point_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a single disk restore point.

    Auto-generated by AutoRest; do not hand-edit the URL template or
    parameter handling — regenerate from the service specification instead.
    """
    api_version = "2020-09-30"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints/{diskRestorePointName}')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "restorePointCollectionName": _SERIALIZER.url("restore_point_collection_name", restore_point_collection_name, 'str'),
        "vmRestorePointName": _SERIALIZER.url("vm_restore_point_name", vm_restore_point_name, 'str'),
        "diskRestorePointName": _SERIALIZER.url("disk_restore_point_name", disk_restore_point_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_list_by_restore_point_request(
    subscription_id: str,
    resource_group_name: str,
    restore_point_collection_name: str,
    vm_restore_point_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request listing disk restore points under a VM restore point.

    Identifiers are serialized into the URL template; caller-supplied
    ``params``/``headers`` kwargs are merged with the pinned api-version and
    Accept header, and any remaining kwargs are forwarded to ``HttpRequest``.
    """
    api_version = "2020-09-30"
    accept = "application/json"

    # Resolve the URL template (an override may be passed by the operation).
    _url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints')
    path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "restorePointCollectionName": _SERIALIZER.url("restore_point_collection_name", restore_point_collection_name, 'str'),
        "vmRestorePointName": _SERIALIZER.url("vm_restore_point_name", vm_restore_point_name, 'str'),
    }
    _url = _format_url_section(_url, **path_args)

    # Query string: caller params plus the pinned api-version.
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: caller headers plus the Accept content type.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
class DiskRestorePointOperations(object):
    """DiskRestorePointOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.compute.v2020_09_30.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client, msrest (de)serializers and client configuration
        # are injected by the generated service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        restore_point_collection_name: str,
        vm_restore_point_name: str,
        disk_restore_point_name: str,
        **kwargs: Any
    ) -> "_models.DiskRestorePoint":
        """Get disk restorePoint resource.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param restore_point_collection_name: The name of the restore point collection that the disk
         restore point belongs. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum
         name length is 80 characters.
        :type restore_point_collection_name: str
        :param vm_restore_point_name: The name of the vm restore point that the disk disk restore point
         belongs. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is
         80 characters.
        :type vm_restore_point_name: str
        :param disk_restore_point_name: The name of the disk restore point created. Supported
         characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
        :type disk_restore_point_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DiskRestorePoint, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2020_09_30.models.DiskRestorePoint
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Optional response hook; when set it replaces the default return value.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiskRestorePoint"]
        # Default HTTP-status -> exception mapping; callers can extend or
        # override it via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Build the raw request from the module-level request builder.
        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            restore_point_collection_name=restore_point_collection_name,
            vm_restore_point_name=vm_restore_point_name,
            disk_restore_point_name=disk_restore_point_name,
            template_url=self.get.metadata['url'],
        )
        # Convert to the transport request type and expand the host prefix.
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        # Run the request through the full policy pipeline synchronously.
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            # Raise a mapped exception if one matches, else a generic ARM error.
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('DiskRestorePoint', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints/{diskRestorePointName}'}  # type: ignore
    @distributed_trace
    def list_by_restore_point(
        self,
        resource_group_name: str,
        restore_point_collection_name: str,
        vm_restore_point_name: str,
        **kwargs: Any
    ) -> Iterable["_models.DiskRestorePointList"]:
        """Lists diskRestorePoints under a vmRestorePoint.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param restore_point_collection_name: The name of the restore point collection that the disk
         restore point belongs. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum
         name length is 80 characters.
        :type restore_point_collection_name: str
        :param vm_restore_point_name: The name of the vm restore point that the disk disk restore point
         belongs. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is
         80 characters.
        :type vm_restore_point_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DiskRestorePointList or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2020_09_30.models.DiskRestorePointList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiskRestorePointList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page: build the request from the operation's URL template.
            # Subsequent pages: reuse the builder with the server-provided
            # next_link as the template (path args are already baked in there).
            if not next_link:
                request = build_list_by_restore_point_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    restore_point_collection_name=restore_point_collection_name,
                    vm_restore_point_name=vm_restore_point_name,
                    template_url=self.list_by_restore_point.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_restore_point_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    restore_point_collection_name=restore_point_collection_name,
                    vm_restore_point_name=vm_restore_point_name,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # Paged follow-up requests are always GET.
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Turn one page response into (next_link, iterator of items).
            deserialized = self._deserialize("DiskRestorePointList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page, raising on any non-200 status.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        # ItemPaged lazily drives get_next/extract_data as the caller iterates.
        return ItemPaged(
            get_next, extract_data
        )
    list_by_restore_point.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints'}  # type: ignore
| |
import json
import os
import time
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from elasticsearch.exceptions import NotFoundError
import olympia.core.logger
from olympia.addons.indexers import AddonIndexer
from olympia.amo.celery import task
from olympia.amo.search import get_es
from olympia.lib.es.utils import (
flag_reindexing_amo,
is_reindexing_amo,
timestamp_index,
unflag_reindexing_amo,
)
logger = olympia.core.logger.getLogger('z.elasticsearch')
# Shared Elasticsearch client used by every task and the command below.
ES = get_es()
def get_indexer(alias):
    """Return the indexer class registered for *alias*.

    The lookup is built on every call rather than at import time so that
    tests, which repoint settings.ES_INDEXES at test-specific aliases,
    resolve to the right indexer.
    """
    # Alias name -> indexer class. The 'default' entry in ES_INDEXES is the
    # add-ons index.
    registry = {
        settings.ES_INDEXES['default']: AddonIndexer,
    }
    return registry[alias]
@task
def delete_indexes(indexes):
    """Celery task: delete the given ES indexes (404/500 responses ignored)."""
    joined = ','.join(indexes)
    logger.info('Removing indices %r' % joined)
    ES.indices.delete(joined, ignore=[404, 500])
@task
def update_aliases(actions):
    """Celery task: apply a batch of alias add/remove actions in one call."""
    message = 'Rebuilding aliases with actions: %s' % actions
    logger.info(message)
    ES.indices.update_aliases({'actions': actions})
@task(ignore_result=False)
def create_new_index(alias, new_index):
    """Celery task: create *new_index* via the indexer registered for *alias*."""
    indexer = get_indexer(alias)
    logger.info(f'Create the index {new_index}, for alias: {alias}')
    indexer.create_new_index(new_index)
@task(ignore_result=False)
def flag_database(new_index, old_index, alias):
    """Celery task: flag in the database that reindexing has started."""
    flag_kwargs = {'new_index': new_index, 'old_index': old_index, 'alias': alias}
    logger.info('Flagging the database to start the reindexation')
    flag_reindexing_amo(**flag_kwargs)
@task
def unflag_database():
    """Unflag the database to indicate that the reindexing is over."""
    logger.info('Unflagging the database')
    unflag_reindexing_amo()
def gather_index_data_tasks(alias, index):
    """Return the celery group of indexing tasks targeting *index*.

    The group may be empty when there is nothing to index.
    """
    indexer = get_indexer(alias)
    logger.info(f'Returning reindexing group for {index}')
    return indexer.reindex_tasks_group(index)
# Template for the final report, filled with the JSON dump of the
# resulting /_aliases configuration once reindexing completes.
_SUMMARY = """
*** Reindexation done ***
Current Aliases configuration:
%s
"""
class Command(BaseCommand):
    """Rebuild the Elasticsearch indexes behind an alias without downtime.

    A celery chain creates a fresh timestamped index, double-writes while the
    data is re-indexed, then atomically swaps the alias over and removes the
    old indexes.
    """

    help = 'Reindex all ES indexes'
    def add_arguments(self, parser):
        # NOTE(review): the trailing commas after each add_argument() call
        # turn these statements into 1-tuples; harmless, but stray.
        parser.add_argument(
            '--force',
            action='store_true',
            help=('Bypass the database flag that says another indexation is ongoing'),
            default=False,
        ),
        parser.add_argument(
            '--wipe',
            action='store_true',
            help=('Deletes AMO indexes prior to reindexing.'),
            default=False,
        ),
        parser.add_argument(
            '--key',
            action='store',
            help=(
                'Key in settings.ES_INDEXES corresponding to the alias to '
                'reindex. Can be one of the following: %s. Default is '
                '"default", which contains Add-ons data.' % (self.accepted_keys())
            ),
            default='default',
        ),
        parser.add_argument(
            '--noinput',
            action='store_true',
            help=('Do not ask for confirmation before wiping. Default: False'),
            default=False,
        ),
    def accepted_keys(self):
        # Valid values for --key, for help/error messages.
        return ', '.join(settings.ES_INDEXES.keys())
    def handle(self, *args, **kwargs):
        """Reindexing work.
        Creates a task chain that creates new indexes over the old ones so the
        search feature works while the indexation occurs.
        """
        force = kwargs['force']
        # Refuse to run concurrently with another reindex unless forced.
        if is_reindexing_amo() and not force:
            raise CommandError('Indexation already occurring - use --force to bypass')
        alias = settings.ES_INDEXES.get(kwargs['key'], None)
        if alias is None:
            raise CommandError(
                'Invalid --key parameter. It should be one of: %s.'
                % (self.accepted_keys())
            )
        self.stdout.write('Starting the reindexation for %s.' % alias)
        if kwargs['wipe']:
            # Destructive path: drop the existing index entirely first.
            skip_confirmation = kwargs['noinput']
            confirm = ''
            if not skip_confirmation:
                confirm = input(
                    'Are you sure you want to wipe all AMO '
                    'Elasticsearch indexes? (yes/no): '
                )
                while confirm not in ('yes', 'no'):
                    confirm = input('Please enter either "yes" or "no": ')
            if confirm == 'yes' or skip_confirmation:
                unflag_database()
                # Retrieve the actual index and delete it. That way whether or
                # not this was an alias or an index (which is wrong, but
                # happens if data was indexed before the first reindex was
                # done) doesn't matter.
                try:
                    index = next(iter(ES.indices.get(alias)))
                    ES.indices.delete(index)
                except NotFoundError:
                    pass
            else:
                raise CommandError('Aborted.')
        elif force:
            # --force without --wipe: just clear a stale in-progress flag.
            unflag_database()
        workflow = self.create_workflow(alias)
        self.execute_workflow(workflow)
    def create_workflow(self, alias):
        # Accumulated add/remove alias actions, applied in one atomic
        # update_aliases call at the end of the chain.
        alias_actions = []
        def add_alias_action(action, index, alias):
            action = {action: {'index': index, 'alias': alias}}
            # Skip duplicates so the final actions list stays minimal.
            if action in alias_actions:
                return
            alias_actions.append(action)
        # Creating a task chain.
        self.stdout.write('Building the task chain')
        to_remove = []
        old_index = None
        try:
            olds = ES.indices.get_alias(alias)
            for old_index in olds:
                # Mark the index to be removed later.
                to_remove.append(old_index)
                # Mark the alias to be removed from that index.
                add_alias_action('remove', old_index, alias)
        except NotFoundError:
            # If the alias did not exist, ignore it, don't try to remove
            # it.
            pass
        # Create a new index, using the alias name with a timestamp.
        new_index = timestamp_index(alias)
        # Mark the alias to be added at the end.
        add_alias_action('add', new_index, alias)
        # If old_index is None that could mean it's a full index.
        # In that case we want to continue index in it.
        if ES.indices.exists(alias):
            old_index = alias
        # Main chain for this alias that:
        # - creates the new index
        # - then, flags the database (which in turn makes every index call
        #   index data on both the old and the new index).
        workflow = create_new_index.si(alias, new_index) | flag_database.si(
            new_index, old_index, alias
        )
        # ... Then start indexing data. gather_index_data_tasks() is a
        # function returning a group of indexing tasks.
        index_data_tasks = gather_index_data_tasks(alias, new_index)
        if index_data_tasks.tasks:
            # Add the group to the chain, if it's not empty.
            workflow |= index_data_tasks
        # Chain with a task that updates the aliases to point to the new
        # index and remove the old aliases, if any.
        workflow |= update_aliases.si(alias_actions)
        # Chain with a task that unflags the database - there's no need to
        # duplicate the indexing anymore.
        workflow |= unflag_database.si()
        # Finish the chain by a task that deletes the old indexes, if any.
        if to_remove:
            workflow |= delete_indexes.si(to_remove)
        return workflow
    def execute_workflow(self, workflow):
        # Let's do it.
        self.stdout.write('Running all indexation tasks')
        # FORCE_INDEXING makes indexing code run even where it would normally
        # be skipped; always cleared again in the finally block.
        os.environ['FORCE_INDEXING'] = '1'
        try:
            workflow.apply_async()
            if not getattr(settings, 'CELERY_TASK_ALWAYS_EAGER', False):
                time.sleep(10)  # give celeryd some time to flag the DB
                # Poll the DB flag until the chain's unflag task has run.
                while is_reindexing_amo():
                    self.stdout.write('.')
                    self.stdout.flush()
                    time.sleep(5)
        finally:
            del os.environ['FORCE_INDEXING']
        self.stdout.write('\n')
        # Let's return the /_aliases values.
        aliases = ES.indices.get_alias()
        aliases = json.dumps(aliases, sort_keys=True, indent=4)
        summary = _SUMMARY % aliases
        self.stdout.write(summary)
| |
# Copyright (c) 2010 Aldo Cortesi
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2011 oitel
# Copyright (c) 2011 Kenji_Takahashi
# Copyright (c) 2011 Paul Colomiets
# Copyright (c) 2012, 2014 roger
# Copyright (c) 2012 nullzion
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014-2015 Sean Vig
# Copyright (c) 2014 Nathan Hoad
# Copyright (c) 2014 dequis
# Copyright (c) 2014 Tycho Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import division
import collections
import math
import cairocffi
import xcffib.xproto
from . import pangocffi
from . import utils
class TextLayout(object):
    """A pango text layout owned by a Drawer.

    Wraps a pango layout plus the colour, optional shadow colour and markup
    flag used when the text is drawn onto the drawer's cairo context.
    """

    def __init__(self, drawer, text, colour, font_family, font_size,
                 font_shadow, wrap=True, markup=False):
        self.drawer, self.colour = drawer, colour
        layout = drawer.ctx.create_layout()
        layout.set_alignment(pangocffi.ALIGN_CENTER)
        if not wrap:  # pango wraps by default
            layout.set_ellipsize(pangocffi.ELLIPSIZE_END)
        desc = pangocffi.FontDescription.from_string(font_family)
        desc.set_absolute_size(pangocffi.units_from_double(float(font_size)))
        layout.set_font_description(desc)
        self.font_shadow = font_shadow
        self.layout = layout
        self.markup = markup
        # Goes through the text setter below (markup parsing happens there).
        self.text = text
        # Explicit width override; None means "natural pixel width".
        self._width = None

    def finalize(self):
        # Release the underlying pango layout.
        self.layout.finalize()

    @property
    def text(self):
        return self.layout.get_text()

    @text.setter
    def text(self, value):
        if self.markup:
            # pangocffi doesn't like None here, so we use "".
            if value is None:
                value = ''
            # Markup is split into an attribute list plus the plain text.
            attrlist, value, accel_char = pangocffi.parse_markup(value)
            self.layout.set_attributes(attrlist)
        self.layout.set_text(utils.scrub_to_utf8(value))

    @property
    def width(self):
        # Explicit override wins; otherwise ask pango for the rendered width.
        if self._width is not None:
            return self._width
        else:
            return self.layout.get_pixel_size()[0]

    @width.setter
    def width(self, value):
        self._width = value
        self.layout.set_width(pangocffi.units_from_double(value))

    @width.deleter
    def width(self):
        # -1 restores pango's automatic width.
        self._width = None
        self.layout.set_width(-1)

    @property
    def height(self):
        return self.layout.get_pixel_size()[1]

    def fontdescription(self):
        return self.layout.get_font_description()

    @property
    def font_family(self):
        d = self.fontdescription()
        return d.get_family()

    @font_family.setter
    def font_family(self, font):
        d = self.fontdescription()
        d.set_family(font)
        self.layout.set_font_description(d)

    @property
    def font_size(self):
        d = self.fontdescription()
        return d.get_size()

    @font_size.setter
    def font_size(self, size):
        d = self.fontdescription()
        d.set_size(size)
        d.set_absolute_size(pangocffi.units_from_double(size))
        self.layout.set_font_description(d)

    def draw(self, x, y):
        # Shadow first, offset by one pixel, then the text itself on top.
        if self.font_shadow is not None:
            self.drawer.set_source_rgb(self.font_shadow)
            self.drawer.ctx.move_to(x + 1, y + 1)
            self.drawer.ctx.show_layout(self.layout)
        self.drawer.set_source_rgb(self.colour)
        self.drawer.ctx.move_to(x, y)
        self.drawer.ctx.show_layout(self.layout)

    def framed(self, border_width, border_color, pad_x, pad_y, highlight_color=None):
        # Wrap this layout in a padded, bordered frame.
        return TextFrame(self, border_width, border_color, pad_x, pad_y, highlight_color=highlight_color)
class TextFrame(object):
    """A padded, optionally bordered frame drawn around a TextLayout.

    ``pad_x``/``pad_y`` may each be a single number (applied to both sides)
    or a two-element iterable of ``(left, right)`` / ``(top, bottom)``.
    """

    def __init__(self, layout, border_width, border_color, pad_x, pad_y, highlight_color=None):
        self.layout = layout
        self.border_width = border_width
        self.border_color = border_color
        self.drawer = self.layout.drawer
        self.highlight_color = highlight_color
        # ``collections.Iterable`` was deprecated in Python 3.3 and removed
        # in 3.10; import from collections.abc with a Python 2 fallback.
        try:
            from collections.abc import Iterable
        except ImportError:  # Python 2
            from collections import Iterable
        if isinstance(pad_x, Iterable):
            self.pad_left = pad_x[0]
            self.pad_right = pad_x[1]
        else:
            self.pad_left = self.pad_right = pad_x
        if isinstance(pad_y, Iterable):
            self.pad_top = pad_y[0]
            self.pad_bottom = pad_y[1]
        else:
            self.pad_top = self.pad_bottom = pad_y

    def draw(self, x, y, rounded=True, fill=False, line=False, highlight=False):
        """Draw the frame at (x, y): outline by default, filled box, or a
        bottom line (optionally over a highlight fill)."""
        self.drawer.set_source_rgb(self.border_color)
        opts = [
            x, y,
            self.layout.width + self.pad_left + self.pad_right,
            self.layout.height + self.pad_top + self.pad_bottom,
            self.border_width
        ]
        if line:
            if highlight:
                self.drawer.set_source_rgb(self.highlight_color)
                self.drawer.fillrect(*opts)
                self.drawer.set_source_rgb(self.border_color)
            # change to only fill in bottom line
            # NOTE(review): the y coordinate ignores the passed-in ``y``
            # offset here — presumably the frame is always drawn at the top
            # of the drawer; confirm against callers.
            opts[1] = self.height - self.border_width  # y
            opts[3] = self.border_width  # height
            self.drawer.fillrect(*opts)
        elif fill:
            if rounded:
                self.drawer.rounded_fillrect(*opts)
            else:
                self.drawer.fillrect(*opts)
        else:
            if rounded:
                self.drawer.rounded_rectangle(*opts)
            else:
                self.drawer.rectangle(*opts)
        self.drawer.ctx.stroke()
        # Draw the text inside the padding.
        self.layout.draw(
            x + self.pad_left,
            y + self.pad_top
        )

    def draw_fill(self, x, y, rounded=True):
        self.draw(x, y, rounded=rounded, fill=True)

    def draw_line(self, x, y, highlighted):
        self.draw(x, y, line=True, highlight=highlighted)

    @property
    def height(self):
        # Total frame height: text plus vertical padding.
        return self.layout.height + self.pad_top + self.pad_bottom

    @property
    def width(self):
        # Total frame width: text plus horizontal padding.
        return self.layout.width + self.pad_left + self.pad_right
class Drawer(object):
    """ A helper class for drawing and text layout.
    We have a drawer object for each widget in the bar. The underlying surface
    is a pixmap with the same size as the bar itself. We draw to the pixmap
    starting at offset 0, 0, and when the time comes to display to the window,
    we copy the appropriate portion of the pixmap onto the window.
    """
    def __init__(self, qtile, wid, width, height):
        self.qtile = qtile
        self.wid, self.width, self.height = wid, width, height
        # Server-side X ids for the backing pixmap and graphics context.
        self.pixmap = self.qtile.conn.conn.generate_id()
        self.gc = self.qtile.conn.conn.generate_id()
        self.qtile.conn.conn.core.CreatePixmap(
            self.qtile.conn.default_screen.root_depth,
            self.pixmap,
            self.wid,
            self.width,
            self.height
        )
        self.qtile.conn.conn.core.CreateGC(
            self.gc,
            self.wid,
            xcffib.xproto.GC.Foreground | xcffib.xproto.GC.Background,
            [
                self.qtile.conn.default_screen.black_pixel,
                self.qtile.conn.default_screen.white_pixel
            ]
        )
        # Cairo surface backed by the pixmap; all drawing goes through it.
        self.surface = cairocffi.XCBSurface(
            qtile.conn.conn,
            self.pixmap,
            self.find_root_visual(),
            self.width,
            self.height,
        )
        self.ctx = self.new_ctx()
        self.clear((0, 0, 1))
    def finalize(self):
        # Free the server-side resources and drop the cairo objects.
        self.qtile.conn.conn.core.FreeGC(self.gc)
        self.qtile.conn.conn.core.FreePixmap(self.pixmap)
        self.ctx = None
        self.surface = None
    def _rounded_rect(self, x, y, width, height, linewidth):
        # Build (but don't stroke/fill) a rounded-rectangle path; the corner
        # radius is a tenth of the height, inset by half the line width.
        aspect = 1.0
        corner_radius = height / 10.0
        radius = corner_radius / aspect
        degrees = math.pi / 180.0
        self.ctx.new_sub_path()
        delta = radius + linewidth / 2
        self.ctx.arc(x + width - delta, y + delta, radius,
                     -90 * degrees, 0 * degrees)
        self.ctx.arc(x + width - delta, y + height - delta,
                     radius, 0 * degrees, 90 * degrees)
        self.ctx.arc(x + delta, y + height - delta, radius,
                     90 * degrees, 180 * degrees)
        self.ctx.arc(x + delta, y + delta, radius,
                     180 * degrees, 270 * degrees)
        self.ctx.close_path()
    def rounded_rectangle(self, x, y, width, height, linewidth):
        # Outlined rounded rectangle.
        self._rounded_rect(x, y, width, height, linewidth)
        self.ctx.set_line_width(linewidth)
        self.ctx.stroke()
    def rounded_fillrect(self, x, y, width, height, linewidth):
        # Filled rounded rectangle.
        self._rounded_rect(x, y, width, height, linewidth)
        self.ctx.fill()
    def rectangle(self, x, y, width, height, linewidth=2):
        # Outlined rectangle.
        self.ctx.set_line_width(linewidth)
        self.ctx.rectangle(x, y, width, height)
        self.ctx.stroke()
    def fillrect(self, x, y, width, height, linewidth=2):
        # Filled (and stroked) rectangle.
        self.ctx.set_line_width(linewidth)
        self.ctx.rectangle(x, y, width, height)
        self.ctx.fill()
        self.ctx.stroke()
    def draw(self, offsetx=0, offsety=0, width=None, height=None):
        """
        Copy the pixmap onto the window.

        Parameters
        ==========
        offsetx :
            the X offset to start drawing at.
        offsety :
            the Y offset to start drawing at.
        width :
            the X portion of the canvas to draw at the starting point.
        height :
            the Y portion of the canvas to draw at the starting point.
        """
        self.qtile.conn.conn.core.CopyArea(
            self.pixmap,
            self.wid,
            self.gc,
            0, 0,  # srcx, srcy
            offsetx, offsety,  # dstx, dsty
            self.width if width is None else width,
            self.height if height is None else height
        )
    def find_root_visual(self):
        # Locate the visual matching the screen's root visual id; returns
        # None if (unexpectedly) no depth contains it.
        for i in self.qtile.conn.default_screen.allowed_depths:
            for v in i.visuals:
                if v.visual_id == self.qtile.conn.default_screen.root_visual:
                    return v
    def new_ctx(self):
        # Cairo context wrapped so pango layout calls are available on it.
        return pangocffi.CairoContext(cairocffi.Context(self.surface))
    def set_source_rgb(self, colour):
        # A list of colours means a vertical gradient; a single string or
        # tuple is a flat colour.
        if type(colour) == list:
            if len(colour) == 0:
                # defaults to black
                self.ctx.set_source_rgba(*utils.rgb("#000000"))
            elif len(colour) == 1:
                self.ctx.set_source_rgba(*utils.rgb(colour[0]))
            else:
                linear = cairocffi.LinearGradient(0.0, 0.0, 0.0, self.height)
                step_size = 1.0 / (len(colour) - 1)
                step = 0.0
                for c in colour:
                    rgb_col = utils.rgb(c)
                    if len(rgb_col) < 4:
                        # Default to fully opaque when no alpha is given.
                        rgb_col[3] = 1
                    linear.add_color_stop_rgba(step, *rgb_col)
                    step += step_size
                self.ctx.set_source(linear)
        else:
            self.ctx.set_source_rgba(*utils.rgb(colour))
    def clear(self, colour):
        # Fill the whole pixmap with a single colour.
        self.set_source_rgb(colour)
        self.ctx.rectangle(0, 0, self.width, self.height)
        self.ctx.fill()
        self.ctx.stroke()
    def textlayout(self, text, colour, font_family, font_size, font_shadow,
                   markup=False, **kw):
        """Get a text layout"""
        return TextLayout(self, text, colour, font_family, font_size,
                          font_shadow, markup=markup, **kw)
    def max_layout_size(self, texts, font_family, font_size):
        # Measure each string with one reusable layout; return the widest
        # width and tallest height seen.
        sizelayout = self.textlayout(
            "", "ffffff", font_family, font_size, None)
        widths, heights = [], []
        for i in texts:
            sizelayout.text = i
            widths.append(sizelayout.width)
            heights.append(sizelayout.height)
        return max(widths), max(heights)
    # Old text layout functions, to be deprecated.
    def set_font(self, fontface, size, antialias=True):
        # NOTE(review): the ``antialias`` parameter is ignored and the font
        # options object is never applied back to the context — confirm
        # whether a ``set_font_options`` call is missing.
        self.ctx.select_font_face(fontface)
        self.ctx.set_font_size(size)
        fo = self.ctx.get_font_options()
        fo.set_antialias(cairocffi.ANTIALIAS_SUBPIXEL)
    def text_extents(self, text):
        return self.ctx.text_extents(utils.scrub_to_utf8(text))
    def font_extents(self):
        return self.ctx.font_extents()
    def fit_fontsize(self, heightlimit):
        """Try to find a maximum font size that fits any strings within the height"""
        self.ctx.set_font_size(heightlimit)
        # cairo font_extents: (ascent, descent, height, max_x_adv, max_y_adv).
        asc, desc, height, _, _ = self.font_extents()
        self.ctx.set_font_size(
            int(heightlimit * heightlimit / height))
        return self.font_extents()
    def fit_text(self, strings, heightlimit):
        """Try to find a maximum font size that fits all strings within the height"""
        self.ctx.set_font_size(heightlimit)
        # cairo text_extents: (x_bearing, y_bearing, width, height, ...).
        _, _, _, maxheight, _, _ = self.ctx.text_extents("".join(strings))
        if not maxheight:
            return 0, 0
        # Scale the font size so the tallest string fits heightlimit.
        self.ctx.set_font_size(
            int(heightlimit * heightlimit / maxheight))
        maxwidth, maxheight = 0, 0
        for i in strings:
            _, _, x, y, _, _ = self.ctx.text_extents(i)
            maxwidth = max(maxwidth, x)
            maxheight = max(maxheight, y)
        return maxwidth, maxheight
    def draw_vbar(self, color, x, y1, y2, linewidth=1):
        # Vertical line at x from y1 to y2.
        self.set_source_rgb(color)
        self.ctx.move_to(x, y1)
        self.ctx.line_to(x, y2)
        self.ctx.set_line_width(linewidth)
        self.ctx.stroke()
    def draw_hbar(self, color, x1, x2, y, linewidth=1):
        # Horizontal line at y from x1 to x2.
        self.set_source_rgb(color)
        self.ctx.move_to(x1, y)
        self.ctx.line_to(x2, y)
        self.ctx.set_line_width(linewidth)
        self.ctx.stroke()
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.linear."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class LinearClassifierTest(tf.test.TestCase):
def testTrain(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
self.assertTrue('centered_bias_weight' in classifier.get_variable_names())
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=False)
classifier.fit(input_fn=input_fn, steps=100)
self.assertFalse('centered_bias_weight' in classifier.get_variable_names())
def testTrainOptimizerWithL1Reg(self):
"""Tests l1 regularized model has higher loss."""
def input_fn():
return {
'language': tf.SparseTensor(values=['hindi'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
classifier_no_reg = tf.contrib.learn.LinearClassifier(
feature_columns=[language])
classifier_with_reg = tf.contrib.learn.LinearClassifier(
feature_columns=[language],
optimizer=tf.train.FtrlOptimizer(learning_rate=1.0,
l1_regularization_strength=100.))
loss_no_reg = classifier_no_reg.fit(
input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
loss_with_reg = classifier_with_reg.fit(
input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
self.assertLess(loss_no_reg, loss_with_reg)
def testTrainWithMissingFeature(self):
"""Tests that training works with missing features."""
def input_fn():
return {
'language': tf.SparseTensor(values=['Swahili', 'turkish'],
indices=[[0, 0], [2, 0]],
shape=[3, 1])
}, tf.constant([[1], [1], [1]], dtype=tf.int32)
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
classifier = tf.contrib.learn.LinearClassifier(feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerRealValuedFeatureWithInvalidDimension(self):
"""Tests a ValueError is raised if a real valued feature has dimension>1."""
def input_fn():
return {
'example_id': tf.constant(['1', '2']),
'sq_footage': tf.constant([[800.0, 200.0], [650.0, 500.0]])
}, tf.constant([[1.0], [0.0]])
sq_footage = tf.contrib.layers.real_valued_column('sq_footage', dimension=2)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(feature_columns=[sq_footage],
optimizer=sdca_optimizer)
with self.assertRaises(ValueError):
_ = classifier.fit(input_fn=input_fn, steps=100)
def testSdcaOptimizerRealValuedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and real valued features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2']),
'maintenance_cost': tf.constant([[500.0], [200.0]]),
'sq_footage': tf.constant([[800.0], [600.0]]),
'weights': tf.constant([[1.0], [1.0]])
}, tf.constant([[0], [1]])
maintenance_cost = tf.contrib.layers.real_valued_column('maintenance_cost')
sq_footage = tf.contrib.layers.real_valued_column('sq_footage')
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[maintenance_cost, sq_footage],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerBucketizedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and bucketized features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[600.0], [1000.0], [400.0]]),
'sq_footage': tf.constant([[1000.0], [600.0], [700.0]]),
'weights': tf.constant([[1.0], [1.0], [1.0]])
}, tf.constant([[1], [0], [1]])
price_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('price'),
boundaries=[500.0, 700.0])
sq_footage_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('sq_footage'),
boundaries=[650.0])
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id',
symmetric_l2_regularization=1.0)
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[price_bucket, sq_footage_bucket],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=2)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerSparseFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[0.4], [0.6], [0.3]]),
'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
shape=[3, 5]),
'weights': tf.constant([[1.0], [1.0], [1.0]])
}, tf.constant([[1], [0], [1]])
price = tf.contrib.layers.real_valued_column('price')
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=2)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerWeightedSparseFeatures(self):
"""LinearClasssifier with SDCAOptimizer and weighted sparse features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.SparseTensor(values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 5]),
'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 5])
}, tf.constant([[1], [0], [1]])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_weighted_by_price = tf.contrib.layers.weighted_sparse_column(
country, 'price')
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[country_weighted_by_price],
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=2)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerCrossedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and crossed features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'language': tf.SparseTensor(values=['english', 'italian', 'spanish'],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1]),
'country': tf.SparseTensor(values=['US', 'IT', 'MX'],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1])
}, tf.constant([[0], [0], [1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=5)
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_language = tf.contrib.layers.crossed_column(
[language, country], hash_bucket_size=10)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[country_language],
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=10)
scores = classifier.evaluate(input_fn=input_fn, steps=2)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerMixedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[0.6], [0.8], [0.3]]),
'sq_footage': tf.constant([[900.0], [700.0], [600.0]]),
'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
shape=[3, 5]),
'weights': tf.constant([[3.0], [1.0], [1.0]])
}, tf.constant([[1], [0], [1]])
price = tf.contrib.layers.real_valued_column('price')
sq_footage_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = tf.contrib.layers.crossed_column(
[sq_footage_bucket, country],
hash_bucket_size=10)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=2)
self.assertGreater(scores['accuracy'], 0.9)
def testEval(self):
"""Tests that eval produces correct metrics.
"""
def input_fn():
return {
'age': tf.constant([[1], [2]]),
'language': tf.SparseTensor(values=['greek', 'chinise'],
indices=[[0, 0], [1, 0]],
shape=[2, 1]),
}, tf.constant([[1], [0]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language])
# Evaluate on trained mdoel
classifier.fit(input_fn=input_fn, steps=100)
classifier.evaluate(input_fn=input_fn, steps=2)
# TODO(ispir): Enable accuracy check after resolving the randomness issue.
# self.assertLess(evaluated_values['loss/mean'], 0.3)
# self.assertGreater(evaluated_values['accuracy/mean'], .95)
class LinearRegressorTest(tf.test.TestCase):

  def testRegression(self):
    """Tests that loss goes down with training."""
    def regression_input_fn():
      features = {
          'age': tf.constant([1]),
          'language': tf.SparseTensor(values=['english'],
                                      indices=[[0, 0]],
                                      shape=[1, 1])
      }
      return features, tf.constant([[10.]])
    language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
    age = tf.contrib.layers.real_valued_column('age')
    regressor = tf.contrib.learn.LinearRegressor(
        feature_columns=[age, language])
    regressor.fit(input_fn=regression_input_fn, steps=100)
    loss_after_100 = regressor.evaluate(
        input_fn=regression_input_fn, steps=1)['loss']
    regressor.fit(input_fn=regression_input_fn, steps=200)
    loss_after_300 = regressor.evaluate(
        input_fn=regression_input_fn, steps=1)['loss']
    # More training must not increase loss on this trivially fittable input.
    self.assertLess(loss_after_300, loss_after_100)
    self.assertLess(loss_after_300, 0.5)

  def testRecoverWeights(self):
    """Fits y = x.w + bias + noise and checks the recovered weights."""
    rng = np.random.RandomState(67)
    num_examples = 1000
    num_weights = 10
    bias = 2
    x = rng.uniform(-1, 1, (num_examples, num_weights))
    true_weights = 10 * rng.randn(num_weights)
    y = np.dot(x, true_weights)
    y += rng.randn(len(x)) * 0.05 + rng.normal(bias, 0.01)
    feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(x)
    regressor = tf.contrib.learn.LinearRegressor(
        feature_columns=feature_columns)
    regressor.fit(x, y, batch_size=32, steps=20000)
    # Have to flatten weights since they come in (x, 1) shape.
    self.assertAllClose(true_weights, regressor.weights_.flatten(), rtol=1)
    # TODO(ispir): Disable centered_bias.
    # assert abs(bias - regressor.bias_) < 0.1
def boston_input_fn():
  """Input fn returning the Boston housing data as one (features, target) batch."""
  boston = tf.contrib.learn.datasets.load_boston()
  x = tf.constant(boston.data)
  y = tf.constant(boston.target)
  # 13 real valued input features per example; the target becomes a column.
  features = tf.cast(tf.reshape(x, [-1, 13]), tf.float32)
  target = tf.cast(tf.reshape(y, [-1, 1]), tf.float32)
  return features, target
class FeatureColumnTest(tf.test.TestCase):

  # TODO(b/29580537): Remove when we deprecate feature column inference.
  def testTrainWithInferredFeatureColumns(self):
    """A LinearRegressor without explicit columns infers them from the input."""
    regressor = tf.contrib.learn.LinearRegressor()
    regressor.fit(input_fn=boston_input_fn, steps=1)
    _ = regressor.evaluate(input_fn=boston_input_fn, steps=1)

  def testTrain(self):
    """Training works with columns inferred ahead of time from the input_fn."""
    inferred_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
        boston_input_fn)
    regressor = tf.contrib.learn.LinearRegressor(
        feature_columns=inferred_columns)
    regressor.fit(input_fn=boston_input_fn, steps=1)
    _ = regressor.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
tf.test.main()
| |
import wx
import wx.grid as gridlib
import wx.combo
from pprint import PrettyPrinter
import json
import os
from CreatorView.RelativePaths import relative_music_path,relative_dependencies_path
from uuid import uuid4
import traceback
import re
class CreatorView(wx.Panel):
    """Editor panel for a game "dependency set": three editable grids
    (Resources, Buildings, Dwellers) plus buttons to save/load .dep files
    and to submit the set as JSON to a controller node through *sender*.
    """
    def __init__(self, parent, size, name, musicPath=relative_music_path + "TwoMandolins.mp3", sender = None):
        """Build the grids, help texts and buttons.

        parent    -- enclosing wx window
        size      -- (width, height) used to lay out the root sizer
        name      -- view name
        musicPath -- mp3 played (via pygame) while the view is shown
        sender    -- object with a send(str) method used to talk to the
                     controller node; messages are JSON strings
        """
        wx.Panel.__init__(self, size=size, parent=parent)
        self.name = name
        self.parent = parent
        self.musicPath = musicPath
        self.sender = sender
        self.ackMsgs = {} #for confirming blocking send operations
        rootSizer = wx.BoxSizer(wx.VERTICAL)
        topSizer = wx.BoxSizer(wx.HORIZONTAL)
        buttonsSizer = wx.BoxSizer(wx.HORIZONTAL)
        chosenSetSizer = wx.BoxSizer(wx.HORIZONTAL)
        self.centerSizer = wx.BoxSizer(wx.HORIZONTAL)
        # Read-only combo box choosing which of the three grids is visible.
        choiceList = ["Resources", "Buildings", "Dwellers"]
        self.modes = wx.ComboBox(self, choices=choiceList, value="Buildings", style=wx.CB_READONLY)
        self.modes.Bind(wx.EVT_COMBOBOX, self.OnCombo)
        topSizer.Add(self.modes)
        self.dependenciesHeaders = {}
        # ^ maps grid name -> list of its column header names;
        # needed at the time of creating dependencies via Create button
        resourcesColumnsNames = ["Resource\nName", "Predecessor", "Successor", "Description", "Texture path", "Start income", "Ico path"]
        self.dependenciesHeaders["Resources"] = resourcesColumnsNames
        resourcesInfo = "Resource name identifies this object; must be unique throughout this dependency set.\n" \
                        "Predecessor is the name of a resource that this object requires\n" \
                        "Successor is the name of a resource next in the hierarchy\n" \
                        "Description goes to the tutorial module\nStart income tells how much of a resource you get at start; set to zero if you do not want this resource to be produced at start" \
                        "Ico is the path to a file that contains image representing this object"
        dwellersColumnsNames = ["Dweller\nName", "Predecessor", "Successor", "Description", "Consumes",
                                "Consume Rate", "Ico path"]
        self.dependenciesHeaders["Dwellers"] = dwellersColumnsNames
        dwellersInfo =\
            "Dweller name identifies this object; must be unique throughout this dependency set.\n" \
            "Predecessor is the name of a dweller that this object requires to exist\n" \
            "Successor is the name of a dweller next in the hierarchy\n" \
            "Description goes to the tutorial module\n" \
            "Ico is the path to a file that contains image representing this object\n" +\
            ">>Consumes<< is a list of resource identifiers that this dweller consumes, whose items are separated" \
            " via semi-colons;\n >>ConsumeRate<< is a rate at which resources listed in >>Consumes<< are removed" \
            " from the stock pile; those are semi-colon separated float values, each representing a resource at the" \
            " same position in >>Consumes<< list"
        buildingsColumnsNames = ["Building\nName", "Dweller\nName", "Dwellers\namount", "Predecessor", "Successor",
            "Description", "Produces", "Consumes", "Consume Rate", "Produce Rate", "Cost\nin\nresources","Texture path",
            "Ico path", "Type"]
        self.dependenciesHeaders["Buildings"] = buildingsColumnsNames
        buildingsInfo =\
            "Building name identifies this object; must be unique throughout this dependency set.\n" \
            "Dweller is a name existing in Dwellers list\n" \
            "Predecessor is the name of a dweller that this object requires\n" \
            "Successor is the name of a dweller next in the hierarchy\n" \
            "Description goes to the tutorial module\nCost in resources is a comma-separated list of pairs resource_name:units, that indicates how many units of resources player is due to have\n" \
            "Ico is the path to a file that contains image representing this object\n" +\
            ">>Produces<< is a semi-colon separated sequence of items existing at the resources list that" \
            " this building produces;\n Same rule applies for >>Consumes<< list\n>>Consume<< and >>Produce<<" \
            " rates ar metrics indicating pace at which building consumes a resource or produces it, respectively\n" \
            "Type can be either domastic or industrial\n texture path is a path to a file containing an image to be" \
            " loaded at play-time and displayed on the map\n"
        # Per-mode help text shown beside the grids.
        self.infos = {"Resources": resourcesInfo, "Buildings": buildingsInfo, "Dwellers": dwellersInfo}
        # One grid per mode; only the current one is shown at any time.
        buildingsGrid = gridlib.Grid(self)
        buildingsGrid.CreateGrid(1, len(buildingsColumnsNames))
        for i, name in enumerate(buildingsColumnsNames):
            buildingsGrid.SetColLabelValue(i, name)
        buildingsGrid.Show()
        resourcesGrid = gridlib.Grid(self)
        resourcesGrid.CreateGrid(1, len(resourcesColumnsNames))
        for i, name in enumerate(resourcesColumnsNames):
            resourcesGrid.SetColLabelValue(i, name)
        resourcesGrid.Hide()
        dwellersGrid = gridlib.Grid(self)
        dwellersGrid.CreateGrid(1, len(dwellersColumnsNames))
        for i, name in enumerate(dwellersColumnsNames):
            dwellersGrid.SetColLabelValue(i, name)
        dwellersGrid.Hide()
        self.tables = {"Resources": resourcesGrid, "Dwellers": dwellersGrid, "Buildings": buildingsGrid}
        self.centerSizer.Add(buildingsGrid)
        self.centerSizer.Add(resourcesGrid)
        self.centerSizer.Add(dwellersGrid)
        self.currentGrid = "Buildings"
        self.ctrlMsgField = wx.StaticText(self, label=self.infos[self.currentGrid])
        self.centerSizer.Add(self.ctrlMsgField, 0, wx.EXPAND, 5)
        # Text field holding the name of the dependency set being edited.
        self.ctrlText = wx.TextCtrl(self, -1, "Default Set")
        chosenSetSizer.Add(self.ctrlText)
        load_btn = wx.Button(self, label="Load Created dependencies")
        self.Bind(wx.EVT_BUTTON, self.loadDependencies, load_btn)
        buttonsSizer.Add(load_btn, 0, wx.EXPAND, 5)
        menu_btn = wx.Button(self, label="Menu")
        self.Bind(wx.EVT_BUTTON, self.retToMenu, menu_btn)
        buttonsSizer.Add(menu_btn, 0, wx.ALL | wx.EXPAND, 5)
        save_btn = wx.Button(self, label = "Save")
        self.Bind(wx.EVT_BUTTON, self.save, save_btn)
        buttonsSizer.Add(save_btn, 0, wx.EXPAND, 5)
        create_btn = wx.Button(self, label="Create")
        self.Bind(wx.EVT_BUTTON, self.createDependencies, create_btn)
        buttonsSizer.Add(create_btn, 0, wx.EXPAND, 5)
        add_row_btn = wx.Button(self, label="Add row")
        self.Bind(wx.EVT_BUTTON, self.addRow, add_row_btn)
        buttonsSizer.Add(add_row_btn, 0, wx.EXPAND, 5)
        delete_row_btn = wx.Button(self, label= "Delete Row")
        self.Bind(wx.EVT_BUTTON, self.deleteRow, delete_row_btn)
        buttonsSizer.Add(delete_row_btn, 0, wx.EXPAND, 5)
        # Status/error line reused by most handlers for user feedback.
        self.errorMsgField = wx.StaticText(self, label=self.infos[self.currentGrid])
        self.defaultErrorFieldMsg = "Be careful when deleting rows;" +\
            "\nclick on one of the cells in a row you want to delete;"+\
            "\nthen press delete button;"+\
            "\nBefore creating dependencies, all cells must be filled;"+\
            "\nOtherwise we cannot procced"
        self.errorMsgField.SetLabelText(self.defaultErrorFieldMsg)
        buttonsSizer.Add(self.errorMsgField, 0, wx.EXPAND, 5)
        rootSizer.Add(topSizer, 0, wx.CENTER)
        rootSizer.Add(self.centerSizer, 0, wx.EXPAND, 5)
        rootSizer.Add(buttonsSizer, 0, wx.CENTER)
        rootSizer.Add(chosenSetSizer, 0, wx.CENTER)
        self.SetSizer(rootSizer)
        rootSizer.SetDimension(0, 0, size[0], size[1])
        self.Bind(wx.EVT_SHOW, self.onShow, self)
    def OnCombo(self, event):
        """Switch the visible grid to the mode picked in the combo box."""
        print "combobox:", event.GetString()
        self.tables[self.currentGrid].Hide()
        self.tables[event.GetString()].Show()
        self.currentGrid = event.GetString()
        self.ctrlMsgField.SetLabel(self.infos[self.currentGrid])
        self.centerSizer.Layout()
    def onShow(self, event):
        """On show: reset the grids and start background music; on hide:
        shut pygame down. Music problems are non-fatal (best effort)."""
        global pygame
        if event.GetShow():
            self.resetView() # true means that one empty row is always present in each grid after reset
            try:
                import pygame
                pygame.init()
                pygame.mixer.init()
                pygame.mixer.music.load(
                    #os.path.dirname(os.path.abspath(__file__)) + "\\" +
                    self.musicPath)
                pygame.mixer.music.play()
            except Exception:
                print "Problem with music"
        else:
            try:
                pygame.quit()
            except Exception:
                print "creator: problem with pygame quit"
    def retToMenu(self, event):
        """Ask the controller node to navigate back to the main menu."""
        #self.parent.setView("Menu")
        msg = {}
        msg["To"] = "CreatorNode"
        msg["Operation"] = "MoveTo"
        msg["Args"] = {}
        msg["Args"]["TargetView"] = "MainMenu"
        msg["Args"]["TargetControlNode"] = "MainMenuNode"
        self.sender.send(json.dumps(msg))
    def getGridsContent(self):
        """Return all grids as {gridName: [row dicts keyed by column name]}."""
        gridNamesList = ["Resources", "Buildings", "Dwellers"]
        dependencies = {} #initialize empty lists for each grid; lists will be of dict type
        for gridName in gridNamesList:
            grid = self.tables[gridName]
            gridRowsNum, gridColsNum = grid.GetNumberRows(), grid.GetNumberCols()
            dependencies[gridName] = [
                {
                    self.dependenciesHeaders[gridName][j] : grid.GetCellValue(i,j) for j in range(gridColsNum)
                } for i in range(gridRowsNum)
            ]
        # ^ here we have a dictionary representing an entire dependency set;
        # each key holds textual representation of each grid's content;
        # that representation is a list of rows in a grid;
        # each row content is described via a dictonary mapping the column name to its value in a particular row
        return dependencies
    def createDependencies(self, event):
        """Validate the grids (no empty cell, non-blank set name) and send
        the whole set to the controller node, blocking until it confirms."""
        gridNamesList = ["Resources", "Buildings", "Dwellers"]
        dependencies = self.getGridsContent()
        pp = PrettyPrinter()
        pp.pprint(dependencies)
        #check if each cell has content (is not empty)
        for gridName in gridNamesList:
            tableTextRepresentation = dependencies[gridName]
            for row_index, row in enumerate(tableTextRepresentation):
                for columnName, value in row.items():
                    if value == "":
                        errorMsg = "Dependencies not created: empty cell\nat: " + gridName + "\nrow: " + str(row_index) + "\ncolumn name: " + columnName
                        print errorMsg
                        self.errorMsgField.SetLabelText(errorMsg)
                        return
        setName = self.ctrlText.GetValue()
        if re.sub(r'\s', "", setName) == "":
            errorMsg = "Please, fill dependencies set name field"
            print errorMsg
            self.errorMsgField.SetLabelText(errorMsg)
            return
        msg = "Dependencies sent to further processing to creator controller"
        print msg
        self.errorMsgField.SetLabelText(msg)
        # Correlate the eventual ParseConfirm reply with this request.
        uuid = uuid4().__str__()
        self.ackMsgs[uuid] = False
        msg = {}
        msg["To"] = "CreatorNode"
        msg["Operation"] = "Parse"
        msg["Args"] = {}
        msg["Args"]["Dependencies"] = dependencies
        msg["Args"]["DependenciesSetName"] = setName
        msg["Args"]["UUID"] = uuid
        stream = json.dumps(msg)
        print stream
        self.sender.send(stream)
        # NOTE(review): busy-wait blocks this (GUI) thread until readMsg()
        # flips the flag; there is no timeout if the confirmation never comes.
        while not self.ackMsgs[uuid]: pass
        msg = "Dependencies created successfully, please go to the Loader menu now to check what was created"
        print msg
        self.errorMsgField.SetLabelText(msg)
    def save(self, event):
        """Dump the grids to a user-chosen .dep file as JSON."""
        dependencies = self.getGridsContent()
        dlg = wx.FileDialog(
            self,
            defaultDir = relative_dependencies_path,
            message = "Choose a file to save",
            wildcard = "*.dep",
            style = wx.FD_SAVE
        )
        if dlg.ShowModal() == wx.ID_OK:
            path = dlg.GetPath()
            with open(path, "wb+") as f:
                # Newlines after commas make the dump roughly human-readable.
                f.write(json.dumps(dependencies).replace(",",",\n"))
    def addRow(self, event):
        """Append one empty row to the currently visible grid."""
        self.tables[self.currentGrid].AppendRows()
        self.centerSizer.Layout()
        pos = self.tables[self.currentGrid].GetGridCursorRow()
        msg = "row added to " + self.currentGrid + " current pos:" + str(pos)
        print msg
        self.errorMsgField.SetLabelText(msg)
    def deleteRow(self, event):
        """Delete the row under the cursor in the currently visible grid."""
        pos = self.tables[self.currentGrid].GetGridCursorRow()
        self.tables[self.currentGrid].DeleteRows(pos)
        msg = "Row removed from " + self.currentGrid + " at: " + str(pos)
        print msg
        self.errorMsgField.SetLabelText(msg)
    def readMsg(self, msg):
        """Handle an incoming JSON message; ParseConfirm releases the thread
        blocked in createDependencies()."""
        print "Creator view got msg", msg
        jsonMsg = json.loads(msg)
        operation = jsonMsg["Operation"]
        if operation == "ParseConfirm":
            self.ackMsgs[jsonMsg["Args"]["UUID"]] = True #unblock blocked thread
    def dependencyLoadFail(self):
        """Report a failed .dep load in the status field."""
        errorMsg = "Not a valid file format, need a .dep file"
        print errorMsg
        self.errorMsgField.SetLabelText(errorMsg)
    def resetGrids(self, addStartRow = False):
        """Clear every grid; optionally leave a single empty starting row."""
        gridNamesList = ["Resources", "Buildings", "Dwellers"]
        for gridName in gridNamesList:
            grid = self.tables[gridName]
            rowsNum = grid.GetNumberRows()
            # DeleteRows() with no arguments removes one row per call.
            for i in range(rowsNum):
                grid.DeleteRows()
            if addStartRow: grid.AppendRows()
        self.centerSizer.Layout()
    def resetView(self):
        """Restore initial state: empty grids, Buildings mode, default name."""
        self.resetGrids(True)
        self.errorMsgField.SetLabelText(self.defaultErrorFieldMsg)
        self.tables[self.currentGrid].Hide()
        self.tables["Buildings"].Show()
        self.currentGrid = "Buildings"
        self.ctrlMsgField.SetLabel(self.infos[self.currentGrid])
        self.modes.SetStringSelection("Buildings")
        self.centerSizer.Layout()
        self.ctrlText.SetLabelText("Default Set")
    def fillGridsWithContent(self, content_dict):
        """Refill all grids from a dict shaped like getGridsContent() output.
        Returns False (grids left cleared) if the dict is malformed."""
        self.resetGrids()
        try:
            gridNamesList = ["Resources", "Buildings", "Dwellers"]
            for gridName in gridNamesList:
                tableTextRepresentation = content_dict[gridName]
                for row_index, row in enumerate(tableTextRepresentation):
                    self.tables[gridName].AppendRows(1)
                    for columnName, value in row.items():
                        j = self.dependenciesHeaders[gridName].index(columnName)
                        self.tables[gridName].SetCellValue(row_index,j, value)
        except Exception:
            return False
        self.centerSizer.Layout()
        return True
    def loadDependencies(self, event):
        """Load a user-chosen .dep file into the grids, restoring the
        previous content if the file cannot be applied."""
        dlg = wx.FileDialog(
            self,
            defaultDir = relative_dependencies_path,
            message = "Choose a file",
            wildcard="*.dep",
            style = wx.FD_OPEN | wx.FD_MULTIPLE
        )
        if dlg.ShowModal() == wx.ID_OK:
            path = dlg.GetPath()
            print "You chose the following file:", path
            if path.endswith(".dep"):
                with open (path, "r+") as dependency_file:
                    # NOTE(review): rewriting u'..' and ' into " converts
                    # Python-repr-style files to JSON, but corrupts values
                    # that legitimately contain quote characters — confirm.
                    dependency_content = dependency_file.read().replace("u'","'").replace("'","\"")
                    print dependency_content
                    try:
                        dependency_dict = json.loads(dependency_content)
                        grids_copy = self.getGridsContent() #if sth goes wrong, we can restore previous state
                        if not self.fillGridsWithContent(dependency_dict):
                            errorMsg = "Error while loading file"
                            print errorMsg
                            self.errorMsgField.SetLabelText(errorMsg)
                            self.fillGridsWithContent(grids_copy) #here we restore previous state of grids
                        else:
                            msg = "Dependencies loaded successfully!"
                            print msg
                            self.errorMsgField.SetLabelText(msg)
                    except Exception:
                        self.dependencyLoadFail()
                        traceback.print_exc()
            else:
                self.dependencyLoadFail()
| |
from __future__ import absolute_import, print_function
import cython
from .. import __version__
import collections
import re, os, sys, time
from glob import iglob
try:
import gzip
gzip_open = gzip.open
gzip_ext = '.gz'
except ImportError:
gzip_open = open
gzip_ext = ''
import shutil
import subprocess
import os
try:
import hashlib
except ImportError:
import md5 as hashlib
try:
from io import open as io_open
except ImportError:
from codecs import open as io_open
try:
    from os.path import relpath as _relpath
except ImportError:
    # Py<2.6
    def _relpath(path, start=os.path.curdir):
        # Minimal reimplementation of os.path.relpath for ancient Pythons.
        if not path:
            raise ValueError("no path specified")
        start_list = os.path.abspath(start).split(os.path.sep)
        path_list = os.path.abspath(path).split(os.path.sep)
        # Walk up from start to the common prefix, then down into path.
        i = len(os.path.commonprefix([start_list, path_list]))
        rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
        if not rel_list:
            return os.path.curdir
        return os.path.join(*rel_list)
try:
import pythran
PythranAvailable = True
except:
PythranAvailable = False
from distutils.extension import Extension
from distutils.util import strtobool
from .. import Utils
from ..Utils import (cached_function, cached_method, path_exists,
safe_makedirs, copy_file_to_dir_if_newer, is_package_dir)
from ..Compiler.Main import Context, CompilationOptions, default_options
join_path = cached_function(os.path.join)
copy_once_if_newer = cached_function(copy_file_to_dir_if_newer)
safe_makedirs_once = cached_function(safe_makedirs)
if sys.version_info[0] < 3:
    # stupid Py2 distutils enforces str type in list of sources
    _fs_encoding = sys.getfilesystemencoding()
    if _fs_encoding is None:
        _fs_encoding = sys.getdefaultencoding()
    def encode_filename_in_py2(filename):
        # Py2 distutils wants byte strings; encode unicode file names.
        if not isinstance(filename, bytes):
            return filename.encode(_fs_encoding)
        return filename
else:
    def encode_filename_in_py2(filename):
        # On Py3 file names are passed through unchanged.
        return filename
    basestring = str
def extended_iglob(pattern):
    """Yield paths matching *pattern*, supporting {a,b} alternation and
    recursive '**/' directory wildcards on top of plain glob syntax.
    """
    # Expand one {case1,case2,...} group by recursing on each alternative.
    if '{' in pattern:
        m = re.match('(.*){([^}]+)}(.*)', pattern)
        if m:
            before, switch, after = m.groups()
            for case in switch.split(','):
                for path in extended_iglob(before + case + after):
                    yield path
            return
    if '**/' not in pattern:
        for path in iglob(pattern):
            yield path
        return
    # Recursive wildcard: match at the current level, then descend one
    # directory at a time, de-duplicating overlapping matches.
    seen = set()
    first, rest = pattern.split('**/', 1)
    roots = iglob(first + '/') if first else ['']
    for root in roots:
        for path in extended_iglob(join_path(root, rest)):
            if path not in seen:
                seen.add(path)
                yield path
        for path in extended_iglob(join_path(root, '*', '**/' + rest)):
            if path not in seen:
                seen.add(path)
                yield path
def nonempty(it, error_msg="expected non-empty iterator"):
    """Pass through the values of *it*, raising ValueError(error_msg) if the
    iterable yielded nothing at all."""
    produced_anything = False
    for value in it:
        produced_anything = True
        yield value
    if not produced_anything:
        raise ValueError(error_msg)
@cached_function
def file_hash(filename):
    """Return an md5 hex digest over the normalized path and file contents."""
    path = os.path.normpath(filename.encode("UTF-8"))
    # Prefix with the path length so (path, data) pairs hash unambiguously.
    m = hashlib.md5((str(len(path)) + ":").encode("UTF-8"))
    m.update(path)
    with open(filename, 'rb') as f:
        chunk = f.read(65000)
        while chunk:
            m.update(chunk)
            chunk = f.read(65000)
    return m.hexdigest()
def parse_list(s):
    """
    >>> parse_list("")
    []
    >>> parse_list("a")
    ['a']
    >>> parse_list("a b c")
    ['a', 'b', 'c']
    >>> parse_list("[a, b, c]")
    ['a', 'b', 'c']
    >>> parse_list('a " " b')
    ['a', ' ', 'b']
    >>> parse_list('[a, ",a", "a,", ",", ]')
    ['a', ',a', 'a,', ',']
    """
    # Bracketed lists are comma separated; bare lists are space separated.
    if len(s) >= 2 and s[0] == '[' and s[-1] == ']':
        s, delimiter = s[1:-1], ','
    else:
        delimiter = ' '
    # Replace string literals with placeholder labels first so the delimiter
    # may safely appear inside quoted items.
    s, literal_map = strip_string_literals(s)
    def expand(item):
        item = item.strip()
        # A leading quote means the item is a label wrapped in quote chars.
        return literal_map[item[1:-1]] if item[0] in "'\"" else item
    return [expand(item) for item in s.split(delimiter) if item.strip()]
# Sentinels describing how a setting propagates from a dependency to the
# modules that use it (see DistutilsInfo.merge):
transitive_str = object()   # inherited only if not already set
transitive_list = object()  # values from all dependencies are appended
bool_or = object()          # values are or-ed together
distutils_settings = {
    'name':                 str,
    'sources':              list,
    'define_macros':        list,
    'undef_macros':         list,
    'libraries':            transitive_list,
    'library_dirs':         transitive_list,
    'runtime_library_dirs': transitive_list,
    'include_dirs':         transitive_list,
    'extra_objects':        list,
    'extra_compile_args':   transitive_list,
    'extra_link_args':      transitive_list,
    'export_symbols':       list,
    'depends':              transitive_list,
    'language':             transitive_str,
    'np_pythran':           bool_or
}
@cython.locals(start=cython.Py_ssize_t, end=cython.Py_ssize_t)
def line_iter(source):
    """Iterate over the lines of *source*, which may be a plain string or
    any iterable of lines (e.g. an open file)."""
    if not isinstance(source, basestring):
        for line in source:
            yield line
        return
    # Split a string lazily instead of materializing source.split('\n').
    start = 0
    while True:
        end = source.find('\n', start)
        if end == -1:
            yield source[start:]
            return
        yield source[start:end]
        start = end + 1
class DistutilsInfo(object):
    """Build settings for an extension module, collected either from magic
    comments at the top of a source file ('# distutils: key = value' /
    '# cython: key = value') or from an existing Extension object."""

    def __init__(self, source=None, exn=None):
        # Maps setting name -> parsed value; see distutils_settings for the
        # per-key value type.
        self.values = {}
        if source is not None:
            for line in line_iter(source):
                line = line.lstrip()
                if not line:
                    continue
                if line[0] != '#':
                    # Directives are only read from the leading comment block.
                    break
                line = line[1:].lstrip()
                kind = next((k for k in ("distutils:","cython:") if line.startswith(k)), None)
                if not kind is None:
                    key, _, value = [s.strip() for s in line[len(kind):].partition('=')]
                    type = distutils_settings.get(key, None)
                    # Unknown 'cython:' keys are compiler directives, not ours.
                    if line.startswith("cython:") and type is None: continue
                    if type in (list, transitive_list):
                        value = parse_list(value)
                        if key == 'define_macros':
                            # distutils expects (name, value-or-None) pairs.
                            value = [tuple(macro.split('=', 1))
                                     if '=' in macro else (macro, None)
                                     for macro in value]
                    if type is bool_or:
                        value = strtobool(value)
                    self.values[key] = value
        elif exn is not None:
            # Copy the relevant settings over from an Extension instance.
            for key in distutils_settings:
                if key in ('name', 'sources','np_pythran'):
                    continue
                value = getattr(exn, key, None)
                if value:
                    self.values[key] = value

    def merge(self, other):
        """Merge *other*'s settings into self (in place) and return self.

        transitive_str values never override existing ones; transitive_list
        values are appended without duplicates; bool_or values are or-ed.
        """
        if other is None:
            return self
        for key, value in other.values.items():
            type = distutils_settings[key]
            if type is transitive_str and key not in self.values:
                self.values[key] = value
            elif type is transitive_list:
                if key in self.values:
                    # Change a *copy* of the list (Trac #845)
                    all = self.values[key][:]
                    for v in value:
                        if v not in all:
                            all.append(v)
                    value = all
                self.values[key] = value
            elif type is bool_or:
                self.values[key] = self.values.get(key, False) | value
        return self

    def subs(self, aliases):
        """Return a copy with values substituted through the *aliases* map;
        list-valued aliases are spliced into list settings. Returns self
        unchanged when aliases is None."""
        if aliases is None:
            return self
        resolved = DistutilsInfo()
        for key, value in self.values.items():
            type = distutils_settings[key]
            if type in [list, transitive_list]:
                new_value_list = []
                for v in value:
                    if v in aliases:
                        v = aliases[v]
                    if isinstance(v, list):
                        new_value_list += v
                    else:
                        new_value_list.append(v)
                value = new_value_list
            else:
                if value in aliases:
                    value = aliases[value]
            resolved.values[key] = value
        return resolved

    def apply(self, extension):
        """Apply these settings onto a distutils Extension; list-typed
        settings are appended to the extension's existing values."""
        for key, value in self.values.items():
            type = distutils_settings[key]
            if type in [list, transitive_list]:
                value = getattr(extension, key) + list(value)
            setattr(extension, key, value)
@cython.locals(start=cython.Py_ssize_t, q=cython.Py_ssize_t,
               single_q=cython.Py_ssize_t, double_q=cython.Py_ssize_t,
               hash_mark=cython.Py_ssize_t, end=cython.Py_ssize_t,
               k=cython.Py_ssize_t, counter=cython.Py_ssize_t, quote_len=cython.Py_ssize_t)
def strip_string_literals(code, prefix='__Pyx_L'):
    """
    Normalizes every string literal to be of the form '__Pyx_Lxxx',
    returning the normalized code and a mapping of labels to
    string literals.

    Comments are replaced by labels too (the '#' itself is kept in the
    code); quote characters around literals are preserved so the result
    still parses.
    """
    new_code = []   # chunks of rewritten code, joined at the end
    literals = {}   # label -> original literal/comment text
    counter = 0     # sequence number used to build unique labels
    start = q = 0   # start: begin of current chunk; q: scan position
    in_quote = False
    hash_mark = single_q = double_q = -1
    code_len = len(code)
    quote_type = quote_len = None
    while True:
        # Refresh the cached positions of the next '#', "'" and '"' once
        # the scan position has moved past them.
        if hash_mark < q:
            hash_mark = code.find('#', q)
        if single_q < q:
            single_q = code.find("'", q)
        if double_q < q:
            double_q = code.find('"', q)
        # Nearest quote: min() is right when both were found; the max()
        # fallback picks the other one when one of them is -1 (not found).
        q = min(single_q, double_q)
        if q == -1:
            q = max(single_q, double_q)
        # We're done.
        if q == -1 and hash_mark == -1:
            new_code.append(code[start:])
            break
        # Try to close the quote.
        elif in_quote:
            # An odd number of preceding backslashes escapes this quote char.
            if code[q-1] == u'\\':
                k = 2
                while q >= k and code[q-k] == u'\\':
                    k += 1
                if k % 2 == 0:
                    q += 1
                    continue
            if code[q] == quote_type and (
                    quote_len == 1 or (code_len > q + 2 and quote_type == code[q+1] == code[q+2])):
                counter += 1
                label = "%s%s_" % (prefix, counter)
                literals[label] = code[start+quote_len:q]
                # Keep the surrounding quote characters around the label.
                full_quote = code[q:q+quote_len]
                new_code.append(full_quote)
                new_code.append(label)
                new_code.append(full_quote)
                q += quote_len
                in_quote = False
                start = q
            else:
                q += 1
        # Process comment.
        elif -1 != hash_mark and (hash_mark < q or q == -1):
            # The '#' stays in the code; the comment text becomes a label.
            new_code.append(code[start:hash_mark+1])
            end = code.find('\n', hash_mark)
            counter += 1
            label = "%s%s_" % (prefix, counter)
            if end == -1:
                end_or_none = None
            else:
                end_or_none = end
            literals[label] = code[hash_mark+1:end_or_none]
            new_code.append(label)
            if end == -1:
                break
            start = q = end
        # Open the quote.
        else:
            # Triple quote if the next three characters repeat the quote char.
            if code_len >= q+3 and (code[q] == code[q+1] == code[q+2]):
                quote_len = 3
            else:
                quote_len = 1
            in_quote = True
            quote_type = code[q]
            new_code.append(code[start:q])
            start = q
            q += quote_len
    return "".join(new_code), literals
# We need to allow spaces to allow for conditional compilation like
# IF ...:
#     cimport ...
# Match groups: (1) 'from MOD cimport', (2) 'cimport MOD[, MOD...]',
# (3) "cdef extern from 'FILE'", (4) "include 'FILE'".
dependency_regex = re.compile(r"(?:^\s*from +([0-9a-zA-Z_.]+) +cimport)|"
                              r"(?:^\s*cimport +([0-9a-zA-Z_.]+(?: *, *[0-9a-zA-Z_.]+)*))|"
                              r"(?:^\s*cdef +extern +from +['\"]([^'\"]+)['\"])|"
                              r"(?:^\s*include +['\"]([^'\"]+)['\"])", re.M)
def normalize_existing(base_path, rel_paths):
    # Uncached wrapper: key the cached helper on the *directory* of base_path
    # and a deduplicated tuple of paths so equivalent calls share one result.
    return normalize_existing0(os.path.dirname(base_path), tuple(set(rel_paths)))
@cached_function
def normalize_existing0(base_dir, rel_paths):
    """
    Given some base directory ``base_dir`` and a list of path names
    ``rel_paths``, normalize each relative path name ``rel`` by
    replacing it by ``os.path.join(base, rel)`` if that file exists.

    Return a couple ``(normalized, needed_base)`` where ``normalized``
    if the list of normalized file names and ``needed_base`` is
    ``base_dir`` if we actually needed ``base_dir``. If no paths were
    changed (for example, if all paths were already absolute), then
    ``needed_base`` is ``None``.
    """
    needed_base = None
    normalized = []
    for rel in rel_paths:
        if os.path.isabs(rel):
            # Absolute paths are passed through untouched.
            normalized.append(rel)
        else:
            candidate = join_path(base_dir, rel)
            if path_exists(candidate):
                normalized.append(os.path.normpath(candidate))
                needed_base = base_dir
            else:
                normalized.append(rel)
    return (normalized, needed_base)
def resolve_depends(depends, include_dirs):
    """Resolve each name in *depends* against *include_dirs*, dropping the
    ones that cannot be located."""
    include_dirs = tuple(include_dirs)  # hashable for the cached helper
    candidates = (resolve_depend(depend, include_dirs) for depend in depends)
    return [path for path in candidates if path is not None]
@cached_function
def resolve_depend(depend, include_dirs):
    """Return the normalized path of *depend* within *include_dirs*, or None
    if it is a system-style '<...>' dependency or cannot be found."""
    if depend[0] == '<' and depend[-1] == '>':
        return None
    for include_dir in include_dirs:
        candidate = join_path(include_dir, depend)
        if path_exists(candidate):
            return os.path.normpath(candidate)
    return None
@cached_function
def package(filename):
    """Return the chain of package names containing *filename*, as a tuple."""
    parent = os.path.dirname(os.path.abspath(str(filename)))
    # Stop once the path no longer shrinks or the parent is not a package.
    if parent == filename or not is_package_dir(parent):
        return ()
    return package(parent) + (os.path.basename(parent),)
@cached_function
def fully_qualified_name(filename):
    """Return the dotted module name for ``filename``, prefixed with the
    packages it lives in."""
    stem = os.path.splitext(os.path.basename(filename))[0]
    return '.'.join(package(filename) + (stem,))
@cached_function
def parse_dependencies(source_filename):
    """Extract ``(cimports, includes, externs, distutils_info)`` from a
    Cython source file.

    Actual parsing is way too slow, so this relies on regular expressions;
    comments and string literals are stripped beforehand so they cannot
    produce false matches.
    """
    fh = Utils.open_source_file(source_filename, error_handling='ignore')
    try:
        source = fh.read()
    finally:
        fh.close()
    # Distutils directives live in comments, so read them *before* stripping.
    distutils_info = DistutilsInfo(source)
    source, literals = strip_string_literals(source)
    # Undo line continuations and normalize tabs so ^-anchored patterns work.
    source = source.replace('\\\n', ' ').replace('\t', ' ')
    # TODO: pure mode
    cimports = []
    includes = []
    externs = []
    for match in dependency_regex.finditer(source):
        cimport_from, cimport_list, extern, include = match.groups()
        if cimport_from:
            cimports.append(cimport_from)
        elif cimport_list:
            cimports.extend(name.strip() for name in cimport_list.split(","))
        elif extern:
            # extern/include matched a string-literal placeholder; map it
            # back to the original text via the literals table.
            externs.append(literals[extern])
        else:
            includes.append(literals[include])
    return cimports, includes, externs, distutils_info
class DependencyTree(object):
    """Caching resolver for the cimport/include/extern dependency graph of
    Cython source files, built on top of ``parse_dependencies()``."""

    def __init__(self, context, quiet=False):
        self.context = context
        self.quiet = quiet
        # Keyed by (extract, merge) function pair; see transitive_merge().
        self._transitive_cache = {}

    def parse_dependencies(self, source_filename):
        """Parse one file, normalizing the path first when it exists."""
        if path_exists(source_filename):
            source_filename = os.path.normpath(source_filename)
        return parse_dependencies(source_filename)

    @cached_method
    def included_files(self, filename):
        """Return the transitive set of files pulled in via ``include``."""
        # This is messy because included files are textually included, resolving
        # cimports (but not includes) relative to the including file.
        included = set()
        for include in self.parse_dependencies(filename)[1]:
            include_path = join_path(os.path.dirname(filename), include)
            if not path_exists(include_path):
                include_path = self.context.find_include_file(include, None)
            if include_path:
                if '.' + os.path.sep in include_path:
                    include_path = os.path.normpath(include_path)
                included.add(include_path)
                included.update(self.included_files(include_path))
            elif not self.quiet:
                # BUG FIX: the unlocatable file is the *include*, referenced
                # from *filename*; the two arguments were previously swapped.
                print("Unable to locate '%s' referenced from '%s'" % (include, filename))
        return included

    @cached_method
    def cimports_externs_incdirs(self, filename):
        """Return (cimports, externs, incdirs) including nested includes."""
        # This is really ugly. Nested cimports are resolved with respect to the
        # includer, but includes are resolved with respect to the includee.
        cimports, includes, externs = self.parse_dependencies(filename)[:3]
        cimports = set(cimports)
        externs = set(externs)
        incdirs = set()
        for include in self.included_files(filename):
            included_cimports, included_externs, included_incdirs = self.cimports_externs_incdirs(include)
            cimports.update(included_cimports)
            externs.update(included_externs)
            incdirs.update(included_incdirs)
        externs, incdir = normalize_existing(filename, externs)
        if incdir:
            incdirs.add(incdir)
        return tuple(cimports), externs, incdirs

    def cimports(self, filename):
        return self.cimports_externs_incdirs(filename)[0]

    def package(self, filename):
        return package(filename)

    def fully_qualified_name(self, filename):
        return fully_qualified_name(filename)

    @cached_method
    def find_pxd(self, module, filename=None):
        """Locate the .pxd file for ``module``; ``filename`` anchors
        relative imports.  Returns None when it cannot be found."""
        is_relative = module[0] == '.'
        if is_relative and not filename:
            raise NotImplementedError("New relative imports.")
        if filename is not None:
            module_path = module.split('.')
            if is_relative:
                module_path.pop(0)  # just explicitly relative
            package_path = list(self.package(filename))
            while module_path and not module_path[0]:
                # Each extra leading dot pops one package level.
                try:
                    package_path.pop()
                except IndexError:
                    return None  # FIXME: error?
                module_path.pop(0)
            relative = '.'.join(package_path + module_path)
            pxd = self.context.find_pxd_file(relative, None)
            if pxd:
                return pxd
        if is_relative:
            return None  # FIXME: error?
        return self.context.find_pxd_file(module, None)

    @cached_method
    def cimported_files(self, filename):
        """Return the tuple of .pxd files this file depends on."""
        if filename[-4:] == '.pyx' and path_exists(filename[:-4] + '.pxd'):
            pxd_list = [filename[:-4] + '.pxd']
        else:
            pxd_list = []
        for module in self.cimports(filename):
            if module[:7] == 'cython.' or module == 'cython':
                # Built-in cython modules have no .pxd on disk.
                continue
            pxd_file = self.find_pxd(module, filename)
            if pxd_file is not None:
                pxd_list.append(pxd_file)
            elif not self.quiet:
                print("%s: cannot find cimported module '%s'" % (filename, module))
        return tuple(pxd_list)

    @cached_method
    def immediate_dependencies(self, filename):
        """Files this file directly depends on (including itself)."""
        deps = set([filename])
        deps.update(self.cimported_files(filename))
        deps.update(self.included_files(filename))
        return deps

    def all_dependencies(self, filename):
        """Transitive closure of immediate_dependencies()."""
        return self.transitive_merge(filename, self.immediate_dependencies, set.union)

    @cached_method
    def timestamp(self, filename):
        return os.path.getmtime(filename)

    def extract_timestamp(self, filename):
        return self.timestamp(filename), filename

    def newest_dependency(self, filename):
        """Return (mtime, path) of the most recently modified dependency."""
        return max([self.extract_timestamp(f) for f in self.all_dependencies(filename)])

    def transitive_fingerprint(self, filename, extra=None):
        """Hash of this file, all its dependencies and the Cython version,
        used as a compilation-cache key.  Returns None on read errors."""
        try:
            m = hashlib.md5(__version__.encode('UTF-8'))
            m.update(file_hash(filename).encode('UTF-8'))
            for x in sorted(self.all_dependencies(filename)):
                if os.path.splitext(x)[1] not in ('.c', '.cpp', '.h'):
                    m.update(file_hash(x).encode('UTF-8'))
            if extra is not None:
                m.update(str(extra).encode('UTF-8'))
            return m.hexdigest()
        except IOError:
            return None

    def distutils_info0(self, filename):
        """Distutils metadata for one file, augmented with extern deps."""
        info = self.parse_dependencies(filename)[3]
        kwds = info.values
        cimports, externs, incdirs = self.cimports_externs_incdirs(filename)
        # Add dependencies on "cdef extern from ..." files
        if externs:
            if 'depends' in kwds:
                kwds['depends'] = list(set(kwds['depends']).union(externs))
            else:
                kwds['depends'] = list(externs)
        # Add include_dirs to ensure that the C compiler will find the
        # "cdef extern from ..." files
        if incdirs:
            include_dirs = list(kwds.get('include_dirs', []))
            for inc in incdirs:
                if inc not in include_dirs:
                    include_dirs.append(inc)
            kwds['include_dirs'] = include_dirs
        return info

    def distutils_info(self, filename, aliases=None, base=None):
        """Merged distutils metadata over the whole dependency closure."""
        return (self.transitive_merge(filename, self.distutils_info0, DistutilsInfo.merge)
                .subs(aliases)
                .merge(base))

    def transitive_merge(self, node, extract, merge):
        """Fold ``extract`` over the dependency graph, merging with
        ``merge``; results are memoized per (extract, merge) pair."""
        try:
            seen = self._transitive_cache[extract, merge]
        except KeyError:
            seen = self._transitive_cache[extract, merge] = {}
        return self.transitive_merge_helper(
            node, extract, merge, seen, {}, self.cimported_files)[0]

    def transitive_merge_helper(self, node, extract, merge, seen, stack, outgoing):
        """DFS with cycle detection: ``stack`` maps in-progress nodes to
        their depth; a node inside a cycle is only cached once the cycle
        head completes."""
        if node in seen:
            return seen[node], None
        deps = extract(node)
        if node in stack:
            # Back-edge: report the cycle entry point to the caller.
            return deps, node
        try:
            stack[node] = len(stack)
            loop = None
            for successor in outgoing(node):
                sub_deps, sub_loop = self.transitive_merge_helper(successor, extract, merge, seen, stack, outgoing)
                if sub_loop is not None:
                    # Keep the outermost (shallowest) cycle head.
                    if loop is not None and stack[loop] < stack[sub_loop]:
                        pass
                    else:
                        loop = sub_loop
                deps = merge(deps, sub_deps)
            if loop == node:
                loop = None
            if loop is None:
                seen[node] = deps
            return deps, loop
        finally:
            del stack[node]
# Module-level singleton so repeated cythonize() calls share one tree.
_dep_tree = None

def create_dependency_tree(ctx=None, quiet=False):
    """Return the process-wide DependencyTree, creating it on first use."""
    global _dep_tree
    if _dep_tree is not None:
        return _dep_tree
    if ctx is None:
        ctx = Context(["."], CompilationOptions(default_options))
    _dep_tree = DependencyTree(ctx, quiet=quiet)
    return _dep_tree
# If this changes, change also docs/src/reference/compilation.rst
# which mentions this function
def default_create_extension(template, kwds):
    """Build an Extension of the same class as ``template`` from ``kwds``
    and return ``(extension, metadata)``.

    When ``kwds`` carries a 'depends' list, its entries are resolved
    against the include dirs (plus ".") and merged with the template's
    own depends.
    """
    if 'depends' in kwds:
        search_dirs = kwds.get('include_dirs', []) + ["."]
        found = resolve_depends(kwds['depends'], search_dirs)
        kwds['depends'] = sorted(set(found + template.depends))
    ext = template.__class__(**kwds)
    metadata = dict(distutils=kwds, module_name=kwds['name'])
    return (ext, metadata)
# This may be useful for advanced users?
def create_extension_list(patterns, exclude=None, ctx=None, aliases=None, quiet=False, language=None,
                          exclude_failures=False):
    """
    Expand ``patterns`` (glob strings and/or distutils/setuptools Extension
    objects) into a list of Extension objects plus a
    ``{module_name: metadata}`` dict describing each generated extension.
    """
    if language is not None:
        print('Please put "# distutils: language=%s" in your .pyx or .pxd file(s)' % language)
    if exclude is None:
        exclude = []
    if patterns is None:
        return [], {}
    # NOTE(review): collections.Iterable is deprecated since Python 3.3 and
    # removed in 3.10; collections.abc.Iterable is the long-term spelling.
    # basestring is presumably aliased for Py3 elsewhere in this module.
    elif isinstance(patterns, basestring) or not isinstance(patterns, collections.Iterable):
        patterns = [patterns]
    explicit_modules = set([m.name for m in patterns if isinstance(m, Extension)])
    seen = set()
    deps = create_dependency_tree(ctx, quiet=quiet)
    to_exclude = set()
    if not isinstance(exclude, list):
        exclude = [exclude]
    for pattern in exclude:
        to_exclude.update(map(os.path.abspath, extended_iglob(pattern)))

    module_list = []
    module_metadata = {}

    # workaround for setuptools
    if 'setuptools' in sys.modules:
        Extension_distutils = sys.modules['setuptools.extension']._Extension
        Extension_setuptools = sys.modules['setuptools'].Extension
    else:
        # dummy class, in case we do not have setuptools
        Extension_distutils = Extension
        class Extension_setuptools(Extension): pass

    # if no create_extension() function is defined, use a simple
    # default function.
    create_extension = ctx.options.create_extension or default_create_extension

    for pattern in patterns:
        if isinstance(pattern, str):
            filepattern = pattern
            template = Extension(pattern, [])  # Fake Extension without sources
            name = '*'
            base = None
            ext_language = language
        elif isinstance(pattern, (Extension_distutils, Extension_setuptools)):
            cython_sources = [s for s in pattern.sources
                              if os.path.splitext(s)[1] in ('.py', '.pyx')]
            if cython_sources:
                # Only the first Cython source is compiled per Extension.
                filepattern = cython_sources[0]
                if len(cython_sources) > 1:
                    print("Warning: Multiple cython sources found for extension '%s': %s\n"
                          "See http://cython.readthedocs.io/en/latest/src/userguide/sharing_declarations.html "
                          "for sharing declarations among Cython files." % (pattern.name, cython_sources))
            else:
                # ignore non-cython modules
                module_list.append(pattern)
                continue
            template = pattern
            name = template.name
            base = DistutilsInfo(exn=template)
            ext_language = None  # do not override whatever the Extension says
        else:
            msg = str("pattern is not of type str nor subclass of Extension (%s)"
                      " but of type %s and class %s" % (repr(Extension),
                                                        type(pattern),
                                                        pattern.__class__))
            raise TypeError(msg)

        for file in nonempty(sorted(extended_iglob(filepattern)), "'%s' doesn't match any files" % filepattern):
            if os.path.abspath(file) in to_exclude:
                continue
            pkg = deps.package(file)
            module_name = deps.fully_qualified_name(file)
            if '*' in name:
                # Glob pattern: skip modules already named explicitly.
                if module_name in explicit_modules:
                    continue
            elif name != module_name:
                print("Warning: Extension name '%s' does not match fully qualified name '%s' of '%s'" % (
                    name, module_name, file))
                module_name = name

            if module_name not in seen:
                try:
                    kwds = deps.distutils_info(file, aliases, base).values
                except Exception:
                    if exclude_failures:
                        continue
                    raise
                if base is not None:
                    # Fill in template-level values without overriding
                    # per-file distutils directives.
                    for key, value in base.values.items():
                        if key not in kwds:
                            kwds[key] = value

                kwds['name'] = module_name

                sources = [file] + [m for m in template.sources if m != filepattern]
                if 'sources' in kwds:
                    # allow users to add .c files etc.
                    for source in kwds['sources']:
                        source = encode_filename_in_py2(source)
                        if source not in sources:
                            sources.append(source)
                kwds['sources'] = sources

                if ext_language and 'language' not in kwds:
                    kwds['language'] = ext_language

                np_pythran = kwds.pop('np_pythran', False)

                # Create the new extension
                m, metadata = create_extension(template, kwds)
                if np_pythran:
                    if not PythranAvailable:
                        raise RuntimeError("You first need to install Pythran to use the np_pythran directive.")
                    pythran_ext = pythran.config.make_extension()
                    m.include_dirs.extend(pythran_ext['include_dirs'])
                    m.extra_compile_args.extend(pythran_ext['extra_compile_args'])
                    m.extra_link_args.extend(pythran_ext['extra_link_args'])
                    m.define_macros.extend(pythran_ext['define_macros'])
                    m.undef_macros.extend(pythran_ext['undef_macros'])
                    m.library_dirs.extend(pythran_ext['library_dirs'])
                    m.libraries.extend(pythran_ext['libraries'])
                    # These options are not compatible with the way normal Cython extensions work
                    try:
                        m.extra_compile_args.remove("-fwhole-program")
                    except ValueError: pass
                    try:
                        m.extra_compile_args.remove("-fvisibility=hidden")
                    except ValueError: pass
                    m.language = 'c++'
                m.np_pythran = np_pythran
                module_list.append(m)

                # Store metadata (this will be written as JSON in the
                # generated C file but otherwise has no purpose)
                module_metadata[module_name] = metadata

                if file not in m.sources:
                    # Old setuptools unconditionally replaces .pyx with .c
                    m.sources.remove(file.rsplit('.')[0] + '.c')
                    m.sources.insert(0, file)
                seen.add(name)
    return module_list, module_metadata
# This is the user-exposed entry point.
def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, force=False, language=None,
              exclude_failures=False, **options):
    """
    Compile a set of source modules into C/C++ files and return a list of distutils
    Extension objects for them.

    As module list, pass either a glob pattern, a list of glob patterns or a list of
    Extension objects.  The latter allows you to configure the extensions separately
    through the normal distutils options.

    When using glob patterns, you can exclude certain module names explicitly
    by passing them into the 'exclude' option.

    To globally enable C++ mode, you can pass language='c++'.  Otherwise, this
    will be determined at a per-file level based on compiler directives.  This
    affects only modules found based on file names.  Extension instances passed
    into cythonize() will not be changed.

    For parallel compilation, set the 'nthreads' option to the number of
    concurrent builds.

    For a broad 'try to compile' mode that ignores compilation failures and
    simply excludes the failed extensions, pass 'exclude_failures=True'.  Note
    that this only really makes sense for compiling .py files which can also
    be used without compilation.

    Additional compilation options can be passed as keyword arguments.
    """
    if exclude is None:
        exclude = []
    if 'include_path' not in options:
        options['include_path'] = ['.']
    if 'common_utility_include_dir' in options:
        if options.get('cache'):
            raise NotImplementedError("common_utility_include_dir does not yet work with caching")
        safe_makedirs(options['common_utility_include_dir'])

    # Pre-build one CompilationOptions per target language; the per-source
    # loop below rebinds ``options`` to the matching one.
    if PythranAvailable:
        pythran_options = CompilationOptions(**options);
        pythran_options.cplus = True
        pythran_options.np_pythran = True
        pythran_include_dir = os.path.dirname(pythran.__file__)

    c_options = CompilationOptions(**options)
    cpp_options = CompilationOptions(**options); cpp_options.cplus = True
    ctx = c_options.create_context()
    options = c_options
    module_list, module_metadata = create_extension_list(
        module_list,
        exclude=exclude,
        ctx=ctx,
        quiet=quiet,
        exclude_failures=exclude_failures,
        language=language,
        aliases=aliases)
    deps = create_dependency_tree(ctx, quiet=quiet)
    build_dir = getattr(options, 'build_dir', None)

    modules_by_cfile = {}
    to_compile = []
    for m in module_list:
        if build_dir:
            root = os.getcwd()  # distutil extension depends are relative to cwd
            def copy_to_build_dir(filepath, root=root):
                # Mirror a dependency file into build_dir, preserving its
                # path relative to the cwd.
                filepath_abs = os.path.abspath(filepath)
                if os.path.isabs(filepath):
                    filepath = filepath_abs
                if filepath_abs.startswith(root):
                    mod_dir = join_path(build_dir,
                                        os.path.dirname(_relpath(filepath, root)))
                    copy_once_if_newer(filepath_abs, mod_dir)
            for dep in m.depends:
                copy_to_build_dir(dep)

        new_sources = []
        for source in m.sources:
            base, ext = os.path.splitext(source)
            if ext in ('.pyx', '.py'):
                if m.np_pythran:
                    c_file = base + '.cpp'
                    options = pythran_options
                elif m.language == 'c++':
                    c_file = base + '.cpp'
                    options = cpp_options
                else:
                    c_file = base + '.c'
                    options = c_options

                # setup for out of place build directory if enabled
                if build_dir:
                    c_file = os.path.join(build_dir, c_file)
                    dir = os.path.dirname(c_file)
                    safe_makedirs_once(dir)

                if os.path.exists(c_file):
                    c_timestamp = os.path.getmtime(c_file)
                else:
                    # Missing output: always older than any source.
                    c_timestamp = -1

                # Priority goes first to modified files, second to direct
                # dependents, and finally to indirect dependents.
                if c_timestamp < deps.timestamp(source):
                    dep_timestamp, dep = deps.timestamp(source), source
                    priority = 0
                else:
                    dep_timestamp, dep = deps.newest_dependency(source)
                    priority = 2 - (dep in deps.immediate_dependencies(source))
                if force or c_timestamp < dep_timestamp:
                    if not quiet and not force:
                        if source == dep:
                            print("Compiling %s because it changed." % source)
                        else:
                            print("Compiling %s because it depends on %s." % (source, dep))
                    if not force and options.cache:
                        extra = m.language
                        fingerprint = deps.transitive_fingerprint(source, extra)
                    else:
                        fingerprint = None
                    to_compile.append((priority, source, c_file, fingerprint, quiet,
                                       options, not exclude_failures, module_metadata.get(m.name)))
                new_sources.append(c_file)
                if c_file not in modules_by_cfile:
                    modules_by_cfile[c_file] = [m]
                else:
                    modules_by_cfile[c_file].append(m)
            else:
                new_sources.append(source)
                if build_dir:
                    copy_to_build_dir(source)
        m.sources = new_sources

    if options.cache:
        if not os.path.exists(options.cache):
            os.makedirs(options.cache)
    to_compile.sort()
    # Drop "priority" component of "to_compile" entries and add a
    # simple progress indicator.
    N = len(to_compile)
    progress_fmt = "[{0:%d}/{1}] " % len(str(N))
    for i in range(N):
        progress = progress_fmt.format(i+1, N)
        to_compile[i] = to_compile[i][1:] + (progress,)

    if N <= 1:
        nthreads = 0
    if nthreads:
        # Requires multiprocessing (or Python >= 2.6)
        try:
            import multiprocessing
            pool = multiprocessing.Pool(
                nthreads, initializer=_init_multiprocessing_helper)
        except (ImportError, OSError):
            print("multiprocessing required for parallel cythonization")
            nthreads = 0
        else:
            # This is a bit more involved than it should be, because KeyboardInterrupts
            # break the multiprocessing workers when using a normal pool.map().
            # See, for example:
            # http://noswap.com/blog/python-multiprocessing-keyboardinterrupt
            try:
                result = pool.map_async(cythonize_one_helper, to_compile, chunksize=1)
                pool.close()
                while not result.ready():
                    try:
                        result.get(99999)  # seconds
                    except multiprocessing.TimeoutError:
                        pass
            except KeyboardInterrupt:
                pool.terminate()
                raise
            pool.join()
    if not nthreads:
        # Fall back to (or was configured for) sequential compilation.
        for args in to_compile:
            cythonize_one(*args)

    if exclude_failures:
        failed_modules = set()
        for c_file, modules in modules_by_cfile.items():
            if not os.path.exists(c_file):
                failed_modules.update(modules)
            elif os.path.getsize(c_file) < 200:
                f = io_open(c_file, 'r', encoding='iso8859-1')
                try:
                    if f.read(len('#error ')) == '#error ':
                        # dead compilation result
                        failed_modules.update(modules)
                finally:
                    f.close()
        if failed_modules:
            for module in failed_modules:
                module_list.remove(module)
            print("Failed compilations: %s" % ', '.join(sorted([
                module.name for module in failed_modules])))

    if options.cache:
        cleanup_cache(options.cache, getattr(options, 'cache_size', 1024 * 1024 * 100))
    # cythonize() is often followed by the (non-Python-buffered)
    # compiler output, flush now to avoid interleaving output.
    sys.stdout.flush()
    return module_list
if os.environ.get('XML_RESULTS'):
    compile_result_dir = os.environ['XML_RESULTS']
    def record_results(func):
        # Decorator: time each wrapped call and write a JUnit-style XML
        # result file per module into $XML_RESULTS.
        def with_record(*args):
            t = time.time()
            success = True
            try:
                try:
                    func(*args)
                except:
                    success = False
            finally:
                t = time.time() - t
                module = fully_qualified_name(args[0])
                name = "cythonize." + module
                failures = 1 - success  # bool arithmetic: 0 on success, 1 on failure
                if success:
                    failure_item = ""
                else:
                    failure_item = "failure"
                output = open(os.path.join(compile_result_dir, name + ".xml"), "w")
                output.write("""
                    <?xml version="1.0" ?>
                    <testsuite name="%(name)s" errors="0" failures="%(failures)s" tests="1" time="%(t)s">
                    <testcase classname="%(name)s" name="cythonize">
                    %(failure_item)s
                    </testcase>
                    </testsuite>
                """.strip() % locals())
                output.close()
        return with_record
else:
    def record_results(func):
        # No XML_RESULTS requested: identity (no-op) decorator.
        return func
# TODO: Share context? Issue: pyx processing leaks into pxd module
@record_results
def cythonize_one(pyx_file, c_file, fingerprint, quiet, options=None, raise_on_failure=True, embedded_metadata=None, progress=""):
    """Compile a single .pyx/.py file to ``c_file``.

    When ``fingerprint`` is given, a gzip'ed copy of the generated file is
    kept in ``options.cache`` keyed on the fingerprint, and a matching
    cache entry is reused instead of recompiling.  On failure, raises
    CompileError when ``raise_on_failure`` is set, otherwise removes any
    stale output file.
    """
    from ..Compiler.Main import compile, default_options
    from ..Compiler.Errors import CompileError, PyrexError

    if fingerprint:
        if not os.path.exists(options.cache):
            try:
                os.mkdir(options.cache)
            # BUG FIX: narrowed from a bare "except:" which also swallowed
            # KeyboardInterrupt/SystemExit.  Only a lost race with another
            # process creating the directory is acceptable here.
            except OSError:
                if not os.path.exists(options.cache):
                    raise
        # Cython-generated c files are highly compressible.
        # (E.g. a compression ratio of about 10 for Sage).
        fingerprint_file = join_path(
            options.cache, "%s-%s%s" % (os.path.basename(c_file), fingerprint, gzip_ext))
        if os.path.exists(fingerprint_file):
            if not quiet:
                print("%sFound compiled %s in cache" % (progress, pyx_file))
            # Touch the cache entry so cleanup_cache() sees it as recent.
            os.utime(fingerprint_file, None)
            g = gzip_open(fingerprint_file, 'rb')
            try:
                f = open(c_file, 'wb')
                try:
                    shutil.copyfileobj(g, f)
                finally:
                    f.close()
            finally:
                g.close()
            return
    if not quiet:
        print("%sCythonizing %s" % (progress, pyx_file))
    if options is None:
        options = CompilationOptions(default_options)
    options.output_file = c_file
    options.embedded_metadata = embedded_metadata

    any_failures = 0
    try:
        result = compile([pyx_file], options)
        if result.num_errors > 0:
            any_failures = 1
    except (EnvironmentError, PyrexError) as e:
        sys.stderr.write('%s\n' % e)
        any_failures = 1
        # XXX
        import traceback
        traceback.print_exc()
    except Exception:
        if raise_on_failure:
            raise
        import traceback
        traceback.print_exc()
        any_failures = 1

    if any_failures:
        if raise_on_failure:
            raise CompileError(None, pyx_file)
        elif os.path.exists(c_file):
            # Remove stale output so later runs do not pick it up.
            os.remove(c_file)
    elif fingerprint:
        # Success: store a compressed copy in the cache for future runs.
        f = open(c_file, 'rb')
        try:
            g = gzip_open(fingerprint_file, 'wb')
            try:
                shutil.copyfileobj(f, g)
            finally:
                g.close()
        finally:
            f.close()
def cythonize_one_helper(m):
    """Unpack one ``to_compile`` tuple for pool.map(); tracebacks are
    printed locally because multiprocessing mangles them in transit."""
    try:
        return cythonize_one(*m)
    except Exception:
        import traceback
        traceback.print_exc()
        raise
def _init_multiprocessing_helper():
# KeyboardInterrupt kills workers, so don't let them get it
import signal
signal.signal(signal.SIGINT, signal.SIG_IGN)
def cleanup_cache(cache, target_size, ratio=.85):
    """Trim the fingerprint cache directory down to ``target_size * ratio``
    bytes once its total size exceeds ``target_size``."""
    try:
        # Fast path: ask `du` for the total size and bail out early.
        p = subprocess.Popen(['du', '-s', '-k', os.path.abspath(cache)], stdout=subprocess.PIPE)
        res = p.wait()
        if res == 0:
            total_size = 1024 * int(p.stdout.read().strip().split()[0])
            if total_size < target_size:
                return
    except (OSError, ValueError):
        pass
    # Slow path: stat every entry ourselves.
    total_size = 0
    entries = []
    for entry in os.listdir(cache):
        entry_path = join_path(cache, entry)
        st = os.stat(entry_path)
        total_size += st.st_size
        entries.append((st.st_atime, st.st_size, entry_path))
    if total_size > target_size:
        # NOTE(review): reversed(sorted(...)) removes entries with the
        # *newest* access time first -- preserved as-is; confirm intent.
        for _atime, size, entry_path in reversed(sorted(entries)):
            os.unlink(entry_path)
            total_size -= size
            if total_size < target_size * ratio:
                break
| |
"""
Module providing easy API for working with remote files and folders.
"""
import hashlib
import re
import os
import six
from functools import partial
from fabric.context_managers import hide, settings
from fabric.operations import put, run, sudo
from fabric.state import env
from fabric.utils import abort, apply_lcwd
def exists(path, use_sudo=False, verbose=False):
    """
    Return True if given path exists on the current remote host.

    If ``use_sudo`` is True, will use `sudo` instead of `run`.

    `exists` will, by default, hide all output (including the run line,
    stdout, stderr and any warning resulting from the file not existing) in
    order to avoid cluttering output. You may specify ``verbose=True`` to
    change this behavior.
    """
    runner = sudo if use_sudo else run
    cmd = 'test -e %s' % _expand_path(path)
    if verbose:
        # Run normally so the user sees the command and its output.
        with settings(warn_only=True):
            return not runner(cmd).failed
    # Otherwise, be quiet.
    with settings(hide('everything'), warn_only=True):
        return not runner(cmd).failed
def is_link(path, use_sudo=False, verbose=False):
    """
    Return True if the given path is a symlink on the current remote host.

    If ``use_sudo`` is True, will use `.sudo` instead of `.run`.

    `.is_link` will, by default, hide all output. Give ``verbose=True`` to
    change this.
    """
    runner = sudo if use_sudo else run
    cmd = 'test -L "$(echo %s)"' % path
    managers = [] if verbose else [hide('everything')]
    with settings(*managers, warn_only=True):
        return runner(cmd).succeeded
def first(*args, **kwargs):
    """
    Given one or more file paths, returns first one found, or None if none
    exist. May specify ``use_sudo`` and ``verbose`` which are passed to
    `exists`.
    """
    return next((candidate for candidate in args if exists(candidate, **kwargs)), None)
def upload_template(filename, destination, context=None, use_jinja=False,
    template_dir=None, use_sudo=False, backup=True, mirror_local_mode=False,
    mode=None, pty=None, keep_trailing_newline=False, temp_dir=''):
    """
    Render and upload a template text file to a remote host.

    Returns the result of the inner call to `~fabric.operations.put` -- see its
    documentation for details.

    ``filename`` should be the path to a text file, which may contain `Python
    string interpolation formatting
    <http://docs.python.org/library/stdtypes.html#string-formatting>`_ and will
    be rendered with the given context dictionary ``context`` (if given.)

    Alternately, if ``use_jinja`` is set to True and you have the Jinja2
    templating library available, Jinja will be used to render the template
    instead. Templates will be loaded from the invoking user's current working
    directory by default, or from ``template_dir`` if given.

    The resulting rendered file will be uploaded to the remote file path
    ``destination``.  If the destination file already exists, it will be
    renamed with a ``.bak`` extension unless ``backup=False`` is specified.

    By default, the file will be copied to ``destination`` as the logged-in
    user; specify ``use_sudo=True`` to use `sudo` instead.

    The ``mirror_local_mode``, ``mode``, and ``temp_dir`` kwargs are passed
    directly to an internal `~fabric.operations.put` call; please see its
    documentation for details on these two options.

    The ``pty`` kwarg will be passed verbatim to any internal
    `~fabric.operations.run`/`~fabric.operations.sudo` calls, such as those
    used for testing directory-ness, making backups, etc.

    The ``keep_trailing_newline`` kwarg will be passed when creating
    Jinja2 Environment which is False by default, same as Jinja2's
    behaviour.

    .. versionchanged:: 1.1
        Added the ``backup``, ``mirror_local_mode`` and ``mode`` kwargs.
    .. versionchanged:: 1.9
        Added the ``pty`` kwarg.
    .. versionchanged:: 1.11
        Added the ``keep_trailing_newline`` kwarg.
    .. versionchanged:: 1.11
        Added the  ``temp_dir`` kwarg.
    """
    func = use_sudo and sudo or run
    if pty is not None:
        func = partial(func, pty=pty)
    # Normalize destination to be an actual filename, due to using StringIO
    with settings(hide('everything'), warn_only=True):
        if func('test -d %s' % _expand_path(destination)).succeeded:
            sep = "" if destination.endswith('/') else "/"
            destination += sep + os.path.basename(filename)

    # Use mode kwarg to implement mirror_local_mode, again due to using
    # StringIO
    if mirror_local_mode and mode is None:
        mode = os.stat(apply_lcwd(filename, env)).st_mode
        # To prevent put() from trying to do this
        # logic itself
        mirror_local_mode = False

    # Process template
    text = None
    if use_jinja:
        try:
            template_dir = template_dir or os.getcwd()
            template_dir = apply_lcwd(template_dir, env)
            from jinja2 import Environment, FileSystemLoader
            jenv = Environment(loader=FileSystemLoader(template_dir),
                               keep_trailing_newline=keep_trailing_newline)
            text = jenv.get_template(filename).render(**context or {})
            # Force to a byte representation of Unicode, or str()ification
            # within Paramiko's SFTP machinery may cause decode issues for
            # truly non-ASCII characters.
            text = text.encode('utf-8')
        except ImportError:
            import traceback
            tb = traceback.format_exc()
            abort(tb + "\nUnable to import Jinja2 -- see above.")
    else:
        if template_dir:
            filename = os.path.join(template_dir, filename)
        filename = apply_lcwd(filename, env)
        with open(os.path.expanduser(filename)) as inputfile:
            text = inputfile.read()
        if context:
            # Plain %-interpolation templating path.
            text = text % context

    # Back up original file
    if backup and exists(destination):
        func("cp %s{,.bak}" % _expand_path(destination))

    if six.PY3 is True and isinstance(text, bytes):
        # put() expects text on Py3; undo the utf-8 encoding above.
        text = text.decode('utf-8')

    # Upload the file.
    return put(
        local_path=six.StringIO(text),
        remote_path=destination,
        use_sudo=use_sudo,
        mirror_local_mode=mirror_local_mode,
        mode=mode,
        temp_dir=temp_dir
    )
def sed(filename, before, after, limit='', use_sudo=False, backup='.bak',
    flags='', shell=False):
    """
    Run a search-and-replace on ``filename`` with given regex patterns.

    Equivalent to ``sed -i<backup> -r -e "/<limit>/ s/<before>/<after>/<flags>g"
    <filename>``. Setting ``backup`` to an empty string will disable backup
    file creation.

    For convenience, ``before`` and ``after`` will automatically escape forward
    slashes, single quotes and parentheses for you, so you don't need to
    specify e.g. ``http:\/\/foo\.com``, instead just using ``http://foo\.com``
    is fine.

    If ``use_sudo`` is True, will use `sudo` instead of `run`.

    The ``shell`` argument will be eventually passed to `run`/`sudo`. It
    defaults to False in order to avoid problems with many nested levels of
    quotes and backslashes. However, setting it to True may help when using
    ``~fabric.operations.cd`` to wrap explicit or implicit ``sudo`` calls.
    (``cd`` by it's nature is a shell built-in, not a standalone command, so it
    should be called within a shell.)

    Other options may be specified with sed-compatible regex flags -- for
    example, to make the search and replace case insensitive, specify
    ``flags="i"``. The ``g`` flag is always specified regardless, so you do not
    need to remember to include it when overriding this parameter.

    .. versionadded:: 1.1
        The ``flags`` parameter.
    .. versionadded:: 1.6
        Added the ``shell`` keyword argument.
    """
    func = use_sudo and sudo or run
    # Characters to be escaped in both
    for char in "/'":
        before = before.replace(char, r'\%s' % char)
        after = after.replace(char, r'\%s' % char)
    # Characters to be escaped in replacement only (they're useful in regexen
    # in the 'before' part)
    for char in "()":
        after = after.replace(char, r'\%s' % char)
    if limit:
        limit = r'/%s/ ' % limit
    context = {
        'script': r"'%ss/%s/%s/%sg'" % (limit, before, after, flags),
        'filename': _expand_path(filename),
        'backup': backup
    }
    # Test the OS because of differences between sed versions
    with hide('running', 'stdout'):
        platform = run("uname")
    if platform in ('NetBSD', 'OpenBSD', 'QNX'):
        # Attempt to protect against failures/collisions
        hasher = hashlib.sha1()
        # NOTE(review): hashlib.update() requires bytes on Python 3;
        # env.host_string/filename are presumably str here -- confirm this
        # code path under Py3.
        hasher.update(env.host_string)
        hasher.update(filename)
        context['tmp'] = "/tmp/%s" % hasher.hexdigest()
        # Use temp file to work around lack of -i
        expr = r"""cp -p %(filename)s %(tmp)s \
&& sed -r -e %(script)s %(filename)s > %(tmp)s \
&& cp -p %(filename)s %(filename)s%(backup)s \
&& mv %(tmp)s %(filename)s"""
    else:
        context['extended_regex'] = '-E' if platform == 'Darwin' else '-r'
        expr = r"sed -i%(backup)s %(extended_regex)s -e %(script)s %(filename)s"
    command = expr % context
    return func(command, shell=shell)
def uncomment(filename, regex, use_sudo=False, char='#', backup='.bak',
    shell=False):
    """
    Attempt to uncomment all lines in ``filename`` matching ``regex``.

    The default comment delimiter is `#` and may be overridden by the ``char``
    argument.

    This function uses the `sed` function, and will accept the same
    ``use_sudo``, ``shell`` and ``backup`` keyword arguments that `sed` does.

    `uncomment` will remove a single whitespace character following the comment
    character, if it exists, but will preserve all preceding whitespace.  For
    example, ``# foo`` would become ``foo`` (the single space is stripped) but
    ``    # foo`` would become ``    foo`` (the single space is still stripped,
    but the preceding 4 spaces are not.)

    .. versionchanged:: 1.6
        Added the ``shell`` keyword argument.
    """
    # Keep leading whitespace via the \1 backreference; drop the comment
    # char plus at most one following whitespace character.
    pattern = r'^([[:space:]]*)%s[[:space:]]?' % char
    return sed(
        filename,
        before=pattern,
        after=r'\1',
        limit=regex,
        use_sudo=use_sudo,
        backup=backup,
        shell=shell
    )
def comment(filename, regex, use_sudo=False, char='#', backup='.bak',
    shell=False):
    """
    Attempt to comment out all lines in ``filename`` matching ``regex``.

    The default commenting character is `#` and may be overridden by the
    ``char`` argument.

    This function uses the `sed` function, and will accept the same
    ``use_sudo``, ``shell`` and ``backup`` keyword arguments that `sed` does.

    `comment` will prepend the comment character to the beginning of the line,
    so that lines end up looking like so::

        this line is uncommented
        #this line is commented
        #   this line is indented and commented

    In other words, comment characters will not "follow" indentation as they
    sometimes do when inserted by hand. Neither will they have a trailing space
    unless you specify e.g. ``char='# '``.

    .. note::
        In order to preserve the line being commented out, this function will
        wrap your ``regex`` argument in parentheses, so you don't need to. It
        will ensure that any preceding/trailing ``^`` or ``$`` characters are
        correctly moved outside the parentheses. For example, calling
        ``comment(filename, r'^foo$')`` will result in a `sed` call with the
        "before" regex of ``r'^(foo)$'`` (and the "after" regex, naturally, of
        ``r'#\\1'``.)

    .. versionadded:: 1.5
        Added the ``shell`` keyword argument.
    """
    # Peel off anchors so the capture group wraps only the line body.
    carot = '^' if regex.startswith('^') else ''
    dollar = '$' if regex.endswith('$') else ''
    body = regex
    if carot:
        body = body[1:]
    if dollar:
        body = body[:-1]
    return sed(
        filename,
        before="%s(%s)%s" % (carot, body, dollar),
        after=r'%s\1' % char,
        use_sudo=use_sudo,
        backup=backup,
        shell=shell
    )
def contains(filename, text, exact=False, use_sudo=False, escape=True,
             shell=False, case_sensitive=True):
    """
    Return True if ``filename`` contains ``text`` (which may be a regex.)

    By default a partial line match counts; pass ``exact=True`` so that
    only a line consisting of exactly ``text`` matches. The check runs
    ``egrep`` on the remote end (so it may not follow Python regex syntax
    perfectly) and skips the ``env.shell`` wrapper by default.

    If ``use_sudo`` is True, `sudo` is used instead of `run`. If
    ``escape`` is False, no regex-related escaping is performed (this also
    disables ``exact``, as no ``^``/``$`` anchors are added.) The
    ``shell`` argument is eventually passed to ``run/sudo``; see the
    description of the same argument in ``~fabric.contrib.sed`` for
    details. If ``case_sensitive`` is False, ``egrep`` gets the `-i` flag.

    .. versionchanged:: 1.0
        Swapped the order of the ``filename`` and ``text`` arguments to be
        consistent with other functions in this module.
    .. versionchanged:: 1.4
        Updated the regular expression related escaping to try and solve
        various corner cases.
    .. versionchanged:: 1.4
        Added ``escape`` keyword argument.
    .. versionadded:: 1.6
        Added the ``shell`` keyword argument.
    .. versionadded:: 1.11
        Added the ``case_sensitive`` keyword argument.
    """
    runner = sudo if use_sudo else run
    if escape:
        text = _escape_for_regex(text)
        if exact:
            text = "^%s$" % text
    with settings(hide('everything'), warn_only=True):
        flag = '' if case_sensitive else '-i '
        command = 'egrep %s"%s" %s' % (flag, text, _expand_path(filename))
        return runner(command, shell=shell).succeeded
def append(filename, text, use_sudo=False, partial=False, escape=True,
           shell=False):
    """
    Append string (or list of strings) ``text`` to ``filename``.

    When a list is given, each string inside is handled independently (but
    in the order given.) If ``text`` is already found in ``filename`` the
    append is skipped for that line; otherwise the text is appended via
    e.g. ``echo '$text' >> $filename``.

    The "already present" test defaults to a full line match
    (``^<text>$``); pass ``partial=True`` to search with ``^<text>`` only.
    Because ``text`` is single-quoted, single quotes are transparently
    backslash-escaped; disable this with ``escape=False``.

    If ``use_sudo`` is True, `sudo` is used instead of `run`. The
    ``shell`` argument is eventually passed to ``run/sudo``; see the
    description of the same argument in ``~fabric.contrib.sed`` for
    details.

    .. versionchanged:: 0.9.1
        Added the ``partial`` keyword argument.
    .. versionchanged:: 1.0
        Swapped the order of the ``filename`` and ``text`` arguments to be
        consistent with other functions in this module.
    .. versionchanged:: 1.0
        Changed default value of ``partial`` kwarg to be ``False``.
    .. versionchanged:: 1.4
        Updated the regular expression related escaping to try and solve
        various corner cases.
    .. versionadded:: 1.6
        Added the ``shell`` keyword argument.
    """
    runner = sudo if use_sudo else run
    # Normalize non-list input to be a list
    if isinstance(text, six.string_types):
        text = [text]
    for line in text:
        pattern = '^' + _escape_for_regex(line)
        if not partial:
            pattern += '$'
        already_present = (exists(filename, use_sudo=use_sudo) and line
                           and contains(filename, pattern,
                                        use_sudo=use_sudo, escape=False,
                                        shell=shell))
        if already_present:
            continue
        if escape:
            line = line.replace("'", r"'\\''")
        runner("echo '%s' >> %s" % (line, _expand_path(filename)))
def _escape_for_regex(text):
    """Escape ``text`` so ``egrep`` treats it as a literal string."""
    pattern = re.escape(text)
    # Backslashes need double escaping, dollar signs triple escaping,
    # while single quotes must not be escaped at all.
    for old, new in (('\\\\', '\\\\\\'), (r'\$', r'\\\$'), (r"\'", "'")):
        pattern = pattern.replace(old, new)
    return pattern
def _expand_path(path):
    # Wrap in $(echo ...) so the remote shell expands ~ and variables.
    return '"$(echo {0})"'.format(path)
| |
# coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2016
from __future__ import unicode_literals
from future.builtins import *
import os
import sys
import pickle
from past.builtins import basestring
import streamsx.ec as ec
from streamsx.topology.schema import StreamSchema
# Prefer ``dill`` for (de)serializing callables (it supports lambdas and
# interactively defined classes); fall back to standard ``pickle`` when
# dill is not installed.
try:
    import dill
    # Importing cloudpickle breaks dill's deserialization.
    # Workaround is to make dill aware of the ClassType type.
    if sys.version_info.major == 3:
        dill.dill._reverse_typemap['ClassType'] = type
    dill.settings['recurse'] = True
except ImportError:
    dill = pickle
import base64
import json
from pkgutil import extend_path
import streamsx
def __splpy_addDirToPath(dir):
    """Prepend ``dir`` to ``sys.path`` if it is an existing directory."""
    if not os.path.isdir(dir):
        return
    if dir not in sys.path:
        sys.path.insert(0, dir)
    # In case a streamsx module (e.g. streamsx.bm) is included in the
    # additional code, extend the streamsx package search path as well.
    if os.path.isdir(os.path.join(dir, 'streamsx')):
        streamsx.__path__ = extend_path(streamsx.__path__, streamsx.__name__)
def add_output_packages(out_dir):
    """Make the application's bundled site-packages directory importable."""
    version_dir = 'python%d.%d' % (sys.version_info.major,
                                   sys.version_info.minor)
    site_pkg = os.path.join(out_dir, 'etc', 'streamsx.topology', 'python',
                            'lib', version_dir, 'site-packages')
    __splpy_addDirToPath(site_pkg)
def setupOperator(dir):
    """Make the operator's ``opt/python`` modules and packages importable."""
    base = os.path.join(dir, 'opt', 'python')
    for sub in ('modules', 'packages'):
        __splpy_addDirToPath(os.path.join(base, sub))
#print("sys.path", sys.path)
def _json_object_out(v):
    """Serialize ``v`` to a JSON string; ``None`` passes through unchanged."""
    return None if v is None else json.dumps(v, ensure_ascii=False)
def _json_force_object(v):
    """Wrap a non-dict value as ``{'payload': v}``; dicts pass through."""
    return v if isinstance(v, dict) else {'payload': v}
# Get the callable from the value
# passed into the SPL PyFunction operator.
#
# It is either something that is callable
# and is used directly or is string
# that is a encoded pickled class instance
#
def _get_callable(f):
    """Resolve ``f`` to a callable.

    ``f`` is either already callable (returned as-is) or a base64-encoded,
    dill-pickled instance which is unpickled and verified callable.

    Raises ``TypeError`` when no callable can be produced.
    """
    if callable(f):
        return f
    if isinstance(f, basestring):
        ci = dill.loads(base64.b64decode(f))
        if callable(ci):
            return ci
        raise TypeError("Class is not callable" + str(type(ci)))
    # Previously this path raised NameError (``ci`` unbound); report the
    # offending value's own type instead.
    raise TypeError("Class is not callable" + str(type(f)))
def _verify_tuple(tuple_, attributes):
    """Coerce a callable's return value to an SPL tuple.

    Tuples and ``None`` pass through unchanged; a dict is mapped
    positionally onto ``attributes`` (missing keys become ``None``).
    Anything else raises ``TypeError``.
    """
    if tuple_ is None or isinstance(tuple_, tuple):
        return tuple_
    if isinstance(tuple_, dict):
        return tuple(tuple_.get(name) for name in attributes)
    raise TypeError("Function must return a tuple, dict or None:" + str(type(tuple_)))
import inspect
class _FunctionalCallable(object):
    """Base wrapper around the application's Python callable.

    ``callable_`` is either a plain callable (used directly) or an encoded
    pickled instance resolved by ``_get_callable``. When the instance had
    to be unpickled here, this wrapper owns its lifecycle: it registers
    the operator context (when supported) and calls ``__enter__`` now and
    ``__exit__`` from ``_splpy_shutdown``.
    """
    def __init__(self, callable_, attributes=None):
        self._callable = _get_callable(callable_)
        # True when this wrapper manages the callable's enter/exit lifecycle.
        self._cls = False
        # Output attribute names for structured-schema ("dict") styles.
        self._attributes = attributes
        if callable_ is not self._callable:
            # The callable was unpickled here, so nothing else will run its
            # lifecycle. Treat any non-function, non-builtin, non-class
            # object as a class instance whose __enter__ we must invoke.
            is_cls = not inspect.isfunction(self._callable)
            is_cls = is_cls and ( not inspect.isbuiltin(self._callable) )
            is_cls = is_cls and (not inspect.isclass(self._callable))
            if is_cls:
                if ec._is_supported():
                    self._callable._streamsx_ec_op = ec._get_opc(self._callable)
                self._cls = True
                ec._callable_enter(self._callable)
                ec._clear_opc()
    def __call__(self, tuple_):
        """Default implementation: invoke the callable directly."""
        return self._callable(tuple_)
    def _splpy_shutdown(self, exc_type=None, exc_value=None, traceback=None):
        # Forward operator shutdown (and any pending exception) to the
        # callable's __exit__, but only when this wrapper owns the lifecycle.
        if self._cls:
            return ec._callable_exit(self._callable, exc_type, exc_value, traceback)
class _PickleInObjectOut(_FunctionalCallable):
    """Depickle the input (when passed as a memoryview) and return the
    callable's result unchanged."""
    def __call__(self, tuple_, pm=None):
        value = pickle.loads(tuple_) if pm is not None else tuple_
        return self._callable(value)
class _PickleInPickleOut(_FunctionalCallable):
    """Depickle the input, call the application callable, and pickle any
    non-None result."""
    def __call__(self, tuple_, pm=None):
        value = pickle.loads(tuple_) if pm is not None else tuple_
        result = self._callable(value)
        return None if result is None else pickle.dumps(result)
class _PickleInJSONOut(_FunctionalCallable):
    """Depickle the input and serialize the callable's result to JSON."""
    def __call__(self, tuple_, pm=None):
        value = pickle.loads(tuple_) if pm is not None else tuple_
        return _json_object_out(self._callable(value))
class _PickleInStringOut(_FunctionalCallable):
    """Depickle the input; convert a non-None result to ``str``."""
    def __call__(self, tuple_, pm=None):
        value = pickle.loads(tuple_) if pm is not None else tuple_
        result = self._callable(value)
        return None if result is None else str(result)
class _PickleInTupleOut(_FunctionalCallable):
    """Depickle the input; coerce the result to an SPL tuple."""
    def __call__(self, tuple_, pm=None):
        value = pickle.loads(tuple_) if pm is not None else tuple_
        return _verify_tuple(self._callable(value), self._attributes)
class _ObjectInTupleOut(_FunctionalCallable):
    """Pass the input through; coerce the result to an SPL tuple."""
    def __call__(self, tuple_):
        return _verify_tuple(self._callable(tuple_), self._attributes)
class _ObjectInPickleOut(_FunctionalCallable):
    """Pass the input through; pickle a non-None result."""
    def __call__(self, tuple_):
        result = self._callable(tuple_)
        return None if result is None else pickle.dumps(result)
class _ObjectInStringOut(_FunctionalCallable):
    """Pass the input through; convert a non-None result to ``str``."""
    def __call__(self, tuple_):
        result = self._callable(tuple_)
        return None if result is None else str(result)
class _ObjectInJSONOut(_FunctionalCallable):
    """Pass the input through; serialize the result to JSON."""
    def __call__(self, tuple_):
        return _json_object_out(self._callable(tuple_))
class _JSONInObjectOut(_FunctionalCallable):
    """Deserialize JSON input; return the callable's result unchanged."""
    def __call__(self, tuple_):
        value = json.loads(tuple_)
        return self._callable(value)
class _JSONInPickleOut(_FunctionalCallable):
    """Deserialize JSON input; pickle a non-None result."""
    def __call__(self, tuple_):
        result = self._callable(json.loads(tuple_))
        return None if result is None else pickle.dumps(result)
class _JSONInStringOut(_FunctionalCallable):
    """Deserialize JSON input; convert a non-None result to ``str``."""
    def __call__(self, tuple_):
        result = self._callable(json.loads(tuple_))
        return None if result is None else str(result)
class _JSONInTupleOut(_FunctionalCallable):
    """Deserialize JSON input; coerce the result to an SPL tuple."""
    def __call__(self, tuple_):
        return _verify_tuple(self._callable(json.loads(tuple_)), self._attributes)
class _JSONInJSONOut(_FunctionalCallable):
    """Deserialize JSON input; serialize the result back to JSON."""
    def __call__(self, tuple_):
        return _json_object_out(self._callable(json.loads(tuple_)))
##
## Set of functions that wrap the application's Python callable
## with a function that correctly handles the input and output
## (return) value. The input is from the SPL operator, i.e.
## a value obtained from a tuple (attribute) as a Python object.
## The output is the value (as a Python object) to be returned
## to the SPL operator to be set as a tuple (attribute).
##
## The style is one of:
##
## pickle - Object is a Python byte string representing a pickled object.
## The object is depickled/pickled before being passed to/return from
## the application callable.
## The returned function must not maintain
## to the passed in value as it will be a memory view
## object with memory that will become invalid after the call.
##
## json - Object is a Python unicode string representing a serialized
## Json object. The object is deserialized/serialized before
## being passed to/return from the application callable.
##
## string - Object is a Python unicode string representing a string
## to be passed directly to the Python application callable.
## For output the function return is converted to a unicode
## string using str(value).
##
## dict - Object is a Python dictionary object
## to be passed directly to the Python application function.
## For output the function return is expecting a Python
## tuple with the values in the correct order for the
## the SPL schema or a dict that will be mapped to a tuple.
## Missing values (not enough fields in the Python tuple
## or set to None) are set to the SPL attribute type default.
## Really for output 'dict' means structured schema and the
## classes use 'TupleOut' as they return a Python tuple to
## the primitive operators.
##
## object - Object is a Python object passed directly into/ from the callable
## Used when passing by ref. In addition since from the Python
## point of view string and dict need no transformations
## they are mapped to the object versions, e.g.
## string_in == dict_in == object_in
##
## tuple - Object is an SPL schema passed as a regular Python tuple. The
## order of the Python tuple values matches the order of
## attributes in the schema. Upon return a tuple is expected
## like the dict style.
##
## The wrapper functions also ensure the correct context is set up for streamsx.ec
## and the __enter__/__exit__ methods are called.
## The core functionality of the wrapper functions are implemented as classes
## with the input_style__output_style (e.g. string_in__json_out) are fields
## set to the correct class object. The class object is called with the application
## callable and a function the SPL operator will call is returned.
# Given a callable that returns an iterable
# return a function that can be called
# repeatably by a source operator returning
#
# the next tuple in its pickled form
# Set up iterator from the callable.
# If an error occurs and __exit__ asks for it to be
# ignored then an empty source is created.
class _IterableAnyOut(_FunctionalCallable):
    """Source wrapper: repeatedly returns the next tuple from the iterator
    produced by calling the application callable.

    The iterator is created eagerly in ``__init__``. If that fails, the
    callable's ``__exit__`` may elect to suppress the error, in which case
    an empty source is substituted.
    """
    def __init__(self, callable, attributes=None):
        super(_IterableAnyOut, self).__init__(callable, attributes)
        try:
            self._it = iter(self._callable())
        except:
            ei = sys.exc_info()
            ignore = ec._callable_exit(self._callable, ei[0], ei[1], ei[2])
            if not ignore:
                raise ei[1]
            # Exception suppressed but nothing to iterate: empty source.
            self._it = iter([])
    def __call__(self):
        """Return the next non-None tuple, or None when the source is done."""
        while True:
            try:
                tuple_ = next(self._it)
                # Idiom fix: was ``if not tuple_ is None`` — same behavior.
                if tuple_ is not None:
                    return tuple_
            except StopIteration:
                return None
            except:
                ei = sys.exc_info()
                ignore = ec._callable_exit(self._callable, ei[0], ei[1], ei[2])
                if not ignore:
                    raise ei[1]
class _IterablePickleOut(_IterableAnyOut):
    """Source wrapper that pickles each tuple produced by the iterator."""
    def __init__(self, callable, attributes=None):
        super(_IterablePickleOut, self).__init__(callable, attributes)
        # Cache the pickling function to avoid a lookup per tuple.
        self.pdfn = pickle.dumps
    def __call__(self):
        value = super(_IterablePickleOut, self).__call__()
        return self.pdfn(value) if value is not None else value
# Source wrapper that returns tuples as plain Python objects
# (pass-by-reference); no transformation beyond the base class is needed.
class _IterableObjectOut(_IterableAnyOut):
    pass
# Iterator that wraps another iterator
# to discard any values that are None
class _ObjectIterator(object):
    """Iterator wrapper that skips ``None`` values from the wrapped iterator."""
    def __init__(self, it):
        self.it = iter(it)
    def __iter__(self):
        return self
    def __next__(self):
        value = next(self.it)
        while value is None:
            value = next(self.it)
        return value
    def next(self):
        # Python 2 spells the iterator protocol "next"; delegate so
        # subclasses overriding __next__ are still honored.
        return self.__next__()
# Same None-skipping iterator, but every surviving value is pickled.
class _PickleIterator(_ObjectIterator):
    """Like ``_ObjectIterator`` but returns each value in pickled form."""
    def __next__(self):
        value = super(_PickleIterator, self).__next__()
        return pickle.dumps(value)
# Return a function that depickles
# the input tuple calls callable
# that is expected to return
# an Iterable. If callable returns
# None then the function will return
# None, otherwise it returns
# an instance of _PickleIterator
# wrapping an iterator from the iterable
# Used by FlatMap (flat_map)
class _ObjectInPickleIter(_FunctionalCallable):
    """Call the application callable; wrap a returned iterable in a
    pickling iterator (used by FlatMap / flat_map)."""
    def __call__(self, tuple_):
        iterable = self._callable(tuple_)
        return _PickleIterator(iterable) if iterable is not None else None
class _ObjectInObjectIter(_FunctionalCallable):
    """Call the application callable; wrap a returned iterable in a
    None-skipping iterator (used by FlatMap / flat_map)."""
    def __call__(self, tuple_):
        iterable = self._callable(tuple_)
        return _ObjectIterator(iterable) if iterable is not None else None
class _PickleInPickleIter(_ObjectInPickleIter):
    """Depickle the input, then behave like ``_ObjectInPickleIter``."""
    def __call__(self, tuple_, pm=None):
        value = pickle.loads(tuple_) if pm is not None else tuple_
        return super(_PickleInPickleIter, self).__call__(value)
class _PickleInObjectIter(_ObjectInObjectIter):
    """Depickle the input, then behave like ``_ObjectInObjectIter``."""
    def __call__(self, tuple_, pm=None):
        value = pickle.loads(tuple_) if pm is not None else tuple_
        return super(_PickleInObjectIter, self).__call__(value)
class _JSONInPickleIter(_ObjectInPickleIter):
    """Deserialize JSON input, then behave like ``_ObjectInPickleIter``."""
    def __call__(self, tuple_):
        value = json.loads(tuple_)
        return super(_JSONInPickleIter, self).__call__(value)
class _JSONInObjectIter(_ObjectInObjectIter):
    """Deserialize JSON input, then behave like ``_ObjectInObjectIter``."""
    def __call__(self, tuple_):
        value = json.loads(tuple_)
        return super(_JSONInObjectIter, self).__call__(value)
# Variables used by SPL Python operators to create specific wrapper function.
#
# Source: source_style
# Filter: style_in__style_out (output style is same as input) - (any input style supported)
# Map: style_in__style_out (any input/output style supported)
# FlatMap: style_in__style_iter: (any input style supported, pickle/object on output)
# ForEach: style_in (any style)
# Object (pass-by-reference) input: values used as-is.
source_object = _IterableObjectOut
object_in__object_out = _FunctionalCallable
object_in__object_iter = _ObjectInObjectIter
object_in__pickle_out = _ObjectInPickleOut
object_in__pickle_iter = _ObjectInPickleIter
object_in__json_out = _ObjectInJSONOut
object_in__dict_out = _ObjectInTupleOut
object_in = _FunctionalCallable
# Pickled input: the wrapper depickles before invoking the callable.
source_pickle = _IterablePickleOut
pickle_in__object_out = _PickleInObjectOut
pickle_in__object_iter = _PickleInObjectIter
pickle_in__pickle_out = _PickleInPickleOut
pickle_in__pickle_iter = _PickleInPickleIter
pickle_in__string_out = _PickleInStringOut
pickle_in__json_out = _PickleInJSONOut
pickle_in__dict_out = _PickleInTupleOut
pickle_in = _PickleInObjectOut
# String input needs no transformation, so reuse the object forms.
string_in__object_out = object_in__object_out
string_in__object_iter = object_in__object_iter
string_in__pickle_out = object_in__pickle_out
string_in__pickle_iter = object_in__pickle_iter
string_in__string_out = object_in__object_out
string_in__json_out = object_in__json_out
string_in__dict_out = object_in__dict_out
string_in = object_in
# Serialized-JSON input: the wrapper deserializes before invoking.
json_in__object_out = _JSONInObjectOut
json_in__object_iter = _JSONInObjectIter
json_in__pickle_out = _JSONInPickleOut
json_in__pickle_iter = _JSONInPickleIter
json_in__string_out = _JSONInStringOut
json_in__json_out = _JSONInJSONOut
json_in__dict_out = _JSONInTupleOut
json_in = _JSONInObjectOut
# Dict input needs no transformation, so reuse the object forms.
dict_in__object_out = object_in__object_out
dict_in__object_iter = object_in__object_iter
dict_in__pickle_out = object_in__pickle_out
dict_in__pickle_iter = object_in__pickle_iter
dict_in__string_out = object_in__object_out
dict_in__json_out = object_in__json_out
dict_in__dict_out = object_in__dict_out
dict_in = object_in
# Plain-tuple input likewise needs no transformation.
tuple_in__object_out = object_in__object_out
tuple_in__object_iter = object_in__object_iter
tuple_in__pickle_out = object_in__pickle_out
tuple_in__pickle_iter = object_in__pickle_iter
tuple_in__string_out = object_in__object_out
tuple_in__json_out = object_in__json_out
tuple_in__dict_out = object_in__dict_out
tuple_in = object_in
# Get the named tuple class for a schema.
# used by functional operators.
def _get_namedtuple_cls(schema, name):
    """Return the named-tuple class for ``schema`` (used by functional operators)."""
    tuple_schema = StreamSchema(schema).as_tuple(named=name)
    return tuple_schema.style
class _WrappedInstance(object):
    """Wrap a callable instance, forwarding context-manager enter/exit to
    the wrapped object when — and only when — it defines both hooks."""
    def __init__(self, callable_):
        self._callable = callable_
    def _hasee(self):
        target = self._callable
        return hasattr(target, '__enter__') and hasattr(target, '__exit__')
    def __enter__(self):
        if self._hasee():
            self._callable.__enter__()
    def __exit__(self, exc_type, exc_value, traceback):
        if self._hasee():
            self._callable.__exit__(exc_type, exc_value, traceback)
# Wraps an iterable instance returning
# it when called. Allows an iterable
# instance to be passed directly to Topology.source
class _IterableInstance(_WrappedInstance):
    """Wraps an iterable so it may be passed directly to ``Topology.source``;
    calling the wrapper hands back the iterable itself."""
    def __call__(self):
        return self._callable
# Wraps an callable instance
# When this is called, the callable is called.
# Used to wrap a lambda object or a function/class
# defined in __main__
class _Callable(_WrappedInstance):
    """Wraps a lambda or a function/class defined in ``__main__``;
    invoking the wrapper invokes the wrapped callable."""
    def __call__(self, *args, **kwargs):
        # Explicit __call__ lookup mirrors the original dispatch exactly.
        return self._callable.__call__(*args, **kwargs)
| |
#!/usr/bin/env python
#-
# Copyright (c) 2006 Verdens Gang AS
# Copyright (c) 2006-2015 Varnish Software AS
# All rights reserved.
#
# Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
# Author: Martin Blix Grydeland <martin@varnish-software.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Generate various .c and .h files for the VSL query expression parser
# and the interfaces for it.
import sys
import copy
# Default to an in-tree build; the invoking Makefile may override both
# roots by passing them on the command line:
#   generate.py <srcroot> <buildroot>
srcroot = "../.."
buildroot = "../.."
if len(sys.argv) == 3:
    srcroot = sys.argv[1]
    buildroot = sys.argv[2]
#######################################################################
# These are our tokens
tokens = {
    # Numerical comparisons
    "T_EQ": "==",
    "T_NEQ": "!=",
    "T_LEQ": "<=",
    "T_GEQ": ">=",
    # String comparisons
    "T_SEQ": "eq",
    "T_SNEQ": "ne",
    # Regular expression matching
    "T_NOMATCH": "!~",
    # Boolean operators
    "T_AND": "and",
    "T_OR": "or",
    "T_NOT": "not",
    # Miscellaneous single-character tokens; the None entry is expanded
    # into individual "'c'" tokens by polish_tokens() below.
    None: "<>~[]{}():,",
    # These have handwritten recognizers
    "VAL": None,
    "EOI": None,
    # Special
    "T_TRUE": None,
}
#######################################################################
# Emit a function to recognize tokens in a string
def emit_vxp_fixed_token(fo, tokens):
    """Emit the C function vxp_fixed_token() which recognizes all tokens
    that have a fixed string form, longest match first."""
    # recog: all fixed token strings; emit: string -> token name.
    recog = list()
    emit = dict()
    for i in tokens:
        j = tokens[i]
        if (j != None):
            recog.append(j)
            emit[j] = i
    recog.sort()
    # rrecog: same strings ordered longest-first so the generated code
    # prefers the longest match sharing an initial character.
    rrecog = copy.copy(recog)
    rrecog.sort(key = lambda x: -len(x))
    fo.write("""
unsigned
vxp_fixed_token(const char *p, const char **q)
{
\tswitch (p[0]) {
""")
    # One case label per distinct initial character.
    last_initial = None
    for i in recog:
        if (i[0] == last_initial):
            continue
        last_initial = i[0]
        fo.write("\tcase '%s':\n" % last_initial)
        for j in rrecog:
            if (j[0] != last_initial):
                continue
            fo.write("\t\tif (")
            # Compare the remaining characters of the candidate token.
            k = 1
            l = len(j)
            while (k < l):
                fo.write("p[%d] == '%s'" % (k, j[k]))
                fo.write(" &&\n\t\t    ")
                k += 1
            # Word tokens must not be followed by another word character.
            fo.write("(isword(p[%d]) ? !isword(p[%d]) : 1)) {\n" %
                (l - 1, l))
            fo.write("\t\t\t*q = p + %d;\n" % l)
            fo.write("\t\t\treturn (%s);\n" % emit[j])
            fo.write("\t\t}\n");
        fo.write("\t\treturn (0);\n")
    fo.write("\tdefault:\n\t\treturn (0);\n\t}\n}\n")
#######################################################################
# Emit the vxp_tnames (token->string) conversion array
def emit_vxp_tnames(fo, tokens):
    """Emit the vxp_tnames[] token-number -> display-string C array."""
    fo.write("\nconst char * const vxp_tnames[256] = {\n")
    for tok in sorted(tokens.keys()):
        label = tokens[tok]
        # Tokens without a string form, and quoted single-char tokens,
        # display as the token spelling itself.
        if label is None or tok[0] == "'":
            label = tok
        fo.write("\t[%s] = \"%s\",\n" % (tok, label))
    fo.write("};\n")
#######################################################################
def polish_tokens(tokens):
    """Expand the ``None`` entry (a string of single-character tokens)
    into individual quoted-character entries, e.g. ``"'~'": "~"``."""
    singles = tokens.pop(None)
    for ch in singles:
        tokens["'%s'" % ch] = ch
#######################################################################
def file_header(fo):
    """Write the standard do-not-edit banner for a generated file."""
    fo.write(
        "/*\n"
        " * NB: This file is machine generated, DO NOT EDIT!\n"
        " *\n"
        " * Edit and run generate.py instead\n"
        " */\n"
    )
#######################################################################
# Expand single-character tokens into quoted entries.
polish_tokens(tokens)
# Emit the #define numbering (starting at 128, above any ASCII value) for
# the multi-character tokens; single-character tokens use their own ASCII
# code and need no define.
fo = open(buildroot + "/lib/libvarnishapi/vxp_tokens.h", "w")
file_header(fo)
j = 128
l = list(tokens.keys())
l.sort()
for i in l:
    if i[0] == "'":
        continue
    fo.write("#define\t%s %d\n" % (i, j))
    j += 1
# Token numbers index a 256-entry table; more would overflow it.
assert j < 256
fo.close()
#######################################################################
# Emit the fixed-token recognizer and the token-name table.
fo = open(buildroot + "/lib/libvarnishapi/vxp_fixed_token.c", "w")
file_header(fo)
fo.write("""
#include "config.h"
#include <ctype.h>
#include <stdio.h>
#include "vqueue.h"
#include "vre.h"
#include "vxp.h"
""")
emit_vxp_fixed_token(fo, tokens)
emit_vxp_tnames(fo, tokens)
fo.close()
| |
# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from testtools import TestCase
from kmip.core.attributes import CryptographicAlgorithm
from kmip.core.attributes import CryptographicLength
from kmip.core.attributes import CryptographicUsageMask
from kmip.core.attributes import UniqueIdentifier
from kmip.core.attributes import ObjectType
from kmip.core.attributes import Name
from kmip.core.enums import AttributeType
from kmip.core.enums import CryptographicAlgorithm as CryptoAlgorithmEnum
from kmip.core.enums import CryptographicUsageMask as CryptoUsageMaskEnum
from kmip.core.enums import KeyCompressionType as KeyCompressionTypeEnum
from kmip.core.enums import KeyFormatType as KeyFormatTypeEnum
from kmip.core.enums import ObjectType as ObjectTypeEnum
from kmip.core.enums import ResultReason
from kmip.core.enums import ResultStatus
from kmip.core.enums import NameType
from kmip.core.factories.attributes import AttributeFactory
from kmip.core.messages.contents import KeyCompressionType
from kmip.core.misc import KeyFormatType
from kmip.core.objects import KeyBlock
from kmip.core.objects import KeyMaterial
from kmip.core.objects import KeyValue
from kmip.core.objects import TemplateAttribute
from kmip.core.secrets import SymmetricKey
from kmip.core.server import KMIPImpl
class TestKMIPServer(TestCase):
def setUp(self):
super(TestKMIPServer, self).setUp()
self.kmip = KMIPImpl()
self.algorithm_name = CryptoAlgorithmEnum.AES
self.key_length = 256
self.key = bytearray(range(0, 32))
self.usage_mask = CryptoUsageMaskEnum.ENCRYPT.value |\
CryptoUsageMaskEnum.DECRYPT.value
    def tearDown(self):
        """No per-test cleanup beyond the base class."""
        super(TestKMIPServer, self).tearDown()
def test_create(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
attributes = self._get_attrs()
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.create(obj_type, template_attribute)
self.assertNotEqual(None, res, 'result is None')
self.assertEqual(ResultStatus.SUCCESS, res.result_status.enum,
'result status did not return success')
def test_create_no_length(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
attributes = self._get_attrs()[0:2]
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.create(obj_type, template_attribute)
self.assertNotEqual(None, res, 'result is None')
attrs = res.template_attribute.attributes
self.assertEqual(ResultStatus.SUCCESS, res.result_status.enum,
'result status did not return success')
self.assertTrue(self._check_attr_exists(attributes[2], attrs),
'length attribute not returned')
def test_create_no_alg(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
attributes = [self._get_attrs()[1]]
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.create(obj_type, template_attribute)
self.assertNotEqual(None, res, 'result is None')
self.assertEqual(ResultStatus.OPERATION_FAILED, res.result_status.enum,
'result status did not return failed')
def test_create_no_usage_mask(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
attributes = [self._get_attrs()[0]]
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.create(obj_type, template_attribute)
self.assertNotEqual(None, res, 'result is None')
self.assertEqual(ResultStatus.OPERATION_FAILED, res.result_status.enum,
'result status did not return failed')
def test_register(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
key = self._get_symmetric_key()
attributes = []
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.register(obj_type, template_attribute, key)
self.assertNotEqual(None, res, 'result is None')
self.assertEqual(ResultStatus.SUCCESS, res.result_status.enum,
'result status did not return success')
def test_register_attrs_in_key_value(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
key = self._get_symmetric_key()
key.key_block.cryptographic_algorithm = None
key.key_block.cryptographic_length = None
key.key_block.key_value.attributes = self._get_attrs()
attributes = []
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.register(obj_type, template_attribute, key)
self.assertNotEqual(None, res, 'result is None')
self.assertEqual(ResultStatus.SUCCESS, res.result_status.enum,
'result status did not return success')
def test_register_attrs_in_template(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
key = self._get_symmetric_key()
key.key_block.cryptographic_algorithm = None
key.key_block.cryptographic_length = None
key.key_block.key_value.attributes = []
attributes = self._get_attrs()
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.register(obj_type, template_attribute, key)
self.assertNotEqual(None, res, 'result is None')
self.assertEqual(ResultStatus.SUCCESS, res.result_status.enum,
'result status did not return success')
def test_register_no_alg(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
key = self._get_symmetric_key()
key.key_block.cryptographic_algorithm = None
attributes = []
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.register(obj_type, template_attribute, key)
self.assertEqual(ResultStatus.OPERATION_FAILED,
res.result_status.enum,
'result status did not return failed')
self.assertEqual(ResultReason.ITEM_NOT_FOUND,
res.result_reason.enum,
'result reason did not match')
def test_register_alg_in_key_value_and_key_block(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
key = self._get_symmetric_key()
key.key_block.key_value.attributes = [self._get_alg_attr()]
attributes = []
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.register(obj_type, template_attribute, key)
self.assertEqual(ResultStatus.OPERATION_FAILED,
res.result_status.enum,
'result status did not return failed')
self.assertEqual(ResultReason.INDEX_OUT_OF_BOUNDS,
res.result_reason.enum,
'result reason did not match')
def test_register_alg_in_template_and_key_block(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
key = self._get_symmetric_key()
attributes = [self._get_alg_attr()]
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.register(obj_type, template_attribute, key)
self.assertEqual(ResultStatus.OPERATION_FAILED,
res.result_status.enum,
'result status did not return failed')
self.assertEqual(ResultReason.INDEX_OUT_OF_BOUNDS,
res.result_reason.enum,
'result reason did not match')
def test_register_alg_in_template_and_key_value(self):
    """Duplicate algorithm in template and key value must fail."""
    object_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
    secret = self._get_symmetric_key()
    secret.key_block.cryptographic_algorithm = None
    secret.key_block.key_value.attributes = [self._get_alg_attr()]
    template = TemplateAttribute(attributes=[self._get_alg_attr()])
    result = self.kmip.register(object_type, template, secret)
    self.assertEqual(ResultStatus.OPERATION_FAILED,
                     result.result_status.enum,
                     'result status did not return failed')
    self.assertEqual(ResultReason.INDEX_OUT_OF_BOUNDS,
                     result.result_reason.enum,
                     'result reason did not match')
def test_register_invalid_alg(self):
    """Every unsupported symmetric algorithm must be rejected."""
    unsupported = (CryptoAlgorithmEnum.RSA,
                   CryptoAlgorithmEnum.DSA,
                   CryptoAlgorithmEnum.ECDSA,
                   CryptoAlgorithmEnum.HMAC_SHA1,
                   CryptoAlgorithmEnum.HMAC_SHA224,
                   CryptoAlgorithmEnum.HMAC_SHA256,
                   CryptoAlgorithmEnum.HMAC_SHA384,
                   CryptoAlgorithmEnum.HMAC_SHA512,
                   CryptoAlgorithmEnum.HMAC_MD5,
                   CryptoAlgorithmEnum.DH,
                   CryptoAlgorithmEnum.ECDH,
                   CryptoAlgorithmEnum.ECMQV,
                   CryptoAlgorithmEnum.BLOWFISH,
                   CryptoAlgorithmEnum.CAMELLIA,
                   CryptoAlgorithmEnum.CAST5,
                   CryptoAlgorithmEnum.IDEA,
                   CryptoAlgorithmEnum.MARS,
                   CryptoAlgorithmEnum.RC2,
                   CryptoAlgorithmEnum.RC4,
                   CryptoAlgorithmEnum.RC5,
                   CryptoAlgorithmEnum.SKIPJACK,
                   CryptoAlgorithmEnum.TWOFISH)
    for algorithm in unsupported:
        object_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
        secret = self._get_symmetric_key()
        secret.key_block.cryptographic_algorithm = \
            CryptographicAlgorithm(algorithm)
        template = TemplateAttribute(attributes=[])
        result = self.kmip.register(object_type, template, secret)
        self.assertEqual(ResultStatus.OPERATION_FAILED,
                         result.result_status.enum,
                         'result status did not return failed')
        self.assertEqual(ResultReason.INVALID_FIELD,
                         result.result_reason.enum,
                         'result reason did not match')
def test_register_no_length(self):
    """Registering a key whose block has no length must fail."""
    object_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
    secret = self._get_symmetric_key()
    secret.key_block.cryptographic_length = None
    template = TemplateAttribute(attributes=[])
    result = self.kmip.register(object_type, template, secret)
    self.assertEqual(ResultStatus.OPERATION_FAILED,
                     result.result_status.enum,
                     'result status did not return failed')
    self.assertEqual(ResultReason.ITEM_NOT_FOUND,
                     result.result_reason.enum,
                     'result reason did not match')
def test_register_length_in_key_value_and_key_block(self):
    """Duplicate length in key value and key block must fail."""
    object_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
    secret = self._get_symmetric_key()
    secret.key_block.key_value.attributes = [self._get_length_attr()]
    template = TemplateAttribute(attributes=[])
    result = self.kmip.register(object_type, template, secret)
    self.assertEqual(ResultStatus.OPERATION_FAILED,
                     result.result_status.enum,
                     'result status did not return failed')
    self.assertEqual(ResultReason.INDEX_OUT_OF_BOUNDS,
                     result.result_reason.enum,
                     'result reason did not match')
def test_register_length_in_template_and_key_block(self):
    """Duplicate length in template and key block must fail."""
    object_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
    secret = self._get_symmetric_key()
    template = TemplateAttribute(attributes=[self._get_length_attr()])
    result = self.kmip.register(object_type, template, secret)
    self.assertEqual(ResultStatus.OPERATION_FAILED,
                     result.result_status.enum,
                     'result status did not return failed')
    self.assertEqual(ResultReason.INDEX_OUT_OF_BOUNDS,
                     result.result_reason.enum,
                     'result reason did not match')
def test_register_length_in_template_and_key_value(self):
    """Duplicate length in template and key value must fail."""
    object_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
    secret = self._get_symmetric_key()
    secret.key_block.cryptographic_length = None
    secret.key_block.key_value.attributes = [self._get_length_attr()]
    template = TemplateAttribute(attributes=[self._get_length_attr()])
    result = self.kmip.register(object_type, template, secret)
    self.assertEqual(ResultStatus.OPERATION_FAILED,
                     result.result_status.enum,
                     'result status did not return failed')
    self.assertEqual(ResultReason.INDEX_OUT_OF_BOUNDS,
                     result.result_reason.enum,
                     'result reason did not match')
def test_register_invalid_length(self):
    """Every unsupported key length must be rejected.

    The loop variable is named ``length`` (the original shadowed the
    builtin ``len``).
    """
    unsupported_lens = (-1, 0, 2048, 5, 18)
    for length in unsupported_lens:
        obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
        key = self._get_symmetric_key()
        key.key_block.cryptographic_length = CryptographicLength(length)
        attributes = []
        template_attribute = TemplateAttribute(attributes=attributes)
        res = self.kmip.register(obj_type, template_attribute, key)
        self.assertEqual(ResultStatus.OPERATION_FAILED,
                         res.result_status.enum,
                         'result status did not return failed')
        self.assertEqual(ResultReason.INVALID_FIELD,
                         res.result_reason.enum,
                         'result reason did not match')
def test_register_no_usage_mask(self):
    """Registering a key without a usage-mask attribute must fail."""
    object_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
    secret = self._get_symmetric_key()
    secret.key_block.key_value.attributes = []
    template = TemplateAttribute(attributes=[])
    result = self.kmip.register(object_type, template, secret)
    self.assertEqual(ResultStatus.OPERATION_FAILED,
                     result.result_status.enum,
                     'result status did not return failed')
    self.assertEqual(ResultReason.ITEM_NOT_FOUND,
                     result.result_reason.enum,
                     'result reason did not match')
def test_register_no_object_type(self):
    """Registering without an object type must fail."""
    secret = self._get_symmetric_key()
    template = TemplateAttribute(attributes=[])
    result = self.kmip.register(None, template, secret)
    self.assertNotEqual(None, result, 'result is None')
    self.assertEqual(ResultStatus.OPERATION_FAILED,
                     result.result_status.enum,
                     'result status did not return failed')
    self.assertEqual(ResultReason.ITEM_NOT_FOUND,
                     result.result_reason.enum,
                     'result reason did not match')
def test_register_unsupported_object_type(self):
    """Every non-symmetric-key object type must be rejected."""
    unsupported_types = (ObjectTypeEnum.CERTIFICATE,
                         ObjectTypeEnum.PUBLIC_KEY,
                         ObjectTypeEnum.PRIVATE_KEY,
                         ObjectTypeEnum.SPLIT_KEY,
                         ObjectTypeEnum.TEMPLATE,
                         ObjectTypeEnum.SECRET_DATA,
                         ObjectTypeEnum.OPAQUE_DATA)
    for type_enum in unsupported_types:
        object_type = ObjectType(type_enum)
        secret = self._get_symmetric_key()
        template = TemplateAttribute(attributes=[])
        result = self.kmip.register(object_type, template, secret)
        self.assertNotEqual(None, result, 'result is None')
        self.assertEqual(ResultStatus.OPERATION_FAILED,
                         result.result_status.enum,
                         'result status did not return failed')
        self.assertEqual(ResultReason.INVALID_FIELD,
                         result.result_reason.enum,
                         'result reason did not match')
def test_register_object_type_mismatch(self):
    """A symmetric key registered under another object type must fail."""
    mismatched_types = (ObjectTypeEnum.CERTIFICATE,
                        ObjectTypeEnum.PUBLIC_KEY,
                        ObjectTypeEnum.PRIVATE_KEY,
                        ObjectTypeEnum.SPLIT_KEY,
                        ObjectTypeEnum.TEMPLATE,
                        ObjectTypeEnum.SECRET_DATA,
                        ObjectTypeEnum.OPAQUE_DATA)
    for type_enum in mismatched_types:
        object_type = ObjectType(type_enum)
        secret = self._get_symmetric_key()
        template = TemplateAttribute(attributes=[])
        result = self.kmip.register(object_type, template, secret)
        self.assertNotEqual(None, result, 'result is None')
        self.assertEqual(ResultStatus.OPERATION_FAILED,
                         result.result_status.enum,
                         'result status did not return failed')
        self.assertEqual(ResultReason.INVALID_FIELD,
                         result.result_reason.enum,
                         'result reason did not match')
def test_get(self):
    """Getting a freshly created key in RAW format should succeed."""
    uuid = self._create()
    result = self.kmip.get(uuid, KeyFormatType(KeyFormatTypeEnum.RAW))
    self.assertEqual(ResultStatus.SUCCESS, result.result_status.enum,
                     'result status did not return success')
def test_get_no_key_format_type(self):
    """Getting a key without naming a format should still succeed."""
    uuid = self._create()
    result = self.kmip.get(uuid, None)
    self.assertEqual(ResultStatus.SUCCESS, result.result_status.enum,
                     'result status did not return success')
def test_get_unknown(self):
    """Getting a key by an unknown identifier must fail."""
    unknown_uuids = ('some random string', UniqueIdentifier('no key here'))
    for uuid in unknown_uuids:
        result = self.kmip.get(uuid, KeyFormatType(KeyFormatTypeEnum.RAW))
        self.assertEqual(ResultStatus.OPERATION_FAILED,
                         result.result_status.enum,
                         'result status did not return failed')
        self.assertEqual(ResultReason.ITEM_NOT_FOUND,
                         result.result_reason.enum,
                         'result reason did not match')
def test_get_no_uuid(self):
    """Getting with no identifier must fail even when keys exist."""
    self._create()
    result = self.kmip.get(None, KeyFormatType(KeyFormatTypeEnum.RAW))
    self.assertEqual(ResultStatus.OPERATION_FAILED, result.result_status.enum,
                     'result status did not return failed')
def test_get_with_key_compression(self):
    """Requesting key compression is unsupported and must fail."""
    uuid = self._create()
    compression = KeyCompressionType(
        KeyCompressionTypeEnum.EC_PUBLIC_KEY_TYPE_UNCOMPRESSED)
    result = self.kmip.get(uuid, KeyFormatType(KeyFormatTypeEnum.RAW),
                           compression)
    self.assertEqual(ResultStatus.OPERATION_FAILED, result.result_status.enum,
                     'result status did not return failed')
    self.assertEqual(ResultReason.KEY_COMPRESSION_TYPE_NOT_SUPPORTED,
                     result.result_reason.enum,
                     'result reason did not match')
def test_destroy(self):
    """Destroy succeeds once, then fails for the destroyed key."""
    uuid = self._create()
    result = self.kmip.get(uuid, KeyFormatType(KeyFormatTypeEnum.RAW))
    self.assertEqual(ResultStatus.SUCCESS, result.result_status.enum,
                     'result status did not return success')
    # first destroy removes the key
    result = self.kmip.destroy(uuid)
    self.assertEqual(ResultStatus.SUCCESS, result.result_status.enum,
                     'result status did not return success')
    # second destroy must report the key as missing
    result = self.kmip.destroy(uuid)
    self.assertEqual(ResultStatus.OPERATION_FAILED,
                     result.result_status.enum,
                     'result status did not return failed')
    self.assertEqual(ResultReason.ITEM_NOT_FOUND,
                     result.result_reason.enum,
                     'result reason did not match')
def test_destroy_no_uuid(self):
    """Destroying with no identifier must fail."""
    result = self.kmip.destroy(None)
    self.assertEqual(ResultStatus.OPERATION_FAILED,
                     result.result_status.enum,
                     'result status did not return failed')
    self.assertEqual(ResultReason.ITEM_NOT_FOUND,
                     result.result_reason.enum,
                     'result reason did not match')
def test_destroy_unknown(self):
    """Destroying an unknown identifier must fail."""
    unknown_uuids = ('some random string', UniqueIdentifier('no key here'))
    for uuid in unknown_uuids:
        # confirm the key really is absent before destroying
        result = self.kmip.get(uuid, KeyFormatType(KeyFormatTypeEnum.RAW))
        self.assertEqual(ResultStatus.OPERATION_FAILED,
                         result.result_status.enum,
                         'result status did not return failed')
        result = self.kmip.destroy(uuid)
        self.assertEqual(ResultStatus.OPERATION_FAILED,
                         result.result_status.enum,
                         'result status did not return failed')
        self.assertEqual(ResultReason.ITEM_NOT_FOUND,
                         result.result_reason.enum,
                         'result reason did not match')
def _create(self):
    """Create a symmetric key on the server and return its uuid."""
    object_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
    template = TemplateAttribute(attributes=self._get_attrs())
    result = self.kmip.create(object_type, template)
    self.assertNotEqual(None, result, 'result is None')
    self.assertEqual(ResultStatus.SUCCESS, result.result_status.enum,
                     'result status did not return success')
    return result.uuid
def _get_symmetric_key(self):
    """Build a raw-format symmetric key populated from the test config."""
    # the key value only carries the usage-mask attribute
    usage_attrs = [self._get_attrs()[1]]
    key_value = KeyValue(KeyMaterial(self.key), usage_attrs)
    key_block = KeyBlock(KeyFormatType(KeyFormatTypeEnum.RAW),
                         None,
                         key_value,
                         CryptographicAlgorithm(self.algorithm_name),
                         CryptographicLength(self.key_length),
                         CryptographicUsageMask(self.usage_mask))
    return SymmetricKey(key_block)
def _get_attrs(self):
    """Return the standard attribute list used to create test keys."""
    factory = AttributeFactory()
    algorithm = self._get_alg_attr(self.algorithm_name)
    length = self._get_length_attr(self.key_length)
    mask_flags = [CryptoUsageMaskEnum.ENCRYPT,
                  CryptoUsageMaskEnum.DECRYPT]
    usage_mask = factory.create_attribute(
        AttributeType.CRYPTOGRAPHIC_USAGE_MASK, mask_flags)
    name_value = Name.NameValue(value='TESTNAME')
    name_type = Name.NameType(value=NameType.UNINTERPRETED_TEXT_STRING)
    name_attr = factory.create_attribute(
        AttributeType.NAME, Name.create(name_value, name_type))
    return [algorithm, usage_mask, length, name_attr]
def _get_alg_attr(self, alg=None):
    """Build an algorithm attribute; defaults to the test algorithm."""
    algorithm = self.algorithm_name if alg is None else alg
    return AttributeFactory().create_attribute(
        AttributeType.CRYPTOGRAPHIC_ALGORITHM, algorithm)
def _get_length_attr(self, length=None):
    """Build a length attribute; defaults to the test key length."""
    value = self.key_length if length is None else length
    return AttributeFactory().create_attribute(
        AttributeType.CRYPTOGRAPHIC_LENGTH, value)
def _check_attr_exists(self, attr_expected, attributes):
for attribute in attributes:
if attribute.attribute_name.value ==\
attr_expected.attribute_name.value:
return attribute.attribute_value.value ==\
attr_expected.attribute_value.value
return False
def test_locate(self):
    """Locate by name currently fails on this server implementation."""
    self._create()
    name_value = Name.NameValue(value='TESTNAME')
    name_type = Name.NameType(value=NameType.UNINTERPRETED_TEXT_STRING)
    name_attr = AttributeFactory().create_attribute(
        AttributeType.NAME, Name.create(name_value, name_type))
    result = self.kmip.locate(attributes=[name_attr])
    self.assertEqual(ResultStatus.OPERATION_FAILED,
                     result.result_status.enum,
                     'locate result status did not return success')
| |
import pickle
import time
import string
# import os
from math import pi, sqrt, sin, copysign, floor, ceil
from functools import partial
import warnings
import numpy as np
import pandas as pd
import scipy.stats as ss
import scipy.optimize as so
import scipy.integrate as si
# from scipy.stats.kde import gaussian_kde
import matplotlib.pyplot as plt
# import scipy
import read_database as rdb
### ======================= ###
# Physical constants used for orbital-period computation below.
G = 6.67384e-11      # gravitational constant, m^3 kg^-1 s^-2
M = 1.989e30         # solar mass, kg
AU = 149597870700    # astronomical unit, m
# warning text reused wherever eccentricity is clamped to 0.99
msg_ehigh = 'too high eccentricity is found. value has been reset to 0.99'
class GaussianKDE(object):
    """1D wrapper over scipy's gaussian_kde with a distfit-like interface.

    Accepts either a 1-D array-like or a pandas DataFrame; the fitted KDE
    exposes pdf/cdf/rvs so it can stand in for the fitted scipy
    distributions used elsewhere in this module.
    """
    def __init__(self, data, name='gaussian_kde'):
        # isinstance instead of comparing type objects; ``.values`` works
        # on both old and modern pandas (``as_matrix`` has been removed)
        if isinstance(data, pd.DataFrame):
            data_ = data.values
        else:
            data_ = data
        self.dmin, self.dmax = min(data_), max(data_)
        self.name = name
        self.gaussian_kde = ss.gaussian_kde(data_)
        self.shapes = None

    def __call__(self, *args, **kwargs):
        # mimic scipy's "freeze" call signature; the KDE is already fitted
        return self

    def pdf(self, x, *args, **kwargs):
        """Evaluate the fitted density at ``x``."""
        return self.gaussian_kde.pdf(x)

    def cdf(self, *args, **kwargs):
        # total probability mass between the data extremes
        return self.gaussian_kde.integrate_box_1d(self.dmin, self.dmax)

    def rvs(self, size=None):
        """Draw ``size`` random variates from the fitted density."""
        return self.gaussian_kde.resample(size=size).ravel()
class HarmonicDistribution(object):
    """ 1D harmonic continuous distribution.

    pdf has the form amp*sin(2x + loc) + scale over [dmin, dmax] degrees;
    fitted through the same call/pdf/cdf/rvs interface as scipy's frozen
    distributions so FitDist can use it interchangeably.
    """
    def __init__(self, dmin=0, dmax=360):
        # support interval in degrees
        self.dmin, self.dmax = dmin, dmax
        self.name = 'harmonic'
        self.shapes = 'amp'
        self.frozen = False
    def __call__(self, amp, *args, **kwargs):
        """Freeze the distribution with fitted amp/loc/scale parameters."""
        self.amp = amp
        self.scale = kwargs['scale']
        self.loc = kwargs['loc']
        self.frozen = True
        return self
    def _pdf(self, x, amp, pha, shift):
        # harmonic density: amp*sin(2x + pha) + shift, with x in degrees
        y = amp*np.sin(2*np.radians(x) + pha) + shift
        return y
    def pdf(self, x, amp=None, *args, **kwargs):
        """Evaluate the pdf, using frozen parameters when available."""
        if not self.frozen:
            amp_ = amp
            loc, scale = kwargs['loc'], kwargs['scale']
        else:
            amp_ = self.amp
            loc, scale = self.loc, self.scale
        return self._pdf(x, amp_, loc, scale)
    def cdf(self, x, *args, **kwargs):
        """Numerically integrate the pdf from 0 to x (frozen params only)."""
        cdf_ = si.quad(self._pdf, 0, x, args=(self.amp, self.loc, self.scale))
        # print "cdf_:", cdf_
        return cdf_[0]
    def rvs(self, size=None, resolution=60):
        """Draw ``size`` variates by piecewise-uniform sampling of the pdf.

        The support is split into ``resolution`` bins; each bin receives a
        number of uniform draws proportional to its pdf mass, and any
        shortfall is topped up with uniform draws over the whole support.
        """
        size = int(size)
        if size < 4:
            # too few samples to honour the shape; fall back to uniform
            rvs = np.random.uniform(low=self.dmin, high=self.dmax, size=size)
            return rvs
        if size < resolution:
            resolution = int(ceil(size*0.33))
        # print "resolution:", resolution
        x = np.linspace(self.dmin, self.dmax, resolution)
        w = x[1] - x[0]
        # pdf mass of each bin, evaluated at the bin centre
        p0 = self._pdf(x[:-1] + w*0.5, self.amp, self.loc, self.scale)*w
        # size_cut = max(int(size*0.02), 1)
        size_cut = 0
        # shrink the per-bin counts until their total fits within ``size``
        for iteration in range(size):
            p = np.asarray(np.round(p0*(size-size_cut)), dtype=int)
            psum = np.sum(p)
            if psum <= size:
                break
            else:
                size_cut += 1
        # psum = min(np.sum(p), size)
        # print "p_sum:", np.sum(p)
        # print "size_cut:", size_cut
        # print "np.sum(p):", np.sum(p)
        sections = zip(x[:-1], x[1:], p)
        # print "sections:", sections
        rvs_base = np.asarray([np.random.uniform(low=a, high=b, size=n)
                               for a, b, n in sections])
        # top up the remaining (size - psum) draws uniformly over the support
        rvs_add = np.random.uniform(low=self.dmin, high=self.dmax,
                                    size=(size - psum))
        # rvs_base = rvs_base.ravel()
        rvs_base = np.hstack(rvs_base)
        # print "rvs_base:", rvs_base, rvs_base.shape
        # print "rvs_add:", rvs_add, rvs_add.shape
        rvs = np.hstack(np.concatenate((rvs_base, rvs_add)))
        # rvs = np.random.permutation(rvs_)
        # print "len(rvs):", len(rvs)
        # return np.random.uniform(low=0, high=360, size=size)
        return rvs
class BimodalDistribution(object):
    """1D bimodal continuous distribution: a ``magnitude``-weighted mixture
    of two component distributions sharing one scale, with the second
    component shifted to ``offset``.
    """
    def __init__(self, dist1=ss.norm, dist2=ss.norm, magnitude=0.5, name='bimodal'):
        self.dist1 = dist1
        self.dist2 = dist2
        self.magnitude = magnitude   # weight of dist1 in the mixture
        self.name = name
        self.shapes = 'offset'
        self.frozen = False
    def __call__(self, offset, *args, **kwargs):
        """Freeze the distribution with fitted offset/loc/scale."""
        self.offset = offset
        self.scale = kwargs['scale']
        self.loc = kwargs['loc']
        self.frozen = True
        return self
    def _parse_args(self, offset, *args, **kwargs):
        # frozen parameters win over per-call arguments
        if not self.frozen:
            offset_, loc, scale = offset, kwargs['loc'], kwargs['scale']
        else:
            offset_, loc, scale = self.offset, self.loc, self.scale
        return offset_, loc, scale
    def _pdf(self, x, offset, loc, scale):
        pdf1 = self.dist1.pdf(x, loc=loc, scale=scale)
        pdf2 = self.dist2.pdf(x, loc=offset, scale=scale)
        bimodal_pdf = self.magnitude*pdf1 + (1-self.magnitude)*pdf2
        return bimodal_pdf
    def pdf(self, x, offset=180, *args, **kwargs):
        """Evaluate the mixture pdf at ``x``."""
        offset_, loc, scale = self._parse_args(offset, *args, **kwargs)
        return self._pdf(x, offset_, loc, scale)
    def cdf(self, x, offset=180, *args, **kwargs):
        """Numerically integrate the mixture pdf from 0 to ``x``."""
        offset_, loc, scale = self._parse_args(offset, *args, **kwargs)
        cdf_ = si.quad(self._pdf, 0, x, args=(offset_, loc, scale))
        return cdf_[0]
    def rvs(self, size=50):
        """Draw ``size`` variates, split between the two components.

        Sizes are truncated to int explicitly — modern numpy rejects the
        float sample sizes the original code passed.
        """
        dist1 = self.dist1(loc=self.loc, scale=self.scale)
        dist2 = self.dist2(loc=self.offset, scale=self.scale)
        rvs1 = dist1.rvs(size=int(size*self.magnitude))
        rvs2 = dist2.rvs(size=int(size*(1-self.magnitude)))
        rvs = np.hstack([rvs1, rvs2])
        return rvs
class FitDist(object):
    """
    Fitted continuous distribution.
    Fits data with the continuous distribution specified as distfunc.
    Parameters
    ----------
    data: 1-D array of independent input data.
    distfunc: scipy.stats continuous random variable class.
        Currently supports continuous random variables with
        shape parameter.
    """
    # __module__ = os.path.splitext(os.path.basename(__file__))[0]
    def __init__(self, data, distfunc, n=50, verbose=False):
        self.distfunc = distfunc
        self.dmin, self.dmax = min(data), max(data)
        # widen the bin count so [0, dmax] keeps ~n bins over the data span
        n_ = self._extend(n)
        pdf_sum = self._split(data, n_)
        cdf_max = self._fit()
        if verbose:
            print "Data cdf(xmax): %f \t" % pdf_sum,
            print "%s_cdf(xmax): %f" % (distfunc.name, cdf_max)
    def _extend(self, n):
        # scale n up because bins start at 0, not at dmin
        n_ = float(self.dmax) * n/(self.dmax - self.dmin)
        return int(n_)
    def _split(self, data, num):
        """Split data values into bands and record a normalized histogram."""
        bounds = np.linspace(0, self.dmax, num)
        sections = zip(bounds[:-1], bounds[1:])
        self.probs = np.histogram(data, bins=bounds, density=True)[0]
        self.sections_c = np.array([(a+b)*0.5 for a, b in sections])
        self.widths = np.array([(b - a) for a, b in sections])
        # self.bounds = bounds[:-1]
        self.bounds = bounds
        # total histogram mass; should be ~1 for a density histogram
        pdf_sum = sum(d*w for d, w in zip(self.probs, self.widths))
        return pdf_sum
    def _fgen(self, shapes, pdf):
        """Generate a wrapper function for curve fitting.

        Builds ``f(x, <shapes...>, loc, scale)`` dynamically so curve_fit
        sees the distribution's shape parameters as leading positionals.
        """
        if shapes is None:
            shapes = ''
        else:
            shapes += ','
        # shapes = string.join('shape%d, ' %d for d in range(n_shapes))
        fdef = ("f = lambda x, %sloc, scale:"
                "pdf(x, %sloc=loc, scale=scale)" % (shapes, shapes))
        exec fdef in locals()
        return f
    def _fit(self):
        """Fit value bands with the continuous distribution; return its
        cdf mass over [dmin, dmax] as a fit-quality indicator."""
        pdf = self.distfunc.pdf
        distshapes = self.distfunc.shapes
        # if distshapes is None:
        #     f = lambda x, loc, scale: pdf(x, loc=loc, scale=scale)
        # else:
        #     n = len(distshapes.split())
        f = self._fgen(distshapes, pdf)
        if self.distfunc.name == 'uniform':
            # uniform needs no fitting; freeze directly on the data span
            self.distfit = self.distfunc(loc=self.dmin, scale=self.dmax)
            cdf = self.distfunc.cdf(self.dmax)
        else:
            popt, pcov = so.curve_fit(f, self.sections_c, self.probs)
            # last two fitted values are loc and scale; the rest are shapes
            shapes = popt[:-2]
            self.distfit = self.distfunc(*shapes, loc=popt[-2], scale=popt[-1])
            # cdf = self.distfunc.cdf(self.dmax, *shapes,
            #                         loc=popt[-2], scale=popt[-1])
            cdf = si.quad(self.distfit.pdf, self.dmin, self.dmax)[0]
        return cdf
    def _cut_tails(self, rvs):
        """Drop variates outside [dmin, dmax] and refill; returns the new
        sample and the number of refilled values."""
        below_bounds = np.where(rvs < self.dmin)[0]
        # print "below_bounds:", below_bounds.shape, #type(below_bounds)
        above_bounds = np.where(rvs > self.dmax)[0]
        # print "above_bounds:", above_bounds.shape, #type(below_bounds)
        bad = np.concatenate((below_bounds, above_bounds))
        rvs_less = np.delete(rvs, bad)
        if len(bad) > 4:
            rvs_add = self.distfit.rvs(size=len(bad))
        else:
            # few replacements needed: uniform refill is good enough
            rvs_add = np.random.uniform(low=self.dmin, high=self.dmax, size=len(bad))
        rvs_ = np.concatenate((rvs_add, rvs_less))
        rvs_ = np.random.permutation(rvs_)
        return rvs_, len(rvs_add)
    def get_rvs(self, size=100):
        """Returns random variables using the fitted continuous distribution,
        repeatedly trimming out-of-range values until few remain."""
        rvs = self.distfit.rvs(size=size)
        rvs, add_num = self._cut_tails(rvs)
        while add_num > 4:
            # print "cut tails and fill up"
            rvs, add_num = self._cut_tails(rvs)
        return rvs
    def plot_distfit(self, npoints=100):
        """Plot the data histogram against the fitted pdf."""
        ppx = np.linspace(0, self.dmax, npoints)
        ppy = self.distfit.pdf(ppx)
        plt.bar(self.sections_c, self.probs, self.widths[0], color='w', alpha=0.7)
        plt.plot(ppx, ppy, 'r-', lw=2)
        plt.show()
        # pass
    def plot_rvs(self, npoints=1000):
        """Plot a histogram of generated variates over the fitted pdf."""
        rvs = self.get_rvs(size=npoints)
        bounds = np.linspace(0, self.dmax, 50)
        plt.hist(rvs, bins=bounds, normed=1, color='grey')
        self.plot_distfit()
def get_param_distributions(data, names, statdists, n=50, verbose=False):
    """Fit each named column of ``data`` with its matching distribution
    and return the list of FitDist objects (one per name)."""
    fitted = []
    for name, dist in zip(names, statdists):
        column = data[[name]].as_matrix().ravel()
        fitted.append(FitDist(column, dist, n=n, verbose=verbose))
    return fitted
def _rgen_orbits(distdict, num, rand_params=None, ri=0):
    """
    Generates arrays of random orbital parameters based on their distributions
    and recursively re-generates failed orbits (with negative eccentricity.)
    Returns None if more than 50 regeneration rounds are needed.
    """
    if rand_params is None:
        # first round: draw a fresh sample for every parameter
        rand_params = ({name: contdist.get_rvs(size=num)
                        for name, contdist in distdict.items()})
    else:
        # regeneration round: append `num` replacement draws per parameter
        for name, contdist in distdict.items():
            add_rvs = contdist.get_rvs(size=num)
            rand_params[name] = np.concatenate((rand_params[name], add_rvs))
    # eccentricity derived from semi-major axis and perihelion distance
    rand_params['e'] = (rand_params['a'] - rand_params['q'])/rand_params['a']
    e_rand = rand_params['e']
    n_neg = len(e_rand[e_rand < 0])
    # print "n_neg:", n_neg
    if ri > 50:
        print "too high number of iterations has been reached:", ri
        return None
    elif n_neg > 0:
        # keep only physically valid orbits; clamp e >= 1 down to 0.99
        rand_params_ = {name: list() for name in rand_params}
        for i, e in enumerate(e_rand):
            if e >= 1.0:
                warnings.warn(msg_ehigh)
                # print msg_ehigh
                rand_params['e'][i] = 0.99
            elif e > 0:
                for name in rand_params:
                    rand_params_[name].append(rand_params[name][i])
        del rand_params
        rand_params = ({name: np.asarray(rvs_list)
                        for name, rvs_list in rand_params_.items()})
        del rand_params_
        # print "len(rand_params['a']):", len(rand_params['a'])
        ri += 1
        # recurse to replace the n_neg discarded orbits
        rand_params = _rgen_orbits(distdict, n_neg, rand_params, ri)
    return rand_params
def gen_orbits(distdict, num=100):
    """
    Generates a dataset of random orbits based on 1-D distributions
    of asteroid orbital parameters. Returns None when the underlying
    generator gives up.
    """
    rand_params = _rgen_orbits(distdict, num)
    if rand_params is None:
        return None
    columns = rand_params.keys()
    table = np.array([rand_params[name] for name in columns]).T
    return pd.DataFrame(table, columns=columns)
# Plotting functions
def get_subplotnum(n):
    """Return a 'RC' digit string for a rows-by-cols subplot grid that
    holds at least n axes (cols = floor(sqrt(n)))."""
    cols = int(sqrt(n))
    rows = int(float(n) / cols)
    if n % cols > 0:
        # one extra row for the remainder
        rows += 1
    return str(rows) + str(cols)
def cut_longtail(dist, terminate_tail=1e-4):
    """Find an x position where the fitted pdf of ``dist`` drops below
    ``terminate_tail`` so a long flat tail can be trimmed from plots.

    :param dist: object with a frozen ``distfit`` distribution and ``dmax``
    :param terminate_tail: pdf level regarded as negligible
    :return: fsolve result (array) for the tail cut point, or ``dist.dmax``
        unchanged when the pdf at dmax is still above the threshold
    """
    pdf_dmax = dist.distfit.pdf(dist.dmax)
    try:
        pdf_dmax_ = pdf_dmax[0]
    except (TypeError, IndexError):
        # pdf may return a plain scalar instead of a length-1 array
        pdf_dmax_ = pdf_dmax
    if pdf_dmax_ < terminate_tail:
        find_tail_end = lambda x: terminate_tail - dist.distfit.pdf(x)
        return so.fsolve(find_tail_end, dist.dmax * 0.5)
    return dist.dmax
def plot_param_distributions(distlist, xlabels, npoints=1000, figsize=(16, 10),
                             original_bars=True, generated_bars=True, cut_tail=False):
    """Plot every fitted distribution in ``distlist`` on one figure:
    original histogram bars, generated-sample bars and the fitted pdf.

    :param distlist: list of fitted distributions (FitDist-like)
    :param xlabels: axis labels, one per distribution
    :param npoints: number of generated variates / pdf evaluation points
    :param original_bars: draw the original data histogram
    :param generated_bars: draw a histogram of freshly generated variates
    :param cut_tail: trim the x axis where the pdf tail becomes negligible
    """
    fig = plt.figure(figsize=figsize)
    # build matplotlib 3-digit subplot codes, e.g. '231', '232', ...
    subplot_base = get_subplotnum(len(distlist))
    subplots = [int(subplot_base + str(i+1)) for i in range(len(distlist))]
    for subplot, dist, xlabel in zip(subplots, distlist, xlabels):
        # print "dist.bounds:", len(dist.bounds)
        # print "dist min, max:", dist.dmin, dist.dmax
        # print "dist.dmin:", dist.dmin
        rvs = dist.get_rvs(size=npoints)
        # print "len(rvs):", len(rvs)
        ax = fig.add_subplot(subplot)
        # ax.grid(True)
        w = dist.widths[0]
        # print 'w:', w
        # bounds = dist.bounds - w*0.25
        if generated_bars:
            # half-width, shifted bars so they sit beside the original ones
            ax.hist(rvs, bins=dist.bounds-w*0.25, normed=1, rwidth=0.5,
                    color='lightsteelblue', lw=0, zorder=1) # 'aquamarine' 'lightblue'
        # ppx = np.linspace(0, dist.dmax, npoints)
        ppx = np.linspace(dist.dmin, dist.dmax, npoints)
        ppy = dist.distfit.pdf(ppx)
        if original_bars:
            ax.bar(dist.bounds[:-1]+w*0.5, dist.probs, w*0.5, lw=0,
                   color='cornflowerblue', alpha=1, zorder=2) # 'dodgerblue'
        distcolor = 'chocolate' # 'greenyellow' # 'limegreen' # 'cornflowerblue'
        ax.plot(ppx, ppy, color=distcolor, ls='--', lw=2, zorder=3)
        ax.fill_between(ppx, 0, ppy, facecolor=distcolor, zorder=0, alpha=0.1)
        ax.set_xlabel(xlabel)
        ax.set_ylim(0, None)
        # ax.set_xlim(0, dist.dmax)
        backstep = w*0.5 if dist.dmin > 0.2 else 0 # dirty fix for nice plotting
        dmax_ = cut_longtail(dist) if cut_tail else dist.dmax
        ax.set_xlim(dist.dmin-backstep, dmax_)
    plt.show()
# Deprecated
def get_param_bounds(data, names):
    """Return {name: (min, max)} for each named column of ``data``."""
    # data_full = pd.concat([haz[names], nohaz[names]])
    bounds = {}
    for name in names:
        column = data[name]
        bounds[name] = (np.min(column), np.max(column))
    return bounds
def gen_rand_params(params=None, distdict=None, num=1):
    """
    Deprecated function for random orbit parameters generation.
    May produce orbits with negative eccentricity.

    :param params: unused; kept for backward compatibility
    :param distdict: {name: fitted distribution} providing get_rvs();
        loaded from disk when None
    :param num: number of variates to draw per parameter
    :return: {name: ndarray} of random parameters, with derived 'e'
        (eccentricity) and 'per' (period, days) when 'a' and 'q' exist
    """
    if distdict is None:
        distdict = rdb.loadObject('./asteroid_data/param_dist.p')
    rand_params = ({name: contdist.get_rvs(size=num)
                    for name, contdist in distdict.items()})
    try:
        rand_params['e'] = (rand_params['a'] - rand_params['q'])/rand_params['a']
        rand_params['per'] = 2*pi*np.sqrt((rand_params['a']*AU)**3/(G*M))/86400.0
    except KeyError:
        # 'a' or 'q' absent from the distributions: skip the derived
        # parameters (best-effort behaviour of this deprecated path);
        # narrowed from a bare except so real bugs are not swallowed
        pass
    return rand_params
def gen_rand_orbits(names, distlist, num=100):
    """
    Deprecated function for generation of a random orbit dataset.
    May produce orbits with negative eccentricity.
    """
    distdict = dict(zip(names, distlist))
    rand_params = gen_rand_params(distdict=distdict, num=num)
    columns = rand_params.keys()
    table = np.array([rand_params[name] for name in columns]).T
    return pd.DataFrame(table, columns=columns)
# Experimental
def gen_orbits_inout(dist_common, dist_inner, dist_outer, bound=1.0, num=100):
    """Experimental orbit generator that samples the argument of perihelion
    'w' from separate distributions for inner (q <= 1 AU) and outer
    (q > 1 AU) orbits, while all other parameters come from dist_common.

    NOTE(review): the ``bound`` parameter is never used — the q threshold
    is hard-coded as 1.0 below; confirm whether it should replace it.
    """
    rand_params = ({name: cdist.get_rvs(size=num)
                    for name, cdist in dist_common.items()})
    q_rand = rand_params['q']
    num_in = len(q_rand[q_rand <= 1.0])
    num_out = len(q_rand[q_rand > 1.0])
    print "num_in:", num_in
    print "num_out:", num_out
    w_in = dist_inner['w'].get_rvs(size=num_in)
    w_out = dist_outer['w'].get_rvs(size=num_out)
    w_in = np.random.permutation(w_in)
    w_out = np.random.permutation(w_out)
    # print "len w_in:", len(w_in) #w_in.shape
    # print "len w_out:", len(w_out) #w_out.shape
    # rand_params['a'] = np.zeros(num)
    rand_params['w'] = np.zeros(num)
    i_in = i_out = 0
    # assign inner/outer 'w' draws according to each orbit's perihelion
    for i, q, e in zip(range(num), rand_params['q'], rand_params['e']):
        # just in case to avoid possible surprises
        if rand_params['e'][i] >= 1.0:
            warnings.warn('too high eccentricity is found. value has been reset to 0.99')
            rand_params['e'][i] = 0.99
        if q <= 1.0:
            rand_params['w'][i] = w_in[i_in]
            i_in += 1
        else:
            # try
            rand_params['w'][i] = w_out[i_out]
            i_out += 1
    # semi-major axis derived from perihelion distance and eccentricity
    rand_params['a'] = rand_params['q']/(1.0 - rand_params['e'])
    # e_rand = rand_params['e']
    # print "e_rand[e_rand >= 1]:", e_rand[e_rand >= 0.9]
    # print len(rand_params['e']), type(rand_params['e'])
    # print len(rand_params['i']), type(rand_params['i'])
    # print len(rand_params['om']), type(rand_params['om'])
    # print len(rand_params['q']), type(rand_params['q'])
    # print len(rand_params['w']), type(rand_params['w'])
    # print len(rand_params['a']), type(rand_params['a'])
    names_extend = rand_params.keys()
    randdata = np.array([rand_params[name] for name in names_extend]).T
    dataframe = pd.DataFrame(randdata, columns=names_extend)
    return dataframe
if __name__ == '__main__':
    # Demo pipeline: load test datasets, fit per-parameter distributions,
    # generate random orbits and plot the fitted distributions.
    haz = rdb.loadObject('./asteroid_data/haz_test.p')
    nohaz = rdb.loadObject('./asteroid_data/nohaz_test.p')
    names = ['a', 'i', 'w', 'om', 'q', 'n', 'ma', 'epoch']
    data_full = pd.concat([haz[names], nohaz[names]])
    params = get_param_bounds(data_full, names)
    rdb.dumpObject(params, './asteroid_data/orbparams_minmax.p')
    # gen_rand_params(params=params)
    print "init orbit generation..."
    # names = ['a', 'e', 'i', 'w', 'om', 'q']
    # gkde = GaussianKDE('gkde', data_full['w'].as_matrix())
    # gkde2 = GaussianKDE('gkde2', data_full['om'].as_matrix())
    # gkde_a = GaussianKDE('gkde_a', data_full['a'].as_matrix())
    # kde_a = GaussianKDE(data_full['a'])
    names = ['a', 'i', 'w', 'om', 'q']
    bimod = BimodalDistribution() # ss.logistic, ss.logistic
    # one fitted distribution type per orbital parameter, same order as names
    statdists = [ss.johnsonsb, ss.exponweib, HarmonicDistribution(), HarmonicDistribution(), ss.pearson3] # ss.exponweib ss.loggamma
    # ss.genlogistic ss.exponweib ss.loggamma ss.burr
    # ss.fatiguelife ss.foldnorm ss.genpareto ss.gompertz!!! ss.johnsonsb!!! ss.pearson3 ss.powerlognorm ss.recipinvgauss
    # ss.uniform, ss.beta
    data_full = pd.concat([haz[names], nohaz[names]])
    distlist = get_param_distributions(data_full, names, statdists, n=25, verbose=True)
    # NOTE(review): gen_rand_orbits takes (names, distlist, num) — the extra
    # leading 'params' argument here will raise a TypeError; confirm whether
    # the call should be gen_rand_orbits(names, distlist, num=2e5)
    randdata = gen_rand_orbits(params, names, distlist, num=2e5)
    print "orbit generation finished."
    print "randdata sample:\n", randdata[:5]
    plot_param_distributions(distlist, names)
    # ### CALCULATE MOID ###
    # data = rdb.calc_moid(randdata, jobtime=True)
    # # haz, nohaz = rdb.get_hazMOID(data)
    # ### DUMP RANDOM ORBITS ###
    # haz_rand, nohaz_rand = rdb.get_hazMOID(randdata)
    # rdb.dumpObject(haz_rand, './asteroid_data/haz_rand_2e5m.p')
    # rdb.dumpObject(nohaz_rand, './asteroid_data/nohaz_rand_2e5m.p')
    # print "haz_rand:", len(haz_rand)
    # print "nohaz_rand:", len(nohaz_rand)
    # ### DUMP PARAMETERS DISTRIBUTIONS ###
    # distdict = {name: dist for name, dist in zip(names, distlist)}
    # rdb.dumpObject(distdict, './asteroid_data/param_dist.p')
    # # rdb.dumpObject(distlist, './asteroid_data/param_distlist.p')
    # rand_params = gen_rand_params(num=4)
    # # print "rand_params:", rand_params
    # # for key, value in rand_params.items():
    # #     print "%s\t%d" %(key, len(value))
    # reference list of scipy.stats continuous distribution names
    # (unused at runtime; kept for experimentation)
    dist_names = ['alpha',
                  'anglit',
                  'arcsine',
                  'beta',
                  'betaprime',
                  'bradford',
                  'burr',
                  'cauchy',
                  'chi',
                  'chi2',
                  'cosine',
                  'dgamma',
                  'dweibull',
                  'erlang',
                  'expon',
                  'exponweib',
                  'exponpow',
                  'f',
                  'fatiguelife',
                  'fisk',
                  'foldcauchy',
                  'foldnorm',
                  'frechet_r',
                  'frechet_l',
                  'genlogistic',
                  'genpareto',
                  'genexpon',
                  'genextreme',
                  'gausshyper',
                  'gamma',
                  'gengamma',
                  'genhalflogistic',
                  'gilbrat',
                  'gompertz',
                  'gumbel_r',
                  'gumbel_l',
                  'halfcauchy',
                  'halflogistic',
                  'halfnorm',
                  'hypsecant',
                  'invgamma',
                  'invgauss',
                  'invweibull',
                  'johnsonsb',
                  'johnsonsu',
                  'ksone',
                  'kstwobign',
                  'laplace',
                  'logistic',
                  'loggamma',
                  'loglaplace',
                  'lognorm',
                  'lomax',
                  'maxwell',
                  'mielke',
                  'nakagami',
                  'ncx2',
                  'ncf',
                  'nct',
                  'norm',
                  'pareto',
                  'pearson3',
                  'powerlaw',
                  'powerlognorm',
                  'powernorm',
                  'rdist',
                  'reciprocal',
                  'rayleigh',
                  'rice',
                  'recipinvgauss',
                  'semicircular',
                  't',
                  'triang',
                  'truncexpon',
                  'truncnorm',
                  'tukeylambda',
                  'uniform',
                  'vonmises',
                  'wald',
                  'weibull_min',
                  'weibull_max',
                  'wrapcauchy']
| |
__author__ = 'rcj1492'
__created__ = '2017.06'
__license__ = 'MIT'
'''
deploy to heroku
deploy to EC2
TODO: deploy to other platforms (azure, gcp, bluemix, rackspace, openshift)
'''
# CLI metadata consumed by the pocketlab command parser to render the
# `deploy` sub-command's title, description and help text
_deploy_details = {
    'title': 'Deploy',
    'description': 'Deploys a service to a remote platform. Deploy is currently only available for the heroku and ec2 platforms. Deploy can also deploy static html sites and apps using their dependencies if the root folder is added to one of the runtime type flags (ex. lab deploy heroku --html site/)\n\nPLEASE NOTE: deploy uses the service name specified in the docker-compose.yaml configuration file to determine which instance to connect to. The service name will be added as part of ```lab launch ec2```. Otherwise, a tag must be added to the instance with key "Services" and value "<service1>,<service2>".',
    'help': 'deploys service to a remote platform',
    'benefit': 'Makes a service available online.'
}
from pocketlab.init import fields_model
def deploy(platform_name, service_option, environ_type='test', resource_tags='', region_name='', verbose=True, overwrite=False, resume_routine=False, print_terminal=False, mount_volumes=False, virtualbox='default', html_folder='', php_folder='', python_folder='', java_folder='', ruby_folder='', node_folder='', jingo_folder=''):
    '''
    a method to deploy the docker image of a service to a remote host

    :param platform_name: string with name of remote platform to host service
    :param service_option: [optional] string with name of service in lab registry
    :param environ_type: [optional] string with environment of instance (dev, test, prod, asg)
    :param resource_tags: [optional] comma separated string with tags on remote platform
    :param region_name: [optional] string with name of remote provider region
    :param verbose: [optional] boolean to toggle process messages
    :param overwrite: [optional] boolean to overwrite existing container
    :param resume_routine: [optional] boolean to resume from last sub-routine
    :param mount_volumes: [optional] boolean to mount volumes in docker-compose.yaml
    :param virtualbox: [optional] string with name of virtualbox image (win7/8)
    :param html_folder: [optional] string with path to static html site folder root
    :param php_folder: [optional] string with path to php app folder root
    :param python_folder: [optional] string with path to python app folder root
    :param java_folder: [optional] string with path to java app folder root
    :param ruby_folder: [optional] string with path to ruby app folder root
    :param node_folder: [optional] string with path to node app folder root
    :param jingo_folder: [optional] string with path to jingo app folder root
    :param print_terminal: [optional] boolean to print ssh commands and conf values
    :return: string with exit message
    '''

    title = 'deploy'

    # ingest service option (a plain string is normalized into a one-item list)
    if isinstance(service_option, str):
        if service_option:
            service_option = [service_option]

    # validate inputs against the lab fields model
    input_fields = {
        'service_option': service_option,
        'platform_name': platform_name,
        'environ_type': environ_type,
        'resource_tags': resource_tags,
        'region_name': region_name,
        'virtualbox': virtualbox,
        'html_folder': html_folder,
        'php_folder': php_folder,
        'python_folder': python_folder,
        'java_folder': java_folder,
        'ruby_folder': ruby_folder,
        'node_folder': node_folder,
        'jingo_folder': jingo_folder
    }
    for key, value in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            fields_model.validate(value, '.%s' % key, object_title)

    # determine service name (only the first service option is used)
    service_name = ''
    if service_option:
        service_name = service_option[0]

    # construct path to service root
    from pocketlab.methods.service import retrieve_service_root
    if service_name:
        service_insert = '"%s"' % service_name
        service_root = retrieve_service_root(service_name)
    else:
        service_insert = 'in working directory'
        service_root = './'
    details = {
        'name': service_name,
        'insert': service_insert,
        'path': service_root
    }

    # construct service list
    service_list = []
    exit_msg = ''

    # deploy to heroku
    if platform_name == 'heroku':

        # import dependencies
        from os import path
        from pocketlab.methods.validation import validate_platform
        from pocketlab import __module__
        from jsonmodel.loader import jsonLoader
        from jsonmodel.validators import jsonModel

        # validate heroku file (credentials/config read from the .lab folder)
        heroku_schema = jsonLoader(__module__, 'models/heroku-config.json')
        heroku_model = jsonModel(heroku_schema)
        heroku_details = validate_platform(heroku_model, details['path'], service_name, '.lab')
        details['config'] = heroku_details
        service_list.append(details)

        # define site folder path function
        # rejects absolute paths: the folder must be relative to the service root
        def _site_path(site_folder, service_root, service_insert, runtime_type):
            from os import path
            if path.isabs(site_folder):
                raise Exception('--%s %s must be a path relative to root of service %s' % (runtime_type, site_folder, service_insert))
            site_path = path.join(service_root, site_folder)
            return site_path

        # process deployment sequence
        from labpack.platforms.heroku import herokuClient
        for service in service_list:

            # construct message inserts
            service_insert = service['insert']
            msg_insert = 'working directory'
            if service['name']:
                msg_insert = 'root directory for "%s"' % service['name']

            # initialize heroku client
            heroku_kwargs = {
                'account_email': service['config']['heroku_account_email'],
                'auth_token': service['config']['heroku_auth_token'],
                'verbose': verbose
            }
            heroku_client = herokuClient(**heroku_kwargs)
            heroku_client.access(service['config']['heroku_app_subdomain'])
            heroku_insert = "service %s deployed to heroku.\nIf you haven't already, you must allocate resources to this heroku service.\nTry: heroku ps:scale web=1 --app %s" % (service_insert, service['config']['heroku_app_subdomain'])

            # deploy app from requirements — the first non-empty runtime folder
            # flag wins; the docker path below runs only when none is given
            if html_folder:
                html_folder = _site_path(html_folder, service['path'], service_insert, 'html')
                heroku_client.deploy_app(html_folder)
                exit_msg = 'Static site of %s' % heroku_insert
            elif php_folder:
                php_folder = _site_path(php_folder, service['path'], service_insert, 'php')
                heroku_client.deploy_app(php_folder, 'php')
                exit_msg = 'Php app of %s' % heroku_insert
            elif python_folder:
                python_folder = _site_path(python_folder, service['path'], service_insert, 'python')
                heroku_client.deploy_app(python_folder, 'python')
                exit_msg = 'Python app of %s' % heroku_insert
            elif java_folder:
                java_folder = _site_path(java_folder, service['path'], service_insert, 'java')
                heroku_client.deploy_app(java_folder, 'java')
                exit_msg = 'Java app of %s' % heroku_insert
            elif ruby_folder:
                ruby_folder = _site_path(ruby_folder, service['path'], service_insert, 'ruby')
                heroku_client.deploy_app(ruby_folder, 'ruby')
                exit_msg = 'Ruby app of %s' % heroku_insert
            elif node_folder:
                node_folder = _site_path(node_folder, service['path'], service_insert, 'node')
                heroku_client.deploy_app(node_folder, 'node')
                exit_msg = 'Node app of %s' % heroku_insert
            elif jingo_folder:
                jingo_folder = _site_path(jingo_folder, service['path'], service_insert, 'jingo')
                heroku_client.deploy_app(jingo_folder, 'jingo')
                exit_msg = 'Jingo app of %s' % heroku_insert

            # deploy app in docker container
            else:

                # establish path of files
                from os import path
                from time import time
                dockerfile_path = path.join(service['path'], 'Dockerfile')
                platform_path = path.join(service['path'], 'DockerfileHeroku')
                compose_path = path.join(service['path'], 'docker-compose.yaml')
                # timestamped backup name so the original Dockerfile can be restored
                temp_path = path.join(service['path'], 'DockerfileTemp%s' % int(time()))

                # construct system envvar
                system_envvar = {
                    'system_environment': environ_type,
                    'system_platform': 'heroku'
                }

                # compile dockerfile text
                from pocketlab.methods.config import compile_dockerfile
                dockerfile_text = compile_dockerfile(
                    dockerfile_path=dockerfile_path,
                    platform_path=platform_path,
                    compose_path=compose_path,
                    service_details=service,
                    msg_insert=msg_insert,
                    platform_name='heroku',
                    system_envvar=system_envvar,
                    verbose=verbose
                )
                if print_terminal:
                    print(dockerfile_text)

                # create temporary Dockerfile (backup the original, then overwrite
                # it with the compiled text for the duration of the build)
                from os import remove
                from shutil import copyfile
                if path.exists(dockerfile_path):
                    copyfile(dockerfile_path, temp_path)
                with open(dockerfile_path, 'wt') as f:
                    f.write(dockerfile_text)
                    f.close()

                # construct deploy kwargs
                docker_kwargs = {
                    'dockerfile_path': dockerfile_path,
                    'virtualbox_name': virtualbox
                }

                # start build and deployment
                try:
                    heroku_client.deploy_docker(**docker_kwargs)
                except:
                    # ROLLBACK Dockerfile before re-raising
                    if path.exists(temp_path):
                        copyfile(temp_path, dockerfile_path)
                        remove(temp_path)
                    raise

                # restore Dockerfile
                if path.exists(temp_path):
                    copyfile(temp_path, dockerfile_path)
                    remove(temp_path)

                exit_msg = 'Docker image of %s' % heroku_insert

            # report per-service progress when deploying more than one service
            if len(service_list) > 1:
                print(exit_msg)

    # deploy to ec2
    elif platform_name == 'ec2':

        # check for library dependencies
        from pocketlab.methods.dependencies import import_boto3
        import_boto3('ec2 platform')

        # retrieve progress point — lets an interrupted deploy resume at the
        # last completed sub-routine when resume_routine is set
        from pocketlab import __module__
        from labpack.storage.appdata import appdataClient
        from labpack.compilers.encoding import encode_data, decode_data
        progress_client = appdataClient(collection_name='Progress Points', prod_name=__module__)
        progress_id = 'deploy.yaml'
        progress_map = { 'step': 0 }
        if resume_routine:
            try:
                progress_map = decode_data(progress_id, progress_client.load(progress_id))
            except:
                # NOTE(review): bare except silently falls back to step 0 when no
                # saved progress exists (best-effort by design)
                pass
        progress_client.save(progress_id, encode_data(progress_id, progress_map))

        # retrieve aws config
        from os import path, remove
        from time import time
        from pocketlab import __module__
        from jsonmodel.loader import jsonLoader
        from jsonmodel.validators import jsonModel
        from pocketlab.methods.validation import validate_platform
        aws_schema = jsonLoader(__module__, 'models/aws-config.json')
        aws_model = jsonModel(aws_schema)
        aws_config = validate_platform(aws_model, details['path'], service_name, '.lab')
        details['config'] = aws_config
        service_list.append(details)

        # construct docker client
        from labpack.platforms.docker import dockerClient
        docker_client = dockerClient()

        # iterate over each service
        for service in service_list:

            # construct variables
            service_name = service['name']
            service_root = service['path']
            service_insert = service['insert']
            msg_insert = 'working directory'
            if service_name:
                msg_insert = 'root directory for "%s"' % service_name
            ec2_insert = 'service %s deployed to ec2.' % service_insert

            # retrieve instance details from ec2
            from pocketlab.methods.aws import initialize_clients
            ec2_client, ssh_client, instance_details = initialize_clients(
                aws_cred=aws_config,
                service_name=service_name,
                service_insert=service_insert,
                service_root=service_root,
                region_name=region_name,
                environment_type=environ_type,
                resource_tags=resource_tags,
                verbose=verbose
            )

            # define ssh script printer: runs a command over ssh, printing a
            # progress message (and 'done.'/'ERROR.') unless print_terminal is on
            def print_script(command, message, error=''):
                if verbose:
                    if print_terminal:
                        print(message)
                    else:
                        print(message, end='', flush=True)
                try:
                    response = ssh_client.script(command)
                    if verbose:
                        if not print_terminal:
                            print('done.')
                except:
                    if verbose:
                        print('ERROR.')
                    if error:
                        raise Exception(error)
                    else:
                        raise
                return response

            # disable normal ssh client printing
            if not print_terminal:
                ssh_client.ec2.iam.verbose = False

            # verify docker installed on ec2
            sys_command = 'docker --help'
            sys_message = 'Verifying docker installed on ec2 image ... '
            sys_error = '"docker" not installed.\nTry using an ECS-Optimized AMI or install docker (https://www.docker.com).'
            print_script(sys_command, sys_message, sys_error)

            # retrieve docker images
            sys_command = 'docker images'
            sys_message = 'Retrieving list of images on ec2 image ... '
            sys_output = print_script(sys_command, sys_message)
            image_list = docker_client._images(sys_output)

            # retrieve docker containers
            sys_command = 'docker ps -a'
            sys_message = 'Retrieving list of containers on ec2 image ... '
            sys_output = print_script(sys_command, sys_message)
            container_list = docker_client._ps(sys_output)

            # retrieve list of ports (parse netstat output, skipping its header line)
            sys_command = 'netstat -lntu'
            sys_message = 'Retrieving list of open ports on ec2 image ... '
            sys_output = print_script(sys_command, sys_message)
            from labpack.parsing.shell import convert_table
            output_lines = sys_output.splitlines()
            sys_output = '\n'.join(output_lines[1:])
            delimiter = '\s(?!A)\s*'
            connection_list = convert_table(sys_output, delimiter)
            port_list = []
            import re
            for connection in connection_list:
                if connection['State'] == 'LISTEN':
                    port_search = re.findall('.*:(\d+)$', connection['Local Address'])
                    port_list.append(int(port_search[0]))

            # retrieve service configurations
            from pocketlab.methods.service import retrieve_service_config
            service_title = '%s %s' % (title, platform_name)
            service_config, service_name = retrieve_service_config(
                service_root=service_root,
                service_name=service_name,
                command_title=service_title
            )

            # verify overwrite of existing container
            existing_container = None
            for container in container_list:
                if service_name == container['NAMES']:
                    import json
                    sys_command = 'docker inspect %s' % service_name
                    response = ssh_client.script(sys_command)
                    settings = json.loads(response)
                    # probe the container's logs to infer whether it is reachable;
                    # failure to read logs is treated as an exited container
                    try:
                        sys_command = 'docker logs --tail 1 %s' % service_name
                        ssh_client.script(sys_command)
                        status = 'stopped'
                    except:
                        status = 'exited'
                    synopsis = docker_client._synopsis(settings[0], status)
                    if not overwrite and not resume_routine:
                        raise Exception('"%s" is %s on ec2 image. To replace, add "-f"' % (service_name, synopsis['container_status']))
                    else:
                        existing_container = synopsis

            # verify port availability — ports mapped by the container being
            # replaced are excluded from the conflict check
            from pocketlab.methods.service import compile_ports
            service_ports = compile_ports(service_config)
            if service_ports:
                container_ports = set()
                if existing_container:
                    for key in existing_container['mapped_ports'].keys():
                        container_ports.add(int(key))
                used_ports = set(port_list) - container_ports
                conflict_ports = used_ports.intersection(service_ports)
                if conflict_ports:
                    from labpack.parsing.grammar import join_words
                    port_names = join_words(list(conflict_ports))
                    port_plural = ''
                    if len(conflict_ports) > 1:
                        port_plural = 's'
                    raise Exception('Port%s %s are already in use by other processes on ec2 image.' % (port_plural, port_names))

            # construct system envvar
            system_envvar = {
                'system_environment': environ_type,
                'system_platform': 'ec2',
                'system_ip': '',
                'public_ip': ''
            }
            if 'public_ip_address' in instance_details.keys():
                system_envvar['public_ip'] = instance_details['public_ip_address']
            if 'private_ip_address' in instance_details.keys():
                system_envvar['system_ip'] = instance_details['private_ip_address']

            # verify image exists
            if mount_volumes:
                # validate image exists in local docker repository
                from pocketlab.methods.validation import validate_image
                docker_images = docker_client.images()
                service_repo, service_tag = validate_image(service_config, docker_images, service_name)
            # or build new image
            else:
                # define service variables and check progress
                service_repo = service_name
                service_tag = ''
                if progress_map['step'] < 1:
                    # establish path of files
                    dockerfile_path = path.join(service_root, 'Dockerfile')
                    platform_path = path.join(service_root, 'DockerfileEC2')
                    compose_path = path.join(service_root, 'docker-compose.yaml')
                    temp_path = path.join(service_root, 'DockerfileTemp%s' % int(time()))
                    # compile dockerfile text
                    from pocketlab.methods.config import compile_dockerfile
                    dockerfile_text = compile_dockerfile(
                        dockerfile_path=dockerfile_path,
                        platform_path=platform_path,
                        compose_path=compose_path,
                        service_details=service,
                        msg_insert=msg_insert,
                        platform_name='ec2',
                        system_envvar=system_envvar,
                        verbose=verbose
                    )
                    if print_terminal:
                        print(dockerfile_text)
                    # create temporary Dockerfile (backup original during build)
                    from os import remove
                    from shutil import copyfile
                    if path.exists(dockerfile_path):
                        copyfile(dockerfile_path, temp_path)
                    with open(dockerfile_path, 'wt') as f:
                        f.write(dockerfile_text)
                        f.close()
                    # start image build
                    try:
                        if verbose:
                            print('Building docker image ... ')
                            docker_client.verbose = True
                        docker_client.build(service_name, dockerfile_path=dockerfile_path)
                        if verbose:
                            docker_client.verbose = False
                    except Exception as err:
                        # ROLLBACK Dockerfile before re-raising
                        if path.exists(temp_path):
                            copyfile(temp_path, dockerfile_path)
                            remove(temp_path)
                        raise
                    # restore Dockerfile
                    if path.exists(temp_path):
                        copyfile(temp_path, dockerfile_path)
                        remove(temp_path)
                    # save progress
                    progress_map['step'] = 1
                    progress_client.save(progress_id, encode_data(progress_id, progress_map))

            # copy volumes to ec2 image
            volumes_mounted = False
            if mount_volumes:
                if progress_map['step'] < 1:
                    if 'volumes' in service_config.keys():
                        # create directory for service
                        if service_config['volumes']:
                            # verbosity
                            if verbose:
                                print('Copying volumes to ec2 image', end='', flush=True)
                            # determine if service folder exists
                            sys_command = 'ls %s' % service_name
                            try:
                                ssh_client.script(sys_command)
                                if not overwrite:
                                    if verbose:
                                        print('ERROR.')
                                    # NOTE(review): this raise appears to be caught by
                                    # the outer bare except below (which then runs
                                    # mkdir) — TODO confirm intended control flow
                                    raise Exception('Files for "%s" already exist on ec2 image. To replace, add "-f"' % (service_name))
                                # determine if service node is a folder
                                try:
                                    sys_command = 'cd %s' % service_name
                                    ssh_client.script(sys_command)
                                except:
                                    sys_commands = [
                                        'sudo rm %s' % service_name,
                                        'mkdir %s' % service_name
                                    ]
                                    ssh_client.script(sys_commands)
                            except:
                                ssh_client.script('mkdir %s' % service_name)
                            # copy volumes to image
                            from os import path
                            for volume in service_config['volumes']:
                                if volume['type'] == 'bind':
                                    remote_path = path.join(service_name, volume['source'])
                                    local_path = path.join(service_root, volume['source'])
                                    try:
                                        ssh_client.put(local_path, remote_path, overwrite=True)
                                    except:
                                        if verbose:
                                            print(' ERROR.')
                                        raise
                                    if verbose:
                                        print('.', end='', flush=True)
                            volumes_mounted = True
                            # verbosity
                            if verbose:
                                print(' done.')
                    # save progress
                    progress_map['step'] = 1
                    progress_client.save(progress_id, encode_data(progress_id, progress_map))

            # save docker image to local file, copy it to the instance, then load it
            if progress_map['step'] < 2:
                file_name = '%s%s.tar' % (service_name, int(time()))
                file_path = path.relpath(path.join(service_root, file_name))
                if verbose:
                    print('Saving docker image %s as %s ... ' % (service_name, file_name), end='', flush=True)
                try:
                    docker_client.save(service_repo, file_path, service_tag)
                    if verbose:
                        print('done.')
                except:
                    if verbose:
                        print('ERROR.')
                    # ROLLBACK local tar file
                    if path.exists(file_path):
                        remove(file_path)
                    raise
                # copy local file to ec2 image
                if verbose:
                    print('Copying %s to ec2 image ... ' % file_name, end='', flush=True)
                try:
                    ssh_client.put(file_path, file_name)
                    if verbose:
                        print('done.')
                except:
                    if verbose:
                        print('ERROR.')
                    # ROLLBACK local tar file
                    if path.exists(file_path):
                        remove(file_path)
                    raise
                # remove local tar file
                if path.exists(file_path):
                    remove(file_path)
                # load file into docker on ec2 image
                sys_commands = [
                    'docker load -i %s' % file_name,
                    'rm %s' % file_name
                ]
                sys_message = 'Loading %s into docker on ec2 image ... ' % file_name
                print_script(sys_commands, sys_message)
                # save progress
                progress_map['step'] = 2
                progress_client.save(progress_id, encode_data(progress_id, progress_map))

            # compile run command and (re)start the container
            if progress_map['step'] < 3:
                from pocketlab.methods.docker import compile_run_kwargs, compile_run_command
                run_kwargs = compile_run_kwargs(
                    service_config=service_config,
                    service_repo=service_repo,
                    service_alias=service_name,
                    service_tag=service_tag,
                    service_path=service_root,
                    system_envvar=system_envvar
                )
                # without mounted volumes, env vars / volumes / start command were
                # baked into the image at build time, so the run command omits them
                if not volumes_mounted:
                    run_kwargs['environmental_variables'] = {}
                    run_kwargs['mounted_volumes'] = {}
                    run_kwargs['start_command'] = ''
                run_command = compile_run_command(run_kwargs, root_path='~/%s' % service_name)
                # remove existing container
                if existing_container:
                    sys_command = 'docker rm -f %s' % existing_container['container_alias']
                    sys_message = 'Removing existing container "%s" on ec2 image ... ' % existing_container['container_alias']
                    print_script(sys_command, sys_message)
                # start container
                sys_message = 'Starting container "%s" on ec2 image ... ' % service_name
                print_script(run_command, sys_message)
                # save progress
                progress_map['step'] = 3
                progress_client.save(progress_id, encode_data(progress_id, progress_map))

            # update docker service restart entry in the instance's rc.d init file
            if progress_map['step'] < 4:
                # retrieve scripts for image type
                from pocketlab.methods.config import retrieve_scripts
                image_details = ec2_client.read_image(instance_details['image_id'])
                package_details = retrieve_scripts('docker', image_details['name'])
                package_services = package_details.get('services',{})
                services_init = package_services['init']
                # retrieve system restart commands
                sys_command = 'sudo ls %s 2>/dev/null' % services_init
                sys_message = 'Checking rc.d for system restart file ... '
                rcd_file = print_script(sys_command, sys_message)
                if not rcd_file:
                    sys_command = 'sudo touch %s' % services_init
                    sys_message = 'Creating rc.d system restart file for docker ... '
                    print_script(sys_command, sys_message)
                sys_command = 'sudo cat %s' % services_init
                sys_message = 'Checking rc.d for service restart command ... '
                s99local_text = print_script(sys_command, sys_message)
                # update docker restart command (append only if not already present)
                restart_command = 'docker restart %s' % service_name
                if s99local_text.find(restart_command) == -1:
                    sys_command = 'sudo chmod 777 %s; echo "%s" >> %s' % (services_init, restart_command, services_init)
                    sys_message = 'Updating %s to restart service on system restart ... ' % services_init
                    print_script(sys_command, sys_message)
                # add boot file to system restart
                if not rcd_file:
                    rcd_enable = package_services.get('enable','')
                    if rcd_enable:
                        sys_message = 'Adding rc.d file to system startup ... '
                        print_script(rcd_enable, sys_message)

            # TODO cleanup orphaned files and images on ec2
            if progress_map['step'] < 5:
                pass

            # compose exit message
            ssh_client.ec2.iam.verbose = True
            exit_msg = 'Docker image of %s.\nTo add a reverse proxy, try: lab update nginx ec2' % ec2_insert
            if len(service_list) > 1:
                print(exit_msg)

            # reset progress
            progress_map['step'] = 0
            progress_client.save(progress_id, encode_data(progress_id, progress_map))

    # TODO consider rollback options
    # TODO consider build versioning/storage

    # report composite outcome
    if len(service_list) > 1:
        service_names = []
        for service in service_list:
            service_names.append('"%s"' % service['name'])
        from labpack.parsing.grammar import join_words
        exit_insert = join_words(service_names)
        exit_msg = 'Finished deploying %s to %s.' % (exit_insert, platform_name)

    return exit_msg
| |
from fabric import colors
from fabric import api as fab
from fabric import decorators
from fabric.contrib import files
import os, getpass
fab.env.colors = True

# aptitude commands installing the OS-level stack: python toolchain,
# supervisor, uwsgi, nginx, postgresql and postfix
OS_COMMANDS = ('sudo apt-get install aptitude',
               'sudo aptitude update',
               'sudo aptitude install python-dev -y',
               'sudo aptitude install python-pip -y',
               'sudo aptitude install python-virtualenv supervisor uwsgi uwsgi-plugin-python nginx postgresql postgresql-server-dev-9.5 -y',
               'sudo aptitude install libffi-dev postfix -y',
               )
# node package manager (prerequisite for bower)
NODE_COMMANDS = (
    'sudo aptitude install npm',
)
# openssl commands generating a self-signed server certificate;
# each entry is .format()-ed with {installDir} before execution
certCommands = (
    'openssl genrsa -aes256 -out {installDir}/server/server.key 4096',
    'openssl req -new -key {installDir}/server/server.key -out {installDir}/server/server.csr',
    'cp {installDir}/server/server.key {installDir}/server/server.key.org',
    'openssl rsa -in {installDir}/server/server.key.org -out {installDir}/server/server.key',
    'openssl x509 -req -days 365 -in {installDir}/server/server.csr -signkey {installDir}/server/server.key -out {installDir}/server/server.crt',
)
# supervisord program stanza running the app through uwsgi
supervisorTextTemplate = '''
[program:{programName}]
command=uwsgi --ini {uwsgiConfLocation}
autostart=true
autorestart=true
stdout_logfile=/var/log/{programName}.out.log
redirect_stderr=true
user={user}
stopsignal=QUIT
environment=LANG=en_US.UTF-8, LC_ALL=en_US.UTF-8, LC_LANG=en_US.UTF-8
stdout_logfile_maxbytes=500000
stdout_logfile_backups=10
'''
# uwsgi ini serving the django WSGI handler over a unix socket
uwsgiTextTemplate = '''
[uwsgi]
socket = /tmp/{programName}.sock
chdir = {installDir}
virtualenv = {venv_location}
env = DJANGO_SETTINGS_MODULE=mysite.settings
home = {venv_location}
uid = {user}
gid = {user}
processes = 8
threads = 2
module = django.core.handlers.wsgi:WSGIHandler()
chmod-socket = 666
'''
# nginx site config: plain-HTTP listener redirects to the TLS port; an api
# port and a TLS server proxy to django through the uwsgi unix socket
nginxTextTemplate = '''
upstream django {{
server unix:///tmp/{programName}.sock; # for a file socket
}}
# configuration of the server
server {{
# the port your site will be served on
listen {port};
# the domain name it will serve for
server_name {serverName};
rewrite ^(.*) https://$host:{securePort}$1 permanent;
}}
server {{
listen {apiPort};
server_name {serverName};
location /mediaviewer/api{{
uwsgi_pass django;
include {installDir}/server/uwsgi_params; # the uwsgi_params file you installed
}}
}}
server {{
listen {securePort};
server_name {serverName};
ssl on;
ssl_certificate {installDir}/server/server.crt;
ssl_certificate_key {installDir}/server/server.key;
ssl_session_timeout 5m;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers "EECDH+ECDSA+AESGCM:EECDH+aRSA+AESGCM:EECDH+ECDSA+SHA256:EECDH+aRSA+SHA256:EECDH+ECDSA+SHA384:EECDH+ECDSA+SHA256:EECDH+aRSA+SHA384:EDH+aRSA+AESGCM:EDH+aRSA+SHA256:EDH+aRSA:EECDH:!aNULL:!eNULL:!MEDIUM:!LOW:!3DES:!MD5:!EXP:!PSK:!SRP:!DSS:!RC4:!SEED";
ssl_prefer_server_ciphers on;
# Django media
location /media {{
alias {installDir}/mediaviewer/static/media;
expires 1y;
}}
location /static {{
alias {installDir}/static;
expires 1d;
}}
# Finally, send all non-media requests to the Django server.
location /mediaviewer{{
uwsgi_pass django;
include {installDir}/server/uwsgi_params;
}}
location /admin{{
uwsgi_pass django;
include {installDir}/server/uwsgi_params;
}}
location /static/admin/ {{
# this changes depending on your python version
root {venv_location}/lib/python2.7/site-packages/django/contrib/admin/;
expires 1y;
}}
location /static/rest_framework/ {{
# this changes depending on your python version
root {venv_location}/local/lib/python2.7/site-packages/rest_framework;
expires 1y;
}}
}}
'''
# daily cron script running the cleanup SQL against the autodl database
dailyTemplate = '''#!/bin/bash
echo "Starting daily cleanup `date`"
psql -d autodl -f {installDir}/daily.sql
echo "Ending daily cleanup `date`"
'''
# contents of the .pth file that puts the install dir on the venv's sys.path
virtualenvPthTemplate = '{installDir}'
def create_venv(venv_home, venv_name):
    """Create a python2.7 virtualenv named *venv_name* under *venv_home*.

    Returns the filesystem path of the new environment.
    """
    target = os.path.join(venv_home, venv_name)
    fab.local('virtualenv -p python2.7 %s' % target)
    return target
def get_venv_prefix(venv_location):
    """Return the shell command that activates the virtualenv at *venv_location*."""
    activate_script = os.path.join(venv_location, 'bin', 'activate')
    return '/bin/bash %s' % activate_script
def install_venv_requirements(installDir, venv_location, prefix):
    """Install the project's requirements.txt into the virtualenv using its own pip."""
    pip_binary = os.path.join(venv_location, 'bin', 'pip')
    requirements_file = os.path.join(installDir, 'requirements.txt')
    fab.local('%s && %s install -r %s' % (prefix, pip_binary, requirements_file))
def run_command_list(commands, values=None):
    """Run each shell command locally, .format()-ing it with *values* when given."""
    for command in commands:
        rendered = command.format(**values) if values else command
        fab.local(rendered)
def write_file(filename, text, use_sudo=False):
    # Append *text* to *filename* on the target via fabric's files.append,
    # optionally elevating with sudo.
    files.append(filename, text, use_sudo=use_sudo)
def write_sudo_file(filename, text):
    # Convenience wrapper: write_file with sudo elevation forced on.
    write_file(filename, text, use_sudo=True)
def add_cronjob(text):
    """Append a crontab entry by dumping, amending, and reloading the crontab.

    Runs under warn_only so a missing crontab (non-zero `crontab -l`) does not abort.
    """
    steps = (
        'crontab -l > /tmp/crondump',
        'echo "%s 2> /dev/null" >> /tmp/crondump' % text,
        'crontab /tmp/crondump',
    )
    with fab.warn_only():
        for step in steps:
            fab.local(step)
@fab.task
@decorators.hosts(['localhost'])
def update_bower():
    # Install npm (via NODE_COMMANDS) and bower, then pull front-end
    # dependencies through django-bower's manage.py command.
    run_command_list(NODE_COMMANDS)
    fab.local('sudo npm install -g bower')
    fab.local('python manage.py bower install')
@fab.task
@decorators.hosts(['localhost'])
def install():
    # Full interactive install: OS packages, virtualenv, self-signed TLS cert,
    # uwsgi/supervisor/nginx configs, daily-cleanup cron job, then service restarts.
    user = getpass.getuser()
    installDir = os.getcwd()
    run_command_list(OS_COMMANDS)
    # create the virtualenv and install python requirements into it
    venv_home = fab.prompt(colors.cyan('Specify directory where you want the '
                                       'virtual environment to be created:'),
                           default='%s/virtualenvs' % os.path.expanduser('~'))
    venv_name = fab.prompt(colors.cyan('Specify the name of the environment'),
                           default='mediaviewer')
    venv_location = create_venv(venv_home, venv_name)
    prefix = get_venv_prefix(venv_location)
    install_venv_requirements(installDir, venv_location, prefix)
    # gather deployment parameters interactively
    programName = fab.prompt(colors.cyan('Specify program name'), default='mediaviewer')
    serverName = fab.prompt(colors.cyan('Specify server IP address or FQDN'), default='127.0.0.1')
    port = fab.prompt(colors.cyan('Specify port to run application on'), default='8000')
    securePort = fab.prompt(colors.cyan('Specify secure port to run application on'), default='8001')
    apiPort = fab.prompt(colors.cyan('Specify api port to run application on'), default='8002')
    # shared substitution values for every template in this module
    values = {'programName': programName,
              'user': user,
              'venv_location': venv_location,
              'installDir': installDir,
              'uwsgiConfLocation': os.path.join(installDir, 'uwsgi.ini'),
              'port': port,
              'securePort': securePort,
              'apiPort': apiPort,
              'serverName': serverName,
              }
    # generate the self-signed certificate (prompts for passphrase/CSR fields)
    run_command_list(certCommands, values=values)
    # put the install dir on the venv's sys.path via a .pth file
    virtualenvPthText = virtualenvPthTemplate.format(**values)
    write_file(os.path.join(venv_location,
                            'lib',
                            'python2.7',
                            'site-packages',
                            'mediaviewer.pth'), virtualenvPthText)
    # write the uwsgi ini (remove first: write_file appends rather than replaces)
    uwsgiText = uwsgiTextTemplate.format(**values)
    if os.path.exists(values['uwsgiConfLocation']):
        fab.local('sudo rm %s' % values['uwsgiConfLocation'])
    write_sudo_file(values['uwsgiConfLocation'], uwsgiText)
    # install the daily cleanup script and schedule it via cron
    dailyBash = dailyTemplate.format(**values)
    dailyBashPath = os.path.join(installDir, 'daily.sh')
    write_file(dailyBashPath, dailyBash)
    fab.local('chmod a+x %s' % dailyBashPath)
    add_cronjob('@daily {installDir}/daily.sh'.format(**values))
    # write the supervisor program config
    supervisorText = supervisorTextTemplate.format(**values)
    supervisorPath = os.path.join('/etc/supervisor/conf.d', '%s.conf' % values['programName'])
    if os.path.exists(supervisorPath):
        fab.local('sudo rm %s' % supervisorPath)
    write_sudo_file(supervisorPath, supervisorText)
    # write the nginx site config
    nginxText = nginxTextTemplate.format(**values)
    nginxPath = os.path.join('/etc/nginx/sites-enabled/%s.conf' % values['programName'])
    if os.path.exists(nginxPath):
        fab.local('sudo rm %s' % nginxPath)
    write_sudo_file(nginxPath, nginxText)
    # bring everything up
    fab.local('sudo systemctl start supervisor')
    fab.local('sudo supervisorctl update')
    fab.local('sudo supervisorctl restart %s' % values['programName'])
    fab.local('sudo service nginx restart')
| |
from __future__ import print_function
import ast
import copy
import logging
import re
import time
import urllib.parse
import uuid
from pprint import pformat
from typing import Optional, Tuple
import demisto_client
import requests.exceptions
import urllib3
from demisto_client.demisto_api import DefaultApi
from demisto_client.demisto_api.models.incident import Incident
from demisto_client.demisto_api.rest import ApiException
from demisto_sdk.commands.common.constants import PB_Status
# Disable insecure warnings
from demisto_sdk.commands.test_content.tools import update_server_configuration
urllib3.disable_warnings()
# ----- Constants ----- #
DEFAULT_TIMEOUT = 60    # default seconds to wait for a server operation
DEFAULT_INTERVAL = 20   # default seconds between polling attempts
ENTRY_TYPE_ERROR = 4    # entry-type code identifying error entries
# ----- Functions ----- #
# get integration configuration
def __get_integration_config(client, integration_name, logging_module=logging):
    """Fetch the server-side configuration object for *integration_name*.

    Polls the /settings/integration/search endpoint until the response
    contains a 'configurations' key (up to TIMEOUT seconds), then returns
    the configuration whose name matches exactly, or None on any failure.

    Args:
        client (demisto_client): The configured client to use.
        integration_name (str): Name of the integration to look up.
        logging_module: logger-like object used for error reporting.
    """
    body = {
        'page': 0, 'size': 100, 'query': 'name:' + integration_name
    }

    def _search():
        # issue the search request; returns the parsed body or None on API error
        try:
            res_raw = demisto_client.generic_request_func(self=client, path='/settings/integration/search',
                                                          method='POST', body=body)
        except ApiException:
            logging_module.exception(f'failed to get integration {integration_name} configuration')
            return None
        return ast.literal_eval(res_raw[0])

    res = _search()
    if res is None:
        return None

    TIMEOUT = 180
    SLEEP_INTERVAL = 5
    total_sleep = 0
    while 'configurations' not in res:
        if total_sleep == TIMEOUT:
            logging_module.error(f"Timeout - failed to get integration {integration_name} configuration. Error: {res}")
            return None
        time.sleep(SLEEP_INTERVAL)
        total_sleep += SLEEP_INTERVAL
        # FIX: re-issue the request each iteration — the original never
        # refreshed `res`, so the loop could only spin until timeout.
        res = _search()
        if res is None:
            return None

    all_configurations = res['configurations']
    match_configurations = [x for x in all_configurations if x['name'] == integration_name]

    if not match_configurations:
        logging_module.error('integration was not found')
        return None

    return match_configurations[0]
# __test_integration_instance
def __test_integration_instance(client, module_instance, logging_module=logging):
    """Press the "Test" button for an integration instance via the API.

    Retries up to 3 times on read timeouts; returns a (success, failure_message)
    tuple, or (False, None) on API errors / non-200 responses.
    """
    connection_retries = 3
    response_code = 0
    integration_of_instance = module_instance.get('brand', '')
    instance_name = module_instance.get('name', '')
    logging_module.info(
        f'Running "test-module" for instance "{instance_name}" of integration "{integration_of_instance}".')
    for i in range(connection_retries):
        try:
            response_data, response_code, _ = demisto_client.generic_request_func(self=client, method='POST',
                                                                                  path='/settings/integration/test',
                                                                                  body=module_instance,
                                                                                  _request_timeout=120)
            break
        except ApiException:
            # hard failure — do not retry
            logging_module.exception(
                'Failed to test integration instance, error trying to communicate with demisto server')
            return False, None
        except urllib3.exceptions.ReadTimeoutError:
            # transient timeout — retry (response_code stays 0 if all attempts fail,
            # which is handled by the status-code check below)
            logging_module.warning(f"Could not connect. Trying to connect for the {i + 1} time")

    if int(response_code) != 200:
        logging_module.error(f'Integration-instance test ("Test" button) failed. Bad status code: {response_code}')
        return False, None

    # response body is a python-literal payload with 'success' and 'message' keys
    result_object = ast.literal_eval(response_data)
    success, failure_message = bool(result_object.get('success')), result_object.get('message')
    if not success:
        server_url = client.api_client.configuration.host
        test_failed_msg = f'Test integration failed - server: {server_url}.'
        test_failed_msg += f'\nFailure message: {failure_message}' if failure_message else ' No failure message.'
        logging_module.error(test_failed_msg)

    return success, failure_message
def __set_server_keys(client, logging_manager, integration_params, integration_name):
    """Adds server configuration keys using the demisto_client.

    Args:
        client (demisto_client): The configured client to use.
        logging_manager (ParallelLoggingManager): logging manager object.
        integration_params (dict): The values to use for an integration's parameters to configure an instance.
        integration_name (str): The name of the integration which the server configurations keys are related to.
    """
    if 'server_keys' not in integration_params:
        return
    logging_manager.debug(f'Setting server keys for integration: {integration_name}')
    # The 'server_keys' mapping is handed to update_server_configuration as-is.
    # (A local request body built from it was previously constructed here but
    # never used - dead code removed.)
    update_server_configuration(
        client=client,
        server_configuration=integration_params.get('server_keys'),
        error_msg='Failed to set server keys',
        logging_manager=logging_manager
    )
def __delete_integration_instance_if_determined_by_name(client, instance_name, logging_manager):
    """Deletes an integration instance that matches the given name.

    Args:
        client (demisto_client): The configured client to use.
        instance_name (str): The name of the instance to delete.
        logging_manager (ParallelLoggingManager): logging manager object.

    Notes:
        Needed when a test's configuration pins the instance name: re-creating
        an instance under an existing name makes the server raise an error, so
        any instance already holding that name is deleted first.
    """
    try:
        search_response = demisto_client.generic_request_func(self=client, method='POST',
                                                              path='/settings/integration/search',
                                                              body={'size': 1000})
        instances_payload = ast.literal_eval(search_response[0])
    except ApiException:
        logging_manager.exception(
            'Failed to delete integrations instance, error trying to communicate with demisto server')
        return
    if int(search_response[1]) != 200:
        logging_manager.error(f'Get integration instance failed with status code: {search_response[1]}')
        return
    if 'instances' not in instances_payload:
        logging_manager.info('No integrations instances found to delete')
        return
    matching = (inst for inst in instances_payload['instances'] if inst.get('name') == instance_name)
    for inst in matching:
        logging_manager.info(f'Deleting integration instance {instance_name} since it is defined by name')
        __delete_integration_instance(client, inst.get('id'), logging_manager)
# return instance name if succeed, None otherwise
def __create_integration_instance(server, username, password, integration_name, integration_instance_name,
                                  integration_params, is_byoi, logging_manager=logging, validate_test=True):
    """Configure a new instance of an integration on the server.

    Fetches the integration's configuration, fills in the supplied parameter
    values, creates the instance via the REST API and (optionally) runs the
    instance "Test" button to validate it.

    Args:
        server (str): Base URL of the demisto server.
        username (str): Username used to authenticate.
        password (str): Password used to authenticate.
        integration_name (str): Name (brand) of the integration to configure.
        integration_instance_name (str): Base name used for the created instance.
        integration_params (dict): Parameter values for the instance. May also
            carry 'integrationInstanceName' (forces a fixed instance name),
            'credentials' (identifier/password pair) and 'server_keys'.
        is_byoi (bool): Whether the integration is a custom (BYOI) script.
        logging_manager: Logger-like object used for output.
        validate_test (bool): When True, run the "Test" button and fail if it
            does not succeed.

    Returns:
        tuple: (module instance dict, '') on success, (None, error message) on failure.
    """
    # Client used for the configuration REST calls below.
    integration_conf_client = demisto_client.configure(base_url=server, username=username, password=password,
                                                       verify_ssl=False)
    configuration = __get_integration_config(integration_conf_client, integration_name, logging_manager)
    if not configuration:
        return None, 'No configuration'
    module_configuration = configuration['configuration']
    if not module_configuration:
        module_configuration = []
    if 'integrationInstanceName' in integration_params:
        # A pre-determined name: remove any instance already holding it first.
        instance_name = integration_params['integrationInstanceName']
        __delete_integration_instance_if_determined_by_name(integration_conf_client, instance_name, logging_manager)
    else:
        # Unique name so parallel/repeated runs cannot collide.
        instance_name = f'{integration_instance_name.replace(" ", "_")}_test_{uuid.uuid4()}'
    logging_manager.info(
        f'Configuring instance for {integration_name} (instance name: {instance_name}, validate "Test": {validate_test})'
    )
    # define module instance (request body for the PUT below)
    module_instance = {
        'brand': configuration['name'],
        'category': configuration['category'],
        'configuration': configuration,
        'data': [],
        'enabled': "true",
        'engine': '',
        'id': '',
        'isIntegrationScript': is_byoi,
        'name': instance_name,
        'passwordProtected': False,
        'version': 0
    }
    # set server keys
    __set_server_keys(integration_conf_client, logging_manager, integration_params, configuration['name'])
    # set module params: match each configuration entry by display name or name
    for param_conf in module_configuration:
        if param_conf['display'] in integration_params or param_conf['name'] in integration_params:
            # param defined in conf
            key = param_conf['display'] if param_conf['display'] in integration_params else param_conf['name']
            if key == 'credentials':
                # Credentials are sent as a structured object, not a plain value.
                credentials = integration_params[key]
                param_value = {
                    'credential': '',
                    'identifier': credentials['identifier'],
                    'password': credentials['password'],
                    'passwordChanged': False
                }
            else:
                param_value = integration_params[key]
            param_conf['value'] = param_value
            param_conf['hasvalue'] = True
        elif param_conf['defaultValue']:
            # param is required - take default value
            param_conf['value'] = param_conf['defaultValue']
        module_instance['data'].append(param_conf)
    try:
        res = demisto_client.generic_request_func(self=integration_conf_client, method='PUT',
                                                  path='/settings/integration',
                                                  body=module_instance)
    except ApiException:
        error_message = f'Error trying to create instance for integration: {integration_name}'
        logging_manager.exception(error_message)
        return None, error_message
    if res[1] != 200:
        error_message = f'create instance failed with status code {res[1]}'
        logging_manager.error(error_message)
        logging_manager.error(pformat(res[0]))
        return None, error_message
    integration_config = ast.literal_eval(res[0])
    module_instance['id'] = integration_config['id']
    # test integration with a freshly configured client
    refreshed_client = demisto_client.configure(base_url=server, username=username, password=password, verify_ssl=False)
    if validate_test:
        test_succeed, failure_message = __test_integration_instance(refreshed_client, module_instance, logging_manager)
    else:
        logging_manager.debug(
            f"Skipping test validation for integration: {integration_name} (it has test_validate set to false)"
        )
        test_succeed = True
    if not test_succeed:
        # Leave no half-working instance enabled behind a failed "Test".
        __disable_integrations_instances(refreshed_client, [module_instance], logging_manager)
        return None, failure_message
    return module_instance, ''
def __disable_integrations_instances(client, module_instances, logging_module=logging):
    """Disable the given configured integration instances on the server.

    Args:
        client (demisto_client): The configured client to use.
        module_instances (list[dict]): Instance objects as returned on creation.
        logging_module: Logger-like object used for output.
    """
    for configured_instance in module_instances:
        # tested with POSTMAN, this is the minimum required fields for the request.
        module_instance = {
            key: configured_instance[key] for key in ['id', 'brand', 'name', 'data', 'isIntegrationScript', ]
        }
        module_instance['enable'] = "false"
        module_instance['version'] = -1
        # Use the injected logger (was the module-level `logging`, which ignored
        # the logging_module parameter).
        logging_module.debug(f'Disabling integration {module_instance.get("name")}')
        try:
            res = demisto_client.generic_request_func(self=client, method='PUT',
                                                      path='/settings/integration',
                                                      body=module_instance)
        except ApiException:
            logging_module.exception('Failed to disable integration instance')
            return
        if res[1] != 200:
            logging_module.error(f'disable instance failed, Error: {pformat(res)}')
# create incident with given name & playbook, and then fetch & return the incident
def __create_incident_with_playbook(client: DefaultApi,
                                    name,
                                    playbook_id,
                                    integrations,
                                    logging_manager,
                                    ) -> Tuple[Optional[Incident], int]:
    """Create an incident bound to the given playbook and return it once searchable.

    Args:
        client (DefaultApi): The configured client to use.
        name (str): Name for the new incident.
        playbook_id (str): ID of the playbook to attach to the incident.
        integrations (list[dict]): Integration descriptors, used for error reporting.
        logging_manager: Logger-like object used for output.

    Returns:
        Tuple[Optional[Incident], int]: the found incident and its id,
        or (None, -1) on failure.
    """
    # create incident
    create_incident_request = demisto_client.demisto_api.CreateIncidentRequest()
    create_incident_request.create_investigation = True
    create_incident_request.playbook_id = playbook_id
    create_incident_request.name = name
    try:
        response = client.create_incident(create_incident_request=create_incident_request)
    except ApiException:
        logging_manager.exception(f'Failed to create incident with name {name} for playbook {playbook_id}')
        # Fall through with no response so the AttributeError branch below logs the
        # detailed error (previously `response` was left unbound here and
        # `response.id` raised an unhandled NameError instead).
        response = None
    try:
        inc_id = response.id
    except AttributeError:
        integration_names = [integration['name'] for integration in integrations if
                             'name' in integration]
        error_message = f'Failed to create incident for integration names: {integration_names} ' \
                        f'and playbookID: {playbook_id}.' \
                        'Possible reasons are:\nMismatch between playbookID in conf.json and ' \
                        'the id of the real playbook you were trying to use,' \
                        'or schema problems in the TestPlaybook.'
        logging_manager.error(error_message)
        return None, -1
    # get incident
    search_filter = demisto_client.demisto_api.SearchIncidentsData()
    inc_filter = demisto_client.demisto_api.IncidentFilter()
    inc_filter.query = 'id:' + str(inc_id)
    search_filter.filter = inc_filter
    incident_search_responses = []
    found_incidents = 0
    # poll the incidents queue for a max time of 300 seconds
    timeout = time.time() + 300
    while found_incidents < 1:
        try:
            incidents = client.search_incidents(filter=search_filter)
            found_incidents = incidents.total
            incident_search_responses.append(incidents)
        except ApiException:
            logging_manager.exception(f'Searching incident with id {inc_id} failed')
        if time.time() > timeout:
            logging_manager.error(f'Got timeout for searching incident with id {inc_id}')
            logging_manager.error(f'Incident search responses: {incident_search_responses}')
            return None, -1
        time.sleep(10)
    return incidents.data[0], inc_id
# returns current investigation playbook state - 'inprogress'/'failed'/'completed'
def __get_investigation_playbook_state(client, inv_id, logging_manager):
    """Return the investigation's playbook state ('inprogress'/'failed'/'completed')."""
    try:
        raw_state_response = demisto_client.generic_request_func(self=client, method='GET',
                                                                 path='/inv-playbook/' + inv_id)
        playbook_payload = ast.literal_eval(raw_state_response[0])
    except ApiException:
        logging_manager.exception(
            'Failed to get investigation playbook state, error trying to communicate with demisto server'
        )
        return PB_Status.FAILED
    return playbook_payload.get('state', PB_Status.NOT_SUPPORTED_VERSION)
# return True if delete-incident succeeded, False otherwise
def __delete_incident(client: DefaultApi, incident: Incident, logging_manager):
    """Delete the given incident; return True on success, False otherwise."""
    delete_body = {
        'ids': [incident.id],
        'filter': {},
        'all': False
    }
    try:
        res = demisto_client.generic_request_func(self=client, method='POST',
                                                  path='/incident/batchDelete', body=delete_body)
    except ApiException:
        logging_manager.exception('Failed to delete incident, error trying to communicate with demisto server')
        return False
    if int(res[1]) == 200:
        return True
    logging_manager.error(f'delete incident failed with Status code {res[1]}')
    logging_manager.error(pformat(res))
    return False
# return True if delete-integration-instance succeeded, False otherwise
def __delete_integration_instance(client, instance_id, logging_manager=logging):
    """Delete a single integration instance by id; return True on success."""
    try:
        res = demisto_client.generic_request_func(self=client, method='DELETE',
                                                  path='/settings/integration/' + urllib.parse.quote(
                                                      instance_id))
    except ApiException:
        logging_manager.exception('Failed to delete integration instance, error trying to communicate with demisto.')
        return False
    if int(res[1]) == 200:
        return True
    logging_manager.error(f'delete integration instance failed\nStatus code {res[1]}')
    logging_manager.error(pformat(res))
    return False
# delete all integration instances, return True if all succeed delete all
def __delete_integrations_instances(client, module_instances, logging_manager=logging):
    """Delete every given instance; True only when all deletions succeed.

    Every instance is attempted even after a failure.
    """
    results = [
        __delete_integration_instance(client, instance['id'], logging_manager)
        for instance in module_instances
    ]
    return all(results)
def __print_investigation_error(client, playbook_id, investigation_id, logging_manager):
    """Log the error entries of a failed investigation, masking any passwords."""
    try:
        page_request = {"pageSize": 1000}
        res = demisto_client.generic_request_func(self=client, method='POST',
                                                  path='/investigation/' + urllib.parse.quote(
                                                      investigation_id), body=page_request)
        if not (res and int(res[1]) == 200):
            logging_manager.error(f'Failed getting entries for investigation: {investigation_id}. Res: {res}')
            return
        entries = ast.literal_eval(res[0])['entries']
        logging_manager.error(f'Playbook {playbook_id} has failed:')
        error_entries = (e for e in entries if e['type'] == ENTRY_TYPE_ERROR and e['parentContent'])
        for entry in error_entries:
            logging_manager.error(f'- Task ID: {entry["taskId"]}')
            # Checks for passwords and replaces them with "******"
            masked_content = re.sub(
                r' (P|p)assword="[^";]*"', ' password=******', entry['parentContent'])
            logging_manager.error(f'  Command: {masked_content}')
            logging_manager.error(f'  Body:\n{entry["contents"]}')
    except ApiException:
        logging_manager.exception(
            'Failed to print investigation error, error trying to communicate with demisto server')
# Configure integrations to work with mock
def configure_proxy_unsecure(integration_params):
    """Enable proxy/insecure flags on the integration parameters.

    Mutates `integration_params` in place (callers rely on this) and also
    returns a deep copy carrying the same flags. Previously the copy was taken
    *before* the flags were set, so the returned dict never contained them.

    Args:
        integration_params (dict): integration parameters; mutated in place.

    Returns:
        dict: deep copy of the parameters with 'proxy', 'useProxy', 'insecure'
        and 'unsecure' all set to True.
    """
    integration_params_copy = copy.deepcopy(integration_params)
    for param in ('proxy', 'useProxy', 'insecure', 'unsecure'):
        integration_params[param] = True
        integration_params_copy[param] = True
    return integration_params_copy
# 1. create integrations instances
# 2. create incident with playbook
# 3. wait for playbook to finish run
# 4. if test pass - delete incident & instance
# return playbook status
def check_integration(client, server_url, demisto_user, demisto_pass, integrations, playbook_id,
                      logging_module=logging, options=None, is_mock_run=False):
    """Run a test playbook end to end against freshly configured integrations.

    Flow:
      1. create an instance for every integration
      2. create an incident attached to the test playbook
      3. poll until the playbook finishes (or times out)
      4. on success, delete the incident and the instances

    Args:
        client (demisto_client): The configured client to use.
        server_url (str): Base URL of the demisto server.
        demisto_user (str): Username used to authenticate.
        demisto_pass (str): Password used to authenticate.
        integrations (list[dict]): Integration descriptors (name/params/byoi/...).
        playbook_id (str): ID of the test playbook to run.
        logging_module: Logger-like object used for output.
        options (dict): Optional settings; currently only 'timeout' is read.
        is_mock_run (bool): When True, force proxy/insecure on all integrations.

    Returns:
        tuple: (playbook state, incident id) for a run that reached the playbook,
        (False, -1) on setup failure.
    """
    options = options if options is not None else {}
    # create integrations instances
    module_instances: list = []
    for integration in integrations:
        integration_name = integration.get('name', None)
        integration_instance_name = integration.get('instance_name', '')
        integration_params = integration.get('params', None)
        is_byoi = integration.get('byoi', True)
        validate_test = integration.get('validate_test', False)
        if is_mock_run:
            # Mutates integration_params in place to route through the mock proxy.
            configure_proxy_unsecure(integration_params)
        module_instance, failure_message = __create_integration_instance(server_url,
                                                                         demisto_user,
                                                                         demisto_pass,
                                                                         integration_name,
                                                                         integration_instance_name,
                                                                         integration_params,
                                                                         is_byoi, logging_module,
                                                                         validate_test=validate_test)
        if module_instance is None:
            failure_message = failure_message if failure_message else 'No failure message could be found'
            logging_module.error(f'Failed to create instance: {failure_message}')
            # Clean up any instances that were already created for this test.
            __delete_integrations_instances(client, module_instances, logging_module)
            return False, -1
        module_instances.append(module_instance)
        logging_module.info(f'Create integration {integration_name} succeed')
    # create incident with playbook
    incident, inc_id = __create_incident_with_playbook(client,
                                                       f'inc_{playbook_id}',
                                                       playbook_id,
                                                       integrations,
                                                       logging_module)
    if not incident:
        return False, -1
    investigation_id = incident.investigation_id
    if investigation_id is None or len(investigation_id) == 0:
        # Use the injected logger (was the module-level `logging`, which ignored
        # the logging_module parameter).
        logging_module.error(f'Failed to get investigation id of incident: {incident}')
        return False, -1
    logging_module.info(f'Investigation URL: {server_url}/#/WorkPlan/{investigation_id}')
    timeout_amount = options['timeout'] if 'timeout' in options else DEFAULT_TIMEOUT
    timeout = time.time() + timeout_amount
    i = 1
    # wait for playbook to finish run
    while True:
        # give playbook time to run
        time.sleep(1)
        try:
            # fetch status
            playbook_state = __get_investigation_playbook_state(client, investigation_id, logging_module)
        except demisto_client.demisto_api.rest.ApiException:
            playbook_state = 'Pending'
            # Re-create the client; a stale session can be the cause of the failure.
            client = demisto_client.configure(base_url=client.api_client.configuration.host,
                                              api_key=client.api_client.configuration.api_key, verify_ssl=False)
        if playbook_state in (PB_Status.COMPLETED, PB_Status.NOT_SUPPORTED_VERSION):
            break
        if playbook_state == PB_Status.FAILED:
            logging_module.error(f'{playbook_id} failed with error/s')
            __print_investigation_error(client, playbook_id, investigation_id, logging_module)
            break
        if time.time() > timeout:
            logging_module.error(f'{playbook_id} failed on timeout')
            break
        if i % DEFAULT_INTERVAL == 0:
            logging_module.info(f'loop no. {i / DEFAULT_INTERVAL}, playbook state is {playbook_state}')
        i = i + 1
    __disable_integrations_instances(client, module_instances, logging_module)
    test_pass = playbook_state in (PB_Status.COMPLETED, PB_Status.NOT_SUPPORTED_VERSION)
    if test_pass:
        # delete incident
        __delete_incident(client, incident, logging_module)
        # delete integration instance
        __delete_integrations_instances(client, module_instances, logging_module)
    return playbook_state, inc_id
def disable_all_integrations(dem_client, logging_manager=logging):
    """
    Disable all enabled integrations. Should be called at start of test loop to start out clean

    Arguments:
        dem_client -- demisto py client
        logging_manager -- logger-like object used for output
    """
    try:
        body = {'size': 1000}
        int_resp = demisto_client.generic_request_func(self=dem_client, method='POST',
                                                       path='/settings/integration/search',
                                                       body=body)
        int_instances = ast.literal_eval(int_resp[0])
    # generic_request_func raises ApiException (as handled everywhere else in
    # this module); keep RequestException too for backward compatibility.
    except (ApiException, requests.exceptions.RequestException):
        logging_manager.exception('Failed to disable all integrations, error trying to communicate with demisto server')
        return
    if int(int_resp[1]) != 200:
        logging_manager.error(f'Get all integration instances failed with status code: {int_resp[1]}')
        return
    if 'instances' not in int_instances:
        logging_manager.info("No integrations instances found to disable all")
        return
    to_disable = []
    for instance in int_instances['instances']:
        # Only script-backed (BYOI) instances that are currently enabled.
        if instance.get('enabled') == 'true' and instance.get("isIntegrationScript"):
            logging_manager.debug(
                f'Adding to disable list. Name: {instance.get("name")}. Brand: {instance.get("brand")}')
            to_disable.append(instance)
    if len(to_disable) > 0:
        __disable_integrations_instances(dem_client, to_disable, logging_manager)
| |
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django_dynamic_fixture import G, N
from entity.models import Entity, EntityRelationship, EntityKind
from mock import patch
from entity_subscription.models import Medium, Source, Subscription, Unsubscribe
class SubscriptionManagerMediumsSubscribedTest(TestCase):
    """Verify mediums_subscribed dispatches to the right private helper.

    The dispatched helpers themselves are covered by their own test cases.
    """
    @patch('entity_subscription.models.SubscriptionManager._mediums_subscribed_individual')
    def test_individual(self, subscribed_mock):
        src, who = N(Source), N(Entity)
        Subscription.objects.mediums_subscribed(src, who)
        self.assertEqual(len(subscribed_mock.mock_calls), 1)

    @patch('entity_subscription.models.SubscriptionManager._mediums_subscribed_group')
    def test_group(self, subscribed_mock):
        src, who, content_type = N(Source), N(Entity), N(ContentType)
        Subscription.objects.mediums_subscribed(src, who, content_type)
        self.assertEqual(len(subscribed_mock.mock_calls), 1)
class SubscriptionManagerIsSubscribedTest(TestCase):
    """Verify is_subscribed dispatches to the right private helper.

    The dispatched helpers themselves are covered by their own test cases.
    """
    @patch('entity_subscription.models.SubscriptionManager._is_subscribed_individual')
    def test_individual(self, subscribed_mock):
        src, med, who = N(Source), N(Medium), N(Entity)
        Subscription.objects.is_subscribed(src, med, who)
        self.assertEqual(len(subscribed_mock.mock_calls), 1)

    @patch('entity_subscription.models.SubscriptionManager._is_subscribed_group')
    def test_group(self, subscribed_mock):
        src, med, who, content_type = N(Source), N(Medium), N(Entity), N(ContentType)
        Subscription.objects.is_subscribed(src, med, who, content_type)
        self.assertEqual(len(subscribed_mock.mock_calls), 1)
class SubscriptionManagerMediumsSubscribedIndividualTest(TestCase):
    """Tests for SubscriptionManager._mediums_subscribed_individual."""
    def setUp(self):
        self.medium_1, self.medium_2 = G(Medium), G(Medium)
        self.source_1, self.source_2 = G(Source), G(Source)

    def test_individual_subscription(self):
        subscriber = G(Entity)
        G(Subscription, entity=subscriber, medium=self.medium_1, source=self.source_1, subentity_kind=None)
        mediums = Subscription.objects._mediums_subscribed_individual(source=self.source_1, entity=subscriber)
        self.assertEqual(mediums.first(), self.medium_1)

    def test_group_subscription(self):
        kind = G(EntityKind)
        group = G(Entity)
        member = G(Entity, entity_kind=kind)
        G(EntityRelationship, super_entity=group, sub_entity=member)
        G(Subscription, entity=group, medium=self.medium_1, source=self.source_1, subentity_kind=kind)
        mediums = Subscription.objects._mediums_subscribed_individual(source=self.source_1, entity=member)
        self.assertEqual(mediums.first(), self.medium_1)

    def test_multiple_mediums(self):
        subscriber = G(Entity)
        for medium in (self.medium_1, self.medium_2):
            G(Subscription, entity=subscriber, medium=medium, source=self.source_1, subentity_kind=None)
        mediums = Subscription.objects._mediums_subscribed_individual(source=self.source_1, entity=subscriber)
        self.assertEqual(mediums.count(), 2)

    def test_unsubscribed(self):
        subscriber = G(Entity)
        for medium in (self.medium_1, self.medium_2):
            G(Subscription, entity=subscriber, medium=medium, source=self.source_1, subentity_kind=None)
        # Unsubscribing from medium_1 must remove it from the result.
        G(Unsubscribe, entity=subscriber, medium=self.medium_1, source=self.source_1)
        mediums = Subscription.objects._mediums_subscribed_individual(source=self.source_1, entity=subscriber)
        self.assertEqual(mediums.count(), 1)
        self.assertEqual(mediums.first(), self.medium_2)

    def test_filters_by_source(self):
        subscriber = G(Entity)
        G(Subscription, entity=subscriber, medium=self.medium_1, source=self.source_1, subentity_kind=None)
        G(Subscription, entity=subscriber, medium=self.medium_2, source=self.source_2, subentity_kind=None)
        mediums = Subscription.objects._mediums_subscribed_individual(source=self.source_1, entity=subscriber)
        self.assertEqual(mediums.count(), 1)
class SubscriptionManagerMediumsSubscribedGroup(TestCase):
    """Tests for SubscriptionManager._mediums_subscribed_group."""
    def setUp(self):
        self.ek = G(EntityKind)
        self.source_1 = G(Source)
        self.source_2 = G(Source)
        self.medium_1 = G(Medium)
        self.medium_2 = G(Medium)

    def test_one_subscription_matches_across_supers(self):
        super_1 = G(Entity)
        super_2 = G(Entity)
        sub = G(Entity, entity_kind=self.ek)
        G(EntityRelationship, super_entity=super_1, sub_entity=sub)
        G(EntityRelationship, super_entity=super_2, sub_entity=sub)
        G(Subscription, entity=super_1, medium=self.medium_1, source=self.source_1, subentity_kind=self.ek)
        mediums = Subscription.objects._mediums_subscribed_group(self.source_1, super_2, self.ek)
        self.assertEqual(mediums.count(), 1)
        self.assertEqual(mediums.first(), self.medium_1)

    # Renamed from test_multiple_subscriptions_match_acorss_supers ("acorss" typo).
    def test_multiple_subscriptions_match_across_supers(self):
        super_1 = G(Entity)
        super_2 = G(Entity)
        super_3 = G(Entity)
        sub = G(Entity, entity_kind=self.ek)
        G(EntityRelationship, super_entity=super_1, sub_entity=sub)
        G(EntityRelationship, super_entity=super_2, sub_entity=sub)
        G(EntityRelationship, super_entity=super_3, sub_entity=sub)
        G(Subscription, entity=super_1, medium=self.medium_1, source=self.source_1, subentity_kind=self.ek)
        G(Subscription, entity=super_2, medium=self.medium_2, source=self.source_1, subentity_kind=self.ek)
        mediums = Subscription.objects._mediums_subscribed_group(self.source_1, super_3, self.ek)
        self.assertEqual(mediums.count(), 2)

    def test_filters_by_source(self):
        super_1 = G(Entity)
        super_2 = G(Entity)
        sub = G(Entity, entity_kind=self.ek)
        G(EntityRelationship, super_entity=super_1, sub_entity=sub)
        G(EntityRelationship, super_entity=super_2, sub_entity=sub)
        G(Subscription, entity=super_1, medium=self.medium_1, source=self.source_1, subentity_kind=self.ek)
        G(Subscription, entity=super_1, medium=self.medium_2, source=self.source_2, subentity_kind=self.ek)
        mediums = Subscription.objects._mediums_subscribed_group(self.source_1, super_2, self.ek)
        self.assertEqual(mediums.count(), 1)
        self.assertEqual(mediums.first(), self.medium_1)

    def test_filters_by_super_entity_intersections(self):
        super_1 = G(Entity)
        super_2 = G(Entity)
        super_3 = G(Entity)
        sub = G(Entity, entity_kind=self.ek)
        # sub belongs to super_1 and super_3, but NOT super_2.
        G(EntityRelationship, super_entity=super_1, sub_entity=sub)
        G(EntityRelationship, super_entity=super_3, sub_entity=sub)
        G(Subscription, entity=super_1, medium=self.medium_1, source=self.source_1, subentity_kind=self.ek)
        G(Subscription, entity=super_2, medium=self.medium_2, source=self.source_1, subentity_kind=self.ek)
        mediums = Subscription.objects._mediums_subscribed_group(self.source_1, super_3, self.ek)
        self.assertEqual(mediums.count(), 1)
        self.assertEqual(mediums.first(), self.medium_1)
class SubscriptionManagerIsSubScribedIndividualTest(TestCase):
    """Tests for SubscriptionManager._is_subscribed_individual.

    NOTE(review): the class name misspells "Subscribed"; kept unchanged in case
    it is referenced by name elsewhere.
    """
    def setUp(self):
        self.ek = G(EntityKind)
        self.medium_1, self.medium_2 = G(Medium), G(Medium)
        self.source_1, self.source_2 = G(Source), G(Source)
        self.entity_1 = G(Entity, entity_kind=self.ek)
        self.entity_2 = G(Entity)
        G(EntityRelationship, sub_entity=self.entity_1, super_entity=self.entity_2)

    def _is_subscribed(self):
        """Query with the default source_1/medium_1/entity_1 combination."""
        return Subscription.objects._is_subscribed_individual(
            self.source_1, self.medium_1, self.entity_1
        )

    def test_is_subscribed_direct_subscription(self):
        G(Subscription, entity=self.entity_1, medium=self.medium_1, source=self.source_1, subentity_kind=None)
        self.assertTrue(self._is_subscribed())

    def test_is_subscribed_group_subscription(self):
        G(Subscription, entity=self.entity_2, medium=self.medium_1, source=self.source_1, subentity_kind=self.ek)
        self.assertTrue(self._is_subscribed())

    def test_filters_source(self):
        G(Subscription, entity=self.entity_1, medium=self.medium_1, source=self.source_2, subentity_kind=None)
        self.assertFalse(self._is_subscribed())

    def test_filters_medium(self):
        G(Subscription, entity=self.entity_1, medium=self.medium_2, source=self.source_1, subentity_kind=None)
        self.assertFalse(self._is_subscribed())

    def test_super_entity_means_not_subscribed(self):
        # A group subscription on the entity itself does not subscribe it individually.
        G(Subscription, entity=self.entity_1, medium=self.medium_1, source=self.source_1, subentity_kind=self.ek)
        self.assertFalse(self._is_subscribed())

    def test_not_subscribed(self):
        self.assertFalse(self._is_subscribed())

    def test_unsubscribed(self):
        G(Subscription, entity=self.entity_1, medium=self.medium_1, source=self.source_1, subentity_kind=None)
        G(Unsubscribe, entity=self.entity_1, medium=self.medium_1, source=self.source_1)
        self.assertFalse(self._is_subscribed())
class SubscriptionManagerIsSubscribedGroupTest(TestCase):
    """Tests for SubscriptionManager._is_subscribed_group."""
    def setUp(self):
        self.ek_1, self.ek_2 = G(EntityKind), G(EntityKind)
        self.medium_1, self.medium_2 = G(Medium), G(Medium)
        self.source_1, self.source_2 = G(Source), G(Source)
        self.entity_1 = G(Entity, entity_kind=self.ek_1)  # sub
        self.entity_2 = G(Entity)  # super
        self.entity_3 = G(Entity)  # super
        self.entity_4 = G(Entity, entity_kind=self.ek_2)  # sub
        # Both sub entities belong to both super entities.
        for sub_entity in (self.entity_1, self.entity_4):
            for super_entity in (self.entity_2, self.entity_3):
                G(EntityRelationship, sub_entity=sub_entity, super_entity=super_entity)

    def _subscribe_entity_2(self):
        """Group-subscribe entity_2's ek_1 members via medium_1/source_1."""
        G(Subscription, entity=self.entity_2, medium=self.medium_1, source=self.source_1, subentity_kind=self.ek_1)

    def test_one_subscription_matches_across_supers(self):
        self._subscribe_entity_2()
        self.assertTrue(Subscription.objects._is_subscribed_group(
            source=self.source_1, medium=self.medium_1, entity=self.entity_3, subentity_kind=self.ek_1
        ))

    def test_filters_source(self):
        self._subscribe_entity_2()
        self.assertFalse(Subscription.objects._is_subscribed_group(
            source=self.source_2, medium=self.medium_1, entity=self.entity_3, subentity_kind=self.ek_1
        ))

    def test_filters_medium(self):
        self._subscribe_entity_2()
        self.assertFalse(Subscription.objects._is_subscribed_group(
            source=self.source_1, medium=self.medium_2, entity=self.entity_3, subentity_kind=self.ek_1
        ))

    def test_filters_subentity_kind(self):
        self._subscribe_entity_2()
        self.assertFalse(Subscription.objects._is_subscribed_group(
            source=self.source_1, medium=self.medium_1, entity=self.entity_3, subentity_kind=self.ek_2
        ))
class SubscriptionFilterNotSubscribedTest(TestCase):
    """Tests for SubscriptionManager.filter_not_subscribed."""
    def setUp(self):
        self.super_ek = G(EntityKind)
        self.sub_ek = G(EntityKind)
        self.super_e1 = G(Entity, entity_kind=self.super_ek)
        self.super_e2 = G(Entity, entity_kind=self.super_ek)
        self.sub_e1 = G(Entity, entity_kind=self.sub_ek)
        self.sub_e2 = G(Entity, entity_kind=self.sub_ek)
        self.sub_e3 = G(Entity, entity_kind=self.sub_ek)
        self.sub_e4 = G(Entity, entity_kind=self.sub_ek)
        self.ind_e1 = G(Entity, entity_kind=self.sub_ek)
        self.ind_e2 = G(Entity, entity_kind=self.sub_ek)
        self.medium = G(Medium)
        self.source = G(Source)
        # super_e1 owns sub_e1/sub_e2; super_e2 owns sub_e3/sub_e4.
        relationships = [
            (self.sub_e1, self.super_e1),
            (self.sub_e2, self.super_e1),
            (self.sub_e3, self.super_e2),
            (self.sub_e4, self.super_e2),
        ]
        for sub_entity, super_entity in relationships:
            G(EntityRelationship, sub_entity=sub_entity, super_entity=super_entity)

    def _subscribe_defaults(self):
        """Individual subscription for ind_e1 plus a group one for super_e1."""
        G(Subscription, entity=self.ind_e1, source=self.source, medium=self.medium, subentity_kind=None)
        G(Subscription, entity=self.super_e1, source=self.source, medium=self.medium, subentity_kind=self.sub_ek)

    def test_group_and_individual_subscription(self):
        self._subscribe_defaults()
        candidates = [self.sub_e1, self.sub_e3, self.ind_e1, self.ind_e2]
        remaining = Subscription.objects.filter_not_subscribed(self.source, self.medium, candidates)
        self.assertEqual(set(remaining.values_list('id', flat=True)), {self.sub_e1.id, self.ind_e1.id})

    def test_unsubscribe_filtered_out(self):
        self._subscribe_defaults()
        G(Unsubscribe, entity=self.sub_e1, source=self.source, medium=self.medium)
        candidates = [self.sub_e1, self.sub_e2, self.sub_e3, self.ind_e1, self.ind_e2]
        remaining = Subscription.objects.filter_not_subscribed(self.source, self.medium, candidates)
        self.assertEqual(set(remaining.values_list('id', flat=True)), {self.sub_e2.id, self.ind_e1.id})

    def test_entities_not_passed_in_filtered(self):
        self._subscribe_defaults()
        candidates = [se for se in self.super_e1.get_sub_entities() if se.entity_kind == self.sub_ek]
        remaining = Subscription.objects.filter_not_subscribed(self.source, self.medium, candidates)
        self.assertEqual(set(remaining), set(candidates))

    def test_different_entity_kinds_raises_error(self):
        # Mixing entity kinds in the input is rejected.
        with self.assertRaises(ValueError):
            Subscription.objects.filter_not_subscribed(self.source, self.medium, [self.sub_e1, self.super_e1])
class UnsubscribeManagerIsUnsubscribed(TestCase):
    """Tests for UnsubscribeManager.is_unsubscribed."""
    def _fixtures(self):
        """Create a fresh (entity, source, medium) triple."""
        return G(Entity), G(Source), G(Medium)

    def test_is_unsubscribed(self):
        entity, source, medium = self._fixtures()
        G(Unsubscribe, entity=entity, source=source, medium=medium)
        self.assertTrue(Unsubscribe.objects.is_unsubscribed(source, medium, entity))

    def test_is_not_unsubscribed(self):
        entity, source, medium = self._fixtures()
        self.assertFalse(Unsubscribe.objects.is_unsubscribed(source, medium, entity))
class NumberOfQueriesTests(TestCase):
    """Pins the number of DB queries issued by the Subscription manager helpers."""

    def test_query_count(self):
        # Fixture: two sub-entities (e0, e1) each related to five super
        # entities (e2..e6); every super entity holds a group subscription
        # for every (source, medium) pair.
        ek = G(EntityKind)
        e0 = G(Entity, entity_kind=ek)   # sub
        e1 = G(Entity, entity_kind=ek)   # sub
        e2 = G(Entity)   # super
        e3 = G(Entity)   # super
        e4 = G(Entity)   # super
        e5 = G(Entity)   # super
        e6 = G(Entity)   # super
        m1, m2, m3, m4, m5 = G(Medium), G(Medium), G(Medium), G(Medium), G(Medium)
        s1, s2 = G(Source), G(Source)
        G(EntityRelationship, sub_entity=e1, super_entity=e2, subentity_kind=ek)
        G(EntityRelationship, sub_entity=e1, super_entity=e3, subentity_kind=ek)
        G(EntityRelationship, sub_entity=e1, super_entity=e4, subentity_kind=ek)
        G(EntityRelationship, sub_entity=e1, super_entity=e5, subentity_kind=ek)
        G(EntityRelationship, sub_entity=e1, super_entity=e6, subentity_kind=ek)
        G(EntityRelationship, sub_entity=e0, super_entity=e2, subentity_kind=ek)
        G(EntityRelationship, sub_entity=e0, super_entity=e3, subentity_kind=ek)
        G(EntityRelationship, sub_entity=e0, super_entity=e4, subentity_kind=ek)
        G(EntityRelationship, sub_entity=e0, super_entity=e5, subentity_kind=ek)
        G(EntityRelationship, sub_entity=e0, super_entity=e6, subentity_kind=ek)
        G(Subscription, entity=e2, subentity_kind=ek, source=s1, medium=m1)
        G(Subscription, entity=e3, subentity_kind=ek, source=s1, medium=m2)
        G(Subscription, entity=e4, subentity_kind=ek, source=s1, medium=m3)
        G(Subscription, entity=e5, subentity_kind=ek, source=s1, medium=m4)
        G(Subscription, entity=e6, subentity_kind=ek, source=s1, medium=m5)
        G(Subscription, entity=e2, subentity_kind=ek, source=s2, medium=m1)
        G(Subscription, entity=e3, subentity_kind=ek, source=s2, medium=m2)
        G(Subscription, entity=e4, subentity_kind=ek, source=s2, medium=m3)
        G(Subscription, entity=e5, subentity_kind=ek, source=s2, medium=m4)
        G(Subscription, entity=e6, subentity_kind=ek, source=s2, medium=m5)
        # Each private helper must stay within its query budget; list() and
        # the assertNumQueries context force evaluation of lazy querysets.
        with self.assertNumQueries(1):
            mediums = Subscription.objects._mediums_subscribed_individual(source=s1, entity=e1)
            list(mediums)
        with self.assertNumQueries(1):
            mediums = Subscription.objects._mediums_subscribed_group(source=s1, entity=e6, subentity_kind=ek)
            list(mediums)
        # NOTE(review): _is_subscribed_individual is allowed two queries
        # here while the group variant needs one -- confirm this asymmetry
        # is intentional in the manager implementation.
        with self.assertNumQueries(2):
            Subscription.objects._is_subscribed_individual(source=s1, medium=m1, entity=e1)
        with self.assertNumQueries(1):
            Subscription.objects._is_subscribed_group(source=s1, medium=m1, entity=e6, subentity_kind=ek)
        with self.assertNumQueries(1):
            entities = [e0, e1]
            list(Subscription.objects.filter_not_subscribed(source=s1, medium=m1, entities=entities))
class UnicodeMethodTests(TestCase):
    """Tests for the (Python 2 style) __unicode__ rendering of each model."""

    def setUp(self):
        self.entity = G(
            Entity, entity_meta={'name': 'Entity Test'}, display_name='Entity Test'
        )
        self.medium = G(
            Medium, name='test', display_name='Test', description='A test medium.'
        )
        self.source = G(
            Source, name='test', display_name='Test', description='A test source.'
        )

    def test_subscription_unicode(self):
        # "<entity> to <medium> by <source>"
        sub = G(Subscription, entity=self.entity, medium=self.medium, source=self.source)
        expected_unicode = 'Entity Test to Test by Test'
        self.assertEqual(sub.__unicode__(), expected_unicode)

    def test_unsubscribe_unicode(self):
        # "<entity> from <medium> by <source>"
        unsub = G(Unsubscribe, entity=self.entity, medium=self.medium, source=self.source)
        expected_unicode = 'Entity Test from Test by Test'
        self.assertEqual(unsub.__unicode__(), expected_unicode)

    def test_medium_unicode(self):
        # Mediums render as their display_name.
        expected_unicode = 'Test'
        self.assertEqual(self.medium.__unicode__(), expected_unicode)

    def test_source_unicode(self):
        # Sources render as their display_name.
        expected_unicode = 'Test'
        self.assertEqual(self.source.__unicode__(), expected_unicode)
| |
from __future__ import print_function
import unittest
import numpy as np
import sqaod as sq
import sqaod.common as common
from tests.example_problems import *
from math import log, exp
class TestBipartiteGraphAnnealerBase:
    """Shared test cases for sqaod bipartite-graph annealers.

    Deliberately not a unittest.TestCase itself: concrete subclasses mix
    this class with unittest.TestCase and supply the solver package
    (sq.py / sq.cpu / sq.cuda) and the floating-point precision.
    """

    def __init__(self, anpkg, dtype) :
        # anpkg: sqaod solver package providing bipartite_graph_annealer().
        # dtype: numpy float type used by the annealer (float32 or float64).
        self.anpkg = anpkg
        self.dtype = dtype
        # Comparison tolerance, scaled to the precision in use.
        self.epu = 1.e-6 if dtype == np.float32 else 1.e-12

    def new_annealer(self, N0, N1, m) :
        """Create an annealer over a random (N0, N1) QUBO with m trotters."""
        an = self.anpkg.bipartite_graph_annealer(dtype=self.dtype)
        b0, b1, W = bipartite_graph_random(N0, N1, self.dtype)
        an.set_qubo(b0, b1, W)
        an.set_preferences(n_trotters = m)
        return an

    def test_calling_sequence(self) :
        # Smoke test: the full public API call sequence must not raise.
        N0, N1 = 200, 100
        m = 100
        an = self.new_annealer(N0, N1, m)
        an.set_preferences(algorithm = sq.algorithm.default)
        an.seed(0)
        an.prepare()
        an.randomize_spin()
        an.anneal_one_step(1, 1)
        an.calculate_E()
        an.make_solution()
        an.get_E()
        an.get_problem_size()
        an.get_preferences()
        xpair = an.get_x()
        h0, h1, J, c = an.get_hamiltonian()
        qpair = an.get_q()

    def test_problem_size(self) :
        N0, N1 = 100, 110
        m = 10
        an = self.new_annealer(N0, N1, m)
        N0out, N1out = an.get_problem_size()
        self.assertEqual(N0, N0out)
        self.assertEqual(N1, N1out)

    def test_set_n_trotters(self) :
        an = self.new_annealer(10, 10, 10)
        an.set_preferences(n_trotters = 3)
        prefs = an.get_preferences()
        self.assertEqual(prefs['n_trotters'], 3)

    def test_get_hamiltonian(self) :
        # The annealer's hamiltonian must match the pure-python reference
        # implementation in sq.py.formulas.
        N0, N1 = 10, 11
        m = 10
        an = self.anpkg.bipartite_graph_annealer(dtype=self.dtype)
        b0, b1, W = bipartite_graph_random(N0, N1, self.dtype)
        an.set_qubo(b0, b1, W)
        h00, h01, J0, c0 = an.get_hamiltonian()
        h10, h11, J1, c1 = sq.py.formulas.bipartite_graph_calculate_hamiltonian(b0, b1, W)
        # print(h00, h10)
        self.assertTrue(np.allclose(h00, h10, atol=self.epu))
        self.assertTrue(np.allclose(h01, h11))
        self.assertTrue(np.allclose(J0, J1))
        self.assertTrue(np.allclose(c0, c1)) #, atol=self.epu))

    def test_set_hamiltonian(self) :
        # set_hamiltonian followed by get_hamiltonian must round-trip.
        N0, N1 = 10, 11
        m = 10
        b0, b1, W = bipartite_graph_random(N0, N1, self.dtype)
        h00, h01, J0, c0 = sq.py.formulas.bipartite_graph_calculate_hamiltonian(b0, b1, W)
        an = self.anpkg.bipartite_graph_annealer(dtype=self.dtype)
        an.set_hamiltonian(h00, h01, J0, c0)
        an.prepare()
        h10, h11, J1, c1 = an.get_hamiltonian()
        #print(h00, h10)
        self.assertTrue(np.allclose(h00, h10, atol=self.epu))
        self.assertTrue(np.allclose(h01, h11))
        self.assertTrue(np.allclose(J0, J1))
        self.assertTrue(np.allclose(c0, c1)) #, atol=self.epu))

    def test_min_energy(self):
        # With all spins forced to -1 (all bits 0) the QUBO energy is 0.
        N0, N1 = 200, 350
        m = 1
        an = self.new_annealer(N0, N1, m)
        an.prepare()
        q0 = np.ndarray((N0), np.int8)
        q1 = np.ndarray((N1), np.int8)
        q0[:] = -1
        q1[:] = -1
        an.set_q((q0, q1))
        an.calculate_E()
        E = an.get_E()
        res = np.allclose(E, 0., atol=self.epu)
        if not res :
            print(E)
        self.assertTrue(res)
        # print(an.E)

    def test_qubo_energy(self):
        # Exhaustively compare the annealer energy against the brute-force
        # QUBO energy b0.x0 + b1.x1 + x1.W.x0 over all bit combinations.
        N0, N1 = 8, 5
        m = 1
        an = self.anpkg.bipartite_graph_annealer(dtype=self.dtype)
        b0, b1, W = bipartite_graph_random(N0, N1, self.dtype)
        an.set_qubo(b0, b1, W)
        an.set_preferences(n_trotters=m)
        an.prepare()

        iMax = 1 << N0
        jMax = 1 << N1

        for i in range(iMax) :
            x0 = common.create_bitset_sequence((i,), N0)
            q0 = sq.bit_to_spin(x0)
            for j in range(jMax) :
                x1 = common.create_bitset_sequence((j,), N1)
                q1 = sq.bit_to_spin(x1)
                Ebf = np.dot(b0, x0.transpose()) + np.dot(b1, x1.transpose()) \
                      + np.dot(x1, np.matmul(W, x0.transpose()))
                an.set_q((q0, q1))
                an.calculate_E()
                Ean = an.get_E()
                if not np.allclose(Ebf, Ean, atol=self.epu) :
                    print(i, j, Ebf, Ean)
                self.assertTrue(np.allclose(Ebf, Ean, atol=self.epu))

    def test_set_q(self):
        # A single (q0, q1) pair set via set_q is broadcast to all trotters.
        N0, N1 = 100, 50
        m = 150
        an = self.new_annealer(N0, N1, m)
        an.prepare()
        q0in = 2 * np.random.randint(0, 2, N0) - 1
        q1in = 2 * np.random.randint(0, 2, N1) - 1
        an.set_q((q0in, q1in))
        qout = an.get_q()

        res = True
        for qpair in qout :
            res &= np.allclose(q0in, qpair[0]) and np.allclose(q1in, qpair[1])
        self.assertTrue(res)

    def test_set_qset(self):
        # set_qset installs one distinct (q0, q1) pair per trotter.
        N0, N1 = 100, 50
        m = 150
        an = self.new_annealer(N0, N1, m)
        an.prepare()
        an.set_preferences(n_trotters = m)
        qsetin = []
        for loop in range(0, m) :
            q0 = 2 * np.random.randint(0, 2, N0) - 1
            q1 = 2 * np.random.randint(0, 2, N1) - 1
            qsetin.append((q0.astype(np.int8), q1.astype(np.int8)))
        an.set_qset(qsetin)
        qout = an.get_q()
        self.assertTrue(len(qsetin) == len(qout))

        res = True
        for qinpair, qoutpair in zip(qsetin, qout) :
            res &= np.allclose(qinpair[0], qoutpair[0]) and np.allclose(qinpair[1], qoutpair[1])
        self.assertTrue(res)

    def anneal(self, an) :
        """Run a full annealing schedule on `an` (G decays geometrically)."""
        an.prepare()
        an.randomize_spin()
        Ginit, Gfin = 5, 0.01
        beta = 1. / 0.0001
        nSteps = 100

        G = Ginit
        # Per-step decay factor so G reaches Gfin after nSteps steps.
        tau = exp(log(Gfin / Ginit) / nSteps)
        for loop in range(0, nSteps) :
            an.anneal_one_step(G, beta)
            G *= tau
        an.make_solution()

    def _test_anneal_minimize(self, algorithm, m) :
        # All-ones QUBO: the minimum (all bits 0) has energy exactly 0.
        N0, N1 = 10, 8
        an = self.anpkg.bipartite_graph_annealer(dtype=self.dtype)
        b0 = np.ones((N0), dtype=self.dtype)
        b1 = np.ones((N1), dtype=self.dtype)
        W = np.ones((N1, N0), dtype=self.dtype)
        an.set_qubo(b0, b1, W, sq.minimize)
        an.set_preferences(n_trotters = m)
        an.set_preferences(algorithm = algorithm)

        self.anneal(an)
        E = an.get_E()
        self.assertEqual(E.min(), 0)

    def _test_anneal_hamiltonian(self, algorithm, m) :
        # Same problem as _test_anneal_minimize, fed in hamiltonian form.
        N0, N1 = 10, 8
        an = self.anpkg.bipartite_graph_annealer(dtype=self.dtype)
        b0 = np.ones((N0), dtype=self.dtype)
        b1 = np.ones((N1), dtype=self.dtype)
        W = np.ones((N1, N0), dtype=self.dtype)
        h0, h1, J, c = sq.py.formulas.bipartite_graph_calculate_hamiltonian(b0, b1, W)
        an.set_hamiltonian(h0, h1, J, c)
        an.set_preferences(n_trotters = m)
        an.set_preferences(algorithm = algorithm)

        self.anneal(an)
        E = an.get_E()
        self.assertEqual(E.min(), 0)

    def _test_anneal_maximize(self, algorithm, m) :
        # All-minus-ones QUBO maximized: the best energy is again 0.
        N0, N1 = 10, 8
        an = self.anpkg.bipartite_graph_annealer(dtype=self.dtype)
        b0 = - np.ones((N0), dtype=self.dtype)
        b1 = - np.ones((N1), dtype=self.dtype)
        W = - np.ones((N1, N0), dtype=self.dtype)
        an.set_qubo(b0, b1, W, sq.maximize)
        an.set_preferences(n_trotters = m)
        an.set_preferences(algorithm = algorithm)

        self.anneal(an)
        E = an.get_E()
        self.assertEqual(E.max(), 0)

    def test_anneal_minimize(self) :
        self._test_anneal_minimize(sq.algorithm.naive, 6)
        self._test_anneal_minimize(sq.algorithm.coloring, 6)
        self._test_anneal_minimize(sq.algorithm.default, 6)

    def test_anneal_maximize(self) :
        self._test_anneal_maximize(sq.algorithm.naive, 6)
        self._test_anneal_maximize(sq.algorithm.coloring, 6)
        self._test_anneal_maximize(sq.algorithm.default, 6)

    def test_anneal_hamiltonian(self) :
        self._test_anneal_hamiltonian(sq.algorithm.naive, 6)
        self._test_anneal_hamiltonian(sq.algorithm.coloring, 6)
        self._test_anneal_hamiltonian(sq.algorithm.default, 6)

    def test_set_algorithm(self) :
        # Every explicitly requested algorithm should stick.
        ann = self.new_annealer(10, 10, 1)
        ann.set_preferences(algorithm = sq.algorithm.naive)
        pref = ann.get_preferences()
        self.assertEqual(pref['algorithm'], sq.algorithm.naive)
        ann.set_preferences(algorithm = sq.algorithm.coloring)
        pref = ann.get_preferences()
        self.assertEqual(pref['algorithm'], sq.algorithm.coloring)
        ann.set_preferences(algorithm = sq.algorithm.sa_naive)
        pref = ann.get_preferences()
        self.assertEqual(pref['algorithm'], sq.algorithm.sa_naive)
        ann.set_preferences(algorithm = sq.algorithm.sa_coloring)
        pref = ann.get_preferences()
        self.assertEqual(pref['algorithm'], sq.algorithm.sa_coloring)

    def test_anneal_sa_naive(self) :
        self._test_anneal_minimize(sq.algorithm.sa_naive, 1)
        self._test_anneal_maximize(sq.algorithm.sa_naive, 2)
        self._test_anneal_hamiltonian(sq.algorithm.sa_naive, 1)

    def test_anneal_sa_coloring(self) :
        self._test_anneal_minimize(sq.algorithm.sa_coloring, 1)
        self._test_anneal_maximize(sq.algorithm.sa_coloring, 1)
        self._test_anneal_hamiltonian(sq.algorithm.sa_coloring, 1)

    def test_reuse_solver(self) :
        # test no errors
        ann = self.new_annealer(10, 10, 1)
        self.anneal(ann)
        self.anneal(ann)
# py
class TestPyBipartiteGraphAnnealer(TestBipartiteGraphAnnealerBase, unittest.TestCase) :
    """Pure-python (sq.py) annealer, float64 only."""

    def __init__(self, testFunc) :
        TestBipartiteGraphAnnealerBase.__init__(self, sq.py, np.float64)
        unittest.TestCase.__init__(self, testFunc)

    def test_set_default_algorithm(self):
        # For the python backend, 'default' resolves to the naive algorithm.
        ann = self.new_annealer(10, 10, 10)
        ann.set_preferences(algorithm = sq.algorithm.default)
        pref = ann.get_preferences()
        self.assertEqual(pref['algorithm'], sq.algorithm.naive)
        # With a single trotter, prepare() switches to the SA variant.
        ann.set_preferences(algorithm = sq.algorithm.naive, n_trotters = 1)
        ann.prepare()
        pref = ann.get_preferences()
        self.assertEqual(pref['algorithm'], sq.algorithm.sa_naive)
# native
class TestNativeBipartiteGraphAnnealerBase(TestBipartiteGraphAnnealerBase) :
    """Extra checks shared by the native (CPU/CUDA) backends."""

    def __init__(self, anpkg, dtype) :
        TestBipartiteGraphAnnealerBase.__init__(self, anpkg, dtype)

    def test_precision(self) :
        # Native annealers expose the dtype they were built with.
        an = self.new_annealer(10, 10, 1)
        self.assertEqual(an.dtype, self.dtype)

    def test_set_default_algorithm(self):
        # For native backends, 'default' resolves to the coloring algorithm.
        ann = self.new_annealer(10, 10, 10)
        ann.set_preferences(algorithm = sq.algorithm.default)
        pref = ann.get_preferences()
        self.assertEqual(pref['algorithm'], sq.algorithm.coloring)
        # With a single trotter, prepare() switches to the SA variant.
        ann.set_preferences(algorithm = sq.algorithm.naive, n_trotters = 1)
        ann.prepare()
        pref = ann.get_preferences()
        self.assertEqual(pref['algorithm'], sq.algorithm.sa_coloring)
# cpu
class TestCPUBipartiteGraphAnnealerBase(TestNativeBipartiteGraphAnnealerBase) :
    """CPU backend (sq.cpu) test base."""

    def __init__(self, dtype) :
        TestNativeBipartiteGraphAnnealerBase.__init__(self, sq.cpu, dtype)

    def test_device_pref(self) :
        # The CPU annealer reports 'cpu' as its device preference.
        an = self.new_annealer(10, 10, 1)
        prefs = an.get_preferences()
        self.assertEqual(prefs['device'], 'cpu')
class TestCPUBipartiteGraphAnnealerFP32(TestCPUBipartiteGraphAnnealerBase, unittest.TestCase) :
    """CPU backend, float32 precision."""

    def __init__(self, testFunc) :
        TestCPUBipartiteGraphAnnealerBase.__init__(self, np.float32)
        unittest.TestCase.__init__(self, testFunc)
class TestCPUBipartiteGraphAnnealerFP64(TestCPUBipartiteGraphAnnealerBase, unittest.TestCase) :
    """CPU backend, float64 precision."""

    def __init__(self, testFunc) :
        TestCPUBipartiteGraphAnnealerBase.__init__(self, np.float64)
        unittest.TestCase.__init__(self, testFunc)
# CUDA test classes are only defined when a CUDA device is available, so
# unittest discovery silently skips them on CPU-only machines.
if sq.is_cuda_available() :
    # cuda
    class TestCUDABipartiteGraphAnnealerBase(TestNativeBipartiteGraphAnnealerBase) :
        """CUDA backend (sq.cuda) test base with CUDA-specific overrides."""

        def __init__(self, dtype) :
            TestNativeBipartiteGraphAnnealerBase.__init__(self, sq.cuda, dtype)

        def test_device_pref(self) :
            # The CUDA annealer reports 'cuda' as its device preference.
            an = self.new_annealer(10, 10, 1)
            prefs = an.get_preferences()
            self.assertEqual(prefs['device'], 'cuda')

        def test_set_algorithm(self) :
            # Per the assertions below, requesting default or naive on the
            # CUDA backend yields the coloring algorithm; only the coloring
            # family is honored.
            ann = self.new_annealer(10, 10, 1)
            ann.set_preferences(algorithm = sq.algorithm.default)
            pref = ann.get_preferences()
            self.assertEqual(pref['algorithm'], sq.algorithm.coloring)
            ann.set_preferences(algorithm = sq.algorithm.naive)
            pref = ann.get_preferences()
            self.assertEqual(pref['algorithm'], sq.algorithm.coloring)
            ann.set_preferences(algorithm = sq.algorithm.coloring)
            pref = ann.get_preferences()
            self.assertEqual(pref['algorithm'], sq.algorithm.coloring)
            ann.set_preferences(algorithm = sq.algorithm.sa_coloring)
            pref = ann.get_preferences()
            self.assertEqual(pref['algorithm'], sq.algorithm.sa_coloring)

        @unittest.skip('sa_naive is not implemented in CUDABipartiteGraphAnnealer')
        def test_anneal_sa_naive(self) :
            pass

        def test_anneal_sa_coloring(self) :
            self._test_anneal_minimize(sq.algorithm.sa_coloring, 1)
            self._test_anneal_maximize(sq.algorithm.sa_coloring, 1)
            self._test_anneal_hamiltonian(sq.algorithm.sa_coloring, 1)

    class TestCUDABipartiteGraphAnnealerFP32(TestCUDABipartiteGraphAnnealerBase, unittest.TestCase) :
        """CUDA backend, float32 precision."""

        def __init__(self, testFunc) :
            TestCUDABipartiteGraphAnnealerBase.__init__(self, np.float32)
            unittest.TestCase.__init__(self, testFunc)

    class TestCUDABipartiteGraphAnnealerFP64(TestCUDABipartiteGraphAnnealerBase, unittest.TestCase) :
        """CUDA backend, float64 precision."""

        def __init__(self, testFunc) :
            TestCUDABipartiteGraphAnnealerBase.__init__(self, np.float64)
            unittest.TestCase.__init__(self, testFunc)
if __name__ == '__main__':
    # Fixed seed keeps the randomized QUBO fixtures reproducible run-to-run.
    np.random.seed(0)
    unittest.main()
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base testing class for strategies that require multiple nodes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import json
import os
import six
import subprocess
import sys
import threading
import numpy as np
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error: # pylint: disable=invalid-name
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator
from tensorflow.python.training import server_lib
from tensorflow.python.util import nest
original_run_std_server = dc._run_std_server # pylint: disable=protected-access
ASSIGNED_PORTS = set()
lock = threading.Lock()
def pick_unused_port():
  """Returns an unused and unassigned local port."""
  if _portpicker_import_error:
    raise _portpicker_import_error  # pylint: disable=raising-bad-type

  global ASSIGNED_PORTS
  with lock:
    # Keep drawing ports until we find one that is high enough and has not
    # been claimed by another test in this process.
    port = portpicker.pick_unused_port()
    while port <= 10000 or port in ASSIGNED_PORTS:
      port = portpicker.pick_unused_port()
    ASSIGNED_PORTS.add(port)
    logging.info('Using local port %r', port)
    return port
def _create_cluster(num_workers,
                    num_ps,
                    has_chief=False,
                    has_eval=False,
                    protocol='grpc',
                    worker_config=None,
                    ps_config=None,
                    eval_config=None):
  """Creates and starts local servers and returns the cluster_spec dict.

  Args:
    num_workers: number of worker servers to start.
    num_ps: number of parameter servers to start.
    has_chief: whether to start a dedicated chief server.
    has_eval: whether to start a dedicated evaluator server.
    protocol: communication protocol for the servers (e.g. 'grpc').
    worker_config: optional ConfigProto for worker (and chief) servers.
    ps_config: optional ConfigProto for ps servers.
    eval_config: optional ConfigProto for the evaluator server.

  Returns:
    The cluster spec dict mapping job names to 'localhost:port' address
    lists. The in-process servers are started as a side effect; they
    cannot be shut down for the life of the process.
  """
  if _portpicker_import_error:
    raise _portpicker_import_error  # pylint: disable=raising-bad-type
  worker_ports = [pick_unused_port() for _ in range(num_workers)]
  ps_ports = [pick_unused_port() for _ in range(num_ps)]

  cluster_dict = {}
  if num_workers > 0:
    cluster_dict['worker'] = ['localhost:%s' % port for port in worker_ports]
  if num_ps > 0:
    cluster_dict['ps'] = ['localhost:%s' % port for port in ps_ports]
  if has_eval:
    cluster_dict['evaluator'] = ['localhost:%s' % pick_unused_port()]
  if has_chief:
    cluster_dict['chief'] = ['localhost:%s' % pick_unused_port()]

  cs = server_lib.ClusterSpec(cluster_dict)

  # start=True launches each in-process server immediately.
  for i in range(num_workers):
    server_lib.Server(
        cs,
        job_name='worker',
        protocol=protocol,
        task_index=i,
        config=worker_config,
        start=True)

  for i in range(num_ps):
    server_lib.Server(
        cs,
        job_name='ps',
        protocol=protocol,
        task_index=i,
        config=ps_config,
        start=True)

  if has_chief:
    # The chief reuses the worker config.
    server_lib.Server(
        cs,
        job_name='chief',
        protocol=protocol,
        task_index=0,
        config=worker_config,
        start=True)

  if has_eval:
    server_lib.Server(
        cs,
        job_name='evaluator',
        protocol=protocol,
        task_index=0,
        config=eval_config,
        start=True)

  return cluster_dict
def create_in_process_cluster(num_workers,
                              num_ps,
                              has_chief=False,
                              has_eval=False):
  """Create an in-process cluster that consists of only standard server.

  Args:
    num_workers: number of worker servers to start.
    num_ps: number of parameter servers to start.
    has_chief: whether to add a dedicated chief job.
    has_eval: whether to add a dedicated evaluator job.

  Returns:
    The cluster spec dict produced by `_create_cluster`.
  """
  # Leave some memory for cuda runtime.
  gpu_mem_frac = 0.7 / (num_workers + int(has_chief) + int(has_eval))
  worker_config = config_pb2.ConfigProto()
  worker_config.gpu_options.per_process_gpu_memory_fraction = gpu_mem_frac

  # Enable collective ops which has no impact on non-collective ops.
  # TODO(yuefengz, tucker): removing this after we move the initialization of
  # collective mgr to the session level.
  if has_chief:
    worker_config.experimental.collective_group_leader = (
        '/job:chief/replica:0/task:0')
  else:
    worker_config.experimental.collective_group_leader = (
        '/job:worker/replica:0/task:0')

  # Parameter servers do not need GPUs.
  ps_config = config_pb2.ConfigProto()
  ps_config.device_count['GPU'] = 0

  eval_config = config_pb2.ConfigProto()
  eval_config.experimental.collective_group_leader = ''

  # Create in-process servers. Once an in-process tensorflow server is created,
  # there is no way to terminate it. So we create one cluster per test process.
  # We could've started the server in another process, we could then kill that
  # process to terminate the server. The reasons why we don't want multiple
  # processes are
  # 1) it is more difficult to manage these processes;
  # 2) there is something global in CUDA such that if we initialize CUDA in the
  # parent process, the child process cannot initialize it again and thus cannot
  # use GPUs (https://stackoverflow.com/questions/22950047).
  return _create_cluster(
      num_workers,
      num_ps=num_ps,
      has_chief=has_chief,
      has_eval=has_eval,
      worker_config=worker_config,
      ps_config=ps_config,
      eval_config=eval_config,
      protocol='grpc')
def create_cluster_spec(has_chief=False,
                        num_workers=1,
                        num_ps=0,
                        has_eval=False):
  """Create a cluster spec with tasks with unused local ports."""
  if _portpicker_import_error:
    raise _portpicker_import_error  # pylint: disable=raising-bad-type

  def _local_addresses(count):
    # One fresh 'localhost:port' address per requested task.
    return ['localhost:%s' % pick_unused_port() for _ in range(count)]

  cluster_spec = {}
  if has_chief:
    cluster_spec['chief'] = _local_addresses(1)
  if num_workers:
    cluster_spec['worker'] = _local_addresses(num_workers)
  if num_ps:
    cluster_spec['ps'] = _local_addresses(num_ps)
  if has_eval:
    cluster_spec['evaluator'] = _local_addresses(1)
  return cluster_spec
class MultiWorkerTestBase(test.TestCase):
  """Base class for testing multi node strategy and dataset."""

  @classmethod
  def setUpClass(cls):
    """Create a local cluster with 2 workers."""
    cls._cluster_spec = create_in_process_cluster(num_workers=2, num_ps=1)
    cls._default_target = 'grpc://' + cls._cluster_spec['worker'][0]

  def setUp(self):
    # We only cache the session in one test because another test may have a
    # different session config or master target.
    self._thread_local = threading.local()
    self._thread_local.cached_session = None
    # Number of client threads that reported success; checked against the
    # thread count in _run_between_graph_clients.
    self._result = 0
    self._lock = threading.Lock()

  @contextlib.contextmanager
  def session(self, graph=None, config=None, target=None):
    """Create a test session with master target set to the testing cluster.

    Creates a test session that connects to the local testing cluster.

    Args:
      graph: Optional graph to use during the returned session.
      config: An optional config_pb2.ConfigProto to use to configure the
        session.
      target: the target of session to connect to.

    Yields:
      A Session object that should be used as a context manager to surround
      the graph building and execution code in a test case.
    """
    config = self._create_config(config)

    if target is None:
      target = self._default_target
    with session.Session(graph=graph, config=config, target=target) as sess:
      yield sess

  @contextlib.contextmanager
  # TODO(b/117573461): Overwrite self.evaluate() to use this function.
  def cached_session(self, graph=None, config=None, target=None):
    """Create a test session with master target set to the testing cluster.

    Creates a test session that connects to the local testing cluster.
    The session is only created once per test and then reused.

    Args:
      graph: Optional graph to use during the returned session.
      config: An optional config_pb2.ConfigProto to use to configure the
        session.
      target: the target of session to connect to.

    Yields:
      A Session object that should be used as a context manager to surround
      the graph building and execution code in a test case. Note that the
      session will live until the end of the test.
    """
    config = self._create_config(config)

    if target is None:
      target = self._default_target
    # Lazily create one session per thread and reuse it afterwards.
    if getattr(self._thread_local, 'cached_session', None) is None:
      self._thread_local.cached_session = session.Session(
          graph=None, config=config, target=target)
    sess = self._thread_local.cached_session
    with sess.graph.as_default(), sess.as_default():
      yield sess

  def _create_config(self, config):
    """Returns a (copied) session config with optimizations disabled."""
    if config is None:
      config = config_pb2.ConfigProto(allow_soft_placement=True)
    else:
      config = copy.deepcopy(config)
    # Don't perform optimizations for tests so we don't inadvertently run
    # gpu ops on cpu
    config.graph_options.optimizer_options.opt_level = -1
    config.graph_options.rewrite_options.constant_folding = (
        rewriter_config_pb2.RewriterConfig.OFF)

    return config

  def _run_client(self, client_fn, task_type, task_id, num_gpus, eager_mode,
                  *args, **kwargs):
    """Runs `client_fn` in the given context mode and tallies success."""
    if eager_mode:
      with context.eager_mode():
        result = client_fn(task_type, task_id, num_gpus, *args, **kwargs)
    else:
      with context.graph_mode():
        result = client_fn(task_type, task_id, num_gpus, *args, **kwargs)
    # Count the client as successful only when every element of its result
    # is truthy.
    if np.all(result):
      with self._lock:
        self._result += 1

  def _run_between_graph_clients(self, client_fn, cluster_spec, num_gpus, *args,
                                 **kwargs):
    """Runs several clients for between-graph replication.

    Args:
      client_fn: a function that needs to accept `task_type`, `task_id`,
        `num_gpus` and returns True if it succeeds.
      cluster_spec: a dict specifying jobs in a cluster.
      num_gpus: number of GPUs per worker.
      *args: will be passed to `client_fn`.
      **kwargs: will be passed to `client_fn`.
    """
    threads = []
    for task_type in ['chief', 'worker']:
      for task_id in range(len(cluster_spec.get(task_type, []))):
        t = threading.Thread(
            target=self._run_client,
            args=(client_fn, task_type, task_id, num_gpus,
                  context.executing_eagerly()) + args,
            kwargs=kwargs)
        t.start()
        threads.append(t)
    for t in threads:
      t.join()
    # Every client thread must have reported success.
    self.assertEqual(self._result, len(threads))
class MockOsEnv(getattr(collections, 'abc', collections).Mapping):
  """A mapping standing in for `os.environ` with a per-thread TF_CONFIG.

  All keys are shared between threads except 'TF_CONFIG', which is kept in
  a thread-local dict so each simulated task thread sees its own cluster
  configuration.

  The base class is resolved through `collections.abc` when available: the
  bare `collections.Mapping` alias was deprecated and removed in Python
  3.10, while Python 2 only has `collections.Mapping`.
  """

  def __init__(self, *args):
    # Shared store for every key except TF_CONFIG.
    self._dict = dict()
    # Thread-local store; its `dict` attribute is created lazily per thread.
    self._thread_local = threading.local()
    super(MockOsEnv, self).__init__(*args)

  def _backing_dict(self, key):
    """Returns the dict that backs `key` (thread-local for TF_CONFIG)."""
    if not hasattr(self._thread_local, 'dict'):
      self._thread_local.dict = dict()
    if key == 'TF_CONFIG':
      return self._thread_local.dict
    return self._dict

  def get(self, key, default=None):
    return self._backing_dict(key).get(key, default)

  def __getitem__(self, key):
    return self._backing_dict(key)[key]

  def __setitem__(self, key, val):
    self._backing_dict(key)[key] = val

  def __iter__(self):
    # Thread-local entries (TF_CONFIG) first, then the shared ones.
    for x in self._backing_dict('TF_CONFIG'):
      yield x
    for x in self._dict:
      yield x

  def __len__(self):
    return len(self._backing_dict('TF_CONFIG')) + len(self._dict)
class IndependentWorkerTestBase(test.TestCase):
  """Testing infra for independent workers."""

  def _make_mock_run_std_server(self):
    """Returns a wrapper of dc._run_std_server that syncs server startup."""

    def _mock_run_std_server(*args, **kwargs):
      ret = original_run_std_server(*args, **kwargs)
      # Wait for all std servers to be brought up in order to reduce the chance
      # of remote sessions taking local ports that have been assigned to std
      # servers. Only call this barrier the first time this function is run for
      # each thread.
      # NOTE(review): self._barrier is not created in this class; it is
      # expected to be set up by the concrete test -- confirm in subclasses.
      if not getattr(self._thread_local, 'server_started', False):
        self._barrier.wait()
      self._thread_local.server_started = True
      return ret

    return _mock_run_std_server

  def setUp(self):
    # Replace os.environ with a mock whose TF_CONFIG entry is per-thread,
    # so each simulated worker thread sees its own cluster config.
    self._mock_os_env = MockOsEnv()
    self._mock_context = test.mock.patch.object(os, 'environ',
                                                self._mock_os_env)
    self._coord = coordinator.Coordinator()
    super(IndependentWorkerTestBase, self).setUp()
    self._mock_context.__enter__()
    # threading local object to be shared by all threads
    self._thread_local = threading.local()

  def tearDown(self):
    self._mock_context.__exit__(None, None, None)
    super(IndependentWorkerTestBase, self).tearDown()

  def _task_thread(self, task_fn, tf_config, executing_eagerly, *args,
                   **kwargs):
    """Thread body: installs TF_CONFIG, then runs `task_fn`."""
    with self._coord.stop_on_exception():
      os.environ['TF_CONFIG'] = json.dumps(tf_config)
      # Force the new thread simulating a worker to run in the same context
      # mode as the parent thread does.
      if executing_eagerly:
        with context.eager_mode():
          task_fn(*args, **kwargs)
      else:
        with ops.Graph().as_default(), context.graph_mode():
          task_fn(*args, **kwargs)

  def _run_task_in_thread(self, task_fn, cluster_spec, task_type, task_id,
                          *args, **kwargs):
    """Run tasks in a thread.

    If `tf_config` is provided, use it for the new thread; if not, construct one
    from `cluster_spec`, `task_type`, and `task_id`, and provide it to the new
    thread to be set as `TF_CONFIG` environment.

    Arguments:
      task_fn: The function to run in the new thread.
      cluster_spec: The cluster spec.
      task_type: The task type.
      task_id: The task id.
      *args: Additional positional arguments to provide to the thread's task_fn.
      **kwargs: Additional keyword arguments to provide to the thread's task_fn.
        If `tf_config` is provided, that dict will be used for the TF_CONFIG for
        the new thread.

    Returns:
      The thread that has started.
    """
    tf_config = kwargs.pop('tf_config', None)
    if tf_config is None:
      if task_type:
        tf_config = {
            'cluster': cluster_spec,
            'task': {
                'type': task_type,
                'index': task_id
            }
        }
      else:
        # No task info: the thread only gets the cluster description.
        tf_config = {
            'cluster': cluster_spec,
        }
    t = threading.Thread(
        target=self._task_thread,
        args=(task_fn, tf_config, context.executing_eagerly()) + args,
        kwargs=kwargs)
    t.start()
    return t

  def run_multiple_tasks_in_threads(self, task_fn, cluster_spec, *args,
                                    **kwargs):
    """Starts one thread per task in `cluster_spec`, keyed by task type."""
    # The task_fn should create std_server by itself.
    threads = {}
    for task_type in cluster_spec.keys():
      threads[task_type] = []
      for task_id in range(len(cluster_spec[task_type])):
        t = self._run_task_in_thread(task_fn, cluster_spec, task_type, task_id,
                                     *args, **kwargs)
        threads[task_type].append(t)
    return threads

  def join_independent_workers(self, worker_threads):
    """Joins worker threads; skips the test if std servers could not start."""
    try:
      self._coord.join(worker_threads)
    except errors.UnknownError as e:
      if 'Could not start gRPC server' in e.message:
        self.skipTest('Cannot start std servers.')
      else:
        raise
class MultiWorkerMultiProcessTest(test.TestCase):
  """Testing infra for independent workers using multiple processes."""

  def _run_task_in_process(self, cmd_args, cluster_spec, task_type, task_id):
    """Launches `cmd_args` as a subprocess with TF_CONFIG set for one task."""
    env = os.environ.copy()
    env['TF_CONFIG'] = json.dumps({
        'cluster': cluster_spec,
        'task': {
            'type': task_type,
            'index': task_id
        }
    })
    # stdout/stderr are piped so stream_stderr/join can consume them.
    return subprocess.Popen(
        cmd_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)

  def run_multiple_tasks_in_processes(self, cmd_args, cluster_spec):
    """Run `cmd_args` in a process for each task in `cluster_spec`."""
    processes = {}
    for task_type in cluster_spec.keys():
      processes[task_type] = []
      for task_id in range(len(cluster_spec[task_type])):
        p = self._run_task_in_process(cmd_args, cluster_spec, task_type,
                                      task_id)
        processes[task_type].append(p)
    return processes

  def join_independent_workers(self, worker_processes):
    """Waits for all worker processes and asserts they exited with code 0."""
    return_codes = []
    for p in nest.flatten(worker_processes):
      try:
        # Calling p.wait() will hang if we don't consume its output.
        p.communicate()
      except ValueError:
        # The output of the process may have been consumed, in which case
        # calling `p.communicate()` will raise a ValueError.
        pass
      finally:
        return_codes.append(p.returncode)
    for return_code in return_codes:
      self.assertEqual(return_code, 0)

  def stream_stderr(self, processes, print_only_first=False):
    """Consume stderr of all processes and print to stdout.

    To reduce the amount of logging, caller can set print_only_first to True.
    In that case, this function only prints stderr from the first process of
    each type.

    Arguments:
      processes: A dictionary from process type string -> list of processes.
      print_only_first: If true, only print output from first process of each
        type.
    """

    def _stream_stderr_single_process(process, type_string, index,
                                      print_to_stdout):
      """Consume a single process's stderr and optionally print to stdout."""
      while True:
        output = process.stderr.readline()
        if not output and process.poll() is not None:
          break
        if output and print_to_stdout:
          print('{}{} {}'.format(type_string, index, output.strip()))
          sys.stdout.flush()

    # One consumer thread per process; all streams are drained concurrently.
    stream_threads = []
    for process_type, process_list in six.iteritems(processes):
      for i in range(len(process_list)):
        print_to_stdout = (not print_only_first) or (i == 0)
        thread = threading.Thread(
            target=_stream_stderr_single_process,
            args=(process_list[i], process_type, i, print_to_stdout))
        thread.start()
        stream_threads.append(thread)
    for thread in stream_threads:
      thread.join()
def get_tf_config_task():
  """Returns the `task` portion of the TF_CONFIG environment variable."""
  tf_config = json.loads(os.environ['TF_CONFIG'])
  return tf_config['task']
def get_tf_config_cluster_spec():
  """Returns the `cluster` portion of the TF_CONFIG environment variable."""
  tf_config = json.loads(os.environ['TF_CONFIG'])
  return tf_config['cluster']
def get_task_type():
  """Returns this task's type as recorded in TF_CONFIG."""
  task = get_tf_config_task()
  return task['type']
def get_task_index():
  """Return this process's task index within its task type, from TF_CONFIG."""
  task = get_tf_config_task()
  return task['index']
def is_chief():
  """True iff this process acts as chief: worker 0 in a cluster that has
  no dedicated 'chief' job."""
  if 'chief' in get_tf_config_cluster_spec():
    return False
  return get_task_type() == 'worker' and get_task_index() == 0
| |
"""
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for matplotlib.
They have been rewritten and extended for convenience.
"""
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
_check_fill_value = np.ma.core._check_fill_value
__all__ = ['append_fields',
'drop_fields',
'find_duplicates',
'get_fieldstructure',
'join_by',
'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields',
'stack_arrays',
]
def recursive_fill_fields(input, output):
    """
    Copy each field of `input` into the matching field of `output`,
    descending into nested (structured) fields.

    Parameters
    ----------
    input : ndarray
        Source structured array.
    output : ndarray
        Destination structured array; should be at least as long as `input`.

    Returns
    -------
    output : ndarray
        The destination array, filled in place.

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
    >>> b = np.zeros((3,), dtype=a.dtype)
    >>> rfn.recursive_fill_fields(a, b)
    array([(1, 10.0), (2, 20.0), (0, 0.0)],
          dtype=[('A', '<i4'), ('B', '<f8')])
    """
    for name in output.dtype.names:
        try:
            src = input[name]
        except ValueError:
            # `input` has no field of that name: leave the output field alone.
            continue
        if src.dtype.names:
            # Nested structure: recurse into the sub-array.
            recursive_fill_fields(src, output[name])
        else:
            output[name][:len(src)] = src
    return output
def get_names(adtype):
    """
    Return the field names of the input datatype as a (possibly nested)
    tuple, or None when there are no fields.

    Parameters
    ----------
    adtype : dtype
        Input datatype

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
    ('A', 'B')
    >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
    >>> rfn.get_names(adtype)
    ('a', ('b', ('ba', 'bb')))
    """
    collected = []
    for fname in adtype.names:
        sub = adtype[fname]
        if sub.names:
            # Nested field: pair the name with the tuple of its sub-names.
            collected.append((fname, get_names(sub)))
        else:
            collected.append(fname)
    return tuple(collected) or None
def get_names_flat(adtype):
    """
    Return the field names of the input datatype as a flat tuple, with
    nested structures flattened, or None when there are no fields.

    Parameters
    ----------
    adtype : dtype
        Input datatype

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
    ('A', 'B')
    >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
    >>> rfn.get_names_flat(adtype)
    ('a', 'b', 'ba', 'bb')
    """
    flat = []
    for fname in adtype.names:
        flat.append(fname)
        sub = adtype[fname]
        if sub.names:
            # Nested field: its own name comes first, then its sub-names.
            flat.extend(get_names_flat(sub))
    return tuple(flat) or None
def flatten_descr(ndtype):
    """
    Flatten a structured data-type description.

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
    >>> rfn.flatten_descr(ndtype)
    (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
    """
    if ndtype.names is None:
        # Unstructured dtype: nothing to flatten.
        return ndtype.descr
    flat = []
    for fname in ndtype.names:
        (ftype, _) = ndtype.fields[fname]
        if ftype.names:
            flat.extend(flatten_descr(ftype))
        else:
            flat.append((fname, ftype))
    return tuple(flat)
def zip_descr(seqarrays, flatten=False):
    """
    Combine the dtype descriptions of a series of arrays into one.

    Parameters
    ----------
    seqarrays : sequence of arrays
        Sequence of arrays
    flatten : {boolean}, optional
        Whether to collapse nested descriptions.
    """
    combined = []
    if flatten:
        for arr in seqarrays:
            combined.extend(flatten_descr(arr.dtype))
    else:
        for arr in seqarrays:
            names = arr.dtype.names or ()
            if len(names) > 1:
                # Keep multi-field arrays as an anonymous nested field.
                combined.append(('', arr.dtype.descr))
            else:
                combined.extend(arr.dtype.descr)
    # Round-trip through np.dtype to normalize the description.
    return np.dtype(combined).descr
def get_fieldstructure(adtype, lastname=None, parents=None,):
    """
    Return a dictionary mapping each field name to the list of its parent
    field names; used to simplify access to fields nested in other fields.

    Parameters
    ----------
    adtype : np.dtype
        Input datatype
    lastname : optional
        Last processed field name (used internally during recursion).
    parents : dictionary
        Dictionary of parent fields (used internally during recursion).

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> ndtype = np.dtype([('A', int),
    ...                    ('B', [('BA', int),
    ...                           ('BB', [('BBA', int), ('BBB', int)])])])
    >>> rfn.get_fieldstructure(ndtype)
    ... # XXX: possible regression, order of BBA and BBB is swapped
    {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
    """
    if parents is None:
        parents = {}
    for name in adtype.names:
        sub = adtype[name]
        if sub.names:
            # Nested field: record its (single) parent, then recurse,
            # sharing the same `parents` dictionary.
            parents[name] = [lastname] if lastname else []
            parents.update(get_fieldstructure(sub, name, parents))
        else:
            # Leaf field: its ancestry is the ancestry of the enclosing
            # field plus the enclosing field itself.
            ancestry = list(parents.get(lastname) or [])
            if ancestry:
                ancestry.append(lastname)
            elif lastname:
                ancestry = [lastname]
            parents[name] = ancestry or []
    return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if hasattr(element, '__iter__') and not isinstance(element, basestring):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
    """
    Yield concatenated items from a sequence of arrays, padding the
    shorter ones with `fill_value`.

    Parameters
    ----------
    seqarrays : sequence of arrays
        Sequence of arrays.
    fill_value : {None, integer}
        Value used to pad shorter iterables.
    flatten : {True, False},
        Whether to collapse nested fields.
    """
    # Adapted from Python 2.6 itertools.izip_longest: each iterator is
    # chained with a sentinel followed by infinite fillers; the sentinel
    # raises IndexError once all "real" iterators are exhausted, which
    # terminates the zip below.
    def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop):
        "Yields the fill_value or raises IndexError"
        yield counter()
    #
    fillers = itertools.repeat(fill_value)
    iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays]
    # Should we flatten the items, or just use a nested approach
    if flatten:
        zipfunc = _izip_fields_flat
    else:
        zipfunc = _izip_fields
    #
    try:
        # Py3 fix: itertools.izip was removed; the builtin zip is lazy
        # in Python 3 and therefore equivalent here.
        for tup in zip(*iters):
            yield tuple(zipfunc(tup))
    except IndexError:
        pass
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).iteritems():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays,
                 fill_value= -1, flatten=False, usemask=False, asrecarray=False):
    """
    Merge arrays field by field.

    Parameters
    ----------
    seqarrays : sequence of ndarrays
        Sequence of arrays
    fill_value : {float}, optional
        Filling value used to pad missing data on the shorter arrays.
    flatten : {False, True}, optional
        Whether to collapse nested fields.
    usemask : {False, True}, optional
        Whether to return a masked array or not.
    asrecarray : {False, True}, optional
        Whether to return a recarray (MaskedRecords) or not.

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
    ...                  usemask=False)
    array([(1, 10.0), (2, 20.0), (-1, 30.0)],
          dtype=[('f0', '<i4'), ('f1', '<f8')])

    Notes
    -----
    * Without a mask, the missing value will be filled with something
      depending on its corresponding type: -1 for integers, -1.0 for
      floats, '-' for characters, '-1' for strings, True for booleans.
    """
    # Only one item in the input sequence ?
    if (len(seqarrays) == 1):
        seqarrays = np.asanyarray(seqarrays[0])
    # Do we have a single ndarray as input ?
    if isinstance(seqarrays, (ndarray, np.void)):
        seqdtype = seqarrays.dtype
        if (not flatten) or \
           (zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
            # Minimal processing needed: just make sure everything's a-ok
            seqarrays = seqarrays.ravel()
            # Make sure we have named fields
            if not seqdtype.names:
                seqdtype = [('', seqdtype)]
            # Find what type of array we must return
            if usemask:
                if asrecarray:
                    seqtype = MaskedRecords
                else:
                    seqtype = MaskedArray
            elif asrecarray:
                seqtype = recarray
            else:
                seqtype = ndarray
            return seqarrays.view(dtype=seqdtype, type=seqtype)
        else:
            seqarrays = (seqarrays,)
    else:
        # Make sure we have arrays in the input sequence.
        # Py3 fix: map() returns a lazy iterator that would be exhausted
        # after the first pass over `seqarrays` below -- build a list.
        seqarrays = [np.asanyarray(_a) for _a in seqarrays]
    # Find the sizes of the inputs and their maximum
    sizes = tuple(a.size for a in seqarrays)
    maxlength = max(sizes)
    # Get the dtype of the output (flattening if needed)
    newdtype = zip_descr(seqarrays, flatten=flatten)
    # Initialize the sequences for data and mask
    seqdata = []
    seqmask = []
    # If we expect some kind of MaskedArray, make a special loop.
    if usemask:
        # Py3 fix: itertools.izip was removed; builtin zip is equivalent.
        for (a, n) in zip(seqarrays, sizes):
            nbmissing = (maxlength - n)
            # Get the data and mask
            data = a.ravel().__array__()
            mask = ma.getmaskarray(a).ravel()
            # Get the filling value (if needed)
            if nbmissing:
                fval = _check_fill_value(fill_value, a.dtype)
                if isinstance(fval, (ndarray, np.void)):
                    if len(fval.dtype) == 1:
                        fval = fval.item()[0]
                        fmsk = True
                    else:
                        fval = np.array(fval, dtype=a.dtype, ndmin=1)
                        fmsk = np.ones((1,), dtype=mask.dtype)
            else:
                fval = None
                fmsk = True
            # Store an iterator padding the input to the expected length
            seqdata.append(itertools.chain(data, [fval] * nbmissing))
            seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
        # Create an iterator for the data
        data = tuple(izip_records(seqdata, flatten=flatten))
        output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
                          mask=list(izip_records(seqmask, flatten=flatten)))
        if asrecarray:
            output = output.view(MaskedRecords)
    else:
        # Same as before, without the mask we don't need...
        for (a, n) in zip(seqarrays, sizes):
            nbmissing = (maxlength - n)
            data = a.ravel().__array__()
            if nbmissing:
                fval = _check_fill_value(fill_value, a.dtype)
                if isinstance(fval, (ndarray, np.void)):
                    if len(fval.dtype) == 1:
                        fval = fval.item()[0]
                    else:
                        fval = np.array(fval, dtype=a.dtype, ndmin=1)
            else:
                fval = None
            seqdata.append(itertools.chain(data, [fval] * nbmissing))
        output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
                             dtype=newdtype, count=maxlength)
        if asrecarray:
            output = output.view(recarray)
    # And we're done...
    return output
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
    """
    Return a new array with the fields listed in `drop_names` removed.
    Nested fields are supported.

    Parameters
    ----------
    base : array
        Input array
    drop_names : string or sequence
        String or sequence of strings corresponding to the names of the
        fields to drop.
    usemask : {False, True}, optional
        Whether to return a masked array or not.
    asrecarray : string or sequence
        Whether to return a recarray or a mrecarray (`asrecarray=True`) or
        a plain ndarray or masked array with flexible dtype (`asrecarray=False`)

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
    ...              dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
    >>> rfn.drop_fields(a, 'a')
    array([((2.0, 3),), ((5.0, 6),)],
          dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
    """
    if _is_string_like(drop_names):
        drop_names = [drop_names]
    else:
        drop_names = set(drop_names)

    def _filtered_descr(dtype, excluded):
        # Rebuild the dtype description without the excluded names,
        # recursing into nested structures.
        kept = []
        for fname in dtype.names:
            if fname in excluded:
                continue
            ftype = dtype[fname]
            if ftype.names:
                sub = _filtered_descr(ftype, excluded)
                # Drop the nested field entirely if nothing survived.
                if sub:
                    kept.append((fname, sub))
            else:
                kept.append((fname, ftype))
        return kept

    newdtype = _filtered_descr(base.dtype, drop_names)
    if not newdtype:
        # Every field was dropped.
        return None
    trimmed = recursive_fill_fields(base, np.empty(base.shape, dtype=newdtype))
    return _fix_output(trimmed, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
    """
    Return a new np.recarray with the fields in `drop_names` dropped.
    """
    return drop_fields(base, drop_names, asrecarray=True, usemask=False)
def rename_fields(base, namemapper):
    """
    Rename the fields of a flexible-datatype ndarray or recarray.
    Nested fields are supported.

    Parameters
    ----------
    base : ndarray
        Input array whose fields must be modified.
    namemapper : dictionary
        Dictionary mapping old field names to their new version.

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
    ...              dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
    >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
    array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
          dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
    """
    def _remap(dtype, mapping):
        # Rebuild the dtype description, substituting mapped names and
        # recursing into nested structures.
        remapped = []
        for old_name in dtype.names:
            new_name = mapping.get(old_name, old_name)
            sub = dtype[old_name]
            if sub.names:
                remapped.append((new_name, _remap(sub, mapping)))
            else:
                remapped.append((new_name, sub))
        return remapped
    # A view is enough: only the dtype changes, not the data.
    return base.view(_remap(base.dtype, namemapper))
def append_fields(base, names, data=None, dtypes=None,
                  fill_value= -1, usemask=True, asrecarray=False):
    """
    Add new fields to an existing array.

    The names of the fields are given with the `names` arguments,
    the corresponding values with the `data` arguments.
    If a single field is appended, `names`, `data` and `dtypes` do not have
    to be lists but just values.

    Parameters
    ----------
    base : array
        Input array to extend.
    names : string, sequence
        String or sequence of strings corresponding to the names
        of the new fields.
    data : array or sequence of arrays
        Array or sequence of arrays storing the fields to add to the base.
    dtypes : sequence of datatypes
        Datatype or sequence of datatypes.
        If None, the datatypes are estimated from the `data`.
    fill_value : {float}, optional
        Filling value used to pad missing data on the shorter arrays.
    usemask : {False, True}, optional
        Whether to return a masked array or not.
    asrecarray : {False, True}, optional
        Whether to return a recarray (MaskedRecords) or not.
    """
    # Check the names
    if isinstance(names, (tuple, list)):
        if len(names) != len(data):
            err_msg = "The number of arrays does not match the number of names"
            raise ValueError(err_msg)
    # Py3 fix: `basestring` no longer exists -- test against str.
    elif isinstance(names, str):
        names = [names, ]
        data = [data, ]
    #
    if dtypes is None:
        data = [np.array(a, copy=False, subok=True) for a in data]
        data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
    else:
        # Fix: the previous `hasattr(dtypes, '__iter__')` test skipped the
        # wrapping for strings on Py3 and bypassed the length validation
        # below whenever a list was passed.
        if not isinstance(dtypes, (tuple, list)):
            dtypes = [dtypes, ]
        if len(data) != len(dtypes):
            if len(dtypes) == 1:
                # Broadcast a single dtype to every new field.
                dtypes = dtypes * len(data)
            else:
                msg = "The dtypes argument must be None, "\
                      "a single dtype or a list."
                raise ValueError(msg)
        data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
                for (a, n, d) in zip(data, names, dtypes)]
    #
    base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
    if len(data) > 1:
        data = merge_arrays(data, flatten=True, usemask=usemask,
                            fill_value=fill_value)
    else:
        data = data.pop()
    #
    output = ma.masked_all(max(len(base), len(data)),
                           dtype=base.dtype.descr + data.dtype.descr)
    output = recursive_fill_fields(base, output)
    output = recursive_fill_fields(data, output)
    #
    return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_append_fields(base, names, data, dtypes=None):
    """
    Add new fields to an existing array, always returning a np.recarray.

    Thin wrapper around `append_fields` with ``usemask=False`` and
    ``asrecarray=True``.

    Parameters
    ----------
    base : array
        Input array to extend.
    names : string, sequence
        String or sequence of strings corresponding to the names
        of the new fields.
    data : array or sequence of arrays
        Array or sequence of arrays storing the fields to add to the base.
    dtypes : sequence of datatypes, optional
        Datatype or sequence of datatypes.
        If None, the datatypes are estimated from the `data`.

    Returns
    -------
    appended_array : np.recarray

    See Also
    --------
    append_fields
    """
    return append_fields(base, names, data=data, dtypes=dtypes,
                         usemask=False, asrecarray=True)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
                 autoconvert=False):
    """
    Superposes arrays fields by fields

    Parameters
    ----------
    seqarrays : array or sequence
        Sequence of input arrays.
    defaults : dictionary, optional
        Dictionary mapping field names to the corresponding default values.
    usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords is `asrecarray==True`)
        or a ndarray.
    asrecarray : {False, True}, optional
        Whether to return a recarray (or MaskedRecords if `usemask==True`) or
        just a flexible-type ndarray.
    autoconvert : {False, True}, optional
        Whether automatically cast the type of the field to the maximum.

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> x = np.array([1, 2,])
    >>> rfn.stack_arrays(x) is x
    True
    >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
    >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
    ...               dtype=[('A', '|S3'), ('B', float), ('C', float)])
    >>> test = rfn.stack_arrays((z,zz))
    >>> test
    masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
     ('c', 30.0, 300.0)],
                 mask = [(False, False, True) (False, False, True) (False, False, False)
     (False, False, False) (False, False, False)],
           fill_value = ('N/A', 1e+20, 1e+20),
                dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
    """
    # A single array (or a one-element sequence) is returned unchanged.
    if isinstance(arrays, ndarray):
        return arrays
    elif len(arrays) == 1:
        return arrays[0]
    seqarrays = [np.asanyarray(a).ravel() for a in arrays]
    nrecords = [len(a) for a in seqarrays]
    ndtype = [a.dtype for a in seqarrays]
    fldnames = [d.names for d in ndtype]
    #
    # Build the union of all field descriptions, starting from the first
    # array's dtype and appending/merging fields from the others.
    dtype_l = ndtype[0]
    newdescr = dtype_l.descr
    names = [_[0] for _ in newdescr]
    for dtype_n in ndtype[1:]:
        for descr in dtype_n.descr:
            name = descr[0] or ''
            if name not in names:
                # New field: append it to the combined description.
                newdescr.append(descr)
                names.append(name)
            else:
                # Field already present: types must agree, unless
                # `autoconvert` lets us widen to the larger dtype.
                nameidx = names.index(name)
                current_descr = newdescr[nameidx]
                if autoconvert:
                    if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
                        current_descr = list(current_descr)
                        current_descr[-1] = descr[1]
                        newdescr[nameidx] = tuple(current_descr)
                elif descr[1] != current_descr[-1]:
                    raise TypeError("Incompatible type '%s' <> '%s'" % \
                                    (dict(newdescr)[name], descr[1]))
    # Only one field: use concatenate
    if len(newdescr) == 1:
        output = ma.concatenate(seqarrays)
    else:
        #
        # Allocate a fully-masked output and copy each input into its own
        # row range; fields absent from an input stay masked.
        output = ma.masked_all((np.sum(nrecords),), newdescr)
        offset = np.cumsum(np.r_[0, nrecords])
        seen = []
        for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
            names = a.dtype.names
            if names is None:
                # Unstructured input: goes into the next anonymous field.
                output['f%i' % len(seen)][i:j] = a
            else:
                for name in n:
                    output[name][i:j] = a[name]
                    if name not in seen:
                        seen.append(name)
    #
    return _fix_output(_fix_defaults(output, defaults),
                       usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
    """
    Find the duplicates in a structured array along a given key

    Parameters
    ----------
    a : array-like
        Input array
    key : {string, None}, optional
        Name of the fields along which to check the duplicates.
        If None, the search is performed by records
    ignoremask : {True, False}, optional
        Whether masked data should be discarded or considered as duplicates.
    return_index : {False, True}, optional
        Whether to return the indices of the duplicated values.

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> ndtype = [('a', int)]
    >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
    ...         mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
    >>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
    ... # XXX: judging by the output, the ignoremask flag has no effect
    """
    a = np.asanyarray(a).ravel()
    # Get a dictionary of fields
    fields = get_fieldstructure(a.dtype)
    # Get the sorting data (by selecting the corresponding field)
    base = a
    if key:
        # Walk down the chain of parent fields so nested keys are reachable.
        for f in fields[key]:
            base = base[f]
        base = base[key]
    # Get the sorting indices and the sorted data
    sortidx = base.argsort()
    sortedbase = base[sortidx]
    sorteddata = sortedbase.filled()
    # Compare the sorting data: flag[i] is True when entry i+1 equals entry i.
    flag = (sorteddata[:-1] == sorteddata[1:])
    # If masked data must be ignored, set the flag to false where needed
    if ignoremask:
        sortedmask = sortedbase.recordmask
        flag[sortedmask[1:]] = False
    flag = np.concatenate(([False], flag))
    # We need to take the point on the left as well (else we're missing it)
    flag[:-1] = flag[:-1] + flag[1:]
    duplicates = a[sortidx][flag]
    if return_index:
        # Also return the indices (into the ravelled `a`) of the duplicates.
        return (duplicates, sortidx[flag])
    else:
        return duplicates
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
            defaults=None, usemask=True, asrecarray=False):
    """
    Join arrays `r1` and `r2` on key `key`.

    The key should be either a string or a sequence of string corresponding
    to the fields used to join the array.
    An exception is raised if the `key` field cannot be found in the two input
    arrays.
    Neither `r1` nor `r2` should have any duplicates along `key`: the presence
    of duplicates will make the output quite unreliable. Note that duplicates
    are not looked for by the algorithm.

    Parameters
    ----------
    key : {string, sequence}
        A string or a sequence of strings corresponding to the fields used
        for comparison.
    r1, r2 : arrays
        Structured arrays.
    jointype : {'inner', 'outer', 'leftouter'}, optional
        If 'inner', returns the elements common to both r1 and r2.
        If 'outer', returns the common elements as well as the elements of r1
        not in r2 and the elements of r2 not in r1.
        If 'leftouter', returns the common elements and the elements of r1 not
        in r2.
    r1postfix : string, optional
        String appended to the names of the fields of r1 that are present in r2
        but absent of the key.
    r2postfix : string, optional
        String appended to the names of the fields of r2 that are present in r1
        but absent of the key.
    defaults : {dictionary}, optional
        Dictionary mapping field names to the corresponding default values.
    usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords is `asrecarray==True`)
        or a ndarray.
    asrecarray : {False, True}, optional
        Whether to return a recarray (or MaskedRecords if `usemask==True`) or
        just a flexible-type ndarray.

    Notes
    -----
    * The output is sorted along the key.
    * A temporary array is formed by dropping the fields not in the key for the
      two arrays and concatenating the result. This array is then sorted, and
      the common entries selected. The output is constructed by filling the
      fields with the selected entries. Matching is not preserved if there are
      some duplicates...
    """
    # Check jointype
    if jointype not in ('inner', 'outer', 'leftouter'):
        raise ValueError("The 'jointype' argument should be in 'inner', "\
                         "'outer' or 'leftouter' (got '%s' instead)" % jointype)
    # If we have a single key, put it in a tuple.
    # Py3 fix: `basestring` no longer exists -- test against str.
    if isinstance(key, str):
        key = (key,)
    # Check the keys
    for name in key:
        if name not in r1.dtype.names:
            raise ValueError('r1 does not have key field %s' % name)
        if name not in r2.dtype.names:
            raise ValueError('r2 does not have key field %s' % name)
    # Make sure we work with ravelled arrays
    r1 = r1.ravel()
    r2 = r2.ravel()
    (nb1, nb2) = (len(r1), len(r2))
    (r1names, r2names) = (r1.dtype.names, r2.dtype.names)
    # Make temporary arrays of just the keys
    r1k = drop_fields(r1, [n for n in r1names if n not in key])
    r2k = drop_fields(r2, [n for n in r2names if n not in key])
    # Concatenate the two arrays for comparison
    aux = ma.concatenate((r1k, r2k))
    idx_sort = aux.argsort(order=key)
    aux = aux[idx_sort]
    #
    # Get the common keys: after sorting, duplicates sit next to each other.
    flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
    flag_in[:-1] = flag_in[1:] + flag_in[:-1]
    idx_in = idx_sort[flag_in]
    idx_1 = idx_in[(idx_in < nb1)]
    idx_2 = idx_in[(idx_in >= nb1)] - nb1
    (r1cmn, r2cmn) = (len(idx_1), len(idx_2))
    if jointype == 'inner':
        (r1spc, r2spc) = (0, 0)
    elif jointype == 'outer':
        idx_out = idx_sort[~flag_in]
        idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
        idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
        (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
    elif jointype == 'leftouter':
        idx_out = idx_sort[~flag_in]
        idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
        (r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
    # Select the entries from each input
    (s1, s2) = (r1[idx_1], r2[idx_2])
    #
    # Build the new description of the output array .......
    # Start with the key fields
    ndtype = [list(_) for _ in r1k.dtype.descr]
    # Add the other fields
    ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
    # Find the new list of names (it may be different from r1names)
    names = list(_[0] for _ in ndtype)
    for desc in r2.dtype.descr:
        desc = list(desc)
        name = desc[0]
        # Have we seen the current name already ?
        if name in names:
            nameidx = names.index(name)
            current = ndtype[nameidx]
            # The current field is part of the key: take the largest dtype
            if name in key:
                current[-1] = max(desc[1], current[-1])
            # The current field is not part of the key: add the suffixes
            else:
                current[0] += r1postfix
                desc[0] += r2postfix
                ndtype.insert(nameidx + 1, desc)
        #... we haven't: just add the description to the current list
        else:
            # Bug fix: this used `names.extend(desc[0])`, which added the
            # name one *character* at a time, so later duplicate checks
            # could never find it and fields were silently duplicated.
            names.append(desc[0])
            ndtype.append(desc)
    # Revert the elements to tuples
    ndtype = [tuple(_) for _ in ndtype]
    # Find the largest nb of common fields :
    # r1cmn and r2cmn should be equal, but...
    cmn = max(r1cmn, r2cmn)
    # Construct an empty array
    output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
    names = output.dtype.names
    for f in r1names:
        selected = s1[f]
        if f not in names:
            f += r1postfix
        current = output[f]
        current[:r1cmn] = selected[:r1cmn]
        if jointype in ('outer', 'leftouter'):
            current[cmn:cmn + r1spc] = selected[r1cmn:]
    for f in r2names:
        selected = s2[f]
        if f not in names:
            f += r2postfix
        current = output[f]
        current[:r2cmn] = selected[:r2cmn]
        if (jointype == 'outer') and r2spc:
            current[-r2spc:] = selected[r2cmn:]
    # Sort and finalize the output
    output.sort(order=key)
    kwargs = dict(usemask=usemask, asrecarray=asrecarray)
    return _fix_output(_fix_defaults(output, defaults), **kwargs)
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
             defaults=None):
    """
    Join arrays `r1` and `r2` on keys.
    Alternative to join_by, that always returns a np.recarray.

    See Also
    --------
    join_by : equivalent function
    """
    return join_by(key, r1, r2, jointype=jointype,
                   r1postfix=r1postfix, r2postfix=r2postfix,
                   defaults=defaults, usemask=False, asrecarray=True)
| |
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
from gzip import GzipFile
import os.path as op
import re
import time
import uuid
import numpy as np
from scipy import linalg
from .constants import FIFF
from ..utils import logger
from ..externals.jdcal import jcal2jd
from ..externals.six import string_types, b
def _write(fid, data, kind, data_size, FIFFT_TYPE, dtype):
    """Write a generic FIF tag: 16-byte header followed by the payload.

    Parameters
    ----------
    fid : file-like
        Destination file opened for binary writing.
    data : ndarray | bytes | scalar
        Payload, converted to `dtype` before writing.
    kind : int
        FIF tag kind.
    data_size : int
        Per-element size in bytes; multiplied by ``data.size`` for arrays.
    FIFFT_TYPE : int
        FIF data-type constant for the tag header.
    dtype : str
        Numpy dtype string (big-endian) used for the on-disk representation.
    """
    if isinstance(data, np.ndarray):
        data_size *= data.size
    # XXX for string types the data size is used as
    # computed in ``write_string``.
    # np.ndarray.tostring() is deprecated and removed in numpy 2.0;
    # tobytes() produces byte-identical output.
    fid.write(np.array(kind, dtype='>i4').tobytes())
    fid.write(np.array(FIFFT_TYPE, dtype='>i4').tobytes())
    fid.write(np.array(data_size, dtype='>i4').tobytes())
    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes())
    fid.write(np.array(data, dtype=dtype).tobytes())
def _get_split_size(split_size):
"""Convert human-readable bytes to machine-readable bytes."""
if isinstance(split_size, string_types):
exp = dict(MB=20, GB=30).get(split_size[-2:], None)
if exp is None:
raise ValueError('split_size has to end with either'
'"MB" or "GB"')
split_size = int(float(split_size[:-2]) * 2 ** exp)
if split_size > 2147483648:
raise ValueError('split_size cannot be larger than 2GB')
return split_size
def write_int(fid, kind, data):
    """Writes a 32-bit integer tag to a fif file"""
    arr = np.asarray(data).astype('>i4').T
    _write(fid, arr, kind, 4, FIFF.FIFFT_INT, '>i4')
def write_double(fid, kind, data):
    """Writes a double-precision floating point tag to a fif file"""
    arr = np.asarray(data).astype('>f8').T
    _write(fid, arr, kind, 8, FIFF.FIFFT_DOUBLE, '>f8')
def write_float(fid, kind, data):
    """Writes a single-precision floating point tag to a fif file"""
    arr = np.asarray(data).astype('>f4').T
    _write(fid, arr, kind, 4, FIFF.FIFFT_FLOAT, '>f4')
def write_dau_pack16(fid, kind, data):
    """Writes a dau_pack16 tag to a fif file"""
    arr = np.asarray(data).astype('>i2').T
    _write(fid, arr, kind, 2, FIFF.FIFFT_DAU_PACK16, '>i2')
def write_complex64(fid, kind, data):
    """Writes a 64 bit complex floating point tag to a fif file"""
    arr = np.asarray(data).astype('>c8').T
    _write(fid, arr, kind, 8, FIFF.FIFFT_COMPLEX_FLOAT, '>c8')
def write_complex128(fid, kind, data):
    """Writes a 128 bit complex floating point tag to a fif file"""
    data_size = 16
    data = np.array(data, dtype='>c16').T
    # Bug fix: this tag was previously labelled FIFFT_COMPLEX_FLOAT;
    # 128-bit complex data must be tagged as complex double so readers
    # interpret the payload with the correct element size.
    _write(fid, data, kind, data_size, FIFF.FIFFT_COMPLEX_DOUBLE, '>c16')
def write_julian(fid, kind, data):
    """Writes a Julian-formatted date to a FIF file"""
    # Expect a (year, month, day) triple.
    assert len(data) == 3
    julian_day = np.sum(jcal2jd(*data))
    _write(fid, np.array(julian_day, dtype='>i4'), kind, 4,
           FIFF.FIFFT_JULIAN, '>i4')
def write_string(fid, kind, data):
    """Writes a string tag"""
    # Encode up-front so the byte length (not the character count) is
    # used as the tag size; '>a' keeps py2/3 writing compatible.
    encoded = data.encode('utf-8')
    if len(encoded) > 0:
        _write(fid, encoded, kind, len(encoded), FIFF.FIFFT_STRING, '>a')
def write_name_list(fid, kind, data):
    """Writes a colon-separated list of names

    Parameters
    ----------
    data : list of strings
    """
    joined = ':'.join(data)
    write_string(fid, kind, joined)
def write_float_matrix(fid, kind, mat):
    """Writes a single-precision floating-point matrix tag"""
    FIFFT_MATRIX = 1 << 30
    FIFFT_MATRIX_FLOAT = FIFF.FIFFT_FLOAT | FIFFT_MATRIX
    # Payload: the data plus one int32 per dimension plus one for ndim.
    data_size = 4 * mat.size + 4 * (mat.ndim + 1)
    # np.ndarray.tostring() is deprecated and removed in numpy 2.0;
    # tobytes() produces byte-identical output.
    fid.write(np.array(kind, dtype='>i4').tobytes())
    fid.write(np.array(FIFFT_MATRIX_FLOAT, dtype='>i4').tobytes())
    fid.write(np.array(data_size, dtype='>i4').tobytes())
    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes())
    fid.write(np.array(mat, dtype='>f4').tobytes())
    # Trailer: dimensions in reverse order, then the dimension count.
    dims = np.empty(mat.ndim + 1, dtype=np.int32)
    dims[:mat.ndim] = mat.shape[::-1]
    dims[-1] = mat.ndim
    fid.write(np.array(dims, dtype='>i4').tobytes())
    check_fiff_length(fid)
def write_double_matrix(fid, kind, mat):
    """Writes a double-precision floating-point matrix tag.

    Same layout as write_float_matrix with 8-byte elements.
    """
    FIFFT_MATRIX = 1 << 30  # matrix flag OR'ed into the base data type
    FIFFT_MATRIX_DOUBLE = FIFF.FIFFT_DOUBLE | FIFFT_MATRIX
    # payload = float64 values + (ndim + 1) trailing int32 dims
    data_size = 8 * mat.size + 4 * (mat.ndim + 1)
    fid.write(np.array(kind, dtype='>i4').tostring())
    fid.write(np.array(FIFFT_MATRIX_DOUBLE, dtype='>i4').tostring())
    fid.write(np.array(data_size, dtype='>i4').tostring())
    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
    fid.write(np.array(mat, dtype='>f8').tostring())
    # dims are stored reversed (FIFF column-major convention), then ndim
    dims = np.empty(mat.ndim + 1, dtype=np.int32)
    dims[:mat.ndim] = mat.shape[::-1]
    dims[-1] = mat.ndim
    fid.write(np.array(dims, dtype='>i4').tostring())
    check_fiff_length(fid)
def write_int_matrix(fid, kind, mat):
    """Writes integer 32 matrix tag.

    NOTE(review): unlike the float/double writers, this hard-codes a 2-D
    matrix (trailer of exactly 3 ints with ndim == 2), so callers must pass
    2-D input -- confirm no caller passes higher-rank arrays.
    """
    FIFFT_MATRIX = 1 << 30  # matrix flag OR'ed into the base data type
    FIFFT_MATRIX_INT = FIFF.FIFFT_INT | FIFFT_MATRIX
    # payload = int32 values + 3 trailing int32 dims (2-D only)
    data_size = 4 * mat.size + 4 * 3
    fid.write(np.array(kind, dtype='>i4').tostring())
    fid.write(np.array(FIFFT_MATRIX_INT, dtype='>i4').tostring())
    fid.write(np.array(data_size, dtype='>i4').tostring())
    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
    fid.write(np.array(mat, dtype='>i4').tostring())
    # reversed shape, then the fixed dimensionality (2)
    dims = np.empty(3, dtype=np.int32)
    dims[0] = mat.shape[1]
    dims[1] = mat.shape[0]
    dims[2] = 2
    fid.write(np.array(dims, dtype='>i4').tostring())
    check_fiff_length(fid)
def get_machid():
    """Get (mostly) unique machine ID

    Derived from the MAC address (uuid.getnode), padded to 16 hex digits
    and packed into two int32s.

    Returns
    -------
    ids : array (length 2, int32)
        The machine identifier used in MNE.
    """
    mac = b('%012x' % uuid.getnode())  # byte conversion for Py3
    mac = re.findall(b'..', mac)  # split string
    mac += [b'00', b'00']  # add two more fields
    # Convert to integer in reverse-order (for some reason)
    from codecs import encode
    mac = b''.join([encode(h, 'hex_codec') for h in mac[::-1]])
    # NOTE(review): np.fromstring is deprecated in modern NumPy; the
    # equivalent modern spelling is np.frombuffer.
    ids = np.flipud(np.fromstring(mac, np.int32, count=2))
    return ids
def get_new_file_id():
    """Helper to create a new file ID tag stamped with the current time."""
    # split wall-clock time into whole seconds and microseconds
    secs, usecs = divmod(time.time(), 1.)
    secs, usecs = int(secs), int(usecs * 1e6)
    return {'machid': get_machid(), 'version': FIFF.FIFFC_VERSION,
            'secs': secs, 'usecs': usecs}
def write_id(fid, kind, id_=None):
    """Writes fiff id.

    Parameters
    ----------
    id_ : dict | None
        ID dict with 'version', 'machid', 'secs', 'usecs'; a fresh
        measurement ID is generated when None.
    """
    id_ = _generate_meas_id() if id_ is None else id_
    data_size = 5 * 4  # The id comprises five integers
    fid.write(np.array(kind, dtype='>i4').tostring())
    fid.write(np.array(FIFF.FIFFT_ID_STRUCT, dtype='>i4').tostring())
    fid.write(np.array(data_size, dtype='>i4').tostring())
    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
    # Collect the bits together for one write
    arr = np.array([id_['version'],
                    id_['machid'][0], id_['machid'][1],
                    id_['secs'], id_['usecs']], dtype='>i4')
    fid.write(arr.tostring())
def start_block(fid, kind):
    """Writes a FIFF_BLOCK_START tag opening a block of the given kind."""
    write_int(fid, FIFF.FIFF_BLOCK_START, kind)
def end_block(fid, kind):
    """Writes a FIFF_BLOCK_END tag closing a block of the given kind."""
    write_int(fid, FIFF.FIFF_BLOCK_END, kind)
def start_file(fname, id_=None):
    """Opens a fif file for writing and writes the compulsory header tags

    Parameters
    ----------
    fname : string | fid
        The name of the file to open. It is recommended
        that the name ends with .fif or .fif.gz. Can also be an
        already opened file.
    id_ : dict | None
        ID to use for the FIFF_FILE_ID.

    Returns
    -------
    fid : file
        The opened (or passed-in, rewound) file object.
    """
    if isinstance(fname, string_types):
        if op.splitext(fname)[1].lower() == '.gz':
            logger.debug('Writing using gzip')
            # defaults to compression level 9, which is barely smaller but much
            # slower. 2 offers a good compromise.
            fid = GzipFile(fname, "wb", compresslevel=2)
        else:
            logger.debug('Writing using normal I/O')
            fid = open(fname, "wb")
    else:
        logger.debug('Writing using %s I/O' % type(fname))
        fid = fname
        # rewind so the header lands at the start of the stream
        fid.seek(0)
    # Write the compulsory items
    write_id(fid, FIFF.FIFF_FILE_ID, id_)
    write_int(fid, FIFF.FIFF_DIR_POINTER, -1)
    write_int(fid, FIFF.FIFF_FREE_LIST, -1)
    return fid
def check_fiff_length(fid, close=True):
    """Raise IOError once the file outgrows FIFF's signed 32-bit offsets.

    Parameters
    ----------
    fid : file
        Open file whose current position is checked.
    close : bool
        Whether to close the file before raising.
    """
    # FIFF addresses data with signed 32-bit offsets, so anything past
    # 2 ** 31 bytes cannot be referenced.
    limit = 2147483648
    if fid.tell() <= limit:
        return
    if close:
        fid.close()
    raise IOError('FIFF file exceeded 2GB limit, please split file or '
                  'save to a different format')
def end_file(fid):
    """Writes the closing tags to a fif file and closes the file"""
    data_size = 0
    # terminating NOP tag with a "no next" pointer marks end-of-file
    fid.write(np.array(FIFF.FIFF_NOP, dtype='>i4').tostring())
    fid.write(np.array(FIFF.FIFFT_VOID, dtype='>i4').tostring())
    fid.write(np.array(data_size, dtype='>i4').tostring())
    fid.write(np.array(FIFF.FIFFV_NEXT_NONE, dtype='>i4').tostring())
    # verify the finished file still fits in 32-bit offsets before closing
    check_fiff_length(fid)
    fid.close()
def write_coord_trans(fid, trans):
    """Writes a coordinate transformation structure.

    Parameters
    ----------
    trans : dict
        With 'from' and 'to' frame ids and a 4x4 'trans' matrix; both the
        transform and its inverse are stored as 3x3 rotation + translation.
    """
    # 2 transforms x (9 rot + 3 move) floats + 2 frame-id ints
    data_size = 4 * 2 * 12 + 4 * 2
    fid.write(np.array(FIFF.FIFF_COORD_TRANS, dtype='>i4').tostring())
    fid.write(np.array(FIFF.FIFFT_COORD_TRANS_STRUCT, dtype='>i4').tostring())
    fid.write(np.array(data_size, dtype='>i4').tostring())
    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
    fid.write(np.array(trans['from'], dtype='>i4').tostring())
    fid.write(np.array(trans['to'], dtype='>i4').tostring())
    # The transform...
    rot = trans['trans'][:3, :3]
    move = trans['trans'][:3, 3]
    fid.write(np.array(rot, dtype='>f4').tostring())
    fid.write(np.array(move, dtype='>f4').tostring())
    # ...and its inverse
    trans_inv = linalg.inv(trans['trans'])
    rot = trans_inv[:3, :3]
    move = trans_inv[:3, 3]
    fid.write(np.array(rot, dtype='>f4').tostring())
    fid.write(np.array(move, dtype='>f4').tostring())
def write_ch_info(fid, ch):
    """Writes a channel information record to a fif file.

    Parameters
    ----------
    ch : dict
        Channel info with scanno/logno/kind/range/cal/coil_type/loc/unit/
        unit_mul/ch_name fields.
    """
    # 13 ints/floats + 7 more (loc tail etc.) + 16-byte fixed name field
    data_size = 4 * 13 + 4 * 7 + 16
    fid.write(np.array(FIFF.FIFF_CH_INFO, dtype='>i4').tostring())
    fid.write(np.array(FIFF.FIFFT_CH_INFO_STRUCT, dtype='>i4').tostring())
    fid.write(np.array(data_size, dtype='>i4').tostring())
    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
    # Start writing fiffChInfoRec
    fid.write(np.array(ch['scanno'], dtype='>i4').tostring())
    fid.write(np.array(ch['logno'], dtype='>i4').tostring())
    fid.write(np.array(ch['kind'], dtype='>i4').tostring())
    fid.write(np.array(ch['range'], dtype='>f4').tostring())
    fid.write(np.array(ch['cal'], dtype='>f4').tostring())
    fid.write(np.array(ch['coil_type'], dtype='>i4').tostring())
    fid.write(np.array(ch['loc'], dtype='>f4').tostring())  # writing 12 values
    # unit and unit multiplier
    fid.write(np.array(ch['unit'], dtype='>i4').tostring())
    fid.write(np.array(ch['unit_mul'], dtype='>i4').tostring())
    # Finally channel name: truncated to 15 chars so it fits the 16-byte
    # field with room for NUL padding
    if len(ch['ch_name']):
        ch_name = ch['ch_name'][:15]
    else:
        ch_name = ch['ch_name']
    fid.write(np.array(ch_name, dtype='>c').tostring())
    if len(ch_name) < 16:
        fid.write(b('\0') * (16 - len(ch_name)))
def write_dig_point(fid, dig):
    """Writes a digitizer data point into a fif file.

    Parameters
    ----------
    dig : dict
        Digitizer point with 'kind', 'ident' and 3-vector 'r'.
    """
    data_size = 5 * 4  # kind + ident ints, 3 position floats
    fid.write(np.array(FIFF.FIFF_DIG_POINT, dtype='>i4').tostring())
    fid.write(np.array(FIFF.FIFFT_DIG_POINT_STRUCT, dtype='>i4').tostring())
    fid.write(np.array(data_size, dtype='>i4').tostring())
    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
    # Start writing fiffDigPointRec
    fid.write(np.array(dig['kind'], dtype='>i4').tostring())
    fid.write(np.array(dig['ident'], dtype='>i4').tostring())
    fid.write(np.array(dig['r'][:3], dtype='>f4').tostring())
def write_float_sparse_rcs(fid, kind, mat):
    """Writes a single-precision floating-point sparse (RCS/CSR) matrix tag.

    Parameters
    ----------
    mat : scipy.sparse matrix
        Expected in CSR form (data / indices / indptr are written directly).
    """
    # sparse-matrix flag bits for the tag type
    FIFFT_MATRIX = 16416 << 16
    FIFFT_MATRIX_FLOAT_RCS = FIFF.FIFFT_FLOAT | FIFFT_MATRIX
    nnzm = mat.nnz
    nrow = mat.shape[0]
    # values + column indices + row pointers + 4 trailing dim ints
    data_size = 4 * nnzm + 4 * nnzm + 4 * (nrow + 1) + 4 * 4
    fid.write(np.array(kind, dtype='>i4').tostring())
    fid.write(np.array(FIFFT_MATRIX_FLOAT_RCS, dtype='>i4').tostring())
    fid.write(np.array(data_size, dtype='>i4').tostring())
    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
    fid.write(np.array(mat.data, dtype='>f4').tostring())
    fid.write(np.array(mat.indices, dtype='>i4').tostring())
    fid.write(np.array(mat.indptr, dtype='>i4').tostring())
    dims = [nnzm, mat.shape[0], mat.shape[1], 2]
    fid.write(np.array(dims, dtype='>i4').tostring())
    check_fiff_length(fid)
def _generate_meas_id():
    """Helper to generate a new meas_id dict (version, machine id, time)."""
    id_ = dict()
    id_['version'] = FIFF.FIFFC_VERSION
    id_['machid'] = get_machid()
    id_['secs'], id_['usecs'] = _date_now()
    return id_
def _date_now():
"""Helper to get date in secs, usecs"""
now = time.time()
# Get date in secs/usecs (as in `fill_measurement_info` in
# mne/forward/forward.py)
date_arr = np.array([np.floor(now), 1e6 * (now - np.floor(now))],
dtype='int32')
return date_arr
| |
from pytest import raises
from desmod.queue import PriorityItem, PriorityQueue, Queue
def test_mq(env):
    """Two producers and two consumers exchange messages through a
    capacity-2 queue in FIFO order."""
    queue = Queue(env, capacity=2)

    def producer(msg, wait):
        yield env.timeout(wait)
        yield queue.put(msg)

    def consumer(expected_msg, wait):
        yield env.timeout(wait)
        msg = yield queue.get()
        assert msg == expected_msg

    env.process(producer('1st', 0))
    env.process(producer('2nd', 1))
    env.process(consumer('1st', 0))
    env.process(consumer('2nd', 1))
    env.run()
def test_queue_peek(env):
    """peek() raises IndexError on an empty queue and returns (without
    removing) the head item otherwise."""
    queue = Queue(env)
    assert queue.is_empty
    with raises(IndexError):
        queue.peek()
    queue2 = Queue(env, items=[9, 8, 7])
    assert not queue2.is_empty
    assert queue2.peek() == 9
def test_queue_overflow(env):
    """A hard-capped queue raises OverflowError when put() exceeds capacity
    instead of blocking."""
    def proc(env, queue):
        yield queue.put(1)
        yield env.timeout(1)
        yield queue.put(1)
        yield env.timeout(1)
        # queue is now at capacity (2); a hard-capped put must raise
        with raises(OverflowError):
            yield queue.put(1)

    queue = Queue(env, capacity=2, hard_cap=True)
    env.process(proc(env, queue))
    env.run()
def test_mq_when_full(env):
    """when_full() and when_any() events fire as the queue fills; a slow
    consumer later drains part of the queue."""
    queue = Queue(env, capacity=2)
    result = []

    def producer(env):
        yield env.timeout(1)
        for i in range(5):
            yield queue.put(i)
            yield env.timeout(1)

    def consumer(env):
        yield env.timeout(5)
        for i in range(3):
            msg = yield queue.get()
            assert msg == i

    def full_waiter(env):
        yield queue.when_full()
        result.append('full')

    def any_waiter(env):
        yield queue.when_any()
        # first put happens at t=1, so when_any fires then
        assert env.now == 1
        result.append('any')

    env.process(producer(env))
    env.process(consumer(env))
    env.process(full_waiter(env))
    env.process(any_waiter(env))
    env.process(any_waiter(env))
    env.run()
    # producer put 5, consumer took 3 -> items remain and queue ends full
    assert queue.items
    assert queue.is_full
    assert 'full' in result
    assert result.count('any') == 2
def test_priority_mq(env):
    """Items put in descending priority order come out ascending; the item
    payload may be unhashable (only the priority key is compared)."""
    queue = PriorityQueue(env)

    def producer(env):
        for priority in reversed(range(5)):
            item = set([priority])  # unhashable
            yield queue.put(PriorityItem(priority, item))
            yield env.timeout(1)

    def consumer(env):
        yield env.timeout(5)
        for i in range(5):
            msg = yield queue.get()
            assert msg.item == set([i])
            yield env.timeout(1)

    env.process(producer(env))
    env.process(consumer(env))
    env.run()
def test_queue_repr(env):
    """str() of a queue reports name, current size and capacity."""
    queue = Queue(env, name='hi', items=[3, 2, 1])
    assert str(queue) == "Queue(name='hi' size=3 capacity=inf)"
    pri_queue = PriorityQueue(env, capacity=3)
    assert str(pri_queue) == 'PriorityQueue(name=None size=0 capacity=3)'
def test_when_not_full(env):
    """when_not_full() fires as soon as a full queue loses an item, and
    fires immediately once the queue is already below capacity."""
    queue = Queue(env, capacity=2, items=[0, 1])

    def consumer(env):
        for i in range(2):
            yield env.timeout(3)
            msg = yield queue.get()
            assert msg == i

    def not_full_waiter(env):
        yield queue.when_not_full()
        assert env.now == 3
        # queue is still not full, so a second wait triggers at once
        yield queue.when_not_full()
        assert env.now == 3

    env.process(consumer(env))
    env.process(not_full_waiter(env))
    env.run()
def test_when_empty(env):
    """when_empty() triggers immediately on an empty queue and again only
    after the last item is removed; used as a context manager it is
    cancelled on exit."""
    def proc(env, queue):
        yield queue.when_empty()
        assert env.now == 0
        yield queue.put('a')
        yield queue.put('b')
        with queue.when_empty() as when_empty_ev:
            assert not when_empty_ev.triggered
            yield env.timeout(1)
            item = yield queue.get()
            assert item == 'a'
            # one item ('b') still queued, so not empty yet
            assert not when_empty_ev.triggered
        with queue.when_empty() as when_empty_ev:
            assert not when_empty_ev.triggered
            yield env.timeout(1)
            with queue.get() as get_ev:
                item = yield get_ev
                assert item == 'b'
            # removing the final item empties the queue
            assert when_empty_ev.triggered
            yield when_empty_ev

    env.process(proc(env, Queue(env)))
    env.run()
def test_when_at_most(env):
    """when_at_most(n) triggers exactly when the queue size drops to n,
    independent of the order the events were requested."""
    def proc(env, queue):
        for item in 'abc':
            with queue.put(item) as put_ev:
                yield put_ev
        at_most = {}
        # request out of order to check ordering does not matter
        at_most[0] = queue.when_at_most(0)
        at_most[3] = queue.when_at_most(3)
        at_most[1] = queue.when_at_most(1)
        at_most[2] = queue.when_at_most(2)
        # size is 3: only the n>=3 event is satisfied already
        assert not at_most[0].triggered
        assert not at_most[1].triggered
        assert not at_most[2].triggered
        assert at_most[3].triggered
        item = yield queue.get()
        assert item == 'a'
        assert not at_most[0].triggered
        assert not at_most[1].triggered
        assert at_most[2].triggered
        item = yield queue.get()
        assert item == 'b'
        assert not at_most[0].triggered
        assert at_most[1].triggered
        item = yield queue.get()
        assert item == 'c'
        assert at_most[0].triggered

    env.process(proc(env, Queue(env)))
    env.run()
def test_when_at_least(env):
    """when_at_least(n) triggers once the queue size reaches n; an already
    triggered event stays triggered even if the size later drops."""
    def proc(env, queue):
        at_least = {}
        # request out of order to check ordering does not matter
        at_least[3] = queue.when_at_least(3)
        at_least[0] = queue.when_at_least(0)
        at_least[2] = queue.when_at_least(2)
        at_least[1] = queue.when_at_least(1)
        # empty queue already satisfies n == 0
        assert at_least[0].triggered
        assert not at_least[1].triggered
        assert not at_least[2].triggered
        assert not at_least[3].triggered
        yield queue.put('a')
        assert at_least[1].triggered
        assert not at_least[2].triggered
        assert not at_least[3].triggered
        yield queue.get()
        assert not at_least[2].triggered
        assert not at_least[3].triggered
        yield queue.put('b')
        # size is 1 again, but when_at_least(2) was requested before the
        # earlier get, so it still needs size 2
        assert not at_least[2].triggered
        assert not at_least[3].triggered
        yield queue.put('c')
        assert at_least[2].triggered
        assert not at_least[3].triggered
        yield queue.put('d')
        assert at_least[3].triggered

    env.process(proc(env, Queue(env)))
    env.run()
def test_queue_cancel(env):
    """Cancelled queue events (get, put, when_any, when_full,
    when_not_full) must never trigger, even long after cancellation."""
    queue = Queue(env, capacity=2)

    def producer(env):
        for i in range(5):
            yield env.timeout(5)
            yield queue.put(i)

    def consumer(env):
        for i in range(3):
            yield env.timeout(10)
            msg = yield queue.get()
            assert msg == i

    def canceller(env):
        any_ev = queue.when_any()
        get_ev = queue.get()
        full_ev = queue.when_full()
        yield env.timeout(1)
        # nothing produced yet at t=1
        assert not get_ev.triggered
        assert not any_ev.triggered
        assert not full_ev.triggered
        get_ev.cancel()
        any_ev.cancel()
        full_ev.cancel()
        assert not queue.is_full
        with queue.when_full() as when_full:
            yield when_full
        with queue.put(1) as put_ev:
            not_full_ev = queue.when_not_full()
            yield env.timeout(1)
            # queue is full, so neither the put nor not-full can fire yet
            assert not put_ev.triggered
            assert not not_full_ev.triggered
            not_full_ev.cancel()
        yield env.timeout(100)
        # cancelled events must stay untriggered for the rest of the sim
        assert not get_ev.triggered
        assert not any_ev.triggered
        # Bug fix: this assertion was duplicated (copy-paste) in the
        # original; a single check of put_ev suffices.
        assert not put_ev.triggered
        assert not not_full_ev.triggered

    env.process(producer(env))
    env.process(consumer(env))
    env.process(canceller(env))
    env.run()
| |
import os
import sys
import math
import platform
import numpy as np
from common.numpy_fast import clip, interp
from common.kalman.ekf import FastEKF1D, SimpleSensor
# radar tracks
SPEED, ACCEL = 0, 1  # Kalman filter states enum
rate, ratev = 20., 20.  # model and radar are both at 20Hz
ts = 1./rate
# First-order low-pass filter gains derived from the cutoff frequency and
# the sample period (discrete single-pole IIR smoothing).
freq_v_lat = 0.2  # Hz
k_v_lat = 2*np.pi*freq_v_lat*ts / (1 + 2*np.pi*freq_v_lat*ts)
freq_a_lead = .5  # Hz
k_a_lead = 2*np.pi*freq_a_lead*ts / (1 + 2*np.pi*freq_a_lead*ts)
# stationary qualification parameters
v_stationary_thr = 4.  # objects moving below this speed are classified as stationary
v_oncoming_thr = -3.9  # needs to be a bit lower in abs value than v_stationary_thr to not leave "holes"
v_ego_stationary = 4.  # no stationary object flag below this speed
class Track(object):
    """State for a single radar track: filtered kinematics of one detected
    object plus stationary/oncoming classification and a 1-D EKF estimate
    of the lead speed/acceleration."""

    def __init__(self):
        self.ekf = None            # lazily created on first update
        self.stationary = True
        self.initted = False       # True once update() has run at least once

    def update(self, d_rel, y_rel, v_rel, d_path, v_ego_t_aligned):
        """Ingest one radar measurement and refresh all derived state.

        Parameters
        ----------
        d_rel : longitudinal distance to the object [m]
        y_rel : lateral offset (negative LAT_DIST) [m]
        v_rel : relative speed [m/s]
        d_path : distance from the predicted path [m]
        v_ego_t_aligned : ego speed aligned to the radar timestamp [m/s]
        """
        # keep previous cycle's values for finite-difference derivatives
        if self.initted:
            self.dPathPrev = self.dPath
            self.vLeadPrev = self.vLead
            self.vRelPrev = self.vRel
        # relative values, copy
        self.dRel = d_rel  # LONG_DIST
        self.yRel = y_rel  # -LAT_DIST
        self.vRel = v_rel  # REL_SPEED
        # compute distance to path
        self.dPath = d_path
        # computed velocity and accelerations
        self.vLead = self.vRel + v_ego_t_aligned
        if not self.initted:
            self.aRel = 0.  # nidec gives no information about this
            self.vLat = 0.
            self.aLead = 0.
        else:
            # estimate acceleration by differentiating and low-pass filtering
            a_rel_unfilt = (self.vRel - self.vRelPrev) / ts
            a_rel_unfilt = clip(a_rel_unfilt, -10., 10.)
            self.aRel = k_a_lead * a_rel_unfilt + (1 - k_a_lead) * self.aRel
            v_lat_unfilt = (self.dPath - self.dPathPrev) / ts
            self.vLat = k_v_lat * v_lat_unfilt + (1 - k_v_lat) * self.vLat
            a_lead_unfilt = (self.vLead - self.vLeadPrev) / ts
            a_lead_unfilt = clip(a_lead_unfilt, -10., 10.)
            self.aLead = k_a_lead * a_lead_unfilt + (1 - k_a_lead) * self.aLead
        if self.stationary:
            # stationary objects can become non stationary, but not the other way around
            self.stationary = v_ego_t_aligned > v_ego_stationary and abs(self.vLead) < v_stationary_thr
        self.oncoming = self.vLead < v_oncoming_thr
        if self.ekf is None:
            # first measurement: seed the EKF with the observed lead speed
            self.ekf = FastEKF1D(ts, 1e3, [0.1, 1])
            self.ekf.state[SPEED] = self.vLead
            self.ekf.state[ACCEL] = 0
            self.lead_sensor = SimpleSensor(SPEED, 1, 2)
            self.vLeadK = self.vLead
            self.aLeadK = self.aLead
        else:
            self.ekf.update_scalar(self.lead_sensor.read(self.vLead))
            self.ekf.predict(ts)
            self.vLeadK = float(self.ekf.state[SPEED])
            self.aLeadK = float(self.ekf.state[ACCEL])
        if not self.initted:
            self.cnt = 1
            self.vision_cnt = 0
        else:
            self.cnt += 1
        self.initted = True
        self.vision = False

    def mix_vision(self, dist_to_vision, rel_speed_diff):
        """Mark the track as vision-confirmed if a camera detection is
        close enough in position and relative speed."""
        # rel speed is very hard to estimate from vision
        if dist_to_vision < 4.0 and rel_speed_diff < 10.:
            # vision point is never stationary
            self.stationary = False
            self.vision = True
            self.vision_cnt += 1

    def get_key_for_cluster(self):
        """Feature vector used for hierarchical clustering of tracks."""
        # Weigh y higher since radar is inaccurate in this dimension
        return [self.dRel, self.dPath*2, self.vRel]
# ******************* Cluster *******************
# On aarch64 (the device), load a prebundled copy of scipy's private
# _hierarchy extension from phonelibs -- presumably because full scipy is
# unavailable there (TODO confirm); elsewhere use scipy's own module.
if platform.machine() == 'aarch64':
    for x in sys.path:
        pp = os.path.join(x, "phonelibs/hierarchy/lib")
        if os.path.isfile(os.path.join(pp, "_hierarchy.so")):
            sys.path.append(pp)
            break
    import _hierarchy
else:
    from scipy.cluster import _hierarchy
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
    """Form flat clusters from a linkage matrix using the distance criterion.

    Stripped-down copy of scipy.cluster.hierarchy.fcluster; the extra
    parameters are unused here but kept to mirror scipy's signature.
    """
    # supersimplified function to get fast clustering. Got it from scipy
    Z = np.asarray(Z, order='c')
    n = Z.shape[0] + 1
    T = np.zeros((n,), dtype='i')
    _hierarchy.cluster_dist(Z, T, float(t), int(n))
    return T
RDR_TO_LDR = 2.7
def mean(l):
    """Arithmetic mean of a non-empty sequence."""
    total = sum(l)
    return total / len(l)
class Cluster(object):
    """A group of radar Tracks believed to belong to one physical object.

    Scalar kinematics are exposed as properties that average over member
    tracks; boolean flags are reduced with any()/all().
    """

    def __init__(self):
        self.tracks = set()

    def add(self, t):
        # add the first track
        self.tracks.add(t)

    # TODO: make generic
    @property
    def dRel(self):
        return mean([t.dRel for t in self.tracks])

    @property
    def yRel(self):
        return mean([t.yRel for t in self.tracks])

    @property
    def vRel(self):
        return mean([t.vRel for t in self.tracks])

    @property
    def aRel(self):
        return mean([t.aRel for t in self.tracks])

    @property
    def vLead(self):
        return mean([t.vLead for t in self.tracks])

    @property
    def aLead(self):
        return mean([t.aLead for t in self.tracks])

    @property
    def dPath(self):
        return mean([t.dPath for t in self.tracks])

    @property
    def vLat(self):
        return mean([t.vLat for t in self.tracks])

    @property
    def vLeadK(self):
        return mean([t.vLeadK for t in self.tracks])

    @property
    def aLeadK(self):
        return mean([t.aLeadK for t in self.tracks])

    @property
    def vision(self):
        # vision-confirmed if any member track is
        return any([t.vision for t in self.tracks])

    @property
    def vision_cnt(self):
        return max([t.vision_cnt for t in self.tracks])

    @property
    def stationary(self):
        # stationary only if every member track is
        return all([t.stationary for t in self.tracks])

    @property
    def oncoming(self):
        return all([t.oncoming for t in self.tracks])

    def toLive20(self, lead):
        """Fill a live20 lead message from this cluster's averaged state."""
        # RDR_TO_LDR corrects radar-to-lidar reference point offset
        lead.dRel = float(self.dRel) - RDR_TO_LDR
        lead.yRel = float(self.yRel)
        lead.vRel = float(self.vRel)
        lead.aRel = float(self.aRel)
        lead.vLead = float(self.vLead)
        lead.aLead = float(self.aLead)
        lead.dPath = float(self.dPath)
        lead.vLat = float(self.vLat)
        lead.vLeadK = float(self.vLeadK)
        lead.aLeadK = float(self.aLeadK)
        lead.status = True
        lead.fcw = False

    def __str__(self):
        ret = "x: %7.2f y: %7.2f v: %7.2f a: %7.2f" % (self.dRel, self.yRel, self.vRel, self.aRel)
        if self.stationary:
            ret += " stationary"
        if self.vision:
            ret += " vision"
        if self.oncoming:
            ret += " oncoming"
        if self.vision_cnt > 0:
            ret += " vision_cnt: %6.0f" % self.vision_cnt
        return ret

    def is_potential_lead(self, v_ego, enabled):
        """True if this cluster lies (or is cutting) within the ego path."""
        # predict cut-ins by extrapolating lateral speed by a lookahead time
        # lookahead time depends on cut-in distance. more attentive for close cut-ins
        # also, above 50 meters the predicted path isn't very reliable
        # the distance at which v_lat matters is higher at higher speed
        lookahead_dist = 40. + v_ego/1.2  # 40m at 0mph, ~70m at 80mph
        t_lookahead_v = [1., 0.]
        t_lookahead_bp = [10., lookahead_dist]
        # average dist
        d_path = self.dPath
        if enabled:
            t_lookahead = interp(self.dRel, t_lookahead_bp, t_lookahead_v)
            # correct d_path for lookahead time, considering only cut-ins and no more than 1m impact
            lat_corr = clip(t_lookahead * self.vLat, -1, 0)
        else:
            lat_corr = 0.
        d_path = max(d_path + lat_corr, 0)
        if d_path < 1.5 and not self.stationary and not self.oncoming:
            return True
        else:
            return False

    def is_potential_lead2(self, lead_clusters):
        """True if this cluster is a distinct second lead (not another axle
        of the first lead vehicle)."""
        if len(lead_clusters) > 0:
            lead_cluster = lead_clusters[0]
            # check if the new lead is too close and roughly at the same speed of the first lead: it might just be the second axle of the same vehicle
            if (self.dRel - lead_cluster.dRel) < 8. and abs(self.vRel - lead_cluster.vRel) < 1.:
                return False
            else:
                return True
        else:
            return False
| |
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from pytest import raises as assert_raises
from scipy.sparse.csgraph import (shortest_path, dijkstra, johnson,
bellman_ford, construct_dist_matrix, NegativeCycleError)
# 5-node test graphs (0 == no edge) and their hand-computed shortest-path
# solutions; -9999 is the "no predecessor" sentinel used by csgraph.
directed_G = np.array([[0, 3, 3, 0, 0],
                       [0, 0, 0, 2, 4],
                       [0, 0, 0, 0, 0],
                       [1, 0, 0, 0, 0],
                       [2, 0, 0, 2, 0]], dtype=float)
undirected_G = np.array([[0, 3, 3, 1, 2],
                         [3, 0, 0, 2, 4],
                         [3, 0, 0, 0, 0],
                         [1, 2, 0, 0, 2],
                         [2, 4, 0, 2, 0]], dtype=float)
unweighted_G = (directed_G > 0).astype(float)
directed_SP = [[0, 3, 3, 5, 7],
               [3, 0, 6, 2, 4],
               [np.inf, np.inf, 0, np.inf, np.inf],
               [1, 4, 4, 0, 8],
               [2, 5, 5, 2, 0]]
directed_pred = np.array([[-9999, 0, 0, 1, 1],
                          [3, -9999, 0, 1, 1],
                          [-9999, -9999, -9999, -9999, -9999],
                          [3, 0, 0, -9999, 1],
                          [4, 0, 0, 4, -9999]], dtype=float)
undirected_SP = np.array([[0, 3, 3, 1, 2],
                          [3, 0, 6, 2, 4],
                          [3, 6, 0, 4, 5],
                          [1, 2, 4, 0, 2],
                          [2, 4, 5, 2, 0]], dtype=float)
# expected results when path length is limited (dijkstra's `limit` arg)
undirected_SP_limit_2 = np.array([[0, np.inf, np.inf, 1, 2],
                                  [np.inf, 0, np.inf, 2, np.inf],
                                  [np.inf, np.inf, 0, np.inf, np.inf],
                                  [1, 2, np.inf, 0, 2],
                                  [2, np.inf, np.inf, 2, 0]], dtype=float)
undirected_SP_limit_0 = np.ones((5, 5), dtype=float) - np.eye(5)
undirected_SP_limit_0[undirected_SP_limit_0 > 0] = np.inf
undirected_pred = np.array([[-9999, 0, 0, 0, 0],
                            [1, -9999, 0, 1, 1],
                            [2, 0, -9999, 0, 0],
                            [3, 3, 0, -9999, 3],
                            [4, 4, 0, 4, -9999]], dtype=float)
# all shortest-path method selectors exercised by the tests below
methods = ['auto', 'FW', 'D', 'BF', 'J']
def test_dijkstra_limit():
    """dijkstra's `limit` argument prunes paths longer than the limit."""
    expected = {0: undirected_SP_limit_0,
                2: undirected_SP_limit_2,
                np.inf: undirected_SP}
    for limit, result in expected.items():
        SP = dijkstra(undirected_G, directed=False, limit=limit)
        assert_array_almost_equal(SP, result)
def test_directed():
    """Every method reproduces the known directed shortest-path matrix."""
    for method in methods:
        SP = shortest_path(directed_G, method=method, directed=True,
                           overwrite=False)
        assert_array_almost_equal(SP, directed_SP)
def test_undirected():
    """Treating either graph as undirected yields the undirected solution:
    the directed graph with directed=False, and the symmetric graph with
    directed=True, both match undirected_SP."""
    for method in methods:
        SP1 = shortest_path(directed_G, method=method, directed=False,
                            overwrite=False)
        assert_array_almost_equal(SP1, undirected_SP)
        SP2 = shortest_path(undirected_G, method=method, directed=True,
                            overwrite=False)
        assert_array_almost_equal(SP2, undirected_SP)
def test_shortest_path_indices():
    """The `indices` argument restricts sources and preserves its shape in
    the output; the Floyd-Warshall method must reject it."""
    indices = np.arange(4)

    def check(func, indshape):
        # output shape is the indices shape plus the node axis
        outshape = indshape + (5,)
        SP = func(directed_G, directed=False,
                  indices=indices.reshape(indshape))
        assert_array_almost_equal(SP, undirected_SP[indices].reshape(outshape))

    for indshape in [(4,), (4, 1), (2, 2)]:
        for func in (dijkstra, bellman_ford, johnson, shortest_path):
            check(func, indshape)
    # FW computes all-pairs and does not support per-source indices
    assert_raises(ValueError, shortest_path, directed_G, method='FW',
                  indices=indices)
def test_predecessors():
    """return_predecessors=True yields both distances and the expected
    predecessor matrices for directed and undirected traversal."""
    SP_res = {True: directed_SP,
              False: undirected_SP}
    pred_res = {True: directed_pred,
                False: undirected_pred}

    def check(method, directed):
        SP, pred = shortest_path(directed_G, method, directed=directed,
                                 overwrite=False,
                                 return_predecessors=True)
        assert_array_almost_equal(SP, SP_res[directed])
        assert_array_almost_equal(pred, pred_res[directed])

    for method in methods:
        for directed in (True, False):
            check(method, directed)
def test_construct_shortest_path():
    """Distances rebuilt from the predecessor matrix via
    construct_dist_matrix must match the directly computed distances."""
    def check(method, directed):
        # NOTE(review): `method` is never passed to shortest_path here, so
        # the loop re-runs the default method -- confirm whether method
        # coverage was intended.
        SP1, pred = shortest_path(directed_G,
                                  directed=directed,
                                  overwrite=False,
                                  return_predecessors=True)
        SP2 = construct_dist_matrix(directed_G, pred, directed=directed)
        assert_array_almost_equal(SP1, SP2)

    for method in methods:
        for directed in (True, False):
            check(method, directed)
def test_unweighted_path():
    """unweighted=True on the weighted graph must equal a weighted run on
    the 0/1 adjacency version of the same graph."""
    def check(method, directed):
        SP1 = shortest_path(directed_G,
                            directed=directed,
                            overwrite=False,
                            unweighted=True)
        SP2 = shortest_path(unweighted_G,
                            directed=directed,
                            overwrite=False,
                            unweighted=False)
        assert_array_almost_equal(SP1, SP2)

    for method in methods:
        for directed in (True, False):
            check(method, directed)
def test_negative_cycles():
    """Methods that support negative weights must raise NegativeCycleError
    when the graph contains a negative cycle."""
    # create a small graph with a negative cycle
    graph = np.ones([5, 5])
    graph.flat[::6] = 0  # zero the diagonal
    graph[1, 2] = -2

    def check(method, directed):
        assert_raises(NegativeCycleError, shortest_path, graph, method,
                      directed)

    # 'D' (Dijkstra) is excluded: it does not support negative weights
    for method in ['FW', 'J', 'BF']:
        for directed in (True, False):
            check(method, directed)
def test_masked_input():
    """A masked array (masked entries = absent edges) must give the same
    shortest paths as the dense graph with zeros for absent edges."""
    G = np.ma.masked_equal(directed_G, 0)

    def check(method):
        # Bug fix: the masked array G must be passed here; the original
        # passed directed_G, so the masked-input path was never exercised.
        SP = shortest_path(G, method=method, directed=True,
                           overwrite=False)
        assert_array_almost_equal(SP, directed_SP)

    for method in methods:
        check(method)
def test_overwrite():
    """shortest_path(overwrite=False) must leave its input untouched."""
    G = np.array([[0, 3, 3, 1, 2],
                  [3, 0, 0, 2, 4],
                  [3, 0, 0, 0, 0],
                  [1, 2, 0, 0, 2],
                  [2, 4, 0, 2, 0]], dtype=float)
    original = G.copy()
    shortest_path(original, overwrite=False)
    assert_array_equal(original, G)
| |
import nibabel as nib
import numpy as np
import numpy.testing as npt
from dipy.core.sphere import HemiSphere, unit_octahedron
from dipy.core.gradients import gradient_table
from dipy.data import get_data
from dipy.tracking.local import (LocalTracking, ThresholdTissueClassifier,
DirectionGetter, TissueClassifier,
BinaryTissueClassifier)
from dipy.direction import (ProbabilisticDirectionGetter,
DeterministicMaximumDirectionGetter)
from dipy.tracking.local.interpolation import trilinear_interpolate4d
from dipy.tracking.local.localtracking import TissueTypes
def test_stop_conditions():
    """This tests that the Local Tracker behaves as expected for the
    following tissue types.
    """
    # TissueTypes.TRACKPOINT = 1
    # TissueTypes.ENDPOINT = 2
    # TissueTypes.INVALIDPOINT = 0
    # Each row is one streamline scenario; tracking proceeds along z.
    tissue = np.array([[2, 1, 1, 2, 1],
                       [2, 2, 1, 1, 2],
                       [1, 1, 1, 1, 1],
                       [1, 1, 1, 2, 2],
                       [0, 1, 1, 1, 2],
                       [0, 1, 1, 0, 2],
                       [1, 0, 1, 1, 1]])
    tissue = tissue[None]  # add a leading axis -> shape (1, 7, 5)

    class SimpleTissueClassifier(TissueClassifier):
        # NOTE(review): p (length 3) is compared elementwise against
        # tissue.shape via broadcasting -- confirm this bounds check is
        # intended for all three axes.
        def check_point(self, point):
            p = np.round(point).astype(int)
            if any(p < 0) or any(p >= tissue.shape):
                return TissueTypes.OUTSIDEIMAGE
            return tissue[p[0], p[1], p[2]]

    class SimpleDirectionGetter(DirectionGetter):
        def initial_direction(self, point):
            # Test tracking along the rows (z direction)
            # of the tissue array above
            p = np.round(point).astype(int)
            if (any(p < 0) or
                    any(p >= tissue.shape) or
                    tissue[p[0], p[1], p[2]] == TissueTypes.INVALIDPOINT):
                return np.array([])
            return np.array([[0., 0., 1.]])

        def get_direction(self, p, d):
            # Always keep previous direction
            return 0

    # Create a seeds along
    x = np.array([0., 0, 0, 0, 0, 0, 0])
    y = np.array([0., 1, 2, 3, 4, 5, 6])
    z = np.array([1., 1, 1, 0, 1, 1, 1])
    seeds = np.column_stack([x, y, z])
    # Set up tracking
    dg = SimpleDirectionGetter()
    tc = SimpleTissueClassifier()
    streamlines_not_all = LocalTracking(direction_getter=dg,
                                        tissue_classifier=tc,
                                        seeds=seeds,
                                        affine=np.eye(4),
                                        step_size=1.,
                                        return_all=False)
    streamlines_all = LocalTracking(direction_getter=dg,
                                    tissue_classifier=tc,
                                    seeds=seeds,
                                    affine=np.eye(4),
                                    step_size=1.,
                                    return_all=True)
    streamlines_not_all = iter(streamlines_not_all)  # valid streamlines only
    streamlines_all = iter(streamlines_all)  # all streamlines
    # Check that the first streamline stops at 0 and 3 (ENDPOINT)
    y = 0
    sl = next(streamlines_not_all)
    npt.assert_equal(sl[0], [0, y, 0])
    npt.assert_equal(sl[-1], [0, y, 3])
    npt.assert_equal(len(sl), 4)
    sl = next(streamlines_all)
    npt.assert_equal(sl[0], [0, y, 0])
    npt.assert_equal(sl[-1], [0, y, 3])
    npt.assert_equal(len(sl), 4)
    # Check that the first streamline stops at 0 and 4 (ENDPOINT)
    y = 1
    sl = next(streamlines_not_all)
    npt.assert_equal(sl[0], [0, y, 0])
    npt.assert_equal(sl[-1], [0, y, 4])
    npt.assert_equal(len(sl), 5)
    sl = next(streamlines_all)
    npt.assert_equal(sl[0], [0, y, 0])
    npt.assert_equal(sl[-1], [0, y, 4])
    npt.assert_equal(len(sl), 5)
    # This streamline should be the same as above. This row does not have
    # ENDPOINTs, but the streamline should stop at the edge and not include
    # OUTSIDEIMAGE points.
    y = 2
    sl = next(streamlines_not_all)
    npt.assert_equal(sl[0], [0, y, 0])
    npt.assert_equal(sl[-1], [0, y, 4])
    npt.assert_equal(len(sl), 5)
    sl = next(streamlines_all)
    npt.assert_equal(sl[0], [0, y, 0])
    npt.assert_equal(sl[-1], [0, y, 4])
    npt.assert_equal(len(sl), 5)
    # If we seed on the edge, the first (or last) point in the streamline
    # should be the seed.
    y = 3
    sl = next(streamlines_not_all)
    npt.assert_equal(sl[0], seeds[y])
    sl = next(streamlines_all)
    npt.assert_equal(sl[0], seeds[y])
    # The last 3 seeds should not produce streamlines,
    # INVALIDPOINT streamlines are rejected (return_all=False).
    npt.assert_equal(len(list(streamlines_not_all)), 0)
    # The last 3 seeds should produce invalid streamlines,
    # INVALIDPOINT streamlines are kept (return_all=True).
    # The streamline stops at 0 (INVALIDPOINT) and 4 (ENDPOINT)
    y = 4
    sl = next(streamlines_all)
    npt.assert_equal(sl[0], [0, y, 0])
    npt.assert_equal(sl[-1], [0, y, 4])
    npt.assert_equal(len(sl), 5)
    # The streamline stops at 0 (INVALIDPOINT) and 4 (INVALIDPOINT)
    y = 5
    sl = next(streamlines_all)
    npt.assert_equal(sl[0], [0, y, 0])
    npt.assert_equal(sl[-1], [0, y, 3])
    npt.assert_equal(len(sl), 4)
    # The last streamline should contain only one point, the seed point,
    # because no valid inital direction was returned.
    y = 6
    sl = next(streamlines_all)
    npt.assert_equal(sl[0], seeds[y])
    npt.assert_equal(sl[-1], seeds[y])
    npt.assert_equal(len(sl), 1)
def test_trilinear_interpolate():
    """trilinear_interpolate4d reproduces a linear function exactly,
    clamps at volume edges, and raises IndexError clearly out of bounds."""
    a, b, c = np.random.random(3)

    def linear_function(x, y, z):
        # exactly representable by trilinear interpolation
        return a * x + b * y + c * z

    N = 6
    x, y, z = np.mgrid[:N, :N, :N]
    data = np.empty((N, N, N, 2))
    data[..., 0] = linear_function(x, y, z)
    data[..., 1] = 99.
    # Use a point not near the edges
    point = np.array([2.1, 4.8, 3.3])
    out = trilinear_interpolate4d(data, point)
    expected = [linear_function(*point), 99.]
    npt.assert_array_almost_equal(out, expected)
    # Pass in out ourselves
    out[:] = -1
    trilinear_interpolate4d(data, point, out)
    npt.assert_array_almost_equal(out, expected)
    # use a point close to an edge
    point = np.array([-.1, -.1, -.1])
    expected = [0., 99.]
    out = trilinear_interpolate4d(data, point)
    npt.assert_array_almost_equal(out, expected)
    # different edge
    point = np.array([2.4, 5.4, 3.3])
    # On the edge 5.4 get treated as the max y value, 5.
    expected = [linear_function(point[0], 5., point[2]), 99.]
    out = trilinear_interpolate4d(data, point)
    npt.assert_array_almost_equal(out, expected)
    # Test index errors
    point = np.array([2.4, 5.5, 3.3])
    npt.assert_raises(IndexError, trilinear_interpolate4d, data, point)
    point = np.array([2.4, -1., 3.3])
    npt.assert_raises(IndexError, trilinear_interpolate4d, data, point)
def test_ProbabilisticOdfWeightedTracker():
    """This tests that the Probabalistic Direction Getter plays nice
    LocalTracking and produces reasonable streamlines in a simple example.
    """
    sphere = HemiSphere.from_sphere(unit_octahedron)
    # A simple image with three possible configurations, a vertical tract,
    # a horizontal tract and a crossing
    # rows of pmf_lookup: 0=vertical, 1=horizontal, 2=other axis, 3=crossing
    pmf_lookup = np.array([[0., 0., 1.],
                           [1., 0., 0.],
                           [0., 1., 0.],
                           [.6, .4, 0.]])
    simple_image = np.array([[0, 1, 0, 0, 0, 0],
                             [0, 1, 0, 0, 0, 0],
                             [0, 3, 2, 2, 2, 0],
                             [0, 1, 0, 0, 0, 0],
                             [0, 1, 0, 0, 0, 0],
                             ])
    simple_image = simple_image[..., None]
    pmf = pmf_lookup[simple_image]
    # 30 identical seeds: the probabilistic getter should explore both
    # branches of the crossing across repetitions
    seeds = [np.array([1., 1., 0.])] * 30
    mask = (simple_image > 0).astype(float)
    tc = ThresholdTissueClassifier(mask, .5)
    dg = ProbabilisticDirectionGetter.from_pmf(pmf, 90, sphere, pmf_threshold=0.1)
    streamlines = LocalTracking(dg, tc, seeds, np.eye(4), 1.)
    expected = [np.array([[0., 1., 0.],
                          [1., 1., 0.],
                          [2., 1., 0.],
                          [2., 2., 0.],
                          [2., 3., 0.],
                          [2., 4., 0.],
                          [2., 5., 0.]]),
                np.array([[0., 1., 0.],
                          [1., 1., 0.],
                          [2., 1., 0.],
                          [3., 1., 0.],
                          [4., 1., 0.]])]

    def allclose(x, y):
        return x.shape == y.shape and np.allclose(x, y)

    path = [False, False]
    for sl in streamlines:
        if allclose(sl, expected[0]):
            path[0] = True
        elif allclose(sl, expected[1]):
            path[1] = True
        else:
            raise AssertionError()
    # both branches must have been taken at least once
    npt.assert_(all(path))
    # The first path is not possible if 90 degree turns are excluded
    dg = ProbabilisticDirectionGetter.from_pmf(pmf, 80, sphere,
                                               pmf_threshold=0.1)
    streamlines = LocalTracking(dg, tc, seeds, np.eye(4), 1.)
    for sl in streamlines:
        npt.assert_(np.allclose(sl, expected[1]))
    # The first path is not possible if pmf_threshold > 0.4
    dg = ProbabilisticDirectionGetter.from_pmf(pmf, 90, sphere,
                                               pmf_threshold=0.5)
    streamlines = LocalTracking(dg, tc, seeds, np.eye(4), 1.)
    for sl in streamlines:
        npt.assert_(np.allclose(sl, expected[1]))
def test_MaximumDeterministicTracker():
    """This tests that the Maximum Deterministic Direction Getter plays
    nicely with LocalTracking and produces reasonable streamlines in a
    simple example.
    """
    sphere = HemiSphere.from_sphere(unit_octahedron)
    # A simple image with three possible configurations, a vertical tract,
    # a horizontal tract and a crossing
    pmf_lookup = np.array([[0., 0., 1.],
                           [1., 0., 0.],
                           [0., 1., 0.],
                           [.4, .6, 0.]])
    simple_image = np.array([[0, 1, 0, 0, 0, 0],
                             [0, 1, 0, 0, 0, 0],
                             [0, 3, 2, 2, 2, 0],
                             [0, 1, 0, 0, 0, 0],
                             [0, 1, 0, 0, 0, 0],
                             ])
    simple_image = simple_image[..., None]
    # Voxel labels index into pmf_lookup; label 3 (the crossing) mixes two
    # directions with probabilities .4/.6.
    pmf = pmf_lookup[simple_image]
    seeds = [np.array([1., 1., 0.])] * 30
    mask = (simple_image > 0).astype(float)
    tc = ThresholdTissueClassifier(mask, .5)
    dg = DeterministicMaximumDirectionGetter.from_pmf(pmf, 90, sphere,
                                                      pmf_threshold=0.1)
    streamlines = LocalTracking(dg, tc, seeds, np.eye(4), 1.)
    # expected[0]: full path through the crossing; expected[1]: the path
    # when 90 degree turns are excluded; expected[2]: stopping at the
    # crossing when neither branch survives the pmf threshold.
    expected = [np.array([[0., 1., 0.],
                          [1., 1., 0.],
                          [2., 1., 0.],
                          [2., 2., 0.],
                          [2., 3., 0.],
                          [2., 4., 0.],
                          [2., 5., 0.]]),
                np.array([[0., 1., 0.],
                          [1., 1., 0.],
                          [2., 1., 0.],
                          [3., 1., 0.],
                          [4., 1., 0.]]),
                np.array([[0., 1., 0.],
                          [1., 1., 0.],
                          [2., 1., 0.]])]
    def allclose(x, y):
        # Require identical shapes; np.allclose alone would broadcast.
        return x.shape == y.shape and np.allclose(x, y)
    # Deterministic tracking: every seed must follow the same
    # maximum-probability branch, i.e. expected[0].
    for sl in streamlines:
        if not allclose(sl, expected[0]):
            raise AssertionError()
    # The first path is not possible if 90 degree turns are excluded
    dg = DeterministicMaximumDirectionGetter.from_pmf(pmf, 80, sphere,
                                                      pmf_threshold=0.1)
    streamlines = LocalTracking(dg, tc, seeds, np.eye(4), 1.)
    for sl in streamlines:
        npt.assert_(np.allclose(sl, expected[1]))
    # Both paths are impossible if 90 degree turns are excluded and
    # pmf_threshold is larger than 0.4.  Streamlines should stop at
    # the crossing.
    dg = DeterministicMaximumDirectionGetter.from_pmf(pmf, 80, sphere,
                                                      pmf_threshold=0.5)
    streamlines = LocalTracking(dg, tc, seeds, np.eye(4), 1.)
    for sl in streamlines:
        npt.assert_(np.allclose(sl, expected[2]))
def test_affine_transformations():
    """This tests that the input affine is properly handled by
    LocalTracking and produces reasonable streamlines in a simple example.
    """
    sphere = HemiSphere.from_sphere(unit_octahedron)
    # A simple image with three possible configurations, a vertical tract,
    # a horizontal tract and a crossing
    pmf_lookup = np.array([[0., 0., 1.],
                           [1., 0., 0.],
                           [0., 1., 0.],
                           [.4, .6, 0.]])
    simple_image = np.array([[0, 0, 0, 0, 0, 0],
                             [0, 1, 0, 0, 0, 0],
                             [0, 3, 2, 2, 2, 0],
                             [0, 1, 0, 0, 0, 0],
                             [0, 0, 0, 0, 0, 0],
                             ])
    simple_image = simple_image[..., None]
    pmf = pmf_lookup[simple_image]
    # One seed per expected streamline below.
    seeds = [np.array([1., 1., 0.]),
             np.array([2., 4., 0.])]
    # Reference streamlines generated with the identity affine; all other
    # affines must reproduce these after inverse transformation.
    expected = [np.array([[0., 1., 0.],
                          [1., 1., 0.],
                          [2., 1., 0.],
                          [3., 1., 0.],
                          [4., 1., 0.]]),
                np.array([[2., 0., 0.],
                          [2., 1., 0.],
                          [2., 2., 0.],
                          [2., 3., 0.],
                          [2., 4., 0.],
                          [2., 5., 0.]])]
    mask = (simple_image > 0).astype(float)
    tc = BinaryTissueClassifier(mask)
    dg = DeterministicMaximumDirectionGetter.from_pmf(pmf, 60, sphere,
                                                      pmf_threshold=0.1)
    # NOTE(review): this instance is never iterated; it only exercises
    # construction before being reassigned inside the loop below.
    streamlines = LocalTracking(dg, tc, seeds, np.eye(4), 1.)
    # TST- bad affine wrong shape
    bad_affine = np.eye(3)
    npt.assert_raises(ValueError, LocalTracking, dg, tc, seeds, bad_affine, 1.)
    # TST - bad affine with shearing
    bad_affine = np.eye(4)
    bad_affine[0, 1] = 1.
    npt.assert_raises(ValueError, LocalTracking, dg, tc, seeds, bad_affine, 1.)
    # TST - identity
    a0 = np.eye(4)
    # TST - affines with positive/negative offsets
    a1 = np.eye(4)
    a1[:3, 3] = [1, 2, 3]
    a2 = np.eye(4)
    a2[:3, 3] = [-2, 0, -1]
    # TST - affine with scaling
    a3 = np.eye(4)
    a3[0, 0] = a3[1, 1] = a3[2, 2] = 8
    # TST - affine with axes inverting (negative value)
    a4 = np.eye(4)
    a4[1, 1] = a4[2, 2] = -1
    # TST - combined affines
    a5 = a1 + a2 + a3
    a5[3, 3] = 1
    # TST - in vivo affine example
    # Sometimes data have affines with tiny shear components.
    # For example, the small_101D data-set has some of that:
    fdata, _, _ = get_data('small_101D')
    a6 = nib.load(fdata).affine
    for affine in [a0, a1, a2, a3, a4, a5, a6]:
        lin = affine[:3, :3]
        offset = affine[:3, 3]
        # Map the voxel-space seeds into the space defined by the affine.
        seeds_trans = [np.dot(lin, s) + offset for s in seeds]
        # We compute the voxel size to adjust the step size to one voxel
        voxel_size = np.mean(np.sqrt(np.dot(lin, lin).diagonal()))
        streamlines = LocalTracking(direction_getter=dg,
                                    tissue_classifier=tc,
                                    seeds=seeds_trans,
                                    affine=affine,
                                    step_size=voxel_size,
                                    return_all=True)
        # We apply the inverse affine transformation to the generated
        # streamlines. It should be equal to the expected streamlines
        # (generated with the identity affine matrix).
        affine_inv = np.linalg.inv(affine)
        lin = affine_inv[:3, :3]
        offset = affine_inv[:3, 3]
        streamlines_inv = []
        for line in streamlines:
            streamlines_inv.append([np.dot(pts, lin) + offset for pts in line])
        # atol=0.3 tolerates the interpolation error introduced by the
        # round trip through the transformed space.
        npt.assert_equal(len(streamlines_inv[0]), len(expected[0]))
        npt.assert_(np.allclose(streamlines_inv[0], expected[0], atol=0.3))
        npt.assert_equal(len(streamlines_inv[1]), len(expected[1]))
        npt.assert_(np.allclose(streamlines_inv[1], expected[1], atol=0.3))
if __name__ == "__main__":
    # Run every test function in this module via numpy's test runner.
    npt.run_module_suite()
| |
"""Constants for the opentherm_gw integration."""
import pyotgw.vars as gw_vars
from homeassistant.const import (
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
TIME_HOURS,
TIME_MINUTES,
UNIT_PERCENTAGE,
)
# Attribute names used by this integration.
ATTR_GW_ID = "gateway_id"
ATTR_LEVEL = "level"
ATTR_DHW_OVRD = "dhw_override"
# Configuration keys.
CONF_CLIMATE = "climate"
CONF_FLOOR_TEMP = "floor_temperature"
CONF_PRECISION = "precision"
# Keys for integration-level shared data.
DATA_GATEWAYS = "gateways"
DATA_OPENTHERM_GW = "opentherm_gw"
# Device-class identifiers referenced by BINARY_SENSOR_INFO below.
DEVICE_CLASS_COLD = "cold"
DEVICE_CLASS_HEAT = "heat"
DEVICE_CLASS_PROBLEM = "problem"
DOMAIN = "opentherm_gw"
# Service names.
SERVICE_RESET_GATEWAY = "reset_gateway"
SERVICE_SET_CLOCK = "set_clock"
SERVICE_SET_CONTROL_SETPOINT = "set_control_setpoint"
SERVICE_SET_HOT_WATER_SETPOINT = "set_hot_water_setpoint"
SERVICE_SET_HOT_WATER_OVRD = "set_hot_water_ovrd"
SERVICE_SET_GPIO_MODE = "set_gpio_mode"
SERVICE_SET_LED_MODE = "set_led_mode"
SERVICE_SET_MAX_MOD = "set_max_modulation"
SERVICE_SET_OAT = "set_outside_temperature"
SERVICE_SET_SB_TEMP = "set_setback_temperature"
# Units not provided by homeassistant.const.
UNIT_BAR = "bar"
UNIT_KW = "kW"
UNIT_L_MIN = f"L/{TIME_MINUTES}"
# Maps each pyotgw status variable to the metadata of the binary sensor
# entity created for it.  The "{}" in the friendly-name format is filled
# in elsewhere (with a gateway-specific identifier).
BINARY_SENSOR_INFO = {
    # [device_class, friendly_name format]
    gw_vars.DATA_MASTER_CH_ENABLED: [None, "Thermostat Central Heating Enabled {}"],
    gw_vars.DATA_MASTER_DHW_ENABLED: [None, "Thermostat Hot Water Enabled {}"],
    gw_vars.DATA_MASTER_COOLING_ENABLED: [None, "Thermostat Cooling Enabled {}"],
    gw_vars.DATA_MASTER_OTC_ENABLED: [
        None,
        "Thermostat Outside Temperature Correction Enabled {}",
    ],
    gw_vars.DATA_MASTER_CH2_ENABLED: [None, "Thermostat Central Heating 2 Enabled {}"],
    gw_vars.DATA_SLAVE_FAULT_IND: [DEVICE_CLASS_PROBLEM, "Boiler Fault Indication {}"],
    gw_vars.DATA_SLAVE_CH_ACTIVE: [
        DEVICE_CLASS_HEAT,
        "Boiler Central Heating Status {}",
    ],
    gw_vars.DATA_SLAVE_DHW_ACTIVE: [DEVICE_CLASS_HEAT, "Boiler Hot Water Status {}"],
    gw_vars.DATA_SLAVE_FLAME_ON: [DEVICE_CLASS_HEAT, "Boiler Flame Status {}"],
    gw_vars.DATA_SLAVE_COOLING_ACTIVE: [DEVICE_CLASS_COLD, "Boiler Cooling Status {}"],
    gw_vars.DATA_SLAVE_CH2_ACTIVE: [
        DEVICE_CLASS_HEAT,
        "Boiler Central Heating 2 Status {}",
    ],
    gw_vars.DATA_SLAVE_DIAG_IND: [
        DEVICE_CLASS_PROBLEM,
        "Boiler Diagnostics Indication {}",
    ],
    gw_vars.DATA_SLAVE_DHW_PRESENT: [None, "Boiler Hot Water Present {}"],
    gw_vars.DATA_SLAVE_CONTROL_TYPE: [None, "Boiler Control Type {}"],
    gw_vars.DATA_SLAVE_COOLING_SUPPORTED: [None, "Boiler Cooling Support {}"],
    gw_vars.DATA_SLAVE_DHW_CONFIG: [None, "Boiler Hot Water Configuration {}"],
    gw_vars.DATA_SLAVE_MASTER_LOW_OFF_PUMP: [None, "Boiler Pump Commands Support {}"],
    gw_vars.DATA_SLAVE_CH2_PRESENT: [None, "Boiler Central Heating 2 Present {}"],
    gw_vars.DATA_SLAVE_SERVICE_REQ: [
        DEVICE_CLASS_PROBLEM,
        "Boiler Service Required {}",
    ],
    gw_vars.DATA_SLAVE_REMOTE_RESET: [None, "Boiler Remote Reset Support {}"],
    gw_vars.DATA_SLAVE_LOW_WATER_PRESS: [
        DEVICE_CLASS_PROBLEM,
        "Boiler Low Water Pressure {}",
    ],
    gw_vars.DATA_SLAVE_GAS_FAULT: [DEVICE_CLASS_PROBLEM, "Boiler Gas Fault {}"],
    gw_vars.DATA_SLAVE_AIR_PRESS_FAULT: [
        DEVICE_CLASS_PROBLEM,
        "Boiler Air Pressure Fault {}",
    ],
    gw_vars.DATA_SLAVE_WATER_OVERTEMP: [
        DEVICE_CLASS_PROBLEM,
        "Boiler Water Overtemperature {}",
    ],
    gw_vars.DATA_REMOTE_TRANSFER_DHW: [
        None,
        "Remote Hot Water Setpoint Transfer Support {}",
    ],
    gw_vars.DATA_REMOTE_TRANSFER_MAX_CH: [
        None,
        "Remote Maximum Central Heating Setpoint Write Support {}",
    ],
    gw_vars.DATA_REMOTE_RW_DHW: [None, "Remote Hot Water Setpoint Write Support {}"],
    gw_vars.DATA_REMOTE_RW_MAX_CH: [
        None,
        "Remote Central Heating Setpoint Write Support {}",
    ],
    gw_vars.DATA_ROVRD_MAN_PRIO: [None, "Remote Override Manual Change Priority {}"],
    gw_vars.DATA_ROVRD_AUTO_PRIO: [None, "Remote Override Program Change Priority {}"],
    gw_vars.OTGW_GPIO_A_STATE: [None, "Gateway GPIO A State {}"],
    gw_vars.OTGW_GPIO_B_STATE: [None, "Gateway GPIO B State {}"],
    gw_vars.OTGW_IGNORE_TRANSITIONS: [None, "Gateway Ignore Transitions {}"],
    gw_vars.OTGW_OVRD_HB: [None, "Gateway Override High Byte {}"],
}
# Maps each pyotgw status variable to the metadata of the sensor entity
# created for it.  The "{}" in the friendly-name format is filled in
# elsewhere (with a gateway-specific identifier).
SENSOR_INFO = {
    # [device_class, unit, friendly_name]
    gw_vars.DATA_CONTROL_SETPOINT: [
        DEVICE_CLASS_TEMPERATURE,
        TEMP_CELSIUS,
        "Control Setpoint {}",
    ],
    gw_vars.DATA_MASTER_MEMBERID: [None, None, "Thermostat Member ID {}"],
    gw_vars.DATA_SLAVE_MEMBERID: [None, None, "Boiler Member ID {}"],
    gw_vars.DATA_SLAVE_OEM_FAULT: [None, None, "Boiler OEM Fault Code {}"],
    gw_vars.DATA_COOLING_CONTROL: [None, UNIT_PERCENTAGE, "Cooling Control Signal {}"],
    gw_vars.DATA_CONTROL_SETPOINT_2: [
        DEVICE_CLASS_TEMPERATURE,
        TEMP_CELSIUS,
        "Control Setpoint 2 {}",
    ],
    gw_vars.DATA_ROOM_SETPOINT_OVRD: [
        DEVICE_CLASS_TEMPERATURE,
        TEMP_CELSIUS,
        "Room Setpoint Override {}",
    ],
    gw_vars.DATA_SLAVE_MAX_RELATIVE_MOD: [
        None,
        UNIT_PERCENTAGE,
        "Boiler Maximum Relative Modulation {}",
    ],
    gw_vars.DATA_SLAVE_MAX_CAPACITY: [None, UNIT_KW, "Boiler Maximum Capacity {}"],
    gw_vars.DATA_SLAVE_MIN_MOD_LEVEL: [
        None,
        UNIT_PERCENTAGE,
        "Boiler Minimum Modulation Level {}",
    ],
    gw_vars.DATA_ROOM_SETPOINT: [
        DEVICE_CLASS_TEMPERATURE,
        TEMP_CELSIUS,
        "Room Setpoint {}",
    ],
    gw_vars.DATA_REL_MOD_LEVEL: [None, UNIT_PERCENTAGE, "Relative Modulation Level {}"],
    gw_vars.DATA_CH_WATER_PRESS: [None, UNIT_BAR, "Central Heating Water Pressure {}"],
    gw_vars.DATA_DHW_FLOW_RATE: [None, UNIT_L_MIN, "Hot Water Flow Rate {}"],
    gw_vars.DATA_ROOM_SETPOINT_2: [
        DEVICE_CLASS_TEMPERATURE,
        TEMP_CELSIUS,
        "Room Setpoint 2 {}",
    ],
    gw_vars.DATA_ROOM_TEMP: [
        DEVICE_CLASS_TEMPERATURE,
        TEMP_CELSIUS,
        "Room Temperature {}",
    ],
    gw_vars.DATA_CH_WATER_TEMP: [
        DEVICE_CLASS_TEMPERATURE,
        TEMP_CELSIUS,
        "Central Heating Water Temperature {}",
    ],
    gw_vars.DATA_DHW_TEMP: [
        DEVICE_CLASS_TEMPERATURE,
        TEMP_CELSIUS,
        "Hot Water Temperature {}",
    ],
    gw_vars.DATA_OUTSIDE_TEMP: [
        DEVICE_CLASS_TEMPERATURE,
        TEMP_CELSIUS,
        "Outside Temperature {}",
    ],
    gw_vars.DATA_RETURN_WATER_TEMP: [
        DEVICE_CLASS_TEMPERATURE,
        TEMP_CELSIUS,
        "Return Water Temperature {}",
    ],
    gw_vars.DATA_SOLAR_STORAGE_TEMP: [
        DEVICE_CLASS_TEMPERATURE,
        TEMP_CELSIUS,
        "Solar Storage Temperature {}",
    ],
    gw_vars.DATA_SOLAR_COLL_TEMP: [
        DEVICE_CLASS_TEMPERATURE,
        TEMP_CELSIUS,
        "Solar Collector Temperature {}",
    ],
    gw_vars.DATA_CH_WATER_TEMP_2: [
        DEVICE_CLASS_TEMPERATURE,
        TEMP_CELSIUS,
        "Central Heating 2 Water Temperature {}",
    ],
    gw_vars.DATA_DHW_TEMP_2: [
        DEVICE_CLASS_TEMPERATURE,
        TEMP_CELSIUS,
        "Hot Water 2 Temperature {}",
    ],
    gw_vars.DATA_EXHAUST_TEMP: [
        DEVICE_CLASS_TEMPERATURE,
        TEMP_CELSIUS,
        "Exhaust Temperature {}",
    ],
    gw_vars.DATA_SLAVE_DHW_MAX_SETP: [
        DEVICE_CLASS_TEMPERATURE,
        TEMP_CELSIUS,
        "Hot Water Maximum Setpoint {}",
    ],
    gw_vars.DATA_SLAVE_DHW_MIN_SETP: [
        DEVICE_CLASS_TEMPERATURE,
        TEMP_CELSIUS,
        "Hot Water Minimum Setpoint {}",
    ],
    gw_vars.DATA_SLAVE_CH_MAX_SETP: [
        DEVICE_CLASS_TEMPERATURE,
        TEMP_CELSIUS,
        "Boiler Maximum Central Heating Setpoint {}",
    ],
    gw_vars.DATA_SLAVE_CH_MIN_SETP: [
        DEVICE_CLASS_TEMPERATURE,
        TEMP_CELSIUS,
        "Boiler Minimum Central Heating Setpoint {}",
    ],
    gw_vars.DATA_DHW_SETPOINT: [
        DEVICE_CLASS_TEMPERATURE,
        TEMP_CELSIUS,
        "Hot Water Setpoint {}",
    ],
    gw_vars.DATA_MAX_CH_SETPOINT: [
        DEVICE_CLASS_TEMPERATURE,
        TEMP_CELSIUS,
        "Maximum Central Heating Setpoint {}",
    ],
    gw_vars.DATA_OEM_DIAG: [None, None, "OEM Diagnostic Code {}"],
    gw_vars.DATA_TOTAL_BURNER_STARTS: [None, None, "Total Burner Starts {}"],
    gw_vars.DATA_CH_PUMP_STARTS: [None, None, "Central Heating Pump Starts {}"],
    gw_vars.DATA_DHW_PUMP_STARTS: [None, None, "Hot Water Pump Starts {}"],
    gw_vars.DATA_DHW_BURNER_STARTS: [None, None, "Hot Water Burner Starts {}"],
    gw_vars.DATA_TOTAL_BURNER_HOURS: [None, TIME_HOURS, "Total Burner Hours {}"],
    gw_vars.DATA_CH_PUMP_HOURS: [None, TIME_HOURS, "Central Heating Pump Hours {}"],
    gw_vars.DATA_DHW_PUMP_HOURS: [None, TIME_HOURS, "Hot Water Pump Hours {}"],
    gw_vars.DATA_DHW_BURNER_HOURS: [None, TIME_HOURS, "Hot Water Burner Hours {}"],
    gw_vars.DATA_MASTER_OT_VERSION: [None, None, "Thermostat OpenTherm Version {}"],
    gw_vars.DATA_SLAVE_OT_VERSION: [None, None, "Boiler OpenTherm Version {}"],
    gw_vars.DATA_MASTER_PRODUCT_TYPE: [None, None, "Thermostat Product Type {}"],
    gw_vars.DATA_MASTER_PRODUCT_VERSION: [None, None, "Thermostat Product Version {}"],
    gw_vars.DATA_SLAVE_PRODUCT_TYPE: [None, None, "Boiler Product Type {}"],
    gw_vars.DATA_SLAVE_PRODUCT_VERSION: [None, None, "Boiler Product Version {}"],
    gw_vars.OTGW_MODE: [None, None, "Gateway/Monitor Mode {}"],
    gw_vars.OTGW_DHW_OVRD: [None, None, "Gateway Hot Water Override Mode {}"],
    gw_vars.OTGW_ABOUT: [None, None, "Gateway Firmware Version {}"],
    gw_vars.OTGW_BUILD: [None, None, "Gateway Firmware Build {}"],
    gw_vars.OTGW_CLOCKMHZ: [None, None, "Gateway Clock Speed {}"],
    gw_vars.OTGW_LED_A: [None, None, "Gateway LED A Mode {}"],
    gw_vars.OTGW_LED_B: [None, None, "Gateway LED B Mode {}"],
    gw_vars.OTGW_LED_C: [None, None, "Gateway LED C Mode {}"],
    gw_vars.OTGW_LED_D: [None, None, "Gateway LED D Mode {}"],
    gw_vars.OTGW_LED_E: [None, None, "Gateway LED E Mode {}"],
    gw_vars.OTGW_LED_F: [None, None, "Gateway LED F Mode {}"],
    gw_vars.OTGW_GPIO_A: [None, None, "Gateway GPIO A Mode {}"],
    gw_vars.OTGW_GPIO_B: [None, None, "Gateway GPIO B Mode {}"],
    gw_vars.OTGW_SB_TEMP: [
        DEVICE_CLASS_TEMPERATURE,
        TEMP_CELSIUS,
        "Gateway Setback Temperature {}",
    ],
    gw_vars.OTGW_SETP_OVRD_MODE: [None, None, "Gateway Room Setpoint Override Mode {}"],
    gw_vars.OTGW_SMART_PWR: [None, None, "Gateway Smart Power Mode {}"],
    gw_vars.OTGW_THRM_DETECT: [None, None, "Gateway Thermostat Detection {}"],
    gw_vars.OTGW_VREF: [None, None, "Gateway Reference Voltage Setting {}"],
}
| |
"""Parser of the Daily Summary Message (DSM)."""
import re
from datetime import datetime, timedelta
from metpy.units import units
from pyiem.nws.product import TextProduct
from pyiem.util import utc
from pyiem.reference import TRACE_VALUE
# Regular expression for one DSM report.  Groups, in order: station ID,
# optional COR marker and transmit time, day/month, high temperature and
# its time, low temperature and its time, coop high/low, minimum sea-level
# pressure and time, daily precip, 24 hourly precip groups (p01..p24 with
# "T" = trace and "M" = missing), average wind speed, and an optional
# max-wind / max-gust section.
#
# Fix: group p15 previously used the character class [\-?0-9]{,4}, which
# wrongly accepted a literal "?" (and "-" in any position); it now matches
# an optionally signed number exactly like the other 23 hourly groups.
PARSER_RE = re.compile(
    r"""^(?P<station>[A-Z][A-Z0-9]{3})\s+
    DS\s+
    (COR\s)?
    ([0-9]{4}\s)?
    (?P<day>\d\d)/(?P<month>\d\d)\s?
    ((?P<highmiss>M)|((?P<high>(-?\d+))(?P<hightime>[0-9]{4})))/\s?
    ((?P<lowmiss>M)|((?P<low>(-?\d+))(?P<lowtime>[0-9]{4})))//\s?
    (?P<coophigh>(-?\d+|M))/\s?
    (?P<cooplow>(-?\d+|M))//
    (?P<minslp>M|[\-0-9]{3,4})(?P<slptime>[0-9]{4})?/
    (?P<pday>T|M|[0-9]{,4})/
    (?P<p01>T|M|\-|\-?[0-9]{,4})/(?P<p02>T|M|\-|\-?[0-9]{,4})/
    (?P<p03>T|M|\-|\-?[0-9]{,4})/(?P<p04>T|M|\-|\-?[0-9]{,4})/
    (?P<p05>T|M|\-|\-?[0-9]{,4})/(?P<p06>T|M|\-|\-?[0-9]{,4})/
    (?P<p07>T|M|\-|\-?[0-9]{,4})/(?P<p08>T|M|\-|\-?[0-9]{,4})/
    (?P<p09>T|M|\-|\-?[0-9]{,4})/(?P<p10>T|M|\-|\-?[0-9]{,4})/
    (?P<p11>T|M|\-|\-?[0-9]{,4})/(?P<p12>T|M|\-|\-?[0-9]{,4})/
    (?P<p13>T|M|\-|\-?[0-9]{,4})/(?P<p14>T|M|\-|\-?[0-9]{,4})/
    (?P<p15>T|M|\-|\-?[0-9]{,4})/(?P<p16>T|M|\-|\-?[0-9]{,4})/
    (?P<p17>T|M|\-|\-?[0-9]{,4})/(?P<p18>T|M|\-|\-?[0-9]{,4})/
    (?P<p19>T|M|\-|\-?[0-9]{,4})/(?P<p20>T|M|\-|\-?[0-9]{,4})/
    (?P<p21>T|M|\-|\-?[0-9]{,4})/(?P<p22>T|M|\-|\-?[0-9]{,4})/
    (?P<p23>T|M|\-|\-?[0-9]{,4})/(?P<p24>T|M|\-|\-?[0-9]{,4})/
    (?P<avg_sped>M|\-|[0-9]{2,3})/
    ((?P<drct_sped_max>[0-9]{2})
    (?P<sped_max>[0-9]{2,3})(?P<time_sped_max>[0-9]{4})/
    (?P<drct_gust_max>[0-9]{2})
    (?P<sped_gust_max>[0-9]{2,3})(?P<time_sped_gust_max>[0-9]{4}))?
    """,
    re.VERBOSE,
)
def process(text):
    """Parse one DSM report and return a DSMProduct, or None on failure."""
    # Reports may arrive with embedded line breaks; flatten them first.
    flattened = text.replace("\r", "").replace("\n", "")
    match = PARSER_RE.match(flattened)
    if match is None:
        return None
    return DSMProduct(match.groupdict())
def compute_time(date, timestamp):
    """Combine *date* with an HHMM string into a naive datetime.

    Returns None when no timestamp was reported.
    """
    if timestamp is None:
        return None
    hour = int(timestamp[:2])
    minute = int(timestamp[2:4])
    return datetime(date.year, date.month, date.day, hour, minute)
class DSMProduct:
    """Represents a single DSM (one station's daily summary)."""
    def __init__(self, groupdict):
        """Constructor.

        Args:
          groupdict (dict): named groups from a ``PARSER_RE`` match.
        """
        # Timestamp attributes are populated later by compute_times() and
        # may subsequently be localized by tzlocalize().
        self.date = None
        self.high_time = None
        self.low_time = None
        self.time_sped_max = None
        self.time_sped_gust_max = None
        self.station = groupdict["station"]
        self.groupdict = groupdict
    def tzlocalize(self, tzinfo):
        """Localize the timestamps, tricky."""
        # Use a fixed mid-winter date so we get the zone's standard-time
        # offset (presumably DSM clock times are reported in local standard
        # time — TODO confirm against the DSM product spec).
        offset = tzinfo.utcoffset(datetime(2000, 1, 1)).total_seconds()
        for name in [
            "high_time",
            "low_time",
            "time_sped_max",
            "time_sped_gust_max",
        ]:
            val = getattr(self, name)
            if val is None:
                continue
            # Need to convert timestamp into standard time, tricky
            ts = val - timedelta(seconds=offset)
            setattr(
                self,
                name,
                utc(ts.year, ts.month, ts.day, ts.hour, ts.minute).astimezone(
                    tzinfo
                ),
            )
    def compute_times(self, utcnow):
        """Figure out when this DSM is valid for."""
        ts = utcnow.replace(
            day=int(self.groupdict["day"]), month=int(self.groupdict["month"])
        )
        # Is this ob from 'last year'
        if ts.month == 12 and utcnow.month == 1:
            ts = ts.replace(year=(ts.year - 1))
        self.date = datetime(ts.year, ts.month, ts.day).date()
        self.high_time = compute_time(
            self.date, self.groupdict.get("hightime")
        )
        self.low_time = compute_time(self.date, self.groupdict.get("lowtime"))
        self.time_sped_max = compute_time(
            self.date, self.groupdict.get("time_sped_max")
        )
        self.time_sped_gust_max = compute_time(
            self.date, self.groupdict.get("time_sped_gust_max")
        )
    def sql(self, txn):
        """Persist to database given the transaction object.

        Returns True when exactly one summary row was updated.
        """
        cols = []
        args = []
        val = self.groupdict.get("high")
        if val is not None and val != "M":
            cols.append("max_tmpf")
            args.append(val)
        val = self.groupdict.get("low")
        if val is not None and val != "M":
            cols.append("min_tmpf")
            args.append(val)
        val = self.groupdict.get("pday")
        if val is not None and val != "M":
            # Precip is reported in hundredths; "T" maps to TRACE_VALUE.
            cols.append("pday")
            args.append(TRACE_VALUE if val == "T" else float(val) / 100.0)
        val = self.groupdict.get("sped_max")
        if val is not None:
            # Wind speed is reported in MPH; the column stores knots.
            cols.append("max_sknt")
            args.append(
                (int(val) * units("miles / hour")).to(units("knots")).m
            )
        val = self.time_sped_max
        if val is not None:
            cols.append("max_sknt_ts")
            args.append(val)
        val = self.groupdict.get("sped_gust_max")
        if val is not None:
            cols.append("max_gust")
            args.append(
                (int(val) * units("miles / hour")).to(units("knots")).m
            )
        val = self.time_sped_gust_max
        if val is not None:
            cols.append("max_gust_ts")
            args.append(val)
        if not cols:
            # Nothing parseable to store.
            return False
        # Build "col = %s" fragments; the doubled %% survives the first
        # interpolation so the DB driver still sees a placeholder.
        cs = ", ".join(["%s = %%s" % (c,) for c in cols])
        # K-prefixed identifiers are matched by their 3-char ID (drop the K).
        slicer = slice(0, 4) if self.station[0] != "K" else slice(1, 4)
        args.extend([self.station[slicer], self.date])
        txn.execute(
            (
                f"UPDATE summary_{self.date.year} s SET {cs} FROM stations t "
                "WHERE s.iemid = t.iemid and t.network ~* 'ASOS' "
                "and t.id = %s and s.day = %s"
            ),
            args,
        )
        return txn.rowcount == 1
class DSMCollective(TextProduct):
    """A collective representing a NOAAPort Text Product with many DSMs."""
    def __init__(
        self, text, utcnow=None, ugc_provider=None, nwsli_provider=None
    ):
        """constructor"""
        TextProduct.__init__(
            self,
            text,
            utcnow,
            ugc_provider,
            nwsli_provider,
            parse_segments=False,
        )
        # hold our parsing results
        self.data = []
        lines = self.unixtext.split("\n")
        if len(lines) < 4:
            raise ValueError("Impossibly small DSM Text Product?")
        # Individual reports are "="-delimited; the body starts on the 4th
        # or 5th line (a short 4th line presumably indicates an extra
        # header line — TODO confirm against sample products).
        if len(lines[3]) < 10:
            meat = ("".join(lines[4:])).split("=")
        else:
            meat = ("".join(lines[3:])).split("=")
        for piece in meat:
            if piece == "":
                continue
            res = process(piece)
            if res is None:
                # Keep going: one malformed report must not sink the rest.
                self.warnings.append(f"DSM RE Match Failure: '{piece}'")
                continue
            res.compute_times(utcnow if utcnow is not None else utc())
            self.data.append(res)
    def tzlocalize(self, tzprovider):
        """Localize our currently stored timestamps."""
        for dsm in self.data:
            tzinfo = tzprovider.get(dsm.station)
            if tzinfo is None:
                self.warnings.append(f"station {dsm.station} has no tzinfo")
                continue
            dsm.tzlocalize(tzinfo)
    def sql(self, txn):
        """Do databasing.

        Returns a list with one boolean per parsed DSM (see DSMProduct.sql).
        """
        return [dsm.sql(txn) for dsm in self.data]
def parser(text, utcnow=None, ugc_provider=None, nwsli_provider=None):
    """Parse *text* and return the resulting DSMCollective."""
    return DSMCollective(
        text,
        utcnow=utcnow,
        ugc_provider=ugc_provider,
        nwsli_provider=nwsli_provider,
    )
| |
#!/usr/bin/env python
from __future__ import absolute_import, print_function, division
from os.path import join
import contextlib
import os
import shutil
import subprocess
import re
import shlex
import runpy
import zipfile
import tarfile
import platform
import click
import pysftp
import fnmatch
# https://virtualenv.pypa.io/en/latest/userguide.html#windows-notes
# scripts and executables on Windows go in ENV\Scripts\ instead of ENV/bin/
if platform.system() == "Windows":
    VENV_BIN = "Scripts"
else:
    VENV_BIN = "bin"
# Archive() abstracts the per-platform archive format: zip on Windows,
# gzipped tar elsewhere.  The zip object gets a tar-style ``add`` alias
# so callers can use a single interface for both.
if platform.system() == "Windows":
    def Archive(name):
        a = zipfile.ZipFile(name, "w")
        a.add = a.write
        return a
else:
    def Archive(name):
        return tarfile.open(name, "w:gz")
# Directory layout: everything is resolved relative to this file.
RELEASE_DIR = join(os.path.dirname(os.path.realpath(__file__)))
DIST_DIR = join(RELEASE_DIR, "dist")
ROOT_DIR = os.path.normpath(join(RELEASE_DIR, ".."))
RELEASE_SPEC_DIR = join(RELEASE_DIR, "specs")
VERSION_FILE = join(ROOT_DIR, "netlib/version.py")
BUILD_DIR = join(RELEASE_DIR, "build")
PYINSTALLER_TEMP = join(BUILD_DIR, "pyinstaller")
PYINSTALLER_DIST = join(BUILD_DIR, "binaries")
VENV_DIR = join(BUILD_DIR, "venv")
VENV_PIP = join(VENV_DIR, VENV_BIN, "pip")
VENV_PYINSTALLER = join(VENV_DIR, VENV_BIN, "pyinstaller")
# Project description: the console tools that exist and how they are
# grouped into binary distributions.
project = {
    "name": "mitmproxy",
    "tools": ["pathod", "pathoc", "mitmproxy", "mitmdump", "mitmweb"],
    "bdists": {
        "mitmproxy": ["mitmproxy", "mitmdump", "mitmweb"],
        "pathod": ["pathoc", "pathod"]
    },
    "dir": ROOT_DIR,
    "python_version": "py2"
}
# The mitmproxy console tool is not built on Windows.
if platform.system() == "Windows":
    project["tools"].remove("mitmproxy")
    project["bdists"]["mitmproxy"].remove("mitmproxy")
def get_version():
    """Read VERSION out of the version module without importing it."""
    module_globals = runpy.run_path(VERSION_FILE)
    return module_globals["VERSION"]
def get_snapshot_version():
    """Return a dev version string for untagged commits.

    Equal to get_version() when HEAD is exactly on a tag; otherwise
    appends the commit distance and abbreviated commit hash.
    """
    # git describe gives e.g. b"v0.17-122-gabc1234"; check_output returns
    # bytes, so the fields must be decoded before string interpolation
    # (otherwise Python 3 would render "0xb'gabc1234'").
    last_tag, tag_dist, commit = git("describe --tags --long").strip().rsplit(b"-", 2)
    tag_dist = int(tag_dist)
    if tag_dist == 0:
        return get_version()
    else:
        # The wheel build tag (we use the commit) must start with a digit, so we include "0x"
        return "{version}dev{tag_dist:04}-0x{commit}".format(
            version=get_version(),  # this should already be the next version
            tag_dist=tag_dist,
            commit=commit.decode("ascii")
        )
def archive_name(project):
    """Return the platform-specific archive filename for *project*."""
    system = platform.system()
    # Map the platform name to the tag used in release filenames,
    # falling back to the raw platform name for unknown systems.
    platform_tag = {
        "Darwin": "osx",
        "Windows": "win32",
        "Linux": "linux"
    }.get(system, system)
    ext = "zip" if system == "Windows" else "tar.gz"
    return "{project}-{version}-{platform}.{ext}".format(
        project=project,
        version=get_version(),
        platform=platform_tag,
        ext=ext
    )
def wheel_name():
    """Return the expected wheel filename for the current version."""
    parts = [
        project["name"],
        get_version(),
        project["python_version"],
        "none",
        "any",
    ]
    return "-".join(parts) + ".whl"
@contextlib.contextmanager
def empty_pythonpath():
    """
    Make sure that the regular python installation is not on the python path,
    which would give us access to modules installed outside of our virtualenv.
    """
    pythonpath = os.environ.get("PYTHONPATH", "")
    os.environ["PYTHONPATH"] = ""
    try:
        yield
    finally:
        # Restore even when the body raises, so one failed build step does
        # not leave PYTHONPATH cleared for the rest of the process.
        os.environ["PYTHONPATH"] = pythonpath
@contextlib.contextmanager
def chdir(path):
    """Temporarily change the working directory, restoring it afterwards."""
    old_dir = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        # Restore the previous directory even when the body raises.
        os.chdir(old_dir)
def git(args):
    """Run a git command from the repository root and return its output."""
    cmd = ["git"] + shlex.split(args)
    with chdir(ROOT_DIR):
        return subprocess.check_output(cmd)
@click.group(chain=True)
def cli():
    """
    mitmproxy build tool
    """
    # chain=True lets several subcommands be given in one invocation;
    # the docstring doubles as the --help text, so it is left as-is.
    pass
@cli.command("contributors")
def contributors():
    """
    Update CONTRIBUTORS.md
    """
    with chdir(ROOT_DIR):
        print("Updating CONTRIBUTORS...")
        # git() returns the raw output of `git shortlog -n -s` (bytes).
        # NOTE(review): writing bytes into a text-mode file only works on
        # Python 2 (the project's "py2" target) — confirm before porting.
        contributors_data = git("shortlog -n -s")
        with open("CONTRIBUTORS", "w") as f:
            f.write(contributors_data)
@cli.command("set-version")
@click.argument('version')
def set_version(version):
    """
    Update version information
    """
    print("Update versions...")
    # "0.17.1" -> "0, 17, 1" so it can be spliced into the IVERSION tuple.
    version = ", ".join(version.split("."))
    print("Update %s..." % VERSION_FILE)
    with open(VERSION_FILE, "rb") as f:
        content = f.read()
    # NOTE(review): content is bytes while the re.sub patterns are str;
    # this only works on Python 2 (the project's "py2" target) — confirm
    # before porting to Python 3.
    new_content = re.sub(
        r"IVERSION\s*=\s*\([\d,\s]+\)", "IVERSION = (%s)" % version,
        content
    )
    with open(VERSION_FILE, "wb") as f:
        f.write(new_content)
@cli.command("wheels")
def wheels():
    """
    Build wheels
    """
    with empty_pythonpath():
        print("Building release...")
        # Start from a clean dist/ so no stale artifacts can be uploaded.
        if os.path.exists(DIST_DIR):
            shutil.rmtree(DIST_DIR)
        print("Creating wheel for %s ..." % project["name"])
        subprocess.check_call(
            [
                "python", "./setup.py", "-q",
                "bdist_wheel", "--dist-dir", DIST_DIR,
            ],
            cwd=project["dir"]
        )
        # Install the freshly built wheel into a throwaway virtualenv to
        # verify it is actually installable.
        print("Creating virtualenv for test install...")
        if os.path.exists(VENV_DIR):
            shutil.rmtree(VENV_DIR)
        subprocess.check_call(["virtualenv", "-q", VENV_DIR])
        with chdir(DIST_DIR):
            print("Installing %s..." % project["name"])
            subprocess.check_call([VENV_PIP, "install", "-q", wheel_name()])
        print("Running binaries...")
        # Smoke test: every console tool must at least answer --version.
        for tool in project["tools"]:
            tool = join(VENV_DIR, VENV_BIN, tool)
            print("> %s --version" % tool)
            print(subprocess.check_output([tool, "--version"]))
        print("Virtualenv available for further testing:")
        print("source %s" % os.path.normpath(join(VENV_DIR, VENV_BIN, "activate")))
@cli.command("bdist")
@click.option("--use-existing-wheels/--no-use-existing-wheels", default=False)
@click.argument("pyinstaller_version", envvar="PYINSTALLER_VERSION", default="PyInstaller~=3.1.1")
@click.pass_context
def bdist(ctx, use_existing_wheels, pyinstaller_version):
    """
    Build a binary distribution
    """
    # Clean PyInstaller work/output directories from any previous run.
    if os.path.exists(PYINSTALLER_TEMP):
        shutil.rmtree(PYINSTALLER_TEMP)
    if os.path.exists(PYINSTALLER_DIST):
        shutil.rmtree(PYINSTALLER_DIST)
    if not use_existing_wheels:
        # Also (re)creates the test virtualenv that PyInstaller runs in.
        ctx.invoke(wheels)
    print("Installing PyInstaller...")
    subprocess.check_call([VENV_PIP, "install", "-q", pyinstaller_version])
    # One archive per distribution, containing one binary per tool.
    for bdist_project, tools in project["bdists"].items():
        with Archive(join(DIST_DIR, archive_name(bdist_project))) as archive:
            for tool in tools:
                # This is PyInstaller, so it messes up paths.
                # We need to make sure that we are in the spec folder.
                with chdir(RELEASE_SPEC_DIR):
                    print("Building %s binary..." % tool)
                    subprocess.check_call(
                        [
                            VENV_PYINSTALLER,
                            "--clean",
                            "--workpath", PYINSTALLER_TEMP,
                            "--distpath", PYINSTALLER_DIST,
                            # This is PyInstaller, so setting a
                            # different log level obviously breaks it :-)
                            # "--log-level", "WARN",
                            "%s.spec" % tool
                        ]
                    )
                # Test if it works at all O:-)
                executable = join(PYINSTALLER_DIST, tool)
                if platform.system() == "Windows":
                    executable += ".exe"
                print("> %s --version" % executable)
                subprocess.check_call([executable, "--version"])
                archive.add(executable, os.path.basename(executable))
        print("Packed {}.".format(archive_name(bdist_project)))
@cli.command("upload-release")
@click.option('--username', prompt=True)
@click.password_option(confirmation_prompt=False)
@click.option('--repository', default="pypi")
def upload_release(username, password, repository):
    """
    Upload wheels to PyPI
    """
    filename = wheel_name()
    print("Uploading {} to {}...".format(filename, repository))
    # NOTE(review): the password is passed on argv and is therefore visible
    # in the process list while twine runs; consider TWINE_PASSWORD instead.
    subprocess.check_call([
        "twine",
        "upload",
        "-u", username,
        "-p", password,
        "-r", repository,
        join(DIST_DIR, filename)
    ])
@cli.command("upload-snapshot")
@click.option("--host", envvar="SNAPSHOT_HOST", prompt=True)
@click.option("--port", envvar="SNAPSHOT_PORT", type=int, default=22)
@click.option("--user", envvar="SNAPSHOT_USER", prompt=True)
@click.option("--private-key", default=join(RELEASE_DIR, "rtool.pem"))
@click.option("--private-key-password", envvar="SNAPSHOT_PASS", prompt=True, hide_input=True)
@click.option("--wheel/--no-wheel", default=False)
@click.option("--bdist/--no-bdist", default=False)
def upload_snapshot(host, port, user, private_key, private_key_password, wheel, bdist):
    """
    Upload snapshot to snapshot server
    """
    with pysftp.Connection(host=host,
                           port=port,
                           username=user,
                           private_key=private_key,
                           private_key_pass=private_key_password) as sftp:
        dir_name = "snapshots/v{}".format(get_version())
        sftp.makedirs(dir_name)
        with sftp.cd(dir_name):
            files = []
            if wheel:
                files.append(wheel_name())
            # Fix: honour the --bdist flag (the loop previously ran
            # unconditionally) and use a loop variable that does not
            # shadow the flag itself.
            if bdist:
                for bdist_project in project["bdists"].keys():
                    files.append(archive_name(bdist_project))
            for f in files:
                local_path = join(DIST_DIR, f)
                remote_filename = f.replace(get_version(), get_snapshot_version())
                symlink_path = "../{}".format(f.replace(get_version(), "latest"))
                # Delete old versions
                old_version = f.replace(get_version(), "*")
                for f_old in sftp.listdir():
                    if fnmatch.fnmatch(f_old, old_version):
                        print("Removing {}...".format(f_old))
                        sftp.remove(f_old)
                # Upload new version
                print("Uploading {} as {}...".format(f, remote_filename))
                with click.progressbar(length=os.stat(local_path).st_size) as bar:
                    sftp.put(
                        local_path,
                        "." + remote_filename,
                        callback=lambda done, total: bar.update(done - bar.pos)
                    )
                    # We hide the file during upload.
                    sftp.rename("." + remote_filename, remote_filename)
                # update symlink for the latest release
                if sftp.lexists(symlink_path):
                    print("Removing {}...".format(symlink_path))
                    sftp.remove(symlink_path)
                sftp.symlink("v{}/{}".format(get_version(), remote_filename), symlink_path)
@cli.command("wizard")
@click.option('--next-version', prompt=True)
@click.option('--username', prompt="PyPI Username")
@click.password_option(confirmation_prompt=False, prompt="PyPI Password")
@click.option('--repository', default="pypi")
@click.pass_context
def wizard(ctx, next_version, username, password, repository):
    """
    Interactive Release Wizard
    """
    # Refuse to release from a dirty working tree.
    is_dirty = git("status --porcelain")
    if is_dirty:
        raise RuntimeError("Repository is not clean.")
    # update contributors file
    ctx.invoke(contributors)
    # Build test release
    ctx.invoke(bdist)
    try:
        click.confirm("Please test the release now. Is it ok?", abort=True)
    except click.Abort:
        # undo changes
        git("checkout CONTRIBUTORS")
        raise
    # Everything ok - let's ship it!
    git("tag v{}".format(get_version()))
    git("push --tags")
    ctx.invoke(
        upload_release,
        username=username, password=password, repository=repository
    )
    click.confirm("Now please wait until CI has built binaries. Finished?")
    # version bump commit
    ctx.invoke(set_version, version=next_version)
    git("commit -a -m \"bump version\"")
    git("push")
    click.echo("All done!")
if __name__ == "__main__":
    # Entry point: dispatch to the click command group.
    cli()
| |
"""
This module provides access to the OSVR ClientKit C API via the foreign function interface ctypes.
Each class defines the struct of the same name in the C API. Likewise, each method defines the function of the same name in the C API.
For reference, view the C API documentation at http://resource.osvr.com/docs/OSVR-Core/group__ClientKit.html
"""
from ctypes import *
# Load the OSVR ClientKit shared library via the platform's default
# dynamic-library search path.
mylib = cdll.LoadLibrary("osvrClientKit")
class OSVR_ClientContextObject(Structure):
    # Opaque struct: the C API only ever exposes it behind a pointer.
    pass
OSVR_ClientContext = POINTER(OSVR_ClientContextObject)
class OSVR_ClientInterfaceObject(Structure):
    # Opaque struct: the C API only ever exposes it behind a pointer.
    pass
OSVR_ClientInterface = POINTER(OSVR_ClientInterfaceObject)
class OSVR_DisplayConfigObject(Structure):
    # Opaque struct: the C API only ever exposes it behind a pointer.
    pass
OSVR_DisplayConfig = POINTER(OSVR_DisplayConfigObject)
class OSVR_Vec2(Structure):
    # 2-component double vector.
    _fields_ = [("data", c_double * 2)]
class OSVR_Vec3(Structure):
    # 3-component double vector.
    _fields_ = [("data", c_double * 3)]
class OSVR_Quaternion(Structure):
    # Quaternion stored as 4 doubles.
    _fields_ = [("data", c_double * 4)]
class OSVR_Pose3(Structure):
    # Rigid-body pose: translation plus rotation.
    _fields_ = [("translation", OSVR_Vec3), ("rotation", OSVR_Quaternion)]
class OSVR_RadialDistortionParameters(Structure):
    _fields_ = [("k1", OSVR_Vec3), ("centerOfProjection", OSVR_Vec2)]
class OSVR_TimeValue(Structure):
    # Seconds + microseconds, mirroring the C struct's field widths.
    _fields_ = [("seconds", c_int64),("microseconds", c_int32)]
class OSVR_DisplayDimensions(Structure):
    _fields_ = [("width", c_int32), ("height", c_int32)]
class OSVR_RelativeViewport(Structure):
    _fields_ = [("left", c_int32), ("bottom", c_int32), ("width", c_int32), ("height", c_int32)]
class OSVR_ClippingPlanes(Structure):
    _fields_ = [("left", c_double), ("right", c_double), ("bottom", c_double), ("top", c_double)]
class OSVR_EyeTracker3DState(Structure):
    _fields_ = [("direction", OSVR_Vec3), ("basePoint", OSVR_Vec3)]
# InterfaceCallbackC.h data types: one report struct per interface type,
# passed (by pointer) to the registered callbacks below.
class OSVR_PoseReport(Structure):
    _fields_ = [("sensor", c_int32), ("pose", OSVR_Pose3)]
class OSVR_PositionReport(Structure):
    _fields_ = [("sensor", c_int32), ("xyz", OSVR_Vec3)]
class OSVR_OrientationReport(Structure):
    _fields_ = [("sensor", c_int32), ("rotation", OSVR_Quaternion)]
class OSVR_ButtonReport(Structure):
    _fields_ = [("sensor", c_int32), ("state", c_uint8)]
class OSVR_AnalogReport(Structure):
    _fields_ = [("sensor", c_int32), ("state", c_double)]
#This does not seem to exist in the C file, probably mention this to Ryan
#class OSVR_ImagingReport(Structure):
#    _fields_ = [(), ()]
class OSVR_Location2DReport(Structure):
    _fields_ = [("sensor", c_uint32), ("location", OSVR_Vec2)]
class OSVR_DirectionReport(Structure):
    _fields_ = [("sensor", c_uint32), ("direction", OSVR_Vec3)]
# Using c_bool for now; may need to change depending on what exactly bool is
# in the C code (probably OSVR_CBool) -- verify against the C headers.
class OSVR_EyeTracker2DReport(Structure):
    _fields_ = [("locationValid", c_bool), ("sensor", c_uint32), ("state", OSVR_Vec2)]
class OSVR_EyeTracker3DReport(Structure):
    # NOTE(review): "directioinValid" is a typo for "directionValid"; kept
    # because existing callers may already rely on the attribute name.
    _fields_ = [("directioinValid", c_bool), ("basePointValid", c_bool), ("sensor", c_uint32), ("state", OSVR_EyeTracker3DState)]
class OSVR_EyeTrackerBlinkReport(Structure):
    _fields_ = [("blinkValid", c_bool), ("sensor", c_uint32), ("state", c_uint8)]
class OSVR_NaviVelocityReport(Structure):
    _fields_ = [("sensor", c_uint32), ("state", OSVR_Vec2)]
class OSVR_NaviPositionReport(Structure):
    _fields_ = [("sensor", c_uint32), ("state", OSVR_Vec2)]
#IMPORTANT: To create a TYPECallback function pointer to pass to OSVR_RegisterTYPECallback, you must make a python function to be called
# and then pass it to OSVR_TYPECallback, i.e.
#
#def my_pose_callback_function(c_void_p variable, POINTER(OSVR_TimeValue) variable, POINTER(OSVR_PoseReport) variable):
# .....
#
#interface = OSVR_ClientGetInterface(...)
#
#callback = OSVR_PoseCallback(my_pose_callback_function)
#
#osvrRegisterPoseCallback(interface, callback, None)
# Callback function-pointer types.  Each corresponds to the C signature
# void (*)(void *userdata, const OSVR_TimeValue *, const OSVR_<TYPE>Report *).
# Keep a reference to each CFUNCTYPE instance you register, or it will be
# garbage-collected while the C library still holds the pointer.
OSVR_PoseCallback = CFUNCTYPE(None, c_void_p, POINTER(OSVR_TimeValue), POINTER(OSVR_PoseReport))
OSVR_PositionCallback = CFUNCTYPE(None, c_void_p, POINTER(OSVR_TimeValue), POINTER(OSVR_PositionReport))
OSVR_OrientationCallback = CFUNCTYPE(None, c_void_p, POINTER(OSVR_TimeValue), POINTER(OSVR_OrientationReport))
OSVR_ButtonCallback = CFUNCTYPE(None, c_void_p, POINTER(OSVR_TimeValue), POINTER(OSVR_ButtonReport))
OSVR_AnalogCallback = CFUNCTYPE(None, c_void_p, POINTER(OSVR_TimeValue), POINTER(OSVR_AnalogReport))
#Commented out because the OSVR_ImagingReport type is not defined in C, may be included in the future
#OSVR_ImagingCallback = CFUNCTYPE(None, c_void_p, POINTER(OSVR_TimeValue), POINTER(OSVR_ImagingReport))
OSVR_Location2DCallback = CFUNCTYPE(None, c_void_p, POINTER(OSVR_TimeValue), POINTER(OSVR_Location2DReport))
OSVR_DirectionCallback = CFUNCTYPE(None, c_void_p, POINTER(OSVR_TimeValue), POINTER(OSVR_DirectionReport))
OSVR_EyeTracker2DCallback = CFUNCTYPE(None, c_void_p, POINTER(OSVR_TimeValue), POINTER(OSVR_EyeTracker2DReport))
OSVR_EyeTracker3DCallback = CFUNCTYPE(None, c_void_p, POINTER(OSVR_TimeValue), POINTER(OSVR_EyeTracker3DReport))
OSVR_EyeTrackerBlinkCallback = CFUNCTYPE(None, c_void_p, POINTER(OSVR_TimeValue), POINTER(OSVR_EyeTrackerBlinkReport))
OSVR_NaviVelocityCallback = CFUNCTYPE(None, c_void_p, POINTER(OSVR_TimeValue), POINTER(OSVR_NaviVelocityReport))
OSVR_NaviPositionCallback = CFUNCTYPE(None, c_void_p, POINTER(OSVR_TimeValue), POINTER(OSVR_NaviPositionReport))
# Error checking
class ReturnError(Exception):
    """Raised when an osvr* C call reports failure.

    Attributes:
        value: the raw return code from the C function.
        function: name of the wrapper/C function that failed.
    """

    def __init__(self, value, function):
        self.function = function
        self.value = value

    def __str__(self):
        failed_call = self.function
        return repr(failed_call)
def checkReturn(returnvalue, function):
    """Raise ReturnError when *returnvalue* is the failure code (1).

    Any other value -- including 0, the success code -- is accepted silently.
    """
    if returnvalue != 1:
        return
    raise ReturnError(returnvalue, function)
# ContextC.h functions
def osvrClientInit(applicationIdentifier):
    """Create and return an OSVR_ClientContext for this application.

    The second C argument (flags) is hard-coded to 0, the only defined value.
    """
    mylib.osvrClientInit.argtypes = [c_char_p, c_uint32]
    mylib.osvrClientInit.restype = OSVR_ClientContext
    return mylib.osvrClientInit(c_char_p(applicationIdentifier.encode("utf8")), c_uint32(0))
def osvrClientUpdate(ctx):
    """Pump the client context's message loop once; raises ReturnError on failure."""
    mylib.osvrClientUpdate.argtypes = [OSVR_ClientContext]
    mylib.osvrClientUpdate.restype = c_int8
    returnvalue = mylib.osvrClientUpdate(ctx)
    checkReturn(returnvalue, 'osvrClientUpdate')
    return
def osvrClientCheckStatus(ctx):
    """Raise ReturnError unless the context reports a working connection."""
    mylib.osvrClientCheckStatus.argtypes = [OSVR_ClientContext]
    mylib.osvrClientCheckStatus.restype = c_int8
    returnvalue = mylib.osvrClientCheckStatus(ctx)
    checkReturn(returnvalue, 'osvrClientCheckStatus')
    return
def osvrClientShutdown(ctx):
    """Shut down and free the client context; raises ReturnError on failure."""
    mylib.osvrClientShutdown.argtypes = [OSVR_ClientContext]
    mylib.osvrClientShutdown.restype = c_int8
    returnvalue = mylib.osvrClientShutdown(ctx)
    checkReturn(returnvalue, 'osvrClientShutdown')
    return
# DisplayC.h functions
def osvrClientGetDisplay(ctx):
    """Allocate and return a display configuration handle for *ctx*."""
    mylib.osvrClientGetDisplay.argtypes = [OSVR_ClientContext, POINTER(OSVR_DisplayConfig)]
    mylib.osvrClientGetDisplay.restype = c_int8
    # The C function fills in an OSVR_DisplayConfig (itself a pointer), so we
    # pass the address of our pointer for it to overwrite.
    disp = pointer(OSVR_DisplayConfigObject())
    returnvalue = mylib.osvrClientGetDisplay(ctx, pointer(disp))
    checkReturn(returnvalue, 'osvrClientGetDisplay')
    return disp
def osvrClientFreeDisplay(disp):
    """Release a display configuration obtained from osvrClientGetDisplay."""
    mylib.osvrClientFreeDisplay.argtypes = [OSVR_DisplayConfig]
    mylib.osvrClientFreeDisplay.restype = c_int8
    returnvalue = mylib.osvrClientFreeDisplay(disp)
    checkReturn(returnvalue, 'osvrClientFreeDisplay')
    return
def osvrClientCheckDisplayStartup(disp):
    """Raise ReturnError until the display configuration is fully started up."""
    mylib.osvrClientCheckDisplayStartup.argtypes = [OSVR_DisplayConfig]
    mylib.osvrClientCheckDisplayStartup.restype = c_int8
    returnvalue = mylib.osvrClientCheckDisplayStartup(disp)
    checkReturn(returnvalue, 'osvrClientCheckDisplayStartup')
    return
def osvrClientGetNumDisplayInputs(disp):
    """Return the number of physical display inputs as a c_uint8."""
    mylib.osvrClientGetNumDisplayInputs.argtypes = [OSVR_DisplayConfig, POINTER(c_uint8)]
    mylib.osvrClientGetNumDisplayInputs.restype = c_int8
    numDisplayInputs = c_uint8()
    returnvalue = mylib.osvrClientGetNumDisplayInputs(disp, pointer(numDisplayInputs))
    checkReturn(returnvalue, 'osvrClientGetNumDisplayInputs')
    return numDisplayInputs
def osvrClientGetDisplayDimensions(disp, displayInputIndex):
    """Return the OSVR_DisplayDimensions (width, height) of one display input.

    Bug fix: the previous code called pointer(dimensions.width).  ctypes
    returns a plain Python int when reading a simple-typed Structure field,
    and pointer() on an int raises TypeError, so the call could never
    succeed.  We now pass real c_int32 cells and copy them into the struct.
    """
    mylib.osvrClientGetDisplayDimensions.argtypes = [OSVR_DisplayConfig, c_uint8, POINTER(c_int32), POINTER(c_int32)]
    mylib.osvrClientGetDisplayDimensions.restype = c_int8
    width = c_int32()
    height = c_int32()
    returnvalue = mylib.osvrClientGetDisplayDimensions(disp, c_uint8(displayInputIndex), pointer(width), pointer(height))
    checkReturn(returnvalue, 'osvrClientGetDisplayDimensions')
    return OSVR_DisplayDimensions(width.value, height.value)
def osvrClientGetNumViewers(disp):
    """Return the number of viewers in the display config as a c_uint32."""
    mylib.osvrClientGetNumViewers.argtypes = [OSVR_DisplayConfig, POINTER(c_uint32)]
    mylib.osvrClientGetNumViewers.restype = c_int8
    viewers = c_uint32()
    returnvalue = mylib.osvrClientGetNumViewers(disp, pointer(viewers))
    checkReturn(returnvalue, 'osvrClientGetNumViewers')
    return viewers
def osvrClientGetViewerPose(disp, viewer):
    """Return the OSVR_Pose3 of viewer index *viewer*."""
    mylib.osvrClientGetViewerPose.argtypes = [OSVR_DisplayConfig, c_uint32, POINTER(OSVR_Pose3)]
    mylib.osvrClientGetViewerPose.restype = c_int8
    pose = OSVR_Pose3()
    returnvalue = mylib.osvrClientGetViewerPose(disp, c_uint32(viewer), pointer(pose))
    checkReturn(returnvalue, 'osvrClientGetViewerPose')
    return pose
def osvrClientGetNumEyesForViewer(disp, viewer):
    """Return the number of eyes for *viewer* as a c_uint8."""
    mylib.osvrClientGetNumEyesForViewer.argtypes = [OSVR_DisplayConfig, c_uint32, POINTER(c_uint8)]
    mylib.osvrClientGetNumEyesForViewer.restype = c_int8
    eyes = c_uint8()
    returnvalue = mylib.osvrClientGetNumEyesForViewer(disp, c_uint32(viewer), pointer(eyes))
    checkReturn(returnvalue, 'osvrClientGetNumEyesForViewer')
    return eyes
def osvrClientGetViewerEyePose(disp, viewer, eye):
    """Return the OSVR_Pose3 of one eye of a viewer.

    Bug fix: this wrapper previously invoked mylib.osvrClientGetViewerPose
    (the viewer-level function, which takes no eye argument), so the eye
    parameter was passed to the wrong C entry point.
    """
    mylib.osvrClientGetViewerEyePose.argtypes = [OSVR_DisplayConfig, c_uint32, c_uint8, POINTER(OSVR_Pose3)]
    mylib.osvrClientGetViewerEyePose.restype = c_int8
    pose = OSVR_Pose3()
    returnvalue = mylib.osvrClientGetViewerEyePose(disp, c_uint32(viewer), c_uint8(eye), pointer(pose))
    checkReturn(returnvalue, 'osvrClientGetViewerEyePose')
    return pose
def osvrClientGetViewerEyeViewMatrixd(disp, viewer, eye, flags):
    """Return the 4x4 view matrix for an eye as a ctypes array of 16 c_double.

    Bug fix: a single c_double was previously passed where the C API writes
    16 doubles, overrunning the buffer.  The return value is now the full
    16-element array (previously it was one corrupted c_double).
    """
    mylib.osvrClientGetViewerEyeViewMatrixd.argtypes = [OSVR_DisplayConfig, c_uint32, c_uint8, c_uint16, POINTER(c_double)]
    mylib.osvrClientGetViewerEyeViewMatrixd.restype = c_int8
    mat = (c_double * 16)()
    returnvalue = mylib.osvrClientGetViewerEyeViewMatrixd(disp, c_uint32(viewer), c_uint8(eye), c_uint16(flags), mat)
    checkReturn(returnvalue, 'osvrClientGetViewerEyeViewMatrixd')
    return mat
def osvrClientGetViewerEyeViewMatrixf(disp, viewer, eye, flags):
    """Return the 4x4 view matrix for an eye as a ctypes array of 16 c_float.

    Bug fixes: (1) this wrapper called the double-precision C function
    osvrClientGetViewerEyeViewMatrixd instead of the float variant;
    (2) a single c_float was passed where the C API writes 16 floats,
    overrunning the buffer.
    """
    mylib.osvrClientGetViewerEyeViewMatrixf.argtypes = [OSVR_DisplayConfig, c_uint32, c_uint8, c_uint16, POINTER(c_float)]
    mylib.osvrClientGetViewerEyeViewMatrixf.restype = c_int8
    mat = (c_float * 16)()
    returnvalue = mylib.osvrClientGetViewerEyeViewMatrixf(disp, c_uint32(viewer), c_uint8(eye), c_uint16(flags), mat)
    checkReturn(returnvalue, 'osvrClientGetViewerEyeViewMatrixf')
    return mat
def osvrClientGetNumSurfacesForViewerEye(disp, viewer, eye):
    """Return the number of render surfaces for one eye as a c_uint32."""
    mylib.osvrClientGetNumSurfacesForViewerEye.argtypes = [OSVR_DisplayConfig, c_uint32, c_uint8, POINTER(c_uint32)]
    mylib.osvrClientGetNumSurfacesForViewerEye.restype = c_int8
    surfaces = c_uint32()
    returnvalue = mylib.osvrClientGetNumSurfacesForViewerEye(disp, c_uint32(viewer), c_uint8(eye), pointer(surfaces))
    checkReturn(returnvalue, 'osvrClientGetNumSurfacesForViewerEye')
    return surfaces
def osvrClientGetRelativeViewportForViewerEyeSurface(disp, viewer, eye, surface):
    """Return the OSVR_RelativeViewport for one surface of a viewer's eye.

    Bug fix: the previous code called pointer(viewport.left) etc.  Reading a
    simple-typed Structure field yields a plain Python int, and pointer() on
    an int raises TypeError, so the call could never succeed.  Real c_int32
    cells are now passed and copied into the returned struct.
    """
    mylib.osvrClientGetRelativeViewportForViewerEyeSurface.argtypes = [OSVR_DisplayConfig, c_uint32, c_uint8, c_uint32, POINTER(c_int32), POINTER(c_int32), POINTER(c_int32), POINTER(c_int32)]
    mylib.osvrClientGetRelativeViewportForViewerEyeSurface.restype = c_int8
    left = c_int32()
    bottom = c_int32()
    width = c_int32()
    height = c_int32()
    returnvalue = mylib.osvrClientGetRelativeViewportForViewerEyeSurface(disp, c_uint32(viewer), c_uint8(eye), c_uint32(surface), pointer(left), pointer(bottom), pointer(width), pointer(height))
    checkReturn(returnvalue, 'osvrClientGetRelativeViewportForViewerEyeSurface')
    return OSVR_RelativeViewport(left.value, bottom.value, width.value, height.value)
def osvrClientGetViewerEyeSurfaceDisplayInputIndex(disp, viewer, eye, surface):
    """Return the display input index a surface is rendered to, as a c_uint8.

    Bug fixes: the body previously called this Python wrapper recursively
    (the 'mylib.' prefix was missing), recursing until RecursionError, and
    the checkReturn label named the wrong function.
    """
    mylib.osvrClientGetViewerEyeSurfaceDisplayInputIndex.argtypes = [OSVR_DisplayConfig, c_uint32, c_uint8, c_uint32, POINTER(c_uint8)]
    mylib.osvrClientGetViewerEyeSurfaceDisplayInputIndex.restype = c_int8
    displayInput = c_uint8()
    returnvalue = mylib.osvrClientGetViewerEyeSurfaceDisplayInputIndex(disp, c_uint32(viewer), c_uint8(eye), c_uint32(surface), pointer(displayInput))
    checkReturn(returnvalue, 'osvrClientGetViewerEyeSurfaceDisplayInputIndex')
    return displayInput
def osvrClientGetViewerEyeSurfaceProjectionMatrixd(disp, viewer, eye, surface, near, far, flags):
    """Return the 4x4 projection matrix as a ctypes array of 16 c_double.

    Bug fix: a single c_double was previously passed where the C API writes
    16 doubles, overrunning the buffer.
    """
    mylib.osvrClientGetViewerEyeSurfaceProjectionMatrixd.argtypes = [OSVR_DisplayConfig, c_uint32, c_uint8, c_uint32, c_double, c_double, c_uint16, POINTER(c_double)]
    mylib.osvrClientGetViewerEyeSurfaceProjectionMatrixd.restype = c_int8
    matrix = (c_double * 16)()
    returnvalue = mylib.osvrClientGetViewerEyeSurfaceProjectionMatrixd(disp, c_uint32(viewer), c_uint8(eye), c_uint32(surface), c_double(near), c_double(far), c_uint16(flags), matrix)
    checkReturn(returnvalue, 'osvrClientGetViewerEyeSurfaceProjectionMatrixd')
    return matrix
def osvrClientGetViewerEyeSurfaceProjectionMatrixf(disp, viewer, eye, surface, near, far, flags):
    """Return the 4x4 projection matrix as a ctypes array of 16 c_float.

    Bug fix: a single c_float was previously passed where the C API writes
    16 floats, overrunning the buffer.
    """
    mylib.osvrClientGetViewerEyeSurfaceProjectionMatrixf.argtypes = [OSVR_DisplayConfig, c_uint32, c_uint8, c_uint32, c_double, c_double, c_uint16, POINTER(c_float)]
    mylib.osvrClientGetViewerEyeSurfaceProjectionMatrixf.restype = c_int8
    matrix = (c_float * 16)()
    returnvalue = mylib.osvrClientGetViewerEyeSurfaceProjectionMatrixf(disp, c_uint32(viewer), c_uint8(eye), c_uint32(surface), c_double(near), c_double(far), c_uint16(flags), matrix)
    checkReturn(returnvalue, 'osvrClientGetViewerEyeSurfaceProjectionMatrixf')
    return matrix
def osvrClientGetViewerEyeSurfaceProjectionClippingPlanes(disp, viewer, eye, surface):
    """Return the OSVR_ClippingPlanes for one surface of a viewer's eye.

    Bug fixes: (1) argtypes was missing the c_uint8 'eye' entry, so ctypes
    rejected the 8-argument call; (2) pointer(planes.left) raised TypeError
    because reading a c_double field yields a plain Python float.  Real
    c_double cells are now passed and copied into the returned struct.
    """
    mylib.osvrClientGetViewerEyeSurfaceProjectionClippingPlanes.argtypes = [OSVR_DisplayConfig, c_uint32, c_uint8, c_uint32, POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double)]
    mylib.osvrClientGetViewerEyeSurfaceProjectionClippingPlanes.restype = c_int8
    left = c_double()
    right = c_double()
    bottom = c_double()
    top = c_double()
    returnvalue = mylib.osvrClientGetViewerEyeSurfaceProjectionClippingPlanes(disp, c_uint32(viewer), c_uint8(eye), c_uint32(surface), pointer(left), pointer(right), pointer(bottom), pointer(top))
    checkReturn(returnvalue, 'osvrClientGetViewerEyeSurfaceProjectionClippingPlanes')
    return OSVR_ClippingPlanes(left.value, right.value, bottom.value, top.value)
def osvrClientDoesViewerEyeSurfaceWantDistortion(disp, viewer, eye, surface):
    """Return (as c_uint8) whether a surface requests distortion correction.

    Bug fix: argtypes listed only two integer parameters after the display
    config, but the call passes viewer, eye, and surface; the missing
    c_uint8 'eye' entry made ctypes raise on every call.
    """
    mylib.osvrClientDoesViewerEyeSurfaceWantDistortion.argtypes = [OSVR_DisplayConfig, c_uint32, c_uint8, c_uint32, POINTER(c_uint8)]
    mylib.osvrClientDoesViewerEyeSurfaceWantDistortion.restype = c_int8
    request = c_uint8()
    returnvalue = mylib.osvrClientDoesViewerEyeSurfaceWantDistortion(disp, c_uint32(viewer), c_uint8(eye), c_uint32(surface), pointer(request))
    checkReturn(returnvalue, 'osvrClientDoesViewerEyeSurfaceWantDistortion')
    return request
def osvrClientGetViewerEyeSurfaceRadialDistortionPriority(disp, viewer, eye, surface):
    """Return the radial-distortion priority of a surface as a c_int32.

    Bug fix: argtypes was missing the c_uint8 'eye' entry even though the
    call passes viewer, eye, and surface, so ctypes raised on every call.
    """
    mylib.osvrClientGetViewerEyeSurfaceRadialDistortionPriority.argtypes = [OSVR_DisplayConfig, c_uint32, c_uint8, c_uint32, POINTER(c_int32)]
    mylib.osvrClientGetViewerEyeSurfaceRadialDistortionPriority.restype = c_int8
    priority = c_int32()
    returnvalue = mylib.osvrClientGetViewerEyeSurfaceRadialDistortionPriority(disp, c_uint32(viewer), c_uint8(eye), c_uint32(surface), pointer(priority))
    checkReturn(returnvalue, 'osvrClientGetViewerEyeSurfaceRadialDistortionPriority')
    return priority
def osvrClientGetViewerEyeSurfaceRadialDistortion(disp, viewer, eye, surface):
    """Return the OSVR_RadialDistortionParameters for one surface."""
    mylib.osvrClientGetViewerEyeSurfaceRadialDistortion.argtypes = [OSVR_DisplayConfig, c_uint32, c_uint8, c_uint32, POINTER(OSVR_RadialDistortionParameters)]
    mylib.osvrClientGetViewerEyeSurfaceRadialDistortion.restype = c_int8
    params = OSVR_RadialDistortionParameters()
    returnvalue = mylib.osvrClientGetViewerEyeSurfaceRadialDistortion(disp, c_uint32(viewer), c_uint8(eye), c_uint32(surface), pointer(params))
    checkReturn(returnvalue, 'osvrClientGetViewerEyeSurfaceRadialDistortion')
    return params
# ImagingC.h functions
# These don't seem to be included in the doxygen docs
def osvrClientFreeImage(ctx, buf):
    """Free an image buffer previously handed out by the imaging interface."""
    mylib.osvrClientFreeImage.argtypes = [OSVR_ClientContext, POINTER(c_ubyte)]
    mylib.osvrClientFreeImage.restype = c_int8
    returnvalue = mylib.osvrClientFreeImage(ctx, buf)
    checkReturn(returnvalue, 'osvrClientFreeImage')
    return
# InterfaceC.h functions
def osvrClientGetInterface(ctx, path):
    """Return an OSVR_ClientInterface handle for the resource at *path*."""
    mylib.osvrClientGetInterface.argtypes = [OSVR_ClientContext, c_char_p, POINTER(OSVR_ClientInterface)]
    mylib.osvrClientGetInterface.restype = c_int8
    # The C function fills in an OSVR_ClientInterface (itself a pointer), so
    # we pass the address of our pointer for it to overwrite.
    interface = pointer(OSVR_ClientInterfaceObject())
    returnvalue = mylib.osvrClientGetInterface(ctx, c_char_p(path.encode("utf8")), pointer(interface))
    checkReturn(returnvalue, 'osvrClientGetInterface')
    return interface
def osvrClientFreeInterface(ctx, iface):
    """Release an interface obtained from osvrClientGetInterface."""
    mylib.osvrClientFreeInterface.argtypes = [OSVR_ClientContext, OSVR_ClientInterface]
    mylib.osvrClientFreeInterface.restype = c_int8
    returnvalue = mylib.osvrClientFreeInterface(ctx, iface)
    checkReturn(returnvalue, 'osvrClientFreeInterface')
    return
# InterfaceCallbackC.h functions.
# Each osvrRegisterTYPECallback wrapper follows one pattern: register *cb*
# (an OSVR_TYPECallback created via the CFUNCTYPE types above) on *iface*
# with an opaque *userdata* pointer, and raise ReturnError on failure.
# NOTE(review): the caller must keep the CFUNCTYPE object alive for as long
# as the callback is registered, or ctypes will garbage-collect it.
def osvrRegisterPoseCallback(iface, cb, userdata):
    """Register *cb* for pose reports on *iface*."""
    mylib.osvrRegisterPoseCallback.argtypes = [OSVR_ClientInterface, OSVR_PoseCallback, c_void_p]
    mylib.osvrRegisterPoseCallback.restype = c_int8
    returnvalue = mylib.osvrRegisterPoseCallback(iface, cb, c_void_p(userdata))
    checkReturn(returnvalue, 'osvrRegisterPoseCallback')
    return
def osvrRegisterPositionCallback(iface, cb, userdata):
    """Register *cb* for position reports on *iface*."""
    mylib.osvrRegisterPositionCallback.argtypes = [OSVR_ClientInterface, OSVR_PositionCallback, c_void_p]
    mylib.osvrRegisterPositionCallback.restype = c_int8
    returnvalue = mylib.osvrRegisterPositionCallback(iface, cb, c_void_p(userdata))
    checkReturn(returnvalue, 'osvrRegisterPositionCallback')
    return
def osvrRegisterOrientationCallback(iface, cb, userdata):
    """Register *cb* for orientation reports on *iface*."""
    mylib.osvrRegisterOrientationCallback.argtypes = [OSVR_ClientInterface, OSVR_OrientationCallback, c_void_p]
    mylib.osvrRegisterOrientationCallback.restype = c_int8
    returnvalue = mylib.osvrRegisterOrientationCallback(iface, cb, c_void_p(userdata))
    checkReturn(returnvalue, 'osvrRegisterOrientationCallback')
    return
def osvrRegisterButtonCallback(iface, cb, userdata):
    """Register *cb* for button reports on *iface*."""
    mylib.osvrRegisterButtonCallback.argtypes = [OSVR_ClientInterface, OSVR_ButtonCallback, c_void_p]
    mylib.osvrRegisterButtonCallback.restype = c_int8
    returnvalue = mylib.osvrRegisterButtonCallback(iface, cb, c_void_p(userdata))
    checkReturn(returnvalue, 'osvrRegisterButtonCallback')
    return
def osvrRegisterAnalogCallback(iface, cb, userdata):
    """Register *cb* for analog reports on *iface*."""
    mylib.osvrRegisterAnalogCallback.argtypes = [OSVR_ClientInterface, OSVR_AnalogCallback, c_void_p]
    mylib.osvrRegisterAnalogCallback.restype = c_int8
    returnvalue = mylib.osvrRegisterAnalogCallback(iface, cb, c_void_p(userdata))
    checkReturn(returnvalue, 'osvrRegisterAnalogCallback')
    return
#Commented out because ImagingReport is not defined
#def osvrRegisterImagingCallback(iface, cb, userdata):
#    mylib.osvrRegisterImagingCallback.argtypes = [OSVR_ClientInterface, OSVR_ImagingCallback, c_void_p]
#    mylib.osvrRegisterImagingCallback.restype = c_int8
#    returnvalue = mylib.osvrRegisterImagingCallback(iface, cb, c_void_p(userdata))
#    checkReturn(returnvalue, 'osvrRegisterImagingCallback')
#    return
def osvrRegisterLocation2DCallback(iface, cb, userdata):
    """Register *cb* for 2D location reports on *iface*."""
    mylib.osvrRegisterLocation2DCallback.argtypes = [OSVR_ClientInterface, OSVR_Location2DCallback, c_void_p]
    mylib.osvrRegisterLocation2DCallback.restype = c_int8
    returnvalue = mylib.osvrRegisterLocation2DCallback(iface, cb, c_void_p(userdata))
    checkReturn(returnvalue, 'osvrRegisterLocation2DCallback')
    return
def osvrRegisterDirectionCallback(iface, cb, userdata):
    """Register *cb* for direction reports on *iface*."""
    mylib.osvrRegisterDirectionCallback.argtypes = [OSVR_ClientInterface, OSVR_DirectionCallback, c_void_p]
    mylib.osvrRegisterDirectionCallback.restype = c_int8
    returnvalue = mylib.osvrRegisterDirectionCallback(iface, cb, c_void_p(userdata))
    checkReturn(returnvalue, 'osvrRegisterDirectionCallback')
    return
def osvrRegisterEyeTracker2DCallback(iface, cb, userdata):
    """Register *cb* for 2D eye-tracker reports on *iface*."""
    mylib.osvrRegisterEyeTracker2DCallback.argtypes = [OSVR_ClientInterface, OSVR_EyeTracker2DCallback, c_void_p]
    mylib.osvrRegisterEyeTracker2DCallback.restype = c_int8
    returnvalue = mylib.osvrRegisterEyeTracker2DCallback(iface, cb, c_void_p(userdata))
    checkReturn(returnvalue, 'osvrRegisterEyeTracker2DCallback')
    return
def osvrRegisterEyeTracker3DCallback(iface, cb, userdata):
    """Register *cb* for 3D eye-tracker reports on *iface*."""
    mylib.osvrRegisterEyeTracker3DCallback.argtypes = [OSVR_ClientInterface, OSVR_EyeTracker3DCallback, c_void_p]
    mylib.osvrRegisterEyeTracker3DCallback.restype = c_int8
    returnvalue = mylib.osvrRegisterEyeTracker3DCallback(iface, cb, c_void_p(userdata))
    checkReturn(returnvalue, 'osvrRegisterEyeTracker3DCallback')
    return
def osvrRegisterEyeTrackerBlinkCallback(iface, cb, userdata):
    """Register *cb* for eye-tracker blink reports on *iface*."""
    mylib.osvrRegisterEyeTrackerBlinkCallback.argtypes = [OSVR_ClientInterface, OSVR_EyeTrackerBlinkCallback, c_void_p]
    mylib.osvrRegisterEyeTrackerBlinkCallback.restype = c_int8
    returnvalue = mylib.osvrRegisterEyeTrackerBlinkCallback(iface, cb, c_void_p(userdata))
    checkReturn(returnvalue, 'osvrRegisterEyeTrackerBlinkCallback')
    return
def osvrRegisterNaviVelocityCallback(iface, cb, userdata):
    """Register *cb* for navigation velocity reports on *iface*."""
    mylib.osvrRegisterNaviVelocityCallback.argtypes = [OSVR_ClientInterface, OSVR_NaviVelocityCallback, c_void_p]
    mylib.osvrRegisterNaviVelocityCallback.restype = c_int8
    returnvalue = mylib.osvrRegisterNaviVelocityCallback(iface, cb, c_void_p(userdata))
    checkReturn(returnvalue, 'osvrRegisterNaviVelocityCallback')
    return
def osvrRegisterNaviPositionCallback(iface, cb, userdata):
    """Register *cb* for navigation position reports on *iface*."""
    mylib.osvrRegisterNaviPositionCallback.argtypes = [OSVR_ClientInterface, OSVR_NaviPositionCallback, c_void_p]
    mylib.osvrRegisterNaviPositionCallback.restype = c_int8
    returnvalue = mylib.osvrRegisterNaviPositionCallback(iface, cb, c_void_p(userdata))
    checkReturn(returnvalue, 'osvrRegisterNaviPositionCallback')
    return
# InterfaceStateC.h functions.
# Each osvrGetTYPEState wrapper follows one pattern: poll the latest cached
# report for *iface* and return a (state, timestamp) tuple, raising
# ReturnError if no report is available yet.  Note the tuple order is
# (state, timestamp) even though the C functions take (timestamp, state).
def osvrGetPoseState(iface):
    mylib.osvrGetPoseState.argtypes = [OSVR_ClientInterface, POINTER(OSVR_TimeValue), POINTER(OSVR_Pose3)]
    mylib.osvrGetPoseState.restype = c_int8
    timestamp = OSVR_TimeValue()
    state = OSVR_Pose3()
    returnvalue = mylib.osvrGetPoseState(iface, pointer(timestamp), pointer(state))
    checkReturn(returnvalue, 'osvrGetPoseState')
    return (state, timestamp)
def osvrGetPositionState(iface):
    mylib.osvrGetPositionState.argtypes = [OSVR_ClientInterface, POINTER(OSVR_TimeValue), POINTER(OSVR_Vec3)]
    mylib.osvrGetPositionState.restype = c_int8
    timestamp = OSVR_TimeValue()
    state = OSVR_Vec3()
    returnvalue = mylib.osvrGetPositionState(iface, pointer(timestamp), pointer(state))
    checkReturn(returnvalue, 'osvrGetPositionState')
    return (state, timestamp)
def osvrGetOrientationState(iface):
    mylib.osvrGetOrientationState.argtypes = [OSVR_ClientInterface, POINTER(OSVR_TimeValue), POINTER(OSVR_Quaternion)]
    mylib.osvrGetOrientationState.restype = c_int8
    timestamp = OSVR_TimeValue()
    state = OSVR_Quaternion()
    returnvalue = mylib.osvrGetOrientationState(iface, pointer(timestamp), pointer(state))
    checkReturn(returnvalue, 'osvrGetOrientationState')
    return (state, timestamp)
def osvrGetButtonState(iface):
    mylib.osvrGetButtonState.argtypes = [OSVR_ClientInterface, POINTER(OSVR_TimeValue), POINTER(c_uint8)]
    mylib.osvrGetButtonState.restype = c_int8
    timestamp = OSVR_TimeValue()
    state = c_uint8()
    returnvalue = mylib.osvrGetButtonState(iface, pointer(timestamp), pointer(state))
    checkReturn(returnvalue, 'osvrGetButtonState')
    return (state, timestamp)
def osvrGetAnalogState(iface):
    mylib.osvrGetAnalogState.argtypes = [OSVR_ClientInterface, POINTER(OSVR_TimeValue), POINTER(c_double)]
    mylib.osvrGetAnalogState.restype = c_int8
    timestamp = OSVR_TimeValue()
    state = c_double()
    returnvalue = mylib.osvrGetAnalogState(iface, pointer(timestamp), pointer(state))
    checkReturn(returnvalue, 'osvrGetAnalogState')
    return (state, timestamp)
def osvrGetLocation2DState(iface):
    mylib.osvrGetLocation2DState.argtypes = [OSVR_ClientInterface, POINTER(OSVR_TimeValue), POINTER(OSVR_Vec2)]
    mylib.osvrGetLocation2DState.restype = c_int8
    timestamp = OSVR_TimeValue()
    state = OSVR_Vec2()
    returnvalue = mylib.osvrGetLocation2DState(iface, pointer(timestamp), pointer(state))
    checkReturn(returnvalue, 'osvrGetLocation2DState')
    return (state, timestamp)
def osvrGetDirectionState(iface):
    mylib.osvrGetDirectionState.argtypes = [OSVR_ClientInterface, POINTER(OSVR_TimeValue), POINTER(OSVR_Vec3)]
    mylib.osvrGetDirectionState.restype = c_int8
    timestamp = OSVR_TimeValue()
    state =OSVR_Vec3()
    returnvalue = mylib.osvrGetDirectionState(iface, pointer(timestamp), pointer(state))
    checkReturn(returnvalue, 'osvrGetDirectionState')
    return (state, timestamp)
def osvrGetEyeTracker2DState(iface):
    mylib.osvrGetEyeTracker2DState.argtypes = [OSVR_ClientInterface, POINTER(OSVR_TimeValue), POINTER(OSVR_Vec2)]
    mylib.osvrGetEyeTracker2DState.restype = c_int8
    timestamp = OSVR_TimeValue()
    state = OSVR_Vec2()
    returnvalue = mylib.osvrGetEyeTracker2DState(iface, pointer(timestamp), pointer(state))
    checkReturn(returnvalue, 'osvrGetEyeTracker2DState')
    return (state, timestamp)
def osvrGetEyeTracker3DState(iface):
    mylib.osvrGetEyeTracker3DState.argtypes = [OSVR_ClientInterface, POINTER(OSVR_TimeValue), POINTER(OSVR_EyeTracker3DState)]
    mylib.osvrGetEyeTracker3DState.restype = c_int8
    timestamp = OSVR_TimeValue()
    state = OSVR_EyeTracker3DState()
    returnvalue = mylib.osvrGetEyeTracker3DState(iface, pointer(timestamp), pointer(state))
    checkReturn(returnvalue, 'osvrGetEyeTracker3DState')
    return (state, timestamp)
def osvrGetEyeTrackerBlinkState(iface):
    mylib.osvrGetEyeTrackerBlinkState.argtypes = [OSVR_ClientInterface, POINTER(OSVR_TimeValue), POINTER(c_uint8)]
    mylib.osvrGetEyeTrackerBlinkState.restype = c_int8
    timestamp = OSVR_TimeValue()
    state = c_uint8()
    returnvalue = mylib.osvrGetEyeTrackerBlinkState(iface, pointer(timestamp), pointer(state))
    checkReturn(returnvalue, 'osvrGetEyeTrackerBlinkState')
    return (state, timestamp)
def osvrGetNaviVelocityState(iface):
    mylib.osvrGetNaviVelocityState.argtypes = [OSVR_ClientInterface, POINTER(OSVR_TimeValue), POINTER(OSVR_Vec2)]
    mylib.osvrGetNaviVelocityState.restype = c_int8
    timestamp = OSVR_TimeValue()
    state = OSVR_Vec2()
    returnvalue = mylib.osvrGetNaviVelocityState(iface, pointer(timestamp), pointer(state))
    checkReturn(returnvalue, 'osvrGetNaviVelocityState')
    return (state, timestamp)
def osvrGetNaviPositionState(iface):
    mylib.osvrGetNaviPositionState.argtypes = [OSVR_ClientInterface, POINTER(OSVR_TimeValue), POINTER(OSVR_Vec2)]
    mylib.osvrGetNaviPositionState.restype = c_int8
    timestamp = OSVR_TimeValue()
    state = OSVR_Vec2()
    returnvalue = mylib.osvrGetNaviPositionState(iface, pointer(timestamp), pointer(state))
    checkReturn(returnvalue, 'osvrGetNaviPositionState')
    return (state, timestamp)
# ParametersC.h functions
def osvrClientGetStringParameterLength(ctx, path):
    """Return (as c_size_t) the buffer length needed for the parameter at *path*."""
    mylib.osvrClientGetStringParameterLength.argtypes = [OSVR_ClientContext, c_char_p, POINTER(c_size_t)]
    mylib.osvrClientGetStringParameterLength.restype = c_int8
    length = c_size_t()
    returnvalue = mylib.osvrClientGetStringParameterLength(ctx, c_char_p(path.encode("utf8")), pointer(length))
    checkReturn(returnvalue, 'osvrClientGetStringParameterLength')
    return length
def osvrClientGetStringParameter(ctx, path, len):
    """Return the string parameter at *path*, decoded as UTF-8.

    *len* is the c_size_t from osvrClientGetStringParameterLength.
    Bug fix: argtypes listed the buffer before the path, the reverse of the
    C signature (ctx, const char *path, char *buf, size_t).  ctypes' lenient
    char-pointer conversions masked the mistake; the declaration now matches
    the call.
    NOTE: the parameter name 'len' shadows the builtin but is kept for
    keyword-argument compatibility with existing callers.
    """
    mylib.osvrClientGetStringParameter.argtypes = [OSVR_ClientContext, c_char_p, POINTER(c_char), c_size_t]
    mylib.osvrClientGetStringParameter.restype = c_int8
    buf = create_string_buffer(len.value)
    returnvalue = mylib.osvrClientGetStringParameter(ctx, c_char_p(path.encode("utf8")), buf, c_size_t(len.value))
    checkReturn(returnvalue, 'osvrClientGetStringParameter')
    return buf.value.decode("utf8")
| |
from geo import *
# Move-result / game-state / color codes shared across the piece classes.
# NOTE(review): these are plain module constants, not an Enum; CHECK,
# CHECK_MATE, TAKE, DRAW, WHITE_WIN and BLACK_WIN are not referenced in
# this module -- presumably consumed by the game controller.
INVALID = 0
CHECK = 1
CHECK_MATE = 2
PROMOTION = 3
TAKE = 4
DRAW = 5
VALID = 6
ENPASANT = 7
KING_CASTLE = 8
QUEEN_CASTLE = 9
WHITE_WIN = 10
BLACK_WIN = 11
WHITE = 12
BLACK = 13
class Piece(object):
    """Abstract chess piece bound to a square of *board*.

    Registering itself in board[x][y] on construction; subclasses override
    pieceReach/isValidMove with their movement rules.
    """

    def __init__(self, board, color, coord):
        self.board = board
        self.color = color
        self.coord = coord
        # Number of moves made so far; 0 means the piece has never moved.
        self.count = 0
        row, col = coord
        self.board[row][col] = self

    def pieceReach(self):
        """Yield reachable squares (implemented by subclasses)."""
        pass

    def __str__(self):
        pass

    def isOppositeColor(self, square):
        """True when the piece on *square* belongs to the other side."""
        row, col = square
        occupant = self.board[row][col]
        return occupant.color != self.color

    def isValidSquare(self, square):
        """A square is a legal destination when empty or enemy-occupied."""
        row, col = square
        occupant = self.board[row][col]
        if occupant and not self.isOppositeColor(square):
            return False
        return True

    def oppositeColor(self):
        """Color code of the enemy side."""
        return BLACK if self.color == WHITE else WHITE

    def oppositeSide(self, square):
        pass

    def attackReach(self):
        pass

    def isValidMove(self, square):
        """Base pieces accept no moves; subclasses implement real rules."""
        return INVALID
class Bishop(Piece):
    """Diagonal mover.  Relies on the geo helpers for diagonal geometry."""

    def __init__(self, board, color, coord):
        Piece.__init__(self, board, color, coord)
        self.type = 'bishop'

    def pieceReach(self):
        """Yield every diagonal square this bishop may currently move to."""
        for candidate in isOnRight(self.coord):
            if self.isValidMove(candidate):
                yield candidate
        for candidate in isOnLeft(self.coord):
            if self.isValidMove(candidate):
                yield candidate

    def isValidMove(self, square):
        """VALID for an unobstructed diagonal move, INVALID otherwise."""
        if not self.isValidSquare(square):
            return INVALID
        if isObtuse(self.coord, square):
            return INVALID if self.isBlockedOnLeft(self.coord, square) else VALID
        if isAcute(self.coord, square):
            return INVALID if self.isBlockedOnRight(self.coord, square) else VALID
        return INVALID

    def isBlockedOnLeft(self, square1, square2):
        """True when any piece sits strictly between the squares on the left diagonal."""
        return any(self.board[x][y] for x, y in goOnLeft(square1, square2))

    def isBlockedOnRight(self, square1, square2):
        """True when any piece sits strictly between the squares on the right diagonal."""
        return any(self.board[x][y] for x, y in goOnRight(square1, square2))
class Knight(Piece):
    """L-shaped jumper; intervening pieces are irrelevant."""

    def __init__(self, board, color, coord):
        Piece.__init__(self, board, color, coord)
        self.type = 'knight'

    def pieceReach(self):
        """Yield every knight-move square that is currently legal."""
        for candidate in vertice(self.coord):
            if self.isValidMove(candidate):
                yield candidate

    def isValidMove(self, square):
        """VALID when *square* is a reachable knight-move square, else INVALID."""
        if not self.isValidSquare(square):
            return INVALID
        if any(candidate == square for candidate in vertice(self.coord)):
            return VALID
        return INVALID
class Rook(Piece):
    """Rank- and file-moving piece."""

    def __init__(self, board, color, coord):
        Piece.__init__(self, board, color, coord)
        self.type = 'rook'

    def pieceReach(self):
        """Yield every vertical or horizontal square this rook may move to."""
        for candidate in isOnVertical(self.coord):
            if self.isValidMove(candidate):
                yield candidate
        for candidate in isOnHorizontal(self.coord):
            if self.isValidMove(candidate):
                yield candidate

    def isValidMove(self, square):
        """VALID for an unobstructed straight-line move, INVALID otherwise."""
        if not self.isValidSquare(square):
            return INVALID
        if isVertical(self.coord, square):
            return INVALID if self.isBlockedOnVertical(self.coord, square) else VALID
        if isHorizontal(self.coord, square):
            return INVALID if self.isBlockedOnHorizontal(self.coord, square) else VALID
        return INVALID

    def isBlockedOnHorizontal(self, square1, square2):
        """True when any piece sits strictly between the squares on the rank."""
        return any(self.board[x][y] for x, y in goOnHorizontal(square1, square2))

    def isBlockedOnVertical(self, square1, square2):
        """True when any piece sits strictly between the squares on the file."""
        return any(self.board[x][y] for x, y in goOnVertical(square1, square2))
class Queen(Bishop, Rook):
    """Combines bishop (diagonal) and rook (straight-line) movement."""

    def __init__(self, board, color, coord):
        Piece.__init__(self, board, color, coord)
        self.type = 'queen'

    def pieceReach(self):
        """Yield the union of bishop-style and rook-style reachable squares."""
        for candidate in Bishop.pieceReach(self):
            yield candidate
        for candidate in Rook.pieceReach(self):
            yield candidate

    def isValidMove(self, square):
        """VALID when either parent's movement rule accepts *square*."""
        if not self.isValidSquare(square):
            return INVALID
        diagonal = Bishop.isValidMove(self, square)
        straight = Rook.isValidMove(self, square)
        return VALID if (diagonal or straight) else INVALID
class Pawn(Rook):
    """Pawn: forward moves, diagonal captures, en passant, promotion.

    NOTE(review): inherits Rook, but none of Rook's helpers are used here --
    confirm whether that base class is intentional before refactoring.
    """

    def __init__(self, board, color, coord):
        Piece.__init__(self, board, color, coord)
        self.type = 'pawn'

    def oppositeSide(self):
        """Forward direction along the row axis: -1 for white, +1 for black."""
        if self.color == WHITE:
            return -1
        else:
            return 1

    def pieceReach(self):
        """Yield every square this pawn can currently move to."""
        side = self.oppositeSide()
        for ind in arrow(side, self.coord):
            code = self.isValidMove(ind)
            if code:
                yield(ind)

    def firstCaseEnPasant(self, square):
        """En passant capture toward the lower file (y - 1)."""
        x1, y1 = self.coord
        x2, y2 = square
        side = self.oppositeSide()
        if y2 == y1 - 1 and x2 == x1 + side:
            left = self.board[x1][y1 - 1]
            if not left:
                return False
            if not isinstance(left, Pawn):
                return False
            if left.isOppositeColor(self.coord):
                # count == 1: the enemy pawn has made exactly one move (its
                # double step), the only moment en passant is available.
                if left.count == 1:
                    return True
        return False

    def secondCaseEnPasant(self, square):
        """En passant capture toward the higher file (y + 1)."""
        x1, y1 = self.coord
        x2, y2 = square
        side = self.oppositeSide()
        if y2 == y1 + 1 and x2 == x1 + side:
            right = self.board[x1][y1 + 1]
            if not right:
                return False
            if not isinstance(right, Pawn):
                return False
            if right.isOppositeColor(self.coord):
                if right.count == 1:
                    return True
        return False

    def isEnPasant(self, square):
        """True when *square* is a legal en passant capture right now."""
        alpha = self.firstCaseEnPasant(square)
        beta = self.secondCaseEnPasant(square)
        if alpha or beta:
            # The captured pawn's double step must be the game's last move.
            last = self.board.lastMove()
            m, n = last
            xn, yn = n
            x1, y1 = self.coord
            x2, y2 = square
            if (xn, yn) == (x1, y2):
                return True
        return False

    def isTake(self, square):
        """True for a one-square diagonal-forward move onto an occupied square."""
        x1, y1 = self.coord
        x2, y2 = square
        side = self.oppositeSide()
        if x2 != x1 + side:
            return False
        if y2 != y1 - 1 and y2 != y1 + 1:
            return False
        if not self.board[x2][y2]:
            return False
        return True

    def isNormalMove(self, square):
        """One- or two-square forward advance onto an empty square."""
        x1, y1 = self.coord
        x2, y2 = square
        if self.board[x2][y2]:
            return False
        side = self.oppositeSide()
        if y1 != y2:
            return False
        if x2 == x1 + side:
            return True
        if x2 == x1 + 2 * side:
            # Bug fix: the double step is only legal from the starting rank
            # AND when the square being jumped over is empty -- previously a
            # pawn could hop over a blocking piece.
            if not self.count and not self.board[x1 + side][y1]:
                return True
        return False

    def isPromotion(self, square):
        """True when a legal advance or capture ends on a back rank (row 0 or 7)."""
        if not self.isNormalMove(square) and not self.isTake(square):
            return False
        x2, y2 = square
        if x2 == 0 or x2 == 7:
            return True
        return False

    def isValidMove(self, square):
        """Classify the move: PROMOTION, VALID, ENPASANT, or INVALID."""
        if not self.isValidSquare(square):
            return INVALID
        if self.isPromotion(square):
            return PROMOTION
        elif self.isNormalMove(square):
            return VALID
        elif self.isTake(square):
            return VALID
        elif self.isEnPasant(square):
            return ENPASANT
        return INVALID
class King(Piece):
    def __init__(self, board, color, coord):
        # Delegate placement to Piece; the inherited move count of 0 is what
        # the castling checks below rely on.
        Piece.__init__(self, board, color, coord)
        self.type = 'king'
def isFreeKingSide(self):
x1, y1 = self.coord
if self.board[x1][y1 + 1]:
return False
if self.board[x1][y1 + 2]:
return False
return True
def isFreeQueenSide(self):
x1, y1 = self.coord
if self.board[x1][y1 - 1]:
return False
if self.board[x1][y1 - 2]:
return False
if self.board[x1][y1 - 3]:
return False
return True
def isThreatenedKingSide(self, square):
x1, y1 = self.coord
m = (x1, y1 + 1)
n = (x1, y1 + 2)
color = self.oppositeColor()
for indi in self.board.matter(color):
for indj in indi.pieceReach():
if indj == self.coord:
return True
if indj == m:
return True
if indj == n:
return True
return False
def isThreatenedQueenSide(self, square):
x1, y1 = self.coord
m = (x1, y1 - 1)
n = (x1, y1 - 2)
e = (x1, y1 - 3)
color = self.oppositeColor()
for indi in self.board.matter(color):
for indj in indi.pieceReach():
if indj == self.coord:
return True
if indj == m:
return True
if indj == n:
return True
if indj == e:
return True
return False
def isKingCastle(self, square):
if self.count:
return False
if not self.isFreeKingSide():
return False
if self.isThreatenedKingSide(square):
return False
if square != (7, 6) and square != (0, 6):
return False
x, y = square
item = self.board[x][y + 1]
if not item:
return False
if not isinstance(item, Rook):
return False
if item.color != self.color:
return False
if item.count:
return False
return True
def isQueenCastle(self, square):
if self.count:
return False
if not self.isFreeQueenSide():
return False
if self.isThreatenedQueenSide(square):
return False
if square != (0, 2) and square != (7, 2):
return False
x, y = square
item = self.board[x][y - 2]
if not item:
return False
if not isinstance(item, Rook):
return False
if item.color != self.color:
return False
if item.count:
return False
return True
def isNormalMove(self, square):
if not self.isValidSquare(square):
return False
for ind in edge(self.coord):
if ind == square:
return True
return False
def pieceReach(self):
""" Returns all the surrounding squares differing from color """
for x, y in edge(self.coord):
""" I can't use self.isValidMove here otherwise
we ould have a infinite recursion since i'm using pieceReach in
King.isValidMove
"""
if not self.board[x][y]:
yield((x, y))
continue
if self.isOppositeColor((x, y)):
yield((x,y))
def isValidMove(self, square):
if self.isNormalMove(square):
return VALID
elif self.isKingCastle(square):
return KING_CASTLE
elif self.isQueenCastle(square):
return QUEEN_CASTLE
return INVALID
| |
import numpy as np
import ray.experimental.array.remote as ra
import ray
from . import core
__all__ = ["tsqr", "modified_lu", "tsqr_hr", "qr"]
@ray.remote(num_returns=2)
def tsqr(a):
    """Perform a QR decomposition of a tall-skinny matrix.
    Args:
        a: A distributed matrix with shape MxN (suppose K = min(M, N)).
    Returns:
        A tuple of q (a DistArray) and r (a numpy array) satisfying the
        following.
        - If q_full = ray.get(DistArray, q).assemble(), then
        q_full.shape == (M, K).
        - np.allclose(np.dot(q_full.T, q_full), np.eye(K)) == True.
        - If r_val = ray.get(np.ndarray, r), then r_val.shape == (K, N).
        - np.allclose(r, np.triu(r)) == True.
    """
    if len(a.shape) != 2:
        raise Exception("tsqr requires len(a.shape) == 2, but a.shape is "
                        "{}".format(a.shape))
    if a.num_blocks[1] != 1:
        raise Exception("tsqr requires a.num_blocks[1] == 1, but a.num_blocks "
                        "is {}".format(a.num_blocks))
    num_blocks = a.num_blocks[0]
    # K is the number of levels of the binary reduction tree (leaf level
    # included); q_tree[i, j] stores the Q factor produced for node i at
    # tree level j.
    K = int(np.ceil(np.log2(num_blocks))) + 1
    q_tree = np.empty((num_blocks, K), dtype=object)
    current_rs = []
    # Level 0: factor each row-block of "a" independently.
    for i in range(num_blocks):
        block = a.object_refs[i, 0]
        q, r = ra.linalg.qr.remote(block)
        q_tree[i, 0] = q
        current_rs.append(r)
    # Levels 1..K-1: stack neighbouring R factors pairwise and re-factor,
    # halving the number of R factors at every level until one remains.
    for j in range(1, K):
        new_rs = []
        for i in range(int(np.ceil(1.0 * len(current_rs) / 2))):
            stacked_rs = ra.vstack.remote(*current_rs[(2 * i):(2 * i + 2)])
            q, r = ra.linalg.qr.remote(stacked_rs)
            q_tree[i, j] = q
            new_rs.append(r)
        current_rs = new_rs
    assert len(current_rs) == 1, "len(current_rs) = " + str(len(current_rs))
    # handle the special case in which the whole DistArray "a" fits in one
    # block and has fewer rows than columns, this is a bit ugly so think about
    # how to remove it
    if a.shape[0] >= a.shape[1]:
        q_shape = a.shape
    else:
        q_shape = [a.shape[0], a.shape[0]]
    q_num_blocks = core.DistArray.compute_num_blocks(q_shape)
    q_object_refs = np.empty(q_num_blocks, dtype=object)
    q_result = core.DistArray(q_shape, q_object_refs)
    # reconstruct output
    # Each block's final Q is its leaf Q multiplied by the appropriate half
    # (top or bottom, chosen by the parity of ith_index) of every stacked Q
    # on its path up the reduction tree.
    for i in range(num_blocks):
        q_block_current = q_tree[i, 0]
        ith_index = i
        for j in range(1, K):
            if np.mod(ith_index, 2) == 0:
                lower = [0, 0]
                upper = [a.shape[1], core.BLOCK_SIZE]
            else:
                lower = [a.shape[1], 0]
                upper = [2 * a.shape[1], core.BLOCK_SIZE]
            ith_index //= 2
            q_block_current = ra.dot.remote(
                q_block_current,
                ra.subarray.remote(q_tree[ith_index, j], lower, upper))
        q_result.object_refs[i] = q_block_current
    r = current_rs[0]
    return q_result, ray.get(r)
# TODO(rkn): This is unoptimized, we really want a block version of this.
# This is Algorithm 5 from
# http://www.eecs.berkeley.edu/Pubs/TechRpts/2013/EECS-2013-175.pdf.
@ray.remote(num_returns=3)
def modified_lu(q):
    """Perform a modified LU decomposition of a matrix.
    This takes a matrix q with orthonormal columns, returns l, u, s such that
    q - s = l * u.
    Args:
        q: A two dimensional orthonormal matrix q.
    Returns:
        A tuple of a lower triangular matrix l, an upper triangular matrix u,
        and a vector representing a diagonal matrix s such that
        q - s = l * u.
    """
    q = q.assemble()
    m, b = q.shape[0], q.shape[1]
    S = np.zeros(b)
    q_work = np.copy(q)
    for i in range(b):
        # Shift the pivot away from zero; S records the shift so that
        # q - diag(S) = L * U holds afterwards.
        S[i] = -1 * np.sign(q_work[i, i])
        q_work[i, i] -= S[i]
        # Scale ith column of L by diagonal element.
        q_work[(i + 1):m, i] /= q_work[i, i]
        # Perform Schur complement update.
        q_work[(i + 1):m, (i + 1):b] -= np.outer(q_work[(i + 1):m, i],
                                                 q_work[i, (i + 1):b])
    # L is unit lower triangular; U is the top b rows of the upper triangle.
    L = np.tril(q_work)
    for i in range(b):
        L[i, i] = 1
    U = np.triu(q_work)[:b, :]
    # TODO(rkn): Get rid of the put below.
    return ray.get(core.numpy_to_dist.remote(ray.put(L))), U, S
@ray.remote(num_returns=2)
def tsqr_hr_helper1(u, s, y_top_block, b):
    """Compute t = -u * diag(s) * inv(y_top).T from the top b-by-b block of
    y, returning (t, y_top)."""
    top = y_top_block[:b, :b]
    inv_top_t = np.linalg.inv(top).T
    t = -np.dot(u, np.dot(np.diag(s), inv_top_t))
    return t, top
@ray.remote
def tsqr_hr_helper2(s, r_temp):
    """Return diag(s) * r_temp."""
    scaling = np.diag(s)
    return np.dot(scaling, r_temp)
# This is Algorithm 6 from
# http://www.eecs.berkeley.edu/Pubs/TechRpts/2013/EECS-2013-175.pdf.
@ray.remote(num_returns=4)
def tsqr_hr(a):
    """QR factorization with Householder reconstruction of a tall-skinny
    distributed matrix.

    Returns a tuple (y, t, y_top, r): y holds the reconstructed reflector
    columns (a DistArray), t the blocked-reflector T matrix, y_top the top
    a.shape[1]-by-a.shape[1] block of y, and r the triangular factor.
    """
    # Explicit TSQR, then turn its Q into Householder form via modified LU.
    q, r_temp = tsqr.remote(a)
    y, u, s = modified_lu.remote(q)
    y_blocked = ray.get(y)
    t, y_top = tsqr_hr_helper1.remote(u, s, y_blocked.object_refs[0, 0],
                                      a.shape[1])
    # r = diag(s) * r_temp compensates for the diagonal shift in modified_lu.
    r = tsqr_hr_helper2.remote(s, r_temp)
    return ray.get(y), ray.get(t), ray.get(y_top), ray.get(r)
@ray.remote
def qr_helper1(a_rc, y_ri, t, W_c):
    """Return the trailing-block update a_rc - y_ri * t.T * W_c."""
    correction = np.dot(y_ri, np.dot(t.T, W_c))
    return a_rc - correction
@ray.remote
def qr_helper2(y_ri, a_rc):
    """Return y_ri.T * a_rc, one summand of the W accumulation."""
    y_transposed = y_ri.T
    return np.dot(y_transposed, a_rc)
# This is Algorithm 7 from
# http://www.eecs.berkeley.edu/Pubs/TechRpts/2013/EECS-2013-175.pdf.
@ray.remote(num_returns=2)
def qr(a):
    """Perform a blocked QR decomposition of a distributed matrix.
    Args:
        a: A two dimensional DistArray.
    Returns:
        A tuple (q, r_res) with q of shape (m, k) and r_res of shape
        (k, n), where k = min(m, n).
    """
    m, n = a.shape[0], a.shape[1]
    k = min(m, n)
    # we will store our scratch work in a_work
    a_work = core.DistArray(a.shape, np.copy(a.object_refs))
    # Factor one block locally just to discover the result dtype.
    result_dtype = np.linalg.qr(ray.get(a.object_refs[0, 0]))[0].dtype.name
    # TODO(rkn): It would be preferable not to get this right after creating
    # it.
    r_res = ray.get(core.zeros.remote([k, n], result_dtype))
    # TODO(rkn): It would be preferable not to get this right after creating
    # it.
    y_res = ray.get(core.zeros.remote([m, k], result_dtype))
    Ts = []
    # The for loop differs from the paper, which says
    # "for i in range(a.num_blocks[1])", but that doesn't seem to make any
    # sense when a.num_blocks[1] > a.num_blocks[0].
    for i in range(min(a.num_blocks[0], a.num_blocks[1])):
        # Panel factorization: tsqr_hr on block column i, rows i..end.
        sub_dist_array = core.subblocks.remote(
            a_work, list(range(i, a_work.num_blocks[0])), [i])
        y, t, _, R = tsqr_hr.remote(sub_dist_array)
        y_val = ray.get(y)
        for j in range(i, a.num_blocks[0]):
            y_res.object_refs[j, i] = y_val.object_refs[j - i, 0]
        if a.shape[0] > a.shape[1]:
            # in this case, R needs to be square
            R_shape = ray.get(ra.shape.remote(R))
            eye_temp = ra.eye.remote(
                R_shape[1], R_shape[0], dtype_name=result_dtype)
            r_res.object_refs[i, i] = ra.dot.remote(eye_temp, R)
        else:
            r_res.object_refs[i, i] = R
        Ts.append(core.numpy_to_dist.remote(t))
        # Trailing update for each remaining block column c:
        # A_c <- A_c - y * t.T * (y.T * A_c), computed via the two helpers.
        for c in range(i + 1, a.num_blocks[1]):
            W_rcs = []
            for r in range(i, a.num_blocks[0]):
                y_ri = y_val.object_refs[r - i, 0]
                W_rcs.append(qr_helper2.remote(y_ri, a_work.object_refs[r, c]))
            W_c = ra.sum_list.remote(*W_rcs)
            for r in range(i, a.num_blocks[0]):
                y_ri = y_val.object_refs[r - i, 0]
                A_rc = qr_helper1.remote(a_work.object_refs[r, c], y_ri, t,
                                         W_c)
                a_work.object_refs[r, c] = A_rc
            r_res.object_refs[i, c] = a_work.object_refs[i, c]
    # construct q_res from Ys and Ts
    # Accumulate q by applying the stored block reflectors to the identity
    # in reverse order.
    q = core.eye.remote(m, k, dtype_name=result_dtype)
    for i in range(len(Ts))[::-1]:
        y_col_block = core.subblocks.remote(y_res, [], [i])
        q = core.subtract.remote(
            q,
            core.dot.remote(
                y_col_block,
                core.dot.remote(
                    Ts[i],
                    core.dot.remote(core.transpose.remote(y_col_block), q))))
    return ray.get(q), r_res
| |
""" Dictionary learning
"""
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD
import time
import sys
import itertools
import warnings
from math import sqrt, floor, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..utils import array2d, check_random_state, gen_even_slices
from ..utils.extmath import randomized_svd
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000):
"""Generic sparse coding
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
Returns
-------
code: array of shape (n_components, n_features)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=False, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
clf = Lasso(alpha=alpha, fit_intercept=False, precompute=gram,
max_iter=max_iter)
clf.fit(dictionary.T, X.T, Xy=cov, coef_init=init)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
lars = Lars(fit_intercept=False, verbose=False, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
norms_squared = np.sum((X ** 2), axis=1)
new_code = orthogonal_mp_gram(gram, cov, regularization, None,
norms_squared, copy_Xy=copy_cov).T
else:
raise ValueError('Sparse coding method must be "lasso_lars" '
'"lasso_cd", "lasso", "threshold" or "omp", got %s.'
% algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
                  n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
                  max_iter=1000, n_jobs=1):
    """Sparse coding
    Each row of the result is the solution to a sparse coding problem.
    The goal is to find a sparse array `code` such that::
        X ~= code * dictionary
    Parameters
    ----------
    X: array of shape (n_samples, n_features)
        Data matrix
    dictionary: array of shape (n_components, n_features)
        The dictionary matrix against which to solve the sparse coding of
        the data. Some of the algorithms assume normalized rows for meaningful
        output.
    gram: array, shape=(n_components, n_components)
        Precomputed Gram matrix, dictionary * dictionary'
    cov: array, shape=(n_components, n_samples)
        Precomputed covariance, dictionary * X'
    algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
        lars: uses the least angle regression method (linear_model.lars_path)
        lasso_lars: uses Lars to compute the Lasso solution
        lasso_cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). lasso_lars will be faster if
        the estimated components are sparse.
        omp: uses orthogonal matching pursuit to estimate the sparse solution
        threshold: squashes to zero all coefficients less than alpha from
        the projection dictionary * X'
    n_nonzero_coefs: int, 0.1 * n_features by default
        Number of nonzero coefficients to target in each column of the
        solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
        and is overridden by `alpha` in the `omp` case.
    alpha: float, 1. by default
        If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
        penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
        threshold below which coefficients will be squashed to zero.
        If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
        the reconstruction error targeted. In this case, it overrides
        `n_nonzero_coefs`.
    init: array of shape (n_samples, n_components)
        Initialization value of the sparse codes. Only used if
        `algorithm='lasso_cd'`.
    max_iter: int, 1000 by default
        Maximum number of iterations to perform if `algorithm='lasso_cd'`.
    copy_cov: boolean, optional
        Whether to copy the precomputed covariance matrix; if False, it may be
        overwritten.
    n_jobs: int, optional
        Number of parallel jobs to run.
    Returns
    -------
    code: array of shape (n_samples, n_components)
        The sparse codes
    See also
    --------
    sklearn.linear_model.lars_path
    sklearn.linear_model.orthogonal_mp
    sklearn.linear_model.Lasso
    SparseCoder
    """
    dictionary = array2d(dictionary)
    X = array2d(X)
    n_samples, n_features = X.shape
    n_components = dictionary.shape[0]
    # Precompute the Gram matrix and covariance unless already supplied
    # ('threshold' never needs the Gram matrix).
    if gram is None and algorithm != 'threshold':
        gram = np.dot(dictionary, dictionary.T)
    if cov is None:
        copy_cov = False
        cov = np.dot(dictionary, X.T)
    # Map the public (n_nonzero_coefs, alpha) pair onto the single
    # `regularization` value that _sparse_encode expects.
    if algorithm in ('lars', 'omp'):
        regularization = n_nonzero_coefs
        if regularization is None:
            # NOTE(review): plain `/` — integer division under Python 2.
            regularization = max(n_features / 10, 1)
    else:
        regularization = alpha
        if regularization is None:
            regularization = 1.
    if n_jobs == 1 or algorithm == 'threshold':
        return _sparse_encode(X, dictionary, gram, cov=cov,
                              algorithm=algorithm,
                              regularization=regularization, copy_cov=copy_cov,
                              init=init, max_iter=max_iter)
    # Enter parallel code block
    # Split the samples into even slices, encode each in a worker, then
    # stitch the per-slice codes back into a single array.
    code = np.empty((n_samples, n_components))
    slices = list(gen_even_slices(n_samples, n_jobs))
    code_views = Parallel(n_jobs=n_jobs)(
        delayed(_sparse_encode)(
            X[this_slice], dictionary, gram, cov[:, this_slice], algorithm,
            regularization=regularization, copy_cov=copy_cov,
            init=init[this_slice] if init is not None else None,
            max_iter=max_iter)
        for this_slice in slices)
    for this_slice, this_view in zip(slices, code_views):
        code[this_slice] = this_view
    return code
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
                 random_state=None):
    """Update the dense dictionary factor in place.
    Parameters
    ----------
    dictionary: array of shape (n_features, n_components)
        Value of the dictionary at the previous iteration.
    Y: array of shape (n_features, n_samples)
        Data matrix.
    code: array of shape (n_components, n_samples)
        Sparse coding of the data against which to optimize the dictionary.
    verbose:
        Degree of output the procedure will print.
    return_r2: bool
        Whether to compute and return the residual sum of squares corresponding
        to the computed solution.
    random_state: int or RandomState
        Pseudo number generator state used for random sampling.
    Returns
    -------
    dictionary: array of shape (n_features, n_components)
        Updated dictionary.
    """
    n_components = len(code)
    # NOTE(review): despite the name, this is Y.shape[0] == n_features — the
    # length of a dictionary column, used below to draw a replacement atom.
    n_samples = Y.shape[0]
    random_state = check_random_state(random_state)
    # Residuals, computed 'in-place' for efficiency
    R = -np.dot(dictionary, code)
    R += Y
    R = np.asfortranarray(R)
    # BLAS rank-1 update (ger) adds/removes one atom's contribution to R
    # without re-forming the full product.
    ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
    for k in xrange(n_components):
        # R <- 1.0 * U_k * V_k^T + R
        R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
        dictionary[:, k] = np.dot(R, code[k, :].T)
        # Scale k'th atom
        atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
        if atom_norm_square < 1e-20:
            # Degenerate (unused) atom: replace it with a random direction.
            if verbose == 1:
                sys.stdout.write("+")
                sys.stdout.flush()
            elif verbose:
                print "Adding new random atom"
            dictionary[:, k] = random_state.randn(n_samples)
            # Setting corresponding coefs to 0
            code[k, :] = 0.0
            dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
                                            dictionary[:, k]))
        else:
            dictionary[:, k] /= sqrt(atom_norm_square)
        # R <- -1.0 * U_k * V_k^T + R
        R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
    if return_r2:
        R **= 2
        # R is fortran-ordered. For numpy version < 1.6, sum does not
        # follow the quick striding first, and is thus inefficient on
        # fortran ordered data. We take a flat view of the data with no
        # striding
        R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
        R = np.sum(R)
        return dictionary, R
    return dictionary
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
n_atoms=None):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if not n_atoms is None:
n_components = n_atoms
warnings.warn("Parameter n_atoms has been renamed to"
"'n_components' and will be removed in release 0.14.",
DeprecationWarning, stacklevel=2)
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print '[dict_learning]',
for ii in xrange(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print ("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print ""
elif verbose:
print "--- Convergence reached after %d iterations" % ii
break
if ii % 5 == 0 and callback is not None:
callback(locals())
return code, dictionary, errors
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
n_atoms=None, chunk_size=None):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : int,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_atoms is not None:
n_components = n_atoms
warnings.warn("Parameter n_atoms has been renamed to "
"'n_components' and will be removed in release 0.14.",
DeprecationWarning, stacklevel=2)
if chunk_size is not None:
chunk_size = batch_size
warnings.warn("Parameter chunk_size has been renamed to "
"'batch_size' and will be removed in release 0.14.",
DeprecationWarning, stacklevel=2)
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
dictionary = np.ascontiguousarray(dictionary.T)
if verbose == 1:
print '[dict_learning]',
n_batches = floor(float(len(X)) / batch_size)
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
batches = np.array_split(X_train, n_batches)
batches = itertools.cycle(batches)
# The covariance of the dictionary
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
for ii, this_X in itertools.izip(xrange(iter_offset, iter_offset + n_iter),
batches):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_code:
if verbose > 1:
print 'Learning code...',
elif verbose == 1:
print '|',
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs)
if verbose > 1:
dt = (time.time() - t0)
print 'done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60)
return code, dictionary.T
return dictionary.T
class SparseCodingMixin(TransformerMixin):
    """Sparse coding mixin"""
    def _set_sparse_coding_params(self, n_components,
                                  transform_algorithm='omp',
                                  transform_n_nonzero_coefs=None,
                                  transform_alpha=None, split_sign=False,
                                  n_jobs=1):
        # Store the shared sparse-coding configuration on the estimator.
        self.n_components = n_components
        self.transform_algorithm = transform_algorithm
        self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
        self.transform_alpha = transform_alpha
        self.split_sign = split_sign
        self.n_jobs = n_jobs
    def transform(self, X, y=None):
        """Encode the data as a sparse combination of the dictionary atoms.
        The coding method is determined by the object parameter
        `transform_algorithm`.
        Parameters
        ----------
        X : array of shape (n_samples, n_features)
            Test data to be transformed, must have the same number of
            features as the data used to train the model.
        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Transformed data
        """
        X = array2d(X)
        sparse_codes = sparse_encode(
            X, self.components_, algorithm=self.transform_algorithm,
            n_nonzero_coefs=self.transform_n_nonzero_coefs,
            alpha=self.transform_alpha, n_jobs=self.n_jobs)
        if not self.split_sign:
            return sparse_codes
        # Split each feature into its positive and negative parts so that
        # downstream linear models can weight them independently.
        n_rows, n_cols = sparse_codes.shape
        doubled = np.empty((n_rows, 2 * n_cols))
        doubled[:, :n_cols] = np.maximum(sparse_codes, 0)
        doubled[:, n_cols:] = -np.minimum(sparse_codes, 0)
        return doubled
class SparseCoder(BaseEstimator, SparseCodingMixin):
    """Sparse coding
    Finds a sparse representation of data against a fixed, precomputed
    dictionary.
    Each row of the result is the solution to a sparse coding problem.
    The goal is to find a sparse array `code` such that::
        X ~= code * dictionary
    Parameters
    ----------
    dictionary : array, [n_components, n_features]
        The dictionary atoms used for sparse coding. Lines are assumed to be
        normalized to unit norm.
    transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
        Algorithm used to transform the data:
        lars: uses the least angle regression method (linear_model.lars_path)
        lasso_lars: uses Lars to compute the Lasso solution
        lasso_cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). lasso_lars will be faster if
        the estimated components are sparse.
        omp: uses orthogonal matching pursuit to estimate the sparse solution
        threshold: squashes to zero all coefficients less than alpha from
        the projection ``dictionary * X'``
    transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
        Number of nonzero coefficients to target in each column of the
        solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
        and is overridden by `alpha` in the `omp` case.
    transform_alpha : float, 1. by default
        If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
        penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
        threshold below which coefficients will be squashed to zero.
        If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
        the reconstruction error targeted. In this case, it overrides
        `n_nonzero_coefs`.
    split_sign : bool, False by default
        Whether to split the sparse feature vector into the concatenation of
        its negative part and its positive part. This can improve the
        performance of downstream classifiers.
    n_jobs : int,
        number of parallel jobs to run
    Attributes
    ----------
    `components_` : array, [n_components, n_features]
        The unchanged dictionary atoms
    See also
    --------
    DictionaryLearning
    MiniBatchDictionaryLearning
    SparsePCA
    MiniBatchSparsePCA
    sparse_encode
    """
    def __init__(self, dictionary, transform_algorithm='omp',
                 transform_n_nonzero_coefs=None, transform_alpha=None,
                 split_sign=False, n_jobs=1):
        # n_components is implied by the dictionary's row count.
        self._set_sparse_coding_params(dictionary.shape[0],
                                       transform_algorithm,
                                       transform_n_nonzero_coefs,
                                       transform_alpha, split_sign, n_jobs)
        # The dictionary is stored as given; it is never fitted or copied.
        self.components_ = dictionary
    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged
        This method is just there to implement the usual API and hence
        work in pipelines.
        """
        return self
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
    """Dictionary learning.

    Finds a dictionary (a set of atoms) that can best be used to represent
    data using a sparse code.

    Solves the optimization problem::

        (U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
                    (U,V)
                    with || V_k ||_2 = 1 for all 0 <= k < n_components

    Parameters
    ----------
    n_components : int,
        number of dictionary elements to extract
    alpha : int,
        sparsity controlling parameter
    max_iter : int,
        maximum number of iterations to perform
    tol : float,
        tolerance for numerical error
    fit_algorithm : {'lars', 'cd'}
        lars: uses the least angle regression method to solve the lasso
        problem (linear_model.lars_path)
        cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). Lars will be faster if
        the estimated components are sparse.
    transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
        Algorithm used to transform the data
        lars: uses the least angle regression method (linear_model.lars_path)
        lasso_lars: uses Lars to compute the Lasso solution
        lasso_cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). lasso_lars will be faster if
        the estimated components are sparse.
        omp: uses orthogonal matching pursuit to estimate the sparse solution
        threshold: squashes to zero all coefficients less than alpha from
        the projection ``dictionary * X'``
    transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
        Number of nonzero coefficients to target in each column of the
        solution. This is only used by `algorithm='lars'` and
        `algorithm='omp'` and is overridden by `alpha` in the `omp` case.
    transform_alpha : float, 1. by default
        If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
        penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
        threshold below which coefficients will be squashed to zero.
        If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
        the reconstruction error targeted. In this case, it overrides
        `n_nonzero_coefs`.
    split_sign : bool, False by default
        Whether to split the sparse feature vector into the concatenation of
        its negative part and its positive part. This can improve the
        performance of downstream classifiers.
    n_jobs : int,
        number of parallel jobs to run
    code_init : array of shape (n_samples, n_components),
        initial value for the code, for warm restart
    dict_init : array of shape (n_components, n_features),
        initial values for the dictionary, for warm restart
    verbose :
        degree of verbosity of the printed output
    random_state : int or RandomState
        Pseudo number generator state used for random sampling.

    Attributes
    ----------
    `components_` : array, [n_components, n_features]
        dictionary atoms extracted from the data
    `error_` : array
        vector of errors at each iteration

    Notes
    -----
    **References:**

    J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
    for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)

    See also
    --------
    SparseCoder
    MiniBatchDictionaryLearning
    SparsePCA
    MiniBatchSparsePCA
    """
    def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
                 fit_algorithm='lars', transform_algorithm='omp',
                 transform_n_nonzero_coefs=None, transform_alpha=None,
                 n_jobs=1, code_init=None, dict_init=None, verbose=False,
                 split_sign=False, random_state=None, n_atoms=None):
        # Honor the deprecated `n_atoms` alias before wiring up the shared
        # sparse-coding parameters: when given, it overrides `n_components`.
        if n_atoms is not None:
            n_components = n_atoms
            warnings.warn("Parameter n_atoms has been renamed to "
                          "'n_components' and will be removed in release"
                          " 0.14.", DeprecationWarning, stacklevel=2)
        self._set_sparse_coding_params(n_components, transform_algorithm,
                                       transform_n_nonzero_coefs,
                                       transform_alpha, split_sign, n_jobs)
        # Fit-time hyper-parameters.
        self.alpha = alpha
        self.max_iter = max_iter
        self.tol = tol
        self.fit_algorithm = fit_algorithm
        # Warm-start values and bookkeeping.
        self.code_init = code_init
        self.dict_init = dict_init
        self.verbose = verbose
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the model from data in X.

        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        self: object
            Returns the object itself
        """
        # NOTE: `random_state` is normalized in place, matching the
        # historical behavior of this estimator.
        self.random_state = check_random_state(self.random_state)
        X = array2d(X)
        n_components = (X.shape[1] if self.n_components is None
                        else self.n_components)
        code, dictionary, errors = dict_learning(
            X, n_components, self.alpha,
            tol=self.tol, max_iter=self.max_iter,
            method=self.fit_algorithm,
            n_jobs=self.n_jobs,
            code_init=self.code_init,
            dict_init=self.dict_init,
            verbose=self.verbose,
            random_state=self.random_state)
        self.components_ = dictionary
        self.error_ = errors
        return self
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
    """Mini-batch dictionary learning.

    Finds a dictionary (a set of atoms) that can best be used to represent
    data using a sparse code.

    Solves the optimization problem::

        (U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
                    (U,V)
                    with || V_k ||_2 = 1 for all 0 <= k < n_components

    Parameters
    ----------
    n_components : int,
        number of dictionary elements to extract
    alpha : int,
        sparsity controlling parameter
    n_iter : int,
        total number of iterations to perform
    fit_algorithm : {'lars', 'cd'}
        lars: uses the least angle regression method to solve the lasso
        problem (linear_model.lars_path)
        cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). Lars will be faster if
        the estimated components are sparse.
    transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
        Algorithm used to transform the data.
        lars: uses the least angle regression method (linear_model.lars_path)
        lasso_lars: uses Lars to compute the Lasso solution
        lasso_cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). lasso_lars will be faster if
        the estimated components are sparse.
        omp: uses orthogonal matching pursuit to estimate the sparse solution
        threshold: squashes to zero all coefficients less than alpha from
        the projection dictionary * X'
    transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
        Number of nonzero coefficients to target in each column of the
        solution. This is only used by `algorithm='lars'` and
        `algorithm='omp'` and is overridden by `alpha` in the `omp` case.
    transform_alpha : float, 1. by default
        If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
        penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
        threshold below which coefficients will be squashed to zero.
        If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
        the reconstruction error targeted. In this case, it overrides
        `n_nonzero_coefs`.
    split_sign : bool, False by default
        Whether to split the sparse feature vector into the concatenation of
        its negative part and its positive part. This can improve the
        performance of downstream classifiers.
    n_jobs : int,
        number of parallel jobs to run
    dict_init : array of shape (n_components, n_features),
        initial value of the dictionary for warm restart scenarios
    verbose :
        degree of verbosity of the printed output
    batch_size : int,
        number of samples in each mini-batch
    shuffle : bool,
        whether to shuffle the samples before forming batches
    random_state : int or RandomState
        Pseudo number generator state used for random sampling.

    Attributes
    ----------
    `components_` : array, [n_components, n_features]
        components extracted from the data

    Notes
    -----
    **References:**

    J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
    for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)

    See also
    --------
    SparseCoder
    DictionaryLearning
    SparsePCA
    MiniBatchSparsePCA
    """
    def __init__(self, n_components=None, alpha=1, n_iter=1000,
                 fit_algorithm='lars', n_jobs=1, batch_size=3,
                 shuffle=True, dict_init=None, transform_algorithm='omp',
                 transform_n_nonzero_coefs=None, transform_alpha=None,
                 verbose=False, split_sign=False, random_state=None,
                 n_atoms=None, chunk_size=None):
        # Honor the deprecated aliases: the old name, when given, overrides
        # the new one.
        if n_atoms is not None:
            n_components = n_atoms
            warnings.warn("Parameter n_atoms has been renamed to "
                          "'n_components' and will be removed in release"
                          " 0.14.", DeprecationWarning, stacklevel=2)
        if chunk_size is not None:
            # BUGFIX: the assignment was previously reversed
            # (`chunk_size = batch_size`), which silently discarded the
            # user-supplied chunk_size value.
            batch_size = chunk_size
            warnings.warn("Parameter chunk_size has been renamed to "
                          "'batch_size' and will be removed in release"
                          " 0.14.", DeprecationWarning, stacklevel=2)
        self._set_sparse_coding_params(n_components, transform_algorithm,
                                       transform_n_nonzero_coefs,
                                       transform_alpha, split_sign, n_jobs)
        self.alpha = alpha
        self.n_iter = n_iter
        self.fit_algorithm = fit_algorithm
        self.dict_init = dict_init
        self.verbose = verbose
        self.shuffle = shuffle
        self.batch_size = batch_size
        self.split_sign = split_sign
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the model from data in X.

        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # NOTE: `random_state` is normalized in place, matching the
        # historical behavior of this estimator.
        self.random_state = check_random_state(self.random_state)
        X = array2d(X)
        if self.n_components is None:
            n_components = X.shape[1]
        else:
            n_components = self.n_components
        U = dict_learning_online(X, n_components, self.alpha,
                                 n_iter=self.n_iter, return_code=False,
                                 method=self.fit_algorithm,
                                 n_jobs=self.n_jobs,
                                 dict_init=self.dict_init,
                                 batch_size=self.batch_size,
                                 shuffle=self.shuffle, verbose=self.verbose,
                                 random_state=self.random_state)
        self.components_ = U
        return self

    def partial_fit(self, X, y=None, iter_offset=0):
        """Updates the model using the data in X as a mini-batch.

        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.

        iter_offset: int, default 0
            Number of iterations already performed; used to continue the
            learning-rate schedule across successive partial_fit calls.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        self.random_state = check_random_state(self.random_state)
        X = array2d(X)
        # Warm-start from the previously learned dictionary if one exists.
        if hasattr(self, 'components_'):
            dict_init = self.components_
        else:
            dict_init = self.dict_init
        # The whole of X is treated as one batch; no shuffling, so the
        # caller controls sample order.
        U = dict_learning_online(X, self.n_components, self.alpha,
                                 n_iter=self.n_iter,
                                 method=self.fit_algorithm,
                                 n_jobs=self.n_jobs, dict_init=dict_init,
                                 batch_size=len(X), shuffle=False,
                                 verbose=self.verbose, return_code=False,
                                 iter_offset=iter_offset,
                                 random_state=self.random_state)
        self.components_ = U
        return self
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""cond_v2 and gradient.
This is a version of cond that emits a single If op, as well as the gradient
function for If ops produced by cond_v2. This will eventually replace the
current tf.cond implementation once it reaches feature and performance parity.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import function
from tensorflow.python.framework import function_def_to_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_functional_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.util import compat
# NOTE(skyewm): TensorFlow uses protected class methods and fields to signify
# that they aren't part of the official public API. These protected members
# often need to be used by implementation code however. Rather than litter the
# code with pylint comments, we ignore protected access violations for
# readability.
# pylint: disable=protected-access
def cond_v2(pred, true_fn, false_fn, name="cond"):
  """Like tf.cond, except emits a single If op.

  Args:
    pred: a tensor selecting which branch runs (the tf.cond contract).
    true_fn: callable taking no arguments, traced into the true branch graph.
    false_fn: callable taking no arguments, traced into the false branch
      graph.
    name: name scope for the emitted If op.

  Returns:
    The tensors returned by the taken branch. Intermediate tensors appended
    as extra outputs (for the gradient computation) are not returned.
  """
  with ops.name_scope(name) as scope:
    # Trace each branch into its own _FuncGraph; outer-graph tensors a branch
    # uses are captured and recorded in that graph's extra_inputs.
    true_graph = function.func_graph_from_py_func(true_fn, [], [],
                                                  name="%s_true" % scope)
    false_graph = function.func_graph_from_py_func(false_fn, [], [],
                                                   name="%s_false" % scope)
    _check_same_outputs(true_graph, false_graph)
    # Add inputs to true_graph and false_graph to make them match. Note that
    # this modifies true_graph and false_graph.
    cond_inputs = _make_inputs_match(true_graph, false_graph,
                                     true_graph.extra_inputs,
                                     false_graph.extra_inputs)
    # Add all intermediate tensors as function outputs so they're available for
    # the gradient computation.
    true_intermediates = _get_intermediates(true_graph)
    false_intermediates = _get_intermediates(false_graph)
    # Save the original number of outputs to return to the caller.
    num_cond_outputs = len(true_graph.outputs)
    # Make the number/type of new intermediate outputs match.
    extra_true_outputs, extra_false_outputs = _pad_params(
        true_graph, false_graph, true_intermediates, false_intermediates)
    true_graph.outputs.extend(extra_true_outputs)
    false_graph.outputs.extend(extra_false_outputs)
    # Create the If op.
    tensors = gen_functional_ops._if(
        pred, cond_inputs, [t.dtype for t in true_graph.outputs],
        _create_new_tf_function(true_graph),
        _create_new_tf_function(false_graph),
        name=scope)
    # Strip the extra intermediate outputs; callers see only the originals.
    return tensors[:num_cond_outputs]
@ops.RegisterGradient("If")
def _IfGrad(op, *grads):  # pylint: disable=invalid-name
  """The gradient of an If op produced by cond_v2.

  Args:
    op: the forward-pass If Operation.
    *grads: the incoming output gradients, one per output of `op`.

  Returns:
    A list of input gradients: None for the predicate input, followed by the
    outputs of a newly created gradient If op.
  """
  true_graph, false_graph = _get_func_graphs(op)
  # Create grad functions that compute the gradient of the true/false forward
  # graphs. These functions will capture tensors from the forward pass
  # functions.
  true_grad_graph = _create_grad_func(
      true_graph, grads, _get_grad_fn_name(true_graph))
  false_grad_graph = _create_grad_func(
      false_graph, grads, _get_grad_fn_name(false_graph))
  # Both gradient branches must agree on output dtypes for the If op below.
  assert ([t.dtype for t in true_grad_graph.outputs] ==
          [t.dtype for t in false_grad_graph.outputs])
  # Match up the captured grad function inputs with outputs of 'op' and other
  # external tensors.
  true_grad_inputs = _get_grad_inputs(op, true_graph, true_grad_graph)
  false_grad_inputs = _get_grad_inputs(op, false_graph, false_grad_graph)
  # Make the inputs to true_grad_graph and false_grad_graph match. Note that
  # this modifies true_grad_graph and false_grad_graph.
  grad_inputs = _make_inputs_match(true_grad_graph, false_grad_graph,
                                   true_grad_inputs, false_grad_inputs)
  # Add all intermediate tensors as function outputs so they're available for
  # higher-order gradient computations.
  true_grad_intermediates = _get_intermediates(true_grad_graph)
  false_grad_intermediates = _get_intermediates(false_grad_graph)
  # Save the original number of gradient outputs to return.
  num_grad_outputs = len(true_grad_graph.outputs)
  # Make the number/type of new intermediate outputs match.
  extra_true_grad_outputs, extra_false_grad_outputs = _pad_params(
      true_grad_graph, false_grad_graph,
      true_grad_intermediates, false_grad_intermediates)
  true_grad_graph.outputs.extend(extra_true_grad_outputs)
  false_grad_graph.outputs.extend(extra_false_grad_outputs)
  # Create the gradient If op. op.inputs[0] is the forward op's predicate.
  tensors = gen_functional_ops._if(
      op.inputs[0], grad_inputs, [t.dtype for t in true_grad_graph.outputs],
      _create_new_tf_function(true_grad_graph),
      _create_new_tf_function(false_grad_graph))
  # The predicate has no gradient.
  return [None] + tensors[:num_grad_outputs]
def _get_func_graphs(if_op):
  """Returns `_FuncGraph`s for the input op branches.

  Args:
    if_op: The _If Operation.

  Returns:
    A 2-tuple of the `_FuncGraph`s of the then_branch and else_branch.
  """
  def _branch_graph(branch_attr):
    # Every If-op input after the predicate feeds the branch function.
    branch_inputs = if_op.inputs[1:]
    input_shapes = [t.shape for t in branch_inputs]
    fdef = if_op.graph._get_function(
        if_op.get_attr(branch_attr).name).definition
    graph = function_def_to_graph.function_def_to_graph(fdef, input_shapes)
    # Populate the _FuncGraph capture bookkeeping so the gradient code can
    # treat this graph like one built by func_graph_from_py_func.
    graph.extra_inputs = branch_inputs
    graph.extra_args = graph.inputs
    graph._captured = dict(zip(branch_inputs, graph.inputs))
    return graph

  return (_branch_graph("then_branch"), _branch_graph("else_branch"))
def _grad_fn(func_graph, grads):
  """The gradient function for each conditional branch.

  This function builds the gradient graph of the corresponding forward-pass
  conditional branch in `func_graph`, by differentiating func_graph's outputs
  w.r.t. its inputs.

  Args:
    func_graph: function._FuncGraph. The corresponding forward-pass function.
    grads: The list of input gradient Tensors.

  Returns:
    The output gradient Tensors.
  """
  assert len(func_graph.outputs) == len(grads)
  # Drop untrainable function outputs: passing them to _GradientsHelper can
  # raise (e.g. it doesn't expect boolean tensors in 'ys').
  trainable_pairs = [(y, grad_y)
                     for y, grad_y in zip(func_graph.outputs, grads)
                     if gradients_impl._IsTrainable(y)]
  ys = [y for y, _ in trainable_pairs]
  grad_ys = [grad_y for _, grad_y in trainable_pairs]
  # Build the gradient graph. Note that this builds the gradient computation
  # of func_graph in the current graph, which requires capturing tensors from
  # func_graph; the captured tensors are resolved to external tensors in
  # _get_grad_inputs.
  grad_outputs = gradients_impl._GradientsHelper(
      ys, func_graph.inputs, grad_ys=grad_ys,
      src_graph=func_graph)
  # Functions can't return None; replace Nones with zero tensors.
  # TODO(b/80444525): don't return anything here and make _IfGrad return None
  # if both branches have zero gradient.
  return [array_ops.zeros_like(func_graph.inputs[i]) if g is None else g
          for i, g in enumerate(grad_outputs)]
def _create_grad_func(func_graph, grads, name):
  """Returns the _FuncGraph representation of _grad_fn."""
  def _grad_body():
    return _grad_fn(func_graph, grads)

  return function.func_graph_from_py_func(_grad_body, [], [], name)
def _get_grad_inputs(if_op, cond_graph, grad_graph):
  """Returns the tensors we should pass to grad_graph.

  This method handles tensors captured from cond_graph in grad_graph. It
  converts these to suitable input tensors from the outer graph.

  Args:
    if_op: Operation. The forward-pass If op that uses cond_graph.
    cond_graph: function._FuncGraph. The forward-pass function.
    grad_graph: function._FuncGraph. The gradients function.

  Returns:
    A list of inputs tensors to be passed to grad_graph.
  """
  outer_graph = ops.get_default_graph()
  # Maps placeholders in cond_graph -> input tensor in outer graph.
  placeholder_to_outer = {v: k for k, v in cond_graph._captured.items()}
  grad_inputs = []
  for tensor in grad_graph.extra_inputs:
    if tensor.graph == outer_graph:
      # Already an outer-graph tensor (e.g. one of the input gradients).
      grad_inputs.append(tensor)
      continue
    # Otherwise the tensor must live in the forward-pass branch graph.
    assert tensor.graph == cond_graph
    if tensor in placeholder_to_outer:
      # An input placeholder in cond_graph: use the matching outer tensor.
      outer_tensor = placeholder_to_outer[tensor]
      assert outer_tensor.graph == outer_graph
      grad_inputs.append(outer_tensor)
    else:
      # An intermediate value in cond_graph: every intermediate is also an
      # output, so fetch the matching output of 'if_op'.
      grad_inputs.append(if_op.outputs[cond_graph.outputs.index(tensor)])
  return grad_inputs
def _create_new_tf_function(func_graph):
  """Converts func_graph to a TF_Function and adds it to the current graph.

  Args:
    func_graph: function._FuncGraph

  Returns:
    The name of the new TF_Function.
  """
  # Build a C-level TF_Function directly from the graph's C representation.
  c_func = c_api.TF_GraphToFunction_wrapper(
      func_graph._c_graph,
      compat.as_str(func_graph.name),
      False,  # append_hash_to_fn_name
      None,  # opers
      [t._as_tf_output() for t in func_graph.inputs],
      [t._as_tf_output() for t in func_graph.outputs],
      [],
      None,  # opts
      None)  # description
  # NOTE(review): bound to a throwaway name, presumably so the TF_Function*
  # is kept alive through the conversion below and released when this
  # function returns -- confirm this lifetime assumption before changing.
  _ = c_api_util.ScopedTFFunction(c_func)
  # TODO(b/109833212): this sucks, we're serializing the TF_Function*,
  # deserializing it into a Python FunctionDef, then reserializing it to create
  # a new TF_Function that we add to the graph.
  fdef = function.function_def_from_tf_function(c_func)
  defined_func = function._from_definition(fdef)
  defined_func.add_to_graph(ops.get_default_graph())
  return func_graph.name
def _get_intermediates(func_graph):
"""Returns all tensors in `func_graph` that aren't inputs or outputs."""
intermediates = []
for op in func_graph.get_operations():
for t in op.outputs:
if t in func_graph.inputs: continue
if t in func_graph.outputs: continue
intermediates.append(t)
return intermediates
def _separate_unique_inputs(true_inputs, false_inputs):
"""Separates tensors appearing only in true_inputs or false_inputs, or both.
Args:
true_inputs: list of Tensors
false_inputs: list of Tensors
Returns:
Three lists of Tensors:
1. The tensors that appear in both true_inputs and false_inputs
2. The tensors that only appear in true_inputs
3. The tensors that only appear in false_inputs
"""
true_inputs = set(true_inputs)
false_inputs = set(false_inputs)
shared_inputs = true_inputs.intersection(false_inputs)
true_only_inputs = true_inputs - false_inputs
false_only_inputs = false_inputs - true_inputs
return list(shared_inputs), list(true_only_inputs), list(false_only_inputs)
def _pad_params(true_graph, false_graph, true_params, false_params):
  """Returns new param lists that have matching signatures.

  This is done by mirroring each param list in the other using dummy params.
  There is no merging of params.

  Args:
    true_graph: function._FuncGraph
    false_graph: function._FuncGraph
    true_params: a list of Tensors from true_graph
    false_params: a list of Tensors from false_graph

  Returns:
    A new list of Tensors in true_graph and a new list of Tensors in
    false_graph. The two lists have the same number of Tensors, with matching
    types and shapes across the lists.
  """
  # Each branch keeps its own params and appends/prepends dummies that mirror
  # the other branch's params, so both lists line up position-by-position.
  true_mirror = _create_dummy_params(true_graph, false_params)
  false_mirror = _create_dummy_params(false_graph, true_params)
  return true_params + true_mirror, false_mirror + false_params
def _make_inputs_match(true_graph, false_graph, true_inputs, false_inputs):
  """Modifies true_graph and false_graph so they have the same input signature.

  This method reorders and/or adds parameters to true_graph and false_graph so
  they have the same input signature, and updates the 'inputs', 'extra_inputs',
  and '_captured' fields of both graphs accordingly. It uses the input tensors
  from the outer graph to avoid duplicating shared arguments.

  Args:
    true_graph: function._FuncGraph
    false_graph: function._FuncGraph
    true_inputs: a list of Tensors in the outer graph. The inputs for
      true_graph.
    false_inputs: a list of Tensors in the outer graph. The inputs for
      false_graph.

  Returns:
    A new list of Tensors from the outer graph that are the new inputs for both
    true_graph and false_graph. This is a deduped version of true_inputs +
    false_inputs.
  """
  shared_inputs, true_only_inputs, false_only_inputs = _separate_unique_inputs(
      true_inputs, false_inputs)
  # Canonical input order: shared inputs first, then true-only, then
  # false-only. Both graphs are rewritten to this order below.
  new_inputs = shared_inputs + true_only_inputs + false_only_inputs
  # Map each outer-graph tensor to the placeholder it feeds in each branch.
  true_input_to_param = dict(zip(true_inputs, true_graph.inputs))
  false_input_to_param = dict(zip(false_inputs, false_graph.inputs))
  # Positions with no real parameter in a branch get dummy (fake) params so
  # both signatures match position-by-position.
  true_graph.inputs = (
      [true_input_to_param[t] for t in shared_inputs] +
      [true_input_to_param[t] for t in true_only_inputs] +
      _create_dummy_params(true_graph, false_only_inputs))
  false_graph.inputs = (
      [false_input_to_param[t] for t in shared_inputs] +
      _create_dummy_params(false_graph, true_only_inputs) +
      [false_input_to_param[t] for t in false_only_inputs])
  # Rewrite the _FuncGraphs' state to reflect the new inputs.
  true_graph.extra_inputs = new_inputs
  false_graph.extra_inputs = new_inputs
  true_graph._captured = dict(zip(new_inputs, true_graph.inputs))
  false_graph._captured = dict(zip(new_inputs, false_graph.inputs))
  return new_inputs
def _create_dummy_params(func_graph, template_tensors):
  """Creates tensors in func_graph to represent template_tensors.

  Args:
    func_graph: function._FuncGraph.
    template_tensors: a list of tensors in the outer graph.

  Returns:
    A list of tensors in func_graph.
  """
  dummies = []
  with func_graph.as_default():
    for template in template_tensors:
      dummies.append(gen_functional_ops.fake_param(dtype=template.dtype,
                                                   shape=template.shape))
  return dummies
def _get_grad_fn_name(func_graph):
  """Returns a unique name to use for the grad function of `func_graph`.

  The name is "<func_graph.name>_grad", with a numeric suffix appended if a
  function with that name is already registered in the default graph.
  """
  name = "%s_grad" % func_graph.name
  base_name = name
  counter = 1
  # BUGFIX: this must loop (not just check once) -- "<base>_1" may itself
  # already be registered, in which case the name would not be unique.
  while ops.get_default_graph()._is_function(name):
    name = "%s_%s" % (base_name, counter)
    counter += 1
  return name
def _check_same_outputs(true_graph, false_graph):
"""Raises an error if true_graph and false_graph have different outputs."""
true_output_types = [t.dtype for t in true_graph.outputs]
false_output_types = [t.dtype for t in false_graph.outputs]
if (len(true_graph.outputs) != len(false_graph.outputs) or
true_output_types != false_output_types):
raise ValueError(
"true_fn() and false_fn() must return the same number and type of "
"arguments, got:\n"
" true_fn: %s\n"
" false_fn: %s" % (true_output_types, false_output_types))
| |
from datetime import date, datetime, timedelta
import json
from django.core.cache import cache
from nose.tools import eq_
from kitsune.customercare.tests import reply
from kitsune.kpi.cron import update_contributor_metrics
from kitsune.kpi.models import (
Metric, AOA_CONTRIBUTORS_METRIC_CODE, KB_ENUS_CONTRIBUTORS_METRIC_CODE,
KB_L10N_CONTRIBUTORS_METRIC_CODE, L10N_METRIC_CODE,
SUPPORT_FORUM_CONTRIBUTORS_METRIC_CODE, VISITORS_METRIC_CODE,
EXIT_SURVEY_YES_CODE, EXIT_SURVEY_NO_CODE, EXIT_SURVEY_DONT_KNOW_CODE)
from kitsune.kpi.tests import metric, metric_kind
from kitsune.products.tests import product
from kitsune.sumo.helpers import urlparams
from kitsune.sumo.tests import TestCase
from kitsune.sumo.urlresolvers import reverse
from kitsune.questions.tests import answer, answervote, question
from kitsune.users.tests import user
from kitsune.wiki.tests import document, revision, helpful_vote
class KpiApiTests(TestCase):
def _make_elastic_metric_kinds(self):
click_kind = metric_kind(code='search clickthroughs:elastic:clicks',
save=True)
search_kind = metric_kind(code='search clickthroughs:elastic:searches',
save=True)
return click_kind, search_kind
def _make_contributor_metric_kinds(self):
metric_kind(code=AOA_CONTRIBUTORS_METRIC_CODE, save=True)
metric_kind(code=KB_ENUS_CONTRIBUTORS_METRIC_CODE, save=True)
metric_kind(code=KB_L10N_CONTRIBUTORS_METRIC_CODE, save=True)
metric_kind(code=SUPPORT_FORUM_CONTRIBUTORS_METRIC_CODE, save=True)
def _get_api_result(self, name, **kwargs):
"""Helper to make API calls, parse the json and return the result."""
url = reverse(name)
url = urlparams(url, format='json', **kwargs)
response = self.client.get(url)
eq_(200, response.status_code)
return json.loads(response.content)
def test_questions(self):
"""Test questions API call."""
# A question with a solution:
a = answer(save=True)
a.question.solution = a
a.question.save()
# A question with an answer:
answer(save=True)
# A question without answers:
question(save=True)
# A locked question that shouldn't be counted for anything
question(is_locked=True, save=True)
r = self._get_api_result('api.kpi.questions')
eq_(r['objects'][0]['solved'], 1)
eq_(r['objects'][0]['responded_24'], 2)
eq_(r['objects'][0]['responded_72'], 2)
eq_(r['objects'][0]['questions'], 3)
    def test_questions_by_locale(self):
        """Test locale filtering of questions API call.

        Creates three en-US questions (one solved, one answered, one
        unanswered) and one unanswered pt-BR question, then checks the
        metrics unfiltered and filtered by each locale.
        """
        # An en-US question with a solution:
        q = question(locale='en-US', save=True)
        a = answer(question=q, save=True)
        q.solution = a
        q.save()
        # An en-US question with an answer:
        q = question(locale='en-US', save=True)
        answer(question=q, save=True)
        # An en-US question without answers:
        question(locale='en-US', save=True)
        # A pt-BR question without answers:
        question(locale='pt-BR', save=True)
        # Verify no locale filtering: all four questions counted.
        r = self._get_api_result('api.kpi.questions')
        eq_(r['objects'][0]['solved'], 1)
        eq_(r['objects'][0]['responded_24'], 2)
        eq_(r['objects'][0]['responded_72'], 2)
        eq_(r['objects'][0]['questions'], 4)
        # Verify locale=en-US: the pt-BR question is excluded.
        r = self._get_api_result('api.kpi.questions', locale='en-US')
        eq_(r['objects'][0]['solved'], 1)
        eq_(r['objects'][0]['responded_24'], 2)
        eq_(r['objects'][0]['responded_72'], 2)
        eq_(r['objects'][0]['questions'], 3)
        # Verify locale=pt-BR: zero-valued keys are omitted from the payload.
        r = self._get_api_result('api.kpi.questions', locale='pt-BR')
        eq_(r['objects'][0]['questions'], 1)
        assert 'solved' not in r['objects'][0]
        assert 'responded_24' not in r['objects'][0]
        assert 'responded_72' not in r['objects'][0]
    def test_questions_by_product(self):
        """Test product filtering of questions API call.

        Creates three Firefox OS questions (one solved, one answered, one
        unanswered) and one unanswered Firefox question, then checks the
        metrics unfiltered and filtered by each product slug.
        """
        firefox_os = product(slug='firefox-os', save=True)
        firefox = product(slug='firefox', save=True)
        # A Firefox OS question with a solution:
        q = question(save=True)
        q.products.add(firefox_os)
        a = answer(question=q, save=True)
        q.solution = a
        q.save()
        # A Firefox OS question with an answer:
        q = question(save=True)
        q.products.add(firefox_os)
        answer(question=q, save=True)
        # A Firefox OS question without answers:
        q = question(save=True)
        q.products.add(firefox_os)
        # A Firefox question without answers:
        q = question(locale='pt-BR', save=True)
        q.products.add(firefox)
        # Verify no product filtering: all four questions counted.
        r = self._get_api_result('api.kpi.questions')
        eq_(r['objects'][0]['solved'], 1)
        eq_(r['objects'][0]['responded_24'], 2)
        eq_(r['objects'][0]['responded_72'], 2)
        eq_(r['objects'][0]['questions'], 4)
        # Verify product=firefox-os: the Firefox question is excluded.
        r = self._get_api_result('api.kpi.questions', product='firefox-os')
        eq_(r['objects'][0]['solved'], 1)
        eq_(r['objects'][0]['responded_24'], 2)
        eq_(r['objects'][0]['responded_72'], 2)
        eq_(r['objects'][0]['questions'], 3)
        # Verify product=firefox: zero-valued keys are omitted from the
        # payload.
        r = self._get_api_result('api.kpi.questions', product='firefox')
        eq_(r['objects'][0]['questions'], 1)
        assert 'solved' not in r['objects'][0]
        assert 'responded_24' not in r['objects'][0]
        assert 'responded_72' not in r['objects'][0]
def test_questions_inactive_user(self):
"""Verify questions from inactive users aren't counted."""
# Two questions for an inactive user.
# They shouldn't show up in the count.
u = user(is_active=False, save=True)
question(creator=u, save=True)
question(creator=u, save=True)
r = self._get_api_result('api.kpi.questions')
eq_(len(r['objects']), 0)
# Activate the user, now the questions should count.
u.is_active = True
u.save()
cache.clear() # We need to clear the cache for new results.
url = reverse('api.kpi.questions')
response = self.client.get(url + '?format=json')
eq_(200, response.status_code)
r = json.loads(response.content)
eq_(r['objects'][0]['questions'], 2)
def test_vote(self):
"""Test vote API call."""
r = revision(save=True)
helpful_vote(revision=r, save=True)
helpful_vote(revision=r, save=True)
helpful_vote(revision=r, helpful=True, save=True)
a = answer(save=True)
answervote(answer=a, save=True)
answervote(answer=a, helpful=True, save=True)
answervote(answer=a, helpful=True, save=True)
r = self._get_api_result('api.kpi.votes')
eq_(r['objects'][0]['kb_helpful'], 1)
eq_(r['objects'][0]['kb_votes'], 3)
eq_(r['objects'][0]['ans_helpful'], 2)
eq_(r['objects'][0]['ans_votes'], 3)
def test_kb_vote(self):
    """The KB votes API supports locale and product filtering."""
    # One en-US revision and two es revisions; each gets three votes,
    # of which exactly one is helpful.
    revisions = [
        revision(document=document(locale='en-US', save=True), save=True),
        revision(document=document(locale='es', save=True), save=True),
        revision(document=document(locale='es', save=True), save=True),
    ]
    for rev in revisions:
        helpful_vote(revision=rev, save=True)
        helpful_vote(revision=rev, save=True)
        helpful_vote(revision=rev, helpful=True, save=True)
    # Two of the documents belong to Firefox OS, the third to Firefox.
    firefox_os = product(slug='firefox-os', save=True)
    firefox = product(slug='firefox', save=True)
    revisions[0].document.products.add(firefox_os)
    revisions[1].document.products.add(firefox_os)
    revisions[2].document.products.add(firefox)
    # No filters: every vote is counted.
    row = self._get_api_result('api.kpi.kb-votes')['objects'][0]
    eq_(row['kb_helpful'], 3)
    eq_(row['kb_votes'], 9)
    # Locale filters.
    row = self._get_api_result('api.kpi.kb-votes', locale='en-US')['objects'][0]
    eq_(row['kb_helpful'], 1)
    eq_(row['kb_votes'], 3)
    row = self._get_api_result('api.kpi.kb-votes', locale='es')['objects'][0]
    eq_(row['kb_helpful'], 2)
    eq_(row['kb_votes'], 6)
    # Product filters.
    row = self._get_api_result('api.kpi.kb-votes', product='firefox-os')['objects'][0]
    eq_(row['kb_helpful'], 2)
    eq_(row['kb_votes'], 6)
    row = self._get_api_result('api.kpi.kb-votes', product='firefox')['objects'][0]
    eq_(row['kb_helpful'], 1)
    eq_(row['kb_votes'], 3)
    # Product and locale filters combine.
    row = self._get_api_result(
        'api.kpi.kb-votes', product='firefox-os', locale='es')['objects'][0]
    eq_(row['kb_helpful'], 1)
    eq_(row['kb_votes'], 3)
def test_active_contributors(self):
    """The contributors API counts KB, l10n, forum and AoA activity."""
    # Two en-US revisions from two distinct contributors.
    r1 = revision(creator=user(save=True), save=True)
    r2 = revision(creator=user(save=True), save=True)
    # One es translation touched by two people (creator + reviewer).
    translated = document(parent=r1.document, locale='es', save=True)
    revision(document=translated, reviewed=datetime.now(),
             reviewer=r1.creator, creator=r2.creator, save=True)
    # Support forum activity: with answer counts of 10, 9 and 1, only
    # the 10-answer user ends up counted as an active contributor.
    heavy = user(save=True)
    for _ in range(10):
        answer(save=True, creator=heavy)
    almost = user(save=True)
    for _ in range(9):
        answer(save=True, creator=almost)
    answer(save=True, creator=user(save=True))
    # One Army of Awesome reply.
    reply(save=True)
    # Today's activity is rolled up into tomorrow's metrics.
    self._make_contributor_metric_kinds()
    update_contributor_metrics(day=date.today() + timedelta(days=1))
    row = self._get_api_result('api.kpi.contributors')['objects'][0]
    eq_(row['en_us'], 2)
    eq_(row['non_en_us'], 2)
    eq_(row['support_forum'], 1)
    eq_(row['aoa'], 1)
def test_asker_replies_arent_a_contribution(self):
    """Replies posted by the question's own creator are not contributions.

    Even 10 self-replies must leave the support forum count at zero.
    """
    q = question(save=True)
    asker = q.creator
    for _ in range(10):
        answer(creator=asker, question=q, save=True)
    # Today's activity is rolled up into tomorrow's metrics.
    self._make_contributor_metric_kinds()
    update_contributor_metrics(day=date.today() + timedelta(days=1))
    row = self._get_api_result('api.kpi.contributors')['objects'][0]
    eq_(row['support_forum'], 0)
    # Reassign the question to somebody else; the answers now count.
    q.creator = user(save=True)
    q.save()
    cache.clear()  # Stale cached results would mask the change.
    Metric.objects.all().delete()
    update_contributor_metrics(day=date.today() + timedelta(days=1))
    row = self._get_api_result('api.kpi.contributors')['objects'][0]
    eq_(row['support_forum'], 1)
def test_elastic_clickthrough_get(self):
    """The search CTR API returns click/search pairs, newest first."""
    click_kind, search_kind = self._make_elastic_metric_kinds()
    # Two sample days: 1 click / 10 searches, then 2 clicks / 20 searches.
    samples = [
        (date(2000, 1, 1), 1, 10),
        (date(2000, 1, 9), 2, 20),
    ]
    for day, clicks, searches in samples:
        metric(kind=click_kind, start=day, value=clicks, save=True)
        metric(kind=search_kind, start=day, value=searches, save=True)
    endpoint = reverse('api.kpi.search-ctr')
    resp = self.client.get(endpoint + '?format=json')
    payload = json.loads(resp.content)
    eq_(payload['objects'], [
        {'clicks': 2, 'searches': 20,
         'start': u'2000-01-09'},
        {'clicks': 1, 'searches': 10,
         'start': u'2000-01-01'}])
    # min_start drops everything before the given date.
    resp = self.client.get(endpoint + '?format=json&min_start=2000-01-09')
    payload = json.loads(resp.content)
    eq_(payload['objects'], [{u'searches': 20, u'start': u'2000-01-09',
                              u'clicks': 2}])
def test_visitors(self):
    """The visitors API returns the stored unique-visitors metric."""
    visitors_kind = metric_kind(code=VISITORS_METRIC_CODE, save=True)
    today = date.today()
    metric(kind=visitors_kind, start=today, end=today, value=42, save=True)
    # The single stored value comes straight back out of the API.
    result = self._get_api_result('api.kpi.visitors')
    eq_(result['objects'][0]['visitors'], 42)
def test_l10n_coverage(self):
    """The l10n coverage API returns the stored coverage metric."""
    coverage_kind = metric_kind(code=L10N_METRIC_CODE, save=True)
    today = date.today()
    metric(kind=coverage_kind, start=today, end=today, value=56, save=True)
    # The single stored percentage comes straight back out of the API.
    result = self._get_api_result('api.kpi.l10n-coverage')
    eq_(result['objects'][0]['coverage'], 56)
def test_exit_survey_results(self):
    """The exit survey API returns the yes/no/don't-know counts."""
    today = date.today()
    # (metric kind code, API response key, stored value)
    expected = [
        (EXIT_SURVEY_YES_CODE, 'yes', 1337),
        (EXIT_SURVEY_NO_CODE, 'no', 42),
        (EXIT_SURVEY_DONT_KNOW_CODE, 'dont_know', 777),
    ]
    for code, _, value in expected:
        kind = metric_kind(code=code, save=True)
        metric(kind=kind, start=today, end=today, value=value, save=True)
    # Each stored value must appear under its key in the API result.
    row = self._get_api_result('api.kpi.exit-survey')['objects'][0]
    for _, key, value in expected:
        eq_(row[key], value)
| |
#!/usr/bin/env python
from ..debugging import bacpypes_debugging, ModuleLogger
from ..capability import Capability
from ..object import FileObject
from ..apdu import AtomicReadFileACK, AtomicReadFileACKAccessMethodChoice, \
AtomicReadFileACKAccessMethodRecordAccess, \
AtomicReadFileACKAccessMethodStreamAccess, \
AtomicWriteFileACK
from ..errors import ExecutionError, MissingRequiredParameter
# some debugging
_debug = 0
_log = ModuleLogger(globals())
#
# Local Record Access File Object Type
#
class LocalRecordAccessFileObject(FileObject):

    """A FileObject whose contents are accessed record-by-record.

    Subclasses implement __len__, read_record and write_record.
    """

    def __init__(self, **kwargs):
        """Initialize a record accessed file object."""
        if _debug:
            LocalRecordAccessFileObject._debug("__init__ %r",
                kwargs,
                )

        # the access method is fixed for this class: supply it when
        # absent, reject any conflicting value
        access_method = kwargs.setdefault('fileAccessMethod', 'recordAccess')
        if access_method != 'recordAccess':
            raise ValueError("inconsistent file access method")

        # continue with initialization
        FileObject.__init__(self, **kwargs)

    def __len__(self):
        """Return the number of records."""
        raise NotImplementedError("__len__")

    def read_record(self, start_record, record_count):
        """Read a number of records starting at a specific record."""
        raise NotImplementedError("read_record")

    def write_record(self, start_record, record_count, record_data):
        """Write a number of records, starting at a specific record."""
        raise NotImplementedError("write_record")

bacpypes_debugging(LocalRecordAccessFileObject)
#
# Local Stream Access File Object Type
#
class LocalStreamAccessFileObject(FileObject):

    """A FileObject whose contents are accessed as a stream of octets.

    Subclasses implement __len__, read_stream and write_stream.
    """

    def __init__(self, **kwargs):
        """ Initialize a stream accessed file object.

        The 'fileAccessMethod' property is forced to 'streamAccess';
        passing any other value raises ValueError.
        """
        if _debug:
            LocalStreamAccessFileObject._debug("__init__ %r",
                kwargs,
                )

        # verify the file access method or provide it
        if 'fileAccessMethod' in kwargs:
            if kwargs['fileAccessMethod'] != 'streamAccess':
                raise ValueError("inconsistent file access method")
        else:
            kwargs['fileAccessMethod'] = 'streamAccess'

        # continue with initialization
        FileObject.__init__(self, **kwargs)

    def __len__(self):
        """ Return the number of octets in the file. """
        # bug fix: this previously raised NotImplementedError("write_file"),
        # a copy-paste leftover that produced a misleading error message
        raise NotImplementedError("__len__")

    def read_stream(self, start_position, octet_count):
        """ Read a chunk of data out of the file. """
        raise NotImplementedError("read_stream")

    def write_stream(self, start_position, data):
        """ Write a number of octets, starting at a specific offset. """
        raise NotImplementedError("write_stream")

bacpypes_debugging(LocalStreamAccessFileObject)
#
# File Application Mixin
#
class FileServices(Capability):

    """Capability mixin that services AtomicReadFile and AtomicWriteFile
    requests against FileObject instances held by the application.

    # assumes the mixing class provides get_object_id() and response()
    # (e.g. a bacpypes Application) -- TODO confirm against the caller
    """

    def __init__(self):
        if _debug: FileServices._debug("__init__")
        Capability.__init__(self)

    def do_AtomicReadFileRequest(self, apdu):
        """Return one of our records."""
        if _debug: FileServices._debug("do_AtomicReadFileRequest %r", apdu)

        # the request must address a 'file' object
        if (apdu.fileIdentifier[0] != 'file'):
            raise ExecutionError('services', 'inconsistentObjectType')

        # get the object
        obj = self.get_object_id(apdu.fileIdentifier)
        if _debug: FileServices._debug(" - object: %r", obj)

        if not obj:
            raise ExecutionError('object', 'unknownObject')

        if apdu.accessMethod.recordAccess:
            # check against the object
            if obj.fileAccessMethod != 'recordAccess':
                raise ExecutionError('services', 'invalidFileAccessMethod')

            # simplify
            record_access = apdu.accessMethod.recordAccess

            # check for required parameters
            if record_access.fileStartRecord is None:
                raise MissingRequiredParameter("fileStartRecord required")
            if record_access.requestedRecordCount is None:
                raise MissingRequiredParameter("requestedRecordCount required")

            ### verify start is valid - double check this (empty files?)
            if (record_access.fileStartRecord < 0) or \
                    (record_access.fileStartRecord >= len(obj)):
                raise ExecutionError('services', 'invalidFileStartPosition')

            # pass along to the object; it returns an end-of-file flag
            # and the record data
            end_of_file, record_data = obj.read_record(
                record_access.fileStartRecord,
                record_access.requestedRecordCount,
                )
            if _debug: FileServices._debug(" - record_data: %r", record_data)

            # this is an ack
            resp = AtomicReadFileACK(context=apdu,
                endOfFile=end_of_file,
                accessMethod=AtomicReadFileACKAccessMethodChoice(
                    recordAccess=AtomicReadFileACKAccessMethodRecordAccess(
                        fileStartRecord=record_access.fileStartRecord,
                        returnedRecordCount=len(record_data),
                        fileRecordData=record_data,
                        ),
                    ),
                )

        elif apdu.accessMethod.streamAccess:
            # check against the object
            if obj.fileAccessMethod != 'streamAccess':
                raise ExecutionError('services', 'invalidFileAccessMethod')

            # simplify
            stream_access = apdu.accessMethod.streamAccess

            # check for required parameters
            if stream_access.fileStartPosition is None:
                raise MissingRequiredParameter("fileStartPosition required")
            if stream_access.requestedOctetCount is None:
                raise MissingRequiredParameter("requestedOctetCount required")

            ### verify start is valid - double check this (empty files?)
            if (stream_access.fileStartPosition < 0) or \
                    (stream_access.fileStartPosition >= len(obj)):
                raise ExecutionError('services', 'invalidFileStartPosition')

            # pass along to the object; it returns an end-of-file flag
            # and the octet data
            end_of_file, record_data = obj.read_stream(
                stream_access.fileStartPosition,
                stream_access.requestedOctetCount,
                )
            if _debug: FileServices._debug(" - record_data: %r", record_data)

            # this is an ack
            resp = AtomicReadFileACK(context=apdu,
                endOfFile=end_of_file,
                accessMethod=AtomicReadFileACKAccessMethodChoice(
                    streamAccess=AtomicReadFileACKAccessMethodStreamAccess(
                        fileStartPosition=stream_access.fileStartPosition,
                        fileData=record_data,
                        ),
                    ),
                )

        # NOTE(review): if the APDU carries neither recordAccess nor
        # streamAccess, resp is never bound and the line below raises
        # NameError -- confirm the APDU decoder guarantees one choice is set
        if _debug: FileServices._debug(" - resp: %r", resp)

        # return the result
        self.response(resp)

    def do_AtomicWriteFileRequest(self, apdu):
        """Write records or octets into one of our file objects and ack
        with the resulting start record/position."""
        if _debug: FileServices._debug("do_AtomicWriteFileRequest %r", apdu)

        # the request must address a 'file' object
        if (apdu.fileIdentifier[0] != 'file'):
            raise ExecutionError('services', 'inconsistentObjectType')

        # get the object
        obj = self.get_object_id(apdu.fileIdentifier)
        if _debug: FileServices._debug(" - object: %r", obj)

        if not obj:
            raise ExecutionError('object', 'unknownObject')

        if apdu.accessMethod.recordAccess:
            # check against the object
            if obj.fileAccessMethod != 'recordAccess':
                raise ExecutionError('services', 'invalidFileAccessMethod')

            # simplify
            record_access = apdu.accessMethod.recordAccess

            # check for required parameters
            if record_access.fileStartRecord is None:
                raise MissingRequiredParameter("fileStartRecord required")
            if record_access.recordCount is None:
                raise MissingRequiredParameter("recordCount required")
            if record_access.fileRecordData is None:
                raise MissingRequiredParameter("fileRecordData required")

            # check for read-only
            if obj.readOnly:
                raise ExecutionError('services', 'fileAccessDenied')

            # pass along to the object; it returns the record the write
            # actually started at
            start_record = obj.write_record(
                record_access.fileStartRecord,
                record_access.recordCount,
                record_access.fileRecordData,
                )
            if _debug: FileServices._debug(" - start_record: %r", start_record)

            # this is an ack
            resp = AtomicWriteFileACK(context=apdu,
                fileStartRecord=start_record,
                )

        elif apdu.accessMethod.streamAccess:
            # check against the object
            if obj.fileAccessMethod != 'streamAccess':
                raise ExecutionError('services', 'invalidFileAccessMethod')

            # simplify
            stream_access = apdu.accessMethod.streamAccess

            # check for required parameters
            if stream_access.fileStartPosition is None:
                raise MissingRequiredParameter("fileStartPosition required")
            if stream_access.fileData is None:
                raise MissingRequiredParameter("fileData required")

            # check for read-only
            if obj.readOnly:
                raise ExecutionError('services', 'fileAccessDenied')

            # pass along to the object; it returns the position the write
            # actually started at
            start_position = obj.write_stream(
                stream_access.fileStartPosition,
                stream_access.fileData,
                )
            if _debug: FileServices._debug(" - start_position: %r", start_position)

            # this is an ack
            resp = AtomicWriteFileACK(context=apdu,
                fileStartPosition=start_position,
                )

        # NOTE(review): as in the read handler, resp is unbound when
        # neither access method is present in the APDU
        if _debug: FileServices._debug(" - resp: %r", resp)

        # return the result
        self.response(resp)

bacpypes_debugging(FileServices)
#
# FileServicesClient
#
class FileServicesClient(Capability):

    """Client-side counterpart of FileServices.

    Concrete applications override these methods to issue
    AtomicReadFile/AtomicWriteFile requests to the device at *address*;
    every method here is abstract and simply raises NotImplementedError.
    """

    def read_record(self, address, fileIdentifier, start_record, record_count):
        """Read 'record_count' records beginning at 'start_record'."""
        raise NotImplementedError("read_record")

    def write_record(self, address, fileIdentifier, start_record, record_count, record_data):
        """Write 'record_count' records beginning at 'start_record'."""
        raise NotImplementedError("write_record")

    def read_stream(self, address, fileIdentifier, start_position, octet_count):
        """Read 'octet_count' octets beginning at 'start_position'."""
        raise NotImplementedError("read_stream")

    def write_stream(self, address, fileIdentifier, start_position, data):
        """Write octets to the file beginning at 'start_position'."""
        raise NotImplementedError("write_stream")
| |
# The MIT License (MIT)
#
# Copyright (c) 2021 Samuel Bear Powell
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import numpy as np
from datetime import datetime
_JIT = not os.environ.get('NUMBA_DISABLE_JIT',False)
if __name__ == '__main__':
    from argparse import ArgumentParser
    import sys

    # command-line interface; all angles are in degrees unless --radians
    parser = ArgumentParser(prog='sunposition',description='Compute sun position parameters given the time and location')
    parser.add_argument('--test',dest='test',action='store_true',help='Run tests')
    parser.add_argument('--version',action='version',version='%(prog)s 1.0')
    parser.add_argument('--citation',dest='cite',action='store_true',help='Print citation information')
    parser.add_argument('-t,--time',dest='t',type=str,default='now',help='"now" or date and time (UTC) in "YYYY-MM-DD hh:mm:ss.ssssss" format or a (UTC) POSIX timestamp')
    parser.add_argument('-lat,--latitude',dest='lat',type=float,default=51.48,help='latitude, in decimal degrees, positive for north')
    parser.add_argument('-lon,--longitude',dest='lon',type=float,default=0.0,help='longitude, in decimal degrees, positive for east')
    parser.add_argument('-e,--elevation',dest='elev',type=float,default=0,help='elevation, in meters')
    parser.add_argument('-T,--temperature',dest='temp',type=float,default=14.6,help='temperature, in degrees celcius')
    parser.add_argument('-p,--pressure',dest='p',type=float,default=1013.0,help='atmospheric pressure, in millibar')
    parser.add_argument('-a,--atmos_refract',dest='a',type=float,default=0.5667,help='atmospheric refraction at sunrise and sunset, in degrees')
    parser.add_argument('-dt',type=float,default=0.0,help='difference between earth\'s rotation time (TT) and universal time (UT1)')
    parser.add_argument('-r,--radians',dest='rad',action='store_true',help='Output in radians instead of degrees')
    parser.add_argument('--csv',dest='csv',action='store_true',help='Comma separated values (time,dt,lat,lon,elev,temp,pressure,az,zen,RA,dec,H)')
    parser.add_argument('--jit',dest='jit',action='store_true',help='Enable Numba acceleration (jit compilation time may overwhelm speed-up)')
    args = parser.parse_args()

    if args.cite:
        print("Implementation: Samuel Bear Powell, 2016")
        print("Algorithm:")
        print("Ibrahim Reda, Afshin Andreas, \"Solar position algorithm for solar radiation applications\", Solar Energy, Volume 76, Issue 5, 2004, Pages 577-589, ISSN 0038-092X, doi:10.1016/j.solener.2003.12.003")
        sys.exit(0)

    _JIT = args.jit

    # -t accepts "now", an ISO-like datetime string, or a POSIX timestamp
    if args.t == "now":
        args.t = datetime.utcnow()
    elif ":" in args.t and "-" in args.t:
        # try increasingly permissive formats; strptime signals a mismatch
        # with ValueError (the previous bare excepts also swallowed
        # KeyboardInterrupt/SystemExit)
        try:
            args.t = datetime.strptime(args.t,'%Y-%m-%d %H:%M:%S.%f') #with microseconds
        except ValueError:
            try:
                args.t = datetime.strptime(args.t,'%Y-%m-%d %H:%M:%S.') #without microseconds
            except ValueError:
                args.t = datetime.strptime(args.t,'%Y-%m-%d %H:%M:%S')
    else:
        # float() (rather than the previous int()) also accepts
        # fractional POSIX timestamps; integer strings still parse
        args.t = datetime.utcfromtimestamp(float(args.t))
if _JIT:
    # numba is an optional dependency; fall back to pure python/numpy
    # when it is missing or broken
    try:
        import numba
        from numba import jit
        # honor numba's own kill-switch as well as ours
        _JIT = (numba.config.DISABLE_JIT == 0)
        @jit(nopython=True)
        def _polyval(p, x):
            """Horner evaluation of polynomial p (highest order first) at x."""
            y = 0.0
            for v in p:
                y = y*x + v
            return y
    except Exception:
        # narrowed from a bare except: Exception still covers import and
        # attribute errors but no longer masks KeyboardInterrupt/SystemExit
        _JIT = False

if not _JIT:
    def jit(*args, **kwargs):
        """Decorator stand-in that does nothing when JIT is unavailable."""
        if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
            # called as @decorator
            return args[0]
        else:
            # called as @decorator(*args, **kwargs)
            return jit
    _polyval = np.polyval
def _calendar_time(dt):
try:
x = dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond
return x
except AttributeError:
try:
return _calendar_time(datetime.utcfromtimestamp(dt)) #will raise OSError if dt is not acceptable
except:
raise TypeError('dt must be datetime object or POSIX timestamp')
@jit(nopython=True)
def _julian_day(dt):
    """Calculate the Julian Day from a (year, month, day, hour, minute, second, microsecond) tuple"""
    # year and month numbers
    yr, mo, dy, hr, mn, sc, us = dt
    if mo <= 2: # From paper: "if M = 1 or 2, then Y = Y - 1 and M = M + 12"
        mo += 12
        yr -= 1
    # day of the month with decimal time
    dy = dy + hr/24.0 + mn/(24.0*60.0) + sc/(24.0*60.0*60.0) + us/(24.0*60.0*60.0*1e6)
    # b is equal to 0 for the julian calendar and is equal to (2- A +
    # INT(A/4)), A = INT(Y/100), for the gregorian calendar
    # NOTE(review): b is always computed with the Gregorian formula here;
    # dates before the 1582 calendar reform would need b = 0 -- confirm
    # that only modern dates are expected
    a = int(yr / 100)
    b = 2 - a + int(a / 4)
    jd = int(365.25 * (yr + 4716)) + int(30.6001 * (mo + 1)) + dy + b - 1524.5
    return jd
@jit(nopython=True)
def _julian_ephemeris_day(jd, deltat):
    """Julian Ephemeris Day: the Julian Day shifted by delta-time
    (terrestrial time - universal time, in seconds)."""
    day_fraction = deltat / 86400.0
    return jd + day_fraction
@jit(nopython=True)
def _julian_century(jd):
    """Julian centuries since the J2000.0 epoch for a Julian Day or
    Julian Ephemeris Day."""
    days_since_j2000 = jd - 2451545.0
    return days_since_j2000 / 36525.0
@jit(nopython=True)
def _julian_millennium(jc):
    """Julian millennia corresponding to *jc* Julian (Ephemeris) centuries."""
    millennia = jc / 10.0
    return millennia
@jit(nopython=True)
def _cos_sum(x, coeffs):
    """Evaluate the periodic sums a*cos(b + c*x) for each coefficient table.

    *coeffs* is a sequence of (N_i, 3) arrays of (a, b, c) rows (e.g. the
    L5..L0 term tables of the SPA); returns one summed value per table.
    """
    y = np.zeros(len(coeffs))
    # explicit loops (rather than vectorized numpy) keep this compatible
    # with numba's nopython mode
    for i, abc in enumerate(coeffs):
        for a,b,c in abc:
            y[i] += a*np.cos(b + c*x)
    return y
# Earth Heliocentric Longitude coefficients (L0, L1, L2, L3, L4, and L5 in paper)
_EHL = (
#L5:
np.array([(1.0, 3.14, 0.0)]),
#L4:
np.array([(114.0, 3.142, 0.0), (8.0, 4.13, 6283.08), (1.0, 3.84, 12566.15)]),
#L3:
np.array([(289.0, 5.844, 6283.076), (35.0, 0.0, 0.0,), (17.0, 5.49, 12566.15),
(3.0, 5.2, 155.42), (1.0, 4.72, 3.52), (1.0, 5.3, 18849.23),
(1.0, 5.97, 242.73)]),
#L2:
np.array([(52919.0, 0.0, 0.0), (8720.0, 1.0721, 6283.0758), (309.0, 0.867, 12566.152),
(27.0, 0.05, 3.52), (16.0, 5.19, 26.3), (16.0, 3.68, 155.42),
(10.0, 0.76, 18849.23), (9.0, 2.06, 77713.77), (7.0, 0.83, 775.52),
(5.0, 4.66, 1577.34), (4.0, 1.03, 7.11), (4.0, 3.44, 5573.14),
(3.0, 5.14, 796.3), (3.0, 6.05, 5507.55), (3.0, 1.19, 242.73),
(3.0, 6.12, 529.69), (3.0, 0.31, 398.15), (3.0, 2.28, 553.57),
(2.0, 4.38, 5223.69), (2.0, 3.75, 0.98)]),
#L1:
np.array([(628331966747.0, 0.0, 0.0), (206059.0, 2.678235, 6283.07585), (4303.0, 2.6351, 12566.1517),
(425.0, 1.59, 3.523), (119.0, 5.796, 26.298), (109.0, 2.966, 1577.344),
(93.0, 2.59, 18849.23), (72.0, 1.14, 529.69), (68.0, 1.87, 398.15),
(67.0, 4.41, 5507.55), (59.0, 2.89, 5223.69), (56.0, 2.17, 155.42),
(45.0, 0.4, 796.3), (36.0, 0.47, 775.52), (29.0, 2.65, 7.11),
(21.0, 5.34, 0.98), (19.0, 1.85, 5486.78), (19.0, 4.97, 213.3),
(17.0, 2.99, 6275.96), (16.0, 0.03, 2544.31), (16.0, 1.43, 2146.17),
(15.0, 1.21, 10977.08), (12.0, 2.83, 1748.02), (12.0, 3.26, 5088.63),
(12.0, 5.27, 1194.45), (12.0, 2.08, 4694), (11.0, 0.77, 553.57),
(10.0, 1.3, 3286.6), (10.0, 4.24, 1349.87), (9.0, 2.7, 242.73),
(9.0, 5.64, 951.72), (8.0, 5.3, 2352.87), (6.0, 2.65, 9437.76),
(6.0, 4.67, 4690.48)]),
#L0:
np.array([(175347046.0, 0.0, 0.0), (3341656.0, 4.6692568, 6283.07585), (34894.0, 4.6261, 12566.1517),
(3497.0, 2.7441, 5753.3849), (3418.0, 2.8289, 3.5231), (3136.0, 3.6277, 77713.7715),
(2676.0, 4.4181, 7860.4194), (2343.0, 6.1352, 3930.2097), (1324.0, 0.7425, 11506.7698),
(1273.0, 2.0371, 529.691), (1199.0, 1.1096, 1577.3435), (990.0, 5.233, 5884.927),
(902.0, 2.045, 26.298), (857.0, 3.508, 398.149), (780.0, 1.179, 5223.694),
(753.0, 2.533, 5507.553), (505.0, 4.583, 18849.228), (492.0, 4.205, 775.523),
(357.0, 2.92, 0.067), (317.0, 5.849, 11790.629), (284.0, 1.899, 796.298),
(271.0, 0.315, 10977.079), (243.0, 0.345, 5486.778), (206.0, 4.806, 2544.314),
(205.0, 1.869, 5573.143), (202.0, 2.4458, 6069.777), (156.0, 0.833, 213.299),
(132.0, 3.411, 2942.463), (126.0, 1.083, 20.775), (115.0, 0.645, 0.98),
(103.0, 0.636, 4694.003), (102.0, 0.976, 15720.839), (102.0, 4.267, 7.114),
(99.0, 6.21, 2146.17), (98.0, 0.68, 155.42), (86.0, 5.98, 161000.69),
(85.0, 1.3, 6275.96), (85.0, 3.67, 71430.7), (80.0, 1.81, 17260.15),
(79.0, 3.04, 12036.46), (71.0, 1.76, 5088.63), (74.0, 3.5, 3154.69),
(74.0, 4.68, 801.82), (70.0, 0.83, 9437.76), (62.0, 3.98, 8827.39),
(61.0, 1.82, 7084.9), (57.0, 2.78, 6286.6), (56.0, 4.39, 14143.5),
(56.0, 3.47, 6279.55), (52.0, 0.19, 12139.55), (52.0, 1.33, 1748.02),
(51.0, 0.28, 5856.48), (49.0, 0.49, 1194.45), (41.0, 5.37, 8429.24),
(41.0, 2.4, 19651.05), (39.0, 6.17, 10447.39), (37.0, 6.04, 10213.29),
(37.0, 2.57, 1059.38), (36.0, 1.71, 2352.87), (36.0, 1.78, 6812.77),
(33.0, 0.59, 17789.85), (30.0, 0.44, 83996.85), (30.0, 2.74, 1349.87),
(25.0, 3.16, 4690.48)])
)
@jit(nopython=True)
def _heliocentric_longitude(jme):
    """Compute the Earth Heliocentric Longitude (L) in degrees given the Julian Ephemeris Millennium"""
    #L5, ..., L0
    Li = _cos_sum(jme, _EHL)
    # polynomial in jme with the L5..L0 sums as coefficients; the table
    # values are scaled by 1e8
    L = _polyval(Li, jme) / 1e8
    # radians -> degrees, normalized to [0, 360)
    L = np.rad2deg(L) % 360
    return L
#Earth Heliocentric Latitude coefficients (B0 and B1 in paper)
_EHB = (
#B1:
np.array([(9.0, 3.9, 5507.55), (6.0, 1.73, 5223.69)]),
#B0:
np.array([(280.0, 3.199, 84334.662), (102.0, 5.422, 5507.553), (80.0, 3.88, 5223.69),
(44.0, 3.7, 2352.87), (32.0, 4.0, 1577.34)])
)
@jit(nopython=True)
def _heliocentric_latitude(jme):
    """Compute the Earth Heliocentric Latitude (B) in degrees given the Julian Ephemeris Millennium"""
    Bi = _cos_sum(jme, _EHB)
    # polynomial in jme with the B1, B0 sums as coefficients, scaled by 1e8
    B = _polyval(Bi, jme) / 1e8
    # NOTE(review): the latitude is wrapped to [0, 360) exactly like the
    # longitude; a small negative B maps to a value near 360 rather than
    # staying near 0 -- confirm downstream consumers expect this
    B = np.rad2deg(B) % 360
    return B
#Earth Heliocentric Radius coefficients (R0, R1, R2, R3, R4)
_EHR = (
#R4:
np.array([(4.0, 2.56, 6283.08)]),
#R3:
np.array([(145.0, 4.273, 6283.076), (7.0, 3.92, 12566.15)]),
#R2:
np.array([(4359.0, 5.7846, 6283.0758), (124.0, 5.579, 12566.152), (12.0, 3.14, 0.0),
(9.0, 3.63, 77713.77), (6.0, 1.87, 5573.14), (3.0, 5.47, 18849)]),
#R1:
np.array([(103019.0, 1.10749, 6283.07585), (1721.0, 1.0644, 12566.1517), (702.0, 3.142, 0.0),
(32.0, 1.02, 18849.23), (31.0, 2.84, 5507.55), (25.0, 1.32, 5223.69),
(18.0, 1.42, 1577.34), (10.0, 5.91, 10977.08), (9.0, 1.42, 6275.96),
(9.0, 0.27, 5486.78)]),
#R0:
np.array([(100013989.0, 0.0, 0.0), (1670700.0, 3.0984635, 6283.07585), (13956.0, 3.05525, 12566.1517),
(3084.0, 5.1985, 77713.7715), (1628.0, 1.1739, 5753.3849), (1576.0, 2.8469, 7860.4194),
(925.0, 5.453, 11506.77), (542.0, 4.564, 3930.21), (472.0, 3.661, 5884.927),
(346.0, 0.964, 5507.553), (329.0, 5.9, 5223.694), (307.0, 0.299, 5573.143),
(243.0, 4.273, 11790.629), (212.0, 5.847, 1577.344), (186.0, 5.022, 10977.079),
(175.0, 3.012, 18849.228), (110.0, 5.055, 5486.778), (98.0, 0.89, 6069.78),
(86.0, 5.69, 15720.84), (86.0, 1.27, 161000.69), (85.0, 0.27, 17260.15),
(63.0, 0.92, 529.69), (57.0, 2.01, 83996.85), (56.0, 5.24, 71430.7),
(49.0, 3.25, 2544.31), (47.0, 2.58, 775.52), (45.0, 5.54, 9437.76),
(43.0, 6.01, 6275.96), (39.0, 5.36, 4694), (38.0, 2.39, 8827.39),
(37.0, 0.83, 19651.05), (37.0, 4.9, 12139.55), (36.0, 1.67, 12036.46),
(35.0, 1.84, 2942.46), (33.0, 0.24, 7084.9), (32.0, 0.18, 5088.63),
(32.0, 1.78, 398.15), (28.0, 1.21, 6286.6), (28.0, 1.9, 6279.55),
(26.0, 4.59, 10447.39)])
)
@jit(nopython=True)
def _heliocentric_radius(jme):
    """Compute the Earth Heliocentric Radius (R) in astronomical units given the Julian Ephemeris Millennium"""
    Ri = _cos_sum(jme, _EHR)
    # polynomial in jme with the R4..R0 sums as coefficients, scaled by 1e8
    R = _polyval(Ri, jme) / 1e8
    return R
@jit(nopython=True)
def _heliocentric_position(jme):
    """Earth heliocentric position for the given Julian Ephemeris Millennium.

    Returns (L, B, R): longitude in degrees, latitude in degrees, and
    radius in astronomical units.
    """
    L = _heliocentric_longitude(jme)
    B = _heliocentric_latitude(jme)
    R = _heliocentric_radius(jme)
    return L, B, R
@jit(nopython=True)
def _geocentric_position(helio_pos):
    """Geocentric sun longitude (Theta) and latitude (beta), in degrees,
    from the Earth's heliocentric position (L, B, R)."""
    L, B, _R = helio_pos
    # the geocentric view is the heliocentric one rotated half a turn,
    # with the latitude sign flipped
    theta = L + 180
    beta = -B
    return (theta, beta)
@jit(nopython=True)
def _ecliptic_obliquity(jme, delta_epsilon):
    """Calculate the true obliquity of the ecliptic (epsilon, in degrees) given the Julian Ephemeris Millennium and the obliquity nutation (delta_epsilon, in degrees)"""
    # mean obliquity polynomial is evaluated in u = JME/10, in arc-seconds
    u = jme/10
    e0 = _polyval((2.45, 5.79, 27.87, 7.12, -39.05, -249.67, -51.38, 1999.25, -1.55, -4680.93, 84381.448), u)
    # arc-seconds -> degrees, plus the nutation in obliquity
    e = e0/3600.0 + delta_epsilon
    return e
#Nutation Longitude and Obliquity coefficients (Y)
_NLO_Y = np.array([(0.0, 0.0, 0.0, 0.0, 1.0), (-2.0, 0.0, 0.0, 2.0, 2.0), (0.0, 0.0, 0.0, 2.0, 2.0),
(0.0, 0.0, 0.0, 0.0, 2.0), (0.0, 1.0, 0.0, 0.0, 0.0), (0.0, 0.0, 1.0, 0.0, 0.0),
(-2.0, 1.0, 0.0, 2.0, 2.0), (0.0, 0.0, 0.0, 2.0, 1.0), (0.0, 0.0, 1.0, 2.0, 2.0),
(-2.0, -1.0, 0.0, 2.0, 2.0), (-2.0, 0.0, 1.0, 0.0, 0.0), (-2.0, 0.0, 0.0, 2.0, 1.0),
(0.0, 0.0, -1.0, 2.0, 2.0), (2.0, 0.0, 0.0, 0.0, 0.0), (0.0, 0.0, 1.0, 0.0, 1.0),
(2.0, 0.0, -1.0, 2.0, 2.0), (0.0, 0.0, -1.0, 0.0, 1.0), (0.0, 0.0, 1.0, 2.0, 1.0),
(-2.0, 0.0, 2.0, 0.0, 0.0), (0.0, 0.0, -2.0, 2.0, 1.0), (2.0, 0.0, 0.0, 2.0, 2.0),
(0.0, 0.0, 2.0, 2.0, 2.0), (0.0, 0.0, 2.0, 0.0, 0.0), (-2.0, 0.0, 1.0, 2.0, 2.0),
(0.0, 0.0, 0.0, 2.0, 0.0), (-2.0, 0.0, 0.0, 2.0, 0.0), (0.0, 0.0, -1.0, 2.0, 1.0),
(0.0, 2.0, 0.0, 0.0, 0.0), (2.0, 0.0, -1.0, 0.0, 1.0), (-2.0, 2.0, 0.0, 2.0, 2.0),
(0.0, 1.0, 0.0, 0.0, 1.0), (-2.0, 0.0, 1.0, 0.0, 1.0), (0.0, -1.0, 0.0, 0.0, 1.0),
(0.0, 0.0, 2.0, -2.0, 0.0), (2.0, 0.0, -1.0, 2.0, 1.0), (2.0, 0.0, 1.0, 2.0, 2.0),
(0.0, 1.0, 0.0, 2.0, 2.0), (-2.0, 1.0, 1.0, 0.0, 0.0), (0.0, -1.0, 0.0, 2.0, 2.0),
(2.0, 0.0, 0.0, 2.0, 1.0), (2.0, 0.0, 1.0, 0.0, 0.0), (-2.0, 0.0, 2.0, 2.0, 2.0),
(-2.0, 0.0, 1.0, 2.0, 1.0), (2.0, 0.0, -2.0, 0.0, 1.0), (2.0, 0.0, 0.0, 0.0, 1.0),
(0.0, -1.0, 1.0, 0.0, 0.0), (-2.0, -1.0, 0.0, 2.0, 1.0), (-2.0, 0.0, 0.0, 0.0, 1.0),
(0.0, 0.0, 2.0, 2.0, 1.0), (-2.0, 0.0, 2.0, 0.0, 1.0), (-2.0, 1.0, 0.0, 2.0, 1.0),
(0.0, 0.0, 1.0, -2.0, 0.0), (-1.0, 0.0, 1.0, 0.0, 0.0), (-2.0, 1.0, 0.0, 0.0, 0.0),
(1.0, 0.0, 0.0, 0.0, 0.0), (0.0, 0.0, 1.0, 2.0, 0.0), (0.0, 0.0, -2.0, 2.0, 2.0),
(-1.0, -1.0, 1.0, 0.0, 0.0), (0.0, 1.0, 1.0, 0.0, 0.0), (0.0, -1.0, 1.0, 2.0, 2.0),
(2.0, -1.0, -1.0, 2.0, 2.0), (0.0, 0.0, 3.0, 2.0, 2.0), (2.0, -1.0, 0.0, 2.0, 2.0)])
#Nutation Longitude and Obliquity coefficients (a,b)
_NLO_AB = np.array([(-171996.0, -174.2), (-13187.0, -1.6), (-2274.0, -0.2), (2062.0, 0.2), (1426.0, -3.4), (712.0, 0.1),
(-517.0, 1.2), (-386.0, -0.4), (-301.0, 0.0), (217.0, -0.5), (-158.0, 0.0), (129.0, 0.1),
(123.0, 0.0), (63.0, 0.0), (63.0, 0.1), (-59.0, 0.0), (-58.0, -0.1), (-51.0, 0.0),
(48.0, 0.0), (46.0, 0.0), (-38.0, 0.0), (-31.0, 0.0), (29.0, 0.0), (29.0, 0.0),
(26.0, 0.0), (-22.0, 0.0), (21.0, 0.0), (17.0, -0.1), (16.0, 0.0), (-16.0, 0.1),
(-15.0, 0.0), (-13.0, 0.0), (-12.0, 0.0), (11.0, 0.0), (-10.0, 0.0), (-8.0, 0.0),
(7.0, 0.0), (-7.0, 0.0), (-7.0, 0.0), (-7.0, 0.0), (6.0, 0.0), (6.0, 0.0),
(6.0, 0.0), (-6.0, 0.0), (-6.0, 0.0), (5.0, 0.0), (-5.0, 0.0), (-5.0, 0.0),
(-5.0, 0.0), (4.0, 0.0), (4.0, 0.0), (4.0, 0.0), (-4.0, 0.0), (-4.0, 0.0),
(-4.0, 0.0), (3.0, 0.0), (-3.0, 0.0), (-3.0, 0.0), (-3.0, 0.0), (-3.0, 0.0),
(-3.0, 0.0), (-3.0, 0.0), (-3.0, 0.0)])
#Nutation Longitude and Obliquity coefficients (c,d)
_NLO_CD = np.array([(92025.0, 8.9), (5736.0, -3.1), (977.0, -0.5), (-895.0, 0.5),
(54.0, -0.1), (-7.0, 0.0), (224.0, -0.6), (200.0, 0.0),
(129.0, -0.1), (-95.0, 0.3), (0.0, 0.0), (-70.0, 0.0),
(-53.0, 0.0), (0.0, 0.0), (-33.0, 0.0), (26.0, 0.0),
(32.0, 0.0), (27.0, 0.0), (0.0, 0.0), (-24.0, 0.0),
(16.0, 0.0), (13.0, 0.0), (0.0, 0.0), (-12.0, 0.0),
(0.0, 0.0), (0.0, 0.0), (-10.0, 0.0), (0.0, 0.0),
(-8.0, 0.0), (7.0, 0.0), (9.0, 0.0), (7.0, 0.0),
(6.0, 0.0), (0.0, 0.0), (5.0, 0.0), (3.0, 0.0),
(-3.0, 0.0), (0.0, 0.0), (3.0, 0.0), (3.0, 0.0),
(0.0, 0.0), (-3.0, 0.0), (-3.0, 0.0), (3.0, 0.0),
(3.0, 0.0), (0.0, 0.0), (3.0, 0.0), (3.0, 0.0),
(3.0, 0.0), (0.0, 0.0), (0.0, 0.0), (0.0, 0.0),
(0.0, 0.0), (0.0, 0.0), (0.0, 0.0), (0.0, 0.0),
(0.0, 0.0), (0.0, 0.0), (0.0, 0.0), (0.0, 0.0),
(0.0, 0.0), (0.0, 0.0), (0.0, 0.0)])
@jit(nopython=True)
def _nutation_obliquity(jce):
    """compute the nutation in longitude (delta_psi) and the true obliquity (epsilon), both in degrees, given the Julian Ephemeris Century"""
    #mean elongation of the moon from the sun, in radians:
    #x0 = 297.85036 + 445267.111480*jce - 0.0019142*(jce**2) + (jce**3)/189474
    x0 = np.deg2rad(_polyval((1./189474, -0.0019142, 445267.111480, 297.85036),jce))
    #mean anomaly of the sun (Earth), in radians:
    x1 = np.deg2rad(_polyval((-1/3e5, -0.0001603, 35999.050340, 357.52772), jce))
    #mean anomaly of the moon, in radians:
    x2 = np.deg2rad(_polyval((1./56250, 0.0086972, 477198.867398, 134.96298), jce))
    #moon's argument of latitude, in radians:
    x3 = np.deg2rad(_polyval((1./327270, -0.0036825, 483202.017538, 93.27191), jce))
    #Longitude of the ascending node of the moon's mean orbit on the ecliptic
    # measured from the mean equinox of the date, in radians
    x4 = np.deg2rad(_polyval((1./45e4, 0.0020708, -1934.136261, 125.04452), jce))
    x = np.array([x0, x1, x2, x3, x4])
    a,b = _NLO_AB.T
    c,d = _NLO_CD.T
    # each row of _NLO_Y picks a combination of the five angles; the
    # (a + b*jce) sine terms sum to delta-psi and the (c + d*jce) cosine
    # terms to delta-epsilon, both tabulated in 0.0001 arc-seconds
    # (hence the /36e6 to reach degrees)
    dp = np.sum((a + b*jce)*np.sin(np.dot(_NLO_Y, x)))/36e6
    de = np.sum((c + d*jce)*np.cos(np.dot(_NLO_Y, x)))/36e6
    e = _ecliptic_obliquity(_julian_millennium(jce), de)
    return dp, e
@jit(nopython=True)
def _abberation_correction(R):
    """Aberration correction delta_tau, in degrees, for Earth heliocentric radius R (in AU).

    (The function name keeps the module's historical misspelling of 'aberration'.)
    """
    #-20.4898 arcsec scaled by 1/R; 3600 arcsec per degree
    arcsec_per_deg_times_R = 3600*R
    return -20.4898/arcsec_per_deg_times_R
@jit(nopython=True)
def _sun_longitude(helio_pos, delta_psi):
    """Apparent sun longitude (lambda) and geocentric latitude (beta), both in degrees.

    helio_pos is the Earth heliocentric (L, B, R) triple; delta_psi is the
    nutation in longitude, in degrees.
    """
    L, B, R = helio_pos
    #the geocentric position is the heliocentric one seen from the other end
    geo_lon = L + 180
    geo_lat = -B
    #apply nutation and aberration to get the apparent longitude
    apparent_lon = geo_lon + delta_psi + _abberation_correction(R)
    return apparent_lon, geo_lat
@jit(nopython=True)
def _greenwich_sidereal_time(jd, delta_psi, epsilon):
    """Apparent Greenwich sidereal time (v, in degrees) for Julian Day jd.

    delta_psi is the nutation in longitude and epsilon the true obliquity,
    both in degrees.
    """
    jc = _julian_century(jd)
    #mean sidereal time at Greenwich, accumulated term by term (degrees);
    #kept in the same left-to-right order as the SPA polynomial
    v0 = 280.46061837 + 360.98564736629*(jd - 2451545)
    v0 = v0 + 0.000387933*(jc**2)
    v0 = v0 - (jc**3)/38710000
    v0 = v0 % 360
    #nutation correction projected onto the equator gives the apparent time
    return v0 + delta_psi*np.cos(np.deg2rad(epsilon))
@jit(nopython=True)
def _sun_ra_decl(llambda, epsilon, beta):
    """Geocentric right ascension (alpha) and declination (delta) of the sun, in degrees.

    llambda = apparent sun longitude, epsilon = true obliquity,
    beta = geocentric latitude, all in degrees.
    """
    lam = np.deg2rad(llambda)
    eps = np.deg2rad(epsilon)
    bet = np.deg2rad(beta)
    #right ascension = atan2(sin(lam)cos(eps) - tan(bet)sin(eps), cos(lam)),
    #folded into [0, 360)
    ra = np.rad2deg(np.arctan2(np.sin(lam)*np.cos(eps) - np.tan(bet)*np.sin(eps), np.cos(lam))) % 360
    #declination
    dec = np.rad2deg(np.arcsin(np.sin(bet)*np.cos(eps) + np.cos(bet)*np.sin(eps)*np.sin(lam)))
    return ra, dec
@jit(nopython=True)
def _sun_topo_ra_decl_hour(latitude, longitude, elevation, jd, delta_t = 0):
    """Calculate the sun's topocentric right ascension (alpha'), declination (delta'), and hour angle (H')

    latitude, longitude in degrees; elevation in meters; jd = Julian Day;
    delta_t = TT - UT in seconds. All returned angles are in degrees.
    """
    jde = _julian_ephemeris_day(jd, delta_t)
    jce = _julian_century(jde)
    jme = _julian_millennium(jce)
    helio_pos = _heliocentric_position(jme)
    R = helio_pos[-1]
    phi, E = np.deg2rad(latitude), elevation
    #equatorial horizontal parallax of the sun, in radians
    xi = np.deg2rad(8.794/(3600*R)) #8.794 arcsec at 1 AU, scaled by 1/R
    #rho = distance from center of earth in units of the equatorial radius
    #phi-prime = geocentric latitude
    #NB: These equations look like they're based on WGS-84, but are rounded slightly
    # The WGS-84 reference ellipsoid has major axis a = 6378137 m, and flattening factor 1/f = 298.257223563
    # minor axis b = a*(1-f) = 6356752.3142 = 0.996647189335*a
    u = np.arctan(0.99664719*np.tan(phi)) #reduced (parametric) latitude
    # NOTE(review): the original comments on the next two lines were swapped.
    # Per the SPA: x = rho*cos(phi-prime) (used in the RA-parallax terms) and
    # y = rho*sin(phi-prime) (used with sin(delta) below).
    x = np.cos(u) + E*np.cos(phi)/6378140 #rho cos(phi-prime)
    y = 0.99664719*np.sin(u) + E*np.sin(phi)/6378140 #rho sin(phi-prime)
    delta_psi, epsilon = _nutation_obliquity(jce) #nutation in longitude, true obliquity (deg)
    llambda, beta = _sun_longitude(helio_pos, delta_psi) #apparent sun longitude, geocentric latitude (deg)
    alpha, delta = _sun_ra_decl(llambda, epsilon, beta) #geocentric RA, declination (deg)
    v = _greenwich_sidereal_time(jd, delta_psi, epsilon) #apparent Greenwich sidereal time (deg)
    H = v + longitude - alpha #local hour angle (deg)
    Hr, dr = np.deg2rad(H), np.deg2rad(delta)
    dar = np.arctan2(-x*np.sin(xi)*np.sin(Hr), np.cos(dr)-x*np.sin(xi)*np.cos(Hr))
    delta_alpha = np.rad2deg(dar) #parallax correction to right ascension (deg)
    alpha_prime = alpha + delta_alpha #topocentric right ascension
    delta_prime = np.rad2deg(np.arctan2((np.sin(dr) - y*np.sin(xi))*np.cos(dar), np.cos(dr) - y*np.sin(xi)*np.cos(Hr))) #topocentric declination
    H_prime = H - delta_alpha #topocentric local hour angle
    return alpha_prime, delta_prime, H_prime
@jit(nopython=True)
def _sun_topo_azimuth_zenith(latitude, delta_prime, H_prime, temperature=14.6, pressure=1013, atmos_refract=0.5667):
    """Compute the sun's topocentric azimuth and zenith angles
    azimuth is measured eastward from north, zenith from vertical
    temperature = average temperature in C (default is 14.6 = global average in 2013)
    pressure = average pressure in mBar (default 1013 = global average)
    atmos_refract = atmospheric refraction at sunrise/sunset, in degrees
    Returns (azimuth, zenith, delta_e) where delta_e is the applied
    refraction correction, all in degrees.
    """
    SUN_RADIUS = 0.26667  #apparent solar radius, in degrees
    phi = np.deg2rad(latitude)
    dr, Hr = np.deg2rad(delta_prime), np.deg2rad(H_prime)
    P, T = pressure, temperature
    #uncorrected topocentric elevation angle, in degrees
    e0 = np.rad2deg(np.arcsin(np.sin(phi)*np.sin(dr) + np.cos(phi)*np.cos(dr)*np.cos(Hr)))
    delta_e = 0.0
    #apply the refraction correction only when the sun's disk is at least
    #partially above the refracted horizon
    if e0 >= -1*(SUN_RADIUS + atmos_refract):
        tmp = np.deg2rad(e0 + 10.3/(e0+5.11))
        delta_e = (P/1010.0)*(283.0/(273+T))*(1.02/(60*np.tan(tmp)))
    e = e0 + delta_e  #refraction-corrected elevation
    zenith = 90 - e
    #gamma is the azimuth measured from south; adding 180 re-references it to north
    gamma = np.rad2deg(np.arctan2(np.sin(Hr), np.cos(Hr)*np.sin(phi) - np.tan(dr)*np.cos(phi))) % 360
    Phi = (gamma + 180) % 360 #azimuth from north
    return Phi, zenith, delta_e
@jit(nopython=True)
def _norm_lat_lon(lat, lon):
    """Bring (lat, lon) into the canonical ranges lat in [-90, 90], lon in [0, 360)."""
    if lat < -90 or lat > 90:
        #out-of-range latitude: round-trip through cartesian coordinates
        lat_r = np.deg2rad(lat)
        lon_r = np.deg2rad(lon)
        x = np.cos(lon_r)*np.cos(lat_r)
        y = np.sin(lon_r)*np.cos(lat_r)
        z = np.sin(lat_r)
        r = np.sqrt(x**2 + y**2 + z**2)
        lon = np.rad2deg(np.arctan2(y, x)) % 360
        lat = np.rad2deg(np.arcsin(z/r))
    elif lon < 0 or lon > 360:
        #latitude already valid: only the longitude needs wrapping
        lon = lon % 360
    return lat, lon
@jit(nopython=True)
def _topo_pos(jd, lat, lon, elev, dt, radians):
    """Topocentric RA, dec, and hour angle for one observation (degrees, or radians if radians=True)."""
    lat, lon = _norm_lat_lon(lat, lon)
    ra, dec, ha = _sun_topo_ra_decl_hour(lat, lon, elev, jd, dt)
    if not radians:
        return ra, dec, ha
    return np.deg2rad(ra), np.deg2rad(dec), np.deg2rad(ha)
#vectorized over every argument so the public API accepts array inputs
_topo_pos_v = np.vectorize(_topo_pos)
@jit(nopython=True)
def _pos(jd, lat, lon, elev, temp, press, atmos_refract, dt, radians):
    """Azimuth, zenith, topocentric RA, dec, and hour angle for one observation."""
    lat, lon = _norm_lat_lon(lat, lon)
    ra, dec, ha = _sun_topo_ra_decl_hour(lat, lon, elev, jd, dt)
    #delta_e (the refraction correction) is computed but not part of this API
    az, zen, delta_e = _sun_topo_azimuth_zenith(lat, dec, ha, temp, press, atmos_refract)
    if not radians:
        return az, zen, ra, dec, ha
    return np.deg2rad(az), np.deg2rad(zen), np.deg2rad(ra), np.deg2rad(dec), np.deg2rad(ha)
#vectorized over every argument so the public API accepts array inputs
_pos_v = np.vectorize(_pos)
@np.vectorize
def julian_day(dt):
    """Convert UTC datetimes or UTC timestamps to fractional Julian days.

    Parameters
    ----------
    dt : array_like
        UTC datetime objects or UTC timestamps (as per datetime.utcfromtimestamp)

    Returns
    -------
    jd : ndarray
        Fractional Julian days corresponding to dt
    """
    #split into a calendar-time tuple, then apply the Julian day formula
    return _julian_day(_calendar_time(dt))
@jit
def arcdist(p0, p1, radians=False):
    """Angular distance between (azimuth, zenith) pairs.

    Parameters
    ----------
    p0 : array_like, shape (..., 2)
    p1 : array_like, shape (..., 2)
        p[..., 0] = azimuth angles, p[..., 1] = zenith angles
    radians : boolean (default False)
        If False, angles are in degrees, otherwise in radians

    Returns
    -------
    ad : array_like, shape is broadcast(p0, p1).shape
        Arcdistance between corresponding pairs in p0, p1,
        in degrees by default, in radians if radians=True
    """
    #Equivalent to converting both points to cartesian unit vectors, taking
    #the dot product for the cosine of the separation, then arccos, with the
    #algebra simplified for real inputs.
    p0, p1 = np.asarray(p0), np.asarray(p1)
    if not radians:
        p0, p1 = np.deg2rad(p0), np.deg2rad(p1)
    az0, zn0 = p0[..., 0], p0[..., 1]
    az1, zn1 = p1[..., 0], p1[..., 1]
    d = np.arccos(np.cos(zn0)*np.cos(zn1) + np.cos(az0 - az1)*np.sin(zn0)*np.sin(zn1))
    return d if radians else np.rad2deg(d)
def topocentric_sunpos(dt, latitude, longitude, elevation, delta_t=0, radians=False):
    """Compute the topocentric coordinates of the sun as viewed at the given time and location.

    Parameters
    ----------
    dt : array_like of datetime or float
        UTC datetime objects or UTC timestamps (as per datetime.utcfromtimestamp) representing the times of observations
    latitude, longitude : array_like of float
        decimal degrees, positive for north of the equator and east of Greenwich
    elevation : array_like of float
        meters, relative to the WGS-84 ellipsoid
    delta_t : array_like of float, optional
        seconds, default is 0, difference between the earth's rotation time (TT) and universal time (UT)
    radians : bool, optional
        return results in radians if True, degrees if False (default)

    Returns
    -------
    right_ascension : ndarray, topocentric
    declination : ndarray, topocentric
    hour_angle : ndarray, topocentric
    """
    #convert times once, then let the vectorized kernel broadcast everything
    return _topo_pos_v(julian_day(dt), latitude, longitude, elevation, delta_t, radians)
def sunpos(dt, latitude, longitude, elevation, temperature=None, pressure=None, atmos_refract=None, delta_t=0, radians=False):
    """Compute the observed and topocentric coordinates of the sun as viewed at the given time and location.

    Parameters
    ----------
    dt : array_like of datetime or float
        UTC datetime objects or UTC timestamps (as per datetime.utcfromtimestamp) representing the times of observations
    latitude, longitude : array_like of float
        decimal degrees, positive for north of the equator and east of Greenwich
    elevation : array_like of float
        meters, relative to the WGS-84 ellipsoid
    temperature : None or array_like of float, optional
        celsius, default is 14.6 (global average in 2013)
    pressure : None or array_like of float, optional
        millibar, default is 1013 (global average)
    atmos_refract : None or array_like of float, optional
        Atmospheric refraction at sunrise and sunset, in degrees. Default is 0.5667
    delta_t : array_like of float, optional
        seconds, default is 0, difference between the earth's rotation time (TT) and universal time (UT)
    radians : bool, optional
        return results in radians if True, degrees if False (default)

    Returns
    -------
    azimuth_angle : ndarray, measured eastward from north
    zenith_angle : ndarray, measured down from vertical
    right_ascension : ndarray, topocentric
    declination : ndarray, topocentric
    hour_angle : ndarray, topocentric
    """
    #fill in global-average defaults for any unspecified atmospheric parameters
    temperature = 14.6 if temperature is None else temperature
    pressure = 1013 if pressure is None else pressure
    atmos_refract = 0.5667 if atmos_refract is None else atmos_refract
    jd = julian_day(dt)
    return _pos_v(jd, latitude, longitude, elevation, temperature, pressure, atmos_refract, delta_t, radians)
def observed_sunpos(dt, latitude, longitude, elevation, temperature=None, pressure=None, atmos_refract=None, delta_t=0, radians=False):
    """Compute the observed coordinates of the sun as viewed at the given time and location.

    Parameters
    ----------
    dt : array_like of datetime or float
        UTC datetime objects or UTC timestamps (as per datetime.utcfromtimestamp) representing the times of observations
    latitude, longitude : array_like of float
        decimal degrees, positive for north of the equator and east of Greenwich
    elevation : array_like of float
        meters, relative to the WGS-84 ellipsoid
    temperature : None or array_like of float, optional
        celsius, default is 14.6 (global average in 2013)
    pressure : None or array_like of float, optional
        millibar, default is 1013 (global average)
    atmos_refract : None or array_like of float, optional
        Atmospheric refraction at sunrise and sunset, in degrees. Default is 0.5667
    delta_t : array_like of float, optional
        seconds, default is 0, difference between the earth's rotation time (TT) and universal time (UT)
    radians : bool, optional
        return results in radians if True, degrees if False (default)

    Returns
    -------
    azimuth_angle : ndarray, measured eastward from north
    zenith_angle : ndarray, measured down from vertical
    """
    #sunpos returns (azimuth, zenith, RA, dec, H); only the first two are the
    #observed coordinates
    full = sunpos(dt, latitude, longitude, elevation, temperature, pressure, atmos_refract, delta_t, radians)
    return full[:2]
def test():
    """Self-check: recompute every intermediate SPA quantity for the
    observations listed in test_1.txt and print the RMS error of each stage
    against the NREL reference values."""
    test_file = 'test_1.txt'
    # Parse and compare results from https://midcdmz.nrel.gov/solpos/spa.html
    param_names = ['syear','smonth','sday','eyear','emonth','eday','otype','step','stepunit','hr','min','sec','latitude','longitude','timezone','elev','press','temp','dut1','deltat','azmrot','slope','refract']
    param_dtype = np.dtype([(name, float) for name in param_names])
    #the query parameters occupy a single row near the top of the file
    params = np.loadtxt(test_file, param_dtype, delimiter=',', skiprows=2, max_rows=1)
    #one record per observation time; field names match the reference columns
    row_type = np.dtype([
        ('Date_M/D/YYYY', 'S10'),
        ('Time_H:MM:SS', 'S8'),
        ('Topo_zen', float),
        ('Topo_az', float),
        ('Julian_day', float),
        ('Julian_century', float),
        ('Julian_ephemeris_day', float),
        ('Julian_ephemeris_century', float),
        ('Julian_ephemeris_millennium', float),
        ('Earth_heliocentric_longitude', float),
        ('Earth_heliocentric_latitude', float),
        ('Earth_radius_vector', float),
        ('Geocentric_longitude', float),
        ('Geocentric_latitude', float),
        ('Mean_elongation', float),
        ('Mean_anomaly_sun', float),
        ('Mean_anomaly_moon', float),
        ('Argument_latitude_moon', float),
        ('Ascending_longitude_moon', float),
        ('Nutation_longitude', float),
        ('Nutation_obliquity', float),
        ('Ecliptic_mean_obliquity', float),
        ('Ecliptic_true_obliquity', float),
        ('Aberration_correction', float),
        ('Apparent_sun_longitude', float),
        ('Greenwich_mean_sidereal_time', float),
        ('Greenwich_sidereal_time', float),
        ('Geocentric_sun_right_ascension', float),
        ('Geocentric_sun_declination', float),
        ('Observer_hour_angle', float),
        ('Sun_equatorial_horizontal_parallax', float),
        ('Sun_right_ascension_parallax', float),
        ('Topo_sun_declination', float),
        ('Topo_sun_right_ascension', float),
        ('Topo_local_hour_angle', float),
        ('Topo_elevation_angle_uncorrected', float),
        ('Atmospheric_refraction_correction', float),
        ('Topo_elevation_angle_corrected', float),
        ('Equation_of_time', float),
        ('Sunrise_hour_angle', float),
        ('Sunset_hour_angle', float),
        ('Sun_transit_altitude', float)])
    true_data = np.loadtxt(test_file, row_type, delimiter=',', skiprows=4)
    def to_datetime(date_time_pair):
        #join the date and time byte-string columns and parse the pair
        s = str(b' '.join(date_time_pair),'UTF-8')
        return datetime.strptime(s, '%m/%d/%Y %H:%M:%S')
    def angle_diff(a1, a2, period=2*np.pi):
        """(a1 - a2 + d) % (2*d) - d; d = period/2"""
        d = period/2
        return ((a1 - a2 + d) % (period)) - d
    dts = [to_datetime(dt_pair) for dt_pair in true_data[['Date_M/D/YYYY','Time_H:MM:SS']]]
    lat,lon,elev,temp,press,deltat = params['latitude'],params['longitude'],params['elev'],params['temp'],params['press'],params['deltat']
    all_errs = []
    for dt,truth in zip(dts,true_data):
        #recompute the full SPA pipeline for this observation time
        t = _calendar_time(dt)
        jd = _julian_day(t) #Julian_day
        jde = _julian_ephemeris_day(jd, deltat) #Julian_ephemeris_day
        jce = _julian_century(jde) #Julian_ephemeris_century
        jme = _julian_millennium(jce) #Julian_ephemeris_millenium
        L,B,R = _heliocentric_position(jme) #Earth_heliocentric_longitude, Earth_heliocentric_latitude, Earth_radius_vector
        delta_psi, epsilon = _nutation_obliquity(jce) #Nutation_longitude, Ecliptic_true_obliquity
        theta,beta = _geocentric_position((L,B,R)) #Geocentric_longitude, Geocentric_latitude
        delta_tau = _abberation_correction(R) #Aberration_correction
        llambda, beta = _sun_longitude((L,B,R), delta_psi) #Apparent_sun_longitude, Geocentric_latitude (identical to previous)
        v = _greenwich_sidereal_time(jd, delta_psi, epsilon) #Greenwich_sidereal_time
        alpha, delta = _sun_ra_decl(llambda, epsilon, beta) #Geocentric_sun_right_ascension, Geocentric_sun_declination
        alpha_p, delta_p, H_p = _sun_topo_ra_decl_hour(lat,lon,elev,jd,deltat) #Topo_sun_right_ascension, Topo_sun_declination, Topo_local_hour_angle
        az, zen, delta_e = _sun_topo_azimuth_zenith(lat,delta_p,H_p,temp,press) #Topo_az, Topo_zen, Atmospheric_refraction_correction
        #per-stage differences against the reference row; angular quantities
        #that wrap at 360 degrees go through angle_diff
        jd_err = jd - truth['Julian_day']
        jde_err = jde - truth['Julian_ephemeris_day']
        jce_err = jce - truth['Julian_ephemeris_century']
        jme_err = jme - truth['Julian_ephemeris_millennium']
        L_err = L - truth['Earth_heliocentric_longitude']
        B_err = B - truth['Earth_heliocentric_latitude']
        R_err = R - truth['Earth_radius_vector']
        delta_psi_err = delta_psi - truth['Nutation_longitude']
        epsilon_err = epsilon - truth['Ecliptic_true_obliquity']
        theta_err = theta - truth['Geocentric_longitude']
        beta_err = beta - truth['Geocentric_latitude']
        delta_tau_err = delta_tau - truth['Aberration_correction']
        lambda_err = llambda - truth['Apparent_sun_longitude']
        v_err = v - truth['Greenwich_sidereal_time']
        alpha_err = alpha - truth['Geocentric_sun_right_ascension']
        delta_err = delta - truth['Geocentric_sun_declination']
        alpha_prime_err = alpha_p - truth['Topo_sun_right_ascension']
        delta_prime_err = delta_p - truth['Topo_sun_declination']
        H_prime_err = angle_diff(H_p, truth['Topo_local_hour_angle'], 360)
        az_err = angle_diff(az, truth['Topo_az'], 360)
        delta_e_err = delta_e - truth['Atmospheric_refraction_correction']
        zen_err = zen - truth['Topo_zen']
        all_errs.append([jd_err,jde_err,jce_err,jme_err,L_err,B_err,R_err,delta_psi_err,
                         epsilon_err,theta_err,beta_err,delta_tau_err,lambda_err,
                         v_err,alpha_err,delta_err,alpha_prime_err,delta_prime_err,
                         H_prime_err,az_err,delta_e_err, zen_err])
    rms_err = np.sqrt(np.mean(np.array(all_errs)**2,0))
    err_names = ['Julian day', 'Julian ephemeris day', 'Julian ephemeris century', 'Julian ephemeris millennium', 'Earth heliocentric longitude', 'Earth heliocentric latitude', 'Earth radius vector', 'Nutation longitude', 'Ecliptic true obliquity', 'Geocentric longitude', 'Geocentric latitude', 'Aberration correction', 'Apparent sun longitude', 'Greenwich sidereal time', 'Geocentric sun right ascension', 'Geocentric sun declination', 'Topo sun right ascension', 'Topo sun declination', 'Topo local hour angle', 'Topo az', 'Atmospheric_refraction_correction','Topo zen']
    print('RMS Errors')
    for n, e in zip(err_names, rms_err):
        print('{}: {}'.format(n, e))
def main(args):
    """Command-line entry point: compute and print the sun position described
    by the parsed arguments.

    args is an argparse-style namespace with fields t (time), lat, lon, elev,
    temp, p (pressure), dt (delta_t seconds), rad (radians flag), and csv.
    """
    # BUG FIX: the previous call passed args.dt and args.rad positionally as
    # the 7th and 8th arguments, which sunpos binds to atmos_refract and
    # delta_t respectively (its signature is ..., atmos_refract=None,
    # delta_t=0, radians=False) -- so delta_t got the radians flag and the
    # radians option was silently ignored. Bind them by keyword instead.
    az, zen, ra, dec, h = sunpos(args.t, args.lat, args.lon, args.elev,
                                 args.temp, args.p,
                                 delta_t=args.dt, radians=args.rad)
    if args.csv:
        #machine readable
        print('{t}, {dt}, {lat}, {lon}, {elev}, {temp}, {p}, {az}, {zen}, {ra}, {dec}, {h}'.format(t=args.t, dt=args.dt, lat=args.lat, lon=args.lon, elev=args.elev,temp=args.temp, p=args.p,az=az, zen=zen, ra=ra, dec=dec, h=h))
    else:
        dr='deg'
        if args.rad:
            dr='rad'
        print("Computing sun position at T = {t} + {dt} s".format(t=args.t, dt=args.dt))
        print("Lat, Lon, Elev = {lat} deg, {lon} deg, {elev} m".format(lat=args.lat, lon=args.lon, elev=args.elev))
        print("T, P = {temp} C, {press} mbar".format(temp=args.temp, press=args.p))
        print("Results:")
        print("Azimuth, zenith = {az} {dr}, {zen} {dr}".format(az=az,zen=zen,dr=dr))
        print("RA, dec, H = {ra} {dr}, {dec} {dr}, {h} {dr}".format(ra=ra, dec=dec, h=h, dr=dr))
if __name__ == '__main__':
    # NOTE(review): `args` is not defined anywhere visible in this module --
    # it is presumably produced by an argparse parser defined elsewhere in the
    # file (or lost in a refactor). Confirm before running as a script.
    if args.test:
        test()
    else:
        main(args)
| |
"""Disables an account"""
from baseCmd import *
from baseResponse import *
class disableAccountCmd (baseCmd):
    """Request object for the asynchronous disableAccount API call.

    Parameters (all unset/None by default):
      lock (boolean, required) -- if true, only lock the account; else disable it
      account (string) -- disables the specified account
      domainid (uuid) -- disables the specified account in this domain
      id (uuid) -- account id
    """
    typeInfo = {}
    def __init__(self):
        self.isAsync = "true"
        #create one attribute slot per API parameter and record its wire type,
        #in the same order the generator emitted them
        for param, wire_type in (('lock', 'boolean'),
                                 ('account', 'string'),
                                 ('domainid', 'uuid'),
                                 ('id', 'uuid')):
            setattr(self, param, None)
            self.typeInfo[param] = wire_type
        self.required = ["lock", ]
class disableAccountResponse (baseResponse):
    """Response entity for the disableAccount API call.

    Each attribute mirrors one key of the API response; the matching typeInfo
    entry records the wire type. All fields default to None (or [] for the
    user list) until filled in by the response marshaller.
    """
    # NOTE(review): typeInfo is a class-level dict and __init__ writes into it,
    # so the mapping is shared across instances (code-generator convention).
    typeInfo = {}
    def __init__(self):
        """the id of the account"""
        self.id = None
        self.typeInfo['id'] = 'string'
        """details for the account"""
        self.accountdetails = None
        self.typeInfo['accountdetails'] = 'map'
        """account type (admin, domain-admin, user)"""
        self.accounttype = None
        self.typeInfo['accounttype'] = 'short'
        """the total number of cpu cores available to be created for this account"""
        self.cpuavailable = None
        self.typeInfo['cpuavailable'] = 'string'
        """the total number of cpu cores the account can own"""
        self.cpulimit = None
        self.typeInfo['cpulimit'] = 'string'
        """the total number of cpu cores owned by account"""
        self.cputotal = None
        self.typeInfo['cputotal'] = 'long'
        """the default zone of the account"""
        self.defaultzoneid = None
        self.typeInfo['defaultzoneid'] = 'string'
        """name of the Domain the account belongs too"""
        self.domain = None
        self.typeInfo['domain'] = 'string'
        """id of the Domain the account belongs too"""
        self.domainid = None
        self.typeInfo['domainid'] = 'string'
        """the total number of public ip addresses available for this account to acquire"""
        self.ipavailable = None
        self.typeInfo['ipavailable'] = 'string'
        """the total number of public ip addresses this account can acquire"""
        self.iplimit = None
        self.typeInfo['iplimit'] = 'string'
        """the total number of public ip addresses allocated for this account"""
        self.iptotal = None
        self.typeInfo['iptotal'] = 'long'
        """true if the account requires cleanup"""
        self.iscleanuprequired = None
        self.typeInfo['iscleanuprequired'] = 'boolean'
        """true if account is default, false otherwise"""
        self.isdefault = None
        self.typeInfo['isdefault'] = 'boolean'
        """the total memory (in MB) available to be created for this account"""
        self.memoryavailable = None
        self.typeInfo['memoryavailable'] = 'string'
        """the total memory (in MB) the account can own"""
        self.memorylimit = None
        self.typeInfo['memorylimit'] = 'string'
        """the total memory (in MB) owned by account"""
        self.memorytotal = None
        self.typeInfo['memorytotal'] = 'long'
        """the name of the account"""
        self.name = None
        self.typeInfo['name'] = 'string'
        """the total number of networks available to be created for this account"""
        self.networkavailable = None
        self.typeInfo['networkavailable'] = 'string'
        """the network domain"""
        self.networkdomain = None
        self.typeInfo['networkdomain'] = 'string'
        """the total number of networks the account can own"""
        self.networklimit = None
        self.typeInfo['networklimit'] = 'string'
        """the total number of networks owned by account"""
        self.networktotal = None
        self.typeInfo['networktotal'] = 'long'
        """the total primary storage space (in GiB) available to be used for this account"""
        self.primarystorageavailable = None
        self.typeInfo['primarystorageavailable'] = 'string'
        """the total primary storage space (in GiB) the account can own"""
        self.primarystoragelimit = None
        self.typeInfo['primarystoragelimit'] = 'string'
        """the total primary storage space (in GiB) owned by account"""
        self.primarystoragetotal = None
        self.typeInfo['primarystoragetotal'] = 'long'
        """the total number of projects available for administration by this account"""
        self.projectavailable = None
        self.typeInfo['projectavailable'] = 'string'
        """the total number of projects the account can own"""
        self.projectlimit = None
        self.typeInfo['projectlimit'] = 'string'
        """the total number of projects being administrated by this account"""
        self.projecttotal = None
        self.typeInfo['projecttotal'] = 'long'
        """the total number of network traffic bytes received"""
        self.receivedbytes = None
        self.typeInfo['receivedbytes'] = 'long'
        """the total secondary storage space (in GiB) available to be used for this account"""
        self.secondarystorageavailable = None
        self.typeInfo['secondarystorageavailable'] = 'string'
        """the total secondary storage space (in GiB) the account can own"""
        self.secondarystoragelimit = None
        self.typeInfo['secondarystoragelimit'] = 'string'
        """the total secondary storage space (in GiB) owned by account"""
        self.secondarystoragetotal = None
        self.typeInfo['secondarystoragetotal'] = 'long'
        """the total number of network traffic bytes sent"""
        self.sentbytes = None
        self.typeInfo['sentbytes'] = 'long'
        """the total number of snapshots available for this account"""
        self.snapshotavailable = None
        self.typeInfo['snapshotavailable'] = 'string'
        """the total number of snapshots which can be stored by this account"""
        self.snapshotlimit = None
        self.typeInfo['snapshotlimit'] = 'string'
        """the total number of snapshots stored by this account"""
        self.snapshottotal = None
        self.typeInfo['snapshottotal'] = 'long'
        """the state of the account"""
        self.state = None
        self.typeInfo['state'] = 'string'
        """the total number of templates available to be created by this account"""
        self.templateavailable = None
        self.typeInfo['templateavailable'] = 'string'
        """the total number of templates which can be created by this account"""
        self.templatelimit = None
        self.typeInfo['templatelimit'] = 'string'
        """the total number of templates which have been created by this account"""
        self.templatetotal = None
        self.typeInfo['templatetotal'] = 'long'
        """the total number of virtual machines available for this account to acquire"""
        self.vmavailable = None
        self.typeInfo['vmavailable'] = 'string'
        """the total number of virtual machines that can be deployed by this account"""
        self.vmlimit = None
        self.typeInfo['vmlimit'] = 'string'
        """the total number of virtual machines running for this account"""
        self.vmrunning = None
        self.typeInfo['vmrunning'] = 'integer'
        """the total number of virtual machines stopped for this account"""
        self.vmstopped = None
        self.typeInfo['vmstopped'] = 'integer'
        """the total number of virtual machines deployed by this account"""
        self.vmtotal = None
        self.typeInfo['vmtotal'] = 'long'
        """the total volume available for this account"""
        self.volumeavailable = None
        self.typeInfo['volumeavailable'] = 'string'
        """the total volume which can be used by this account"""
        self.volumelimit = None
        self.typeInfo['volumelimit'] = 'string'
        """the total volume being used by this account"""
        self.volumetotal = None
        self.typeInfo['volumetotal'] = 'long'
        """the total number of vpcs available to be created for this account"""
        self.vpcavailable = None
        self.typeInfo['vpcavailable'] = 'string'
        """the total number of vpcs the account can own"""
        self.vpclimit = None
        self.typeInfo['vpclimit'] = 'string'
        """the total number of vpcs owned by account"""
        self.vpctotal = None
        self.typeInfo['vpctotal'] = 'long'
        """the list of users associated with account"""
        self.user = []
class user:
    """Nested entity: one user record within a disableAccountResponse.

    NOTE(review): the stray fourth quote opening each field string below is
    emitted by the API code generator; it is kept verbatim here (the strings
    are no-op statements either way).
    """
    def __init__(self):
        """"the user ID"""
        self.id = None
        """"the account name of the user"""
        self.account = None
        """"the account ID of the user"""
        self.accountid = None
        """"the account type of the user"""
        self.accounttype = None
        """"the api key of the user"""
        self.apikey = None
        """"the date and time the user account was created"""
        self.created = None
        """"the domain name of the user"""
        self.domain = None
        """"the domain ID of the user"""
        self.domainid = None
        """"the user email address"""
        self.email = None
        """"the user firstname"""
        self.firstname = None
        """"the boolean value representing if the updating target is in caller's child domain"""
        self.iscallerchilddomain = None
        """"true if user is default, false otherwise"""
        self.isdefault = None
        """"the user lastname"""
        self.lastname = None
        """"the secret key of the user"""
        self.secretkey = None
        """"the user state"""
        self.state = None
        """"the timezone user was created in"""
        self.timezone = None
        """"the user name"""
        self.username = None
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
import warnings
from typing import Dict, List, Optional
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
class GlueJobHook(AwsBaseHook):
"""
Interact with AWS Glue - create job, trigger, crawler
:param s3_bucket: S3 bucket where logs and local etl script will be uploaded
:param job_name: unique job name per AWS account
:param desc: job description
:param concurrent_run_limit: The maximum number of concurrent runs allowed for a job
:param script_location: path to etl script on s3
:param retry_limit: Maximum number of times to retry this job if it fails
:param num_of_dpus: Number of AWS Glue DPUs to allocate to this Job
:param region_name: aws region name (example: us-east-1)
:param iam_role_name: AWS IAM Role for Glue Job Execution
:param create_job_kwargs: Extra arguments for Glue Job Creation
"""
JOB_POLL_INTERVAL = 6 # polls job status after every JOB_POLL_INTERVAL seconds
def __init__(
self,
s3_bucket: Optional[str] = None,
job_name: Optional[str] = None,
desc: Optional[str] = None,
concurrent_run_limit: int = 1,
script_location: Optional[str] = None,
retry_limit: int = 0,
num_of_dpus: Optional[int] = None,
iam_role_name: Optional[str] = None,
create_job_kwargs: Optional[dict] = None,
*args,
**kwargs,
):
self.job_name = job_name
self.desc = desc
self.concurrent_run_limit = concurrent_run_limit
self.script_location = script_location
self.retry_limit = retry_limit
self.s3_bucket = s3_bucket
self.role_name = iam_role_name
self.s3_glue_logs = 'logs/glue-logs/'
self.create_job_kwargs = create_job_kwargs or {}
worker_type_exists = "WorkerType" in self.create_job_kwargs
num_workers_exists = "NumberOfWorkers" in self.create_job_kwargs
if worker_type_exists and num_workers_exists:
if num_of_dpus is not None:
raise ValueError("Cannot specify num_of_dpus with custom WorkerType")
elif not worker_type_exists and num_workers_exists:
raise ValueError("Need to specify custom WorkerType when specifying NumberOfWorkers")
elif worker_type_exists and not num_workers_exists:
raise ValueError("Need to specify NumberOfWorkers when specifying custom WorkerType")
elif num_of_dpus is None:
self.num_of_dpus = 10
else:
self.num_of_dpus = num_of_dpus
kwargs['client_type'] = 'glue'
super().__init__(*args, **kwargs)
def list_jobs(self) -> List:
""":return: Lists of Jobs"""
conn = self.get_conn()
return conn.get_jobs()
def get_iam_execution_role(self) -> Dict:
""":return: iam role for job execution"""
session, endpoint_url = self._get_credentials(region_name=self.region_name)
iam_client = session.client('iam', endpoint_url=endpoint_url, config=self.config, verify=self.verify)
try:
glue_execution_role = iam_client.get_role(RoleName=self.role_name)
self.log.info("Iam Role Name: %s", self.role_name)
return glue_execution_role
except Exception as general_error:
self.log.error("Failed to create aws glue job, error: %s", general_error)
raise
def initialize_job(
self,
script_arguments: Optional[dict] = None,
run_kwargs: Optional[dict] = None,
) -> Dict[str, str]:
"""
Initializes connection with AWS Glue
to run job
:return:
"""
glue_client = self.get_conn()
script_arguments = script_arguments or {}
run_kwargs = run_kwargs or {}
try:
job_name = self.get_or_create_glue_job()
job_run = glue_client.start_job_run(JobName=job_name, Arguments=script_arguments, **run_kwargs)
return job_run
except Exception as general_error:
self.log.error("Failed to run aws glue job, error: %s", general_error)
raise
def get_job_state(self, job_name: str, run_id: str) -> str:
"""
Get state of the Glue job. The job state can be
running, finished, failed, stopped or timeout.
:param job_name: unique job name per AWS account
:param run_id: The job-run ID of the predecessor job run
:return: State of the Glue job
"""
glue_client = self.get_conn()
job_run = glue_client.get_job_run(JobName=job_name, RunId=run_id, PredecessorsIncluded=True)
job_run_state = job_run['JobRun']['JobRunState']
return job_run_state
def job_completion(self, job_name: str, run_id: str) -> Dict[str, str]:
"""
Waits until Glue job with job_name completes or
fails and return final state if finished.
Raises AirflowException when the job failed
:param job_name: unique job name per AWS account
:param run_id: The job-run ID of the predecessor job run
:return: Dict of JobRunState and JobRunId
"""
failed_states = ['FAILED', 'TIMEOUT']
finished_states = ['SUCCEEDED', 'STOPPED']
while True:
job_run_state = self.get_job_state(job_name, run_id)
if job_run_state in finished_states:
self.log.info("Exiting Job %s Run State: %s", run_id, job_run_state)
return {'JobRunState': job_run_state, 'JobRunId': run_id}
if job_run_state in failed_states:
job_error_message = "Exiting Job " + run_id + " Run State: " + job_run_state
self.log.info(job_error_message)
raise AirflowException(job_error_message)
else:
self.log.info(
"Polling for AWS Glue Job %s current run state with status %s", job_name, job_run_state
)
time.sleep(self.JOB_POLL_INTERVAL)
def get_or_create_glue_job(self) -> str:
    """
    Ensure a Glue job named ``self.job_name`` exists and return its name.

    If the job is already registered its name is returned as-is;
    otherwise it is created first.

    :return:Name of the Job
    """
    glue_client = self.get_conn()
    try:
        get_job_response = glue_client.get_job(JobName=self.job_name)
        self.log.info("Job Already exist. Returning Name of the job")
        return get_job_response['Job']['Name']
    except glue_client.exceptions.EntityNotFoundException:
        self.log.info("Job doesn't exist. Now creating and running AWS Glue Job")
        if self.s3_bucket is None:
            raise AirflowException('Could not initialize glue job, error: Specify Parameter `s3_bucket`')
        s3_log_path = f's3://{self.s3_bucket}/{self.s3_glue_logs}{self.job_name}'
        execution_role = self.get_iam_execution_role()
        try:
            common_kwargs = dict(
                Name=self.job_name,
                Description=self.desc,
                LogUri=s3_log_path,
                Role=execution_role['Role']['Arn'],
                ExecutionProperty={"MaxConcurrentRuns": self.concurrent_run_limit},
                Command={"Name": "glueetl", "ScriptLocation": self.script_location},
                MaxRetries=self.retry_limit,
            )
            # When the caller pins WorkerType and NumberOfWorkers, MaxCapacity
            # is omitted -- AWS rejects requests that specify both.
            if "WorkerType" in self.create_job_kwargs and "NumberOfWorkers" in self.create_job_kwargs:
                create_job_response = glue_client.create_job(
                    **common_kwargs, **self.create_job_kwargs
                )
            else:
                create_job_response = glue_client.create_job(
                    MaxCapacity=self.num_of_dpus, **common_kwargs, **self.create_job_kwargs
                )
            return create_job_response['Name']
        except Exception as general_error:
            self.log.error("Failed to create aws glue job, error: %s", general_error)
            raise
class AwsGlueJobHook(GlueJobHook):
    """
    This hook is deprecated.
    Please use :class:`airflow.providers.amazon.aws.hooks.glue.GlueJobHook`.
    """
    def __init__(self, *args, **kwargs):
        # Thin backward-compatibility shim: warn, then defer entirely to
        # GlueJobHook.  stacklevel=2 attributes the warning to the caller's
        # line rather than this constructor.
        warnings.warn(
            "This hook is deprecated. "
            "Please use :class:`airflow.providers.amazon.aws.hooks.glue.GlueJobHook`.",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)
| |
# -*- coding: utf-8 -*-
"""
Yelp API v2.0 code sample.
This program demonstrates the capability of the Yelp API version 2.0
by using the Search API to query for businesses by a search term and location,
and the Business API to query additional information about the top result
from the search query.
Please refer to http://www.yelp.com/developers/documentation for the API
documentation.
This program requires the Python oauth2 library, which you can install via:
`pip install -r requirements.txt`.
Sample usage of the program:
`python sample.py --term="bars" --location="San Francisco, CA"`
"""
import argparse
import json
import pprint
import sys
import urllib
import urllib2
import oauth2
import csv
import os
API_HOST = 'api.yelp.com'
DEFAULT_TERM = 'dinner'
DEFAULT_LOCATION = 'Oklahoma City, OK'
SEARCH_LIMIT = 20
SEARCH_PATH = '/v2/search/'
BUSINESS_PATH = '/v2/business/'
CATEGORY_FILTER = 'coffee'
# OAuth credentials. NOTE: real-looking keys are committed below; they should
# be rotated and moved out of source control (e.g. environment variables).
CONSUMER_KEY = 'rpnKJ9PU9jNJ0vftf-796A'
CONSUMER_SECRET = 'Oi4Mhzd5LpOLu0-u_cXiU-OUK4k'
TOKEN = '7dQKVMB2Yw19MS_Gi1axF9bE4jOX8Gny'
TOKEN_SECRET = 'ZrZJtF0ZIiayrpXRraFiNj--kco'
def request(host, path, url_params=None):
    """Prepares OAuth authentication and sends the request to the API.
    Args:
        host (str): The domain host of the API.
        path (str): The path of the API after the domain.
        url_params (dict): An optional set of query parameters in the request.
    Returns:
        dict: The JSON response from the request.
    Raises:
        urllib2.HTTPError: An error occurs from the HTTP request.
    """
    url_params = url_params or {}
    # Percent-encode the path so unicode business ids survive URL building.
    url = 'https://{0}{1}?'.format(host, urllib.quote(path.encode('utf8')))
    consumer = oauth2.Consumer(CONSUMER_KEY, CONSUMER_SECRET)
    oauth_request = oauth2.Request(
        method="GET", url=url, parameters=url_params)
    # Standard OAuth 1.0a protocol parameters required by the Yelp v2 API.
    oauth_request.update(
        {
            'oauth_nonce': oauth2.generate_nonce(),
            'oauth_timestamp': oauth2.generate_timestamp(),
            'oauth_token': TOKEN,
            'oauth_consumer_key': CONSUMER_KEY
        }
    )
    token = oauth2.Token(TOKEN, TOKEN_SECRET)
    oauth_request.sign_request(
        oauth2.SignatureMethod_HMAC_SHA1(), consumer, token)
    signed_url = oauth_request.to_url()
    print u'Querying {0} ...'.format(url)
    conn = urllib2.urlopen(signed_url, None)
    # Close the connection even if the body fails to parse as JSON.
    try:
        response = json.loads(conn.read())
    finally:
        conn.close()
    return response
def search(term, location, offset):
    """Query the Search API for businesses near a location.

    NOTE(review): the ``term`` argument is currently IGNORED -- the query
    filters on the module-level CATEGORY_FILTER instead of the search term.
    Confirm whether that is intentional before relying on ``term``.

    Args:
        term (str): The search term (currently unused; see note above).
        location (str): The search location passed to the API.
        offset (int): Result offset used to page through the result set.
    Returns:
        dict: The JSON response from the request.
    """
    url_params = {
        'location': location.replace(' ', '+'),
        'category_filter': CATEGORY_FILTER,
        'limit': SEARCH_LIMIT,
        'offset': offset
    }
    return request(API_HOST, SEARCH_PATH, url_params=url_params)
def get_business(business_id):
    """Query the Business API by a business ID.
    Args:
        business_id (str): The ID of the business to query.
    Returns:
        dict: The JSON response from the request.
    """
    return request(API_HOST, BUSINESS_PATH + business_id)
def query_api(term, location):
"""Queries the API by the input values from the user.
Args:
term (str): The search term to query.
location (str): The location of the business to query.
"""
i = 0
offset = 0
businesses = []
while i < 140:
response = search(term, location, i)
newBusinesses = response.get('businesses')
businesses.extend(newBusinesses)
i = i + 20
if not businesses:
print u'No businesses for {0} in {1} found.'.format(term, location)
return
#business_id = businesses[0]['id']
for business in businesses:
print business['name']
writeCSV(businesses)
print u'Done!'
# print u'{0} businesses found, querying business info ' \
# 'for the top result "{1}" ...'.format(
# len(businesses), business_id)
# response = get_business(business_id)
#
# print u'Result for business "{0}" found:'.format(business_id)
# pprint.pprint(response, indent=2)
def writeCSV(businesses):
    """Write name/rating/address/coordinates of each business to yelp.csv.

    Any existing yelp.csv is replaced.  Non-ASCII characters in names and
    addresses are dropped rather than transliterated.

    Args:
        businesses (list): Business dicts as returned by the Search API.
    """
    if os.path.exists('yelp.csv'):
        os.remove('yelp.csv')
    # Python 2's csv module requires the file opened in binary mode;
    # text mode produces extra blank rows on Windows (see csv docs).
    with open('yelp.csv', 'wb') as csvfile:
        fieldnames = ['name', 'rating', 'location-street', 'location-lat', 'location-long']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for business in businesses:
            # Some listings have an empty address list.
            if len(business['location']['address']) > 0:
                address = business['location']['address'][0].encode('ASCII', 'ignore')
            else:
                address = 'null'
            row = {
                'name': business['name'].encode('ascii', 'ignore'),
                'rating': business['rating'],
                'location-street': address,
                'location-lat': business['location']['coordinate']['latitude'],
                'location-long': business['location']['coordinate']['longitude']
            }
            writer.writerow(row)
def main():
    """Parse command-line options and run the Yelp query."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-q', '--term', dest='term', default=DEFAULT_TERM,
                        type=str, help='Search term (default: %(default)s)')
    parser.add_argument('-l', '--location', dest='location',
                        default=DEFAULT_LOCATION, type=str,
                        help='Search location (default: %(default)s)')
    opts = parser.parse_args()
    try:
        query_api(opts.term, opts.location)
    except urllib2.HTTPError as error:
        sys.exit(
            'Encountered HTTP error {0}. Abort program.'.format(error.code))
if __name__ == '__main__':
    main()
| |
#!/usr/bin/python
#
# To make our hack work, we change CC and LD to point at this script, and this
# script will swap out all the OSX-specific flags for iOS-specific flags.
#
import glob
import os
import re
import subprocess
import sys
def get_developer_dir():
    """Return the Xcode Developer directory.

    Honors $DEVELOPER_DIR when set; otherwise deduces the directory from
    the location of the xcodebuild binary that Xcode put on our PATH.
    """
    developer_dir = os.environ.get('DEVELOPER_DIR')
    if developer_dir is not None:
        return developer_dir
    # Before building, Xcode runs clang in a special mode that dumps a bunch
    # of the internal macros that are set. At that point DEVELOPER_DIR is not
    # yet in the environment, but Xcode has prepended
    # '/Applications/Xcode.app/Contents/Developer/usr/bin' to the PATH, so
    # `which xcodebuild` resolves inside the developer dir, e.g.
    # /Applications/Xcode.app/Contents/Developer/usr/bin/xcodebuild,
    # and we can walk two levels up from its directory.
    xcodebuild_path = subprocess.check_output(
        ['/usr/bin/which', 'xcodebuild']).strip()
    usr_bin_path = os.path.dirname(xcodebuild_path)
    return os.path.normpath(os.path.join(usr_bin_path, '..', '..'))
def get_clang_path():
    """Absolute path of the default-toolchain clang under the developer dir."""
    toolchain_clang = 'Toolchains/XcodeDefault.xctoolchain/usr/bin/clang'
    return os.path.join(get_developer_dir(), toolchain_clang)
def get_path_for_iphonesimulator_platform(developer_dir):
    """Build the PATH string used when targeting the iPhoneSimulator platform."""
    toolchain = os.path.join(developer_dir, 'Toolchains/XcodeDefault.xctoolchain')
    platform_dir = os.path.join(developer_dir, 'Platforms/iPhoneSimulator.platform')
    entries = [
        os.path.join(toolchain, 'usr/bin'),
        os.path.join(toolchain, 'usr/libexec'),
        os.path.join(platform_dir, 'Developer/usr/bin'),
        os.path.join(platform_dir, 'Developer/usr/local/bin'),
        os.path.join(platform_dir, 'usr/bin'),
        os.path.join(platform_dir, 'usr/local/bin'),
        '/usr/bin',
        '/usr/local/bin',
        '/Tools',
        '/usr/bin',
        '/bin',
        '/usr/sbin',
        '/sbin',
    ]
    return ':'.join(entries)
def get_env_for_iphonesimulator_platform(developer_dir):
    """Copy of the current environment with PATH pointed at the simulator platform.

    MACOSX_DEPLOYMENT_TARGET is dropped so it cannot leak into iOS builds.
    """
    env = os.environ.copy()
    env.pop('MACOSX_DEPLOYMENT_TARGET', None)
    env['PATH'] = get_path_for_iphonesimulator_platform(developer_dir)
    return env
def get_args_without_osxims():
    """Return sys.argv[1:] with OSX-specific flags filtered out.

    Drops: '-isysroot <MacOSX SDK>' pairs, any '-mmacosx-version-min*'
    flag, and the Xcode5 default OSX framework search path.

    Returns:
        list: the surviving arguments, in their original order.
    """
    new_argv = []
    i = 1
    while i < len(sys.argv):
        arg = sys.argv[i]
        next_arg = sys.argv[i + 1] if (i + 1 < len(sys.argv)) else None
        # Guard next_arg against None: without it, '-isysroot' as the final
        # argument raised "TypeError: argument of type 'NoneType' is not
        # iterable".
        if arg == '-isysroot' and next_arg is not None and 'SDKs/MacOSX' in next_arg:
            # skip the pair, it's referencing the OSX SDK.
            i = i + 2
        elif '-mmacosx-version-min' in arg:
            # clang won't be OK with '-mmacosx-version-min' and
            # '-miphoneos-version-min' being passed at the same time.
            i = i + 1
        elif (arg == '-F/Applications/Xcode.app/Contents/'
              'Developer/Library/Frameworks'):
            # skip - for some reason Xcode5 always includes this framework path
            # and it's causing ld to select the wrong version of SenTestingKit:
            #
            # ld: building for iOS Simulator, but linking against dylib built for
            # MacOSX file '/Applications/Xcode.app/Contents/Developer/Library/
            # Frameworks/SenTestingKit.framework/SenTestingKit' for
            # architecture i386
            #
            # It's also always using /Application/Xcode.app even when the active
            # Xcode is something else.
            i = i + 1
        else:
            i = i + 1
            new_argv.append(arg)
    return new_argv
def get_iphonesimulator_sdk_versions_for_arch(developer_dir, arch):
    """List installed iPhoneSimulator SDK versions usable for `arch`, ascending."""
    pattern = os.path.join(
        developer_dir,
        'Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator*.*.sdk')
    sdk_versions = []
    for sdk_path in sorted(glob.glob(pattern)):
        sdk_name = os.path.basename(sdk_path)
        sdk_versions.append(
            re.match(r'iPhoneSimulator(.+?)\.sdk', sdk_name).group(1))
    # Only 7.0+ supports x86_64 builds.
    if arch == 'x86_64':
        sdk_versions = [
            version for version in sdk_versions if float(version) >= 7.0]
    if not sdk_versions:
        raise Exception('No matching SDK for arch %s' % arch)
    return sdk_versions
def get_latest_iphonesimulator_sdk_version_arch(developer_dir, arch):
    """Newest installed simulator SDK version usable for `arch`."""
    return get_iphonesimulator_sdk_versions_for_arch(developer_dir, arch)[-1]
def get_earliest_iphonesimulator_sdk_version_arch(developer_dir, arch):
    """Oldest installed simulator SDK version usable for `arch`."""
    return get_iphonesimulator_sdk_versions_for_arch(developer_dir, arch)[0]
def get_iphonesimulator_sdk_path(developer_dir, version):
    """Absolute path of the iPhoneSimulator SDK for the given version."""
    relative = ('Platforms/iPhoneSimulator.platform/Developer/SDKs/'
                'iPhoneSimulator{0}.sdk'.format(version))
    return '{0}/{1}'.format(developer_dir, relative)
def get_args_for_iphonesimulator_platform(developer_dir, sdk_version, deployment_target):
    """Translate the current argv into iPhoneSimulator-targeting arguments."""
    isysroot = get_iphonesimulator_sdk_path(developer_dir, sdk_version)
    return get_args_without_osxims() + [
        '-isysroot', isysroot,
        '-F%s/Developer/Library/Frameworks' % isysroot,
        '-F%s/../../Library/Frameworks' % isysroot,
        '-mios-simulator-version-min=%s' % deployment_target,
    ]
def parse_sdk_version_and_deployment_target_from_script_name(
        developer_dir, script_name):
    """Decode (clang_name, tool, sdk_version, deployment_target) from the
    wrapper script's own filename.

    Expected filename shape:
        [CLANG_NAME-]TOOL-iphonesimulator-SDK-targeting-TARGET
    where TOOL is 'cc' or 'ld' and SDK/TARGET are version numbers or the
    symbolic labels 'latest' / 'earliest'.
    """
    clang_name = None
    tool = None
    sdk_version_label = None
    deployment_target_label = None
    pattern = re.compile(r'^(?:(?P<clang_name>clang(?:\+\+)?)-)?(?P<tool>cc|ld)-iphonesimulator-(?P<sdk>.*?)-targeting-(?P<target>.*?)$')
    match = pattern.match(script_name)
    if match:
        # The CLANG_NAME prefix is optional; default to plain 'clang'.
        clang_name = match.group('clang_name') or 'clang'
        tool = match.group('tool')
        sdk_version_label = match.group('sdk')
        deployment_target_label = match.group('target')
    else:
        raise Exception(
            'script_name was not formatted as '
            'TOOL-iphonesimulator-VERSION-targeting-VERSION or '
            'CLANG_NAME-TOOL-iphonesimulator-VERSION-targeting-VERSION')
    # NOTE: both bounds are computed unconditionally (this scans the installed
    # SDK directory and may raise) even when the labels are literal versions.
    latest_sdk_version = get_latest_iphonesimulator_sdk_version_arch(
        developer_dir, get_arch_from_args())
    earliest_sdk_version = get_earliest_iphonesimulator_sdk_version_arch(
        developer_dir, get_arch_from_args())
    def version_from_label(label):
        # Map symbolic labels onto concrete installed SDK versions; pass
        # literal version strings through unchanged.
        if label == 'latest':
            return latest_sdk_version
        elif label == 'earliest':
            return earliest_sdk_version
        else:
            return label
    sdk_version = version_from_label(sdk_version_label)
    deployment_target = version_from_label(deployment_target_label)
    return (clang_name, tool, sdk_version, deployment_target)
def get_arch_from_args():
    """Return the value following '-arch' in sys.argv.

    Returns None when '-arch' is the final argument; raises when no
    '-arch' flag is present at all.
    """
    argv = sys.argv
    for index, arg in enumerate(argv):
        if arg == '-arch':
            return argv[index + 1] if index + 1 < len(argv) else None
    raise Exception('Did not find the -arch argument')
developer_dir = get_developer_dir()
# The wrapper's own filename encodes which tool to emulate and which SDK /
# deployment target to use.
script_name = os.path.basename(sys.argv[0])
(clang_name, tool, sdk_version, deployment_target) = \
    parse_sdk_version_and_deployment_target_from_script_name(
        developer_dir, script_name)
new_env = get_env_for_iphonesimulator_platform(developer_dir)
new_argv = get_args_for_iphonesimulator_platform(
    developer_dir, sdk_version, deployment_target)
# Always be verbose, otherwise the build logs are entirely confusing because
# they only show OSX-specific args.
new_argv.append('-v')
if tool == 'ld':
    new_env['IPHONEOS_DEPLOYMENT_TARGET'] = deployment_target
clang_path = os.path.join(
    developer_dir,
    'Toolchains/XcodeDefault.xctoolchain/usr/bin/' + clang_name)
# Replace this process with the real clang; execve does not return on success.
os.execve(clang_path, [clang_path] + new_argv, new_env)
| |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A tool to extract size information for chrome, executed by buildbot.
When this is run, the current directory (cwd) should be the outer build
directory (e.g., chrome-release/build/).
For a list of command-line options, call this script with '--help'.
"""
import errno
import json
import platform
import optparse
import os
import re
import stat
import subprocess
import sys
import tempfile
from slave import build_directory
SRC_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', '..', '..'))
class ResultsCollector(object):
  """Accumulates named size measurements for later JSON serialization."""
  def __init__(self):
    # Maps result name -> {'identifier', 'value', 'units'}.
    self.results = {}
  def add_result(self, name, identifier, value, units):
    # Each result name may only be reported once per run.
    assert name not in self.results
    self.results[name] = {
        'identifier': identifier,
        'value': int(value),
        'units': units
    }
    # Legacy printing, previously used for parsing the text logs.
    print 'RESULT %s: %s= %s %s' % (name, identifier, value, units)
def get_size(filename):
  """Size of `filename` in bytes."""
  return os.stat(filename).st_size
def get_linux_stripped_size(filename):
  """Strip a copy of `filename` with eu-strip and report its size.

  Returns a (result, size) tuple: the eu-strip exit status (0 on
  success) and the stripped file's size in bytes (0 when stripping
  failed).
  """
  EU_STRIP_NAME = 'eu-strip'
  # |filename| is assumed to live in out/Release; the bundled tool is at
  # <src>/build/linux/bin/eu-strip.
  src_dir = os.path.dirname(os.path.dirname(os.path.dirname(filename)))
  eu_strip_path = os.path.join(src_dir, 'build', 'linux', 'bin', EU_STRIP_NAME)
  # Fall back to the eu-strip on $PATH on 64-bit hosts or when the bundled
  # copy is missing.
  if (platform.architecture()[0] == '64bit' or
      not os.path.exists(eu_strip_path)):
    eu_strip_path = EU_STRIP_NAME
  with tempfile.NamedTemporaryFile() as stripped_file:
    result, _ = run_process(0, [eu_strip_path, '-o', stripped_file.name,
                                filename])
    if result != 0:
      return (result, 0)
    return (result, get_size(stripped_file.name))
def run_process(result, command):
  """Run `command`, returning (result, stdout).

  `result` is a running first-error accumulator: if it is still 0 and
  the command exits non-zero, the command's exit status replaces it;
  an already-recorded error is preserved.
  """
  p = subprocess.Popen(command, stdout=subprocess.PIPE)
  stdout = p.communicate()[0]
  if p.returncode != 0:
    print 'ERROR from command "%s": %d' % (' '.join(command), p.returncode)
    if result == 0:
      result = p.returncode
  return result, stdout
def print_si_fail_hint(path_to_tool):
  """Print a hint regarding how to handle a static initializer failure.

  Args:
    path_to_tool: command line that reproduces the initializer dump.
  """
  print '# HINT: To get this list, run %s' % path_to_tool
  print '# HINT: diff against the log from the last run to see what changed'
def main_mac(options, args, results_collector):
  """Print appropriate size information about built Mac targets.
  Returns the first non-zero exit status of any command it executes,
  or zero on success.
  """
  build_dir = build_directory.GetBuildOutputDirectory(SRC_DIR)
  target_dir = os.path.join(build_dir, options.target)
  result = 0
  # Work with either build type.
  base_names = ('Chromium', 'Google Chrome')
  for base_name in base_names:
    # Derive the bundle/executable layout for this branding.
    app_bundle = base_name + '.app'
    framework_name = base_name + ' Framework'
    framework_bundle = framework_name + '.framework'
    framework_dsym_bundle = framework_bundle + '.dSYM'
    chromium_app_dir = os.path.join(target_dir, app_bundle)
    chromium_executable = os.path.join(chromium_app_dir,
                                       'Contents', 'MacOS', base_name)
    chromium_framework_dir = os.path.join(target_dir, framework_bundle)
    chromium_framework_executable = os.path.join(chromium_framework_dir,
                                                 framework_name)
    chromium_framework_dsym_dir = os.path.join(target_dir,
                                               framework_dsym_bundle)
    chromium_framework_dsym = os.path.join(chromium_framework_dsym_dir,
                                           'Contents', 'Resources', 'DWARF',
                                           framework_name)
    # Only the branding that was actually built is present on disk.
    if os.path.exists(chromium_executable):
      print_dict = {
          # Remove spaces in the names so any downstream processing is less
          # likely to choke.
          'app_name' : re.sub(r'\s', '', base_name),
          'app_bundle' : re.sub(r'\s', '', app_bundle),
          'framework_name' : re.sub(r'\s', '', framework_name),
          'framework_bundle' : re.sub(r'\s', '', framework_bundle),
          'app_size' : get_size(chromium_executable),
          'framework_size' : get_size(chromium_framework_executable)
      }
      # Collect the segment info out of the App
      result, stdout = run_process(result, ['size', chromium_executable])
      print_dict['app_text'], print_dict['app_data'], print_dict['app_objc'] = \
          re.search(r'(\d+)\s+(\d+)\s+(\d+)', stdout).groups()
      # Collect the segment info out of the Framework
      result, stdout = run_process(result, ['size',
                                            chromium_framework_executable])
      print_dict['framework_text'], print_dict['framework_data'], \
        print_dict['framework_objc'] = \
          re.search(r'(\d+)\s+(\d+)\s+(\d+)', stdout).groups()
      # Collect the whole size of the App bundle on disk (include the framework)
      result, stdout = run_process(result, ['du', '-s', '-k', chromium_app_dir])
      du_s = re.search(r'(\d+)', stdout).group(1)
      print_dict['app_bundle_size'] = (int(du_s) * 1024)
      # Count the number of files with at least one static initializer.
      # The pipeline pulls the __mod_init_func section size out of the
      # framework's load commands.
      pipes = [['otool', '-l', chromium_framework_executable],
               ['grep', '__mod_init_func', '-C', '5'],
               ['grep', 'size']]
      last_stdout = None
      for pipe in pipes:
        p = subprocess.Popen(pipe, stdin=last_stdout, stdout=subprocess.PIPE)
        last_stdout = p.stdout
      stdout = p.communicate()[0]
      # The section size is printed in hex, e.g. 'size 0x0000...'.
      initializers = re.search('0x([0-9a-f]+)', stdout)
      if initializers:
        initializers_s = initializers.group(1)
        if result == 0:
          result = p.returncode
      else:
        initializers_s = '0'
      word_size = 4 # Assume 32 bit
      si_count = int(initializers_s, 16) / word_size
      print_dict['initializers'] = si_count
      # For Release builds only, use dump-static-initializers.py to print the
      # list of static initializers.
      if si_count > 0 and options.target == 'Release':
        dump_static_initializers = os.path.join(
            os.path.dirname(build_dir), 'tools', 'mac',
            'dump-static-initializers.py')
        result, stdout = run_process(result, [dump_static_initializers,
                                              chromium_framework_dsym])
        print '\n# Static initializers in %s:' % chromium_framework_executable
        print_si_fail_hint('tools/mac/dump-static-initializers.py')
        print stdout
      # Report every collected measurement through the shared collector.
      results_collector.add_result(
          print_dict['app_name'], print_dict['app_name'],
          print_dict['app_size'], 'bytes')
      results_collector.add_result(
          '%s-__TEXT' % print_dict['app_name'], '__TEXT',
          print_dict['app_text'], 'bytes')
      results_collector.add_result(
          '%s-__DATA' % print_dict['app_name'], '__DATA',
          print_dict['app_data'], 'bytes')
      results_collector.add_result(
          '%s-__OBJC' % print_dict['app_name'], '__OBJC',
          print_dict['app_objc'], 'bytes')
      results_collector.add_result(
          print_dict['framework_name'], print_dict['framework_name'],
          print_dict['framework_size'], 'bytes')
      results_collector.add_result(
          '%s-__TEXT' % print_dict['framework_name'], '__TEXT',
          print_dict['framework_text'], 'bytes')
      results_collector.add_result(
          '%s-__DATA' % print_dict['framework_name'], '__DATA',
          print_dict['framework_data'], 'bytes')
      results_collector.add_result(
          '%s-__OBJC' % print_dict['framework_name'], '__OBJC',
          print_dict['framework_objc'], 'bytes')
      results_collector.add_result(
          print_dict['app_bundle'], print_dict['app_bundle'],
          print_dict['app_bundle_size'], 'bytes')
      results_collector.add_result(
          'chrome-si', 'initializers',
          print_dict['initializers'], 'files')
      # Found a match, don't check the other base_names.
      return result
  # If no base_names matched, fail script.
  return 66
def check_linux_binary(target_dir, binary_name, options):
  """Collect appropriate size information about the built Linux binary given.
  Returns a tuple (result, sizes). result is the first non-zero exit
  status of any command it executes, or zero on success. sizes is a list
  of tuples (name, identifier, totals_identifier, value, units).
  The printed line looks like:
  name: identifier= value units
  When this same data is used for totals across all the binaries, then
  totals_identifier is the identifier to use, or '' to just use identifier.
  """
  binary_file = os.path.join(target_dir, binary_name)
  if not os.path.exists(binary_file):
    # Don't print anything for missing files.
    return 0, []
  result = 0
  sizes = []
  def get_elf_section_size(readelf_stdout, section_name):
    # Parse a section's size out of `readelf -SW` output.
    # Matches: .ctors PROGBITS 000000000516add0 5169dd0 000010 00 WA 0 0 8
    match = re.search(r'\.%s.*$' % re.escape(section_name),
                      readelf_stdout, re.MULTILINE)
    if not match:
      return (False, -1)
    # Field 5 of the matched row is the section size, in hex.
    size_str = re.split(r'\W+', match.group(0))[5]
    return (True, int(size_str, 16))
  sizes.append((binary_name, binary_name, 'size',
                get_size(binary_file), 'bytes'))
  result, stripped_size = get_linux_stripped_size(binary_file)
  sizes.append((binary_name + '-stripped', 'stripped', 'stripped',
                stripped_size, 'bytes'))
  # Segment sizes as reported by the `size` tool.
  result, stdout = run_process(result, ['size', binary_file])
  text, data, bss = re.search(r'(\d+)\s+(\d+)\s+(\d+)', stdout).groups()
  sizes += [
      (binary_name + '-text', 'text', '', text, 'bytes'),
      (binary_name + '-data', 'data', '', data, 'bytes'),
      (binary_name + '-bss', 'bss', '', bss, 'bytes'),
  ]
  # Find the number of files with at least one static initializer.
  # First determine if we're 32 or 64 bit
  result, stdout = run_process(result, ['readelf', '-h', binary_file])
  elf_class_line = re.search('Class:.*$', stdout, re.MULTILINE).group(0)
  elf_class = re.split(r'\W+', elf_class_line)[1]
  if elf_class == 'ELF32':
    word_size = 4
  else:
    word_size = 8
  # Then find the number of files with global static initializers.
  # NOTE: this is very implementation-specific and makes assumptions
  # about how compiler and linker implement global static initializers.
  si_count = 0
  result, stdout = run_process(result, ['readelf', '-SW', binary_file])
  has_init_array, init_array_size = get_elf_section_size(stdout, 'init_array')
  if has_init_array:
    si_count = init_array_size / word_size
    si_count = max(si_count, 0)
  sizes.append((binary_name + '-si', 'initializers', '', si_count, 'files'))
  # For Release builds only, use dump-static-initializers.py to print the list
  # of static initializers.
  if si_count > 0 and options.target == 'Release':
    build_dir = os.path.dirname(target_dir)
    dump_static_initializers = os.path.join(os.path.dirname(build_dir),
                                            'tools', 'linux',
                                            'dump-static-initializers.py')
    result, stdout = run_process(result, [dump_static_initializers,
                                          '-d', binary_file])
    print '\n# Static initializers in %s:' % binary_file
    print_si_fail_hint('tools/linux/dump-static-initializers.py')
    print stdout
  # Determine if the binary has the DT_TEXTREL marker.
  result, stdout = run_process(result, ['readelf', '-Wd', binary_file])
  if re.search(r'\bTEXTREL\b', stdout) is None:
    # Nope, so the count is zero.
    count = 0
  else:
    # There are some, so count them.
    result, stdout = run_process(result, ['eu-findtextrel', binary_file])
    count = stdout.count('\n')
  sizes.append((binary_name + '-textrel', 'textrel', '', count, 'relocs'))
  return result, sizes
def main_linux(options, args, results_collector):
  """Print appropriate size information about built Linux targets.
  Returns the first non-zero exit status of any command it executes,
  or zero on success.
  """
  build_dir = build_directory.GetBuildOutputDirectory(SRC_DIR)
  target_dir = os.path.join(build_dir, options.target)
  binaries = [
      'chrome',
      'nacl_helper',
      'nacl_helper_bootstrap',
      'libffmpegsumo.so',
      'libgcflashplayer.so',
      'libppGoogleNaClPluginChrome.so',
  ]
  result = 0
  # totals is keyed by (identifier, units) tuples.
  totals = {}
  for binary in binaries:
    this_result, this_sizes = check_linux_binary(target_dir, binary, options)
    if result == 0:
      result = this_result
    for name, identifier, totals_id, value, units in this_sizes:
      results_collector.add_result(name, identifier, value, units)
      # NOTE: this parses as ((totals_id or identifier), units) -- 'or'
      # binds tighter than the comma -- so totals_id becomes a key tuple.
      totals_id = totals_id or identifier, units
      totals[totals_id] = totals.get(totals_id, 0) + int(value)
  files = [
      'nacl_irt_x86_64.nexe',
      'resources.pak',
  ]
  for filename in files:
    path = os.path.join(target_dir, filename)
    try:
      size = get_size(path)
    except OSError, e:
      if e.errno == errno.ENOENT:
        continue  # Don't print anything for missing files.
      raise
    results_collector.add_result(filename, filename, size, 'bytes')
    # NOTE(review): raises KeyError unless some binary above already
    # contributed a ('size', 'bytes') total -- confirm that is acceptable.
    totals['size', 'bytes'] += size
  # TODO(mcgrathr): This should all be refactored so the mac and win flavors
  # also deliver data structures rather than printing, and the logic for
  # the printing and the summing totals is shared across all three flavors.
  for (identifier, units), value in sorted(totals.iteritems()):
    results_collector.add_result(
        'totals-%s' % identifier, identifier, value, units)
  return result
def check_android_binaries(binaries, target_dir, options):
  """Common method for printing size information for Android targets.

  Delegates measurement to check_linux_binary and prints results
  directly instead of using a ResultsCollector.
  """
  result = 0
  for binary in binaries:
    this_result, this_sizes = check_linux_binary(target_dir, binary, options)
    if result == 0:
      result = this_result
    for name, identifier, _, value, units in this_sizes:
      # Slashes in the path-like binary names would confuse downstream
      # parsing of the RESULT lines, so flatten them.
      print 'RESULT %s: %s= %s %s' % (name.replace('/', '_'), identifier, value,
                                      units)
  return result
def main_android(options, args, results_collector):
  """Print appropriate size information about built Android targets.
  Returns the first non-zero exit status of any command it executes,
  or zero on success.
  """
  build_dir = build_directory.GetBuildOutputDirectory(SRC_DIR)
  target_dir = os.path.join(build_dir, options.target)
  binaries = [
      'chrome_public_apk/libs/armeabi-v7a/libchrome_public.so',
      'lib/libchrome_public.so',
  ]
  return check_android_binaries(binaries, target_dir, options)
def main_android_webview(options, args, results_collector):
  """Print appropriate size information about Android WebViewChromium targets.
  Returns the first non-zero exit status of any command it executes,
  or zero on success.
  """
  build_dir = build_directory.GetBuildOutputDirectory(SRC_DIR)
  target_dir = os.path.join(build_dir, options.target)
  return check_android_binaries(['lib/libwebviewchromium.so'], target_dir,
                                options)
def main_android_cronet(options, args, results_collector):
  """Print appropriate size information about Android Cronet targets.
  Returns the first non-zero exit status of any command it executes,
  or zero on success.
  """
  build_dir = build_directory.GetBuildOutputDirectory(SRC_DIR)
  target_dir = os.path.join(build_dir, options.target)
  # One libcronet.so per packaged ABI.
  abis = ['arm64-v8a', 'armeabi-v7a', 'armeabi', 'mips', 'x86_64', 'x86']
  binaries = ['cronet_sample_apk/libs/%s/libcronet.so' % abi for abi in abis]
  return check_android_binaries(binaries, target_dir, options)
def main_win(options, args, results_collector):
  """Print appropriate size information about built Windows targets.
  Returns the first non-zero exit status of any command it executes,
  or zero on success.
  """
  build_dir = build_directory.GetBuildOutputDirectory(SRC_DIR)
  target_dir = os.path.join(build_dir, options.target)
  chrome_dll = os.path.join(target_dir, 'chrome.dll')
  chrome_child_dll = os.path.join(target_dir, 'chrome_child.dll')
  chrome_exe = os.path.join(target_dir, 'chrome.exe')
  mini_installer_exe = os.path.join(target_dir, 'mini_installer.exe')
  setup_exe = os.path.join(target_dir, 'setup.exe')
  # NOTE: unlike the other platforms, results are printed directly and not
  # added to results_collector, and result stays 0 unconditionally.
  result = 0
  print 'RESULT chrome.dll: chrome.dll= %s bytes' % get_size(chrome_dll)
  # chrome_child.dll, mini_installer.exe and setup.exe may be absent
  # depending on build configuration; only report the ones present.
  if os.path.exists(chrome_child_dll):
    fmt = 'RESULT chrome_child.dll: chrome_child.dll= %s bytes'
    print fmt % get_size(chrome_child_dll)
  print 'RESULT chrome.exe: chrome.exe= %s bytes' % get_size(chrome_exe)
  if os.path.exists(mini_installer_exe):
    fmt = 'RESULT mini_installer.exe: mini_installer.exe= %s bytes'
    print fmt % get_size(mini_installer_exe)
  if os.path.exists(setup_exe):
    print 'RESULT setup.exe: setup.exe= %s bytes' % get_size(setup_exe)
  return result
def main():
  """Dispatch to the per-platform size reporter and optionally emit JSON."""
  if sys.platform in ('win32', 'cygwin'):
    default_platform = 'win'
  elif sys.platform.startswith('darwin'):
    default_platform = 'mac'
  elif sys.platform == 'linux2':
    default_platform = 'linux'
  else:
    default_platform = None
  main_map = {
      'android' : main_android,
      'android-webview' : main_android_webview,
      'android-cronet' : main_android_cronet,
      'linux' : main_linux,
      'mac' : main_mac,
      'win' : main_win,
  }
  platforms = sorted(main_map)
  option_parser = optparse.OptionParser()
  option_parser.add_option('--target',
                           default='Release',
                           help='build target (Debug, Release) '
                                '[default: %default]')
  option_parser.add_option('--target-dir', help='ignored')
  option_parser.add_option('--build-dir', help='ignored')
  option_parser.add_option('--platform',
                           default=default_platform,
                           help='specify platform (%s) [default: %%default]'
                                % ', '.join(platforms))
  option_parser.add_option('--json', help='Path to JSON output file')
  options, args = option_parser.parse_args()
  real_main = main_map.get(options.platform)
  if not real_main:
    if options.platform is None:
      sys.stderr.write('Unsupported sys.platform %s.\n' % repr(sys.platform))
    else:
      sys.stderr.write('Unknown platform %s.\n' % repr(options.platform))
    msg = 'Use the --platform= option to specify a supported platform:\n'
    sys.stderr.write(msg + ' ' + ' '.join(platforms) + '\n')
    return 2
  results_collector = ResultsCollector()
  rc = real_main(options, args, results_collector)
  if options.json:
    with open(options.json, 'w') as f:
      json.dump(results_collector.results, f)
  return rc
# Script entry point: propagate the per-platform status code to the shell.
if '__main__' == __name__:
  sys.exit(main())
| |
"""Support for Flux lights."""
import logging
import random
from flux_led import BulbScanner, WifiLedBulb
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_HS_COLOR,
ATTR_WHITE_VALUE,
EFFECT_COLORLOOP,
EFFECT_RANDOM,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.const import ATTR_MODE, CONF_DEVICES, CONF_NAME, CONF_PROTOCOL
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
_LOGGER = logging.getLogger(__name__)

# Keys used in this platform's configuration schema.
CONF_AUTOMATIC_ADD = "automatic_add"
CONF_CUSTOM_EFFECT = "custom_effect"
CONF_COLORS = "colors"
CONF_SPEED_PCT = "speed_pct"
CONF_TRANSITION = "transition"

DOMAIN = "flux_led"

# Base feature set; RGBW / white-only bulbs extend or restrict this
# (see FluxLight.supported_features).
SUPPORT_FLUX_LED = SUPPORT_BRIGHTNESS | SUPPORT_EFFECT | SUPPORT_COLOR

# Channel modes a bulb can be configured with.
MODE_RGB = "rgb"
MODE_RGBW = "rgbw"
# This mode enables white value to be controlled by brightness.
# RGB value is ignored when this mode is specified.
MODE_WHITE = "w"

# Constant color temp values for 2 flux_led special modes
# Warm-white and Cool-white modes
COLOR_TEMP_WARM_VS_COLD_WHITE_CUT_OFF = 285

# List of supported effects which aren't already declared in LIGHT
EFFECT_RED_FADE = "red_fade"
EFFECT_GREEN_FADE = "green_fade"
EFFECT_BLUE_FADE = "blue_fade"
EFFECT_YELLOW_FADE = "yellow_fade"
EFFECT_CYAN_FADE = "cyan_fade"
EFFECT_PURPLE_FADE = "purple_fade"
EFFECT_WHITE_FADE = "white_fade"
EFFECT_RED_GREEN_CROSS_FADE = "rg_cross_fade"
EFFECT_RED_BLUE_CROSS_FADE = "rb_cross_fade"
EFFECT_GREEN_BLUE_CROSS_FADE = "gb_cross_fade"
EFFECT_COLORSTROBE = "colorstrobe"
EFFECT_RED_STROBE = "red_strobe"
EFFECT_GREEN_STROBE = "green_strobe"
EFFECT_BLUE_STROBE = "blue_strobe"
EFFECT_YELLOW_STROBE = "yellow_strobe"
EFFECT_CYAN_STROBE = "cyan_strobe"
EFFECT_PURPLE_STROBE = "purple_strobe"
EFFECT_WHITE_STROBE = "white_strobe"
EFFECT_COLORJUMP = "colorjump"
EFFECT_CUSTOM = "custom"

# Effect name -> preset pattern code sent to the bulb firmware.
EFFECT_MAP = {
    EFFECT_COLORLOOP: 0x25,
    EFFECT_RED_FADE: 0x26,
    EFFECT_GREEN_FADE: 0x27,
    EFFECT_BLUE_FADE: 0x28,
    EFFECT_YELLOW_FADE: 0x29,
    EFFECT_CYAN_FADE: 0x2A,
    EFFECT_PURPLE_FADE: 0x2B,
    EFFECT_WHITE_FADE: 0x2C,
    EFFECT_RED_GREEN_CROSS_FADE: 0x2D,
    EFFECT_RED_BLUE_CROSS_FADE: 0x2E,
    EFFECT_GREEN_BLUE_CROSS_FADE: 0x2F,
    EFFECT_COLORSTROBE: 0x30,
    EFFECT_RED_STROBE: 0x31,
    EFFECT_GREEN_STROBE: 0x32,
    EFFECT_BLUE_STROBE: 0x33,
    EFFECT_YELLOW_STROBE: 0x34,
    EFFECT_CYAN_STROBE: 0x35,
    EFFECT_PURPLE_STROBE: 0x36,
    EFFECT_WHITE_STROBE: 0x37,
    EFFECT_COLORJUMP: 0x38,
}
# Pattern code the bulb reports while a custom pattern is active
# (compared against raw_state[3] in FluxLight.effect).
EFFECT_CUSTOM_CODE = 0x60

# Transition styles accepted by the custom-effect configuration.
TRANSITION_GRADUAL = "gradual"
TRANSITION_JUMP = "jump"
TRANSITION_STROBE = "strobe"

FLUX_EFFECT_LIST = sorted(EFFECT_MAP) + [EFFECT_RANDOM]

# Schema of the optional "custom_effect" section of a device entry:
# 1-16 RGB byte triples plus speed and transition style.
CUSTOM_EFFECT_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_COLORS): vol.All(
            cv.ensure_list,
            vol.Length(min=1, max=16),
            [
                vol.All(
                    vol.ExactSequence((cv.byte, cv.byte, cv.byte)), vol.Coerce(tuple)
                )
            ],
        ),
        vol.Optional(CONF_SPEED_PCT, default=50): vol.All(
            vol.Range(min=0, max=100), vol.Coerce(int)
        ),
        vol.Optional(CONF_TRANSITION, default=TRANSITION_GRADUAL): vol.All(
            cv.string, vol.In([TRANSITION_GRADUAL, TRANSITION_JUMP, TRANSITION_STROBE])
        ),
    }
)

# Schema of a single statically configured device.
# NOTE: CONF_NAME is optional and has no default value.
DEVICE_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_NAME): cv.string,
        vol.Optional(ATTR_MODE, default=MODE_RGBW): vol.All(
            cv.string, vol.In([MODE_RGBW, MODE_RGB, MODE_WHITE])
        ),
        vol.Optional(CONF_PROTOCOL): vol.All(cv.string, vol.In(["ledenet"])),
        vol.Optional(CONF_CUSTOM_EFFECT): CUSTOM_EFFECT_SCHEMA,
    }
)

# Platform schema: devices keyed by IP address, plus LAN auto-discovery flag.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_DEVICES, default={}): {cv.string: DEVICE_SCHEMA},
        vol.Optional(CONF_AUTOMATIC_ADD, default=False): cv.boolean,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Flux lights.

    Creates one FluxLight per statically configured device and, when
    ``automatic_add`` is enabled, scans the LAN for additional bulbs
    that are not already configured.
    """
    lights = []
    light_ips = []
    for ipaddr, device_config in config.get(CONF_DEVICES, {}).items():
        device = {}
        # CONF_NAME is declared vol.Optional with no default in
        # DEVICE_SCHEMA, so a plain [] lookup would raise KeyError for an
        # unnamed device; fall back to the IP address as the entity name.
        device["name"] = device_config.get(CONF_NAME, ipaddr)
        device["ipaddr"] = ipaddr
        device[CONF_PROTOCOL] = device_config.get(CONF_PROTOCOL)
        device[ATTR_MODE] = device_config[ATTR_MODE]
        device[CONF_CUSTOM_EFFECT] = device_config.get(CONF_CUSTOM_EFFECT)
        light = FluxLight(device)
        lights.append(light)
        light_ips.append(ipaddr)
    if not config.get(CONF_AUTOMATIC_ADD, False):
        add_entities(lights, True)
        return
    # Find the bulbs on the LAN
    scanner = BulbScanner()
    scanner.scan(timeout=10)
    for device in scanner.getBulbInfo():
        ipaddr = device["ipaddr"]
        # Skip bulbs that were already added from the static configuration.
        if ipaddr in light_ips:
            continue
        device["name"] = f"{device['id']} {ipaddr}"
        device[ATTR_MODE] = None
        device[CONF_PROTOCOL] = None
        device[CONF_CUSTOM_EFFECT] = None
        light = FluxLight(device)
        lights.append(light)
    add_entities(lights, True)
class FluxLight(LightEntity):
    """Representation of a Flux light."""

    def __init__(self, device):
        """Initialize the light.

        ``device`` is a dict prepared by ``setup_platform`` with keys
        "name", "ipaddr", CONF_PROTOCOL, ATTR_MODE and CONF_CUSTOM_EFFECT.
        """
        self._name = device["name"]
        self._ipaddr = device["ipaddr"]
        self._protocol = device[CONF_PROTOCOL]
        self._mode = device[ATTR_MODE]
        self._custom_effect = device[CONF_CUSTOM_EFFECT]
        # Connection is established lazily in update(); None == unavailable.
        self._bulb = None
        # True once a connection failure has been logged, so the warning is
        # emitted only once per outage.
        self._error_reported = False

    def _connect(self):
        """Connect to Flux light."""
        self._bulb = WifiLedBulb(self._ipaddr, timeout=5)
        if self._protocol:
            self._bulb.setProtocol(self._protocol)
        # After bulb object is created the status is updated. We can
        # now set the correct mode if it was not explicitly defined.
        if not self._mode:
            if self._bulb.rgbwcapable:
                self._mode = MODE_RGBW
            else:
                self._mode = MODE_RGB

    def _disconnect(self):
        """Disconnect from Flux light."""
        self._bulb = None

    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        return self._bulb is not None

    @property
    def name(self):
        """Return the name of the device if any."""
        return self._name

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._bulb.isOn()

    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        # In white-only mode brightness is represented by the white channel.
        if self._mode == MODE_WHITE:
            return self.white_value
        return self._bulb.brightness

    @property
    def hs_color(self):
        """Return the color property."""
        return color_util.color_RGB_to_hs(*self._bulb.getRgb())

    @property
    def supported_features(self):
        """Flag supported features."""
        if self._mode == MODE_RGBW:
            return SUPPORT_FLUX_LED | SUPPORT_WHITE_VALUE | SUPPORT_COLOR_TEMP
        if self._mode == MODE_WHITE:
            return SUPPORT_BRIGHTNESS
        return SUPPORT_FLUX_LED

    @property
    def white_value(self):
        """Return the white value of this light between 0..255."""
        return self._bulb.getRgbw()[3]

    @property
    def effect_list(self):
        """Return the list of supported effects."""
        if self._custom_effect:
            return FLUX_EFFECT_LIST + [EFFECT_CUSTOM]
        return FLUX_EFFECT_LIST

    @property
    def effect(self):
        """Return the current effect."""
        # raw_state[3] holds the preset-pattern code currently running.
        current_mode = self._bulb.raw_state[3]
        if current_mode == EFFECT_CUSTOM_CODE:
            return EFFECT_CUSTOM
        for effect, code in EFFECT_MAP.items():
            if current_mode == code:
                return effect
        return None

    def turn_on(self, **kwargs):
        """Turn the specified or all lights on."""
        if not self.is_on:
            self._bulb.turnOn()
        hs_color = kwargs.get(ATTR_HS_COLOR)
        if hs_color:
            rgb = color_util.color_hs_to_RGB(*hs_color)
        else:
            rgb = None
        brightness = kwargs.get(ATTR_BRIGHTNESS)
        effect = kwargs.get(ATTR_EFFECT)
        white = kwargs.get(ATTR_WHITE_VALUE)
        color_temp = kwargs.get(ATTR_COLOR_TEMP)
        # handle special modes
        if color_temp is not None:
            if brightness is None:
                brightness = self.brightness
            # Drive one of the two white channels depending on the requested
            # color temperature (cutoff at 285 mireds); which physical
            # channel is warm vs. cool is bulb-specific — TODO confirm
            # against flux_led docs.
            if color_temp > COLOR_TEMP_WARM_VS_COLD_WHITE_CUT_OFF:
                self._bulb.setRgbw(w=brightness)
            else:
                self._bulb.setRgbw(w2=brightness)
            return
        # Show warning if effect set with rgb, brightness, or white level
        if effect and (brightness or white or rgb):
            _LOGGER.warning(
                "RGB, brightness and white level are ignored when"
                " an effect is specified for a flux bulb"
            )
        # Random color effect
        if effect == EFFECT_RANDOM:
            self._bulb.setRgb(
                random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)
            )
            return
        # User-defined custom pattern (silently ignored if not configured).
        if effect == EFFECT_CUSTOM:
            if self._custom_effect:
                self._bulb.setCustomPattern(
                    self._custom_effect[CONF_COLORS],
                    self._custom_effect[CONF_SPEED_PCT],
                    self._custom_effect[CONF_TRANSITION],
                )
            return
        # Effect selection
        if effect in EFFECT_MAP:
            self._bulb.setPresetPattern(EFFECT_MAP[effect], 50)
            return
        # Preserve current brightness on color/white level change
        if brightness is None:
            brightness = self.brightness
        # Preserve color on brightness/white level change
        if rgb is None:
            rgb = self._bulb.getRgb()
        if white is None and self._mode == MODE_RGBW:
            white = self.white_value
        # handle W only mode (use brightness instead of white value)
        if self._mode == MODE_WHITE:
            self._bulb.setRgbw(0, 0, 0, w=brightness)
        # handle RGBW mode
        elif self._mode == MODE_RGBW:
            self._bulb.setRgbw(*tuple(rgb), w=white, brightness=brightness)
        # handle RGB mode
        else:
            self._bulb.setRgb(*tuple(rgb), brightness=brightness)

    def turn_off(self, **kwargs):
        """Turn the specified or all lights off."""
        self._bulb.turnOff()

    def update(self):
        """Synchronize state with bulb."""
        # Reconnect if the previous connection was lost; on failure mark the
        # entity unavailable and log the problem once.
        if not self.available:
            try:
                self._connect()
                self._error_reported = False
            except OSError:
                self._disconnect()
                if not self._error_reported:
                    _LOGGER.warning(
                        "Failed to connect to bulb %s, %s", self._ipaddr, self._name
                    )
                    self._error_reported = True
                return
        self._bulb.update_state(retry=2)
| |
# -*- coding: utf-8 -*-
# File: trainers.py
import multiprocessing as mp
import os
import sys
import tensorflow as tf
from tensorpack.compat import tfv1
from ..callbacks import CallbackFactory, RunOp
from ..graph_builder.distributed import DistributedParameterServerBuilder, DistributedReplicatedBuilder
from ..graph_builder.training import (
AsyncMultiGPUBuilder, SyncMultiGPUParameterServerBuilder, SyncMultiGPUReplicatedBuilder)
from ..graph_builder.utils import override_to_local_variable
from ..input_source import FeedfreeInput, QueueInput
from ..tfutils import get_global_step_var
from ..tfutils.common import get_tf_version_tuple
from ..tfutils.distributed import get_distributed_session_creator
from ..tfutils.sesscreate import NewSessionCreator
from ..tfutils.tower import TrainTowerContext
from ..utils import logger
from ..utils.argtools import map_arg
from ..utils.develop import HIDE_DOC, deprecated
from .tower import SingleCostTrainer
# Public trainer API exported by `from ... import *`.
__all__ = ['NoOpTrainer', 'SimpleTrainer',
           'QueueInputTrainer',
           'SyncMultiGPUTrainer',
           'SyncMultiGPUTrainerReplicated',
           'SyncMultiGPUTrainerParameterServer',
           'AsyncMultiGPUTrainer',
           'DistributedTrainerParameterServer',
           'DistributedTrainerReplicated',
           'HorovodTrainer', 'BytePSTrainer']
def _int_to_range(x):
if isinstance(x, int):
assert x > 0, "Argument cannot be {}!".format(x)
return list(range(x))
return x
class SimpleTrainer(SingleCostTrainer):
    """
    Single-GPU single-cost single-tower trainer.
    """

    def _setup_graph(self, input, get_cost_fn, get_opt_fn):
        """Build cost, gradients and the train_op for one tower.

        Returns an empty list: no extra callbacks are needed.
        """
        logger.info("Building graph for a single training tower ...")
        # Single (unnamed) tower context; everything runs on the default device.
        with TrainTowerContext(''):
            grads = self._make_get_grad_fn(input, get_cost_fn, get_opt_fn)()
            opt = get_opt_fn()
            self.train_op = opt.apply_gradients(grads, name='train_op')
        return []
class NoOpTrainer(SimpleTrainer):
    """
    A special trainer that builds the graph (if given a tower function)
    and does nothing in each step.
    It is used to only run the callbacks.
    Note that `steps_per_epoch` and `max_epochs` are still valid options.
    """

    def run_step(self):
        # Run an empty fetch list: triggers the session hooks/callbacks
        # without executing any training op.
        self.hooked_sess.run([])
# Only exists for type check & back-compatibility
class QueueInputTrainer(SimpleTrainer):
    """Deprecated alias of SimpleTrainer that additionally requires a QueueInput."""

    @deprecated("SimpleTrainer is sufficient!", "2019-12-31")
    def _setup_graph(self, input, get_cost_fn, get_opt_fn):
        # Identical to SimpleTrainer apart from the input-type check.
        assert isinstance(input, QueueInput), input
        return super(QueueInputTrainer, self)._setup_graph(input, get_cost_fn, get_opt_fn)
class SyncMultiGPUTrainerParameterServer(SingleCostTrainer):
    __doc__ = SyncMultiGPUParameterServerBuilder.__doc__ + """
    Attributes:
        devices (list[int]): List of GPU ids.
    """

    @map_arg(gpus=_int_to_range)
    def __init__(self, gpus, ps_device=None):
        """
        Args:
            gpus ([int]): list of GPU ids.
            ps_device: either 'gpu' or 'cpu', where variables are stored.
                The default value is subject to change.
        """
        self.devices = gpus
        if ps_device is None:
            # Default heuristic: keep variables on GPU for <= 2 GPUs,
            # otherwise store them on CPU.
            ps_device = 'gpu' if len(gpus) <= 2 else 'cpu'
        self._builder = SyncMultiGPUParameterServerBuilder(gpus, ps_device)
        super(SyncMultiGPUTrainerParameterServer, self).__init__()

    def _setup_graph(self, input, get_cost_fn, get_opt_fn):
        """Build one tower per GPU and a synchronous PS-style train_op."""
        # Multi-GPU training requires an input source that does not feed
        # through placeholders.
        if len(self.devices) > 1:
            assert isinstance(input, FeedfreeInput), input
        tower_fn = self._make_get_grad_fn(input, get_cost_fn, get_opt_fn)
        grad_list = self._builder.call_for_each_tower(tower_fn)
        self.train_op = self._builder.build(grad_list, get_opt_fn)
        return []
def SyncMultiGPUTrainer(gpus):
    """
    Build a reasonable default synchronous multi-GPU trainer.

    This is a convenience wrapper around
    :class:`SyncMultiGPUTrainerParameterServer` with variables stored on
    CPU. It may not be the most efficient choice for every task.

    Args:
        gpus (list[int]): list of GPU ids.
    """
    trainer = SyncMultiGPUTrainerParameterServer(gpus, ps_device='cpu')
    return trainer
class AsyncMultiGPUTrainer(SingleCostTrainer):
    __doc__ = AsyncMultiGPUBuilder.__doc__ + """
    Attributes:
        devices (list[int]): List of GPU ids.
    """

    @map_arg(gpus=_int_to_range)
    def __init__(self, gpus, scale_gradient=True):
        """
        Args:
            gpus ([int]): list of GPU ids.
            scale_gradient (bool): if True, will scale each gradient by ``1.0/nr_gpu``.
        """
        self.devices = gpus
        self._builder = AsyncMultiGPUBuilder(gpus, scale_gradient)
        super(AsyncMultiGPUTrainer, self).__init__()

    def _setup_graph(self, input, get_cost_fn, get_opt_fn):
        """Build one tower per GPU and an asynchronous train_op."""
        # Multi-GPU training requires an input source that does not feed
        # through placeholders.
        if len(self.devices) > 1:
            assert isinstance(input, FeedfreeInput), input
        tower_fn = self._make_get_grad_fn(input, get_cost_fn, get_opt_fn)
        grad_list = self._builder.call_for_each_tower(tower_fn)
        self.train_op = self._builder.build(grad_list, get_opt_fn)
        return []
class SyncMultiGPUTrainerReplicated(SingleCostTrainer):
    __doc__ = SyncMultiGPUReplicatedBuilder.__doc__ + """
    Attributes:
        devices (list[int]): List of GPU ids.
        BROADCAST_EVERY_EPOCH (bool):
            Whether to broadcast the variables every epoch.
            Theoretically this is a no-op (because the variables
            are supposed to be in-sync).
            But this cheap operation may help prevent
            certain numerical issues in practice.
            Note that in cases such as BatchNorm, the variables may not be in sync:
            e.g., non-master worker may not maintain EMAs.
            For benchmark, disable this option.
    """

    @map_arg(gpus=_int_to_range)
    def __init__(self, gpus, average=True, mode=None):
        """
        Args:
            gpus (int or [int]): list of GPU ids.
            average (bool): whether to average or sum gradients.
            mode (str or None): Gradient aggregation mode.
                Supported values: ['nccl', 'hierarchical', 'cpu', 'gpu'].
                These modes may differ in speed.
                Default to pick automatically by heuristics.
                "hierarchical" mode was designed for DGX-like 8-GPU machines.
        """
        self.devices = gpus
        if mode is not None:
            mode = mode.lower()
        # Heuristics about mode selection:
        if mode == 'hierarchical' and len(gpus) != 8:
            logger.warn("mode='hierarchical' requires 8 GPUs. Will fallback to default mode.")
            mode = None
        if mode is None:
            if len(gpus) == 8:
                mode = 'hierarchical'
            else:
                # https://github.com/tensorflow/tensorflow/issues/41539
                mode = 'nccl' if get_tf_version_tuple() < (1, 15) else 'gpu'
        if mode == 'cpu' and get_tf_version_tuple() >= (2, 0):
            # cpu mode causes the entire model to get located on cpu
            mode = 'gpu'
        if mode == 'nccl' and get_tf_version_tuple() >= (1, 15):
            logger.warning(
                "NCCL in TensorFlow has a serious bug that is likely to trigger in TF>=1.15. "
                "Try 'mode=None' to use a better default mode.")
        self._builder = SyncMultiGPUReplicatedBuilder(gpus, average, mode)
        self.BROADCAST_EVERY_EPOCH = True
        super(SyncMultiGPUTrainerReplicated, self).__init__()

    def _setup_graph(self, input, get_cost_fn, get_opt_fn):
        """Build replicated towers; return a callback that re-syncs variables."""
        # Multi-GPU training requires an input source that does not feed
        # through placeholders.
        if len(self.devices) > 1:
            assert isinstance(input, FeedfreeInput), input
        tower_fn = self._make_get_grad_fn(input, get_cost_fn, get_opt_fn)
        grad_list = self._builder.call_for_each_tower(tower_fn)
        self.train_op, post_init_op = self._builder.build(grad_list, get_opt_fn)
        if post_init_op is not None:
            # Run the sync op once before training, and (optionally) again
            # at every epoch boundary.
            cb = RunOp(
                post_init_op,
                run_before=True,
                run_as_trigger=self.BROADCAST_EVERY_EPOCH,
                verbose=True)
            cb.name_scope = "SyncVariables"
            return [cb]
        else:
            return []
# deprecated
class DistributedTrainerBase(SingleCostTrainer):
    """Common base for TF distributed (tf.train.Server-based) trainers."""

    # List of GPU ids used by this worker; set in __init__.
    devices = None

    def __init__(self, gpus, server):
        """
        Args:
            gpus ([int]): list of GPU ids used by this worker.
            server (tf.train.Server): the server with ps and workers.
        """
        super(DistributedTrainerBase, self).__init__()
        self.devices = gpus
        self.server = server
        self.job_name = server.server_def.job_name
        logger.info("Distributed training on cluster:\n" + str(server.server_def.cluster))

    def join(self):
        """Serve parameter requests forever (used by 'ps' jobs); never returns."""
        logger.info("Calling server.join() on {}:{}".format(self.job_name, self.server.server_def.task_index))
        logger.info("Kill me with 'kill {}'".format(os.getpid()))
        self.server.join()  # this function will never return tensorflow#4713
        raise RuntimeError("This is a bug. Server.join() for should never return!")

    @HIDE_DOC
    def initialize(self, session_creator, session_init):
        # Distributed sessions must come from get_distributed_session_creator;
        # a user-supplied creator/config would be ignored, so reject it loudly.
        if not isinstance(session_creator, NewSessionCreator) or \
                session_creator.user_provided_config:
            raise ValueError(
                "You are not allowed to set session_creator or session_config for distributed training! "
                "To use a custom session config, pass it to tf.train.Server.")
        super(DistributedTrainerBase, self).initialize(
            get_distributed_session_creator(self.server), session_init)
# This is slow. deprecated in favor of horovod
class DistributedTrainerParameterServer(DistributedTrainerBase):
    __doc__ = DistributedParameterServerBuilder.__doc__

    @map_arg(gpus=_int_to_range)
    def __init__(self, gpus, server, caching_device='cpu'):
        """
        Args:
            gpus ([int]): list of GPU ids.
            server (tf.train.Server): the server with ps and workers.
            caching_device (str): either 'cpu' or 'gpu'. The device to cache variables copied from PS
        """
        super(DistributedTrainerParameterServer, self).__init__(gpus, server)
        assert self.job_name in ['ps', 'worker'], self.job_name
        if self.job_name == 'ps':
            # Parameter servers only serve variables; join() never returns.
            self.join()
        self._builder = DistributedParameterServerBuilder(gpus, server, caching_device)
        self.is_chief = self._builder.is_chief

    def _setup_graph(self, input, get_cost_fn, get_opt_fn):
        """Build the PS-style distributed training graph; no extra callbacks."""
        assert isinstance(input, FeedfreeInput), input
        self.train_op = self._builder.build(
            self._make_get_grad_fn(input, get_cost_fn, get_opt_fn), get_opt_fn)
        return []
# This is slow. deprecated in favor of horovod
class DistributedTrainerReplicated(DistributedTrainerBase):
    __doc__ = DistributedReplicatedBuilder.__doc__

    @map_arg(gpus=_int_to_range)
    def __init__(self, gpus, server):
        """
        Args:
            gpus (list[int]): list of GPU ids.
            server (tf.train.Server): the server with ps and workers.
        """
        super(DistributedTrainerReplicated, self).__init__(gpus, server)
        assert self.job_name in ['ps', 'worker'], self.job_name
        if self.job_name == 'ps':
            # Parameter servers only serve variables; join() never returns.
            self.join()
        self._builder = DistributedReplicatedBuilder(gpus, server)
        self.is_chief = self._builder.is_chief

    def _setup_input(self, input_signature, input):
        # The global step and any variables created by the input source are
        # kept worker-local instead of being placed on the PS.
        with override_to_local_variable():
            get_global_step_var()  # gs should be local
            # input source may create variables (queue size summary)
            # TODO This is not good because we don't know from here
            # whether something should be global or local. We now assume
            # they should be local.
            assert not input.setup_done()
            return input.setup(input_signature)

    def _setup_graph(self, input, get_cost_fn, get_opt_fn):
        """Build the replicated distributed graph plus PS sync callbacks."""
        assert isinstance(input, FeedfreeInput), input
        self.train_op, initial_sync_op, model_sync_op = self._builder.build(
            self._make_get_grad_fn(input, get_cost_fn, get_opt_fn), get_opt_fn)
        callbacks = []
        # Initial syncing vars from PS
        cb = RunOp(lambda: initial_sync_op,
                   run_before=True, run_as_trigger=False, verbose=True)
        cb.chief_only = False
        callbacks.append(cb)
        # Sync model_variables to PS, only chief needs to do this
        if model_sync_op:
            cb = RunOp(lambda: model_sync_op,
                       run_before=False, run_as_trigger=True, verbose=True)
            logger.warn("For efficiency, local MODEL_VARIABLES are only synced to PS once "
                        "every epoch. Be careful if you save the model more frequently than this.")
            callbacks.append(cb)
        return callbacks

    @property
    def _main_tower_vs_name(self):
        # Variable-scope name of the main training tower.
        return "tower0"
class HorovodTrainer(SingleCostTrainer):
    """
    Horovod trainer, support both multi-GPU and distributed training.

    To use for multi-GPU training:

    .. code-block:: bash

        # First, change trainer to HorovodTrainer(), then
        CUDA_VISIBLE_DEVICES=0,1,2,3 NCCL_DEBUG=INFO horovodrun -np 4 --output-filename mylog python train.py

    To use for distributed training:

    .. code-block:: bash

        # First, change trainer to HorovodTrainer(), then
        horovodrun -np 8 -H server1:4,server2:4 --output-filename mylog \\
            python train.py

    Note:
        1. To reach the maximum speed in your system, there are many options to tune
           in Horovod installation, horovodrun arguments, and in the MPI command line.
           See Horovod docs for details.
        2. Due to a TF bug (#8136), you must not initialize CUDA context before the trainer starts training.
           Therefore TF functions like `is_gpu_available()` or `list_local_devices()`
           must be avoided.
           You can, however, use `tf.config.experimental.list_physical_devices('GPU')`, introduced in TF 1.14.
        3. Horovod supports both MPI and gloo. There are a few drawbacks of the MPI backend:

           + MPI does not like `fork()`. If your code (e.g. dataflow) contains multiprocessing, it may cause problems.
           + MPI sometimes fails to kill all processes in the end. Be sure to check it afterwards.

           The gloo backend is recommended though it may come with very minor slow down.
           To use gloo backend, see
           `horovod documentation <https://github.com/horovod/horovod#running-horovod>`_ for more details.
        4. Keep in mind that there is one process running the script per GPU, therefore:

           + Make sure your InputSource has reasonable randomness.
           + If your data processing is heavy, doing it in a single dedicated process might be
             a better choice than doing them repeatedly in each process.
           + You need to make sure log directories in each process won't conflict.
             You can set it only for the chief process, or set a different one for each process.
           + Callbacks have an option to be run only in the chief process, or in all processes.
             See :meth:`Callback.set_chief_only()`. Most callbacks have a reasonable
             default already, but certain callbacks may need your customization.
             Report an issue if you find any bad defaults.
           + You can use Horovod API such as `hvd.rank()` to know which process you are and choose
             different code path. Chief process has rank 0.
        5. Due to these caveats, see
           `ResNet-Horovod <https://github.com/tensorpack/benchmarks/tree/master/ResNet-Horovod>`_
           for a full example which has handled these common issues.
           This example can train ImageNet in roughly an hour following the paper's setup.

    Attributes:
        BROADCAST_EVERY_EPOCH (bool):
            Whether to broadcast the variables every epoch.
            Theoretically this is a no-op (because the variables
            are supposed to be in-sync).
            But this cheap operation may help prevent certain numerical issues in practice.
            Note that in cases such as BatchNorm, the variables may not be in sync:
            e.g., non-master worker may not maintain EMAs.
            For benchmark, disable this option.
    """

    def __init__(self, average=True, compression=None):
        """
        Args:
            average (bool): whether to average or sum the gradients across processes.
            compression: `hvd.Compression.fp16` or `hvd.Compression.none`
        """
        if 'pyarrow' in sys.modules:
            logger.warn("Horovod and pyarrow may conflict due to pyarrow bugs.")
        # lazy import
        import horovod.tensorflow as hvd
        import horovod
        hvd_version = tuple(map(int, horovod.__version__.split('.')[:3]))
        self.hvd = hvd
        hvd.init()
        self.is_chief = hvd.rank() == 0
        self._local_rank = hvd.local_rank()
        self._rank = hvd.rank()
        self._average = average
        self._compression = compression
        # the `compression` argument to hvd.allreduce exists since 0.15
        self._has_compression = hvd_version >= (0, 15, 0)
        logger.info("[HorovodTrainer] local rank={}".format(self._local_rank))
        super(HorovodTrainer, self).__init__()
        self.BROADCAST_EVERY_EPOCH = True

    def mpi_enabled(self):
        """
        Returns:
            bool: whether hvd is currently running under MPI
        """
        try:
            return self.hvd.mpi_enabled()
        except AttributeError:
            # older horovod versions do not expose mpi_enabled()
            return False

    def allreduce(self, grads):
        """Allreduce every gradient in a list of (grad, var) pairs.

        None gradients are passed through unchanged; with a single process
        the list is returned as-is.
        """
        if self.hvd.size() == 1:
            return grads
        # copied from https://github.com/uber/horovod/blob/master/horovod/tensorflow/__init__.py
        averaged_gradients = []
        with tf.name_scope("AllReduce"):
            for grad, var in grads:
                if grad is not None:
                    if self._compression is not None and self._has_compression:
                        avg_grad = self.hvd.allreduce(grad, average=self._average, compression=self._compression)
                    else:
                        avg_grad = self.hvd.allreduce(grad, average=self._average)
                    averaged_gradients.append((avg_grad, var))
                else:
                    averaged_gradients.append((None, var))
        return averaged_gradients

    def _setup_graph(self, input, get_cost_fn, get_opt_fn):
        """Build a single tower whose gradients are allreduced across ranks."""
        with TrainTowerContext(''):
            grads = self._make_get_grad_fn(input, get_cost_fn, get_opt_fn)()
            grads = self.allreduce(grads)
            opt = get_opt_fn()
            self.train_op = opt.apply_gradients(grads, name='train_op')
        # Broadcast variables from rank 0 before training (and optionally at
        # every epoch); must run in all processes, not only the chief.
        cb = CallbackFactory(
            before_train=self.broadcast,
            trigger=self.broadcast if self.BROADCAST_EVERY_EPOCH else None
        ).set_chief_only(False)
        return [cb]

    def broadcast(self, _):
        """Run the broadcast op created in initialize() (callback target)."""
        logger.info("Broadcasting {} global variables ...".format(self._num_global_variables))
        # the op will be created in initialize()
        self.sess.run(self._broadcast_op)

    @HIDE_DOC
    def initialize(self, session_creator, session_init):
        # broadcast_op should be the last setup_graph: it needs to be created
        # "right before" the graph is finalized,
        # because it needs to capture all the variables (which may be created by callbacks).
        # NOTE(review): stray space in `tfv1 .global_variables()` below — valid
        # Python, but should be cleaned up.
        self._num_global_variables = len(tfv1 .global_variables())
        self._broadcast_op = self.hvd.broadcast_global_variables(0)
        # it's important that our NewSessionCreator does not finalize the graph
        if not isinstance(session_creator, NewSessionCreator):
            raise ValueError(
                "session_creator has to be `NewSessionCreator` for horovod/byteps training! ")
        # NOTE It will fail if GPU was already detected before initializing the session
        # https://github.com/tensorflow/tensorflow/issues/8136
        session_creator.config.gpu_options.visible_device_list = str(self._local_rank)
        try:
            session_creator.config.inter_op_parallelism_threads = mp.cpu_count() // self.hvd.local_size()
        except AttributeError:  # old horovod does not have local_size
            pass
        super(HorovodTrainer, self).initialize(session_creator, session_init)
        # This broadcast belongs to the "intialize" stage
        # It should not be delayed to the "before_train" stage.
        # TODO:
        # 1. a allgather helper to concat strings
        # 2. check variables on each rank match each other, print warnings, and broadcast the common set.
        if self.is_chief:
            logger.info("Broadcasting initialization of {} global variables ...".format(self._num_global_variables))
        else:
            logger.info("Rank {} waiting for initialization of {} variables ...".format(
                self._rank, self._num_global_variables))
        self.sess.run(self._broadcast_op)
class BytePSTrainer(HorovodTrainer):
    """
    BytePS trainer. Supports both multi-GPU and distributed training.
    It achieves better scalability than horovod in distributed training, if the model is communication
    intensive and you have properly set up the machines following its
    `best practices <https://github.com/bytedance/byteps/blob/master/docs/best-practice.md>`_
    which requires a few extra bandwidth servers than horovod.

    To use it, switch the trainer, and refer to BytePS documentation on how to
    launch server/scheduler/workers.

    Attributes:
        hvd (module): the byteps module that contains horovod-compatible APIs
            like `rank(),size()`.
            This attribute exists so that downstream code that uses these APIs
            does not need to worry about which library is being used under the hood.
    """

    def __init__(self, average=True):
        """
        Args:
            average (bool): whether to average or sum the gradients across processes.
        """
        import byteps.tensorflow as bps
        self.hvd = bps  # BytePS has the same interface as Horovod
        self.hvd.allreduce = bps.push_pull  # https://github.com/bytedance/byteps/issues/8
        # These environment variables are set by the BytePS launcher; this
        # process must run in the "worker" role.
        assert os.environ.get("DMLC_ROLE", None) == "worker"
        assert "DMLC_WORKER_ID" in os.environ and "DMLC_NUM_WORKER" in os.environ
        bps.init()
        self.is_chief = bps.rank() == 0
        self._local_rank = bps.local_rank()
        self._rank = bps.rank()
        self._average = average
        # Gradient compression is a horovod-only feature.
        self._compression = None
        self._has_compression = False
        logger.info("[BytePSTrainer] local rank={}".format(self._local_rank))
        # Deliberately skip HorovodTrainer.__init__ (it would initialize
        # horovod). But the inherited _setup_graph reads
        # BROADCAST_EVERY_EPOCH, which HorovodTrainer.__init__ normally
        # sets — set it here too, otherwise _setup_graph raises
        # AttributeError.
        SingleCostTrainer.__init__(self)
        self.BROADCAST_EVERY_EPOCH = True

    def mpi_enabled(self):
        """
        Returns:
            bool: whether hvd is currently running under MPI
        """
        # BytePS never runs under MPI.
        return False
| |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
import sys
sys.path.append("..")
from op_test_xpu import XPUOpTest
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
import time
paddle.enable_static()
def bilinear_interp_np(input,
                       out_h,
                       out_w,
                       out_size=None,
                       actual_shape=None,
                       align_corners=True,
                       align_mode=0,
                       data_layout='NCHW'):
    """NumPy reference implementation of bilinear interpolation.

    Works on a 4-D array in NCHW layout; NHWC input is transposed in and
    back out. ``out_size`` and ``actual_shape``, when given, override
    ``out_h``/``out_w`` (``actual_shape`` has the highest priority).
    """
    if data_layout == "NHWC":
        # Normalize to NCHW so the core loops handle a single layout.
        input = np.transpose(input, (0, 3, 1, 2))  # NHWC => NCHW
    if out_size is not None:
        out_h, out_w = out_size[0], out_size[1]
    if actual_shape is not None:
        out_h, out_w = actual_shape[0], actual_shape[1]
    n, c, in_h, in_w = input.shape

    # Source/destination scale factors (0.0 for degenerate 1-pixel outputs).
    ratio_h = 0.0
    ratio_w = 0.0
    if out_h > 1:
        ratio_h = (in_h - 1.0) / (out_h - 1.0) if align_corners else 1.0 * in_h / out_h
    if out_w > 1:
        ratio_w = (in_w - 1.0) / (out_w - 1.0) if align_corners else 1.0 * in_w / out_w

    # align_mode 0 without align_corners uses half-pixel source coordinates.
    half_pixel = (align_mode == 0 and not align_corners)
    out = np.zeros((n, c, out_h, out_w))
    for i in range(out_h):
        if half_pixel:
            h = int(ratio_h * (i + 0.5) - 0.5)
        else:
            h = int(ratio_h * i)
        h = max(0, h)
        # Offset of the lower source row; 0 at the bottom edge (clamping).
        hid = 1 if h < in_h - 1 else 0
        if half_pixel:
            idx_src_h = max(ratio_h * (i + 0.5) - 0.5, 0)
            h1lambda = idx_src_h - h
        else:
            h1lambda = ratio_h * i - h
        h2lambda = 1.0 - h1lambda
        for j in range(out_w):
            if half_pixel:
                w = int(ratio_w * (j + 0.5) - 0.5)
            else:
                w = int(ratio_w * j)
            w = max(0, w)
            wid = 1 if w < in_w - 1 else 0
            if half_pixel:
                idx_src_w = max(ratio_w * (j + 0.5) - 0.5, 0)
                w1lambda = idx_src_w - w
            else:
                w1lambda = ratio_w * j - w
            w2lambda = 1.0 - w1lambda
            # Weighted blend of the four neighboring source pixels.
            out[:, :, i, j] = h2lambda * (w2lambda * input[:, :, h, w] +
                                          w1lambda * input[:, :, h, w + wid]) + \
                h1lambda * (w2lambda * input[:, :, h + hid, w] +
                            w1lambda * input[:, :, h + hid, w + wid])
    if data_layout == "NHWC":
        out = np.transpose(out, (0, 2, 3, 1))  # NCHW => NHWC
    return out.astype(input.dtype)
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestBilinearInterpOp(XPUOpTest):
    """Compare the XPU bilinear_interp op against the NumPy reference.

    Subclasses override init_test_case() to vary shapes and attributes.
    """

    def setUp(self):
        """Build op inputs, attributes and expected outputs for one config."""
        self.use_xpu = True
        self.out_size = None
        self.actual_shape = None
        self.data_layout = 'NCHW'
        self.init_test_case()
        self.op_type = "bilinear_interp"
        input_np = np.random.random(self.input_shape).astype("float32")
        # Spatial dims depend on layout: NCHW -> axes (2, 3); NHWC -> (1, 2).
        if self.data_layout == "NCHW":
            in_h = self.input_shape[2]
            in_w = self.input_shape[3]
        else:
            in_h = self.input_shape[1]
            in_w = self.input_shape[2]
        # A positive scale overrides the explicit out_h/out_w.
        if self.scale > 0:
            out_h = int(in_h * self.scale)
            out_w = int(in_w * self.scale)
        else:
            out_h = self.out_h
            out_w = self.out_w
        output_np = bilinear_interp_np(input_np, out_h, out_w, self.out_size,
                                       self.actual_shape, self.align_corners,
                                       self.align_mode, self.data_layout)
        self.inputs = {'X': input_np}
        if self.out_size is not None:
            self.inputs['OutSize'] = self.out_size
        # actual_shape takes precedence over out_size for the OutSize input.
        if self.actual_shape is not None:
            self.inputs['OutSize'] = self.actual_shape
        self.attrs = {
            'out_h': self.out_h,
            'out_w': self.out_w,
            'scale': self.scale,
            'interp_method': self.interp_method,
            'align_corners': self.align_corners,
            'align_mode': self.align_mode,
            'data_layout': self.data_layout
        }
        self.outputs = {'Out': output_np}

    def test_check_output(self):
        """Forward output must match the NumPy reference."""
        place = paddle.XPUPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        """Gradient w.r.t. X is checked numerically (in place)."""
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', in_place=True)

    def init_test_case(self):
        """Default configuration; overridden by subclasses."""
        self.interp_method = 'bilinear'
        self.input_shape = [2, 3, 5, 5]
        self.out_h = 2
        self.out_w = 2
        self.scale = 0.
        self.out_size = np.array([3, 3]).astype("int32")
        self.align_corners = True
        self.align_mode = 1
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestBilinearInterpCase1(TestBilinearInterpOp):
    """Degenerate 1x1 output from a 7x8 input, align_corners=True."""

    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.scale = 0.
        self.align_corners = True
        self.align_mode = 1
        self.input_shape = [4, 1, 7, 8]
        self.out_h = 1
        self.out_w = 1
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestBilinearInterpCase2(TestBilinearInterpOp):
    """Upsample a [3, 3, 9, 6] input to 12x12."""

    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.align_corners = True
        self.align_mode = 1
        self.input_shape = [3, 3, 9, 6]
        self.out_h = 12
        self.out_w = 12
        self.scale = 0.
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestBilinearInterpCase3(TestBilinearInterpOp):
    """Resize a [1, 1, 32, 64] input to 64x32 (swap of H and W)."""

    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.align_corners = True
        self.align_mode = 1
        self.input_shape = [1, 1, 32, 64]
        self.out_h = 64
        self.out_w = 32
        self.scale = 0.
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestBilinearInterpCase4(TestBilinearInterpOp):
    """Like Case1 but with an explicit OutSize tensor of [2, 2]."""

    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.align_corners = True
        self.align_mode = 1
        self.input_shape = [4, 1, 7, 8]
        self.out_h = 1
        self.out_w = 1
        self.scale = 0.
        self.out_size = np.array([2, 2]).astype("int32")
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestBilinearInterpCase5(TestBilinearInterpOp):
    """Like Case2 but with an explicit OutSize tensor of [11, 11]."""

    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.align_corners = True
        self.align_mode = 1
        self.input_shape = [3, 3, 9, 6]
        self.out_h = 12
        self.out_w = 12
        self.scale = 0.
        self.out_size = np.array([11, 11]).astype("int32")
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestBilinearInterpCase6(TestBilinearInterpOp):
    """Like Case3 but with an explicit OutSize tensor of [65, 33]."""

    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.align_corners = True
        self.align_mode = 1
        self.input_shape = [1, 1, 32, 64]
        self.out_h = 64
        self.out_w = 32
        self.scale = 0.
        self.out_size = np.array([65, 33]).astype("int32")
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestBilinearInterpSame(TestBilinearInterpOp):
    """Identity resize: output size equals the input size."""

    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.align_corners = True
        self.align_mode = 1
        self.input_shape = [2, 3, 32, 64]
        self.out_h = 32
        self.out_w = 64
        self.scale = 0.
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestBilinearInterpActualShape(TestBilinearInterpOp):
    """OutSize tensor [66, 40] overrides the out_h/out_w attributes."""

    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.align_corners = True
        self.align_mode = 1
        self.input_shape = [3, 2, 32, 16]
        self.out_h = 64
        self.out_w = 32
        self.scale = 0.
        self.out_size = np.array([66, 40]).astype("int32")
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestBilinearInterpDataLayout(TestBilinearInterpOp):
    """Exercise the channels-last (NHWC) data layout."""

    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.align_corners = True
        self.align_mode = 1
        self.data_layout = "NHWC"
        self.input_shape = [2, 5, 5, 3]
        self.out_h = 2
        self.out_w = 2
        self.scale = 0.
        self.out_size = np.array([3, 3]).astype("int32")
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestBilinearInterpOtherMethod1(TestBilinearInterpOp):
    """align_corners=False with align_mode=1.

    Bug fix: the base class only invokes init_test_case(); the original
    set_align_mode() hook was never called, so this test silently re-ran
    the default configuration.  init_test_case() now applies the hook on
    top of the base defaults.
    """

    def init_test_case(self):
        super().init_test_case()
        self.set_align_mode()

    def set_align_mode(self):
        self.align_corners = False
        self.align_mode = 1
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestBilinearInterpWithMethod2(TestBilinearInterpOp):
    """align_corners=False with align_mode=0.

    Bug fix: set_align_mode() was never invoked by the base class (only
    init_test_case() is); init_test_case() now applies the hook.
    """

    def init_test_case(self):
        super().init_test_case()
        self.set_align_mode()

    def set_align_mode(self):
        self.align_corners = False
        self.align_mode = 0
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestBilinearInterpWithMethod3(TestBilinearInterpOp):
    """align_corners=True with align_mode=0.

    Bug fix: set_align_mode() was never invoked by the base class (only
    init_test_case() is); init_test_case() now applies the hook.
    """

    def init_test_case(self):
        super().init_test_case()
        self.set_align_mode()

    def set_align_mode(self):
        self.align_corners = True
        self.align_mode = 0
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestBilinearInterpScale1(TestBilinearInterpOp):
    """scale=2 doubles H and W (overriding out_h/out_w)."""

    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.align_corners = True
        self.align_mode = 1
        self.input_shape = [2, 3, 5, 7]
        self.out_h = 60
        self.out_w = 25
        self.scale = 2.
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestBilinearInterpScale2(TestBilinearInterpOp):
    """scale=1 keeps the spatial size (overriding out_h/out_w)."""

    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.align_corners = True
        self.align_mode = 1
        self.input_shape = [2, 3, 5, 7]
        self.out_h = 60
        self.out_w = 25
        self.scale = 1.
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestBilinearInterpScale3(TestBilinearInterpOp):
    """Non-integer scale factor (1.5)."""

    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.align_corners = True
        self.align_mode = 1
        self.input_shape = [2, 3, 5, 7]
        self.out_h = 60
        self.out_w = 25
        self.scale = 1.5
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestBilinearInterpZero(TestBilinearInterpOp):
    """Downscale (scale=0.2) with align_corners=False, align_mode=0."""

    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.align_corners = False
        self.align_mode = 0
        self.input_shape = [2, 3, 5, 7]
        self.out_h = 60
        self.out_w = 25
        self.scale = 0.2
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestBilinearInterpOp_attr_tensor(XPUOpTest):
    """bilinear_interp with the target size / scale supplied as tensors.

    Bug fixes:
    * The shape_by_1Dtensor / scale_by_1Dtensor defaults were assigned
      *after* init_test_case(), clobbering the values subclasses set
      there (e.g. ..._Case2 / ..._Case3).  Defaults are now set first.
    * The scale_by_1Dtensor branch never computed out_h/out_w, which
      would raise NameError once that flag actually took effect.
    """

    def setUp(self):
        # Defaults first, so init_test_case() overrides survive.
        self.out_size = None
        self.actual_shape = None
        self.shape_by_1Dtensor = False
        self.scale_by_1Dtensor = False
        self.init_test_case()
        self.op_type = "bilinear_interp"
        self.attrs = {
            'interp_method': self.interp_method,
            'align_corners': self.align_corners,
        }
        input_np = np.random.random(self.input_shape).astype("float32")
        self.inputs = {'X': input_np}
        if self.scale_by_1Dtensor:
            self.inputs['Scale'] = np.array([self.scale]).astype("float32")
            # The reference output still needs concrete target dims.
            out_h = int(self.input_shape[2] * self.scale)
            out_w = int(self.input_shape[3] * self.scale)
        elif self.scale > 0:
            out_h = int(self.input_shape[2] * self.scale)
            out_w = int(self.input_shape[3] * self.scale)
            self.attrs['scale'] = self.scale
        else:
            out_h = self.out_h
            out_w = self.out_w
        if self.shape_by_1Dtensor:
            self.inputs['OutSize'] = self.out_size
        elif self.out_size is not None:
            # Feed the target size as a list of scalar int32 tensors.
            size_tensor = []
            for index, ele in enumerate(self.out_size):
                size_tensor.append(("x" + str(index), np.ones(
                    (1)).astype('int32') * ele))
            self.inputs['SizeTensor'] = size_tensor
        self.attrs['out_h'] = self.out_h
        self.attrs['out_w'] = self.out_w
        output_np = bilinear_interp_np(input_np, out_h, out_w, self.out_size,
                                       self.actual_shape, self.align_corners)
        self.outputs = {'Out': output_np}

    def test_check_output(self):
        place = paddle.XPUPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', in_place=True)

    def init_test_case(self):
        # Default configuration; subclasses override this hook.
        self.interp_method = 'bilinear'
        self.input_shape = [2, 3, 5, 5]
        self.out_h = 3
        self.out_w = 3
        self.scale = 0.
        self.out_size = [3, 3]
        self.align_corners = True
# out_size supplied as a list of scalar SizeTensor inputs
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestBilinearInterp_attr_tensor_Case1(TestBilinearInterpOp_attr_tensor):
    """Target size [8, 12] passed via the SizeTensor list input."""

    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.align_corners = True
        self.input_shape = [3, 3, 9, 6]
        self.out_h = 12
        self.out_w = 12
        self.scale = 0.
        self.out_size = [8, 12]
# out_size is a 1-D tensor
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestBilinearInterp_attr_tensor_Case2(TestBilinearInterpOp_attr_tensor):
    """Target size passed as a single 1-D OutSize tensor."""

    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.align_corners = True
        self.shape_by_1Dtensor = True
        self.input_shape = [3, 2, 32, 16]
        self.out_h = 64
        self.out_w = 32
        self.scale = 0.
        self.out_size = np.array([66, 40]).astype("int32")
# scale is a 1-D tensor
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestBilinearInterp_attr_tensor_Case3(TestBilinearInterpOp_attr_tensor):
    """Scale factor passed as a 1-D Scale tensor."""

    def init_test_case(self):
        self.interp_method = 'bilinear'
        self.align_corners = True
        self.scale_by_1Dtensor = True
        self.input_shape = [3, 2, 32, 16]
        self.out_h = 64
        self.out_w = 32
        self.scale = 2.0
        self.out_size = None
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestBilinearInterpOpAPI(unittest.TestCase):
    def test_case(self):
        """Exercise fluid.layers.resize_bilinear with every way of
        specifying the target size; all five outputs must match the
        12x12 numpy reference."""
        x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32")
        dim = fluid.data(name="dim", shape=[1], dtype="int32")
        shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
        actual_size = fluid.data(name="actual_size", shape=[2], dtype="int32")
        scale_tensor = fluid.data(
            name="scale_tensor", shape=[1], dtype="float32")
        # Five equivalent ways of requesting a 12x12 output:
        # literal list, tensor element, shape tensor, actual_shape, scale.
        out1 = fluid.layers.resize_bilinear(x, out_shape=[12, 12])
        out2 = fluid.layers.resize_bilinear(x, out_shape=[12, dim])
        out3 = fluid.layers.resize_bilinear(x, out_shape=shape_tensor)
        out4 = fluid.layers.resize_bilinear(
            x, out_shape=[4, 4], actual_shape=actual_size)
        out5 = fluid.layers.resize_bilinear(x, scale=scale_tensor)
        x_data = np.random.random((2, 3, 6, 6)).astype("float32")
        dim_data = np.array([12]).astype("int32")
        shape_data = np.array([12, 12]).astype("int32")
        actual_size_data = np.array([12, 12]).astype("int32")
        scale_data = np.array([2.0]).astype("float32")
        place = core.XPUPlace(0)
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        results = exe.run(fluid.default_main_program(),
                          feed={
                              "x": x_data,
                              "dim": dim_data,
                              "shape_tensor": shape_data,
                              "actual_size": actual_size_data,
                              "scale_tensor": scale_data
                          },
                          fetch_list=[out1, out2, out3, out4, out5],
                          return_numpy=True)
        # CPU reference for the expected 12x12 result.
        expect_res = bilinear_interp_np(
            x_data, out_h=12, out_w=12, align_corners=True)
        for res in results:
            self.assertTrue(np.allclose(res, expect_res))
if __name__ == "__main__":
    # Run all XPU bilinear_interp tests when executed directly.
    unittest.main()
| |
import scrapy
import hashlib
import random
from random import randint
from games.items import Game
from games.items import Platform
from games.items import Screenshot
from games.items import Shot
class ShotSpider(scrapy.Spider):
    """Fetch one screenshot page per line of the 'sem_shots' input file and
    append "goid;sha1;url;description" records to saida.txt."""
    name = "shots"
    start_url = "http://www.mobygames.com"
    saida = None  # output file handle, opened in start_requests()

    def parseShot(self, response):
        """Extract the screenshot URL and description, write one record."""
        goid = response.meta['goid']
        url = response.xpath("//div[@class='screenshot']/img/@src").extract()
        if len(url) == 0:
            # Fall back to the OpenGraph image when no inline screenshot exists.
            url = response.xpath("//meta[contains(@property,'og:image')]/@content").extract()
        desc = response.xpath("//meta[contains(@name,'description')]/@content").extract()
        # BUG FIX: hashlib.sha1() requires bytes; the original passed a str,
        # which raises TypeError on Python 3.
        code = hashlib.sha1(url[0].encode('utf-8')).hexdigest()
        # Robustness: pages without a description meta tag used to IndexError.
        desc_text = desc[0] if desc else ''
        linha = goid + ";" + code + ";" + url[0] + ";" + desc_text + "\n"
        self.saida.write(linha)

    def start_requests(self):
        """Yield one request per "<goid> <path>" line of 'sem_shots'."""
        self.saida = open('saida.txt', 'w')
        # Context manager guarantees the input file is closed even if the
        # generator is abandoned early (the original close() could be skipped).
        with open('sem_shots', 'r') as arq:
            for linha in arq:
                # NOTE(review): tokens[1] keeps the trailing newline of the
                # line; preserved as-is since the original behaved the same.
                tokens = linha.split(' ')
                req = scrapy.Request(url=self.start_url + tokens[1], callback=self.parseShot)
                req.meta['goid'] = tokens[0]
                yield req
class GameSpider(scrapy.Spider):
    """Crawl random mobygames.com list pages, then each game page, its
    platform pages, screenshot indexes and individual screenshots."""
    name = "games"
    root = "http://www.mobygames.com"
    # Previously crawled list-page chunks: 0-100, 101-200, 201-300.
    # Remaining random range: 301 .. 66175.

    def start_requests(self):
        """Request 500 randomly chosen list pages from the unvisited range."""
        urls = []
        pages = []
        count = 0
        # Valid list-page offsets satisfy (n - 1) % 25 == 0.
        # NOTE(review): the original indentation was ambiguous; this reading
        # collects exactly 500 valid offsets before proceeding.
        while count < 500:
            n = randint(301, 66175)
            if (n - 1) % 25 == 0:
                pages.append(n)
                count += 1
        for p in pages:
            st = 'http://www.mobygames.com/browse/games/offset,' + str(p) + '/so,0a/list-games/'
            urls.append(st)
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parseShot(self, response):
        """Attach the full-size screenshot URL to its Screenshot item."""
        self.logger.info("Parse each shot")
        ind = response.meta['platform_index']
        screen_ind = response.meta['screen_index']
        screen = Screenshot(response.meta['screen'])
        g = Game(response.meta['game'])
        p = g['platforms'][ind]
        url = response.xpath("//div[@class='screenshot']/img/@src").extract()
        if len(url) == 0:
            # Fall back to the OpenGraph image when no inline screenshot exists.
            url = response.xpath("//meta[contains(@property,'og:image')]/@content").extract()
        if len(url) > 0:
            temp = []
            self.logger.info(url)
            shot = Shot()
            shot['image_urls'] = [self.root + url[0]]
            # BUG FIX: hashlib.sha1() requires bytes; the original passed a
            # str, which raises TypeError on Python 3.
            shot['code'] = hashlib.sha1((self.root + url[0]).encode('utf-8')).hexdigest()
            temp.append(shot)
            screen['shots'] = temp
            p['screens'][screen_ind] = screen
            yield shot
        yield g

    def parseScreenshots(self, response):
        """Create Screenshot items from a thumbnail index page and follow each."""
        self.logger.info("Parse Screenshots")
        g = Game(response.meta['game'])
        ind = response.meta['platform_index']
        p = g['platforms'][ind]
        urls = response.xpath("//div[@class='thumbnail-image-wrapper']/a/@href").extract()
        self.logger.info(p)
        if not urls:
            return
        # NOTE(review): assumes one caption per thumbnail; if the page has
        # fewer captions than thumbnails this will IndexError — confirm.
        captions = response.xpath("//div[@class='thumbnail-caption']/small/text()").extract()
        temp = []
        for i in range(0, len(urls)):
            s = Screenshot()
            s['url'] = urls[i]
            s['title'] = captions[i]
            temp.append(s)
        p['screens'] = temp
        for i in range(0, len(urls)):
            req = scrapy.Request(url=self.root + urls[i], callback=self.parseShot)
            req.meta['screen'] = temp[i]
            req.meta['game'] = g
            req.meta['platform_index'] = ind
            req.meta['screen_index'] = i
            yield req

    def parsePlatform(self, response):
        """Fill in a platform's release details, then follow its screenshots."""
        self.logger.info("Parse Platform")
        g = Game(response.meta['game'])
        ind = response.meta['platform_index']
        p = g['platforms'][ind]
        # The divs of the left-hand block ("coreGameRelease").
        left = response.xpath("//div[@id='coreGameRelease']/div")
        p.setCoreGameRelease(left)
        g['platforms'][ind] = dict(p)
        screens = response.xpath("//div[@class='rightPanelHeader']/ul/li/a[contains(@href, '/screenshot')]/@href").extract()
        for s in screens:
            req = scrapy.Request(url=self.root + s, callback=self.parseScreenshots)
            req.meta['game'] = g
            req.meta['platform_index'] = ind
            yield req

    def parseGame(self, response):
        """Build a Game item and fan out one request per platform."""
        # Two known-broken game pages are skipped outright.
        if response.url == 'http://www.mobygames.com/game/_' or response.url == 'http://www.mobygames.com/game/07-zgo-si':
            return
        self.logger.info("Parse Game")
        g = Game()
        g['url'] = response.url
        g['name'] = response.xpath("//div[@class='rightPanelHeader']/h1/a/text()").extract()[0]
        self.logger.info("Parse Game " + g['name'])
        # The divs of the left-hand block ("coreGameRelease").
        left = response.xpath("//div[@id='coreGameRelease']/div")
        g.setCoreGameRelease(left)
        # The divs of the right-hand block ("coreGameGenre").
        right = response.xpath("//div[@id='coreGameGenre']/div/div")
        g.setCoreGameGenre(right)
        # One request per platform; a single-platform game reuses this page.
        if len(g['platforms']) == 1:
            self.logger.info("Parse Game - One platform")
            req = scrapy.Request(url=response.url, callback=self.parsePlatform, dont_filter=True)
            req.meta['game'] = g
            req.meta['platform_index'] = 0
            yield req
        else:
            ind = 0
            for p in g['platforms']:
                self.logger.info("Parse Game - Platform - " + p['name'])
                req = scrapy.Request(url=self.root + p['url'], callback=self.parsePlatform)
                req.meta['platform_index'] = ind
                req.meta['game'] = g
                ind = ind + 1
                yield req

    def parse(self, response):
        """Pick up to five random games from a list page."""
        self.logger.info("Parse List Game")
        linksGames = response.xpath("//div[@class='molist']/table//a[contains(@href, '/game/')]/@href").extract()
        random.shuffle(linksGames)
        # BUG FIX: the original indexed linksGames[i] for i in range(5) and
        # raised IndexError on pages with fewer than five game links.
        for link in linksGames[:5]:
            yield scrapy.Request(url=self.root + link, callback=self.parseGame)
'''
Usage:
scrapy crawl onegame -a idgame=state-of-emergency
scrapy crawl onegame -a url=http://www.mobygames.com/game/state-of-emergency
'''
class OneGameSpider(GameSpider):
    """Crawl a single game, selected by -a idgame=<slug> or -a url=<full URL>."""
    name = "onegame"
    start_url = ""

    def __init__(self, url=None, idgame='', *args, **kwargs):
        super(OneGameSpider, self).__init__(*args, **kwargs)
        # Idiom fix: compare against None with `is`, not `==`.
        if url is None:
            self.start_url = "http://www.mobygames.com/game/" + idgame
        else:
            self.start_url = url

    def start_requests(self):
        # Skip the list-page crawl entirely; go straight to the game page.
        yield scrapy.Request(url=self.start_url, callback=self.parseGame)
| |
#!/usr/bin/python
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# chrome_tests.py
''' Runs various chrome tests through valgrind_test.py.
This file is a copy of ../purify/chrome_tests.py. Eventually, it would be nice
to merge these two files.
'''
import glob
import logging
import optparse
import os
import stat
import sys
import google.logging_utils
import google.path_utils
# Import the platform_utils up in the layout tests which have been modified to
# work under non-Windows platforms instead of the ones that are in the
# tools/python/google directory. (See chrome_tests.sh which sets PYTHONPATH
# correctly.)
#
# TODO(erg): Copy/Move the relevant functions from the layout_package version
# of platform_utils back up to google.platform_utils
# package. http://crbug.com/6164
import layout_package.platform_utils
import common
class TestNotFound(Exception): pass
def Dir2IsNewer(dir1, dir2):
    """Return True if dir2 exists and has a strictly newer mtime than dir1.

    A missing/None dir2 is never newer; given an existing dir2, a
    missing/None dir1 always counts as older.
    """
    # Idiom fix: identity comparison with None; direct mtime comparison
    # instead of testing the sign of a difference.
    if dir2 is None or not os.path.isdir(dir2):
        return False
    if dir1 is None or not os.path.isdir(dir1):
        return True
    return os.stat(dir2)[stat.ST_MTIME] > os.stat(dir1)[stat.ST_MTIME]
def FindNewestDir(dirs):
    """Return the most recently modified directory in *dirs* (None if empty)."""
    newest = None
    for candidate in dirs:
        if Dir2IsNewer(newest, candidate):
            newest = candidate
    return newest
def File2IsNewer(file1, file2):
    """Return True if file2 exists and has a strictly newer mtime than file1.

    A missing/None file2 is never newer; given an existing file2, a
    missing/None file1 always counts as older.
    """
    # Idiom fix: identity comparison with None; direct mtime comparison
    # instead of testing the sign of a difference.
    if file2 is None or not os.path.isfile(file2):
        return False
    if file1 is None or not os.path.isfile(file1):
        return True
    return os.stat(file2)[stat.ST_MTIME] > os.stat(file1)[stat.ST_MTIME]
def FindDirContainingNewestFile(dirs, file):
    """Return the directory in *dirs* holding the newest copy of *file*.

    Logs an error and exits the process if no directory contains it.
    """
    best_dir, best_file = None, None
    for candidate in dirs:
        candidate_file = os.path.join(candidate, file)
        if File2IsNewer(best_file, candidate_file):
            best_dir, best_file = candidate, candidate_file
    if best_dir is None:
        logging.error("cannot find file %s anywhere, have you built it?" % file)
        sys.exit(-1)
    return best_dir
class ChromeTests:
    '''This class is derived from the chrome_tests.py file in ../purify/.

    Maps test short names to runner methods and builds valgrind_test.py
    command lines for each suite.  NOTE(review): TestLayout uses the
    Python 2-only `except E, (a, b)` syntax, so this file requires
    Python 2.
    '''

    def __init__(self, options, args, test):
        # The known list of tests.
        # Recognise the original abbreviations as well as full executable names.
        self._test_list = {
            "base": self.TestBase, "base_unittests": self.TestBase,
            "googleurl": self.TestGURL, "googleurl_unittests": self.TestGURL,
            "ipc": self.TestIpc, "ipc_tests": self.TestIpc,
            "layout": self.TestLayout, "layout_tests": self.TestLayout,
            "media": self.TestMedia, "media_unittests": self.TestMedia,
            "net": self.TestNet, "net_unittests": self.TestNet,
            "printing": self.TestPrinting, "printing_unittests": self.TestPrinting,
            "startup": self.TestStartup, "startup_tests": self.TestStartup,
            "test_shell": self.TestTestShell, "test_shell_tests": self.TestTestShell,
            "ui": self.TestUI, "ui_tests": self.TestUI,
            "unit": self.TestUnit, "unit_tests": self.TestUnit,
            "app": self.TestApp, "app_unittests": self.TestApp,
        }
        if test not in self._test_list:
            raise TestNotFound("Unknown test: %s" % test)
        self._options = options
        self._args = args
        self._test = test
        script_dir = google.path_utils.ScriptDir()
        utility = layout_package.platform_utils.PlatformUtility(script_dir)
        # Compute the top of the tree (the "source dir") from the script dir (where
        # this script lives). We assume that the script dir is in tools/valgrind/
        # relative to the top of the tree.
        self._source_dir = os.path.dirname(os.path.dirname(script_dir))
        # since this path is used for string matching, make sure it's always
        # an absolute Windows-style path
        self._source_dir = utility.GetAbsolutePath(self._source_dir)
        valgrind_test = os.path.join(script_dir, "valgrind_test.py")
        self._command_preamble = ["python", valgrind_test,
                                 "--source_dir=%s" % (self._source_dir)]

    def _DefaultCommand(self, module, exe=None, valgrind_test_args=None):
        '''Generates the default command array that most tests will use.'''
        module_dir = os.path.join(self._source_dir, module)
        # We need multiple data dirs, the current script directory and a module
        # specific one. The global suppression file lives in our directory, and the
        # module specific suppression file lives with the module.
        self._data_dirs = [google.path_utils.ScriptDir()]
        if module == "chrome":
            # unfortunately, not all modules have the same directory structure
            self._data_dirs.append(os.path.join(module_dir, "test", "data",
                                                "valgrind"))
        else:
            self._data_dirs.append(os.path.join(module_dir, "data", "valgrind"))
        if not self._options.build_dir:
            # No build dir given: probe the known output locations and pick
            # the freshest one (or the one containing the freshest exe).
            dirs = [
                os.path.join(self._source_dir, "xcodebuild", "Debug"),
                os.path.join(self._source_dir, "sconsbuild", "Debug"),
                os.path.join(self._source_dir, "out", "Debug"),
            ]
            if exe:
                self._options.build_dir = FindDirContainingNewestFile(dirs, exe)
            else:
                self._options.build_dir = FindNewestDir(dirs)
        cmd = list(self._command_preamble)
        for directory in self._data_dirs:
            suppression_file = os.path.join(directory, "suppressions.txt")
            if os.path.exists(suppression_file):
                cmd.append("--suppressions=%s" % suppression_file)
            # Platform specific suppression
            # NOTE(review): raises KeyError on platforms other than
            # darwin/linux2.
            suppression_platform = {
                'darwin': 'mac',
                'linux2': 'linux'
            }[sys.platform]
            suppression_file_platform = \
                os.path.join(directory, 'suppressions_%s.txt' % suppression_platform)
            if os.path.exists(suppression_file_platform):
                cmd.append("--suppressions=%s" % suppression_file_platform)
        # Forward the relevant command-line flags to valgrind_test.py.
        if self._options.baseline:
            cmd.append("--baseline")
        if self._options.verbose:
            cmd.append("--verbose")
        if self._options.show_all_leaks:
            cmd.append("--show_all_leaks")
        if self._options.track_origins:
            cmd.append("--track_origins")
        if self._options.generate_dsym:
            cmd.append("--generate_dsym")
        if self._options.generate_suppressions:
            cmd.append("--generate_suppressions")
        if self._options.custom_valgrind_command:
            cmd.append("--custom_valgrind_command=%s"
                       % self._options.custom_valgrind_command)
        if valgrind_test_args != None:
            for arg in valgrind_test_args:
                cmd.append(arg)
        if exe:
            cmd.append(os.path.join(self._options.build_dir, exe))
            # Valgrind runs tests slowly, so slow tests hurt more; show elapased time
            # so we can find the slowpokes.
            cmd.append("--gtest_print_time")
        return cmd

    def Run(self):
        ''' Runs the test specified by command-line argument --test '''
        logging.info("running test %s" % (self._test))
        return self._test_list[self._test]()

    def _ReadGtestFilterFile(self, name, cmd):
        '''Read a file which is a list of tests to filter out with --gtest_filter
        and append the command-line option to cmd.
        '''
        filters = []
        for directory in self._data_dirs:
            platform_suffix = {'darwin': 'mac',
                               'linux2': 'linux'}[sys.platform]
            # Both the generic and platform-specific filter files apply.
            gtest_filter_files = [
                os.path.join(directory, name + ".gtest.txt"),
                os.path.join(directory, name + ".gtest_%s.txt" % platform_suffix)]
            for filename in gtest_filter_files:
                if os.path.exists(filename):
                    logging.info("reading gtest filters from %s" % filename)
                    # NOTE(review): file handle is never explicitly closed.
                    f = open(filename, 'r')
                    for line in f.readlines():
                        # Skip comments and blank lines.
                        if line.startswith("#") or line.startswith("//") or line.isspace():
                            continue
                        line = line.rstrip()
                        filters.append(line)
        gtest_filter = self._options.gtest_filter
        if len(filters):
            if gtest_filter:
                # Append the exclusions to any user-supplied filter,
                # inserting the '-' separator if it is not there yet.
                gtest_filter += ":"
                if gtest_filter.find("-") < 0:
                    gtest_filter += "-"
            else:
                gtest_filter = "-"
            gtest_filter += ":".join(filters)
        if gtest_filter:
            cmd.append("--gtest_filter=%s" % gtest_filter)

    def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
        # Build the valgrind wrapper command, apply gtest filters, then run.
        cmd = self._DefaultCommand(module, name, valgrind_test_args)
        self._ReadGtestFilterFile(name, cmd)
        if cmd_args:
            cmd.extend(["--"])
            cmd.extend(cmd_args)
        return common.RunSubprocess(cmd, 0)

    def TestBase(self):
        return self.SimpleTest("base", "base_unittests")

    def TestGURL(self):
        return self.SimpleTest("chrome", "googleurl_unittests")

    def TestMedia(self):
        return self.SimpleTest("chrome", "media_unittests")

    def TestPrinting(self):
        return self.SimpleTest("chrome", "printing_unittests")

    def TestIpc(self):
        return self.SimpleTest("chrome", "ipc_tests",
                               valgrind_test_args=["--trace_children"])

    def TestNet(self):
        return self.SimpleTest("net", "net_unittests")

    def TestStartup(self):
        # We don't need the performance results, we're just looking for pointer
        # errors, so set number of iterations down to the minimum.
        os.putenv("STARTUP_TESTS_NUMCYCLES", "1")
        logging.info("export STARTUP_TESTS_NUMCYCLES=1");
        return self.SimpleTest("chrome", "startup_tests",
                               valgrind_test_args=[
                                   "--trace_children",
                                   "--indirect"])

    def TestTestShell(self):
        return self.SimpleTest("webkit", "test_shell_tests")

    def TestUnit(self):
        return self.SimpleTest("chrome", "unit_tests")

    def TestApp(self):
        return self.SimpleTest("chrome", "app_unittests")

    def TestUI(self):
        return self.SimpleTest("chrome", "ui_tests",
                               valgrind_test_args=[
                                   "--timeout=120000",
                                   "--trace_children",
                                   "--indirect"],
                               cmd_args=[
                                   "--ui-test-timeout=120000",
                                   "--ui-test-action-timeout=80000",
                                   "--ui-test-action-max-timeout=180000",
                                   "--ui-test-terminate-timeout=60000"])

    def TestLayoutChunk(self, chunk_num, chunk_size):
        # Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
        # list of tests. Wrap around to beginning of list at end.
        # If chunk_size is zero, run all tests in the list once.
        # If a text file is given as argument, it is used as the list of tests.
        #
        # Build the ginormous commandline in 'cmd'.
        # It's going to be roughly
        #   python valgrind_test.py ... python run_webkit_tests.py ...
        # but we'll use the --indirect flag to valgrind_test.py
        # to avoid valgrinding python.
        # Start by building the valgrind_test.py commandline.
        cmd = self._DefaultCommand("webkit")
        cmd.append("--trace_children")
        cmd.append("--indirect")
        # Now build script_cmd, the run_webkits_tests.py commandline
        # Store each chunk in its own directory so that we can find the data later
        chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
        test_shell = os.path.join(self._options.build_dir, "test_shell")
        out_dir = os.path.join(google.path_utils.ScriptDir(), "latest")
        out_dir = os.path.join(out_dir, chunk_dir)
        # Clear any stale result files from a previous run of this chunk.
        if os.path.exists(out_dir):
            old_files = glob.glob(os.path.join(out_dir, "*.txt"))
            for f in old_files:
                os.remove(f)
        else:
            os.makedirs(out_dir)
        script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
                              "run_webkit_tests.py")
        script_cmd = ["python", script, "--run-singly", "-v",
                      "--noshow-results", "--time-out-ms=200000",
                      "--nocheck-sys-deps"]
        # Pass build mode to run_webkit_tests.py. We aren't passed it directly,
        # so parse it out of build_dir. run_webkit_tests.py can only handle
        # the two values "Release" and "Debug".
        # TODO(Hercules): unify how all our scripts pass around build mode
        # (--mode / --target / --build_dir / --debug)
        if self._options.build_dir.endswith("Debug"):
            script_cmd.append("--debug");
        if (chunk_size > 0):
            script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
        if len(self._args):
            # if the arg is a txt file, then treat it as a list of tests
            if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
                script_cmd.append("--test-list=%s" % self._args[0])
            else:
                script_cmd.extend(self._args)
        self._ReadGtestFilterFile("layout", script_cmd)
        # Now run script_cmd with the wrapper in cmd
        cmd.extend(["--"])
        cmd.extend(script_cmd)
        ret = common.RunSubprocess(cmd, 0)
        return ret

    def TestLayout(self):
        # A "chunk file" is maintained in the local directory so that each test
        # runs a slice of the layout tests of size chunk_size that increments with
        # each run. Since tests can be added and removed from the layout tests at
        # any time, this is not going to give exact coverage, but it will allow us
        # to continuously run small slices of the layout tests under purify rather
        # than having to run all of them in one shot.
        chunk_size = self._options.num_tests
        if (chunk_size == 0):
            return self.TestLayoutChunk(0, 0)
        chunk_num = 0
        chunk_file = os.path.join("valgrind_layout_chunk.txt")
        logging.info("Reading state from " + chunk_file)
        try:
            f = open(chunk_file)
            if f:
                str = f.read()  # NOTE(review): shadows the builtin 'str'
                if len(str):
                    chunk_num = int(str)
                # This should be enough so that we have a couple of complete runs
                # of test data stored in the archive (although note that when we
                # loop we are almost guaranteed not to be at the end of the list)
                if chunk_num > 10000:
                    chunk_num = 0
                f.close()
        except IOError, (errno, strerror):
            logging.error("error reading from file %s (%d, %s)" % (chunk_file,
                          errno, strerror))
        ret = self.TestLayoutChunk(chunk_num, chunk_size)
        # Wait until after the test runs to completion to write out the new chunk
        # number. This way, if the bot is killed, we'll start running again from
        # the current chunk rather than skipping it.
        logging.info("Saving state to " + chunk_file)
        try:
            f = open(chunk_file, "w")
            chunk_num += 1
            f.write("%d" % chunk_num)
            f.close()
        except IOError, (errno, strerror):
            logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
                          strerror))
        # Since we're running small chunks of the layout tests, it's important to
        # mark the ones that have errors in them. These won't be visible in the
        # summary list for long, but will be useful for someone reviewing this bot.
        return ret
def _main(_):
    # Parse command-line options, configure logging, and run each requested
    # test in sequence, stopping at the first failing suite.
    parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
                                   "[-t <test> ...]")
    parser.disable_interspersed_args()
    parser.add_option("-b", "--build_dir",
                      help="the location of the output of the compiler output")
    parser.add_option("-t", "--test", action="append",
                      help="which test to run")
    parser.add_option("", "--baseline", action="store_true", default=False,
                      help="generate baseline data instead of validating")
    parser.add_option("", "--gtest_filter",
                      help="additional arguments to --gtest_filter")
    parser.add_option("-v", "--verbose", action="store_true", default=False,
                      help="verbose output - enable debug log messages")
    parser.add_option("", "--show_all_leaks", action="store_true",
                      default=False,
                      help="also show even less blatant leaks")
    parser.add_option("", "--track_origins", action="store_true",
                      default=False,
                      help="Show whence uninit bytes came. 30% slower.")
    parser.add_option("", "--generate_dsym", action="store_true",
                      default=False,
                      help="Generate .dSYM file on Mac if needed. Slow!")
    parser.add_option("", "--generate_suppressions", action="store_true",
                      default=False,
                      help="Skip analysis and generate suppressions")
    parser.add_option("", "--custom_valgrind_command",
                      help="Use custom valgrind command and options")
    # My machine can do about 120 layout tests/hour in release mode.
    # Let's do 30 minutes worth per run.
    # The CPU is mostly idle, so perhaps we can raise this when
    # we figure out how to run them more efficiently.
    parser.add_option("-n", "--num_tests", default=60, type="int",
                      help="for layout tests: # of subtests per run. 0 for all.")
    options, args = parser.parse_args()
    if options.verbose:
        google.logging_utils.config_root(logging.DEBUG)
    else:
        google.logging_utils.config_root()
    if not options.test or not len(options.test):
        parser.error("--test not specified")
    for t in options.test:
        tests = ChromeTests(options, args, t)
        ret = tests.Run()
        # First non-zero return aborts the remaining suites.
        if ret: return ret
    return 0
if __name__ == "__main__":
    # Propagate the first failing suite's return code as the exit status.
    ret = _main(sys.argv)
    sys.exit(ret)
| |
"""
Object for isochrone storage and basic calculations.
NOTE: only absolute magnitudes are used in the Isochrone class
ADW: There are some complicated issues here. As we are generally using a
forward-folding likelihood technique, what we would like to do is to
convolve the isochrone model with the survey response functions to
derive a model of the observed distribution of objects given a
specific true isochrone. This convolution involves two distinct parts:
1) the object completeness as a function of delta-magnitude
(difference between magnitude and local limiting magnitude), 2) the
magnitude dispersion (magnitude uncertainty) as a function of
delta-magnitude.
Since the survey response (i.e., depth) changes on the pixel scale,
this would mean deriving the convolved isochrone for every pixel in
the interior of the ROI. Assuming a magnitude binning of 70x70, and a
set of 3000 interior pixels, this is a 70x70x3000 matrix. However,
the issue is that to generate this array you need to sample the
isochrone at roughly 1000 points and sum these points. Needless to
say it is fairly intensive to calculate and store this matrix. Things
become much more reasonable if you only calculate this matrix once for
each unique magnitude limit, but again this becomes difficult because
you need each unique limit in both magnitudes.
"""
# FIXME: Need to vectorize CMD and MMD calculation
import sys
import os
from abc import abstractmethod
import collections
from collections import OrderedDict as odict
import inspect
import glob
from functools import wraps
import numpy as np
import scipy.interpolate
import scipy.stats
import scipy.spatial
import scipy.ndimage as ndimage
import ugali.analysis.imf
from ugali.analysis.model import Model, Parameter
from ugali.utils.stats import norm_cdf
from ugali.utils.shell import mkdir, get_ugali_dir, get_iso_dir
from ugali.utils.projector import mod2dist
from ugali.utils.config import Config
from ugali.utils.logger import logger
############################################################
def sum_mags(mags, weights=None):
    """
    Sum an array of magnitudes in flux space.

    Parameters:
    -----------
    mags    : array of magnitudes
    weights : optional per-magnitude weights (i.e. from a pdf)

    Returns:
    --------
    sum_mag : the summed magnitude of all the stars
    """
    flux = 10 ** (-np.asarray(mags) / 2.5)
    total_flux = np.sum(flux) if weights is None else np.sum(weights * flux)
    return -2.5 * np.log10(total_flux)
def jester_mag_v(g_sdss, r_sdss):
    """
    Convert SDSS g,r magnitudes to Johnson V using Table 1 of Jester
    et al. 2005 [astro-ph/0506022], valid for stars with R-I < 1.15:

        V = g_sdss - 0.59*(g_sdss - r_sdss) - 0.01

    Parameters:
    -----------
    g_sdss : SDSS g-band magnitude
    r_sdss : SDSS r-band magnitude

    Returns:
    --------
    mag_v : Johnson V-band magnitude
    """
    color = g_sdss - r_sdss
    return g_sdss - 0.59 * color - 0.01
class IsochroneModel(Model):
    """ Abstract base class for dealing with isochrone models. """
    _params = odict([
        ('distance_modulus', Parameter(15.0, [10.0, 30.0]) ),
        ('age', Parameter(10.0, [0.1, 15.0]) ), # Gyr
        ('metallicity', Parameter(0.0002, [0.0,0.02]) ),
    ])
    _mapping = odict([
        ('mod','distance_modulus'),
        ('a','age'),
        ('z','metallicity'),
    ])
    # ADW: Careful, there are weird things going on with adding
    # defaults to subclasses... When converted to a dict, the
    # last duplicate entry is filled.
    # ADW: Need to explicitly call '_cache' when updating these parameters.
    defaults = (
        ('survey','des','Name of survey filter system'),
        ('dirname',get_iso_dir(),'Directory name for isochrone files'),
        ('band_1','g','Field name for magnitude one'),
        ('band_2','r','Field name for magnitude two'),
        ('band_1_detection',True,'Band one is detection band'),
        ('imf_type','Chabrier2003','Initial mass function'),
        ('hb_stage',None,'Horizontal branch stage name'),
        ('hb_spread',0.0,'Intrinisic spread added to horizontal branch'),
    )
    def __init__(self, **kwargs):
        self._setup(**kwargs)
        super(IsochroneModel,self).__init__(**kwargs)
    def _setup(self, **kwargs):
        """Set instance attributes from `defaults`, overridden by kwargs."""
        # ADW: Should we add a warning for kwargs not in defaults (and
        # thus not set)?
        defaults = odict([(d[0],d[1]) for d in self.defaults])
        # Override defaults with any recognized keyword arguments
        for item in list(kwargs.items()):
            if item[0] in defaults:
                defaults.update([item])
        for k,v in list(defaults.items()):
            setattr(self,k,v)
        self.imf = ugali.analysis.imf.factory(defaults['imf_type'])
        self.index = None
    def _parse(self,filename):
        """Parse an isochrone file; must be implemented by subclasses."""
        msg = "Not implemented for base class"
        # NotImplementedError (an Exception subclass) is the idiomatic
        # signal for an abstract method; 'except Exception' still catches it.
        raise NotImplementedError(msg)
    def get_dirname(self):
        """Return the survey-specific isochrone directory (env vars expanded)."""
        return os.path.expandvars(self.dirname.format(survey=self.survey))
    def todict(self):
        """Convert to a dict, including only non-default attributes."""
        ret = super(IsochroneModel,self).todict()
        defaults = odict([(d[0],d[1]) for d in self.defaults])
        for k,v in defaults.items():
            if getattr(self,k) != v: ret[k] = getattr(self,k)
        return ret
    @property
    def distance(self):
        """ Convert to physical distance (kpc) """
        return mod2dist(self.distance_modulus)
    def sample(self, mode='data', mass_steps=1000, mass_min=0.1, full_data_range=False):
        """Sample the isochrone in steps of mass interpolating between
        the originally defined isochrone points.

        Parameters:
        -----------
        mode : sampling mode (only 'data' is implemented)
        mass_steps : number of mass steps to sample
        mass_min : Minimum mass [Msun]
        full_data_range : sample the full isochrone data range (incl. post-AGB)

        Returns:
        --------
        mass_init : Initial mass of each point
        mass_pdf : PDF of number of stars in each point
        mass_act : Actual (current mass) of each stellar point
        mag_1 : Array of absolute magnitudes in first band (no distance modulus applied)
        mag_2 : Array of absolute magnitudes in second band (no distance modulus applied)
        """
        if full_data_range:
            # ADW: Might be depricated 02/10/2015
            # Generate points over full isochrone data range
            select = slice(None)
        else:
            # Not generating points for the post-AGB stars,
            # but still count those stars towards the normalization
            select = slice(self.index)
        mass_steps = int(mass_steps)
        mass_init = self.mass_init[select]
        mass_act = self.mass_act[select]
        mag_1 = self.mag_1[select]
        mag_2 = self.mag_2[select]
        # ADW: Assume that the isochrones are pre-sorted by mass_init
        # This avoids some numerical instability from points that have the same
        # mass_init value (discontinuities in the isochrone).
        # ADW: Might consider using np.interp for speed
        mass_act_interpolation = scipy.interpolate.interp1d(mass_init, mass_act,assume_sorted=True)
        mag_1_interpolation = scipy.interpolate.interp1d(mass_init, mag_1,assume_sorted=True)
        mag_2_interpolation = scipy.interpolate.interp1d(mass_init, mag_2,assume_sorted=True)
        # ADW: Any other modes possible?
        if mode=='data':
            # Mass interpolation with uniform coverage between data points from isochrone file
            mass_interpolation = scipy.interpolate.interp1d(np.arange(len(mass_init)), mass_init)
            mass_array = mass_interpolation(np.linspace(0, len(mass_init)-1, mass_steps+1))
            d_mass = mass_array[1:] - mass_array[:-1]
            # Geometric mean of adjacent sample points
            mass_init_array = np.sqrt(mass_array[1:] * mass_array[:-1])
            mass_pdf_array = d_mass * self.imf.pdf(mass_init_array, log_mode=False)
            mass_act_array = mass_act_interpolation(mass_init_array)
            mag_1_array = mag_1_interpolation(mass_init_array)
            mag_2_array = mag_2_interpolation(mass_init_array)
        # Horizontal branch dispersion
        if self.hb_spread and (self.stage==self.hb_stage).any():
            logger.debug("Performing dispersion of horizontal branch...")
            mass_init_min = self.mass_init[self.stage==self.hb_stage].min()
            mass_init_max = self.mass_init[self.stage==self.hb_stage].max()
            cut = (mass_init_array>mass_init_min)&(mass_init_array<mass_init_max)
            # collections.abc.Iterable for Python 3.10+ compatibility
            # (the bare collections.Iterable alias was removed)
            if isinstance(self.hb_spread,collections.abc.Iterable):
                # Explicit dispersion spacing
                dispersion_array = self.hb_spread
                n = len(dispersion_array)
            else:
                # Default dispersion spacing
                dispersion = self.hb_spread
                spacing = 0.025
                n = int(round(2.0*self.hb_spread/spacing))
                if n % 2 != 1: n += 1
                dispersion_array = np.linspace(-dispersion, dispersion, n)
            # Reset original values
            mass_pdf_array[cut] = mass_pdf_array[cut] / float(n)
            # Isochrone values for points on the HB
            mass_init_hb = mass_init_array[cut]
            mass_pdf_hb = mass_pdf_array[cut]
            mass_act_hb = mass_act_array[cut]
            mag_1_hb = mag_1_array[cut]
            mag_2_hb = mag_2_array[cut]
            # Add dispersed values
            for dispersion in dispersion_array:
                if dispersion == 0.: continue
                msg = 'Dispersion=%-.4g, HB Points=%i, Iso Points=%i'%(dispersion,cut.sum(),len(mass_init_array))
                logger.debug(msg)
                mass_init_array = np.append(mass_init_array, mass_init_hb)
                mass_pdf_array = np.append(mass_pdf_array, mass_pdf_hb)
                mass_act_array = np.append(mass_act_array, mass_act_hb)
                mag_1_array = np.append(mag_1_array, mag_1_hb + dispersion)
                mag_2_array = np.append(mag_2_array, mag_2_hb + dispersion)
        # Note that the mass_pdf_array is not generally normalized to unity
        # since the isochrone data range typically covers a different range
        # of initial masses
        #mass_pdf_array /= np.sum(mass_pdf_array) # ORIGINAL
        # Normalize to the number of stars in the satellite with mass > mass_min
        mass_pdf_array /= self.imf.integrate(mass_min, self.mass_init_upper_bound)
        out = np.vstack([mass_init_array,mass_pdf_array,mass_act_array,mag_1_array,mag_2_array])
        return out
    def stellar_mass(self, mass_min=0.1, steps=10000):
        """
        Compute the stellar mass (Msun; average per star). PDF comes
        from IMF, but weight by actual stellar mass.

        Parameters:
        -----------
        mass_min : Minimum mass to integrate the IMF
        steps : Number of steps to sample the isochrone

        Returns:
        --------
        mass : Stellar mass [Msun]
        """
        mass_max = self.mass_init_upper_bound
        d_log_mass = (np.log10(mass_max) - np.log10(mass_min)) / float(steps)
        log_mass = np.linspace(np.log10(mass_min), np.log10(mass_max), steps)
        mass = 10.**log_mass
        if mass_min < np.min(self.mass_init):
            # Extrapolate below the isochrone data assuming mass_act==mass_init
            mass_act_interpolation = scipy.interpolate.interp1d(np.insert(self.mass_init, 0, mass_min),
                                                                np.insert(self.mass_act, 0, mass_min))
        else:
            mass_act_interpolation = scipy.interpolate.interp1d(self.mass_init, self.mass_act)
        mass_act = mass_act_interpolation(mass)
        return np.sum(mass_act * d_log_mass * self.imf.pdf(mass, log_mode=True))
    def stellar_luminosity(self, steps=10000):
        """
        Compute the stellar luminosity (Lsun; average per star). PDF
        comes from IMF. The range of integration only covers the
        input isochrone data (no extrapolation used), but this seems
        like a sub-percent effect if the isochrone goes to 0.15 Msun
        for the old and metal-poor stellar populations of interest.
        Note that the stellar luminosity is very sensitive to the
        post-AGB population.

        Parameters:
        -----------
        steps : Number of steps to sample the isochrone.

        Returns:
        --------
        lum : The stellar luminosity [Lsun]
        """
        mass_min = np.min(self.mass_init)
        mass_max = self.mass_init_upper_bound
        d_log_mass = (np.log10(mass_max) - np.log10(mass_min)) / float(steps)
        log_mass = np.linspace(np.log10(mass_min), np.log10(mass_max), steps)
        mass = 10.**log_mass
        luminosity_interpolation = scipy.interpolate.interp1d(self.mass_init, self.luminosity,fill_value=0,bounds_error=False)
        luminosity = luminosity_interpolation(mass)
        return np.sum(luminosity * d_log_mass * self.imf.pdf(mass, log_mode=True))
    # ADW: For temporary backward compatibility
    stellarMass = stellar_mass
    stellarLuminosity = stellar_luminosity
    def absolute_magnitude(self, richness=1, steps=1e4):
        """
        Calculate the absolute visual magnitude (Mv) from the richness
        by transforming the isochrone in the SDSS system and using the
        g,r -> V transform equations from Jester 2005
        [astro-ph/0506022].

        TODO: ADW If richness not specified, should use self.richness

        Parameters:
        -----------
        richness : isochrone normalization parameter
        steps : number of isochrone sampling steps

        Returns:
        --------
        abs_mag : Absolute magnitude (Mv)
        """
        # Using the SDSS g,r -> V from Jester 2005 [astro-ph/0506022]
        # for stars with R-I < 1.15
        # V = g_sdss - 0.59*(g_sdss - r_sdss) - 0.01
        # Create a copy of the isochrone in the SDSS system
        params = {k:v.value for k,v in self._params.items()}
        params.update(band_1='g',band_2='r',survey='sdss')
        iso = self.__class__(**params)
        # g, r are absolute magnitude
        mass_init, mass_pdf, mass_act, sdss_g, sdss_r = iso.sample(mass_steps=steps)
        V = jester_mag_v(sdss_g,sdss_r)
        # Sum the V-band absolute magnitudes (weighted by the richness-scaled pdf)
        return sum_mags(V,weights=mass_pdf*richness)
    def absolute_magnitude_martin(self, richness=1, steps=1e4, n_trials=1000, mag_bright=None, mag_faint=23., alpha=0.32, seed=None):
        """
        Calculate the absolute magnitude (Mv) of the isochrone using
        the prescription of Martin et al. 2008.

        ADW: Seems like the faint and bright limits should depend on the survey maglim?

        Parameters:
        -----------
        richness : Isochrone nomalization factor
        steps : Number of steps for sampling the isochrone.
        n_trials : Number of bootstrap samples
        mag_bright : Bright magnitude limit [SDSS g-band] for luminosity calculation
        mag_faint : Faint magnitude limit [SDSS g-band] for luminosity calculation
        alpha : Output confidence interval (1-alpha)
        seed : Random seed

        Returns:
        --------
        med,lo,hi : Total absolute magnitude interval
        """
        # ADW: This function is not quite right. It should restrict
        # the catalog to the obsevable space using the mask in each
        # pixel. This becomes even more complicated when we transform
        # the isochrone into SDSS g,r...
        if seed is not None: np.random.seed(seed)
        # Create a copy of the isochrone in the SDSS system
        params = {k:v.value for k,v in self._params.items()}
        params.update(band_1='g',band_2='r',survey='sdss')
        iso = self.__class__(**params)
        # Analytic part (below detection threshold)
        # g, r are absolute magnitudes
        mass_init, mass_pdf, mass_act, sdss_g, sdss_r = iso.sample(mass_steps = steps)
        V = jester_mag_v(sdss_g, sdss_r)
        cut = ( (sdss_g + iso.distance_modulus) > mag_faint)
        mag_unobs = sum_mags(V[cut], weights = richness * mass_pdf[cut])
        # Stochastic part (above detection threshold)
        abs_mag_v = np.zeros(n_trials)
        for i in range(n_trials):
            if i%100==0: logger.debug('%i absolute magnitude trials'%i)
            # g,r are apparent magnitudes
            sdss_g, sdss_r = iso.simulate(richness * iso.stellar_mass())
            cut = (sdss_g < mag_faint)
            # V is absolute magnitude
            V = jester_mag_v(sdss_g[cut]-iso.distance_modulus,
                             sdss_r[cut]-iso.distance_modulus)
            mag_obs = sum_mags(V)
            abs_mag_v[i] = sum_mags([mag_obs,mag_unobs])
        # ADW: Careful, fainter abs mag is larger (less negative) number
        q = [100*alpha/2., 50, 100*(1-alpha/2.)]
        hi,med,lo = np.percentile(abs_mag_v,q)
        return ugali.utils.stats.interval(med,lo,hi)
    def simulate(self, stellar_mass, distance_modulus=None, **kwargs):
        """
        Simulate a set of stellar magnitudes (no uncertainty) for a
        satellite of a given stellar mass and distance.

        Parameters:
        -----------
        stellar_mass : the total stellar mass of the system (Msun)
        distance_modulus : distance modulus of the system (if None takes from isochrone)
        kwargs : passed to iso.imf.sample

        Returns:
        --------
        mag_1, mag_2 : simulated magnitudes with length stellar_mass/iso.stellar_mass()
        """
        if distance_modulus is None: distance_modulus = self.distance_modulus
        # Total number of stars in system
        n = int(round(stellar_mass / self.stellar_mass()))
        f_1 = scipy.interpolate.interp1d(self.mass_init, self.mag_1)
        f_2 = scipy.interpolate.interp1d(self.mass_init, self.mag_2)
        mass_init_sample = self.imf.sample(n, np.min(self.mass_init), np.max(self.mass_init), **kwargs)
        mag_1_sample, mag_2_sample = f_1(mass_init_sample), f_2(mass_init_sample)
        return mag_1_sample + distance_modulus, mag_2_sample + distance_modulus
    def observableFractionCMDX(self, mask, distance_modulus, mass_min=0.1):
        """
        Compute observable fraction of stars with masses greater than mass_min in each
        pixel in the interior region of the mask.

        ADW: Careful, this function is fragile! The selection here should
        be the same as mask.restrictCatalogToObservable space. However,
        for technical reasons it is faster to do the calculation with
        broadcasting here.
        ADW: Could this function be even faster / more readable?
        ADW: Should this include magnitude error leakage?
        """
        mass_init_array,mass_pdf_array,mass_act_array,mag_1_array,mag_2_array = self.sample(mass_min=mass_min,full_data_range=False)
        mag = mag_1_array if self.band_1_detection else mag_2_array
        color = mag_1_array - mag_2_array
        # ADW: Only calculate observable fraction over interior pixels...
        pixels = mask.roi.pixels_interior
        mag_1_mask = mask.mask_1.mask_roi_sparse[mask.roi.pixel_interior_cut]
        mag_2_mask = mask.mask_2.mask_roi_sparse[mask.roi.pixel_interior_cut]
        # ADW: Restrict mag and color to range of mask with sufficient solid angle
        cmd_cut = ugali.utils.binning.take2D(mask.solid_angle_cmd,color,mag+distance_modulus,
                                             mask.roi.bins_color, mask.roi.bins_mag) > 0
        # Pre-apply these cuts to the 1D mass_pdf_array to save time
        mass_pdf_cut = mass_pdf_array*cmd_cut
        # Create 2D arrays of cuts for each pixel
        mask_1_cut = (mag_1_array+distance_modulus)[:,np.newaxis] < mag_1_mask
        mask_2_cut = (mag_2_array+distance_modulus)[:,np.newaxis] < mag_2_mask
        mask_cut_repeat = mask_1_cut & mask_2_cut
        observable_fraction = (mass_pdf_cut[:,np.newaxis]*mask_cut_repeat).sum(axis=0)
        return observable_fraction
    def observableFractionCMD(self, mask, distance_modulus, mass_min=0.1):
        """
        Compute observable fraction of stars with masses greater than mass_min in each
        pixel in the interior region of the mask.

        ADW: Careful, this function is fragile! The selection here should
        be the same as mask.restrictCatalogToObservable space. However,
        for technical reasons it is faster to do the calculation with
        broadcasting here.
        ADW: Could this function be even faster / more readable?
        ADW: Should this include magnitude error leakage?
        """
        if distance_modulus is None: distance_modulus = self.distance_modulus
        mass_init,mass_pdf,mass_act,mag_1,mag_2 = self.sample(mass_min=mass_min,full_data_range=False)
        mag = mag_1 if self.band_1_detection else mag_2
        color = mag_1 - mag_2
        # ADW: Only calculate observable fraction for unique mask values
        mag_1_mask,mag_2_mask = mask.mask_roi_unique.T
        # ADW: Restrict mag and color to range of mask with sufficient solid angle
        cmd_cut = ugali.utils.binning.take2D(mask.solid_angle_cmd,color,mag+distance_modulus,
                                             mask.roi.bins_color, mask.roi.bins_mag) > 0
        # Pre-apply these cuts to the 1D mass_pdf_array to save time
        mass_pdf_cut = mass_pdf*cmd_cut
        # Create 2D arrays of cuts for each pixel
        mask_1_cut = (mag_1+distance_modulus)[:,np.newaxis] < mag_1_mask
        mask_2_cut = (mag_2+distance_modulus)[:,np.newaxis] < mag_2_mask
        mask_cut_repeat = (mask_1_cut & mask_2_cut)
        # Condense back into one per digi
        observable_fraction = (mass_pdf_cut[:,np.newaxis]*mask_cut_repeat).sum(axis=0)
        # Expand to the roi and multiply by coverage fraction
        return observable_fraction[mask.mask_roi_digi[mask.roi.pixel_interior_cut]] * mask.frac_interior_sparse
    def observableFractionCDF(self, mask, distance_modulus, mass_min=0.1):
        """
        Compute observable fraction of stars with masses greater than mass_min in each
        pixel in the interior region of the mask. Incorporates simplistic
        photometric errors.

        ADW: Careful, this function is fragile! The selection here should
        be the same as mask.restrictCatalogToObservable space. However,
        for technical reasons it is faster to do the calculation with
        broadcasting here.
        ADW: This function is currently a rate-limiting step in the likelihood
        calculation. Could it be faster?
        """
        method = 'step'
        mass_init,mass_pdf,mass_act,mag_1,mag_2 = self.sample(mass_min=mass_min,full_data_range=False)
        mag_1 = mag_1+distance_modulus
        mag_2 = mag_2+distance_modulus
        mask_1,mask_2 = mask.mask_roi_unique.T
        mag_err_1 = mask.photo_err_1(mask_1[:,np.newaxis]-mag_1)
        mag_err_2 = mask.photo_err_2(mask_2[:,np.newaxis]-mag_2)
        # "upper" bound set by maglim
        delta_hi_1 = (mask_1[:,np.newaxis]-mag_1)/mag_err_1
        delta_hi_2 = (mask_2[:,np.newaxis]-mag_2)/mag_err_2
        # "lower" bound set by bins_mag (maglim shouldn't be 0)
        delta_lo_1 = (mask.roi.bins_mag[0]-mag_1)/mag_err_1
        delta_lo_2 = (mask.roi.bins_mag[0]-mag_2)/mag_err_2
        cdf_1 = norm_cdf(delta_hi_1) - norm_cdf(delta_lo_1)
        cdf_2 = norm_cdf(delta_hi_2) - norm_cdf(delta_lo_2)
        cdf = cdf_1*cdf_2
        if method is None or method == 'none':
            comp_cdf = cdf
        elif self.band_1_detection == True:
            comp = mask.mask_1.completeness(mag_1, method=method)
            comp_cdf = comp*cdf
        elif self.band_1_detection == False:
            comp =mask.mask_2.completeness(mag_2, method=method)
            comp_cdf = comp*cdf
        else:
            comp_1 = mask.mask_1.completeness(mag_1, method=method)
            comp_2 = mask.mask_2.completeness(mag_2, method=method)
            comp_cdf = comp_1*comp_2*cdf
        observable_fraction = (mass_pdf[np.newaxis]*comp_cdf).sum(axis=-1)
        return observable_fraction[mask.mask_roi_digi[mask.roi.pixel_interior_cut]]
    def observableFractionMMD(self, mask, distance_modulus, mass_min=0.1):
        """Compute the observable fraction in each interior pixel by
        summing the signal MMD over both magnitude dimensions."""
        # This can be done faster...
        logger.info('Calculating observable fraction from MMD')
        mmd = self.signalMMD(mask,distance_modulus)
        obs_frac = mmd.sum(axis=-1).sum(axis=-1)[mask.mask_roi_digi[mask.roi.pixel_interior_cut]]
        return obs_frac
    observable_fraction = observableFractionCMD
    observableFraction = observable_fraction
    def signalMMD(self, mask, distance_modulus, mass_min=0.1, nsigma=5, delta_mag=0.03, mass_steps=1000, method='step'):
        """Compute the binned magnitude-magnitude distribution (MMD) of
        the isochrone signal, convolved with the photometric errors for
        each unique magnitude limit in the mask.

        Returns an array of shape (n_maglim, nbins, nbins).
        """
        roi = mask.roi
        mass_init,mass_pdf,mass_act,mag_1,mag_2 = self.sample(mass_steps=mass_steps,mass_min=mass_min,full_data_range=False)
        mag_1 = mag_1+distance_modulus
        mag_2 = mag_2+distance_modulus
        mask_1,mask_2 = mask.mask_roi_unique.T
        mag_err_1 = mask.photo_err_1(mask_1[:,np.newaxis]-mag_1)
        mag_err_2 = mask.photo_err_2(mask_2[:,np.newaxis]-mag_2)
        # Set mag_err for mask==0 to epsilon
        # NOTE(review): multiplying by -np.inf yields -inf (or nan for a
        # zero error), not a small epsilon -- confirm intent upstream.
        mag_err_1[mask_1==0] *= -np.inf
        mag_err_2[mask_2==0] *= -np.inf
        #edges_mag = np.arange(mask.roi.bins_mag[0] - (0.5*delta_mag),
        #                      mask.roi.bins_mag[-1] + (0.5*delta_mag),
        #                      delta_mag)
        #nedges = edges_mag.shape[0]
        # np.linspace requires an integer number of samples
        nedges = int(np.rint((roi.bins_mag[-1]-roi.bins_mag[0])/delta_mag)) + 1
        edges_mag,delta_mag = np.linspace(roi.bins_mag[0],roi.bins_mag[-1],nedges,retstep=True)
        edges_mag_1 = edges_mag_2 = edges_mag
        nbins = nedges - 1
        mag_err_1_max = mag_err_1.max(axis=0)
        mag_err_2_max = mag_err_2.max(axis=0)
        max_idx_1 = np.searchsorted(edges_mag[:-1],mag_1+nsigma*mag_err_1_max)
        min_idx_1 = np.searchsorted(edges_mag[:-1],mag_1-nsigma*mag_err_1_max)
        # NOTE(review): the band-2 window below uses mag_err_1_max;
        # mag_err_2_max looks intended -- confirm upstream before changing
        # (only affects which bins are evaluated).
        max_idx_2 = np.searchsorted(edges_mag[:-1],mag_2+nsigma*mag_err_1_max)
        min_idx_2 = np.searchsorted(edges_mag[:-1],mag_2-nsigma*mag_err_1_max)
        # Select only isochrone values that will contribute to the MMD space
        sel = (max_idx_1>0)&(min_idx_1<nbins)&(max_idx_2>0)&(min_idx_2<nbins)
        if sel.sum() == 0:
            msg = 'No isochrone points in magnitude selection range'
            raise Exception(msg)
        mag_1,mag_2 = mag_1[sel],mag_2[sel]
        mag_err_1,mag_err_2 = mag_err_1[:,sel],mag_err_2[:,sel]
        mass_pdf = mass_pdf[sel]
        mag_err_1_max = mag_err_1.max(axis=0)
        mag_err_2_max = mag_err_2.max(axis=0)
        min_idx_1,max_idx_1 = min_idx_1[sel],max_idx_1[sel]
        min_idx_2,max_idx_2 = min_idx_2[sel],max_idx_2[sel]
        nmaglim,niso = mag_err_1.shape
        # Find valid indices in MMD space (can we avoid this loop?)
        nidx = ((max_idx_1-min_idx_1)*(max_idx_2-min_idx_2))
        mag_idx = np.arange(niso).repeat(nidx)
        bin_idx = np.zeros(nidx.sum(),dtype=int)
        ii = 0
        # ADW: Can we avoid this loop?
        for i in range(niso):
            x = np.ravel_multi_index(np.mgrid[min_idx_1[i]:max_idx_1[i],
                                              min_idx_2[i]:max_idx_2[i]],
                                     [nbins,nbins]).ravel()
            bin_idx[ii:ii+len(x)] = x
            ii += len(x)
        #idx = np.unique(idx)
        idx_1,idx_2 = np.unravel_index(bin_idx,[nbins,nbins])
        # Pre-compute the indexed arrays to save time at the cost of memory
        mag_1_idx,mag_2_idx = mag_1[mag_idx],mag_2[mag_idx]
        mag_err_1_idx,mag_err_2_idx = mag_err_1[:,mag_idx],mag_err_2[:,mag_idx]
        edges_mag_1_idx,edges_mag_2_idx = edges_mag[idx_1],edges_mag[idx_2]
        arg_mag_1_hi = (mag_1_idx - edges_mag_1_idx) / mag_err_1_idx
        arg_mag_1_lo = arg_mag_1_hi - delta_mag/mag_err_1_idx
        arg_mag_2_hi = (mag_2_idx - edges_mag_2_idx) / mag_err_2_idx
        arg_mag_2_lo = arg_mag_2_hi - delta_mag/mag_err_2_idx
        del mag_1_idx,mag_2_idx
        del mag_err_1_idx,mag_err_2_idx
        del edges_mag_1_idx,edges_mag_2_idx
        # This may become necessary with more maglim bins
        ### # PDF is only ~nonzero for object-bin pairs within 5 sigma in both magnitudes
        ### index_nonzero = np.nonzero((arg_mag_1_hi > -nsigma)*(arg_mag_1_lo < nsigma) \
        ###                                *(arg_mag_2_hi > -nsigma)*(arg_mag_2_lo < nsigma))
        ### idx_maglim,idx_iso,idx_idx = index_nonzero
        ### subidx = idx[idx_idx]
        pdf_val_1 = norm_cdf(arg_mag_1_hi)-norm_cdf(arg_mag_1_lo)
        pdf_val_2 = norm_cdf(arg_mag_2_hi)-norm_cdf(arg_mag_2_lo)
        pdf_val = pdf_val_1 * pdf_val_2
        # Deal with completeness
        if method is None or method == 'none':
            comp = None
        elif self.band_1_detection == True:
            comp=mask.completeness(mask_1[:,np.newaxis]-mag_1, method=method)
        elif self.band_1_detection == False:
            comp=mask.completeness(mask_2[:,np.newaxis]-mag_2, method=method)
        else:
            comp_1 = mask.completeness(mask_1[:,np.newaxis]-mag_1, method=method)
            comp_2 = mask.completeness(mask_2[:,np.newaxis]-mag_2, method=method)
            comp = comp_1*comp_2
        if comp is not None:
            comp_pdf_val = pdf_val*comp[:,mag_idx]
        else:
            comp_pdf_val = pdf_val
        # Deal with mass pdf values
        scaled_pdf_val = comp_pdf_val*mass_pdf[mag_idx]
        # Do the sum without creating the huge sparse array.
        label_idx = np.arange(nmaglim*nbins**2).reshape(nmaglim,nbins**2)
        labels = label_idx[:,bin_idx]
        sum_pdf = ndimage.sum(scaled_pdf_val,labels,label_idx.flat).reshape(nmaglim,nbins**2)
        # This is the clipping of the pdf at the maglim
        # Probably want to move this out of this function.
        final_pdf = sum_pdf.reshape(nmaglim,nbins,nbins)
        argmax_hi_1 = np.argmax((mask_1[:,np.newaxis] <= edges_mag[1:]),axis=1)
        argmax_hi_2 = np.argmax((mask_2[:,np.newaxis] <= edges_mag[1:]),axis=1)
        bin_frac_1 = (mask_1 - edges_mag[argmax_hi_1])/delta_mag
        bin_frac_2 = (mask_2 - edges_mag[argmax_hi_2])/delta_mag
        for i,(argmax_1,argmax_2) in enumerate(zip(argmax_hi_1,argmax_hi_2)):
            final_pdf[i,argmax_1,:] *= bin_frac_1[i]
            final_pdf[i,:,argmax_2] *= bin_frac_2[i]
            final_pdf[i,argmax_1+1:,:] = 0
            final_pdf[i,:,argmax_2+1:] = 0
        ## This is the actual data selection cut...
        #bins_2,bins_1 = np.meshgrid(edges_mag[:-1],edges_mag[:-1])
        #cut = (bins_1 < mask_1[:,np.newaxis,np.newaxis])*(bins_2 < mask_2[:,np.newaxis,np.newaxis])
        #final_pdf = sum_pdf.reshape(nmaglim,nbins,nbins)*cut
        return final_pdf
    def histogram2d(self,distance_modulus=None,delta_mag=0.03,steps=10000):
        """
        Return a 2D histogram the isochrone in mag-mag space.

        Parameters:
        -----------
        distance_modulus : distance modulus to calculate histogram at
        delta_mag : magnitude bin size
        steps : number of steps to sample isochrone at

        Returns:
        --------
        isochrone_pdf : weighted pdf of isochrone in each bin
        bins_mag_1 : bin edges for first magnitude
        bins_mag_2 : bin edges for second magnitude
        """
        if distance_modulus is not None:
            self.distance_modulus = distance_modulus
        # Isochrone will be binned, so might as well sample lots of points
        mass_init,mass_pdf,mass_act,mag_1,mag_2 = self.sample(mass_steps=steps)
        #logger.warning("Fudging intrinisic dispersion in isochrone.")
        #mag_1 += np.random.normal(scale=0.02,size=len(mag_1))
        #mag_2 += np.random.normal(scale=0.02,size=len(mag_2))
        # We cast to np.float32 to save memory
        bins_mag_1 = np.arange(self.mod+mag_1.min() - (0.5*delta_mag),
                               self.mod+mag_1.max() + (0.5*delta_mag),
                               delta_mag).astype(np.float32)
        bins_mag_2 = np.arange(self.mod+mag_2.min() - (0.5*delta_mag),
                               self.mod+mag_2.max() + (0.5*delta_mag),
                               delta_mag).astype(np.float32)
        # ADW: Completeness needs to go in mass_pdf here...
        isochrone_pdf = np.histogram2d(self.mod + mag_1,
                                       self.mod + mag_2,
                                       bins=[bins_mag_1, bins_mag_2],
                                       weights=mass_pdf)[0].astype(np.float32)
        return isochrone_pdf, bins_mag_1, bins_mag_2
    def pdf_mmd(self, lon, lat, mag_1, mag_2, distance_modulus, mask, delta_mag=0.03, steps=1000):
        """
        Ok, now here comes the beauty of having the signal MMD.
        """
        logger.info('Running MMD pdf')
        roi = mask.roi
        mmd = self.signalMMD(mask,distance_modulus,delta_mag=delta_mag,mass_steps=steps)
        # This is fragile, store this information somewhere else...
        # np.linspace requires an integer number of samples
        nedges = int(np.rint((roi.bins_mag[-1]-roi.bins_mag[0])/delta_mag)) + 1
        edges_mag,delta_mag = np.linspace(roi.bins_mag[0],roi.bins_mag[-1],nedges,retstep=True)
        idx_mag_1 = np.searchsorted(edges_mag,mag_1)
        idx_mag_2 = np.searchsorted(edges_mag,mag_2)
        if np.any(idx_mag_1 > nedges) or np.any(idx_mag_1 == 0):
            msg = "Magnitude out of range..."
            raise Exception(msg)
        if np.any(idx_mag_2 > nedges) or np.any(idx_mag_2 == 0):
            msg = "Magnitude out of range..."
            raise Exception(msg)
        idx = mask.roi.indexROI(lon,lat)
        u_color = mmd[(mask.mask_roi_digi[idx],idx_mag_1,idx_mag_2)]
        # Remove the bin size to convert the pdf to units of mag^-2
        u_color /= delta_mag**2
        return u_color
    #import memory_profiler
    #@memory_profiler.profile
    def pdf(self, mag_1, mag_2, mag_err_1, mag_err_2,
            distance_modulus=None, delta_mag=0.03, steps=10000):
        """
        Compute isochrone probability for each catalog object.

        ADW: This is a memory intensive function, so try as much as
        possible to keep array types at `float32` or smaller (maybe
        using add.at would be good?)
        ADW: Still a little speed to be gained here (broadcasting)
        ADW: Units? [mag^-2] [per sr?]

        Parameters:
        -----------
        mag_1 : magnitude of stars (pdf sample points) in first band
        mag_2 : magnitude of stars (pdf sample points) in second band
        mag_err_1 : magnitude error of stars (pdf sample points) in first band
        mag_err_2 : magnitude error of stars (pdf sample points) in second band
        distance_modulus : distance modulus of isochrone
        delta_mag : magnitude binning for evaluating the pdf
        steps : number of isochrone sample points

        Returns:
        --------
        u_color : probability that the star belongs to the isochrone [mag^-2]
        """
        nsigma = 5.0
        #pad = 1. # mag
        if distance_modulus is None:
            distance_modulus = self.distance_modulus
        # ADW: HACK TO ADD SYSTEMATIC UNCERTAINTY (0.010 mag)
        mag_err_1 = np.sqrt(mag_err_1**2 + 0.01**2)
        mag_err_2 = np.sqrt(mag_err_2**2 + 0.01**2)
        # Binned pdf of the isochrone
        histo_pdf,bins_mag_1,bins_mag_2 = self.histogram2d(distance_modulus,delta_mag,steps)
        # Keep only isochrone bins that are within the magnitude
        # space of the sample
        # NOTE(review): the meshgrid output names look swapped (the first
        # output holds bins_mag_2 values) -- behavior kept as-is; confirm.
        mag_1_mesh, mag_2_mesh = np.meshgrid(bins_mag_2[1:], bins_mag_1[1:])
        # pdf contribution only calculated out to nsigma,
        # so padding shouldn't be necessary.
        mag_1_max = np.max(mag_1+nsigma*mag_err_1)# +pad
        mag_1_min = np.min(mag_1-nsigma*mag_err_1)# -pad
        mag_2_max = np.max(mag_2+nsigma*mag_err_2)# +pad
        mag_2_min = np.min(mag_2-nsigma*mag_err_2)# -pad
        in_mag_space = ((mag_1_mesh>=mag_1_min)&(mag_1_mesh<=mag_1_max))
        in_mag_space*= ((mag_2_mesh>=mag_2_min)&(mag_2_mesh<=mag_2_max))
        histo_pdf *= in_mag_space
        idx_mag_1, idx_mag_2 = np.nonzero(histo_pdf)
        isochrone_pdf = histo_pdf[idx_mag_1, idx_mag_2]
        n_catalog = len(mag_1)
        n_isochrone_bins = len(idx_mag_1)
        mag_1 = mag_1.reshape([n_catalog, 1])
        mag_err_1 = mag_err_1.reshape([n_catalog, 1])
        mag_2 = mag_2.reshape([n_catalog, 1])
        mag_err_2 = mag_err_2.reshape([n_catalog, 1])
        # Calculate (normalized) distance between each catalog object
        # and isochrone bin. Assume normally distributed photometric
        # uncertainties so that the normalized distance is:
        #   norm_dist = (mag_1 - bins_mag_1)/mag_err_1
        # ADW: Creating the dist arrays is memory intensive.
        # Can we cut it down (maybe with add.at)?
        dist_mag_1_hi = (mag_1-bins_mag_1[idx_mag_1])/mag_err_1
        dist_mag_1_lo = (mag_1-bins_mag_1[idx_mag_1+1])/mag_err_1
        dist_mag_2_hi = (mag_2-bins_mag_2[idx_mag_2])/mag_err_2
        dist_mag_2_lo = (mag_2-bins_mag_2[idx_mag_2+1])/mag_err_2
        # Only calculate the PDF using bins that are < nsigma from the
        # data point (i.e., where it is ~nonzero).
        idx_nonzero_0,idx_nonzero_1 = np.nonzero((dist_mag_1_hi > -nsigma) \
                                                 *(dist_mag_1_lo < nsigma)\
                                                 *(dist_mag_2_hi > -nsigma)\
                                                 *(dist_mag_2_lo < nsigma))
        # Now calculate the pdf as the delta of the normalized cdf
        # (more accurate than the point evaluation of the pdf)
        pdf_mag_1 = np.zeros([n_catalog, n_isochrone_bins],dtype=np.float32)
        pdf_mag_1[idx_nonzero_0,idx_nonzero_1] = norm_cdf(dist_mag_1_hi[idx_nonzero_0,idx_nonzero_1]) \
            - norm_cdf(dist_mag_1_lo[idx_nonzero_0,idx_nonzero_1])
        pdf_mag_2 = np.zeros([n_catalog, n_isochrone_bins],dtype=np.float32)
        pdf_mag_2[idx_nonzero_0,idx_nonzero_1] = norm_cdf(dist_mag_2_hi[idx_nonzero_0,idx_nonzero_1]) \
            - norm_cdf(dist_mag_2_lo[idx_nonzero_0,idx_nonzero_1])
        # Signal "color probability" (as opposed to "spatial
        # probability", but more accurately "isochrone probability")
        # is the product of PDFs for each object-bin pair summed over
        # isochrone bins
        #ADW: Here is where add.at would be good...
        u_color = np.sum(pdf_mag_1 * pdf_mag_2 * isochrone_pdf, axis=1)
        # Remove the bin size to convert the pdf to units of mag^-2
        u_color /= delta_mag**2
        return u_color.astype(np.float32)
    def raw_separation(self,mag_1,mag_2,steps=10000):
        """
        Calculate the separation in magnitude-magnitude space between
        points and isochrone. Uses a dense sampling of the isochrone
        and calculates the metric distance from any isochrone sample
        point.

        Parameters:
        -----------
        mag_1 : The magnitude of the test points in the first band
        mag_2 : The magnitude of the test points in the second band
        steps : Number of steps to sample the isochrone

        Returns:
        --------
        sep : Minimum separation between test points and isochrone sample
        """
        # http://stackoverflow.com/q/12653120/
        mag_1 = np.array(mag_1,copy=False,ndmin=1)
        mag_2 = np.array(mag_2,copy=False,ndmin=1)
        init,pdf,act,iso_mag_1,iso_mag_2 = self.sample(mass_steps=steps)
        iso_mag_1+=self.distance_modulus
        iso_mag_2+=self.distance_modulus
        # Only keep isochrone points within the data footprint
        iso_cut = (iso_mag_1<np.max(mag_1))&(iso_mag_1>np.min(mag_1)) | \
                  (iso_mag_2<np.max(mag_2))&(iso_mag_2>np.min(mag_2))
        iso_mag_1 = iso_mag_1[iso_cut]
        iso_mag_2 = iso_mag_2[iso_cut]
        dist_mag_1 = mag_1[:,np.newaxis]-iso_mag_1
        dist_mag_2 = mag_2[:,np.newaxis]-iso_mag_2
        return np.min(np.sqrt(dist_mag_1**2 + dist_mag_2**2),axis=1)
    def separation(self, mag_1, mag_2):
        """
        Calculate the separation between a specific point and the
        isochrone in magnitude-magnitude space. Uses an interpolation

        ADW: Could speed this up...

        Parameters:
        -----------
        mag_1 : The magnitude of the test points in the first band
        mag_2 : The magnitude of the test points in the second band

        Returns:
        --------
        sep : Minimum separation between test points and isochrone interpolation
        """
        iso_mag_1 = self.mag_1 + self.distance_modulus
        iso_mag_2 = self.mag_2 + self.distance_modulus
        def interp_iso(iso_mag_1,iso_mag_2,mag_1,mag_2):
            # Perpendicular-ish distance components from the two
            # axis-aligned interpolations of the isochrone
            interp_1 = scipy.interpolate.interp1d(iso_mag_1,iso_mag_2,bounds_error=False)
            interp_2 = scipy.interpolate.interp1d(iso_mag_2,iso_mag_1,bounds_error=False)
            dy = interp_1(mag_1) - mag_2
            dx = interp_2(mag_2) - mag_1
            dmag_1 = np.fabs(dx*dy) / (dx**2 + dy**2) * dy
            dmag_2 = np.fabs(dx*dy) / (dx**2 + dy**2) * dx
            return dmag_1, dmag_2
        # Separate the various stellar evolution stages
        if np.issubdtype(self.stage.dtype,np.number):
            sel = (self.stage < self.hb_stage)
        else:
            sel = (self.stage != self.hb_stage)
        # First do the MS/RGB
        rgb_mag_1 = iso_mag_1[sel]
        rgb_mag_2 = iso_mag_2[sel]
        dmag_1,dmag_2 = interp_iso(rgb_mag_1,rgb_mag_2,mag_1,mag_2)
        # Then do the HB (if it exists)
        if not np.all(sel):
            hb_mag_1 = iso_mag_1[~sel]
            hb_mag_2 = iso_mag_2[~sel]
            hb_dmag_1,hb_dmag_2 = interp_iso(hb_mag_1,hb_mag_2,mag_1,mag_2)
            dmag_1 = np.nanmin([dmag_1,hb_dmag_1],axis=0)
            dmag_2 = np.nanmin([dmag_2,hb_dmag_2],axis=0)
        #return dmag_1,dmag_2
        return np.sqrt(dmag_1**2 + dmag_2**2)
class Isochrone(IsochroneModel):
    """ Abstract base class for isochrones.

    Manages a grid of isochrone files on disk (one file per
    age/metallicity pair).  The file nearest to the current ``age`` and
    ``metallicity`` parameters is located with a KD-tree and parsed
    lazily whenever those parameters change.  Subclasses are expected to
    implement ``z2feh``, ``feh2z``, ``_parse``, ``query_server``, and
    ``verify``.
    """

    # Filename prefix and template for grid files, e.g. "iso_a12.0_z0.00015.dat",
    # stored in a per-survey directory.
    _prefix = 'iso'
    _basename = '%(prefix)s_a%(age)04.1f_z%(z)0.5f.dat'
    _dirname = os.path.join(get_iso_dir(),'{survey}')

    def __init__(self,**kwargs):
        """Build the (age, metallicity) grid and cache the nearest isochrone.

        Parameters
        ----------
        kwargs : passed through to ``IsochroneModel.__init__``
        """
        super(Isochrone,self).__init__(**kwargs)

        # Grid of (age, metallicity) values available on disk, plus a
        # KD-tree used for nearest-grid-point file lookup.
        self.grid = self.create_grid()
        self.tree = self.create_tree(self.grid)
        self.agrid, self.zgrid = self.grid
        # Restrict the fit parameters to the range spanned by the grid.
        self.params['age'].set_bounds([self.agrid.min(),self.agrid.max()])
        self.params['metallicity'].set_bounds([self.zgrid.min(),self.zgrid.max()])
        self.filename = None
        self._cache()

    def __str__(self,indent=0):
        """Append the cached filename to the parent string representation."""
        ret = super(Isochrone,self).__str__(indent)
        filename = 'Filename: %s'%self.filename
        ret += '\n{0:>{2}}{1}'.format('',filename,indent+2)
        return ret

    @classmethod
    def z2feh(cls, z):
        """Convert metallicity Z to [Fe/H] (abstract; see `feh` docstring)."""
        msg = "Must be implemented by subclass"
        raise Exception(msg)

    @classmethod
    def feh2z(cls, feh):
        """Convert [Fe/H] to metallicity Z (abstract)."""
        msg = "Must be implemented by subclass"
        raise Exception(msg)

    @property
    def feh(self):
        """
        Calculate [Fe/H] from the (initial) metallicity, Z.

        Section 3.1 of https://arxiv.org/abs/1604.08592 describes how
        this is done for the MESA isochrones and serves as a good
        template in general. The metallicity is computed as:

          [Fe/H] = log10( (Z_init/X_init) / (Z_solar/X_solar) )
                 = log10( (Z_init/Z_solar) / (X_solar/X_init) )

        where,
          Z_init = Initial metal abundance (user provided)
          Y_init = Y_p + c*Z_init = Initial He abundance
          X_init = 1 - Y_init - Z_init = Primordial H-abundance
          X_solar and Z_solar = Solar abundances taken from references

        Thus, to properly calculate [Fe/H] requires the definition of
        several quantities: Z_init, Y_init, X_solar, and
        Z_solar. Generally, Y_init is assumed to scale linearly
        between the primordial and solar abundances (scale factor c).
        """
        return self.z2feh(self.metallicity)

    @classmethod
    def params2filename(cls,age,metallicity):
        """Build the canonical grid filename for an (age, metallicity) pair."""
        return cls._basename%dict(prefix=cls._prefix,age=age,z=metallicity)

    @classmethod
    def filename2params(cls,filename):
        """Parse (age, metallicity) back out of a grid filename.

        Raises if the filename prefix does not match ``cls._prefix``.
        """
        #ADW: Could probably do something more clever so that parsing info
        #is stored in only one place...
        basename = os.path.basename(filename)
        prefix,a,z = os.path.splitext(basename)[0].split('_')
        if prefix != cls._prefix:
            msg = 'File prefix does not match: %s'%filename
            raise Exception(msg)
        age = float(a.strip('a'))
        metallicity = float(z.strip('z'))
        return age,metallicity

    def create_grid(self,abins=None,zbins=None):
        """Return flattened (age, metallicity) arrays describing the grid.

        With no arguments, the grid is discovered by globbing the isochrone
        directory.  Otherwise `abins` and `zbins` are (min, max, nsteps)
        specifications: ages sampled linearly (Gyr), metallicities
        log-spaced.  Both must be given together or not at all.
        """
        if abins is None and zbins is None:
            # Discover the grid from the files already on disk.
            filenames = glob.glob(self.get_dirname()+'/%s_*.dat'%(self._prefix))
            data = np.array([self.filename2params(f) for f in filenames])
            if not len(data):
                msg = "No isochrone files found in: %s"%self.get_dirname()
                raise Exception(msg)
            arange = np.unique(data[:,0])
            zrange = np.unique(data[:,1])
        elif abins is not None and zbins is not None:
            # Age in units of Gyr
            arange = np.linspace(abins[0],abins[1],abins[2]+1)
            # Metallicity sampled logarithmically
            zrange = np.logspace(np.log10(zbins[0]),np.log10(zbins[1]),zbins[2]+1)
        else:
            msg = "Must specify both `abins` and `zbins` or neither"
            raise Exception(msg)
        aa,zz = np.meshgrid(arange,zrange)
        return aa.flatten(),zz.flatten()

    def create_tree(self,grid=None):
        """Build a KD-tree over the (age, metallicity) grid points."""
        if grid is None: grid = self.create_grid()
        return scipy.spatial.cKDTree(np.vstack(grid).T)

    def get_filename(self):
        """Filename of the grid isochrone nearest the current parameters.

        NOTE(review): the KD-tree distance mixes age (Gyr-scale) and
        metallicity (~1e-3 scale) without normalization, so the match is
        dominated by age -- confirm this is intended.
        """
        dirname = self.get_dirname()
        p = [self.age,self.metallicity]
        dist,idx = self.tree.query(p)
        age = self.grid[0][idx]
        z = self.grid[1][idx]
        return os.path.join(dirname,self.params2filename(age,z))

    def _cache(self,name=None):
        """Re-parse the isochrone file if a parameter change selects a new one.

        `name` is the parameter that changed (None on initial call).
        """
        # For first call before init fully run
        if not hasattr(self,'tree'): return
        # A distance modulus change never changes which grid file is nearest.
        if name in ['distance_modulus']: return

        filename = self.get_filename()
        if filename != self.filename:
            self.filename = filename
            self._parse(self.filename)

    def _parse(self,filename):
        """Load isochrone data from `filename` (abstract)."""
        raise Exception("Must be implemented by subclass.")

    def print_info(self,age,metallicity):
        """Log (and return) a download banner for the given grid point."""
        params = dict(age=age,z=metallicity)
        params['name'] = self.__class__.__name__
        params['survey'] = self.survey
        params['feh'] = self.z2feh(metallicity)
        msg = 'Downloading: %(name)s (survey=%(survey)s, age=%(age).1fGyr, Z=%(z).5f, Fe/H=%(feh).3f)'%params
        logger.info(msg)
        return msg

    def query_server(self,outfile,age,metallicity):
        """Fetch one isochrone from the remote service into `outfile` (abstract)."""
        msg = "'query_server' not implemented by base class."
        logger.error(msg)
        raise RuntimeError(msg)

    @classmethod
    def verify(cls,filename,survey,age,metallicity):
        """Sanity-check a downloaded isochrone file (abstract; raises on failure)."""
        msg = "'verify' not implemented by base class."
        logger.error(msg)
        raise RuntimeError(msg)

    def download(self,age=None,metallicity=None,outdir=None,force=False):
        """
        Check valid parameter range and download isochrones from:
        http://stev.oapd.inaf.it/cgi-bin/cmd

        Parameters
        ----------
        age         : age in (Gyr); defaults to the current model age
        metallicity : Z; defaults to the current model metallicity
        outdir      : output directory (default to current directory)
        force       : force overwrite of file

        Returns
        -------
        outfile : path of the downloaded isochrone (None when an existing
                  valid file was found and kept)
        """
        try:
            from urllib.error import URLError
        except ImportError:
            from urllib2 import URLError

        if age is None: age = float(self.age)
        if metallicity is None: metallicity = float(self.metallicity)

        if outdir is None: outdir = './'
        basename = self.params2filename(age,metallicity)
        outfile = os.path.join(outdir,basename)

        if os.path.exists(outfile) and not force:
            try:
                # Keep the existing file if it passes verification.
                self.verify(outfile,self.survey,age,metallicity)
                logger.info("Found %s; skipping..."%(outfile))
                return
            except Exception as e:
                msg = "Overwriting corrupted %s..."%(outfile)
                logger.warn(msg)
                os.remove(outfile)

        mkdir(outdir)

        self.print_info(age,metallicity)
        self.query_server(outfile,age,metallicity)

        if not os.path.exists(outfile):
            raise RuntimeError('Download failed')

        try:
            # Verify the fresh download; delete it on failure so a corrupt
            # file is never left behind to be "found" on the next call.
            self.verify(outfile,self.survey,age,metallicity)
        except Exception as e:
            msg = "Output file is corrupted."
            logger.error(msg)
            msg = "Removing %s."%outfile
            logger.info(msg)
            os.remove(outfile)
            raise(e)

        return outfile
# Class Aliases
#Composite = CompositeIsochrone
def absolute_magnitude(distance_modulus, g, r, prob=None):
    """Calculate the absolute V-band magnitude from g and r band magnitudes.

    Parameters
    ----------
    distance_modulus : distance modulus used to convert to absolute scale
    g, r : scalar or array magnitudes in the g and r bands
    prob : unused (kept for interface compatibility)

    Returns
    -------
    Mv : combined absolute magnitude of all input stars
    """
    # g,r -> V transformation (coefficients presumably from an SDSS-style
    # photometric transformation -- verify against the original reference).
    v_band = g - 0.487 * (g - r) - 0.0249
    # Sum the individual fluxes, then convert back to a single magnitude.
    total_flux = np.sum(10 ** (-(v_band - distance_modulus) / 2.5))
    return -2.5 * np.log10(total_flux)
| |
#!/usr/bin/env python
import numpy as np
import tables as tb
# This class is accessible only for the examples
class Small(tb.IsDescription):
    """Row schema for the "small" benchmark record; ``pos`` fixes column order."""
    var1 = tb.StringCol(itemsize=4, pos=2)   # 4-character string
    var2 = tb.Int32Col(pos=1)                # 32-bit signed integer
    var3 = tb.Float64Col(pos=0)              # double-precision float
# Define a user record to characterize some kind of particles
class Medium(tb.IsDescription):
    """Row schema for the "medium" benchmark record (one 2-element float array)."""
    name = tb.StringCol(itemsize=16, pos=0)  # 16-character String
    float1 = tb.Float64Col(shape=2, dflt=np.arange(2), pos=1)
    #float1 = Float64Col(dflt=2.3)
    #float2 = Float64Col(dflt=2.3)
    # zADCcount = Int16Col()    # signed short integer
    ADCcount = tb.Int32Col(pos=6)    # signed 32-bit integer
    grid_i = tb.Int32Col(pos=7)    # integer
    grid_j = tb.Int32Col(pos=8)    # integer
    pressure = tb.Float32Col(pos=9)    # float  (single-precision)
    energy = tb.Float64Col(pos=2)    # double (double-precision)
    # unalig = Int8Col()          # just to unalign data
# Define a user record to characterize some kind of particles
class Big(tb.IsDescription):
    """Row schema for the "big" benchmark record (two 32-element float arrays)."""
    name = tb.StringCol(itemsize=16)  # 16-character String
    float1 = tb.Float64Col(shape=32, dflt=np.arange(32))
    float2 = tb.Float64Col(shape=32, dflt=2.2)
    TDCcount = tb.Int8Col()    # signed 8-bit integer
    #ADCcount = Int32Col()
    # ADCcount = Int16Col()    # signed short integer
    grid_i = tb.Int32Col()    # integer
    grid_j = tb.Int32Col()    # integer
    pressure = tb.Float32Col()    # float  (single-precision)
    energy = tb.Float64Col()    # double (double-precision)
def createFile(filename, totalrows, filters, recsize):
    """Write the benchmark tables and return ``(rowswritten, rowsize)``.

    Creates three nested groups, each holding one table of *totalrows*
    rows using the record layout selected by *recsize* ("big", "medium"
    or "small").
    """
    # Open a file in "w"rite mode
    fileh = tb.open_file(filename, mode="w", title="Table Benchmark",
                         filters=filters)

    # Table title
    title = "This is the table title"

    # Map the record-size flag onto its row-description class.
    descriptions = {"big": Big, "medium": Medium, "small": Small}

    group = fileh.root
    rowswritten = 0
    for level in range(3):
        description = descriptions.get(recsize)
        if description is None:
            raise RuntimeError("This should never happen")
        table = fileh.create_table(group, 'tuple' + str(level), description,
                                   title, None, totalrows)
        table.attrs.test = 2
        rowsize = table.rowsize

        # Row object used to append records one at a time.
        row = table.row

        if recsize == "big":
            for i in range(totalrows):
                row['TDCcount'] = i % 256
                # Common part with medium
                row['grid_i'] = i
                row['grid_j'] = 10 - i
                row['pressure'] = float(i * i)
                row['energy'] = row['pressure']
                row.append()
        elif recsize == "medium":
            for i in range(totalrows):
                # Common part with big:
                row['grid_i'] = i
                row['grid_j'] = 10 - i
                row['pressure'] = i * 2
                row['energy'] = row['pressure']
                row.append()
        else:  # Small record
            for i in range(totalrows):
                row['var1'] = str(i)
                row['var2'] = i
                row['var3'] = totalrows - i
                row.append()

        rowswritten += totalrows
        group._v_attrs.test2 = "just a test"

        # Descend into a freshly created subgroup for the next table.
        group = fileh.create_group(group, 'group' + str(level))
        table.flush()

    # Close the file (eventually destroy the extended type)
    fileh.close()
    return (rowswritten, rowsize)
def readFile(filename, recsize, verbose):
    """Read back every benchmark table in *filename*.

    Walks all groups, runs an in-kernel ``Table.where`` query against each
    table (the query depends on the record flavor), and returns
    ``(rowsread, rowsize)`` totals.

    Parameters
    ----------
    filename : HDF5 file previously written by ``createFile``
    recsize : "big", "medium" or "small" record flavor
    verbose : print per-table statistics and the selection results

    NOTE(review): if the file contains no tables, ``rowsize`` (and ``e``)
    are never assigned and the final return raises UnboundLocalError --
    preserved from the original behavior.
    """
    # Open the HDF5 file in read-only mode
    fileh = tb.open_file(filename, mode="r")
    rowsread = 0
    for groupobj in fileh.walk_groups(fileh.root):
        for table in fileh.list_nodes(groupobj, 'Table'):
            rowsize = table.rowsize
            print("reading", table)
            if verbose:
                print("Max rows in buf:", table.nrowsinbuf)
                print("Rows in", table._v_pathname, ":", table.nrows)
                print("Buffersize:", table.rowsize * table.nrowsinbuf)
                print("MaxTuples:", table.nrowsinbuf)

            # In-kernel selection; the queried columns differ per flavor.
            if recsize == "big" or recsize == "medium":
                e = [p['grid_i'] for p in
                     table.where('(grid_i <= 20) & (grid_j == 20)')]
            else:  # small record case
                e = [p['var3']
                     for p in table.where('(var2 <= 20) & (var2 >= 3)')]

            if verbose:
                print("resulting selection list ==>", e)
            rowsread += table.nrows
            if verbose:
                print("Total selected records ==> ", len(e))

    # Close the file (eventually destroy the extended type)
    fileh.close()
    return (rowsread, rowsize)
def readField(filename, field, rng, verbose):
    """Read one column (or all columns) from every benchmark table.

    Returns ``(rowsread, rowsize)`` totals over all tables in *filename*.
    """
    fileh = tb.open_file(filename, mode="r")
    rowsread = 0

    # Default slice: the whole table with step 1.
    if rng is None:
        rng = [0, -1, 1]
    # "all" means no column restriction (Table.read returns full rows).
    if field == "all":
        field = None
    start, stop, step = rng[0], rng[1], rng[2]

    for groupobj in fileh.walk_groups(fileh.root):
        for table in fileh.list_nodes(groupobj, 'Table'):
            rowsize = table.rowsize
            # table.nrowsinbuf = 3 # For testing purposes
            if verbose:
                print("Max rows in buf:", table.nrowsinbuf)
                print("Rows in", table._v_pathname, ":", table.nrows)
                print("Buffersize:", table.rowsize * table.nrowsinbuf)
                print("MaxTuples:", table.nrowsinbuf)
                print("(field, start, stop, step) ==>", (field, start, stop,
                                                         step))

            e = table.read(start, stop, step, field)

            rowsread += table.nrows
            if verbose:
                print("Selected rows ==> ", e)
                print("Total selected rows ==> ", len(e))

    # Close the file (eventually destroy the extended type)
    fileh.close()
    return (rowsread, rowsize)
if __name__ == "__main__":
    import sys
    import getopt

    # psyco is an optional (long-obsolete) specializing JIT.
    try:
        import psyco
        psyco_imported = 1
    except ImportError:
        psyco_imported = 0

    from time import perf_counter as clock
    from time import process_time as cpuclock

    usage = """usage: %s [-v] [-p] [-P] [-R range] [-r] [-w] [-s recsize] [-f field] [-c level] [-l complib] [-i iterations] [-S] [-F] file
            -v verbose
            -p use "psyco" if available
            -P do profile
            -R select a range in a field in the form "start,stop,step"
            -r only read test
            -w only write test
            -s use [big] record, [medium] or [small]
            -f only read stated field name in tables ("all" means all fields)
            -c sets a compression level (do not set it or 0 for no compression)
            -S activate shuffling filter
            -F activate fletcher32 filter
            -l sets the compression library to be used ("zlib", "lzo", "blosc", "bzip2")
            -i sets the number of rows in each table\n""" % sys.argv[0]

    try:
        opts, pargs = getopt.getopt(sys.argv[1:], 'vpPSFR:rwf:s:c:l:i:')
    except getopt.GetoptError:
        sys.stderr.write(usage)
        sys.exit(0)

    # if we pass too much parameters, abort
    if len(pargs) != 1:
        sys.stderr.write(usage)
        sys.exit(0)

    # default options
    verbose = 0
    profile = 0
    rng = None
    recsize = "medium"
    fieldName = None
    testread = 1
    testwrite = 1
    usepsyco = 0
    complevel = 0
    shuffle = 0
    fletcher32 = 0
    complib = "zlib"
    iterations = 100

    # Get the options (single elif chain; each option key matches at most one
    # branch, so this is equivalent to the former mixed if/elif layout)
    for option in opts:
        if option[0] == '-v':
            verbose = 1
        elif option[0] == '-p':
            usepsyco = 1
        elif option[0] == '-P':
            profile = 1
        elif option[0] == '-S':
            shuffle = 1
        elif option[0] == '-F':
            fletcher32 = 1
        elif option[0] == '-R':
            rng = [int(i) for i in option[1].split(",")]
        elif option[0] == '-r':
            testwrite = 0
        elif option[0] == '-w':
            testread = 0
        elif option[0] == '-f':
            fieldName = option[1]
        elif option[0] == '-s':
            recsize = option[1]
            if recsize not in ["big", "medium", "small"]:
                sys.stderr.write(usage)
                sys.exit(0)
        elif option[0] == '-c':
            complevel = int(option[1])
        elif option[0] == '-l':
            complib = option[1]
        elif option[0] == '-i':
            iterations = int(option[1])

    # Build the Filters instance
    filters = tb.Filters(complevel=complevel, complib=complib,
                         shuffle=shuffle, fletcher32=fletcher32)

    # Catch the hdf5 file passed as the last argument
    file = pargs[0]

    if verbose:
        print("numpy version:", np.__version__)
        if psyco_imported and usepsyco:
            print("Using psyco version:", psyco.version_info)

    if testwrite:
        print("Compression level:", complevel)
        if complevel > 0:
            print("Compression library:", complib)
            if shuffle:
                print("Suffling...")
        t1 = clock()
        cpu1 = cpuclock()
        if psyco_imported and usepsyco:
            psyco.bind(createFile)
        if profile:
            import profile as prof
            import pstats
            prof.run(
                '(rowsw, rowsz) = createFile(file, iterations, filters, '
                'recsize)',
                'table-bench.prof')
            stats = pstats.Stats('table-bench.prof')
            stats.strip_dirs()
            stats.sort_stats('time', 'calls')
            stats.print_stats(20)
        else:
            (rowsw, rowsz) = createFile(file, iterations, filters, recsize)
        t2 = clock()
        cpu2 = cpuclock()
        tapprows = t2 - t1
        cpuapprows = cpu2 - cpu1
        print(f"Rows written: {rowsw} Row size: {rowsz}")
        print(
            f"Time writing rows: {tapprows:.3f} s (real) "
            f"{cpuapprows:.3f} s (cpu) {cpuapprows / tapprows:.0%}")
        print(f"Write rows/sec: {rowsw / tapprows}")
        print(f"Write KB/s : {rowsw * rowsz / (tapprows * 1024):.0f}")

    if testread:
        t1 = clock()
        cpu1 = cpuclock()
        if psyco_imported and usepsyco:
            psyco.bind(readFile)
            # psyco.bind(readField)
        if rng or fieldName:
            (rowsr, rowsz) = readField(file, fieldName, rng, verbose)
        else:
            (rowsr, rowsz) = readFile(file, recsize, verbose)
        t2 = clock()
        cpu2 = cpuclock()
        treadrows = t2 - t1
        cpureadrows = cpu2 - cpu1
        # BUG FIX: report the rows *read* (rowsr); the original printed
        # rowsw here, which is undefined when running read-only (-r).
        print(f"Rows read: {rowsr} Row size: {rowsz}")
        print(
            f"Time reading rows: {treadrows:.3f} s (real) "
            f"{cpureadrows:.3f} s (cpu) {cpureadrows / treadrows:.0%}")
        print(f"Read rows/sec: {rowsr / treadrows}")
        print(f"Read KB/s : {rowsr * rowsz / (treadrows * 1024):.0f}")
| |
# position/views_admin.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .controllers import positions_import_from_master_server, refresh_cached_position_info_for_election, \
refresh_positions_with_candidate_details_for_election, \
refresh_positions_with_contest_office_details_for_election, \
refresh_positions_with_contest_measure_details_for_election
from .models import ANY_STANCE, PositionEntered, PositionForFriends, PositionListManager, PERCENT_RATING
from admin_tools.views import redirect_to_sign_in_page
from candidate.models import CandidateCampaign
from config.base import get_environment_variable
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.messages import get_messages
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.db import (IntegrityError)
from django.db.models import Q
from election.models import ElectionManager
from exception.models import handle_record_found_more_than_one_exception,\
handle_record_not_found_exception, handle_record_not_saved_exception
from measure.controllers import push_contest_measure_data_to_other_table_caches
from office.controllers import push_contest_office_data_to_other_table_caches
from voter.models import voter_has_authority
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, positive_value_exists
from django.http import HttpResponse
import json
# Endpoint used to pull positions from the master server, and this server's
# own root URL (compared against it to detect "I am the master").
POSITIONS_SYNC_URL = get_environment_variable("POSITIONS_SYNC_URL")  # positionsSyncOut
WE_VOTE_SERVER_ROOT_URL = get_environment_variable("WE_VOTE_SERVER_ROOT_URL")

logger = wevote_functions.admin.get_logger(__name__)
# This page does not need to be protected.
def positions_sync_out_view(request):  # positionsSyncOut
    """Return all public positions for one election as a JSON list.

    Requires ``google_civic_election_id`` in the query string.  Responds
    with a JSON array of position dicts on success, or a
    ``{'success': False, ...}`` payload when the id is missing, the query
    fails, or no positions are found.
    """
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    if not positive_value_exists(google_civic_election_id):
        json_data = {
            'success': False,
            'status': 'POSITION_LIST_CANNOT_BE_RETURNED-ELECTION_ID_REQUIRED'
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')

    # Fixed to ANY_STANCE here, so the stance filter below never triggers.
    stance_we_are_looking_for = ANY_STANCE

    try:
        # Only return public positions
        position_list_query = PositionEntered.objects.order_by('date_entered')
        # As of Aug 2018 we are no longer using PERCENT_RATING
        position_list_query = position_list_query.exclude(stance__iexact=PERCENT_RATING)
        position_list_query = position_list_query.filter(google_civic_election_id=google_civic_election_id)
        # SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING
        if stance_we_are_looking_for != ANY_STANCE:
            # If we passed in the stance "ANY" it means we want to not filter down the list
            position_list_query = position_list_query.filter(stance__iexact=stance_we_are_looking_for)
        # convert datetime to str for date_entered and date_last_changed columns
        # (raw SQL to_char via .extra so json.dumps can serialize the values)
        position_list_query = position_list_query.extra(
            select={'date_entered': "to_char(date_entered, 'YYYY-MM-DD HH24:MI:SS')"})
        position_list_query = position_list_query.extra(
            select={'date_last_changed': "to_char(date_last_changed, 'YYYY-MM-DD HH24:MI:SS')"})
        position_list_dict = position_list_query.values(
            'we_vote_id', 'ballot_item_display_name', 'ballot_item_image_url_https',
            'ballot_item_twitter_handle', 'speaker_display_name',
            'speaker_image_url_https', 'speaker_twitter_handle', 'date_entered',
            'date_last_changed', 'organization_we_vote_id', 'voter_we_vote_id',
            'public_figure_we_vote_id', 'google_civic_election_id', 'state_code',
            'vote_smart_rating_id', 'vote_smart_time_span', 'vote_smart_rating',
            'vote_smart_rating_name', 'contest_office_we_vote_id',
            'candidate_campaign_we_vote_id', 'google_civic_candidate_name',
            'politician_we_vote_id', 'contest_measure_we_vote_id', 'stance',
            'statement_text', 'statement_html', 'more_info_url', 'from_scraper',
            'organization_certified', 'volunteer_certified', 'voter_entering_position',
            'tweet_source_id', 'twitter_user_entered_position')
        if position_list_dict:
            position_list_json = list(position_list_dict)
            return HttpResponse(json.dumps(position_list_json), content_type='application/json')
    except Exception as e:
        handle_record_not_found_exception(e, logger=logger)

    # Fall-through: empty result set or query failure.
    json_data = {
        'success': False,
        'status': 'POSITION_LIST_MISSING'
    }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
@login_required
def positions_import_from_master_server_view(request):
    """Trigger a positions sync from the master We Vote server for one election."""
    # admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'admin'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    # Refuse to sync a server against itself.
    if WE_VOTE_SERVER_ROOT_URL in POSITIONS_SYNC_URL:
        messages.add_message(request, messages.ERROR, "Cannot sync with Master We Vote Server -- "
                                                      "this is the Master We Vote Server.")
        return HttpResponseRedirect(reverse('admin_tools:admin_home', args=()))

    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')

    # All exits below land back on the sync dashboard with the same params.
    dashboard_url = reverse('admin_tools:sync_dashboard', args=()) + \
        "?google_civic_election_id=" + str(google_civic_election_id) + \
        "&state_code=" + str(state_code)

    if not positive_value_exists(google_civic_election_id):
        messages.add_message(request, messages.INFO, 'Google civic election id is required for Positions import.')
        return HttpResponseRedirect(dashboard_url)

    results = positions_import_from_master_server(request, google_civic_election_id)

    if results['success']:
        messages.add_message(
            request, messages.INFO,
            'Positions import completed. Saved: {saved}, Updated: {updated}, '
            'Duplicates skipped: {duplicates_removed}, '
            'Not processed: {not_processed}'.format(
                saved=results['saved'],
                updated=results['updated'],
                duplicates_removed=results['duplicates_removed'],
                not_processed=results['not_processed']))
    else:
        messages.add_message(request, messages.ERROR, results['status'])

    return HttpResponseRedirect(dashboard_url)
@login_required
def position_list_view(request):
    """
    List the 50 most recent public and friends-only positions, optionally
    filtered by election and a free-text search.

    We actually don't want to see PositionForFriends entries in this view
    :param request:
    :return:
    """
    # admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'partner_organization', 'verified_volunteer'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    messages_on_stage = get_messages(request)
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    show_all_elections = request.GET.get('show_all_elections', False)
    state_code = request.GET.get('state_code', '')
    position_search = request.GET.get('position_search', '')

    def apply_position_search(query):
        """Require every search word to match at least one position field."""
        for one_word in position_search.split():
            filters = [
                Q(state_code__icontains=one_word),
                Q(we_vote_id__iexact=one_word),
                Q(candidate_campaign_we_vote_id__iexact=one_word),
                Q(contest_measure_we_vote_id__iexact=one_word),
                Q(contest_office_we_vote_id__iexact=one_word),
                Q(organization_we_vote_id__iexact=one_word),
                Q(voter_we_vote_id__iexact=one_word),
                Q(google_civic_measure_title__icontains=one_word),
                Q(speaker_display_name__icontains=one_word),
                Q(ballot_item_display_name__icontains=one_word),
            ]
            # "OR" the field alternatives together, then "AND" per word.
            final_filters = filters.pop()
            for item in filters:
                final_filters |= item
            query = query.filter(final_filters)
        return query

    def fetch_positions(position_class):
        """Return (first 50 positions, total count) for one position model.

        Previously this logic was duplicated verbatim for PositionEntered
        and PositionForFriends.
        """
        query = position_class.objects.order_by('-id')  # This order_by is temp
        # As of Aug 2018 we are no longer using PERCENT_RATING
        query = query.exclude(stance__iexact=PERCENT_RATING)
        if positive_value_exists(google_civic_election_id):
            query = query.filter(google_civic_election_id=google_civic_election_id)
        if positive_value_exists(position_search):
            query = apply_position_search(query)
        # Count the full filtered set, but only materialize the first 50.
        return list(query[:50]), query.count()

    # Publicly visible positions
    public_position_list, public_position_list_count = fetch_positions(PositionEntered)
    # Friends-only visible positions
    friends_only_position_list, friends_only_position_list_count = fetch_positions(PositionForFriends)

    position_list = public_position_list + friends_only_position_list

    messages.add_message(request, messages.INFO, str(public_position_list_count) + ' public positions found. ' +
                         str(friends_only_position_list_count) + ' friends-only positions found.')

    election_manager = ElectionManager()
    if positive_value_exists(show_all_elections):
        results = election_manager.retrieve_elections()
    else:
        results = election_manager.retrieve_upcoming_elections()
    election_list = results['election_list']

    # Make sure we always include the current election in the election_list, even if it is older
    if positive_value_exists(google_civic_election_id):
        this_election_found = False
        for one_election in election_list:
            if convert_to_int(one_election.google_civic_election_id) == convert_to_int(google_civic_election_id):
                this_election_found = True
                break
        if not this_election_found:
            results = election_manager.retrieve_election(google_civic_election_id)
            if results['election_found']:
                election_list.append(results['election'])

    template_values = {
        'messages_on_stage': messages_on_stage,
        'position_list': position_list,
        'position_search': position_search,
        'election_list': election_list,
        'google_civic_election_id': google_civic_election_id,
        'show_all_elections': show_all_elections,
        'state_code': state_code,
    }
    return render(request, 'position/position_list.html', template_values)
@login_required
def position_new_view(request):
    """Render an empty position edit form for creating a new position."""
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    template_values = {
        'messages_on_stage': get_messages(request),
    }
    return render(request, 'position/position_edit.html', template_values)
@login_required
def position_edit_view(request, position_we_vote_id):
    """Render the edit form for an existing position, or a blank form if
    the position cannot be uniquely resolved."""
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    messages_on_stage = get_messages(request)
    template_values = {
        'messages_on_stage': messages_on_stage,
    }
    try:
        position_on_stage = PositionEntered.objects.get(we_vote_id=position_we_vote_id)
        template_values['position'] = position_on_stage
    except PositionEntered.MultipleObjectsReturned:
        # Ambiguous id: fall through and show a blank form.
        pass
    except PositionEntered.DoesNotExist:
        # This is fine, create new
        pass
    return render(request, 'position/position_edit.html', template_values)
@login_required
def position_edit_process_view(request):  # TODO DALE I don't think this is in use, but needs to be updated
    """
    Process the new or edit position forms
    :param request:
    :return:
    """
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    position_we_vote_id = request.POST.get('position_we_vote_id')
    # Use .get with defaults so a partial form POST cannot raise KeyError
    position_name = request.POST.get('position_name', '')
    twitter_handle = request.POST.get('twitter_handle', '')
    position_website = request.POST.get('position_website', '')

    # Check to see if this position is already being used anywhere
    position_on_stage = None
    position_on_stage_found = False
    try:
        position_query = PositionEntered.objects.filter(we_vote_id=position_we_vote_id)
        if len(position_query):
            position_on_stage = position_query[0]
            position_on_stage_found = True
    except Exception as e:
        handle_record_not_found_exception(e, logger=logger)

    try:
        if position_on_stage_found:
            # Update
            position_on_stage.position_name = position_name
            position_on_stage.twitter_handle = twitter_handle
            position_on_stage.position_website = position_website
            position_on_stage.save()
            messages.add_message(request, messages.INFO, 'PositionEntered updated.')
        else:
            # Create new. BUG FIX: this previously instantiated CandidateCampaign,
            # which would have written the position form data into the wrong table.
            position_on_stage = PositionEntered(
                position_name=position_name,
                twitter_handle=twitter_handle,
                position_website=position_website,
            )
            position_on_stage.save()
            messages.add_message(request, messages.INFO, 'New position saved.')
    except Exception as e:
        handle_record_not_saved_exception(e, logger=logger)
        messages.add_message(request, messages.ERROR, 'Could not save position.')

    return HttpResponseRedirect(reverse('position:position_list', args=()))
@login_required
def position_summary_view(request, position_we_vote_id):
    """Display a read-only summary page for a single position."""
    # admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'partner_organization', 'verified_volunteer'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    messages_on_stage = get_messages(request)
    position_on_stage = PositionEntered()
    position_on_stage_found = False
    try:
        position_on_stage = PositionEntered.objects.get(we_vote_id=position_we_vote_id)
        position_on_stage_found = True
    except PositionEntered.MultipleObjectsReturned as e:
        handle_record_found_more_than_one_exception(e, logger=logger)
    except PositionEntered.DoesNotExist:
        # No match is acceptable; render the page without a position
        pass

    # Only include the position in the context when one was retrieved
    template_values = {'messages_on_stage': messages_on_stage}
    if position_on_stage_found:
        template_values['position'] = position_on_stage
    return render(request, 'position/position_summary.html', template_values)
@login_required
def refresh_cached_position_info_for_election_view(request):
    """Refresh cached position info for one election, then return to the position list."""
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')

    results = refresh_cached_position_info_for_election(
        google_civic_election_id=google_civic_election_id, state_code=state_code)
    messages.add_message(
        request, messages.INFO,
        'public_positions_updated: {public_positions_updated}, '
        'friends_only_positions_updated: {friends_only_positions_updated}'
        ''.format(public_positions_updated=results['public_positions_updated'],
                  friends_only_positions_updated=results['friends_only_positions_updated']))

    return HttpResponseRedirect(reverse('position:position_list', args=()) +
                                '?google_civic_election_id=' + str(google_civic_election_id) +
                                '&state_code=' + str(state_code))
@login_required
def refresh_positions_with_candidate_details_for_election_view(request):
    """
    Refresh Positions with candidate details
    :param request:
    :return:
    """
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')

    results = refresh_positions_with_candidate_details_for_election(
        google_civic_election_id=google_civic_election_id, state_code=state_code)
    if results['success']:
        messages.add_message(
            request, messages.INFO,
            "Social media retrieved. Positions refreshed: {update_all_positions_results_count},"
            .format(update_all_positions_results_count=results['positions_updated_count']))
    else:
        # Surface the failure status from the refresh routine
        messages.add_message(request, messages.INFO, results['status'])

    return HttpResponseRedirect(reverse('candidate:candidate_list', args=()) +
                                '?google_civic_election_id=' + str(google_civic_election_id) +
                                '&state_code=' + str(state_code))
@login_required
def refresh_positions_with_contest_office_details_for_election_view(request):
    """
    Refresh positions with contest office details.

    Dispatches on the incoming GET parameters: a specific office (by id or
    we_vote_id) pushes that office's data into other table caches; otherwise
    the whole election is refreshed.
    :param request:
    :return:
    """
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')
    contest_office_id = request.GET.get('office_id', 0)
    contest_office_we_vote_id = request.GET.get('office_we_vote_id', '')

    if positive_value_exists(contest_office_id):
        results = push_contest_office_data_to_other_table_caches(contest_office_id)
    elif positive_value_exists(contest_office_we_vote_id):
        results = push_contest_office_data_to_other_table_caches(contest_office_we_vote_id)
    else:
        # The remaining cases (with or without an election id) were previously
        # two duplicated branches making this identical call; merged into one
        results = refresh_positions_with_contest_office_details_for_election(
            google_civic_election_id=google_civic_election_id, state_code=state_code)

    if not results['success']:
        messages.add_message(request, messages.INFO, results['status'])
    else:
        positions_updated_count = results['positions_updated_count']
        messages.add_message(request, messages.INFO,
                             "Social media retrieved. Positions refreshed: {update_all_positions_results_count},"
                             .format(update_all_positions_results_count=positions_updated_count))

    if positive_value_exists(google_civic_election_id):
        return HttpResponseRedirect(reverse('office:office_list', args=()) +
                                    '?google_civic_election_id=' + str(google_civic_election_id) +
                                    '&state_code=' + str(state_code))
    elif positive_value_exists(contest_office_id):
        return HttpResponseRedirect(reverse('office:office_summary', args=(contest_office_id,)))
    else:
        return HttpResponseRedirect(reverse('office:office_list', args=()) +
                                    '?google_civic_election_id=' + str(google_civic_election_id) +
                                    '&state_code=' + str(state_code))
@login_required
def refresh_positions_with_contest_measure_details_for_election_view(request):
    """
    Refresh positions with contest measure details.

    Dispatches on the incoming GET parameters: a specific measure (by id or
    we_vote_id) pushes that measure's data into other table caches; otherwise
    the whole election is refreshed.
    :param request:
    :return:
    """
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')
    contest_measure_id = request.GET.get('measure_id', 0)
    contest_measure_we_vote_id = request.GET.get('measure_we_vote_id', '')

    if positive_value_exists(contest_measure_id):
        results = push_contest_measure_data_to_other_table_caches(contest_measure_id)
    elif positive_value_exists(contest_measure_we_vote_id):
        results = push_contest_measure_data_to_other_table_caches(contest_measure_we_vote_id)
    else:
        # The remaining cases (with or without an election id) were previously
        # two duplicated branches making this identical call; merged into one
        results = refresh_positions_with_contest_measure_details_for_election(
            google_civic_election_id=google_civic_election_id, state_code=state_code)

    if not results['success']:
        messages.add_message(request, messages.INFO, results['status'])
    else:
        positions_updated_count = results['positions_updated_count']
        messages.add_message(request, messages.INFO,
                             "Social media retrieved. Positions refreshed: {update_all_positions_results_count},"
                             .format(update_all_positions_results_count=positions_updated_count))

    if positive_value_exists(google_civic_election_id):
        return HttpResponseRedirect(reverse('measure:measure_list', args=()) +
                                    '?google_civic_election_id=' + str(google_civic_election_id) +
                                    '&state_code=' + str(state_code))
    elif positive_value_exists(contest_measure_id):
        return HttpResponseRedirect(reverse('measure:measure_summary', args=(contest_measure_id,)))
    else:
        return HttpResponseRedirect(reverse('measure:measure_list', args=()) +
                                    '?google_civic_election_id=' + str(google_civic_election_id) +
                                    '&state_code=' + str(state_code))
@login_required
def relink_candidates_measures_view(request):
    """Placeholder view; the relink feature has not been built yet."""
    # admin, verified_volunteer
    authority_required = {'verified_volunteer'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    messages.add_message(request, messages.INFO, 'TO BE BUILT: relink_candidates_measures_view')
    return HttpResponseRedirect(reverse('position:position_list', args=()))
@login_required
def position_delete_process_view(request):
    """
    Delete a position
    :param request:
    :return:
    """
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    position_we_vote_id = request.GET.get('position_we_vote_id', '')
    google_civic_election_id = request.GET.get('google_civic_election_id', 0)

    # Retrieve this position
    position_on_stage_found = False
    position_on_stage = PositionEntered()
    organization_id = 0
    try:
        position_query = PositionEntered.objects.filter(we_vote_id=position_we_vote_id)
        if len(position_query):
            position_on_stage = position_query[0]
            organization_id = position_on_stage.organization_id
            position_on_stage_found = True
    except Exception as e:
        # Log the lookup failure instead of silently dropping the exception
        handle_record_not_found_exception(e, logger=logger)
        messages.add_message(request, messages.ERROR, 'Could not find position -- exception.')

    if not position_on_stage_found:
        messages.add_message(request, messages.ERROR, 'Could not find position.')
        return HttpResponseRedirect(reverse('position:position_list', args=()) +
                                    "?google_civic_election_id=" + str(google_civic_election_id))

    try:
        # position_on_stage_found is guaranteed True here (early return above),
        # so the previous re-check and its unreachable else branch were removed
        position_on_stage.delete()
        messages.add_message(request, messages.INFO, 'Position deleted.')
        if positive_value_exists(organization_id):
            return HttpResponseRedirect(reverse('organization:organization_position_list',
                                                args=([organization_id])) +
                                        "?google_civic_election_id=" + str(google_civic_election_id))
    except Exception as e:
        handle_record_not_saved_exception(e, logger=logger)
        # BUG FIX: this message previously said "Could not save position."
        messages.add_message(request, messages.ERROR, 'Could not delete position.')

    return HttpResponseRedirect(reverse('position:position_list', args=()) +
                                "?google_civic_election_id=" + str(google_civic_election_id))