#!/usr/bin/env python
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import time
import wpan
from wpan import verify
# -----------------------------------------------------------------------------------------------------------------------
# Test description:
#
# This test covers the situation of an SED child (re)attaching to a parent while multiple IPv6 addresses are present
# on the child.
#
# Network topology
#
# leader ---- parent
# |
# |
# child (sleepy)
#
test_name = __file__[:-3] if __file__.endswith('.py') else __file__
print('-' * 120)
print('Starting \'{}\''.format(test_name))
# -----------------------------------------------------------------------------------------------------------------------
# Utility functions
def verify_address(node_list, prefix):
"""
This function verifies that all nodes in the `node_list` contain an IPv6 address with the given `prefix`.
"""
for node in node_list:
all_addrs = wpan.parse_list(node.get(wpan.WPAN_IP6_ALL_ADDRESSES))
verify(any([addr.startswith(prefix[:-1]) for addr in all_addrs]))
# -----------------------------------------------------------------------------------------------------------------------
# Creating `wpan.Nodes` instances
speedup = 4
wpan.Node.set_time_speedup_factor(speedup)
leader = wpan.Node()
parent = wpan.Node()
child = wpan.Node()
# -----------------------------------------------------------------------------------------------------------------------
# Init all nodes
wpan.Node.init_all_nodes()
# -----------------------------------------------------------------------------------------------------------------------
# Build network topology
leader.form('multi-addr-test')
leader.whitelist_node(parent)
parent.whitelist_node(leader)
parent.join_node(leader, wpan.JOIN_TYPE_ROUTER)
parent.whitelist_node(child)
child.whitelist_node(parent)
child.join_node(parent, node_type=wpan.JOIN_TYPE_SLEEPY_END_DEVICE)
child.set(wpan.WPAN_POLL_INTERVAL, '400')
# -----------------------------------------------------------------------------------------------------------------------
# Test implementation
WAIT_TIME = 5
CHILD_SUPERVISION_CHECK_TIMEOUT = 1
prefix1 = 'fd00:1::'
prefix2 = 'fd00:2::'
prefix3 = 'fd00:3::'
prefix4 = 'fd00:4::'
# Add 4 prefixes (all with SLAAC bit set).
leader.add_prefix(prefix1, on_mesh=True, slaac=True, configure=True)
leader.add_prefix(prefix2, on_mesh=True, slaac=True, configure=True)
leader.add_prefix(prefix3, on_mesh=True, slaac=True, configure=True)
leader.add_prefix(prefix4, on_mesh=True, slaac=True, configure=True)
# Verify that the sleepy child gets all 4 SLAAC addresses.
def check_addresses_on_child():
verify_address([child], prefix1)
verify_address([child], prefix2)
verify_address([child], prefix3)
verify_address([child], prefix4)
wpan.verify_within(check_addresses_on_child, WAIT_TIME)
# Remove child from parent's white-list
parent.remove(
wpan.WPAN_MAC_WHITELIST_ENTRIES, child.get(wpan.WPAN_EXT_ADDRESS)[1:-1]
)
# Enable supervision check on the child; this ensures that the child is detached soon.
child.set(
wpan.WPAN_CHILD_SUPERVISION_CHECK_TIMEOUT,
str(CHILD_SUPERVISION_CHECK_TIMEOUT),
)
# Wait for child to get detached.
def check_child_is_detached():
verify(not child.is_associated())
wpan.verify_within(check_child_is_detached, WAIT_TIME)
# Now reset parent and wait for it to be associated.
parent.reset()
def check_parent_is_associated():
verify(parent.is_associated())
wpan.verify_within(check_parent_is_associated, WAIT_TIME)
# Now verify that child is indeed getting attached back.
def check_child_is_associated():
verify(child.is_associated())
wpan.verify_within(check_child_is_associated, WAIT_TIME)
# And finally check that we see all the child addresses in the parent's
# child table.
def check_child_addresses_on_parent():
child_addrs = parent.get(wpan.WPAN_THREAD_CHILD_TABLE_ADDRESSES)
verify(child_addrs.find(prefix1) > 0)
verify(child_addrs.find(prefix2) > 0)
verify(child_addrs.find(prefix3) > 0)
verify(child_addrs.find(prefix4) > 0)
wpan.verify_within(check_child_addresses_on_parent, WAIT_TIME)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Check the child recovery after a parent reset using quick re-attach
# ("Child Update" exchange).
# Disable supervision check on the child.
child.set(wpan.WPAN_CHILD_SUPERVISION_CHECK_TIMEOUT, '1000')
child.set(wpan.WPAN_POLL_INTERVAL, '10000')
time.sleep(0.1)
# We use the "stat:ncp" wpantund property to verify that child does not
# get detached.
child_num_state_changes = len(wpan.parse_list(child.get("stat:ncp")))
# Reset parent and wait for it to be associated.
parent.reset()
wpan.verify_within(check_parent_is_associated, WAIT_TIME)
child.set(wpan.WPAN_POLL_INTERVAL, '100')
# Verify that we again see all the child addresses in the parent's child table.
# Note that child should register its addresses using "Child Update
# Request" exchange.
wpan.verify_within(check_child_addresses_on_parent, WAIT_TIME)
# Verify that there was no state change on child.
verify(child_num_state_changes == len(wpan.parse_list(child.get("stat:ncp"))))
# -----------------------------------------------------------------------------------------------------------------------
# Test finished
wpan.Node.finalize_all_nodes()
print('\'{}\' passed.'.format(test_name))
#!/usr/bin/env python3
# Copyright (c) 2016 The Stardust Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test the SegWit changeover logic
#
from test_framework.test_framework import StardustTestFramework
from test_framework.util import *
from test_framework.mininode import sha256, ripemd160
import os
import shutil
NODE_0 = 0
NODE_1 = 1
NODE_2 = 2
WIT_V0 = 0
WIT_V1 = 1
def witness_script(version, pubkey):
if (version == 0):
pubkeyhash = bytes_to_hex_str(ripemd160(sha256(hex_str_to_bytes(pubkey))))
pkscript = "0014" + pubkeyhash
elif (version == 1):
# 1-of-1 multisig
scripthash = bytes_to_hex_str(sha256(hex_str_to_bytes("5121" + pubkey + "51ae")))
pkscript = "0020" + scripthash
else:
assert("Wrong version" == "0 or 1")
return pkscript
def addlength(script):
scriptlen = format(len(script)//2, 'x')
assert(len(scriptlen) == 2)
return scriptlen + script
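# Illustration (not part of the original test, using a hypothetical compressed
# pubkey `pk`): witness_script(0, pk) returns the 22-byte P2WPKH program
# "0014" + RIPEMD160(SHA256(pk)); addlength() then prepends the script length
# as a single hex byte, so the result starts with "16" (0x16 == 22 bytes).
# This length-prefixed script is what gets spliced into the raw transaction
# in create_witnessprogram() below.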
def create_witnessprogram(version, node, utxo, pubkey, encode_p2sh, amount):
pkscript = witness_script(version, pubkey);
if (encode_p2sh):
p2sh_hash = bytes_to_hex_str(ripemd160(sha256(hex_str_to_bytes(pkscript))))
pkscript = "a914"+p2sh_hash+"87"
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]} )
DUMMY_P2SH = "2MySexEGVzZpRgNQ1JdjdP5bRETznm3roQ2" # P2SH of "OP_1 OP_DROP"
outputs[DUMMY_P2SH] = amount
tx_to_witness = node.createrawtransaction(inputs,outputs)
#replace dummy output with our own
tx_to_witness = tx_to_witness[0:110] + addlength(pkscript) + tx_to_witness[-8:]
return tx_to_witness
def send_to_witness(version, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=""):
tx_to_witness = create_witnessprogram(version, node, utxo, pubkey, encode_p2sh, amount)
if (sign):
signed = node.signrawtransaction(tx_to_witness)
assert("errors" not in signed or len(["errors"]) == 0)
return node.sendrawtransaction(signed["hex"])
else:
if (insert_redeem_script):
tx_to_witness = tx_to_witness[0:82] + addlength(insert_redeem_script) + tx_to_witness[84:]
return node.sendrawtransaction(tx_to_witness)
def getutxo(txid):
utxo = {}
utxo["vout"] = 0
utxo["txid"] = txid
return utxo
class SegWitTest(StardustTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-logtimemicros", "-debug", "-walletprematurewitness"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-logtimemicros", "-debug", "-blockversion=4", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-logtimemicros", "-debug", "-blockversion=536870915", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness"]))
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 1)
connect_nodes(self.nodes[0], 2)
self.is_network_split = False
self.sync_all()
def success_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
block = node.generate(1)
assert_equal(len(node.getblock(block[0])["tx"]), 2)
sync_blocks(self.nodes)
def skip_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
block = node.generate(1)
assert_equal(len(node.getblock(block[0])["tx"]), 1)
sync_blocks(self.nodes)
def fail_accept(self, node, txid, sign, redeem_script=""):
try:
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
except JSONRPCException as exp:
assert(exp.error["code"] == -26)
else:
raise AssertionError("Tx should not have been accepted")
def fail_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
try:
node.generate(1)
except JSONRPCException as exp:
assert(exp.error["code"] == -1)
else:
raise AssertionError("Created valid block when TestBlockValidity should have failed")
sync_blocks(self.nodes)
def run_test(self):
self.nodes[0].generate(160) #block 160
self.pubkey = []
p2sh_ids = [] # p2sh_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE embedded in p2sh
wit_ids = [] # wit_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE via bare witness
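        # e.g. wit_ids[NODE_1][WIT_V0][2] is the third bare-witness version-0 txid
        # paying node 1's key, while p2sh_ids[NODE_2][WIT_V1][0] is the first
        # P2SH-embedded version-1 txid paying node 2's key.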
for i in range(3):
newaddress = self.nodes[i].getnewaddress()
self.pubkey.append(self.nodes[i].validateaddress(newaddress)["pubkey"])
multiaddress = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]])
self.nodes[i].addwitnessaddress(newaddress)
self.nodes[i].addwitnessaddress(multiaddress)
p2sh_ids.append([])
wit_ids.append([])
for v in range(2):
p2sh_ids[i].append([])
wit_ids[i].append([])
for i in range(5):
for n in range(3):
for v in range(2):
wit_ids[n][v].append(send_to_witness(v, self.nodes[0], self.nodes[0].listunspent()[0], self.pubkey[n], False, Decimal("49.999")))
p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], self.nodes[0].listunspent()[0], self.pubkey[n], True, Decimal("49.999")))
self.nodes[0].generate(1) #block 161
sync_blocks(self.nodes)
# Make sure all nodes recognize the transactions as theirs
assert_equal(self.nodes[0].getbalance(), 60*50 - 60*50 + 20*Decimal("49.999") + 50)
assert_equal(self.nodes[1].getbalance(), 20*Decimal("49.999"))
assert_equal(self.nodes[2].getbalance(), 20*Decimal("49.999"))
self.nodes[0].generate(262) #block 423
sync_blocks(self.nodes)
print("Verify default node can't accept any witness format txs before fork")
# unsigned, no scriptsig
self.fail_accept(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], False)
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], False)
# unsigned with redeem script
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], False, addlength(witness_script(0, self.pubkey[0])))
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], False, addlength(witness_script(1, self.pubkey[0])))
# signed
self.fail_accept(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True)
self.fail_accept(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True)
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True)
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True)
print("Verify witness txs are skipped for mining before the fork")
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][0], True) #block 424
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][0], True) #block 425
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][0], True) #block 426
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][0], True) #block 427
# TODO: An old node would see these txs without witnesses and be able to mine them
print("Verify unsigned bare witness txs in versionbits-setting blocks are valid before the fork")
self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][1], False) #block 428
self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][1], False) #block 429
print("Verify unsigned p2sh witness txs without a redeem script are invalid")
self.fail_accept(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][1], False)
self.fail_accept(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][1], False)
print("Verify unsigned p2sh witness txs with a redeem script in versionbits-settings blocks are valid before the fork")
self.success_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][1], False, addlength(witness_script(0, self.pubkey[2]))) #block 430
self.success_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][1], False, addlength(witness_script(1, self.pubkey[2]))) #block 431
print("Verify previous witness txs skipped for mining can now be mined")
assert_equal(len(self.nodes[2].getrawmempool()), 4)
block = self.nodes[2].generate(1) #block 432 (first block with new rules; 432 = 144 * 3)
sync_blocks(self.nodes)
assert_equal(len(self.nodes[2].getrawmempool()), 0)
assert_equal(len(self.nodes[2].getblock(block[0])["tx"]), 5)
print("Verify witness txs without witness data are invalid after the fork")
self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][2], False)
self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][2], False)
self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][2], False, addlength(witness_script(0, self.pubkey[2])))
self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][2], False, addlength(witness_script(1, self.pubkey[2])))
print("Verify default node can now use witness txs")
        self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True) #block 433
        self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True) #block 434
        self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True) #block 435
        self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True) #block 436
if __name__ == '__main__':
SegWitTest().main()
import bs
import bsUtils
import bsElimination
import bsBomb
import bsSpaz
import random
import math
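# Simple spherical playfield bound: a position counts as "on the map" if it is
# within `radius` units of `center`; crates and blasts outside it are skipped.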
class Map:
center = (0, 3, -4)
radius = 8
@classmethod
def inBounds(cls, pos):
dx, dy, dz = pos[0] - cls.center[0], pos[1] - cls.center[1], pos[2] - cls.center[2],
return cls.radius >= math.sqrt(dx**2 + dy**2 + dz**2)
class Crate(bsBomb.Bomb):
def __init__(self, position=(0, 1, 0), velocity=(0, 0, 0)):
self.position = position
bsBomb.Bomb.__init__(self, position, velocity,
bombType='tnt', blastRadius=0.0,
sourcePlayer=None, owner=None)
self.node.extraAcceleration = (0, -50, 0)
def handleMessage(self, m):
#if isinstance(m, bs.PickedUpMessage):
# self._heldBy = m.node
#elif isinstance(m, bs.DroppedMessage):
# bs.animate(self._powText, 'scale', {0:0.01, 500: 0.03})
# bs.gameTimer(500, bs.WeakCall(self.pow))
bsBomb.Bomb.handleMessage(self, m)
def explode(self):
pos = self.position
bs.gameTimer(100, bs.WeakCall(bs.getActivity().dropPowerup, pos))
bs.gameTimer(1, bs.WeakCall(self.handleMessage, bs.DieMessage()))
class Bomb(bsBomb.Bomb):
def explode(self):
if self._exploded:
return
self._exploded = True
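        # Bomberman-style blast: walk outward along the X and Z axes one unit at
        # a time (150 ms per step) and spawn a small Blast at every in-bounds
        # position, so the explosion travels in a cross shape.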
size = int(self.blastRadius)
for mod in range(-size, size+1):
pos = self.node.position
posX = (pos[0] + mod*1.0, pos[1], pos[2])
posY = (pos[0], pos[1], pos[2] + mod*1.0)
if Map.inBounds(posX):
bs.gameTimer(abs(mod)*150, bs.Call(blast, posX, self.bombType, self.sourcePlayer, self.hitType, self.hitSubType))
if Map.inBounds(posY):
bs.gameTimer(abs(mod)*150, bs.Call(blast, posY, self.bombType, self.sourcePlayer, self.hitType, self.hitSubType))
bs.gameTimer(1, bs.WeakCall(self.handleMessage, bs.DieMessage()))
class Blast(bsBomb.Blast):
    # re-implements the stock Blast __init__ mostly to reduce the camera-shake effect
def __init__(self,position=(0,1,0),velocity=(0,0,0),blastRadius=2.0,blastType="normal",sourcePlayer=None,hitType='explosion',hitSubType='normal'):
"""
Instantiate with given values.
"""
bs.Actor.__init__(self)
factory = Bomb.getFactory()
self.blastType = blastType
self.sourcePlayer = sourcePlayer
self.hitType = hitType;
self.hitSubType = hitSubType;
# blast radius
self.radius = blastRadius
self.node = bs.newNode('region',
attrs={'position':(position[0],position[1]-0.1,position[2]), # move down a bit so we throw more stuff upward
'scale':(self.radius,self.radius,self.radius),
'type':'sphere',
'materials':(factory.blastMaterial,bs.getSharedObject('attackMaterial'))},
delegate=self)
bs.gameTimer(50,self.node.delete)
# throw in an explosion and flash
explosion = bs.newNode("explosion",
attrs={'position':position,
'velocity':(velocity[0],max(-1.0,velocity[1]),velocity[2]),
'radius':self.radius,
'big':(self.blastType == 'tnt')})
if self.blastType == "ice":
explosion.color = (0,0.05,0.4)
bs.gameTimer(1000,explosion.delete)
if self.blastType != 'ice': bs.emitBGDynamics(position=position,velocity=velocity,count=int(1.0+random.random()*4),emitType='tendrils',tendrilType='thinSmoke')
bs.emitBGDynamics(position=position,velocity=velocity,count=int(4.0+random.random()*4),emitType='tendrils',tendrilType='ice' if self.blastType == 'ice' else 'smoke')
bs.emitBGDynamics(position=position,emitType='distortion',spread=1.0 if self.blastType == 'tnt' else 2.0)
# and emit some shrapnel..
if self.blastType == 'ice':
def _doEmit():
bs.emitBGDynamics(position=position,velocity=velocity,count=30,spread=2.0,scale=0.4,chunkType='ice',emitType='stickers');
bs.gameTimer(50,_doEmit) # looks better if we delay a bit
elif self.blastType == 'sticky':
def _doEmit():
bs.emitBGDynamics(position=position,velocity=velocity,count=int(4.0+random.random()*8),spread=0.7,chunkType='slime');
bs.emitBGDynamics(position=position,velocity=velocity,count=int(4.0+random.random()*8),scale=0.5, spread=0.7,chunkType='slime');
bs.emitBGDynamics(position=position,velocity=velocity,count=15,scale=0.6,chunkType='slime',emitType='stickers');
bs.emitBGDynamics(position=position,velocity=velocity,count=20,scale=0.7,chunkType='spark',emitType='stickers');
bs.emitBGDynamics(position=position,velocity=velocity,count=int(6.0+random.random()*12),scale=0.8,spread=1.5,chunkType='spark');
bs.gameTimer(50,_doEmit) # looks better if we delay a bit
elif self.blastType == 'impact': # regular bomb shrapnel
def _doEmit():
bs.emitBGDynamics(position=position,velocity=velocity,count=int(4.0+random.random()*8),scale=0.8,chunkType='metal');
bs.emitBGDynamics(position=position,velocity=velocity,count=int(4.0+random.random()*8),scale=0.4,chunkType='metal');
bs.emitBGDynamics(position=position,velocity=velocity,count=20,scale=0.7,chunkType='spark',emitType='stickers');
bs.emitBGDynamics(position=position,velocity=velocity,count=int(8.0+random.random()*15),scale=0.8,spread=1.5,chunkType='spark');
bs.gameTimer(50,_doEmit) # looks better if we delay a bit
else: # regular or land mine bomb shrapnel
def _doEmit():
if self.blastType != 'tnt':
bs.emitBGDynamics(position=position,velocity=velocity,count=int(4.0+random.random()*8),chunkType='rock');
bs.emitBGDynamics(position=position,velocity=velocity,count=int(4.0+random.random()*8),scale=0.5,chunkType='rock');
bs.emitBGDynamics(position=position,velocity=velocity,count=30,scale=1.0 if self.blastType=='tnt' else 0.7,chunkType='spark',emitType='stickers');
bs.emitBGDynamics(position=position,velocity=velocity,count=int(18.0+random.random()*20),scale=1.0 if self.blastType == 'tnt' else 0.8,spread=1.5,chunkType='spark');
# tnt throws splintery chunks
if self.blastType == 'tnt':
def _emitSplinters():
bs.emitBGDynamics(position=position,velocity=velocity,count=int(20.0+random.random()*25),scale=0.8,spread=1.0,chunkType='splinter');
bs.gameTimer(10,_emitSplinters)
# every now and then do a sparky one
if self.blastType == 'tnt' or random.random() < 0.1:
def _emitExtraSparks():
bs.emitBGDynamics(position=position,velocity=velocity,count=int(10.0+random.random()*20),scale=0.8,spread=1.5,chunkType='spark');
bs.gameTimer(20,_emitExtraSparks)
bs.gameTimer(50,_doEmit) # looks better if we delay a bit
light = bs.newNode('light',
attrs={'position':position,
'color': (0.6,0.6,1.0) if self.blastType == 'ice' else (1,0.3,0.1),
'volumeIntensityScale': 10.0})
s = random.uniform(0.6,0.9)
scorchRadius = lightRadius = self.radius
if self.blastType == 'tnt':
lightRadius *= 1.4
scorchRadius *= 1.15
s *= 3.0
iScale = 1.6
bsUtils.animate(light,"intensity",{0:2.0*iScale, int(s*20):0.1*iScale, int(s*25):0.2*iScale, int(s*50):17.0*iScale, int(s*60):5.0*iScale, int(s*80):4.0*iScale, int(s*200):0.6*iScale, int(s*2000):0.00*iScale, int(s*3000):0.0})
bsUtils.animate(light,"radius",{0:lightRadius*0.2, int(s*50):lightRadius*0.55, int(s*100):lightRadius*0.3, int(s*300):lightRadius*0.15, int(s*1000):lightRadius*0.05})
bs.gameTimer(int(s*3000),light.delete)
# make a scorch that fades over time
scorch = bs.newNode('scorch',
attrs={'position':position,'size':scorchRadius*0.5,'big':(self.blastType == 'tnt')})
if self.blastType == 'ice':
scorch.color = (1,1,1.5)
bsUtils.animate(scorch,"presence",{3000:1, 13000:0})
bs.gameTimer(13000,scorch.delete)
if self.blastType == 'ice':
bs.playSound(factory.hissSound,position=light.position)
p = light.position
bs.playSound(factory.getRandomExplodeSound(),position=p)
bs.playSound(factory.debrisFallSound,position=p)
########
bs.shakeCamera(intensity=5.0 if self.blastType == 'tnt' else 0.05)
########
# tnt is more epic..
if self.blastType == 'tnt':
bs.playSound(factory.getRandomExplodeSound(),position=p)
def _extraBoom():
bs.playSound(factory.getRandomExplodeSound(),position=p)
bs.gameTimer(250,_extraBoom)
def _extraDebrisSound():
bs.playSound(factory.debrisFallSound,position=p)
bs.playSound(factory.woodDebrisFallSound,position=p)
bs.gameTimer(400,_extraDebrisSound)
def blast(pos, blastType, sourcePlayer, hitType, hitSubType):
Blast(position=pos, velocity=(0, 1, 0),
blastRadius=0.5,blastType=blastType,
sourcePlayer=sourcePlayer,hitType=hitType,
hitSubType=hitSubType).autoRetain()
class Player(bs.PlayerSpaz):
isDead = False
#def __init__(self, *args, **kwargs):
# super(self.__class__, self).init(*args, **kwargs)
# self.multiplyer = 0
def handleMessage(self, m):
if False:
pass
elif isinstance(m, bs.PowerupMessage):
if m.powerupType == 'punch':
self.blastRadius += 1.0
self.setScoreText("range up")
super(self.__class__, self).handleMessage(m)
else:
super(self.__class__, self).handleMessage(m)
def dropBomb(self):
"""
        Tell the spaz to drop one of his bombs, and return
the resulting bomb object.
If the spaz has no bombs or is otherwise unable to
drop a bomb, returns None.
"""
if (self.landMineCount <= 0 and self.bombCount <= 0) or self.frozen: return
p = self.node.positionForward
v = self.node.velocity
if self.landMineCount > 0:
droppingBomb = False
self.setLandMineCount(self.landMineCount-1)
bombType = 'landMine'
else:
droppingBomb = True
bombType = self.bombType
bomb = Bomb(position=(p[0],p[1] - 0.0,p[2]),
velocity=(v[0],v[1],v[2]),
bombType=bombType,
blastRadius=self.blastRadius,
sourcePlayer=self.sourcePlayer,
owner=self.node).autoRetain()
if droppingBomb:
self.bombCount -= 1
bomb.node.addDeathAction(bs.WeakCall(self.handleMessage,bsSpaz._BombDiedMessage()))
self._pickUp(bomb.node)
for c in self._droppedBombCallbacks: c(self,bomb)
return bomb
def bsGetAPIVersion():
return 4
def bsGetGames():
return [Bomberman]
class Bomberman(bs.TeamGameActivity):
@classmethod
def getName(cls):
return 'Bomberman'
@classmethod
def getScoreInfo(cls):
return {'scoreName':'Survived',
'scoreType':'seconds',
'scoreVersion':'B',
'noneIsWinner':True}
@classmethod
def getDescription(cls, sessionType):
return "Destroy crates and collect powerups"
def getInstanceDescription(self):
return 'Destroy crates and collect powerups'
@classmethod
def supportsSessionType(cls, sessionType):
return True if (issubclass(sessionType, bs.TeamsSession)
or issubclass(sessionType, bs.FreeForAllSession)) else False
@classmethod
def getSupportedMaps(cls, sessionType):
return ["Doom Shroom"]
@classmethod
def getSettings(cls, sessionType):
return [("Time Limit",{'choices':[('None',0),('1 Minute',60),('2 Minutes',120),
('5 Minutes',300)],'default':0}),
("Lives (0 = Unlimited)",{'minValue':0,'default':3,'increment':1}),
("Epic Mode",{'default':False})]
def __init__(self, settings):
bs.TeamGameActivity.__init__(self,settings)
if self.settings['Epic Mode']:
self._isSlowMotion = True
        # print messages when players die (since it's meaningful in this game)
self.announcePlayerDeaths = True
self._lastPlayerDeathTime = None
self._startGameTime = 1000
self.gridsize = (1.0, 1.0)
self.gridnum = (18, 18)
def onTransitionIn(self):
bs.TeamGameActivity.onTransitionIn(self, music='Epic' if self.settings['Epic Mode'] else 'Survival')
self._startGameTime = bs.getGameTime()
def onBegin(self):
bs.TeamGameActivity.onBegin(self)
self.setupStandardTimeLimit(self.settings['Time Limit'])
for x in range(self.gridnum[0]):
for y in range(self.gridnum[1]):
self.dropCrate(x, y)
def dropCrate(self, gridX, gridY):
pos = (Map.center[0] + self.gridsize[0]*gridX - self.gridnum[0]*self.gridsize[0]*0.5,
Map.center[1],
Map.center[2] + self.gridsize[1]*gridY - self.gridnum[1]*self.gridsize[1]*0.5)
#print('dropped crate @', pos)
if Map.inBounds(pos):
Crate(position=pos).autoRetain()
def dropPowerup(self, position):
powerupType = random.choice(["punch", "tripleBombs", "health"])
bs.Powerup(position=position, powerupType=powerupType, expire=False).autoRetain()
def onPlayerJoin(self, player):
self.spawnPlayer(player)
def onPlayerLeave(self, player):
bs.TeamGameActivity.onPlayerLeave(self, player)
# overriding the default character spawning..
def spawnPlayer(self, player):
if isinstance(self.getSession(), bs.TeamsSession):
position = self.getMap().getStartPosition(player.getTeam().getID())
else:
# otherwise do free-for-all spawn locations
position = self.getMap().getFFAStartPosition(self.players)
angle = None
#spaz = self.spawnPlayerSpaz(player)
# lets reconnect this player's controls to this
# spaz but *without* the ability to attack or pick stuff up
#spaz.connectControlsToPlayer(enablePunch=False,
# enableBomb=False,
# enablePickUp=False)
# also lets have them make some noise when they die..
#spaz.playBigDeathSound = True
name = player.getName()
lightColor = bsUtils.getNormalizedColor(player.color)
displayColor = bs.getSafeColor(player.color, targetIntensity=0.75)
spaz = Player(color=player.color,
highlight=player.highlight,
character=player.character,
player=player)
player.setActor(spaz)
# we want a bigger area-of-interest in co-op mode
# if isinstance(self.getSession(),bs.CoopSession): spaz.node.areaOfInterestRadius = 5.0
# else: spaz.node.areaOfInterestRadius = 5.0
# if this is co-op and we're on Courtyard or Runaround, add the material that allows us to
# collide with the player-walls
# FIXME; need to generalize this
if isinstance(self.getSession(), bs.CoopSession) and self.getMap().getName() in ['Courtyard', 'Tower D']:
mat = self.getMap().preloadData['collideWithWallMaterial']
spaz.node.materials += (mat,)
spaz.node.rollerMaterials += (mat,)
spaz.node.name = name
spaz.node.nameColor = displayColor
spaz.connectControlsToPlayer( enableJump=True, enablePunch=True, enablePickUp=False, enableBomb=True, enableRun=True, enableFly=False)
self.scoreSet.playerGotNewSpaz(player,spaz)
# move to the stand position and add a flash of light
spaz.handleMessage(bs.StandMessage(position,angle if angle is not None else random.uniform(0, 360)))
t = bs.getGameTime()
bs.playSound(self._spawnSound, 1, position=spaz.node.position)
light = bs.newNode('light', attrs={'color': lightColor})
spaz.node.connectAttr('position', light, 'position')
bsUtils.animate(light, 'intensity', {0:0, 250:1, 500:0})
bs.gameTimer(500, light.delete)
# various high-level game events come through this method
def handleMessage(self,m):
if isinstance(m, bs.PlayerSpazDeathMessage):
bs.TeamGameActivity.handleMessage(self, m) # augment standard behavior
player = m.spaz.getPlayer()
player.gameData["survivalSeconds"] = bs.getGameTime()
if len(self._getLivingTeams()) < 2:
self._roundEndTimer = bs.Timer(1000, self.endGame)
else:
# default handler:
super(self.__class__, self).handleMessage(m)#bs.TeamGameActivity.handleMessage(self,m)
def endGame(self):
curTime = bs.getGameTime()
# mark 'death-time' as now for any still-living players
# and award players points for how long they lasted.
# (these per-player scores are only meaningful in team-games)
for team in self.teams:
for player in team.players:
# throw an extra fudge factor +1 in so teams that
# didn't die come out ahead of teams that did
if 'survivalSeconds' in player.gameData:
score = player.gameData['survivalSeconds']
elif 'survivalSeconds' in team.gameData:
score = team.gameData['survivalSeconds']
else:
score = (curTime - self._startGameTime)/1000 + 1
#if 'survivalSeconds' not in player.gameData:
# player.gameData['survivalSeconds'] = (curTime - self._startGameTime)/1000 + 1
# print('extraBonusSwag for player')
# award a per-player score depending on how many seconds they lasted
# (per-player scores only affect teams mode; everywhere else just looks at the per-team score)
#score = (player.gameData['survivalSeconds'])
self.scoreSet.playerScored(player, score, screenMessage=False)
# ok now calc game results: set a score for each team and then tell the game to end
results = bs.TeamGameResults()
# remember that 'free-for-all' mode is simply a special form of 'teams' mode
# where each player gets their own team, so we can just always deal in teams
# and have all cases covered
for team in self.teams:
# set the team score to the max time survived by any player on that team
longestLife = 0
for player in team.players:
if 'survivalSeconds' in player.gameData:
time = player.gameData['survivalSeconds']
elif 'survivalSeconds' in team.gameData:
time = team.gameData['survivalSeconds']
else:
time = (curTime - self._startGameTime)/1000 + 1
longestLife = max(longestLife, time)
results.setTeamScore(team, longestLife)
self.end(results=results)
def _getLivingTeams(self):
return [team for team in self.teams if len(team.players) > 0 and any('survivalSeconds' not in player.gameData for player in team.players)]
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GTFlow Estimator definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.boosted_trees.estimator_batch import model
from tensorflow.contrib.boosted_trees.python.utils import losses
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.python.estimator.canned import head as core_head_lib
from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import losses as core_losses
# ================== Old estimator interface===================================
# The estimators below were designed for old feature columns and old estimator
# interface. They can be used with new feature columns and losses by setting
# use_core_libs = True.
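# A minimal usage sketch (not from this module; `learner_config`,
# `feature_columns` and the input functions are hypothetical placeholders):
#
#   classifier = GradientBoostedDecisionTreeClassifier(
#       learner_config=learner_config,
#       examples_per_layer=1000,
#       n_classes=2,
#       num_trees=10,
#       feature_columns=feature_columns)
#   classifier.fit(input_fn=train_input_fn, steps=100)
#   predictions = classifier.predict(input_fn=eval_input_fn)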
class GradientBoostedDecisionTreeClassifier(estimator.Estimator):
"""An estimator using gradient boosted decision trees."""
def __init__(self,
learner_config,
examples_per_layer,
n_classes=2,
num_trees=None,
feature_columns=None,
weight_column_name=None,
model_dir=None,
config=None,
label_keys=None,
feature_engineering_fn=None,
logits_modifier_function=None,
center_bias=True,
use_core_libs=False,
output_leaf_index=False,
override_global_step_value=None,
num_quantiles=100):
"""Initializes a GradientBoostedDecisionTreeClassifier estimator instance.
Args:
learner_config: A config for the learner.
examples_per_layer: Number of examples to accumulate before growing a
layer. It can also be a function that computes the number of examples
based on the depth of the layer that's being built.
n_classes: Number of classes in the classification.
num_trees: An int, number of trees to build.
feature_columns: A list of feature columns.
weight_column_name: Name of the column for weights, or None if not
weighted.
model_dir: Directory for model exports, etc.
config: `RunConfig` object to configure the runtime settings.
label_keys: Optional list of strings with size `[n_classes]` defining the
label vocabulary. Only supported for `n_classes` > 2.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
logits_modifier_function: A modifier function for the logits.
center_bias: Whether a separate tree should be created for first fitting
the bias.
use_core_libs: Whether feature columns and loss are from the core (as
opposed to contrib) version of tensorflow.
output_leaf_index: whether to output leaf indices along with predictions
during inference. The leaf node indexes are available in predictions
dict by the key 'leaf_index'. It is a Tensor of rank 2 and its shape is
[batch_size, num_trees].
For example,
result_iter = classifier.predict(...)
for result_dict in result_iter:
# access leaf index list by result_dict["leaf_index"]
# which contains one leaf index per tree
override_global_step_value: If after the training is done, global step
value must be reset to this value. This should be used to reset global
step to a number > number of steps used to train the current ensemble.
For example, the usual way is to train a number of trees and set a very
large number of training steps. When the training is done (number of
trees were trained), this parameter can be used to set the global step
to a large value, making it look like that number of training steps ran.
If None, no override of global step will happen.
num_quantiles: Number of quantiles to build for numeric feature values.
Raises:
ValueError: If learner_config is not valid.
"""
if n_classes > 2:
# For multi-class classification, use our loss implementation that
# supports second order derivative.
def loss_fn(labels, logits, weights=None):
result = losses.per_example_maxent_loss(
labels=labels,
logits=logits,
weights=weights,
num_classes=n_classes)
return math_ops.reduce_mean(result[0])
else:
loss_fn = None
head = head_lib.multi_class_head(
n_classes=n_classes,
weight_column_name=weight_column_name,
enable_centered_bias=False,
loss_fn=loss_fn,
label_keys=label_keys)
if learner_config.num_classes == 0:
learner_config.num_classes = n_classes
elif learner_config.num_classes != n_classes:
raise ValueError("n_classes (%d) doesn't match learner_config (%d)." %
(learner_config.num_classes, n_classes))
super(GradientBoostedDecisionTreeClassifier, self).__init__(
model_fn=model.model_builder,
params={
'head': head,
'feature_columns': feature_columns,
'learner_config': learner_config,
'num_trees': num_trees,
'weight_column_name': weight_column_name,
'examples_per_layer': examples_per_layer,
'center_bias': center_bias,
'logits_modifier_function': logits_modifier_function,
'use_core_libs': use_core_libs,
'output_leaf_index': output_leaf_index,
'override_global_step_value': override_global_step_value,
'num_quantiles': num_quantiles,
},
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
class GradientBoostedDecisionTreeRegressor(estimator.Estimator):
"""An estimator using gradient boosted decision trees."""
def __init__(self,
learner_config,
examples_per_layer,
label_dimension=1,
num_trees=None,
feature_columns=None,
label_name=None,
weight_column_name=None,
model_dir=None,
config=None,
feature_engineering_fn=None,
logits_modifier_function=None,
center_bias=True,
use_core_libs=False,
output_leaf_index=False,
override_global_step_value=None,
num_quantiles=100):
"""Initializes a GradientBoostedDecisionTreeRegressor estimator instance.
Args:
learner_config: A config for the learner.
examples_per_layer: Number of examples to accumulate before growing a
layer. It can also be a function that computes the number of examples
based on the depth of the layer that's being built.
label_dimension: Number of regression labels per example. This is the size
of the last dimension of the labels `Tensor` (typically, this has shape
`[batch_size, label_dimension]`).
num_trees: An int, number of trees to build.
feature_columns: A list of feature columns.
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: Name of the column for weights, or None if not
weighted.
model_dir: Directory for model exports, etc.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
logits_modifier_function: A modifier function for the logits.
center_bias: Whether a separate tree should be created for first fitting
the bias.
use_core_libs: Whether feature columns and loss are from the core (as
opposed to contrib) version of tensorflow.
output_leaf_index: whether to output leaf indices along with predictions
during inference. The leaf node indexes are available in predictions
dict by the key 'leaf_index'. For example,
result_dict = classifier.predict(...)
for example_prediction_result in result_dict:
# access leaf index list by example_prediction_result["leaf_index"]
# which contains one leaf index per tree
override_global_step_value: If after the training is done, global step
value must be reset to this value. This should be used to reset global
step to a number > number of steps used to train the current ensemble.
For example, the usual way is to train a number of trees and set a very
large number of training steps. When the training is done (number of
trees were trained), this parameter can be used to set the global step
to a large value, making it look like that number of training steps ran.
If None, no override of global step will happen.
num_quantiles: Number of quantiles to build for numeric feature values.
"""
head = head_lib.regression_head(
label_name=label_name,
label_dimension=label_dimension,
weight_column_name=weight_column_name,
enable_centered_bias=False)
if label_dimension == 1:
learner_config.num_classes = 2
else:
learner_config.num_classes = label_dimension
super(GradientBoostedDecisionTreeRegressor, self).__init__(
model_fn=model.model_builder,
params={
'head': head,
'feature_columns': feature_columns,
'learner_config': learner_config,
'num_trees': num_trees,
'weight_column_name': weight_column_name,
'examples_per_layer': examples_per_layer,
'logits_modifier_function': logits_modifier_function,
'center_bias': center_bias,
'use_core_libs': use_core_libs,
            'output_leaf_index': output_leaf_index,
'override_global_step_value': override_global_step_value,
'num_quantiles': num_quantiles,
},
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
class GradientBoostedDecisionTreeEstimator(estimator.Estimator):
"""An estimator using gradient boosted decision trees.
Useful for training with user specified `Head`.
"""
def __init__(self,
learner_config,
examples_per_layer,
head,
num_trees=None,
feature_columns=None,
weight_column_name=None,
model_dir=None,
config=None,
feature_engineering_fn=None,
logits_modifier_function=None,
center_bias=True,
use_core_libs=False,
output_leaf_index=False,
override_global_step_value=None,
num_quantiles=100):
"""Initializes a GradientBoostedDecisionTreeEstimator estimator instance.
Args:
learner_config: A config for the learner.
examples_per_layer: Number of examples to accumulate before growing a
layer. It can also be a function that computes the number of examples
based on the depth of the layer that's being built.
head: `Head` instance.
num_trees: An int, number of trees to build.
feature_columns: A list of feature columns.
weight_column_name: Name of the column for weights, or None if not
weighted.
model_dir: Directory for model exports, etc.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
logits_modifier_function: A modifier function for the logits.
center_bias: Whether a separate tree should be created for first fitting
the bias.
use_core_libs: Whether feature columns and loss are from the core (as
opposed to contrib) version of tensorflow.
output_leaf_index: whether to output leaf indices along with predictions
during inference. The leaf node indexes are available in predictions
dict by the key 'leaf_index'. For example,
result_dict = classifier.predict(...)
for example_prediction_result in result_dict:
# access leaf index list by example_prediction_result["leaf_index"]
# which contains one leaf index per tree
override_global_step_value: If after the training is done, global step
value must be reset to this value. This should be used to reset global
step to a number > number of steps used to train the current ensemble.
For example, the usual way is to train a number of trees and set a very
large number of training steps. When the training is done (number of
trees were trained), this parameter can be used to set the global step
to a large value, making it look like that number of training steps ran.
If None, no override of global step will happen.
num_quantiles: Number of quantiles to build for numeric feature values.
"""
super(GradientBoostedDecisionTreeEstimator, self).__init__(
model_fn=model.model_builder,
params={
'head': head,
'feature_columns': feature_columns,
'learner_config': learner_config,
'num_trees': num_trees,
'weight_column_name': weight_column_name,
'examples_per_layer': examples_per_layer,
'logits_modifier_function': logits_modifier_function,
'center_bias': center_bias,
'use_core_libs': use_core_libs,
            'output_leaf_index': output_leaf_index,
'override_global_step_value': override_global_step_value,
'num_quantiles': num_quantiles,
},
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
class GradientBoostedDecisionTreeRanker(estimator.Estimator):
"""A ranking estimator using gradient boosted decision trees."""
def __init__(self,
learner_config,
examples_per_layer,
head,
ranking_model_pair_keys,
num_trees=None,
feature_columns=None,
weight_column_name=None,
model_dir=None,
config=None,
label_keys=None,
feature_engineering_fn=None,
logits_modifier_function=None,
center_bias=False,
use_core_libs=False,
output_leaf_index=False,
override_global_step_value=None,
num_quantiles=100):
"""Initializes a GradientBoostedDecisionTreeRanker instance.
This is an estimator that can be trained off the pairwise data and can be
used for inference on non-paired data. This is essentially LambdaMart.
Args:
learner_config: A config for the learner.
examples_per_layer: Number of examples to accumulate before growing a
layer. It can also be a function that computes the number of examples
based on the depth of the layer that's being built.
head: `Head` instance.
ranking_model_pair_keys: Keys to distinguish between features
for left and right part of the training pairs for ranking. For example,
for an Example with features "a.f1" and "b.f1", the keys would be
("a", "b").
num_trees: An int, number of trees to build.
feature_columns: A list of feature columns.
weight_column_name: Name of the column for weights, or None if not
weighted.
model_dir: Directory for model exports, etc.
config: `RunConfig` object to configure the runtime settings.
label_keys: Optional list of strings with size `[n_classes]` defining the
label vocabulary. Only supported for `n_classes` > 2.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
logits_modifier_function: A modifier function for the logits.
center_bias: Whether a separate tree should be created for first fitting
the bias.
use_core_libs: Whether feature columns and loss are from the core (as
opposed to contrib) version of tensorflow.
output_leaf_index: whether to output leaf indices along with predictions
during inference. The leaf node indexes are available in predictions
dict by the key 'leaf_index'. It is a Tensor of rank 2 and its shape is
[batch_size, num_trees].
For example,
result_iter = classifier.predict(...)
for result_dict in result_iter:
# access leaf index list by result_dict["leaf_index"]
# which contains one leaf index per tree
override_global_step_value: If after the training is done, global step
value must be reset to this value. This should be used to reset global
step to a number > number of steps used to train the current ensemble.
For example, the usual way is to train a number of trees and set a very
large number of training steps. When the training is done (number of
trees were trained), this parameter can be used to set the global step
to a large value, making it look like that number of training steps ran.
If None, no override of global step will happen.
num_quantiles: Number of quantiles to build for numeric feature values.
Raises:
ValueError: If learner_config is not valid.
"""
super(GradientBoostedDecisionTreeRanker, self).__init__(
model_fn=model.ranking_model_builder,
params={
'head': head,
'n_classes': 2,
'feature_columns': feature_columns,
'learner_config': learner_config,
'num_trees': num_trees,
'weight_column_name': weight_column_name,
'examples_per_layer': examples_per_layer,
'center_bias': center_bias,
'logits_modifier_function': logits_modifier_function,
'use_core_libs': use_core_libs,
'output_leaf_index': output_leaf_index,
'ranking_model_pair_keys': ranking_model_pair_keys,
'override_global_step_value': override_global_step_value,
'num_quantiles': num_quantiles,
},
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
# ================== New Estimator interface===================================
# The estimators below use new core Estimator interface and must be used with
# new feature columns and heads.
# For multiclass classification, use the following head since it uses loss
# that is twice differentiable.
def core_multiclass_head(n_classes):
"""Core head for multiclass problems."""
def loss_fn(labels, logits):
result = losses.per_example_maxent_loss(
labels=labels, logits=logits, weights=None, num_classes=n_classes)
return result[0]
# pylint:disable=protected-access
head_fn = core_head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=n_classes,
loss_fn=loss_fn,
loss_reduction=core_losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
# pylint:enable=protected-access
return head_fn
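# A hedged sketch of pairing this head with the core estimator defined below
# (`learner_config`, `feature_columns` and `train_input_fn` are hypothetical):
#
#   est = CoreGradientBoostedDecisionTreeEstimator(
#       learner_config=learner_config,
#       examples_per_layer=1000,
#       head=core_multiclass_head(n_classes=3),
#       num_trees=10,
#       feature_columns=feature_columns)
#   est.train(input_fn=train_input_fn, max_steps=100)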
class CoreGradientBoostedDecisionTreeEstimator(core_estimator.Estimator):
"""An estimator using gradient boosted decision trees.
Useful for training with user specified `Head`.
"""
def __init__(self,
learner_config,
examples_per_layer,
head,
num_trees=None,
feature_columns=None,
weight_column_name=None,
model_dir=None,
config=None,
label_keys=None,
feature_engineering_fn=None,
logits_modifier_function=None,
center_bias=True,
output_leaf_index=False,
num_quantiles=100):
"""Initializes a core version of GradientBoostedDecisionTreeEstimator.
Args:
learner_config: A config for the learner.
examples_per_layer: Number of examples to accumulate before growing a
layer. It can also be a function that computes the number of examples
based on the depth of the layer that's being built.
head: `Head` instance.
num_trees: An int, number of trees to build.
feature_columns: A list of feature columns.
weight_column_name: Name of the column for weights, or None if not
weighted.
model_dir: Directory for model exports, etc.
config: `RunConfig` object to configure the runtime settings.
label_keys: Optional list of strings with size `[n_classes]` defining the
label vocabulary. Only supported for `n_classes` > 2.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
logits_modifier_function: A modifier function for the logits.
center_bias: Whether a separate tree should be created for first fitting
the bias.
output_leaf_index: whether to output leaf indices along with predictions
during inference. The leaf node indexes are available in predictions
dict by the key 'leaf_index'. For example,
result_dict = classifier.predict(...)
for example_prediction_result in result_dict:
# access leaf index list by example_prediction_result["leaf_index"]
# which contains one leaf index per tree
num_quantiles: Number of quantiles to build for numeric feature values.
"""
def _model_fn(features, labels, mode, config):
return model.model_builder(
features=features,
labels=labels,
mode=mode,
config=config,
params={
'head': head,
'feature_columns': feature_columns,
'learner_config': learner_config,
'num_trees': num_trees,
'weight_column_name': weight_column_name,
'examples_per_layer': examples_per_layer,
'center_bias': center_bias,
'logits_modifier_function': logits_modifier_function,
'use_core_libs': True,
'output_leaf_index': output_leaf_index,
'override_global_step_value': None,
'num_quantiles': num_quantiles,
},
output_type=model.ModelBuilderOutputType.ESTIMATOR_SPEC)
super(CoreGradientBoostedDecisionTreeEstimator, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
class CoreGradientBoostedDecisionTreeRanker(core_estimator.Estimator):
"""A ranking estimator using gradient boosted decision trees."""
def __init__(self,
learner_config,
examples_per_layer,
head,
ranking_model_pair_keys,
num_trees=None,
feature_columns=None,
weight_column_name=None,
model_dir=None,
config=None,
label_keys=None,
logits_modifier_function=None,
center_bias=False,
output_leaf_index=False,
num_quantiles=100):
"""Initializes a GradientBoostedDecisionTreeRanker instance.
This is an estimator that can be trained off the pairwise data and can be
used for inference on non-paired data. This is essentially LambdaMart.
Args:
learner_config: A config for the learner.
examples_per_layer: Number of examples to accumulate before growing a
layer. It can also be a function that computes the number of examples
based on the depth of the layer that's being built.
head: `Head` instance.
ranking_model_pair_keys: Keys to distinguish between features
for left and right part of the training pairs for ranking. For example,
for an Example with features "a.f1" and "b.f1", the keys would be
("a", "b").
num_trees: An int, number of trees to build.
feature_columns: A list of feature columns.
weight_column_name: Name of the column for weights, or None if not
weighted.
model_dir: Directory for model exports, etc.
config: `RunConfig` object to configure the runtime settings.
label_keys: Optional list of strings with size `[n_classes]` defining the
label vocabulary. Only supported for `n_classes` > 2.
logits_modifier_function: A modifier function for the logits.
center_bias: Whether a separate tree should be created for first fitting
the bias.
output_leaf_index: whether to output leaf indices along with predictions
during inference. The leaf node indexes are available in predictions
dict by the key 'leaf_index'. It is a Tensor of rank 2 and its shape is
[batch_size, num_trees].
For example,
result_iter = classifier.predict(...)
for result_dict in result_iter:
# access leaf index list by result_dict["leaf_index"]
# which contains one leaf index per tree
num_quantiles: Number of quantiles to build for numeric feature values.
Raises:
ValueError: If learner_config is not valid.
"""
def _model_fn(features, labels, mode, config):
return model.ranking_model_builder(
features=features,
labels=labels,
mode=mode,
config=config,
params={
'head': head,
'n_classes': 2,
'feature_columns': feature_columns,
'learner_config': learner_config,
'num_trees': num_trees,
'weight_column_name': weight_column_name,
'examples_per_layer': examples_per_layer,
'center_bias': center_bias,
'logits_modifier_function': logits_modifier_function,
'use_core_libs': True,
'output_leaf_index': output_leaf_index,
'ranking_model_pair_keys': ranking_model_pair_keys,
'override_global_step_value': None,
'num_quantiles': num_quantiles,
},
output_type=model.ModelBuilderOutputType.ESTIMATOR_SPEC)
super(CoreGradientBoostedDecisionTreeRanker, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
import sys
import json
import uuid
import logging
from testtools.matchers import Equals, Contains, Not
from testtools import content, content_type, ExpectedException
import webtest.app
from vnc_api.vnc_api import *
sys.path.append('../common/tests')
from test_utils import *
import test_common
import test_case
logger = logging.getLogger(__name__)
class TestStrictCompOn(test_case.NeutronBackendTestCase):
@classmethod
def setUpClass(cls):
super(TestStrictCompOn, cls).setUpClass(
extra_config_knobs=[('NEUTRON', 'strict_compliance', True)])
#end setUpClass
def _create_resource(self, res_type, proj_id, name=None, extra_res_fields=None):
context = {'operation': 'CREATE',
'user_id': '',
'is_admin': False,
'roles': '',
'tenant_id': proj_id}
if name:
res_name = name
else:
res_name = '%s-%s' %(res_type, str(uuid.uuid4()))
data = {'resource': {'name': res_name,
'tenant_id': proj_id}}
if extra_res_fields:
data['resource'].update(extra_res_fields)
body = {'context': context, 'data': data}
resp = self._api_svr_app.post_json('/neutron/%s' %(res_type), body)
res_q = json.loads(resp.text)
return res_name, res_q
# end _create_resource
def _create_floatingip_and_associate_port_without_ext_gw(self, proj_id, name=None):
#external network
net_name, net_q = self._create_resource('network', proj_id, extra_res_fields={'router:external':True})
subnet_name, subnet_q = self._create_resource('subnet', proj_id, name, extra_res_fields={'network_id': net_q['id'], 'cidr': '10.2.0.0/24', 'ip_version': 4})
#private network
pvt_net_name, pvt_net_q = self._create_resource('network', proj_id)
pvt_subnet_name, pvt_subnet_q = self._create_resource('subnet', proj_id, name, extra_res_fields={'network_id': pvt_net_q['id'], 'cidr': '10.1.0.0/24', 'ip_version': 4})
port_name, port_q = self._create_resource('port', proj_id, name, extra_res_fields={'network_id': pvt_subnet_q['network_id']})
return self._create_resource('floatingip', proj_id, name, extra_res_fields={'floating_network_id': net_q['id'], 'port_id':port_q['id']})
def _create_floatingip_and_associate_port_with_ext_gw(self, proj_id, name=None):
#external network
net_name, net_q = self._create_resource('network', proj_id, extra_res_fields={'router:external':True})
subnet_name, subnet_q = self._create_resource('subnet', proj_id, name, extra_res_fields={'network_id': net_q['id'], 'cidr': '10.2.0.0/24', 'ip_version': 4})
router_name, router_q = self._create_resource('router',proj_id, name)
#private network
pvt_net_name, pvt_net_q = self._create_resource('network', proj_id)
pvt_subnet_name, pvt_subnet_q = self._create_resource('subnet', proj_id, name, extra_res_fields={'network_id': pvt_net_q['id'], 'cidr': '10.1.0.0/24', 'ip_version': 4})
port_name, port_q = self._create_resource('port', proj_id, name, extra_res_fields={'network_id': pvt_subnet_q['network_id']})
port2_name, port2_q = self._create_resource('port', proj_id, name, extra_res_fields={'network_id': pvt_subnet_q['network_id']})
#External gateway
router_name, router_q = self._update_resource('router', router_q['id'], proj_id, name, extra_res_fields={'external_gateway_info': {'network_id':net_q['id']}})
router_name, router_q = self._add_router_interface('router', router_q['id'], proj_id, name, extra_res_fields={'port_id':port2_q['id']})
return self._create_resource('floatingip', proj_id, name, extra_res_fields={'floating_network_id': net_q['id'], 'port_id':port_q['id']})
def _update_resource(self, res_type, res_id, proj_id, name=None, extra_res_fields=None):
context = {'operation': 'UPDATE',
'user_id': '',
'is_admin': False,
'roles': '',
'tenant_id': proj_id}
if name:
res_name = name
else:
res_name = '%s-%s' %(res_type, str(uuid.uuid4()))
data = {'resource': {'name': res_name,
'tenant_id': proj_id},
'id': res_id}
if extra_res_fields:
data['resource'].update(extra_res_fields)
body = {'context': context, 'data': data}
resp = self._api_svr_app.post_json('/neutron/%s' %(res_type), body)
res_q = json.loads(resp.text)
return res_name, res_q
# end _update_resource
def _add_router_interface(self, res_type, res_id, proj_id, name=None, extra_res_fields=None):
context = {'operation': 'ADDINTERFACE',
'user_id': '',
'is_admin': False,
'roles': '',
'tenant_id': proj_id}
if name:
res_name = name
else:
res_name = '%s-%s' %(res_type, str(uuid.uuid4()))
data = {'resource': {'name': res_name,
'tenant_id': proj_id},
'id': res_id}
if extra_res_fields:
data['resource'].update(extra_res_fields)
body = {'context': context, 'data': data}
resp = self._api_svr_app.post_json('/neutron/%s' %(res_type), body)
res_q = json.loads(resp.text)
return res_name, res_q
    # end _add_router_interface
#test when strict_compliance is ON
def test_create_fip_and_associate_port_without_ext_gw(self):
proj_obj = self._vnc_lib.project_read(fq_name=['default-domain', 'default-project'])
for res_type in ['security_group']:
res_name, res_q = getattr(self, '_create_' + res_type, lambda x:self._create_resource(res_type, x))(proj_obj.uuid)
res_list = self._list_resources(res_type, tenant_id=proj_obj.uuid, name=res_name)
with ExpectedException(webtest.app.AppError):
self._create_floatingip_and_associate_port_without_ext_gw(proj_obj.uuid)
#test when strict_compliance is ON
def test_create_fip_and_associate_port_with_ext_gw(self):
proj_obj = self._vnc_lib.project_read(fq_name=['default-domain', 'default-project'])
for res_type in ['security_group']:
res_name, res_q = getattr(self, '_create_' + res_type, lambda x:self._create_resource(res_type, x))(proj_obj.uuid)
res_list = self._list_resources(res_type, tenant_id=proj_obj.uuid, name=res_name)
self._create_floatingip_and_associate_port_with_ext_gw(proj_obj.uuid)
def _list_resources(self, res_type, fields=None, tenant_id=None, name=None):
context = {'operation': 'READALL',
'userid': '',
'roles': '',
'is_admin': False,
'tenant': tenant_id,
'tenant_id': tenant_id}
data = {'filters':{}, 'fields': None}
if name:
data.update({'filters': {'name': [name]}})
if fields:
data.update({'fields': fields})
body = {'context': context, 'data': data}
resp = self._api_svr_app.post_json('/neutron/%s' %(res_type), body)
res_q = json.loads(resp.text)
return res_q
# end _list_resources
# end class TestStrictCompOn
class TestStrictCompOff(test_case.NeutronBackendTestCase):
@classmethod
def setUpClass(cls):
super(TestStrictCompOff, cls).setUpClass(
extra_config_knobs=[('NEUTRON', 'strict_compliance', False)])
#end setUpClass
def _create_resource(self, res_type, proj_id, name=None, extra_res_fields=None):
context = {'operation': 'CREATE',
'user_id': '',
'is_admin': False,
'roles': '',
'tenant_id': proj_id}
if name:
res_name = name
else:
res_name = '%s-%s' %(res_type, str(uuid.uuid4()))
data = {'resource': {'name': res_name,
'tenant_id': proj_id}}
if extra_res_fields:
data['resource'].update(extra_res_fields)
body = {'context': context, 'data': data}
resp = self._api_svr_app.post_json('/neutron/%s' %(res_type), body)
res_q = json.loads(resp.text)
return res_name, res_q
# end _create_resource
def _create_floatingip_and_associate_port_without_ext_gw(self, proj_id, name=None):
#external network
net_name, net_q = self._create_resource('network', proj_id, extra_res_fields={'router:external':True})
subnet_name, subnet_q = self._create_resource('subnet', proj_id, name, extra_res_fields={'network_id': net_q['id'], 'cidr': '10.2.0.0/24', 'ip_version': 4})
#private network
pvt_net_name, pvt_net_q = self._create_resource('network', proj_id)
pvt_subnet_name, pvt_subnet_q = self._create_resource('subnet', proj_id, name, extra_res_fields={'network_id': pvt_net_q['id'], 'cidr': '10.1.0.0/24', 'ip_version': 4})
port_name, port_q = self._create_resource('port', proj_id, name, extra_res_fields={'network_id': pvt_subnet_q['network_id']})
return self._create_resource('floatingip', proj_id, name, extra_res_fields={'floating_network_id': net_q['id'], 'port_id':port_q['id']})
#test when strict_compliance is OFF
def test_create_fip_and_associate_port_without_ext_gw(self):
proj_obj = self._vnc_lib.project_read(fq_name=['default-domain', 'default-project'])
for res_type in ['security_group']:
res_name, res_q = getattr(self, '_create_' + res_type, lambda x:self._create_resource(res_type, x))(proj_obj.uuid)
res_list = self._list_resources(res_type, tenant_id=proj_obj.uuid, name=res_name)
self._create_floatingip_and_associate_port_without_ext_gw(proj_obj.uuid)
def _list_resources(self, res_type, fields=None, tenant_id=None, name=None):
context = {'operation': 'READALL',
'userid': '',
'roles': '',
'is_admin': False,
'tenant': tenant_id,
'tenant_id': tenant_id}
data = {'filters':{}, 'fields': None}
if name:
data.update({'filters': {'name': [name]}})
if fields:
data.update({'fields': fields})
body = {'context': context, 'data': data}
resp = self._api_svr_app.post_json('/neutron/%s' %(res_type), body)
res_q = json.loads(resp.text)
return res_q
# end _list_resources
# end class TestStrictCompOff
|
|
"""Support to interact with a Music Player Daemon."""
from contextlib import suppress
from datetime import timedelta
import hashlib
import logging
import os
import mpd
from mpd.asyncio import MPDClient
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_PLAYLIST,
REPEAT_MODE_ALL,
REPEAT_MODE_OFF,
REPEAT_MODE_ONE,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_REPEAT_SET,
SUPPORT_SEEK,
SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "MPD"
DEFAULT_PORT = 6600
PLAYLIST_UPDATE_INTERVAL = timedelta(seconds=120)
SUPPORT_MPD = (
SUPPORT_PAUSE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_PLAY_MEDIA
| SUPPORT_PLAY
| SUPPORT_CLEAR_PLAYLIST
| SUPPORT_REPEAT_SET
| SUPPORT_SHUFFLE_SET
| SUPPORT_SEEK
| SUPPORT_STOP
| SUPPORT_TURN_OFF
| SUPPORT_TURN_ON
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
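# Illustrative configuration.yaml entry (host value is a placeholder):
#
# media_player:
#   - platform: mpd
#     host: 192.168.0.10
#     port: 6600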
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the MPD platform."""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
password = config.get(CONF_PASSWORD)
entity = MpdDevice(host, port, password, name)
async_add_entities([entity], True)
class MpdDevice(MediaPlayerEntity):
"""Representation of a MPD server."""
# pylint: disable=no-member
def __init__(self, server, port, password, name):
"""Initialize the MPD device."""
self.server = server
self.port = port
self._name = name
self.password = password
self._status = None
self._currentsong = None
self._playlists = None
self._currentplaylist = None
self._is_connected = False
self._muted = False
self._muted_volume = None
self._media_position_updated_at = None
self._media_position = None
self._commands = None
# set up MPD client
self._client = MPDClient()
self._client.timeout = 30
self._client.idletimeout = None
async def _connect(self):
"""Connect to MPD."""
try:
await self._client.connect(self.server, self.port)
if self.password is not None:
await self._client.password(self.password)
except mpd.ConnectionError:
return
self._is_connected = True
def _disconnect(self):
"""Disconnect from MPD."""
with suppress(mpd.ConnectionError):
self._client.disconnect()
self._is_connected = False
self._status = None
async def _fetch_status(self):
"""Fetch status from MPD."""
self._status = await self._client.status()
self._currentsong = await self._client.currentsong()
position = self._status.get("elapsed")
if position is None:
position = self._status.get("time")
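        # Older MPD servers report "time" as "elapsed:duration"; keep only the elapsed part.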
if isinstance(position, str) and ":" in position:
position = position.split(":")[0]
if position is not None and self._media_position != position:
self._media_position_updated_at = dt_util.utcnow()
self._media_position = int(float(position))
await self._update_playlists()
@property
def available(self):
"""Return true if MPD is available and connected."""
return self._is_connected
async def async_update(self):
"""Get the latest data and update the state."""
try:
if not self._is_connected:
await self._connect()
self._commands = list(await self._client.commands())
await self._fetch_status()
except (mpd.ConnectionError, OSError, ValueError) as error:
# Cleanly disconnect in case connection is not in valid state
_LOGGER.debug("Error updating status: %s", error)
self._disconnect()
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the media state."""
if self._status is None:
return STATE_OFF
if self._status["state"] == "play":
return STATE_PLAYING
if self._status["state"] == "pause":
return STATE_PAUSED
if self._status["state"] == "stop":
return STATE_OFF
return STATE_OFF
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def media_content_id(self):
"""Return the content ID of current playing media."""
return self._currentsong.get("file")
@property
def media_content_type(self):
"""Return the content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Return the duration of current playing media in seconds."""
# Time does not exist for streams
return self._currentsong.get("time")
@property
def media_position(self):
"""Position of current playing media in seconds.
This is returned as part of the mpd status rather than in the details
of the current song.
"""
return self._media_position
@property
def media_position_updated_at(self):
"""Last valid time of media position."""
return self._media_position_updated_at
@property
def media_title(self):
"""Return the title of current playing media."""
name = self._currentsong.get("name", None)
title = self._currentsong.get("title", None)
file_name = self._currentsong.get("file", None)
if name is None and title is None:
if file_name is None:
return "None"
return os.path.basename(file_name)
if name is None:
return title
if title is None:
return name
return f"{name}: {title}"
@property
def media_artist(self):
"""Return the artist of current playing media (Music track only)."""
return self._currentsong.get("artist")
@property
def media_album_name(self):
"""Return the album of current playing media (Music track only)."""
return self._currentsong.get("album")
@property
def media_image_hash(self):
"""Hash value for media image."""
file = self._currentsong.get("file")
if file:
return hashlib.sha256(file.encode("utf-8")).hexdigest()[:16]
return None
async def async_get_media_image(self):
"""Fetch media image of current playing track."""
file = self._currentsong.get("file")
if not file:
return None, None
        # not all MPD implementations and versions support the `albumart` and `readpicture` commands
can_albumart = "albumart" in self._commands
can_readpicture = "readpicture" in self._commands
response = None
# read artwork embedded into the media file
if can_readpicture:
try:
response = await self._client.readpicture(file)
except mpd.CommandError as error:
if error.errno is not mpd.FailureResponseCode.NO_EXIST:
_LOGGER.warning(
"Retrieving artwork through `readpicture` command failed: %s",
error,
)
# read artwork contained in the media directory (cover.{jpg,png,tiff,bmp}) if none is embedded
if can_albumart and not response:
try:
response = await self._client.albumart(file)
except mpd.CommandError as error:
if error.errno is not mpd.FailureResponseCode.NO_EXIST:
_LOGGER.warning(
"Retrieving artwork through `albumart` command failed: %s",
error,
)
if not response:
return None, None
image = bytes(response.get("binary"))
mime = response.get(
"type", "image/png"
) # readpicture has type, albumart does not
return (image, mime)
@property
def volume_level(self):
"""Return the volume level."""
if "volume" in self._status:
return int(self._status["volume"]) / 100
return None
@property
def supported_features(self):
"""Flag media player features that are supported."""
if self._status is None:
return 0
supported = SUPPORT_MPD
if "volume" in self._status:
supported |= SUPPORT_VOLUME_SET | SUPPORT_VOLUME_STEP | SUPPORT_VOLUME_MUTE
if self._playlists is not None:
supported |= SUPPORT_SELECT_SOURCE
return supported
@property
def source(self):
"""Name of the current input source."""
return self._currentplaylist
@property
def source_list(self):
"""Return the list of available input sources."""
return self._playlists
async def async_select_source(self, source):
"""Choose a different available playlist and play it."""
await self.async_play_media(MEDIA_TYPE_PLAYLIST, source)
@Throttle(PLAYLIST_UPDATE_INTERVAL)
async def _update_playlists(self, **kwargs):
"""Update available MPD playlists."""
try:
self._playlists = []
for playlist_data in await self._client.listplaylists():
self._playlists.append(playlist_data["playlist"])
except mpd.CommandError as error:
self._playlists = None
_LOGGER.warning("Playlists could not be updated: %s:", error)
async def async_set_volume_level(self, volume):
"""Set volume of media player."""
if "volume" in self._status:
await self._client.setvol(int(volume * 100))
async def async_volume_up(self):
"""Service to send the MPD the command for volume up."""
if "volume" in self._status:
current_volume = int(self._status["volume"])
if current_volume <= 100:
                await self._client.setvol(current_volume + 5)
async def async_volume_down(self):
"""Service to send the MPD the command for volume down."""
if "volume" in self._status:
current_volume = int(self._status["volume"])
if current_volume >= 0:
await self._client.setvol(current_volume - 5)
async def async_media_play(self):
"""Service to send the MPD the command for play/pause."""
if self._status["state"] == "pause":
await self._client.pause(0)
else:
await self._client.play()
async def async_media_pause(self):
"""Service to send the MPD the command for play/pause."""
await self._client.pause(1)
async def async_media_stop(self):
"""Service to send the MPD the command for stop."""
await self._client.stop()
async def async_media_next_track(self):
"""Service to send the MPD the command for next track."""
await self._client.next()
async def async_media_previous_track(self):
"""Service to send the MPD the command for previous track."""
await self._client.previous()
async def async_mute_volume(self, mute):
"""Mute. Emulated with set_volume_level."""
if "volume" in self._status:
if mute:
self._muted_volume = self.volume_level
await self.async_set_volume_level(0)
elif self._muted_volume is not None:
await self.async_set_volume_level(self._muted_volume)
self._muted = mute
async def async_play_media(self, media_type, media_id, **kwargs):
"""Send the media player the command for playing a playlist."""
_LOGGER.debug("Playing playlist: %s", media_id)
if media_type == MEDIA_TYPE_PLAYLIST:
if media_id in self._playlists:
self._currentplaylist = media_id
else:
self._currentplaylist = None
_LOGGER.warning("Unknown playlist name %s", media_id)
await self._client.clear()
await self._client.load(media_id)
await self._client.play()
else:
await self._client.clear()
self._currentplaylist = None
await self._client.add(media_id)
await self._client.play()
@property
def repeat(self):
"""Return current repeat mode."""
if self._status["repeat"] == "1":
if self._status["single"] == "1":
return REPEAT_MODE_ONE
return REPEAT_MODE_ALL
return REPEAT_MODE_OFF
async def async_set_repeat(self, repeat):
"""Set repeat mode."""
if repeat == REPEAT_MODE_OFF:
await self._client.repeat(0)
await self._client.single(0)
else:
await self._client.repeat(1)
if repeat == REPEAT_MODE_ONE:
await self._client.single(1)
else:
await self._client.single(0)
@property
def shuffle(self):
"""Boolean if shuffle is enabled."""
return bool(int(self._status["random"]))
async def async_set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
await self._client.random(int(shuffle))
async def async_turn_off(self):
"""Service to send the MPD the command to stop playing."""
await self._client.stop()
async def async_turn_on(self):
"""Service to send the MPD the command to start playing."""
await self._client.play()
await self._update_playlists(no_throttle=True)
async def async_clear_playlist(self):
"""Clear players playlist."""
await self._client.clear()
async def async_media_seek(self, position):
"""Send seek command."""
await self._client.seekcur(position)
|
|
# Command line arguments:
# - path to directory containing previously generated .csv files
# - path to new OTT (not ending in /)
# - versioned name of new OTT e.g. ott3.1
# - path to OTT properties.json file
# - path to output directory (will hold ott*.csv and by_qid.csv)
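#
# Example invocation (the script and directory names below are placeholders):
#   python extend_idlist.py previous-idlist ott3.1-source ott3.1 ott3.1-source/properties.json new-idlist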
import sys, os, csv, json, argparse
def extend_idlist(previous_path, ott_path, ott_name, props_path, out_path):
if ott_path.endswith('/'): ott_path = ott_path[0:-1]
previous_regs = os.path.join(previous_path, 'regs')
names = previous_versions_list(previous_regs)
if ott_name + '.csv' in names:
sys.stderr.write('%s has already been processed. Wait until there is a new OTT version before making a new idlist.\n' % ott_name)
sys.exit(1)
info = get_sources_table(ott_name, props_path)
# previous_regs is a directory of .csv files, one per OTT version
registrations = read_registrations(previous_regs, names)
(registrations_by_id, ids_for_qid) = index_registrations(registrations)
new_regs = do_one_taxonomy(ott_name, ott_path, info,
registrations_by_id, ids_for_qid)
regs_path = os.path.join(out_path, 'regs')
# Assume regs directory exists (other files are in it already)
write_registrations(new_regs, os.path.join(regs_path, ott_name + '.csv'))
write_indexes(registrations_by_id,
ids_for_qid,
out_path)
def do_one_taxonomy(ott_name, ott_path, info, registrations_by_id, ids_for_qid):
ott = read_taxonomy(ott_path)
new_regs = []
merges = changes = dups = 0
info_losers = 0
for taxon in sorted_taxa(ott):
(id, qids) = taxon
if len(qids) == 0:
qids = [('ott', str(id))]
# Could be from edit/
print >>sys.stderr, '** Sourceless taxon: %s in %s' % (id, ott_name)
qid = qids[0]
# Existing id(s) for this qid
prev_id = None
for q in qids:
old_ids = ids_for_qid.get(q)
if old_ids != None:
prev_id = old_ids[-1]
break
if prev_id == id:
# Re-used!
continue
# Does this id already map?
regs = registrations_by_id.get(id)
if regs == None:
# Are we creating a new id for a qid that already has one?
if prev_id == None:
note = '' # New id
else:
note = 'dup of %s' % prev_id
dups += 1
else:
prev_reg = regs[-1] # most recent registration
prev_qid = prev_reg[1] # e.g. gbif:4275326
# If previous qid is among the current qids, this is a simple reuse.
reuse = None
for q in qids: # e.g. ncbi:1155186,gbif:4275326
if q == prev_qid: # Java
# Re-used! No new registration
reuse = q
break
if reuse != None:
continue
# We're changing the qid for a registered id
if prev_id == None:
# Reverting to a previous qid?
if qid in [reg[1] for reg in regs]:
print >>sys.stderr, '%s returning' % id
note = 'return %s' % unparse_qid(prev_qid)
else:
note = 'was %s' % unparse_qid(prev_qid)
changes += 1
else:
note = 'merge %s %s' % (unparse_qid(prev_qid), prev_id) # Java
merges += 1
# Need to make a new registration (id/qid association).
(src, sid) = qid
if src == 'ott':
source_version = ''
elif src.startswith('additions'):
# Should be git commit
source_version = ''
elif not src in info:
info_losers += 1
if info_losers <= 10:
# Could be from edits/
print '** Missing version info for %s (OTT id %s)' % (src, id)
print info
source_version = ''
else:
source_version = info[unicode(src)]
reg = (id, qid, source_version, ott_name, note)
new_regs.append(reg)
print merges, 'merges'
print changes, 'changes'
print dups, 'dups'
return new_regs
def read_taxonomy(tax_path):
# was: ott = Taxonomy.getRawTaxonomy(ott_path + '/', 'ott')
tax = []
path = os.path.join(tax_path, 'taxonomy.tsv')
if not os.path.exists(path):
path = os.path.join(tax_path, 'taxonomy')
with open(path, 'r') as infile:
print 'Reading', path
id_column = 0
info_column = None
source_column = None
sourceid_column = None
for line in infile:
row = line.strip().split('\t|\t')
if row[id_column].isdigit():
qids = []
if info_column != None:
sourceids = row[info_column]
if sourceids == 'null':
qids = []
else:
qids = map(parse_qid, sourceids.split(','))
elif source_column != None:
sid = row[sourceid_column]
if sid == '':
qids = []
else:
qids = [(row[source_column].lower(), sid)]
qids = [canonicalize(q) for q in qids]
id = int(row[id_column])
tax.append((id, qids))
if len(tax) % 500000 == 0:
print len(tax), id
else:
id_column = row.index('uid')
if id_column == None:
print '** No uid column'
if 'sourceinfo' in row:
info_column = row.index('sourceinfo')
elif 'source' in row:
source_column = row.index('source')
sourceid_column = row.index('sourceid')
print 'taxa:', len(tax)
return tax
def parse_qid(qid_string):
if qid_string.startswith('http'):
return (qid_string, None)
parts = qid_string.split(':', 1)
if len(parts) == 2:
(prefix, n) = parts
return (prefix, n)
else:
print 'Odd qid: %s' % qid_string
return (qid_string, None)
def unparse_qid(qid):
(prefix, n) = qid
if n == None:
return prefix
else:
return '%s:%s' % (qid)
def sorted_taxa(ott):
ott.sort(key=lambda (id, qids): id)
return ott
def index_registrations(registrations):
registrations_by_id = {}
ids_for_qid = {}
for reg in registrations:
(id, qid, version, reg_type, name) = reg
regs = registrations_by_id.get(id)
if regs == None:
registrations_by_id[id] = [reg]
else:
regs.append(reg)
if qid[1] != None:
ids = ids_for_qid.get(qid)
if ids == None:
ids_for_qid[qid] = [id]
else:
ids.append(id)
return (registrations_by_id, ids_for_qid)
# Return [..., 'ott2.3.csv', ...]
def previous_versions_list(previous_regs):
names = os.listdir(previous_regs)
names = [name for name in names if name.startswith('ott') and name.endswith('.csv')]
def sort_key(name):
(major, minor) = name[3:][0:-4].split('.')
return (int(major), int(minor))
return sorted(names, key=sort_key)
def read_registrations(previous_regs, names):
regs = []
for name in names:
path = os.path.join(previous_regs, name)
with open(path, 'r') as infile:
print 'Reading', path
reader = csv.reader(infile)
for row in reader:
(id, qid, source, ottver, note) = row
regs.append((int(id), parse_qid(qid), source, ottver, note))
print 'Got %s registrations' % len(regs)
return regs
def write_registrations(new_regs, csv_path):
print 'Writing %s registrations to %s' % (len(new_regs), csv_path)
with open(csv_path, 'w') as outfile:
writer = csv.writer(outfile)
for (id, qid, source, ottver, note) in new_regs:
writer.writerow([id, unparse_qid(qid), source, ottver, note])
# Return mapping source series -> source version for a particular OTT version
# as stored in properties file
def get_sources_table(ott_name, props_path):
with open(props_path, 'r') as infile:
info = json.load(infile)
sources = info["sources"]
# Convert Makefile source name to idspace name
if "fung" in sources and not "if" in sources:
sources["if"] = sources["fung"]
return sources
def canonicalize(qid):
(prefix, n) = qid
if prefix == 'IF':
return ('if', n)
else:
return qid
def write_indexes(registrations_by_id, ids_for_qid, out_path):
qid_path = os.path.join(out_path, 'by_qid.csv')
print >>sys.stderr, 'Writing %s qid records to %s' % (len(ids_for_qid), qid_path)
with open(qid_path, 'w') as outfile:
writer = csv.writer(outfile)
for qid in sorted(ids_for_qid.keys()):
# ott ids can get out of order, e.g. gbif:6197514,3190274;3185577
writer.writerow([unparse_qid(qid), ';'.join([str(i) for i in ids_for_qid[qid]])])
id_path = os.path.join(out_path, 'by_id.csv')
print >>sys.stderr, 'Writing %s id records to %s' % (len(registrations_by_id), id_path)
with open(id_path, 'w') as outfile:
writer = csv.writer(outfile)
for id in registrations_by_id:
regs = registrations_by_id[id]
qid_strings = [unparse_qid(reg[1]) for reg in regs]
writer.writerow([id, ';'.join(qid_strings)])
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Extend the identifier list with new taxonomy version')
    parser.add_argument('previous', help='path to old idlist directory')
parser.add_argument('ott', help='new OTT (directory containing taxonomy.tsv)')
parser.add_argument('name', help='new OTT version (e.g. ott3.1)')
parser.add_argument('properties', help='properties.json file for new OTT version')
parser.add_argument('output', help='path to new idlist directory (output)')
args = parser.parse_args()
    # previous, ott, name, properties, output
extend_idlist(args.previous, args.ott, args.name, args.properties, args.output)
|
|
#!/usr/bin/env python
"""
General Utilities
(part of web.py)
"""
__all__ = [
"Storage", "storage", "storify",
"Counter", "counter",
"iters",
"rstrips", "lstrips", "strips",
"safeunicode", "safestr", "utf8",
"TimeoutError", "timelimit",
"Memoize", "memoize",
"re_compile", "re_subm",
"group", "uniq", "iterview",
"IterBetter", "iterbetter",
"safeiter", "safewrite",
"dictreverse", "dictfind", "dictfindall", "dictincr", "dictadd",
"requeue", "restack",
"listget", "intget", "datestr",
"numify", "denumify", "commify", "dateify",
"nthstr", "cond",
"CaptureStdout", "capturestdout", "Profile", "profile",
"tryall",
"ThreadedDict", "threadeddict",
"autoassign",
"to36",
"safemarkdown",
"sendmail"
]
import re, sys, time, threading, itertools, traceback, os
try:
import subprocess
except ImportError:
subprocess = None
try: import datetime
except ImportError: pass
try: set
except NameError:
from sets import Set as set
try:
from threading import local as threadlocal
except ImportError:
from python23 import threadlocal
class Storage(dict):
"""
A Storage object is like a dictionary except `obj.foo` can be used
in addition to `obj['foo']`.
>>> o = storage(a=1)
>>> o.a
1
>>> o['a']
1
>>> o.a = 2
>>> o['a']
2
>>> del o.a
>>> o.a
Traceback (most recent call last):
...
AttributeError: 'a'
"""
def __getattr__(self, key):
try:
return self[key]
except KeyError, k:
raise AttributeError, k
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
try:
del self[key]
except KeyError, k:
raise AttributeError, k
def __repr__(self):
return '<Storage ' + dict.__repr__(self) + '>'
storage = Storage
def storify(mapping, *requireds, **defaults):
"""
Creates a `storage` object from dictionary `mapping`, raising `KeyError` if
    `mapping` doesn't have all of the keys in `requireds` and using the default
values for keys found in `defaults`.
For example, `storify({'a':1, 'c':3}, b=2, c=0)` will return the equivalent of
`storage({'a':1, 'b':2, 'c':3})`.
If a `storify` value is a list (e.g. multiple values in a form submission),
`storify` returns the last element of the list, unless the key appears in
`defaults` as a list. Thus:
>>> storify({'a':[1, 2]}).a
2
>>> storify({'a':[1, 2]}, a=[]).a
[1, 2]
>>> storify({'a':1}, a=[]).a
[1]
>>> storify({}, a=[]).a
[]
    Similarly, if the value has a `value` attribute, `storify` will return _its_
value, unless the key appears in `defaults` as a dictionary.
>>> storify({'a':storage(value=1)}).a
1
>>> storify({'a':storage(value=1)}, a={}).a
<Storage {'value': 1}>
>>> storify({}, a={}).a
{}
Optionally, keyword parameter `_unicode` can be passed to convert all values to unicode.
>>> storify({'x': 'a'}, _unicode=True)
<Storage {'x': u'a'}>
>>> storify({'x': storage(value='a')}, x={}, _unicode=True)
<Storage {'x': <Storage {'value': 'a'}>}>
>>> storify({'x': storage(value='a')}, _unicode=True)
<Storage {'x': u'a'}>
"""
_unicode = defaults.pop('_unicode', False)
def unicodify(s):
if _unicode and isinstance(s, str): return safeunicode(s)
else: return s
def getvalue(x):
if hasattr(x, 'file') and hasattr(x, 'value'):
return x.value
elif hasattr(x, 'value'):
return unicodify(x.value)
else:
return unicodify(x)
stor = Storage()
for key in requireds + tuple(mapping.keys()):
value = mapping[key]
if isinstance(value, list):
if isinstance(defaults.get(key), list):
value = [getvalue(x) for x in value]
else:
value = value[-1]
if not isinstance(defaults.get(key), dict):
value = getvalue(value)
if isinstance(defaults.get(key), list) and not isinstance(value, list):
value = [value]
setattr(stor, key, value)
for (key, value) in defaults.iteritems():
result = value
if hasattr(stor, key):
result = stor[key]
if value == () and not isinstance(result, tuple):
result = (result,)
setattr(stor, key, result)
return stor
class Counter(storage):
"""Keeps count of how many times something is added.
>>> c = counter()
>>> c.add('x')
>>> c.add('x')
>>> c.add('x')
>>> c.add('x')
>>> c.add('x')
>>> c.add('y')
>>> c
<Counter {'y': 1, 'x': 5}>
>>> c.most()
['x']
"""
def add(self, n):
self.setdefault(n, 0)
self[n] += 1
def most(self):
"""Returns the keys with maximum count."""
m = max(self.itervalues())
return [k for k, v in self.iteritems() if v == m]
def least(self):
"""Returns the keys with mininum count."""
m = min(self.itervalues())
return [k for k, v in self.iteritems() if v == m]
def percent(self, key):
"""Returns what percentage a certain key is of all entries.
>>> c = counter()
>>> c.add('x')
>>> c.add('x')
>>> c.add('x')
>>> c.add('y')
>>> c.percent('x')
0.75
>>> c.percent('y')
0.25
"""
return float(self[key])/sum(self.values())
def sorted_keys(self):
"""Returns keys sorted by value.
>>> c = counter()
>>> c.add('x')
>>> c.add('x')
>>> c.add('y')
>>> c.sorted_keys()
['x', 'y']
"""
return sorted(self.keys(), key=lambda k: self[k], reverse=True)
def sorted_values(self):
"""Returns values sorted by value.
>>> c = counter()
>>> c.add('x')
>>> c.add('x')
>>> c.add('y')
>>> c.sorted_values()
[2, 1]
"""
return [self[k] for k in self.sorted_keys()]
def sorted_items(self):
"""Returns items sorted by value.
>>> c = counter()
>>> c.add('x')
>>> c.add('x')
>>> c.add('y')
>>> c.sorted_items()
[('x', 2), ('y', 1)]
"""
return [(k, self[k]) for k in self.sorted_keys()]
def __repr__(self):
return '<Counter ' + dict.__repr__(self) + '>'
counter = Counter
iters = [list, tuple]
import __builtin__
if hasattr(__builtin__, 'set'):
iters.append(set)
if hasattr(__builtin__, 'frozenset'):
    iters.append(frozenset)
if sys.version_info < (2,6): # sets module deprecated in 2.6
try:
from sets import Set
iters.append(Set)
except ImportError:
pass
class _hack(tuple): pass
iters = _hack(iters)
iters.__doc__ = """
A list of iterable items (like lists, but not strings). Includes whichever
of lists, tuples, sets, and Sets are available in this version of Python.
"""
def _strips(direction, text, remove):
if direction == 'l':
if text.startswith(remove):
return text[len(remove):]
elif direction == 'r':
if text.endswith(remove):
return text[:-len(remove)]
else:
raise ValueError, "Direction needs to be r or l."
return text
def rstrips(text, remove):
"""
removes the string `remove` from the right of `text`
>>> rstrips("foobar", "bar")
'foo'
"""
return _strips('r', text, remove)
def lstrips(text, remove):
"""
removes the string `remove` from the left of `text`
>>> lstrips("foobar", "foo")
'bar'
"""
return _strips('l', text, remove)
def strips(text, remove):
"""
    removes the string `remove` from both sides of `text`
>>> strips("foobarfoo", "foo")
'bar'
"""
return rstrips(lstrips(text, remove), remove)
def safeunicode(obj, encoding='utf-8'):
r"""
Converts any given object to unicode string.
>>> safeunicode('hello')
u'hello'
>>> safeunicode(2)
u'2'
>>> safeunicode('\xe1\x88\xb4')
u'\u1234'
"""
t = type(obj)
if t is unicode:
return obj
elif t is str:
return obj.decode(encoding)
elif t in [int, float, bool]:
return unicode(obj)
elif hasattr(obj, '__unicode__') or isinstance(obj, unicode):
return unicode(obj)
else:
return str(obj).decode(encoding)
def safestr(obj, encoding='utf-8'):
r"""
Converts any given object to utf-8 encoded string.
>>> safestr('hello')
'hello'
>>> safestr(u'\u1234')
'\xe1\x88\xb4'
>>> safestr(2)
'2'
"""
if isinstance(obj, unicode):
return obj.encode(encoding)
elif isinstance(obj, str):
return obj
elif hasattr(obj, 'next') and hasattr(obj, '__iter__'): # iterator
return itertools.imap(safestr, obj)
else:
return str(obj)
# for backward-compatibility
utf8 = safestr
class TimeoutError(Exception): pass
def timelimit(timeout):
"""
A decorator to limit a function to `timeout` seconds, raising `TimeoutError`
if it takes longer.
>>> import time
>>> def meaningoflife():
... time.sleep(.2)
... return 42
>>>
>>> timelimit(.1)(meaningoflife)()
Traceback (most recent call last):
...
TimeoutError: took too long
>>> timelimit(1)(meaningoflife)()
42
_Caveat:_ The function isn't stopped after `timeout` seconds but continues
executing in a separate thread. (There seems to be no way to kill a thread.)
inspired by <http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/473878>
"""
def _1(function):
def _2(*args, **kw):
class Dispatch(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.result = None
self.error = None
self.setDaemon(True)
self.start()
def run(self):
try:
self.result = function(*args, **kw)
except:
self.error = sys.exc_info()
c = Dispatch()
c.join(timeout)
if c.isAlive():
raise TimeoutError, 'took too long'
if c.error:
raise c.error[0], c.error[1]
return c.result
return _2
return _1
class Memoize:
"""
'Memoizes' a function, caching its return values for each input.
If `expires` is specified, values are recalculated after `expires` seconds.
If `background` is specified, values are recalculated in a separate thread.
>>> calls = 0
>>> def howmanytimeshaveibeencalled():
... global calls
... calls += 1
... return calls
>>> fastcalls = memoize(howmanytimeshaveibeencalled)
>>> howmanytimeshaveibeencalled()
1
>>> howmanytimeshaveibeencalled()
2
>>> fastcalls()
3
>>> fastcalls()
3
>>> import time
>>> fastcalls = memoize(howmanytimeshaveibeencalled, .1, background=False)
>>> fastcalls()
4
>>> fastcalls()
4
>>> time.sleep(.2)
>>> fastcalls()
5
>>> def slowfunc():
... time.sleep(.1)
... return howmanytimeshaveibeencalled()
>>> fastcalls = memoize(slowfunc, .2, background=True)
>>> fastcalls()
6
>>> timelimit(.05)(fastcalls)()
6
>>> time.sleep(.2)
>>> timelimit(.05)(fastcalls)()
6
>>> timelimit(.05)(fastcalls)()
6
>>> time.sleep(.2)
>>> timelimit(.05)(fastcalls)()
7
>>> fastcalls = memoize(slowfunc, None, background=True)
>>> threading.Thread(target=fastcalls).start()
>>> time.sleep(.01)
>>> fastcalls()
9
"""
def __init__(self, func, expires=None, background=True):
self.func = func
self.cache = {}
self.expires = expires
self.background = background
self.running = {}
def __call__(self, *args, **keywords):
key = (args, tuple(keywords.items()))
if not self.running.get(key):
self.running[key] = threading.Lock()
def update(block=False):
if self.running[key].acquire(block):
try:
self.cache[key] = (self.func(*args, **keywords), time.time())
finally:
self.running[key].release()
if key not in self.cache:
update(block=True)
elif self.expires and (time.time() - self.cache[key][1]) > self.expires:
if self.background:
threading.Thread(target=update).start()
else:
update()
return self.cache[key][0]
memoize = Memoize
re_compile = memoize(re.compile) #@@ threadsafe?
re_compile.__doc__ = """
A memoized version of re.compile.
"""
class _re_subm_proxy:
def __init__(self):
self.match = None
def __call__(self, match):
self.match = match
return ''
def re_subm(pat, repl, string):
"""
Like re.sub, but returns the replacement _and_ the match object.
>>> t, m = re_subm('g(oo+)fball', r'f\\1lish', 'goooooofball')
>>> t
'foooooolish'
>>> m.groups()
('oooooo',)
"""
compiled_pat = re_compile(pat)
proxy = _re_subm_proxy()
compiled_pat.sub(proxy.__call__, string)
return compiled_pat.sub(repl, string), proxy.match
def group(seq, size):
"""
Returns an iterator over a series of lists of length size from iterable.
>>> list(group([1,2,3,4], 2))
[[1, 2], [3, 4]]
>>> list(group([1,2,3,4,5], 2))
[[1, 2], [3, 4], [5]]
"""
def take(seq, n):
for i in xrange(n):
yield seq.next()
if not hasattr(seq, 'next'):
seq = iter(seq)
while True:
x = list(take(seq, size))
if x:
yield x
else:
break
def uniq(seq, key=None):
"""
Removes duplicate elements from a list while preserving the order of the rest.
>>> uniq([9,0,2,1,0])
[9, 0, 2, 1]
The value of the optional `key` parameter should be a function that
takes a single argument and returns a key to test the uniqueness.
>>> uniq(["Foo", "foo", "bar"], key=lambda s: s.lower())
['Foo', 'bar']
"""
key = key or (lambda x: x)
seen = set()
result = []
for v in seq:
k = key(v)
if k in seen:
continue
seen.add(k)
result.append(v)
return result
def iterview(x):
"""
Takes an iterable `x` and returns an iterator over it
which prints its progress to stderr as it iterates through.
"""
WIDTH = 70
def plainformat(n, lenx):
return '%5.1f%% (%*d/%d)' % ((float(n)/lenx)*100, len(str(lenx)), n, lenx)
def bars(size, n, lenx):
val = int((float(n)*size)/lenx + 0.5)
if size - val:
spacing = ">" + (" "*(size-val))[1:]
else:
spacing = ""
return "[%s%s]" % ("="*val, spacing)
def eta(elapsed, n, lenx):
if n == 0:
return '--:--:--'
if n == lenx:
secs = int(elapsed)
else:
secs = int((elapsed/n) * (lenx-n))
mins, secs = divmod(secs, 60)
hrs, mins = divmod(mins, 60)
return '%02d:%02d:%02d' % (hrs, mins, secs)
def format(starttime, n, lenx):
out = plainformat(n, lenx) + ' '
if n == lenx:
end = ' '
else:
end = ' ETA '
end += eta(time.time() - starttime, n, lenx)
out += bars(WIDTH - len(out) - len(end), n, lenx)
out += end
return out
starttime = time.time()
lenx = len(x)
for n, y in enumerate(x):
sys.stderr.write('\r' + format(starttime, n, lenx))
yield y
sys.stderr.write('\r' + format(starttime, n+1, lenx) + '\n')
class IterBetter:
"""
Returns an object that can be used as an iterator
but can also be used via __getitem__ (although it
cannot go backwards -- that is, you cannot request
`iterbetter[0]` after requesting `iterbetter[1]`).
>>> import itertools
>>> c = iterbetter(itertools.count())
>>> c[1]
1
>>> c[5]
5
>>> c[3]
Traceback (most recent call last):
...
IndexError: already passed 3
    For boolean tests, IterBetter peeks at the first value in the iterator without affecting the iteration.
>>> c = iterbetter(iter(range(5)))
>>> bool(c)
True
>>> list(c)
[0, 1, 2, 3, 4]
>>> c = iterbetter(iter([]))
>>> bool(c)
False
>>> list(c)
[]
"""
def __init__(self, iterator):
self.i, self.c = iterator, 0
def __iter__(self):
if hasattr(self, "_head"):
yield self._head
while 1:
yield self.i.next()
self.c += 1
def __getitem__(self, i):
#todo: slices
if i < self.c:
raise IndexError, "already passed "+str(i)
try:
while i > self.c:
self.i.next()
self.c += 1
# now self.c == i
self.c += 1
return self.i.next()
except StopIteration:
raise IndexError, str(i)
def __nonzero__(self):
if hasattr(self, "__len__"):
return len(self) != 0
elif hasattr(self, "_head"):
return True
else:
try:
self._head = self.i.next()
except StopIteration:
return False
else:
return True
iterbetter = IterBetter
def safeiter(it, cleanup=None, ignore_errors=True):
"""Makes an iterator safe by ignoring the exceptions occured during the iteration.
"""
def next():
while True:
try:
return it.next()
except StopIteration:
raise
except:
traceback.print_exc()
it = iter(it)
while True:
yield next()
def safewrite(filename, content):
"""Writes the content to a temp file and then moves the temp file to
given filename to avoid overwriting the existing file in case of errors.
"""
f = file(filename + '.tmp', 'w')
f.write(content)
f.close()
    os.rename(f.name, filename)
def dictreverse(mapping):
"""
Returns a new dictionary with keys and values swapped.
>>> dictreverse({1: 2, 3: 4})
{2: 1, 4: 3}
"""
return dict([(value, key) for (key, value) in mapping.iteritems()])
def dictfind(dictionary, element):
"""
Returns a key whose value in `dictionary` is `element`
or, if none exists, None.
>>> d = {1:2, 3:4}
>>> dictfind(d, 4)
3
>>> dictfind(d, 5)
"""
for (key, value) in dictionary.iteritems():
if element is value:
return key
def dictfindall(dictionary, element):
"""
Returns the keys whose values in `dictionary` are `element`
or, if none exists, [].
>>> d = {1:4, 3:4}
>>> dictfindall(d, 4)
[1, 3]
>>> dictfindall(d, 5)
[]
"""
res = []
for (key, value) in dictionary.iteritems():
if element is value:
res.append(key)
return res
def dictincr(dictionary, element):
"""
Increments `element` in `dictionary`,
setting it to one if it doesn't exist.
>>> d = {1:2, 3:4}
>>> dictincr(d, 1)
3
>>> d[1]
3
>>> dictincr(d, 5)
1
>>> d[5]
1
"""
dictionary.setdefault(element, 0)
dictionary[element] += 1
return dictionary[element]
def dictadd(*dicts):
"""
Returns a dictionary consisting of the keys in the argument dictionaries.
If they share a key, the value from the last argument is used.
>>> dictadd({1: 0, 2: 0}, {2: 1, 3: 1})
{1: 0, 2: 1, 3: 1}
"""
result = {}
for dct in dicts:
result.update(dct)
return result
def requeue(queue, index=-1):
"""Returns the element at index after moving it to the beginning of the queue.
>>> x = [1, 2, 3, 4]
>>> requeue(x)
4
>>> x
[4, 1, 2, 3]
"""
x = queue.pop(index)
queue.insert(0, x)
return x
def restack(stack, index=0):
"""Returns the element at index after moving it to the top of stack.
>>> x = [1, 2, 3, 4]
>>> restack(x)
1
>>> x
[2, 3, 4, 1]
"""
x = stack.pop(index)
stack.append(x)
return x
def listget(lst, ind, default=None):
"""
Returns `lst[ind]` if it exists, `default` otherwise.
>>> listget(['a'], 0)
'a'
>>> listget(['a'], 1)
>>> listget(['a'], 1, 'b')
'b'
"""
if len(lst)-1 < ind:
return default
return lst[ind]
def intget(integer, default=None):
"""
Returns `integer` as an int or `default` if it can't.
>>> intget('3')
3
>>> intget('3a')
>>> intget('3a', 0)
0
"""
try:
return int(integer)
except (TypeError, ValueError):
return default
def datestr(then, now=None):
"""
Converts a (UTC) datetime object to a nice string representation.
>>> from datetime import datetime, timedelta
>>> d = datetime(1970, 5, 1)
>>> datestr(d, now=d)
'0 microseconds ago'
>>> for t, v in {
... timedelta(microseconds=1): '1 microsecond ago',
... timedelta(microseconds=2): '2 microseconds ago',
... -timedelta(microseconds=1): '1 microsecond from now',
... -timedelta(microseconds=2): '2 microseconds from now',
... timedelta(microseconds=2000): '2 milliseconds ago',
... timedelta(seconds=2): '2 seconds ago',
... timedelta(seconds=2*60): '2 minutes ago',
... timedelta(seconds=2*60*60): '2 hours ago',
... timedelta(days=2): '2 days ago',
... }.iteritems():
... assert datestr(d, now=d+t) == v
>>> datestr(datetime(1970, 1, 1), now=d)
'January 1'
>>> datestr(datetime(1969, 1, 1), now=d)
'January 1, 1969'
>>> datestr(datetime(1970, 6, 1), now=d)
'June 1, 1970'
>>> datestr(None)
''
"""
def agohence(n, what, divisor=None):
if divisor: n = n // divisor
out = str(abs(n)) + ' ' + what # '2 day'
if abs(n) != 1: out += 's' # '2 days'
out += ' ' # '2 days '
if n < 0:
out += 'from now'
else:
out += 'ago'
return out # '2 days ago'
oneday = 24 * 60 * 60
if not then: return ""
if not now: now = datetime.datetime.utcnow()
if type(now).__name__ == "DateTime":
now = datetime.datetime.fromtimestamp(now)
if type(then).__name__ == "DateTime":
then = datetime.datetime.fromtimestamp(then)
elif type(then).__name__ == "date":
then = datetime.datetime(then.year, then.month, then.day)
delta = now - then
deltaseconds = int(delta.days * oneday + delta.seconds + delta.microseconds * 1e-06)
deltadays = abs(deltaseconds) // oneday
if deltaseconds < 0: deltadays *= -1 # fix for oddity of floor
if deltadays:
if abs(deltadays) < 4:
return agohence(deltadays, 'day')
out = then.strftime('%B %e') # e.g. 'June 13'
if then.year != now.year or deltadays < 0:
out += ', %s' % then.year
return out
if int(deltaseconds):
if abs(deltaseconds) > (60 * 60):
return agohence(deltaseconds, 'hour', 60 * 60)
elif abs(deltaseconds) > 60:
return agohence(deltaseconds, 'minute', 60)
else:
return agohence(deltaseconds, 'second')
deltamicroseconds = delta.microseconds
if delta.days: deltamicroseconds = int(delta.microseconds - 1e6) # datetime oddity
if abs(deltamicroseconds) > 1000:
return agohence(deltamicroseconds, 'millisecond', 1000)
return agohence(deltamicroseconds, 'microsecond')
def numify(string):
"""
Removes all non-digit characters from `string`.
>>> numify('800-555-1212')
'8005551212'
>>> numify('800.555.1212')
'8005551212'
"""
return ''.join([c for c in str(string) if c.isdigit()])
def denumify(string, pattern):
"""
Formats `string` according to `pattern`, where the letter X gets replaced
by characters from `string`.
>>> denumify("8005551212", "(XXX) XXX-XXXX")
'(800) 555-1212'
"""
out = []
for c in pattern:
if c == "X":
out.append(string[0])
string = string[1:]
else:
out.append(c)
return ''.join(out)
def commify(n):
"""
Add commas to an integer `n`.
>>> commify(1)
'1'
>>> commify(123)
'123'
>>> commify(1234)
'1,234'
>>> commify(1234567890)
'1,234,567,890'
>>> commify(123.0)
'123.0'
>>> commify(1234.5)
'1,234.5'
>>> commify(1234.56789)
'1,234.56789'
>>> commify('%.2f' % 1234.5)
'1,234.50'
>>> commify(None)
>>>
"""
if n is None: return None
n = str(n)
if '.' in n:
dollars, cents = n.split('.')
else:
dollars, cents = n, None
r = []
for i, c in enumerate(str(dollars)[::-1]):
if i and (not (i % 3)):
r.insert(0, ',')
r.insert(0, c)
out = ''.join(r)
if cents:
out += '.' + cents
return out
def dateify(datestring):
"""
Formats a numified `datestring` properly.
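    For example:
    >>> dateify("20091123120102")
    '2009-11-23 12:01:02'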
"""
return denumify(datestring, "XXXX-XX-XX XX:XX:XX")
def nthstr(n):
"""
Formats an ordinal.
Doesn't handle negative numbers.
>>> nthstr(1)
'1st'
>>> nthstr(0)
'0th'
>>> [nthstr(x) for x in [2, 3, 4, 5, 10, 11, 12, 13, 14, 15]]
['2nd', '3rd', '4th', '5th', '10th', '11th', '12th', '13th', '14th', '15th']
>>> [nthstr(x) for x in [91, 92, 93, 94, 99, 100, 101, 102]]
['91st', '92nd', '93rd', '94th', '99th', '100th', '101st', '102nd']
>>> [nthstr(x) for x in [111, 112, 113, 114, 115]]
['111th', '112th', '113th', '114th', '115th']
"""
assert n >= 0
if n % 100 in [11, 12, 13]: return '%sth' % n
return {1: '%sst', 2: '%snd', 3: '%srd'}.get(n % 10, '%sth') % n
def cond(predicate, consequence, alternative=None):
"""
Function replacement for if-else to use in expressions.
>>> x = 2
>>> cond(x % 2 == 0, "even", "odd")
'even'
>>> cond(x % 2 == 0, "even", "odd") + '_row'
'even_row'
"""
if predicate:
return consequence
else:
return alternative
class CaptureStdout:
"""
Captures everything `func` prints to stdout and returns it instead.
>>> def idiot():
... print "foo"
>>> capturestdout(idiot)()
'foo\\n'
**WARNING:** Not threadsafe!
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **keywords):
from cStringIO import StringIO
# Not threadsafe!
out = StringIO()
oldstdout = sys.stdout
sys.stdout = out
try:
self.func(*args, **keywords)
finally:
sys.stdout = oldstdout
return out.getvalue()
capturestdout = CaptureStdout
class Profile:
"""
Profiles `func` and returns a tuple containing its output
and a string with human-readable profiling information.
>>> import time
>>> out, inf = profile(time.sleep)(.001)
>>> out
>>> inf[:10].strip()
'took 0.0'
"""
def __init__(self, func):
self.func = func
def __call__(self, *args): ##, **kw): kw unused
import hotshot, hotshot.stats, os, tempfile ##, time already imported
f, filename = tempfile.mkstemp()
os.close(f)
prof = hotshot.Profile(filename)
stime = time.time()
result = prof.runcall(self.func, *args)
stime = time.time() - stime
prof.close()
import cStringIO
out = cStringIO.StringIO()
stats = hotshot.stats.load(filename)
stats.stream = out
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(40)
stats.print_callers()
x = '\n\ntook '+ str(stime) + ' seconds\n'
x += out.getvalue()
# remove the tempfile
try:
os.remove(filename)
except IOError:
pass
return result, x
profile = Profile
import traceback
# hack for compatibility with Python 2.3:
if not hasattr(traceback, 'format_exc'):
from cStringIO import StringIO
def format_exc(limit=None):
strbuf = StringIO()
traceback.print_exc(limit, strbuf)
return strbuf.getvalue()
traceback.format_exc = format_exc
def tryall(context, prefix=None):
"""
Tries a series of functions and prints their results.
`context` is a dictionary mapping names to values;
the value will only be tried if it's callable.
>>> tryall(dict(j=lambda: True))
j: True
----------------------------------------
results:
True: 1
For example, you might have a file `test/stuff.py`
with a series of functions testing various things in it.
At the bottom, have a line:
if __name__ == "__main__": tryall(globals())
Then you can run `python test/stuff.py` and get the results of
all the tests.
"""
context = context.copy() # vars() would update
results = {}
for (key, value) in context.iteritems():
if not hasattr(value, '__call__'):
continue
if prefix and not key.startswith(prefix):
continue
print key + ':',
try:
r = value()
dictincr(results, r)
print r
except:
print 'ERROR'
dictincr(results, 'ERROR')
print ' ' + '\n '.join(traceback.format_exc().split('\n'))
print '-'*40
print 'results:'
for (key, value) in results.iteritems():
print ' '*2, str(key)+':', value
class ThreadedDict(threadlocal):
"""
Thread local storage.
>>> d = ThreadedDict()
>>> d.x = 1
>>> d.x
1
>>> import threading
>>> def f(): d.x = 2
...
>>> t = threading.Thread(target=f)
>>> t.start()
>>> t.join()
>>> d.x
1
"""
_instances = set()
def __init__(self):
ThreadedDict._instances.add(self)
def __del__(self):
ThreadedDict._instances.remove(self)
def __hash__(self):
return id(self)
def clear_all():
"""Clears all ThreadedDict instances.
"""
for t in ThreadedDict._instances:
t.clear()
clear_all = staticmethod(clear_all)
# Define all these methods to more or less fully emulate dict -- attribute access
# is built into threading.local.
def __getitem__(self, key):
return self.__dict__[key]
def __setitem__(self, key, value):
self.__dict__[key] = value
def __delitem__(self, key):
del self.__dict__[key]
def __contains__(self, key):
return key in self.__dict__
has_key = __contains__
def clear(self):
self.__dict__.clear()
def copy(self):
return self.__dict__.copy()
def get(self, key, default=None):
return self.__dict__.get(key, default)
def items(self):
return self.__dict__.items()
def iteritems(self):
return self.__dict__.iteritems()
def keys(self):
return self.__dict__.keys()
def iterkeys(self):
return self.__dict__.iterkeys()
iter = iterkeys
def values(self):
return self.__dict__.values()
def itervalues(self):
return self.__dict__.itervalues()
def pop(self, key, *args):
return self.__dict__.pop(key, *args)
def popitem(self):
return self.__dict__.popitem()
def setdefault(self, key, default=None):
return self.__dict__.setdefault(key, default)
def update(self, *args, **kwargs):
self.__dict__.update(*args, **kwargs)
def __repr__(self):
return '<ThreadedDict %r>' % self.__dict__
__str__ = __repr__
threadeddict = ThreadedDict
def autoassign(self, locals):
"""
Automatically assigns local variables to `self`.
>>> self = storage()
>>> autoassign(self, dict(a=1, b=2))
>>> self
<Storage {'a': 1, 'b': 2}>
Generally used in `__init__` methods, as in:
def __init__(self, foo, bar, baz=1): autoassign(self, locals())
"""
for (key, value) in locals.iteritems():
if key == 'self':
continue
setattr(self, key, value)
def to36(q):
"""
Converts an integer to base 36 (a useful scheme for human-sayable IDs).
>>> to36(35)
'z'
>>> to36(119292)
'2k1o'
>>> int(to36(939387374), 36)
939387374
>>> to36(0)
'0'
>>> to36(-393)
Traceback (most recent call last):
...
ValueError: must supply a positive integer
"""
if q < 0: raise ValueError, "must supply a positive integer"
letters = "0123456789abcdefghijklmnopqrstuvwxyz"
converted = []
while q != 0:
q, r = divmod(q, 36)
converted.insert(0, letters[r])
return "".join(converted) or '0'
r_url = re_compile('(?<!\()(http://(\S+))')
def safemarkdown(text):
"""
Converts text to HTML following the rules of Markdown, but blocking any
outside HTML input, so that only the things supported by Markdown
can be used. Also converts raw URLs to links.
(requires [markdown.py](http://webpy.org/markdown.py))
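    For example, `safemarkdown("see http://example.com/")` returns the rendered
    HTML with the bare URL turned into a link (the exact markup depends on the
    installed markdown module).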
"""
from markdown import markdown
if text:
        text = text.replace('<', '&lt;')
# TODO: automatically get page title?
text = r_url.sub(r'<\1>', text)
text = markdown(text)
return text
def sendmail(from_address, to_address, subject, message, headers=None, **kw):
"""
Sends the email message `message` with mail and envelope headers
    for `from_address` to `to_address` with `subject`.
    Additional email headers can be specified with the dictionary
    `headers`.
Optionally cc, bcc and attachments can be specified as keyword arguments.
Attachments must be an iterable and each attachment can be either a
filename or a file object or a dictionary with filename, content and
optionally content_type keys.
If `web.config.smtp_server` is set, it will send the message
to that SMTP server. Otherwise it will look for
`/usr/sbin/sendmail`, the typical location for the sendmail-style
binary. To use sendmail from a different path, set `web.config.sendmail_path`.
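    A minimal illustrative call (addresses and file name are placeholders):
        sendmail('from@example.com', 'to@example.com', 'Hi', 'Hello there',
                 cc='cc@example.com', attachments=['notes.txt'])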
"""
attachments = kw.pop("attachments", [])
mail = _EmailMessage(from_address, to_address, subject, message, headers, **kw)
for a in attachments:
if isinstance(a, dict):
mail.attach(a['filename'], a['content'], a.get('content_type'))
elif hasattr(a, 'read'): # file
filename = os.path.basename(getattr(a, "name", ""))
content_type = getattr(a, 'content_type', None)
mail.attach(filename, a.read(), content_type)
elif isinstance(a, basestring):
f = open(a, 'rb')
content = f.read()
f.close()
filename = os.path.basename(a)
mail.attach(filename, content, None)
else:
raise ValueError, "Invalid attachment: %s" % repr(a)
mail.send()
class _EmailMessage:
def __init__(self, from_address, to_address, subject, message, headers=None, **kw):
def listify(x):
if not isinstance(x, list):
return [safestr(x)]
else:
return [safestr(a) for a in x]
subject = safestr(subject)
message = safestr(message)
from_address = safestr(from_address)
to_address = listify(to_address)
cc = listify(kw.get('cc', []))
bcc = listify(kw.get('bcc', []))
recipients = to_address + cc + bcc
import email.Utils
self.from_address = email.Utils.parseaddr(from_address)[1]
self.recipients = [email.Utils.parseaddr(r)[1] for r in recipients]
self.headers = dictadd({
'From': from_address,
'To': ", ".join(to_address),
'Subject': subject
}, headers or {})
if cc:
self.headers['Cc'] = ", ".join(cc)
self.message = self.new_message()
self.message.add_header("Content-Transfer-Encoding", "7bit")
self.message.add_header("Content-Disposition", "inline")
self.message.add_header("MIME-Version", "1.0")
self.message.set_payload(message, 'utf-8')
self.multipart = False
def new_message(self):
from email.Message import Message
return Message()
def attach(self, filename, content, content_type=None):
if not self.multipart:
msg = self.new_message()
msg.add_header("Content-Type", "multipart/mixed")
msg.attach(self.message)
self.message = msg
self.multipart = True
import mimetypes
try:
from email import encoders
        except ImportError:
from email import Encoders as encoders
        content_type = content_type or mimetypes.guess_type(filename)[0] or "application/octet-stream"
msg = self.new_message()
msg.set_payload(content)
msg.add_header('Content-Type', content_type)
msg.add_header('Content-Disposition', 'attachment', filename=filename)
if not content_type.startswith("text/"):
encoders.encode_base64(msg)
self.message.attach(msg)
def prepare_message(self):
for k, v in self.headers.iteritems():
if k.lower() == "content-type":
self.message.set_type(v)
else:
self.message.add_header(k, v)
self.headers = {}
def send(self):
try:
import webapi
except ImportError:
webapi = Storage(config=Storage())
self.prepare_message()
message_text = self.message.as_string()
if webapi.config.get('smtp_server'):
server = webapi.config.get('smtp_server')
port = webapi.config.get('smtp_port', 0)
username = webapi.config.get('smtp_username')
password = webapi.config.get('smtp_password')
debug_level = webapi.config.get('smtp_debuglevel', None)
starttls = webapi.config.get('smtp_starttls', False)
import smtplib
smtpserver = smtplib.SMTP(server, port)
if debug_level:
smtpserver.set_debuglevel(debug_level)
if starttls:
smtpserver.ehlo()
smtpserver.starttls()
smtpserver.ehlo()
if username and password:
smtpserver.login(username, password)
smtpserver.sendmail(self.from_address, self.recipients, message_text)
smtpserver.quit()
elif webapi.config.get('email_engine') == 'aws':
import boto.ses
c = boto.ses.SESConnection(
aws_access_key_id=webapi.config.get('aws_access_key_id'),
                aws_secret_access_key=webapi.config.get('aws_secret_access_key'))
            c.send_raw_email(message_text, self.from_address, self.recipients)
else:
sendmail = webapi.config.get('sendmail_path', '/usr/sbin/sendmail')
assert not self.from_address.startswith('-'), 'security'
for r in self.recipients:
assert not r.startswith('-'), 'security'
cmd = [sendmail, '-f', self.from_address] + self.recipients
if subprocess:
p = subprocess.Popen(cmd, stdin=subprocess.PIPE)
p.stdin.write(message_text)
p.stdin.close()
p.wait()
else:
i, o = os.popen2(cmd)
                i.write(message_text)
i.close()
o.close()
del i, o
def __repr__(self):
return "<EmailMessage>"
def __str__(self):
return self.message.as_string()
if __name__ == "__main__":
import doctest
doctest.testmod()
|
|
# file openpyxl/reader/style.py
# Copyright (c) 2010-2011 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
"""Read shared style definitions"""
# package imports
from openpyxl.shared.xmltools import fromstring
from openpyxl.shared.exc import MissingNumberFormat
from openpyxl.style import Style, NumberFormat, Font, Fill, Borders, Protection, Color
from openpyxl.shared.ooxml import SHEET_MAIN_NS
from copy import deepcopy
def read_style_table(xml_source):
"""Read styles from the shared style table"""
style_prop = {'table': {}}
root = fromstring(xml_source)
custom_num_formats = parse_custom_num_formats(root)
style_prop['color_index'] = parse_color_index(root)
font_list = parse_fonts(root, style_prop['color_index'])
fill_list = parse_fills(root, style_prop['color_index'])
border_list = parse_borders(root, style_prop['color_index'])
style_prop['dxf_list'] = parse_dxfs(root, style_prop['color_index'])
builtin_formats = NumberFormat._BUILTIN_FORMATS
cell_xfs = root.find('{%s}cellXfs' % SHEET_MAIN_NS)
if cell_xfs is not None: # can happen on bad OOXML writers (e.g. Gnumeric)
cell_xfs_nodes = cell_xfs.findall('{%s}xf' % SHEET_MAIN_NS)
for index, cell_xfs_node in enumerate(cell_xfs_nodes):
new_style = Style(static=True)
number_format_id = int(cell_xfs_node.get('numFmtId'))
if number_format_id < 164:
new_style.number_format.format_code = \
builtin_formats.get(number_format_id, 'General')
else:
if number_format_id in custom_num_formats:
new_style.number_format.format_code = \
custom_num_formats[number_format_id]
else:
raise MissingNumberFormat('%s' % number_format_id)
if cell_xfs_node.get('applyAlignment') == '1':
alignment = cell_xfs_node.find('{%s}alignment' % SHEET_MAIN_NS)
if alignment is not None:
if alignment.get('horizontal') is not None:
new_style.alignment.horizontal = alignment.get('horizontal')
if alignment.get('vertical') is not None:
new_style.alignment.vertical = alignment.get('vertical')
if alignment.get('wrapText'):
new_style.alignment.wrap_text = True
if alignment.get('shrinkToFit'):
new_style.alignment.shrink_to_fit = True
if alignment.get('indent') is not None:
                        new_style.alignment.indent = int(alignment.get('indent'))
if alignment.get('textRotation') is not None:
new_style.alignment.text_rotation = int(alignment.get('textRotation'))
# ignore justifyLastLine option when horizontal = distributed
if cell_xfs_node.get('applyFont') == '1':
new_style.font = deepcopy(font_list[int(cell_xfs_node.get('fontId'))])
new_style.font.color = deepcopy(font_list[int(cell_xfs_node.get('fontId'))].color)
if cell_xfs_node.get('applyFill') == '1':
new_style.fill = deepcopy(fill_list[int(cell_xfs_node.get('fillId'))])
new_style.fill.start_color = deepcopy(fill_list[int(cell_xfs_node.get('fillId'))].start_color)
new_style.fill.end_color = deepcopy(fill_list[int(cell_xfs_node.get('fillId'))].end_color)
if cell_xfs_node.get('applyBorder') == '1':
new_style.borders = deepcopy(border_list[int(cell_xfs_node.get('borderId'))])
new_style.borders.left = deepcopy(border_list[int(cell_xfs_node.get('borderId'))].left)
new_style.borders.left.color = deepcopy(border_list[int(cell_xfs_node.get('borderId'))].left.color)
new_style.borders.right = deepcopy(border_list[int(cell_xfs_node.get('borderId'))].right)
new_style.borders.right.color = deepcopy(border_list[int(cell_xfs_node.get('borderId'))].right.color)
new_style.borders.top = deepcopy(border_list[int(cell_xfs_node.get('borderId'))].top)
new_style.borders.top.color = deepcopy(border_list[int(cell_xfs_node.get('borderId'))].top.color)
new_style.borders.bottom = deepcopy(border_list[int(cell_xfs_node.get('borderId'))].bottom)
new_style.borders.bottom.color = deepcopy(border_list[int(cell_xfs_node.get('borderId'))].bottom.color)
new_style.borders.diagonal = deepcopy(border_list[int(cell_xfs_node.get('borderId'))].diagonal)
new_style.borders.diagonal.color = deepcopy(border_list[int(cell_xfs_node.get('borderId'))].diagonal.color)
if cell_xfs_node.get('applyProtection') == '1':
protection = cell_xfs_node.find('{%s}protection' % SHEET_MAIN_NS)
# Ignore if there are no protection sub-nodes
if protection is not None:
if protection.get('locked') is not None:
if protection.get('locked') == '1':
new_style.protection.locked = Protection.PROTECTION_PROTECTED
else:
new_style.protection.locked = Protection.PROTECTION_UNPROTECTED
if protection.get('hidden') is not None:
if protection.get('hidden') == '1':
new_style.protection.hidden = Protection.PROTECTION_PROTECTED
else:
new_style.protection.hidden = Protection.PROTECTION_UNPROTECTED
style_prop['table'][index] = new_style
return style_prop
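# A minimal usage sketch for `read_style_table` (hypothetical file name; the
# openpyxl reader normally calls this internally while loading a workbook):
#
#     from zipfile import ZipFile
#     xml_source = ZipFile('workbook.xlsx').read('xl/styles.xml')
#     style_prop = read_style_table(xml_source)
#     shared_styles = style_prop['table']      # {xf index: Style}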
def parse_custom_num_formats(root):
"""Read in custom numeric formatting rules from the shared style table"""
custom_formats = {}
num_fmts = root.find('{%s}numFmts' % SHEET_MAIN_NS)
if num_fmts is not None:
num_fmt_nodes = num_fmts.findall('{%s}numFmt' % SHEET_MAIN_NS)
for num_fmt_node in num_fmt_nodes:
custom_formats[int(num_fmt_node.get('numFmtId'))] = \
num_fmt_node.get('formatCode').lower()
return custom_formats
def parse_color_index(root):
"""Read in the list of indexed colors"""
color_index = []
colors = root.find('{%s}colors' % SHEET_MAIN_NS)
if colors is not None:
indexedColors = colors.find('{%s}indexedColors' % SHEET_MAIN_NS)
if indexedColors is not None:
color_nodes = indexedColors.findall('{%s}rgbColor' % SHEET_MAIN_NS)
for color_node in color_nodes:
color_index.append(color_node.get('rgb'))
if not color_index:
# Default Color Index as per http://dmcritchie.mvps.org/excel/colors.htm
color_index = ['FF000000', 'FFFFFFFF', 'FFFF0000', 'FF00FF00', 'FF0000FF', 'FFFFFF00', 'FFFF00FF', 'FF00FFFF',
'FF800000', 'FF008000', 'FF000080', 'FF808000', 'FF800080', 'FF008080', 'FFC0C0C0', 'FF808080',
'FF9999FF', 'FF993366', 'FFFFFFCC', 'FFCCFFFF', 'FF660066', 'FFFF8080', 'FF0066CC', 'FFCCCCFF',
'FF000080', 'FFFF00FF', 'FFFFFF00', 'FF00FFFF', 'FF800080', 'FF800000', 'FF008080', 'FF0000FF',
'FF00CCFF', 'FFCCFFFF', 'FFCCFFCC', 'FFFFFF99', 'FF99CCFF', 'FFFF99CC', 'FFCC99FF', 'FFFFCC99',
'FF3366FF', 'FF33CCCC', 'FF99CC00', 'FFFFCC00', 'FFFF9900', 'FFFF6600', 'FF666699', 'FF969696',
'FF003366', 'FF339966', 'FF003300', 'FF333300', 'FF993300', 'FF993366', 'FF333399', 'FF333333']
return color_index
def parse_dxfs(root, color_index):
"""Read in the dxfs effects - used by conditional formatting."""
dxf_list = []
dxfs = root.find('{%s}dxfs' % SHEET_MAIN_NS)
if dxfs is not None:
nodes = dxfs.findall('{%s}dxf' % SHEET_MAIN_NS)
for dxf in nodes:
dxf_item = {}
font_node = dxf.find('{%s}font' % SHEET_MAIN_NS)
if font_node is not None:
dxf_item['font'] = {}
dxf_item['font']['bold'] = True if len(font_node.findall('{%s}b' % SHEET_MAIN_NS)) else False
dxf_item['font']['italic'] = True if len(font_node.findall('{%s}i' % SHEET_MAIN_NS)) else False
if len(font_node.findall('{%s}u' % SHEET_MAIN_NS)):
underline = font_node.find('{%s}u' % SHEET_MAIN_NS).get('val')
dxf_item['font']['underline'] = underline if underline else 'single'
color = font_node.find('{%s}color' % SHEET_MAIN_NS)
if color is not None:
dxf_item['font']['color'] = Color(Color.BLACK)
if color.get('indexed') is not None and 0 <= int(color.get('indexed')) < len(color_index):
dxf_item['font']['color'].index = color_index[int(color.get('indexed'))]
elif color.get('theme') is not None:
if color.get('tint') is not None:
                            dxf_item['font']['color'].index = 'theme:%s:%s' % (color.get('theme'), color.get('tint'))
                        else:
                            dxf_item['font']['color'].index = 'theme:%s:' % color.get('theme') # prefix color with theme
                    elif color.get('rgb'):
                        dxf_item['font']['color'].index = color.get('rgb')
fill_node = dxf.find('{%s}fill' % SHEET_MAIN_NS)
if fill_node is not None:
dxf_item['fill'] = parse_fills(dxf, color_index, True)
dxf_item['border'] = parse_borders(dxf, color_index, True)
dxf_list.append(dxf_item)
return dxf_list
def parse_fonts(root, color_index):
"""Read in the fonts"""
font_list = []
fonts = root.find('{%s}fonts' % SHEET_MAIN_NS)
if fonts is not None:
font_nodes = fonts.findall('{%s}font' % SHEET_MAIN_NS)
for font_node in font_nodes:
font = Font()
font.size = font_node.find('{%s}sz' % SHEET_MAIN_NS).get('val')
font.name = font_node.find('{%s}name' % SHEET_MAIN_NS).get('val')
font.bold = True if len(font_node.findall('{%s}b' % SHEET_MAIN_NS)) else False
font.italic = True if len(font_node.findall('{%s}i' % SHEET_MAIN_NS)) else False
if len(font_node.findall('{%s}u' % SHEET_MAIN_NS)):
underline = font_node.find('{%s}u' % SHEET_MAIN_NS).get('val')
font.underline = underline if underline else 'single'
color = font_node.find('{%s}color' % SHEET_MAIN_NS)
if color is not None:
if color.get('indexed') is not None and 0 <= int(color.get('indexed')) < len(color_index):
font.color.index = color_index[int(color.get('indexed'))]
elif color.get('theme') is not None:
if color.get('tint') is not None:
font.color.index = 'theme:%s:%s' % (color.get('theme'), color.get('tint'))
else:
font.color.index = 'theme:%s:' % color.get('theme') # prefix color with theme
elif color.get('rgb'):
font.color.index = color.get('rgb')
font_list.append(font)
return font_list
def parse_fills(root, color_index, skip_find=False):
"""Read in the list of fills"""
fill_list = []
if skip_find:
fills = root
else:
fills = root.find('{%s}fills' % SHEET_MAIN_NS)
count = 0
if fills is not None:
fillNodes = fills.findall('{%s}fill' % SHEET_MAIN_NS)
for fill in fillNodes:
# Rotation is unset
patternFill = fill.find('{%s}patternFill' % SHEET_MAIN_NS)
if patternFill is not None:
newFill = Fill()
newFill.fill_type = patternFill.get('patternType')
fgColor = patternFill.find('{%s}fgColor' % SHEET_MAIN_NS)
if fgColor is not None:
if fgColor.get('indexed') is not None and 0 <= int(fgColor.get('indexed')) < len(color_index):
newFill.start_color.index = color_index[int(fgColor.get('indexed'))]
elif fgColor.get('indexed') is not None:
# Invalid color - out of range of color_index, set to white
newFill.start_color.index = 'FFFFFFFF'
elif fgColor.get('theme') is not None:
if fgColor.get('tint') is not None:
newFill.start_color.index = 'theme:%s:%s' % (fgColor.get('theme'), fgColor.get('tint'))
else:
newFill.start_color.index = 'theme:%s:' % fgColor.get('theme') # prefix color with theme
else:
newFill.start_color.index = fgColor.get('rgb')
bgColor = patternFill.find('{%s}bgColor' % SHEET_MAIN_NS)
if bgColor is not None:
if bgColor.get('indexed') is not None and 0 <= int(bgColor.get('indexed')) < len(color_index):
newFill.end_color.index = color_index[int(bgColor.get('indexed'))]
elif bgColor.get('indexed') is not None:
# Invalid color - out of range of color_index, set to white
newFill.end_color.index = 'FFFFFFFF'
elif bgColor.get('theme') is not None:
if bgColor.get('tint') is not None:
newFill.end_color.index = 'theme:%s:%s' % (bgColor.get('theme'), bgColor.get('tint'))
else:
newFill.end_color.index = 'theme:%s:' % bgColor.get('theme') # prefix color with theme
elif bgColor.get('rgb'):
newFill.end_color.index = bgColor.get('rgb')
count += 1
fill_list.append(newFill)
return fill_list
def parse_borders(root, color_index, skip_find=False):
"""Read in the boarders"""
border_list = []
if skip_find:
borders = root
else:
borders = root.find('{%s}borders' % SHEET_MAIN_NS)
if borders is not None:
boarderNodes = borders.findall('{%s}border' % SHEET_MAIN_NS)
count = 0
for boarder in boarderNodes:
newBorder = Borders()
            if boarder.get('diagonalUp') == '1':
                newBorder.diagonal_direction = newBorder.DIAGONAL_UP
            if boarder.get('diagonalDown') == '1':
if newBorder.diagonal_direction == newBorder.DIAGONAL_UP:
newBorder.diagonal_direction = newBorder.DIAGONAL_BOTH
else:
newBorder.diagonal_direction = newBorder.DIAGONAL_DOWN
for side in ('left', 'right', 'top', 'bottom', 'diagonal'):
node = boarder.find('{%s}%s' % (SHEET_MAIN_NS, side))
if node is not None:
borderSide = getattr(newBorder,side)
if node.get('style') is not None:
borderSide.border_style = node.get('style')
color = node.find('{%s}color' % SHEET_MAIN_NS)
if color is not None:
# Ignore 'auto'
if color.get('indexed') is not None and 0 <= int(color.get('indexed')) < len(color_index):
borderSide.color.index = color_index[int(color.get('indexed'))]
elif color.get('theme') is not None:
if color.get('tint') is not None:
borderSide.color.index = 'theme:%s:%s' % (color.get('theme'), color.get('tint'))
else:
borderSide.color.index = 'theme:%s:' % color.get('theme') # prefix color with theme
elif color.get('rgb'):
borderSide.color.index = color.get('rgb')
count += 1
border_list.append(newBorder)
return border_list
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
libG(oogle)Reader
Copyright (C) 2010 Matt Behrens <askedrelic@gmail.com> http://asktherelic.com
Python library for working with the unofficial Google Reader API.
Unit tests for oauth and ClientAuthMethod in libgreader.
"""
try:
import unittest2 as unittest
except ImportError:
import unittest
from libgreader import GoogleReader, OAuthMethod, OAuth2Method, ClientAuthMethod, Feed
import requests
import re
import urlparse
import mechanize
from .config import *
class TestClientAuthMethod(unittest.TestCase):
def test_ClientAuthMethod_login(self):
ca = ClientAuthMethod(username,password)
self.assertNotEqual(ca, None)
def test_reader(self):
ca = ClientAuthMethod(username,password)
reader = GoogleReader(ca)
self.assertNotEqual(reader, None)
def test_bad_user_details(self):
self.assertRaises(IOError, ClientAuthMethod, 'asdsa', '')
def test_reader_user_info(self):
ca = ClientAuthMethod(username,password)
reader = GoogleReader(ca)
info = reader.getUserInfo()
self.assertEqual(dict, type(info))
self.assertEqual(firstname, info['userName'])
#automated approval of oauth url
#returns mechanize Response of the last "You have accepted" page
def automated_oauth_approval(url):
#general process is:
# 1. assume user isn't logged in, so get redirected to google accounts
# login page. login using test account credentials
# 2. redirected back to oauth approval page. br.submit() should choose the
# first submit on that page, which is the "Accept" button
br = mechanize.Browser()
br.open(url)
br.select_form(nr=0)
br["Email"] = username
br["Passwd"] = password
response1 = br.submit()
br.select_form(nr=0)
req2 = br.click(type="submit", nr=0)
response2 = br.open(req2)
return response2
@unittest.skip('deprecated')
class TestOAuth(unittest.TestCase):
def test_oauth_login(self):
auth = OAuthMethod(oauth_key, oauth_secret)
self.assertNotEqual(auth, None)
def test_getting_request_token(self):
auth = OAuthMethod(oauth_key, oauth_secret)
token, token_secret = auth.setAndGetRequestToken()
url = auth.buildAuthUrl()
response = automated_oauth_approval(url)
self.assertNotEqual(-1,response.get_data().find('You have successfully granted'))
def test_full_auth_process_without_callback(self):
auth = OAuthMethod(oauth_key, oauth_secret)
auth.setRequestToken()
auth_url = auth.buildAuthUrl()
response = automated_oauth_approval(auth_url)
auth.setAccessToken()
reader = GoogleReader(auth)
info = reader.getUserInfo()
self.assertEqual(dict, type(info))
self.assertEqual(firstname, info['userName'])
def test_full_auth_process_with_callback(self):
auth = OAuthMethod(oauth_key, oauth_secret)
#must be a working callback url for testing
auth.setCallback("http://www.asktherelic.com")
token, token_secret = auth.setAndGetRequestToken()
auth_url = auth.buildAuthUrl()
#callback section
#get response, which is a redirect to the callback url
response = automated_oauth_approval(auth_url)
query_string = urlparse.urlparse(response.geturl()).query
#grab the verifier token from the callback url query string
token_verifier = urlparse.parse_qs(query_string)['oauth_verifier'][0]
auth.setAccessTokenFromCallback(token, token_secret, token_verifier)
reader = GoogleReader(auth)
info = reader.getUserInfo()
self.assertEqual(dict, type(info))
self.assertEqual(firstname, info['userName'])
#automate getting the approval token
def mechanize_oauth2_approval(url):
"""
general process is:
1. assume user isn't logged in, so get redirected to google accounts
login page. login using account credentials
But, if the user has already granted access, the user is auto redirected without
having to confirm again.
2. redirected back to oauth approval page. br.submit() should choose the
first submit on that page, which is the "Accept" button
3. mechanize follows the redirect, and should throw 40X exception and
we return the token
"""
br = mechanize.Browser()
br.open(url)
br.select_form(nr=0)
br["Email"] = username
br["Passwd"] = password
try:
response1 = br.submit()
br.select_form(nr=0)
response2 = br.submit()
except Exception as e:
#watch for 40X exception on trying to load redirect page
pass
callback_url = br.geturl()
# split off the token in hackish fashion
return callback_url.split('code=')[1]
def automated_oauth2_approval(url):
"""
general process is:
1. assume user isn't logged in, so get redirected to google accounts
login page. login using account credentials
2. get redirected to oauth approval screen
3. authorize oauth app
"""
auth_url = url
headers = {'Referer': auth_url}
s = requests.Session()
r1 = s.get(auth_url)
post_data = dict((x[0],x[1]) for x in re.findall('name="(.*?)".*?value="(.*?)"', str(r1.content), re.MULTILINE))
post_data['Email'] = username
post_data['Passwd'] = password
post_data['timeStmp'] = ''
post_data['secTok'] = ''
post_data['signIn'] = 'Sign in'
post_data['GALX'] = s.cookies['GALX']
r2 = s.post('https://accounts.google.com/ServiceLoginAuth', data=post_data, headers=headers, allow_redirects=False)
    # requests mangles the url encoding and double-encodes ampersands
scope_url = r2.headers['location'].replace('amp%3B','')
# now get auth screen
r3 = s.get(scope_url)
# unless we have already authed!
if 'asktherelic' in r3.url:
code = r3.url.split('=')[1]
return code
post_data = dict((x[0],x[1]) for x in re.findall('name="(.*?)".*?value="(.*?)"', str(r3.content)))
post_data['submit_access'] = 'true'
post_data['_utf8'] = '☃'
    # again, strip the double-encoded amp;
action_url = re.findall('action="(.*?)"', str(r3.content))[0].replace('amp;','')
r4 = s.post(action_url, data=post_data, headers=headers, allow_redirects=False)
code = r4.headers['Location'].split('=')[1]
s.close()
return code
@unittest.skipIf("client_id" not in globals(), 'OAuth2 config not setup')
class TestOAuth2(unittest.TestCase):
def test_full_auth_and_access_userdata(self):
auth = OAuth2Method(client_id, client_secret)
auth.setRedirectUri(redirect_url)
url = auth.buildAuthUrl()
token = automated_oauth2_approval(url)
auth.code = token
auth.setAccessToken()
reader = GoogleReader(auth)
info = reader.getUserInfo()
self.assertEqual(dict, type(info))
self.assertEqual(firstname, info['userName'])
def test_oauth_subscribe(self):
auth = OAuth2Method(client_id, client_secret)
auth.setRedirectUri(redirect_url)
url = auth.buildAuthUrl()
token = automated_oauth2_approval(url)
auth.code = token
auth.setAccessToken()
auth.setActionToken()
reader = GoogleReader(auth)
slashdot = 'feed/http://rss.slashdot.org/Slashdot/slashdot'
#unsubscribe always return true; revert feedlist state
self.assertTrue(reader.unsubscribe(slashdot))
# now subscribe
self.assertTrue(reader.subscribe(slashdot))
# wait for server to update
import time
time.sleep(1)
reader.buildSubscriptionList()
# test subscribe successful
self.assertIn(slashdot, [x.id for x in reader.getSubscriptionList()])
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
"""
.. py:currentmodule:: FileFormat.test_SimulationInputs
.. moduleauthor:: Hendrix Demers <hendrix.demers@mail.mcgill.ca>
Test the module `SimulationInputs`.
"""
# Script information for the file.
__author__ = "Hendrix Demers (hendrix.demers@mail.mcgill.ca)"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2012 Hendrix Demers"
__license__ = ""
# Subversion information for the file.
__svnRevision__ = "$Revision$"
__svnDate__ = "$Date$"
__svnId__ = "$Id$"
# Standard library modules.
import unittest
import logging
import os.path
import copy
# Third party modules.
from nose.plugins.skip import SkipTest
# Local modules.
# Project modules
import pymcxray.FileFormat.SimulationInputs as SimulationInputs
import pymcxray.FileFormat.testUtilities as testUtilities
import pymcxray.FileFormat.Version as Version
# Globals and constants variables.
class TestSimulationInputs(unittest.TestCase):
"""
TestCase class for the module `moduleName`.
"""
def setUp(self):
"""
Setup method.
"""
unittest.TestCase.setUp(self)
self.testDataPath = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../test_data"))
self.tempDataPath = testUtilities.createTempDataPath(self.testDataPath)
def tearDown(self):
"""
Teardown method.
"""
unittest.TestCase.tearDown(self)
testUtilities.removeTempDataPath(self.tempDataPath)
def testSkeleton(self):
"""
First test to check if the testcase is working with the testing framework.
"""
#self.fail("Test if the testcase is working.")
self.assert_(True)
def test_read(self):
"""
Tests for method `read`.
"""
simulationInputs = SimulationInputs.SimulationInputs()
for title in testUtilities.getSimulationTitles():
filepath = os.path.abspath(os.path.join(self.testDataPath, "%s/%s.sim" % (title, title)))
simulationInputs.read(filepath)
self.assertEquals(title, simulationInputs.title)
self.assertEquals("%s.sam" % (title), simulationInputs.specimenFilename)
self.assertEquals("%s.mdl" % (title), simulationInputs.modelFilename)
self.assertEquals("%s.mic" % (title), simulationInputs.microsopeFilename)
self.assertEquals("%s.par" % (title), simulationInputs.simulationParametersFilename)
#self.fail("Test if the testcase is working.")
def test_read_1_1_1(self):
"""
Tests for method `read`.
"""
simulationInputs = SimulationInputs.SimulationInputs()
title = "AlMgBulk5keV_version_1_1_1"
filepath = os.path.abspath(os.path.join(self.testDataPath, "inputs", "%s.sim" % (title)))
simulationInputs.read(filepath)
self.assertEquals(title, simulationInputs.title)
self.assertEquals(Version.VERSION_1_1_1.major, simulationInputs.version.major)
self.assertEquals(Version.VERSION_1_1_1.minor, simulationInputs.version.minor)
self.assertEquals(Version.VERSION_1_1_1.revision, simulationInputs.version.revision)
self.assertEquals(Version.VERSION_1_1_1, simulationInputs.version)
self.assertEquals("%s.sam" % (title), simulationInputs.specimenFilename)
self.assertEquals("%s.mdl" % (title), simulationInputs.modelFilename)
self.assertEquals("%s.mic" % (title), simulationInputs.microsopeFilename)
self.assertEquals("%s.par" % (title), simulationInputs.simulationParametersFilename)
#self.fail("Test if the testcase is working.")
def test_read_1_2_0(self):
"""
Tests for method `read`.
"""
simulationInputs = SimulationInputs.SimulationInputs()
title = "AlMgBulk5keV_version_1_2_0"
filepath = os.path.abspath(os.path.join(self.testDataPath, "inputs", "%s.sim" % (title)))
simulationInputs.read(filepath)
self.assertEquals(title, simulationInputs.title)
self.assertEquals(Version.VERSION_1_2_0.major, simulationInputs.version.major)
self.assertEquals(Version.VERSION_1_2_0.minor, simulationInputs.version.minor)
self.assertEquals(Version.VERSION_1_2_0.revision, simulationInputs.version.revision)
self.assertEquals(Version.VERSION_1_2_0, simulationInputs.version)
self.assertEquals("%s.sam" % (title), simulationInputs.specimenFilename)
self.assertEquals("%s.mdl" % (title), simulationInputs.modelFilename)
self.assertEquals("%s.mic" % (title), simulationInputs.microsopeFilename)
self.assertEquals("%s.par" % (title), simulationInputs.simulationParametersFilename)
#self.fail("Test if the testcase is working.")
def test_read_1_2_1(self):
"""
Tests for method `read`.
"""
simulationInputs = SimulationInputs.SimulationInputs()
title = "AlMgBulk5keV_version_1_2_1"
filepath = os.path.abspath(os.path.join(self.testDataPath, "inputs", "%s.sim" % (title)))
simulationInputs.read(filepath)
self.assertEquals(title, simulationInputs.title)
self.assertEquals(Version.VERSION_1_2_1.major, simulationInputs.version.major)
self.assertEquals(Version.VERSION_1_2_1.minor, simulationInputs.version.minor)
self.assertEquals(Version.VERSION_1_2_1.revision, simulationInputs.version.revision)
self.assertEquals(Version.VERSION_1_2_1, simulationInputs.version)
self.assertEquals("%s.sam" % (title), simulationInputs.specimenFilename)
self.assertEquals("%s.mdl" % (title), simulationInputs.modelFilename)
self.assertEquals("%s.mic" % (title), simulationInputs.microsopeFilename)
self.assertEquals("%s.par" % (title), simulationInputs.simulationParametersFilename)
self.assertEquals("%s.mpp" % (title), simulationInputs.mapFilename)
#self.fail("Test if the testcase is working.")
def test_read_1_4_1(self):
"""
Tests for method `read`.
"""
simulationInputs = SimulationInputs.SimulationInputs()
title = "AlMgBulk5keV_version_1_4_1"
filepath = os.path.abspath(os.path.join(self.testDataPath, "inputs", "%s.sim" % (title)))
simulationInputs.read(filepath)
self.assertEquals(title, simulationInputs.title)
self.assertEquals(Version.VERSION_1_4_1.major, simulationInputs.version.major)
self.assertEquals(Version.VERSION_1_4_1.minor, simulationInputs.version.minor)
self.assertEquals(Version.VERSION_1_4_1.revision, simulationInputs.version.revision)
self.assertEquals(Version.VERSION_1_4_1, simulationInputs.version)
self.assertEquals("%s.sam" % (title), simulationInputs.specimenFilename)
self.assertEquals("%s.mdl" % (title), simulationInputs.modelFilename)
self.assertEquals("%s.mic" % (title), simulationInputs.microsopeFilename)
self.assertEquals("%s.par" % (title), simulationInputs.simulationParametersFilename)
self.assertEquals("%s.mpp" % (title), simulationInputs.mapFilename)
self.assertEquals("%s.rp" % (title), simulationInputs.resultParametersFilename)
#self.fail("Test if the testcase is working.")
def test__createKeys(self):
"""
Tests for method `_createKeys`.
"""
numberKeys = 6
keys = SimulationInputs.SimulationInputs()._createKeys()
self.assertEquals(numberKeys, len(keys))
#self.fail("Test if the testcase is working.")
def test_write(self):
"""
Tests for method `write`.
"""
raise SkipTest
self.maxDiff = None
# todo: make test pass using testUtilities.getSimulationTitles().
for title in ["BioRitchieNew111017"]:
filepathReference = os.path.abspath(os.path.join(self.testDataPath, "%s/%s.sim" % (title, title)))
filepath = os.path.join(self.tempDataPath, "%s.sim" % (title))
simulationInputs = SimulationInputs.SimulationInputs()
simulationInputs.write(filepath)
self.assertEquals("%s.sam" % (title), simulationInputs.specimenFilename)
self.assertEquals("%s.mdl" % (title), simulationInputs.modelFilename)
self.assertEquals("%s.mic" % (title), simulationInputs.microsopeFilename)
self.assertEquals("%s.par" % (title), simulationInputs.simulationParametersFilename)
linesRef = open(filepathReference, 'r').readlines()
lines = open(filepath, 'r').readlines()
self.assertListEqual(linesRef, lines)
#self.fail("Test if the testcase is working.")
def test_write_1_1_1(self):
"""
Tests for method `write`.
"""
raise SkipTest
self.maxDiff = None
simulationInputsRef = SimulationInputs.SimulationInputs()
simulationInputsRef.version = copy.deepcopy(Version.VERSION_1_1_1)
title = "AlMgBulk5keV_version_1_1_1"
filepathReference = os.path.abspath(os.path.join(self.testDataPath, "inputs", "%s.sim" % (title)))
simulationInputsRef.read(filepathReference)
self.assertEquals(title, simulationInputsRef.title)
filepath = os.path.join(self.tempDataPath, "%s.sim" % (title))
simulationInputs = SimulationInputs.SimulationInputs()
simulationInputs.version = copy.deepcopy(Version.VERSION_1_1_1)
simulationInputs.write(filepath)
self.assertEquals("%s.sam" % (title), simulationInputs.specimenFilename)
self.assertEquals("%s.mdl" % (title), simulationInputs.modelFilename)
self.assertEquals("%s.mic" % (title), simulationInputs.microsopeFilename)
self.assertEquals("%s.par" % (title), simulationInputs.simulationParametersFilename)
self.assertEquals(simulationInputsRef.version.major, simulationInputs.version.major)
self.assertEquals(simulationInputsRef.version.minor, simulationInputs.version.minor)
self.assertEquals(simulationInputsRef.version.revision, simulationInputs.version.revision)
self.assertEquals(simulationInputsRef.version, simulationInputs.version)
linesRef = open(filepathReference, 'r').readlines()
lines = open(filepath, 'r').readlines()
self.assertListEqual(linesRef, lines)
self.fail("Test if the testcase is working.")
def test_write_1_2_0(self):
"""
Tests for method `write`.
"""
self.maxDiff = None
simulationInputsRef = SimulationInputs.SimulationInputs()
simulationInputsRef.version = copy.deepcopy(Version.VERSION_1_2_0)
title = "AlMgBulk5keV_version_1_2_0"
filepathReference = os.path.abspath(os.path.join(self.testDataPath, "inputs", "%s.sim" % (title)))
simulationInputsRef.read(filepathReference)
self.assertEquals(title, simulationInputsRef.title)
filepath = os.path.join(self.tempDataPath, "%s.sim" % (title))
simulationInputs = SimulationInputs.SimulationInputs()
simulationInputs.version = copy.deepcopy(Version.VERSION_1_2_0)
simulationInputs.write(filepath)
self.assertEquals("%s.sam" % (title), simulationInputs.specimenFilename)
self.assertEquals("%s.mdl" % (title), simulationInputs.modelFilename)
self.assertEquals("%s.mic" % (title), simulationInputs.microsopeFilename)
self.assertEquals("%s.par" % (title), simulationInputs.simulationParametersFilename)
self.assertEquals(simulationInputsRef.version.major, simulationInputs.version.major)
self.assertEquals(simulationInputsRef.version.minor, simulationInputs.version.minor)
self.assertEquals(simulationInputsRef.version.revision, simulationInputs.version.revision)
self.assertEquals(simulationInputsRef.version, simulationInputs.version)
linesRef = open(filepathReference, 'r').readlines()
lines = open(filepath, 'r').readlines()
self.assertListEqual(linesRef, lines)
#self.fail("Test if the testcase is working.")
def test_write_1_2_1(self):
"""
Tests for method `write`.
"""
self.maxDiff = None
simulationInputsRef = SimulationInputs.SimulationInputs()
simulationInputsRef.version = copy.deepcopy(Version.VERSION_1_2_1)
title = "AlMgBulk5keV_version_1_2_1"
filepathReference = os.path.abspath(os.path.join(self.testDataPath, "inputs", "%s.sim" % (title)))
simulationInputsRef.read(filepathReference)
self.assertEquals(title, simulationInputsRef.title)
filepath = os.path.join(self.tempDataPath, "%s.sim" % (title))
simulationInputs = SimulationInputs.SimulationInputs()
simulationInputs.version = copy.deepcopy(Version.VERSION_1_2_1)
simulationInputs.write(filepath)
self.assertEquals("%s.sam" % (title), simulationInputs.specimenFilename)
self.assertEquals("%s.mdl" % (title), simulationInputs.modelFilename)
self.assertEquals("%s.mic" % (title), simulationInputs.microsopeFilename)
self.assertEquals("%s.par" % (title), simulationInputs.simulationParametersFilename)
self.assertEquals(simulationInputsRef.version.major, simulationInputs.version.major)
self.assertEquals(simulationInputsRef.version.minor, simulationInputs.version.minor)
self.assertEquals(simulationInputsRef.version.revision, simulationInputs.version.revision)
self.assertEquals(simulationInputsRef.version, simulationInputs.version)
linesRef = open(filepathReference, 'r').readlines()
lines = open(filepath, 'r').readlines()
self.assertListEqual(linesRef, lines)
#self.fail("Test if the testcase is working.")
def test_write_1_4_1(self):
"""
Tests for method `write`.
"""
self.maxDiff = None
simulationInputsRef = SimulationInputs.SimulationInputs()
simulationInputsRef.version = copy.deepcopy(Version.VERSION_1_4_1)
title = "AlMgBulk5keV_version_1_4_1"
filepathReference = os.path.abspath(os.path.join(self.testDataPath, "inputs", "%s.sim" % (title)))
simulationInputsRef.read(filepathReference)
self.assertEquals(title, simulationInputsRef.title)
filepath = os.path.join(self.tempDataPath, "%s.sim" % (title))
simulationInputs = SimulationInputs.SimulationInputs()
simulationInputs.version = Version.Version(1, 4, 1)
simulationInputs.write(filepath)
self.assertEquals("%s.sam" % (title), simulationInputs.specimenFilename)
self.assertEquals("%s.mdl" % (title), simulationInputs.modelFilename)
self.assertEquals("%s.mic" % (title), simulationInputs.microsopeFilename)
self.assertEquals("%s.par" % (title), simulationInputs.simulationParametersFilename)
self.assertEquals("%s.mpp" % (title), simulationInputs.mapFilename)
self.assertEquals("%s.rp" % (title), simulationInputs.resultParametersFilename)
self.assertEquals(simulationInputsRef.version.major, simulationInputs.version.major)
self.assertEquals(simulationInputsRef.version.minor, simulationInputs.version.minor)
self.assertEquals(simulationInputsRef.version.revision, simulationInputs.version.revision)
self.assertEquals(simulationInputsRef.version, simulationInputs.version)
linesRef = open(filepathReference, 'r').readlines()
lines = open(filepath, 'r').readlines()
self.assertListEqual(linesRef, lines)
#self.fail("Test if the testcase is working.")
def test__extractTitleFromFilepath(self):
"""
Tests for method `_extractTitleFromFilepath`.
"""
simulationInputs = SimulationInputs.SimulationInputs()
for titleRef in testUtilities.getSimulationTitles():
filepath = os.path.abspath(os.path.join(self.testDataPath, "%s/%s.sim" % (titleRef, titleRef)))
title = simulationInputs._extractTitleFromFilepath(filepath)
self.assertEquals(titleRef, title)
#self.fail("Test if the testcase is working.")
if __name__ == '__main__': #pragma: no cover
import nose
nose.runmodule()
|
|
import numpy as np
def sample_features_PoissonProp(F_prev,gamma0,data_struct,dist_struct,theta,obsModel,hyperparams,Kstar):
# function [F dist_struct theta config_log_likelihood num_prop num_accept] =
# sample_features_PoissonProp(F_prev,gamma0,data_struct,dist_struct,theta,obsModel,...
# hyperparams,Kstar)
num_accept = zeros(1,Kstar);
num_prop = zeros(1,Kstar);
obsModelType = obsModel.type;
priorType = obsModel.priorType;
[numObj Kz_prev] = size(F_prev);
Kz_max = Kz_prev + Kstar;
F = zeros(numObj,Kz_max);
F(:,1:Kz_prev) = F_prev;
F = (F > 0);
featureCounts = sum(F,1);
stored_log_likelihood = zeros(1,numObj);
Ks = size(dist_struct(1).pi_s,2);
log_likelihood_ii_kk = [0 0];
seq_order = randperm(numObj);
feature_inds = [1:Kz_max];
config_log_likelihood = 0;
for ii=seq_order
# Reset vector indicating the previous set of unique features to object i:
unique_features_ii = zeros(1,Kz_max);
unique_features_ii = (unique_features_ii > 0);
# Compute likelihood under all possible parameters (including ones we
# have not yet seen in the data):
log_likelihood = compute_likelihood_unnorm(data_struct(ii),theta,obsModelType,[1:Kz_max],Kz_max,Ks);
dimu = size(data_struct(ii).obs,1);
# Compute current likelihood of the current feature assignments:
if sum(F(ii,:)) == 0:
stored_log_likelihood(ii) = -inf;
else:
pi_init = dist_struct(ii).pi_init(F(ii,:));
pi_init = pi_init./sum(pi_init);
pi_z = dist_struct(ii).pi_z(F(ii,:),F(ii,:));
pi_z = pi_z./repmat(sum(pi_z,2),[1,size(pi_z,2)]);
pi_s = dist_struct(ii).pi_s(F(ii,:));
pi_s = pi_s./repmat(sum(pi_s,2),[1,size(pi_s,2)]);
# Pass messages forward to integrate over the mode/state sequence:
log_likelihood_ii = log_likelihood(F(ii,:),:,:);
log_normalizer_ii = max(max(log_likelihood_ii,[],1),[],2);
log_likelihood_ii = log_likelihood_ii - log_normalizer_ii(ones(sum(F(ii,:)),1),ones(Ks,1),:);
likelihood_ii = exp(log_likelihood_ii);
log_normalizer_ii = log_normalizer_ii - (dimu/2)*log(2*pi);
fwd_msg, neglog_c = forward_message_vec(likelihood_ii,log_normalizer_ii,data_struct(ii).blockEnd,pi_z,pi_s,pi_init);
if np.isnan(np.sum(neglog_c)):
stored_log_likelihood(ii) = -np.inf;
else:
stored_log_likelihood(ii) = sum(neglog_c);
# For each of the currently instantiated features (this vector will
# change after sampling each object ii):
for kk=feature_inds((featureCounts>0))
# Store previous feature value:
Fik_prev = F(ii,kk);
# Remove object i's count from the kth feature count:
featureCounts(kk) = featureCounts(kk)-F(ii,kk);
# If other objects are using this feature:
if featureCounts(kk)>0:
# Grab out previous likelihood of data under this feature assignment:
log_likelihood_ii_kk(Fik_prev+1) = stored_log_likelihood(ii);
# Try out other value for f_{ik}:
F(ii,kk) = ~Fik_prev;
if sum(F(ii,:)) == 0:
log_likelihood_ii_kk(~Fik_prev+1) = -inf;
else:
pi_init = dist_struct(ii).pi_init(F(ii,:));
pi_init = pi_init./sum(pi_init);
pi_z = dist_struct(ii).pi_z(F(ii,:),F(ii,:));
pi_z = pi_z./repmat(sum(pi_z,2),[1,size(pi_z,2)]);
pi_s = dist_struct(ii).pi_s(F(ii,:));
pi_s = pi_s./repmat(sum(pi_s,2),[1,size(pi_s,2)]);
# Pass messages forward to integrate over the mode/state sequence:
log_likelihood_ii = log_likelihood(F(ii,:),:,:);
log_normalizer_ii = max(max(log_likelihood_ii,[],1),[],2);
log_likelihood_ii = log_likelihood_ii - log_normalizer_ii(ones(sum(F(ii,:)),1),ones(Ks,1),:);
likelihood_ii = exp(log_likelihood_ii);
log_normalizer_ii = log_normalizer_ii - (dimu/2)*log(2*pi);
[fwd_msg neglog_c] = forward_message_vec(likelihood_ii,log_normalizer_ii,data_struct(ii).blockEnd,pi_z,pi_s,pi_init);
if isnan(sum(neglog_c)):
log_likelihood_ii_kk(~Fik_prev+1) = -inf;
else:
log_likelihood_ii_kk(~Fik_prev+1) = sum(neglog_c);
# Compute accept-reject ratio:
log_rho_star = log(numObj - featureCounts(kk)) + log_likelihood_ii_kk(1)-log(featureCounts(kk)) - log_likelihood_ii_kk(2);
rho = exp(sign(Fik_prev-0.5)*log_rho_star);
# Sample new feature value:
if isnan(rho):
F(ii,kk)=0;
else:
if rho>1
F(ii,kk) = ~Fik_prev;
else
sample_set = [Fik_prev ~Fik_prev];
ind = 1+(rand(1)>(1-rho));
F(ii,kk) = sample_set(ind);
# Store likelihood of current assignment:
stored_log_likelihood(ii) = log_likelihood_ii_kk(F(ii,kk)+1);
# Add new assignment of f_{ik} to feature count of kth feature:
featureCounts(kk) = featureCounts(kk)+F(ii,kk);
else
# If kth feature is specific to object i, place it in the
# indicator vector:
unique_features_ii(kk) = 1;
end
end
# deal with unique features
unique_feature_inds = feature_inds(unique_features_ii);
# Sample from Poisson proposal;
num_new_unique_features = np.random.poisson(gamma0/numObj);
num_prop(num_new_unique_features+1) = num_prop(num_new_unique_features+1)+1;
f_ii = F(ii,:);
f_ii(unique_feature_inds) = 0;
f_ii(Kz_prev+1:Kz_prev+num_new_unique_features) = 1;
# Grab likelihood under the previous assignment:
log_likelihood_ii_kk(1) = stored_log_likelihood(ii);
# Compute likelihood under the proposed change:
if np.sum(f_ii) == 0:
log_likelihood_ii_kk[1] = -np.inf;
else:
if Kz_prev+num_new_unique_features>Kz_max
Kz_extra = Kstar + num_new_unique_features;
# Expand transition distributions:
for jj=1:numObj
dist_struct(jj).pi_init(Kz_max+1:Kz_max+Kz_extra) = randgamma(hyperparams.alpha0*ones(1,Kz_extra));
dist_struct(jj).pi_z(:,Kz_max+1:Kz_max+Kz_extra) = randgamma(hyperparams.alpha0*ones(Kz_max,Kz_extra));
dist_struct(jj).pi_z(Kz_max+1:Kz_max+Kz_extra,1:Kz_max) = randgamma(hyperparams.alpha0*ones(Kz_extra,Kz_max));
dist_struct(jj).pi_z(Kz_max+1:Kz_max+Kz_extra,Kz_max+1:Kz_max+Kz_extra) = randgamma(hyperparams.alpha0*ones(Kz_extra) \
+ hyperparams.kappa0*eye(Kz_extra));
dist_struct(jj).pi_s(Kz_max+1:Kz_max+Kz_extra,:) = 1;
# Expand theta:
theta = sample_theta_extra(theta,obsModel,Kz_extra);
Kz_max = Kz_max + Kz_extra;
# Expand F:
F_prev = F;
F = zeros(numObj,Kz_max);
F = F > 0;
F(:,1:Kz_max-Kz_extra) = F_prev;
f_ii_prev = f_ii;
f_ii = zeros(1,Kz_max);
f_ii = f_ii>0;
f_ii(1:length(f_ii_prev)) = f_ii_prev;
log_likelihood = compute_likelihood_unnorm(data_struct(ii),theta,obsModelType,[1:Kz_max],Kz_max,Ks);
print 'adding more parameters'
end
pi_init = dist_struct(ii).pi_init(f_ii);
pi_init = pi_init./sum(pi_init);
pi_z = dist_struct(ii).pi_z(f_ii,f_ii);
pi_z = pi_z./repmat(sum(pi_z,2),[1,size(pi_z,2)]);
pi_s = dist_struct(ii).pi_s(f_ii);
pi_s = pi_s./repmat(sum(pi_s,2),[1,size(pi_s,2)]);
# Pass messages forward to integrate over the mode/state sequence:
log_likelihood_ii = log_likelihood(f_ii,:,:);
log_normalizer_ii = max(max(log_likelihood_ii,[],1),[],2);
log_likelihood_ii = log_likelihood_ii - log_normalizer_ii(ones(sum(f_ii),1),ones(Ks,1),:);
likelihood_ii = exp(log_likelihood_ii);
log_normalizer_ii = log_normalizer_ii - (dimu/2)*log(2*pi);
fwd_msg, neglog_c = forward_message_vec(likelihood_ii,log_normalizer_ii,data_struct(ii).blockEnd,pi_z,pi_s,pi_init);
if np.isnan(sum(neglog_c)):
log_likelihood_ii_kk[1] = -np.inf;
else:
log_likelihood_ii_kk[1] = np.sum(neglog_c);
# Compute accept-reject ratio:
log_rho_star = (log_likelihood_ii_kk[1] - log_likelihood_ii_kk[0]);
rho = np.exp(log_rho_star);
# Sample new feature value:
if np.isnan(rho):
raise ValueError('sample_features_PoissonProp: NaN rho')
else:
if rho>1
F(ii,:) = f_ii;
ind = 1;
else
ind = (rand(1)>(1-rho));
F(ii,:) = (1-ind)*F(ii,:) + (ind-0)*f_ii;
num_accept(num_new_unique_features+1) = num_accept(num_new_unique_features+1)+ind;
stored_log_likelihood(ii) = log_likelihood_ii_kk(ind+1);
config_log_likelihood = config_log_likelihood + stored_log_likelihood(ii);
featureCounts = sum(F,1);
used_features = find(featureCounts>0);
Kz_prev = used_features(end);
end
F, dist_struct, theta = reallocate_states(F,dist_struct,theta,priorType);
return F, dist_struct, theta, config_log_likelihood, num_prop, num_accept
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
import shutil
import tempfile
import os
from stalker import (db, Version, Task, Project, Structure, StatusList,
Repository, Status, FilenameTemplate)
import unittest
from anima.env.external import ExternalEnv, ExternalEnvFactory
class ExternalEnvTestCase(unittest.TestCase):
"""tests ExternalEnv class
"""
def setUp(self):
"""set up the test
"""
db.setup()
db.init()
self.temp_path = tempfile.mkdtemp()
self.repo = Repository(
name='Test Repository',
linux_path=self.temp_path,
windows_path=self.temp_path,
osx_path=self.temp_path
)
self.status_new = Status.query.filter_by(code='NEW').first()
self.status_wip = Status.query.filter_by(code='WIP').first()
self.status_cmpl = Status.query.filter_by(code='CMPL').first()
self.project_status_list = StatusList(
target_entity_type='Project',
name='Project Statuses',
statuses=[self.status_new, self.status_wip, self.status_cmpl]
)
self.task_filename_template = FilenameTemplate(
name='Task Filename Template',
target_entity_type='Task',
path='{{project.code}}/{%- for parent_task in parent_tasks -%}'
'{{parent_task.nice_name}}/{%- endfor -%}',
filename='{{version.nice_name}}'
'_v{{"%03d"|format(version.version_number)}}{{extension}}'
)
self.project_structure = Structure(
name='Project Structure',
templates=[self.task_filename_template]
)
self.project = Project(
name='Test Project',
code='TP',
status_list=self.project_status_list,
repository=self.repo,
structure=self.project_structure
)
self.task = Task(
name='Test Task',
project=self.project
)
self.version = Version(
task=self.task
)
self.kwargs = {
'name': 'Photoshop',
'extensions': ['psd'],
'structure': ['Outputs']
}
self.external_env = ExternalEnv(**self.kwargs)
def tearDown(self):
"""clean up the test
"""
shutil.rmtree(self.temp_path)
def test_name_argument_cannot_be_skipped(self):
"""testing if a TypeError will raised when the name argument is skipped
"""
self.kwargs.pop('name')
self.assertRaises(TypeError, ExternalEnv, **self.kwargs)
def test_name_argument_cannot_be_None(self):
"""testing if a TypeError will be raised when the name argument is None
"""
self.kwargs['name'] = None
self.assertRaises(TypeError, ExternalEnv, **self.kwargs)
def test_name_attribute_cannot_be_set_to_None(self):
"""testing if a TypeError will be raised when the name attribute is set
to None
"""
self.assertRaises(TypeError, setattr, self.external_env, 'name', None)
def test_name_argument_should_be_a_string(self):
"""testing if a TypeError will be raised when the name argument is not
a string
"""
self.kwargs['name'] = 32
        self.assertRaises(TypeError, ExternalEnv, **self.kwargs)
def test_name_attribute_should_be_set_to_a_string(self):
"""testing if a TypeError will be raised when the name attribute is set
to a value other than a string
"""
self.assertRaises(TypeError, setattr, self.external_env, 'name', 23)
def test_name_argument_is_working_properly(self):
"""testing if the name argument value is correctly passed to the name
attribute
"""
test_value = 'ZBrush'
self.kwargs['name'] = test_value
external_env = ExternalEnv(**self.kwargs)
self.assertEqual(test_value, external_env.name)
def test_name_attribute_is_working_properly(self):
"""testing if the name attribute value is correctly set
"""
test_value = 'ZBrush'
self.external_env.name = test_value
self.assertEqual(test_value, self.external_env.name)
def test_extension_argument_cannot_be_skipped(self):
"""testing if a TypeError will raised when the extension argument is
skipped
"""
self.kwargs.pop('extensions')
self.assertRaises(TypeError, ExternalEnv, **self.kwargs)
def test_extension_argument_cannot_be_None(self):
"""testing if a TypeError will be raised when the extension argument is
None
"""
self.kwargs['extensions'] = None
self.assertRaises(TypeError, ExternalEnv, **self.kwargs)
def test_extension_attribute_cannot_be_set_to_None(self):
"""testing if a TypeError will be raised when the extension attribute
is set to None
"""
self.assertRaises(TypeError, setattr, self.external_env, 'extensions', None)
def test_extension_argument_should_be_a_string(self):
"""testing if a TypeError will be raised when the extension argument is
not a string
"""
        self.kwargs['extensions'] = 32
        self.assertRaises(TypeError, ExternalEnv, **self.kwargs)
def test_extension_attribute_should_be_set_to_a_string(self):
"""testing if a TypeError will be raised when the extension attribute
is set to a value other than a string
"""
self.assertRaises(TypeError, setattr, self.external_env, 'extensions', 23)
def test_extension_argument_with_no_dots_is_working(self):
"""testing if extension argument accepts strings without a dot at the
beginning
"""
self.kwargs['extensions'] = ['psd']
external_env = ExternalEnv(**self.kwargs)
self.assertEqual(['.psd'], external_env.extensions)
def test_extension_attribute_with_no_dots_is_working(self):
"""testing if extension attribute accepts strings without a dot at the
beginning
"""
self.external_env.extensions = ['psd']
self.assertEqual(['.psd'], self.external_env.extensions)
def test_extension_argument_is_working_properly(self):
"""testing if the extension argument value is correctly passed to the
extension attribute
"""
test_value = ['.ztl']
self.kwargs['extensions'] = test_value
external_env = ExternalEnv(**self.kwargs)
self.assertEqual(test_value, external_env.extensions)
def test_extension_attribute_is_working_properly(self):
"""testing if the extension attribute value is correctly set
"""
test_value = ['.ztl']
self.external_env.extensions = test_value
self.assertEqual(test_value, self.external_env.extensions)
def test_structure_argument_can_be_skipped(self):
"""testing if the structure argument can be skipped
"""
self.kwargs.pop('structure')
ExternalEnv(**self.kwargs)
def test_structure_attribute_value_when_structure_argument_is_skipped(self):
"""testing if the structure argument attribute will be an empty list
when the structure argument is skipped
"""
self.kwargs.pop('structure')
external_env = ExternalEnv(**self.kwargs)
self.assertEqual(external_env.structure, [])
def test_structure_argument_can_be_set_to_None(self):
"""testing if the structure argument can be set to None
"""
self.kwargs['structure'] = None
ExternalEnv(**self.kwargs)
def test_structure_attribute_value_when_structure_argument_is_None(self):
"""testing if the structure argument attribute will be an empty list
when the structure argument value is None
"""
self.kwargs['structure'] = None
external_env = ExternalEnv(**self.kwargs)
self.assertEqual(external_env.structure, [])
def test_structure_attribute_can_be_set_to_None(self):
"""testing if the structure attribute value will be an empty list when
the structure attribute is set to None
"""
self.external_env.structure = None
def test_structure_argument_is_not_a_list(self):
"""testing if a TypeError will be raised when the structure argument
is not None or a list
"""
self.kwargs['structure'] = 'this is not a list'
self.assertRaises(TypeError, ExternalEnv, **self.kwargs)
def test_structure_attribute_is_not_a_list(self):
"""testing if a TypeError will be raised when the structure attribute
        is not set to None or a list
"""
        self.assertRaises(TypeError, setattr, self.external_env, 'structure',
                          'this is not a list')
def test_structure_argument_is_not_a_list_of_strings(self):
"""testing if a TypeError will be raised when not all the the elements
are strings in structure argument
"""
self.kwargs['structure'] = ['not', 1, 'list of', 'strings']
self.assertRaises(TypeError, ExternalEnv, **self.kwargs)
def test_structure_attribute_is_not_a_list_of_strings(self):
"""testing if a TypeError will be raised when not all the the elements
are strings in structure attribute value
"""
test_value = ['not', 1, 'list of', 'strings']
self.assertRaises(TypeError, setattr, self.external_env, 'structure',
test_value)
def test_structure_argument_is_working_properly(self):
"""testing if the structure argument value is correctly passed to the
structure attribute
"""
test_value = ['Outputs', 'Inputs', 'cache']
self.kwargs['structure'] = test_value
external_env = ExternalEnv(**self.kwargs)
self.assertEqual(sorted(test_value), sorted(external_env.structure))
def test_structure_attribute_is_working_properly(self):
"""testing if the structure attribute value can be correctly updated
"""
test_value = ['Outputs', 'Inputs', 'cache']
self.external_env.structure = test_value
self.assertEqual(sorted(test_value),
sorted(self.external_env.structure))
def test_conform_version_argument_accepts_Version_instances_only(self):
"""testing if a TypeError will be raised when the version argument in
conform method is not a Version instance
"""
self.assertRaises(TypeError, self.external_env.conform,
version='not a version instance')
def test_conform_method_will_set_the_version_extension(self):
"""testing if the conform method will set the version extension to the
environment extension correctly
"""
self.assertNotEqual(self.version.extension, '.ztl')
external_env = ExternalEnv(name='ZBrush', extensions=['.ztl'])
external_env.conform(self.version)
self.assertEqual(self.version.extension, '.ztl')
def test_conform_method_will_set_the_version_created_with(self):
"""testing if the conform method will set the version extension to the
environment name
"""
self.assertNotEqual(self.version.extension, '.ztl')
external_env = ExternalEnv(name='ZBrush', extensions=['.ztl'])
external_env.conform(self.version)
self.assertEqual(self.version.extension, '.ztl')
self.assertEqual(self.version.created_with, 'ZBrush')
def test_initialize_structure_version_argument_accepts_Version_instances_only(self):
"""testing if a TypeError will be raised when the version argument in
initialize_structure method is not a Version instance
"""
self.assertRaises(TypeError, self.external_env.initialize_structure,
version='not a version instance')
def test_initialize_structure_will_create_the_folders_of_the_environment(self):
"""testing if the initialize_structure method will create the folders
at the given Version instance path
"""
self.external_env.initialize_structure(self.version)
for folder in self.external_env.structure:
self.assertTrue(
os.path.exists(
os.path.join(self.version.absolute_path, folder)
)
)
def test_initialize_structure_will_handle_OSErrors(self):
"""testing if the initialize_structure method will handle OSErrors when
creating folders which are already there
"""
# call it multiple times
self.external_env.initialize_structure(self.version)
self.external_env.initialize_structure(self.version)
self.external_env.initialize_structure(self.version)
def test_save_as_will_conform_and_initialize_structure(self):
"""testing if the save_as method will conform the given version and
initialize the structure
"""
self.external_env.save_as(self.version)
self.assertEquals(self.external_env.extensions[0],
self.version.extension)
for folder in self.external_env.structure:
self.assertTrue(
os.path.exists(
os.path.join(self.version.absolute_path, folder)
)
)
def test_get_settings_file_path_returns_the_settings_path_correctly(self):
"""testing if the get_settings_path returns the settings path correctly
"""
self.assertEqual(
os.path.expanduser('~/.atrc/last_version'),
ExternalEnv.get_settings_file_path()
)
def test_append_to_recent_files_version_argument_is_not_a_Version_instance(self):
"""testing if a TypeError will be raised when the version argument in
append_to_recent_files() method is not a stalker.models.version.Version
instance
"""
self.assertRaises(TypeError, self.external_env.append_to_recent_files,
3121)
def test_append_to_recent_files_working_properly(self):
"""testing if the append_to_recent_files() method is working properly
"""
# set the id attribute of the test version to a random number
self.version.id = 234
self.external_env.append_to_recent_files(self.version)
# check the settings file
path = self.external_env.get_settings_file_path()
with open(path, 'r') as f:
vid = f.read()
self.assertEqual(vid, str(234))
def test_get_last_version_is_working_properly(self):
"""testing if hte get_last_version() method will return Version
instance properly
"""
# need a database for this test
from stalker import db
db.setup({'sqlalchemy.url': 'sqlite:///:memory:'})
db.DBSession.add(self.version)
db.DBSession.commit()
self.assertTrue(self.version.id is not None)
self.external_env.append_to_recent_files(self.version)
last_version = self.external_env.get_last_version()
self.assertEqual(last_version, self.version)
class ExternalEnvFactoryTestCase(unittest.TestCase):
"""tests ExternalEnvFactory class
"""
@classmethod
def setUpClass(cls):
"""set up once
"""
db.setup()
db.init()
def test_get_env_names_method_will_return_all_environment_names_properly(self):
"""testing if ExternalEnvFactory.get_env_names() method will
return all the environment names as a list of strings
"""
from anima.env.external import external_environments
expected_result = external_environments.keys()
ext_env_factory = ExternalEnvFactory()
result = ext_env_factory.get_env_names()
self.assertEqual(expected_result, result)
def test_get_env_names_method_will_return_complex_environment_names_properly(self):
"""testing if ExternalEnvFactory.get_env_names() method will
return all the environment names as a list of strings in desired format
when environment_name_format is set
"""
name_format = '%e - %n'
expected_result = [
'.ztl - ZBrush',
'.mud - MudBox',
#'.psd - Photoshop'
]
ext_env_factory = ExternalEnvFactory()
result = ext_env_factory.get_env_names(name_format=name_format)
self.assertEqual(sorted(expected_result), sorted(result))
def test_get_env_method_name_argument_is_not_a_string(self):
"""testing if a TypeError will be raised when the name argument is not
a string in ExternalEnvironmentFactory.get_env() method
"""
ext_env_factory = ExternalEnvFactory()
self.assertRaises(TypeError, ext_env_factory.get_env, 234)
def test_get_env_method_name_is_not_in_list(self):
"""testing if a ValueError will be raised when the name argument value
is not in the anima.env.external_environments list
"""
ext_env_factory = ExternalEnvFactory()
self.assertRaises(ValueError, ext_env_factory.get_env, 'Modo')
def test_get_env_method_will_return_desired_environment(self):
"""testing if ExternalEnvFactory.get_env() will return desired
ExternalEnvironment instance
"""
ext_env_factory = ExternalEnvFactory()
#photoshop = ext_env_factory.get_env('Photoshop')
#self.assertIsInstance(photoshop, ExternalEnv)
#self.assertEqual(photoshop.name, 'Photoshop')
#self.assertEqual(photoshop.extension, '.psd')
#self.assertEqual(photoshop.structure, ['Outputs'])
zbrush_tool = ext_env_factory.get_env('ZBrush')
self.assertTrue(isinstance(zbrush_tool, ExternalEnv))
self.assertEqual(zbrush_tool.name, 'ZBrush')
self.assertEqual(zbrush_tool.extensions, ['.ztl'])
self.assertEqual(zbrush_tool.structure, ['Outputs'])
mudbox = ext_env_factory.get_env('MudBox')
self.assertTrue(isinstance(mudbox, ExternalEnv))
self.assertEqual(mudbox.name, 'MudBox')
self.assertEqual(mudbox.extensions, ['.mud'])
self.assertEqual(mudbox.structure, ['Outputs'])
def test_get_env_method_will_return_desired_environment_even_with_complex_formats(self):
"""testing if ExternalEnvFactory.get_env() will return desired
ExternalEnvironment instance even with names like "MudBox (.mud)"
"""
ext_env_factory = ExternalEnvFactory()
#
#photoshop = ext_env_factory.get_env('Photoshop (.psd)',
# name_format='%n (%e)')
#self.assertIsInstance(photoshop, ExternalEnv)
#self.assertEqual(photoshop.name, 'Photoshop')
#self.assertEqual(photoshop.extension, '.psd')
#self.assertEqual(photoshop.structure, ['Outputs'])
zbrush = ext_env_factory.get_env('ZBrush (.ztl)',
name_format='%n (%e)')
self.assertTrue(isinstance(zbrush, ExternalEnv))
self.assertEqual(zbrush.name, 'ZBrush')
self.assertEqual(zbrush.extensions, ['.ztl'])
self.assertEqual(zbrush.structure, ['Outputs'])
mudbox = ext_env_factory.get_env('MudBox (.mud)',
name_format='%n (%e)')
self.assertTrue(isinstance(mudbox, ExternalEnv))
self.assertEqual(mudbox.name, 'MudBox')
self.assertEqual(mudbox.extensions, ['.mud'])
self.assertEqual(mudbox.structure, ['Outputs'])
def test_get_env_method_will_return_desired_environment_even_with_custom_formats(self):
"""testing if ExternalEnvFactory.get_env() will return desired
        ExternalEnvironment instance even with custom name formats like "(%e) - %n"
"""
ext_env_factory = ExternalEnvFactory()
name_format = '(%e) - %n'
#photoshop = ext_env_factory.get_env('(.psd) - Photoshop',
# name_format=name_format)
#self.assertIsInstance(photoshop, ExternalEnv)
#self.assertEqual(photoshop.name, 'Photoshop')
#self.assertEqual(photoshop.extension, '.psd')
#self.assertEqual(photoshop.structure, ['Outputs'])
zbrush = ext_env_factory.get_env('(.ztl) ZBrush',
name_format=name_format)
self.assertTrue(isinstance(zbrush, ExternalEnv))
self.assertEqual(zbrush.name, 'ZBrush')
self.assertEqual(zbrush.extensions, ['.ztl'])
self.assertEqual(zbrush.structure, ['Outputs'])
mudbox = ext_env_factory.get_env('(.mud) MudBox',
name_format=name_format)
self.assertTrue(isinstance(mudbox, ExternalEnv))
self.assertEqual(mudbox.name, 'MudBox')
self.assertEqual(mudbox.extensions, ['.mud'])
self.assertEqual(mudbox.structure, ['Outputs'])
# Copyright (c) 2017 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
import os
import re
import math
import hashlib
import logging
import warnings
import errno
from time import sleep
from collections import defaultdict
from ..core.json import json
from ..common import six
from ..common import errors
from .utility import walkdepth, is_string
from .hashing import calc_id
if six.PY2:
import imp
else:
import importlib.machinery
logger = logging.getLogger(__name__)
KEY_PROJECT = 'project'
KEY_FILENAME = 'filename'
KEY_PATH = 'root'
KEY_PAYLOAD = 'format'
def md5(file):
"Calculate and return the md5 hash value for the file data."
m = hashlib.md5()
for chunk in iter(lambda: file.read(4096), b''):
m.update(chunk)
return m.hexdigest()
def _is_blank_module(module):
with open(module.__file__) as file:
return not bool(file.read().strip())
class BaseCrawler(object):
"""Crawl through `root` and index all files.
The crawler creates an index on data, which can be exported
to a database for easier access."""
tags = None
def __init__(self, root):
"""Initialize a BaseCrawler instance.
:param root: The path to the root directory to crawl through.
:type root: str"""
self.root = os.path.expanduser(root)
self.tags = set() if self.tags is None else set(self.tags)
def docs_from_file(self, dirpath, fn):
"""Implement this method to generate documents from files.
:param dirpath: The path of the file, relative to `root`.
:type dirpath: str
:param fn: The filename.
:type fn: str
:yields: Index documents.
"""
raise NotImplementedError()
yield
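    # A minimal sketch of a possible override (illustrative only; the subclass
    # name below is hypothetical and not part of this module), yielding one
    # document per file keyed by its filename:
    #
    #     class FileNameCrawler(BaseCrawler):
    #         def docs_from_file(self, dirpath, fn):
    #             yield {KEY_FILENAME: fn}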
def fetch(self, doc, mode='r'):
"""Implement this generator method to associate data with a document.
:returns: object associated with doc
"""
raise errors.FetchError("Unable to fetch object for '{}'.".format(doc))
@classmethod
def _calculate_hash(cls, doc, dirpath, fn):
blob = json.dumps(doc, sort_keys=True)
m = hashlib.md5()
m.update(dirpath.encode('utf-8'))
m.update(fn.encode('utf-8'))
m.update(blob.encode('utf-8'))
return m.hexdigest()
def crawl(self, depth=0):
"""Crawl through the `root` directory.
The crawler will inspect every file and directory up
until the specified `depth` and call the
:meth:`docs_from_file` method.
:param depth: Crawl through the directory for the specified depth.
A value of 0 specifies no limit.
        :type depth: int
:yields: (id, doc)-tuples"""
logger.info("Crawling '{}' (depth={})...".format(self.root, depth))
for dirpath, dirnames, filenames in walkdepth(self.root, depth):
for fn in filenames:
for doc in self.docs_from_file(dirpath, fn):
logger.debug("doc from file: '{}'.".format(
os.path.join(dirpath, fn)))
doc.setdefault(KEY_PAYLOAD, None)
doc.setdefault(
'_id', self._calculate_hash(doc, dirpath, fn))
yield doc
logger.info("Crawl of '{}' done.".format(self.root))
def process(self, doc, dirpath, fn):
"""Implement this method for additional processing of generated docs.
The default implementation will return the unmodified `doc`.
:param dirpath: The path of the file, relative to `root`.
:type dirpath: str
:param fn: The filename.
:type fn: str
        :returns: A document, i.e. an instance of a mapping.
:rtype: mapping"""
return doc
class RegexFileCrawler(BaseCrawler):
"""Generate documents from filenames and associate each file with a data type.
The `RegexFileCrawler` uses regular expressions to generate
    data from files. This is a particularly easy way to retrieve metadata
    associated with files. Inherit from this class to configure a crawler
    for your data structure.
    Let's assume we want to index text files with a naming pattern that
    specifies a parameter `a` through the filename, e.g.:
.. code-block:: python
~/my_project/a_0.txt
~/my_project/a_1.txt
...
A valid regular expression to match this pattern would
be: ``.*\/a_(?P<a>\d+)\.txt`` which may be defined for a crawler as such:
.. code-block:: python
        class MyCrawler(RegexFileCrawler):
            pass
MyCrawler.define('.*\/a_(?P<a>\d+)\.txt', 'TextFile')
"""
"Mapping of compiled regex objects and associated formats."
definitions = dict()
@classmethod
def define(cls, regex, format_=None):
"""Define a format for a particular regular expression.
:param regex: All files of the specified format
must match this regular expression.
:type regex: :class:`str`
:param format_: The format associated with all matching files.
:type format_: :class:`object`
"""
if six.PY2:
if isinstance(regex, basestring): # noqa
regex = re.compile(regex)
else:
if isinstance(regex, str):
regex = re.compile(regex)
definitions = dict(cls.definitions)
definitions[regex] = format_
cls.definitions = definitions
@classmethod
def compute_file_id(cls, doc, file):
"""Compute the file id for a given doc and the associated file.
:param doc: The index document
:param file: The associated file
:returns: The file id.
"""
file_id = doc['md5'] = md5(file)
return file_id
def docs_from_file(self, dirpath, fn):
"""Generate documents from filenames.
This method implements the abstract
        :py:meth:`~.BaseCrawler.docs_from_file` and yields index
documents associated with files.
.. note::
It is not recommended to reimplement this method to modify
documents generated from filenames.
See :meth:`~RegexFileCrawler.process` instead.
:param dirpath: The path of the file relative to root.
:param fn: The filename of the file.
:yields: Index documents.
"""
for regex, format_ in self.definitions.items():
m = regex.match(os.path.join(dirpath, fn))
if m:
doc = self.process(m.groupdict(), dirpath, fn)
doc[KEY_FILENAME] = os.path.relpath(
os.path.join(dirpath, fn), self.root)
doc[KEY_PATH] = os.path.abspath(self.root)
doc[KEY_PAYLOAD] = str(format_)
with open(os.path.join(dirpath, fn), 'rb') as file:
doc['file_id'] = self.compute_file_id(doc, file)
yield doc
def fetch(self, doc, mode='r'):
"""Fetch the data associated with `doc`.
        :param doc: An index document.
:type doc: :class:`dict`
:returns: The file associated with the index document.
:rtype: A file-like object
"""
fn = doc.get(KEY_FILENAME)
if fn:
for regex, format_ in self.definitions.items():
ffn = os.path.join(self.root, fn)
m = regex.match(ffn)
if m:
if is_string(format_):
return open(ffn, mode=mode)
else:
for meth in ('read', 'close'):
if not callable(getattr(format_, meth, None)):
msg = "Format {} has no {}() method.".format(format_, meth)
warnings.warn(msg)
return format_(open(ffn, mode=mode))
else:
raise errors.FetchError("Unable to match file path of doc '{}' "
"to format definition.".format(doc))
else:
raise errors.FetchError("Insufficient meta data in doc '{}'.".format(doc))
def process(self, doc, dirpath, fn):
"""Post-process documents generated from filenames.
Example:
.. code-block:: python
            class MyCrawler(signac.indexing.RegexFileCrawler):
def process(self, doc, dirpath, fn):
doc['long_name_for_a'] = doc['a']
return super(MyCrawler, self).process(doc, dirpath, fn)
:param dirpath: The path of the file, relative to `root`.
:type dirpath: str
:param fn: The filename.
:type fn: str
        :returns: An index document, i.e. an instance of a mapping.
:rtype: mapping"""
result = dict()
for key, value in doc.items():
if value is None or isinstance(value, bool):
result[key] = value
continue
try:
value = float(value)
except Exception:
result[key] = value
else:
                # keep NaN/inf values unchanged; coerce finite values to
                # int when they are integral, otherwise to float
                if math.isnan(value) or math.isinf(value):
                    result[key] = value
                elif float(value) == int(value):
                    result[key] = int(value)
                else:
                    result[key] = float(value)
return super(RegexFileCrawler, self).process(result, dirpath, fn)
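    # Illustrative effect of the numeric coercion in :meth:`process` (sketch,
    # not an exhaustive specification): values captured from filenames as
    # strings are converted where possible, e.g.
    #
    #     {'a': '1'}    ->  {'a': 1}
    #     {'a': '0.5'}  ->  {'a': 0.5}
    #     {'a': 'text'} ->  {'a': 'text'}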
def crawl(self, depth=0):
if self.definitions:
for doc in super(RegexFileCrawler, self).crawl(depth=depth):
yield doc
else:
return
class JSONCrawler(BaseCrawler):
encoding = 'utf-8'
    fn_regex = r'.*\.json'
def docs_from_json(self, doc):
yield doc
def docs_from_file(self, dirpath, fn):
if re.match(self.fn_regex, os.path.join(dirpath, fn)):
with open(os.path.join(dirpath, fn), 'rb') as file:
doc = json.loads(file.read().decode(self.encoding))
for d in self.docs_from_json(doc):
yield d
def _index_signac_project_workspace(root,
include_job_document=True,
fn_statepoint='signac_statepoint.json',
fn_job_document='signac_job_document.json',
statepoint_index='statepoint',
signac_id_alias='_id',
encoding='utf-8',
statepoint_dict=None):
"Yields standard index documents for a signac project workspace."
logger.debug("Indexing workspace '{}'...".format(root))
m = re.compile(r'[a-f0-9]{32}')
try:
job_ids = [jid for jid in os.listdir(root) if m.match(jid)]
except OSError as error:
if error.errno == errno.ENOENT:
return
else:
raise
for i, job_id in enumerate(job_ids):
if not m.match(job_id):
continue
doc = {'signac_id': job_id, KEY_PATH: root}
if signac_id_alias:
doc[signac_id_alias] = job_id
fn_sp = os.path.join(root, job_id, fn_statepoint)
with open(fn_sp, 'rb') as file:
sp = json.loads(file.read().decode(encoding))
if statepoint_dict is not None:
statepoint_dict[job_id] = sp
if statepoint_index:
doc[statepoint_index] = sp
else:
doc.update(sp)
if include_job_document:
fn_doc = os.path.join(root, job_id, fn_job_document)
try:
with open(fn_doc, 'rb') as file:
doc.update(json.loads(file.read().decode(encoding)))
except IOError as error:
if error.errno != errno.ENOENT:
raise
yield doc
if job_ids:
logger.debug("Indexed workspace '{}', {} entries.".format(root, i+1))
class SignacProjectCrawler(RegexFileCrawler):
"""Index a signac project workspace.
Without any file format definitions, this crawler
yields index documents for each job, including
the statepoint and the job document.
See also: :py:class:`~.RegexFileCrawler`
:param root: The path to the project's root directory.
:type root: str"""
encoding = 'utf-8'
statepoint_index = 'statepoint'
fn_statepoint = 'signac_statepoint.json'
fn_job_document = 'signac_job_document.json'
signac_id_alias = '_id'
def __init__(self, root):
from .project import get_project
root = get_project(root=root).workspace()
self._statepoints = dict()
return super(SignacProjectCrawler, self).__init__(root=root)
def _get_job_id(self, dirpath):
return os.path.relpath(dirpath, self.root).split('/')[0]
def _read_statepoint(self, job_id):
fn_sp = os.path.join(self.root, job_id, self.fn_statepoint)
with open(fn_sp, 'rb') as file:
return json.loads(file.read().decode(self.encoding))
def _get_statepoint(self, job_id):
sp = self._statepoints.setdefault(job_id, self._read_statepoint(job_id))
assert calc_id(sp) == job_id
return sp
def get_statepoint(self, dirpath):
job_id = self._get_job_id(dirpath)
        return job_id, self._get_statepoint(job_id)
def process(self, doc, dirpath, fn):
if dirpath is not None:
job_id = self._get_job_id(dirpath)
statepoint = self._get_statepoint(job_id)
doc['signac_id'] = job_id
if self.statepoint_index:
doc[self.statepoint_index] = statepoint
else:
doc.update(statepoint)
return super(SignacProjectCrawler, self).process(doc, dirpath, fn)
def crawl(self, depth=0):
for doc in _index_signac_project_workspace(
root=self.root,
fn_statepoint=self.fn_statepoint,
fn_job_document=self.fn_job_document,
statepoint_index=self.statepoint_index,
signac_id_alias=self.signac_id_alias,
encoding=self.encoding,
statepoint_dict=self._statepoints):
yield self.process(doc, None, None)
for doc in super(SignacProjectCrawler, self).crawl(depth=depth):
yield doc
class MasterCrawler(BaseCrawler):
"""Compiles a master index from indexes defined in access modules.
An instance of this crawler will search the data space for access
modules, which by default are named ``signac_access.py``. Once such
a file is found, the crawler will import the module and try to execute
two special functions given that they are defined within the module's
global namespace: ``get_indexes()`` and ``get_crawlers()``.
    The ``get_indexes()`` function is assumed to yield one or multiple index
    generators, while the ``get_crawlers()`` function is assumed to yield
one or more crawler instances.
This is an example for such an access module:
.. code-block:: python
import signac
def get_indexes(root):
yield signac.index_files(root, '.*\.txt')
def get_crawlers(root):
yield MyCrawler(root)
    If the master crawler has tags, the ``get_indexes()`` function will
    always be ignored, while crawlers yielded from the ``get_crawlers()``
    function will only be executed if they match at least one of the tags.
    If the access module is completely empty, it will be executed
    as if it had the following directives:
.. code-block:: python
import signac
def get_indexes(root):
yield signac.get_project(root).index()
Tags for indexes yielded from the `get_indexes()` function can be specified
by assigning them directly to the function:
.. code-block:: python
def get_indexes(root):
yield signac.get_project(root).index()
get_indexes.tags = {'foo'}
:param root: The path to the root directory to crawl through.
:type root: str
    :param raise_on_error: Raise all exceptions encountered
        during crawling instead of ignoring them.
:type raise_on_error: bool
"""
FN_ACCESS_MODULE = 'signac_access.py'
"The filename of modules containing crawler definitions."
def __init__(self, root, raise_on_error=False):
self.raise_on_error = raise_on_error
super(MasterCrawler, self).__init__(root=root)
def _docs_from_module(self, dirpath, fn):
name = os.path.join(dirpath, fn)
module = _load_crawler(name)
logger.info("Crawling from module '{}'.".format(module.__file__))
has_tags = self.tags is not None and len(set(self.tags))
def _check_tags(tags):
if tags is None or not len(set(tags)):
if has_tags:
logger.info("Skipping, index has no defined tags.")
return False
else:
return True
else:
if not has_tags:
logger.info("Skipping, index requires tags.")
return False
elif set(self.tags).intersection(set(tags)):
return True # at least one tag matches!
else:
logger.info("Skipping, tag mismatch.")
return False
if not has_tags and _is_blank_module(module):
from .project import get_project
for doc in get_project(root=dirpath).index():
yield doc
if hasattr(module, 'get_indexes'):
if _check_tags(getattr(module.get_indexes, 'tags', None)):
for index in module.get_indexes(dirpath):
for doc in index:
yield doc
if hasattr(module, 'get_crawlers'):
for crawler in module.get_crawlers(dirpath):
logger.info("Executing slave crawler:\n {}".format(crawler))
if _check_tags(getattr(crawler, 'tags', None)):
for doc in crawler.crawl():
doc.setdefault(
KEY_PROJECT, os.path.relpath(dirpath, self.root))
yield doc
def docs_from_file(self, dirpath, fn):
"""Compile master index from file in case it is an access module.
:param dirpath: The path of the file relative to root.
:param fn: The filename of the file.
:yields: Index documents.
"""
if fn == self.FN_ACCESS_MODULE:
try:
for doc in self._docs_from_module(dirpath, fn):
yield doc
except Exception:
logger.error("Error while indexing from module '{}'.".format(
os.path.join(dirpath, fn)))
if self.raise_on_error:
raise
else:
logger.debug("Completed indexing from '{}'.".format(os.path.join(dirpath, fn)))
def _load_crawler(name):
if six.PY2:
return imp.load_source(os.path.splitext(name)[0], name)
else:
return importlib.machinery.SourceFileLoader(name, name).load_module()
def fetch(doc_or_id, mode='r', mirrors=None, num_tries=3, timeout=60, ignore_local=False):
"""Fetch the file associated with this document or file id.
This function retrieves a file associated with the provided
index document or file id and behaves like the built-in
:py:func:`open` function, e.g.:
.. code-block:: python
for doc in index:
with signac.fetch(doc) as file:
do_something_with(file)
:param doc_or_id: A file_id or a document with a file_id value.
:param mode: Mode to use for opening files.
:param mirrors: An optional set of mirrors to fetch the file from.
:param num_tries: The number of automatic retry attempts in case of
mirror connection errors.
:type num_tries: int
:param timeout: The time in seconds to wait before an
automatic retry attempt.
:type timeout: int
:returns: The file associated with the document or file id.
:rtype: A file-like object
"""
if doc_or_id is None:
raise ValueError("Argument 'doc_or_id' must not be None!")
file_id = doc_or_id if isinstance(doc_or_id, str) else doc_or_id.get('file_id')
if not ignore_local:
try:
fn = os.path.join(doc_or_id['root'], doc_or_id['filename'])
return open(fn, mode=mode)
except KeyError:
raise errors.FetchError("Insufficient file meta data for fetch.", doc_or_id)
except OSError as error:
if error.errno == errno.ENOENT:
if file_id is None:
raise errors.FetchError("Failed to fetch '{}'.".format(doc_or_id))
if mirrors is None:
raise errors.FetchError("No mirrors provided!")
else:
for i in range(num_tries):
for mirror in mirrors:
try:
return mirror.get(file_id, mode=mode)
except mirror.AutoRetry as error:
logger.warning(error)
sleep(timeout)
except mirror.FileNotFoundError as error:
logger.debug(error)
else:
raise errors.FetchError("Unable to fetch object for '{}'.".format(file_id))
def fetch_one(doc, *args, **kwargs):
raise DeprecationWarning(
"The fetch_one() function has been removed. Use fetch() instead.")
def fetched(docs):
"""Iterate over documents and yield associated files."""
for doc in docs:
if 'file_id' in doc:
yield doc, fetch(doc)
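# Illustrative usage of fetched() (sketch): iterate over index documents and
# read the data of each associated file.
#
#     for doc, file in fetched(docs):
#         data = file.read()
#         file.close()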
def _export_to_mirror(file, file_id, mirror):
"Export a file-like object with file_id to mirror."
with mirror.new_file(_id=file_id) as dst:
dst.write(file.read())
def export_to_mirror(doc, mirror, num_tries=3, timeout=60):
"""Export a file associated with doc to mirror.
:param doc: A document with a file_id entry.
:param mirror: A file-system object to export the file to.
:param num_tries: The number of automatic retry attempts in case of
mirror connection errors.
:type num_tries: int
:param timeout: The time in seconds to wait before an
automatic retry attempt.
:type timeout: int
:returns: The file id after successful export.
"""
if 'file_id' not in doc:
raise errors.ExportError("Doc '{}' does not have a file_id entry.".format(doc))
for i in range(num_tries):
try:
with fetch(doc, mode='rb') as file:
_export_to_mirror(file, doc['file_id'], mirror)
except mirror.FileExistsError:
logger.debug(
"File with id '{}' already exported, skipping.".format(doc['file_id']))
break
except mirror.AutoRetry as error:
logger.warning("Error during export: '{}', retrying...".format(error))
sleep(timeout)
else:
logger.debug(
"Stored file with id '{}' in mirror '{}'.".format(doc['file_id'], mirror))
return doc['file_id']
else:
raise errors.ExportError(doc)
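# Illustrative usage of export_to_mirror() (sketch; `mirror` stands for a
# file-system handle as expected by this function and is hypothetical here):
#
#     for doc in docs:
#         if 'file_id' in doc:
#             export_to_mirror(doc, mirror)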
def export_one(doc, index, mirrors=None, num_tries=3, timeout=60):
"""Export one document to index and an optionally associated file to mirrors.
:param doc: A document with a file_id entry.
    :param index: The index collection to export to.
:param mirrors: An optional set of mirrors to export files to.
:param num_tries: The number of automatic retry attempts in case of
mirror connection errors.
:type num_tries: int
:param timeout: The time in seconds to wait before an
automatic retry attempt.
:type timeout: int
:returns: The id and file id after successful export.
"""
index.replace_one({'_id': doc['_id']}, doc, upsert=True)
if mirrors and 'file_id' in doc:
for mirror in mirrors:
export_to_mirror(doc, mirror, num_tries, timeout)
return doc['_id'], doc['file_id']
else:
return doc['_id'], None
def export(docs, index, mirrors=None, update=False,
num_tries=3, timeout=60, **kwargs):
"""Export docs to index and optionally associated files to mirrors.
The behavior of this function is equivalent to:
.. code-block:: python
for doc in docs:
export_one(doc, index, mirrors, num_tries)
If the `update` argument is set to True, the export algorithm will
    automatically identify stale index documents, i.e. documents
that refer to files or state points that have been removed and are
no longer part of the data space. Any document which shares the
`root`, but not the `_id` field with any of the updated documents
is considered stale and removed. Using `update` in combination with
an empty docs sequence will raise `ExportError`, since it is not
possible to identify stale documents in that case.
.. note::
This function will automatically delegate to specialized
implementations for special index types. For example, if
the index argument is a MongoDB document collection, the
index documents will be exported via :py:func:`~.export_pymongo`.
:param docs: The index documents to export.
:param index: The collection to export the index to.
:param mirrors: An optional set of mirrors to export files to.
    :param update: If True, remove stale index documents, i.e.
documents that refer to files or state points that no longer exist.
:type update: bool
:param num_tries: The number of automatic retry attempts in case of
mirror connection errors.
:type num_tries: int
:param timeout: The time in seconds to wait before an
automatic retry attempt.
:type timeout: int
:param kwargs: Optional keyword arguments to pass to
delegate implementations.
:raises ExportError: When using the update argument in combination with
an empty docs sequence.
"""
try:
import pymongo
except ImportError:
pass
else:
if isinstance(index, pymongo.collection.Collection):
logger.info("Using optimized export function export_pymongo().")
return export_pymongo(docs=docs, index=index, mirrors=mirrors,
update=update, num_tries=num_tries, timeout=timeout, **kwargs)
ids = defaultdict(list)
for doc in docs:
_id, _ = export_one(doc, index, mirrors, num_tries, timeout, **kwargs)
if update:
root = doc.get('root')
if root is not None:
ids[root].append(_id)
if update:
if ids:
stale = set()
for root in ids:
docs_ = index.find({'root': root})
all_ = {doc['_id'] for doc in docs_}
stale.update(all_.difference(ids[root]))
logger.info("Removing {} stale documents.".format(len(stale)))
for _id in set(stale):
index.delete_one(dict(_id=_id))
else:
raise errors.ExportError(
"The exported docs sequence is empty! Unable to update!")
def _export_pymongo(docs, operations, index, mirrors, num_tries, timeout):
"""Export docs via operations to index and files to mirrors."""
import pymongo
if mirrors is not None:
for mirror in mirrors:
for doc in docs:
if 'file_id' in doc:
export_to_mirror(doc, mirror, num_tries, timeout)
for i in range(num_tries):
try:
index.bulk_write(operations)
break
except pymongo.errors.AutoReconnect as error:
logger.warning(error)
sleep(timeout)
else:
raise errors.ExportError()
def export_pymongo(docs, index, mirrors=None, update=False, num_tries=3, timeout=60, chunksize=100):
"""Optimized :py:func:`~.export` function for pymongo index collections.
    The behavior of this function is roughly equivalent to:
.. code-block:: python
for doc in docs:
export_one(doc, index, mirrors, num_tries)
.. note::
All index documents must be JSON-serializable to
be able to be exported to a MongoDB collection.
:param docs: The index documents to export.
:param index: The database collection to export the index to.
:type index: :class:`pymongo.collection.Collection`
:param num_tries: The number of automatic retry attempts in case of
mirror connection errors.
:type num_tries: int
:param timeout: The time in seconds to wait before an
automatic retry attempt.
:type timeout: int
:param chunksize: The buffer size for export operations.
:type chunksize: int"""
import pymongo
logger.info("Exporting to pymongo database collection index '{}'.".format(index))
chunk = []
operations = []
ids = defaultdict(list)
for doc in docs:
f = {'_id': doc['_id']}
if update:
root = doc.get('root')
if root is not None:
ids[root].append(doc['_id'])
chunk.append(doc)
operations.append(pymongo.ReplaceOne(f, doc, upsert=True))
if len(chunk) >= chunksize:
logger.debug("Pushing chunk.")
_export_pymongo(chunk, operations, index, mirrors, num_tries, timeout)
chunk[:] = []
operations[:] = []
if len(operations):
logger.debug("Pushing final chunk.")
_export_pymongo(chunk, operations, index, mirrors, num_tries, timeout)
if update:
if ids:
stale = set()
for root in ids:
docs_ = index.find({'root': root})
all_ = {doc['_id'] for doc in docs_}
stale.update(all_.difference(ids[root]))
logger.info("Removing {} stale documents.".format(len(stale)))
for _id in set(stale):
index.delete_one(dict(_id=_id))
else:
raise errors.ExportError(
"The exported docs sequence is empty! Unable to update!")
def index_files(root='.', formats=None, depth=0):
"""Generate a file index.
This generator function yields file index documents,
where each index document corresponds to one file.
To index all files in the current working directory,
simply execute:
.. code-block:: python
for doc in signac.index_files():
print(doc)
A file associated with a file index document can be
fetched via the :py:func:`fetch` function:
.. code-block:: python
for doc in signac.index_files():
with signac.fetch(doc) as file:
print(file.read())
This is especially useful if the file index is part of
a collection (:py:class:`.Collection`) which can be searched
for specific entries.
    To limit the file index to files with specific filename
    formats, provide a regular expression as the formats argument.
    To index all files with the file ending `.txt`, execute:
.. code-block:: python
for doc in signac.index_files(formats='.*\.txt'):
print(doc)
    We can specify particular formats by providing a dictionary as the
    ``formats`` argument, where the key is the filename pattern and
    the value is an arbitrary format string, e.g.:
.. code-block:: python
for doc in signac.index_files(formats=
{'.*\.txt': 'TextFile', '.*\.zip': 'ZipFile'}):
print(doc)
:param root: The directory to index, defaults to the
current working directory.
:type root: str
:param formats: Limit the index to files that match the
given regular expression and optionally associate formats
with given patterns.
:param depth: Limit the search to the specified directory depth.
:type depth: int
:yields: The file index documents as dicts.
"""
if formats is None:
formats = {'.*': 'File'}
if six.PY2:
if isinstance(formats, basestring): # noqa
formats = {formats: 'File'}
else:
if isinstance(formats, str):
formats = {formats: 'File'}
class Crawler(RegexFileCrawler):
pass
for regex, fmt in formats.items():
Crawler.define(regex, fmt)
for doc in Crawler(root).crawl(depth=depth):
yield doc
def index(root='.', tags=None, depth=0, **kwargs):
"""Generate a master index.
A master index is compiled from other indexes by searching
for modules named ``signac_access.py`` and compiling all
indexes which are yielded from a function ``get_indexes(root)``
defined within that module as well as the indexes generated by
crawlers yielded from a function ``get_crawlers(root)`` defined
within that module.
This is a minimal example for a ``signac_access.py`` file:
.. code-block:: python
import signac
def get_indexes(root):
yield signac.index_files(root, '.*\.txt')
Internally, this function constructs an instance of
:py:class:`.MasterCrawler` and all extra key-word arguments
will be forwarded to the constructor of said master crawler.
:param root: Look for access modules under this directory path.
:type root: str
:param tags: If tags are provided, do not execute slave crawlers
that don't match the same tags.
    :param depth: Limit the search to the specified directory depth.
    :type depth: int
    :param kwargs: These keyword-arguments are forwarded to the
        internal MasterCrawler instance.
:yields: The master index documents as instances of dict.
"""
class Crawler(MasterCrawler):
pass
if tags is not None:
Crawler.tags = tags
for doc in Crawler(root, **kwargs).crawl(depth=depth):
yield doc
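# Illustrative usage of index() (sketch): compile a master index below the
# current working directory, restricted to crawlers tagged 'foo'.
#
#     for doc in index(root='.', tags={'foo'}, depth=2):
#         print(doc['_id'])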
import math
import sys
import numpy
from numpy.testing import (assert_, assert_equal, assert_array_equal,
assert_array_almost_equal, suppress_warnings)
import pytest
from pytest import raises as assert_raises
import scipy.ndimage as ndimage
from . import types
eps = 1e-12
class TestNdimageInterpolation:
@pytest.mark.parametrize(
'mode, expected_value',
[('nearest', [1.5, 2.5, 3.5, 4, 4, 4, 4]),
('wrap', [1.5, 2.5, 3.5, 1.5, 2.5, 3.5, 1.5]),
# ('reflect', TODO),
('mirror', [1.5, 2.5, 3.5, 3.5, 2.5, 1.5, 1.5]),
('constant', [1.5, 2.5, 3.5, -1, -1, -1, -1])]
)
def test_boundaries(self, mode, expected_value):
def shift(x):
return (x[0] + 0.5,)
data = numpy.array([1, 2, 3, 4.])
assert_array_equal(
expected_value,
ndimage.geometric_transform(data, shift, cval=-1, mode=mode,
output_shape=(7,), order=1))
@pytest.mark.parametrize(
'mode, expected_value',
[('nearest', [1, 1, 2, 3]),
('wrap', [3, 1, 2, 3]),
# ('reflect', TODO),
('mirror', [2, 1, 2, 3]),
('constant', [-1, 1, 2, 3])]
)
def test_boundaries2(self, mode, expected_value):
def shift(x):
return (x[0] - 0.9,)
data = numpy.array([1, 2, 3, 4])
assert_array_equal(
expected_value,
ndimage.geometric_transform(data, shift, cval=-1, mode=mode,
output_shape=(4,)))
@pytest.mark.parametrize('order', range(2, 6))
@pytest.mark.parametrize('dtype', types)
def test_spline01(self, dtype, order):
data = numpy.ones([], dtype)
out = ndimage.spline_filter(data, order=order)
assert_array_almost_equal(out, 1)
@pytest.mark.parametrize('order', range(2, 6))
@pytest.mark.parametrize('dtype', types)
def test_spline02(self, dtype, order):
data = numpy.array([1], dtype)
out = ndimage.spline_filter(data, order=order)
assert_array_almost_equal(out, [1])
@pytest.mark.parametrize('order', range(2, 6))
@pytest.mark.parametrize('dtype', types)
def test_spline03(self, dtype, order):
data = numpy.ones([], dtype)
out = ndimage.spline_filter(data, order, output=dtype)
assert_array_almost_equal(out, 1)
@pytest.mark.parametrize('order', range(2, 6))
@pytest.mark.parametrize('dtype', types)
def test_spline04(self, dtype, order):
data = numpy.ones([4], dtype)
out = ndimage.spline_filter(data, order)
assert_array_almost_equal(out, [1, 1, 1, 1])
@pytest.mark.parametrize('order', range(2, 6))
@pytest.mark.parametrize('dtype', types)
def test_spline05(self, dtype, order):
data = numpy.ones([4, 4], dtype)
out = ndimage.spline_filter(data, order=order)
assert_array_almost_equal(out, [[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]])
@pytest.mark.parametrize('order', range(0, 6))
def test_geometric_transform01(self, order):
data = numpy.array([1])
def mapping(x):
return x
out = ndimage.geometric_transform(data, mapping, data.shape,
order=order)
assert_array_almost_equal(out, [1])
@pytest.mark.parametrize('order', range(0, 6))
def test_geometric_transform02(self, order):
data = numpy.ones([4])
def mapping(x):
return x
out = ndimage.geometric_transform(data, mapping, data.shape,
order=order)
assert_array_almost_equal(out, [1, 1, 1, 1])
@pytest.mark.parametrize('order', range(0, 6))
def test_geometric_transform03(self, order):
data = numpy.ones([4])
def mapping(x):
return (x[0] - 1,)
out = ndimage.geometric_transform(data, mapping, data.shape,
order=order)
assert_array_almost_equal(out, [0, 1, 1, 1])
@pytest.mark.parametrize('order', range(0, 6))
def test_geometric_transform04(self, order):
data = numpy.array([4, 1, 3, 2])
def mapping(x):
return (x[0] - 1,)
out = ndimage.geometric_transform(data, mapping, data.shape,
order=order)
assert_array_almost_equal(out, [0, 4, 1, 3])
@pytest.mark.parametrize('order', range(0, 6))
def test_geometric_transform05(self, order):
data = numpy.array([[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]])
def mapping(x):
return (x[0], x[1] - 1)
out = ndimage.geometric_transform(data, mapping, data.shape,
order=order)
assert_array_almost_equal(out, [[0, 1, 1, 1],
[0, 1, 1, 1],
[0, 1, 1, 1]])
@pytest.mark.parametrize('order', range(0, 6))
def test_geometric_transform06(self, order):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
def mapping(x):
return (x[0], x[1] - 1)
out = ndimage.geometric_transform(data, mapping, data.shape,
order=order)
assert_array_almost_equal(out, [[0, 4, 1, 3],
[0, 7, 6, 8],
[0, 3, 5, 3]])
@pytest.mark.parametrize('order', range(0, 6))
def test_geometric_transform07(self, order):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
def mapping(x):
return (x[0] - 1, x[1])
out = ndimage.geometric_transform(data, mapping, data.shape,
order=order)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[4, 1, 3, 2],
[7, 6, 8, 5]])
@pytest.mark.parametrize('order', range(0, 6))
def test_geometric_transform08(self, order):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
def mapping(x):
return (x[0] - 1, x[1] - 1)
out = ndimage.geometric_transform(data, mapping, data.shape,
order=order)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
@pytest.mark.parametrize('order', range(0, 6))
def test_geometric_transform10(self, order):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
def mapping(x):
return (x[0] - 1, x[1] - 1)
if (order > 1):
filtered = ndimage.spline_filter(data, order=order)
else:
filtered = data
out = ndimage.geometric_transform(filtered, mapping, data.shape,
order=order, prefilter=False)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
@pytest.mark.parametrize('order', range(0, 6))
def test_geometric_transform13(self, order):
data = numpy.ones([2], numpy.float64)
def mapping(x):
return (x[0] // 2,)
out = ndimage.geometric_transform(data, mapping, [4], order=order)
assert_array_almost_equal(out, [1, 1, 1, 1])
@pytest.mark.parametrize('order', range(0, 6))
def test_geometric_transform14(self, order):
data = [1, 5, 2, 6, 3, 7, 4, 4]
def mapping(x):
return (2 * x[0],)
out = ndimage.geometric_transform(data, mapping, [4], order=order)
assert_array_almost_equal(out, [1, 2, 3, 4])
@pytest.mark.parametrize('order', range(0, 6))
def test_geometric_transform15(self, order):
data = [1, 2, 3, 4]
def mapping(x):
return (x[0] / 2,)
out = ndimage.geometric_transform(data, mapping, [8], order=order)
assert_array_almost_equal(out[::2], [1, 2, 3, 4])
@pytest.mark.parametrize('order', range(0, 6))
def test_geometric_transform16(self, order):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9.0, 10, 11, 12]]
def mapping(x):
return (x[0], x[1] * 2)
out = ndimage.geometric_transform(data, mapping, (3, 2),
order=order)
assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]])
@pytest.mark.parametrize('order', range(0, 6))
def test_geometric_transform17(self, order):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
def mapping(x):
return (x[0] * 2, x[1])
out = ndimage.geometric_transform(data, mapping, (1, 4),
order=order)
assert_array_almost_equal(out, [[1, 2, 3, 4]])
@pytest.mark.parametrize('order', range(0, 6))
def test_geometric_transform18(self, order):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
def mapping(x):
return (x[0] * 2, x[1] * 2)
out = ndimage.geometric_transform(data, mapping, (1, 2),
order=order)
assert_array_almost_equal(out, [[1, 3]])
@pytest.mark.parametrize('order', range(0, 6))
def test_geometric_transform19(self, order):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
def mapping(x):
return (x[0], x[1] / 2)
out = ndimage.geometric_transform(data, mapping, (3, 8),
order=order)
assert_array_almost_equal(out[..., ::2], data)
@pytest.mark.parametrize('order', range(0, 6))
def test_geometric_transform20(self, order):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
def mapping(x):
return (x[0] / 2, x[1])
out = ndimage.geometric_transform(data, mapping, (6, 4),
order=order)
assert_array_almost_equal(out[::2, ...], data)
@pytest.mark.parametrize('order', range(0, 6))
def test_geometric_transform21(self, order):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
def mapping(x):
return (x[0] / 2, x[1] / 2)
out = ndimage.geometric_transform(data, mapping, (6, 8),
order=order)
assert_array_almost_equal(out[::2, ::2], data)
@pytest.mark.parametrize('order', range(0, 6))
def test_geometric_transform22(self, order):
data = numpy.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]], numpy.float64)
def mapping1(x):
return (x[0] / 2, x[1] / 2)
def mapping2(x):
return (x[0] * 2, x[1] * 2)
out = ndimage.geometric_transform(data, mapping1,
(6, 8), order=order)
out = ndimage.geometric_transform(out, mapping2,
(3, 4), order=order)
assert_array_almost_equal(out, data)
@pytest.mark.parametrize('order', range(0, 6))
def test_geometric_transform23(self, order):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
def mapping(x):
return (1, x[0] * 2)
out = ndimage.geometric_transform(data, mapping, (2,), order=order)
out = out.astype(numpy.int32)
assert_array_almost_equal(out, [5, 7])
@pytest.mark.parametrize('order', range(0, 6))
def test_geometric_transform24(self, order):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
def mapping(x, a, b):
return (a, x[0] * b)
out = ndimage.geometric_transform(
data, mapping, (2,), order=order, extra_arguments=(1,),
extra_keywords={'b': 2})
assert_array_almost_equal(out, [5, 7])
def test_geometric_transform_endianness_with_output_parameter(self):
# geometric transform given output ndarray or dtype with
# non-native endianness. see issue #4127
data = numpy.array([1])
def mapping(x):
return x
for out in [data.dtype, data.dtype.newbyteorder(),
numpy.empty_like(data),
numpy.empty_like(data).astype(data.dtype.newbyteorder())]:
returned = ndimage.geometric_transform(data, mapping, data.shape,
output=out)
result = out if returned is None else returned
assert_array_almost_equal(result, [1])
def test_geometric_transform_with_string_output(self):
data = numpy.array([1])
def mapping(x):
return x
out = ndimage.geometric_transform(data, mapping, output='f')
assert_(out.dtype is numpy.dtype('f'))
assert_array_almost_equal(out, [1])
@pytest.mark.parametrize('order', range(0, 6))
def test_map_coordinates01(self, order):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
idx = numpy.indices(data.shape)
idx -= 1
out = ndimage.map_coordinates(data, idx, order=order)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
@pytest.mark.parametrize('order', range(0, 6))
def test_map_coordinates02(self, order):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
idx = numpy.indices(data.shape, numpy.float64)
idx -= 0.5
out1 = ndimage.shift(data, 0.5, order=order)
out2 = ndimage.map_coordinates(data, idx, order=order)
assert_array_almost_equal(out1, out2)
def test_map_coordinates03(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]], order='F')
idx = numpy.indices(data.shape) - 1
out = ndimage.map_coordinates(data, idx)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
assert_array_almost_equal(out, ndimage.shift(data, (1, 1)))
idx = numpy.indices(data[::2].shape) - 1
out = ndimage.map_coordinates(data[::2], idx)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3]])
assert_array_almost_equal(out, ndimage.shift(data[::2], (1, 1)))
idx = numpy.indices(data[:, ::2].shape) - 1
out = ndimage.map_coordinates(data[:, ::2], idx)
assert_array_almost_equal(out, [[0, 0], [0, 4], [0, 7]])
assert_array_almost_equal(out, ndimage.shift(data[:, ::2], (1, 1)))
def test_map_coordinates_endianness_with_output_parameter(self):
# output parameter given as array or dtype with either endianness
# see issue #4127
data = numpy.array([[1, 2], [7, 6]])
expected = numpy.array([[0, 0], [0, 1]])
idx = numpy.indices(data.shape)
idx -= 1
for out in [
data.dtype,
data.dtype.newbyteorder(),
numpy.empty_like(expected),
numpy.empty_like(expected).astype(expected.dtype.newbyteorder())
]:
returned = ndimage.map_coordinates(data, idx, output=out)
result = out if returned is None else returned
assert_array_almost_equal(result, expected)
def test_map_coordinates_with_string_output(self):
data = numpy.array([[1]])
idx = numpy.indices(data.shape)
out = ndimage.map_coordinates(data, idx, output='f')
assert_(out.dtype is numpy.dtype('f'))
assert_array_almost_equal(out, [[1]])
@pytest.mark.skipif('win32' in sys.platform or numpy.intp(0).itemsize < 8,
reason='do not run on 32 bit or windows '
'(no sparse memory)')
def test_map_coordinates_large_data(self):
# check crash on large data
try:
n = 30000
a = numpy.empty(n**2, dtype=numpy.float32).reshape(n, n)
# fill the part we might read
a[n - 3:, n - 3:] = 0
ndimage.map_coordinates(a, [[n - 1.5], [n - 1.5]], order=1)
except MemoryError as e:
raise pytest.skip('Not enough memory available') from e
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform01(self, order):
data = numpy.array([1])
out = ndimage.affine_transform(data, [[1]], order=order)
assert_array_almost_equal(out, [1])
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform02(self, order):
data = numpy.ones([4])
out = ndimage.affine_transform(data, [[1]], order=order)
assert_array_almost_equal(out, [1, 1, 1, 1])
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform03(self, order):
data = numpy.ones([4])
out = ndimage.affine_transform(data, [[1]], -1, order=order)
assert_array_almost_equal(out, [0, 1, 1, 1])
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform04(self, order):
data = numpy.array([4, 1, 3, 2])
out = ndimage.affine_transform(data, [[1]], -1, order=order)
assert_array_almost_equal(out, [0, 4, 1, 3])
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform05(self, order):
data = numpy.array([[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]])
out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
[0, -1], order=order)
assert_array_almost_equal(out, [[0, 1, 1, 1],
[0, 1, 1, 1],
[0, 1, 1, 1]])
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform06(self, order):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
[0, -1], order=order)
assert_array_almost_equal(out, [[0, 4, 1, 3],
[0, 7, 6, 8],
[0, 3, 5, 3]])
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform07(self, order):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
[-1, 0], order=order)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[4, 1, 3, 2],
[7, 6, 8, 5]])
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform08(self, order):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
[-1, -1], order=order)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform09(self, order):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
if (order > 1):
filtered = ndimage.spline_filter(data, order=order)
else:
filtered = data
out = ndimage.affine_transform(filtered, [[1, 0], [0, 1]],
[-1, -1], order=order,
prefilter=False)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform10(self, order):
data = numpy.ones([2], numpy.float64)
out = ndimage.affine_transform(data, [[0.5]], output_shape=(4,),
order=order)
assert_array_almost_equal(out, [1, 1, 1, 0])
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform11(self, order):
data = [1, 5, 2, 6, 3, 7, 4, 4]
out = ndimage.affine_transform(data, [[2]], 0, (4,), order=order)
assert_array_almost_equal(out, [1, 2, 3, 4])
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform12(self, order):
data = [1, 2, 3, 4]
out = ndimage.affine_transform(data, [[0.5]], 0, (8,), order=order)
assert_array_almost_equal(out[::2], [1, 2, 3, 4])
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform13(self, order):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9.0, 10, 11, 12]]
out = ndimage.affine_transform(data, [[1, 0], [0, 2]], 0, (3, 2),
order=order)
assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]])
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform14(self, order):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
out = ndimage.affine_transform(data, [[2, 0], [0, 1]], 0, (1, 4),
order=order)
assert_array_almost_equal(out, [[1, 2, 3, 4]])
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform15(self, order):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
out = ndimage.affine_transform(data, [[2, 0], [0, 2]], 0, (1, 2),
order=order)
assert_array_almost_equal(out, [[1, 3]])
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform16(self, order):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
out = ndimage.affine_transform(data, [[1, 0.0], [0, 0.5]], 0,
(3, 8), order=order)
assert_array_almost_equal(out[..., ::2], data)
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform17(self, order):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
out = ndimage.affine_transform(data, [[0.5, 0], [0, 1]], 0,
(6, 4), order=order)
assert_array_almost_equal(out[::2, ...], data)
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform18(self, order):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0,
(6, 8), order=order)
assert_array_almost_equal(out[::2, ::2], data)
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform19(self, order):
data = numpy.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]], numpy.float64)
out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0,
(6, 8), order=order)
out = ndimage.affine_transform(out, [[2.0, 0], [0, 2.0]], 0,
(3, 4), order=order)
assert_array_almost_equal(out, data)
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform20(self, order):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
out = ndimage.affine_transform(data, [[0], [2]], 0, (2,),
order=order)
assert_array_almost_equal(out, [1, 3])
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform21(self, order):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
out = ndimage.affine_transform(data, [[2], [0]], 0, (2,),
order=order)
assert_array_almost_equal(out, [1, 9])
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform22(self, order):
# shift and offset interaction; see issue #1547
data = numpy.array([4, 1, 3, 2])
out = ndimage.affine_transform(data, [[2]], [-1], (3,),
order=order)
assert_array_almost_equal(out, [0, 1, 2])
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform23(self, order):
# shift and offset interaction; see issue #1547
data = numpy.array([4, 1, 3, 2])
out = ndimage.affine_transform(data, [[0.5]], [-1], (8,),
order=order)
assert_array_almost_equal(out[::2], [0, 4, 1, 3])
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform24(self, order):
# consistency between diagonal and non-diagonal case; see issue #1547
data = numpy.array([4, 1, 3, 2])
with suppress_warnings() as sup:
sup.filter(UserWarning,
'The behavior of affine_transform with a 1-D array .* '
'has changed')
out1 = ndimage.affine_transform(data, [2], -1, order=order)
out2 = ndimage.affine_transform(data, [[2]], -1, order=order)
assert_array_almost_equal(out1, out2)
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform25(self, order):
# consistency between diagonal and non-diagonal case; see issue #1547
data = numpy.array([4, 1, 3, 2])
with suppress_warnings() as sup:
sup.filter(UserWarning,
'The behavior of affine_transform with a 1-D array .* '
'has changed')
out1 = ndimage.affine_transform(data, [0.5], -1, order=order)
out2 = ndimage.affine_transform(data, [[0.5]], -1, order=order)
assert_array_almost_equal(out1, out2)
@pytest.mark.parametrize('order', range(0, 6))
def test_affine_transform26(self, order):
# test homogeneous coordinates
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
if (order > 1):
filtered = ndimage.spline_filter(data, order=order)
else:
filtered = data
tform_original = numpy.eye(2)
offset_original = -numpy.ones((2, 1))
tform_h1 = numpy.hstack((tform_original, offset_original))
tform_h2 = numpy.vstack((tform_h1, [[0, 0, 1]]))
out1 = ndimage.affine_transform(filtered, tform_original,
offset_original.ravel(),
order=order, prefilter=False)
out2 = ndimage.affine_transform(filtered, tform_h1, order=order,
prefilter=False)
out3 = ndimage.affine_transform(filtered, tform_h2, order=order,
prefilter=False)
for out in [out1, out2, out3]:
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
def test_affine_transform27(self):
# test valid homogeneous transformation matrix
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
tform_h1 = numpy.hstack((numpy.eye(2), -numpy.ones((2, 1))))
tform_h2 = numpy.vstack((tform_h1, [[5, 2, 1]]))
assert_raises(ValueError, ndimage.affine_transform, data, tform_h2)
def test_affine_transform_1d_endianness_with_output_parameter(self):
# 1d affine transform given output ndarray or dtype with
# either endianness. see issue #7388
data = numpy.ones((2, 2))
for out in [numpy.empty_like(data),
numpy.empty_like(data).astype(data.dtype.newbyteorder()),
data.dtype, data.dtype.newbyteorder()]:
with suppress_warnings() as sup:
sup.filter(UserWarning,
'The behavior of affine_transform with a 1-D array '
'.* has changed')
returned = ndimage.affine_transform(data, [1, 1], output=out)
result = out if returned is None else returned
assert_array_almost_equal(result, [[1, 1], [1, 1]])
def test_affine_transform_multi_d_endianness_with_output_parameter(self):
# affine transform given output ndarray or dtype with either endianness
# see issue #4127
data = numpy.array([1])
for out in [data.dtype, data.dtype.newbyteorder(),
numpy.empty_like(data),
numpy.empty_like(data).astype(data.dtype.newbyteorder())]:
returned = ndimage.affine_transform(data, [[1]], output=out)
result = out if returned is None else returned
assert_array_almost_equal(result, [1])
def test_affine_transform_with_string_output(self):
data = numpy.array([1])
out = ndimage.affine_transform(data, [[1]], output='f')
assert_(out.dtype is numpy.dtype('f'))
assert_array_almost_equal(out, [1])
@pytest.mark.parametrize('order', range(0, 6))
def test_shift01(self, order):
data = numpy.array([1])
out = ndimage.shift(data, [1], order=order)
assert_array_almost_equal(out, [0])
@pytest.mark.parametrize('order', range(0, 6))
def test_shift02(self, order):
data = numpy.ones([4])
out = ndimage.shift(data, [1], order=order)
assert_array_almost_equal(out, [0, 1, 1, 1])
@pytest.mark.parametrize('order', range(0, 6))
def test_shift03(self, order):
data = numpy.ones([4])
out = ndimage.shift(data, -1, order=order)
assert_array_almost_equal(out, [1, 1, 1, 0])
@pytest.mark.parametrize('order', range(0, 6))
def test_shift04(self, order):
data = numpy.array([4, 1, 3, 2])
out = ndimage.shift(data, 1, order=order)
assert_array_almost_equal(out, [0, 4, 1, 3])
@pytest.mark.parametrize('order', range(0, 6))
def test_shift05(self, order):
data = numpy.array([[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]])
out = ndimage.shift(data, [0, 1], order=order)
assert_array_almost_equal(out, [[0, 1, 1, 1],
[0, 1, 1, 1],
[0, 1, 1, 1]])
@pytest.mark.parametrize('order', range(0, 6))
def test_shift06(self, order):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
out = ndimage.shift(data, [0, 1], order=order)
assert_array_almost_equal(out, [[0, 4, 1, 3],
[0, 7, 6, 8],
[0, 3, 5, 3]])
@pytest.mark.parametrize('order', range(0, 6))
def test_shift07(self, order):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
out = ndimage.shift(data, [1, 0], order=order)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[4, 1, 3, 2],
[7, 6, 8, 5]])
@pytest.mark.parametrize('order', range(0, 6))
def test_shift08(self, order):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
out = ndimage.shift(data, [1, 1], order=order)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
@pytest.mark.parametrize('order', range(0, 6))
def test_shift09(self, order):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
if (order > 1):
filtered = ndimage.spline_filter(data, order=order)
else:
filtered = data
out = ndimage.shift(filtered, [1, 1], order=order, prefilter=False)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
@pytest.mark.parametrize('order', range(0, 6))
def test_zoom1(self, order):
for z in [2, [2, 2]]:
arr = numpy.array(list(range(25))).reshape((5, 5)).astype(float)
arr = ndimage.zoom(arr, z, order=order)
assert_equal(arr.shape, (10, 10))
assert_(numpy.all(arr[-1, :] != 0))
assert_(numpy.all(arr[-1, :] >= (20 - eps)))
assert_(numpy.all(arr[0, :] <= (5 + eps)))
assert_(numpy.all(arr >= (0 - eps)))
assert_(numpy.all(arr <= (24 + eps)))
def test_zoom2(self):
arr = numpy.arange(12).reshape((3, 4))
out = ndimage.zoom(ndimage.zoom(arr, 2), 0.5)
assert_array_equal(out, arr)
def test_zoom3(self):
arr = numpy.array([[1, 2]])
out1 = ndimage.zoom(arr, (2, 1))
out2 = ndimage.zoom(arr, (1, 2))
assert_array_almost_equal(out1, numpy.array([[1, 2], [1, 2]]))
assert_array_almost_equal(out2, numpy.array([[1, 1, 2, 2]]))
@pytest.mark.parametrize('order', range(0, 6))
def test_zoom_affine01(self, order):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
with suppress_warnings() as sup:
sup.filter(UserWarning,
'The behavior of affine_transform with a 1-D array .* '
'has changed')
out = ndimage.affine_transform(data, [0.5, 0.5], 0,
(6, 8), order=order)
assert_array_almost_equal(out[::2, ::2], data)
def test_zoom_infinity(self):
# Ticket #1419 regression test
dim = 8
ndimage.zoom(numpy.zeros((dim, dim)), 1. / dim, mode='nearest')
def test_zoom_zoomfactor_one(self):
# Ticket #1122 regression test
arr = numpy.zeros((1, 5, 5))
zoom = (1.0, 2.0, 2.0)
out = ndimage.zoom(arr, zoom, cval=7)
ref = numpy.zeros((1, 10, 10))
assert_array_almost_equal(out, ref)
def test_zoom_output_shape_roundoff(self):
arr = numpy.zeros((3, 11, 25))
zoom = (4.0 / 3, 15.0 / 11, 29.0 / 25)
out = ndimage.zoom(arr, zoom)
assert_array_equal(out.shape, (4, 15, 29))
@pytest.mark.parametrize('order', range(0, 6))
def test_rotate01(self, order):
data = numpy.array([[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]], dtype=numpy.float64)
out = ndimage.rotate(data, 0, order=order)
assert_array_almost_equal(out, data)
@pytest.mark.parametrize('order', range(0, 6))
def test_rotate02(self, order):
data = numpy.array([[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0]], dtype=numpy.float64)
expected = numpy.array([[0, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]], dtype=numpy.float64)
out = ndimage.rotate(data, 90, order=order)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('order', range(0, 6))
def test_rotate03(self, order):
data = numpy.array([[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]], dtype=numpy.float64)
expected = numpy.array([[0, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 0]], dtype=numpy.float64)
out = ndimage.rotate(data, 90, order=order)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('order', range(0, 6))
def test_rotate04(self, order):
data = numpy.array([[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]], dtype=numpy.float64)
expected = numpy.array([[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0]], dtype=numpy.float64)
out = ndimage.rotate(data, 90, reshape=False, order=order)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('order', range(0, 6))
def test_rotate05(self, order):
data = numpy.empty((4, 3, 3))
for i in range(3):
data[:, :, i] = numpy.array([[0, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 0]], dtype=numpy.float64)
expected = numpy.array([[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]], dtype=numpy.float64)
out = ndimage.rotate(data, 90, order=order)
for i in range(3):
assert_array_almost_equal(out[:, :, i], expected)
@pytest.mark.parametrize('order', range(0, 6))
def test_rotate06(self, order):
data = numpy.empty((3, 4, 3))
for i in range(3):
data[:, :, i] = numpy.array([[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]], dtype=numpy.float64)
expected = numpy.array([[0, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 0]], dtype=numpy.float64)
out = ndimage.rotate(data, 90, order=order)
for i in range(3):
assert_array_almost_equal(out[:, :, i], expected)
@pytest.mark.parametrize('order', range(0, 6))
def test_rotate07(self, order):
data = numpy.array([[[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64)
data = data.transpose()
expected = numpy.array([[[0, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]] * 2, dtype=numpy.float64)
expected = expected.transpose([2, 1, 0])
out = ndimage.rotate(data, 90, axes=(0, 1), order=order)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('order', range(0, 6))
def test_rotate08(self, order):
data = numpy.array([[[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64)
data = data.transpose()
expected = numpy.array([[[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64)
expected = expected.transpose()
out = ndimage.rotate(data, 90, axes=(0, 1), reshape=False, order=order)
assert_array_almost_equal(out, expected)
def test_rotate09(self):
data = numpy.array([[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]] * 2, dtype=numpy.float64)
with assert_raises(ValueError):
ndimage.rotate(data, 90, axes=(0, data.ndim))
def test_rotate10(self):
data = numpy.arange(45, dtype=numpy.float64).reshape((3, 5, 3))
# The output of ndimage.rotate before refactoring
expected = numpy.array([[[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[6.54914793, 7.54914793, 8.54914793],
[10.84520162, 11.84520162, 12.84520162],
[0.0, 0.0, 0.0]],
[[6.19286575, 7.19286575, 8.19286575],
[13.4730712, 14.4730712, 15.4730712],
[21.0, 22.0, 23.0],
[28.5269288, 29.5269288, 30.5269288],
[35.80713425, 36.80713425, 37.80713425]],
[[0.0, 0.0, 0.0],
[31.15479838, 32.15479838, 33.15479838],
[35.45085207, 36.45085207, 37.45085207],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]]])
out = ndimage.rotate(data, angle=12, reshape=False)
assert_array_almost_equal(out, expected)
def test_rotate_exact_180(self):
a = numpy.tile(numpy.arange(5), (5, 1))
b = ndimage.rotate(ndimage.rotate(a, 180), -180)
assert_equal(a, b)
def test_zoom_output_shape():
"""Ticket #643"""
x = numpy.arange(12).reshape((3, 4))
ndimage.zoom(x, 2, output=numpy.zeros((6, 8)))
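# A minimal sketch (not part of the original test suite) of the `output=`
# behaviour exercised above: scipy.ndimage routines may fill a pre-allocated
# output array in place and return None, so callers that want a single code
# path pick whichever of the two is populated, exactly as the tests do.
def _example_output_parameter():
    data = numpy.ones((2, 2))
    out = numpy.empty_like(data)
    returned = ndimage.shift(data, [0, 0], output=out)
    result = out if returned is None else returned
    assert_array_almost_equal(result, data)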
|
|
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import eventlet
from oslo.config import cfg
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.openstack.common.gettextutils import _LE
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
OPTS = [
cfg.StrOpt('external_pids',
default='$state_path/external/pids',
help=_('Location to store child pid files')),
cfg.BoolOpt('check_child_processes', default=False,
help=_("Periodically check child processes")),
cfg.StrOpt('check_child_processes_action', default='respawn',
choices=['respawn', 'exit'],
help=_('Action to be executed when a child process dies')),
cfg.IntOpt('check_child_processes_interval', default=60,
help=_('Interval between checks of child process liveness '
'(seconds)')),
]
cfg.CONF.register_opts(OPTS)
class ProcessManager(object):
"""An external process manager for Neutron spawned processes.
Note: The manager expects uuid to be in cmdline.
"""
def __init__(self, conf, uuid, root_helper='sudo',
namespace=None, service=None, pids_path=None,
default_cmd_callback=None,
cmd_addl_env=None):
self.conf = conf
self.uuid = uuid
self.root_helper = root_helper
self.namespace = namespace
self.default_cmd_callback = default_cmd_callback
self.cmd_addl_env = cmd_addl_env
self.pids_path = pids_path or self.conf.external_pids
if service:
self.service_pid_fname = 'pid.' + service
self.service = service
else:
self.service_pid_fname = 'pid'
self.service = 'default-service'
def enable(self, cmd_callback=None, reload_cfg=False):
if not self.active:
if not cmd_callback:
cmd_callback = self.default_cmd_callback
cmd = cmd_callback(self.get_pid_file_name(ensure_pids_dir=True))
ip_wrapper = ip_lib.IPWrapper(self.root_helper, self.namespace)
ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env)
elif reload_cfg:
self.reload_cfg()
def reload_cfg(self):
self.disable('HUP')
def disable(self, sig='9'):
pid = self.pid
if self.active:
cmd = ['kill', '-%s' % (sig), pid]
utils.execute(cmd, self.root_helper)
# In the case of shutting down, remove the pid file
if sig == '9':
utils.remove_conf_file(self.pids_path,
self.uuid,
self.service_pid_fname)
elif pid:
LOG.debug('Process for %(uuid)s pid %(pid)d is stale, ignoring '
'signal %(signal)s', {'uuid': self.uuid, 'pid': pid,
'signal': sig})
else:
LOG.debug('No process started for %s', self.uuid)
def get_pid_file_name(self, ensure_pids_dir=False):
"""Returns the file name for a given kind of config file."""
return utils.get_conf_file_name(self.pids_path,
self.uuid,
self.service_pid_fname,
ensure_pids_dir)
@property
def pid(self):
"""Last known pid for this external process spawned for this uuid."""
return utils.get_value_from_conf_file(self.pids_path,
self.uuid,
self.service_pid_fname,
int)
@property
def active(self):
pid = self.pid
if pid is None:
return False
cmdline = '/proc/%s/cmdline' % pid
try:
with open(cmdline, "r") as f:
return self.uuid in f.readline()
except IOError:
return False
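# Illustrative sketch only (not part of the original module): a caller
# provides a callback that turns the pid file path into a full command line,
# then lets ProcessManager spawn and later kill the process. The command and
# uuid below are hypothetical.
def _example_process_manager(conf):
    def callback(pid_file):
        # hypothetical daemon; real agents build e.g. a dnsmasq or
        # metadata-proxy command line here
        return ['my-daemon', '--pid-file=%s' % pid_file]
    pm = ProcessManager(conf, uuid='example-uuid',
                        default_cmd_callback=callback)
    pm.enable()       # spawns the process if it is not already active
    if pm.active:
        pm.disable()  # sends SIGKILL ('9') and removes the pid file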
ServiceId = collections.namedtuple('ServiceId', ['uuid', 'service'])
class ProcessMonitor(object):
def __init__(self, config, root_helper, resource_type, exit_handler):
"""Handle multiple process managers and watch over all of them.
:param config: oslo config object with the agent configuration.
:type config: oslo.config.ConfigOpts
:param root_helper: root helper to be used with new ProcessManagers
:type root_helper: str
:param resource_type: can be dhcp, router, load_balancer, etc.
:type resource_type: str
:param exit_handler: function to execute when agent exit has to
be executed, it should take care of actual
exit
        :type exit_handler: function

"""
self._config = config
self._root_helper = root_helper
self._resource_type = resource_type
self._exit_handler = exit_handler
self._process_managers = {}
if self._config.check_child_processes:
self._spawn_checking_thread()
def enable(self, uuid, cmd_callback, namespace=None, service=None,
reload_cfg=False, cmd_addl_env=None):
"""Creates a process and ensures that it is monitored.
It will create a new ProcessManager and tie it to the uuid/service.
"""
process_manager = ProcessManager(conf=self._config,
uuid=uuid,
root_helper=self._root_helper,
namespace=namespace,
service=service,
default_cmd_callback=cmd_callback,
cmd_addl_env=cmd_addl_env)
process_manager.enable(reload_cfg=reload_cfg)
service_id = ServiceId(uuid, service)
self._process_managers[service_id] = process_manager
def disable(self, uuid, namespace=None, service=None):
"""Disables the process and stops monitoring it."""
service_id = ServiceId(uuid, service)
process_manager = self._process_managers.pop(service_id, None)
        # We could be trying to disable a process_manager which was
        # started on a separate run of this agent, or during netns-cleanup;
        # in that case we won't know about such a uuid, so we need to
        # build a process_manager just to kill it.
if not process_manager:
process_manager = ProcessManager(conf=self._config,
uuid=uuid,
root_helper=self._root_helper,
namespace=namespace,
service=service)
process_manager.disable()
def disable_all(self):
for service_id in self._process_managers.keys():
self.disable(uuid=service_id.uuid, service=service_id.service)
def get_process_manager(self, uuid, service=None):
"""Returns a process manager for manipulation"""
service_id = ServiceId(uuid, service)
return self._process_managers.get(service_id)
def _get_process_manager_attribute(self, attribute, uuid, service=None):
process_manager = self.get_process_manager(uuid, service)
if process_manager:
return getattr(process_manager, attribute)
else:
return False
def is_active(self, uuid, service=None):
return self._get_process_manager_attribute('active', uuid, service)
def get_pid(self, uuid, service=None):
return self._get_process_manager_attribute('pid', uuid, service)
def _spawn_checking_thread(self):
eventlet.spawn(self._periodic_checking_thread)
@lockutils.synchronized("_check_child_processes")
def _check_child_processes(self):
for service_id in self._process_managers:
pm = self._process_managers.get(service_id)
if pm and not pm.active:
LOG.error(_LE("%(service)s for %(resource_type)s "
"with uuid %(uuid)s not found. "
"The process should not have died"),
{'service': pm.service,
'resource_type': self._resource_type,
'uuid': service_id.uuid})
self._execute_action(service_id)
eventlet.sleep(0)
def _periodic_checking_thread(self):
while True:
eventlet.sleep(self._config.check_child_processes_interval)
eventlet.spawn(self._check_child_processes)
def _execute_action(self, service_id):
action_function = getattr(
self, "_%s_action" % self._config.check_child_processes_action)
action_function(service_id)
def _respawn_action(self, service_id):
LOG.error(_LE("respawning %(service)s for uuid %(uuid)s"),
{'service': service_id.service,
'uuid': service_id.uuid})
self._process_managers[service_id].enable()
def _exit_action(self, service_id):
LOG.error(_LE("Exiting agent as programmed in check_child_processes_"
"actions"))
self._exit_handler(service_id.uuid, service_id.service)
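# Illustrative sketch only (not part of the original module): wiring a
# ProcessMonitor so that dead children are detected periodically and the
# configured action (respawn or exit) is executed. The callback, uuid and
# exit handler below are hypothetical.
def _example_process_monitor(conf, root_helper):
    def _exit(uuid, service):
        LOG.error(_LE("Giving up on %(service)s for %(uuid)s"),
                  {'service': service, 'uuid': uuid})
    def callback(pid_file):
        return ['my-daemon', '--pid-file=%s' % pid_file]
    monitor = ProcessMonitor(config=conf,
                             root_helper=root_helper,
                             resource_type='router',
                             exit_handler=_exit)
    monitor.enable(uuid='example-uuid', cmd_callback=callback)
    # ...later, when the resource is removed:
    monitor.disable(uuid='example-uuid')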
|
|
"""The Shelly integration."""
import asyncio
from datetime import timedelta
import logging
import aioshelly
import async_timeout
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_DEVICE_ID,
CONF_HOST,
CONF_PASSWORD,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, device_registry, update_coordinator
import homeassistant.helpers.config_validation as cv
from .const import (
AIOSHELLY_DEVICE_TIMEOUT_SEC,
ATTR_CHANNEL,
ATTR_CLICK_TYPE,
ATTR_DEVICE,
BATTERY_DEVICES_WITH_PERMANENT_CONNECTION,
COAP,
CONF_COAP_PORT,
DATA_CONFIG_ENTRY,
DEFAULT_COAP_PORT,
DEVICE,
DOMAIN,
EVENT_SHELLY_CLICK,
INPUTS_EVENTS_DICT,
POLLING_TIMEOUT_SEC,
REST,
REST_SENSORS_UPDATE_INTERVAL,
SHBTN_MODELS,
SLEEP_PERIOD_MULTIPLIER,
UPDATE_PERIOD_MULTIPLIER,
)
from .utils import get_coap_context, get_device_name, get_device_sleep_period
PLATFORMS = ["binary_sensor", "cover", "light", "sensor", "switch"]
SLEEPING_PLATFORMS = ["binary_sensor", "sensor"]
_LOGGER = logging.getLogger(__name__)
COAP_SCHEMA = vol.Schema(
{
vol.Optional(CONF_COAP_PORT, default=DEFAULT_COAP_PORT): cv.port,
}
)
CONFIG_SCHEMA = vol.Schema({DOMAIN: COAP_SCHEMA}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Shelly component."""
hass.data[DOMAIN] = {DATA_CONFIG_ENTRY: {}}
conf = config.get(DOMAIN)
if conf is not None:
hass.data[DOMAIN][CONF_COAP_PORT] = conf[CONF_COAP_PORT]
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Shelly from a config entry."""
hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id] = {}
hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id][DEVICE] = None
temperature_unit = "C" if hass.config.units.is_metric else "F"
options = aioshelly.ConnectionOptions(
entry.data[CONF_HOST],
entry.data.get(CONF_USERNAME),
entry.data.get(CONF_PASSWORD),
temperature_unit,
)
coap_context = await get_coap_context(hass)
device = await aioshelly.Device.create(
aiohttp_client.async_get_clientsession(hass),
coap_context,
options,
False,
)
dev_reg = await device_registry.async_get_registry(hass)
device_entry = None
if entry.unique_id is not None:
device_entry = dev_reg.async_get_device(
identifiers={(DOMAIN, entry.unique_id)}, connections=set()
)
if device_entry and entry.entry_id not in device_entry.config_entries:
device_entry = None
sleep_period = entry.data.get("sleep_period")
@callback
def _async_device_online(_):
_LOGGER.debug("Device %s is online, resuming setup", entry.title)
hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id][DEVICE] = None
if sleep_period is None:
data = {**entry.data}
data["sleep_period"] = get_device_sleep_period(device.settings)
data["model"] = device.settings["device"]["type"]
hass.config_entries.async_update_entry(entry, data=data)
hass.async_create_task(async_device_setup(hass, entry, device))
if sleep_period == 0:
# Not a sleeping device, finish setup
_LOGGER.debug("Setting up online device %s", entry.title)
try:
async with async_timeout.timeout(AIOSHELLY_DEVICE_TIMEOUT_SEC):
await device.initialize(True)
except (asyncio.TimeoutError, OSError) as err:
raise ConfigEntryNotReady from err
await async_device_setup(hass, entry, device)
elif sleep_period is None or device_entry is None:
        # Need to get sleep info, or this is the first setup of a sleeping
        # device; wait for the device to come online
hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id][DEVICE] = device
_LOGGER.debug(
"Setup for device %s will resume when device is online", entry.title
)
device.subscribe_updates(_async_device_online)
await device.coap_request("s")
else:
# Restore sensors for sleeping device
_LOGGER.debug("Setting up offline device %s", entry.title)
await async_device_setup(hass, entry, device)
return True
async def async_device_setup(
hass: HomeAssistant, entry: ConfigEntry, device: aioshelly.Device
):
"""Set up a device that is online."""
device_wrapper = hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id][
COAP
] = ShellyDeviceWrapper(hass, entry, device)
await device_wrapper.async_setup()
platforms = SLEEPING_PLATFORMS
if not entry.data.get("sleep_period"):
hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id][
REST
] = ShellyDeviceRestWrapper(hass, device)
platforms = PLATFORMS
hass.config_entries.async_setup_platforms(entry, platforms)
class ShellyDeviceWrapper(update_coordinator.DataUpdateCoordinator):
"""Wrapper for a Shelly device with Home Assistant specific functions."""
def __init__(self, hass, entry, device: aioshelly.Device):
"""Initialize the Shelly device wrapper."""
self.device_id = None
sleep_period = entry.data["sleep_period"]
if sleep_period:
update_interval = SLEEP_PERIOD_MULTIPLIER * sleep_period
else:
update_interval = (
UPDATE_PERIOD_MULTIPLIER * device.settings["coiot"]["update_period"]
)
device_name = get_device_name(device) if device.initialized else entry.title
super().__init__(
hass,
_LOGGER,
name=device_name,
update_interval=timedelta(seconds=update_interval),
)
self.hass = hass
self.entry = entry
self.device = device
self._async_remove_device_updates_handler = self.async_add_listener(
self._async_device_updates_handler
)
self._last_input_events_count: dict = {}
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, self._handle_ha_stop)
@callback
def _async_device_updates_handler(self):
"""Handle device updates."""
if not self.device.initialized:
return
        # For battery-powered buttons, set the initial value for last_event_count
if self.model in SHBTN_MODELS and self._last_input_events_count.get(1) is None:
for block in self.device.blocks:
if block.type != "device":
continue
if block.wakeupEvent[0] == "button":
self._last_input_events_count[1] = -1
break
# Check for input events
for block in self.device.blocks:
if (
"inputEvent" not in block.sensor_ids
or "inputEventCnt" not in block.sensor_ids
):
continue
channel = int(block.channel or 0) + 1
event_type = block.inputEvent
last_event_count = self._last_input_events_count.get(channel)
self._last_input_events_count[channel] = block.inputEventCnt
if (
last_event_count is None
or last_event_count == block.inputEventCnt
or event_type == ""
):
continue
if event_type in INPUTS_EVENTS_DICT:
self.hass.bus.async_fire(
EVENT_SHELLY_CLICK,
{
ATTR_DEVICE_ID: self.device_id,
ATTR_DEVICE: self.device.settings["device"]["hostname"],
ATTR_CHANNEL: channel,
ATTR_CLICK_TYPE: INPUTS_EVENTS_DICT[event_type],
},
)
else:
_LOGGER.warning(
"Shelly input event %s for device %s is not supported, please open issue",
event_type,
self.name,
)
async def _async_update_data(self):
"""Fetch data."""
if self.entry.data.get("sleep_period"):
# Sleeping device, no point polling it, just mark it unavailable
raise update_coordinator.UpdateFailed("Sleeping device did not update")
_LOGGER.debug("Polling Shelly Device - %s", self.name)
try:
async with async_timeout.timeout(POLLING_TIMEOUT_SEC):
return await self.device.update()
except OSError as err:
raise update_coordinator.UpdateFailed("Error fetching data") from err
@property
def model(self):
"""Model of the device."""
return self.entry.data["model"]
@property
def mac(self):
"""Mac address of the device."""
return self.entry.unique_id
async def async_setup(self):
"""Set up the wrapper."""
dev_reg = await device_registry.async_get_registry(self.hass)
sw_version = self.device.settings["fw"] if self.device.initialized else ""
entry = dev_reg.async_get_or_create(
config_entry_id=self.entry.entry_id,
name=self.name,
connections={(device_registry.CONNECTION_NETWORK_MAC, self.mac)},
            # This is a duplicate, but otherwise via_device can't work
identifiers={(DOMAIN, self.mac)},
manufacturer="Shelly",
model=aioshelly.MODEL_NAMES.get(self.model, self.model),
sw_version=sw_version,
)
self.device_id = entry.id
self.device.subscribe_updates(self.async_set_updated_data)
def shutdown(self):
"""Shutdown the wrapper."""
if self.device:
self.device.shutdown()
self._async_remove_device_updates_handler()
self.device = None
@callback
def _handle_ha_stop(self, _):
"""Handle Home Assistant stopping."""
_LOGGER.debug("Stopping ShellyDeviceWrapper for %s", self.name)
self.shutdown()
class ShellyDeviceRestWrapper(update_coordinator.DataUpdateCoordinator):
"""Rest Wrapper for a Shelly device with Home Assistant specific functions."""
def __init__(self, hass, device: aioshelly.Device):
"""Initialize the Shelly device wrapper."""
if (
device.settings["device"]["type"]
in BATTERY_DEVICES_WITH_PERMANENT_CONNECTION
):
update_interval = (
SLEEP_PERIOD_MULTIPLIER * device.settings["coiot"]["update_period"]
)
else:
update_interval = REST_SENSORS_UPDATE_INTERVAL
super().__init__(
hass,
_LOGGER,
name=get_device_name(device),
update_interval=timedelta(seconds=update_interval),
)
self.device = device
async def _async_update_data(self):
"""Fetch data."""
try:
async with async_timeout.timeout(AIOSHELLY_DEVICE_TIMEOUT_SEC):
_LOGGER.debug("REST update for %s", self.name)
return await self.device.update_status()
except OSError as err:
raise update_coordinator.UpdateFailed("Error fetching data") from err
@property
def mac(self):
"""Mac address of the device."""
return self.device.settings["device"]["mac"]
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
device = hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id].get(DEVICE)
if device is not None:
        # If the device is present, the device wrapper is not set up yet
device.shutdown()
return True
platforms = SLEEPING_PLATFORMS
if not entry.data.get("sleep_period"):
hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id][REST] = None
platforms = PLATFORMS
unload_ok = await hass.config_entries.async_unload_platforms(entry, platforms)
if unload_ok:
hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id][COAP].shutdown()
hass.data[DOMAIN][DATA_CONFIG_ENTRY].pop(entry.entry_id)
return unload_ok
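# Illustrative sketch only (not part of the integration): a platform module
# such as sensor.py would typically look up the ShellyDeviceWrapper stored
# above and build its entities around that coordinator. The function below is
# a hypothetical, simplified example of that lookup.
async def _example_platform_setup(hass, config_entry, async_add_entities):
    wrapper = hass.data[DOMAIN][DATA_CONFIG_ENTRY][config_entry.entry_id][COAP]
    _LOGGER.debug(
        "Wrapper %s refreshes every %s", wrapper.name, wrapper.update_interval
    )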
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts a frozen graph into a TFLite FlatBuffer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum # pylint: disable=g-bad-import-order
import os as _os
import platform as _platform
import subprocess as _subprocess
import tempfile as _tempfile
from tensorflow.contrib.lite.python import lite_constants
from tensorflow.contrib.lite.toco import model_flags_pb2 as _model_flags_pb2
from tensorflow.contrib.lite.toco import toco_flags_pb2 as _toco_flags_pb2
from tensorflow.python.platform import resource_loader as _resource_loader
from tensorflow.python.util import deprecation
from tensorflow.python.util.lazy_loader import LazyLoader
# Lazy load since some of the performance benchmark skylark rules
# break dependencies.
_toco_python = LazyLoader(
"tensorflow_wrap_toco", globals(),
"tensorflow.contrib.lite.toco.python."
"tensorflow_wrap_toco")
del LazyLoader
# Find the toco_from_protos binary using the resource loader if running from
# bazel; otherwise we are in a pip installation where console_scripts already
# provides the toco_from_protos tool.
if lite_constants.EXPERIMENTAL_USE_TOCO_API_DIRECTLY:
_toco_from_proto_bin = ""
else:
_toco_from_proto_bin = _resource_loader.get_path_to_datafile(
"../toco/python/toco_from_protos")
if _toco_from_proto_bin and not _os.path.exists(_toco_from_proto_bin):
_toco_from_proto_bin = "toco_from_protos"
def _try_convert_to_unicode(output):
if output is None:
return u""
if isinstance(output, bytes):
try:
return output.decode()
except UnicodeDecodeError:
pass
return output
class OpsSet(enum.Enum):
"""Enum class defining the sets of ops available to generate TFLite models.
WARNING: Experimental interface, subject to change.
"""
# Convert model using TensorFlow Lite builtin ops.
TFLITE_BUILTINS = "TFLITE_BUILTINS"
# Convert model using TensorFlow ops. Not all TensorFlow ops are available.
# WARNING: Experimental interface, subject to change.
SELECT_TF_OPS = "SELECT_TF_OPS"
def __str__(self):
return self.value
@staticmethod
def get_options():
"""Returns a list of OpsSet options as a list of strings."""
return [str(option) for option in list(OpsSet)]
def toco_convert_protos(model_flags_str, toco_flags_str, input_data_str):
"""Convert `input_data_str` according to model and toco parameters.
Unless you know what you are doing consider using
the more friendly `tf.contrib.lite.toco_convert`.
Args:
model_flags_str: Serialized proto describing model properties, see
`toco/model_flags.proto`.
toco_flags_str: Serialized proto describing conversion properties, see
`toco/toco_flags.proto`.
input_data_str: Input data in serialized form (e.g. a graphdef is common)
Returns:
Converted model in serialized form (e.g. a TFLITE model is common).
Raises:
RuntimeError: When conversion fails, an exception is raised with the error
message embedded.
"""
# TODO(aselle): When toco does not use fatal errors for failure, we can
# switch this on.
if not _toco_from_proto_bin:
return _toco_python.TocoConvert(
model_flags_str, toco_flags_str, input_data_str)
# Windows and TemporaryFile are not that useful together,
# since you cannot have two readers/writers. So we have to
# make the temporaries and close and delete them explicitly.
toco_filename, model_filename, input_filename, output_filename = (
None, None, None, None)
try:
# Build all input files
with _tempfile.NamedTemporaryFile(delete=False) as fp_toco, \
_tempfile.NamedTemporaryFile(delete=False) as fp_model, \
_tempfile.NamedTemporaryFile(delete=False) as fp_input:
toco_filename = fp_toco.name
input_filename = fp_input.name
model_filename = fp_model.name
fp_model.write(model_flags_str)
fp_toco.write(toco_flags_str)
fp_input.write(input_data_str)
fp_model.flush()
fp_toco.flush()
fp_input.flush()
# Reserve an output file
with _tempfile.NamedTemporaryFile(delete=False) as fp:
output_filename = fp.name
# Run
cmd = [
_toco_from_proto_bin, model_filename, toco_filename, input_filename,
output_filename
]
cmdline = " ".join(cmd)
is_windows = _platform.system() == "Windows"
proc = _subprocess.Popen(
cmdline,
shell=True,
stdout=_subprocess.PIPE,
stderr=_subprocess.STDOUT,
close_fds=not is_windows)
stdout, stderr = proc.communicate()
exitcode = proc.returncode
if exitcode == 0:
with open(output_filename, "rb") as fp:
return fp.read()
else:
stdout = _try_convert_to_unicode(stdout)
stderr = _try_convert_to_unicode(stderr)
raise RuntimeError(
"TOCO failed see console for info.\n%s\n%s\n" % (stdout, stderr))
finally:
# Must manually cleanup files.
for filename in [
toco_filename, input_filename, model_filename, output_filename]:
try:
_os.unlink(filename)
except (OSError, TypeError):
pass
def tensor_name(x):
return x.name.split(":")[0]
def build_toco_convert_protos(input_tensors,
output_tensors,
inference_type=lite_constants.FLOAT,
inference_input_type=None,
input_format=lite_constants.TENSORFLOW_GRAPHDEF,
input_shapes=None,
output_format=lite_constants.TFLITE,
quantized_input_stats=None,
default_ranges_stats=None,
drop_control_dependency=True,
reorder_across_fake_quant=False,
allow_custom_ops=False,
change_concat_input_ranges=False,
post_training_quantize=False,
dump_graphviz_dir=None,
dump_graphviz_video=False,
target_ops=None,
allow_nonexistent_arrays=False):
"""Builds protocol buffers describing a conversion of a model using TOCO.
Typically this is to convert from TensorFlow GraphDef to TFLite, in which
case the default `input_format` and `output_format` are sufficient.
Args:
input_tensors: List of input tensors. Type and shape are computed using
`foo.get_shape()` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
inference_type: Target data type of real-number arrays in the output file.
Must be `{FLOAT, QUANTIZED_UINT8}`. (default FLOAT)
inference_input_type: Target data type of real-number input arrays. Allows
for a different type for input arrays in the case of quantization.
Must be `{FLOAT, QUANTIZED_UINT8}`. (default `inference_type`)
    input_format: Type of data to read. Currently must be
`{TENSORFLOW_GRAPHDEF}`. (default TENSORFLOW_GRAPHDEF)
input_shapes: Input array shape. It needs to be a list of the same length
as `input_tensors`, or None. (default None)
output_format: Output file format. Currently must be `{TFLITE,
GRAPHVIZ_DOT}`. (default TFLITE)
quantized_input_stats: List of tuples of floats representing the mean and
standard deviation. Each tuple maps to the corresponding input tensor.
Only need if `inference_input_type` is `QUANTIZED_UINT8`.
real_input_value = (quantized_input_value - mean_value) / std_dev_value.
(default None)
default_ranges_stats: Tuple of integers representing (min, max) range values
for all arrays without a specified range. Intended for experimenting with
quantization via "dummy quantization". (default None)
drop_control_dependency: Boolean indicating whether to drop control
dependencies silently. This is due to TFLite not supporting control
dependencies. (default True)
reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
nodes in unexpected locations. Used when the location of the FakeQuant
nodes is preventing graph transformations necessary to convert the graph.
Results in a graph that differs from the quantized training graph,
potentially causing differing arithmetic behavior. (default False)
allow_custom_ops: Boolean indicating whether to allow custom operations.
When false any unknown operation is an error. When true, custom ops are
created for any op that is unknown. The developer will need to provide
these to the TensorFlow Lite runtime with a custom resolver.
(default False)
change_concat_input_ranges: Boolean to change behavior of min/max ranges for
inputs and outputs of the concat operator for quantized models. Changes
the ranges of concat operator overlap when true. (default False)
post_training_quantize: Boolean indicating whether to quantize the weights
of the converted float model. Model size will be reduced and there will be
latency improvements (at the cost of accuracy).
(default False)
dump_graphviz_dir: Full filepath of folder to dump the graphs at various
stages of processing GraphViz .dot files. Preferred over
--output_format=GRAPHVIZ_DOT in order to keep the requirements of the
output file. (default None)
dump_graphviz_video: Boolean indicating whether to dump the graph after
every graph transformation. (default False)
target_ops: Experimental flag, subject to change. Set of OpsSet
options indicating which converter to use.
(default set([OpsSet.TFLITE_BUILTINS]))
allow_nonexistent_arrays: Allow specifying array names that don't exist
or are unused in the final graph. (default False)
Returns:
model_flags, toco_flags: two protocol buffers describing the conversion
process.
Raises:
ValueError: If the input tensor type is unknown
RuntimeError: If TOCO fails to convert (in which case the runtime error's
error text will contain the TOCO error log)
"""
toco = _toco_flags_pb2.TocoFlags()
toco.input_format = input_format
toco.output_format = output_format
toco.inference_type = inference_type
if inference_input_type:
toco.inference_input_type = inference_input_type
else:
toco.inference_input_type = toco.inference_type
toco.drop_control_dependency = drop_control_dependency
toco.reorder_across_fake_quant = reorder_across_fake_quant
toco.allow_custom_ops = allow_custom_ops
toco.post_training_quantize = post_training_quantize
if default_ranges_stats:
toco.default_ranges_min = default_ranges_stats[0]
toco.default_ranges_max = default_ranges_stats[1]
if dump_graphviz_dir:
toco.dump_graphviz_dir = dump_graphviz_dir
toco.dump_graphviz_include_video = dump_graphviz_video
if target_ops:
if set(target_ops) == set([OpsSet.TFLITE_BUILTINS, OpsSet.SELECT_TF_OPS]):
toco.allow_flex_ops = True
elif set(target_ops) == set([OpsSet.SELECT_TF_OPS]):
toco.allow_flex_ops = True
toco.force_flex_ops = True
model = _model_flags_pb2.ModelFlags()
model.change_concat_input_ranges = change_concat_input_ranges
for idx, input_tensor in enumerate(input_tensors):
input_array = model.input_arrays.add()
if toco.inference_input_type == lite_constants.QUANTIZED_UINT8:
input_array.mean_value, input_array.std_value = quantized_input_stats[idx]
input_array.name = tensor_name(input_tensor)
if input_shapes is None:
shape = input_tensor.get_shape()
else:
shape = input_shapes[idx]
input_array.shape.dims.extend(map(int, shape))
for output_tensor in output_tensors:
model.output_arrays.append(tensor_name(output_tensor))
model.allow_nonexistent_arrays = allow_nonexistent_arrays
return model, toco
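# Hedged usage sketch (not part of the original module): the proto pair built
# above is normally fed straight into toco_convert_protos. The session and
# tensors below are assumed to come from a small TensorFlow graph with fully
# defined static shapes; the names are hypothetical.
def _example_build_and_convert(sess, in_tensor, out_tensor):
  model_flags, toco_flags = build_toco_convert_protos(
      input_tensors=[in_tensor], output_tensors=[out_tensor])
  return toco_convert_protos(model_flags.SerializeToString(),
                             toco_flags.SerializeToString(),
                             sess.graph_def.SerializeToString())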
def toco_convert_graph_def(input_data, input_arrays_with_shape, output_arrays,
*args, **kwargs):
""""Convert a model using TOCO.
This function is used to convert GraphDefs that cannot be loaded into
TensorFlow to TFLite. Conversion can be customized by providing arguments
that are forwarded to `build_toco_convert_protos` (see documentation for
details).
Args:
input_data: Input data (i.e. often `sess.graph_def`),
    input_arrays_with_shape: List of tuples pairing an input tensor name with
      a list of integers representing its shape
      (e.g., [("foo", [1, 16, 16, 3])]). Use only when graph cannot be loaded
into TensorFlow and when `input_tensors` is None. (default None)
output_arrays: List of output tensors to freeze graph with. Use only when
graph cannot be loaded into TensorFlow and when `output_tensors` is None.
(default None)
*args: See `build_toco_convert_protos`,
**kwargs: See `build_toco_convert_protos`.
Returns:
The converted data. For example if TFLite was the destination, then
this will be a tflite flatbuffer in a bytes array.
Raises:
Defined in `build_toco_convert_protos`.
"""
model_flags, toco_flags = build_toco_convert_protos(
input_tensors=[], output_tensors=[], *args, **kwargs)
for idx, (name, shape) in enumerate(input_arrays_with_shape):
input_array = model_flags.input_arrays.add()
if kwargs["inference_type"] == lite_constants.QUANTIZED_UINT8:
input_array.mean_value, input_array.std_value = kwargs[
"quantized_input_stats"][idx]
input_array.name = name
input_array.shape.dims.extend(map(int, shape))
for name in output_arrays:
model_flags.output_arrays.append(name)
data = toco_convert_protos(model_flags.SerializeToString(),
toco_flags.SerializeToString(),
input_data.SerializeToString())
return data
def toco_convert_impl(input_data, input_tensors, output_tensors, *args,
**kwargs):
""""Convert a model using TOCO.
Typically this function is used to convert from TensorFlow GraphDef to TFLite.
Conversion can be customized by providing arguments that are forwarded to
`build_toco_convert_protos` (see documentation for details).
Args:
input_data: Input data (i.e. often `sess.graph_def`),
input_tensors: List of input tensors. Type and shape are computed using
`foo.get_shape()` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
*args: See `build_toco_convert_protos`,
**kwargs: See `build_toco_convert_protos`.
Returns:
The converted data. For example if TFLite was the destination, then
this will be a tflite flatbuffer in a bytes array.
Raises:
Defined in `build_toco_convert_protos`.
"""
model_flags, toco_flags = build_toco_convert_protos(
input_tensors, output_tensors, *args, **kwargs)
data = toco_convert_protos(model_flags.SerializeToString(),
toco_flags.SerializeToString(),
input_data.SerializeToString())
return data
@deprecation.deprecated(None, "Use `lite.TFLiteConverter` instead.")
def toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs):
"""Convert a model using TOCO.
Typically this function is used to convert from TensorFlow GraphDef to TFLite.
Conversion can be customized by providing arguments that are forwarded to
`build_toco_convert_protos` (see documentation for details). This function has
been deprecated. Please use `lite.TFLiteConverter` instead.
Args:
input_data: Input data (i.e. often `sess.graph_def`),
input_tensors: List of input tensors. Type and shape are computed using
`foo.get_shape()` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
*args: See `build_toco_convert_protos`,
**kwargs: See `build_toco_convert_protos`.
Returns:
The converted data. For example if TFLite was the destination, then
this will be a tflite flatbuffer in a bytes array.
Raises:
Defined in `build_toco_convert_protos`.
"""
return toco_convert_impl(input_data, input_tensors, output_tensors, *args,
**kwargs)
|
|
import re
from rdopkg.actionmods import rdoinfo
from rdopkg.actionmods import query
from rdopkg.actionmods import pymod2pkg
from rdopkg import exception
from rdopkg.utils.cmd import git
from rdopkg.utils import log
from rdopkg.utils import specfile
from rdopkg import helpers
class DiffReq(object):
def __init__(self, name, vers):
self.name = name
self.vers = vers
self.old_vers = vers
def __str__(self):
s = '%s %s' % (self.name, self.vers)
if self.old_vers != self.vers:
s += ' (was %s)' % (self.old_vers or 'not capped',)
return s
class CheckReq(object):
def __init__(self, name, desired_vers, vers):
self.name = name
self.desired_vers = desired_vers
self.vers = vers
def met(self):
if self.vers is None:
return False
        # TODO: smarter version range comparison
if self.desired_vers:
if self.desired_vers == self.vers:
return True
else:
return False
return True
def __str__(self):
s = self.name
if self.desired_vers:
s += ' ' + self.desired_vers
if self.vers:
s += ' (%s in .spec)' % self.vers
return s
def parse_reqs_txt(txt):
reqs = []
lines = sorted(txt.split('\n'), key=lambda l: l.lower())
for line in lines:
        if not line or re.match(r'\W', line):
continue
line = re.sub(r'\s*(?:#.*)$', '', line)
m = re.match(r'([^<>=!\s]+)\s*(.*)$', line)
if not m:
log.warn("Failed to parse requirement: %s" % line)
continue
r = DiffReq(name=m.group(1), vers=m.group(2))
reqs.append(r)
return reqs
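# Example sketch (not part of the original module): parse_reqs_txt turns the
# contents of a requirements.txt file into DiffReq objects, skipping comment
# lines and stripping trailing comments. The requirement names below are
# arbitrary examples.
def _example_parse_reqs():
    txt = "six>=1.9.0\n# a comment\noslo.config<2.0  # pinned\n"
    for req in parse_reqs_txt(txt):
        print(req)  # e.g. "oslo.config <2.0", "six >=1.9.0"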
def get_reqs_from_ref(ref):
o = git('show', '%s:requirements.txt' % ref, log_cmd=False)
return parse_reqs_txt(o)
def get_reqs_from_path(path):
o = open(path).read()
return parse_reqs_txt(o)
def get_reqs_from_spec(as_objects=False):
spec = specfile.Spec()
reqs = spec.get_requires(versions_as_string=True)
if as_objects:
creqs = []
for name in sorted(reqs):
req = DiffReq(name, reqs[name])
creqs.append(req)
return creqs
return reqs
def map_reqs2pkgs(reqs, dist):
for r in reqs:
r.name = pymod2pkg.module2package(r.name, dist)
return reqs
def reqdiff(reqs1, reqs2):
added, changed = [], []
removed = list(reqs1)
for r2 in reqs2:
for r1 in reqs1:
if r1.name == r2.name:
if r1.vers != r2.vers:
r2.old_vers = r1.vers
changed.append(r2)
try:
removed.remove(r1)
except Exception as ex:
pass
break
else:
added.append(r2)
return added, changed, removed
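# Example sketch (not part of the original module): diffing two requirement
# sets, e.g. ones obtained from get_reqs_from_ref() for two git refs. The
# inline requirement strings are arbitrary examples.
def _example_reqdiff():
    old = parse_reqs_txt("six>=1.9.0\nBabel>=1.3\n")
    new = parse_reqs_txt("six>=1.10.0\npbr>=1.6\n")
    added, changed, removed = reqdiff(old, new)
    print_reqdiff(added, changed, removed)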
def reqdiff_from_refs(ref1, ref2):
r1 = get_reqs_from_ref(ref1)
r2 = get_reqs_from_ref(ref2)
return reqdiff(r1, r2)
def print_reqdiff(added, changed, removed):
if added:
print("\n{t.bold}ADDED{t.normal}:".format(t=log.term))
helpers.print_list(added, pre=' ')
if changed:
print("\n{t.bold}CHANGED{t.normal}:".format(t=log.term))
helpers.print_list(changed, pre=' ')
if removed:
print("\n{t.bold}REMOVED{t.normal}:".format(t=log.term))
helpers.print_list(removed, pre=' ')
if not (added or changed or removed):
print("\nno requirements changed")
print("")
def reqcheck(desired_reqs, reqs):
met, any_version, wrong_version, missing = [], [], [], []
for dr in desired_reqs:
vers = reqs.get(dr.name)
r = CheckReq(dr.name, dr.vers, vers)
if r.vers is None:
missing.append(r)
        elif r.vers == '':
if r.desired_vers:
any_version.append(r)
else:
met.append(r)
else:
if r.met():
met.append(r)
else:
wrong_version.append(r)
return met, any_version, wrong_version, missing
def print_reqcheck(met, any_version, wrong_version, missing):
cats = [
("\n{t.bold_green}MET{t.normal}:", met),
("\n{t.bold}VERSION NOT ENFORCED{t.normal}:", any_version),
("\n{t.bold_yellow}VERSION MISMATCH{t.normal}:", wrong_version),
("\n{t.bold_red}MISSING{t.normal}:", missing),
]
for title, reqs in cats:
if not reqs:
continue
print(title.format(t=log.term))
helpers.print_list(reqs, pre=' ')
def reqcheck_spec(ref=None, reqs_txt=None):
if (ref and reqs_txt) or (not ref and not reqs_txt):
raise exception.InvalidUsage(
why="reqcheck_spec needs either ref (git ref) or reqs_txt (path)")
if ref:
reqs_txt = get_reqs_from_ref(ref)
else:
reqs_txt = get_reqs_from_path(reqs_txt)
map_reqs2pkgs(reqs_txt, 'epel')
spec_reqs = get_reqs_from_spec()
return reqcheck(reqs_txt, spec_reqs)
VER_OK, VER_FAIL, VER_WTF = range(3)
VERCMP_TABLE = {
'<': [-1],
'<=': [-1, 0],
'>': [1],
'>=': [1, 0],
'==': [0],
'=': [0],
'!=': [-1, 1],
}
VERCMP_COLORS = {
VER_OK: 'green',
VER_FAIL: 'red',
VER_WTF: 'yellow'
}
def match_version_rule(ver, rver):
    m = re.match(r'(<|<=|>|>=|==|=|!=)(\d.*)$', rver)
if not m:
return VER_WTF
op, rv = m.groups()
goal = VERCMP_TABLE.get(op, [])
c = specfile.vcmp(ver, rv)
if c in goal:
return VER_OK
return VER_FAIL
def match_required_vers(ver, rvers):
if not rvers:
if ver:
return [('any version', VER_OK)]
else:
return [('any version', VER_FAIL)]
matches = []
for rv in rvers.split(','):
matches.append((rv, match_version_rule(ver, rv)))
return matches
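# Example sketch (not part of the original module): checking an available
# version against a comma-separated requirement expression and rendering the
# per-rule result with the terminal colors used elsewhere in this module.
def _example_version_match():
    matches = match_required_vers('1.9.1', '>=1.9.0,!=1.9.4')
    print(color_matched_required_vers(matches))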
def color_matched_required_vers(matches):
vers = []
for ver, m in matches:
s = '{t.%s}{v}{t.normal}' % VERCMP_COLORS[m]
vers.append(s.format(t=log.term, v=ver))
return ','.join(vers)
def reqquery(reqs, release, dist=None, module2pkg=True, verbose=False):
info = rdoinfo.get_default_inforepo()
distrepos = info.get_distrepos(release=release, dist=dist)
r = []
for rls, dist, repos in distrepos:
packages = []
for req in reqs:
if module2pkg:
pkg_name = pymod2pkg.module2package(req.name, dist)
else:
pkg_name = req.name
vers = query.query_repos(repos, pkg_name, verbose=verbose)
repo, nvr, v = None, None, None
if vers:
repo, nvr = vers[0]
v = specfile.nvr2version(nvr)
pkg = {
'package': pkg_name,
'version_required': req.vers or None,
'version_available': v,
'nvr_available': nvr,
'repo_available': repo,
}
if module2pkg:
pkg['module'] = req.name
packages.append(pkg)
vers = {
'release': rls,
'dist': dist,
'packages': packages
}
r.append(vers)
return r
def print_reqquery(q):
first = True
for ver in q:
if first:
first = False
else:
print('')
fmt = "\n{t.bold}{rls}{t.normal}/{t.bold}{dist}{t.normal}"
print(fmt.format(t=log.term, rls=ver['release'], dist=ver['dist']))
for pkg in ver['packages']:
module = pkg.get('module') or pkg['package']
mvers = match_required_vers(pkg['version_available'],
pkg['version_required'])
cvers = color_matched_required_vers(mvers)
nvr = pkg['nvr_available']
if nvr and pkg['version_available']:
nvr = re.sub(re.escape(pkg['version_available']),
'{t.bold}%s{t.normal}' % pkg['version_available'],
nvr)
nvr = nvr.format(t=log.term)
print(" {t.bold}{m}{t.normal}".format(t=log.term, m=module))
if nvr:
print(" nvr: %s" % nvr)
else:
fmt = " nvr: {pkg} {t.red}not available{t.normal}"
print(fmt.format(t=log.term, pkg=pkg['package']))
print(" need: %s" % cvers)
met = '{t.green}OK{t.normal}'
for _, m in mvers:
if m != VER_OK:
met = '{t.red}not met{t.normal}'
break
print((" state: " + met).format(t=log.term))
|
|
#!/usr/bin/python3
#
# Copyright 2015 MarkLogic Corporation
#
# This script lists all of the artifacts on the server or, if the name (and
# optionally a type) of an artifact is given, displays the properties of
# that artifact.
#
# For example:
#
# python3 properties.py
#
# or
#
# python3 properties.py App-Services
#
# If there's more than one artifact with a given name, you can specify
# the artifact type.
#
# python3 properties.py --forest Documents
#
# TODO
#
# * The set of artifacts is incomplete.
from __future__ import unicode_literals, print_function, absolute_import
import sys
import json
import logging
import argparse
import re
from marklogic.connection import Connection
from marklogic.models.database import Database
from marklogic import MarkLogic
from requests.auth import HTTPDigestAuth
from resources import TestConnection as tc
logging.basicConfig(level=logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("marklogic").setLevel(logging.DEBUG)
logging.getLogger("marklogic.examples").setLevel(logging.INFO)
class Properties:
def __init__(self):
self._artifact_types = {'database': 'D',
'server': 'S',
'host': 'H',
'user': 'U',
'forest': 'F',
'role': 'R'}
def artifact_types(self):
return self._artifact_types
def connect(self, args):
try:
adminuser, adminpass = re.split(":", args['credentials'])
except ValueError:
print ("--credentials value must be 'user:password':",
args['credentials'])
sys.exit(1)
if args['debug']:
logging.basicConfig(level=logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("marklogic").setLevel(logging.DEBUG)
self.connection \
= Connection(args['hostname'], HTTPDigestAuth(adminuser, adminpass))
self.mls = MarkLogic(self.connection)
self.args = args
def list_artifacts(self):
alltypes = True
artifact_types = self.artifact_types()
args = self.args
mls = self.mls
for atype in artifact_types:
if args[atype]:
alltypes = False
for atype in artifact_types:
if not alltypes and not args[atype]:
continue
if atype == 'database':
print("Databases:")
for db in mls.databases():
print("\t{0}".format(db))
elif atype == 'server':
alist = mls.http_servers()
if alist:
print("HTTP Servers:")
for server in alist:
print("\t{0}".format(server))
alist = mls.xdbc_servers()
if alist:
print("XDBC Servers:")
for server in alist:
print("\t{0}".format(server))
alist = mls.odbc_servers()
if alist:
print("ODBC Servers:")
for server in alist:
print("\t{0}".format(server))
alist = mls.webdav_servers()
if alist:
print("WebDAV Servers:")
for server in alist:
print("\t{0}".format(server))
elif atype == 'host':
alist = mls.hosts()
if alist:
print("Hosts:")
for host in alist:
print("\t{0}".format(host))
elif atype == 'user':
alist = mls.users()
if alist:
print("Users:")
for user in alist:
print("\t{0}".format(user))
elif atype == 'forest':
alist = mls.forests()
if alist:
print("Forests:")
for forest in alist:
print("\t{0}".format(forest))
elif atype == 'role':
alist = mls.roles()
if alist:
print("Roles:")
for role in alist:
print("\t{0}".format(role))
else:
print("Internal error: unexpected artifact type:", atype)
def show_artifact(self, artifact):
alltypes = True
artifact_types = self.artifact_types()
args = self.args
mls = self.mls
for atype in artifact_types:
if args[atype]:
alltypes = False
for atype in artifact_types:
if not alltypes and not args[atype]:
continue
if atype == 'database':
alist = mls.databases()
if artifact in alist:
prop = mls.database(artifact)
print(json.dumps(prop.marshal()))
sys.exit(0)
elif atype == 'server':
try:
servername,groupname = re.split(":", artifact)
except ValueError:
servername = artifact
groupname = "Default"
key = "{0}|{1}".format(groupname, servername)
alist = mls.http_servers()
if key in alist:
prop = mls.http_server(key)
print(json.dumps(prop.marshal()))
sys.exit(0)
alist = mls.odbc_servers()
if key in alist:
prop = mls.odbc_server(key)
print(json.dumps(prop.marshal()))
sys.exit(0)
alist = mls.xdbc_servers()
if key in alist:
prop = mls.xdbc_server(key)
print(json.dumps(prop.marshal()))
sys.exit(0)
alist = mls.webdav_servers()
if key in alist:
prop = mls.webdav_server(key)
print(json.dumps(prop.marshal()))
sys.exit(0)
elif atype == 'host':
alist = mls.hosts()
if artifact in alist:
prop = mls.host(artifact)
print(json.dumps(prop.marshal()))
sys.exit(0)
elif atype == 'user':
alist = mls.users()
if artifact in alist:
prop = mls.user(artifact)
print(json.dumps(prop.marshal()))
sys.exit(0)
elif atype == 'forest':
alist = mls.forests()
if artifact in alist:
prop = mls.forest(artifact)
print(json.dumps(prop.marshal()))
sys.exit(0)
elif atype == 'role':
alist = mls.roles()
if artifact in alist:
prop = mls.role(artifact)
print(json.dumps(prop.marshal()))
sys.exit(0)
else:
print("Internal error: unexpected artifact type:", atype)
print("No artifact named:", artifact)
def main():
props = Properties()
parser = argparse.ArgumentParser(
description="Dump MarkLogic server artifact properties.")
artifact_types = props.artifact_types()
parser.add_argument('artifact', metavar='artifact-name', nargs="?",
help='The name of an artifact (database, server, ...)')
parser.add_argument('hostname', metavar='host', nargs="?", default='localhost',
help='The host to query')
parser.add_argument('-u', '--credentials', default='admin:admin',
metavar='USER:PASS',
help='Admin user:pass for new cluster')
parser.add_argument('--debug', action='store_true',
help='Turn on debug logging')
for atype in artifact_types:
parser.add_argument("-{0}".format(artifact_types[atype]),
"--{0}".format(atype),
action='store_true',
help='Select only ' + atype + ' artifacts')
args = vars(parser.parse_args())
props.connect(args)
if args['artifact'] is None:
props.list_artifacts()
else:
props.show_artifact(args['artifact'])
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated by generateDS.py.
#
import sys
import getopt
import re as re_
import base64
import datetime as datetime_
etree_ = None
Verbose_import_ = False
(
XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError(
"Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser()
doc = etree_.parse(*args, **kwargs)
return doc
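# Illustrative note (not generated code): parsexml_ accepts whatever
# etree.parse() accepts, typically a file name or file-like object, e.g.
#   doc = parsexml_('input.xml')
#   root = doc.getroot()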
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError as exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(datetime_.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime_.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
if not input_data:
return ''
else:
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return ('%.15f' % input_data).rstrip('0')
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(
node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return input_data
def gds_validate_datetime(self, input_data, node, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_datetime(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
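    # Illustrative round-trip sketch (not part of the generated code); it assumes the
    # datetime_/lxml imports made near the top of this module:
    #
    #   dt = GeneratedsSuper.gds_parse_datetime('2014-05-01T12:30:00+02:00')
    #   GeneratedsSuper().gds_format_datetime(dt)  # -> '2014-05-01T12:30:00+02:00'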
def gds_validate_date(self, input_data, node, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = '%04d-%02d-%02d' % (
input_data.year,
input_data.month,
input_data.day,
)
try:
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
except AttributeError:
pass
return _svalue
@classmethod
def gds_parse_date(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
dt = dt.replace(tzinfo=tz)
return dt.date()
def gds_validate_time(self, input_data, node, input_name=''):
return input_data
def gds_format_time(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_time(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt.time()
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
@classmethod
def gds_reverse_node_mapping(cls, mapping):
return dict(((v, k) for k, v in mapping.iteritems()))
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
if pretty_print:
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
if not inStr:
return ''
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            s1 = '"%s"' % s1.replace('"', '&quot;')
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
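# Illustrative behaviour of the two quoting helpers above (not part of the
# generated code):
#
#   quote_xml('a < b & c')   # -> 'a &lt; b &amp; c'
#   quote_attrib('a < b')    # -> '"a &lt; b"'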
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
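# Illustrative call (assuming an lxml element whose document binds the 'xsi'
# prefix; the return values shown are examples only):
#
#   find_attr_value_('xsi:type', node)   # -> e.g. 'target:Car', or None if absent
#   find_attr_value_('id', node)         # plain, unprefixed attribute lookup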
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if XMLParser_import_library == XMLParser_import_lxml:
msg = '%s (element %s/line %d)' % (
msg, node.tag, node.sourceline, )
else:
msg = '%s (element %s)' % (msg, node.tag, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace, pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, namespace, name, pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' % (
self.name, base64.b64encode(self.value), self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s",\n' % (
self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class carrierType(GeneratedsSuper):
member_data_items_ = [
MemberSpec_('fleet', 'Vehicle', 1),
]
subclass = None
superclass = None
def __init__(self, fleet=None):
if fleet is None:
self.fleet = []
else:
self.fleet = fleet
def factory(*args_, **kwargs_):
if carrierType.subclass:
return carrierType.subclass(*args_, **kwargs_)
else:
return carrierType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_fleet(self): return self.fleet
def set_fleet(self, fleet): self.fleet = fleet
def add_fleet(self, value): self.fleet.append(value)
def insert_fleet(self, index, value): self.fleet[index] = value
def hasContent_(self):
if (
self.fleet
):
return True
else:
return False
def export(self, outfile, level, namespace_='target:', name_='carrierType', namespacedef_='xmlns:target="http://cars.example.com/schema"', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='carrierType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='target:', name_='carrierType'):
pass
def exportChildren(self, outfile, level, namespace_='target:', name_='carrierType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for fleet_ in self.fleet:
fleet_.export(outfile, level, namespace_, name_='fleet', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='carrierType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('fleet=[\n')
level += 1
for fleet_ in self.fleet:
showIndent(outfile, level)
outfile.write('model_.Vehicle(\n')
fleet_.exportLiteral(outfile, level, name_='Vehicle')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'fleet':
type_name_ = child_.attrib.get(
'{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <fleet> element')
self.fleet.append(obj_)
# end class carrierType
class Vehicle(GeneratedsSuper):
member_data_items_ = [
]
subclass = None
superclass = None
def __init__(self, extensiontype_=None):
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if Vehicle.subclass:
return Vehicle.subclass(*args_, **kwargs_)
else:
return Vehicle(*args_, **kwargs_)
factory = staticmethod(factory)
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='target:', name_='Vehicle', namespacedef_='xmlns:target="http://cars.example.com/schema"', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Vehicle')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='target:', name_='Vehicle'):
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
pass
def exportChildren(self, outfile, level, namespace_='target:', name_='Vehicle', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='Vehicle'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class Vehicle
class Car(Vehicle):
member_data_items_ = [
]
subclass = None
superclass = Vehicle
def __init__(self):
super(Car, self).__init__()
pass
def factory(*args_, **kwargs_):
if Car.subclass:
return Car.subclass(*args_, **kwargs_)
else:
return Car(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
super(Car, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='target:', name_='Car', namespacedef_='xmlns:target="http://cars.example.com/schema"', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Car')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='target:', name_='Car'):
super(Car, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Car')
def exportChildren(self, outfile, level, namespace_='target:', name_='Car', fromsubclass_=False, pretty_print=True):
super(Car, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
pass
def exportLiteral(self, outfile, level, name_='Car'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(Car, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Car, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
super(Car, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(Car, self).buildChildren(child_, node, nodeName_, True)
pass
# end class Car
class Plane(Vehicle):
member_data_items_ = [
]
subclass = None
superclass = Vehicle
def __init__(self):
super(Plane, self).__init__()
pass
def factory(*args_, **kwargs_):
if Plane.subclass:
return Plane.subclass(*args_, **kwargs_)
else:
return Plane(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
super(Plane, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='target:', name_='Plane', namespacedef_='xmlns:target="http://cars.example.com/schema"', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Plane')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='target:', name_='Plane'):
super(Plane, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Plane')
def exportChildren(self, outfile, level, namespace_='target:', name_='Plane', fromsubclass_=False, pretty_print=True):
super(Plane, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
pass
def exportLiteral(self, outfile, level, name_='Plane'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(Plane, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Plane, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
super(Plane, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(Plane, self).buildChildren(child_, node, nodeName_, True)
pass
# end class Plane
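# Illustrative sketch of building a carrier tree in code rather than from XML
# (not part of the generated module; 'sys' is assumed to be imported at the top
# of this file):
#
#   carrier = carrierType()
#   carrier.add_fleet(Car())
#   carrier.add_fleet(Plane())
#   carrier.export(sys.stdout, 0, name_='carrier')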
GDSClassesMapping = {
'fleet': Vehicle,
'carrier': carrierType,
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName, silence=False):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'carrier'
rootClass = carrierType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
## if not silence:
## sys.stdout.write('<?xml version="1.0" ?>\n')
## rootObj.export(
## sys.stdout, 0, name_=rootTag,
## namespacedef_='xmlns:target="http://cars.example.com/schema"',
## pretty_print=True)
return rootObj
def parseEtree(inFileName, silence=False):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'carrier'
rootClass = carrierType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
mapping = {}
rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping)
reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
## if not silence:
## content = etree_.tostring(
## rootElement, pretty_print=True,
## xml_declaration=True, encoding="utf-8")
## sys.stdout.write(content)
## sys.stdout.write('\n')
return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString, silence=False):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
roots = get_root_tag(rootNode)
rootClass = roots[1]
if rootClass is None:
rootClass = carrierType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
## if not silence:
## sys.stdout.write('<?xml version="1.0" ?>\n')
## rootObj.export(
## sys.stdout, 0, name_="carrier",
## namespacedef_='xmlns:target="http://cars.example.com/schema"')
return rootObj
def parseLiteral(inFileName, silence=False):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'carrier'
rootClass = carrierType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
## if not silence:
## sys.stdout.write('#from abstract_type2_sup import *\n\n')
## sys.stdout.write('import abstract_type2_sup as model_\n\n')
## sys.stdout.write('rootObj = model_.rootTag(\n')
## rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
## sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"Car",
"Plane",
"Vehicle",
"carrierType"
]
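# Illustrative usage sketch (not part of the generated module); assumes an input
# file 'carrier.xml' whose root element maps to carrierType:
#
#   rootObj = parse('carrier.xml')
#   for vehicle in rootObj.get_fleet():
#       print(type(vehicle).__name__)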
|
|
# This file is part of COFFEE
#
# COFFEE is Copyright (c) 2014, Imperial College London.
# Please see the AUTHORS file in the main source directory for
# a full list of copyright holders. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The name of Imperial College London or that of other
# contributors may not be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTERS
# ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
"""COFFEE's autotuning system."""
from base import *
from vectorizer import vect_roundup
import subprocess
import os
import tempfile
class Autotuner(object):
_code_template = """
// This file was automatically generated by COFFEE for kernels autotuning.
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
// Timing
#include <stdint.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
// Firedrake headers
#include "firedrake_geometry.h"
%(vect_header)s
#define VECTOR_ALIGN %(vect_align)d
%(blas_header)s
%(blas_namespace)s
#define RESOLUTION %(resolution)d
#define TOLERANCE 0.000000001
#define PRINT_ARRAY(ARR, SZ) do { \\
printf("ARR: "); \\
for (int k = 0; k < SZ; ++k) \\
printf("%%e ", ARR[k]); \\
printf("\\n"); \\
} while (0);
static inline long stamp()
{
struct timespec tv;
clock_gettime(CLOCK_MONOTONIC, &tv);
return tv.tv_sec * 1000 * 1000 * 1000 + tv.tv_nsec;
}
#ifdef DEBUG
static int compare_1d(double A1[%(cols)s], double A2[%(cols)s], FILE* out)
{
for(int i = 0; i < %(cols)s; i++)
{
if(fabs(A1[i] - A2[i]) > TOLERANCE)
{
fprintf(out, "i=%%d, A1[i]=%%e, A2[i]=%%e\\n", i, A1[i], A2[i]);
return 1;
}
}
return 0;
}
static int compare_2d(double A1[%(rows)s][%(cols)s], double A2[%(rows)s][%(cols)s], FILE* out)
{
for(int i = 0; i < %(rows)s; i++)
{
for(int j = 0; j < %(cols)s; j++)
{
if(fabs(A1[i][j] - A2[i][j]) > TOLERANCE)
{
fprintf(out, "i=%%d, j=%%d, A1[i][j]=%%e, A2[i][j]=%%e\\n", i, j, A1[i][j], A2[i][j]);
return 1;
}
}
}
return 0;
}
#endif
%(globals)s
%(variants)s
%(externc_open)s
int main()
{
int i = 0, c = 0;
int counters[%(nvariants)d] = {0};
char* all_opts[%(nvariants)d];
/* Call kernel variants */
%(call_variants)s
/* Find the fastest variant */
int best = 0;
for(int j = 0; j < %(nvariants)d; j++)
{
if(counters[j] > counters[best])
{
best = j;
}
}
/* Output all variants */
FILE* out = fopen("%(filename)s", "a");
fprintf(out, "COFFEE Autotuner: cost of variants:\\n");
for (int j = 0; j < %(nvariants)d; j++)
{
fprintf(out, " Variant %%d: %%d\\n", j, counters[j]);
}
/* Output base, licm1, and fastest variants */
/*
fprintf(out, "Summary:\\n");
fprintf(out, "Base variant: %%d \\n", counters[0]);
fprintf(out, "Licm1 variant: %%d \\n", counters[1]);
*/
fprintf(out, "Fastest variant ID=%%d: %%d \\n", best, counters[best]);
fprintf(out, "***Chosen optimizations set: %%s***\\n", all_opts[best]);
#ifdef DEBUG
%(debug_code)s
#endif
fclose(out);
return best;
}
%(externc_close)s
"""
_coeffs_template = """
// Initialize coefficients
for (int j = 0; j < %(ndofs)d; j++)
{
%(init_coeffs)s
}
"""
_run_template = """
// Code variant %(iter)d call
srand (1);
all_opts[%(iter)d] = "%(used_opts)s";
long start%(iter)d, end%(iter)d;
%(decl_params)s
start%(iter)d = stamp();
end%(iter)d = start%(iter)d + RESOLUTION;
#ifndef DEBUG
#pragma forceinline
while (stamp() < end%(iter)d)
#else
while (c < 1)
#endif
{
// Initialize coordinates
for (int j = 0; j < %(ncoords)d; j++)
{
#ifndef DEBUG
vertex_coordinates_%(iter)d[j][0] = (double)rand();
#else
vertex_coordinates_%(iter)d[j][0] = (double)(rand()%%10);
#endif
}
%(init_coeffs)s
#pragma noinline
%(call_variant)s
c++;
}
counters[i++] = c;
c = 0;
"""
_debug_template = """
if(%(call_debug)s(A_0, A_%(iter)s, out))
{
fprintf(out, "COFFEE Warning: code variants 0 and %(iter)s differ\\n");
}
"""
_filename = "autotuning_code"
_coord_size = {
'compute_jacobian_interval_1d': 2,
'compute_jacobian_interval_2d': 4,
'compute_jacobian_interval_3d': 6,
'compute_jacobian_quad_2d': 8,
'compute_jacobian_quad_3d': 12,
'compute_jacobian_triangle_2d': 6,
'compute_jacobian_triangle_3d': 9,
'compute_jacobian_tetrahedron_3d': 12,
'compute_jacobian_prism_3d': 18,
'compute_jacobian_interval_int_1d': 4,
'compute_jacobian_interval_int_2d': 8,
'compute_jacobian_quad_int_2d': 16,
'compute_jacobian_quad_int_3d': 24,
'compute_jacobian_interval_int_3d': 12,
'compute_jacobian_triangle_int_2d': 12,
'compute_jacobian_triangle_int_3d': 18,
'compute_jacobian_tetrahedron_int_3d': 24,
'compute_jacobian_prism_int_3d': 36
}
"""Create and execute a C file in which multiple variants of the same kernel
are executed to determine the fastest implementation."""
def __init__(self, variants, include, compiler, isa, blas):
"""Initialize the autotuner.
:arg variants: list of (ast, used_optimizations) for autotuning
:arg include: list of directories to be searched for header files
:arg compiler: backend compiler info
:arg isa: instruction set architecture info
:arg blas: COFFEE's dense linear algebra library info
"""
self.variants = variants
self.include = include
self.compiler = compiler
self.isa = isa
self.blas = blas
# Set the directory in which COFFEE will dump any relevant information
coffee_dir = os.path.join(tempfile.gettempdir(), "coffee-dump-uid%s" % os.getuid())
# Wrap in try/except to protect against race conditions in parallel
try:
if not os.path.exists(coffee_dir):
os.makedirs(coffee_dir)
except OSError:
pass
# Set the directory where the autotuner will dump its output
kernel_name = variants[0][0].children[1].name
tempfile.tempdir = coffee_dir
self.coffee_dir = tempfile.mkdtemp(suffix="_tune_%s" % kernel_name)
tempfile.tempdir = None
def _retrieve_coords_size(self, kernel):
"""Return coordinates array size"""
for i in Autotuner._coord_size:
if i in kernel:
return Autotuner._coord_size[i]
raise RuntimeError("COFFEE: Autotuner does not know how to expand the jacobian")
def _retrieve_coeff_size(self, root, coeffs):
"""Return coefficient sizes, rounded up to multiple of vector length"""
def find_coeff_size(node, coeff, loop_sizes):
if isinstance(node, FlatBlock):
return 0
elif isinstance(node, Symbol):
if node.symbol == coeff:
return loop_sizes[node.rank[0]] if node.rank[0] != '0' else 1
return 0
elif isinstance(node, For):
loop_sizes[node.dim] = node.size
for n in node.children:
size = find_coeff_size(n, coeff, loop_sizes)
if size:
return size
coeffs_size = {}
for c in coeffs:
size = find_coeff_size(root, c, {})
coeffs_size[c] = vect_roundup(size if size else 1) # Else handles constants case
return coeffs_size
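    # Illustrative example of the helper above: for a coefficient 'w0' whose rank
    # index runs over a loop of size 3, _retrieve_coeff_size returns
    # {'w0': vect_roundup(3)}, i.e. the extent rounded up to a multiple of the
    # vector length.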
def _run(self, src):
"""Compile and run the generated test cases. Return the fastest kernel version."""
        # If requested, run the autotuner in debug mode: in that case, a log file
        # is written reporting the result of the numerical comparison of the
        # element matrices as evaluated by the various code variants
debug_mode = [] if not os.environ.get('COFFEE_DEBUG') else ["-DDEBUG"]
fext = "c"
cppargs = ["-std=gnu99", "-O3", self.compiler['native_opt']] + debug_mode + \
["-I%s" % d for d in self.include]
ldargs = ["-lrt", "-lm"]
if self.compiler:
cppargs += [self.compiler[self.isa['inst_set']]]
cppargs += [self.compiler['ipo']]
if self.blas:
blas_dir = self.blas['dir']
if blas_dir:
cppargs += ["-I%s/include" % blas_dir]
ldargs += ["-L%s/lib" % blas_dir]
ldargs += self.blas['link']
if self.blas['name'] == 'eigen':
fext = "cpp"
# Dump autotuning source out to a file
filename = os.path.join(self.coffee_dir, "%s.%s" % (Autotuner._filename, fext))
with file(filename, 'w') as f:
f.write(src)
objname = os.path.join(self.coffee_dir, Autotuner._filename)
logfile = os.path.join(self.coffee_dir, "%s.log" % Autotuner._filename)
errfile = os.path.join(self.coffee_dir, "%s.err" % Autotuner._filename)
cc = [self.compiler["cmd"], filename] + cppargs + ['-o', objname] + ldargs
with file(logfile, "a") as log:
with file(errfile, "a") as err:
log.write("Compilation command:\n")
log.write(" ".join(cc))
log.write("\n\n")
# Compile the source code
try:
subprocess.check_call(cc, stderr=err, stdout=log)
except:
raise RuntimeError("""Unable to compile autotuner file
See %s for more info about the error""" % errfile)
# Execute the autotuner
try:
return subprocess.call([objname], stderr=err, stdout=log)
except:
raise RuntimeError("""Unable to run the autotuner
See %s for more info about the error""" % logfile)
def tune(self, resolution):
"""Return the fastest kernel implementation.
:arg resolution: the amount of time in milliseconds a kernel is run."""
        is_global = lambda s: isinstance(s, Decl) and 'static' in s.qual and 'const' in s.qual
# First, determine sizes of parameters in the non-transformed variant
non_transf_ast = self.variants[0][0]
fun_decl = non_transf_ast.children[1]
# Local tensor size
tensor_rank = fun_decl.args[0].sym.rank
lt_rows, lt_cols = tensor_rank[0], tensor_rank[-1]
# Coordinates size
coords_size = self._retrieve_coords_size(str(non_transf_ast))
# Coefficients size
coeffs_syms = [f.sym.symbol.replace('*', '') for f in fun_decl.args[2:]]
coeffs_size = self._retrieve_coeff_size(fun_decl, coeffs_syms)
        # Create the individual test cases
call_variants, debug_code, global_decls = ([], [], [])
for i, variant in enumerate(self.variants):
ast, used_opts = variant
            # Create fictitious kernel parameters
# Here, we follow the "standard" convention:
# - The first parameter is the local tensor (lt)
# - The second parameter is the coordinates field (coords)
# - (Optional) any additional parameter is a generic field,
# whose size is bound to the number of dofs in the kernel
fun_decl = ast.children[1]
fun_decl.pred.remove('inline')
lt_arg = fun_decl.args[0].sym
lt_sym = lt_arg.symbol + "_%d" % i
lt_init = "".join("{" for r in lt_arg.rank) + "0.0" + \
"".join("}" for r in lt_arg.rank)
lt_align = self.compiler['align']("VECTOR_ALIGN")
if lt_arg.rank[-1] % self.isa["dp_reg"]:
lt_align = ""
lt_decl = "double " + lt_sym + "".join(["[%d]" % r for r in lt_arg.rank]) + \
lt_align + " = " + lt_init
# Coordinates
coords_sym = fun_decl.args[1].sym.symbol.replace('*', '')
coords_decl = "double " + coords_sym + "_%d[%d][1]" % (i, coords_size)
# Coefficients
coeffs_syms = [f.sym.symbol.replace('*', '') for f in fun_decl.args[2:]]
coeffs_types = [f.typ.replace('*', '') for f in fun_decl.args[2:]]
coeffs_decl = ["%s " % t + f + "_%d[%d][1]" % (i, coeffs_size[f]) for t, f
in zip(coeffs_types, coeffs_syms)]
# Adjust kernel's signature
fun_decl.args[1].typ = "double"
fun_decl.args[1].sym = Symbol(coords_sym, ("%d" % coords_size, 1))
for d, f in zip(fun_decl.args[2:], coeffs_syms):
d.typ = "double"
d.sym = Symbol(f, ("%d" % coeffs_size[f], 1))
            # Adjust symbol names for kernel invocation
coords_sym += "_%d" % i
coeffs_syms = [f + "_%d" % i for f in coeffs_syms]
# Adjust kernel name
fun_decl.name = fun_decl.name + "_%d" % i
# Remove any static const declaration from the kernel (they are declared
# just once at the beginning of the file, to reduce code size)
global_decls = "\n".join([str(s) for s in fun_decl.body if is_global(s)])
fun_decl.body = [s for s in fun_decl.body if not is_global(s)]
# Initialize coefficients (if any)
init_coeffs = ""
if coeffs_syms:
wrap_coeffs = "#ifndef DEBUG\n %s\n#else\n %s\n#endif"
real_coeffs = ";\n ".join([f + "[j][0] = (double)rand();"
for f in coeffs_syms])
debug_coeffs = ";\n ".join([f + "[j][0] = (double)(rand()%10);"
for f in coeffs_syms])
init_coeffs = Autotuner._coeffs_template % {
'ndofs': min(coeffs_size.values()),
'init_coeffs': wrap_coeffs % (real_coeffs, debug_coeffs)
}
# Instantiate code variant
params = ", ".join([lt_sym, coords_sym] + coeffs_syms)
call_variants.append(Autotuner._run_template % {
'iter': i,
'used_opts': str(used_opts),
'decl_params': ";\n ".join([lt_decl, coords_decl] + coeffs_decl) + ";",
'ncoords': coords_size,
'init_coeffs': init_coeffs,
'call_variant': fun_decl.name + "(%s);" % params
})
# Create debug code, apart from the BLAS case
if not used_opts.get('blas'):
debug_code.append(Autotuner._debug_template % {
'iter': i,
'call_debug': "compare_2d"
})
# Instantiate the autotuner skeleton
kernels_code = "\n".join(["/* Code variant %d */" % i + str(k.children[1])
for i, k in enumerate(zip(*self.variants)[0])])
code_template = Autotuner._code_template % {
'filename': os.path.join(self.coffee_dir, "%s.out" % Autotuner._filename),
'rows': lt_rows,
'cols': lt_cols,
'vect_header': self.compiler['vect_header'],
'vect_align': self.isa['alignment'],
'blas_header': self.blas.get('header'),
'blas_namespace': self.blas.get('namespace'),
'resolution': resolution,
'globals': global_decls,
'variants': kernels_code,
'nvariants': len(self.variants),
'call_variants': "".join(call_variants),
'externc_open': 'extern "C" {' if self.blas.get('name') in ['eigen'] else "",
'externc_close': "}" if self.blas.get('name') in ['eigen'] else "",
'debug_code': "".join(debug_code)
}
# Clean code from spurious pragmas
code_template = '\n'.join(l for l in code_template.split("\n")
if not l.strip().startswith('#pragma coffee'))
return self._run(code_template)
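# Illustrative driver sketch (not part of COFFEE itself); 'variants', 'include',
# 'compiler', 'isa' and 'blas' stand for the structures that COFFEE's optimizer
# layer builds before autotuning:
#
#   tuner = Autotuner(variants, include, compiler, isa, blas)
#   fastest = tuner.tune(resolution)   # index of the fastest code variant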
|
|
#!/usr/bin/env python
#/******************************************************************************
# * $Id: ogrinfo.py 23329 2011-11-05 21:09:07Z rouault $
# *
# * Project: OpenGIS Simple Features Reference Implementation
# * Purpose: Python port of a simple client for viewing OGR driver data.
# * Author: Even Rouault, <even dot rouault at mines dash paris dot org>
# *
# * Port from ogrinfo.cpp whose author is Frank Warmerdam
# *
# ******************************************************************************
# * Copyright (c) 2010, Even Rouault
# * Copyright (c) 1999, Frank Warmerdam
# *
# * Permission is hereby granted, free of charge, to any person obtaining a
# * copy of this software and associated documentation files (the "Software"),
# * to deal in the Software without restriction, including without limitation
# * the rights to use, copy, modify, merge, publish, distribute, sublicense,
# * and/or sell copies of the Software, and to permit persons to whom the
# * Software is furnished to do so, subject to the following conditions:
# *
# * The above copyright notice and this permission notice shall be included
# * in all copies or substantial portions of the Software.
# *
# * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# * DEALINGS IN THE SOFTWARE.
# ****************************************************************************/
# Note : this is the most direct port of ogrinfo.cpp possible
# It could be made much more Python'ish !
import sys
try:
from osgeo import gdal
from osgeo import ogr
except:
import gdal
import ogr
bReadOnly = False
bVerbose = True
bSummaryOnly = False
nFetchFID = ogr.NullFID
papszOptions = None
def EQUAL(a, b):
return a.lower() == b.lower()
#/************************************************************************/
#/* main() */
#/************************************************************************/
def main(argv = None):
global bReadOnly
global bVerbose
global bSummaryOnly
global nFetchFID
global papszOptions
pszWHERE = None
pszDataSource = None
papszLayers = None
poSpatialFilter = None
nRepeatCount = 1
bAllLayers = False
pszSQLStatement = None
pszDialect = None
options = {}
if argv is None:
argv = sys.argv
argv = ogr.GeneralCmdLineProcessor( argv )
#/* -------------------------------------------------------------------- */
#/* Processing command line arguments. */
#/* -------------------------------------------------------------------- */
if argv is None:
return 1
nArgc = len(argv)
iArg = 1
while iArg < nArgc:
if EQUAL(argv[iArg],"--utility_version"):
print("%s is running against GDAL %s" %
(argv[0], gdal.VersionInfo("RELEASE_NAME")))
return 0
elif EQUAL(argv[iArg],"-ro"):
bReadOnly = True
elif EQUAL(argv[iArg],"-q") or EQUAL(argv[iArg],"-quiet"):
bVerbose = False
elif EQUAL(argv[iArg],"-fid") and iArg < nArgc-1:
iArg = iArg + 1
nFetchFID = int(argv[iArg])
elif EQUAL(argv[iArg],"-spat") and iArg + 4 < nArgc:
oRing = ogr.Geometry(ogr.wkbLinearRing)
oRing.AddPoint( float(argv[iArg+1]), float(argv[iArg+2]) )
oRing.AddPoint( float(argv[iArg+1]), float(argv[iArg+4]) )
oRing.AddPoint( float(argv[iArg+3]), float(argv[iArg+4]) )
oRing.AddPoint( float(argv[iArg+3]), float(argv[iArg+2]) )
oRing.AddPoint( float(argv[iArg+1]), float(argv[iArg+2]) )
poSpatialFilter = ogr.Geometry(ogr.wkbPolygon)
poSpatialFilter.AddGeometry(oRing)
iArg = iArg + 4
elif EQUAL(argv[iArg],"-where") and iArg < nArgc-1:
iArg = iArg + 1
pszWHERE = argv[iArg]
elif EQUAL(argv[iArg],"-sql") and iArg < nArgc-1:
iArg = iArg + 1
pszSQLStatement = argv[iArg]
elif EQUAL(argv[iArg],"-dialect") and iArg < nArgc-1:
iArg = iArg + 1
pszDialect = argv[iArg]
elif EQUAL(argv[iArg],"-rc") and iArg < nArgc-1:
iArg = iArg + 1
nRepeatCount = int(argv[iArg])
elif EQUAL(argv[iArg],"-al"):
bAllLayers = True
elif EQUAL(argv[iArg],"-so") or EQUAL(argv[iArg],"-summary"):
bSummaryOnly = True
elif len(argv[iArg]) > 8 and EQUAL(argv[iArg][0:8],"-fields="):
            options['DISPLAY_FIELDS'] = argv[iArg][8:len(argv[iArg])]
elif len(argv[iArg]) > 6 and EQUAL(argv[iArg][0:6],"-geom="):
options['DISPLAY_GEOMETRY'] = argv[iArg][6:len(argv[iArg])]
elif argv[iArg][0] == '-':
return Usage()
elif pszDataSource is None:
pszDataSource = argv[iArg]
else:
if papszLayers is None:
papszLayers = []
papszLayers.append( argv[iArg] )
bAllLayers = False
iArg = iArg + 1
if pszDataSource is None:
return Usage()
#/* -------------------------------------------------------------------- */
#/* Open data source. */
#/* -------------------------------------------------------------------- */
poDS = None
poDriver = None
poDS = ogr.Open( pszDataSource, not bReadOnly )
if poDS is None and not bReadOnly:
poDS = ogr.Open( pszDataSource, False )
if poDS is not None and bVerbose:
print( "Had to open data source read-only." )
bReadOnly = True
#/* -------------------------------------------------------------------- */
#/* Report failure */
#/* -------------------------------------------------------------------- */
if poDS is None:
print( "FAILURE:\n"
"Unable to open datasource `%s' with the following drivers." % pszDataSource )
for iDriver in range(ogr.GetDriverCount()):
print( " -> %s" % ogr.GetDriver(iDriver).GetName() )
return 1
poDriver = poDS.GetDriver()
#/* -------------------------------------------------------------------- */
#/* Some information messages. */
#/* -------------------------------------------------------------------- */
if bVerbose:
print( "INFO: Open of `%s'\n"
" using driver `%s' successful." % (pszDataSource, poDriver.GetName()) )
poDS_Name = poDS.GetName()
if str(type(pszDataSource)) == "<type 'unicode'>" and str(type(poDS_Name)) == "<type 'str'>":
poDS_Name = unicode(poDS_Name, "utf8")
if bVerbose and pszDataSource != poDS_Name:
print( "INFO: Internal data source name `%s'\n"
" different from user name `%s'." % (poDS_Name, pszDataSource ))
#/* -------------------------------------------------------------------- */
#/* Special case for -sql clause. No source layers required. */
#/* -------------------------------------------------------------------- */
if pszSQLStatement is not None:
poResultSet = None
nRepeatCount = 0 #// skip layer reporting.
if papszLayers is not None:
print( "layer names ignored in combination with -sql." )
poResultSet = poDS.ExecuteSQL( pszSQLStatement, poSpatialFilter,
pszDialect )
if poResultSet is not None:
if pszWHERE is not None:
if poResultSet.SetAttributeFilter( pszWHERE ) != 0:
print("FAILURE: SetAttributeFilter(%s) failed." % pszWHERE)
return 1
ReportOnLayer( poResultSet, None, None, options )
poDS.ReleaseResultSet( poResultSet )
#gdal.Debug( "OGR", "GetLayerCount() = %d\n", poDS.GetLayerCount() )
for iRepeat in range(nRepeatCount):
if papszLayers is None:
#/* -------------------------------------------------------------------- */
#/* Process each data source layer. */
#/* -------------------------------------------------------------------- */
for iLayer in range(poDS.GetLayerCount()):
poLayer = poDS.GetLayer(iLayer)
if poLayer is None:
print( "FAILURE: Couldn't fetch advertised layer %d!" % iLayer )
return 1
if not bAllLayers:
line = "%d: %s" % (iLayer+1, poLayer.GetLayerDefn().GetName())
if poLayer.GetLayerDefn().GetGeomType() != ogr.wkbUnknown:
line = line + " (%s)" % ogr.GeometryTypeToName( poLayer.GetLayerDefn().GetGeomType() )
print(line)
else:
if iRepeat != 0:
poLayer.ResetReading()
ReportOnLayer( poLayer, pszWHERE, poSpatialFilter, options )
else:
#/* -------------------------------------------------------------------- */
#/* Process specified data source layers. */
#/* -------------------------------------------------------------------- */
for papszIter in papszLayers:
poLayer = poDS.GetLayerByName(papszIter)
if poLayer is None:
print( "FAILURE: Couldn't fetch requested layer %s!" % papszIter )
return 1
if iRepeat != 0:
poLayer.ResetReading()
ReportOnLayer( poLayer, pszWHERE, poSpatialFilter, options )
#/* -------------------------------------------------------------------- */
#/* Close down. */
#/* -------------------------------------------------------------------- */
poDS.Destroy()
return 0
#/************************************************************************/
#/* Usage() */
#/************************************************************************/
def Usage():
print( "Usage: ogrinfo [--help-general] [-ro] [-q] [-where restricted_where]\n"
" [-spat xmin ymin xmax ymax] [-fid fid]\n"
" [-sql statement] [-al] [-so] [-fields={YES/NO}]\n"
" [-geom={YES/NO/SUMMARY}][--formats]\n"
" datasource_name [layer [layer ...]]")
return 1
#/************************************************************************/
#/* ReportOnLayer() */
#/************************************************************************/
def ReportOnLayer( poLayer, pszWHERE, poSpatialFilter, options ):
poDefn = poLayer.GetLayerDefn()
#/* -------------------------------------------------------------------- */
#/* Set filters if provided. */
#/* -------------------------------------------------------------------- */
if pszWHERE is not None:
if poLayer.SetAttributeFilter( pszWHERE ) != 0:
print("FAILURE: SetAttributeFilter(%s) failed." % pszWHERE)
return
if poSpatialFilter is not None:
poLayer.SetSpatialFilter( poSpatialFilter )
#/* -------------------------------------------------------------------- */
#/* Report various overall information. */
#/* -------------------------------------------------------------------- */
print( "" )
print( "Layer name: %s" % poDefn.GetName() )
if bVerbose:
print( "Geometry: %s" % ogr.GeometryTypeToName( poDefn.GetGeomType() ) )
print( "Feature Count: %d" % poLayer.GetFeatureCount() )
oExt = poLayer.GetExtent(True, can_return_null = True)
if oExt is not None:
print("Extent: (%f, %f) - (%f, %f)" % (oExt[0], oExt[1], oExt[2], oExt[3]))
if poLayer.GetSpatialRef() is None:
pszWKT = "(unknown)"
else:
pszWKT = poLayer.GetSpatialRef().ExportToPrettyWkt()
print( "Layer SRS WKT:\n%s" % pszWKT )
if len(poLayer.GetFIDColumn()) > 0:
print( "FID Column = %s" % poLayer.GetFIDColumn() )
if len(poLayer.GetGeometryColumn()) > 0:
print( "Geometry Column = %s" % poLayer.GetGeometryColumn() )
for iAttr in range(poDefn.GetFieldCount()):
poField = poDefn.GetFieldDefn( iAttr )
print( "%s: %s (%d.%d)" % ( \
poField.GetNameRef(), \
poField.GetFieldTypeName( poField.GetType() ), \
poField.GetWidth(), \
poField.GetPrecision() ))
#/* -------------------------------------------------------------------- */
#/* Read, and dump features. */
#/* -------------------------------------------------------------------- */
poFeature = None
if nFetchFID == ogr.NullFID and not bSummaryOnly:
poFeature = poLayer.GetNextFeature()
while poFeature is not None:
DumpReadableFeature(poFeature, options)
poFeature = poLayer.GetNextFeature()
elif nFetchFID != ogr.NullFID:
poFeature = poLayer.GetFeature( nFetchFID )
if poFeature is None:
print( "Unable to locate feature id %d on this layer." % nFetchFID )
else:
DumpReadableFeature(poFeature, options)
return
def DumpReadableFeature( poFeature, options = None ):
poDefn = poFeature.GetDefnRef()
print("OGRFeature(%s):%ld" % (poDefn.GetName(), poFeature.GetFID() ))
if 'DISPLAY_FIELDS' not in options or EQUAL(options['DISPLAY_FIELDS'], 'yes'):
for iField in range(poDefn.GetFieldCount()):
poFDefn = poDefn.GetFieldDefn(iField)
line = " %s (%s) = " % ( \
poFDefn.GetNameRef(), \
ogr.GetFieldTypeName(poFDefn.GetType()) )
if poFeature.IsFieldSet( iField ):
line = line + "%s" % (poFeature.GetFieldAsString( iField ) )
else:
line = line + "(null)"
print(line)
if poFeature.GetStyleString() is not None:
if 'DISPLAY_STYLE' not in options or EQUAL(options['DISPLAY_STYLE'], 'yes'):
print(" Style = %s" % GetStyleString() )
poGeometry = poFeature.GetGeometryRef()
if poGeometry is not None:
if 'DISPLAY_GEOMETRY' not in options or not EQUAL(options['DISPLAY_GEOMETRY'], 'no'):
DumpReadableGeometry( poGeometry, " ", options)
print('')
return
def DumpReadableGeometry( poGeometry, pszPrefix, options ):
if pszPrefix == None:
pszPrefix = ""
if 'DISPLAY_GEOMETRY' in options and EQUAL(options['DISPLAY_GEOMETRY'], 'SUMMARY'):
line = ("%s%s : " % (pszPrefix, poGeometry.GetGeometryName() ))
eType = poGeometry.GetGeometryType()
if eType == ogr.wkbLineString or eType == ogr.wkbLineString25D:
line = line + ("%d points" % poGeometry.GetPointCount())
print(line)
elif eType == ogr.wkbPolygon or eType == ogr.wkbPolygon25D:
nRings = poGeometry.GetGeometryCount()
if nRings == 0:
line = line + "empty"
else:
poRing = poGeometry.GetGeometryRef(0)
line = line + ("%d points" % poRing.GetPointCount())
if nRings > 1:
line = line + (", %d inner rings (" % (nRings - 1))
for ir in range(0,nRings-1):
if ir > 0:
line = line + ", "
poRing = poGeometry.GetGeometryRef(ir+1)
line = line + ("%d points" % poRing.GetPointCount())
line = line + ")"
print(line)
elif eType == ogr.wkbMultiPoint or \
eType == ogr.wkbMultiPoint25D or \
eType == ogr.wkbMultiLineString or \
eType == ogr.wkbMultiLineString25D or \
eType == ogr.wkbMultiPolygon or \
eType == ogr.wkbMultiPolygon25D or \
eType == ogr.wkbGeometryCollection or \
eType == ogr.wkbGeometryCollection25D:
line = line + "%d geometries:" % poGeometry.GetGeometryCount()
print(line)
for ig in range(poGeometry.GetGeometryCount()):
subgeom = poGeometry.GetGeometryRef(ig)
from sys import version_info
if version_info >= (3,0,0):
exec('print("", end=" ")')
else:
exec('print "", ')
DumpReadableGeometry( subgeom, pszPrefix, options)
else:
print(line)
elif 'DISPLAY_GEOMETRY' not in options or EQUAL(options['DISPLAY_GEOMETRY'], 'yes') \
or EQUAL(options['DISPLAY_GEOMETRY'], 'WKT'):
print("%s%s" % (pszPrefix, poGeometry.ExportToWkt() ))
return
if __name__ == '__main__':
version_num = int(gdal.VersionInfo('VERSION_NUM'))
if version_num < 1800: # because of ogr.GetFieldTypeName
print('ERROR: Python bindings of GDAL 1.8.0 or later required')
sys.exit(1)
sys.exit(main( sys.argv ))
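# Illustrative invocations (assuming a data source named 'parcels.shp'):
#
#   python ogrinfo.py parcels.shp              # list layer names only
#   python ogrinfo.py -al -so parcels.shp      # summary report for every layer
#   python ogrinfo.py -sql "SELECT * FROM parcels" parcels.shp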
|
|
'''
Provides both a trailing spaces highlighter and a deletion command.
See README.md for details.
@author: Jean-Denis Vauguet <jd@vauguet.fr>, Oktay Acikalin <ok@ryotic.de>
@license: MIT (http://www.opensource.org/licenses/mit-license.php)
@since: 2011-02-25
'''
import sublime
import sublime_plugin
import difflib
import codecs
import re
from os.path import isfile
DEFAULT_MAX_FILE_SIZE = 1048576
DEFAULT_IS_ENABLED = True
DEFAULT_NON_VISIBLE_HIGHLIGHTING = 500
DEFAULT_UPDATE_INTERVAL = 250
DEFAULT_MODIFIED_LINES_ONLY = False
# Global settings object and flags.
# Flags duplicate some of the (core) JSON settings, in case the settings file has
# been corrupted or is empty (ST2 really dislikes that!)
ts_settings_filename = "trailing_spaces.sublime-settings"
ts_settings = None
trailing_spaces_live_matching = DEFAULT_IS_ENABLED
trailing_spaces_non_visible_highlighting = DEFAULT_NON_VISIBLE_HIGHLIGHTING
trailing_spaces_update_interval = DEFAULT_UPDATE_INTERVAL
trim_modified_lines_only = DEFAULT_MODIFIED_LINES_ONLY
trailing_spaces_syntax_ignore = []
startup_queue = []
on_disk = None
# dictionary of currently active view ids and last visible regions
active_views = {}
# Private: Loads settings and sets whether the plugin (live matching) is enabled.
#
# Returns nothing.
def plugin_loaded():
global ts_settings_filename, ts_settings, trailing_spaces_live_matching
global trailing_spaces_non_visible_highlighting, trailing_spaces_update_interval
global current_highlighting_scope, trim_modified_lines_only, startup_queue
global DEFAULT_COLOR_SCOPE_NAME, trailing_spaces_syntax_ignore
ts_settings = sublime.load_settings(ts_settings_filename)
trailing_spaces_live_matching = bool(ts_settings.get("trailing_spaces_enabled",
DEFAULT_IS_ENABLED))
    trailing_spaces_non_visible_highlighting = int(ts_settings.get("trailing_spaces_non_visible_highlighting",
                                                                    DEFAULT_NON_VISIBLE_HIGHLIGHTING))
trailing_spaces_update_interval = int(ts_settings.get("trailing_spaces_update_interval",
DEFAULT_UPDATE_INTERVAL))
current_highlighting_scope = ts_settings.get("trailing_spaces_highlight_color",
"region.redish")
DEFAULT_COLOR_SCOPE_NAME = current_highlighting_scope
trim_modified_lines_only = bool(ts_settings.get("trailing_spaces_modified_lines_only",
DEFAULT_MODIFIED_LINES_ONLY))
trailing_spaces_syntax_ignore = ts_settings.get('trailing_spaces_syntax_ignore', [])
if trailing_spaces_live_matching:
for view in startup_queue:
match_trailing_spaces(view)
else:
current_highlighting_scope = ""
if ts_settings.get("trailing_spaces_highlight_color") != current_highlighting_scope:
persist_settings()
# Private: Makes sure all timers are stopped.
#
# Returns nothing.
def plugin_unloaded():
# clear all active views to kill all timeouts
active_views.clear()
# Private: Updates user's settings with in-memory values.
#
# Allows for persistent settings from the menu.
#
# Returns nothing.
def persist_settings():
sublime.save_settings(ts_settings_filename)
# Private: Returns all regions within region that match regex.
#
# view - the view, you know
# region - the region to search
# regex - the regex pattern to search for
#
# Returns all matching regions within region.
def view_find_all_in_region(view, region, regex):
# find all matches in the region's text
text = view.substr(region)
matches = re.finditer(regex, text, re.MULTILINE)
# return the found positions translated to the region's starting position
return [sublime.Region(m.start() + region.begin(), m.end() + region.begin()) for m in matches]
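# Illustrative call (assuming a Sublime Text `view` object; the pattern below is
# a typical trailing-whitespace regex, not necessarily the configured one):
#
#   regions = view_find_all_in_region(view, view.visible_region(), "[ \t]+$")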
# Private: Get the regions matching trailing spaces.
#
# As the core regexp matches lines, the regions are, well, "per lines".
#
# view - the view, you know
#
# Returns both the list of regions which map to trailing spaces and the list of
# regions which are to be highlighted, as a list [matched, highlightable].
def find_trailing_spaces(view):
include_empty_lines = bool(ts_settings.get("trailing_spaces_include_empty_lines",
DEFAULT_IS_ENABLED))
include_current_line = bool(ts_settings.get("trailing_spaces_include_current_line",
DEFAULT_IS_ENABLED))
regexp = ts_settings.get("trailing_spaces_regexp") + "$"
if not include_empty_lines:
regexp = "(?<=\\S)%s$" % regexp
# find all matches in the currently visible region plus a little before and after
searched_region = view.visible_region()
searched_region.a = max(searched_region.a - trailing_spaces_non_visible_highlighting, 0)
searched_region.b = min(searched_region.b + trailing_spaces_non_visible_highlighting, view.size())
searched_region = view.line(searched_region) # align to line start and end
offending_lines = view_find_all_in_region(view, searched_region, regexp)
ignored_scopes = ",".join(ts_settings.get("trailing_spaces_scope_ignore", []))
filtered_lines = []
for region in offending_lines:
if ignored_scopes and view.match_selector(region.begin(), ignored_scopes):
continue
filtered_lines.append(region)
sel = view.sel()
line = len(sel) and view.line(sel[0].b)
if include_current_line or not line:
return [filtered_lines, filtered_lines]
else:
# find all matches in the current line and exclude them from highlighting
current_offenders = view_find_all_in_region(view, line, regexp)
highlightable = [r for r in filtered_lines if r not in current_offenders]
return [filtered_lines, highlightable]
# Private: Finds the freaking trailing spaces in the view and flags them as such!
#
# It will refresh highlighted regions as well. Does not execute if the
# document's size exceeds the file_max_size setting, or if it is fired in a view
# which does not hold a regular document (helper/build views and so on).
#
# view - the view, you know
#
# Returns nothing.
def match_trailing_spaces(view):
if ts_settings is None:
startup_queue.append(view)
return
# Silently pass ignored views.
if ignore_view(view):
return
# Silently pass if file is too big.
if max_size_exceeded(view):
return
(matched, highlightable) = find_trailing_spaces(view)
add_trailing_spaces_regions(view, matched)
highlight_trailing_spaces_regions(view, highlightable)
# Private: Checks if the view should be ignored.
#
# view - the view to check.
#
# Returns True if the view should be ignored, False otherwise.
def ignore_view(view):
if view.is_scratch():
return True
view_syntax = view.settings().get('syntax')
if not view_syntax:
return False
for syntax_ignore in trailing_spaces_syntax_ignore:
if syntax_ignore in view_syntax:
return True
return False
# Private: Checks whether the document is bigger than the max_size setting.
#
# view - the view, you know
#
# Returns True or False.
def max_size_exceeded(view):
return view.size() > ts_settings.get('trailing_spaces_file_max_size',
DEFAULT_MAX_FILE_SIZE)
# Private: Marks specified regions as trailing spaces.
#
# view - the view, you know
# regions - regions qualified as trailing spaces
#
# Returns nothing.
def add_trailing_spaces_regions(view, regions):
view.erase_regions('TrailingSpacesMatchedRegions')
view.add_regions('TrailingSpacesMatchedRegions',
regions,
"",
"",
sublime.HIDE_ON_MINIMAP)
# Private: Highlights specified regions as trailing spaces.
#
# It will use the scope enforced by the state of the toggleable highlighting.
#
# view - the view, you know
# regions - regions qualified as trailing spaces
#
# Returns nothing.
def highlight_trailing_spaces_regions(view, regions):
view.erase_regions("TrailingSpacesHighlightedRegions")
view.add_regions('TrailingSpacesHighlightedRegions',
regions,
current_highlighting_scope or "",
"",
sublime.HIDE_ON_MINIMAP)
# Private: Toggles highlighting of all trailing spaces in the view.
#
# It has no effect if the plugin is disabled.
#
# view - the view, you know
#
# Returns True (highlighting was turned on) or False (turned off).
def toggle_highlighting(view):
global current_highlighting_scope
    # If the configured scope is empty (an "invisible" highlight), there is nothing to toggle.
if DEFAULT_COLOR_SCOPE_NAME == "":
return "disabled!"
# If performing live, highlighted trailing regions must be updated
# internally.
if not trailing_spaces_live_matching:
(matched, highlightable) = find_trailing_spaces(view)
highlight_trailing_spaces_regions(view, highlightable)
scope = DEFAULT_COLOR_SCOPE_NAME if current_highlighting_scope == "" else ""
current_highlighting_scope = scope
highlight_trailing_spaces_regions(view, view.get_regions('TrailingSpacesHighlightedRegions'))
return "off" if current_highlighting_scope == "" else "on"
# Clear all the highlighted regions in all views.
#
# FIXME: this is not used! Delete?
#
# window - the window, you know
#
# Returns nothing.
def clear_trailing_spaces_highlight(window):
for view in window.views():
view.erase_regions('TrailingSpacesMatchedRegions')
# Find edited lines since last save, as line numbers, based on diff.
#
# It uses a Differ object to compute the diff between the file as read on the
# disk, and the current buffer (which may differ from the disk's state). See
# http://docs.python.org/2/library/difflib.html for details about diff codes.
#
# It relies on a full diff, so it may be expensive computation for very large
# files (diff generation + looping through all lines).
#
# old - a buffer of lines, as in "old version"
# new - a buffer of lines, as in "new version"
#
# Returns the list of edited line numbers.
def modified_lines_as_numbers(old, new):
d = difflib.Differ()
diffs = d.compare(old, new)
# Pretty Naive Algorithm (tm):
# - split off the "Differ code", to check whether:
    # - the line is either in both files or just in b: increment the line number
# - the line is only in b: it qualifies as an edited line!
# Starting from -1 as ST2 is internally 0-based for lines.
lineNum = -1
edited_lines = []
for line in diffs:
code = line[:2]
# those lines with "? " are not real! watch out!
if code in (" ", "+ "):
lineNum += 1
if code == "+ ":
edited_lines.append(lineNum)
return False if not edited_lines else edited_lines
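# A minimal, hypothetical illustration of the diff-code walk above; this helper is
# never called by the plugin and exists purely as documentation.
def _modified_lines_example():
    old = ["a", "b ", "c"]
    new = ["a", "b", "c "]
    # Lines 1 and 2 (0-based) of `new` differ from `old`, so [1, 2] is expected.
    return modified_lines_as_numbers(old, new)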
# Private: Find the dirty lines.
#
# view - the view, you know
#
# Returns the list of regions matching dirty lines.
def get_modified_lines(view):
try:
on_disk
on_buffer = view.substr(sublime.Region(0, view.size())).splitlines()
except UnicodeDecodeError:
sublime.status_message("File format incompatible with this feature (UTF-8 files only)")
return
lines = []
line_numbers = modified_lines_as_numbers(on_disk, on_buffer)
if line_numbers:
lines = [view.full_line(view.text_point(number, 0)) for number in line_numbers]
return lines
# Private: Finds the trailing spaces regions to be deleted.
#
# It abides by the user settings: while in mode "Only Modified Lines", it returns
# the subset of trailing spaces regions which are within dirty lines; otherwise, it
# returns all trailing spaces regions for the document.
#
# view - the view, you know
#
# Returns a list of regions to be deleted.
def find_regions_to_delete(view):
# If the plugin has been running in the background, regions have been matched.
# Otherwise, we must find trailing spaces right now!
if trailing_spaces_live_matching:
regions = view.get_regions('TrailingSpacesMatchedRegions')
else:
(regions, highlightable) = find_trailing_spaces(view)
    # Filtering is required in case trimming is restricted to dirty regions only.
if trim_modified_lines_only:
modified_lines = get_modified_lines(view)
        # If there are no dirty lines, there is nothing to do.
if not modified_lines:
return
# Super-private: filters trailing spaces regions to dirty lines only.
#
# As one cannot perform a smart find_all within arbitrary boundaries, we must do some
# extra work:
# - we want to loop through the modified lines set, not the whole trailing regions
# - but we need a way to match modified lines with trailings to those very regions
#
        # Hence the reversed dict on regions: keys are the text_point of the beginning
# each region, values are the region's actual boundaries. As a Region is unhashable,
# trailing regions are being recreated later on from those two values.
#
        # We then loop through the modified lines: for each line, we get its beginning
# text_point, and check whether it matches a line with trailing spaces in the
# reversed dict. If so, this is a match (a modified line with trailing spaces), so
# we can re-create and store a Region for the relevant trailing spaces boundaries.
#
# Returns the filtered list of trailing spaces regions for the modified lines set.
def only_those_with_trailing_spaces():
regions_by_begin = {}
matches = []
for region in regions:
begin = view.line(region).begin()
regions_by_begin[begin] = (region.begin(), region.end())
for line in modified_lines:
text_point = line.begin()
if text_point in regions_by_begin:
matches.append(sublime.Region(regions_by_begin[text_point][0], regions_by_begin[text_point][1]))
return matches
regions = only_those_with_trailing_spaces()
return regions
# Private: Deletes the trailing spaces regions.
#
# view - the view, you know
# edit - the Edit object spawned by the deletion command
#
# Returns the number of deleted regions.
def delete_trailing_regions(view, edit):
regions = find_regions_to_delete(view)
if regions:
# Trick: reversing the regions takes care of the growing offset while
# deleting the successive regions.
regions.reverse()
for r in regions:
view.erase(edit, r)
return len(regions)
else:
return 0
# Public: Toggles the highlighting on or off.
class ToggleTrailingSpacesCommand(sublime_plugin.WindowCommand):
def run(self):
view = self.window.active_view()
if max_size_exceeded(view):
sublime.status_message("File is too big, trailing spaces handling disabled.")
return
state = toggle_highlighting(view)
ts_settings.set("trailing_spaces_highlight_color", current_highlighting_scope)
persist_settings()
sublime.status_message('Highlighting of trailing spaces is %s' % state)
def is_checked(self):
return current_highlighting_scope != ""
# Public: Toggles "Modified Lines Only" mode on or off.
class ToggleTrailingSpacesModifiedLinesOnlyCommand(sublime_plugin.WindowCommand):
def run(self):
global trim_modified_lines_only
was_on = ts_settings.get("trailing_spaces_modified_lines_only")
ts_settings.set("trailing_spaces_modified_lines_only", not was_on)
persist_settings()
# TODO: use ts_settings.add_on_change() when it lands in ST3
trim_modified_lines_only = ts_settings.get('trailing_spaces_modified_lines_only')
message = "Let's trim trailing spaces everywhere" if was_on \
else "Let's trim trailing spaces only on modified lines"
sublime.status_message(message)
def is_checked(self):
return ts_settings.get("trailing_spaces_modified_lines_only")
# Public: Matches and highlights trailing spaces on key events, according to the
# current settings.
class TrailingSpacesListener(sublime_plugin.EventListener):
def on_modified(self, view):
if trailing_spaces_live_matching:
match_trailing_spaces(view)
def on_selection_modified(self, view):
if trailing_spaces_live_matching:
match_trailing_spaces(view)
def on_activated(self, view):
global trim_modified_lines_only
if trim_modified_lines_only:
self.freeze_last_version(view)
if trailing_spaces_live_matching:
match_trailing_spaces(view)
# continuously watch view for changes to the visible region
if not view.id() in active_views:
# track
active_views[view.id()] = view.visible_region()
self.update_on_region_change(view)
def on_pre_save(self, view):
global trim_modified_lines_only
if trim_modified_lines_only:
self.freeze_last_version(view)
if ts_settings.get("trailing_spaces_trim_on_save"):
view.run_command("delete_trailing_spaces")
def on_close(self, view):
# untrack
active_views.pop(view.id(), None)
def update_on_region_change(self, view):
# remove views not currently visible
if not self.is_view_visible(view):
active_views.pop(view.id(), None)
return
# compare the currently visible region to the previous (if any) and
# update if there were changes
if view.visible_region() != active_views.get(view.id(), view.visible_region()):
match_trailing_spaces(view)
active_views[view.id()] = view.visible_region()
# continue only if the view is still active
if trailing_spaces_live_matching and view.id() in active_views:
sublime.set_timeout_async(lambda: self.update_on_region_change(view),
trailing_spaces_update_interval)
    # Toggling messes with what is read from the disk, and it breaks the diff
# used when modified_lines_only is true. Honestly, I don't know why (yet).
# Anyway, let's cache the persisted version of the document's buffer for
# later use on specific event, so that we always have a decent version of
# "what's on the disk" to work with.
def freeze_last_version(self, view):
global on_disk
file_name = view.file_name()
        # For some reason, the on_activated hook gets fired on a ghost document
# from time to time.
if file_name and not view.is_scratch() and isfile(file_name):
with codecs.open(file_name, "r", "utf-8") as f:
on_disk = f.read().splitlines()
def is_view_visible(self, view):
window = view.window()
if not window:
return False
# panel views don't trigger on_close but are also not valid anymore
# after being hidden, so try to detect these cases here
if view.size() == 0 and not view.file_name():
return False
# see if this view is visible in its group
group = window.get_view_index(view)[0]
if group != -1:
return view.id() == window.active_view_in_group(group).id()
# check if this view is the active panel
active_panel = window.active_panel() or ""
        # find_output_panel only works without the "output." prefix
if active_panel.startswith("output."):
active_panel = active_panel[len("output."):]
panel_view = window.find_output_panel(active_panel)
if panel_view and view.id() == panel_view.id():
return True
return False
# Public: Deletes the trailing spaces.
class DeleteTrailingSpacesCommand(sublime_plugin.TextCommand):
def run(self, edit):
if max_size_exceeded(self.view):
sublime.status_message("File is too big, trailing spaces handling disabled.")
return
deleted = delete_trailing_regions(self.view, edit)
if deleted:
if ts_settings.get("trailing_spaces_save_after_trim") \
and not ts_settings.get("trailing_spaces_trim_on_save"):
sublime.set_timeout(lambda: self.save(self.view), 10)
msg_parts = {"nbRegions": deleted,
"plural": 's' if deleted > 1 else ''}
message = "Deleted %(nbRegions)s trailing spaces region%(plural)s" % msg_parts
else:
message = "No trailing spaces to delete!"
sublime.status_message(message)
def save(self, view):
if view.file_name() is None:
view.run_command('prompt_save_as')
else:
view.run_command('save')
# ST3 features a plugin_loaded hook which is called when ST's API is ready.
#
# We must therefore call our init callback manually on ST2. It must be the last
# thing in this plugin (thanks, beloved contributors!).
if not int(sublime.version()) > 3000:
plugin_loaded()
|
|
import decimal
import warnings
import functools
import contextlib
from decimal import Decimal
from datetime import datetime, timedelta
import pytest
from hypothesis import assume, example, given, target
from hypothesis.extra.numpy import array_shapes, arrays
from hypothesis.strategies import (composite, datetimes, floats, integers,
one_of, sampled_from, timedeltas, tuples)
import numpy as np
import erfa
from erfa import ErfaError, ErfaWarning
import astropy.units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import STANDARD_TIME_SCALES, Time, TimeDelta
from astropy.time.utils import day_frac, two_sum
from astropy.utils import iers
allclose_jd = functools.partial(np.allclose, rtol=np.finfo(float).eps, atol=0)
allclose_jd2 = functools.partial(np.allclose, rtol=np.finfo(float).eps,
atol=np.finfo(float).eps) # 20 ps atol
allclose_sec = functools.partial(np.allclose, rtol=np.finfo(float).eps,
atol=np.finfo(float).eps * 24 * 3600)
tiny = np.finfo(float).eps
dt_tiny = TimeDelta(tiny, format='jd')
def setup_module():
# Pre-load leap seconds table to avoid flakiness in hypothesis runs.
# See https://github.com/astropy/astropy/issues/11030
Time('2020-01-01').ut1
@pytest.fixture(scope='module')
def iers_b():
"""This is an expensive operation, so we share it between tests using a
module-scoped fixture instead of using the context manager form. This
is particularly important for Hypothesis, which invokes the decorated
test function many times (100 by default; see conftest.py for details).
"""
with iers.earth_orientation_table.set(iers.IERS_B.open(iers.IERS_B_FILE)):
yield "<using IERS-B orientation table>"
@contextlib.contextmanager
def quiet_erfa():
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=ErfaWarning)
yield
def assert_almost_equal(a, b, *, rtol=None, atol=None, label=''):
"""Assert numbers are almost equal.
This version also lets hypothesis know how far apart the inputs are, so
that it can work towards a failure and present the worst failure ever seen
as well as the simplest, which often just barely exceeds the threshold.
"""
__tracebackhide__ = True
if rtol is None or rtol == 0:
thresh = atol
elif atol is None:
thresh = rtol * (abs(a) + abs(b)) / 2
else:
thresh = atol + rtol * (abs(a) + abs(b)) / 2
amb = (a - b)
if isinstance(amb, TimeDelta):
ambv = amb.to_value(u.s)
target(ambv, label=label + " (a-b).to_value(u.s), from TimeDelta")
target(-ambv, label=label + " (b-a).to_value(u.s), from TimeDelta")
if isinstance(thresh, u.Quantity):
amb = amb.to(thresh.unit)
else:
try:
target_value = float(amb)
except TypeError:
pass
else:
target(target_value, label=label + " float(a-b)")
target(-target_value, label=label + " float(b-a)")
assert abs(amb) < thresh
# Days that end with leap seconds
# Some time scales use a so-called "leap smear" to cope with these, others
# have times they can't represent or can represent two different ways.
# In any case these days are liable to cause trouble in time conversions.
# Note that from_erfa includes some weird non-integer steps before 1970.
leap_second_table = iers.LeapSeconds.from_iers_leap_seconds()
# Days that contain leap_seconds
leap_second_days = leap_second_table["mjd"] - 1
leap_second_deltas = list(zip(leap_second_days[1:],
np.diff(leap_second_table["tai_utc"])))
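# Each pair is (MJD of a day that ends with a leap second, size in seconds of the
# TAI-UTC step introduced at its end), e.g. (41498, 1.0) for the leap second at
# the end of 1972-06-30.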
today = Time.now()
mjd0 = Time(0, format="mjd")
def reasonable_ordinary_jd():
return tuples(floats(2440000, 2470000), floats(-0.5, 0.5))
@composite
def leap_second_tricky(draw):
mjd = draw(one_of(sampled_from(leap_second_days),
sampled_from(leap_second_days + 1),
sampled_from(leap_second_days - 1)))
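    # Convert the drawn MJD to a (jd1, jd2) pair by adding the MJD zero point
    # (JD 2400000.5, carried here as mjd0.jd1 + mjd0.jd2) and a fractional day.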
return mjd + mjd0.jd1 + mjd0.jd2, draw(floats(0, 1))
def reasonable_jd():
"""Pick a reasonable JD.
These should be not too far in the past or future (so that date conversion
routines don't have to deal with anything too exotic), but they should
include leap second days as a special case, and they should include several
particularly simple cases (today, the beginning of the MJD scale, a
reasonable date) so that hypothesis' example simplification produces
obviously simple examples when they trigger problems.
"""
moments = [(2455000., 0.), (mjd0.jd1, mjd0.jd2), (today.jd1, today.jd2)]
return one_of(sampled_from(moments),
reasonable_ordinary_jd(),
leap_second_tricky())
def unreasonable_ordinary_jd():
"""JD pair that might be unordered or far away"""
return tuples(floats(-1e7, 1e7), floats(-1e7, 1e7))
def ordered_jd():
"""JD pair that is ordered but not necessarily near now"""
return tuples(floats(-1e7, 1e7), floats(-0.5, 0.5))
def unreasonable_jd():
return one_of(reasonable_jd(), ordered_jd(), unreasonable_ordinary_jd())
@composite
def jd_arrays(draw, jd_values):
s = draw(array_shapes())
d = np.dtype([("jd1", float), ("jd2", float)])
jdv = jd_values.map(lambda x: np.array(x, dtype=d))
a = draw(arrays(d, s, elements=jdv))
return a["jd1"], a["jd2"]
def unreasonable_delta():
return tuples(floats(-1e7, 1e7), floats(-1e7, 1e7))
def reasonable_delta():
return tuples(floats(-1e4, 1e4), floats(-0.5, 0.5))
# redundant?
def test_abs_jd2_always_less_than_half():
"""Make jd2 approach +/-0.5, and check that it doesn't go over."""
t1 = Time(2400000.5, [-tiny, +tiny], format='jd')
assert np.all(t1.jd1 % 1 == 0)
assert np.all(abs(t1.jd2) < 0.5)
t2 = Time(2400000., [[0.5 - tiny, 0.5 + tiny],
[-0.5 - tiny, -0.5 + tiny]], format='jd')
assert np.all(t2.jd1 % 1 == 0)
assert np.all(abs(t2.jd2) < 0.5)
@given(jd_arrays(unreasonable_jd()))
def test_abs_jd2_always_less_than_half_on_construction(jds):
jd1, jd2 = jds
t = Time(jd1, jd2, format="jd")
target(np.amax(np.abs(t.jd2)))
assert np.all(t.jd1 % 1 == 0)
assert np.all(abs(t.jd2) <= 0.5)
assert np.all((abs(t.jd2) < 0.5) | (t.jd1 % 2 == 0))
@given(integers(-10**8, 10**8), sampled_from([-0.5, 0.5]))
def test_round_to_even(jd1, jd2):
t = Time(jd1, jd2, format="jd")
assert (abs(t.jd2) == 0.5) and (t.jd1 % 2 == 0)
def test_addition():
"""Check that an addition at the limit of precision (2^-52) is seen"""
t = Time(2455555., 0.5, format='jd', scale='utc')
t_dt = t + dt_tiny
assert t_dt.jd1 == t.jd1 and t_dt.jd2 != t.jd2
# Check that the addition is exactly reversed by the corresponding
# subtraction
t2 = t_dt - dt_tiny
assert t2.jd1 == t.jd1 and t2.jd2 == t.jd2
def test_mult_div():
"""Test precision with multiply and divide"""
dt_small = 6 * dt_tiny
# pick a number that will leave remainder if divided by 6.
dt_big = TimeDelta(20000., format='jd')
dt_big_small_by_6 = (dt_big + dt_small) / 6.
dt_frac = dt_big_small_by_6 - TimeDelta(3333., format='jd')
assert allclose_jd2(dt_frac.jd2, 0.33333333333333354)
def test_init_variations():
"""Check that 3 ways of specifying a time + small offset are equivalent"""
dt_tiny_sec = dt_tiny.jd2 * 86400.
t1 = Time(1e11, format='cxcsec') + dt_tiny
t2 = Time(1e11, dt_tiny_sec, format='cxcsec')
t3 = Time(dt_tiny_sec, 1e11, format='cxcsec')
assert t1.jd1 == t2.jd1
assert t1.jd2 == t3.jd2
def test_precision_exceeds_64bit():
"""
Check that Time object really holds more precision than float64 by looking
at the (naively) summed 64-bit result and asserting equality at the
bit level.
"""
t1 = Time(1.23456789e11, format='cxcsec')
t2 = t1 + dt_tiny
assert t1.jd == t2.jd
def test_through_scale_change():
"""Check that precision holds through scale change (cxcsec is TT)"""
t0 = Time(1.0, format='cxcsec')
t1 = Time(1.23456789e11, format='cxcsec')
dt_tt = t1 - t0
dt_tai = t1.tai - t0.tai
assert allclose_jd(dt_tt.jd1, dt_tai.jd1)
assert allclose_jd2(dt_tt.jd2, dt_tai.jd2)
def test_iso_init():
"""Check when initializing from ISO date"""
t1 = Time('2000:001:00:00:00.00000001', scale='tai')
t2 = Time('3000:001:13:00:00.00000002', scale='tai')
dt = t2 - t1
assert allclose_jd2(dt.jd2, 13. / 24. + 1e-8 / 86400. - 1.0)
def test_jd1_is_mult_of_one():
"""
Check that jd1 is a multiple of 1.
"""
t1 = Time('2000:001:00:00:00.00000001', scale='tai')
assert np.round(t1.jd1) == t1.jd1
t1 = Time(1.23456789, 12345678.90123456, format='jd', scale='tai')
assert np.round(t1.jd1) == t1.jd1
def test_precision_neg():
"""
Check precision when jd1 is negative. This used to fail because ERFA
routines use a test like jd1 > jd2 to decide which component to update.
It was updated to abs(jd1) > abs(jd2) in erfa 1.6 (sofa 20190722).
"""
t1 = Time(-100000.123456, format='jd', scale='tt')
assert np.round(t1.jd1) == t1.jd1
t1_tai = t1.tai
assert np.round(t1_tai.jd1) == t1_tai.jd1
def test_precision_epoch():
"""
Check that input via epoch also has full precision, i.e., against
regression on https://github.com/astropy/astropy/pull/366
"""
t_utc = Time(range(1980, 2001), format='jyear', scale='utc')
t_tai = Time(range(1980, 2001), format='jyear', scale='tai')
dt = t_utc - t_tai
assert allclose_sec(dt.sec, np.round(dt.sec))
def test_leap_seconds_rounded_correctly():
"""Regression tests against #2083, where a leap second was rounded
incorrectly by the underlying ERFA routine."""
with iers.conf.set_temp('auto_download', False):
t = Time(['2012-06-30 23:59:59.413',
'2012-07-01 00:00:00.413'], scale='ut1', precision=3).utc
assert np.all(t.iso == np.array(['2012-06-30 23:59:60.000',
'2012-07-01 00:00:00.000']))
# with the bug, both yielded '2012-06-30 23:59:60.000'
@given(integers(-2**52+2, 2**52-2), floats(-1, 1))
@example(i=65536, f=3.637978807091714e-12)
def test_two_sum(i, f):
with decimal.localcontext(decimal.Context(prec=40)):
a = Decimal(i) + Decimal(f)
s, r = two_sum(i, f)
b = Decimal(s) + Decimal(r)
assert_almost_equal(a, b, atol=Decimal(tiny), rtol=Decimal(0))
@given(floats(), floats())
def test_two_sum_symmetric(f1, f2):
np.testing.assert_equal(two_sum(f1, f2), two_sum(f2, f1))
@given(floats(allow_nan=False, allow_infinity=False),
floats(allow_nan=False, allow_infinity=False))
@example(f1=8.988465674311579e+307, f2=8.98846567431158e+307)
@example(f1=8.988465674311579e+307, f2=-8.98846567431158e+307)
@example(f1=-8.988465674311579e+307, f2=-8.98846567431158e+307)
def test_two_sum_size(f1, f2):
r1, r2 = two_sum(f1, f2)
assert (abs(r1) > abs(r2) / np.finfo(float).eps
or r1 == r2 == 0
or not np.isfinite(f1 + f2))
@given(integers(-2**52+2, 2**52-2), floats(-1, 1))
@example(i=65536, f=3.637978807091714e-12)
def test_day_frac_harmless(i, f):
with decimal.localcontext(decimal.Context(prec=40)):
a = Decimal(i) + Decimal(f)
i_d, f_d = day_frac(i, f)
a_d = Decimal(i_d) + Decimal(f_d)
assert_almost_equal(a, a_d, atol=Decimal(tiny), rtol=Decimal(0))
@given(integers(-2**52+2, 2**52-2), floats(-0.5, 0.5))
@example(i=65536, f=3.637978807091714e-12)
def test_day_frac_exact(i, f):
assume(abs(f) < 0.5 or i % 2 == 0)
i_d, f_d = day_frac(i, f)
assert i == i_d
assert f == f_d
@given(integers(-2**52+2, 2**52-2), floats(-1, 1))
@example(i=65536, f=3.637978807091714e-12)
def test_day_frac_idempotent(i, f):
i_d, f_d = day_frac(i, f)
assert (i_d, f_d) == day_frac(i_d, f_d)
@given(integers(-2**52+2, 2**52-2), floats(-1, 1))
@example(i=65536, f=3.637978807091714e-12)
def test_mjd_initialization_precise(i, f):
t = Time(val=i, val2=f, format="mjd", scale="tai")
jd1, jd2 = day_frac(i + erfa.DJM0, f)
jd1_t, jd2_t = day_frac(t.jd1, t.jd2)
assert (abs((jd1 - jd1_t) + (jd2 - jd2_t)) * u.day).to(u.ns) < 1 * u.ns
@given(jd_arrays(unreasonable_jd()))
def test_day_frac_always_less_than_half(jds):
jd1, jd2 = jds
t_jd1, t_jd2 = day_frac(jd1, jd2)
assert np.all(t_jd1 % 1 == 0)
assert np.all(abs(t_jd2) <= 0.5)
assert np.all((abs(t_jd2) < 0.5) | (t_jd1 % 2 == 0))
@given(integers(-10**8, 10**8), sampled_from([-0.5, 0.5]))
def test_day_frac_round_to_even(jd1, jd2):
t_jd1, t_jd2 = day_frac(jd1, jd2)
assert (abs(t_jd2) == 0.5) and (t_jd1 % 2 == 0)
@given(scale=sampled_from(STANDARD_TIME_SCALES), jds=unreasonable_jd())
@example(scale="tai", jds=(0.0, 0.0))
@example(scale="tai", jds=(0.0, -31738.500000000346))
def test_resolution_never_decreases(scale, jds):
jd1, jd2 = jds
assume(not scale == 'utc' or 2440000 < jd1 + jd2 < 2460000)
t = Time(jd1, jd2, format="jd", scale=scale)
with quiet_erfa():
assert t != t + dt_tiny
@given(reasonable_jd())
def test_resolution_never_decreases_utc(jds):
"""UTC is very unhappy with unreasonable times"""
jd1, jd2 = jds
t = Time(jd1, jd2, format="jd", scale="utc")
with quiet_erfa():
assert t != t + dt_tiny
@given(scale1=sampled_from(STANDARD_TIME_SCALES),
scale2=sampled_from(STANDARD_TIME_SCALES),
jds=unreasonable_jd())
@example(scale1='tcg', scale2='ut1', jds=(2445149.5, 0.47187700984387526))
@example(scale1='tai', scale2='tcb', jds=(2441316.5, 0.0))
@example(scale1='tai', scale2='tcb', jds=(0.0, 0.0))
def test_conversion_preserves_jd1_jd2_invariant(iers_b, scale1, scale2, jds):
jd1, jd2 = jds
t = Time(jd1, jd2, scale=scale1, format="jd")
try:
with quiet_erfa():
t2 = getattr(t, scale2)
except iers.IERSRangeError: # UT1 conversion needs IERS data
assume(False)
except ErfaError:
assume(False)
assert t2.jd1 % 1 == 0
assert abs(t2.jd2) <= 0.5
assert abs(t2.jd2) < 0.5 or t2.jd1 % 2 == 0
@given(scale1=sampled_from(STANDARD_TIME_SCALES),
scale2=sampled_from(STANDARD_TIME_SCALES),
jds=unreasonable_jd())
@example(scale1='tai', scale2='utc', jds=(0.0, 0.0))
def test_conversion_never_loses_precision(iers_b, scale1, scale2, jds):
"""Check that time ordering remains if we convert to another scale.
Here, since scale differences can involve multiplication, we allow
for losing one ULP, i.e., we test that two times that differ by
two ULP will keep the same order if changed to another scale.
"""
jd1, jd2 = jds
t = Time(jd1, jd2, scale=scale1, format="jd")
# Near-zero UTC JDs degrade accuracy; not clear why,
# but also not so relevant, so ignoring.
if (scale1 == 'utc' or scale2 == 'utc') and abs(jd1+jd2) < 1:
tiny = 100*u.us
else:
tiny = 2*dt_tiny
try:
with quiet_erfa():
t2 = t + tiny
assert getattr(t, scale2) < getattr(t2, scale2)
except iers.IERSRangeError: # UT1 conversion needs IERS data
assume(scale1 != 'ut1' or 2440000 < jd1 + jd2 < 2458000)
assume(scale2 != 'ut1' or 2440000 < jd1 + jd2 < 2458000)
raise
except ErfaError:
# If the generated date is too early to compute a UTC julian date,
# and we're not converting between scales which are known to be safe,
# tell Hypothesis that this example is invalid and to try another.
# See https://docs.astropy.org/en/latest/time/index.html#time-scale
barycentric = {scale1, scale2}.issubset({'tcb', 'tdb'})
geocentric = {scale1, scale2}.issubset({'tai', 'tt', 'tcg'})
assume(jd1 + jd2 >= -31738.5 or geocentric or barycentric)
raise
@given(sampled_from(leap_second_deltas), floats(0.1, 0.9))
def test_leap_stretch_mjd(d, f):
mjd, delta = d
t0 = Time(mjd, format="mjd", scale="utc")
th = Time(mjd + f, format="mjd", scale="utc")
t1 = Time(mjd + 1, format="mjd", scale="utc")
assert_quantity_allclose((t1 - t0).to(u.s), (1 * u.day + delta * u.s))
assert_quantity_allclose((th - t0).to(u.s), f * (1 * u.day + delta * u.s))
assert_quantity_allclose((t1 - th).to(u.s), (1 - f) * (1 * u.day + delta * u.s))
@given(scale=sampled_from(STANDARD_TIME_SCALES),
jds=unreasonable_jd(),
delta=floats(-10000, 10000))
@example(scale='utc',
jds=(0.0, 2.2204460492503136e-13),
delta=6.661338147750941e-13)
@example(scale='utc',
jds=(2441682.5, 2.2204460492503136e-16),
delta=7.327471962526035e-12)
@example(scale='utc', jds=(0.0, 5.787592627370942e-13), delta=0.0)
def test_jd_add_subtract_round_trip(scale, jds, delta):
jd1, jd2 = jds
if scale == 'utc' and abs(jd1+jd2) < 1:
# Near-zero UTC JDs degrade accuracy; not clear why,
# but also not so relevant, so ignoring.
thresh = 100*u.us
else:
thresh = 2*dt_tiny
t = Time(jd1, jd2, scale=scale, format="jd")
try:
with quiet_erfa():
t2 = t + delta*u.day
if abs(delta) >= np.finfo(float).eps:
assert t2 != t
t3 = t2 - delta*u.day
assert_almost_equal(t3, t, atol=thresh, rtol=0)
except ErfaError:
assume(scale != 'utc' or 2440000 < jd1+jd2 < 2460000)
raise
@given(scale=sampled_from(STANDARD_TIME_SCALES),
jds=reasonable_jd(),
delta=floats(-3*tiny, 3*tiny))
@example(scale='tai', jds=(0.0, 3.5762786865234384), delta=2.220446049250313e-16)
def test_time_argminmaxsort(scale, jds, delta):
jd1, jd2 = jds
t = Time(jd1, jd2+np.array([0, delta]), scale=scale, format="jd")
imin = t.argmin()
imax = t.argmax()
isort = t.argsort()
diff = (t.jd1[1]-t.jd1[0]) + (t.jd2[1]-t.jd2[0])
if diff < 0: # item 1 smaller
assert delta < 0
assert imin == 1 and imax == 0 and np.all(isort == [1, 0])
elif diff == 0: # identical within precision
assert abs(delta) <= tiny
assert imin == 0 and imax == 0 and np.all(isort == [0, 1])
else:
assert delta > 0
assert imin == 0 and imax == 1 and np.all(isort == [0, 1])
@given(sampled_from(STANDARD_TIME_SCALES), unreasonable_jd(), unreasonable_jd())
@example(scale='utc',
jds_a=(2455000.0, 0.0),
jds_b=(2443144.5, 0.5000462962962965))
@example(scale='utc',
jds_a=(2459003.0, 0.267502885949074),
jds_b=(2454657.001045462, 0.49895453779026877))
def test_timedelta_full_precision(scale, jds_a, jds_b):
jd1_a, jd2_a = jds_a
jd1_b, jd2_b = jds_b
assume(scale != 'utc'
or (2440000 < jd1_a+jd2_a < 2460000
and 2440000 < jd1_b+jd2_b < 2460000))
if scale == 'utc':
# UTC subtraction implies a scale change, so possible rounding errors.
tiny = 2 * dt_tiny
else:
tiny = dt_tiny
t_a = Time(jd1_a, jd2_a, scale=scale, format="jd")
t_b = Time(jd1_b, jd2_b, scale=scale, format="jd")
dt = t_b - t_a
assert dt != (t_b + tiny) - t_a
with quiet_erfa():
assert_almost_equal(t_b-dt/2, t_a+dt/2, atol=2*dt_tiny, rtol=0,
label="midpoint")
assert_almost_equal(t_b+dt, t_a+2*dt, atol=2*dt_tiny, rtol=0, label="up")
assert_almost_equal(t_b-2*dt, t_a-dt, atol=2*dt_tiny, rtol=0, label="down")
@given(scale=sampled_from(STANDARD_TIME_SCALES),
jds_a=unreasonable_jd(),
jds_b=unreasonable_jd(),
x=integers(1, 100),
y=integers(1, 100))
def test_timedelta_full_precision_arithmetic(scale, jds_a, jds_b, x, y):
jd1_a, jd2_a = jds_a
jd1_b, jd2_b = jds_b
t_a = Time(jd1_a, jd2_a, scale=scale, format="jd")
t_b = Time(jd1_b, jd2_b, scale=scale, format="jd")
with quiet_erfa():
try:
dt = t_b - t_a
dt_x = x*dt/(x+y)
dt_y = y*dt/(x+y)
assert_almost_equal(dt_x + dt_y, dt, atol=(x+y)*dt_tiny, rtol=0)
except ErfaError:
assume(scale != 'utc'
or (2440000 < jd1_a+jd2_a < 2460000
and 2440000 < jd1_b+jd2_b < 2460000))
raise
@given(scale1=sampled_from(STANDARD_TIME_SCALES),
scale2=sampled_from(STANDARD_TIME_SCALES),
jds_a=reasonable_jd(),
jds_b=reasonable_jd())
def test_timedelta_conversion(scale1, scale2, jds_a, jds_b):
jd1_a, jd2_a = jds_a
jd1_b, jd2_b = jds_b
# not translation invariant so can't convert TimeDelta
assume('utc' not in [scale1, scale2])
# Conversions a problem but within UT1 it should work
assume(('ut1' not in [scale1, scale2]) or scale1 == scale2)
t_a = Time(jd1_a, jd2_a, scale=scale1, format="jd")
t_b = Time(jd1_b, jd2_b, scale=scale2, format="jd")
with quiet_erfa():
dt = t_b - t_a
t_a_2 = getattr(t_a, scale2)
t_b_2 = getattr(t_b, scale2)
dt_2 = getattr(dt, scale2)
assert_almost_equal(t_b_2 - t_a_2, dt_2, atol=dt_tiny, rtol=0,
label="converted")
# Implicit conversion
assert_almost_equal(t_b_2 - t_a_2, dt, atol=dt_tiny, rtol=0,
label="not converted")
# UTC disagrees when there are leap seconds
_utc_bad = [(pytest.param(s, marks=pytest.mark.xfail) if s == 'utc' else s)
for s in STANDARD_TIME_SCALES]
@given(datetimes(), datetimes()) # datetimes have microsecond resolution
@example(dt1=datetime(1235, 1, 1, 0, 0),
dt2=datetime(9950, 1, 1, 0, 0, 0, 890773))
@pytest.mark.parametrize("scale", _utc_bad)
def test_datetime_difference_agrees_with_timedelta(scale, dt1, dt2):
t1 = Time(dt1, scale=scale)
t2 = Time(dt2, scale=scale)
assert_almost_equal(t2-t1,
TimeDelta(dt2-dt1,
scale=None if scale == 'utc' else scale),
atol=2*u.us)
@given(days=integers(-3000*365, 3000*365),
microseconds=integers(0, 24*60*60*1000000))
@pytest.mark.parametrize("scale", _utc_bad)
def test_datetime_to_timedelta(scale, days, microseconds):
td = timedelta(days=days, microseconds=microseconds)
assert (TimeDelta(td, scale=scale)
== TimeDelta(days, microseconds/(86400*1e6), scale=scale, format="jd"))
@given(days=integers(-3000*365, 3000*365),
microseconds=integers(0, 24*60*60*1000000))
@pytest.mark.parametrize("scale", _utc_bad)
def test_datetime_timedelta_roundtrip(scale, days, microseconds):
td = timedelta(days=days, microseconds=microseconds)
assert td == TimeDelta(td, scale=scale).value
@given(days=integers(-3000*365, 3000*365), day_frac=floats(0, 1))
@example(days=262144, day_frac=2.314815006343452e-11)
@example(days=1048576, day_frac=1.157407503171726e-10)
@pytest.mark.parametrize("scale", _utc_bad)
def test_timedelta_datetime_roundtrip(scale, days, day_frac):
td = TimeDelta(days, day_frac, format="jd", scale=scale)
td.format = "datetime"
assert_almost_equal(td, TimeDelta(td.value, scale=scale), atol=2*u.us)
@given(integers(-3000*365, 3000*365), floats(0, 1))
@example(days=262144, day_frac=2.314815006343452e-11)
@pytest.mark.parametrize("scale", _utc_bad)
def test_timedelta_from_parts(scale, days, day_frac):
kwargs = dict(format="jd", scale=scale)
whole = TimeDelta(days, day_frac, **kwargs)
from_parts = TimeDelta(days, **kwargs) + TimeDelta(day_frac, **kwargs)
assert whole == from_parts
def test_datetime_difference_agrees_with_timedelta_no_hypothesis():
scale = "tai"
dt1 = datetime(1235, 1, 1, 0, 0)
dt2 = datetime(9950, 1, 1, 0, 0, 0, 890773)
t1 = Time(dt1, scale=scale)
t2 = Time(dt2, scale=scale)
assert(abs((t2-t1) - TimeDelta(dt2-dt1, scale=scale)) < 1*u.us)
# datetimes have microsecond resolution
@given(datetimes(), timedeltas())
@example(dt=datetime(2000, 1, 1, 0, 0),
td=timedelta(days=-397683, microseconds=2))
@example(dt=datetime(2179, 1, 1, 0, 0),
td=timedelta(days=-795365, microseconds=53))
@example(dt=datetime(2000, 1, 1, 0, 0),
td=timedelta(days=1590729, microseconds=10))
@example(dt=datetime(4357, 1, 1, 0, 0),
td=timedelta(days=-1590729, microseconds=107770))
@example(dt=datetime(4357, 1, 1, 0, 0, 0, 29),
td=timedelta(days=-1590729, microseconds=746292))
@pytest.mark.parametrize("scale", _utc_bad)
def test_datetime_timedelta_sum(scale, dt, td):
try:
dt + td
except OverflowError:
assume(False)
dt_a = Time(dt, scale=scale)
td_a = TimeDelta(td, scale=None if scale == 'utc' else scale)
assert_almost_equal(dt_a+td_a, Time(dt+td, scale=scale), atol=2*u.us)
@given(jds=reasonable_jd(),
lat1=floats(-90, 90),
lat2=floats(-90, 90),
lon=floats(-180, 180))
@pytest.mark.parametrize("kind", ["apparent", "mean"])
def test_sidereal_lat_independent(iers_b, kind, jds, lat1, lat2, lon):
jd1, jd2 = jds
t1 = Time(jd1, jd2, scale="ut1", format="jd", location=(lon, lat1))
t2 = Time(jd1, jd2, scale="ut1", format="jd", location=(lon, lat2))
try:
assert_almost_equal(t1.sidereal_time(kind),
t2.sidereal_time(kind),
atol=1*u.uas)
except iers.IERSRangeError:
assume(False)
@given(jds=reasonable_jd(),
lat=floats(-90, 90),
lon=floats(-180, 180),
lon_delta=floats(-360, 360))
@pytest.mark.parametrize("kind", ["apparent", "mean"])
def test_sidereal_lon_independent(iers_b, kind, jds, lat, lon, lon_delta):
jd1, jd2 = jds
t1 = Time(jd1, jd2, scale="ut1", format="jd", location=(lon, lat))
t2 = Time(jd1, jd2, scale="ut1", format="jd", location=(lon+lon_delta, lat))
try:
diff = t1.sidereal_time(kind) + lon_delta*u.degree - t2.sidereal_time(kind)
except iers.IERSRangeError:
assume(False)
else:
expected_degrees = (diff.to_value(u.degree) + 180) % 360
assert_almost_equal(expected_degrees, 180, atol=1/(60*60*1000))
|
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: iam_role
short_description: Manage AWS IAM roles
description:
- Manage AWS IAM roles
version_added: "2.3"
author: Rob White, @wimnat
options:
path:
description:
- The path to the role. For more information about paths, see U(http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html).
required: false
default: "/"
name:
description:
- The name of the role to create.
required: true
assume_role_policy_document:
description:
- "The trust relationship policy document that grants an entity permission to assume the role. This parameter is required when state: present."
required: false
managed_policy:
description:
- A list of managed policy ARNs or, since Ansible 2.4, a list of either managed policy ARNs or friendly names.
To embed an inline policy, use M(iam_policy). To remove existing policies, use an empty list item.
required: true
aliases: ['managed_policies']
state:
description:
- Create or remove the IAM role
required: true
choices: [ 'present', 'absent' ]
requirements: [ botocore, boto3 ]
extends_documentation_fragment:
- aws
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create a role
- iam_role:
name: mynewrole
assume_role_policy_document: "{{ lookup('file','policy.json') }}"
state: present
# Create a role and attach a managed policy called "PowerUserAccess"
- iam_role:
name: mynewrole
assume_role_policy_document: "{{ lookup('file','policy.json') }}"
state: present
managed_policy:
- arn:aws:iam::aws:policy/PowerUserAccess
# Keep the role created above but remove all managed policies
- iam_role:
name: mynewrole
assume_role_policy_document: "{{ lookup('file','policy.json') }}"
state: present
managed_policy:
-
# Delete the role
- iam_role:
name: mynewrole
assume_role_policy_document: "{{ lookup('file','policy.json') }}"
state: absent
'''
RETURN = '''
path:
description: the path to the role
type: string
returned: always
sample: /
role_name:
description: the friendly name that identifies the role
type: string
returned: always
sample: myrole
role_id:
description: the stable and unique string identifying the role
type: string
returned: always
sample: ABCDEFF4EZ4ABCDEFV4ZC
arn:
description: the Amazon Resource Name (ARN) specifying the role
type: string
returned: always
sample: "arn:aws:iam::1234567890:role/mynewrole"
create_date:
description: the date and time, in ISO 8601 date-time format, when the role was created
type: string
returned: always
sample: "2016-08-14T04:36:28+00:00"
assume_role_policy_document:
description: the policy that grants an entity permission to assume the role
type: string
returned: always
sample: {
'statement': [
{
'action': 'sts:AssumeRole',
'effect': 'Allow',
'principal': {
'service': 'ec2.amazonaws.com'
},
'sid': ''
}
],
'version': '2012-10-17'
}
attached_policies:
description: a list of dicts containing the name and ARN of the managed IAM policies attached to the role
type: list
returned: always
sample: [
{
'policy_arn': 'arn:aws:iam::aws:policy/PowerUserAccess',
'policy_name': 'PowerUserAccess'
}
]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, ec2_argument_spec, get_aws_connection_info, boto3_conn, sort_json_policy_dict
from ansible.module_utils.ec2 import HAS_BOTO3
import json
import traceback
try:
from botocore.exceptions import ClientError, NoCredentialsError
except ImportError:
pass # caught by imported HAS_BOTO3
def compare_assume_role_policy_doc(current_policy_doc, new_policy_doc):
if sort_json_policy_dict(current_policy_doc) == sort_json_policy_dict(json.loads(new_policy_doc)):
return True
else:
return False
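# sort_json_policy_dict recursively orders the lists inside an IAM policy document,
# so two trust policies that differ only in list ordering or JSON formatting are
# treated as equal here and do not trigger an update.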
def compare_attached_role_policies(current_attached_policies, new_attached_policies):
# If new_attached_policies is None it means we want to remove all policies
if len(current_attached_policies) > 0 and new_attached_policies is None:
return False
current_attached_policies_arn_list = []
for policy in current_attached_policies:
current_attached_policies_arn_list.append(policy['PolicyArn'])
if set(current_attached_policies_arn_list) == set(new_attached_policies):
return True
else:
return False
def convert_friendly_names_to_arns(connection, module, policy_names):
    if all(policy.startswith('arn:') for policy in policy_names):
return policy_names
allpolicies = {}
paginator = connection.get_paginator('list_policies')
policies = paginator.paginate().build_full_result()['Policies']
for policy in policies:
allpolicies[policy['PolicyName']] = policy['Arn']
allpolicies[policy['Arn']] = policy['Arn']
try:
return [allpolicies[policy] for policy in policy_names]
except KeyError as e:
module.fail_json(msg="Couldn't find policy: " + str(e))
def create_or_update_role(connection, module):
params = dict()
params['Path'] = module.params.get('path')
params['RoleName'] = module.params.get('name')
params['AssumeRolePolicyDocument'] = module.params.get('assume_role_policy_document')
managed_policies = module.params.get('managed_policy')
if managed_policies:
managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies)
changed = False
# Get role
role = get_role(connection, module, params['RoleName'])
# If role is None, create it
if role is None:
try:
role = connection.create_role(**params)
changed = True
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
else:
# Check Assumed Policy document
if not compare_assume_role_policy_doc(role['AssumeRolePolicyDocument'], params['AssumeRolePolicyDocument']):
try:
connection.update_assume_role_policy(RoleName=params['RoleName'], PolicyDocument=json.dumps(json.loads(params['AssumeRolePolicyDocument'])))
changed = True
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
if managed_policies is not None:
# Get list of current attached managed policies
current_attached_policies = get_attached_policy_list(connection, module, params['RoleName'])
        # A single empty list item means all managed policies are to be removed
if len(managed_policies) == 1 and not managed_policies[0]:
for policy in current_attached_policies:
try:
connection.detach_role_policy(RoleName=params['RoleName'], PolicyArn=policy['PolicyArn'])
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
else:
# Make a list of the ARNs from the attached policies
current_attached_policies_arn_list = []
for policy in current_attached_policies:
current_attached_policies_arn_list.append(policy['PolicyArn'])
            # Detach policies not defined in the task
for policy_arn in list(set(current_attached_policies_arn_list) - set(managed_policies)):
try:
connection.detach_role_policy(RoleName=params['RoleName'], PolicyArn=policy_arn)
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
            # Attach policies not already attached
for policy_arn in list(set(managed_policies) - set(current_attached_policies_arn_list)):
try:
connection.attach_role_policy(RoleName=params['RoleName'], PolicyArn=policy_arn)
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
# Instance profile
try:
instance_profiles = connection.list_instance_profiles_for_role(RoleName=params['RoleName'])['InstanceProfiles']
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
if not any(p['InstanceProfileName'] == params['RoleName'] for p in instance_profiles):
# Make sure an instance profile is attached
try:
connection.create_instance_profile(InstanceProfileName=params['RoleName'], Path=params['Path'])
changed = True
except ClientError as e:
# If the profile already exists, no problem, move on
if e.response['Error']['Code'] == 'EntityAlreadyExists':
pass
else:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
connection.add_role_to_instance_profile(InstanceProfileName=params['RoleName'], RoleName=params['RoleName'])
# Get the role again
role = get_role(connection, module, params['RoleName'])
role['attached_policies'] = get_attached_policy_list(connection, module, params['RoleName'])
module.exit_json(changed=changed, iam_role=camel_dict_to_snake_dict(role))
def destroy_role(connection, module):
params = dict()
params['RoleName'] = module.params.get('name')
if get_role(connection, module, params['RoleName']):
        # We need to detach the role from any instance profiles before we delete it
try:
instance_profiles = connection.list_instance_profiles_for_role(RoleName=params['RoleName'])['InstanceProfiles']
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
# Now remove the role from the instance profile(s)
for profile in instance_profiles:
try:
connection.remove_role_from_instance_profile(InstanceProfileName=profile['InstanceProfileName'], RoleName=params['RoleName'])
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
# Now remove any attached policies otherwise deletion fails
try:
for policy in get_attached_policy_list(connection, module, params['RoleName']):
connection.detach_role_policy(RoleName=params['RoleName'], PolicyArn=policy['PolicyArn'])
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
try:
connection.delete_role(**params)
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
else:
module.exit_json(changed=False)
module.exit_json(changed=True)
def get_role(connection, module, name):
try:
return connection.get_role(RoleName=name)['Role']
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchEntity':
return None
else:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except NoCredentialsError as e:
module.fail_json(msg="AWS authentication problem. " + e.message, exception=traceback.format_exc())
def get_attached_policy_list(connection, module, name):
try:
return connection.list_attached_role_policies(RoleName=name)['AttachedPolicies']
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchEntity':
return None
else:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
path=dict(default="/", type='str'),
assume_role_policy_document=dict(type='json'),
managed_policy=dict(type='list', aliases=['managed_policies']),
state=dict(choices=['present', 'absent'], required=True)
)
)
module = AnsibleModule(argument_spec=argument_spec,
required_if=[('state', 'present', ['assume_role_policy_document'])])
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_params)
state = module.params.get("state")
if state == 'present':
create_or_update_role(connection, module)
else:
destroy_role(connection, module)
if __name__ == '__main__':
main()
|
|
from prefixcommons import curie_util
from prefixcommons.curie_util import contract_uri, expand_uri, get_prefixes
from ontobio.vocabulary.relations import OboRO, Evidence
from ontobio.vocabulary.upper import UpperLevel
from ontobio.ecomap import EcoMap
from ontobio.rdfgen import relations
from ontobio.model import association as association_model
from rdflib import Namespace
from rdflib import BNode
from rdflib import Literal
from rdflib import URIRef
from rdflib.namespace import RDF
from rdflib.namespace import RDFS
from rdflib.namespace import OWL
import rdflib
import logging
import uuid
import re
import json
ro = OboRO()
evt = Evidence()
upt = UpperLevel()
# Pull the go_context file from prefixcommons.
# NOTE: this is a temporary measure. We will build the go json ld context as part of the pipeline in future
# See https://github.com/geneontology/go-site/issues/617
prefix_context = {key: value for context in curie_util.default_curie_maps + [curie_util.read_biocontext("go_context")] for key, value in context.items()}
HAS_SUPPORTING_REFERENCE = URIRef(expand_uri(evt.has_supporting_reference, cmaps=[evt._prefixmap]))
ENABLED_BY = URIRef(expand_uri(ro.enabled_by))
ENABLES = URIRef(expand_uri(ro.enables))
INVOLVED_IN = URIRef(expand_uri(ro.involved_in))
PART_OF = URIRef(expand_uri(ro.part_of))
OCCURS_IN = URIRef(expand_uri(ro.occurs_in))
COLOCALIZES_WITH = URIRef(expand_uri(ro.colocalizes_with))
MOLECULAR_FUNCTION = URIRef(expand_uri(upt.molecular_function))
logger = logging.getLogger(__name__)
def genid(base=None):
return URIRef(str(uuid.uuid4()), base=base)
class RdfWriter(object):
"""
Abstract base class for all RdfWriters
"""
pass
class TurtleRdfWriter(RdfWriter):
"""
Default implementation of RdfWriter
use rdflib to generate a turtle file
"""
def __init__(self, label=None):
self.base = genid(base="http://model.geneontology.org") + '/'
self.graph = rdflib.Graph(identifier=self.base)
self.graph.bind("owl", OWL)
self.graph.bind("obo", "http://purl.obolibrary.org/obo/")
self.graph.add((self.base, RDF.type, OWL.Ontology))
        if label is not None:
self.graph.add((self.base, RDFS.label, Literal(label)))
def add(self, s, p, o):
self.graph.add((s, p, o))
def serialize(self, destination=None, format='ttl', **args):
        if destination is not None:
self.graph.serialize(destination, format, **args)
else:
return self.graph.serialize(destination, format, **args)
class RdfTransform(object):
"""
base class for all RDF generators
"""
def __init__(self, writer=None):
if writer is None:
writer = TurtleRdfWriter()
self.writer = writer
self.include_subject_info = False
self.ecomap = EcoMap()
self._emit_header_done = False
self.uribase = writer.base
self.ecomap.mappings()
        self.bad_chars_regex = re.compile(r"[^\.:_\-0-9a-zA-Z]")
self.ro_lookup = dict(relations.label_relation_lookup())
def blanknode(self):
return BNode()
def uri(self, id):
# allow either atoms or objects
if isinstance(id, dict):
return self.uri(id['id'])
# logger.info("Expand: {}".format(id))
id = self.bad_chars_regex.sub("_", id)
uri = curie_util.expand_uri(id, cmaps=[prefix_context])
if uri != id:
            # If the URI is different, that means we found a CURIE expansion, so we should bind the prefix
prefix = id.split(":")[0]
self.writer.graph.bind(prefix, prefix_context[prefix])
return URIRef(uri)
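    # For example, "GO:0005575" expands via the merged contexts above to
    # URIRef("http://purl.obolibrary.org/obo/GO_0005575") and the "GO" prefix is
    # bound on the graph; identifiers that do not expand are returned unchanged,
    # merely wrapped in a URIRef.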
def lookup_relation(self, label):
label = label.replace('_', ' ')
# Return the cached label -> URI or None
if label in self.ro_lookup:
return self.uri(self.ro_lookup[label])
else:
return None
def emit(self, s, p, o):
logger.debug("TRIPLE: {} {} {}".format(s,p,o))
self.writer.add(s,p,o)
return (s,p,o)
def emit_type(self, s, t):
return self.emit(s, RDF.type, t)
def emit_label(self, s, o):
return self.emit(s, RDFS.label, Literal(o))
def emit_not(self, s, t):
bn = self.blanknode()
self.emit_type(bn, OWL.Class)
self.emit(bn, OWL.complementOf, URIRef(expand_uri(t)))
return self.emit_type(s, bn)
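    # Example (hypothetical identifiers): emit_not(ind, some_class_uri) types `ind`
    # with an anonymous owl:complementOf class, i.e. it asserts that `ind` is NOT
    # an instance of the referenced class.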
def eco_class(self, code, coderef=None):
eco_cls_id = self.ecomap.coderef_to_ecoclass(code, coderef)
logger.debug(self.ecomap._mappings)
logger.debug('ECO: {},{}->{}'.format(code, coderef, eco_cls_id))
return self.uri(eco_cls_id)
def translate_evidence(self, association, stmt):
"""
``
_:1 a Axiom
owl:annotatedSource s
owl:annotatedProperty p
owl:annotatedTarget o
evidence [ a ECO ; ...]
``
"""
ev = association['evidence']
ev_id = None
if 'id' in ev:
ev_id = self.uri(ev['id'])
else:
ev_id = genid(base=self.writer.base + '/')
stmt_id = self.blanknode() ## OWL reification: must be blank
(s,p,o) = stmt
self.emit_type(stmt_id, OWL.Axiom)
self.emit(stmt_id, OWL.annotatedSource, s)
self.emit(stmt_id, OWL.annotatedProperty, p)
self.emit(stmt_id, OWL.annotatedTarget, o)
self.emit(stmt_id, self.uri(evt.axiom_has_evidence), ev_id)
ev_cls = self.eco_class(self.uri(ev['type']))
self.emit_type(ev_id, OWL.NamedIndividual)
self.emit_type(ev_id, ev_cls)
if 'with_support_from' in ev:
for w in ev['with_support_from']:
self.emit(ev_id, self.uri(evt.evidence_with_support_from), self.uri(w))
for ref in ev['has_supporting_reference']:
o = self.uri(ref)
if ref == expand_uri(ref):
o = Literal(ref)
self.emit(ev_id, HAS_SUPPORTING_REFERENCE, o)
if 'with_support_from' in ev:
for ref in ev['with_support_from']:
self.emit(ev_id, self.uri(evt.evidence_with_support_from), self.uri(ref))
class CamRdfTransform(RdfTransform):
"""
Granular instance-based representation (GO-CAM)
Perform gappy translation from simple assocs model to GOCAM
See https://github.com/geneontology/minerva/blob/master/specs/owl-model.md
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.bad_properties_found = set()
def emit_header(self):
if self._emit_header_done:
return
self._emit_header_done = True
self.emit_type(ENABLED_BY, OWL.ObjectProperty)
self.emit_type(PART_OF, OWL.ObjectProperty)
self.emit_type(OCCURS_IN, OWL.ObjectProperty)
def translate(self, association: association_model.GoAssociation):
# See https://github.com/biolink/ontobio/pull/136
# if the association has an annotation extension, and this
# is a union, then we treat each element in the union
# as a distinct assertion/annotation, where each assertion
# has its own conjunction of relational expressions
if isinstance(association, dict) and "header" in association:
return
association = association.to_hash_assoc() # type: Dict
object_extensions = association.get('object_extensions', {})
conjunctive_sets = [] # Will be a list of lists, e.g. [[rel1(GO:1), rel2(GO:2)], [rel1(GO:3)]]
for cj in object_extensions.get("union_of", []):
conjunctive_sets.append(cj['intersection_of'])
if not conjunctive_sets:
conjunctive_sets.append([]) # A dummy list val to trigger one go through the loop
for conjunctive_set in conjunctive_sets:
            # and_xps = ix.get('intersection_of')
            # self.translate(association, and_xps)
sub = association['subject']
obj = association['object']
rel = association['relation']
sub_uri = self.uri(sub)
obj_uri = self.uri(obj)
# E.g. instance of gene product class
enabler_id = genid(base=self.writer.base)
self.emit_type(enabler_id, sub_uri)
self.emit_type(enabler_id, OWL.NamedIndividual)
# subject GP class label and taxon restriction
self.emit_label(sub_uri, sub["label"])
if "taxon" in sub:
restriction = rdflib.BNode()
self.emit_type(restriction, OWL.Restriction)
self.emit(restriction, OWL.onProperty, self.uri(ro.in_taxon))
self.emit(restriction, OWL.someValuesFrom, self.uri(sub["taxon"]["id"]))
self.emit(sub_uri, RDFS.subClassOf, restriction)
# E.g. instance of GO class
tgt_id = genid(base=self.writer.base)
self.emit_type(tgt_id, obj_uri)
self.emit_type(tgt_id, OWL.NamedIndividual)
aspect = association['aspect']
aspect_triple = None
# todo: use relation
if aspect == 'F':
aspect_triple = self.emit(tgt_id, ENABLED_BY, enabler_id)
elif aspect == 'P':
mf_id = genid(base=self.writer.base)
self.emit_type(mf_id, MOLECULAR_FUNCTION)
aspect_triple = self.emit(mf_id, ENABLED_BY, enabler_id)
aspect_triple = self.emit(mf_id, PART_OF, tgt_id)
elif aspect == 'C':
mf_id = genid(base=self.writer.base)
self.emit_type(mf_id, MOLECULAR_FUNCTION)
aspect_triple = self.emit(mf_id, ENABLED_BY, enabler_id)
aspect_triple = self.emit(mf_id, OCCURS_IN, tgt_id)
else:
# Skip this association if the aspect makes no sense.
                logger.warning("Aspect field '{}' is not F, P, or C, so this association is skipped.".format(aspect))
return
if self.include_subject_info:
pass
# TODO
if association['object_extensions'] != {}:
pass
if conjunctive_set != []:
for ext in conjunctive_set:
filler_inst = genid(base=self.writer.base)
self.emit_type(filler_inst, self.uri(ext['filler']))
p = self.lookup_relation(ext['property'])
if p is None:
if ext["property"] not in self.bad_properties_found:
self.bad_properties_found.add(ext["property"])
logger.warning("No such property {}".format(ext))
else:
self.emit(tgt_id, p, filler_inst)
self.translate_evidence(association, aspect_triple)
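    # Editor's note (illustrative sketch, not part of the original module):
    # for a GAF association with aspect 'F', gene product GP and GO term T,
    # translate() above emits roughly the following (the generated genids are
    # shown symbolically):
    #
    #   <enabler>  rdf:type  <GP class>, owl:NamedIndividual .
    #   <target>   rdf:type  <T class>,  owl:NamedIndividual .
    #   <target>   ENABLED_BY  <enabler> .
    #
    # translate_evidence() then reifies that ENABLED_BY triple as an owl:Axiom
    # carrying the ECO class and the supporting references.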
def provenance(self):
self.writer.graph.bind("metago", "http://model.geneontology.org/")
self.writer.graph.add((self.writer.base, URIRef("http://model.geneontology.org/graphType"), URIRef("http://model.geneontology.org/gafCam")))
class SimpleAssocRdfTransform(RdfTransform):
"""
Follows simple OBAN-style model
See: https://github.com/EBISPOT/OBAN
See also: https://github.com/monarch-initiative/dipper/
"""
def emit_header(self):
if self._emit_header_done:
return
self._emit_header_done = True
def translate(self, association: association_model.GoAssociation):
association = association.to_hash_assoc()
sub = association['subject']
        obj = association['object']
rel = association['relation']['id']
sub_uri = self.uri(sub)
obj_uri = self.uri(obj)
rel_url = None
rel_uri = None
if rel == 'part_of':
rel_uri = PART_OF
elif rel == 'enables':
rel_uri = ENABLES
elif rel == 'involved_in':
rel_uri = INVOLVED_IN
elif rel == 'colocalizes_with':
rel_uri = COLOCALIZES_WITH
else:
            logger.error("Unknown relation: {}".format(rel))
# TODO: extensions
stmt = self.emit(sub_uri,rel_uri,obj_uri)
# optionally include info about subject (e.g. gene)
if self.include_subject_info:
            self.emit_label(sub_uri, sub['label'])
            if 'taxon' in sub:
                taxon = sub['taxon']
                self.emit(sub_uri, self.uri(ro.in_taxon), self.uri(taxon))
# TODO syns etc
self.translate_evidence(association, stmt)
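    # Editor's note (illustrative sketch, not part of the original module):
    # in contrast to CamRdfTransform, which mints new individuals, translate()
    # above asserts a single direct triple between the subject and object
    # identifiers, e.g. (symbolically):
    #
    #   <gene product>  <relation URI>  <GO term> .
    #
    # with the evidence attached to that statement via the same owl:Axiom
    # reification performed by translate_evidence().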
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
import calendar
from flask import Flask, jsonify, render_template, request, abort, redirect, url_for, make_response
from flask.json import JSONEncoder
from datetime import datetime
import time
import json
import threading
import random
import string
import os
from . import config
from .models import Pokemon, Gym, Pokestop
from .scan import ScanMetrics, Scanner
from .utils import get_locale
log = logging.getLogger(__name__)
class Pogom(Flask):
def __init__(self, scan_config, *args, **kwargs):
super(Pogom, self).__init__(*args, **kwargs)
self.scan_config = scan_config
self.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
self.json_encoder = CustomJSONEncoder
self.route('/', methods=['GET'])(self.fullmap)
self.route('/heatmap-data', methods=['GET'])(self.heatmap_data)
self.route('/map-data', methods=['GET'])(self.map_data)
self.route('/spawnpoint-data', methods=['GET'])(self.spawnpoint_data)
self.route('/cover', methods=['GET'])(self.cover)
self.route('/location', methods=['POST'])(self.add_location)
self.route('/location', methods=['DELETE'])(self.delete_location)
self.route('/stats', methods=['GET'])(self.stats)
self.route('/config', methods=['GET'])(self.get_config_site)
self.route('/config', methods=['POST'])(self.post_config_site)
self.route('/login', methods=['GET', 'POST'])(self.login)
self.route('/locale', methods=['GET'])(self.locale)
def is_authenticated(self):
if config.get('CONFIG_PASSWORD', None) and not request.cookies.get("auth") == config['AUTH_KEY']:
return False
else:
return True
def fullmap(self):
# if 'search_thread' not in [t.name for t in threading.enumerate()]:
if (not config.get('GOOGLEMAPS_KEY', None) or
not config.get('ACCOUNTS', None)):
return redirect(url_for('get_config_site'))
return render_template('map.html',
scan_locations=json.dumps(self.scan_config.SCAN_LOCATIONS.values()),
gmaps_key=config['GOOGLEMAPS_KEY'],
is_authenticated=self.is_authenticated())
def login(self):
if self.is_authenticated():
return redirect(url_for('get_config_site'))
if request.method == "GET":
return render_template('login.html')
if request.form.get('password', None) == config.get('CONFIG_PASSWORD', None):
resp = make_response(redirect(url_for('get_config_site')))
resp.set_cookie('auth', config['AUTH_KEY'])
return resp
def spawnpoint_data(self):
pokemon_spawns = Pokemon.get_spawn_points()
filter_range = request.args.get('range', None)
if filter_range:
# in day
filter_range = int(filter_range)
after_timestamp = time.time() - filter_range * 86400
pokemon_spawns = filter(
lambda p: (p['lastseen'] - datetime(1970, 1, 1)).total_seconds() > after_timestamp,
pokemon_spawns
)
# remove time data for a smaller json
for p in pokemon_spawns:
p.pop('lastseen')
return jsonify(pokemon_spawns)
def heatmap_data(self):
return jsonify(Pokemon.get_heat_stats())
def get_config_site(self):
if not self.is_authenticated():
return redirect(url_for('login'))
return render_template(
'config.html',
locale=config.get('LOCALE', ''),
locales_available=config.get('LOCALES_AVAILABLE', []),
gmaps_key=config.get('GOOGLEMAPS_KEY', None),
accounts=config.get('ACCOUNTS', []),
password=config.get('CONFIG_PASSWORD', None),
detail_pokemons=','.join(map(str, self.scan_config.DETAIL_POKEMON_LIST)))
def post_config_site(self):
if not self.is_authenticated():
return redirect(url_for('login'))
config['LOCALE'] = request.form.get('locale', 'en')
config['GOOGLEMAPS_KEY'] = request.form.get('gmapsKey', '')
pw = request.form.get('configPassword', None)
pw_changed = (pw != config.get('CONFIG_PASSWORD', None))
if pw_changed:
config['CONFIG_PASSWORD'] = pw
config['AUTH_KEY'] = ''.join(random.choice(string.lowercase) for _ in range(32))
accounts_str = request.form.get('accounts', None)
usernames_before = set([])
for account in config.get('ACCOUNTS', []):
usernames_before.add(account['username'])
usernames = set([])
accounts_parsed = []
if accounts_str:
for a in accounts_str.splitlines():
a = a.split(":")
if (len(a) == 2) and (a[0].strip() not in usernames):
accounts_parsed.append({'username': a[0].strip(), 'password': a[1].strip()})
usernames.add(a[0].strip())
config['ACCOUNTS'] = accounts_parsed
self.scan_config.ACCOUNTS_CHANGED = (usernames_before != usernames)
pokemon_checklist = request.form.get('detailPokemonIds', '')
self.scan_config.update_pokemon_list_to_query(map(int, pokemon_checklist.split(',')))
self.save_config()
self.scan_config.RESTART = True
resp = make_response(render_template(
'config.html',
locale=config.get('LOCALE', ''),
locales_available=config.get('LOCALES_AVAILABLE', []),
gmaps_key=config.get('GOOGLEMAPS_KEY', None),
accounts=config.get('ACCOUNTS', []),
password=config.get('CONFIG_PASSWORD', None),
alert=True))
if pw_changed:
resp.set_cookie('auth', config['AUTH_KEY'])
return resp
def save_config(self):
if not self.is_authenticated():
return redirect(url_for('login'))
if (config['CONFIG_PATH'] is not None and
os.path.isfile(config['CONFIG_PATH'])):
config_path = config['CONFIG_PATH']
data = json.load(open(config_path, 'r'))
else:
config_path = os.path.join(config['ROOT_PATH'], 'config.json')
data = {}
with open(config_path, 'w') as f:
data.update({
'GOOGLEMAPS_KEY': config['GOOGLEMAPS_KEY'],
'LOCALE': config['LOCALE'],
'CONFIG_PASSWORD': config['CONFIG_PASSWORD'],
'SCAN_LOCATIONS': self.scan_config.SCAN_LOCATIONS.values(),
'ACCOUNTS': config['ACCOUNTS'],
'DETAIL_POKEMON_LIST': self.scan_config.DETAIL_POKEMON_LIST})
f.write(json.dumps(data))
def map_data(self):
d = {}
if not ScanMetrics.LAST_SUCCESSFUL_REQUEST:
time_since_last_req = "na"
elif ScanMetrics.LAST_SUCCESSFUL_REQUEST == -1:
time_since_last_req = "sleep"
else:
time_since_last_req = time.time() - ScanMetrics.LAST_SUCCESSFUL_REQUEST
d['server_status'] = {'num-threads': ScanMetrics.NUM_THREADS,
'num-accounts': ScanMetrics.NUM_ACCOUNTS,
'last-successful-request': time_since_last_req,
'complete-scan-time': ScanMetrics.COMPLETE_SCAN_TIME,
'current-scan-percent': ScanMetrics.CURRENT_SCAN_PERCENT}
d['scan_locations'] = self.scan_config.SCAN_LOCATIONS
if request.args.get('pokemon', 'true') == 'true':
d['pokemons'] = Pokemon.get_active()
if request.args.get('pokestops', 'false') == 'true':
d['pokestops'] = Pokestop.get_all()
# TODO: Lured pokestops
if request.args.get('gyms', 'true') == 'true':
d['gyms'] = Gym.get_all()
return jsonify(d)
def cover(self):
return jsonify({'cover': self.scan_config.COVER,
'scan_locations': self.scan_config.SCAN_LOCATIONS.values()})
def add_location(self):
if not self.is_authenticated():
return redirect(url_for('login'))
lat = request.values.get('lat', type=float)
lng = request.values.get('lng', type=float)
radius = request.values.get('radius', type=int)
if not (lat and lng and radius):
abort(400)
self.scan_config.add_scan_location(lat, lng, radius)
self.save_config()
return ('', 204)
def delete_location(self):
if not self.is_authenticated():
return redirect(url_for('login'))
lat = request.values.get('lat', type=float)
lng = request.values.get('lng', type=float)
if not (lat and lng):
abort(400)
self.scan_config.delete_scan_location(lat, lng)
self.save_config()
return ('', 204)
def stats(self):
stats = Pokemon.get_stats()
count = sum(p['count'] for p in stats)
return render_template('stats.html', pokemons=stats, total=count)
def locale(self):
return jsonify(get_locale())
class CustomJSONEncoder(JSONEncoder):
def default(self, obj):
try:
if isinstance(obj, datetime):
if obj.utcoffset() is not None:
obj = obj - obj.utcoffset()
millis = int(
calendar.timegm(obj.timetuple()) * 1000 + obj.microsecond / 1000
)
return millis
iterable = iter(obj)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, obj)
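    # Editor's note (illustrative, not part of the original module): the
    # encoder above serialises datetimes as UTC epoch milliseconds -- for
    # example a naive datetime(1970, 1, 1, 0, 0, 1) is encoded as 1000 -- and
    # falls back to a plain JSON list for any other iterable object.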
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from .. import core
from ..framework import Variable, convert_np_dtype_to_dtype_, _varbase_creator
from ..layers.layer_function_generator import OpProtoHolder
from . import no_grad
from ..framework import _in_eager_mode
import numpy as np
import warnings
from paddle import _C_ops
_supported_int_dtype_ = [
core.VarDesc.VarType.UINT8,
core.VarDesc.VarType.INT8,
core.VarDesc.VarType.INT16,
core.VarDesc.VarType.INT32,
core.VarDesc.VarType.INT64,
core.VarDesc.VarType.BOOL,
]
# NOTE(chenweihang): We currently do not fully support the type promotion
# between tensors. Parting support here is because the interoperation of
# real and complex numbers in paddle quantum is very frequent, such as the
# binary operation between `float` and `complex64`, so we must support the
# correct type promotion on the APIs paddle quantum used.
# Now only check in dygraph (paddle quantum based dygraph)
# Full type promotion support will need to be fully verified later.
_supported_promote_complex_types_ = [
'__add__',
'__radd__',
'__sub__',
'__rsub__',
'__mul__',
'__rmul__',
'__div__',
'__truediv__',
'__rdiv__',
'__rtruediv__',
'__matmul__',
]
_complex_dtypes = [
core.VarDesc.VarType.COMPLEX64,
core.VarDesc.VarType.COMPLEX128,
]
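# Editor's note (illustrative sketch, not part of the original module): with
# the lists above, a binary op named in _supported_promote_complex_types_
# whose operands mix a real and a complex dtype is promoted and computed in
# the complex dtype (see _binary_creator_ below), e.g. (APIs assumed from
# public paddle usage):
#
#   x = paddle.ones([2], dtype='float32')
#   y = paddle.to_tensor([1 + 1j, 2 + 2j], dtype='complex64')
#   z = x * y   # promoted to and computed as complex64
#
# For ops outside that list, the right-hand operand is cast to the left-hand
# dtype and a warning is issued instead.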
_already_patch_varbase = False
_already_patch_eager_tensor = False
def monkey_patch_math_varbase():
"""
Similar to monkey_patch_variable.
The difference is, in dygraph mode, use auto-generated op functions for better performance.
"""
@no_grad
def create_tensor(value, dtype, shape):
out = _varbase_creator(dtype=dtype)
out = _C_ops.fill_constant(out, 'dtype', dtype, 'shape', shape, 'value',
value, 'force_cpu', False)
out.stop_gradient = True
return out
def create_scalar(value, dtype):
return create_tensor(value, dtype, shape=[1])
def astype(self, dtype):
"""
Cast a Tensor to a specified data type.
Args:
dtype: The target data type.
Returns:
Tensor: a new Tensor with target dtype
Examples:
.. code-block:: python
import paddle
import numpy as np
original_tensor = paddle.ones([2, 2])
print("original tensor's dtype is: {}".format(original_tensor.dtype))
new_tensor = original_tensor.astype('float32')
print("new tensor's dtype is: {}".format(new_tensor.dtype))
"""
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
return _C_ops.cast(self, 'in_dtype', self.dtype, 'out_dtype', dtype)
def _scalar_elementwise_op_(var, scale, bias):
return _C_ops.scale(var, 'scale', scale, 'bias', bias)
def _neg_(var):
return _scalar_elementwise_op_(var, -1.0, 0.0)
def _float_(var):
numel = np.prod(var.shape)
assert numel == 1, "only one element variable can be converted to float."
tensor = var.value().get_tensor()
assert tensor._is_initialized(), "variable's tensor is not initialized"
return float(var.numpy().flatten()[0])
def _long_(var):
numel = np.prod(var.shape)
assert numel == 1, "only one element variable can be converted to long."
tensor = var.value().get_tensor()
assert tensor._is_initialized(), "variable's tensor is not initialized"
return int(var.numpy().flatten()[0])
def _int_(var):
numel = np.prod(var.shape)
assert numel == 1, "only one element variable can be converted to int."
tensor = var.value().get_tensor()
assert tensor._is_initialized(), "variable's tensor is not initialized"
return int(var.numpy().flatten()[0])
def _len_(var):
if var.type == core.VarDesc.VarType.VOCAB:
return len(var.value().get_map_tensor())
elif var.type == core.VarDesc.VarType.STRINGS:
return len(var.value().get_string_tensor())
else:
return var.shape[0]
def _index_(var):
numel = np.prod(var.shape)
assert numel == 1, "only one element variable can be converted to python index."
tensor = var.value().get_tensor()
assert tensor._is_initialized(), "variable's tensor is not initialized"
return int(var.numpy().flatten()[0])
@property
def _ndim_(var):
return len(var.shape)
@property
def _size_(var):
return np.prod(var.shape)
@property
def _T_(var):
if len(var.shape) == 1:
return var
perm = []
for i in range(len(var.shape)):
perm.insert(0, i)
out, _ = _C_ops.transpose2(var, 'axis', perm)
return out
def _scalar_add_(var, value):
return _scalar_elementwise_op_(var, 1.0, value)
def _scalar_sub_(var, value):
return _scalar_elementwise_op_(var, 1.0, -value)
def _scalar_rsub_(var, value):
return _scalar_elementwise_op_(var, -1.0, value)
def _scalar_mul_(var, value):
return _scalar_elementwise_op_(var, value, 0.0)
def _scalar_div_(var, value):
return _scalar_elementwise_op_(var, 1.0 / value, 0.0)
# for binary operator such as elementwise, compare
def _binary_creator_(method_name,
op_type,
reverse=False,
scalar_method=None):
def __impl__(self, other_var):
# 1. scalar exists cases
# we need combine the tensor.dtype and scalar.dtype, cast correct object
if isinstance(other_var, float):
# in all cases(+, -, *, /, **, //, %), we need cast tensor.dtype to float
if self.dtype in _supported_int_dtype_:
self = astype(self, 'float32')
# here use `scale` replace `elementwise` to get better performance
# but only +, -, *, / can use this method
if scalar_method is not None:
return scalar_method(self, other_var)
elif isinstance(other_var, int):
# in all cases(+, -, *, /, **, //, %), we can cast it to float
# because the output tensor.dtype depend on the type of input tensor
other_var = float(other_var)
# division is a special case
# NOTE(chenweihang): because we cast tensor to float32 instead float64,
# the division result can only guarantee the numerical accuracy of 6 digits
# after the decimal point. The result of numpy calculation is of float64 type,
# so the calculation result here and the calculation result of numpy are
# different after 6 decimal point. If necessary, we can also use float64 here.
# torch's behavior here is consistent with ours
if op_type == 'elementwise_div' and self.dtype in _supported_int_dtype_:
self = astype(self, 'float32')
# here use `scale` replace `elementwise` to get better performance
# but only +, -, *, / can use this method
if scalar_method is not None:
return scalar_method(self, other_var)
else:
# do nothing
pass
# 2. create varbase for scalar
lhs_dtype = self.dtype
if _in_eager_mode():
other_var_should_be = core.eager.Tensor
else:
other_var_should_be = core.VarBase
if not isinstance(other_var, other_var_should_be):
if isinstance(other_var, complex):
import paddle
other_var = paddle.to_tensor(other_var, dtype='complex64')
else:
if reverse:
other_var = create_tensor(
other_var, dtype=lhs_dtype, shape=self.shape)
else:
# add fill_op
other_var = create_scalar(
value=other_var, dtype=lhs_dtype)
# 3. promote types or unify right var type to left var
rhs_dtype = other_var.dtype
if lhs_dtype != rhs_dtype:
if method_name in _supported_promote_complex_types_ and (
lhs_dtype in _complex_dtypes or
rhs_dtype in _complex_dtypes):
# only when lhs_dtype or rhs_dtype is complex type,
# the dtype will promote, in other cases, directly
# use lhs_dtype, this is consistent will original rule
promote_dtype = core._promote_types_if_complex_exists(
lhs_dtype, rhs_dtype)
self = self if lhs_dtype == promote_dtype else astype(
self, promote_dtype)
other_var = other_var if rhs_dtype == promote_dtype else astype(
other_var, promote_dtype)
else:
warnings.warn(
'The dtype of left and right variables are not the same, left dtype is {}, but right dtype is {}, the right dtype will convert to {}'.
format(lhs_dtype, rhs_dtype, lhs_dtype))
other_var = astype(other_var, lhs_dtype)
if reverse:
tmp = self
self = other_var
other_var = tmp
# 4. calculation
axis = -1
math_op = getattr(_C_ops, op_type)
return math_op(self, other_var, 'axis', axis)
comment = OpProtoHolder.instance().get_op_proto(op_type).comment
__impl__.__doc__ = """
{0}
Args:
other_var(Tensor|float|int): right hand Tensor
Returns:
Tensor
""".format(comment)
__impl__.__name__ = method_name
return __impl__
varbase_methods = [
('__neg__', _neg_),
('__float__', _float_),
('__long__', _long_),
('__int__', _int_),
('__len__', _len_),
('__index__', _index_),
('astype', astype),
('dim', lambda x: len(x.shape)),
('ndimension', lambda x: len(x.shape)),
('ndim', _ndim_),
('size', _size_),
('T', _T_),
('__add__',
_binary_creator_('__add__', 'elementwise_add', False, _scalar_add_)),
## a+b == b+a. Do not need to reverse explicitly
('__radd__',
_binary_creator_('__radd__', 'elementwise_add', False, _scalar_add_)),
('__sub__', _binary_creator_('__sub__', 'elementwise_sub', False,
_scalar_sub_)),
('__rsub__', _binary_creator_('__rsub__', 'elementwise_sub', True,
_scalar_rsub_)),
('__mul__', _binary_creator_('__mul__', 'elementwise_mul', False,
_scalar_mul_)),
## a*b == b*a. Do not need to reverse explicitly
('__rmul__',
_binary_creator_('__rmul__', 'elementwise_mul', False, _scalar_mul_)),
('__div__', _binary_creator_('__div__', 'elementwise_div', False,
_scalar_div_)),
('__truediv__', _binary_creator_('__truediv__', 'elementwise_div',
False, _scalar_div_)),
('__rdiv__', _binary_creator_('__rdiv__', 'elementwise_div', True,
None)),
        ('__rtruediv__', _binary_creator_('__rtruediv__', 'elementwise_div', True,
None)),
('__pow__', _binary_creator_('__pow__', 'elementwise_pow', False,
None)),
('__rpow__', _binary_creator_('__rpow__', 'elementwise_pow', True,
None)),
('__floordiv__', _binary_creator_('__floordiv__',
'elementwise_floordiv', False, None)),
('__mod__', _binary_creator_('__mod__', 'elementwise_mod', False,
None)),
('__matmul__', _binary_creator_('__matmul__', "matmul_v2", False,
None)),
## for logical compare
('__eq__', _binary_creator_('__eq__', 'equal', False, None)),
('__ne__', _binary_creator_('__ne__', 'not_equal', False, None)),
('__lt__', _binary_creator_('__lt__', 'less_than', False, None)),
('__le__', _binary_creator_('__le__', 'less_equal', False, None)),
('__gt__', _binary_creator_('__gt__', 'greater_than', False, None)),
('__ge__', _binary_creator_('__ge__', 'greater_equal', False, None)),
('__array_ufunc__', None)
]
global _already_patch_varbase
global _already_patch_eager_tensor
if core._in_eager_mode():
local_already_patch = _already_patch_eager_tensor
_already_patch_eager_tensor = True
local_tensor = core.eager.Tensor
else:
local_already_patch = _already_patch_varbase
_already_patch_varbase = True
local_tensor = core.VarBase
if not local_already_patch:
for method in varbase_methods:
method_name = method[0]
method_impl = method[1]
setattr(local_tensor, method_name, method_impl)
else:
import paddle.tensor
# Tensor method from module paddle.tensor
for method_name in paddle.tensor.tensor_method_func:
if hasattr(local_tensor, method_name): continue
method_impl = getattr(paddle.tensor, method_name, None)
if method_impl: setattr(local_tensor, method_name, method_impl)
for magic_method, origin_method in paddle.tensor.magic_method_func:
impl = getattr(paddle.tensor, origin_method, None)
if impl: setattr(local_tensor, magic_method, impl)
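# Editor's note (illustrative sketch, not part of the original module): once
# monkey_patch_math_varbase() has run, plain Python operators on dygraph
# tensors dispatch to the patched methods above, e.g. (APIs assumed):
#
#   import paddle
#   a = paddle.ones([2, 2])
#   b = a + 1.0    # __add__ -> _scalar_add_ -> _C_ops.scale
#   c = a @ a      # __matmul__ -> _C_ops.matmul_v2
#   d = a > 0.5    # __gt__ -> _C_ops.greater_than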
|
|
import tempfile
import shutil
import os
import numpy as np
from numpy import pi
from numpy.testing import (assert_array_almost_equal,
assert_equal, assert_warns)
import pytest
from pytest import raises as assert_raises
from scipy.odr import (Data, Model, ODR, RealData, OdrStop, OdrWarning,
multilinear, exponential, unilinear, quadratic,
polynomial)
class TestODR:
# Bad Data for 'x'
def test_bad_data(self):
assert_raises(ValueError, Data, 2, 1)
assert_raises(ValueError, RealData, 2, 1)
# Empty Data for 'x'
def empty_data_func(self, B, x):
return B[0]*x + B[1]
def test_empty_data(self):
beta0 = [0.02, 0.0]
linear = Model(self.empty_data_func)
empty_dat = Data([], [])
assert_warns(OdrWarning, ODR,
empty_dat, linear, beta0=beta0)
empty_dat = RealData([], [])
assert_warns(OdrWarning, ODR,
empty_dat, linear, beta0=beta0)
# Explicit Example
def explicit_fcn(self, B, x):
ret = B[0] + B[1] * np.power(np.exp(B[2]*x) - 1.0, 2)
return ret
def explicit_fjd(self, B, x):
eBx = np.exp(B[2]*x)
ret = B[1] * 2.0 * (eBx-1.0) * B[2] * eBx
return ret
def explicit_fjb(self, B, x):
eBx = np.exp(B[2]*x)
res = np.vstack([np.ones(x.shape[-1]),
np.power(eBx-1.0, 2),
B[1]*2.0*(eBx-1.0)*eBx*x])
return res
def test_explicit(self):
explicit_mod = Model(
self.explicit_fcn,
fjacb=self.explicit_fjb,
fjacd=self.explicit_fjd,
meta=dict(name='Sample Explicit Model',
ref='ODRPACK UG, pg. 39'),
)
explicit_dat = Data([0.,0.,5.,7.,7.5,10.,16.,26.,30.,34.,34.5,100.],
[1265.,1263.6,1258.,1254.,1253.,1249.8,1237.,1218.,1220.6,
1213.8,1215.5,1212.])
explicit_odr = ODR(explicit_dat, explicit_mod, beta0=[1500.0, -50.0, -0.1],
ifixx=[0,0,1,1,1,1,1,1,1,1,1,0])
explicit_odr.set_job(deriv=2)
explicit_odr.set_iprint(init=0, iter=0, final=0)
out = explicit_odr.run()
assert_array_almost_equal(
out.beta,
np.array([1.2646548050648876e+03, -5.4018409956678255e+01,
-8.7849712165253724e-02]),
)
assert_array_almost_equal(
out.sd_beta,
np.array([1.0349270280543437, 1.583997785262061, 0.0063321988657267]),
)
assert_array_almost_equal(
out.cov_beta,
np.array([[4.4949592379003039e-01, -3.7421976890364739e-01,
-8.0978217468468912e-04],
[-3.7421976890364739e-01, 1.0529686462751804e+00,
-1.9453521827942002e-03],
[-8.0978217468468912e-04, -1.9453521827942002e-03,
1.6827336938454476e-05]]),
)
# Implicit Example
def implicit_fcn(self, B, x):
return (B[2]*np.power(x[0]-B[0], 2) +
2.0*B[3]*(x[0]-B[0])*(x[1]-B[1]) +
B[4]*np.power(x[1]-B[1], 2) - 1.0)
def test_implicit(self):
implicit_mod = Model(
self.implicit_fcn,
implicit=1,
meta=dict(name='Sample Implicit Model',
ref='ODRPACK UG, pg. 49'),
)
implicit_dat = Data([
[0.5,1.2,1.6,1.86,2.12,2.36,2.44,2.36,2.06,1.74,1.34,0.9,-0.28,
-0.78,-1.36,-1.9,-2.5,-2.88,-3.18,-3.44],
[-0.12,-0.6,-1.,-1.4,-2.54,-3.36,-4.,-4.75,-5.25,-5.64,-5.97,-6.32,
-6.44,-6.44,-6.41,-6.25,-5.88,-5.5,-5.24,-4.86]],
1,
)
implicit_odr = ODR(implicit_dat, implicit_mod,
beta0=[-1.0, -3.0, 0.09, 0.02, 0.08])
out = implicit_odr.run()
assert_array_almost_equal(
out.beta,
np.array([-0.9993809167281279, -2.9310484652026476, 0.0875730502693354,
0.0162299708984738, 0.0797537982976416]),
)
assert_array_almost_equal(
out.sd_beta,
np.array([0.1113840353364371, 0.1097673310686467, 0.0041060738314314,
0.0027500347539902, 0.0034962501532468]),
)
assert_array_almost_equal(
out.cov_beta,
np.array([[2.1089274602333052e+00, -1.9437686411979040e+00,
7.0263550868344446e-02, -4.7175267373474862e-02,
5.2515575927380355e-02],
[-1.9437686411979040e+00, 2.0481509222414456e+00,
-6.1600515853057307e-02, 4.6268827806232933e-02,
-5.8822307501391467e-02],
[7.0263550868344446e-02, -6.1600515853057307e-02,
2.8659542561579308e-03, -1.4628662260014491e-03,
1.4528860663055824e-03],
[-4.7175267373474862e-02, 4.6268827806232933e-02,
-1.4628662260014491e-03, 1.2855592885514335e-03,
-1.2692942951415293e-03],
[5.2515575927380355e-02, -5.8822307501391467e-02,
1.4528860663055824e-03, -1.2692942951415293e-03,
2.0778813389755596e-03]]),
)
# Multi-variable Example
def multi_fcn(self, B, x):
if (x < 0.0).any():
raise OdrStop
theta = pi*B[3]/2.
ctheta = np.cos(theta)
stheta = np.sin(theta)
omega = np.power(2.*pi*x*np.exp(-B[2]), B[3])
phi = np.arctan2((omega*stheta), (1.0 + omega*ctheta))
r = (B[0] - B[1]) * np.power(np.sqrt(np.power(1.0 + omega*ctheta, 2) +
np.power(omega*stheta, 2)), -B[4])
ret = np.vstack([B[1] + r*np.cos(B[4]*phi),
r*np.sin(B[4]*phi)])
return ret
def test_multi(self):
multi_mod = Model(
self.multi_fcn,
meta=dict(name='Sample Multi-Response Model',
ref='ODRPACK UG, pg. 56'),
)
multi_x = np.array([30.0, 50.0, 70.0, 100.0, 150.0, 200.0, 300.0, 500.0,
700.0, 1000.0, 1500.0, 2000.0, 3000.0, 5000.0, 7000.0, 10000.0,
15000.0, 20000.0, 30000.0, 50000.0, 70000.0, 100000.0, 150000.0])
multi_y = np.array([
[4.22, 4.167, 4.132, 4.038, 4.019, 3.956, 3.884, 3.784, 3.713,
3.633, 3.54, 3.433, 3.358, 3.258, 3.193, 3.128, 3.059, 2.984,
2.934, 2.876, 2.838, 2.798, 2.759],
[0.136, 0.167, 0.188, 0.212, 0.236, 0.257, 0.276, 0.297, 0.309,
0.311, 0.314, 0.311, 0.305, 0.289, 0.277, 0.255, 0.24, 0.218,
0.202, 0.182, 0.168, 0.153, 0.139],
])
n = len(multi_x)
multi_we = np.zeros((2, 2, n), dtype=float)
multi_ifixx = np.ones(n, dtype=int)
multi_delta = np.zeros(n, dtype=float)
multi_we[0,0,:] = 559.6
multi_we[1,0,:] = multi_we[0,1,:] = -1634.0
multi_we[1,1,:] = 8397.0
for i in range(n):
if multi_x[i] < 100.0:
multi_ifixx[i] = 0
elif multi_x[i] <= 150.0:
pass # defaults are fine
elif multi_x[i] <= 1000.0:
multi_delta[i] = 25.0
elif multi_x[i] <= 10000.0:
multi_delta[i] = 560.0
elif multi_x[i] <= 100000.0:
multi_delta[i] = 9500.0
else:
multi_delta[i] = 144000.0
if multi_x[i] == 100.0 or multi_x[i] == 150.0:
multi_we[:,:,i] = 0.0
multi_dat = Data(multi_x, multi_y, wd=1e-4/np.power(multi_x, 2),
we=multi_we)
multi_odr = ODR(multi_dat, multi_mod, beta0=[4.,2.,7.,.4,.5],
delta0=multi_delta, ifixx=multi_ifixx)
multi_odr.set_job(deriv=1, del_init=1)
out = multi_odr.run()
assert_array_almost_equal(
out.beta,
np.array([4.3799880305938963, 2.4333057577497703, 8.0028845899503978,
0.5101147161764654, 0.5173902330489161]),
)
assert_array_almost_equal(
out.sd_beta,
np.array([0.0130625231081944, 0.0130499785273277, 0.1167085962217757,
0.0132642749596149, 0.0288529201353984]),
)
assert_array_almost_equal(
out.cov_beta,
np.array([[0.0064918418231375, 0.0036159705923791, 0.0438637051470406,
-0.0058700836512467, 0.011281212888768],
[0.0036159705923791, 0.0064793789429006, 0.0517610978353126,
-0.0051181304940204, 0.0130726943624117],
[0.0438637051470406, 0.0517610978353126, 0.5182263323095322,
-0.0563083340093696, 0.1269490939468611],
[-0.0058700836512467, -0.0051181304940204, -0.0563083340093696,
0.0066939246261263, -0.0140184391377962],
[0.011281212888768, 0.0130726943624117, 0.1269490939468611,
-0.0140184391377962, 0.0316733013820852]]),
)
# Pearson's Data
# K. Pearson, Philosophical Magazine, 2, 559 (1901)
def pearson_fcn(self, B, x):
return B[0] + B[1]*x
def test_pearson(self):
p_x = np.array([0.,.9,1.8,2.6,3.3,4.4,5.2,6.1,6.5,7.4])
p_y = np.array([5.9,5.4,4.4,4.6,3.5,3.7,2.8,2.8,2.4,1.5])
p_sx = np.array([.03,.03,.04,.035,.07,.11,.13,.22,.74,1.])
p_sy = np.array([1.,.74,.5,.35,.22,.22,.12,.12,.1,.04])
p_dat = RealData(p_x, p_y, sx=p_sx, sy=p_sy)
# Reverse the data to test invariance of results
pr_dat = RealData(p_y, p_x, sx=p_sy, sy=p_sx)
p_mod = Model(self.pearson_fcn, meta=dict(name='Uni-linear Fit'))
p_odr = ODR(p_dat, p_mod, beta0=[1.,1.])
pr_odr = ODR(pr_dat, p_mod, beta0=[1.,1.])
out = p_odr.run()
assert_array_almost_equal(
out.beta,
np.array([5.4767400299231674, -0.4796082367610305]),
)
assert_array_almost_equal(
out.sd_beta,
np.array([0.3590121690702467, 0.0706291186037444]),
)
assert_array_almost_equal(
out.cov_beta,
np.array([[0.0854275622946333, -0.0161807025443155],
[-0.0161807025443155, 0.003306337993922]]),
)
rout = pr_odr.run()
assert_array_almost_equal(
rout.beta,
np.array([11.4192022410781231, -2.0850374506165474]),
)
assert_array_almost_equal(
rout.sd_beta,
np.array([0.9820231665657161, 0.3070515616198911]),
)
assert_array_almost_equal(
rout.cov_beta,
np.array([[0.6391799462548782, -0.1955657291119177],
[-0.1955657291119177, 0.0624888159223392]]),
)
# Lorentz Peak
# The data is taken from one of the undergraduate physics labs I performed.
def lorentz(self, beta, x):
return (beta[0]*beta[1]*beta[2] / np.sqrt(np.power(x*x -
beta[2]*beta[2], 2.0) + np.power(beta[1]*x, 2.0)))
def test_lorentz(self):
l_sy = np.array([.29]*18)
l_sx = np.array([.000972971,.000948268,.000707632,.000706679,
.000706074, .000703918,.000698955,.000456856,
.000455207,.000662717,.000654619,.000652694,
.000000859202,.00106589,.00106378,.00125483, .00140818,.00241839])
l_dat = RealData(
[3.9094, 3.85945, 3.84976, 3.84716, 3.84551, 3.83964, 3.82608,
3.78847, 3.78163, 3.72558, 3.70274, 3.6973, 3.67373, 3.65982,
3.6562, 3.62498, 3.55525, 3.41886],
[652, 910.5, 984, 1000, 1007.5, 1053, 1160.5, 1409.5, 1430, 1122,
957.5, 920, 777.5, 709.5, 698, 578.5, 418.5, 275.5],
sx=l_sx,
sy=l_sy,
)
l_mod = Model(self.lorentz, meta=dict(name='Lorentz Peak'))
l_odr = ODR(l_dat, l_mod, beta0=(1000., .1, 3.8))
out = l_odr.run()
assert_array_almost_equal(
out.beta,
np.array([1.4306780846149925e+03, 1.3390509034538309e-01,
3.7798193600109009e+00]),
)
assert_array_almost_equal(
out.sd_beta,
np.array([7.3621186811330963e-01, 3.5068899941471650e-04,
2.4451209281408992e-04]),
)
assert_array_almost_equal(
out.cov_beta,
np.array([[2.4714409064597873e-01, -6.9067261911110836e-05,
-3.1236953270424990e-05],
[-6.9067261911110836e-05, 5.6077531517333009e-08,
3.6133261832722601e-08],
[-3.1236953270424990e-05, 3.6133261832722601e-08,
2.7261220025171730e-08]]),
)
def test_ticket_1253(self):
def linear(c, x):
return c[0]*x+c[1]
c = [2.0, 3.0]
x = np.linspace(0, 10)
y = linear(c, x)
model = Model(linear)
data = Data(x, y, wd=1.0, we=1.0)
job = ODR(data, model, beta0=[1.0, 1.0])
result = job.run()
assert_equal(result.info, 2)
# Verify fix for gh-9140
def test_ifixx(self):
x1 = [-2.01, -0.99, -0.001, 1.02, 1.98]
x2 = [3.98, 1.01, 0.001, 0.998, 4.01]
fix = np.vstack((np.zeros_like(x1, dtype=int), np.ones_like(x2, dtype=int)))
data = Data(np.vstack((x1, x2)), y=1, fix=fix)
model = Model(lambda beta, x: x[1, :] - beta[0] * x[0, :]**2., implicit=True)
odr1 = ODR(data, model, beta0=np.array([1.]))
sol1 = odr1.run()
odr2 = ODR(data, model, beta0=np.array([1.]), ifixx=fix)
sol2 = odr2.run()
assert_equal(sol1.beta, sol2.beta)
# verify bugfix for #11800 in #11802
def test_ticket_11800(self):
# parameters
beta_true = np.array([1.0, 2.3, 1.1, -1.0, 1.3, 0.5])
nr_measurements = 10
std_dev_x = 0.01
x_error = np.array([[0.00063445, 0.00515731, 0.00162719, 0.01022866,
-0.01624845, 0.00482652, 0.00275988, -0.00714734, -0.00929201, -0.00687301],
[-0.00831623, -0.00821211, -0.00203459, 0.00938266, -0.00701829,
0.0032169, 0.00259194, -0.00581017, -0.0030283, 0.01014164]])
std_dev_y = 0.05
y_error = np.array([[0.05275304, 0.04519563, -0.07524086, 0.03575642,
0.04745194, 0.03806645, 0.07061601, -0.00753604, -0.02592543, -0.02394929],
[0.03632366, 0.06642266, 0.08373122, 0.03988822, -0.0092536,
-0.03750469, -0.03198903, 0.01642066, 0.01293648, -0.05627085]])
beta_solution = np.array([
2.62920235756665876536e+00, -1.26608484996299608838e+02, 1.29703572775403074502e+02,
-1.88560985401185465804e+00, 7.83834160771274923718e+01, -7.64124076838087091801e+01])
# model's function and Jacobians
def func(beta, x):
y0 = beta[0] + beta[1] * x[0, :] + beta[2] * x[1, :]
y1 = beta[3] + beta[4] * x[0, :] + beta[5] * x[1, :]
return np.vstack((y0, y1))
def df_dbeta_odr(beta, x):
nr_meas = np.shape(x)[1]
zeros = np.zeros(nr_meas)
ones = np.ones(nr_meas)
dy0 = np.array([ones, x[0, :], x[1, :], zeros, zeros, zeros])
dy1 = np.array([zeros, zeros, zeros, ones, x[0, :], x[1, :]])
return np.stack((dy0, dy1))
def df_dx_odr(beta, x):
nr_meas = np.shape(x)[1]
ones = np.ones(nr_meas)
dy0 = np.array([beta[1] * ones, beta[2] * ones])
dy1 = np.array([beta[4] * ones, beta[5] * ones])
return np.stack((dy0, dy1))
# do measurements with errors in independent and dependent variables
x0_true = np.linspace(1, 10, nr_measurements)
x1_true = np.linspace(1, 10, nr_measurements)
x_true = np.array([x0_true, x1_true])
y_true = func(beta_true, x_true)
x_meas = x_true + x_error
y_meas = y_true + y_error
# estimate model's parameters
model_f = Model(func, fjacb=df_dbeta_odr, fjacd=df_dx_odr)
data = RealData(x_meas, y_meas, sx=std_dev_x, sy=std_dev_y)
odr_obj = ODR(data, model_f, beta0=0.9 * beta_true, maxit=100)
#odr_obj.set_iprint(init=2, iter=0, iter_step=1, final=1)
odr_obj.set_job(deriv=3)
odr_out = odr_obj.run()
# check results
assert_equal(odr_out.info, 1)
assert_array_almost_equal(odr_out.beta, beta_solution)
def test_multilinear_model(self):
x = np.linspace(0.0, 5.0)
y = 10.0 + 5.0 * x
data = Data(x, y)
odr_obj = ODR(data, multilinear)
output = odr_obj.run()
assert_array_almost_equal(output.beta, [10.0, 5.0])
def test_exponential_model(self):
x = np.linspace(0.0, 5.0)
y = -10.0 + np.exp(0.5*x)
data = Data(x, y)
odr_obj = ODR(data, exponential)
output = odr_obj.run()
assert_array_almost_equal(output.beta, [-10.0, 0.5])
def test_polynomial_model(self):
x = np.linspace(0.0, 5.0)
y = 1.0 + 2.0 * x + 3.0 * x ** 2 + 4.0 * x ** 3
poly_model = polynomial(3)
data = Data(x, y)
odr_obj = ODR(data, poly_model)
output = odr_obj.run()
assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0, 4.0])
def test_unilinear_model(self):
x = np.linspace(0.0, 5.0)
y = 1.0 * x + 2.0
data = Data(x, y)
odr_obj = ODR(data, unilinear)
output = odr_obj.run()
assert_array_almost_equal(output.beta, [1.0, 2.0])
def test_quadratic_model(self):
x = np.linspace(0.0, 5.0)
y = 1.0 * x ** 2 + 2.0 * x + 3.0
data = Data(x, y)
odr_obj = ODR(data, quadratic)
output = odr_obj.run()
assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0])
def test_work_ind(self):
def func(par, x):
b0, b1 = par
return b0 + b1 * x
# generate some data
n_data = 4
x = np.arange(n_data)
y = np.where(x % 2, x + 0.1, x - 0.1)
x_err = np.full(n_data, 0.1)
y_err = np.full(n_data, 0.1)
# do the fitting
linear_model = Model(func)
real_data = RealData(x, y, sx=x_err, sy=y_err)
odr_obj = ODR(real_data, linear_model, beta0=[0.4, 0.4])
odr_obj.set_job(fit_type=0)
out = odr_obj.run()
sd_ind = out.work_ind['sd']
assert_array_almost_equal(out.sd_beta,
out.work[sd_ind:sd_ind + len(out.sd_beta)])
@pytest.mark.skipif(True, reason="Fortran I/O prone to crashing so better "
"not to run this test, see gh-13127")
def test_output_file_overwrite(self):
"""
Verify fix for gh-1892
"""
def func(b, x):
return b[0] + b[1] * x
p = Model(func)
data = Data(np.arange(10), 12 * np.arange(10))
tmp_dir = tempfile.mkdtemp()
error_file_path = os.path.join(tmp_dir, "error.dat")
report_file_path = os.path.join(tmp_dir, "report.dat")
try:
ODR(data, p, beta0=[0.1, 13], errfile=error_file_path,
rptfile=report_file_path).run()
ODR(data, p, beta0=[0.1, 13], errfile=error_file_path,
rptfile=report_file_path, overwrite=True).run()
finally:
# remove output files for clean up
shutil.rmtree(tmp_dir)
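# Editor's note (illustrative sketch, not part of the test suite): every test
# above follows the same scipy.odr workflow -- wrap the model function in
# Model, the measurements in Data/RealData, then fit and inspect the output:
#
#   model = Model(lambda beta, x: beta[0] + beta[1] * x)
#   data = RealData(x, y, sx=x_err, sy=y_err)
#   out = ODR(data, model, beta0=[1.0, 1.0]).run()
#   out.beta, out.sd_beta, out.cov_beta   # estimates and their uncertainties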
|
|
from __future__ import absolute_import
import urwid
from . import common
def _mkhelp():
text = []
keys = [
("A", "accept all intercepted flows"),
("a", "accept this intercepted flow"),
("C", "clear flow list or eventlog"),
("d", "delete flow"),
("D", "duplicate flow"),
("e", "toggle eventlog"),
("F", "toggle follow flow list"),
("l", "set limit filter pattern"),
("L", "load saved flows"),
("r", "replay request"),
("V", "revert changes to request"),
("w", "save flows "),
("W", "stream flows to file"),
("X", "kill and delete flow, even if it's mid-intercept"),
("tab", "tab between eventlog and flow list"),
("enter", "view flow"),
("|", "run script on this flow"),
]
text.extend(common.format_keyvals(keys, key="key", val="text", indent=4))
return text
help_context = _mkhelp()
footer = [
('heading_key', "?"), ":help ",
]
class EventListBox(urwid.ListBox):
def __init__(self, master):
self.master = master
urwid.ListBox.__init__(self, master.eventlist)
def keypress(self, size, key):
key = common.shortcuts(key)
if key == "C":
self.master.clear_events()
key = None
return urwid.ListBox.keypress(self, size, key)
class BodyPile(urwid.Pile):
def __init__(self, master):
h = urwid.Text("Event log")
h = urwid.Padding(h, align="left", width=("relative", 100))
self.inactive_header = urwid.AttrWrap(h, "heading_inactive")
self.active_header = urwid.AttrWrap(h, "heading")
urwid.Pile.__init__(
self,
[
FlowListBox(master),
urwid.Frame(EventListBox(master), header = self.inactive_header)
]
)
self.master = master
def keypress(self, size, key):
if key == "tab":
self.focus_position = (self.focus_position + 1)%len(self.widget_list)
if self.focus_position == 1:
self.widget_list[1].header = self.active_header
else:
self.widget_list[1].header = self.inactive_header
key = None
elif key == "e":
self.master.toggle_eventlog()
key = None
# This is essentially a copypasta from urwid.Pile's keypress handler.
# So much for "closed for modification, but open for extension".
item_rows = None
if len(size)==2:
item_rows = self.get_item_rows( size, focus=True )
i = self.widget_list.index(self.focus_item)
tsize = self.get_item_size(size,i,True,item_rows)
return self.focus_item.keypress( tsize, key )
class ConnectionItem(common.WWrap):
def __init__(self, master, state, flow, focus):
self.master, self.state, self.flow = master, state, flow
self.f = focus
w = self.get_text()
common.WWrap.__init__(self, w)
def get_text(self):
return common.format_flow(self.flow, self.f, hostheader=self.master.showhost)
def selectable(self):
return True
def save_flows_prompt(self, k):
if k == "a":
self.master.path_prompt(
"Save all flows to: ",
self.state.last_saveload,
self.master.save_flows
)
else:
self.master.path_prompt(
"Save this flow to: ",
self.state.last_saveload,
self.master.save_one_flow,
self.flow
)
def stop_server_playback_prompt(self, a):
if a != "n":
self.master.stop_server_playback()
def server_replay_prompt(self, k):
if k == "a":
self.master.start_server_playback(
[i.copy() for i in self.master.state.view],
self.master.killextra, self.master.rheaders,
False, self.master.nopop
)
elif k == "t":
self.master.start_server_playback(
[self.flow.copy()],
self.master.killextra, self.master.rheaders,
False, self.master.nopop
)
else:
self.master.path_prompt(
"Server replay path: ",
self.state.last_saveload,
self.master.server_playback_path
)
def keypress(self, (maxcol,), key):
key = common.shortcuts(key)
if key == "a":
self.flow.accept_intercept()
self.master.sync_list_view()
elif key == "d":
self.flow.kill(self.master)
self.state.delete_flow(self.flow)
self.master.sync_list_view()
elif key == "D":
f = self.master.duplicate_flow(self.flow)
self.master.view_flow(f)
elif key == "r":
self.flow.backup()
r = self.master.replay_request(self.flow)
if r:
self.master.statusbar.message(r)
self.master.sync_list_view()
elif key == "S":
if not self.master.server_playback:
self.master.prompt_onekey(
"Server Replay",
(
("all flows", "a"),
("this flow", "t"),
("file", "f"),
),
self.server_replay_prompt,
)
else:
self.master.prompt_onekey(
"Stop current server replay?",
(
("yes", "y"),
("no", "n"),
),
self.stop_server_playback_prompt,
)
elif key == "V":
if not self.flow.modified():
self.master.statusbar.message("Flow not modified.")
return
self.state.revert(self.flow)
self.master.sync_list_view()
self.master.statusbar.message("Reverted.")
elif key == "w":
self.master.prompt_onekey(
"Save",
(
("all flows", "a"),
("this flow", "t"),
),
self.save_flows_prompt,
)
elif key == "X":
self.flow.kill(self.master)
elif key == "enter":
if self.flow.request:
self.master.view_flow(self.flow)
elif key == "|":
self.master.path_prompt(
"Send flow to script: ",
self.state.last_script,
self.master.run_script_once,
self.flow
)
else:
return key
class FlowListWalker(urwid.ListWalker):
def __init__(self, master, state):
self.master, self.state = master, state
if self.state.flow_count():
self.set_focus(0)
def get_focus(self):
f, i = self.state.get_focus()
f = ConnectionItem(self.master, self.state, f, True) if f else None
return f, i
def set_focus(self, focus):
ret = self.state.set_focus(focus)
return ret
def get_next(self, pos):
f, i = self.state.get_next(pos)
f = ConnectionItem(self.master, self.state, f, False) if f else None
return f, i
def get_prev(self, pos):
f, i = self.state.get_prev(pos)
f = ConnectionItem(self.master, self.state, f, False) if f else None
return f, i
class FlowListBox(urwid.ListBox):
def __init__(self, master):
self.master = master
urwid.ListBox.__init__(self, master.flow_list_walker)
def keypress(self, size, key):
key = common.shortcuts(key)
if key == "A":
self.master.accept_all()
self.master.sync_list_view()
elif key == "C":
self.master.clear_flows()
elif key == "e":
self.master.toggle_eventlog()
elif key == "l":
self.master.prompt("Limit: ", self.master.state.limit_txt, self.master.set_limit)
elif key == "L":
self.master.path_prompt(
"Load flows: ",
self.master.state.last_saveload,
self.master.load_flows_callback
)
elif key == "F":
self.master.toggle_follow_flows()
elif key == "W":
if self.master.stream:
self.master.stop_stream()
else:
self.master.path_prompt(
"Stream flows to: ",
self.master.state.last_saveload,
self.master.start_stream
)
else:
return urwid.ListBox.keypress(self, size, key)
|
|
# No shebang line. This module is meant to be imported.
#
# Copyright 2010. Luis Artola. All rights reserved.
#
#
# $URL: file:///svn/restblog/trunk/src/python/restblog/post.py $
# $Date: 2010-07-31 14:27:54 -0700 (Sat, 31 Jul 2010) $
# $Revision: 186 $
#
# History:
# 2010.06.30 lartola Initial working version
#
'''
Functions to transform and manipulate a restblog post from XHTML.
:copyright: Copyright 2010 Luis Artola.
:license: BSD, see LICENSE.txt for details.
'''
import os
import subprocess
import tempfile
from xml.etree import ElementTree
import restblog2html
def createFormattedPost( file_name ):
'''createFormattedPost( file_name ) -> str
Translates the given `file_name` into an XHTML document.
Parameters:
- file_name: Input file with a post in reStructuredText format.
Returns the name of a file with the XHTML document.
'''
output_file, output_file_name = tempfile.mkstemp( prefix='restblog_', suffix='.html' )
os.close( output_file )
arguments = [ file_name, output_file_name ]
restblog2html.main( arguments )
return output_file_name
def getPostContents( file_name ):
'''getPostContents( file_name ) -> ( `xml.etree.ElementTree.Element`, dict )
Extracts the relevant portions of the post from the given XHTML `file_name`.
Parameters:
- file_name: Name of the XHTML input file name.
Returns a tuple with the following values:
- An `xml.etree.ElementTree.Element` that contains the post metadata. This is
basically the options extracted from the ``.. restblog::`` directive
stored in the input reStructuredText file used to produce the given XHTML
`file_name`. See `restblog.directives.restblogheader` for more information.
- A dictionary with the actual portion of the XHTML document that contains
the post contents. Contains the following keys:
- title: str
- description: str
- mt_excerpt: str
- mt_text_more: str
- mt_keywords: list
- categories: list
'''
# The input XHTML as generated by restblog uses namespaces
namespace = 'http://www.w3.org/1999/xhtml'
body_tag = str( ElementTree.QName( namespace, 'body' ) )
div_tag = str( ElementTree.QName( namespace, 'div' ) )
# Parse document for tearing it apart easily
document = ElementTree.parse( file_name )
body = document.find( body_tag )
# Find tags with special meaning for restblog
nodes = document.getiterator( div_tag )
metadata_node = None
full_story_sentinel = None
full_story_sentinel_index = 0
for index, node in enumerate( nodes ):
if node.attrib.get( 'name' ) == 'restblogmetadata':
metadata_node = node
elif node.attrib.get( 'name' ) == 'restblogfullstory':
full_story_sentinel = node
full_story_sentinel_index = index
if metadata_node is None:
raise RuntimeError, 'Unable to find restblog metadata in the formated post.'
metadata = ElementTree.XML( metadata_node.text )
# Extract actual contents of the post
title = metadata.attrib.get( 'title' )
if not title:
name, extension = os.path.splitext( os.path.basename( file_name ) )
title = name
categories = metadata.attrib.get( 'categories', [] )
if categories:
categories = map( str.strip, categories.split( ',' ) )
tags = metadata.attrib.get( 'tags', [] )
if tags:
tags = map( str.strip, tags.split( ',' ) )
# Translate XHTML portions we actually care about
body = body.find( div_tag )
removeLineBreaksFromElement( body )
if metadata_node is not None:
body.remove( metadata_node )
if full_story_sentinel is not None:
comment = ElementTree.Comment( 'more' )
body.insert( full_story_sentinel_index, comment )
body.remove( full_story_sentinel )
post = ElementTree.tostring( body )
# Remove any namespace notation from tags and translate special tags
post = post.replace( '<html:', '<' )
post = post.replace( '</html:', '</' )
post = post.replace( '<!-- more -->', '<!--more-->' )
post = post.strip()
# Build contents as expected by the metaWeblog.newPost API method
contents = dict(
title=title,
description=post,
mt_excerpt='',
mt_text_more='',
mt_keywords=tags,
categories=categories,
)
return metadata, contents
def removeLineBreaksFromElement( element ):
'''removeLineBreaksFromElement( element )
Removes line breaks from text in paragraphs that is not preformatted.
For some reason, Wordpress appears to be - incorrectly IMHO, replacing
new-line characters with an actual line break, i.e. <br />
Needless to say, that just goes against what straight HTML would do,
i.e. text in paragraphs does not respect line breaks and it's rendered
as one contiguous line, e.g.::
<p>one
two three
four five six</p>
Should be rendered as::
one two three four five six
Not::
one<br />
two three<br />
four five six
Simply because it is a <p/> element not <pre/>.
In any event, this function removes all line breaks and turns multiline
paragraphs into a single running line of text.
Parameters:
- element: An `xml.etree.ElementTree.Element` whose text will be stripped
off new-line characters.
'''
def removeLineBreaks( text ):
if text is None:
return text
lines = text.split( '\n' )
text = ' '.join( lines )
return text
namespace = 'http://www.w3.org/1999/xhtml'
p_tag = str( ElementTree.QName( namespace, 'p' ) )
paragraphs = element.findall( p_tag )
for paragraph in paragraphs:
paragraph.text = removeLineBreaks( paragraph.text )
paragraph.tail = removeLineBreaks( paragraph.tail )
for child in paragraph.getchildren():
child.text = removeLineBreaks( child.text )
child.tail = removeLineBreaks( child.tail )
def updateSourceMetadata( file_name, metadata ):
'''updateSourceMetadata( file_name, metadata )
Parameters:
- file_name: File to the source post in reStructuredText to be updated.
- metadata: An `xml.etree.ElementTree.Element` representing all the values
that describe a post. This maps to all the options to the
``.. restblog::`` directive.
'''
before, restblog, after = splitSourceAtRestblogDirective( file_name )
restblog = buildRestblogFromMetadata( metadata )
lines = before + restblog + after
file = open( file_name, 'w' )
file.writelines( lines )
file.close()
def splitSourceAtRestblogDirective( file_name ):
'''splitSourceAtRestblogDirective( file_name ) -> ( list, list, list )
Locates the block containing a ``.. restblog::`` directive and splits
the contents. Returns the block of lines before, the restblog block and
the lines after it.
I'm sure there is a better way, but given the structure of a
reStructuredText document, let's take a naive approach and scan the file
to update the lines starting with ``.. restblog::`` and the contiguous lines
before an empty line with the given metadata, e.g.::
.. restblog::
:title: Some title here
:source: yes
Parameters:
- file_name: File to the source post in reStructuredText to split.
Returns a tuple of three lists with the lines before the directive, the
directive itself and after it.
'''
file = open( file_name, 'r' )
lines = file.readlines()
file.close()
before = []
restblog = []
after = []
extracting = False
for index, line in enumerate( lines ):
if not extracting:
if '.. restblog::' in line:
extracting = True
restblog.append( line )
else:
before.append( line )
else:
if line.strip():
restblog.append( line )
else:
# we've reached the end of the directive as indicated by an
# empty line.
extracting = False
after = lines[index+1:]
break
return before, restblog, after
def buildRestblogFromMetadata( metadata ):
'''buildRestblogFromMetadata( metadata ) -> list
Recreates a ``.. restblog::`` directive from the given `metadata`.
Parameters:
- metadata: An `xml.etree.ElementTree.Element` with all the values to
describe a post.
Returns a list of strings.
'''
lines = [ '.. restblog::\n' ]
for key, value in sorted( metadata.attrib.items() ):
line = ' :%(key)s: %(value)s\n' % locals()
lines.append( line )
lines.append( '\n' )
return lines
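# Editor's note (illustrative, not part of the original module): for a
# metadata element with attributes title='Hello' and tags='a, b',
# buildRestblogFromMetadata() yields the directive line, one indented
# ':key: value' option line per attribute in sorted order, and a trailing
# blank line, roughly:
#
#   .. restblog::
#      :tags: a, b
#      :title: Hello
#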
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from sqlalchemy.exc import IntegrityError
from sqlalchemy.sql import and_, func, functions, join, literal, select
from superset.models.tags import ObjectTypes, TagTypes
def add_types(engine, metadata):
"""
Tag every object according to its type:
INSERT INTO tagged_object (tag_id, object_id, object_type)
SELECT
tag.id AS tag_id,
slices.id AS object_id,
'chart' AS object_type
FROM slices
JOIN tag
ON tag.name = 'type:chart'
LEFT OUTER JOIN tagged_object
ON tagged_object.tag_id = tag.id
AND tagged_object.object_id = slices.id
AND tagged_object.object_type = 'chart'
WHERE tagged_object.tag_id IS NULL;
INSERT INTO tagged_object (tag_id, object_id, object_type)
SELECT
tag.id AS tag_id,
dashboards.id AS object_id,
'dashboard' AS object_type
FROM dashboards
JOIN tag
ON tag.name = 'type:dashboard'
LEFT OUTER JOIN tagged_object
ON tagged_object.tag_id = tag.id
AND tagged_object.object_id = dashboards.id
AND tagged_object.object_type = 'dashboard'
WHERE tagged_object.tag_id IS NULL;
INSERT INTO tagged_object (tag_id, object_id, object_type)
SELECT
tag.id AS tag_id,
saved_query.id AS object_id,
'query' AS object_type
FROM saved_query
JOIN tag
      ON tag.name = 'type:query'
LEFT OUTER JOIN tagged_object
ON tagged_object.tag_id = tag.id
AND tagged_object.object_id = saved_query.id
AND tagged_object.object_type = 'query'
WHERE tagged_object.tag_id IS NULL;
"""
tag = metadata.tables["tag"]
tagged_object = metadata.tables["tagged_object"]
slices = metadata.tables["slices"]
dashboards = metadata.tables["dashboards"]
saved_query = metadata.tables["saved_query"]
columns = ["tag_id", "object_id", "object_type"]
# add a tag for each object type
insert = tag.insert()
for type_ in ObjectTypes.__members__:
try:
engine.execute(insert, name=f"type:{type_}", type=TagTypes.type)
except IntegrityError:
pass # already exists
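    # Editor's note (illustrative, not part of the original function): each of
    # the three SELECTs below uses the same SQLAlchemy pattern -- an inner
    # JOIN from the object table to its 'type:<name>' tag, a LEFT OUTER JOIN
    # to tagged_object, and a WHERE tagged_object.tag_id IS NULL anti-join so
    # that only objects not yet tagged are inserted via
    # tagged_object.insert().from_select(columns, ...).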
charts = (
select(
[
tag.c.id.label("tag_id"),
slices.c.id.label("object_id"),
literal(ObjectTypes.chart.name).label("object_type"),
]
)
.select_from(
join(
join(slices, tag, tag.c.name == "type:chart"),
tagged_object,
and_(
tagged_object.c.tag_id == tag.c.id,
tagged_object.c.object_id == slices.c.id,
tagged_object.c.object_type == "chart",
),
isouter=True,
full=False,
)
)
.where(tagged_object.c.tag_id.is_(None))
)
query = tagged_object.insert().from_select(columns, charts)
engine.execute(query)
dashboards = (
select(
[
tag.c.id.label("tag_id"),
dashboards.c.id.label("object_id"),
literal(ObjectTypes.dashboard.name).label("object_type"),
]
)
.select_from(
join(
join(dashboards, tag, tag.c.name == "type:dashboard"),
tagged_object,
and_(
tagged_object.c.tag_id == tag.c.id,
tagged_object.c.object_id == dashboards.c.id,
tagged_object.c.object_type == "dashboard",
),
isouter=True,
full=False,
)
)
.where(tagged_object.c.tag_id.is_(None))
)
query = tagged_object.insert().from_select(columns, dashboards)
engine.execute(query)
saved_queries = (
select(
[
tag.c.id.label("tag_id"),
saved_query.c.id.label("object_id"),
literal(ObjectTypes.query.name).label("object_type"),
]
)
.select_from(
join(
join(saved_query, tag, tag.c.name == "type:query"),
tagged_object,
and_(
tagged_object.c.tag_id == tag.c.id,
tagged_object.c.object_id == saved_query.c.id,
tagged_object.c.object_type == "query",
),
isouter=True,
full=False,
)
)
.where(tagged_object.c.tag_id.is_(None))
)
query = tagged_object.insert().from_select(columns, saved_queries)
engine.execute(query)
def add_owners(engine, metadata):
"""
Tag every object according to its owner:
INSERT INTO tagged_object (tag_id, object_id, object_type)
SELECT
tag.id AS tag_id,
slices.id AS object_id,
'chart' AS object_type
FROM slices
JOIN tag
ON tag.name = CONCAT('owner:', slices.created_by_fk)
LEFT OUTER JOIN tagged_object
ON tagged_object.tag_id = tag.id
AND tagged_object.object_id = slices.id
AND tagged_object.object_type = 'chart'
WHERE tagged_object.tag_id IS NULL;
      INSERT INTO tagged_object (tag_id, object_id, object_type)
      SELECT
tag.id AS tag_id,
dashboards.id AS object_id,
'dashboard' AS object_type
FROM dashboards
JOIN tag
ON tag.name = CONCAT('owner:', dashboards.created_by_fk)
LEFT OUTER JOIN tagged_object
ON tagged_object.tag_id = tag.id
AND tagged_object.object_id = dashboards.id
AND tagged_object.object_type = 'dashboard'
WHERE tagged_object.tag_id IS NULL;
      INSERT INTO tagged_object (tag_id, object_id, object_type)
      SELECT
tag.id AS tag_id,
saved_query.id AS object_id,
'query' AS object_type
FROM saved_query
JOIN tag
ON tag.name = CONCAT('owner:', saved_query.created_by_fk)
LEFT OUTER JOIN tagged_object
ON tagged_object.tag_id = tag.id
AND tagged_object.object_id = saved_query.id
AND tagged_object.object_type = 'query'
WHERE tagged_object.tag_id IS NULL;
"""
tag = metadata.tables["tag"]
tagged_object = metadata.tables["tagged_object"]
users = metadata.tables["ab_user"]
slices = metadata.tables["slices"]
dashboards = metadata.tables["dashboards"]
saved_query = metadata.tables["saved_query"]
columns = ["tag_id", "object_id", "object_type"]
# create a custom tag for each user
ids = select([users.c.id])
insert = tag.insert()
for (id_,) in engine.execute(ids):
try:
engine.execute(insert, name=f"owner:{id_}", type=TagTypes.owner)
except IntegrityError:
pass # already exists
charts = (
select(
[
tag.c.id.label("tag_id"),
slices.c.id.label("object_id"),
literal(ObjectTypes.chart.name).label("object_type"),
]
)
.select_from(
join(
join(
slices,
tag,
tag.c.name == functions.concat("owner:", slices.c.created_by_fk),
),
tagged_object,
and_(
tagged_object.c.tag_id == tag.c.id,
tagged_object.c.object_id == slices.c.id,
tagged_object.c.object_type == "chart",
),
isouter=True,
full=False,
)
)
.where(tagged_object.c.tag_id.is_(None))
)
query = tagged_object.insert().from_select(columns, charts)
engine.execute(query)
dashboards = (
select(
[
tag.c.id.label("tag_id"),
dashboards.c.id.label("object_id"),
literal(ObjectTypes.dashboard.name).label("object_type"),
]
)
.select_from(
join(
join(
dashboards,
tag,
tag.c.name
== functions.concat("owner:", dashboards.c.created_by_fk),
),
tagged_object,
and_(
tagged_object.c.tag_id == tag.c.id,
tagged_object.c.object_id == dashboards.c.id,
tagged_object.c.object_type == "dashboard",
),
isouter=True,
full=False,
)
)
.where(tagged_object.c.tag_id.is_(None))
)
query = tagged_object.insert().from_select(columns, dashboards)
engine.execute(query)
saved_queries = (
select(
[
tag.c.id.label("tag_id"),
saved_query.c.id.label("object_id"),
literal(ObjectTypes.query.name).label("object_type"),
]
)
.select_from(
join(
join(
saved_query,
tag,
tag.c.name
== functions.concat("owner:", saved_query.c.created_by_fk),
),
tagged_object,
and_(
tagged_object.c.tag_id == tag.c.id,
tagged_object.c.object_id == saved_query.c.id,
tagged_object.c.object_type == "query",
),
isouter=True,
full=False,
)
)
.where(tagged_object.c.tag_id.is_(None))
)
query = tagged_object.insert().from_select(columns, saved_queries)
engine.execute(query)
def add_favorites(engine, metadata):
"""
Tag every object that was favorited:
INSERT INTO tagged_object (tag_id, object_id, object_type)
SELECT
tag.id AS tag_id,
favstar.obj_id AS object_id,
LOWER(favstar.class_name) AS object_type
FROM favstar
JOIN tag
ON tag.name = CONCAT('favorited_by:', favstar.user_id)
LEFT OUTER JOIN tagged_object
ON tagged_object.tag_id = tag.id
AND tagged_object.object_id = favstar.obj_id
AND tagged_object.object_type = LOWER(favstar.class_name)
WHERE tagged_object.tag_id IS NULL;
"""
tag = metadata.tables["tag"]
tagged_object = metadata.tables["tagged_object"]
users = metadata.tables["ab_user"]
favstar = metadata.tables["favstar"]
columns = ["tag_id", "object_id", "object_type"]
# create a custom tag for each user
ids = select([users.c.id])
insert = tag.insert()
for (id_,) in engine.execute(ids):
try:
engine.execute(insert, name=f"favorited_by:{id_}", type=TagTypes.type)
except IntegrityError:
pass # already exists
favstars = (
select(
[
tag.c.id.label("tag_id"),
favstar.c.obj_id.label("object_id"),
func.lower(favstar.c.class_name).label("object_type"),
]
)
.select_from(
join(
join(
favstar,
tag,
tag.c.name == functions.concat("favorited_by:", favstar.c.user_id),
),
tagged_object,
and_(
tagged_object.c.tag_id == tag.c.id,
tagged_object.c.object_id == favstar.c.obj_id,
tagged_object.c.object_type == func.lower(favstar.c.class_name),
),
isouter=True,
full=False,
)
)
.where(tagged_object.c.tag_id.is_(None))
)
query = tagged_object.insert().from_select(columns, favstars)
engine.execute(query)
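# Hedged usage sketch (illustrative only, not part of the original migration):
# one way the helpers above could be driven from an Alembic upgrade(). The
# local imports and the reflected-metadata approach are assumptions about the
# surrounding migration module, not a description of the real file.
def _example_upgrade():
    from alembic import op
    from sqlalchemy import MetaData

    bind = op.get_bind()
    metadata = MetaData()
    metadata.reflect(bind=bind)  # load tag, tagged_object, slices, dashboards, ...
    add_owners(bind, metadata)  # tag charts/dashboards/queries by their owners
    add_favorites(bind, metadata)  # tag objects that users have favorited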
|
|
from rest_framework import serializers
from dcim import models
from netbox.api import WritableNestedSerializer
__all__ = [
'NestedCableSerializer',
'NestedConsolePortSerializer',
'NestedConsolePortTemplateSerializer',
'NestedConsoleServerPortSerializer',
'NestedConsoleServerPortTemplateSerializer',
'NestedDeviceBaySerializer',
'NestedDeviceBayTemplateSerializer',
'NestedDeviceRoleSerializer',
'NestedDeviceSerializer',
'NestedDeviceTypeSerializer',
'NestedFrontPortSerializer',
'NestedFrontPortTemplateSerializer',
'NestedInterfaceSerializer',
'NestedInterfaceTemplateSerializer',
'NestedInventoryItemSerializer',
'NestedManufacturerSerializer',
'NestedPlatformSerializer',
'NestedPowerFeedSerializer',
'NestedPowerOutletSerializer',
'NestedPowerOutletTemplateSerializer',
'NestedPowerPanelSerializer',
'NestedPowerPortSerializer',
'NestedPowerPortTemplateSerializer',
'NestedRackGroupSerializer',
'NestedRackReservationSerializer',
'NestedRackRoleSerializer',
'NestedRackSerializer',
'NestedRearPortSerializer',
'NestedRearPortTemplateSerializer',
'NestedRegionSerializer',
'NestedSiteSerializer',
'NestedVirtualChassisSerializer',
]
#
# Regions/sites
#
class NestedRegionSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:region-detail')
site_count = serializers.IntegerField(read_only=True)
_depth = serializers.IntegerField(source='level', read_only=True)
class Meta:
model = models.Region
fields = ['id', 'url', 'name', 'slug', 'site_count', '_depth']
class NestedSiteSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:site-detail')
class Meta:
model = models.Site
fields = ['id', 'url', 'name', 'slug']
#
# Racks
#
class NestedRackGroupSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:rackgroup-detail')
rack_count = serializers.IntegerField(read_only=True)
_depth = serializers.IntegerField(source='level', read_only=True)
class Meta:
model = models.RackGroup
fields = ['id', 'url', 'name', 'slug', 'rack_count', '_depth']
class NestedRackRoleSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:rackrole-detail')
rack_count = serializers.IntegerField(read_only=True)
class Meta:
model = models.RackRole
fields = ['id', 'url', 'name', 'slug', 'rack_count']
class NestedRackSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:rack-detail')
device_count = serializers.IntegerField(read_only=True)
class Meta:
model = models.Rack
fields = ['id', 'url', 'name', 'display_name', 'device_count']
class NestedRackReservationSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:rackreservation-detail')
user = serializers.SerializerMethodField(read_only=True)
class Meta:
model = models.RackReservation
fields = ['id', 'url', 'user', 'units']
def get_user(self, obj):
return obj.user.username
#
# Device types
#
class NestedManufacturerSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:manufacturer-detail')
devicetype_count = serializers.IntegerField(read_only=True)
class Meta:
model = models.Manufacturer
fields = ['id', 'url', 'name', 'slug', 'devicetype_count']
class NestedDeviceTypeSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:devicetype-detail')
manufacturer = NestedManufacturerSerializer(read_only=True)
device_count = serializers.IntegerField(read_only=True)
class Meta:
model = models.DeviceType
fields = ['id', 'url', 'manufacturer', 'model', 'slug', 'display_name', 'device_count']
class NestedConsolePortTemplateSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:consoleporttemplate-detail')
class Meta:
model = models.ConsolePortTemplate
fields = ['id', 'url', 'name']
class NestedConsoleServerPortTemplateSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:consoleserverporttemplate-detail')
class Meta:
model = models.ConsoleServerPortTemplate
fields = ['id', 'url', 'name']
class NestedPowerPortTemplateSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:powerporttemplate-detail')
class Meta:
model = models.PowerPortTemplate
fields = ['id', 'url', 'name']
class NestedPowerOutletTemplateSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:poweroutlettemplate-detail')
class Meta:
model = models.PowerOutletTemplate
fields = ['id', 'url', 'name']
class NestedInterfaceTemplateSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:interfacetemplate-detail')
class Meta:
model = models.InterfaceTemplate
fields = ['id', 'url', 'name']
class NestedRearPortTemplateSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:rearporttemplate-detail')
class Meta:
model = models.RearPortTemplate
fields = ['id', 'url', 'name']
class NestedFrontPortTemplateSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:frontporttemplate-detail')
class Meta:
model = models.FrontPortTemplate
fields = ['id', 'url', 'name']
class NestedDeviceBayTemplateSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:devicebaytemplate-detail')
class Meta:
model = models.DeviceBayTemplate
fields = ['id', 'url', 'name']
#
# Devices
#
class NestedDeviceRoleSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:devicerole-detail')
device_count = serializers.IntegerField(read_only=True)
virtualmachine_count = serializers.IntegerField(read_only=True)
class Meta:
model = models.DeviceRole
fields = ['id', 'url', 'name', 'slug', 'device_count', 'virtualmachine_count']
class NestedPlatformSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:platform-detail')
device_count = serializers.IntegerField(read_only=True)
virtualmachine_count = serializers.IntegerField(read_only=True)
class Meta:
model = models.Platform
fields = ['id', 'url', 'name', 'slug', 'device_count', 'virtualmachine_count']
class NestedDeviceSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:device-detail')
class Meta:
model = models.Device
fields = ['id', 'url', 'name', 'display_name']
class NestedConsoleServerPortSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:consoleserverport-detail')
device = NestedDeviceSerializer(read_only=True)
class Meta:
model = models.ConsoleServerPort
fields = ['id', 'url', 'device', 'name', 'cable']
class NestedConsolePortSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:consoleport-detail')
device = NestedDeviceSerializer(read_only=True)
class Meta:
model = models.ConsolePort
fields = ['id', 'url', 'device', 'name', 'cable']
class NestedPowerOutletSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:poweroutlet-detail')
device = NestedDeviceSerializer(read_only=True)
class Meta:
model = models.PowerOutlet
fields = ['id', 'url', 'device', 'name', 'cable']
class NestedPowerPortSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:powerport-detail')
device = NestedDeviceSerializer(read_only=True)
class Meta:
model = models.PowerPort
fields = ['id', 'url', 'device', 'name', 'cable']
class NestedInterfaceSerializer(WritableNestedSerializer):
device = NestedDeviceSerializer(read_only=True)
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:interface-detail')
class Meta:
model = models.Interface
fields = ['id', 'url', 'device', 'name', 'cable']
class NestedRearPortSerializer(WritableNestedSerializer):
device = NestedDeviceSerializer(read_only=True)
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:rearport-detail')
class Meta:
model = models.RearPort
fields = ['id', 'url', 'device', 'name', 'cable']
class NestedFrontPortSerializer(WritableNestedSerializer):
device = NestedDeviceSerializer(read_only=True)
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:frontport-detail')
class Meta:
model = models.FrontPort
fields = ['id', 'url', 'device', 'name', 'cable']
class NestedDeviceBaySerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:devicebay-detail')
device = NestedDeviceSerializer(read_only=True)
class Meta:
model = models.DeviceBay
fields = ['id', 'url', 'device', 'name']
class NestedInventoryItemSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:inventoryitem-detail')
device = NestedDeviceSerializer(read_only=True)
_depth = serializers.IntegerField(source='level', read_only=True)
class Meta:
model = models.InventoryItem
fields = ['id', 'url', 'device', 'name', '_depth']
#
# Cables
#
class NestedCableSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:cable-detail')
class Meta:
model = models.Cable
fields = ['id', 'url', 'label']
#
# Virtual chassis
#
class NestedVirtualChassisSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:virtualchassis-detail')
master = NestedDeviceSerializer()
member_count = serializers.IntegerField(read_only=True)
class Meta:
model = models.VirtualChassis
fields = ['id', 'name', 'url', 'master', 'member_count']
#
# Power panels/feeds
#
class NestedPowerPanelSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:powerpanel-detail')
powerfeed_count = serializers.IntegerField(read_only=True)
class Meta:
model = models.PowerPanel
fields = ['id', 'url', 'name', 'powerfeed_count']
class NestedPowerFeedSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:powerfeed-detail')
class Meta:
model = models.PowerFeed
fields = ['id', 'url', 'name', 'cable']
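#
# Example usage (hedged illustration, not part of this module)
#
class ExampleDeviceSerializer(serializers.ModelSerializer):
    """
    Hypothetical serializer showing how the nested serializers above are
    typically embedded so that related objects render as compact summaries.
    """
    site = NestedSiteSerializer(read_only=True)
    rack = NestedRackSerializer(read_only=True)

    class Meta:
        model = models.Device
        fields = ['id', 'name', 'site', 'rack']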
|
|
# -*- coding: utf-8 -*-
from django.forms.models import model_to_dict
from django.test.utils import override_settings
from cms.api import add_plugin
from cms.models import CMSPlugin, Placeholder, UserSettings
from cms.test_utils.project.placeholderapp.exceptions import PlaceholderHookException
from cms.test_utils.project.placeholderapp.models import Example1, CharPksExample
from cms.test_utils.testcases import CMSTestCase
class AppAdminTestCase(CMSTestCase):
def setUp(self):
self._obj = self._get_example_obj()
def _add_plugin_to_placeholder(self, placeholder,
plugin_type='LinkPlugin', language='en'):
plugin_data = {
'StylePlugin': {'tag_type': 'div'},
'LinkPlugin': {'name': 'A Link', 'external_link': 'https://www.django-cms.org'},
'PlaceholderPlugin': {'name': 'Content'},
}
plugin = add_plugin(
placeholder,
plugin_type,
language,
**plugin_data[plugin_type]
)
return plugin
def _get_add_plugin_uri(self, plugin_type='LinkPlugin', language='en'):
uri = self.get_add_plugin_uri(
placeholder=self._obj.placeholder,
plugin_type=plugin_type,
language=language,
)
return uri
def _get_example_obj(self):
obj = Example1.objects.create(
char_1='one',
char_2='two',
char_3='tree',
char_4='four'
)
return obj
class AppAdminTest(AppAdminTestCase):
placeholderconf = {'placeholder': {
'limits': {
'global': 2,
'StylePlugin': 1,
}
}
}
def test_global_limit_on_plugin_add(self):
"""
Ensures placeholder global plugin limit is respected
when adding plugins to the placeholder.
"""
superuser = self.get_superuser()
endpoint = self._get_add_plugin_uri()
with self.login_user_context(superuser):
with override_settings(CMS_PLACEHOLDER_CONF=self.placeholderconf):
data = {'name': 'A Link', 'external_link': 'https://www.django-cms.org'}
response = self.client.post(endpoint, data) # first
self.assertEqual(response.status_code, 200)
response = self.client.post(endpoint, data) # second
self.assertEqual(response.status_code, 200)
response = self.client.post(endpoint, data) # third
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.content,
b"This placeholder already has the maximum number of plugins (2).",
)
def test_global_limit_on_plugin_move(self):
"""
Ensures placeholder global plugin limit is respected
when moving plugins to the placeholder.
"""
superuser = self.get_superuser()
source_placeholder = self._obj.placeholder
target_placeholder = self._get_example_obj().placeholder
plugin_1 = self._add_plugin_to_placeholder(source_placeholder)
plugin_2 = self._add_plugin_to_placeholder(source_placeholder)
plugin_3 = self._add_plugin_to_placeholder(source_placeholder)
with self.login_user_context(superuser):
with self.settings(CMS_PLACEHOLDER_CONF=self.placeholderconf):
data = {
'plugin_id': plugin_1.pk,
'placeholder_id': target_placeholder.pk,
'plugin_parent': '',
}
endpoint = self.get_move_plugin_uri(plugin_1, container=Example1)
response = self.client.post(endpoint, data) # first
self.assertEqual(response.status_code, 200)
data = {
'plugin_id': plugin_2.pk,
'placeholder_id': target_placeholder.pk,
'plugin_parent': '',
}
endpoint = self.get_move_plugin_uri(plugin_2, container=Example1)
response = self.client.post(endpoint, data) # second
self.assertEqual(response.status_code, 200)
data = {
'plugin_id': plugin_3.pk,
'placeholder_id': target_placeholder.pk,
'plugin_parent': '',
}
endpoint = self.get_move_plugin_uri(plugin_3, container=Example1)
response = self.client.post(endpoint, data) # third
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.content,
b"This placeholder already has the maximum number of plugins (2)."
)
def test_no_global_limit_check_same_placeholder_move(self):
"""
Ensures no global limit exception is raised
when moving plugins inside of a placeholder.
"""
superuser = self.get_superuser()
source_placeholder = self._obj.placeholder
target_placeholder = source_placeholder
plugin_1 = self._add_plugin_to_placeholder(source_placeholder)
plugin_2 = self._add_plugin_to_placeholder(source_placeholder)
with self.login_user_context(superuser):
with self.settings(CMS_PLACEHOLDER_CONF=self.placeholderconf):
data = {
'plugin_id': plugin_1.pk,
'placeholder_id': target_placeholder.pk,
'plugin_parent': '',
'plugin_order': 1,
}
endpoint = self.get_move_plugin_uri(plugin_1, container=Example1)
response = self.client.post(endpoint, data) # first
self.assertEqual(response.status_code, 200)
data = {
'plugin_id': plugin_2.pk,
'placeholder_id': target_placeholder.pk,
'plugin_parent': '',
'plugin_order': 1,
}
endpoint = self.get_move_plugin_uri(plugin_2, container=Example1)
response = self.client.post(endpoint, data) # second
self.assertEqual(response.status_code, 200)
def test_type_limit_on_plugin_add(self):
"""
Ensures placeholder plugin type limit is respected
when adding plugins to the placeholder.
"""
superuser = self.get_superuser()
endpoint = self._get_add_plugin_uri('StylePlugin')
with self.login_user_context(superuser):
with self.settings(CMS_PLACEHOLDER_CONF=self.placeholderconf):
data = {'tag_type': 'div'}
response = self.client.post(endpoint, data) # first
self.assertEqual(response.status_code, 200)
response = self.client.post(endpoint, data) # second
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.content,
b"This placeholder already has the "
b"maximum number (1) of allowed Style plugins."
)
def test_type_limit_on_plugin_move(self):
"""
Ensures placeholder plugin type limit is respected
when moving plugins to the placeholder.
"""
superuser = self.get_superuser()
source_placeholder = self._obj.placeholder
target_placeholder = self._get_example_obj().placeholder
plugin_1 = self._add_plugin_to_placeholder(source_placeholder, 'StylePlugin')
plugin_2 = self._add_plugin_to_placeholder(source_placeholder, 'StylePlugin')
with self.login_user_context(superuser):
with self.settings(CMS_PLACEHOLDER_CONF=self.placeholderconf):
data = {
'plugin_id': plugin_1.pk,
'placeholder_id': target_placeholder.pk,
'plugin_parent': '',
}
endpoint = self.get_move_plugin_uri(plugin_1, container=Example1)
response = self.client.post(endpoint, data) # first
self.assertEqual(response.status_code, 200)
data = {
'plugin_id': plugin_2.pk,
'placeholder_id': target_placeholder.pk,
'plugin_parent': '',
}
endpoint = self.get_move_plugin_uri(plugin_2, container=Example1)
response = self.client.post(endpoint, data) # second
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content,
b"This placeholder already has the maximum number (1) of allowed Style plugins.")
def test_no_type_limit_check_same_placeholder_move(self):
"""
Ensures no plugin type limit exception is raised
when moving plugins inside of a placeholder.
"""
superuser = self.get_superuser()
source_placeholder = self._obj.placeholder
target_placeholder = source_placeholder
plugin_1 = self._add_plugin_to_placeholder(source_placeholder, 'StylePlugin')
with self.login_user_context(superuser):
with self.settings(CMS_PLACEHOLDER_CONF=self.placeholderconf):
data = {
'plugin_id': plugin_1.pk,
'placeholder_id': target_placeholder.pk,
'plugin_parent': '',
'plugin_order': 1,
}
endpoint = self.get_move_plugin_uri(plugin_1, container=Example1)
response = self.client.post(endpoint, data) # first
self.assertEqual(response.status_code, 200)
def test_placeholder_post_move_hook_resolve(self):
"""
Ensure moving a plugin from placeholder A
registered with admin A calls the move plugin hooks
on the target placeholder's registered admin.
"""
superuser = self.get_superuser()
exception = PlaceholderHookException
message = 'move plugin hook has been called.'
example_1 = self._obj
source_placeholder = example_1.placeholder
plugin = self._add_plugin_to_placeholder(source_placeholder)
example_2 = CharPksExample.objects.create(
char_1='one',
slug='two',
)
target_placeholder = example_2.placeholder_1
with self.login_user_context(superuser):
with self.assertRaisesMessage(exception, message):
                # move the plugin to example_2's placeholder
                # this will cause the Example1 admin
                # to resolve the attached model/admin of the target placeholder
                # and call its hook.
data = {
'plugin_id': plugin.pk,
'placeholder_id': target_placeholder.pk,
'plugin_parent': '',
}
endpoint = self.get_move_plugin_uri(plugin, container=Example1)
self.client.post(endpoint, data)
def test_placeholder_post_copy_hook_resolve(self):
"""
Ensure copying a plugin from placeholder A
registered with admin A calls the copy plugin hooks
on the target placeholder's registered admin.
"""
superuser = self.get_superuser()
exception = PlaceholderHookException
message = 'copy plugin hook has been called.'
example_1 = self._obj
source_placeholder = example_1.placeholder
plugin = self._add_plugin_to_placeholder(source_placeholder)
endpoint = self.get_copy_plugin_uri(plugin, container=Example1)
example_2 = CharPksExample.objects.create(
char_1='one',
slug='two',
)
target_placeholder = example_2.placeholder_1
with self.login_user_context(superuser):
with self.assertRaisesMessage(exception, message):
                # copy the plugin to example_2's placeholder
                # this will cause the Example1 admin
                # to resolve the attached model/admin of the target placeholder
                # and call its hook.
data = {
'source_language': plugin.language,
'source_placeholder_id': source_placeholder.pk,
'source_plugin_id': plugin.pk,
'target_language': plugin.language,
'target_placeholder_id': target_placeholder.pk,
}
self.client.post(endpoint, data)
class AppAdminPermissionsTest(AppAdminTestCase):
def setUp(self):
self._obj = self._get_example_obj()
self._staff_user = self.get_staff_user_with_no_permissions()
def test_user_can_add_plugin(self):
"""
User can add a new plugin if he has change permissions
on the model attached to the placeholder and he has
add permissions on the plugin model.
"""
staff_user = self._staff_user
placeholder = self._obj.placeholder
plugins = placeholder.get_plugins('en').filter(plugin_type='LinkPlugin')
endpoint = self._get_add_plugin_uri()
self.add_permission(staff_user, 'change_example1')
self.add_permission(staff_user, 'add_link')
with self.login_user_context(staff_user):
data = {'name': 'A Link', 'external_link': 'https://www.django-cms.org'}
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(plugins.count(), 1)
def test_user_cant_add_plugin(self):
"""
User can't add a new plugin if he does not have
change permissions on the model attached to the placeholder
and/or does not have add permissions on the plugin model.
"""
staff_user = self._staff_user
placeholder = self._obj.placeholder
plugins = placeholder.get_plugins('en').filter(plugin_type='LinkPlugin')
endpoint = self._get_add_plugin_uri()
self.add_permission(staff_user, 'add_example1')
self.add_permission(staff_user, 'delete_example1')
self.add_permission(staff_user, 'add_link')
with self.login_user_context(staff_user):
data = {'name': 'A Link', 'external_link': 'https://www.django-cms.org'}
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 403)
self.assertEqual(plugins.count(), 0)
self.add_permission(staff_user, 'change_example1')
self.remove_permission(staff_user, 'add_link')
with self.login_user_context(staff_user):
data = {'name': 'A Link', 'external_link': 'https://www.django-cms.org'}
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 403)
self.assertEqual(plugins.count(), 0)
def test_user_can_edit_plugin(self):
"""
User can edit a plugin if he has change permissions
on the model attached to the placeholder and he has
change permissions on the plugin model.
"""
staff_user = self._staff_user
placeholder = self._obj.placeholder
plugin = self._add_plugin_to_placeholder(placeholder)
endpoint = self.get_change_plugin_uri(plugin, container=Example1)
self.add_permission(staff_user, 'change_example1')
self.add_permission(staff_user, 'change_link')
with self.login_user_context(staff_user):
data = model_to_dict(plugin, fields=['name', 'external_link'])
data['name'] = 'A link 2'
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
plugin.refresh_from_db()
self.assertEqual(plugin.name, data['name'])
def test_user_cant_edit_plugin(self):
"""
User can't edit a plugin if he does not have
change permissions on the model attached to the placeholder
and/or does not have change permissions on the plugin model.
"""
staff_user = self._staff_user
placeholder = self._obj.placeholder
plugin = self._add_plugin_to_placeholder(placeholder)
endpoint = self.get_change_plugin_uri(plugin, container=Example1)
self.add_permission(staff_user, 'add_example1')
self.add_permission(staff_user, 'delete_example1')
self.add_permission(staff_user, 'change_link')
with self.login_user_context(staff_user):
data = model_to_dict(plugin, fields=['name', 'external_link'])
data['name'] = 'A link 2'
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 403)
plugin.refresh_from_db()
self.assertNotEqual(plugin.name, data['name'])
self.add_permission(staff_user, 'change_example1')
self.remove_permission(staff_user, 'change_link')
with self.login_user_context(staff_user):
data = model_to_dict(plugin, fields=['name', 'external_link'])
data['name'] = 'A link 2'
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 403)
plugin.refresh_from_db()
self.assertNotEqual(plugin.name, data['name'])
def test_user_can_delete_plugin(self):
"""
User can delete a plugin if he has change permissions
on the model attached to the placeholder and he has
delete permissions on the plugin model.
"""
staff_user = self._staff_user
placeholder = self._obj.placeholder
plugin = self._add_plugin_to_placeholder(placeholder)
endpoint = self.get_delete_plugin_uri(plugin, container=Example1)
self.add_permission(staff_user, 'change_example1')
self.add_permission(staff_user, 'delete_link')
with self.login_user_context(staff_user):
data = {'post': True}
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 302)
self.assertFalse(CMSPlugin.objects.filter(pk=plugin.pk).exists())
def test_user_cant_delete_plugin(self):
"""
User can't delete a plugin if he does not have
change permissions on the model attached to the placeholder
and/or does not have delete permissions on the plugin model.
"""
staff_user = self._staff_user
placeholder = self._obj.placeholder
plugin = self._add_plugin_to_placeholder(placeholder)
endpoint = self.get_delete_plugin_uri(plugin, container=Example1)
self.add_permission(staff_user, 'add_example1')
self.add_permission(staff_user, 'delete_example1')
self.add_permission(staff_user, 'delete_link')
with self.login_user_context(staff_user):
data = {'post': True}
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 403)
self.assertTrue(CMSPlugin.objects.filter(pk=plugin.pk).exists())
self.add_permission(staff_user, 'change_example1')
self.remove_permission(staff_user, 'delete_link')
with self.login_user_context(staff_user):
data = {'post': True}
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 403)
self.assertTrue(CMSPlugin.objects.filter(pk=plugin.pk).exists())
def test_user_can_move_plugin(self):
"""
User can move a plugin if he has change permissions
on the model attached to the placeholder and he has
change permissions on the plugin model.
"""
staff_user = self._staff_user
source_placeholder = self._obj.placeholder
target_placeholder = self._get_example_obj().placeholder
plugin = self._add_plugin_to_placeholder(source_placeholder)
data = {
'plugin_id': plugin.pk,
'placeholder_id': target_placeholder.pk,
'plugin_parent': '',
}
self.add_permission(staff_user, 'change_example1')
self.add_permission(staff_user, 'change_link')
with self.login_user_context(staff_user):
endpoint = self.get_move_plugin_uri(plugin, container=Example1)
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertTrue(target_placeholder.get_plugins('en').filter(pk=plugin.pk))
self.assertFalse(source_placeholder.get_plugins('en').filter(pk=plugin.pk))
def test_user_cant_move_plugin(self):
"""
User can't move a plugin if he does not have
change permissions on the model attached to the placeholder
and/or does not have change permissions on the plugin model.
"""
staff_user = self._staff_user
source_placeholder = self._obj.placeholder
target_placeholder = self._get_example_obj().placeholder
plugin = self._add_plugin_to_placeholder(source_placeholder)
data = {
'plugin_id': plugin.pk,
'placeholder_id': target_placeholder.pk,
'plugin_parent': '',
}
self.add_permission(staff_user, 'add_example1')
self.add_permission(staff_user, 'delete_example1')
self.add_permission(staff_user, 'change_link')
with self.login_user_context(staff_user):
endpoint = self.get_move_plugin_uri(plugin, container=Example1)
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 403)
self.assertFalse(target_placeholder.get_plugins('en').filter(pk=plugin.pk))
self.assertTrue(source_placeholder.get_plugins('en').filter(pk=plugin.pk))
self.add_permission(staff_user, 'change_example1')
self.remove_permission(staff_user, 'change_link')
with self.login_user_context(staff_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 403)
self.assertFalse(target_placeholder.get_plugins('en').filter(pk=plugin.pk))
self.assertTrue(source_placeholder.get_plugins('en').filter(pk=plugin.pk))
def test_user_can_copy_plugin(self):
"""
User can copy a plugin if he has change permissions
on the model attached to the placeholder and he has
add permissions on the plugin model.
"""
staff_user = self._staff_user
placeholder = self._obj.placeholder
plugin = self._add_plugin_to_placeholder(placeholder)
endpoint = self.get_copy_plugin_uri(plugin, container=Example1)
source_placeholder = plugin.placeholder
target_placeholder = self._get_example_obj().placeholder
data = {
'source_plugin_id': plugin.pk,
'source_placeholder_id': source_placeholder.pk,
'source_language': plugin.language,
'target_language': 'en',
'target_placeholder_id': target_placeholder.pk,
}
self.add_permission(staff_user, 'change_example1')
self.add_permission(staff_user, 'add_link')
with self.login_user_context(staff_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertTrue(source_placeholder.get_plugins('en').filter(pk=plugin.pk).exists())
self.assertTrue(
target_placeholder
.get_plugins('en')
.filter(plugin_type=plugin.plugin_type)
.exists()
)
def test_user_cant_copy_plugin(self):
"""
User can't copy a plugin if he does not have
change permissions on the model attached to the placeholder
and/or does not have add permissions on the plugin model.
"""
staff_user = self._staff_user
placeholder = self._obj.placeholder
plugin = self._add_plugin_to_placeholder(placeholder)
endpoint = self.get_copy_plugin_uri(plugin, container=Example1)
source_placeholder = plugin.placeholder
target_placeholder = self._get_example_obj().placeholder
data = {
'source_plugin_id': plugin.pk,
'source_placeholder_id': source_placeholder.pk,
'source_language': plugin.language,
'target_language': 'en',
'target_placeholder_id': target_placeholder.pk,
}
self.add_permission(staff_user, 'add_example1')
self.add_permission(staff_user, 'delete_example1')
self.add_permission(staff_user, 'add_link')
with self.login_user_context(staff_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 403)
self.assertTrue(source_placeholder.get_plugins('en').filter(pk=plugin.pk).exists())
self.assertFalse(
target_placeholder
.get_plugins('en')
.filter(plugin_type=plugin.plugin_type)
.exists()
)
self.add_permission(staff_user, 'change_example1')
self.remove_permission(staff_user, 'add_link')
with self.login_user_context(staff_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 403)
self.assertTrue(source_placeholder.get_plugins('en').filter(pk=plugin.pk).exists())
self.assertFalse(
target_placeholder
.get_plugins('en')
.filter(plugin_type=plugin.plugin_type)
.exists()
)
def test_user_can_clear_empty_placeholder(self):
"""
User can clear a placeholder if he has change permissions
on the model attached to the placeholder.
"""
staff_user = self._staff_user
placeholder = self._obj.placeholder
endpoint = self.get_clear_placeholder_url(placeholder, container=Example1)
self.add_permission(staff_user, 'change_example1')
with self.login_user_context(staff_user):
response = self.client.post(endpoint, {'test': 0})
self.assertEqual(response.status_code, 302)
def test_user_cant_clear_empty_placeholder(self):
"""
User can't clear a placeholder if he does not have
change permissions on the model attached to the placeholder.
"""
staff_user = self._staff_user
placeholder = self._obj.placeholder
endpoint = self.get_clear_placeholder_url(placeholder, container=Example1)
with self.login_user_context(staff_user):
response = self.client.post(endpoint, {'test': 0})
self.assertEqual(response.status_code, 403)
def test_user_can_clear_non_empty_placeholder(self):
"""
User can clear a placeholder with plugins if he has
change permissions on the model attached to the placeholder
and delete permissions on the plugin model.
"""
staff_user = self._staff_user
placeholder = self._obj.placeholder
plugins = [
self._add_plugin_to_placeholder(placeholder, 'StylePlugin'),
self._add_plugin_to_placeholder(placeholder, 'LinkPlugin'),
]
placeholder = plugins[0].placeholder
endpoint = self.get_clear_placeholder_url(placeholder, container=Example1)
self.add_permission(staff_user, 'delete_style')
self.add_permission(staff_user, 'delete_link')
self.add_permission(staff_user, 'change_example1')
with self.login_user_context(staff_user):
response = self.client.post(endpoint, {'test': 0})
self.assertEqual(response.status_code, 302)
self.assertEqual(placeholder.get_plugins('en').count(), 0)
def test_user_cant_clear_non_empty_placeholder(self):
"""
User can't clear a placeholder with plugins if he does not have
change permissions on the model attached to the placeholder
and/or does not have delete permissions on the plugin model.
"""
staff_user = self._staff_user
placeholder = self._obj.placeholder
plugins = [
self._add_plugin_to_placeholder(placeholder, 'StylePlugin'),
self._add_plugin_to_placeholder(placeholder, 'LinkPlugin'),
]
placeholder = plugins[0].placeholder
endpoint = self.get_clear_placeholder_url(placeholder, container=Example1)
self.add_permission(staff_user, 'delete_text')
self.add_permission(staff_user, 'delete_link')
with self.login_user_context(staff_user):
response = self.client.post(endpoint, {'test': 0})
self.assertEqual(response.status_code, 403)
self.assertEqual(placeholder.get_plugins('en').count(), 2)
def test_user_can_copy_placeholder_to_clipboard(self):
"""
User can copy a placeholder to the clipboard
if he has add permissions on the plugin models
being copied.
"""
staff_user = self._staff_user
source_placeholder = self._obj.placeholder
endpoint = self.get_copy_placeholder_uri(source_placeholder, container=Example1)
self._add_plugin_to_placeholder(source_placeholder, 'StylePlugin')
self._add_plugin_to_placeholder(source_placeholder, 'LinkPlugin')
user_settings = UserSettings.objects.create(
language="en",
user=staff_user,
clipboard=Placeholder.objects.create(),
)
self.add_permission(staff_user, 'add_link')
self.add_permission(staff_user, 'add_style')
data = {
'source_plugin_id': '',
'source_placeholder_id': source_placeholder.pk,
'source_language': 'en',
'target_language': 'en',
'target_placeholder_id': user_settings.clipboard.pk,
}
with self.login_user_context(staff_user):
# Copy plugins into the clipboard
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
clipboard_plugins = user_settings.clipboard.get_plugins()
# assert the clipboard has a PlaceholderPlugin
self.assertTrue(clipboard_plugins.filter(plugin_type='PlaceholderPlugin').exists())
self.assertEqual(len(clipboard_plugins), 1)
placeholder_plugin = clipboard_plugins[0].get_plugin_instance()[0]
ref_placeholder = placeholder_plugin.placeholder_ref
# assert there's only two plugins in the clipboard
self.assertEqual(ref_placeholder.get_plugins().count(), 2)
def test_user_cant_copy_placeholder_to_clipboard(self):
"""
        User can't copy a placeholder to the clipboard if he does not
        have add permissions on the plugin models being copied.
"""
staff_user = self._staff_user
source_placeholder = self._obj.placeholder
endpoint = self.get_copy_placeholder_uri(source_placeholder, container=Example1)
self._add_plugin_to_placeholder(source_placeholder, 'StylePlugin')
self._add_plugin_to_placeholder(source_placeholder, 'LinkPlugin')
user_settings = UserSettings.objects.create(
language="en",
user=staff_user,
clipboard=Placeholder.objects.create(),
)
self.add_permission(staff_user, 'change_link')
self.add_permission(staff_user, 'delete_link')
self.add_permission(staff_user, 'change_style')
self.add_permission(staff_user, 'delete_style')
data = {
'source_plugin_id': '',
'source_placeholder_id': source_placeholder.pk,
'source_language': 'en',
'target_language': 'en',
'target_placeholder_id': user_settings.clipboard.pk,
}
with self.login_user_context(staff_user):
# Copy plugins into the clipboard
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 403)
clipboard_plugins = user_settings.clipboard.get_plugins()
self.assertEqual(len(clipboard_plugins), 0)
def test_user_can_paste_from_clipboard(self):
"""
User can paste plugins from the clipboard if he has
change permissions on the model attached to the target
placeholder and he has add permissions on the plugin models
being copied.
"""
staff_user = self._staff_user
target_placeholder = self._obj.placeholder
self.add_permission(staff_user, 'change_example1')
self.add_permission(staff_user, 'add_link')
user_settings = UserSettings.objects.create(
language="en",
user=staff_user,
clipboard=Placeholder.objects.create(),
)
placeholder_plugin = self._add_plugin_to_placeholder(
user_settings.clipboard,
'PlaceholderPlugin',
)
ref_placeholder = placeholder_plugin.placeholder_ref
self._add_plugin_to_placeholder(ref_placeholder)
self._add_plugin_to_placeholder(ref_placeholder)
with self.login_user_context(staff_user):
# Paste plugins from clipboard into placeholder
# under the french language.
data = {
'placeholder_id': target_placeholder.pk,
'plugin_id': placeholder_plugin.pk,
'plugin_parent': '',
'plugin_language': 'fr',
'plugin_order[]': '__COPY__',
'move_a_copy': True,
}
endpoint = self.get_move_plugin_uri(placeholder_plugin, container=Example1)
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(target_placeholder.get_plugins('fr').count(), 2)
def test_user_cant_paste_from_clipboard(self):
"""
        User can't paste plugins from the clipboard if he does not have
change permissions on the model attached to the target placeholder
and/or does not have add permissions on the plugin models
being copied.
"""
staff_user = self._staff_user
target_placeholder = self._obj.placeholder
self.add_permission(staff_user, 'add_example1')
self.add_permission(staff_user, 'delete_example1')
self.add_permission(staff_user, 'add_link')
user_settings = UserSettings.objects.create(
language="en",
user=staff_user,
clipboard=Placeholder.objects.create(),
)
placeholder_plugin = self._add_plugin_to_placeholder(
user_settings.clipboard,
'PlaceholderPlugin',
)
ref_placeholder = placeholder_plugin.placeholder_ref
self._add_plugin_to_placeholder(ref_placeholder)
self._add_plugin_to_placeholder(ref_placeholder)
with self.login_user_context(staff_user):
# Paste plugins from clipboard into placeholder
# under the french language.
data = {
'placeholder_id': target_placeholder.pk,
'plugin_id': placeholder_plugin.pk,
'plugin_parent': '',
'plugin_language': 'fr',
'plugin_order[]': '__COPY__',
'move_a_copy': True,
}
endpoint = self.get_move_plugin_uri(placeholder_plugin, container=Example1)
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 403)
self.assertEqual(target_placeholder.get_plugins('fr').count(), 0)
self.add_permission(staff_user, 'change_example1')
self.remove_permission(staff_user, 'add_link')
with self.login_user_context(staff_user):
# Paste plugins from clipboard into placeholder
# under the french language.
data = {
'placeholder_id': target_placeholder.pk,
'plugin_id': placeholder_plugin.pk,
'plugin_parent': '',
'plugin_language': 'fr',
'plugin_order[]': '__COPY__',
'move_a_copy': True,
}
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 403)
self.assertEqual(target_placeholder.get_plugins('fr').count(), 0)
|
|
# coding: spec
from tests.helpers import HarpoonCase
from harpoon.executor import docker_context as docker_context_maker
# HarpoonSpec and Meta are used by make_harpoon below but were missing from the
# imports; these module paths are assumptions about harpoon's and
# delfick_project's layout.
from harpoon.option_spec.harpoon_specs import HarpoonSpec
from delfick_project.errors_pytest import assertRaises
from delfick_project.norms import Meta
from docker.errors import ImageNotFound, NotFound
from contextlib import contextmanager
from textwrap import dedent
import subprocess
import requests
import pytest
import shlex
import time
import json
import os
pytestmark = pytest.mark.integration
def jsonloads(content):
# In python3.5, json.loads can't take in bytes
if isinstance(content, bytes):
content = content.decode()
return json.loads(content)
class Case(HarpoonCase):
def make_harpoon(self, harpoon_options=None):
if harpoon_options is None:
harpoon_options = {}
harpoon_options["docker_context"] = self.docker_client
harpoon_options["docker_context_maker"] = docker_context_maker
return HarpoonSpec().harpoon_spec.normalise(Meta.empty(), harpoon_options)
@contextmanager
def forwarded_port(self, port):
if "CI_SERVER" in os.environ:
yield
return
machine_name = os.environ["DOCKER_MACHINE_NAME"]
p = subprocess.Popen(
shlex.split(
"docker-machine ssh {0} -N -L 0.0.0.0:{1}:127.0.0.1:{1}".format(machine_name, port)
)
)
try:
yield
finally:
p.kill()
# I'm putting this in its own describe at the top so that it pulls in python:2
# ready for the other tests. (This test specifically checks that
# container_manager can pull in an image.)
describe Case, "Container manager pulling":
it "can pull in images", container_manager:
try:
self.docker_api.remove_image("python:2")
except ImageNotFound:
pass
port = container_manager.free_port()
config = dedent(
"""
---
images:
py:
context: false
commands:
- FROM python:2
- EXPOSE {port}
- - ADD
- dest: /a
content: "hello"
- WORKDIR /
- CMD python -m SimpleHTTPServer {port}
wait_condition:
command:
- ss -tanp | grep {port}
""".format(
port=port
)
)
info = container_manager.start(":{0}".format(port), port=port, config=config)
res = requests.post(
info["uri"]("/start_container"), json={"image": "py", "ports": [[0, port]]}
)
assert res.status_code == 200, res.content
res = jsonloads(res.content)
host_port = res["ports"][str(port)]
container_id = res["container_id"]
assert self.docker_api.inspect_container(container_id)["State"]["Running"]
with self.forwarded_port(host_port):
container_manager.wait_for_port(host_port)
assert host_port != port, res.content
assert requests.get("http://127.0.0.1:{0}/a".format(host_port)).content == b"hello"
info["shutdown"]()
with assertRaises(NotFound, r'404 Client Error: Not Found \("No such container:.+'):
self.docker_api.inspect_container(container_id)
describe Case, "container_manager":
it "can stop containers", container_manager:
port = container_manager.free_port()
config = dedent(
"""
---
images:
py:
context: false
commands:
- FROM python:2
- EXPOSE 4545
- - ADD
- dest: /a
content: "hello"
- WORKDIR /
- CMD python -m SimpleHTTPServer 4545
wait_condition:
command:
- ss -tanp | grep 4545
"""
)
info = container_manager.start(":{0}".format(port), port=port, config=config)
container_id = None
# Do it twice and assert that the second container is different to the first
for _ in range(2):
res = requests.post(
info["uri"]("/start_container"), json={"image": "py", "ports": [[0, 4545]]}
)
assert res.status_code == 200, res.content
res = jsonloads(res.content)
host_port = res["ports"]["4545"]
assert container_id != res["container_id"]
container_id = res["container_id"]
assert self.docker_api.inspect_container(container_id)["State"]["Running"]
with self.forwarded_port(host_port):
container_manager.wait_for_port(host_port)
assert host_port != port, res.content
assert requests.get("http://127.0.0.1:{0}/a".format(host_port)).content == b"hello"
res = requests.post(info["uri"]("/stop_container"), json={"image": "py"})
assert res.status_code == 204, res.content
with assertRaises(NotFound, r'404 Client Error: Not Found \("No such container:.+'):
self.docker_api.inspect_container(container_id)
it "returns the same container on subsequent starts", container_manager:
port = container_manager.free_port()
config = dedent(
"""
---
images:
py:
context: false
commands:
- FROM python:2
- EXPOSE 6789
- - ADD
- dest: /a
content: "hello"
- WORKDIR /
- CMD python -m SimpleHTTPServer 6789
wait_condition:
command:
- ss -tanp | grep 6789
"""
)
info = container_manager.start(":{0}".format(port), port=port, config=config)
res = requests.post(
info["uri"]("/start_container"), json={"image": "py", "ports": [[0, 6789]]}
)
assert res.status_code == 200, res.content
res = jsonloads(res.content)
host_port = res["ports"]["6789"]
container_id = res["container_id"]
start = time.time()
res = requests.post(
info["uri"]("/start_container"), json={"image": "py", "ports": [[0, port]]}
)
assert time.time() - start < 1
assert res.status_code == 200, res.content
res = jsonloads(res.content)
assert res["ports"]["6789"] == host_port
assert container_id == res["container_id"]
it "complains if the request is invalid", container_manager:
port = container_manager.free_port()
config = ""
info = container_manager.start(":{0}".format(port), port=port, config=config)
def assertError(res, content):
assert res.status_code == 500
assert res.headers["Content-Type"] == "application/json"
assert jsonloads(res.content) == content
res = requests.post(info["uri"]("/start_container"), json={"ports": [[0, 6789]]})
assertError(
res,
{
"error": {
"errors": [
{
"message": "Bad value. Expected a value but got none",
"meta": "{path=<request>.image}",
}
],
"message": "Bad value",
"meta": "{path=<request>}",
},
"error_code": "BadSpecValue",
},
)
res = requests.post(info["uri"]("/start_container"), json={"image": "py"})
assertError(
res,
{
"error": {
"errors": [
{
"message": "Bad value. Expected a value but got none",
"meta": "{path=<request>.ports}",
}
],
"message": "Bad value",
"meta": "{path=<request>}",
},
"error_code": "BadSpecValue",
},
)
res = requests.post(
info["uri"]("/start_container"), json={"image": "py", "ports": [[0, 6789]]}
)
assertError(
res,
{
"error": {"available": [], "message": "Couldn't find image", "wanted": "py"},
"error_code": "NoSuchImage",
},
)
|
|
from flask import Flask, jsonify, redirect
import sqlalchemy
from db import init_db_engine
import json
from collections import OrderedDict
connect_str = "postgres://codeai:codeai@localhost:5432/codeai"
global engine
engine = init_db_engine(connect_str)
MY_API = '/api/1.0/'
app = Flask(__name__)
@app.route(MY_API + 'submission/<handle>/<type>/<verdict>')
def user_submission(handle, type, verdict):
with engine.connect() as connection:
results = connection.execute(sqlalchemy.text("""
SELECT q.id,
u.handle,
verdict.name,
language.name as language,
luq.timestamp as timestamp,
luq.relative_time,
luq.participant_type
FROM l_user_question luq
JOIN user_table u
ON u.id = luq.user_id
JOIN question q
ON q.id = luq.question_id
JOIN verdict
ON luq.verdict_id = verdict.verdict_id
JOIN language
ON language.language_id = luq.language
WHERE u.handle = :handle
AND luq.participant_type = :participant_type
AND verdict.name = :verdict
"""), {
"handle": handle,
"participant_type": type,
"verdict": verdict,
})
rows = results.fetchall()
result = {"results": [(dict(row)) for row in rows]}
return jsonify(result)
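# Hedged illustration (not part of the app): with the app.run() call at the
# bottom of this module, the route above can be exercised as
#   GET http://localhost:3000/api/1.0/submission/<handle>/<type>/<verdict>
# and returns a JSON body of the form
#   {"results": [{"id": ..., "handle": ..., "name": ..., "language": ...,
#                 "timestamp": ..., "relative_time": ..., "participant_type": ...}]}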
@app.route(MY_API + 'people_submission/<handles>/<type>/<verdict>')
def people_submissions(handles, type, verdict):
exclude_handles = handles.split("%")
if len(exclude_handles) > 1:
exclude_handles = tuple((handles.split("%")[1], ))
else:
exclude_handles = tuple(('null',))
print(exclude_handles)
handles = tuple(handles.split(";"))
verdicts = tuple(verdict.split(";"))
types = tuple(type.split(";"))
with engine.connect() as connection:
results = connection.execute(sqlalchemy.text("""
SELECT distinct luq.question_id,
q.name,
q.solved
FROM l_user_question luq
JOIN question q
ON q.id = luq.question_id
JOIN "user_table" u
ON u.id = luq.user_id
JOIN verdict
ON verdict.verdict_id = luq.verdict_id
WHERE u.handle in :handles
AND u.handle not in :exclude_handles
AND luq.participant_type in :participant_type
AND verdict.name in :verdict
ORDER BY q.solved DESC
"""), {
"handles": handles,
"participant_type": types,
"verdict": verdicts,
"exclude_handles": exclude_handles,
})
rows = results.fetchall()
result = {"results": [(dict(row)) for row in rows]}
return jsonify(result)
@app.route(MY_API + 'suggested_questions/<handle>/<type>/<verdict>')
def suggested_submissions(handle, type, verdict):
    # Look up the requesting user's rating and id, then select up to 50
    # higher-rated users whose solved questions will be suggested.
with engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""
SELECT rating,
id
FROM "user_table"
WHERE "user_table".handle=:handle
"""), {
"handle": handle,
})
row = dict(result.fetchone())
results = connection.execute(sqlalchemy.text("""
SELECT handle
FROM "user_table"
WHERE "user_table".rating > :rating
OFFSET :id
LIMIT 50
"""), row)
rows = results.fetchall()
rows = [dict(row) for row in rows]
handles = []
for row in rows:
handles.extend(row.values())
handles = ";".join(handles)
handles = handles + "{handle}".format(handle=handle)
return redirect(MY_API + 'people_submission/{handles}/{type}/{verdict}'.format(handles=handles, type=type, verdict=verdict))
@app.route(MY_API + 'languages/<handle>')
def languages(handle):
with engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""
SELECT l.name
FROM l_user_question luq
JOIN language l
ON l.language_id = luq.language
JOIN "user_table" u
ON u.id = luq.user_id
WHERE u.handle = :handle
"""), {
"handle": handle,
})
rows = result.fetchall()
rows = [dict(row) for row in rows]
result = {}
result["results"] = rows
return jsonify(result)
@app.route(MY_API + 'tags/<handle>/<verdict>')
def tags(handle, verdict):
with engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""
SELECT count(t.id),
t.name
FROM l_user_question luq
JOIN l_question_tag lqt
ON lqt.question = luq.question_id
JOIN tags t
ON lqt.tag = t.id
JOIN "user_table"
ON "user_table".id = luq.user_id
JOIN verdict
ON luq.verdict_id = verdict.verdict_id
WHERE "user_table".handle = :handle
AND verdict.name = :verdict
GROUP BY t.id, t.name
"""), {
"handle": handle,
"verdict": verdict,
})
rows = result.fetchall()
rows = [dict(row) for row in rows]
result = dict()
result["result"] = rows
return jsonify(result)
@app.route(MY_API + 'lang/<handle>')
def lang(handle):
with engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""
SELECT l.name,
count(luq.language)
FROM l_user_question luq
JOIN language l
ON l.language_id = luq.language
JOIN "user_table" u
ON u.id = luq.user_id
WHERE u.handle = :handle
GROUP BY l.language_id, l.name
"""), {
"handle": handle,
})
rows = result.fetchall()
rows = [dict(row) for row in rows]
result = {}
result["results"] = rows
return jsonify(result)
@app.route(MY_API + 'weekday/<handle>/<verdict>')
def week(handle, verdict):
with engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""
SELECT timestamp,
count(luq.question_id) as count
FROM l_user_question luq
JOIN "user_table"
ON "user_table".id = luq.user_id
JOIN verdict
ON verdict.verdict_id = luq.verdict_id
WHERE verdict.name = :verdict
AND "user_table".handle = :handle
GROUP BY timestamp
"""), {
"handle": handle,
"verdict": verdict,
})
rows = result.fetchall()
rows = [dict(row) for row in rows]
results = OrderedDict()
week = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
for day in week:
results[day] = 0
for row in rows:
day = row["timestamp"].weekday()
day = week[day]
results[day] += row["count"]
w = list(results.values())
results = {"count": w}
return jsonify(results)
@app.route(MY_API + 'timeday/<handle>/<verdict>')
def timeday(handle, verdict):
with engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""
SELECT timestamp,
count(luq.question_id) as count
FROM l_user_question luq
JOIN "user_table"
ON "user_table".id = luq.user_id
JOIN verdict
ON verdict.verdict_id = luq.verdict_id
WHERE verdict.name = :verdict
AND "user_table".handle = :handle
GROUP BY timestamp
"""), {
"handle": handle,
"verdict": verdict,
})
rows = result.fetchall()
rows = [dict(row) for row in rows]
results = OrderedDict()
results["Morning"] = 0
results["Noon"] = 0
results["Evening"] = 0
results["Night"] = 0
        for row in rows:
            # bucket by hour of day: 1-11 -> Morning, 12-16 -> Noon,
            # 17-20 -> Evening, everything else (21-23 and 0) -> Night
            hour = row["timestamp"].time().hour
            if 0 < hour < 12:
                daytime = "Morning"
            elif 12 <= hour <= 16:
                daytime = "Noon"
            elif 16 < hour <= 20:
                daytime = "Evening"
            else:
                daytime = "Night"
            results[daytime] += row["count"]
w = list(results.values())
results = {"count": w}
return jsonify(results)
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=3000, debug=True)
|
|
# LexGen.py - implemented 2002 by Neil Hodgson neilh@scintilla.org
# Released to the public domain.
# Regenerate the Scintilla and SciTE source files that list
# all the lexers and all the properties files.
# Should be run whenever a new lexer is added or removed.
# Requires Python 2.4 or later
# Most files are regenerated in place with templates stored in comments.
# The VS .NET project file is generated into a different file as the
# VS .NET environment will not retain comments when modifying the file.
# The files are copied to a string apart from sections between a
# ++Autogenerated comment and a --Autogenerated comment which is
# generated by the CopyWithInsertion function. After the whole
# string is instantiated, it is compared with the target file and
# if different the file is rewritten.
# Does not regenerate the Visual C++ 6 project files but does the VS .NET
# project file.
import string
import sys
import os
import glob
# EOL constants
CR = "\r"
LF = "\n"
CRLF = "\r\n"
if sys.platform == "win32":
NATIVE = CRLF
else:
# Yes, LF is the native EOL even on Mac OS X. CR is just for
# Mac OS <=9 (a.k.a. "Mac Classic")
NATIVE = LF
# Automatically generated sections contain start and end comments,
# a definition line and the results.
# The results are replaced by regenerating based on the definition line.
# The definition line is a comment prefix followed by "**".
# If there is a digit after the ** then this indicates which list to use
# and the digit and next character are not part of the definition
# Backslash is used as an escape within the definition line.
# The part between \( and \) is repeated for each item in the list.
# \* is replaced by each list item. \t, and \n are tab and newline.
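# A minimal, hedged illustration (not taken from any real source file): with
# commentPrefix "//" and the list ["LexAda", "LexCPP"], a section such as
#   //++Autogenerated
#   //**\(#include "\*.h"\n\)
#   //--Autogenerated
# is filled in by CopyWithInsertion below so that
#   #include "LexAda.h"
#   #include "LexCPP.h"
# appears between the definition line and the --Autogenerated marker.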
def CopyWithInsertion(input, commentPrefix, retainDefs, eolType, *lists):
copying = 1
listid = 0
output = []
for line in input.splitlines(0):
isStartGenerated = line.startswith(commentPrefix + "++Autogenerated")
if copying and not isStartGenerated:
output.append(line)
if isStartGenerated:
if retainDefs:
output.append(line)
copying = 0
definition = ""
elif not copying and line.startswith(commentPrefix + "**"):
if retainDefs:
output.append(line)
definition = line[len(commentPrefix + "**"):]
listid = 0
if definition[0] in string.digits:
listid = int(definition[:1])
definition = definition[2:]
# Hide double slashes as a control character
definition = definition.replace("\\\\", "\001")
# Do some normal C style transforms
definition = definition.replace("\\n", "\n")
definition = definition.replace("\\t", "\t")
# Get the doubled backslashes back as single backslashes
definition = definition.replace("\001", "\\")
startRepeat = definition.find("\\(")
endRepeat = definition.find("\\)")
intro = definition[:startRepeat]
out = ""
if intro.endswith("\n"):
pos = 0
else:
pos = len(intro)
out += intro
middle = definition[startRepeat+2:endRepeat]
for i in lists[listid]:
item = middle.replace("\\*", i)
if pos and (pos + len(item) >= 80):
out += "\\\n"
pos = 0
out += item
pos += len(item)
if item.endswith("\n"):
pos = 0
outro = definition[endRepeat+2:]
out += outro
out = out.replace("\n", eolType) # correct EOLs in generated content
output.append(out)
elif line.startswith(commentPrefix + "--Autogenerated"):
copying = 1
if retainDefs:
output.append(line)
output = [line.rstrip(" \t") for line in output] # trim trailing whitespace
return eolType.join(output) + eolType
def UpdateFile(filename, updated):
""" If the file is different to updated then copy updated
into the file else leave alone so CVS and make don't treat
it as modified. """
try:
infile = open(filename, "rb")
except IOError: # File is not there yet
out = open(filename, "wb")
out.write(updated)
out.close()
print "New", filename
return
original = infile.read()
infile.close()
if updated != original:
os.unlink(filename)
out = open(filename, "wb")
out.write(updated)
out.close()
print "Changed", filename
#~ else:
#~ print "Unchanged", filename
def Generate(inpath, outpath, commentPrefix, eolType, *lists):
"""Generate 'outpath' from 'inpath'.
"eolType" indicates the type of EOLs to use in the generated
file. It should be one of following constants: LF, CRLF,
CR, or NATIVE.
"""
#print "generate '%s' -> '%s' (comment prefix: %r, eols: %r)"\
# % (inpath, outpath, commentPrefix, eolType)
try:
infile = open(inpath, "r")
except IOError:
print "Can not open", inpath
return
original = infile.read()
infile.close()
updated = CopyWithInsertion(original, commentPrefix,
inpath == outpath, eolType, *lists)
UpdateFile(outpath, updated)
def Regenerate(filename, commentPrefix, eolType, *lists):
"""Regenerate the given file.
"eolType" indicates the type of EOLs to use in the generated
file. It should be one of following constants: LF, CRLF,
CR, or NATIVE.
"""
Generate(filename, filename, commentPrefix, eolType, *lists)
def FindModules(lexFile):
modules = []
f = open(lexFile)
for l in f.readlines():
if l.startswith("LexerModule"):
l = l.replace("(", " ")
modules.append(l.split()[1])
return modules
knownIrregularProperties = [
"fold",
"styling.within.preprocessor",
"tab.timmy.whinge.level",
"asp.default.language",
"html.tags.case.sensitive",
"ps.level",
"ps.tokenize",
"sql.backslash.escapes",
"nsis.uservars",
"nsis.ignorecase"
]
def FindProperties(lexFile):
properties = set()
f = open(lexFile)
for l in f.readlines():
if "GetProperty" in l:
l = l.strip()
if not l.startswith("//"): # Drop comments
propertyName = l.split("\"")[1]
if propertyName.lower() == propertyName:
# Only allow lower case property names
if propertyName in knownIrregularProperties or \
propertyName.startswith("fold.") or \
propertyName.startswith("lexer."):
properties.add(propertyName)
return properties
def ciCompare(a,b):
return cmp(a.lower(), b.lower())
def RegenerateAll():
root="../../"
# Find all the lexer source code files
lexFilePaths = glob.glob(root + "scintilla/src/Lex*.cxx")
lexFiles = [os.path.basename(f)[:-4] for f in lexFilePaths]
print lexFiles
lexerModules = []
lexerProperties = set()
for lexFile in lexFilePaths:
lexerModules.extend(FindModules(lexFile))
lexerProperties.update(FindProperties(lexFile))
lexerModules.sort(ciCompare)
lexerProperties.remove("fold.comment.python")
lexerProperties = list(lexerProperties)
lexerProperties.sort(ciCompare)
# Find all the SciTE properties files
otherProps = ["abbrev.properties", "Embedded.properties", "SciTEGlobal.properties", "SciTE.properties"]
if os.path.exists(root + "scite"):
propFilePaths = glob.glob(root + "scite/src/*.properties")
propFiles = [os.path.basename(f) for f in propFilePaths if os.path.basename(f) not in otherProps]
propFiles.sort(ciCompare)
print propFiles
# Find all the menu command IDs in the SciTE header
SciTEHeader = file(root + "scite/src/SciTE.h")
lines = SciTEHeader.read().split("\n")
SciTEHeader.close()
ids = [id for id in [l.split()[1] for l in lines if l.startswith("#define")] if id.startswith("IDM_")]
#print ids
Regenerate(root + "scintilla/src/KeyWords.cxx", "//", NATIVE, lexerModules)
Regenerate(root + "scintilla/win32/makefile", "#", NATIVE, lexFiles)
Regenerate(root + "scintilla/win32/scintilla.mak", "#", NATIVE, lexFiles)
Regenerate(root + "scintilla/win32/scintilla_vc6.mak", "#", NATIVE, lexFiles)
# Use Unix EOLs for gtk Makefiles so they work for Linux users when
# extracted from the Scintilla source ZIP (typically created on
# Windows).
Regenerate(root + "scintilla/gtk/makefile", "#", LF, lexFiles)
Regenerate(root + "scintilla/gtk/scintilla.mak", "#", NATIVE, lexFiles)
Regenerate(root + "scintilla/macosx/makefile", "#", LF, lexFiles)
if os.path.exists(root + "scite"):
Regenerate(root + "scite/win32/makefile", "#", NATIVE, lexFiles, propFiles)
Regenerate(root + "scite/win32/scite.mak", "#", NATIVE, lexFiles, propFiles)
Regenerate(root + "scite/src/SciTEProps.cxx", "//", NATIVE, lexerProperties, ids)
Generate(root + "scite/boundscheck/vcproj.gen",
root + "scite/boundscheck/SciTE.vcproj", "#", NATIVE, lexFiles)
RegenerateAll()
|
|
#!/usr/bin/env python
import os
import datetime
import numpy as np
import optparse
from collections import OrderedDict
from utils import create_input_batch
import loader
from utils import models_path, models_saver_path, evaluate
from loader import word_mapping, char_mapping, tag_mapping
from loader import prepare_dataset, prepare_dataset_
from model import Model
import tensorflow as tf
# Read parameters from command line
optparser = optparse.OptionParser()
optparser.add_option(
"-T", "--train", default="",
help="Train set location"
)
optparser.add_option(
"-d", "--dev", default="",
help="Dev set location"
)
optparser.add_option(
"-t", "--test", default="",
help="Test set location"
)
optparser.add_option(
"-l", "--lower", default="0",
type='int', help="Lowercase words (this will not affect character inputs)"
)
optparser.add_option(
"-z", "--zeros", default="0",
type='int', help="Replace digits with 0"
)
optparser.add_option(
"-c", "--char_dim", default="25",
type='int', help="Char embedding dimension"
)
optparser.add_option(
"-C", "--char_lstm_dim", default="25",
type='int', help="Char LSTM hidden layer size"
)
optparser.add_option(
"-b", "--char_bidirect", default="1",
type='int', help="Use a bidirectional LSTM for chars"
)
optparser.add_option(
"-w", "--word_dim", default="100",
type='int', help="Token embedding dimension"
)
optparser.add_option(
"-W", "--word_lstm_dim", default="100",
type='int', help="Token LSTM hidden layer size"
)
optparser.add_option(
"-B", "--word_bidirect", default="1",
type='int', help="Use a bidirectional LSTM for words"
)
optparser.add_option(
"-f", "--crf", default="1",
type='int', help="Use CRF (0 to disable)"
)
optparser.add_option(
"-D", "--dropout", default="0",
type='float', help="Droupout on the input (0 = no dropout)"
)
optparser.add_option(
"-L", "--lr_method", default="sgd",
help="Learning method (SGD, Adadelta, Adam..)"
)
optparser.add_option(
"-R", "--lr_rate", default="0.005",
type='float', help="learning rate"
)
optparser.add_option(
"-p", "--clip_norm", default="0",
type='float', help="The clipping ratio"
)
optparser.add_option(
"-r", "--mode", default="1",
type='int', help="1 for Train and 0 for Test"
)
optparser.add_option(
"-G", "--batch_size", default="20",
type='int', help="batch size"
)
optparser.add_option(
"-g", "--singleton", default="0",
type='float', help=" whether it needs to replace singletons by the unknown word or not"
)
optparser.add_option(
"-E", "--epoch", default="50",
type='int', help="number of epochs over the training set"
)
optparser.add_option(
"-F", "--freq", default="5000",
type='int', help="evaluate on dev every freq_eval steps"
)
optparser.add_option(
"-Z", "--gpu_no", default="-1",
type='int', help="whether using the cpu or gpu"
)
opts = optparser.parse_args()[0]
# Parse parameters
parameters = OrderedDict()
parameters['lower'] = opts.lower == 1
parameters['zeros'] = opts.zeros == 1
parameters['char_dim'] = opts.char_dim
parameters['char_lstm_dim'] = opts.char_lstm_dim
parameters['char_bidirect'] = opts.char_bidirect == 1
parameters['word_dim'] = opts.word_dim
parameters['word_lstm_dim'] = opts.word_lstm_dim
parameters['word_bidirect'] = opts.word_bidirect == 1
parameters['crf'] = opts.crf == 1
parameters['dropout'] = opts.dropout
parameters['lr_method'] = opts.lr_method
parameters['lr_rate'] = opts.lr_rate
parameters['clip_norm'] = opts.clip_norm
parameters['is_train'] = opts.mode
#parameters['update'] = opts.update_scheme
parameters['batch_size'] = opts.batch_size
# Check parameters validity
assert os.path.isfile(opts.train)
assert parameters['char_dim'] > 0 or parameters['word_dim'] > 0
assert 0. <= parameters['dropout'] < 1.0
if not os.path.exists(models_path):
os.makedirs(models_path)
if not os.path.exists(models_saver_path):
os.makedirs(models_saver_path)
# Initialize model
model = Model(parameters=parameters, models_path=models_path)
print "Model location: %s" % model.model_path
# Data parameters
lower = parameters['lower']
zeros = parameters['zeros']
batch_size = parameters['batch_size']
# Load sentences
train_sentences = loader.load_sentences(opts.train, lower, zeros)
dev_sentences = loader.load_sentences(opts.dev, lower, zeros)
test_sentences = loader.load_sentences(opts.test, lower, zeros)
# Create a dictionary / mapping of words
dico_words, word_to_id, id_to_word = word_mapping(train_sentences, lower)
dico_words_train = dico_words
# Create a dictionary and a mapping for words / POS tags / tags
id_to_char = {}
if opts.char_dim:
dico_chars, char_to_id, id_to_char = char_mapping(train_sentences)
dico_tags, tag_to_id, id_to_tag = tag_mapping(train_sentences)
n_tag = len(id_to_tag)
# Index data
if opts.char_dim:
train_data = prepare_dataset(
train_sentences, word_to_id, char_to_id, tag_to_id, lower
)
dev_data = prepare_dataset(
dev_sentences, word_to_id, char_to_id, tag_to_id, lower
)
test_data = prepare_dataset(
test_sentences, word_to_id, char_to_id, tag_to_id, lower
)
else:
train_data = prepare_dataset_(
train_sentences, word_to_id, tag_to_id, lower
)
dev_data = prepare_dataset_(
dev_sentences, word_to_id, tag_to_id, lower
)
test_data = prepare_dataset_(
test_sentences, word_to_id, tag_to_id, lower
)
print "%i / %i / %i sentences in train / dev / test." % (
len(train_data), len(dev_data), len(test_data))
# Save the mappings to disk
print 'Saving the mappings to disk...'
model.save_mappings(id_to_word, id_to_char, id_to_tag)
# Build the model
if opts.gpu_no < 0:
with tf.device("/cpu:0"):
cost, tags_scores, train_op = model.build(**parameters)
else:
with tf.device("/gpu:" + str(opts.gpu_no)):
cost, tags_scores, train_op = model.build(**parameters)
#
# Train network
#
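# Words that occur only once in the training data ("singletons") can optionally be
# replaced by the unknown-word token while batches are built, so the model learns a
# usable representation for out-of-vocabulary words encountered at test time.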
singletons = None
if opts.singleton:
singletons = set([word_to_id[k] for k, v in dico_words_train.items() if v == 1])
n_epochs = opts.epoch # number of epochs over the training set
freq_eval = opts.freq # evaluate on dev every freq_eval steps
count = 0
best_dev = -np.inf
best_test = -np.inf
saver = tf.train.Saver()
start_time_all = datetime.datetime.now()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
for epoch in xrange(n_epochs):
epoch_costs = []
epoch_accus = []
epoch_sentence = []
print "Starting epoch %i..." % epoch
permutation_index = np.random.permutation(len(train_data))
train_data_count = 0
start_time_epoch = datetime.datetime.now()
token_count = 0.0
while train_data_count <= len(permutation_index):
batch_data = []
start_time = datetime.datetime.now()
for i in xrange(batch_size):
count += 1
index = i + train_data_count
if index >= len(permutation_index):
index %= len(permutation_index)
batch_data.append(train_data[permutation_index[index]])
input_ = create_input_batch(batch_data, parameters, n_tag, True, singletons)
feed_dict_ = {}
if parameters['char_dim']:
assert len(input_) == 8
feed_dict_[model.word_ids] = input_[0]
feed_dict_[model.word_pos_ids] = input_[1]
feed_dict_[model.char_for_ids] = input_[2]
feed_dict_[model.char_rev_ids] = input_[3]
feed_dict_[model.char_pos_ids] = input_[4]
feed_dict_[model.tag_ids] = input_tag = input_[5]
feed_dict_[model.tag_id_trans] = input_[6]
feed_dict_[model.tag_id_index] = input_[7]
else:
assert len(input_) == 5
feed_dict_[model.word_ids] = input_[0]
feed_dict_[model.word_pos_ids] = input_[1]
feed_dict_[model.tag_ids] = input_tag = input_[2]
feed_dict_[model.tag_id_trans] = input_[3]
feed_dict_[model.tag_id_index] = input_[4]
new_cost, f_scores, _ = sess.run([cost, tags_scores, train_op], feed_dict=feed_dict_)
accus_batch = []
sentence_batch = []
if parameters['crf']:
for x in xrange(batch_size):
f_score = f_scores[x]
word_pos = input_[1][x] + 2
y_pred = f_score[1:word_pos]
y_real = input_tag[x][0:(word_pos-1)]
correct_prediction = np.equal(y_pred, y_real)
accus = np.array(correct_prediction).astype(float).sum()
accus_mean = np.array(correct_prediction).astype(float).mean()
accus_batch.append(accus)
if accus_mean < 1.0:
sentence_batch.append(0.0)
else:
sentence_batch.append(1.0)
token_count += (input_[1][x] + 1)
sentence_val = np.array(sentence_batch).astype(float).mean()
else:
y_preds = f_scores.argmax(axis=-1)
y_reals = np.array(input_tag).astype(np.int32)
for x in xrange(batch_size):
word_pos = input_[1][x] + 1
y_pred = y_preds[x][0:word_pos]
y_real = y_reals[x][0:word_pos]
correct_prediction = np.equal(y_pred, y_real)
accus = np.array(correct_prediction).astype(float).sum()
accus_mean = np.array(correct_prediction).astype(float).mean()
accus_batch.append(accus)
if accus_mean < 1.0:
sentence_batch.append(0.0)
else:
sentence_batch.append(1.0)
token_count += word_pos
sentence_val = np.array(sentence_batch).astype(float).mean()
epoch_costs.append(new_cost)
epoch_accus.extend(accus_batch)
epoch_sentence.append(sentence_val)
end_time = datetime.datetime.now()
cost_time = (end_time - start_time).seconds
if train_data_count % freq_eval == 0 and train_data_count > 0:
assert token_count != 0.0
token_accus_freq = np.sum(epoch_accus) / token_count
print "%i, cost average: %f, accuracy average: %f, sentence accuracy avg: %f, cost time: %i" % (train_data_count, np.mean(epoch_costs), token_accus_freq, np.mean(epoch_sentence), cost_time)
if train_data_count % freq_eval == 0 and train_data_count > 0:
dev_score, dev_sentence_score = evaluate(sess, tags_scores, model, parameters, dev_data, n_tag)
test_score, test_sentence_score = evaluate(sess, tags_scores, model, parameters, test_data, n_tag)
print "Score on dev: %.5f" % dev_score
print "Score on test: %.5f" % test_score
if dev_score > best_dev:
best_dev = dev_score
print "New best score on dev."
print "Saving model to disk..."
saver.save(sess, os.path.join(models_saver_path, 'model.ckpt'), global_step=count)
if test_score > best_test:
best_test = test_score
print "New best score on test."
train_data_count += batch_size
assert token_count != 0.0
token_accus_epoch = np.sum(epoch_accus) / token_count
end_time_epoch = datetime.datetime.now()
cost_time_epoch = (end_time_epoch - start_time_epoch).seconds
print "Epoch %i done. Average cost: %f, Average accuracy: %f, Average sentence: %f, Cost time: %i" % (epoch, np.mean(epoch_costs), token_accus_epoch, np.mean(epoch_sentence), cost_time_epoch)
end_time_all = datetime.datetime.now()
cost_time_a = (end_time_all - start_time_all).seconds
print "Epoch %i done. Cost time: %i" % (n_epochs, cost_time_a)
|
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Vincent Michel <vincent.michel@inria.fr>
# Gilles Louppe <g.louppe@gmail.com>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import numpy as np
import numbers
from joblib import Parallel, effective_n_jobs
from ..utils.metaestimators import if_delegate_has_method
from ..utils.metaestimators import _safe_split
from ..utils._tags import _safe_tags
from ..utils.validation import check_is_fitted
from ..utils.fixes import delayed
from ..utils.deprecation import deprecated
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..model_selection import check_cv
from ..model_selection._validation import _score
from ..metrics import check_scoring
from ._base import SelectorMixin
from ._base import _get_feature_importances
def _rfe_single_fit(rfe, estimator, X, y, train, test, scorer):
"""
Return the score for a fit across one fold.
"""
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
return rfe._fit(
X_train,
y_train,
lambda estimator, features: _score(
estimator, X_test[:, features], y_test, scorer
),
).scores_
class RFE(SelectorMixin, MetaEstimatorMixin, BaseEstimator):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and the importance of each feature is obtained either through
a specific attribute or a callable.
Then, the least important features are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : ``Estimator`` instance
A supervised learning estimator with a ``fit`` method that provides
information about feature importance
(e.g. `coef_`, `feature_importances_`).
n_features_to_select : int or float, default=None
The number of features to select. If `None`, half of the features are
selected. If integer, the parameter is the absolute number of features
to select. If float between 0 and 1, it is the fraction of features to
select.
.. versionchanged:: 0.24
Added float values for fractions.
step : int or float, default=1
If greater than or equal to 1, then ``step`` corresponds to the
(integer) number of features to remove at each iteration.
If within (0.0, 1.0), then ``step`` corresponds to the percentage
(rounded down) of features to remove at each iteration.
verbose : int, default=0
Controls verbosity of output.
importance_getter : str or callable, default='auto'
If 'auto', uses the feature importance either through a `coef_`
or `feature_importances_` attributes of estimator.
Also accepts a string that specifies an attribute name/path
for extracting feature importance (implemented with `attrgetter`).
For example, give `regressor_.coef_` in case of
:class:`~sklearn.compose.TransformedTargetRegressor` or
`named_steps.clf.feature_importances_` in case of
:class:`~sklearn.pipeline.Pipeline` with its last step named `clf`.
If `callable`, overrides the default feature importance getter.
The callable is passed with the fitted estimator and it should
return importance for each feature.
.. versionadded:: 0.24
Attributes
----------
classes_ : ndarray of shape (n_classes,)
The classes labels. Only available when `estimator` is a classifier.
estimator_ : ``Estimator`` instance
The fitted estimator used to select features.
n_features_ : int
The number of selected features.
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying estimator exposes such an attribute when fit.
.. versionadded:: 0.24
ranking_ : ndarray of shape (n_features,)
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
support_ : ndarray of shape (n_features,)
The mask of selected features.
Examples
--------
The following example shows how to retrieve the 5 most informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, n_features_to_select=5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_
array([ True, True, True, True, True, False, False, False, False,
False])
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
Notes
-----
Allows NaN/Inf in the input if the underlying estimator does as well.
See Also
--------
RFECV : Recursive feature elimination with built-in cross-validated
selection of the best number of features.
SelectFromModel : Feature selection based on thresholds of importance
weights.
SequentialFeatureSelector : Sequential cross-validation based feature
selection. Does not rely on importance weights.
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(
self,
estimator,
*,
n_features_to_select=None,
step=1,
verbose=0,
importance_getter="auto",
):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.importance_getter = importance_getter
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
@property
def classes_(self):
"""Classes labels available when `estimator` is a classifier.
Returns
-------
ndarray of shape (n_classes,)
"""
return self.estimator_.classes_
def fit(self, X, y, **fit_params):
"""Fit the RFE model and then the underlying estimator on the selected features.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,)
The target values.
**fit_params : dict
Additional parameters passed to the `fit` method of the underlying
estimator.
Returns
-------
self : object
Fitted estimator.
"""
return self._fit(X, y, **fit_params)
def _fit(self, X, y, step_score=None, **fit_params):
# Parameter step_score controls the calculation of self.scores_
# step_score is not exposed to users
# and is used when implementing RFECV
# self.scores_ will not be calculated when calling _fit through fit
tags = self._get_tags()
X, y = self._validate_data(
X,
y,
accept_sparse="csc",
ensure_min_features=2,
force_all_finite=not tags.get("allow_nan", True),
multi_output=True,
)
error_msg = (
"n_features_to_select must be either None, a "
"positive integer representing the absolute "
"number of features or a float in (0.0, 1.0] "
"representing a percentage of features to "
f"select. Got {self.n_features_to_select}"
)
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features // 2
elif self.n_features_to_select < 0:
raise ValueError(error_msg)
elif isinstance(self.n_features_to_select, numbers.Integral): # int
n_features_to_select = self.n_features_to_select
elif self.n_features_to_select > 1.0: # float > 1
raise ValueError(error_msg)
else: # float
n_features_to_select = int(n_features * self.n_features_to_select)
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
support_ = np.ones(n_features, dtype=bool)
ranking_ = np.ones(n_features, dtype=int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y, **fit_params)
# Get importance and rank them
importances = _get_feature_importances(
estimator,
self.importance_getter,
transform_func="square",
)
ranks = np.argsort(importances)
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
# Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
self.estimator_.fit(X[:, features], y, **fit_params)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate="estimator")
def predict(self, X):
"""Reduce X to the selected features and then predict using the underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
check_is_fitted(self)
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate="estimator")
def score(self, X, y, **fit_params):
"""Reduce X to the selected features and return the score of the underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
**fit_params : dict
Parameters to pass to the `score` method of the underlying
estimator.
.. versionadded:: 1.0
Returns
-------
score : float
Score of the underlying base estimator computed with the selected
features returned by `rfe.transform(X)` and `y`.
"""
check_is_fitted(self)
return self.estimator_.score(self.transform(X), y, **fit_params)
def _get_support_mask(self):
check_is_fitted(self)
return self.support_
@if_delegate_has_method(delegate="estimator")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like or sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
check_is_fitted(self)
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate="estimator")
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : {array-like or sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape (n_samples, n_classes)
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
check_is_fitted(self)
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate="estimator")
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape (n_samples, n_classes)
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
check_is_fitted(self)
return self.estimator_.predict_log_proba(self.transform(X))
def _more_tags(self):
return {
"poor_score": True,
"allow_nan": _safe_tags(self.estimator, key="allow_nan"),
"requires_y": True,
}
class RFECV(RFE):
"""Recursive feature elimination with cross-validation to select the number of features.
See glossary entry for :term:`cross-validation estimator`.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : ``Estimator`` instance
A supervised learning estimator with a ``fit`` method that provides
information about feature importance either through a ``coef_``
attribute or through a ``feature_importances_`` attribute.
step : int or float, default=1
If greater than or equal to 1, then ``step`` corresponds to the
(integer) number of features to remove at each iteration.
If within (0.0, 1.0), then ``step`` corresponds to the percentage
(rounded down) of features to remove at each iteration.
Note that the last iteration may remove fewer than ``step`` features in
order to reach ``min_features_to_select``.
min_features_to_select : int, default=1
The minimum number of features to be selected. This number of features
will always be scored, even if the difference between the original
feature count and ``min_features_to_select`` isn't divisible by
``step``.
.. versionadded:: 0.20
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`~sklearn.model_selection.StratifiedKFold` is used. If the
estimator is not a classifier or if ``y`` is neither binary nor multiclass,
:class:`~sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value of None changed from 3-fold to 5-fold.
scoring : str, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int, default=0
Controls verbosity of output.
n_jobs : int or None, default=None
Number of cores to run in parallel while fitting across folds.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 0.18
importance_getter : str or callable, default='auto'
If 'auto', uses the feature importance either through a `coef_`
or `feature_importances_` attributes of estimator.
Also accepts a string that specifies an attribute name/path
for extracting feature importance.
For example, give `regressor_.coef_` in case of
:class:`~sklearn.compose.TransformedTargetRegressor` or
`named_steps.clf.feature_importances_` in case of
:class:`~sklearn.pipeline.Pipeline` with its last step named `clf`.
If `callable`, overrides the default feature importance getter.
The callable is passed with the fitted estimator and it should
return importance for each feature.
.. versionadded:: 0.24
Attributes
----------
classes_ : ndarray of shape (n_classes,)
The classes labels. Only available when `estimator` is a classifier.
estimator_ : ``Estimator`` instance
The fitted estimator used to select features.
grid_scores_ : ndarray of shape (n_subsets_of_features,)
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
.. deprecated:: 1.0
The `grid_scores_` attribute is deprecated in version 1.0 in favor
of `cv_results_` and will be removed in version 1.2.
cv_results_ : dict of ndarrays
A dict with keys:
split(k)_test_score : ndarray of shape (n_features,)
The cross-validation scores across (k)th fold.
mean_test_score : ndarray of shape (n_features,)
Mean of scores over the folds.
std_test_score : ndarray of shape (n_features,)
Standard deviation of scores over the folds.
.. versionadded:: 1.0
n_features_ : int
The number of selected features with cross-validation.
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying estimator exposes such an attribute when fit.
.. versionadded:: 0.24
ranking_ : ndarray of shape (n_features,)
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
support_ : ndarray of shape (n_features,)
The mask of selected features.
See Also
--------
RFE : Recursive feature elimination.
Notes
-----
The size of ``grid_scores_`` is equal to
``ceil((n_features - min_features_to_select) / step) + 1``,
where step is the number of features removed at each iteration.
Allows NaN/Inf in the input if the underlying estimator does as well.
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
Examples
--------
The following example shows how to retrieve the 5 informative features, whose
identities are not known a priori, in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_
array([ True, True, True, True, True, False, False, False, False,
False])
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
"""
def __init__(
self,
estimator,
*,
step=1,
min_features_to_select=1,
cv=None,
scoring=None,
verbose=0,
n_jobs=None,
importance_getter="auto",
):
self.estimator = estimator
self.step = step
self.importance_getter = importance_getter
self.cv = cv
self.scoring = scoring
self.verbose = verbose
self.n_jobs = n_jobs
self.min_features_to_select = min_features_to_select
def fit(self, X, y, groups=None):
"""Fit the RFE model and automatically tune the number of selected features.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like of shape (n_samples,)
Target values (integers for classification, real numbers for
regression).
groups : array-like of shape (n_samples,) or None, default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
.. versionadded:: 0.20
Returns
-------
self : object
Fitted estimator.
"""
tags = self._get_tags()
X, y = self._validate_data(
X,
y,
accept_sparse="csr",
ensure_min_features=2,
force_all_finite=not tags.get("allow_nan", True),
multi_output=True,
)
# Initialization
cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
# Build an RFE object, which will evaluate and score each possible
# feature count, down to self.min_features_to_select
rfe = RFE(
estimator=self.estimator,
n_features_to_select=self.min_features_to_select,
importance_getter=self.importance_getter,
step=self.step,
verbose=self.verbose,
)
# Determine the number of subsets of features by fitting across
# the train folds and choosing the "features_to_select" parameter
# that gives the least averaged error across all folds.
# Note that joblib raises a non-picklable error for bound methods
# even if n_jobs is set to 1 with the default multiprocessing
# backend.
# This branching is done to make sure that user code that sets n_jobs
# to 1 and provides bound methods as scorers is not broken with the
# addition of the n_jobs parameter in version 0.18.
if effective_n_jobs(self.n_jobs) == 1:
parallel, func = list, _rfe_single_fit
else:
parallel = Parallel(n_jobs=self.n_jobs)
func = delayed(_rfe_single_fit)
scores = parallel(
func(rfe, self.estimator, X, y, train, test, scorer)
for train, test in cv.split(X, y, groups)
)
scores = np.array(scores)
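# The per-fold scores run from the full feature set down to
# min_features_to_select. Reversing before argmax means that, when several
# candidate subset sizes tie on the summed score, the smallest feature subset
# wins, because np.argmax returns the first maximum it encounters.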
scores_sum = np.sum(scores, axis=0)
scores_sum_rev = scores_sum[::-1]
argmax_idx = len(scores_sum) - np.argmax(scores_sum_rev) - 1
n_features_to_select = max(
n_features - (argmax_idx * step), self.min_features_to_select
)
# Re-execute an elimination with best_k over the whole set
rfe = RFE(
estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step,
importance_getter=self.importance_getter,
verbose=self.verbose,
)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
self.estimator_.fit(self.transform(X), y)
# reverse to stay consistent with before
scores_rev = scores[:, ::-1]
self.cv_results_ = {}
self.cv_results_["mean_test_score"] = np.mean(scores_rev, axis=0)
self.cv_results_["std_test_score"] = np.std(scores_rev, axis=0)
for i in range(scores.shape[0]):
self.cv_results_[f"split{i}_test_score"] = scores_rev[i]
return self
# TODO: Remove in v1.2 when grid_scores_ is removed
# mypy error: Decorated property not supported
@deprecated( # type: ignore
"The `grid_scores_` attribute is deprecated in version 1.0 in favor "
"of `cv_results_` and will be removed in version 1.2."
)
@property
def grid_scores_(self):
# remove 2 for mean_test_score, std_test_score
grid_size = len(self.cv_results_) - 2
return np.asarray(
[self.cv_results_[f"split{i}_test_score"] for i in range(grid_size)]
).T
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Debug the tf-learn iris example, based on the tf-learn tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tempfile
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python import debug as tf_debug
# URLs to download data sets from, if necessary.
IRIS_TRAINING_DATA_URL = "https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/examples/tutorials/monitors/iris_training.csv"
IRIS_TEST_DATA_URL = "https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/examples/tutorials/monitors/iris_test.csv"
def maybe_download_data(data_dir):
"""Download data sets if necessary.
Args:
data_dir: Path to where data should be downloaded.
Returns:
Paths to the training and test data files.
"""
if not os.path.isdir(data_dir):
os.makedirs(data_dir)
training_data_path = os.path.join(data_dir,
os.path.basename(IRIS_TRAINING_DATA_URL))
if not os.path.isfile(training_data_path):
train_file = open(training_data_path, "wt")
urllib.request.urlretrieve(IRIS_TRAINING_DATA_URL, train_file.name)
train_file.close()
print("Training data are downloaded to %s" % train_file.name)
test_data_path = os.path.join(data_dir, os.path.basename(IRIS_TEST_DATA_URL))
if not os.path.isfile(test_data_path):
test_file = open(test_data_path, "wt")
urllib.request.urlretrieve(IRIS_TEST_DATA_URL, test_file.name)
test_file.close()
print("Test data are downloaded to %s" % test_file.name)
return training_data_path, test_data_path
_IRIS_INPUT_DIM = 4
def iris_input_fn():
iris = base.load_iris()
features = tf.reshape(tf.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = tf.reshape(tf.constant(iris.target), [-1])
return features, labels
def main(_):
# Load datasets.
if FLAGS.fake_data:
def training_input_fn():
return ({"features": tf.random_normal([128, 4])},
tf.random_uniform([128], minval=0, maxval=3, dtype=tf.int32))
def test_input_fn():
return ({"features": tf.random_normal([32, 4])},
tf.random_uniform([32], minval=0, maxval=3, dtype=tf.int32))
feature_columns = [
tf.feature_column.numeric_column("features", shape=(4,))]
else:
training_data_path, test_data_path = maybe_download_data(FLAGS.data_dir)
column_names = [
"sepal_length", "sepal_width", "petal_length", "petal_width", "label"]
batch_size = 32
def training_input_fn():
return tf.contrib.data.make_csv_dataset(
[training_data_path], batch_size,
column_names=column_names, label_name="label")
def test_input_fn():
return tf.contrib.data.make_csv_dataset(
[test_data_path], batch_size,
column_names=column_names, label_name="label")
feature_columns = [tf.feature_column.numeric_column(feature)
for feature in column_names[:-1]]
# Build 3 layer DNN with 10, 20, 10 units respectively.
model_dir = FLAGS.model_dir or tempfile.mkdtemp(prefix="debug_tflearn_iris_")
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
model_dir=model_dir)
hooks = None
if FLAGS.debug and FLAGS.tensorboard_debug_address:
raise ValueError(
"The --debug and --tensorboard_debug_address flags are mutually "
"exclusive.")
if FLAGS.debug:
hooks = [tf_debug.LocalCLIDebugHook(ui_type=FLAGS.ui_type,
dump_root=FLAGS.dump_root)]
elif FLAGS.tensorboard_debug_address:
hooks = [tf_debug.TensorBoardDebugHook(FLAGS.tensorboard_debug_address)]
# Train model, using tfdbg hook.
classifier.train(training_input_fn,
steps=FLAGS.train_steps,
hooks=hooks)
# Evaluate accuracy, using tfdbg hook.
accuracy_score = classifier.evaluate(test_input_fn,
steps=FLAGS.eval_steps,
hooks=hooks)["accuracy"]
print("After training %d steps, Accuracy = %f" %
(FLAGS.train_steps, accuracy_score))
# Make predictions, using tfdbg hook.
predict_results = classifier.predict(test_input_fn, hooks=hooks)
print("A prediction result: %s" % next(predict_results))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--data_dir",
type=str,
default="/tmp/iris_data",
help="Directory to save the training and test data in.")
parser.add_argument(
"--model_dir",
type=str,
default="",
help="Directory to save the trained model in.")
parser.add_argument(
"--train_steps",
type=int,
default=10,
help="Number of steps to run training for.")
parser.add_argument(
"--eval_steps",
type=int,
default=1,
help="Number of steps to run evaluation foir.")
parser.add_argument(
"--ui_type",
type=str,
default="curses",
help="Command-line user interface type (curses | readline)")
parser.add_argument(
"--fake_data",
type="bool",
nargs="?",
const=True,
default=False,
help="Use fake MNIST data for unit testing")
parser.add_argument(
"--debug",
type="bool",
nargs="?",
const=True,
default=False,
help="Use debugger to track down bad values during training. "
"Mutually exclusive with the --tensorboard_debug_address flag.")
parser.add_argument(
"--dump_root",
type=str,
default="",
help="Optional custom root directory for temporary debug dump data")
parser.add_argument(
"--tensorboard_debug_address",
type=str,
default=None,
help="Connect to the TensorBoard Debugger Plugin backend specified by "
"the gRPC address (e.g., localhost:1234). Mutually exclusive with the "
"--debug flag.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
|
"""
Linux on Hyper-V and Azure Test Code, ver. 1.0.0
Copyright (c) Microsoft Corporation
All rights reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
See the Apache Version 2.0 License for specific language governing
permissions and limitations under the License.
"""
import os
import time
import paramiko
import socket
class SSHClient(object):
"""
This class creates a paramiko.SSHClient() object that represents
a session with an SSH server. You can use the SSHClient object to send
commands to the remote host and manipulate files on the remote host.
:param server: A server hostname or ip.
:param host_key_file: The path to the user's .ssh key files.
:param user: The username for the SSH connection. Default = 'root'.
:param timeout: The optional timeout variable for the TCP connection.
:param ssh_pwd: An optional password to use for authentication or for
unlocking the private key.
:param ssh_key_file: SSH key pem data
"""
def __init__(self, server, host_key_file='~/.ssh/known_hosts', user='root', timeout=None,
ssh_pwd=None, ssh_key_file=None):
self.server = server
self.host_key_file = host_key_file
self.user = user
self._timeout = timeout
self._pkey = paramiko.RSAKey.from_private_key_file(ssh_key_file, password=ssh_pwd)
self._ssh_client = paramiko.SSHClient()
self._ssh_client.load_system_host_keys()
self._ssh_client.load_host_keys(os.path.expanduser(host_key_file))
self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.connect()
def connect(self, num_retries=5):
"""
Connect to an SSH server and authenticate with it.
:type num_retries: int
:param num_retries: The maximum number of connection attempts.
"""
retry = 0
while retry < num_retries:
try:
self._ssh_client.connect(self.server,
username=self.user,
pkey=self._pkey,
timeout=self._timeout)
return
except socket.error as sock_err:
(value, message) = sock_err.args
if value in (51, 61, 111):
print('SSH Connection refused, will retry in 5 seconds')
time.sleep(5)
retry += 1
else:
raise
except paramiko.BadHostKeyException:
print("{} has an entry in ~/.ssh/known_hosts and it doesn't match".format(
self.server))
print('Edit that file to remove the entry and then hit return to try again')
raw_input('Hit Enter when ready')
retry += 1
except EOFError:
print('Unexpected Error from SSH Connection, retry in 5 seconds')
time.sleep(5)
retry += 1
print('Could not establish SSH connection')
def open_sftp(self):
"""
Open an SFTP session on the SSH server.
:rtype: :class:`paramiko.sftp_client.SFTPClient`
:return: An SFTP client object.
"""
return self._ssh_client.open_sftp()
def get_file(self, src, dst):
"""
Open an SFTP session on the remote host, and copy a file from
the remote host to the specified path on the local host.
:type src: string
:param src: The path to the target file on the remote host.
:type dst: string
:param dst: The path on your local host where you want to
store the file.
"""
sftp_client = self.open_sftp()
sftp_client.get(src, dst)
def put_file(self, src, dst):
"""
Open an SFTP session on the remote host, and copy a file from
the local host to the specified path on the remote host.
:type src: string
:param src: The path to the target file on your local host.
:type dst: string
:param dst: The path on the remote host where you want to store
the file.
"""
sftp_client = self.open_sftp()
sftp_client.put(src, dst)
def open(self, filename, mode='r', bufsize=-1):
"""
Open an SFTP session to the remote host, and open a file on
that host.
:type filename: string
:param filename: The path to the file on the remote host.
:type mode: string
:param mode: The file interaction mode.
:type bufsize: integer
:param bufsize: The file buffer size.
:rtype: :class:`paramiko.sftp_file.SFTPFile`
:return: A paramiko proxy object for a file on the remote server.
"""
sftp_client = self.open_sftp()
return sftp_client.open(filename, mode, bufsize)
def listdir(self, path):
"""
List all of the files and subdirectories at the specified path
on the remote host.
:type path: string
:param path: The base path from which to obtain the list.
:rtype: list
:return: A list of files and subdirectories at the specified path.
"""
sftp_client = self.open_sftp()
return sftp_client.listdir(path)
def isdir(self, path):
"""
Check the specified path on the remote host to determine if
it is a directory.
:type path: string
:param path: The path to the directory that you want to check.
:rtype: integer
:return: If the path is a directory, the function returns 1.
If the path is a file or an invalid path, the function
returns 0.
"""
status = self.run('[ -d %s ] || echo "FALSE"' % path)
if status[1].startswith('FALSE'):
return 0
return 1
def exists(self, path):
"""
Check the remote host for the specified path, or a file
at the specified path. This function returns 1 if the
path or the file exist on the remote host, and returns 0 if
the path or the file does not exist on the remote host.
:type path: string
:param path: The path to the directory or file that you want to check.
:rtype: integer
:return: If the path or the file exist, the function returns 1.
If the path or the file do not exist on the remote host,
the function returns 0.
"""
status = self.run('[ -a %s ] || echo "FALSE"' % path)
if status[1].startswith('FALSE'):
return 0
return 1
def run(self, command):
"""
Run a command on the remote host.
:type command: string
:param command: The command that you want to send to the remote host.
:rtype: tuple
:return: This function returns a tuple that contains an integer status,
the stdout from the command, and the stderr from the command.
"""
status = 0
try:
t = self._ssh_client.exec_command(command)
except paramiko.SSHException:
# No channels were opened, so there is nothing to read or close.
return 1, '', ''
std_out = t[1].read()
std_err = t[2].read()
t[0].close()
t[1].close()
t[2].close()
return status, std_out, std_err
def run_pty(self, command):
"""
Request a pseudo-terminal from a server, and execute a command on that
server.
:type command: string
:param command: The command that you want to run on the remote host.
:rtype: :class:`paramiko.channel.Channel`
:return: An open channel object.
"""
channel = self._ssh_client.get_transport().open_session()
channel.get_pty()
channel.exec_command(command)
return channel
def close(self):
"""
Close an SSH session and any open channels that are tied to it.
"""
transport = self._ssh_client.get_transport()
transport.close()
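# Example usage (illustrative sketch only; the host, key path and command are
# placeholders, not values required by this module):
#   client = SSHClient('192.0.2.10', user='root', ssh_key_file='~/.ssh/id_rsa')
#   status, stdout, stderr = client.run('uname -a')
#   client.put_file('local.txt', '/tmp/remote.txt')
#   client.close()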
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import argparse
import logging
import multiprocessing
import os
import sys
import tempfile
from copy import deepcopy
from functools import cmp_to_key
from functools import partial
import sh
from six.moves import configparser
from dlrn.build import build_worker
from dlrn.config import ConfigOptions
from dlrn.config import getConfigOptions
from dlrn.config import setup_logging
from dlrn.db import CIVote
from dlrn.db import closeSession
from dlrn.db import Commit
from dlrn.db import getCommits
from dlrn.db import getLastBuiltCommit
from dlrn.db import getLastProcessedCommit
from dlrn.db import getSession
from dlrn.db import Project
from dlrn.notifications import sendnotifymail
from dlrn.notifications import submit_review
from dlrn.reporting import genreports
from dlrn.repositories import getsourcebranch
from dlrn.rpmspecfile import RpmSpecCollection
from dlrn.rpmspecfile import RpmSpecFile
from dlrn.rsync import sync_repo
from dlrn.rsync import sync_symlinks
from dlrn.utils import aggregate_repo_files
from dlrn.utils import dumpshas2file
from dlrn.utils import import_object
from dlrn.utils import isknownerror
from dlrn.utils import lock_file
from dlrn.utils import saveYAML_commit
from dlrn.utils import timesretried
from dlrn import version
logger = logging.getLogger("dlrn")
def deprecation():
# We will still call main, but will indicate that this way of calling
# the application will be deprecated.
print("Using the 'delorean' command has been deprecated. Please use 'dlrn'"
" instead.")
main()
def _add_commits(project_toprocess, toprocess, options, session):
# The first entry in the list of commits is a commit we have
# already processed, we want to process it again only if in dev
# mode or distro hash has changed, we can't simply check
# against the last commit in the db, as multiple commits can
# have the same commit date
for commit_toprocess in project_toprocess:
# We are adding an extra check here to cover a rare corner case:
# if we have two commits A and B with the exact same dt_commit, in a
# first pass we will build either just the last one (if we switched
# tags), or both. If we built the last one (A), we do not want to
# build the other (B), because B would be a previous commit.
# The only way to prevent this is to check that we have not built a
# commit with the same dt_commit and same distro and extended hashes.
# This could only be an issue if, for some reason, we want to discard
# commit A and build commit B in the future, but we can work around
# this by adding a change to the distgit.
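# Hypothetical illustration: commits A and B share the same dt_commit and the
# same distro/extended hashes; once A has been built, the second query below
# matches B on dt_commit, so B is skipped instead of being built as an older
# commit.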
if options.dev is True or \
options.run or \
(not session.query(Commit).filter(
Commit.commit_hash == commit_toprocess.commit_hash,
Commit.distro_hash == commit_toprocess.distro_hash,
Commit.extended_hash == commit_toprocess.extended_hash,
Commit.type == commit_toprocess.type,
Commit.status != "RETRY").all()
and not session.query(Commit).filter(
Commit.dt_commit == commit_toprocess.dt_commit,
Commit.distro_hash == commit_toprocess.distro_hash,
Commit.extended_hash == commit_toprocess.extended_hash,
Commit.type == commit_toprocess.type,
Commit.status != "RETRY").all()):
toprocess.append(commit_toprocess)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--config-file',
default='projects.ini',
help="Config file. Default: projects.ini")
parser.add_argument('--config-override', action='append',
help="Override a configuration option from the"
" config file. Specify it as: "
"section.option=value. Can be used multiple "
"times if more than one override is needed.")
parser.add_argument('--info-repo',
help="use a local distroinfo repo instead of"
" fetching the default one. Only applies when"
" pkginfo_driver is rdoinfo or downstream in"
" projects.ini")
parser.add_argument('--build-env', action='append',
help="Variables for the build environment.")
parser.add_argument('--local', action="store_true",
help="Use local git repos if possible. Only commited"
" changes in the local repo will be used in the"
" build.")
parser.add_argument('--head-only', action="store_true",
help="Build from the most recent Git commit only.")
group = parser.add_mutually_exclusive_group()
group.add_argument('--project-name', action='append',
help="Build a specific project name only."
" Use multiple times to build more than one "
"project in a run.")
group.add_argument('--package-name', action='append',
help="Build a specific package name only."
" Use multiple times to build more than one "
"package in a run.")
parser.add_argument('--dev', action="store_true",
help="Don't reset packaging git repo, force build "
"and add public master repo for dependencies "
"(dev mode).")
parser.add_argument('--log-commands', action="store_true",
help="Log the commands run by dlrn.")
parser.add_argument('--use-public', action="store_true",
help="Use the public master repo for dependencies "
"when doing install verification.")
parser.add_argument('--order', action="store_true",
help="Compute the build order according to the spec "
"files instead of the dates of the commits. "
"Implies --sequential.")
parser.add_argument('--sequential', action="store_true",
help="Run all actions sequentially, regardless of the"
" number of workers specified in projects.ini.")
parser.add_argument('--status', action="store_true",
help="Get the status of packages.")
parser.add_argument('--recheck', action="store_true",
help="Force a rebuild for a particular package. "
"Implies --package-name")
parser.add_argument('--force-recheck', action="store_true",
help="Force a rebuild for a particular package, even "
"if its last build was successful. Requires setting "
"allow_force_rechecks=True in projects.ini. "
"Implies --package-name and --recheck")
parser.add_argument('--version',
action='version',
version=version.version_info.version_string())
parser.add_argument('--run',
help="Run a program instead of trying to build. "
"Implies --head-only")
parser.add_argument('--stop', action="store_true",
help="Stop on error.")
parser.add_argument('--verbose-build', action="store_true",
help="Show verbose output during the package build.")
parser.add_argument('--verbose-mock', action="store_true",
help=argparse.SUPPRESS)
parser.add_argument('--no-repo', action="store_true",
help="Do not generate a repo with all the built "
"packages.")
parser.add_argument('--debug', action='store_true',
help="Print debug logs")
options = parser.parse_args(sys.argv[1:])
setup_logging(options.debug)
if options.verbose_mock:
logger.warning('The --verbose-mock command-line option is deprecated.'
' Please use --verbose-build instead.')
options.verbose_build = options.verbose_mock
global verbose_build
verbose_build = options.verbose_build
cp = configparser.RawConfigParser()
cp.read(options.config_file)
if options.log_commands is True:
logging.getLogger("sh.command").setLevel(logging.INFO)
if options.order is True:
options.sequential = True
config_options = ConfigOptions(cp, overrides=options.config_override)
if options.dev:
_, tmpdb_path = tempfile.mkstemp()
logger.info("Using file %s for temporary db" % tmpdb_path)
config_options.database_connection = "sqlite:///%s" % tmpdb_path
session = getSession(config_options.database_connection)
pkginfo_driver = config_options.pkginfo_driver
global pkginfo
pkginfo = import_object(pkginfo_driver, cfg_options=config_options)
packages = pkginfo.getpackages(local_info_repo=options.info_repo,
tags=config_options.tags,
dev_mode=options.dev)
if options.project_name:
pkg_names = [p['name'] for p in packages
if p['project'] in options.project_name]
elif options.package_name:
pkg_names = options.package_name
else:
pkg_names = None
if options.status is True:
if not pkg_names:
pkg_names = [p['name'] for p in packages]
for name in pkg_names:
package = [p for p in packages if p['name'] == name][0]
for build_type in package.get('types', ['rpm']):
commit = getLastProcessedCommit(
session, name, 'invalid status',
type=build_type)
if commit:
print("{:>9}".format(build_type), name, commit.status)
else:
print("{:>9}".format(build_type), name, 'NO_BUILD')
sys.exit(0)
if pkg_names:
pkg_name = pkg_names[0]
else:
pkg_name = None
def recheck_commit(commit, force):
if commit.status == 'SUCCESS':
if not force:
logger.error(
"Trying to recheck an already successful commit,"
" ignoring. If you want to force it, use --force-recheck"
" and set allow_force_rechecks=True in projects.ini")
sys.exit(1)
else:
logger.info("Forcefully rechecking a successfully built "
"commit for %s" % commit.project_name)
elif commit.status == 'RETRY':
# In this case, we are going to retry anyway, so
# do nothing and exit
logger.warning("Trying to recheck a commit in RETRY state,"
" ignoring.")
sys.exit(0)
# We could set the status to RETRY here, but if we have gone
# beyond max_retries it wouldn't work as expected. Thus, our
# only chance is to remove the commit
session.delete(commit)
session.commit()
sys.exit(0)
if options.recheck is True:
if not pkg_name:
logger.error('Please use --package-name or --project-name '
'with --recheck.')
sys.exit(1)
if options.force_recheck and config_options.allow_force_rechecks:
force_recheck = True
else:
force_recheck = False
package = [p for p in packages if p['name'] == pkg_name][0]
for build_type in package.get('types', ['rpm']):
commit = getLastProcessedCommit(session, pkg_name, type=build_type)
if commit:
recheck_commit(commit, force_recheck)
else:
logger.error("There are no existing commits for package %s",
pkg_name)
sys.exit(1)
# When we run a program instead of building, we don't care about
# the commits; we just want to run once per package.
if options.run:
options.head_only = True
# Build a list of commits we need to process
toprocess = []
skipped_list = []
if not pkg_name and not pkg_names:
pool = multiprocessing.Pool() # This will use all the system cpus
# Use functools.partial to iterate on the packages to process,
# while keeping a few options fixed
getinfo_wrapper = partial(getinfo, local=options.local,
dev_mode=options.dev,
head_only=options.head_only,
db_connection=config_options.
database_connection)
iterator = pool.imap(getinfo_wrapper, packages)
while True:
try:
project_toprocess, updated_pkg, skipped = iterator.next()
for package in packages:
if package['name'] == updated_pkg['name']:
if package['upstream'] == 'Unknown':
package['upstream'] = updated_pkg['upstream']
logger.debug(
"Updated upstream for package %s to %s",
package['name'], package['upstream'])
break
if skipped:
skipped_list.append(updated_pkg['name'])
_add_commits(project_toprocess, toprocess, options, session)
except StopIteration:
break
pool.close()
pool.join()
else:
for package in packages:
if package['name'] in pkg_names:
project_toprocess, _, skipped = getinfo(
package, local=options.local,
dev_mode=options.dev,
head_only=options.head_only,
db_connection=config_options.
database_connection)
if skipped:
skipped_list.append(package['name'])
_add_commits(project_toprocess, toprocess, options, session)
closeSession(session) # Close session, will reopen during post_build
# Store skip list
datadir = os.path.realpath(config_options.datadir)
if not os.path.exists(os.path.join(datadir, 'repos')):
os.makedirs(os.path.join(datadir, 'repos'))
with open(os.path.join(datadir, 'repos', 'skiplist.txt'), 'w') as fp:
for pkg in skipped_list:
fp.write(pkg + '\n')
# Check if there is any commit at all to process
if len(toprocess) == 0:
if not pkg_name:
# Use a shorter message if this was a full run
logger.info("No commits to build.")
else:
logger.info("No commits to build. If this is not expected, please"
" make sure the package name(s) are correct, and that "
"any failed commit you want to rebuild has been "
"removed from the database.")
return 0
# If requested, sort according to build and install
# dependencies.
if options.order is True:
# collect info from all spec files
logger.info("Reading rpm spec files")
projects = sorted([c.project_name for c in toprocess])
speclist = []
bootstraplist = []
for project_name in projects:
# Preprocess spec if needed
pkginfo.preprocess(package_name=project_name)
filename = None
for f in os.listdir(pkginfo.distgit_dir(project_name)):
if f.endswith('.spec'):
filename = f
if filename:
specpath = os.path.join(pkginfo.distgit_dir(project_name),
filename)
speclist.append(sh.rpmspec('-D', 'repo_bootstrap 1',
'-P', specpath))
# Check if repo_bootstrap is defined in the package.
# If so, we'll need to rebuild after the whole bootstrap
rawspec = open(specpath).read(-1)
if 'repo_bootstrap' in rawspec:
bootstraplist.append(project_name)
else:
logger.warning("Could not find a spec for package %s" %
project_name)
logger.debug("Packages to rebuild: %s" % bootstraplist)
specs = RpmSpecCollection([RpmSpecFile(spec)
for spec in speclist])
# compute order according to BuildRequires
logger.info("Computing build order")
orders = specs.compute_order()
# hack because the package name is not consistent with the directory
# name and the spec file name
if 'python-networking_arista' in orders:
orders.insert(orders.index('python-networking_arista'),
'python-networking-arista')
# sort the commits according to the score of their project and
# then use the timestamp of the commits as a secondary key
def my_cmp(a, b):
if a.project_name == b.project_name:
_a = a.dt_commit
_b = b.dt_commit
else:
_a = orders.index(a.project_name) if a.project_name in \
orders else sys.maxsize
_b = orders.index(b.project_name) if b.project_name in \
orders else sys.maxsize
# cmp is no longer available in python3 so replace it. See Ordering
# Comparisons on:
# https://docs.python.org/3.0/whatsnew/3.0.html
return (_a > _b) - (_a < _b)
toprocess.sort(key=cmp_to_key(my_cmp))
else:
# sort according to the timestamp of the commits
toprocess.sort()
exit_code = 0
if options.sequential is True:
toprocess_copy = deepcopy(toprocess)
for commit in toprocess:
status = build_worker(packages, commit, run_cmd=options.run,
build_env=options.build_env,
dev_mode=options.dev,
use_public=options.use_public,
order=options.order, sequential=True)
exception = status[3]
consistent = False
datadir = os.path.realpath(config_options.datadir)
with lock_file(os.path.join(datadir, 'remote.lck')):
session = getSession(config_options.database_connection)
if exception is not None:
logger.error("Received exception %s" % exception)
failures = 1
else:
if not options.run:
failures = post_build(status, packages, session,
build_repo=not options.no_repo)
consistent = (failures == 0)
exit_value = process_build_result(status, packages, session,
toprocess_copy,
dev_mode=options.dev,
run_cmd=options.run,
stop=options.stop,
build_env=options.build_env,
head_only=options.head_only,
consistent=consistent,
failures=failures)
closeSession(session)
if exit_value != 0:
exit_code = exit_value
if options.stop and exit_code != 0:
return exit_code
else:
# Setup multiprocessing pool
pool = multiprocessing.Pool(config_options.workers)
# Use functools.partial to iterate on the commits to process,
# while keeping a few options fixed
build_worker_wrapper = partial(build_worker, packages,
run_cmd=options.run,
build_env=options.build_env,
dev_mode=options.dev,
use_public=options.use_public,
order=options.order, sequential=False)
iterator = pool.imap(build_worker_wrapper, toprocess)
while True:
try:
status = iterator.next()
exception = status[3]
consistent = False
datadir = os.path.realpath(config_options.datadir)
with lock_file(os.path.join(datadir, 'remote.lck')):
session = getSession(config_options.database_connection)
if exception is not None:
logger.info("Received exception %s" % exception)
failures = 1
else:
# Create repo, build versions.csv file.
# This needs to be sequential
if not options.run:
failures = post_build(
status, packages, session,
build_repo=not options.no_repo)
consistent = (failures == 0)
exit_value = process_build_result(
status, packages,
session, toprocess,
dev_mode=options.dev,
run_cmd=options.run,
stop=options.stop,
build_env=options.build_env,
head_only=options.head_only,
consistent=consistent,
failures=failures)
closeSession(session)
if exit_value != 0:
exit_code = exit_value
if options.stop and exit_code != 0:
return exit_code
except StopIteration:
break
pool.close()
pool.join()
# If we were bootstrapping, set the packages that required it to RETRY
session = getSession(config_options.database_connection)
if options.order is True and not pkg_name:
for bpackage in bootstraplist:
commit = getLastProcessedCommit(session, bpackage)
commit.status = 'RETRY'
session.add(commit)
session.commit()
genreports(packages, options.head_only, session, [])
closeSession(session)
if options.dev:
os.remove(tmpdb_path)
return exit_code
def process_build_result(status, *args, **kwargs):
if status[0].type == "rpm":
return process_build_result_rpm(status, *args, **kwargs)
elif status[0].type == "container":
return process_build_result_container(status, *args, **kwargs)
else:
raise Exception("Unknown type %s" % status[0].type)
def process_build_result_container(
status, packages, session, packages_to_process,
dev_mode=False, run_cmd=False, stop=False,
build_env=None, head_only=False, consistent=False,
failures=0):
raise NotImplementedError()
def process_build_result_rpm(
status, packages, session, packages_to_process,
dev_mode=False, run_cmd=False, stop=False,
build_env=None, head_only=False, consistent=False,
failures=0):
config_options = getConfigOptions()
commit = status[0]
built_rpms = status[1]
notes = status[2]
exception = status[3]
commit_hash = commit.commit_hash
project = commit.project_name
project_info = session.query(Project).filter(
Project.project_name == project).first()
if not project_info:
project_info = Project(project_name=project, last_email=0)
exit_code = 0
if run_cmd:
if exception is not None:
exit_code = 1
if stop:
return exit_code
return exit_code
if exception is None:
commit.status = "SUCCESS"
commit.notes = notes
commit.artifacts = ",".join(built_rpms)
else:
logger.error("Received exception %s" % exception)
datadir = os.path.realpath(config_options.datadir)
yumrepodir = os.path.join(datadir, "repos",
commit.getshardedcommitdir())
logfile = os.path.join(yumrepodir,
"rpmbuild.log")
# If the log file hasn't been created, we add what we have.
# This happens if the rpm build script didn't run.
if not os.path.exists(yumrepodir):
os.makedirs(yumrepodir)
if not os.path.exists(logfile):
with open(logfile, "w") as fp:
fp.write(str(exception))
if (isknownerror(logfile) and
(timesretried(project, session, commit_hash, commit.distro_hash) <
config_options.maxretries)):
logger.exception("Known error building packages for %s,"
" will retry later" % project)
commit.status = "RETRY"
commit.notes = str(exception)
# do not switch from an error exit code to a retry
# exit code
if exit_code != 1:
exit_code = 2
else:
exit_code = 1
if not project_info.suppress_email():
sendnotifymail(packages, commit)
project_info.sent_email()
session.add(project_info)
# Only allow submitting a gerrit review if the last build
# was successful or non-existent, to avoid creating a gerrit
# review for the same problem multiple times.
if config_options.gerrit is not None:
if build_env:
env_vars = list(build_env)
else:
env_vars = []
last_build = getLastProcessedCommit(session, project)
if not last_build or last_build.status == 'SUCCESS':
try:
submit_review(commit, packages, env_vars)
except Exception:
logger.error('Unable to create review '
'see review.log')
else:
logger.info('Last build not successful '
'for %s' % project)
commit.status = "FAILED"
commit.notes = str(exception)
if stop:
return exit_code
# Add commit to the session
session.add(commit)
genreports(packages, head_only, session, packages_to_process)
# Export YAML file containing commit metadata
export_commit_yaml(commit)
try:
sync_repo(commit)
except Exception as e:
logger.error('Repo sync failed for project %s' % project)
consistent = False # If we were consistent before, we are not anymore
if exit_code == 0:  # The run was ok so far, so mark it as failed now
exit_code = 1
# We need to make the commit status be "failed"
commit.status = "FAILED"
commit.notes = str(e)
session.add(commit)
# And open a review if needed
if config_options.gerrit is not None:
if build_env:
env_vars = list(build_env)
else:
env_vars = []
try:
submit_review(commit, packages, env_vars)
except Exception:
logger.error('Unable to create review '
'see review.log')
session.commit()
# Generate the current and consistent symlinks
if exception is None:
dirnames = ['current']
datadir = os.path.realpath(config_options.datadir)
yumrepodir = os.path.join(datadir, "repos",
commit.getshardedcommitdir())
yumrepodir_abs = os.path.join(datadir, yumrepodir)
if consistent:
dirnames.append('consistent')
else:
if config_options.use_components:
logger.info('%d packages not built correctly for component'
' %s: not updating the consistent symlink' %
(failures, commit.component))
else:
logger.info('%d packages not built correctly: not updating'
' the consistent symlink' % failures)
for dirname in dirnames:
if config_options.use_components:
target_repo_dir = os.path.join(datadir, "repos/component",
commit.component, dirname)
source_repo_dir = os.path.join(datadir, "repos/component",
commit.component)
else:
target_repo_dir = os.path.join(datadir, "repos", dirname)
source_repo_dir = os.path.join(datadir, "repos")
os.symlink(os.path.relpath(yumrepodir_abs, source_repo_dir),
target_repo_dir + "_")
os.rename(target_repo_dir + "_", target_repo_dir)
# If using components, synchronize the upper-level repo files
if config_options.use_components:
for dirname in dirnames:
aggregate_repo_files(dirname, datadir, session,
config_options.reponame, hashed_dir=True)
# And synchronize them
sync_symlinks(commit)
if dev_mode is False:
if consistent:
# We have a consistent repo. Let's create a CIVote entry in the DB
vote = CIVote(commit_id=commit.id, ci_name='consistent',
ci_url='', ci_vote=True, ci_in_progress=False,
timestamp=int(commit.dt_build), notes='',
component=commit.component)
session.add(vote)
session.commit()
return exit_code
def export_commit_yaml(commit):
config_options = getConfigOptions()
# Export YAML file containing commit metadata
datadir = os.path.realpath(config_options.datadir)
yumrepodir = os.path.join(datadir, "repos",
commit.getshardedcommitdir())
saveYAML_commit(commit, os.path.join(yumrepodir, 'commit.yaml'))
def post_build(status, *args, **kwargs):
if status[0].type == "rpm":
return post_build_rpm(status, *args, **kwargs)
elif status[0].type == "container":
return post_build_container(status, *args, **kwargs)
else:
raise Exception("Unknown type %s" % status[0].type)
def post_build_container(status, packages, session, build_repo=None):
raise NotImplementedError()
def post_build_rpm(status, packages, session, build_repo=True):
config_options = getConfigOptions()
commit = status[0]
built_rpms = status[1]
project_name = commit.project_name
commit_hash = commit.commit_hash
datadir = os.path.realpath(config_options.datadir)
yumrepodir = os.path.join("repos", commit.getshardedcommitdir())
yumrepodir_abs = os.path.join(datadir, yumrepodir)
shafile = open(os.path.join(yumrepodir_abs, "versions.csv"), "w")
shafile.write("Project,Source Repo,Source Sha,Dist Repo,Dist Sha,"
"Status,Last Success Timestamp,Component,Extended Sha,"
"Pkg NVR\n")
failures = 0
for otherproject in packages:
if (config_options.use_components and 'component' in otherproject and
otherproject['component'] != commit.component):
# Only dump information and create symlinks for the same component
continue
otherprojectname = otherproject["name"]
if otherprojectname == project_name:
# Output shas for this project
dumpshas2file(shafile, commit, otherproject["upstream"],
otherproject["master-distgit"], "SUCCESS",
commit.dt_build, commit.component, built_rpms)
continue
# Output shas of all other projects represented in this repo
last_success = getCommits(session, project=otherprojectname,
with_status="SUCCESS",
type=commit.type).first()
last_processed = getCommits(session, project=otherprojectname,
type=commit.type).first()
if last_success:
if build_repo:
for rpm in last_success.artifacts.split(","):
rpm_link_src = os.path.join(yumrepodir_abs,
os.path.split(rpm)[1])
os.symlink(os.path.relpath(os.path.join(datadir, rpm),
yumrepodir_abs), rpm_link_src)
last = last_success
else:
last = last_processed
if last:
if last.artifacts:
rpmlist = last.artifacts.split(",")
else:
rpmlist = []
upstream = otherproject.get('upstream', '')
dumpshas2file(shafile, last, upstream,
otherproject["master-distgit"],
last_processed.status, last.dt_build,
commit.component, rpmlist)
if last_processed.status != 'SUCCESS':
failures += 1
else:
failures += 1
shafile.close()
if build_repo:
# Use createrepo_c when available
try:
from sh import createrepo_c
sh.createrepo = createrepo_c
except ImportError:
pass
if config_options.include_srpm_in_repo:
sh.createrepo(yumrepodir_abs)
else:
sh.createrepo('-x', '*.src.rpm', yumrepodir_abs)
with open(os.path.join(
yumrepodir_abs, "%s.repo" % config_options.reponame),
"w") as fp:
if config_options.use_components:
repo_id = "%s-component-%s" % (config_options.reponame,
commit.component)
else:
repo_id = config_options.reponame
fp.write("[%s]\nname=%s-%s-%s\nbaseurl=%s/%s\nenabled=1\n"
"gpgcheck=0\npriority=1\n" % (
repo_id,
config_options.reponame,
project_name, commit_hash,
config_options.baseurl,
commit.getshardedcommitdir()))
return failures
def getinfo(package, local=False, dev_mode=False, head_only=False,
db_connection=None, type="rpm"):
project = package["name"]
since = "-1"
session = getSession(db_connection)
commit = getLastProcessedCommit(session, project, type=type)
if commit:
# If we have switched source branches, we want to behave
# as if no previous commits had been built, and only build
# the last one
if commit.commit_branch == getsourcebranch(package):
# This will return all commits since the last handled commit
# including the last handled commit, remove it later if needed.
since = "--after=%d" % (commit.dt_commit)
else:
# The last processed commit belongs to a different branch. Just
# in case, let's check if we built a previous commit from the
# current branch
commit = getLastBuiltCommit(session, project,
getsourcebranch(package), type=type)
if commit:
logger.info("Last commit belongs to another branch, but"
" we're ok with that")
since = "--after=%d" % (commit.dt_commit)
# In any case, we just want to build the last commit, if any
head_only = True
project_toprocess, skipped = pkginfo.getinfo(
project=project, package=package,
since=since, local=local,
dev_mode=dev_mode, type=type)
closeSession(session)
# If since == -1, then we only want to trigger a build for the
# most recent change
if since == "-1" or head_only:
del project_toprocess[:-1]
return project_toprocess, package, skipped
|
|
#!/usr/bin/env python
"""
instaRaider.py
This file contains code that is originally Copyright (c) 2014 Amir Kurtovic.
"""
from bs4 import BeautifulSoup
import selenium.webdriver as webdriver
import re
from time import sleep
import urllib
import urllib2
import os
import sys
import argparse
import pdb
import requests
class instaRaider(object):
def getImageCount(self, url):
'''
Given a URL to an Instagram profile, return the number of photos posted
'''
response = urllib2.urlopen(url)
countsCode = re.search(r'counts\":{\"media\":\d+', response.read())
count = re.findall(r'\d+', countsCode.group())
return int(count[0])
def URLexists(self,url):
r = requests.head(url)
return r.status_code == requests.codes.ok
def loadInstagram(self, profileUrl):
'''
Using Selenium WebDriver, load Instagram page to get page source
'''
count = self.getImageCount(self.profileUrl)
print self.userName + " has " + str(count) + " photos on Instagram."
print "Loading Selenium WebDriver..."
# Load webdriver and scale window down
driver = webdriver.Firefox()
driver.set_window_size(200,200)
driver.set_window_position(100,100)
print "Loading Instagram profile..."
# load Instagram profile and wait for PAUSE
driver.get(self.profileUrl)
driver.implicitly_wait(self.PAUSE)
# Check if the profile is private. If so, exit
try:
driver.find_element_by_css_selector('.MediaComp')
except:
sys.exit("User profile is private. Aborting.")
clicks = (int(count)-60)/20+1
for x in range(3):
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
sys.stdout.write('.')
sys.stdout.flush()
sleep(self.PAUSE)
#pdb.set_trace()
# Load full Instagram profile if more than initial 60 photos desired
if (args.count < 61):
pass
else:
# Click on "Load more..." label
##pdb.set_trace()
## seems not working
##element = driver.find_element_by_xpath(self.loadLabelXPATH)
## new
element = driver.find_element_by_class_name('PhotoGridMoreButton')
for y in range(clicks):
#print(y)
element.click()
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
sys.stdout.write('.')
sys.stdout.flush()
sleep(self.PAUSE)
# After loading all profile photos, return the source to getPhotos()
source = BeautifulSoup(driver.page_source)
# close Firefox window
driver.close()
return source
def validUser(self, userName):
'''
returns True if Instagram username is valid
'''
# check if Instagram username is valid
req = urllib2.Request(self.profileUrl)
try:
urllib2.urlopen(req)
except:
return False
# if req doesn't fail, user profile exists
return True
def photoExists(self, url):
'''
Returns true if photo exists
Used when checking which suffix Instagram used for full-res photo
url: URL to Instagram photo
'''
try:
urllib2.urlopen(url)
except:
return False
return True
def getPhotos(self, source, userName, count):
'''
Given source code for loaded Instagram page,
extract all hrefs and download full-resolution photos
source: HTML source code of Instagram profile page
'''
# directory where photos will be saved
directory = './Images/' + userName + '/'
# check if directory exists, if not, make it
if not os.path.exists(directory):
os.makedirs(directory)
# logfile to store urls in csv format
logfile = './Images/' + userName + '/' + userName + '.csv'
try:
file = open(logfile, "a")
except IOError:
print "\nLog file does not exist."
# photo number for file names
photoNumber = 0
# indexes for progress bar
photosSaved = 0
progressBar = 0
print "\nRaiding Instagram..."
print "Saving photos to " + directory
print "------"
# print progress bar
print "Photos saved so far:"
print "---------10--------20--------30--------40--------50"
##pdb.set_trace()
for x in source.findAll('div', {'class':'Image'}):
if (photoNumber >= count):
break
else:
# increment photonumber for next image
photoNumber += 1
#extract url from each photo
rawUrl = x['src']
# rawUrl is the thumbnail url
# for photos after 2014:
# I need to remove '/s306x306/e15' from it
# https://scontent.cdninstagram.com/hphotos-xaf1/t51.2885-15/s306x306/e15/11055946_406244246223563_666799740_n.jpg
# for photos before 2014?
# I need to change trailing '6.jpg' to '7.jpg'
# https://scontent.cdninstagram.com/hphotos-xpa1/outbound-distilleryimage9/t0.0-17/OBPTH/7502a400c17b11e1a39b1231381b7ba1_6.jpg
# for some, I need to change trailing '6.jpg' to '8.jpg'
# https://scontent.cdninstagram.com/hphotos-xfa1/outbound-distilleryimage1/t0.0-17/OBPTH/60e6518ab40211e3b686124d53b510cd_6.jpg
rawUrl = rawUrl.encode('utf-8')
if '/s306x306/e15' in rawUrl:
photoUrl = rawUrl.replace('/s306x306/e15','')
print(userName + " " + str(count) + " " + str(photoNumber) + ": scheme 1")
elif 'outbound-distilleryimage' in rawUrl:
photoUrl = rawUrl.replace('6.jpg','7.jpg')
if(not self.URLexists(photoUrl)):
photoUrl = rawUrl.replace('6.jpg','8.jpg')
print(userName + " " + str(count) + " " + str(photoNumber) + ": scheme 2")
print(rawUrl)
print(photoUrl)
else:
print(userName + " " + str(count) + " " + str(photoNumber) + ": probably a thumbnail image: ")
print(rawUrl)
photoUrl = rawUrl
photoName = directory + userName + "_" + str(photoNumber) + '.jpg'
# save full-resolution photo
urllib.urlretrieve(photoUrl, photoName)
# save filename and url to CSV file
file.write(photoUrl + "," + photoName + "\n")
# print hash to progress bar
if (photosSaved == 50):
photosSaved = 1
progressBar += 50
sys.stdout.write('\n')
sys.stdout.write('#')
sys.stdout.flush()
else:
# increment progress bar
photosSaved += 1
sys.stdout.write('#')
sys.stdout.flush()
sleep(self.PAUSE)
print "\n------"
print "Saved " + str(photoNumber) + " images to " + directory
# close logfile
file.close()
print "Saved activity in logfile: " + logfile
def __init__(self, userName):
self.userName = userName
self.profileUrl = 'http://instagram.com/' + userName + '/'
self.PAUSE = 1
self.loadLabelXPATH = "/html/body/div/div/div/section/div/span/a/span[2]/span/span"
if __name__ == '__main__':
# parse arguments
parser = argparse.ArgumentParser(description="InstaRaider")
parser.add_argument('-u', '--user', help="Instagram username", required=True)
parser.add_argument('-c', '--count', help="# of photos to download", type=int)
args = parser.parse_args()
if (args.user):
userName = args.user
raider = instaRaider(userName)
url = raider.profileUrl
if(raider.validUser(userName)):
if not args.count:
count = raider.getImageCount(url)
args.count = count
else:
count = args.count
if raider.getImageCount(url) < count:
print "You want to dowload %r photos." % args.count
print "The user only has %r photo." % raider.getImageCount(url)
print "Downloading all photos."
count = raider.getImageCount(url)
args.count = count
# Get source code from fully loaded Instagram profile page
source = raider.loadInstagram(url)
# Download all photos identified on profile page
raider.getPhotos(source, userName, count)
else:
print "Username " + userName + " is not valid."
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains the MSIE zone settings plugin."""
import logging
from plaso.lib import event
from plaso.parsers.winreg_plugins import interface
__author__ = 'Elizabeth Schweinsberg (beth@bethlogic.net)'
class MsieZoneSettingsPlugin(interface.KeyPlugin):
"""Windows Registry plugin for parsing the MSIE Zones settings."""
NAME = 'winreg_msie_zone'
REG_TYPE = 'NTUSER'
REG_KEYS = [
(u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings'
u'\\Zones'),
(u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings'
u'\\Lockdown_Zones')]
URLS = ['http://support.microsoft.com/kb/182569']
ZONE_NAMES = {
'0': '0 (My Computer)',
'1': '1 (Local Intranet Zone)',
'2': '2 (Trusted sites Zone)',
'3': '3 (Internet Zone)',
'4': '4 (Restricted Sites Zone)',
'5': '5 (Custom)'
}
KNOWN_PERMISSIONS_VALUE_NAMES = [
'1001', '1004', '1200', '1201', '1400', '1402', '1405', '1406', '1407',
'1601', '1604', '1606', '1607', '1608', '1609', '1800', '1802', '1803',
'1804', '1809', '1A04', '2000', '2001', '2004', '2100', '2101', '2102',
'2200', '2201', '2300' ]
CONTROL_VALUES_PERMISSIONS = {
0x00000000: '0 (Allow)',
0x00000001: '1 (Prompt User)',
0x00000003: '3 (Not Allowed)',
0x00010000: '0x00010000 (Administrator approved)'
}
CONTROL_VALUES_SAFETY = {
0x00010000: '0x00010000 (High safety)',
0x00020000: '0x00020000 (Medium safety)',
0x00030000: '0x00030000 (Low safety)'
}
CONTROL_VALUES_1A00 = {
0x00000000: ('0x00000000 (Automatic logon with current user name and '
'password)'),
0x00010000: '0x00010000 (Prompt for user name and password)',
0x00020000: '0x00020000 (Automatic logon only in Intranet zone)',
0x00030000: '0x00030000 (Anonymous logon)'
}
CONTROL_VALUES_1C00 = {
0x00000000: '0x00000000 (Disable Java)',
0x00010000: '0x00010000 (High safety)',
0x00020000: '0x00020000 (Medium safety)',
0x00030000: '0x00030000 (Low safety)',
0x00800000: '0x00800000 (Custom)'
}
FEATURE_CONTROLS = {
'1200': 'Run ActiveX controls and plug-ins',
'1400': 'Active scripting',
'1001': 'Download signed ActiveX controls',
'1004': 'Download unsigned ActiveX controls',
'1201': 'Initialize and script ActiveX controls not marked as safe',
'1206': 'Allow scripting of IE Web browser control',
'1207': 'Reserved',
'1208': 'Allow previously unused ActiveX controls to run without prompt',
'1209': 'Allow Scriptlets',
'120A': 'Override Per-Site (domain-based) ActiveX restrictions',
'120B': 'Override Per-Site (domain-based) ActiveX restrictions',
'1402': 'Scripting of Java applets',
'1405': 'Script ActiveX controls marked as safe for scripting',
'1406': 'Access data sources across domains',
'1407': 'Allow Programmatic clipboard access',
'1408': 'Reserved',
'1601': 'Submit non-encrypted form data',
'1604': 'Font download',
'1605': 'Run Java',
'1606': 'Userdata persistence',
'1607': 'Navigate sub-frames across different domains',
'1608': 'Allow META REFRESH',
'1609': 'Display mixed content',
'160A': 'Include local directory path when uploading files to a server',
'1800': 'Installation of desktop items',
'1802': 'Drag and drop or copy and paste files',
'1803': 'File Download',
'1804': 'Launching programs and files in an IFRAME',
'1805': 'Launching programs and files in webview',
'1806': 'Launching applications and unsafe files',
'1807': 'Reserved',
'1808': 'Reserved',
'1809': 'Use Pop-up Blocker',
'180A': 'Reserved',
'180B': 'Reserved',
'180C': 'Reserved',
'180D': 'Reserved',
'1A00': 'User Authentication: Logon',
'1A02': 'Allow persistent cookies that are stored on your computer',
'1A03': 'Allow per-session cookies (not stored)',
'1A04': 'Don\'t prompt for client cert selection when no certs exists',
'1A05': 'Allow 3rd party persistent cookies',
'1A06': 'Allow 3rd party session cookies',
'1A10': 'Privacy Settings',
'1C00': 'Java permissions',
'1E05': 'Software channel permissions',
'1F00': 'Reserved',
'2000': 'Binary and script behaviors',
'2001': '.NET: Run components signed with Authenticode',
'2004': '.NET: Run components not signed with Authenticode',
'2100': 'Open files based on content, not file extension',
'2101': 'Web sites in less privileged zone can navigate into this zone',
'2102': ('Allow script initiated windows without size/position '
'constraints'),
'2103': 'Allow status bar updates via script',
'2104': 'Allow websites to open windows without address or status bars',
'2105': 'Allow websites to prompt for information using scripted windows',
'2200': 'Automatic prompting for file downloads',
'2201': 'Automatic prompting for ActiveX controls',
'2300': 'Allow web pages to use restricted protocols for active content',
'2301': 'Use Phishing Filter',
'2400': '.NET: XAML browser applications',
'2401': '.NET: XPS documents',
'2402': '.NET: Loose XAML',
'2500': 'Turn on Protected Mode',
'2600': 'Enable .NET Framework setup',
'{AEBA21FA-782A-4A90-978D-B72164C80120}': 'First Party Cookie',
'{A8A88C49-5EB2-4990-A1A2-0876022C854F}': 'Third Party Cookie'
}
def GetEntries(self, key, **unused_kwargs):
"""Retrieves information of the Internet Settings Zones values.
The MSIE Feature controls are stored in the Zone specific subkeys in:
Internet Settings\\Zones key
Internet Settings\\Lockdown_Zones key
Args:
key: A Windows Registry key (instance of WinRegKey).
Yields:
An event object for each individual Internet Settings Registry key.
"""
text_dict = {}
if key.number_of_values == 0:
text_dict[u'Value'] = u'No values stored in key'
else:
for value in key.GetValues():
if not value.name:
value_name = '(default)'
else:
value_name = u'{0:s}'.format(value.name)
if value.DataIsString():
value_string = u'[{0:s}] {1:s}'.format(
value.data_type_string, value.data)
elif value.DataIsInteger():
value_string = u'[{0:s}] {1:d}'.format(
value.data_type_string, value.data)
elif value.DataIsMultiString():
value_string = u'[{0:s}] {1:s}'.format(
value.data_type_string, u''.join(value.data))
else:
value_string = u'[{0:s}]'.format(value.data_type_string)
text_dict[value_name] = value_string
yield event.WinRegistryEvent(
key.path, text_dict, timestamp=key.last_written_timestamp,
offset=key.offset)
if key.number_of_subkeys == 0:
logging.info('No subkeys for Internet Settings/Zones')
text_dict = {}
text_dict['Zone Subkeys'] = u'REGALERT No Zones set for Internet Settings'
yield event.WinRegistryEvent(
key.path, text_dict, timestamp=key.last_written_timestamp,
offset=key.offset)
else:
for zone_key in key.GetSubkeys():
# TODO: these values are stored in the Description value of the
# zone key. This solution will break on zone values that are larger
# than 5.
path = u'{0:s}\\{1:s}'.format(
key.path, self.ZONE_NAMES[zone_key.name])
text_dict = {}
# TODO: this plugin currently just dumps the values and does not
# distinguish between what is a feature control or not.
for value in zone_key.GetValues():
# Ignore the default value.
if not value.name:
continue
if value.DataIsString():
value_string = value.data
elif value.DataIsInteger():
if value.name in self.KNOWN_PERMISSIONS_VALUE_NAMES:
value_string = self.CONTROL_VALUES_PERMISSIONS[value.data]
elif value.name == '1A00':
value_string = self.CONTROL_VALUES_1A00[value.data]
elif value.name == '1C00':
value_string = self.CONTROL_VALUES_1C00[value.data]
elif value.name == '1E05':
value_string = self.CONTROL_VALUES_SAFETY[value.data]
else:
value_string = u'{0:d}'.format(value.data)
else:
value_string = u'[{0:s}]'.format(value.data_type_string)
if len(value.name) == 4 and value.name != 'Icon':
value_description = self.FEATURE_CONTROLS.get(value.name, 'UNKNOWN')
else:
value_description = self.FEATURE_CONTROLS.get(value.name, '')
if value_description:
feature_control = u'[{0:s}] {1:s}'.format(
value.name, value_description)
else:
feature_control = u'[{0:s}]'.format(value.name)
text_dict[feature_control] = value_string
yield event.WinRegistryEvent(
path, text_dict, timestamp=zone_key.last_written_timestamp)
class MsieZoneSettingsSoftwareZonesPlugin(MsieZoneSettingsPlugin):
"""Parses the Zones key in the Software hive."""
NAME = 'winreg_msie_zone_software'
REG_TYPE = 'SOFTWARE'
REG_KEYS = [
u'\\Microsoft\\Windows\\CurrentVersion\\Internet Settings\\Zones',
(u'\\Microsoft\\Windows\\CurrentVersion\\Internet Settings'
u'\\Lockdown_Zones'),
(u'\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Internet Settings'
u'\\Zones'),
(u'\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Internet Settings'
u'\\Lockdown_Zones')]
|
|
import json
import pytest
from oic.utils.http_util import Response, NoContent, Unauthorized
from oic.utils.authn.authn_context import AuthnBroker
from oic.utils.authn.client import verify_client
from oic.utils.authn.client import BearerHeader
from oic.utils.authn.client import ClientSecretPost
from oic.utils.authn.client import ClientSecretBasic
from oic.utils.authn.user import UserAuthnMethod
from oic.utils.authz import Implicit
from oic.utils import sdb
from oic.oauth2.dynreg import Provider
from oic.oauth2.dynreg import RegistrationRequest
from oic.oauth2.dynreg import ClientInfoResponse
from oic.oauth2.dynreg import ClientRegistrationError
from utils_for_tests import _eq
class DummyAuthn(UserAuthnMethod):
def __init__(self, srv, user):
UserAuthnMethod.__init__(self, srv)
self.user = user
def authenticated_as(self, cookie=None, **kwargs):
return {"uid": self.user}
class TestProvider(object):
SERVER_INFO = {
"version": "3.0",
"issuer": "https://connect-op.heroku.com",
"authorization_endpoint": "http://localhost:8088/authorization",
"token_endpoint": "http://localhost:8088/token",
"flows_supported": ["code", "token", "code token"],
}
CDB = {
"a1b2c3": {
"password": "hemligt",
"client_secret": "drickyoughurt"
},
"client1": {
"client_secret": "hemlighet",
"redirect_uris": [("http://localhost:8087/authz", None)]
}
}
@pytest.fixture(autouse=True)
def create_provider(self):
authn_broker = AuthnBroker()
authn_broker.add("UNDEFINED", DummyAuthn(None, "username"))
self.provider = Provider("pyoicserv",
sdb.SessionDB(
TestProvider.SERVER_INFO["issuer"]),
TestProvider.CDB,
authn_broker, Implicit(),
verify_client,
client_info_url="https://example.com/as",
client_authn_methods={
"client_secret_post": ClientSecretPost,
"client_secret_basic": ClientSecretBasic,
"bearer_header": BearerHeader})
def test_registration_endpoint(self):
request = RegistrationRequest(client_name="myself",
redirect_uris=["https://example.com/rp"])
resp = self.provider.registration_endpoint(request.to_json(), {})
assert isinstance(resp, Response)
data = json.loads(resp.message)
assert data["client_name"] == "myself"
assert _eq(data["redirect_uris"], ["https://example.com/rp"])
_resp = ClientInfoResponse().from_json(resp.message)
assert "client_id" in _resp
def test_registration_uri_error(self):
args = {
"redirect_uris": ["https://client.example.org/callback",
"https://client.example.org/callback2"],
"client_name": "My Example Client",
"client_name#ja-Jpan-JP":
"\u30AF\u30E9\u30A4\u30A2\u30F3\u30C8\u540D",
"token_endpoint_auth_method": "client_secret_basic",
"scope": "read write dolphin",
# invalid logo_uri
"logo_uri": "https://client.example.org/logo.png",
"jwks_uri": "https://client.example.org/my_public_keys.jwks"
}
request = RegistrationRequest(**args)
resp = self.provider.registration_endpoint(request.to_json(), {})
_resp = ClientRegistrationError().from_json(resp.message)
assert "error" in _resp
assert _resp["error"] == "invalid_client_metadata"
def test_client_registration_utf_8_client_name(self):
args = {
"redirect_uris": ["https://client.example.org/callback",
"https://client.example.org/callback2"],
"client_name": "My Example Client",
"client_name#ja-Jpan-JP":
"\u30AF\u30E9\u30A4\u30A2\u30F3\u30C8\u540D",
"token_endpoint_auth_method": "client_secret_basic",
"scope": "read write dolphin",
}
request = RegistrationRequest(**args)
resp = self.provider.registration_endpoint(request.to_json(), {})
_resp = ClientInfoResponse().from_json(resp.message)
assert _resp[
"client_name#ja-Jpan-JP"] == "\u30AF\u30E9\u30A4\u30A2\u30F3\u30C8\u540D"
assert _resp["client_name"] == "My Example Client"
def test_client_user_info_get(self):
args = {
"redirect_uris": ["https://client.example.org/callback",
"https://client.example.org/callback2"],
"client_name": "My Example Client",
"client_name#ja-Jpan-JP":
"\u30AF\u30E9\u30A4\u30A2\u30F3\u30C8\u540D",
"token_endpoint_auth_method": "client_secret_basic",
"scope": "read write dolphin",
}
request = RegistrationRequest(**args)
resp = self.provider.registration_endpoint(request.to_json(),
environ={})
_resp = ClientInfoResponse().from_json(resp.message)
resp = self.provider.client_info_endpoint(
"",
environ={"HTTP_AUTHORIZATION": "Bearer %s" % (
_resp["registration_access_token"],)},
query="client_id=%s" % _resp["client_id"])
_resp_cir = ClientInfoResponse().from_json(resp.message)
assert _resp == _resp_cir
def test_client_registration_update(self):
args = {
"redirect_uris": ["https://client.example.org/callback",
"https://client.example.org/callback2"],
"client_name": "My Example Client",
"client_name#ja-Jpan-JP":
"\u30AF\u30E9\u30A4\u30A2\u30F3\u30C8\u540D",
"token_endpoint_auth_method": "client_secret_basic",
"scope": "read write dolphin",
}
request = RegistrationRequest(**args)
resp = self.provider.registration_endpoint(request.to_json(),
environ={})
_resp = ClientInfoResponse().from_json(resp.message)
update = {
"client_id": _resp["client_id"],
"client_secret": _resp["client_secret"],
"redirect_uris": ["https://client.example.org/callback",
"https://client.example.org/alt"],
"scope": "read write dolphin",
"grant_types": ["authorization_code", "refresh_token"],
"token_endpoint_auth_method": "client_secret_basic",
"jwks_uri": "https://client.example.org/my_public_keys.jwks",
"client_name": "My New Example",
"client_name#fr": "Mon Nouvel Exemple",
}
update_req = RegistrationRequest(**update)
resp = self.provider.client_info_endpoint(
update_req.to_json(),
environ={"HTTP_AUTHORIZATION": "Bearer %s" % (
_resp["registration_access_token"],)},
method="PUT",
query="client_id=%s" % _resp["client_id"])
_resp_up = ClientInfoResponse().from_json(resp.message)
assert _resp_up["client_id"] == update["client_id"]
assert _resp_up["client_secret"] == update["client_secret"]
assert _resp_up["redirect_uris"] == update["redirect_uris"]
assert _resp_up["scope"] == update["scope"].split()
assert _resp_up["grant_types"] == update["grant_types"]
assert _resp_up["token_endpoint_auth_method"] == update[
"token_endpoint_auth_method"]
assert _resp_up["jwks_uri"] == update["jwks_uri"]
assert _resp_up["client_name"] == update["client_name"]
assert _resp_up["client_name#fr"] == update["client_name#fr"]
#
def test_client_registration_delete(self):
args = {
"redirect_uris": ["https://client.example.org/callback",
"https://client.example.org/callback2"],
"client_name": "My Example Client",
"client_name#ja-Jpan-JP":
"\u30AF\u30E9\u30A4\u30A2\u30F3\u30C8\u540D",
"token_endpoint_auth_method": "client_secret_basic",
"scope": "read write dolphin",
}
request = RegistrationRequest(**args)
resp = self.provider.registration_endpoint(request.to_json(),
environ={})
_resp = ClientInfoResponse().from_json(resp.message)
resp = self.provider.client_info_endpoint(
"",
environ={"HTTP_AUTHORIZATION": "Bearer %s" % (
_resp["registration_access_token"],)},
method="DELETE",
query="client_id=%s" % _resp["client_id"])
assert isinstance(resp, NoContent)
# A read should fail
resp = self.provider.client_info_endpoint(
"",
environ={"HTTP_AUTHORIZATION": "Bearer %s" % (
_resp["registration_access_token"],)},
query="client_id=%s" % _resp["client_id"])
assert isinstance(resp, Unauthorized)
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# Meta-info
Author: Nelson Brochado
Created: 01/06/2015
Updated: 02/04/2018
# Description
## What's a hash map (or hash table)?
A hash map is a data structure which is used to implement the so-called
associative array, which is an abstract data type composed of a collection of
(key, value) pairs, such that each possible key appears at most once in the
collection.
## Hash function
To map keys to values, a hash function is used when implementing a hash map. A
hash function is any function that can be used to map data of arbitrary size to
data of fixed size.
A perfect hash function is a function that assigns each key a unique bucket in
the data structure, but most hash table designs employ an imperfect hash
function, which might cause hash collisions, where the hash function generates
the same index (i.e. the same position or bucket) for more than one key. Such
collisions must be resolved or accommodated in some way.
## Resolving collisions
There are different ways to resolve collisions, where the most famous techniques
are "separate chaining" and "open addressing".
# TODO
- Add complexity analysis to operations
- No difference between non-existence of a key in the table and existence of a
key with None as associated value: maybe we want to differentiate the two cases?
- Resizing the hash table only whenever we reach a full table may not be the
best option in terms of performance...
- Should a client of this class be able to specify its custom hash function?
- Size could be implemented as a counter.
- Improve is_hash_table function
# References
- http://interactivepython.org/runestone/static/pythonds/SortSearch/Hashing.html
- https://stackoverflow.com/q/279539/3924118
- https://stackoverflow.com/q/9835762/3924118
- https://stackoverflow.com/q/1541797/3924118
- https://en.wikipedia.org/wiki/Associative_array
- https://en.wikipedia.org/wiki/Hash_table
- https://en.wikipedia.org/wiki/Hash_function
- https://en.wikipedia.org/wiki/Linear_probing
- https://en.wikipedia.org/wiki/Open_addressing
"""
from collections.abc import Hashable
from tabulate import tabulate
from ands.ds.HashTable import HashTable
__all__ = ["LinearProbingHashTable", "has_duplicates_ignore_nones",
"is_hash_table"]
class LinearProbingHashTable(HashTable):
"""Resizable hash table which uses linear probing, which is a specific
"open addressing" technique, to resolve collisions.
The process of resizing consists in doubling the current capacity of the
hash table each time.
The hash function uses both the Python's built-in hash function and the %
operator.
You can access and put an item in the hash table by using the same
convenient notation that is used by the Python's standard dict class:
h = LinearProbingHashTable()
h[12] = 3
print(h[12])"""
def __init__(self, capacity: int = 11):
if not isinstance(capacity, int):
raise TypeError("capacity must be an instance of int")
if capacity < 1:
raise ValueError("capacity must be greater or equal to 1")
self._n = capacity # self._n holds the size of the buffers.
self._keys = [None] * self._n
self._values = [None] * self._n
@property
def size(self) -> int:
"""Returns the number of pairs key-value in this map."""
assert is_hash_table(self)
return sum(k is not None for k in self._keys)
@property
def capacity(self) -> int:
"""Returns the number of allocated cells in memory."""
assert is_hash_table(self)
return len(self._keys)
@staticmethod
def _hash_code(key, size: int) -> int:
"""Returns a hash code (an int) between 0 and size (excluded).
size must be the size of the buffer based on which this function should
return a hash value."""
return hash(key) % size
@staticmethod
def _rehash(old_hash: int, size: int) -> int:
"""Returns a new hash value based on the previous one called old_hash.
size must be the size of the buffer based on which we want to have a new
hash value from the old hash value."""
return (old_hash + 1) % size
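# A small worked example of the two helpers above (added for illustration;
# it assumes CPython, where hash() of a small non-negative int is the int
# itself):
#
#   LinearProbingHashTable._hash_code(11, 11)  # -> 0
#   LinearProbingHashTable._hash_code(22, 11)  # -> 0, collides with 11
#   LinearProbingHashTable._rehash(0, 11)      # -> 1, the next slot probed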
def put(self, key: object, value: object) -> None:
"""Inserts the pair (key: value) in this map.
If key is None, a TypeError is raised, because keys cannot be None."""
assert is_hash_table(self)
if key is None:
raise TypeError("key cannot be None.")
if not isinstance(key, Hashable):
raise TypeError("key must be an instance of a hashable type")
self._put(key, value, self._n)
assert is_hash_table(self)
def _put(self, key: object, value: object, size: int) -> None:
"""Helper method of self.put."""
hash_value = LinearProbingHashTable._hash_code(key, size)
# No need to allocate new space.
if self._keys[hash_value] is None:
self._keys[hash_value] = key
self._values[hash_value] = value
# If self already contains key, then its value is overridden.
elif self._keys[hash_value] == key:
self._values[hash_value] = value
# Collision: there's already a (key: value) pair at the slot dedicated
# to this (key: value) pair, according to the self._hash_code function.
# We need to _rehash, i.e. find another slot for this (key: value) pair.
else:
next_slot = LinearProbingHashTable._rehash(hash_value, size)
rehashed = False
while (self._keys[next_slot] is not None and
self._keys[next_slot] != key):
next_slot = LinearProbingHashTable._rehash(next_slot, size)
# Allocate a new buffer of length len(self._keys) * 2 + 1.
if next_slot == hash_value:
rehashed = True
keys = self._keys
values = self._values
new_size = len(self._keys) * 2 + 1
self._keys = [None] * new_size
self._values = [None] * new_size
# Rehashing and putting all elements in the new bigger
# buffer.
#
# Note: the calls to self._put in the following loop will
# never reach these statements, because there will be slots
# available, and because of the way hashing and rehashing
# are currently implemented.
for k in keys:
v = LinearProbingHashTable._get(k, keys, values,
self._n)
self._put(k, v, new_size)
# After resizing the buffers, we insert the original
# (key: value) pair.
self._put(key, value, new_size)
self._n = new_size
# We exited the loop either because we have found a free slot or a
# slot containing our key, and not after having re-sized the table.
if not rehashed:
if self._keys[next_slot] is None:
self._keys[next_slot] = key
self._values[next_slot] = value
else:
assert self._keys[next_slot] == key
self._values[next_slot] = value
def get(self, key: object) -> object:
"""Returns the value associated with key.
If key is None, a TypeError is raised, because keys cannot be None."""
assert is_hash_table(self)
if key is None:
raise TypeError("key cannot be None.")
if not isinstance(key, Hashable):
raise TypeError("key must be an instance of a hashable type")
value = LinearProbingHashTable._get(key, self._keys, self._values,
self._n)
assert is_hash_table(self)
return value
@staticmethod
def _get(key: object, keys: list, values: list, size: int) -> object:
"""Helper method of self.get."""
hash_value = LinearProbingHashTable._hash_code(key, size)
data = None
stop = False
found = False
position = hash_value
while keys[position] is not None and not found and not stop:
if keys[position] == key:
found = True
data = values[position]
else:
# Find a new possible position by rehashing.
position = LinearProbingHashTable._rehash(position, size)
# We are back at the initial slot, and thus nothing was found.
if position == hash_value:
stop = True
return data
def delete(self, key: object) -> object:
"""Deletes the mapping between key and its associated value.
If there's no mapping, nothing is done."""
assert is_hash_table(self)
if key is None:
raise TypeError("key cannot be None.")
if not isinstance(key, Hashable):
raise TypeError("key must be an instance of a hashable type")
try:
i = self._keys.index(key)
v = self._values[i]
self._keys[i] = self._values[i] = None
return v
except ValueError:
pass
finally:
assert is_hash_table(self)
def show(self) -> None:
"""Prints this hash table in table-like format."""
c = 0
data = []
for i in range(len(self._keys)):
if self._keys[i] is not None:
c += 1
data.append([c, self._keys[i], self._values[i]])
print(tabulate(data, headers=["#", "Keys", "Values"], tablefmt="grid"))
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
self.put(key, value)
def __str__(self):
return str([(k, v) for k, v in zip(self._keys, self._values)
if k is not None])
def __repr__(self):
return self.__str__()
def has_duplicates_ignore_nones(ls: list) -> bool:
"""Returns true if ls does contain duplicate elements, false otherwise.
None items in ls are not considered."""
ls = [item for item in ls if item is not None]
return len(ls) != len(set(ls))
def is_hash_table(t: HashTable) -> bool:
"""Returns true if t is a valid HashTable, false otherwise."""
if not isinstance(t, HashTable):
return False
if len(t._keys) != len(t._values) or len(t._keys) != t._n:
return False
return not has_duplicates_ignore_nones(t._keys)
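# Minimal usage sketch (added; not part of the original module). It only
# exercises the public API defined above; the keys and values are arbitrary
# examples, and show() relies on the optional tabulate dependency imported at
# the top of this file.
if __name__ == "__main__":
    table = LinearProbingHashTable(capacity=11)
    table["apple"] = 3      # put() via __setitem__
    table["banana"] = 7
    table["apple"] = 5      # putting an existing key overrides its value
    print(table["apple"])   # get() via __getitem__ -> 5
    print(table.size)       # number of stored pairs -> 2
    table.delete("banana")  # removes the ("banana", 7) pair
    print(table)            # __str__ -> [('apple', 5)]
    table.show()            # table-like dump of the remaining pairs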
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver for Linux servers running LVM.
"""
import math
import os
import socket
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import units
import six
from cinder.brick.local_dev import lvm as lvm
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder import utils
from cinder.volume import driver
from cinder.volume import utils as volutils
LOG = logging.getLogger(__name__)
# FIXME(jdg): We'll put the lvm_ prefix back on these when we
# move over to using this as the real LVM driver, for now we'll
# rename them so that the config generation utility doesn't barf
# on duplicate entries.
volume_opts = [
cfg.StrOpt('volume_group',
default='cinder-volumes',
help='Name for the VG that will contain exported volumes'),
cfg.IntOpt('lvm_mirrors',
default=0,
help='If >0, create LVs with multiple mirrors. Note that '
'this requires lvm_mirrors + 2 PVs with available space'),
cfg.StrOpt('lvm_type',
default='default',
choices=['default', 'thin', 'auto'],
help='Type of LVM volumes to deploy; (default, thin, or auto). '
'Auto defaults to thin if thin is supported.'),
cfg.StrOpt('lvm_conf_file',
default='/etc/cinder/lvm.conf',
help='LVM conf file to use for the LVM driver in Cinder; '
'this setting is ignored if the specified file does '
'not exist (You can also specify \'None\' to not use '
'a conf file even if one exists).')
]
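# For illustration only (not part of the original driver): the options above
# would typically be set in a backend section of cinder.conf, for example:
#
#   [lvm-1]
#   volume_group = cinder-volumes
#   lvm_type = thin
#   lvm_mirrors = 0
#   lvm_conf_file = /etc/cinder/lvm.conf
#
# The section name "lvm-1" is a hypothetical example.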
CONF = cfg.CONF
CONF.register_opts(volume_opts)
class LVMVolumeDriver(driver.VolumeDriver):
"""Executes commands relating to Volumes."""
VERSION = '3.0.0'
def __init__(self, vg_obj=None, *args, **kwargs):
# Parent sets db, host, _execute and base config
super(LVMVolumeDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(volume_opts)
self.hostname = socket.gethostname()
self.vg = vg_obj
self.backend_name =\
self.configuration.safe_get('volume_backend_name') or 'LVM'
# Target Driver is what handles data-transport
# Transport specific code should NOT be in
# the driver (control path), this way
# different target drivers can be added (iscsi, FC etc)
target_driver = \
self.target_mapping[self.configuration.safe_get('iscsi_helper')]
LOG.debug('Attempting to initialize LVM driver with the '
'following target_driver: %s',
target_driver)
self.target_driver = importutils.import_object(
target_driver,
configuration=self.configuration,
db=self.db,
executor=self._execute)
self.protocol = self.target_driver.protocol
self.sparse_copy_volume = False
def _sizestr(self, size_in_g):
return '%sg' % size_in_g
def _volume_not_present(self, volume_name):
return self.vg.get_volume(volume_name) is None
def _delete_volume(self, volume, is_snapshot=False):
"""Deletes a logical volume."""
if self.configuration.volume_clear != 'none' and \
self.configuration.lvm_type != 'thin':
self._clear_volume(volume, is_snapshot)
name = volume['name']
if is_snapshot:
name = self._escape_snapshot(volume['name'])
self.vg.delete(name)
def _clear_volume(self, volume, is_snapshot=False):
# zero out old volumes to prevent data leaking between users
        # TODO(ja): reclaiming space should be done lazily and at low priority
if is_snapshot:
            # If the volume to be cleared is a snapshot of another volume,
            # we need to clear it out using the -cow device instead of the
            # volume path directly. We need to skip this if we are using
            # thin provisioned LVs.
# bug# lp1191812
dev_path = self.local_path(volume) + "-cow"
else:
dev_path = self.local_path(volume)
# TODO(jdg): Maybe we could optimize this for snaps by looking at
# the cow table and only overwriting what's necessary?
# for now we're still skipping on snaps due to hang issue
if not os.path.exists(dev_path):
msg = (_('Volume device file path %s does not exist.')
% dev_path)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
size_in_g = volume.get('volume_size') or volume.get('size')
if size_in_g is None:
msg = (_("Size for volume: %s not found, cannot secure delete.")
% volume['id'])
LOG.error(msg)
raise exception.InvalidParameterValue(msg)
# clear_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
vol_sz_in_meg = size_in_g * units.Ki
volutils.clear_volume(
vol_sz_in_meg, dev_path,
volume_clear=self.configuration.volume_clear,
volume_clear_size=self.configuration.volume_clear_size)
def _escape_snapshot(self, snapshot_name):
        # Linux LVM reserves names that start with 'snapshot', so such
        # volume names can't be created. Mangle it.
if not snapshot_name.startswith('snapshot'):
return snapshot_name
return '_' + snapshot_name
def _create_volume(self, name, size, lvm_type, mirror_count, vg=None):
vg_ref = self.vg
if vg is not None:
vg_ref = vg
vg_ref.create_volume(name, size, lvm_type, mirror_count)
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug("Updating volume stats")
if self.vg is None:
LOG.warning(_LW('Unable to update stats on non-initialized '
'Volume Group: %s'),
self.configuration.volume_group)
return
self.vg.update_volume_group_info()
data = {}
        # Note(zhiteng): This information is driver/backend specific;
        # each driver may define these values in its own config options
        # or fetch them from a driver-specific configuration file.
data["volume_backend_name"] = self.backend_name
data["vendor_name"] = 'Open Source'
data["driver_version"] = self.VERSION
data["storage_protocol"] = self.protocol
data["pools"] = []
total_capacity = 0
free_capacity = 0
if self.configuration.lvm_mirrors > 0:
total_capacity =\
self.vg.vg_mirror_size(self.configuration.lvm_mirrors)
free_capacity =\
self.vg.vg_mirror_free_space(self.configuration.lvm_mirrors)
provisioned_capacity = round(
float(total_capacity) - float(free_capacity), 2)
elif self.configuration.lvm_type == 'thin':
total_capacity = self.vg.vg_thin_pool_size
free_capacity = self.vg.vg_thin_pool_free_space
provisioned_capacity = self.vg.vg_provisioned_capacity
else:
total_capacity = self.vg.vg_size
free_capacity = self.vg.vg_free_space
provisioned_capacity = round(
float(total_capacity) - float(free_capacity), 2)
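        # location_info encodes "driver:host:vg:lvm_type:lvm_mirrors"; the
        # destination's migrate_volume() parses it to detect migrations that
        # can be performed locally on this host.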
location_info = \
('LVMVolumeDriver:%(hostname)s:%(vg)s'
':%(lvm_type)s:%(lvm_mirrors)s' %
{'hostname': self.hostname,
'vg': self.configuration.volume_group,
'lvm_type': self.configuration.lvm_type,
'lvm_mirrors': self.configuration.lvm_mirrors})
thin_enabled = self.configuration.lvm_type == 'thin'
        # Calculate the total number of volumes used by the VG.
# This includes volumes and snapshots.
total_volumes = len(self.vg.get_volumes())
# Skip enabled_pools setting, treat the whole backend as one pool
# XXX FIXME if multipool support is added to LVM driver.
single_pool = {}
single_pool.update(dict(
pool_name=data["volume_backend_name"],
total_capacity_gb=total_capacity,
free_capacity_gb=free_capacity,
reserved_percentage=self.configuration.reserved_percentage,
location_info=location_info,
QoS_support=False,
provisioned_capacity_gb=provisioned_capacity,
max_over_subscription_ratio=(
self.configuration.max_over_subscription_ratio),
thin_provisioning_support=thin_enabled,
thick_provisioning_support=not thin_enabled,
total_volumes=total_volumes,
filter_function=self.get_filter_function(),
goodness_function=self.get_goodness_function(),
multiattach=True
))
data["pools"].append(single_pool)
# Check availability of sparse volume copy.
data['sparse_copy_volume'] = self.configuration.lvm_type == 'thin'
self._stats = data
def check_for_setup_error(self):
"""Verify that requirements are in place to use LVM driver."""
if self.vg is None:
root_helper = utils.get_root_helper()
lvm_conf_file = self.configuration.lvm_conf_file
if lvm_conf_file.lower() == 'none':
lvm_conf_file = None
try:
self.vg = lvm.LVM(self.configuration.volume_group,
root_helper,
lvm_type=self.configuration.lvm_type,
executor=self._execute,
lvm_conf=lvm_conf_file)
except exception.VolumeGroupNotFound:
message = (_("Volume Group %s does not exist") %
self.configuration.volume_group)
raise exception.VolumeBackendAPIException(data=message)
vg_list = volutils.get_all_volume_groups(
self.configuration.volume_group)
vg_dict = \
next(vg for vg in vg_list if vg['name'] == self.vg.vg_name)
if vg_dict is None:
message = (_("Volume Group %s does not exist") %
self.configuration.volume_group)
raise exception.VolumeBackendAPIException(data=message)
pool_name = "%s-pool" % self.configuration.volume_group
if self.configuration.lvm_type == 'auto':
# Default to thin provisioning if it is supported and
# the volume group is empty, or contains a thin pool
# for us to use.
self.vg.update_volume_group_info()
self.configuration.lvm_type = 'default'
if volutils.supports_thin_provisioning():
if self.vg.get_volume(pool_name) is not None:
LOG.info(_LI('Enabling LVM thin provisioning by default '
'because a thin pool exists.'))
self.configuration.lvm_type = 'thin'
elif len(self.vg.get_volumes()) == 0:
LOG.info(_LI('Enabling LVM thin provisioning by default '
'because no LVs exist.'))
self.configuration.lvm_type = 'thin'
if self.configuration.lvm_type == 'thin':
# Specific checks for using Thin provisioned LV's
if not volutils.supports_thin_provisioning():
message = _("Thin provisioning not supported "
"on this version of LVM.")
raise exception.VolumeBackendAPIException(data=message)
if self.vg.get_volume(pool_name) is None:
try:
self.vg.create_thin_pool(pool_name)
except processutils.ProcessExecutionError as exc:
exception_message = (_("Failed to create thin pool, "
"error message was: %s")
% six.text_type(exc.stderr))
raise exception.VolumeBackendAPIException(
data=exception_message)
# Enable sparse copy since lvm_type is 'thin'
self.sparse_copy_volume = True
def create_volume(self, volume):
"""Creates a logical volume."""
mirror_count = 0
if self.configuration.lvm_mirrors:
mirror_count = self.configuration.lvm_mirrors
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
mirror_count)
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return model update from LVM for migrated volume.
This method should rename the back-end volume name(id) on the
destination host back to its original name(id) on the source host.
:param ctxt: The context used to run the method update_migrated_volume
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:param original_volume_status: The status of the original volume
:return model_update to update DB with any needed changes
"""
name_id = None
provider_location = None
if original_volume_status == 'available':
current_name = CONF.volume_name_template % new_volume['id']
original_volume_name = CONF.volume_name_template % volume['id']
try:
self.vg.rename_volume(current_name, original_volume_name)
except processutils.ProcessExecutionError:
LOG.error(_LE('Unable to rename the logical volume '
'for volume: %s'), volume['name'])
# If the rename fails, _name_id should be set to the new
# volume id and provider_location should be set to the
# one from the new volume as well.
name_id = new_volume['_name_id'] or new_volume['id']
provider_location = new_volume['provider_location']
else:
# The back-end will not be renamed.
name_id = new_volume['_name_id'] or new_volume['id']
provider_location = new_volume['provider_location']
return {'_name_id': name_id, 'provider_location': provider_location}
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
self.configuration.lvm_mirrors)
# Some configurations of LVM do not automatically activate
# ThinLVM snapshot LVs.
self.vg.activate_lv(snapshot['name'], is_snapshot=True)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
volutils.copy_volume(self.local_path(snapshot),
self.local_path(volume),
snapshot['volume_size'] * units.Ki,
self.configuration.volume_dd_blocksize,
execute=self._execute,
sparse=self.sparse_copy_volume)
def delete_volume(self, volume):
"""Deletes a logical volume."""
# NOTE(jdg): We don't need to explicitly call
# remove export here because we already did it
# in the manager before we got here.
if self._volume_not_present(volume['name']):
# If the volume isn't present, then don't attempt to delete
return True
if self.vg.lv_has_snapshot(volume['name']):
LOG.error(_LE('Unable to delete due to existing snapshot '
'for volume: %s'), volume['name'])
raise exception.VolumeIsBusy(volume_name=volume['name'])
self._delete_volume(volume)
LOG.info(_LI('Successfully deleted volume: %s'), volume['id'])
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self.vg.create_lv_snapshot(self._escape_snapshot(snapshot['name']),
snapshot['volume_name'],
self.configuration.lvm_type)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
# If the snapshot isn't present, then don't attempt to delete
LOG.warning(_LW("snapshot: %s not found, "
"skipping delete operations"), snapshot['name'])
LOG.info(_LI('Successfully deleted snapshot: %s'), snapshot['id'])
return True
# TODO(yamahata): zeroing out the whole snapshot triggers COW.
# it's quite slow.
self._delete_volume(snapshot, is_snapshot=True)
def local_path(self, volume, vg=None):
if vg is None:
vg = self.configuration.volume_group
# NOTE(vish): stops deprecation warning
escaped_group = vg.replace('-', '--')
escaped_name = self._escape_snapshot(volume['name']).replace('-', '--')
return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
image_utils.fetch_to_raw(context,
image_service,
image_id,
self.local_path(volume),
self.configuration.volume_dd_blocksize,
size=volume['size'])
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
if self.configuration.lvm_type == 'thin':
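            # With thin provisioning an LV snapshot is itself a writable,
            # space-efficient volume, so a snapshot of the source serves as
            # the clone and no dd copy is needed.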
self.vg.create_lv_snapshot(volume['name'],
src_vref['name'],
self.configuration.lvm_type)
if volume['size'] > src_vref['size']:
LOG.debug("Resize the new volume to %s.", volume['size'])
self.extend_volume(volume, volume['size'])
self.vg.activate_lv(volume['name'], is_snapshot=True,
permanent=True)
return
mirror_count = 0
if self.configuration.lvm_mirrors:
mirror_count = self.configuration.lvm_mirrors
LOG.info(_LI('Creating clone of volume: %s'), src_vref['id'])
volume_name = src_vref['name']
temp_id = 'tmp-snap-%s' % volume['id']
temp_snapshot = {'volume_name': volume_name,
'size': src_vref['size'],
'volume_size': src_vref['size'],
'name': 'clone-snap-%s' % volume['id'],
'id': temp_id}
self.create_snapshot(temp_snapshot)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
try:
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
mirror_count)
self.vg.activate_lv(temp_snapshot['name'], is_snapshot=True)
volutils.copy_volume(
self.local_path(temp_snapshot),
self.local_path(volume),
src_vref['size'] * units.Ki,
self.configuration.volume_dd_blocksize,
execute=self._execute,
sparse=self.sparse_copy_volume)
finally:
self.delete_snapshot(temp_snapshot)
def clone_image(self, context, volume,
image_location, image_meta,
image_service):
return None, False
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup.volume_id)
temp_snapshot = None
previous_status = volume['previous_status']
if previous_status == 'in-use':
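            # The volume is attached, so back up from a temporary snapshot to
            # get a consistent point-in-time image while the volume stays in
            # use.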
temp_snapshot = self._create_temp_snapshot(context, volume)
backup.temp_snapshot_id = temp_snapshot.id
backup.save()
volume_path = self.local_path(temp_snapshot)
else:
volume_path = self.local_path(volume)
try:
with utils.temporary_chown(volume_path):
with open(volume_path) as volume_file:
backup_service.backup(backup, volume_file)
finally:
if temp_snapshot:
self._delete_temp_snapshot(context, temp_snapshot)
backup.temp_snapshot_id = None
backup.save()
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
volume_path = self.local_path(volume)
with utils.temporary_chown(volume_path):
with open(volume_path, 'wb') as volume_file:
backup_service.restore(backup, volume['id'], volume_file)
def get_volume_stats(self, refresh=False):
"""Get volume status.
        If 'refresh' is True, update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def extend_volume(self, volume, new_size):
"""Extend an existing volume's size."""
self.vg.extend_volume(volume['name'],
self._sizestr(new_size))
def manage_existing(self, volume, existing_ref):
"""Manages an existing LV.
Renames the LV to match the expected name for the volume.
Error checking done by manage_existing_get_size is not repeated.
"""
lv_name = existing_ref['source-name']
self.vg.get_volume(lv_name)
if volutils.check_already_managed_volume(self.db, lv_name):
raise exception.ManageExistingAlreadyManaged(volume_ref=lv_name)
# Attempt to rename the LV to match the OpenStack internal name.
try:
self.vg.rename_volume(lv_name, volume['name'])
except processutils.ProcessExecutionError as exc:
exception_message = (_("Failed to rename logical volume %(name)s, "
"error message was: %(err_msg)s")
% {'name': lv_name,
'err_msg': exc.stderr})
raise exception.VolumeBackendAPIException(
data=exception_message)
def manage_existing_object_get_size(self, existing_object, existing_ref,
object_type):
"""Return size of an existing LV for manage existing volume/snapshot.
existing_ref is a dictionary of the form:
{'source-name': <name of LV>}
"""
# Check that the reference is valid
if 'source-name' not in existing_ref:
reason = _('Reference must contain source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
lv_name = existing_ref['source-name']
lv = self.vg.get_volume(lv_name)
# Raise an exception if we didn't find a suitable LV.
if not lv:
kwargs = {'existing_ref': lv_name,
'reason': 'Specified logical volume does not exist.'}
raise exception.ManageExistingInvalidReference(**kwargs)
# LV size is returned in gigabytes. Attempt to parse size as a float
# and round up to the next integer.
try:
lv_size = int(math.ceil(float(lv['size'])))
except ValueError:
exception_message = (_("Failed to manage existing %(type)s "
"%(name)s, because reported size %(size)s "
"was not a floating-point number.")
% {'type': object_type,
'name': lv_name,
'size': lv['size']})
raise exception.VolumeBackendAPIException(
data=exception_message)
return lv_size
def manage_existing_get_size(self, volume, existing_ref):
return self.manage_existing_object_get_size(volume, existing_ref,
"volume")
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
if not isinstance(existing_ref, dict):
existing_ref = {"source-name": existing_ref}
return self.manage_existing_object_get_size(snapshot, existing_ref,
"snapshot")
def manage_existing_snapshot(self, snapshot, existing_ref):
dest_name = self._escape_snapshot(snapshot['name'])
snapshot_temp = {"name": dest_name}
if not isinstance(existing_ref, dict):
existing_ref = {"source-name": existing_ref}
return self.manage_existing(snapshot_temp, existing_ref)
def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0):
"""Optimize the migration if the destination is on the same server.
If the specified host is another back-end on the same server, and
the volume is not attached, we can do the migration locally without
going through iSCSI.
"""
false_ret = (False, None)
if volume['status'] != 'available':
return false_ret
if 'location_info' not in host['capabilities']:
return false_ret
info = host['capabilities']['location_info']
try:
(dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\
info.split(':')
lvm_mirrors = int(lvm_mirrors)
except ValueError:
return false_ret
if (dest_type != 'LVMVolumeDriver' or dest_hostname != self.hostname):
return false_ret
if dest_vg != self.vg.vg_name:
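            # The destination is a different VG on this same host: create the
            # LV there and copy the data locally instead of going through the
            # target driver (iSCSI).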
vg_list = volutils.get_all_volume_groups()
try:
next(vg for vg in vg_list if vg['name'] == dest_vg)
except StopIteration:
LOG.error(_LE("Destination Volume Group %s does not exist"),
dest_vg)
return false_ret
helper = utils.get_root_helper()
lvm_conf_file = self.configuration.lvm_conf_file
if lvm_conf_file.lower() == 'none':
lvm_conf_file = None
dest_vg_ref = lvm.LVM(dest_vg, helper,
lvm_type=lvm_type,
executor=self._execute,
lvm_conf=lvm_conf_file)
self._create_volume(volume['name'],
self._sizestr(volume['size']),
lvm_type,
lvm_mirrors,
dest_vg_ref)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
size_in_mb = int(volume['size']) * units.Ki
volutils.copy_volume(self.local_path(volume),
self.local_path(volume, vg=dest_vg),
size_in_mb,
self.configuration.volume_dd_blocksize,
execute=self._execute,
sparse=self.sparse_copy_volume)
self._delete_volume(volume)
return (True, None)
else:
message = (_("Refusing to migrate volume ID: %(id)s. Please "
"check your configuration because source and "
"destination are the same Volume Group: %(name)s.") %
{'id': volume['id'], 'name': self.vg.vg_name})
LOG.exception(message)
raise exception.VolumeBackendAPIException(data=message)
def get_pool(self, volume):
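        # This driver exposes a single pool named after the backend (see
        # _update_volume_stats), so every volume maps to that pool.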
return self.backend_name
# ####### Interface methods for DataPath (Target Driver) ########
def ensure_export(self, context, volume):
volume_path = "/dev/%s/%s" % (self.configuration.volume_group,
volume['name'])
model_update = \
self.target_driver.ensure_export(context, volume, volume_path)
return model_update
def create_export(self, context, volume, connector, vg=None):
if vg is None:
vg = self.configuration.volume_group
volume_path = "/dev/%s/%s" % (vg, volume['name'])
export_info = self.target_driver.create_export(
context,
volume,
volume_path)
return {'provider_location': export_info['location'],
'provider_auth': export_info['auth'], }
def remove_export(self, context, volume):
self.target_driver.remove_export(context, volume)
def initialize_connection(self, volume, connector):
return self.target_driver.initialize_connection(volume, connector)
def validate_connector(self, connector):
return self.target_driver.validate_connector(connector)
def terminate_connection(self, volume, connector, **kwargs):
return self.target_driver.terminate_connection(volume, connector,
**kwargs)
class LVMISCSIDriver(LVMVolumeDriver):
"""Empty class designation for LVMISCSI.
Since we've decoupled the inheritance of iSCSI and LVM we
don't really need this class any longer. We do however want
to keep it (at least for now) for back compat in driver naming.
"""
def __init__(self, *args, **kwargs):
super(LVMISCSIDriver, self).__init__(*args, **kwargs)
LOG.warning(_LW('LVMISCSIDriver is deprecated, you should '
'now just use LVMVolumeDriver and specify '
'iscsi_helper for the target driver you '
'wish to use.'))
class LVMISERDriver(LVMVolumeDriver):
"""Empty class designation for LVMISER.
Since we've decoupled the inheritance of data path in LVM we
don't really need this class any longer. We do however want
to keep it (at least for now) for back compat in driver naming.
"""
def __init__(self, *args, **kwargs):
super(LVMISERDriver, self).__init__(*args, **kwargs)
LOG.warning(_LW('LVMISERDriver is deprecated, you should '
'now just use LVMVolumeDriver and specify '
'iscsi_helper for the target driver you '
'wish to use. In order to enable iser, please '
'set iscsi_protocol with the value iser.'))
LOG.debug('Attempting to initialize LVM driver with the '
'following target_driver: '
'cinder.volume.targets.iser.ISERTgtAdm')
self.target_driver = importutils.import_object(
'cinder.volume.targets.iser.ISERTgtAdm',
configuration=self.configuration,
db=self.db,
executor=self._execute)
|
|
import os, sys, imp
import logging.config
import datetime, time
from collections import OrderedDict
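# NOTE: `IN` and `In` used throughout this module are framework globals; they
# are assumed to be bootstrapped by the hosting IN framework before this module
# is loaded and are not defined or imported here.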
# Application Directory
__app_dir__ = 'app'
class ApplicationBase:
'''Base Application'''
class Application(ApplicationBase):
def __init__(self, app_path):
self.initialized = False
self.app_path = app_path
conf_path, conf_filename = os.path.split(app_path + '/config/config.py')
conf_filename, conf_ext = os.path.splitext(conf_filename)
try:
conf_f, conf_filename, conf_desc = imp.find_module(conf_filename, [conf_path])
except ImportError as e:
IN.logger.debug()
raise RuntimeError('Unable to import IN config.')
self.config = imp.load_module(conf_filename, conf_f, conf_filename, conf_desc)
# set global timezone
os.environ['TZ'] = self.config.timezone_name
time.tzset()
#self.app_path = self.config.app_root + os.sep
sys.path.append(self.app_path + '/themes/')
sys.path.append(self.app_path + '/addons/')
#sys.path.append(self.app_path + '/vendor/')
# set the debug mode to true or false based on the configuration
IN.debug = self.config.debug_mode
self.scope = 'Global' #default Scope to Global # future use
self.access = 'Public' #default Access to Public # future use
self.version = IN.__version__ #default to IN Version
# set the Application Object to IN
IN.APP = self
self.static_cache = {}
# contains all the actions built for this application.
# TODO: memory !!!
self.actions = None
self.file_actions = None
# override IN logger to app specific
#logging.config.dictConfig(self.config.loggerging)
#IN.logger = In.core.IN.logger.Logs(config = self.config.loggerging)
# contains the list of all access keys
self.access_keys = {}
self.def_theme_engine = None
self.def_theme = None
self.addons = OrderedDict()
        '''Per-app addons, loaded on app init.
        '''
self.load_addons()
self.context_pool = In.core.context.ContextPool()
# start the event
#self.context_pool.switch()
try:
# assign database controller Object
IN.db = In.db.DatabaseController(IN.APP.config.db_settings)
except Exception as e :
IN.logger.debug()
raise In.db.DBEngineInitializationException(e)
try:
# connect to db server
IN.db.connect()
except Exception as e :
#print(sys.exc_info()[0])
IN.logger.debug()
raise In.db.DBConnectionFailedException(e)
IN.filer = In.filer.Filer()
IN.stringer = In.stringer.Stringer()
IN.cacher = In.core.cacher.CacherEngine()
# init default cacher
IN.cacher.default
IN.valuator = In.core.valuator.ValuatorEngine()
IN.themer = In.themer.INThemeEngine(self.config.default_theme_name)
IN.boxer = In.boxer.BoxEngine()
IN.former = In.former.FormerEngine()
IN.fielder = In.field.FielderEngine()
IN.entitier = In.entity.EntitierEngine()
IN.texter = In.texter.TexterEngine()
IN.mailer = In.mailer.Mailer()
# In.nabar is module
# IN.nabar is Object
# context.nabar is current nabar
IN.nabar = In.nabar.AccountAuth()
IN.commenter = In.comment.Commenter()
# process the registers after In, app, application init.
IN.register.process_registers()
IN.hook_invoke('In_app_init', self)
IN.hook_invoke('__In_app_init__', self)
self.initialized = True
def addon_enabled(self, name):
'''TODO:
'''
try:
return self.addons[name].enabled
except:
IN.logger.debug()
return False
def addon(self, name):
'''TODO:
'''
try:
return self.addons[name]
except:
IN.logger.debug()
return None
def load_addons(self):
for addon in self.config.addons:
try:
__import__(addon)
except:
IN.logger.debug()
#???: specific to app instance?? how?
sys.path.append( ''.join((self.app_path, __app_dir__, os.sep, self.config.app_name, os.sep, 'addons')) )
        ret = [IN.load_extension(m) for m in self.addons.values() if m.enabled]
def app_verify(self):
        '''Runs various tests against the Application and returns the result.
'''
self.load_addons()
return True
def load(self):
#self.__load_configs()
self.load_addons()
    def ensure_environ(self, environ):
pass
def decide_page_class(self, context):
'''Return Page class dynamically based on path/nabar/role/...'''
return context.current_theme.decide_page_class(context)
def decide_theme(self, context):
'''Return theme name dynamically based on path/nabar/role/...'''
return self.config.default_theme_name
def decide_page_boxes(self, context, page, format):
'''Return boxes dynamically based on path/nabar/role/...'''
boxes = []
path_hook_tokens = context.request.path_hook_tokens()
for hook in path_hook_tokens:
IN.hook_invoke('page_box_' + hook, boxes, context, page, format)
# hook by all path
IN.hook_invoke('page_box', boxes, context, page, format)
return boxes
def decide_page_assets(self, context, page, format):
'''add/modify css js'''
return
def reload_config(self):
'''Reload the config without restarting the server.
TODO:
'''
pass
def __page_not_found__(self, context):
atn = In.action.ActionObject()
atn.handler = In.action.__page_not_found__
return atn
def __invalid_request__(self, context):
atn = In.action.ActionObject()
atn.handler = In.action.__invalid_request__
return atn
def __index_page__(self, context):
atn = In.action.ActionObject()
atn.handler = In.action.__index_page__
return atn
def __internal_server_error__(self, context):
atn = In.action.ActionObject()
atn.handler = In.action.__internal_server_error__
return atn
class WSGIApplication(Application):
'''IN WSGIApplication class. '''
def __call__(self, environ, start_response):
#### Hello world test
#output = 'Hello World!'.encode('utf-8')
#response_headers = [('Content-type', 'text/plain'),
#('Content-Length', str(len(output)))]
#start_response('200 OK', response_headers)
#return [output] # Hello World test
# handle our own
#debug = 0
#if debug:
#import cProfile, pstats, io
#pr = cProfile.Profile()
#pr.enable()
In_output = self.__run_call__(environ, start_response)
#if debug:
#pr.disable()
#s = io.StringIO()
#sortby = 'cumulative'
#ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
#ps.print_stats()
#print(s.getvalue())
return In_output
def __run_call__(self, environ, start_response):
#sys.stdout = open(os.devnull, 'w')
# test
self.ensure_environ(environ)
try:
context = In.context.Context(self, environ, start_response)
#IN.hook_invoke('__request_handler_init__', environ, start_response)
except In.context.ContextInitFailedException as context_failed:
# could not init the Context
# do some manual init before proceeding?
#print(traceback.format_exc())
# TODO: return plain error response
IN.logger.debug()
return
# handle the request
#self.handle_request(context)
# add context to pool
self.context_pool.put(context)
#stime = datetime.datetime.now()
# start this greenlet
# TODO
In_output = context.switch()
#etime = datetime.datetime.now()
##IN.logger.debug('Context start time: {stime}', {'stime' : stime})
##IN.logger.debug('Context end time: {etime}', {'etime' : etime})
#ms = etime - stime
#IN.logger.debug('Context time {diff}', {'diff' : ms})
# delete and free this context
try:
self.context_pool.free(context)
except Exception:
IN.logger.debug()
return In_output
#def handle_request(self, context):
#'''Applications main request handler
#'''
#'''Check whether is any request handler is handled this.
#'''
##res = IN.hook_invoke('app_request_handler')
##if res and any(res):
##''' Request is handled by some other object'''
##return True
## run all actions
#context.run_actions()
#return True # Job Done! Return
def load(self):
#self.__load_configs()
self.load_addons()
    def ensure_environ(self, environ):
pass
def decide_page_class(self, context):
'''Return Page class dynamically based on path/nabar/role/...'''
return context.current_theme.decide_page_class(context)
def decide_theme(self, context):
'''Return theme name dynamically based on path/nabar/role/...'''
return self.config.default_theme_name
def decide_page_assets(self, context, page, format):
'''add/modify css js'''
return
def reload_config(self):
'''Reload the config without restarting the server.
TODO:
'''
pass
def action_page_not_found(self, context):
return In.action.__page_not_found_action__()
|
|
from __future__ import with_statement
import pytest
import aredis
from aredis.exceptions import ConnectionError, TimeoutError
from aredis.sentinel import (Sentinel, SentinelConnectionPool,
MasterNotFoundError, SlaveNotFoundError)
class SentinelTestClient:
def __init__(self, cluster, id):
self.cluster = cluster
self.id = id
async def sentinel_masters(self):
self.cluster.connection_error_if_down(self)
self.cluster.timeout_if_down(self)
return {self.cluster.service_name: self.cluster.master}
async def sentinel_slaves(self, master_name):
self.cluster.connection_error_if_down(self)
self.cluster.timeout_if_down(self)
if master_name != self.cluster.service_name:
return []
return self.cluster.slaves
class SentinelTestCluster:
def __init__(self, service_name='mymaster', ip='127.0.0.1', port=6379):
self.clients = {}
self.master = {
'ip': ip,
'port': port,
'is_master': True,
'is_sdown': False,
'is_odown': False,
'num-other-sentinels': 0,
}
self.service_name = service_name
self.slaves = []
self.nodes_down = set()
self.nodes_timeout = set()
def connection_error_if_down(self, node):
if node.id in self.nodes_down:
raise ConnectionError
def timeout_if_down(self, node):
if node.id in self.nodes_timeout:
raise TimeoutError
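    # Stand-in for the real client factory: the `cluster` fixture patches
    # aredis.sentinel.StrictRedis to this method, so Sentinel builds
    # SentinelTestClient instances instead of opening real connections.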
def client(self, host, port, **kwargs):
return SentinelTestClient(self, (host, port))
@pytest.fixture()
def cluster(request):
def teardown():
aredis.sentinel.StrictRedis = saved_StrictRedis
cluster = SentinelTestCluster()
saved_StrictRedis = aredis.sentinel.StrictRedis
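    # Monkey-patch the client class used by Sentinel so it talks to the
    # in-memory test cluster instead of real Redis sentinels.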
aredis.sentinel.StrictRedis = cluster.client
request.addfinalizer(teardown)
return cluster
@pytest.fixture()
def sentinel(request, cluster, event_loop):
return Sentinel([('foo', 26379), ('bar', 26379)], loop=event_loop)
@pytest.mark.asyncio(forbid_global_loop=True)
async def test_discover_master(sentinel):
address = await sentinel.discover_master('mymaster')
assert address == ('127.0.0.1', 6379)
@pytest.mark.asyncio(forbid_global_loop=True)
async def test_discover_master_error(sentinel):
with pytest.raises(MasterNotFoundError):
await sentinel.discover_master('xxx')
@pytest.mark.asyncio(forbid_global_loop=True)
async def test_discover_master_sentinel_down(cluster, sentinel):
# Put first sentinel 'foo' down
cluster.nodes_down.add(('foo', 26379))
address = await sentinel.discover_master('mymaster')
assert address == ('127.0.0.1', 6379)
# 'bar' is now first sentinel
assert sentinel.sentinels[0].id == ('bar', 26379)
@pytest.mark.asyncio(forbid_global_loop=True)
async def test_discover_master_sentinel_timeout(cluster, sentinel):
    # Make first sentinel 'foo' time out
cluster.nodes_timeout.add(('foo', 26379))
address = await sentinel.discover_master('mymaster')
assert address == ('127.0.0.1', 6379)
# 'bar' is now first sentinel
assert sentinel.sentinels[0].id == ('bar', 26379)
@pytest.mark.asyncio(forbid_global_loop=True)
async def test_master_min_other_sentinels(cluster):
sentinel = Sentinel([('foo', 26379)], min_other_sentinels=1)
# min_other_sentinels
with pytest.raises(MasterNotFoundError):
await sentinel.discover_master('mymaster')
cluster.master['num-other-sentinels'] = 2
address = await sentinel.discover_master('mymaster')
assert address == ('127.0.0.1', 6379)
@pytest.mark.asyncio(forbid_global_loop=True)
async def test_master_odown(cluster, sentinel):
cluster.master['is_odown'] = True
with pytest.raises(MasterNotFoundError):
await sentinel.discover_master('mymaster')
@pytest.mark.asyncio(forbid_global_loop=True)
async def test_master_sdown(cluster, sentinel):
cluster.master['is_sdown'] = True
with pytest.raises(MasterNotFoundError):
await sentinel.discover_master('mymaster')
@pytest.mark.asyncio(forbid_global_loop=True)
async def test_discover_slaves(cluster, sentinel):
assert await sentinel.discover_slaves('mymaster') == []
cluster.slaves = [
{'ip': 'slave0', 'port': 1234, 'is_odown': False, 'is_sdown': False},
{'ip': 'slave1', 'port': 1234, 'is_odown': False, 'is_sdown': False},
]
assert await sentinel.discover_slaves('mymaster') == [
('slave0', 1234), ('slave1', 1234)]
# slave0 -> ODOWN
cluster.slaves[0]['is_odown'] = True
assert await sentinel.discover_slaves('mymaster') == [
('slave1', 1234)]
# slave1 -> SDOWN
cluster.slaves[1]['is_sdown'] = True
assert await sentinel.discover_slaves('mymaster') == []
cluster.slaves[0]['is_odown'] = False
cluster.slaves[1]['is_sdown'] = False
# node0 -> DOWN
cluster.nodes_down.add(('foo', 26379))
assert await sentinel.discover_slaves('mymaster') == [
('slave0', 1234), ('slave1', 1234)]
cluster.nodes_down.clear()
# node0 -> TIMEOUT
cluster.nodes_timeout.add(('foo', 26379))
assert await sentinel.discover_slaves('mymaster') == [
('slave0', 1234), ('slave1', 1234)]
@pytest.mark.asyncio(forbid_global_loop=True)
async def test_master_for(cluster, sentinel):
master = sentinel.master_for('mymaster')
assert await master.ping()
assert master.connection_pool.master_address == ('127.0.0.1', 6379)
# Use internal connection check
master = sentinel.master_for('mymaster', check_connection=True)
assert await master.ping()
@pytest.mark.asyncio(forbid_global_loop=True)
async def test_slave_for(cluster, sentinel):
cluster.slaves = [
{'ip': '127.0.0.1', 'port': 6379,
'is_odown': False, 'is_sdown': False},
]
slave = sentinel.slave_for('mymaster')
assert await slave.ping()
@pytest.mark.asyncio(forbid_global_loop=True)
async def test_slave_for_slave_not_found_error(cluster, sentinel):
cluster.master['is_odown'] = True
slave = sentinel.slave_for('mymaster', db=9)
with pytest.raises(SlaveNotFoundError):
await slave.ping()
@pytest.mark.asyncio(forbid_global_loop=True)
async def test_slave_round_robin(cluster, sentinel):
cluster.slaves = [
{'ip': 'slave0', 'port': 6379, 'is_odown': False, 'is_sdown': False},
{'ip': 'slave1', 'port': 6379, 'is_odown': False, 'is_sdown': False},
]
pool = SentinelConnectionPool('mymaster', sentinel)
rotator = await pool.rotate_slaves()
assert set(rotator) == {('slave0', 6379), ('slave1', 6379)}
|
|
from __future__ import print_function,absolute_import,division
_AD='expected the empty value, but found %r'
_AC='cannot find module %r (%s)'
_AB='expected non-empty name appended to the tag'
_AA='tag:yaml.org,2002:map'
_A9='tag:yaml.org,2002:seq'
_A8='tag:yaml.org,2002:set'
_A7='tag:yaml.org,2002:pairs'
_A6='tag:yaml.org,2002:omap'
_A5='tag:yaml.org,2002:timestamp'
_A4='tag:yaml.org,2002:binary'
_A3='tag:yaml.org,2002:float'
_A2='tag:yaml.org,2002:int'
_A1='tag:yaml.org,2002:bool'
_A0='tag:yaml.org,2002:null'
_z='could not determine a constructor for the tag %r'
_y='second'
_x='minute'
_w='day'
_v='month'
_u='year'
_t='failed to construct timestamp from "{}"'
_s='decodebytes'
_r='failed to convert base64 data into ascii: %s'
_q='.nan'
_p='.inf'
_o='expected a mapping or list of mappings for merging, but found %s'
_n='expected a mapping for merging, but found %s'
_m=' Duplicate keys will become an error in future releases, and are errors\n by default when using the new API.\n '
_l='\n To suppress this check see:\n http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys\n '
_k='tag:yaml.org,2002:merge'
_j=' Duplicate keys will become an error in future releases, and are errors\n by default when using the new API.\n '
_i='\n To suppress this check see:\n http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys\n '
_h='expected a sequence node, but found %s'
_g='expected a scalar node, but found %s'
_f='typ'
_e='while constructing a Python module'
_d='expected a single mapping item, but found %d items'
_c='expected a mapping of length 1, but found %s'
_b='expected a sequence, but found %s'
_a='failed to decode base64 data: %s'
_Z='tag:yaml.org,2002:value'
_Y='found duplicate key "{}"'
_X='found unhashable key'
_W='found unacceptable key (%s)'
_V='__setstate__'
_U='tz_hour'
_T='hour'
_S='ascii'
_R='tag:yaml.org,2002:str'
_Q='utf-8'
_P='expected a mapping node, but found %s'
_O='tz_minute'
_N='e'
_M='+-'
_L='while constructing an ordered map'
_K='tz_sign'
_J='-'
_I='fraction'
_H='.'
_G=':'
_F='0'
_E='while constructing a mapping'
_D='_'
_C=True
_B=False
_A=None
import datetime,base64,binascii,re,sys,types,warnings
from .error import MarkedYAMLError,MarkedYAMLFutureWarning,MantissaNoDotYAML1_1Warning
from .nodes import *
from .nodes import SequenceNode,MappingNode,ScalarNode
from .compat import utf8,builtins_module,to_str,PY2,PY3,text_type,nprint,nprintf,version_tnf
from .compat import ordereddict,Hashable,MutableSequence
from .compat import MutableMapping
from .comments import *
from .comments import CommentedMap,CommentedOrderedMap,CommentedSet,CommentedKeySeq,CommentedSeq,TaggedScalar,CommentedKeyMap
from .scalarstring import SingleQuotedScalarString,DoubleQuotedScalarString,LiteralScalarString,FoldedScalarString,PlainScalarString,ScalarString
from .scalarint import ScalarInt,BinaryInt,OctalInt,HexInt,HexCapsInt
from .scalarfloat import ScalarFloat
from .scalarbool import ScalarBoolean
from .timestamp import TimeStamp
from .util import RegExp
if _B:from typing import Any,Dict,List,Set,Generator,Union,Optional
__all__=['BaseConstructor','SafeConstructor','Constructor','ConstructorError','RoundTripConstructor']
class ConstructorError(MarkedYAMLError):0
class DuplicateKeyFutureWarning(MarkedYAMLFutureWarning):0
class DuplicateKeyError(MarkedYAMLFutureWarning):0
class BaseConstructor:
yaml_constructors={};yaml_multi_constructors={}
def __init__(self,preserve_quotes=_A,loader=_A):
self.loader=loader
if self.loader is not _A and getattr(self.loader,'_constructor',_A)is _A:self.loader._constructor=self
self.loader=loader;self.yaml_base_dict_type=dict;self.yaml_base_list_type=list;self.constructed_objects={};self.recursive_objects={};self.state_generators=[];self.deep_construct=_B;self._preserve_quotes=preserve_quotes;self.allow_duplicate_keys=version_tnf((0,15,1),(0,16))
@property
def composer(self):
if hasattr(self.loader,_f):return self.loader.composer
try:return self.loader._composer
except AttributeError:sys.stdout.write('slt {}\n'.format(type(self)));sys.stdout.write('slc {}\n'.format(self.loader._composer));sys.stdout.write('{}\n'.format(dir(self)));raise
@property
def resolver(self):
if hasattr(self.loader,_f):return self.loader.resolver
return self.loader._resolver
def check_data(self):return self.composer.check_node()
def get_data(self):
if self.composer.check_node():return self.construct_document(self.composer.get_node())
def get_single_data(self):
node=self.composer.get_single_node()
if node is not _A:return self.construct_document(node)
return _A
def construct_document(self,node):
data=self.construct_object(node)
while bool(self.state_generators):
state_generators=self.state_generators;self.state_generators=[]
for generator in state_generators:
for _dummy in generator:0
self.constructed_objects={};self.recursive_objects={};self.deep_construct=_B;return data
def construct_object(self,node,deep=_B):
if node in self.constructed_objects:return self.constructed_objects[node]
if deep:old_deep=self.deep_construct;self.deep_construct=_C
if node in self.recursive_objects:return self.recursive_objects[node]
self.recursive_objects[node]=_A;data=self.construct_non_recursive_object(node);self.constructed_objects[node]=data;del self.recursive_objects[node]
if deep:self.deep_construct=old_deep
return data
def construct_non_recursive_object(self,node,tag=_A):
constructor=_A;tag_suffix=_A
if tag is _A:tag=node.tag
if tag in self.yaml_constructors:constructor=self.yaml_constructors[tag]
else:
for tag_prefix in self.yaml_multi_constructors:
if tag.startswith(tag_prefix):tag_suffix=tag[len(tag_prefix):];constructor=self.yaml_multi_constructors[tag_prefix];break
else:
if _A in self.yaml_multi_constructors:tag_suffix=tag;constructor=self.yaml_multi_constructors[_A]
elif _A in self.yaml_constructors:constructor=self.yaml_constructors[_A]
elif isinstance(node,ScalarNode):constructor=self.__class__.construct_scalar
elif isinstance(node,SequenceNode):constructor=self.__class__.construct_sequence
elif isinstance(node,MappingNode):constructor=self.__class__.construct_mapping
if tag_suffix is _A:data=constructor(self,node)
else:data=constructor(self,tag_suffix,node)
if isinstance(data,types.GeneratorType):
generator=data;data=next(generator)
if self.deep_construct:
for _dummy in generator:0
else:self.state_generators.append(generator)
return data
def construct_scalar(self,node):
if not isinstance(node,ScalarNode):raise ConstructorError(_A,_A,_g%node.id,node.start_mark)
return node.value
def construct_sequence(self,node,deep=_B):
if not isinstance(node,SequenceNode):raise ConstructorError(_A,_A,_h%node.id,node.start_mark)
return[self.construct_object(child,deep=deep)for child in node.value]
def construct_mapping(self,node,deep=_B):
if not isinstance(node,MappingNode):raise ConstructorError(_A,_A,_P%node.id,node.start_mark)
total_mapping=self.yaml_base_dict_type()
if getattr(node,'merge',_A)is not _A:todo=[(node.merge,_B),(node.value,_B)]
else:todo=[(node.value,_C)]
for (values,check) in todo:
mapping=self.yaml_base_dict_type()
for (key_node,value_node) in values:
key=self.construct_object(key_node,deep=_C)
if not isinstance(key,Hashable):
if isinstance(key,list):key=tuple(key)
if PY2:
try:hash(key)
except TypeError as exc:raise ConstructorError(_E,node.start_mark,_W%exc,key_node.start_mark)
elif not isinstance(key,Hashable):raise ConstructorError(_E,node.start_mark,_X,key_node.start_mark)
value=self.construct_object(value_node,deep=deep)
if check:
if self.check_mapping_key(node,key_node,mapping,key,value):mapping[key]=value
else:mapping[key]=value
total_mapping.update(mapping)
return total_mapping
def check_mapping_key(self,node,key_node,mapping,key,value):
if key in mapping:
if not self.allow_duplicate_keys:
mk=mapping.get(key)
if PY2:
if isinstance(key,unicode):key=key.encode(_Q)
if isinstance(value,unicode):value=value.encode(_Q)
if isinstance(mk,unicode):mk=mk.encode(_Q)
args=[_E,node.start_mark,'found duplicate key "{}" with value "{}" (original value: "{}")'.format(key,value,mk),key_node.start_mark,_i,_j]
if self.allow_duplicate_keys is _A:warnings.warn(DuplicateKeyFutureWarning(*args))
else:raise DuplicateKeyError(*args)
return _B
return _C
def check_set_key(self,node,key_node,setting,key):
if key in setting:
if not self.allow_duplicate_keys:
if PY2:
if isinstance(key,unicode):key=key.encode(_Q)
args=['while constructing a set',node.start_mark,_Y.format(key),key_node.start_mark,_i,_j]
if self.allow_duplicate_keys is _A:warnings.warn(DuplicateKeyFutureWarning(*args))
else:raise DuplicateKeyError(*args)
def construct_pairs(self,node,deep=_B):
if not isinstance(node,MappingNode):raise ConstructorError(_A,_A,_P%node.id,node.start_mark)
pairs=[]
for (key_node,value_node) in node.value:key=self.construct_object(key_node,deep=deep);value=self.construct_object(value_node,deep=deep);pairs.append((key,value))
return pairs
@classmethod
def add_constructor(cls,tag,constructor):
if'yaml_constructors'not in cls.__dict__:cls.yaml_constructors=cls.yaml_constructors.copy()
cls.yaml_constructors[tag]=constructor
@classmethod
def add_multi_constructor(cls,tag_prefix,multi_constructor):
if'yaml_multi_constructors'not in cls.__dict__:cls.yaml_multi_constructors=cls.yaml_multi_constructors.copy()
cls.yaml_multi_constructors[tag_prefix]=multi_constructor
class SafeConstructor(BaseConstructor):
def construct_scalar(self,node):
if isinstance(node,MappingNode):
for (key_node,value_node) in node.value:
if key_node.tag==_Z:return self.construct_scalar(value_node)
return BaseConstructor.construct_scalar(self,node)
def flatten_mapping(self,node):
merge=[];index=0
while index<len(node.value):
key_node,value_node=node.value[index]
if key_node.tag==_k:
if merge:
if self.allow_duplicate_keys:del node.value[index];index+=1;continue
args=[_E,node.start_mark,_Y.format(key_node.value),key_node.start_mark,_l,_m]
if self.allow_duplicate_keys is _A:warnings.warn(DuplicateKeyFutureWarning(*args))
else:raise DuplicateKeyError(*args)
del node.value[index]
if isinstance(value_node,MappingNode):self.flatten_mapping(value_node);merge.extend(value_node.value)
elif isinstance(value_node,SequenceNode):
submerge=[]
for subnode in value_node.value:
if not isinstance(subnode,MappingNode):raise ConstructorError(_E,node.start_mark,_n%subnode.id,subnode.start_mark)
self.flatten_mapping(subnode);submerge.append(subnode.value)
submerge.reverse()
for value in submerge:merge.extend(value)
else:raise ConstructorError(_E,node.start_mark,_o%value_node.id,value_node.start_mark)
elif key_node.tag==_Z:key_node.tag=_R;index+=1
else:index+=1
if bool(merge):node.merge=merge;node.value=merge+node.value
def construct_mapping(self,node,deep=_B):
if isinstance(node,MappingNode):self.flatten_mapping(node)
return BaseConstructor.construct_mapping(self,node,deep=deep)
def construct_yaml_null(self,node):self.construct_scalar(node);return _A
bool_values={'yes':_C,'no':_B,'y':_C,'n':_B,'true':_C,'false':_B,'on':_C,'off':_B}
def construct_yaml_bool(self,node):value=self.construct_scalar(node);return self.bool_values[value.lower()]
def construct_yaml_int(self,node):
value_s=to_str(self.construct_scalar(node));value_s=value_s.replace(_D,'');sign=+1
if value_s[0]==_J:sign=-1
if value_s[0]in _M:value_s=value_s[1:]
if value_s==_F:return 0
elif value_s.startswith('0b'):return sign*int(value_s[2:],2)
elif value_s.startswith('0x'):return sign*int(value_s[2:],16)
elif value_s.startswith('0o'):return sign*int(value_s[2:],8)
elif self.resolver.processing_version==(1,1)and value_s[0]==_F:return sign*int(value_s,8)
elif self.resolver.processing_version==(1,1)and _G in value_s:
digits=[int(part)for part in value_s.split(_G)];digits.reverse();base=1;value=0
for digit in digits:value+=digit*base;base*=60
return sign*value
else:return sign*int(value_s)
inf_value=1e+300
while inf_value!=inf_value*inf_value:inf_value*=inf_value
nan_value=-inf_value/inf_value
def construct_yaml_float(self,node):
value_so=to_str(self.construct_scalar(node));value_s=value_so.replace(_D,'').lower();sign=+1
if value_s[0]==_J:sign=-1
if value_s[0]in _M:value_s=value_s[1:]
if value_s==_p:return sign*self.inf_value
elif value_s==_q:return self.nan_value
elif self.resolver.processing_version!=(1,2)and _G in value_s:
digits=[float(part)for part in value_s.split(_G)];digits.reverse();base=1;value=0.0
for digit in digits:value+=digit*base;base*=60
return sign*value
else:
if self.resolver.processing_version!=(1,2)and _N in value_s:
mantissa,exponent=value_s.split(_N)
if _H not in mantissa:warnings.warn(MantissaNoDotYAML1_1Warning(node,value_so))
return sign*float(value_s)
if PY3:
def construct_yaml_binary(self,node):
try:value=self.construct_scalar(node).encode(_S)
except UnicodeEncodeError as exc:raise ConstructorError(_A,_A,_r%exc,node.start_mark)
try:
if hasattr(base64,_s):return base64.decodebytes(value)
else:return base64.decodestring(value)
except binascii.Error as exc:raise ConstructorError(_A,_A,_a%exc,node.start_mark)
else:
def construct_yaml_binary(self,node):
value=self.construct_scalar(node)
try:return to_str(value).decode('base64')
except (binascii.Error,UnicodeEncodeError)as exc:raise ConstructorError(_A,_A,_a%exc,node.start_mark)
timestamp_regexp=RegExp('^(?P<year>[0-9][0-9][0-9][0-9])\n -(?P<month>[0-9][0-9]?)\n -(?P<day>[0-9][0-9]?)\n (?:((?P<t>[Tt])|[ \\t]+) # explictly not retaining extra spaces\n (?P<hour>[0-9][0-9]?)\n :(?P<minute>[0-9][0-9])\n :(?P<second>[0-9][0-9])\n (?:\\.(?P<fraction>[0-9]*))?\n (?:[ \\t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)\n (?::(?P<tz_minute>[0-9][0-9]))?))?)?$',re.X)
def construct_yaml_timestamp(self,node,values=_A):
if values is _A:
try:match=self.timestamp_regexp.match(node.value)
except TypeError:match=_A
if match is _A:raise ConstructorError(_A,_A,_t.format(node.value),node.start_mark)
values=match.groupdict()
year=int(values[_u]);month=int(values[_v]);day=int(values[_w])
if not values[_T]:return datetime.date(year,month,day)
hour=int(values[_T]);minute=int(values[_x]);second=int(values[_y]);fraction=0
if values[_I]:
fraction_s=values[_I][:6]
while len(fraction_s)<6:fraction_s+=_F
fraction=int(fraction_s)
if len(values[_I])>6 and int(values[_I][6])>4:fraction+=1
delta=_A
if values[_K]:
tz_hour=int(values[_U]);minutes=values[_O];tz_minute=int(minutes)if minutes else 0;delta=datetime.timedelta(hours=tz_hour,minutes=tz_minute)
if values[_K]==_J:delta=-delta
data=datetime.datetime(year,month,day,hour,minute,second,fraction)
if delta:data-=delta
return data
def construct_yaml_omap(self,node):
omap=ordereddict();yield omap
if not isinstance(node,SequenceNode):raise ConstructorError(_L,node.start_mark,_b%node.id,node.start_mark)
for subnode in node.value:
if not isinstance(subnode,MappingNode):raise ConstructorError(_L,node.start_mark,_c%subnode.id,subnode.start_mark)
if len(subnode.value)!=1:raise ConstructorError(_L,node.start_mark,_d%len(subnode.value),subnode.start_mark)
key_node,value_node=subnode.value[0];key=self.construct_object(key_node);assert key not in omap;value=self.construct_object(value_node);omap[key]=value
def construct_yaml_pairs(self,node):
A='while constructing pairs';pairs=[];yield pairs
if not isinstance(node,SequenceNode):raise ConstructorError(A,node.start_mark,_b%node.id,node.start_mark)
for subnode in node.value:
if not isinstance(subnode,MappingNode):raise ConstructorError(A,node.start_mark,_c%subnode.id,subnode.start_mark)
if len(subnode.value)!=1:raise ConstructorError(A,node.start_mark,_d%len(subnode.value),subnode.start_mark)
key_node,value_node=subnode.value[0];key=self.construct_object(key_node);value=self.construct_object(value_node);pairs.append((key,value))
def construct_yaml_set(self,node):data=set();yield data;value=self.construct_mapping(node);data.update(value)
def construct_yaml_str(self,node):
value=self.construct_scalar(node)
if PY3:return value
try:return value.encode(_S)
except UnicodeEncodeError:return value
def construct_yaml_seq(self,node):data=self.yaml_base_list_type();yield data;data.extend(self.construct_sequence(node))
def construct_yaml_map(self,node):data=self.yaml_base_dict_type();yield data;value=self.construct_mapping(node);data.update(value)
def construct_yaml_object(self,node,cls):
data=cls.__new__(cls);yield data
if hasattr(data,_V):state=self.construct_mapping(node,deep=_C);data.__setstate__(state)
else:state=self.construct_mapping(node);data.__dict__.update(state)
def construct_undefined(self,node):raise ConstructorError(_A,_A,_z%utf8(node.tag),node.start_mark)
SafeConstructor.add_constructor(_A0,SafeConstructor.construct_yaml_null)
SafeConstructor.add_constructor(_A1,SafeConstructor.construct_yaml_bool)
SafeConstructor.add_constructor(_A2,SafeConstructor.construct_yaml_int)
SafeConstructor.add_constructor(_A3,SafeConstructor.construct_yaml_float)
SafeConstructor.add_constructor(_A4,SafeConstructor.construct_yaml_binary)
SafeConstructor.add_constructor(_A5,SafeConstructor.construct_yaml_timestamp)
SafeConstructor.add_constructor(_A6,SafeConstructor.construct_yaml_omap)
SafeConstructor.add_constructor(_A7,SafeConstructor.construct_yaml_pairs)
SafeConstructor.add_constructor(_A8,SafeConstructor.construct_yaml_set)
SafeConstructor.add_constructor(_R,SafeConstructor.construct_yaml_str)
SafeConstructor.add_constructor(_A9,SafeConstructor.construct_yaml_seq)
SafeConstructor.add_constructor(_AA,SafeConstructor.construct_yaml_map)
SafeConstructor.add_constructor(_A,SafeConstructor.construct_undefined)
if PY2:
class classobj:0
class Constructor(SafeConstructor):
def construct_python_str(self,node):return utf8(self.construct_scalar(node))
def construct_python_unicode(self,node):return self.construct_scalar(node)
if PY3:
def construct_python_bytes(self,node):
try:value=self.construct_scalar(node).encode(_S)
except UnicodeEncodeError as exc:raise ConstructorError(_A,_A,_r%exc,node.start_mark)
try:
if hasattr(base64,_s):return base64.decodebytes(value)
else:return base64.decodestring(value)
except binascii.Error as exc:raise ConstructorError(_A,_A,_a%exc,node.start_mark)
def construct_python_long(self,node):
val=self.construct_yaml_int(node)
if PY3:return val
return int(val)
def construct_python_complex(self,node):return complex(self.construct_scalar(node))
def construct_python_tuple(self,node):return tuple(self.construct_sequence(node))
def find_python_module(self,name,mark):
if not name:raise ConstructorError(_e,mark,_AB,mark)
try:__import__(name)
except ImportError as exc:raise ConstructorError(_e,mark,_AC%(utf8(name),exc),mark)
return sys.modules[name]
def find_python_name(self,name,mark):
A='while constructing a Python object'
if not name:raise ConstructorError(A,mark,_AB,mark)
if _H in name:
lname=name.split(_H);lmodule_name=lname;lobject_name=[]
while len(lmodule_name)>1:
lobject_name.insert(0,lmodule_name.pop());module_name=_H.join(lmodule_name)
try:__import__(module_name);break
except ImportError:continue
else:module_name=builtins_module;lobject_name=[name]
try:__import__(module_name)
except ImportError as exc:raise ConstructorError(A,mark,_AC%(utf8(module_name),exc),mark)
module=sys.modules[module_name];object_name=_H.join(lobject_name);obj=module
while lobject_name:
if not hasattr(obj,lobject_name[0]):raise ConstructorError(A,mark,'cannot find %r in the module %r'%(utf8(object_name),module.__name__),mark)
obj=getattr(obj,lobject_name.pop(0))
return obj
def construct_python_name(self,suffix,node):
value=self.construct_scalar(node)
if value:raise ConstructorError('while constructing a Python name',node.start_mark,_AD%utf8(value),node.start_mark)
return self.find_python_name(suffix,node.start_mark)
def construct_python_module(self,suffix,node):
value=self.construct_scalar(node)
if value:raise ConstructorError(_e,node.start_mark,_AD%utf8(value),node.start_mark)
return self.find_python_module(suffix,node.start_mark)
def make_python_instance(self,suffix,node,args=_A,kwds=_A,newobj=_B):
if not args:args=[]
if not kwds:kwds={}
cls=self.find_python_name(suffix,node.start_mark)
if PY3:
if newobj and isinstance(cls,type):return cls.__new__(cls,*args,**kwds)
else:return cls(*args,**kwds)
elif newobj and isinstance(cls,type(classobj))and not args and not kwds:instance=classobj();instance.__class__=cls;return instance
elif newobj and isinstance(cls,type):return cls.__new__(cls,*args,**kwds)
else:return cls(*args,**kwds)
def set_python_instance_state(self,instance,state):
if hasattr(instance,_V):instance.__setstate__(state)
else:
slotstate={}
if isinstance(state,tuple)and len(state)==2:state,slotstate=state
if hasattr(instance,'__dict__'):instance.__dict__.update(state)
elif state:slotstate.update(state)
for (key,value) in slotstate.items():setattr(instance,key,value)
def construct_python_object(self,suffix,node):instance=self.make_python_instance(suffix,node,newobj=_C);self.recursive_objects[node]=instance;yield instance;deep=hasattr(instance,_V);state=self.construct_mapping(node,deep=deep);self.set_python_instance_state(instance,state)
def construct_python_object_apply(self,suffix,node,newobj=_B):
if isinstance(node,SequenceNode):args=self.construct_sequence(node,deep=_C);kwds={};state={};listitems=[];dictitems={}
else:value=self.construct_mapping(node,deep=_C);args=value.get('args',[]);kwds=value.get('kwds',{});state=value.get('state',{});listitems=value.get('listitems',[]);dictitems=value.get('dictitems',{})
instance=self.make_python_instance(suffix,node,args,kwds,newobj)
if bool(state):self.set_python_instance_state(instance,state)
if bool(listitems):instance.extend(listitems)
if bool(dictitems):
for key in dictitems:instance[key]=dictitems[key]
return instance
def construct_python_object_new(self,suffix,node):return self.construct_python_object_apply(suffix,node,newobj=_C)
Constructor.add_constructor('tag:yaml.org,2002:python/none',Constructor.construct_yaml_null)
Constructor.add_constructor('tag:yaml.org,2002:python/bool',Constructor.construct_yaml_bool)
Constructor.add_constructor('tag:yaml.org,2002:python/str',Constructor.construct_python_str)
Constructor.add_constructor('tag:yaml.org,2002:python/unicode',Constructor.construct_python_unicode)
if PY3:Constructor.add_constructor('tag:yaml.org,2002:python/bytes',Constructor.construct_python_bytes)
Constructor.add_constructor('tag:yaml.org,2002:python/int',Constructor.construct_yaml_int)
Constructor.add_constructor('tag:yaml.org,2002:python/long',Constructor.construct_python_long)
Constructor.add_constructor('tag:yaml.org,2002:python/float',Constructor.construct_yaml_float)
Constructor.add_constructor('tag:yaml.org,2002:python/complex',Constructor.construct_python_complex)
Constructor.add_constructor('tag:yaml.org,2002:python/list',Constructor.construct_yaml_seq)
Constructor.add_constructor('tag:yaml.org,2002:python/tuple',Constructor.construct_python_tuple)
Constructor.add_constructor('tag:yaml.org,2002:python/dict',Constructor.construct_yaml_map)
Constructor.add_multi_constructor('tag:yaml.org,2002:python/name:',Constructor.construct_python_name)
Constructor.add_multi_constructor('tag:yaml.org,2002:python/module:',Constructor.construct_python_module)
Constructor.add_multi_constructor('tag:yaml.org,2002:python/object:',Constructor.construct_python_object)
Constructor.add_multi_constructor('tag:yaml.org,2002:python/object/apply:',Constructor.construct_python_object_apply)
Constructor.add_multi_constructor('tag:yaml.org,2002:python/object/new:',Constructor.construct_python_object_new)
class RoundTripConstructor(SafeConstructor):
def construct_scalar(self,node):
A='\x07'
if not isinstance(node,ScalarNode):raise ConstructorError(_A,_A,_g%node.id,node.start_mark)
if node.style=='|'and isinstance(node.value,text_type):
lss=LiteralScalarString(node.value,anchor=node.anchor)
if node.comment and node.comment[1]:lss.comment=node.comment[1][0]
return lss
if node.style=='>'and isinstance(node.value,text_type):
fold_positions=[];idx=-1
while _C:
idx=node.value.find(A,idx+1)
if idx<0:break
fold_positions.append(idx-len(fold_positions))
fss=FoldedScalarString(node.value.replace(A,''),anchor=node.anchor)
if node.comment and node.comment[1]:fss.comment=node.comment[1][0]
if fold_positions:fss.fold_pos=fold_positions
return fss
elif bool(self._preserve_quotes)and isinstance(node.value,text_type):
if node.style=="'":return SingleQuotedScalarString(node.value,anchor=node.anchor)
if node.style=='"':return DoubleQuotedScalarString(node.value,anchor=node.anchor)
if node.anchor:return PlainScalarString(node.value,anchor=node.anchor)
return node.value
def construct_yaml_int(self,node):
width=_A;value_su=to_str(self.construct_scalar(node))
try:sx=value_su.rstrip(_D);underscore=[len(sx)-sx.rindex(_D)-1,_B,_B]
except ValueError:underscore=_A
except IndexError:underscore=_A
value_s=value_su.replace(_D,'');sign=+1
if value_s[0]==_J:sign=-1
if value_s[0]in _M:value_s=value_s[1:]
if value_s==_F:return 0
elif value_s.startswith('0b'):
if self.resolver.processing_version>(1,1)and value_s[2]==_F:width=len(value_s[2:])
if underscore is not _A:underscore[1]=value_su[2]==_D;underscore[2]=len(value_su[2:])>1 and value_su[-1]==_D
return BinaryInt(sign*int(value_s[2:],2),width=width,underscore=underscore,anchor=node.anchor)
elif value_s.startswith('0x'):
if self.resolver.processing_version>(1,1)and value_s[2]==_F:width=len(value_s[2:])
hex_fun=HexInt
for ch in value_s[2:]:
if ch in'ABCDEF':hex_fun=HexCapsInt;break
if ch in'abcdef':break
if underscore is not _A:underscore[1]=value_su[2]==_D;underscore[2]=len(value_su[2:])>1 and value_su[-1]==_D
return hex_fun(sign*int(value_s[2:],16),width=width,underscore=underscore,anchor=node.anchor)
elif value_s.startswith('0o'):
if self.resolver.processing_version>(1,1)and value_s[2]==_F:width=len(value_s[2:])
if underscore is not _A:underscore[1]=value_su[2]==_D;underscore[2]=len(value_su[2:])>1 and value_su[-1]==_D
return OctalInt(sign*int(value_s[2:],8),width=width,underscore=underscore,anchor=node.anchor)
elif self.resolver.processing_version!=(1,2)and value_s[0]==_F:return sign*int(value_s,8)
elif self.resolver.processing_version!=(1,2)and _G in value_s:
digits=[int(part)for part in value_s.split(_G)];digits.reverse();base=1;value=0
for digit in digits:value+=digit*base;base*=60
return sign*value
elif self.resolver.processing_version>(1,1)and value_s[0]==_F:
if underscore is not _A:underscore[2]=len(value_su)>1 and value_su[-1]==_D
return ScalarInt(sign*int(value_s),width=len(value_s),underscore=underscore)
elif underscore:underscore[2]=len(value_su)>1 and value_su[-1]==_D;return ScalarInt(sign*int(value_s),width=_A,underscore=underscore,anchor=node.anchor)
elif node.anchor:return ScalarInt(sign*int(value_s),width=_A,anchor=node.anchor)
else:return sign*int(value_s)
def construct_yaml_float(self,node):
A='E'
def leading_zeros(v):
lead0=0;idx=0
while idx<len(v)and v[idx]in'0.':
if v[idx]==_F:lead0+=1
idx+=1
return lead0
m_sign=_B;value_so=to_str(self.construct_scalar(node));value_s=value_so.replace(_D,'').lower();sign=+1
if value_s[0]==_J:sign=-1
if value_s[0]in _M:m_sign=value_s[0];value_s=value_s[1:]
if value_s==_p:return sign*self.inf_value
if value_s==_q:return self.nan_value
if self.resolver.processing_version!=(1,2)and _G in value_s:
digits=[float(part)for part in value_s.split(_G)];digits.reverse();base=1;value=0.0
for digit in digits:value+=digit*base;base*=60
return sign*value
if _N in value_s:
try:mantissa,exponent=value_so.split(_N);exp=_N
except ValueError:mantissa,exponent=value_so.split(A);exp=A
if self.resolver.processing_version!=(1,2):
if _H not in mantissa:warnings.warn(MantissaNoDotYAML1_1Warning(node,value_so))
lead0=leading_zeros(mantissa);width=len(mantissa);prec=mantissa.find(_H)
if m_sign:width-=1
e_width=len(exponent);e_sign=exponent[0]in _M;return ScalarFloat(sign*float(value_s),width=width,prec=prec,m_sign=m_sign,m_lead0=lead0,exp=exp,e_width=e_width,e_sign=e_sign,anchor=node.anchor)
width=len(value_so);prec=value_so.index(_H);lead0=leading_zeros(value_so);return ScalarFloat(sign*float(value_s),width=width,prec=prec,m_sign=m_sign,m_lead0=lead0,anchor=node.anchor)
def construct_yaml_str(self,node):
value=self.construct_scalar(node)
if isinstance(value,ScalarString):return value
if PY3:return value
try:return value.encode(_S)
except AttributeError:return value
except UnicodeEncodeError:return value
def construct_rt_sequence(self,node,seqtyp,deep=_B):
if not isinstance(node,SequenceNode):raise ConstructorError(_A,_A,_h%node.id,node.start_mark)
ret_val=[]
if node.comment:
seqtyp._yaml_add_comment(node.comment[:2])
if len(node.comment)>2:seqtyp.yaml_end_comment_extend(node.comment[2],clear=_C)
if node.anchor:
from dynaconf.vendor.ruamel.yaml.serializer import templated_id
if not templated_id(node.anchor):seqtyp.yaml_set_anchor(node.anchor)
for (idx,child) in enumerate(node.value):
if child.comment:seqtyp._yaml_add_comment(child.comment,key=idx);child.comment=_A
ret_val.append(self.construct_object(child,deep=deep));seqtyp._yaml_set_idx_line_col(idx,[child.start_mark.line,child.start_mark.column])
return ret_val
def flatten_mapping(self,node):
def constructed(value_node):
if value_node in self.constructed_objects:value=self.constructed_objects[value_node]
else:value=self.construct_object(value_node,deep=_B)
return value
merge_map_list=[];index=0
while index<len(node.value):
key_node,value_node=node.value[index]
if key_node.tag==_k:
if merge_map_list:
if self.allow_duplicate_keys:del node.value[index];index+=1;continue
args=[_E,node.start_mark,_Y.format(key_node.value),key_node.start_mark,_l,_m]
if self.allow_duplicate_keys is _A:warnings.warn(DuplicateKeyFutureWarning(*args))
else:raise DuplicateKeyError(*args)
del node.value[index]
if isinstance(value_node,MappingNode):merge_map_list.append((index,constructed(value_node)))
elif isinstance(value_node,SequenceNode):
for subnode in value_node.value:
if not isinstance(subnode,MappingNode):raise ConstructorError(_E,node.start_mark,_n%subnode.id,subnode.start_mark)
merge_map_list.append((index,constructed(subnode)))
else:raise ConstructorError(_E,node.start_mark,_o%value_node.id,value_node.start_mark)
elif key_node.tag==_Z:key_node.tag=_R;index+=1
else:index+=1
return merge_map_list
def _sentinel(self):0
def construct_mapping(self,node,maptyp,deep=_B):
if not isinstance(node,MappingNode):raise ConstructorError(_A,_A,_P%node.id,node.start_mark)
merge_map=self.flatten_mapping(node)
if node.comment:
maptyp._yaml_add_comment(node.comment[:2])
if len(node.comment)>2:maptyp.yaml_end_comment_extend(node.comment[2],clear=_C)
if node.anchor:
from dynaconf.vendor.ruamel.yaml.serializer import templated_id
if not templated_id(node.anchor):maptyp.yaml_set_anchor(node.anchor)
last_key,last_value=_A,self._sentinel
for (key_node,value_node) in node.value:
key=self.construct_object(key_node,deep=_C)
if not isinstance(key,Hashable):
if isinstance(key,MutableSequence):
key_s=CommentedKeySeq(key)
if key_node.flow_style is _C:key_s.fa.set_flow_style()
elif key_node.flow_style is _B:key_s.fa.set_block_style()
key=key_s
elif isinstance(key,MutableMapping):
key_m=CommentedKeyMap(key)
if key_node.flow_style is _C:key_m.fa.set_flow_style()
elif key_node.flow_style is _B:key_m.fa.set_block_style()
key=key_m
if PY2:
try:hash(key)
except TypeError as exc:raise ConstructorError(_E,node.start_mark,_W%exc,key_node.start_mark)
elif not isinstance(key,Hashable):raise ConstructorError(_E,node.start_mark,_X,key_node.start_mark)
value=self.construct_object(value_node,deep=deep)
if self.check_mapping_key(node,key_node,maptyp,key,value):
if key_node.comment and len(key_node.comment)>4 and key_node.comment[4]:
if last_value is _A:key_node.comment[0]=key_node.comment.pop(4);maptyp._yaml_add_comment(key_node.comment,value=last_key)
else:key_node.comment[2]=key_node.comment.pop(4);maptyp._yaml_add_comment(key_node.comment,key=key)
key_node.comment=_A
if key_node.comment:maptyp._yaml_add_comment(key_node.comment,key=key)
if value_node.comment:maptyp._yaml_add_comment(value_node.comment,value=key)
maptyp._yaml_set_kv_line_col(key,[key_node.start_mark.line,key_node.start_mark.column,value_node.start_mark.line,value_node.start_mark.column]);maptyp[key]=value;last_key,last_value=key,value
if merge_map:maptyp.add_yaml_merge(merge_map)
def construct_setting(self,node,typ,deep=_B):
if not isinstance(node,MappingNode):raise ConstructorError(_A,_A,_P%node.id,node.start_mark)
if node.comment:
typ._yaml_add_comment(node.comment[:2])
if len(node.comment)>2:typ.yaml_end_comment_extend(node.comment[2],clear=_C)
if node.anchor:
from dynaconf.vendor.ruamel.yaml.serializer import templated_id
if not templated_id(node.anchor):typ.yaml_set_anchor(node.anchor)
for (key_node,value_node) in node.value:
key=self.construct_object(key_node,deep=_C)
if not isinstance(key,Hashable):
if isinstance(key,list):key=tuple(key)
if PY2:
try:hash(key)
except TypeError as exc:raise ConstructorError(_E,node.start_mark,_W%exc,key_node.start_mark)
elif not isinstance(key,Hashable):raise ConstructorError(_E,node.start_mark,_X,key_node.start_mark)
value=self.construct_object(value_node,deep=deep);self.check_set_key(node,key_node,typ,key)
if key_node.comment:typ._yaml_add_comment(key_node.comment,key=key)
if value_node.comment:typ._yaml_add_comment(value_node.comment,value=key)
typ.add(key)
def construct_yaml_seq(self,node):
data=CommentedSeq();data._yaml_set_line_col(node.start_mark.line,node.start_mark.column)
if node.comment:data._yaml_add_comment(node.comment)
yield data;data.extend(self.construct_rt_sequence(node,data));self.set_collection_style(data,node)
def construct_yaml_map(self,node):data=CommentedMap();data._yaml_set_line_col(node.start_mark.line,node.start_mark.column);yield data;self.construct_mapping(node,data,deep=_C);self.set_collection_style(data,node)
def set_collection_style(self,data,node):
if len(data)==0:return
if node.flow_style is _C:data.fa.set_flow_style()
elif node.flow_style is _B:data.fa.set_block_style()
def construct_yaml_object(self,node,cls):
data=cls.__new__(cls);yield data
if hasattr(data,_V):state=SafeConstructor.construct_mapping(self,node,deep=_C);data.__setstate__(state)
else:state=SafeConstructor.construct_mapping(self,node);data.__dict__.update(state)
def construct_yaml_omap(self,node):
omap=CommentedOrderedMap();omap._yaml_set_line_col(node.start_mark.line,node.start_mark.column)
if node.flow_style is _C:omap.fa.set_flow_style()
elif node.flow_style is _B:omap.fa.set_block_style()
yield omap
if node.comment:
omap._yaml_add_comment(node.comment[:2])
if len(node.comment)>2:omap.yaml_end_comment_extend(node.comment[2],clear=_C)
if not isinstance(node,SequenceNode):raise ConstructorError(_L,node.start_mark,_b%node.id,node.start_mark)
for subnode in node.value:
if not isinstance(subnode,MappingNode):raise ConstructorError(_L,node.start_mark,_c%subnode.id,subnode.start_mark)
if len(subnode.value)!=1:raise ConstructorError(_L,node.start_mark,_d%len(subnode.value),subnode.start_mark)
key_node,value_node=subnode.value[0];key=self.construct_object(key_node);assert key not in omap;value=self.construct_object(value_node)
if key_node.comment:omap._yaml_add_comment(key_node.comment,key=key)
if subnode.comment:omap._yaml_add_comment(subnode.comment,key=key)
if value_node.comment:omap._yaml_add_comment(value_node.comment,value=key)
omap[key]=value
def construct_yaml_set(self,node):data=CommentedSet();data._yaml_set_line_col(node.start_mark.line,node.start_mark.column);yield data;self.construct_setting(node,data)
def construct_undefined(self,node):
try:
if isinstance(node,MappingNode):
data=CommentedMap();data._yaml_set_line_col(node.start_mark.line,node.start_mark.column)
if node.flow_style is _C:data.fa.set_flow_style()
elif node.flow_style is _B:data.fa.set_block_style()
data.yaml_set_tag(node.tag);yield data
if node.anchor:data.yaml_set_anchor(node.anchor)
self.construct_mapping(node,data);return
elif isinstance(node,ScalarNode):
data2=TaggedScalar();data2.value=self.construct_scalar(node);data2.style=node.style;data2.yaml_set_tag(node.tag);yield data2
if node.anchor:data2.yaml_set_anchor(node.anchor,always_dump=_C)
return
elif isinstance(node,SequenceNode):
data3=CommentedSeq();data3._yaml_set_line_col(node.start_mark.line,node.start_mark.column)
if node.flow_style is _C:data3.fa.set_flow_style()
elif node.flow_style is _B:data3.fa.set_block_style()
data3.yaml_set_tag(node.tag);yield data3
if node.anchor:data3.yaml_set_anchor(node.anchor)
data3.extend(self.construct_sequence(node));return
except:pass
raise ConstructorError(_A,_A,_z%utf8(node.tag),node.start_mark)
def construct_yaml_timestamp(self,node,values=_A):
B='t';A='tz'
try:match=self.timestamp_regexp.match(node.value)
except TypeError:match=_A
if match is _A:raise ConstructorError(_A,_A,_t.format(node.value),node.start_mark)
values=match.groupdict()
if not values[_T]:return SafeConstructor.construct_yaml_timestamp(self,node,values)
for part in [B,_K,_U,_O]:
if values[part]:break
else:return SafeConstructor.construct_yaml_timestamp(self,node,values)
year=int(values[_u]);month=int(values[_v]);day=int(values[_w]);hour=int(values[_T]);minute=int(values[_x]);second=int(values[_y]);fraction=0
if values[_I]:
fraction_s=values[_I][:6]
while len(fraction_s)<6:fraction_s+=_F
fraction=int(fraction_s)
if len(values[_I])>6 and int(values[_I][6])>4:fraction+=1
delta=_A
if values[_K]:
tz_hour=int(values[_U]);minutes=values[_O];tz_minute=int(minutes)if minutes else 0;delta=datetime.timedelta(hours=tz_hour,minutes=tz_minute)
if values[_K]==_J:delta=-delta
if delta:
dt=datetime.datetime(year,month,day,hour,minute);dt-=delta;data=TimeStamp(dt.year,dt.month,dt.day,dt.hour,dt.minute,second,fraction);data._yaml['delta']=delta;tz=values[_K]+values[_U]
if values[_O]:tz+=_G+values[_O]
data._yaml[A]=tz
else:
data=TimeStamp(year,month,day,hour,minute,second,fraction)
if values[A]:data._yaml[A]=values[A]
if values[B]:data._yaml[B]=_C
return data
def construct_yaml_bool(self,node):
b=SafeConstructor.construct_yaml_bool(self,node)
if node.anchor:return ScalarBoolean(b,anchor=node.anchor)
return b
RoundTripConstructor.add_constructor(_A0,RoundTripConstructor.construct_yaml_null)
RoundTripConstructor.add_constructor(_A1,RoundTripConstructor.construct_yaml_bool)
RoundTripConstructor.add_constructor(_A2,RoundTripConstructor.construct_yaml_int)
RoundTripConstructor.add_constructor(_A3,RoundTripConstructor.construct_yaml_float)
RoundTripConstructor.add_constructor(_A4,RoundTripConstructor.construct_yaml_binary)
RoundTripConstructor.add_constructor(_A5,RoundTripConstructor.construct_yaml_timestamp)
RoundTripConstructor.add_constructor(_A6,RoundTripConstructor.construct_yaml_omap)
RoundTripConstructor.add_constructor(_A7,RoundTripConstructor.construct_yaml_pairs)
RoundTripConstructor.add_constructor(_A8,RoundTripConstructor.construct_yaml_set)
RoundTripConstructor.add_constructor(_R,RoundTripConstructor.construct_yaml_str)
RoundTripConstructor.add_constructor(_A9,RoundTripConstructor.construct_yaml_seq)
RoundTripConstructor.add_constructor(_AA,RoundTripConstructor.construct_yaml_map)
RoundTripConstructor.add_constructor(_A,RoundTripConstructor.construct_undefined)
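# Hedged usage sketch (not part of the vendored module): the RoundTripConstructor
# registered above is the constructor behind ruamel.yaml's round-trip mode, which
# preserves comments, quoting style and integer/float formatting across a
# load/dump cycle. Assuming the upstream ruamel.yaml package behaves like this
# vendored copy:
#
#     import io, sys
#     from ruamel.yaml import YAML
#     yaml = YAML()                              # round-trip mode is the default
#     data = yaml.load(io.StringIO('retries: 0x0A  # hex on purpose\n'))
#     assert data['retries'] == 10               # loaded as an int subclass (HexInt)
#     yaml.dump(data, sys.stdout)                # re-emits the hex literal and comment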
|
|
from flask import Flask, redirect, render_template, session, url_for, flash, request, \
    send_file, send_from_directory, abort, current_app, make_response, jsonify
from flask_wtf import Form
from flask_sqlalchemy import SQLAlchemy, get_debug_queries
from wtforms import StringField, SubmitField
from wtforms.validators import Required
from flask_bootstrap import Bootstrap
from flask_mail import Mail, Message
from flask_login import login_required, current_user
import os
import zipfile
app = Flask(__name__)
from . import main
from .forms import ContactForm
from app import mail, create_app
# from app.matrix_functions import all_species_unreleased, all_populations_unreleased,all_matrices_unreleased, \
# all_species_unreleased_complete, all_populations_unreleased_complete, all_matrices_unreleased_complete, \
# all_species_released_complete, all_populations_released_complete, all_matrices_released_complete, \
# all_species_released_compadre, all_populations_released_compadre, all_matrices_released_compadre, \
# all_species_released_comadre, all_populations_released_comadre, all_matrices_released_comadre, \
# all_matrices, all_pops, all_species, count_plants, count_comadre, count_compadre, count_plants_pop, count_compadre_pop, count_comadre_pop, species_compadre_count, species_comadre_count
from ..data_manage.forms import SpeciesForm, TaxonomyForm, TraitForm, PopulationForm, MatrixForm, PublicationForm, DeleteForm
import random
from .. import db
from ..models import Permission, Role, User, \
IUCNStatus, OrganismType, GrowthFormRaunkiaer, ReproductiveRepetition, \
DicotMonoc, AngioGymno, SpandExGrowthType, SourceType, Database, Purpose, MissingData, ContentEmail, Ecoregion, Continent, InvasiveStatusStudy, InvasiveStatusElsewhere, StageTypeClass, \
TransitionType, MatrixComposition, StartSeason, EndSeason, StudiedSex, Captivity, Species, Taxonomy, PurposeEndangered, PurposeWeed, Trait, \
Publication, AuthorContact, AdditionalSource, Population, Stage, StageType, Treatment, \
MatrixStage, MatrixValue, Matrix, Interval, Fixed, Small, CensusTiming, Institute, Status, Version, ChangeLogger, DigitizationProtocol
from ..decorators import admin_required, permission_required, crossdomain
from werkzeug.utils import secure_filename
@main.after_app_request
def after_request(response):
for query in get_debug_queries():
if query.duration >= current_app.config['FLASKY_SLOW_DB_QUERY_TIME']:
current_app.logger.warning(
'Slow query: %s\nParameters: %s\nDuration: %fs\nContext: %s\n'
% (query.statement, query.parameters, query.duration,
query.context))
return response
#@main.route('/shutdown')
#def server_shutdown():
# if not current_app.testing:
# abort(404)
# shutdown = request.environ.get('werkzeug.server.shutdown')
# if not shutdown:
# abort(500)
# shutdown()
# return 'Shutting down...'
# HOMEPAGE
@main.route('/', methods=['GET', 'POST'])
def index():
# ##Released and Complete Stats
# #1. Total
# all_species_released_green = all_species_released_complete()
# all_populations_released_green = all_populations_released_complete()
# all_matrices_released_green = all_matrices_released_complete()
# #2. Compadre
# all_species_released_compadre_green = all_species_released_compadre()
# all_populations_released_compadre_green = all_populations_released_compadre()
# all_matrices_released_compadre_green = all_matrices_released_compadre()
# #3. Comadre
# all_species_released_comadre_green = all_species_released_comadre()
# all_populations_released_comadre_green = all_populations_released_comadre()
# all_matrices_released_comadre_green = all_matrices_released_comadre()
# ##Released and Incomplete Stats
# #NDY 1. total, 2. compadre, 3. comadre
# ##Unreleased and Incomplete Stats
# #1. Total
# all_species_unreleased_amber = all_species_unreleased()
# all_populations_unreleased_amber = all_populations_unreleased()
# all_matrices_unreleased_amber = all_matrices_unreleased()
# #2. Compadre
# #NDY
# #3. Comadre
# #NDY
# ##Unreleased and Complete Stats
# #1. Total
# all_species_unreleased_green = all_species_unreleased_complete()
# all_populations_unreleased_green = all_populations_unreleased_complete()
# all_matrices_unreleased_green = all_matrices_unreleased_complete()
# #2. Compadre
# #3. Comadre
# ##Admin use only stats
# #Matrix Stats
# count_matrices = all_matrices()
# comadre_count = count_comadre()
# compadre_count = count_compadre()
# plant_count = count_plants()
# #Population Stats
# count_pops = all_pops()
# comadre_count_pop = count_comadre_pop()
# compadre_count_pop = count_compadre_pop()
# plant_count_pop = count_plants_pop()
# #Species stats
# species_count = all_species()
# species_count_compadre = species_compadre_count()
# species_count_comadre = species_comadre_count()
species = Species.query.filter(Species.image_path != None).all()
number = len(species)
species2 = []
for i in range(1,5):
random_int = random.randint(0,number-1)
s = species[random_int]
while "www" in s.image_path:
random_int = random.randint(0,number-1)
s = species[random_int]
species2.append(s)
return render_template('index.html',species2 = species2)
# all_species_released_green = all_species_released_green, all_populations_released_green = all_populations_released_green,
# all_matrices_released_green = all_matrices_released_green, all_species_released_compadre_green = all_species_released_compadre_green,
# all_populations_released_compadre_green = all_populations_released_compadre_green, all_matrices_released_compadre_green = all_matrices_released_compadre_green,
# all_species_released_comadre_green = all_species_released_comadre_green, all_populations_released_comadre_green = all_populations_released_comadre_green, all_matrices_released_comadre_green = all_matrices_released_comadre_green,
# all_species_unreleased_amber = all_species_unreleased_amber, all_species_unreleased_green = all_species_unreleased_green,
# all_populations_unreleased_amber = all_populations_unreleased_amber, all_populations_unreleased_green = all_populations_unreleased_green,
# all_matrices_unreleased_amber = all_matrices_unreleased_amber, all_matrices_unreleased_green = all_matrices_unreleased_green,
# count_matrices = count_matrices, comadre_count = comadre_count, compadre_count = compadre_count, plant_count = plant_count,
# count_pops = count_pops, comadre_count_pop = comadre_count_pop, compadre_count_pop = compadre_count_pop, plant_count_pop = plant_count_pop,
# species_count = species_count, species_count_compadre = species_count_compadre, species_count_comadre = species_count_comadre)
# now defunct 'display all data' page
@main.route('/data/')
# @login_required
def data():
species = Species.query.all()
return render_template('data.html', species=species)
### TABLE PAGES
# the big table of species
@main.route('/species-table/')
def species_table():
can_edit = False
try:
if current_user.role_id in [1,3,4,6]:
can_edit = True
except:
pass
    print(can_edit)
species = Species.query.all()
return render_template('species_table_template.html', species=species,can_edit = can_edit)
# the big table of publications
@main.route('/publications-table/')
def publications_table():
can_edit = False
try:
if current_user.role_id in [1,3,4,6]:
can_edit = True
except:
pass
publications = Publication.query.all()
return render_template('publications_table_template.html', publications=publications,can_edit = can_edit)
###############################################################################################################################
### OVERVIEW PAGES
# species overview page
@main.route('/species=<list:species_ids>/publications=<list:pub_ids>')
def species_page(species_ids,pub_ids):
if species_ids[0] == "all" and pub_ids[0] == "all":
flash('Loading all species and publications is not allowed, sorry.')
abort(404)
can_edit = False
try:
if current_user.role_id in [1,3,4,6]:
can_edit = True
except:
pass
try:
#get species
all_species = []
if species_ids[0] != "all": # aka if species are filtered
for id in species_ids:
all_species.append((Species.query.filter_by(id=id)).first())
all_populations_species = []
for species in all_species:
all_populations_species.extend(Population.query.filter_by(species_id=species.id).all())
#get pubs
all_pubs = []
if pub_ids[0] != "all": # aka if publications are being filtered
for id in pub_ids:
all_pubs.append((Publication.query.filter_by(id=id)).first())
all_populations_pubs = []
for publications in all_pubs:
all_populations_pubs.extend(Population.query.filter_by(publication_id=publications.id).all())
except:
abort(404)
# variable for whether to show the compadrino info box at the top (when only 1 publication is selected)
compadrino_info = False
if species_ids[0] == "all" and len(pub_ids) == 1:
compadrino_info = True
# Pick the right populations + get stuff
    if species_ids[0] == "all": # only publications are filtered
        populations = all_populations_pubs
    elif pub_ids[0] == "all": # only species are filtered
        populations = all_populations_species
    else: # both publications AND species are filtered
        populations = set(all_populations_species).intersection(all_populations_pubs)
if species_ids[0] != "all": #aka if species are filtered
all_pubs = []
for population in populations:
all_pubs.append(Publication.query.filter_by(id=population.publication_id).first())
if pub_ids[0] != "all": # aka if publications are being filtered
all_species = []
for population in populations:
all_species.append(Species.query.filter_by(id=population.species_id).first())
# remove duplicates
populations = list(set(populations))
all_species = list(set(all_species))
publications = list(set(all_pubs))
# remove unchecked populations and matrices
if can_edit == False:
checked_pops = []
checked_mats = []
waiting_list_note = False
for population in populations:
if population.version[0].status_id ==3:
dont_append = False
for mat in population.matrices:
if mat.version[0].status_id !=3:
dont_append = True
waiting_list_note = True
if dont_append == False:
checked_pops.append(population)
else:
waiting_list_note = True
populations = checked_pops
if waiting_list_note == True:
flash('There are matrices coming soon for this species/publication')
#print(publications)
#publications.sort();
#print(publications)
#flash('test')
exeter_data = False
try:
if current_user.institute.institution_short == "UoE" and current_user.institute_confirmed == 1:
exeter_data = True
if current_user.institute.institution_short == "UOS" and current_user.institute_confirmed == 1:
exeter_data = True
except:
pass
protocol = DigitizationProtocol.query.all()
protocol_dict = {}
for ocol in protocol:
protocol_dict[ocol.name_in_csv] = ocol.field_short_description
return render_template('species_template.html',all_species = all_species, publications = publications, populations = populations,can_edit = can_edit,exeter_data = exeter_data,compadrino_info = compadrino_info,protocol_dict = protocol_dict)
@main.route('/protocol')
def protocol_page():
protocol = DigitizationProtocol.query.all()
protocol_dict = {}
for ocol in protocol:
protocol_dict[ocol.name_in_csv] = ocol.field_description
return render_template('protocol_template.html',protocol_dict = protocol_dict,protocol = protocol)
# Taxonomic explorer
# DOES NOT WORK IN FIREFOX
@main.route('/explorer/<taxon_level>/<taxon>')
# @login_required
def explorer(taxon_level,taxon):
if taxon_level == "life":
taxon_list = Taxonomy.query.all()
next_taxon_level = "kingdom"
tax_pos = 0
elif taxon_level == "kingdom":
taxon_list = Taxonomy.query.filter_by(kingdom=taxon).all()
next_taxon_level = "phylum"
tax_pos = 1
elif taxon_level == "phylum":
taxon_list = Taxonomy.query.filter_by(phylum=taxon).all()
next_taxon_level = "class"
tax_pos = 2
elif taxon_level == "class":
taxon_list = Taxonomy.query.filter_by(tax_class=taxon).all()
next_taxon_level = "order"
tax_pos = 3
elif taxon_level == "order":
taxon_list = Taxonomy.query.filter_by(tax_order=taxon).all()
next_taxon_level = "family"
tax_pos = 4
elif taxon_level == "family":
taxon_list = Taxonomy.query.filter_by(family=taxon).all()
next_taxon_level = "species"
tax_pos = 5
return render_template('explorer_template.html',taxon=taxon,taxon_list = taxon_list,taxon_level=taxon_level,next_taxon_level=next_taxon_level, tax_pos = tax_pos)
# contribute
@main.route('/contribute-data')
def contribute_data():
return render_template('contribute_data.html')
#coming soon page
@main.route('/comingsoon')
def comingsoon():
return render_template('coming_soon.html')
###############################################################################################################################
### Become a Compadrino Form and HTML page
@main.route('/become-a-compadrino', methods=('GET', 'POST'))
def become_a_compadrino():
form = ContactForm()
if request.method == 'POST':
if form.validate() == False:
flash('All fields are required.')
return render_template('become_a_compadrino.html', form=form)
else:
msg = Message("Demography Database Message from your visitor " + form.name.data,
sender='YourUser@NameHere',
recipients=['spandex.ex@gmail.com', 'd.l.buss@exeter.ac.uk', 'demographydatabase@gmail.com'])
msg.body = """
From: %s <%s>,
Subject: %s
Message: %s
""" % (form.name.data, form.email.data, form.subject.data, form.message.data)
mail.send(msg)
flash('Successfully sent message! If you do not receive a response within 10 working days, I apologise for this, please resend your enquiry.')
return render_template('become_a_compadrino.html', form=form)
elif request.method == 'GET':
return render_template('become_a_compadrino.html', form=form)
### Report a Website Error Form
@main.route('/error-form', methods=('GET', 'POST'))
def error_form():
form = ContactForm()
if request.method == 'POST':
if form.validate() == False:
flash('All fields are required.')
return render_template('Error_form.html', form=form)
else:
msg = Message("Demography Database Message from your visitor " + form.name.data,
sender='YourUser@NameHere',
recipients=['spandex.ex@gmail.com', 'd.l.buss@exeter.ac.uk', 'demographydatabase@gmail.com'])
msg.body = """
From: %s <%s>,
Subject: %s
Message: %s
""" % (form.name.data, form.email.data, form.subject.data, form.message.data)
mail.send(msg)
flash('Successfully sent message! Thank you for reporting the error, we will try and fix this as soon as possible. If the problem persists please resend your enquiry after 10 working days.')
return render_template('Error_form.html', form=form)
elif request.method == 'GET':
return render_template('Error_form.html', form=form)
### Help Develop Site Form
@main.route('/help-develop-site', methods=('GET', 'POST'))
def help_develop_site():
form = ContactForm()
if request.method == 'POST':
if form.validate() == False:
flash('All fields are required.')
return render_template('help_develop_site.html', form=form)
else:
msg = Message("Demography Database Message from your visitor " + form.name.data,
sender='YourUser@NameHere',
recipients=['spandex.ex@gmail.com', 'd.l.buss@exeter.ac.uk', 'demographydatabase@gmail.com'])
msg.body = """
From: %s <%s>,
Subject: %s
Message: %s
""" % (form.name.data, form.email.data, form.subject.data, form.message.data)
mail.send(msg)
flash('Successfully sent message! If you do not receive a response within 10 working days, I apologise for this, please resend your enquiry.')
return render_template('help_develop_site.html', form=form)
elif request.method == 'GET':
return render_template('help_develop_site.html', form=form)
# USER + PROFILE PAGES
# User
@main.route('/user/<username>')
def user(username):
user = User.query.filter_by(username=username).first_or_404()
return render_template('user.html', user=user)
## Trying to get uploads to work - success
## Need to alter the file path once on the Exeter server!!!!!
UPLOAD_FOLDER = '/Users/daniellebuss/Sites/demography_database/app/static/uploads'
# Extensions are stored lower-case because allowed_file() lower-cases the file suffix before checking.
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'csv', 'rdata', 'ds_store'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['ALLOWED_EXTENSIONS'] = ALLOWED_EXTENSIONS
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
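# Hedged examples of the extension check above (file names are illustrative):
#
#     allowed_file('matrix_data.csv')   # -> True  ('csv' is an allowed extension)
#     allowed_file('notes')             # -> False (no '.' in the file name)
#     allowed_file('report.PDF')        # -> True  (the suffix is lower-cased first)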
@main.route('/datadownloads', methods=['GET', 'POST'])
def datadownloads():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
        # if the user does not select a file, the browser also
        # submits an empty part without a filename
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return render_template('upload_files.html',
filename=filename, type=file.content_type)
return render_template('datadownloads.html')
@main.route('/download_all')
def download_all():
UPLOAD_FOLDER = '/Users/daniellebuss/Sites/demography_database/app/static/uploads/'
zipf = zipfile.ZipFile('Database.zip', 'w', zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(UPLOAD_FOLDER):
for file in files:
zipf.write(UPLOAD_FOLDER+file)
zipf.close()
return send_file('Database.zip',
mimetype = 'zip',
attachment_filename = 'Database.zip',
as_attachment = True)
#@main.route('/return-file/', methods=['GET', 'POST'])
#def return_file():
# return send_file('/Users/daniellebuss/Sites/demography_database/app/static/uploads/COMADRE_v_2_0_1.RData', attachment_filename='comadre_2.RData')
#Download file html template
@main.route('/download')
def download():
return render_template('outputs/download.html')
#Download COMADRE 2.0.1
@main.route('/download-zip/', methods=['GET', 'POST'])
def download_zip():
file_name = '/Users/daniellebuss/Sites/demography_database/app/static/uploads/COMADRE_v_2_0_1.RData'
return send_file(file_name, as_attachment=True, mimetype='text/plain')
#Download COMADRE xxx
@main.route('/download-zip2/', methods=['GET', 'POST'])
def download_zip2():
file_name = '/Users/daniellebuss/Sites/demography_database/app/static/uploads/COMADRE_v_xxx.RData'
return send_file(file_name, as_attachment=True, mimetype='text/plain')
#Download COMPADRE xxx
@main.route('/download-zip3/', methods=['GET', 'POST'])
def download_zip3():
file_name = '/Users/daniellebuss/Sites/demography_database/app/static/uploads/COMPADRE_v_xxx.RData'
return send_file(file_name, as_attachment=True, mimetype='text/plain')
#Download COMPADRE 4.0.1
@main.route('/download-zip4/', methods=['GET', 'POST'])
def download_zip4():
file_name = '/Users/daniellebuss/Sites/demography_database/app/static/uploads/COMPADRE_v_4_0_1.RData'
return send_file(file_name, as_attachment=True, mimetype='text/plain')
#Download COMPADRE 3.2.1
@main.route('/download-zip5/', methods=['GET', 'POST'])
def download_zip5():
file_name = '/Users/daniellebuss/Sites/demography_database/app/static/uploads/COMPADRE_v_3_2_1.RData'
return send_file(file_name, as_attachment=True, mimetype='text/plain')
#Download COMPADRE 3.2.0
@main.route('/download-zip6/', methods=['GET', 'POST'])
def download_zip6():
file_name = '/Users/daniellebuss/Sites/demography_database/app/static/uploads/COMPADRE_v_3_2_0.RData'
return send_file(file_name, as_attachment=True, mimetype='text/plain')
#Download COMPADRE 3.0.0
@main.route('/download-zip7/', methods=['GET', 'POST'])
def download_zip7():
file_name = '/Users/daniellebuss/Sites/demography_database/app/static/uploads/COMPADRE_v_3_0_0.RData'
return send_file(file_name, as_attachment=True, mimetype='text/plain')
#Download COMADRE 1.0.0
@main.route('/download-zip8/', methods=['GET', 'POST'])
def download_zip8():
file_name = '/Users/daniellebuss/Sites/demography_database/app/static/uploads/COMADRE_v_1_0_0.RData'
return send_file(file_name, as_attachment=True, mimetype='text/plain')
#Download current SQL Dump (This gets updated Daily)
#!!!On server file is /var/www/demography-database/alchemydumps/demography-database.sql
@main.route('/download-sql/', methods=['GET', 'POST'])
def download_sql():
file_name = '/Users/daniellebuss/Sites/demography_database/application_tasks/mysql_dump/demography_database.sql'
return send_file(file_name, as_attachment=True, mimetype='text/plain')
@main.route('/downloadsql')
def downloadsql():
return render_template('outputs/download_sql.html')
#@main.route('/datadownloads')
#def datadownloads():
# return render_template('datadownloads.html')
#@main.route('/upload', methods=['GET','POST'])
#def upload():
# Get the name of the uploaded file
# file = request.files['file']
# Check if the file is one of the allowed types/extensions
# if file and allowed_file(file.filename):
# Make the filename safe, remove unsupported chars
# filename = secure_filename(file.filename)
# Move the file form the temporal folder to
# the upload folder we setup
# file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
# Redirect the user to the uploaded_file route, which
# will basicaly show on the browser the uploaded file
# return redirect(url_for('uploaded_file',
# filename=filename))
@main.route('/uploads/<filename>', methods=['GET', 'POST'])
def uploaded_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'],
filename)
|
|
# Copyright 2017-2018 Tom Eulenfeld, GPLv3
"""Commands used by the CLI interface"""
import functools
import glob
import logging
import multiprocessing
import os
import re
import shutil
import textwrap
import h5py
import numpy as np
import obspy
from obspy.core import UTCDateTime as UTC
import obspyh5
import tqdm
import yam
from yam.correlate import correlate
from yam.io import _get_existent, _iter_h5, _write_corr, read_dicts, write_dict
import yam.stack
import yam.stretch
from yam.util import _analyze_key, _filter, _get_fname, IterTime, ParseError
log = logging.getLogger('yam.commands')
def _todo_tasks(tasks, done_tasks):
if len(tasks) == 0:
log.warning('no tasks found -> nothing to do')
new_tasks = sorted(set(tasks) - set(done_tasks))
if len(new_tasks) < len(tasks):
msg = '%d of %d tasks already processed -> skip these tasks'
log.info(msg, len(tasks) - len(new_tasks), len(tasks))
return sorted(new_tasks)
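# Hedged example of the helper above (dates are illustrative):
#
#     _todo_tasks(['2020-01-01', '2020-01-02', '2020-01-03'], ['2020-01-02'])
#     # -> ['2020-01-01', '2020-01-03'] and logs "1 of 3 tasks already processed"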
def start_correlate(io,
filter_inventory=None,
startdate='1990-01-01', enddate='2020-01-01',
njobs=None,
parallel_inner_loop=False,
keep_correlations=False,
stack='1d',
dataset_kwargs=None,
**kwargs):
"""
Start correlation
:param io: |io|
:param filter_inventory: filter inventory with its select method,
specified dict is passed to |Inventory.select|
:param str startdate,enddate: start and end date as strings
    :param njobs: number of cores to use for computation, days are computed
        in parallel, this might consume a lot of memory, default: None -- use all
        available cores, set njobs to 0 for sequential processing
    :param parallel_inner_loop: Run inner loops in parallel instead of the outer
        loop (preprocessing of different stations and correlation of different
        pairs versus processing of different days).
        Useful for a dataset with many stations.
:param dtype: data type for storing correlations
(default: float16 - half precision)
:param dataset_kwargs: options passed to obspyh5 resp. h5py when creating
a new dataset,
e.g. `dataset_kwargs={'compression':'gzip'}`.
See create_dataset in h5py for more options.
By default the dtype is set to `'float16'`.
:param keep_correlations,stack,\*\*kwargs: all other kwargs are passed to
`~yam.correlate.correlate()` function
"""
if dataset_kwargs is None:
dataset_kwargs = {}
if filter_inventory:
log.debug('filter inventory')
io['inventory'] = io['inventory'].select(**filter_inventory)
log.info('start preprocessing and correlation')
tasks = [str(t)[:10] for t in IterTime(UTC(startdate), UTC(enddate))]
done_tasks = None
if stack is not None:
key2 = kwargs['outkey'] + '_s' + stack
done_tasks = [t[-16:-6] for t in _get_existent(io['stack'], key2)]
if keep_correlations:
key2 = kwargs['outkey']
done_tasks2 = [t[-16:-6] for t in _get_existent(io['corr'], key2)]
if done_tasks is None:
done_tasks = done_tasks2
else:
done_tasks = [t for t in done_tasks if t in done_tasks2]
tasks = _todo_tasks(tasks, done_tasks)
tasks = [UTC(t) for t in tasks]
kwargs.update({'keep_correlations': keep_correlations, 'stack': stack})
if parallel_inner_loop:
kwargs['njobs'] = njobs
njobs = 0
do_work = functools.partial(correlate, io, **kwargs)
if njobs == 0:
log.info('do work sequentially')
for task in tqdm.tqdm(tasks, total=len(tasks)):
result = do_work(task)
_write_corr(result, io, **dataset_kwargs)
else:
pool = multiprocessing.Pool(njobs)
log.info('do work parallel (%d cores)', pool._processes)
for result in tqdm.tqdm(pool.imap_unordered(do_work, tasks),
total=len(tasks)):
_write_corr(result, io, **dataset_kwargs)
pool.close()
pool.join()
log.info('finished preprocessing and correlation')
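# Hedged usage sketch (values are illustrative; ``io`` is the dictionary described
# by the |io| substitution and ``outkey`` is whatever key the correlation
# configuration defines):
#
#     start_correlate(io, startdate='2020-01-01', enddate='2020-01-31',
#                     njobs=0,                  # 0 -> process days sequentially
#                     keep_correlations=True,   # also store unstacked correlations
#                     stack='1d', outkey='c1')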
def _stack_wrapper(groupnames, fname, outkey, **kwargs):
"""
Wrapper around `~yam.stack.stack()`
:param groupnames: groups to load the correlations from
:param fname: file to load correlations from
:param outkey: key to write stacked correlations to
:param \*\*kwargs: all other kwargs are passed to
`~yam.stack.stack()` function
"""
with h5py.File(fname, 'r') as f:
traces = [obspyh5.dataset2trace(f[g]) for g in groupnames]
stream = obspy.Stream(traces)
stack_stream = yam.stack.stack(stream, **kwargs)
for tr in stack_stream:
tr.stats.key = outkey
return stack_stream
def start_stack(io, key, outkey, subkey='', njobs=None,
starttime=None, endtime=None,
dataset_kwargs=None,
**kwargs):
"""
Start stacking
:param io: |io|
:param key: key to load correlations from
:param outkey: key to write stacked correlations to
:param subkey: only use a part of the correlations
:param njobs: number of cores to use for computation,
default: None -- use all available cores,
set njobs to 0 for sequential processing
:param starttime,endtime: constrain start and end dates
:param dataset_kwargs: options passed to obspyh5 resp. h5py when creating
a new dataset,
e.g. `dataset_kwargs={'compression':'gzip'}`.
See create_dataset in h5py for more options.
By default the dtype is set to `'float16'`.
:param \*\*kwargs: all other kwargs are passed to
`yam.stack.stack()` function
"""
if dataset_kwargs is None:
dataset_kwargs = {}
dataset_kwargs.setdefault('dtype', 'float16')
fname = io['stack'] if 's' in _analyze_key(key) else io['corr']
tasks = _get_existent(fname, key + subkey, 3)
done_tasks = [t.replace(outkey, key) for t in
_get_existent(io['stack'], outkey + subkey, 3)]
tasks = _todo_tasks(tasks, done_tasks)
length = kwargs.get('length')
for task in tqdm.tqdm(tasks, total=len(tasks)):
subtasks = [t for t in _get_existent(fname, task) if
(starttime is None or t[-16:] >= starttime) and
(endtime is None or t[-16:] <= endtime)]
if length is None and njobs != 0:
step = 1000
subtask_chunks = [tuple(subtasks[i:i + step]) for i in
range(0, len(subtasks), step)]
else:
subtask_chunks = [subtasks]
# TODO: parallel stacking for arbitrary stack id
# lensec = _time2sec(length)
# if lensec >= 30 * 24 * 3600:
# subtask_chunks = [subtasks]
# else:
# subtask_chunks = []
# for i in range(0, len(subtasks), step):
# chunk = subtasks[i:i + step]
# t1 = UTC(subtasks[i + step - 1][-16:])
# j = 0
# while i + step + j < len(subtasks):
# t2 = UTC(subtasks[i + step + j][-16:])
# # assume lensec is always larger than movesec
# # not ideal, may load to much data
# # eg for stack over 1 year
# if t2 - t1 <= lensec:
# chunk.append(subtasks[i + step + j])
# else:
# break
# j += 1
# subtask_chunks.append(chunk)
do_work = functools.partial(_stack_wrapper, fname=fname, outkey=outkey,
**kwargs)
results = []
if njobs == 0 or len(subtask_chunks) == 1:
log.debug('do work sequentially')
for stask in tqdm.tqdm(subtask_chunks, total=len(subtask_chunks)):
result = do_work(stask)
results.append(result)
else:
pool = multiprocessing.Pool(njobs)
log.debug('do work parallel (%d cores)', pool._processes)
for result in tqdm.tqdm(
pool.imap(do_work, subtask_chunks),
total=len(subtask_chunks)):
results.append(result)
pool.close()
pool.join()
if length is None:
for stream in results:
assert len(stream) <= 1
traces = [tr for stream in results for tr in stream]
num = sum(tr.stats.num for tr in traces)
data = np.sum([tr.data * (tr.stats.num / num) for tr in traces],
axis=0)
tr_stack = obspy.Trace(data, header=traces[0].stats)
tr_stack.stats.num = num
tr_stack.write(io['stack'], 'H5', mode='a', **dataset_kwargs)
else:
for stack_stream in results:
stack_stream.write(io['stack'], 'H5', mode='a',
**dataset_kwargs)
def _stretch_wrapper(groupnames, fname, outkey, filter=None,
**kwargs):
"""
Wrapper around `~yam.stretch.stretch()`
    :param groupnames: groups to load the correlations from
    :param fname: file to load correlations from
    :param outkey: key to write stretch results to
:param filter: filter correlations before stretching
(bandpass, tuple with min and max frequency)
:param \*\*kwargs: all other kwargs are passed to
`~yam.stretch.stretch()` function
"""
with h5py.File(fname, 'r') as f:
traces = [obspyh5.dataset2trace(f[g]) for g in groupnames]
stream = obspy.Stream(traces)
for tr in stream:
tr.data = np.require(tr.data, 'float16')
if filter:
_filter(stream, filter)
stretchres = yam.stretch.stretch(stream, **kwargs)
if stretchres is not None:
stretchres['attrs']['key'] = outkey
return stretchres
def start_stretch(io, key, subkey='', njobs=None, reftrid=None,
starttime=None, endtime=None,
dataset_kwargs=None,
**kwargs):
"""
Start stretching
:param io: |io|
:param key: key to load correlations from
:param subkey: only use a part of the correlations
:param njobs: number of cores to use for computation,
default: None -- use all available cores,
set njobs to 0 for sequential processing
:param reftrid: Parallel processing is only possible when this parameter
is specified. Key to load the reference trace from, e.g. `'c1_s'`,
it can be created by a command similar to `yam stack c1 ''`.
:param starttime,endtime: constrain start and end dates
:param dataset_kwargs: options passed to obspyh5 resp. h5py when creating
a new dataset,
e.g. `dataset_kwargs={'compression':'gzip'}`.
See create_dataset in h5py for more options.
By default the dtype is set to `'float16'`.
:param \*\*kwargs: all other kwargs are passed to
`stretch_wrapper()` function
"""
if dataset_kwargs is None:
dataset_kwargs = {}
fname = _get_fname(io, key)
outkey = kwargs['outkey']
tasks = _get_existent(fname, key + subkey, 3)
done_tasks = [t.replace(outkey, key) for t in
_get_existent(io['stretch'], outkey + subkey, 3)]
tasks = _todo_tasks(tasks, done_tasks)
for task in tqdm.tqdm(tasks, total=len(tasks)):
if reftrid is None:
reftr = None
else:
fname_reftr = _get_fname(io, reftrid)
group_reftr = task.replace(key, reftrid)
reftr = obspy.read(fname_reftr, 'H5', group=group_reftr,
dtype='float16')
if len(reftr) != 1:
raise NotImplementedError('Reference must be single trace')
reftr = reftr[0]
subtasks = [t for t in _get_existent(fname, task) if
(starttime is None or t[-16:] >= starttime) and
(endtime is None or t[-16:] <= endtime)]
if reftr is None:
subtask_chunks = [tuple(subtasks)]
else:
step = 1000
subtask_chunks = [tuple(subtasks[i:i + step]) for i in
range(0, len(subtasks), step)]
do_work = functools.partial(_stretch_wrapper, fname=fname,
reftr=reftr, **kwargs)
results = []
if njobs == 0 or len(subtask_chunks) == 1:
log.debug('do work sequentially')
for stask in tqdm.tqdm(subtask_chunks, total=len(subtask_chunks)):
result = do_work(stask)
results.append(result)
else:
pool = multiprocessing.Pool(njobs)
log.debug('do work parallel (%d cores)', pool._processes)
for result in tqdm.tqdm(
pool.imap(do_work, subtask_chunks),
total=len(subtask_chunks)):
results.append(result)
pool.close()
pool.join()
result = yam.stretch.join_dicts(results)
if result is not None:
write_dict(result, io['stretch'], **dataset_kwargs)
def _start_ipy(obj):
from IPython import start_ipython
print('Contents loaded into obj variable.')
start_ipython(argv=[], user_ns={'obj': obj}, display_banner=False)
print('Good Bye')
def _get_print2():
num, _ = shutil.get_terminal_size()
if num == 0:
num = 80
wrap = textwrap.TextWrapper(width=num - 1, initial_indent=' ',
subsequent_indent=' ')
def print2(text):
print(wrap.fill(text))
return print2
def _get_data_glob(data):
"""
Construct a glob expression from the data expression
"""
return re.sub(r'{[^{}]*}', '*', data)
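# Hedged example: every "{...}" placeholder in the data expression becomes a "*"
# wildcard (the path below is illustrative):
#
#     _get_data_glob('data/{year}/{network}.{station}.{channel}.mseed')
#     # -> 'data/*/*.*.*.mseed'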
def _get_data_files(data):
"""
Construct a glob expression from the data expression and return file names
"""
return glob.glob(_get_data_glob(data))
def _print_info_helper(key, io):
max_count = 10000
print2 = _get_print2()
is_stretch = key == 'tstretch'
fname = _get_fname(io, key)
keys = _get_existent(fname, '/', 1) # 1, 3, 4
if len(keys) == 0:
print2('None')
for key in sorted(keys):
keys2 = _get_existent(fname, key, 3)
subkey = key.split('/')[-1]
if is_stretch:
o = '%s: %d combs' % (subkey, len(keys2))
else:
keys3 = _get_existent(fname, key, max_count=max_count+1)
o = ('%s: %d combs, %s%s corrs' %
(subkey, len(keys2), (len(keys3) > max_count) * '>',
min(max_count, len(keys3))))
print2(o)
def info(io, key=None, subkey='', config=None, **unused_kwargs):
"""
Print information about yam project
:param io: |io|
:param key: key to print infos about
(key inside HDF5 file, or one of data, stations,
default: None -- print overview)
:param subkey: only print part of the HDF5 file
:param config: list of configuration dictionaries
"""
print2 = _get_print2()
data_plugin = io.get('data_plugin')
if key is None:
print('Stations:')
inventory = io['inventory']
if inventory is None:
print2('Not found')
else:
stations = inventory.get_contents()['stations']
channels = inventory.get_contents()['channels']
print2(' '.join(st.strip().split()[0] for st in stations))
print2('%d stations, %d channels' % (len(stations), len(channels)))
if data_plugin:
print('Data plugin:')
print2('%s' % data_plugin)
else:
print('Raw data (expression for day files):')
print2(io['data'])
print2('%d files found' % len(_get_data_files(io['data'])))
print('Config ids:')
def get_keys(d):
if d is None or len(d) == 0:
return 'None'
else:
return ', '.join(sorted(d.keys()))
print2('c Corr: ' + get_keys(config[0]))
print2('s Stack: ' + get_keys(config[1]))
print2('t Stretch: ' + get_keys(config[2]))
print('Correlations (channel combinations, correlations calculated):')
_print_info_helper('corr', io)
print('Stacks:')
_print_info_helper('stack', io)
print('Stretching matrices:')
_print_info_helper('tstretch', io)
elif key == 'stations':
print(io['inventory'])
elif key == 'data':
if data_plugin:
print('Data plugin:')
print2('%s' % data_plugin)
else:
print('Raw data (expression for day files):')
print2(io['data'])
fnames = _get_data_files(io['data'])
print2('%d files found' % len(fnames))
for fname in sorted(fnames):
print2(fname)
else:
is_stretch = 't' in _analyze_key(key)
fname = _get_fname(io, key)
level = 3 if is_stretch else None
for line in _get_existent(fname, key + subkey, level):
print2(line)
def _load_data(seedid, day, data, data_format, key='data',
**prep_kw):
"""Load preprocessed or raw data"""
from obspy import UTCDateTime as UTC
from yam.util import _seedid2meta
from yam.correlate import get_data, preprocess
smeta = _seedid2meta(seedid)
day = UTC(day)
if key == 'data':
obj = get_data(smeta, data, data_format, day,
overlap=0, edge=0, trim_and_merge=True)
return obj
stream = get_data(smeta, data, data_format, day,
overlap=0, edge=60, trim_and_merge=False)
preprocess(stream, day, **prep_kw)
return stream
def load(io, key, seedid=None, day=None, do='return', prep_kw={},
fname=None, format=None):
"""
Load object and do something with it
:param io: io
:param key: key of object to load
(key inside HDF5 file, or one of data, prepdata, stations)
:param seedid: seed id of a channel (for data or prepdata)
:param day: |UTC| object with day (for data or prepdata)
:param do: specifies what to do with the object, default is ``'return'``
which simply returns the object, other possible values are
``'print'`` -- print object (used by print command),
``'load'`` -- load object in IPython session (used by load command),
``'export'`` -- export correlations to different file format
(used by export command)
:param dict prep_kw: options passed to preprocess (for prepdata only)
:param fname: file name (for export command)
:param format: target format (for export command)
"""
if key == 'stations':
obj = io['inventory']
elif key in ('data', 'prepdata'):
if seedid is None or day is None:
msg = 'seedid and day must be given for data or prepdata'
raise ParseError(msg)
if key == 'prepdata':
prep_keys = ('remove_response', 'remove_response_options',
'demean', 'filter', 'normalization',
'time_norm_options', 'spectral_whitening_options',
'tolerance_shift',
'downsample')
prep_kw = {k: prep_kw.get(k) for k in prep_keys}
obj = _load_data(seedid, day, io['data'], io.get('data_format'),
key, inventory=io['inventory'], **prep_kw)
else:
is_stretch = 't' in _analyze_key(key)
fname_in = _get_fname(io, key)
if is_stretch:
obj = read_dicts(fname_in, key)
if do == 'print':
obj = '\n\n'.join(str(o) for o in obj)
else:
obj = obspy.read(fname_in, 'H5', group=key, headonly=do == 'print')
if do == 'print':
obj = obj.__str__(extended=True)
if do == 'print':
print(obj)
elif do == 'load':
_start_ipy(obj)
elif do == 'return':
return obj
elif do == 'export':
print('export', obj)
if format == 'H5':
obspyh5.set_index()
obj.write(fname, format)
if format == 'H5':
from yam.io import INDEX
obspyh5.set_index(INDEX)
else:
        raise ParseError('unknown value for do: %r' % (do,))
def plot(io, key, plottype=None, seedid=None, day=None, prep_kw={},
corrid=None, show=False,
**kwargs):
"""
Plot everything
:param io: |io|
:param key: key of objects to plot, or one of stations, data, prepdata
:param plottype: plot type to use
(non default values are ``'vs_dist'`` and ``'wiggle'`` for
correlation plots, ``'velocity'`` for plots of stretching results)
:param seedid: seed id of a channel (for data or prepdata)
:param day: |UTC| object with day (for data or prepdata)
:param dict prep_kw: options passed to preprocess (for prepdata only)
:param corrid: correlation configuration (for prepdata only)
:param show: show interactive plot
:param \*\*kwargs: all other kwargs are passed to
the corresponding plot function in `~yam.imaging` module
"""
import yam.imaging
path = io['plot']
if not os.path.exists(path):
os.mkdir(path)
if key in ('stations', 'data', 'prepdata'):
pt = key
else:
is_corr = 't' not in _analyze_key(key)
if is_corr and plottype == 'vs_dist':
pt = 'corr_vs_dist'
elif is_corr and plottype == 'wiggle':
pt = 'corr_vs_time_wiggle'
elif is_corr and plottype is None:
pt = 'corr_vs_time'
elif not is_corr and plottype is None:
pt = 'sim_mat'
elif not is_corr and plottype == 'velocity':
pt = 'velocity_change'
else:
raise ParseError('Combination of key and plottype not supported')
kw = kwargs.get('plot_%s_options' % pt, {})
kw.update(kwargs.get('plot_options', {}))
bname = os.path.join(path, pt)
if key == 'stations':
yam.imaging.plot_stations(io['inventory'], bname, **kw)
elif key in ('data', 'prepdata'):
data = load(io, key, do='return', seedid=seedid, day=day,
prep_kw=prep_kw)
fname = bname + '_%s_%s' % (seedid, day)
if key == 'prepdata':
fname = fname + '_c' + corrid
yam.imaging.plot_data(data, fname, show=show, **kw)
else:
plot_ = getattr(yam.imaging, 'plot_' + pt)
if pt == 'corr_vs_dist':
fname2 = _get_fname(io, key)
stream = obspy.read(fname2, 'H5', group=key)
fname = bname + '_' + key.replace('/', '_')
plot_(stream, fname, **kw)
elif pt == 'velocity_change':
results = [res for task, res in _iter_h5(io, key)]
fname = bname + '_' + key.replace('/', '_')
plot_(results, fname, **kw)
else:
for task, res in _iter_h5(io, key):
fname = bname + task.replace('/', '_')
plot_(res, fname, **kw)
if show:
from matplotlib.pyplot import show
show()
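# Minimal usage sketch for `plot` (illustrative only, never called on import);
# the correlation key below is a hypothetical placeholder.
def _example_plot_usage(io):
    # plot the station map
    plot(io, 'stations')
    # wiggle plot of a correlation key, shown interactively
    plot(io, 'c1_s1d', plottype='wiggle', show=True)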
def remove(io, keys):
"""
Remove one or several keys from HDF5 file
:param io: |io|
:param keys: list of keys to remove
"""
for key in keys:
        if '/' in key and key.split('/', 1)[1] != '':
from warnings import warn
warn('It is highly encouraged to delete only top level keys')
fname = _get_fname(io, key)
with h5py.File(fname, 'a') as f:
del f[key]
|
|
""" TensorFlow Layers
Convenience functions but Input and Output should be tensors.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import seq2seq
_phase = tf.Variable(False, name='phase', trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])
_phase_train = _phase.assign(True)
_phase_infer = _phase.assign(False)
# TODO: move to ops
def _rank(x):
return len(x.get_shape())
def _apply_dropout_mask(tensor_shape, keep_prob=1.0, normalize=True):
random_tensor = keep_prob + tf.random_uniform(tensor_shape, dtype=tf.float32)
binary_mask = tf.floor(random_tensor)
if normalize:
binary_mask = tf.reciprocal(keep_prob) * binary_mask
return binary_mask
def _global_keep_prob(keep_prob):
keep_prob = tf.convert_to_tensor(keep_prob, dtype=tf.float32)
keep_prob = tf.cond(_phase, lambda: keep_prob, lambda: keep_prob * 0.0 + 1.0)
return keep_prob
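# Sketch of how the train/infer phase flag above is intended to be toggled
# (illustrative; `sess` is a tf.Session created by the caller). While the phase
# is False (inference), _global_keep_prob forces keep_prob to 1.0, effectively
# disabling dropout.
def _example_set_phase(sess, train):
    sess.run(_phase_train if train else _phase_infer)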
def layer(func):
class Layer(object):
def __init__(self, *args, **kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
self.name = self.kwargs.get("name", self.func.__name__)
self._template = tf.make_template(self.name, self.func, create_scope_now_=True)
self._unique_name = self._template.variable_scope.name.split("/")[-1]
self._summary_added = False
def __call__(self, x):
out = self.template(x, *self.args, **self.kwargs)
self._layer_logging(x, out)
self._add_summary()
return out
def __rrshift__(self, other):
""" >> """
return self.__call__(other)
def _layer_logging(self, other, out):
tf.logging.info(" {} {} {} -> {}".format(
self.unique_name, "shape", str(other.get_shape()), str(out.get_shape())))
def _add_summary(self):
if not self.kwargs.get("summary"):
return None
if self.summary_added:
return None
for var in self.get_variables_in_scope():
# TODO: different summary types
tf.summary.scalar(var.name, tf.reduce_mean(var))
self._summary_added = True
def get_variables_in_scope(self):
assert self.template._variables_created, "Variables not yet created or undefined."
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.variable_scope_name)
return variables
@property
def template(self):
return self._template
@property
def unique_name(self):
return self._unique_name
@property
def variable_scope_name(self):
return self.template._variable_scope._name
@property
def summary_added(self):
return self._summary_added
return Layer
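# Minimal sketch of what the decorator produces (illustrative, never called on
# import): decorating a function yields a Layer class, and instantiating it
# returns a callable object that also supports piping via `>>` (__rrshift__
# above). See the __main__ block at the bottom of this module for a runnable
# pipeline. The layer names used below are defined later in this module.
def _example_layer_composition(word_ids, vocab_size, embedding_dim):
    return (word_ids
            >> embedding_layer(vocab_size, embedding_dim)
            >> recurrent_layer(hidden_dims=64)
            >> dense_layer(vocab_size))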
@layer
def identity_layer(tensor, **opts):
out = tf.identity(tensor)
return out
@layer
def embedding_layer(tensor, vocab_size=None, embedding_dim=None, embedding_matrix=None, **opts):
if embedding_matrix is None:
initializer = tf.contrib.layers.xavier_initializer(uniform=True)
embedding_matrix = tf.get_variable("embedding_matrix", initializer=initializer(shape=(vocab_size, embedding_dim)))
out = tf.nn.embedding_lookup(embedding_matrix, tensor)
return out
@layer
def recurrent_layer(tensor, cell=None, hidden_dims=128, sequence_length=None, decoder_fn=None,
activation=tf.nn.tanh, initializer=tf.orthogonal_initializer(), initial_state=None,
keep_prob=1.0,
return_final_state=False, return_next_cell_input=True, **opts):
if cell is None:
cell = tf.contrib.rnn.BasicRNNCell(hidden_dims, activation=activation)
# cell = tf.contrib.rnn.LSTMCell(hidden_dims, activation=activation)
if keep_prob < 1.0:
keep_prob = _global_keep_prob(keep_prob)
cell = tf.contrib.rnn.DropoutWrapper(cell, keep_prob, keep_prob)
if opts.get("name"):
tf.add_to_collection(opts.get("name"), cell)
if decoder_fn is None:
outputs, final_state = tf.nn.dynamic_rnn(cell, tensor,
sequence_length=sequence_length, initial_state=initial_state, dtype=tf.float32)
final_context_state = None
else:
# TODO: turn off sequence_length?
outputs, final_state, final_context_state = seq2seq.dynamic_rnn_decoder(
cell, decoder_fn, inputs=None, sequence_length=sequence_length)
if return_final_state:
return final_state
else:
return outputs
@layer
def reshape_layer(tensor, shape, **opts):
out = tf.reshape(tensor, shape=shape)
return out
@layer
def dense_layer(tensor, hidden_dims, weight=None, bias=None, **opts):
original_tensor_shape = tf.shape(tensor)
in_dim = int(tensor.get_shape()[-1])
rank = _rank(tensor)
if rank > 2:
# -- time distributed dense
tensor = tf.reshape(tensor, shape=(-1, in_dim))
name = opts.get("name", "")
if weight is None:
initializer = tf.contrib.layers.xavier_initializer(uniform=True)
weight = tf.get_variable("{}_dense_W".format(name), initializer=initializer(shape=(in_dim, hidden_dims)))
if bias is None:
bias = tf.get_variable("{}_dense_b".format(name), initializer=tf.zeros(shape=hidden_dims))
out = tf.add(tf.matmul(tensor, weight), bias)
if rank > 2:
# reshape back to time dimension
out = tf.reshape(out, shape=original_tensor_shape)
return out
@layer
def dropout_layer(tensor, keep_prob=1.0, **opts):
keep_prob = _global_keep_prob(keep_prob)
out = tf.nn.dropout(tensor, keep_prob=keep_prob)
return out
# TODO: should i normalize?
@layer
def word_dropout_layer(tensor, keep_prob=1.0, **opts):
keep_prob = _global_keep_prob(keep_prob)
rank = _rank(tensor)
assert rank == 3, "Use embedding lookup layer"
binary_mask = _apply_dropout_mask(tf.shape(tensor)[:2], keep_prob, normalize=False)
binary_mask = tf.expand_dims(binary_mask, axis=-1) # proper broadcasting to zero out entire word vectors
out = tensor * binary_mask
return out
@layer
def relu_layer(tensor, **opts):
out = tf.nn.relu(tensor)
return out
@layer
def tanh_layer(tensor, **opts):
out = tf.nn.tanh(tensor)
return out
@layer
def softmax_layer(tensor, softmax_func=None, **opts):
if softmax_func is None:
softmax_func = tf.nn.softmax
out = softmax_func(tensor)
return out
@layer
def cross_entropy_layer(tensor, target, **opts):
if _rank(tensor) > 1:
target = tf.reshape(target, shape=(-1, ))
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=tensor, labels=target)
mask = tf.cast(tf.not_equal(target, tf.zeros_like(target)), dtype=tf.float32)
out = cross_entropy * mask
return out
@layer
def sigmoid_cross_entropy_layer(tensor, target, **opts):
out = tf.nn.sigmoid_cross_entropy_with_logits(logits=tensor, labels=target)
return out
@layer
def mean_loss_by_example_layer(tensor, sequence_length, **opts):
loss = tf.div(
tf.reduce_sum(tensor, axis=1),
tf.cast(sequence_length, dtype=tf.float32)
)
out = tf.reduce_mean(loss)
tf.summary.scalar('cost', out)
return out
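# Illustrative sketch of composing the loss layers above (never called on
# import). Assumes `logits` of shape [batch*time, vocab] and integer `targets`
# of shape [batch, time]; target id 0 is treated as padding by
# cross_entropy_layer.
def _example_sequence_loss(logits, targets, sequence_length):
    masked_xent = logits >> cross_entropy_layer(target=targets)     # [batch*time]
    masked_xent = masked_xent >> reshape_layer(tf.shape(targets))   # [batch, time]
    return masked_xent >> mean_loss_by_example_layer(sequence_length=sequence_length)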
@layer
def conv1d_layer(tensor, dilation_rate=1, **opts):
raise NotImplementedError
@layer
def residual_layer(tensor, **opts):
raise NotImplementedError
@layer
def highway_layer(tensor, **opts):
raise NotImplementedError
if __name__ == "__main__":
import numpy as np
batch_size = 10
sequence_length = 5
vocab_size = 100
embedding_dim = 32
word_ids = np.random.randint(0, vocab_size, batch_size * sequence_length).reshape(batch_size, sequence_length)
tensor = tf.constant(word_ids)
# print(word_ids >> identity_layer() >> embedding_layer(vocab_size, embedding_dim))
print(tensor >> identity_layer() >> embedding_layer(vocab_size, embedding_dim))
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import itertools
import re
from itertools import imap
from operator import itemgetter
from django.utils.translation import ugettext as _
from desktop.lib import thrift_util
from desktop.conf import get_ldap_password, LDAP_USERNAME
from desktop.conf import DEFAULT_USER
from hadoop import cluster
from TCLIService import TCLIService
from TCLIService.ttypes import TOpenSessionReq, TGetTablesReq, TFetchResultsReq,\
TStatusCode, TGetResultSetMetadataReq, TGetColumnsReq, TTypeId,\
TExecuteStatementReq, TGetOperationStatusReq, TFetchOrientation,\
TCloseSessionReq, TGetSchemasReq, TGetLogReq, TCancelOperationReq,\
TCloseOperationReq, TFetchResultsResp, TRowSet, TProtocolVersion
from beeswax import conf as beeswax_conf
from beeswax import hive_site
from beeswax.hive_site import hiveserver2_use_ssl
from beeswax.models import Session, HiveServerQueryHandle, HiveServerQueryHistory
from beeswax.server.dbms import Table, NoSuchObjectException, DataTable,\
QueryServerException
LOG = logging.getLogger(__name__)
IMPALA_RESULTSET_CACHE_SIZE = 'impala.resultset.cache.size'
DEFAULT_USER = DEFAULT_USER.get()
class HiveServerTable(Table):
"""
  We parse the DESCRIBE EXTENDED output because the metastore API (e.g. GetColumns()) misses most of the information.
Impala only supports a simple DESCRIBE.
"""
def __init__(self, table_results, table_schema, desc_results, desc_schema):
if beeswax_conf.THRIFT_VERSION.get() >= 7:
if not table_results.columns:
raise NoSuchObjectException()
self.table = table_results.columns
else: # Deprecated. To remove in Hue 4.
if not table_results.rows:
raise NoSuchObjectException()
self.table = table_results.rows and table_results.rows[0] or ''
self.table_schema = table_schema
self.desc_results = desc_results
self.desc_schema = desc_schema
@property
def name(self):
return HiveServerTRow(self.table, self.table_schema).col('TABLE_NAME')
@property
def is_view(self):
return HiveServerTRow(self.table, self.table_schema).col('TABLE_TYPE') == 'VIEW' # Used to be VIRTUAL_VIEW
@property
def partition_keys(self):
describe = self.extended_describe
# Parses a list of: partitionKeys:[FieldSchema(name:baz, type:string, comment:null), FieldSchema(name:boom, type:string, comment:null)]
match = re.search('partitionKeys:\[([^\]]+)\]', describe)
if match is not None:
match = match.group(1)
return [PartitionKeyCompatible(*partition)
for partition in re.findall('FieldSchema\(name:(.+?), type:(.+?), comment:(.+?)\)', match)]
else:
return []
@property
def path_location(self):
try:
describe = self.extended_describe
match = re.search('location:([^,]+)', describe)
if match is not None:
match = match.group(1)
return match
except:
# Impala does not have extended_describe
return None
@property
def parameters(self):
# Parses a list of: parameters:{serialization.format=1}),... parameters:{numPartitions=2, EXTERNAL=TRUE}
describe = self.extended_describe
params = re.findall('parameters:\{([^\}]+?)\}', describe)
if params:
params_list = ', '.join(params).split(', ')
return dict([param.split('=')for param in params_list])
else:
return {}
@property
def cols(self):
cols = HiveServerTTableSchema(self.desc_results, self.desc_schema).cols()
try:
end_cols_index = map(itemgetter('col_name'), cols).index('') # Truncate below extended describe
return cols[0:end_cols_index]
except:
try:
# Spark SQL: does not have an empty line in extended describe
try:
end_cols_index = map(itemgetter('col_name'), cols).index('# Partition Information')
except:
end_cols_index = map(itemgetter('col_name'), cols).index('Detailed Table Information')
return cols[0:end_cols_index]
except:
# Impala: uses non extended describe and 'col' instead of 'col_name'
return cols
@property
def comment(self):
return HiveServerTRow(self.table, self.table_schema).col('REMARKS')
@property
def extended_describe(self):
# Just keep rows after 'Detailed Table Information'
rows = HiveServerTTableSchema(self.desc_results, self.desc_schema).cols()
detailed_row_index = map(itemgetter('col_name'), rows).index('Detailed Table Information')
# Hack because of bad delimiter escaping in LazySimpleSerDe in HS2: parameters:{serialization.format=})
describe_text = rows[detailed_row_index]['data_type']
try:
# LazySimpleSerDe case, also add full next row
return describe_text + rows[detailed_row_index + 1]['col_name'] + rows[detailed_row_index + 1]['data_type']
except:
return describe_text
@property
def properties(self):
# Ugly but would need a recursive parsing to be clean
no_table = re.sub('\)$', '', re.sub('^Table\(', '', self.extended_describe))
properties = re.sub(', sd:StorageDescriptor\(cols.+?\]', '', no_table).split(', ')
props = []
for prop in properties:
key_val = prop.rsplit(':', 1)
if len(key_val) == 1:
key_val = key_val[0].rsplit('=', 1)
if len(key_val) == 2:
props.append(key_val)
return props
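# Worked example (illustrative only, with a hypothetical DESCRIBE EXTENDED
# snippet) of the regular expressions used by HiveServerTable above. Defined
# for documentation purposes and never called.
def _example_describe_parsing():
  describe = ('Table(tableName:sample, partitionKeys:[FieldSchema(name:dt, '
              'type:string, comment:null)], parameters:{EXTERNAL=TRUE, numFiles=2})')
  keys = re.findall('FieldSchema\(name:(.+?), type:(.+?), comment:(.+?)\)', describe)
  params = re.findall('parameters:\{([^\}]+?)\}', describe)
  return keys, params  # ([('dt', 'string', 'null')], ['EXTERNAL=TRUE, numFiles=2'])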
class HiveServerTRowSet2:
def __init__(self, row_set, schema):
self.row_set = row_set
self.rows = row_set.rows
self.schema = schema
self.startRowOffset = row_set.startRowOffset
def is_empty(self):
return not self.row_set.columns or not HiveServerTColumnValue2(self.row_set.columns[0]).val
def cols(self, col_names):
cols_rows = []
rs = HiveServerTRow2(self.row_set.columns, self.schema)
cols = [rs.full_col(name) for name in col_names]
for cols_row in itertools.izip(*cols):
cols_rows.append(dict(itertools.izip(col_names, cols_row)))
return cols_rows
def __iter__(self):
return self
def next(self):
if self.row_set.columns:
return HiveServerTRow2(self.row_set.columns, self.schema)
else:
raise StopIteration
class HiveServerTRow2:
def __init__(self, cols, schema):
self.cols = cols
self.schema = schema
def col(self, colName):
pos = self._get_col_position(colName)
return HiveServerTColumnValue2(self.cols[pos]).val[0] # Return only first element
def full_col(self, colName):
pos = self._get_col_position(colName)
return HiveServerTColumnValue2(self.cols[pos]).val # Return the full column and its values
def _get_col_position(self, column_name):
return filter(lambda (i, col): col.columnName == column_name, enumerate(self.schema.columns))[0][0]
def fields(self):
try:
return [HiveServerTColumnValue2(field).val.pop(0) for field in self.cols]
except IndexError:
raise StopIteration
class HiveServerTColumnValue2:
def __init__(self, tcolumn_value):
self.column_value = tcolumn_value
@property
def val(self):
# Could directly get index from schema but would need to cache the schema
if self.column_value.stringVal:
return self._get_val(self.column_value.stringVal)
elif self.column_value.i16Val is not None:
return self._get_val(self.column_value.i16Val)
elif self.column_value.i32Val is not None:
return self._get_val(self.column_value.i32Val)
elif self.column_value.i64Val is not None:
return self._get_val(self.column_value.i64Val)
elif self.column_value.doubleVal is not None:
return self._get_val(self.column_value.doubleVal)
elif self.column_value.boolVal is not None:
return self._get_val(self.column_value.boolVal)
elif self.column_value.byteVal is not None:
return self._get_val(self.column_value.byteVal)
elif self.column_value.binaryVal is not None:
return self._get_val(self.column_value.binaryVal)
@classmethod
def _get_val(cls, column):
column.values = cls.set_nulls(column.values, column.nulls)
column.nulls = '' # Clear the null values for not re-marking again the column with nulls at the next call
return column.values
@classmethod
def mark_nulls(cls, values, bytestring):
mask = bytearray(bytestring)
for n in mask:
yield n & 0x01
yield n & 0x02
yield n & 0x04
yield n & 0x08
yield n & 0x10
yield n & 0x20
yield n & 0x40
yield n & 0x80
@classmethod
def set_nulls(cls, values, bytestring):
if bytestring == '' or re.match('^(\x00)+$', bytestring): # HS2 has just \x00 or '', Impala can have \x00\x00...
return values
else:
return [None if is_null else value for value, is_null in itertools.izip(values, cls.mark_nulls(values, bytestring))]
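# Worked example of the null bitmap decoding above (illustrative only, never
# called): the nulls bytestring packs one bit per value, least significant bit
# first, so '\x05' (binary 00000101) marks the first and third values as null
# and the call below returns [None, 'b', None].
def _example_null_decoding():
  return HiveServerTColumnValue2.set_nulls(['a', 'b', 'c'], '\x05')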
class HiveServerDataTable(DataTable):
def __init__(self, results, schema, operation_handle, query_server):
self.schema = schema and schema.schema
self.row_set = HiveServerTRowSet(results.results, schema)
self.operation_handle = operation_handle
if query_server['server_name'] == 'impala':
self.has_more = results.hasMoreRows
else:
self.has_more = not self.row_set.is_empty() # Should be results.hasMoreRows but always True in HS2
self.startRowOffset = self.row_set.startRowOffset # Always 0 in HS2
@property
def ready(self):
return True
def cols(self):
if self.schema:
return [HiveServerTColumnDesc(col) for col in self.schema.columns]
else:
return []
def rows(self):
for row in self.row_set:
yield row.fields()
class HiveServerTTableSchema:
def __init__(self, columns, schema):
self.columns = columns
self.schema = schema
def cols(self):
try:
return HiveServerTRowSet(self.columns, self.schema).cols(('col_name', 'data_type', 'comment'))
except:
# Impala API is different
cols = HiveServerTRowSet(self.columns, self.schema).cols(('name', 'type', 'comment'))
for col in cols:
col['col_name'] = col.pop('name')
col['col_type'] = col.pop('type')
return cols
def col(self, colName):
pos = self._get_col_position(colName)
return HiveServerTColumnDesc(self.columns[pos]).val
def _get_col_position(self, column_name):
return filter(lambda (i, col): col.columnName == column_name, enumerate(self.schema.columns))[0][0]
if beeswax_conf.THRIFT_VERSION.get() >= 7:
HiveServerTRow = HiveServerTRow2
HiveServerTRowSet = HiveServerTRowSet2
else:
# Deprecated. To remove in Hue 4.
class HiveServerTRow:
def __init__(self, row, schema):
self.row = row
self.schema = schema
def col(self, colName):
pos = self._get_col_position(colName)
return HiveServerTColumnValue(self.row.colVals[pos]).val
def _get_col_position(self, column_name):
return filter(lambda (i, col): col.columnName == column_name, enumerate(self.schema.columns))[0][0]
def fields(self):
return [HiveServerTColumnValue(field).val for field in self.row.colVals]
class HiveServerTRowSet:
def __init__(self, row_set, schema):
self.row_set = row_set
self.rows = row_set.rows
self.schema = schema
self.startRowOffset = row_set.startRowOffset
def is_empty(self):
return len(self.rows) == 0
def cols(self, col_names):
cols_rows = []
for row in self.rows:
row = HiveServerTRow(row, self.schema)
cols = {}
for col_name in col_names:
cols[col_name] = row.col(col_name)
cols_rows.append(cols)
return cols_rows
def __iter__(self):
return self
def next(self):
if self.rows:
return HiveServerTRow(self.rows.pop(0), self.schema)
else:
raise StopIteration
class HiveServerTColumnValue:
def __init__(self, tcolumn_value):
self.column_value = tcolumn_value
@property
def val(self):
if self.column_value.boolVal is not None:
return self.column_value.boolVal.value
elif self.column_value.byteVal is not None:
return self.column_value.byteVal.value
elif self.column_value.i16Val is not None:
return self.column_value.i16Val.value
elif self.column_value.i32Val is not None:
return self.column_value.i32Val.value
elif self.column_value.i64Val is not None:
return self.column_value.i64Val.value
elif self.column_value.doubleVal is not None:
return self.column_value.doubleVal.value
elif self.column_value.stringVal is not None:
return self.column_value.stringVal.value
class HiveServerTColumnDesc:
def __init__(self, column):
self.column = column
@property
def name(self):
return self.column.columnName
@property
def comment(self):
return self.column.comment
@property
def type(self):
return self.get_type(self.column.typeDesc)
@classmethod
def get_type(self, typeDesc):
for ttype in typeDesc.types:
if ttype.primitiveEntry is not None:
return TTypeId._VALUES_TO_NAMES[ttype.primitiveEntry.type]
elif ttype.mapEntry is not None:
return ttype.mapEntry
elif ttype.unionEntry is not None:
return ttype.unionEntry
elif ttype.arrayEntry is not None:
return ttype.arrayEntry
elif ttype.structEntry is not None:
return ttype.structEntry
elif ttype.userDefinedTypeEntry is not None:
return ttype.userDefinedTypeEntry
class HiveServerClient:
HS2_MECHANISMS = {'KERBEROS': 'GSSAPI', 'NONE': 'PLAIN', 'NOSASL': 'NOSASL', 'LDAP': 'PLAIN'}
def __init__(self, query_server, user):
self.query_server = query_server
self.user = user
use_sasl, mechanism, kerberos_principal_short_name, impersonation_enabled, ldap_username, ldap_password = self.get_security()
LOG.info('use_sasl=%s, mechanism=%s, kerberos_principal_short_name=%s, impersonation_enabled=%s' % (
use_sasl, mechanism, kerberos_principal_short_name, impersonation_enabled))
self.use_sasl = use_sasl
self.kerberos_principal_short_name = kerberos_principal_short_name
self.impersonation_enabled = impersonation_enabled
if self.query_server['server_name'] == 'impala':
from impala import conf as impala_conf
ssl_enabled = impala_conf.SSL.ENABLED.get()
ca_certs = impala_conf.SSL.CACERTS.get()
keyfile = impala_conf.SSL.KEY.get()
certfile = impala_conf.SSL.CERT.get()
validate = impala_conf.SSL.VALIDATE.get()
timeout = impala_conf.SERVER_CONN_TIMEOUT.get()
else:
ssl_enabled = hiveserver2_use_ssl()
ca_certs = beeswax_conf.SSL.CACERTS.get()
keyfile = beeswax_conf.SSL.KEY.get()
certfile = beeswax_conf.SSL.CERT.get()
validate = beeswax_conf.SSL.VALIDATE.get()
timeout = beeswax_conf.SERVER_CONN_TIMEOUT.get()
if ldap_username:
username = ldap_username
password = ldap_password
else:
username = user.username
password = None
self._client = thrift_util.get_client(TCLIService.Client,
query_server['server_host'],
query_server['server_port'],
service_name=query_server['server_name'],
kerberos_principal=kerberos_principal_short_name,
use_sasl=use_sasl,
mechanism=mechanism,
username=username,
password=password,
timeout_seconds=timeout,
use_ssl=ssl_enabled,
ca_certs=ca_certs,
keyfile=keyfile,
certfile=certfile,
validate=validate,
transport_mode=query_server.get('transport_mode', 'socket'),
http_url=query_server.get('http_url', '')
)
def get_security(self):
principal = self.query_server['principal']
impersonation_enabled = False
ldap_username = None
ldap_password = get_ldap_password()
if ldap_password is not None: # Pass-through LDAP authentication
ldap_username = LDAP_USERNAME.get()
if principal:
kerberos_principal_short_name = principal.split('/', 1)[0]
else:
kerberos_principal_short_name = None
if self.query_server['server_name'] == 'impala':
if ldap_password: # Force LDAP auth if ldap_password is provided
use_sasl = True
mechanism = HiveServerClient.HS2_MECHANISMS['NONE']
else:
cluster_conf = cluster.get_cluster_conf_for_job_submission()
use_sasl = cluster_conf is not None and cluster_conf.SECURITY_ENABLED.get()
mechanism = HiveServerClient.HS2_MECHANISMS['KERBEROS']
impersonation_enabled = self.query_server['impersonation_enabled']
else:
hive_mechanism = hive_site.get_hiveserver2_authentication()
if hive_mechanism not in HiveServerClient.HS2_MECHANISMS:
raise Exception(_('%s server authentication not supported. Valid are %s.' % (hive_mechanism, HiveServerClient.HS2_MECHANISMS.keys())))
use_sasl = hive_mechanism in ('KERBEROS', 'NONE', 'LDAP')
mechanism = HiveServerClient.HS2_MECHANISMS[hive_mechanism]
impersonation_enabled = hive_site.hiveserver2_impersonation_enabled()
return use_sasl, mechanism, kerberos_principal_short_name, impersonation_enabled, ldap_username, ldap_password
def open_session(self, user):
kwargs = {
'client_protocol': beeswax_conf.THRIFT_VERSION.get() - 1,
        'username': user.username,  # If SASL or LDAP, the username is taken from the authentication mechanism, since it depends on it.
'configuration': {},
}
if self.impersonation_enabled:
kwargs.update({'username': DEFAULT_USER})
if self.query_server['server_name'] == 'impala': # Only when Impala accepts it
kwargs['configuration'].update({'impala.doas.user': user.username})
if self.query_server['server_name'] == 'beeswax': # All the time
kwargs['configuration'].update({'hive.server2.proxy.user': user.username})
req = TOpenSessionReq(**kwargs)
res = self._client.OpenSession(req)
if res.status is not None and res.status.statusCode not in (TStatusCode.SUCCESS_STATUS,):
if hasattr(res.status, 'errorMessage') and res.status.errorMessage:
message = res.status.errorMessage
else:
message = ''
raise QueryServerException(Exception('Bad status for request %s:\n%s' % (req, res)), message=message)
sessionId = res.sessionHandle.sessionId
LOG.info('Opening session %s' % sessionId)
encoded_status, encoded_guid = HiveServerQueryHandle(secret=sessionId.secret, guid=sessionId.guid).get()
return Session.objects.create(owner=user,
application=self.query_server['server_name'],
status_code=res.status.statusCode,
secret=encoded_status,
guid=encoded_guid,
server_protocol_version=res.serverProtocolVersion)
def call(self, fn, req, status=TStatusCode.SUCCESS_STATUS):
session = Session.objects.get_session(self.user, self.query_server['server_name'])
if session is None:
session = self.open_session(self.user)
if hasattr(req, 'sessionHandle') and req.sessionHandle is None:
req.sessionHandle = session.get_handle()
res = fn(req)
# Not supported currently in HS2 and Impala: TStatusCode.INVALID_HANDLE_STATUS
if res.status.statusCode == TStatusCode.ERROR_STATUS and \
re.search('Invalid SessionHandle|Invalid session|Client session expired', res.status.errorMessage or '', re.I):
      LOG.info('Retrying with a new session for %s because of %s' % (self.user, res))
session = self.open_session(self.user)
req.sessionHandle = session.get_handle()
# Get back the name of the function to call
res = getattr(self._client, fn.attr)(req)
if status is not None and res.status.statusCode not in (
TStatusCode.SUCCESS_STATUS, TStatusCode.SUCCESS_WITH_INFO_STATUS, TStatusCode.STILL_EXECUTING_STATUS):
if hasattr(res.status, 'errorMessage') and res.status.errorMessage:
message = res.status.errorMessage
else:
message = ''
raise QueryServerException(Exception('Bad status for request %s:\n%s' % (req, res)), message=message)
else:
return res
def close_session(self, sessionHandle):
req = TCloseSessionReq(sessionHandle=sessionHandle)
return self._client.CloseSession(req)
def get_databases(self):
# GetCatalogs() is not implemented in HS2
req = TGetSchemasReq()
res = self.call(self._client.GetSchemas, req)
results, schema = self.fetch_result(res.operationHandle, orientation=TFetchOrientation.FETCH_NEXT)
self.close_operation(res.operationHandle)
col = 'TABLE_SCHEM'
return HiveServerTRowSet(results.results, schema.schema).cols((col,))
def get_tables(self, database, table_names):
req = TGetTablesReq(schemaName=database, tableName=table_names)
res = self.call(self._client.GetTables, req)
results, schema = self.fetch_result(res.operationHandle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=5000)
self.close_operation(res.operationHandle)
return HiveServerTRowSet(results.results, schema.schema).cols(('TABLE_NAME',))
def get_table(self, database, table_name):
req = TGetTablesReq(schemaName=database, tableName=table_name)
res = self.call(self._client.GetTables, req)
table_results, table_schema = self.fetch_result(res.operationHandle, orientation=TFetchOrientation.FETCH_NEXT)
self.close_operation(res.operationHandle)
if self.query_server['server_name'] == 'impala':
      # Impala does not support DESCRIBE EXTENDED
query = 'DESCRIBE %s' % table_name
else:
query = 'DESCRIBE EXTENDED %s' % table_name
(desc_results, desc_schema), operation_handle = self.execute_statement(query, max_rows=5000, orientation=TFetchOrientation.FETCH_NEXT)
self.close_operation(operation_handle)
return HiveServerTable(table_results.results, table_schema.schema, desc_results.results, desc_schema.schema)
def execute_query(self, query, max_rows=1000):
configuration = self._get_query_configuration(query)
return self.execute_query_statement(statement=query.query['query'], max_rows=max_rows, configuration=configuration)
def execute_query_statement(self, statement, max_rows=1000, configuration={}):
(results, schema), operation_handle = self.execute_statement(statement=statement, max_rows=max_rows, configuration=configuration)
return HiveServerDataTable(results, schema, operation_handle, self.query_server)
def execute_async_query(self, query, statement=0):
if statement == 0:
# Impala just has settings currently
if self.query_server['server_name'] == 'beeswax':
for resource in query.get_configuration_statements():
self.execute_statement(resource.strip())
configuration = {}
if self.query_server['server_name'] == 'impala' and self.query_server['querycache_rows'] > 0:
configuration[IMPALA_RESULTSET_CACHE_SIZE] = str(self.query_server['querycache_rows'])
# The query can override the default configuration
configuration.update(self._get_query_configuration(query))
query_statement = query.get_query_statement(statement)
return self.execute_async_statement(statement=query_statement, confOverlay=configuration)
def execute_statement(self, statement, max_rows=1000, configuration={}, orientation=TFetchOrientation.FETCH_FIRST):
if self.query_server['server_name'] == 'impala' and self.query_server['QUERY_TIMEOUT_S'] > 0:
configuration['QUERY_TIMEOUT_S'] = str(self.query_server['QUERY_TIMEOUT_S'])
req = TExecuteStatementReq(statement=statement.encode('utf-8'), confOverlay=configuration)
res = self.call(self._client.ExecuteStatement, req)
return self.fetch_result(res.operationHandle, max_rows=max_rows, orientation=orientation), res.operationHandle
def execute_async_statement(self, statement, confOverlay):
if self.query_server['server_name'] == 'impala' and self.query_server['QUERY_TIMEOUT_S'] > 0:
confOverlay['QUERY_TIMEOUT_S'] = str(self.query_server['QUERY_TIMEOUT_S'])
req = TExecuteStatementReq(statement=statement.encode('utf-8'), confOverlay=confOverlay, runAsync=True)
res = self.call(self._client.ExecuteStatement, req)
return HiveServerQueryHandle(secret=res.operationHandle.operationId.secret,
guid=res.operationHandle.operationId.guid,
operation_type=res.operationHandle.operationType,
has_result_set=res.operationHandle.hasResultSet,
modified_row_count=res.operationHandle.modifiedRowCount)
def fetch_data(self, operation_handle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=1000):
    # Fetch until the result is empty, due to a HS2 bug, instead of looking at hasMoreRows
results, schema = self.fetch_result(operation_handle, orientation, max_rows)
return HiveServerDataTable(results, schema, operation_handle, self.query_server)
def cancel_operation(self, operation_handle):
req = TCancelOperationReq(operationHandle=operation_handle)
return self.call(self._client.CancelOperation, req)
def close_operation(self, operation_handle):
req = TCloseOperationReq(operationHandle=operation_handle)
return self.call(self._client.CloseOperation, req)
def get_columns(self, database, table):
req = TGetColumnsReq(schemaName=database, tableName=table)
res = self.call(self._client.GetColumns, req)
res, schema = self.fetch_result(res.operationHandle, orientation=TFetchOrientation.FETCH_NEXT)
self.close_operation(res.operationHandle)
return res, schema
def fetch_result(self, operation_handle, orientation=TFetchOrientation.FETCH_FIRST, max_rows=1000):
if operation_handle.hasResultSet:
fetch_req = TFetchResultsReq(operationHandle=operation_handle, orientation=orientation, maxRows=max_rows)
res = self.call(self._client.FetchResults, fetch_req)
else:
res = TFetchResultsResp(results=TRowSet(startRowOffset=0, rows=[], columns=[]))
if operation_handle.hasResultSet and TFetchOrientation.FETCH_FIRST: # Only fetch for the first call that should be with start_over
meta_req = TGetResultSetMetadataReq(operationHandle=operation_handle)
schema = self.call(self._client.GetResultSetMetadata, meta_req)
else:
schema = None
return res, schema
def fetch_log(self, operation_handle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=1000):
req = TFetchResultsReq(operationHandle=operation_handle, orientation=orientation, maxRows=max_rows, fetchType=1)
res = self.call(self._client.FetchResults, req)
if beeswax_conf.THRIFT_VERSION.get() >= 7:
lines = res.results.columns[0].stringVal.values
else:
lines = imap(lambda r: r.colVals[0].stringVal.value, res.results.rows)
return '\n'.join(lines)
def get_operation_status(self, operation_handle):
req = TGetOperationStatusReq(operationHandle=operation_handle)
return self.call(self._client.GetOperationStatus, req)
def explain(self, query):
query_statement = query.get_query_statement(0)
configuration = self._get_query_configuration(query)
return self.execute_query_statement(statement='EXPLAIN %s' % query_statement, configuration=configuration)
def get_log(self, operation_handle):
try:
req = TGetLogReq(operationHandle=operation_handle)
res = self.call(self._client.GetLog, req)
return res.log
except:
return 'Server does not support GetLog()'
def get_partitions(self, database, table_name, max_parts):
table = self.get_table(database, table_name)
if max_parts is None or max_parts <= 0:
max_rows = 10000
else:
max_rows = 1000 if max_parts <= 250 else max_parts
partitionTable = self.execute_query_statement('SHOW PARTITIONS %s' % table_name, max_rows=max_rows) # DB prefix supported since Hive 0.13
return [PartitionValueCompatible(partition, table) for partition in partitionTable.rows()][-max_parts:]
def _get_query_configuration(self, query):
return dict([(setting['key'], setting['value']) for setting in query.settings])
class HiveServerTableCompatible(HiveServerTable):
"""Same API as Beeswax"""
def __init__(self, hive_table):
self.table = hive_table.table
self.table_schema = hive_table.table_schema
self.desc_results = hive_table.desc_results
self.desc_schema = hive_table.desc_schema
@property
def cols(self):
return [type('Col', (object,), {'name': col.get('col_name', '').strip(),
'type': col.get('data_type', col.get('col_type', '')).strip(), # Impala is col_type
'comment': col.get('comment', '').strip() if col.get('comment') else '', }) for col in HiveServerTable.cols.fget(self)]
class ResultCompatible:
def __init__(self, data_table):
self.data_table = data_table
self.rows = data_table.rows
self.has_more = data_table.has_more
self.start_row = data_table.startRowOffset
self.ready = True
@property
def columns(self):
return self.cols()
def cols(self):
return [col.name for col in self.data_table.cols()]
class PartitionKeyCompatible:
def __init__(self, name, type, comment):
self.name = name
self.type = type
self.comment = comment
def __eq__(self, other):
return isinstance(other, PartitionKeyCompatible) and \
self.name == other.name and \
self.type == other.type and \
self.comment == other.comment
def __repr__(self):
return 'PartitionKey(name:%s, type:%s, comment:%s)' % (self.name, self.type, self.comment)
class PartitionValueCompatible:
def __init__(self, partition, table):
# Parses: ['datehour=2013022516'] or ['month=2011-07/dt=2011-07-01/hr=12']
self.values = [val.split('=')[1] for part in partition for val in part.split('/')]
self.sd = type('Sd', (object,), {'location': '%s/%s' % (table.path_location, ','.join(partition)),})
class ExplainCompatible:
def __init__(self, data_table):
self.textual = '\n'.join([line[0] for line in data_table.rows()])
class ResultMetaCompatible:
def __init__(self):
self.in_tablename = True
class HiveServerClientCompatible(object):
"""Same API as Beeswax"""
def __init__(self, client):
self._client = client
self.user = client.user
self.query_server = client.query_server
def query(self, query, statement=0):
return self._client.execute_async_query(query, statement)
def get_state(self, handle):
operationHandle = handle.get_rpc_handle()
res = self._client.get_operation_status(operationHandle)
return HiveServerQueryHistory.STATE_MAP[res.operationState]
def get_operation_status(self, handle):
operationHandle = handle.get_rpc_handle()
return self._client.get_operation_status(operationHandle)
def use(self, query):
data = self._client.execute_query(query)
self._client.close_operation(data.operation_handle)
return data
def explain(self, query):
data_table = self._client.explain(query)
data = ExplainCompatible(data_table)
self._client.close_operation(data_table.operation_handle)
return data
def fetch(self, handle, start_over=False, max_rows=None):
operationHandle = handle.get_rpc_handle()
if max_rows is None:
max_rows = 1000
if start_over and not (self.query_server['server_name'] == 'impala' and self.query_server['querycache_rows'] == 0): # Backward compatibility for impala
orientation = TFetchOrientation.FETCH_FIRST
else:
orientation = TFetchOrientation.FETCH_NEXT
data_table = self._client.fetch_data(operationHandle, orientation=orientation, max_rows=max_rows)
return ResultCompatible(data_table)
def cancel_operation(self, handle):
operationHandle = handle.get_rpc_handle()
return self._client.cancel_operation(operationHandle)
def close(self, handle):
return self.close_operation(handle)
def close_operation(self, handle):
operationHandle = handle.get_rpc_handle()
return self._client.close_operation(operationHandle)
def close_session(self, session):
operationHandle = session.get_handle()
return self._client.close_session(operationHandle)
def dump_config(self):
return 'Does not exist in HS2'
def get_log(self, handle, start_over=True):
operationHandle = handle.get_rpc_handle()
if beeswax_conf.USE_GET_LOG_API.get() or self.query_server['server_name'] == 'impala':
return self._client.get_log(operationHandle)
else:
if start_over:
orientation = TFetchOrientation.FETCH_FIRST
else:
orientation = TFetchOrientation.FETCH_NEXT
return self._client.fetch_log(operationHandle, orientation=orientation, max_rows=-1)
def get_databases(self):
col = 'TABLE_SCHEM'
return [table[col] for table in self._client.get_databases()]
def get_tables(self, database, table_names):
tables = [table['TABLE_NAME'] for table in self._client.get_tables(database, table_names)]
tables.sort()
return tables
def get_table(self, database, table_name):
table = self._client.get_table(database, table_name)
return HiveServerTableCompatible(table)
def get_columns(self, database, table):
return self._client.get_columns(database, table)
def get_default_configuration(self, *args, **kwargs):
return {}
def get_results_metadata(self, handle):
# We just need to mock
return ResultMetaCompatible()
def create_database(self, name, description): raise NotImplementedError()
def get_database(self, *args, **kwargs): raise NotImplementedError()
def alter_table(self, dbname, tbl_name, new_tbl): raise NotImplementedError()
def open_session(self, user):
return self._client.open_session(user)
def add_partition(self, new_part): raise NotImplementedError()
def get_partition(self, *args, **kwargs): raise NotImplementedError()
def get_partitions(self, database, table_name, max_parts):
return self._client.get_partitions(database, table_name, max_parts)
def alter_partition(self, db_name, tbl_name, new_part): raise NotImplementedError()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v3 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
from nets import inception_utils
slim = contrib_slim
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
0.0, stddev)
def inception_v3_base(inputs,
final_endpoint='Mixed_7c',
min_depth=16,
depth_multiplier=1.0,
scope=None):
"""Inception model from http://arxiv.org/abs/1512.00567.
Constructs an Inception v3 network from inputs to the given final endpoint.
This method can construct the network up to the final inception block
Mixed_7c.
Note that the names of the layers in the paper do not correspond to the names
of the endpoints registered by this function although they build the same
network.
Here is a mapping from the old_names to the new names:
Old name | New name
=======================================
conv0 | Conv2d_1a_3x3
conv1 | Conv2d_2a_3x3
conv2 | Conv2d_2b_3x3
pool1 | MaxPool_3a_3x3
conv3 | Conv2d_3b_1x1
conv4 | Conv2d_4a_3x3
pool2 | MaxPool_5a_3x3
mixed_35x35x256a | Mixed_5b
mixed_35x35x288a | Mixed_5c
mixed_35x35x288b | Mixed_5d
mixed_17x17x768a | Mixed_6a
mixed_17x17x768b | Mixed_6b
mixed_17x17x768c | Mixed_6c
mixed_17x17x768d | Mixed_6d
mixed_17x17x768e | Mixed_6e
mixed_8x8x1280a | Mixed_7a
mixed_8x8x2048a | Mixed_7b
mixed_8x8x2048b | Mixed_7c
Args:
inputs: a tensor of size [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3',
'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c',
'Mixed_6d', 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'].
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
scope: Optional variable_scope.
Returns:
tensor_out: output tensor corresponding to the final_endpoint.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
or depth_multiplier <= 0
"""
# end_points will collect relevant activations for external use, for example
# summaries or losses.
end_points = {}
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
depth = lambda d: max(int(d * depth_multiplier), min_depth)
with tf.compat.v1.variable_scope(scope, 'InceptionV3', [inputs]):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='VALID'):
# 299 x 299 x 3
end_point = 'Conv2d_1a_3x3'
net = slim.conv2d(inputs, depth(32), [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 149 x 149 x 32
end_point = 'Conv2d_2a_3x3'
net = slim.conv2d(net, depth(32), [3, 3], scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 147 x 147 x 32
end_point = 'Conv2d_2b_3x3'
net = slim.conv2d(net, depth(64), [3, 3], padding='SAME', scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 147 x 147 x 64
end_point = 'MaxPool_3a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 73 x 73 x 64
end_point = 'Conv2d_3b_1x1'
net = slim.conv2d(net, depth(80), [1, 1], scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 73 x 73 x 80.
end_point = 'Conv2d_4a_3x3'
net = slim.conv2d(net, depth(192), [3, 3], scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 71 x 71 x 192.
end_point = 'MaxPool_5a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 35 x 35 x 192.
# Inception blocks
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# mixed: 35 x 35 x 256.
end_point = 'Mixed_5b'
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
scope='Conv2d_0b_5x5')
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0c_3x3')
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(32), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_1: 35 x 35 x 288.
end_point = 'Mixed_5c'
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0b_1x1')
branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
scope='Conv_1_0c_5x5')
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(64), [1, 1],
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0c_3x3')
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_2: 35 x 35 x 288.
end_point = 'Mixed_5d'
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
scope='Conv2d_0b_5x5')
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0c_3x3')
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_3: 17 x 17 x 768.
end_point = 'Mixed_6a'
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(384), [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_1x1')
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed4: 17 x 17 x 768.
end_point = 'Mixed_6b'
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(128), [1, 7],
scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
scope='Conv2d_0c_7x1')
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(128), [7, 1],
scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(branch_2, depth(128), [1, 7],
scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(branch_2, depth(128), [7, 1],
scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
scope='Conv2d_0e_1x7')
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_5: 17 x 17 x 768.
end_point = 'Mixed_6c'
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],
scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
scope='Conv2d_0c_7x1')
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(branch_2, depth(160), [1, 7],
scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
scope='Conv2d_0e_1x7')
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_6: 17 x 17 x 768.
end_point = 'Mixed_6d'
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],
scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
scope='Conv2d_0c_7x1')
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(branch_2, depth(160), [1, 7],
scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
scope='Conv2d_0e_1x7')
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_7: 17 x 17 x 768.
end_point = 'Mixed_6e'
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],
scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
scope='Conv2d_0c_7x1')
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(192), [7, 1],
scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(branch_2, depth(192), [7, 1],
scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
scope='Conv2d_0e_1x7')
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_8: 8 x 8 x 1280.
end_point = 'Mixed_7a'
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, depth(320), [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],
scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
scope='Conv2d_0c_7x1')
branch_1 = slim.conv2d(branch_1, depth(192), [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_9: 8 x 8 x 2048.
end_point = 'Mixed_7b'
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat(axis=3, values=[
slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')])
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(
branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = tf.concat(axis=3, values=[
slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_10: 8 x 8 x 2048.
end_point = 'Mixed_7c'
with tf.compat.v1.variable_scope(end_point):
with tf.compat.v1.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
with tf.compat.v1.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat(axis=3, values=[
slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')])
with tf.compat.v1.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(
branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = tf.concat(axis=3, values=[
slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
with tf.compat.v1.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
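# Minimal usage sketch (illustrative only, not part of the original file): build
# the convolutional trunk up to an intermediate endpoint for 299x299 inputs,
# using the arg scope from inception_utils. Defined for documentation purposes
# and never called on import.
def _example_inception_v3_base():
  images = tf.compat.v1.placeholder(tf.float32, [None, 299, 299, 3])
  with slim.arg_scope(inception_utils.inception_arg_scope()):
    net, end_points = inception_v3_base(images, final_endpoint='Mixed_6e')
  return net, end_points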
def inception_v3(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.8,
min_depth=16,
depth_multiplier=1.0,
prediction_fn=slim.softmax,
spatial_squeeze=True,
reuse=None,
create_aux_logits=True,
scope='InceptionV3',
global_pool=False):
"""Inception model from http://arxiv.org/abs/1512.00567.
"Rethinking the Inception Architecture for Computer Vision"
Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
Zbigniew Wojna.
With the default arguments this method constructs the exact model defined in
the paper. However, one can experiment with variations of the inception_v3
network by changing arguments dropout_keep_prob, min_depth and
depth_multiplier.
The default image size used to train this network is 299x299.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer
is omitted and the input features to the logits layer (before dropout)
are returned instead.
is_training: whether the model is being trained or not.
dropout_keep_prob: the percentage of activation values that are retained.
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
prediction_fn: a function to get predictions out of logits.
spatial_squeeze: if True, logits is of shape [B, C]; if False, logits is of
shape [B, 1, 1, C], where B is batch_size and C is the number of classes.
reuse: whether or not the network and its variables should be reused. To be
able to reuse, 'scope' must be given.
create_aux_logits: Whether to create the auxiliary logits.
scope: Optional variable_scope.
global_pool: Optional boolean flag to control the avgpooling before the
logits layer. If false or unset, pooling is done with a fixed window
that reduces default-sized inputs to 1x1, while larger inputs lead to
larger outputs. If true, any input size is pooled down to 1x1.
Returns:
net: a Tensor with the logits (pre-softmax activations) if num_classes
is a non-zero integer, or the non-dropped-out input to the logits layer
if num_classes is 0 or None.
end_points: a dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: if 'depth_multiplier' is less than or equal to zero.
"""
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
depth = lambda d: max(int(d * depth_multiplier), min_depth)
with tf.compat.v1.variable_scope(
scope, 'InceptionV3', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_v3_base(
inputs, scope=scope, min_depth=min_depth,
depth_multiplier=depth_multiplier)
# Auxiliary Head logits
if create_aux_logits and num_classes:
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
aux_logits = end_points['Mixed_6e']
with tf.compat.v1.variable_scope('AuxLogits'):
aux_logits = slim.avg_pool2d(
aux_logits, [5, 5], stride=3, padding='VALID',
scope='AvgPool_1a_5x5')
aux_logits = slim.conv2d(aux_logits, depth(128), [1, 1],
scope='Conv2d_1b_1x1')
# Shape of feature map before the final layer.
kernel_size = _reduced_kernel_size_for_small_input(
aux_logits, [5, 5])
aux_logits = slim.conv2d(
aux_logits, depth(768), kernel_size,
weights_initializer=trunc_normal(0.01),
padding='VALID', scope='Conv2d_2a_{}x{}'.format(*kernel_size))
aux_logits = slim.conv2d(
aux_logits, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, weights_initializer=trunc_normal(0.001),
scope='Conv2d_2b_1x1')
if spatial_squeeze:
aux_logits = tf.squeeze(aux_logits, [1, 2], name='SpatialSqueeze')
end_points['AuxLogits'] = aux_logits
# Final pooling and prediction
with tf.compat.v1.variable_scope('Logits'):
if global_pool:
# Global average pooling.
net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], keepdims=True, name='GlobalPool')
end_points['global_pool'] = net
else:
# Pooling with a fixed kernel size.
kernel_size = _reduced_kernel_size_for_small_input(net, [8, 8])
net = slim.avg_pool2d(net, kernel_size, padding='VALID',
scope='AvgPool_1a_{}x{}'.format(*kernel_size))
end_points['AvgPool_1a'] = net
if not num_classes:
return net, end_points
# 1 x 1 x 2048
net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
end_points['PreLogits'] = net
# 2048
logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='Conv2d_1c_1x1')
if spatial_squeeze:
logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
# 1000
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
inception_v3.default_image_size = 299
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time, this
function assumes that the input images are large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
a tensor with the kernel size.
TODO(jrru): Make this function work with unknown shapes. Theoretically, this
can be done with the code below. Problems are two-fold: (1) If the shape was
known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
handle tensors that define the kernel size.
shape = tf.shape(input_tensor)
return tf.stack([tf.minimum(shape[1], kernel_size[0]),
tf.minimum(shape[2], kernel_size[1])])
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size_out = kernel_size
else:
kernel_size_out = [min(shape[1], kernel_size[0]),
min(shape[2], kernel_size[1])]
return kernel_size_out
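# Illustrative note (added; not part of the original file): with a statically shaped
# feature map the requested kernel is simply clipped element-wise, e.g. a 5x7 map and
# a requested [8, 8] kernel yield [5, 7]:
#   _reduced_kernel_size_for_small_input(tf.zeros([1, 5, 7, 768]), [8, 8]) == [5, 7]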
inception_v3_arg_scope = inception_utils.inception_arg_scope
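# Hedged usage sketch (illustrative addition, not from the original file): one way the
# model above might be built for inference, assuming `tf` and `slim` are imported as in
# the rest of this file and that a TF1-style graph is being constructed. The 299x299
# input size mirrors inception_v3.default_image_size; num_classes=1001 is an assumption
# (ImageNet plus a background class), not something this file mandates.
def _example_build_inception_v3_graph():
    images = tf.compat.v1.placeholder(tf.float32, [None, 299, 299, 3], name='images')
    with slim.arg_scope(inception_v3_arg_scope()):
        logits, end_points = inception_v3(images, num_classes=1001, is_training=False)
    probabilities = end_points['Predictions']  # softmax output from prediction_fn
    return logits, probabilities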
|
|
import pymongo
import threading
import core
logger = core.log.getLogger("db-manager")
class DBManager(object):
"""This object is a wrapper for MongoClient to communicate to the RO
(local) mongo-db"""
def __init__(self):
self.__mutex = threading.Lock()
# (felix_ro) RoutingTable
def get_configured_peers(self):
table = pymongo.MongoClient().felix_ro.RoutingTable
try:
self.__mutex.acquire()
return table.find()
finally:
self.__mutex.release()
def get_configured_peer(self, key):
table = pymongo.MongoClient().felix_ro.RoutingTable
try:
self.__mutex.acquire()
row = table.find_one({"_id": key})
if row is None:
raise Exception("RoutingEntry %s not found into RO-DB!" % key)
return row
finally:
self.__mutex.release()
def update_am_info(self, object_id, am_type, am_version):
table = pymongo.MongoClient().felix_ro.RoutingTable
try:
self.__mutex.acquire()
table.update({"_id": object_id},
{"$set": {"am_type": am_type,
"am_version": am_version}})
finally:
self.__mutex.release()
# (felix_ro) GeneralInfoTable
def get_domain_id(self):
table = pymongo.MongoClient().felix_ro.GeneralInfoTable
try:
self.__mutex.acquire()
row = table.find_one()
if row is None:
raise Exception("GeneralInfoEntry not found into RO-DB!")
return row.get("domain")
finally:
self.__mutex.release()
# (felix_ro) SliceTable
def store_slice_info(self, urn, slivers):
table = pymongo.MongoClient().felix_ro.SliceTable
try:
self.__mutex.acquire()
row = table.find_one({"slice_urn": urn})
if row is None:
entry = {"slice_urn": urn,
"slivers": slivers}
return table.insert(entry)
# update the slivers list (if needed)
self.__update_list("slice-table", table, row, "slivers", slivers)
finally:
self.__mutex.release()
def get_slice_routing_keys(self, urns):
table = pymongo.MongoClient().felix_ro.SliceTable
try:
self.__mutex.acquire()
ret = {}
for u in urns:
for r in table.find():
for s in r.get("slivers"):
if (r.get("slice_urn") == u) or\
(s.get("geni_sliver_urn") == u):
if (s.get("routing_key") in ret) and\
(u not in ret[s.get("routing_key")]):
ret[s.get("routing_key")].append(u)
else:
ret[s.get("routing_key")] = [u]
return ret
finally:
self.__mutex.release()
def delete_slice_urns(self, urns):
table = pymongo.MongoClient().felix_ro.SliceTable
try:
self.__mutex.acquire()
# materialize the cursor so it can be iterated once per URN below
rows = list(table.find())
for u in urns:
for r in rows:
if r.get("slice_urn") == u:
table.remove({"slice_urn": u})
else:
for s in r.get("slivers"):
if s.get("geni_sliver_urn") == u:
# remove the element from the list
self.__delete_sliver_urn(
table, r.get("slice_urn"),
r.get("slivers"), s)
break
finally:
self.__mutex.release()
# (felix_ro) COMNodeTable
# TODO Ensure correctness
def store_com_nodes(self, routingKey, values):
table = pymongo.MongoClient().felix_ro.COMNodeTable
try:
ids = []
self.__mutex.acquire()
for v in values:
row = table.find_one({
"routing_key": routingKey,
"component_id": v.get("component_id"),
"component_manager_id": v.get("component_manager_id"), })
# "sliver_type_name": v.get("sliver_type_name")})
if not row:
v["routing_key"] = routingKey
ids.append(table.insert(v))
continue
# update the object (if needed)
self.__update_list("comnodes-table", table, row, "interfaces",
v.get("interfaces"))
return ids
finally:
self.__mutex.release()
def get_com_nodes(self):
table = pymongo.MongoClient().felix_ro.COMNodeTable
try:
self.__mutex.acquire()
return table.find()
finally:
self.__mutex.release()
def get_com_node_routing_key(self, cid):
table = pymongo.MongoClient().felix_ro.COMNodeTable
try:
self.__mutex.acquire()
row = table.find_one({"component_id": cid})
if row is None:
raise Exception("CompId %s not found in RO-COMNode-DB!" % cid)
return row.get("routing_key")
finally:
self.__mutex.release()
# (felix_ro) COMLinkTable
# TODO Ensure correctness
def store_com_links(self, routingKey, values):
table = pymongo.MongoClient().felix_ro.COMLinkTable
try:
ids = []
self.__mutex.acquire()
for v in values:
row = table.find_one({
"routing_key": routingKey,
"component_id": v.get("component_id")})
if not row:
v["routing_key"] = routingKey
ids.append(table.insert(v))
else:
logger.debug(
"(link-table) %s already stored!" % (row.get("_id")))
return ids
finally:
self.__mutex.release()
def get_com_links(self):
table = pymongo.MongoClient().felix_ro.COMLinkTable
try:
self.__mutex.acquire()
return table.find()
finally:
self.__mutex.release()
# (felix_ro) OFDatapathTable
def store_sdn_datapaths(self, routingKey, values):
table = pymongo.MongoClient().felix_ro.OFDatapathTable
try:
ids = []
self.__mutex.acquire()
for v in values:
row = table.find_one({
"routing_key": routingKey,
"component_id": v.get("component_id"),
"component_manager_id": v.get("component_manager_id"),
"dpid": v.get("dpid")})
if row is None:
v["routing_key"] = routingKey
ids.append(table.insert(v))
continue
# update the object (if needed)
self.__update_list("datapth-table", table, row, "ports",
v.get("ports"))
return ids
finally:
self.__mutex.release()
def get_sdn_datapaths(self):
table = pymongo.MongoClient().felix_ro.OFDatapathTable
try:
self.__mutex.acquire()
return table.find()
finally:
self.__mutex.release()
def get_sdn_datapath_routing_key(self, dpid):
table = pymongo.MongoClient().felix_ro.OFDatapathTable
try:
self.__mutex.acquire()
row = table.find_one({
"component_id": dpid.get("component_id"),
"component_manager_id": dpid.get("component_manager_id"),
"dpid": dpid.get("dpid")})
if row is None:
raise Exception("Datapath %s not found into RO-DB!" % dpid)
return row.get("routing_key")
finally:
self.__mutex.release()
# (felix_ro) OFLinkTable
def store_sdn_links(self, routingKey, values):
table = pymongo.MongoClient().felix_ro.OFLinkTable
try:
ids = []
self.__mutex.acquire()
for v in values:
row = table.find_one({
"routing_key": routingKey,
"component_id": v.get("component_id")})
if row is None:
v["routing_key"] = routingKey
ids.append(table.insert(v))
else:
logger.debug(
"(link-table) %s already stored!" % (row.get("_id")))
return ids
finally:
self.__mutex.release()
def get_sdn_links(self):
table = pymongo.MongoClient().felix_ro.OFLinkTable
(of, fed) = ([], [])
try:
self.__mutex.acquire()
for row in table.find():
if row.get("dpids") is not None:
of.append(row)
elif row.get("interface_ref_id") is not None:
fed.append(row)
return (of, fed)
finally:
self.__mutex.release()
# (felix_ro) SENodeTable
def store_se_nodes(self, routingKey, values):
table = pymongo.MongoClient().felix_ro.SENodeTable
try:
ids = []
self.__mutex.acquire()
for v in values:
row = table.find_one({
"routing_key": routingKey,
"component_id": v.get("component_id"),
"component_manager_id": v.get("component_manager_id"),
"sliver_type_name": v.get("sliver_type_name")})
if row is None:
v["routing_key"] = routingKey
ids.append(table.insert(v))
continue
# update the object (if needed)
self.__update_list("senodes-table", table, row, "interfaces",
v.get("interfaces"))
return ids
finally:
self.__mutex.release()
def get_se_node_info(self, routingKey):
table = pymongo.MongoClient().felix_ro.SENodeTable
try:
self.__mutex.acquire()
row = table.find_one({'routing_key': routingKey})
if row is not None:
return {
'component_id': row.get('component_id'),
'component_manager_id': row.get('component_manager_id')}
return None
finally:
self.__mutex.release()
# (felix_ro) SELinkTable
def store_se_links(self, routingKey, values):
table = pymongo.MongoClient().felix_ro.SELinkTable
try:
ids = []
self.__mutex.acquire()
for v in values:
row = table.find_one({
"routing_key": routingKey,
"component_id": v.get("component_id"),
"component_manager_name": v.get("component_manager_name"),
"link_type": v.get("link_type")})
if row is None:
v["routing_key"] = routingKey
ids.append(table.insert(v))
else:
logger.debug(
"(selink-table) %s already stored!" % (row.get("_id")))
return ids
finally:
self.__mutex.release()
def get_se_link_routing_key(self, values):
table = pymongo.MongoClient().felix_ro.SELinkTable
try:
key, ifs = None, []
self.__mutex.acquire()
for r in table.find():
ifrefs = r.get('interface_ref')
for i in ifrefs:
if i.get('component_id') in values:
key = r.get('routing_key')
ifrefs.remove(i)
ifs.append(ifrefs[0])
return key, ifs
finally:
self.__mutex.release()
def get_se_link_info(self, node_cid):
table = pymongo.MongoClient().felix_ro.SELinkTable
try:
self.__mutex.acquire()
for r in table.find():
i = r.get('component_id').find(node_cid)
if i != -1:
return r.get('link_type'), r.get('component_manager_name')
return None, None
finally:
self.__mutex.release()
# (felix_ro) TNNodeTable
def store_tn_nodes(self, routingKey, values):
table = pymongo.MongoClient().felix_ro.TNNodeTable
try:
ids = []
self.__mutex.acquire()
for v in values:
row = table.find_one({
"routing_key": routingKey,
"component_id": v.get("component_id"),
"component_manager_id": v.get("component_manager_id"),
"sliver_type_name": v.get("sliver_type_name")})
if row is None:
v["routing_key"] = routingKey
ids.append(table.insert(v))
continue
# update the object (if needed)
self.__update_list("tnnodes-table", table, row, "interfaces",
v.get("interfaces"))
return ids
finally:
self.__mutex.release()
def get_tn_nodes(self):
table = pymongo.MongoClient().felix_ro.TNNodeTable
try:
self.__mutex.acquire()
return table.find()
finally:
self.__mutex.release()
def get_tn_node_routing_key(self, cid):
table = pymongo.MongoClient().felix_ro.TNNodeTable
try:
self.__mutex.acquire()
row = table.find_one({"component_id": cid})
if row is None:
raise Exception("CompId %s not found into RO-TNNode-DB!" % cid)
return row.get("routing_key")
finally:
self.__mutex.release()
# (felix_ro) TNLinkTable
def store_tn_links(self, routingKey, values):
table = pymongo.MongoClient().felix_ro.TNLinkTable
try:
ids = []
self.__mutex.acquire()
for v in values:
row = table.find_one({
"routing_key": routingKey,
"component_id": v.get("component_id"),
"component_manager_name": v.get("component_manager_name")})
if row is None:
v["routing_key"] = routingKey
ids.append(table.insert(v))
else:
logger.debug(
"(tnlink-table) %s already stored!" % (row.get("_id")))
return ids
finally:
self.__mutex.release()
def get_tn_links(self):
table = pymongo.MongoClient().felix_ro.TNLinkTable
try:
self.__mutex.acquire()
return table.find()
finally:
self.__mutex.release()
def get_tn_link_routing_key(self, cid, cmid, ifrefs):
try:
self.__mutex.acquire()
table = pymongo.MongoClient().felix_ro.TNLinkTable
row = table.find_one({"component_id": cid})
if row is not None:
return row.get("routing_key")
table = pymongo.MongoClient().felix_ro.TNNodeTable
row = table.find_one({"component_manager_id": cmid})
if row is not None:
return row.get("routing_key")
for row in table.find():
for i in row.get("interfaces"):
if i.get("component_id") in ifrefs:
return row.get("routing_key")
raise Exception("Link (%s,%s,%s) owner is not found into RO-DB!" %
(cid, cmid, ifrefs))
finally:
self.__mutex.release()
# utilities
def __update_list(self, tname, table, entry, key, values):
logger.debug("(%s) %s already stored!" % (tname, entry.get("_id"),))
modif = {key: []}
for v in values:
if v not in entry.get(key):
modif.get(key).append(v)
if len(modif.get(key)) > 0:
modif.get(key).extend(entry.get(key))
logger.debug("(%s) extend slivers info %s" % (tname, modif,))
table.update({"_id": entry.get("_id")},
{"$set": modif})
else:
logger.debug("(%s) not needed to update %s" % (tname, key,))
def __delete_sliver_urn(self, table, slice_urn, slivers, elem):
logger.debug("(slice-table) %s remove %s from %s" %
(slice_urn, elem, slivers))
slivers.remove(elem)
modif = {"slivers": slivers}
table.update({"slice_urn": slice_urn},
{"$set": modif})
# This is the db manager object to be used into other modules
db_sync_manager = DBManager()
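# Hedged usage sketch (illustrative addition, not part of the original module): other
# RO modules are expected to import `db_sync_manager` and call its accessors directly.
# The URN below is hypothetical; get_slice_routing_keys() returns a dict mapping each
# routing key to the list of slice/sliver URNs that resolve to it.
def _example_lookup_routing_keys():
    urns = ["urn:publicid:IDN+example+slice+demo"]  # hypothetical slice URN
    keys = db_sync_manager.get_slice_routing_keys(urns)
    logger.debug("routing keys per slice/sliver URN: %s" % (keys,))
    return keys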
|
|
# encoding: utf-8
import mock
from nose.tools import * # noqa
from tests.base import OsfTestCase
from tests.factories import ProjectFactory
from website.addons.osfstorage.tests import factories
from website.addons.osfstorage.tests.utils import StorageTestCase
import datetime
from modularodm import exceptions as modm_errors
from website.models import NodeLog
from website.addons.osfstorage import model
from website.addons.osfstorage import errors
from website.addons.osfstorage import settings
class TestFileGuid(OsfTestCase):
def setUp(self):
super(TestFileGuid, self).setUp()
self.user = factories.AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
self.node_addon = self.project.get_addon('osfstorage')
def test_provider(self):
assert_equal('osfstorage', model.OsfStorageGuidFile().provider)
def test_correct_path(self):
guid = model.OsfStorageGuidFile(node=self.project, path='baz/foo/bar')
assert_equals(guid.path, 'baz/foo/bar')
assert_equals(guid.waterbutler_path, '/baz/foo/bar')
@mock.patch('website.addons.base.requests.get')
def test_unique_identifier(self, mock_get):
mock_response = mock.Mock(ok=True, status_code=200)
mock_get.return_value = mock_response
mock_response.json.return_value = {
'data': {
'name': 'Morty',
'extra': {
'version': 'Terran it up'
}
}
}
guid = model.OsfStorageGuidFile(node=self.project, path='/foo/bar')
guid.enrich()
assert_equals('Terran it up', guid.unique_identifier)
def test_node_addon_get_or_create(self):
guid, created = self.node_addon.find_or_create_file_guid('baz/foo/bar')
assert_true(created)
assert_equal(guid.path, 'baz/foo/bar')
assert_equal(guid.waterbutler_path, '/baz/foo/bar')
def test_node_addon_get_or_create_finds(self):
guid1, created1 = self.node_addon.find_or_create_file_guid('/foo/bar')
guid2, created2 = self.node_addon.find_or_create_file_guid('/foo/bar')
assert_true(created1)
assert_false(created2)
assert_equals(guid1, guid2)
class TestNodeSettingsModel(StorageTestCase):
def test_fields(self):
assert_true(self.node_settings._id)
assert_is(self.node_settings.file_tree, None)
assert_is(self.node_settings.has_auth, True)
assert_is(self.node_settings.complete, True)
def test_after_fork_copies_versions(self):
path = 'jazz/dreamers-ball.mp3'
num_versions = 5
record, _ = model.OsfStorageFileRecord.get_or_create(path, self.node_settings)
for _ in range(num_versions):
version = factories.FileVersionFactory()
record.versions.append(version)
record.save()
fork = self.project.fork_node(self.auth_obj)
fork_node_settings = fork.get_addon('osfstorage')
fork_node_settings.reload()
cloned_record = model.OsfStorageFileRecord.find_by_path(path, fork_node_settings)
assert_equal(cloned_record.versions, record.versions)
assert_true(fork_node_settings.file_tree)
def test_after_register_copies_versions(self):
path = 'jazz/dreamers-ball.mp3'
num_versions = 5
record, _ = model.OsfStorageFileRecord.get_or_create(path, self.node_settings)
for _ in range(num_versions):
version = factories.FileVersionFactory()
record.versions.append(version)
record.save()
registration = self.project.register_node(
None,
self.auth_obj,
'',
{},
)
assert_true(registration.has_addon('osfstorage'))
registration_node_settings = registration.get_addon('osfstorage')
registration_node_settings.reload()
cloned_record = model.OsfStorageFileRecord.find_by_path(path, registration_node_settings)
assert_equal(cloned_record.versions, record.versions)
assert_true(registration_node_settings.file_tree)
class TestOsfStorageFileTree(OsfTestCase):
def setUp(self):
super(TestOsfStorageFileTree, self).setUp()
self.path = 'news/of/the/world'
self.node_settings = model.OsfStorageNodeSettings()
self.node_settings.save()
self.tree = model.OsfStorageFileTree(
path=self.path,
node_settings=self.node_settings,
)
self.tree.save()
def test_fields(self):
assert_true(self.tree._id)
assert_equal(self.tree.children, [])
def test_name(self):
assert_equal(self.tree.name, 'world')
def test_find_by_path_found(self):
result = model.OsfStorageFileTree.find_by_path(self.path, self.node_settings)
assert_equal(result, self.tree)
def test_find_by_path_not_found(self):
result = model.OsfStorageFileTree.find_by_path('missing', self.node_settings)
assert_is(result, None)
def test_get_or_create_found(self):
result, _ = model.OsfStorageFileTree.get_or_create(self.path, self.node_settings)
assert_equal(result, self.tree)
def test_get_or_create_not_found_top_level(self):
assert_is(self.node_settings.file_tree, None)
result, _ = model.OsfStorageFileTree.get_or_create('', self.node_settings)
assert_equal(self.node_settings.file_tree, result)
def test_get_or_create_not_found_nested(self):
assert_is(self.node_settings.file_tree, None)
path = 'night/at/the/opera'
result, _ = model.OsfStorageFileTree.get_or_create(path, self.node_settings)
assert_true(model.OsfStorageFileTree.find_by_path('', self.node_settings))
assert_true(model.OsfStorageFileTree.find_by_path('night', self.node_settings))
assert_true(model.OsfStorageFileTree.find_by_path('night/at', self.node_settings))
assert_true(model.OsfStorageFileTree.find_by_path('night/at/the', self.node_settings))
assert_true(model.OsfStorageFileTree.find_by_path('night/at/the/opera', self.node_settings))
assert_equal(
self.node_settings.file_tree,
model.OsfStorageFileTree.find_by_path('', self.node_settings),
)
def test_get_or_create_idempotent(self):
path = 'night/at/the/opera'
result, _ = model.OsfStorageFileTree.get_or_create(path, self.node_settings)
num_trees = model.OsfStorageFileTree.find().count()
num_records = model.OsfStorageFileRecord.find().count()
result = model.OsfStorageFileTree.get_or_create(path, self.node_settings)
assert_equal(num_trees, model.OsfStorageFileTree.find().count())
assert_equal(num_records, model.OsfStorageFileRecord.find().count())
class TestOsfStorageFileRecord(StorageTestCase):
def setUp(self):
super(TestOsfStorageFileRecord, self).setUp()
self.path = 'red/special.mp3'
self.record, _ = model.OsfStorageFileRecord.get_or_create(
path=self.path,
node_settings=self.node_settings,
)
self.record.save()
def test_fields(self):
assert_true(self.record._id)
assert_false(self.record.is_deleted)
assert_equal(self.record.versions, [])
def test_name(self):
assert_equal(self.record.name, 'special.mp3')
def test_extension(self):
assert_equal(self.record.extension, '.mp3')
def test_find_by_path_found(self):
result = model.OsfStorageFileRecord.find_by_path(self.path, self.node_settings)
assert_equal(result, self.record)
def test_find_by_path_not_found(self):
result = model.OsfStorageFileRecord.find_by_path('missing', self.node_settings)
assert_is(result, None)
def test_get_or_create_found(self):
result, _ = model.OsfStorageFileRecord.get_or_create(self.path, self.node_settings)
assert_equal(result, self.record)
def test_get_or_create_not_found_top_level(self):
nchildren = len(self.node_settings.file_tree.children)
result, _ = model.OsfStorageFileRecord.get_or_create(
'stonecold.mp3',
self.node_settings,
)
assert_is_not(self.node_settings.file_tree, None)
assert_equal(len(self.node_settings.file_tree.children), nchildren + 1)
assert_equal(self.node_settings.file_tree.children[-1], result)
def test_get_or_create_not_found_nested(self):
path = 'night/at/the/opera/39.mp3'
result = model.OsfStorageFileRecord.get_or_create(path, self.node_settings)
assert_true(model.OsfStorageFileRecord.find_by_path(path, self.node_settings))
assert_true(model.OsfStorageFileTree.find_by_path('', self.node_settings))
assert_true(model.OsfStorageFileTree.find_by_path('night', self.node_settings))
assert_true(model.OsfStorageFileTree.find_by_path('night/at', self.node_settings))
assert_true(model.OsfStorageFileTree.find_by_path('night/at/the', self.node_settings))
assert_true(model.OsfStorageFileTree.find_by_path('night/at/the/opera', self.node_settings))
assert_true(model.OsfStorageFileRecord.find_by_path('night/at/the/opera/39.mp3', self.node_settings))
assert_equal(
self.node_settings.file_tree,
model.OsfStorageFileTree.find_by_path('', self.node_settings),
)
def test_get_or_create_idempotent(self):
path = 'night/at/the/opera/39.mp3'
result = model.OsfStorageFileRecord.get_or_create(path, self.node_settings)
num_trees = model.OsfStorageFileTree.find().count()
num_records = model.OsfStorageFileRecord.find().count()
result = model.OsfStorageFileRecord.get_or_create(path, self.node_settings)
assert_equal(num_trees, model.OsfStorageFileTree.find().count())
assert_equal(num_records, model.OsfStorageFileRecord.find().count())
def test_get_version_defaults_found(self):
versions = [factories.FileVersionFactory() for _ in range(3)]
self.record.versions = versions
assert_equal(self.record.get_version(), self.record.versions[-1])
def test_get_version_defaults_not_found(self):
assert_equal(self.record.get_version(), None)
def test_get_version_at_index(self):
versions = [factories.FileVersionFactory() for _ in range(3)]
self.record.versions = versions
assert_equal(self.record.get_version(1), self.record.versions[1])
def test_get_version_required_not_found(self):
with assert_raises(errors.VersionNotFoundError):
self.record.get_version(required=True)
def test_get_versions(self):
self.record.versions = [
factories.FileVersionFactory()
for _ in range(15)
]
self.record.save()
indices, versions, more = self.record.get_versions(0, size=10)
assert_equal(indices, range(15, 5, -1))
assert_equal(
versions,
list(self.record.versions[14:4:-1]),
)
assert_true(more)
indices, versions, more = self.record.get_versions(1, size=10)
assert_equal(indices, range(5, 0, -1))
assert_equal(
versions,
list(self.record.versions[4::-1]),
)
assert_false(more)
def test_delete_record(self):
nlogs = len(self.project.logs)
self.record.delete(auth=self.auth_obj)
self.project.reload()
assert_true(self.record.is_deleted)
assert_equal(len(self.project.logs), nlogs + 1)
logged = self.project.logs[-1]
assert_equal(
logged.action,
'osf_storage_{0}'.format(NodeLog.FILE_REMOVED),
)
assert_not_in('version', logged.params)
def test_delete_deleted_record_raises_error(self):
nlogs = len(self.project.logs)
self.record.is_deleted = True
self.record.save()
with assert_raises(errors.DeleteError):
self.record.delete(auth=self.auth_obj)
self.project.reload()
assert_true(self.record.is_deleted)
assert_equal(len(self.project.logs), nlogs)
def test_undelete_record(self):
nlogs = len(self.project.logs)
self.record.is_deleted = True
self.record.save()
self.record.undelete(auth=self.auth_obj)
self.project.reload()
assert_false(self.record.is_deleted)
assert_equal(len(self.project.logs), nlogs + 1)
assert_equal(
self.project.logs[-1].action,
'osf_storage_{0}'.format(NodeLog.FILE_ADDED),
)
def test_undelete_undeleted_record_raises_error(self):
nlogs = len(self.project.logs)
with assert_raises(errors.UndeleteError):
self.record.undelete(auth=self.auth_obj)
assert_false(self.record.is_deleted)
self.project.reload()
assert_false(self.record.is_deleted)
assert_equal(len(self.project.logs), nlogs)
def test_update_metadata_found(self):
self.record.versions = [
factories.FileVersionFactory(),
factories.FileVersionFactory(),
]
self.record.versions[0].location['object'] = 'foo'
self.record.versions[1].location['object'] = 'bar'
self.record.versions[0].save()
self.record.versions[1].save()
self.record.save()
self.record.update_version_metadata(self.record.versions[0].location,
{'archive': 'glacier', 'size': 123, 'modified': 'Mon, 16 Feb 2015 18:45:34 GMT'})
assert_in('archive', self.record.versions[0].metadata)
assert_equal(self.record.versions[0].metadata['archive'], 'glacier')
assert_not_in('archive', self.record.versions[1].metadata)
def test_update_metadata_not_found(self):
self.record.versions = [
factories.FileVersionFactory(signature='31a64'),
factories.FileVersionFactory(signature='7aa12'),
]
self.record.save()
with assert_raises(errors.VersionNotFoundError):
self.record.update_version_metadata('1143b3', {'archive': 'glacier'})
assert_not_in('archive', self.record.versions[0].metadata)
assert_not_in('archive', self.record.versions[1].metadata)
class TestOsfStorageFileVersion(OsfTestCase):
def setUp(self):
super(TestOsfStorageFileVersion, self).setUp()
self.user = factories.AuthUserFactory()
self.mock_date = datetime.datetime(1991, 10, 31)
def test_fields(self):
version = factories.FileVersionFactory(
size=1024,
content_type='application/json',
date_modified=datetime.datetime.now(),
)
retrieved = model.OsfStorageFileVersion.load(version._id)
assert_true(retrieved.creator)
assert_true(retrieved.location)
assert_true(retrieved.size)
assert_true(retrieved.content_type)
assert_true(retrieved.date_modified)
def test_is_duplicate_true(self):
version1 = factories.FileVersionFactory()
version2 = factories.FileVersionFactory()
assert_true(version1.is_duplicate(version2))
assert_true(version2.is_duplicate(version1))
def test_is_duplicate_false(self):
version1 = factories.FileVersionFactory(
location={
'service': 'cloud',
settings.WATERBUTLER_RESOURCE: 'osf',
'object': 'd077f2',
},
)
version2 = factories.FileVersionFactory(
location={
'service': 'cloud',
settings.WATERBUTLER_RESOURCE: 'osf',
'object': '06d80e',
},
)
assert_false(version1.is_duplicate(version2))
assert_false(version2.is_duplicate(version1))
def test_validate_location(self):
version = factories.FileVersionFactory.build(location={})
with assert_raises(modm_errors.ValidationValueError):
version.save()
version.location = {
'service': 'cloud',
settings.WATERBUTLER_RESOURCE: 'osf',
'object': 'object',
}
version.save()
def test_update_metadata(self):
version = factories.FileVersionFactory()
version.update_metadata({'archive': 'glacier', 'size': 123, 'modified': 'Mon, 16 Feb 2015 18:45:34 GMT'})
version.reload()
assert_in('archive', version.metadata)
assert_equal(version.metadata['archive'], 'glacier')
class TestStorageObject(OsfTestCase):
def setUp(self):
super(TestStorageObject, self).setUp()
self.project = ProjectFactory()
self.path = 'kind/of/magic.mp3'
def test_fields(self):
file_obj = model.OsfStorageGuidFile(node=self.project, path=self.path)
file_obj.save()
assert_true(file_obj._id)
assert_equal(file_obj.node, self.project)
assert_equal(file_obj.path, self.path)
def test_field_validation(self):
file_obj = model.OsfStorageGuidFile(node=self.project)
with assert_raises(modm_errors.ValidationError):
file_obj.save()
def test_get_download_path(self):
file_obj = model.OsfStorageGuidFile(node=self.project, path=self.path)
file_obj.save()
version = 3
assert_equal(
file_obj.get_download_path(version),
'/{0}/?action=download&version={1}&mode=render'.format(
file_obj._id, version,
),
)
def test_get_or_create_exists(self):
existing = model.OsfStorageGuidFile(node=self.project, path=self.path)
existing.save()
n_objs = model.OsfStorageGuidFile.find().count()
result, _ = model.OsfStorageGuidFile.get_or_create(node=self.project, path=self.path)
assert_equal(result, existing)
assert_equal(n_objs, model.OsfStorageGuidFile.find().count())
def test_get_or_create_does_not_exist(self):
n_objs = model.OsfStorageGuidFile.find().count()
result, _ = model.OsfStorageGuidFile.get_or_create(node=self.project, path=self.path)
assert_equal(result.node, self.project)
assert_equal(result.path, self.path)
assert_equal(n_objs + 1, model.OsfStorageGuidFile.find().count())
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from os.path import abspath, dirname
class Migration(SchemaMigration):
def forwards(self, orm):
sql_migration_file = "{}/migration_0072_up.sql".format(
abspath(dirname(__file__)))
db.execute(open(sql_migration_file).read())
def backwards(self, orm):
pass
models = {
u'physical.databaseinfra': {
'Meta': {'object_name': 'DatabaseInfra'},
'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_vm_created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'name_stamp': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
'ssl_configured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'physical.databaseinfraparameter': {
'Meta': {'unique_together': "((u'databaseinfra', u'parameter'),)", 'object_name': 'DatabaseInfraParameter'},
'applied_on_database': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_value': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.DatabaseInfra']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Parameter']"}),
'reset_default_value': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'physical.diskoffering': {
'Meta': {'object_name': 'DiskOffering'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.engine': {
'Meta': {'ordering': "(u'engine_type__name', u'version')", 'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
'has_users': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'read_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'write_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'physical.enginetype': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'EngineType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_in_memory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'migrate_environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Environment']"}),
'min_of_zones': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environmentgroup': {
'Meta': {'object_name': 'EnvironmentGroup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'groups'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.host': {
'Meta': {'object_name': 'Host'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'offering': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Offering']", 'null': 'True'}),
'os_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'physical.instance': {
'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {}),
'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shard': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'total_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'physical.offering': {
'Meta': {'object_name': 'Offering'},
'cpus': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'offerings'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'memory_size_mb': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.parameter': {
'Meta': {'ordering': "(u'engine_type__name', u'name')", 'unique_together': "((u'name', u'engine_type'),)", 'object_name': 'Parameter'},
'allowed_values': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'custom_method': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'dynamic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'enginetype'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.plan': {
'Meta': {'object_name': 'Plan'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'plans'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
'has_persistence': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'migrate_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Plan']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
'stronger_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'main_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'weaker_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'weaker_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"})
},
u'physical.planattribute': {
'Meta': {'object_name': 'PlanAttribute'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plan_attributes'", 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'physical.replicationtopology': {
'Meta': {'object_name': 'ReplicationTopology'},
'can_change_parameters': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_clone_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_reinstall_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_resize_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_setup_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_switch_master': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_upgrade_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
'has_horizontal_scalability': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'replication_topologies'", 'blank': 'True', 'to': u"orm['physical.Parameter']"}),
'script': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'replication_topologies'", 'null': 'True', 'to': u"orm['physical.Script']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.script': {
'Meta': {'object_name': 'Script'},
'configuration': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initialization': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'metric_collector': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_database': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'start_replication': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.topologyparametercustomvalue': {
'Meta': {'unique_together': "((u'topology', u'parameter'),)", 'object_name': 'TopologyParameterCustomValue'},
'attr_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'topology_custom_values'", 'to': u"orm['physical.Parameter']"}),
'topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'param_custom_values'", 'to': u"orm['physical.ReplicationTopology']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.vip': {
'Meta': {'object_name': 'Vip'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'infra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'vips'", 'to': u"orm['physical.DatabaseInfra']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.volume': {
'Meta': {'object_name': 'Volume'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'volumes'", 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'total_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['physical']
|
|
"""
Loads the energy data csv files acquired through the loader scripts
(ion_get_data.py or jci_get_data.py), and inserts them into the Postgres
energy database.
"""
import csv
import os
import pyodbc
import sys
import util
DB = util.PG_DB
USER = util.PG_USER
PWD = util.PG_PWD
# Default data directory if none is supplied at script invocation.
DEFAULT_DATA_DIR = util.DATA_OUTPUT_FILE_PATH
# Default processed data directory if none is supplied at script invocation.
DEFAULT_PROCESSED_DIR = DEFAULT_DATA_DIR + "processed/"
def tryNext():
"""
Print 'Trying next meter in list ...'
"""
print("\nTrying next meter in list ...")
def exec_and_commit(my_cursor, sql_stmt):
"""
Execute SQL_STMT with cursor MY_CURSOR and commit the changes.
Use this for operations that write to the database.
"""
my_cursor.execute(sql_stmt)
my_cursor.commit()
def drop_tmp_table(my_cursor, table_name):
"""
Drop temporary table TABLE_NAME using cursor MY_CURSOR.
"""
print("Deleting temporary table '%s' ..." % (table_name)),
dele_sql = "DROP TABLE IF EXISTS %s" % (table_name)
exec_and_commit(my_cursor, dele_sql)
util.done()
def get_data_files(datadir):
"""
Return a list of the absolute path to the files in DATADIR.
"""
return [os.path.join(datadir, f) for f in os.listdir(datadir)
if os.path.isfile(os.path.join(datadir, f))]
def get_header(data_file):
"""
Return DATA_FILE's header as a list. The elements of the list are as follows:
DESCRIPTION, ORIGINAL UNIT, COMMODITY, SOURCE SYSTEM NAME, READING TYPE
NOTE: call this ONLY once per data file.
"""
with open(data_file, "rb") as f:
reader = csv.reader(f)
header = reader.next()
if len(header) != 5:
raise ValueError("Incorrect file header")
return header
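# Illustrative example (added): a header row matching the layout documented above
# might look like the following line in a data file (the values are hypothetical):
#   Main Building Electric Meter,kWh,Electricity,ION,Interval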
def load_files(file_list, mycursor, process_dir):
"""
Load the data files in FILE_LIST into the database with cursor MYCURSOR
Data files are moved to PROCESS_DIR if they are
loaded successfully.
"""
err_count = 0
for f in file_list:
print("Processing file '%s'" % (f))
err_count += load(f, mycursor, process_dir)
print
if (err_count == 0):
print("All files loaded into database.")
else:
print("%d of %d files could not be loaded into the database\n"
% (err_count, len(file_list)))
def load(data_file, mycursor, process_dir):
"""
Loads the contents of DATA_FILE's header into the 'meter' table.
Also loads the readings in the file into the 'meter_value' table.
Afterwards, moves DATA_FILE to PROCESS_DIR.
Returns 0 if successful, 1 on error.
Uses cursor MYCURSOR.
"""
try:
header = get_header(data_file)
except ValueError, badHeader:
print(badHeader)
tryNext()
return 1
description = header[0]
orig_unit = header[1]
commodity = header[2]
source = header[3]
reading_type = header[4]
try:
print("Performing ID collection ...\n")
unit_id = get_id(orig_unit, "unit", "old_unit", mycursor)
commodity_id = get_id(commodity, "commodity", "name", mycursor)
source_system_id = get_id(source, "source_system", "name", mycursor)
reading_type_id = get_id(reading_type, "reading_type", "name", mycursor)
print("\nID collection finished.\n")
except pyodbc.Error, general_err:
print("SQL ERROR!")
print(general_err)
tryNext()
return 1
if (unit_id == -1 or commodity_id == -1 or source_system_id == -1
or reading_type_id == -1):
print("ERROR: Some IDs not found!")
tryNext()
return 1
l = [description, unit_id, commodity_id, source_system_id, reading_type_id]
meter_id = load_ids(l, mycursor)
if (meter_id == -1):
return 1
print("Meter ID: %d" % (meter_id))
if not load_values(meter_id, data_file, mycursor):
print("NOTE! Meter ID (%d) created for '%s'" % (meter_id, data_file))
return 1
else:
util.move(data_file, process_dir)
return 0
def get_id(item, table, field, mycursor):
"""
Returns the ID from table TABLE whose FIELD is "ilike" to ITEM. Uses
cursor MYCURSOR. If no ID is found, returns -1.
"""
sql = "SELECT id FROM %s WHERE %s ILIKE '%s' LIMIT 1" % (table, field, item)
print("Getting %s ID ..." % (table)),
mycursor.execute(sql)
result = mycursor.fetchone()
if not result:
util.fail()
return -1
else:
util.done()
return result.id
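# For instance (values purely illustrative), get_id("kWh", "unit", "old_unit",
# cur) issues:
#
#   SELECT id FROM unit WHERE old_unit ILIKE 'kWh' LIMIT 1
#
# Note that the value is interpolated directly into the SQL string, so this
# assumes header fields never contain single quotes.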
def load_ids(id_list, mycursor):
"""
Insert ID list ID_LIST into the 'meter' table and return the meter ID
created by the insertion. Returns -1 in case of failure.
The order of ID_LIST is as follows:
[ description, unit_id, commodity_id, source_system_id, reading_type_id ]
Uses cursor MYCURSOR.
"""
sql = """
INSERT INTO meter
(
description,
unit_id,
commodity_id,
source_system_id,
reading_type_id
)
VALUES ('%s', %d, %d, %d, %d)
RETURNING id
""" % (id_list[0], id_list[1], id_list[2], id_list[3], id_list[4])
print("Inserting ID's into 'meter' table ..."),
try:
exec_and_commit(mycursor, sql)
result = mycursor.fetchone()
util.done()
return result.id
except pyodbc.Error, get_meter_id_err:
util.fail()
print(get_meter_id_err)
return -1
def load_values(m_id, data_file, mycursor):
"""
Insert reading values into the 'meter_value' table from DATA_FILE for a
meter with id M_ID. Returns TRUE if successful, FALSE otherwise.
Uses cursor MYCURSOR.
"""
print("Begin inserting readings into 'meter_value' table ...\n")
tbl = "tmp_%d" % (m_id)
if not create_temp_table(tbl, mycursor):
return False
if not copy_data(data_file, tbl, mycursor):
drop_tmp_table(mycursor, tbl)
return False
if not add_id_col(m_id, tbl, mycursor):
drop_tmp_table(mycursor, tbl)
return False
if not insert_table(tbl, mycursor):
drop_tmp_table(mycursor, tbl)
return False
print("\nReading insertion finished.\n")
drop_tmp_table(mycursor, tbl)
return True
def create_temp_table(table_name, mycursor):
"""
Create temporary table with name TABLE_NAME to hold timestamp, reading data
in the data file currently being processed.
Returns TRUE if successful, FALSE otherwise. Uses cursor MYCURSOR.
"""
sql = """
CREATE TABLE IF NOT EXISTS %s
(
time_stamp_utc TIMESTAMP,
reading NUMERIC
)
""" % (table_name)
print("Creating temporary table '%s' ..." % (table_name)),
try:
exec_and_commit(mycursor, sql)
util.done()
return True
except pyodbc.Error, create_tbl_err:
util.fail()
print(create_tbl_err)
return False
def copy_data(data_file, table, mycursor):
"""
Copy the data contents of DATA_FILE to temporary table TABLE.
Returns TRUE if successful, FALSE otherwise. Uses cursor MYCURSOR.
"""
sql = """
COPY %s
(
time_stamp_utc,
reading
) FROM '%s'
WITH DELIMITER ','
NULL AS 'Null'
CSV HEADER
""" % (table, data_file)
print("Copying data to temporary table '%s' ..." % (table)),
try:
exec_and_commit(mycursor, sql)
util.done()
return True
except pyodbc.Error, copy_err:
util.fail()
print(copy_err)
return False
def add_id_col(m_id, table, mycursor):
"""
    Add an 'id' column with default value M_ID to table TABLE using cursor
MYCURSOR. Returns TRUE if successful, FALSE otherwise.
"""
sql = "ALTER TABLE %s ADD COLUMN id INTEGER DEFAULT %d" % (table, m_id)
print("Adding column to table '%s' with id value %d ..." % (table, m_id)),
try:
exec_and_commit(mycursor, sql)
util.done()
return True
except pyodbc.Error, add_col_err:
util.fail()
print(add_col_err)
return False
def insert_table(table, mycursor):
"""
Insert the contents of table TABLE into 'meter_value' using cursor MYCURSOR.
Returns TRUE if successful, FALSE otherwise.
"""
sql = """
INSERT INTO meter_value (meter_id, time_stamp_utc, reading)
SELECT id, time_stamp_utc, reading FROM %s
""" % (table)
print("Inserting readings into 'meter_value' table ..."),
try:
exec_and_commit(mycursor, sql)
util.done()
return True
except pyodbc.Error, insert_table_err:
util.fail()
print(insert_table_err)
return False
def usage():
"""
Print usage message.
"""
print("\nUSAGE: python load_data_files.py [DIRECTORIES] ")
print("\nDESCRIPTION:")
print("\tLoads meter data files in a data directory into the energy")
print("\tdatabase. After loading, the files are then moved to a ")
print("\t'processed' directory.")
print("\n\tThe files must have the following structure:")
print("\tThe first line must have the following information about the")
print("\tmeter:\n")
print("\t\tdescription")
print("\t\toriginal unit")
print("\t\tcommodity")
print("\t\tsource system name")
print("\t\treading type")
print("\n\tSubsequent lines must be in the following format:")
print("\tTimestamp, Reading\n")
print("\nOPTIONS:")
print("\tDIRECTORIES -- [data_dir processed_dir]")
print("\n\tDATA_DIR PROCESSED_DIR are absolute paths to the data and")
print("\tprocessed directories, respectively")
print("\n\tDATA_DIR contains the meter data files to import.")
print("\tPROCESSED_DIR is where data files are moved to after they")
print("\thave been processed.")
print("\n\tIf no argument is provided, default data directory is:")
print("\t'%s'" % (DEFAULT_DATA_DIR))
print("\tDefault processed directory is:")
print("\n\t'%s'" % (DEFAULT_PROCESSED_DIR))
def main():
arg_len = len(sys.argv)
if (arg_len > 3 or arg_len == 2):
usage()
exit()
elif (arg_len == 3):
data_dir = sys.argv[1]
processed_dir = sys.argv[2]
elif (arg_len == 1):
data_dir = DEFAULT_DATA_DIR
processed_dir = DEFAULT_PROCESSED_DIR
    if not os.path.isdir(data_dir):
        print("ERROR: directory '%s' does not exist!" % (data_dir))
        exit()
    if not os.path.isdir(processed_dir):
        print("ERROR: directory '%s' does not exist!" % (processed_dir))
        exit()
data_files = get_data_files(data_dir)
try:
cnxn_str = "DSN=%s;UID=%s;PWD=%s" % (DB, USER, PWD)
print("Connecting to database ..."),
cnxn = pyodbc.connect(cnxn_str)
util.done()
except pyodbc.Error, conn_err:
util.fail()
print(conn_err)
exit()
cursor = cnxn.cursor()
load_files(data_files, cursor, processed_dir)
util.close_cnxn(cursor, cnxn)
if __name__ == "__main__":
main()
|
|
################################################################################
# Copyright (C) 2011-2013 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
General numerical functions and methods.
"""
import functools
import itertools
import operator
import sys
import getopt
import numpy as np
import scipy as sp
import scipy.linalg as linalg
import scipy.special as special
import scipy.optimize as optimize
import scipy.sparse as sparse
import tempfile as tmp
import unittest
from numpy import testing
def flatten_axes(X, *ndims):
ndim = sum(ndims)
if np.ndim(X) < ndim:
raise ValueError("Not enough ndims in the array")
if len(ndims) == 0:
return X
shape = np.shape(X)
i = np.ndim(X) - ndim
plates = shape[:i]
nd_sums = i + np.cumsum((0,) + ndims)
sizes = tuple(
np.prod(shape[i:j])
for (i, j) in zip(nd_sums[:-1], nd_sums[1:])
)
return np.reshape(X, plates + sizes)
def reshape_axes(X, *shapes):
ndim = len(shapes)
if np.ndim(X) < ndim:
raise ValueError("Not enough ndims in the array")
i = np.ndim(X) - ndim
sizes = tuple(np.prod(sh) for sh in shapes)
if np.shape(X)[i:] != sizes:
raise ValueError("Shapes inconsistent with sizes")
shape = tuple(i for sh in shapes for i in sh)
return np.reshape(X, np.shape(X)[:i] + shape)
def find_set_index(index, set_lengths):
"""
    Given set sizes and an index into the concatenated list of the sets,
    return the index of the set that contains that element.
"""
# Negative indices to positive
if index < 0:
index += np.sum(set_lengths)
# Indices must be on range (0, N-1)
if index >= np.sum(set_lengths) or index < 0:
raise Exception("Index out bounds")
return np.searchsorted(np.cumsum(set_lengths), index, side='right')
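# Worked example for find_set_index: with set_lengths (2, 3, 4) the
# concatenated list has nine elements and cumsum(set_lengths) = [2, 5, 9], so
# index 4 falls in the second set:
#
#   >>> find_set_index(4, (2, 3, 4))
#   1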
def parse_command_line_arguments(mandatory_args, *optional_args_list, argv=None):
"""
Parse command line arguments of style "--parameter=value".
Parameter specification is tuple: (name, converter, description).
Some special handling:
* If converter is None, the command line does not accept any value
for it, but instead use either "--option" to enable or
"--no-option" to disable.
* If argument name contains hyphens, those are converted to
underscores in the keys of the returned dictionaries.
Parameters
----------
mandatory_args : list of tuples
Specs for mandatory arguments
optional_args_list : list of lists of tuples
Specs for each optional arguments set
argv : list of strings (optional)
The command line arguments. By default, read sys.argv.
Returns
-------
args : dictionary
The parsed mandatory arguments
kwargs : dictionary
The parsed optional arguments
Examples
--------
>>> from pprint import pprint as print
>>> from bayespy.utils import misc
>>> (args, kwargs) = misc.parse_command_line_arguments(
... # Mandatory arguments
... [
... ('name', str, "Full name"),
... ('age', int, "Age (years)"),
... ('employed', None, "Working"),
... ],
... # Optional arguments
... [
... ('phone', str, "Phone number"),
... ('favorite-color', str, "Favorite color")
... ],
... argv=['--name=John Doe',
... '--age=42',
... '--no-employed',
... '--favorite-color=pink']
... )
>>> print(args)
{'age': 42, 'employed': False, 'name': 'John Doe'}
>>> print(kwargs)
{'favorite_color': 'pink'}
It is possible to have several optional argument sets:
>>> (args, kw_info, kw_fav) = misc.parse_command_line_arguments(
... # Mandatory arguments
... [
... ('name', str, "Full name"),
... ],
... # Optional arguments (contact information)
... [
... ('phone', str, "Phone number"),
... ('email', str, "E-mail address")
... ],
... # Optional arguments (preferences)
... [
... ('favorite-color', str, "Favorite color"),
... ('favorite-food', str, "Favorite food")
... ],
... argv=['--name=John Doe',
... '--favorite-color=pink',
... '--email=john.doe@email.com',
... '--favorite-food=spaghetti']
... )
>>> print(args)
{'name': 'John Doe'}
>>> print(kw_info)
{'email': 'john.doe@email.com'}
>>> print(kw_fav)
{'favorite_color': 'pink', 'favorite_food': 'spaghetti'}
"""
if argv is None:
argv = sys.argv[1:]
mandatory_arg_names = [arg[0] for arg in mandatory_args]
# Sizes of each optional argument list
optional_args_lengths = [len(opt_args) for opt_args in optional_args_list]
all_args = mandatory_args + functools.reduce(operator.add, optional_args_list, [])
# Create a list of arg names for the getopt parser
arg_list = []
for arg in all_args:
arg_name = arg[0].lower()
if arg[1] is None:
arg_list.append(arg_name)
arg_list.append("no-" + arg_name)
else:
arg_list.append(arg_name + "=")
if len(set(arg_list)) < len(arg_list):
raise Exception("Argument names are not unique")
# Use getopt parser
try:
(cl_opts, cl_args) = getopt.getopt(argv, "", arg_list)
except getopt.GetoptError as err:
print(err)
print("Usage:")
for arg in all_args:
if arg[1] is None:
print("--{0}\t{1}".format(arg[0].lower(),
arg[2]))
else:
print("--{0}=<{1}>\t{2}".format(arg[0].lower(),
str(arg[1].__name__).upper(),
arg[2]))
sys.exit(2)
# A list of all valid flag names: ["--first-argument", "--another-argument"]
valid_flags = []
valid_flag_arg_indices = []
for (ind, arg) in enumerate(all_args):
valid_flags.append("--" + arg[0].lower())
valid_flag_arg_indices.append(ind)
if arg[1] is None:
valid_flags.append("--no-" + arg[0].lower())
valid_flag_arg_indices.append(ind)
# Go through all the given command line arguments and store them in the
# correct dictionaries
args = dict()
kwargs_list = [dict() for i in range(len(optional_args_list))]
handled_arg_names = []
for (cl_opt, cl_arg) in cl_opts:
# Get the index of the argument
try:
ind = valid_flag_arg_indices[valid_flags.index(cl_opt.lower())]
except ValueError:
print("Invalid command line argument: {0}".format(cl_opt))
raise Exception("Invalid argument given")
# Check that the argument wasn't already given and then mark the
# argument as handled
if all_args[ind][0] in handled_arg_names:
raise Exception("Same argument given multiple times")
else:
handled_arg_names.append(all_args[ind][0])
# Check whether to add the argument to the mandatory or optional
# argument dictionary
if ind < len(mandatory_args):
dict_to = args
else:
dict_index = find_set_index(ind - len(mandatory_args),
optional_args_lengths)
dict_to = kwargs_list[dict_index]
# Convert and store the argument
convert_function = all_args[ind][1]
arg_name = all_args[ind][0].replace('-', '_')
if convert_function is None:
if cl_opt[:5] == "--no-":
dict_to[arg_name] = False
else:
dict_to[arg_name] = True
else:
dict_to[arg_name] = convert_function(cl_arg)
# Check if some mandatory argument was not given
for arg_name in mandatory_arg_names:
if arg_name not in handled_arg_names:
raise Exception("Mandatory argument --{0} not given".format(arg_name))
return tuple([args] + kwargs_list)
def composite_function(function_list):
"""
Construct a function composition from a list of functions.
Given a list of functions [f,g,h], constructs a function :math:`h \circ g
\circ f`. That is, returns a function :math:`z`, for which :math:`z(x) =
h(g(f(x)))`.
"""
def composite(X):
for function in function_list:
X = function(X)
return X
return composite
def ceildiv(a, b):
"""
Compute a divided by b and rounded up.
"""
return -(-a // b)
def rmse(y1, y2, axis=None):
return np.sqrt(np.mean((y1-y2)**2, axis=axis))
def is_callable(f):
return hasattr(f, '__call__')
def atleast_nd(X, d):
if np.ndim(X) < d:
sh = (d-np.ndim(X))*(1,) + np.shape(X)
X = np.reshape(X, sh)
return X
def T(X):
"""
Transpose the matrix.
"""
return np.swapaxes(X, -1, -2)
class TestCase(unittest.TestCase):
"""
Simple base class for unit testing.
Adds NumPy's features to Python's unittest.
"""
def assertAllClose(self, A, B,
msg="Arrays not almost equal",
rtol=1e-4,
atol=0):
self.assertEqual(np.shape(A), np.shape(B), msg=msg)
testing.assert_allclose(A, B, err_msg=msg, rtol=rtol, atol=atol)
pass
def assertArrayEqual(self, A, B, msg="Arrays not equal"):
self.assertEqual(np.shape(A), np.shape(B), msg=msg)
testing.assert_array_equal(A, B, err_msg=msg)
pass
def assertMessage(self, M1, M2):
if len(M1) != len(M2):
self.fail("Message lists have different lengths")
for (m1, m2) in zip(M1, M2):
self.assertAllClose(m1, m2)
pass
def assertMessageToChild(self, X, u):
self.assertMessage(X._message_to_child(), u)
pass
def symm(X):
"""
Make X symmetric.
"""
return 0.5 * (X + np.swapaxes(X, -1, -2))
def unique(l):
"""
Remove duplicate items from a list while preserving order.
"""
seen = set()
seen_add = seen.add
return [ x for x in l if x not in seen and not seen_add(x)]
def tempfile(prefix='', suffix=''):
return tmp.NamedTemporaryFile(prefix=prefix, suffix=suffix).name
def write_to_hdf5(group, data, name):
"""
Writes the given array into the HDF5 file.
"""
try:
# Try using compression. It doesn't work for scalars.
group.create_dataset(name,
data=data,
compression='gzip')
except TypeError:
group.create_dataset(name,
data=data)
except ValueError:
raise ValueError('Could not write %s' % data)
def nans(size=()):
return np.tile(np.nan, size)
def trues(shape):
return np.ones(shape, dtype=np.bool)
def identity(*shape):
return np.reshape(np.identity(np.prod(shape)), shape+shape)
def array_to_scalar(x):
# This transforms an N-dimensional array to a scalar. It's most
# useful when you know that the array has only one element and you
# want it out as a scalar.
return np.ravel(x)[0]
#def diag(x):
def put(x, indices, y, axis=-1, ufunc=np.add):
"""A kind of inverse mapping of `np.take`
    In its simplest form, the operation can be thought of as:
.. code-block:: python
x[indices] += y
    with the exception that all entries of `y` are used instead of just the
    first occurrence corresponding to a particular element. That is, the results
are accumulated, and the accumulation function can be changed by providing
`ufunc`. For instance, `np.multiply` corresponds to:
.. code-block:: python
x[indices] *= y
Whereas `np.take` picks indices along an axis and returns the resulting
array, `put` similarly picks indices along an axis but accumulates the
given values to those entries.
Example
-------
.. code-block:: python
>>> x = np.zeros(3)
>>> put(x, [2, 2, 0, 2, 2], 1)
array([ 1., 0., 4.])
`y` must broadcast to the shape of `np.take(x, indices)`:
.. code-block:: python
>>> x = np.zeros((3,4))
>>> put(x, [[2, 2, 0, 2, 2], [1, 2, 1, 2, 1]], np.ones((2,1,4)), axis=0)
array([[ 1., 1., 1., 1.],
[ 3., 3., 3., 3.],
[ 6., 6., 6., 6.]])
"""
#x = np.copy(x)
ndim = np.ndim(x)
if not isinstance(axis, int):
raise ValueError("Axis must be an integer")
# Make axis index positive: [0, ..., ndim-1]
if axis < 0:
axis = axis + ndim
if axis < 0 or axis >= ndim:
raise ValueError("Axis out of bounds")
indices = axis*(slice(None),) + (indices,) + (ndim-axis-1)*(slice(None),)
#y = add_trailing_axes(y, ndim-axis-1)
ufunc.at(x, indices, y)
return x
def put_simple(y, indices, axis=-1, length=None):
"""An inverse operation of `np.take` with accumulation and broadcasting.
Compared to `put`, the difference is that the result array is initialized
with an array of zeros whose shape is determined automatically and `np.add`
is used as the accumulator.
"""
if length is None:
# Try to determine the original length of the axis by finding the
# largest index. It is more robust to give the length explicitly.
indices = np.copy(indices)
indices[indices<0] = np.abs(indices[indices<0]) - 1
length = np.amax(indices) + 1
if not isinstance(axis, int):
raise ValueError("Axis must be an integer")
# Make axis index negative: [-ndim, ..., -1]
if axis >= 0:
raise ValueError("Axis index must be negative")
y = atleast_nd(y, abs(axis)-1)
shape_y = np.shape(y)
end_before = axis - np.ndim(indices) + 1
start_after = axis + 1
if end_before == 0:
shape_x = shape_y + (length,)
elif start_after == 0:
shape_x = shape_y[:end_before] + (length,)
else:
shape_x = shape_y[:end_before] + (length,) + shape_y[start_after:]
x = np.zeros(shape_x)
return put(x, indices, y, axis=axis)
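# A small illustrative example of put_simple: with indices [0, 2, 0] and
# y = [1, 2, 3], the values are accumulated into a freshly created length-3
# array of zeros:
#
#   >>> put_simple(np.array([1., 2., 3.]), [0, 2, 0])
#   array([ 4.,  0.,  2.])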
def grid(x1, x2):
""" Returns meshgrid as a (M*N,2)-shape array. """
(X1, X2) = np.meshgrid(x1, x2)
return np.hstack((X1.reshape((-1,1)),X2.reshape((-1,1))))
# class CholeskyDense():
# def __init__(self, K):
# self.U = linalg.cho_factor(K)
# def solve(self, b):
# if sparse.issparse(b):
# b = b.toarray()
# return linalg.cho_solve(self.U, b)
# def logdet(self):
# return 2*np.sum(np.log(np.diag(self.U[0])))
# def trace_solve_gradient(self, dK):
# return np.trace(self.solve(dK))
# class CholeskySparse():
# def __init__(self, K):
# self.LD = cholmod.cholesky(K)
# def solve(self, b):
# if sparse.issparse(b):
# b = b.toarray()
# return self.LD.solve_A(b)
# def logdet(self):
# return self.LD.logdet()
# #np.sum(np.log(LD.D()))
# def trace_solve_gradient(self, dK):
# # WTF?! numpy.multiply doesn't work for two sparse
# # matrices.. It returns a result but it is incorrect!
# # Use the identity trace(K\dK)=sum(inv(K).*dK) by computing
# # the sparse inverse (lower triangular part)
# iK = self.LD.spinv(form='lower')
# return (2*iK.multiply(dK).sum()
# - iK.diagonal().dot(dK.diagonal()))
# # Multiply by two because of symmetry (remove diagonal once
# # because it was taken into account twice)
# #return np.multiply(self.LD.inv().todense(),dK.todense()).sum()
# #return self.LD.inv().multiply(dK).sum() # THIS WORKS
# #return np.multiply(self.LD.inv(),dK).sum() # THIS NOT WORK!! WTF??
# iK = self.LD.spinv()
# return iK.multiply(dK).sum()
# #return (2*iK.multiply(dK).sum()
# # - iK.diagonal().dot(dK.diagonal()))
# #return (2*np.multiply(iK, dK).sum()
# # - iK.diagonal().dot(dK.diagonal())) # THIS NOT WORK!!
# #return np.trace(self.solve(dK))
# def cholesky(K):
# if isinstance(K, np.ndarray):
# return CholeskyDense(K)
# elif sparse.issparse(K):
# return CholeskySparse(K)
# else:
# raise Exception("Unsupported covariance matrix type")
# Computes log probability density function of the Gaussian
# distribution
def gaussian_logpdf(y_invcov_y,
y_invcov_mu,
mu_invcov_mu,
logdetcov,
D):
return (-0.5*D*np.log(2*np.pi)
-0.5*logdetcov
-0.5*y_invcov_y
+y_invcov_mu
-0.5*mu_invcov_mu)
def zipper_merge(*lists):
"""
Combines lists by alternating elements from them.
Combining lists [1,2,3], ['a','b','c'] and [42,666,99] results in
[1,'a',42,2,'b',666,3,'c',99]
    The lists should have equal length; otherwise the result is truncated to
    the length of the shortest list.
This is known as alternating merge or zipper merge.
"""
return list(sum(zip(*lists), ()))
def remove_whitespace(s):
return ''.join(s.split())
def is_numeric(a):
return (np.isscalar(a) or
isinstance(a, list) or
isinstance(a, np.ndarray))
def is_scalar_integer(x):
t = np.asanyarray(x).dtype.type
return np.ndim(x) == 0 and issubclass(t, np.integer)
def isinteger(x):
t = np.asanyarray(x).dtype.type
return ( issubclass(t, np.integer) or issubclass(t, np.bool_) )
def is_string(s):
return isinstance(s, str)
def multiply_shapes(*shapes):
"""
Compute element-wise product of lists/tuples.
    Shorter lists are padded with leading 1s so that all lists have the same
    length.
"""
# Make the shapes equal length
shapes = make_equal_length(*shapes)
# Compute element-wise product
f = lambda X,Y: (x*y for (x,y) in zip(X,Y))
shape = functools.reduce(f, shapes)
return tuple(shape)
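# For example, multiply_shapes((2, 3), (3,)) pads the shorter shape to (1, 3)
# and returns the element-wise product (2, 9).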
def make_equal_length(*shapes):
"""
Make tuples equal length.
Add leading 1s to shorter tuples.
"""
# Get maximum length
max_len = max((len(shape) for shape in shapes))
# Make the shapes equal length
shapes = ((1,)*(max_len-len(shape)) + tuple(shape) for shape in shapes)
return shapes
def make_equal_ndim(*arrays):
"""
Add trailing unit axes so that arrays have equal ndim
"""
shapes = [np.shape(array) for array in arrays]
shapes = make_equal_length(*shapes)
arrays = [np.reshape(array, shape)
for (array, shape) in zip(arrays, shapes)]
return arrays
def sum_to_dim(A, dim):
"""
Sum leading axes of A such that A has dim dimensions.
"""
dimdiff = np.ndim(A) - dim
if dimdiff > 0:
axes = np.arange(dimdiff)
A = np.sum(A, axis=axes)
return A
def broadcasting_multiplier(plates, *args):
"""
Compute the plate multiplier for given shapes.
The first shape is compared to all other shapes (using NumPy
broadcasting rules). All the elements which are non-unit in the first
shape but 1 in all other shapes are multiplied together.
This method is used, for instance, for computing a correction factor for
messages to parents: If this node has non-unit plates that are unit
plates in the parent, those plates are summed. However, if the message
has unit axis for that plate, it should be first broadcasted to the
plates of this node and then summed to the plates of the parent. In
order to avoid this broadcasting and summing, it is more efficient to
just multiply by the correct factor. This method computes that
factor. The first argument is the full plate shape of this node (with
respect to the parent). The other arguments are the shape of the message
array and the plates of the parent (with respect to this node).
"""
# Check broadcasting of the shapes
for arg in args:
broadcasted_shape(plates, arg)
# Check that each arg-plates are a subset of plates?
for arg in args:
if not is_shape_subset(arg, plates):
print("Plates:", plates)
print("Args:", args)
raise ValueError("The shapes in args are not a sub-shape of "
"plates")
r = 1
for j in range(-len(plates),0):
mult = True
for arg in args:
# if -j <= len(arg) and arg[j] != 1:
if not (-j > len(arg) or arg[j] == 1):
mult = False
if mult:
r *= plates[j]
return r
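# Illustrative example: with plates (3, 4) and argument shapes (3, 1) and
# (1, 1), only the last plate axis is unit in every argument, so the
# multiplier is 4:
#
#   >>> broadcasting_multiplier((3, 4), (3, 1), (1, 1))
#   4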
def sum_multiply_to_plates(*arrays, to_plates=(), from_plates=None, ndim=0):
"""
Compute the product of the arguments and sum to the target shape.
"""
arrays = list(arrays)
def get_plates(x):
if ndim == 0:
return x
else:
return x[:-ndim]
plates_arrays = [get_plates(np.shape(array)) for array in arrays]
product_plates = broadcasted_shape(*plates_arrays)
if from_plates is None:
from_plates = product_plates
r = 1
else:
r = broadcasting_multiplier(from_plates, product_plates, to_plates)
for ind in range(len(arrays)):
plates_others = plates_arrays[:ind] + plates_arrays[(ind+1):]
plates_without = broadcasted_shape(to_plates, *plates_others)
ax = axes_to_collapse(plates_arrays[ind], #get_plates(np.shape(arrays[ind])),
plates_without)
if ax:
ax = tuple([a-ndim for a in ax])
arrays[ind] = np.sum(arrays[ind], axis=ax, keepdims=True)
plates_arrays = [get_plates(np.shape(array)) for array in arrays]
product_plates = broadcasted_shape(*plates_arrays)
ax = axes_to_collapse(product_plates, to_plates)
if ax:
ax = tuple([a-ndim for a in ax])
y = sum_multiply(*arrays, axis=ax, keepdims=True)
else:
y = functools.reduce(np.multiply, arrays)
y = squeeze_to_dim(y, len(to_plates) + ndim)
return r * y
def sum_multiply(*args, axis=None, sumaxis=True, keepdims=False):
# Computes sum(arg[0]*arg[1]*arg[2]*..., axis=axes_to_sum) without
# explicitly computing the intermediate product
if len(args) == 0:
raise ValueError("You must give at least one input array")
# Dimensionality of the result
max_dim = 0
for k in range(len(args)):
max_dim = max(max_dim, np.ndim(args[k]))
if sumaxis:
if axis is None:
# Sum all axes
axes = []
else:
if np.isscalar(axis):
axis = [axis]
axes = [i
for i in range(max_dim)
if i not in axis and (-max_dim+i) not in axis]
else:
if axis is None:
# Keep all axes
axes = range(max_dim)
else:
# Find axes that are kept
            if np.isscalar(axis):
                axis = [axis]
axes = [i if i >= 0
else i+max_dim
for i in axis]
axes = sorted(axes)
if len(axes) > 0 and (min(axes) < 0 or max(axes) >= max_dim):
raise ValueError("Axis index out of bounds")
# Form a list of pairs: the array in the product and its axes
pairs = list()
for i in range(len(args)):
a = args[i]
a_dim = np.ndim(a)
pairs.append(a)
pairs.append(range(max_dim-a_dim, max_dim))
# Output axes are those which are not summed
pairs.append(axes)
# Compute the sum-product
try:
y = np.einsum(*pairs)
except ValueError as err:
if str(err) == ("If 'op_axes' or 'itershape' is not NULL in "
"theiterator constructor, 'oa_ndim' must be greater "
"than zero"):
# TODO/FIXME: Handle a bug in NumPy. If all arguments to einsum are
# scalars, it raises an error. For scalars we can just use multiply
# and forget about summing. Hopefully, in the future, einsum handles
# scalars properly and this try-except becomes unnecessary.
y = functools.reduce(np.multiply, args)
else:
raise err
# Restore summed axes as singleton axes
if keepdims:
d = 0
s = ()
for k in range(max_dim):
if k in axes:
# Axis not summed
s = s + (np.shape(y)[d],)
d += 1
else:
# Axis was summed
s = s + (1,)
y = np.reshape(y, s)
return y
def sum_product(*args, axes_to_keep=None, axes_to_sum=None, keepdims=False):
if axes_to_keep is not None:
return sum_multiply(*args,
axis=axes_to_keep,
sumaxis=False,
keepdims=keepdims)
else:
return sum_multiply(*args,
axis=axes_to_sum,
sumaxis=True,
keepdims=keepdims)
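# For example, summing the element-wise product over the last axis gives a
# batched dot product without forming the intermediate product array:
#
#   >>> A = np.arange(6).reshape(2, 3)
#   >>> b = np.array([1., 10., 100.])
#   >>> sum_product(A, b, axes_to_sum=-1)
#   array([ 210.,  543.])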
def moveaxis(A, axis_from, axis_to):
"""
Move the axis `axis_from` to position `axis_to`.
"""
if ((axis_from < 0 and abs(axis_from) > np.ndim(A)) or
(axis_from >= 0 and axis_from >= np.ndim(A)) or
(axis_to < 0 and abs(axis_to) > np.ndim(A)) or
(axis_to >= 0 and axis_to >= np.ndim(A))):
raise ValueError("Can't move axis %d to position %d. Axis index out of "
"bounds for array with shape %s"
% (axis_from,
axis_to,
np.shape(A)))
axes = np.arange(np.ndim(A))
axes[axis_from:axis_to] += 1
axes[axis_from:axis_to:-1] -= 1
axes[axis_to] = axis_from
return np.transpose(A, axes=axes)
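# For example, moving the first axis of a (2, 3, 4) array to the last position
# yields shape (3, 4, 2):
#
#   >>> np.shape(moveaxis(np.zeros((2, 3, 4)), 0, 2))
#   (3, 4, 2)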
def safe_indices(inds, shape):
"""
    Makes sure that indices are valid for the given shape.
    The shorter of INDS and SHAPE determines how many trailing entries are
    compared; indices are clipped to the largest valid index.
For instance,
.. testsetup::
from bayespy.utils.misc import safe_indices
>>> safe_indices( (3, 4, 5), (1, 6) )
(0, 5)
"""
m = min(len(inds), len(shape))
if m == 0:
return ()
inds = inds[-m:]
maxinds = np.array(shape[-m:]) - 1
return tuple(np.fmin(inds, maxinds))
def broadcasted_shape(*shapes):
"""
Computes the resulting broadcasted shape for a given set of shapes.
Uses the broadcasting rules of NumPy. Raises an exception if the shapes do
not broadcast.
"""
dim = 0
for a in shapes:
dim = max(dim, len(a))
S = ()
for i in range(-dim,0):
s = 1
for a in shapes:
if -i <= len(a):
if s == 1:
s = a[i]
elif a[i] != 1 and a[i] != s:
raise ValueError("Shapes %s do not broadcast" % (shapes,))
S = S + (s,)
return S
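# For example, shapes (3, 1), (1, 4) and (4,) broadcast to (3, 4):
#
#   >>> broadcasted_shape((3, 1), (1, 4), (4,))
#   (3, 4)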
def broadcasted_shape_from_arrays(*arrays):
"""
Computes the resulting broadcasted shape for a given set of arrays.
Raises an exception if the shapes do not broadcast.
"""
shapes = [np.shape(array) for array in arrays]
return broadcasted_shape(*shapes)
def is_shape_subset(sub_shape, full_shape):
"""
"""
if len(sub_shape) > len(full_shape):
return False
for i in range(len(sub_shape)):
ind = -1 - i
if sub_shape[ind] != 1 and sub_shape[ind] != full_shape[ind]:
return False
return True
def add_axes(X, num=1, axis=0):
for i in range(num):
X = np.expand_dims(X, axis=axis)
return X
    # Unreachable alternative implementation via reshape:
    # shape = np.shape(X)[:axis] + num*(1,) + np.shape(X)[axis:]
    # return np.reshape(X, shape)
def add_leading_axes(x, n):
return add_axes(x, axis=0, num=n)
def add_trailing_axes(x, n):
return add_axes(x, axis=-1, num=n)
def nested_iterator(max_inds):
s = [range(i) for i in max_inds]
return itertools.product(*s)
def first(L):
"""
"""
for (n,l) in enumerate(L):
if l:
return n
return None
def squeeze(X):
"""
Remove leading axes that have unit length.
For instance, a shape (1,1,4,1,3) will be reshaped to (4,1,3).
"""
shape = np.array(np.shape(X))
inds = np.nonzero(shape != 1)[0]
if len(inds) == 0:
shape = ()
else:
shape = shape[inds[0]:]
return np.reshape(X, shape)
def squeeze_to_dim(X, dim):
s = tuple(range(np.ndim(X)-dim))
return np.squeeze(X, axis=s)
def axes_to_collapse(shape_x, shape_to):
# Solves which axes of shape shape_x need to be collapsed in order
# to get the shape shape_to
s = ()
for j in range(-len(shape_x), 0):
if shape_x[j] != 1:
if -j > len(shape_to) or shape_to[j] == 1:
s += (j,)
elif shape_to[j] != shape_x[j]:
print('Shape from: ' + str(shape_x))
print('Shape to: ' + str(shape_to))
raise Exception('Incompatible shape to squeeze')
return tuple(s)
def sum_to_shape(X, s):
"""
Sum axes of the array such that the resulting shape is as given.
Thus, the shape of the result will be s or an error is raised.
"""
# First, sum and remove axes that are not in s
if np.ndim(X) > len(s):
axes = tuple(range(-np.ndim(X), -len(s)))
else:
axes = ()
Y = np.sum(X, axis=axes)
# Second, sum axes that are 1 in s but keep the axes
axes = ()
for i in range(-np.ndim(Y), 0):
if s[i] == 1:
if np.shape(Y)[i] > 1:
axes = axes + (i,)
else:
if np.shape(Y)[i] != s[i]:
raise ValueError("Shape %s can't be summed to shape %s" %
(np.shape(X), s))
Y = np.sum(Y, axis=axes, keepdims=True)
return Y
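# For example, a (2, 3, 4) array of ones summed to shape (3, 1) drops the
# extra leading axis and collapses the last axis, giving 2*4 = 8 in each
# remaining entry:
#
#   >>> sum_to_shape(np.ones((2, 3, 4)), (3, 1))
#   array([[ 8.],
#          [ 8.],
#          [ 8.]])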
def repeat_to_shape(A, s):
# Current shape
t = np.shape(A)
if len(t) > len(s):
raise Exception("Can't repeat to a smaller shape")
# Add extra axis
t = tuple([1]*(len(s)-len(t))) + t
A = np.reshape(A,t)
# Repeat
for i in reversed(range(len(s))):
if s[i] != t[i]:
if t[i] != 1:
raise Exception("Can't repeat non-singular dimensions")
else:
A = np.repeat(A, s[i], axis=i)
return A
def multidigamma(a, d):
"""
Returns the derivative of the log of multivariate gamma.
"""
return np.sum(special.digamma(a[...,None] - 0.5*np.arange(d)),
axis=-1)
m_digamma = multidigamma
def diagonal(A):
return np.diagonal(A, axis1=-2, axis2=-1)
def make_diag(X, ndim=1, ndim_from=0):
"""
Create a diagonal array given the diagonal elements.
The diagonal array can be multi-dimensional. By default, the last axis is
transformed to two axes (diagonal matrix) but this can be changed using ndim
keyword. For instance, an array with shape (K,L,M,N) can be transformed to a
set of diagonal 4-D tensors with shape (K,L,M,N,M,N) by giving ndim=2. If
ndim=3, the result has shape (K,L,M,N,L,M,N), and so on.
Diagonality means that for the resulting array Y holds:
Y[...,i_1,i_2,..,i_ndim,j_1,j_2,..,j_ndim] is zero if i_n!=j_n for any n.
"""
if ndim < 0:
raise ValueError("Parameter ndim must be non-negative integer")
    if ndim_from < 0:
        raise ValueError("Parameter ndim_from must be non-negative integer")
    if ndim_from > ndim:
        raise ValueError("Parameter ndim_from must not be greater than ndim")
if ndim == 0:
return X
if np.ndim(X) < 2 * ndim_from:
raise ValueError("The array does not have enough axes")
if ndim_from > 0:
if np.shape(X)[-ndim_from:] != np.shape(X)[-2*ndim_from:-ndim_from]:
raise ValueError("The array X is not square")
if ndim == ndim_from:
return X
X = atleast_nd(X, ndim+ndim_from)
if ndim > 0:
if ndim_from > 0:
I = identity(*(np.shape(X)[-(ndim_from+ndim):-ndim_from]))
else:
I = identity(*(np.shape(X)[-ndim:]))
X = add_axes(X, axis=np.ndim(X)-ndim_from, num=ndim-ndim_from)
X = I * X
return X
def get_diag(X, ndim=1, ndim_to=0):
"""
Get the diagonal of an array.
If ndim>1, take the diagonal of the last 2*ndim axes.
"""
if ndim < 0:
raise ValueError("Parameter ndim must be non-negative integer")
if ndim_to < 0:
raise ValueError("Parameter ndim_to must be non-negative integer")
if ndim_to > ndim:
raise ValueError("Parameter ndim_to must not be greater than ndim")
if ndim == 0:
return X
if np.ndim(X) < 2*ndim:
raise ValueError("The array does not have enough axes")
if np.shape(X)[-ndim:] != np.shape(X)[-2*ndim:-ndim]:
raise ValueError("The array X is not square")
if ndim == ndim_to:
return X
n_plate_axes = np.ndim(X) - 2 * ndim
n_diag_axes = ndim - ndim_to
axes = tuple(range(0, np.ndim(X) - ndim + ndim_to))
lengths = [0, n_plate_axes, n_diag_axes, ndim_to, ndim_to]
cutpoints = list(np.cumsum(lengths))
axes_plates = axes[cutpoints[0]:cutpoints[1]]
    axes_diag = axes[cutpoints[1]:cutpoints[2]]
axes_dims1 = axes[cutpoints[2]:cutpoints[3]]
axes_dims2 = axes[cutpoints[3]:cutpoints[4]]
axes_input = axes_plates + axes_diag + axes_dims1 + axes_diag + axes_dims2
axes_output = axes_plates + axes_diag + axes_dims1 + axes_dims2
return np.einsum(X, axes_input, axes_output)
def diag(X, ndim=1):
"""
Create a diagonal array given the diagonal elements.
The diagonal array can be multi-dimensional. By default, the last axis is
transformed to two axes (diagonal matrix) but this can be changed using ndim
keyword. For instance, an array with shape (K,L,M,N) can be transformed to a
set of diagonal 4-D tensors with shape (K,L,M,N,M,N) by giving ndim=2. If
ndim=3, the result has shape (K,L,M,N,L,M,N), and so on.
Diagonality means that for the resulting array Y holds:
Y[...,i_1,i_2,..,i_ndim,j_1,j_2,..,j_ndim] is zero if i_n!=j_n for any n.
"""
X = atleast_nd(X, ndim)
if ndim > 0:
I = identity(*(np.shape(X)[-ndim:]))
X = add_axes(X, axis=np.ndim(X), num=ndim)
X = I * X
return X
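# For example, diag of a length-2 vector gives a 2x2 diagonal matrix (and
# get_diag above recovers the diagonal again):
#
#   >>> diag(np.array([1., 2.]))
#   array([[ 1.,  0.],
#          [ 0.,  2.]])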
def m_dot(A,b):
# Compute matrix-vector product over the last two axes of A and
# the last axes of b. Other axes are broadcasted. If A has shape
# (..., M, N) and b has shape (..., N), then the result has shape
# (..., M)
#b = reshape(b, shape(b)[:-1] + (1,) + shape(b)[-1:])
#return np.dot(A, b)
return np.einsum('...ik,...k->...i', A, b)
# TODO: Use einsum!!
#return np.sum(A*b[...,np.newaxis,:], axis=(-1,))
def block_banded(D, B):
"""
Construct a symmetric block-banded matrix.
`D` contains square diagonal blocks.
`B` contains super-diagonal blocks.
The resulting matrix is:
D[0], B[0], 0, 0, ..., 0, 0, 0
B[0].T, D[1], B[1], 0, ..., 0, 0, 0
0, B[1].T, D[2], B[2], ..., ..., ..., ...
... ... ... ... ..., B[N-2].T, D[N-1], B[N-1]
0, 0, 0, 0, ..., 0, B[N-1].T, D[N]
"""
D = [np.atleast_2d(d) for d in D]
B = [np.atleast_2d(b) for b in B]
# Number of diagonal blocks
N = len(D)
if len(B) != N-1:
raise ValueError("The number of super-diagonal blocks must contain "
"exactly one block less than the number of diagonal "
"blocks")
# Compute the size of the full matrix
M = 0
for i in range(N):
if np.ndim(D[i]) != 2:
raise ValueError("Blocks must be 2 dimensional arrays")
d = np.shape(D[i])
if d[0] != d[1]:
raise ValueError("Diagonal blocks must be square")
M += d[0]
for i in range(N-1):
if np.ndim(B[i]) != 2:
raise ValueError("Blocks must be 2 dimensional arrays")
b = np.shape(B[i])
if b[0] != np.shape(D[i])[1] or b[1] != np.shape(D[i+1])[0]:
raise ValueError("Shapes of the super-diagonal blocks do not match "
"the shapes of the diagonal blocks")
A = np.zeros((M,M))
k = 0
for i in range(N-1):
(d0, d1) = np.shape(B[i])
# Diagonal block
A[k:k+d0, k:k+d0] = D[i]
# Super-diagonal block
A[k:k+d0, k+d0:k+d0+d1] = B[i]
# Sub-diagonal block
A[k+d0:k+d0+d1, k:k+d0] = B[i].T
k += d0
A[k:,k:] = D[-1]
return A
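# A minimal example: two diagonal blocks (a 2x2 and a 1x1 identity) joined by
# a 2x1 super-diagonal block of ones give
#
#   >>> block_banded([np.eye(2), np.eye(1)], [np.ones((2, 1))])
#   array([[ 1.,  0.,  1.],
#          [ 0.,  1.,  1.],
#          [ 1.,  1.,  1.]])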
def dist_haversine(c1, c2, radius=6372795):
# Convert coordinates to radians
lat1 = np.atleast_1d(c1[0])[...,:,None] * np.pi / 180
lon1 = np.atleast_1d(c1[1])[...,:,None] * np.pi / 180
lat2 = np.atleast_1d(c2[0])[...,None,:] * np.pi / 180
lon2 = np.atleast_1d(c2[1])[...,None,:] * np.pi / 180
dlat = lat2 - lat1
dlon = lon2 - lon1
A = np.sin(dlat/2)**2 + np.cos(lat1)*np.cos(lat2)*(np.sin(dlon/2)**2)
C = 2 * np.arctan2(np.sqrt(A), np.sqrt(1-A))
return radius * C
def logsumexp(X, axis=None, keepdims=False):
"""
    Compute log(sum(exp(X))) in a numerically stable way.
"""
X = np.asanyarray(X)
maxX = np.amax(X, axis=axis, keepdims=True)
if np.ndim(maxX) > 0:
maxX[~np.isfinite(maxX)] = 0
elif not np.isfinite(maxX):
maxX = 0
X = X - maxX
if not keepdims:
maxX = np.squeeze(maxX, axis=axis)
return np.log(np.sum(np.exp(X), axis=axis, keepdims=keepdims)) + maxX
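# For example, logsumexp(np.log([2.0, 3.0])) evaluates
# log(exp(log 2) + exp(log 3)) = log(5) ~= 1.609, and large inputs do not
# overflow because the maximum is subtracted before exponentiation.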
def normalized_exp(phi):
"""Compute exp(phi) so that exp(phi) sums to one.
This is useful for computing probabilities from log evidence.
"""
logsum_p = logsumexp(phi, axis=-1, keepdims=True)
logp = phi - logsum_p
p = np.exp(logp)
# Because of small numerical inaccuracy, normalize the probabilities
# again for more accurate results
return (
p / np.sum(p, axis=-1, keepdims=True),
logsum_p
)
def invpsi(x):
r"""
Inverse digamma (psi) function.
The digamma function is the derivative of the log gamma function.
This calculates the value Y > 0 for a value X such that digamma(Y) = X.
For the new version, see Appendix C:
http://research.microsoft.com/en-us/um/people/minka/papers/dirichlet/minka-dirichlet.pdf
For the previous implementation, see:
http://www4.ncsu.edu/~pfackler/
Are there speed/accuracy differences between the methods?
"""
x = np.asanyarray(x)
y = np.where(
x >= -2.22,
np.exp(x) + 0.5,
-1/(x - special.psi(1))
)
for i in range(5):
y = y - (special.psi(y) - x) / special.polygamma(1, y)
return y
# # Previous implementation. Is it worse? Is there difference?
# L = 1.0
# y = np.exp(x)
# while (L > 1e-10):
# y += L*np.sign(x-special.psi(y))
# L /= 2
# # Ad hoc by Jaakko
# y = np.where(x < -100, -1 / x, y)
# return y
def invgamma(x):
r"""
Inverse gamma function.
See: http://mathoverflow.net/a/28977
"""
k = 1.461632
c = 0.036534
L = np.log((x+c)/np.sqrt(2*np.pi))
W = special.lambertw(L/np.exp(1))
return L/W + 0.5
def mean(X, axis=None, keepdims=False):
"""
Compute the mean, ignoring NaNs.
"""
if np.ndim(X) == 0:
if axis is not None:
raise ValueError("Axis out of bounds")
return X
X = np.asanyarray(X)
nans = np.isnan(X)
X = X.copy()
X[nans] = 0
m = (np.sum(X, axis=axis, keepdims=keepdims) /
np.sum(~nans, axis=axis, keepdims=keepdims))
return m
def gradient(f, x, epsilon=1e-6):
return optimize.approx_fprime(x, f, epsilon)
def broadcast(*arrays, ignore_axis=None):
"""
Explicitly broadcast arrays to same shapes.
    It is possible to ignore some axes so that the arrays are not broadcasted
along those axes.
"""
shapes = [np.shape(array) for array in arrays]
if ignore_axis is None:
full_shape = broadcasted_shape(*shapes)
else:
try:
ignore_axis = tuple(ignore_axis)
except TypeError:
ignore_axis = (ignore_axis,)
if len(ignore_axis) != len(set(ignore_axis)):
raise ValueError("Indices must be unique")
if any(i >= 0 for i in ignore_axis):
raise ValueError("Indices must be negative")
# Put lengths of ignored axes to 1
cut_shapes = [
tuple(
1
if i in ignore_axis else
shape[i]
for i in range(-len(shape), 0)
)
for shape in shapes
]
full_shape = broadcasted_shape(*cut_shapes)
return [np.ones(full_shape) * array for array in arrays]
def block_diag(*arrays):
"""
Form a block diagonal array from the given arrays.
Compared to SciPy's block_diag, this utilizes broadcasting and accepts more
    than two dimensions in the input arrays.
"""
arrays = broadcast(*arrays, ignore_axis=(-1, -2))
plates = np.shape(arrays[0])[:-2]
M = sum(np.shape(array)[-2] for array in arrays)
N = sum(np.shape(array)[-1] for array in arrays)
Y = np.zeros(plates + (M, N))
i_start = 0
j_start = 0
for array in arrays:
i_end = i_start + np.shape(array)[-2]
j_end = j_start + np.shape(array)[-1]
Y[...,i_start:i_end,j_start:j_end] = array
i_start = i_end
j_start = j_end
return Y
def concatenate(*arrays, axis=-1):
"""
Concatenate arrays along a given axis.
Compared to NumPy's concatenate, this utilizes broadcasting.
"""
# numpy.concatenate doesn't do broadcasting, so we need to do it explicitly
return np.concatenate(
broadcast(*arrays, ignore_axis=axis),
axis=axis
)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logic to fold batch norm into preceding convolution or FC layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib import graph_editor
from tensorflow.contrib.quantize.python import common
from tensorflow.contrib.quantize.python import graph_matcher
from tensorflow.contrib.quantize.python import input_to_ops
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import compat
def FoldBatchNorms(graph, is_training, freeze_batch_norm_delay=None):
"""Finds batch norm layers and folds them into preceding layers.
Folding only affects the following layers: Conv2D, fully connected, depthwise
convolution.
Args:
graph: Graph to walk and modify.
is_training: Bool, true if training.
freeze_batch_norm_delay: How many steps to wait before freezing moving mean
and variance and using them for batch normalization. This value is used
only when is_training is True.
Raises:
ValueError: When batch norm folding fails.
"""
_FoldFusedBatchNorms(
graph, is_training, freeze_batch_norm_delay=freeze_batch_norm_delay)
_FoldUnfusedBatchNorms(
graph,
is_training=is_training,
freeze_batch_norm_delay=freeze_batch_norm_delay)
def _FoldFusedBatchNorms(graph, is_training, freeze_batch_norm_delay):
"""Finds fused batch norm layers and folds them into preceding layers.
Folding only affects the following layers: Conv2D, fully connected, depthwise
convolution.
Args:
graph: Graph to walk and modify.
is_training: Bool, true if training.
freeze_batch_norm_delay: How many steps to wait before freezing moving mean
and variance and using them for batch normalization.
Raises:
ValueError: When batch norm folding fails.
"""
for match in _FindFusedBatchNorms(graph):
scope, sep, _ = match.layer_op.name.rpartition('/')
# Make sure new ops are added to `graph` and put on the same device as
# `bn_op`. The '/' (i.e. `sep`) ensures that we reuse the existing scope
# named `scope`. Otherwise, TF creates a unique scope whose name starts with
# `scope`.
with graph.as_default(), graph.name_scope(scope + sep):
with graph.name_scope(scope + sep + 'BatchNorm_Fold' + sep):
# new weights = old weights * gamma / sqrt(variance + epsilon)
# new biases = -mean * gamma / sqrt(variance + epsilon) + beta
multiplier_tensor = match.gamma_tensor * math_ops.rsqrt(
match.variance_tensor + match.bn_op.get_attr('epsilon'))
bias_tensor = math_ops.subtract(
match.beta_tensor,
match.mean_tensor * multiplier_tensor,
name='bias')
correction_scale, correction_recip, correction_offset = None, None, None
if is_training:
correction_scale, correction_recip, correction_offset = (
_ComputeBatchNormCorrections(
context='',
match=match,
freeze_batch_norm_delay=freeze_batch_norm_delay,
fused_batch_norm=True))
# The shape of depthwise weights is different, so we need to reshape the
# multiplier_tensor to ensure that the scaled_weight_tensor has the
# expected shape.
weights = match.weight_tensor
if match.layer_op.type == 'DepthwiseConv2dNative':
new_shape = [
match.weight_tensor.get_shape().as_list()[2],
match.weight_tensor.get_shape().as_list()[3]
]
multiplier_tensor = array_ops.reshape(
multiplier_tensor, new_shape, name='scale_reshape')
if correction_scale is not None:
correction_scale = array_ops.reshape(
correction_scale, new_shape, name='correction_reshape')
if correction_scale is not None:
weights = math_ops.multiply(
correction_scale, weights, name='correction_mult')
scaled_weight_tensor = math_ops.multiply(
weights, multiplier_tensor, name='mul_fold')
new_layer_tensor = _CloneWithNewOperands(
match.layer_op, match.input_tensor, scaled_weight_tensor)
if correction_recip is not None:
new_layer_tensor = math_ops.multiply(
correction_recip, new_layer_tensor, name='post_conv_mul')
new_layer_tensor = math_ops.add(new_layer_tensor, (correction_offset),
'correction_add')
bias_add_tensor = math_ops.add(
new_layer_tensor, bias_tensor, name='add_fold')
nodes_modified_count = graph_editor.reroute_ts(bias_add_tensor,
match.output_tensor)
if nodes_modified_count == 0:
raise ValueError('Folding batch norms failed, %s had no outputs.' %
match.output_tensor.name)
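# For reference, the algebra behind the fold above: at inference time
#   bn(conv(x, w)) = gamma * (conv(x, w) - mean) / sqrt(variance + epsilon) + beta
#                  = conv(x, w * gamma / sqrt(variance + epsilon))
#                    + (beta - mean * gamma / sqrt(variance + epsilon)),
# which is exactly the scaled_weight_tensor ('mul_fold') and bias_tensor
# ('bias'/'add_fold') constructed in _FoldFusedBatchNorms above.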
def _FindFusedBatchNorms(graph):
"""Finds all ops and tensors related to found FusedBatchNorms.
Args:
graph: Graph to inspect.
Yields:
_FusedBatchNormMatches.
"""
input_pattern = graph_matcher.OpTypePattern('*')
weight_pattern = graph_matcher.OpTypePattern('*')
gamma_pattern = graph_matcher.OpTypePattern('*')
beta_pattern = graph_matcher.OpTypePattern('*')
mean_pattern = graph_matcher.OpTypePattern('*')
variance_pattern = graph_matcher.OpTypePattern('*')
moving_average_pattern = graph_matcher.OpTypePattern('*')
bn_decay_pattern = graph_matcher.OpTypePattern('*')
layer_pattern = graph_matcher.OpTypePattern(
'Conv2D|DepthwiseConv2dNative|MatMul',
inputs=[input_pattern, weight_pattern])
# MatMul has a Reshape between it and FusedBatchNorm.
matmul_reshape_pattern = graph_matcher.OpTypePattern(
'Reshape', inputs=[layer_pattern,
graph_matcher.OpTypePattern('*')])
batch_norm_pattern = graph_matcher.OpTypePattern(
'FusedBatchNorm',
inputs=[
graph_matcher.OneofPattern([matmul_reshape_pattern, layer_pattern]),
gamma_pattern, beta_pattern, mean_pattern, variance_pattern
])
matmul_bn_output_reshape_pattern = graph_matcher.OpTypePattern(
'Reshape', inputs=[batch_norm_pattern,
graph_matcher.OpTypePattern('*')])
bn_matcher = graph_matcher.GraphMatcher(
graph_matcher.OneofPattern(
[matmul_bn_output_reshape_pattern, batch_norm_pattern]))
moving_average_sub_pattern = graph_matcher.OpTypePattern(
'Sub', inputs=[moving_average_pattern, batch_norm_pattern])
moving_average_mul_pattern = graph_matcher.OpTypePattern(
'Mul', inputs=[moving_average_sub_pattern, bn_decay_pattern])
moving_avg_mul_matcher = graph_matcher.GraphMatcher(
moving_average_mul_pattern)
for match_result in bn_matcher.match_graph(graph):
moving_mean_tensor = None
moving_variance_tensor = None
bn_decay_mean_tensor = None
bn_decay_var_tensor = None
layer_op = match_result.get_op(layer_pattern)
layer_tensor = match_result.get_tensor(layer_pattern)
bn_op = match_result.get_op(batch_norm_pattern)
batch_epsilon = bn_op.get_attr('epsilon')
# In the MatMul case, the output of batch norm is reshaped back into a
# 2D tensor, so the output_tensor is the output of the Reshape op.
output_tensor = bn_op.outputs[0]
if layer_op.type == 'MatMul':
output_reshape_op = match_result.get_op(matmul_bn_output_reshape_pattern)
# If the matcher didn't match matmul_bn_output_reshape, there will be
# another match for this 'MatMul' later, so we can skip this one.
if output_reshape_op is None:
continue
output_tensor = output_reshape_op.outputs[0]
# Ensure that the output tensor has consumers, otherwise this is a dangling
# node and not a match.
if not output_tensor.consumers():
continue
input_tensor = match_result.get_tensor(input_pattern)
weight_tensor = match_result.get_tensor(weight_pattern)
gamma_tensor = match_result.get_tensor(gamma_pattern)
beta_tensor = match_result.get_tensor(beta_pattern)
# FusedBatchNorm in training is different from that in inference. It takes
# empty 'mean' and empty 'variance', and produces the mean and the variance
# of the batch. Therefore, when is_training is true, mean_tensor and
# variance_tensor point to 1st and 2nd (0-based) output of bn_op,
# respectively; when is_training is false, they point to bn_op's inputs.
is_training = bn_op.get_attr('is_training')
if is_training:
# FusedBatchNormGrad doesn't compute gradients of the batch_mean and
# batch_variance outputs, so we need to substitute our own custom
# gradient.
# TODO(suharshs, raghuramank): Find a way to avoid needing this hack.
# pylint: disable=protected-access
bn_op._set_attr(
'_gradient_op_type',
attr_value_pb2.AttrValue(s=compat.as_bytes('FoldFusedBatchNormGrad')))
# pylint: enable=protected-access
mean_tensor = bn_op.outputs[1]
# The batch variance used during forward and backward prop is biased,
      # i.e. it is calculated as: V = sum((x(k) - mu)^2) / N. For the moving
      # average calculation, the variance is corrected by the factor N/(N-1)
      # (Bessel's correction). The variance tensor read from FusedBatchNorm has Bessel's
# correction applied, so we undo it here.
scope, sep, _ = bn_op.name.rpartition('/')
g = ops.get_default_graph()
with g.as_default(), g.name_scope(scope + sep):
n = math_ops.cast(
array_ops.size(layer_tensor) / array_ops.size(mean_tensor),
dtypes.float32)
variance_tensor = math_ops.multiply(
bn_op.outputs[2], (n - 1) / n, name='Undo_Bessel_Correction')
# TODO(suharshs): Find a way to get rid of this inner match.
for mul_match_result in moving_avg_mul_matcher.match_graph(graph):
sub_op = mul_match_result.get_op(moving_average_sub_pattern)
if sub_op.inputs[1].name == bn_op.outputs[1].name:
# During training: Batch Mean is bn_op.outputs[1]
moving_mean_tensor = sub_op.inputs[0]
bn_decay_mean_tensor = mul_match_result.get_tensor(bn_decay_pattern)
if sub_op.inputs[1].name == bn_op.outputs[2].name:
# During training: Batch Var is bn_op.outputs[2]
moving_variance_tensor = sub_op.inputs[0]
bn_decay_var_tensor = mul_match_result.get_tensor(bn_decay_pattern)
else:
mean_tensor = match_result.get_tensor(mean_pattern)
variance_tensor = match_result.get_tensor(variance_pattern)
yield _BatchNormMatch(
layer_op=layer_op,
bn_op=bn_op,
output_tensor=output_tensor,
input_tensor=input_tensor,
weight_tensor=weight_tensor,
gamma_tensor=gamma_tensor,
beta_tensor=beta_tensor,
mean_tensor=mean_tensor,
variance_tensor=variance_tensor,
moving_mean_tensor=moving_mean_tensor,
moving_variance_tensor=moving_variance_tensor,
bn_decay_mean_tensor=bn_decay_mean_tensor,
bn_decay_var_tensor=bn_decay_var_tensor,
batch_epsilon=batch_epsilon)
def _ComputeBatchNormCorrections(context, match, freeze_batch_norm_delay,
fused_batch_norm):
"""Computes batch norm correction params.
Before batch normalization is frozen:
We use batch statistics for batch norm.
correction_scale = sigma_b/sigma_mv
correction_recip = 1/correction_scale
correction_offset = 0
After batch normalization is frozen:
correction_scale = sigma_b/sigma_mv
correction_recip = 1
correction_offset = gamma*(mu_b/sigma_b-mu_mv/sigma_mv).
Batch norm is frozen if global_step > bn_freeze_delay.
The corrections ensure that:
  a) The weights are quantized after scaling by gamma/sigma_mv. This enables
     smoother training as the scaling on the weights changes slowly, rather
     than jumping across mini-batches.
  b) Changing the values of the corrections allows one to switch from using
     batch statistics to using the moving mean and variance, without requiring
     changes to batch_norm.
Args:
context: The scope under which we look for batch norm params
match: Object containing required batch norm tensors for correction
computation.
freeze_batch_norm_delay: Delay in steps at which computation switches
from regular batch norm to frozen mean and variance.
fused_batch_norm: Bool, true if fused batch norm is used.
Returns:
A tuple of correction_scale, correction_recip, correction_offset
"""
g = ops.get_default_graph()
prefix = '' if not context else context + '/'
with g.name_scope(prefix + 'batch_norm_correction'):
recip_sigma_mv = math_ops.rsqrt(
match.moving_variance_tensor + match.batch_epsilon)
recip_sigma = math_ops.rsqrt(match.variance_tensor + match.batch_epsilon)
correction_scale = math_ops.divide(
recip_sigma_mv, recip_sigma, name='scale_compute')
correction_scale = array_ops.identity(
correction_scale, name='correction_scale')
correction_recip = math_ops.reciprocal(
correction_scale, name='reciprocal_compute')
correction_offset = math_ops.multiply(
match.gamma_tensor,
match.mean_tensor * recip_sigma -
match.moving_mean_tensor * recip_sigma_mv,
name='offset_compute')
if freeze_batch_norm_delay is not None:
use_mv_avg = math_ops.greater_equal(
common.CreateOrGetQuantizationStep(),
freeze_batch_norm_delay,
name='use_moving_average')
else:
use_mv_avg = False
bn_decay_zero = 0.0
bn_decay_mean_consumers = list(match.bn_decay_mean_tensor.consumers())
bn_decay_var_consumers = list(match.bn_decay_mean_tensor.consumers())
bn_decay_mean_out = utils.smart_cond(
use_mv_avg,
lambda: bn_decay_zero,
lambda: match.bn_decay_mean_tensor,
name='freeze_moving_mean')
graph_editor.reroute_ts(
[bn_decay_mean_out], [match.bn_decay_mean_tensor],
can_modify=bn_decay_mean_consumers)
if fused_batch_norm is False:
bn_decay_var_consumers = list(match.bn_decay_var_tensor.consumers())
bn_decay_var_out = utils.smart_cond(
use_mv_avg,
lambda: bn_decay_zero,
lambda: match.bn_decay_var_tensor,
name='freeze_moving_var')
graph_editor.reroute_ts(
[bn_decay_var_out], [match.bn_decay_var_tensor],
can_modify=bn_decay_var_consumers)
correction_recip = utils.smart_cond(
use_mv_avg,
lambda: array_ops.ones(correction_scale.shape),
lambda: correction_recip,
name='correction_recip')
correction_offset = utils.smart_cond(
use_mv_avg,
lambda: correction_offset,
lambda: array_ops.zeros(correction_offset.shape),
name='correction_offset')
return correction_scale, correction_recip, correction_offset
def _CloneWithNewOperands(layer_op, input_tensor, weight_tensor):
"""Clones layer_op with input_tensor and weight_tensor as new inputs."""
new_layer_name = layer_op.name.split('/')[-1] + '_Fold'
if layer_op.type == 'Conv2D':
return nn_ops.conv2d(
input_tensor,
weight_tensor,
strides=layer_op.get_attr('strides'),
padding=layer_op.get_attr('padding'),
use_cudnn_on_gpu=layer_op.get_attr('use_cudnn_on_gpu'),
data_format=layer_op.get_attr('data_format'),
name=new_layer_name)
elif layer_op.type == 'MatMul':
return math_ops.matmul(
input_tensor,
weight_tensor,
transpose_a=layer_op.get_attr('transpose_a'),
transpose_b=layer_op.get_attr('transpose_b'),
name=new_layer_name)
elif layer_op.type == 'DepthwiseConv2dNative':
return nn.depthwise_conv2d(
input_tensor,
weight_tensor,
strides=layer_op.get_attr('strides'),
padding=layer_op.get_attr('padding'),
name=new_layer_name)
else:
raise ValueError('Cannot handle operation of type: %s' % layer_op.type)
@ops.RegisterGradient('FoldFusedBatchNormGrad')
def _FoldFusedBatchNormGrad(op, unused_grad_y, grad_mean, grad_var, unused_1,
unused_2):
x = op.inputs[0]
n = math_ops.cast(
array_ops.size(x) / array_ops.size(grad_mean), dtypes.float32)
dmean_dx = grad_mean / n
dvar_dx = 2 * grad_var * (x - op.outputs[1]) / (n - 1)
return (dmean_dx + dvar_dx), None, None, None, None
def _FoldUnfusedBatchNorms(graph, is_training, freeze_batch_norm_delay):
"""Finds unfused batch norm layers and folds them into preceding layers.
Folding only affects the following layers: Conv2D, fully connected, depthwise
convolution.
Args:
graph: Graph to walk and modify.
is_training: Bool, True if training.
freeze_batch_norm_delay: How many steps to wait before freezing moving mean
and variance and using them for batch normalization.
Raises:
ValueError: When batch norm folding fails.
"""
input_to_ops_map = input_to_ops.InputToOps(graph)
for bn in common.BatchNormGroups(graph):
has_scaling = _HasScaling(graph, input_to_ops_map, bn)
if not _IsValidUnfusedBatchNorm(graph, bn):
continue
# The mangling code intimately depends on BatchNorm node's internals.
original_op, folded_op = _CreateFoldedOp(
graph,
bn,
has_scaling=has_scaling,
freeze_batch_norm_delay=freeze_batch_norm_delay,
is_training=is_training)
activation = common.GetEndpointActivationOp(graph, bn)
if activation:
nodes_modified_count = graph_editor.reroute_ts([folded_op.outputs[0]],
[original_op.outputs[0]],
can_modify=[activation])
if nodes_modified_count != 1:
raise ValueError('Unexpected inputs to op: %s' % activation.name)
continue
# Treat consumer ops in bypass modules differently since they have Add
# operations instead of Relu* above.
add_bypass_ctx = re.search(r'^(.*)/([^/]+)', bn).group(1)
add_bypass = graph.get_operation_by_name(add_bypass_ctx + '/Add')
nodes_modified_count = graph_editor.reroute_ts([folded_op.outputs[0]],
[original_op.outputs[0]],
can_modify=[add_bypass])
if nodes_modified_count != 1:
raise ValueError('Unexpected inputs to op: %s' % add_bypass.name)
def _IsValidUnfusedBatchNorm(graph, context):
"""Checks that the output of the unfused batch norm has consumers."""
add_shift = graph.get_operation_by_name(
context + '/BatchNorm/batchnorm_1/add_1')
# Ensure that the output tensor of batch norm has consumers, otherwise this
# is a dangling node and not a match.
return bool(add_shift.outputs[0].consumers())
def _FindMatchingTensor(graph, match_pattern, scope):
"""Finds best match of ops matching match_pattern with scope.
Example: _FindMatchingTensor(graph,'/BatchNorm/moments/Squeeze',
'MobilenetV1/MobilenetV1/Conv2d_0/') returns:
Tensor('MobilenetV1/Conv2d_0/BatchNorm/moments/Squeeze')
Args:
graph: Graph to inspect.
    match_pattern: Part of the op name that we need to match; it should be
      present in the op's name.
    scope: The scope of the op. Not all of the scope's elements need to be
      present in the op's name.
Returns:
Tensor from graph that provides the best match to the match_pattern and
scope
"""
oplist = graph.get_operations()
split_context = set(scope.split('/'))
match_dict = {}
for op in oplist:
if op.name.endswith(match_pattern):
split_name = op.name.split('/')
num_matches = len(set(split_name) & split_context)
if num_matches > 0:
match_dict[op.name] = num_matches
# match_dict contains matching op names from graph with values being
# number of matches to scope. We pick the key with the most matches
if match_dict:
max_key = max(match_dict, key=match_dict.get)
return graph.get_tensor_by_name(max_key + ':0')
else:
return None
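# Illustrative helper (added; not part of the original module): the score used
# by _FindMatchingTensor is the size of the intersection between the
# '/'-separated parts of the scope and of the op name. The hypothetical
# function below reproduces that arithmetic for the MobilenetV2 case discussed
# in _GetBatchNormParams.
def _ExampleMatchScore(op_name, scope):
  """Returns the overlap score that _FindMatchingTensor assigns (illustrative)."""
  return len(set(op_name.split('/')) & set(scope.split('/')))
# _ExampleMatchScore(
#     'MobilenetV2/expanded_conv_3/depthwise/BatchNorm/moving_mean/read',
#     'MobilenetV2/MobilenetV2/expanded_conv_3') == 2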
def _GetBatchNormParams(graph, context, has_scaling):
"""Extracts relevant tensors for folding batch norms.
Args:
graph: Graph to inspect.
context: The scope under which we look for batch norm params
has_scaling: Bool that specifies if scaling is done as part of batch norm.
Returns:
_BatchNormMatch containing all required batch norm parameters.
"""
gamma_tensor = None
batch_mean_tensor = None
batch_variance_tensor = None
moving_mean_tensor = None
moving_variance_tensor = None
batch_epsilon = None
bn_decay_mean_tensor = None
bn_decay_var_tensor = None
# TODO(raghuramank) This code relies on string matching and needs to be
# updated if unfused batch norm continues to be widely used
# Matching variable names is brittle and relies on scoping
# conventions. Fused batch norm folding is more robust. Support for unfused
# batch norms will be deprecated as we move forward. Fused batch norms allow
# for faster training and should be used whenever possible.
# context contains part of the names of the tensors we are interested in:
# For MobilenetV1, the context has repetitions:
# MobilenetV1/MobilenetV1/Conv2d_3_depthwise
# when the moving_mean tensor has the name:
# MobilenetV1/Conv2d_3_depthwise/BatchNorm/moving_mean/read
# To pick the correct variable name, it is necessary to ignore the repeating
# header.
# For MobilenetV2, this problem does not exist:
# The context is: MobilenetV2/expanded_conv_3/depthwise
# and the names of the tensors start with a single MobilenetV2
# The moving mean for example, has the name:
# MobilenetV2/expanded_conv_3/depthwise/BatchNorm/moving_mean/read
# We identify the best match for an op by checking for
# 1. The suffix of the op is exactly matched
  # 2. Maximum number of matches with the context. The matching
# score is given by the number of parts of context (split by /) that
# are present in the parts of the tensor name (again split by /).
# For example: scope= MobilenetV2/MobilenetV2/expanded_conv_3 and
# op.name = MobilenetV2/expanded_conv_3/depthwise/BatchNorm/moving_mean/read
  # will have 2 matches, scope with a different conv layer will have one match.
op_suffix_mean = '/BatchNorm/moments/Squeeze'
op_suffix_variance = '/BatchNorm/moments/Squeeze_1'
op_suffix_epsilon = '/BatchNorm/batchnorm_1/add/y'
op_suffix_bn_decay_mean = '/BatchNorm/AssignMovingAvg/decay'
op_suffix_bn_decay_var = '/BatchNorm/AssignMovingAvg_1/decay'
if variable_scope.get_variable_scope().use_resource:
op_suffix_gamma = '/BatchNorm/gamma/Read/ReadVariableOp'
op_suffix_moving_variance = (
'/BatchNorm/moving_variance/Read/ReadVariableOp')
op_suffix_moving_mean = ('/BatchNorm/moving_mean/Read/ReadVariableOp')
else:
op_suffix_gamma = '/BatchNorm/gamma'
op_suffix_moving_variance = '/BatchNorm/moving_variance/read'
op_suffix_moving_mean = '/BatchNorm/moving_mean/read'
# Parse through list of ops to find relevant ops
batch_mean_tensor = _FindMatchingTensor(graph, op_suffix_mean, context)
batch_variance_tensor = _FindMatchingTensor(graph, op_suffix_variance,
context)
moving_mean_tensor = _FindMatchingTensor(graph, op_suffix_moving_mean,
context)
moving_variance_tensor = _FindMatchingTensor(graph, op_suffix_moving_variance,
context)
batch_epsilon = _FindMatchingTensor(graph, op_suffix_epsilon, context)
bn_decay_mean_tensor = _FindMatchingTensor(graph, op_suffix_bn_decay_mean,
context)
bn_decay_var_tensor = _FindMatchingTensor(graph, op_suffix_bn_decay_var,
context)
if batch_mean_tensor is None and moving_mean_tensor is None:
    raise ValueError('Error folding unfused batch norms')
if has_scaling:
gamma_tensor = _FindMatchingTensor(graph, op_suffix_gamma, context)
if not has_scaling:
gamma_tensor = array_ops.ones(moving_mean_tensor.shape)
return _BatchNormMatch(
layer_op=None,
bn_op=None,
output_tensor=None,
input_tensor=None,
weight_tensor=None,
gamma_tensor=gamma_tensor,
beta_tensor=None,
mean_tensor=batch_mean_tensor,
variance_tensor=batch_variance_tensor,
moving_mean_tensor=moving_mean_tensor,
moving_variance_tensor=moving_variance_tensor,
bn_decay_mean_tensor=bn_decay_mean_tensor,
bn_decay_var_tensor=bn_decay_var_tensor,
batch_epsilon=batch_epsilon)
def _CreateFoldedOp(graph, context, has_scaling, freeze_batch_norm_delay,
is_training):
"""Folds in batch norm layer into preceding convolution or FC layer.
Creates 3 new nodes, connects their inputs and adds them to the graph:
mul is cloned into mul_fold, Conv2D or MatMul, or DepthwiseConv2d is cloned
into respective *_Fold, add is cloned into add_fold.
Args:
graph: Graph to modify.
context: String, batch norm context, i.e. node into which BatchNorm is
nested.
has_scaling: Whether the batch norm has scaling enabled.
freeze_batch_norm_delay: How many steps to wait before freezing moving mean
and variance and using them for batch normalization.
is_training: Bool, true if training.
Raises:
ValueError: When operation type is not supported, or input and output tensor
shapes mismatch for created operations: mul_fold, add_fold.
Returns:
A pair of Operations, the first is the original consumer node of the batch
norm (../BatchNorm/batchnorm_1/add_1), the second is the consumer node of
the folded graph (add_fold).
"""
mul_scale_name = 'mul_1' if has_scaling else 'mul'
mul_scale = graph.get_operation_by_name(context +
'/BatchNorm/batchnorm_1/' +
mul_scale_name)
op_below = mul_scale.inputs[0].op
weights = op_below.inputs[1]
match = _GetBatchNormParams(
graph=graph, context=context, has_scaling=has_scaling)
correction_scale, correction_recip, correction_offset = None, None, None
if is_training:
correction_scale, correction_recip, correction_offset = (
_ComputeBatchNormCorrections(
context=context,
match=match,
freeze_batch_norm_delay=freeze_batch_norm_delay,
fused_batch_norm=False))
# Special handling for weights of depthwise convolution.
if op_below.type == 'DepthwiseConv2dNative':
new_shape = [
weights.get_shape().as_list()[2],
weights.get_shape().as_list()[3]
]
scale_name = 'mul' if has_scaling else 'Rsqrt'
scale = graph.get_operation_by_name(
context + '/BatchNorm/batchnorm_1/' + scale_name)
scale = array_ops.reshape(scale.outputs[0], new_shape,
context + '/scale_reshape')
if correction_scale is not None:
correction_scale = array_ops.reshape(correction_scale, new_shape,
context + '/correction_reshape')
with ops.device(mul_scale.device):
weights = math_ops.multiply(correction_scale, weights,
context + '/correction_mult')
mul_fold = _CloneOp(mul_scale, context + '/mul_fold', [(0, weights),
(1, scale)])
elif op_below.type in ['Conv2D', 'MatMul']:
if correction_scale is not None:
with ops.device(mul_scale.device):
weights = math_ops.multiply(correction_scale, weights,
context + '/correction_mult')
mul_fold = _CloneOp(mul_scale, context + '/mul_fold', [(0, weights)])
else:
    raise ValueError('Cannot handle operation of type: %s' % op_below.type)
_AssertShapesMatch('mul_fold', mul_fold.inputs[0], mul_fold.outputs[0])
conv_or_fc_folded = _CloneOp(op_below, op_below.name + '_Fold',
[(1, mul_fold.outputs[0])])
add_shift = graph.get_operation_by_name(
context + '/BatchNorm/batchnorm_1/add_1')
corrected_output = conv_or_fc_folded.outputs[0]
if correction_offset is not None:
with ops.device(conv_or_fc_folded.device):
corrected_output = math_ops.multiply(correction_recip, corrected_output,
context + '/post_conv_mul')
corrected_output = math_ops.add(corrected_output, (correction_offset),
context + '/correction_add')
add_fold = _CloneOp(add_shift, context + '/add_fold', [(0, corrected_output)])
_AssertShapesMatch('add_fold', add_fold.inputs[0], add_fold.outputs[0])
return add_shift, add_fold
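# Illustrative sketch (added; not part of the original module): the mul_fold and
# add_fold nodes built above realize the standard inference-time folding
# identity. The hypothetical NumPy helper below shows that arithmetic for
# per-channel parameters; the training-time correction terms are omitted.
def _ExampleFoldParams(weights, gamma, beta, mean, variance, epsilon):
  """Returns (folded_weights, folded_bias) for inference folding (illustrative)."""
  import numpy as np
  scale = gamma / np.sqrt(variance + epsilon)  # what Rsqrt * gamma computes
  return weights * scale, beta - mean * scale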
def _CloneOp(op, new_name, new_inputs):
"""Clones a given op, replaces its name and some of its inputs.
Args:
op: Operation to modify.
new_name: String, a new name to set on cloned op.
new_inputs: A list of tuples (idx, tensor), each input with corresponding
index will be replaced by the given Tensor in the cloned op.
Returns:
Operation, the cloned op.
Raises:
TypeError: When Operation type is not supported.
ValueError: When input shapes are incompatible.
"""
inputs = list(op.inputs)
for new_input in new_inputs:
inputs[new_input[0]] = new_input[1]
return _OP_CLONER.Clone(op, inputs, new_name)
class _OpCloner(object):
"""Helper class that clones tf.Operations based on their type."""
def __init__(self):
self.op_type_to_action = {
'Mul': self._CloneMul,
'Add': self._CloneAdd,
'Conv2D': self._CloneConv2d,
'DepthwiseConv2dNative': self._CloneDepthwiseConv2d,
'MatMul': self._CloneMatMul,
}
def _CloneMul(self, op, inputs, new_name):
del op # Unused.
return math_ops.multiply(inputs[0], inputs[1], name=new_name).op
def _CloneAdd(self, op, inputs, new_name):
del op # Unused.
return math_ops.add(inputs[0], inputs[1], name=new_name).op
def _CloneConv2d(self, op, inputs, new_name):
input_tensor = inputs[0]
weights = inputs[1]
self._AssertConvShapes(op.name, input_tensor, weights)
return nn_ops.conv2d(
input_tensor,
weights,
strides=op.get_attr('strides'),
padding=op.get_attr('padding'),
use_cudnn_on_gpu=op.get_attr('use_cudnn_on_gpu'),
data_format=op.get_attr('data_format'),
name=new_name).op
def _CloneDepthwiseConv2d(self, op, inputs, new_name):
input_tensor = inputs[0]
weights = inputs[1]
self._AssertConvShapes(op.name, input_tensor, weights)
return nn.depthwise_conv2d(
input_tensor,
weights,
strides=op.get_attr('strides'),
padding=op.get_attr('padding'),
name=new_name).op
def _CloneMatMul(self, op, inputs, new_name):
weights = inputs[0]
input_tensor = inputs[1]
self._AssertFCShapes(op.name, weights, input_tensor)
return math_ops.matmul(
weights,
input_tensor,
transpose_a=op.get_attr('transpose_a'),
transpose_b=op.get_attr('transpose_b'),
name=new_name).op
def Clone(self, op, inputs, new_name):
try:
return self.op_type_to_action[op.type](op, inputs, new_name)
except KeyError:
raise TypeError('Unsupported operation type: %s' % op.type)
def _AssertConvShapes(self, op_name, input_tensor, weights):
"""Makes sure that convolution inputs have compatible shapes.
Args:
op_name: Operation name, only used in error message.
input_tensor: Input that is convolved.
weights: Weights of the convolution filter.
Raises:
ValueError: When input shapes are incompatible.
"""
input_shape = input_tensor.get_shape()
weights_shape = weights.get_shape()
if (len(input_shape) != 4 or len(weights_shape) != 4 or
input_shape[3] != weights_shape[2]):
raise ValueError('Incompatible shapes for op %s inputs: %s and %s' %
(op_name, input_shape, weights_shape))
def _AssertFCShapes(self, op_name, weights, input_tensor):
"""Makes sure that FC layer inputs have compatible shapes.
Args:
op_name: Operation name, only used in error message.
weights: Weights used in FC layer.
input_tensor: Input into FC layer.
Raises:
ValueError: When input shapes are incompatible.
"""
weights_shape = weights.get_shape()
input_shape = input_tensor.get_shape()
if (len(weights_shape) != 2 or len(input_shape) != 2 or
weights_shape[1] != input_shape[0]):
raise ValueError('Incompatible shapes for op %s inputs: %s and %s' %
(op_name, weights_shape, input_shape))
_OP_CLONER = _OpCloner()
def _AssertShapesMatch(op_name, in_tensor, out_tensor):
"""Makes sure that shapes of input and output tensors are compatible.
Args:
op_name: String, operation name, only used in error message.
in_tensor: Tensor, input tensor.
out_tensor: Tensor, output tensor.
Raises:
ValueError: When input and output tensors have different shapes.
"""
in_shape = in_tensor.get_shape()
out_shape = out_tensor.get_shape()
if not in_shape.is_compatible_with(out_shape):
raise ValueError('%s should not change tensor shape: input %s, '
'output %s' % (op_name, in_shape, out_shape))
def _HasScaling(graph, input_to_ops_map, bn):
r"""Checks if batch norm has scaling enabled.
Difference between batch norm with scaling and without is that with scaling:
Rsqrt -> mul -> mul_1
\-> mul_2
where
mul multiplies gamma by inverse square root of EMA of batch variance,
mul_1 multiplies output of mul with output from the base operation
(convolution, FC or depthwise convolution),
mul_2 multiplies output of mul with EMA of batch mean,
and without scaling:
Rsqrt -> mul
\-> mul_1
where
mul multiplies the inverse square root of EMA of batch variance with output
from the base operation,
mul_1 multiplies inverse square root of EMA of batch variance with EMA
of batch mean.
Args:
graph: Graph to inspect.
input_to_ops_map: InputToOps object containing mapping from tensor's name
to ops that take it as input.
bn: Batch norm layer prefix string.
Returns:
A boolean indicating whether this batch norm layer has scaling enabled.
"""
rsqrt_op = graph.get_operation_by_name(bn + '/BatchNorm/batchnorm_1/Rsqrt')
rsqrt_consumers = input_to_ops_map.ConsumerOperations(rsqrt_op)
return sum(1 for op in rsqrt_consumers if op.type == 'Mul') == 1
class _BatchNormMatch(object):
"""Contains all information related to a found Fused/UnfusedBatchNorm."""
def __init__(self, layer_op, bn_op, output_tensor, input_tensor,
weight_tensor, gamma_tensor, beta_tensor, mean_tensor,
variance_tensor, moving_mean_tensor, moving_variance_tensor,
bn_decay_mean_tensor, bn_decay_var_tensor, batch_epsilon):
self._layer_op = layer_op
self._bn_op = bn_op
self._output_tensor = output_tensor
self._input_tensor = input_tensor
self._weight_tensor = weight_tensor
self._gamma_tensor = gamma_tensor
self._beta_tensor = beta_tensor
self._mean_tensor = mean_tensor
self._variance_tensor = variance_tensor
self._moving_mean_tensor = moving_mean_tensor
self._moving_variance_tensor = moving_variance_tensor
self._bn_decay_mean_tensor = bn_decay_mean_tensor
self._bn_decay_var_tensor = bn_decay_var_tensor
self._batch_epsilon = batch_epsilon
@property
def layer_op(self):
return self._layer_op
@property
def bn_op(self):
return self._bn_op
@property
def output_tensor(self):
return self._output_tensor
@property
def input_tensor(self):
return self._input_tensor
@property
def weight_tensor(self):
return self._weight_tensor
@property
def gamma_tensor(self):
return self._gamma_tensor
@property
def beta_tensor(self):
return self._beta_tensor
@property
def mean_tensor(self):
return self._mean_tensor
@property
def variance_tensor(self):
return self._variance_tensor
@property
def moving_mean_tensor(self):
return self._moving_mean_tensor
@property
def moving_variance_tensor(self):
return self._moving_variance_tensor
@property
def batch_epsilon(self):
return self._batch_epsilon
@property
def bn_decay_mean_tensor(self):
return self._bn_decay_mean_tensor
@property
def bn_decay_var_tensor(self):
return self._bn_decay_var_tensor
|
|
#!/usr/bin/python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of everything that's particular to version 1.3.0."""
import logging
import StringIO
import wms.ogc.common.image_specs as image_specs
import wms.ogc.common.utils as utils
import wms.ogc.implementation.common as common
import wms.ogc.xml.v130.capabilities_wms as capabilities_wms
import wms.ogc.xml.v130.exceptions_wms as exceptions_wms
_WMS_VERSION = "1.3.0"
_XML_CONTENT_TYPE = "text/xml"
_XML_SERVICE_EXCEPTION_TYPE = "application/vnd.ogc.se_xml"
_XML_HEADER = '<?xml version="1.0" encoding="UTF-8"?>\n'
_NAME = "WMS"
_TITLE = "Google Earth WMS service."
# required wms parameters
# This crs instead of srs is the only diff. in this list from 1.1.1
_REQUIRED_PARAMS = [
"version",
"request",
"layers",
"styles",
"crs",
"bbox",
"width",
"height",
"format"
]
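# Illustrative example (added): a hypothetical WMS 1.3.0 GetMap query supplying
# the required parameters above; note "crs" where a 1.1.1 request would use
# "srs", and the lat/lon bbox order used with EPSG:4326 in 1.3.0:
#   ...?version=1.3.0&request=GetMap&layers=ImageryMaps&styles=&crs=EPSG:4326
#      &bbox=-90,-180,90,180&width=512&height=512&format=image/png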
_HEADERS_EXCEPTION = [
("Content-Disposition",
'inline; filename="service-exception-%s-google.xml"' % _WMS_VERSION),
("Content-Type", _XML_SERVICE_EXCEPTION_TYPE)]
# Get logger
logger = logging.getLogger("wms_maps")
class ServiceException(Exception):
"""Represents a 1.3.0 WMS service exception."""
def __init__(self, code=None, message="Unspecified Error"):
super(ServiceException, self).__init__(code, message)
self.code = code
self.message = message
def Xml(self):
"""Produces the XML response.
Returns:
The XML response.
"""
xml_text = StringIO.StringIO()
xml_text.write(_XML_HEADER)
service_exception = exceptions_wms.ServiceExceptionType(
self.code,
self.message
)
service_exception_report = exceptions_wms.ServiceExceptionReport(
_WMS_VERSION, [service_exception])
service_exception_report.export(xml_text, 0)
return xml_text.getvalue()
class GetCapabilitiesRequest(object):
"""Represents a WMS 1.3.0 GetCapabilities request."""
def __init__(self, layer_obj, parameters):
self.parameters = parameters
self.layer_obj = layer_obj
self.capabilities_xml = ""
def _MakeOnlineResourceXml(self, url):
return capabilities_wms.OnlineResource(
# xlink type
type_="simple",
# xlink_href
href=url)
def _GetMapLimitsForEpsg(self, limits, epsg_name):
"""Fetch the map limits based on the EPSG name."""
    # Per the OGC specification, for EPSG:4326 (Plate Carree projection) the
    # bounding box coordinates must be in latitude, longitude order for
    # WMS 1.3.0, whereas other projections use longitude, latitude order.
if epsg_name == "EPSG:4326":
return (limits.y0, limits.x0, limits.y1, limits.x1)
return (limits.x0, limits.y0, limits.x1, limits.y1)
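  # Illustrative example (added): with hypothetical limits x0=-180, y0=-90,
  # x1=180, y1=90, this returns (-90, -180, 90, 180) for "EPSG:4326" (the
  # lat/lon axis order WMS 1.3.0 requires) and (-180, -90, 180, 90) otherwise.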
def GetOnlineResource(self):
return self._MakeOnlineResourceXml(self.parameters["this-endpoint"])
def GetDCPTypeInfo(self):
resource_info = self.GetOnlineResource()
http_info = capabilities_wms.HTTP(
Get=capabilities_wms.Get(OnlineResource=resource_info))
dcptype_info = [capabilities_wms.DCPType(HTTP=http_info)]
return dcptype_info
def GetCapabilities(self):
dcptype_info = self.GetDCPTypeInfo()
capabilities = capabilities_wms.OperationType(
Format=[_XML_CONTENT_TYPE], DCPType=dcptype_info)
return capabilities
def GetMap(self):
formats = [spec.content_type
for spec in image_specs.IMAGE_SPECS.values()]
dcptype_info = self.GetDCPTypeInfo()
map_info = capabilities_wms.OperationType(
Format=formats, DCPType=dcptype_info)
return map_info
def GetExceptionInfo(self):
exp_info = capabilities_wms.Exception(
Format=[_XML_SERVICE_EXCEPTION_TYPE])
return exp_info
def GetRequestInfo(self):
"""Process request information.
Returns:
request_info: request information in the capabilities xml.
"""
capabilities_info = self.GetCapabilities()
map_info = self.GetMap()
request_info = capabilities_wms.Request(
GetCapabilities=capabilities_info,
GetMap=map_info,
GetFeatureInfo=None)
return request_info
def SetCapability(self):
request_info = self.GetRequestInfo()
exp_info = self.GetExceptionInfo()
self.capabilities_xml.set_Capability(
capabilities_wms.Capability(Request=request_info, Exception=exp_info))
self.SetLayers()
def SetService(self):
resource_info = self.GetOnlineResource()
self.capabilities_xml.set_Service(
capabilities_wms.Service(
Name=_NAME,
Title=_TITLE,
Abstract=None,
KeywordList=None,
OnlineResource=resource_info))
def SetLayers(self):
"""Set the layers for the capabilities xml."""
# This outer, inaccessible layer is to give information just once;
# the sub-layers inherit it.
outer_layer = capabilities_wms.Layer(
# 7.2.4.7.4 The layer is area-filling => opaque
opaque=True,
# 7.2.4.7.5 -- we can subset.
noSubsets=False,
# whether we support GetFeatureInfo
queryable=False,
# -- can't request it, this is just a container.
Name=None,
Title=_TITLE)
server_layers_by_name = self.layer_obj.GetLayers(
utils.GetValue(self.parameters, "server-url"),
utils.GetValue(self.parameters, "TargetPath"))
if not server_layers_by_name:
# Raise ServiceException here.
raise ServiceException(None, "Database type is not supported.")
for layer_name, server_layer in server_layers_by_name.iteritems():
proj = server_layer.projection
wms_layer = capabilities_wms.Layer(
# 7.2.4.7.4 - Even for vector maps we always get data from
# the server, even if it's just a transparent tile. By
# jeffdonner's reading, this means that even the vector
# layers are 'opaque'.
opaque=True,
# 7.2.4.7.5 - we can subset.
noSubsets=False,
queryable=False,
Name=layer_name,
Title=server_layer.label,
# ex geo bounding box is required.
EX_GeographicBoundingBox=
capabilities_wms.EX_GeographicBoundingBox(
westBoundLongitude=-proj.MAX_LONGITUDE,
eastBoundLongitude=proj.MAX_LONGITUDE,
southBoundLatitude=-proj.MAX_LATITUDE,
northBoundLatitude=proj.MAX_LATITUDE))
for epsg_name in proj.EPSG_NAMES:
wms_layer.add_CRS(epsg_name)
map_limits = proj.AdvertizedLogOuterBounds()
bounding_boxes = []
for epsg_name in proj.EPSG_NAMES:
(min_x, min_y, max_x, max_y) = self._GetMapLimitsForEpsg(
map_limits, epsg_name)
bounding_box_object = capabilities_wms.BoundingBox(
CRS=epsg_name,
minx=min_x,
miny=min_y,
maxx=max_x,
maxy=max_y)
bounding_boxes.append(bounding_box_object)
wms_layer.set_BoundingBox(bounding_boxes)
outer_layer.add_Layer(wms_layer)
self.capabilities_xml.get_Capability().set_Layer(outer_layer)
def _Xml(self):
"""Generates the xml response.
Returns:
The XML response.
"""
logger.debug("Begin XML response for GetCapabilities for WMS v1.3.0")
xml_text = StringIO.StringIO()
xml_text.write(_XML_HEADER)
self.capabilities_xml = capabilities_wms.WMS_Capabilities(
version=_WMS_VERSION)
self.SetService()
self.SetCapability()
self.capabilities_xml.export(xml_text, 0)
logger.debug("End XML response for GetCapabilities for WMS v1.3.0")
return xml_text.getvalue()
def GenerateOutput(self):
"""Generate response for GetCapabilities request.
Returns:
headers: List of headers for the response.
response: GetCapabilities response.
"""
logger.debug("Processing GetCapabilities response for WMS v1.3.0")
# If "TargetPath" query parameter doesn't exist in the
# input parameters, then send back "ServiceException"
# to the client.
target_path = utils.GetValue(self.parameters, "TargetPath")
if not target_path:
headers = _HEADERS_EXCEPTION
response = ServiceException(None, "Target path is not specified.").Xml()
else:
try:
headers = [
("Content-Disposition",
'inline; filename="wmsCapabilities-%s-google.xml"' % _WMS_VERSION),
("Content-Type", _XML_CONTENT_TYPE)]
response = self._Xml()
except ServiceException, e:
headers = _HEADERS_EXCEPTION
return headers, e.Xml()
return headers, response
class GetMapRequest(common.WmsGetMapRequest):
"""Represents WMS GetMap request."""
def __init__(self, layer_obj, parameters):
common.WmsGetMapRequest.__init__(
self,
layer_obj,
parameters,
_REQUIRED_PARAMS)
logger.debug("Initializing GetMapRequest class")
utils.DumpParms(parameters, "GetMapRequest:")
# These must be in the derived classes because the ServiceExceptions
# are of different types.
def _ServiceExceptionImpl(self, code, message):
raise ServiceException(code, message)
def _ProcessResponse(self):
"""Processes the GetMapRequest parameters and prepares the GEE tile URL."""
self._ProcessCommon()
# future 130-specific processing.
def GenerateOutput(self):
"""Executes the operation and returns the image.
Returns:
The image composed of tiles.
"""
logger.debug("Generating GetMapRequest response "
"for WMS request for version 1.3.0")
# If "TargetPath" query parameter doesn't exist in the
# input parameters, then send back "ServiceException"
# to the client.
target_path = utils.GetValue(self.parameters, "TargetPath")
if not target_path:
headers = _HEADERS_EXCEPTION
response = ServiceException(None, "Target path is not specified.").Xml()
return headers, response
else:
try:
self._ProcessResponse()
except ServiceException, e:
headers = _HEADERS_EXCEPTION
return headers, e.Xml()
logger.debug("Done generating GetMapRequest response "
"for WMS request for version 1.3.0")
return self.GenerateOutputCommon()
class BadWmsRequest(object):
"""Generator of the appropriate WMS-version error.
For when we know the WMS version, so that we can and should send a
ServiceException, yet it's not any particular REQUEST type so we
can't handle it from within one of those.
"""
def __init__(self, badRequestType):
self.bad_request_type = badRequestType
def GenerateOutput(self):
headers = _HEADERS_EXCEPTION
response = ServiceException(
common.OPERATION_NOT_SUPPORTED,
"WMS request type \'%s\' not supported." %
self.bad_request_type).Xml()
return headers, response
def main():
obj = BadWmsRequest("Not Valid Format")
output = obj.GenerateOutput()
print output
if __name__ == "__main__":
main()
|
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import signal
import subprocess
import time
import traceback
from contextlib import contextmanager
import psutil
from pants.base.build_environment import get_buildroot
from pants.util.dirutil import safe_delete, safe_mkdir, safe_open
logger = logging.getLogger(__name__)
class ProcessGroup(object):
"""Wraps a logical group of processes and provides convenient access to ProcessManager objects."""
def __init__(self, name):
self._name = name
@contextmanager
def _swallow_psutil_exceptions(self):
"""A contextmanager that swallows standard psutil access exceptions."""
try:
yield
except (psutil.AccessDenied, psutil.NoSuchProcess):
# This masks common, but usually benign psutil process access exceptions that might be seen
# when accessing attributes/methods on psutil.Process objects.
pass
def _instance_from_process(self, process):
"""Default converter from psutil.Process to process instance classes for subclassing."""
return ProcessManager(name=process.name(), pid=process.pid, process_name=process.name())
def iter_processes(self, proc_filter=None):
proc_filter = proc_filter or (lambda x: True)
with self._swallow_psutil_exceptions():
for proc in (x for x in psutil.process_iter() if proc_filter(x)):
yield proc
def iter_instances(self, *args, **kwargs):
for item in self.iter_processes(*args, **kwargs):
yield self._instance_from_process(item)
class ProcessManager(object):
"""Subprocess/daemon management mixin/superclass. Not intended to be thread-safe."""
class NonResponsiveProcess(Exception): pass
class Timeout(Exception): pass
WAIT_INTERVAL = .1
KILL_WAIT = 1
KILL_CHAIN = (signal.SIGTERM, signal.SIGKILL)
def __init__(self, name, pid=None, socket=None, process_name=None, socket_type=None):
self._name = name
self._pid = pid
self._socket = socket
self._socket_type = socket_type or int
self._process_name = process_name
self._buildroot = get_buildroot()
self._process = None
@property
def name(self):
"""The logical name/label of the process."""
return self._name
@property
def process_name(self):
"""The logical process name. If defined, this is compared to exe_name for stale pid checking."""
return self._process_name
@property
def cmdline(self):
"""The process commandline. e.g. ['/usr/bin/python2.7', 'pants.pex'].
:returns: The command line or else `None` if the underlying process has died.
"""
try:
process = self._as_process()
if process:
return process.cmdline()
except psutil.NoSuchProcess:
# On some platforms, accessing attributes of a zombie'd Process results in NoSuchProcess.
pass
return None
@property
def cmd(self):
"""The first element of the process commandline e.g. '/usr/bin/python2.7'.
:returns: The first element of the process command line or else `None` if the underlying
process has died.
"""
return (self.cmdline or [None])[0]
@property
def pid(self):
"""The running processes pid (or None)."""
return self._pid or self._get_pid()
@property
def socket(self):
"""The running processes socket/port information (or None)."""
return self._socket or self._get_socket()
@staticmethod
def _maybe_cast(x, caster):
try:
return caster(x)
except (TypeError, ValueError):
return x
def _as_process(self):
"""Returns a psutil `Process` object wrapping our pid.
NB: Even with a process object in hand, subsequent method calls against it can always raise
`NoSuchProcess`. Care is needed to document the raises in the public API or else trap them and
do something sensible for the API.
:returns: a psutil Process object or else None if we have no pid.
:rtype: :class:`psutil.Process`
:raises: :class:`psutil.NoSuchProcess` if the process identified by our pid has died.
"""
if self._process is None and self.pid:
self._process = psutil.Process(self.pid)
return self._process
def _read_file(self, filename):
with safe_open(filename, 'rb') as f:
return f.read().strip()
def _write_file(self, filename, payload):
with safe_open(filename, 'wb') as f:
f.write(payload)
def _wait_for_file(self, filename, timeout=10, want_content=True):
"""Wait up to timeout seconds for filename to appear with a non-zero size or raise Timeout()."""
start_time = time.time()
while 1:
if os.path.exists(filename) and (not want_content or os.path.getsize(filename)): return
if time.time() - start_time > timeout:
raise self.Timeout('exceeded timeout of {sec} seconds while waiting for file {filename}'
.format(sec=timeout, filename=filename))
else:
time.sleep(self.WAIT_INTERVAL)
def await_pid(self, timeout):
"""Wait up to a given timeout for a process to launch."""
self._wait_for_file(self.get_pid_path(), timeout)
return self._get_pid()
def await_socket(self, timeout):
"""Wait up to a given timeout for a process to write socket info."""
self._wait_for_file(self.get_socket_path(), timeout)
return self._get_socket()
def get_metadata_dir(self):
"""Return a metadata path for the process.
This should always live outside of the .pants.d dir to survive a clean-all.
"""
return os.path.join(self._buildroot, '.pids', self._name)
def _purge_metadata(self):
assert not self.is_alive(), 'aborting attempt to purge metadata for a running process!'
for f in (self.get_pid_path(), self.get_socket_path()):
if f and os.path.exists(f):
try:
logging.debug('purging {file}'.format(file=f))
safe_delete(f)
except OSError as e:
logging.warning('failed to unlink {file}: {exc}'.format(file=f, exc=e))
def get_pid_path(self):
"""Return the path to the file containing the processes pid."""
return os.path.join(self.get_metadata_dir(), 'pid')
def get_socket_path(self):
"""Return the path to the file containing the processes socket."""
return os.path.join(self.get_metadata_dir(), 'socket')
def _maybe_init_metadata_dir(self):
safe_mkdir(self.get_metadata_dir())
def write_pid(self, pid):
"""Write the current processes PID to the pidfile location"""
self._maybe_init_metadata_dir()
self._write_file(self.get_pid_path(), str(pid))
def write_socket(self, socket_info):
"""Write the local processes socket information (TCP port or UNIX socket)."""
self._maybe_init_metadata_dir()
self._write_file(self.get_socket_path(), str(socket_info))
def _get_pid(self):
"""Retrieve and return the running PID."""
try:
return self._maybe_cast(self._read_file(self.get_pid_path()), int) or None
except (IOError, OSError):
return None
def _get_socket(self):
"""Retrieve and return the running processes socket info."""
try:
return self._maybe_cast(self._read_file(self.get_socket_path()), self._socket_type) or None
except (IOError, OSError):
return None
def is_alive(self):
"""Return a boolean indicating whether the process is running."""
try:
process = self._as_process()
if not process:
# Can happen if we don't find our pid.
return False
if (process.status() == psutil.STATUS_ZOMBIE or # Check for walkers.
(self.process_name and self.process_name != process.name())): # Check for stale pids.
return False
except psutil.NoSuchProcess:
# On some platforms, accessing attributes of a zombie'd Process results in NoSuchProcess.
return False
return True
def _kill(self, kill_sig):
"""Send a signal to the current process."""
if self.pid:
os.kill(self.pid, kill_sig)
def terminate(self, signal_chain=KILL_CHAIN, kill_wait=KILL_WAIT, purge=True):
"""Ensure a process is terminated by sending a chain of kill signals (SIGTERM, SIGKILL)."""
if self.is_alive():
for signal_type in signal_chain:
try:
self._kill(signal_type)
except OSError as e:
logger.warning('caught OSError({e!s}) during attempt to kill -{signal} {pid}!'
.format(e=e, signal=signal_type, pid=self.pid))
time.sleep(kill_wait)
if not self.is_alive():
break
if not self.is_alive():
if purge: self._purge_metadata()
else:
raise self.NonResponsiveProcess('failed to kill pid {pid} with signals {chain}'
.format(pid=self.pid, chain=signal_chain))
def monitor(self):
"""Synchronously monitor the current process and actively keep it alive."""
raise NotImplementedError()
def _open_process(self, *args, **kwargs):
return subprocess.Popen(*args, **kwargs)
def run_subprocess(self, *args, **kwargs):
"""Synchronously run a subprocess."""
return self._open_process(*args, **kwargs)
def daemonize(self, pre_fork_opts=None, post_fork_parent_opts=None, post_fork_child_opts=None,
write_pid=True):
"""Perform a double-fork, execute callbacks and write the child pid file.
The double-fork here is necessary to truly daemonize the subprocess such that it can never
take control of a tty. The initial fork and setsid() creates a new, isolated process group
and also makes the first child a session leader (which can still acquire a tty). By forking a
second time, we ensure that the second child can never acquire a controlling terminal because
it's no longer a session leader - but it now has its own separate process group.
Additionally, a normal daemon implementation would typically perform an os.umask(0) to reset
the processes file mode creation mask post-fork. We do not do this here (and in daemon_spawn
below) due to the fact that the daemons that pants would run are typically personal user
daemons. Having a disparate umask from pre-vs-post fork causes files written in each phase to
differ in their permissions without good reason - in this case, we want to inherit the umask.
"""
self.pre_fork(**pre_fork_opts or {})
pid = os.fork()
if pid == 0:
os.setsid()
second_pid = os.fork()
if second_pid == 0:
try:
os.chdir(self._buildroot)
self.post_fork_child(**post_fork_child_opts or {})
except Exception:
logging.critical(traceback.format_exc())
os._exit(0)
else:
try:
if write_pid: self.write_pid(second_pid)
self.post_fork_parent(**post_fork_parent_opts or {})
except Exception:
logging.critical(traceback.format_exc())
os._exit(0)
def daemon_spawn(self, pre_fork_opts=None, post_fork_parent_opts=None, post_fork_child_opts=None):
"""Perform a single-fork to run a subprocess and write the child pid file.
Use this if your post_fork_child block invokes a subprocess via subprocess.Popen(). In this
case, a second fork such as used in daemonize() is extraneous given that Popen() also forks.
Using this daemonization method vs daemonize() leaves the responsibility of writing the pid
to the caller to allow for library-agnostic flexibility in subprocess execution.
"""
self.pre_fork(**pre_fork_opts or {})
pid = os.fork()
if pid == 0:
try:
os.setsid()
os.chdir(self._buildroot)
self.post_fork_child(**post_fork_child_opts or {})
except Exception:
logging.critical(traceback.format_exc())
os._exit(0)
else:
try:
self.post_fork_parent(**post_fork_parent_opts or {})
except Exception:
logging.critical(traceback.format_exc())
def pre_fork(self):
"""Pre-fork callback for subclasses."""
pass
def post_fork_child(self):
"""Pre-fork child callback for subclasses."""
pass
def post_fork_parent(self):
"""Post-fork parent callback for subclasses."""
pass
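# Illustrative sketch (added; not part of the original module): a hypothetical
# ProcessManager subclass that daemonizes a long-running child. The names
# ExampleServer and run_example are made up for demonstration only.
class ExampleServer(ProcessManager):
  """Minimal illustrative daemon wrapper."""
  def post_fork_child(self):
    # Runs in the detached grandchild created by daemonize().
    while True:
      time.sleep(60)
def run_example():
  server = ExampleServer(name='example-server')
  server.daemonize()  # double-forks and writes the child pid under .pids/example-server
  pid = server.await_pid(timeout=10)
  logger.info('example server running as pid %s', pid)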
|
|
"""Denon HEOS Media Player."""
from __future__ import annotations
import asyncio
from datetime import timedelta
import logging
from pyheos import Heos, HeosError, const as heos_const
import voluptuous as vol
from homeassistant.components.media_player.const import DOMAIN as MEDIA_PLAYER_DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_HOST, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
from homeassistant.util import Throttle
from . import services
from .config_flow import format_title
from .const import (
COMMAND_RETRY_ATTEMPTS,
COMMAND_RETRY_DELAY,
DATA_CONTROLLER_MANAGER,
DATA_SOURCE_MANAGER,
DOMAIN,
SIGNAL_HEOS_UPDATED,
)
PLATFORMS = [MEDIA_PLAYER_DOMAIN]
CONFIG_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(DOMAIN),
{DOMAIN: vol.Schema({vol.Required(CONF_HOST): cv.string})},
),
extra=vol.ALLOW_EXTRA,
)
MIN_UPDATE_SOURCES = timedelta(seconds=1)
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the HEOS component."""
if DOMAIN not in config:
return True
host = config[DOMAIN][CONF_HOST]
entries = hass.config_entries.async_entries(DOMAIN)
if not entries:
# Create new entry based on config
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data={CONF_HOST: host}
)
)
else:
# Check if host needs to be updated
entry = entries[0]
if entry.data[CONF_HOST] != host:
hass.config_entries.async_update_entry(
entry, title=format_title(host), data={**entry.data, CONF_HOST: host}
)
return True
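# Note added for clarity: when configured via YAML, the `config` dict passed to
# async_setup is expected to look like {"heos": {"host": "192.168.1.27"}} (the
# host value here is hypothetical); CONFIG_SCHEMA above marks the YAML path as
# deprecated but still requires CONF_HOST when the domain is present.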
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Initialize config entry which represents the HEOS controller."""
# For backwards compat
if entry.unique_id is None:
hass.config_entries.async_update_entry(entry, unique_id=DOMAIN)
host = entry.data[CONF_HOST]
# Setting all_progress_events=False ensures that we only receive a
# media position update upon start of playback or when media changes
controller = Heos(host, all_progress_events=False)
try:
await controller.connect(auto_reconnect=True)
# Auto reconnect only operates if initial connection was successful.
except HeosError as error:
await controller.disconnect()
_LOGGER.debug("Unable to connect to controller %s: %s", host, error)
raise ConfigEntryNotReady from error
# Disconnect when shutting down
async def disconnect_controller(event):
await controller.disconnect()
entry.async_on_unload(
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, disconnect_controller)
)
# Get players and sources
try:
players = await controller.get_players()
favorites = {}
if controller.is_signed_in:
favorites = await controller.get_favorites()
else:
_LOGGER.warning(
"%s is not logged in to a HEOS account and will be unable to retrieve "
"HEOS favorites: Use the 'heos.sign_in' service to sign-in to a HEOS account",
host,
)
inputs = await controller.get_input_sources()
except HeosError as error:
await controller.disconnect()
_LOGGER.debug("Unable to retrieve players and sources: %s", error)
raise ConfigEntryNotReady from error
controller_manager = ControllerManager(hass, controller)
await controller_manager.connect_listeners()
source_manager = SourceManager(favorites, inputs)
source_manager.connect_update(hass, controller)
hass.data[DOMAIN] = {
DATA_CONTROLLER_MANAGER: controller_manager,
DATA_SOURCE_MANAGER: source_manager,
MEDIA_PLAYER_DOMAIN: players,
}
services.register(hass, controller)
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
controller_manager = hass.data[DOMAIN][DATA_CONTROLLER_MANAGER]
await controller_manager.disconnect()
hass.data.pop(DOMAIN)
services.remove(hass)
return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
class ControllerManager:
"""Class that manages events of the controller."""
def __init__(self, hass, controller):
"""Init the controller manager."""
self._hass = hass
self._device_registry = None
self._entity_registry = None
self.controller = controller
self._signals = []
async def connect_listeners(self):
"""Subscribe to events of interest."""
self._device_registry, self._entity_registry = await asyncio.gather(
self._hass.helpers.device_registry.async_get_registry(),
self._hass.helpers.entity_registry.async_get_registry(),
)
# Handle controller events
self._signals.append(
self.controller.dispatcher.connect(
heos_const.SIGNAL_CONTROLLER_EVENT, self._controller_event
)
)
# Handle connection-related events
self._signals.append(
self.controller.dispatcher.connect(
heos_const.SIGNAL_HEOS_EVENT, self._heos_event
)
)
async def disconnect(self):
"""Disconnect subscriptions."""
for signal_remove in self._signals:
signal_remove()
self._signals.clear()
self.controller.dispatcher.disconnect_all()
await self.controller.disconnect()
async def _controller_event(self, event, data):
"""Handle controller event."""
if event == heos_const.EVENT_PLAYERS_CHANGED:
self.update_ids(data[heos_const.DATA_MAPPED_IDS])
# Update players
self._hass.helpers.dispatcher.async_dispatcher_send(SIGNAL_HEOS_UPDATED)
async def _heos_event(self, event):
"""Handle connection event."""
if event == heos_const.EVENT_CONNECTED:
try:
# Retrieve latest players and refresh status
data = await self.controller.load_players()
self.update_ids(data[heos_const.DATA_MAPPED_IDS])
except HeosError as ex:
_LOGGER.error("Unable to refresh players: %s", ex)
# Update players
self._hass.helpers.dispatcher.async_dispatcher_send(SIGNAL_HEOS_UPDATED)
def update_ids(self, mapped_ids: dict[int, int]):
"""Update the IDs in the device and entity registry."""
# mapped_ids contains the mapped IDs (new:old)
for new_id, old_id in mapped_ids.items():
# update device registry
entry = self._device_registry.async_get_device({(DOMAIN, old_id)})
new_identifiers = {(DOMAIN, new_id)}
if entry:
self._device_registry.async_update_device(
entry.id, new_identifiers=new_identifiers
)
_LOGGER.debug(
"Updated device %s identifiers to %s", entry.id, new_identifiers
)
# update entity registry
entity_id = self._entity_registry.async_get_entity_id(
MEDIA_PLAYER_DOMAIN, DOMAIN, str(old_id)
)
if entity_id:
self._entity_registry.async_update_entity(
entity_id, new_unique_id=str(new_id)
)
_LOGGER.debug("Updated entity %s unique id to %s", entity_id, new_id)
class SourceManager:
"""Class that manages sources for players."""
def __init__(
self,
favorites,
inputs,
*,
retry_delay: int = COMMAND_RETRY_DELAY,
max_retry_attempts: int = COMMAND_RETRY_ATTEMPTS,
):
"""Init input manager."""
self.retry_delay = retry_delay
self.max_retry_attempts = max_retry_attempts
self.favorites = favorites
self.inputs = inputs
self.source_list = self._build_source_list()
def _build_source_list(self):
"""Build a single list of inputs from various types."""
source_list = []
source_list.extend([favorite.name for favorite in self.favorites.values()])
source_list.extend([source.name for source in self.inputs])
return source_list
async def play_source(self, source: str, player):
"""Determine type of source and play it."""
index = next(
(
index
for index, favorite in self.favorites.items()
if favorite.name == source
),
None,
)
if index is not None:
await player.play_favorite(index)
return
input_source = next(
(
input_source
for input_source in self.inputs
if input_source.name == source
),
None,
)
if input_source is not None:
await player.play_input_source(input_source)
return
_LOGGER.error("Unknown source: %s", source)
def get_current_source(self, now_playing_media):
"""Determine current source from now playing media."""
# Match input by input_name:media_id
if now_playing_media.source_id == heos_const.MUSIC_SOURCE_AUX_INPUT:
return next(
(
input_source.name
for input_source in self.inputs
if input_source.input_name == now_playing_media.media_id
),
None,
)
# Try matching favorite by name:station or media_id:album_id
return next(
(
source.name
for source in self.favorites.values()
if source.name == now_playing_media.station
or source.media_id == now_playing_media.album_id
),
None,
)
def connect_update(self, hass, controller):
"""
Connect listener for when sources change and signal player update.
EVENT_SOURCES_CHANGED is often raised multiple times in response to a
physical event therefore throttle it. Retrieving sources immediately
after the event may fail so retry.
"""
@Throttle(MIN_UPDATE_SOURCES)
async def get_sources():
retry_attempts = 0
while True:
try:
favorites = {}
if controller.is_signed_in:
favorites = await controller.get_favorites()
inputs = await controller.get_input_sources()
return favorites, inputs
except HeosError as error:
if retry_attempts < self.max_retry_attempts:
retry_attempts += 1
_LOGGER.debug(
"Error retrieving sources and will retry: %s", error
)
await asyncio.sleep(self.retry_delay)
else:
_LOGGER.error("Unable to update sources: %s", error)
return
async def update_sources(event, data=None):
if event in (
heos_const.EVENT_SOURCES_CHANGED,
heos_const.EVENT_USER_CHANGED,
heos_const.EVENT_CONNECTED,
):
sources = await get_sources()
# If throttled, it will return None
if sources:
self.favorites, self.inputs = sources
self.source_list = self._build_source_list()
_LOGGER.debug("Sources updated due to changed event")
# Let players know to update
hass.helpers.dispatcher.async_dispatcher_send(SIGNAL_HEOS_UPDATED)
controller.dispatcher.connect(
heos_const.SIGNAL_CONTROLLER_EVENT, update_sources
)
controller.dispatcher.connect(heos_const.SIGNAL_HEOS_EVENT, update_sources)
|
|
# Library for full image cnn operations
import numpy
import numpy as np
import pycuda.autoinit
import pycuda.driver as cu
import pycuda.compiler as nvcc
import pycuda.gpuarray as gpuarray
VALID_SIZE_CROP = False
def _centered(arr, newsize):
# Return the center newsize portion of the array.
    newsize = np.asarray(newsize)
    currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
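# Illustrative example (added; not part of the original module): taking the
# centered 2x2 portion of a 4x4 array with the helper above.
def _example_centered():
    a = np.arange(16).reshape(4, 4)
    # startind = ((4, 4) - (2, 2)) // 2 = (1, 1), so rows/cols 1..2 are kept.
    return _centered(a, (2, 2))  # -> [[5, 6], [9, 10]]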
gpu_convolve_source = """
__global__ void convolve( float* input, float* filters, float* output,
int stride, int width, int height, int channels, int filter_width, int filter_height, int nfilters, int output_width, int output_height)
{
int oi = blockIdx.x * blockDim.x + threadIdx.x;
int oj = blockIdx.y * blockDim.y + threadIdx.y;
int si = oi % stride;
int sj = oj % stride;
int i = oi / stride;
int j = oj / stride;
if ( oi < output_width && oj < output_height )
{
// Calculate convolution result for output pixel oi, oj with all filters
for(int filter_index = 0; filter_index < nfilters; ++filter_index )
{
float conv_sum = 0;
// Repeat for all channels
for(int c = 0; c < channels; ++c)
{
for (int fi = 0; fi < filter_width; ++fi)
{
for (int fj = 0; fj < filter_height; ++fj)
{
if ((i + fi) * stride + si < width && (j + fj) * stride + sj < height)
{
float in_pix = input[(i + fi) * stride + si + ((j + fj) * stride + sj) * width + c * width * height];
float filt_pix = filters[(filter_width - 1 - fi) + (filter_height - 1 - fj) * filter_width + (filter_index * channels + c) * filter_width * filter_height];
conv_sum += in_pix * filt_pix;
}
}
}
}
output[oi + oj * output_width + filter_index * output_width * output_height] = conv_sum;
}
}
}
"""
gpu_maxpool2_source = """
__global__ void maxpool2( float* input, float* bias, float* output,
int stride, int width, int height, int channels )
{
int oi = blockIdx.x * blockDim.x + threadIdx.x;
int oj = blockIdx.y * blockDim.y + threadIdx.y;
int si = oi % stride;
int sj = oj % stride;
int i = oi / stride;
int j = oj / stride;
if ( oi < width && oj < height )
{
// Repeat for all channels
for(int c = 0; c < channels; ++c)
{
// Calculate dot product / tanh for pixel i, j
float max = -1e38;
for (int mi = i; mi < i + 2; ++mi)
{
for (int mj = j; mj < j + 2; ++mj)
{
if ( mi * stride + si < width && mj * stride + sj < height )
{
float pix = input[mi * stride + si + (mj * stride + sj) * width + c * width * height];
if ( pix > max )
{
max = pix;
}
}
}
}
output[oi + oj * width + c * width * height] = tanh( (max + bias[c]) );
}
}
}
"""
gpu_hidden_layer_source = """
__global__ void hidden_layer( float* input, float* w, float* bias, float* output,
int stride, int width, int height, int channels, int w_width, int w_height, int nweights, int output_width, int output_height )
{
int oi = blockIdx.x * blockDim.x + threadIdx.x;
int oj = blockIdx.y * blockDim.y + threadIdx.y;
int si = oi % stride;
int sj = oj % stride;
int i = oi / stride;
int j = oj / stride;
if ( oi < output_width && oj < output_height )
{
// Calculate dot product result for pixel oi, oj
for(int w_index = 0; w_index < nweights; ++w_index )
{
float dot_product = 0;
// Repeat for all channels
for(int c = 0; c < channels; ++c)
{
for (int wi = 0; wi < w_width; ++wi)
{
for (int wj = 0; wj < w_height; ++wj)
{
if ((i + wi) * stride + si < width && (j + wj) * stride + sj < height)
{
float in_pix = input[(i + wi) * stride + si + ((j + wj) * stride + sj) * width + c * width * height];
float filt_pix = w[w_index + wi * nweights + wj * nweights * w_width + c * nweights * w_width * w_height];
dot_product += in_pix * filt_pix;
}
}
}
}
// Apply bias and tanh
output[oi + oj * output_width + w_index * output_width * output_height] = tanh( (dot_product + bias[w_index]) );
}
}
}
"""
gpu_logistic_regression_source = """
__global__ void logistic_regression( float* input, float* w, float* bias, float* output,
int stride, int width, int height, int channels, int w_width, int w_height, int nweights, int output_width, int output_height )
{
int oi = blockIdx.x * blockDim.x + threadIdx.x;
int oj = blockIdx.y * blockDim.y + threadIdx.y;
int si = oi % stride;
int sj = oj % stride;
int i = oi / stride;
int j = oj / stride;
if ( oi < output_width && oj < output_height )
{
// Calculate dot product result for pixel oi, oj
for(int w_index = 0; w_index < nweights; ++w_index )
{
float dot_product = 0;
// Repeat for all channels
for(int c = 0; c < channels; ++c)
{
for (int wi = 0; wi < w_width; ++wi)
{
for (int wj = 0; wj < w_height; ++wj)
{
if ((i + wi) * stride + si < width && (j + wj) * stride + sj < height)
{
float in_pix = input[(i + wi) * stride + si + ((j + wj) * stride + sj) * width + c * width * height];
float filt_pix = w[w_index + wi * nweights + wj * nweights * w_width + c * nweights * w_width * w_height];
dot_product += in_pix * filt_pix;
}
}
}
}
// Apply bias
output[oi + oj * output_width + w_index * output_width * output_height] = (dot_product + bias[w_index]);
}
}
}
"""
gpu_logistic_regression_1to1_source = """
__global__ void logistic_regression_1to1( float* input, float* w, float* bias, float* output,
int width, int height, int channels, int nweights )
{
int oi = blockIdx.x * blockDim.x + threadIdx.x;
int oj = blockIdx.y * blockDim.y + threadIdx.y;
if ( oi < width && oj < height )
{
// Calculate dot product result for pixel oi, oj
for(int w_index = 0; w_index < nweights; ++w_index )
{
float dot_product = 0;
// Repeat for all channels
for(int c = 0; c < channels; ++c)
{
float in_pix = input[oi + oj * width + c * width * height];
float filt_pix = w[w_index + c * nweights];
dot_product += in_pix * filt_pix;
}
// Apply bias
output[oi + oj * width + w_index * width * height] = (dot_product + bias[w_index]);
}
}
}
"""
gpu_convolve = nvcc.SourceModule(gpu_convolve_source).get_function('convolve')
gpu_maxpool2 = nvcc.SourceModule(gpu_maxpool2_source).get_function('maxpool2')
gpu_hidden_layer = nvcc.SourceModule(gpu_hidden_layer_source).get_function('hidden_layer')
gpu_logistic_regression = nvcc.SourceModule(gpu_logistic_regression_source).get_function('logistic_regression')
gpu_logistic_regression_1to1 = nvcc.SourceModule(gpu_logistic_regression_1to1_source).get_function('logistic_regression_1to1')
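# Note added for clarity: every layer below launches its kernel with 32x32
# thread blocks and sizes the grid by ceiling division over the output, e.g.
# grid = (int((out_w - 1) / 32 + 1), int((out_h - 1) / 32 + 1)).
def _example_grid_dims(out_w, out_h, block_dim=32):
    # e.g. _example_grid_dims(100, 80) -> (4, 3): 4*32 >= 100 and 3*32 >= 80.
    return (int((out_w - 1) / block_dim + 1), int((out_h - 1) / block_dim + 1))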
class ConvolutionMaxpoolLayer(object):
def __init__(self, nkernels, ninputs, kernel_size, stride_in, maxpool_size,
weight_init=0.005, W=[], b=[]):
self.ninputs = ninputs
self.nkernels = nkernels
self.kernel_size = kernel_size
self.maxpool_size = maxpool_size
self.stride_in = stride_in
self.stride_out = stride_in * maxpool_size
self.prev_conv_size = 0
        if len(W) == 0:
self.W = (np.float32(np.random.random((nkernels, ninputs, kernel_size, kernel_size))) - 0.5) * weight_init * 2
else:
self.W = W
        if len(b) == 0:
self.b = np.zeros((nkernels), dtype=np.float32)
else:
self.b = b
def apply_layer(self, input_image=None, d_input_image=None):
# Calculate feed-forward result
if d_input_image is None:
ishape = input_image.shape
d_input_image = gpuarray.to_gpu(input_image)
else:
ishape = d_input_image.shape
print 'reusing input {0}'.format(ishape)
print type(d_input_image)
assert(ishape[0] == self.ninputs)
d_filters = gpuarray.to_gpu(self.W)
channels = ishape[0]
width = ishape[1]
height = ishape[2]
if VALID_SIZE_CROP:
# valid size output
output_size = (ishape[1] - self.kernel_size + 1, ishape[2] - self.kernel_size + 1)
else:
# same size output
output_size = (ishape[1], ishape[2])
block = (32, 32, 1)
grid = (int((output_size[0] - 1) / block[0] + 1), int((output_size[1] - 1) / block[0] + 1))
out_image = numpy.zeros((self.W.shape[0], output_size[0], output_size[1]), dtype=numpy.float32)
d_conv_image = gpuarray.to_gpu(out_image)
gpu_convolve(d_input_image, d_filters, d_conv_image,
numpy.int32(self.stride_in), numpy.int32(width), numpy.int32(height), numpy.int32(channels),
numpy.int32(self.W.shape[2]), numpy.int32(self.W.shape[3]), numpy.int32(self.W.shape[0]),
numpy.int32(output_size[0]), numpy.int32(output_size[1]), block=block, grid=grid)
        # Debug intermediate result
#self.layer0_conv = d_conv_image.get()
d_input_image = None
d_filters = None
d_out_image = gpuarray.to_gpu(out_image)
out_image = None
d_bias = gpuarray.to_gpu(self.b)
gpu_maxpool2(d_conv_image, d_bias, d_out_image,
numpy.int32(self.stride_in), numpy.int32(output_size[0]), numpy.int32(output_size[1]), numpy.int32(self.W.shape[0]), block=block, grid=grid)
d_bias = None
d_conv_image = None
#output = d_out_image.get()
#d_out_image = None
#output = np.zeros((self.nkernels, output_size[0], output_size[1]), dtype=np.float32)
#self.switches = np.zeros((self.nkernels, output_size[0], output_size[1]), dtype=np.uint32)
print "CONV Layer: Complete ({0} pools).".format(self.stride_in ** 2)
return d_out_image
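# Illustrative usage sketch (added; needs a CUDA device via pycuda.autoinit):
# build a convolution + maxpool layer with random weights and run it on a
# single-channel image. All sizes here are hypothetical.
def _example_conv_maxpool_usage():
    layer = ConvolutionMaxpoolLayer(nkernels=8, ninputs=1, kernel_size=5,
                                    stride_in=1, maxpool_size=2)
    image = np.float32(np.random.random((1, 64, 64)))
    d_out = layer.apply_layer(input_image=image)
    return d_out.get()  # (8, 64, 64) feature maps since VALID_SIZE_CROP is False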
class FullyConnectedLayer(object):
def __init__(self, ninputs, noutputs, kernel_size, stride, weight_init=0.005, W=[], b=[]):
self.ninputs = ninputs
self.noutputs = noutputs
self.kernel_size = kernel_size
self.stride_in = stride
self.stride_out = stride
        if len(W) == 0:
self.W = (np.float32(np.random.random((ninputs * kernel_size ** 2, noutputs))) - 0.5) * weight_init * 2
else:
self.W = W
        if len(b) == 0:
self.b = np.zeros((noutputs), dtype=np.float32)
else:
self.b = b
def apply_layer(self, input_image=None, d_input_image=None):
# Calculate feed-forward result
if d_input_image is None:
ishape = input_image.shape
d_input_image = gpuarray.to_gpu(input_image)
else:
ishape = d_input_image.shape
assert(ishape[0] == self.ninputs)
if VALID_SIZE_CROP:
# valid size output
output_size = (ishape[1] - self.kernel_size + 1, ishape[2] - self.kernel_size + 1)
else:
# same size output
output_size = (ishape[1], ishape[2])
d_filters = gpuarray.to_gpu(self.W)
d_bias = gpuarray.to_gpu(self.b)
stride = self.stride_in
width = output_size[0]
height = output_size[1]
channels = self.ninputs
nfilters = self.W.shape[1]
output_width = (width / stride - self.kernel_size + 1) * stride
output_height = (height / stride - self.kernel_size + 1) * stride
block = (32, 32, 1)
grid = (int((output_width - 1) / block[0] + 1), int((output_height - 1) / block[0] + 1))
out_image = numpy.zeros((nfilters, output_width, output_height), dtype=numpy.float32)
d_out_image = gpuarray.to_gpu(out_image)
out_image = None
gpu_hidden_layer(d_input_image, d_filters, d_bias, d_out_image,
numpy.int32(stride), numpy.int32(width), numpy.int32(height), numpy.int32(channels),
numpy.int32(self.kernel_size), numpy.int32(self.kernel_size), numpy.int32(nfilters),
numpy.int32(output_width), numpy.int32(output_height), block=block, grid=grid)
d_input_image = None
d_filters = None
d_bias = None
#output = d_out_image.get()
#d_out_image = None
print 'FC Layer: Complete ({0} pools)'.format(self.stride_in ** 2)
return d_out_image
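# A small sketch (illustrative, not called by the original code) of the
# output-size arithmetic used in apply_layer above; `//` mirrors the floor
# division that the Python 2 `/` performs on the integer shapes.
def _fc_output_size_sketch(width, height, stride, kernel_size):
    output_width = (width // stride - kernel_size + 1) * stride
    output_height = (height // stride - kernel_size + 1) * stride
    return output_width, output_height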
class LogisticRegressionLayer(object):
def __init__(self, ninputs, noutputs, stride, W=[], b=[]):
self.ninputs = ninputs
self.noutputs = noutputs
self.stride_in = stride
self.stride_out = stride
if W == []:
self.W = np.zeros((ninputs, noutputs), dtype=np.float32)
else:
self.W = W
if b == []:
self.b = np.zeros((noutputs), dtype=np.float32)
else:
self.b = b
def apply_layer(self, input_image=None, d_input_image=None):
# Calculate feed-forward result
if d_input_image is None:
ishape = input_image.shape
d_input_image = gpuarray.to_gpu(input_image)
else:
ishape = d_input_image.shape
assert(ishape[0] == self.ninputs)
d_filters = gpuarray.to_gpu(self.W)
d_bias = gpuarray.to_gpu(self.b)
stride = self.stride_in
width = ishape[1]
height = ishape[2]
channels = ishape[0]
filter_width = 1
filter_height = 1
nfilters = self.W.shape[1]
# output_width = (width / stride - filter_width + 1) * stride
# output_height = (height / stride - filter_height + 1) * stride
block = (32, 32, 1)
#grid = (int((output_width - 1) / block[0] + 1), int((output_height - 1) / block[0] + 1))
grid = (int((width - 1) / block[0] + 1), int((height - 1) / block[1] + 1))
out_image = np.zeros((self.noutputs, ishape[1], ishape[2]), dtype=np.float32)
d_out_image = gpuarray.to_gpu(out_image)
out_image = None
# gpu_logistic_regression(d_input_image, d_filters, d_bias, d_out_image,
# numpy.int32(stride), numpy.int32(width), numpy.int32(height), numpy.int32(channels),
# numpy.int32(filter_width), numpy.int32(filter_height), numpy.int32(nfilters),
# numpy.int32(output_width), numpy.int32(output_height), block=block, grid=grid)
gpu_logistic_regression_1to1(d_input_image, d_filters, d_bias, d_out_image,
numpy.int32(width), numpy.int32(height), numpy.int32(channels),
numpy.int32(nfilters), block=block, grid=grid)
d_input_image = None
d_filters = None
d_bias = None
output = d_out_image.get()
d_out_image = None
self.pre_softmax = output
# Apply a numerically stable softmax over the output channels
maxes = np.amax(output, axis=0)
maxes = np.tile(maxes, (self.noutputs, 1, 1))
e = np.exp(output - maxes)
output = e / np.sum(e, axis=0)
print 'LR Layer: Complete.'
return output
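# A standalone sketch (illustrative only) of the numerically stable softmax
# applied at the end of LogisticRegressionLayer.apply_layer: the per-pixel
# maximum is subtracted before exponentiating, then values are normalised
# over the class axis. The helper name and the (nclasses, H, W) layout
# assumption are mine, not the original author's.
def _channelwise_softmax_sketch(scores):
    import numpy as _np
    shifted = scores - _np.max(scores, axis=0, keepdims=True)
    e = _np.exp(shifted)
    return e / _np.sum(e, axis=0, keepdims=True)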
|
|
import numpy as np
from scipy import stats
import scipy.io
from matplotlib import pyplot as plt
import pstats
# Message Passing on Single Node
# G: time-varying (possibly non-symmetric) social network, N x N x D
# Y: evidence (observed data), N x S x D
# MaxIter: max. number of EM iterations
# X: ground-truth states, used only to report test accuracy
def AEMBP_GCHMM(G,Y,C, sC, initial, MaxIter, tol,X, prior={'xi': 0.25, 'alpha':0.1, 'beta':0.1, 'gamma':0.5,
'theta_1':0.75, 'theta_0':0.25, 'ax':2, 'bx':5,
'aa':2, 'ba':5, 'ab':2,'bb':5, 'ar':2, 'br':5,
'al':2,'bl':2,'a0':2,'b0':5}):
Xr = X
N, _, D = G.shape # N*N*D
_, S, _ = Y.shape # N*S*D
for i in range(D):
Gt = G[:,:,i]
G[:,:,i] = (np.eye(N) + Gt + Gt.T) > 0  # add self-loops and symmetrize
# Initialization
if initial == 'f':
# A good trial
xi = prior['xi']; alpha = prior['alpha']
beta = prior['beta']; gamma = prior['gamma']
theta1 = prior.get("theta_1")*np.ones((1,S))
theta0 = prior.get("theta_0")*np.ones((1,S))
else:
# Initialize by random sampling; this has a chance to fail
ax=prior['ax']; bx=prior['bx']
aa=prior['aa']; ba=prior['ba']
ab=prior['ab']; bb=prior['bb']
ar=prior['ar']; br=prior['br']
a1=prior['al']; b1=prior['bl']
a0=prior['a0']; b0=prior['b0']
xi = stats.beta.rvs(ax, bx, size=1)
alpha = stats.beta.rvs(aa, ba, size=1)
beta = stats.beta.rvs(ab, bb, size=1)
gamma = stats.beta.rvs(ar,br, size=1)
theta1 = stats.beta.rvs(a1,b1,size=(1,S))
theta0 = stats.beta.rvs(a0,b0,size=(1,S))
para = np.c_[xi, alpha, beta, gamma, theta1, theta0]
#print(para)
# Construct Belief of each X
PX = np.zeros((N,2,D+1))
PX[:,:,0] = np.hstack(((1-xi)*np.ones((N,1)), xi*np.ones((N,1)))) # Root node X(0): N x 2
LX = np.zeros((N,2,D+1))
# Construct Message between X
PXX = np.ones((N,N,2,D))
LXX = np.ones((N,N,2,D))
# Iteration
for T in range(MaxIter):
#print(T)
para1 = para
f0 = lambda u: (1-beta)**u
f1 = lambda u: 1-(1-alpha)*(1-beta)**u
# Expectation
# Update PX,LX,BEL
LX[:,:,0] = np.hstack((np.prod(LXX[:,:,0,0],1)[:, None], np.prod(LXX[:,:,1,0],1)[:, None]))
for i in range(1,D):
PX[:,:,i] = fGX(G[:,:,i-1], PXX[:,:,:,i-1], alpha, gamma, C, sC, f0, f1)
LX[:,:,i] = np.hstack((fYX(Y[:,:,i-1], theta0) * np.prod(LXX[:,:,0,i],1)[:, None],
fYX(Y[:,:,i-1], theta1) * np.prod(LXX[:,:,1,i],1)[:, None]))
PX[:,:,D] = fGX(G[:,:,D-1], PXX[:,:,:,D-1], alpha, gamma, C, sC, f0, f1)
LX[:,:,D] = np.hstack((fYX(Y[:,:,D-1],theta0), fYX(Y[:,:,D-1], theta1)))
BEL = PX*LX
BEL = BEL/np.sum(BEL,1,keepdims=True)
#BEL = bsxfun(@rdivide,BEL,np.sum(BEL,1)) #normalization
# test session: report prediction accuracy against the ground truth
tmpbel = np.transpose(BEL, axes = [0, 2, 1])
xpred = tmpbel[:,:,1] > tmpbel[:,:,0]
acc = np.sum(xpred==Xr) / N / (D+1)  # prediction accuracy (not used further)
LXX1 = LXX
for i in range(D):
# Update LXX
tp0 = np.sum(LX[:,:,i+1]*np.hstack(((1-alpha)*SumProd(G[:,:,i],PXX[:,:,:,i],f0,C,sC),SumProd(G[:,:,i],PXX[:,:,:,i],f1,C,sC))), 1)
tp1 = (LX[:,:,i+1] @ np.vstack((gamma,1-gamma))).reshape(-1)
LXX[:,:,0,i] = LXX[:,:,0,i] * (1-np.eye(N)) + np.diag(tp0 / (tp0+tp1))
LXX[:,:,1,i] = LXX[:,:,1,i] * (1-np.eye(N)) + np.diag(tp1 / (tp0+tp1))
Gt = G[:,:,i] - np.eye(N)
for j in range(N):
ind = np.where(Gt[j,:]==1)[0]
for k in range(len(ind)):
if len(ind)==1:
temp0 = LX[j,0,i+1] * (gamma*PXX[j,j,1,i] + (1-alpha)*PXX[j,j,0,i]) + LX[j,1,i+1] * ((1-gamma)*PXX[j,j,1,i] + alpha*PXX[j,j,0,i])
temp1 = LX[j,0,i+1] * (gamma*PXX[j,j,1,i] + (1-alpha)*(1-beta)*PXX[j,j,0,i]) + LX[j,1,i+1] * ((1-gamma)*PXX[j,j,1,i]+(1-(1-alpha)*(1-beta))*PXX[j,j,0,i])
else:
temp0 = LX[j,0,i+1] * (gamma*PXX[j,j,1,i] + (1-alpha)*PXX[j,j,0,i]*sp(0,Gt[j,:],ind[k],PXX[j:(j+1),:,:,i],f0,C[int(np.sum(Gt[j,:]))-2][0], sC[int(np.sum(Gt[j,:]))-2][0])) + LX[j,1,i+1]*((1-gamma)*PXX[j,j,1,i] + PXX[j,j,0,i]*sp(0, Gt[j,:], ind[k], PXX[j:(j+1),:,:,i], f1, C[int(np.sum(Gt[j,:]))-2][0], sC[int(np.sum(Gt[j,:]))-2][0]))
temp1 = LX[j,0,i+1] * (gamma*PXX[j,j,1,i] + (1-alpha)*PXX[j,j,0,i]*sp(1,Gt[j,:],ind[k],PXX[j:(j+1),:,:,i],f0,C[int(np.sum(Gt[j,:]))-2][0], sC[int(np.sum(Gt[j,:]))-2][0])) + LX[j,1,i+1]*((1-gamma)*PXX[j,j,1,i] + PXX[j,j,0,i]*sp(1, Gt[j,:], ind[k], PXX[j:(j+1),:,:,i], f1, C[int(np.sum(Gt[j,:]))-2][0], sC[int(np.sum(Gt[j,:]))-2][0]))
LXX[j,ind[k],0,i] = temp0/(temp0+temp1)
LXX[j,ind[k],1,i] = temp1/(temp0+temp1)
# Update PXX
tmp0 = BEL[:,0,i] / LXX1[:,:,0,i]
tmp1 = BEL[:,1,i] / LXX1[:,:,1,i]
tmp0[np.isnan(tmp0)] = 0
tmp1[np.isnan(tmp1)] = 0
tmp = tmp0 + tmp1
# normalization
tmp0 = tmp0 / tmp
tmp1 = tmp1 / tmp
tmp0[G[:,:,i]==0] = 1
tmp1[G[:,:,i]==0] = 1
PXX[:,:,0,i] = tmp0
PXX[:,:,1,i] = tmp1
# Approximate Maximization
Xpred = np.transpose(BEL,axes =[0,2,1])
X = 0 + (Xpred[:,:,1] > Xpred[:,:,0])
# Num. of previous infection
NPI = NumPreInf(X,G)
xi = np.sum(X[:,0]) / N
gamma = np.sum((X[:,0:D]==1)*(X[:,1:]==0))/np.sum(X[:,0:D]==1)
alpha = np.sum((X[:,0:D]==0)*X[:,1:]*(NPI==0))/np.sum((X[:,0:D]==0)*(NPI==0))
beta = UpdateBeta(NPI, X, alpha)
temp = np.transpose(np.repeat(np.expand_dims(X[:,1:], axis=2),
S, axis = 2), axes = [0, 2, 1])
theta1 = np.sum(Y * temp, axis = (0, 2)) / np.sum(temp, axis = (0, 2)).reshape((1, S))
theta0 = np.sum(Y * (temp == 0), axis = (0, 2)) / np.sum((temp == 0), axis = (0, 2)).reshape((1, S))
# Convergence
para = np.c_[xi, alpha, beta, gamma, theta1, theta0]
diff = para1 - para
if np.max(diff) < tol:
break
return [BEL, para]
# Yt is a matrix
def fYX(Yt,theta):
#compute the message from Y->X
L = np.prod((Yt * theta + (1-Yt) * (1-theta)),1, keepdims=True)
return(L)
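# A toy call (added for illustration; the values are hypothetical) showing
# that fYX returns one evidence likelihood per node, shape N x 1: each row is
# the product over symptoms of theta**y * (1-theta)**(1-y).
def _fYX_example():
    Yt = np.array([[1, 0, 1],
                   [0, 0, 0]])
    theta = 0.8 * np.ones((1, 3))
    return fYX(Yt, theta)  # [[0.8*0.2*0.8], [0.2*0.2*0.2]] = [[0.128], [0.008]]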
def fGX(Gt,PXXt,a,r,C,sC,f0,f1):
# compute the belief from parent
tmp0 = (r * np.diag(PXXt[:,:,1]))[:,None] + ((1-a)*np.diag(PXXt[:,:,0]))[:,None] * SumProd(Gt,PXXt,f0,C,sC)
tmp1 = ((1-r) * np.diag(PXXt[:,:,1]))[:,None] + (np.diag(PXXt[:,:,0]))[:,None] * SumProd(Gt,PXXt,f1,C,sC)
P = np.hstack((tmp0,tmp1))
return(P)
def SumProd(Gtt,PXXt,fun,C,sC):
# compute the form: \sum fun(...) \prod f_i
Gt = Gtt.copy()
N, _ = Gt.shape
Gt = Gt - np.eye(N)
cnt = np.sum(Gt,1)
res = np.zeros((N,1))#
for i in range(N):
k = int(cnt[i])
if k == 0:
res[i] = 1
continue
tmp = PXXt[i:(i+1), Gt[i,:]==1,:]
tmp0 = np.tile(tmp[:,:,0],(2**k,1))
tmp10 = np.tile(tmp[:,:,1]-tmp[:,:,0],(2**k,1))
res[i] = np.sum(fun(sC[k-1]) * np.prod(C[k-1][0]*tmp10+tmp0,1))
return(res)
def sp(x,Gjt,indk,P,fun,Ck,sCk):
# compute sum product
Gtj = Gjt.copy()
Gtj[indk] = 0
tmp = P[0:1, Gtj==1, :]
H, _ = Ck.shape
tmp0 =np.tile(tmp[:,:,0],(H,1))
tmp10 = np.tile(tmp[:,:,1]-tmp[:,:,0],(H,1))
res = np.sum(fun(x + sCk) * np.prod(Ck*tmp10+tmp0,1))
return(res)
def NumPreInf(X,G):
# count how many connected nodes were in state 1 at the previous timestep
N, _, D = G.shape
NPI = np.zeros((N,D)) # Num. of previous infection
for i in range(D):
Gt = G[:,:,i] - np.eye(N)
Xt = X[:,i]
tmp = (Gt + Gt.T)>0
NPI[:,i] = tmp @ Xt
return(NPI)
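# A toy call (added for illustration; the graph and states are hypothetical)
# showing what NumPreInf counts: for each node and timestep, the number of
# neighbours that were in state 1 at the previous timestep.
def _NumPreInf_example():
    G = np.zeros((3, 3, 1))
    G[:, :, 0] = np.array([[1, 1, 0],
                           [1, 1, 1],
                           [0, 1, 1]])  # chain 0-1-2 with self-loops
    X = np.array([[1, 0],
                  [0, 1],
                  [0, 0]])              # only node 0 infected at time 0
    return NumPreInf(X, G)              # expected [[0.], [1.], [0.]]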
def UpdateBeta(NPI,X,alpha):
_, D = NPI.shape
#mcnt = max(NPI.ravel())
mcnt = int(np.max(NPI))
B = np.zeros((mcnt,1))
for i in range(mcnt):
tmp = np.sum((X[:,0:D]==0)*(X[:,1:D+1]==0)*(NPI==(i+1))) / np.sum((X[:,0:D]==0)*(NPI==(i+1)))
B[i] = min(np.power(tmp/(1-alpha),(1/(i+1))), 1)
beta = 1 - np.mean(B)
return(beta)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from itertools import chain
from cPickle import dumps
from collections import OrderedDict
import bson
import tg
import jinja2
from paste.deploy.converters import asint
from pylons import tmpl_context as c, app_globals as g
from ming.base import Object
from ming.orm import mapper, session, ThreadLocalORMSession
from allura.lib import utils
from allura.lib import helpers as h
from allura.model.repository import CommitDoc
from allura.model.repository import CommitRunDoc
from allura.model.repository import Commit, Tree, LastCommit, ModelCache
from allura.model.index import ArtifactReferenceDoc, ShortlinkDoc
from allura.model.auth import User
from allura.model.timeline import TransientActor
log = logging.getLogger(__name__)
QSIZE = 100
def refresh_repo(repo, all_commits=False, notify=True, new_clone=False, commits_are_new=None):
if commits_are_new is None:
commits_are_new = not all_commits and not new_clone
all_commit_ids = commit_ids = list(repo.all_commit_ids())
if not commit_ids:
# the repo is empty, no need to continue
return
new_commit_ids = unknown_commit_ids(commit_ids)
stats_log = h.log_action(log, 'commit')
for ci in new_commit_ids:
stats_log.info(
'',
meta=dict(
module='scm-%s' % repo.repo_id,
read='0'))
if not all_commits:
# Skip commits that are already in the DB
commit_ids = new_commit_ids
log.info('Refreshing %d commits on %s', len(commit_ids), repo.full_fs_path)
# Refresh commits
seen = set()
for i, oid in enumerate(commit_ids):
repo.refresh_commit_info(oid, seen, not all_commits)
if (i + 1) % 100 == 0:
log.info('Refresh commit info %d: %s', (i + 1), oid)
refresh_commit_repos(all_commit_ids, repo)
# Refresh child references
for i, oid in enumerate(commit_ids):
ci = CommitDoc.m.find(dict(_id=oid), validate=False).next()
refresh_children(ci)
if (i + 1) % 100 == 0:
log.info('Refresh child info %d for parents of %s',
(i + 1), ci._id)
if repo._refresh_precompute:
# Refresh commit runs
commit_run_ids = commit_ids
# Check if the CommitRuns for the repo are in a good state by checking for
# a CommitRunDoc that contains the last known commit. If there isn't one,
# the CommitRuns for this repo are in a bad state - rebuild them
# entirely.
if commit_run_ids != all_commit_ids:
last_commit = last_known_commit_id(all_commit_ids, new_commit_ids)
log.info('Last known commit id: %s', last_commit)
if not CommitRunDoc.m.find(dict(commit_ids=last_commit)).count():
log.info('CommitRun incomplete, rebuilding with all commits')
commit_run_ids = all_commit_ids
log.info('Starting CommitRunBuilder for %s', repo.full_fs_path)
rb = CommitRunBuilder(commit_run_ids)
rb.run()
rb.cleanup()
log.info('Finished CommitRunBuilder for %s', repo.full_fs_path)
# Clear any existing caches for branches/tags
if repo.cached_branches:
repo.cached_branches = []
session(repo).flush()
if repo.cached_tags:
repo.cached_tags = []
session(repo).flush()
# Computing branches/tags on the first view can be expensive,
# so warm the cache here instead of on the first view.
repo.get_branches()
repo.get_tags()
if commits_are_new:
for commit in commit_ids:
new = repo.commit(commit)
user = User.by_email_address(new.committed.email)
if user is None:
user = User.by_username(new.committed.name)
if user is not None:
g.statsUpdater.newCommit(new, repo.app_config.project, user)
actor = user or TransientActor(
activity_name=new.committed.name or new.committed.email)
g.director.create_activity(actor, 'committed', new,
related_nodes=[repo.app_config.project],
tags=['commit', repo.tool.lower()])
from allura.webhooks import RepoPushWebhookSender
by_branches, by_tags = _group_commits(repo, commit_ids)
params = []
for b, commits in by_branches.iteritems():
ref = u'refs/heads/{}'.format(b) if b != '__default__' else None
params.append(dict(commit_ids=commits, ref=ref))
for t, commits in by_tags.iteritems():
ref = u'refs/tags/{}'.format(t)
params.append(dict(commit_ids=commits, ref=ref))
if params:
RepoPushWebhookSender().send(params)
log.info('Refresh complete for %s', repo.full_fs_path)
g.post_event('repo_refreshed', len(commit_ids), all_commits, new_clone)
# Send notifications
if notify:
send_notifications(repo, reversed(commit_ids))
def refresh_commit_repos(all_commit_ids, repo):
'''Refresh the list of repositories within which a set of commits are
contained'''
for oids in utils.chunked_iter(all_commit_ids, QSIZE):
for ci in CommitDoc.m.find(dict(
_id={'$in': list(oids)},
repo_ids={'$ne': repo._id})):
oid = ci._id
ci.repo_ids.append(repo._id)
index_id = 'allura.model.repository.Commit#' + oid
ref = ArtifactReferenceDoc(dict(
_id=index_id,
artifact_reference=dict(
cls=bson.Binary(dumps(Commit)),
project_id=repo.app.config.project_id,
app_config_id=repo.app.config._id,
artifact_id=oid),
references=[]))
link0 = ShortlinkDoc(dict(
_id=bson.ObjectId(),
ref_id=index_id,
project_id=repo.app.config.project_id,
app_config_id=repo.app.config._id,
link=repo.shorthand_for_commit(oid)[1:-1],
url=repo.url_for_commit(oid)))
# Always create a link for the full commit ID
link1 = ShortlinkDoc(dict(
_id=bson.ObjectId(),
ref_id=index_id,
project_id=repo.app.config.project_id,
app_config_id=repo.app.config._id,
link=oid,
url=repo.url_for_commit(oid)))
ci.m.save(safe=False, validate=False)
ref.m.save(safe=False, validate=False)
link0.m.save(safe=False, validate=False)
link1.m.save(safe=False, validate=False)
def refresh_children(ci):
'''Refresh the list of children of the given commit'''
CommitDoc.m.update_partial(
dict(_id={'$in': ci.parent_ids}),
{'$addToSet': dict(child_ids=ci._id)},
multi=True)
class CommitRunBuilder(object):
'''Class used to build up linear runs of single-parent commits'''
def __init__(self, commit_ids):
self.commit_ids = commit_ids
self.run_index = {} # by commit ID
self.runs = {} # by run ID
self.reasons = {} # reasons to stop merging runs
def run(self):
'''Build up the runs'''
for oids in utils.chunked_iter(self.commit_ids, QSIZE):
oids = list(oids)
for ci in CommitDoc.m.find(dict(_id={'$in': oids})):
if ci._id in self.run_index:
continue
self.run_index[ci._id] = ci._id
self.runs[ci._id] = CommitRunDoc(dict(
_id=ci._id,
parent_commit_ids=ci.parent_ids,
commit_ids=[ci._id],
commit_times=[ci.authored['date']]))
self.merge_runs()
log.info('%d runs', len(self.runs))
for rid, run in sorted(self.runs.items()):
log.info('%32s: %r', self.reasons.get(rid, 'none'), run._id)
for run in self.runs.itervalues():
run.m.save()
return self.runs
def _all_runs(self):
'''Find all runs containing this builder's commit IDs'''
runs = {}
for oids in utils.chunked_iter(self.commit_ids, QSIZE):
oids = list(oids)
for run in CommitRunDoc.m.find(dict(commit_ids={'$in': oids})):
runs[run._id] = run
for run in CommitRunDoc.m.find(dict(parent_commit_ids={'$in': oids})):
runs[run._id] = run
seen_run_ids = set()
runs = runs.values()
while runs:
run = runs.pop()
if run._id in seen_run_ids:
continue
seen_run_ids.add(run._id)
yield run
for run in CommitRunDoc.m.find(
dict(commit_ids={'$in': run.parent_commit_ids})):
runs.append(run)
def cleanup(self):
'''Delete non-maximal runs and merge any new runs with existing runs'''
runs = dict(
(run['commit_ids'][0], run)
for run in self._all_runs())
for rid, run in runs.items():
p_cis = run['parent_commit_ids']
if len(p_cis) != 1:
continue
parent_run = runs.get(p_cis[0], None)
if parent_run is None:
continue
run['commit_ids'] += parent_run['commit_ids']
run['commit_times'] += parent_run['commit_times']
run['parent_commit_ids'] = parent_run['parent_commit_ids']
run.m.save()
parent_run.m.delete()
del runs[p_cis[0]]
for run1 in runs.values():
# if run1 is a subset of another run, delete it
if CommitRunDoc.m.find(dict(commit_ids={'$all': run1.commit_ids},
_id={'$ne': run1._id})).count():
log.info('... delete %r (subset of another run)', run1)
run1.m.delete()
continue
for run2 in CommitRunDoc.m.find(dict(
commit_ids=run1.commit_ids[0])):
if run1._id == run2._id:
continue
log.info('... delete %r (part of %r)', run2, run1)
run2.m.delete()
def merge_runs(self):
'''Find partial runs that may be merged and merge them'''
while True:
for run_id, run in self.runs.iteritems():
if len(run.parent_commit_ids) != 1:
self.reasons[run_id] = '%d parents' % len(
run.parent_commit_ids)
continue
p_oid = run.parent_commit_ids[0]
p_run_id = self.run_index.get(p_oid)
if p_run_id is None:
self.reasons[run_id] = 'parent commit not found'
continue
p_run = self.runs.get(p_run_id)
if p_run is None:
self.reasons[run_id] = 'parent run not found'
continue
if p_run.commit_ids[0] != p_oid:
self.reasons[
run_id] = 'parent does not start with parent commit'
continue
run.commit_ids += p_run.commit_ids
run.commit_times += p_run.commit_times
run.parent_commit_ids = p_run.parent_commit_ids
for oid in p_run.commit_ids:
self.run_index[oid] = run_id
break
else:
break
del self.runs[p_run_id]
def unknown_commit_ids(all_commit_ids):
'''filter out all commit ids that have already been cached'''
result = []
for chunk in utils.chunked_iter(all_commit_ids, QSIZE):
chunk = list(chunk)
q = CommitDoc.m.find(dict(_id={'$in': chunk}))
known_commit_ids = set(ci._id for ci in q)
result += [oid for oid in chunk if oid not in known_commit_ids]
return result
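# A simplified sketch (illustrative only, not used by the refresh code) of the
# same chunk-and-filter pattern as unknown_commit_ids, with a plain `known_ids`
# set standing in for the CommitDoc query; the helper name and argument are
# hypothetical.
def _unknown_commit_ids_sketch(all_commit_ids, known_ids):
    result = []
    for chunk in utils.chunked_iter(all_commit_ids, QSIZE):
        result += [oid for oid in chunk if oid not in known_ids]
    return result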
def send_notifications(repo, commit_ids):
"""Create appropriate notification and feed objects for a refresh
:param repo: A repository artifact instance.
:type repo: Repository
:param commit_ids: A list of commit hash strings, oldest to newest
:type commit_ids: list
"""
from allura.model import Feed, Notification
commit_msgs = []
base_url = tg.config['base_url']
for oids in utils.chunked_iter(commit_ids, QSIZE):
chunk = list(oids)
index = dict(
(doc._id, doc)
for doc in Commit.query.find(dict(_id={'$in': chunk})))
for oid in chunk:
ci = index[oid]
href = repo.url_for_commit(oid)
title = _title(ci.message)
summary = _summarize(ci.message)
Feed.post(
repo, title=title,
description='%s<br><a href="%s">View Changes</a>' % (
summary, href),
author_link=ci.author_url,
author_name=ci.authored.name,
link=href,
unique_id=href)
summary = g.markdown_commit.convert(ci.message) if ci.message else ""
current_branch = repo.symbolics_for_commit(ci)[0] # only the head of a branch will have this
commit_msgs.append(dict(
author=ci.authored.name,
date=ci.authored.date.strftime("%m/%d/%Y %H:%M"),
summary=summary,
branches=current_branch,
commit_url=base_url + href,
shorthand_id=ci.shorthand_id()))
# fill out the branch info for all the other commits
prev_branch = None
for c_msg in reversed(commit_msgs):
if not c_msg['branches']:
c_msg['branches'] = prev_branch
prev_branch = c_msg['branches']
# mark which ones are first on a branch and need the branch name shown
last_branch = None
for c_msg in commit_msgs:
if c_msg['branches'] != last_branch:
c_msg['show_branch_name'] = True
last_branch = c_msg['branches']
if commit_msgs:
if len(commit_msgs) > 1:
subject = u"{} new commits to {}".format(len(commit_msgs), repo.app.config.options.mount_label)
else:
commit = commit_msgs[0]
subject = u'New commit {} by {}'.format(commit['shorthand_id'], commit['author'])
text = g.jinja2_env.get_template("allura:templates/mail/commits.md").render(
commit_msgs=commit_msgs,
max_num_commits=asint(tg.config.get('scm.notify.max_commits', 100)),
)
Notification.post(
artifact=repo,
topic='metadata',
subject=subject,
text=text)
def _title(message):
if not message:
return ''
line = message.splitlines()[0]
return jinja2.filters.do_truncate(None, line, 200, killwords=True, leeway=3)
def _summarize(message):
if not message:
return ''
summary = []
for line in message.splitlines():
line = line.rstrip()
if line:
summary.append(line)
else:
break
return ' '.join(summary)
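# An illustrative example (hypothetical commit message, not from the original
# tests) of how _title() and _summarize() treat a multi-line message: both use
# only the text up to the first blank line.
def _example_commit_message_formatting():
    message = "Fix login redirect\n\nAlso tidy up the session helper.\n"
    return _title(message), _summarize(message)
    # -> ('Fix login redirect', 'Fix login redirect')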
def last_known_commit_id(all_commit_ids, new_commit_ids):
"""
Return the newest "known" (cached in mongo) commit id.
Params:
all_commit_ids: Every commit id from the repo on disk, sorted oldest to
newest.
new_commit_ids: Commit ids that are not yet cached in mongo, sorted
oldest to newest.
"""
if not all_commit_ids:
return None
if not new_commit_ids:
return all_commit_ids[-1]
return all_commit_ids[all_commit_ids.index(new_commit_ids[0]) - 1]
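# An illustrative example (hypothetical ids): with commits a..d on disk and
# c, d not yet cached, the newest cached commit is b.
def _last_known_commit_id_example():
    return last_known_commit_id(['a', 'b', 'c', 'd'], ['c', 'd'])  # -> 'b'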
def _group_commits(repo, commit_ids):
by_branches = {}
by_tags = {}
# svn has no branches, so we need __default__ as a fallback to collect
# all commits into
current_branches = ['__default__']
current_tags = []
for commit in commit_ids:
ci = repo.commit(commit)
branches, tags = repo.symbolics_for_commit(ci)
if branches:
current_branches = branches
if tags:
current_tags = tags
for b in current_branches:
if b not in by_branches.keys():
by_branches[b] = []
by_branches[b].append(commit)
for t in current_tags:
if t not in by_tags.keys():
by_tags[t] = []
by_tags[t].append(commit)
return by_branches, by_tags
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import re
import textwrap
import uuid
try:
from oslo_serialization import jsonutils
except ImportError:
from oslo.serialization import jsonutils
try:
from oslo_utils import encodeutils
except ImportError:
from oslo.utils import encodeutils
import pkg_resources
import prettytable
import six
from . import exceptions
from .i18n import _
from .common import cliutils
VALID_KEY_REGEX = re.compile(r"[\w\.\- :]+$", re.UNICODE)
def add_resource_manager_extra_kwargs_hook(f, hook):
"""Add hook to bind CLI arguments to ResourceManager calls.
The `do_foo` calls in shell.py will receive CLI args and then in turn pass
them through to the ResourceManager. Before passing through the args, the
hooks registered here will be called, giving us a chance to add extra
kwargs (taken from the command-line) to what's passed to the
ResourceManager.
"""
if not hasattr(f, 'resource_manager_kwargs_hooks'):
f.resource_manager_kwargs_hooks = []
names = [h.__name__ for h in f.resource_manager_kwargs_hooks]
if hook.__name__ not in names:
f.resource_manager_kwargs_hooks.append(hook)
def get_resource_manager_extra_kwargs(f, args, allow_conflicts=False):
"""Return extra_kwargs by calling resource manager kwargs hooks."""
hooks = getattr(f, "resource_manager_kwargs_hooks", [])
extra_kwargs = {}
for hook in hooks:
hook_kwargs = hook(args)
hook_name = hook.__name__
conflicting_keys = set(hook_kwargs.keys()) & set(extra_kwargs.keys())
if conflicting_keys and not allow_conflicts:
msg = (_("Hook '%(hook_name)s' is attempting to redefine "
"attributes '%(conflicting_keys)s'") %
{'hook_name': hook_name,
'conflicting_keys': conflicting_keys})
raise exceptions.NoUniqueMatch(msg)
extra_kwargs.update(hook_kwargs)
return extra_kwargs
def pretty_choice_dict(d):
"""Returns a formatted dict as 'key=value'."""
return cliutils.pretty_choice_list(
['%s=%s' % (k, d[k]) for k in sorted(d.keys())])
def print_list(objs, fields, formatters={}, sortby_index=None):
if sortby_index is None:
sortby = None
else:
sortby = fields[sortby_index]
mixed_case_fields = ['serverId']
pt = prettytable.PrettyTable([f for f in fields], caching=False)
pt.align = 'l'
for o in objs:
row = []
for field in fields:
if field in formatters:
row.append(formatters[field](o))
else:
if field in mixed_case_fields:
field_name = field.replace(' ', '_')
else:
field_name = field.lower().replace(' ', '_')
data = getattr(o, field_name, '')
if data is None:
data = '-'
row.append(data)
pt.add_row(row)
if sortby is not None:
result = encodeutils.safe_encode(pt.get_string(sortby=sortby))
else:
result = encodeutils.safe_encode(pt.get_string())
if six.PY3:
result = result.decode()
print(result)
def _flatten(data, prefix=None):
"""Flatten a dict, using name as a prefix for the keys of dict.
>>> _flatten('cpu_info', {'arch':'x86_64'})
[('cpu_info_arch': 'x86_64')]
"""
if isinstance(data, dict):
for key, value in six.iteritems(data):
new_key = '%s_%s' % (prefix, key) if prefix else key
if isinstance(value, (dict, list)):
for item in _flatten(value, new_key):
yield item
else:
yield new_key, value
else:
yield prefix, data
def flatten_dict(data):
"""Return a new dict whose sub-dicts have been merged into the
original. Each of the parents keys are prepended to the child's
to prevent collisions. Any string elements will be JSON parsed
before flattening.
>>> flatten_dict({'service': {'host':'cloud9@compute-068', 'id': 143}})
{'service_host': colud9@compute-068', 'service_id': 143}
"""
data = data.copy()
# Try and decode any nested JSON structures.
for key, value in six.iteritems(data):
if isinstance(value, six.string_types):
try:
data[key] = json.loads(value)
except ValueError:
pass
return dict(_flatten(data))
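# An illustrative call (hypothetical data, not from the original tests)
# showing how nested dicts are merged into a single level with prefixed keys.
def _flatten_dict_example():
    return flatten_dict({'service': {'host': 'compute-068', 'id': 143},
                         'state': 'up'})
    # -> {'service_host': 'compute-068', 'service_id': 143, 'state': 'up'}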
def print_dict(d, dict_property="Property", dict_value="Value", wrap=0):
pt = prettytable.PrettyTable([dict_property, dict_value], caching=False)
pt.align = 'l'
for k, v in sorted(d.items()):
# convert dict to str to check length
if isinstance(v, (dict, list)):
v = jsonutils.dumps(v)
if wrap > 0:
v = textwrap.fill(str(v), wrap)
# if value has a newline, add in multiple rows
# e.g. fault with stacktrace
if v and isinstance(v, six.string_types) and r'\n' in v:
lines = v.strip().split(r'\n')
col1 = k
for line in lines:
pt.add_row([col1, line])
col1 = ''
else:
if v is None:
v = '-'
pt.add_row([k, v])
result = encodeutils.safe_encode(pt.get_string())
if six.PY3:
result = result.decode()
print(result)
def find_resource(manager, name_or_id, **find_args):
"""Helper for the _find_* methods."""
# for str id which is not uuid (for Flavor and Keypair search currently)
if getattr(manager, 'is_alphanum_id_allowed', False):
try:
return manager.get(name_or_id)
except exceptions.NotFound:
pass
# try to get entity as integer id
try:
return manager.get(int(name_or_id))
except (TypeError, ValueError, exceptions.NotFound):
pass
# now try to get entity as uuid
try:
tmp_id = encodeutils.safe_encode(name_or_id)
if six.PY3:
tmp_id = tmp_id.decode()
uuid.UUID(tmp_id)
return manager.get(tmp_id)
except (TypeError, ValueError, exceptions.NotFound):
pass
try:
try:
resource = getattr(manager, 'resource_class', None)
name_attr = resource.NAME_ATTR if resource else 'name'
kwargs = {name_attr: name_or_id}
kwargs.update(find_args)
return manager.find(**kwargs)
except exceptions.NotFound:
pass
# finally try to find entity by human_id
try:
return manager.find(human_id=name_or_id, **find_args)
except exceptions.NotFound:
msg = (_("No %(class)s with a name or ID of '%(name)s' exists.") %
{'class': manager.resource_class.__name__.lower(),
'name': name_or_id})
raise exceptions.CommandError(msg)
except exceptions.NoUniqueMatch:
msg = (_("Multiple %(class)s matches found for '%(name)s', use an ID "
"to be more specific.") %
{'class': manager.resource_class.__name__.lower(),
'name': name_or_id})
raise exceptions.CommandError(msg)
def _format_servers_list_networks(server):
output = []
for (network, addresses) in server.networks.items():
if len(addresses) == 0:
continue
addresses_csv = ', '.join(addresses)
group = "%s=%s" % (network, addresses_csv)
output.append(group)
return '; '.join(output)
def _format_security_groups(groups):
return ', '.join(group['name'] for group in groups)
def _format_field_name(attr):
"""Format an object attribute in a human-friendly way."""
# Split at ':' and leave the extension name as-is.
parts = attr.rsplit(':', 1)
name = parts[-1].replace('_', ' ')
# Don't title() on mixed case
if name.isupper() or name.islower():
name = name.title()
parts[-1] = name
return ': '.join(parts)
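# An illustrative call (hypothetical attribute names) showing that the
# extension prefix is kept as-is while a single-case attribute is title-cased.
def _format_field_name_example():
    return (_format_field_name('OS-EXT-SRV-ATTR:instance_name'),
            _format_field_name('id'))
    # -> ('OS-EXT-SRV-ATTR: Instance Name', 'Id')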
def _make_field_formatter(attr, filters=None):
"""
Given an object attribute, return a formatted field name and a
formatter suitable for passing to print_list.
Optionally pass a dict mapping attribute names to a function. The function
will be passed the value of the attribute and should return the string to
display.
"""
filter_ = None
if filters:
filter_ = filters.get(attr)
def get_field(obj):
field = getattr(obj, attr, '')
if field and filter_:
field = filter_(field)
return field
name = _format_field_name(attr)
formatter = get_field
return name, formatter
def safe_issubclass(*args):
"""Like issubclass, but will just return False if not a class."""
try:
if issubclass(*args):
return True
except TypeError:
pass
return False
def do_action_on_many(action, resources, success_msg, error_msg):
"""Helper to run an action on many resources."""
failure_flag = False
for resource in resources:
try:
action(resource)
print(success_msg % resource)
except Exception as e:
failure_flag = True
print(e)
if failure_flag:
raise exceptions.CommandError(error_msg)
def _load_entry_point(ep_name, name=None):
"""Try to load the entry point ep_name that matches name."""
for ep in pkg_resources.iter_entry_points(ep_name, name=name):
try:
return ep.load()
except (ImportError, pkg_resources.UnknownExtra, AttributeError):
continue
def is_integer_like(val):
"""Returns validation of a value as an integer."""
try:
int(val)
return True
except (TypeError, ValueError, AttributeError):
return False
def validate_flavor_metadata_keys(keys):
for key in keys:
valid_name = VALID_KEY_REGEX.match(key)
if not valid_name:
msg = _('Invalid key: "%s". Keys may only contain letters, '
'numbers, spaces, underscores, periods, colons and '
'hyphens.')
raise exceptions.CommandError(msg % key)
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' Parameters are one of the core concepts of Luigi.
All Parameters sit on :class:`~luigi.task.Task` classes.
See :ref:`Parameter` for more info on how to define parameters.
'''
import abc
import datetime
import warnings
try:
from ConfigParser import NoOptionError, NoSectionError
except ImportError:
from configparser import NoOptionError, NoSectionError
from luigi import task_register
from luigi import six
from luigi import configuration
from luigi.deprecate_kwarg import deprecate_kwarg
from datetime import timedelta
_no_value = object()
class ParameterException(Exception):
"""
Base exception.
"""
pass
class MissingParameterException(ParameterException):
"""
Exception signifying that there was a missing Parameter.
"""
pass
class UnknownParameterException(ParameterException):
"""
Exception signifying that an unknown Parameter was supplied.
"""
pass
class DuplicateParameterException(ParameterException):
"""
Exception signifying that a Parameter was specified multiple times.
"""
pass
class UnknownConfigException(ParameterException):
"""
Exception signifying that the ``config_path`` for the Parameter could not be found.
"""
pass
class Parameter(object):
"""
An untyped Parameter
Parameters are objects set on the Task class level to make it possible to parameterize tasks.
For instance:
class MyTask(luigi.Task):
foo = luigi.Parameter()
This makes it possible to instantiate multiple tasks, eg ``MyTask(foo='bar')`` and
``MyTask(foo='baz')``. The task will then have the ``foo`` attribute set appropriately.
There are subclasses of ``Parameter`` that define what type the parameter has. This is not
enforced within Python, but is used for command line interaction.
When a task is instantiated, it will first use any argument as the value of the parameter, eg.
if you instantiate a = TaskA(x=44) then a.x == 44. If this does not exist, it will use the value
of the Parameter object, which is defined on a class level. This will be resolved in this
order of falling priority:
* Any value provided on the command line on the class level (eg. ``--TaskA-param xyz``)
* Any value provided via config (using the ``config_path`` argument)
* Any default value set using the ``default`` flag.
"""
counter = 0
"""non-atomically increasing counter used for ordering parameters."""
@deprecate_kwarg('is_boolean', 'is_bool', False)
def __init__(self, default=_no_value, is_boolean=False, is_global=False, significant=True, description=None,
config_path=None, positional=True):
"""
:param default: the default value for this parameter. This should match the type of the
Parameter, i.e. ``datetime.date`` for ``DateParameter`` or ``int`` for
``IntParameter``. By default, no default is stored and
the value must be specified at runtime.
:param bool is_bool: specify ``True`` if the parameter is a bool value. Default:
``False``. Bool's have an implicit default value of ``False``.
:param bool significant: specify ``False`` if the parameter should not be treated as part of
the unique identifier for a Task. An insignificant Parameter might
also be used to specify a password or other sensitive information
that should not be made public via the scheduler. Default:
``True``.
:param str description: A human-readable string describing the purpose of this Parameter.
For command-line invocations, this will be used as the `help` string
shown to users. Default: ``None``.
:param dict config_path: a dictionary with entries ``section`` and ``name``
specifying a config file entry from which to read the
default value for this parameter. DEPRECATED.
Default: ``None``.
:param bool positional: If true, you can set the argument as a
positional argument. Generally we recommend ``positional=False``
as positional arguments become very tricky when
you have inheritance and whatnot.
"""
# The default default is no default
self.__default = default
self.__global = _no_value
self.is_bool = is_boolean # Only BoolParameter should ever use this. TODO(erikbern): should we raise some kind of exception?
if is_global:
warnings.warn("is_global support is removed. Assuming positional=False",
DeprecationWarning,
stacklevel=2)
positional = False
self.significant = significant # Whether different values for this parameter will differentiate otherwise equal tasks
self.positional = positional
self.description = description
if config_path is not None and ('section' not in config_path or 'name' not in config_path):
raise ParameterException('config_path must be a hash containing entries for section and name')
self.__config = config_path
self.counter = Parameter.counter # We need to keep track of this to get the order right (see Task class)
Parameter.counter += 1
def _get_value_from_config(self, section, name):
"""Loads the default from the config. Returns _no_value if it doesn't exist"""
conf = configuration.get_config()
try:
value = conf.get(section, name)
except (NoSectionError, NoOptionError):
return _no_value
return self.parse(value)
def _get_value(self, task_name=None, param_name=None):
if self.__global != _no_value:
return self.__global
if task_name and param_name:
v = self._get_value_from_config(task_name, param_name)
if v != _no_value:
return v
v = self._get_value_from_config(task_name, param_name.replace('_', '-'))
if v != _no_value:
warnings.warn(
'The use of the configuration [%s] %s (with dashes) should be avoided. Please use underscores.' %
(task_name, param_name), DeprecationWarning, stacklevel=2)
return v
if self.__config:
v = self._get_value_from_config(self.__config['section'], self.__config['name'])
if v != _no_value and task_name and param_name:
warnings.warn(
'The use of the configuration [%s] %s is deprecated. Please use [%s] %s' %
(self.__config['section'], self.__config['name'], task_name, param_name),
DeprecationWarning, stacklevel=2)
if v != _no_value:
return v
if self.__default != _no_value:
return self.__default
return _no_value
@property
def has_value(self):
"""
``True`` if a default was specified or if config_path references a valid entry in the conf.
Note that "value" refers to the Parameter object itself - it can be either
1. The default value for this parameter
2. A value read from the config
3. A global value
Any Task instance can have its own value set that overrides this.
"""
return self._get_value() != _no_value
@property
def value(self):
"""
The value for this Parameter.
This refers to any value defined by a default, a config option, or
a global value.
:raises MissingParameterException: if a value is not set.
:return: the parsed value.
"""
value = self._get_value()
if value == _no_value:
raise MissingParameterException("No default specified")
else:
return value
def has_task_value(self, task_name, param_name):
return self._get_value(task_name, param_name) != _no_value
def task_value(self, task_name, param_name):
value = self._get_value(task_name, param_name)
if value == _no_value:
raise MissingParameterException("No default specified")
else:
return value
def set_global(self, value):
"""
Set the global value of this Parameter.
:param value: the new global value.
"""
self.__global = value
def reset_global(self):
self.__global = _no_value
def parse(self, x):
"""
Parse an individual value from the input.
The default implementation is an identity (it returns ``x``), but subclasses should override
this method for specialized parsing. This method is called by :py:meth:`parse_from_input`
if ``x`` exists.
:param str x: the value to parse.
:return: the parsed value.
"""
return x # default impl
def serialize(self, x): # opposite of parse
"""
Opposite of :py:meth:`parse`.
Converts the value ``x`` to a string.
:param x: the value to serialize.
"""
return str(x)
@classmethod
def next_in_enumeration(_cls, _value):
"""
If your Parameter type has an enumerable ordering of values, you can
choose to override this method. This method is used by the
:py:mod:`luigi.execution_summary` module for pretty printing
purposes, enabling it to pretty print tasks like ``MyTask(num=1),
MyTask(num=2), MyTask(num=3)`` as ``MyTask(num=1..3)``.
:param value: The value
:return: The next value, like "value + 1". Or ``None`` if there's no enumerable ordering.
"""
return None
def parse_from_input(self, param_name, x, task_name=None):
"""
Parses the parameter value from input ``x``, handling defaults.
:param param_name: the name of the parameter. This is used for the message in
``MissingParameterException``.
:param x: the input value to parse.
:raises MissingParameterException: if x is false-y and no default is specified.
"""
if not x:
if self.has_task_value(param_name=param_name, task_name=task_name):
return self.task_value(param_name=param_name, task_name=task_name)
elif self.is_bool:
return False
else:
raise MissingParameterException("No value for '%s' (%s) submitted and no default value has been assigned." %
(param_name, "--" + param_name.replace('_', '-')))
else:
return self.parse(x)
def serialize_to_input(self, x):
return self.serialize(x)
def parser_dest(self, param_name, task_name, glob=False, is_without_section=False):
if is_without_section:
if glob:
return param_name
else:
return None
else:
if glob:
return task_name + '_' + param_name
else:
return param_name
def add_to_cmdline_parser(self, parser, param_name, task_name, glob=False, is_without_section=False):
dest = self.parser_dest(param_name, task_name, glob, is_without_section=is_without_section)
if not dest:
return
flag = '--' + dest.replace('_', '-')
description = []
description.append('%s.%s' % (task_name, param_name))
if glob:
description.append('for all instances of class %s' % task_name)
elif self.description:
description.append(self.description)
if self.has_task_value(param_name=param_name, task_name=task_name):
value = self.task_value(param_name=param_name, task_name=task_name)
description.append(" [default: %s]" % (value,))
if self.is_bool:
action = "store_true"
else:
action = "store"
parser.add_argument(flag,
help=' '.join(description),
action=action,
dest=dest)
def parse_from_args(self, param_name, task_name, args, params):
# Note: modifies arguments
dest = self.parser_dest(param_name, task_name, glob=False)
if dest is not None:
value = getattr(args, dest, None)
params[param_name] = self.parse_from_input(param_name, value, task_name=task_name)
def set_global_from_args(self, param_name, task_name, args, is_without_section=False):
# Note: side effects
dest = self.parser_dest(param_name, task_name, glob=True, is_without_section=is_without_section)
if dest is not None:
value = getattr(args, dest, None)
if value:
self.set_global(self.parse_from_input(param_name, value, task_name=task_name))
else: # either False (bools) or None (everything else)
self.reset_global()
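# A minimal sketch (illustrative only, not part of the original module) of the
# fallback order described in the Parameter docstring: with no global or
# config value set, Parameter.value resolves to the constructor default.
def _parameter_default_example():
    p = Parameter(default='bar')
    return p.has_value, p.value  # -> (True, 'bar')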
class DateParameterBase(Parameter):
"""
Base class Parameter for dates. Code reuse is made possible since all date
parameters are serialized in the same way.
"""
@abc.abstractproperty
def date_format(self):
"""
Override me with a :py:meth:`~datetime.date.strftime` string.
"""
pass
@abc.abstractproperty
def _timedelta(self):
"""
Either override me with a :py:class:`~datetime.timedelta` value or
implement :py:meth:`~Parameter.next_in_enumeration` to return ``None``.
"""
pass
def serialize(self, dt):
"""
Converts the date to a string using the :py:attr:`~DateParameterBase.date_format`.
"""
if dt is None:
return str(dt)
return dt.strftime(self.date_format)
@classmethod
def next_in_enumeration(cls, value):
return value + cls._timedelta
class DateParameter(DateParameterBase):
"""
Parameter whose value is a :py:class:`~datetime.date`.
A DateParameter is a Date string formatted ``YYYY-MM-DD``. For example, ``2013-07-10`` specifies
July 10, 2013.
"""
date_format = '%Y-%m-%d'
_timedelta = timedelta(days=1)
def parse(self, s):
"""
Parses a date string formatted as ``YYYY-MM-DD``.
"""
return datetime.datetime.strptime(s, self.date_format).date()
class MonthParameter(DateParameter):
"""
Parameter whose value is a :py:class:`~datetime.date`, specified to the month
(day of :py:class:`~datetime.date` is "rounded" to first of the month).
A MonthParameter is a Date string formatted ``YYYY-MM``. For example, ``2013-07`` specifies
July of 2013.
"""
date_format = '%Y-%m'
@staticmethod
def next_in_enumeration(_value):
return None
class YearParameter(DateParameter):
"""
Parameter whose value is a :py:class:`~datetime.date`, specified to the year
(day and month of :py:class:`~datetime.date` is "rounded" to first day of the year).
A YearParameter is a Date string formatted ``YYYY``.
"""
date_format = '%Y'
@staticmethod
def next_in_enumeration(_value):
return None
class DateHourParameter(DateParameterBase):
"""
Parameter whose value is a :py:class:`~datetime.datetime` specified to the hour.
A DateHourParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted
date and time specified to the hour. For example, ``2013-07-10T19`` specifies July 10, 2013 at
19:00.
"""
date_format = '%Y-%m-%dT%H' # ISO 8601 is to use 'T'
def parse(self, s):
"""
Parses a string to a :py:class:`~datetime.datetime` using the format string ``%Y-%m-%dT%H``.
"""
return datetime.datetime.strptime(s, self.date_format)
class DateMinuteParameter(DateHourParameter):
"""
Parameter whose value is a :py:class:`~datetime.datetime` specified to the minute.
A DateMinuteParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted
date and time specified to the minute. For example, ``2013-07-10T1907`` specifies July 10, 2013 at
19:07.
"""
date_format = '%Y-%m-%dT%H%M'
_timedelta = timedelta(minutes=1)
deprecated_date_format = '%Y-%m-%dT%HH%M'
def parse(self, s):
try:
value = datetime.datetime.strptime(s, self.deprecated_date_format)
warnings.warn(
'Using "H" between hours and minutes is deprecated, omit it instead.',
DeprecationWarning,
stacklevel=2
)
return value
except ValueError:
return super(DateMinuteParameter, self).parse(s)
class IntParameter(Parameter):
"""
Parameter whose value is an ``int``.
"""
def parse(self, s):
"""
Parses an ``int`` from the string using ``int()``.
"""
return int(s)
@staticmethod
def next_in_enumeration(value):
return value + 1
class FloatParameter(Parameter):
"""
Parameter whose value is a ``float``.
"""
def parse(self, s):
"""
Parses a ``float`` from the string using ``float()``.
"""
return float(s)
class BoolParameter(Parameter):
"""
A Parameter whose value is a ``bool``.
"""
def __init__(self, *args, **kwargs):
"""
This constructor passes along args and kwargs to ctor for :py:class:`Parameter` but
specifies ``is_bool=True``.
"""
super(BoolParameter, self).__init__(*args, is_bool=True, **kwargs)
def parse(self, s):
"""
Parses a ``bool`` from the string, matching 'true' or 'false' ignoring case.
"""
return {'true': True, 'false': False}[str(s).lower()]
class BooleanParameter(BoolParameter):
def __init__(self, *args, **kwargs):
warnings.warn(
'BooleanParameter is deprecated, use BoolParameter instead',
DeprecationWarning,
stacklevel=2
)
super(BooleanParameter, self).__init__(*args, **kwargs)
class DateIntervalParameter(Parameter):
"""
A Parameter whose value is a :py:class:`~luigi.date_interval.DateInterval`.
Date Intervals are specified using the ISO 8601 `Time Interval
<http://en.wikipedia.org/wiki/ISO_8601#Time_intervals>`_ notation.
"""
# Class that maps to/from dates using ISO 8601 standard
# Also gives some helpful interval algebra
def parse(self, s):
"""
Parses a :py:class:`~luigi.date_interval.DateInterval` from the input.
see :py:mod:`luigi.date_interval`
for details on the parsing of DateIntervals.
"""
# TODO: can we use xml.utils.iso8601 or something similar?
from luigi import date_interval as d
for cls in [d.Year, d.Month, d.Week, d.Date, d.Custom]:
i = cls.parse(s)
if i:
return i
else:
raise ValueError('Invalid date interval - could not be parsed')
class TimeDeltaParameter(Parameter):
"""
Class that maps to timedelta using strings in any of the following forms:
* ``n {w[eek[s]]|d[ay[s]]|h[our[s]]|m[inute[s]]|s[econd[s]]}`` (e.g. "1 week 2 days" or "1 h")
Note: multiple arguments must be supplied in longest to shortest unit order
* ISO 8601 duration ``PnDTnHnMnS`` (each field optional, years and months not supported)
* ISO 8601 duration ``PnW``
See https://en.wikipedia.org/wiki/ISO_8601#Durations
"""
def _apply_regex(self, regex, input):
import re
re_match = re.match(regex, input)
if re_match:
kwargs = {}
has_val = False
for k, v in six.iteritems(re_match.groupdict(default="0")):
val = int(v)
has_val = has_val or val != 0
kwargs[k] = val
if has_val:
return timedelta(**kwargs)
def _parseIso8601(self, input):
def field(key):
return "(?P<%s>\d+)%s" % (key, key[0].upper())
def optional_field(key):
return "(%s)?" % field(key)
# A little loose: ISO 8601 does not allow weeks in combination with other fields, but this regex does (as does python timedelta)
regex = "P(%s|%s(T%s)?)" % (field("weeks"), optional_field("days"), "".join([optional_field(key) for key in ["hours", "minutes", "seconds"]]))
return self._apply_regex(regex, input)
def _parseSimple(self, input):
keys = ["weeks", "days", "hours", "minutes", "seconds"]
# Give the digits a regex group name from the keys, then look for text with the first letter of the key,
# optionally followed by the rest of the word, with final char (the "s") optional
regex = "".join(["((?P<%s>\d+) ?%s(%s)?(%s)? ?)?" % (k, k[0], k[1:-1], k[-1]) for k in keys])
return self._apply_regex(regex, input)
def parse(self, input):
"""
Parses a time delta from the input.
See :py:class:`TimeDeltaParameter` for details on supported formats.
"""
result = self._parseIso8601(input)
if not result:
result = self._parseSimple(input)
if result:
return result
else:
raise ParameterException("Invalid time delta - could not parse %s" % input)
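# An illustrative sketch (not part of the original module) of the two
# spellings described in the TimeDeltaParameter docstring; both parse to a
# datetime.timedelta. The helper name is hypothetical.
def _timedelta_parameter_example():
    p = TimeDeltaParameter()
    return p.parse('1 w 2 d'), p.parse('P1DT12H')
    # -> (timedelta(weeks=1, days=2), timedelta(days=1, hours=12))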
class TaskParameter(Parameter):
"""
A parameter that takes another luigi task class.
When used programmatically, the parameter should be specified
directly with the :py:class:`luigi.task.Task` (sub) class. Like
``MyMetaTask(my_task_param=my_tasks.MyTask)``. On the command line,
you specify the :py:attr:`luigi.task.Task.task_family`. Like
.. code:: console
$ luigi --module my_tasks MyMetaTask --my_task_param my_namespace.MyTask
Where ``my_namespace.MyTask`` is defined in the ``my_tasks`` python module.
When the :py:class:`luigi.task.Task` class is instantiated to an object,
the value will always be a task class (and not a string).
"""
def parse(self, input):
"""
Parse a task_family using the :class:`~luigi.task_register.Register`
"""
return task_register.Register.get_task_cls(input)
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.boot import get_allowed_pages
@frappe.whitelist()
def get(module):
"""Returns data (sections, list of reports, counts) to render module view in desk:
`/desk/#Module/[name]`."""
data = get_data(module)
out = {
"data": data
}
return out
def get_data(module):
"""Get module data for the module view `desk/#Module/[name]`"""
doctype_info = get_doctype_info(module)
data = build_config_from_file(module)
if not data:
data = build_standard_config(module, doctype_info)
else:
add_custom_doctypes(data, doctype_info)
add_section(data, _("Custom Reports"), "icon-list-alt",
get_report_list(module))
data = combine_common_sections(data)
data = apply_permissions(data)
set_last_modified(data)
return data
def build_config_from_file(module):
"""Build module info from `app/config/desktop.py` files."""
data = []
module = frappe.scrub(module)
for app in frappe.get_installed_apps():
try:
data += get_config(app, module)
except ImportError:
pass
return data
def build_standard_config(module, doctype_info):
"""Build standard module data from DocTypes."""
if not frappe.db.get_value("Module Def", module):
frappe.throw(_("Module Not Found"))
data = []
add_section(data, _("Documents"), "icon-star",
[d for d in doctype_info if d.document_type in ("Document", "Transaction")])
add_section(data, _("Setup"), "icon-cog",
[d for d in doctype_info if d.document_type in ("Master", "Setup", "")])
add_section(data, _("Standard Reports"), "icon-list",
get_report_list(module, is_standard="Yes"))
return data
def add_section(data, label, icon, items):
"""Adds a section to the module data."""
if not items: return
data.append({
"label": label,
"icon": icon,
"items": items
})
def add_custom_doctypes(data, doctype_info):
"""Adds Custom DocTypes to modules setup via `config/desktop.py`."""
add_section(data, _("Documents"), "icon-star",
[d for d in doctype_info if (d.custom and d.document_type in ("Document", "Transaction"))])
add_section(data, _("Setup"), "icon-cog",
[d for d in doctype_info if (d.custom and d.document_type in ("Setup", "Master", ""))])
def get_doctype_info(module):
"""Returns list of non child DocTypes for given module."""
doctype_info = frappe.db.sql("""select "doctype" as type, name, description,
ifnull(document_type, "") as document_type, custom as custom,
issingle as issingle
from `tabDocType` where module=%s and istable=0
order by custom asc, document_type desc, name asc""", module, as_dict=True)
for d in doctype_info:
d.description = _(d.description or "")
return doctype_info
def combine_common_sections(data):
"""Combine sections declared in separate apps."""
sections = []
sections_dict = {}
for each in data:
if each["label"] not in sections_dict:
sections_dict[each["label"]] = each
sections.append(each)
else:
sections_dict[each["label"]]["items"] += each["items"]
return sections
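# An illustrative call (hypothetical sections, not real module data) showing
# that items declared under the same section label by different apps are
# merged into a single section.
def _combine_common_sections_example():
    data = [
        {"label": "Documents", "icon": "icon-star", "items": ["a"]},
        {"label": "Documents", "icon": "icon-star", "items": ["b"]},
    ]
    return combine_common_sections(data)
    # -> [{"label": "Documents", "icon": "icon-star", "items": ["a", "b"]}]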
def apply_permissions(data):
default_country = frappe.db.get_default("country")
user = frappe.get_user()
user.build_permissions()
allowed_pages = get_allowed_pages()
new_data = []
for section in data:
new_items = []
for item in (section.get("items") or []):
item = frappe._dict(item)
if item.country and item.country!=default_country:
continue
if ((item.type=="doctype" and item.name in user.can_read)
or (item.type=="page" and item.name in allowed_pages)
or (item.type=="report" and item.doctype in user.can_get_report)
or item.type=="help"):
new_items.append(item)
if new_items:
new_section = section.copy()
new_section["items"] = new_items
new_data.append(new_section)
return new_data
def get_config(app, module):
"""Load module info from `[app].config.[module]`."""
config = frappe.get_module("{app}.config.{module}".format(app=app, module=module))
config = config.get_data()
for section in config:
for item in section["items"]:
if not "label" in item:
item["label"] = _(item["name"])
return config
def add_setup_section(config, app, module, label, icon):
"""Add common sections to `/desk#Module/Setup`"""
try:
setup_section = get_setup_section(app, module, label, icon)
if setup_section:
config.append(setup_section)
except ImportError:
pass
def get_setup_section(app, module, label, icon):
"""Get the setup section from each module (for global Setup page)."""
config = get_config(app, module)
for section in config:
if section.get("label")==_("Setup"):
return {
"label": label,
"icon": icon,
"items": section["items"]
}
def set_last_modified(data):
for section in data:
for item in section["items"]:
if item["type"] == "doctype":
item["last_modified"] = get_last_modified(item["name"])
def get_last_modified(doctype):
def _get():
try:
last_modified = frappe.get_all(doctype, fields=["max(modified)"], as_list=True, limit_page_length=1)[0][0]
except Exception, e:
if e.args[0]==1146:
last_modified = None
else:
raise
# hack: save as -1 so that it is cached
if last_modified==None:
last_modified = -1
return last_modified
last_modified = frappe.cache().hget("last_modified", doctype, _get)
if last_modified==-1:
last_modified = None
return last_modified
def get_report_list(module, is_standard="No"):
"""Returns list on new style reports for modules."""
reports = frappe.get_list("Report", fields=["name", "ref_doctype", "report_type"], filters=
{"is_standard": is_standard, "disabled": ("in", ("0", "NULL", "")), "module": module},
order_by="name")
out = []
for r in reports:
out.append({
"type": "report",
"doctype": r.ref_doctype,
"is_query_report": 1 if r.report_type in ("Query Report", "Script Report") else 0,
"label": _(r.name),
"name": r.name
})
return out
|
|
# stdlib
from contextlib import contextmanager
from random import random, randrange
import os
import subprocess
import sys
import time
import unittest
import urllib as url
# 3p
from mock import patch
from nose.plugins.attrib import attr
# project
# needed because of the subprocess calls
sys.path.append(os.getcwd())
from ddagent import Application
from util import Watchdog
class WatchdogKill(Exception):
"""
The watchdog attempted to kill the process.
"""
pass
@attr('unix')
@attr(requires='core_integration')
class TestWatchdog(unittest.TestCase):
"""
Test watchdog in various conditions
"""
JITTER_FACTOR = 2
@contextmanager
def set_time(self, time):
"""
Helper, a context manager to set the current time value.
"""
# Set the current time within `util` module
mock_time = patch("util.time.time")
mock_time.start().return_value = time
        try:
            # Yield control back to the test body
            yield
        finally:
            # Always unset the time mock, even if the test body raises
            mock_time.stop()
@patch.object(Watchdog, 'self_destruct', side_effect=WatchdogKill)
def test_watchdog_frenesy_detection(self, mock_restarted):
"""
Watchdog restarts the process on suspicious high activity.
"""
# Limit the restart timeframe for test purpose
Watchdog._RESTART_TIMEFRAME = 1
        # Create a watchdog with a low activity tolerance
process_watchdog = Watchdog(10, max_resets=3)
ping_watchdog = process_watchdog.reset
with self.set_time(1):
# Can be reset 3 times within the watchdog timeframe
for x in xrange(0, 3):
ping_watchdog()
            # On the 4th attempt, the watchdog detects a suspiciously high activity
self.assertRaises(WatchdogKill, ping_watchdog)
with self.set_time(3):
# Gets back to normal when the activity timeframe expires.
ping_watchdog()
def test_watchdog(self):
"""
        Verify that the watchdog kills the process even when it is spinning
        Verify that the watchdog kills the process when it is hanging
"""
start = time.time()
try:
subprocess.check_call(["python", __file__, "busy"], stderr=subprocess.STDOUT)
raise Exception("Should have died with an error")
except subprocess.CalledProcessError:
duration = int(time.time() - start)
self.assertTrue(duration < self.JITTER_FACTOR * 5)
# Start pseudo web server
subprocess.Popen(["nc", "-l", "31834"])
start = time.time()
try:
subprocess.check_call(["python", __file__, "net"])
raise Exception("Should have died with an error")
except subprocess.CalledProcessError:
duration = int(time.time() - start)
self.assertTrue(duration < self.JITTER_FACTOR * 5)
# Normal loop, should run 5 times
start = time.time()
try:
subprocess.check_call(["python", __file__, "normal"])
duration = int(time.time() - start)
self.assertTrue(duration < self.JITTER_FACTOR * 5)
except subprocess.CalledProcessError:
self.fail("Watchdog killed normal process after %s seconds" % int(time.time() - start))
# Fast tornado, not killed
start = time.time()
p = subprocess.Popen(["python", __file__, "fast"])
p.wait()
duration = int(time.time() - start)
# should die as soon as flush_trs has been called
self.assertTrue(duration < self.JITTER_FACTOR * 10)
# Slow tornado, killed by the Watchdog
start = time.time()
p = subprocess.Popen(["python", __file__, "slow"])
p.wait()
duration = int(time.time() - start)
self.assertTrue(duration < self.JITTER_FACTOR * 4)
class MockTxManager(object):
def flush(self):
"Pretend to flush for a long time"
time.sleep(5)
sys.exit(0)
class MemoryHogTxManager(object):
def __init__(self, watchdog):
self._watchdog = watchdog
def flush(self):
rand_data = []
while True:
rand_data.append('%030x' % randrange(256**15))
self._watchdog.reset()
class PseudoAgent(object):
"""Same logic as the agent, simplified"""
AGENT_CONFIG = {
"bind_host": "localhost",
'endpoints': {
'https://app.datadoghq.com': ['api_key']
},
'forwarder_timeout': 5
}
def busy_run(self):
w = Watchdog(5)
w.reset()
while True:
random()
def hanging_net(self):
w = Watchdog(5)
w.reset()
x = url.urlopen("http://localhost:31834")
print "ERROR Net call returned", x
return True
def normal_run(self):
w = Watchdog(2)
w.reset()
for i in range(5):
time.sleep(1)
w.reset()
def slow_tornado(self):
a = Application(12345, self.AGENT_CONFIG)
a._watchdog = Watchdog(4)
a._tr_manager = MockTxManager()
a.run()
def fast_tornado(self):
a = Application(12345, self.AGENT_CONFIG)
a._watchdog = Watchdog(6)
a._tr_manager = MockTxManager()
a.run()
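    def use_lots_of_memory(self):
        # Referenced by the "memory" branch below but not defined in this file.
        # Minimal sketch, assuming the same wiring as slow_tornado/fast_tornado
        # together with the otherwise-unused MemoryHogTxManager; the watchdog's
        # memory-limit argument is omitted since its signature isn't shown here.
        a = Application(12345, self.AGENT_CONFIG)
        a._watchdog = Watchdog(30)
        a._tr_manager = MemoryHogTxManager(a._watchdog)
        a.run()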
if __name__ == "__main__":
if sys.argv[1] == "busy":
a = PseudoAgent()
a.busy_run()
elif sys.argv[1] == "net":
a = PseudoAgent()
a.hanging_net()
elif sys.argv[1] == "normal":
a = PseudoAgent()
a.normal_run()
elif sys.argv[1] == "slow":
a = PseudoAgent()
a.slow_tornado()
elif sys.argv[1] == "fast":
a = PseudoAgent()
a.fast_tornado()
elif sys.argv[1] == "test":
t = TestWatchdog()
t.runTest()
elif sys.argv[1] == "memory":
a = PseudoAgent()
a.use_lots_of_memory()
|
|
import os
import unittest
import datetime
from provstore.api import Api, NotFoundException, InvalidCredentialsException, InvalidDataException, ForbiddenException
from provstore.document import AbstractDocumentException, ImmutableDocumentException, EmptyDocumentException
import provstore.tests.examples as examples
PROVSTORE_USERNAME = os.environ.get('PROVSTORE_USERNAME', 'provstore-api-test')
PROVSTORE_API_KEY = os.environ.get('PROVSTORE_API_KEY', '56f7db0b9f1651d2cb0dd9b11c53b5fdc2dcacf4')
class LoggedInAPITestMixin(object):
@classmethod
def setUpClass(cls):
cls.api = Api(username=PROVSTORE_USERNAME, api_key=PROVSTORE_API_KEY)
return super(LoggedInAPITestMixin, cls).setUpClass()
class ProvStoreAPITests(LoggedInAPITestMixin, unittest.TestCase):
def test_basic_storage(self):
prov_document = examples.flat_document()
stored_document = self.api.document.create(prov_document,
name="test_basic_storage")
self.assertEqual(stored_document.prov, prov_document)
stored_document.delete()
def test_diff_auth_access(self):
prov_document = examples.flat_document()
# Private
stored_document = self.api.document.create(prov_document,
name="test_basic_storage")
public_api = Api()
with self.assertRaises(ForbiddenException):
public_api.document.get(stored_document.id)
# Public
stored_document = self.api.document.create(prov_document,
name="test_basic_storage",
public=True)
document = public_api.document.get(stored_document.id)
self.assertEqual(document.id, stored_document.id)
def test_basic_bundle_storage(self):
prov_document = examples.flat_document()
stored_document = self.api.document.create(prov_document,
name="test_basic_bundle_storage")
stored_document.add_bundle(prov_document, identifier="ex:bundle-1")
stored_document.bundles['ex:bundle-2'] = prov_document
        # this is a stale local instance, so prov should still match even
        # though we've added a bundle
self.assertEqual(stored_document.prov, prov_document)
# when we refresh it, it should no longer match
self.assertNotEqual(stored_document.refresh().prov, prov_document)
self.assertEqual(stored_document.bundles['ex:bundle-2'].prov, prov_document)
self.assertEqual(self.api.document.read_meta(stored_document.id).name, "test_basic_bundle_storage")
self.assertTrue(isinstance(stored_document.bundles['ex:bundle-2'].created_at, datetime.datetime))
stored_document.delete()
def test_bundle_iteration(self):
prov_document = examples.flat_document()
stored_document = self.api.document.create(prov_document,
name="test_bundle_iteration")
stored_document.add_bundle(prov_document, identifier="ex:bundle-1")
stored_document.bundles['ex:bundle-2'] = prov_document
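        # the local bundle collection is lazy: its length stays 0 until
        # refresh() is called (asserted below)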
self.assertEqual(len(stored_document.bundles), 0)
self.assertEqual({u'ex:bundle-1', u'ex:bundle-2'},
set([bundle.identifier for bundle in stored_document.bundles]))
self.assertEqual(len(stored_document.bundles.refresh()), 2)
stored_document.delete()
def test_basic_bundle_retrieval(self):
prov_document = examples.flat_document()
stored_document1 = self.api.document.create(prov_document,
name="test_basic_bundle_retrieval")
stored_document2 = self.api.document.create(prov_document,
name="test_basic_bundle_retrieval")
retrieved_document = self.api.document.set(stored_document1.id)
self.assertEqual(stored_document1, retrieved_document)
self.assertNotEqual(stored_document2, retrieved_document)
stored_document1.delete()
stored_document2.delete()
def test_non_existent_bundle(self):
prov_document = examples.flat_document()
stored_document = self.api.document.create(prov_document,
name="test_non_existent_bundle")
with self.assertRaises(NotFoundException):
stored_document.bundles['ex:not-there']
stored_document.delete()
def test_non_existent_document(self):
with self.assertRaises(NotFoundException):
self.api.document.get(-1)
def test_lazy_instantiation_of_props(self):
prov_document = examples.flat_document()
stored_document = self.api.document.create(prov_document,
name="test_lazy_instantiation_of_props")
self.assertEqual(self.api.document.set(stored_document.id).views, 0)
self.assertEqual(self.api.document.set(stored_document.id).owner, self.api._username)
self.assertTrue(isinstance(self.api.document.set(stored_document.id).created_at, datetime.datetime))
self.assertEqual(self.api.document.set(stored_document.id).prov, prov_document)
self.assertFalse(self.api.document.set(stored_document.id).public)
self.assertEqual(self.api.document.set(stored_document.id).name, "test_lazy_instantiation_of_props")
stored_document.delete()
def test_document_props(self):
prov_document = examples.flat_document()
stored_document = self.api.document.create(prov_document,
name="test_document_props")
self.assertEqual(stored_document.views, 0)
self.assertEqual(stored_document.owner, self.api._username)
self.assertTrue(isinstance(stored_document.created_at, datetime.datetime))
self.assertEqual(stored_document.prov, prov_document)
self.assertFalse(stored_document.public)
self.assertEqual(stored_document.name, "test_document_props")
stored_document.delete()
def test_empty_exceptions(self):
with self.assertRaises(EmptyDocumentException):
self.api.document.views
with self.assertRaises(EmptyDocumentException):
self.api.document.created_at
with self.assertRaises(EmptyDocumentException):
self.api.document.owner
with self.assertRaises(EmptyDocumentException):
self.api.document.prov
with self.assertRaises(EmptyDocumentException):
self.api.document.public
with self.assertRaises(EmptyDocumentException):
self.api.document.name
def test_abstract_exceptions(self):
prov_document = examples.flat_document()
abstract_document = self.api.document
with self.assertRaises(AbstractDocumentException):
abstract_document.bundles
self.assertRaises(AbstractDocumentException, abstract_document.delete)
with self.assertRaises(AbstractDocumentException):
abstract_document.add_bundle(prov_document, 'ex:bundle')
self.assertRaises(AbstractDocumentException, abstract_document.read_meta)
self.assertRaises(AbstractDocumentException, abstract_document.read_prov)
def test_immutable_exceptions(self):
prov_document = examples.flat_document()
stored_document = self.api.document.create(prov_document, name="test_immutable_exceptions")
self.assertRaises(ImmutableDocumentException, stored_document.create, (stored_document,))
self.assertRaises(ImmutableDocumentException, stored_document.set, (1,))
self.assertRaises(ImmutableDocumentException, stored_document.get, (1,))
self.assertRaises(ImmutableDocumentException, stored_document.read_prov, (1,))
self.assertRaises(ImmutableDocumentException, stored_document.read_meta, (1,))
self.assertRaises(ImmutableDocumentException, stored_document.read, (1,))
stored_document.delete()
def test_equality(self):
prov_document = examples.flat_document()
stored_document = self.api.document.create(prov_document, name="test_equality")
self.assertFalse(stored_document == "document")
stored_document.delete()
def test_invalid_name(self):
prov_document = examples.flat_document()
with self.assertRaises(InvalidDataException):
self.api.document.create(prov_document, name="")
class ProvStoreConfigAPITests(unittest.TestCase):
def test_invalid_credentials(self):
with self.assertRaises(InvalidCredentialsException):
api = Api(username="millar", api_key="bad")
api.document.get(148)
def test_public_access(self):
api = Api()
stored_document = api.document.get(148)
self.assertEqual(stored_document.id, 148)
|
|
import os
from bcbio.rnaseq import (featureCounts, cufflinks, oncofuse, count, dexseq,
express, variation, gtf, stringtie)
from bcbio.ngsalign import bwa, bowtie2, alignprep
import bcbio.pipeline.datadict as dd
from bcbio.utils import filter_missing
from bcbio.log import logger
def rnaseq_variant_calling(samples, run_parallel):
"""
run RNA-seq variant calling using GATK
"""
samples = run_parallel("run_rnaseq_variant_calling", samples)
samples = run_parallel("run_rnaseq_joint_genotyping", [samples])
return samples
def run_rnaseq_variant_calling(data):
variantcaller = dd.get_variantcaller(data)
if variantcaller and "gatk" in variantcaller:
data = variation.rnaseq_gatk_variant_calling(data)
return [[data]]
def run_rnaseq_joint_genotyping(*samples):
data = samples[0][0]
variantcaller = dd.get_variantcaller(data)
ref_file = dd.get_ref_file(data)
out_file = os.path.join(dd.get_work_dir(data, "."), "variation", "combined.vcf")
if variantcaller and "gatk" in variantcaller:
vrn_files = [dd.get_vrn_file(d) for d in dd.sample_data_iterator(samples)]
out_file = variation.gatk_joint_calling(data, vrn_files, ref_file, out_file)
updated_samples = []
for data in dd.sample_data_iterator(samples):
data = dd.set_square_vcf(data, out_file)
updated_samples.append([data])
return updated_samples
return samples
def quantitate_expression_parallel(samples, run_parallel):
"""
quantitate expression, all programs run here should be multithreaded to
take advantage of the threaded run_parallel environment
"""
samples = run_parallel("generate_transcript_counts", samples)
samples = run_parallel("run_cufflinks", samples)
#samples = run_parallel("run_stringtie_expression", samples)
return samples
def quantitate_expression_noparallel(samples, run_parallel):
"""
run transcript quantitation for algorithms that don't run in parallel
"""
samples = run_parallel("run_express", samples)
samples = run_parallel("run_dexseq", samples)
return samples
def generate_transcript_counts(data):
"""Generate counts per transcript and per exon from an alignment"""
data["count_file"] = featureCounts.count(data)
if dd.get_fusion_mode(data, False):
oncofuse_file = oncofuse.run(data)
if oncofuse_file:
data = dd.set_oncofuse_file(data, oncofuse_file)
if dd.get_transcriptome_align(data) and not dd.get_transcriptome_bam(data):
file1, file2 = None, None
if dd.get_disambiguate(data):
bam_path = data["work_bam"]
fastq_paths = alignprep._bgzip_from_bam(bam_path, data["dirs"], data["config"], is_retry=False, output_infix='-transcriptome')
if len(fastq_paths) == 2:
file1, file2 = fastq_paths
else:
file1, file2 = fastq_paths[0], None
else:
file1, file2 = dd.get_input_sequence_files(data)
ref_file = dd.get_ref_file(data)
logger.info("Transcriptome alignment was flagged to run, but the "
"transcriptome BAM file was not found. Aligning to the "
"transcriptome with bowtie2.")
data = bowtie2.align_transcriptome(file1, file2, ref_file, data)
return [[data]]
def run_stringtie_expression(data):
"""Calculate transcript and gene level FPKM with Stringtie"""
data = stringtie.run_stringtie_expression(data)
return [[data]]
def run_dexseq(data):
"""Quantitate exon-level counts with DEXSeq"""
if dd.get_dexseq_gff(data, None):
data = dexseq.bcbio_run(data)
return [[data]]
def run_express(data):
"""Quantitative isoform expression by eXpress"""
data = express.run(data)
return [[data]]
def combine_express(samples, combined):
"""Combine tpm, effective counts and fpkm from express results"""
to_combine = [dd.get_express_counts(x) for x in
dd.sample_data_iterator(samples) if dd.get_express_counts(x)]
gtf_file = dd.get_gtf_file(samples[0][0])
isoform_to_gene_file = os.path.join(os.path.dirname(combined), "isoform_to_gene.txt")
isoform_to_gene_file = express.isoform_to_gene_name(gtf_file, isoform_to_gene_file)
if len(to_combine) > 0:
eff_counts_combined_file = os.path.splitext(combined)[0] + ".isoform.express_counts"
eff_counts_combined = count.combine_count_files(to_combine, eff_counts_combined_file)
to_combine = [dd.get_express_tpm(x) for x in
dd.sample_data_iterator(samples) if dd.get_express_tpm(x)]
tpm_counts_combined_file = os.path.splitext(combined)[0] + ".isoform.express_tpm"
tpm_counts_combined = count.combine_count_files(to_combine, tpm_counts_combined_file)
to_combine = [dd.get_express_fpkm(x) for x in dd.sample_data_iterator(samples)
if dd.get_express_fpkm(x)]
fpkm_counts_combined_file = os.path.splitext(combined)[0] + ".isoform.express_fpkm"
fpkm_counts_combined = count.combine_count_files(to_combine, fpkm_counts_combined_file)
return {'counts': eff_counts_combined, 'tpm': tpm_counts_combined,
'fpkm': fpkm_counts_combined, 'isoform_to_gene': isoform_to_gene_file}
return {}
def run_cufflinks(data):
"""Quantitate transcript expression with Cufflinks"""
work_bam = dd.get_work_bam(data)
ref_file = dd.get_sam_ref(data)
out_dir, fpkm_file, fpkm_isoform_file = cufflinks.run(work_bam, ref_file, data)
data = dd.set_cufflinks_dir(data, out_dir)
data = dd.set_fpkm(data, fpkm_file)
data = dd.set_fpkm_isoform(data, fpkm_isoform_file)
return [[data]]
def cufflinks_assemble(data):
bam_file = dd.get_work_bam(data)
ref_file = dd.get_sam_ref(data)
out_dir = os.path.join(dd.get_work_dir(data), "assembly")
num_cores = dd.get_num_cores(data)
assembled_gtf = cufflinks.assemble(bam_file, ref_file, num_cores, out_dir, data)
data = dd.set_assembled_gtf(data, assembled_gtf)
return [[data]]
def cufflinks_merge(*samples):
to_merge = filter_missing([dd.get_assembled_gtf(data) for data in
dd.sample_data_iterator(samples)])
data = samples[0][0]
bam_file = dd.get_work_bam(data)
ref_file = dd.get_sam_ref(data)
gtf_file = dd.get_gtf_file(data)
out_dir = os.path.join(dd.get_work_dir(data), "assembly")
num_cores = dd.get_num_cores(data)
merged_gtf = cufflinks.merge(to_merge, ref_file, gtf_file, num_cores, samples[0][0])
updated_samples = []
for data in dd.sample_data_iterator(samples):
data = dd.set_assembled_gtf(data, merged_gtf)
updated_samples.append([data])
return updated_samples
def assemble_transcripts(run_parallel, samples):
"""
assembly strategy rationale implemented as suggested in
http://www.nature.com/nprot/journal/v7/n3/full/nprot.2012.016.html
    run Cufflinks without a reference GTF for each individual sample, then
    merge the assemblies with Cuffmerge using a reference GTF
"""
if dd.get_assemble_transcripts(samples[0][0]):
samples = run_parallel("cufflinks_assemble", samples)
samples = run_parallel("cufflinks_merge", [samples])
return samples
def combine_files(samples):
"""
after quantitation, combine the counts/FPKM/TPM/etc into a single table with
all samples
"""
gtf_file = dd.get_gtf_file(samples[0][0], None)
# combine featureCount files
count_files = filter_missing([dd.get_count_file(x[0]) for x in samples])
combined = count.combine_count_files(count_files)
annotated = count.annotate_combined_count_file(combined, gtf_file)
# combine eXpress files
express_counts_combined = combine_express(samples, combined)
# combine Cufflinks files
fpkm_combined_file = os.path.splitext(combined)[0] + ".fpkm"
fpkm_files = filter_missing([dd.get_fpkm(x[0]) for x in samples])
fpkm_combined = count.combine_count_files(fpkm_files, fpkm_combined_file)
fpkm_isoform_combined_file = os.path.splitext(combined)[0] + ".isoform.fpkm"
isoform_files = filter_missing([dd.get_fpkm_isoform(x[0]) for x in samples])
fpkm_isoform_combined = count.combine_count_files(isoform_files,
fpkm_isoform_combined_file,
".isoform.fpkm")
# combine DEXseq files
dexseq_combined_file = os.path.splitext(combined)[0] + ".dexseq"
to_combine_dexseq = filter_missing([dd.get_dexseq_counts(data[0]) for data in samples])
if to_combine_dexseq:
dexseq_combined = count.combine_count_files(to_combine_dexseq,
dexseq_combined_file, ".dexseq")
else:
dexseq_combined = None
updated_samples = []
for data in dd.sample_data_iterator(samples):
data = dd.set_combined_counts(data, combined)
if annotated:
data = dd.set_annotated_combined_counts(data, annotated)
if fpkm_combined:
data = dd.set_combined_fpkm(data, fpkm_combined)
if fpkm_isoform_combined:
data = dd.set_combined_fpkm_isoform(data, fpkm_isoform_combined)
if express_counts_combined:
data = dd.set_express_counts(data, express_counts_combined['counts'])
data = dd.set_express_tpm(data, express_counts_combined['tpm'])
data = dd.set_express_fpkm(data, express_counts_combined['fpkm'])
data = dd.set_isoform_to_gene(data, express_counts_combined['isoform_to_gene'])
if dexseq_combined:
data = dd.set_dexseq_counts(data, dexseq_combined_file)
updated_samples.append([data])
return updated_samples
|
|
import os
import wx
import wx.lib.newevent
from sas.sascalc.dataloader.readers.cansas_reader import Reader
from sas.sasgui.guiframe.utils import format_number
from sas.sasgui.guiframe.events import EVT_SLICER_PARS, EVT_SLICER
from sas.sasgui.guiframe.events import SlicerParameterEvent, StatusEvent
from Plotter2D import ModelPanel2D
apply_params, EVT_APPLY_PARAMS = wx.lib.newevent.NewEvent()
save_files, EVT_AUTO_SAVE = wx.lib.newevent.NewEvent()
FIT_OPTIONS = ["No fitting", "Fitting", "Batch Fitting"]
CONVERT_KEYS = ["SectorInteractor", "AnnulusInteractor", "BoxInteractorX",
"BoxInteractorY"]
CONVERT_DICT = {"SectorInteractor": "SectorQ",
"AnnulusInteractor": "AnnulusPhi",
"BoxInteractorX": "SlabX",
"BoxInteractorY": "SlabY"}
BINNING_OPTIONS = {"Linear" : 0,
"Logarithmic" : 10,}
class SlicerParameterPanel(wx.Dialog):
"""
    Panel for dynamically changing slicer parameters and applying the same
    slicer to multiple 2D plot panels
"""
def __init__(self, parent, *args, **kwargs):
"""
        Dialog window that allows editing slicer parameters
        by entering new values
"""
wx.Dialog.__init__(self, parent, *args, **kwargs)
self.params = {}
self.iter = 0
self.parent = parent
self.main_window = parent.parent
self.data_panel = self.main_window._data_panel
self.type = None
self.listeners = []
self.parameters = []
self.bck = wx.GridBagSizer(5, 5)
self.SetSizer(self.bck)
self.auto_save = None
self.path = None
self.fitting_options = None
self.bin_ctl = None
self.type_list = []
self.loaded_data = []
self.always_on = None
self.type_select = None
self.append_name = None
self.data_list = None
self.default_value = ""
self.batch_slicer_button = None
label = "Right-click on 2D plot for slicer options"
title = wx.StaticText(self, -1, label, style=wx.ALIGN_LEFT)
self.bck.Add(title, (0, 0), (1, 2),
flag=wx.LEFT | wx.ALIGN_CENTER_VERTICAL, border=15)
# Bindings
self.parent.Bind(EVT_SLICER, self.on_evt_slicer)
self.Bind(EVT_SLICER_PARS, self.on_param_change)
self.Bind(EVT_APPLY_PARAMS, self.apply_params_list_and_process)
self.Bind(EVT_AUTO_SAVE, self.save_files)
def on_evt_slicer(self, event):
"""
Process EVT_SLICER events
When the slicer changes, update the panel
:param event: EVT_SLICER event
"""
event.Skip()
if event.obj_class is None:
self.set_slicer(None, None)
else:
self.set_slicer(event.type, event.params)
def set_slicer(self, type, params):
"""
Rebuild the panel
"""
self.bck.Clear(True)
self.bck.Add((5, 5), (0, 0), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 5)
self.type = type
if type is None:
label = "Right-click on 2D plot for slicer options"
title = wx.StaticText(self, -1, label, style=wx.ALIGN_LEFT)
self.bck.Add(title, (1, 0), (1, 2),
flag=wx.LEFT | wx.ALIGN_CENTER_VERTICAL, border=15)
else:
title = wx.StaticText(self, -1,
"Slicer Parameters:", style=wx.ALIGN_LEFT)
self.bck.Add(title, (1, 0), (1, 2),
flag=wx.LEFT | wx.ALIGN_CENTER_VERTICAL, border=15)
iy = 1
self.parameters = []
keys = params.keys()
keys.sort()
for item in keys:
ix = 0
iy += 1
if item not in ["count", "errors", "binning base"]:
text = wx.StaticText(self, -1, item, style=wx.ALIGN_LEFT)
self.bck.Add(text, (iy, ix), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
ctl = wx.TextCtrl(self, -1, size=(80, 20),
style=wx.TE_PROCESS_ENTER)
hint_msg = "Modify the value of %s to change" % item
hint_msg += " the 2D slicer"
ctl.SetToolTipString(hint_msg)
ix = 1
ctl.SetValue(format_number(str(params[item])))
self.Bind(wx.EVT_TEXT_ENTER, self.on_text_enter)
self.parameters.append([item, ctl])
self.bck.Add(ctl, (iy, ix), (1, 1),
wx.EXPAND | wx.ADJUST_MINSIZE, 0)
ix = 3
self.bck.Add((20, 20), (iy, ix), (1, 1),
wx.EXPAND | wx.ADJUST_MINSIZE, 0)
elif item == 'binning base':
text = wx.StaticText(self, -1, item, style=wx.ALIGN_LEFT)
self.bck.Add(text, (iy, ix), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
options = BINNING_OPTIONS.keys()
self.bin_ctl = wx.ComboBox(parent=self, choices=options)
hint_msg = "Modify the value of %s to change" % item
hint_msg += " the 2D slicer"
self.bin_ctl.SetToolTipString(hint_msg)
ix = 1
result = ""
value = 0
for name, value in BINNING_OPTIONS.items():
if value == params[item]:
result = name
break
index = self.bin_ctl.FindString(result)
self.bin_ctl.SetSelection(index)
self.parameters.append([item, self.bin_ctl])
self.Bind(wx.EVT_COMBOBOX, self.on_text_enter)
self.bck.Add(self.bin_ctl, (iy, ix), (1, 1),
wx.EXPAND | wx.ADJUST_MINSIZE, 0)
ix = 3
self.bck.Add((20, 20), (iy, ix), (1, 1),
wx.EXPAND | wx.ADJUST_MINSIZE, 0)
else:
text = wx.StaticText(self, -1, item + " : ",
style=wx.ALIGN_LEFT)
self.bck.Add(text, (iy, ix), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
ctl = wx.StaticText(self, -1,
format_number(str(params[item])),
style=wx.ALIGN_LEFT)
ix = 1
self.bck.Add(ctl, (iy, ix), (1, 1),
wx.EXPAND | wx.ADJUST_MINSIZE, 0)
# Change slicer within the window
ix = 0
iy += 1
txt = "Slicer type"
text = wx.StaticText(self, -1, txt, style=wx.ALIGN_LEFT)
self.bck.Add(text, (iy, ix), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
self.type_list = CONVERT_KEYS
self.type_select = wx.ComboBox(parent=self, choices=self.type_list)
self.type_select.Bind(wx.EVT_COMBOBOX, self.on_change_slicer)
index = self.type_select.FindString(type)
self.type_select.SetSelection(index)
self.bck.Add(self.type_select, (iy, 1), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
# batch slicing parameters
title_text = "Batch Slicing Options:"
title = wx.StaticText(self, -1, title_text, style=wx.ALIGN_LEFT)
iy += 1
line = wx.StaticLine(self, -1, style=wx.LI_VERTICAL)
line.SetSize((60, 60))
self.bck.Add(line, (iy, ix), (1, 2),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
iy += 1
self.bck.Add(title, (iy, ix), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
# Create a list box with all of the 2D plots
iy += 1
self.process_list()
self.bck.Add(self.data_list, (iy, ix), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
# Checkbox to enable saving and fitting options
iy += 1
self.auto_save = wx.CheckBox(parent=self, id=wx.NewId(),
label="Auto save generated 1D:")
self.Bind(wx.EVT_CHECKBOX, self.on_auto_save_checked)
self.bck.Add(self.auto_save, (iy, ix), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
iy += 1
# File browser
save_to = "Save files to:"
save = wx.StaticText(self, -1, save_to, style=wx.ALIGN_LEFT)
path = os.getcwd()
self.path = wx.DirPickerCtrl(self, id=wx.NewId(), path=path,
message=save_to)
self.path.Enable(False)
self.bck.Add(save, (iy, ix), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
self.bck.Add(self.path, (iy, 1), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
# Append to file
iy += 1
self.update_file_append(params)
append_text = "Append to file name:"
append = wx.StaticText(self, -1, append_text, style=wx.ALIGN_LEFT)
self.append_name = wx.TextCtrl(parent=self, id=wx.NewId(),
name="Append to file name:")
append_tool_tip = "Files will be saved as <SlicerType><FileName>"
append_tool_tip += "<AppendToText>.txt"
self.append_name.SetToolTipString(append_tool_tip)
self.append_name.SetValue(self.default_value)
self.append_name.Enable(False)
self.bck.Add(append, (iy, ix), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
self.bck.Add(self.append_name, (iy, 1), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
# Combobox for selecting fitting options
iy += 1
fit_text = "Fitting Options:"
fit_text_item = wx.StaticText(self, -1, fit_text,
style=wx.ALIGN_LEFT)
self.bck.Add(fit_text_item, (iy, ix), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
self.fitting_options = wx.ComboBox(parent=self, choices=FIT_OPTIONS)
self.fitting_options.SetSelection(0)
self.bck.Add(self.fitting_options, (iy, 1), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
self.fitting_options.Enable(False)
self.fitting_options.Bind(wx.EVT_COMBOBOX, None)
# Button to start batch slicing
iy += 1
button_label = "Apply Slicer to Selected Plots"
self.batch_slicer_button = wx.Button(parent=self,
label=button_label)
self.Bind(wx.EVT_BUTTON, self.on_batch_slicer)
self.bck.Add(self.batch_slicer_button, (iy, ix), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
iy += 1
self.bck.Add((5, 5), (iy, ix), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 5)
self.bck.Layout()
self.bck.Fit(self)
self.parent.GetSizer().Layout()
def on_param_change(self, evt):
"""
        Receive an event and reset the value text fields
        inside self.parameters
"""
evt.Skip()
if evt.type == "UPDATE":
for item in self.parameters:
if item[0] in evt.params:
item[1].SetValue("%-5.3g" % evt.params[item[0]])
item[1].Refresh()
def on_text_enter(self, evt):
"""
Parameters have changed
"""
params = {}
has_error = False
for item in self.parameters:
try:
if item[0] == "binning base":
title = self.bin_ctl.GetValue()
params["binning base"] = BINNING_OPTIONS.get(title)
continue
params[item[0]] = float(item[1].GetValue())
item[1].SetBackgroundColour(
wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOW))
item[1].Refresh()
except:
has_error = True
item[1].SetBackgroundColour("pink")
item[1].Refresh()
if not has_error:
# Post parameter event
# parent here is plotter2D
self.update_file_append(params)
self.append_name.SetValue(self.default_value)
self.append_name.Refresh()
event = SlicerParameterEvent(type=self.type, params=params)
wx.PostEvent(self.parent, event)
def on_batch_slicer(self, evt=None):
"""
Event triggered when batch slicing button is pressed
:param evt: Event triggering the batch slicing
"""
apply_to_list = []
spp = self.parent.parent
params = self.parent.slicer.get_params()
slicer_type = self.type_select.GetStringSelection()
save = self.auto_save.IsChecked()
append = self.append_name.GetValue()
path = self.path.GetPath()
fit = self.fitting_options.GetValue()
# Find desired 2D data panels
for key, mgr in spp.plot_panels.iteritems():
if mgr.graph.prop['title'] in self.data_list.CheckedStrings:
apply_to_list.append(mgr)
# Apply slicer type to selected panels
for item in apply_to_list:
self._apply_slicer_to_plot(item, slicer_type)
# Post an event to apply appropriate slicer params to each slicer
# Pass all variables, including class variables
event_params = apply_params(params=params, apply_to_list=apply_to_list,
auto_save=save, append=append, fit=fit,
path=path, type=slicer_type)
wx.PostEvent(self, event_params)
def on_change_slicer(self, evt):
"""
Event driven slicer change when self.type_select changes
:param evt: Event triggering this change
"""
self._apply_slicer_to_plot(self.parent)
def _apply_slicer_to_plot(self, plot, slicer_type=None):
"""
Apply a slicer to *any* plot window, not just parent window
:param plot: 2D plot panel to apply a slicer to
:param slicer_type: The type of slicer to apply to the panel
"""
# Skip redrawing the current plot if no change in slicer type
if self.parent == plot and self.type == slicer_type:
return
# Do not draw a slicer on a 1D plot
if not isinstance(plot, ModelPanel2D):
return
if slicer_type is None:
slicer_type = self.type_select.GetStringSelection()
if slicer_type == self.type_list[0]:
plot.onSectorQ(None)
elif slicer_type == self.type_list[1]:
plot.onSectorPhi(None)
elif slicer_type == self.type_list[2]:
plot.onBoxavgX(None)
elif slicer_type == self.type_list[3]:
plot.onBoxavgY(None)
def process_list(self):
"""
Populate the check list from the currently plotted 2D data
"""
# Reinitialize loaded data list on redraw
self.loaded_data = []
# Iterate over the loaded plots and find all 2D panels
for key, value in self.main_window.plot_panels.iteritems():
if isinstance(value, ModelPanel2D):
self.loaded_data.append(value.data2D.name)
if value.data2D.id == self.parent.data2D.id:
# Set current plot panel as uncheckable
self.always_on = self.loaded_data.index(value.data2D.name)
self.data_list = wx.CheckListBox(parent=self, id=wx.NewId(),
choices=self.loaded_data,
name="Apply Slicer to 2D Plots:")
# Check all items by default
for item in range(len(self.data_list.Items)):
self.data_list.Check(item)
self.data_list.Bind(wx.EVT_CHECKLISTBOX, self.on_check_box_list)
def on_check_box_list(self, evt=None):
"""
Prevent a checkbox item from being unchecked
:param evt: Event triggered when a checkbox list item is checked
"""
if evt is None:
return
index = evt.GetSelection()
if index == self.always_on:
self.data_list.Check(index)
def apply_params_list_and_process(self, evt=None):
"""
Event based parameter setting.
:param evt: Event triggered to apply parameters to a list of plots
        evt should have attrs apply_to_list and params
"""
if evt is None:
return
# Apply parameter list to each plot as desired
for item in evt.apply_to_list:
event = SlicerParameterEvent(type=evt.type, params=evt.params)
wx.PostEvent(item, event)
# Post an event to save each data set to file
if evt.auto_save:
event = save_files(append_to_name=evt.append, path=evt.path,
type=evt.type, file_list=evt.apply_to_list,
fit=evt.fit)
wx.PostEvent(self, event)
def save_files(self, evt=None):
"""
Automatically save the sliced data to file.
:param evt: Event that triggered the call to the method
"""
# Events triggered after this event pass other events to wx that are
# necessary before this event is called. If this is the first time
# reaching this event, send it to the end of the wx event queue
if self.iter < 2:
clone = evt.Clone()
wx.PostEvent(self, clone)
self.iter += 1
return
if evt is None:
return
# Start definitions
writer = Reader()
data_dic = {}
append = evt.append_to_name
names = []
f_name_list = []
f_path_list = []
# Get list of 2D data names for saving
for f_name in evt.file_list:
names.append(f_name.data2D.label)
# Find the correct plots to save
for key, plot in self.main_window.plot_panels.iteritems():
if not hasattr(plot, "data2D"):
for item in plot.plots:
base = item.replace(CONVERT_DICT[evt.type], "")
if base in names:
data_dic[item] = plot.plots[item]
# Save files as Text
for item, data1d in data_dic.iteritems():
base = '.'.join(item.split('.')[:-1])
file_name = base + append + ".txt"
            save_to = os.path.join(evt.path, file_name)
writer.write(save_to, data1d)
f_path_list.append(save_to)
f_name_list.append(file_name)
# Load files into GUI
for item in f_path_list:
self.main_window.load_data(item)
# Send to fitting
self.send_to_fitting(evt.fit, f_name_list)
def send_to_fitting(self, fit=FIT_OPTIONS[0], file_list=None):
"""
Send a list of data to the fitting perspective
:param fit: fit type desired
:param file_list: list of loaded file names to send to fit
"""
if fit in FIT_OPTIONS and fit != FIT_OPTIONS[0] and \
file_list is not None:
# Set perspective to fitting
            index = self.data_panel.perspective_cbox.FindString("Fitting")
            self.data_panel.perspective_cbox.SetSelection(index)
self.data_panel._on_perspective_selection(None)
# Unselect all loaded data
self.data_panel.selection_cbox.SetValue('Unselect all Data')
self.data_panel._on_selection_type(None)
# Click each sliced data file
for f_name in file_list:
num = len(f_name)
data_list = self.data_panel.list_cb_data
for key in data_list:
loaded_key = (key[:num]) if len(key) > num else key
if loaded_key == f_name:
selection = key
data_ctrl = data_list[selection][0]
self.check_item_and_children(data_ctrl=data_ctrl,
check_value=True)
# Switch to batch mode if selected
if fit == FIT_OPTIONS[2]:
self.data_panel.rb_single_mode.SetValue(False)
self.data_panel.rb_batch_mode.SetValue(True)
self.data_panel.on_batch_mode(None)
else:
self.data_panel.rb_single_mode.SetValue(True)
self.data_panel.rb_batch_mode.SetValue(False)
self.data_panel.on_single_mode(None)
# Post button click event to send data to fitting
evt = wx.PyCommandEvent(wx.EVT_BUTTON.typeId,
self.data_panel.bt_import.GetId())
wx.PostEvent(self.data_panel, evt)
def on_auto_save_checked(self, evt=None):
"""
Enable/Disable auto append when checkbox is checked
:param evt: Event
"""
self.append_name.Enable(self.auto_save.IsChecked())
self.path.Enable(self.auto_save.IsChecked())
self.fitting_options.Enable(self.auto_save.IsChecked())
def check_item_and_children(self, data_ctrl, check_value=True):
self.data_panel.tree_ctrl.CheckItem(data_ctrl, check_value)
if data_ctrl.HasChildren():
if check_value and not data_ctrl.IsExpanded():
return
for child_ctrl in data_ctrl.GetChildren():
self.data_panel.CheckItem(child_ctrl, check_value)
def update_file_append(self, params=None):
"""
Update default_value when any parameters are changed
:param params: dictionary of parameters
"""
self.default_value = ""
if params is None:
params = self.params
for key in params:
self.default_value += "_{0}".format(key).split(" [")[0]
self.default_value += "-{:.2f}".format(params[key])
|
|
"""
QUTest Testing Module version 1.0
(c) James Wu 2013
QUTest is a simple unit testing framework for python.
See readme for details.
"""
import time, numpy, math, gc, string, random
from scipy.stats import linregress
import itertools
from inspect import getargspec
#used for when the output is unknown
class Unknown(object):
pass
"""Runs a single test"""
class Test(object):
name = ""
#expected inputs and outputs
inputs = []
output = None
#the function itself
fn = None
#do we expect an assertion error?
error = False
def __init__(self, fn, inputs = [], output = Unknown, error = False, name = ""):
"""
fn: function in question
inputs: required inputs for the function
output: expected output
error: whether or not this function should trigger an error
name: name of function
"""
self.name = name
if not self.name:
self.name = fn.__name__
self.inputs = inputs
self.error = error
if error:
self.output = "Give assertion error"
else:
self.output = output
self.fn = fn
def __repr__(self):
return self.name
def time(self):
start = time.clock()
result = self.run()
elapsed = time.clock() - start
return (result, elapsed)
def onlytime(self):
start = time.clock()
try:
self.fn(*self.inputs)
except Exception as e:
print "Exception!"
return False
elapsed = time.clock() - start
return elapsed
def avgtime(self, tries):
"""
        Gives the average time elapsed over a certain number of runs
"""
total = 0
for i in xrange(tries):
elapsed = self.onlytime()
total += elapsed
return total / tries
def run(self):
"""
Returns a triplet, of type
(bool, int, Object)
True means passed, False means failed.
Error codes:
0: Test Passed
1: Unexpected AssertionError
2: Expected AssertionError, code ran to completion
3: Incorrect Output
4: Unknown output, prints out
"""
try:
testOutput = self.fn(*self.inputs)
except Exception as e:
if self.error:
return (True, 0, None)
return (False, 1 ,e)
if self.error:
return (False, 2, testOutput)
if self.output is Unknown:
return (True, 4, testOutput)
if testOutput == self.output:
return (True, 0, None)
else:
return (False, 3, testOutput)
def largeIntTests(fn, maximum = 10000, factor = 2, start = 1):
"""
Creates a suite of large integer input tests.
Maximum: Maximum size input
factor: Ratio of input sizes. If set to 1, inputs will increment by 1 each time
"""
i = start
suite = Suite("{0} Large Integer tests".format(fn.__name__))
while i <= maximum:
suite.addTest(Test(fn, [i], name = "{0}({1})".format(fn.__name__, i)))
if(factor == 1):
i += 1
else:
i *= factor
return suite
##from stackoverflow
def keywithmaxval(d):
""" a) create a list of the dict's keys and values;
b) return the key with the max value"""
v=list(d.values())
k=list(d.keys())
return k[v.index(max(v))]
def getTimeData(fn, maximum, factor):
"""
times a function on various size inputs and returns timing data
"""
timeSuite = largeIntTests(fn, maximum, factor)
data = []
#run average time elapsed on each test
print timeSuite.tests
for test in timeSuite.tests:
assert len(test.inputs) == 1
inputSize = test.inputs[0]
#number of times we average it by
testTime = test.avgtime(100) * 10000
data += [(inputSize, testTime)]
return data
def calcTimeComplexity(fn, maximum = 10000, factor = 2):
"""
Generates a list of largeIntTests, and uses them to figure out time complexity by timing
Currently only works on functions with one argument, which is an int or float
    NOT EXACT: Only estimates based on linear, quadratic, n*log(n), exponential and logarithmic regression data
"""
fn(maximum)
data = getTimeData(fn, maximum, factor)
print data[0]
rvals = {}
#linear?
#print data
slope, _, lin_rval, _, _ = linregress(data)
rvals['n'] = lin_rval**2
#quadratic?
sq_data = [(x, math.sqrt(y)) for (x,y) in data]
_, _, sq_rval, _, _ = linregress(sq_data)
rvals['n^2'] = sq_rval**2
del sq_data[:]
#nlogn?
nlgn_data = [(x* math.log(x,2),y) for (x,y) in data]
_, _, nlgn_rval, _, _ = linregress(nlgn_data)
rvals['nlg(n)'] = nlgn_rval**2
del nlgn_data[:]
#exp?
exp_data = [(x, math.log(y, 2)) for (x,y) in data]
_, _, exp_rval, _, _ = linregress(exp_data)
rvals['2^n'] = exp_rval**2
del exp_data[:]
#log?
log_data = [(math.log(x, 2), y) for (x,y) in data]
_, _, log_rval, _, _ = linregress(log_data)
rvals['lg(n)'] = log_rval**2
return "O(" + keywithmaxval(rvals) + ")"
def randString():
"""
Generates a random string of random length
"""
return ''.join(random.choice(string.ascii_uppercase + string.digits)
for x in range(random.randint(0,100)))
def genTests(fn, sampleInput, numTests = 20, reference = None):
"""
    Generates automatic test cases for a function, given a sample input for the function
    Sample input: any lists in the sample input must be nonempty; otherwise, the function can't tell what type of list it is!
    e.g. genTests(foo, [3, "abc"], 20, None) for a function foo(x, s)
Reference is the reference solution- if one exists, in function form
"""
argNames = getargspec(fn).args
argTypes = []
for argument in sampleInput:
if type(argument) == list or type(argument) == tuple:
argTypes.append((type(argument), type(argument[0])))
else:
argTypes.append(type(argument))
#they must be of equal length for this to work
assert len(argNames) == len(argTypes)
argDict = dict(zip(argNames, argTypes))
possibleArgs = {}
for arg in argDict:
possibleArgs[arg] = [sampleInput[argNames.index(arg)]]
#if the argument is an integer, we test zeroes, large inputs
if argDict[arg] == int:
possibleArgs[arg].append(0)
#test random integers
for i in xrange(10):
possibleArgs[arg].append(random.randint(0,100))
elif argDict[arg] == str:
#test empty string
possibleArgs[arg].append("")
#test random strings of random lengths
for i in xrange(10):
#random string generation
possibleArgs[arg].append(randString())
elif argDict[arg] == (list, int):
#test empty list
possibleArgs[arg].append([])
for i in xrange(10):
possibleArgs[arg].append([random.randint(0,100) for t in xrange(random.randint(1,100))])
elif argDict[arg] == (list, str):
possibleArgs[arg].append([])
for i in xrange(10):
#random string generation put into lists
possibleArgs[arg].append([randString() for y in xrange(random.randint(0,100))])
elif argDict[arg] == (list, float):
possibleArgs[arg].append([])
for i in xrange(10):
possibleArgs[arg].append([random.uniform(0, 100) for x in xrange(random.randint(1,100))])
#now we have a dictionary of possible inputs, we choose random possible inputs for each test
resultSuite = Suite(fn.__name__)
for i in xrange(numTests):
listofArgs = []
        # iterate in declared argument order so positional arguments line up
        for arg in argNames:
listofArgs.append(random.choice(possibleArgs[arg]))
if reference:
newTest = Test(fn, listofArgs, output = reference(*listofArgs),
error = False, name = "Generated test on {0}".format(fn.__name__))
else:
newTest = Test(fn, listofArgs, Unknown, error = False,
name = "Generated test on {0}".format(fn.__name__))
resultSuite.addTest(newTest)
return resultSuite
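# Illustrative usage sketch (repeat and its reference solution are hypothetical):
#
#   def repeat(s, count):
#       return s * count
#
#   suite = genTests(repeat, ["ab", 3], numTests=10, reference=lambda s, c: s * c)
#   suite.runTests()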
class Suite(object):
"""
A suite is a group of tests meant to be run together
"""
    def __init__(self, name = "", tests = None):
        # copy the list so that separate suites never share a mutable default
        self.tests = list(tests) if tests else []
        self.name = name
def __add__(self, other):
return Suite(self.name + " and " + other.name, self.tests + other.tests)
def __repr__(self):
return self.name + str(self.tests)
def testList(self):
return self.tests
def addTests(self, tests):
"adds a list of tests to the suite"
assert type(tests) == list
assert type(tests[0]) == Test
self.tests += tests
def addTest(self, test):
"adds a single test to the Suite"
assert type(test) == Test
self.tests.append(test)
def removeTest(self, test):
self.tests.remove(test)
def timeTests(self):
self.runTests(timed = True)
def runTestsSilent(self):
"""
Runs tests quietly, unless an error occurs
"""
errorCount = 0
passed = 0
for test in self.tests:
result = test.run()
if result[0]:
passed += 1
else:
errorCount += 1
#otherwise there's an error
print "TEST on function", test.fn.__name__, "FAILED"
print "Input:", test.inputs
print "Expected ", test.output
#assertion error
if result[1] == 1:
print "An assertion error occurred unexpectedly:"
print result[2]
#expected an assertion error
elif result[1] == 2:
print "Expected an assertion to fail, but ran to completion"
print "Output: ", result[2]
else:
assert result[1] == 3
print "Incorrect output:", result[2]
print "--------------------------"
if errorCount:
print "Tests failed: ", errorCount
return errorCount
def runTests(self, timed= False):
print "Running unit tests on suite", self.name
print "--------------------------"
errorCount = 0
passed = 0
for test in self.tests:
print "Running", test.name
print "Input: ", test.inputs
if test.output is not Unknown:
print "Expect:", test.output
if timed:
result, elapsed = test.time()
else:
result = test.run()
if result[0]:
passed += 1
if result[1] == 4:
print "Output:", result[2]
print "TEST PASSED"
if timed:
print "Time Elapsed:", elapsed
print "--------------------------"
else:
errorCount += 1
#otherwise there's an error
print "TEST FAILED"
print "Expected ", test.output
#assertion error
if result[1] == 1:
print "An assertion error occurred unexpectedly:"
print result[2]
#expected an assertion error
elif result[1] == 2:
print "Expected an assertion to fail, but ran to completion"
print "Output: ", result[2]
else:
assert result[1] == 3
print "Incorrect output:", result[2]
print "--------------------------"
print "Tests passed: ", passed
print "Tests failed: ", errorCount
return errorCount
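# Illustrative usage sketch (square and its tests are hypothetical):
#
#   def square(x):
#       return x * x
#
#   suite = Suite("square tests", [Test(square, [3], output=9),
#                                  Test(square, [-2], output=4)])
#   suite.runTests()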
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# zernike.py
"""
A module defining the zernike polynomials and associated functions.
Running this file as a script will output a graph of the first 15 zernike
polynomials on the unit disk.
https://en.wikipedia.org/wiki/Zernike_polynomials
http://mathworld.wolfram.com/ZernikePolynomial.html
Copyright (c) 2016, David Hoffman
"""
import numpy as np
from scipy.special import eval_jacobi
from .utils import cart2pol
# forward mapping of Noll indices https://oeis.org/A176988
noll_mapping = np.array(
[
1,
3,
2,
5,
4,
6,
9,
7,
8,
10,
15,
13,
11,
12,
14,
21,
19,
17,
16,
18,
20,
27,
25,
23,
22,
24,
26,
28,
35,
33,
31,
29,
30,
32,
34,
36,
45,
43,
41,
39,
37,
38,
40,
42,
44,
55,
53,
51,
49,
47,
46,
48,
50,
52,
54,
65,
63,
61,
59,
57,
56,
58,
60,
62,
64,
66,
77,
75,
73,
71,
69,
67,
68,
70,
72,
74,
76,
78,
91,
89,
87,
85,
83,
81,
79,
80,
82,
84,
86,
88,
90,
105,
103,
101,
99,
97,
95,
93,
92,
94,
96,
98,
100,
102,
104,
119,
117,
115,
113,
111,
109,
107,
106,
108,
110,
112,
114,
116,
118,
120,
]
)
# reverse mapping of noll indices
noll_inverse = noll_mapping.argsort()
# classical names for the Noll indices
# https://en.wikipedia.org/wiki/Zernike_polynomials
noll2name = {
1: "piston",
2: "tip",
3: "tilt",
4: "defocus",
5: "oblique astigmatism",
6: "vertical astigmatism",
7: "vertical coma",
8: "horizontal coma",
9: "vertical trefoil",
10: "oblique trefoil",
11: "primary spherical",
12: "vertical secondary astigmatism",
13: "oblique secondary astigmatism",
14: "vertical quadrafoil",
15: "oblique quadrafoil",
}
name2noll = {v: k for k, v in noll2name.items()}
def noll2degrees(noll):
"""Convert from Noll's indices to radial degree and azimuthal degree."""
noll = np.asarray(noll)
if not np.issubdtype(noll.dtype, np.signedinteger):
raise ValueError(f"input is not integer, input = {noll}")
if not (noll > 0).all():
raise ValueError(f"Noll indices must be greater than 0, input = {noll}")
# need to subtract 1 from the Noll's indices because they start at 1.
p = noll_inverse[noll - 1]
n = np.ceil((-3 + np.sqrt(9 + 8 * p)) / 2)
m = 2 * p - n * (n + 2)
return n.astype(int), m.astype(int)
def degrees2noll(n, m):
"""Convert from radial and azimuthal degrees to Noll's index."""
n, m = np.asarray(n), np.asarray(m)
# check inputs
    if not np.issubdtype(n.dtype, np.signedinteger):
        raise ValueError(f"Radial degree is not integer, input = {n}")
    if not np.issubdtype(m.dtype, np.signedinteger):
        raise ValueError(f"Azimuthal degree is not integer, input = {m}")
    if ((n - m) % 2).any():
        raise ValueError("The difference between radial and azimuthal degrees must be even")
# do the mapping
p = (m + n * (n + 2)) / 2
noll = noll_mapping[p.astype(int)]
return noll
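# Example: Noll index 4 ("defocus" in noll2name) corresponds to radial degree
# n = 2 and azimuthal degree m = 0, and the two mappings round-trip:
#   noll2degrees(4)    -> (2, 0)
#   degrees2noll(2, 0) -> 4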
def zernike(r, theta, *args, **kwargs):
"""Calculate the Zernike polynomial on the unit disk for the requested orders.
Parameters
----------
r : ndarray
theta : ndarray
Args
----
Noll : numeric or numeric sequence
Noll's Indices to generate
(n, m) : tuple of numerics or numeric sequences
Radial and azimuthal degrees
n : see above
m : see above
Kwargs
------
norm : bool (default False)
Do you want the output normed?
Returns
-------
zernike : ndarray
The zernike polynomials corresponding to Noll or (n, m) whichever are
provided
Example
-------
>>> x = np.linspace(-1, 1, 512)
>>> xx, yy = np.meshgrid(x, x)
>>> r, theta = cart2pol(yy, xx)
>>> zern = zernike(r, theta, 4) # generates the defocus zernike polynomial
"""
if len(args) == 1:
args = np.asarray(args[0])
if args.ndim < 2:
n, m = noll2degrees(args)
elif args.ndim == 2:
if args.shape[0] == 2:
n, m = args
else:
raise RuntimeError("This shouldn't happen")
else:
raise ValueError(f"{args.shape} is the wrong shape")
elif len(args) == 2:
n, m = np.asarray(args)
if n.ndim > 1:
raise ValueError("Radial degree has the wrong shape")
if m.ndim > 1:
raise ValueError("Azimuthal degree has the wrong shape")
if n.shape != m.shape:
raise ValueError("Radial and Azimuthal degrees have different shapes")
else:
raise ValueError(f"{len(args)} is an invalid number of arguments")
# make sure r and theta are arrays
r = np.asarray(r, dtype=float)
theta = np.asarray(theta, dtype=float)
# make sure that r is always greater than 0
if not (r >= 0).all():
raise ValueError("r must always be greater or equal to 0")
if r.ndim > 2:
raise ValueError("Input rho and theta cannot have more than two dimensions")
# make sure that n and m are iterable
n, m = n.ravel(), m.ravel()
# make sure that n is always greater or equal to m
if not (n >= abs(m)).all():
raise ValueError("n must always be greater or equal to m")
# return column of zernike polynomials
return np.array([_zernike(r, theta, nn, mm, **kwargs) for nn, mm in zip(n, m)]).squeeze()
def _radial_zernike(r, n, m):
"""Radial part of the zernike polynomial.
Formula from http://mathworld.wolfram.com/ZernikePolynomial.html
"""
rad_zern = np.zeros_like(r)
# zernike polynomials are only valid for r <= 1
valid_points = r <= 1.0
if m == 0 and n == 0:
rad_zern[valid_points] = 1
return rad_zern
rprime = r[valid_points]
# for the radial part m is always positive
m = abs(m)
    # degree of the Jacobi polynomial
    coef2 = (n - m) // 2
jacobi = eval_jacobi(coef2, m, 0, 1 - 2 * rprime ** 2)
rad_zern[valid_points] = (-1) ** coef2 * rprime ** m * jacobi
return rad_zern
def _zernike(r, theta, n, m, norm=True):
"""Calculate the full zernike polynomial."""
# remember if m is negative
mneg = m < 0
# going forward m is positive (Radial zernikes are only defined for
# positive m)
m = abs(m)
    # if n and m aren't separated by a multiple of two then return zeros
if (n - m) % 2:
return np.zeros_like(r)
zern = _radial_zernike(r, n, m)
if mneg:
# odd zernike
zern *= np.sin(m * theta)
else:
# even zernike
zern *= np.cos(m * theta)
# calculate the normalization factor
if norm:
# https://www.gatinel.com/en/recherche-formation/wavefront-sensing/zernike-polynomials/
if m == 0:
# m is zero
norm = np.sqrt(n + 1)
else:
# m not zero
norm = np.sqrt(2 * (n + 1))
zern *= norm
return zern
if __name__ == "__main__":
from matplotlib import pyplot as plt
# make coordinates
x = np.linspace(-1, 1, 257)
xx, yy = np.meshgrid(x, x) # xy indexing is default
r, theta = cart2pol(yy, xx)
# set up plot
fig, axs = plt.subplots(3, 5, figsize=(20, 12))
# fill out plot
for ax, (k, v) in zip(axs.ravel(), noll2name.items()):
zern = zernike(r, theta, k, norm=False)
ax.imshow(
np.ma.array(zern, mask=r > 1),
vmin=-1,
vmax=1,
cmap="coolwarm",
interpolation="bicubic",
)
ax.set_title(v + r", $Z_{{{}}}^{{{}}}$".format(*noll2degrees(k)))
ax.axis("off")
fig.tight_layout()
plt.show()
|
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from django.core.checks import Error
from django.db import models
from django.test.utils import override_settings
from django.test.testcases import skipIfDBFeature
from .base import IsolatedModelsTestCase
class RelativeFieldTests(IsolatedModelsTestCase):
def test_valid_foreign_key_without_accessor(self):
class Target(models.Model):
# There would be a clash if Model.field installed an accessor.
model = models.IntegerField()
class Model(models.Model):
field = models.ForeignKey(Target, related_name='+')
field = Model._meta.get_field('field')
errors = field.check()
self.assertEqual(errors, [])
def test_foreign_key_to_missing_model(self):
# Model names are resolved when a model is being created, so we cannot
# test relative fields in isolation and we need to attach them to a
# model.
class Model(models.Model):
foreign_key = models.ForeignKey('Rel1')
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
("Field defines a relation with model 'Rel1', "
"which is either not installed, or is abstract."),
hint=None,
obj=field,
id='fields.E300',
),
]
self.assertEqual(errors, expected)
def test_many_to_many_to_missing_model(self):
class Model(models.Model):
m2m = models.ManyToManyField("Rel2")
field = Model._meta.get_field('m2m')
errors = field.check(from_model=Model)
expected = [
Error(
("Field defines a relation with model 'Rel2', "
"which is either not installed, or is abstract."),
hint=None,
obj=field,
id='fields.E300',
),
]
self.assertEqual(errors, expected)
def test_ambiguous_relationship_model(self):
class Person(models.Model):
pass
class Group(models.Model):
field = models.ManyToManyField('Person',
through="AmbiguousRelationship", related_name='tertiary')
class AmbiguousRelationship(models.Model):
            # Too many foreign keys to Person.
first_person = models.ForeignKey(Person, related_name="first")
second_person = models.ForeignKey(Person, related_name="second")
second_model = models.ForeignKey(Group)
field = Group._meta.get_field('field')
errors = field.check(from_model=Group)
expected = [
Error(
("The model is used as an intermediate model by "
"'invalid_models_tests.Group.field', but it has more than one "
"foreign key to 'Person', which is ambiguous. You must specify "
"which foreign key Django should use via the through_fields "
"keyword argument."),
hint=('If you want to create a recursive relationship, use '
'ForeignKey("self", symmetrical=False, '
'through="AmbiguousRelationship").'),
obj=field,
id='fields.E335',
),
]
self.assertEqual(errors, expected)
def test_relationship_model_with_foreign_key_to_wrong_model(self):
class WrongModel(models.Model):
pass
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person',
through="InvalidRelationship")
class InvalidRelationship(models.Model):
person = models.ForeignKey(Person)
wrong_foreign_key = models.ForeignKey(WrongModel)
# The last foreign key should point to Group model.
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
("The model is used as an intermediate model by "
"'invalid_models_tests.Group.members', but it does not "
"have a foreign key to 'Group' or 'Person'."),
hint=None,
obj=InvalidRelationship,
id='fields.E336',
),
]
self.assertEqual(errors, expected)
def test_relationship_model_missing_foreign_key(self):
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person',
through="InvalidRelationship")
class InvalidRelationship(models.Model):
group = models.ForeignKey(Group)
# No foreign key to Person
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
("The model is used as an intermediate model by "
"'invalid_models_tests.Group.members', but it does not have "
"a foreign key to 'Group' or 'Person'."),
hint=None,
obj=InvalidRelationship,
id='fields.E336',
),
]
self.assertEqual(errors, expected)
def test_missing_relationship_model(self):
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person',
through="MissingM2MModel")
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
("Field specifies a many-to-many relation through model "
"'MissingM2MModel', which has not been installed."),
hint=None,
obj=field,
id='fields.E331',
),
]
self.assertEqual(errors, expected)
def test_symmetrical_self_referential_field(self):
class Person(models.Model):
# Implicit symmetrical=False.
friends = models.ManyToManyField('self', through="Relationship")
class Relationship(models.Model):
first = models.ForeignKey(Person, related_name="rel_from_set")
second = models.ForeignKey(Person, related_name="rel_to_set")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
hint=None,
obj=field,
id='fields.E332',
),
]
self.assertEqual(errors, expected)
def test_too_many_foreign_keys_in_self_referential_model(self):
class Person(models.Model):
friends = models.ManyToManyField('self',
through="InvalidRelationship", symmetrical=False)
class InvalidRelationship(models.Model):
first = models.ForeignKey(Person, related_name="rel_from_set_2")
second = models.ForeignKey(Person, related_name="rel_to_set_2")
third = models.ForeignKey(Person, related_name="too_many_by_far")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
("The model is used as an intermediate model by "
"'invalid_models_tests.Person.friends', but it has more than two "
"foreign keys to 'Person', which is ambiguous. You must specify "
"which two foreign keys Django should use via the through_fields "
"keyword argument."),
hint='Use through_fields to specify which two foreign keys Django should use.',
obj=InvalidRelationship,
id='fields.E333',
),
]
self.assertEqual(errors, expected)
def test_symmetric_self_reference_with_intermediate_table(self):
class Person(models.Model):
# Explicit symmetrical=True.
friends = models.ManyToManyField('self',
through="Relationship", symmetrical=True)
class Relationship(models.Model):
first = models.ForeignKey(Person, related_name="rel_from_set")
second = models.ForeignKey(Person, related_name="rel_to_set")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
hint=None,
obj=field,
id='fields.E332',
),
]
self.assertEqual(errors, expected)
def test_symmetric_self_reference_with_intermediate_table_and_through_fields(self):
"""Using through_fields in a m2m with an intermediate model shouldn't mask its incompatibility with symmetry."""
class Person(models.Model):
# Explicit symmetrical=True.
friends = models.ManyToManyField('self',
symmetrical=True,
through="Relationship",
through_fields=('first', 'second'))
class Relationship(models.Model):
first = models.ForeignKey(Person, related_name="rel_from_set")
second = models.ForeignKey(Person, related_name="rel_to_set")
referee = models.ForeignKey(Person, related_name="referred")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
hint=None,
obj=field,
id='fields.E332',
),
]
self.assertEqual(errors, expected)
def test_foreign_key_to_abstract_model(self):
class Model(models.Model):
foreign_key = models.ForeignKey('AbstractModel')
class AbstractModel(models.Model):
class Meta:
abstract = True
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
("Field defines a relation with model 'AbstractModel', "
"which is either not installed, or is abstract."),
hint=None,
obj=field,
id='fields.E300',
),
]
self.assertEqual(errors, expected)
def test_m2m_to_abstract_model(self):
class AbstractModel(models.Model):
class Meta:
abstract = True
class Model(models.Model):
m2m = models.ManyToManyField('AbstractModel')
field = Model._meta.get_field('m2m')
errors = field.check(from_model=Model)
expected = [
Error(
("Field defines a relation with model 'AbstractModel', "
"which is either not installed, or is abstract."),
hint=None,
obj=field,
id='fields.E300',
),
]
self.assertEqual(errors, expected)
def test_unique_m2m(self):
class Person(models.Model):
name = models.CharField(max_length=5)
class Group(models.Model):
members = models.ManyToManyField('Person', unique=True)
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
'ManyToManyFields cannot be unique.',
hint=None,
obj=field,
id='fields.E330',
),
]
self.assertEqual(errors, expected)
def test_foreign_key_to_non_unique_field(self):
class Target(models.Model):
bad = models.IntegerField() # No unique=True
class Model(models.Model):
foreign_key = models.ForeignKey('Target', to_field='bad')
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
"'Target.bad' must set unique=True because it is referenced by a foreign key.",
hint=None,
obj=field,
id='fields.E311',
),
]
self.assertEqual(errors, expected)
def test_foreign_key_to_non_unique_field_under_explicit_model(self):
class Target(models.Model):
bad = models.IntegerField()
class Model(models.Model):
field = models.ForeignKey(Target, to_field='bad')
field = Model._meta.get_field('field')
errors = field.check()
expected = [
Error(
"'Target.bad' must set unique=True because it is referenced by a foreign key.",
hint=None,
obj=field,
id='fields.E311',
),
]
self.assertEqual(errors, expected)
def test_foreign_object_to_non_unique_fields(self):
class Person(models.Model):
            # Note that neither field is unique.
country_id = models.IntegerField()
city_id = models.IntegerField()
class MMembership(models.Model):
person_country_id = models.IntegerField()
person_city_id = models.IntegerField()
person = models.ForeignObject(Person,
from_fields=['person_country_id', 'person_city_id'],
to_fields=['country_id', 'city_id'])
field = MMembership._meta.get_field('person')
errors = field.check()
expected = [
Error(
("None of the fields 'country_id', 'city_id' on model 'Person' "
"have a unique=True constraint."),
hint=None,
obj=field,
id='fields.E310',
)
]
self.assertEqual(errors, expected)
def test_on_delete_set_null_on_non_nullable_field(self):
class Person(models.Model):
pass
class Model(models.Model):
foreign_key = models.ForeignKey('Person',
on_delete=models.SET_NULL)
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
'Field specifies on_delete=SET_NULL, but cannot be null.',
hint='Set null=True argument on the field, or change the on_delete rule.',
obj=field,
id='fields.E320',
),
]
self.assertEqual(errors, expected)
def test_on_delete_set_default_without_default_value(self):
class Person(models.Model):
pass
class Model(models.Model):
foreign_key = models.ForeignKey('Person',
on_delete=models.SET_DEFAULT)
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
'Field specifies on_delete=SET_DEFAULT, but has no default value.',
hint='Set a default value, or change the on_delete rule.',
obj=field,
id='fields.E321',
),
]
self.assertEqual(errors, expected)
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_nullable_primary_key(self):
class Model(models.Model):
field = models.IntegerField(primary_key=True, null=True)
field = Model._meta.get_field('field')
errors = field.check()
expected = [
Error(
'Primary keys must not have null=True.',
hint='Set null=False on the field, or remove primary_key=True argument.',
obj=field,
id='fields.E007',
),
]
self.assertEqual(errors, expected)
def test_not_swapped_model(self):
class SwappableModel(models.Model):
            # A model that can be, but isn't, swapped out. References to this
            # model should *not* raise any validation error.
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
class Model(models.Model):
explicit_fk = models.ForeignKey(SwappableModel,
related_name='explicit_fk')
implicit_fk = models.ForeignKey('invalid_models_tests.SwappableModel',
related_name='implicit_fk')
explicit_m2m = models.ManyToManyField(SwappableModel,
related_name='explicit_m2m')
implicit_m2m = models.ManyToManyField(
'invalid_models_tests.SwappableModel',
related_name='implicit_m2m')
explicit_fk = Model._meta.get_field('explicit_fk')
self.assertEqual(explicit_fk.check(), [])
implicit_fk = Model._meta.get_field('implicit_fk')
self.assertEqual(implicit_fk.check(), [])
explicit_m2m = Model._meta.get_field('explicit_m2m')
self.assertEqual(explicit_m2m.check(from_model=Model), [])
implicit_m2m = Model._meta.get_field('implicit_m2m')
self.assertEqual(implicit_m2m.check(from_model=Model), [])
@override_settings(TEST_SWAPPED_MODEL='invalid_models_tests.Replacement')
def test_referencing_to_swapped_model(self):
class Replacement(models.Model):
pass
class SwappedModel(models.Model):
class Meta:
swappable = 'TEST_SWAPPED_MODEL'
class Model(models.Model):
explicit_fk = models.ForeignKey(SwappedModel,
related_name='explicit_fk')
implicit_fk = models.ForeignKey('invalid_models_tests.SwappedModel',
related_name='implicit_fk')
explicit_m2m = models.ManyToManyField(SwappedModel,
related_name='explicit_m2m')
implicit_m2m = models.ManyToManyField(
'invalid_models_tests.SwappedModel',
related_name='implicit_m2m')
fields = [
Model._meta.get_field('explicit_fk'),
Model._meta.get_field('implicit_fk'),
Model._meta.get_field('explicit_m2m'),
Model._meta.get_field('implicit_m2m'),
]
expected_error = Error(
("Field defines a relation with the model "
"'invalid_models_tests.SwappedModel', which has been swapped out."),
hint="Update the relation to point at 'settings.TEST_SWAPPED_MODEL'.",
id='fields.E301',
)
for field in fields:
expected_error.obj = field
errors = field.check(from_model=Model)
self.assertEqual(errors, [expected_error])
class AccessorClashTests(IsolatedModelsTestCase):
def test_fk_to_integer(self):
self._test_accessor_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target'))
def test_fk_to_fk(self):
self._test_accessor_clash(
target=models.ForeignKey('Another'),
relative=models.ForeignKey('Target'))
def test_fk_to_m2m(self):
self._test_accessor_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target'))
def test_m2m_to_integer(self):
self._test_accessor_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target'))
def test_m2m_to_fk(self):
self._test_accessor_clash(
target=models.ForeignKey('Another'),
relative=models.ManyToManyField('Target'))
def test_m2m_to_m2m(self):
self._test_accessor_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target'))
def _test_accessor_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
model_set = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.rel' clashes with field name 'Target.model_set'.",
hint=("Rename field 'Target.model_set', or add/change "
"a related_name argument to the definition "
"for field 'Model.rel'."),
obj=Model._meta.get_field('rel'),
id='fields.E302',
),
]
self.assertEqual(errors, expected)
def test_clash_between_accessors(self):
class Target(models.Model):
pass
class Model(models.Model):
foreign = models.ForeignKey(Target)
m2m = models.ManyToManyField(Target)
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.foreign' clashes with reverse accessor for 'Model.m2m'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.foreign' or 'Model.m2m'."),
obj=Model._meta.get_field('foreign'),
id='fields.E304',
),
Error(
"Reverse accessor for 'Model.m2m' clashes with reverse accessor for 'Model.foreign'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m' or 'Model.foreign'."),
obj=Model._meta.get_field('m2m'),
id='fields.E304',
),
]
self.assertEqual(errors, expected)
def test_m2m_to_m2m_with_inheritance(self):
""" Ref #22047. """
class Target(models.Model):
pass
class Model(models.Model):
children = models.ManyToManyField('Child',
related_name="m2m_clash", related_query_name="no_clash")
class Parent(models.Model):
m2m_clash = models.ManyToManyField('Target')
class Child(Parent):
pass
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.children' clashes with field name 'Child.m2m_clash'.",
hint=("Rename field 'Child.m2m_clash', or add/change "
"a related_name argument to the definition "
"for field 'Model.children'."),
obj=Model._meta.get_field('children'),
id='fields.E302',
)
]
self.assertEqual(errors, expected)
class ReverseQueryNameClashTests(IsolatedModelsTestCase):
def test_fk_to_integer(self):
self._test_reverse_query_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target'))
def test_fk_to_fk(self):
self._test_reverse_query_name_clash(
target=models.ForeignKey('Another'),
relative=models.ForeignKey('Target'))
def test_fk_to_m2m(self):
self._test_reverse_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target'))
def test_m2m_to_integer(self):
self._test_reverse_query_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target'))
def test_m2m_to_fk(self):
self._test_reverse_query_name_clash(
target=models.ForeignKey('Another'),
relative=models.ManyToManyField('Target'))
def test_m2m_to_m2m(self):
self._test_reverse_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target'))
def _test_reverse_query_name_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
model = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.rel' clashes with field name 'Target.model'.",
hint=("Rename field 'Target.model', or add/change "
"a related_name argument to the definition "
"for field 'Model.rel'."),
obj=Model._meta.get_field('rel'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
class ExplicitRelatedNameClashTests(IsolatedModelsTestCase):
def test_fk_to_integer(self):
self._test_explicit_related_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target', related_name='clash'))
def test_fk_to_fk(self):
self._test_explicit_related_name_clash(
target=models.ForeignKey('Another'),
relative=models.ForeignKey('Target', related_name='clash'))
def test_fk_to_m2m(self):
self._test_explicit_related_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target', related_name='clash'))
def test_m2m_to_integer(self):
self._test_explicit_related_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target', related_name='clash'))
def test_m2m_to_fk(self):
self._test_explicit_related_name_clash(
target=models.ForeignKey('Another'),
relative=models.ManyToManyField('Target', related_name='clash'))
def test_m2m_to_m2m(self):
self._test_explicit_related_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target', related_name='clash'))
def _test_explicit_related_name_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
clash = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.rel' clashes with field name 'Target.clash'.",
hint=("Rename field 'Target.clash', or add/change "
"a related_name argument to the definition "
"for field 'Model.rel'."),
obj=Model._meta.get_field('rel'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.rel' clashes with field name 'Target.clash'.",
hint=("Rename field 'Target.clash', or add/change "
"a related_name argument to the definition "
"for field 'Model.rel'."),
obj=Model._meta.get_field('rel'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
class ExplicitRelatedQueryNameClashTests(IsolatedModelsTestCase):
def test_fk_to_integer(self):
self._test_explicit_related_query_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target',
related_query_name='clash'))
def test_fk_to_fk(self):
self._test_explicit_related_query_name_clash(
target=models.ForeignKey('Another'),
relative=models.ForeignKey('Target',
related_query_name='clash'))
def test_fk_to_m2m(self):
self._test_explicit_related_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target',
related_query_name='clash'))
def test_m2m_to_integer(self):
self._test_explicit_related_query_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target',
related_query_name='clash'))
def test_m2m_to_fk(self):
self._test_explicit_related_query_name_clash(
target=models.ForeignKey('Another'),
relative=models.ManyToManyField('Target',
related_query_name='clash'))
def test_m2m_to_m2m(self):
self._test_explicit_related_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target',
related_query_name='clash'))
def _test_explicit_related_query_name_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
clash = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.rel' clashes with field name 'Target.clash'.",
hint=("Rename field 'Target.clash', or add/change a related_name "
"argument to the definition for field 'Model.rel'."),
obj=Model._meta.get_field('rel'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
class SelfReferentialM2MClashTests(IsolatedModelsTestCase):
def test_clash_between_accessors(self):
class Model(models.Model):
first_m2m = models.ManyToManyField('self', symmetrical=False)
second_m2m = models.ManyToManyField('self', symmetrical=False)
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.first_m2m' clashes with reverse accessor for 'Model.second_m2m'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.first_m2m' or 'Model.second_m2m'."),
obj=Model._meta.get_field('first_m2m'),
id='fields.E304',
),
Error(
"Reverse accessor for 'Model.second_m2m' clashes with reverse accessor for 'Model.first_m2m'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.second_m2m' or 'Model.first_m2m'."),
obj=Model._meta.get_field('second_m2m'),
id='fields.E304',
),
]
self.assertEqual(errors, expected)
def test_accessor_clash(self):
class Model(models.Model):
model_set = models.ManyToManyField("self", symmetrical=False)
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.model_set' clashes with field name 'Model.model_set'.",
hint=("Rename field 'Model.model_set', or add/change "
"a related_name argument to the definition "
"for field 'Model.model_set'."),
obj=Model._meta.get_field('model_set'),
id='fields.E302',
),
]
self.assertEqual(errors, expected)
def test_reverse_query_name_clash(self):
class Model(models.Model):
model = models.ManyToManyField("self", symmetrical=False)
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.model' clashes with field name 'Model.model'.",
hint=("Rename field 'Model.model', or add/change a related_name "
"argument to the definition for field 'Model.model'."),
obj=Model._meta.get_field('model'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
def test_clash_under_explicit_related_name(self):
class Model(models.Model):
clash = models.IntegerField()
m2m = models.ManyToManyField("self",
symmetrical=False, related_name='clash')
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.m2m' clashes with field name 'Model.clash'.",
hint=("Rename field 'Model.clash', or add/change a related_name "
"argument to the definition for field 'Model.m2m'."),
obj=Model._meta.get_field('m2m'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.m2m' clashes with field name 'Model.clash'.",
hint=("Rename field 'Model.clash', or add/change a related_name "
"argument to the definition for field 'Model.m2m'."),
obj=Model._meta.get_field('m2m'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
def test_valid_model(self):
class Model(models.Model):
first = models.ManyToManyField("self",
symmetrical=False, related_name='first_accessor')
second = models.ManyToManyField("self",
symmetrical=False, related_name='second_accessor')
errors = Model.check()
self.assertEqual(errors, [])
class SelfReferentialFKClashTests(IsolatedModelsTestCase):
def test_accessor_clash(self):
class Model(models.Model):
model_set = models.ForeignKey("Model")
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.model_set' clashes with field name 'Model.model_set'.",
hint=("Rename field 'Model.model_set', or add/change "
"a related_name argument to the definition "
"for field 'Model.model_set'."),
obj=Model._meta.get_field('model_set'),
id='fields.E302',
),
]
self.assertEqual(errors, expected)
def test_reverse_query_name_clash(self):
class Model(models.Model):
model = models.ForeignKey("Model")
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.model' clashes with field name 'Model.model'.",
hint=("Rename field 'Model.model', or add/change "
"a related_name argument to the definition "
"for field 'Model.model'."),
obj=Model._meta.get_field('model'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
def test_clash_under_explicit_related_name(self):
class Model(models.Model):
clash = models.CharField(max_length=10)
foreign = models.ForeignKey("Model", related_name='clash')
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.foreign' clashes with field name 'Model.clash'.",
hint=("Rename field 'Model.clash', or add/change "
"a related_name argument to the definition "
"for field 'Model.foreign'."),
obj=Model._meta.get_field('foreign'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.foreign' clashes with field name 'Model.clash'.",
hint=("Rename field 'Model.clash', or add/change "
"a related_name argument to the definition "
"for field 'Model.foreign'."),
obj=Model._meta.get_field('foreign'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
class ComplexClashTests(IsolatedModelsTestCase):
# New tests should not be included here, because this is a single,
# self-contained sanity check, not a test of everything.
def test_complex_clash(self):
class Target(models.Model):
tgt_safe = models.CharField(max_length=10)
clash = models.CharField(max_length=10)
model = models.CharField(max_length=10)
clash1_set = models.CharField(max_length=10)
class Model(models.Model):
src_safe = models.CharField(max_length=10)
foreign_1 = models.ForeignKey(Target, related_name='id')
foreign_2 = models.ForeignKey(Target, related_name='src_safe')
m2m_1 = models.ManyToManyField(Target, related_name='id')
m2m_2 = models.ManyToManyField(Target, related_name='src_safe')
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.foreign_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.foreign_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.foreign_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.foreign_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E303',
),
Error(
"Reverse accessor for 'Model.foreign_1' clashes with reverse accessor for 'Model.m2m_1'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.foreign_1' or 'Model.m2m_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.foreign_1' clashes with reverse query name for 'Model.m2m_1'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.foreign_1' or 'Model.m2m_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E305',
),
Error(
"Reverse accessor for 'Model.foreign_2' clashes with reverse accessor for 'Model.m2m_2'.",
hint=("Add or change a related_name argument "
"to the definition for 'Model.foreign_2' or 'Model.m2m_2'."),
obj=Model._meta.get_field('foreign_2'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.foreign_2' clashes with reverse query name for 'Model.m2m_2'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.foreign_2' or 'Model.m2m_2'."),
obj=Model._meta.get_field('foreign_2'),
id='fields.E305',
),
Error(
"Reverse accessor for 'Model.m2m_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.m2m_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.m2m_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.m2m_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E303',
),
Error(
"Reverse accessor for 'Model.m2m_1' clashes with reverse accessor for 'Model.foreign_1'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m_1' or 'Model.foreign_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.m2m_1' clashes with reverse query name for 'Model.foreign_1'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.m2m_1' or 'Model.foreign_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E305',
),
Error(
"Reverse accessor for 'Model.m2m_2' clashes with reverse accessor for 'Model.foreign_2'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m_2' or 'Model.foreign_2'."),
obj=Model._meta.get_field('m2m_2'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.m2m_2' clashes with reverse query name for 'Model.foreign_2'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m_2' or 'Model.foreign_2'."),
obj=Model._meta.get_field('m2m_2'),
id='fields.E305',
),
]
self.assertEqual(errors, expected)
class M2mThroughFieldsTests(IsolatedModelsTestCase):
def test_m2m_field_argument_validation(self):
"""
Tests that ManyToManyField accepts the ``through_fields`` kwarg
only if an intermediary table is specified.
"""
class Fan(models.Model):
pass
self.assertRaisesMessage(
ValueError, 'Cannot specify through_fields without a through model',
models.ManyToManyField, Fan, through_fields=('f1', 'f2'))
def test_invalid_order(self):
"""
Tests that mixing up the order of link fields to ManyToManyField.through_fields
triggers validation errors.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(Fan, through='Invitation', through_fields=('invitee', 'event'))
class Invitation(models.Model):
event = models.ForeignKey(Event)
invitee = models.ForeignKey(Fan)
inviter = models.ForeignKey(Fan, related_name='+')
field = Event._meta.get_field('invitees')
errors = field.check(from_model=Event)
expected = [
Error(
("'Invitation.invitee' is not a foreign key to 'Event'."),
hint="Did you mean one of the following foreign keys to 'Event': event?",
obj=field,
id='fields.E339'),
Error(
("'Invitation.event' is not a foreign key to 'Fan'."),
hint="Did you mean one of the following foreign keys to 'Fan': invitee, inviter?",
obj=field,
id='fields.E339'),
]
self.assertEqual(expected, errors)
def test_invalid_field(self):
"""
Tests that providing invalid field names to ManyToManyField.through_fields
triggers validation errors.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(Fan, through='Invitation', through_fields=('invalid_field_1', 'invalid_field_2'))
class Invitation(models.Model):
event = models.ForeignKey(Event)
invitee = models.ForeignKey(Fan)
inviter = models.ForeignKey(Fan, related_name='+')
field = Event._meta.get_field('invitees')
errors = field.check(from_model=Event)
expected = [
Error(
("The intermediary model 'invalid_models_tests.Invitation' has no field 'invalid_field_1'."),
hint="Did you mean one of the following foreign keys to 'Event': event?",
obj=field,
id='fields.E338'),
Error(
("The intermediary model 'invalid_models_tests.Invitation' has no field 'invalid_field_2'."),
hint="Did you mean one of the following foreign keys to 'Fan': invitee, inviter?",
obj=field,
id='fields.E338'),
]
self.assertEqual(expected, errors)
def test_explicit_field_names(self):
"""
Tests that if ``through_fields`` kwarg is given, it must specify both
link fields of the intermediary table.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(Fan, through='Invitation', through_fields=(None, 'invitee'))
class Invitation(models.Model):
event = models.ForeignKey(Event)
invitee = models.ForeignKey(Fan)
inviter = models.ForeignKey(Fan, related_name='+')
field = Event._meta.get_field('invitees')
errors = field.check(from_model=Event)
expected = [
Error(
("Field specifies 'through_fields' but does not provide the names "
"of the two link fields that should be used for the relation "
"through model 'invalid_models_tests.Invitation'."),
hint=("Make sure you specify 'through_fields' as "
"through_fields=('field1', 'field2')"),
obj=field,
id='fields.E337')]
self.assertEqual(expected, errors)
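    # Illustrative sketch (not part of the upstream suite): with the link fields
    # given in the expected order, source model first and then target model, the
    # same style of through model is assumed to pass the field checks cleanly.
    def test_valid_through_fields_order_sketch(self):
        class Fan(models.Model):
            pass
        class Event(models.Model):
            invitees = models.ManyToManyField(
                Fan, through='Invitation', through_fields=('event', 'invitee'))
        class Invitation(models.Model):
            event = models.ForeignKey(Event)
            invitee = models.ForeignKey(Fan)
            inviter = models.ForeignKey(Fan, related_name='+')
        field = Event._meta.get_field('invitees')
        self.assertEqual(field.check(from_model=Event), [])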
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import xml.etree.ElementTree as ET
import io
try:
register_namespace = ET.register_namespace
except AttributeError:
def register_namespace(prefix, uri):
ET._namespace_map[uri] = prefix
import requests
# TODO: test escaping
class MPNSBase(object):
DELAY_IMMEDIATE = None
DELAY_450S = None
DELAY_900S = None
HEADER_NOTIFICATION_CLASS = 'X-NotificationClass'
HEADER_TARGET = 'X-WindowsPhone-Target'
HEADER_MESSAGE_ID = 'X-MessageID'
HEADER_CALLBACK_URI = 'X-CallbackURI'
def __init__(self, delay=None):
self.delay = delay or self.DELAY_IMMEDIATE
self.headers = {
'Content-Type': 'text/xml',
'Accept': 'application/*',
self.HEADER_NOTIFICATION_CLASS: str(self.delay),
}
register_namespace('wp', 'WPNotification')
def set_target(self, target):
self.headers[self.HEADER_TARGET] = target
def serialize_tree(self, tree):
buffer = io.BytesIO()
# http://bugs.python.org/issue15811
# TODO: use xml_declaration=True for python>=2.7
tree.write(buffer, encoding=str('utf-8'))
contents = ('<?xml version="1.0" encoding="utf-8"?>'.encode('utf-8') +
buffer.getvalue())
buffer.close()
return contents
def optional_attribute(self, element, attribute, payload_param, payload):
if payload_param in payload:
            element.attrib[attribute] = payload[payload_param]
def optional_subelement(self, parent, element, payload_param, payload):
if payload_param in payload:
el = ET.SubElement(parent, element)
el.text = payload[payload_param]
return el
def prepare_payload(self, payload):
raise NotImplementedError('Subclasses should override prepare_payload method')
def parse_response(self, response):
status = {
'device_connection_status': response.headers.get('x-deviceconnectionstatus', ''), # Connected, InActive, Disconnected, TempDisconnected
'subscription_status': response.headers.get('x-subscriptionstatus', ''), # Active, Expired
'notification_status': response.headers.get('x-notificationstatus', ''), # Received, Suppressed, Dropped, QueueFull
'message_id': response.headers.get('x-messageid'), # 00000000-0000-0000-0000-000000000000
}
code = response.status_code
status['http_status_code'] = code
if code == 200:
if status['notification_status'] == 'QueueFull':
status['error'] = 'Queue full, try again later'
status['backoff_seconds'] = 60
elif code == 400:
status['error'] = 'Bad Request - invalid payload or subscription URI'
elif code == 401:
status['error'] = 'Unauthorized - invalid token or subscription URI'
status['drop_subscription'] = True
elif code == 404:
status['error'] = 'Not Found - subscription URI is invalid'
status['drop_subscription'] = True
elif code == 405:
status['error'] = 'Invalid Method' # (this should not happen, module uses only POST method)
elif code == 406:
status['error'] = 'Not Acceptable - per-day throttling limit reached'
status['backoff_seconds'] = 24 * 60 * 60
elif code == 412:
status['error'] = 'Precondition Failed - device inactive, try once per-hour'
status['backoff_seconds'] = 60 * 60
elif code == 503:
status['error'] = 'Service Unavailable - try again later'
status['backoff_seconds'] = 60
else:
status['error'] = 'Unexpected status'
return status
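    # parse_response() returns a status dict such as the following (illustrative
    # values for an expired subscription that came back with HTTP 404):
    #   {'device_connection_status': 'Disconnected',
    #    'subscription_status': 'Expired',
    #    'notification_status': 'Dropped',
    #    'message_id': None,
    #    'http_status_code': 404,
    #    'error': 'Not Found - subscription URI is invalid',
    #    'drop_subscription': True}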
def send(self, uri, payload, message_id=None, callback_uri=None, cert=None, debug=False):
"""
Send push message. Input parameters:
uri - subscription uri
payload - message payload (see help for subclasses)
message_id - optional message id (UUID)
callback_uri - optional callback url (only for authenticated web services)
cert - optional (only for authenticated web services)
If string, path to ssl client cert file (.pem).
If tuple, ('cert', 'key') pair.
For more info see requests library documentation.
Returns message status dictionary with the following elements:
device_connection_status - Connected, InActive, Disconnected, TempDisconnected
subscription_status - Active, Expired
notification_status - Received, Suppressed, Dropped, QueueFull
message_id - submitted message_id or 00000000-0000-0000-0000-000000000000
http_status_code - HTTP response status code
error - optional error message
backoff_seconds - optional recommended throttling delay (in seconds)
drop_subscription - optional flag to indicate that subscription uri is invalid
"""
# reset per-message headers
for k in (self.HEADER_MESSAGE_ID, self.HEADER_CALLBACK_URI):
if k in self.headers: self.headers.pop(k)
# set per-message headers if necessary
if message_id:
self.headers[self.HEADER_MESSAGE_ID] = str(message_id) # TODO: validate UUID
if callback_uri:
self.headers[self.HEADER_CALLBACK_URI] = str(callback_uri)
data = self.prepare_payload(payload)
res = requests.post(uri, data=data, headers=self.headers, cert=cert)
result = self.parse_response(res)
if debug:
result['request'] = {'data': data, 'headers': dict(self.headers) }
result['response'] = {'status': res.status_code, 'headers': dict(res.headers), 'text': res.text}
return result
# TODO: create separate classes for FlipTile, Cycle and Iconic notifications (also add version 2.0)
# WP8 specific:
# self.clearable_subelement(tile, '{WPNotification}SmallBackgroundImage', 'small_background_image', payload)
# self.clearable_subelement(tile, '{WPNotification}WideBackgroundImage', 'wide_background_image', payload)
# self.clearable_subelement(tile, '{WPNotification}WideBackBackgroundImage', 'wide_back_background_image', payload)
# self.clearable_subelement(tile, '{WPNotification}WideBackContent', 'wide_back_content', payload)
class MPNSTile(MPNSBase):
"""
Tile notification. Payload is a dictionary with the following optional elements:
id
template
background_image
count
title
back_background_image
back_title
back_content
"""
DELAY_IMMEDIATE = 1
DELAY_450S = 11
DELAY_900S = 21
def __init__(self, *args, **kwargs):
super(MPNSTile, self).__init__(*args, **kwargs)
self.set_target('token') # TODO: flip tile
def clearable_subelement(self, parent, element, payload_param, payload):
if payload_param in payload:
el = ET.SubElement(parent, element)
if payload[payload_param] is None:
el.attrib['Action'] = 'Clear'
else:
el.text = payload[payload_param]
return el
def prepare_payload(self, payload):
root = ET.Element("{WPNotification}Notification")
tile = ET.SubElement(root, '{WPNotification}Tile')
self.optional_attribute(tile, 'Id', 'id', payload)
self.optional_attribute(tile, 'Template', 'template', payload)
self.optional_subelement(tile, '{WPNotification}BackgroundImage', 'background_image', payload)
self.clearable_subelement(tile, '{WPNotification}Count', 'count', payload)
self.clearable_subelement(tile, '{WPNotification}Title', 'title', payload)
self.clearable_subelement(tile, '{WPNotification}BackBackgroundImage', 'back_background_image', payload)
self.clearable_subelement(tile, '{WPNotification}BackTitle', 'back_title', payload)
self.clearable_subelement(tile, '{WPNotification}BackContent', 'back_content', payload)
return self.serialize_tree(ET.ElementTree(root))
class MPNSToast(MPNSBase):
"""
Toast notification. Payload is a dictionary with the following optional elements:
text1
text2
param
"""
DELAY_IMMEDIATE = 2
DELAY_450S = 12
DELAY_900S = 22
def __init__(self, *args, **kwargs):
super(MPNSToast, self).__init__(*args, **kwargs)
self.set_target('toast')
def prepare_payload(self, payload):
root = ET.Element("{WPNotification}Notification")
toast = ET.SubElement(root, '{WPNotification}Toast')
self.optional_subelement(toast, '{WPNotification}Text1', 'text1', payload)
self.optional_subelement(toast, '{WPNotification}Text2', 'text2', payload)
self.optional_subelement(toast, '{WPNotification}Sound', 'sound', payload)
self.optional_subelement(toast, '{WPNotification}Param', 'param', payload) # TODO: validate param (/ and length)
return self.serialize_tree(ET.ElementTree(root))
class MPNSRaw(MPNSBase):
"""
Raw notification. Payload format can be arbitrary.
"""
DELAY_IMMEDIATE = 3
DELAY_450S = 13
DELAY_900S = 23
def __init__(self, *args, **kwargs):
super(MPNSRaw, self).__init__(*args, **kwargs)
self.set_target('raw')
def prepare_payload(self, payload):
return payload
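if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): the subscription URI below is a
    # placeholder, a real one is obtained by the Windows Phone client from the
    # push client service, and sending requires network access to MPNS.
    toast = MPNSToast()
    result = toast.send(
        'http://sn1.notify.live.net/throttledthirdparty/01.00/EXAMPLE',
        {'text1': 'Hello', 'text2': 'from MPNS'})
    if result.get('error'):
        print('MPNS error: %s (backoff: %s seconds)' % (result['error'], result.get('backoff_seconds')))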
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hook for Mongo DB"""
from ssl import CERT_NONE
from types import TracebackType
from typing import List, Optional, Type
import pymongo
from pymongo import MongoClient, ReplaceOne
from airflow.hooks.base import BaseHook
class MongoHook(BaseHook):
"""
Interact with Mongo. This hook uses the Mongo conn_id.
    A PyMongo wrapper to interact with the Mongo database.
Mongo Connection Documentation
https://docs.mongodb.com/manual/reference/connection-string/index.html
You can specify connection string options in extra field of your connection
https://docs.mongodb.com/manual/reference/connection-string/index.html#connection-string-options
    If you want to use the DNS seed list connection format, set `srv` to True.
ex.
{"srv": true, "replicaSet": "test", "ssl": true, "connectTimeoutMS": 30000}
:param mongo_conn_id: The :ref:`Mongo connection id <howto/connection:mongo>` to use
when connecting to MongoDB.
    :type mongo_conn_id: str
"""
conn_name_attr = 'conn_id'
default_conn_name = 'mongo_default'
conn_type = 'mongo'
hook_name = 'MongoDB'
def __init__(self, conn_id: str = default_conn_name, *args, **kwargs) -> None:
super().__init__()
self.mongo_conn_id = conn_id
self.connection = self.get_connection(conn_id)
self.extras = self.connection.extra_dejson.copy()
self.client = None
srv = self.extras.pop('srv', False)
scheme = 'mongodb+srv' if srv else 'mongodb'
self.uri = '{scheme}://{creds}{host}{port}/{database}'.format(
scheme=scheme,
creds=f'{self.connection.login}:{self.connection.password}@' if self.connection.login else '',
host=self.connection.host,
port='' if self.connection.port is None else f':{self.connection.port}',
database=self.connection.schema,
)
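        # Examples of the resulting URI (illustrative credentials and host):
        #   mongodb://user:secret@db.example.com:27017/my_database
        #   mongodb+srv://user:secret@cluster0.example.net/my_database  (srv=True; SRV URIs take no port)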
def __enter__(self):
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
if self.client is not None:
self.close_conn()
def get_conn(self) -> MongoClient:
"""Fetches PyMongo Client"""
if self.client is not None:
return self.client
# Mongo Connection Options dict that is unpacked when passed to MongoClient
options = self.extras
        # If we are using SSL, disable requiring certs from a specific hostname
if options.get('ssl', False):
options.update({'ssl_cert_reqs': CERT_NONE})
self.client = MongoClient(self.uri, **options)
return self.client
def close_conn(self) -> None:
"""Closes connection"""
client = self.client
if client is not None:
client.close()
self.client = None
def get_collection(
self, mongo_collection: str, mongo_db: Optional[str] = None
) -> pymongo.collection.Collection:
"""
Fetches a mongo collection object for querying.
Uses connection schema as DB unless specified.
"""
mongo_db = mongo_db if mongo_db is not None else self.connection.schema
mongo_conn: MongoClient = self.get_conn()
return mongo_conn.get_database(mongo_db).get_collection(mongo_collection)
def aggregate(
self, mongo_collection: str, aggregate_query: list, mongo_db: Optional[str] = None, **kwargs
) -> pymongo.command_cursor.CommandCursor:
"""
Runs an aggregation pipeline and returns the results
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.aggregate
https://api.mongodb.com/python/current/examples/aggregation.html
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.aggregate(aggregate_query, **kwargs)
def find(
self,
mongo_collection: str,
query: dict,
find_one: bool = False,
mongo_db: Optional[str] = None,
**kwargs,
) -> pymongo.cursor.Cursor:
"""
Runs a mongo find query and returns the results
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.find
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
if find_one:
return collection.find_one(query, **kwargs)
else:
return collection.find(query, **kwargs)
def insert_one(
self, mongo_collection: str, doc: dict, mongo_db: Optional[str] = None, **kwargs
) -> pymongo.results.InsertOneResult:
"""
Inserts a single document into a mongo collection
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.insert_one
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.insert_one(doc, **kwargs)
def insert_many(
        self, mongo_collection: str, docs: List[dict], mongo_db: Optional[str] = None, **kwargs
) -> pymongo.results.InsertManyResult:
"""
Inserts many docs into a mongo collection.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.insert_many
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.insert_many(docs, **kwargs)
def update_one(
self,
mongo_collection: str,
filter_doc: dict,
update_doc: dict,
mongo_db: Optional[str] = None,
**kwargs,
) -> pymongo.results.UpdateResult:
"""
Updates a single document in a mongo collection.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one
:param mongo_collection: The name of the collection to update.
:type mongo_collection: str
:param filter_doc: A query that matches the documents to update.
:type filter_doc: dict
:param update_doc: The modifications to apply.
:type update_doc: dict
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
:type mongo_db: str
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.update_one(filter_doc, update_doc, **kwargs)
def update_many(
self,
mongo_collection: str,
filter_doc: dict,
update_doc: dict,
mongo_db: Optional[str] = None,
**kwargs,
) -> pymongo.results.UpdateResult:
"""
Updates one or more documents in a mongo collection.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_many
:param mongo_collection: The name of the collection to update.
:type mongo_collection: str
:param filter_doc: A query that matches the documents to update.
:type filter_doc: dict
:param update_doc: The modifications to apply.
:type update_doc: dict
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
:type mongo_db: str
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.update_many(filter_doc, update_doc, **kwargs)
def replace_one(
self,
mongo_collection: str,
doc: dict,
filter_doc: Optional[dict] = None,
mongo_db: Optional[str] = None,
**kwargs,
) -> pymongo.results.UpdateResult:
"""
Replaces a single document in a mongo collection.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.replace_one
.. note::
            If no ``filter_doc`` is given, it is assumed that the replacement
            document contains the ``_id`` field, which is then used as the filter.
:param mongo_collection: The name of the collection to update.
:type mongo_collection: str
:param doc: The new document.
:type doc: dict
:param filter_doc: A query that matches the documents to replace.
Can be omitted; then the _id field from doc will be used.
:type filter_doc: dict
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
:type mongo_db: str
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
if not filter_doc:
filter_doc = {'_id': doc['_id']}
return collection.replace_one(filter_doc, doc, **kwargs)
def replace_many(
self,
mongo_collection: str,
docs: List[dict],
filter_docs: Optional[List[dict]] = None,
mongo_db: Optional[str] = None,
upsert: bool = False,
collation: Optional[pymongo.collation.Collation] = None,
**kwargs,
) -> pymongo.results.BulkWriteResult:
"""
Replaces many documents in a mongo collection.
Uses bulk_write with multiple ReplaceOne operations
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.bulk_write
.. note::
            If no ``filter_docs`` are given, it is assumed that all
            replacement documents contain the ``_id`` field, which is then
            used as the filter for each one.
:param mongo_collection: The name of the collection to update.
:type mongo_collection: str
:param docs: The new documents.
:type docs: list[dict]
:param filter_docs: A list of queries that match the documents to replace.
Can be omitted; then the _id fields from docs will be used.
:type filter_docs: list[dict]
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
:type mongo_db: str
:param upsert: If ``True``, perform an insert if no documents
match the filters for the replace operation.
:type upsert: bool
:param collation: An instance of
:class:`~pymongo.collation.Collation`. This option is only
supported on MongoDB 3.4 and above.
:type collation: pymongo.collation.Collation
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
if not filter_docs:
filter_docs = [{'_id': doc['_id']} for doc in docs]
requests = [
ReplaceOne(filter_docs[i], docs[i], upsert=upsert, collation=collation) for i in range(len(docs))
]
return collection.bulk_write(requests, **kwargs)
def delete_one(
self, mongo_collection: str, filter_doc: dict, mongo_db: Optional[str] = None, **kwargs
) -> pymongo.results.DeleteResult:
"""
Deletes a single document in a mongo collection.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.delete_one
:param mongo_collection: The name of the collection to delete from.
:type mongo_collection: str
:param filter_doc: A query that matches the document to delete.
:type filter_doc: dict
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
:type mongo_db: str
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.delete_one(filter_doc, **kwargs)
def delete_many(
self, mongo_collection: str, filter_doc: dict, mongo_db: Optional[str] = None, **kwargs
) -> pymongo.results.DeleteResult:
"""
Deletes one or more documents in a mongo collection.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.delete_many
:param mongo_collection: The name of the collection to delete from.
:type mongo_collection: str
:param filter_doc: A query that matches the documents to delete.
:type filter_doc: dict
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
:type mongo_db: str
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.delete_many(filter_doc, **kwargs)
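# Usage sketch (illustrative only; assumes an Airflow connection registered
# under the conn id 'mongo_default', e.g. via the UI or an environment variable):
#
#   with MongoHook(conn_id='mongo_default') as hook:
#       active = hook.find('my_collection', {'status': 'active'}, find_one=True)
#       hook.insert_one('my_collection', {'status': 'active', 'name': 'example'})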
|
|
from sqlagg.columns import *
from corehq.apps.reports.sqlreport import SqlTabularReport, DatabaseColumn
from corehq.apps.reports.filters.fixtures import AsyncDrillableFilter
from corehq.apps.reports.filters.select import GroupFilter
from corehq.apps.reports.dont_use.fields import BooleanField
from corehq.apps.reports.standard import CustomProjectReport, DatespanMixin
from corehq.apps.reports.util import make_ctable_table_name
from corehq.apps.users.models import CommCareUser
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, DataTablesColumnGroup
from corehq.apps.fixtures.models import FixtureDataItem
from corehq.apps.groups.models import Group
from couchdbkit.exceptions import ResourceNotFound
from copy import copy
class ProvinceField(AsyncDrillableFilter):
label = "Province"
slug = "province"
hierarchy = [{"type": "province", "display": "name"}]
class ShowAgeField(BooleanField):
label = "Show Age"
slug = "show_age_field"
template = "care_sa/reports/partials/checkbox.html"
class ShowGenderField(BooleanField):
label = "Show Gender"
slug = "show_gender_field"
template = "care_sa/reports/partials/checkbox.html"
class CBOField(GroupFilter):
name = 'CBO'
default_option = 'All'
class CareReport(SqlTabularReport,
CustomProjectReport,
DatespanMixin):
exportable = True
emailable = True
table_name = make_ctable_table_name("care-ihapc-live_CareSAFluff")
report_template_path = "care_sa/reports/grouped.html"
fields = [
'corehq.apps.reports.filters.dates.DatespanFilter',
'custom.reports.care_sa.reports.sql.ProvinceField',
'custom.reports.care_sa.reports.sql.CBOField',
'custom.reports.care_sa.reports.sql.ShowAgeField',
'custom.reports.care_sa.reports.sql.ShowGenderField',
]
def selected_province(self):
fixture = self.request.GET.get('fixture_id', "")
return fixture.split(':')[1] if fixture else None
def selected_cbo(self):
group = self.request.GET.get('group', '')
return group
def show_age(self):
show_age_field = self.request.GET.get('show_age_field', '')
return show_age_field == 'on'
def show_gender(self):
show_gender_field = self.request.GET.get('show_gender_field', '')
return show_gender_field == 'on'
@property
def filters(self):
filters = [
"domain = :domain",
"date between :startdate and :enddate",
]
if self.selected_province():
filters.append("province = :province")
if self.selected_cbo():
filters.append("cbo = :cbo")
return filters
@property
def group_by(self):
groups = []
if not self.selected_province():
groups.append('province')
elif not self.selected_cbo():
groups.append('cbo')
else:
groups.append('user_id')
if self.show_age():
groups.append('age_group')
if self.show_gender():
groups.append('gender')
return groups
@property
def filter_values(self):
return dict(
domain=self.domain,
startdate=self.datespan.startdate_param_utc,
enddate=self.datespan.enddate_param_utc,
province=self.selected_province(),
cbo=self.selected_cbo(),
)
def first_indicator_column_index(self):
return len(self.columns) - len(self.report_columns)
@property
def headers(self):
"""
Override the headers method to be able to add male/female sub
header columns.
"""
header_columns = []
for idx, column in enumerate(self.columns):
if idx >= self.first_indicator_column_index() and self.show_gender():
group = DataTablesColumnGroup(column.header)
group.add_column(DataTablesColumn("male", sortable=False))
group.add_column(DataTablesColumn("female", sortable=False))
header_columns.append(group)
else:
# gender is included in the columns to populate data
# but we don't show it on the page
if column.header != 'Gender':
header_columns.append(DataTablesColumn(column.header, sortable=False))
# insert a blank header to display the "all genders/ages" message
if not self.show_gender() and not self.show_age():
header_columns.insert(1, DataTablesColumn('', sortable=False))
return DataTablesHeader(*header_columns)
@property
def columns(self):
if not self.selected_province():
columns = [DatabaseColumn("Province",
SimpleColumn('province'),
sortable=False)]
elif not self.selected_cbo():
columns = [DatabaseColumn("CBO",
SimpleColumn('cbo'),
sortable=False)]
else:
columns = [DatabaseColumn("User",
SimpleColumn('user_id'),
sortable=False)]
if self.show_gender():
columns.append(DatabaseColumn("Gender",
SimpleColumn('gender'),
sortable=False))
if self.show_age():
columns.append(DatabaseColumn("Age",
SimpleColumn('age_group'),
sortable=False))
for column_attrs in self.report_columns:
text, name = column_attrs[:2]
name = '%s_total' % name
if len(column_attrs) == 2:
column = DatabaseColumn(text, CountColumn(name), sortable=False)
elif column_attrs[2] == 'SumColumn':
column = DatabaseColumn(text, SumColumn(name), sortable=False)
columns.append(column)
return columns
@property
def keys(self):
        return [self.domain]
@property
def export_table(self):
headers = self.headers
rows = self.rows
formatted_rows = []
for row in rows:
if not self.show_age() and not self.show_gender():
if 'total_width' not in row:
formatted_rows.append(
[row['username']] +
['All genders and ages'] +
row['row_data']
)
elif not self.show_age() and self.show_gender():
if 'total_width' not in row:
formatted_rows.append(
[row['username']] +
row['row_data']
)
else:
# both groups with age get built the same
if 'total_width' not in row:
formatted_rows.append(
[row['username']] +
[row['age_display']] +
row['row_data']
)
else:
formatted_rows.append(
['Total:', ''] +
row['row_data']
)
def _unformat_row(row):
return [col.get("sort_key", col) if isinstance(col, dict) else col for col in row]
table = headers.as_export_table
rows = [_unformat_row(row) for row in formatted_rows]
table.extend(rows)
if self.total_row:
table.append(_unformat_row(self.total_row))
if self.statistics_rows:
table.extend([_unformat_row(row) for row in self.statistics_rows])
return [[self.export_sheet_name, table]]
def empty_row(self):
return ['--'] * len(self.report_columns)
def gender_seperated_dict(self):
return {
'male': self.empty_row(),
'female': self.empty_row()
}
def age_seperated_dict(self, default):
""" Build a dictionary with a copy of default for each age group """
return dict((str(i), copy(default)) for i in range(4))
def initialize_user_stuff(self):
"""
        Return a dictionary appropriately formatted based on the
        selected filter options.
        Used to separate a given user's/province's/CBO's data into
        a dictionary separated by age group and gender as needed.
"""
if self.show_age() and self.show_gender():
return self.age_seperated_dict(self.gender_seperated_dict())
if self.show_age() and not self.show_gender():
return self.age_seperated_dict(self.empty_row())
if not self.show_age() and self.show_gender():
return self.gender_seperated_dict()
if not self.show_age() and not self.show_gender():
return self.empty_row()
def add_row_to_total(self, total, row):
# initialize it if it hasn't been used yet
if len(total) == 0:
total = [0] * len(row)
return [a if isinstance(b, str) else a + b for (a, b) in zip(total, row)]
def add_row_to_row(self, base_row, row_to_add):
for i in range(len(base_row)):
if isinstance(row_to_add[i], int) or isinstance(row_to_add[i], long):
if isinstance(base_row[i], int):
base_row[i] = base_row[i] + int(row_to_add[i])
else:
base_row[i] = row_to_add[i]
return base_row
def get_data_grouping_id(self, row):
if not self.selected_province() or not self.selected_cbo():
grouping_id = row.pop(0)
else:
# if it's a user we need to get the username
user = CommCareUser.get_by_user_id(row.pop(0))
grouping_id = user.username
return grouping_id
def add_row_to_grouping_data(self, built_data, row, grouping_id, age_group, gender):
"""
Take whatever was left in row and add it to the appropriate spot in
the data we are building for this grouping_id
"""
if self.show_age() and self.show_gender():
built_data[grouping_id][age_group][gender] = row
elif self.show_age() and not self.show_gender():
built_data[grouping_id][age_group] = \
self.add_row_to_row(built_data[grouping_id][age_group], row)
elif not self.show_age() and self.show_gender():
built_data[grouping_id][gender] = \
self.add_row_to_row(built_data[grouping_id][gender], row)
elif not self.show_age() and not self.show_gender():
built_data[grouping_id] = \
self.add_row_to_row(built_data[grouping_id], row)
def build_data(self, rows):
"""
Take all of the individual data from the rows and collect it into
a dict (built_data) that is used to group the values by gender/age
"""
built_data = {}
for row in rows:
gender = age_group = None
try:
grouping_id = self.get_data_grouping_id(row)
except AttributeError:
continue
if grouping_id not in built_data:
# If we haven't seen this id yet we need to create
# an empty row/dict (depending on selected filters)
built_data[grouping_id] = self.initialize_user_stuff()
if self.show_gender():
gender = row.pop(0)
if gender == 'refuses_answer':
continue
if self.show_age():
age_group = row.pop(0)
self.add_row_to_grouping_data(
built_data,
row,
grouping_id,
age_group,
gender
)
return built_data
def age_group_text(self, age_group_val):
if age_group_val == '0':
return '0-14 years'
elif age_group_val == '1':
return '15-24 years'
elif age_group_val == '2':
return '25+ years'
else:
return 'Unknown'
def get_grouping_name(self, user):
"""
Get the name of province/cbo/user (depending on what is selected)
"""
if not self.selected_province():
return FixtureDataItem.get(user).fields_without_attributes['name']
elif not self.selected_cbo():
return Group.get(user).name
else:
return CommCareUser.get_by_username(user).name
def merge_gender_data(self, data):
return [val for pair in zip(data['male'], data['female'])
for val in pair]
@property
def rows(self):
"""
Override rows method to be able to properly group data
"""
# use super to get the raw rows from the report
stock_rows = list(super(CareReport, self).rows)
# pack these rows into a dict representing the currently
# configured report structure
rows = self.build_data(stock_rows)
# set up the width for the total rows
if (not self.show_age() and self.show_gender()):
total_width = 1
else:
total_width = 2
rows_for_table = []
overall_total_row = []
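# Running totals per age bucket; the keys mirror the age_group codes used by age_group_text ('3' maps to 'Unknown')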
age_group_totals = {'0': [], '1': [], '2': [], '3': []}
# for every group of data, unpack back to individual rows
# and set up the information the template needs to render this
# stuff
for user in rows:
u = self.get_grouping_name(user)
total_row = []
if self.show_age() and self.show_gender():
for age_group in sorted(rows[user]):
age_display = self.age_group_text(age_group)
row_data = self.merge_gender_data(rows[user][age_group])
rows_for_table.append({
'username': u if age_group == '0' else '',
'gender': True,
'age_display': age_display,
'row_data': row_data
})
age_group_totals[age_group] = self.add_row_to_total(
age_group_totals[age_group],
row_data
)
total_row = self.add_row_to_total(total_row, row_data)
elif not self.show_age() and self.show_gender():
row_data = self.merge_gender_data(rows[user])
rows_for_table.append({
'username': u,
'gender': True,
'row_data': row_data
})
elif self.show_age() and not self.show_gender():
for age_group in sorted(rows[user]):
row_data = rows[user][age_group]
rows_for_table.append({
'username': u if age_group == '0' else '',
'age_display': self.age_group_text(age_group),
'row_data': row_data
})
age_group_totals[age_group] = self.add_row_to_total(
age_group_totals[age_group],
row_data
)
total_row = self.add_row_to_total(total_row, row_data)
else:
row_data = rows[user]
rows_for_table.append({
'username': u,
'gender': 'no_grouping',  # sentinel: row has no gender/age grouping
'row_data': row_data
})
if total_row:
overall_total_row = self.add_row_to_total(overall_total_row, total_row)
else:
# there is no total_row if we aren't grouping by age
overall_total_row = self.add_row_to_total(overall_total_row, row_data)
rows_for_table.append({
'username': 'TOTAL_ROW',
'total_width': total_width,
'gender': self.show_gender(),
'row_data': total_row,
})
if self.show_age():
for group in ['0', '1', '2', '3']:
rows_for_table.append({
'username': 'AGE_TOTAL_ROW',
'total_width': total_width,
'age_display': self.age_group_text(group),
'gender': self.show_gender(),
'row_data': age_group_totals[group]
})
rows_for_table.append({
'username': 'OVERALL_TOTAL_ROW',
'total_width': total_width,
'gender': self.show_gender(),
'row_data': overall_total_row,
})
return rows_for_table
class TestingAndCounseling(CareReport):
slug = 'tac'
name = "Testing and Counseling"
report_columns = [
['HIV Counseling', 'hiv_counseling'],
['Individuals HIV tested', 'hiv_tested'],
['Individuals HIV Positive ', 'hiv_positive'],
['Newly diagnosed HIV+ indv scr for TB', 'new_hiv_tb_screen'], # 1d
['TB screening status known - TB Module', 'tb_screened'], # 1ea
['TB screening status unknown - HCT Module', 'hct_screened'], # 1eb
['Individuals ref to PHCF with signs & symptoms of TB', 'referred_tb_signs'], # 1f
['Newly diagnosed individuals HIV infected ref for CD4 count test in a PHCF', 'referred_for_cdf_new'], # 1ha TODO empty?
['Existing patients HIV infected ref for CD4 count test in a PHCF', 'referred_for_cdf_existing'], # 1hb TODO empty?
['Individuals HIV infected provided with CD4 count test results',
'new_hiv_cd4_results'], # 1i
['Individuals HIV infected provided with CD4 count test results from previous months',
'new_hiv_in_care_program'], # 1k
['People tested as individuals', 'individual_tests'], # 1l
['People tested as couples', 'couple_tests', 'SumColumn'], # 1m
['People tested at the community', 'hiv_community'],
]
class CareAndTBHIV(CareReport):
slug = 'caretbhiv'
name = "Care and TBHIV"
report_columns = [
['Number of deceased patients', 'deceased'], # 2a
#['Number of patients lost to follow-up', TODO], # 2b
['Patients completed TB treatment', 'tb_treatment_completed'], # 2d
['All visits for CBC', 'received_cbc'], # 2e
['Existing HIV+ individuals who received CBC', 'existing_cbc'], # 2f
['New HIV+ individuals who received CBC', 'new_hiv_cbc'], # 2g
['HIV infected patients newly started on IPT', 'new_hiv_starting_ipt'], # 2h
['HIV infected patients newly receiving Bactrim', 'new_hiv_starting_bactrim'], # 2i
['HIV+ patients receiving HIV care who are screened for symptoms of TB', 'hiv_on_care_screened_for_tb'], # 2k
['Family members screened for symptoms of TB', 'family_screened', 'SumColumn'], # 2l
]
class IACT(CareReport):
slug = 'iact'
name = 'I-ACT'
report_columns = [
['HIV+ client enrolled for I-ACT', 'hiv_pos_enrolled'], # 3a
['HIV+ client completed I-ACT', 'hiv_pos_completed'], # 3b
['HIV+ clients registered for I-ACT & in the pipeline (5th session)', 'hiv_pos_pipeline'], # 3c
#['HIV+client registered for I-ACT after diagnosis', #TODO], # 3d
['I-ACT participants receiving INH/IPT prophylaxis',
'iact_participant_ipt'], # 3f
['I-ACT participants receiving Cotrimoxazole prophylaxis/Dapsone',
'iact_participant_bactrim'], # 3g
['I-ACT participant on Pre-ART', 'iact_participant_art'], # 3h
['I-ACT participant on ARV', 'iact_participant_arv'], # 3i
['I-ACT registered client with CD4 count <200', 'cd4lt200'], # 3j
['I-ACT registered client with CD4 count 200 - 350', 'cd4lt350'], # 3k
['I-ACT registered client with CD4 count higher than 350', 'cd4gt350'], # 3l
['Unknown CD4 count at registration', 'unknown_cd4'], # 3m
['I-ACT Support groups completed (all 6 sessions)', 'iact_support_groups'], # 3n
]
|
|
import requests
import json
import datetime
import time
from tabulate import tabulate
from colorama import init
init(autoreset=True)
__author__ = 'asifj'
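# Validation helpers: each validate_* method compares a Kafka/Hadoop case document
# against the case-manager REST API response, appends a summary row to the supplied
# CSV writer, and returns a printable report of any mismatches.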
class Utils:
def __init__(self):
self.url = "http://172.22.147.248:8092/api/"
def header(self, document, document_no, case_key):
output = "\n++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
output += "\nAPI URL: "+self.url+"case-manager/cases/"+str(document[case_key])+"\n"
output += "\nDocument No: "+str(document_no)
output += "\nObject _id: "+str(document['_id'])
output += "\nCaseID: "+str(document[case_key])
keys = len(document.keys())
output += "\nKeys: "+str(keys)
return output
def request(self, document, case_key):
r = requests.get(self.url+"case-manager/cases/"+str(document[case_key]))
return r
def footer(self, output):
output += "\n++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
output += "\n\n"
return output
def validate_sr_details(self, r, document, document_no, start_time, response_writer):
row = []
output = ""
row.append(datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S.%f'))
row.append(document_no)
row.append(document['SRID'])
row.append("SR")
row.append("")
row.append(r.status_code)
row.append(r.elapsed)
status = 0
response = ""
if r.status_code == 200:
response = json.loads(r.text)
table = []
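# Compare each Kafka field with its API counterpart; every mismatch becomes a [kafka, api, message] row in table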
if not (str(document['BETA_TYPE']).strip() == ("" if response['betaType'] is None else str(response['betaType']).strip())):
tmp = [str(document['BETA_TYPE']).strip(), str(response['betaType']).strip()]
tmp.append("Incorrect value for 'betaType'!")
table.append(tmp)
status = 1
if not (str(document['BUILD']).strip() == ("" if response['outage']['build'] is None else str(response['outage']['build']).strip())):
tmp = [str(document['BUILD']).strip(), str(response['outage']['build']).strip()]
tmp.append("Incorrect value for 'build'!")
table.append(tmp)
status = 1
if not (str(document['CC_ENGINEER']).strip() == ("" if response['outage']['ccEngineer'] is None else str(response['outage']['ccEngineer']).strip())):
tmp = [str(document['CC_ENGINEER']).strip(), str(response['outage']['ccEngineer']).strip()]
tmp.append("Incorrect value for 'ccEngineer'!")
table.append(tmp)
status = 1
if not (str(document['SRID']).strip() == ("" if response['srId'] is None else str(response['srId']).strip())):
tmp = [str(document['SRID']).strip(), str(response['srId']).strip()]
tmp.append("Incorrect value for 'caseId'!")
table.append(tmp)
status = 1
if not (str(document['CONTRACT_ID']).strip() == ("" if response['entitlement']['contractId'] is None else str(response['entitlement']['contractId']).strip())):
tmp = [str(document['CONTRACT_ID']).strip(), str(response['entitlement']['contractId']).strip()]
tmp.append("Incorrect value for 'contractId'!")
table.append(tmp)
status = 1
if not (str(document['CONTRACT_STATUS']).strip()== ("" if response['entitlement']['contractStatus'] is None else str(response['entitlement']['contractStatus']).strip())):
tmp = [str(document['CONTRACT_STATUS']).strip(), str(response['entitlement']['contractStatus']).strip()]
tmp.append("Incorrect value for 'contractStatus'!")
table.append(tmp)
status = 1
if not (str(document['COUNTRY']).strip() == ("" if response['outage']['country'] is None else str(response['outage']['country']).strip())):
tmp = [str(document['COUNTRY']).strip(), str(response['outage']['country']).strip()]
tmp.append("Incorrect value for 'country'!")
table.append(tmp)
status = 1
if not (str(document['COURTESY']).strip()== ("" if response['courtesy'] is None else str(response['courtesy']).strip())):
tmp = [str(document['COURTESY']).strip(), str(response['courtesy']).strip()]
tmp.append("Incorrect value for 'courtesyDescription/courtesy'!")
table.append(tmp)
status = 1
if not (str(document['COURTESY_KEY']).strip()== ("" if response['courtesyKey'] is None else str(response['courtesyKey']).strip())):
tmp = [str(document['COURTESY_KEY']).strip(), str(response['courtesyKey']).strip()]
tmp.append("Incorrect value for 'courtesykey'!")
table.append(tmp)
status = 1
if not (str(document['CRITICAL_ISSUE']).strip()== ("" if response['outage']['criticalIssue'] is None else str(response['outage']['criticalIssue']).strip())):
tmp = [str(document['CRITICAL_ISSUE']).strip(), str(response['outage']['criticalIssue']).strip()]
tmp.append("Incorrect value for 'criticalIssue'!")
table.append(tmp)
status = 1
if not (str(document['CRITICAL_OUTAGE']).strip()== ("" if response['criticalOutage'] is None else str(response['criticalOutage']).strip())):
tmp = [str(document['CRITICAL_OUTAGE']).strip(), str(response['criticalOutage']).strip()]
tmp.append("Incorrect value for 'criticalOutage'!")
table.append(tmp)
status = 1
if not (str(document['CUST_CASE_NO']).strip()== ("" if response['outage']['custCaseNo'] is None else str(response['outage']['custCaseNo']).strip())):
tmp = [str(document['CUST_CASE_NO']).strip(), str(response['outage']['custCaseNo']).strip()]
tmp.append("Incorrect value for 'customerCaseNumber'!")
table.append(tmp)
status = 1
if not (str(document['CVE']).strip()== ("" if response['cve'] is None else str(response['cve']).strip())):
tmp = [str(document['CVE']).strip(), str(response['cve']).strip()]
tmp.append("Incorrect value for 'cve'!")
table.append(tmp)
status = 1
if not (str(document['CVSS']).strip()== ("" if response['cvss'] is None else str(response['cvss']).strip())):
tmp = [str(document['CVSS']).strip(), str(response['cvss']).strip()]
tmp.append("Incorrect value for 'cvss'!")
table.append(tmp)
status = 1
if not (str(document['DESCRIPTION']).strip()== ("" if response['desc'] is None else str(response['desc']).strip())):
tmp = [str(document['DESCRIPTION']).strip(), str(response['desc']).strip()]
tmp.append("Incorrect value for 'description'!")
table.append(tmp)
status = 1
if not (str(document['END_DATE']).strip() == ("" if response['entitlement']['endDate'] is None else str(response['entitlement']['endDate']).strip())):
tmp = [str(document['END_DATE']).strip(), str(response['entitlement']['endDate']).strip()]
tmp.append("Incorrect value for 'endDate'!")
table.append(tmp)
status = 1
if not (str(document['ENTITLED_SERIAL_NO']).strip()== ("" if response['entitlement']['entitledSerialNumber'] is None else str(response['entitlement']['entitledSerialNumber']).strip())):
tmp = [str(document['ENTITLED_SERIAL_NO']).strip(), str(response['entitlement']['entitledSerialNumber']).strip()]
tmp.append("Incorrect value for 'entitledSerialNumber'!")
table.append(tmp)
status = 1
if not (str(document['ENTITLEMENT_CHECKED']).strip()== ("" if response['entitlement']['entitlementChecked'] is None else str(response['entitlement']['entitlementChecked']).strip())):
tmp = [str(document['ENTITLEMENT_CHECKED']).strip(), str(response['entitlement']['entitlementChecked']).strip()]
tmp.append("Incorrect value for 'entitlementChecked'!")
table.append(tmp)
status = 1
if not (str(document['ENTITLEMENT_SERVICE_LEVEL']).strip()== ("" if response['entitlement']['entitlementServiceLevel'] is None else str(response['entitlement']['entitlementServiceLevel']).strip())):
tmp = [str(document['ENTITLEMENT_SERVICE_LEVEL']).strip(), str(response['entitlement']['entitlementServiceLevel']).strip()]
tmp.append("Incorrect value for 'entitlementServiceLevel'!")
table.append(tmp)
status = 1
if not (str(document['ENTITLEMENT_SOURCE']).strip()== ("" if response['entitlement']['entitlementSource'] is None else str(response['entitlement']['entitlementSource']).strip())):
tmp = [str(document['ENTITLEMENT_SOURCE']).strip(), str(response['entitlement']['entitlementSource']).strip()]
tmp.append("Incorrect value for 'entitlementSource'!")
table.append(tmp)
status = 1
if not (str(document.get('ESCALATION_DES', '')).strip()== ("" if response['escalationDesc'] is None else str(response['escalationDesc']).strip())):
tmp = [str(document.get('ESCALATION_DES', '')).strip(), str(response['escalationDesc']).strip()]
tmp.append("Incorrect value for 'escalation'!")
table.append(tmp)
status = 1
if not (str(document['ESCALATION_LEVEL']).strip()== ("" if response['outage']['escalationLevel'] is None else str(response['outage']['escalationLevel']).strip())):
tmp = [str(document['ESCALATION_LEVEL']).strip(), str(response['outage']['escalationLevel']).strip()]
tmp.append("Incorrect value for 'escalationLevelDescription'!")
table.append(tmp)
status = 1
if not (str(document['ESCALATION_LEVEL_KEY']).strip()== ("" if response['outage']['escalationLevelkey'] is None else str(response['outage']['escalationLevelkey']).strip())):
tmp = [str(document['ESCALATION_LEVEL_KEY']).strip(), str(response['outage']['escalationLevelkey']).strip()]
tmp.append("Incorrect value for 'escalationLevelKey'!")
table.append(tmp)
status = 1
if not (str(document['ESCALATION_KEY']).strip()== ("" if response['escalationKey'] is None else str(response['escalationKey']).strip())):
tmp = [str(document['ESCALATION_KEY']).strip(), str(response['escalationKey']).strip()]
tmp.append("Incorrect value for 'escalationkey'!")
table.append(tmp)
status = 1
if not (str(document['EXTERNALLY_REPORTED']).strip()== ("" if response['externallyReported'] is None else str(response['externallyReported']).strip())):
tmp = [str(document['EXTERNALLY_REPORTED']).strip(), str(response['externallyReported']).strip()]
tmp.append("Incorrect value for 'externallyReported'!")
table.append(tmp)
status = 1
if not (str(document.get('FOLLOW_UP_METHOD', '')).strip()== ("" if response['outage']['followUpMethod'] is None else str(response['outage']['followUpMethod']).strip())):
tmp = [str(document.get('FOLLOW_UP_METHOD', '')).strip(), str(response['outage']['followUpMethod']).strip()]
tmp.append("Incorrect value for 'followupMethod'!")
table.append(tmp)
status = 1
if not (str(document['FOLLOW_UP_METHOD_KEY']).strip()== ("" if response['outage']['followUpMethodkey'] is None else str(response['outage']['followUpMethodkey']).strip())):
tmp = [str(document['FOLLOW_UP_METHOD_KEY']).strip(), str(response['outage']['followUpMethodkey']).strip()]
tmp.append("Incorrect value for 'followupMethodKey'!")
table.append(tmp)
status = 1
if not (str(document['JSA_ADVISORY_BOARD']).strip()== ("" if response['jsaAdvisoryBoard'] is None else str(response['jsaAdvisoryBoard']).strip())):
tmp = [str(document['JSA_ADVISORY_BOARD']).strip(), str(response['jsaAdvisoryBoard']).strip()]
tmp.append("Incorrect value for 'jsaAdvisoryBoard'!")
table.append(tmp)
status = 1
if not (str(document['JTAC']).strip()== ("" if response['jtac'] is None else str(response['jtac']).strip())):
tmp = [str(document['JTAC']).strip(), str(response['jtac']).strip()]
tmp.append("Incorrect value for 'jtac'!")
table.append(tmp)
status = 1
if not (str(document['KNOWLEDGE_ARTICLE']).strip()== ("" if response['outage']['knowledgeArticle'] is None else str(response['outage']['knowledgeArticle']).strip())):
tmp = [str(document['KNOWLEDGE_ARTICLE']).strip(), str(response['outage']['knowledgeArticle']).strip()]
tmp.append("Incorrect value for 'knowledgeArticle'!")
table.append(tmp)
status = 1
if not (str(document['OUATGE_CAUSE']).strip()== ("" if response['outage']['outageCause'] is None else str(response['outage']['outageCause']).strip())):
tmp = [str(document['OUATGE_CAUSE']).strip(), str(response['outage']['outageCause']).strip()]
tmp.append("Incorrect value for 'OUATGE_CAUSE/outageCause'!")
table.append(tmp)
status = 1
if not (str(document['OUTAGE_CAUSE_KEY']).strip()== ("" if response['outage']['outageCausekey'] is None else str(response['outage']['outageCausekey']).strip())):
tmp = [str(document['OUTAGE_CAUSE_KEY']).strip(), str(response['outage']['outageCausekey']).strip()]
tmp.append("Incorrect value for 'outageCauseKey'!")
table.append(tmp)
status = 1
if not (str(document['OUTAGE']).strip()== ("" if response['outage']['outage'] is None else str(response['outage']['outage']).strip())):
tmp = [str(document['OUTAGE']).strip(), str(response['outage']['outage']).strip()]
tmp.append("Incorrect value for 'outageDescription/outage'!")
table.append(tmp)
status = 1
if not (str(document['OUTAGE_IMPACT_KEY']).strip()== ("" if response['outage']['outageImpactKey'] is None else str(response['outage']['outageImpactKey']).strip())):
tmp = [str(document['OUTAGE_IMPACT_KEY']).strip(), str(response['outage']['outageImpactKey']).strip()]
tmp.append("Incorrect value for 'outageImpactKey'!")
table.append(tmp)
status = 1
if not (str(document['OUTAGE_INFO_AVAILABLE']).strip()== ("" if response['outage']['outageInfoAvailable'] is None else str(response['outage']['outageInfoAvailable']).strip())):
tmp = [str(document['OUTAGE_INFO_AVAILABLE']).strip(), str(response['outage']['outageInfoAvailable']).strip()]
tmp.append("Incorrect value for 'outageInfoAvailable'!")
table.append(tmp)
status = 1
if not (str(document['OUTAGE_KEY']).strip()== ("" if response['outage']['outageKey'] is None else str(response['outage']['outageKey']).strip())):
tmp = [str(document['OUTAGE_KEY']).strip(), str(response['outage']['outageKey']).strip()]
tmp.append("Incorrect value for 'outageKey'!")
table.append(tmp)
status = 1
if not (str(document['OUTAGE_TYPE']).strip()== ("" if response['outage']['outageType'] is None else str(response['outage']['outageType']).strip())):
tmp = [str(document['OUTAGE_TYPE']).strip(), str(response['outage']['outageType']).strip()]
tmp.append("Incorrect value for 'outageTypeDescription/outageType'!")
table.append(tmp)
status = 1
if not (str(document['OUTAGE_TYPE_KEY']).strip()== ("" if response['outage']['outageTypekey'] is None else str(response['outage']['outageTypekey']).strip())):
tmp = [str(document['OUTAGE_TYPE_KEY']).strip(), str(response['outage']['outageTypekey']).strip(),
"Incorrect value for 'outageTypeKey'!"]
table.append(tmp)
status = 1
if not (str(document['OUTSOURCER']).strip()== ("" if response['outage']['outsourcer'] is None else str(response['outage']['outsourcer']).strip())):
tmp = [str(document['OUTSOURCER']).strip(), str(response['outage']['outsourcer']).strip(),
"Incorrect value for 'outsourcer'!"]
table.append(tmp)
status = 1
if not (str(document['OVERIDE_OUTAGE']).strip()== ("" if response['outage']['overideOutage'] is None else str(response['outage']['overideOutage']).strip())):
tmp = [str(document['OVERIDE_OUTAGE']).strip(), str(response['outage']['overideOutage']).strip()]
tmp.append("Incorrect value for 'overideOutage'!")
table.append(tmp)
status = 1
if not (str(document['PLATFORM']).strip()== ("" if response['platform'] is None else str(response['platform']).strip())):
tmp = [str(document['PLATFORM']).strip(), str(response['platform']).strip()]
tmp.append("Incorrect value for 'platform'!")
table.append(tmp)
status = 1
if not (str(document['PREVIOUS_OWNER_SKILL']).strip()== ("" if response['outage']['previousOwnerSkill'] is None else str(response['outage']['previousOwnerSkill']).strip())):
tmp = [str(document['PREVIOUS_OWNER_SKILL']).strip(), str(response['outage']['previousOwnerSkill']).strip()]
tmp.append("Incorrect value for 'previousOwnerSkill'!")
table.append(tmp)
status = 1
if not (str(document['PREVIOUS_TEAM']).strip()== ("" if response['outage']['previousTeam'] is None else str(response['outage']['previousTeam']).strip())):
tmp = [str(document['PREVIOUS_TEAM']).strip(), str(response['outage']['previousTeam']).strip()]
tmp.append("Incorrect value for 'previousTeam'!")
table.append(tmp)
status = 1
if not (str(document.get('PRIORITY', '')).strip()== ("" if response['priority'] is None else str(response['priority']).strip())):
tmp = [str(document.get('PRIORITY', '')).strip(), str(response['priority']).strip()]
tmp.append("Incorrect value for 'priority'!")
table.append(tmp)
status = 1
if not (str(document['PRIORITY_KEY']).strip()== ("" if response['priorityKey'] is None else str(response['priorityKey']).strip())):
tmp = [str(document['PRIORITY_KEY']).strip(), str(response['priorityKey']).strip()]
tmp.append("Incorrect value for 'priorityKey'!")
table.append(tmp)
status = 1
if not (str(document['PROCESS_TYPE']).strip()== ("" if response['processType'] is None else str(response['processType']).strip())):
tmp = [str(document['PROCESS_TYPE']).strip(), str(response['processType']).strip()]
tmp.append("Incorrect value for 'processType'!")
table.append(tmp)
status = 1
if not (str(document['PROCESS_TYPE_DES']).strip()== ("" if response['processTypeDesc'] is None else str(response['processTypeDesc']).strip())):
tmp = [str(document['PROCESS_TYPE_DES']).strip(), str(response['processTypeDesc']).strip()]
tmp.append("Incorrect value for 'processTypeDescription'!")
table.append(tmp)
status = 1
if not (str(document['PRODUCT_ID']).strip() == ("" if response['productId'] is None else str(response['productId']).strip())):
tmp = [str(document['PRODUCT_ID']).strip(), str(response['productId']).strip()]
tmp.append("Incorrect value for 'productId'!")
table.append(tmp)
status = 1
if not (str(document['PRODUCT_SERIES']).strip() == ("" if response['productSeries'] is None else str(response['productSeries']).strip())):
tmp = [str(document['PRODUCT_SERIES']).strip(), str(response['productSeries']).strip()]
tmp.append("Incorrect value for 'productSeries'!")
table.append(tmp)
status = 1
if not (str(document['RA_FA']).strip()== ("" if response['outage']['raFa'] is None else str(response['outage']['raFa']).strip())):
tmp = [str(document['RA_FA']).strip(), str(response['outage']['raFa']).strip()]
tmp.append("Incorrect value for 'raFa'!")
table.append(tmp)
status = 1
if not (str(document['REASON']).strip()== ("" if response['reason'] is None else str(response['reason']).strip())):
tmp = [str(document['REASON']).strip(), str(response['reason']).strip()]
tmp.append("Incorrect value for 'reason'!")
table.append(tmp)
status = 1
if not (str(document['RELEASE']).strip()== ("" if response['release'] is None else str(response['release']).strip())):
tmp = [str(document['RELEASE']).strip(), str(response['release']).strip()]
tmp.append("Incorrect value for 'release'!")
table.append(tmp)
status = 1
if not (str(document['REPORTER_DETAILS']).strip()== ("" if response['reporterDetails'] is None else str(response['reporterDetails']).strip())):
tmp = [str(document['REPORTER_DETAILS']).strip(), str(response['reporterDetails']).strip()]
tmp.append("Incorrect value for 'reporterDetails'!")
table.append(tmp)
status = 1
if not (str(document['ROUTER_NAME']).strip()== ("" if response['outage']['routerName'] is None else str(response['outage']['routerName']).strip())):
tmp = [str(document['ROUTER_NAME']).strip(), str(response['outage']['routerName']).strip()]
tmp.append("Incorrect value for 'routerName'!")
table.append(tmp)
status = 1
if not (str(document['SEC_VULNERABILITY']).strip()== ("" if response['secVulnerability'] is None else str(response['secVulnerability']).strip())):
tmp = [str(document['SEC_VULNERABILITY']).strip(), str(response['secVulnerability']).strip()]
tmp.append("Incorrect value for 'secVulnerability'!")
table.append(tmp)
status = 1
if not (str(document['SERIAL_NUMBER']).strip()== ("" if response['serialNumber'] is None else str(response['serialNumber']).strip())):
tmp = [str(document['SERIAL_NUMBER']).strip(), str(response['serialNumber']).strip()]
tmp.append("Incorrect value for 'serialNumber'!")
table.append(tmp)
status = 1
if not (str(document.get('SEVERITY', '')).strip()== ("" if response['severity'] is None else str(response['severity']).strip())):
tmp = [str(document.get('SEVERITY', '')).strip(), str(response['severity']).strip()]
tmp.append("Incorrect value for 'severity'!")
table.append(tmp)
status = 1
if not (str(document.get('SEVERITY_KEY', '')).strip()== ("" if response['severityKey'] is None else str(response['severityKey']).strip())):
tmp = [str(document.get('SEVERITY_KEY', '')).strip(), str(response['severityKey']).strip()]
tmp.append("Incorrect value for 'severityKey'!")
table.append(tmp)
status = 1
if not (str(document['SIRT_BUNDLE']).strip()== ("" if response['sirtBundle'] is None else str(response['sirtBundle']).strip())):
tmp = [str(document['SIRT_BUNDLE']).strip(), str(response['sirtBundle']).strip()]
tmp.append("Incorrect value for 'sirtBundle'!")
table.append(tmp)
status = 1
if not (str(document['SKU']).strip()== ("" if response['entitlement']['sku'] is None else str(response['entitlement']['sku']).strip())):
tmp = [str(document['SKU']).strip(), str(response['entitlement']['sku']).strip()]
tmp.append("Incorrect value for 'sku'!")
table.append(tmp)
status = 1
if not (str(document['SME_CONTACT']).strip()== ("" if response['smeContact'] is None else str(response['smeContact']).strip())):
tmp = [str(document['SME_CONTACT']).strip(), str(response['smeContact']).strip()]
tmp.append("Incorrect value for 'smeContact'!")
table.append(tmp)
status = 1
if not (str(document['SOFTWARE']).strip()== ("" if response['software'] is None else str(response['software']).strip())):
tmp = [str(document['SOFTWARE']).strip(), str(response['software']).strip()]
tmp.append("Incorrect value for 'software'!")
table.append(tmp)
status = 1
if not (str(document['SPECIAL_RELEASE']).strip()== ("" if response['specialRelease'] is None else str(response['specialRelease']).strip())):
tmp = [str(document['SPECIAL_RELEASE']).strip(), str(response['specialRelease']).strip()]
tmp.append("Incorrect value for 'specialRelease'!")
table.append(tmp)
status = 1
if not (str(document['SR_CATEGORY1']).strip()== ("" if response['srCat1'] is None else str(response['srCat1']).strip())):
tmp = [str(document['SR_CATEGORY1']).strip(), str(response['srCat1']).strip()]
tmp.append("Incorrect value for 'srCategory1'!")
table.append(tmp)
status = 1
if not (str(document['SR_CATEGORY2']).strip()== ("" if response['srCat2'] is None else str(response['srCat2']).strip())):
tmp = [str(document['SR_CATEGORY2']).strip(), str(response['srCat2']).strip()]
tmp.append("Incorrect value for 'srCategory2'!")
table.append(tmp)
status = 1
if not (str(document['SR_CATEGORY3']).strip()== ("" if response['srCat3'] is None else str(response['srCat3']).strip())):
tmp = [str(document['SR_CATEGORY3']).strip(), str(response['srCat3']).strip()]
tmp.append("Incorrect value for 'srCategory3'!")
table.append(tmp)
status = 1
if not (str(document['SR_CATEGORY4']).strip()== ("" if response['srCat4'] is None else str(response['srCat4']).strip())):
tmp = [str(document['SR_CATEGORY4']).strip(), str(response['srCat4']).strip()]
tmp.append("Incorrect value for 'srCategory4'!")
table.append(tmp)
status = 1
if not (str(document['START_DATE']).strip()== ("" if response['entitlement']['startDate'] is None else str(response['entitlement']['startDate']).strip())):
tmp = [str(document['START_DATE']).strip(), str(response['entitlement']['startDate']).strip()]
tmp.append("Incorrect value for 'startDate'!")
table.append(tmp)
status = 1
if not (str(document['STATUS']).strip()== ("" if response['status'] is None else str(response['status']).strip())):
tmp = [str(document['STATUS']).strip(), str(response['status']).strip()]
tmp.append("Incorrect value for 'status'!")
table.append(tmp)
status = 1
if not (str(document['STATUS_KEY']).strip()== ("" if response['statusKey'] is None else str(response['statusKey']).strip())):
tmp = [str(document['STATUS_KEY']).strip(), str(response['statusKey']).strip()]
tmp.append("Incorrect value for 'statusKey'!")
table.append(tmp)
status = 1
if not (str(document['TECHNICAL_CATEGORY1']).strip()== ("" if response['techCat1'] is None else str(response['techCat1']).strip())):
tmp = [str(document['TECHNICAL_CATEGORY1']).strip(), str(response['techCat1']).strip()]
tmp.append("Incorrect value for 'technicalCategory1'!")
table.append(tmp)
status = 1
if not (str(document['TECHNICAL_CATEGORY2']).strip()== ("" if response['techCat2'] is None else str(response['techCat2']).strip())):
tmp = [str(document['TECHNICAL_CATEGORY2']).strip(), str(response['techCat2']).strip()]
tmp.append("Incorrect value for 'technicalCategory2'!")
table.append(tmp)
status = 1
if not (str(document['TECHNICAL_CATEGORY3']).strip()== ("" if response['techCat3'] is None else str(response['techCat3']).strip())):
tmp = [str(document['TECHNICAL_CATEGORY3']).strip(), str(response['techCat3']).strip()]
tmp.append("Incorrect value for 'technicalCategory3'!")
table.append(tmp)
status = 1
if not (str(document['TEMPERATURE']).strip()== ("" if response['outage']['temperature'] is None else str(response['outage']['temperature']).strip())):
tmp = [str(document['TEMPERATURE']).strip(), str(response['outage']['temperature']).strip()]
tmp.append("Incorrect value for 'temperature'!")
table.append(tmp)
status = 1
if not (str(document['THEATER']).strip()== ("" if response['outage']['theater'] is None else str(response['outage']['theater']).strip())):
tmp = [str(document['THEATER']).strip(), str(response['outage']['theater']).strip()]
tmp.append("Incorrect value for 'theaterDescription/theater'!")
table.append(tmp)
status = 1
if not (str(document['THEATER_KEY']).strip()== ("" if response['outage']['theaterkey'] is None else str(response['outage']['theaterkey']).strip())):
tmp = [str(document['THEATER_KEY']).strip(), str(response['outage']['theaterkey']).strip()]
tmp.append("Incorrect value for 'theaterKey'!")
table.append(tmp)
status = 1
if not (str(document['TOP5']).strip()== ("" if response['outage']['top5'] is None else str(response['outage']['top5']).strip())):
tmp = [str(document['TOP5']).strip(), str(response['outage']['top5']).strip()]
tmp.append("Incorrect value for 'top5'!")
table.append(tmp)
status = 1
if not (str(document['TOTAL_OUTAGE_TIME']).strip()== ("" if response['outage']['totalOutageTime'] is None else str(response['outage']['totalOutageTime']).strip())):
tmp = [str(document['TOTAL_OUTAGE_TIME']).strip(), str(response['outage']['totalOutageTime']).strip()]
tmp.append("Incorrect value for 'totalOutageTime'!")
table.append(tmp)
status = 1
if not (str(document.get('URGENCY', '')).strip()== ("" if response['urgency'] is None else str(response['urgency']).strip())):
tmp = [str(document.get('URGENCY', '')).strip(), str(response['urgency']).strip()]
tmp.append("Incorrect value for 'urgency'!")
table.append(tmp)
status = 1
if not (str(document['URGENCY_KEY']).strip()== ("" if response['urgencyKey'] is None else str(response['urgencyKey']).strip())):
tmp = [str(document['URGENCY_KEY']).strip(), str(response['urgencyKey']).strip()]
tmp.append("Incorrect value for 'urgencyKey'!")
table.append(tmp)
status = 1
if not (str(document['VERSION']).strip()== ("" if response['version'] is None else str(response['version']).strip())):
tmp = [str(document['VERSION']).strip(), str(response['version']).strip()]
tmp.append("Incorrect value for 'version'!")
table.append(tmp)
status = 1
if not (str(document['VIA']).strip()== ("" if response['outage']['via'] is None else str(response['outage']['via']).strip())):
tmp = [str(document['VIA']).strip(), str(response['outage']['via']).strip()]
tmp.append("Incorrect value for 'viaDescription/via'!")
table.append(tmp)
status = 1
if not (str(document['VIA_kEY']).strip()== ("" if response['outage']['viaKey'] is None else str(response['outage']['viaKey']).strip())):
tmp = [str(document['VIA_kEY']).strip(), str(response['outage']['viaKey']).strip()]
tmp.append("Incorrect value for 'VIA_kEY'!")
table.append(tmp)
status = 1
if not (str(document['WARRANTY_END_DATE']).strip()== ("" if response['entitlement']['warrantyEndDate'] is None else str(response['entitlement']['warrantyEndDate']).strip())):
tmp = [str(document['WARRANTY_END_DATE']).strip(), str(response['entitlement']['warrantyEndDate']).strip()]
tmp.append("Incorrect value for 'warrantyEndDate'!")
table.append(tmp)
status = 1
if not (str(document['SUPPORT_24_7']).strip()== ("" if response['outage']['support24X7'] is None else str(response['outage']['support24X7']).strip())):
tmp = [str(document['SUPPORT_24_7']).strip(), str(response['outage']['support24X7']).strip()]
tmp.append("Incorrect value for 'yearRoundSupport/support24X7'!")
table.append(tmp)
status = 1
if not (str(document['ZZQ1']).strip()== ("" if response['outage']['zzq1'] is None else str(response['outage']['zzq1']).strip())):
tmp = [str(document['ZZQ1']).strip(), str(response['outage']['zzq1']).strip()]
tmp.append("Incorrect value for 'zzQ1'!")
table.append(tmp)
status = 1
if not (str(document['ZZQ2']).strip()== ("" if response['outage']['zzq2'] is None else str(response['outage']['zzq2']).strip())):
tmp = [str(document['ZZQ2']).strip(), str(response['outage']['zzq2']).strip()]
tmp.append("Incorrect value for 'zzQ2'!")
table.append(tmp)
status = 1
if not (str(document['ZZQ3']).strip()== ("" if response['outage']['zzq3'] is None else str(response['outage']['zzq3']).strip())):
tmp = [str(document['ZZQ3']).strip(), str(response['outage']['zzq3']).strip()]
tmp.append("Incorrect value for 'zzQ3'!")
table.append(tmp)
status = 1
if not (str(document['ZZQ4']).strip()== ("" if response['outage']['zzq4'] is None else str(response['outage']['zzq4']).strip())):
tmp = [str(document['ZZQ4']).strip(), str(response['outage']['zzq4']).strip()]
tmp.append("Incorrect value for 'zzQ4'!")
table.append(tmp)
status = 1
if not (str(document['ZZQ5']).strip()== ("" if response['outage']['zzq5'] is None else str(response['outage']['zzq5']).strip())):
tmp = [str(document['ZZQ5']).strip(), str(response['outage']['zzq5']).strip()]
tmp.append("Incorrect value for 'zzQ5'!")
table.append(tmp)
status = 1
if not (str(document['ZZQ6']).strip()== ("" if response['outage']['zzq6'] is None else str(response['outage']['zzq6']).strip())):
tmp = [str(document['ZZQ6']).strip(), str(response['outage']['zzq6']).strip()]
tmp.append("Incorrect value for 'zzQ6'!")
table.append(tmp)
status = 1
if not (str(document['ZZQ7']).strip()== ("" if response['outage']['zzq7'] is None else str(response['outage']['zzq7']).strip())):
tmp = [str(document['ZZQ7']).strip(), str(response['outage']['zzq7']).strip()]
tmp.append("Incorrect value for 'zzQ7'!")
table.append(tmp)
status = 1
if not (str(document['ZZQ8']).strip()== ("" if response['outage']['zzq8'] is None else str(response['outage']['zzq8']).strip())):
tmp = [str(document['ZZQ8']).strip(), str(response['outage']['zzq8']).strip()]
tmp.append("Incorrect value for 'zzQ8'!")
table.append(tmp)
status = 1
if not (str(document['ZZQ9']).strip()== ("" if response['outage']['zzq9'] is None else str(response['outage']['zzq9']).strip())):
tmp = [str(document['ZZQ9']).strip(), str(response['outage']['zzq9']).strip()]
tmp.append("Incorrect value for 'zzQ9'!")
table.append(tmp)
status = 1
if not (str(document['ZZQ10']).strip()== ("" if response['outage']['zzq10'] is None else str(response['outage']['zzq10']).strip())):
tmp = [str(document['ZZQ10']).strip(), str(response['outage']['zzq10']).strip()]
tmp.append("Incorrect value for 'zzQ10'!")
table.append(tmp)
status = 1
if not (str(document['CC_CUSTOMER']).strip()== ("" if response['outage']['ccCustomer'] is None else str(response['outage']['ccCustomer']).strip())):
tmp = [str(document['CC_CUSTOMER']).strip(), str(response['outage']['ccCustomer']).strip()]
tmp.append("Incorrect value for 'CC_CUSTOMER/ccCustomer'!")
table.append(tmp)
status = 1
if not (str(document['EMP_MAIL_ID']).strip()== ("" if response['empEmailId'] is None else str(response['empEmailId']).strip())):
tmp = [str(document['EMP_MAIL_ID']).strip(), str(response['empEmailId']).strip()]
tmp.append("Incorrect value for 'EMP_MAIL_ID/empEmailId'!")
table.append(tmp)
status = 1
if not (str(document['EMPID']).strip()== ("" if response['empId'] is None else str(response['empId']).strip())):
tmp = [str(document['EMPID']).strip(), str(response['empId']).strip()]
tmp.append("Incorrect value for 'EMPID/empId'!")
table.append(tmp)
status = 1
if not (str(document['INTERNAL_USE']).strip()== ("" if response['outage']['internalUse'] is None else str(response['outage']['internalUse']).strip())):
tmp = [str(document['INTERNAL_USE']).strip(), str(response['outage']['internalUse']).strip()]
tmp.append("Incorrect value for 'INTERNAL_USE/internalUse'!")
table.append(tmp)
status = 1
if not (str(document['NO_OF_SYSTEMS_AFFECTED']).strip()== ("" if response['outage']['numOfSystemsAffected'] is None else str(response['outage']['numOfSystemsAffected']).strip())):
tmp = [str(document['NO_OF_SYSTEMS_AFFECTED']).strip(), str(response['outage']['numOfSystemsAffected']).strip()]
tmp.append("Incorrect value for 'NO_OF_SYSTEMS_AFFECTED/numOfSystemsAffected'!")
table.append(tmp)
status = 1
if not (str(document['NO_OF_USERS_AFFECTED']).strip()== ("" if response['outage']['numOfUsersAffected'] is None else str(response['outage']['numOfUsersAffected']).strip())):
tmp = [str(document['NO_OF_USERS_AFFECTED']).strip(), str(response['outage']['numOfUsersAffected']).strip()]
tmp.append("Incorrect value for 'NO_OF_USERS_AFFECTED/numOfUsersAffected'!")
table.append(tmp)
status = 1
if not (str(document['PRODUCT_SERIES_TECH']).strip()== ("" if response['prodSeriesTech'] is None else str(response['prodSeriesTech']).strip())):
tmp = [str(document['PRODUCT_SERIES_TECH']).strip(), str(response['prodSeriesTech']).strip()]
tmp.append("Incorrect value for 'PRODUCT_SERIES_TECH/prodSeriesTech'!")
table.append(tmp)
status = 1
if not (str(document['SERVICE_PRODUCT']).strip()== ("" if response['entitlement']['serviceProduct'] is None else str(response['entitlement']['serviceProduct']).strip())):
tmp = [str(document['SERVICE_PRODUCT']).strip(), str(response['entitlement']['serviceProduct']).strip()]
tmp.append("Incorrect value for 'SERVICE_PRODUCT/serviceProduct'!")
table.append(tmp)
status = 1
if status==0:
output += "\nMatch Found"
row.append("Match Found")
else:
output += "\nData Mismatch\n"
row.append("Data Mismatch")
output += tabulate(table, headers=["Kafka", "API", "Status"], tablefmt="rst")
else:
output += "\nNo Match Found in Hadoop."
row.append("No Match Found in Hadoop.")
totalTime = datetime.datetime.now() - start_time
row.append(totalTime)
response_writer.writerow(row)
return output
def validate_kb_links(self, r, document, document_no, start_time, response_writer):
row = []
output = ""
row.append(document_no)
row.append(datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S.%f'))
row.append(document['caseId'])
row.append("KBLINKS")
row.append("")
row.append(r.status_code)
row.append(r.elapsed)
status = 0
response = ""
if r.status_code == 200:
response = json.loads(r.text)
table = []
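# Match every kbLink from the document against the API response; a link only counts as found when all ten compared fields agree.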
if not (str(document['caseId']).strip() == ("" if response['srId'] is None else str(response['srId']).strip())):
output += "\nIncorrect value for 'caseId'!"
status = 1
document_kbLinks_len = len(document['link'])
if type(document['link']) is dict:
output += "kBLinks in document is not an array!"
document_kbLinks_len = 1
document['link'] = [document['link']]
if response['kbLinks'] is not None:
response_kbLinks_len = len(response['kbLinks'])
else:
response_kbLinks_len = 0
output += "\nNumber of kbLinks in document: "+str(document_kbLinks_len)
output += "\nNumber of kbLinks in API response: "+str(response_kbLinks_len)
if document_kbLinks_len==0:
output += "No kbLinks found in document!"
row.append("No kbLinks found in document!")
output += "Kafka: "+str(json.dumps(document['link'], sort_keys=True))
output += "API: "+str(json.dumps(response['kbLinks'], sort_keys=True))
response_writer.writerow(row)
return output
if response_kbLinks_len==0 and document_kbLinks_len>0:
output += "No kbLinks found in API response but present in document."
row.append("No kbLinks found in API response but present in document.")
output += "Kafka: "+str(json.dumps(document['link'], sort_keys=True))
output += "API: "+str(json.dumps(response['kbLinks'], sort_keys=True))
response_writer.writerow(row)
return output
for doc_link in document['link']:
match_level = 0
found = 0
match_location = 0
counter = 0
old_match_level = 0
match_data = ""
for resp in response['kbLinks']:
match_level = 0
if str(doc_link['KBID']).strip() == str(("" if resp['kbId'] is None else resp['kbId'])).strip():
match_level += 1
if str(doc_link['STATUS']).strip() == str(("" if resp['status'] is None else resp['status'])).strip():
match_level += 1
if str(doc_link['DESCRIPTION']).strip() == str(("" if resp['description'] is None else resp['description'])).strip():
match_level += 1
if str(doc_link['INTERNALID']).strip() == str(("" if resp['internalId'] is None else resp['internalId'])).strip():
match_level += 1
if str(doc_link['URL']).strip() == str(("" if resp['url'] is None else resp['url'])).strip():
match_level += 1
if str("" if doc_link['KBDATE']=="0" else doc_link['KBDATE']).strip() == str(("" if resp['kbDate'] is None else resp['kbDate'])).strip().replace("-", "").replace(":", "").replace(" ", ""):
match_level += 1
if str(doc_link['DATA_SOURCE']).strip() == str(("" if resp['dataSource'] is None else resp['dataSource'])).strip():
match_level += 1
if str(doc_link['SOURCEVISIBILITY']).strip() == str(("" if resp['srcVisiblity'] is None else resp['srcVisiblity'])).strip():
match_level += 1
if str(doc_link['KB_FLAG']).strip() == str(("" if resp['kbFlag'] is None else resp['kbFlag'])).strip():
match_level += 1
if str(doc_link['SRVISIBILITY']).strip() == str(("" if resp['srVisibility'] is None else resp['srVisibility'])).strip():
if match_level >= 9:
found = 1
match_level += 1
match_location = counter
match_data = resp
break
if match_level >= old_match_level:
match_location = counter
old_match_level = match_level
match_data = resp
counter += 1
if found == 0:
output += "\n************************************************"
output += "\nData Mismatch, max number of values matched is "+str(old_match_level)
output += "\nKafka ==> "+str(json.dumps(doc_link, sort_keys=True))
output += "\nAPI ==> "+str(json.dumps(match_data, sort_keys=True))
tmp = ["", "", "Incorrect value for 'kbLinks'!"]
table.append(tmp)
status = 1
output += "\n************************************************"
else:
output += "\nData matched, highest level of match is "+str(match_level)
output += "\nKafka ==> "+str(json.dumps(doc_link, sort_keys=True))
output += "\nAPI ==> "+str(json.dumps(match_data, sort_keys=True))
tmp = ["", "", "Match found for 'kbLinks'!"]
table.append(tmp)
if status == 0:
output += "\nMatch Found"
row.append("Match Found")
else:
row.append("Data Mismatch")
output += "\nCompared JSONs"
output += "Kafka: "+str(json.dumps(document['link'], sort_keys=True))
output += "API: "+str(json.dumps(response['kbLinks'], sort_keys=True))
output += "\n"
output += tabulate(table, headers=["Kafka", "API", "Status"], tablefmt="rst")
else:
output += "\nNo Match Found in Hadoop."
row.append("No Match Found in Hadoop.")
totalTime = datetime.datetime.now() - start_time
row.append(totalTime)
response_writer.writerow(row)
return output
def validate_sr_attachments(self, r, document, document_no, start_time, response_writer):
row = []
output = ""
row.append(document_no)
row.append(datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S.%f'))
row.append(document['caseId'])
row.append("Attachments")
row.append("")
row.append(r.status_code)
row.append(r.elapsed)
status = 0
response = ""
if r.status_code==200:
response = json.loads(r.text)
table = []
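# Match every attachment from the document against the API response; an attachment only counts as found when all eleven compared fields agree.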
if not (str(document['caseId']).strip() == ("" if response['srId'] is None else str(response['srId']).strip())):
output += "Incorrect value for 'caseId'!"
status = 1
if response['attachments'] is not None:
response_attachment_len = len(response['attachments'])
else:
response_attachment_len = 0
document_attachment_len = len(document['attachment'])
if type(document['attachment']) is dict:
output += "attachment in document is not an array!"
document_attachment_len = 1
document['attachment'] = [document['attachment']]
output += "\nNumber of attachments in document: "+str(document_attachment_len)
output += "\nNumber of attachments in API response: "+str(response_attachment_len)
if document_attachment_len==0:
output += "\nNo attachment found in document!"
row.append("No attachment found in document!")
output += "Kafka: "+str(json.dumps(document['attachment'], sort_keys=True))
output += "API: "+str(json.dumps(response['attachments'], sort_keys=True))
response_writer.writerow(row)
return output
if response_attachment_len==0 and document_attachment_len>0:
output += "\nNo attachment found in API response but present in document."
row.append("No attachment found in API response but present in document.")
output += "Kafka: "+str(json.dumps(document['attachment'], sort_keys=True))
output += "API: "+str(json.dumps(response['attachments'], sort_keys=True))
response_writer.writerow(row)
return output
for doc_attachment in document['attachment']:
match_level = 0
found = 0
match_location = 0
counter = 0
old_match_level = 0
zDate = ""
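# ZDATE arrives as YYYYMMDD (0 when unset); reformat it to YYYY-MM-DD to match the API.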
if int(doc_attachment['ZDATE']) == 0:
zDate = ""
else:
zDate = str(doc_attachment['ZDATE'])
zDate = zDate[:4]+"-"+zDate[4:6]+"-"+zDate[6:]
match_data = ""
for resp in response['attachments']:
match_level = 0
doc_private = doc_attachment.get('PRIVATE1', "")
if not doc_private:
doc_private = doc_attachment.get('PRIVATE', "")
if not doc_private:
doc_private = doc_attachment.get('ISPRIVATE', "")
if str(doc_attachment['SNO']).strip() == str("" if resp['attNo'] is None else resp['attNo']).strip():
match_level += 1
if str(doc_attachment['TITLE']).strip() == str("" if resp['title'] is None else resp['title']).strip():
match_level += 1
if str(doc_attachment['ZTIME']).strip() == str("" if resp['zTime'] is None else resp['zTime']).strip():
match_level += 1
if str(doc_attachment['FILE_TYPE']).strip() == str("" if resp['fileType'] is None else resp['fileType']).strip():
match_level += 1
if str(doc_private).strip() == str("" if resp['private1'] is None else resp['private1']).strip():
match_level += 1
if str(doc_attachment['DATE_CREATED']).strip() == str("" if resp['dateCreated'] is None else resp['dateCreated']).strip():
match_level += 1
if str(doc_attachment['CREATED_BY']).strip() == str("" if resp['createdBy'] is None else resp['createdBy']).strip():
match_level += 1
if str(doc_attachment['PATH']).strip() == str("" if resp['path'] is None else resp['path']).strip():
match_level += 1
if str(doc_attachment['UPLOADED_BY']).strip() == str("" if resp['uploadedBy'] is None else resp['uploadedBy']).strip():
match_level += 1
if str(doc_attachment['SIZE1']).strip() == str("" if resp['size1'] is None else int(resp['size1'])).strip():
match_level += 1
if str(zDate).strip() == str("" if resp['zDate'] is None else resp['zDate']).strip():
if match_level >= 10:
found = 1
match_level += 1
match_location = counter
match_data = resp
break
if match_level >= old_match_level:
match_location = counter
old_match_level = match_level
match_data = resp
counter += 1
if found == 0:
output += "\n************************************************"
output += "\nData Mismatch, max number of values matched is "+str(old_match_level)
output += "\nKafka ==> "+str(json.dumps(doc_attachment, sort_keys=True))
output += "\nAPI ==> "+str(json.dumps(match_data, sort_keys=True))
tmp = ["", "", "Incorrect value for 'attachment'!"]
table.append(tmp)
status = 1
output += "\n************************************************"
else:
output += "\nData matched, highest level of match is "+str(match_level)
output += "\nKafka ==> "+str(json.dumps(doc_attachment, sort_keys=True))
output += "\nAPI ==> "+str(json.dumps(match_data, sort_keys=True))
tmp = ["", "", "Match found for 'attachment'!"]
table.append(tmp)
if status == 0:
output += "\nMatch Found"
row.append("Match Found")
else:
row.append("Data Mismatch")
output += "\nCompared JSONs"
output += "\nKafka: "+str(json.dumps(document['attachment'], sort_keys=True))
output += "\nAPI: "+str(json.dumps(response['attachments'], sort_keys=True))
output += "\n"
output += tabulate(table, headers=["Kafka", "API", "Status"], tablefmt="rst")
else:
output += "\nNo Match Found in Hadoop."
row.append("No Match Found in Hadoop.")
totalTime = datetime.datetime.now() - start_time
row.append(totalTime)
response_writer.writerow(row)
return output
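# --- Usage sketch (illustrative, not part of the original tool) ---
# A minimal, hypothetical driver showing how the validators above fit together.
# `documents` stands in for whatever iterable of Kafka/Hadoop case documents the
# surrounding tooling supplies, and "report.csv" is an assumed output path.
if __name__ == "__main__":
    import csv
    documents = []  # placeholder: populate with case documents to validate
    utils = Utils()
    with open("report.csv", "wb") as report_file:
        response_writer = csv.writer(report_file)
        for document_no, document in enumerate(documents, start=1):
            start_time = datetime.datetime.now()
            r = utils.request(document, "SRID")
            output = utils.header(document, document_no, "SRID")
            output += utils.validate_sr_details(r, document, document_no, start_time, response_writer)
            print utils.footer(output)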
|
|
from struct import pack, unpack
import hashlib
import sys
import traceback
from electrum import bitcoin
from electrum.bitcoin import TYPE_ADDRESS, int_to_hex, var_int
from electrum.i18n import _
from electrum.plugin import BasePlugin
from electrum.keystore import Hardware_KeyStore
from electrum.transaction import Transaction
from electrum.wallet import Standard_Wallet
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch
from electrum.util import print_error, bfh, bh2u, versiontuple
from electrum.base_wizard import ScriptTypeNotSupported
try:
import hid
from btchip.btchipComm import HIDDongleHIDAPI, DongleWait
from btchip.btchip import btchip
from btchip.btchipUtils import compress_public_key,format_transaction, get_regular_input_script, get_p2sh_input_script
from btchip.bitcoinTransaction import bitcoinTransaction
from btchip.btchipFirmwareWizard import checkFirmware, updateFirmware
from btchip.btchipException import BTChipException
BTCHIP = True
BTCHIP_DEBUG = False
except ImportError:
BTCHIP = False
MSG_NEEDS_FW_UPDATE_GENERIC = _('Firmware version too old. Please update at') + \
' https://www.ledgerwallet.com'
MSG_NEEDS_FW_UPDATE_SEGWIT = _('Firmware version (or "Bitcoin" app) too old for Segwit support. Please update at') + \
' https://www.ledgerwallet.com'
MULTI_OUTPUT_SUPPORT = '1.1.4'
SEGWIT_SUPPORT = '1.1.10'
SEGWIT_SUPPORT_SPECIAL = '1.0.4'
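# Minimum Ledger firmware/app versions required for multi-output transactions and segwit support (checked in perform_hw1_preflight).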
def test_pin_unlocked(func):
"""Function decorator to test the Ledger for being unlocked, and if not,
raise a human-readable exception.
"""
def catch_exception(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except BTChipException as e:
if e.sw == 0x6982:
raise Exception(_('Your Ledger is locked. Please unlock it.'))
else:
raise
return catch_exception
class Ledger_Client():
def __init__(self, hidDevice):
self.dongleObject = btchip(hidDevice)
self.preflightDone = False
def is_pairable(self):
return True
def close(self):
self.dongleObject.dongle.close()
def timeout(self, cutoff):
pass
def is_initialized(self):
return True
def label(self):
return ""
def i4b(self, x):
return pack('>I', x)
def has_usable_connection_with_device(self):
try:
self.dongleObject.getFirmwareVersion()
except BaseException:
return False
return True
@test_pin_unlocked
def get_xpub(self, bip32_path, xtype):
self.checkDevice()
# bip32_path is of the form 44'/0'/1'
# S-L-O-W - we don't handle the fingerprint directly, so compute
# it manually from the previous node
# This only happens once so it's bearable
#self.get_client() # prompt for the PIN before displaying the dialog if necessary
#self.handler.show_message("Computing master public key")
if xtype in ['p2wpkh', 'p2wsh'] and not self.supports_native_segwit():
raise Exception(MSG_NEEDS_FW_UPDATE_SEGWIT)
if xtype in ['p2wpkh-p2sh', 'p2wsh-p2sh'] and not self.supports_segwit():
raise Exception(MSG_NEEDS_FW_UPDATE_SEGWIT)
splitPath = bip32_path.split('/')
if splitPath[0] == 'm':
splitPath = splitPath[1:]
bip32_path = bip32_path[2:]
fingerprint = 0
if len(splitPath) > 1:
prevPath = "/".join(splitPath[0:len(splitPath) - 1])
nodeData = self.dongleObject.getWalletPublicKey(prevPath)
publicKey = compress_public_key(nodeData['publicKey'])
h = hashlib.new('ripemd160')
h.update(hashlib.sha256(publicKey).digest())
fingerprint = unpack(">I", h.digest()[0:4])[0]
nodeData = self.dongleObject.getWalletPublicKey(bip32_path)
publicKey = compress_public_key(nodeData['publicKey'])
depth = len(splitPath)
lastChild = splitPath[len(splitPath) - 1].split('\'')
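# Hardened path components end with an apostrophe; set the top bit (0x80000000) of the child number for them.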
childnum = int(lastChild[0]) if len(lastChild) == 1 else 0x80000000 | int(lastChild[0])
xpub = bitcoin.serialize_xpub(xtype, nodeData['chainCode'], publicKey, depth, self.i4b(fingerprint), self.i4b(childnum))
return xpub
def has_detached_pin_support(self, client):
try:
client.getVerifyPinRemainingAttempts()
return True
except BTChipException as e:
if e.sw == 0x6d00:
return False
raise e
def is_pin_validated(self, client):
try:
# Invalid SET OPERATION MODE to verify the PIN status
client.dongle.exchange(bytearray([0xe0, 0x26, 0x00, 0x00, 0x01, 0xAB]))
except BTChipException as e:
if (e.sw == 0x6982):
return False
if (e.sw == 0x6A80):
return True
raise e
def supports_multi_output(self):
return self.multiOutputSupported
def supports_segwit(self):
return self.segwitSupported
def supports_native_segwit(self):
return self.nativeSegwitSupported
def perform_hw1_preflight(self):
try:
firmwareInfo = self.dongleObject.getFirmwareVersion()
firmware = firmwareInfo['version']
self.multiOutputSupported = versiontuple(firmware) >= versiontuple(MULTI_OUTPUT_SUPPORT)
self.nativeSegwitSupported = versiontuple(firmware) >= versiontuple(SEGWIT_SUPPORT)
self.segwitSupported = self.nativeSegwitSupported or (firmwareInfo['specialVersion'] == 0x20 and versiontuple(firmware) >= versiontuple(SEGWIT_SUPPORT_SPECIAL))
if not checkFirmware(firmwareInfo):
self.dongleObject.dongle.close()
raise Exception(MSG_NEEDS_FW_UPDATE_GENERIC)
try:
self.dongleObject.getOperationMode()
except BTChipException as e:
if (e.sw == 0x6985):
self.dongleObject.dongle.close()
self.handler.get_setup()
# Acquire the new client on the next run
else:
raise e
if self.has_detached_pin_support(self.dongleObject) and not self.is_pin_validated(self.dongleObject) and (self.handler is not None):
remaining_attempts = self.dongleObject.getVerifyPinRemainingAttempts()
if remaining_attempts != 1:
msg = "Enter your Ledger PIN - remaining attempts : " + str(remaining_attempts)
else:
msg = "Enter your Ledger PIN - WARNING : LAST ATTEMPT. If the PIN is not correct, the dongle will be wiped."
confirmed, p, pin = self.password_dialog(msg)
if not confirmed:
raise Exception('Aborted by user - please unplug the dongle and plug it again before retrying')
pin = pin.encode()
self.dongleObject.verifyPin(pin)
except BTChipException as e:
if (e.sw == 0x6faa):
raise Exception("Dongle is temporarily locked - please unplug it and replug it again")
if ((e.sw & 0xFFF0) == 0x63c0):
raise Exception("Invalid PIN - please unplug the dongle and plug it again before retrying")
if e.sw == 0x6f00 and e.message == 'Invalid channel':
# based on docs 0x6f00 might be a more general error, hence we also compare message to be sure
raise Exception("Invalid channel.\n"
"Please make sure that 'Browser support' is disabled on your device.")
raise e
def checkDevice(self):
if not self.preflightDone:
try:
self.perform_hw1_preflight()
except BTChipException as e:
if (e.sw == 0x6d00 or e.sw == 0x6700):
raise Exception(_("Device not in Bitcoin mode")) from e
raise e
self.preflightDone = True
def password_dialog(self, msg=None):
response = self.handler.get_word(msg)
if response is None:
return False, None, None
return True, response, response
class Ledger_KeyStore(Hardware_KeyStore):
hw_type = 'ledger'
device = 'Ledger'
def __init__(self, d):
Hardware_KeyStore.__init__(self, d)
# Errors and other user interaction is done through the wallet's
# handler. The handler is per-window and preserved across
# device reconnects
self.force_watching_only = False
self.signing = False
self.cfg = d.get('cfg', {'mode':0,'pair':''})
def dump(self):
obj = Hardware_KeyStore.dump(self)
obj['cfg'] = self.cfg
return obj
def get_derivation(self):
return self.derivation
def get_client(self):
return self.plugin.get_client(self).dongleObject
def get_client_electrum(self):
return self.plugin.get_client(self)
def give_error(self, message, clear_client = False):
print_error(message)
if not self.signing:
self.handler.show_error(message)
else:
self.signing = False
if clear_client:
self.client = None
raise Exception(message)
def set_and_unset_signing(func):
"""Function decorator to set and unset self.signing."""
def wrapper(self, *args, **kwargs):
try:
self.signing = True
return func(self, *args, **kwargs)
finally:
self.signing = False
return wrapper
def address_id_stripped(self, address):
# Strip the leading "m/"
change, index = self.get_address_index(address)
derivation = self.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
return address_path[2:]
def decrypt_message(self, pubkey, message, password):
raise RuntimeError(_('Encryption and decryption are currently not supported for {}').format(self.device))
@test_pin_unlocked
@set_and_unset_signing
def sign_message(self, sequence, message, password):
message = message.encode('utf8')
message_hash = hashlib.sha256(message).hexdigest().upper()
# prompt for the PIN before displaying the dialog if necessary
client = self.get_client()
address_path = self.get_derivation()[2:] + "/%d/%d"%sequence
self.handler.show_message("Signing message ...\r\nMessage hash: "+message_hash)
try:
info = self.get_client().signMessagePrepare(address_path, message)
pin = ""
if info['confirmationNeeded']:
pin = self.handler.get_auth( info ) # does the authenticate dialog and returns pin
if not pin:
raise UserWarning(_('Cancelled by user'))
pin = str(pin).encode()
signature = self.get_client().signMessageSign(pin)
except BTChipException as e:
if e.sw == 0x6a80:
self.give_error("Unfortunately, this message cannot be signed by the Ledger wallet. Only alphanumerical messages shorter than 140 characters are supported. Please remove any extra characters (tab, carriage return) and retry.")
elif e.sw == 0x6985: # cancelled by user
return b''
elif e.sw == 0x6982:
raise # pin lock. decorator will catch it
else:
self.give_error(e, True)
except UserWarning:
self.handler.show_error(_('Cancelled by user'))
return b''
except Exception as e:
self.give_error(e, True)
finally:
self.handler.finished()
# Parse the ASN.1 signature
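# The dongle returns a DER-encoded ECDSA signature:
#   0x30 <total-len> 0x02 <r-len> <r> 0x02 <s-len> <s>
# r and s are trimmed of their leading 0x00 pad byte when 33 bytes long, and
# the low bit of the first byte (which the device uses to convey the recovery
# parity) is folded into the 27 + 4 compressed-key header below.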
rLength = signature[3]
r = signature[4 : 4 + rLength]
sLength = signature[4 + rLength + 1]
s = signature[4 + rLength + 2:]
if rLength == 33:
r = r[1:]
if sLength == 33:
s = s[1:]
# And convert it
return bytes([27 + 4 + (signature[0] & 0x01)]) + r + s
@test_pin_unlocked
@set_and_unset_signing
def sign_transaction(self, tx, password):
if tx.is_complete():
return
client = self.get_client()
inputs = []
inputsPaths = []
pubKeys = []
chipInputs = []
redeemScripts = []
signatures = []
preparedTrustedInputs = []
changePath = ""
output = None
p2shTransaction = False
segwitTransaction = False
pin = ""
self.get_client() # prompt for the PIN before displaying the dialog if necessary
# Fetch inputs of the transaction to sign
derivations = self.get_tx_derivations(tx)
for txin in tx.inputs():
if txin['type'] == 'coinbase':
self.give_error("Coinbase not supported") # should never happen
if txin['type'] in ['p2sh']:
p2shTransaction = True
if txin['type'] in ['p2wpkh-p2sh', 'p2wsh-p2sh']:
if not self.get_client_electrum().supports_segwit():
self.give_error(MSG_NEEDS_FW_UPDATE_SEGWIT)
segwitTransaction = True
if txin['type'] in ['p2wpkh', 'p2wsh']:
if not self.get_client_electrum().supports_native_segwit():
self.give_error(MSG_NEEDS_FW_UPDATE_SEGWIT)
segwitTransaction = True
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
for i, x_pubkey in enumerate(x_pubkeys):
if x_pubkey in derivations:
signingPos = i
s = derivations.get(x_pubkey)
hwAddress = "%s/%d/%d" % (self.get_derivation()[2:], s[0], s[1])
break
else:
self.give_error("No matching x_key for sign_transaction") # should never happen
redeemScript = Transaction.get_preimage_script(txin)
txin_prev_tx = txin.get('prev_tx')
if txin_prev_tx is None and not Transaction.is_segwit_input(txin):
raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
txin_prev_tx_raw = txin_prev_tx.raw if txin_prev_tx else None
inputs.append([txin_prev_tx_raw,
txin['prevout_n'],
redeemScript,
txin['prevout_hash'],
signingPos,
txin.get('sequence', 0xffffffff - 1),
txin.get('value')])
inputsPaths.append(hwAddress)
pubKeys.append(pubkeys)
# Sanity check
if p2shTransaction:
for txin in tx.inputs():
if txin['type'] != 'p2sh':
self.give_error("P2SH / regular input mixed in same transaction not supported") # should never happen
txOutput = var_int(len(tx.outputs()))
for txout in tx.outputs():
output_type, addr, amount = txout
txOutput += int_to_hex(amount, 8)
script = tx.pay_script(output_type, addr)
txOutput += var_int(len(script)//2)
txOutput += script
txOutput = bfh(txOutput)
# Recognize outputs
# - only one output and one change is authorized (for hw.1 and nano)
# - at most one output can bypass confirmation (~change) (for all)
if not p2shTransaction:
if not self.get_client_electrum().supports_multi_output():
if len(tx.outputs()) > 2:
self.give_error("Transaction with more than 2 outputs not supported")
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
assert o.type == TYPE_ADDRESS
info = tx.output_info.get(o.address)
if (info is not None) and len(tx.outputs()) > 1 \
and not has_change:
index = info.address_index
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
if on_change_branch == any_output_on_change_branch:
changePath = self.get_derivation()[2:] + "/%d/%d"%index
has_change = True
else:
output = o.address
else:
output = o.address
self.handler.show_message(_("Confirm Transaction on your Ledger device..."))
try:
# Get trusted inputs from the original transactions
for utxo in inputs:
sequence = int_to_hex(utxo[5], 4)
if segwitTransaction:
tmp = bfh(utxo[3])[::-1]
tmp += bfh(int_to_hex(utxo[1], 4))
tmp += bfh(int_to_hex(utxo[6], 8)) # txin['value']
chipInputs.append({'value' : tmp, 'witness' : True, 'sequence' : sequence})
redeemScripts.append(bfh(utxo[2]))
elif not p2shTransaction:
txtmp = bitcoinTransaction(bfh(utxo[0]))
trustedInput = self.get_client().getTrustedInput(txtmp, utxo[1])
trustedInput['sequence'] = sequence
chipInputs.append(trustedInput)
redeemScripts.append(txtmp.outputs[utxo[1]].script)
else:
tmp = bfh(utxo[3])[::-1]
tmp += bfh(int_to_hex(utxo[1], 4))
chipInputs.append({'value' : tmp, 'sequence' : sequence})
redeemScripts.append(bfh(utxo[2]))
# Sign all inputs
firstTransaction = True
inputIndex = 0
rawTx = tx.serialize_to_network()
self.get_client().enableAlternate2fa(False)
if segwitTransaction:
self.get_client().startUntrustedTransaction(True, inputIndex,
chipInputs, redeemScripts[inputIndex])
if changePath:
# we don't set meaningful outputAddress, amount and fees
# as we only care about the alternateEncoding==True branch
outputData = self.get_client().finalizeInput(b'', 0, 0, changePath, bfh(rawTx))
else:
outputData = self.get_client().finalizeInputFull(txOutput)
outputData['outputData'] = txOutput
transactionOutput = outputData['outputData']
if outputData['confirmationNeeded']:
outputData['address'] = output
self.handler.finished()
pin = self.handler.get_auth( outputData ) # does the authenticate dialog and returns pin
if not pin:
raise UserWarning()
if pin != 'paired':
self.handler.show_message(_("Confirmed. Signing Transaction..."))
while inputIndex < len(inputs):
singleInput = [ chipInputs[inputIndex] ]
self.get_client().startUntrustedTransaction(False, 0,
singleInput, redeemScripts[inputIndex])
inputSignature = self.get_client().untrustedHashSign(inputsPaths[inputIndex], pin, lockTime=tx.locktime)
inputSignature[0] = 0x30 # force for 1.4.9+
signatures.append(inputSignature)
inputIndex = inputIndex + 1
else:
while inputIndex < len(inputs):
self.get_client().startUntrustedTransaction(firstTransaction, inputIndex,
chipInputs, redeemScripts[inputIndex])
if changePath:
# we don't set meaningful outputAddress, amount and fees
# as we only care about the alternateEncoding==True branch
outputData = self.get_client().finalizeInput(b'', 0, 0, changePath, bfh(rawTx))
else:
outputData = self.get_client().finalizeInputFull(txOutput)
outputData['outputData'] = txOutput
if firstTransaction:
transactionOutput = outputData['outputData']
if outputData['confirmationNeeded']:
outputData['address'] = output
self.handler.finished()
pin = self.handler.get_auth( outputData ) # does the authenticate dialog and returns pin
if not pin:
raise UserWarning()
if pin != 'paired':
self.handler.show_message(_("Confirmed. Signing Transaction..."))
else:
# Sign input with the provided PIN
inputSignature = self.get_client().untrustedHashSign(inputsPaths[inputIndex], pin, lockTime=tx.locktime)
inputSignature[0] = 0x30 # force for 1.4.9+
signatures.append(inputSignature)
inputIndex = inputIndex + 1
if pin != 'paired':
firstTransaction = False
except UserWarning:
self.handler.show_error(_('Cancelled by user'))
return
except BTChipException as e:
if e.sw == 0x6985: # cancelled by user
return
elif e.sw == 0x6982:
raise # pin lock. decorator will catch it
else:
traceback.print_exc(file=sys.stderr)
self.give_error(e, True)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.give_error(e, True)
finally:
self.handler.finished()
for i, txin in enumerate(tx.inputs()):
signingPos = inputs[i][4]
tx.add_signature_to_txin(i, signingPos, bh2u(signatures[i]))
tx.raw = tx.serialize()
@test_pin_unlocked
@set_and_unset_signing
def show_address(self, sequence, txin_type):
client = self.get_client()
address_path = self.get_derivation()[2:] + "/%d/%d"%sequence
self.handler.show_message(_("Showing address ..."))
segwit = Transaction.is_segwit_inputtype(txin_type)
segwitNative = txin_type == 'p2wpkh'
try:
client.getWalletPublicKey(address_path, showOnScreen=True, segwit=segwit, segwitNative=segwitNative)
except BTChipException as e:
if e.sw == 0x6985: # cancelled by user
pass
elif e.sw == 0x6982:
raise # pin lock. decorator will catch it
elif e.sw == 0x6b00: # hw.1 raises this
self.handler.show_error('{}\n{}\n{}'.format(
_('Error showing address') + ':',
e,
_('Your device might not have support for this functionality.')))
else:
traceback.print_exc(file=sys.stderr)
self.handler.show_error(e)
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.handler.show_error(e)
finally:
self.handler.finished()
class LedgerPlugin(HW_PluginBase):
libraries_available = BTCHIP
keystore_class = Ledger_KeyStore
client = None
DEVICE_IDS = [
(0x2581, 0x1807), # HW.1 legacy btchip
(0x2581, 0x2b7c), # HW.1 transitional production
(0x2581, 0x3b7c), # HW.1 ledger production
(0x2581, 0x4b7c), # HW.1 ledger test
(0x2c97, 0x0000), # Blue
(0x2c97, 0x0001) # Nano-S
]
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
def __init__(self, parent, config, name):
self.segwit = config.get("segwit")
HW_PluginBase.__init__(self, parent, config, name)
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def get_btchip_device(self, device):
ledger = False
if device.product_key[0] == 0x2581 and device.product_key[1] == 0x3b7c:
ledger = True
if device.product_key[0] == 0x2581 and device.product_key[1] == 0x4b7c:
ledger = True
if device.product_key[0] == 0x2c97:
if device.interface_number == 0 or device.usage_page == 0xffa0:
ledger = True
else:
return None # non-compatible interface of a Nano S or Blue
dev = hid.device()
dev.open_path(device.path)
dev.set_nonblocking(True)
return HIDDongleHIDAPI(dev, ledger, BTCHIP_DEBUG)
def create_client(self, device, handler):
if handler:
self.handler = handler
client = self.get_btchip_device(device)
if client is not None:
client = Ledger_Client(client)
return client
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise Exception(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
client.handler = self.create_handler(wizard)
client.get_xpub("m/44'/0'", 'standard') # TODO replace by direct derivation once Nano S > 1.1
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = self.create_handler(wizard)
client.checkDevice()
xpub = client.get_xpub(derivation, xtype)
return xpub
def get_client(self, keystore, force_pair=True):
# Client interaction must never happen in the main GUI thread
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
#if client:
# client.used()
if client is not None:
client.checkDevice()
return client
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
if type(wallet) is not Standard_Wallet:
keystore.handler.show_error(_('This function is only available for standard wallets when using {}.').format(self.device))
return
sequence = wallet.get_address_index(address)
txin_type = wallet.get_txin_type(address)
keystore.show_address(sequence, txin_type)
|
|
# -*- coding: utf-8 -*-
"""
This module implements a Python-to-SQLAlchemy syntax parser.
Allows the SQLAlchemy data-layer to seamlessly respond to a
Python-like query.
:copyright: (c) 2013 by Andrew Mleczko and Tomasz Jezierski (Tefnet)
:license: BSD, see LICENSE for more details.
"""
import re
import ast
import operator as sqla_op
import json
from eve.utils import str_to_date
from sqlalchemy.ext.associationproxy import AssociationProxy
from sqlalchemy.sql import expression as sqla_exp
class ParseError(ValueError):
pass
def parse_dictionary(filter_dict, model):
"""
Parse a dictionary into a list of SQLAlchemy BinaryExpressions to be used
in query filters.
:param filter_dict: Dictionary to convert
:param model: SQLAlchemy model class used to create the BinaryExpressions
:return list: List of conditions as SQLAlchemy BinaryExpressions
"""
if len(filter_dict) == 0:
return []
conditions = []
for k, v in filter_dict.items():
# first let's check with the expression parser
try:
conditions += parse('{0}{1}'.format(k, v), model)
except ParseError:
pass
else:
continue
attr = getattr(model, k)
if isinstance(attr, AssociationProxy):
conditions.append(attr.contains(v))
elif hasattr(attr, 'property') and \
hasattr(attr.property, 'remote_side'): # a relation
for fk in attr.property.remote_side:
conditions.append(sqla_op.eq(fk, v))
else:
try:
new_o, v = parse_sqla_operators(v)
new_filter = getattr(attr, new_o)(v)
except (TypeError, ValueError): # json/sql parse error
if isinstance(v, list): # we have an array
new_filter = attr.in_(v)
else:
new_filter = sqla_op.eq(attr, v)
conditions.append(new_filter)
return conditions
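# Illustrative sketch (not part of the original module). Assuming a hypothetical
# SQLAlchemy model `People` with columns `firstname` and `prog`, the fallback
# branches above behave roughly like this:
#
#     parse_dictionary({'firstname': 'john'}, People)
#     # -> [People.firstname == 'john']
#     parse_dictionary({'prog': [1, 2, 3]}, People)
#     # -> [People.prog.in_([1, 2, 3])]
#     parse_dictionary({'firstname': 'like("john%")'}, People)
#     # -> [People.firstname.like('john%')]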
def parse_sqla_operators(expression):
"""
Parse expressions like:
like('%john%')
ilike('john%')
in_(['a','b'])
"""
m = re.match(r"(?P<operator>\w+)\((?P<value>.+)\)", expression)
if m:
o = m.group('operator')
v = json.loads(m.group('value'))
return o, v
def parse(expression, model):
"""
Given a python-like conditional statement, returns the equivalent
SQLAlchemy-like query expression. Conditional and boolean operators
(==, <=, >=, !=, >, <) are supported.
"""
v = SQLAVisitor(model)
v.visit(ast.parse(expression))
return v.sqla_query
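# Illustrative sketch (not part of the original module). With the same
# hypothetical `People` model, a Python-like conditional becomes a list of
# SQLAlchemy expressions:
#
#     parse('firstname == "john" or prog > 5', People)
#     # -> [or_(People.firstname == 'john', People.prog > 5)]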
class SQLAVisitor(ast.NodeVisitor):
"""Implements the python-to-sql parser. Only Python conditional
statements are supported, however nested, combined with most common compare
and boolean operators (And and Or).
Supported compare operators: ==, >, <, !=, >=, <=
Supported boolean operators: And, Or
"""
op_mapper = {
ast.Eq: sqla_op.eq,
ast.Gt: sqla_op.gt,
ast.GtE: sqla_op.ge,
ast.Lt: sqla_op.lt,
ast.LtE: sqla_op.le,
ast.NotEq: sqla_op.ne,
ast.Or: sqla_exp.or_,
ast.And: sqla_exp.and_
}
def __init__(self, model):
super(SQLAVisitor, self).__init__()
self.model = model
self.sqla_query = []
self.ops = []
self.current_value = None
def visit_Module(self, node):
""" Module handler, our entry point.
"""
self.sqla_query = []
self.ops = []
self.current_value = None
# perform the magic.
self.generic_visit(node)
# if we didn't obtain a query, it is likely that an unsupported
# python expression has been passed.
if not len(self.sqla_query):
raise ParseError("Only conditional statements with boolean "
"(and, or) and comparison operators are "
"supported.")
def visit_Expr(self, node):
""" Make sure that we are parsing compare or boolean operators
"""
if not (isinstance(node.value, ast.Compare) or
isinstance(node.value, ast.BoolOp)):
raise ParseError("Will only parse conditional statements")
self.generic_visit(node)
def visit_Compare(self, node):
""" Compare operator handler.
"""
self.visit(node.left)
left = getattr(self.model, self.current_value)
operation = self.op_mapper[node.ops[0].__class__]
if node.comparators:
comparator = node.comparators[0]
self.visit(comparator)
value = self.current_value
if self.ops:
self.ops[-1]['args'].append(operation(left, value))
else:
self.sqla_query.append(operation(left, value))
def visit_BoolOp(self, node):
""" Boolean operator handler.
"""
op = self.op_mapper[node.op.__class__]
self.ops.append({'op': op, 'args': []})
for value in node.values:
self.visit(value)
tops = self.ops.pop()
if self.ops:
self.ops[-1]['args'].append(tops['op'](*tops['args']))
else:
self.sqla_query.append(tops['op'](*tops['args']))
def visit_Call(self, node):
# TODO ?
pass
def visit_Attribute(self, node):
# FIXME ?
self.visit(node.value)
self.current_value += "." + node.attr
def visit_Name(self, node):
""" Names """
self.current_value = node.id
def visit_Num(self, node):
""" Numbers """
self.current_value = node.n
def visit_Str(self, node):
""" Strings """
try:
value = str_to_date(node.s)
self.current_value = value if value is not None else node.s
except ValueError:
self.current_value = node.s
|
|
import os
from Queue import Queue
try:
import simplejson as json
except ImportError:
import json
import logging
import subprocess
from octopus.core.communication.http import Http400, Http404
from octopus.worker import settings
from octopus.worker.worker import WorkerInternalException
from tornado.web import Application, RequestHandler
# /commands/ [GET] { commands: [ { id, status, completion } ] }
# /commands/ [POST] { id, jobtype, arguments }
# /commands/{id}/ [GET] { id, status, completion, jobtype, arguments }
# /commands/{id}/ [DELETE] stops the job
# /online/ [GET] { online }
# /online/ [SET] { online }
# /status/ [GET] { status, ncommands, globalcompletion }
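# Illustrative sketch (not part of the original module): the dispatcher assigns
# a command by POSTing a JSON body such as the following to /commands/ (field
# names taken from CommandsResource.post below; the values are made up):
#
#     {
#         "id": 42,
#         "runner": "some.runner.Class",          # hypothetical runner
#         "arguments": {},
#         "validationExpression": "",              # hypothetical value
#         "taskName": "some_task",
#         "relativePathToLogDir": "42",
#         "environment": {}
#     }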
LOGGER = logging.getLogger("workerws")
class WorkerWebService(Application):
'''A tornado application that will communicate with the dispatcher via webservices
Services are:
/commands
/commands/<id command>
/log
/log/command/<path>
/updatesysinfos
/pause
/ramInUse
/reconfig
'''
def __init__(self, framework, port):
super(WorkerWebService, self).__init__([
(r'/commands/?$', CommandsResource, dict(framework=framework)),
(r'/commands/(?P<id>\d+)/?$', CommandResource, dict(framework=framework)),
(r'/commands/(?P<id>\d+)/done?$', CommandDoneResource, dict(framework=framework)),
(r'/debug/?$', DebugResource, dict(framework=framework)),
(r'/log/?$', WorkerLogResource),
(r'/log/command/(?P<path>\S+)', CommandLogResource),
(r'/updatesysinfos/?$', UpdateSysResource, dict(framework=framework)),
(r'/pause/?$', PauseResource, dict(framework=framework)),
(r'/ramInUse/?$', RamInUseResource, dict(framework=framework)),
(r'/reconfig/?$', WorkerReconfig, dict(framework=framework))
])
logging.getLogger('').info("start WS")
self.queue = Queue()
self.listen(port, "0.0.0.0")
self.framework = framework
self.port = port
class BaseResource(RequestHandler):
def initialize(self, framework):
self.framework = framework
self.rnId = None
def setRnId(self, request):
if self.rnId is None and "rnId" in request.headers:
self.rnId = request.headers['rnId']
def getBodyAsJSON(self):
try:
return json.loads(self.request.body)
except:
return Http400("The HTTP body is not a valid JSON object")
class PauseResource(BaseResource):
def post(self):
self.setRnId(self.request)
try:
data = self.getBodyAsJSON()
content = data["content"]
killfile = settings.KILLFILE
if os.path.isfile(killfile):
os.remove(killfile)
# if 0, unpause the worker
if content != "0":
if not os.path.isdir(os.path.dirname(killfile)):
os.makedirs(os.path.dirname(killfile))
# change rights to the folder
os.chmod(os.path.dirname(killfile), 0777)
f = open(killfile, 'w')
# if -1, kill all current rendering processes
# if -2, schedule the worker for a restart
# if -3, kill all and schedule for restart
if content in ["-1", "-2", "-3"]:
f.write(content)
f.close()
os.chmod(killfile, 0666)
except Exception, e:
LOGGER.error("Error when pausing RN (%r)" % e)
self.set_status(500)
else:
self.set_status(202)
class RamInUseResource(BaseResource):
"""
TO FIX: the method for retrieving mem used is not really correct.
We should use "free -m" or directly /proc/meminfo -> use = memtotal - (memfree + membuffer + memcache)
For example, to compute the free memory (taking buffers and swap into account):
awk '/MemFree|Buffers|^Cached/ {free+=$2} END {print free}' /proc/meminfo
To get the memory in use, i.e. memtotal - memfree:
awk '/MemTotal/ {tot=$2} /MemFree|Buffers|^Cached/ {free+=$2} END {print tot-free}' /proc/meminfo
"""
def get(self):
process = subprocess.Popen("ps -e -o rss | awk '{sum+=$1} END {print sum/1024}'",
shell=True,
stdout=subprocess.PIPE)
stdout_list = process.communicate()[0].split('\n')
self.write(stdout_list[0])
class CommandsResource(BaseResource):
def get(self):
'''Lists the commands running on this worker.'''
commands = [{
'id': command.id,
'status': command.status,
'completion': command.completion,
'message': command.message,
} for command in self.framework.application.commands.values()]
self.write({'commands': commands})
def post(self):
# @todo this setRnId call may only be necessary in doOnline
self.setRnId(self.request)
data = self.getBodyAsJSON()
dct = {}
for key, value in data.items():
dct[str(key)] = value
dct['commandId'] = int(dct['id'])
del dct['id']
try:
# @TODO direct command creation here helps indicate to the dispatcher the result of the add operation
# it is done to avoid having a command&RN stalled in "assigned" status on the server.
# HOWEVER it can be a problem in the following case:
# 1. a command is deleted on the RN (via mijote/pulback or other)
# 2. quickly after deletion, the server has done a dispatch and has sent a new command (sometimes even the same cmd)
# 3. the worker's main loop runs and tries to clean up the previously deleted command (and set the proper worker status). It detects a problem
# with the newly created command and a ghost command is left on the RN...
# self.framework.addOrder(self.framework.application.addCommandApply, **dct)
ret = self.framework.application.addCommandApply(None,
dct['commandId'],
dct['runner'],
dct['arguments'],
dct['validationExpression'],
dct['taskName'],
dct['relativePathToLogDir'],
dct['environment'],
dct.get('runnerPackages', ''),
dct.get('watcherPackages', ''),
)
except WorkerInternalException, e:
LOGGER.error("Impossible to add command %r, the RN status is 'paused' (%r)" % (dct['commandId'], e))
self.set_status(500)
except Exception, e:
LOGGER.error("Impossible to add command %r (%r)" % (dct['commandId'], e))
self.set_status(500)
else:
self.set_status(202)
class CommandDoneResource(BaseResource):
def post(self, id):
"""
| Stops the process running for this command and sets final status to DONE
| It is called by the dispatcher when a user wants to stop a command without cancelling it.
|
| URL: POST http://host:port/commands/<id>/done
"""
dct = {
'commandId': int(id),
'endStatus': 5,
'endCompletion': 100,
'endMessage': "Done."
}
# endCompletion=0, endStatus=COMMAND.CMD_CANCELED, endMessage="killed")
self.framework.addOrder(self.framework.application.stopCommandApply, **dct)
self.set_status(200)
class CommandResource(BaseResource):
def put(self, id):
"""
| Usually called from a commandwatcher to set new values relative to a command.
| Only called when a value has changed or a long delay has been reached (see commandwatcher).
|
| URL: PUT http://host:port/commands/<id>
|
| Several kinds of updates are handled:
| - validation: validation process when a command starts (to be defined, might not be necessary nor used)
| - update info of the command:
| - status: integer indicating the command status
| - completion: a float indicating command progress
| - message: information string (only used for display but not in pulback)
| - stats: a custom dict to report useful stats from the command
"""
#TODO check error and set error response
self.setRnId(self.request)
rawArgs = self.getBodyAsJSON()
if 'status' in rawArgs \
or 'completion' in rawArgs \
or 'message' in rawArgs \
or 'stats' in rawArgs:
args = {
'commandId': int(id),
'status': rawArgs.get('status', None),
'message': rawArgs.get('message', None),
'completion': rawArgs.get('completion', None),
'stats': rawArgs.get('stats', None)
}
self.framework.addOrder(self.framework.application.updateCommandApply, **args)
elif 'validatorMessage' in rawArgs or 'errorInfos' in rawArgs:
# validator message case
args = {
'commandId': int(id),
'validatorMessage': rawArgs.get('validatorMessage', None),
'errorInfos': rawArgs.get('errorInfos', None)
}
self.framework.addOrder(self.framework.application.updateCommandValidationApply, **args)
# Success
self.set_status(202)
def delete(self, id):
"""
| Called when cancelling a running command.
| The process is interrupted and final status is set to CANCEL (with a default completion and message to display)
|
| URL: DELETE http://host:port/commands/<id>
"""
#TODO check error and set error response
dct = {
'commandId': int(id)
}
self.framework.addOrder(self.framework.application.stopCommandApply, **dct)
self.set_status(202)
class DebugResource(BaseResource):
def get(self):
watchers = self.framework.application.commandWatchers.values()
content = [{'id': watcher.command.id} for watcher in watchers]
self.write(content)
class WorkerLogResource(RequestHandler):
def get(self):
logFileName = "worker%d.log" % settings.PORT
logFilePath = os.path.join(settings.LOGDIR, logFileName)
if os.path.isfile(logFilePath):
logFile = open(logFilePath, 'r')
logFileContent = logFile.read()
logFile.close()
self.set_header('Content-Type', 'text/plain')
self.write(logFileContent)
else:
raise Http404('no log file')
class CommandLogResource(RequestHandler):
def get(self, path):
logFilePath = os.path.join(settings.LOGDIR, path)
if os.path.isfile(logFilePath):
logFile = open(logFilePath, 'r')
logFileContent = logFile.read()
logFile.close()
self.set_header('Content-Type', 'text/plain')
self.write(logFileContent)
else:
raise Http404('no log file')
class UpdateSysResource(BaseResource):
def get(self):
args = {}
self.framework.addOrder(self.framework.application.updateSysInfos, **args)
class WorkerReconfig(BaseResource):
def post(self):
#TODO check error and set error response
self.framework.application.reloadConfig()
|
|
"""
Functionality for scaling agent attributes or the number of agents to
match targets.
"""
from __future__ import division, print_function
from collections import namedtuple
from numbers import Number
import numpy as np
import pandas as pd
from .targets import apply_filter_query
def _scale_col_to_target(col, target, metric_func):
"""
Scale a column's values so that in aggregate they match some metric,
for example, mean, median, or sum.
Parameters
----------
col : pandas.Series
target : number
metric_func : callable
Must accept a Series and return a number.
Returns
-------
scaled : pandas.Series
"""
current = metric_func(col)
multiplier = target / current
return col * multiplier
def scale_col_to_target(
col, target, metric='mean', clip_low=None, clip_high=None,
int_result=False):
"""
Scale a column's values so they match a target aggregate metric.
Parameters
----------
col : pandas.Series
target : number
metric : {'mean', 'median', 'sum'}
How to aggregate the values in `col` for comparison to `target`.
clip_low : number, optional
clip_high : number, optional
int_result : bool, optional
If True, results will be rounded and converted to integers.
"""
if metric == 'mean':
metric_func = pd.Series.mean
elif metric == 'sum':
metric_func = pd.Series.sum
elif metric == 'median':
metric_func = pd.Series.median
else:
raise ValueError('Unknown metric type: {!r}'.format(metric))
scaled = _scale_col_to_target(col, target, metric_func)
scaled = scaled.clip(clip_low, clip_high)
if int_result:
scaled = scaled.round().astype('int')
return scaled
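# Illustrative example (not part of the original module):
#
#     s = pd.Series([1, 2, 3, 4])               # mean == 2.5
#     scale_col_to_target(s, 5, metric='mean')
#     # -> 0    2.0
#     #    1    4.0
#     #    2    6.0
#     #    3    8.0   (the mean is now 5)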
TargetsRow = namedtuple(
'TargetsRow',
['column', 'target', 'metric', 'filters', 'clip_low', 'clip_high',
'int_result'])
def _targets_row_to_params(row):
"""
Convert a row of a targets table to parameters for `scale_col_to_target`.
Takes care of NaN values appropriately.
Return value is a namedtuple with attribute names as listed below.
Parameters
----------
row : pandas.Series
Returns
-------
column : str
target : number
metric : str
filters : list or None
clip_low : number or None
clip_high : number or None
int_result : bool
"""
column = row.column_name
target = row.target_value
metric = row.target_metric
is_a_thing = lambda x: (
False if isinstance(x, Number) and np.isnan(x) else bool(x))
filters = row.filters.split(',') if is_a_thing(row.filters) else None
clip_low = row.clip_low if is_a_thing(row.clip_low) else None
clip_high = row.clip_high if is_a_thing(row.clip_high) else None
int_result = row.int_result if is_a_thing(row.int_result) else False
return TargetsRow(
column, target, metric, filters, clip_low, clip_high, int_result)
def scale_to_targets_from_table(df, targets):
"""
Scale values in a DataFrame based on specifications in a targets table.
The table is expected to have this format (values are examples)::
column_name  target_value  target_metric  filters
'income'     100000        'mean'         'tract_id == 7,num_workers > 2'

clip_low  clip_high  int_result
0         1000000    False
The names in ``column_name`` and ``filters`` are expected to be
columns in `df`.
``target_metric`` may be one of 'mean', 'median', or 'sum'.
The ``filters``, ``clip_low``, ``clip_high``, and ``int_result``
columns may be left blank to accept defaults.
Parameters
----------
df : pandas.DataFrame
Table with columns to be scaled.
targets : pandas.DataFrame
Table of targets and other scaling parameters.
Returns
-------
scaled : pandas.DataFrame
"""
# make sure we're not modifying any input data
df = df.copy()
for col in targets.column_name.unique():
df[col] = df[col].copy()
for row in (_targets_row_to_params(r) for ix, r in targets.iterrows()):
series = apply_filter_query(df, row.filters)[row.column]
scaled = scale_col_to_target(
series, row.target, row.metric, row.clip_low, row.clip_high,
row.int_result)
df[row.column].loc[scaled.index] = scaled
return df
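# Illustrative sketch (not part of the original module): a one-row targets
# table in the format described above, assuming `df` has 'income' and
# 'tract_id' columns; blank (NaN) cells fall back to the defaults.
#
#     targets = pd.DataFrame([{
#         'column_name': 'income',
#         'target_value': 100000,
#         'target_metric': 'mean',
#         'filters': 'tract_id == 7',
#         'clip_low': np.nan,
#         'clip_high': np.nan,
#         'int_result': np.nan,
#     }])
#     scaled = scale_to_targets_from_table(df, targets)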
def scale_to_targets(
df, target_col, targets, metric='mean', filters=None, clip_low=None,
clip_high=None, int_result=None):
"""
Parameters
----------
df : pandas.DataFrame
target_col : str
Column in `df` that will be scaled.
targets : sequence
Sequence of target values. Each target will correspond to a
different segment identified by `filters`.
metric : {'mean', 'median', 'sum'}
How to aggregate the values for comparison to targets.
filters : sequence, optional
Filters will be used with DataFrame.query and `df` to make a subset of
the full table for each scaling operation. Should be the same
length as `targets`.
Each individual filter can be a string or a sequence of strings.
Use ``None`` for no filtering.
clip_low : number, optional
clip_high : number, optional
Bounds for truncating results.
int_result : bool, optional
Whether result should be rounded and converted to integers.
Returns
-------
pandas.DataFrame
New DataFrame with `target_col` updated.
"""
filters = filters or [None] * len(targets)
scaled = []
for t, f, in zip(targets, filters):
series = apply_filter_query(df, f)[target_col]
scaled.append(scale_col_to_target(series, t, metric))
scaled = pd.concat(scaled)
scaled = scaled.clip(clip_low, clip_high)
if int_result:
scaled = scaled.round().astype('int')
df = df.copy()
df[target_col].loc[scaled.index] = scaled
return df
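# Illustrative sketch (not part of the original module): scale 'income'
# separately for two hypothetical segments of `df`:
#
#     scale_to_targets(df, 'income', targets=[50000, 80000],
#                      filters=['tract_id == 7', 'tract_id == 8'],
#                      metric='mean')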
|
|
""" report test results in JUnit-XML format, for use with Hudson and build integration servers.
Based on initial code from Ross Lawley.
"""
import py
import os
import re
import sys
import time
# Python 2.X and 3.X compatibility
try:
unichr(65)
except NameError:
unichr = chr
try:
unicode('A')
except NameError:
unicode = str
try:
long(1)
except NameError:
long = int
class Junit(py.xml.Namespace):
pass
# We need to get the subset of the invalid unicode ranges according to
# XML 1.0 which are valid in this python build. Hence we calculate
# this dynamically instead of hardcoding it. The spec range of valid
# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
# | [#x10000-#x10FFFF]
_legal_chars = (0x09, 0x0A, 0x0d)
_legal_ranges = (
(0x20, 0xD7FF),
(0xE000, 0xFFFD),
(0x10000, 0x10FFFF),
)
_legal_xml_re = [unicode("%s-%s") % (unichr(low), unichr(high))
for (low, high) in _legal_ranges
if low < sys.maxunicode]
_legal_xml_re = [unichr(x) for x in _legal_chars] + _legal_xml_re
illegal_xml_re = re.compile(unicode('[^%s]') %
unicode('').join(_legal_xml_re))
del _legal_chars
del _legal_ranges
del _legal_xml_re
def bin_xml_escape(arg):
def repl(matchobj):
i = ord(matchobj.group())
if i <= 0xFF:
return unicode('#x%02X') % i
else:
return unicode('#x%04X') % i
return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg)))
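# Illustrative example (not part of the original module): characters that are
# illegal in XML 1.0 are replaced by their escaped code point.
#
#     bin_xml_escape(u'null byte: \x00')
#     # -> py.xml.raw(u'null byte: #x00')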
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting")
group.addoption('--junitxml', action="store", dest="xmlpath",
metavar="path", default=None,
help="create junit-xml style report file at given path.")
group.addoption('--junitprefix', action="store", dest="junitprefix",
metavar="str", default=None,
help="prepend prefix to classnames in junit-xml output")
def pytest_configure(config):
xmlpath = config.option.xmlpath
# prevent opening xmllog on slave nodes (xdist)
if xmlpath and not hasattr(config, 'slaveinput'):
config._xml = LogXML(xmlpath, config.option.junitprefix)
config.pluginmanager.register(config._xml)
def pytest_unconfigure(config):
xml = getattr(config, '_xml', None)
if xml:
del config._xml
config.pluginmanager.unregister(xml)
def mangle_testnames(names):
names = [x.replace(".py", "") for x in names if x != '()']
names[0] = names[0].replace("/", '.')
return names
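# Illustrative example (not part of the original module), for a nodeid such as
# 'tests/test_foo.py::TestBar::()::test_baz':
#
#     mangle_testnames(['tests/test_foo.py', 'TestBar', '()', 'test_baz'])
#     # -> ['tests.test_foo', 'TestBar', 'test_baz']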
class LogXML(object):
def __init__(self, logfile, prefix):
logfile = os.path.expanduser(os.path.expandvars(logfile))
self.logfile = os.path.normpath(os.path.abspath(logfile))
self.prefix = prefix
self.tests = []
self.passed = self.skipped = 0
self.failed = self.errors = 0
def _opentestcase(self, report):
names = mangle_testnames(report.nodeid.split("::"))
classnames = names[:-1]
if self.prefix:
classnames.insert(0, self.prefix)
self.tests.append(Junit.testcase(
classname=".".join(classnames),
name=names[-1],
time=getattr(report, 'duration', 0)
))
def _write_captured_output(self, report):
sec = dict(report.sections)
for name in ('out', 'err'):
content = sec.get("Captured std%s" % name)
if content:
tag = getattr(Junit, 'system-'+name)
self.append(tag(bin_xml_escape(content)))
def append(self, obj):
self.tests[-1].append(obj)
def append_pass(self, report):
self.passed += 1
self._write_captured_output(report)
def append_failure(self, report):
#msg = str(report.longrepr.reprtraceback.extraline)
if hasattr(report, "wasxfail"):
self.append(
Junit.skipped(message="xfail-marked test passes unexpectedly"))
self.skipped += 1
else:
fail = Junit.failure(message="test failure")
fail.append(str(report.longrepr))
self.append(fail)
self.failed += 1
self._write_captured_output(report)
def append_collect_failure(self, report):
#msg = str(report.longrepr.reprtraceback.extraline)
self.append(Junit.failure(str(report.longrepr),
message="collection failure"))
self.errors += 1
def append_collect_skipped(self, report):
#msg = str(report.longrepr.reprtraceback.extraline)
self.append(Junit.skipped(str(report.longrepr),
message="collection skipped"))
self.skipped += 1
def append_error(self, report):
self.append(Junit.error(str(report.longrepr),
message="test setup failure"))
self.errors += 1
def append_skipped(self, report):
if hasattr(report, "wasxfail"):
self.append(Junit.skipped(str(report.wasxfail),
message="expected test failure"))
else:
filename, lineno, skipreason = report.longrepr
if skipreason.startswith("Skipped: "):
skipreason = skipreason[9:]
self.append(
Junit.skipped("%s:%s: %s" % report.longrepr,
type="pytest.skip",
message=skipreason
))
self.skipped += 1
self._write_captured_output(report)
def pytest_runtest_logreport(self, report):
if report.passed:
if report.when == "call": # ignore setup/teardown
self._opentestcase(report)
self.append_pass(report)
elif report.failed:
self._opentestcase(report)
if report.when != "call":
self.append_error(report)
else:
self.append_failure(report)
elif report.skipped:
self._opentestcase(report)
self.append_skipped(report)
def pytest_collectreport(self, report):
if not report.passed:
self._opentestcase(report)
if report.failed:
self.append_collect_failure(report)
else:
self.append_collect_skipped(report)
def pytest_internalerror(self, excrepr):
self.errors += 1
data = py.xml.escape(excrepr)
self.tests.append(
Junit.testcase(
Junit.error(data, message="internal error"),
classname="pytest",
name="internal"))
def pytest_sessionstart(self, session):
self.suite_start_time = time.time()
def pytest_sessionfinish(self, session, exitstatus, __multicall__):
if py.std.sys.version_info[0] < 3:
logfile = py.std.codecs.open(self.logfile, 'w', encoding='utf-8')
else:
logfile = open(self.logfile, 'w', encoding='utf-8')
suite_stop_time = time.time()
suite_time_delta = suite_stop_time - self.suite_start_time
numtests = self.passed + self.failed
logfile.write('<?xml version="1.0" encoding="utf-8"?>')
logfile.write(Junit.testsuite(
self.tests,
name="",
errors=self.errors,
failures=self.failed,
skips=self.skipped,
tests=numtests,
time="%.3f" % suite_time_delta,
).unicode(indent=0))
logfile.close()
def pytest_terminal_summary(self, terminalreporter):
terminalreporter.write_sep("-", "generated xml file: %s" % (self.logfile))
|
|
# This file is part of audioread.
# Copyright 2011, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Use Gstreamer to decode audio files.
To read an audio file, pass it to the constructor for GstAudioFile()
and then iterate over the contents:
>>> f = GstAudioFile('something.mp3')
>>> try:
>>> for block in f:
>>> ...
>>> finally:
>>> f.close()
Note that there are a few complications caused by Gstreamer's
asynchronous architecture. This module spawns its own Gobject main-
loop thread; I'm not sure how that will interact with other main
loops if your program has them. Also, in order to stop the thread
and terminate your program normally, you need to call the close()
method on every GstAudioFile you create. Conveniently, the file can be
used as a context manager to make this simpler:
>>> with GstAudioFile('something.mp3') as f:
>>> for block in f:
>>> ...
Iterating a GstAudioFile yields strings containing short integer PCM
data. You can also read the sample rate and channel count from the
file:
>>> with GstAudioFile('something.mp3') as f:
>>> print f.samplerate
>>> print f.channels
>>> print f.duration
"""
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst
import sys
import threading
import os
import queue
from urllib.parse import quote
from .exceptions import DecodeError
QUEUE_SIZE = 10
BUFFER_SIZE = 10
SENTINEL = '__GSTDEC_SENTINEL__'
# Exceptions.
class GStreamerError(DecodeError):
pass
class UnknownTypeError(GStreamerError):
"""Raised when Gstreamer can't decode the given file type."""
def __init__(self, streaminfo):
super().__init__(
"can't decode stream: " + streaminfo
)
self.streaminfo = streaminfo
class FileReadError(GStreamerError):
"""Raised when the file can't be read at all."""
pass
class NoStreamError(GStreamerError):
"""Raised when the file was read successfully but no audio streams
were found.
"""
def __init__(self):
super().__init__('no audio streams found')
class MetadataMissingError(GStreamerError):
"""Raised when GStreamer fails to report stream metadata (duration,
channels, or sample rate).
"""
pass
class IncompleteGStreamerError(GStreamerError):
"""Raised when necessary components of GStreamer (namely, the
principal plugin packages) are missing.
"""
def __init__(self):
super().__init__(
'missing GStreamer base plugins'
)
# Managing the Gobject main loop thread.
_shared_loop_thread = None
_loop_thread_lock = threading.RLock()
Gst.init(None)
def get_loop_thread():
"""Get the shared main-loop thread.
"""
global _shared_loop_thread
with _loop_thread_lock:
if not _shared_loop_thread:
# Start a new thread.
_shared_loop_thread = MainLoopThread()
_shared_loop_thread.start()
return _shared_loop_thread
class MainLoopThread(threading.Thread):
"""A daemon thread encapsulating a Gobject main loop.
"""
def __init__(self):
super().__init__()
self.loop = GLib.MainLoop.new(None, False)
self.daemon = True
def run(self):
self.loop.run()
# The decoder.
class GstAudioFile:
"""Reads raw audio data from any audio file that Gstreamer
knows how to decode.
>>> with GstAudioFile('something.mp3') as f:
>>> print f.samplerate
>>> print f.channels
>>> print f.duration
>>> for block in f:
>>> do_something(block)
Iterating the object yields blocks of 16-bit PCM data. Three
pieces of stream information are also available: samplerate (in Hz),
number of channels, and duration (in seconds).
It's very important that the client call close() when it's done
with the object. Otherwise, the program is likely to hang on exit.
Alternatively, of course, one can just use the file as a context
manager, as shown above.
"""
def __init__(self, path):
self.running = False
self.finished = False
# Set up the Gstreamer pipeline.
self.pipeline = Gst.Pipeline()
self.dec = Gst.ElementFactory.make("uridecodebin", None)
self.conv = Gst.ElementFactory.make("audioconvert", None)
self.sink = Gst.ElementFactory.make("appsink", None)
if self.dec is None or self.conv is None or self.sink is None:
# uridecodebin, audioconvert, or appsink is missing. We need
# gst-plugins-base.
raise IncompleteGStreamerError()
# Register for bus signals.
bus = self.pipeline.get_bus()
bus.add_signal_watch()
bus.connect("message::eos", self._message)
bus.connect("message::error", self._message)
# Configure the input.
uri = 'file://' + quote(os.path.abspath(path))
self.dec.set_property("uri", uri)
# The callback to connect the input.
self.dec.connect("pad-added", self._pad_added)
self.dec.connect("no-more-pads", self._no_more_pads)
# And a callback if decoding fails.
self.dec.connect("unknown-type", self._unkown_type)
# Configure the output.
# We want short integer data.
self.sink.set_property(
'caps',
Gst.Caps.from_string('audio/x-raw, format=(string)S16LE'),
)
# TODO set endianness?
# Set up the characteristics of the output. We don't want to
# drop any data (nothing is real-time here); we should bound
# the memory usage of the internal queue; and, most
# importantly, setting "sync" to False disables the default
# behavior in which you consume buffers in real time. This way,
# we get data as soon as it's decoded.
self.sink.set_property('drop', False)
self.sink.set_property('max-buffers', BUFFER_SIZE)
self.sink.set_property('sync', False)
# The callback to receive decoded data.
self.sink.set_property('emit-signals', True)
self.sink.connect("new-sample", self._new_sample)
# We'll need to know when the stream becomes ready and we get
# its attributes. This semaphore will become available when the
# caps are received. That way, when __init__() returns, the file
# (and its attributes) will be ready for reading.
self.ready_sem = threading.Semaphore(0)
self.caps_handler = self.sink.get_static_pad("sink").connect(
"notify::caps", self._notify_caps
)
# Link up everything but the decoder (which must be linked only
# when it becomes ready).
self.pipeline.add(self.dec)
self.pipeline.add(self.conv)
self.pipeline.add(self.sink)
self.conv.link(self.sink)
# Set up the queue for data and run the main thread.
self.queue = queue.Queue(QUEUE_SIZE)
self.thread = get_loop_thread()
# This will get filled with an exception if opening fails.
self.read_exc = None
# Return as soon as the stream is ready!
self.running = True
self.got_caps = False
self.pipeline.set_state(Gst.State.PLAYING)
self.ready_sem.acquire()
if self.read_exc:
# An error occurred before the stream became ready.
self.close(True)
raise self.read_exc
# Gstreamer callbacks.
def _notify_caps(self, pad, args):
"""The callback for the sinkpad's "notify::caps" signal.
"""
# The sink has started to receive data, so the stream is ready.
# This also is our opportunity to read information about the
# stream.
self.got_caps = True
info = pad.get_current_caps().get_structure(0)
# Stream attributes.
self.channels = info.get_int('channels')[1]
self.samplerate = info.get_int('rate')[1]
# Query duration.
success, length = pad.get_peer().query_duration(Gst.Format.TIME)
if success:
self.duration = length / 1000000000
else:
self.read_exc = MetadataMissingError('duration not available')
# Allow constructor to complete.
self.ready_sem.release()
_got_a_pad = False
def _pad_added(self, element, pad):
"""The callback for GstElement's "pad-added" signal.
"""
# Decoded data is ready. Connect up the decoder, finally.
name = pad.query_caps(None).to_string()
if name.startswith('audio/x-raw'):
nextpad = self.conv.get_static_pad('sink')
if not nextpad.is_linked():
self._got_a_pad = True
pad.link(nextpad)
def _no_more_pads(self, element):
"""The callback for GstElement's "no-more-pads" signal.
"""
# Sent when the pads are done adding (i.e., there are no more
# streams in the file). If we haven't gotten at least one
# decodable stream, raise an exception.
if not self._got_a_pad:
self.read_exc = NoStreamError()
self.ready_sem.release() # No effect if we've already started.
def _new_sample(self, sink):
"""The callback for appsink's "new-sample" signal.
"""
if self.running:
# New data is available from the pipeline! Dump it into our
# queue (or possibly block if we're full).
buf = sink.emit('pull-sample').get_buffer()
# We can't use Gst.Buffer.extract() to read the data as it crashes
# when called through PyGObject. We also can't use
# Gst.Buffer.extract_dup() because we have no way in Python to free
# the memory that it returns. Instead we get access to the actual
# data via Gst.Memory.map().
mem = buf.get_all_memory()
success, info = mem.map(Gst.MapFlags.READ)
if success:
if isinstance(info.data, memoryview):
# We need to copy the data as the memoryview is released
# when we call mem.unmap()
data = bytes(info.data)
else:
# GStreamer Python bindings <= 1.16 return a copy of the
# data as bytes()
data = info.data
mem.unmap(info)
self.queue.put(data)
else:
raise GStreamerError("Unable to map buffer memory while reading the file.")
return Gst.FlowReturn.OK
def _unkown_type(self, uridecodebin, decodebin, caps):
"""The callback for decodebin's "unknown-type" signal.
"""
# This is called *before* the stream becomes ready when the
# file can't be read.
streaminfo = caps.to_string()
if not streaminfo.startswith('audio/'):
# Ignore non-audio (e.g., video) decode errors.
return
self.read_exc = UnknownTypeError(streaminfo)
self.ready_sem.release()
def _message(self, bus, message):
"""The callback for GstBus's "message" signal (for two kinds of
messages).
"""
if not self.finished:
if message.type == Gst.MessageType.EOS:
# The file is done. Tell the consumer thread.
self.queue.put(SENTINEL)
if not self.got_caps:
# If the stream ends before _notify_caps was called, this
# is an invalid file.
self.read_exc = NoStreamError()
self.ready_sem.release()
elif message.type == Gst.MessageType.ERROR:
gerror, debug = message.parse_error()
if 'not-linked' in debug:
self.read_exc = NoStreamError()
elif 'No such file' in debug:
self.read_exc = IOError('resource not found')
else:
self.read_exc = FileReadError(debug)
self.ready_sem.release()
# Iteration.
def __next__(self):
# Wait for data from the Gstreamer callbacks.
val = self.queue.get()
if val == SENTINEL:
# End of stream.
raise StopIteration
return val
def __iter__(self):
return self
# Cleanup.
def close(self, force=False):
"""Close the file and clean up associated resources.
Calling `close()` a second time has no effect.
"""
if self.running or force:
self.running = False
self.finished = True
# Unregister for signals, which we registered for above with
# `add_signal_watch`. (Without this, GStreamer leaks file
# descriptors.)
self.pipeline.get_bus().remove_signal_watch()
# Stop reading the file.
self.dec.set_property("uri", None)
# Block spurious signals.
self.sink.get_static_pad("sink").disconnect(self.caps_handler)
# Make space in the output queue to let the decoder thread
# finish. (Otherwise, the thread blocks on its enqueue and
# the interpreter hangs.)
try:
self.queue.get_nowait()
except queue.Empty:
pass
# Halt the pipeline (closing file).
self.pipeline.set_state(Gst.State.NULL)
def __del__(self):
self.close()
# Context manager.
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return False
# Smoke test.
if __name__ == '__main__':
for path in sys.argv[1:]:
path = os.path.abspath(os.path.expanduser(path))
with GstAudioFile(path) as f:
print(f.channels)
print(f.samplerate)
print(f.duration)
for s in f:
print(len(s), ord(s[0]))
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
def bi(name, shape, value=0.0, dtype=tf.float32):
"""Declares a bias variable with constant initialization."""
return tf.get_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=tf.constant_initializer(
value, dtype=dtype))
def wi(name, shape, stddev=0.0, dtype=tf.float32):
"""Declares a weight variable with random normal initialization."""
return tf.get_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=tf.truncated_normal_initializer(
mean=0.0, stddev=stddev, dtype=dtype))
class BackPropNet(object):
"""Normal 3-layer network for comparison."""
scope = 'bp'
def __init__(self, num_hidden=100):
self.define_placeholders()
self.define_network(num_hidden)
self.define_costs()
self.define_train_step(num_hidden)
def define_network(self, num_hidden):
with tf.variable_scope(self.scope):
self.w1 = wi("w1", [784, num_hidden]) # forward weights
self.d1 = bi("d1", [num_hidden])
self.z1 = tf.matmul(self.x, self.w1) + self.d1
self.h = tf.nn.sigmoid(self.z1)
self.w2 = wi("w2", [num_hidden, 10]) # forward weights
self.d2 = bi("d2", [10])
self.z2 = tf.matmul(self.h, self.w2) + self.d2
self.ypred = tf.nn.softmax(self.z2)
def define_train_step(self, num_hidden):
self.train_step = tf.train.GradientDescentOptimizer(self.lr) \
.minimize(self.cross_entropy)
def define_placeholders(self):
self.x = tf.placeholder(tf.float32, [None, 784])
self.y = tf.placeholder(tf.float32, [None, 10])
self.lr = tf.placeholder(tf.float32) # learning rate
self.decay = tf.placeholder(tf.float32) # weight decay
self.N = tf.cast(tf.shape(self.x)[0], tf.float32)
self.alpha = self.lr # / self.N
def define_costs(self):
# cross-entropy
self.cross_entropy = tf.reduce_mean(
-tf.reduce_sum(self.y * tf.log(self.ypred), reduction_indices=[1]))
# accuracy
correct_prediction = tf.equal(tf.argmax(self.ypred,1), tf.argmax(self.y,1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
def train(self, sess, mnist, lr=0.5, decay=0.0, epochs=50, verbose=False):
testaccs = []
trainaccs = []
for e in range(epochs):
for b in range(60):
images, labels = mnist.train.next_batch(1000)
sess.run(self.train_step,
feed_dict={self.x: images,
self.y: labels,
self.lr: lr,
self.decay: decay
})
train_acc = sess.run(self.accuracy,
feed_dict={self.x: mnist.train.images,
self.y: mnist.train.labels })
acc = sess.run(self.accuracy,
feed_dict={self.x: mnist.test.images,
self.y: mnist.test.labels })
trainaccs.append(train_acc)
testaccs.append(acc)
if verbose:
print('Epoch %d, TrainAcc: %.1f %%, TestAcc: %.1f %%' %
(e, train_acc*100, acc*100))
return trainaccs, testaccs
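# Illustrative sketch (not part of the original module), assuming the TF 1.x
# MNIST helper is available and labels are one-hot encoded:
#
#     from tensorflow.examples.tutorials.mnist import input_data
#     mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
#     net = BackPropNet(num_hidden=100)
#     with tf.Session() as sess:
#         sess.run(tf.global_variables_initializer())
#         trainaccs, testaccs = net.train(sess, mnist, lr=0.5, epochs=10)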
class RandomFeedbackNet(BackPropNet):
"""Three-layer netowrk based on
'Random feedback weights support learning in deep neural networks'
There are a few differences:
* number of hidden units is adjustable
* cross-entropy is used for training
* weights are initialized using normal random (instead of uniform)
* biases are initialized to zero (instead of uniform)
* did not perform any hyperparameter tuning
"""
scope = 'rfn'
def define_train_step(self, num_hidden):
# define backward weights
with tf.variable_scope(self.scope):
b2 = wi("b2", [10, num_hidden], stddev=0.2) # backwards weights
# training: derivative w.r.t. activations
ypred_grad = tf.gradients(self.cross_entropy, self.ypred)[0]
z2_grad = tf.gradients(self.cross_entropy, self.z2)[0] # with softmax included
h_grad = tf.matmul(z2_grad, b2)
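# Feedback alignment: the error is propagated through the fixed random
# matrix b2 instead of the transpose of the forward weights (backprop
# would use tf.matmul(z2_grad, tf.transpose(self.w2)) here).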
z1_grad = tf.multiply(tf.gradients(self.h, self.z1)[0], h_grad)
# training: derivative w.r.t. weights
self.w2_grad = tf.reduce_sum(
tf.multiply(tf.expand_dims(self.h, 2),
tf.expand_dims(z2_grad, 1)), # order?
[0])
self.d2_grad = tf.reduce_sum(z2_grad, [0])
self.w1_grad = tf.reduce_sum(
tf.multiply(tf.expand_dims(self.x, 2),
tf.expand_dims(z1_grad, 1)), # order?
[0])
self.d1_grad = tf.reduce_sum(z1_grad, [0])
# training: assign weights
self.train_step= [
tf.assign(self.w2, self.w2 - self.alpha * self.w2_grad - self.decay),
tf.assign(self.w1, self.w1 - self.alpha * self.w1_grad - self.decay),
tf.assign(self.d2, self.d2 - self.alpha * self.d2_grad - self.decay),
tf.assign(self.d1, self.d1 - self.alpha * self.d1_grad - self.decay),
]
class RandomFeedback4Layer(RandomFeedbackNet):
"""A 4-layer network, as above
"""
scope = 'rf4'
def layerwise_exp(self, sess, mnist, lr=0.5, decay=0.0, verbpose=False):
test_acc = []
stage = 1
for step in range(4):
n = 30
if step == 0: # stage 1: train w1 + w2
train_step = self.train_step[2:]
n = 30 * 3
elif step == 1: # stage 2: train only w3
train_step = self.train_step[:2]
elif step == 2: # stage 3: train w1 + w2
train_step = self.train_step[2:]
elif step == 3: # stage 4: train all
train_step = self.train_step
for iter in range(n):
images, labels = mnist.train.next_batch(1000)
sess.run(train_step,
feed_dict={self.x: images,
self.y: labels,
self.lr: lr,
self.decay: decay
})
acc = sess.run(self.accuracy,
feed_dict={self.x: mnist.test.images,
self.y: mnist.test.labels })
test_acc.append(acc)
return test_acc
def define_network(self, num_hidden):
with tf.variable_scope(self.scope):
n1, n2 = num_hidden
self.w1 = wi("w1", [784, n1]) # forward weights
self.d1 = bi("d1", [n1])
self.z1 = tf.matmul(self.x, self.w1) + self.d1
self.h1 = tf.nn.tanh(self.z1)
self.w2 = wi("w2", [n1, n2]) # forward weights
self.d2 = bi("d2", [n2])
self.z2 = tf.matmul(self.h1, self.w2) + self.d2
self.h2 = tf.nn.tanh(self.z2)
self.w3 = wi("w3", [n2, 10]) # forward weights
self.d3 = bi("d3", [10])
self.z3 = tf.matmul(self.h2, self.w3) + self.d3
self.ypred = tf.nn.softmax(self.z3)
def define_train_step(self, num_hidden):
# define backward weights
with tf.variable_scope(self.scope):
n1, n2 = num_hidden
b2 = wi("b2", [n2, n1], stddev=0.2) # backwards weights
b3 = wi("b3", [10, n2], stddev=0.2) # backwards weights
# training: derivative w.r.t. activations
ypred_grad = tf.gradients(self.cross_entropy, self.ypred)[0]
z3_grad = tf.gradients(self.cross_entropy, self.z3)[0] # softmax
h2_grad = tf.matmul(z3_grad, b3)
z2_grad = tf.multiply(tf.gradients(self.h2, self.z2)[0], h2_grad) # tanh derivative
h1_grad = tf.matmul(z2_grad, b2)
z1_grad = tf.multiply(tf.gradients(self.h1, self.z1)[0], h1_grad) # tanh derivative
# training: derivative w.r.t. weights
self.w3_grad = tf.reduce_sum(
tf.multiply(tf.expand_dims(self.h2, 2),
tf.expand_dims(z3_grad, 1)),
[0])
self.d3_grad = tf.reduce_sum(z3_grad, [0])
self.w2_grad = tf.reduce_sum(
tf.multiply(tf.expand_dims(self.h1, 2),
tf.expand_dims(z2_grad, 1)),
[0])
self.d2_grad = tf.reduce_sum(z2_grad, [0])
self.w1_grad = tf.reduce_sum(
tf.multiply(tf.expand_dims(self.x, 2),
tf.expand_dims(z1_grad, 1)),
[0])
self.d1_grad = tf.reduce_sum(z1_grad, [0])
# training: assign weights
self.train_step= [
tf.assign(self.w3, self.w3 - self.alpha * self.w3_grad - self.decay),
tf.assign(self.d3, self.d3 - self.alpha * self.d3_grad - self.decay),
tf.assign(self.w2, self.w2 - self.alpha * self.w2_grad - self.decay),
tf.assign(self.d2, self.d2 - self.alpha * self.d2_grad - self.decay),
tf.assign(self.w1, self.w1 - self.alpha * self.w1_grad - self.decay),
tf.assign(self.d1, self.d1 - self.alpha * self.d1_grad - self.decay),
]
class BackProp4Layer(RandomFeedback4Layer):
"""A 4-layer network, as above
"""
scope = 'bp4'
#def define_train_step(self, num_hidden):
# self.train_step = tf.train.GradientDescentOptimizer(self.lr) \
# .minimize(self.cross_entropy)
def define_train_step(self, num_hidden):
# training: assign weights
self.w3_grad, self.w2_grad, self.w1_grad, \
self.d3_grad, self.d2_grad, self.d1_grad = \
tf.gradients(self.cross_entropy,
[self.w3, self.w2, self.w1,
self.d3, self.d2, self.d1])
self.train_step= [
tf.assign(self.w3, self.w3 - self.alpha * self.w3_grad - self.decay),
tf.assign(self.d3, self.d3 - self.alpha * self.d3_grad - self.decay),
tf.assign(self.w2, self.w2 - self.alpha * self.w2_grad - self.decay),
tf.assign(self.d2, self.d2 - self.alpha * self.d2_grad - self.decay),
tf.assign(self.w1, self.w1 - self.alpha * self.w1_grad - self.decay),
tf.assign(self.d1, self.d1 - self.alpha * self.d1_grad - self.decay),
]
class DirectFeedbackNet(RandomFeedback4Layer):
"""Based on
'Direct Feedback Alignment Provides Learning in
Deep Neural Networks'
There are some differences from the paper; relative to RandomFeedback4Layer,
only the feedback wiring changes (see the lines marked 'SUBTLE DIFFERENCE' below).
"""
scope = 'dfn'
#def layerwise_exp(self, sess, mnist, lr=0.5, decay=0.0, verbpose=False):
# super(DirectFeedbackNet, self).layerwise_exp(sess, mnist, lr=0.5, decay=0.0, verbpose=False):
def define_train_step(self, num_hidden):
with tf.variable_scope(self.scope):
n1, n2 = num_hidden
b2 = wi("b2", [10, n1], stddev=0.2) # <--- SUBTLE DIFFERENCE
b3 = wi("b3", [10, n2], stddev=0.2)
# training: derivative w.r.t. activations
ypred_grad = tf.gradients(self.cross_entropy, self.ypred)[0]
z3_grad = tf.gradients(self.cross_entropy, self.z3)[0]
h2_grad = tf.matmul(z3_grad, b3)
z2_grad = tf.multiply(tf.gradients(self.h2, self.z2)[0], h2_grad)
h1_grad = tf.matmul(z3_grad, b2) # <--- SUBTLE DIFFERENCE HERE
z1_grad = tf.multiply(tf.gradients(self.h1, self.z1)[0], h1_grad)
# training: derivative w.r.t. weights
self.w3_grad = tf.reduce_sum(
tf.multiply(tf.expand_dims(self.h2, 2),
tf.expand_dims(z3_grad, 1)),
[0])
self.d3_grad = tf.reduce_sum(z3_grad, [0])
self.w2_grad = tf.reduce_sum(
tf.multiply(tf.expand_dims(self.h1, 2),
tf.expand_dims(z2_grad, 1)),
[0])
self.d2_grad = tf.reduce_sum(z2_grad, [0])
self.w1_grad = tf.reduce_sum(
tf.multiply(tf.expand_dims(self.x, 2),
tf.expand_dims(z1_grad, 1)),
[0])
self.d1_grad = tf.reduce_sum(z1_grad, [0])
# training: assign weights
self.train_step= [
tf.assign(self.w3, self.w3 - self.alpha * self.w3_grad - self.decay),
tf.assign(self.d3, self.d3 - self.alpha * self.d3_grad - self.decay),
tf.assign(self.w2, self.w2 - self.alpha * self.w2_grad - self.decay),
tf.assign(self.d2, self.d2 - self.alpha * self.d2_grad - self.decay),
tf.assign(self.w1, self.w1 - self.alpha * self.w1_grad - self.decay),
tf.assign(self.d1, self.d1 - self.alpha * self.d1_grad - self.decay),
]
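# Note on the difference from RandomFeedback4Layer above: in direct feedback
# alignment every hidden layer receives the *output* error z3_grad through its
# own fixed random matrix (h1_grad = z3_grad . b2), instead of chaining the
# error back through the layer above (h1_grad = z2_grad . b2). Only the two
# lines marked "SUBTLE DIFFERENCE" change; the weight updates are identical.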
if __name__ == '__main__':
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("mnist/", one_hot=True)
bpn = BackPropNet()
rfn = RandomFeedbackNet()
rfn4 = RandomFeedback4Layer([200,100])
dfb4 = DirectFeedbackNet([200,100])
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
print 'Training normal neural net with backprop:'
bpn.train(sess, mnist, epochs=100)
print 'Training random feedback neural net:'
rfn.train(sess, mnist, lr=0.5, decay=0.00001, epochs=100)
print 'Training 4-layer random feedback net:'
rfn4.train(sess, mnist, lr=0.5, decay=0.0001, epochs=100)
print 'Training 4-layer direct feedback net:'
dfb4.train(sess, mnist, lr=0.5, decay=0.0001, epochs=100)
|
|
#!/usr/bin/python
import scipy
import scipy.interpolate
import scipy.signal
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import math
from ..model import DAGModel
from .. import utils
def _floor(v, n):
return int(n * math.floor(v/n))
def _find_nearest_index(array, value):
value = (360 + value) % 360
return np.abs(array - value).argmin()
def _average_gain_dbi(pattern, angles):
return sum(pattern) / float(len(pattern))
def _average_nadir_gain_dbi(pattern, angles):
"""Average gain on the nadir face of the satellite.
For simplicity, this function assumes a hard-coded nadir face of 65 degrees
off boresight, which translates to the ranges 0->65 and (360-65)->360 degrees.
"""
s = 0
n = 0
offset = 65
for i in range(len(pattern)):
angle = angles[i]
gain = pattern[i]
if (0 <= angle <= offset) or ((360-offset) <= angle <= 360):
s += gain
n += 1
return s / n
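# Illustrative example (values made up): with one sample every 60 degrees,
# only the samples at 0, 60, and 300 degrees fall on the 65-degree nadir face,
# so
#   _average_nadir_gain_dbi([3.0, 1.0, -2.0, -5.0, -2.0, 1.0],
#                           [0, 60, 120, 180, 240, 300])
# averages 3.0, 1.0, and 1.0 to give roughly 1.67 dBi.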
class Antenna(object):
"""Antenna tributary
This class can be used either for tx or for rx and it will
register its functions as either the tx_antenna_... or
rx_antenna_... as appropriate.
"""
def __init__(self,
pattern=None,
gain=0.0,
polarization='RHCP',
tracking=True,
rf_chain=[],
pointing_loss_db=0,
is_rx=True,
**meta):
"""Create a new antenna tributary.
pattern -- list of evenly-spaced pattern cut values starting at 0
gain -- peak gain of the antenna
polarization -- str
tracking -- does it track the target (eg rotator) or not (eg nadir)
rf_chain -- list of Element objects for the RF chain on the board
pointing_loss_db -- for now, just the number of dB of pointing loss
is_rx -- is it for receive or transmit
kwargs -- any metadata to assign to the antenna itself
If there are 360 points in the pattern, it will be
interpolated for you automatically.
"""
self.meta = meta
self.peak_gain_only = (pattern is None)
if pattern is None:
self.peak_gain_only = True
self.peak_gain = gain
pattern = np.zeros(360)
pattern += gain
else:
self.peak_gain = max(pattern)
pattern = np.array(pattern)
self.pattern_angles = np.arange(0.0, 360.0, 360.0/len(pattern))
self.pattern = pattern
if len(pattern) == 360:
self.interpolated = pattern[:]
self.interpolated_angles = np.arange(0, 360, 1)
else:
interpolated = self._interpolate_pattern(pattern)
self.interpolated_angles = np.arange(0, 360, 360.0/len(interpolated))
self.interpolated = interpolated
self.is_rx = is_rx
self.tribute = {
# calculators
self._mangle('peak_gain_dbi'): self._peak_gain_dbi,
self._mangle('gain_dbi'): self._gain_dbi,
self._mangle('angle_deg'): self._angle_deg,
self._mangle('boresight_gain_dbi'): self._boresight_gain_dbi,
self._mangle('average_gain_dbi'): self._average_gain_dbi,
self._mangle('average_nadir_gain_dbi'): self._average_nadir_gain_dbi,
# constants
self._name('polarization'): polarization,
self._name('raw_gain_pattern'): pattern,
self._name('raw_gain_pattern_angles'): self.pattern_angles,
self._name('gain_pattern'): self.interpolated,
self._name('gain_pattern_angles'): self.interpolated_angles,
self._name('obj'): self,
self._name('tracking_target'): bool(tracking),
self._name('rf_chain'): rf_chain,
self._name('pointing_loss_db'): pointing_loss_db,
}
def _name(self, s):
if self.is_rx:
return 'rx_antenna_'+s
else:
return 'tx_antenna_'+s
def _lst_to_rad(self, lst):
return np.array([math.radians(v) for v in lst])
def _wrap(self, lst):
return np.array(list(lst) + [lst[0]])
def _plot_peak_gain(self, fname, title):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection='polar')
theta = self._lst_to_rad(self.pattern_angles[:])
pattern = np.array(self.pattern)
# offset the pattern to get around the negative-radius issue
if self.peak_gain < 0:
offset = -2 * self.peak_gain
pattern += offset
ax.plot(theta,
pattern,
color='r',
linewidth=3,
label='Peak Gain Used Everywhere')
fig.canvas.draw()
if self.peak_gain < 0:
ax.set_yticklabels([t - offset for t in ax.get_yticks()])
fig.suptitle(title)
plt.legend(loc=4)
fig.savefig(fname, transparent=True)
def _plot_interpolated(self, fname, title, include_raw, ylim):
# Wrap around one point to close the loop and convert to radians
interp = self._wrap(self.interpolated)
raw = np.copy(self.pattern)
low = min(min(interp), min(raw))
hi = max(max(interp), max(raw))
n_steps = 5
min_step_size = 1
step_size = max(int((hi - low) / n_steps), min_step_size)
low_r = _floor(low, step_size)
hi_r = _floor(hi, step_size)
val_start = low_r if low_r < low else low_r - step_size
val_stop = hi_r + step_size
offset = 0 - val_start
# to debug uncomment these lines
# print 'low: %s' % low
# print 'hi: %s' % hi
# print 'low_r: %s' % low_r
# print 'hi_r: %s' % hi_r
# print 'val_start: %s' % val_start
# print 'val_stop: %s' % val_stop
# print 'step_size: %s' % step_size
# print 'offset: %s' % offset
# print
interp += offset
raw += offset
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection='polar')
if ylim:
locator = matplotlib.ticker.MaxNLocator(nbins=8)
ax.yaxis.set_major_locator(locator)
ax.set_ylim([ylim[0]+offset, ylim[1]+offset])
interp_angles = self._wrap(self._lst_to_rad(self.interpolated_angles))
raw_angles = self._lst_to_rad(self.pattern_angles)
include_raw = (include_raw
and (len(self.pattern) != len(self.interpolated)))
if len(self.pattern) == len(self.interpolated):
label = 'Antenna Pattern'
main_angles = raw_angles
main_pattern = raw
else:
label = 'Interpolated Pattern'
main_angles = interp_angles
main_pattern = interp
ax.set_theta_zero_location("N")
ax.plot(main_angles,
main_pattern,
color='r',
linewidth=3,
label=label)
if include_raw:
ax.plot(raw_angles,
raw, 'x',
color='b',
linewidth=1,
label='Observed')
fig.canvas.draw()
ax.set_yticklabels([t - offset for t in ax.get_yticks()])
fig.suptitle(title)
plt.legend(loc=4)
fig.savefig(fname, transparent=True)
def plot_pattern(self, fname, include_raw=True, title=None, ylim=None):
"""Plots the pattern to a PNG file.
fname -- where to save it
include_raw -- If the pattern is interpolated, include the raw points?
title -- Title of the image
ylim -- [min, max] as desired
If, for example, your real pattern varies by only one dB, its
plot can be correct but look a little odd: the gain appears to
swing wildly from one side to the other even though it is quite
stable in reality. That's why <ylim> is an option.
"""
prefix = 'RX' if self.is_rx else 'TX'
if not title:
title = '%s Antenna Gain Pattern' % prefix
if self.peak_gain_only:
return self._plot_peak_gain(fname, title)
else:
return self._plot_interpolated(fname, title, include_raw, ylim)
def _linear_interpolate(self, src, factor):
src_x = np.arange(0, len(src), 1)
tck = scipy.interpolate.splrep(src_x, src, s=0)
dst_x = np.arange(0, len(src), 1.0/factor)
dst = scipy.interpolate.splev(dst_x, tck, der=0)
return dst
def _circular_interpolate(self, src, factor):
tmp = list(src)*3
tmp = self._linear_interpolate(tmp, factor)
l = int(len(tmp) / 3)
return tmp[l:2*l]
def _interpolate_pattern(self, pattern, factor=None):
if not factor:
# default to roughly every one degree
factor = (360.0 / len(pattern))
return self._circular_interpolate(pattern, factor)
def _mangle(self, name):
x = 'rx' if self.is_rx else 'tx'
s = '_' if name[0] == '_' else ''
return '%s%s_antenna_%s' % (s, x, name)
def _call(self, model, name):
return getattr(model, self._mangle(name))
def _peak_gain_dbi(self, model):
return max(self._call(model, 'gain_pattern'))
def _gain_dbi(self, model):
if self._call(model, 'tracking_target'):
return self._call(model, 'boresight_gain_dbi')
else:
angle = self._call(model, 'angle_deg')
angles = self._call(model, 'gain_pattern_angles')
idx = _find_nearest_index(angles, angle)
pattern = self._call(model, 'gain_pattern')
return pattern[idx]
def _angle_deg(self, model):
if self._call(model, 'tracking_target'):
return 0
if model.is_downlink:
if self.is_rx:
# We are the ground-station
return model.min_elevation_deg
else:
# We are the satellite
return model.satellite_antenna_angle_deg
else:
if self.is_rx:
# We are the satellite
return model.satellite_antenna_angle_deg
else:
# We are the ground-station
return model.min_elevation_deg
def _boresight_gain_dbi(self, model):
pattern = self._call(model, 'gain_pattern')
angles = self._call(model, 'gain_pattern_angles')
idx = _find_nearest_index(angles, 0)
return pattern[idx]
def _average_gain_dbi(self, model):
pattern = self._call(model, 'gain_pattern')
angles = self._call(model, 'gain_pattern_angles')
return _average_gain_dbi(pattern, angles)
def _average_nadir_gain_dbi(self, model):
pattern = self._call(model, 'gain_pattern')
angles = self._call(model, 'gain_pattern_angles')
return _average_nadir_gain_dbi(pattern, angles)
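# --- Illustrative usage (defined only as a sketch; never called) -------------
# A minimal example of constructing an Antenna from a coarse measured cut; the
# 8-point pattern below is made up for illustration.
def _example_antenna():
    pattern = [3.0, 2.5, 0.0, -3.0, -6.0, -3.0, 0.0, 2.5]  # dBi, every 45 deg
    ant = Antenna(pattern=pattern,
                  polarization='RHCP',
                  tracking=False,
                  pointing_loss_db=0.5,
                  is_rx=False)
    # The coarse cut is spline-interpolated to roughly one-degree resolution;
    # constants and calculator callables are exposed under tx_antenna_* keys.
    return ant.tribute['tx_antenna_gain_pattern']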
|
|
"""
============================
Base RPC Handler for Tornado
============================
This is a basic server implementation, designed for use within the
Tornado framework. The classes in this library should not be used
directly, but rather through the XML or JSON RPC implementations.
You can use the utility functions like 'private' and 'start_server'.
"""
from tornado.web import RequestHandler
import tornado.web
import tornado.ioloop
import tornado.httpserver
from tornado.concurrent import Future, TracebackFuture
from tornado import gen
from tornado.stack_context import ExceptionStackContext, run_with_stack_context
import types
import traceback
from tornadorpc_evok.utils import getcallargs
# Configuration element
class Config(object):
verbose = True
short_errors = True
config = Config()
class BaseRPCParser(object):
"""
This class is responsible for managing the request, dispatch,
and response formatting of the system. It is tied into the
_RPC_ attribute of the BaseRPCHandler (or subclasses) and
populated as necessary throughout the request. Use the
.faults attribute to take advantage of the built-in error
codes.
"""
content_type = 'text/plain'
def __init__(self, library, encode=None, decode=None):
# Attaches the RPC library and encode / decode functions.
self.library = library
if not encode:
encode = getattr(library, 'dumps')
if not decode:
decode = getattr(library, 'loads')
self.encode = encode
self.decode = decode
self.requests_in_progress = 0
self.responses = []
@property
def faults(self):
# Grabs the fault tree on request
return Faults(self)
def response(self, handler):
"""
This is the callback for a single finished dispatch.
Once all the dispatches have been run, it calls the
parser library to parse responses and then calls the
handler's async method.
"""
handler._requests -= 1
if handler._requests > 0:
return
# We are finished with requests, send response
if handler._RPC_finished:
# We've already sent the response
raise Exception("Error trying to send response twice.")
handler._RPC_finished = True
responses = tuple(handler._results)
response_text = self.parse_responses(responses)
if type(response_text) not in types.StringTypes:
# Likely a fault, or something messed up
response_text = self.encode(response_text)
# Calling the async callback
handler.on_result(response_text)
def traceback(self, method_name='REQUEST', params=[]):
err_lines = traceback.format_exc().splitlines()
err_title = "ERROR IN %s" % method_name
if len(params) > 0:
err_title = '%s - (PARAMS: %s)' % (err_title, repr(params))
err_sep = ('-'*len(err_title))[:79]
err_lines = [err_sep, err_title, err_sep]+err_lines
if config.verbose:
if len(err_lines) >= 7 and config.short_errors:
# Minimum number of lines to see what happened
# Plus title and separators
print '\n'.join(err_lines[0:4]+err_lines[-3:])
else:
print '\n'.join(err_lines)
# Log here
return
def parse_request(self, request_body):
"""
Extend this on the implementing protocol. If it
should error out, return the output of the
'self.faults.fault_name' response. Otherwise,
it MUST return a TUPLE of TUPLES. Each entry
tuple must have the following structure:
('method_name', params)
...where params is a list or dictionary of
arguments (positional or keyword, respectively.)
So, the result should look something like
the following:
( ('add', [5,4]), ('add', {'x':5, 'y':4}) )
"""
return ([], [])
def parse_responses(self, responses):
"""
Extend this on the implementing protocol. It must
return a response that can be returned as output to
the client.
"""
return self.encode(responses, methodresponse=True)
def check_method(self, attr_name, obj):
"""
Checks whether an attribute is private (by the decorator or by a
leading underscore) and returns the attribute itself if it is
accessible; the assertions raise otherwise.
"""
assert(not attr_name.startswith('_'))
attr = getattr(obj, attr_name)
assert( not getattr(attr, 'private', False))
return attr
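# --- Illustrative sketch (hypothetical, not part of the library) -------------
# A minimal JSON-backed parser showing the parse_request / parse_responses
# contract documented above: parse_request yields a tuple of
# ('method_name', params) tuples and parse_responses serializes the results.
# A real implementation would also return self.faults.parse_error() on bad
# input and use a library that provides a Fault type.
import json

class ExampleJSONParser(BaseRPCParser):
    content_type = 'application/json'

    def __init__(self):
        BaseRPCParser.__init__(self, json)

    def parse_request(self, request_body):
        call = self.decode(request_body)
        # Single call of the form {"method": "add", "params": [5, 4]}
        return ((call['method'], call.get('params', [])),)

    def parse_responses(self, responses):
        return self.encode(list(responses))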
class BaseRPCHandler(RequestHandler):
"""
This is the base handler to be subclassed by the actual
implementations and by the end user.
"""
_RPC_ = None
#_requests = 1
rpcrequests = None
_error = None
_RPC_finished = False
def prepare(self):
"""
Parse request_body, prepares self.rpcrequest
On error call finish or set self._error - to be serialized by export procedure
"""
try:
requests = self._RPC_.parse_request(self.request.body)
if not isinstance(requests, types.TupleType):
# SHOULD be the result of a fault call,
# according to the parse_request spec above.
if isinstance(requests, basestring):
# Should be the response text of a fault
# This will break in Python 3.x
self.finish(requests)
elif hasattr(requests, 'response'):
# Fault types should have a 'response' method
self.finish(requests.response())
elif hasattr(requests, 'faultCode'):
# XML-RPC fault types need to be properly dispatched. This
# should only happen if there was an error parsing the request.
self._error = requests
else:
# No idea, hopefully the handler knows what it is doing.
self.finish(requests)
return
self.rpcrequests = requests
except Exception:
self._RPC_.traceback()
self._error = self._RPC_.faults.parse_error()
@tornado.web.asynchronous
@gen.coroutine
def post(self):
# Dispatches request methods
# rpcrequests are prepared in self.prepare()
if self._error:
responses = (self._error,)
else:
futures = [self._dispatch(method, args) for method,args in self.rpcrequests ]
if len(futures) == 1:
response = yield futures[0]
responses = (response,)
else:
responses = yield futures
responses = tuple(responses)
response_text = self._RPC_.parse_responses(responses)
self.set_header('Content-Type', self._RPC_.content_type)
self.finish(response_text)
#self._RPC_.run(self, request_body)
@gen.coroutine
def _dispatch(self, method_name, params):
"""
This method walks the attribute tree in the method
and passes the parameters, either in positional or
keyword form, into the appropriate method on the
Handler class. Currently supports only positional
or keyword arguments, not mixed.
"""
try:
assert(not hasattr(RequestHandler, method_name))
print method_name
method = self
method_list = dir(method)
method_list.sort()
attr_tree = method_name.split('.')
for attr_name in attr_tree:
method = self._RPC_.check_method(attr_name, method)
assert(callable(method))
assert(not method_name.startswith('_'))
assert(not getattr(method, 'private', False))
except Exception,e :
raise gen.Return(self._RPC_.faults.method_not_found())
args = []
kwargs = {}
try:
if isinstance(params, dict):
# The parameters are keyword-based
kwargs = params
elif type(params) in (list, tuple):
# The parameters are positional
args = params
else:
# Bad argument formatting?
raise Exception()
# Validating call arguments
final_kwargs, extra_args = getcallargs(method, *args, **kwargs)
except Exception:
raise gen.Return(self._RPC_.faults.invalid_params())
try:
if getattr(method, 'coroutine', False):
method=tornado.gen.coroutine(method)
response = yield method(*extra_args, **final_kwargs)
else:
response = method(*extra_args, **final_kwargs)
except Exception:
self._RPC_.traceback(method_name, params)
raise gen.Return(self._RPC_.faults.internal_error())
raise gen.Return(response)
class FaultMethod(object):
"""
This is the 'dynamic' fault method so that the message can
be changed on request from the parser.faults call.
"""
def __init__(self, fault, code, message):
self.fault = fault
self.code = code
self.message = message
def __call__(self, message=None):
if message:
self.message = message
return self.fault(self.code, self.message)
class Faults(object):
"""
This holds the codes and messages for the RPC implementation.
It is attached (dynamically) to the Parser when called via the
parser.faults query, and returns a FaultMethod to be called so
that the message can be changed. If the 'dynamic' attribute is
not a key in the codes list, then it will error.
USAGE:
parser.fault.parse_error('Error parsing content.')
If no message is passed in, it will check the messages dictionary
for the same key as the codes dict. Otherwise, it just prettifies
the code 'key' from the codes dict.
"""
codes = {
'parse_error': -32700,
'method_not_found': -32601,
'invalid_request': -32600,
'invalid_params': -32602,
'internal_error': -32603
}
messages = {}
def __init__(self, parser, fault=None):
self.library = parser.library
self.fault = fault
if not self.fault:
self.fault = getattr(self.library, 'Fault')
def __getattr__(self, attr):
message = 'Error'
if attr in self.messages.keys():
message = self.messages[attr]
else:
message = ' '.join(map(str.capitalize, attr.split('_')))
fault = FaultMethod(self.fault, self.codes[attr], message)
return fault
"""
Utility Functions
"""
def private(func):
"""
Use this to make a method private.
It is intended to be used as a decorator.
If you wish to make a method tree private, just
create and set the 'private' variable to True
on the tree object itself.
"""
func.private = True
return func
#def async(func):
# """
# Use this to make a method asynchronous
# It is intended to be used as a decorator.
# Make sure you call "self.result" on any
# async method. Also, trees do not currently
# support async methods.
# """
# func.async = True
# return func
def coroutine(func):
func.coroutine = True
return func
def start_server(handlers, route=r'/', port=8080):
"""
This is just a friendly wrapper around the default
Tornado instantiation calls. It simplifies the imports
and setup calls you'd make otherwise.
USAGE:
start_server(handler_class, route=r'/', port=8181)
"""
if type(handlers) not in (types.ListType, types.TupleType):
handler = handlers
handlers = [(route, handler)]
if route != '/RPC2':
# friendly addition for /RPC2 if it's the only one
handlers.append(('/RPC2', handler))
application = tornado.web.Application(handlers)
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(port)
loop_instance = tornado.ioloop.IOLoop.instance()
""" Setting the '_server' attribute if not set """
for (route, handler) in handlers:
try:
setattr(handler, '_server', loop_instance)
except AttributeError:
handler._server = loop_instance
loop_instance.start()
return loop_instance
"""
The following is a test implementation which should work
for both the XMLRPC and the JSONRPC clients.
"""
class TestMethodTree(object):
def power(self, x, y=2):
return pow(x, y)
@private
def private(self):
# Shouldn't be called
return False
class TestRPCHandler(BaseRPCHandler):
_RPC_ = None
def add(self, x, y):
return x+y
def ping(self, x):
return x
def noargs(self):
return 'Works!'
tree = TestMethodTree()
def _private(self):
# Shouldn't be called
return False
@private
def private(self):
# Also shouldn't be called
return False
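# Example usage (illustrative): a concrete XML or JSON implementation assigns
# its parser to TestRPCHandler._RPC_ and then serves the handler with the
# helper above, e.g.:
#
#   start_server(TestRPCHandler, route=r'/RPC2', port=8181)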
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines interface for DB access.
Functions in this module are imported into the sahara.db namespace. Call these
functions from sahara.db namespace, not the sahara.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface.
**Related Flags**
:db_backend: string to lookup in the list of LazyPluggable backends.
`sqlalchemy` is the only supported backend right now.
:sql_connection: string specifying the sqlalchemy connection to use, like:
`mysql://user:password@localhost/sahara`.
"""
from oslo_config import cfg
from oslo_db import api as db_api
from oslo_db import options
from oslo_log import log as logging
CONF = cfg.CONF
options.set_defaults(CONF)
_BACKEND_MAPPING = {
'sqlalchemy': 'sahara.db.sqlalchemy.api',
}
IMPL = db_api.DBAPI.from_config(CONF, backend_mapping=_BACKEND_MAPPING)
LOG = logging.getLogger(__name__)
def setup_db():
"""Set up database, create tables, etc.
Return True on success, False otherwise
"""
return IMPL.setup_db()
def drop_db():
"""Drop database.
Return True on success, False otherwise
"""
return IMPL.drop_db()
# Helpers for building constraints / equality checks
def constraint(**conditions):
"""Return a constraint object suitable for use with some updates."""
return IMPL.constraint(**conditions)
def equal_any(*values):
"""Return an equality condition object suitable for use in a constraint.
Equal_any conditions require that a model object's attribute equal any
one of the given values.
"""
return IMPL.equal_any(*values)
def not_equal(*values):
"""Return an inequality condition object suitable for use in a constraint.
Not_equal conditions require that a model object's attribute differs from
all of the given values.
"""
return IMPL.not_equal(*values)
def to_dict(func):
def decorator(*args, **kwargs):
res = func(*args, **kwargs)
if isinstance(res, list):
return [item.to_dict() for item in res]
if res:
return res.to_dict()
else:
return None
return decorator
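# Illustrative sketch of what @to_dict does (hypothetical call): the wrapped
# function returns model objects, and callers receive plain dictionaries.
#
#   @to_dict
#   def cluster_get_example(context, cluster_id):
#       return IMPL.cluster_get(context, cluster_id)   # model object or None
#
#   cluster_get_example(ctx, "1234")   # -> dict via .to_dict(), or None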
# Cluster ops
def cluster_get(context, cluster, show_progress=False):
"""Return the cluster or None if it does not exist."""
if show_progress:
cluster = IMPL.cluster_provision_progress_update(context, cluster)
else:
cluster = IMPL.cluster_get(context, cluster)
if cluster:
return cluster.to_dict(show_progress)
return None
@to_dict
def cluster_get_all(context, **kwargs):
"""Get all clusters filtered by **kwargs.
e.g. cluster_get_all(ctx, plugin_name='vanilla', hadoop_version='1.1')
"""
return IMPL.cluster_get_all(context, **kwargs)
@to_dict
def cluster_create(context, values):
"""Create a cluster from the values dictionary."""
return IMPL.cluster_create(context, values)
@to_dict
def cluster_update(context, cluster, values):
"""Set the given properties on cluster and update it."""
return IMPL.cluster_update(context, cluster, values)
def cluster_destroy(context, cluster):
"""Destroy the cluster or raise if it does not exist."""
IMPL.cluster_destroy(context, cluster)
# Node Group ops
def node_group_add(context, cluster, values):
"""Create a Node Group from the values dictionary."""
return IMPL.node_group_add(context, cluster, values)
def node_group_update(context, node_group, values):
"""Set the given properties on node_group and update it."""
IMPL.node_group_update(context, node_group, values)
def node_group_remove(context, node_group):
"""Destroy the node_group or raise if it does not exist."""
IMPL.node_group_remove(context, node_group)
# Instance ops
def instance_add(context, node_group, values):
"""Create an Instance from the values dictionary."""
return IMPL.instance_add(context, node_group, values)
def instance_update(context, instance, values):
"""Set the given properties on Instance and update it."""
IMPL.instance_update(context, instance, values)
def instance_remove(context, instance):
"""Destroy the Instance or raise if it does not exist."""
IMPL.instance_remove(context, instance)
# Volumes ops
def append_volume(context, instance, volume_id):
"""Append volume_id to instance."""
IMPL.append_volume(context, instance, volume_id)
def remove_volume(context, instance, volume_id):
"""Remove volume_id in instance."""
IMPL.remove_volume(context, instance, volume_id)
# Cluster Template ops
@to_dict
def cluster_template_get(context, cluster_template):
"""Return the cluster_template or None if it does not exist."""
return IMPL.cluster_template_get(context, cluster_template)
@to_dict
def cluster_template_get_all(context, **kwargs):
"""Get all cluster templates filtered by **kwargs.
e.g. cluster_template_get_all(plugin_name='vanilla',
hadoop_version='1.1')
"""
return IMPL.cluster_template_get_all(context, **kwargs)
@to_dict
def cluster_template_create(context, values):
"""Create a cluster_template from the values dictionary."""
return IMPL.cluster_template_create(context, values)
def cluster_template_destroy(context,
cluster_template,
ignore_default=False):
"""Destroy the cluster_template or raise if it does not exist."""
IMPL.cluster_template_destroy(context, cluster_template, ignore_default)
@to_dict
def cluster_template_update(context, values, ignore_default=False):
"""Update a cluster_template from the values dictionary."""
return IMPL.cluster_template_update(context, values, ignore_default)
# Node Group Template ops
@to_dict
def node_group_template_get(context, node_group_template):
"""Return the Node Group Template or None if it does not exist."""
return IMPL.node_group_template_get(context, node_group_template)
@to_dict
def node_group_template_get_all(context, **kwargs):
"""Get all Node Group Templates filtered by **kwargs.
e.g. node_group_template_get_all(plugin_name='vanilla',
hadoop_version='1.1')
"""
return IMPL.node_group_template_get_all(context, **kwargs)
@to_dict
def node_group_template_create(context, values):
"""Create a Node Group Template from the values dictionary."""
return IMPL.node_group_template_create(context, values)
def node_group_template_destroy(context,
node_group_template,
ignore_default=False):
"""Destroy the Node Group Template or raise if it does not exist."""
IMPL.node_group_template_destroy(context, node_group_template,
ignore_default)
@to_dict
def node_group_template_update(context, node_group_template,
ignore_default=False):
"""Update a Node Group Template from the values in a dictionary."""
return IMPL.node_group_template_update(context, node_group_template,
ignore_default)
# Data Source ops
@to_dict
def data_source_get(context, data_source):
"""Return the Data Source or None if it does not exist."""
return IMPL.data_source_get(context, data_source)
@to_dict
def data_source_get_all(context, **kwargs):
"""Get all Data Sources filtered by **kwargs.
e.g. data_source_get_all(name='myfile', type='swift')
"""
return IMPL.data_source_get_all(context, **kwargs)
def data_source_count(context, **kwargs):
"""Count Data Sources filtered by **kwargs.
Uses sqlalchemy "in_" clause for any tuple values
Uses sqlalchemy "like" clause for any string values containing %
"""
return IMPL.data_source_count(context, **kwargs)
@to_dict
def data_source_create(context, values):
"""Create a Data Source from the values dictionary."""
return IMPL.data_source_create(context, values)
def data_source_destroy(context, data_source):
"""Destroy the Data Source or raise if it does not exist."""
IMPL.data_source_destroy(context, data_source)
@to_dict
def data_source_update(context, data_source):
"""Create a Data Source from the values dictionary."""
return IMPL.data_source_update(context, data_source)
# JobExecutions ops
@to_dict
def job_execution_get(context, job_execution):
"""Return the JobExecution or None if it does not exist."""
return IMPL.job_execution_get(context, job_execution)
@to_dict
def job_execution_get_all(context, **kwargs):
"""Get all JobExecutions filtered by **kwargs.
kwargs key values may be the names of fields in a JobExecution
plus the following special values with the indicated meaning:
'cluster.name' -- name of the Cluster referenced by the JobExecution
'job.name' -- name of the Job referenced by the JobExecution
'status' -- JobExecution['info']['status']
e.g. job_execution_get_all(cluster_id=12, input_id=123)
job_execution_get_all(**{'cluster.name': 'test',
'job.name': 'wordcount'})
"""
return IMPL.job_execution_get_all(context, **kwargs)
def job_execution_count(context, **kwargs):
"""Count number of JobExecutions filtered by **kwargs.
e.g. job_execution_count(cluster_id=12, input_id=123)
"""
return IMPL.job_execution_count(context, **kwargs)
@to_dict
def job_execution_create(context, values):
"""Create a JobExecution from the values dictionary."""
return IMPL.job_execution_create(context, values)
@to_dict
def job_execution_update(context, job_execution, values):
"""Create a JobExecution from the values dictionary."""
return IMPL.job_execution_update(context, job_execution, values)
def job_execution_destroy(context, job_execution):
"""Destroy the JobExecution or raise if it does not exist."""
IMPL.job_execution_destroy(context, job_execution)
# Job ops
@to_dict
def job_get(context, job):
"""Return the Job or None if it does not exist."""
return IMPL.job_get(context, job)
@to_dict
def job_get_all(context, **kwargs):
"""Get all Jobs filtered by **kwargs.
e.g. job_get_all(name='myjob', type='MapReduce')
"""
return IMPL.job_get_all(context, **kwargs)
@to_dict
def job_create(context, values):
"""Create a Job from the values dictionary."""
return IMPL.job_create(context, values)
def job_update(context, job, values):
"""Update a Job from the values dictionary."""
return IMPL.job_update(context, job, values)
def job_destroy(context, job):
"""Destroy the Job or raise if it does not exist."""
IMPL.job_destroy(context, job)
@to_dict
def job_binary_get_all(context, **kwargs):
"""Get all JobBinarys filtered by **kwargs.
e.g. job_binary_get_all(name='wordcount.jar')
"""
return IMPL.job_binary_get_all(context, **kwargs)
@to_dict
def job_binary_get(context, job_binary):
"""Return the JobBinary or None if it does not exist."""
return IMPL.job_binary_get(context, job_binary)
@to_dict
def job_binary_create(context, values):
"""Create a JobBinary from the values dictionary."""
return IMPL.job_binary_create(context, values)
def job_binary_destroy(context, job_binary):
"""Destroy the JobBinary or raise if it does not exist."""
IMPL.job_binary_destroy(context, job_binary)
@to_dict
def job_binary_internal_get_all(context, **kwargs):
"""Get all JobBinaryInternals filtered by **kwargs.
e.g. job_binary_internal_get_all(ctx, name='wordcount.jar')
The JobBinaryInternals returned do not contain a data field.
"""
return IMPL.job_binary_internal_get_all(context, **kwargs)
@to_dict
def job_binary_internal_get(context, job_binary_internal):
"""Return the JobBinaryInternal or None if it does not exist."""
return IMPL.job_binary_internal_get(context, job_binary_internal)
@to_dict
def job_binary_internal_create(context, values):
"""Create a JobBinaryInternal from the values dictionary."""
return IMPL.job_binary_internal_create(context, values)
def job_binary_internal_destroy(context, job_binary_internal):
"""Destroy the JobBinaryInternal or raise if it does not exist."""
IMPL.job_binary_internal_destroy(context, job_binary_internal)
def job_binary_internal_get_raw_data(context, job_binary_internal_id):
"""Return the binary data field from the specified JobBinaryInternal."""
return IMPL.job_binary_internal_get_raw_data(context,
job_binary_internal_id)
# Events ops
def cluster_provision_step_add(context, cluster_id, values):
"""Create a cluster assigned ProvisionStep from the values dictionary."""
return IMPL.cluster_provision_step_add(context, cluster_id, values)
def cluster_provision_step_update(context, step_id):
"""Updates provision step."""
return IMPL.cluster_provision_step_update(context, step_id)
def cluster_provision_progress_update(context, cluster_id):
"""Return cluster with provision progress updated field."""
return IMPL.cluster_provision_progress_update(context, cluster_id)
def cluster_event_add(context, provision_step, values):
"""Assign new event to the specified provision step."""
return IMPL.cluster_event_add(context, provision_step, values)
|
|
import sys, os, time
import numpy as np
from subprocess import Popen, list2cmdline, PIPE
from shutil import copy2
# def cpu_count():
# ''' Returns the number of CPUs in the system
# '''
# num = 1
# try:
# num = os.sysconf('SC_NPROCESSORS_ONLN')
# except (ValueError, OSError, AttributeError):
# pass
# return num
# def exec_commands(cmds):
# ''' Exec commands in parallel in multiple process
# (as much as we have CPU)
# '''
# if not cmds: return # empty list
#
# def done(p):
# return p.poll() is not None
# def success(p):
# return p.returncode == 0
# def fail():
# sys.exit(1)
#
# max_task = cpu_count()
# processes = []
# while True:
# while cmds and len(processes) < max_task:
# task = cmds.pop()
# print task
# processes.append(Popen(task, shell=True))
#
# for p in processes:
# if done(p):
# if success(p):
# print "success"
# processes.remove(p)
# else:
# print "fail"
# fail()
#
# if not processes and not cmds:
# break
# else:
# time.sleep(0.05)
def ReviseInlistFile(filename, profile, profile_comp, mixing, energy,mass,imax=999):
m_ex=0.0414*mass+0.986
tmp_filename = filename+'.tmp'
fr = open(filename, 'r')
fw = open(tmp_filename, 'w')
print profile
print profile_comp
for line in fr:
if ' profile_name' in line:
new_content = ' profile_name = "%s" \n' % profile
fw.write(new_content)
print new_content
elif 'comp_profile_name' in line:
new_content = 'comp_profile_name = "%s" \n' % profile_comp
fw.write(new_content)
elif 'imax' in line:
new_content = 'imax = %(max)i \n' % {'max':imax}
fw.write(new_content)
elif 'final_energy =' in line:
new_content = 'final_energy =%(efin).2E \n' % {"efin":energy}
fw.write(new_content)
elif 'Ni_boundary_mass =' in line:
new_content = 'Ni_boundary_mass =%(ni56).1E \n' % {"ni56":mixing}
fw.write(new_content)
elif 'mass_excised =' in line:
new_content = 'mass_excised =%(ex).2E \n' % {"ex": m_ex}
fw.write(new_content)
elif 'bomb_mass_spread' in line:
new_content = 'bomb_mass_spread =%(ex).2E \n' % {"ex": mass*0.02}
fw.write(new_content)
else:
fw.write(line)
fr.close()
fw.close()
command_mv = 'mv %s %s' % (tmp_filename, filename)
os.system(command_mv)
return
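# Example call (illustrative values): rewrite a SNEC parameter file in place
# for a 15 solar-mass progenitor with Ni-56 mixed out to 5 solar masses and a
# final energy of 1.5e51 erg:
#
#   ReviseInlistFile('/scratch/.../parameters',
#                    'profile_s15.0.short', 'profile_s15.0.iso.short',
#                    mixing=5, energy=1.5e51, mass=15.0)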
def main_loop():
tasks=[]
mass=np.arange(11,28,0.6)
ni56_mixing=[3, 5, 7]
energy=np.arange(1.3e51,2e51,0.06e51)
#energy=np.arange(4e49,1.3e51,0.06e51)
# mass=[11]
# ni56_mixing=[3, 7]
# energy=[4e49]
loc_bas='/scratch/m/matzner/afsari/SNEC/'
#model_dir= '/home/afsari/PycharmProjects/kspSN/models/'
for i in mass:
for j in ni56_mixing:
for k in energy:
dir_name= 's%(num)2.1f_ni56_%(mixing)i_efin_%(efin).2E' %{"num":i,"mixing":j,"efin":k}
try:
os.mkdir(loc_bas+dir_name)
except OSError as err:
print err
try:
os.mkdir(loc_bas+dir_name+'/output')
except OSError as err:
print err
print loc_bas+dir_name+'/output'
copy2(loc_bas+'snec',loc_bas+dir_name)
cmd='cp '+loc_bas + 'parameters '+loc_bas + dir_name
print cmd
os.system(cmd)
src_comp='profile_s%(num)2.1f.iso.short' %{"num":i}
cmd='cp '+loc_bas + 'profiles/'+src_comp+' '+loc_bas + dir_name
print cmd
os.system(cmd)
src='profile_s%(num)2.1f.short' %{"num":i}
cmd='cp '+loc_bas + 'profiles/'+src+' '+loc_bas + dir_name
print cmd
os.system(cmd)
cmd='cp -r '+loc_bas+'tables '+loc_bas+dir_name
print cmd
os.system(cmd)
cmd='cp '+loc_bas+'subfile '+loc_bas+dir_name
print cmd
os.system(cmd)
ReviseInlistFile(loc_bas+dir_name+'/parameters',src,src_comp,j,k,i)
task='(cd '+loc_bas+dir_name+' ; ./snec) &'
tasks.append(task)
if len(tasks)==8:
for count in range(0,8):
with open(loc_bas+dir_name+'/subfile', "a") as myfile:
myfile.write(tasks.pop()+' \n')
with open(loc_bas + dir_name + '/subfile', "a") as myfile:
myfile.write("wait")
cmd = 'cd ' + loc_bas + dir_name + ' && qsub ' + loc_bas + dir_name + '/subfile'
os.system(cmd)
tasks=[]
if len(tasks)>0:
for count in range(0, len(tasks)):
with open(loc_bas + dir_name + '/subfile', "a") as myfile:
myfile.write(tasks.pop()+' \n')
with open(loc_bas + dir_name + '/subfile', "a") as myfile:
myfile.write("wait")
cmd = 'cd ' + loc_bas + dir_name + ' && qsub ' + loc_bas + dir_name + '/subfile'
os.system(cmd)
tasks = []
return
def csm_loop():
tasks=[]
loc_bas='/scratch/m/matzner/afsari/SNEC/'
best_model='s21.2_ni56_7_efin_7.60E+50'
content = best_model.split('_')
name= content[0].strip('s')
mixing = float(content[2])
energy = float(content[4])
solar_radius = 6.96e10
solar_mass = 1.99e33
model_radius = 86493739248921.08
model_radius_solar = model_radius / solar_radius
rad = np.arange(model_radius_solar+2, 3800, 100)
rad_solar = rad * solar_radius
K = np.arange(1.0e17, 3.0e18, 1.0e17)
#rad = [model_radius_solar + 100]
#K = [1.0e17 + 1.0e17]
for i, r in enumerate(rad):
for j, k in enumerate(K):
dir_name = 's%(num)2.1f_%(radius)i_K_%(cons).2E' % {"num": float(name), "radius": np.floor(r), "cons": k}
try:
os.mkdir(loc_bas+dir_name)
except OSError as err:
print err
try:
os.mkdir(loc_bas+dir_name+'/output')
except OSError as err:
print err
print loc_bas+dir_name+'/output'
copy2(loc_bas+'snec',loc_bas+dir_name)
cmd='cp '+loc_bas + 'parameters '+loc_bas + dir_name
print cmd
os.system(cmd)
src='profile_s%(num)2.1f.short' %{"num" : float(name)}
cmd='cp '+loc_bas + 'profiles/'+src+' '+loc_bas + dir_name
print cmd
os.system(cmd)
src_comp='profile_s%(num)2.1f.iso.short' %{"num":float(name)}
cmd='cp '+loc_bas + 'profiles/'+src_comp+' '+loc_bas + dir_name
os.system(cmd)
fr = open(loc_bas + dir_name+ '/'+src , 'r')
fw = open(loc_bas + dir_name+ '/'+'profile_'+dir_name+'.short', 'w')
fr_iso = open(loc_bas + dir_name+ '/'+src_comp , 'r')
fw_iso = open(loc_bas + dir_name+ '/'+'profile_'+dir_name+'.iso.short', 'w')
for line in fr:
fw.write(line)
dat = line.split(' ')
for line in fr_iso:
fw_iso.write(line)
dat_iso = line.split(' ')
rad_csm = np.arange(model_radius_solar + 1, r, 1.5) * solar_radius
radius_old = float(dat[2])
mass_old = float(dat[1])
print rad_csm
for u, csm in enumerate(rad_csm):
rho = k / np.square(csm)
mass_new = mass_old + (4 * np.pi * rho * (csm ** 3 - radius_old ** 3) / 3)
dat_towrite = [int(dat[0]) + u + 1, mass_new, csm, float(dat[3]), rho, float(dat[5]), float(dat[6]),
float(dat[7])]
dat_towrite_iso = [mass_new, csm, float(dat_iso[2]), float(dat_iso[3]),
float(dat_iso[4]), float(dat_iso[5]), float(dat_iso[6]), float(dat_iso[7]),
float(dat_iso[8]) , float(dat_iso[9]), float(dat_iso[10]), float(dat_iso[11]),
float(dat_iso[12]), float(dat_iso[13]),float(dat_iso[14]), float(dat_iso[15]),
float(dat_iso[16])]
fw.writelines(["%s " % item for item in dat_towrite])
fw.writelines(["\n"])
fw_iso.writelines(["%s " % item for item in dat_towrite_iso])
fw_iso.writelines(["\n"])
mass_old = mass_new
radius_old = csm
csm_fac = 1 - (float(dat[1])) / mass_old
prog_fac=(float(dat[1])) / mass_old
fr.close()
fw.close()
fr_iso.close()
fw_iso.close()
with file(loc_bas + dir_name+ '/'+'profile_'+dir_name+'.short', 'r+') as modified:
modified.write(str(int(dat[0]) + u +1))
modified.close()
with file(loc_bas + dir_name+ '/'+'profile_'+dir_name+'.iso.short', 'r+') as modified:
modified.write(str(int(dat[0]) + u + 1) + ' 15')
modified.close()
cmd='cp -r '+loc_bas+'tables '+loc_bas+dir_name
print cmd
os.system(cmd)
file_grid = loc_bas+dir_name+'/tables/GridPattern.dat'
dat_pattern = np.loadtxt(file_grid)
dat_csm = dat_pattern[dat_pattern > prog_fac]
dat_pattern = prog_fac * dat_pattern
dat_pattern = np.concatenate([dat_pattern, dat_csm])
np.savetxt(file_grid, dat_pattern)
imax=np.shape(dat_pattern)[0]
cmd='cp '+loc_bas+'subfile '+loc_bas+dir_name
print cmd
os.system(cmd)
ReviseInlistFile(loc_bas+dir_name+'/parameters','profile_'+dir_name+'.short','profile_'+dir_name+'.iso.short',mixing,energy,float(name),imax)
task='(cd '+loc_bas+dir_name+' ; ./snec) &'
tasks.append(task)
if len(tasks)==8:
for count in range(0,8):
with open(loc_bas+dir_name+'/subfile', "a") as myfile:
myfile.write(tasks.pop()+' \n')
with open(loc_bas + dir_name + '/subfile', "a") as myfile:
myfile.write("wait")
cmd = 'cd ' + loc_bas + dir_name + ' && qsub ' + loc_bas + dir_name + '/subfile'
os.system(cmd)
tasks=[]
if len(tasks)>0:
for count in range(0, len(tasks)):
with open(loc_bas + dir_name + '/subfile', "a") as myfile:
myfile.write(tasks.pop()+' \n')
with open(loc_bas + dir_name + '/subfile', "a") as myfile:
myfile.write("wait")
cmd = 'cd ' + loc_bas + dir_name + ' && qsub ' + loc_bas + dir_name + '/subfile'
os.system(cmd)
tasks = []
return
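# Note on csm_loop above: the circumstellar material is appended as a wind-like
# profile rho(r) = K / r**2, each new shell's mass is accumulated as
# m_new = m_old + 4*pi*rho*(r**3 - r_old**3)/3, and GridPattern.dat is rescaled
# so that the original progenitor occupies the fraction prog_fac of the grid
# while the added CSM fills the remainder.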
def aux_csm_loop():
tasks=[]
loc_bas='/scratch/m/matzner/afsari/SNEC/'
best_model='s21.2_ni56_7_efin_7.60E+50'
content = best_model.split('_')
name= content[0].strip('s')
mixing = float(content[2])
energy = float(content[4])
solar_radius = 6.96e10
solar_mass = 1.99e33
model_radius = 86493739248921.08
model_radius_solar = model_radius / solar_radius
fname='/scratch/m/matzner/afsari/SNEC/UnCompFile.txt'
with open(fname) as f:
dirs = f.readlines()
dirs = [x.strip() for x in dirs]
loc_bas='/scratch/m/matzner/afsari/SNEC/'
for dir_name in dirs:
content = dir_name.split('_')
r = float(content[1])
k = float(content[3])
try:
os.mkdir(loc_bas+dir_name)
except OSError as err:
print err
try:
os.mkdir(loc_bas+dir_name+'/output')
except OSError as err:
print err
print loc_bas+dir_name+'/output'
copy2(loc_bas+'snec',loc_bas+dir_name)
cmd='cp '+loc_bas + 'parameters '+loc_bas + dir_name
print cmd
os.system(cmd)
src='profile_s%(num)2.1f.short' %{"num" : float(name)}
cmd='cp '+loc_bas + 'profiles/'+src+' '+loc_bas + dir_name
print cmd
os.system(cmd)
src_comp='profile_s%(num)2.1f.iso.short' %{"num":float(name)}
cmd='cp '+loc_bas + 'profiles/'+src_comp+' '+loc_bas + dir_name
os.system(cmd)
fr = open(loc_bas + dir_name+ '/'+src , 'r')
fw = open(loc_bas + dir_name+ '/'+'profile_'+dir_name+'.short', 'w')
fr_iso = open(loc_bas + dir_name+ '/'+src_comp , 'r')
fw_iso = open(loc_bas + dir_name+ '/'+'profile_'+dir_name+'.iso.short', 'w')
for line in fr:
fw.write(line)
dat = line.split(' ')
for line in fr_iso:
fw_iso.write(line)
dat_iso = line.split(' ')
rad_csm = np.arange(model_radius_solar + 1, r, 1.5) * solar_radius
radius_old = float(dat[2])
mass_old = float(dat[1])
print rad_csm
for u, csm in enumerate(rad_csm):
rho = k / np.square(csm)
mass_new = mass_old + (4 * np.pi * rho * (csm ** 3 - radius_old ** 3) / 3)
dat_towrite = [int(dat[0]) + u + 1, mass_new, csm, float(dat[3]), rho, float(dat[5]), float(dat[6]),
float(dat[7])]
dat_towrite_iso = [mass_new, csm, float(dat_iso[2]), float(dat_iso[3]),
float(dat_iso[4]), float(dat_iso[5]), float(dat_iso[6]), float(dat_iso[7]),
float(dat_iso[8]) , float(dat_iso[9]), float(dat_iso[10]), float(dat_iso[11]),
float(dat_iso[12]), float(dat_iso[13]),float(dat_iso[14]), float(dat_iso[15]),
float(dat_iso[16])]
fw.writelines(["%s " % item for item in dat_towrite])
fw.writelines(["\n"])
fw_iso.writelines(["%s " % item for item in dat_towrite_iso])
fw_iso.writelines(["\n"])
mass_old = mass_new
radius_old = csm
csm_fac = 1 - (float(dat[1])) / mass_old
prog_fac=(float(dat[1])) / mass_old
fr.close()
fw.close()
fr_iso.close()
fw_iso.close()
with file(loc_bas + dir_name+ '/'+'profile_'+dir_name+'.short', 'r+') as modified:
modified.write(str(int(dat[0]) + u +1))
modified.close()
with file(loc_bas + dir_name+ '/'+'profile_'+dir_name+'.iso.short', 'r+') as modified:
modified.write(str(int(dat[0]) + u + 1) + ' 15')
modified.close()
cmd='cp -r '+loc_bas+'tables '+loc_bas+dir_name
print cmd
os.system(cmd)
file_grid = loc_bas+dir_name+'/tables/GridPattern.dat'
dat_pattern = np.loadtxt(file_grid)
dat_csm = dat_pattern[dat_pattern > prog_fac]
dat_pattern = prog_fac * dat_pattern
dat_pattern = np.concatenate([dat_pattern, dat_csm])
np.savetxt(file_grid, dat_pattern)
imax=np.shape(dat_pattern)[0]
cmd='cp '+loc_bas+'subfile '+loc_bas+dir_name
print cmd
os.system(cmd)
ReviseInlistFile(loc_bas+dir_name+'/parameters','profile_'+dir_name+'.short','profile_'+dir_name+'.iso.short',mixing,energy,float(name),imax)
task='(cd '+loc_bas+dir_name+' ; ./snec) &'
tasks.append(task)
if len(tasks)==8:
for count in range(0,8):
with open(loc_bas+dir_name+'/subfile', "a") as myfile:
myfile.write(tasks.pop()+' \n')
with open(loc_bas + dir_name + '/subfile', "a") as myfile:
myfile.write("wait")
cmd = 'cd ' + loc_bas + dir_name + ' && qsub ' + loc_bas + dir_name + '/subfile'
os.system(cmd)
tasks=[]
if len(tasks)>0:
for count in range(0, len(tasks)):
with open(loc_bas + dir_name + '/subfile', "a") as myfile:
myfile.write(tasks.pop()+' \n')
with open(loc_bas + dir_name + '/subfile', "a") as myfile:
myfile.write("wait")
cmd = 'cd ' + loc_bas + dir_name + ' && qsub ' + loc_bas + dir_name + '/subfile'
os.system(cmd)
tasks = []
return
def aux_loop():
tasks=[]
fname='/scratch/m/matzner/afsari/SNEC/UnCompFile.txt'
with open(fname) as f:
dirs = f.readlines()
dirs = [x.strip() for x in dirs]
loc_bas='/scratch/m/matzner/afsari/SNEC/'
for dir_name in dirs:
content=dir_name.split('_')
i=float(content[0].strip('s'))
j=float(content[2])
k=float(content[4])
try:
os.mkdir(loc_bas+dir_name)
except OSError as err:
print err
try:
os.mkdir(loc_bas+dir_name+'/output')
except OSError as err:
print err
print loc_bas+dir_name+'/output'
copy2(loc_bas+'snec',loc_bas+dir_name)
cmd='cp '+loc_bas + 'parameters '+loc_bas + dir_name
print cmd
os.system(cmd)
src_comp='profile_s%(num)2.1f.iso.short' %{"num":i}
cmd='cp '+loc_bas + 'profiles/'+src_comp+' '+loc_bas + dir_name
print cmd
os.system(cmd)
src='profile_s%(num)2.1f.short' %{"num":i}
cmd='cp '+loc_bas + 'profiles/'+src+' '+loc_bas + dir_name
print cmd
os.system(cmd)
cmd='cp -r '+loc_bas+'tables '+loc_bas+dir_name
print cmd
os.system(cmd)
cmd='cp '+loc_bas+'subfile '+loc_bas+dir_name
print cmd
os.system(cmd)
ReviseInlistFile(loc_bas+dir_name+'/parameters',src,src_comp,j,k,i)
task='(cd '+loc_bas+dir_name+' ; ./snec) &'
tasks.append(task)
if len(tasks)==8:
for count in range(0,8):
with open(loc_bas+dir_name+'/subfile', "a") as myfile:
myfile.write(tasks.pop()+' \n')
with open(loc_bas + dir_name + '/subfile', "a") as myfile:
myfile.write("wait")
cmd = 'cd ' + loc_bas + dir_name + ' && qsub ' + loc_bas + dir_name + '/subfile'
os.system(cmd)
tasks=[]
if len(tasks)>0:
for count in range(0, len(tasks)):
with open(loc_bas + dir_name + '/subfile', "a") as myfile:
myfile.write(tasks.pop()+' \n')
with open(loc_bas + dir_name + '/subfile', "a") as myfile:
myfile.write("wait")
cmd = 'cd ' + loc_bas + dir_name + ' && qsub ' + loc_bas + dir_name + '/subfile'
os.system(cmd)
tasks = []
return
if __name__ == '__main__':
csm_loop()
|
|
###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
""" The file describes a container widget consisting of a pipeline
view and a version tree for each opened Vistrail """
from PyQt4 import QtCore, QtGui
from core import debug
from core.collection import Collection
from core.db.locator import untitled_locator
from core.debug import critical
from core.data_structures.bijectivedict import Bidict
from core.system import vistrails_default_file_type
from core.thumbnails import ThumbnailCache
from core.vistrail.vistrail import Vistrail
from core.vistrail.pipeline import Pipeline
from core.log.log import Log
from core.log.opm_graph import OpmGraph
from core.db.locator import FileLocator, XMLFileLocator
from core.modules.module_registry import ModuleRegistry
from core.configuration import get_vistrails_configuration
from gui.collection.vis_log import QLogView
from gui.common_widgets import QMouseTabBar
from gui.pipeline_view import QPipelineView
from gui.version_view import QVersionTreeView
from gui.query_view import QQueryView
from gui.paramexplore.pe_view import QParamExploreView
from gui.vis_diff import QDiffView
from gui.paramexplore.param_view import QParameterView
from gui.vistrail_controller import VistrailController
from gui.mashups.mashup_view import QMashupView
from gui.ports_pane import ParameterEntry
from gui.query_view import QueryEntry
################################################################################
class QVistrailView(QtGui.QWidget):
"""
QVistrailView is a widget containing four stacked widgets: Pipeline View,
Version Tree View, Query View and Parameter Exploration view
for manipulating vistrails.
"""
def __init__(self, vistrail, locator=None, abstraction_files=None,
thumbnail_files=None, mashups=None, parent=None):
""" QVistrailView(parent: QWidget) -> QVistrailView
"""
QtGui.QWidget.__init__(self, parent)
layout = QtGui.QVBoxLayout(self)
layout.setMargin(0)
layout.setSpacing(0)
self.is_executing = False
self.notifications = {}
self.tabs = QMouseTabBar(self)
self.tabs.setDocumentMode(True)
self.tabs.setTabsClosable(True)
self.tabs.setExpanding(False)
#self.tabs.setMovable(True)
self.tabs.hide()
self.tabs.tabDoubleClicked.connect(self.tabDoubleClicked)
layout.addWidget(self.tabs)
self.stack = QtGui.QStackedWidget(self)
layout.addWidget(self.stack)
self.setLayout(layout)
#this index is for pipeline/diff views
self.tab_to_stack_idx = {}
self.tab_state = {}
self.tab_to_view = {}
#self.button_to_tab_idx = Bidict()
self.detached_views = {}
# Initialize the vistrail controller
self.controller = VistrailController(vistrail)
# Create the initial views
self.version_view = None
pipeline_view = self.create_pipeline_view()
self.version_view = self.create_version_view()
self.query_view = self.create_query_view()
self.pe_view = self.create_pe_view()
self.log_view = self.create_log_view()
self.mashup_view = self.create_mashup_view()
self.set_controller(self.controller)
self.locator = locator
self.controller.set_vistrail(vistrail, self.locator, abstraction_files,
thumbnail_files, mashups)
self.tabs.setCurrentIndex(0)
self.current_tab = self.stack.setCurrentIndex(0)
self.pipeline_selected()
self.connect(self.tabs, QtCore.SIGNAL("currentChanged(int)"),
self.tab_changed)
self.connect(self.tabs, QtCore.SIGNAL("tabCloseRequested(int)"),
self.remove_view_by_index)
#self.view_changed()
#self.tab_changed(0)
self.connect(self.controller,
QtCore.SIGNAL('stateChanged'),
self.stateChanged)
from gui.vistrails_window import _app
_app.register_notification("reg_new_abstraction",
self.controller.check_subworkflow_versions)
_app.register_notification("reg_deleted_abstraction",
self.controller.check_subworkflow_versions)
# self.controller = VistrailController()
# self.controller.vistrail_view = self
# self.connect(self.controller,
# QtCore.SIGNAL('stateChanged'),
# self.stateChanged)
# self.connect(self.controller,
# QtCore.SIGNAL('new_action'),
# self.new_action)
# # self.versionTab.versionView.scene()._vistrail_view = self
# self.connect(self.versionTab.versionView.scene(),
# QtCore.SIGNAL('versionSelected(int,bool,bool,bool)'),
# self.versionSelected,
# QtCore.Qt.QueuedConnection)
# self.connect(self.versionTab,
# QtCore.SIGNAL('twoVersionsSelected(int,int)'),
# self.twoVersionsSelected)
# self.connect(self.queryTab,
# QtCore.SIGNAL('queryPipelineChange'),
# self.queryPipelineChange)
# self.connect(self.peTab,
# QtCore.SIGNAL('exploreChange(bool)'),
# self.exploreChange)
# # We also keep track where this vistrail comes from
# # So we can save in the right place
# self.locator = None
# self.closeEventHandler = None
# # Keep the state of the execution button and menu items for the view
# self.execQueryEnabled = False
# self.execDiffEnabled = False
# self.execExploreEnabled = False
# self.execPipelineEnabled = False
# self.execDiffId1 = -1
# self.execDiffId2 = -1
if get_vistrails_configuration().detachHistoryView:
self.detach_history_view()
def get_notifications(self):
return self.notifications
def set_notification(self, notification_id, method):
if notification_id not in self.notifications:
self.notifications[notification_id] = []
self.notifications[notification_id].append(method)
def set_controller(self, controller):
self.controller = controller
self.controller.vistrail_view = self
for i in xrange(self.stack.count()):
view = self.stack.widget(i)
if hasattr(view, 'set_controller'):
view.set_controller(controller)
def get_controller(self):
return self.controller
def get_name(self):
title = self.controller.name
if title=='':
title = 'Untitled%s'%vistrails_default_file_type()
if self.controller.changed:
title += '*'
# self.setWindowTitle(title)
return title
def set_name(self):
title = self.get_name()
self.setWindowTitle(title)
def reset_version_view(self):
from gui.vistrails_window import _app
if self.version_view is not None:
select_node = True
if _app._previous_view and _app._previous_view in self.detached_views:
select_node = False
self.version_view.scene().setupScene(self.controller, select_node)
def reset_tab_state(self):
try:
qaction = self.tab_state[self.tabs.currentIndex()]
qaction.trigger()
except:
pass
def reset_tab_view_to_current(self):
index = self.tabs.currentIndex()
view = self.stack.widget(self.tab_to_stack_idx[index])
#print "view changed: ", view
self.set_to_current(view)
def pipeline_selected(self):
from gui.vistrails_window import _app
if hasattr(self.window(), 'qactions'):
window = self.window()
else:
window = _app
#print "PIPELINE"
self.stack.setCurrentIndex(
self.tab_to_stack_idx[self.tabs.currentIndex()])
self.tabs.setTabText(self.tabs.currentIndex(),
self.stack.currentWidget().get_title())
self.tab_state[self.tabs.currentIndex()] = window.qactions['pipeline']
self.tab_to_view[self.tabs.currentIndex()] = self.get_current_tab()
def pipeline_unselected(self):
#print "PIPELINE UN"
self.stack.setCurrentIndex(
self.tab_to_stack_idx[self.tabs.currentIndex()])
self.tabs.setTabText(self.tabs.currentIndex(),
self.stack.currentWidget().get_title())
def history_selected(self):
from gui.vistrails_window import _app
if get_vistrails_configuration().detachHistoryView:
_app.history_view.raise_()
return
if hasattr(self.window(), 'qactions'):
window = self.window()
else:
window = _app
#print "VERSION"
self.stack.setCurrentIndex(self.stack.indexOf(self.version_view))
self.tabs.setTabText(self.tabs.currentIndex(), "History")
self.tab_state[self.tabs.currentIndex()] = window.qactions['history']
self.tab_to_view[self.tabs.currentIndex()] = self.get_current_tab()
def history_unselected(self):
#print "VERSION UN"
self.stack.setCurrentIndex(
self.tab_to_stack_idx[self.tabs.currentIndex()])
self.tabs.setTabText(self.tabs.currentIndex(),
self.stack.currentWidget().get_title())
def query_selected(self):
from gui.vistrails_window import _app
if hasattr(self.window(), 'qactions'):
window = self.window()
else:
window = _app
#print "QUERY"
self.stack.setCurrentIndex(self.stack.indexOf(self.query_view))
self.tabs.setTabText(self.tabs.currentIndex(), "Search")
self.tab_state[self.tabs.currentIndex()] = window.qactions['search']
self.tab_to_view[self.tabs.currentIndex()] = self.get_current_tab()
def query_unselected(self):
#print "QUERY UN"
self.stack.setCurrentIndex(
self.tab_to_stack_idx[self.tabs.currentIndex()])
self.tabs.setTabText(self.tabs.currentIndex(),
self.stack.currentWidget().get_title())
def explore_selected(self):
from gui.vistrails_window import _app
if hasattr(self.window(), 'qactions'):
window = self.window()
else:
window = _app
#print "EXPLORE"
self.stack.setCurrentIndex(self.stack.indexOf(self.pe_view))
self.tabs.setTabText(self.tabs.currentIndex(), "Explore")
self.tab_state[self.tabs.currentIndex()] = window.qactions['explore']
self.tab_to_view[self.tabs.currentIndex()] = self.get_current_tab()
def explore_unselected(self):
#print "EXPLORE UN"
self.stack.setCurrentIndex(
self.tab_to_stack_idx[self.tabs.currentIndex()])
self.tabs.setTabText(self.tabs.currentIndex(),
self.stack.currentWidget().get_title())
def provenance_selected(self):
from gui.vistrails_window import _app
if hasattr(self.window(), 'qactions'):
window = self.window()
else:
window = _app
#print "PROVENANCE"
self.stack.setCurrentIndex(self.stack.indexOf(self.log_view))
self.tabs.setTabText(self.tabs.currentIndex(), "Provenance")
self.tab_state[self.tabs.currentIndex()] = window.qactions['provenance']
self.tab_to_view[self.tabs.currentIndex()] = self.get_current_tab()
def provenance_unselected(self):
#print "PROVENANCE UN"
self.stack.setCurrentIndex(
self.tab_to_stack_idx[self.tabs.currentIndex()])
self.tabs.setTabText(self.tabs.currentIndex(),
self.stack.currentWidget().get_title())
def mashup_selected(self):
from gui.vistrails_window import _app
if hasattr(self.window(), 'qactions'):
window = self.window()
else:
window = _app
#print "MASHUP"
#print self.stack.count(), self.stack.indexOf(self.mashup_view)
try:
self.stack.setCurrentIndex(self.stack.indexOf(self.mashup_view))
self.tabs.setTabText(self.tabs.currentIndex(), "Mashup")
self.tab_state[self.tabs.currentIndex()] = window.qactions['mashup']
self.mashup_view.updateView()
self.tab_to_view[self.tabs.currentIndex()] = self.get_current_tab()
except Exception, e:
print "EXCEPTION: ", str(e)
def mashup_unselected(self):
#print "MASHUP UN"
self.stack.setCurrentIndex(
self.tab_to_stack_idx[self.tabs.currentIndex()])
self.tabs.setTabText(self.tabs.currentIndex(),
self.stack.currentWidget().get_title())
def pipeline_change(self, checked):
if checked:
#print "PIPELINE SELECTED"
self.pipeline_selected()
else:
#print "PIPELINE UNSELECTED"
self.pipeline_unselected()
self.view_changed()
def history_change(self, checked):
from vistrails_window import _app
if checked:
#print "HISTORY SELECTED"
self.history_selected()
else:
#print "HISTORY UNSELECTED"
self.history_unselected()
self.view_changed()
def search_change(self, checked):
if checked:
self.query_selected()
else:
self.query_unselected()
self.view_changed()
def explore_change(self, checked):
if checked:
self.explore_selected()
else:
self.explore_unselected()
self.view_changed()
def provenance_change(self, checked):
if checked:
self.provenance_selected()
else:
self.provenance_unselected()
self.view_changed()
def mashup_change(self, checked):
if checked:
self.mashup_selected()
else:
self.mashup_unselected()
self.view_changed()
def show_group(self):
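# For each selected group/abstraction module, open its internal pipeline
# in a fresh pipeline tab, fit it to the view and mark it read-only.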
pipelineView = self.controller.current_pipeline_view
items = pipelineView.get_selected_item_ids(True)
if items is not None:
for m_id in items[0]:
module = pipelineView.current_pipeline.modules[m_id]
if module.is_group() or module.is_abstraction():
newPipelineView = self.add_pipeline_view()
newPipelineView.controller.current_pipeline_view = \
newPipelineView.scene()
module.pipeline.ensure_connection_specs()
newPipelineView.scene().setupScene(module.pipeline)
newPipelineView.scene().current_pipeline = module.pipeline
newPipelineView.scene().fitToView(newPipelineView, True)
newPipelineView.setReadOnlyMode(True)
def create_view(self, klass, add_tab=True):
view = klass(self)
view.set_vistrail_view(self)
idx = self.stack.addWidget(view)
view.set_index(idx)
if add_tab:
tab_idx = self.tabs.addTab(view.get_title())
view.set_tab_idx(tab_idx)
self.tab_to_stack_idx[tab_idx] = idx
self.tab_to_view[tab_idx] = view
if self.isTabDetachable(tab_idx):
self.tabs.setTabToolTip(tab_idx, "Double-click to detach it")
self.connect(view, QtCore.SIGNAL("windowTitleChanged"),
self.view_title_changed)
if self.tabs.count() == 1:
#self.tabs.hide()
self.tabs.setTabsClosable(False)
else:
self.tabs.setTabsClosable(True)
self.updateTabsTooTip()
self.tabs.show()
return view
def detach_history_view(self):
from gui.vistrails_window import _app
view = self.version_view
window = _app.history_view
self.version_index = window.stack.addWidget(view)
window.stack.setCurrentIndex(self.version_index)
window.view = view
def detach_view(self, tab_idx):
from gui.vistrails_window import QBaseViewWindow
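# Pop the view out of the tab bar and stack and host it in its own
# QBaseViewWindow; remember it in self.detached_views so view_changed()
# and closeDetachedViews() can still reach it, and listen for
# "viewWasClosed" to clean up when the window is closed.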
if self.tab_to_stack_idx.has_key(tab_idx):
stack_index = self.tab_to_stack_idx[tab_idx]
view = self.stack.widget(stack_index)
title = view.get_long_title()
self.remove_view_by_index(tab_idx)
window = QBaseViewWindow(view=view, parent=None)
view.set_title(title)
window.setWindowTitle(title)
self.connect(window, QtCore.SIGNAL("viewWasClosed"),
self.detachedViewWasClosed)
self.detached_views[view] = window
window.move(self.rect().center())
window.show()
else:
print "Error detach_view: ", tab_idx, self.tab_to_stack_idx
def isTabDetachable(self, index):
if self.tab_to_view.has_key(index):
return self.tabs.count() > 1 and self.tab_to_view[index].detachable
return False
def closeDetachedViews(self):
windows = self.detached_views.values()
for w in windows:
if w:
w.close()
def detachedViewWasClosed(self, view):
if self.controller.current_pipeline_view.parent() == view:
self.controller.current_pipeline_view = None
self.activateWindow()
self.reset_tab_view_to_current()
self.view_changed()
del self.detached_views[view]
def updateTabsTooTip(self):
for i in range(self.tabs.count()):
if self.isTabDetachable(i):
self.tabs.setTabToolTip(i, "Double-click to detach it")
else:
self.tabs.setTabToolTip(i, "")
def tabDoubleClicked(self, index, pos):
if self.isTabDetachable(index):
self.detach_view(index)
def view_title_changed(self, view):
if self.stack.currentWidget() == view:
self.tabs.setTabText(self.tabs.currentIndex(), view.windowTitle())
def update_indexes(self, rm_tab_idx, rm_stack_idx):
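# Qt renumbers the remaining tabs and stack widgets after a removal, so
# shift the cached tab_to_stack_idx / tab_state mappings and each view's
# stored stack index and tab index down by one to keep them in sync.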
for (t,s) in self.tab_to_stack_idx.iteritems():
if s > rm_stack_idx:
self.tab_to_stack_idx[t] -= 1
tabs = self.tab_to_stack_idx.keys()
tabs.sort()
for t in tabs:
if t > rm_tab_idx:
self.tab_to_stack_idx[t-1] = self.tab_to_stack_idx[t]
self.tab_state[t-1] = self.tab_state[t]
del self.tab_to_stack_idx[tabs[-1]]
del self.tab_state[tabs[-1]]
for idx in range(self.stack.count()):
if idx >= rm_stack_idx:
view = self.get_tab(idx)
view.set_index(idx)
if view.tab_idx > rm_tab_idx:
view.set_tab_idx(view.tab_idx-1)
def remove_view_by_index(self, index):
self.disconnect(self.tabs, QtCore.SIGNAL("currentChanged(int)"),
self.tab_changed)
close_current = False
if index == self.tabs.currentIndex():
close_current = True
stack_idx = self.tab_to_stack_idx[index]
#print "\n\n >>>>> remove_view_by_index ", index, stack_idx, self.tabs.currentIndex()
self.tabs.removeTab(index)
del self.tab_to_view[index]
if stack_idx >= 0:
view = self.stack.widget(stack_idx)
self.disconnect(view, QtCore.SIGNAL("windowTitleChanged"),
self.view_title_changed)
self.stack.removeWidget(view)
self.update_indexes(index, stack_idx)
if self.tabs.count() == 1:
self.tabs.setTabsClosable(False)
self.updateTabsTooTip()
if close_current:
if index >= self.tabs.count():
new_index = index - 1
else:
new_index = index
self.tab_changed(new_index)
self.connect(self.tabs, QtCore.SIGNAL("currentChanged(int)"),
self.tab_changed)
# self.tabs.setCurrentIndex(new_index)
# print self.current_tab
# self.view_changed()
def switch_to_tab(self, index):
# if index < 0:
# index = self.tabs.count() + index
self.tabs.setCurrentIndex(index)
self.tab_changed(index)
def get_current_tab(self, query_top_level=False):
window = QtGui.QApplication.activeWindow()
if window in self.detached_views.values():
return window.view
else:
#if none of the detached views is active we will assume that the
#window containing this vistrail has focus
widget = self.stack.currentWidget()
if not query_top_level and type(widget) == QQueryView:
widget = widget.get_current_view()
return widget
def get_current_outer_tab(self):
window = QtGui.QApplication.activeWindow()
if window in self.detached_views.values():
return window.view
else:
#if none of the detached views is active we will assume that the
#window containing this vistrail has focus
return self.stack.currentWidget()
def get_tab(self, stack_idx):
widget = self.stack.widget(stack_idx)
if type(widget) == QQueryView:
widget = widget.get_current_view()
return widget
def view_changed(self):
from gui.vistrails_window import _app
_app.closeNotPinPalettes()
#view = self.stack.currentWidget()
view = self.get_current_outer_tab()
#print "changing tab from: ",self.current_tab, " to ", view
#print self.tab_to_stack_idx
if view != self.current_tab:
#print "!!unset_action_links of ", self.current_tab
_app.unset_action_links(self.current_tab)
self.current_tab = view
# print "\n!! _app.notifications: "
# for (k, v) in _app.notifications.iteritems():
# print " ", k, " (%s) "%len(v)
# for m in v:
# print " ", m
# print "\n!!set_action_defaults of ", self.current_tab
_app.set_action_defaults(self.current_tab)
#print "\n!!set_action_links of ", self.current_tab
_app.set_action_links(self.current_tab.action_links, self.current_tab,
self)
#else:
# print "tabs the same. do nothing"
self.showCurrentViewPalettes()
if isinstance(view, QQueryView):
_app.notify("controller_changed", view.p_controller)
_app.notify("entry_klass_changed", QueryEntry)
else:
_app.notify("entry_klass_changed", ParameterEntry)
_app.notify("controller_changed", self.controller)
if self.window().isActiveWindow():
if self.isTabDetachable(self.tabs.currentIndex()):
self.tabs.setTabToolTip(self.tabs.currentIndex(),
"Double-click to detach it")
else:
self.tabs.setTabToolTip(self.tabs.currentIndex(),
"")
if get_vistrails_configuration().detachHistoryView:
if hasattr(self.window(), 'qactions'):
_app = self.window()
_app.history_view.stack.setCurrentIndex(self.version_index)
def showCurrentViewPalettes(self):
current_tab = self.get_current_tab(True)
for dock_loc, palette_klass in current_tab.palette_layout.iteritems():
palette_instance = palette_klass.instance()
window = palette_instance.toolWindow().parentWidget()
if window:
current_loc = window.dockWidgetArea(palette_instance.toolWindow())
else:
current_loc = QtCore.Qt.NoDockWidgetArea
#print ">> P:", palette_instance.__class__.__name__, current_loc, \
# dock_loc
if current_loc == dock_loc:
# palette_instance.get_action().trigger()
palette_instance.set_visible(True)
def tab_changed(self, index):
#print 'raw tab_changed', index
if index < 0 or self.controller is None:
return
from gui.vistrails_window import _app, QVistrailViewWindow
self.stack.setCurrentIndex(self.tab_to_stack_idx[index])
if isinstance(self.window(),QVistrailViewWindow):
window = self.window()
else:
window = _app
#print window
for action in window.view_action_group.actions():
action.setChecked(False)
self.selected_mode = None
action = None
if index in self.tab_state:
action = self.tab_state[index]
# if action is not None:
# print 'running toggle'
# action.toggle()
# action.setChecked(True)
else:
self.tab_state[index] = window.qactions['pipeline']
if action is not None:
action.setChecked(True)
# _app.view_triggered(action)
view = self.stack.widget(self.tab_to_stack_idx[index])
#print "view changed: ", view
self.set_to_current(view)
def set_to_current(self, view):
from gui.vistrails_window import _app, QVistrailViewWindow
if isinstance(view, QDiffView):
view.set_to_current()
#print "view changed!", self.controller, \
# self.controller.current_version
_app.notify("controller_changed", self.controller)
self.reset_version_view()
elif isinstance(view, QLogView):
view.set_to_current()
#print "view changed!", self.controller, \
# self.controller.current_version
_app.notify("controller_changed", self.controller)
self.reset_version_view()
elif isinstance(view, QPipelineView):
#print "PIPELINE_VIEW NEW SCENE:", id(view.scene())
# need to set the controller's version, pipeline, view
# to this view...
# self.controller.current_version = view.current_version
# self.controller.current_pipeline = view.current_pipeline
view.set_to_current()
#print "view changed!", self.controller, \
# self.controller.current_version
real_view = self.stack.currentWidget()
if isinstance(real_view, QQueryView):
_app.notify("controller_changed", real_view.p_controller)
else:
_app.notify("controller_changed", self.controller)
self.reset_version_view()
def create_pipeline_view(self):
view = self.create_view(QPipelineView)
self.connect(view.scene(), QtCore.SIGNAL('moduleSelected'),
self.gen_module_selected(view))
view.set_controller(self.controller)
view.set_to_current()
self.set_notification('module_done_configure', view.done_configure)
#self.switch_to_tab(view.tab_idx)
return view
def add_pipeline_view(self):
view = self.create_pipeline_view()
self.switch_to_tab(view.tab_idx)
return view
def create_version_view(self):
view = self.create_view(QVersionTreeView, False)
self.connect(view.scene(),
QtCore.SIGNAL('versionSelected(int,bool,bool,bool,bool)'),
self.version_selected)
self.connect(view.scene(),
QtCore.SIGNAL('diffRequested(int,int)'),
self.diff_requested)
return view
def create_query_view(self):
view = self.create_view(QQueryView, False)
self.connect(view.pipeline_view.scene(),
QtCore.SIGNAL('moduleSelected'),
self.gen_module_selected(view.pipeline_view))
# self.connect(view.version_result_view.scene(),
# QtCore.SIGNAL('versionSelected(int,bool,bool,bool,bool)'),
# self.version_selected)
# self.connect(view.version_result_view.scene(),
# QtCore.SIGNAL('diffRequested(int,int)'),
# self.diff_requested)
self.set_notification('query_changed', view.query_changed)
self.set_notification('version_changed', view.version_changed)
return view
def create_diff_view(self):
view = self.create_view(QDiffView)
self.connect(view.scene(), QtCore.SIGNAL('moduleSelected'),
self.gen_module_selected(view))
return view
def create_pe_view(self):
view = self.create_view(QParamExploreView, False)
self.set_notification('controller_changed', view.set_controller)
self.set_notification('pipeline_changed', view.updatePipeline)
return view
def create_log_view(self):
from gui.vistrails_window import _app
view = self.create_view(QLogView, False)
self.set_notification('execution_changed', view.execution_changed)
return view
def create_mashup_view(self):
#print "******* create mashup view"
from gui.vistrails_window import _app
view = self.create_view(QMashupView, False)
view.set_controller(self.controller)
self.set_notification('controller_changed', view.controllerChanged)
self.set_notification('alias_changed', view.aliasChanged)
self.set_notification('version_changed', view.versionChanged)
return view
def gen_module_selected(self, view):
def module_selected(module_id, selection = []):
from gui.vistrails_window import _app
pipeline = view.scene().current_pipeline
if pipeline is not None and module_id in pipeline.modules:
module = pipeline.modules[module_id]
_app.notify('module_changed', module)
else:
_app.notify('module_changed', None)
return module_selected
def version_selected(self, version_id, by_click, do_validate=True,
from_root=False, double_click=False):
from gui.vistrails_window import _app
from gui.vis_diff import QDiffView
if hasattr(self.window(), 'qactions'):
window = self.window()
else:
window = _app
#print 'got version selected:', version_id
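# Work out which view the selection belongs to: a focused detached
# window, the previously active detached view, or the view behind the
# currently selected tab.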
if _app._focus_owner in self.detached_views.values():
view = _app._focus_owner.view
elif _app._previous_view in self.detached_views:
view = _app._previous_view
else:
view = self.stack.widget(
self.tab_to_stack_idx[self.tabs.currentIndex()])
if view and by_click:
self.controller.change_selected_version(version_id, True,
do_validate, from_root)
view.scene().fitToView(view, True)
if double_click:
# view = self.create_pipeline_view()
# view.set_controller(self.controller)
# view.set_to_current()
# self.tabs.setCurrentWidget(view.parent())
window.qactions['pipeline'].trigger()
self.controller.reset_redo_stack()
if view and not isinstance(view, QDiffView):
if view not in self.detached_views:
view.set_title(self.controller.get_pipeline_name())
else:
view.set_title(view.get_long_title())
view.window().setWindowTitle(view.get_long_title())
_app.notify("version_changed", self.controller.current_version)
_app.notify("pipeline_changed", self.controller.current_pipeline)
def query_version_selected(self, search=None, version_id=None):
if version_id is None:
self.query_view.set_result_level(
self.query_view.query_controller.LEVEL_VISTRAIL)
self.query_view.query_controller.set_search(search)
else:
self.query_view.set_result_level(
self.query_view.query_controller.LEVEL_WORKFLOW)
self.query_view.query_controller.set_search(search)
self.query_view.result_version_selected(version_id, True,
double_click=True)
window = self.window()
window.qactions['search'].trigger()
def diff_requested(self, version_a, version_b, vistrail_b=None):
"""diff_requested(self, id, id, Vistrail) -> None
Request a diff between two versions. If vistrail_b is
specified, the second version will be derived from that
vistrail instead of the common vistrail controlled by this
view.
"""
view = self.create_diff_view()
view.set_controller(self.controller)
view.set_diff(version_a, version_b, vistrail_b)
self.switch_to_tab(view.tab_idx)
view.scene().fitToView(view, True)
self.view_changed()
def save_vistrail(self, locator_class, force_choose_locator=False, export=False):
"""
force_choose_locator=True triggers 'save as' behavior
export=True does not update the current controller
"""
locator = self.controller.locator
if locator_class is None and locator is not None:
locator_class = type(locator)
#print "CALLED SAVE VISTRAIL", locator_class
self.flush_changes()
gui_get = locator_class.save_from_gui
# get a locator to write to
if force_choose_locator:
locator = gui_get(self, Vistrail.vtType,
self.controller.locator)
else:
locator = (self.controller.locator or
gui_get(self, Vistrail.vtType,
self.controller.locator))
if locator == untitled_locator():
locator = gui_get(self, Vistrail.vtType,
self.controller.locator)
# if couldn't get one, ignore the request
if not locator:
return False
try:
self.controller.write_vistrail(locator, export=export)
except Exception, e:
import traceback
debug.critical('Failed to save vistrail: %s' % str(e),
traceback.format_exc())
raise
return False
if export:
return self.controller.locator
# update collection
try:
thumb_cache = ThumbnailCache.getInstance()
self.controller.vistrail.thumbnails = \
self.controller.find_thumbnails(
tags_only=thumb_cache.conf.tagsOnly)
self.controller.vistrail.abstractions = \
self.controller.find_abstractions(self.controller.vistrail,
True)
self.controller.vistrail.mashups = self.controller._mashups
collection = Collection.getInstance()
url = locator.to_url()
entity = collection.updateVistrail(url, self.controller.vistrail)
# add to relevant workspace categories
collection.add_to_workspace(entity)
collection.commit()
except Exception, e:
import traceback
debug.critical('Failed to index vistrail', traceback.format_exc())
from gui.vistrails_window import _app
# update recent files menu items
if not self.is_abstraction:
_app.set_current_locator(locator)
_app.view_changed(self)
# reload workspace entry
from gui.collection.workspace import QWorkspaceWindow
QWorkspaceWindow.instance().add_vt_window(self)
return locator
def save_vistrail_as(self, locator_class):
#print "CALLED SAVE AS VISTRAIL", locator_class
self.save_vistrail(locator_class, force_choose_locator=True)
def export_vistrail(self, locator_class):
""" Exports vistrail without updating the current vistrail """
self.save_vistrail(locator_class, force_choose_locator=True, export=True)
def export_stable(self, locator_class=XMLFileLocator,
force_choose_locator=True):
""" save vistrail to previous stable version """
self.flush_changes()
gui_get = locator_class.save_from_gui
if force_choose_locator:
locator = gui_get(self, Vistrail.vtType,
self.controller.locator)
else:
locator = (self.controller.locator or
gui_get(self, Vistrail.vtType, self.controller.locator))
if locator == untitled_locator():
locator = gui_get(self, Vistrail.vtType,
self.controller.locator)
if not locator:
return False
self.controller.write_vistrail(locator, '1.0.2', True)
return True
# FIXME normalize workflow/log/registry!!!
def save_workflow(self, locator_class, force_choose_locator=True):
self.flush_changes()
gui_get = locator_class.save_from_gui
if force_choose_locator:
locator = gui_get(self, Pipeline.vtType, self.controller.locator)
else:
locator = (self.controller.locator or
gui_get(self, Pipeline.vtType,
self.controller.locator))
if locator == untitled_locator():
locator = gui_get(self, Pipeline.vtType, self.controller.locator)
if not locator:
return False
self.controller.write_workflow(locator)
def save_log(self, locator_class, force_choose_locator=True):
self.flush_changes()
gui_get = locator_class.save_from_gui
if force_choose_locator:
locator = gui_get(self, Log.vtType,
self.controller.locator)
else:
locator = (self.controller.locator or
gui_get(self, Log.vtType,
self.controller.locator))
if locator == untitled_locator():
locator = gui_get(self, Log.vtType,
self.controller.locator)
if not locator:
return False
self.controller.write_log(locator)
def save_registry(self, locator_class, force_choose_locator=True):
self.flush_changes()
gui_get = locator_class.save_from_gui
if force_choose_locator:
locator = gui_get(self, ModuleRegistry.vtType,
self.controller.locator)
else:
locator = (self.controller.locator or
gui_get(self, ModuleRegistry.vtType,
self.controller.locator))
if locator == untitled_locator():
locator = gui_get(self, ModuleRegistry.vtType,
self.controller.locator)
if not locator:
return False
self.controller.write_registry(locator)
def save_opm(self, locator_class=XMLFileLocator,
force_choose_locator=True):
self.flush_changes()
gui_get = locator_class.save_from_gui
if force_choose_locator:
locator = gui_get(self, OpmGraph.vtType,
self.controller.locator)
else:
locator = (self.controller.locator or
gui_get(self, OpmGraph.vtType,
self.controller.locator))
if locator == untitled_locator():
locator = gui_get(self, OpmGraph.vtType,
self.controller.locator)
if not locator:
return False
self.controller.write_opm(locator)
def has_changes(self):
return self.controller.changed
def flush_changes(self):
"""Flush changes in the vistrail before closing or saving.
"""
# Quick workaround for notes focus out bug (ticket #182)
# There's probably a much better way to fix this.
from gui.version_prop import QVersionProp
prop = QVersionProp.instance()
prop.versionNotes.commit_changes()
def execute(self):
# makes sure we are not already executing
if self.is_executing:
return
self.is_executing = True
view = self.get_current_tab()
try:
if hasattr(view, 'execute'):
view.setFocus(QtCore.Qt.MouseFocusReason)
view.execute()
finally:
self.is_executing = False
def publish_to_web(self):
view = self.get_current_tab()
if hasattr(view, 'publish_to_web'):
view.publish_to_web()
def publish_to_paper(self):
view = self.get_current_tab()
if hasattr(view, 'publish_to_paper'):
view.publish_to_paper()
def open_mashup_from_mashuptrail_id(self, mashuptrail_id, mashupVersion):
for mashuptrail in self.controller._mashups:
if str(mashuptrail.id) == mashuptrail_id:
mashup = mashuptrail.getMashup(mashupVersion)
self.open_mashup(mashup)
break
def open_mashup(self, mashup):
"""open_mashup(mashup: Mashup) -> None
It will switch to version view, select the corresponding node
and run the mashup """
from gui.version_prop import QVersionProp
#first we will show the history view and select the version that has
#this mashup
vt_version = mashup.version
window = self.window()
window.qactions['history'].trigger()
self.version_selected(vt_version, by_click=True)
self.version_view.select_current_version()
#then we will execute the mashup
version_prop = QVersionProp.instance()
version_prop.versionMashups.openMashup(mashup.id)
def edit_mashup(self, mashup):
"""edit_mashup(mashup: Mashup) -> None
It will select the corresponding node, switch to mashup view,
and select mashup """
from gui.mashups.mashups_inspector import QMashupsInspector
vt_version = mashup.version
window = self.window()
window.qactions['history'].trigger()
self.version_selected(vt_version, by_click=True)
self.version_view.select_current_version()
window.qactions['mashup'].trigger()
inspector = QMashupsInspector.instance()
inspector.mashupsList.selectMashup(mashup.name)
##########################################################################
# Undo/redo
def set_pipeline_selection(self, old_action, new_action):
# need to check if anything on module changed or
# any connections changed
module_types = set(['module', 'group', 'abstraction'])
module_child_types = set(['function', 'parameter', 'location',
'portSpec', 'annotation'])
conn_types = set(['connection'])
conn_child_types = set(['port'])
view = self.stack.currentWidget()
if not isinstance(view, QPipelineView):
return
pipeline_scene = view.scene()
if old_action is None:
old_action_id = 0
else:
old_action_id = old_action.id
if new_action is None:
new_action_id = 0
else:
new_action_id = new_action.id
action = self.controller.vistrail.general_action_chain(old_action_id,
new_action_id)
def module_change():
module_ids = set()
function_ids = set()
for op in action.operations:
if op.what in module_types and \
(op.vtType == 'change' or op.vtType == 'add'):
module_ids.add(op.objectId)
elif op.what in module_child_types and \
(op.vtType == 'change' or op.vtType == 'add' or
op.vtType == 'delete'):
if op.what == 'parameter':
function_ids.add(op.parentObjId)
else:
module_ids.add(op.parentObjId)
if len(function_ids) > 0:
for m_id, module in \
self.controller.current_pipeline.modules.iteritems():
to_discard = set()
for f_id in function_ids:
if module.has_function_with_real_id(f_id):
module_ids.add(m_id)
to_discard.add(f_id)
function_ids -= to_discard
for id in module_ids:
if id in pipeline_scene.modules:
pipeline_scene.modules[id].setSelected(True)
def connection_change():
conn_ids = set()
for op in action.operations:
if op.what in conn_types and \
(op.vtType == 'change' or op.vtType == 'add'):
conn_ids.add(op.objectId)
elif op.what in conn_child_types and \
(op.vtType == 'change' or op.vtType == 'add' or
op.vtType == 'delete'):
conn_ids.add(op.parentObjId)
for id in conn_ids:
if id in pipeline_scene.connections:
pipeline_scene.connections[id].setSelected(True)
module_change()
connection_change()
def undo(self):
(old_action, new_action) = self.controller.undo()
self.set_pipeline_selection(old_action, new_action)
if new_action is not None:
return new_action.id
return 0
def redo(self):
(old_action, new_action) = self.controller.redo()
self.set_pipeline_selection(old_action, new_action)
return new_action.id
# def updateCursorState(self, mode):
# """ updateCursorState(mode: Int) -> None
# Change cursor state in all different modes.
# """
# self.pipelineTab.pipelineView.setDefaultCursorState(mode)
# self.versionTab.versionView.setDefaultCursorState(mode)
# self.queryTab.pipelineView.setDefaultCursorState(mode)
# if self.parent().parent().parent().pipViewAction.isChecked():
# self.pipelineTab.pipelineView.pipFrame.graphicsView.setDefaultCursorState(mode)
# self.versionTab.versionView.pipFrame.graphicsView.setDefaultCursorState(mode)
# def flush_changes(self):
# """Flush changes in the vistrail before closing or saving.
# """
# # Quick workaround for notes focus out bug (ticket #182)
# # There's probably a much better way to fix this.
# prop = self.versionTab.versionProp
# prop.versionNotes.commit_changes()
# def setup_view(self, version=None):
# """setup_view(version = None:int) -> None
# Sets up the correct view for a fresh vistrail.
# Previously, there was a method setInitialView and another
# setOpenView.
# They were supposed to do different things but the code was
# essentially identical.
# FIXME: this means that the different calls are being handled
# somewhere else in the code. Figure this out."""
# if version is None:
# self.controller.select_latest_version()
# version = self.controller.current_version
# else:
# self.versionSelected(version, True, True, False)
# self.controller.recompute_terse_graph()
# self.controller.invalidate_version_tree(True)
# self.setPIPMode(True)
# self.setQueryMode(False)
# def setPIPMode(self, on):
# """ setPIPMode(on: bool) -> None
# Set the PIP state for the view
# """
# self.pipelineTab.pipelineView.setPIPEnabled(on)
# self.versionTab.versionView.setPIPEnabled(on)
# def setQueryMode(self, on):
# """ setQueryMode(on: bool) -> None
# Set the Reset Query button mode for the view
# """
# self.pipelineTab.pipelineView.setQueryEnabled(on)
# self.versionTab.versionView.setQueryEnabled(on)
# self.queryTab.pipelineView.setQueryEnabled(on)
# def setMethodsMode(self, on):
# """ setMethodsMode(on: bool) -> None
# Set the methods panel state for the view
# """
# if on:
# self.pipelineTab.methodPalette.toolWindow().show()
# else:
# self.pipelineTab.methodPalette.toolWindow().hide()
# def setSetMethodsMode(self, on):
# """ setSetMethodsMode(on: bool) -> None
# Set the set methods panel state for the view
# """
# if on:
# self.pipelineTab.moduleMethods.toolWindow().show()
# else:
# self.pipelineTab.moduleMethods.toolWindow().hide()
# def setPropertiesMode(self, on):
# """ setPropertiesMode(on: bool) -> None
# Set the properties panel state for the view
# """
# if on:
# self.versionTab.versionProp.toolWindow().show()
# else:
# self.versionTab.versionProp.toolWindow().hide()
# def setPropertiesOverlayMode(self, on):
# """ setPropertiesMode(on: bool) -> None
# Set the properties overlay state for the view
# """
# if on:
# self.versionTab.versionView.versionProp.show()
# else:
# self.versionTab.versionView.versionProp.hide()
# def setModuleConfigMode(self, on):
# """ setModuleConfigMode(on: bool) -> None
# Set the Module configuration panel state for the view
# """
# if on:
# self.pipelineTab.moduleConfig.toolWindow().show()
# else:
# self.pipelineTab.moduleConfig.toolWindow().hide()
# def viewModeChanged(self, index):
# """ viewModeChanged(index: int) -> None
# Slot for switching different views when the tab's current
# widget is changed
# """
# if self.stackedWidget.count()>index:
# self.stackedWidget.setCurrentIndex(index)
def setVistrailVarsMode(self, on):
""" setVistrailVarsMode(on: bool) -> None
Set the vistrail variable panel state for the view
"""
if on:
self.pipelineTab.vistrailVars.toolWindow().show()
else:
self.pipelineTab.vistrailVars.toolWindow().hide()
# def pasteToCurrentTab(self):
# index = self.stackedWidget.currentIndex()
# if index == 0:
# self.pipelineTab.pipelineView.pasteFromClipboard()
# elif index == 2:
# self.queryTab.pipelineView.pasteFromClipboard()
# def selectAll(self):
# index = self.stackedWidget.currentIndex()
# if index == 0:
# self.pipelineTab.pipelineView.scene().selectAll()
# elif index == 2:
# self.queryTab.pipelineView.scene().selectAll()
# def sizeHint(self):
# """ sizeHint(self) -> QSize
# Return recommended size of the widget
# """
# return QtCore.QSize(1024, 768)
# def set_vistrail(self, vistrail, locator=None, abstractions=None,
# thumbnails=None):
# """ set_vistrail(vistrail: Vistrail, locator: BaseLocator) -> None
# Assign a vistrail to this view, and start interacting with it
# """
# self.vistrail = vistrail
# self.locator = locator
# self.controller.set_vistrail(vistrail, locator, abstractions, thumbnails)
# self.versionTab.setController(self.controller)
# self.pipelineTab.setController(self.controller)
# self.peTab.setController(self.controller)
def stateChanged(self):
""" stateChanged() -> None
Handles 'stateChanged' signal from VistrailController """
from gui.vistrails_window import _app
_app.notify("state_changed", self)
_app.state_changed(self)
# def stateChanged(self):
# """ stateChanged() -> None
# Handles 'stateChanged' signal from VistrailController
# Update the window and tab title
# """
# title = self.controller.name
# if title=='':
# title = 'untitled%s'%vistrails_default_file_type()
# if self.controller.changed:
# title += '*'
# self.setWindowTitle(title)
# # propagate the state change to the version prop
# # maybe in the future we should propagate as a signal
# versionId = self.controller.current_version
# self.versionTab.versionProp.updateVersion(versionId)
# def emitDockBackSignal(self):
# """ emitDockBackSignal() -> None
# Emit a signal for the View Manager to take this widget back
# """
# self.emit(QtCore.SIGNAL('dockBack'), self)
# def closeEvent(self, event):
# """ closeEvent(event: QCloseEvent) -> None
# Only close if we save information
# """
# if self.closeEventHandler:
# if self.closeEventHandler(self):
# event.accept()
# else:
# event.ignore()
# else:
# #I think there's a problem with two pipeline views and the same
# #scene on Macs. After assigning a new scene just before deleting
# #seems to solve the problem
# self.peTab.annotatedPipelineView.setScene(QtGui.QGraphicsScene())
# return QDockContainer.closeEvent(self, event)
# # super(QVistrailView, self).closeEvent(event)
# def queryVistrail(self, on=True):
# """ queryVistrail(on: bool) -> None
# Inspecting the query tab to get a pipeline for querying
# """
# if on:
# queryPipeline = self.queryTab.controller.current_pipeline
# if queryPipeline:
# self.controller.query_by_example(queryPipeline)
# self.setQueryMode(True)
# else:
# self.controller.set_search(None)
# self.setQueryMode(False)
# def createPopupMenu(self):
# """ createPopupMenu() -> QMenu
# Create a pop up menu that has a list of all tool windows of
# the current tab of the view. Tool windows can be toggled using
# this menu
# """
# return self.stackedWidget.currentWidget().createPopupMenu()
# def executeParameterExploration(self):
# """ executeParameterExploration() -> None
# Execute the current parameter exploration in the exploration tab
# """
# self.peTab.performParameterExploration()
# def versionSelected(self, versionId, byClick, doValidate=True,
# fromRoot=False):
# """ versionSelected(versionId: int, byClick: bool) -> None
# A version has been selected/unselected, update the controller
# and the pipeline view
# """
# if self.controller:
# if byClick:
# if self.controller.current_version > 0:
# if self.controller.has_move_actions():
# self.controller.flush_delayed_actions()
# self.controller.invalidate_version_tree(False)
# self.controller.reset_pipeline_view = byClick
# self.controller.change_selected_version(versionId, True,
# doValidate, fromRoot)
# versionId = self.controller.current_version
# self.controller.current_pipeline_view.fitToAllViews(True)
# self.redo_stack = []
# self.versionTab.versionProp.updateVersion(versionId)
# self.versionTab.versionView.versionProp.updateVersion(versionId)
# self.emit(QtCore.SIGNAL('versionSelectionChange'),versionId)
# self.execPipelineEnabled = versionId>-1
# self.execExploreEnabled = \
# self.controller.vistrail.get_paramexp(versionId) != None
# self.execDiffEnabled = False
# self.execExploreChange = False
# self.emit(QtCore.SIGNAL('execStateChange()'))
# return versionId
# def twoVersionsSelected(self, id1, id2):
# """ twoVersionsSelected(id1: Int, id2: Int) -> None
# Just echo the signal from the view
# """
# self.execDiffEnabled = True
# self.execDiffId1 = id1
# self.execDiffId2 = id2
# self.emit(QtCore.SIGNAL('execStateChange()'))
# def queryPipelineChange(self, notEmpty):
# """ queryPipelineChange(notEmpty: bool) -> None
# Update the status of tool bar buttons if there are
# modules on the query canvas
# """
# self.execQueryEnabled = notEmpty
# self.emit(QtCore.SIGNAL('execStateChange()'))
# def exploreChange(self, notEmpty):
# """ exploreChange(notEmpty: bool) -> None
# Update the status of tool bar buttons if there are
# parameters in the exploration canvas
# """
# self.execExploreEnabled = notEmpty
# self.emit(QtCore.SIGNAL('execStateChange()'))
# def checkModuleConfigPanel(self):
# """ checkModuleConfigPanel(self) -> None
# This will ask if user wants to save changes """
# self.pipelineTab.checkModuleConfigPanel()
# def can_redo(self):
# return len(self.redo_stack) <> 0
# def new_action(self, action):
# """new_action
# Handler for VistrailController.new_action
# """
# self.redo_stack = []
################################################################################
# FIXME: There is a bug on VisTrails that shows up if you load terminator.vt,
# open the image slices HW, undo about 300 times and then try to redo.
# This should be a test here, as soon as we have an api for that.
if __name__=="__main__":
# Initialize the Vistrails Application and Theme
import sys
from gui import qt, theme
app = qt.createBogusQtGuiApp(sys.argv)
theme.initializeCurrentTheme()
# Now visually test QPipelineView
vv = QVistrailView(None)
vv.show()
sys.exit(app.exec_())
|
|
size(800,2600)
def header():
font("Verdana", 12)
text("NodeBox Compliance Tests", 20, 60)
stroke(0)
line(0,60,WIDTH,60)
fontsize(8.5)
nostroke()
text("This functional suite tests all the available NodeBox functions, to see if they comply to their contract." , 20, 80, width=200)
def primitives(x, y):
nostroke()
rect(x, y, 50, 50)
x += 60
rect(x, y, 50, 50, 0.6)
x += 60
oval(x, y, 50, 50)
x += 60
star(x+25, y+25, 20, outer=25, inner=15)
x += 60
arrow(x+50, y+25, 50)
x += 60
arrow(x+50, y, 50, type=FORTYFIVE)
def basictext(x, y):
text("Hello", x, y)
x += 60
align(LEFT)
stroke(0)
nofill()
rect(x, y-12,50,20)
fill(0)
text("Hello", x, y, width=50)
x += 60
align(CENTER)
stroke(0)
nofill()
rect(x, y-12,50,20)
fill(0)
text("Hello", x, y, width=50)
x += 60
align(RIGHT)
stroke(0)
nofill()
rect(x, y-12,50,20)
fill(0)
text("Hello", x, y, width=50)
align(LEFT)
def textblock(x, y):
stroke(0)
nofill()
rect(x, y-12, 50, 50)
fill(0)
text("Lorem ipsum dolor sit amet, consectetuer adipiscing elit.", x, y, width=50, height=50)
x += 60
align(CENTER)
stroke(0)
nofill()
rect(x, y-12, 50, 50)
fill(0)
text("Lorem ipsum dolor sit amet, consectetuer adipiscing elit.", x, y, width=50, height=50)
x += 60
align(RIGHT)
stroke(0)
nofill()
rect(x, y-12, 50, 50)
fill(0)
text("Lorem ipsum dolor sit amet, consectetuer adipiscing elit.", x, y, width=50, height=50)
x += 60
align(JUSTIFY)
stroke(0)
nofill()
rect(x, y-12, 50, 50)
fill(0)
text("Lorem ipsum dolor sit amet, consectetuer adipiscing elit.", x, y, width=50, height=50)
def grays(x, y):
nostroke()
colormode(RGB)
for i in range(11):
fill(i/10.0)
rect(x, y, 50, 50)
fill(0)
text(str(i), x, y+62)
x += 60
def alphas(x, y):
nostroke()
colormode(RGB)
for i in range(11):
fill(0, i/10.0)
rect(x, y, 50, 50)
fill(0)
text(str(i), x, y+62)
x += 60
def _clr(x, y, *args):
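# Draw a 50x50 swatch for the given colour components (interpreted in the
# current colormode), label it with the raw values and return the x
# position of the next swatch.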
fill(args)
rect(x, y, 50, 50)
fill(0)
text(str(args), x, y+62)
return x + 60
def rgbColors(x, y):
nostroke()
colormode(RGB)
x = _clr(x, y, 0,0,0)
x = _clr(x, y, 0,0,1)
x = _clr(x, y, 0,1,0)
x = _clr(x, y, 0,1,1)
x = _clr(x, y, 1,0,0)
x = _clr(x, y, 1,0,1)
x = _clr(x, y, 1,1,0)
x = _clr(x, y, 1,1,1)
def cmykColors(x, y):
nostroke()
colormode(CMYK)
x = _clr(x, y, 0,0,0,1)
x = _clr(x, y, 0,0,1,0)
x = _clr(x, y, 0,1,0,0)
x = _clr(x, y, 1,0,0,0)
x = _clr(x, y, 1,1,0,0)
x = _clr(x, y, 0,1,1,0)
x = _clr(x, y, 1,1,1,0)
x = _clr(x, y, 0,0,0,0)
def hsbColors(x, y):
nostroke()
colormode(HSB)
x = _clr(x, y, 0,0,0)
x = _clr(x, y, 0,0,1)
x = _clr(x, y, 0,1,0)
x = _clr(x, y, 0,1,1)
x = _clr(x, y, 1,0,0)
x = _clr(x, y, 1,0,1)
x = _clr(x, y, 1,1,0)
x = _clr(x, y, 1,1,1)
def marker(y,h=25):
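# Draw a red reference line h pixels below y so each translated/rotated
# section can be checked against a fixed baseline.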
stroke(1,0,0)
line(0, y+h, WIDTH, y+h)
# Draw the header
header()
# Draw the primitives at their first position
nostroke()
text("Basic primitives", 20, 165)
primitives(140,140)
marker(140)
# Simple translation
translate(0, 140)
nostroke()
text("Translated primitives", 20, 165)
primitives(140,140)
marker(140)
# Translation and rotation
translate(0, 140)
nostroke()
text("Rotated primitives", 20, 165)
push()
rotate(45)
primitives(140,140)
pop()
marker(140)
# Scaling
translate(0, 140)
nostroke()
text("Scaled primitives", 20, 165)
push()
scale(0.5)
primitives(140,140)
pop()
marker(140)
# Text
translate(0, 140)
nostroke()
text("Basic text", 20, 165)
basictext(140, 165)
marker(140)
# Rotated Text
translate(0, 140)
nostroke()
text("Rotated text", 20, 165)
push()
rotate(45)
basictext(140, 165)
pop()
marker(140)
# Text blocks
translate(0, 140)
nostroke()
text("Text blocks", 20, 165)
textblock(140, 165)
marker(140)
# Rotated text blocks
translate(0, 140)
nostroke()
text("Rotated text blocks", 20, 165)
push()
rotate(45)
textblock(140, 165)
pop()
marker(140)
# Outlined text
translate(0, 140)
nostroke()
text("Outlined text", 20, 165)
fsize = fontsize()
fontsize(48)
fill(0.5, 0.5)
text("hamburgevons", 140, 165)
nofill()
stroke(0.2)
text("hamburgevons", 140, 165, outline=True)
fontsize(fsize)
fill(0)
marker(140)
# Grays
translate(0, 140)
nostroke()
text("Grays", 20, 165)
grays(140, 140)
marker(140)
# Alphas
translate(0, 140)
nostroke()
text("Alphas", 20, 165)
alphas(140, 140)
marker(140)
# RGB Colors
translate(0, 140)
nostroke()
text("RGB Colors", 20, 165)
rgbColors(140, 140)
marker(140)
# HSB Colors
translate(0, 140)
nostroke()
text("HSB Colors", 20, 165)
hsbColors(140, 140)
marker(140)
# CMYK Colors
translate(0, 140)
nostroke()
text("CMYK Colors", 20, 165)
cmykColors(140, 140)
marker(140)
# Images
translate(0, 140)
nostroke()
text("Images", 20, 165)
_ctx.noImagesHint = False
image("icon.tif", 140,140,width=50)
push()
translate(60,0)
rotate(90)
image("icon.tif", 140,140,width=50)
pop()
push()
translate(140,0)
scale(2.0)
image("icon.tif", 140,140,width=50)
pop()
marker(140)
# Paths
translate(0, 140)
nostroke()
text("Paths", 20, 165)
beginpath(165, 140)
lineto(140, 200)
curveto(160, 250, 160, 200, 190, 200)
p = endpath()
stroke(0)
nofill()
sw = strokewidth()
strokewidth(2)
push()
translate(60,0)
for pt in p:
pt.x += 60
pt.ctrl1.x += 60
pt.ctrl2.x += 60
drawpath(p)
pop()
strokewidth(sw)
marker(140)
|
|
##############################################################################
# DrugBank v4.3 XML parser
#
# eg 15/01/2016
##############################################################################
from xml.etree.ElementTree import iterparse
import os, cPickle
import re, math
def main():
base_dir = "../data/drugbank/"
#file_name = base_dir + "drugbank.xml"
file_name = base_dir + "test.xml"
parser = DrugBankXMLParser(file_name)
parser.parse()
drug = "DB00843"
target = "BE0000426" #"BE0004796" #"P22303"
for i in dir(parser):
print i
if i.startswith("drug"):
d = getattr(parser, i)
if drug in d: print d[drug]
elif i.startswith("target"):
d = getattr(parser, i)
if target in d: print d[target]
print parser.drug_to_target_to_values
drug_to_uniprots = parser.get_targets(target_types = set(["target", "enzyme"]), only_paction=False)
print drug_to_uniprots
return
class DrugBankXMLParser(object):
NS="{http://www.drugbank.ca}"
def __init__(self, filename):
self.file_name = filename
self.drug_to_name = {}
self.drug_to_description = {}
self.drug_to_type = {}
self.drug_to_groups = {}
self.drug_to_indication = {}
self.drug_to_pharmacodynamics = {}
self.drug_to_moa = {}
self.drug_to_toxicity = {}
self.drug_to_synonyms = {}
self.drug_to_products = {}
self.drug_to_brands = {}
self.drug_to_uniprot = {}
self.drug_to_interactions = {}
self.drug_to_pubchem = {}
self.drug_to_pubchem_substance = {}
self.drug_to_kegg = {}
self.drug_to_kegg_compound = {}
self.drug_to_pharmgkb = {}
self.drug_to_chembl = {}
self.drug_to_target_to_values = {} # drug - target - (type {target / enzyme / transporter / carrier}, known action, [action types])
self.drug_to_categories = {}
self.drug_to_atc_codes = {}
self.drug_to_inchi_key = {}
self.drug_to_smiles = {}
self.target_to_name = {}
self.target_to_gene = {}
self.target_to_uniprot = {}
return
def parse(self):
# get an iterable
context = iterparse(self.file_name, ["start", "end"])
# turn it into an iterator
context = iter(context)
# get the root element
event, root = context.next()
state_stack = [ root.tag ]
drug_id = None
drug_type = None
drug_id_partner = None
current_target = None
resource = None
current_property = None
target_types = set(map(lambda x: self.NS+x, ["target", "enzyme", "carrier", "transporter"]))
target_types_plural = set(map(lambda x: x+"s", target_types))
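# Stream the document instead of loading it at once: state_stack mirrors
# the chain of currently open elements, so on an "end" event the parent
# tags (state_stack[-2], state_stack[-3], ...) identify which section the
# element's text belongs to; elem.clear() afterwards discards processed
# subtrees to keep memory bounded on the full DrugBank dump.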
for (event, elem) in context:
if event == "start":
state_stack.append(elem.tag)
if len(state_stack) <= 2 and elem.tag == self.NS+"drug":
if "type" in elem.attrib:
drug_type = elem.attrib["type"]
else:
drug_type = None
elif elem.tag == self.NS+"drugbank-id":
if "primary" in elem.attrib and state_stack[-3] == self.NS+"drugbank" and state_stack[-2] == self.NS+"drug":
drug_id = None
elif len(state_stack) > 3 and state_stack[-3] == self.NS+"drug-interactions" and state_stack[-2] == self.NS+"drug-interaction":
drug_id_partner = None
elif elem.tag == self.NS+"resource":
resource = None
elif elem.tag == self.NS+"property":
current_property = None
elif elem.tag in target_types:
if state_stack[-2] in target_types_plural:
current_target = None
if event == "end":
if len(state_stack) <= 2 and elem.tag == self.NS+"drug":
if "type" in elem.attrib:
drug_type = elem.attrib["type"]
else:
drug_type = None
if elem.tag == self.NS+"drugbank-id":
if state_stack[-2] == self.NS+"drug":
if "primary" in elem.attrib:
drug_id = elem.text
if drug_type is not None:
self.drug_to_type[drug_id] = drug_type
#print drug_id, drug_type
elif len(state_stack) > 3 and state_stack[-3] == self.NS+"drug-interactions" and state_stack[-2] == self.NS+"drug-interaction":
d = self.drug_to_interactions.setdefault(drug_id, {})
drug_id_partner = elem.text
d[drug_id_partner] = ""
elif elem.tag == self.NS+"name":
if len(state_stack) <= 3 and state_stack[-2] == self.NS+"drug":
self.drug_to_name[drug_id] = elem.text.strip()
elif state_stack[-2] == self.NS+"product" and state_stack[-3] == self.NS+"products":
product = elem.text
product = product.strip().encode('ascii','ignore')
if product != "":
self.drug_to_products.setdefault(drug_id, set()).add(product)
elif state_stack[-2] == self.NS+"international-brand" and state_stack[-3] == self.NS+"international-brands":
brand = elem.text
#idx = brand.find(" [")
#if idx != -1:
# brand = brand[:idx]
brand = brand.strip().encode('ascii','ignore')
if brand != "":
self.drug_to_brands.setdefault(drug_id, set()).add(brand)
#elif state_stack[-3] == self.NS+"targets" and state_stack[-2] == self.NS+"target":
elif state_stack[-3] in target_types_plural and state_stack[-2] in target_types:
self.target_to_name[current_target] = elem.text
elif elem.tag == self.NS+"description":
if state_stack[-2] == self.NS+"drug":
self.drug_to_description[drug_id] = elem.text
if len(state_stack) > 3 and state_stack[-3] == self.NS+"drug-interactions" and state_stack[-2] == self.NS+"drug-interaction":
self.drug_to_interactions[drug_id][drug_id_partner] = elem.text
elif elem.tag == self.NS+"group":
if state_stack[-2] == self.NS+"groups":
self.drug_to_groups.setdefault(drug_id, set()).add(elem.text)
elif elem.tag == self.NS+"indication":
if state_stack[-2] == self.NS+"drug":
self.drug_to_indication[drug_id] = elem.text
elif elem.tag == self.NS+"pharmacodynamics":
if state_stack[-2] == self.NS+"drug":
self.drug_to_pharmacodynamics[drug_id] = elem.text
elif elem.tag == self.NS+"mechanism-of-action":
if state_stack[-2] == self.NS+"drug":
self.drug_to_moa[drug_id] = elem.text
elif elem.tag == self.NS+"toxicity":
if state_stack[-2] == self.NS+"drug":
self.drug_to_toxicity[drug_id] = elem.text
elif elem.tag == self.NS+"synonym":
if state_stack[-2] == self.NS+"synonyms" and state_stack[-3] == self.NS+"drug":
synonym = elem.text
idx = synonym.find(" [")
if idx != -1:
synonym = synonym[:idx]
synonym = synonym.strip().encode('ascii','ignore')
if synonym != "":
self.drug_to_synonyms.setdefault(drug_id, set()).add(synonym)
elif elem.tag == self.NS+"category":
if state_stack[-2] == self.NS+"categories":
self.drug_to_categories.setdefault(drug_id, set()).add(elem.text)
elif elem.tag == self.NS+"atc-code":
if state_stack[-2] == self.NS+"atc-codes":
self.drug_to_atc_codes.setdefault(drug_id, set()).add(elem.attrib["code"])
elif elem.tag == self.NS+"id":
if state_stack[-3] in target_types_plural and state_stack[-2] in target_types:
current_target = elem.text
d = self.drug_to_target_to_values.setdefault(drug_id, {})
d[current_target] = [state_stack[-2], False, []]
#print current_target
elif elem.tag == self.NS+"action":
if state_stack[-3] in target_types and state_stack[-2] == self.NS+"actions":
self.drug_to_target_to_values[drug_id][current_target][2].append(elem.text)
elif elem.tag == self.NS+"known-action":
if state_stack[-2] in target_types:
if elem.text == "yes":
self.drug_to_target_to_values[drug_id][current_target][1] = True
if len(self.drug_to_target_to_values[drug_id][current_target][2]) == 0:
#print "Inconsistency with target action: %s %s" % (drug_id, current_target)
pass
elif elem.tag == self.NS+"gene-name":
if state_stack[-3] in target_types and state_stack[-2] == self.NS+"polypeptide":
self.target_to_gene[current_target] = elem.text
elif elem.tag == self.NS+"kind":
if state_stack[-3] == self.NS+"calculated-properties" and state_stack[-2] == self.NS+"property":
current_property = elem.text # InChIKey or SMILES
elif elem.tag == self.NS+"value":
if state_stack[-3] == self.NS+"calculated-properties" and state_stack[-2] == self.NS+"property":
if current_property == "InChIKey":
inchi_key = elem.text # strip InChIKey=
if inchi_key.startswith("InChIKey="):
inchi_key = inchi_key[len("InChIKey="):]
self.drug_to_inchi_key[drug_id] = inchi_key
if current_property == "SMILES":
self.drug_to_smiles[drug_id] = elem.text
elif elem.tag == self.NS+"resource":
if state_stack[-3] == self.NS+"external-identifiers" and state_stack[-2] == self.NS+"external-identifier":
resource = elem.text
elif elem.tag == self.NS+"identifier":
if state_stack[-3] == self.NS+"external-identifiers" and state_stack[-2] == self.NS+"external-identifier":
if state_stack[-5] in target_types and state_stack[-4] == self.NS+"polypeptide":
if resource == "UniProtKB":
self.target_to_uniprot[current_target] = elem.text
elif state_stack[-4] == self.NS+"drug":
if resource == "PubChem Compound":
self.drug_to_pubchem[drug_id] = elem.text
elif resource == "PubChem Substance":
self.drug_to_pubchem_substance[drug_id] = elem.text
elif resource == "KEGG Drug":
self.drug_to_kegg[drug_id] = elem.text
elif resource == "KEGG Compound":
self.drug_to_kegg_compound[drug_id] = elem.text
elif resource == "UniProtKB":
self.drug_to_uniprot[drug_id] = elem.text
elif resource == "PharmGKB":
self.drug_to_pharmgkb[drug_id] = elem.text
elif resource == "ChEMBL":
self.drug_to_chembl[drug_id] = elem.text
elem.clear()
state_stack.pop()
root.clear()
return
def get_targets(self, target_types = set(["target"]), only_paction=False):
# Map target ids to uniprot ids
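# Keep a (drug, target) pair if its pharmacological action is known
# (only_paction=True) or if its recorded type is in target_types, then
# translate the target id to a UniProt accession; targets without a
# UniProt mapping are skipped.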
target_types = map(lambda x: self.NS + x, target_types)
drug_to_uniprots = {}
for drug, target_to_values in self.drug_to_target_to_values.iteritems():
for target, values in target_to_values.iteritems():
#print target, values
try:
uniprot = self.target_to_uniprot[target]
except KeyError:
# drug target has no uniprot
#print "No uniprot information for", target
continue
target_type, known, actions = values
flag = False
if only_paction:
if known:
flag = True
else:
if target_type in target_types:
flag = True
if flag:
drug_to_uniprots.setdefault(drug, set()).add(uniprot)
return drug_to_uniprots
def get_synonyms(self, selected_drugs=None, only_synonyms=False):
name_to_drug = {}
for drug, name in self.drug_to_name.iteritems():
if selected_drugs is not None and drug not in selected_drugs:
continue
name_to_drug[name.lower()] = drug
synonym_to_drug = {}
for drug, synonyms in self.drug_to_synonyms.iteritems():
for synonym in synonyms:
if selected_drugs is not None and drug not in selected_drugs:
continue
synonym_to_drug[synonym.lower()] = drug
if only_synonyms:
return name_to_drug, synonym_to_drug
for drug, brands in self.drug_to_brands.iteritems():
for brand in brands:
if selected_drugs is not None and drug not in selected_drugs:
continue
synonym_to_drug[brand.lower()] = drug
for drug, products in self.drug_to_products.iteritems():
for product in products:
if selected_drugs is not None and drug not in selected_drugs:
continue
synonym_to_drug[product.lower()] = drug
return name_to_drug, synonym_to_drug
def get_drugs_by_group(self, groups_to_include = set(["approved"]), groups_to_exclude=set(["withdrawn"])):
selected_drugs = set()
for drugbank_id, name in self.drug_to_name.iteritems():
# Consider only approved drugs
if drugbank_id not in self.drug_to_groups:
continue
groups = self.drug_to_groups[drugbank_id]
#if "approved" not in groups or "withdrawn" in groups:
if len(groups & groups_to_include) == 0:
continue
if len(groups & groups_to_exclude) > 0:
continue
selected_drugs.add(drugbank_id)
return selected_drugs
def output_data(file_name, out_file):
dump_file = file_name + ".pcl"
if os.path.exists(dump_file):
parser = cPickle.load(open(dump_file))
else:
parser = DrugBankXMLParser(file_name)
parser.parse()
cPickle.dump(parser, open(dump_file, 'w'))
#target_type_list = ["target", "enzyme", "carrier", "transporter"]
#for target_type in target_type_list:
target_type_list = ["target"]
drug_to_uniprots = parser.get_targets(target_types = set(target_type_list), only_paction=False)
f = open(out_file, 'w')
f.write("Drugbank id\tName\tGroup\tTargets\n")
#f.write("Drugbank id\tName\tGroup\tTarget uniprots\tEnzyme uniprots\tTransporter uniprots\tCarrier uniprots\tDescription\tIndication\tPubChem\tSMILES\tInchi\tAlternative names\t\n")
#drug_to_description drug_to_indication drug_to_synonyms drug_to_products drug_to_brands
for drug, uniprots in drug_to_uniprots.iteritems():
name = parser.drug_to_name[drug]
groups = parser.drug_to_groups[drug]
values = [ drug, name.encode("ascii", "replace") ]
values.append(" | ".join(groups))
values.append(" | ".join(uniprots))
try:
f.write("%s\n" % "\t".join(values))
except:
print values
f.close()
return
def get_drugs_by_group(parser, groups_to_include = set(["approved"]), groups_to_exclude=set(["withdrawn"])):
selected_drugs = set()
for drugbank_id, name in parser.drug_to_name.iteritems():
# Consider only approved drugs
if drugbank_id not in parser.drug_to_groups:
continue
groups = parser.drug_to_groups[drugbank_id]
#if "approved" not in groups or "withdrawn" in groups:
if len(groups & groups_to_include) == 0:
continue
if len(groups & groups_to_exclude) > 0:
continue
selected_drugs.add(drugbank_id)
return selected_drugs
def get_disease_specific_drugs(parser, selected_drugs, phenotypes):
import text_utilities
disease_to_drugs = {}
indication_to_diseases = {}
for drug, indication in parser.drug_to_indication.iteritems():
if drug not in selected_drugs:
continue
if indication is None:
continue
#if any(map(lambda x: x is not None, [ exp.search(indication) for exp in exps ])):
#disease = keywords[0]
#disease_to_drugs.setdefault(disease, set()).add(drug)
#for disease, exp in zip(phenotypes, exps):
# if exp.search(indication.lower()) is not None:
# disease_to_drugs.setdefault(disease, set()).add(drug)
indication = indication.lower()
for disease in phenotypes:
#if all([ indication.find(word.strip()) != -1 for word in disease.split(",") ]):
# disease_to_drugs.setdefault(disease, set()).add(drug)
values = text_utilities.tokenize_disease_name(disease)
#print disease, values
indication_to_diseases.setdefault(indication, set())
if all([ indication.find(word.strip()) != -1 for word in values ]):
#print disease, drug
disease_to_drugs.setdefault(disease, set()).add(drug)
indication_to_diseases.setdefault(indication, set()).add(disease)
else:
values = text_utilities.tokenize_disease_name(disease.replace("2", "II"))
if all([ indication.find(word.strip()) != -1 for word in values ]):
disease_to_drugs.setdefault(disease, set()).add(drug)
indication_to_diseases.setdefault(indication, set()).add(disease)
else:
values = text_utilities.tokenize_disease_name(disease.replace("1", "I"))
if all([ indication.find(word.strip()) != -1 for word in values ]):
disease_to_drugs.setdefault(disease, set()).add(drug)
indication_to_diseases.setdefault(indication, set()).add(disease)
return disease_to_drugs
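    # Note added for clarity: the block below is unreachable because of the return
    # above; it looks like leftover debugging output for non-matching indications.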
# Print non-matching indications
for indication, diseases in indication_to_diseases.iteritems():
if len(diseases) == 0:
print indication.encode('ascii','ignore')
elif indication.find(" not ") != -1 or indication.find(" except ") != -1:
print diseases, indication.encode('ascii','ignore')
#print disease_to_drugs["diabetes mellitus, type 2"]
return disease_to_drugs
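# Illustrative sketch added for clarity (not part of the original workflow): a typical
# way the helpers above are chained together; the phenotype list here is hypothetical.
def _example_disease_specific_drugs(parser):
    selected_drugs = get_drugs_by_group(parser, groups_to_include=set(["approved"]),
                                        groups_to_exclude=set(["withdrawn"]))
    phenotypes = ["diabetes mellitus, type 2", "hypertension"]
    return get_disease_specific_drugs(parser, selected_drugs, phenotypes)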
def get_drugs_for_targets(file_name, output_file):
parser = DrugBankXMLParser(file_name)
parser.parse()
uniprot_to_drugs = {}
for drug, targets in parser.drug_to_targets.iteritems():
#print drug
for uniprot in targets:
uniprot_to_drugs.setdefault(uniprot, set()).add(drug)
f = open(output_file, 'w')
for uniprot, drugs in uniprot_to_drugs.iteritems():
f.write("%s\t%s\n" % (uniprot, ";".join(drugs)))
f.close()
return
def output_drug_info(file_name, output_file):
parser = DrugBankXMLParser(file_name)
parser.parse()
f = open(output_file, 'w')
f.write("drugbank id\tname\tgroups\tpubchem id\tdescription\tindication\ttargets\n")
for drug, name in parser.drug_to_name.iteritems():
name = name.encode('ascii','ignore')
try:
groups = parser.drug_to_groups[drug]
except:
groups = []
try:
description = parser.drug_to_description[drug]
description = description.replace("\n", "").encode('ascii','ignore')
except:
description = ""
try:
indication = parser.drug_to_indication[drug]
indication = indication.replace("\n", "").encode('ascii','ignore')
except:
#print drug
indication = ""
if drug in parser.drug_to_pubchem:
pubchem = parser.drug_to_pubchem[drug]
else:
pubchem = ""
if drug in parser.drug_to_targets:
targets = parser.drug_to_targets[drug]
else:
targets = []
try:
f.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (drug, name, ";".join(groups), pubchem, description, indication, ";".join(targets)))
except:
print drug, name, groups, pubchem, description, indication, targets
    f.close()
    return
def get_drugbank_id_from_name(name, name_to_drug, synonym_to_drug, regex_db_name = False):
"""
    regex_db_name: True creates a regex with each drugbank name and looks for it in the given name
(useful for rxname mappings which contain dosages)
"""
drugbank_id = None
drugbank_name = None
name = name.lower()
# Try exact match first
if name in name_to_drug:
drugbank_id = name_to_drug[name]
drugbank_name = name
elif name in synonym_to_drug:
drugbank_id = synonym_to_drug[name]
drugbank_name = name
# Try matching drugbank name in the given name
else:
if not regex_db_name:
if len(set("[()]") & set(name)) > 0:
return drugbank_id, drugbank_name
exp = re.compile(r"\b%s\b" % name)
for db_name, db_id in name_to_drug.iteritems():
if len(set("[()]") & set(db_name)) > 0:
continue
db_name = db_name.lower()
if regex_db_name:
exp = re.compile(r"\b%s\b" % db_name)
m = exp.search(name)
else:
m = exp.search(db_name)
if m is None:
continue
#if drugbank_id is not None:
# print "Multiple match:", drugbank_name, db_name, name
drugbank_id = db_id
drugbank_name = db_name
break
if drugbank_id is None:
for db_name, db_id in synonym_to_drug.iteritems():
if len(set("[()]") & set(db_name)) > 0:
continue
db_name = db_name.lower()
if regex_db_name:
try:
exp = re.compile(r"\b%s\b" % db_name)
except:
continue
m = exp.search(name)
else:
m = exp.search(db_name)
if m is None:
continue
#if drugbank_id is not None:
# print drugbank_id, db_id, name
drugbank_id = db_id
drugbank_name = db_name
return drugbank_id, drugbank_name
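# Illustrative sketch added for clarity (not part of the original module): mapping
# free-text drug names (e.g. RxNorm strings that include dosages) to DrugBank ids via
# the name/synonym dictionaries built by the parser's get_synonyms().
def _example_map_names_to_drugbank(parser, raw_names):
    name_to_drug, synonym_to_drug = parser.get_synonyms()
    mapping = {}
    for raw_name in raw_names:
        db_id, db_name = get_drugbank_id_from_name(raw_name, name_to_drug,
                                                   synonym_to_drug, regex_db_name=True)
        if db_id is not None:
            mapping[raw_name] = db_id
    return mapping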
def get_drug_info(drug_info_file):
drug_to_values = {}
f = open(drug_info_file)
header = f.readline().strip().split("\t")
col_to_idx = dict((k, i) for i, k in enumerate(header[1:]))
for line in f:
words = line.strip("\n").split("\t")
drug_to_values[words[0]] = words[1:]
return col_to_idx, drug_to_values
def get_drug_targets(file_name, drugs_file=None):
parser = DrugBankXMLParser(file_name)
parser.parse()
    drugs = set()
if drugs_file is not None:
drugs = set([ line.strip().lower() for line in open(drugs_file) ])
#exp = re.compile("brain")
#exp2 = re.compile("metastasis")
for drug, description in parser.drug_to_description.iteritems():
#drug = drug.lower()
if description is None:
continue
#m = exp.search(description)
#m2 = exp2.search(description)
if True: # m is not None and m2 is not None:
drugs.add(drug)
for drug, indication in parser.drug_to_indication.iteritems():
#drug = drug.lower()
if indication is None:
continue
#m = exp.search(indication)
#m2 = exp2.search(indication)
if True: # m is not None and m2 is not None:
drugs.add(drug)
#print drugs
drug_to_targets = {}
for drug, partner_ids in parser.drug_to_partner_ids.iteritems():
#drug = drug.lower()
if drugs is not None and drug not in drugs:
continue
#print drug
for partner_id in partner_ids:
gene = parser.partner_id_to_gene[partner_id]
if gene is None:
continue
drug_to_targets.setdefault(drug, set()).add(gene)
return drug_to_targets, parser.drug_to_description, parser.drug_to_indication
def output_drug_targets(drug_to_targets):
f = open("drug_to_targets.txt", 'w')
f2 = open("drug_targets.txt", 'w')
for drug, targets in drug_to_targets.iteritems():
f.write("%s\t%s\n" % (drug, "\t".join(targets)))
f2.write("%s\n" % "\n".join(targets))
f.close()
f2.close()
return
def score_drugs_by_target_score(drug_to_targets, scores_file, output_file):
gene_to_score = dict([ line.strip().split() for line in open(scores_file)])
values = []
for drug, targets in drug_to_targets.iteritems():
scores = []
for target in targets:
if target in gene_to_score:
scores.append(float(gene_to_score[target]))
if len(scores) == 0:
continue
values.append((calculate_score(scores), drug))
values.sort()
values.reverse()
f = open(output_file, 'w')
for score, drug in values:
f.write("%s\t%s\n" % (drug, str(score)))
f.close()
return
def calculate_drug_score_from_targets(values):
val = 0.0
for value in values:
val += value * value
return math.sqrt(val)
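# Note added for clarity: calculate_drug_score_from_targets() is simply the Euclidean
# (L2) norm of the per-target scores, e.g. [3.0, 4.0] -> 5.0.
def _example_drug_score():
    assert abs(calculate_drug_score_from_targets([3.0, 4.0]) - 5.0) < 1e-9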
if __name__ == "__main__":
main()
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from oslo_config import cfg
from oslo_utils import importutils
import requests
import six
from heat.api.aws import ec2token
from heat.api.aws import exception
from heat.common import wsgi
from heat.tests import common
class Ec2TokenTest(common.HeatTestCase):
'''
Tests the Ec2Token middleware
'''
def setUp(self):
super(Ec2TokenTest, self).setUp()
self.m.StubOutWithMock(requests, 'post')
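        # mox-style record/replay: requests.post is stubbed out here, each test records
        # the expected call via _stub_http_connection(), and ReplayAll()/VerifyAll()
        # then check that the middleware issued exactly that request.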
def _dummy_GET_request(self, params=None, environ=None):
# Mangle the params dict into a query string
params = params or {}
environ = environ or {}
qs = "&".join(["=".join([k, str(params[k])]) for k in params])
environ.update({'REQUEST_METHOD': 'GET', 'QUERY_STRING': qs})
req = wsgi.Request(environ)
return req
def test_conf_get_paste(self):
dummy_conf = {'auth_uri': 'http://192.0.2.9/v2.0'}
ec2 = ec2token.EC2Token(app=None, conf=dummy_conf)
self.assertEqual('http://192.0.2.9/v2.0', ec2._conf_get('auth_uri'))
self.assertEqual(
'http://192.0.2.9/v2.0/ec2tokens',
ec2._conf_get_keystone_ec2_uri('http://192.0.2.9/v2.0'))
def test_conf_get_opts(self):
cfg.CONF.set_default('auth_uri', 'http://192.0.2.9/v2.0/',
group='ec2authtoken')
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('http://192.0.2.9/v2.0/', ec2._conf_get('auth_uri'))
self.assertEqual(
'http://192.0.2.9/v2.0/ec2tokens',
ec2._conf_get_keystone_ec2_uri('http://192.0.2.9/v2.0/'))
def test_conf_get_ssl_default_options(self):
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertTrue(ec2.ssl_options['verify'],
"SSL verify should be True by default")
self.assertIsNone(ec2.ssl_options['cert'],
"SSL client cert should be None by default")
def test_conf_ssl_insecure_option(self):
ec2 = ec2token.EC2Token(app=None, conf={})
cfg.CONF.set_default('insecure', 'True', group='ec2authtoken')
cfg.CONF.set_default('ca_file', None, group='ec2authtoken')
self.assertFalse(ec2.ssl_options['verify'])
def test_conf_get_ssl_opts(self):
cfg.CONF.set_default('auth_uri', 'https://192.0.2.9/v2.0/',
group='ec2authtoken')
cfg.CONF.set_default('ca_file', '/home/user/cacert.pem',
group='ec2authtoken')
cfg.CONF.set_default('insecure', 'false', group='ec2authtoken')
cfg.CONF.set_default('cert_file', '/home/user/mycert',
group='ec2authtoken')
cfg.CONF.set_default('key_file', '/home/user/mykey',
group='ec2authtoken')
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('/home/user/cacert.pem', ec2.ssl_options['verify'])
self.assertEqual(('/home/user/mycert', '/home/user/mykey'),
ec2.ssl_options['cert'])
def test_get_signature_param_old(self):
params = {'Signature': 'foo'}
dummy_req = self._dummy_GET_request(params)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('foo', ec2._get_signature(dummy_req))
def test_get_signature_param_new(self):
params = {'X-Amz-Signature': 'foo'}
dummy_req = self._dummy_GET_request(params)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('foo', ec2._get_signature(dummy_req))
def test_get_signature_header_space(self):
req_env = {'HTTP_AUTHORIZATION':
('Authorization: foo Credential=foo/bar, '
'SignedHeaders=content-type;host;x-amz-date, '
'Signature=xyz')}
dummy_req = self._dummy_GET_request(environ=req_env)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('xyz', ec2._get_signature(dummy_req))
def test_get_signature_header_notlast(self):
req_env = {'HTTP_AUTHORIZATION':
('Authorization: foo Credential=foo/bar, '
'Signature=xyz,'
'SignedHeaders=content-type;host;x-amz-date ')}
dummy_req = self._dummy_GET_request(environ=req_env)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('xyz', ec2._get_signature(dummy_req))
def test_get_signature_header_nospace(self):
req_env = {'HTTP_AUTHORIZATION':
('Authorization: foo Credential=foo/bar,'
'SignedHeaders=content-type;host;x-amz-date,'
'Signature=xyz')}
dummy_req = self._dummy_GET_request(environ=req_env)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('xyz', ec2._get_signature(dummy_req))
def test_get_access_param_old(self):
params = {'AWSAccessKeyId': 'foo'}
dummy_req = self._dummy_GET_request(params)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('foo', ec2._get_access(dummy_req))
def test_get_access_param_new(self):
params = {'X-Amz-Credential': 'foo/bar'}
dummy_req = self._dummy_GET_request(params)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('foo', ec2._get_access(dummy_req))
def test_get_access_header_space(self):
req_env = {'HTTP_AUTHORIZATION':
('Authorization: foo Credential=foo/bar, '
'SignedHeaders=content-type;host;x-amz-date, '
'Signature=xyz')}
dummy_req = self._dummy_GET_request(environ=req_env)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('foo', ec2._get_access(dummy_req))
def test_get_access_header_nospace(self):
req_env = {'HTTP_AUTHORIZATION':
('Authorization: foo Credential=foo/bar,'
'SignedHeaders=content-type;host;x-amz-date,'
'Signature=xyz')}
dummy_req = self._dummy_GET_request(environ=req_env)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('foo', ec2._get_access(dummy_req))
def test_get_access_header_last(self):
req_env = {'HTTP_AUTHORIZATION':
('Authorization: foo '
'SignedHeaders=content-type;host;x-amz-date,'
'Signature=xyz,Credential=foo/bar')}
dummy_req = self._dummy_GET_request(environ=req_env)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('foo', ec2._get_access(dummy_req))
def test_call_x_auth_user(self):
req_env = {'HTTP_X_AUTH_USER': 'foo'}
dummy_req = self._dummy_GET_request(environ=req_env)
ec2 = ec2token.EC2Token(app='xyz', conf={})
self.assertEqual('xyz', ec2.__call__(dummy_req))
def test_call_auth_nosig(self):
req_env = {'HTTP_AUTHORIZATION':
('Authorization: foo Credential=foo/bar, '
'SignedHeaders=content-type;host;x-amz-date')}
dummy_req = self._dummy_GET_request(environ=req_env)
ec2 = ec2token.EC2Token(app='xyz', conf={})
self.assertRaises(exception.HeatIncompleteSignatureError,
ec2.__call__, dummy_req)
def test_call_auth_nouser(self):
req_env = {'HTTP_AUTHORIZATION':
('Authorization: foo '
'SignedHeaders=content-type;host;x-amz-date,'
'Signature=xyz')}
dummy_req = self._dummy_GET_request(environ=req_env)
ec2 = ec2token.EC2Token(app='xyz', conf={})
self.assertRaises(exception.HeatMissingAuthenticationTokenError,
ec2.__call__, dummy_req)
def test_call_auth_noaccess(self):
# If there's no accesskey in params or header, but there is a
# Signature, we expect HeatMissingAuthenticationTokenError
params = {'Signature': 'foo'}
dummy_req = self._dummy_GET_request(params)
ec2 = ec2token.EC2Token(app='xyz', conf={})
self.assertRaises(exception.HeatMissingAuthenticationTokenError,
ec2.__call__, dummy_req)
def test_call_x_auth_nouser_x_auth_user(self):
req_env = {'HTTP_X_AUTH_USER': 'foo',
'HTTP_AUTHORIZATION':
('Authorization: foo '
'SignedHeaders=content-type;host;x-amz-date,'
'Signature=xyz')}
dummy_req = self._dummy_GET_request(environ=req_env)
ec2 = ec2token.EC2Token(app='xyz', conf={})
self.assertEqual('xyz', ec2.__call__(dummy_req))
def _stub_http_connection(self, headers=None, params=None, response=None,
req_url='http://123:5000/v2.0/ec2tokens',
verify=True, cert=None):
headers = headers or {}
params = params or {}
class DummyHTTPResponse(object):
text = response
def json(self):
return json.loads(self.text)
body_hash = ('e3b0c44298fc1c149afbf4c8996fb9'
'2427ae41e4649b934ca495991b7852b855')
req_creds = json.dumps({"ec2Credentials":
{"access": "foo",
"headers": headers,
"host": "heat:8000",
"verb": "GET",
"params": params,
"signature": "xyz",
"path": "/v1",
"body_hash": body_hash}})
req_headers = {'Content-Type': 'application/json'}
requests.post(req_url, data=req_creds, verify=verify, cert=cert,
headers=req_headers).AndReturn(DummyHTTPResponse())
def test_call_ok(self):
dummy_conf = {'auth_uri': 'http://123:5000/v2.0'}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
auth_str = ('Authorization: foo Credential=foo/bar, '
'SignedHeaders=content-type;host;x-amz-date, '
'Signature=xyz')
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1',
'HTTP_AUTHORIZATION': auth_str}
dummy_req = self._dummy_GET_request(environ=req_env)
ok_resp = json.dumps({'access': {'token': {
'id': 123,
'tenant': {'name': 'tenant', 'id': 'abcd1234'}}}})
self._stub_http_connection(headers={'Authorization': auth_str},
response=ok_resp)
self.m.ReplayAll()
self.assertEqual('woot', ec2.__call__(dummy_req))
self.assertEqual('tenant', dummy_req.headers['X-Tenant-Name'])
self.assertEqual('abcd1234', dummy_req.headers['X-Tenant-Id'])
self.m.VerifyAll()
def test_call_ok_roles(self):
dummy_conf = {'auth_uri': 'http://123:5000/v2.0'}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
auth_str = ('Authorization: foo Credential=foo/bar, '
'SignedHeaders=content-type;host;x-amz-date, '
'Signature=xyz')
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1',
'HTTP_AUTHORIZATION': auth_str}
dummy_req = self._dummy_GET_request(environ=req_env)
ok_resp = json.dumps({'access': {
'token': {
'id': 123,
'tenant': {'name': 'tenant', 'id': 'abcd1234'}
},
'metadata': {'roles': ['aa', 'bb', 'cc']}}})
self._stub_http_connection(headers={'Authorization': auth_str},
response=ok_resp)
self.m.ReplayAll()
self.assertEqual('woot', ec2.__call__(dummy_req))
self.assertEqual('aa,bb,cc', dummy_req.headers['X-Roles'])
self.m.VerifyAll()
def test_call_err_tokenid(self):
dummy_conf = {'auth_uri': 'http://123:5000/v2.0/'}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
auth_str = ('Authorization: foo Credential=foo/bar, '
'SignedHeaders=content-type;host;x-amz-date, '
'Signature=xyz')
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1',
'HTTP_AUTHORIZATION': auth_str}
dummy_req = self._dummy_GET_request(environ=req_env)
err_msg = "EC2 access key not found."
err_resp = json.dumps({'error': {'message': err_msg}})
self._stub_http_connection(headers={'Authorization': auth_str},
response=err_resp)
self.m.ReplayAll()
self.assertRaises(exception.HeatInvalidClientTokenIdError,
ec2.__call__, dummy_req)
self.m.VerifyAll()
def test_call_err_signature(self):
dummy_conf = {'auth_uri': 'http://123:5000/v2.0'}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
auth_str = ('Authorization: foo Credential=foo/bar, '
'SignedHeaders=content-type;host;x-amz-date, '
'Signature=xyz')
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1',
'HTTP_AUTHORIZATION': auth_str}
dummy_req = self._dummy_GET_request(environ=req_env)
err_msg = "EC2 signature not supplied."
err_resp = json.dumps({'error': {'message': err_msg}})
self._stub_http_connection(headers={'Authorization': auth_str},
response=err_resp)
self.m.ReplayAll()
self.assertRaises(exception.HeatSignatureError,
ec2.__call__, dummy_req)
self.m.VerifyAll()
def test_call_err_denied(self):
dummy_conf = {'auth_uri': 'http://123:5000/v2.0'}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
auth_str = ('Authorization: foo Credential=foo/bar, '
'SignedHeaders=content-type;host;x-amz-date, '
'Signature=xyz')
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1',
'HTTP_AUTHORIZATION': auth_str}
dummy_req = self._dummy_GET_request(environ=req_env)
err_resp = json.dumps({})
self._stub_http_connection(headers={'Authorization': auth_str},
response=err_resp)
self.m.ReplayAll()
self.assertRaises(exception.HeatAccessDeniedError,
ec2.__call__, dummy_req)
self.m.VerifyAll()
def test_call_ok_v2(self):
dummy_conf = {'auth_uri': 'http://123:5000/v2.0'}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
ok_resp = json.dumps({'access': {'metadata': {}, 'token': {
'id': 123,
'tenant': {'name': 'tenant', 'id': 'abcd1234'}}}})
self._stub_http_connection(response=ok_resp,
params={'AWSAccessKeyId': 'foo'})
self.m.ReplayAll()
self.assertEqual('woot', ec2.__call__(dummy_req))
self.m.VerifyAll()
def test_call_ok_multicloud(self):
dummy_conf = {
'allowed_auth_uris': [
'http://123:5000/v2.0', 'http://456:5000/v2.0'],
'multi_cloud': True
}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
ok_resp = json.dumps({'access': {'metadata': {}, 'token': {
'id': 123,
'tenant': {'name': 'tenant', 'id': 'abcd1234'}}}})
err_msg = "EC2 access key not found."
err_resp = json.dumps({'error': {'message': err_msg}})
# first request fails
self._stub_http_connection(
req_url='http://123:5000/v2.0/ec2tokens',
response=err_resp,
params={'AWSAccessKeyId': 'foo'})
# second request passes
self._stub_http_connection(
req_url='http://456:5000/v2.0/ec2tokens',
response=ok_resp,
params={'AWSAccessKeyId': 'foo'})
self.m.ReplayAll()
self.assertEqual('woot', ec2.__call__(dummy_req))
self.m.VerifyAll()
def test_call_err_multicloud(self):
dummy_conf = {
'allowed_auth_uris': [
'http://123:5000/v2.0', 'http://456:5000/v2.0'],
'multi_cloud': True
}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
err_resp1 = json.dumps({})
err_msg2 = "EC2 access key not found."
err_resp2 = json.dumps({'error': {'message': err_msg2}})
# first request fails with HeatAccessDeniedError
self._stub_http_connection(
req_url='http://123:5000/v2.0/ec2tokens',
response=err_resp1,
params={'AWSAccessKeyId': 'foo'})
# second request fails with HeatInvalidClientTokenIdError
self._stub_http_connection(
req_url='http://456:5000/v2.0/ec2tokens',
response=err_resp2,
params={'AWSAccessKeyId': 'foo'})
self.m.ReplayAll()
# raised error matches last failure
self.assertRaises(exception.HeatInvalidClientTokenIdError,
ec2.__call__, dummy_req)
self.m.VerifyAll()
def test_call_err_multicloud_none_allowed(self):
dummy_conf = {
'allowed_auth_uris': [],
'multi_cloud': True
}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
self.m.ReplayAll()
self.assertRaises(exception.HeatAccessDeniedError,
ec2.__call__, dummy_req)
self.m.VerifyAll()
def test_call_badconf_no_authuri(self):
ec2 = ec2token.EC2Token(app='woot', conf={})
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
self.m.ReplayAll()
ex = self.assertRaises(exception.HeatInternalFailureError,
ec2.__call__, dummy_req)
self.assertEqual('Service misconfigured', six.text_type(ex))
self.m.VerifyAll()
def test_call_ok_auth_uri_ec2authtoken(self):
dummy_url = 'http://123:5000/v2.0'
cfg.CONF.set_default('auth_uri', dummy_url, group='ec2authtoken')
ec2 = ec2token.EC2Token(app='woot', conf={})
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
ok_resp = json.dumps({'access': {'metadata': {}, 'token': {
'id': 123,
'tenant': {'name': 'tenant', 'id': 'abcd1234'}}}})
self._stub_http_connection(response=ok_resp,
params={'AWSAccessKeyId': 'foo'})
self.m.ReplayAll()
self.assertEqual('woot', ec2.__call__(dummy_req))
self.m.VerifyAll()
def test_call_ok_auth_uri_ec2authtoken_long(self):
# Prove we tolerate a url which already includes the /ec2tokens path
dummy_url = 'http://123:5000/v2.0/ec2tokens'
cfg.CONF.set_default('auth_uri', dummy_url, group='ec2authtoken')
ec2 = ec2token.EC2Token(app='woot', conf={})
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
ok_resp = json.dumps({'access': {'metadata': {}, 'token': {
'id': 123,
'tenant': {'name': 'tenant', 'id': 'abcd1234'}}}})
self._stub_http_connection(response=ok_resp,
params={'AWSAccessKeyId': 'foo'})
self.m.ReplayAll()
self.assertEqual('woot', ec2.__call__(dummy_req))
self.m.VerifyAll()
def test_call_ok_auth_uri_ks_authtoken(self):
# Import auth_token to have keystone_authtoken settings setup.
importutils.import_module('keystonemiddleware.auth_token')
dummy_url = 'http://123:5000/v2.0'
cfg.CONF.set_override('auth_uri', dummy_url,
group='keystone_authtoken')
ec2 = ec2token.EC2Token(app='woot', conf={})
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
ok_resp = json.dumps({'access': {'metadata': {}, 'token': {
'id': 123,
'tenant': {'name': 'tenant', 'id': 'abcd1234'}}}})
self._stub_http_connection(response=ok_resp,
params={'AWSAccessKeyId': 'foo'})
self.m.ReplayAll()
self.assertEqual('woot', ec2.__call__(dummy_req))
self.m.VerifyAll()
def test_filter_factory(self):
ec2_filter = ec2token.EC2Token_filter_factory(global_conf={})
self.assertEqual('xyz', ec2_filter('xyz').application)
def test_filter_factory_none_app(self):
ec2_filter = ec2token.EC2Token_filter_factory(global_conf={})
self.assertEqual(None, ec2_filter(None).application)
|
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class Lawyer(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Lawyer - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'owner_id': 'str',
'created_at': 'datetime',
'name': 'str',
'email_address': 'str',
'trello_id': 'str',
'id': 'str',
'v': 'float',
'referrals': 'list[str]',
'specialisms': 'list[str]'
}
self.attribute_map = {
'owner_id': '_ownerId',
'created_at': '_createdAt',
'name': 'name',
'email_address': 'emailAddress',
'trello_id': 'trelloId',
'id': '_id',
'v': '__v',
'referrals': 'referrals',
'specialisms': 'specialisms'
}
self._owner_id = None
self._created_at = None
self._name = None
self._email_address = None
self._trello_id = None
self._id = None
self._v = None
self._referrals = None
self._specialisms = None
@property
def owner_id(self):
"""
Gets the owner_id of this Lawyer.
:return: The owner_id of this Lawyer.
:rtype: str
"""
return self._owner_id
@owner_id.setter
def owner_id(self, owner_id):
"""
Sets the owner_id of this Lawyer.
:param owner_id: The owner_id of this Lawyer.
:type: str
"""
self._owner_id = owner_id
@property
def created_at(self):
"""
Gets the created_at of this Lawyer.
:return: The created_at of this Lawyer.
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""
Sets the created_at of this Lawyer.
:param created_at: The created_at of this Lawyer.
:type: datetime
"""
self._created_at = created_at
@property
def name(self):
"""
Gets the name of this Lawyer.
:return: The name of this Lawyer.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Lawyer.
:param name: The name of this Lawyer.
:type: str
"""
self._name = name
@property
def email_address(self):
"""
Gets the email_address of this Lawyer.
:return: The email_address of this Lawyer.
:rtype: str
"""
return self._email_address
@email_address.setter
def email_address(self, email_address):
"""
Sets the email_address of this Lawyer.
:param email_address: The email_address of this Lawyer.
:type: str
"""
self._email_address = email_address
@property
def trello_id(self):
"""
Gets the trello_id of this Lawyer.
:return: The trello_id of this Lawyer.
:rtype: str
"""
return self._trello_id
@trello_id.setter
def trello_id(self, trello_id):
"""
Sets the trello_id of this Lawyer.
:param trello_id: The trello_id of this Lawyer.
:type: str
"""
self._trello_id = trello_id
@property
def id(self):
"""
Gets the id of this Lawyer.
:return: The id of this Lawyer.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this Lawyer.
:param id: The id of this Lawyer.
:type: str
"""
self._id = id
@property
def v(self):
"""
Gets the v of this Lawyer.
:return: The v of this Lawyer.
:rtype: float
"""
return self._v
@v.setter
def v(self, v):
"""
Sets the v of this Lawyer.
:param v: The v of this Lawyer.
:type: float
"""
self._v = v
@property
def referrals(self):
"""
Gets the referrals of this Lawyer.
:return: The referrals of this Lawyer.
:rtype: list[str]
"""
return self._referrals
@referrals.setter
def referrals(self, referrals):
"""
Sets the referrals of this Lawyer.
:param referrals: The referrals of this Lawyer.
:type: list[str]
"""
self._referrals = referrals
@property
def specialisms(self):
"""
Gets the specialisms of this Lawyer.
:return: The specialisms of this Lawyer.
:rtype: list[str]
"""
return self._specialisms
@specialisms.setter
def specialisms(self, specialisms):
"""
Sets the specialisms of this Lawyer.
:param specialisms: The specialisms of this Lawyer.
:type: list[str]
"""
self._specialisms = specialisms
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
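# Illustrative sketch added for clarity (not part of the generated code): a typical
# round trip through the generated model; the field values below are hypothetical.
def _example_lawyer_to_dict():
    lawyer = Lawyer()
    lawyer.name = "Jane Doe"
    lawyer.specialisms = ["employment", "contract"]
    return lawyer.to_dict()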
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#----------------------------------------
# Created by fboers at 19.09.18
#----------------------------------------
# Update
#----------------------------------------
import wx
from pubsub import pub
from jumeg.gui.wxlib.jumeg_gui_wxlib_logger import JuMEG_wxLogger
#from jumeg.gui.wxlib.jumeg_gui_wxlib_loglog import JuMEG_wxLogger
from jumeg.gui.wxlib.utils.jumeg_gui_wxlib_utils_controls import JuMEG_wxSplitterWindow,JuMEG_wxCMDButtons
__version__= '2019.05.14.001'
class JuMEG_wxMainPanelBase(wx.Panel):
'''
JuMEG_wxPanel base CLS for JuMEG wx.Panels
    with functions to overwrite to avoid overhead
PanelTop
PanelA | PanelB
Logger panel
PanelTop: for comboboxes e.g. experiment selection
PanelA : left container panel
PanelB : right container panel e.g. parameter for argparser
Logger : panel with TextCtrl and <Clear> and <MinMax> button on top right
    Parameters:
-----------
bg : panel backgroundcolor <grey88>
    bgA : panel A backgroundcolor <wx.Colour(132, 126, 238)>
    bgB : panel B backgroundcolor <wx.Colour(140, 233, 238)>
labelA : panel A label
labelB : panel B label
Flags
ShowLogger : show logger window at bottom <False>
ShowMinMaxBt : show MinMax Button in Logger window, to min/max window <True>
    ShowCmdButtons: show command buttons <Close,Cancel,Apply> <True>
verbose : <False>
debug : <False>
Functions to overwrite:
-----------------------
update_from_kwargs(**kwargs)
wx_init(**kwargs)
init_pubsub(**kwargs)
update(**kwargs)
Layout function
----------------
    ApplyLayout() use <self.MainPanel> to pack your controls
Example:
--------
class JuMEG_wxMEEGMerger(JuMEG_wxPanel):
def __init__(self, parent, **kwargs):
super(JuMEG_wxMEEGMerger, self).__init__(parent)
self._init(**kwargs) # here is the initialization
def ApplyLayout(self):
""" use PanelA and PanelB to put the contrls and to split between """
ds = 4
#--- fill PanelA with controls
vboxA = wx.BoxSizer(wx.VERTICAL)
vboxA.Add(self.CtrlA1, 0, wx.ALIGN_LEFT | wx.EXPAND | wx.ALL, ds)
vboxA.Add((0, 0), 0, wx.ALIGN_RIGHT | wx.ALL)
hboxA = wx.BoxSizer(wx.HORIZONTAL)
hboxA.Add(self.CtrlA2, 0, wx.ALIGN_LEFT | wx.EXPAND | wx.ALL, ds)
hboxA.Add(self.CtrlA3, 1, wx.ALIGN_LEFT | wx.EXPAND | wx.ALL, ds)
vboxA.Add(hboxA, 1, wx.ALIGN_LEFT | wx.EXPAND | wx.ALL)
self.PanelA.SetSizer(vboxA)
#--- fill PanelB with controls
vboxB = wx.BoxSizer(wx.VERTICAL)
vboxB.Add(self.CtrlB1, 0, wx.ALIGN_LEFT | wx.EXPAND | wx.ALL, ds)
self.PanelB.SetSizer(vboxB)
self.SplitterAB.SplitVertically(self.PanelA,self.PanelB)
'''
def __init__(self,parent,name="MAIN_PANEL_BASE",**kwargs):
super().__init__(parent,id=wx.ID_ANY,style=wx.SUNKEN_BORDER,name=name)
self._splitter = None
self.use_pubsub = True
self.debug = False
self.verbose = False
self._param = {}
self.__isInit = False
self.__isMinimize = False
self._show_logger = False
self._show_minmax_bt= False
self._show_cmd_bts = False
self._show_top_pnl = True
self._show_titleA = True
self._show_titleB = True
# self._init(**kwargs) # the function to call in child class
def _get_param(self,k1,k2):
return self._param[k1][k2]
def _set_param(self,k1,k2,v):
self._param[k1][k2]=v
@property
def isInit(self): return self.__isInit
@property
def MainPanel(self): return self._pnl_main
@property
def LoggerPanel(self): return self._pnl_logger
@property
def TopPanel(self): return self._pnl_top
@property
def CmdButtonPanel(self): return self._pnl_cmd_buttons
@property
def WindowSplitter(self): return self._splitter
@property
def ShowLogger(self) : return self._show_logger
@ShowLogger.setter
def ShowLogger(self,v): self._show_logger = v
@property
def ShowMinMaxBt(self) : return self._show_minmax_bt
@ShowMinMaxBt.setter
def ShowMinMaxBt(self,v): self._show_minmax_bt = v
@property
def ShowCmdButtons(self) : return self._show_cmd_bts
@ShowCmdButtons.setter
def ShowCmdButtons(self,v): self._show_cmd_bts = v
@property
def ShowTopPanel(self) : return self._show_top_pnl
@ShowTopPanel.setter
def ShowTopPanel(self,v): self._show_top_pnl = v
@property
def ShowTitleA(self): return self._show_titleA
@ShowTitleA.setter
def ShowTitleA(self,v): self._show_titleA = v
@property
def ShowTitleB(self): return self._show_titleB
@ShowTitleB.setter
def ShowTitleB(self,v): self._show_titleB = v
def SetVerbose(self,value=False):
self.verbose=value
def ShowHelp(self):
""" show help __doc__string"""
        wx.MessageBox(self.__doc__, caption="Help", parent=self)
#--- default methods
def _update_from_kwargs_default(self,**kwargs):
"""
update kwargs like doc <JuMEG_wxMainPanelBase>
:param kwargs:
:return:
"""
self.labelA = kwargs.get("labelA","PANEL A")
self.labelB = kwargs.get("labelB","PANEL B")
self.bgA = kwargs.get("bgA",wx.Colour(132, 126, 238))
self.bgB = kwargs.get("bgB",wx.Colour(140, 233, 238))
self._show_logger = kwargs.get("ShowLogger",self._show_logger)
self._show_minmax_bt= kwargs.get("ShowMinMaxBt",self._show_minmax_bt)
self._show_cmd_bts = kwargs.get("ShowCmdButtons",self._show_cmd_bts)
self.verbose = kwargs.get("verbose", False)
self.debug = kwargs.get("debug", False)
self.SashPosition = kwargs.get("SashPosition",-50)
self.SetBackgroundColour(kwargs.get("bg", "grey88"))
def FitBoxSizer(self,pnl,pos=wx.HORIZONTAL):
""" fits a BoxSizer to a panel + AutoLayout
pnl: wx:panel
pos: sizer type <wx.HORIZONTAL>
"""
pnl.SetSizer(wx.BoxSizer(pos))
pnl.Fit()
pnl.SetAutoLayout(True)
def _wx_init_default(self, **kwargs):
""" window default settings"""
self.clear_children(self)
# --- init splitter for controls and logger
self._splitter = None
self._pnl_logger = None
self._pnl_main = None
self._pnl_top = None
self._pnl_cmd_buttons = None
# --- command logger
if self.ShowLogger:
self._splitter = JuMEG_wxSplitterWindow(self,label="Logger",name=self.GetName() + ".SPLITTER")
self._pnl_logger = JuMEG_wxLogger(self._splitter,name=self.GetName().upper()+".LOGGER") #listener=self.GetName())
self._pnl_main = wx.Panel(self._splitter)
self._pnl_main.SetBackgroundColour(wx.Colour(0, 0, 128))
        else: # --- only your controls
self._pnl_main = wx.Panel(self)
self._pnl_main.SetBackgroundColour(wx.RED)
def _init_pubsub_default(self):
pub.subscribe(self.SetVerbose,'MAIN_FRAME.VERBOSE')
pub.subscribe(self.ShowHelp, self.GetName()+".SHOW_HELP")
#--- overwrite methods
def _update_from_kwargs(self,**kwargs):
""" pass """
pass
def wx_init(self, **kwargs):
""" init WX controls """
pass
def init_pubsub(self, **kwargs):
""""
init pubsub call
pub.subscribe(self.SetVerbose,'MAIN_FRAME.VERBOSE')
"""
pass
def update(self, **kwargs):
pass
def ApplyLayout(self):
""" your layout stuff """
pass
#--- init all
def _init(self,**kwargs):
if self.isInit:
self.clear()
#---
self._update_from_kwargs_default(**kwargs)
self._update_from_kwargs(**kwargs)
#---
self._wx_init_default()
self.wx_init(**kwargs)
# ---
self.update(**kwargs)
#---
self._init_pubsub_default()
self.init_pubsub(**kwargs)
#---
self.__isInit=True
self._ApplyLayout()
self.update_on_display()
def update_on_display(self):
pass
def _ApplyLayout(self):
""" default Layout Framework """
self.Sizer = wx.BoxSizer(wx.VERTICAL)
        self.ApplyLayout() #-- fill PanelA and PanelB with controls
self.MainPanel.Fit()
self.MainPanel.SetAutoLayout(1)
if self.ShowLogger:
self._splitter.SplitHorizontally(self.MainPanel,self.LoggerPanel)
self._splitter.SetMinimumPaneSize(50)
self._splitter.SetSashGravity(1.0)
self._splitter.SetSashPosition(self.SashPosition,redraw=True)
self.Sizer.Add(self._splitter, 1, wx.ALIGN_CENTER | wx.EXPAND | wx.ALL, 5)
else:
self.Sizer.Add(self.MainPanel, 1, wx.ALIGN_CENTER | wx.EXPAND | wx.ALL, 5)
self.SetSizer(self.Sizer)
self.Fit()
self.SetAutoLayout(1)
self.GetParent().Layout()
#--- clear
def clear_parameter(self):
""" clear parameter overwrite"""
pass
def clear_children(self,wxctrl):
""" clear/delete wx childeren """
for child in wxctrl.GetChildren():
child.Destroy()
self.Layout()
self.Fit()
def clear(self,wxctrl=None):
""" clear parameter and delete wx childeren """
self.__isInit = False
self.clear_parameter()
#--- clear wx stuff
self.clear_children(self)
class JuMEG_wxMainPanel(JuMEG_wxMainPanelBase):
"""
main panel with sub-panels A,B inside a SplitterPanel
call self._init(**kwargs) in subclass
Parameter:
----------
labelA: title panel A
labelB: title panel B
bgA : wx.Colour() <132, 126, 238>
bgB : wx.Colour() <140, 233, 238>
    ShowMinMaxBt: True
Panels:
---------
TopPanel
========================================
PanelA Title | PanelB Title + MinMaxBt
PanelA Panles | PanelB Panels
========================================
Button Panel
"""
def __init__(self,*kargs,name="MAIN_PANEL",**kwargs):
super(JuMEG_wxMainPanel,self).__init__(*kargs,name=name,**kwargs)
#self._init(**kwargs)
def wx_init(self, **kwargs):
""" init WX controls """
self._update_from_kwargs(**kwargs)
#---
if self.ShowTopPanel:
self._pnl_top = wx.Panel(self.MainPanel)
self.FitBoxSizer(self.TopPanel)
#---
self.SplitterAB = JuMEG_wxSplitterWindow(self.MainPanel,listener=self.GetName()+"_B")
self.SplitterAB.SetSashGravity(1.0)
#---
self.PanelA = JuMEG_wxPanelAB(self.SplitterAB,name=self.GetName()+"_A",bg=self.bgA,label=self.labelA,ShowTitle=self.ShowTitleA)
self.PanelB = JuMEG_wxPanelAB(self.SplitterAB,name=self.GetName()+"_B",bg=self.bgB,label=self.labelB,ShowMinMaxBt=self.ShowMinMaxBt,ShowTitle=self.ShowTitleB)
self.SplitterAB.SplitVertically(self.PanelA, self.PanelB)
#---
if self.ShowCmdButtons:
self._pnl_cmd_buttons = JuMEG_wxCMDButtons(self.MainPanel,prefix=self.GetName(),
ShowClose=True,ShowCancel=True,ShowApply=True)
#--- make a BoxSizer to pack later CTRLs
self.FitBoxSizer(self.PanelA.Panel)
self.FitBoxSizer(self.PanelB.Panel)
def ApplyLayout(self):
""" your layout stuff """
vbox = wx.BoxSizer(wx.VERTICAL)
if self.TopPanel:
# if self.TopPanel.GetChildren():
vbox.Add(self.TopPanel, 0, wx.ALIGN_LEFT| wx.EXPAND | wx.ALL, 1)
#else: self.TopPanel.Destroy()
vbox.Add(self.SplitterAB, 1, wx.ALIGN_LEFT | wx.EXPAND | wx.ALL, 1)
if self.ShowCmdButtons:
vbox.Add(self.CmdButtonPanel,0,wx.ALIGN_BOTTOM|wx.EXPAND|wx.ALL,1 )
self.MainPanel.SetSizer(vbox)
class JuMEG_wxTitlePanel(wx.Panel):
"""
wx.Panel with TextCtrl and MinMax Button packed Horizontal
Parameter:
----------
parent: parent widget
title : text <TEST>
    ShowMinMaxBt: will show MinMax button <False>
Example:
--------
pnl_title = JuMEG_wxTitlePanel( <parent pnl>,label="Parameter",ShowMinMaxBt=True)
"""
def __init__(self,parent,*kargs,**kwargs):
super(JuMEG_wxTitlePanel,self).__init__(parent,*kargs,id=wx.ID_ANY,style=wx.SUNKEN_BORDER)
self._txt = None
self._ShowMinMaxBt = False
self._font = wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL, False, u'Arial', wx.FONTENCODING_ISO8859_1)
self.update_from_kwargs(**kwargs)
self._wx_init(**kwargs)
self.ApplyLayout()
@property
def label(self): return self._label
@label.setter
def label(self,v):
self._label=v
self._txt.SetLabel(v)
@property
def TxtCtrl(self): return self._txt
def update_from_kwargs(self,**kwargs):
self._label = kwargs.get("label",self.GetName())
self._ShowMinMaxBt = kwargs.get("ShowMinMaxBt",False)
def _wx_init(self,**kwargs):
""" init Wx controls """
self.Sizer = wx.BoxSizer(wx.HORIZONTAL)
self._txt = wx.StaticText(self, wx.ID_ANY,self.label,style=wx.ALIGN_CENTRE)
self._txt.SetBackgroundColour("grey70")
self._txt.SetFont(self._font)
self.Sizer.Add(self._txt, 1, wx.ALIGN_LEFT | wx.EXPAND | wx.ALL, 1)
if self._ShowMinMaxBt:
stl = wx.BU_EXACTFIT | wx.BU_NOTEXT
self._MinMaxBt = wx.Button(self, -1,name=self.GetParent().GetName()+".SPLIT_MIN_MAX", style=stl)
self._MinMaxBt.SetBitmapLabel(wx.ArtProvider.GetBitmap(wx.ART_CROSS_MARK, wx.ART_MENU, (12, 12)))
self.Bind(wx.EVT_BUTTON, self.ToggleMinimize, self._MinMaxBt)
self.Sizer.Add( self._MinMaxBt, 0, wx.ALIGN_RIGHT | wx.EXPAND | wx.ALL, 1)
def ToggleMinimize(self, evt):
"""
toggle min/max size of logger window
send cmd to parent splitter window via pubsub
"""
obj = evt.GetEventObject()
#print("ToggleMinimize: {} name: {} ".format(obj.GetName().upper(),self.GetParent().GetName()))
pub.sendMessage(obj.GetName().upper(),name=self.GetParent().GetName(),size=obj.GetSize())
def ApplyLayout(self):
self.SetSizer(self.Sizer)
self.Fit()
self.SetAutoLayout(True)
class JuMEG_wxPanelAB(wx.Panel):
"""
wx.Panel PanelA or PanelB container panel
Parameter:
----------
parent: parent widget
name : text <TEST>
bg : wx.Colour()
title : text for title-panel
    ShowMinMaxBt: will show MinMax button <False>
    ShowTitle: <True>
Example:
--------
PanelA = JuMEG_wxPanelAB(SplitterAB,name="MAIN_PANEL_A",bg=wx.Colour(132, 126, 238),label="PDFs")
PanelB = JuMEG_wxPanelAB(SplitterAB,name="MAIN_PANEL_B",bg=wx.Colour(140, 233, 238),label="Parameter",ShowMinMaxBt=True)
"""
def __init__(self,parent,*kargs,**kwargs):
super().__init__(parent,*kargs,id=wx.ID_ANY,style=wx.SUNKEN_BORDER)
self.update_from_kwargs(**kwargs)
self.wx_init(**kwargs)
self.ApplyLayout()
def SetLabel(self,v):
try:
self.PanelHead.label= v
except: pass
def SetTitle(self, v):
self.SetLabel(v)
def update_from_kwargs(self,**kwargs):
""" """
self.SetName(kwargs.get("name","PANEL_AB"))
self.SetBackgroundColour(kwargs.get("bg",wx.Colour(132, 126, 238)))
self._show_title = kwargs.get("ShowTitle",True)
def wx_init(self, **kwargs):
""" init WX controls """
if self._show_title:
self.PanelHead = JuMEG_wxTitlePanel(self,**kwargs) # label="PDFs",ShowMinMaxBt=False
self.Panel = wx.Panel(self)
def ApplyLayout(self):
self.Sizer = wx.BoxSizer(wx.VERTICAL)
if self._show_title:
self.Sizer.Add(self.PanelHead,0, wx.ALIGN_LEFT | wx.EXPAND | wx.ALL, 1)
self.Sizer.Add(self.Panel,1, wx.ALIGN_LEFT | wx.EXPAND | wx.ALL, 1)
self.SetSizer(self.Sizer)
self.Fit()
self.SetAutoLayout(True)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
import enum
from croniter import croniter
from flask import flash, g
from flask_appbuilder import expose
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.security.decorators import has_access
from flask_babel import gettext as __
from flask_babel import lazy_gettext as _
import simplejson as json
from wtforms import BooleanField, StringField
from superset import app, appbuilder, db, security_manager
from superset.exceptions import SupersetException
from superset.models.core import Dashboard, Slice
from superset.models.schedules import (
DashboardEmailSchedule,
ScheduleType,
SliceEmailSchedule,
)
from superset.tasks.schedules import schedule_email_report
from superset.utils.core import (
get_email_address_list,
json_iso_dttm_ser,
)
from superset.views.core import json_success
from .base import DeleteMixin, SupersetModelView
class EmailScheduleView(SupersetModelView, DeleteMixin):
_extra_data = {
'test_email': False,
'test_email_recipients': None,
}
schedule_type = None
schedule_type_model = None
page_size = 20
add_exclude_columns = [
'user',
'created_on',
'changed_on',
'created_by',
'changed_by',
]
edit_exclude_columns = add_exclude_columns
description_columns = {
'deliver_as_group': 'If enabled, send a single email to all '
'recipients (in email/To: field)',
'crontab': 'Unix style crontab schedule to deliver emails. '
'Changes to schedules reflect in one hour.',
'delivery_type': 'Indicates how the rendered content is delivered',
}
add_form_extra_fields = {
'test_email': BooleanField(
'Send Test Email',
default=False,
description='If enabled, we send a test mail on create / update',
),
'test_email_recipients': StringField(
'Test Email Recipients',
default=None,
description='List of recipients to send test email to. '
'If empty, we send it to the original recipients',
),
}
edit_form_extra_fields = add_form_extra_fields
def process_form(self, form, is_created):
recipients = form.test_email_recipients.data.strip() or None
self._extra_data['test_email'] = form.test_email.data
self._extra_data['test_email_recipients'] = recipients
def pre_add(self, obj):
try:
recipients = get_email_address_list(obj.recipients)
obj.recipients = ', '.join(recipients)
except Exception:
raise SupersetException('Invalid email list')
obj.user = obj.user or g.user
if not croniter.is_valid(obj.crontab):
raise SupersetException('Invalid crontab format')
def pre_update(self, obj):
self.pre_add(obj)
def post_add(self, obj):
        # Schedule a test mail if the user requested it.
if self._extra_data['test_email']:
recipients = self._extra_data['test_email_recipients']
args = (self.schedule_type, obj.id)
kwargs = dict(recipients=recipients)
schedule_email_report.apply_async(args=args, kwargs=kwargs)
        # Notify the user that schedule changes will be active only in the
# next hour
if obj.active:
flash('Schedule changes will get applied in one hour', 'warning')
def post_update(self, obj):
self.post_add(obj)
@has_access
@expose('/fetch/<int:item_id>/', methods=['GET'])
def fetch_schedules(self, item_id):
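        # Returns, for the given dashboard/chart id, every attached schedule as a flat
        # JSON list; enum-valued and user-valued columns are serialised by name.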
query = db.session.query(self.datamodel.obj)
query = query.join(self.schedule_type_model).filter(
self.schedule_type_model.id == item_id)
schedules = []
for schedule in query.all():
info = {'schedule': schedule.id}
for col in self.list_columns + self.add_exclude_columns:
info[col] = getattr(schedule, col)
if isinstance(info[col], enum.Enum):
info[col] = info[col].name
elif isinstance(info[col], security_manager.user_model):
info[col] = info[col].username
info['user'] = schedule.user.username
info[self.schedule_type] = getattr(schedule, self.schedule_type).id
schedules.append(info)
return json_success(json.dumps(schedules, default=json_iso_dttm_ser))
class DashboardEmailScheduleView(EmailScheduleView):
schedule_type = ScheduleType.dashboard.name
schedule_type_model = Dashboard
add_title = _('Schedule Email Reports for Dashboards')
edit_title = add_title
list_title = _('Manage Email Reports for Dashboards')
datamodel = SQLAInterface(DashboardEmailSchedule)
order_columns = ['user', 'dashboard', 'created_on']
list_columns = [
'dashboard',
'active',
'crontab',
'user',
'deliver_as_group',
'delivery_type',
]
add_columns = [
'dashboard',
'active',
'crontab',
'recipients',
'deliver_as_group',
'delivery_type',
'test_email',
'test_email_recipients',
]
edit_columns = add_columns
search_columns = [
'dashboard',
'active',
'user',
'deliver_as_group',
'delivery_type',
]
label_columns = {
'dashboard': _('Dashboard'),
'created_on': _('Created On'),
'changed_on': _('Changed On'),
'user': _('User'),
'active': _('Active'),
'crontab': _('Crontab'),
'recipients': _('Recipients'),
'deliver_as_group': _('Deliver As Group'),
'delivery_type': _('Delivery Type'),
}
def pre_add(self, obj):
if obj.dashboard is None:
raise SupersetException('Dashboard is mandatory')
super(DashboardEmailScheduleView, self).pre_add(obj)
class SliceEmailScheduleView(EmailScheduleView):
schedule_type = ScheduleType.slice.name
schedule_type_model = Slice
add_title = _('Schedule Email Reports for Charts')
edit_title = add_title
list_title = _('Manage Email Reports for Charts')
datamodel = SQLAInterface(SliceEmailSchedule)
order_columns = ['user', 'slice', 'created_on']
list_columns = [
'slice',
'active',
'crontab',
'user',
'deliver_as_group',
'delivery_type',
'email_format',
]
add_columns = [
'slice',
'active',
'crontab',
'recipients',
'deliver_as_group',
'delivery_type',
'email_format',
'test_email',
'test_email_recipients',
]
edit_columns = add_columns
search_columns = [
'slice',
'active',
'user',
'deliver_as_group',
'delivery_type',
'email_format',
]
label_columns = {
'slice': _('Chart'),
'created_on': _('Created On'),
'changed_on': _('Changed On'),
'user': _('User'),
'active': _('Active'),
'crontab': _('Crontab'),
'recipients': _('Recipients'),
'deliver_as_group': _('Deliver As Group'),
'delivery_type': _('Delivery Type'),
'email_format': _('Email Format'),
}
def pre_add(self, obj):
if obj.slice is None:
raise SupersetException('Slice is mandatory')
super(SliceEmailScheduleView, self).pre_add(obj)
def _register_schedule_menus():
appbuilder.add_separator('Manage')
appbuilder.add_view(
DashboardEmailScheduleView,
'Dashboard Email Schedules',
label=__('Dashboard Emails'),
category='Manage',
category_label=__('Manage'),
icon='fa-search')
appbuilder.add_view(
SliceEmailScheduleView,
'Chart Emails',
label=__('Chart Email Schedules'),
category='Manage',
category_label=__('Manage'),
icon='fa-search')
if app.config.get('ENABLE_SCHEDULED_EMAIL_REPORTS'):
_register_schedule_menus()
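# Illustrative note added for clarity (not part of the original module): the menu items
# above only appear when the deployment enables the feature flag, typically in a
# superset_config.py (hypothetical deployment file):
#
#     ENABLE_SCHEDULED_EMAIL_REPORTS = True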
|