code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
Builder.load_string('''
<FxDialog@Popup>
id: popup
title: 'Fiat Currency'
size_hint: 0.8, 0.8
pos_hint: {'top':0.9}
BoxLayout:
orientation: 'vertical'
Widget:
size_hint: 1, 0.1
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.1
Label:
text: _('Currency')
height: '48dp'
Spinner:
height: '48dp'
id: ccy
on_text: popup.on_currency(self.text)
Widget:
size_hint: 1, 0.1
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.1
Label:
text: _('Source')
height: '48dp'
Spinner:
height: '48dp'
id: exchanges
on_text: popup.on_exchange(self.text)
Widget:
size_hint: 1, 0.2
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.2
Button:
text: 'Cancel'
size_hint: 0.5, None
height: '48dp'
on_release: popup.dismiss()
Button:
text: 'OK'
size_hint: 0.5, None
height: '48dp'
on_release:
root.callback()
popup.dismiss()
''')
from kivy.uix.label import Label
from kivy.uix.checkbox import CheckBox
from kivy.uix.widget import Widget
from kivy.clock import Clock
from electrum_vtc_gui.kivy.i18n import _
from functools import partial
class FxDialog(Factory.Popup):
    """Popup dialog for choosing the fiat currency and the exchange-rate source.

    Wired to the kv rule <FxDialog@Popup> above: the `ccy` spinner selects the
    currency and the `exchanges` spinner selects the rate source.
    """

    def __init__(self, app, plugins, config, callback):
        Factory.Popup.__init__(self)
        self.app = app
        self.config = config
        self.callback = callback
        self.fx = self.app.fx
        # Fiat history is always tracked while this dialog is in use.
        self.fx.set_history_config(True)
        self.add_currencies()

    def add_exchanges(self):
        """Populate the exchange spinner for the currently selected currency."""
        if self.fx.is_enabled():
            exchanges = sorted(self.fx.get_exchanges_by_ccy(self.fx.get_currency(), True))
            current = self.fx.exchange.name()
            selected = current if current in exchanges else exchanges[0]
        else:
            # Fiat display disabled: blank the spinner out entirely.
            exchanges = []
            selected = ''
        spinner = self.ids.exchanges
        spinner.values = exchanges
        spinner.text = selected

    def on_exchange(self, text):
        """Spinner callback: switch the rate source when a new one is picked."""
        if not text:
            return
        if self.fx.is_enabled() and text != self.fx.exchange.name():
            self.fx.set_exchange(text)

    def add_currencies(self):
        """Populate the currency spinner; _('None') means fiat display is off."""
        currencies = [_('None')] + self.fx.get_currencies(True)
        my_ccy = self.fx.get_currency() if self.fx.is_enabled() else _('None')
        self.ids.ccy.values = currencies
        self.ids.ccy.text = my_ccy

    def on_currency(self, ccy):
        """Spinner callback: enable/disable fiat and propagate the new currency."""
        enabled = (ccy != _('None'))
        self.fx.set_enabled(enabled)
        if enabled:
            if ccy != self.fx.get_currency():
                self.fx.set_currency(ccy)
            self.app.fiat_unit = ccy
        else:
            self.app.is_fiat = False
        # Refresh the exchange spinner on the next frame, after the fx
        # settings above have taken effect.
        Clock.schedule_once(lambda dt: self.add_exchanges())
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
Builder.load_string('''
<FxDialog@Popup>
id: popup
title: 'Fiat Currency'
size_hint: 0.8, 0.8
pos_hint: {'top':0.9}
BoxLayout:
orientation: 'vertical'
Widget:
size_hint: 1, 0.1
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.1
Label:
text: _('Currency')
height: '48dp'
Spinner:
height: '48dp'
id: ccy
on_text: popup.on_currency(self.text)
Widget:
size_hint: 1, 0.1
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.1
Label:
text: _('Source')
height: '48dp'
Spinner:
height: '48dp'
id: exchanges
on_text: popup.on_exchange(self.text)
Widget:
size_hint: 1, 0.2
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.2
Button:
text: 'Cancel'
size_hint: 0.5, None
height: '48dp'
on_release: popup.dismiss()
Button:
text: 'OK'
size_hint: 0.5, None
height: '48dp'
on_release:
root.callback()
popup.dismiss()
''')
from kivy.uix.label import Label
from kivy.uix.checkbox import CheckBox
from kivy.uix.widget import Widget
from kivy.clock import Clock
from electrum_vtc_gui.kivy.i18n import _
from functools import partial
class FxDialog(Factory.Popup):
def __init__(self, app, plugins, config, callback):
Factory.Popup.__init__(self)
self.app = app
self.config = config
self.callback = callback
self.fx = self.app.fx
self.fx.set_history_config(True)
self.add_currencies()
def add_exchanges(self):
exchanges = sorted(self.fx.get_exchanges_by_ccy(self.fx.get_currency(), True)) if self.fx.is_enabled() else []
mx = self.fx.exchange.name() if self.fx.is_enabled() else ''
ex = self.ids.exchanges
ex.values = exchanges
ex.text = (mx if mx in exchanges else exchanges[0]) if self.fx.is_enabled() else ''
def on_exchange(self, text):
if not text:
return
if self.fx.is_enabled() and text != self.fx.exchange.name():
self.fx.set_exchange(text)
def add_currencies(self):
currencies = [_('None')] + self.fx.get_currencies(True)
my_ccy = self.fx.get_currency() if self.fx.is_enabled() else _('None')
self.ids.ccy.values = currencies
self.ids.ccy.text = my_ccy
def on_currency(self, ccy):
b = (ccy != _('None'))
self.fx.set_enabled(b)
if b:
if ccy != self.fx.get_currency():
self.fx.set_currency(ccy)
self.app.fiat_unit = ccy
else:
self.app.is_fiat = False
Clock.schedule_once(lambda dt: self.add_exchanges()) | 0.576184 | 0.10942 |
from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING
from grouper.entities.permission import PermissionNotFoundException
from grouper.util import matches_glob
if TYPE_CHECKING:
from grouper.usecases.interfaces import (
GroupInterface,
PermissionInterface,
ServiceAccountInterface,
TransactionInterface,
UserInterface,
)
from typing import List, Tuple
class GrantPermissionToGroupUI(metaclass=ABCMeta):
    """Abstract UI callbacks for the grant-permission-to-group use case.

    The use case reports its outcome by calling exactly one of these
    methods; a concrete UI (web handler, CLI, ...) implements them to
    render the result.  All methods return None.
    """

    @abstractmethod
    def grant_permission_to_group_failed_invalid_argument(
        self, permission, argument, group, message
    ):
        # type: (str, str, str, str) -> None
        pass

    @abstractmethod
    def grant_permission_to_group_failed_permission_denied(
        self, permission, argument, group, message
    ):
        # type: (str, str, str, str) -> None
        pass

    @abstractmethod
    def grant_permission_to_group_failed_permission_not_found(self, permission, group):
        # type: (str, str) -> None
        pass

    @abstractmethod
    def grant_permission_to_group_failed_group_not_found(self, group):
        # type: (str) -> None
        pass

    @abstractmethod
    def grant_permission_to_group_failed_permission_already_exists(self, group):
        # type: (str) -> None
        pass

    @abstractmethod
    def granted_permission_to_group(self, permission, argument, group):
        # type: (str, str, str) -> None
        # (fixed: was "# type;" with a semicolon, which type checkers ignore)
        pass
class GrantPermissionToGroup:
    """Use case: an actor grants a (permission, argument) pair to a group.

    Outcomes are reported exclusively through the injected
    GrantPermissionToGroupUI; all public methods return None.
    """

    def __init__(
        self,
        actor,  # type: str
        ui,  # type: GrantPermissionToGroupUI
        permission_service,  # type: PermissionInterface
        service_account_service,  # type: ServiceAccountInterface
        user_service,  # type: UserInterface
        group_service,  # type: GroupInterface
        transaction_service,  # type: TransactionInterface
    ):
        # type: (...) -> None
        self.actor = actor
        self.ui = ui
        self.permission_service = permission_service
        self.service_account_service = service_account_service
        self.user_service = user_service
        self.group_service = group_service
        self.transaction_service = transaction_service

    def permissions_grantable(self):
        # type: () -> List[Tuple[str, str]]
        """Return sorted (permission, argument-glob) pairs the actor may grant.

        The actor can grant a permission to the group if the actor
        independently has the ability to grant the permission, as a
        permission admin or because of grants of grouper.permission.grants.
        """
        if self.service_account_service.service_account_exists(self.actor):
            service = self.service_account_service
            grantable = service.permissions_grantable_by_service_account(self.actor)
        else:
            # Actor is not a service account, and is thus a normal user.
            grantable = self.user_service.permissions_grantable_by_user(self.actor)
        return sorted(grantable, key=lambda x: x[0] + x[1])

    def grant_permission_to_group(self, permission, argument, group):
        # type: (str, str, str) -> None
        """Grant permission with argument to group, reporting via the UI."""
        if not self.group_service.group_exists(group):
            self.ui.grant_permission_to_group_failed_group_not_found(group)
            return
        if self.group_service.group_has_matching_permission_grant(group, permission, argument):
            self.ui.grant_permission_to_group_failed_permission_already_exists(group)
            return

        valid, error = self.permission_service.is_valid_permission_argument(permission, argument)
        if not valid:
            assert error
            self.ui.grant_permission_to_group_failed_invalid_argument(
                permission, argument, group, error
            )
            return

        # The actor must hold a grantable entry whose argument glob covers
        # the requested argument.
        allowed = any(
            grantable_perm == permission and matches_glob(grantable_arg, argument)
            for grantable_perm, grantable_arg in self.permissions_grantable()
        )
        if not allowed:
            # BUG FIX: the two literals previously joined with no space,
            # producing "...ability tograant the permission...".
            message = (
                "Permission denied. Actor {actor} does not have the ability to "
                "grant the permission {permission} with argument {argument}."
            ).format(actor=self.actor, permission=permission, argument=argument)
            if argument == "":
                message += " (Did you mean to leave the argument empty?)"
            self.ui.grant_permission_to_group_failed_permission_denied(
                permission, argument, group, message
            )
            return

        with self.transaction_service.transaction():
            try:
                self.group_service.grant_permission_to_group(permission, argument, group)
            except PermissionNotFoundException:
                # It should be impossible to hit this exception: to get this
                # far, the permission must be on the actor's grantable list
                # and thus must exist.  Kept in case that invariant changes.
                self.ui.grant_permission_to_group_failed_permission_not_found(permission, group)
                return
        self.ui.granted_permission_to_group(permission, argument, group)
from typing import TYPE_CHECKING
from grouper.entities.permission import PermissionNotFoundException
from grouper.util import matches_glob
if TYPE_CHECKING:
from grouper.usecases.interfaces import (
GroupInterface,
PermissionInterface,
ServiceAccountInterface,
TransactionInterface,
UserInterface,
)
from typing import List, Tuple
class GrantPermissionToGroupUI(metaclass=ABCMeta):
@abstractmethod
def grant_permission_to_group_failed_invalid_argument(
self, permission, argument, group, message
):
# type: (str, str, str, str) -> None
pass
@abstractmethod
def grant_permission_to_group_failed_permission_denied(
self, permission, argument, group, message
):
# type: (str, str, str, str) -> None
pass
@abstractmethod
def grant_permission_to_group_failed_permission_not_found(self, permission, group):
# type: (str, str) -> None
pass
@abstractmethod
def grant_permission_to_group_failed_group_not_found(self, group):
# type: (str) -> None
pass
@abstractmethod
def grant_permission_to_group_failed_permission_already_exists(self, group):
# type: (str) -> None
pass
@abstractmethod
def granted_permission_to_group(self, permission, argument, group):
# type; (str, str, str) -> None
pass
class GrantPermissionToGroup:
def __init__(
self,
actor, # type: str
ui, # type: GrantPermissionToGroupUI
permission_service, # type: PermissionInterface
service_account_service, # type: ServiceAccountInterface
user_service, # type: UserInterface
group_service, # type: GroupInterface
transaction_service, # type: TransactionInterface
):
# type: (...) -> None
self.actor = actor
self.ui = ui
self.permission_service = permission_service
self.service_account_service = service_account_service
self.user_service = user_service
self.group_service = group_service
self.transaction_service = transaction_service
def permissions_grantable(self):
# The actor can grant a permission to the group if the actor independently has the
# ability to grant the permission, as a permission admin or because of grants of
# grouper.permission.grants.
actor_grantable_perms = [] # type: List[Tuple[str, str]]
if self.service_account_service.service_account_exists(self.actor):
p = self.service_account_service.permissions_grantable_by_service_account(self.actor)
actor_grantable_perms = p # line length :( if you see a nicer way, do it
else: # actor is not a service account, and is thus a normal user
actor_grantable_perms = self.user_service.permissions_grantable_by_user(self.actor)
return sorted(actor_grantable_perms, key=lambda x: x[0] + x[1])
def grant_permission_to_group(self, permission, argument, group):
# type: (str, str, str) -> None
if not self.group_service.group_exists(group):
self.ui.grant_permission_to_group_failed_group_not_found(group)
return
if self.group_service.group_has_matching_permission_grant(group, permission, argument):
self.ui.grant_permission_to_group_failed_permission_already_exists(group)
return
valid, error = self.permission_service.is_valid_permission_argument(permission, argument)
if not valid:
assert error
self.ui.grant_permission_to_group_failed_invalid_argument(
permission, argument, group, error
)
return
allowed = False
grantable = self.permissions_grantable()
for grantable_perm, grantable_arg in grantable:
if grantable_perm == permission and matches_glob(grantable_arg, argument):
allowed = True
break
if not allowed:
message = (
"Permission denied. Actor {actor} does not have the ability to"
"grant the permission {permission} with argument {argument}."
).format(actor=self.actor, permission=permission, argument=argument)
if argument == "":
message += " (Did you mean to leave the argument empty?)"
self.ui.grant_permission_to_group_failed_permission_denied(
permission, argument, group, message
)
return
with self.transaction_service.transaction():
try:
self.group_service.grant_permission_to_group(permission, argument, group)
except PermissionNotFoundException:
# It should be impossible to hit this exception. In order to get this far, the
# perm must be on the list of perms the actor can grant, and thus must exist.
# Leaving the logic here however in case that changes in the future.
self.ui.grant_permission_to_group_failed_permission_not_found(permission, group)
return
self.ui.granted_permission_to_group(permission, argument, group) | 0.738952 | 0.135518 |
from unittest import TestCase
import simplejson as json
from mock import patch, Mock, PropertyMock
from pypurlib.pypurlib import bin2hstr
from pur.core import config
from pur.core.Indexer import Indexer
from pur.core.misc import logger
from pur.core.State import State
from pur.core.StateContainer import StateContainer
from pur.core.OptimizedAddressState import OptimizedAddressState
from pur.core.BlockHeader import BlockHeader
from pur.core.txs.CoinBase import CoinBase
from pur.crypto.misc import sha256
from tests.core.txs.testdata import test_json_CoinBase
from tests.misc.helper import get_alice_purss, set_pur_dir
logger.initialize_default()
# Unit tests for the CoinBase transaction type.
# The class-level @patch injects a mocked Transaction logger (m_logger)
# into every test method.
@patch('pur.core.txs.Transaction.logger')
class TestCoinBase(TestCase):
def __init__(self, *args, **kwargs):
super(TestCoinBase, self).__init__(*args, **kwargs)
# Build state inside a throwaway directory so tests never touch real data.
with set_pur_dir('no_data'):
self.state = State()
self.alice = get_alice_purss()
self.alice.set_ots_index(11)
# Minimal BlockHeader stand-in carrying only the fields these tests read.
self.mock_blockheader = Mock(spec=BlockHeader)
self.mock_blockheader.stake_selector = self.alice.address
self.mock_blockheader.block_reward = 50
self.mock_blockheader.fee_reward = 40
self.mock_blockheader.prev_headerhash = sha256(b'prev_headerhash')
self.mock_blockheader.block_number = 1
self.mock_blockheader.headerhash = sha256(b'headerhash')
# Coinbase amount = block reward + fee reward (50 + 40 = 90).
self.amount = self.mock_blockheader.block_reward + self.mock_blockheader.fee_reward
self.maxDiff = None
def test_create(self, m_logger):
tx = CoinBase.create(config.dev, self.amount, self.alice.address, self.mock_blockheader.block_number)
self.assertIsInstance(tx, CoinBase)
def test_to_json(self, m_logger):
amount = self.mock_blockheader.block_reward + self.mock_blockheader.fee_reward
tx = CoinBase.create(config.dev, amount, self.alice.address, self.mock_blockheader.block_number)
txjson = tx.to_json()
self.assertEqual(json.loads(test_json_CoinBase), json.loads(txjson))
def test_from_txdict(self, m_logger):
amount = self.mock_blockheader.block_reward + self.mock_blockheader.fee_reward
tx = CoinBase.create(config.dev, amount, self.alice.address, self.mock_blockheader.block_number)
self.assertIsInstance(tx, CoinBase)
# Test that common Transaction components were copied over.
self.assertEqual(self.mock_blockheader.block_number + 1, tx.nonce)
self.assertEqual('010300a1da274e68c88b0ccf448e0b1916fa789b01eb2ed4e9ad565ce264c9390782a9c61ac02f',
bin2hstr(tx.addr_to))
self.assertEqual('222460cc57ab8683b46f1831fe6cf1832c7e3134baf74d33bfaf91741e19cba2', bin2hstr(tx.txhash))
self.assertEqual(tx.amount, 90)
def test_validate_custom(self, m_logger):
"""
CoinBase _validate_custom() only checks if fee == 0
"""
tx = CoinBase.create(config.dev, self.amount, self.alice.address, self.mock_blockheader.block_number)
tx._data.fee = 1
result = tx._validate_custom()
self.assertFalse(result)
tx._data.fee = 0
result = tx._validate_custom()
self.assertTrue(result)
def test_validate_extended(self, m_logger):
"""
CoinBase validate_extended() checks for
1. valid coinbase address (the coinbase address must be config.dev.coinbase_address)
2. valid addr_to
then calls _validate_custom()
"""
tx = CoinBase.create(config.dev, self.amount, self.alice.address, self.mock_blockheader.block_number)
tx._data.master_addr = self.alice.address
addresses_state = {
config.dev.coinbase_address: OptimizedAddressState.get_default(config.dev.coinbase_address),
self.alice.address: OptimizedAddressState.get_default(self.alice.address),
}
addresses_state[config.dev.coinbase_address].pbdata.balance = 1000000
state_container = StateContainer(addresses_state=addresses_state,
tokens=Indexer(b'token', None),
slaves=Indexer(b'slave', None),
lattice_pk=Indexer(b'lattice_pk', None),
multi_sig_spend_txs=dict(),
votes_stats=dict(),
block_number=self.mock_blockheader.block_number,
total_coin_supply=100,
current_dev_config=config.dev,
write_access=True,
my_db=self.state._db,
batch=None)
result = tx._validate_extended(state_container)
self.assertFalse(result)
tx._data.master_addr = config.dev.coinbase_address
with patch('pur.core.txs.CoinBase.CoinBase.addr_to', new_callable=PropertyMock) as m_addr_to:
m_addr_to.return_value = b'Fake Address'
result = tx._validate_extended(state_container)
self.assertFalse(result)
result = tx._validate_extended(state_container)
self.assertTrue(result)
def test_apply_coinbase_txn(self, m_logger):
"""
Alice earned some coins.
"""
addresses_state = {
config.dev.coinbase_address: OptimizedAddressState.get_default(config.dev.coinbase_address),
self.alice.address: OptimizedAddressState.get_default(self.alice.address),
}
addresses_state[config.dev.coinbase_address].pbdata.balance = 1000000
tx = CoinBase.create(config.dev, self.amount, self.alice.address, self.mock_blockheader.block_number)
state_container = StateContainer(addresses_state=addresses_state,
tokens=Indexer(b'token', None),
slaves=Indexer(b'slave', None),
lattice_pk=Indexer(b'lattice_pk', None),
multi_sig_spend_txs=dict(),
votes_stats=dict(),
block_number=self.mock_blockheader.block_number,
total_coin_supply=100,
current_dev_config=config.dev,
write_access=True,
my_db=self.state._db,
batch=None)
# self.state.apply(tx, addresses_state)
tx.apply(self.state, state_container)
self.assertEqual(1000000 - tx.amount, addresses_state[config.dev.coinbase_address].balance)
storage_key = state_container.paginated_tx_hash.generate_key(config.dev.coinbase_address, 1)
self.assertEqual([tx.txhash], state_container.paginated_tx_hash.key_value[storage_key])
self.assertEqual(tx.amount, addresses_state[self.alice.address].balance)
storage_key = state_container.paginated_tx_hash.generate_key(self.alice.address, 1)
self.assertEqual([tx.txhash], state_container.paginated_tx_hash.key_value[storage_key])
def test_revert_coinbase_txn(self, m_logger):
    """
    Alice earned some coins. Undo this.

    Applies the coinbase transaction, reverts it, and verifies that both
    balances and both paginated tx-hash indexes return to their pre-apply
    state.
    """
    tx = CoinBase.create(config.dev, self.amount, self.alice.address, self.mock_blockheader.block_number)
    addresses_state = {
        config.dev.coinbase_address: OptimizedAddressState.get_default(config.dev.coinbase_address),
        self.alice.address: OptimizedAddressState.get_default(self.alice.address),
    }
    addresses_state[config.dev.coinbase_address].pbdata.balance = 1000000
    state_container = StateContainer(addresses_state=addresses_state,
                                     tokens=Indexer(b'token', None),
                                     slaves=Indexer(b'slave', None),
                                     lattice_pk=Indexer(b'lattice_pk', None),
                                     multi_sig_spend_txs=dict(),
                                     votes_stats=dict(),
                                     block_number=self.mock_blockheader.block_number,
                                     total_coin_supply=100,
                                     current_dev_config=config.dev,
                                     write_access=True,
                                     my_db=self.state._db,
                                     batch=None)
    tx.apply(self.state, state_container)
    tx.revert(self.state, state_container)
    self.assertEqual(1000000, addresses_state[config.dev.coinbase_address].balance)
    storage_key = state_container.paginated_tx_hash.generate_key(config.dev.coinbase_address, 1)
    self.assertEqual([], state_container.paginated_tx_hash.key_value[storage_key])
    self.assertEqual(0, addresses_state[self.alice.address].balance)
    # BUG FIX: the original re-checked the coinbase address here; the intent
    # (mirroring test_apply_coinbase_txn) is to check Alice's tx-hash page.
    storage_key = state_container.paginated_tx_hash.generate_key(self.alice.address, 1)
    self.assertEqual([], state_container.paginated_tx_hash.key_value[storage_key])
def test_affected_address(self, m_logger):
# This transaction can only involve 2 addresses.
affected_addresses = set()
tx = CoinBase.create(config.dev, self.amount, self.alice.address, self.mock_blockheader.block_number)
tx.set_affected_address(affected_addresses)
self.assertEqual(2, len(affected_addresses)) | tests/core/txs/test_CoinBase.py | from unittest import TestCase
import simplejson as json
from mock import patch, Mock, PropertyMock
from pypurlib.pypurlib import bin2hstr
from pur.core import config
from pur.core.Indexer import Indexer
from pur.core.misc import logger
from pur.core.State import State
from pur.core.StateContainer import StateContainer
from pur.core.OptimizedAddressState import OptimizedAddressState
from pur.core.BlockHeader import BlockHeader
from pur.core.txs.CoinBase import CoinBase
from pur.crypto.misc import sha256
from tests.core.txs.testdata import test_json_CoinBase
from tests.misc.helper import get_alice_purss, set_pur_dir
logger.initialize_default()
@patch('pur.core.txs.Transaction.logger')
class TestCoinBase(TestCase):
def __init__(self, *args, **kwargs):
super(TestCoinBase, self).__init__(*args, **kwargs)
with set_pur_dir('no_data'):
self.state = State()
self.alice = get_alice_purss()
self.alice.set_ots_index(11)
self.mock_blockheader = Mock(spec=BlockHeader)
self.mock_blockheader.stake_selector = self.alice.address
self.mock_blockheader.block_reward = 50
self.mock_blockheader.fee_reward = 40
self.mock_blockheader.prev_headerhash = sha256(b'prev_headerhash')
self.mock_blockheader.block_number = 1
self.mock_blockheader.headerhash = sha256(b'headerhash')
self.amount = self.mock_blockheader.block_reward + self.mock_blockheader.fee_reward
self.maxDiff = None
def test_create(self, m_logger):
tx = CoinBase.create(config.dev, self.amount, self.alice.address, self.mock_blockheader.block_number)
self.assertIsInstance(tx, CoinBase)
def test_to_json(self, m_logger):
amount = self.mock_blockheader.block_reward + self.mock_blockheader.fee_reward
tx = CoinBase.create(config.dev, amount, self.alice.address, self.mock_blockheader.block_number)
txjson = tx.to_json()
self.assertEqual(json.loads(test_json_CoinBase), json.loads(txjson))
def test_from_txdict(self, m_logger):
amount = self.mock_blockheader.block_reward + self.mock_blockheader.fee_reward
tx = CoinBase.create(config.dev, amount, self.alice.address, self.mock_blockheader.block_number)
self.assertIsInstance(tx, CoinBase)
# Test that common Transaction components were copied over.
self.assertEqual(self.mock_blockheader.block_number + 1, tx.nonce)
self.assertEqual('010300a1da274e68c88b0ccf448e0b1916fa789b01eb2ed4e9ad565ce264c9390782a9c61ac02f',
bin2hstr(tx.addr_to))
self.assertEqual('222460cc57ab8683b46f1831fe6cf1832c7e3134baf74d33bfaf91741e19cba2', bin2hstr(tx.txhash))
self.assertEqual(tx.amount, 90)
def test_validate_custom(self, m_logger):
"""
CoinBase _validate_custom() only checks if fee == 0
"""
tx = CoinBase.create(config.dev, self.amount, self.alice.address, self.mock_blockheader.block_number)
tx._data.fee = 1
result = tx._validate_custom()
self.assertFalse(result)
tx._data.fee = 0
result = tx._validate_custom()
self.assertTrue(result)
def test_validate_extended(self, m_logger):
"""
CoinBase validate_extended() checks for
1. valid coinbase address (the coinbase address must be config.dev.coinbase_address)
2. valid addr_to
then calls _validate_custom()
"""
tx = CoinBase.create(config.dev, self.amount, self.alice.address, self.mock_blockheader.block_number)
tx._data.master_addr = self.alice.address
addresses_state = {
config.dev.coinbase_address: OptimizedAddressState.get_default(config.dev.coinbase_address),
self.alice.address: OptimizedAddressState.get_default(self.alice.address),
}
addresses_state[config.dev.coinbase_address].pbdata.balance = 1000000
state_container = StateContainer(addresses_state=addresses_state,
tokens=Indexer(b'token', None),
slaves=Indexer(b'slave', None),
lattice_pk=Indexer(b'lattice_pk', None),
multi_sig_spend_txs=dict(),
votes_stats=dict(),
block_number=self.mock_blockheader.block_number,
total_coin_supply=100,
current_dev_config=config.dev,
write_access=True,
my_db=self.state._db,
batch=None)
result = tx._validate_extended(state_container)
self.assertFalse(result)
tx._data.master_addr = config.dev.coinbase_address
with patch('pur.core.txs.CoinBase.CoinBase.addr_to', new_callable=PropertyMock) as m_addr_to:
m_addr_to.return_value = b'Fake Address'
result = tx._validate_extended(state_container)
self.assertFalse(result)
result = tx._validate_extended(state_container)
self.assertTrue(result)
def test_apply_coinbase_txn(self, m_logger):
"""
Alice earned some coins.
"""
addresses_state = {
config.dev.coinbase_address: OptimizedAddressState.get_default(config.dev.coinbase_address),
self.alice.address: OptimizedAddressState.get_default(self.alice.address),
}
addresses_state[config.dev.coinbase_address].pbdata.balance = 1000000
tx = CoinBase.create(config.dev, self.amount, self.alice.address, self.mock_blockheader.block_number)
state_container = StateContainer(addresses_state=addresses_state,
tokens=Indexer(b'token', None),
slaves=Indexer(b'slave', None),
lattice_pk=Indexer(b'lattice_pk', None),
multi_sig_spend_txs=dict(),
votes_stats=dict(),
block_number=self.mock_blockheader.block_number,
total_coin_supply=100,
current_dev_config=config.dev,
write_access=True,
my_db=self.state._db,
batch=None)
# self.state.apply(tx, addresses_state)
tx.apply(self.state, state_container)
self.assertEqual(1000000 - tx.amount, addresses_state[config.dev.coinbase_address].balance)
storage_key = state_container.paginated_tx_hash.generate_key(config.dev.coinbase_address, 1)
self.assertEqual([tx.txhash], state_container.paginated_tx_hash.key_value[storage_key])
self.assertEqual(tx.amount, addresses_state[self.alice.address].balance)
storage_key = state_container.paginated_tx_hash.generate_key(self.alice.address, 1)
self.assertEqual([tx.txhash], state_container.paginated_tx_hash.key_value[storage_key])
def test_revert_coinbase_txn(self, m_logger):
"""
Alice earned some coins. Undo this.
"""
tx = CoinBase.create(config.dev, self.amount, self.alice.address, self.mock_blockheader.block_number)
addresses_state = {
config.dev.coinbase_address: OptimizedAddressState.get_default(config.dev.coinbase_address),
self.alice.address: OptimizedAddressState.get_default(self.alice.address),
}
addresses_state[config.dev.coinbase_address].pbdata.balance = 1000000
state_container = StateContainer(addresses_state=addresses_state,
tokens=Indexer(b'token', None),
slaves=Indexer(b'slave', None),
lattice_pk=Indexer(b'lattice_pk', None),
multi_sig_spend_txs=dict(),
votes_stats=dict(),
block_number=self.mock_blockheader.block_number,
total_coin_supply=100,
current_dev_config=config.dev,
write_access=True,
my_db=self.state._db,
batch=None)
tx.apply(self.state, state_container)
tx.revert(self.state, state_container)
self.assertEqual(1000000, addresses_state[config.dev.coinbase_address].balance)
storage_key = state_container.paginated_tx_hash.generate_key(config.dev.coinbase_address, 1)
self.assertEqual([], state_container.paginated_tx_hash.key_value[storage_key])
self.assertEqual(0, addresses_state[self.alice.address].balance)
storage_key = state_container.paginated_tx_hash.generate_key(config.dev.coinbase_address, 1)
self.assertEqual([], state_container.paginated_tx_hash.key_value[storage_key])
def test_affected_address(self, m_logger):
# This transaction can only involve 2 addresses.
affected_addresses = set()
tx = CoinBase.create(config.dev, self.amount, self.alice.address, self.mock_blockheader.block_number)
tx.set_affected_address(affected_addresses)
self.assertEqual(2, len(affected_addresses)) | 0.824956 | 0.238689 |
"""UNIT Test for Cli bin/v0upload.py."""
from typing import List
from v0tools.cli import Cli
import unittest
from io import StringIO
import sys
import contextlib
import pathlib
import threading
import tempfile
from pathlib import Path
from v0tools.lib import util
import requests
import time
BASE = pathlib.Path(__file__).parent.parent.resolve()
sys.path.append(str(BASE.joinpath("bin")))
name = "v0upload"
cli_mod = __import__(name)
cli = cli_mod.cli # type: Cli
def _bgserv(args):
cli.run_nocatch(args)
class Test_v0serv(unittest.TestCase):
    """Tests for the v0upload CLI entry point."""

    def test_help(self):
        """--help must exit with code 0 and print the usage text."""
        inp_args = "--help"
        with StringIO() as buf, contextlib.redirect_stdout(buf):
            with self.assertRaises(SystemExit) as err:
                args = cli.get_parse(inp_args)
                print(args)
            self.assertEqual(err.exception.code, 0)  # exits ok
            output = buf.getvalue()
            self.assertIn("--help", output)

    def test_serv_dir(self):
        """Upload 50 random files over HTTP PUT and verify server-side contents."""
        r_files = {f"file_{idx}.txt": util.randstr(20) for idx in range(50)}
        vfiles = []  # type: List[Path]
        port = util.randport()
        with tempfile.TemporaryDirectory() as tdir:
            tdir = Path(tdir)
            upload_dir = tdir.joinpath("upload_path")
            upload_dir.mkdir()
            # Write the random source files.
            for fn, content in r_files.items():
                fobj = tdir.joinpath(fn)
                vfiles.append(fobj)
                fobj.write_text(content)
            # Start the upload server in a daemon thread.
            pass_args = cli.get_parse(f"-p {port} -i lo {upload_dir}")
            t = threading.Thread(target=_bgserv, args=(pass_args,))
            t.daemon = True
            t.start()
            time.sleep(3)  # give the server time to come up
            for i in vfiles:
                # BUG FIX: the original used the invalid host "1172.16.58.3"
                # (five octets); the server listens on loopback ("-i lo").
                url = f"http://127.0.0.1:{port}/{i.name}"
                with i.open("rb") as fileh:
                    requests.put(url, data=fileh)
            for i in vfiles:
                expected = i.read_text()
                npath = upload_dir.joinpath(i.name)
                upl_content = npath.read_text()
                self.assertEqual(expected, upl_content)
            time.sleep(3)
if __name__ == "__main__":
unittest.main()
# done | tests/test_cli_v0upload.py | """UNIT Test for Cli bin/v0upload.py."""
from typing import List
from v0tools.cli import Cli
import unittest
from io import StringIO
import sys
import contextlib
import pathlib
import threading
import tempfile
from pathlib import Path
from v0tools.lib import util
import requests
import time
BASE = pathlib.Path(__file__).parent.parent.resolve()
sys.path.append(str(BASE.joinpath("bin")))
name = "v0upload"
cli_mod = __import__(name)
cli = cli_mod.cli # type: Cli
def _bgserv(args):
cli.run_nocatch(args)
class Test_v0serv(unittest.TestCase):
def test_help(self):
inp_args = "--help"
with StringIO() as buf, contextlib.redirect_stdout(buf):
with self.assertRaises(SystemExit) as err:
args = cli.get_parse(inp_args)
print(args)
self.assertEqual(err.exception.code, 0) # exits ok
output = buf.getvalue()
self.assertIn("--help", output)
def test_serv_dir(self):
r_files = {f"file_{idx}.txt": util.randstr(20) for idx in range(50)}
vfiles = [] # type: List[Path]
port = util.randport()
with tempfile.TemporaryDirectory() as tdir:
tdir = Path(tdir)
upload_dir = tdir.joinpath("upload_path")
upload_dir.mkdir()
# Write rando files
for fn, content in r_files.items():
fobj = tdir.joinpath(fn)
vfiles.append(fobj)
fobj.write_text(content)
# start upload server
pass_args = cli.get_parse(f"-p {port} -i lo {upload_dir}")
t = threading.Thread(target=_bgserv, args=(pass_args,))
t.daemon = True
t.start()
time.sleep(3)
# Let's goooooo
for i in vfiles:
url = f"http://1172.16.58.3:{port}/{i.name}"
with i.open("rb") as fileh:
requests.put(url, data=fileh)
for i in vfiles:
expected = i.read_text()
npath = upload_dir.joinpath(i.name)
upl_content = npath.read_text()
self.assertEqual(expected, upl_content)
time.sleep(3)
if __name__ == "__main__":
unittest.main()
# done | 0.49585 | 0.252734 |
import random
import pytest
import networkx as nx
from networkx.testing.utils import *
class TestFunction(object):
def setup_method(self):
self.G = nx.Graph({0: [1, 2, 3], 1: [1, 2, 0], 4: []}, name='Test')
self.Gdegree = {0: 3, 1: 2, 2: 2, 3: 1, 4: 0}
self.Gnodes = list(range(5))
self.Gedges = [(0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2)]
self.DG = nx.DiGraph({0: [1, 2, 3], 1: [1, 2, 0], 4: []})
self.DGin_degree = {0: 1, 1: 2, 2: 2, 3: 1, 4: 0}
self.DGout_degree = {0: 3, 1: 3, 2: 0, 3: 0, 4: 0}
self.DGnodes = list(range(5))
self.DGedges = [(0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2)]
def test_nodes(self):
assert_nodes_equal(self.G.nodes(), list(nx.nodes(self.G)))
assert_nodes_equal(self.DG.nodes(), list(nx.nodes(self.DG)))
def test_edges(self):
assert_edges_equal(self.G.edges(), list(nx.edges(self.G)))
assert sorted(self.DG.edges()) == sorted(nx.edges(self.DG))
assert_edges_equal(self.G.edges(nbunch=[0, 1, 3]),
list(nx.edges(self.G, nbunch=[0, 1, 3])))
assert (sorted(self.DG.edges(nbunch=[0, 1, 3])) ==
sorted(nx.edges(self.DG, nbunch=[0, 1, 3])))
def test_degree(self):
assert_edges_equal(self.G.degree(), list(nx.degree(self.G)))
assert sorted(self.DG.degree()) == sorted(nx.degree(self.DG))
assert_edges_equal(self.G.degree(nbunch=[0, 1]),
list(nx.degree(self.G, nbunch=[0, 1])))
assert (sorted(self.DG.degree(nbunch=[0, 1])) ==
sorted(nx.degree(self.DG, nbunch=[0, 1])))
assert_edges_equal(self.G.degree(weight='weight'),
list(nx.degree(self.G, weight='weight')))
assert (sorted(self.DG.degree(weight='weight')) ==
sorted(nx.degree(self.DG, weight='weight')))
def test_neighbors(self):
assert list(self.G.neighbors(1)) == list(nx.neighbors(self.G, 1))
assert list(self.DG.neighbors(1)) == list(nx.neighbors(self.DG, 1))
def test_number_of_nodes(self):
assert self.G.number_of_nodes() == nx.number_of_nodes(self.G)
assert self.DG.number_of_nodes() == nx.number_of_nodes(self.DG)
def test_number_of_edges(self):
assert self.G.number_of_edges() == nx.number_of_edges(self.G)
assert self.DG.number_of_edges() == nx.number_of_edges(self.DG)
def test_is_directed(self):
assert self.G.is_directed() == nx.is_directed(self.G)
assert self.DG.is_directed() == nx.is_directed(self.DG)
def test_add_star(self):
G = self.G.copy()
nlist = [12, 13, 14, 15]
nx.add_star(G, nlist)
assert_edges_equal(G.edges(nlist), [(12, 13), (12, 14), (12, 15)])
G = self.G.copy()
nx.add_star(G, nlist, weight=2.0)
assert_edges_equal(G.edges(nlist, data=True),
[(12, 13, {'weight': 2.}),
(12, 14, {'weight': 2.}),
(12, 15, {'weight': 2.})])
G = self.G.copy()
nlist = [12]
nx.add_star(G, nlist)
assert_nodes_equal(G, list(self.G) + nlist)
G = self.G.copy()
nlist = []
nx.add_star(G, nlist)
assert_nodes_equal(G.nodes, self.Gnodes)
assert_edges_equal(G.edges, self.G.edges)
def test_add_path(self):
G = self.G.copy()
nlist = [12, 13, 14, 15]
nx.add_path(G, nlist)
assert_edges_equal(G.edges(nlist), [(12, 13), (13, 14), (14, 15)])
G = self.G.copy()
nx.add_path(G, nlist, weight=2.0)
assert_edges_equal(G.edges(nlist, data=True),
[(12, 13, {'weight': 2.}),
(13, 14, {'weight': 2.}),
(14, 15, {'weight': 2.})])
G = self.G.copy()
nlist = [None]
nx.add_path(G, nlist)
assert_edges_equal(G.edges(nlist), [])
assert_nodes_equal(G, list(self.G) + [None])
G = self.G.copy()
nlist = iter([None])
nx.add_path(G, nlist)
assert_edges_equal(G.edges([None]), [])
assert_nodes_equal(G, list(self.G) + [None])
G = self.G.copy()
nlist = [12]
nx.add_path(G, nlist)
assert_edges_equal(G.edges(nlist), [])
assert_nodes_equal(G, list(self.G) + [12])
G = self.G.copy()
nlist = iter([12])
nx.add_path(G, nlist)
assert_edges_equal(G.edges([12]), [])
assert_nodes_equal(G, list(self.G) + [12])
G = self.G.copy()
nlist = []
nx.add_path(G, nlist)
assert_edges_equal(G.edges, self.G.edges)
assert_nodes_equal(G, list(self.G))
G = self.G.copy()
nlist = iter([])
nx.add_path(G, nlist)
assert_edges_equal(G.edges, self.G.edges)
assert_nodes_equal(G, list(self.G))
def test_add_cycle(self):
G = self.G.copy()
nlist = [12, 13, 14, 15]
oklists = [[(12, 13), (12, 15), (13, 14), (14, 15)],
[(12, 13), (13, 14), (14, 15), (15, 12)]]
nx.add_cycle(G, nlist)
assert sorted(G.edges(nlist)) in oklists
G = self.G.copy()
oklists = [[(12, 13, {'weight': 1.}),
(12, 15, {'weight': 1.}),
(13, 14, {'weight': 1.}),
(14, 15, {'weight': 1.})],
[(12, 13, {'weight': 1.}),
(13, 14, {'weight': 1.}),
(14, 15, {'weight': 1.}),
(15, 12, {'weight': 1.})]]
nx.add_cycle(G, nlist, weight=1.0)
assert sorted(G.edges(nlist, data=True)) in oklists
G = self.G.copy()
nlist = [12]
nx.add_cycle(G, nlist)
assert_nodes_equal(G, list(self.G) + nlist)
G = self.G.copy()
nlist = []
nx.add_cycle(G, nlist)
assert_nodes_equal(G.nodes, self.Gnodes)
assert_edges_equal(G.edges, self.G.edges)
def test_subgraph(self):
assert (self.G.subgraph([0, 1, 2, 4]).adj ==
nx.subgraph(self.G, [0, 1, 2, 4]).adj)
assert (self.DG.subgraph([0, 1, 2, 4]).adj ==
nx.subgraph(self.DG, [0, 1, 2, 4]).adj)
assert (self.G.subgraph([0, 1, 2, 4]).adj ==
nx.induced_subgraph(self.G, [0, 1, 2, 4]).adj)
assert (self.DG.subgraph([0, 1, 2, 4]).adj ==
nx.induced_subgraph(self.DG, [0, 1, 2, 4]).adj)
# subgraph-subgraph chain is allowed in function interface
H = nx.induced_subgraph(self.G.subgraph([0, 1, 2, 4]), [0, 1, 4])
assert H._graph is not self.G
assert H.adj == self.G.subgraph([0, 1, 4]).adj
def test_edge_subgraph(self):
assert (self.G.edge_subgraph([(1, 2), (0, 3)]).adj ==
nx.edge_subgraph(self.G, [(1, 2), (0, 3)]).adj)
assert (self.DG.edge_subgraph([(1, 2), (0, 3)]).adj ==
nx.edge_subgraph(self.DG, [(1, 2), (0, 3)]).adj)
def test_restricted_view(self):
H = nx.restricted_view(self.G, [0, 2, 5], [(1, 2), (3, 4)])
assert set(H.nodes) == {1, 3, 4}
assert set(H.edges) == {(1, 1)}
def test_create_empty_copy(self):
G = nx.create_empty_copy(self.G, with_data=False)
assert_nodes_equal(G, list(self.G))
assert G.graph == {}
assert G._node == {}.fromkeys(self.G.nodes(), {})
assert G._adj == {}.fromkeys(self.G.nodes(), {})
G = nx.create_empty_copy(self.G)
assert_nodes_equal(G, list(self.G))
assert G.graph == self.G.graph
assert G._node == self.G._node
assert G._adj == {}.fromkeys(self.G.nodes(), {})
def test_degree_histogram(self):
assert nx.degree_histogram(self.G) == [1, 1, 1, 1, 1]
def test_density(self):
assert nx.density(self.G) == 0.5
assert nx.density(self.DG) == 0.3
G = nx.Graph()
G.add_node(1)
assert nx.density(G) == 0.0
def test_density_selfloop(self):
G = nx.Graph()
G.add_edge(1, 1)
assert nx.density(G) == 0.0
G.add_edge(1, 2)
assert nx.density(G) == 2.0
def test_freeze(self):
G = nx.freeze(self.G)
assert G.frozen == True
pytest.raises(nx.NetworkXError, G.add_node, 1)
pytest.raises(nx.NetworkXError, G.add_nodes_from, [1])
pytest.raises(nx.NetworkXError, G.remove_node, 1)
pytest.raises(nx.NetworkXError, G.remove_nodes_from, [1])
pytest.raises(nx.NetworkXError, G.add_edge, 1, 2)
pytest.raises(nx.NetworkXError, G.add_edges_from, [(1, 2)])
pytest.raises(nx.NetworkXError, G.remove_edge, 1, 2)
pytest.raises(nx.NetworkXError, G.remove_edges_from, [(1, 2)])
pytest.raises(nx.NetworkXError, G.clear)
def test_is_frozen(self):
assert nx.is_frozen(self.G) == False
G = nx.freeze(self.G)
assert G.frozen == nx.is_frozen(self.G)
assert G.frozen == True
def test_info(self):
G = nx.path_graph(5)
G.name = "path_graph(5)"
info = nx.info(G)
expected_graph_info = '\n'.join(['Name: path_graph(5)',
'Type: Graph',
'Number of nodes: 5',
'Number of edges: 4',
'Average degree: 1.6000'])
assert info == expected_graph_info
info = nx.info(G, n=1)
expected_node_info = '\n'.join(
['Node 1 has the following properties:',
'Degree: 2',
'Neighbors: 0 2'])
assert info == expected_node_info
def test_info_digraph(self):
G = nx.DiGraph(name='path_graph(5)')
nx.add_path(G, [0, 1, 2, 3, 4])
info = nx.info(G)
expected_graph_info = '\n'.join(['Name: path_graph(5)',
'Type: DiGraph',
'Number of nodes: 5',
'Number of edges: 4',
'Average in degree: 0.8000',
'Average out degree: 0.8000'])
assert info == expected_graph_info
info = nx.info(G, n=1)
expected_node_info = '\n'.join(
['Node 1 has the following properties:',
'Degree: 2',
'Neighbors: 2'])
assert info == expected_node_info
pytest.raises(nx.NetworkXError, nx.info, G, n=-1)
def test_neighbors_complete_graph(self):
graph = nx.complete_graph(100)
pop = random.sample(list(graph), 1)
nbors = list(nx.neighbors(graph, pop[0]))
# should be all the other vertices in the graph
assert len(nbors) == len(graph) - 1
graph = nx.path_graph(100)
node = random.sample(list(graph), 1)[0]
nbors = list(nx.neighbors(graph, node))
# should be all the other vertices in the graph
if node != 0 and node != 99:
assert len(nbors) == 2
else:
assert len(nbors) == 1
# create a star graph with 99 outer nodes
graph = nx.star_graph(99)
nbors = list(nx.neighbors(graph, 0))
assert len(nbors) == 99
def test_non_neighbors(self):
graph = nx.complete_graph(100)
pop = random.sample(list(graph), 1)
nbors = list(nx.non_neighbors(graph, pop[0]))
# should be all the other vertices in the graph
assert len(nbors) == 0
graph = nx.path_graph(100)
node = random.sample(list(graph), 1)[0]
nbors = list(nx.non_neighbors(graph, node))
# should be all the other vertices in the graph
if node != 0 and node != 99:
assert len(nbors) == 97
else:
assert len(nbors) == 98
# create a star graph with 99 outer nodes
graph = nx.star_graph(99)
nbors = list(nx.non_neighbors(graph, 0))
assert len(nbors) == 0
# disconnected graph
graph = nx.Graph()
graph.add_nodes_from(range(10))
nbors = list(nx.non_neighbors(graph, 0))
assert len(nbors) == 9
def test_non_edges(self):
# All possible edges exist
graph = nx.complete_graph(5)
nedges = list(nx.non_edges(graph))
assert len(nedges) == 0
graph = nx.path_graph(4)
expected = [(0, 2), (0, 3), (1, 3)]
nedges = list(nx.non_edges(graph))
for (u, v) in expected:
assert (u, v) in nedges or (v, u) in nedges
graph = nx.star_graph(4)
expected = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)]
nedges = list(nx.non_edges(graph))
for (u, v) in expected:
assert (u, v) in nedges or (v, u) in nedges
# Directed graphs
graph = nx.DiGraph()
graph.add_edges_from([(0, 2), (2, 0), (2, 1)])
expected = [(0, 1), (1, 0), (1, 2)]
nedges = list(nx.non_edges(graph))
for e in expected:
assert e in nedges
def test_is_weighted(self):
G = nx.Graph()
assert not nx.is_weighted(G)
G = nx.path_graph(4)
assert not nx.is_weighted(G)
assert not nx.is_weighted(G, (2, 3))
G.add_node(4)
G.add_edge(3, 4, weight=4)
assert not nx.is_weighted(G)
assert nx.is_weighted(G, (3, 4))
G = nx.DiGraph()
G.add_weighted_edges_from([('0', '3', 3), ('0', '1', -5),
('1', '0', -5), ('0', '2', 2),
('1', '2', 4), ('2', '3', 1)])
assert nx.is_weighted(G)
assert nx.is_weighted(G, ('1', '0'))
G = G.to_undirected()
assert nx.is_weighted(G)
assert nx.is_weighted(G, ('1', '0'))
pytest.raises(nx.NetworkXError, nx.is_weighted, G, (1, 2))
def test_is_negatively_weighted(self):
G = nx.Graph()
assert not nx.is_negatively_weighted(G)
G.add_node(1)
G.add_nodes_from([2, 3, 4, 5])
assert not nx.is_negatively_weighted(G)
G.add_edge(1, 2, weight=4)
assert not nx.is_negatively_weighted(G, (1, 2))
G.add_edges_from([(1, 3), (2, 4), (2, 6)])
G[1][3]['color'] = 'blue'
assert not nx.is_negatively_weighted(G)
assert not nx.is_negatively_weighted(G, (1, 3))
G[2][4]['weight'] = -2
assert nx.is_negatively_weighted(G, (2, 4))
assert nx.is_negatively_weighted(G)
G = nx.DiGraph()
G.add_weighted_edges_from([('0', '3', 3), ('0', '1', -5),
('1', '0', -2), ('0', '2', 2),
('1', '2', -3), ('2', '3', 1)])
assert nx.is_negatively_weighted(G)
assert not nx.is_negatively_weighted(G, ('0', '3'))
assert nx.is_negatively_weighted(G, ('1', '0'))
pytest.raises(nx.NetworkXError, nx.is_negatively_weighted, G, (1, 4))
class TestCommonNeighbors():
@classmethod
def setup_class(cls):
cls.func = staticmethod(nx.common_neighbors)
def test_func(G, u, v, expected):
result = sorted(cls.func(G, u, v))
assert result == expected
cls.test = staticmethod(test_func)
def test_K5(self):
G = nx.complete_graph(5)
self.test(G, 0, 1, [2, 3, 4])
def test_P3(self):
G = nx.path_graph(3)
self.test(G, 0, 2, [1])
def test_S4(self):
G = nx.star_graph(4)
self.test(G, 1, 2, [0])
def test_digraph(self):
with pytest.raises(nx.NetworkXNotImplemented):
G = nx.DiGraph()
G.add_edges_from([(0, 1), (1, 2)])
self.func(G, 0, 2)
def test_nonexistent_nodes(self):
G = nx.complete_graph(5)
pytest.raises(nx.NetworkXError, nx.common_neighbors, G, 5, 4)
pytest.raises(nx.NetworkXError, nx.common_neighbors, G, 4, 5)
pytest.raises(nx.NetworkXError, nx.common_neighbors, G, 5, 6)
def test_custom1(self):
"""Case of no common neighbors."""
G = nx.Graph()
G.add_nodes_from([0, 1])
self.test(G, 0, 1, [])
def test_custom2(self):
"""Case of equal nodes."""
G = nx.complete_graph(4)
self.test(G, 0, 0, [1, 2, 3])
def test_set_node_attributes():
graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()]
for G in graphs:
# Test single value
G = nx.path_graph(3, create_using=G)
vals = 100
attr = 'hello'
nx.set_node_attributes(G, vals, attr)
assert G.nodes[0][attr] == vals
assert G.nodes[1][attr] == vals
assert G.nodes[2][attr] == vals
# Test dictionary
G = nx.path_graph(3, create_using=G)
vals = dict(zip(sorted(G.nodes()), range(len(G))))
attr = 'hi'
nx.set_node_attributes(G, vals, attr)
assert G.nodes[0][attr] == 0
assert G.nodes[1][attr] == 1
assert G.nodes[2][attr] == 2
# Test dictionary of dictionaries
G = nx.path_graph(3, create_using=G)
d = {'hi': 0, 'hello': 200}
vals = dict.fromkeys(G.nodes(), d)
vals.pop(0)
nx.set_node_attributes(G, vals)
assert G.nodes[0] == {}
assert G.nodes[1]["hi"] == 0
assert G.nodes[2]["hello"] == 200
def test_set_edge_attributes():
graphs = [nx.Graph(), nx.DiGraph()]
for G in graphs:
# Test single value
G = nx.path_graph(3, create_using=G)
attr = 'hello'
vals = 3
nx.set_edge_attributes(G, vals, attr)
assert G[0][1][attr] == vals
assert G[1][2][attr] == vals
# Test multiple values
G = nx.path_graph(3, create_using=G)
attr = 'hi'
edges = [(0, 1), (1, 2)]
vals = dict(zip(edges, range(len(edges))))
nx.set_edge_attributes(G, vals, attr)
assert G[0][1][attr] == 0
assert G[1][2][attr] == 1
# Test dictionary of dictionaries
G = nx.path_graph(3, create_using=G)
d = {'hi': 0, 'hello': 200}
edges = [(0, 1)]
vals = dict.fromkeys(edges, d)
nx.set_edge_attributes(G, vals)
assert G[0][1]['hi'] == 0
assert G[0][1]['hello'] == 200
assert G[1][2] == {}
def test_set_edge_attributes_multi():
graphs = [nx.MultiGraph(), nx.MultiDiGraph()]
for G in graphs:
# Test single value
G = nx.path_graph(3, create_using=G)
attr = 'hello'
vals = 3
nx.set_edge_attributes(G, vals, attr)
assert G[0][1][0][attr] == vals
assert G[1][2][0][attr] == vals
# Test multiple values
G = nx.path_graph(3, create_using=G)
attr = 'hi'
edges = [(0, 1, 0), (1, 2, 0)]
vals = dict(zip(edges, range(len(edges))))
nx.set_edge_attributes(G, vals, attr)
assert G[0][1][0][attr] == 0
assert G[1][2][0][attr] == 1
# Test dictionary of dictionaries
G = nx.path_graph(3, create_using=G)
d = {'hi': 0, 'hello': 200}
edges = [(0, 1, 0)]
vals = dict.fromkeys(edges, d)
nx.set_edge_attributes(G, vals)
assert G[0][1][0]['hi'] == 0
assert G[0][1][0]['hello'] == 200
assert G[1][2][0] == {}
def test_get_node_attributes():
graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()]
for G in graphs:
G = nx.path_graph(3, create_using=G)
attr = 'hello'
vals = 100
nx.set_node_attributes(G, vals, attr)
attrs = nx.get_node_attributes(G, attr)
assert attrs[0] == vals
assert attrs[1] == vals
assert attrs[2] == vals
def test_get_edge_attributes():
graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()]
for G in graphs:
G = nx.path_graph(3, create_using=G)
attr = 'hello'
vals = 100
nx.set_edge_attributes(G, vals, attr)
attrs = nx.get_edge_attributes(G, attr)
assert len(attrs) == 2
if G.is_multigraph():
keys = [(0, 1, 0), (1, 2, 0)]
for u, v, k in keys:
try:
assert attrs[(u, v, k)] == 100
except KeyError:
assert attrs[(v, u, k)] == 100
else:
keys = [(0, 1), (1, 2)]
for u, v in keys:
try:
assert attrs[(u, v)] == 100
except KeyError:
assert attrs[(v, u)] == 100
def test_is_empty():
graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()]
for G in graphs:
assert nx.is_empty(G)
G.add_nodes_from(range(5))
assert nx.is_empty(G)
G.add_edges_from([(1, 2), (3, 4)])
assert not nx.is_empty(G)
def test_selfloops():
graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()]
for graph in graphs:
G = nx.complete_graph(3, create_using=graph)
G.add_edge(0, 0)
assert_nodes_equal(nx.nodes_with_selfloops(G), [0])
assert_edges_equal(nx.selfloop_edges(G), [(0, 0)])
assert_edges_equal(nx.selfloop_edges(G, data=True), [(0, 0, {})])
assert nx.number_of_selfloops(G) == 1
# test selfloop attr
G.add_edge(1, 1, weight=2)
assert_edges_equal(nx.selfloop_edges(G, data=True),
[(0, 0, {}), (1, 1, {'weight': 2})])
assert_edges_equal(nx.selfloop_edges(G, data='weight'),
[(0, 0, None), (1, 1, 2)]) | networkx/classes/tests/test_function.py | import random
import pytest
import networkx as nx
from networkx.testing.utils import *
class TestFunction(object):
def setup_method(self):
self.G = nx.Graph({0: [1, 2, 3], 1: [1, 2, 0], 4: []}, name='Test')
self.Gdegree = {0: 3, 1: 2, 2: 2, 3: 1, 4: 0}
self.Gnodes = list(range(5))
self.Gedges = [(0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2)]
self.DG = nx.DiGraph({0: [1, 2, 3], 1: [1, 2, 0], 4: []})
self.DGin_degree = {0: 1, 1: 2, 2: 2, 3: 1, 4: 0}
self.DGout_degree = {0: 3, 1: 3, 2: 0, 3: 0, 4: 0}
self.DGnodes = list(range(5))
self.DGedges = [(0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2)]
def test_nodes(self):
assert_nodes_equal(self.G.nodes(), list(nx.nodes(self.G)))
assert_nodes_equal(self.DG.nodes(), list(nx.nodes(self.DG)))
def test_edges(self):
assert_edges_equal(self.G.edges(), list(nx.edges(self.G)))
assert sorted(self.DG.edges()) == sorted(nx.edges(self.DG))
assert_edges_equal(self.G.edges(nbunch=[0, 1, 3]),
list(nx.edges(self.G, nbunch=[0, 1, 3])))
assert (sorted(self.DG.edges(nbunch=[0, 1, 3])) ==
sorted(nx.edges(self.DG, nbunch=[0, 1, 3])))
def test_degree(self):
assert_edges_equal(self.G.degree(), list(nx.degree(self.G)))
assert sorted(self.DG.degree()) == sorted(nx.degree(self.DG))
assert_edges_equal(self.G.degree(nbunch=[0, 1]),
list(nx.degree(self.G, nbunch=[0, 1])))
assert (sorted(self.DG.degree(nbunch=[0, 1])) ==
sorted(nx.degree(self.DG, nbunch=[0, 1])))
assert_edges_equal(self.G.degree(weight='weight'),
list(nx.degree(self.G, weight='weight')))
assert (sorted(self.DG.degree(weight='weight')) ==
sorted(nx.degree(self.DG, weight='weight')))
def test_neighbors(self):
assert list(self.G.neighbors(1)) == list(nx.neighbors(self.G, 1))
assert list(self.DG.neighbors(1)) == list(nx.neighbors(self.DG, 1))
def test_number_of_nodes(self):
assert self.G.number_of_nodes() == nx.number_of_nodes(self.G)
assert self.DG.number_of_nodes() == nx.number_of_nodes(self.DG)
def test_number_of_edges(self):
assert self.G.number_of_edges() == nx.number_of_edges(self.G)
assert self.DG.number_of_edges() == nx.number_of_edges(self.DG)
def test_is_directed(self):
assert self.G.is_directed() == nx.is_directed(self.G)
assert self.DG.is_directed() == nx.is_directed(self.DG)
def test_add_star(self):
G = self.G.copy()
nlist = [12, 13, 14, 15]
nx.add_star(G, nlist)
assert_edges_equal(G.edges(nlist), [(12, 13), (12, 14), (12, 15)])
G = self.G.copy()
nx.add_star(G, nlist, weight=2.0)
assert_edges_equal(G.edges(nlist, data=True),
[(12, 13, {'weight': 2.}),
(12, 14, {'weight': 2.}),
(12, 15, {'weight': 2.})])
G = self.G.copy()
nlist = [12]
nx.add_star(G, nlist)
assert_nodes_equal(G, list(self.G) + nlist)
G = self.G.copy()
nlist = []
nx.add_star(G, nlist)
assert_nodes_equal(G.nodes, self.Gnodes)
assert_edges_equal(G.edges, self.G.edges)
def test_add_path(self):
G = self.G.copy()
nlist = [12, 13, 14, 15]
nx.add_path(G, nlist)
assert_edges_equal(G.edges(nlist), [(12, 13), (13, 14), (14, 15)])
G = self.G.copy()
nx.add_path(G, nlist, weight=2.0)
assert_edges_equal(G.edges(nlist, data=True),
[(12, 13, {'weight': 2.}),
(13, 14, {'weight': 2.}),
(14, 15, {'weight': 2.})])
G = self.G.copy()
nlist = [None]
nx.add_path(G, nlist)
assert_edges_equal(G.edges(nlist), [])
assert_nodes_equal(G, list(self.G) + [None])
G = self.G.copy()
nlist = iter([None])
nx.add_path(G, nlist)
assert_edges_equal(G.edges([None]), [])
assert_nodes_equal(G, list(self.G) + [None])
G = self.G.copy()
nlist = [12]
nx.add_path(G, nlist)
assert_edges_equal(G.edges(nlist), [])
assert_nodes_equal(G, list(self.G) + [12])
G = self.G.copy()
nlist = iter([12])
nx.add_path(G, nlist)
assert_edges_equal(G.edges([12]), [])
assert_nodes_equal(G, list(self.G) + [12])
G = self.G.copy()
nlist = []
nx.add_path(G, nlist)
assert_edges_equal(G.edges, self.G.edges)
assert_nodes_equal(G, list(self.G))
G = self.G.copy()
nlist = iter([])
nx.add_path(G, nlist)
assert_edges_equal(G.edges, self.G.edges)
assert_nodes_equal(G, list(self.G))
def test_add_cycle(self):
G = self.G.copy()
nlist = [12, 13, 14, 15]
oklists = [[(12, 13), (12, 15), (13, 14), (14, 15)],
[(12, 13), (13, 14), (14, 15), (15, 12)]]
nx.add_cycle(G, nlist)
assert sorted(G.edges(nlist)) in oklists
G = self.G.copy()
oklists = [[(12, 13, {'weight': 1.}),
(12, 15, {'weight': 1.}),
(13, 14, {'weight': 1.}),
(14, 15, {'weight': 1.})],
[(12, 13, {'weight': 1.}),
(13, 14, {'weight': 1.}),
(14, 15, {'weight': 1.}),
(15, 12, {'weight': 1.})]]
nx.add_cycle(G, nlist, weight=1.0)
assert sorted(G.edges(nlist, data=True)) in oklists
G = self.G.copy()
nlist = [12]
nx.add_cycle(G, nlist)
assert_nodes_equal(G, list(self.G) + nlist)
G = self.G.copy()
nlist = []
nx.add_cycle(G, nlist)
assert_nodes_equal(G.nodes, self.Gnodes)
assert_edges_equal(G.edges, self.G.edges)
def test_subgraph(self):
assert (self.G.subgraph([0, 1, 2, 4]).adj ==
nx.subgraph(self.G, [0, 1, 2, 4]).adj)
assert (self.DG.subgraph([0, 1, 2, 4]).adj ==
nx.subgraph(self.DG, [0, 1, 2, 4]).adj)
assert (self.G.subgraph([0, 1, 2, 4]).adj ==
nx.induced_subgraph(self.G, [0, 1, 2, 4]).adj)
assert (self.DG.subgraph([0, 1, 2, 4]).adj ==
nx.induced_subgraph(self.DG, [0, 1, 2, 4]).adj)
# subgraph-subgraph chain is allowed in function interface
H = nx.induced_subgraph(self.G.subgraph([0, 1, 2, 4]), [0, 1, 4])
assert H._graph is not self.G
assert H.adj == self.G.subgraph([0, 1, 4]).adj
def test_edge_subgraph(self):
assert (self.G.edge_subgraph([(1, 2), (0, 3)]).adj ==
nx.edge_subgraph(self.G, [(1, 2), (0, 3)]).adj)
assert (self.DG.edge_subgraph([(1, 2), (0, 3)]).adj ==
nx.edge_subgraph(self.DG, [(1, 2), (0, 3)]).adj)
def test_restricted_view(self):
H = nx.restricted_view(self.G, [0, 2, 5], [(1, 2), (3, 4)])
assert set(H.nodes) == {1, 3, 4}
assert set(H.edges) == {(1, 1)}
def test_create_empty_copy(self):
G = nx.create_empty_copy(self.G, with_data=False)
assert_nodes_equal(G, list(self.G))
assert G.graph == {}
assert G._node == {}.fromkeys(self.G.nodes(), {})
assert G._adj == {}.fromkeys(self.G.nodes(), {})
G = nx.create_empty_copy(self.G)
assert_nodes_equal(G, list(self.G))
assert G.graph == self.G.graph
assert G._node == self.G._node
assert G._adj == {}.fromkeys(self.G.nodes(), {})
def test_degree_histogram(self):
assert nx.degree_histogram(self.G) == [1, 1, 1, 1, 1]
def test_density(self):
assert nx.density(self.G) == 0.5
assert nx.density(self.DG) == 0.3
G = nx.Graph()
G.add_node(1)
assert nx.density(G) == 0.0
def test_density_selfloop(self):
G = nx.Graph()
G.add_edge(1, 1)
assert nx.density(G) == 0.0
G.add_edge(1, 2)
assert nx.density(G) == 2.0
def test_freeze(self):
G = nx.freeze(self.G)
assert G.frozen == True
pytest.raises(nx.NetworkXError, G.add_node, 1)
pytest.raises(nx.NetworkXError, G.add_nodes_from, [1])
pytest.raises(nx.NetworkXError, G.remove_node, 1)
pytest.raises(nx.NetworkXError, G.remove_nodes_from, [1])
pytest.raises(nx.NetworkXError, G.add_edge, 1, 2)
pytest.raises(nx.NetworkXError, G.add_edges_from, [(1, 2)])
pytest.raises(nx.NetworkXError, G.remove_edge, 1, 2)
pytest.raises(nx.NetworkXError, G.remove_edges_from, [(1, 2)])
pytest.raises(nx.NetworkXError, G.clear)
def test_is_frozen(self):
assert nx.is_frozen(self.G) == False
G = nx.freeze(self.G)
assert G.frozen == nx.is_frozen(self.G)
assert G.frozen == True
def test_info(self):
G = nx.path_graph(5)
G.name = "path_graph(5)"
info = nx.info(G)
expected_graph_info = '\n'.join(['Name: path_graph(5)',
'Type: Graph',
'Number of nodes: 5',
'Number of edges: 4',
'Average degree: 1.6000'])
assert info == expected_graph_info
info = nx.info(G, n=1)
expected_node_info = '\n'.join(
['Node 1 has the following properties:',
'Degree: 2',
'Neighbors: 0 2'])
assert info == expected_node_info
def test_info_digraph(self):
G = nx.DiGraph(name='path_graph(5)')
nx.add_path(G, [0, 1, 2, 3, 4])
info = nx.info(G)
expected_graph_info = '\n'.join(['Name: path_graph(5)',
'Type: DiGraph',
'Number of nodes: 5',
'Number of edges: 4',
'Average in degree: 0.8000',
'Average out degree: 0.8000'])
assert info == expected_graph_info
info = nx.info(G, n=1)
expected_node_info = '\n'.join(
['Node 1 has the following properties:',
'Degree: 2',
'Neighbors: 2'])
assert info == expected_node_info
pytest.raises(nx.NetworkXError, nx.info, G, n=-1)
def test_neighbors_complete_graph(self):
graph = nx.complete_graph(100)
pop = random.sample(list(graph), 1)
nbors = list(nx.neighbors(graph, pop[0]))
# should be all the other vertices in the graph
assert len(nbors) == len(graph) - 1
graph = nx.path_graph(100)
node = random.sample(list(graph), 1)[0]
nbors = list(nx.neighbors(graph, node))
# should be all the other vertices in the graph
if node != 0 and node != 99:
assert len(nbors) == 2
else:
assert len(nbors) == 1
# create a star graph with 99 outer nodes
graph = nx.star_graph(99)
nbors = list(nx.neighbors(graph, 0))
assert len(nbors) == 99
def test_non_neighbors(self):
graph = nx.complete_graph(100)
pop = random.sample(list(graph), 1)
nbors = list(nx.non_neighbors(graph, pop[0]))
# should be all the other vertices in the graph
assert len(nbors) == 0
graph = nx.path_graph(100)
node = random.sample(list(graph), 1)[0]
nbors = list(nx.non_neighbors(graph, node))
# should be all the other vertices in the graph
if node != 0 and node != 99:
assert len(nbors) == 97
else:
assert len(nbors) == 98
# create a star graph with 99 outer nodes
graph = nx.star_graph(99)
nbors = list(nx.non_neighbors(graph, 0))
assert len(nbors) == 0
# disconnected graph
graph = nx.Graph()
graph.add_nodes_from(range(10))
nbors = list(nx.non_neighbors(graph, 0))
assert len(nbors) == 9
def test_non_edges(self):
# All possible edges exist
graph = nx.complete_graph(5)
nedges = list(nx.non_edges(graph))
assert len(nedges) == 0
graph = nx.path_graph(4)
expected = [(0, 2), (0, 3), (1, 3)]
nedges = list(nx.non_edges(graph))
for (u, v) in expected:
assert (u, v) in nedges or (v, u) in nedges
graph = nx.star_graph(4)
expected = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)]
nedges = list(nx.non_edges(graph))
for (u, v) in expected:
assert (u, v) in nedges or (v, u) in nedges
# Directed graphs
graph = nx.DiGraph()
graph.add_edges_from([(0, 2), (2, 0), (2, 1)])
expected = [(0, 1), (1, 0), (1, 2)]
nedges = list(nx.non_edges(graph))
for e in expected:
assert e in nedges
def test_is_weighted(self):
G = nx.Graph()
assert not nx.is_weighted(G)
G = nx.path_graph(4)
assert not nx.is_weighted(G)
assert not nx.is_weighted(G, (2, 3))
G.add_node(4)
G.add_edge(3, 4, weight=4)
assert not nx.is_weighted(G)
assert nx.is_weighted(G, (3, 4))
G = nx.DiGraph()
G.add_weighted_edges_from([('0', '3', 3), ('0', '1', -5),
('1', '0', -5), ('0', '2', 2),
('1', '2', 4), ('2', '3', 1)])
assert nx.is_weighted(G)
assert nx.is_weighted(G, ('1', '0'))
G = G.to_undirected()
assert nx.is_weighted(G)
assert nx.is_weighted(G, ('1', '0'))
pytest.raises(nx.NetworkXError, nx.is_weighted, G, (1, 2))
def test_is_negatively_weighted(self):
G = nx.Graph()
assert not nx.is_negatively_weighted(G)
G.add_node(1)
G.add_nodes_from([2, 3, 4, 5])
assert not nx.is_negatively_weighted(G)
G.add_edge(1, 2, weight=4)
assert not nx.is_negatively_weighted(G, (1, 2))
G.add_edges_from([(1, 3), (2, 4), (2, 6)])
G[1][3]['color'] = 'blue'
assert not nx.is_negatively_weighted(G)
assert not nx.is_negatively_weighted(G, (1, 3))
G[2][4]['weight'] = -2
assert nx.is_negatively_weighted(G, (2, 4))
assert nx.is_negatively_weighted(G)
G = nx.DiGraph()
G.add_weighted_edges_from([('0', '3', 3), ('0', '1', -5),
('1', '0', -2), ('0', '2', 2),
('1', '2', -3), ('2', '3', 1)])
assert nx.is_negatively_weighted(G)
assert not nx.is_negatively_weighted(G, ('0', '3'))
assert nx.is_negatively_weighted(G, ('1', '0'))
pytest.raises(nx.NetworkXError, nx.is_negatively_weighted, G, (1, 4))
class TestCommonNeighbors():
@classmethod
def setup_class(cls):
cls.func = staticmethod(nx.common_neighbors)
def test_func(G, u, v, expected):
result = sorted(cls.func(G, u, v))
assert result == expected
cls.test = staticmethod(test_func)
def test_K5(self):
G = nx.complete_graph(5)
self.test(G, 0, 1, [2, 3, 4])
def test_P3(self):
G = nx.path_graph(3)
self.test(G, 0, 2, [1])
def test_S4(self):
G = nx.star_graph(4)
self.test(G, 1, 2, [0])
def test_digraph(self):
with pytest.raises(nx.NetworkXNotImplemented):
G = nx.DiGraph()
G.add_edges_from([(0, 1), (1, 2)])
self.func(G, 0, 2)
def test_nonexistent_nodes(self):
G = nx.complete_graph(5)
pytest.raises(nx.NetworkXError, nx.common_neighbors, G, 5, 4)
pytest.raises(nx.NetworkXError, nx.common_neighbors, G, 4, 5)
pytest.raises(nx.NetworkXError, nx.common_neighbors, G, 5, 6)
def test_custom1(self):
"""Case of no common neighbors."""
G = nx.Graph()
G.add_nodes_from([0, 1])
self.test(G, 0, 1, [])
def test_custom2(self):
"""Case of equal nodes."""
G = nx.complete_graph(4)
self.test(G, 0, 0, [1, 2, 3])
def test_set_node_attributes():
graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()]
for G in graphs:
# Test single value
G = nx.path_graph(3, create_using=G)
vals = 100
attr = 'hello'
nx.set_node_attributes(G, vals, attr)
assert G.nodes[0][attr] == vals
assert G.nodes[1][attr] == vals
assert G.nodes[2][attr] == vals
# Test dictionary
G = nx.path_graph(3, create_using=G)
vals = dict(zip(sorted(G.nodes()), range(len(G))))
attr = 'hi'
nx.set_node_attributes(G, vals, attr)
assert G.nodes[0][attr] == 0
assert G.nodes[1][attr] == 1
assert G.nodes[2][attr] == 2
# Test dictionary of dictionaries
G = nx.path_graph(3, create_using=G)
d = {'hi': 0, 'hello': 200}
vals = dict.fromkeys(G.nodes(), d)
vals.pop(0)
nx.set_node_attributes(G, vals)
assert G.nodes[0] == {}
assert G.nodes[1]["hi"] == 0
assert G.nodes[2]["hello"] == 200
def test_set_edge_attributes():
graphs = [nx.Graph(), nx.DiGraph()]
for G in graphs:
# Test single value
G = nx.path_graph(3, create_using=G)
attr = 'hello'
vals = 3
nx.set_edge_attributes(G, vals, attr)
assert G[0][1][attr] == vals
assert G[1][2][attr] == vals
# Test multiple values
G = nx.path_graph(3, create_using=G)
attr = 'hi'
edges = [(0, 1), (1, 2)]
vals = dict(zip(edges, range(len(edges))))
nx.set_edge_attributes(G, vals, attr)
assert G[0][1][attr] == 0
assert G[1][2][attr] == 1
# Test dictionary of dictionaries
G = nx.path_graph(3, create_using=G)
d = {'hi': 0, 'hello': 200}
edges = [(0, 1)]
vals = dict.fromkeys(edges, d)
nx.set_edge_attributes(G, vals)
assert G[0][1]['hi'] == 0
assert G[0][1]['hello'] == 200
assert G[1][2] == {}
def test_set_edge_attributes_multi():
graphs = [nx.MultiGraph(), nx.MultiDiGraph()]
for G in graphs:
# Test single value
G = nx.path_graph(3, create_using=G)
attr = 'hello'
vals = 3
nx.set_edge_attributes(G, vals, attr)
assert G[0][1][0][attr] == vals
assert G[1][2][0][attr] == vals
# Test multiple values
G = nx.path_graph(3, create_using=G)
attr = 'hi'
edges = [(0, 1, 0), (1, 2, 0)]
vals = dict(zip(edges, range(len(edges))))
nx.set_edge_attributes(G, vals, attr)
assert G[0][1][0][attr] == 0
assert G[1][2][0][attr] == 1
# Test dictionary of dictionaries
G = nx.path_graph(3, create_using=G)
d = {'hi': 0, 'hello': 200}
edges = [(0, 1, 0)]
vals = dict.fromkeys(edges, d)
nx.set_edge_attributes(G, vals)
assert G[0][1][0]['hi'] == 0
assert G[0][1][0]['hello'] == 200
assert G[1][2][0] == {}
def test_get_node_attributes():
graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()]
for G in graphs:
G = nx.path_graph(3, create_using=G)
attr = 'hello'
vals = 100
nx.set_node_attributes(G, vals, attr)
attrs = nx.get_node_attributes(G, attr)
assert attrs[0] == vals
assert attrs[1] == vals
assert attrs[2] == vals
def test_get_edge_attributes():
graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()]
for G in graphs:
G = nx.path_graph(3, create_using=G)
attr = 'hello'
vals = 100
nx.set_edge_attributes(G, vals, attr)
attrs = nx.get_edge_attributes(G, attr)
assert len(attrs) == 2
if G.is_multigraph():
keys = [(0, 1, 0), (1, 2, 0)]
for u, v, k in keys:
try:
assert attrs[(u, v, k)] == 100
except KeyError:
assert attrs[(v, u, k)] == 100
else:
keys = [(0, 1), (1, 2)]
for u, v in keys:
try:
assert attrs[(u, v)] == 100
except KeyError:
assert attrs[(v, u)] == 100
def test_is_empty():
graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()]
for G in graphs:
assert nx.is_empty(G)
G.add_nodes_from(range(5))
assert nx.is_empty(G)
G.add_edges_from([(1, 2), (3, 4)])
assert not nx.is_empty(G)
def test_selfloops():
graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()]
for graph in graphs:
G = nx.complete_graph(3, create_using=graph)
G.add_edge(0, 0)
assert_nodes_equal(nx.nodes_with_selfloops(G), [0])
assert_edges_equal(nx.selfloop_edges(G), [(0, 0)])
assert_edges_equal(nx.selfloop_edges(G, data=True), [(0, 0, {})])
assert nx.number_of_selfloops(G) == 1
# test selfloop attr
G.add_edge(1, 1, weight=2)
assert_edges_equal(nx.selfloop_edges(G, data=True),
[(0, 0, {}), (1, 1, {'weight': 2})])
assert_edges_equal(nx.selfloop_edges(G, data='weight'),
[(0, 0, None), (1, 1, 2)]) | 0.604399 | 0.773943 |
import copy
import logging
import PIL
import cv2
import numpy as np
import scipy.ndimage
import skimage.morphology
import src.utils as utils
def _get_xy_bounding_box(vertex, padding):
"""Returns the xy bounding box of the environment."""
min_ = np.floor(np.min(vertex[:, :2], axis=0) - padding).astype(np.int)
max_ = np.ceil(np.max(vertex[:, :2], axis=0) + padding).astype(np.int)
return min_, max_
def _project_to_map(map, vertex, wt=None, ignore_points_outside_map=False):
"""Projects points to map, returns how many points are present at each
location."""
num_points = np.zeros((map.size[1], map.size[0]))
vertex_ = vertex[:, :2] - map.origin
vertex_ = np.round(vertex_ / map.resolution).astype(np.int)
if ignore_points_outside_map:
good_ind = np.all(np.array([vertex_[:,1] >= 0, vertex_[:,1] < map.size[1],
vertex_[:,0] >= 0, vertex_[:,0] < map.size[0]]),
axis=0)
vertex_ = vertex_[good_ind, :]
if wt is not None:
wt = wt[good_ind, :]
if wt is None:
np.add.at(num_points, (vertex_[:, 1], vertex_[:, 0]), 1)
else:
assert(wt.shape[0] == vertex.shape[0]), \
'number of weights should be same as vertices.'
np.add.at(num_points, (vertex_[:, 1], vertex_[:, 0]), wt)
return num_points
def make_map(padding, resolution, vertex=None, sc=1.):
"""Returns a map structure."""
min_, max_ = _get_xy_bounding_box(vertex*sc, padding=padding)
sz = np.ceil((max_ - min_ + 1) / resolution).astype(np.int32)
max_ = min_ + sz * resolution - 1
map = utils.Foo(origin=min_, size=sz, max=max_, resolution=resolution,
padding=padding)
return map
def _fill_holes(img, thresh):
"""Fills holes less than thresh area (assumes 4 connectivity when computing
hole area."""
l, n = scipy.ndimage.label(np.logical_not(img))
img_ = img == True
cnts = np.bincount(l.reshape(-1))
for i, cnt in enumerate(cnts):
if cnt < thresh:
l[l == i] = -1
img_[l == -1] = True
return img_
def compute_traversibility(map, robot_base, robot_height, robot_radius,
valid_min, valid_max, num_point_threshold, shapess,
sc=100., n_samples_per_face=200):
"""Returns a bit map with pixels that are traversible or not as long as the
robot center is inside this volume we are good colisions can be detected by
doing a line search on things, or walking from current location to final
location in the bitmap, or doing bwlabel on the traversibility map."""
tt = utils.Timer()
tt.tic()
num_obstcale_points = np.zeros((map.size[1], map.size[0]))
num_points = np.zeros((map.size[1], map.size[0]))
for i, shapes in enumerate(shapess):
for j in range(shapes.get_number_of_meshes()):
p, face_areas, face_idx = shapes.sample_points_on_face_of_shape(
j, n_samples_per_face, sc)
wt = face_areas[face_idx]/n_samples_per_face
ind = np.all(np.concatenate(
(p[:, [2]] > robot_base,
p[:, [2]] < robot_base + robot_height), axis=1),axis=1)
num_obstcale_points += _project_to_map(map, p[ind, :], wt[ind])
ind = np.all(np.concatenate(
(p[:, [2]] > valid_min,
p[:, [2]] < valid_max), axis=1),axis=1)
num_points += _project_to_map(map, p[ind, :], wt[ind])
selem = skimage.morphology.disk(robot_radius / map.resolution)
obstacle_free = skimage.morphology.binary_dilation(
_fill_holes(num_obstcale_points > num_point_threshold, 20), selem) != True
valid_space = _fill_holes(num_points > num_point_threshold, 20)
traversible = np.all(np.concatenate((obstacle_free[...,np.newaxis],
valid_space[...,np.newaxis]), axis=2),
axis=2)
# plt.imshow(np.concatenate((obstacle_free, valid_space, traversible), axis=1))
# plt.show()
map_out = copy.deepcopy(map)
map_out.num_obstcale_points = num_obstcale_points
map_out.num_points = num_points
map_out.traversible = traversible
map_out.obstacle_free = obstacle_free
map_out.valid_space = valid_space
tt.toc(log_at=1, log_str='src.map_utils.compute_traversibility: ')
return map_out
def resize_maps(map, map_scales, resize_method):
scaled_maps = []
for i, sc in enumerate(map_scales):
if resize_method == 'antialiasing':
# Resize using open cv so that we can compute the size.
# Use PIL resize to use anti aliasing feature.
map_ = cv2.resize(map*1, None, None, fx=sc, fy=sc, interpolation=cv2.INTER_LINEAR)
w = map_.shape[1]; h = map_.shape[0]
map_img = PIL.Image.fromarray((map*255).astype(np.uint8))
map__img = map_img.resize((w,h), PIL.Image.ANTIALIAS)
map_ = np.asarray(map__img).astype(np.float32)
map_ = map_/255.
map_ = np.minimum(map_, 1.0)
map_ = np.maximum(map_, 0.0)
elif resize_method == 'linear_noantialiasing':
map_ = cv2.resize(map*1, None, None, fx=sc, fy=sc, interpolation=cv2.INTER_LINEAR)
else:
logging.error('Unknown resizing method')
scaled_maps.append(map_)
return scaled_maps
def pick_largest_cc(traversible):
out = scipy.ndimage.label(traversible)[0]
cnt = np.bincount(out.reshape(-1))[1:]
return out == np.argmax(cnt) + 1
def get_graph_origin_loc(rng, traversible):
"""Erode the traversibility mask so that we get points in the bulk of the
graph, and not end up with a situation where the graph is localized in the
corner of a cramped room. Output Locs is in the coordinate frame of the
map."""
aa = pick_largest_cc(skimage.morphology.binary_erosion(traversible == True,
selem=np.ones((15,15))))
y, x = np.where(aa > 0)
ind = rng.choice(y.size)
locs = np.array([x[ind], y[ind]])
locs = locs + rng.rand(*(locs.shape)) - 0.5
return locs
def generate_egocentric_maps(scaled_maps, map_scales, map_crop_sizes, loc,
x_axis, y_axis, theta):
maps = []
for i, (map_, sc, map_crop_size) in enumerate(zip(scaled_maps, map_scales, map_crop_sizes)):
maps_i = np.array(get_map_to_predict(loc*sc, x_axis, y_axis, map_,
map_crop_size,
interpolation=cv2.INTER_LINEAR)[0])
maps_i[np.isnan(maps_i)] = 0
maps.append(maps_i)
return maps
def generate_goal_images(map_scales, map_crop_sizes, n_ori, goal_dist,
goal_theta, rel_goal_orientation):
goal_dist = goal_dist[:,0]
goal_theta = goal_theta[:,0]
rel_goal_orientation = rel_goal_orientation[:,0]
goals = [];
# Generate the map images.
for i, (sc, map_crop_size) in enumerate(zip(map_scales, map_crop_sizes)):
goal_i = np.zeros((goal_dist.shape[0], map_crop_size, map_crop_size, n_ori),
dtype=np.float32)
x = goal_dist*np.cos(goal_theta)*sc + (map_crop_size-1.)/2.
y = goal_dist*np.sin(goal_theta)*sc + (map_crop_size-1.)/2.
for j in range(goal_dist.shape[0]):
gc = rel_goal_orientation[j]
x0 = np.floor(x[j]).astype(np.int32); x1 = x0 + 1;
y0 = np.floor(y[j]).astype(np.int32); y1 = y0 + 1;
if x0 >= 0 and x0 <= map_crop_size-1:
if y0 >= 0 and y0 <= map_crop_size-1:
goal_i[j, y0, x0, gc] = (x1-x[j])*(y1-y[j])
if y1 >= 0 and y1 <= map_crop_size-1:
goal_i[j, y1, x0, gc] = (x1-x[j])*(y[j]-y0)
if x1 >= 0 and x1 <= map_crop_size-1:
if y0 >= 0 and y0 <= map_crop_size-1:
goal_i[j, y0, x1, gc] = (x[j]-x0)*(y1-y[j])
if y1 >= 0 and y1 <= map_crop_size-1:
goal_i[j, y1, x1, gc] = (x[j]-x0)*(y[j]-y0)
goals.append(goal_i)
return goals
def get_map_to_predict(src_locs, src_x_axiss, src_y_axiss, map, map_size,
interpolation=cv2.INTER_LINEAR):
fss = []
valids = []
center = (map_size-1.0)/2.0
dst_theta = np.pi/2.0
dst_loc = np.array([center, center])
dst_x_axis = np.array([np.cos(dst_theta), np.sin(dst_theta)])
dst_y_axis = np.array([np.cos(dst_theta+np.pi/2), np.sin(dst_theta+np.pi/2)])
def compute_points(center, x_axis, y_axis):
points = np.zeros((3,2),dtype=np.float32)
points[0,:] = center
points[1,:] = center + x_axis
points[2,:] = center + y_axis
return points
dst_points = compute_points(dst_loc, dst_x_axis, dst_y_axis)
for i in range(src_locs.shape[0]):
src_loc = src_locs[i,:]
src_x_axis = src_x_axiss[i,:]
src_y_axis = src_y_axiss[i,:]
src_points = compute_points(src_loc, src_x_axis, src_y_axis)
M = cv2.getAffineTransform(src_points, dst_points)
fs = cv2.warpAffine(map, M, (map_size, map_size), None, flags=interpolation,
borderValue=np.NaN)
valid = np.invert(np.isnan(fs))
valids.append(valid)
fss.append(fs)
return fss, valids | research/cognitive_mapping_and_planning/src/map_utils.py | import copy
import logging
import PIL
import cv2
import numpy as np
import scipy.ndimage
import skimage.morphology
import src.utils as utils
def _get_xy_bounding_box(vertex, padding):
"""Returns the xy bounding box of the environment."""
min_ = np.floor(np.min(vertex[:, :2], axis=0) - padding).astype(np.int)
max_ = np.ceil(np.max(vertex[:, :2], axis=0) + padding).astype(np.int)
return min_, max_
def _project_to_map(map, vertex, wt=None, ignore_points_outside_map=False):
"""Projects points to map, returns how many points are present at each
location."""
num_points = np.zeros((map.size[1], map.size[0]))
vertex_ = vertex[:, :2] - map.origin
vertex_ = np.round(vertex_ / map.resolution).astype(np.int)
if ignore_points_outside_map:
good_ind = np.all(np.array([vertex_[:,1] >= 0, vertex_[:,1] < map.size[1],
vertex_[:,0] >= 0, vertex_[:,0] < map.size[0]]),
axis=0)
vertex_ = vertex_[good_ind, :]
if wt is not None:
wt = wt[good_ind, :]
if wt is None:
np.add.at(num_points, (vertex_[:, 1], vertex_[:, 0]), 1)
else:
assert(wt.shape[0] == vertex.shape[0]), \
'number of weights should be same as vertices.'
np.add.at(num_points, (vertex_[:, 1], vertex_[:, 0]), wt)
return num_points
def make_map(padding, resolution, vertex=None, sc=1.):
"""Returns a map structure."""
min_, max_ = _get_xy_bounding_box(vertex*sc, padding=padding)
sz = np.ceil((max_ - min_ + 1) / resolution).astype(np.int32)
max_ = min_ + sz * resolution - 1
map = utils.Foo(origin=min_, size=sz, max=max_, resolution=resolution,
padding=padding)
return map
def _fill_holes(img, thresh):
"""Fills holes less than thresh area (assumes 4 connectivity when computing
hole area."""
l, n = scipy.ndimage.label(np.logical_not(img))
img_ = img == True
cnts = np.bincount(l.reshape(-1))
for i, cnt in enumerate(cnts):
if cnt < thresh:
l[l == i] = -1
img_[l == -1] = True
return img_
def compute_traversibility(map, robot_base, robot_height, robot_radius,
valid_min, valid_max, num_point_threshold, shapess,
sc=100., n_samples_per_face=200):
"""Returns a bit map with pixels that are traversible or not as long as the
robot center is inside this volume we are good colisions can be detected by
doing a line search on things, or walking from current location to final
location in the bitmap, or doing bwlabel on the traversibility map."""
tt = utils.Timer()
tt.tic()
num_obstcale_points = np.zeros((map.size[1], map.size[0]))
num_points = np.zeros((map.size[1], map.size[0]))
for i, shapes in enumerate(shapess):
for j in range(shapes.get_number_of_meshes()):
p, face_areas, face_idx = shapes.sample_points_on_face_of_shape(
j, n_samples_per_face, sc)
wt = face_areas[face_idx]/n_samples_per_face
ind = np.all(np.concatenate(
(p[:, [2]] > robot_base,
p[:, [2]] < robot_base + robot_height), axis=1),axis=1)
num_obstcale_points += _project_to_map(map, p[ind, :], wt[ind])
ind = np.all(np.concatenate(
(p[:, [2]] > valid_min,
p[:, [2]] < valid_max), axis=1),axis=1)
num_points += _project_to_map(map, p[ind, :], wt[ind])
selem = skimage.morphology.disk(robot_radius / map.resolution)
obstacle_free = skimage.morphology.binary_dilation(
_fill_holes(num_obstcale_points > num_point_threshold, 20), selem) != True
valid_space = _fill_holes(num_points > num_point_threshold, 20)
traversible = np.all(np.concatenate((obstacle_free[...,np.newaxis],
valid_space[...,np.newaxis]), axis=2),
axis=2)
# plt.imshow(np.concatenate((obstacle_free, valid_space, traversible), axis=1))
# plt.show()
map_out = copy.deepcopy(map)
map_out.num_obstcale_points = num_obstcale_points
map_out.num_points = num_points
map_out.traversible = traversible
map_out.obstacle_free = obstacle_free
map_out.valid_space = valid_space
tt.toc(log_at=1, log_str='src.map_utils.compute_traversibility: ')
return map_out
def resize_maps(map, map_scales, resize_method):
scaled_maps = []
for i, sc in enumerate(map_scales):
if resize_method == 'antialiasing':
# Resize using open cv so that we can compute the size.
# Use PIL resize to use anti aliasing feature.
map_ = cv2.resize(map*1, None, None, fx=sc, fy=sc, interpolation=cv2.INTER_LINEAR)
w = map_.shape[1]; h = map_.shape[0]
map_img = PIL.Image.fromarray((map*255).astype(np.uint8))
map__img = map_img.resize((w,h), PIL.Image.ANTIALIAS)
map_ = np.asarray(map__img).astype(np.float32)
map_ = map_/255.
map_ = np.minimum(map_, 1.0)
map_ = np.maximum(map_, 0.0)
elif resize_method == 'linear_noantialiasing':
map_ = cv2.resize(map*1, None, None, fx=sc, fy=sc, interpolation=cv2.INTER_LINEAR)
else:
logging.error('Unknown resizing method')
scaled_maps.append(map_)
return scaled_maps
def pick_largest_cc(traversible):
out = scipy.ndimage.label(traversible)[0]
cnt = np.bincount(out.reshape(-1))[1:]
return out == np.argmax(cnt) + 1
def get_graph_origin_loc(rng, traversible):
"""Erode the traversibility mask so that we get points in the bulk of the
graph, and not end up with a situation where the graph is localized in the
corner of a cramped room. Output Locs is in the coordinate frame of the
map."""
aa = pick_largest_cc(skimage.morphology.binary_erosion(traversible == True,
selem=np.ones((15,15))))
y, x = np.where(aa > 0)
ind = rng.choice(y.size)
locs = np.array([x[ind], y[ind]])
locs = locs + rng.rand(*(locs.shape)) - 0.5
return locs
def generate_egocentric_maps(scaled_maps, map_scales, map_crop_sizes, loc,
x_axis, y_axis, theta):
maps = []
for i, (map_, sc, map_crop_size) in enumerate(zip(scaled_maps, map_scales, map_crop_sizes)):
maps_i = np.array(get_map_to_predict(loc*sc, x_axis, y_axis, map_,
map_crop_size,
interpolation=cv2.INTER_LINEAR)[0])
maps_i[np.isnan(maps_i)] = 0
maps.append(maps_i)
return maps
def generate_goal_images(map_scales, map_crop_sizes, n_ori, goal_dist,
goal_theta, rel_goal_orientation):
goal_dist = goal_dist[:,0]
goal_theta = goal_theta[:,0]
rel_goal_orientation = rel_goal_orientation[:,0]
goals = [];
# Generate the map images.
for i, (sc, map_crop_size) in enumerate(zip(map_scales, map_crop_sizes)):
goal_i = np.zeros((goal_dist.shape[0], map_crop_size, map_crop_size, n_ori),
dtype=np.float32)
x = goal_dist*np.cos(goal_theta)*sc + (map_crop_size-1.)/2.
y = goal_dist*np.sin(goal_theta)*sc + (map_crop_size-1.)/2.
for j in range(goal_dist.shape[0]):
gc = rel_goal_orientation[j]
x0 = np.floor(x[j]).astype(np.int32); x1 = x0 + 1;
y0 = np.floor(y[j]).astype(np.int32); y1 = y0 + 1;
if x0 >= 0 and x0 <= map_crop_size-1:
if y0 >= 0 and y0 <= map_crop_size-1:
goal_i[j, y0, x0, gc] = (x1-x[j])*(y1-y[j])
if y1 >= 0 and y1 <= map_crop_size-1:
goal_i[j, y1, x0, gc] = (x1-x[j])*(y[j]-y0)
if x1 >= 0 and x1 <= map_crop_size-1:
if y0 >= 0 and y0 <= map_crop_size-1:
goal_i[j, y0, x1, gc] = (x[j]-x0)*(y1-y[j])
if y1 >= 0 and y1 <= map_crop_size-1:
goal_i[j, y1, x1, gc] = (x[j]-x0)*(y[j]-y0)
goals.append(goal_i)
return goals
def get_map_to_predict(src_locs, src_x_axiss, src_y_axiss, map, map_size,
interpolation=cv2.INTER_LINEAR):
fss = []
valids = []
center = (map_size-1.0)/2.0
dst_theta = np.pi/2.0
dst_loc = np.array([center, center])
dst_x_axis = np.array([np.cos(dst_theta), np.sin(dst_theta)])
dst_y_axis = np.array([np.cos(dst_theta+np.pi/2), np.sin(dst_theta+np.pi/2)])
def compute_points(center, x_axis, y_axis):
points = np.zeros((3,2),dtype=np.float32)
points[0,:] = center
points[1,:] = center + x_axis
points[2,:] = center + y_axis
return points
dst_points = compute_points(dst_loc, dst_x_axis, dst_y_axis)
for i in range(src_locs.shape[0]):
src_loc = src_locs[i,:]
src_x_axis = src_x_axiss[i,:]
src_y_axis = src_y_axiss[i,:]
src_points = compute_points(src_loc, src_x_axis, src_y_axis)
M = cv2.getAffineTransform(src_points, dst_points)
fs = cv2.warpAffine(map, M, (map_size, map_size), None, flags=interpolation,
borderValue=np.NaN)
valid = np.invert(np.isnan(fs))
valids.append(valid)
fss.append(fs)
return fss, valids | 0.809163 | 0.528716 |
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
# all states
N_STATES = 19
# discount
GAMMA = 1
# initial state values
stateValues = np.zeros(N_STATES + 2)
# all states but terminal states
states = np.arange(1, N_STATES + 1)
# start from the middle state
START_STATE = 10
# two terminal states
# an action leading to the left terminal state has reward -1
# an action leading to the right terminal state has reward 1
END_STATES = [0, N_STATES + 1]
# true state value from bellman equation
realStateValues = np.arange(-20, 22, 2) / 20.0
realStateValues[0] = realStateValues[-1] = 0
# n-steps TD method
# @stateValues: values for each state, will be updated
# @n: # of steps
# @alpha: # step size
def temporalDifference(stateValues, n, alpha):
# initial starting state
currentState = START_STATE
# arrays to store states and rewards for an episode
# space isn't a major consideration, so I didn't use the mod trick
states = [currentState]
rewards = [0]
# track the time
time = 0
# the length of this episode
T = float('inf')
while True:
# go to next time step
time += 1
if time < T:
# choose an action randomly
if np.random.binomial(1, 0.5) == 1:
newState = currentState + 1
else:
newState = currentState - 1
if newState == 0:
reward = -1
elif newState == 20:
reward = 1
else:
reward = 0
# store new state and new reward
states.append(newState)
rewards.append(reward)
if newState in END_STATES:
T = time
# get the time of the state to update
updateTime = time - n
if updateTime >= 0:
returns = 0.0
# calculate corresponding rewards
for t in range(updateTime + 1, min(T, updateTime + n) + 1):
returns += pow(GAMMA, t - updateTime - 1) * rewards[t]
# add state value to the return
if updateTime + n <= T:
returns += pow(GAMMA, n) * stateValues[states[(updateTime + n)]]
stateToUpdate = states[updateTime]
# update the state value
if not stateToUpdate in END_STATES:
stateValues[stateToUpdate] += alpha * (returns - stateValues[stateToUpdate])
if updateTime == T - 1:
break
currentState = newState
# Figure 7.2, it will take quite a while
def figure7_2():
# truncate value for better display
truncateValue = 0.55
# all possible steps
steps = np.power(2, np.arange(0, 10))
# all possible alphas
alphas = np.arange(0, 1.1, 0.1)
# each run has 10 episodes
episodes = 10
# perform 100 independent runs
runs = 100
# track the errors for each (step, alpha) combination
errors = np.zeros((len(steps), len(alphas)))
for run in range(0, runs):
for stepInd, step in zip(range(len(steps)), steps):
for alphaInd, alpha in zip(range(len(alphas)), alphas):
print('run:', run, 'step:', step, 'alpha:', alpha)
currentStateValues = np.copy(stateValues)
for ep in range(0, episodes):
temporalDifference(currentStateValues, step, alpha)
# calculate the RMS error
errors[stepInd, alphaInd] += np.sqrt(np.sum(np.power(currentStateValues - realStateValues, 2)) / N_STATES)
# take average
errors /= episodes * runs
# truncate the error
errors[errors > truncateValue] = truncateValue
plt.figure()
for i in range(0, len(steps)):
plt.plot(alphas, errors[i, :], label='n = ' + str(steps[i]))
plt.xlabel('alpha')
plt.ylabel('RMS error')
plt.legend()
figure7_2()
plt.show() | chapter07/RandomWalk.py |
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
# all states
N_STATES = 19
# discount
GAMMA = 1
# initial state values
stateValues = np.zeros(N_STATES + 2)
# all states but terminal states
states = np.arange(1, N_STATES + 1)
# start from the middle state
START_STATE = 10
# two terminal states
# an action leading to the left terminal state has reward -1
# an action leading to the right terminal state has reward 1
END_STATES = [0, N_STATES + 1]
# true state value from bellman equation
realStateValues = np.arange(-20, 22, 2) / 20.0
realStateValues[0] = realStateValues[-1] = 0
# n-steps TD method
# @stateValues: values for each state, will be updated
# @n: # of steps
# @alpha: # step size
def temporalDifference(stateValues, n, alpha):
# initial starting state
currentState = START_STATE
# arrays to store states and rewards for an episode
# space isn't a major consideration, so I didn't use the mod trick
states = [currentState]
rewards = [0]
# track the time
time = 0
# the length of this episode
T = float('inf')
while True:
# go to next time step
time += 1
if time < T:
# choose an action randomly
if np.random.binomial(1, 0.5) == 1:
newState = currentState + 1
else:
newState = currentState - 1
if newState == 0:
reward = -1
elif newState == 20:
reward = 1
else:
reward = 0
# store new state and new reward
states.append(newState)
rewards.append(reward)
if newState in END_STATES:
T = time
# get the time of the state to update
updateTime = time - n
if updateTime >= 0:
returns = 0.0
# calculate corresponding rewards
for t in range(updateTime + 1, min(T, updateTime + n) + 1):
returns += pow(GAMMA, t - updateTime - 1) * rewards[t]
# add state value to the return
if updateTime + n <= T:
returns += pow(GAMMA, n) * stateValues[states[(updateTime + n)]]
stateToUpdate = states[updateTime]
# update the state value
if not stateToUpdate in END_STATES:
stateValues[stateToUpdate] += alpha * (returns - stateValues[stateToUpdate])
if updateTime == T - 1:
break
currentState = newState
# Figure 7.2, it will take quite a while
def figure7_2():
# truncate value for better display
truncateValue = 0.55
# all possible steps
steps = np.power(2, np.arange(0, 10))
# all possible alphas
alphas = np.arange(0, 1.1, 0.1)
# each run has 10 episodes
episodes = 10
# perform 100 independent runs
runs = 100
# track the errors for each (step, alpha) combination
errors = np.zeros((len(steps), len(alphas)))
for run in range(0, runs):
for stepInd, step in zip(range(len(steps)), steps):
for alphaInd, alpha in zip(range(len(alphas)), alphas):
print('run:', run, 'step:', step, 'alpha:', alpha)
currentStateValues = np.copy(stateValues)
for ep in range(0, episodes):
temporalDifference(currentStateValues, step, alpha)
# calculate the RMS error
errors[stepInd, alphaInd] += np.sqrt(np.sum(np.power(currentStateValues - realStateValues, 2)) / N_STATES)
# take average
errors /= episodes * runs
# truncate the error
errors[errors > truncateValue] = truncateValue
plt.figure()
for i in range(0, len(steps)):
plt.plot(alphas, errors[i, :], label='n = ' + str(steps[i]))
plt.xlabel('alpha')
plt.ylabel('RMS error')
plt.legend()
figure7_2()
plt.show() | 0.704973 | 0.709988 |
from .constants import GRAIN_TYPE_LIST
from .constants import HOP_TYPE_LIST
from .constants import IMPERIAL_UNITS
from .constants import SI_UNITS
from .exceptions import ValidatorException
__all__ = [
u"validate_grain_type",
u"validate_hop_type",
u"validate_percentage",
u"validate_units",
u"validate_required_fields",
u"validate_optional_fields",
]
def validate_grain_type(grain_type):
"""
Validate a grain type
:param str grain_type: Type of Grain
:return: grain type
:rtype: str
:raises ValidatorException: If grain type is unknown
"""
if grain_type in GRAIN_TYPE_LIST:
return grain_type
raise ValidatorException(
u"Unkown grain type '{}', must use {}".format(
grain_type, u", ".join(GRAIN_TYPE_LIST)
)
)
def validate_hop_type(hop_type):
"""
Validate a hop type
:param str hop_type: Type of Grain
:return: hop type
:rtype: str
:raises ValidatorException: If hop type is unknown
"""
if hop_type in HOP_TYPE_LIST:
return hop_type
raise ValidatorException(
u"Unkown hop type '{}', must use {}".format(hop_type, u", ".join(HOP_TYPE_LIST))
)
def validate_percentage(percent):
"""
Validate decimal percentage
:param float percent: Percentage between 0.0 and 1.0
:return: percentage
:rtype: float
:raises ValidatorException: If decimal percentage not between 0.0 and 1.0
"""
if 0.0 <= percent <= 1.0:
return percent
raise ValidatorException(u"Percentage values should be in decimal format")
def validate_units(units):
"""
Validate units
:param str units: Unit type
:return: units
:rtype: str
:raises ValidatorException: If units is unknown
"""
if units in [IMPERIAL_UNITS, SI_UNITS]:
return units
raise ValidatorException(
u"Unkown units '{}', must use {} or {}".format(units, IMPERIAL_UNITS, SI_UNITS)
)
def validate_required_fields(data, required_fields):
"""
Validate fields which are required as part of the data.
:param dict data: A python dictionary to check for required fields
:param list(tuple) required_fields: Values and types to check for in data
:raises ValidatorException: Required field is missing from data
:raises ValidatorException: Required field is of the wrong type
The format is a list of tuples where the first element is a string with
a value that should be a key found in the data dict and
where the second element is a python type or list/tuple of
python types to check the field against.
"""
for field, field_type in required_fields:
if field not in data:
raise ValidatorException(
u"Required field '{}' missing from data".format(field) # noqa
)
if field_type == str:
try:
field_type = unicode
except NameError:
field_type = str
if not isinstance(data[field], field_type):
raise ValidatorException(
u"Required field '{}' is not of type '{}'".format( # noqa
field, field_type
)
)
def validate_optional_fields(data, optional_fields, data_field=u"data"):
"""
Validate fields which are optional as part of the data.
:param dict data: A python dictionary to check for required fields
:param list(tuple) optional_fields: Values and types to check for in data
:param str data_field: The key in the data dictionary containing the optional fields
:raises ValidatorException: Optional field is of the wrong type
The format is a list of tuples where the first element is a string with
a value that should be a key found in the data dict and
where the second element is a python type or list/tuple of
python types to check the field against.
""" # noqa
# If no optional data field present then return
if data_field not in data:
return
for field, field_type in optional_fields:
if field in data[data_field]:
if field_type == str:
try:
field_type = unicode
except NameError:
field_type = str
# With optional fields only check the type as they are overrides
# and not all overrides need to be present
if not isinstance(data[data_field][field], field_type):
raise ValidatorException(
u"Optional field '{}' in '{}' is not of type '{}'".format( # noqa
field, data_field, field_type
)
) | brew/validators.py | from .constants import GRAIN_TYPE_LIST
from .constants import HOP_TYPE_LIST
from .constants import IMPERIAL_UNITS
from .constants import SI_UNITS
from .exceptions import ValidatorException
__all__ = [
u"validate_grain_type",
u"validate_hop_type",
u"validate_percentage",
u"validate_units",
u"validate_required_fields",
u"validate_optional_fields",
]
def validate_grain_type(grain_type):
"""
Validate a grain type
:param str grain_type: Type of Grain
:return: grain type
:rtype: str
:raises ValidatorException: If grain type is unknown
"""
if grain_type in GRAIN_TYPE_LIST:
return grain_type
raise ValidatorException(
u"Unkown grain type '{}', must use {}".format(
grain_type, u", ".join(GRAIN_TYPE_LIST)
)
)
def validate_hop_type(hop_type):
"""
Validate a hop type
:param str hop_type: Type of Grain
:return: hop type
:rtype: str
:raises ValidatorException: If hop type is unknown
"""
if hop_type in HOP_TYPE_LIST:
return hop_type
raise ValidatorException(
u"Unkown hop type '{}', must use {}".format(hop_type, u", ".join(HOP_TYPE_LIST))
)
def validate_percentage(percent):
"""
Validate decimal percentage
:param float percent: Percentage between 0.0 and 1.0
:return: percentage
:rtype: float
:raises ValidatorException: If decimal percentage not between 0.0 and 1.0
"""
if 0.0 <= percent <= 1.0:
return percent
raise ValidatorException(u"Percentage values should be in decimal format")
def validate_units(units):
"""
Validate units
:param str units: Unit type
:return: units
:rtype: str
:raises ValidatorException: If units is unknown
"""
if units in [IMPERIAL_UNITS, SI_UNITS]:
return units
raise ValidatorException(
u"Unkown units '{}', must use {} or {}".format(units, IMPERIAL_UNITS, SI_UNITS)
)
def validate_required_fields(data, required_fields):
"""
Validate fields which are required as part of the data.
:param dict data: A python dictionary to check for required fields
:param list(tuple) required_fields: Values and types to check for in data
:raises ValidatorException: Required field is missing from data
:raises ValidatorException: Required field is of the wrong type
The format is a list of tuples where the first element is a string with
a value that should be a key found in the data dict and
where the second element is a python type or list/tuple of
python types to check the field against.
"""
for field, field_type in required_fields:
if field not in data:
raise ValidatorException(
u"Required field '{}' missing from data".format(field) # noqa
)
if field_type == str:
try:
field_type = unicode
except NameError:
field_type = str
if not isinstance(data[field], field_type):
raise ValidatorException(
u"Required field '{}' is not of type '{}'".format( # noqa
field, field_type
)
)
def validate_optional_fields(data, optional_fields, data_field=u"data"):
"""
Validate fields which are optional as part of the data.
:param dict data: A python dictionary to check for required fields
:param list(tuple) optional_fields: Values and types to check for in data
:param str data_field: The key in the data dictionary containing the optional fields
:raises ValidatorException: Optional field is of the wrong type
The format is a list of tuples where the first element is a string with
a value that should be a key found in the data dict and
where the second element is a python type or list/tuple of
python types to check the field against.
""" # noqa
# If no optional data field present then return
if data_field not in data:
return
for field, field_type in optional_fields:
if field in data[data_field]:
if field_type == str:
try:
field_type = unicode
except NameError:
field_type = str
# With optional fields only check the type as they are overrides
# and not all overrides need to be present
if not isinstance(data[data_field][field], field_type):
raise ValidatorException(
u"Optional field '{}' in '{}' is not of type '{}'".format( # noqa
field, data_field, field_type
)
) | 0.763924 | 0.437763 |
from base.middleware import RequestMiddleware
from base.utils import get_our_models
# django imports
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.conf import settings
@receiver(post_save)
def audit_log(sender, instance, created, raw, update_fields, **kwargs):
    """
    Post save signal that creates a log when an object from a model of
    our apps is created or updated.

    On creation the full (non-sensitive) field dump is stored; on update
    only the fields that actually changed are logged, and sensitive
    fields are reduced to a "field updated" marker so their values never
    reach the log. ``update_fields`` is part of the signal signature but
    unused here.
    """
    # only listening models created in our apps
    if sender not in get_our_models():
        return
    sensitive_fields = settings.LOG_SENSITIVE_FIELDS
    ignored_fields = settings.LOG_IGNORE_FIELDS
    user = get_user()
    if created:
        message = {'Created': instance.to_dict(
            exclude=ignored_fields + sensitive_fields,
            include_m2m=False,
        )}
        instance.save_addition(user, message)
    elif not raw:
        # ``raw`` is True when loading fixtures; skip logging then.
        changed_field_labels = {}
        original_dict = instance.original_dict
        actual_dict = instance.to_dict(
            exclude=ignored_fields,
            include_m2m=False,
        )
        # NOTE(review): assumes every key of ``original_dict`` is also
        # present in ``actual_dict`` — confirm both snapshots are built
        # with compatible ``exclude`` lists.
        for key in original_dict:
            if original_dict[key] != actual_dict[key]:
                if key in sensitive_fields:
                    # Log only the fact of a change, never the value.
                    changed_field_labels[key] = {'change': 'field updated'}
                else:
                    changed_field_labels[key] = {
                        'from': original_dict[key],
                        'to': actual_dict[key],
                    }
        # A non-empty label dict is exactly the "something changed" case.
        if changed_field_labels:
            change_message = {'changed': {'fields': changed_field_labels}}
            instance.save_edition(user, change_message)
@receiver(post_delete)
def audit_delete_log(sender, instance, **kwargs):
    """
    Post delete signal recording a deletion log entry when an instance
    of one of our own models is removed.
    """
    # Models from third-party apps are not audited.
    if sender in get_our_models():
        instance.save_deletion(get_user())
def get_user():
    """Return the user stashed on the middleware's thread-local storage.

    ``None`` when called outside a request cycle (management commands,
    shell), where the middleware never set the attribute.
    """
    thread_local = RequestMiddleware.thread_local
    if hasattr(thread_local, 'user'):
        user = thread_local.user
    else:
        user = None
    return user | base/signals.py | from base.middleware import RequestMiddleware
from base.utils import get_our_models
# django imports
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.conf import settings
@receiver(post_save)
def audit_log(sender, instance, created, raw, update_fields, **kwargs):
    """
    Post-save hook writing an audit trail entry for models belonging to
    our own apps: a full creation dump, or a per-field change set on
    update (sensitive values are masked).
    """
    # Ignore models that do not come from our own applications.
    if sender not in get_our_models():
        return
    sensitive = settings.LOG_SENSITIVE_FIELDS
    ignored = settings.LOG_IGNORE_FIELDS
    actor = get_user()
    if created:
        snapshot = instance.to_dict(
            exclude=ignored + sensitive,
            include_m2m=False,
        )
        instance.save_addition(actor, {'Created': snapshot})
        return
    if raw:
        # Fixture loading is not audited.
        return
    before = instance.original_dict
    after = instance.to_dict(exclude=ignored, include_m2m=False)
    def _label(field):
        # Sensitive values are masked; everything else logs from/to.
        if field in sensitive:
            return {'change': 'field updated'}
        return {'from': before[field], 'to': after[field]}
    changed = {
        field: _label(field)
        for field in before
        if before[field] != after[field]
    }
    if changed:
        instance.save_edition(actor, {'changed': {'fields': changed}})
@receiver(post_delete)
def audit_delete_log(sender, instance, **kwargs):
    """
    Post-delete hook recording who deleted an instance of our models.
    """
    if sender not in get_our_models():
        # Only models declared in our own apps are audited.
        return
    deleting_user = get_user()
    instance.save_deletion(deleting_user)
def get_user():
    """Fetch the current request's user from the middleware thread-local.

    Falls back to ``None`` when no request is in flight and the
    attribute was never set.
    """
    thread_local = RequestMiddleware.thread_local
    if hasattr(thread_local, 'user'):
        user = thread_local.user
    else:
        user = None
    return user | 0.61832 | 0.069668
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect, HttpResponse
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from instance.models import Host
from dashboard.views import sort_host
from webvirtmgr.server import ConnServer
from libvirt import libvirtError
from webvirtmgr.settings import TIME_JS_REFRESH
def cpuusage(request, host_id):
    """
    Return CPU Usage in % as a plain-text response.

    Polled by the dashboard's JavaScript. On connection failure an empty
    response is returned instead of the previous implicit ``None``,
    which made Django raise "view didn't return an HttpResponse".
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login')
    host = Host.objects.get(id=host_id)
    try:
        conn = ConnServer(host)
    except Exception:
        # Host unreachable / libvirt error: report no data, do not crash.
        return HttpResponse('')
    cpu_usage = conn.cpu_get_usage()
    return HttpResponse(cpu_usage)
def memusage(request, host_id):
    """
    Return Memory Usage in % as a plain-text response.

    Polled by the dashboard's JavaScript. On connection failure an empty
    response is returned instead of the previous implicit ``None``,
    which made Django raise "view didn't return an HttpResponse".
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login')
    host = Host.objects.get(id=host_id)
    try:
        conn = ConnServer(host)
    except Exception:
        # Host unreachable / libvirt error: report no data, do not crash.
        return HttpResponse('')
    # memory_get_usage() returns (total, usage, percent); the poller
    # only needs the percentage (index 2).
    mem_usage = conn.memory_get_usage()
    return HttpResponse(mem_usage[2])
def overview(request, host_id):
    """
    Overview page.

    Renders host details (CPU/memory/libvirt info) and the VM list, and
    handles the POST actions (start/shutdown/destroy/suspend/resume)
    issued from that page.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login')
    errors = []
    time_refresh = TIME_JS_REFRESH
    host = Host.objects.get(id=host_id)
    # Defaults so the template renders even when the host is unreachable.
    all_vm = hostname = arch = cpus = cpu_model = \
        type_conn = libvirt_ver = all_mem = \
        mem_usage = mem_percent = cpu_usage = None
    try:
        conn = ConnServer(host)
    except libvirtError as e:
        conn = None
    if not conn:
        # NOTE(review): on Python 3 ``e`` is unbound outside the except
        # clause — this branch works on Python 2 only; confirm runtime.
        errors.append(e.message)
    else:
        have_kvm = conn.hard_accel_node()
        if not have_kvm:
            msg = _('Your CPU doesn\'t support hardware virtualization')
            errors.append(msg)
        all_vm = sort_host(conn.vds_get_node())
        hostname, arch, cpus, cpu_model, type_conn, libvirt_ver = conn.node_get_info()
        all_mem, mem_usage, mem_percent = conn.memory_get_usage()
        cpu_usage = conn.cpu_get_usage()
    # NOTE(review): this POST branch dereferences ``conn`` even when the
    # connection above failed (conn is None) — verify intended.
    if request.method == 'POST':
        vname = request.POST.get('vname', '')
        dom = conn.lookupVM(vname)
        if 'start' in request.POST:
            try:
                dom.create()
                return HttpResponseRedirect(request.get_full_path())
            except libvirtError as msg_error:
                errors.append(msg_error.message)
        if 'shutdown' in request.POST:
            try:
                dom.shutdown()
                return HttpResponseRedirect(request.get_full_path())
            except libvirtError as msg_error:
                errors.append(msg_error.message)
        if 'destroy' in request.POST:
            try:
                dom.destroy()
                return HttpResponseRedirect(request.get_full_path())
            except libvirtError as msg_error:
                errors.append(msg_error.message)
        if 'suspend' in request.POST:
            try:
                dom.suspend()
                return HttpResponseRedirect(request.get_full_path())
            except libvirtError as msg_error:
                errors.append(msg_error.message)
        if 'resume' in request.POST:
            try:
                dom.resume()
                return HttpResponseRedirect(request.get_full_path())
            except libvirtError as msg_error:
                errors.append(msg_error.message)
        # NOTE(review): the connection is closed only on POST requests;
        # GET requests appear to leave it open — confirm ConnServer
        # lifetime expectations.
        conn.close()
    return render_to_response('overview.html', {'host_id': host_id,
                                                'errors': errors,
                                                'time_refresh': time_refresh,
                                                'all_vm': all_vm,
                                                'hostname': hostname,
                                                'arch': arch, 'cpus': cpus, 'cpu_model': cpu_model, 'cpu_usage': cpu_usage,
                                                'type_conn': type_conn, 'libvirt_ver': libvirt_ver,
                                                'all_mem': all_mem, 'mem_usage': mem_usage, 'mem_percent': mem_percent
                                                },
                                                context_instance=RequestContext(request)) | webvirtmgr/overview/views.py | from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect, HttpResponse
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from instance.models import Host
from dashboard.views import sort_host
from webvirtmgr.server import ConnServer
from libvirt import libvirtError
from webvirtmgr.settings import TIME_JS_REFRESH
def cpuusage(request, host_id):
    """
    Return CPU Usage in % (plain-text endpoint for the dashboard poller).
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login')
    host = Host.objects.get(id=host_id)
    try:
        connection = ConnServer(host)
    except:
        connection = None
    if connection:
        return HttpResponse(connection.cpu_get_usage())
    # NOTE(review): falls through returning None when the host is
    # unreachable, which Django rejects — confirm intended.
def memusage(request, host_id):
    """
    Return Memory Usage in % (plain-text endpoint for the dashboard poller).
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login')
    host = Host.objects.get(id=host_id)
    try:
        connection = ConnServer(host)
    except:
        connection = None
    if connection:
        # memory_get_usage() -> (total, usage, percent); serve the percent.
        return HttpResponse(connection.memory_get_usage()[2])
def overview(request, host_id):
    """
    Overview page.

    Shows host/libvirt information plus the VM list and processes the
    VM power actions posted from the page.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login')
    errors = []
    time_refresh = TIME_JS_REFRESH
    host = Host.objects.get(id=host_id)
    # Pre-seed everything with None so template rendering survives a
    # failed connection.
    all_vm = hostname = arch = cpus = cpu_model = \
        type_conn = libvirt_ver = all_mem = \
        mem_usage = mem_percent = cpu_usage = None
    try:
        conn = ConnServer(host)
    except libvirtError as e:
        conn = None
    if not conn:
        # NOTE(review): ``e`` is only bound when the except clause ran,
        # and on Python 3 it is deleted afterwards — Python 2 only code.
        errors.append(e.message)
    else:
        have_kvm = conn.hard_accel_node()
        if not have_kvm:
            msg = _('Your CPU doesn\'t support hardware virtualization')
            errors.append(msg)
        all_vm = sort_host(conn.vds_get_node())
        hostname, arch, cpus, cpu_model, type_conn, libvirt_ver = conn.node_get_info()
        all_mem, mem_usage, mem_percent = conn.memory_get_usage()
        cpu_usage = conn.cpu_get_usage()
    # NOTE(review): POST handling assumes ``conn`` is valid; with a dead
    # connection ``conn.lookupVM`` would raise AttributeError — verify.
    if request.method == 'POST':
        vname = request.POST.get('vname', '')
        dom = conn.lookupVM(vname)
        if 'start' in request.POST:
            try:
                dom.create()
                return HttpResponseRedirect(request.get_full_path())
            except libvirtError as msg_error:
                errors.append(msg_error.message)
        if 'shutdown' in request.POST:
            try:
                dom.shutdown()
                return HttpResponseRedirect(request.get_full_path())
            except libvirtError as msg_error:
                errors.append(msg_error.message)
        if 'destroy' in request.POST:
            try:
                dom.destroy()
                return HttpResponseRedirect(request.get_full_path())
            except libvirtError as msg_error:
                errors.append(msg_error.message)
        if 'suspend' in request.POST:
            try:
                dom.suspend()
                return HttpResponseRedirect(request.get_full_path())
            except libvirtError as msg_error:
                errors.append(msg_error.message)
        if 'resume' in request.POST:
            try:
                dom.resume()
                return HttpResponseRedirect(request.get_full_path())
            except libvirtError as msg_error:
                errors.append(msg_error.message)
        # Connection is closed only on the POST path (see NOTE above).
        conn.close()
    return render_to_response('overview.html', {'host_id': host_id,
                                                'errors': errors,
                                                'time_refresh': time_refresh,
                                                'all_vm': all_vm,
                                                'hostname': hostname,
                                                'arch': arch, 'cpus': cpus, 'cpu_model': cpu_model, 'cpu_usage': cpu_usage,
                                                'type_conn': type_conn, 'libvirt_ver': libvirt_ver,
                                                'all_mem': all_mem, 'mem_usage': mem_usage, 'mem_percent': mem_percent
                                                },
                                                context_instance=RequestContext(request)) | 0.377196 | 0.034947
import KratosMultiphysics
import run_cpp_unit_tests
# Import Kratos "wrapper" for unittests
import KratosMultiphysics.KratosUnittest as KratosUnittest
# Import from Test Factories (with general analysis flows)
from particle_mechanics_test_factory import AxisSymmetricCircularPlate2DTriTest as TAxisSymmetricCircularPlate2DTriTest
from particle_mechanics_test_factory import BeamCantileverStaticLinearElasticPointLoad2DTriTest as TBeamCantileverStaticLinearElasticPointLoad2DTriTest
from particle_mechanics_test_factory import BeamCantileverStaticLinearElasticParticlePointLoad2DTriTest as TBeamCantileverStaticLinearElasticParticlePointLoad2DTriTest
from particle_mechanics_test_factory import BeamCantileverStaticLinearElasticLineLoad2DQuadTest as TBeamCantileverStaticLinearElasticLineLoad2DQuadTest
from particle_mechanics_test_factory import BeamCantileverStaticLinearElasticSurfaceLoad3DHexaTest as TBeamCantileverStaticLinearElasticSurfaceLoad3DHexaTest
from particle_mechanics_test_factory import BeamCantileverStaticHyperelasticSelfWeightLoad2DQuadTest as TBeamCantileverStaticHyperelasticSelfWeightLoad2DQuadTest
from particle_mechanics_test_factory import CooksMembraneCompressibleTest as TCooksMembraneCompressibleTest
from particle_mechanics_test_factory import CooksMembraneUPCompressibleTest as TCooksMembraneUPCompressibleTest
from particle_mechanics_test_factory import CooksMembraneUPIncompressibleTest as TCooksMembraneUPIncompressibleTest
from particle_mechanics_test_factory import CLLinearElastic3DQuadTest as TCLLinearElastic3DQuadTest
from particle_mechanics_test_factory import GravityApplicationTest as TGravityApplicationTest
from particle_mechanics_test_factory import PenaltyImpositionBeamCantileverStaticHyperelasticSelfWeightLoad2DQuadTest as TPenaltyImpositionBeamCantileverStaticHyperelasticSelfWeightLoad2DQuadTest
from particle_mechanics_test_factory import SlipBoundaryTest as TSlipBoundaryTest
# Import from Test Factories (with different analysis flows)
from test_generate_mpm_particle import TestGenerateMPMParticle as TTestGenerateMPMParticle
from test_generate_mpm_particle_condition import TestGenerateMPMParticleCondition as TTestGenerateMPMParticleCondition
from test_particle_erase_process import TestParticleEraseProcess as TTestParticleEraseProcess
from test_search_mpm_particle import TestSearchMPMParticle as TTestSearchMPMParticle
from test_static_loading_conditions_point import TestStaticLoadingConditionsPoint as TTestStaticLoadingConditionsPoint
from test_static_loading_conditions_line import TestStaticLoadingConditionsLine as TTestStaticLoadingConditionsLine
from test_static_loading_conditions_surface import TestStaticLoadingConditionsSurface as TTestStaticLoadingConditionsSurface
def AssembleTestSuites():
    ''' Populates the test suites to run.

    Populates the test suites to run. At least, it should populate the suites:
    "small", "nightly" and "all"

    Return
    ------
    suites: A dictionary of suites
        The set of suites with its test_cases added.
    '''
    suites = KratosUnittest.KratosSuites
    loader = KratosUnittest.TestLoader()

    # "small" suite: very fast tests run by the continuous-integration tool.
    smallSuite = suites['small']
    small_cases = [
        TTestGenerateMPMParticle,
        TTestGenerateMPMParticleCondition,
        TTestParticleEraseProcess,
        TTestSearchMPMParticle,
        # TODO: Look further into these three tests as they are still failing for AMatrix
        TTestStaticLoadingConditionsPoint,    # FIXME:
        TTestStaticLoadingConditionsLine,     # FIXME:
        TTestStaticLoadingConditionsSurface,  # FIXME:
    ]
    for case_class in small_cases:
        smallSuite.addTests(loader.loadTestsFromTestCases([case_class]))
    smallSuite.addTest(TCLLinearElastic3DQuadTest('test_execution'))
    smallSuite.addTest(TGravityApplicationTest('test_execution'))
    # TODO: Look further into this test as it is still failing for AMatrix
    smallSuite.addTest(TSlipBoundaryTest('test_execution'))  # FIXME:

    # "nightly" suite: everything in "small" plus the longer analyses.
    nightSuite = suites['nightly']
    nightSuite.addTests(smallSuite)
    for nightly_case in (
            TAxisSymmetricCircularPlate2DTriTest,
            TBeamCantileverStaticLinearElasticPointLoad2DTriTest,
            TBeamCantileverStaticLinearElasticParticlePointLoad2DTriTest,
            TBeamCantileverStaticLinearElasticLineLoad2DQuadTest,
            TBeamCantileverStaticLinearElasticSurfaceLoad3DHexaTest,
            TBeamCantileverStaticHyperelasticSelfWeightLoad2DQuadTest,
            TCooksMembraneCompressibleTest,
            TCooksMembraneUPCompressibleTest,
            TCooksMembraneUPIncompressibleTest,
            TPenaltyImpositionBeamCantileverStaticHyperelasticSelfWeightLoad2DQuadTest,
    ):
        nightSuite.addTest(nightly_case('test_execution'))

    # "validation" suite: very long tests that should not run nightly.
    validationSuite = suites['validation']

    # "all" suite: nightly (which already contains "small") plus validation.
    allSuite = suites['all']
    allSuite.addTests(nightSuite)
    allSuite.addTests(validationSuite)
    return suites
if __name__ == '__main__':
    # Run the compiled C++ unit tests first, then the python suites
    # assembled by AssembleTestSuites().
    KratosMultiphysics.Logger.PrintInfo("Unittests", "\nRunning cpp unit tests ...")
    run_cpp_unit_tests.run()
    KratosMultiphysics.Logger.PrintInfo("Unittests", "Finished running cpp unit tests!")
    KratosMultiphysics.Logger.PrintInfo("Unittests", "\nRunning python tests ...")
    KratosUnittest.runTests(AssembleTestSuites())
    KratosMultiphysics.Logger.PrintInfo("Unittests", "Finished python tests!") | applications/ParticleMechanicsApplication/tests/test_ParticleMechanicsApplication.py | import KratosMultiphysics
import run_cpp_unit_tests
# Import Kratos "wrapper" for unittests
import KratosMultiphysics.KratosUnittest as KratosUnittest
# Import from Test Factories (with general analysis flows)
from particle_mechanics_test_factory import AxisSymmetricCircularPlate2DTriTest as TAxisSymmetricCircularPlate2DTriTest
from particle_mechanics_test_factory import BeamCantileverStaticLinearElasticPointLoad2DTriTest as TBeamCantileverStaticLinearElasticPointLoad2DTriTest
from particle_mechanics_test_factory import BeamCantileverStaticLinearElasticParticlePointLoad2DTriTest as TBeamCantileverStaticLinearElasticParticlePointLoad2DTriTest
from particle_mechanics_test_factory import BeamCantileverStaticLinearElasticLineLoad2DQuadTest as TBeamCantileverStaticLinearElasticLineLoad2DQuadTest
from particle_mechanics_test_factory import BeamCantileverStaticLinearElasticSurfaceLoad3DHexaTest as TBeamCantileverStaticLinearElasticSurfaceLoad3DHexaTest
from particle_mechanics_test_factory import BeamCantileverStaticHyperelasticSelfWeightLoad2DQuadTest as TBeamCantileverStaticHyperelasticSelfWeightLoad2DQuadTest
from particle_mechanics_test_factory import CooksMembraneCompressibleTest as TCooksMembraneCompressibleTest
from particle_mechanics_test_factory import CooksMembraneUPCompressibleTest as TCooksMembraneUPCompressibleTest
from particle_mechanics_test_factory import CooksMembraneUPIncompressibleTest as TCooksMembraneUPIncompressibleTest
from particle_mechanics_test_factory import CLLinearElastic3DQuadTest as TCLLinearElastic3DQuadTest
from particle_mechanics_test_factory import GravityApplicationTest as TGravityApplicationTest
from particle_mechanics_test_factory import PenaltyImpositionBeamCantileverStaticHyperelasticSelfWeightLoad2DQuadTest as TPenaltyImpositionBeamCantileverStaticHyperelasticSelfWeightLoad2DQuadTest
from particle_mechanics_test_factory import SlipBoundaryTest as TSlipBoundaryTest
# Import from Test Factories (with different analysis flows)
from test_generate_mpm_particle import TestGenerateMPMParticle as TTestGenerateMPMParticle
from test_generate_mpm_particle_condition import TestGenerateMPMParticleCondition as TTestGenerateMPMParticleCondition
from test_particle_erase_process import TestParticleEraseProcess as TTestParticleEraseProcess
from test_search_mpm_particle import TestSearchMPMParticle as TTestSearchMPMParticle
from test_static_loading_conditions_point import TestStaticLoadingConditionsPoint as TTestStaticLoadingConditionsPoint
from test_static_loading_conditions_line import TestStaticLoadingConditionsLine as TTestStaticLoadingConditionsLine
from test_static_loading_conditions_surface import TestStaticLoadingConditionsSurface as TTestStaticLoadingConditionsSurface
def AssembleTestSuites():
    ''' Populates the test suites to run.

    Populates the test suites to run. At least, it should populate the suites:
    "small", "nightly" and "all"

    Return
    ------
    suites: A dictionary of suites
        The set of suites with its test_cases added.
    '''
    suites = KratosUnittest.KratosSuites
    # One loader instance is enough; it was previously re-created per call.
    loader = KratosUnittest.TestLoader()
    ### Create a test suite with the selected tests (Small tests):
    ### These tests have to be very fast!
    ### Execution time << 1 sec on a regular PC !!!
    ## These tests are executed by the continuous integration tool
    smallSuite = suites['small']
    smallSuite.addTests(loader.loadTestsFromTestCases([TTestGenerateMPMParticle]))
    smallSuite.addTests(loader.loadTestsFromTestCases([TTestGenerateMPMParticleCondition]))
    smallSuite.addTests(loader.loadTestsFromTestCases([TTestParticleEraseProcess]))
    smallSuite.addTests(loader.loadTestsFromTestCases([TTestSearchMPMParticle]))
    # TODO: Look further into these three tests as they are still failing for AMatrix
    smallSuite.addTests(loader.loadTestsFromTestCases([TTestStaticLoadingConditionsPoint]))  # FIXME:
    smallSuite.addTests(loader.loadTestsFromTestCases([TTestStaticLoadingConditionsLine]))  # FIXME:
    smallSuite.addTests(loader.loadTestsFromTestCases([TTestStaticLoadingConditionsSurface]))  # FIXME:
    smallSuite.addTest(TCLLinearElastic3DQuadTest('test_execution'))
    smallSuite.addTest(TGravityApplicationTest('test_execution'))
    # TODO: Look further into this test as it is still failing for AMatrix
    smallSuite.addTest(TSlipBoundaryTest('test_execution'))  # FIXME:
    ## These tests are executed in the nightly build
    nightSuite = suites['nightly']
    nightSuite.addTests(smallSuite)
    nightSuite.addTest(TAxisSymmetricCircularPlate2DTriTest('test_execution'))
    nightSuite.addTest(TBeamCantileverStaticLinearElasticPointLoad2DTriTest('test_execution'))
    nightSuite.addTest(TBeamCantileverStaticLinearElasticParticlePointLoad2DTriTest('test_execution'))
    nightSuite.addTest(TBeamCantileverStaticLinearElasticLineLoad2DQuadTest('test_execution'))
    nightSuite.addTest(TBeamCantileverStaticLinearElasticSurfaceLoad3DHexaTest('test_execution'))
    nightSuite.addTest(TBeamCantileverStaticHyperelasticSelfWeightLoad2DQuadTest('test_execution'))
    nightSuite.addTest(TCooksMembraneCompressibleTest('test_execution'))
    nightSuite.addTest(TCooksMembraneUPCompressibleTest('test_execution'))
    nightSuite.addTest(TCooksMembraneUPIncompressibleTest('test_execution'))
    nightSuite.addTest(TPenaltyImpositionBeamCantileverStaticHyperelasticSelfWeightLoad2DQuadTest('test_execution'))
    ### Adding Validation Tests
    ## For very long tests that should not be in nightly; usable for validation
    validationSuite = suites['validation']
    ### Create a test suite that contains all the tests:
    allSuite = suites['all']
    allSuite.addTests(nightSuite)  # already contains the smallSuite
    allSuite.addTests(validationSuite)
    return suites
if __name__ == '__main__':
    # First the compiled C++ unit tests, then the python suites from
    # AssembleTestSuites().
    KratosMultiphysics.Logger.PrintInfo("Unittests", "\nRunning cpp unit tests ...")
    run_cpp_unit_tests.run()
    KratosMultiphysics.Logger.PrintInfo("Unittests", "Finished running cpp unit tests!")
    KratosMultiphysics.Logger.PrintInfo("Unittests", "\nRunning python tests ...")
    KratosUnittest.runTests(AssembleTestSuites())
    KratosMultiphysics.Logger.PrintInfo("Unittests", "Finished python tests!") | 0.410284 | 0.571288
import copy
import glob
import typing
from deoppet.parser import Parser, Snippet
from deoppet.mapping import Mapping
from deoppet.util import debug
from pynvim import Nvim
class Deoppet():
    """Core of the deoppet snippet plugin running in the Neovim python host.

    Loads ``*.snip`` definitions for the current filetype and forwards
    mapping/autocmd events from the Vim side to the ``Mapping`` object.
    """
    def __init__(self, vim: Nvim) -> None:
        self._vim = vim
        # Requires the nvim 0.5+ API; on older hosts stop before creating
        # any state, leaving the plugin inert.
        if not self._vim.call('has', 'nvim-0.5.0'):
            return
        self._mapping = Mapping(self._vim)
        self._vim.call('deoppet#custom#_update_cache')
        self._load_snippets()
        self._vim.call('deoppet#mapping#_init')
        self._vim.call('deoppet#handler#_init')
    def debug(self, expr: typing.Any) -> None:
        # Thin wrapper over deoppet.util.debug for the remote interface.
        debug(self._vim, expr)
    def mapping(self, name: str, cur_text: str, col: int) -> None:
        # Dispatch a key-mapping event to the Mapping object.
        return self._mapping.mapping(name, cur_text, col)
    def expand(self, trigger: str) -> None:
        # Expand the snippet for ``trigger`` using the current line text.
        return self._mapping.expand(
            trigger, self._vim.call('deoppet#util#_get_cur_text'))
    def event(self, name: str) -> None:
        """Handle an autocmd event forwarded from Vim."""
        self._vim.call('deoppet#custom#_update_cache')
        buf = self._vim.current.buffer
        # (Re)load snippets on FileType change, or on the first
        # InsertEnter in a buffer that has none loaded yet.
        if name == 'FileType' or (name == 'InsertEnter' and
                                  'deoppet_snippets' not in buf.vars):
            return self._load_snippets()
        elif name == 'BufWritePost':
            return self._mapping.clear()
    def _load_snippets(self) -> None:
        """Parse the snippet files for the current filetype into b:deoppet_snippets."""
        buf = self._vim.current.buffer
        filetype: str = self._vim.call(
            'deoppet#util#_get_context_filetype')
        if not filetype:
            filetype = 'nothing'
        snippets_dirs = [x['path'] for x in self._vim.call(
            'deoppet#custom#_get_option', 'snippets')]
        ft_snippets_map = self._vim.call(
            'deoppet#custom#_get_option', 'ft_snippets_map')
        # ft_snippets_map lets users map one filetype to several snippet
        # filetypes; the default is the dotted compound filetype parts.
        if filetype in ft_snippets_map:
            fts = ft_snippets_map[filetype]
        else:
            fts = filetype.split('.')
        snippets: typing.Dict[str, Snippet] = {}
        for dir in snippets_dirs:
            for ft in fts:
                # ``_.snip`` holds snippets shared by all filetypes.
                for filename in glob.glob(
                        f'{dir}/{ft}.snip') + glob.glob(f'{dir}/_.snip'):
                    # debug(self._vim, filename)
                    with open(filename) as f:
                        parser = Parser(self._vim, filename, snippets_dirs)
                        snippets.update(parser.parse(f.read()))
        # Register every alias as an extra top-level trigger.
        for s in copy.deepcopy(snippets).values():
            for a in s.get('alias', []):
                snippets[a] = s
        # debug(self._vim, snippets)
        buf.vars['deoppet_snippets'] = snippets | rplugin/python3/deoppet/deoppet.py |
import copy
import glob
import typing
from deoppet.parser import Parser, Snippet
from deoppet.mapping import Mapping
from deoppet.util import debug
from pynvim import Nvim
class Deoppet():
    """Deoppet plugin entry object for the Neovim python host.

    Keeps a ``Mapping`` instance and fills ``b:deoppet_snippets`` from
    the configured snippet directories.
    """
    def __init__(self, vim: Nvim) -> None:
        self._vim = vim
        # nvim 0.5+ only; bail out early otherwise.
        if not self._vim.call('has', 'nvim-0.5.0'):
            return
        self._mapping = Mapping(self._vim)
        self._vim.call('deoppet#custom#_update_cache')
        self._load_snippets()
        self._vim.call('deoppet#mapping#_init')
        self._vim.call('deoppet#handler#_init')
    def debug(self, expr: typing.Any) -> None:
        # Forward to deoppet.util.debug.
        debug(self._vim, expr)
    def mapping(self, name: str, cur_text: str, col: int) -> None:
        # Key-mapping events are handled by the Mapping object.
        return self._mapping.mapping(name, cur_text, col)
    def expand(self, trigger: str) -> None:
        # Expand ``trigger`` against the text before the cursor.
        return self._mapping.expand(
            trigger, self._vim.call('deoppet#util#_get_cur_text'))
    def event(self, name: str) -> None:
        """React to an autocmd event from the Vim side."""
        self._vim.call('deoppet#custom#_update_cache')
        buf = self._vim.current.buffer
        # Snippets are (re)loaded on FileType, or lazily on InsertEnter
        # when the buffer has none yet; BufWritePost clears mapping state.
        if name == 'FileType' or (name == 'InsertEnter' and
                                  'deoppet_snippets' not in buf.vars):
            return self._load_snippets()
        elif name == 'BufWritePost':
            return self._mapping.clear()
    def _load_snippets(self) -> None:
        """Read and parse snippet files, storing them on the buffer."""
        buf = self._vim.current.buffer
        filetype: str = self._vim.call(
            'deoppet#util#_get_context_filetype')
        if not filetype:
            filetype = 'nothing'
        snippets_dirs = [x['path'] for x in self._vim.call(
            'deoppet#custom#_get_option', 'snippets')]
        ft_snippets_map = self._vim.call(
            'deoppet#custom#_get_option', 'ft_snippets_map')
        # User-provided filetype aliases win over the dotted split.
        if filetype in ft_snippets_map:
            fts = ft_snippets_map[filetype]
        else:
            fts = filetype.split('.')
        snippets: typing.Dict[str, Snippet] = {}
        for dir in snippets_dirs:
            for ft in fts:
                # ``_.snip`` files apply to every filetype.
                for filename in glob.glob(
                        f'{dir}/{ft}.snip') + glob.glob(f'{dir}/_.snip'):
                    # debug(self._vim, filename)
                    with open(filename) as f:
                        parser = Parser(self._vim, filename, snippets_dirs)
                        snippets.update(parser.parse(f.read()))
        # Aliases become additional trigger keys for the same snippet.
        for s in copy.deepcopy(snippets).values():
            for a in s.get('alias', []):
                snippets[a] = s
        # debug(self._vim, snippets)
        buf.vars['deoppet_snippets'] = snippets | 0.424889 | 0.117902
from __future__ import division, print_function, absolute_import
from warnings import warn
from numpy import asarray, asarray_chkfinite
# Local imports
from .misc import _datacopied
from .lapack import get_lapack_funcs
from .flinalg import get_flinalg_funcs
__all__ = ['lu', 'lu_solve', 'lu_factor']
def lu_factor(a, overwrite_a=False, check_finite=True):
    """Compute pivoted LU decomposition of a matrix.

    The decomposition is::

        A = P L U

    where P is a permutation matrix, L lower triangular with unit
    diagonal elements, and U upper triangular.

    Parameters
    ----------
    a : array, shape (M, M)
        Matrix to decompose
    overwrite_a : boolean
        Whether to overwrite data in A (may increase performance)
    check_finite : boolean, optional
        Whether to check that the input matrix contains only finite
        numbers. Disabling may give a performance gain, but may result
        in problems (crashes, non-termination) if the inputs do contain
        infinities or NaNs.

    Returns
    -------
    lu : array, shape (M, M)
        Matrix containing U in its upper triangle, and L in its lower
        triangle. The unit diagonal elements of L are not stored.
    piv : array, shape (M,)
        Pivot indices representing the permutation matrix P:
        row i of the matrix was interchanged with row piv[i].

    See also
    --------
    lu_solve : solve an equation system using the LU factorization of a matrix

    Notes
    -----
    This is a wrapper to the ``*GETRF`` routines from LAPACK.
    """
    checked = asarray_chkfinite(a) if check_finite else asarray(a)
    if checked.ndim != 2 or checked.shape[0] != checked.shape[1]:
        raise ValueError('expected square matrix')
    # Overwriting is also safe when the conversion above made a copy.
    overwrite_a = overwrite_a or _datacopied(checked, a)
    getrf, = get_lapack_funcs(('getrf',), (checked,))
    lu, piv, info = getrf(checked, overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of '
                         'internal getrf (lu_factor)' % -info)
    if info > 0:
        warn("Diagonal number %d is exactly zero. Singular matrix." % info,
             RuntimeWarning)
    return lu, piv
def lu_solve(lu_and_piv, b, trans=0, overwrite_b=False, check_finite=True):
    """Solve an equation system, a x = b, given the LU factorization of a

    Parameters
    ----------
    (lu, piv)
        Factorization of the coefficient matrix a, as given by lu_factor
    b : array
        Right-hand side
    trans : {0, 1, 2}
        Type of system to solve:

        ===== =========
        trans system
        ===== =========
        0     a x   = b
        1     a^T x = b
        2     a^H x = b
        ===== =========
    overwrite_b : boolean, optional
        Whether to overwrite data in b (may increase performance)
    check_finite : boolean, optional
        Whether to check that the input matrix contains only finite
        numbers. Disabling may give a performance gain, but may result
        in problems (crashes, non-termination) if the inputs do contain
        infinities or NaNs.

    Returns
    -------
    x : array
        Solution to the system

    See also
    --------
    lu_factor : LU factorize a matrix
    """
    (lu, piv) = lu_and_piv
    if check_finite:
        b1 = asarray_chkfinite(b)
    else:
        b1 = asarray(b)
    # Overwriting is also safe when the conversion above made a copy.
    overwrite_b = overwrite_b or _datacopied(b1, b)
    if lu.shape[0] != b1.shape[0]:
        raise ValueError("incompatible dimensions.")
    getrs, = get_lapack_funcs(('getrs',), (lu, b1))
    x, info = getrs(lu, piv, b1, trans=trans, overwrite_b=overwrite_b)
    if info == 0:
        return x
    # The routine called here is GETRS; the historical message wrongly
    # named gesv|posv.
    raise ValueError('illegal value in %d-th argument of internal getrs'
                     % -info)
def lu(a, permute_l=False, overwrite_a=False, check_finite=True):
    """Compute pivoted LU decomposition of a matrix.

    The decomposition is::

        A = P L U

    where P is a permutation matrix, L lower triangular with unit
    diagonal elements, and U upper triangular.

    Parameters
    ----------
    a : array, shape (M, N)
        Array to decompose
    permute_l : boolean
        Perform the multiplication P*L (Default: do not permute)
    overwrite_a : boolean
        Whether to overwrite data in a (may improve performance)
    check_finite : boolean, optional
        Whether to check the input matrixes contain only finite numbers.
        Disabling may give a performance gain, but may result to problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    (If permute_l == False)
    p : array, shape (M, M)
        Permutation matrix
    l : array, shape (M, K)
        Lower triangular or trapezoidal matrix with unit diagonal.
        K = min(M, N)
    u : array, shape (K, N)
        Upper triangular or trapezoidal matrix
    (If permute_l == True)
    pl : array, shape (M, K)
        Permuted L matrix.
        K = min(M, N)
    u : array, shape (K, N)
        Upper triangular or trapezoidal matrix

    Notes
    -----
    This is a LU factorization routine written for Scipy.
    """
    if check_finite:
        a1 = asarray_chkfinite(a)
    else:
        a1 = asarray(a)
    # Rectangular input is accepted here (unlike lu_factor).
    if len(a1.shape) != 2:
        raise ValueError('expected matrix')
    # Overwriting is also safe when the conversion above made a copy.
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    # Fortran wrapper that performs the factorization and expands the
    # result into separate p, l, u factors.
    flu, = get_flinalg_funcs(('lu',), (a1,))
    p, l, u, info = flu(a1, permute_l=permute_l, overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of '
                         'internal lu.getrf' % -info)
    if permute_l:
        # p has already been folded into l by the wrapper.
        return l, u
    return p, l, u | scipy/linalg/decomp_lu.py |
from __future__ import division, print_function, absolute_import
from warnings import warn
from numpy import asarray, asarray_chkfinite
# Local imports
from .misc import _datacopied
from .lapack import get_lapack_funcs
from .flinalg import get_flinalg_funcs
__all__ = ['lu', 'lu_solve', 'lu_factor']
def lu_factor(a, overwrite_a=False, check_finite=True):
    """Compute pivoted LU decomposition of a matrix.
    The decomposition is::
        A = P L U
    where P is a permutation matrix, L lower triangular with unit
    diagonal elements, and U upper triangular.
    Parameters
    ----------
    a : array, shape (N, N)
        Matrix to decompose
    overwrite_a : boolean
        Whether to overwrite data in A (may increase performance)
    check_finite : boolean, optional
        Whether to check the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    Returns
    -------
    lu : array, shape (N, N)
        Matrix containing U in its upper triangle, and L in its lower triangle.
        The unit diagonal elements of L are not stored.
    piv : array, shape (N,)
        Pivot indices representing the permutation matrix P:
        row i of matrix was interchanged with row piv[i].
    See also
    --------
    lu_solve : solve an equation system using the LU factorization of a matrix
    Notes
    -----
    This is a wrapper to the ``*GETRF`` routines from LAPACK.
    """
    if check_finite:
        a1 = asarray_chkfinite(a)
    else:
        a1 = asarray(a)
    # GETRF-style factorization with stored pivots requires square input here.
    if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]):
        raise ValueError('expected square matrix')
    # Safe to overwrite when asarray already made a private copy of `a`.
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    getrf, = get_lapack_funcs(('getrf',), (a1,))
    lu, piv, info = getrf(a1, overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of '
                         'internal getrf (lu_factor)' % -info)
    if info > 0:
        # info > 0: factorization completed, but U has an exact zero pivot,
        # so the matrix is singular -- warn rather than fail.
        warn("Diagonal number %d is exactly zero. Singular matrix." % info,
             RuntimeWarning)
    return lu, piv
def lu_solve(lu_and_piv, b, trans=0, overwrite_b=False, check_finite=True):
    """Solve an equation system, a x = b, given the LU factorization of a
    Parameters
    ----------
    (lu, piv)
        Factorization of the coefficient matrix a, as given by lu_factor
    b : array
        Right-hand side
    trans : {0, 1, 2}
        Type of system to solve:
        ===== =========
        trans system
        ===== =========
        0     a x   = b
        1     a^T x = b
        2     a^H x = b
        ===== =========
    overwrite_b : boolean, optional
        Whether to overwrite data in b (may increase performance)
    check_finite : boolean, optional
        Whether to check the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    Returns
    -------
    x : array
        Solution to the system
    See also
    --------
    lu_factor : LU factorize a matrix
    """
    (lu, piv) = lu_and_piv
    if check_finite:
        b1 = asarray_chkfinite(b)
    else:
        b1 = asarray(b)
    # Safe to overwrite when asarray already made a private copy of `b`.
    overwrite_b = overwrite_b or _datacopied(b1, b)
    if lu.shape[0] != b1.shape[0]:
        raise ValueError("incompatible dimensions.")
    getrs, = get_lapack_funcs(('getrs',), (lu, b1))
    x, info = getrs(lu, piv, b1, trans=trans, overwrite_b=overwrite_b)
    if info == 0:
        return x
    # BUG FIX: the routine called above is *GETRS, not gesv/posv; the old
    # message ('internal gesv|posv') pointed users at the wrong LAPACK routine.
    raise ValueError('illegal value in %d-th argument of '
                     'internal getrs (lu_solve)' % -info)
def lu(a, permute_l=False, overwrite_a=False, check_finite=True):
    """Compute pivoted LU decomposition of a matrix.
    The decomposition is::
        A = P L U
    where P is a permutation matrix, L lower triangular with unit
    diagonal elements, and U upper triangular.
    Parameters
    ----------
    a : array, shape (M, N)
        Array to decompose
    permute_l : boolean
        Perform the multiplication P*L (Default: do not permute)
    overwrite_a : boolean
        Whether to overwrite data in a (may improve performance)
    check_finite : boolean, optional
        Whether to check the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    Returns
    -------
    (If permute_l == False)
    p : array, shape (M, M)
        Permutation matrix
    l : array, shape (M, K)
        Lower triangular or trapezoidal matrix with unit diagonal.
        K = min(M, N)
    u : array, shape (K, N)
        Upper triangular or trapezoidal matrix
    (If permute_l == True)
    pl : array, shape (M, K)
        Permuted L matrix.
        K = min(M, N)
    u : array, shape (K, N)
        Upper triangular or trapezoidal matrix
    Notes
    -----
    This is a LU factorization routine written for SciPy.
    """
    if check_finite:
        a1 = asarray_chkfinite(a)
    else:
        a1 = asarray(a)
    if len(a1.shape) != 2:
        raise ValueError('expected matrix')
    # Safe to overwrite when asarray already made a private copy of `a`.
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    flu, = get_flinalg_funcs(('lu',), (a1,))
    p, l, u, info = flu(a1, permute_l=permute_l, overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of '
                         'internal lu.getrf' % -info)
    # With permute_l the Fortran routine has already folded P into L.
    if permute_l:
        return l, u
    # BUG FIX: removed dataset residue ("| 0.955194 | 0.710797 |") that was
    # fused onto this return line and made the module unparseable.
    return p, l, u
from collections import namedtuple, Counter
class Colors:
    """Enumeration of game colors, stored as plain ints 0-8."""

    def __init__(self):
        pass

    # Integer codes; `none` (8) deliberately has no entry in colors_list.
    red, orange, blue, yellow, green, pink, black, white, none = range(9)
    colors_list = ['Red', 'Orange', 'Blue', 'Yellow', 'Green', 'Pink', 'Black', 'White']

    @staticmethod
    def str(color):
        """Human-readable name for a color code; out-of-range codes read as 'None'."""
        if color < len(Colors.colors_list):
            return Colors.colors_list[color]
        return 'None'

    @staticmethod
    def str_card(color):
        """Card name for a color code; the out-of-range code means a wild card."""
        if color < len(Colors.colors_list):
            return Colors.colors_list[color]
        return 'Wild'
class FailureCause:
    """Enumeration of reasons a game action can fail, stored as plain ints."""

    def __init__(self):
        pass

    none, no_route, wrong_turn, missing_cards, incompatible_cards, already_drew, invalid_card_index, \
        insufficient_cars, game_over, deck_out_of_cards, no_action, incorrect_destinations, already_claimed_opponent, \
        already_claimed_self, not_enough_destinations, wrong_destination_card = range(16)

    # NOTE(review): the final entry "Incorrect " looks truncated -- confirm the
    # intended text before releasing user-facing messages.
    cause_list = ['None', 'No Route', "Wrong Turn", "Missing Cards", "Incompatible Cards", "Already Drew",
                  "Invalid Card Index", "Insufficient Cars", "Game Over", "Deck out of Cards", "No Action",
                  "Incorrect Destinations", "Edge Claimed by Opponent", "Edge Claimed by Self",
                  "Not Enough Destination Cards", "Incorrect "]

    @staticmethod
    def str(failure_cause):
        """Human-readable description; unknown codes read as 'Unknown'."""
        if failure_cause < len(FailureCause.cause_list):
            return FailureCause.cause_list[failure_cause]
        return "Unknown"
class Edge(namedtuple("Edge", "city1 city2 cost color")):
    """An undirected, colored, costed connection between two cities."""

    def __new__(cls, city1, city2, cost, color):
        return tuple.__new__(cls, (city1, city2, cost, color))

    def __init__(self, city1, city2, cost, color):
        super(Edge, self).__init__()

    def other_city(self, city):
        """Return the endpoint opposite `city`, or None if `city` is not on this edge."""
        if city == self.city1:
            return self.city2
        return self.city1 if city == self.city2 else None

    def contains_city(self, city):
        """True when `city` is one of this edge's endpoints."""
        return city in (self.city1, self.city2)

    def __str__(self):
        return "(%s, %s, %s, %s)" % (str(self.city1), str(self.city2), str(self.cost), Colors.str(self.color))

    def __repr__(self):
        return str(self)
class Destination(namedtuple("Destination", "city1 city2 value")):
    """A destination ticket: connect city1 to city2 for `value` points."""

    def __new__(cls, city1, city2, value):
        return tuple.__new__(cls, (city1, city2, value))

    def __init__(self, city1, city2, value):
        super(Destination, self).__init__()

    def __str__(self):
        return "(%s, %s, %s)" % (str(self.city1), str(self.city2), str(self.value))
class Hand:
    """A multiset of colored train cards held by a player."""

    def __init__(self, cards):
        # Counter gives multiset semantics with implicit zero counts.
        self.cards = Counter(cards)

    def add_card(self, card):
        """Add one copy of `card` to the hand."""
        self.cards[card] += 1

    def remove_card(self, card):
        """Remove one copy of `card`; the count never drops below zero."""
        remaining = self.cards[card] - 1
        self.cards[card] = remaining if remaining > 0 else 0

    def contains_cards(self, cards):
        """True when every card in `cards` (a Counter) is available in this hand."""
        return all(self.cards[card] >= cards[card] for card in cards)

    def __str__(self):
        return Hand.cards_str(self.cards)

    @staticmethod
    def cards_str(cards):
        """Render a Counter of cards as '(Name, Name, ...)'."""
        names = [Colors.str_card(card) for card in cards.elements()]
        return "(%s)" % ", ".join(names)
class PlayerInfo:
    """Mutable snapshot of one player's state: score, hand, cars, tickets."""

    def __init__(self):
        self.score = 0
        self.destinations = None
        self.completed_destinations = []
        self.hand = None
        self.num_cars = 0
        self.draws = 0
        self.connects = 0

    def __str__(self):
        dests = ", ".join(map(str, self.destinations))
        done = ", ".join(map(str, self.completed_destinations))
        return "{\n\tPrivate Score: %s\n" \
               "\tHand: %s\n" \
               "\tCars Remaining: %s\n" \
               "\tDestinations: [%s]\n" \
               "\tCompleted Destinations: [%s]}" % (str(self.score), str(self.hand), str(self.num_cars),
                                                    dests, done)

    def set_num_cars(self, num_cars):
        self.num_cars = num_cars

    def set_score(self, score):
        self.score = score

    def set_destinations(self, destinations):
        self.destinations = destinations

    def set_hand(self, hand):
        self.hand = hand

    def get_destination_points(self):
        """Total value of completed destination tickets."""
        return sum(dest.value for dest in self.completed_destinations)

    def get_destination_deductions(self):
        """Total value of (still pending) destination tickets."""
        return sum(dest.value for dest in self.destinations)

    def get_route_points(self):
        """Score earned from routes alone: strip ticket bonuses, restore deductions."""
        return self.score - self.get_destination_points() + self.get_destination_deductions()

    def note_draw(self):
        """Record that the player drew cards this turn."""
        self.draws += 1

    def note_connect(self):
        """Record that the player claimed a route this turn."""
        self.connects += 1
class Path:
    """A collection of edges plus the accumulated cost and score to claim them."""

    def __init__(self, edges, scoring, player=None, edge_claims=None):
        self.edges = edges
        self.cost = 0
        self.score = 0
        for edge in edges:
            self._accumulate(edge, scoring, player, edge_claims)

    def _accumulate(self, edge, scoring, player, edge_claims):
        # Edges already claimed by the player contribute neither cost nor score.
        if player is None or edge_claims is None or edge_claims[edge] != player.name:
            self.cost += edge.cost
            self.score += scoring[edge.cost]

    def add_edge(self, edge, scoring, player=None, edge_claims=None):
        """Add `edge` to the path (edges must be a set) and update totals."""
        self.edges.add(edge)
        self._accumulate(edge, scoring, player, edge_claims)

    @staticmethod
    def default_sort_method(path):
        """Default sort key for candidate paths: cheapest first."""
        return path.cost

    def __repr__(self):
        edge_strs = ["(%s, %s, %s, %s)" % (edge.city1,
                                           edge.city2,
                                           Colors.str(edge.color),
                                           edge.cost) for edge in self.edges]
        return "( Cost: %s, Score: %s, [%s])" % (str(self.cost), str(self.score), ", ".join(edge_strs))
class HistoryEvent:
    """A single (player, action) entry in the game history log."""

    def __init__(self, player_name, action):
        self.player_name = player_name
        self.action = action

    def __str__(self):
        # BUG FIX: removed dataset residue ("| game/classes.py | from collections ...")
        # that was fused onto this return line and made the module unparseable.
        return "%s: %s" % (self.player_name, str(self.action))
class Colors:
    """
    Used as an enum to hold possible color values.
    """
    def __init__(self):
        pass
    # Integer codes 0-8; `none` (8) deliberately has no colors_list entry.
    red, orange, blue, yellow, green, pink, black, white, none = range(9)
    colors_list = ['Red', 'Orange', 'Blue', 'Yellow', 'Green', 'Pink', 'Black', 'White']
    @staticmethod
    def str(color):
        # Out-of-range codes (e.g. Colors.none) render as 'None'.
        return Colors.colors_list[color] if len(Colors.colors_list) > color else 'None'
    @staticmethod
    def str_card(color):
        # For cards, the out-of-range code means a wild card.
        return Colors.colors_list[color] if len(Colors.colors_list) > color else 'Wild'
class FailureCause:
    """Enumeration of reasons a game action can fail, stored as plain ints."""
    def __init__(self):
        pass
    none, no_route, wrong_turn, missing_cards, incompatible_cards, already_drew, invalid_card_index, \
    insufficient_cars, game_over, deck_out_of_cards, no_action, incorrect_destinations, already_claimed_opponent, \
    already_claimed_self, not_enough_destinations, wrong_destination_card = range(16)
    # NOTE(review): the final entry "Incorrect " looks truncated -- confirm intended text.
    cause_list = ['None', 'No Route', "Wrong Turn", "Missing Cards", "Incompatible Cards", "Already Drew",
                  "Invalid Card Index", "Insufficient Cars", "Game Over", "Deck out of Cards", "No Action",
                  "Incorrect Destinations", "Edge Claimed by Opponent", "Edge Claimed by Self",
                  "Not Enough Destination Cards", "Incorrect "]
    @staticmethod
    def str(failure_cause):
        # Unknown codes render as 'Unknown'.
        return FailureCause.cause_list[failure_cause] if len(FailureCause.cause_list) > failure_cause else "Unknown"
class Edge(namedtuple("Edge", "city1 city2 cost color")):
    """An undirected, colored, costed connection between two cities."""
    def __new__(cls, city1, city2, cost, color):
        return tuple.__new__(cls, (city1, city2, cost, color))
    def __init__(self, city1, city2, cost, color):
        super(Edge, self).__init__()
    def other_city(self, city):
        # Endpoint opposite `city`, or None if `city` is not on this edge.
        if city == self.city1:
            return self.city2
        if city == self.city2:
            return self.city1
        return None
    def contains_city(self, city):
        # True when `city` is one of this edge's endpoints.
        return self.city1 == city or self.city2 == city
    def __str__(self):
        return "(%s, %s, %s, %s)" % (str(self.city1), str(self.city2), str(self.cost), Colors.str(self.color))
    def __repr__(self):
        return str(self)
class Destination(namedtuple("Destination", "city1 city2 value")):
    """A destination ticket: connect city1 to city2 for `value` points."""
    def __new__(cls, city1, city2, value):
        return tuple.__new__(cls, (city1, city2, value))
    def __init__(self, city1, city2, value):
        super(Destination, self).__init__()
    def __str__(self):
        return "(%s, %s, %s)" % (str(self.city1), str(self.city2), str(self.value))
class Hand:
    """A multiset of colored train cards held by a player."""
    def __init__(self, cards):
        # Counter gives multiset semantics with implicit zero counts.
        self.cards = Counter(cards)
    def add_card(self, card):
        self.cards[card] += 1
    def remove_card(self, card):
        # The count never drops below zero, even for cards not held.
        self.cards[card] = max(self.cards[card] - 1, 0)
    def contains_cards(self, cards):
        # True when every card in `cards` (a Counter) is available in this hand.
        for card in cards:
            if self.cards[card] - cards[card] < 0:
                return False
        return True
    def __str__(self):
        return Hand.cards_str(self.cards)
    @staticmethod
    def cards_str(cards):
        # Renders a Counter of cards as '(Name, Name, ...)'.
        return "(%s)" % ", ".join(map(Colors.str_card, [card for card in cards.elements()]))
class PlayerInfo:
    """Mutable snapshot of one player's state: score, hand, cars, tickets."""
    def __init__(self):
        self.score = 0
        self.destinations = None
        self.completed_destinations = []
        self.hand = None
        self.num_cars = 0
        # self.route_score=0
        # self.ticket_score=0
        self.draws = 0
        self.connects = 0
    def __str__(self):
        return "{\n\tPrivate Score: %s\n" \
               "\tHand: %s\n" \
               "\tCars Remaining: %s\n" \
               "\tDestinations: [%s]\n" \
               "\tCompleted Destinations: [%s]}" % (str(self.score), str(self.hand), str(self.num_cars),
                                                    ", ".join(map(str, self.destinations)),
                                                    ", ".join(map(str, self.completed_destinations)))
    def set_num_cars(self, num_cars):
        self.num_cars = num_cars
    def set_score(self, score):
        self.score = score
    def set_destinations(self, destinations):
        self.destinations = destinations
    def set_hand(self, hand):
        self.hand = hand
    def get_destination_points(self):
        # Total value of completed destination tickets.
        points = 0
        for dest in self.completed_destinations:
            points = dest.value + points
        return points
    def get_destination_deductions(self):
        # Total value of (still pending) destination tickets.
        points = 0
        for dest in self.destinations:
            points = dest.value + points
        return points
    def get_route_points(self):
        # Route-only score: strip ticket bonuses, restore pending deductions.
        t_points = self.get_destination_points()
        m_points = self.get_destination_deductions()
        return self.score - t_points + m_points
    def note_draw(self):
        # Record that the player drew cards this turn.
        self.draws = self.draws + 1
    def note_connect(self):
        # Record that the player claimed a route this turn.
        self.connects = self.connects + 1
class Path:
    """A collection of edges plus the accumulated cost and score to claim them."""
    def __init__(self, edges, scoring, player=None, edge_claims=None):
        self.edges = edges
        self.cost = 0
        self.score = 0
        for edge in edges:
            # If the player owns the edge, then there's no cost or score to the edge.
            if player is None or edge_claims is None or edge_claims[edge] != player.name:
                self.cost += edge.cost
                self.score += scoring[edge.cost]
    def add_edge(self, edge, scoring, player=None, edge_claims=None):
        # `edges` must support .add() here (i.e. be a set).
        self.edges.add(edge)
        # If the player owns the edge, then there's no cost or score to the edge.
        if player is None or edge_claims is None or edge_claims[edge] != player.name:
            self.cost += edge.cost
            self.score += scoring[edge.cost]
    @staticmethod
    def default_sort_method(path):
        # Default sort key for candidate paths: cheapest first.
        return path.cost
    def __repr__(self):
        return "( Cost: %s, Score: %s, [%s])" % (str(self.cost), str(self.score),
                                                 ", ".join(["(%s, %s, %s, %s)" % (edge.city1,
                                                                                  edge.city2,
                                                                                  Colors.str(edge.color),
                                                                                  edge.cost) for edge in self.edges]))
class HistoryEvent:
    """A single (player, action) entry in the game history log."""

    def __init__(self, player_name, action):
        self.player_name = player_name
        self.action = action

    def __str__(self):
        # BUG FIX: removed dataset residue ("| 0.86431 | 0.285142") that was
        # fused onto this return line and made the module unparseable.
        return "%s: %s" % (self.player_name, str(self.action))
from typing import Optional
from fedot.core.operations.model import Model
from fedot.core.pipelines.node import PrimaryNode
from fedot.core.pipelines.pipeline import Pipeline, nodes_with_operation
from fedot.core.repository.dataset_types import DataTypesEnum
from fedot.core.repository.operation_types_repository import OperationTypesRepository, get_ts_operations
from fedot.core.repository.tasks import Task
ERROR_PREFIX = 'Invalid pipeline configuration:'
def has_correct_operation_positions(pipeline: 'Pipeline', task: Optional[Task] = None):
    """Raise ValueError if the root operation cannot serve the given task type.

    When no task is supplied the check is vacuously satisfied.
    """
    if task and task.task_type not in pipeline.root_node.operation.acceptable_task_types:
        raise ValueError(f'{ERROR_PREFIX} Pipeline has incorrect operations positions')
    return True
def has_primary_nodes(pipeline: 'Pipeline'):
    """Raise ValueError unless the pipeline contains at least one PrimaryNode."""
    has_primary = any(isinstance(node, PrimaryNode) for node in pipeline.nodes)
    if not has_primary:
        raise ValueError(f'{ERROR_PREFIX} Pipeline does not have primary nodes')
    return True
def has_final_operation_as_model(pipeline: 'Pipeline'):
    """ Check if the operation in root node is model or not """
    # Exact type check: instances of Model subclasses are rejected as well.
    if type(pipeline.root_node.operation) is not Model:
        raise ValueError(f'{ERROR_PREFIX} Root operation is not a model')
    return True
def has_no_conflicts_with_data_flow(pipeline: 'Pipeline'):
    """ Check if the pipeline contains incorrect connections between nodes """
    operation_repo = OperationTypesRepository(repository_name='data_operation_repository.json')
    forbidden_parents_combination, _ = operation_repo.suitable_operation()
    forbidden_parents_combination = set(forbidden_parents_combination)
    for node in pipeline.nodes:
        parents = node.nodes_from
        # Only nodes with two or more parents can form a forbidden combination.
        if not parents or len(parents) < 2:
            continue
        parent_operations = {parent.operation.operation_type for parent in parents}
        # Identical parents that must not be combined -> invalid subgraph.
        if len(parent_operations) == 1 and parent_operations.pop() in forbidden_parents_combination:
            raise ValueError(f'{ERROR_PREFIX} Pipeline has incorrect subgraph with identical data operations')
    return True
def has_correct_data_connections(pipeline: 'Pipeline'):
    """ Check if the pipeline contains incorrect connections between operation for different data types """
    operation_repo = OperationTypesRepository(repository_name='data_operation_repository.json')
    models_repo = OperationTypesRepository(repository_name='model_repository.json')
    for node in pipeline.nodes:
        for parent_node in (node.nodes_from or []):
            node_info = get_supported_data_types(node, operation_repo, models_repo)
            parent_info = get_supported_data_types(parent_node, operation_repo, models_repo)
            # The parent's output types must share at least one type with
            # the child's input types.
            if not set(node_info.input_types) & set(parent_info.output_types):
                raise ValueError(f'{ERROR_PREFIX} Pipeline has incorrect data connections')
    return True
def get_supported_data_types(node, operation_repo, models_repo):
    """Metadata for the node's operation: data-operation repo first, model repo as fallback."""
    op_id = node.operation.operation_type
    info = operation_repo.operation_info_by_id(op_id)
    return info if info is not None else models_repo.operation_info_by_id(op_id)
def is_pipeline_contains_ts_operations(pipeline: 'Pipeline'):
    """ Function checks is the model contains operations for time series
    forecasting """
    # Time-series-specific operations carry the "ts_specific" tag.
    ts_operations = set(get_ts_operations(tags=["ts_specific"], mode='all'))
    # Operations used in the considered pipeline.
    pipeline_operations = {node.operation.operation_type for node in pipeline.nodes}
    # IDIOM FIX: replaced `if len(...) > 0: return True else: return False`
    # with a direct boolean expression.
    return bool(ts_operations & pipeline_operations)
def has_no_data_flow_conflicts_in_ts_pipeline(pipeline: 'Pipeline'):
    """ Function checks the correctness of connection between nodes """
    # These rules only apply to pipelines that use time-series operations.
    if not is_pipeline_contains_ts_operations(pipeline):
        return True
    models = get_ts_operations(mode='models')
    # Preprocessing not only for time series
    non_ts_data_operations = get_ts_operations(mode='data_operations',
                                               forbidden_tags=["ts_specific"])
    ts_data_operations = get_ts_operations(mode='data_operations',
                                           tags=["ts_specific"])
    # Remove lagged transformation
    ts_data_operations.remove('lagged')
    ts_data_operations.remove('exog_ts_data_source')
    # Dictionary as {'current operation in the node': 'parent operations list'}
    # i.e. each value lists PARENT operations that are forbidden for the key.
    # TODO refactor
    wrong_connections = {'lagged': models + non_ts_data_operations + ['lagged'],
                         'ar': models + non_ts_data_operations + ['lagged'],
                         'arima': models + non_ts_data_operations + ['lagged'],
                         'ridge': ts_data_operations, 'linear': ts_data_operations,
                         'lasso': ts_data_operations, 'dtreg': ts_data_operations,
                         'knnreg': ts_data_operations, 'scaling': ts_data_operations,
                         'xgbreg': ts_data_operations, 'adareg': ts_data_operations,
                         'gbr': ts_data_operations, 'treg': ts_data_operations,
                         'rfr': ts_data_operations, 'svr': ts_data_operations,
                         'sgdr': ts_data_operations, 'normalization': ts_data_operations,
                         'simple_imputation': ts_data_operations, 'pca': ts_data_operations,
                         'kernel_pca': ts_data_operations, 'poly_features': ts_data_operations,
                         'ransac_lin_reg': ts_data_operations, 'ransac_non_lin_reg': ts_data_operations,
                         'rfe_lin_reg': ts_data_operations, 'rfe_non_lin_reg': ts_data_operations}
    for node in pipeline.nodes:
        # Operation name in the current node
        current_operation = node.operation.operation_type
        parent_nodes = node.nodes_from
        if parent_nodes is not None:
            # There are several parents for current node or at least 1
            for parent in parent_nodes:
                parent_operation = parent.operation.operation_type
                forbidden_parents = wrong_connections.get(current_operation)
                if forbidden_parents is not None:
                    # Raises ValueError if this parent/child pair is forbidden.
                    __check_connection(parent_operation, forbidden_parents)
    return True
def only_ts_specific_operations_are_primary(pipeline: 'Pipeline'):
    """ Only time series specific operations could be placed in primary nodes """
    if not is_pipeline_contains_ts_operations(pipeline):
        return True
    for node in pipeline.nodes:
        # Exact type check (not isinstance) mirrors the original convention.
        is_primary = type(node) == PrimaryNode
        if is_primary and DataTypesEnum.ts not in node.operation.metadata.input_types:
            raise ValueError(
                f'{ERROR_PREFIX} Pipeline for forecasting has not ts_specific preprocessing in primary nodes')
    return True
def has_no_conflicts_in_decompose(pipeline: Pipeline):
    """ The function checks whether the 'class_decompose' or 'decompose'
    operation has two ancestors
    """
    for decomposer in ('decompose', 'class_decompose'):
        decompose_nodes = nodes_with_operation(pipeline, decomposer)
        if decompose_nodes:
            # Both checks raise ValueError on an invalid decomposer node.
            __check_decomposer_has_two_parents(nodes_to_check=decompose_nodes)
            __check_decompose_parent_position(nodes_to_check=decompose_nodes)
    return True
def __check_connection(parent_operation, forbidden_parents):
    # Raise when `parent_operation` appears in the forbidden-parents list.
    if parent_operation in forbidden_parents:
        raise ValueError(f'{ERROR_PREFIX} Pipeline has incorrect subgraph with wrong parent nodes combination')
def __check_decompose_parent_position(nodes_to_check: list):
    """ Function check if the data flow before decompose operation is correct
    or not
    :param nodes_to_check: list with decompose nodes in the pipeline
    """
    for decompose_node in nodes_to_check:
        # By convention the FIRST parent must supply the model output.
        first_parent = decompose_node.nodes_from[0]
        if type(first_parent.operation) is not Model:
            raise ValueError(f'{ERROR_PREFIX} For decompose operation Model as first parent is required')
def __check_decomposer_has_two_parents(nodes_to_check: list):
    """ Function check if there are two parent nodes for decompose operation
    :param nodes_to_check: list with decompose nodes in the pipeline
    """
    for decompose_node in nodes_to_check:
        parents = decompose_node.nodes_from
        if len(parents) != 2:
            # BUG FIX: removed dataset residue ("| fedot/core/pipelines/...")
            # that was fused onto the raise statement and broke parsing.
            raise ValueError(f'{ERROR_PREFIX} Two parents for decompose node were'
                             f' expected, but {len(parents)} were given')
from fedot.core.operations.model import Model
from fedot.core.pipelines.node import PrimaryNode
from fedot.core.pipelines.pipeline import Pipeline, nodes_with_operation
from fedot.core.repository.dataset_types import DataTypesEnum
from fedot.core.repository.operation_types_repository import OperationTypesRepository, get_ts_operations
from fedot.core.repository.tasks import Task
ERROR_PREFIX = 'Invalid pipeline configuration:'
def has_correct_operation_positions(pipeline: 'Pipeline', task: Optional[Task] = None):
    """Raise ValueError if the root operation cannot serve the given task type."""
    is_root_satisfy_task_type = True
    if task:
        is_root_satisfy_task_type = task.task_type in pipeline.root_node.operation.acceptable_task_types
    if not is_root_satisfy_task_type:
        raise ValueError(f'{ERROR_PREFIX} Pipeline has incorrect operations positions')
    return True
def has_primary_nodes(pipeline: 'Pipeline'):
    """Raise ValueError unless the pipeline contains at least one PrimaryNode."""
    if not any(node for node in pipeline.nodes if isinstance(node, PrimaryNode)):
        raise ValueError(f'{ERROR_PREFIX} Pipeline does not have primary nodes')
    return True
def has_final_operation_as_model(pipeline: 'Pipeline'):
    """ Check if the operation in root node is model or not """
    root_node = pipeline.root_node
    # Exact type check: instances of Model subclasses are rejected as well.
    if type(root_node.operation) is not Model:
        raise ValueError(f'{ERROR_PREFIX} Root operation is not a model')
    return True
def has_no_conflicts_with_data_flow(pipeline: 'Pipeline'):
    """ Check if the pipeline contains incorrect connections between nodes """
    operation_repo = OperationTypesRepository(repository_name='data_operation_repository.json')
    forbidden_parents_combination, _ = operation_repo.suitable_operation()
    forbidden_parents_combination = set(forbidden_parents_combination)
    for node in pipeline.nodes:
        parent_nodes = node.nodes_from
        if parent_nodes is not None and len(parent_nodes) > 1:
            # There are several parents
            operation_names = []
            for parent in parent_nodes:
                operation_names.append(parent.operation.operation_type)
            # If operations are identical
            if len(set(operation_names)) == 1:
                # And if it is forbidden to combine them
                if operation_names[0] in forbidden_parents_combination:
                    raise ValueError(f'{ERROR_PREFIX} Pipeline has incorrect subgraph with identical data operations')
    return True
def has_correct_data_connections(pipeline: 'Pipeline'):
    """ Check if the pipeline contains incorrect connections between operation for different data types """
    operation_repo = OperationTypesRepository(repository_name='data_operation_repository.json')
    models_repo = OperationTypesRepository(repository_name='model_repository.json')
    for node in pipeline.nodes:
        parent_nodes = node.nodes_from
        if parent_nodes is not None and len(parent_nodes) > 0:
            for parent_node in parent_nodes:
                current_nodes_supported_data_types = \
                    get_supported_data_types(node, operation_repo, models_repo)
                parent_node_supported_data_types = \
                    get_supported_data_types(parent_node, operation_repo, models_repo)
                node_dtypes = set(current_nodes_supported_data_types.input_types)
                parent_dtypes = set(parent_node_supported_data_types.output_types)
                # Parent output types must overlap with child input types.
                if len(set.intersection(node_dtypes, parent_dtypes)) == 0:
                    raise ValueError(f'{ERROR_PREFIX} Pipeline has incorrect data connections')
    return True
def get_supported_data_types(node, operation_repo, models_repo):
    """Metadata for the node's operation, trying the data-operation repo first
    and falling back to the model repo."""
    operation_id = node.operation.operation_type
    metadata = operation_repo.operation_info_by_id(operation_id)
    if metadata is None:
        metadata = models_repo.operation_info_by_id(operation_id)
    return metadata
def is_pipeline_contains_ts_operations(pipeline: 'Pipeline'):
    """ Function checks is the model contains operations for time series
    forecasting """
    # Time-series-specific operations carry the "ts_specific" tag.
    ts_operations = set(get_ts_operations(tags=["ts_specific"], mode='all'))
    # Operations used in the considered pipeline.
    operations_in_pipeline = {node.operation.operation_type for node in pipeline.nodes}
    # IDIOM FIX: replaced `if len(...) > 0: return True else: return False`
    # with a direct boolean expression.
    return bool(ts_operations & operations_in_pipeline)
def has_no_data_flow_conflicts_in_ts_pipeline(pipeline: 'Pipeline'):
    """ Function checks the correctness of connection between nodes """
    # These rules only apply to pipelines that use time-series operations.
    if not is_pipeline_contains_ts_operations(pipeline):
        return True
    models = get_ts_operations(mode='models')
    # Preprocessing not only for time series
    non_ts_data_operations = get_ts_operations(mode='data_operations',
                                               forbidden_tags=["ts_specific"])
    ts_data_operations = get_ts_operations(mode='data_operations',
                                           tags=["ts_specific"])
    # Remove lagged transformation
    ts_data_operations.remove('lagged')
    ts_data_operations.remove('exog_ts_data_source')
    # Dictionary as {'current operation in the node': 'parent operations list'}
    # i.e. each value lists PARENT operations that are forbidden for the key.
    # TODO refactor
    wrong_connections = {'lagged': models + non_ts_data_operations + ['lagged'],
                         'ar': models + non_ts_data_operations + ['lagged'],
                         'arima': models + non_ts_data_operations + ['lagged'],
                         'ridge': ts_data_operations, 'linear': ts_data_operations,
                         'lasso': ts_data_operations, 'dtreg': ts_data_operations,
                         'knnreg': ts_data_operations, 'scaling': ts_data_operations,
                         'xgbreg': ts_data_operations, 'adareg': ts_data_operations,
                         'gbr': ts_data_operations, 'treg': ts_data_operations,
                         'rfr': ts_data_operations, 'svr': ts_data_operations,
                         'sgdr': ts_data_operations, 'normalization': ts_data_operations,
                         'simple_imputation': ts_data_operations, 'pca': ts_data_operations,
                         'kernel_pca': ts_data_operations, 'poly_features': ts_data_operations,
                         'ransac_lin_reg': ts_data_operations, 'ransac_non_lin_reg': ts_data_operations,
                         'rfe_lin_reg': ts_data_operations, 'rfe_non_lin_reg': ts_data_operations}
    for node in pipeline.nodes:
        # Operation name in the current node
        current_operation = node.operation.operation_type
        parent_nodes = node.nodes_from
        if parent_nodes is not None:
            # There are several parents for current node or at least 1
            for parent in parent_nodes:
                parent_operation = parent.operation.operation_type
                forbidden_parents = wrong_connections.get(current_operation)
                if forbidden_parents is not None:
                    # Raises ValueError if this parent/child pair is forbidden.
                    __check_connection(parent_operation, forbidden_parents)
    return True
def only_ts_specific_operations_are_primary(pipeline: 'Pipeline'):
    """ Only time series specific operations could be placed in primary nodes """
    if not is_pipeline_contains_ts_operations(pipeline):
        return True
    # Check only primary nodes
    for node in pipeline.nodes:
        # Exact type check (not isinstance) on purpose.
        if type(node) == PrimaryNode and DataTypesEnum.ts not in node.operation.metadata.input_types:
            raise ValueError(
                f'{ERROR_PREFIX} Pipeline for forecasting has not ts_specific preprocessing in primary nodes')
    return True
def has_no_conflicts_in_decompose(pipeline: Pipeline):
    """ The function checks whether the 'class_decompose' or 'decompose'
    operation has two ancestors
    """
    for decomposer in ['decompose', 'class_decompose']:
        decompose_nodes = nodes_with_operation(pipeline,
                                               decomposer)
        if len(decompose_nodes) != 0:
            # Launch check decomposers; both raise ValueError when invalid.
            __check_decomposer_has_two_parents(nodes_to_check=decompose_nodes)
            __check_decompose_parent_position(nodes_to_check=decompose_nodes)
    return True
def __check_connection(parent_operation, forbidden_parents):
    # Raise when `parent_operation` appears in the forbidden-parents list.
    if parent_operation in forbidden_parents:
        raise ValueError(f'{ERROR_PREFIX} Pipeline has incorrect subgraph with wrong parent nodes combination')
def __check_decompose_parent_position(nodes_to_check: list):
    """ Function check if the data flow before decompose operation is correct
    or not
    :param nodes_to_check: list with decompose nodes in the pipeline
    """
    for decompose_node in nodes_to_check:
        parents = decompose_node.nodes_from
        # NOTE(review): assumes at least one parent exists -- the two-parents
        # check is expected to run first; confirm call ordering.
        model_parent = parents[0]
        if type(model_parent.operation) is not Model:
            raise ValueError(f'{ERROR_PREFIX} For decompose operation Model as first parent is required')
def __check_decomposer_has_two_parents(nodes_to_check: list):
    """ Function check if there are two parent nodes for decompose operation
    :param nodes_to_check: list with decompose nodes in the pipeline
    """
    for decompose_node in nodes_to_check:
        parents = decompose_node.nodes_from
        if len(parents) != 2:
            # BUG FIX: removed dataset residue ("| 0.717408 | 0.244093") that
            # was fused onto the raise statement and broke parsing.
            raise ValueError(f'{ERROR_PREFIX} Two parents for decompose node were'
                             f' expected, but {len(parents)} were given')
import discord
import os
from discord.ext import commands
from discord.utils import get
import youtube_dl
from youtube_search import YoutubeSearch
import validators
class MusicCog(commands.Cog):
    def __init__(self, bot):
        # The running commands.Bot instance; used to look up voice clients.
        self.bot = bot
    @commands.command(pass_context=True, aliases=["J", "jo"])
    async def join(self, ctx):
        """Have the bot join the voice channel to which you're currently connected"""
        # NOTE(review): `voice` is module-level shared state; presumably other
        # commands rely on it -- confirm before removing the global.
        global voice
        # Raises AttributeError if the invoking user is not in a voice channel.
        channel = ctx.message.author.voice.channel
        voice = get(self.bot.voice_clients, guild=ctx.guild)
        if voice and voice.is_connected():
            await voice.move_to(channel)
        else:
            voice = await channel.connect()
            # Work around for currently existing issue wherein
            # music will not play upon first connection
            await voice.disconnect()
            if voice and voice.is_connected():
                await voice.move_to(channel)
            else:
                voice = await channel.connect()
@commands.command(pass_context=True, aliases=["L", "le"])
async def leave(self, ctx):
"""Have the bot leave the voice channel to which it's currently connected"""
channel = ctx.message.author.voice.channel
voice = get(self.bot.voice_clients, guild=ctx.guild)
if voice and voice.is_connected():
await voice.disconnect()
else:
await ctx.send("Error: no voice channel to leave")
@commands.command(pass_context=True, aliases=["p", "pl"])
async def play(self, ctx, url: str):
"""Have the bot play the audio from a provided youtube link.
If no valid url is provided, the bot will interpret the input
as a search term and find the first result for the given input.
If a search term contains more than one word, make sure to wrap
the search term in quotes.
Usage: `!play <youtube_link | search_term>`
"""
if not validators.url(url):
url = (
"https://youtube.com"
+ YoutubeSearch(url, max_results=1).videos[0]["url_suffix"]
)
song_there = os.path.isfile("./data/song.mp3")
try:
if song_there:
os.remove("./data/song.mp3")
except PermissionError:
print(
"Error: unable to remove old song file. Do you have permission to do this?"
)
await ctx.send("Permission error. Please contact an administrator.")
return
voice = get(self.bot.voice_clients, guild=ctx.guild)
ydl_opts = {
"format": "bestaudio/best",
"postprocessors": [
{
"key": "FFmpegExtractAudio",
"preferredcodec": "mp3",
"preferredquality": "192",
},
],
"outtmpl": "./data/%(title)s.%(ext)s",
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download([url])
for file in os.listdir("./data/"):
if file.endswith("mp3"):
name = " ".join(file.split(".")[:-1])
await self.bot.change_presence(
activity=discord.Activity(
name=name, type=discord.ActivityType.listening
)
)
os.rename(f"./data/{file}", "song.mp3")
voice.play(discord.FFmpegPCMAudio("song.mp3"))
voice.source = discord.PCMVolumeTransformer(voice.source)
voice.source.volume = 0.07
await ctx.send(f"Playing: {name}")
@commands.command(pass_context=True, aliases=["pa", "pau"])
async def pause(self, ctx):
"""Pause currently playing music"""
voice = get(self.bot.voice_clients, guild=ctx.guild)
if voice and voice.is_playing():
voice.pause()
await ctx.send("Music Paused")
else:
await ctx.send("Music is not playing, pause failure")
@commands.command(pass_context=True, aliases=["re", "resum"])
async def resume(self, ctx):
"""Resume playback of music that has been paused"""
voice = get(self.bot.voice_clients, guild=ctx.guild)
if voice and voice.is_paused():
voice.resume()
await ctx.send("Music Resumed")
else:
await ctx.send("Music is not playing, resume failure")
@commands.command(pass_context=True, aliases=["st", "stp"])
async def stop(self, ctx):
"""Stop the playback of music that is currently playing"""
voice = get(self.bot.voice_clients, guild=ctx.guild)
if voice and voice.is_playing():
voice.stop()
await ctx.send("Music Stopped")
else:
await ctx.send("Music is not playing, stop failure")
def setup(bot):
bot.add_cog(MusicCog(bot)) | cogs/musiccog.py | import discord
import os
from discord.ext import commands
from discord.utils import get
import youtube_dl
from youtube_search import YoutubeSearch
import validators
class MusicCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True, aliases=["J", "jo"])
async def join(self, ctx):
"""Have the bot join the voice channel to which you're currently connected"""
global voice
channel = ctx.message.author.voice.channel
voice = get(self.bot.voice_clients, guild=ctx.guild)
if voice and voice.is_connected():
await voice.move_to(channel)
else:
voice = await channel.connect()
# Work around for currently existing issue wherein
# music will not play upon first connection
await voice.disconnect()
if voice and voice.is_connected():
await voice.move_to(channel)
else:
voice = await channel.connect()
@commands.command(pass_context=True, aliases=["L", "le"])
async def leave(self, ctx):
"""Have the bot leave the voice channel to which it's currently connected"""
channel = ctx.message.author.voice.channel
voice = get(self.bot.voice_clients, guild=ctx.guild)
if voice and voice.is_connected():
await voice.disconnect()
else:
await ctx.send("Error: no voice channel to leave")
@commands.command(pass_context=True, aliases=["p", "pl"])
async def play(self, ctx, url: str):
"""Have the bot play the audio from a provided youtube link.
If no valid url is provided, the bot will interpret the input
as a search term and find the first result for the given input.
If a search term contains more than one word, make sure to wrap
the search term in quotes.
Usage: `!play <youtube_link | search_term>`
"""
if not validators.url(url):
url = (
"https://youtube.com"
+ YoutubeSearch(url, max_results=1).videos[0]["url_suffix"]
)
song_there = os.path.isfile("./data/song.mp3")
try:
if song_there:
os.remove("./data/song.mp3")
except PermissionError:
print(
"Error: unable to remove old song file. Do you have permission to do this?"
)
await ctx.send("Permission error. Please contact an administrator.")
return
voice = get(self.bot.voice_clients, guild=ctx.guild)
ydl_opts = {
"format": "bestaudio/best",
"postprocessors": [
{
"key": "FFmpegExtractAudio",
"preferredcodec": "mp3",
"preferredquality": "192",
},
],
"outtmpl": "./data/%(title)s.%(ext)s",
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download([url])
for file in os.listdir("./data/"):
if file.endswith("mp3"):
name = " ".join(file.split(".")[:-1])
await self.bot.change_presence(
activity=discord.Activity(
name=name, type=discord.ActivityType.listening
)
)
os.rename(f"./data/{file}", "song.mp3")
voice.play(discord.FFmpegPCMAudio("song.mp3"))
voice.source = discord.PCMVolumeTransformer(voice.source)
voice.source.volume = 0.07
await ctx.send(f"Playing: {name}")
@commands.command(pass_context=True, aliases=["pa", "pau"])
async def pause(self, ctx):
"""Pause currently playing music"""
voice = get(self.bot.voice_clients, guild=ctx.guild)
if voice and voice.is_playing():
voice.pause()
await ctx.send("Music Paused")
else:
await ctx.send("Music is not playing, pause failure")
@commands.command(pass_context=True, aliases=["re", "resum"])
async def resume(self, ctx):
"""Resume playback of music that has been paused"""
voice = get(self.bot.voice_clients, guild=ctx.guild)
if voice and voice.is_paused():
voice.resume()
await ctx.send("Music Resumed")
else:
await ctx.send("Music is not playing, resume failure")
@commands.command(pass_context=True, aliases=["st", "stp"])
async def stop(self, ctx):
"""Stop the playback of music that is currently playing"""
voice = get(self.bot.voice_clients, guild=ctx.guild)
if voice and voice.is_playing():
voice.stop()
await ctx.send("Music Stopped")
else:
await ctx.send("Music is not playing, stop failure")
def setup(bot):
bot.add_cog(MusicCog(bot)) | 0.345326 | 0.122261 |
class Selector:
price_res = "/html/body/div/div/div[1]/div/div[5]/div[3]/div[2]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[3]/div/span[1]/span//text()"
price_res2 = "//*[@id='page-block']/div/div[5]/div[4]/div[2]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[3]/div/span/span//text()"
price_res1 = "//*[@id='page-block']/div/div[5]/div[4]/div[2]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[3]/div/span/span/span//text()"
price_res3 = "//*[@id='page-block']/div/div[5]/div[4]/div[2]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[3]/div/span/span//text()"
price_res4 = "//*[@id='page-block']/div/div[5]/div[3]/div[2]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[3]/div/span/span//text()"
price_res5 = '//*[@id="page-block"]/div/div[5]/div[4]/div[3]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[3]/div/span[1]/span//text()'
price_res6 = '//*[@id="page-block"]/div/div[5]/div[3]/div[3]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[3]/div/span/span//text()'
nal_res = "//*[@id='page-block']/div/div[5]/div[3]/div[2]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[2]/span/span//text()"
nal_res2 = "//*[@id='page-block']/div/div[5]/div[4]/div[2]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[2]/span/span//text()"
nal_res1 = "//*[@id='page-block']/div/div[5]/div[4]/div[2]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[2]/span/span//text()"
nal_res3 = '//*[@id="page-block"]/div/div[5]/div[4]/div[3]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[2]/span/span//text()'
nal_res4 = '//*[@id="page-block"]/div/div[5]/div[3]/div[3]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[2]/span/span//text()'
head_res = "//*[@id='page-block']/div/div[5]/div[3]/div[2]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[1]/div[2]/span//text()"
head_res1 = '//*[@id="page-block"]/div/div[5]/div[4]/div[3]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[1]/div[2]/span//text()'
head_res4 = '//*[@id="page-block"]/div/div[5]/div[4]/div[3]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[1]/div/span//text()'
head_res2 = "//*[@id='page-block']/div/div[5]/div[4]/div[2]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[1]/div/span//text()"
head_res3 = "//*[@id='page-block']/div/div[5]/div[3]/div[2]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[1]/div/span//text()"
head_res5 = '//*[@id="page-block"]/div/div[5]/div[3]/div[3]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[1]/div/span//text()'
pass | promoprice/prom/moduls/constant.py |
class Selector:
price_res = "/html/body/div/div/div[1]/div/div[5]/div[3]/div[2]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[3]/div/span[1]/span//text()"
price_res2 = "//*[@id='page-block']/div/div[5]/div[4]/div[2]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[3]/div/span/span//text()"
price_res1 = "//*[@id='page-block']/div/div[5]/div[4]/div[2]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[3]/div/span/span/span//text()"
price_res3 = "//*[@id='page-block']/div/div[5]/div[4]/div[2]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[3]/div/span/span//text()"
price_res4 = "//*[@id='page-block']/div/div[5]/div[3]/div[2]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[3]/div/span/span//text()"
price_res5 = '//*[@id="page-block"]/div/div[5]/div[4]/div[3]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[3]/div/span[1]/span//text()'
price_res6 = '//*[@id="page-block"]/div/div[5]/div[3]/div[3]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[3]/div/span/span//text()'
nal_res = "//*[@id='page-block']/div/div[5]/div[3]/div[2]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[2]/span/span//text()"
nal_res2 = "//*[@id='page-block']/div/div[5]/div[4]/div[2]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[2]/span/span//text()"
nal_res1 = "//*[@id='page-block']/div/div[5]/div[4]/div[2]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[2]/span/span//text()"
nal_res3 = '//*[@id="page-block"]/div/div[5]/div[4]/div[3]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[2]/span/span//text()'
nal_res4 = '//*[@id="page-block"]/div/div[5]/div[3]/div[3]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[2]/span/span//text()'
head_res = "//*[@id='page-block']/div/div[5]/div[3]/div[2]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[1]/div[2]/span//text()"
head_res1 = '//*[@id="page-block"]/div/div[5]/div[4]/div[3]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[1]/div[2]/span//text()'
head_res4 = '//*[@id="page-block"]/div/div[5]/div[4]/div[3]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[1]/div/span//text()'
head_res2 = "//*[@id='page-block']/div/div[5]/div[4]/div[2]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[1]/div/span//text()"
head_res3 = "//*[@id='page-block']/div/div[5]/div[3]/div[2]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[1]/div/span//text()"
head_res5 = '//*[@id="page-block"]/div/div[5]/div[3]/div[3]/div/div[2]/div[1]/div[1]/div/div[{0}]/div/div[1]/div/span//text()'
pass | 0.108981 | 0.124559 |
import json
import logging
import inspect
from .decorators import pipeline_functions, register_pipeline
from indra.statements import get_statement_by_name, Statement
logger = logging.getLogger(__name__)
class AssemblyPipeline():
"""An assembly pipeline that runs the specified steps on a given set of
statements.
Ways to initialize and run the pipeline (examples assume you have a list
of INDRA Statements stored in the `stmts` variable.)
>>> from indra.statements import *
>>> map2k1 = Agent('MAP2K1', db_refs={'HGNC': '6840'})
>>> mapk1 = Agent('MAPK1', db_refs={'HGNC': '6871'})
>>> braf = Agent('BRAF')
>>> stmts = [Phosphorylation(map2k1, mapk1, 'T', '185'),
... Phosphorylation(braf, map2k1)]
1) Provide a JSON file containing the steps, then use the classmethod
`from_json_file`, and run it with the `run` method on a list of statements.
This option allows storing pipeline versions in a separate file and
reproducing the same results. All functions referenced in the JSON file
have to be registered with the @register_pipeline decorator.
>>> import os
>>> path_this = os.path.dirname(os.path.abspath(__file__))
>>> filename = os.path.abspath(
... os.path.join(path_this, '..', 'tests', 'pipeline_test.json'))
>>> ap = AssemblyPipeline.from_json_file(filename)
>>> assembled_stmts = ap.run(stmts)
2) Initialize a pipeline with a list of steps and run it with the `run`
method on a list of statements. All functions referenced in steps have to
be registered with the @register_pipeline decorator.
>>> steps = [
... {"function": "filter_no_hypothesis"},
... {"function": "filter_grounded_only",
... "kwargs": {"score_threshold": 0.8}}
... ]
>>> ap = AssemblyPipeline(steps)
>>> assembled_stmts = ap.run(stmts)
3) Initialize an empty pipeline and append/insert the steps one by one.
Provide a function and its args and kwargs. For arguments that
require calling a different function, use the RunnableArgument class. All
functions referenced here have to be either imported and passed as function
objects or registered with the @register_pipeline decorator and passed as
function names (strings). The pipeline built this way can be optionally
saved into a JSON file.
>>> from indra.tools.assemble_corpus import *
>>> from indra.ontology.world import load_world_ontology
>>> from indra.belief.wm_scorer import get_eidos_scorer
>>> ap = AssemblyPipeline()
>>> ap.append(filter_no_hypothesis)
>>> ap.append(filter_grounded_only)
>>> ap.append(run_preassembly,
... belief_scorer=RunnableArgument(get_eidos_scorer),
... ontology=RunnableArgument(load_world_ontology))
>>> assembled_stmts = ap.run(stmts)
>>> ap.to_json_file('filename.json')
Parameters
----------
steps : list[dict]
A list of dictionaries representing steps in the pipeline. Each step
should have a 'function' key and, if appropriate, 'args' and 'kwargs'
keys. Arguments can be simple values (strings, integers, booleans,
lists, etc.) or can be functions themselves. In case an argument is a
function or a result of another function, it should also be
represented as a dictionary of a similar structure. If a function
itself is an argument (and not its result), the dictionary should
contain a key-value pair {'no_run': True}. If an argument is a type
of a statement, it should be represented as a dictionary {'stmt_type':
<name of a statement type>}.
"""
def __init__(self, steps=None):
# This import is here to avoid circular imports
# It is enough to import one function to get all registered functions
from indra.tools.assemble_corpus import filter_grounded_only
from indra.ontology.world import load_world_ontology
from indra.ontology.bio import bio_ontology
from indra.preassembler.grounding_mapper.gilda import ground_statements
from indra.belief.wm_scorer import get_eidos_scorer
from indra.preassembler.custom_preassembly import location_matches
self.steps = steps if steps else []
@classmethod
def from_json_file(cls, filename):
"""Create an instance of AssemblyPipeline from a JSON file with
steps."""
with open(filename, 'r') as f:
steps = json.load(f)
ap = AssemblyPipeline(steps)
return ap
def to_json_file(self, filename):
"""Save AssemblyPipeline to a JSON file."""
with open(filename, 'w') as f:
json.dump(self.steps, f, indent=1)
def run(self, statements, **kwargs):
"""Run all steps of the pipeline.
Parameters
----------
statements : list[indra.statements.Statement]
A list of INDRA Statements to run the pipeline on.
**kwargs : kwargs
It is recommended to define all arguments for the steps functions
in the steps definition, but it is also possible to provide some
external objects (if it is not possible to provide them as a step
argument) as kwargs to the entire pipeline here. One should be
cautious to avoid kwargs name clashes between multiple functions
(this value will be provided to all functions that expect an
argument with the same name). To overwrite this value in other
functions, provide it explicitly in the corresponding steps kwargs.
Returns
-------
list[indra.statements.Statement]
The list of INDRA Statements resulting from running the pipeline
on the list of input Statements.
"""
logger.info('Running the pipeline')
for step in self.steps:
statements = self.run_function(step, statements, **kwargs)
return statements
def append(self, func, *args, **kwargs):
"""Append a step to the end of the pipeline.
Args and kwargs here can be of any type. All functions referenced here
have to be either imported and passed as function objects or
registered with @register_pipeline decorator and passed as function
names (strings). For arguments that require calling a different
function, use RunnableArgument class.
Parameters
----------
func : str or function
A function or the string name of a function to add to the pipeline.
args : args
Args that are passed to func when calling it.
kwargs : kwargs
Kwargs that are passed to func when calling it.
"""
if inspect.isfunction(func):
func_name = func.__name__
if func_name not in pipeline_functions:
register_pipeline(func)
elif isinstance(func, str):
func_name = func
else:
raise TypeError('Should be a function object or a string')
new_step = self.create_new_step(func_name, *args, **kwargs)
self.steps.append(new_step)
def insert(self, ix, func, *args, **kwargs):
"""Insert a step to any position in the pipeline.
Args and kwargs here can be of any type. All functions referenced here
have to be either imported and passed as function objects or
registered with @register_pipeline decorator and passed as function
names (strings). For arguments that require calling a different
function, use RunnableArgument class.
Parameters
----------
func : str or function
A function or the string name of a function to add to the pipeline.
args : args
Args that are passed to func when calling it.
kwargs : kwargs
Kwargs that are passed to func when calling it.
"""
if inspect.isfunction(func):
func_name = func.__name__
if func_name not in pipeline_functions:
register_pipeline(func)
elif isinstance(func, str):
func_name = func
else:
raise TypeError('Should be a function object or a string')
new_step = self.create_new_step(func_name, *args, **kwargs)
self.steps.insert(ix, new_step)
def create_new_step(self, func_name, *args, **kwargs):
"""Create a dictionary representing a new step in the pipeline.
Parameters
----------
func_name : str
The string name of a function to create as a step.
args : args
Args that are passed to the function when calling it.
kwargs : kwargs
Kwargs that are passed to the function when calling it.
Returns
-------
dict
A dict structure representing a step in the pipeline.
"""
assert self.get_function_from_name(func_name)
new_step = {'function': func_name}
if args:
new_step['args'] = [jsonify_arg_input(arg) for arg in args]
if kwargs:
new_step['kwargs'] = {
k: jsonify_arg_input(v) for (k, v) in kwargs.items()}
return new_step
@staticmethod
def get_function_parameters(func_dict):
"""Retrieve a function name and arguments from function dictionary.
Parameters
----------
func_dict : dict
A dict structure representing a function and its args and kwargs.
Returns
-------
tuple of str, list and dict
A tuple with the following elements: the name of the function,
the args of the function, and the kwargs of the function.
"""
func_name = func_dict['function']
args = func_dict.get('args', [])
kwargs = func_dict.get('kwargs', {})
return func_name, args, kwargs
@staticmethod
def get_function_from_name(name):
"""Return a function object by name if available or raise exception.
Parameters
----------
name : str
The name of the function.
Returns
-------
function
The function that was found based on its name. If not found,
a NotRegisteredFunctionError is raised.
"""
if name in pipeline_functions:
return pipeline_functions[name]
raise NotRegisteredFunctionError('%s is not registered' % name)
@staticmethod
def run_simple_function(func, *args, **kwargs):
"""Run a simple function and return the result.
Simple here means a function all arguments of which are simple values
(do not require extra function calls).
Parameters
----------
func : function
The function to call.
args : args
Args that are passed to the function when calling it.
kwargs : kwargs
Kwargs that are passed to the function when calling it.
Returns
-------
object
Any value that the given function returns.
"""
statements = kwargs.pop('statements', None)
if statements is not None:
return func(statements, *args, **kwargs)
return func(*args, **kwargs)
def run_function(self, func_dict, statements=None, **kwargs):
"""Run a given function and return the results.
For each of the arguments, if it requires an extra
function call, recursively call the functions until we get a simple
function.
Parameters
----------
func_dict : dict
A dict representing the function to call, its args and kwargs.
args : args
Args that are passed to the function when calling it.
kwargs : kwargs
Kwargs that are passed to the function when calling it.
Returns
-------
object
Any value that the given function returns.
"""
func_name, func_args, func_kwargs = self.get_function_parameters(
func_dict)
func = self.get_function_from_name(func_name)
logger.info('Calling %s' % func_name)
new_args = []
new_kwargs = {}
for arg in func_args:
arg_value = self.get_argument_value(arg)
new_args.append(arg_value)
for k, v in func_kwargs.items():
kwarg_value = self.get_argument_value(v)
new_kwargs[k] = kwarg_value
if statements is not None:
new_kwargs['statements'] = statements
if kwargs:
for k, v in kwargs.items():
if k not in new_kwargs and k in inspect.getargspec(func).args:
new_kwargs[k] = v
return self.run_simple_function(func, *new_args, **new_kwargs)
@staticmethod
def is_function(argument, keyword='function'):
"""Check if an argument should be converted to a specific object type,
e.g. a function or a statement type.
Parameters
----------
argument : dict or other object
The argument is a dict, its keyword entry is checked, and if it is
there, we return True, otherwise we return False.
keyword : Optional[str]
The keyword to check if it's there if the argument is a dict.
Default: function
"""
if not isinstance(argument, dict):
return False
if keyword not in argument:
return False
return True
def get_argument_value(self, arg_json):
"""Get a value of an argument from its json version."""
if self.is_function(arg_json, 'function'):
# Argument is a function
if arg_json.get('no_run', False):
value = self.get_function_from_name(arg_json['function'])
# Argument is a result of a function
else:
value = self.run_function(arg_json)
# Argument is a statement type
elif self.is_function(arg_json, 'stmt_type'):
value = get_statement_by_name(arg_json.get('stmt_type'))
# Argument is a simple value (str, int, boolean, etc.)
else:
value = arg_json
return value
def __len__(self):
return len(self.steps)
def __iter__(self):
return iter(self.steps)
class NotRegisteredFunctionError(Exception):
pass
class RunnableArgument():
"""Class representing arguments generated by calling a function.
RunnableArguments should be used as args or kwargs in AssemblyPipeline
`append` and `insert` methods.
Parameters
----------
func : str or function
A function or a name of a function to be called to generate argument
value.
"""
def __init__(self, func, *args, **kwargs):
if inspect.isfunction(func):
self.func_name = func.__name__
if self.func_name not in pipeline_functions:
register_pipeline(func)
elif isinstance(func, str):
self.func_name = func
else:
raise TypeError('Should be a function object or a string')
self.args = args
self.kwargs = kwargs
def to_json(self):
"""Jsonify to standard AssemblyPipeline step format."""
json_dict = {'function': self.func_name}
new_args = []
new_kwargs = {}
for arg in self.args:
new_args.append(jsonify_arg_input(arg))
for k, v in self.kwargs.items():
new_kwargs[k] = jsonify_arg_input(v)
if new_args:
json_dict['args'] = new_args
if new_kwargs:
json_dict['kwargs'] = new_kwargs
return json_dict
def jsonify_arg_input(arg):
"""Jsonify user input (in AssemblyPipeline `append` and `insert` methods)
into a standard step json."""
if isinstance(arg, RunnableArgument):
return arg.to_json()
# If a function object or name of a function is provided, we assume it
# does not have to be run (function itself is argument).
if inspect.isfunction(arg):
func_name = arg.__name__
if func_name not in pipeline_functions:
register_pipeline(arg)
return {'function': func_name, 'no_run': True}
if isinstance(arg, str) and arg in pipeline_functions:
return {'function': arg, 'no_run': True}
# For some functions Statement type has to be argument
if inspect.isclass(arg) and issubclass(arg, Statement):
return {'stmt_type': arg.__name__}
# Argument is a simple value and can be stored as provided
return arg | indra/pipeline/pipeline.py | import json
import logging
import inspect
from .decorators import pipeline_functions, register_pipeline
from indra.statements import get_statement_by_name, Statement
logger = logging.getLogger(__name__)
class AssemblyPipeline():
"""An assembly pipeline that runs the specified steps on a given set of
statements.
Ways to initialize and run the pipeline (examples assume you have a list
of INDRA Statements stored in the `stmts` variable.)
>>> from indra.statements import *
>>> map2k1 = Agent('MAP2K1', db_refs={'HGNC': '6840'})
>>> mapk1 = Agent('MAPK1', db_refs={'HGNC': '6871'})
>>> braf = Agent('BRAF')
>>> stmts = [Phosphorylation(map2k1, mapk1, 'T', '185'),
... Phosphorylation(braf, map2k1)]
1) Provide a JSON file containing the steps, then use the classmethod
`from_json_file`, and run it with the `run` method on a list of statements.
This option allows storing pipeline versions in a separate file and
reproducing the same results. All functions referenced in the JSON file
have to be registered with the @register_pipeline decorator.
>>> import os
>>> path_this = os.path.dirname(os.path.abspath(__file__))
>>> filename = os.path.abspath(
... os.path.join(path_this, '..', 'tests', 'pipeline_test.json'))
>>> ap = AssemblyPipeline.from_json_file(filename)
>>> assembled_stmts = ap.run(stmts)
2) Initialize a pipeline with a list of steps and run it with the `run`
method on a list of statements. All functions referenced in steps have to
be registered with the @register_pipeline decorator.
>>> steps = [
... {"function": "filter_no_hypothesis"},
... {"function": "filter_grounded_only",
... "kwargs": {"score_threshold": 0.8}}
... ]
>>> ap = AssemblyPipeline(steps)
>>> assembled_stmts = ap.run(stmts)
3) Initialize an empty pipeline and append/insert the steps one by one.
Provide a function and its args and kwargs. For arguments that
require calling a different function, use the RunnableArgument class. All
functions referenced here have to be either imported and passed as function
objects or registered with the @register_pipeline decorator and passed as
function names (strings). The pipeline built this way can be optionally
saved into a JSON file.
>>> from indra.tools.assemble_corpus import *
>>> from indra.ontology.world import load_world_ontology
>>> from indra.belief.wm_scorer import get_eidos_scorer
>>> ap = AssemblyPipeline()
>>> ap.append(filter_no_hypothesis)
>>> ap.append(filter_grounded_only)
>>> ap.append(run_preassembly,
... belief_scorer=RunnableArgument(get_eidos_scorer),
... ontology=RunnableArgument(load_world_ontology))
>>> assembled_stmts = ap.run(stmts)
>>> ap.to_json_file('filename.json')
Parameters
----------
steps : list[dict]
A list of dictionaries representing steps in the pipeline. Each step
should have a 'function' key and, if appropriate, 'args' and 'kwargs'
keys. Arguments can be simple values (strings, integers, booleans,
lists, etc.) or can be functions themselves. In case an argument is a
function or a result of another function, it should also be
represented as a dictionary of a similar structure. If a function
itself is an argument (and not its result), the dictionary should
contain a key-value pair {'no_run': True}. If an argument is a type
of a statement, it should be represented as a dictionary {'stmt_type':
<name of a statement type>}.
"""
def __init__(self, steps=None):
# This import is here to avoid circular imports
# It is enough to import one function to get all registered functions
from indra.tools.assemble_corpus import filter_grounded_only
from indra.ontology.world import load_world_ontology
from indra.ontology.bio import bio_ontology
from indra.preassembler.grounding_mapper.gilda import ground_statements
from indra.belief.wm_scorer import get_eidos_scorer
from indra.preassembler.custom_preassembly import location_matches
self.steps = steps if steps else []
@classmethod
def from_json_file(cls, filename):
"""Create an instance of AssemblyPipeline from a JSON file with
steps."""
with open(filename, 'r') as f:
steps = json.load(f)
ap = AssemblyPipeline(steps)
return ap
def to_json_file(self, filename):
"""Save AssemblyPipeline to a JSON file."""
with open(filename, 'w') as f:
json.dump(self.steps, f, indent=1)
def run(self, statements, **kwargs):
"""Run all steps of the pipeline.
Parameters
----------
statements : list[indra.statements.Statement]
A list of INDRA Statements to run the pipeline on.
**kwargs : kwargs
It is recommended to define all arguments for the steps functions
in the steps definition, but it is also possible to provide some
external objects (if it is not possible to provide them as a step
argument) as kwargs to the entire pipeline here. One should be
cautious to avoid kwargs name clashes between multiple functions
(this value will be provided to all functions that expect an
argument with the same name). To overwrite this value in other
functions, provide it explicitly in the corresponding steps kwargs.
Returns
-------
list[indra.statements.Statement]
The list of INDRA Statements resulting from running the pipeline
on the list of input Statements.
"""
logger.info('Running the pipeline')
for step in self.steps:
statements = self.run_function(step, statements, **kwargs)
return statements
def append(self, func, *args, **kwargs):
"""Append a step to the end of the pipeline.
Args and kwargs here can be of any type. All functions referenced here
have to be either imported and passed as function objects or
registered with @register_pipeline decorator and passed as function
names (strings). For arguments that require calling a different
function, use RunnableArgument class.
Parameters
----------
func : str or function
A function or the string name of a function to add to the pipeline.
args : args
Args that are passed to func when calling it.
kwargs : kwargs
Kwargs that are passed to func when calling it.
"""
if inspect.isfunction(func):
func_name = func.__name__
if func_name not in pipeline_functions:
register_pipeline(func)
elif isinstance(func, str):
func_name = func
else:
raise TypeError('Should be a function object or a string')
new_step = self.create_new_step(func_name, *args, **kwargs)
self.steps.append(new_step)
def insert(self, ix, func, *args, **kwargs):
"""Insert a step to any position in the pipeline.
Args and kwargs here can be of any type. All functions referenced here
have to be either imported and passed as function objects or
registered with @register_pipeline decorator and passed as function
names (strings). For arguments that require calling a different
function, use RunnableArgument class.
Parameters
----------
func : str or function
A function or the string name of a function to add to the pipeline.
args : args
Args that are passed to func when calling it.
kwargs : kwargs
Kwargs that are passed to func when calling it.
"""
if inspect.isfunction(func):
func_name = func.__name__
if func_name not in pipeline_functions:
register_pipeline(func)
elif isinstance(func, str):
func_name = func
else:
raise TypeError('Should be a function object or a string')
new_step = self.create_new_step(func_name, *args, **kwargs)
self.steps.insert(ix, new_step)
def create_new_step(self, func_name, *args, **kwargs):
"""Create a dictionary representing a new step in the pipeline.
Parameters
----------
func_name : str
The string name of a function to create as a step.
args : args
Args that are passed to the function when calling it.
kwargs : kwargs
Kwargs that are passed to the function when calling it.
Returns
-------
dict
A dict structure representing a step in the pipeline.
"""
assert self.get_function_from_name(func_name)
new_step = {'function': func_name}
if args:
new_step['args'] = [jsonify_arg_input(arg) for arg in args]
if kwargs:
new_step['kwargs'] = {
k: jsonify_arg_input(v) for (k, v) in kwargs.items()}
return new_step
@staticmethod
def get_function_parameters(func_dict):
"""Retrieve a function name and arguments from function dictionary.
Parameters
----------
func_dict : dict
A dict structure representing a function and its args and kwargs.
Returns
-------
tuple of str, list and dict
A tuple with the following elements: the name of the function,
the args of the function, and the kwargs of the function.
"""
func_name = func_dict['function']
args = func_dict.get('args', [])
kwargs = func_dict.get('kwargs', {})
return func_name, args, kwargs
@staticmethod
def get_function_from_name(name):
"""Return a function object by name if available or raise exception.
Parameters
----------
name : str
The name of the function.
Returns
-------
function
The function that was found based on its name. If not found,
a NotRegisteredFunctionError is raised.
"""
if name in pipeline_functions:
return pipeline_functions[name]
raise NotRegisteredFunctionError('%s is not registered' % name)
@staticmethod
def run_simple_function(func, *args, **kwargs):
"""Run a simple function and return the result.
Simple here means a function all arguments of which are simple values
(do not require extra function calls).
Parameters
----------
func : function
The function to call.
args : args
Args that are passed to the function when calling it.
kwargs : kwargs
Kwargs that are passed to the function when calling it.
Returns
-------
object
Any value that the given function returns.
"""
statements = kwargs.pop('statements', None)
if statements is not None:
return func(statements, *args, **kwargs)
return func(*args, **kwargs)
def run_function(self, func_dict, statements=None, **kwargs):
"""Run a given function and return the results.
For each of the arguments, if it requires an extra
function call, recursively call the functions until we get a simple
function.
Parameters
----------
func_dict : dict
A dict representing the function to call, its args and kwargs.
args : args
Args that are passed to the function when calling it.
kwargs : kwargs
Kwargs that are passed to the function when calling it.
Returns
-------
object
Any value that the given function returns.
"""
func_name, func_args, func_kwargs = self.get_function_parameters(
func_dict)
func = self.get_function_from_name(func_name)
logger.info('Calling %s' % func_name)
new_args = []
new_kwargs = {}
for arg in func_args:
arg_value = self.get_argument_value(arg)
new_args.append(arg_value)
for k, v in func_kwargs.items():
kwarg_value = self.get_argument_value(v)
new_kwargs[k] = kwarg_value
if statements is not None:
new_kwargs['statements'] = statements
if kwargs:
for k, v in kwargs.items():
if k not in new_kwargs and k in inspect.getargspec(func).args:
new_kwargs[k] = v
return self.run_simple_function(func, *new_args, **new_kwargs)
@staticmethod
def is_function(argument, keyword='function'):
"""Check if an argument should be converted to a specific object type,
e.g. a function or a statement type.
Parameters
----------
argument : dict or other object
The argument is a dict, its keyword entry is checked, and if it is
there, we return True, otherwise we return False.
keyword : Optional[str]
The keyword to check if it's there if the argument is a dict.
Default: function
"""
if not isinstance(argument, dict):
return False
if keyword not in argument:
return False
return True
def get_argument_value(self, arg_json):
"""Get a value of an argument from its json version."""
if self.is_function(arg_json, 'function'):
# Argument is a function
if arg_json.get('no_run', False):
value = self.get_function_from_name(arg_json['function'])
# Argument is a result of a function
else:
value = self.run_function(arg_json)
# Argument is a statement type
elif self.is_function(arg_json, 'stmt_type'):
value = get_statement_by_name(arg_json.get('stmt_type'))
# Argument is a simple value (str, int, boolean, etc.)
else:
value = arg_json
return value
def __len__(self):
return len(self.steps)
def __iter__(self):
return iter(self.steps)
class NotRegisteredFunctionError(Exception):
pass
class RunnableArgument():
"""Class representing arguments generated by calling a function.
RunnableArguments should be used as args or kwargs in AssemblyPipeline
`append` and `insert` methods.
Parameters
----------
func : str or function
A function or a name of a function to be called to generate argument
value.
"""
def __init__(self, func, *args, **kwargs):
if inspect.isfunction(func):
self.func_name = func.__name__
if self.func_name not in pipeline_functions:
register_pipeline(func)
elif isinstance(func, str):
self.func_name = func
else:
raise TypeError('Should be a function object or a string')
self.args = args
self.kwargs = kwargs
def to_json(self):
"""Jsonify to standard AssemblyPipeline step format."""
json_dict = {'function': self.func_name}
new_args = []
new_kwargs = {}
for arg in self.args:
new_args.append(jsonify_arg_input(arg))
for k, v in self.kwargs.items():
new_kwargs[k] = jsonify_arg_input(v)
if new_args:
json_dict['args'] = new_args
if new_kwargs:
json_dict['kwargs'] = new_kwargs
return json_dict
def jsonify_arg_input(arg):
"""Jsonify user input (in AssemblyPipeline `append` and `insert` methods)
into a standard step json."""
if isinstance(arg, RunnableArgument):
return arg.to_json()
# If a function object or name of a function is provided, we assume it
# does not have to be run (function itself is argument).
if inspect.isfunction(arg):
func_name = arg.__name__
if func_name not in pipeline_functions:
register_pipeline(arg)
return {'function': func_name, 'no_run': True}
if isinstance(arg, str) and arg in pipeline_functions:
return {'function': arg, 'no_run': True}
# For some functions Statement type has to be argument
if inspect.isclass(arg) and issubclass(arg, Statement):
return {'stmt_type': arg.__name__}
# Argument is a simple value and can be stored as provided
return arg | 0.838515 | 0.473779 |
from toontown.toonbase import TTLocalizer
from toontown.coghq.SpecImports import *
GlobalEntities = {1000: {'type': 'levelMgr', 'name': 'LevelMgr', 'comment': '',
'parentEntId': 0,
'cogLevel': 0,
'farPlaneDistance': 1500.0,
'modelFilename': 'phase_9/models/cogHQ/SelbotLegFactory',
'wantDoors': 1},
1001: {'type': 'editMgr', 'name': 'EditMgr',
'parentEntId': 0,
'insertEntity': None,
'removeEntity': None,
'requestNewEntity': None,
'requestSave': None},
0: {'type': 'zone', 'name': 'UberZone',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
3: {'type': 'zone', 'name': 'Main Entrance',
'comment': '',
'parentEntId': 0,
'scale': Vec3(1, 1, 1),
'description': TTLocalizer.SellbotSwagFactorySpecMainEntrance,
'visibility': [
114]},
4: {'type': 'zone', 'name': 'Lobby',
'comment': '',
'parentEntId': 0,
'scale': Vec3(1, 1, 1),
'description': TTLocalizer.SellbotLegFactorySpecLobby,
'visibility': [
113, 114]},
5: {'type': 'zone', 'name': 'hallwayFromLobby',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': [
113, 116]},
6: {'type': 'zone', 'name': 'hallwayToBoiler/Control/Lookout',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecLobbyHallway,
'visibility': [
109,
116,
117,
118]},
7: {'type': 'zone', 'name': 'GearRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecGearRoom,
'visibility': [
109, 110]},
8: {'type': 'zone', 'name': 'BoilerRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecBoilerRoom,
'visibility': [
108, 117]},
9: {'type': 'zone', 'name': 'EastCatwalk',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecEastCatwalk,
'visibility': [
23,
25,
26,
33,
34,
35,
38,
41,
53,
110,
112,
115,
124,
200,
222]},
10: {'type': 'zone', 'name': 'PaintMixer',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecPaintMixer,
'visibility': [
11, 111, 112]},
11: {'type': 'zone', 'name': 'PaintMixerRewardRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecPaintMixerStorageRoom,
'visibility': [
10, 111, 112]},
12: {'type': 'zone', 'name': 'WestSiloCatwalk',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecWestSiloCatwalk,
'visibility': [
21,
26,
33,
34,
35,
36,
37,
40,
41,
53,
60,
61,
108,
110,
119,
120,
125,
127,
128,
129,
130,
200]},
13: {'type': 'zone', 'name': 'PipeRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecPipeRoom,
'visibility': [
119, 121]},
14: {'type': 'zone', 'name': 'StairsToPipeRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': [
17,
18,
121,
126,
131]},
15: {'type': 'zone', 'name': 'DuctRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecDuctRoom,
'visibility': [
106, 126]},
16: {'type': 'zone', 'name': 'Side Entrance',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecSideEntrance,
'visibility': [
106]},
17: {'type': 'zone', 'name': 'StomperAlley',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecStomperAlley,
'visibility': [
14,
121,
126,
131]},
18: {'type': 'zone', 'name': 'LavaRoomFoyer',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecLavaRoomFoyer,
'visibility': [
19,
20,
102,
103,
105,
131]},
19: {'type': 'zone', 'name': 'LavaRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecLavaRoom,
'visibility': [
17,
18,
20,
105,
131]},
20: {'type': 'zone', 'name': 'LavaRewardRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecLavaStorageRoom,
'visibility': [
18, 19, 105]},
21: {'type': 'zone', 'name': 'WestCatwalk',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecWestCatwalk,
'visibility': [
12,
23,
26,
33,
34,
35,
40,
41,
53,
60,
108,
119,
120,
125,
127,
200]},
22: {'type': 'zone', 'name': 'OilRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecOilRoom,
'visibility': [
107]},
23: {'type': 'zone', 'name': 'Lookout',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecLookout,
'visibility': [
24,
39,
115,
118,
120,
123,
124,
125]},
24: {'type': 'zone', 'name': 'Warehouse',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecWarehouse,
'visibility': [
23,
39,
115,
120,
123,
124,
125]},
25: {'type': 'zone', 'name': 'PaintMixerExterior',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
26: {'type': 'zone', 'name': 'WarehouseExterior',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
27: {'type': 'zone', 'name': 'OilRoomHallway',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecOilRoomHallway,
'visibility': [
105, 107, 127]},
30: {'type': 'zone', 'name': 'EastSiloControlRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecEastSiloControlRoom,
'visibility': [
130]},
31: {'type': 'zone', 'name': 'WestSiloControlRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecWestSiloControlRoom,
'visibility': [
128]},
32: {'type': 'zone', 'name': 'CenterSiloControlRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecCenterSiloControlRoom,
'visibility': [
129]},
33: {'type': 'zone', 'name': 'EastSilo',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecEastSilo,
'visibility': [
9,
12,
21,
25,
26,
34,
35,
36,
37,
38,
40,
41,
53,
60,
61,
108,
110,
112,
119,
124,
128,
129,
130,
200,
222]},
34: {'type': 'zone', 'name': 'WestSilo',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecWestSilo,
'visibility': [
9,
12,
21,
25,
26,
33,
35,
36,
37,
40,
41,
53,
60,
61,
108,
110,
112,
119,
120,
125,
127,
128,
129,
130,
200]},
35: {'type': 'zone', 'name': 'CenterSilo',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecCenterSilo,
'visibility': [
9,
21,
25,
26,
33,
34,
36,
37,
40,
41,
53,
60,
61,
108,
110,
112,
119,
128,
129,
130,
200]},
36: {'type': 'zone', 'name': 'WestSiloBridge',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': [
9,
12,
21,
25,
26,
33,
34,
35,
36,
37,
40,
41,
53,
60,
61,
108,
110,
112,
119,
127,
128,
129,
130,
200]},
37: {'type': 'zone', 'name': 'EastSiloBridge',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': [
9,
12,
21,
25,
26,
33,
34,
35,
36,
37,
38,
40,
41,
53,
60,
61,
108,
110,
112,
119,
128,
129,
130,
200,
222]},
38: {'type': 'zone', 'name': 'EastSiloCatwalk',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecEastSiloCatwalk,
'visibility': [
9,
25,
26,
33,
34,
35,
36,
37,
41,
53,
60,
110,
112,
115,
124,
200,
222]},
39: {'type': 'zone', 'name': 'WarehouseCeiling',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
40: {'type': 'zone', 'name': 'WestExterior',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
41: {'type': 'zone', 'name': 'EastExterior',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
53: {'type': 'zone', 'name': 'ExteriorFloor',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
60: {'type': 'zone', 'name': 'WestElevatorShaft',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecWestElevatorShaft,
'visibility': [
12, 34]},
61: {'type': 'zone', 'name': 'EastElevatorShaft',
'comment': 'no geom or DCS',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecEastElevatorShaft,
'visibility': [
33, 38]},
101: {'type': 'zone', 'name': 'dwToLavaRewardRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
102: {'type': 'zone', 'name': 'dwToLavaRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
103: {'type': 'zone', 'name': 'dwToLavaRoomHallway',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
105: {'type': 'zone', 'name': 'dwToOilRoomCatwalks',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
106: {'type': 'zone', 'name': 'dwToDuctRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
107: {'type': 'zone', 'name': 'dwToOilRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
108: {'type': 'zone', 'name': 'dwFromBoilerRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
109: {'type': 'zone', 'name': 'dwToGearRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
110: {'type': 'zone', 'name': 'dwFromGearRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
111: {'type': 'zone', 'name': 'dwToPaintMixerRewardRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
112: {'type': 'zone', 'name': 'dwToPaintMixer',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
113: {'type': 'zone', 'name': 'dwFromLobby',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
114: {'type': 'zone', 'name': 'dwToLobby',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
115: {'type': 'zone', 'name': 'dwToWarehouseFromRight',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
116: {'type': 'zone', 'name': 'dwFromLobbyFar',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
117: {'type': 'zone', 'name': 'dwToBoilerRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
118: {'type': 'zone', 'name': 'dwToLookout',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
119: {'type': 'zone', 'name': 'dwFromPipeRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
120: {'type': 'zone', 'name': 'dwToWarehouseFromLeft',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
121: {'type': 'zone', 'name': 'dwToPipeRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
122: {'type': 'zone', 'name': 'dwToWarehouseControlRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
123: {'type': 'zone', 'name': 'dwFromWarehouseFloor',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
124: {'type': 'zone', 'name': 'dwFromWarehouseRight',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
125: {'type': 'zone', 'name': 'dwFromWarehouseLeft',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
126: {'type': 'zone', 'name': 'dwFromDuctRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
127: {'type': 'zone', 'name': 'dwFromOilRoomHallway',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
128: {'type': 'zone', 'name': 'dwToWestSiloRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
129: {'type': 'zone', 'name': 'dwToCenterSiloRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
130: {'type': 'zone', 'name': 'dwToEastSiloRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
131: {'type': 'zone', 'name': 'dwFromStomperAlley',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
200: {'type': 'zone', 'name': 'sky',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
201: {'type': 'zone', 'name': 'extraZone201',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
202: {'type': 'zone', 'name': 'extraZone202',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
203: {'type': 'zone', 'name': 'extraZone203',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
204: {'type': 'zone', 'name': 'extraZone204',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
205: {'type': 'zone', 'name': 'extraZone205',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
206: {'type': 'zone', 'name': 'extraZone206',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
207: {'type': 'zone', 'name': 'extraZone207',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
208: {'type': 'zone', 'name': 'extraZone208',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
209: {'type': 'zone', 'name': 'extraZone209',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
210: {'type': 'zone', 'name': 'extraZone210',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
211: {'type': 'zone', 'name': 'extraZone211',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
212: {'type': 'zone', 'name': 'extraZone212',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
213: {'type': 'zone', 'name': 'extraZone213',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
214: {'type': 'zone', 'name': 'extraZone214',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
215: {'type': 'zone', 'name': 'extraZone215',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
216: {'type': 'zone', 'name': 'extraZone216',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
217: {'type': 'zone', 'name': 'extraZone217',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
218: {'type': 'zone', 'name': 'extraZone218',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
219: {'type': 'zone', 'name': 'extraZone219',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
220: {'type': 'zone', 'name': 'extraZone220',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
221: {'type': 'zone', 'name': 'extraZone221',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
222: {'type': 'zone', 'name': 'dwToEastSiloInterior',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
10010: {'type': 'ambientSound', 'name': 'westWind',
'comment': '',
'parentEntId': 35,
'pos': Point3(-52.7549, -38.8374, 53.3758),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'enabled': 1,
'soundPath': 'phase_9/audio/sfx/CHQ_FACT_whistling_wind.ogg',
'volume': 1},
10016: {'type': 'ambientSound', 'name': 'sndConveyorBelt',
'comment': '',
'parentEntId': 10056,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'enabled': 1,
'soundPath': 'phase_9/audio/sfx/CHQ_FACT_conveyor_belt.ogg',
'volume': 0.5},
10053: {'type': 'ambientSound', 'name': 'eastWind',
'comment': '',
'parentEntId': 35,
'pos': Point3(52.75, -38.84, 53.38),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'enabled': 1,
'soundPath': 'phase_9/audio/sfx/CHQ_FACT_whistling_wind.ogg',
'volume': 1},
10055: {'type': 'ambientSound', 'name': 'sndGears',
'comment': '',
'parentEntId': 10056,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'enabled': 1,
'soundPath': 'phase_9/audio/sfx/CHQ_FACT_gears_turning.ogg',
'volume': 1},
10031: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 8,
'pos': Point3(-1, 79, 10),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1.75, 1, 1),
'cellId': 1,
'radius': 10.0},
10035: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10039,
'pos': Point3(0, 0, 0),
'hpr': Point3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 4,
'radius': 10.0},
10038: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 7,
'pos': Point3(0, -28.04, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 5,
'radius': 10.0},
20048: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(0.973602, 71.7, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 0.2, 1),
'cellId': 0,
'radius': 15.0},
20063: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20033,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 8,
'radius': 1},
20064: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20034,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 8,
'radius': 1},
20065: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20035,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 8,
'radius': 1},
20066: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20036,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 8,
'radius': 1},
20086: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 15,
'pos': Point3(0, 33, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(2, 1, 1),
'cellId': 6,
'radius': 12.0},
20112: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 33,
'pos': Point3(-10.0936, -9.55975, 4),
'hpr': Point3(45, 0, 0),
'scale': Point3(10, 1, 5),
'cellId': 10,
'radius': 5.0},
20113: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 34,
'pos': Point3(9.08399, 4.42157, 0),
'hpr': Point3(-50, 0, 0),
'scale': Point3(10, 2, 6),
'cellId': 9,
'radius': 5.0},
20114: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60103,
'pos': Point3(0, 0, 1),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 0.5),
'cellId': 8,
'radius': 3.0},
10003: {'type': 'beanBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20,
'pos': Point3(1.25458, 19.2471, 0.0249529),
'hpr': Vec3(-8.28434, 0, 0),
'scale': 1,
'rewardPerGrab': 25,
'rewardPerGrabMax': 0},
10011: {'type': 'beanBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20,
'pos': Point3(16.344, -9.73, 0.025),
'hpr': Vec3(-79.8888, 0, 0),
'scale': 1,
'rewardPerGrab': 25,
'rewardPerGrabMax': 0},
20017: {'type': 'beanBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10022,
'pos': Point3(20.0035, 2.94232, 0),
'hpr': Vec3(-31.6033, 0, 0),
'scale': 1,
'rewardPerGrab': 35,
'rewardPerGrabMax': 0},
10039: {'type': 'button', 'name': '<unnamed>',
'comment': '',
'parentEntId': 22,
'pos': Point3(-7, 29, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(4, 4, 4),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
20033: {'type': 'button', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20022,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(3, 3, 3),
'color': Vec4(0.862745, 0.517647, 0.0941177, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': 1},
20034: {'type': 'button', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20022,
'pos': Point3(7.5, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(3, 3, 3),
'color': Vec4(0.862745, 0.517647, 0.0941177, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': 1},
20035: {'type': 'button', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20022,
'pos': Point3(15, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(3, 3, 3),
'color': Vec4(0.862745, 0.517647, 0.0941177, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': 1},
20036: {'type': 'button', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20022,
'pos': Point3(22.5, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(3, 3, 3),
'color': Vec4(0.862745, 0.517647, 0.0941177, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': 1},
30040: {'type': 'button', 'name': 'door button',
'comment': 'Entrance door unlock',
'parentEntId': 3,
'pos': Point3(0, 6.75, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(3, 3, 3),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
30076: {'type': 'button', 'name': 'open door 113',
'comment': 'Lobby door unlock',
'parentEntId': 4,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(3, 3, 3),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1},
60102: {'type': 'button', 'name': 'door button',
'comment': 'Entrance Door Unlock',
'parentEntId': 16,
'pos': Point3(4, 8, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(3, 3, 3),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
60103: {'type': 'button', 'name': 'door button',
'comment': '',
'parentEntId': 20022,
'pos': Point3(25, -7, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(4, 4, 4),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
60104: {'type': 'button', 'name': '<unnamed>',
'comment': '',
'parentEntId': 31,
'pos': Point3(0, 10, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(5, 5, 4),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
60105: {'type': 'button', 'name': 'door button',
'comment': '',
'parentEntId': 30,
'pos': Point3(-4, 7, 0),
'hpr': Point3(0, 0, 0),
'scale': Point3(5, 5, 4),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
60118: {'type': 'button', 'name': '<unnamed>',
'comment': '',
'parentEntId': 15,
'pos': Point3(0, 20, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(3, 3, 3),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
10005: {'type': 'conveyorBelt', 'name': 'belt',
'comment': '',
'parentEntId': 10000,
'pos': Point3(0, 45.2024, 7.24937),
'hpr': Point3(180, 0, 0),
'scale': 1,
'floorName': 'platformcollision',
'length': 78.81881352704218,
'speed': 2.0,
'treadLength': 10.0,
'treadModelPath': 'phase_9/models/cogHQ/platform1',
'widthScale': 0.85},
20081: {'type': 'crate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20080,
'pos': Point3(0, 0, 0),
'scale': 0.920000016689,
'crushCellId': None,
'gridId': 20080,
'modelType': 0,
'pushable': 1},
20091: {'type': 'crate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20090,
'pos': Point3(0, 23, 0),
'scale': 0.920000016689,
'crushCellId': None,
'gridId': 20090,
'modelType': 0,
'pushable': 1},
20024: {'type': 'crusherCell', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20023,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'col': 1,
'gridId': 20025,
'row': 14},
20026: {'type': 'crusherCell', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20023,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'col': 10,
'gridId': 20025,
'row': 14},
20027: {'type': 'crusherCell', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 20023,
'pos': Point3(1, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'col': 21,
'gridId': 20025,
'row': 14},
20028: {'type': 'crusherCell', 'name': 'copy of copy of <unnamed>',
'comment': '',
'parentEntId': 20023,
'pos': Point3(2, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'col': 28,
'gridId': 20025,
'row': 14},
30078: {'type': 'cutScene', 'name': 'button door',
'comment': '',
'parentEntId': 114,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'duration': 4.0,
'effect': 'irisInOut',
'motion': 'doorUnlock',
'startStopEvent': 30077},
10002: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 128,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
10052: {'type': 'door', 'name': 'door 127',
'comment': '',
'parentEntId': 127,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 10039,
'unlock2Event': 0,
'unlock3Event': 0},
30000: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 114,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 60132,
'unlock2Event': 0,
'unlock3Event': 0},
30001: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 105,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1.0,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30002: {'type': 'door', 'name': 'door 106',
'comment': '',
'parentEntId': 106,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 60132,
'unlock2Event': 0,
'unlock3Event': 0},
30003: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 107,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30004: {'type': 'door', 'name': 'doorFromBoilerRoom',
'comment': '',
'parentEntId': 108,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30005: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 109,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30006: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 110,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30008: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 112,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30009: {'type': 'door', 'name': 'door 113',
'comment': '',
'parentEntId': 113,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 60119,
'unlock2Event': 0,
'unlock3Event': 0},
30010: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 115,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30011: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 116,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30012: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 117,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30013: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 118,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30014: {'type': 'door', 'name': 'doorFromPipeRoom 119',
'comment': '',
'parentEntId': 119,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30015: {'type': 'door', 'name': 'door 120',
'comment': '',
'parentEntId': 120,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30016: {'type': 'door', 'name': 'door 121',
'comment': '',
'parentEntId': 121,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30017: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 122,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30018: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 123,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 0,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 60103,
'unlock2Event': 0,
'unlock3Event': 0},
30019: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 124,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30020: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 125,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30021: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 126,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 60119,
'unlock2Event': 0,
'unlock3Event': 0},
60088: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 131,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1.0,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
60094: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 129,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 0,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1.0,
'unlock0Event': 0,
'unlock1Event': 60104,
'unlock2Event': 60105,
'unlock3Event': 0},
60095: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 130,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
60101: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 222,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
10049: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 3},
10051: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 4},
60000: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 5},
60001: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 6},
60002: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 7},
60003: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 9},
60004: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 10},
60005: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 8},
60006: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 21},
60007: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 24},
60009: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 38},
60011: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 12},
60013: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 13},
60014: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 14},
60015: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 17},
60016: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 15},
60017: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 16},
60018: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 19},
60019: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 18},
60024: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 22},
60031: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 23},
60044: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 33},
60066: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 11},
60067: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 27},
60096: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 30},
60108: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 34},
60111: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 36},
60114: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 37},
60121: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 31},
60126: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 35},
60130: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 32},
10028: {'type': 'entrancePoint', 'name': 'entrance1',
'comment': '',
'parentEntId': 3,
'pos': Point3(0, 10, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'entranceId': 0,
'radius': 15,
'theta': 20},
10029: {'type': 'entrancePoint', 'name': 'entrance2',
'comment': '',
'parentEntId': 16,
'pos': Point3(0, 10, 0),
'hpr': Point3(0, 0, 0),
'scale': 1,
'entranceId': 1,
'radius': 15,
'theta': 20},
10021: {'type': 'gagBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10022,
'pos': Point3(-2.02081, 0, 0),
'hpr': Vec3(337.477, 0, 0),
'scale': 1,
'gagLevel': 2,
'gagLevelMax': 0,
'gagTrack': 0,
'rewardPerGrab': 3,
'rewardPerGrabMax': 0},
10024: {'type': 'gagBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10022,
'pos': Point3(20.3012, -26.3219, 0),
'hpr': Vec3(233.187, 0, 0),
'scale': 1,
'gagLevel': 4,
'gagLevelMax': 0,
'gagTrack': 4,
'rewardPerGrab': 4,
'rewardPerGrabMax': 0},
10025: {'type': 'gagBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10022,
'pos': Point3(-47.312, 7.22571, 0),
'hpr': Vec3(19.1524, 0, 0),
'scale': 1,
'gagLevel': 0,
'gagLevelMax': 0,
'gagTrack': 0,
'rewardPerGrab': 3,
'rewardPerGrabMax': 0},
10026: {'type': 'gagBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10022,
'pos': Point3(-11.2037, 5.43514, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'gagLevel': 4,
'gagLevelMax': 0,
'gagTrack': 5,
'rewardPerGrab': 4,
'rewardPerGrabMax': 0},
20020: {'type': 'gagBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20,
'pos': Point3(-23.0209, 0, 0),
'hpr': Vec3(126.676, 0, 0),
'scale': 1,
'gagLevel': 4,
'gagLevelMax': 0,
'gagTrack': 3,
'rewardPerGrab': 5,
'rewardPerGrabMax': 0},
20021: {'type': 'gagBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20,
'pos': Point3(-31.3225, 14.1021, 0),
'hpr': Vec3(-136.57, 0, 0),
'scale': 1,
'gagLevel': 4,
'gagLevelMax': 0,
'gagTrack': 5,
'rewardPerGrab': 5,
'rewardPerGrabMax': 0},
20085: {'type': 'gagBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 5,
'pos': Point3(3.14, 12.6703, 10.12),
'hpr': Vec3(-24.8105, 0, 0),
'scale': 1,
'gagLevel': 5,
'gagLevelMax': 0,
'gagTrack': 4,
'rewardPerGrab': 5,
'rewardPerGrabMax': 0},
20093: {'type': 'gagBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20087,
'pos': Point3(2.4, -1, 7),
'hpr': Vec3(-151.532, 0, 0),
'scale': 1,
'gagLevel': 0,
'gagLevelMax': 0,
'gagTrack': 0,
'rewardPerGrab': 5,
'rewardPerGrabMax': 0},
10006: {'type': 'gear', 'name': 'first',
'comment': '',
'parentEntId': 10004,
'pos': Point3(0, 0, 26.0634),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'degreesPerSec': 20.0,
'gearScale': 25.0,
'modelType': 'factory',
'orientation': 'vertical',
'phaseShift': 0},
10007: {'type': 'gear', 'name': 'second',
'comment': '',
'parentEntId': 10004,
'pos': Point3(0, 15, 26.06),
'hpr': Point3(180, 0, 0),
'scale': 1,
'degreesPerSec': 30.0,
'gearScale': 25.0,
'modelType': 'factory',
'orientation': 'vertical',
'phaseShift': 0},
10008: {'type': 'gear', 'name': 'third',
'comment': '',
'parentEntId': 10004,
'pos': Point3(0, 30, 26.06),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'degreesPerSec': 40.0,
'gearScale': 25.0,
'modelType': 'factory',
'orientation': 'vertical',
'phaseShift': 0},
10009: {'type': 'gear', 'name': 'fourth',
'comment': '',
'parentEntId': 10004,
'pos': Point3(0, 45, 26.06),
'hpr': Point3(180, 0, 0),
'scale': 1,
'degreesPerSec': 47.0,
'gearScale': 25.0,
'modelType': 'factory',
'orientation': 'vertical',
'phaseShift': 0},
20013: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20012,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.25,
'attackRadius': 15,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 15,
'velocity': 4},
20014: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20010,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.25,
'attackRadius': 15,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 15,
'velocity': 4},
20016: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20015,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.25,
'attackRadius': 15,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 15,
'velocity': 4},
20041: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20040,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': 20026,
'goonType': 'pg',
'gridId': 20025,
'hFov': 80.0,
'strength': 20,
'velocity': 6.0},
20043: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20042,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': 20024,
'goonType': 'pg',
'gridId': 20025,
'hFov': 80.0,
'strength': 20,
'velocity': 5.0},
20046: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20044,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': 20024,
'goonType': 'pg',
'gridId': 20025,
'hFov': 70,
'strength': 20,
'velocity': 6.0},
20047: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20045,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': 20026,
'goonType': 'pg',
'gridId': 20025,
'hFov': 80.0,
'strength': 20,
'velocity': 6.0},
20052: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20051,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 12.0,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 7,
'velocity': 4.0},
20054: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20053,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': 20027,
'goonType': 'pg',
'gridId': 20025,
'hFov': 80.0,
'strength': 20,
'velocity': 5.5},
20056: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20055,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': 20028,
'goonType': 'pg',
'gridId': 20025,
'hFov': 70,
'strength': 20,
'velocity': 6.0},
20060: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20059,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': 20028,
'goonType': 'pg',
'gridId': 20025,
'hFov': 90.0,
'strength': 20,
'velocity': 6.5},
20062: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20061,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': 20027,
'goonType': 'pg',
'gridId': 20025,
'hFov': 70,
'strength': 20,
'velocity': 7.5},
20071: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20070,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 15,
'velocity': 6.0},
20072: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20069,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 80.0,
'strength': 15,
'velocity': 6.0},
20074: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20073,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.25,
'attackRadius': 15,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 15,
'velocity': 4},
20089: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20084,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 15,
'velocity': 4},
20115: {'type': 'goonClipPlane', 'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(0, -7.4, 0),
'hpr': Point3(-90, 0, 0),
'scale': Point3(5, 5, 5),
'goonId': 20052},
20116: {'type': 'goonClipPlane', 'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(0, -58, 0),
'hpr': Point3(90, 0, 0),
'scale': 1,
'goonId': None},
20117: {'type': 'goonClipPlane', 'name': '<unnamed>',
'comment': '',
'parentEntId': 24,
'pos': Point3(0, -29, 0),
'hpr': Point3(90, 0, 0),
'scale': 1,
'goonId': None},
20118: {'type': 'goonClipPlane', 'name': '<unnamed>',
'comment': '',
'parentEntId': 24,
'pos': Point3(-52, 0, 5),
'hpr': Point3(0, 0, 0),
'scale': 1,
'goonId': None},
20025: {'type': 'grid', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20023,
'pos': Point3(-48.4442, -24.9385, 0),
'scale': 1,
'cellSize': 3,
'numCol': 30,
'numRow': 16},
20080: {'type': 'grid', 'name': '<unnamed>',
'comment': '',
'parentEntId': 5,
'pos': Point3(1.5, -10.7, 0),
'scale': 1,
'cellSize': 3,
'numCol': 2,
'numRow': 5},
20090: {'type': 'grid', 'name': '<unnamed>',
'comment': '',
'parentEntId': 17,
'pos': Point3(-6.5, -111, 0),
'scale': 1,
'cellSize': 3,
'numCol': 2,
'numRow': 9},
20011: {'type': 'healBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20,
'pos': Point3(-2.06235, 20.2198, 0.025),
'hpr': Vec3(-19.2153, 0, 0),
'scale': 1,
'rewardPerGrab': 10,
'rewardPerGrabMax': 0},
20092: {'type': 'healBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20087,
'pos': Point3(-1, -1.5, 7),
'hpr': Vec3(-191.79, 0, 0),
'scale': 1,
'rewardPerGrab': 5,
'rewardPerGrabMax': 0},
10041: {'type': 'lift', 'name': 'westLift',
'comment': '',
'parentEntId': 60,
'pos': Point3(0, 0, 0.0641994),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'autoMoveDelay': 5,
'duration': 7.0,
'endBoardSides': [
'back'],
'endGuardName': 'topGuard',
'endPos': Point3(0, 0, 165),
'floorName': 'elevator_floor',
'modelPath': 'phase_9/models/cogHQ/Elevator',
'modelScale': Vec3(1, 1, 1),
'moveDelay': 1,
'startBoardSides': [
'front'],
'startGuardName': 'bottomGuard',
'startPos': Point3(0, 0, 0)},
10048: {'type': 'lift', 'name': 'eastLift',
'comment': '',
'parentEntId': 61,
'pos': Point3(0, -0.684064, 0.589322),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'autoMoveDelay': 5.0,
'duration': 7.0,
'endBoardSides': [
'front',
'back',
'left',
'right'],
'endGuardName': 'topGuard',
'endPos': Point3(0, 0, 165),
'floorName': 'elevator_floor',
'modelPath': 'phase_9/models/cogHQ/Elevator',
'modelScale': Vec3(1, 1, 1),
'moveDelay': 1,
'startBoardSides': [
'front'],
'startGuardName': 'bottomGuard',
'startPos': Point3(0, 0, 0)},
10057: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10043,
'input1Event': 30009,
'input2Event': 30000,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
10059: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10058,
'input1Event': 10057,
'input2Event': 30011,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
10061: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10060,
'input1Event': 10059,
'input2Event': 30013,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
10063: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10062,
'input1Event': 60033,
'input2Event': 30009,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
30068: {'type': 'logicGate', 'name': 'door 116 and door 118',
'comment': '',
'parentEntId': 30069,
'input1Event': 30013,
'input2Event': 30011,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60023: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60021,
'input1Event': 30011,
'input2Event': 30009,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60025: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60022,
'input1Event': 60023,
'input2Event': 30013,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60028: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60026,
'input1Event': 30011,
'input2Event': 30005,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60029: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60027,
'input1Event': 30011,
'input2Event': 30012,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60030: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 30071,
'input1Event': 30011,
'input2Event': 30009,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60033: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 30073,
'input1Event': 30013,
'input2Event': 30011,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60034: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 30075,
'input1Event': 30013,
'input2Event': 30005,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60035: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60032,
'input1Event': 30013,
'input2Event': 30012,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60037: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60036,
'input1Event': 30005,
'input2Event': 30012,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60039: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60038,
'input1Event': 30012,
'input2Event': 30005,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60041: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60040,
'input1Event': 30020,
'input2Event': 30019,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60043: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60042,
'input1Event': 30019,
'input2Event': 30020,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60047: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60045,
'input1Event': 10002,
'input2Event': 30019,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60049: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60048,
'input1Event': 30003,
'input2Event': 10052,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60051: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60050,
'input1Event': 30001,
'input2Event': 10052,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60053: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60052,
'input1Event': 30021,
'input2Event': 30016,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60055: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60054,
'input1Event': 30002,
'input2Event': 30021,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60057: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60056,
'input1Event': 30016,
'input2Event': 30021,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60059: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60058,
'input1Event': 30012,
'input2Event': 30011,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60061: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60060,
'input1Event': 30012,
'input2Event': 30013,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60064: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60062,
'input1Event': 30005,
'input2Event': 30011,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60065: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60063,
'input1Event': 30005,
'input2Event': 30013,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60074: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60072,
'input1Event': 10052,
'input2Event': 30003,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60075: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60073,
'input1Event': 10052,
'input2Event': 30001,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60076: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60020,
'input1Event': 30021,
'input2Event': 30002,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60078: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60077,
'input1Event': 30021,
'input2Event': 30002,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60080: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60079,
'input1Event': 60057,
'input2Event': 30002,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60082: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60081,
'input1Event': 60055,
'input2Event': 30016,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60084: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60083,
'input1Event': 30004,
'input2Event': 30014,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60086: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60085,
'input1Event': 30006,
'input2Event': 30008,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60091: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60087,
'input1Event': 60088,
'input2Event': 30001,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60093: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60092,
'input1Event': 30001,
'input2Event': 60088,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60100: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60099,
'input1Event': 60095,
'input2Event': 10002,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60119: {'type': 'logicGate', 'name': 'open sesame Duct & Lobby',
'comment': 'links together the Duct Room and Lobby buttons',
'parentEntId': 0,
'input1Event': 30076,
'input2Event': 60118,
'isInput1': 0,
'isInput2': 0,
'logicType': 'or'},
60132: {'type': 'logicGate', 'name': 'open sesame Entrances',
'comment': 'links together the buttons in the two entrances',
'parentEntId': 0,
'input1Event': 30040,
'input2Event': 60102,
'isInput1': 0,
'isInput2': 0,
'logicType': 'or'},
60138: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60137,
'input1Event': 60095,
'input2Event': 60094,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60141: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60139,
'input1Event': 10002,
'input2Event': 60094,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60142: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60140,
'input1Event': 10002,
'input2Event': 60095,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
10001: {'type': 'model', 'name': 'dropshadow',
'comment': '',
'parentEntId': 10006,
'pos': Point3(0, 0, -25),
'hpr': Vec3(0, 0, 0),
'scale': Point3(2, 1.5, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_3/models/props/drop_shadow'},
10012: {'type': 'model', 'name': 'backCrate',
'comment': '',
'parentEntId': 10067,
'pos': Point3(0, -5.81496, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 2),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_9/models/cogHQ/metal_crateB'},
10033: {'type': 'model', 'name': 'dropshadow',
'comment': '',
'parentEntId': 10007,
'pos': Point3(0, 0, -25),
'hpr': Vec3(0, 0, 0),
'scale': Point3(2, 1.5, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_3/models/props/drop_shadow'},
10045: {'type': 'model', 'name': 'dropshadow',
'comment': '',
'parentEntId': 10008,
'pos': Point3(0, 0, -25),
'hpr': Vec3(0, 0, 0),
'scale': Point3(2, 1.5, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_3/models/props/drop_shadow'},
10046: {'type': 'model', 'name': 'dropshadow',
'comment': '',
'parentEntId': 10009,
'pos': Point3(0, 0, -25),
'hpr': Vec3(0, 0, 0),
'scale': Point3(2, 1.5, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_3/models/props/drop_shadow'},
10050: {'type': 'model', 'name': 'sky',
'comment': '',
'parentEntId': 200,
'pos': Point3(-142.02, 437.227, 0.922491),
'hpr': Point3(0, 0, 0),
'scale': Point3(2.5, 2.5, 2),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_9/models/cogHQ/cog_sky'},
10066: {'type': 'model', 'name': 'frontCrate',
'comment': '',
'parentEntId': 10067,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_9/models/cogHQ/metal_crateB'},
10069: {'type': 'model', 'name': 'backCrate',
'comment': '',
'parentEntId': 10065,
'pos': Point3(0, -5.81496, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 2),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_9/models/cogHQ/metal_crateB'},
10070: {'type': 'model', 'name': 'frontCrate',
'comment': '',
'parentEntId': 10065,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_9/models/cogHQ/metal_crateB'},
20082: {'type': 'model', 'name': '<unnamed>',
'comment': '',
'parentEntId': 5,
'pos': Point3(4.50815, 11.6508, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(0.92, 0.92, 0.92),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_9/models/cogHQ/metal_crateB'},
20083: {'type': 'model', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20082,
'pos': Point3(0, 0, 5.5),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_9/models/cogHQ/metal_crateB'},
20088: {'type': 'model', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20087,
'pos': Point3(1, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1.3, 1, 1.3),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_9/models/cogHQ/metal_crateB'},
10000: {'type': 'nodepath', 'name': 'gearGauntletObstacle',
'comment': '',
'parentEntId': 10027,
'pos': Point3(0, 0, 0),
'hpr': Point3(0, 0, 0),
'scale': 1},
10004: {'type': 'nodepath', 'name': 'gearGauntlet',
'comment': 'gears are staggered 15 ft in Y',
'parentEntId': 10000,
'pos': Point3(0, -23.25, 6.85),
'hpr': Point3(0, 0, 0),
'scale': 1},
10014: {'type': 'nodepath', 'name': 'cogs',
'comment': '',
'parentEntId': 4,
'pos': Point3(0, 34.07, 0),
'hpr': Point3(0, 0, 0),
'scale': 1},
10015: {'type': 'nodepath', 'name': 'paint mixer platforms',
'comment': '',
'parentEntId': 10,
'pos': Point3(0, 5.15136, -2),
'hpr': Point3(0, 0, 0),
'scale': 1},
10022: {'type': 'nodepath', 'name': 'gagBarrels',
'comment': '',
'parentEntId': 11,
'pos': Point3(11.2328, 14.7959, 0),
'hpr': Point3(0, 0, 0),
'scale': 1},
10023: {'type': 'nodepath', 'name': 'leftCogs',
'comment': '',
'parentEntId': 13,
'pos': Point3(-42.0363, 0, 0),
'hpr': Point3(0, 0, 0),
'scale': 1},
10027: {'type': 'nodepath', 'name': 'zoneNodeCompensate',
'comment': 'I think the ZoneNode was moved.',
'parentEntId': 19,
'pos': Point3(-0.426482, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
10030: {'type': 'nodepath', 'name': 'cogs',
'comment': '',
'parentEntId': 8,
'pos': Point3(2.5, 62.5, 10),
'hpr': Point3(0, 0, 0),
'scale': 1},
10032: {'type': 'nodepath', 'name': 'rightCogs',
'comment': '',
'parentEntId': 13,
'pos': Point3(46.88, 0, 0),
'hpr': Point3(0, 0, 0),
'scale': 1},
10034: {'type': 'nodepath', 'name': 'cogs',
'comment': '',
'parentEntId': 22,
'pos': Point3(0, 0, 0),
'hpr': Point3(180, 0, 0),
'scale': 1},
10036: {'type': 'nodepath', 'name': 'cogs',
'comment': '',
'parentEntId': 15,
'pos': Point3(5.5, 0, 0),
'hpr': Point3(161, 0, 0),
'scale': 1},
10037: {'type': 'nodepath', 'name': 'cogs',
'comment': '',
'parentEntId': 7,
'pos': Point3(3.1, -48.27, 0.05),
'hpr': Point3(0, 0, 0),
'scale': 1},
10040: {'type': 'nodepath', 'name': 'FactoryBoss',
'comment': '',
'parentEntId': 24,
'pos': Point3(0, 68.4457, 9.5669),
'hpr': Point3(180, 0, 0),
'scale': 1},
10047: {'type': 'nodepath', 'name': 'battleCell',
'comment': '',
'parentEntId': 34,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
10056: {'type': 'nodepath', 'name': 'sounds',
'comment': '',
'parentEntId': 10000,
'pos': Point3(0, 0, 15),
'hpr': Vec3(0, 0, 0),
'scale': 1},
10064: {'type': 'nodepath', 'name': 'battleCell',
'comment': '',
'parentEntId': 32,
'pos': Point3(0, -5.20447, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
10065: {'type': 'nodepath', 'name': 'backSteps',
'comment': '',
'parentEntId': 10000,
'pos': Point3(0, 56.2652, 0),
'hpr': Point3(0, 0, 0),
'scale': Point3(1.5, 1.3, 0.73)},
10067: {'type': 'nodepath', 'name': 'frontSteps',
'comment': '',
'parentEntId': 10000,
'pos': Point3(0, -44.7196, 0),
'hpr': Point3(180, 0, 0),
'scale': Point3(1.5, 1.3, 0.729057)},
10068: {'type': 'nodepath', 'name': 'battleCell',
'comment': '',
'parentEntId': 33,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20000: {'type': 'nodepath', 'name': 'stompers',
'comment': '',
'parentEntId': 17,
'pos': Point3(0.75, 0, 0.5),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20018: {'type': 'nodepath', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10014,
'pos': Point3(0, -24, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20019: {'type': 'nodepath', 'name': 'cogsJoin',
'comment': '',
'parentEntId': 10030,
'pos': Point3(16, 2, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20022: {'type': 'nodepath', 'name': 'StomperButtonsNodepath',
'comment': '',
'parentEntId': 24,
'pos': Point3(-11.75, -35.8, 14.9),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20023: {'type': 'nodepath', 'name': '<unnamed>',
'comment': '',
'parentEntId': 24,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20037: {'type': 'nodepath', 'name': 'SignatureGoonNP',
'comment': '',
'parentEntId': 24,
'pos': Point3(-48.4442, -24.9385, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20058: {'type': 'nodepath', 'name': 'SigRoomCogs',
'comment': '',
'parentEntId': 24,
'pos': Point3(-1.0928, -45, 14.99),
'hpr': Point3(90, 0, 0),
'scale': 1},
20087: {'type': 'nodepath', 'name': '<unnamed>',
'comment': '',
'parentEntId': 17,
'pos': Point3(-4, -117, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20094: {'type': 'nodepath', 'name': 'cogs',
'comment': '',
'parentEntId': 34,
'pos': Point3(-0.720506, 27.5461, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20095: {'type': 'nodepath', 'name': 'cogs',
'comment': '',
'parentEntId': 32,
'pos': Point3(0, 0, 0),
'hpr': Point3(0, 0, 0),
'scale': 1},
20096: {'type': 'nodepath', 'name': 'cogs',
'comment': '',
'parentEntId': 33,
'pos': Point3(4.84921, 8.74482, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
26900: {'type': 'nodepath', 'name': 'centercog',
'comment': 'Legal Eagle Center Silo.',
'parentEntId': 35,
'pos': Point3(0, 0, 0),
'hpr': Point3(0, 0, 0),
'scale': 1},
10017: {'type': 'paintMixer', 'name': 'fifth',
'comment': '',
'parentEntId': 10015,
'pos': Point3(5.24, 23.52, 8),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'floorName': 'PaintMixerFloorCollision',
'modelPath': 'phase_9/models/cogHQ/PaintMixer',
'modelScale': Point3(0.8, 0.8, 0.8),
'motion': 'easeInOut',
'offset': Point3(-12, -6, 0),
'period': 8.0,
'phaseShift': 0.5,
'shaftScale': 1,
'waitPercent': 0.1},
10018: {'type': 'paintMixer', 'name': 'fourth',
'comment': '',
'parentEntId': 10015,
'pos': Point3(-12.1, 3, 8),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'floorName': 'PaintMixerFloorCollision',
'modelPath': 'phase_9/models/cogHQ/PaintMixer',
'modelScale': Point3(0.8, 0.8, 0.8),
'motion': 'easeInOut',
'offset': Point3(0, -6, 15),
'period': 8.0,
'phaseShift': 0.0,
'shaftScale': 2.5,
'waitPercent': 0.1},
10019: {'type': 'paintMixer', 'name': 'third',
'comment': '',
'parentEntId': 10015,
'pos': Point3(-3.85419, -7.75751, 22.5836),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'floorName': 'PaintMixerFloorCollision',
'modelPath': 'phase_9/models/cogHQ/PaintMixer',
'modelScale': Point3(0.8, 0.8, 0.8),
'motion': 'easeInOut',
'offset': Point3(7, 0, 0),
'period': 8.0,
'phaseShift': 0.0,
'shaftScale': 2.5,
'waitPercent': 0.1},
10020: {'type': 'paintMixer', 'name': 'second',
'comment': '',
'parentEntId': 10015,
'pos': Point3(16.01, -6.47, 23),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'floorName': 'PaintMixerFloorCollision',
'modelPath': 'phase_9/models/cogHQ/PaintMixer',
'modelScale': Point3(0.8, 0.8, 0.8),
'motion': 'easeInOut',
'offset': Point3(-4, -8, -15),
'period': 8.0,
'phaseShift': 0.0,
'shaftScale': 2.5,
'waitPercent': 0.1},
10054: {'type': 'paintMixer', 'name': 'first',
'comment': '',
'parentEntId': 10015,
'pos': Point3(-10, -26.1, 8),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'floorName': 'PaintMixerFloorCollision',
'modelPath': 'phase_9/models/cogHQ/PaintMixer',
'modelScale': Point3(0.8, 0.8, 0.8),
'motion': 'easeInOut',
'offset': Point3(15, 0, 0),
'period': 8.0,
'phaseShift': 0.0,
'shaftScale': 1,
'waitPercent': 0.1},
20008: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 13,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
20009: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 17,
'pathScale': 1.0},
20010: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 21,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 36,
'pathScale': 1.0},
20012: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 21,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 34,
'pathScale': 1.0},
20015: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 21,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 37,
'pathScale': 1.0},
20038: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 15,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 38,
'pathScale': 1.0},
20039: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 7,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 12,
'pathScale': 1.0},
20040: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(41.5, 33.5, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
20042: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(15, 34, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
20044: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(1.5, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'pathIndex': 6,
'pathScale': 1.0},
20045: {'type': 'path', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 1),
'pathIndex': 7,
'pathScale': 1.0},
20049: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 7,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 13,
'pathScale': 1.0},
20051: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(1, -24, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 42,
'pathScale': 1.0},
20053: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 8,
'pathScale': 1.0},
20055: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 9,
'pathScale': 1.0},
20059: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 10,
'pathScale': 1.0},
20061: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 11,
'pathScale': 1.0},
20067: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 15,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 39,
'pathScale': 1.0},
20068: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 15,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 40,
'pathScale': 1.0},
20069: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 9,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 5,
'pathScale': 1.0},
20070: {'type': 'path', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 9,
'pos': Point3(1, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 5,
'pathScale': 1.0},
20073: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 21,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 35,
'pathScale': 1.0},
20075: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 7,
'pos': Point3(4, 4, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 14,
'pathScale': 1.0},
20076: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 8,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 15,
'pathScale': 1.0},
20077: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 8,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 16,
'pathScale': 1.0},
20078: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 18,
'pathScale': 1.0},
20079: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
20084: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 9,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 41,
'pathScale': 1.0},
20097: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 34,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 19,
'pathScale': 1.0},
20098: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 34,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 20,
'pathScale': 1.0},
20099: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 34,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 21,
'pathScale': 1.0},
20100: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 33,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 22,
'pathScale': 1.0},
20101: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 33,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 23,
'pathScale': 1.0},
20102: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 33,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 24,
'pathScale': 1.0},
20103: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 32,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 25,
'pathScale': 1.0},
20104: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 32,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 26,
'pathScale': 1.0},
20105: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 32,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 27,
'pathScale': 1.0},
20106: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 13,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 28,
'pathScale': 1.0},
20107: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 13,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 29,
'pathScale': 1.0},
20108: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 13,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 30,
'pathScale': 1.0},
20109: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 13,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 31,
'pathScale': 1.0},
20110: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 13,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 32,
'pathScale': 1.0},
20111: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 13,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 33,
'pathScale': 1.0},
60133: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 22,
'pos': Point3(-10, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
60134: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 22,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
60135: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 22,
'pos': Point3(10, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
10042: {'type': 'propSpinner', 'name': '<unnamed>',
'comment': '',
'parentEntId': 7},
20001: {'type': 'stomper', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20000,
'pos': Point3(0, 0, 0),
'hpr': Point3(0, 0, 0),
'scale': 1,
'crushCellId': None,
'damage': 3,
'headScale': Point3(7, 5, 7),
'modelPath': 0,
'motion': 3,
'period': 4.0,
'phaseShift': 0.0,
'range': 30.0,
'shaftScale': Point3(0.5, 12, 0.5),
'soundLen': 0,
'soundOn': 1,
'soundPath': 2,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20002: {'type': 'stomper', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 20000,
'pos': Point3(0, -14.3294, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'crushCellId': None,
'damage': 3,
'headScale': Point3(7, 5, 7),
'modelPath': 0,
'motion': 3,
'period': 2.0,
'phaseShift': 0.0,
'range': 10.0,
'shaftScale': Point3(0.5, 12, 0.5),
'soundLen': 0,
'soundOn': 1,
'soundPath': 2,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20003: {'type': 'stomper', 'name': 'copy of copy of <unnamed>',
'comment': '',
'parentEntId': 20000,
'pos': Point3(0, -28.3252, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'crushCellId': None,
'damage': 3,
'headScale': Point3(7, 5, 7),
'modelPath': 0,
'motion': 3,
'period': 2.0,
'phaseShift': 0.5,
'range': 10.0,
'shaftScale': Point3(0.5, 12, 0.5),
'soundLen': 0,
'soundOn': 1,
'soundPath': 2,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20004: {'type': 'stomper', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 20000,
'pos': Point3(-3.5, 16.2588, 0),
'hpr': Point3(0, 0, 0),
'scale': 1,
'crushCellId': None,
'damage': 3,
'headScale': Point3(3.5, 5, 3.5),
'modelPath': 0,
'motion': 3,
'period': 3.0001373423482587,
'phaseShift': 0.0,
'range': 15.0,
'shaftScale': Point3(0.71, 12, 0.71),
'soundLen': 0,
'soundOn': 1,
'soundPath': 0,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20005: {'type': 'stomper', 'name': 'copy of copy of <unnamed>',
'comment': '',
'parentEntId': 20000,
'pos': Point3(3.5, 16.2588, 0),
'hpr': Point3(0, 0, 0),
'scale': 1,
'crushCellId': None,
'damage': 3,
'headScale': Point3(3.5, 5, 3.5),
'modelPath': 0,
'motion': 3,
'period': 1.5,
'phaseShift': 0.0,
'range': 15.0,
'shaftScale': Point3(0.71, 12, 0.71),
'soundLen': 0,
'soundOn': 1,
'soundPath': 1,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20006: {'type': 'stomper', 'name': 'copy of copy of copy of <unnamed>',
'comment': '',
'parentEntId': 20000,
'pos': Point3(-3.5, 23.4392, 0),
'hpr': Point3(0, 0, 0),
'scale': 1,
'crushCellId': None,
'damage': 3,
'headScale': Point3(3.5, 5, 3.5),
'modelPath': 0,
'motion': 3,
'period': 1.5,
'phaseShift': 0.5,
'range': 15.0,
'shaftScale': Point3(0.71, 12, 0.71),
'soundLen': 0,
'soundOn': 1,
'soundPath': 0,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20007: {'type': 'stomper', 'name': 'copy of copy of copy of copy of <unnamed>',
'comment': '',
'parentEntId': 20000,
'pos': Point3(3.5, 23.4392, 0),
'hpr': Point3(0, 0, 0),
'scale': 1,
'crushCellId': None,
'damage': 3,
'headScale': Point3(3.5, 5, 3.5),
'modelPath': 0,
'motion': 3,
'period': 3.0,
'phaseShift': 0.5,
'range': 15.0,
'shaftScale': Point3(0.71, 12, 0.71),
'soundLen': 0,
'soundOn': 1,
'soundPath': 0,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20029: {'type': 'stomper', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20025,
'pos': Point3(4.5, 43.5, 0.25),
'hpr': Point3(0, 0, 0),
'scale': 1,
'animateShadow': 0,
'crushCellId': 20024,
'damage': 3,
'headScale': Point3(3, 2, 3),
'modelPath': 0,
'motion': 5,
'period': 2.0,
'phaseShift': 0.0,
'range': 12.0,
'shaftScale': Point3(0.66, 37.5, 0.66),
'soundLen': 0,
'soundOn': 1,
'soundPath': 2,
'style': 'vertical',
'switchId': 20033,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20030: {'type': 'stomper', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 20025,
'pos': Point3(31.5, 43.5, 0.25),
'hpr': Point3(0, 0, 0),
'scale': 1,
'animateShadow': 0,
'crushCellId': 20026,
'damage': 3,
'headScale': Point3(3, 2, 3),
'modelPath': 0,
'motion': 5,
'period': 2.0,
'phaseShift': 0.0,
'range': 12.0,
'shaftScale': Point3(0.66, 37.5, 0.66),
'soundLen': 0,
'soundOn': 1,
'soundPath': 2,
'style': 'vertical',
'switchId': 20034,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20031: {'type': 'stomper', 'name': 'copy of copy of <unnamed>',
'comment': '',
'parentEntId': 20025,
'pos': Point3(64.5, 43.5, 0.25),
'hpr': Point3(0, 0, 0),
'scale': 1,
'animateShadow': 0,
'crushCellId': 20027,
'damage': 3,
'headScale': Point3(3, 2, 3),
'modelPath': 0,
'motion': 5,
'period': 2.0,
'phaseShift': 0.0,
'range': 12.0,
'shaftScale': Point3(0.66, 37.5, 0.66),
'soundLen': 0,
'soundOn': 1,
'soundPath': 2,
'style': 'vertical',
'switchId': 20035,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20032: {'type': 'stomper', 'name': 'copy of copy of copy of <unnamed>',
'comment': '',
'parentEntId': 20025,
'pos': Point3(85.5, 43.5, 0.25),
'hpr': Point3(0, 0, 0),
'scale': 1,
'animateShadow': 0,
'crushCellId': 20028,
'damage': 3,
'headScale': Point3(3, 2, 3),
'modelPath': 0,
'motion': 5,
'period': 2.0,
'phaseShift': 0.0,
'range': 12.0,
'shaftScale': Point3(0.66, 37.5, 0.66),
'soundLen': 0,
'soundOn': 1,
'soundPath': 2,
'style': 'vertical',
'switchId': 20036,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20050: {'type': 'trigger', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20022,
'pos': Point3(10, 0, 10),
'hpr': Vec3(0, 0, 0),
'scale': Point3(20, 20, 20),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': 1,
'triggerName': 'signatureRoomView'},
20057: {'type': 'trigger', 'name': '<unnamed>',
'comment': '',
'parentEntId': 23,
'pos': Point3(3, -8.8, 15.5091),
'hpr': Vec3(0, 0, 0),
'scale': Point3(25, 25, 25),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': 1,
'triggerName': 'lookoutTrigger'},
30077: {'type': 'trigger', 'name': 'button cutscene',
'comment': '',
'parentEntId': 3,
'pos': Point3(-4, 8, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': 1,
'triggerName': ''},
10013: {'type': 'visibilityExtender', 'name': 'intoEastSilo',
'comment': '',
'parentEntId': 60009,
'event': 60101,
'newZones': [
61]},
10043: {'type': 'visibilityExtender', 'name': 'beyondLobby',
'comment': '',
'parentEntId': 10049,
'event': 10057,
'newZones': [
5, 116]},
10044: {'type': 'visibilityExtender', 'name': 'intoEntrance1',
'comment': '',
'parentEntId': 10051,
'event': 30000,
'newZones': [
3]},
10058: {'type': 'visibilityExtender', 'name': 'intoFarHallway',
'comment': '',
'parentEntId': 10049,
'event': 10059,
'newZones': [
6, 118]},
10060: {'type': 'visibilityExtender', 'name': 'intoLookout',
'comment': '',
'parentEntId': 10049,
'event': 10061,
'newZones': [
23]},
10062: {'type': 'visibilityExtender', 'name': 'intoLobby',
'comment': '',
'parentEntId': 60031,
'event': 10063,
'newZones': [
4, 114]},
30022: {'type': 'visibilityExtender', 'name': 'intoLobby',
'comment': '',
'parentEntId': 10049,
'event': 30000,
'newZones': [
4, 113]},
30023: {'type': 'visibilityExtender', 'name': 'beyond door 106',
'comment': '',
'parentEntId': 60017,
'event': 30002,
'newZones': [
15, 126]},
30024: {'type': 'visibilityExtender', 'name': 'beyond door 106',
'comment': '',
'parentEntId': 60016,
'event': 30002,
'newZones': [
16]},
30025: {'type': 'visibilityExtender', 'name': 'beyond door 126',
'comment': '',
'parentEntId': 60016,
'event': 30021,
'newZones': [
14, 17, 121]},
30026: {'type': 'visibilityExtender', 'name': 'beyond door 121',
'comment': '',
'parentEntId': 60015,
'event': 30016,
'newZones': [
13, 119]},
30027: {'type': 'visibilityExtender', 'name': 'beyond door 126',
'comment': '',
'parentEntId': 60015,
'event': 30021,
'newZones': [
15, 106]},
30029: {'type': 'visibilityExtender', 'name': 'beyondLobby',
'comment': '',
'parentEntId': 10051,
'event': 30009,
'newZones': [
5, 116]},
30030: {'type': 'visibilityExtender', 'name': 'beyond door 113',
'comment': '',
'parentEntId': 60000,
'event': 30009,
'newZones': [
4, 114]},
30031: {'type': 'visibilityExtender', 'name': 'beyond door 116',
'comment': '',
'parentEntId': 60000,
'event': 30011,
'newZones': [
6,
109,
117,
118]},
30032: {'type': 'visibilityExtender', 'name': 'intoHallwayFromLobby',
'comment': '',
'parentEntId': 60001,
'event': 30011,
'newZones': [
5, 113]},
30033: {'type': 'visibilityExtender', 'name': 'intoBoilerRoom',
'comment': '',
'parentEntId': 60001,
'event': 30012,
'newZones': [
8]},
30034: {'type': 'visibilityExtender', 'name': 'intoLookout',
'comment': '',
'parentEntId': 60001,
'event': 30013,
'newZones': [
23, 39]},
30035: {'type': 'visibilityExtender', 'name': 'intoGearRoom',
'comment': '',
'parentEntId': 60001,
'event': 30005,
'newZones': [
7]},
30036: {'type': 'visibilityExtender', 'name': 'beyond door 109',
'comment': '',
'parentEntId': 60002,
'event': 30005,
'newZones': [
6,
116,
117,
118]},
30037: {'type': 'visibilityExtender', 'name': 'beyond door 110',
'comment': '',
'parentEntId': 60002,
'event': 30006,
'newZones': [
9,
25,
26,
33,
34,
35,
38,
41,
53,
112,
115,
200]},
30038: {'type': 'visibilityExtender', 'name': 'beyond door 117',
'comment': '',
'parentEntId': 60005,
'event': 30012,
'newZones': [
6,
109,
116,
118]},
30039: {'type': 'visibilityExtender', 'name': 'beyond door 108',
'comment': '',
'parentEntId': 60005,
'event': 30004,
'newZones': [
12,
21,
26,
34,
35,
40,
41,
53,
60,
119,
120,
200]},
30041: {'type': 'visibilityExtender', 'name': 'beyond door 110',
'comment': '',
'parentEntId': 60003,
'event': 30006,
'newZones': [
7]},
30042: {'type': 'visibilityExtender', 'name': 'beyond door 112',
'comment': '',
'parentEntId': 60003,
'event': 30008,
'newZones': [
10, 11]},
30043: {'type': 'visibilityExtender', 'name': 'intoWarehouse',
'comment': '',
'parentEntId': 60003,
'event': 30010,
'newZones': [
24, 39]},
30044: {'type': 'visibilityExtender', 'name': 'beyond door 112',
'comment': '',
'parentEntId': 60004,
'event': 30008,
'newZones': [
9,
25,
26,
33,
34,
35,
38,
41,
53,
110,
115,
200]},
30046: {'type': 'visibilityExtender', 'name': 'beyond door 112',
'comment': '',
'parentEntId': 60066,
'event': 30008,
'newZones': [
9,
25,
26,
41,
200]},
30049: {'type': 'visibilityExtender', 'name': 'beyond door 119',
'comment': '',
'parentEntId': 60013,
'event': 30014,
'newZones': [
12,
21,
23,
26,
33,
34,
35,
41,
53,
60,
108,
112,
120,
200]},
30050: {'type': 'visibilityExtender', 'name': 'beyond door 121',
'comment': '',
'parentEntId': 60013,
'event': 30016,
'newZones': [
14, 17, 126]},
30051: {'type': 'visibilityExtender', 'name': 'beyond door 121',
'comment': '',
'parentEntId': 60014,
'event': 30016,
'newZones': [
13, 119]},
30052: {'type': 'visibilityExtender', 'name': 'beyond door 126',
'comment': '',
'parentEntId': 60014,
'event': 30021,
'newZones': [
15, 106]},
30055: {'type': 'visibilityExtender', 'name': 'beyond door 105',
'comment': '',
'parentEntId': 60019,
'event': 30001,
'newZones': [
27, 127]},
30056: {'type': 'visibilityExtender', 'name': 'beyond door 105',
'comment': '',
'parentEntId': 60018,
'event': 30001,
'newZones': [
27, 127]},
30057: {'type': 'visibilityExtender', 'name': 'beyond door 103',
'comment': '',
'parentEntId': 60018,
'event': 60088,
'newZones': [
17]},
30059: {'type': 'visibilityExtender', 'name': 'beyond door 108',
'comment': '',
'parentEntId': 60006,
'event': 30004,
'newZones': [
8, 117]},
30060: {'type': 'visibilityExtender', 'name': 'beyond door 119',
'comment': '',
'parentEntId': 60006,
'event': 30014,
'newZones': [
13, 121]},
30061: {'type': 'visibilityExtender', 'name': 'intoWarehouse',
'comment': '',
'parentEntId': 60006,
'event': 30015,
'newZones': [
24, 39]},
30062: {'type': 'visibilityExtender', 'name': 'beyond door 107',
'comment': '',
'parentEntId': 60024,
'event': 30003,
'newZones': [
27, 127]},
30063: {'type': 'visibilityExtender', 'name': 'intoHallway',
'comment': '',
'parentEntId': 60031,
'event': 30013,
'newZones': [
6,
109,
116,
117]},
30064: {'type': 'visibilityExtender', 'name': 'beyondLowerWestDoor',
'comment': '',
'parentEntId': 60007,
'event': 30015,
'newZones': [
12,
21,
26,
34,
40,
41,
53,
200]},
30066: {'type': 'visibilityExtender', 'name': 'beyondLowerEastDoor',
'comment': '',
'parentEntId': 60007,
'event': 30010,
'newZones': [
9,
25,
26,
33,
38,
41,
200]},
30067: {'type': 'visibilityExtender', 'name': 'beyondUpperEastDoor',
'comment': '',
'parentEntId': 60007,
'event': 30019,
'newZones': [
9,
33,
38,
41,
200,
222]},
30069: {'type': 'visibilityExtender', 'name': 'beyond door 118',
'comment': '',
'parentEntId': 60000,
'event': 30068,
'newZones': [
23]},
30071: {'type': 'visibilityExtender', 'name': 'intoLobby',
'comment': '',
'parentEntId': 60001,
'event': 60030,
'newZones': [
4, 114]},
30073: {'type': 'visibilityExtender', 'name': 'intoLobbyHallway',
'comment': '',
'parentEntId': 60031,
'event': 60033,
'newZones': [
5, 113]},
30075: {'type': 'visibilityExtender', 'name': 'intoGearRoom',
'comment': '',
'parentEntId': 60031,
'event': 60034,
'newZones': [
7]},
60008: {'type': 'visibilityExtender', 'name': 'beyondUpperWestDoor',
'comment': '',
'parentEntId': 60007,
'event': 30020,
'newZones': [
12,
21,
34,
40,
41,
60,
127,
200]},
60010: {'type': 'visibilityExtender', 'name': 'intoWarehouse',
'comment': '',
'parentEntId': 60009,
'event': 30019,
'newZones': [
24, 39, 125]},
60012: {'type': 'visibilityExtender', 'name': 'beyond door 125',
'comment': '',
'parentEntId': 60011,
'event': 30020,
'newZones': [
24, 39, 124]},
60020: {'type': 'visibilityExtender', 'name': 'beyond door 106',
'comment': '',
'parentEntId': 60015,
'event': 60076,
'newZones': [
16]},
60021: {'type': 'visibilityExtender', 'name': 'beyond door 116',
'comment': '',
'parentEntId': 10051,
'event': 60023,
'newZones': [
6, 118]},
60022: {'type': 'visibilityExtender', 'name': 'beyond door 118',
'comment': '',
'parentEntId': 10051,
'event': 60025,
'newZones': [
23]},
60026: {'type': 'visibilityExtender', 'name': 'beyond door 109',
'comment': '',
'parentEntId': 60000,
'event': 60028,
'newZones': [
7]},
60027: {'type': 'visibilityExtender', 'name': 'beyond door 117',
'comment': '',
'parentEntId': 60000,
'event': 60029,
'newZones': [
8]},
60032: {'type': 'visibilityExtender', 'name': 'intoBoilerRoom',
'comment': '',
'parentEntId': 60031,
'event': 60035,
'newZones': [
8]},
60036: {'type': 'visibilityExtender', 'name': 'beyond door 117',
'comment': '',
'parentEntId': 60002,
'event': 60037,
'newZones': [
8]},
60038: {'type': 'visibilityExtender', 'name': 'beyond door 109',
'comment': '',
'parentEntId': 60005,
'event': 60039,
'newZones': [
7]},
60040: {'type': 'visibilityExtender', 'name': 'beyond door 124',
'comment': '',
'parentEntId': 60011,
'event': 60041,
'newZones': [
38]},
60042: {'type': 'visibilityExtender', 'name': 'beyondWarehouse',
'comment': '',
'parentEntId': 60009,
'event': 60043,
'newZones': [
12, 200]},
60045: {'type': 'visibilityExtender', 'name': 'beyond door 124',
'comment': '',
'parentEntId': 60044,
'event': 60047,
'newZones': [
24]},
60046: {'type': 'visibilityExtender', 'name': 'beyond door 128',
'comment': '',
'parentEntId': 60044,
'event': 10002,
'newZones': [
31]},
60048: {'type': 'visibilityExtender', 'name': 'beyond door 127',
'comment': '',
'parentEntId': 60024,
'event': 60049,
'newZones': [
21, 200]},
60050: {'type': 'visibilityExtender', 'name': 'beyond door 127',
'comment': '',
'parentEntId': 60019,
'event': 60051,
'newZones': [
21, 34, 200]},
60052: {'type': 'visibilityExtender', 'name': 'beyond door 121',
'comment': '',
'parentEntId': 60016,
'event': 60053,
'newZones': [
13, 119]},
60054: {'type': 'visibilityExtender', 'name': 'beyond door 126',
'comment': '',
'parentEntId': 60017,
'event': 60055,
'newZones': [
14, 17, 121]},
60056: {'type': 'visibilityExtender', 'name': 'beyond door 126',
'comment': '',
'parentEntId': 60013,
'event': 60057,
'newZones': [
15, 106]},
60058: {'type': 'visibilityExtender', 'name': 'beyond door 116',
'comment': '',
'parentEntId': 60005,
'event': 60059,
'newZones': [
5]},
60060: {'type': 'visibilityExtender', 'name': 'beyond door 118',
'comment': '',
'parentEntId': 60005,
'event': 60061,
'newZones': [
23]},
60062: {'type': 'visibilityExtender', 'name': 'beyond door 116',
'comment': '',
'parentEntId': 60002,
'event': 60064,
'newZones': [
5]},
60063: {'type': 'visibilityExtender', 'name': 'beyond door 118',
'comment': '',
'parentEntId': 60002,
'event': 60065,
'newZones': [
23]},
60068: {'type': 'visibilityExtender', 'name': 'beyond door 105',
'comment': '',
'parentEntId': 60067,
'event': 30001,
'newZones': [
18,
19,
20,
131]},
60069: {'type': 'visibilityExtender', 'name': 'beyond door 107',
'comment': '',
'parentEntId': 60067,
'event': 30003,
'newZones': [
22]},
60070: {'type': 'visibilityExtender', 'name': 'beyond door 127',
'comment': '',
'parentEntId': 60067,
'event': 10052,
'newZones': [
12,
21,
26,
34,
35,
40,
41,
53,
60,
200]},
60071: {'type': 'visibilityExtender', 'name': 'beyond door 127',
'comment': '',
'parentEntId': 60006,
'event': 10052,
'newZones': [
27, 105, 107]},
60072: {'type': 'visibilityExtender', 'name': 'beyond door 107',
'comment': '',
'parentEntId': 60006,
'event': 60074,
'newZones': [
22]},
60073: {'type': 'visibilityExtender', 'name': 'beyond door 105',
'comment': '',
'parentEntId': 60006,
'event': 60075,
'newZones': [
18]},
60077: {'type': 'visibilityExtender', 'name': 'beyond door 106',
'comment': '',
'parentEntId': 60014,
'event': 60078,
'newZones': [
16]},
60079: {'type': 'visibilityExtender', 'name': 'beyond door 106',
'comment': '',
'parentEntId': 60013,
'event': 60080,
'newZones': [
16]},
60081: {'type': 'visibilityExtender', 'name': 'beyond door 121',
'comment': '',
'parentEntId': 60017,
'event': 60082,
'newZones': [
13]},
60083: {'type': 'visibilityExtender', 'name': 'beyond door 119',
'comment': '',
'parentEntId': 60005,
'event': 60084,
'newZones': [
13]},
60085: {'type': 'visibilityExtender', 'name': 'beyond door 112',
'comment': '',
'parentEntId': 60002,
'event': 60086,
'newZones': [
10]},
60087: {'type': 'visibilityExtender', 'name': 'beyond door 105',
'comment': '',
'parentEntId': 60015,
'event': 60091,
'newZones': [
27]},
60089: {'type': 'visibilityExtender', 'name': 'beyond door 103',
'comment': '',
'parentEntId': 60019,
'event': 60088,
'newZones': [
17]},
60090: {'type': 'visibilityExtender', 'name': 'beyond door 103',
'comment': '',
'parentEntId': 60015,
'event': 60088,
'newZones': [
18, 19, 105]},
60092: {'type': 'visibilityExtender', 'name': 'beyond door 103',
'comment': '',
'parentEntId': 60067,
'event': 60093,
'newZones': [
17]},
60097: {'type': 'visibilityExtender', 'name': 'beyond door 130',
'comment': '',
'parentEntId': 60096,
'event': 60095,
'newZones': [
33,
34,
35,
36,
37,
60,
61,
128,
129,
200]},
60098: {'type': 'visibilityExtender', 'name': 'beyond door 130',
'comment': '',
'parentEntId': 60044,
'event': 60095,
'newZones': [
30]},
60099: {'type': 'visibilityExtender', 'name': 'beyond door 128',
'comment': '',
'parentEntId': 60096,
'event': 60100,
'newZones': [
31]},
60106: {'type': 'visibilityExtender', 'name': 'beyond door 129',
'comment': '',
'parentEntId': 60011,
'event': 60094,
'newZones': [
32]},
60107: {'type': 'visibilityExtender', 'name': 'beyond door 130',
'comment': '',
'parentEntId': 60011,
'event': 60095,
'newZones': [
30]},
60109: {'type': 'visibilityExtender', 'name': 'beyond door 129',
'comment': '',
'parentEntId': 60108,
'event': 60094,
'newZones': [
32]},
60110: {'type': 'visibilityExtender', 'name': 'beyond door 130',
'comment': '',
'parentEntId': 60108,
'event': 60095,
'newZones': [
30]},
60112: {'type': 'visibilityExtender', 'name': 'beyond door 129',
'comment': '',
'parentEntId': 60111,
'event': 60094,
'newZones': [
32]},
60113: {'type': 'visibilityExtender', 'name': 'beyond door 130',
'comment': '',
'parentEntId': 60111,
'event': 60095,
'newZones': [
30]},
60115: {'type': 'visibilityExtender', 'name': 'beyond door 129',
'comment': '',
'parentEntId': 60114,
'event': 60094,
'newZones': [
32]},
60116: {'type': 'visibilityExtender', 'name': 'beyond door 130',
'comment': '',
'parentEntId': 60114,
'event': 60095,
'newZones': [
30]},
60117: {'type': 'visibilityExtender', 'name': 'beyond door 103',
'comment': '',
'parentEntId': 60014,
'event': 60088,
'newZones': [
18]},
60120: {'type': 'visibilityExtender', 'name': 'beyond door 128',
'comment': '',
'parentEntId': 60108,
'event': 10002,
'newZones': [
31]},
60122: {'type': 'visibilityExtender', 'name': 'beyond door 128',
'comment': '',
'parentEntId': 60121,
'event': 10002,
'newZones': [
33,
34,
35,
36,
37,
60,
61,
128,
129,
130,
200]},
60123: {'type': 'visibilityExtender', 'name': 'beyond door 128',
'comment': '',
'parentEntId': 60111,
'event': 10002,
'newZones': []},
60124: {'type': 'visibilityExtender', 'name': 'beyond door 128',
'comment': '',
'parentEntId': 60114,
'event': 10002,
'newZones': [
31]},
60125: {'type': 'visibilityExtender', 'name': 'beyond door 128',
'comment': '',
'parentEntId': 60011,
'event': 10002,
'newZones': [
31]},
60127: {'type': 'visibilityExtender', 'name': 'beyond door 128',
'comment': '',
'parentEntId': 60126,
'event': 10002,
'newZones': [
31]},
60128: {'type': 'visibilityExtender', 'name': 'beyond door 129',
'comment': '',
'parentEntId': 60126,
'event': 60094,
'newZones': [
32]},
60129: {'type': 'visibilityExtender', 'name': 'beyond door 130',
'comment': '',
'parentEntId': 60126,
'event': 60095,
'newZones': [
30]},
60131: {'type': 'visibilityExtender', 'name': 'beyond door 129',
'comment': '',
'parentEntId': 60130,
'event': 60094,
'newZones': [
33,
34,
35,
36,
37,
60,
61,
128,
130,
200]},
60136: {'type': 'visibilityExtender', 'name': 'beyond door 129',
'comment': '',
'parentEntId': 60044,
'event': 60094,
'newZones': [
32]},
60137: {'type': 'visibilityExtender', 'name': 'beyond door 129',
'comment': '',
'parentEntId': 60096,
'event': 60138,
'newZones': [
32]},
60139: {'type': 'visibilityExtender', 'name': 'beyond door 129',
'comment': '',
'parentEntId': 60121,
'event': 60141,
'newZones': [
32]},
60140: {'type': 'visibilityExtender', 'name': 'beyond door 130',
'comment': '',
'parentEntId': 60121,
'event': 60142,
'newZones': [
30]}}
Scenario0 = {}
levelSpec = {'globalEntities': GlobalEntities, 'scenarios': [
Scenario0]} | v2.5.7/toontown/coghq/SellbotSwagFactorySpec.py | from toontown.toonbase import TTLocalizer
from toontown.coghq.SpecImports import *
GlobalEntities = {1000: {'type': 'levelMgr', 'name': 'LevelMgr', 'comment': '',
'parentEntId': 0,
'cogLevel': 0,
'farPlaneDistance': 1500.0,
'modelFilename': 'phase_9/models/cogHQ/SelbotLegFactory',
'wantDoors': 1},
1001: {'type': 'editMgr', 'name': 'EditMgr',
'parentEntId': 0,
'insertEntity': None,
'removeEntity': None,
'requestNewEntity': None,
'requestSave': None},
0: {'type': 'zone', 'name': 'UberZone',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
3: {'type': 'zone', 'name': 'Main Entrance',
'comment': '',
'parentEntId': 0,
'scale': Vec3(1, 1, 1),
'description': TTLocalizer.SellbotSwagFactorySpecMainEntrance,
'visibility': [
114]},
4: {'type': 'zone', 'name': 'Lobby',
'comment': '',
'parentEntId': 0,
'scale': Vec3(1, 1, 1),
'description': TTLocalizer.SellbotLegFactorySpecLobby,
'visibility': [
113, 114]},
5: {'type': 'zone', 'name': 'hallwayFromLobby',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': [
113, 116]},
6: {'type': 'zone', 'name': 'hallwayToBoiler/Control/Lookout',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecLobbyHallway,
'visibility': [
109,
116,
117,
118]},
7: {'type': 'zone', 'name': 'GearRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecGearRoom,
'visibility': [
109, 110]},
8: {'type': 'zone', 'name': 'BoilerRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecBoilerRoom,
'visibility': [
108, 117]},
9: {'type': 'zone', 'name': 'EastCatwalk',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecEastCatwalk,
'visibility': [
23,
25,
26,
33,
34,
35,
38,
41,
53,
110,
112,
115,
124,
200,
222]},
10: {'type': 'zone', 'name': 'PaintMixer',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecPaintMixer,
'visibility': [
11, 111, 112]},
11: {'type': 'zone', 'name': 'PaintMixerRewardRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecPaintMixerStorageRoom,
'visibility': [
10, 111, 112]},
12: {'type': 'zone', 'name': 'WestSiloCatwalk',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecWestSiloCatwalk,
'visibility': [
21,
26,
33,
34,
35,
36,
37,
40,
41,
53,
60,
61,
108,
110,
119,
120,
125,
127,
128,
129,
130,
200]},
13: {'type': 'zone', 'name': 'PipeRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecPipeRoom,
'visibility': [
119, 121]},
14: {'type': 'zone', 'name': 'StairsToPipeRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': [
17,
18,
121,
126,
131]},
15: {'type': 'zone', 'name': 'DuctRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecDuctRoom,
'visibility': [
106, 126]},
16: {'type': 'zone', 'name': 'Side Entrance',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecSideEntrance,
'visibility': [
106]},
17: {'type': 'zone', 'name': 'StomperAlley',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecStomperAlley,
'visibility': [
14,
121,
126,
131]},
18: {'type': 'zone', 'name': 'LavaRoomFoyer',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecLavaRoomFoyer,
'visibility': [
19,
20,
102,
103,
105,
131]},
19: {'type': 'zone', 'name': 'LavaRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecLavaRoom,
'visibility': [
17,
18,
20,
105,
131]},
20: {'type': 'zone', 'name': 'LavaRewardRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecLavaStorageRoom,
'visibility': [
18, 19, 105]},
21: {'type': 'zone', 'name': 'WestCatwalk',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecWestCatwalk,
'visibility': [
12,
23,
26,
33,
34,
35,
40,
41,
53,
60,
108,
119,
120,
125,
127,
200]},
22: {'type': 'zone', 'name': 'OilRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecOilRoom,
'visibility': [
107]},
23: {'type': 'zone', 'name': 'Lookout',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecLookout,
'visibility': [
24,
39,
115,
118,
120,
123,
124,
125]},
24: {'type': 'zone', 'name': 'Warehouse',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecWarehouse,
'visibility': [
23,
39,
115,
120,
123,
124,
125]},
25: {'type': 'zone', 'name': 'PaintMixerExterior',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
26: {'type': 'zone', 'name': 'WarehouseExterior',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
27: {'type': 'zone', 'name': 'OilRoomHallway',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecOilRoomHallway,
'visibility': [
105, 107, 127]},
30: {'type': 'zone', 'name': 'EastSiloControlRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecEastSiloControlRoom,
'visibility': [
130]},
31: {'type': 'zone', 'name': 'WestSiloControlRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecWestSiloControlRoom,
'visibility': [
128]},
32: {'type': 'zone', 'name': 'CenterSiloControlRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecCenterSiloControlRoom,
'visibility': [
129]},
33: {'type': 'zone', 'name': 'EastSilo',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecEastSilo,
'visibility': [
9,
12,
21,
25,
26,
34,
35,
36,
37,
38,
40,
41,
53,
60,
61,
108,
110,
112,
119,
124,
128,
129,
130,
200,
222]},
34: {'type': 'zone', 'name': 'WestSilo',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecWestSilo,
'visibility': [
9,
12,
21,
25,
26,
33,
35,
36,
37,
40,
41,
53,
60,
61,
108,
110,
112,
119,
120,
125,
127,
128,
129,
130,
200]},
35: {'type': 'zone', 'name': 'CenterSilo',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecCenterSilo,
'visibility': [
9,
21,
25,
26,
33,
34,
36,
37,
40,
41,
53,
60,
61,
108,
110,
112,
119,
128,
129,
130,
200]},
36: {'type': 'zone', 'name': 'WestSiloBridge',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': [
9,
12,
21,
25,
26,
33,
34,
35,
36,
37,
40,
41,
53,
60,
61,
108,
110,
112,
119,
127,
128,
129,
130,
200]},
37: {'type': 'zone', 'name': 'EastSiloBridge',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': [
9,
12,
21,
25,
26,
33,
34,
35,
36,
37,
38,
40,
41,
53,
60,
61,
108,
110,
112,
119,
128,
129,
130,
200,
222]},
38: {'type': 'zone', 'name': 'EastSiloCatwalk',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecEastSiloCatwalk,
'visibility': [
9,
25,
26,
33,
34,
35,
36,
37,
41,
53,
60,
110,
112,
115,
124,
200,
222]},
39: {'type': 'zone', 'name': 'WarehouseCeiling',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
40: {'type': 'zone', 'name': 'WestExterior',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
41: {'type': 'zone', 'name': 'EastExterior',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
53: {'type': 'zone', 'name': 'ExteriorFloor',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
60: {'type': 'zone', 'name': 'WestElevatorShaft',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecWestElevatorShaft,
'visibility': [
12, 34]},
61: {'type': 'zone', 'name': 'EastElevatorShaft',
'comment': 'no geom or DCS',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecEastElevatorShaft,
'visibility': [
33, 38]},
101: {'type': 'zone', 'name': 'dwToLavaRewardRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
102: {'type': 'zone', 'name': 'dwToLavaRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
103: {'type': 'zone', 'name': 'dwToLavaRoomHallway',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
105: {'type': 'zone', 'name': 'dwToOilRoomCatwalks',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
106: {'type': 'zone', 'name': 'dwToDuctRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
107: {'type': 'zone', 'name': 'dwToOilRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
108: {'type': 'zone', 'name': 'dwFromBoilerRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
109: {'type': 'zone', 'name': 'dwToGearRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
110: {'type': 'zone', 'name': 'dwFromGearRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
111: {'type': 'zone', 'name': 'dwToPaintMixerRewardRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
112: {'type': 'zone', 'name': 'dwToPaintMixer',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
113: {'type': 'zone', 'name': 'dwFromLobby',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
114: {'type': 'zone', 'name': 'dwToLobby',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
115: {'type': 'zone', 'name': 'dwToWarehouseFromRight',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
116: {'type': 'zone', 'name': 'dwFromLobbyFar',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
117: {'type': 'zone', 'name': 'dwToBoilerRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
118: {'type': 'zone', 'name': 'dwToLookout',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
119: {'type': 'zone', 'name': 'dwFromPipeRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
120: {'type': 'zone', 'name': 'dwToWarehouseFromLeft',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
121: {'type': 'zone', 'name': 'dwToPipeRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
122: {'type': 'zone', 'name': 'dwToWarehouseControlRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
123: {'type': 'zone', 'name': 'dwFromWarehouseFloor',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
124: {'type': 'zone', 'name': 'dwFromWarehouseRight',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
125: {'type': 'zone', 'name': 'dwFromWarehouseLeft',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
126: {'type': 'zone', 'name': 'dwFromDuctRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
127: {'type': 'zone', 'name': 'dwFromOilRoomHallway',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
128: {'type': 'zone', 'name': 'dwToWestSiloRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
129: {'type': 'zone', 'name': 'dwToCenterSiloRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
130: {'type': 'zone', 'name': 'dwToEastSiloRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
131: {'type': 'zone', 'name': 'dwFromStomperAlley',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
200: {'type': 'zone', 'name': 'sky',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
201: {'type': 'zone', 'name': 'extraZone201',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
202: {'type': 'zone', 'name': 'extraZone202',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
203: {'type': 'zone', 'name': 'extraZone203',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
204: {'type': 'zone', 'name': 'extraZone204',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
205: {'type': 'zone', 'name': 'extraZone205',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
206: {'type': 'zone', 'name': 'extraZone206',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
207: {'type': 'zone', 'name': 'extraZone207',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
208: {'type': 'zone', 'name': 'extraZone208',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
209: {'type': 'zone', 'name': 'extraZone209',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
210: {'type': 'zone', 'name': 'extraZone210',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
211: {'type': 'zone', 'name': 'extraZone211',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
212: {'type': 'zone', 'name': 'extraZone212',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
213: {'type': 'zone', 'name': 'extraZone213',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
214: {'type': 'zone', 'name': 'extraZone214',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
215: {'type': 'zone', 'name': 'extraZone215',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
216: {'type': 'zone', 'name': 'extraZone216',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
217: {'type': 'zone', 'name': 'extraZone217',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
218: {'type': 'zone', 'name': 'extraZone218',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
219: {'type': 'zone', 'name': 'extraZone219',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
220: {'type': 'zone', 'name': 'extraZone220',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
221: {'type': 'zone', 'name': 'extraZone221',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
222: {'type': 'zone', 'name': 'dwToEastSiloInterior',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
10010: {'type': 'ambientSound', 'name': 'westWind',
'comment': '',
'parentEntId': 35,
'pos': Point3(-52.7549, -38.8374, 53.3758),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'enabled': 1,
'soundPath': 'phase_9/audio/sfx/CHQ_FACT_whistling_wind.ogg',
'volume': 1},
10016: {'type': 'ambientSound', 'name': 'sndConveyorBelt',
'comment': '',
'parentEntId': 10056,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'enabled': 1,
'soundPath': 'phase_9/audio/sfx/CHQ_FACT_conveyor_belt.ogg',
'volume': 0.5},
10053: {'type': 'ambientSound', 'name': 'eastWind',
'comment': '',
'parentEntId': 35,
'pos': Point3(52.75, -38.84, 53.38),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'enabled': 1,
'soundPath': 'phase_9/audio/sfx/CHQ_FACT_whistling_wind.ogg',
'volume': 1},
10055: {'type': 'ambientSound', 'name': 'sndGears',
'comment': '',
'parentEntId': 10056,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'enabled': 1,
'soundPath': 'phase_9/audio/sfx/CHQ_FACT_gears_turning.ogg',
'volume': 1},
10031: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 8,
'pos': Point3(-1, 79, 10),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1.75, 1, 1),
'cellId': 1,
'radius': 10.0},
10035: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10039,
'pos': Point3(0, 0, 0),
'hpr': Point3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 4,
'radius': 10.0},
10038: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 7,
'pos': Point3(0, -28.04, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 5,
'radius': 10.0},
20048: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(0.973602, 71.7, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 0.2, 1),
'cellId': 0,
'radius': 15.0},
20063: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20033,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 8,
'radius': 1},
20064: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20034,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 8,
'radius': 1},
20065: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20035,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 8,
'radius': 1},
20066: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20036,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 8,
'radius': 1},
20086: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 15,
'pos': Point3(0, 33, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(2, 1, 1),
'cellId': 6,
'radius': 12.0},
20112: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 33,
'pos': Point3(-10.0936, -9.55975, 4),
'hpr': Point3(45, 0, 0),
'scale': Point3(10, 1, 5),
'cellId': 10,
'radius': 5.0},
20113: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 34,
'pos': Point3(9.08399, 4.42157, 0),
'hpr': Point3(-50, 0, 0),
'scale': Point3(10, 2, 6),
'cellId': 9,
'radius': 5.0},
20114: {'type': 'battleBlocker', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60103,
'pos': Point3(0, 0, 1),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 0.5),
'cellId': 8,
'radius': 3.0},
10003: {'type': 'beanBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20,
'pos': Point3(1.25458, 19.2471, 0.0249529),
'hpr': Vec3(-8.28434, 0, 0),
'scale': 1,
'rewardPerGrab': 25,
'rewardPerGrabMax': 0},
10011: {'type': 'beanBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20,
'pos': Point3(16.344, -9.73, 0.025),
'hpr': Vec3(-79.8888, 0, 0),
'scale': 1,
'rewardPerGrab': 25,
'rewardPerGrabMax': 0},
20017: {'type': 'beanBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10022,
'pos': Point3(20.0035, 2.94232, 0),
'hpr': Vec3(-31.6033, 0, 0),
'scale': 1,
'rewardPerGrab': 35,
'rewardPerGrabMax': 0},
10039: {'type': 'button', 'name': '<unnamed>',
'comment': '',
'parentEntId': 22,
'pos': Point3(-7, 29, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(4, 4, 4),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
20033: {'type': 'button', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20022,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(3, 3, 3),
'color': Vec4(0.862745, 0.517647, 0.0941177, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': 1},
20034: {'type': 'button', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20022,
'pos': Point3(7.5, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(3, 3, 3),
'color': Vec4(0.862745, 0.517647, 0.0941177, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': 1},
20035: {'type': 'button', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20022,
'pos': Point3(15, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(3, 3, 3),
'color': Vec4(0.862745, 0.517647, 0.0941177, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': 1},
20036: {'type': 'button', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20022,
'pos': Point3(22.5, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(3, 3, 3),
'color': Vec4(0.862745, 0.517647, 0.0941177, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': 1},
30040: {'type': 'button', 'name': 'door button',
'comment': 'Entrance door unlock',
'parentEntId': 3,
'pos': Point3(0, 6.75, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(3, 3, 3),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
30076: {'type': 'button', 'name': 'open door 113',
'comment': 'Lobby door unlock',
'parentEntId': 4,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(3, 3, 3),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1},
60102: {'type': 'button', 'name': 'door button',
'comment': 'Entrance Door Unlock',
'parentEntId': 16,
'pos': Point3(4, 8, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(3, 3, 3),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
60103: {'type': 'button', 'name': 'door button',
'comment': '',
'parentEntId': 20022,
'pos': Point3(25, -7, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(4, 4, 4),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
60104: {'type': 'button', 'name': '<unnamed>',
'comment': '',
'parentEntId': 31,
'pos': Point3(0, 10, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(5, 5, 4),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
60105: {'type': 'button', 'name': 'door button',
'comment': '',
'parentEntId': 30,
'pos': Point3(-4, 7, 0),
'hpr': Point3(0, 0, 0),
'scale': Point3(5, 5, 4),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
60118: {'type': 'button', 'name': '<unnamed>',
'comment': '',
'parentEntId': 15,
'pos': Point3(0, 20, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(3, 3, 3),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
10005: {'type': 'conveyorBelt', 'name': 'belt',
'comment': '',
'parentEntId': 10000,
'pos': Point3(0, 45.2024, 7.24937),
'hpr': Point3(180, 0, 0),
'scale': 1,
'floorName': 'platformcollision',
'length': 78.81881352704218,
'speed': 2.0,
'treadLength': 10.0,
'treadModelPath': 'phase_9/models/cogHQ/platform1',
'widthScale': 0.85},
20081: {'type': 'crate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20080,
'pos': Point3(0, 0, 0),
'scale': 0.920000016689,
'crushCellId': None,
'gridId': 20080,
'modelType': 0,
'pushable': 1},
20091: {'type': 'crate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20090,
'pos': Point3(0, 23, 0),
'scale': 0.920000016689,
'crushCellId': None,
'gridId': 20090,
'modelType': 0,
'pushable': 1},
20024: {'type': 'crusherCell', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20023,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'col': 1,
'gridId': 20025,
'row': 14},
20026: {'type': 'crusherCell', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20023,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'col': 10,
'gridId': 20025,
'row': 14},
20027: {'type': 'crusherCell', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 20023,
'pos': Point3(1, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'col': 21,
'gridId': 20025,
'row': 14},
20028: {'type': 'crusherCell', 'name': 'copy of copy of <unnamed>',
'comment': '',
'parentEntId': 20023,
'pos': Point3(2, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'col': 28,
'gridId': 20025,
'row': 14},
30078: {'type': 'cutScene', 'name': 'button door',
'comment': '',
'parentEntId': 114,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'duration': 4.0,
'effect': 'irisInOut',
'motion': 'doorUnlock',
'startStopEvent': 30077},
10002: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 128,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
10052: {'type': 'door', 'name': 'door 127',
'comment': '',
'parentEntId': 127,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 10039,
'unlock2Event': 0,
'unlock3Event': 0},
30000: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 114,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 60132,
'unlock2Event': 0,
'unlock3Event': 0},
30001: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 105,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1.0,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30002: {'type': 'door', 'name': 'door 106',
'comment': '',
'parentEntId': 106,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 60132,
'unlock2Event': 0,
'unlock3Event': 0},
30003: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 107,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30004: {'type': 'door', 'name': 'doorFromBoilerRoom',
'comment': '',
'parentEntId': 108,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30005: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 109,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30006: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 110,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30008: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 112,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30009: {'type': 'door', 'name': 'door 113',
'comment': '',
'parentEntId': 113,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 60119,
'unlock2Event': 0,
'unlock3Event': 0},
30010: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 115,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30011: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 116,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30012: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 117,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30013: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 118,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30014: {'type': 'door', 'name': 'doorFromPipeRoom 119',
'comment': '',
'parentEntId': 119,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30015: {'type': 'door', 'name': 'door 120',
'comment': '',
'parentEntId': 120,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30016: {'type': 'door', 'name': 'door 121',
'comment': '',
'parentEntId': 121,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30017: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 122,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30018: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 123,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 0,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 60103,
'unlock2Event': 0,
'unlock3Event': 0},
30019: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 124,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30020: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 125,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30021: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 126,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 60119,
'unlock2Event': 0,
'unlock3Event': 0},
60088: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 131,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1.0,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
60094: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 129,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 0,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1.0,
'unlock0Event': 0,
'unlock1Event': 60104,
'unlock2Event': 60105,
'unlock3Event': 0},
60095: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 130,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
60101: {'type': 'door', 'name': '<unnamed>',
'comment': '',
'parentEntId': 222,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
10049: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 3},
10051: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 4},
60000: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 5},
60001: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 6},
60002: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 7},
60003: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 9},
60004: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 10},
60005: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 8},
60006: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 21},
60007: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 24},
60009: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 38},
60011: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 12},
60013: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 13},
60014: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 14},
60015: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 17},
60016: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 15},
60017: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 16},
60018: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 19},
60019: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 18},
60024: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 22},
60031: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 23},
60044: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 33},
60066: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 11},
60067: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 27},
60096: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 30},
60108: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 34},
60111: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 36},
60114: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 37},
60121: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 31},
60126: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 35},
60130: {'type': 'entityGroup', 'name': 'viz',
'comment': '',
'parentEntId': 32},
10028: {'type': 'entrancePoint', 'name': 'entrance1',
'comment': '',
'parentEntId': 3,
'pos': Point3(0, 10, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'entranceId': 0,
'radius': 15,
'theta': 20},
10029: {'type': 'entrancePoint', 'name': 'entrance2',
'comment': '',
'parentEntId': 16,
'pos': Point3(0, 10, 0),
'hpr': Point3(0, 0, 0),
'scale': 1,
'entranceId': 1,
'radius': 15,
'theta': 20},
10021: {'type': 'gagBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10022,
'pos': Point3(-2.02081, 0, 0),
'hpr': Vec3(337.477, 0, 0),
'scale': 1,
'gagLevel': 2,
'gagLevelMax': 0,
'gagTrack': 0,
'rewardPerGrab': 3,
'rewardPerGrabMax': 0},
10024: {'type': 'gagBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10022,
'pos': Point3(20.3012, -26.3219, 0),
'hpr': Vec3(233.187, 0, 0),
'scale': 1,
'gagLevel': 4,
'gagLevelMax': 0,
'gagTrack': 4,
'rewardPerGrab': 4,
'rewardPerGrabMax': 0},
10025: {'type': 'gagBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10022,
'pos': Point3(-47.312, 7.22571, 0),
'hpr': Vec3(19.1524, 0, 0),
'scale': 1,
'gagLevel': 0,
'gagLevelMax': 0,
'gagTrack': 0,
'rewardPerGrab': 3,
'rewardPerGrabMax': 0},
10026: {'type': 'gagBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10022,
'pos': Point3(-11.2037, 5.43514, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'gagLevel': 4,
'gagLevelMax': 0,
'gagTrack': 5,
'rewardPerGrab': 4,
'rewardPerGrabMax': 0},
20020: {'type': 'gagBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20,
'pos': Point3(-23.0209, 0, 0),
'hpr': Vec3(126.676, 0, 0),
'scale': 1,
'gagLevel': 4,
'gagLevelMax': 0,
'gagTrack': 3,
'rewardPerGrab': 5,
'rewardPerGrabMax': 0},
20021: {'type': 'gagBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20,
'pos': Point3(-31.3225, 14.1021, 0),
'hpr': Vec3(-136.57, 0, 0),
'scale': 1,
'gagLevel': 4,
'gagLevelMax': 0,
'gagTrack': 5,
'rewardPerGrab': 5,
'rewardPerGrabMax': 0},
20085: {'type': 'gagBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 5,
'pos': Point3(3.14, 12.6703, 10.12),
'hpr': Vec3(-24.8105, 0, 0),
'scale': 1,
'gagLevel': 5,
'gagLevelMax': 0,
'gagTrack': 4,
'rewardPerGrab': 5,
'rewardPerGrabMax': 0},
20093: {'type': 'gagBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20087,
'pos': Point3(2.4, -1, 7),
'hpr': Vec3(-151.532, 0, 0),
'scale': 1,
'gagLevel': 0,
'gagLevelMax': 0,
'gagTrack': 0,
'rewardPerGrab': 5,
'rewardPerGrabMax': 0},
10006: {'type': 'gear', 'name': 'first',
'comment': '',
'parentEntId': 10004,
'pos': Point3(0, 0, 26.0634),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'degreesPerSec': 20.0,
'gearScale': 25.0,
'modelType': 'factory',
'orientation': 'vertical',
'phaseShift': 0},
10007: {'type': 'gear', 'name': 'second',
'comment': '',
'parentEntId': 10004,
'pos': Point3(0, 15, 26.06),
'hpr': Point3(180, 0, 0),
'scale': 1,
'degreesPerSec': 30.0,
'gearScale': 25.0,
'modelType': 'factory',
'orientation': 'vertical',
'phaseShift': 0},
10008: {'type': 'gear', 'name': 'third',
'comment': '',
'parentEntId': 10004,
'pos': Point3(0, 30, 26.06),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'degreesPerSec': 40.0,
'gearScale': 25.0,
'modelType': 'factory',
'orientation': 'vertical',
'phaseShift': 0},
10009: {'type': 'gear', 'name': 'fourth',
'comment': '',
'parentEntId': 10004,
'pos': Point3(0, 45, 26.06),
'hpr': Point3(180, 0, 0),
'scale': 1,
'degreesPerSec': 47.0,
'gearScale': 25.0,
'modelType': 'factory',
'orientation': 'vertical',
'phaseShift': 0},
20013: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20012,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.25,
'attackRadius': 15,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 15,
'velocity': 4},
20014: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20010,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.25,
'attackRadius': 15,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 15,
'velocity': 4},
20016: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20015,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.25,
'attackRadius': 15,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 15,
'velocity': 4},
20041: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20040,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': 20026,
'goonType': 'pg',
'gridId': 20025,
'hFov': 80.0,
'strength': 20,
'velocity': 6.0},
20043: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20042,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': 20024,
'goonType': 'pg',
'gridId': 20025,
'hFov': 80.0,
'strength': 20,
'velocity': 5.0},
20046: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20044,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': 20024,
'goonType': 'pg',
'gridId': 20025,
'hFov': 70,
'strength': 20,
'velocity': 6.0},
20047: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20045,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': 20026,
'goonType': 'pg',
'gridId': 20025,
'hFov': 80.0,
'strength': 20,
'velocity': 6.0},
20052: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20051,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 12.0,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 7,
'velocity': 4.0},
20054: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20053,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': 20027,
'goonType': 'pg',
'gridId': 20025,
'hFov': 80.0,
'strength': 20,
'velocity': 5.5},
20056: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20055,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': 20028,
'goonType': 'pg',
'gridId': 20025,
'hFov': 70,
'strength': 20,
'velocity': 6.0},
20060: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20059,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': 20028,
'goonType': 'pg',
'gridId': 20025,
'hFov': 90.0,
'strength': 20,
'velocity': 6.5},
20062: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20061,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': 20027,
'goonType': 'pg',
'gridId': 20025,
'hFov': 70,
'strength': 20,
'velocity': 7.5},
20071: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20070,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 15,
'velocity': 6.0},
20072: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20069,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 80.0,
'strength': 15,
'velocity': 6.0},
20074: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20073,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.25,
'attackRadius': 15,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 15,
'velocity': 4},
20089: {'type': 'goon', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20084,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 15,
'velocity': 4},
20115: {'type': 'goonClipPlane', 'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(0, -7.4, 0),
'hpr': Point3(-90, 0, 0),
'scale': Point3(5, 5, 5),
'goonId': 20052},
20116: {'type': 'goonClipPlane', 'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(0, -58, 0),
'hpr': Point3(90, 0, 0),
'scale': 1,
'goonId': None},
20117: {'type': 'goonClipPlane', 'name': '<unnamed>',
'comment': '',
'parentEntId': 24,
'pos': Point3(0, -29, 0),
'hpr': Point3(90, 0, 0),
'scale': 1,
'goonId': None},
20118: {'type': 'goonClipPlane', 'name': '<unnamed>',
'comment': '',
'parentEntId': 24,
'pos': Point3(-52, 0, 5),
'hpr': Point3(0, 0, 0),
'scale': 1,
'goonId': None},
20025: {'type': 'grid', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20023,
'pos': Point3(-48.4442, -24.9385, 0),
'scale': 1,
'cellSize': 3,
'numCol': 30,
'numRow': 16},
20080: {'type': 'grid', 'name': '<unnamed>',
'comment': '',
'parentEntId': 5,
'pos': Point3(1.5, -10.7, 0),
'scale': 1,
'cellSize': 3,
'numCol': 2,
'numRow': 5},
20090: {'type': 'grid', 'name': '<unnamed>',
'comment': '',
'parentEntId': 17,
'pos': Point3(-6.5, -111, 0),
'scale': 1,
'cellSize': 3,
'numCol': 2,
'numRow': 9},
20011: {'type': 'healBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20,
'pos': Point3(-2.06235, 20.2198, 0.025),
'hpr': Vec3(-19.2153, 0, 0),
'scale': 1,
'rewardPerGrab': 10,
'rewardPerGrabMax': 0},
20092: {'type': 'healBarrel', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20087,
'pos': Point3(-1, -1.5, 7),
'hpr': Vec3(-191.79, 0, 0),
'scale': 1,
'rewardPerGrab': 5,
'rewardPerGrabMax': 0},
10041: {'type': 'lift', 'name': 'westLift',
'comment': '',
'parentEntId': 60,
'pos': Point3(0, 0, 0.0641994),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'autoMoveDelay': 5,
'duration': 7.0,
'endBoardSides': [
'back'],
'endGuardName': 'topGuard',
'endPos': Point3(0, 0, 165),
'floorName': 'elevator_floor',
'modelPath': 'phase_9/models/cogHQ/Elevator',
'modelScale': Vec3(1, 1, 1),
'moveDelay': 1,
'startBoardSides': [
'front'],
'startGuardName': 'bottomGuard',
'startPos': Point3(0, 0, 0)},
10048: {'type': 'lift', 'name': 'eastLift',
'comment': '',
'parentEntId': 61,
'pos': Point3(0, -0.684064, 0.589322),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'autoMoveDelay': 5.0,
'duration': 7.0,
'endBoardSides': [
'front',
'back',
'left',
'right'],
'endGuardName': 'topGuard',
'endPos': Point3(0, 0, 165),
'floorName': 'elevator_floor',
'modelPath': 'phase_9/models/cogHQ/Elevator',
'modelScale': Vec3(1, 1, 1),
'moveDelay': 1,
'startBoardSides': [
'front'],
'startGuardName': 'bottomGuard',
'startPos': Point3(0, 0, 0)},
10057: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10043,
'input1Event': 30009,
'input2Event': 30000,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
10059: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10058,
'input1Event': 10057,
'input2Event': 30011,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
10061: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10060,
'input1Event': 10059,
'input2Event': 30013,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
10063: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10062,
'input1Event': 60033,
'input2Event': 30009,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
30068: {'type': 'logicGate', 'name': 'door 116 and door 118',
'comment': '',
'parentEntId': 30069,
'input1Event': 30013,
'input2Event': 30011,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60023: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60021,
'input1Event': 30011,
'input2Event': 30009,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60025: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60022,
'input1Event': 60023,
'input2Event': 30013,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60028: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60026,
'input1Event': 30011,
'input2Event': 30005,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60029: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60027,
'input1Event': 30011,
'input2Event': 30012,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60030: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 30071,
'input1Event': 30011,
'input2Event': 30009,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60033: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 30073,
'input1Event': 30013,
'input2Event': 30011,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60034: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 30075,
'input1Event': 30013,
'input2Event': 30005,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60035: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60032,
'input1Event': 30013,
'input2Event': 30012,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60037: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60036,
'input1Event': 30005,
'input2Event': 30012,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60039: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60038,
'input1Event': 30012,
'input2Event': 30005,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60041: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60040,
'input1Event': 30020,
'input2Event': 30019,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60043: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60042,
'input1Event': 30019,
'input2Event': 30020,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60047: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60045,
'input1Event': 10002,
'input2Event': 30019,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60049: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60048,
'input1Event': 30003,
'input2Event': 10052,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60051: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60050,
'input1Event': 30001,
'input2Event': 10052,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60053: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60052,
'input1Event': 30021,
'input2Event': 30016,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60055: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60054,
'input1Event': 30002,
'input2Event': 30021,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60057: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60056,
'input1Event': 30016,
'input2Event': 30021,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60059: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60058,
'input1Event': 30012,
'input2Event': 30011,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60061: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60060,
'input1Event': 30012,
'input2Event': 30013,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60064: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60062,
'input1Event': 30005,
'input2Event': 30011,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60065: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60063,
'input1Event': 30005,
'input2Event': 30013,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60074: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60072,
'input1Event': 10052,
'input2Event': 30003,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60075: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60073,
'input1Event': 10052,
'input2Event': 30001,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60076: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60020,
'input1Event': 30021,
'input2Event': 30002,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60078: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60077,
'input1Event': 30021,
'input2Event': 30002,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60080: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60079,
'input1Event': 60057,
'input2Event': 30002,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60082: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60081,
'input1Event': 60055,
'input2Event': 30016,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60084: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60083,
'input1Event': 30004,
'input2Event': 30014,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60086: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60085,
'input1Event': 30006,
'input2Event': 30008,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60091: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60087,
'input1Event': 60088,
'input2Event': 30001,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60093: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60092,
'input1Event': 30001,
'input2Event': 60088,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60100: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60099,
'input1Event': 60095,
'input2Event': 10002,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60119: {'type': 'logicGate', 'name': 'open sesame Duct & Lobby',
'comment': 'links together the Duct Room and Lobby buttons',
'parentEntId': 0,
'input1Event': 30076,
'input2Event': 60118,
'isInput1': 0,
'isInput2': 0,
'logicType': 'or'},
60132: {'type': 'logicGate', 'name': 'open sesame Entrances',
'comment': 'links together the buttons in the two entrances',
'parentEntId': 0,
'input1Event': 30040,
'input2Event': 60102,
'isInput1': 0,
'isInput2': 0,
'logicType': 'or'},
60138: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60137,
'input1Event': 60095,
'input2Event': 60094,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60141: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60139,
'input1Event': 10002,
'input2Event': 60094,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60142: {'type': 'logicGate', 'name': '<unnamed>',
'comment': '',
'parentEntId': 60140,
'input1Event': 10002,
'input2Event': 60095,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
10001: {'type': 'model', 'name': 'dropshadow',
'comment': '',
'parentEntId': 10006,
'pos': Point3(0, 0, -25),
'hpr': Vec3(0, 0, 0),
'scale': Point3(2, 1.5, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_3/models/props/drop_shadow'},
10012: {'type': 'model', 'name': 'backCrate',
'comment': '',
'parentEntId': 10067,
'pos': Point3(0, -5.81496, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 2),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_9/models/cogHQ/metal_crateB'},
10033: {'type': 'model', 'name': 'dropshadow',
'comment': '',
'parentEntId': 10007,
'pos': Point3(0, 0, -25),
'hpr': Vec3(0, 0, 0),
'scale': Point3(2, 1.5, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_3/models/props/drop_shadow'},
10045: {'type': 'model', 'name': 'dropshadow',
'comment': '',
'parentEntId': 10008,
'pos': Point3(0, 0, -25),
'hpr': Vec3(0, 0, 0),
'scale': Point3(2, 1.5, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_3/models/props/drop_shadow'},
10046: {'type': 'model', 'name': 'dropshadow',
'comment': '',
'parentEntId': 10009,
'pos': Point3(0, 0, -25),
'hpr': Vec3(0, 0, 0),
'scale': Point3(2, 1.5, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_3/models/props/drop_shadow'},
10050: {'type': 'model', 'name': 'sky',
'comment': '',
'parentEntId': 200,
'pos': Point3(-142.02, 437.227, 0.922491),
'hpr': Point3(0, 0, 0),
'scale': Point3(2.5, 2.5, 2),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_9/models/cogHQ/cog_sky'},
10066: {'type': 'model', 'name': 'frontCrate',
'comment': '',
'parentEntId': 10067,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_9/models/cogHQ/metal_crateB'},
10069: {'type': 'model', 'name': 'backCrate',
'comment': '',
'parentEntId': 10065,
'pos': Point3(0, -5.81496, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 2),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_9/models/cogHQ/metal_crateB'},
10070: {'type': 'model', 'name': 'frontCrate',
'comment': '',
'parentEntId': 10065,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_9/models/cogHQ/metal_crateB'},
20082: {'type': 'model', 'name': '<unnamed>',
'comment': '',
'parentEntId': 5,
'pos': Point3(4.50815, 11.6508, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(0.92, 0.92, 0.92),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_9/models/cogHQ/metal_crateB'},
20083: {'type': 'model', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20082,
'pos': Point3(0, 0, 5.5),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_9/models/cogHQ/metal_crateB'},
20088: {'type': 'model', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20087,
'pos': Point3(1, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1.3, 1, 1.3),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_9/models/cogHQ/metal_crateB'},
10000: {'type': 'nodepath', 'name': 'gearGauntletObstacle',
'comment': '',
'parentEntId': 10027,
'pos': Point3(0, 0, 0),
'hpr': Point3(0, 0, 0),
'scale': 1},
10004: {'type': 'nodepath', 'name': 'gearGauntlet',
'comment': 'gears are staggered 15 ft in Y',
'parentEntId': 10000,
'pos': Point3(0, -23.25, 6.85),
'hpr': Point3(0, 0, 0),
'scale': 1},
10014: {'type': 'nodepath', 'name': 'cogs',
'comment': '',
'parentEntId': 4,
'pos': Point3(0, 34.07, 0),
'hpr': Point3(0, 0, 0),
'scale': 1},
10015: {'type': 'nodepath', 'name': 'paint mixer platforms',
'comment': '',
'parentEntId': 10,
'pos': Point3(0, 5.15136, -2),
'hpr': Point3(0, 0, 0),
'scale': 1},
10022: {'type': 'nodepath', 'name': 'gagBarrels',
'comment': '',
'parentEntId': 11,
'pos': Point3(11.2328, 14.7959, 0),
'hpr': Point3(0, 0, 0),
'scale': 1},
10023: {'type': 'nodepath', 'name': 'leftCogs',
'comment': '',
'parentEntId': 13,
'pos': Point3(-42.0363, 0, 0),
'hpr': Point3(0, 0, 0),
'scale': 1},
10027: {'type': 'nodepath', 'name': 'zoneNodeCompensate',
'comment': 'I think the ZoneNode was moved.',
'parentEntId': 19,
'pos': Point3(-0.426482, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
10030: {'type': 'nodepath', 'name': 'cogs',
'comment': '',
'parentEntId': 8,
'pos': Point3(2.5, 62.5, 10),
'hpr': Point3(0, 0, 0),
'scale': 1},
10032: {'type': 'nodepath', 'name': 'rightCogs',
'comment': '',
'parentEntId': 13,
'pos': Point3(46.88, 0, 0),
'hpr': Point3(0, 0, 0),
'scale': 1},
10034: {'type': 'nodepath', 'name': 'cogs',
'comment': '',
'parentEntId': 22,
'pos': Point3(0, 0, 0),
'hpr': Point3(180, 0, 0),
'scale': 1},
10036: {'type': 'nodepath', 'name': 'cogs',
'comment': '',
'parentEntId': 15,
'pos': Point3(5.5, 0, 0),
'hpr': Point3(161, 0, 0),
'scale': 1},
10037: {'type': 'nodepath', 'name': 'cogs',
'comment': '',
'parentEntId': 7,
'pos': Point3(3.1, -48.27, 0.05),
'hpr': Point3(0, 0, 0),
'scale': 1},
10040: {'type': 'nodepath', 'name': 'FactoryBoss',
'comment': '',
'parentEntId': 24,
'pos': Point3(0, 68.4457, 9.5669),
'hpr': Point3(180, 0, 0),
'scale': 1},
10047: {'type': 'nodepath', 'name': 'battleCell',
'comment': '',
'parentEntId': 34,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
10056: {'type': 'nodepath', 'name': 'sounds',
'comment': '',
'parentEntId': 10000,
'pos': Point3(0, 0, 15),
'hpr': Vec3(0, 0, 0),
'scale': 1},
10064: {'type': 'nodepath', 'name': 'battleCell',
'comment': '',
'parentEntId': 32,
'pos': Point3(0, -5.20447, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
10065: {'type': 'nodepath', 'name': 'backSteps',
'comment': '',
'parentEntId': 10000,
'pos': Point3(0, 56.2652, 0),
'hpr': Point3(0, 0, 0),
'scale': Point3(1.5, 1.3, 0.73)},
10067: {'type': 'nodepath', 'name': 'frontSteps',
'comment': '',
'parentEntId': 10000,
'pos': Point3(0, -44.7196, 0),
'hpr': Point3(180, 0, 0),
'scale': Point3(1.5, 1.3, 0.729057)},
10068: {'type': 'nodepath', 'name': 'battleCell',
'comment': '',
'parentEntId': 33,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20000: {'type': 'nodepath', 'name': 'stompers',
'comment': '',
'parentEntId': 17,
'pos': Point3(0.75, 0, 0.5),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20018: {'type': 'nodepath', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10014,
'pos': Point3(0, -24, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20019: {'type': 'nodepath', 'name': 'cogsJoin',
'comment': '',
'parentEntId': 10030,
'pos': Point3(16, 2, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20022: {'type': 'nodepath', 'name': 'StomperButtonsNodepath',
'comment': '',
'parentEntId': 24,
'pos': Point3(-11.75, -35.8, 14.9),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20023: {'type': 'nodepath', 'name': '<unnamed>',
'comment': '',
'parentEntId': 24,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20037: {'type': 'nodepath', 'name': 'SignatureGoonNP',
'comment': '',
'parentEntId': 24,
'pos': Point3(-48.4442, -24.9385, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20058: {'type': 'nodepath', 'name': 'SigRoomCogs',
'comment': '',
'parentEntId': 24,
'pos': Point3(-1.0928, -45, 14.99),
'hpr': Point3(90, 0, 0),
'scale': 1},
20087: {'type': 'nodepath', 'name': '<unnamed>',
'comment': '',
'parentEntId': 17,
'pos': Point3(-4, -117, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20094: {'type': 'nodepath', 'name': 'cogs',
'comment': '',
'parentEntId': 34,
'pos': Point3(-0.720506, 27.5461, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20095: {'type': 'nodepath', 'name': 'cogs',
'comment': '',
'parentEntId': 32,
'pos': Point3(0, 0, 0),
'hpr': Point3(0, 0, 0),
'scale': 1},
20096: {'type': 'nodepath', 'name': 'cogs',
'comment': '',
'parentEntId': 33,
'pos': Point3(4.84921, 8.74482, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
26900: {'type': 'nodepath', 'name': 'centercog',
'comment': 'Legal Eagle Center Silo.',
'parentEntId': 35,
'pos': Point3(0, 0, 0),
'hpr': Point3(0, 0, 0),
'scale': 1},
10017: {'type': 'paintMixer', 'name': 'fifth',
'comment': '',
'parentEntId': 10015,
'pos': Point3(5.24, 23.52, 8),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'floorName': 'PaintMixerFloorCollision',
'modelPath': 'phase_9/models/cogHQ/PaintMixer',
'modelScale': Point3(0.8, 0.8, 0.8),
'motion': 'easeInOut',
'offset': Point3(-12, -6, 0),
'period': 8.0,
'phaseShift': 0.5,
'shaftScale': 1,
'waitPercent': 0.1},
10018: {'type': 'paintMixer', 'name': 'fourth',
'comment': '',
'parentEntId': 10015,
'pos': Point3(-12.1, 3, 8),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'floorName': 'PaintMixerFloorCollision',
'modelPath': 'phase_9/models/cogHQ/PaintMixer',
'modelScale': Point3(0.8, 0.8, 0.8),
'motion': 'easeInOut',
'offset': Point3(0, -6, 15),
'period': 8.0,
'phaseShift': 0.0,
'shaftScale': 2.5,
'waitPercent': 0.1},
10019: {'type': 'paintMixer', 'name': 'third',
'comment': '',
'parentEntId': 10015,
'pos': Point3(-3.85419, -7.75751, 22.5836),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'floorName': 'PaintMixerFloorCollision',
'modelPath': 'phase_9/models/cogHQ/PaintMixer',
'modelScale': Point3(0.8, 0.8, 0.8),
'motion': 'easeInOut',
'offset': Point3(7, 0, 0),
'period': 8.0,
'phaseShift': 0.0,
'shaftScale': 2.5,
'waitPercent': 0.1},
10020: {'type': 'paintMixer', 'name': 'second',
'comment': '',
'parentEntId': 10015,
'pos': Point3(16.01, -6.47, 23),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'floorName': 'PaintMixerFloorCollision',
'modelPath': 'phase_9/models/cogHQ/PaintMixer',
'modelScale': Point3(0.8, 0.8, 0.8),
'motion': 'easeInOut',
'offset': Point3(-4, -8, -15),
'period': 8.0,
'phaseShift': 0.0,
'shaftScale': 2.5,
'waitPercent': 0.1},
10054: {'type': 'paintMixer', 'name': 'first',
'comment': '',
'parentEntId': 10015,
'pos': Point3(-10, -26.1, 8),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'floorName': 'PaintMixerFloorCollision',
'modelPath': 'phase_9/models/cogHQ/PaintMixer',
'modelScale': Point3(0.8, 0.8, 0.8),
'motion': 'easeInOut',
'offset': Point3(15, 0, 0),
'period': 8.0,
'phaseShift': 0.0,
'shaftScale': 1,
'waitPercent': 0.1},
20008: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 13,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
20009: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 17,
'pathScale': 1.0},
20010: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 21,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 36,
'pathScale': 1.0},
20012: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 21,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 34,
'pathScale': 1.0},
20015: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 21,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 37,
'pathScale': 1.0},
20038: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 15,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 38,
'pathScale': 1.0},
20039: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 7,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 12,
'pathScale': 1.0},
20040: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(41.5, 33.5, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
20042: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(15, 34, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
20044: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(1.5, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'pathIndex': 6,
'pathScale': 1.0},
20045: {'type': 'path', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 1),
'pathIndex': 7,
'pathScale': 1.0},
20049: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 7,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 13,
'pathScale': 1.0},
20051: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(1, -24, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 42,
'pathScale': 1.0},
20053: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 8,
'pathScale': 1.0},
20055: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 9,
'pathScale': 1.0},
20059: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 10,
'pathScale': 1.0},
20061: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 11,
'pathScale': 1.0},
20067: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 15,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 39,
'pathScale': 1.0},
20068: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 15,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 40,
'pathScale': 1.0},
20069: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 9,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 5,
'pathScale': 1.0},
20070: {'type': 'path', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 9,
'pos': Point3(1, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 5,
'pathScale': 1.0},
20073: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 21,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 35,
'pathScale': 1.0},
20075: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 7,
'pos': Point3(4, 4, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 14,
'pathScale': 1.0},
20076: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 8,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 15,
'pathScale': 1.0},
20077: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 8,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 16,
'pathScale': 1.0},
20078: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 18,
'pathScale': 1.0},
20079: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
20084: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 9,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 41,
'pathScale': 1.0},
20097: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 34,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 19,
'pathScale': 1.0},
20098: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 34,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 20,
'pathScale': 1.0},
20099: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 34,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 21,
'pathScale': 1.0},
20100: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 33,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 22,
'pathScale': 1.0},
20101: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 33,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 23,
'pathScale': 1.0},
20102: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 33,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 24,
'pathScale': 1.0},
20103: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 32,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 25,
'pathScale': 1.0},
20104: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 32,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 26,
'pathScale': 1.0},
20105: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 32,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 27,
'pathScale': 1.0},
20106: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 13,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 28,
'pathScale': 1.0},
20107: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 13,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 29,
'pathScale': 1.0},
20108: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 13,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 30,
'pathScale': 1.0},
20109: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 13,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 31,
'pathScale': 1.0},
20110: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 13,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 32,
'pathScale': 1.0},
20111: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 13,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 33,
'pathScale': 1.0},
60133: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 22,
'pos': Point3(-10, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
60134: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 22,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
60135: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 22,
'pos': Point3(10, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
10042: {'type': 'propSpinner', 'name': '<unnamed>',
'comment': '',
'parentEntId': 7},
20001: {'type': 'stomper', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20000,
'pos': Point3(0, 0, 0),
'hpr': Point3(0, 0, 0),
'scale': 1,
'crushCellId': None,
'damage': 3,
'headScale': Point3(7, 5, 7),
'modelPath': 0,
'motion': 3,
'period': 4.0,
'phaseShift': 0.0,
'range': 30.0,
'shaftScale': Point3(0.5, 12, 0.5),
'soundLen': 0,
'soundOn': 1,
'soundPath': 2,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20002: {'type': 'stomper', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 20000,
'pos': Point3(0, -14.3294, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'crushCellId': None,
'damage': 3,
'headScale': Point3(7, 5, 7),
'modelPath': 0,
'motion': 3,
'period': 2.0,
'phaseShift': 0.0,
'range': 10.0,
'shaftScale': Point3(0.5, 12, 0.5),
'soundLen': 0,
'soundOn': 1,
'soundPath': 2,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20003: {'type': 'stomper', 'name': 'copy of copy of <unnamed>',
'comment': '',
'parentEntId': 20000,
'pos': Point3(0, -28.3252, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'crushCellId': None,
'damage': 3,
'headScale': Point3(7, 5, 7),
'modelPath': 0,
'motion': 3,
'period': 2.0,
'phaseShift': 0.5,
'range': 10.0,
'shaftScale': Point3(0.5, 12, 0.5),
'soundLen': 0,
'soundOn': 1,
'soundPath': 2,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20004: {'type': 'stomper', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 20000,
'pos': Point3(-3.5, 16.2588, 0),
'hpr': Point3(0, 0, 0),
'scale': 1,
'crushCellId': None,
'damage': 3,
'headScale': Point3(3.5, 5, 3.5),
'modelPath': 0,
'motion': 3,
'period': 3.0001373423482587,
'phaseShift': 0.0,
'range': 15.0,
'shaftScale': Point3(0.71, 12, 0.71),
'soundLen': 0,
'soundOn': 1,
'soundPath': 0,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20005: {'type': 'stomper', 'name': 'copy of copy of <unnamed>',
'comment': '',
'parentEntId': 20000,
'pos': Point3(3.5, 16.2588, 0),
'hpr': Point3(0, 0, 0),
'scale': 1,
'crushCellId': None,
'damage': 3,
'headScale': Point3(3.5, 5, 3.5),
'modelPath': 0,
'motion': 3,
'period': 1.5,
'phaseShift': 0.0,
'range': 15.0,
'shaftScale': Point3(0.71, 12, 0.71),
'soundLen': 0,
'soundOn': 1,
'soundPath': 1,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20006: {'type': 'stomper', 'name': 'copy of copy of copy of <unnamed>',
'comment': '',
'parentEntId': 20000,
'pos': Point3(-3.5, 23.4392, 0),
'hpr': Point3(0, 0, 0),
'scale': 1,
'crushCellId': None,
'damage': 3,
'headScale': Point3(3.5, 5, 3.5),
'modelPath': 0,
'motion': 3,
'period': 1.5,
'phaseShift': 0.5,
'range': 15.0,
'shaftScale': Point3(0.71, 12, 0.71),
'soundLen': 0,
'soundOn': 1,
'soundPath': 0,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20007: {'type': 'stomper', 'name': 'copy of copy of copy of copy of <unnamed>',
'comment': '',
'parentEntId': 20000,
'pos': Point3(3.5, 23.4392, 0),
'hpr': Point3(0, 0, 0),
'scale': 1,
'crushCellId': None,
'damage': 3,
'headScale': Point3(3.5, 5, 3.5),
'modelPath': 0,
'motion': 3,
'period': 3.0,
'phaseShift': 0.5,
'range': 15.0,
'shaftScale': Point3(0.71, 12, 0.71),
'soundLen': 0,
'soundOn': 1,
'soundPath': 0,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20029: {'type': 'stomper', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20025,
'pos': Point3(4.5, 43.5, 0.25),
'hpr': Point3(0, 0, 0),
'scale': 1,
'animateShadow': 0,
'crushCellId': 20024,
'damage': 3,
'headScale': Point3(3, 2, 3),
'modelPath': 0,
'motion': 5,
'period': 2.0,
'phaseShift': 0.0,
'range': 12.0,
'shaftScale': Point3(0.66, 37.5, 0.66),
'soundLen': 0,
'soundOn': 1,
'soundPath': 2,
'style': 'vertical',
'switchId': 20033,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20030: {'type': 'stomper', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 20025,
'pos': Point3(31.5, 43.5, 0.25),
'hpr': Point3(0, 0, 0),
'scale': 1,
'animateShadow': 0,
'crushCellId': 20026,
'damage': 3,
'headScale': Point3(3, 2, 3),
'modelPath': 0,
'motion': 5,
'period': 2.0,
'phaseShift': 0.0,
'range': 12.0,
'shaftScale': Point3(0.66, 37.5, 0.66),
'soundLen': 0,
'soundOn': 1,
'soundPath': 2,
'style': 'vertical',
'switchId': 20034,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20031: {'type': 'stomper', 'name': 'copy of copy of <unnamed>',
'comment': '',
'parentEntId': 20025,
'pos': Point3(64.5, 43.5, 0.25),
'hpr': Point3(0, 0, 0),
'scale': 1,
'animateShadow': 0,
'crushCellId': 20027,
'damage': 3,
'headScale': Point3(3, 2, 3),
'modelPath': 0,
'motion': 5,
'period': 2.0,
'phaseShift': 0.0,
'range': 12.0,
'shaftScale': Point3(0.66, 37.5, 0.66),
'soundLen': 0,
'soundOn': 1,
'soundPath': 2,
'style': 'vertical',
'switchId': 20035,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20032: {'type': 'stomper', 'name': 'copy of copy of copy of <unnamed>',
'comment': '',
'parentEntId': 20025,
'pos': Point3(85.5, 43.5, 0.25),
'hpr': Point3(0, 0, 0),
'scale': 1,
'animateShadow': 0,
'crushCellId': 20028,
'damage': 3,
'headScale': Point3(3, 2, 3),
'modelPath': 0,
'motion': 5,
'period': 2.0,
'phaseShift': 0.0,
'range': 12.0,
'shaftScale': Point3(0.66, 37.5, 0.66),
'soundLen': 0,
'soundOn': 1,
'soundPath': 2,
'style': 'vertical',
'switchId': 20036,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20050: {'type': 'trigger', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20022,
'pos': Point3(10, 0, 10),
'hpr': Vec3(0, 0, 0),
'scale': Point3(20, 20, 20),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': 1,
'triggerName': 'signatureRoomView'},
20057: {'type': 'trigger', 'name': '<unnamed>',
'comment': '',
'parentEntId': 23,
'pos': Point3(3, -8.8, 15.5091),
'hpr': Vec3(0, 0, 0),
'scale': Point3(25, 25, 25),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': 1,
'triggerName': 'lookoutTrigger'},
30077: {'type': 'trigger', 'name': 'button cutscene',
'comment': '',
'parentEntId': 3,
'pos': Point3(-4, 8, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': 1,
'triggerName': ''},
10013: {'type': 'visibilityExtender', 'name': 'intoEastSilo',
'comment': '',
'parentEntId': 60009,
'event': 60101,
'newZones': [
61]},
10043: {'type': 'visibilityExtender', 'name': 'beyondLobby',
'comment': '',
'parentEntId': 10049,
'event': 10057,
'newZones': [
5, 116]},
10044: {'type': 'visibilityExtender', 'name': 'intoEntrance1',
'comment': '',
'parentEntId': 10051,
'event': 30000,
'newZones': [
3]},
10058: {'type': 'visibilityExtender', 'name': 'intoFarHallway',
'comment': '',
'parentEntId': 10049,
'event': 10059,
'newZones': [
6, 118]},
10060: {'type': 'visibilityExtender', 'name': 'intoLookout',
'comment': '',
'parentEntId': 10049,
'event': 10061,
'newZones': [
23]},
10062: {'type': 'visibilityExtender', 'name': 'intoLobby',
'comment': '',
'parentEntId': 60031,
'event': 10063,
'newZones': [
4, 114]},
30022: {'type': 'visibilityExtender', 'name': 'intoLobby',
'comment': '',
'parentEntId': 10049,
'event': 30000,
'newZones': [
4, 113]},
30023: {'type': 'visibilityExtender', 'name': 'beyond door 106',
'comment': '',
'parentEntId': 60017,
'event': 30002,
'newZones': [
15, 126]},
30024: {'type': 'visibilityExtender', 'name': 'beyond door 106',
'comment': '',
'parentEntId': 60016,
'event': 30002,
'newZones': [
16]},
30025: {'type': 'visibilityExtender', 'name': 'beyond door 126',
'comment': '',
'parentEntId': 60016,
'event': 30021,
'newZones': [
14, 17, 121]},
30026: {'type': 'visibilityExtender', 'name': 'beyond door 121',
'comment': '',
'parentEntId': 60015,
'event': 30016,
'newZones': [
13, 119]},
30027: {'type': 'visibilityExtender', 'name': 'beyond door 126',
'comment': '',
'parentEntId': 60015,
'event': 30021,
'newZones': [
15, 106]},
30029: {'type': 'visibilityExtender', 'name': 'beyondLobby',
'comment': '',
'parentEntId': 10051,
'event': 30009,
'newZones': [
5, 116]},
30030: {'type': 'visibilityExtender', 'name': 'beyond door 113',
'comment': '',
'parentEntId': 60000,
'event': 30009,
'newZones': [
4, 114]},
30031: {'type': 'visibilityExtender', 'name': 'beyond door 116',
'comment': '',
'parentEntId': 60000,
'event': 30011,
'newZones': [
6,
109,
117,
118]},
30032: {'type': 'visibilityExtender', 'name': 'intoHallwayFromLobby',
'comment': '',
'parentEntId': 60001,
'event': 30011,
'newZones': [
5, 113]},
30033: {'type': 'visibilityExtender', 'name': 'intoBoilerRoom',
'comment': '',
'parentEntId': 60001,
'event': 30012,
'newZones': [
8]},
30034: {'type': 'visibilityExtender', 'name': 'intoLookout',
'comment': '',
'parentEntId': 60001,
'event': 30013,
'newZones': [
23, 39]},
30035: {'type': 'visibilityExtender', 'name': 'intoGearRoom',
'comment': '',
'parentEntId': 60001,
'event': 30005,
'newZones': [
7]},
30036: {'type': 'visibilityExtender', 'name': 'beyond door 109',
'comment': '',
'parentEntId': 60002,
'event': 30005,
'newZones': [
6,
116,
117,
118]},
30037: {'type': 'visibilityExtender', 'name': 'beyond door 110',
'comment': '',
'parentEntId': 60002,
'event': 30006,
'newZones': [
9,
25,
26,
33,
34,
35,
38,
41,
53,
112,
115,
200]},
30038: {'type': 'visibilityExtender', 'name': 'beyond door 117',
'comment': '',
'parentEntId': 60005,
'event': 30012,
'newZones': [
6,
109,
116,
118]},
30039: {'type': 'visibilityExtender', 'name': 'beyond door 108',
'comment': '',
'parentEntId': 60005,
'event': 30004,
'newZones': [
12,
21,
26,
34,
35,
40,
41,
53,
60,
119,
120,
200]},
30041: {'type': 'visibilityExtender', 'name': 'beyond door 110',
'comment': '',
'parentEntId': 60003,
'event': 30006,
'newZones': [
7]},
30042: {'type': 'visibilityExtender', 'name': 'beyond door 112',
'comment': '',
'parentEntId': 60003,
'event': 30008,
'newZones': [
10, 11]},
30043: {'type': 'visibilityExtender', 'name': 'intoWarehouse',
'comment': '',
'parentEntId': 60003,
'event': 30010,
'newZones': [
24, 39]},
30044: {'type': 'visibilityExtender', 'name': 'beyond door 112',
'comment': '',
'parentEntId': 60004,
'event': 30008,
'newZones': [
9,
25,
26,
33,
34,
35,
38,
41,
53,
110,
115,
200]},
30046: {'type': 'visibilityExtender', 'name': 'beyond door 112',
'comment': '',
'parentEntId': 60066,
'event': 30008,
'newZones': [
9,
25,
26,
41,
200]},
30049: {'type': 'visibilityExtender', 'name': 'beyond door 119',
'comment': '',
'parentEntId': 60013,
'event': 30014,
'newZones': [
12,
21,
23,
26,
33,
34,
35,
41,
53,
60,
108,
112,
120,
200]},
30050: {'type': 'visibilityExtender', 'name': 'beyond door 121',
'comment': '',
'parentEntId': 60013,
'event': 30016,
'newZones': [
14, 17, 126]},
30051: {'type': 'visibilityExtender', 'name': 'beyond door 121',
'comment': '',
'parentEntId': 60014,
'event': 30016,
'newZones': [
13, 119]},
30052: {'type': 'visibilityExtender', 'name': 'beyond door 126',
'comment': '',
'parentEntId': 60014,
'event': 30021,
'newZones': [
15, 106]},
30055: {'type': 'visibilityExtender', 'name': 'beyond door 105',
'comment': '',
'parentEntId': 60019,
'event': 30001,
'newZones': [
27, 127]},
30056: {'type': 'visibilityExtender', 'name': 'beyond door 105',
'comment': '',
'parentEntId': 60018,
'event': 30001,
'newZones': [
27, 127]},
30057: {'type': 'visibilityExtender', 'name': 'beyond door 103',
'comment': '',
'parentEntId': 60018,
'event': 60088,
'newZones': [
17]},
30059: {'type': 'visibilityExtender', 'name': 'beyond door 108',
'comment': '',
'parentEntId': 60006,
'event': 30004,
'newZones': [
8, 117]},
30060: {'type': 'visibilityExtender', 'name': 'beyond door 119',
'comment': '',
'parentEntId': 60006,
'event': 30014,
'newZones': [
13, 121]},
30061: {'type': 'visibilityExtender', 'name': 'intoWarehouse',
'comment': '',
'parentEntId': 60006,
'event': 30015,
'newZones': [
24, 39]},
30062: {'type': 'visibilityExtender', 'name': 'beyond door 107',
'comment': '',
'parentEntId': 60024,
'event': 30003,
'newZones': [
27, 127]},
30063: {'type': 'visibilityExtender', 'name': 'intoHallway',
'comment': '',
'parentEntId': 60031,
'event': 30013,
'newZones': [
6,
109,
116,
117]},
30064: {'type': 'visibilityExtender', 'name': 'beyondLowerWestDoor',
'comment': '',
'parentEntId': 60007,
'event': 30015,
'newZones': [
12,
21,
26,
34,
40,
41,
53,
200]},
30066: {'type': 'visibilityExtender', 'name': 'beyondLowerEastDoor',
'comment': '',
'parentEntId': 60007,
'event': 30010,
'newZones': [
9,
25,
26,
33,
38,
41,
200]},
30067: {'type': 'visibilityExtender', 'name': 'beyondUpperEastDoor',
'comment': '',
'parentEntId': 60007,
'event': 30019,
'newZones': [
9,
33,
38,
41,
200,
222]},
30069: {'type': 'visibilityExtender', 'name': 'beyond door 118',
'comment': '',
'parentEntId': 60000,
'event': 30068,
'newZones': [
23]},
30071: {'type': 'visibilityExtender', 'name': 'intoLobby',
'comment': '',
'parentEntId': 60001,
'event': 60030,
'newZones': [
4, 114]},
30073: {'type': 'visibilityExtender', 'name': 'intoLobbyHallway',
'comment': '',
'parentEntId': 60031,
'event': 60033,
'newZones': [
5, 113]},
30075: {'type': 'visibilityExtender', 'name': 'intoGearRoom',
'comment': '',
'parentEntId': 60031,
'event': 60034,
'newZones': [
7]},
60008: {'type': 'visibilityExtender', 'name': 'beyondUpperWestDoor',
'comment': '',
'parentEntId': 60007,
'event': 30020,
'newZones': [
12,
21,
34,
40,
41,
60,
127,
200]},
60010: {'type': 'visibilityExtender', 'name': 'intoWarehouse',
'comment': '',
'parentEntId': 60009,
'event': 30019,
'newZones': [
24, 39, 125]},
60012: {'type': 'visibilityExtender', 'name': 'beyond door 125',
'comment': '',
'parentEntId': 60011,
'event': 30020,
'newZones': [
24, 39, 124]},
60020: {'type': 'visibilityExtender', 'name': 'beyond door 106',
'comment': '',
'parentEntId': 60015,
'event': 60076,
'newZones': [
16]},
60021: {'type': 'visibilityExtender', 'name': 'beyond door 116',
'comment': '',
'parentEntId': 10051,
'event': 60023,
'newZones': [
6, 118]},
60022: {'type': 'visibilityExtender', 'name': 'beyond door 118',
'comment': '',
'parentEntId': 10051,
'event': 60025,
'newZones': [
23]},
60026: {'type': 'visibilityExtender', 'name': 'beyond door 109',
'comment': '',
'parentEntId': 60000,
'event': 60028,
'newZones': [
7]},
60027: {'type': 'visibilityExtender', 'name': 'beyond door 117',
'comment': '',
'parentEntId': 60000,
'event': 60029,
'newZones': [
8]},
60032: {'type': 'visibilityExtender', 'name': 'intoBoilerRoom',
'comment': '',
'parentEntId': 60031,
'event': 60035,
'newZones': [
8]},
60036: {'type': 'visibilityExtender', 'name': 'beyond door 117',
'comment': '',
'parentEntId': 60002,
'event': 60037,
'newZones': [
8]},
60038: {'type': 'visibilityExtender', 'name': 'beyond door 109',
'comment': '',
'parentEntId': 60005,
'event': 60039,
'newZones': [
7]},
60040: {'type': 'visibilityExtender', 'name': 'beyond door 124',
'comment': '',
'parentEntId': 60011,
'event': 60041,
'newZones': [
38]},
60042: {'type': 'visibilityExtender', 'name': 'beyondWarehouse',
'comment': '',
'parentEntId': 60009,
'event': 60043,
'newZones': [
12, 200]},
60045: {'type': 'visibilityExtender', 'name': 'beyond door 124',
'comment': '',
'parentEntId': 60044,
'event': 60047,
'newZones': [
24]},
60046: {'type': 'visibilityExtender', 'name': 'beyond door 128',
'comment': '',
'parentEntId': 60044,
'event': 10002,
'newZones': [
31]},
60048: {'type': 'visibilityExtender', 'name': 'beyond door 127',
'comment': '',
'parentEntId': 60024,
'event': 60049,
'newZones': [
21, 200]},
60050: {'type': 'visibilityExtender', 'name': 'beyond door 127',
'comment': '',
'parentEntId': 60019,
'event': 60051,
'newZones': [
21, 34, 200]},
60052: {'type': 'visibilityExtender', 'name': 'beyond door 121',
'comment': '',
'parentEntId': 60016,
'event': 60053,
'newZones': [
13, 119]},
60054: {'type': 'visibilityExtender', 'name': 'beyond door 126',
'comment': '',
'parentEntId': 60017,
'event': 60055,
'newZones': [
14, 17, 121]},
60056: {'type': 'visibilityExtender', 'name': 'beyond door 126',
'comment': '',
'parentEntId': 60013,
'event': 60057,
'newZones': [
15, 106]},
60058: {'type': 'visibilityExtender', 'name': 'beyond door 116',
'comment': '',
'parentEntId': 60005,
'event': 60059,
'newZones': [
5]},
60060: {'type': 'visibilityExtender', 'name': 'beyond door 118',
'comment': '',
'parentEntId': 60005,
'event': 60061,
'newZones': [
23]},
60062: {'type': 'visibilityExtender', 'name': 'beyond door 116',
'comment': '',
'parentEntId': 60002,
'event': 60064,
'newZones': [
5]},
60063: {'type': 'visibilityExtender', 'name': 'beyond door 118',
'comment': '',
'parentEntId': 60002,
'event': 60065,
'newZones': [
23]},
60068: {'type': 'visibilityExtender', 'name': 'beyond door 105',
'comment': '',
'parentEntId': 60067,
'event': 30001,
'newZones': [
18,
19,
20,
131]},
60069: {'type': 'visibilityExtender', 'name': 'beyond door 107',
'comment': '',
'parentEntId': 60067,
'event': 30003,
'newZones': [
22]},
60070: {'type': 'visibilityExtender', 'name': 'beyond door 127',
'comment': '',
'parentEntId': 60067,
'event': 10052,
'newZones': [
12,
21,
26,
34,
35,
40,
41,
53,
60,
200]},
60071: {'type': 'visibilityExtender', 'name': 'beyond door 127',
'comment': '',
'parentEntId': 60006,
'event': 10052,
'newZones': [
27, 105, 107]},
60072: {'type': 'visibilityExtender', 'name': 'beyond door 107',
'comment': '',
'parentEntId': 60006,
'event': 60074,
'newZones': [
22]},
60073: {'type': 'visibilityExtender', 'name': 'beyond door 105',
'comment': '',
'parentEntId': 60006,
'event': 60075,
'newZones': [
18]},
60077: {'type': 'visibilityExtender', 'name': 'beyond door 106',
'comment': '',
'parentEntId': 60014,
'event': 60078,
'newZones': [
16]},
60079: {'type': 'visibilityExtender', 'name': 'beyond door 106',
'comment': '',
'parentEntId': 60013,
'event': 60080,
'newZones': [
16]},
60081: {'type': 'visibilityExtender', 'name': 'beyond door 121',
'comment': '',
'parentEntId': 60017,
'event': 60082,
'newZones': [
13]},
60083: {'type': 'visibilityExtender', 'name': 'beyond door 119',
'comment': '',
'parentEntId': 60005,
'event': 60084,
'newZones': [
13]},
60085: {'type': 'visibilityExtender', 'name': 'beyond door 112',
'comment': '',
'parentEntId': 60002,
'event': 60086,
'newZones': [
10]},
60087: {'type': 'visibilityExtender', 'name': 'beyond door 105',
'comment': '',
'parentEntId': 60015,
'event': 60091,
'newZones': [
27]},
60089: {'type': 'visibilityExtender', 'name': 'beyond door 103',
'comment': '',
'parentEntId': 60019,
'event': 60088,
'newZones': [
17]},
60090: {'type': 'visibilityExtender', 'name': 'beyond door 103',
'comment': '',
'parentEntId': 60015,
'event': 60088,
'newZones': [
18, 19, 105]},
60092: {'type': 'visibilityExtender', 'name': 'beyond door 103',
'comment': '',
'parentEntId': 60067,
'event': 60093,
'newZones': [
17]},
60097: {'type': 'visibilityExtender', 'name': 'beyond door 130',
'comment': '',
'parentEntId': 60096,
'event': 60095,
'newZones': [
33,
34,
35,
36,
37,
60,
61,
128,
129,
200]},
60098: {'type': 'visibilityExtender', 'name': 'beyond door 130',
'comment': '',
'parentEntId': 60044,
'event': 60095,
'newZones': [
30]},
60099: {'type': 'visibilityExtender', 'name': 'beyond door 128',
'comment': '',
'parentEntId': 60096,
'event': 60100,
'newZones': [
31]},
60106: {'type': 'visibilityExtender', 'name': 'beyond door 129',
'comment': '',
'parentEntId': 60011,
'event': 60094,
'newZones': [
32]},
60107: {'type': 'visibilityExtender', 'name': 'beyond door 130',
'comment': '',
'parentEntId': 60011,
'event': 60095,
'newZones': [
30]},
60109: {'type': 'visibilityExtender', 'name': 'beyond door 129',
'comment': '',
'parentEntId': 60108,
'event': 60094,
'newZones': [
32]},
60110: {'type': 'visibilityExtender', 'name': 'beyond door 130',
'comment': '',
'parentEntId': 60108,
'event': 60095,
'newZones': [
30]},
60112: {'type': 'visibilityExtender', 'name': 'beyond door 129',
'comment': '',
'parentEntId': 60111,
'event': 60094,
'newZones': [
32]},
60113: {'type': 'visibilityExtender', 'name': 'beyond door 130',
'comment': '',
'parentEntId': 60111,
'event': 60095,
'newZones': [
30]},
60115: {'type': 'visibilityExtender', 'name': 'beyond door 129',
'comment': '',
'parentEntId': 60114,
'event': 60094,
'newZones': [
32]},
60116: {'type': 'visibilityExtender', 'name': 'beyond door 130',
'comment': '',
'parentEntId': 60114,
'event': 60095,
'newZones': [
30]},
60117: {'type': 'visibilityExtender', 'name': 'beyond door 103',
'comment': '',
'parentEntId': 60014,
'event': 60088,
'newZones': [
18]},
60120: {'type': 'visibilityExtender', 'name': 'beyond door 128',
'comment': '',
'parentEntId': 60108,
'event': 10002,
'newZones': [
31]},
60122: {'type': 'visibilityExtender', 'name': 'beyond door 128',
'comment': '',
'parentEntId': 60121,
'event': 10002,
'newZones': [
33,
34,
35,
36,
37,
60,
61,
128,
129,
130,
200]},
60123: {'type': 'visibilityExtender', 'name': 'beyond door 128',
'comment': '',
'parentEntId': 60111,
'event': 10002,
'newZones': []},
60124: {'type': 'visibilityExtender', 'name': 'beyond door 128',
'comment': '',
'parentEntId': 60114,
'event': 10002,
'newZones': [
31]},
60125: {'type': 'visibilityExtender', 'name': 'beyond door 128',
'comment': '',
'parentEntId': 60011,
'event': 10002,
'newZones': [
31]},
60127: {'type': 'visibilityExtender', 'name': 'beyond door 128',
'comment': '',
'parentEntId': 60126,
'event': 10002,
'newZones': [
31]},
60128: {'type': 'visibilityExtender', 'name': 'beyond door 129',
'comment': '',
'parentEntId': 60126,
'event': 60094,
'newZones': [
32]},
60129: {'type': 'visibilityExtender', 'name': 'beyond door 130',
'comment': '',
'parentEntId': 60126,
'event': 60095,
'newZones': [
30]},
60131: {'type': 'visibilityExtender', 'name': 'beyond door 129',
'comment': '',
'parentEntId': 60130,
'event': 60094,
'newZones': [
33,
34,
35,
36,
37,
60,
61,
128,
130,
200]},
60136: {'type': 'visibilityExtender', 'name': 'beyond door 129',
'comment': '',
'parentEntId': 60044,
'event': 60094,
'newZones': [
32]},
60137: {'type': 'visibilityExtender', 'name': 'beyond door 129',
'comment': '',
'parentEntId': 60096,
'event': 60138,
'newZones': [
32]},
60139: {'type': 'visibilityExtender', 'name': 'beyond door 129',
'comment': '',
'parentEntId': 60121,
'event': 60141,
'newZones': [
32]},
60140: {'type': 'visibilityExtender', 'name': 'beyond door 130',
'comment': '',
'parentEntId': 60121,
'event': 60142,
'newZones': [
30]}}
Scenario0 = {}
levelSpec = {'globalEntities': GlobalEntities, 'scenarios': [
Scenario0]} | 0.336549 | 0.212048 |
import os
from collections import OrderedDict
from . import _espeak
import threading
import languageHandler
from synthDriverHandler import SynthDriver, VoiceInfo, synthIndexReached, synthDoneSpeaking
import speech
from logHandler import log
from speech.commands import (
IndexCommand,
CharacterModeCommand,
LangChangeCommand,
BreakCommand,
PitchCommand,
RateCommand,
VolumeCommand,
PhonemeCommand,
)
class SynthDriver(SynthDriver):
    """NVDA synthesizer driver for the eSpeak NG engine.

    Converts NVDA speech sequences into eSpeak's SSML dialect and hands
    them to the native engine through the ``_espeak`` bindings.
    """
    name = "espeak"
    description = "eSpeak NG"
    # Settings shown in NVDA's synthesizer configuration dialog.
    supportedSettings = (
        SynthDriver.VoiceSetting(),
        SynthDriver.VariantSetting(),
        SynthDriver.RateSetting(),
        SynthDriver.RateBoostSetting(),
        SynthDriver.PitchSetting(),
        SynthDriver.InflectionSetting(),
        SynthDriver.VolumeSetting(),
    )
    # Speech-sequence commands this driver can render.
    supportedCommands = {
        IndexCommand,
        CharacterModeCommand,
        LangChangeCommand,
        BreakCommand,
        PitchCommand,
        RateCommand,
        VolumeCommand,
        PhonemeCommand,
    }
    supportedNotifications = {synthIndexReached, synthDoneSpeaking}

    @classmethod
    def check(cls):
        """eSpeak NG is bundled, so this driver is always usable."""
        return True

    def __init__(self):
        _espeak.initialize(self._onIndexReached)
        log.info("Using eSpeak NG version %s" % _espeak.info())
        lang = languageHandler.getLanguage()
        _espeak.setVoiceByLanguage(lang)
        self._language = lang
        self._variantDict = _espeak.getVariantDict()
        self.variant = "max"
        # Initial defaults, expressed as 0-100 percentages.
        self.rate = 30
        self.pitch = 40
        self.inflection = 75

    def _get_language(self):
        return self._language

    # Maps prosody command types to the SSML <prosody> attribute they control.
    PROSODY_ATTRS = {
        PitchCommand: "pitch",
        VolumeCommand: "volume",
        RateCommand: "rate",
    }

    # Minimal IPA -> eSpeak phoneme mnemonic table used by PhonemeCommand.
    IPA_TO_ESPEAK = {
        u"θ": u"T",
        u"s": u"s",
        u"ˈ": u"'",
    }

    def _processText(self, text):
        """Sanitize raw text before embedding it in eSpeak's SSML input.

        Strips the control character used internally for embedded commands,
        XML-escapes reserved characters, and pads ``[`` so that ``[[`` is
        never accidentally interpreted as eSpeak phoneme input.
        """
        return text.translate({
            0x1: None,  # used for embedded commands
            # Fix: '<' and '>' must be XML-escaped; the previous mapping
            # translated them to themselves (a no-op), letting user text be
            # parsed as SSML markup despite the "because of XML" intent.
            0x3C: u"&lt;",  # <: because of XML
            0x3E: u"&gt;",  # >: because of XML
            0x5B: u" [",  # [: [[ indicates phonemes
        })

    def speak(self, speechSequence):
        """Render a speech sequence to SSML and pass it to eSpeak."""
        defaultLanguage = self._language
        textList = []
        langChanged = False
        prosody = {}
        # We output malformed XML, as we might close an outer tag after opening an inner one; e.g.
        # <voice><prosody></voice></prosody>.
        # However, eSpeak doesn't seem to mind.
        for item in speechSequence:
            if isinstance(item, str):
                textList.append(self._processText(item))
            elif isinstance(item, IndexCommand):
                textList.append("<mark name=\"%d\" />" % item.index)
            elif isinstance(item, CharacterModeCommand):
                textList.append("<say-as interpret-as=\"characters\">" if item.state else "</say-as>")
            elif isinstance(item, LangChangeCommand):
                if langChanged:
                    textList.append("</voice>")
                textList.append("<voice xml:lang=\"%s\">" % (item.lang if item.lang else defaultLanguage).replace('_', '-'))
                langChanged = True
            elif isinstance(item, BreakCommand):
                textList.append('<break time="%dms" />' % item.time)
            elif type(item) in self.PROSODY_ATTRS:
                if prosody:
                    # Close previous prosody tag.
                    textList.append("</prosody>")
                attr = self.PROSODY_ATTRS[type(item)]
                if item.multiplier == 1:
                    # Returning to normal.
                    try:
                        del prosody[attr]
                    except KeyError:
                        pass
                else:
                    prosody[attr] = int(item.multiplier * 100)
                if not prosody:
                    continue
                textList.append("<prosody")
                for attr, val in prosody.items():
                    textList.append(' %s="%d%%"' % (attr, val))
                textList.append(">")
            elif isinstance(item, PhonemeCommand):
                # We can't use str.translate because we want to reject unknown characters.
                try:
                    phonemes = "".join([self.IPA_TO_ESPEAK[char] for char in item.ipa])
                    # There needs to be a space after the phoneme command.
                    # Otherwise, eSpeak will announce a subsequent SSML tag instead of processing it.
                    textList.append(u"[[%s]] " % phonemes)
                except KeyError:
                    log.debugWarning("Unknown character in IPA string: %s" % item.ipa)
                    if item.text:
                        # Fall back to speaking the plain-text rendering.
                        textList.append(self._processText(item.text))
            else:
                log.error("Unknown speech: %s" % item)
        # Close any open tags.
        if langChanged:
            textList.append("</voice>")
        if prosody:
            textList.append("</prosody>")
        text = u"".join(textList)
        _espeak.speak(text)

    def cancel(self):
        """Stop any in-progress speech immediately."""
        _espeak.stop()

    def pause(self, switch):
        """Pause (True) or resume (False) speech output."""
        _espeak.pause(switch)

    _rateBoost = False
    RATE_BOOST_MULTIPLIER = 3

    def _get_rateBoost(self):
        return self._rateBoost

    def _set_rateBoost(self, enable):
        if enable == self._rateBoost:
            return
        # Preserve the user-visible rate percentage across the boost change.
        rate = self.rate
        self._rateBoost = enable
        self.rate = rate

    def _get_rate(self):
        val = _espeak.getParameter(_espeak.espeakRATE, 1)
        if self._rateBoost:
            val = int(val / self.RATE_BOOST_MULTIPLIER)
        return self._paramToPercent(val, _espeak.minRate, _espeak.maxRate)

    def _set_rate(self, rate):
        val = self._percentToParam(rate, _espeak.minRate, _espeak.maxRate)
        if self._rateBoost:
            val = int(val * self.RATE_BOOST_MULTIPLIER)
        _espeak.setParameter(_espeak.espeakRATE, val, 0)

    def _get_pitch(self):
        val = _espeak.getParameter(_espeak.espeakPITCH, 1)
        return self._paramToPercent(val, _espeak.minPitch, _espeak.maxPitch)

    def _set_pitch(self, pitch):
        val = self._percentToParam(pitch, _espeak.minPitch, _espeak.maxPitch)
        _espeak.setParameter(_espeak.espeakPITCH, val, 0)

    def _get_inflection(self):
        # eSpeak's RANGE (inflection) parameter shares the pitch bounds.
        val = _espeak.getParameter(_espeak.espeakRANGE, 1)
        return self._paramToPercent(val, _espeak.minPitch, _espeak.maxPitch)

    def _set_inflection(self, val):
        val = self._percentToParam(val, _espeak.minPitch, _espeak.maxPitch)
        _espeak.setParameter(_espeak.espeakRANGE, val, 0)

    def _get_volume(self):
        return _espeak.getParameter(_espeak.espeakVOLUME, 1)

    def _set_volume(self, volume):
        _espeak.setParameter(_espeak.espeakVOLUME, volume, 0)

    def _getAvailableVoices(self):
        """Return an OrderedDict mapping voice identifier -> VoiceInfo."""
        voices = OrderedDict()
        for v in _espeak.getVoiceList():
            language = _espeak.decodeEspeakString(v.languages[1:])
            # #7167: Some languages names contain unicode characters EG: Norwegian Bokmål
            name = _espeak.decodeEspeakString(v.name)
            # #5783: For backwards compatibility, voice identifies should always be lowercase
            identifier = os.path.basename(_espeak.decodeEspeakString(v.identifier)).lower()
            voices[identifier] = VoiceInfo(identifier, name, language)
        return voices

    def _get_voice(self):
        curVoice = getattr(self, '_voice', None)
        if curVoice:
            return curVoice
        curVoice = _espeak.getCurrentVoice()
        if not curVoice:
            return ""
        # #5783: For backwards compatibility, voice identifies should always be lowercase
        return _espeak.decodeEspeakString(curVoice.identifier).split('+')[0].lower()

    def _set_voice(self, identifier):
        if not identifier:
            return
        # #5783: For backwards compatibility, voice identifies should always be lowercase
        identifier = identifier.lower()
        if "\\" in identifier:
            identifier = os.path.basename(identifier)
        self._voice = identifier
        try:
            _espeak.setVoiceAndVariant(voice=identifier, variant=self._variant)
        except:
            # Clear the cached voice so a failed change is not reported as current,
            # then re-raise for the caller.
            self._voice = None
            raise
        self._language = super(SynthDriver, self).language

    def _onIndexReached(self, index):
        """Engine callback: an index marker was reached (None => utterance done)."""
        if index is not None:
            synthIndexReached.notify(synth=self, index=index)
        else:
            synthDoneSpeaking.notify(synth=self)

    def terminate(self):
        _espeak.terminate()

    def _get_variant(self):
        return self._variant

    def _set_variant(self, val):
        # Unknown variants silently fall back to "max".
        self._variant = val if val in self._variantDict else "max"
        _espeak.setVoiceAndVariant(variant=self._variant)
def _getAvailableVariants(self):
return OrderedDict((ID,VoiceInfo(ID, name)) for ID, name in self._variantDict.items()) | source/synthDrivers/espeak.py |
import os
from collections import OrderedDict
from . import _espeak
import threading
import languageHandler
from synthDriverHandler import SynthDriver, VoiceInfo, synthIndexReached, synthDoneSpeaking
import speech
from logHandler import log
from speech.commands import (
IndexCommand,
CharacterModeCommand,
LangChangeCommand,
BreakCommand,
PitchCommand,
RateCommand,
VolumeCommand,
PhonemeCommand,
)
class SynthDriver(SynthDriver):
    """NVDA synthesizer driver for the eSpeak NG engine.

    Converts NVDA speech sequences into eSpeak's SSML dialect and hands
    them to the native engine through the ``_espeak`` bindings.
    """
    name = "espeak"
    description = "eSpeak NG"
    # Settings shown in NVDA's synthesizer configuration dialog.
    supportedSettings=(
        SynthDriver.VoiceSetting(),
        SynthDriver.VariantSetting(),
        SynthDriver.RateSetting(),
        SynthDriver.RateBoostSetting(),
        SynthDriver.PitchSetting(),
        SynthDriver.InflectionSetting(),
        SynthDriver.VolumeSetting(),
    )
    # Speech-sequence commands this driver can render.
    supportedCommands = {
        IndexCommand,
        CharacterModeCommand,
        LangChangeCommand,
        BreakCommand,
        PitchCommand,
        RateCommand,
        VolumeCommand,
        PhonemeCommand,
    }
    supportedNotifications = {synthIndexReached, synthDoneSpeaking}
    @classmethod
    def check(cls):
        # eSpeak NG is bundled, so this driver is always usable.
        return True
    def __init__(self):
        _espeak.initialize(self._onIndexReached)
        log.info("Using eSpeak NG version %s" % _espeak.info())
        lang=languageHandler.getLanguage()
        _espeak.setVoiceByLanguage(lang)
        self._language=lang
        self._variantDict=_espeak.getVariantDict()
        self.variant="max"
        # Initial defaults, expressed as 0-100 percentages.
        self.rate=30
        self.pitch=40
        self.inflection=75
    def _get_language(self):
        return self._language
    # Maps prosody command types to the SSML <prosody> attribute they control.
    PROSODY_ATTRS = {
        PitchCommand: "pitch",
        VolumeCommand: "volume",
        RateCommand: "rate",
    }
    # Minimal IPA -> eSpeak phoneme mnemonic table used by PhonemeCommand.
    IPA_TO_ESPEAK = {
        u"θ": u"T",
        u"s": u"s",
        u"ˈ": u"'",
    }
    def _processText(self, text):
        # We need to make several replacements.
        # NOTE(review): the 0x3C/0x3E entries translate '<' and '>' to
        # themselves — a no-op — even though the comments say the intent is
        # XML safety.  Upstream escapes these to "&lt;"/"&gt;"; confirm
        # whether the escaping was lost here.
        return text.translate({
            0x1: None, # used for embedded commands
            0x3C: u"<", # <: because of XML
            0x3E: u">", # >: because of XML
            0x5B: u" [", # [: [[ indicates phonemes
        })
    def speak(self,speechSequence):
        """Render a speech sequence to SSML and pass it to eSpeak."""
        defaultLanguage=self._language
        textList=[]
        langChanged=False
        prosody={}
        # We output malformed XML, as we might close an outer tag after opening an inner one; e.g.
        # <voice><prosody></voice></prosody>.
        # However, eSpeak doesn't seem to mind.
        for item in speechSequence:
            if isinstance(item,str):
                textList.append(self._processText(item))
            elif isinstance(item, IndexCommand):
                textList.append("<mark name=\"%d\" />"%item.index)
            elif isinstance(item, CharacterModeCommand):
                textList.append("<say-as interpret-as=\"characters\">" if item.state else "</say-as>")
            elif isinstance(item, LangChangeCommand):
                if langChanged:
                    textList.append("</voice>")
                textList.append("<voice xml:lang=\"%s\">"%(item.lang if item.lang else defaultLanguage).replace('_','-'))
                langChanged=True
            elif isinstance(item, BreakCommand):
                textList.append('<break time="%dms" />' % item.time)
            elif type(item) in self.PROSODY_ATTRS:
                if prosody:
                    # Close previous prosody tag.
                    textList.append("</prosody>")
                attr=self.PROSODY_ATTRS[type(item)]
                if item.multiplier==1:
                    # Returning to normal.
                    try:
                        del prosody[attr]
                    except KeyError:
                        pass
                else:
                    prosody[attr]=int(item.multiplier* 100)
                if not prosody:
                    continue
                textList.append("<prosody")
                for attr,val in prosody.items():
                    textList.append(' %s="%d%%"'%(attr,val))
                textList.append(">")
            elif isinstance(item, PhonemeCommand):
                # We can't use str.translate because we want to reject unknown characters.
                try:
                    phonemes="".join([self.IPA_TO_ESPEAK[char] for char in item.ipa])
                    # There needs to be a space after the phoneme command.
                    # Otherwise, eSpeak will announce a subsequent SSML tag instead of processing it.
                    textList.append(u"[[%s]] "%phonemes)
                except KeyError:
                    log.debugWarning("Unknown character in IPA string: %s"%item.ipa)
                    if item.text:
                        # Fall back to speaking the plain-text rendering.
                        textList.append(self._processText(item.text))
            else:
                log.error("Unknown speech: %s"%item)
        # Close any open tags.
        if langChanged:
            textList.append("</voice>")
        if prosody:
            textList.append("</prosody>")
        text=u"".join(textList)
        _espeak.speak(text)
    def cancel(self):
        # Stop any in-progress speech immediately.
        _espeak.stop()
    def pause(self,switch):
        # Pause (True) or resume (False) speech output.
        _espeak.pause(switch)
    _rateBoost = False
    RATE_BOOST_MULTIPLIER = 3
    def _get_rateBoost(self):
        return self._rateBoost
    def _set_rateBoost(self, enable):
        if enable == self._rateBoost:
            return
        # Preserve the user-visible rate percentage across the boost change.
        rate = self.rate
        self._rateBoost = enable
        self.rate = rate
    def _get_rate(self):
        val=_espeak.getParameter(_espeak.espeakRATE,1)
        if self._rateBoost:
            val=int(val/self.RATE_BOOST_MULTIPLIER)
        return self._paramToPercent(val,_espeak.minRate,_espeak.maxRate)
    def _set_rate(self,rate):
        val=self._percentToParam(rate, _espeak.minRate, _espeak.maxRate)
        if self._rateBoost:
            val=int(val*self.RATE_BOOST_MULTIPLIER)
        _espeak.setParameter(_espeak.espeakRATE,val,0)
    def _get_pitch(self):
        val=_espeak.getParameter(_espeak.espeakPITCH,1)
        return self._paramToPercent(val,_espeak.minPitch,_espeak.maxPitch)
    def _set_pitch(self,pitch):
        val=self._percentToParam(pitch, _espeak.minPitch, _espeak.maxPitch)
        _espeak.setParameter(_espeak.espeakPITCH,val,0)
    def _get_inflection(self):
        # eSpeak's RANGE (inflection) parameter shares the pitch bounds.
        val=_espeak.getParameter(_espeak.espeakRANGE,1)
        return self._paramToPercent(val,_espeak.minPitch,_espeak.maxPitch)
    def _set_inflection(self,val):
        val=self._percentToParam(val, _espeak.minPitch, _espeak.maxPitch)
        _espeak.setParameter(_espeak.espeakRANGE,val,0)
    def _get_volume(self):
        return _espeak.getParameter(_espeak.espeakVOLUME,1)
    def _set_volume(self,volume):
        _espeak.setParameter(_espeak.espeakVOLUME,volume,0)
    def _getAvailableVoices(self):
        """Return an OrderedDict mapping voice identifier -> VoiceInfo."""
        voices=OrderedDict()
        for v in _espeak.getVoiceList():
            l=_espeak.decodeEspeakString(v.languages[1:])
            # #7167: Some languages names contain unicode characters EG: Norwegian Bokmål
            name=_espeak.decodeEspeakString(v.name)
            # #5783: For backwards compatibility, voice identifies should always be lowercase
            identifier=os.path.basename(_espeak.decodeEspeakString(v.identifier)).lower()
            voices[identifier]=VoiceInfo(identifier,name,l)
        return voices
    def _get_voice(self):
        curVoice=getattr(self,'_voice',None)
        if curVoice: return curVoice
        curVoice = _espeak.getCurrentVoice()
        if not curVoice:
            return ""
        # #5783: For backwards compatibility, voice identifies should always be lowercase
        return _espeak.decodeEspeakString(curVoice.identifier).split('+')[0].lower()
    def _set_voice(self, identifier):
        if not identifier:
            return
        # #5783: For backwards compatibility, voice identifies should always be lowercase
        identifier=identifier.lower()
        if "\\" in identifier:
            identifier=os.path.basename(identifier)
        self._voice=identifier
        try:
            _espeak.setVoiceAndVariant(voice=identifier,variant=self._variant)
        except:
            # Clear the cached voice so a failed change is not reported as
            # current, then re-raise for the caller.
            self._voice=None
            raise
        self._language=super(SynthDriver,self).language
    def _onIndexReached(self, index):
        # Engine callback: an index marker was reached (None => utterance done).
        if index is not None:
            synthIndexReached.notify(synth=self, index=index)
        else:
            synthDoneSpeaking.notify(synth=self)
    def terminate(self):
        _espeak.terminate()
    def _get_variant(self):
        return self._variant
    def _set_variant(self,val):
        # Unknown variants silently fall back to "max".
        self._variant = val if val in self._variantDict else "max"
        _espeak.setVoiceAndVariant(variant=self._variant)
def _getAvailableVariants(self):
return OrderedDict((ID,VoiceInfo(ID, name)) for ID, name in self._variantDict.items()) | 0.336767 | 0.05301 |
from samplebase import SampleBase
from rgbmatrix import graphics
import time
import platform
print(platform.python_version())
import livesports
from livesports import LiveSportsClient
from yahoo_weather.weather import YahooWeather
from yahoo_weather.config.units import Unit
from threading import Timer
class GraphicsTest(SampleBase):
    """LED-matrix app that cycles through today's NBA scoreboards and
    overlays current Philadelphia weather.

    Inherits the rgbmatrix SampleBase plumbing: ``process()`` parses CLI
    options and invokes ``run()``.
    """

    def __init__(self, *args, **kwargs):
        super(GraphicsTest, self).__init__(*args, **kwargs)
        # Prime the weather cache; refresh_weather is also re-run
        # periodically by Timer threads started in __main__.
        self.refresh_weather()

    def refresh_weather(self):
        """Fetch current Philadelphia weather and cache it on the instance."""
        # SECURITY: credentials are hard-coded placeholders; load them from
        # environment variables or a config file instead of source code.
        self.philly_weather = YahooWeather(APP_ID="RVSaie5a", api_key="my_key",
                                           api_secret="my_secret")
        self.philly_weather.get_yahoo_weather_by_city("philadelphia", Unit.fahrenheit)
        print("Updated weather")

    def run(self):
        """Display loop: show one game's scoreboard every 5 seconds,
        alternating text colour each frame, with a weather banner on top."""
        rotation = 0
        offscreen_canvas = self.matrix.CreateFrameCanvas()
        font = graphics.Font()
        font.LoadFont("../../../fonts/5x8.bdf")
        weather_font = graphics.Font()
        weather_font.LoadFont("../../../fonts/4x6.bdf")
        green = graphics.Color(0, 255, 0)
        blue = graphics.Color(0, 255, 213)
        yellow = graphics.Color(255, 255, 0)
        client = LiveSportsClient(api_key='my_key')
        scoreboard_index = 0
        client.set_todays_nba_games_list()
        length_of_games_today = len(client.games_today)
        while True:
            home_score, away_score, final = client.get_scoreboard_for_todays_game(client.games_today[scoreboard_index]).get_scoreboard()
            # Alternate blue/green each frame; both branches drew the same
            # text, so select the colour once instead of duplicating calls.
            color = blue if rotation % 2 == 0 else green
            graphics.DrawText(offscreen_canvas, font, 2, 9, color, home_score)
            graphics.DrawText(offscreen_canvas, font, 2, 20, color, away_score)
            graphics.DrawText(offscreen_canvas, font, 2, 30, color, final)
            # Weather banner in the top-right corner.
            graphics.DrawText(offscreen_canvas, weather_font, 40, 6, yellow, self.philly_weather.condition.text)
            graphics.DrawText(offscreen_canvas, weather_font, 55, 13, yellow, str(self.philly_weather.condition.temperature))
            # Advance to the next game, wrapping back to the first.
            scoreboard_index = scoreboard_index + 1
            if scoreboard_index == length_of_games_today:
                scoreboard_index = 0
            # Keep the frame counter bounded; only its parity is used.
            rotation = rotation + 1
            if rotation == 90000:
                rotation = 0
            offscreen_canvas = self.matrix.SwapOnVSync(offscreen_canvas)
            offscreen_canvas = self.matrix.CreateFrameCanvas()
            time.sleep(5)
# Main function
if __name__ == "__main__":
    graphics_test = GraphicsTest()
    # My "just get it working" solution after digging through python in-program cron scheduling forums:
    # Schedule periodic weather refreshes with one-shot Timer threads instead
    # of a real scheduler; the i-th timer fires once at i * 15 minutes.
    # (The i == 0 timer fires immediately, duplicating the fetch done in
    # GraphicsTest.__init__.)
    weather_update_interval = 900.0 #every 15 mins = 900.0
    for i in range(16): # 16 weather updates = assumes program runs for at max 4 hours. Also creates 16 threads
        Timer(weather_update_interval * i, graphics_test.refresh_weather).start()
    # process() (from SampleBase) parses CLI args and calls run();
    # a falsy return indicates bad arguments, so show usage.
    if (not graphics_test.process()):
graphics_test.print_help() | ledmatrix_scripts/display_led_scoreboard.py | from samplebase import SampleBase
from rgbmatrix import graphics
import time
import platform
print(platform.python_version())
import livesports
from livesports import LiveSportsClient
from yahoo_weather.weather import YahooWeather
from yahoo_weather.config.units import Unit
from threading import Timer
class GraphicsTest(SampleBase):
    """LED-matrix app that cycles through today's NBA scoreboards and
    overlays current Philadelphia weather.

    Inherits the rgbmatrix SampleBase plumbing: ``process()`` parses CLI
    options and invokes ``run()``.
    """
    def __init__(self, *args, **kwargs):
        super(GraphicsTest, self).__init__(*args, **kwargs)
        # Prime the weather cache; refresh_weather is also re-run
        # periodically by Timer threads started in __main__.
        self.refresh_weather()
    def refresh_weather(self):
        """Fetch current Philadelphia weather and cache it on the instance."""
        # NOTE(review): credentials are hard-coded placeholders; load them
        # from environment variables or a config file before publishing.
        self.philly_weather = YahooWeather(APP_ID="RVSaie5a", api_key="my_key",
                                           api_secret="my_secret")
        self.philly_weather.get_yahoo_weather_by_city("philadelphia", Unit.fahrenheit)
        print("Updated weather")
    def run(self):
        """Display loop: show one game's scoreboard every 5 seconds,
        alternating text colour each frame, with a weather banner on top."""
        rotation = 0
        offscreen_canvas = self.matrix.CreateFrameCanvas()
        #canvas = self.matrix
        font = graphics.Font()
        font.LoadFont("../../../fonts/5x8.bdf")
        weather_font = graphics.Font()
        weather_font.LoadFont("../../../fonts/4x6.bdf")
        green = graphics.Color(0, 255, 0)
        #graphics.DrawCircle(canvas, 15, 15, 10, green)
        blue = graphics.Color(0, 255, 213)
        yellow = graphics.Color(255, 255, 0)
        client = LiveSportsClient(api_key='my_key')
        scoreboard_index = 0
        client.set_todays_nba_games_list()
        length_of_games_today = len(client.games_today)
        # NOTE(review): these initial values are dead — they are overwritten
        # at the top of every loop iteration before use.
        home_score = ''
        away_score = ''
        final = ''
        while True:
            home_score, away_score, final = client.get_scoreboard_for_todays_game(client.games_today[scoreboard_index]).get_scoreboard()
            #home_score = "PHI 32"
            #away_score = "MIN 22"
            #final = "FINAL"
            # Alternate blue/green each frame (both branches draw the same text).
            if rotation % 2 == 0:
                graphics.DrawText(offscreen_canvas, font, 2, 9, blue, home_score)
                graphics.DrawText(offscreen_canvas, font, 2, 20, blue, away_score)
                graphics.DrawText(offscreen_canvas, font, 2, 30, blue, final)
            else:
                #home_score = "HHI 32"
                #away_score = "NNN 22"
                #final = "INALF"
                graphics.DrawText(offscreen_canvas, font, 2, 9, green, home_score)
                graphics.DrawText(offscreen_canvas, font, 2, 20, green, away_score)
                graphics.DrawText(offscreen_canvas, font, 2, 30, green, final)
            # Weather banner in the top-right corner.
            graphics.DrawText(offscreen_canvas, weather_font, 40, 6, yellow, self.philly_weather.condition.text)
            graphics.DrawText(offscreen_canvas, weather_font, 55, 13, yellow, str(self.philly_weather.condition.temperature))
            # Advance to the next game, wrapping back to the first.
            scoreboard_index = scoreboard_index + 1
            if scoreboard_index == length_of_games_today:
                scoreboard_index = 0
            # Keep the frame counter bounded; only its parity is used.
            rotation = rotation + 1
            if rotation == 90000:
                rotation = 0
            offscreen_canvas = self.matrix.SwapOnVSync(offscreen_canvas)
            offscreen_canvas = self.matrix.CreateFrameCanvas()
            time.sleep(5)
            # NOTE(review): dead code — reassigned at the top of the loop.
            home_score = ''
            away_score = ''
            final = ''
# Main function
if __name__ == "__main__":
    graphics_test = GraphicsTest()
    # My "just get it working" solution after digging through python in-program cron scheduling forums:
    # Schedule periodic weather refreshes with one-shot Timer threads instead
    # of a real scheduler; the i-th timer fires once at i * 15 minutes.
    # (The i == 0 timer fires immediately, duplicating the fetch done in
    # GraphicsTest.__init__.)
    weather_update_interval = 900.0 #every 15 mins = 900.0
    for i in range(16): # 16 weather updates = assumes program runs for at max 4 hours. Also creates 16 threads
        Timer(weather_update_interval * i, graphics_test.refresh_weather).start()
    # process() (from SampleBase) parses CLI args and calls run();
    # a falsy return indicates bad arguments, so show usage.
    if (not graphics_test.process()):
graphics_test.print_help() | 0.323166 | 0.154823 |
import time, os, glob
import cv2
import numpy as np
from erl.customized_agents.customized_ppo import CustomizedPPO
from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize
from erl.tools.gym_helper import make_env
from erl.tools.adjust_camera_callback import AdjustCameraCallback
# Root directory where per-run image sequences are written.
img_path = f"videos/images/"  # NOTE(review): f-string has no placeholders; a plain literal would do
def test_current_exp(args):
    """Replay a trained PPO policy in a rendered environment.

    Loads the VecNormalize statistics and model named in ``args``, runs up
    to 1000 steps (or until the episode ends), and optionally saves window
    screenshots and the agent's vision observation as PNGs.
    """
    if args.save_img:
        # Pick the next unused integer-named folder under img_path.
        all_folders = glob.glob(os.path.join(img_path,"*"))
        all_folders = [os.path.basename(x) for x in all_folders]
        # Non-numeric folder names are ignored; "+ [0]" handles the empty case.
        all_folders = [int(x) if x.isnumeric() else -1 for x in all_folders] + [0]
        current_folder = max(all_folders) + 1
        current_folder = os.path.join(img_path, str(current_folder))
        os.makedirs(current_folder, exist_ok=True)
        print(f"Writing into {current_folder}")
        input("Press Enter...")
    env = DummyVecEnv([make_env(env_id=args.env_id, rank=0, seed=0, render=True)])
    # Restore observation/reward normalization statistics saved at training time.
    env = VecNormalize.load(args.vnorm_filename, env)
    model = CustomizedPPO.load(args.model_filename, env=env)
    callback = AdjustCameraCallback()
    obs = env.reset()
    callback.reset_lights(env.envs[0].env._p) # once window is opened, change the lighting
    if args.save_img:
        time.sleep(1) # please use this time to maximize the window, so that the image recorded will be full size
    with model.policy.features_extractor.start_testing():
        while True:
            for i in range(1000):
                action, _ = model.predict(obs, deterministic=True)
                obs, reward, done, info = env.step(action)
                callback.camera_simpy_follow_robot(target_env=env.envs[0])
                if args.save_img:
                    callback.write_a_image(current_folder=current_folder, step=i, target_env=env.envs[0])
                    # Wide observations presumably have an 8x8 RGB vision patch
                    # appended at the end — TODO confirm layout against the env.
                    if obs.shape[1]>100: # With Vision I guess
                        image = np.rollaxis(obs[:, -3*8*8:].reshape([3,8,8]), 0, start=3) * 255.0
                        print(image.shape)
                        # image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                        cv2.imwrite(f"{current_folder}/vision_{i:05}.png", image)
                if done:
                    break
                time.sleep(0.01)
            # One episode only: always leave the while loop after the for loop.
            break
        # NOTE(review): unreachable — the break above always exits first.
        time.sleep(0.1)
env.close() | erl/tests.py | import time, os, glob
import cv2
import numpy as np
from erl.customized_agents.customized_ppo import CustomizedPPO
from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize
from erl.tools.gym_helper import make_env
from erl.tools.adjust_camera_callback import AdjustCameraCallback
# Root directory where per-run image sequences are written.
img_path = f"videos/images/"  # NOTE(review): f-string has no placeholders; a plain literal would do
def test_current_exp(args):
    """Replay a trained PPO policy in a rendered environment.

    Loads the VecNormalize statistics and model named in ``args``, runs up
    to 1000 steps (or until the episode ends), and optionally saves window
    screenshots and the agent's vision observation as PNGs.
    """
    if args.save_img:
        # Pick the next unused integer-named folder under img_path.
        all_folders = glob.glob(os.path.join(img_path,"*"))
        all_folders = [os.path.basename(x) for x in all_folders]
        # Non-numeric folder names are ignored; "+ [0]" handles the empty case.
        all_folders = [int(x) if x.isnumeric() else -1 for x in all_folders] + [0]
        current_folder = max(all_folders) + 1
        current_folder = os.path.join(img_path, str(current_folder))
        os.makedirs(current_folder, exist_ok=True)
        print(f"Writing into {current_folder}")
        input("Press Enter...")
    env = DummyVecEnv([make_env(env_id=args.env_id, rank=0, seed=0, render=True)])
    # Restore observation/reward normalization statistics saved at training time.
    env = VecNormalize.load(args.vnorm_filename, env)
    model = CustomizedPPO.load(args.model_filename, env=env)
    callback = AdjustCameraCallback()
    obs = env.reset()
    callback.reset_lights(env.envs[0].env._p) # once window is opened, change the lighting
    if args.save_img:
        time.sleep(1) # please use this time to maximize the window, so that the image recorded will be full size
    with model.policy.features_extractor.start_testing():
        while True:
            for i in range(1000):
                action, _ = model.predict(obs, deterministic=True)
                obs, reward, done, info = env.step(action)
                callback.camera_simpy_follow_robot(target_env=env.envs[0])
                if args.save_img:
                    callback.write_a_image(current_folder=current_folder, step=i, target_env=env.envs[0])
                    # Wide observations presumably have an 8x8 RGB vision patch
                    # appended at the end — TODO confirm layout against the env.
                    if obs.shape[1]>100: # With Vision I guess
                        image = np.rollaxis(obs[:, -3*8*8:].reshape([3,8,8]), 0, start=3) * 255.0
                        print(image.shape)
                        # image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                        cv2.imwrite(f"{current_folder}/vision_{i:05}.png", image)
                if done:
                    break
                time.sleep(0.01)
            # One episode only: always leave the while loop after the for loop.
            break
        # NOTE(review): unreachable — the break above always exits first.
        time.sleep(0.1)
env.close() | 0.292393 | 0.219338 |
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateVcnDetails(object):
"""
CreateVcnDetails model.
"""
def __init__(self, **kwargs):
"""
Initializes a new CreateVcnDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param cidr_block:
The value to assign to the cidr_block property of this CreateVcnDetails.
:type cidr_block: str
:param cidr_blocks:
The value to assign to the cidr_blocks property of this CreateVcnDetails.
:type cidr_blocks: list[str]
:param compartment_id:
The value to assign to the compartment_id property of this CreateVcnDetails.
:type compartment_id: str
:param defined_tags:
The value to assign to the defined_tags property of this CreateVcnDetails.
:type defined_tags: dict(str, dict(str, object))
:param display_name:
The value to assign to the display_name property of this CreateVcnDetails.
:type display_name: str
:param dns_label:
The value to assign to the dns_label property of this CreateVcnDetails.
:type dns_label: str
:param freeform_tags:
The value to assign to the freeform_tags property of this CreateVcnDetails.
:type freeform_tags: dict(str, str)
:param is_ipv6_enabled:
The value to assign to the is_ipv6_enabled property of this CreateVcnDetails.
:type is_ipv6_enabled: bool
"""
self.swagger_types = {
'cidr_block': 'str',
'cidr_blocks': 'list[str]',
'compartment_id': 'str',
'defined_tags': 'dict(str, dict(str, object))',
'display_name': 'str',
'dns_label': 'str',
'freeform_tags': 'dict(str, str)',
'is_ipv6_enabled': 'bool'
}
self.attribute_map = {
'cidr_block': 'cidrBlock',
'cidr_blocks': 'cidrBlocks',
'compartment_id': 'compartmentId',
'defined_tags': 'definedTags',
'display_name': 'displayName',
'dns_label': 'dnsLabel',
'freeform_tags': 'freeformTags',
'is_ipv6_enabled': 'isIpv6Enabled'
}
self._cidr_block = None
self._cidr_blocks = None
self._compartment_id = None
self._defined_tags = None
self._display_name = None
self._dns_label = None
self._freeform_tags = None
self._is_ipv6_enabled = None
@property
def cidr_block(self):
"""
Gets the cidr_block of this CreateVcnDetails.
**Deprecated.** Do *not* set this value. Use `cidrBlocks` instead.
Example: `10.0.0.0/16`
:return: The cidr_block of this CreateVcnDetails.
:rtype: str
"""
return self._cidr_block
@cidr_block.setter
def cidr_block(self, cidr_block):
"""
Sets the cidr_block of this CreateVcnDetails.
**Deprecated.** Do *not* set this value. Use `cidrBlocks` instead.
Example: `10.0.0.0/16`
:param cidr_block: The cidr_block of this CreateVcnDetails.
:type: str
"""
self._cidr_block = cidr_block
@property
def cidr_blocks(self):
"""
Gets the cidr_blocks of this CreateVcnDetails.
The list of one or more IPv4 CIDR blocks for the VCN that meet the following criteria:
- The CIDR blocks must be valid.
- They must not overlap with each other or with the on-premises network CIDR block.
- The number of CIDR blocks must not exceed the limit of CIDR blocks allowed per VCN.
**Important:** Do *not* specify a value for `cidrBlock`. Use this parameter instead.
:return: The cidr_blocks of this CreateVcnDetails.
:rtype: list[str]
"""
return self._cidr_blocks
@cidr_blocks.setter
def cidr_blocks(self, cidr_blocks):
"""
Sets the cidr_blocks of this CreateVcnDetails.
The list of one or more IPv4 CIDR blocks for the VCN that meet the following criteria:
- The CIDR blocks must be valid.
- They must not overlap with each other or with the on-premises network CIDR block.
- The number of CIDR blocks must not exceed the limit of CIDR blocks allowed per VCN.
**Important:** Do *not* specify a value for `cidrBlock`. Use this parameter instead.
:param cidr_blocks: The cidr_blocks of this CreateVcnDetails.
:type: list[str]
"""
self._cidr_blocks = cidr_blocks
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this CreateVcnDetails.
The `OCID`__ of the compartment to contain the VCN.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:return: The compartment_id of this CreateVcnDetails.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this CreateVcnDetails.
The `OCID`__ of the compartment to contain the VCN.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param compartment_id: The compartment_id of this CreateVcnDetails.
:type: str
"""
self._compartment_id = compartment_id
@property
def defined_tags(self):
"""
Gets the defined_tags of this CreateVcnDetails.
Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:return: The defined_tags of this CreateVcnDetails.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this CreateVcnDetails.
Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:param defined_tags: The defined_tags of this CreateVcnDetails.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
@property
def display_name(self):
"""
Gets the display_name of this CreateVcnDetails.
A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
:return: The display_name of this CreateVcnDetails.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this CreateVcnDetails.
A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
:param display_name: The display_name of this CreateVcnDetails.
:type: str
"""
self._display_name = display_name
@property
def dns_label(self):
"""
Gets the dns_label of this CreateVcnDetails.
A DNS label for the VCN, used in conjunction with the VNIC's hostname and
subnet's DNS label to form a fully qualified domain name (FQDN) for each VNIC
within this subnet (for example, `bminstance-1.subnet123.vcn1.oraclevcn.com`).
Not required to be unique, but it's a best practice to set unique DNS labels
for VCNs in your tenancy. Must be an alphanumeric string that begins with a letter.
The value cannot be changed.
You must set this value if you want instances to be able to use hostnames to
resolve other instances in the VCN. Otherwise the Internet and VCN Resolver
will not work.
For more information, see
`DNS in Your Virtual Cloud Network`__.
Example: `vcn1`
__ https://docs.cloud.oracle.com/iaas/Content/Network/Concepts/dns.htm
:return: The dns_label of this CreateVcnDetails.
:rtype: str
"""
return self._dns_label
@dns_label.setter
def dns_label(self, dns_label):
"""
Sets the dns_label of this CreateVcnDetails.
A DNS label for the VCN, used in conjunction with the VNIC's hostname and
subnet's DNS label to form a fully qualified domain name (FQDN) for each VNIC
within this subnet (for example, `bminstance-1.subnet123.vcn1.oraclevcn.com`).
Not required to be unique, but it's a best practice to set unique DNS labels
for VCNs in your tenancy. Must be an alphanumeric string that begins with a letter.
The value cannot be changed.
You must set this value if you want instances to be able to use hostnames to
resolve other instances in the VCN. Otherwise the Internet and VCN Resolver
will not work.
For more information, see
`DNS in Your Virtual Cloud Network`__.
Example: `vcn1`
__ https://docs.cloud.oracle.com/iaas/Content/Network/Concepts/dns.htm
:param dns_label: The dns_label of this CreateVcnDetails.
:type: str
"""
self._dns_label = dns_label
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this CreateVcnDetails.
Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:return: The freeform_tags of this CreateVcnDetails.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this CreateVcnDetails.
Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:param freeform_tags: The freeform_tags of this CreateVcnDetails.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def is_ipv6_enabled(self):
"""
Gets the is_ipv6_enabled of this CreateVcnDetails.
Whether IPv6 is enabled for the VCN. Default is `false`.
If enabled, Oracle will assign the VCN a IPv6 /56 CIDR block.
For important details about IPv6 addressing in a VCN, see `IPv6 Addresses`__.
Example: `true`
__ https://docs.cloud.oracle.com/iaas/Content/Network/Concepts/ipv6.htm
:return: The is_ipv6_enabled of this CreateVcnDetails.
:rtype: bool
"""
return self._is_ipv6_enabled
@is_ipv6_enabled.setter
def is_ipv6_enabled(self, is_ipv6_enabled):
"""
Sets the is_ipv6_enabled of this CreateVcnDetails.
Whether IPv6 is enabled for the VCN. Default is `false`.
If enabled, Oracle will assign the VCN a IPv6 /56 CIDR block.
For important details about IPv6 addressing in a VCN, see `IPv6 Addresses`__.
Example: `true`
__ https://docs.cloud.oracle.com/iaas/Content/Network/Concepts/ipv6.htm
:param is_ipv6_enabled: The is_ipv6_enabled of this CreateVcnDetails.
:type: bool
"""
self._is_ipv6_enabled = is_ipv6_enabled
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other | src/oci/core/models/create_vcn_details.py |
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateVcnDetails(object):
"""
CreateVcnDetails model.
"""
def __init__(self, **kwargs):
"""
Initializes a new CreateVcnDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param cidr_block:
The value to assign to the cidr_block property of this CreateVcnDetails.
:type cidr_block: str
:param cidr_blocks:
The value to assign to the cidr_blocks property of this CreateVcnDetails.
:type cidr_blocks: list[str]
:param compartment_id:
The value to assign to the compartment_id property of this CreateVcnDetails.
:type compartment_id: str
:param defined_tags:
The value to assign to the defined_tags property of this CreateVcnDetails.
:type defined_tags: dict(str, dict(str, object))
:param display_name:
The value to assign to the display_name property of this CreateVcnDetails.
:type display_name: str
:param dns_label:
The value to assign to the dns_label property of this CreateVcnDetails.
:type dns_label: str
:param freeform_tags:
The value to assign to the freeform_tags property of this CreateVcnDetails.
:type freeform_tags: dict(str, str)
:param is_ipv6_enabled:
The value to assign to the is_ipv6_enabled property of this CreateVcnDetails.
:type is_ipv6_enabled: bool
"""
self.swagger_types = {
'cidr_block': 'str',
'cidr_blocks': 'list[str]',
'compartment_id': 'str',
'defined_tags': 'dict(str, dict(str, object))',
'display_name': 'str',
'dns_label': 'str',
'freeform_tags': 'dict(str, str)',
'is_ipv6_enabled': 'bool'
}
self.attribute_map = {
'cidr_block': 'cidrBlock',
'cidr_blocks': 'cidrBlocks',
'compartment_id': 'compartmentId',
'defined_tags': 'definedTags',
'display_name': 'displayName',
'dns_label': 'dnsLabel',
'freeform_tags': 'freeformTags',
'is_ipv6_enabled': 'isIpv6Enabled'
}
self._cidr_block = None
self._cidr_blocks = None
self._compartment_id = None
self._defined_tags = None
self._display_name = None
self._dns_label = None
self._freeform_tags = None
self._is_ipv6_enabled = None
@property
def cidr_block(self):
"""
Gets the cidr_block of this CreateVcnDetails.
**Deprecated.** Do *not* set this value. Use `cidrBlocks` instead.
Example: `10.0.0.0/16`
:return: The cidr_block of this CreateVcnDetails.
:rtype: str
"""
return self._cidr_block
@cidr_block.setter
def cidr_block(self, cidr_block):
"""
Sets the cidr_block of this CreateVcnDetails.
**Deprecated.** Do *not* set this value. Use `cidrBlocks` instead.
Example: `10.0.0.0/16`
:param cidr_block: The cidr_block of this CreateVcnDetails.
:type: str
"""
self._cidr_block = cidr_block
@property
def cidr_blocks(self):
"""
Gets the cidr_blocks of this CreateVcnDetails.
The list of one or more IPv4 CIDR blocks for the VCN that meet the following criteria:
- The CIDR blocks must be valid.
- They must not overlap with each other or with the on-premises network CIDR block.
- The number of CIDR blocks must not exceed the limit of CIDR blocks allowed per VCN.
**Important:** Do *not* specify a value for `cidrBlock`. Use this parameter instead.
:return: The cidr_blocks of this CreateVcnDetails.
:rtype: list[str]
"""
return self._cidr_blocks
@cidr_blocks.setter
def cidr_blocks(self, cidr_blocks):
"""
Sets the cidr_blocks of this CreateVcnDetails.
The list of one or more IPv4 CIDR blocks for the VCN that meet the following criteria:
- The CIDR blocks must be valid.
- They must not overlap with each other or with the on-premises network CIDR block.
- The number of CIDR blocks must not exceed the limit of CIDR blocks allowed per VCN.
**Important:** Do *not* specify a value for `cidrBlock`. Use this parameter instead.
:param cidr_blocks: The cidr_blocks of this CreateVcnDetails.
:type: list[str]
"""
self._cidr_blocks = cidr_blocks
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this CreateVcnDetails.
The `OCID`__ of the compartment to contain the VCN.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:return: The compartment_id of this CreateVcnDetails.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this CreateVcnDetails.
The `OCID`__ of the compartment to contain the VCN.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param compartment_id: The compartment_id of this CreateVcnDetails.
:type: str
"""
self._compartment_id = compartment_id
@property
def defined_tags(self):
"""
Gets the defined_tags of this CreateVcnDetails.
Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:return: The defined_tags of this CreateVcnDetails.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this CreateVcnDetails.
Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:param defined_tags: The defined_tags of this CreateVcnDetails.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
@property
def display_name(self):
"""
Gets the display_name of this CreateVcnDetails.
A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
:return: The display_name of this CreateVcnDetails.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this CreateVcnDetails.
A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
:param display_name: The display_name of this CreateVcnDetails.
:type: str
"""
self._display_name = display_name
@property
def dns_label(self):
"""
Gets the dns_label of this CreateVcnDetails.
A DNS label for the VCN, used in conjunction with the VNIC's hostname and
subnet's DNS label to form a fully qualified domain name (FQDN) for each VNIC
within this subnet (for example, `bminstance-1.subnet123.vcn1.oraclevcn.com`).
Not required to be unique, but it's a best practice to set unique DNS labels
for VCNs in your tenancy. Must be an alphanumeric string that begins with a letter.
The value cannot be changed.
You must set this value if you want instances to be able to use hostnames to
resolve other instances in the VCN. Otherwise the Internet and VCN Resolver
will not work.
For more information, see
`DNS in Your Virtual Cloud Network`__.
Example: `vcn1`
__ https://docs.cloud.oracle.com/iaas/Content/Network/Concepts/dns.htm
:return: The dns_label of this CreateVcnDetails.
:rtype: str
"""
return self._dns_label
@dns_label.setter
def dns_label(self, dns_label):
"""
Sets the dns_label of this CreateVcnDetails.
A DNS label for the VCN, used in conjunction with the VNIC's hostname and
subnet's DNS label to form a fully qualified domain name (FQDN) for each VNIC
within this subnet (for example, `bminstance-1.subnet123.vcn1.oraclevcn.com`).
Not required to be unique, but it's a best practice to set unique DNS labels
for VCNs in your tenancy. Must be an alphanumeric string that begins with a letter.
The value cannot be changed.
You must set this value if you want instances to be able to use hostnames to
resolve other instances in the VCN. Otherwise the Internet and VCN Resolver
will not work.
For more information, see
`DNS in Your Virtual Cloud Network`__.
Example: `vcn1`
__ https://docs.cloud.oracle.com/iaas/Content/Network/Concepts/dns.htm
:param dns_label: The dns_label of this CreateVcnDetails.
:type: str
"""
self._dns_label = dns_label
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this CreateVcnDetails.
Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:return: The freeform_tags of this CreateVcnDetails.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this CreateVcnDetails.
Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:param freeform_tags: The freeform_tags of this CreateVcnDetails.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def is_ipv6_enabled(self):
"""
Gets the is_ipv6_enabled of this CreateVcnDetails.
Whether IPv6 is enabled for the VCN. Default is `false`.
If enabled, Oracle will assign the VCN a IPv6 /56 CIDR block.
For important details about IPv6 addressing in a VCN, see `IPv6 Addresses`__.
Example: `true`
__ https://docs.cloud.oracle.com/iaas/Content/Network/Concepts/ipv6.htm
:return: The is_ipv6_enabled of this CreateVcnDetails.
:rtype: bool
"""
return self._is_ipv6_enabled
@is_ipv6_enabled.setter
def is_ipv6_enabled(self, is_ipv6_enabled):
"""
Sets the is_ipv6_enabled of this CreateVcnDetails.
Whether IPv6 is enabled for the VCN. Default is `false`.
If enabled, Oracle will assign the VCN a IPv6 /56 CIDR block.
For important details about IPv6 addressing in a VCN, see `IPv6 Addresses`__.
Example: `true`
__ https://docs.cloud.oracle.com/iaas/Content/Network/Concepts/ipv6.htm
:param is_ipv6_enabled: The is_ipv6_enabled of this CreateVcnDetails.
:type: bool
"""
self._is_ipv6_enabled = is_ipv6_enabled
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other | 0.761627 | 0.307683 |
import itertools
import pickle
import shutil
import tempfile
from soap import logger
from soap.flopoco.common import cd, template_file, flopoco, xilinx
class _RTLGenerator(object):
def __init__(self, expr, var_env, prec, file_name=None, dir=None):
from soap.expression import Expr
self.expr = Expr(expr)
self.var_env = var_env
self.wf = prec
self.we = self.expr.exponent_width(var_env, prec)
self.dir = dir or tempfile.mktemp(suffix='/')
with cd(self.dir):
self.f = file_name or tempfile.mktemp(suffix='.vhdl', dir=dir)
def generate(self):
from akpytemp import Template
ops = set()
in_ports = set()
out_port, ls = self.expr.as_labels()
wires = set()
signals = set()
def wire(op, in1, in2, out):
def wire_name(i):
if i in signals:
return i.signal_name()
if i in in_ports:
return i.port_name()
if i == out_port:
return 'p_out'
for i in [in1, in2, out]:
# a variable represented as a string is a port
if isinstance(i.e, str):
in_ports.add(i)
continue
# a number is a port
try:
float(i.e)
in_ports.add(i)
continue
except (TypeError, ValueError):
pass
# a range is a port
try:
a, b = i.e
float(a), float(b)
in_ports.add(i)
continue
except (TypeError, ValueError):
pass
# an expression, need a signal for its output
try:
i.e.op
if i != out_port:
signals.add(i)
except AttributeError:
pass
wires.add((op, wire_name(in1), wire_name(in2), wire_name(out)))
for out, e in ls.items():
try:
op, in1, in2 = e.op, e.a1, e.a2
wire(op, in1, in2, out)
ops.add(e.op)
except AttributeError:
pass
in_ports = [i.port_name() for i in in_ports]
out_port = 'p_out'
signals = [i.signal_name() for i in signals]
logger.debug(in_ports, signals, wires)
Template(path=template_file).save(
path=self.f, directory=self.dir, flopoco=flopoco,
ops=ops, e=self.expr,
we=self.we, wf=self.wf,
in_ports=in_ports, out_port=out_port,
signals=signals, wires=wires)
return self.f
def actual_luts(expr, var_env, prec):
import sh
dir = tempfile.mktemp(suffix='/')
f = _RTLGenerator(expr, var_env, prec, dir=dir).generate()
logger.debug('Synthesising', str(expr), 'with precision', prec, 'in', f)
try:
return xilinx(f, dir=dir)
except (sh.ErrorReturnCode, KeyboardInterrupt):
raise
finally:
shutil.rmtree(dir)
def _para_area(i_n_e_v_p):
import sh
i, n, e, v, p = i_n_e_v_p
try:
real_area, estimated_area = e.real_area(v, p), e.area(v, p).area
logger.info(
'%d/%d, Expr: %s, Prec: %d, Real Area: %d, Estimated Area: %d' %
(i + 1, n, str(e), p, real_area, estimated_area))
return real_area, estimated_area
except sh.ErrorReturnCode:
logger.error('Unable to synthesise', str(e), 'with precision', p)
except Exception as exc:
logger.error('Unknown failure', exc, 'when synthesising', str(e),
'with precision', p)
_pool = None
def pool():
global _pool
if _pool:
return _pool
from multiprocessing import Pool
_pool = Pool()
return _pool
_setup_rc_done = False
def _setup_rc():
global _setup_rc_done
if _setup_rc_done:
return
from matplotlib import rc
rc('font', family='serif', size=24, serif='Times')
rc('text', usetex=True)
_setup_rc_done = True
class AreaEstimateValidator(object):
"""Validates our area model by comparing it against synthesis"""
def __init__(self, expr_set=None, var_env=None, prec_list=None):
self.e = expr_set
self.v = var_env
self.p = prec_list
def scatter_points(self):
try:
return self.points
except AttributeError:
pass
v = self.v
n = len(self.e) * len(self.p)
s = [(i, n, e, v, p)
for i, (e, p) in enumerate(itertools.product(self.e, self.p))]
self.points = pool().imap_unordered(_para_area, s)
self.points = [p for p in self.points if p is not None]
return self.points
def _plot(self):
try:
return self.figure
except AttributeError:
pass
from matplotlib import pyplot, pylab
_setup_rc()
self.figure = pyplot.figure()
plot = self.figure.add_subplot(111)
for ax in [plot.xaxis, plot.yaxis]:
ax.get_major_formatter().set_scientific(True)
ax.get_major_formatter().set_powerlimits((-2, 3))
real_area, estimated_area = zip(*self.scatter_points())
scatter_real_area = [v for i, v in enumerate(real_area) if i % 10 == 0]
scatter_estimated_area = [v for i, v in enumerate(estimated_area)
if i % 10 == 0]
plot.scatter(scatter_real_area, scatter_estimated_area,
marker='.', s=0.5, linewidth=1, color='r')
plot.grid(True, which='both', ls=':')
plot.set_xlabel('Actual Area (Number of LUTs)')
plot.set_ylabel('Estimated Area (Number of LUTs)')
lim = max(plot.get_xlim())
reg_fit = pylab.polyfit(real_area, estimated_area, 1)
logger.info(reg_fit)
reg_func = pylab.poly1d(reg_fit)
plot.plot([0, lim], reg_func([0, lim]), color='k')
plot.plot([0, lim], [0, lim], linestyle=':', color='k')
plot.set_xlim(0, lim)
plot.set_ylim(0, lim)
return self.figure
def show_plot(self):
from matplotlib import pyplot
pyplot.show(self._plot())
def save_plot(self, *args, **kwargs):
self._plot().savefig(*args, bbox_inches='tight', **kwargs)
@classmethod
def load_points(cls, f):
a = cls()
with open(f, 'rb') as f:
a.points = pickle.load(f)
return a
def save_points(self, f):
p = self.scatter_points()
with open(f, 'wb') as f:
pickle.dump(p, f)
def actual_vs_estimate():
from soap.transformer.utils import greedy_trace
from soap.flopoco.common import wf_range
logger.set_context(level=logger.levels.info)
try:
a = AreaEstimateValidator.load_points('area.pkl')
except FileNotFoundError:
exprs = [
"""(a + a + b) * (a + b + b) * (b + b + c) *
(b + c + c) * (c + c + a) * (c + a + a)""",
'(1 + b + c) * (a + 1 + b) * (a + b + 1)',
'(a + 1) * (b + 1) * (c + 1)',
'a + b + c',
]
v = {
'a': ['1', '2'],
'b': ['10', '20'],
'c': ['100', '200'],
}
p = list(reversed(wf_range))
s = []
for e in exprs:
s += greedy_trace(e, v, depth=3)
a = AreaEstimateValidator(s, v, p)
a.save_points('area.pkl')
a.save_plot('area.pdf')
a.show_plot() | soap/flopoco/actual.py | import itertools
import pickle
import shutil
import tempfile
from soap import logger
from soap.flopoco.common import cd, template_file, flopoco, xilinx
class _RTLGenerator(object):
def __init__(self, expr, var_env, prec, file_name=None, dir=None):
from soap.expression import Expr
self.expr = Expr(expr)
self.var_env = var_env
self.wf = prec
self.we = self.expr.exponent_width(var_env, prec)
self.dir = dir or tempfile.mktemp(suffix='/')
with cd(self.dir):
self.f = file_name or tempfile.mktemp(suffix='.vhdl', dir=dir)
def generate(self):
from akpytemp import Template
ops = set()
in_ports = set()
out_port, ls = self.expr.as_labels()
wires = set()
signals = set()
def wire(op, in1, in2, out):
def wire_name(i):
if i in signals:
return i.signal_name()
if i in in_ports:
return i.port_name()
if i == out_port:
return 'p_out'
for i in [in1, in2, out]:
# a variable represented as a string is a port
if isinstance(i.e, str):
in_ports.add(i)
continue
# a number is a port
try:
float(i.e)
in_ports.add(i)
continue
except (TypeError, ValueError):
pass
# a range is a port
try:
a, b = i.e
float(a), float(b)
in_ports.add(i)
continue
except (TypeError, ValueError):
pass
# an expression, need a signal for its output
try:
i.e.op
if i != out_port:
signals.add(i)
except AttributeError:
pass
wires.add((op, wire_name(in1), wire_name(in2), wire_name(out)))
for out, e in ls.items():
try:
op, in1, in2 = e.op, e.a1, e.a2
wire(op, in1, in2, out)
ops.add(e.op)
except AttributeError:
pass
in_ports = [i.port_name() for i in in_ports]
out_port = 'p_out'
signals = [i.signal_name() for i in signals]
logger.debug(in_ports, signals, wires)
Template(path=template_file).save(
path=self.f, directory=self.dir, flopoco=flopoco,
ops=ops, e=self.expr,
we=self.we, wf=self.wf,
in_ports=in_ports, out_port=out_port,
signals=signals, wires=wires)
return self.f
def actual_luts(expr, var_env, prec):
import sh
dir = tempfile.mktemp(suffix='/')
f = _RTLGenerator(expr, var_env, prec, dir=dir).generate()
logger.debug('Synthesising', str(expr), 'with precision', prec, 'in', f)
try:
return xilinx(f, dir=dir)
except (sh.ErrorReturnCode, KeyboardInterrupt):
raise
finally:
shutil.rmtree(dir)
def _para_area(i_n_e_v_p):
import sh
i, n, e, v, p = i_n_e_v_p
try:
real_area, estimated_area = e.real_area(v, p), e.area(v, p).area
logger.info(
'%d/%d, Expr: %s, Prec: %d, Real Area: %d, Estimated Area: %d' %
(i + 1, n, str(e), p, real_area, estimated_area))
return real_area, estimated_area
except sh.ErrorReturnCode:
logger.error('Unable to synthesise', str(e), 'with precision', p)
except Exception as exc:
logger.error('Unknown failure', exc, 'when synthesising', str(e),
'with precision', p)
_pool = None
def pool():
global _pool
if _pool:
return _pool
from multiprocessing import Pool
_pool = Pool()
return _pool
_setup_rc_done = False
def _setup_rc():
global _setup_rc_done
if _setup_rc_done:
return
from matplotlib import rc
rc('font', family='serif', size=24, serif='Times')
rc('text', usetex=True)
_setup_rc_done = True
class AreaEstimateValidator(object):
"""Validates our area model by comparing it against synthesis"""
def __init__(self, expr_set=None, var_env=None, prec_list=None):
self.e = expr_set
self.v = var_env
self.p = prec_list
def scatter_points(self):
try:
return self.points
except AttributeError:
pass
v = self.v
n = len(self.e) * len(self.p)
s = [(i, n, e, v, p)
for i, (e, p) in enumerate(itertools.product(self.e, self.p))]
self.points = pool().imap_unordered(_para_area, s)
self.points = [p for p in self.points if p is not None]
return self.points
def _plot(self):
try:
return self.figure
except AttributeError:
pass
from matplotlib import pyplot, pylab
_setup_rc()
self.figure = pyplot.figure()
plot = self.figure.add_subplot(111)
for ax in [plot.xaxis, plot.yaxis]:
ax.get_major_formatter().set_scientific(True)
ax.get_major_formatter().set_powerlimits((-2, 3))
real_area, estimated_area = zip(*self.scatter_points())
scatter_real_area = [v for i, v in enumerate(real_area) if i % 10 == 0]
scatter_estimated_area = [v for i, v in enumerate(estimated_area)
if i % 10 == 0]
plot.scatter(scatter_real_area, scatter_estimated_area,
marker='.', s=0.5, linewidth=1, color='r')
plot.grid(True, which='both', ls=':')
plot.set_xlabel('Actual Area (Number of LUTs)')
plot.set_ylabel('Estimated Area (Number of LUTs)')
lim = max(plot.get_xlim())
reg_fit = pylab.polyfit(real_area, estimated_area, 1)
logger.info(reg_fit)
reg_func = pylab.poly1d(reg_fit)
plot.plot([0, lim], reg_func([0, lim]), color='k')
plot.plot([0, lim], [0, lim], linestyle=':', color='k')
plot.set_xlim(0, lim)
plot.set_ylim(0, lim)
return self.figure
def show_plot(self):
from matplotlib import pyplot
pyplot.show(self._plot())
def save_plot(self, *args, **kwargs):
self._plot().savefig(*args, bbox_inches='tight', **kwargs)
@classmethod
def load_points(cls, f):
a = cls()
with open(f, 'rb') as f:
a.points = pickle.load(f)
return a
def save_points(self, f):
p = self.scatter_points()
with open(f, 'wb') as f:
pickle.dump(p, f)
def actual_vs_estimate():
from soap.transformer.utils import greedy_trace
from soap.flopoco.common import wf_range
logger.set_context(level=logger.levels.info)
try:
a = AreaEstimateValidator.load_points('area.pkl')
except FileNotFoundError:
exprs = [
"""(a + a + b) * (a + b + b) * (b + b + c) *
(b + c + c) * (c + c + a) * (c + a + a)""",
'(1 + b + c) * (a + 1 + b) * (a + b + 1)',
'(a + 1) * (b + 1) * (c + 1)',
'a + b + c',
]
v = {
'a': ['1', '2'],
'b': ['10', '20'],
'c': ['100', '200'],
}
p = list(reversed(wf_range))
s = []
for e in exprs:
s += greedy_trace(e, v, depth=3)
a = AreaEstimateValidator(s, v, p)
a.save_points('area.pkl')
a.save_plot('area.pdf')
a.show_plot() | 0.364551 | 0.166675 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_remote_peering_connection_facts
short_description: Retrieve facts of Remote Peering Connections(RPCs)
description:
- This module retrieves information of the specified remote peering connection(RPC) or lists the RPCs for the
specified DRG and compartment (the RPC's compartment).
version_added: "2.5"
options:
compartment_id:
description: The OCID of the compartment. I(compartment_id) is required to get all the RPCs in the specified
compartment (the RPC's compartment).
required: false
drg_id:
description: The OCID of the DRG. I(drg_id) is required to get all the RPCs for the specified DRG and
compartment (the RPC's compartment).
required: false
remote_peering_connection_id:
description: The OCID of the RPC. I(remote_peering_connection_id) is required to get a specific RPC's
information.
required: false
aliases: [ 'id' ]
author: "<NAME> (@rohitChaware)"
extends_documentation_fragment: [ oracle, oracle_display_name_option ]
"""
EXAMPLES = """
- name: Get all the RPCs in a compartment
oci_remote_peering_connection_facts:
compartment_id: 'ocid1.compartment.oc1..xxxxxEXAMPLExxxxx'
- name: Get a specific RPC using its OCID
oci_remote_peering_connection_facts:
remote_peering_connection_id: ocid1.remotepeeringconnection.oc1.phx.xxxxxEXAMPLExxxxx
"""
RETURN = """
remote_peering_connections:
description: List of RPC details
returned: always
type: complex
contains:
compartment_id:
description: The OCID of the compartment containing the RPC.
returned: always
type: string
sample: ocid1.compartment.oc1..xxxxxEXAMPLExxxxx
display_name:
description: Name of the RPC.
returned: always
type: string
sample: ansible_remote_peering_connection
drg_id:
description: The OCID of the DRG that this RPC belongs to.
returned: always
type: string
sample: ocid1.drg.oc1.phx.xxxxxEXAMPLExxxxx
id:
description: OCID of the RPC.
returned: always
type: string
sample: ocid1.remotepeeringconnection.oc1.phx.xxxxxEXAMPLExxxxx
is_cross_tenancy_peering:
description: Whether the VCN at the other end of the peering is in a different tenancy.
returned: always
type: bool
sample: false
lifecycle_state:
description: Current state of the RPC.
returned: always
type: string
sample: AVAILABLE
peer_id:
description: If this RPC is peered, this value is the OCID of the other RPC.
returned: always
type: string
sample: ocid1.remotepeeringconnection.oc1.iad.xxxxxEXAMPLExxxxx
peering_status:
description: Whether the RPC is peered with another RPC. NEW means the RPC has not yet been peered. PENDING
means the peering is being established. REVOKED means the RPC at the other end of the peering
has been deleted.
returned: always
type: string
sample: PEERED
peering_region_name:
description: If this RPC is peered, this value is the region that contains the other RPC.
returned: always
type: string
sample: us-ashburn-1
time_created:
description: The date and time the RPC was created, in the format defined by RFC3339.
returned: always
type: string
sample: 2017-11-13T20:22:40.626000+00:00
peer_tenancy_id:
description: If this RPC is peered, this value is the OCID of the other RPC's tenancy.
returned: always
type: string
sample: "ocid1.tenancy.oc1..xxxxxEXAMPLExxxxx"
sample: [{
"compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx",
"display_name": "ansible_remote_peering_connection",
"drg_id": "ocid1.drg.oc1.phx.xxxxxEXAMPLExxxxx",
"id": "ocid1.remotepeeringconnection.oc1.phx.xxxxxEXAMPLExxxxx",
"is_cross_tenancy_peering": false,
"lifecycle_state": "AVAILABLE",
"peer_id": "ocid1.remotepeeringconnection.oc1.iad.xxxxxEXAMPLExxxxx",
"peer_region_name": "us-ashburn-1",
"peer_tenancy_id": "ocid1.tenancy.oc1..xxxxxEXAMPLExxxxx",
"peering_status": "PEERED",
"time_created": "2018-09-24T06:51:59.491000+00:00"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.oracle import oci_utils
try:
from oci.core.virtual_network_client import VirtualNetworkClient
from oci.util import to_dict
from oci.exceptions import ServiceError
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
def main():
    """Entry point: fetch one RPC by OCID, or list the RPCs in a compartment/DRG."""
    spec = oci_utils.get_facts_module_arg_spec()
    spec.update(
        compartment_id=dict(type="str", required=False),
        drg_id=dict(type="str", required=False),
        remote_peering_connection_id=dict(type="str", required=False, aliases=["id"]),
    )
    module = AnsibleModule(
        argument_spec=spec,
        supports_check_mode=False,
        required_one_of=[["remote_peering_connection_id", "compartment_id"]],
    )
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")
    client = oci_utils.create_service_client(module, VirtualNetworkClient)
    rpc_id = module.params["remote_peering_connection_id"]
    result = []
    try:
        if rpc_id is None:
            # No OCID supplied: list every RPC in the compartment, optionally
            # filtered by display_name and/or drg_id.
            result = to_dict(
                oci_utils.list_all_resources(
                    client.list_remote_peering_connections,
                    display_name=module.params["display_name"],
                    drg_id=module.params["drg_id"],
                    compartment_id=module.params["compartment_id"],
                )
            )
        else:
            # A specific RPC was requested; return it as a one-element list.
            rpc = oci_utils.call_with_backoff(
                client.get_remote_peering_connection,
                remote_peering_connection_id=rpc_id,
            ).data
            result = [to_dict(rpc)]
    except ServiceError as ex:
        module.fail_json(msg=ex.message)
    module.exit_json(remote_peering_connections=result)
if __name__ == "__main__":
main() | library/oci_remote_peering_connection_facts.py |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_remote_peering_connection_facts
short_description: Retrieve facts of Remote Peering Connections(RPCs)
description:
- This module retrieves information of the specified remote peering connection(RPC) or lists the RPCs for the
specified DRG and compartment (the RPC's compartment).
version_added: "2.5"
options:
compartment_id:
description: The OCID of the compartment. I(compartment_id) is required to get all the RPCs in the specified
compartment (the RPC's compartment).
required: false
drg_id:
description: The OCID of the DRG. I(drg_id) is required to get all the RPCs for the specified DRG and
compartment (the RPC's compartment).
required: false
remote_peering_connection_id:
description: The OCID of the RPC. I(remote_peering_connection_id) is required to get a specific RPC's
information.
required: false
aliases: [ 'id' ]
author: "<NAME> (@rohitChaware)"
extends_documentation_fragment: [ oracle, oracle_display_name_option ]
"""
EXAMPLES = """
- name: Get all the RPCs in a compartment
oci_remote_peering_connection_facts:
compartment_id: 'ocid1.compartment.oc1..xxxxxEXAMPLExxxxx'
- name: Get a specific RPC using its OCID
oci_remote_peering_connection_facts:
remote_peering_connection_id: ocid1.remotepeeringconnection.oc1.phx.xxxxxEXAMPLExxxxx
"""
RETURN = """
remote_peering_connections:
description: List of RPC details
returned: always
type: complex
contains:
compartment_id:
description: The OCID of the compartment containing the RPC.
returned: always
type: string
sample: ocid1.compartment.oc1..xxxxxEXAMPLExxxxx
display_name:
description: Name of the RPC.
returned: always
type: string
sample: ansible_remote_peering_connection
drg_id:
description: The OCID of the DRG that this RPC belongs to.
returned: always
type: string
sample: ocid1.drg.oc1.phx.xxxxxEXAMPLExxxxx
id:
description: OCID of the RPC.
returned: always
type: string
sample: ocid1.remotepeeringconnection.oc1.phx.xxxxxEXAMPLExxxxx
is_cross_tenancy_peering:
description: Whether the VCN at the other end of the peering is in a different tenancy.
returned: always
type: bool
sample: false
lifecycle_state:
description: Current state of the RPC.
returned: always
type: string
sample: AVAILABLE
peer_id:
description: If this RPC is peered, this value is the OCID of the other RPC.
returned: always
type: string
sample: ocid1.remotepeeringconnection.oc1.iad.xxxxxEXAMPLExxxxx
peering_status:
description: Whether the RPC is peered with another RPC. NEW means the RPC has not yet been peered. PENDING
means the peering is being established. REVOKED means the RPC at the other end of the peering
has been deleted.
returned: always
type: string
sample: PEERED
peer_region_name:
description: If this RPC is peered, this value is the region that contains the other RPC.
returned: always
type: string
sample: us-ashburn-1
time_created:
description: The date and time the RPC was created, in the format defined by RFC3339.
returned: always
type: string
sample: 2017-11-13T20:22:40.626000+00:00
peer_tenancy_id:
description: If this RPC is peered, this value is the OCID of the other RPC's tenancy.
returned: always
type: string
sample: "ocid1.tenancy.oc1..xxxxxEXAMPLExxxxx"
sample: [{
"compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx",
"display_name": "ansible_remote_peering_connection",
"drg_id": "ocid1.drg.oc1.phx.xxxxxEXAMPLExxxxx",
"id": "ocid1.remotepeeringconnection.oc1.phx.xxxxxEXAMPLExxxxx",
"is_cross_tenancy_peering": false,
"lifecycle_state": "AVAILABLE",
"peer_id": "ocid1.remotepeeringconnection.oc1.iad.xxxxxEXAMPLExxxxx",
"peer_region_name": "us-ashburn-1",
"peer_tenancy_id": "ocid1.tenancy.oc1..xxxxxEXAMPLExxxxx",
"peering_status": "PEERED",
"time_created": "2018-09-24T06:51:59.491000+00:00"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.oracle import oci_utils
try:
from oci.core.virtual_network_client import VirtualNetworkClient
from oci.util import to_dict
from oci.exceptions import ServiceError
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
def main():
    """Entry point: fetch one RPC by OCID, or list the RPCs in a compartment/DRG."""
    spec = oci_utils.get_facts_module_arg_spec()
    spec.update(
        compartment_id=dict(type="str", required=False),
        drg_id=dict(type="str", required=False),
        remote_peering_connection_id=dict(type="str", required=False, aliases=["id"]),
    )
    module = AnsibleModule(
        argument_spec=spec,
        supports_check_mode=False,
        required_one_of=[["remote_peering_connection_id", "compartment_id"]],
    )
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")
    client = oci_utils.create_service_client(module, VirtualNetworkClient)
    rpc_id = module.params["remote_peering_connection_id"]
    result = []
    try:
        if rpc_id is None:
            # No OCID supplied: list every RPC in the compartment, optionally
            # filtered by display_name and/or drg_id.
            result = to_dict(
                oci_utils.list_all_resources(
                    client.list_remote_peering_connections,
                    display_name=module.params["display_name"],
                    drg_id=module.params["drg_id"],
                    compartment_id=module.params["compartment_id"],
                )
            )
        else:
            # A specific RPC was requested; return it as a one-element list.
            rpc = oci_utils.call_with_backoff(
                client.get_remote_peering_connection,
                remote_peering_connection_id=rpc_id,
            ).data
            result = [to_dict(rpc)]
    except ServiceError as ex:
        module.fail_json(msg=ex.message)
    module.exit_json(remote_peering_connections=result)
if __name__ == "__main__":
main() | 0.669421 | 0.347509 |
import numpy as np
from openvino.inference_engine import IECore
class Model(object):
    """
    The Salt model class contains methods important to fine cube inference.

    Wraps an OpenVINO IR (xml + bin) loaded on CPU and exposes synchronous
    inference, input reshaping, and accessors used for asynchronous inference.
    If you would like to make classes that utilize these inference tasks,
    treat the Salt model class as the interface to implement.
    """

    def __init__(self, path_to_xml, path_to_bin, requests=1):
        """
        Initialization of Salt model.

        :param path_to_xml: Path to xml file of model's OpenVINO IR.
        :param path_to_bin: Path to bin file of model's OpenVINO IR.
        :param requests: Number of requests to use during inference.
        """
        self.ie = IECore()
        self.requests = requests
        self.salt_net = self.ie.read_network(
            model=path_to_xml, weights=path_to_bin
        )
        self.input_layers = list(self.salt_net.inputs.keys())
        self.output_layers = list(self.salt_net.outputs.keys())
        self.salt_exec = self.ie.load_network(
            network=self.salt_net, device_name="CPU", num_requests=requests
        )
        self.name = "salt_model"
        # Warm-up run so the first timed inference is not skewed by lazy init.
        self.salt_exec.requests[0].infer()

    def infer(self, input_dict, flexible_infer=False):
        """
        Conduct simple synchronous inference.

        :param input_dict: Input dictionary containing ``{..., layer_name: input_val, ...}``.
        :param flexible_infer: When True, reshape the network (and reload the
            executable network) if the incoming tensor's shape differs from the
            network's current input shape.
        :return: output_dict, latency (ms)
        """
        layer = self.input_layers[0]
        input_val = input_dict[layer]
        # BUG FIX: the original compared against ``self.input_layer``, an
        # attribute that is never defined anywhere (AttributeError whenever
        # flexible_infer=True). Use the network's recorded input shape instead.
        # NOTE(review): assumes the IE DataPtr in ``salt_net.inputs`` exposes
        # ``.shape``, as the original's intent implies -- confirm SDK version.
        current_shape = self.salt_net.inputs[layer].shape
        if flexible_infer and tuple(input_val.shape) != tuple(current_shape):
            self.salt_net.reshape({layer: input_val.shape})
            # Reload with the configured request count for consistency with
            # reshape_input (the original silently dropped num_requests here).
            self.salt_exec = self.ie.load_network(
                network=self.salt_net, device_name="CPU", num_requests=self.requests
            )
            self.infer_requests = self.salt_exec.requests
        infer_requests = self.salt_exec.requests
        infer_requests[0].infer(input_dict)
        latency = infer_requests[0].latency
        output_dict = infer_requests[0].outputs
        return output_dict, latency  # latency in milliseconds

    def reshape_input(self, shape):
        """
        Change the shape of the input layer to ``shape``. In the case of Salt
        model, the first and only layer will change shape.

        :param shape: Tuple/list representing the new desired input shape.
        :return: None
        """
        self.salt_net.reshape({self.input_layers[0]: shape})
        self.salt_exec = self.ie.load_network(
            network=self.salt_net, device_name="CPU", num_requests=self.requests
        )
        self.infer_requests = self.salt_exec.requests

    # Base Functions Below
    def get_inputs(self):
        """
        Get input layer name(s). Only one layer name will be returned for the
        Salt model.

        :return: List of strings representing the names of the input layers.
        """
        return self.input_layers

    def get_outputs(self):
        """
        Get output layer name(s). Only one layer name will be returned for the
        Salt model.

        :return: List of strings representing the names of the output layers.
        """
        return self.output_layers

    def get_requests(self):
        """
        Get requests object of model. The ``get_requests`` and
        ``get_idle_request_id`` methods are used for asynchronous inference.

        :return: Requests object of model.
        """
        return self.salt_exec.requests
def get_idle_request_id(self):
"""
Get an idle request id. The ``get_requests`` and ``get_idle_request_id`` methods
are used for asynchronous inference.
:return: Idle request id (int)
"""
return self.salt_exec.get_idle_request_id() | models/salt/only_model_script/model.py | import numpy as np
from openvino.inference_engine import IECore
class Model(object):
    """
    The Salt model class contains methods important to fine cube inference.

    Wraps an OpenVINO IR (xml + bin) loaded on CPU and exposes synchronous
    inference, input reshaping, and accessors used for asynchronous inference.
    If you would like to make classes that utilize these inference tasks,
    treat the Salt model class as the interface to implement.
    """

    def __init__(self, path_to_xml, path_to_bin, requests=1):
        """
        Initialization of Salt model.

        :param path_to_xml: Path to xml file of model's OpenVINO IR.
        :param path_to_bin: Path to bin file of model's OpenVINO IR.
        :param requests: Number of requests to use during inference.
        """
        self.ie = IECore()
        self.requests = requests
        self.salt_net = self.ie.read_network(
            model=path_to_xml, weights=path_to_bin
        )
        self.input_layers = list(self.salt_net.inputs.keys())
        self.output_layers = list(self.salt_net.outputs.keys())
        self.salt_exec = self.ie.load_network(
            network=self.salt_net, device_name="CPU", num_requests=requests
        )
        self.name = "salt_model"
        # Warm-up run so the first timed inference is not skewed by lazy init.
        self.salt_exec.requests[0].infer()

    def infer(self, input_dict, flexible_infer=False):
        """
        Conduct simple synchronous inference.

        :param input_dict: Input dictionary containing ``{..., layer_name: input_val, ...}``.
        :param flexible_infer: When True, reshape the network (and reload the
            executable network) if the incoming tensor's shape differs from the
            network's current input shape.
        :return: output_dict, latency (ms)
        """
        layer = self.input_layers[0]
        input_val = input_dict[layer]
        # BUG FIX: the original compared against ``self.input_layer``, an
        # attribute that is never defined anywhere (AttributeError whenever
        # flexible_infer=True). Use the network's recorded input shape instead.
        # NOTE(review): assumes the IE DataPtr in ``salt_net.inputs`` exposes
        # ``.shape``, as the original's intent implies -- confirm SDK version.
        current_shape = self.salt_net.inputs[layer].shape
        if flexible_infer and tuple(input_val.shape) != tuple(current_shape):
            self.salt_net.reshape({layer: input_val.shape})
            # Reload with the configured request count for consistency with
            # reshape_input (the original silently dropped num_requests here).
            self.salt_exec = self.ie.load_network(
                network=self.salt_net, device_name="CPU", num_requests=self.requests
            )
            self.infer_requests = self.salt_exec.requests
        infer_requests = self.salt_exec.requests
        infer_requests[0].infer(input_dict)
        latency = infer_requests[0].latency
        output_dict = infer_requests[0].outputs
        return output_dict, latency  # latency in milliseconds

    def reshape_input(self, shape):
        """
        Change the shape of the input layer to ``shape``. In the case of Salt
        model, the first and only layer will change shape.

        :param shape: Tuple/list representing the new desired input shape.
        :return: None
        """
        self.salt_net.reshape({self.input_layers[0]: shape})
        self.salt_exec = self.ie.load_network(
            network=self.salt_net, device_name="CPU", num_requests=self.requests
        )
        self.infer_requests = self.salt_exec.requests

    # Base Functions Below
    def get_inputs(self):
        """
        Get input layer name(s). Only one layer name will be returned for the
        Salt model.

        :return: List of strings representing the names of the input layers.
        """
        return self.input_layers

    def get_outputs(self):
        """
        Get output layer name(s). Only one layer name will be returned for the
        Salt model.

        :return: List of strings representing the names of the output layers.
        """
        return self.output_layers

    def get_requests(self):
        """
        Get requests object of model. The ``get_requests`` and
        ``get_idle_request_id`` methods are used for asynchronous inference.

        :return: Requests object of model.
        """
        return self.salt_exec.requests
def get_idle_request_id(self):
"""
Get an idle request id. The ``get_requests`` and ``get_idle_request_id`` methods
are used for asynchronous inference.
:return: Idle request id (int)
"""
return self.salt_exec.get_idle_request_id() | 0.887327 | 0.425068 |
import json
import requests
class Touchpoint():
    from .employee import Employee
    from .location import Location
    from .item import Item

    def __init__(self, api_key=None):
        """Keep the API key used to authenticate every request."""
        if api_key is None:
            raise ValueError("A valid API_KEY must be provided.")
        self.api_key = api_key

    @classmethod
    def url(cls, path='/', pretty=False):
        """Build the full request URL for *path*, optionally requesting pretty JSON."""
        base_url = 'http://c1.tchpt.com'
        query = ['?']
        if pretty:
            query.append('pretty=1')
        return base_url + path + ''.join(query)

    def _headers(self):
        """Default headers: JSON content type plus the account's API key."""
        return {'content-type': 'application/json',
                'Api-Key': self.api_key}

    def ping(self):
        """Return the HTTP status code of the ping endpoint."""
        return requests.get(self.url('/api/ping')).status_code

    def ping_full(self):
        """Return the raw response body of the ping endpoint."""
        return requests.get(self.url('/api/ping')).text

    # Fetch all employees
    def get_employees(self, store_id=None):
        path = f'/api/location/{store_id}/employees'
        response = requests.get(self.url(path, pretty=True), headers=self._headers())
        payload = json.loads(response.text)
        return [self.Employee(employee_id=record['id'],
                              first_name=record['firstname'],
                              last_name=record['lastname'],
                              emp_id=record['emp_id'])
                for record in payload['data']['employees']]

    # Fetch an employee
    def get_employee(self, store_id=None, employee_id=None):
        path = f'/api/location/{store_id}/employees/{employee_id}'
        response = requests.get(self.url(path, pretty=True), headers=self._headers())
        record = json.loads(response.text)['data']['employees'][0]
        return self.Employee(employee_id=record['id'],
                             first_name=record['firstname'],
                             last_name=record['lastname'],
                             emp_id=record['emp_id'])

    # Fetch a location
    def get_location(self, store_id=None):
        path = f'/api/location/{store_id}'
        response = requests.get(self.url(path, pretty=True), headers=self._headers())
        record = json.loads(response.text)['data']['location']
        return self.Location(location_id=record['id'],
                             wss_url=record['wssUrl'],
                             default_order_notes=record['defaultOrderNotes'],
                             name=record['name'],
                             address=record['address'],
                             phone=record['phone'])

    # Fetch all items
    def get_items(self, store_id=None):
        path = f'/api/location/{store_id}/items'
        response = requests.get(self.url(path, pretty=True), headers=self._headers())
        payload = json.loads(response.text)
        return [self.Item(item_id=record['id'],
                          name=record['name'],
                          description=record['description'],
                          reporting_categories=record['reportingCategories'],
                          tax_rates=record['taxRates'],
                          item_type=record['type'])
                for record in payload['data']['items']]

    # Fetch an item
def get_item(self, store_id=None, item_id=None):
path = f'/api/location/{store_id}/items/{item_id}'
req = requests.get(self.url(path, pretty=True), headers=self._headers())
item_data = json.loads(req.text)
item = item_data['data']['items'][0]
return self.Item(item_id=item['id'],
name=item['name'],
description=item['description'],
reporting_categories=item['reportingCategories'],
tax_rates=item['taxRates'],
item_type=item['type']) | touchpoint/touchpoint.py | import json
import requests
class Touchpoint():
    from .employee import Employee
    from .location import Location
    from .item import Item

    def __init__(self, api_key=None):
        """Keep the API key used to authenticate every request."""
        if api_key is None:
            raise ValueError("A valid API_KEY must be provided.")
        self.api_key = api_key

    @classmethod
    def url(cls, path='/', pretty=False):
        """Build the full request URL for *path*, optionally requesting pretty JSON."""
        base_url = 'http://c1.tchpt.com'
        query = ['?']
        if pretty:
            query.append('pretty=1')
        return base_url + path + ''.join(query)

    def _headers(self):
        """Default headers: JSON content type plus the account's API key."""
        return {'content-type': 'application/json',
                'Api-Key': self.api_key}

    def ping(self):
        """Return the HTTP status code of the ping endpoint."""
        return requests.get(self.url('/api/ping')).status_code

    def ping_full(self):
        """Return the raw response body of the ping endpoint."""
        return requests.get(self.url('/api/ping')).text

    # Fetch all employees
    def get_employees(self, store_id=None):
        path = f'/api/location/{store_id}/employees'
        response = requests.get(self.url(path, pretty=True), headers=self._headers())
        payload = json.loads(response.text)
        return [self.Employee(employee_id=record['id'],
                              first_name=record['firstname'],
                              last_name=record['lastname'],
                              emp_id=record['emp_id'])
                for record in payload['data']['employees']]

    # Fetch an employee
    def get_employee(self, store_id=None, employee_id=None):
        path = f'/api/location/{store_id}/employees/{employee_id}'
        response = requests.get(self.url(path, pretty=True), headers=self._headers())
        record = json.loads(response.text)['data']['employees'][0]
        return self.Employee(employee_id=record['id'],
                             first_name=record['firstname'],
                             last_name=record['lastname'],
                             emp_id=record['emp_id'])

    # Fetch a location
    def get_location(self, store_id=None):
        path = f'/api/location/{store_id}'
        response = requests.get(self.url(path, pretty=True), headers=self._headers())
        record = json.loads(response.text)['data']['location']
        return self.Location(location_id=record['id'],
                             wss_url=record['wssUrl'],
                             default_order_notes=record['defaultOrderNotes'],
                             name=record['name'],
                             address=record['address'],
                             phone=record['phone'])

    # Fetch all items
    def get_items(self, store_id=None):
        path = f'/api/location/{store_id}/items'
        response = requests.get(self.url(path, pretty=True), headers=self._headers())
        payload = json.loads(response.text)
        return [self.Item(item_id=record['id'],
                          name=record['name'],
                          description=record['description'],
                          reporting_categories=record['reportingCategories'],
                          tax_rates=record['taxRates'],
                          item_type=record['type'])
                for record in payload['data']['items']]

    # Fetch an item
def get_item(self, store_id=None, item_id=None):
path = f'/api/location/{store_id}/items/{item_id}'
req = requests.get(self.url(path, pretty=True), headers=self._headers())
item_data = json.loads(req.text)
item = item_data['data']['items'][0]
return self.Item(item_id=item['id'],
name=item['name'],
description=item['description'],
reporting_categories=item['reportingCategories'],
tax_rates=item['taxRates'],
item_type=item['type']) | 0.426322 | 0.061961 |
import pytest
from petisco import Persistence
from petisco.extra.sqlalchemy import SqliteConnection, SqliteDatabase
from tests.modules.extra.sqlalchemy.mother.model_filename_mother import (
ModelFilenameMother,
)
@pytest.mark.integration
def test_should_create_persistence_with_sqlite_database():
    """Creating a SQLite-backed Persistence exposes the declared models."""
    model_file = ModelFilenameMother.get("sql/persistence.sql.models.yml")
    conn = SqliteConnection.create(server_name="sqlite", database_name="petisco.db")
    db = SqliteDatabase(name="sqlite_test", connection=conn, model_filename=model_file)

    persistence = Persistence()
    persistence.add(db)
    persistence.create()

    expected_models = {
        "client": "tests.modules.extra.sqlalchemy.ymls.sql.models.ClientModel",
        "product": "tests.modules.extra.sqlalchemy.ymls.sql.models.ProductModel",
        "user": "tests.modules.extra.sqlalchemy.ymls.sql.models.UserModel",
    }
    assert db.info() == {"name": "sqlite_test", "models": expected_models}
    assert Persistence.is_available()

    persistence.delete()
    Persistence.clear()
@pytest.mark.integration
def test_should_add_an_user_with_sqlite_database_with_session():
    """A model instance can be persisted through a raw session."""
    model_file = ModelFilenameMother.get("sql/persistence.sql.models.yml")
    conn = SqliteConnection.create(server_name="sqlite", database_name="petisco.db")
    db = SqliteDatabase(name="sqlite_test", connection=conn, model_filename=model_file)

    persistence = Persistence()
    persistence.add(db)
    # Start from a clean database file before creating the schema.
    persistence.delete()
    persistence.create()

    UserModel = Persistence.get_model("sqlite_test", "user")
    session = Persistence.get_session("sqlite_test")
    session.add(UserModel(name="Petisco"))
    session.commit()

    persistence.delete()
    Persistence.clear()
@pytest.mark.integration
def test_should_add_a_product_with_sqlite_database_with_session_scope():
    """A model instance can be persisted through the session-scope context manager."""
    filename = ModelFilenameMother.get("sql/persistence.sql.models.yml")
    connection = SqliteConnection.create(
        server_name="sqlite", database_name="petisco.db"
    )
    database = SqliteDatabase(
        name="sqlite_test", connection=connection, model_filename=filename
    )
    persistence = Persistence()
    persistence.add(database)
    # Start from a clean database file before creating the schema.
    persistence.delete()
    persistence.create()
    ProductModel = Persistence.get_model("sqlite_test", "product")
    session_scope = Persistence.get_session_scope("sqlite_test")
    with session_scope() as session:
        model = ProductModel(name="Petisco", price=2)
        session.add(model)
    # Tear down: remove rows, then drop the database file.
    persistence.clear_data()
    persistence.delete()
Persistence.clear() | tests/modules/extra/sqlalchemy/integration/test_sqlite_database.py | import pytest
from petisco import Persistence
from petisco.extra.sqlalchemy import SqliteConnection, SqliteDatabase
from tests.modules.extra.sqlalchemy.mother.model_filename_mother import (
ModelFilenameMother,
)
@pytest.mark.integration
def test_should_create_persistence_with_sqlite_database():
    """Creating a SQLite-backed Persistence exposes the declared models."""
    model_file = ModelFilenameMother.get("sql/persistence.sql.models.yml")
    conn = SqliteConnection.create(server_name="sqlite", database_name="petisco.db")
    db = SqliteDatabase(name="sqlite_test", connection=conn, model_filename=model_file)

    persistence = Persistence()
    persistence.add(db)
    persistence.create()

    expected_models = {
        "client": "tests.modules.extra.sqlalchemy.ymls.sql.models.ClientModel",
        "product": "tests.modules.extra.sqlalchemy.ymls.sql.models.ProductModel",
        "user": "tests.modules.extra.sqlalchemy.ymls.sql.models.UserModel",
    }
    assert db.info() == {"name": "sqlite_test", "models": expected_models}
    assert Persistence.is_available()

    persistence.delete()
    Persistence.clear()
@pytest.mark.integration
def test_should_add_an_user_with_sqlite_database_with_session():
    """A model instance can be persisted through a raw session."""
    model_file = ModelFilenameMother.get("sql/persistence.sql.models.yml")
    conn = SqliteConnection.create(server_name="sqlite", database_name="petisco.db")
    db = SqliteDatabase(name="sqlite_test", connection=conn, model_filename=model_file)

    persistence = Persistence()
    persistence.add(db)
    # Start from a clean database file before creating the schema.
    persistence.delete()
    persistence.create()

    UserModel = Persistence.get_model("sqlite_test", "user")
    session = Persistence.get_session("sqlite_test")
    session.add(UserModel(name="Petisco"))
    session.commit()

    persistence.delete()
    Persistence.clear()
@pytest.mark.integration
def test_should_add_a_product_with_sqlite_database_with_session_scope():
    """A model instance can be persisted through the session-scope context manager."""
    filename = ModelFilenameMother.get("sql/persistence.sql.models.yml")
    connection = SqliteConnection.create(
        server_name="sqlite", database_name="petisco.db"
    )
    database = SqliteDatabase(
        name="sqlite_test", connection=connection, model_filename=filename
    )
    persistence = Persistence()
    persistence.add(database)
    # Start from a clean database file before creating the schema.
    persistence.delete()
    persistence.create()
    ProductModel = Persistence.get_model("sqlite_test", "product")
    session_scope = Persistence.get_session_scope("sqlite_test")
    with session_scope() as session:
        model = ProductModel(name="Petisco", price=2)
        session.add(model)
    # Tear down: remove rows, then drop the database file.
    persistence.clear_data()
    persistence.delete()
Persistence.clear() | 0.544075 | 0.275404 |
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from scipy import special
from scipy import stats
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
@test_util.run_all_in_graph_and_eager_modes
class Chi2Test(tf.test.TestCase):
    """Compares tfd.Chi2 against scipy.stats.chi2 reference values."""

    def testChi2LogPDF(self):
        # log_prob and prob over a batch must match scipy's logpdf/pdf pointwise.
        batch_size = 6
        df = tf.constant([2.0] * batch_size, dtype=np.float64)
        df_v = 2.0
        x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float64)
        chi2 = tfd.Chi2(df=df)
        expected_log_pdf = stats.chi2.logpdf(x, df_v)
        log_pdf = chi2.log_prob(x)
        self.assertEqual(log_pdf.shape, (6,))
        self.assertAllClose(self.evaluate(log_pdf), expected_log_pdf)
        pdf = chi2.prob(x)
        self.assertEqual(pdf.shape, (6,))
        self.assertAllClose(self.evaluate(pdf), np.exp(expected_log_pdf))

    def testChi2CDF(self):
        # CDF must agree with scipy at the same evaluation points.
        batch_size = 6
        df = tf.constant([2.0] * batch_size, dtype=np.float64)
        df_v = 2.0
        x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float64)
        chi2 = tfd.Chi2(df=df)
        expected_cdf = stats.chi2.cdf(x, df_v)
        cdf = chi2.cdf(x)
        self.assertEqual(cdf.shape, (6,))
        self.assertAllClose(self.evaluate(cdf), expected_cdf)

    def testChi2Mean(self):
        # Mean of Chi2(df) is df; checked via scipy for several df values.
        df_v = np.array([1., 3, 5], dtype=np.float64)
        expected_mean = stats.chi2.mean(df_v)
        chi2 = tfd.Chi2(df=df_v)
        self.assertEqual(chi2.mean().shape, (3,))
        self.assertAllClose(self.evaluate(chi2.mean()), expected_mean)

    def testChi2Variance(self):
        # Variance of Chi2(df) is 2*df; checked via scipy.
        df_v = np.array([1., 3, 5], np.float64)
        expected_variances = stats.chi2.var(df_v)
        chi2 = tfd.Chi2(df=df_v)
        self.assertEqual(chi2.variance().shape, (3,))
        self.assertAllClose(self.evaluate(chi2.variance()), expected_variances)

    def testChi2Entropy(self):
        # Differential entropy against scipy's closed form.
        df_v = np.array([1., 3, 5], dtype=np.float64)
        expected_entropy = stats.chi2.entropy(df_v)
        chi2 = tfd.Chi2(df=df_v)
        self.assertEqual(chi2.entropy().shape, (3,))
        self.assertAllClose(self.evaluate(chi2.entropy()), expected_entropy)

    def testChi2WithAbsDf(self):
        # Chi2WithAbsDf coerces its parameter via floor(abs(df)).
        df_v = np.array([-1.3, -3.2, 5], dtype=np.float64)
        chi2 = tfd.Chi2WithAbsDf(df=df_v)
        self.assertAllClose(
            self.evaluate(tf.floor(tf.abs(df_v))), self.evaluate(chi2.df))

    def testChi2Chi2KL(self):
        # Analytic KL between two Chi2 distributions, cross-checked against a
        # Monte Carlo estimate from 1e5 samples (seeded for determinism).
        a_df = np.arange(1.0, 10.0)
        b_df = np.arange(1.0, 10.0)
        # This reshape is intended to expand the number of test cases.
        a_df = a_df.reshape((len(a_df), 1))
        b_df = b_df.reshape((1, len(b_df)))
        a = tfd.Chi2(df=a_df)
        b = tfd.Chi2(df=b_df)
        # Consistent with
        # http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf, page 110
        true_kl = (special.gammaln(b_df / 2.0) - special.gammaln(a_df / 2.0) +
                   (a_df - b_df) / 2.0 * special.digamma(a_df / 2.0))
        kl = tfd.kl_divergence(a, b)
        x = a.sample(int(1e5), seed=0)
        kl_sample = tf.reduce_mean(
            input_tensor=a.log_prob(x) - b.log_prob(x), axis=0)
        kl_, kl_sample_ = self.evaluate([kl, kl_sample])
        self.assertAllClose(true_kl, kl_, atol=0., rtol=5e-13)
        self.assertAllClose(true_kl, kl_sample_, atol=0., rtol=5e-2)
        # KL(a || a) must be exactly zero.
        zero_kl = tfd.kl_divergence(a, a)
        true_zero_kl_, zero_kl_ = self.evaluate([tf.zeros_like(zero_kl), zero_kl])
        self.assertAllEqual(true_zero_kl_, zero_kl_)
if __name__ == "__main__":
tf.test.main() | tensorflow_probability/python/distributions/chi2_test.py | """Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from scipy import special
from scipy import stats
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
@test_util.run_all_in_graph_and_eager_modes
class Chi2Test(tf.test.TestCase):
    """Compares tfd.Chi2 against scipy.stats.chi2 reference values."""

    def testChi2LogPDF(self):
        # log_prob and prob over a batch must match scipy's logpdf/pdf pointwise.
        batch_size = 6
        df = tf.constant([2.0] * batch_size, dtype=np.float64)
        df_v = 2.0
        x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float64)
        chi2 = tfd.Chi2(df=df)
        expected_log_pdf = stats.chi2.logpdf(x, df_v)
        log_pdf = chi2.log_prob(x)
        self.assertEqual(log_pdf.shape, (6,))
        self.assertAllClose(self.evaluate(log_pdf), expected_log_pdf)
        pdf = chi2.prob(x)
        self.assertEqual(pdf.shape, (6,))
        self.assertAllClose(self.evaluate(pdf), np.exp(expected_log_pdf))

    def testChi2CDF(self):
        # CDF must agree with scipy at the same evaluation points.
        batch_size = 6
        df = tf.constant([2.0] * batch_size, dtype=np.float64)
        df_v = 2.0
        x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float64)
        chi2 = tfd.Chi2(df=df)
        expected_cdf = stats.chi2.cdf(x, df_v)
        cdf = chi2.cdf(x)
        self.assertEqual(cdf.shape, (6,))
        self.assertAllClose(self.evaluate(cdf), expected_cdf)

    def testChi2Mean(self):
        # Mean of Chi2(df) is df; checked via scipy for several df values.
        df_v = np.array([1., 3, 5], dtype=np.float64)
        expected_mean = stats.chi2.mean(df_v)
        chi2 = tfd.Chi2(df=df_v)
        self.assertEqual(chi2.mean().shape, (3,))
        self.assertAllClose(self.evaluate(chi2.mean()), expected_mean)

    def testChi2Variance(self):
        # Variance of Chi2(df) is 2*df; checked via scipy.
        df_v = np.array([1., 3, 5], np.float64)
        expected_variances = stats.chi2.var(df_v)
        chi2 = tfd.Chi2(df=df_v)
        self.assertEqual(chi2.variance().shape, (3,))
        self.assertAllClose(self.evaluate(chi2.variance()), expected_variances)

    def testChi2Entropy(self):
        # Differential entropy against scipy's closed form.
        df_v = np.array([1., 3, 5], dtype=np.float64)
        expected_entropy = stats.chi2.entropy(df_v)
        chi2 = tfd.Chi2(df=df_v)
        self.assertEqual(chi2.entropy().shape, (3,))
        self.assertAllClose(self.evaluate(chi2.entropy()), expected_entropy)

    def testChi2WithAbsDf(self):
        # Chi2WithAbsDf coerces its parameter via floor(abs(df)).
        df_v = np.array([-1.3, -3.2, 5], dtype=np.float64)
        chi2 = tfd.Chi2WithAbsDf(df=df_v)
        self.assertAllClose(
            self.evaluate(tf.floor(tf.abs(df_v))), self.evaluate(chi2.df))

    def testChi2Chi2KL(self):
        # Analytic KL between two Chi2 distributions, cross-checked against a
        # Monte Carlo estimate from 1e5 samples (seeded for determinism).
        a_df = np.arange(1.0, 10.0)
        b_df = np.arange(1.0, 10.0)
        # This reshape is intended to expand the number of test cases.
        a_df = a_df.reshape((len(a_df), 1))
        b_df = b_df.reshape((1, len(b_df)))
        a = tfd.Chi2(df=a_df)
        b = tfd.Chi2(df=b_df)
        # Consistent with
        # http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf, page 110
        true_kl = (special.gammaln(b_df / 2.0) - special.gammaln(a_df / 2.0) +
                   (a_df - b_df) / 2.0 * special.digamma(a_df / 2.0))
        kl = tfd.kl_divergence(a, b)
        x = a.sample(int(1e5), seed=0)
        kl_sample = tf.reduce_mean(
            input_tensor=a.log_prob(x) - b.log_prob(x), axis=0)
        kl_, kl_sample_ = self.evaluate([kl, kl_sample])
        self.assertAllClose(true_kl, kl_, atol=0., rtol=5e-13)
        self.assertAllClose(true_kl, kl_sample_, atol=0., rtol=5e-2)
        # KL(a || a) must be exactly zero.
        zero_kl = tfd.kl_divergence(a, a)
        true_zero_kl_, zero_kl_ = self.evaluate([tf.zeros_like(zero_kl), zero_kl])
        self.assertAllEqual(true_zero_kl_, zero_kl_)
if __name__ == "__main__":
tf.test.main() | 0.880771 | 0.605245 |
import os
import numpy as np
import PIL.Image as Img
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
def extract_image(filename: str):
    """
    Apply a CVAT segmentation mask to its matching JPEG image.

    Expects "SegmentationObject" (masks, .png) and "JPEGImages" (.jpg) folders
    under the current working directory, as in the Colab layout.

    input:
      filename: str, the name under folder "SegmentationObject" (.png name)
    return:
      np.ndarray, the original image with every unmasked pixel zeroed
    """
    assert filename[-3:] == "png"
    mask_name = filename
    image_name = filename[:-3] + "jpg"
    mask = np.array(Img.open(os.path.join("SegmentationObject", mask_name)).convert("RGB"))
    image = np.array(Img.open(os.path.join("JPEGImages", image_name)).convert("RGB"))
    # A pixel belongs to some object when any RGB channel of the mask is nonzero.
    keep = np.any(mask, axis=2)
    image[keep == 0] = 0
    return image
def crop_image(img):
    """
    Crop away the all-zero border rows and columns of *img*.

    input:
      img: np.ndarray of shape (H, W, C); a pixel is "background" when every
           channel is zero.
    return:
      np.ndarray, the tightest sub-image containing all nonzero pixels.

    Raises IndexError if *img* contains no nonzero pixel at all (unchanged
    from the original behavior).
    """
    cols = np.nonzero(np.any(img, axis=(0, 2)))[0]
    rows = np.nonzero(np.any(img, axis=(1, 2)))[0]
    # BUG FIX: slice ends are exclusive, so the original img[ys:yf, xs:xf]
    # dropped the last nonzero row and column; extend the stops by one.
    x_start, x_stop = cols[0], cols[-1] + 1
    y_start, y_stop = rows[0], rows[-1] + 1
    return img[y_start:y_stop, x_start:x_stop]
def visualize_result(collage, mask, dictionary):
    """
    A visualization of result generated.
    Discrete legend part credit to
    https://stackoverflow.com/questions/40662475/matplot-imshow-add-label-to-each-color-and-put-them-in-legend/40666123#40666123
    input:
      collage, mask, dictionary, the output of collage generator's .generate() function, mask is the pixel mask
    """
    _f, _axarr = plt.subplots(1, 2)
    _axarr[0].set_axis_off()
    _im1 = _axarr[0].imshow(collage)
    _axarr[1].set_axis_off()
    # Collapse a multi-channel mask to its first channel for display.
    _mask = mask if mask.ndim == 2 else mask[:, :, 0]
    _values = np.unique(_mask.ravel())
    _im2 = _axarr[1].imshow(_mask)
    # get the colors of the values, according to the colormap used by imshow
    _colors = [_im2.cmap(_im2.norm(value)) for value in _values]
    # BUG FIX: the original referenced the undefined name `values`
    # (NameError at runtime); the unique pixel values are in `_values`.
    # NOTE(review): assumes 0 (background) always appears in the mask so
    # that _labels lines up with _values -- confirm with the generator.
    _labels = ["background"] + [i for i in dictionary if dictionary[i] in _values]
    # create a patch (proxy artist) for every color
    _patches = [mpatches.Patch(color=_colors[i], label=_labels[i]) for i in range(len(_values))]
    # put those patches as legend-handles into the legend
    plt.legend(handles=_patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
    plt.show()
    print(dictionary)
def extract_binary_mask(mask, dictionary):
    """
    Convert an integer mask into a multi-channel binary mask.

    arg:
      mask, dictionary: output of col_gen.generate() method
    return:
      np.ndarray, one binary channel (last axis) per dictionary value
    """
    channels = [np.where(mask == label, 1, 0) for label in dictionary.values()]
    return np.stack(channels, axis=-1)
import numpy as np
import PIL.Image as Img
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
def extract_image(filename: str):
    """
    Extract the masked region of an image, used for CVAT results.

    Designed for a working directory (e.g. on Colab) that contains both
    the "SegmentationObject" and "JPEGImages" folders.

    input:
      filename: str, the .png name under the "SegmentationObject" folder
    return:
      np.ndarray, the original image with everything outside the mask zeroed
    """
    assert filename[-3:] == "png"
    jpg_name = filename[:-3] + "jpg"
    mask_rgb = np.array(Img.open(os.path.join("SegmentationObject", filename)).convert("RGB"))
    image = np.array(Img.open(os.path.join("JPEGImages", jpg_name)).convert("RGB"))
    # A pixel belongs to the mask when any of its RGB channels is non-zero.
    foreground = np.any(mask_rgb, axis=2)
    image[~foreground] = 0
    return image
def crop_image(img):
    """
    Crop away the all-zero padding rows/columns around the image.

    input:
      img: np.ndarray, H x W x C image to be cropped
    return:
      np.ndarray, view of img restricted to the bounding box of the
      non-zero pixels (last non-zero row/column included)
    raises:
      ValueError: if the image contains no non-zero pixel at all
    """
    _x = np.nonzero(np.any(img, axis=(0, 2)))[0]
    _y = np.nonzero(np.any(img, axis=(1, 2)))[0]
    if _x.size == 0 or _y.size == 0:
        # An all-zero image has no bounding box; the original raised a
        # bare IndexError here.
        raise ValueError("crop_image: image is entirely zero")
    _xs, _xf = _x[0], _x[-1]
    _ys, _yf = _y[0], _y[-1]
    # BUG FIX: +1 makes the slice inclusive of the last non-zero
    # row/column; the original exclusive slice dropped them.
    return img[_ys:_yf + 1, _xs:_xf + 1]
def visualize_result(collage, mask, dictionary):
    """
    A visualization of result generated.
    Discrete legend part credit to
    https://stackoverflow.com/questions/40662475/matplot-imshow-add-label-to-each-color-and-put-them-in-legend/40666123#40666123
    input:
      collage, mask, dictionary, the output of collage generator's .generate() function, mask is the pixel mask
    """
    _f, _axarr = plt.subplots(1, 2)
    _axarr[0].set_axis_off()
    _im1 = _axarr[0].imshow(collage)
    _axarr[1].set_axis_off()
    # Collapse a multi-channel mask to its first channel for display.
    _mask = mask if mask.ndim == 2 else mask[:, :, 0]
    _values = np.unique(_mask.ravel())
    _im2 = _axarr[1].imshow(_mask)
    # get the colors of the values, according to the colormap used by imshow
    _colors = [_im2.cmap(_im2.norm(value)) for value in _values]
    # BUG FIX: the original referenced the undefined name `values`
    # (NameError at runtime); the unique pixel values are in `_values`.
    # NOTE(review): assumes 0 (background) always appears in the mask so
    # that _labels lines up with _values -- confirm with the generator.
    _labels = ["background"] + [i for i in dictionary if dictionary[i] in _values]
    # create a patch (proxy artist) for every color
    _patches = [mpatches.Patch(color=_colors[i], label=_labels[i]) for i in range(len(_values))]
    # put those patches as legend-handles into the legend
    plt.legend(handles=_patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
    plt.show()
    print(dictionary)
def extract_binary_mask(mask, dictionary):
    """
    Convert an integer mask into a multi-channel binary mask.

    arg:
      mask, dictionary: output of col_gen.generate() method
    return:
      np.ndarray, one binary channel (last axis) per dictionary value
    """
    channels = [np.where(mask == label, 1, 0) for label in dictionary.values()]
    return np.stack(channels, axis=-1)
from __future__ import division, print_function
import os
import re
import sys
import argparse
import cv2
import pickle
import numpy as np
import h5py
import chainer
from chainer.links import caffe
from chainer import cuda
"""
Resize and crop an image to 224x224 (some part of sourcecode from chainer_imagenet_tools/inspect_caffenet.py)
Extract features of an image frame using caffe pretrained model and chainer
"""
def mismatch(error_message):
    """Report a model-loading failure (likely a Chainer version mismatch) and exit(1)."""
    for line in (
        'An error occurred in loading a property of model.',
        'Probably there is a mismatch between the versions of Chainer.',
        'Remove the pickle file and try again.',
        error_message,
    ):
        print(line)
    sys.exit(1)
def chainer_extract_features(input_folder, batchsize, layer='fc7'):
    # Extract a 4096-d feature vector per frame by pushing batches of
    # preprocessed frames through the pretrained Caffe model.
    #
    # Reads module-level globals set under __main__: xp, frames, in_size,
    # start, stop, mean_image, output_side_length, func.
    # NOTE(review): `z` is sized from the global `frames` (set by the
    # caller for the current video), not from input_folder's own listing;
    # confirm both always refer to the same folder.
    i = 0
    z = xp.zeros((len(frames), 4096), dtype=np.float32)
    x_batch = np.ndarray((batchsize, 3, in_size, in_size), dtype=np.float32)
    num_frame = len(os.listdir(input_folder))
    for step in range(num_frame):
        # NOTE(review): appears to sample every 8th image with 1-based
        # names ("000001.png", "000009.png", ...) -- confirm naming scheme.
        step2 = 1 + (step * 8)
        image_path = os.path.join(input_folder, "%06d.png" % step2)
        print(image_path)
        image = cv2.imread(image_path)
        height, width, depth = image.shape
        # Resize so the shorter side becomes output_side_length, then
        # center-crop to a square of that size.
        new_height = output_side_length
        new_width = output_side_length
        if height > width:
            new_height = output_side_length * height // width
        else:
            new_width = output_side_length * width // height
        resized_img = cv2.resize(image, (new_width, new_height))
        height_offset = (new_height - output_side_length) // 2
        width_offset = (new_width - output_side_length) // 2
        image = resized_img[height_offset:height_offset + output_side_length,
                            width_offset:width_offset + output_side_length]
        # HWC -> CHW, crop to the network input size, subtract the dataset mean.
        image = image.transpose(2, 0, 1)
        image = image[:, start:stop, start:stop].astype(np.float32)
        image -= mean_image
        x_batch[i] = image
        i += 1
        if i == batchsize:
            # Full batch: forward pass, then store features for these steps.
            x_data = xp.asarray(x_batch)
            x = chainer.Variable(x_data)
            try:
                y, = func(inputs={'data': x}, outputs=[layer])
            except AttributeError as e:
                # mismatch() exits the process, so y is always bound below.
                mismatch(str(e))
            z[step - batchsize + 1:step + 1] = y.data
            # print(y.data)
            i = 0
    if not i == 0:
        # Flush the final partial batch.
        x_data = xp.asarray(x_batch[0:i])
        x = chainer.Variable(x_data)
        try:
            y, = func(inputs={'data': x}, outputs=[layer])
        except AttributeError as e:
            mismatch(str(e))
        z[len(frames) - i:len(frames)] = y.data
    return z
if __name__ == "__main__":
    description = \
        "Extract features for images using chainer and caffe pretrained models"
    argparser = argparse.ArgumentParser(description=description)
    argparser.add_argument("input_folder",
                           type=str,
                           help="input frame folder")
    argparser.add_argument("output_filename",
                           type=str,
                           help="output file name")
    # 'all' writes a fresh .h5 file; 'append' adds datasets to an existing one.
    argparser.add_argument("--mode", default='all')
    argparser.add_argument("--mean",
                           default='ilsvrc_2012_mean.npy')
    argparser.add_argument('--model',
                           help='path to model file',
                           default='VGG_ILSVRC_16_layers.caffemodel',
                           type=str)
    argparser.add_argument('--batchsize',
                           help='batch size',
                           default=50,
                           type=int)
    argparser.add_argument('--layer',
                           help='layer name: fc6, fc7(default), fc8',
                           default='fc7',
                           type=str,
                           choices=['fc6', 'fc7', 'fc8'])
    argparser.add_argument('--gpu',
                           '-g',
                           type=int,
                           default=-1,
                           help='Zero-origin GPU ID (nevative value indicates CPU)')
    args = argparser.parse_args()
    if args.gpu >= 0:
        cuda.check_cuda_available()
    # xp is the array module used everywhere: cupy on GPU, numpy on CPU.
    xp = cuda.cupy if args.gpu >= 0 else np
    caffe_model = args.model
    func = caffe.CaffeFunction(caffe_model)
    if args.gpu >= 0:
        cuda.get_device(args.gpu).use()
        func.to_gpu()
    # VGG input is 224x224, center-cropped out of a 256-pixel resize;
    # the mean image is cropped the same way.
    in_size = 224
    mean_image = np.load(args.mean)
    cropwidth = 256 - in_size
    start = cropwidth // 2
    stop = start + in_size
    mean_image = mean_image[:, start:stop, start:stop].copy()
    target_shape = (256, 256)
    output_side_length = 256
    features_path = args.output_filename
    if args.mode == 'all':
        videos = os.listdir(args.input_folder)
        fw = h5py.File(features_path + '.h5', mode='w')
        for video in videos:
            print(video)
            path = os.path.join(args.input_folder, video)
            # `frames` is read as a global inside chainer_extract_features
            # to size the output feature array.
            frames = os.listdir(path)
            y = chainer_extract_features(path, args.batchsize, args.layer)
            y_cpu = cuda.to_cpu(y)
            fw.create_dataset(video, data=y_cpu)
        fw.close()
    elif args.mode == 'append':
        videos = os.listdir(args.input_folder)
        fw = h5py.File(features_path + '.h5', mode='a')
        for video in videos:
            print(video)
            path = os.path.join(args.input_folder, video)
            frames = os.listdir(path)
            y = chainer_extract_features(path, args.batchsize, args.layer)
            y_cpu = cuda.to_cpu(y)
            fw.create_dataset(video, data=y_cpu)
        fw.close()
from __future__ import division, print_function
import os
import re
import sys
import argparse
import cv2
import pickle
import numpy as np
import h5py
import chainer
from chainer.links import caffe
from chainer import cuda
"""
Resize and crop an image to 224x224 (some part of sourcecode from chainer_imagenet_tools/inspect_caffenet.py)
Extract features of an image frame using caffe pretrained model and chainer
"""
def mismatch(error_message):
    """Report a model-loading failure (likely a Chainer version mismatch) and exit(1)."""
    for line in (
        'An error occurred in loading a property of model.',
        'Probably there is a mismatch between the versions of Chainer.',
        'Remove the pickle file and try again.',
        error_message,
    ):
        print(line)
    sys.exit(1)
def chainer_extract_features(input_folder, batchsize, layer='fc7'):
i = 0
z = xp.zeros((len(frames), 4096), dtype=np.float32)
x_batch = np.ndarray((batchsize, 3, in_size, in_size), dtype=np.float32)
num_frame = len(os.listdir(input_folder))
for step in range(num_frame):
step2 = 1 + (step * 8)
image_path = os.path.join(input_folder, "%06d.png" % step2)
print(image_path)
image = cv2.imread(image_path)
height, width, depth = image.shape
new_height = output_side_length
new_width = output_side_length
if height > width:
new_height = output_side_length * height // width
else:
new_width = output_side_length * width // height
resized_img = cv2.resize(image, (new_width, new_height))
height_offset = (new_height - output_side_length) // 2
width_offset = (new_width - output_side_length) // 2
image = resized_img[height_offset:height_offset + output_side_length,
width_offset:width_offset + output_side_length]
image = image.transpose(2, 0, 1)
image = image[:, start:stop, start:stop].astype(np.float32)
image -= mean_image
x_batch[i] = image
i += 1
if i == batchsize:
x_data = xp.asarray(x_batch)
x = chainer.Variable(x_data)
try:
y, = func(inputs={'data': x}, outputs=[layer])
except AttributeError as e:
mismatch(str(e))
z[step - batchsize + 1:step + 1] = y.data
# print(y.data)
i = 0
if not i == 0:
x_data = xp.asarray(x_batch[0:i])
x = chainer.Variable(x_data)
try:
y, = func(inputs={'data': x}, outputs=[layer])
except AttributeError as e:
mismatch(str(e))
z[len(frames) - i:len(frames)] = y.data
return z
if __name__ == "__main__":
description = \
"Extract features for images using chainer and caffe pretrained models"
argparser = argparse.ArgumentParser(description=description)
argparser.add_argument("input_folder",
type=str,
help="input frame folder")
argparser.add_argument("output_filename",
type=str,
help="output file name")
argparser.add_argument("--mode", default='all')
argparser.add_argument("--mean",
default='ilsvrc_2012_mean.npy')
argparser.add_argument('--model',
help='path to model file',
default='VGG_ILSVRC_16_layers.caffemodel',
type=str)
argparser.add_argument('--batchsize',
help='batch size',
default=50,
type=int)
argparser.add_argument('--layer',
help='layer name: fc6, fc7(default), fc8',
default='fc7',
type=str,
choices=['fc6', 'fc7', 'fc8'])
argparser.add_argument('--gpu',
'-g',
type=int,
default=-1,
help='Zero-origin GPU ID (nevative value indicates CPU)')
args = argparser.parse_args()
if args.gpu >= 0:
cuda.check_cuda_available()
xp = cuda.cupy if args.gpu >= 0 else np
caffe_model = args.model
func = caffe.CaffeFunction(caffe_model)
if args.gpu >= 0:
cuda.get_device(args.gpu).use()
func.to_gpu()
in_size = 224
mean_image = np.load(args.mean)
cropwidth = 256 - in_size
start = cropwidth // 2
stop = start + in_size
mean_image = mean_image[:, start:stop, start:stop].copy()
target_shape = (256, 256)
output_side_length = 256
features_path = args.output_filename
if args.mode == 'all':
videos = os.listdir(args.input_folder)
fw = h5py.File(features_path + '.h5', mode='w')
for video in videos:
print(video)
path = os.path.join(args.input_folder, video)
frames = os.listdir(path)
y = chainer_extract_features(path, args.batchsize, args.layer)
y_cpu = cuda.to_cpu(y)
fw.create_dataset(video, data=y_cpu)
fw.close()
elif args.mode == 'append':
videos = os.listdir(args.input_folder)
fw = h5py.File(features_path + '.h5', mode='a')
for video in videos:
print(video)
path = os.path.join(args.input_folder, video)
frames = os.listdir(path)
y = chainer_extract_features(path, args.batchsize, args.layer)
y_cpu = cuda.to_cpu(y)
fw.create_dataset(video, data=y_cpu)
fw.close() | 0.284477 | 0.170197 |
import json
import os
import requests
import subprocess
import sys
import tempfile
import time
import yaml
from oslo_utils import timeutils
from heat_integrationtests.common import exceptions
from heat_integrationtests.functional import functional_base
class ParallelDeploymentsTest(functional_base.FunctionalTestsBase):
    """Creates one server, then several config stacks holding many software
    deployments in parallel, and verifies the server's deployment metadata
    and the config stacks' final status."""

    # Minimal server whose ID is exported so deployment stacks can target it.
    server_template = '''
heat_template_version: "2013-05-23"
parameters:
  flavor:
    type: string
  image:
    type: string
  network:
    type: string
resources:
  server:
    type: OS::Nova::Server
    properties:
      image: {get_param: image}
      flavor: {get_param: flavor}
      user_data_format: SOFTWARE_CONFIG
      networks: [{network: {get_param: network}}]
outputs:
  server:
    value: {get_resource: server}
'''

    # Stack skeleton holding one SoftwareConfig; deploy_config() fills in
    # the config payload and appends dep_<n> deployment resources.
    config_template = '''
heat_template_version: "2013-05-23"
parameters:
  server:
    type: string
resources:
  config:
    type: OS::Heat::SoftwareConfig
    properties:
'''

    # One SoftwareDeployments resource targeting the parameterized server.
    deployment_snippet = '''
type: OS::Heat::SoftwareDeployments
properties:
  config: {get_resource: config}
  servers: {'0': {get_param: server}}
'''

    enable_cleanup = True

    def test_deployments_metadata(self):
        """End-to-end: server metadata must reflect every deployment."""
        parms = {'flavor': self.conf.minimal_instance_type,
                 'network': self.conf.fixed_network_name,
                 'image': self.conf.minimal_image_ref}
        stack_identifier = self.stack_create(
            parameters=parms,
            template=self.server_template,
            enable_cleanup=self.enable_cleanup)
        server_stack = self.client.stacks.get(stack_identifier)
        server = server_stack.outputs[0]['output_value']

        config_stacks = []
        # add up to 3 stacks each with up to 3 deployments
        deploy_count = 0

        # First wave: 2 stacks x 5 deployments each.
        deploy_count = self.deploy_many_configs(
            stack_identifier,
            server,
            config_stacks,
            2,
            5,
            deploy_count)
        # Second wave: 3 stacks x 3 deployments each.
        self.deploy_many_configs(
            stack_identifier,
            server,
            config_stacks,
            3,
            3,
            deploy_count)

        # Signal every deployment so the config stacks can complete.
        self.signal_deployments(stack_identifier)
        for config_stack in config_stacks:
            self._wait_for_stack_status(config_stack, 'CREATE_COMPLETE')

    def deploy_many_configs(self, stack, server, config_stacks,
                            stack_count, deploys_per_stack,
                            deploy_count_start):
        """Create stack_count config stacks, wait until the server metadata
        lists every deployment so far, and return the new running total."""
        for a in range(stack_count):
            config_stacks.append(
                self.deploy_config(server, deploys_per_stack))

        new_count = deploy_count_start + stack_count * deploys_per_stack
        self.wait_for_deploy_metadata_set(stack, new_count)
        return new_count

    def deploy_config(self, server, deploy_count):
        """Create one stack with a 10kB SoftwareConfig and deploy_count
        deployment resources pointing at the given server."""
        parms = {'server': server}
        template = yaml.safe_load(self.config_template)
        resources = template['resources']
        resources['config']['properties'] = {'config': 'x' * 10000}
        for a in range(deploy_count):
            resources['dep_%s' % a] = yaml.safe_load(self.deployment_snippet)
        # expected_status=None: creation is awaited later, in parallel.
        return self.stack_create(
            parameters=parms,
            template=template,
            enable_cleanup=self.enable_cleanup,
            expected_status=None)

    def wait_for_deploy_metadata_set(self, stack, deploy_count):
        """Poll the server resource metadata until it lists exactly
        deploy_count deployments, or raise TimeoutException."""
        build_timeout = self.conf.build_timeout
        build_interval = self.conf.build_interval
        start = timeutils.utcnow()
        while timeutils.delta_seconds(start,
                                      timeutils.utcnow()) < build_timeout:
            server_metadata = self.client.resources.metadata(
                stack, 'server')
            if len(server_metadata['deployments']) == deploy_count:
                return
            time.sleep(build_interval)
        message = ('Deployment resources failed to be created within '
                   'the required time (%s s).' %
                   (build_timeout))
        raise exceptions.TimeoutException(message)

    def signal_deployments(self, stack_identifier):
        """POST to each deployment's signal URL (its deploy_signal_id input)."""
        server_metadata = self.client.resources.metadata(
            stack_identifier, 'server')
        for dep in server_metadata['deployments']:
            iv = dict((i['name'], i['value']) for i in dep['inputs'])
            sigurl = iv.get('deploy_signal_id')
            requests.post(sigurl, data='{}',
                          headers={'content-type': 'application/json'},
                          verify=self.verify_cert)
class ZaqarSignalTransportTest(functional_base.FunctionalTestsBase):
    """Deploys a config over the Zaqar message transport, signals the
    deployment via heat-config-notify, and checks the output round-trips
    into the stack outputs."""

    server_template = '''
heat_template_version: "2013-05-23"
parameters:
  flavor:
    type: string
  image:
    type: string
  network:
    type: string
resources:
  server:
    type: OS::Nova::Server
    properties:
      image: {get_param: image}
      flavor: {get_param: flavor}
      user_data_format: SOFTWARE_CONFIG
      software_config_transport: ZAQAR_MESSAGE
      networks: [{network: {get_param: network}}]
  config:
    type: OS::Heat::SoftwareConfig
    properties:
      config: echo 'foo'
  deployment:
    type: OS::Heat::SoftwareDeployment
    properties:
      config: {get_resource: config}
      server: {get_resource: server}
      signal_transport: ZAQAR_SIGNAL
outputs:
  data:
    value: {get_attr: [deployment, deploy_stdout]}
'''

    # os-collect-config INI, filled in from the server's Zaqar metadata.
    conf_template = '''
[zaqar]
user_id = %(user_id)s
password = <PASSWORD>
project_id = %(project_id)s
auth_url = %(auth_url)s
queue_id = %(queue_id)s
'''

    def test_signal_queues(self):
        parms = {'flavor': self.conf.minimal_instance_type,
                 'network': self.conf.fixed_network_name,
                 'image': self.conf.minimal_image_ref}
        stack_identifier = self.stack_create(
            parameters=parms,
            template=self.server_template,
            expected_status=None)
        metadata = self.wait_for_deploy_metadata_set(stack_identifier)
        config = metadata['os-collect-config']['zaqar']
        conf_content = self.conf_template % config

        # Write an os-collect-config config file pointing at the Zaqar queue.
        fd, temp_path = tempfile.mkstemp()
        os.write(fd, conf_content.encode('utf-8'))
        os.close(fd)

        # First run: os-collect-config must echo back the zaqar settings.
        cmd = ['os-collect-config', '--one-time',
               '--config-file=%s' % temp_path, 'zaqar']
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        stdout_value = proc.communicate()[0]
        data = json.loads(stdout_value.decode('utf-8'))
        self.assertEqual(config, data['zaqar']['os-collect-config']['zaqar'])

        # Second run fetches the deployment payload from the queue.
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        stdout_value = proc.communicate()[0]
        data = json.loads(stdout_value.decode('utf-8'))

        # Feed the deployment to heat-config-notify and signal a stdout value.
        fd, temp_path = tempfile.mkstemp()
        os.write(fd,
                 json.dumps(data['zaqar']['deployments'][0]).encode('utf-8'))
        os.close(fd)
        cmd = [sys.executable, self.conf.heat_config_notify_script, temp_path]
        proc = subprocess.Popen(cmd,
                                stderr=subprocess.PIPE,
                                stdin=subprocess.PIPE)
        proc.communicate(
            json.dumps({'deploy_stdout': 'here!'}).encode('utf-8'))

        # The signalled stdout must surface as the stack output.
        self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
        stack = self.client.stacks.get(stack_identifier)
        self.assertEqual('here!', stack.outputs[0]['output_value'])

    def wait_for_deploy_metadata_set(self, stack):
        """Poll the server resource metadata until deployments appear and
        return the metadata, or raise TimeoutException."""
        build_timeout = self.conf.build_timeout
        build_interval = self.conf.build_interval
        start = timeutils.utcnow()
        while timeutils.delta_seconds(start,
                                      timeutils.utcnow()) < build_timeout:
            server_metadata = self.client.resources.metadata(
                stack, 'server')
            if server_metadata.get('deployments'):
                return server_metadata
            time.sleep(build_interval)
        message = ('Deployment resources failed to be created within '
                   'the required time (%s s).' %
                   (build_timeout))
        raise exceptions.TimeoutException(message)
import json
import os
import requests
import subprocess
import sys
import tempfile
import time
import yaml
from oslo_utils import timeutils
from heat_integrationtests.common import exceptions
from heat_integrationtests.functional import functional_base
class ParallelDeploymentsTest(functional_base.FunctionalTestsBase):
server_template = '''
heat_template_version: "2013-05-23"
parameters:
flavor:
type: string
image:
type: string
network:
type: string
resources:
server:
type: OS::Nova::Server
properties:
image: {get_param: image}
flavor: {get_param: flavor}
user_data_format: SOFTWARE_CONFIG
networks: [{network: {get_param: network}}]
outputs:
server:
value: {get_resource: server}
'''
config_template = '''
heat_template_version: "2013-05-23"
parameters:
server:
type: string
resources:
config:
type: OS::Heat::SoftwareConfig
properties:
'''
deployment_snippet = '''
type: OS::Heat::SoftwareDeployments
properties:
config: {get_resource: config}
servers: {'0': {get_param: server}}
'''
enable_cleanup = True
def test_deployments_metadata(self):
parms = {'flavor': self.conf.minimal_instance_type,
'network': self.conf.fixed_network_name,
'image': self.conf.minimal_image_ref}
stack_identifier = self.stack_create(
parameters=parms,
template=self.server_template,
enable_cleanup=self.enable_cleanup)
server_stack = self.client.stacks.get(stack_identifier)
server = server_stack.outputs[0]['output_value']
config_stacks = []
# add up to 3 stacks each with up to 3 deployments
deploy_count = 0
deploy_count = self.deploy_many_configs(
stack_identifier,
server,
config_stacks,
2,
5,
deploy_count)
self.deploy_many_configs(
stack_identifier,
server,
config_stacks,
3,
3,
deploy_count)
self.signal_deployments(stack_identifier)
for config_stack in config_stacks:
self._wait_for_stack_status(config_stack, 'CREATE_COMPLETE')
def deploy_many_configs(self, stack, server, config_stacks,
stack_count, deploys_per_stack,
deploy_count_start):
for a in range(stack_count):
config_stacks.append(
self.deploy_config(server, deploys_per_stack))
new_count = deploy_count_start + stack_count * deploys_per_stack
self.wait_for_deploy_metadata_set(stack, new_count)
return new_count
def deploy_config(self, server, deploy_count):
parms = {'server': server}
template = yaml.safe_load(self.config_template)
resources = template['resources']
resources['config']['properties'] = {'config': 'x' * 10000}
for a in range(deploy_count):
resources['dep_%s' % a] = yaml.safe_load(self.deployment_snippet)
return self.stack_create(
parameters=parms,
template=template,
enable_cleanup=self.enable_cleanup,
expected_status=None)
def wait_for_deploy_metadata_set(self, stack, deploy_count):
build_timeout = self.conf.build_timeout
build_interval = self.conf.build_interval
start = timeutils.utcnow()
while timeutils.delta_seconds(start,
timeutils.utcnow()) < build_timeout:
server_metadata = self.client.resources.metadata(
stack, 'server')
if len(server_metadata['deployments']) == deploy_count:
return
time.sleep(build_interval)
message = ('Deployment resources failed to be created within '
'the required time (%s s).' %
(build_timeout))
raise exceptions.TimeoutException(message)
def signal_deployments(self, stack_identifier):
server_metadata = self.client.resources.metadata(
stack_identifier, 'server')
for dep in server_metadata['deployments']:
iv = dict((i['name'], i['value']) for i in dep['inputs'])
sigurl = iv.get('deploy_signal_id')
requests.post(sigurl, data='{}',
headers={'content-type': 'application/json'},
verify=self.verify_cert)
class ZaqarSignalTransportTest(functional_base.FunctionalTestsBase):
server_template = '''
heat_template_version: "2013-05-23"
parameters:
flavor:
type: string
image:
type: string
network:
type: string
resources:
server:
type: OS::Nova::Server
properties:
image: {get_param: image}
flavor: {get_param: flavor}
user_data_format: SOFTWARE_CONFIG
software_config_transport: ZAQAR_MESSAGE
networks: [{network: {get_param: network}}]
config:
type: OS::Heat::SoftwareConfig
properties:
config: echo 'foo'
deployment:
type: OS::Heat::SoftwareDeployment
properties:
config: {get_resource: config}
server: {get_resource: server}
signal_transport: ZAQAR_SIGNAL
outputs:
data:
value: {get_attr: [deployment, deploy_stdout]}
'''
conf_template = '''
[zaqar]
user_id = %(user_id)s
password = <PASSWORD>
project_id = %(project_id)s
auth_url = %(auth_url)s
queue_id = %(queue_id)s
'''
def test_signal_queues(self):
parms = {'flavor': self.conf.minimal_instance_type,
'network': self.conf.fixed_network_name,
'image': self.conf.minimal_image_ref}
stack_identifier = self.stack_create(
parameters=parms,
template=self.server_template,
expected_status=None)
metadata = self.wait_for_deploy_metadata_set(stack_identifier)
config = metadata['os-collect-config']['zaqar']
conf_content = self.conf_template % config
fd, temp_path = tempfile.mkstemp()
os.write(fd, conf_content.encode('utf-8'))
os.close(fd)
cmd = ['os-collect-config', '--one-time',
'--config-file=%s' % temp_path, 'zaqar']
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
stdout_value = proc.communicate()[0]
data = json.loads(stdout_value.decode('utf-8'))
self.assertEqual(config, data['zaqar']['os-collect-config']['zaqar'])
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
stdout_value = proc.communicate()[0]
data = json.loads(stdout_value.decode('utf-8'))
fd, temp_path = tempfile.mkstemp()
os.write(fd,
json.dumps(data['zaqar']['deployments'][0]).encode('utf-8'))
os.close(fd)
cmd = [sys.executable, self.conf.heat_config_notify_script, temp_path]
proc = subprocess.Popen(cmd,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
proc.communicate(
json.dumps({'deploy_stdout': 'here!'}).encode('utf-8'))
self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
stack = self.client.stacks.get(stack_identifier)
self.assertEqual('here!', stack.outputs[0]['output_value'])
def wait_for_deploy_metadata_set(self, stack):
build_timeout = self.conf.build_timeout
build_interval = self.conf.build_interval
start = timeutils.utcnow()
while timeutils.delta_seconds(start,
timeutils.utcnow()) < build_timeout:
server_metadata = self.client.resources.metadata(
stack, 'server')
if server_metadata.get('deployments'):
return server_metadata
time.sleep(build_interval)
message = ('Deployment resources failed to be created within '
'the required time (%s s).' %
(build_timeout))
raise exceptions.TimeoutException(message) | 0.319121 | 0.138841 |
import keyring
import oslo_messaging
from oslo_config import cfg
from oslo_log import log
from sysinv.common import constants
from sysinv.common import utils
from sysinv.db import api as dbapi
LOG = log.getLogger(__name__)
callback_func = None
context = None
class NotificationEndpoint(object):
    """Task which exposes the API for consuming priority based notifications.

    The Oslo notification framework delivers notifications based on priority to
    matching callback APIs as defined in its notification listener endpoint
    list.

    Currently from Keystone perspective, `info` API is sufficient as Keystone
    send notifications at `info` priority ONLY. Other priority level APIs
    (warn, error, critical, audit, debug) are not needed here.
    """

    # Only Keystone user-update events reach this endpoint.
    filter_rule = oslo_messaging.NotificationFilter(
        event_type='identity.user.updated')

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        """Receives notification at info level."""
        global callback_func
        global context
        # Fire the registered callback only for a successful update of the
        # specific user recorded in the module-level context (both are set
        # by start_keystone_listener()).
        if payload['eventType'] == 'activity' and \
            payload['action'] == 'updated.user' and \
            payload['outcome'] == 'success' and \
            payload['resource_info'] == context.user:
            callback_func(context)
        return oslo_messaging.NotificationResult.HANDLED
def get_transport_url():
    """Build the RabbitMQ transport URL for the controller's management IP.

    Returns None when the management address cannot be resolved from the DB.
    """
    try:
        address_obj = dbapi.get_instance().address_get_by_name(
            utils.format_address_name(constants.CONTROLLER_HOSTNAME,
                                      constants.NETWORK_TYPE_MGMT)
        )
    except Exception as e:
        LOG.error("Failed to get management IP address: %s" % str(e))
        return None

    auth_password = keyring.get_password('<PASSWORD>', '<PASSWORD>')
    # IPv6 literals must be bracketed inside a URL authority.
    if utils.is_valid_ipv6(address_obj.address):
        host = "[%s]" % address_obj.address
    else:
        host = "%s" % address_obj.address
    return "rabbit://guest:%s@%s:5672" % (auth_password, host)
def start_keystone_listener(func, ctxt):
    """Block on an oslo.messaging listener for Keystone user-update events.

    func and ctxt are stored in module-level globals so that
    NotificationEndpoint.info can reach them when a matching
    notification arrives.
    """
    global callback_func
    global context
    callback_func = func
    context = ctxt

    conf = cfg.ConfigOpts()
    conf.transport_url = get_transport_url()
    if conf.transport_url is None:
        # Without a management address there is nothing to listen on.
        return
    transport = oslo_messaging.get_rpc_transport(conf)
    targets = [
        oslo_messaging.Target(exchange='keystone', topic='notifications', fanout=True),
    ]
    endpoints = [
        NotificationEndpoint(),
    ]
    # A shared pool name lets multiple workers split the notification queue.
    pool = "sysinv-keystone-listener-workers"
    server = oslo_messaging.get_notification_listener(transport, targets,
                                                      endpoints, pool=pool)
    LOG.info("Sysinv keystone listener started!")
    server.start()
    # Blocks until the listener is stopped.
    server.wait()
import oslo_messaging
from oslo_config import cfg
from oslo_log import log
from sysinv.common import constants
from sysinv.common import utils
from sysinv.db import api as dbapi
LOG = log.getLogger(__name__)
callback_func = None
context = None
class NotificationEndpoint(object):
"""Task which exposes the API for consuming priority based notifications.
The Oslo notification framework delivers notifications based on priority to
matching callback APIs as defined in its notification listener endpoint
list.
Currently from Keystone perspective, `info` API is sufficient as Keystone
send notifications at `info` priority ONLY. Other priority level APIs
(warn, error, critical, audit, debug) are not needed here.
"""
filter_rule = oslo_messaging.NotificationFilter(
event_type='identity.user.updated')
def info(self, ctxt, publisher_id, event_type, payload, metadata):
"""Receives notification at info level."""
global callback_func
global context
if payload['eventType'] == 'activity' and \
payload['action'] == 'updated.user' and \
payload['outcome'] == 'success' and \
payload['resource_info'] == context.user:
callback_func(context)
return oslo_messaging.NotificationResult.HANDLED
def get_transport_url():
try:
db_api = dbapi.get_instance()
network_object = db_api.address_get_by_name(
utils.format_address_name(constants.CONTROLLER_HOSTNAME,
constants.NETWORK_TYPE_MGMT)
)
except Exception as e:
LOG.error("Failed to get management IP address: %s" % str(e))
return None
auth_password = keyring.get_password('<PASSWORD>', '<PASSWORD>')
if utils.is_valid_ipv6(network_object.address):
ip_address = "[%s]" % network_object.address
else:
ip_address = "%s" % network_object.address
transport_url = "rabbit://guest:%s@%s:5672" % (auth_password, ip_address)
return transport_url
def start_keystone_listener(func, ctxt):
global callback_func
global context
callback_func = func
context = ctxt
conf = cfg.ConfigOpts()
conf.transport_url = get_transport_url()
if conf.transport_url is None:
return
transport = oslo_messaging.get_rpc_transport(conf)
targets = [
oslo_messaging.Target(exchange='keystone', topic='notifications', fanout=True),
]
endpoints = [
NotificationEndpoint(),
]
pool = "sysinv-keystone-listener-workers"
server = oslo_messaging.get_notification_listener(transport, targets,
endpoints, pool=pool)
LOG.info("Sysinv keystone listener started!")
server.start()
server.wait() | 0.407687 | 0.082883 |
import os
from options.test_options import TestOptions
from data import CreateDataLoader
from models import create_model
import numpy as np
from PIL import Image
from shutil import copyfile
def save_final_layer_image(image_numpy, image_path):
    """Save a CHW network output (tanh range [-1, 1]) as an image file."""
    # last layer is tanh: map [-1, 1] -> [0, 255] and go CHW -> HWC
    hwc = np.transpose(image_numpy, (1, 2, 0))
    scaled = ((hwc + 1) / 2.0 * 255.0).astype(np.uint8)
    Image.fromarray(scaled).save(image_path)
def save_channel_image(image_numpy, image_path):
    """Save one feature map (CHW) as a min-max normalized uint8 image.

    A single-channel input is tiled to 3 channels so it renders as
    grayscale RGB.
    """
    if image_numpy.shape[0] == 1:
        image_numpy = np.tile(image_numpy, (3, 1, 1))
    # normalize to (0, 1)
    min_value = np.min(image_numpy)
    image_numpy = image_numpy - min_value
    max_value = np.max(image_numpy)
    # BUG FIX: a constant channel made max_value 0 and the division
    # produced NaNs; leave such a channel all-zero instead.
    if max_value > 0:
        image_numpy = image_numpy / max_value
    image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0
    image_numpy = image_numpy.astype(np.uint8)
    image_pil = Image.fromarray(image_numpy)
    image_pil.save(image_path)
def debug_layer_images(target_dir, target_layer):
"""Save each channel in target_layer as one image into target_dir.
Args:
target_dir: str, target directory to save result images.
target_layer: int, target layer. None for all layers.
"""
opt = TestOptions().parse()
# hard-code some parameters for test
opt.num_threads = 1 # test code only supports num_threads = 1
opt.batch_size = 1 # test code only supports batch_size = 1
opt.serial_batches = True # no shuffle
opt.no_flip = True # no flip
opt.display_id = -1 # no visdom display
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
model = create_model(opt)
model.setup(opt)
# extract each layers in netG_A
layers = list(list(list(list(model.netG_A.children())[0].children())[0].modules())[0].children())
# print network
print("--- Start network A->B ---")
for i, layer in enumerate(layers):
print("#{}: {}".format(i, layer))
print("--- End network A->B ---")
# prepare data: only use first data for test
data_list_enu = enumerate(dataset)
i, data = next(data_list_enu)
print("--- Start data info ---")
print("data[A].shape: ", data['A'].shape)
print("A_paths: ", data['A_paths'])
print("--- End data info ---")
# compute each layers
output = data['A']
result = []
for i in range(len(layers)):
output = layers[i].cpu()(output)
print("layer{} output shape: {}".format(i, output.shape))
result.append(output.detach().numpy())
# create target dir
if not os.path.exists(target_dir):
os.makedirs(target_dir)
# save input image
path = os.path.join(target_dir, 'input.jpg')
copyfile(data['A_paths'][0], path)
# save result image
path = os.path.join(target_dir, 'output.jpg')
save_final_layer_image(result[27][0], path)
# save each layer's each channel as one image
for i in range(len(layers)):
if target_layer is not None and target_layer != i:
continue
print("Create images for layer_{}".format(i))
layer_path = os.path.join(target_dir, "layer_{}".format(i))
if not os.path.exists(layer_path):
os.makedirs(layer_path)
for target_channel in range(result[i].shape[1]):
path = os.path.join(layer_path, 'channel_{}.jpg'.format(target_channel))
save_channel_image(result[i][0, target_channel:target_channel + 1], path)
if __name__ == '__main__':
target_dir = 'debug_layer_images'
target_layer = None
debug_layer_images(target_dir, target_layer) | visualize_intermediate_layers.py | import os
from options.test_options import TestOptions
from data import CreateDataLoader
from models import create_model
import numpy as np
from PIL import Image
from shutil import copyfile
def save_final_layer_image(image_numpy, image_path):
# last layer is tanh
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
image_numpy = image_numpy.astype(np.uint8)
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path)
def save_channel_image(image_numpy, image_path):
if image_numpy.shape[0] == 1:
image_numpy = np.tile(image_numpy, (3, 1, 1))
# normalize to (0, 1)
min_value = np.min(image_numpy)
image_numpy = image_numpy - min_value
max_value = np.max(image_numpy)
image_numpy = image_numpy / max_value
image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0
image_numpy = image_numpy.astype(np.uint8)
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path)
def debug_layer_images(target_dir, target_layer):
"""Save each channel in target_layer as one image into target_dir.
Args:
target_dir: str, target directory to save result images.
target_layer: int, target layer. None for all layers.
"""
opt = TestOptions().parse()
# hard-code some parameters for test
opt.num_threads = 1 # test code only supports num_threads = 1
opt.batch_size = 1 # test code only supports batch_size = 1
opt.serial_batches = True # no shuffle
opt.no_flip = True # no flip
opt.display_id = -1 # no visdom display
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
model = create_model(opt)
model.setup(opt)
# extract each layers in netG_A
layers = list(list(list(list(model.netG_A.children())[0].children())[0].modules())[0].children())
# print network
print("--- Start network A->B ---")
for i, layer in enumerate(layers):
print("#{}: {}".format(i, layer))
print("--- End network A->B ---")
# prepare data: only use first data for test
data_list_enu = enumerate(dataset)
i, data = next(data_list_enu)
print("--- Start data info ---")
print("data[A].shape: ", data['A'].shape)
print("A_paths: ", data['A_paths'])
print("--- End data info ---")
# compute each layers
output = data['A']
result = []
for i in range(len(layers)):
output = layers[i].cpu()(output)
print("layer{} output shape: {}".format(i, output.shape))
result.append(output.detach().numpy())
# create target dir
if not os.path.exists(target_dir):
os.makedirs(target_dir)
# save input image
path = os.path.join(target_dir, 'input.jpg')
copyfile(data['A_paths'][0], path)
# save result image
path = os.path.join(target_dir, 'output.jpg')
save_final_layer_image(result[27][0], path)
# save each layer's each channel as one image
for i in range(len(layers)):
if target_layer is not None and target_layer != i:
continue
print("Create images for layer_{}".format(i))
layer_path = os.path.join(target_dir, "layer_{}".format(i))
if not os.path.exists(layer_path):
os.makedirs(layer_path)
for target_channel in range(result[i].shape[1]):
path = os.path.join(layer_path, 'channel_{}.jpg'.format(target_channel))
save_channel_image(result[i][0, target_channel:target_channel + 1], path)
if __name__ == '__main__':
target_dir = 'debug_layer_images'
target_layer = None
debug_layer_images(target_dir, target_layer) | 0.246171 | 0.344443 |
import pprint
import re # noqa: F401
import six
class GetCharactersCharacterIdAttributesOk(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'charisma': 'int',
'intelligence': 'int',
'memory': 'int',
'perception': 'int',
'willpower': 'int',
'bonus_remaps': 'int',
'last_remap_date': 'datetime',
'accrued_remap_cooldown_date': 'datetime'
}
attribute_map = {
'charisma': 'charisma',
'intelligence': 'intelligence',
'memory': 'memory',
'perception': 'perception',
'willpower': 'willpower',
'bonus_remaps': 'bonus_remaps',
'last_remap_date': 'last_remap_date',
'accrued_remap_cooldown_date': 'accrued_remap_cooldown_date'
}
def __init__(self, charisma=None, intelligence=None, memory=None, perception=None, willpower=None, bonus_remaps=None, last_remap_date=None, accrued_remap_cooldown_date=None): # noqa: E501
"""GetCharactersCharacterIdAttributesOk - a model defined in Swagger""" # noqa: E501
self._charisma = None
self._intelligence = None
self._memory = None
self._perception = None
self._willpower = None
self._bonus_remaps = None
self._last_remap_date = None
self._accrued_remap_cooldown_date = None
self.discriminator = None
self.charisma = charisma
self.intelligence = intelligence
self.memory = memory
self.perception = perception
self.willpower = willpower
if bonus_remaps is not None:
self.bonus_remaps = bonus_remaps
if last_remap_date is not None:
self.last_remap_date = last_remap_date
if accrued_remap_cooldown_date is not None:
self.accrued_remap_cooldown_date = accrued_remap_cooldown_date
@property
def charisma(self):
"""Gets the charisma of this GetCharactersCharacterIdAttributesOk. # noqa: E501
charisma integer # noqa: E501
:return: The charisma of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:rtype: int
"""
return self._charisma
@charisma.setter
def charisma(self, charisma):
"""Sets the charisma of this GetCharactersCharacterIdAttributesOk.
charisma integer # noqa: E501
:param charisma: The charisma of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:type: int
"""
if charisma is None:
raise ValueError("Invalid value for `charisma`, must not be `None`") # noqa: E501
self._charisma = charisma
@property
def intelligence(self):
"""Gets the intelligence of this GetCharactersCharacterIdAttributesOk. # noqa: E501
intelligence integer # noqa: E501
:return: The intelligence of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:rtype: int
"""
return self._intelligence
@intelligence.setter
def intelligence(self, intelligence):
"""Sets the intelligence of this GetCharactersCharacterIdAttributesOk.
intelligence integer # noqa: E501
:param intelligence: The intelligence of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:type: int
"""
if intelligence is None:
raise ValueError("Invalid value for `intelligence`, must not be `None`") # noqa: E501
self._intelligence = intelligence
@property
def memory(self):
"""Gets the memory of this GetCharactersCharacterIdAttributesOk. # noqa: E501
memory integer # noqa: E501
:return: The memory of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:rtype: int
"""
return self._memory
@memory.setter
def memory(self, memory):
"""Sets the memory of this GetCharactersCharacterIdAttributesOk.
memory integer # noqa: E501
:param memory: The memory of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:type: int
"""
if memory is None:
raise ValueError("Invalid value for `memory`, must not be `None`") # noqa: E501
self._memory = memory
@property
def perception(self):
"""Gets the perception of this GetCharactersCharacterIdAttributesOk. # noqa: E501
perception integer # noqa: E501
:return: The perception of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:rtype: int
"""
return self._perception
@perception.setter
def perception(self, perception):
"""Sets the perception of this GetCharactersCharacterIdAttributesOk.
perception integer # noqa: E501
:param perception: The perception of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:type: int
"""
if perception is None:
raise ValueError("Invalid value for `perception`, must not be `None`") # noqa: E501
self._perception = perception
@property
def willpower(self):
"""Gets the willpower of this GetCharactersCharacterIdAttributesOk. # noqa: E501
willpower integer # noqa: E501
:return: The willpower of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:rtype: int
"""
return self._willpower
@willpower.setter
def willpower(self, willpower):
"""Sets the willpower of this GetCharactersCharacterIdAttributesOk.
willpower integer # noqa: E501
:param willpower: The willpower of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:type: int
"""
if willpower is None:
raise ValueError("Invalid value for `willpower`, must not be `None`") # noqa: E501
self._willpower = willpower
@property
def bonus_remaps(self):
"""Gets the bonus_remaps of this GetCharactersCharacterIdAttributesOk. # noqa: E501
Number of available bonus character neural remaps # noqa: E501
:return: The bonus_remaps of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:rtype: int
"""
return self._bonus_remaps
@bonus_remaps.setter
def bonus_remaps(self, bonus_remaps):
"""Sets the bonus_remaps of this GetCharactersCharacterIdAttributesOk.
Number of available bonus character neural remaps # noqa: E501
:param bonus_remaps: The bonus_remaps of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:type: int
"""
self._bonus_remaps = bonus_remaps
@property
def last_remap_date(self):
"""Gets the last_remap_date of this GetCharactersCharacterIdAttributesOk. # noqa: E501
Datetime of last neural remap, including usage of bonus remaps # noqa: E501
:return: The last_remap_date of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:rtype: datetime
"""
return self._last_remap_date
@last_remap_date.setter
def last_remap_date(self, last_remap_date):
"""Sets the last_remap_date of this GetCharactersCharacterIdAttributesOk.
Datetime of last neural remap, including usage of bonus remaps # noqa: E501
:param last_remap_date: The last_remap_date of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:type: datetime
"""
self._last_remap_date = last_remap_date
@property
def accrued_remap_cooldown_date(self):
"""Gets the accrued_remap_cooldown_date of this GetCharactersCharacterIdAttributesOk. # noqa: E501
Neural remapping cooldown after a character uses remap accrued over time # noqa: E501
:return: The accrued_remap_cooldown_date of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:rtype: datetime
"""
return self._accrued_remap_cooldown_date
@accrued_remap_cooldown_date.setter
def accrued_remap_cooldown_date(self, accrued_remap_cooldown_date):
"""Sets the accrued_remap_cooldown_date of this GetCharactersCharacterIdAttributesOk.
Neural remapping cooldown after a character uses remap accrued over time # noqa: E501
:param accrued_remap_cooldown_date: The accrued_remap_cooldown_date of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:type: datetime
"""
self._accrued_remap_cooldown_date = accrued_remap_cooldown_date
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GetCharactersCharacterIdAttributesOk):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | swagger_client/models/get_characters_character_id_attributes_ok.py | import pprint
import re # noqa: F401
import six
class GetCharactersCharacterIdAttributesOk(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'charisma': 'int',
'intelligence': 'int',
'memory': 'int',
'perception': 'int',
'willpower': 'int',
'bonus_remaps': 'int',
'last_remap_date': 'datetime',
'accrued_remap_cooldown_date': 'datetime'
}
attribute_map = {
'charisma': 'charisma',
'intelligence': 'intelligence',
'memory': 'memory',
'perception': 'perception',
'willpower': 'willpower',
'bonus_remaps': 'bonus_remaps',
'last_remap_date': 'last_remap_date',
'accrued_remap_cooldown_date': 'accrued_remap_cooldown_date'
}
def __init__(self, charisma=None, intelligence=None, memory=None, perception=None, willpower=None, bonus_remaps=None, last_remap_date=None, accrued_remap_cooldown_date=None): # noqa: E501
"""GetCharactersCharacterIdAttributesOk - a model defined in Swagger""" # noqa: E501
self._charisma = None
self._intelligence = None
self._memory = None
self._perception = None
self._willpower = None
self._bonus_remaps = None
self._last_remap_date = None
self._accrued_remap_cooldown_date = None
self.discriminator = None
self.charisma = charisma
self.intelligence = intelligence
self.memory = memory
self.perception = perception
self.willpower = willpower
if bonus_remaps is not None:
self.bonus_remaps = bonus_remaps
if last_remap_date is not None:
self.last_remap_date = last_remap_date
if accrued_remap_cooldown_date is not None:
self.accrued_remap_cooldown_date = accrued_remap_cooldown_date
@property
def charisma(self):
"""Gets the charisma of this GetCharactersCharacterIdAttributesOk. # noqa: E501
charisma integer # noqa: E501
:return: The charisma of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:rtype: int
"""
return self._charisma
@charisma.setter
def charisma(self, charisma):
"""Sets the charisma of this GetCharactersCharacterIdAttributesOk.
charisma integer # noqa: E501
:param charisma: The charisma of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:type: int
"""
if charisma is None:
raise ValueError("Invalid value for `charisma`, must not be `None`") # noqa: E501
self._charisma = charisma
@property
def intelligence(self):
"""Gets the intelligence of this GetCharactersCharacterIdAttributesOk. # noqa: E501
intelligence integer # noqa: E501
:return: The intelligence of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:rtype: int
"""
return self._intelligence
@intelligence.setter
def intelligence(self, intelligence):
"""Sets the intelligence of this GetCharactersCharacterIdAttributesOk.
intelligence integer # noqa: E501
:param intelligence: The intelligence of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:type: int
"""
if intelligence is None:
raise ValueError("Invalid value for `intelligence`, must not be `None`") # noqa: E501
self._intelligence = intelligence
@property
def memory(self):
"""Gets the memory of this GetCharactersCharacterIdAttributesOk. # noqa: E501
memory integer # noqa: E501
:return: The memory of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:rtype: int
"""
return self._memory
@memory.setter
def memory(self, memory):
"""Sets the memory of this GetCharactersCharacterIdAttributesOk.
memory integer # noqa: E501
:param memory: The memory of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:type: int
"""
if memory is None:
raise ValueError("Invalid value for `memory`, must not be `None`") # noqa: E501
self._memory = memory
@property
def perception(self):
"""Gets the perception of this GetCharactersCharacterIdAttributesOk. # noqa: E501
perception integer # noqa: E501
:return: The perception of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:rtype: int
"""
return self._perception
@perception.setter
def perception(self, perception):
"""Sets the perception of this GetCharactersCharacterIdAttributesOk.
perception integer # noqa: E501
:param perception: The perception of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:type: int
"""
if perception is None:
raise ValueError("Invalid value for `perception`, must not be `None`") # noqa: E501
self._perception = perception
@property
def willpower(self):
"""Gets the willpower of this GetCharactersCharacterIdAttributesOk. # noqa: E501
willpower integer # noqa: E501
:return: The willpower of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:rtype: int
"""
return self._willpower
@willpower.setter
def willpower(self, willpower):
"""Sets the willpower of this GetCharactersCharacterIdAttributesOk.
willpower integer # noqa: E501
:param willpower: The willpower of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:type: int
"""
if willpower is None:
raise ValueError("Invalid value for `willpower`, must not be `None`") # noqa: E501
self._willpower = willpower
@property
def bonus_remaps(self):
"""Gets the bonus_remaps of this GetCharactersCharacterIdAttributesOk. # noqa: E501
Number of available bonus character neural remaps # noqa: E501
:return: The bonus_remaps of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:rtype: int
"""
return self._bonus_remaps
@bonus_remaps.setter
def bonus_remaps(self, bonus_remaps):
"""Sets the bonus_remaps of this GetCharactersCharacterIdAttributesOk.
Number of available bonus character neural remaps # noqa: E501
:param bonus_remaps: The bonus_remaps of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:type: int
"""
self._bonus_remaps = bonus_remaps
@property
def last_remap_date(self):
"""Gets the last_remap_date of this GetCharactersCharacterIdAttributesOk. # noqa: E501
Datetime of last neural remap, including usage of bonus remaps # noqa: E501
:return: The last_remap_date of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:rtype: datetime
"""
return self._last_remap_date
@last_remap_date.setter
def last_remap_date(self, last_remap_date):
"""Sets the last_remap_date of this GetCharactersCharacterIdAttributesOk.
Datetime of last neural remap, including usage of bonus remaps # noqa: E501
:param last_remap_date: The last_remap_date of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:type: datetime
"""
self._last_remap_date = last_remap_date
@property
def accrued_remap_cooldown_date(self):
"""Gets the accrued_remap_cooldown_date of this GetCharactersCharacterIdAttributesOk. # noqa: E501
Neural remapping cooldown after a character uses remap accrued over time # noqa: E501
:return: The accrued_remap_cooldown_date of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:rtype: datetime
"""
return self._accrued_remap_cooldown_date
@accrued_remap_cooldown_date.setter
def accrued_remap_cooldown_date(self, accrued_remap_cooldown_date):
"""Sets the accrued_remap_cooldown_date of this GetCharactersCharacterIdAttributesOk.
Neural remapping cooldown after a character uses remap accrued over time # noqa: E501
:param accrued_remap_cooldown_date: The accrued_remap_cooldown_date of this GetCharactersCharacterIdAttributesOk. # noqa: E501
:type: datetime
"""
self._accrued_remap_cooldown_date = accrued_remap_cooldown_date
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GetCharactersCharacterIdAttributesOk):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | 0.772359 | 0.090173 |
import sys
import typing
import numba as nb
import numpy as np
@nb.njit
def sort_csgraph(
n: int,
g: np.ndarray,
) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray]:
sort_idx = np.argsort(g[:, 0], kind='mergesort')
g = g[sort_idx]
edge_idx = np.searchsorted(g[:, 0], np.arange(n + 1))
original_idx = np.arange(len(g))[sort_idx]
return g, edge_idx , original_idx
@nb.njit
def scc(n: int, g: np.ndarray) -> np.ndarray:
def _scc_dfs(n, g):
g, edge_idx, _ = sort_csgraph(n, g)
order = np.full(n, -1, np.int64)
ord_ = 0
visited = np.zeros(n, np.int8)
for i in range(n):
if visited[i]: continue
st = [i]
while st:
u = st.pop()
if u < 0:
u = -u - 1
order[u] = ord_
ord_ += 1
continue
if visited[u]: continue
visited[u] = True
st.append(-u - 1)
for v in g[edge_idx[u]:edge_idx[u + 1], 1][::-1]:
if visited[v]: continue
st.append(v)
que = np.empty(n, np.int64)
que[order] = np.arange(n)
return que[::-1]
def _scc_reverse_dfs(n, g, que):
g[:, :2] = g[:, 1::-1]
g, edge_idx, _ = sort_csgraph(n, g)
label = np.full(n, -1, np.int64)
l = 0
for i in que:
if label[i] != -1: continue
st = [i]
label[i] = l
while st:
u = st.pop()
for v in g[edge_idx[u]:edge_idx[u + 1], 1][::-1]:
if label[v] != -1: continue
label[v] = l
st.append(v)
l += 1
return label
g = g.copy()
que = _scc_dfs(n, g)
return _scc_reverse_dfs(n, g, que)
@nb.njit((nb.i8, nb.i8[:, :]), cache=True)
def solve(n: int, g: np.ndarray) -> typing.NoReturn:
label = scc(n, g)
return label
def main() -> typing.NoReturn:
n, m = map(int, input().split())
ab = np.array(
sys.stdin.read().split(),
dtype=np.int64,
).reshape(m, 2)
label = solve(n, ab)
k = label.max() + 1
label = label.tolist()
scc = [[] for _ in range(k)]
for i, l in enumerate(label):
scc[l].append(i)
print(k)
for s in scc:
print(len(s), *s)
main() | jp.atcoder/practice2/practice2_g/26020641.py | import sys
import typing
import numba as nb
import numpy as np
@nb.njit
def sort_csgraph(
n: int,
g: np.ndarray,
) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray]:
sort_idx = np.argsort(g[:, 0], kind='mergesort')
g = g[sort_idx]
edge_idx = np.searchsorted(g[:, 0], np.arange(n + 1))
original_idx = np.arange(len(g))[sort_idx]
return g, edge_idx , original_idx
@nb.njit
def scc(n: int, g: np.ndarray) -> np.ndarray:
def _scc_dfs(n, g):
g, edge_idx, _ = sort_csgraph(n, g)
order = np.full(n, -1, np.int64)
ord_ = 0
visited = np.zeros(n, np.int8)
for i in range(n):
if visited[i]: continue
st = [i]
while st:
u = st.pop()
if u < 0:
u = -u - 1
order[u] = ord_
ord_ += 1
continue
if visited[u]: continue
visited[u] = True
st.append(-u - 1)
for v in g[edge_idx[u]:edge_idx[u + 1], 1][::-1]:
if visited[v]: continue
st.append(v)
que = np.empty(n, np.int64)
que[order] = np.arange(n)
return que[::-1]
def _scc_reverse_dfs(n, g, que):
g[:, :2] = g[:, 1::-1]
g, edge_idx, _ = sort_csgraph(n, g)
label = np.full(n, -1, np.int64)
l = 0
for i in que:
if label[i] != -1: continue
st = [i]
label[i] = l
while st:
u = st.pop()
for v in g[edge_idx[u]:edge_idx[u + 1], 1][::-1]:
if label[v] != -1: continue
label[v] = l
st.append(v)
l += 1
return label
g = g.copy()
que = _scc_dfs(n, g)
return _scc_reverse_dfs(n, g, que)
@nb.njit((nb.i8, nb.i8[:, :]), cache=True)
def solve(n: int, g: np.ndarray) -> typing.NoReturn:
label = scc(n, g)
return label
def main() -> typing.NoReturn:
n, m = map(int, input().split())
ab = np.array(
sys.stdin.read().split(),
dtype=np.int64,
).reshape(m, 2)
label = solve(n, ab)
k = label.max() + 1
label = label.tolist()
scc = [[] for _ in range(k)]
for i, l in enumerate(label):
scc[l].append(i)
print(k)
for s in scc:
print(len(s), *s)
main() | 0.165256 | 0.250913 |
import json
from copy import deepcopy
from datetime import datetime, timedelta
from buildkite.schedule_and_publish.get_commits import get_commits
from models.benchmark_group_execution import BenchmarkGroupExecution
from models.machine import Machine
from models.memory_usage import MemoryUsage
from models.run import Run
from utils import generate_uuid
benchmark_group_execution_id = generate_uuid()
memory_usage_id = generate_uuid()
benchmark_group_execution_data = {
"type": "BenchmarkGroupExecution",
"id": benchmark_group_execution_id,
"lang": "Python",
"name": "file-read",
"options": "options",
"flags": "flags",
"benchmarkable_id": "1",
"run_id": "run_id",
"run_name": "rune_name",
"machine": "machine",
"process_pid": 2,
"command": "command",
"started_at": str(datetime.now() - timedelta(minutes=3)),
"finished_at": str(datetime.now()),
"total_run_time": str(timedelta(minutes=3)),
"failed": True,
"return_code": 137,
"stderr": "stderr",
"total_machine_virtual_memory": 16624467968,
}
memory_usage_data = {
"type": "MemoryUsage",
"id": memory_usage_id,
"benchmark_group_execution_id": benchmark_group_execution_id,
"process_pid": 3,
"parent_process_pid": 2,
"process_name": "R",
"process_cmdline": [
"/var/lib/buildkite-agent/miniconda3/envs/arrow-commit/lib/R/bin/exec/R",
"-e",
'library(arrowbench);~+~run_one(write_file,~+~source="nyctaxi_2010-01",~+~format="feather",~+~compression="lz4",~+~input="data_frame",~+~cpu_count=NULL)',
],
"mem_percent": 0.4413227066347222,
"mem_rss_bytes": 27533,
}
def log_benchmark_group_execution(client, data=None, api_access_token=None):
get_commits()
machine = Machine.first()
if not data:
run = Run.first(machine_name=machine.name)
data = deepcopy(benchmark_group_execution_data)
data["run_id"] = run.id
if not api_access_token:
api_access_token = machine.create_api_access_token()
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_access_token}",
}
return client.post("/logs", data=json.dumps(data), headers=headers)
def log_memory_usage(client, data=None, api_access_token=None):
if not data:
data = deepcopy(memory_usage_data)
if not api_access_token:
machine = Machine.first()
api_access_token = machine.create_api_access_token()
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_access_token}",
}
return client.post("/logs", data=json.dumps(data), headers=headers)
def test_benchmark_group_execution_logs_201(client):
assert not BenchmarkGroupExecution.get(benchmark_group_execution_data["id"])
response = log_benchmark_group_execution(client, data=None, api_access_token=None)
assert response.status_code == 201
assert BenchmarkGroupExecution.get(benchmark_group_execution_data["id"])
def test_benchmark_group_execution_logs_401_invalid_token(client):
assert not BenchmarkGroupExecution.get(benchmark_group_execution_data["id"])
response = log_benchmark_group_execution(
client, data=None, api_access_token="invalid token"
)
assert response.status_code == 401
assert not BenchmarkGroupExecution.get(benchmark_group_execution_data["id"])
def test_benchmark_group_execution_logs_401_invalid_run_id(client):
assert not BenchmarkGroupExecution.get(benchmark_group_execution_data["id"])
response = log_benchmark_group_execution(
client, data=benchmark_group_execution_data, api_access_token=None
)
assert response.status_code == 401
assert not BenchmarkGroupExecution.get(benchmark_group_execution_data["id"])
def test_memory_usage_logs_201(client):
assert not MemoryUsage.get(memory_usage_data["id"])
machine = Machine.first()
api_access_token = machine.create_api_access_token()
log_benchmark_group_execution(client, data=None, api_access_token=api_access_token)
response = log_memory_usage(client, data=None, api_access_token=api_access_token)
assert response.status_code == 201
assert MemoryUsage.get(memory_usage_data["id"])
def test_memory_usage_logs_401_invalid_token(client):
assert not MemoryUsage.get(memory_usage_data["id"])
machine = Machine.first()
api_access_token = machine.create_api_access_token()
log_benchmark_group_execution(client, data=None, api_access_token=api_access_token)
response = log_memory_usage(client, data=None, api_access_token="invalid token")
assert response.status_code == 401
assert not MemoryUsage.get(memory_usage_data["id"])
def test_memory_usage_logs_401_not_existing_benchmark_execution_group(client):
assert not MemoryUsage.get(memory_usage_data["id"])
machine = Machine.first()
api_access_token = machine.create_api_access_token()
log_benchmark_group_execution(client, data=None, api_access_token=api_access_token)
data = deepcopy(memory_usage_data)
data["benchmark_group_execution_id"] = "not_existing_benchmark_execution_group_id"
response = log_memory_usage(client, data=data, api_access_token=api_access_token)
assert response.status_code == 401
assert not MemoryUsage.get(memory_usage_data["id"])
def test_memory_usage_logs_401_unauthorized_machine(client):
assert not MemoryUsage.get(memory_usage_data["id"])
machines = Machine.all()
api_access_token_1 = machines[0].create_api_access_token()
api_access_token_2 = machines[1].create_api_access_token()
log_benchmark_group_execution(
client, data=None, api_access_token=api_access_token_1
)
response = log_memory_usage(client, data=None, api_access_token=api_access_token_2)
assert response.status_code == 401
assert not MemoryUsage.get(memory_usage_data["id"]) | tests/api/test_logs.py | import json
from copy import deepcopy
from datetime import datetime, timedelta
from buildkite.schedule_and_publish.get_commits import get_commits
from models.benchmark_group_execution import BenchmarkGroupExecution
from models.machine import Machine
from models.memory_usage import MemoryUsage
from models.run import Run
from utils import generate_uuid
benchmark_group_execution_id = generate_uuid()
memory_usage_id = generate_uuid()
benchmark_group_execution_data = {
"type": "BenchmarkGroupExecution",
"id": benchmark_group_execution_id,
"lang": "Python",
"name": "file-read",
"options": "options",
"flags": "flags",
"benchmarkable_id": "1",
"run_id": "run_id",
"run_name": "rune_name",
"machine": "machine",
"process_pid": 2,
"command": "command",
"started_at": str(datetime.now() - timedelta(minutes=3)),
"finished_at": str(datetime.now()),
"total_run_time": str(timedelta(minutes=3)),
"failed": True,
"return_code": 137,
"stderr": "stderr",
"total_machine_virtual_memory": 16624467968,
}
memory_usage_data = {
"type": "MemoryUsage",
"id": memory_usage_id,
"benchmark_group_execution_id": benchmark_group_execution_id,
"process_pid": 3,
"parent_process_pid": 2,
"process_name": "R",
"process_cmdline": [
"/var/lib/buildkite-agent/miniconda3/envs/arrow-commit/lib/R/bin/exec/R",
"-e",
'library(arrowbench);~+~run_one(write_file,~+~source="nyctaxi_2010-01",~+~format="feather",~+~compression="lz4",~+~input="data_frame",~+~cpu_count=NULL)',
],
"mem_percent": 0.4413227066347222,
"mem_rss_bytes": 27533,
}
def log_benchmark_group_execution(client, data=None, api_access_token=None):
get_commits()
machine = Machine.first()
if not data:
run = Run.first(machine_name=machine.name)
data = deepcopy(benchmark_group_execution_data)
data["run_id"] = run.id
if not api_access_token:
api_access_token = machine.create_api_access_token()
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_access_token}",
}
return client.post("/logs", data=json.dumps(data), headers=headers)
def log_memory_usage(client, data=None, api_access_token=None):
if not data:
data = deepcopy(memory_usage_data)
if not api_access_token:
machine = Machine.first()
api_access_token = machine.create_api_access_token()
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_access_token}",
}
return client.post("/logs", data=json.dumps(data), headers=headers)
def test_benchmark_group_execution_logs_201(client):
assert not BenchmarkGroupExecution.get(benchmark_group_execution_data["id"])
response = log_benchmark_group_execution(client, data=None, api_access_token=None)
assert response.status_code == 201
assert BenchmarkGroupExecution.get(benchmark_group_execution_data["id"])
def test_benchmark_group_execution_logs_401_invalid_token(client):
assert not BenchmarkGroupExecution.get(benchmark_group_execution_data["id"])
response = log_benchmark_group_execution(
client, data=None, api_access_token="invalid token"
)
assert response.status_code == 401
assert not BenchmarkGroupExecution.get(benchmark_group_execution_data["id"])
def test_benchmark_group_execution_logs_401_invalid_run_id(client):
assert not BenchmarkGroupExecution.get(benchmark_group_execution_data["id"])
response = log_benchmark_group_execution(
client, data=benchmark_group_execution_data, api_access_token=None
)
assert response.status_code == 401
assert not BenchmarkGroupExecution.get(benchmark_group_execution_data["id"])
def test_memory_usage_logs_201(client):
assert not MemoryUsage.get(memory_usage_data["id"])
machine = Machine.first()
api_access_token = machine.create_api_access_token()
log_benchmark_group_execution(client, data=None, api_access_token=api_access_token)
response = log_memory_usage(client, data=None, api_access_token=api_access_token)
assert response.status_code == 201
assert MemoryUsage.get(memory_usage_data["id"])
def test_memory_usage_logs_401_invalid_token(client):
assert not MemoryUsage.get(memory_usage_data["id"])
machine = Machine.first()
api_access_token = machine.create_api_access_token()
log_benchmark_group_execution(client, data=None, api_access_token=api_access_token)
response = log_memory_usage(client, data=None, api_access_token="invalid token")
assert response.status_code == 401
assert not MemoryUsage.get(memory_usage_data["id"])
def test_memory_usage_logs_401_not_existing_benchmark_execution_group(client):
assert not MemoryUsage.get(memory_usage_data["id"])
machine = Machine.first()
api_access_token = machine.create_api_access_token()
log_benchmark_group_execution(client, data=None, api_access_token=api_access_token)
data = deepcopy(memory_usage_data)
data["benchmark_group_execution_id"] = "not_existing_benchmark_execution_group_id"
response = log_memory_usage(client, data=data, api_access_token=api_access_token)
assert response.status_code == 401
assert not MemoryUsage.get(memory_usage_data["id"])
def test_memory_usage_logs_401_unauthorized_machine(client):
assert not MemoryUsage.get(memory_usage_data["id"])
machines = Machine.all()
api_access_token_1 = machines[0].create_api_access_token()
api_access_token_2 = machines[1].create_api_access_token()
log_benchmark_group_execution(
client, data=None, api_access_token=api_access_token_1
)
response = log_memory_usage(client, data=None, api_access_token=api_access_token_2)
assert response.status_code == 401
assert not MemoryUsage.get(memory_usage_data["id"]) | 0.485356 | 0.25163 |
from __future__ import division
import os
import argparse
from solver import Solver
def main(args):
# Create directory if it doesn't exist.
if not os.path.exists(args.text2pal_dir):
os.makedirs(args.text2pal_dir)
if not os.path.exists(args.pal2color_dir):
os.makedirs(args.pal2color_dir)
if not os.path.exists(args.train_sample_dir):
os.makedirs(args.train_sample_dir)
if not os.path.exists(os.path.join(args.test_sample_dir, args.mode)):
os.makedirs(os.path.join(args.test_sample_dir, args.mode))
# Solver for training and testing Text2Colors.
solver = Solver(args)
# Train or test.
if args.mode == 'train_TPN':
solver.train_TPN()
elif args.mode == 'test_TPN':
solver.test_TPN()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Model configuration.
# text2pal
parser.add_argument('--hidden_size', type=int, default=150)
parser.add_argument('--n_layers', type=int, default=1)
# pal2color
parser.add_argument('--always_give_global_hint', type=int, default=1)
parser.add_argument('--add_L', type=int, default=1)
# Training and testing configuration.
parser.add_argument('--mode', type=str, default='train_TPN',
choices=['train_TPN', 'train_PCN', 'test_TPN', 'test_text2colors'])
parser.add_argument('--dataset', type=str, default='bird256', choices=['imagenet', 'bird256'])
parser.add_argument('--lr', type=float, default=5e-4, help='initial learning rate')
parser.add_argument('--num_epochs', type=int, default=1000, help='number of epochs for training')
parser.add_argument('--resume_epoch', type=int, default=None, help='resume training from this epoch')
parser.add_argument('--batch_size', type=int, default=32, help='batch size for training')
parser.add_argument('--dropout_p', type=float, default=0.2)
parser.add_argument('--weight_decay', type=float, default=5e-5)
parser.add_argument('--beta1', type=float, default=0.5)
parser.add_argument('--beta2', type=float, default=0.99)
parser.add_argument('--lambda_sL1', type=float, default=100.0, help='weight for L1 loss')
parser.add_argument('--lambda_KL', type=float, default=0.5, help='weight for KL loss')
parser.add_argument('--lambda_GAN', type=float, default=0.1)
# Directories.
parser.add_argument('--text2pal_dir', type=str, default='./models/TPN')
parser.add_argument('--pal2color_dir', type=str, default='./models/PCN')
parser.add_argument('--train_sample_dir', type=str, default='./samples/train')
parser.add_argument('--test_sample_dir', type=str, default='./samples/test')
# Step size.
parser.add_argument('--log_interval', type=int, default=1,
help='how many steps to wait before logging training status')
parser.add_argument('--sample_interval', type=int, default=20,
help='how many steps to wait before saving the training output')
parser.add_argument('--save_interval', type=int, default=50,
help='how many steps to wait before saving the trained models')
args = parser.parse_args()
print(args)
main(args) | main.py | from __future__ import division
import os
import argparse
from solver import Solver
def main(args):
# Create directory if it doesn't exist.
if not os.path.exists(args.text2pal_dir):
os.makedirs(args.text2pal_dir)
if not os.path.exists(args.pal2color_dir):
os.makedirs(args.pal2color_dir)
if not os.path.exists(args.train_sample_dir):
os.makedirs(args.train_sample_dir)
if not os.path.exists(os.path.join(args.test_sample_dir, args.mode)):
os.makedirs(os.path.join(args.test_sample_dir, args.mode))
# Solver for training and testing Text2Colors.
solver = Solver(args)
# Train or test.
if args.mode == 'train_TPN':
solver.train_TPN()
elif args.mode == 'test_TPN':
solver.test_TPN()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Model configuration.
# text2pal
parser.add_argument('--hidden_size', type=int, default=150)
parser.add_argument('--n_layers', type=int, default=1)
# pal2color
parser.add_argument('--always_give_global_hint', type=int, default=1)
parser.add_argument('--add_L', type=int, default=1)
# Training and testing configuration.
parser.add_argument('--mode', type=str, default='train_TPN',
choices=['train_TPN', 'train_PCN', 'test_TPN', 'test_text2colors'])
parser.add_argument('--dataset', type=str, default='bird256', choices=['imagenet', 'bird256'])
parser.add_argument('--lr', type=float, default=5e-4, help='initial learning rate')
parser.add_argument('--num_epochs', type=int, default=1000, help='number of epochs for training')
parser.add_argument('--resume_epoch', type=int, default=None, help='resume training from this epoch')
parser.add_argument('--batch_size', type=int, default=32, help='batch size for training')
parser.add_argument('--dropout_p', type=float, default=0.2)
parser.add_argument('--weight_decay', type=float, default=5e-5)
parser.add_argument('--beta1', type=float, default=0.5)
parser.add_argument('--beta2', type=float, default=0.99)
parser.add_argument('--lambda_sL1', type=float, default=100.0, help='weight for L1 loss')
parser.add_argument('--lambda_KL', type=float, default=0.5, help='weight for KL loss')
parser.add_argument('--lambda_GAN', type=float, default=0.1)
# Directories.
parser.add_argument('--text2pal_dir', type=str, default='./models/TPN')
parser.add_argument('--pal2color_dir', type=str, default='./models/PCN')
parser.add_argument('--train_sample_dir', type=str, default='./samples/train')
parser.add_argument('--test_sample_dir', type=str, default='./samples/test')
# Step size.
parser.add_argument('--log_interval', type=int, default=1,
help='how many steps to wait before logging training status')
parser.add_argument('--sample_interval', type=int, default=20,
help='how many steps to wait before saving the training output')
parser.add_argument('--save_interval', type=int, default=50,
help='how many steps to wait before saving the trained models')
args = parser.parse_args()
print(args)
main(args) | 0.448909 | 0.075551 |
from ...Generator import PYTHON_TYPE, TAB, TAB2, TAB3, TAB4, TAB5, TAB6, TAB7
from ...Generator.read_fct import (
is_list_pyleecan_type,
is_dict_pyleecan_type,
is_list_unknow_type,
)
def generate_set_None(gen_dict, class_dict, soft_name="software"):
"""Generate the code for the _set_None method of the class
Parameters
----------
gen_dict : dict
Dict with key = class name and value = class dict (name, package, properties, methods...)
class_dict : dict
dictionary of the class to generate (keys are name, package, properties, methods...)
soft_name : str
Name of the software to generate
Returns
-------
None_str : str
String containing the code for the _set_None method of the class
"""
class_name = class_dict["name"]
None_str = "" # This string is for the generated code
# Code line to set every properties to None (except pyleecan object)
var_str = ""
for prop in class_dict["properties"]:
if (
prop["type"] in PYTHON_TYPE
or prop["type"] in ["ndarray", "{ndarray}", "[ndarray]", "function"]
or "." in prop["type"]
):
var_str += TAB2 + "self." + prop["name"] + " = None\n"
elif (
is_list_pyleecan_type(prop["type"])
or is_dict_pyleecan_type(prop["type"])
or is_list_unknow_type(prop["type"])
):
var_str += TAB2 + "self." + prop["name"] + " = None\n"
elif prop["type"] in ["", None]: # No type
var_str += TAB2 + "if hasattr(self." + prop["name"] + ", '_set_None'):\n"
var_str += TAB3 + "self." + prop["name"] + "._set_None()\n"
var_str += TAB2 + "else:\n"
var_str += TAB3 + "self." + prop["name"] + " = None\n"
else: # Pyleecan type
var_str += TAB2 + "if self." + prop["name"] + " is not None:\n"
var_str += TAB3 + "self." + prop["name"] + "._set_None()\n"
# Code generation
None_str += TAB + "def _set_None(self):\n"
None_str += (
TAB2
+ '"""Set all the properties to None (except '
+ soft_name
+ ' object)"""\n\n'
)
None_str += var_str
if class_dict["mother"] != "":
# Get the properties of the mother class (if needed)
None_str += (
TAB2
+ "# Set to None the properties inherited from "
+ class_dict["mother"]
+ "\n"
)
None_str += TAB2 + "super(" + class_name + ", self)._set_None()\n"
elif len(class_dict["properties"]) == 0:
# No mother and no proprety => Nothing to do
None_str = None_str[:-1] + TAB2 + "pass\n"
return None_str | pyleecan/Generator/ClassGenerator/set_None_method_generator.py | from ...Generator import PYTHON_TYPE, TAB, TAB2, TAB3, TAB4, TAB5, TAB6, TAB7
from ...Generator.read_fct import (
is_list_pyleecan_type,
is_dict_pyleecan_type,
is_list_unknow_type,
)
def generate_set_None(gen_dict, class_dict, soft_name="software"):
"""Generate the code for the _set_None method of the class
Parameters
----------
gen_dict : dict
Dict with key = class name and value = class dict (name, package, properties, methods...)
class_dict : dict
dictionary of the class to generate (keys are name, package, properties, methods...)
soft_name : str
Name of the software to generate
Returns
-------
None_str : str
String containing the code for the _set_None method of the class
"""
class_name = class_dict["name"]
None_str = "" # This string is for the generated code
# Code line to set every properties to None (except pyleecan object)
var_str = ""
for prop in class_dict["properties"]:
if (
prop["type"] in PYTHON_TYPE
or prop["type"] in ["ndarray", "{ndarray}", "[ndarray]", "function"]
or "." in prop["type"]
):
var_str += TAB2 + "self." + prop["name"] + " = None\n"
elif (
is_list_pyleecan_type(prop["type"])
or is_dict_pyleecan_type(prop["type"])
or is_list_unknow_type(prop["type"])
):
var_str += TAB2 + "self." + prop["name"] + " = None\n"
elif prop["type"] in ["", None]: # No type
var_str += TAB2 + "if hasattr(self." + prop["name"] + ", '_set_None'):\n"
var_str += TAB3 + "self." + prop["name"] + "._set_None()\n"
var_str += TAB2 + "else:\n"
var_str += TAB3 + "self." + prop["name"] + " = None\n"
else: # Pyleecan type
var_str += TAB2 + "if self." + prop["name"] + " is not None:\n"
var_str += TAB3 + "self." + prop["name"] + "._set_None()\n"
# Code generation
None_str += TAB + "def _set_None(self):\n"
None_str += (
TAB2
+ '"""Set all the properties to None (except '
+ soft_name
+ ' object)"""\n\n'
)
None_str += var_str
if class_dict["mother"] != "":
# Get the properties of the mother class (if needed)
None_str += (
TAB2
+ "# Set to None the properties inherited from "
+ class_dict["mother"]
+ "\n"
)
None_str += TAB2 + "super(" + class_name + ", self)._set_None()\n"
elif len(class_dict["properties"]) == 0:
# No mother and no proprety => Nothing to do
None_str = None_str[:-1] + TAB2 + "pass\n"
return None_str | 0.48121 | 0.14774 |
import json
import tempfile
from werkzeug.wsgi import wrap_file
from werkzeug.wrappers import Request, Response
from executor import execute
@Request.application
def application(request):
"""
To use this application, the user must send a POST request with
base64 or form encoded encoded HTML content and the wkhtmltopdf Options in
request data, with keys 'base64_html' and 'options'.
The application will return a response with the PDF file.
"""
if request.method != 'POST':
return
request_is_json = request.content_type.endswith('json')
with tempfile.NamedTemporaryFile(suffix='.html') as source_file:
if request_is_json:
# If a JSON payload is there, all data is in the payload
payload = json.loads(request.data)
source_file.write(payload['contents'].decode('base64'))
options = payload.get('options', {})
elif request.files:
# First check if any files were uploaded
source_file.write(request.files['file'].read())
# Load any options that may have been provided in options
options = json.loads(request.form.get('options', '{}'))
source_file.flush()
# Evaluate argument to run with subprocess
args = ['wkhtmltoimage']
# Add Global Options
if options:
for option, value in options.items():
args.append('--%s' % option)
if value:
args.append('"%s"' % value)
# Add source file name and output file name
file_name = source_file.name
args += [file_name, file_name + ".jpeg"]
# Execute the command using executor
execute(' '.join(args))
return Response(
wrap_file(request.environ, open(file_name + '.jpeg')),
mimetype='image/jpeg',
)
if __name__ == '__main__':
from werkzeug.serving import run_simple
run_simple(
'127.0.0.1', 5000, application, use_debugger=True, use_reloader=True
) | app.py | import json
import tempfile
from werkzeug.wsgi import wrap_file
from werkzeug.wrappers import Request, Response
from executor import execute
@Request.application
def application(request):
"""
To use this application, the user must send a POST request with
base64 or form encoded encoded HTML content and the wkhtmltopdf Options in
request data, with keys 'base64_html' and 'options'.
The application will return a response with the PDF file.
"""
if request.method != 'POST':
return
request_is_json = request.content_type.endswith('json')
with tempfile.NamedTemporaryFile(suffix='.html') as source_file:
if request_is_json:
# If a JSON payload is there, all data is in the payload
payload = json.loads(request.data)
source_file.write(payload['contents'].decode('base64'))
options = payload.get('options', {})
elif request.files:
# First check if any files were uploaded
source_file.write(request.files['file'].read())
# Load any options that may have been provided in options
options = json.loads(request.form.get('options', '{}'))
source_file.flush()
# Evaluate argument to run with subprocess
args = ['wkhtmltoimage']
# Add Global Options
if options:
for option, value in options.items():
args.append('--%s' % option)
if value:
args.append('"%s"' % value)
# Add source file name and output file name
file_name = source_file.name
args += [file_name, file_name + ".jpeg"]
# Execute the command using executor
execute(' '.join(args))
return Response(
wrap_file(request.environ, open(file_name + '.jpeg')),
mimetype='image/jpeg',
)
if __name__ == '__main__':
from werkzeug.serving import run_simple
run_simple(
'127.0.0.1', 5000, application, use_debugger=True, use_reloader=True
) | 0.449151 | 0.110088 |
import gensim
import sys,os
ROOT_PATH = '/'.join(os.path.abspath(__file__).split('/')[:-2])
sys.path.append(ROOT_PATH)
import numpy as np
from itertools import chain
import tensorflow as tf
from utils.preprocess import *
from embedding.embedding_base import Base
from common.layers import get_initializer
import collections
import pickle
import pandas as pd
import pdb
class WordEmbedding(Base):
def __init__(self, text_list, dict_path, vocab_dict, random = False,\
maxlen = 20, embedding_size = 128, **kwargs):
super(WordEmbedding, self).__init__(**kwargs)
self.embedding_path = kwargs['conf']['word_embedding_path']
self.vocab_dict = vocab_dict
self.maxlen= maxlen
self.dict_path = dict_path
self.size = embedding_size
self.trainable = kwargs['conf'].get('embedding_trainable', True)
if random:
self.embedding = tf.get_variable("embeddings",
shape = [len(self.vocab_dict), self.size],
initializer=get_initializer('xavier'),
trainable = self.trainable)
else:
loaded_embedding = self._get_embedding(self.vocab_dict)
self.embedding = tf.get_variable("embeddings",
shape = [len(self.vocab_dict),self.size],
initializer=get_initializer('xavier'),
trainable = self.trainable)
tf.assign(self.embedding, loaded_embedding)
self.input_ids = {}
def __call__(self, features = None, name = "word_embedding"):
"""define placeholder"""
if features == None:
self.input_ids[name] = tf.placeholder(dtype=tf.int32, shape=[None,
self.maxlen], name = name)
else:
self.input_ids[name] = features[name]
return tf.nn.embedding_lookup(self.embedding, self.input_ids[name])
def feed_dict(self, input_x, name = 'word_embedding'):
feed_dict = {}
feed_dict[self.input_ids[name]] = input_x
return feed_dict
def pb_feed_dict(self, graph, input_x, name = 'word_embedding'):
feed_dict = {}
input_x_node = graph.get_operation_by_name(name).outputs[0]
feed_dict[input_x_node] = input_x
return feed_dict
@staticmethod
def build_dict(dict_path, text_list = None, mode = "train"):
if not os.path.exists(dict_path) or mode == "train":
assert text_list != None, "text_list can't be None in train mode"
words = list()
for content in text_list:
for word in word_tokenize(clean_str(content)):
words.append(word)
word_counter = collections.Counter(words).most_common()
vocab_dict = dict()
vocab_dict["<pad>"] = 0
vocab_dict["<unk>"] = 1
for word, _ in word_counter:
vocab_dict[word] = len(vocab_dict)
with open(dict_path, "wb") as f:
pickle.dump(vocab_dict, f)
else:
with open(dict_path, "rb") as f:
vocab_dict = pickle.load(f)
return vocab_dict
@staticmethod
def text2id(text_list, vocab_dict, maxlen, need_preprocess = True):
"""
文本id化
"""
if need_preprocess:
pre = Preprocess()
text_list = [pre.get_dl_input_by_text(text) for text in text_list]
x = list(map(lambda d: word_tokenize(clean_str(d)), text_list))
x_len = [min(len(text), maxlen) for text in x]
x = list(map(lambda d: list(map(lambda w: vocab_dict.get(w, vocab_dict["<unk>"]), d)), x))
x = list(map(lambda d: d[:maxlen], x))
x = list(map(lambda d: d + (maxlen - len(d)) * [vocab_dict["<pad>"]], x))
return text_list, x, x_len
def _get_embedding(self, vocab_dict, add_embedding_word = True):
"""get embedding vector by dict and embedding_file"""
model = self._load_embedding_file(self.embedding_path)
embedding = []
dict_rev = {vocab_dict[word]:word for word in vocab_dict}
for idx in range(len(vocab_dict)):
word = dict_rev[idx]
if word in model:
embedding.append(model[word])
else:
embedding.append(self._get_rand_embedding())
if add_embedding_word:
for key in model.vocab.keys():
if key not in vocab_dict:
vocab_dict[key] = len(vocab_dict)
embedding.append(model[key])
with open(self.dict_path, "wb") as f:
pickle.dump(vocab_dict, f)
return tf.convert_to_tensor(np.array(embedding), tf.float32)
def _get_rand_embedding(self):
"""random embedding"""
return np.random.randn(self.size)
def _load_embedding_file(self, path):
"""
模型格式有两种bin和model,使用方式:
a. bin模式:model = gensim.models.KeyedVectors.load_word2vec_format(model_path, binary=True)
b. model模式:model = gensim.models.Word2Vec.load(model_path)
model from https://www.jianshu.com/p/ae5b45e96dbf
"""
model = gensim.models.KeyedVectors.load_word2vec_format(path,
binary=False)
assert model.vector_size == self.size, "the size of vector\
from embedding file {} != defined embedding_size {}".format(
model.vector_size, self.size)
return model
if __name__ == '__main__':
embedding = WordEmbedding() | embedding/word_embedding.py | import gensim
import sys,os
ROOT_PATH = '/'.join(os.path.abspath(__file__).split('/')[:-2])
sys.path.append(ROOT_PATH)
import numpy as np
from itertools import chain
import tensorflow as tf
from utils.preprocess import *
from embedding.embedding_base import Base
from common.layers import get_initializer
import collections
import pickle
import pandas as pd
import pdb
class WordEmbedding(Base):
def __init__(self, text_list, dict_path, vocab_dict, random = False,\
maxlen = 20, embedding_size = 128, **kwargs):
super(WordEmbedding, self).__init__(**kwargs)
self.embedding_path = kwargs['conf']['word_embedding_path']
self.vocab_dict = vocab_dict
self.maxlen= maxlen
self.dict_path = dict_path
self.size = embedding_size
self.trainable = kwargs['conf'].get('embedding_trainable', True)
if random:
self.embedding = tf.get_variable("embeddings",
shape = [len(self.vocab_dict), self.size],
initializer=get_initializer('xavier'),
trainable = self.trainable)
else:
loaded_embedding = self._get_embedding(self.vocab_dict)
self.embedding = tf.get_variable("embeddings",
shape = [len(self.vocab_dict),self.size],
initializer=get_initializer('xavier'),
trainable = self.trainable)
tf.assign(self.embedding, loaded_embedding)
self.input_ids = {}
def __call__(self, features = None, name = "word_embedding"):
"""define placeholder"""
if features == None:
self.input_ids[name] = tf.placeholder(dtype=tf.int32, shape=[None,
self.maxlen], name = name)
else:
self.input_ids[name] = features[name]
return tf.nn.embedding_lookup(self.embedding, self.input_ids[name])
def feed_dict(self, input_x, name = 'word_embedding'):
feed_dict = {}
feed_dict[self.input_ids[name]] = input_x
return feed_dict
def pb_feed_dict(self, graph, input_x, name = 'word_embedding'):
feed_dict = {}
input_x_node = graph.get_operation_by_name(name).outputs[0]
feed_dict[input_x_node] = input_x
return feed_dict
@staticmethod
def build_dict(dict_path, text_list = None, mode = "train"):
if not os.path.exists(dict_path) or mode == "train":
assert text_list != None, "text_list can't be None in train mode"
words = list()
for content in text_list:
for word in word_tokenize(clean_str(content)):
words.append(word)
word_counter = collections.Counter(words).most_common()
vocab_dict = dict()
vocab_dict["<pad>"] = 0
vocab_dict["<unk>"] = 1
for word, _ in word_counter:
vocab_dict[word] = len(vocab_dict)
with open(dict_path, "wb") as f:
pickle.dump(vocab_dict, f)
else:
with open(dict_path, "rb") as f:
vocab_dict = pickle.load(f)
return vocab_dict
@staticmethod
def text2id(text_list, vocab_dict, maxlen, need_preprocess = True):
"""
文本id化
"""
if need_preprocess:
pre = Preprocess()
text_list = [pre.get_dl_input_by_text(text) for text in text_list]
x = list(map(lambda d: word_tokenize(clean_str(d)), text_list))
x_len = [min(len(text), maxlen) for text in x]
x = list(map(lambda d: list(map(lambda w: vocab_dict.get(w, vocab_dict["<unk>"]), d)), x))
x = list(map(lambda d: d[:maxlen], x))
x = list(map(lambda d: d + (maxlen - len(d)) * [vocab_dict["<pad>"]], x))
return text_list, x, x_len
def _get_embedding(self, vocab_dict, add_embedding_word = True):
"""get embedding vector by dict and embedding_file"""
model = self._load_embedding_file(self.embedding_path)
embedding = []
dict_rev = {vocab_dict[word]:word for word in vocab_dict}
for idx in range(len(vocab_dict)):
word = dict_rev[idx]
if word in model:
embedding.append(model[word])
else:
embedding.append(self._get_rand_embedding())
if add_embedding_word:
for key in model.vocab.keys():
if key not in vocab_dict:
vocab_dict[key] = len(vocab_dict)
embedding.append(model[key])
with open(self.dict_path, "wb") as f:
pickle.dump(vocab_dict, f)
return tf.convert_to_tensor(np.array(embedding), tf.float32)
def _get_rand_embedding(self):
"""random embedding"""
return np.random.randn(self.size)
def _load_embedding_file(self, path):
"""
模型格式有两种bin和model,使用方式:
a. bin模式:model = gensim.models.KeyedVectors.load_word2vec_format(model_path, binary=True)
b. model模式:model = gensim.models.Word2Vec.load(model_path)
model from https://www.jianshu.com/p/ae5b45e96dbf
"""
model = gensim.models.KeyedVectors.load_word2vec_format(path,
binary=False)
assert model.vector_size == self.size, "the size of vector\
from embedding file {} != defined embedding_size {}".format(
model.vector_size, self.size)
return model
if __name__ == '__main__':
embedding = WordEmbedding() | 0.371137 | 0.151153 |
import re
from collections import defaultdict, deque
import regex
from telethon import events
from telethon.tl import functions, types
from audynesia import udy
from ..Config import Config
plugin_category = "tools"
HEADER = "「sed」\n"
KNOWN_RE_BOTS = re.compile(Config.GROUP_REG_SED_EX_BOT_S, flags=re.IGNORECASE)
# Heavily based on
# https://github.com/SijmenSchoon/regexbot/blob/master/regexbot.py
last_msgs = defaultdict(lambda: deque(maxlen=10))
def doit(chat_id, match, original):
fr = match.group(1)
to = match.group(2)
to = to.replace("\\/", "/")
try:
fl = match.group(3)
if fl is None:
fl = ""
fl = fl[1:]
except IndexError:
fl = ""
# Build Python regex flags
count = 1
flags = 0
for f in fl:
if f == "i":
flags |= regex.IGNORECASE
elif f == "g":
count = 0
else:
return None, f"Unknown flag: {f}"
def actually_doit(original):
try:
s = original.message
if s.startswith(HEADER):
s = s[len(HEADER) :]
s, i = regex.subn(fr, to, s, count=count, flags=flags)
if i > 0:
return original, s
except Exception as e:
return None, f"u dun goofed m8: {str(e)}"
return None, None
if original is not None:
return actually_doit(original)
# Try matching the last few messages
for org in last_msgs[chat_id]:
m, s = actually_doit(org)
if s is not None:
return m, s
return None, None
async def group_has_sedbot(group):
if isinstance(group, types.InputPeerChannel):
full = await udy(functions.channels.GetFullChannelRequest(group))
elif isinstance(group, types.InputPeerChat):
full = await udy(functions.messages.GetFullChatRequest(group.chat_id))
else:
return False
return any(KNOWN_RE_BOTS.match(x.username or "") for x in full.users)
@udy.on(events.NewMessage)
async def on_message(event):
last_msgs[event.chat_id].appendleft(event.message)
@udy.on(events.MessageEdited)
async def on_edit(event):
for m in last_msgs[event.chat_id]:
if m.id == event.id:
m.raw_text = event.raw_text
break
@udy.cod_cmd(
pattern="^s/((?:\\/|[^/])+)/((?:\\/|[^/])*)(/.*)?",
command=("sed", plugin_category),
info={
"header": "Replaces a word or words with other words.",
"description": "Tag any sentence and type s/a/b. where is required word to replace and b is correct word.",
"usage": "s<delimiter><old word(s)><delimiter><new word(s)>",
"delimiters": ["/", ":", "|", "_"],
"examples": "s/delimiter/symbol - replace this command to this message",
},
)
async def on_regex(event):
"To replace words in sentences"
if not event.is_private and await group_has_sedbot(await event.get_input_chat()):
await edit_delete(event, "This group has a sed bot. Ignoring this message!")
return
m, s = doit(event.chat_id, event.pattern_match, await event.get_reply_message())
if m is not None:
s = f"{HEADER}{s}"
out = await event.client.send_message(
await event.get_input_chat(), s, reply_to=m.id
)
last_msgs[event.chat_id].appendleft(out)
elif s is not None:
await edit_or_reply(event, s)
raise events.StopPropagation | audynesia/plugins/sed.py | import re
from collections import defaultdict, deque
import regex
from telethon import events
from telethon.tl import functions, types
from audynesia import udy
from ..Config import Config
plugin_category = "tools"
HEADER = "「sed」\n"
KNOWN_RE_BOTS = re.compile(Config.GROUP_REG_SED_EX_BOT_S, flags=re.IGNORECASE)
# Heavily based on
# https://github.com/SijmenSchoon/regexbot/blob/master/regexbot.py
last_msgs = defaultdict(lambda: deque(maxlen=10))
def doit(chat_id, match, original):
fr = match.group(1)
to = match.group(2)
to = to.replace("\\/", "/")
try:
fl = match.group(3)
if fl is None:
fl = ""
fl = fl[1:]
except IndexError:
fl = ""
# Build Python regex flags
count = 1
flags = 0
for f in fl:
if f == "i":
flags |= regex.IGNORECASE
elif f == "g":
count = 0
else:
return None, f"Unknown flag: {f}"
def actually_doit(original):
try:
s = original.message
if s.startswith(HEADER):
s = s[len(HEADER) :]
s, i = regex.subn(fr, to, s, count=count, flags=flags)
if i > 0:
return original, s
except Exception as e:
return None, f"u dun goofed m8: {str(e)}"
return None, None
if original is not None:
return actually_doit(original)
# Try matching the last few messages
for org in last_msgs[chat_id]:
m, s = actually_doit(org)
if s is not None:
return m, s
return None, None
async def group_has_sedbot(group):
    """Return True when a known sed/regex bot is a member of *group*."""
    if isinstance(group, types.InputPeerChannel):
        request = functions.channels.GetFullChannelRequest(group)
    elif isinstance(group, types.InputPeerChat):
        request = functions.messages.GetFullChatRequest(group.chat_id)
    else:
        # Private chats (and anything else) cannot host a bot.
        return False
    full = await udy(request)
    for member in full.users:
        if KNOWN_RE_BOTS.match(member.username or ""):
            return True
    return False
@udy.on(events.NewMessage)
async def on_message(event):
    # Remember every incoming message so a later s/…/… can target it.
    last_msgs[event.chat_id].appendleft(event.message)
@udy.on(events.MessageEdited)
async def on_edit(event):
    # Keep the cached copy of an edited message in sync.
    for m in last_msgs[event.chat_id]:
        if m.id == event.id:
            # NOTE(review): this assumes Telethon exposes raw_text as a
            # settable attribute on Message -- confirm against the Telethon
            # version in use (it is a read-only property in some versions).
            m.raw_text = event.raw_text
            break
# NOTE(review): edit_delete and edit_or_reply are not imported anywhere in
# this file -- presumably provided by the framework or a missing import;
# confirm before relying on these branches.
@udy.cod_cmd(
    pattern="^s/((?:\\/|[^/])+)/((?:\\/|[^/])*)(/.*)?",
    command=("sed", plugin_category),
    info={
        "header": "Replaces a word or words with other words.",
        "description": "Tag any sentence and type s/a/b. where is required word to replace and b is correct word.",
        "usage": "s<delimiter><old word(s)><delimiter><new word(s)>",
        "delimiters": ["/", ":", "|", "_"],
        "examples": "s/delimiter/symbol - replace this command to this message",
    },
)
async def on_regex(event):
    """Handle an ``s/old/new(/flags)?`` message: run the substitution via
    doit() and post the result, or report the error."""
    if not event.is_private and await group_has_sedbot(await event.get_input_chat()):
        await edit_delete(event, "This group has a sed bot. Ignoring this message!")
        return
    m, s = doit(event.chat_id, event.pattern_match, await event.get_reply_message())
    if m is not None:
        # Success: send the result as a reply, tagged with HEADER, and
        # cache it so the output can itself be sed'd again.
        s = f"{HEADER}{s}"
        out = await event.client.send_message(
            await event.get_input_chat(), s, reply_to=m.id
        )
        last_msgs[event.chat_id].appendleft(out)
    elif s is not None:
        # No substitution, but an error message to show (bad flag/pattern).
        await edit_or_reply(event, s)
    raise events.StopPropagation | 0.421552 | 0.153962
import os
from optparse import make_option
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from hvz.main import models
class Command(BaseCommand):
def add_arguments(self, parser):
    """Register the --auto flag, which suppresses interactive prompts."""
    parser.add_argument(
        '--auto',
        help="Disable interactive terminal input",
        action="store_true",
        default=False,
    )
def handle(self, *args, **options):
    """Bootstrap a development environment end-to-end.

    Migrates the database, loads the campus fixture, compiles/collects
    static assets, seeds a demo game when none exists, and finally runs
    the test suite.  Progress is written to stderr so stdout stays free
    for the sub-commands' own output.
    """
    auto = options['auto']
    self.stderr.write("Synchronizing the database...")
    self.delimit()
    call_command('migrate', interactive=not auto)
    self.stderr.write("Synchronized!")
    self.delimit()
    try:
        self.stderr.write("Loading colleges data...")
        self.delimit()
        call_command(
            'loaddata',
            os.path.join(
                settings.BASE_DIR,
                'hvz', 'main', 'fixtures', 'campus.json'
            )
        )
        self.stderr.write("Data loaded!")
        self.delimit()
    except CommandError:
        self.stderr.write("Data loading failed!")
    self.stderr.write("Compiling style files...")
    self.delimit()
    try:
        call_command('compilecss')
    except CommandError:
        self.stderr.write("Style compilation failed!")
    self.stderr.write("Collecting static files...")
    self.delimit()
    try:
        call_command('collectstatic', interactive=False)
    except CommandError:
        self.delimit()
        self.stderr.write("Static file collection failed!")
    # Seed demo data.  (An earlier revision duplicated this whole section:
    # a first `if Game.objects.count() == 0` block already ran newgame +
    # randomplayers + randomhistory, making the two guarded blocks below
    # dead in that case.  The redundant first copy has been removed -- the
    # two blocks below cover every case it handled.)
    if models.Game.objects.count() == 0:
        self.delimit()
        self.stderr.write("Simulating a game...")
        call_command('newgame', stdout=self.stdout, stderr=self.stderr)
        self.stderr.write("Game created!")
    if models.Player.current_players().count() == 0:
        self.delimit()
        self.stderr.write("Generating a history...")
        self.delimit()
        call_command('randomplayers', stdout=self.stdout, stderr=self.stderr)
        self.stderr.write("Players created!")
        call_command('randomhistory', stdout=self.stdout, stderr=self.stderr)
        self.stderr.write("Brains eaten!")
    # Unit tests must come last, as running them puts us under a test db
    self.delimit()
    self.stderr.write("Running unit tests...")
    self.delimit()
    call_command('test', 'hvz', stdout=self.stdout, stderr=self.stderr)
    self.delimit()
    self.stderr.write("Build Complete!")
    self.delimit()
    self.stderr.write(
        "Now run `python manage.py runserver` and go to localhost:8000 in your browser."
    )
    self.delimit()
# Write a 79-dash horizontal rule to stderr, separating build phases.
def delimit(self):
    self.stderr.write('-' * 79) | hvz/main/management/commands/setup-dev.py | import os
from optparse import make_option
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from hvz.main import models
class Command(BaseCommand):
def add_arguments(self, parser):
    """Register the --auto flag, which suppresses interactive prompts."""
    parser.add_argument(
        '--auto',
        help="Disable interactive terminal input",
        action="store_true",
        default=False,
    )
def handle(self, *args, **options):
    """Bootstrap a development environment end-to-end.

    Migrates the database, loads the campus fixture, compiles/collects
    static assets, seeds a demo game when none exists, and finally runs
    the test suite.  Progress is written to stderr so stdout stays free
    for the sub-commands' own output.
    """
    auto = options['auto']
    self.stderr.write("Synchronizing the database...")
    self.delimit()
    call_command('migrate', interactive=not auto)
    self.stderr.write("Synchronized!")
    self.delimit()
    try:
        self.stderr.write("Loading colleges data...")
        self.delimit()
        call_command(
            'loaddata',
            os.path.join(
                settings.BASE_DIR,
                'hvz', 'main', 'fixtures', 'campus.json'
            )
        )
        self.stderr.write("Data loaded!")
        self.delimit()
    except CommandError:
        self.stderr.write("Data loading failed!")
    self.stderr.write("Compiling style files...")
    self.delimit()
    try:
        call_command('compilecss')
    except CommandError:
        self.stderr.write("Style compilation failed!")
    self.stderr.write("Collecting static files...")
    self.delimit()
    try:
        call_command('collectstatic', interactive=False)
    except CommandError:
        self.delimit()
        self.stderr.write("Static file collection failed!")
    # Seed demo data.  (An earlier revision duplicated this whole section:
    # a first `if Game.objects.count() == 0` block already ran newgame +
    # randomplayers + randomhistory, making the two guarded blocks below
    # dead in that case.  The redundant first copy has been removed -- the
    # two blocks below cover every case it handled.)
    if models.Game.objects.count() == 0:
        self.delimit()
        self.stderr.write("Simulating a game...")
        call_command('newgame', stdout=self.stdout, stderr=self.stderr)
        self.stderr.write("Game created!")
    if models.Player.current_players().count() == 0:
        self.delimit()
        self.stderr.write("Generating a history...")
        self.delimit()
        call_command('randomplayers', stdout=self.stdout, stderr=self.stderr)
        self.stderr.write("Players created!")
        call_command('randomhistory', stdout=self.stdout, stderr=self.stderr)
        self.stderr.write("Brains eaten!")
    # Unit tests must come last, as running them puts us under a test db
    self.delimit()
    self.stderr.write("Running unit tests...")
    self.delimit()
    call_command('test', 'hvz', stdout=self.stdout, stderr=self.stderr)
    self.delimit()
    self.stderr.write("Build Complete!")
    self.delimit()
    self.stderr.write(
        "Now run `python manage.py runserver` and go to localhost:8000 in your browser."
    )
    self.delimit()
# Write a 79-dash horizontal rule to stderr, separating build phases.
def delimit(self):
    self.stderr.write('-' * 79) | 0.306112 | 0.06134
import unittest
import sys, os
sys.path.append('../../')
from etk.core import Core
import json
import codecs
class TestExtractionsInputPaths(unittest.TestCase):
    """Exercise ETK data extraction driven by a jsonpath ``input_path``.

    Runs a dictionary extractor and a regex extractor for the "name" field
    over a pre-extracted ground-truth document and compares the resulting
    knowledge graph against the expected entries.
    """

    def setUp(self):
        """Load the ground-truth document each test works on."""
        file_path = os.path.join(os.path.dirname(__file__), "ground_truth/1_content_extracted.jl")
        # Use a context manager so the handle is closed deterministically
        # instead of leaking until garbage collection.
        with codecs.open(file_path) as doc_file:
            self.doc = json.load(doc_file)

    def test_extraction_input_path(self):
        """End-to-end: configure extractors, run Core, compare the KG."""
        women_name_file_path = os.path.join(os.path.dirname(__file__), "resources/female-names.json.gz")
        # NOTE(review): the pre_process/pre_filter/post_filter entries are
        # Python source strings evaluated by ETK; "isinstance(x, basestring)"
        # is Python-2 syntax -- confirm it is still valid for the ETK in use.
        e_config = {
            "document_id": "doc_id",
            "resources": {
                "dictionaries": {"women_name": women_name_file_path}
            },
            "data_extraction": [
                {
                    "input_path": "*.*.text.`parent`",
                    "fields": {
                        "name": {
                            "extractors": {
                                "extract_using_dictionary": {
                                    "config": {
                                        "dictionary": "women_name",
                                        "ngrams": 1,
                                        "joiner": " ",
                                        "pre_process": ["x.lower()"],
                                        "pre_filter": ["x"],
                                        "post_filter": ["isinstance(x, basestring)"],
                                    },
                                    "extraction_policy": "keep_existing",
                                },
                                "extract_using_regex": {
                                    "config": {
                                        "include_context": "true",
                                        "regex": "(?:my[\\s]+name[\\s]+is[\\s]+([-a-z0-9@$!]+))",
                                        "regex_options": ["IGNORECASE"],
                                        "pre_filter": ["x.replace('\\n', '')", "x.replace('\\r', '')"],
                                    },
                                    "extraction_policy": "replace",
                                },
                            }
                        }
                    },
                }
            ],
        }
        c = Core(extraction_config=e_config)
        r = c.process(self.doc, create_knowledge_graph=True)
        self.assertIn('knowledge_graph', r)
        kg = r['knowledge_graph']

        doc_id = "1A4A5FF5BD066309C72C8EEE6F7BCCCFD21B83245AFCDADDF014455BCF990A21"

        def prov(segment, start, end, input_kind, text, method, value):
            # Build one provenance record; the heavily repeated literal
            # structure of the expected value is factored into this helper.
            return {
                "source": {
                    "segment": segment,
                    "context": {
                        "start": start,
                        "end": end,
                        "input": input_kind,
                        "text": text,
                    },
                    "document_id": doc_id,
                },
                "confidence": {"extraction": 1.0},
                "method": method,
                "extracted_value": value,
            }

        helena_tokens = "27 \n my name is <etk 'attribute' = 'name'>helena</etk> height 160cms weight 55 kilos "
        helena_text = "91 27 \n <etk 'attribute' = 'name'>My name is Helena</etk> height 16"
        luna_tokens = "\n hey i ' m <etk 'attribute' = 'name'>luna</etk> 3234522013 let ' s explore "
        luna_title = "2013 escort alert ! - <etk 'attribute' = 'name'>luna</etk> the hot playmate ( 323 "
        expected_kg = {
            "title": [
                {
                    "confidence": 1,
                    "provenance": [
                        {
                            "source": {"segment": "html", "document_id": doc_id},
                            "method": "rearrange_title",
                        }
                    ],
                    "key": "title",
                    "value": "323-452-2013 ESCORT ALERT! - Luna The Hot Playmate (323) 452-2013 - 23",
                }
            ],
            "name": [
                {
                    "confidence": 1,
                    "provenance": [
                        prov("content_relaxed", 10, 11, "tokens", helena_tokens, "extract_using_dictionary", "helena"),
                        prov("content_relaxed", 41, 58, "text", helena_text, "extract_using_regex", "Helena"),
                        prov("content_strict", 10, 11, "tokens", helena_tokens, "extract_using_dictionary", "helena"),
                        prov("content_strict", 41, 58, "text", helena_text, "extract_using_regex", "Helena"),
                    ],
                    "key": "helena",
                    "value": "helena",
                },
                {
                    "confidence": 1,
                    "provenance": [
                        prov("content_relaxed", 136, 137, "tokens", luna_tokens, "extract_using_dictionary", "luna"),
                        prov("content_strict", 136, 137, "tokens", luna_tokens, "extract_using_dictionary", "luna"),
                        prov("title", 9, 10, "tokens", luna_title, "extract_using_dictionary", "luna"),
                    ],
                    "key": "luna",
                    "value": "luna",
                },
            ],
            "description": [
                {
                    "confidence": 1,
                    "provenance": [
                        {
                            "source": {"segment": "inferlink", "document_id": doc_id},
                            "method": "rearrange_description",
                        }
                    ],
                    "key": "description",
                    "value": "Hey I'm luna 3234522013 Let's explore , embrace and indulge in your favorite fantasy % independent. discreet no drama Firm Thighs and Sexy. My Soft skin & Tight Grip is exactly what you deserve Call or text Fetish friendly Fantasy friendly Party friendly 140 Hr SPECIALS 3234522013",
                }
            ],
        }
        for key in kg:
            self.assertIn(key, expected_kg)
            # NOTE(review): only keys present in the result are checked, so
            # a missing expected field would not fail this test.
            if key not in ('title', 'description'):
                self.assertEqual(kg[key], expected_kg[key])
if __name__ == '__main__':
unittest.main() | etk/unit_tests/test_knowledge_graph.py | import unittest
import sys, os
sys.path.append('../../')
from etk.core import Core
import json
import codecs
class TestExtractionsInputPaths(unittest.TestCase):
    """Exercise ETK data extraction driven by a jsonpath ``input_path``.

    Runs a dictionary extractor and a regex extractor for the "name" field
    over a pre-extracted ground-truth document and compares the resulting
    knowledge graph against the expected entries.
    """

    def setUp(self):
        """Load the ground-truth document each test works on."""
        file_path = os.path.join(os.path.dirname(__file__), "ground_truth/1_content_extracted.jl")
        # Use a context manager so the handle is closed deterministically
        # instead of leaking until garbage collection.
        with codecs.open(file_path) as doc_file:
            self.doc = json.load(doc_file)

    def test_extraction_input_path(self):
        """End-to-end: configure extractors, run Core, compare the KG."""
        women_name_file_path = os.path.join(os.path.dirname(__file__), "resources/female-names.json.gz")
        # NOTE(review): the pre_process/pre_filter/post_filter entries are
        # Python source strings evaluated by ETK; "isinstance(x, basestring)"
        # is Python-2 syntax -- confirm it is still valid for the ETK in use.
        e_config = {
            "document_id": "doc_id",
            "resources": {
                "dictionaries": {"women_name": women_name_file_path}
            },
            "data_extraction": [
                {
                    "input_path": "*.*.text.`parent`",
                    "fields": {
                        "name": {
                            "extractors": {
                                "extract_using_dictionary": {
                                    "config": {
                                        "dictionary": "women_name",
                                        "ngrams": 1,
                                        "joiner": " ",
                                        "pre_process": ["x.lower()"],
                                        "pre_filter": ["x"],
                                        "post_filter": ["isinstance(x, basestring)"],
                                    },
                                    "extraction_policy": "keep_existing",
                                },
                                "extract_using_regex": {
                                    "config": {
                                        "include_context": "true",
                                        "regex": "(?:my[\\s]+name[\\s]+is[\\s]+([-a-z0-9@$!]+))",
                                        "regex_options": ["IGNORECASE"],
                                        "pre_filter": ["x.replace('\\n', '')", "x.replace('\\r', '')"],
                                    },
                                    "extraction_policy": "replace",
                                },
                            }
                        }
                    },
                }
            ],
        }
        c = Core(extraction_config=e_config)
        r = c.process(self.doc, create_knowledge_graph=True)
        self.assertIn('knowledge_graph', r)
        kg = r['knowledge_graph']

        doc_id = "1A4A5FF5BD066309C72C8EEE6F7BCCCFD21B83245AFCDADDF014455BCF990A21"

        def prov(segment, start, end, input_kind, text, method, value):
            # Build one provenance record; the heavily repeated literal
            # structure of the expected value is factored into this helper.
            return {
                "source": {
                    "segment": segment,
                    "context": {
                        "start": start,
                        "end": end,
                        "input": input_kind,
                        "text": text,
                    },
                    "document_id": doc_id,
                },
                "confidence": {"extraction": 1.0},
                "method": method,
                "extracted_value": value,
            }

        helena_tokens = "27 \n my name is <etk 'attribute' = 'name'>helena</etk> height 160cms weight 55 kilos "
        helena_text = "91 27 \n <etk 'attribute' = 'name'>My name is Helena</etk> height 16"
        luna_tokens = "\n hey i ' m <etk 'attribute' = 'name'>luna</etk> 3234522013 let ' s explore "
        luna_title = "2013 escort alert ! - <etk 'attribute' = 'name'>luna</etk> the hot playmate ( 323 "
        expected_kg = {
            "title": [
                {
                    "confidence": 1,
                    "provenance": [
                        {
                            "source": {"segment": "html", "document_id": doc_id},
                            "method": "rearrange_title",
                        }
                    ],
                    "key": "title",
                    "value": "323-452-2013 ESCORT ALERT! - Luna The Hot Playmate (323) 452-2013 - 23",
                }
            ],
            "name": [
                {
                    "confidence": 1,
                    "provenance": [
                        prov("content_relaxed", 10, 11, "tokens", helena_tokens, "extract_using_dictionary", "helena"),
                        prov("content_relaxed", 41, 58, "text", helena_text, "extract_using_regex", "Helena"),
                        prov("content_strict", 10, 11, "tokens", helena_tokens, "extract_using_dictionary", "helena"),
                        prov("content_strict", 41, 58, "text", helena_text, "extract_using_regex", "Helena"),
                    ],
                    "key": "helena",
                    "value": "helena",
                },
                {
                    "confidence": 1,
                    "provenance": [
                        prov("content_relaxed", 136, 137, "tokens", luna_tokens, "extract_using_dictionary", "luna"),
                        prov("content_strict", 136, 137, "tokens", luna_tokens, "extract_using_dictionary", "luna"),
                        prov("title", 9, 10, "tokens", luna_title, "extract_using_dictionary", "luna"),
                    ],
                    "key": "luna",
                    "value": "luna",
                },
            ],
            "description": [
                {
                    "confidence": 1,
                    "provenance": [
                        {
                            "source": {"segment": "inferlink", "document_id": doc_id},
                            "method": "rearrange_description",
                        }
                    ],
                    "key": "description",
                    "value": "Hey I'm luna 3234522013 Let's explore , embrace and indulge in your favorite fantasy % independent. discreet no drama Firm Thighs and Sexy. My Soft skin & Tight Grip is exactly what you deserve Call or text Fetish friendly Fantasy friendly Party friendly 140 Hr SPECIALS 3234522013",
                }
            ],
        }
        for key in kg:
            self.assertIn(key, expected_kg)
            # NOTE(review): only keys present in the result are checked, so
            # a missing expected field would not fail this test.
            if key not in ('title', 'description'):
                self.assertEqual(kg[key], expected_kg[key])
if __name__ == '__main__':
unittest.main() | 0.270962 | 0.279472 |
from math import ceil
from pyrogram.types import InlineKeyboardButton
from wbb import MOD_LOAD, MOD_NOLOAD
class EqInlineKeyboardButton(InlineKeyboardButton):
    """Inline keyboard button that compares and orders by its label text,
    so lists of buttons can be sorted() alphabetically by caption.

    NOTE(review): defining __eq__ without __hash__ makes instances
    unhashable (Python sets __hash__ to None) -- fine for sorting, but
    these buttons cannot be used in sets or as dict keys.
    """

    def __eq__(self, other):
        return self.text == other.text

    def __lt__(self, other):
        return self.text < other.text

    def __gt__(self, other):
        return self.text > other.text
def paginate_modules(page_n, module_dict, prefix, chat=None):
    """Build one page of a sorted, paginated inline keyboard of modules.

    Each module in *module_dict* becomes a button whose callback data is
    "{prefix}_module({name})" (or "{prefix}_module({chat},{name})" when
    *chat* is given).  Buttons are laid out three per row; when more than
    COLUMN_SIZE rows exist, only the page selected by *page_n* (modulo the
    page count) is returned, plus a prev/home/next navigation row.
    """
    modules = sorted(
        EqInlineKeyboardButton(
            x.__MODULE__,
            callback_data=(
                "{}_module({})".format(prefix, x.__MODULE__.lower())
                if not chat
                else "{}_module({},{})".format(prefix, chat, x.__MODULE__.lower())
            ),
        )
        for x in module_dict.values()
    )
    # Arrange buttons three per row; append any 1- or 2-button remainder row.
    pairs = list(zip(modules[::3], modules[1::3], modules[2::3]))
    leftover = len(modules) % 3
    if leftover:
        pairs.append(tuple(modules[-leftover:]))
    COLUMN_SIZE = 4  # rows shown per page
    if not pairs:
        # Fix: previously an empty module_dict crashed with
        # ZeroDivisionError on `page_n % max_num_pages` (max_num_pages == 0).
        return []
    max_num_pages = ceil(len(pairs) / COLUMN_SIZE)
    modulo_page = page_n % max_num_pages
    # can only have a certain amount of buttons side by side
    if len(pairs) > COLUMN_SIZE:
        pairs = pairs[
            modulo_page * COLUMN_SIZE : COLUMN_SIZE * (modulo_page + 1)
        ] + [
            (
                EqInlineKeyboardButton(
                    "❮",
                    callback_data="{}_prev({})".format(prefix, modulo_page),
                ),
                EqInlineKeyboardButton(
                    "Back",
                    callback_data="{}_home({})".format(prefix, modulo_page),
                ),
                EqInlineKeyboardButton(
                    "❯",
                    callback_data="{}_next({})".format(prefix, modulo_page),
                ),
            )
        ]
    return pairs
# True when *name* passes the allow-list (an empty MOD_LOAD allows all
# modules) and does not appear on the MOD_NOLOAD deny-list.
def is_module_loaded(name):
    return (
        not MOD_LOAD or name in MOD_LOAD
    ) and name not in MOD_NOLOAD | wbb/utils/misc.py | from math import ceil
from pyrogram.types import InlineKeyboardButton
from wbb import MOD_LOAD, MOD_NOLOAD
class EqInlineKeyboardButton(InlineKeyboardButton):
    """Inline keyboard button that compares and orders by its label text,
    so lists of buttons can be sorted() alphabetically by caption.

    NOTE(review): defining __eq__ without __hash__ makes instances
    unhashable (Python sets __hash__ to None) -- fine for sorting, but
    these buttons cannot be used in sets or as dict keys.
    """

    def __eq__(self, other):
        return self.text == other.text

    def __lt__(self, other):
        return self.text < other.text

    def __gt__(self, other):
        return self.text > other.text
def paginate_modules(page_n, module_dict, prefix, chat=None):
    """Build one page of a sorted, paginated inline keyboard of modules.

    Each module in *module_dict* becomes a button whose callback data is
    "{prefix}_module({name})" (or "{prefix}_module({chat},{name})" when
    *chat* is given).  Buttons are laid out three per row; when more than
    COLUMN_SIZE rows exist, only the page selected by *page_n* (modulo the
    page count) is returned, plus a prev/home/next navigation row.
    """
    modules = sorted(
        EqInlineKeyboardButton(
            x.__MODULE__,
            callback_data=(
                "{}_module({})".format(prefix, x.__MODULE__.lower())
                if not chat
                else "{}_module({},{})".format(prefix, chat, x.__MODULE__.lower())
            ),
        )
        for x in module_dict.values()
    )
    # Arrange buttons three per row; append any 1- or 2-button remainder row.
    pairs = list(zip(modules[::3], modules[1::3], modules[2::3]))
    leftover = len(modules) % 3
    if leftover:
        pairs.append(tuple(modules[-leftover:]))
    COLUMN_SIZE = 4  # rows shown per page
    if not pairs:
        # Fix: previously an empty module_dict crashed with
        # ZeroDivisionError on `page_n % max_num_pages` (max_num_pages == 0).
        return []
    max_num_pages = ceil(len(pairs) / COLUMN_SIZE)
    modulo_page = page_n % max_num_pages
    # can only have a certain amount of buttons side by side
    if len(pairs) > COLUMN_SIZE:
        pairs = pairs[
            modulo_page * COLUMN_SIZE : COLUMN_SIZE * (modulo_page + 1)
        ] + [
            (
                EqInlineKeyboardButton(
                    "❮",
                    callback_data="{}_prev({})".format(prefix, modulo_page),
                ),
                EqInlineKeyboardButton(
                    "Back",
                    callback_data="{}_home({})".format(prefix, modulo_page),
                ),
                EqInlineKeyboardButton(
                    "❯",
                    callback_data="{}_next({})".format(prefix, modulo_page),
                ),
            )
        ]
    return pairs
# True when *name* passes the allow-list (an empty MOD_LOAD allows all
# modules) and does not appear on the MOD_NOLOAD deny-list.
def is_module_loaded(name):
    return (
        not MOD_LOAD or name in MOD_LOAD
    ) and name not in MOD_NOLOAD | 0.71103 | 0.155174
# Before you know it, you're inside a submarine the Elves keep ready for situations like this. It's covered in Christmas lights (because of course it is), and it even has an experimental antenna that should be able to track the keys if you can boost its signal strength high enough; there's a little meter that indicates the antenna's signal strength by displaying 0-50 stars.
# Your instincts tell you that in order to save Christmas, you'll need to get all fifty stars by December 25th.
# Collect stars by solving puzzles. Two puzzles will be made available on each day in the Advent calendar; the second puzzle is unlocked when you complete the first. Each puzzle grants one star. Good luck!
# As the submarine drops below the surface of the ocean, it automatically performs a sonar sweep of the nearby sea floor. On a small screen, the sonar sweep report (your puzzle input) appears: each line is a measurement of the sea floor depth as the sweep looks further and further away from the submarine.
# For example, suppose you had the following report:
# 199
# 200
# 208
# 210
# 200
# 207
# 240
# 269
# 260
# 263
# This report indicates that, scanning outward from the submarine, the sonar sweep found depths of 199, 200, 208, 210, and so on.
# The first order of business is to figure out how quickly the depth increases, just so you know what you're dealing with - you never know if the keys will get carried into deeper water by an ocean current or a fish or something.
# To do this, count the number of times a depth measurement increases from the previous measurement. (There is no measurement before the first measurement.) In the example above, the changes are as follows:
# 199 (N/A - no previous measurement)
# 200 (increased)
# 208 (increased)
# 210 (increased)
# 200 (decreased)
# 207 (increased)
# 240 (increased)
# 269 (increased)
# 260 (decreased)
# 263 (increased)
# In this example, there are 7 measurements that are larger than the previous measurement.
# How many measurements are larger than the previous measurement?
# Part 1: count readings that are strictly deeper than the previous one.
inp = [int(line) for line in open(r".\2021\input\d1-sonar-sweep.txt")]
print(sum(b > a for a, b in zip(inp, inp[1:])))
# --- Part Two ---
# Considering every single measurement isn't as useful as you expected: there's just too much noise in the data.
# Instead, consider sums of a three-measurement sliding window. Again considering the above example:
# 199 A
# 200 A B
# 208 A B C
# 210 B C D
# 200 E C D
# 207 E F D
# 240 E F G
# 269 F G H
# 260 G H
# 263 H
# Start by comparing the first and second three-measurement windows. The measurements in the first window are marked A (199, 200, 208); their sum is 199 + 200 + 208 = 607. The second window is marked B (200, 208, 210); its sum is 618. The sum of measurements in the second window is larger than the sum of the first, so this first comparison increased.
# Your goal now is to count the number of times the sum of measurements in this sliding window increases from the previous sum. So, compare A with B, then compare B with C, then C with D, and so on. Stop when there aren't enough measurements left to create a new three-measurement sum.
# In the above example, the sum of each three-measurement window is as follows:
# A: 607 (N/A - no previous sum)
# B: 618 (increased)
# C: 618 (no change)
# D: 617 (decreased)
# E: 647 (increased)
# F: 716 (increased)
# G: 769 (increased)
# H: 792 (increased)
# In this example, there are 5 sums that are larger than the previous sum.
# Consider sums of a three-measurement sliding window. How many sums are larger than the previous sum?
print(sum([1 if sum(inp[i-2:i+1]) > sum(inp[i-3:i]) else 0 for i in range(3, len(inp))])) | 2021/solutions/d1-sonar-sweep.py |
# Before you know it, you're inside a submarine the Elves keep ready for situations like this. It's covered in Christmas lights (because of course it is), and it even has an experimental antenna that should be able to track the keys if you can boost its signal strength high enough; there's a little meter that indicates the antenna's signal strength by displaying 0-50 stars.
# Your instincts tell you that in order to save Christmas, you'll need to get all fifty stars by December 25th.
# Collect stars by solving puzzles. Two puzzles will be made available on each day in the Advent calendar; the second puzzle is unlocked when you complete the first. Each puzzle grants one star. Good luck!
# As the submarine drops below the surface of the ocean, it automatically performs a sonar sweep of the nearby sea floor. On a small screen, the sonar sweep report (your puzzle input) appears: each line is a measurement of the sea floor depth as the sweep looks further and further away from the submarine.
# For example, suppose you had the following report:
# 199
# 200
# 208
# 210
# 200
# 207
# 240
# 269
# 260
# 263
# This report indicates that, scanning outward from the submarine, the sonar sweep found depths of 199, 200, 208, 210, and so on.
# The first order of business is to figure out how quickly the depth increases, just so you know what you're dealing with - you never know if the keys will get carried into deeper water by an ocean current or a fish or something.
# To do this, count the number of times a depth measurement increases from the previous measurement. (There is no measurement before the first measurement.) In the example above, the changes are as follows:
# 199 (N/A - no previous measurement)
# 200 (increased)
# 208 (increased)
# 210 (increased)
# 200 (decreased)
# 207 (increased)
# 240 (increased)
# 269 (increased)
# 260 (decreased)
# 263 (increased)
# In this example, there are 7 measurements that are larger than the previous measurement.
# How many measurements are larger than the previous measurement?
# Part 1: count readings that are strictly deeper than the previous one.
inp = [int(line) for line in open(r".\2021\input\d1-sonar-sweep.txt")]
print(sum(b > a for a, b in zip(inp, inp[1:])))
# --- Part Two ---
# Considering every single measurement isn't as useful as you expected: there's just too much noise in the data.
# Instead, consider sums of a three-measurement sliding window. Again considering the above example:
# 199 A
# 200 A B
# 208 A B C
# 210 B C D
# 200 E C D
# 207 E F D
# 240 E F G
# 269 F G H
# 260 G H
# 263 H
# Start by comparing the first and second three-measurement windows. The measurements in the first window are marked A (199, 200, 208); their sum is 199 + 200 + 208 = 607. The second window is marked B (200, 208, 210); its sum is 618. The sum of measurements in the second window is larger than the sum of the first, so this first comparison increased.
# Your goal now is to count the number of times the sum of measurements in this sliding window increases from the previous sum. So, compare A with B, then compare B with C, then C with D, and so on. Stop when there aren't enough measurements left to create a new three-measurement sum.
# In the above example, the sum of each three-measurement window is as follows:
# A: 607 (N/A - no previous sum)
# B: 618 (increased)
# C: 618 (no change)
# D: 617 (decreased)
# E: 647 (increased)
# F: 716 (increased)
# G: 769 (increased)
# H: 792 (increased)
# In this example, there are 5 sums that are larger than the previous sum.
# Consider sums of a three-measurement sliding window. How many sums are larger than the previous sum?
print(sum([1 if sum(inp[i-2:i+1]) > sum(inp[i-3:i]) else 0 for i in range(3, len(inp))])) | 0.752559 | 0.641773 |
from __future__ import print_function
from .anchors import compute_overlap
from .visualization import draw_detections, draw_annotations
import numpy as np
import itertools
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import os
import cv2
import pickle
def _compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves.
    Code originally from https://github.com/rbgirshick/py-faster-rcnn.
    # Arguments
        recall: The recall curve (list).
        precision: The precision curve (list).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """
    # Sentinel values bracket both curves so the envelope and the
    # recall-change scan behave correctly at the boundaries.
    mrec = np.concatenate(([0.], recall, [1.]))
    mpre = np.concatenate(([0.], precision, [0.]))
    # Precision envelope: running maximum taken from the right, i.e.
    # mpre[k] becomes max(mpre[k:]).
    mpre = np.maximum.accumulate(mpre[::-1])[::-1]
    # Area under the PR curve: sum precision over the recall steps,
    # considering only the points where recall actually changes.
    changed = np.flatnonzero(mrec[1:] != mrec[:-1])
    return np.sum((mrec[changed + 1] - mrec[changed]) * mpre[changed + 1])
def _get_detections(generator, model, score_threshold=0.05, max_detections=100, save_path=None, ground_truth=False):
    """ Get the detections from the model using the generator.
    The result is a list of lists such that the size is:
        all_detections[num_images][num_classes] = detections[num_detections, 4 + num_classes]
    # Arguments
        generator       : The generator used to run images through the model.
        model           : The model to run on the images.
        score_threshold : The score confidence threshold to use.
        max_detections  : The maximum number of detections to use per image.
        save_path       : The path to save the images with visualized detections to.
        ground_truth    : If True (and save_path is set), also draw the
                          ground-truth annotations on the saved images.
    # Returns
        A list of lists containing the detections for each image in the generator.
    """
    all_detections = [[None for i in range(generator.num_classes())] for j in range(generator.size())]
    for i in range(generator.size()):
        raw_image    = generator.load_image(i)
        image        = generator.preprocess_image(raw_image.copy())
        image, scale = generator.resize_image(image)
        # run network
        _, _, boxes, nms_classification = model.predict_on_batch(np.expand_dims(image, axis=0))
        # correct boxes for image scale
        boxes /= scale
        # select indices which have a score above the threshold
        # (indices is a pair of arrays: detection index, class index)
        indices = np.where(nms_classification[0, :, :] > score_threshold)
        # select those scores
        scores = nms_classification[0][indices]
        # find the order with which to sort the scores
        # (descending; keep at most max_detections)
        scores_sort = np.argsort(-scores)[:max_detections]
        # select detections
        image_boxes      = boxes[0, indices[0][scores_sort], :]
        image_scores     = np.expand_dims(nms_classification[0, indices[0][scores_sort], indices[1][scores_sort]], axis=1)
        image_detections = np.append(image_boxes, image_scores, axis=1)
        image_predicted_labels = indices[1][scores_sort]
        if save_path is not None:
            if ground_truth:
                draw_annotations(raw_image, generator.load_annotations(i), generator=generator)
            draw_detections(raw_image, boxes[0, indices[0][scores_sort], :], nms_classification[0, indices[0][scores_sort], :], generator=generator)
            cv2.imwrite(os.path.join(save_path, '{}.png'.format(i)), raw_image)
        # copy detections to all_detections (bucketed by predicted class)
        for label in range(generator.num_classes()):
            all_detections[i][label] = image_detections[image_predicted_labels == label, :]
        print('{}/{}'.format(i, generator.size()), end='\r')
    return all_detections
def _get_annotations(generator):
    """ Get the ground truth annotations from the generator.
    The result is a list of lists such that the size is:
        all_detections[num_images][num_classes] = annotations[num_detections, 5]
    # Arguments
        generator : The generator used to retrieve ground truth annotations.
    # Returns
        A list of lists containing the annotations for each image in the generator.
    """
    num_images = generator.size()
    num_classes = generator.num_classes()
    all_annotations = [[None] * num_classes for _ in range(num_images)]
    for image_index in range(num_images):
        # load the annotations (rows of [x1, y1, x2, y2, label])
        annotations = generator.load_annotations(image_index)
        if annotations.size == 0:
            continue
        # bucket the boxes by class label, dropping the label column
        for label in range(num_classes):
            selected = annotations[:, 4] == label
            all_annotations[image_index][label] = annotations[selected, :4].copy()
        print('{}/{}'.format(image_index, generator.size()), end='\r')
    return all_annotations
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    code in part taken from http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py
    # Arguments
        cm        : Square confusion matrix (as returned by sklearn's confusion_matrix).
        classes   : Sequence of tick labels, one per class.
        normalize : If True, each row (true label) is scaled to sum to 1.
        title     : Title for the plot.
        cmap      : Matplotlib colormap for the image.
    """
    if normalize:
        # Scale each row to sum to 1 so cells read as per-class fractions.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=90)
    plt.yticks(tick_marks, classes)
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        # Choose the cell format per value instead of mutating a shared
        # variable across iterations (the original used `normalize == True`
        # comparisons and a redundant initial assignment of fmt).
        if normalize:
            fmt = '.0f' if cm[i, j] == 0 else '.2f'
        else:
            fmt = 'd'
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
def evaluate(
    generator,
    model,
    iou_threshold=0.5,
    score_threshold=0.05,
    max_detections=100,
    save_path=None,
    ground_truth=False
):
    """ Evaluate a given dataset using a given model.
    # Arguments
        generator       : The generator that represents the dataset to evaluate.
        model           : The model to evaluate.
        iou_threshold   : The threshold used to consider when a detection is positive or negative.
        score_threshold : The score confidence threshold to use for detections.
        max_detections  : The maximum number of detections to use per image.
        save_path       : The path to save images with visualized detections to.
        ground_truth    : If True, also draw ground-truth annotations on saved images.
    # Returns
        A dict mapping integer class labels to average-precision scores.
    """
    # gather all detections and annotations
    all_annotations = _get_annotations(generator)
    all_detections = _get_detections(generator, model, score_threshold=score_threshold, max_detections=max_detections,
                                     save_path=save_path, ground_truth=ground_truth)
    average_precisions = {}
    # all_detections = pickle.load(open('all_detections.pkl', 'rb'))
    # all_annotations = pickle.load(open('all_annotations.pkl', 'rb'))
    # pickle.dump(all_detections, open('all_detections.pkl', 'wb'))
    # pickle.dump(all_annotations, open('all_annotations.pkl', 'wb'))
    # process detections and annotations
    for label in range(generator.num_classes()):
        false_positives = np.zeros((0,))
        true_positives = np.zeros((0,))
        scores = np.zeros((0,))
        num_annotations = 0.0
        for i in range(generator.size()):
            detections = all_detections[i][label]
            annotations = all_annotations[i][label]
            if annotations is not None:
                num_annotations += annotations.shape[0]
            else:
                # NOTE(review): images whose annotation entry is None are skipped
                # entirely, so detections on them are never counted as false
                # positives — confirm this is intended.
                continue
            detected_annotations = []
            for d in detections:
                # d is one detection row; column 4 holds its confidence score.
                scores = np.append(scores, d[4])
                if annotations.shape[0] == 0:
                    # detection on an image with no boxes of this class
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
                    continue
                overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)
                assigned_annotation = np.argmax(overlaps, axis=1)
                max_overlap = overlaps[0, assigned_annotation]
                # True positive only if overlap is sufficient and the matched
                # annotation has not been claimed by an earlier detection.
                if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                    false_positives = np.append(false_positives, 0)
                    true_positives = np.append(true_positives, 1)
                    detected_annotations.append(assigned_annotation)
                else:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
        # no annotations -> AP for this class is 0 (is this correct?)
        if num_annotations == 0:
            average_precisions[label] = 0
            continue
        # sort by score
        indices = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives = true_positives[indices]
        # compute false positives and true positives
        false_positives = np.cumsum(false_positives)
        true_positives = np.cumsum(true_positives)
        # compute recall and precision
        recall = true_positives / num_annotations
        precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
        # compute average precision
        average_precision = _compute_ap(recall, precision)
        average_precisions[label] = average_precision
    return average_precisions
def ret_confusion_matrix(
    generator,
    model,
    iou_threshold=0.5,
    score_threshold=0.05,
    max_detections=100,
    save_path=None,
    ground_truth=False
):
    """ Evaluate a given dataset using a given model.
    # Arguments
        generator       : The generator that represents the dataset to evaluate.
        model           : The model to evaluate.
        iou_threshold   : The threshold used to consider when a detection is positive or negative.
        score_threshold : The score confidence threshold to use for detections.
        max_detections  : The maximum number of detections to use per image.
        save_path       : The path to save images with visualized detections to.
        ground_truth    : If True, also draw ground-truth annotations on saved images.
    # Returns
        None. Computes and plots a normalized confusion matrix of
        annotated labels vs. the class of the first matching detection.
    """
    # gather all detections and annotations
    all_annotations = _get_annotations(generator)
    all_detections = _get_detections(generator, model, score_threshold=score_threshold, max_detections=max_detections,
                                     save_path=save_path, ground_truth=ground_truth)
    # all_detections = pickle.load(open('all_detections.pkl', 'rb'))
    # all_annotations = pickle.load(open('all_annotations.pkl', 'rb'))
    # pickle.dump(all_detections, open('all_detections.pkl', 'wb'))
    # pickle.dump(all_annotations, open('all_annotations.pkl', 'wb'))
    # process detections and annotations
    all_detected = []
    all_annotated = []
    for i in range(generator.size()):
        for label in range(generator.num_classes()):
            annotated_file = all_annotations[i][label]
            # NOTE(review): _get_annotations leaves entries as None for images
            # with no annotations at all, so `.size` here could raise
            # AttributeError — confirm the dataset never produces such images.
            if annotated_file.size == 0:
                continue
            for annotation in annotated_file:
                if annotation.size == 0:
                    continue
                found = False
                # Scan every class's detections for this image.
                for detect_label, detect_on_i in np.ndenumerate(all_detections[i]):
                    if detect_on_i.size == 0:
                        continue
                    if found:
                        break
                    for d in detect_on_i:
                        d = np.array(d)
                        # compute_overlap expects 2D arrays on both sides.
                        if annotation.ndim == 1:
                            annotation = np.array([annotation])
                        overlaps = compute_overlap(np.expand_dims(d, axis=0), annotation)
                        assigned_annotation = np.argmax(overlaps, axis=1)
                        max_overlap = overlaps[0, assigned_annotation]
                        if max_overlap[0] >= iou_threshold:
                            # The first detection (of any class) with sufficient
                            # overlap is taken as the prediction for this annotation.
                            found = True
                            all_detected.append(detect_label[0])
                            all_annotated.append(label)
                            break
    cnf_matrix = confusion_matrix(all_annotated, all_detected)
    plt.figure()
    plot_confusion_matrix(cnf_matrix, classes=list(range(1, generator.num_classes() + 1)), normalize=True,
                          title='Normalized confusion matrix')
plt.show() | keras_retinanet/utils/eval.py | from __future__ import print_function
from .anchors import compute_overlap
from .visualization import draw_detections, draw_annotations
import numpy as np
import itertools
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import os
import cv2
import pickle
def _compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves.
    Code originally from https://github.com/rbgirshick/py-faster-rcnn.
    # Arguments
        recall:    The recall curve (list).
        precision: The precision curve (list).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """
    # correct AP calculation
    # first append sentinel values at the end
    mrec = np.concatenate(([0.], recall, [1.]))
    mpre = np.concatenate(([0.], precision, [0.]))
    # compute the precision envelope: a right-to-left running maximum makes
    # the precision curve monotonically non-increasing
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # to calculate area under PR curve, look for points
    # where X axis (recall) changes value
    i = np.where(mrec[1:] != mrec[:-1])[0]
    # and sum (\Delta recall) * prec
    ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap
def _get_detections(generator, model, score_threshold=0.05, max_detections=100, save_path=None, ground_truth=False):
    """ Get the detections from the model using the generator.
    The result is a list of lists such that the size is:
        all_detections[num_images][num_classes] = detections[num_detections, 4 + num_classes]
    # Arguments
        generator       : The generator used to run images through the model.
        model           : The model to run on the images.
        score_threshold : The score confidence threshold to use.
        max_detections  : The maximum number of detections to use per image.
        save_path       : The path to save the images with visualized detections to.
        ground_truth    : If True, also draw the ground-truth annotations on saved images.
    # Returns
        A list of lists containing the detections for each image in the generator.
    """
    all_detections = [[None for i in range(generator.num_classes())] for j in range(generator.size())]
    for i in range(generator.size()):
        raw_image = generator.load_image(i)
        image = generator.preprocess_image(raw_image.copy())
        image, scale = generator.resize_image(image)
        # run network; the model is assumed to return
        # (_, _, boxes, nms_classification) in that order — TODO confirm.
        _, _, boxes, nms_classification = model.predict_on_batch(np.expand_dims(image, axis=0))
        # correct boxes for image scale
        boxes /= scale
        # select indices which have a score above the threshold
        indices = np.where(nms_classification[0, :, :] > score_threshold)
        # select those scores
        scores = nms_classification[0][indices]
        # find the order with which to sort the scores (descending, capped at max_detections)
        scores_sort = np.argsort(-scores)[:max_detections]
        # select detections: box coordinates plus the per-class score as a 5th column
        image_boxes = boxes[0, indices[0][scores_sort], :]
        image_scores = np.expand_dims(nms_classification[0, indices[0][scores_sort], indices[1][scores_sort]], axis=1)
        image_detections = np.append(image_boxes, image_scores, axis=1)
        image_predicted_labels = indices[1][scores_sort]
        if save_path is not None:
            if ground_truth:
                draw_annotations(raw_image, generator.load_annotations(i), generator=generator)
            draw_detections(raw_image, boxes[0, indices[0][scores_sort], :], nms_classification[0, indices[0][scores_sort], :], generator=generator)
            cv2.imwrite(os.path.join(save_path, '{}.png'.format(i)), raw_image)
        # copy detections to all_detections, grouped by predicted class
        for label in range(generator.num_classes()):
            all_detections[i][label] = image_detections[image_predicted_labels == label, :]
        # lightweight progress indicator (overwrites the same console line)
        print('{}/{}'.format(i, generator.size()), end='\r')
    return all_detections
def _get_annotations(generator):
    """ Get the ground truth annotations from the generator.
    The result is a list of lists such that the size is:
        all_annotations[num_images][num_classes] = annotations[num_annotations, 4]
    (only the 4 box coordinates are kept; the class column is used for grouping).
    # Arguments
        generator : The generator used to retrieve ground truth annotations.
    # Returns
        A list of lists containing the annotations for each image in the generator.
        Entries stay None for images whose annotation array is empty.
    """
    all_annotations = [[None for i in range(generator.num_classes())] for j in range(generator.size())]
    for i in range(generator.size()):
        # load the annotations
        annotations = generator.load_annotations(i)
        if annotations.size == 0:
            continue
        # copy detections to all_annotations, grouped by the class id in column 4
        for label in range(generator.num_classes()):
            all_annotations[i][label] = annotations[annotations[:, 4] == label, :4].copy()
        # lightweight progress indicator (overwrites the same console line)
        print('{}/{}'.format(i, generator.size()), end='\r')
    return all_annotations
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    code in part taken from http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py
    # Arguments
        cm        : Square confusion matrix (as returned by sklearn's confusion_matrix).
        classes   : Sequence of tick labels, one per class.
        normalize : If True, each row (true label) is scaled to sum to 1.
        title     : Title for the plot.
        cmap      : Matplotlib colormap for the image.
    """
    if normalize:
        # Scale each row to sum to 1 so cells read as per-class fractions.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=90)
    plt.yticks(tick_marks, classes)
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        # Choose the cell format per value instead of mutating a shared
        # variable across iterations (the original used `normalize == True`
        # comparisons and a redundant initial assignment of fmt).
        if normalize:
            fmt = '.0f' if cm[i, j] == 0 else '.2f'
        else:
            fmt = 'd'
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
def evaluate(
    generator,
    model,
    iou_threshold=0.5,
    score_threshold=0.05,
    max_detections=100,
    save_path=None,
    ground_truth=False
):
    """ Evaluate a given dataset using a given model.
    # Arguments
        generator       : The generator that represents the dataset to evaluate.
        model           : The model to evaluate.
        iou_threshold   : The threshold used to consider when a detection is positive or negative.
        score_threshold : The score confidence threshold to use for detections.
        max_detections  : The maximum number of detections to use per image.
        save_path       : The path to save images with visualized detections to.
        ground_truth    : If True, also draw ground-truth annotations on saved images.
    # Returns
        A dict mapping integer class labels to average-precision scores.
    """
    # gather all detections and annotations
    all_annotations = _get_annotations(generator)
    all_detections = _get_detections(generator, model, score_threshold=score_threshold, max_detections=max_detections,
                                     save_path=save_path, ground_truth=ground_truth)
    average_precisions = {}
    # all_detections = pickle.load(open('all_detections.pkl', 'rb'))
    # all_annotations = pickle.load(open('all_annotations.pkl', 'rb'))
    # pickle.dump(all_detections, open('all_detections.pkl', 'wb'))
    # pickle.dump(all_annotations, open('all_annotations.pkl', 'wb'))
    # process detections and annotations
    for label in range(generator.num_classes()):
        false_positives = np.zeros((0,))
        true_positives = np.zeros((0,))
        scores = np.zeros((0,))
        num_annotations = 0.0
        for i in range(generator.size()):
            detections = all_detections[i][label]
            annotations = all_annotations[i][label]
            if annotations is not None:
                num_annotations += annotations.shape[0]
            else:
                # NOTE(review): images whose annotation entry is None are skipped
                # entirely, so detections on them are never counted as false
                # positives — confirm this is intended.
                continue
            detected_annotations = []
            for d in detections:
                # d is one detection row; column 4 holds its confidence score.
                scores = np.append(scores, d[4])
                if annotations.shape[0] == 0:
                    # detection on an image with no boxes of this class
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
                    continue
                overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)
                assigned_annotation = np.argmax(overlaps, axis=1)
                max_overlap = overlaps[0, assigned_annotation]
                # True positive only if overlap is sufficient and the matched
                # annotation has not been claimed by an earlier detection.
                if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                    false_positives = np.append(false_positives, 0)
                    true_positives = np.append(true_positives, 1)
                    detected_annotations.append(assigned_annotation)
                else:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
        # no annotations -> AP for this class is 0 (is this correct?)
        if num_annotations == 0:
            average_precisions[label] = 0
            continue
        # sort by score
        indices = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives = true_positives[indices]
        # compute false positives and true positives
        false_positives = np.cumsum(false_positives)
        true_positives = np.cumsum(true_positives)
        # compute recall and precision
        recall = true_positives / num_annotations
        precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
        # compute average precision
        average_precision = _compute_ap(recall, precision)
        average_precisions[label] = average_precision
    return average_precisions
def ret_confusion_matrix(
    generator,
    model,
    iou_threshold=0.5,
    score_threshold=0.05,
    max_detections=100,
    save_path=None,
    ground_truth=False
):
    """ Evaluate a given dataset using a given model.
    # Arguments
        generator       : The generator that represents the dataset to evaluate.
        model           : The model to evaluate.
        iou_threshold   : The threshold used to consider when a detection is positive or negative.
        score_threshold : The score confidence threshold to use for detections.
        max_detections  : The maximum number of detections to use per image.
        save_path       : The path to save images with visualized detections to.
        ground_truth    : If True, also draw ground-truth annotations on saved images.
    # Returns
        None. Computes and plots a normalized confusion matrix of
        annotated labels vs. the class of the first matching detection.
    """
    # gather all detections and annotations
    all_annotations = _get_annotations(generator)
    all_detections = _get_detections(generator, model, score_threshold=score_threshold, max_detections=max_detections,
                                     save_path=save_path, ground_truth=ground_truth)
    # all_detections = pickle.load(open('all_detections.pkl', 'rb'))
    # all_annotations = pickle.load(open('all_annotations.pkl', 'rb'))
    # pickle.dump(all_detections, open('all_detections.pkl', 'wb'))
    # pickle.dump(all_annotations, open('all_annotations.pkl', 'wb'))
    # process detections and annotations
    all_detected = []
    all_annotated = []
    for i in range(generator.size()):
        for label in range(generator.num_classes()):
            annotated_file = all_annotations[i][label]
            # NOTE(review): _get_annotations leaves entries as None for images
            # with no annotations at all, so `.size` here could raise
            # AttributeError — confirm the dataset never produces such images.
            if annotated_file.size == 0:
                continue
            for annotation in annotated_file:
                if annotation.size == 0:
                    continue
                found = False
                # Scan every class's detections for this image.
                for detect_label, detect_on_i in np.ndenumerate(all_detections[i]):
                    if detect_on_i.size == 0:
                        continue
                    if found:
                        break
                    for d in detect_on_i:
                        d = np.array(d)
                        # compute_overlap expects 2D arrays on both sides.
                        if annotation.ndim == 1:
                            annotation = np.array([annotation])
                        overlaps = compute_overlap(np.expand_dims(d, axis=0), annotation)
                        assigned_annotation = np.argmax(overlaps, axis=1)
                        max_overlap = overlaps[0, assigned_annotation]
                        if max_overlap[0] >= iou_threshold:
                            # The first detection (of any class) with sufficient
                            # overlap is taken as the prediction for this annotation.
                            found = True
                            all_detected.append(detect_label[0])
                            all_annotated.append(label)
                            break
    cnf_matrix = confusion_matrix(all_annotated, all_detected)
    plt.figure()
    plot_confusion_matrix(cnf_matrix, classes=list(range(1, generator.num_classes() + 1)), normalize=True,
                          title='Normalized confusion matrix')
plt.show() | 0.915412 | 0.591546 |
from __future__ import division, print_function, unicode_literals, absolute_import
import imghdr
import struct
import io
import numpy as np
# Dtype groups used by the _is_*_type helpers below. np.float / np.int were
# deprecated aliases of the Python builtins and were removed in NumPy 1.24;
# the builtins compare equal to the same np.dtype objects, so behavior is
# unchanged (float matches float64, int matches the platform default int).
_dtypes_float = [float, np.float16, np.float32]
_dtypes_int = [int, np.int8, np.int16, np.int32, np.uint8, np.uint16, np.uint32]
def _is_float_type(data):
    """Return True if `data` (a np.dtype or array-like) has a float dtype."""
    # Accept either a dtype object directly or anything np.asarray can wrap.
    if type(data) == np.dtype:
        dtype_data = data
    else:
        data = np.asarray(data)
        dtype_data = data.dtype
    # Membership relies on np.dtype equality against the entries of _dtypes_float.
    return dtype_data in _dtypes_float
def _is_integer_type(data):
    """Return True if `data` (a np.dtype or array-like) has an integer dtype."""
    # Accept either a dtype object directly or anything np.asarray can wrap.
    if type(data) == np.dtype:
        dtype_data = data
    else:
        data = np.asarray(data)
        dtype_data = data.dtype
    # Membership relies on np.dtype equality against the entries of _dtypes_int.
    return dtype_data in _dtypes_int
#------------------------------------------------
def image_data_mode(data):
    """Determine image color mode compatible with PIL / Pillow.

    Input data is expected to be 2D or 3D: [num_lines, num_samples, num_bands].
    """
    data = np.asarray(data)

    # A 2D array is a single band; a 3D array carries the bands along axis 2.
    if data.ndim == 2:
        num_bands = 1
    elif data.ndim == 3:
        num_bands = data.shape[2]
    else:
        raise ValueError('Invalid number of data dimensions: {}'.format(data.ndim))

    # Map band count to the corresponding Pillow mode string.
    band_modes = {1: 'L', 3: 'RGB', 4: 'RGBA'}
    mode = band_modes.get(num_bands)
    if mode is None:
        raise ValueError('Invalid number of bands.')
    return mode
def setup_uint8(data, lohi=None):
    """Ensure data is unsigned bytes.

    If data type is not np.uint8 (or an explicit (lo, hi) range is given),
    it will be converted by scaling lo -> 0 and hi -> 255, defaulting to
    min(data) and max(data).
    """
    data = np.asarray(data)

    # uint8 data with no explicit range passes straight through.
    needs_rescale = data.dtype != np.uint8 or lohi is not None
    if not needs_rescale:
        return data

    data = data.astype(np.float32)
    lo, hi = (data.min(), data.max()) if lohi is None else lohi
    if lo == hi:
        raise ValueError('Invalid data range: {}, {}'.format(lo, hi))

    # Normalize to [0, 1], clip out-of-range values, then expand to [0, 255].
    scaled = (data - lo) / (hi - lo)
    scaled = np.clip(scaled, 0, 1) * 255
    return np.round(scaled).astype(np.uint8)
def collapse_alpha(data_rgba):
    """Collapse alpha channel onto a white background.

    https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending

    Only uint8 arrays of shape [num_lines, num_samples, 4] are processed;
    anything else is returned unchanged.
    """
    data_rgba = np.asarray(data_rgba)
    if data_rgba.dtype != np.uint8:
        # Nothing to do
        return data_rgba
    if data_rgba.ndim != 3:
        # Nothing to do
        return data_rgba
    if data_rgba.shape[2] != 4:
        # Nothing to do
        return data_rgba
    # Convert to float, between 0 and 1.
    data_rgba = data_rgba.astype(np.float32)/255
    data_src = data_rgba[:, :, :3]
    # Slice with 3:4 (not 3) to keep the trailing axis, so alpha broadcasts
    # across the three color channels. The previous 2D slice only broadcast
    # for degenerate widths and raised ValueError otherwise.
    alpha_src = data_rgba[:, :, 3:4]
    data_bkg = 1  # white background
    data_rgb = data_src*alpha_src + data_bkg*(1 - alpha_src)
    data_rgb = np.clip(data_rgb, 0, 1)*255
    data_rgb = np.round(data_rgb).astype(np.uint8)
    # Done
    return data_rgb
def get_image_size(data):
    """Determine the image type from raw file bytes and return its size.

    Returns (kind, width, height) for 'png', 'gif' or 'jpeg' data, or None
    if the data is too short, unrecognized, or malformed.
    """
    # Need at least 24 bytes to read any of the supported headers.
    if len(data) < 24:
        return
    # NOTE(review): imghdr is deprecated and removed in Python 3.13 —
    # consider replacing with an explicit magic-number check.
    kind = imghdr.what(None, h=data)
    if kind == 'png':
        # Bytes 4:8 must be the PNG signature tail CR LF SUB LF.
        check = struct.unpack('>i', data[4:8])[0]
        if check != 0x0d0a1a0a:
            return
        # IHDR width and height: big-endian ints at offset 16.
        width, height = struct.unpack('>ii', data[16:24])
    elif kind == 'gif':
        # GIF logical screen size: little-endian shorts after the 6-byte signature.
        width, height = struct.unpack('<HH', data[6:10])
    elif kind == 'jpeg':
        buff = io.BytesIO(data)
        try:
            buff.seek(0)  # Read 0xff next
            size = 2
            ftype = 0
            # Walk the JPEG segments until a SOFn (0xC0-0xCF) frame header.
            while not 0xc0 <= ftype <= 0xcf:
                buff.seek(size, 1)
                byte = buff.read(1)
                while ord(byte) == 0xff:
                    byte = buff.read(1)
                ftype = ord(byte)
                size = struct.unpack('>H', buff.read(2))[0] - 2
            # We are at a SOFn block
            buff.seek(1, 1)  # Skip `precision' byte.
            height, width = struct.unpack('>HH', buff.read(4))
        except Exception:  # IGNORE:W0703
            # Truncated/corrupt JPEG stream; treat as unrecognized.
            return
    else:
        return
    # Done
    return kind, width, height
def data_url(data_comp, fmt):
    """Assemble compressed image data into URL data string.

    # Arguments
        data_comp : Compressed image data (bytes).
        fmt       : Image format name used in the MIME type, e.g. 'png'.
    # Returns
        A 'data:image/<fmt>;charset=utf-8;base64,...' URL string.
    """
    # base64 was never imported anywhere in this module, so this function
    # previously raised NameError when called; import it locally.
    import base64

    data_encode = base64.b64encode(data_comp)
    encoding = 'utf-8'
    template = 'data:image/{:s};charset={};base64,{:s}'
    # The decoding step here is necessary since we need to interpret byte data as text.
    # See this link for a nice explanation:
    # http://stackoverflow.com/questions/14010551/how-to-convert-between-bytes-and-strings-in-python-3
    result = template.format(fmt, encoding, data_encode.decode(encoding=encoding))
    return result
def iter_tiles(img, size):
    """Generator over image tiles.

    Yields (tile, j0, i0) where (j0, i0) is the tile's upper-left corner.
    Tile edge lengths are rounded to even numbers; an image smaller than
    `size` along an axis yields a single chunk spanning that axis (the
    original raised ZeroDivisionError in that case).
    """
    num_lines, num_samples = img.shape[:2]

    # Guard against images smaller than `size`: int(num_lines/size) would be
    # zero and the division below would raise ZeroDivisionError.
    num_chunk_lines = max(int(num_lines/size), 1)
    chunk_lines = int(np.round(num_lines/num_chunk_lines))
    chunk_lines -= chunk_lines % 2  # keep tile height even

    num_chunk_samples = max(int(num_samples/size), 1)
    chunk_samples = int(np.round(num_samples/num_chunk_samples))
    chunk_samples -= chunk_samples % 2  # keep tile width even

    for j in range(num_chunk_lines):
        j0 = j*chunk_lines
        slice_lines = slice(j0, j0 + chunk_lines)
        for i in range(num_chunk_samples):
            i0 = i*chunk_samples
            slice_samples = slice(i0, i0 + chunk_samples)
            yield img[slice_lines, slice_samples], j0, i0
#------------------------------------------------
if __name__ == '__main__':
pass | image_attendant/utility.py | from __future__ import division, print_function, unicode_literals, absolute_import
import imghdr
import struct
import io
import numpy as np
# Dtype groups used by the _is_*_type helpers below. np.float / np.int were
# deprecated aliases of the Python builtins and were removed in NumPy 1.24;
# the builtins compare equal to the same np.dtype objects, so behavior is
# unchanged (float matches float64, int matches the platform default int).
_dtypes_float = [float, np.float16, np.float32]
_dtypes_int = [int, np.int8, np.int16, np.int32, np.uint8, np.uint16, np.uint32]
def _is_float_type(data):
    """Return True if `data` (a np.dtype or array-like) has a float dtype."""
    # Accept either a dtype object directly or anything np.asarray can wrap.
    if type(data) == np.dtype:
        dtype_data = data
    else:
        data = np.asarray(data)
        dtype_data = data.dtype
    # Membership relies on np.dtype equality against the entries of _dtypes_float.
    return dtype_data in _dtypes_float
def _is_integer_type(data):
    """Return True if `data` (a np.dtype or array-like) has an integer dtype."""
    # Accept either a dtype object directly or anything np.asarray can wrap.
    if type(data) == np.dtype:
        dtype_data = data
    else:
        data = np.asarray(data)
        dtype_data = data.dtype
    # Membership relies on np.dtype equality against the entries of _dtypes_int.
    return dtype_data in _dtypes_int
#------------------------------------------------
def image_data_mode(data):
    """Determine image color mode compatible with PIL / Pillow.

    Input data is expected to be 2D or 3D: [num_lines, num_samples, num_bands].
    """
    data = np.asarray(data)

    # A 2D array is a single band; a 3D array carries the bands along axis 2.
    if data.ndim == 2:
        num_bands = 1
    elif data.ndim == 3:
        num_bands = data.shape[2]
    else:
        raise ValueError('Invalid number of data dimensions: {}'.format(data.ndim))

    # Map band count to the corresponding Pillow mode string.
    band_modes = {1: 'L', 3: 'RGB', 4: 'RGBA'}
    mode = band_modes.get(num_bands)
    if mode is None:
        raise ValueError('Invalid number of bands.')
    return mode
def setup_uint8(data, lohi=None):
    """Ensure data is unsigned bytes.

    If data type is not np.uint8 (or an explicit (lo, hi) range is given),
    it will be converted by scaling lo -> 0 and hi -> 255, defaulting to
    min(data) and max(data).
    """
    data = np.asarray(data)

    # uint8 data with no explicit range passes straight through.
    needs_rescale = data.dtype != np.uint8 or lohi is not None
    if not needs_rescale:
        return data

    data = data.astype(np.float32)
    lo, hi = (data.min(), data.max()) if lohi is None else lohi
    if lo == hi:
        raise ValueError('Invalid data range: {}, {}'.format(lo, hi))

    # Normalize to [0, 1], clip out-of-range values, then expand to [0, 255].
    scaled = (data - lo) / (hi - lo)
    scaled = np.clip(scaled, 0, 1) * 255
    return np.round(scaled).astype(np.uint8)
def collapse_alpha(data_rgba):
    """Collapse alpha channel onto a white background.

    https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending

    Only uint8 arrays of shape [num_lines, num_samples, 4] are processed;
    anything else is returned unchanged.
    """
    data_rgba = np.asarray(data_rgba)
    if data_rgba.dtype != np.uint8:
        # Nothing to do
        return data_rgba
    if data_rgba.ndim != 3:
        # Nothing to do
        return data_rgba
    if data_rgba.shape[2] != 4:
        # Nothing to do
        return data_rgba
    # Convert to float, between 0 and 1.
    data_rgba = data_rgba.astype(np.float32)/255
    data_src = data_rgba[:, :, :3]
    # Slice with 3:4 (not 3) to keep the trailing axis, so alpha broadcasts
    # across the three color channels. The previous 2D slice only broadcast
    # for degenerate widths and raised ValueError otherwise.
    alpha_src = data_rgba[:, :, 3:4]
    data_bkg = 1  # white background
    data_rgb = data_src*alpha_src + data_bkg*(1 - alpha_src)
    data_rgb = np.clip(data_rgb, 0, 1)*255
    data_rgb = np.round(data_rgb).astype(np.uint8)
    # Done
    return data_rgb
def get_image_size(data):
    """Determine the image type from raw file bytes and return its size.

    Returns (kind, width, height) for 'png', 'gif' or 'jpeg' data, or None
    if the data is too short, unrecognized, or malformed.
    """
    # Need at least 24 bytes to read any of the supported headers.
    if len(data) < 24:
        return
    # NOTE(review): imghdr is deprecated and removed in Python 3.13 —
    # consider replacing with an explicit magic-number check.
    kind = imghdr.what(None, h=data)
    if kind == 'png':
        # Bytes 4:8 must be the PNG signature tail CR LF SUB LF.
        check = struct.unpack('>i', data[4:8])[0]
        if check != 0x0d0a1a0a:
            return
        # IHDR width and height: big-endian ints at offset 16.
        width, height = struct.unpack('>ii', data[16:24])
    elif kind == 'gif':
        # GIF logical screen size: little-endian shorts after the 6-byte signature.
        width, height = struct.unpack('<HH', data[6:10])
    elif kind == 'jpeg':
        buff = io.BytesIO(data)
        try:
            buff.seek(0)  # Read 0xff next
            size = 2
            ftype = 0
            # Walk the JPEG segments until a SOFn (0xC0-0xCF) frame header.
            while not 0xc0 <= ftype <= 0xcf:
                buff.seek(size, 1)
                byte = buff.read(1)
                while ord(byte) == 0xff:
                    byte = buff.read(1)
                ftype = ord(byte)
                size = struct.unpack('>H', buff.read(2))[0] - 2
            # We are at a SOFn block
            buff.seek(1, 1)  # Skip `precision' byte.
            height, width = struct.unpack('>HH', buff.read(4))
        except Exception:  # IGNORE:W0703
            # Truncated/corrupt JPEG stream; treat as unrecognized.
            return
    else:
        return
    # Done
    return kind, width, height
def data_url(data_comp, fmt):
    """Assemble compressed image data into URL data string.

    # Arguments
        data_comp : Compressed image data (bytes).
        fmt       : Image format name used in the MIME type, e.g. 'png'.
    # Returns
        A 'data:image/<fmt>;charset=utf-8;base64,...' URL string.
    """
    # base64 was never imported anywhere in this module, so this function
    # previously raised NameError when called; import it locally.
    import base64

    data_encode = base64.b64encode(data_comp)
    encoding = 'utf-8'
    template = 'data:image/{:s};charset={};base64,{:s}'
    # The decoding step here is necessary since we need to interpret byte data as text.
    # See this link for a nice explanation:
    # http://stackoverflow.com/questions/14010551/how-to-convert-between-bytes-and-strings-in-python-3
    result = template.format(fmt, encoding, data_encode.decode(encoding=encoding))
    return result
def iter_tiles(img, size):
    """Generator over image tiles.

    Yields (tile, j0, i0) where (j0, i0) is the tile's upper-left corner.
    Tile edge lengths are rounded to even numbers; an image smaller than
    `size` along an axis yields a single chunk spanning that axis (the
    original raised ZeroDivisionError in that case).
    """
    num_lines, num_samples = img.shape[:2]

    # Guard against images smaller than `size`: int(num_lines/size) would be
    # zero and the division below would raise ZeroDivisionError.
    num_chunk_lines = max(int(num_lines/size), 1)
    chunk_lines = int(np.round(num_lines/num_chunk_lines))
    chunk_lines -= chunk_lines % 2  # keep tile height even

    num_chunk_samples = max(int(num_samples/size), 1)
    chunk_samples = int(np.round(num_samples/num_chunk_samples))
    chunk_samples -= chunk_samples % 2  # keep tile width even

    for j in range(num_chunk_lines):
        j0 = j*chunk_lines
        slice_lines = slice(j0, j0 + chunk_lines)
        for i in range(num_chunk_samples):
            i0 = i*chunk_samples
            slice_samples = slice(i0, i0 + chunk_samples)
            yield img[slice_lines, slice_samples], j0, i0
#------------------------------------------------
if __name__ == '__main__':
pass | 0.589244 | 0.603377 |
import mmcv
import torch
from mmtrack.models.track_heads.stark_head import (CornerPredictorHead,
ScoreHead, StarkHead,
StarkTransformer)
def test_corner_predictor_head():
    """CornerPredictorHead forward shape check on a random feature map."""
    bbox_head = CornerPredictorHead(8, 8, feat_size=20, stride=16)
    # Batch of 1, 8 channels, feat_size x feat_size spatial extent.
    inputs = torch.randn(1, 8, 20, 20)
    outputs = bbox_head(inputs)
    # One (x1, y1, x2, y2) box per batch element.
    assert outputs.shape == (1, 4)
def test_score_head():
    """ScoreHead forward shape check on a random embedding."""
    # input_dim=8, hidden_dim=8, output_dim=1, num_layers=3.
    score_head = ScoreHead(8, 8, 1, 3)
    inputs = torch.randn(1, 1, 1, 8)
    outputs = score_head(inputs)
    # The 8-dim embedding is reduced to a single score per position.
    assert outputs.shape == (1, 1, 1)
def test_transormer_head():
    """StarkTransformer forward shape check with a tiny 6+6 layer config.

    NOTE(review): the function name has a typo ("transormer" -> "transformer");
    left unchanged because pytest collects it under its current name.
    """
    cfg = mmcv.Config(
        dict(
            encoder=dict(
                type='DetrTransformerEncoder',
                num_layers=6,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=[
                        dict(
                            type='MultiheadAttention',
                            embed_dims=16,
                            num_heads=8,
                            attn_drop=0.1,
                            dropout_layer=dict(type='Dropout', drop_prob=0.1))
                    ],
                    ffn_cfgs=dict(
                        feedforward_channels=16, embed_dims=16, ffn_drop=0.1),
                    operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
            decoder=dict(
                type='DetrTransformerDecoder',
                return_intermediate=False,
                num_layers=6,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=dict(
                        type='MultiheadAttention',
                        embed_dims=16,
                        num_heads=8,
                        attn_drop=0.1,
                        dropout_layer=dict(type='Dropout', drop_prob=0.1)),
                    ffn_cfgs=dict(
                        feedforward_channels=16, embed_dims=16, ffn_drop=0.1),
                    operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
                                     'ffn', 'norm')))))
    self = StarkTransformer(**cfg)
    # Sequence length 20, batch size 1, embedding dim 16.
    feat = torch.randn(20, 1, 16)
    mask = torch.zeros(1, 20, dtype=bool)
    query_embed = torch.randn(1, 16)
    pos_embed = torch.randn(20, 1, 16)
    out_dec, enc_mem = self.forward(feat, mask, query_embed, pos_embed)
    assert out_dec.shape == (1, 1, 1, 16)
    assert enc_mem.shape == (20, 1, 16)
def test_stark_head_loss():
    """Tests stark head loss when truth is non-empty.

    Builds a minimal StarkHead config, runs a forward pass on random template
    and search features, and checks that the bbox/IoU (and, with a cls head
    added, classification) losses are non-negative.
    """
    head_cfg = dict(
        num_query=1,
        transformer=dict(
            type='StarkTransformer',
            encoder=dict(
                type='DetrTransformerEncoder',
                num_layers=6,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=[
                        dict(
                            type='MultiheadAttention',
                            embed_dims=16,
                            num_heads=8,
                            attn_drop=0.1,
                            dropout_layer=dict(type='Dropout', drop_prob=0.1))
                    ],
                    ffn_cfgs=dict(
                        feedforward_channels=16, embed_dims=16, ffn_drop=0.1),
                    operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
            decoder=dict(
                type='DetrTransformerDecoder',
                return_intermediate=False,
                num_layers=6,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=dict(
                        type='MultiheadAttention',
                        embed_dims=16,
                        num_heads=8,
                        attn_drop=0.1,
                        dropout_layer=dict(type='Dropout', drop_prob=0.1)),
                    ffn_cfgs=dict(
                        feedforward_channels=16, embed_dims=16, ffn_drop=0.1),
                    operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
                                     'ffn', 'norm'))),
        ),
        positional_encoding=dict(
            type='SinePositionalEncoding', num_feats=8, normalize=True),
        bbox_head=dict(
            type='CornerPredictorHead',
            inplanes=16,
            channel=16,
            feat_size=20,
            stride=16),
        loss_bbox=dict(type='L1Loss', loss_weight=5.0),
        loss_iou=dict(type='GIoULoss', loss_weight=2.0),
        test_cfg=dict(
            search_factor=5.0,
            search_size=320,
            template_factor=2.0,
            template_size=128,
            update_intervals=[200]))
    cfg = mmcv.Config(head_cfg)
    self = StarkHead(**cfg)
    # Two template inputs and one search input: (feature tuple, padding mask).
    head_inputs = [
        dict(
            feat=(torch.rand(1, 16, 8, 8), ),
            mask=torch.zeros(1, 128, 128, dtype=bool)),
        dict(
            feat=(torch.rand(1, 16, 8, 8), ),
            mask=torch.zeros(1, 128, 128, dtype=bool)),
        dict(
            feat=(torch.rand(1, 16, 20, 20), ),
            mask=torch.zeros(1, 320, 320, dtype=bool))
    ]
    track_results = self.forward(head_inputs)
    gt_bboxes = [
        torch.Tensor([[0., 23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [
        torch.Tensor([[0., 1]]),
    ]
    bboxes_losses = self.loss(track_results, gt_bboxes, gt_labels, (320, 320))
    # The original assert messages were truncated: the tail of each message
    # sat on the following line as a no-op string-literal statement.
    assert bboxes_losses['loss_iou'] >= 0, 'iou loss should be non-zero or zero'
    assert bboxes_losses[
        'loss_bbox'] >= 0, 'bbox loss should be non-zero or zero'
    # Add a classification head (transformer and bbox head frozen) and check
    # the classification loss as well.
    head_cfg.update(
        dict(
            cls_head=dict(
                type='ScoreHead',
                input_dim=16,
                hidden_dim=16,
                output_dim=1,
                num_layers=3,
                use_bn=False),
            frozen_module=['transformer', 'bbox_head'],
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True)))
    cfg = mmcv.Config(head_cfg)
    self = StarkHead(**cfg)
    track_results = self.forward(head_inputs)
    bboxes_losses = self.loss(track_results, gt_bboxes, gt_labels, (320, 320))
    # Fixed message: this asserts the classification loss, not the IoU loss.
    assert bboxes_losses['loss_cls'] >= 0, 'cls loss should be non-zero or zero'
'non-zero or zero' | tests/test_models/test_track_heads/test_stark_head.py | import mmcv
import torch
from mmtrack.models.track_heads.stark_head import (CornerPredictorHead,
ScoreHead, StarkHead,
StarkTransformer)
def test_corner_predictor_head():
    """CornerPredictorHead forward shape check on a random feature map."""
    bbox_head = CornerPredictorHead(8, 8, feat_size=20, stride=16)
    # Batch of 1, 8 channels, feat_size x feat_size spatial extent.
    inputs = torch.randn(1, 8, 20, 20)
    outputs = bbox_head(inputs)
    # One (x1, y1, x2, y2) box per batch element.
    assert outputs.shape == (1, 4)
def test_score_head():
    """ScoreHead forward shape check on a random embedding."""
    # input_dim=8, hidden_dim=8, output_dim=1, num_layers=3.
    score_head = ScoreHead(8, 8, 1, 3)
    inputs = torch.randn(1, 1, 1, 8)
    outputs = score_head(inputs)
    # The 8-dim embedding is reduced to a single score per position.
    assert outputs.shape == (1, 1, 1)
def test_transormer_head():
    """StarkTransformer forward shape check with a tiny 6+6 layer config.

    NOTE(review): the function name has a typo ("transormer" -> "transformer");
    left unchanged because pytest collects it under its current name.
    """
    cfg = mmcv.Config(
        dict(
            encoder=dict(
                type='DetrTransformerEncoder',
                num_layers=6,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=[
                        dict(
                            type='MultiheadAttention',
                            embed_dims=16,
                            num_heads=8,
                            attn_drop=0.1,
                            dropout_layer=dict(type='Dropout', drop_prob=0.1))
                    ],
                    ffn_cfgs=dict(
                        feedforward_channels=16, embed_dims=16, ffn_drop=0.1),
                    operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
            decoder=dict(
                type='DetrTransformerDecoder',
                return_intermediate=False,
                num_layers=6,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=dict(
                        type='MultiheadAttention',
                        embed_dims=16,
                        num_heads=8,
                        attn_drop=0.1,
                        dropout_layer=dict(type='Dropout', drop_prob=0.1)),
                    ffn_cfgs=dict(
                        feedforward_channels=16, embed_dims=16, ffn_drop=0.1),
                    operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
                                     'ffn', 'norm')))))
    self = StarkTransformer(**cfg)
    # Sequence length 20, batch size 1, embedding dim 16.
    feat = torch.randn(20, 1, 16)
    mask = torch.zeros(1, 20, dtype=bool)
    query_embed = torch.randn(1, 16)
    pos_embed = torch.randn(20, 1, 16)
    out_dec, enc_mem = self.forward(feat, mask, query_embed, pos_embed)
    assert out_dec.shape == (1, 1, 1, 16)
    assert enc_mem.shape == (20, 1, 16)
def test_stark_head_loss():
    """Tests stark head loss when truth is non-empty.

    Builds a reduced-size StarkHead (embed_dims=16), runs a forward pass on
    two template frames plus one search frame, and checks that every loss
    term is non-negative.  A second pass re-builds the head with a
    classification branch and frozen transformer/bbox modules.
    """
    head_cfg = dict(
        num_query=1,
        transformer=dict(
            type='StarkTransformer',
            encoder=dict(
                type='DetrTransformerEncoder',
                num_layers=6,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=[
                        dict(
                            type='MultiheadAttention',
                            embed_dims=16,
                            num_heads=8,
                            attn_drop=0.1,
                            dropout_layer=dict(type='Dropout', drop_prob=0.1))
                    ],
                    ffn_cfgs=dict(
                        feedforward_channels=16, embed_dims=16, ffn_drop=0.1),
                    operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
            decoder=dict(
                type='DetrTransformerDecoder',
                return_intermediate=False,
                num_layers=6,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=dict(
                        type='MultiheadAttention',
                        embed_dims=16,
                        num_heads=8,
                        attn_drop=0.1,
                        dropout_layer=dict(type='Dropout', drop_prob=0.1)),
                    ffn_cfgs=dict(
                        feedforward_channels=16, embed_dims=16, ffn_drop=0.1),
                    operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
                                     'ffn', 'norm'))),
        ),
        positional_encoding=dict(
            type='SinePositionalEncoding', num_feats=8, normalize=True),
        bbox_head=dict(
            type='CornerPredictorHead',
            inplanes=16,
            channel=16,
            feat_size=20,
            stride=16),
        loss_bbox=dict(type='L1Loss', loss_weight=5.0),
        loss_iou=dict(type='GIoULoss', loss_weight=2.0),
        test_cfg=dict(
            search_factor=5.0,
            search_size=320,
            template_factor=2.0,
            template_size=128,
            update_intervals=[200]))
    cfg = mmcv.Config(head_cfg)
    self = StarkHead(**cfg)
    # Two 8x8 template feature maps plus one 20x20 search feature map.
    head_inputs = [
        dict(
            feat=(torch.rand(1, 16, 8, 8), ),
            mask=torch.zeros(1, 128, 128, dtype=bool)),
        dict(
            feat=(torch.rand(1, 16, 8, 8), ),
            mask=torch.zeros(1, 128, 128, dtype=bool)),
        dict(
            feat=(torch.rand(1, 16, 20, 20), ),
            mask=torch.zeros(1, 320, 320, dtype=bool))
    ]
    track_results = self.forward(head_inputs)
    gt_bboxes = [
        torch.Tensor([[0., 23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [
        torch.Tensor([[0., 1]]),
    ]
    bboxes_losses = self.loss(track_results, gt_bboxes, gt_labels, (320, 320))
    # FIX: the assert messages used to be split across two statements,
    # leaving dangling no-op string literals; they are joined here, and the
    # loss_cls message no longer claims to be about the iou loss.
    assert bboxes_losses['loss_iou'] >= 0, 'iou loss should be non-zero or zero'
    assert bboxes_losses[
        'loss_bbox'] >= 0, 'bbox loss should be non-zero or zero'
    # Second stage: add a classification head and freeze the localization
    # modules, then check the classification loss as well.
    head_cfg.update(
        dict(
            cls_head=dict(
                type='ScoreHead',
                input_dim=16,
                hidden_dim=16,
                output_dim=1,
                num_layers=3,
                use_bn=False),
            frozen_module=['transformer', 'bbox_head'],
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True)))
    cfg = mmcv.Config(head_cfg)
    self = StarkHead(**cfg)
    track_results = self.forward(head_inputs)
    bboxes_losses = self.loss(track_results, gt_bboxes, gt_labels, (320, 320))
    assert bboxes_losses['loss_cls'] >= 0, 'cls loss should be non-zero or zero'
import argparse
import json
import pathlib
from pathlib import Path
import numpy as np
import data_loader.data_loaders as module_data
import model.loss as module_loss
import model.metric as module_metric
import model.model as module_arch # monorec model
from evaluater import Evaluater
from utils.parse_config import ConfigParser
def _to_jsonable(d):
    """Sanitize an attribute dict *d* in place for JSON dumping.

    Drops private (underscore-prefixed) keys and coerces values the json
    module cannot serialise: numpy arrays become lists, pathlib paths
    become strings.
    """
    for k in list(d.keys()):
        if k.startswith("_"):
            del d[k]
        elif isinstance(d[k], np.ndarray):
            d[k] = list(d[k])
        elif isinstance(d[k], pathlib.PurePath):
            d[k] = str(d[k])


def main(config: ConfigParser):
    """Evaluate each configured model on the configured dataset.

    For every model, logs the model/dataset attribute snapshots, runs an
    Evaluater pass, and finally writes all results to
    ``<log_dir>/results.json``.
    """
    logger = config.get_logger('train')

    # setup data_loader instances
    data_loader = config.initialize('data_loader', module_data)

    # function handles of the loss (e.g. depth_loss) and the metrics
    loss = getattr(module_loss, config['loss'])
    metrics = [getattr(module_metric, met) for met in config['metrics']]

    # build model architecture(s); either a single 'arch' entry or a list
    if "arch" in config.config:
        models = [config.initialize('arch', module_arch)]
    else:
        models = config.initialize_list("models", module_arch)

    results = []
    for i, model in enumerate(models):
        # Snapshot model and dataset attributes for the results file.
        # FIX: the two sanitizing loops were duplicated inline and the model
        # loop did not convert pathlib paths (which would crash json.dump
        # for path-valued attributes such as checkpoint locations).
        model_dict = dict(model.__dict__)
        _to_jsonable(model_dict)
        dataset_dict = dict(data_loader.dataset.__dict__)
        _to_jsonable(dataset_dict)

        logger.info(model_dict)
        logger.info(dataset_dict)

        print("############################ start eval ##########################")
        evaluater = Evaluater(model, loss, metrics, config=config,
                              data_loader=data_loader)
        result = evaluater.eval(i)
        # FIX: the "end eval" banner used to print before eval() even ran;
        # the no-op `result["metrics"] = result["metrics"]` was removed.
        print("############################ end eval ##########################")
        del model
        result["metrics_info"] = [metric.__name__ for metric in metrics]
        logger.info(result)

        results.append({
            "model": model_dict,
            "dataset": dataset_dict,
            "result": result
        })

    save_file = Path(config.log_dir) / "results.json"
    with open(save_file, "w") as f:
        json.dump(results, f, indent=4)
    logger.info("Finished")
if __name__ == "__main__":
    # CLI entry point: -c/--config, -d/--device, -r/--resume are consumed
    # by ConfigParser, which builds the run configuration.
    # FIX: the final line was fused with non-Python residue; the clean
    # `main(config)` call is restored.
    args = argparse.ArgumentParser(description='Deeptam Evaluation')
    args.add_argument('-c', '--config', default=None, type=str,
                      help='config file path (default: None)')
    args.add_argument('-d', '--device', default=None, type=str,
                      help='indices of GPUs to enable (default: all)')
    args.add_argument('-r', '--resume', default=None, type=str,
                      help='path to latest checkpoint (default: None)')
    config = ConfigParser(args)
    print(config.config)
    main(config)
import json
import pathlib
from pathlib import Path
import numpy as np
import data_loader.data_loaders as module_data
import model.loss as module_loss
import model.metric as module_metric
import model.model as module_arch # monorec model
from evaluater import Evaluater
from utils.parse_config import ConfigParser
def main(config: ConfigParser):
    """Evaluate the configured model(s) and write results.json to the log dir."""
    logger = config.get_logger('train')

    # setup data_loader instances
    data_loader = config.initialize('data_loader', module_data)

    # get function handles of loss and metrics
    loss = getattr(module_loss, config['loss'])  # single loss handle, e.g. depth_loss
    # print("loss: ", loss)
    metrics = [getattr(module_metric, met) for met in config['metrics']]  # metric handles (7 in the reference config)
    # print("metrics: ", metrics)

    # build model architecture, then print to console
    if "arch" in config.config:
        models = [config.initialize('arch', module_arch)]  # monorec model
    else:
        models = config.initialize_list("models", module_arch)

    results = []
    for i, model in enumerate(models):  # usually just one model
        model_dict = dict(model.__dict__)  # snapshot the model's attributes
        keys = list(model_dict.keys())
        '''
        dict_keys(['training', '_parameters', '_buffers', '_non_persistent_buffers_set',
        '_backward_hooks', '_is_full_backward_hook', '_forward_hooks', '_forward_pre_hooks',
        '_state_dict_hooks', '_load_state_dict_pre_hooks', '_modules', 'inv_depth_min_max',
        'cv_depth_steps', 'use_mono', 'use_stereo', 'use_ssim', 'sfcv_mult_mask', 'pretrain_mode',
        'pretrain_dropout', 'pretrain_dropout_mode', 'augmentation', 'simple_mask', 'mask_use_cv',
        'mask_use_feats', 'cv_patch_size', 'no_cv', 'depth_large_model', 'checkpoint_location', 'mask_cp_loc',
        'depth_cp_loc', 'freeze_module', 'freeze_resnet', 'augmenter'])
        '''
        # Drop private attributes; convert numpy arrays so json.dump works.
        for k in keys:
            if k.startswith("_"):
                model_dict.__delitem__(k)
            elif type(model_dict[k]) == np.ndarray:
                model_dict[k] = list(model_dict[k])
        dataset_dict = dict(data_loader.dataset.__dict__)  # snapshot the dataset's attributes
        keys = list(dataset_dict.keys())  # these correspond to the dataset's init parameters
        '''
        ['dataset_dir', 'frame_count', 'target_image_size', 'offset_d', 'nusc', 'pointsensor_channel',
        'camera_channel', '_offset', 'length', 'dilation', 'use_color_augmentation', 'return_mvobj_mask']
        '''
        # Same sanitizing as above, plus pathlib paths -> str.
        for k in keys:
            if k.startswith("_"):
                dataset_dict.__delitem__(k)
            elif type(dataset_dict[k]) == np.ndarray:
                dataset_dict[k] = list(dataset_dict[k])
            elif isinstance(dataset_dict[k], pathlib.PurePath):
                dataset_dict[k] = str(dataset_dict[k])
        logger.info(model_dict)
        logger.info(dataset_dict)
        print("############################ start eval ##########################")
        # Evaluater receives the model, loss, metrics to record, config and test data.
        print("############################ end eval ##########################")
        evaluater = Evaluater(model, loss, metrics, config=config, data_loader=data_loader)
        result = evaluater.eval(i)  # evaluate the i-th model
        result["metrics"] = result["metrics"]  # NOTE(review): no-op assignment; candidate for removal
        del model
        result["metrics_info"] = [metric.__name__ for metric in metrics]
        logger.info(result)
        results.append({
            "model": model_dict,
            "dataset": dataset_dict,
            "result": result
        })
    save_file = Path(config.log_dir) / "results.json"
    with open(save_file, "w") as f:
        json.dump(results, f, indent=4)
    logger.info("Finished")
if __name__ == "__main__":
    # CLI entry point: -c/--config, -d/--device, -r/--resume are consumed
    # by ConfigParser, which builds the run configuration.
    # FIX: the final line was fused with non-Python residue; the clean
    # `main(config)` call is restored.
    args = argparse.ArgumentParser(description='Deeptam Evaluation')
    args.add_argument('-c', '--config', default=None, type=str,
                      help='config file path (default: None)')
    args.add_argument('-d', '--device', default=None, type=str,
                      help='indices of GPUs to enable (default: all)')
    args.add_argument('-r', '--resume', default=None, type=str,
                      help='path to latest checkpoint (default: None)')
    config = ConfigParser(args)
    print(config.config)
    main(config)
from __future__ import absolute_import, print_function
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Q
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.db.models import (
BaseManager, BoundedPositiveIntegerField, FlexibleForeignKey, Model,
sane_repr
)
from sentry.db.models.utils import slugify_instance
from sentry.utils.http import absolute_uri
class TeamManager(BaseManager):
    def get_for_user(self, organization, user, access=None, access_groups=True,
                     with_projects=False):
        """
        Returns a list of all teams a user has some level of access to.

        Each <Team> returned has an ``access_type`` attribute which holds the
        OrganizationMemberType value.  When ``with_projects`` is set, each
        result is a ``(team, [projects])`` tuple instead.
        """
        from sentry.models import (
            AccessGroup, OrganizationMember, OrganizationMemberType, Project
        )
        # Anonymous users have no team access at all.
        if not user.is_authenticated():
            return []
        base_team_qs = self.filter(
            organization=organization,
            status=TeamStatus.VISIBLE
        )
        if user.is_superuser:
            # Superusers see every visible team with OWNER-level access.
            team_list = list(base_team_qs)
            for team in team_list:
                team.access_type = OrganizationMemberType.OWNER
        elif settings.SENTRY_PUBLIC and access is None:
            # Public install: every visible team is readable as a MEMBER.
            team_list = list(base_team_qs)
            for team in team_list:
                team.access_type = OrganizationMemberType.MEMBER
        else:
            om_qs = OrganizationMember.objects.filter(
                user=user,
                organization=organization,
            )
            if access is not None:
                # type__lte: presumably lower type values mean more
                # privilege -- TODO confirm against OrganizationMemberType.
                om_qs = om_qs.filter(type__lte=access)
            try:
                om = om_qs.get()
            except OrganizationMember.DoesNotExist:
                team_qs = self.none()
            else:
                if om.has_global_access:
                    team_qs = base_team_qs
                else:
                    team_qs = om.teams.filter(
                        status=TeamStatus.VISIBLE
                    )
                for team in team_qs:
                    team.access_type = om.type
            team_list = set(team_qs)
            # TODO(dcramer): remove all of this junk when access groups are
            # killed
            # Legacy access groups may grant teams beyond the membership
            # resolved above; merge them in without duplicates.
            ag_qs = AccessGroup.objects.filter(
                members=user,
                team__organization=organization,
                team__status=TeamStatus.VISIBLE,
            ).select_related('team')
            if access is not None:
                ag_qs = ag_qs.filter(type__lte=access)
            for ag in ag_qs:
                if ag.team in team_list:
                    continue
                ag.team.is_access_group = True
                ag.team.access_type = ag.type
                team_list.add(ag.team)
        results = sorted(team_list, key=lambda x: x.name.lower())
        if with_projects:
            # these kinds of queries make people sad :(
            # One extra query per team to attach its project list.
            for idx, team in enumerate(results):
                project_list = list(Project.objects.get_for_user(
                    team=team,
                    user=user,
                    _skip_team_check=True
                ))
                results[idx] = (team, project_list)
        return results
# TODO(dcramer): pull in enum library
class TeamStatus(object):
    # Integer lifecycle states persisted in Team.status.
    VISIBLE = 0
    PENDING_DELETION = 1
    DELETION_IN_PROGRESS = 2
class Team(Model):
    """
    A team represents a group of individuals which maintain ownership of projects.

    FIX: the closing line of this class was fused with non-Python residue;
    the clean ``get_audit_log_data`` dict literal is restored.  Model fields
    are otherwise unchanged (migration-sensitive).
    """
    organization = FlexibleForeignKey('sentry.Organization')
    slug = models.SlugField()
    name = models.CharField(max_length=64)
    owner = FlexibleForeignKey(settings.AUTH_USER_MODEL)
    status = BoundedPositiveIntegerField(choices=(
        (TeamStatus.VISIBLE, _('Active')),
        (TeamStatus.PENDING_DELETION, _('Pending Deletion')),
        (TeamStatus.DELETION_IN_PROGRESS, _('Deletion in Progress')),
    ), default=TeamStatus.VISIBLE)
    date_added = models.DateTimeField(default=timezone.now, null=True)

    # Manager caches lookups by primary key and slug.
    objects = TeamManager(cache_fields=(
        'pk',
        'slug',
    ))

    class Meta:
        app_label = 'sentry'
        db_table = 'sentry_team'
        unique_together = (('organization', 'slug'),)

    __repr__ = sane_repr('slug', 'owner_id', 'name')

    def __unicode__(self):
        return u'%s (%s)' % (self.name, self.slug)

    def save(self, *args, **kwargs):
        # Derive a slug from the team name (unique per organization) on
        # first save.
        if not self.slug:
            slugify_instance(self, self.name, organization=self.organization)
        super(Team, self).save(*args, **kwargs)

    def get_absolute_url(self):
        return absolute_uri(reverse('sentry-team-dashboard', args=[
            self.organization.slug,
            self.slug,
        ]))

    def get_owner_name(self):
        """Best-effort display name for the owning user: first name, then
        the email local part, then the username."""
        if not self.owner:
            return None
        if self.owner.first_name:
            return self.owner.first_name
        if self.owner.email:
            return self.owner.email.split('@', 1)[0]
        return self.owner.username

    @property
    def member_set(self):
        # Active organization members that either belong to this team
        # explicitly or hold org-wide (global) access.
        return self.organization.member_set.filter(
            Q(teams=self) | Q(has_global_access=True),
            user__is_active=True,
        )

    def has_access(self, user, access=None):
        queryset = self.member_set.filter(user=user)
        if access is not None:
            queryset = queryset.filter(type__lte=access)
        return queryset.exists()

    def get_audit_log_data(self):
        return {
            'slug': self.slug,
            'name': self.name,
            'status': self.status,
        }
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Q
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.db.models import (
BaseManager, BoundedPositiveIntegerField, FlexibleForeignKey, Model,
sane_repr
)
from sentry.db.models.utils import slugify_instance
from sentry.utils.http import absolute_uri
class TeamManager(BaseManager):
    def get_for_user(self, organization, user, access=None, access_groups=True,
                     with_projects=False):
        """
        Returns a list of all teams a user has some level of access to.

        Each <Team> returned has an ``access_type`` attribute which holds the
        OrganizationMemberType value.  When ``with_projects`` is set, each
        result is a ``(team, [projects])`` tuple instead.
        """
        from sentry.models import (
            AccessGroup, OrganizationMember, OrganizationMemberType, Project
        )
        # Anonymous users have no team access at all.
        if not user.is_authenticated():
            return []
        base_team_qs = self.filter(
            organization=organization,
            status=TeamStatus.VISIBLE
        )
        if user.is_superuser:
            # Superusers see every visible team with OWNER-level access.
            team_list = list(base_team_qs)
            for team in team_list:
                team.access_type = OrganizationMemberType.OWNER
        elif settings.SENTRY_PUBLIC and access is None:
            # Public install: every visible team is readable as a MEMBER.
            team_list = list(base_team_qs)
            for team in team_list:
                team.access_type = OrganizationMemberType.MEMBER
        else:
            om_qs = OrganizationMember.objects.filter(
                user=user,
                organization=organization,
            )
            if access is not None:
                # type__lte: presumably lower type values mean more
                # privilege -- TODO confirm against OrganizationMemberType.
                om_qs = om_qs.filter(type__lte=access)
            try:
                om = om_qs.get()
            except OrganizationMember.DoesNotExist:
                team_qs = self.none()
            else:
                if om.has_global_access:
                    team_qs = base_team_qs
                else:
                    team_qs = om.teams.filter(
                        status=TeamStatus.VISIBLE
                    )
                for team in team_qs:
                    team.access_type = om.type
            team_list = set(team_qs)
            # TODO(dcramer): remove all of this junk when access groups are
            # killed
            # Legacy access groups may grant teams beyond the membership
            # resolved above; merge them in without duplicates.
            ag_qs = AccessGroup.objects.filter(
                members=user,
                team__organization=organization,
                team__status=TeamStatus.VISIBLE,
            ).select_related('team')
            if access is not None:
                ag_qs = ag_qs.filter(type__lte=access)
            for ag in ag_qs:
                if ag.team in team_list:
                    continue
                ag.team.is_access_group = True
                ag.team.access_type = ag.type
                team_list.add(ag.team)
        results = sorted(team_list, key=lambda x: x.name.lower())
        if with_projects:
            # these kinds of queries make people sad :(
            # One extra query per team to attach its project list.
            for idx, team in enumerate(results):
                project_list = list(Project.objects.get_for_user(
                    team=team,
                    user=user,
                    _skip_team_check=True
                ))
                results[idx] = (team, project_list)
        return results
# TODO(dcramer): pull in enum library
class TeamStatus(object):
    # Integer lifecycle states persisted in Team.status.
    VISIBLE = 0
    PENDING_DELETION = 1
    DELETION_IN_PROGRESS = 2
class Team(Model):
    """
    A team represents a group of individuals which maintain ownership of projects.

    FIX: the closing line of this class was fused with non-Python residue;
    the clean ``get_audit_log_data`` dict literal is restored.  Model fields
    are otherwise unchanged (migration-sensitive).
    """
    organization = FlexibleForeignKey('sentry.Organization')
    slug = models.SlugField()
    name = models.CharField(max_length=64)
    owner = FlexibleForeignKey(settings.AUTH_USER_MODEL)
    status = BoundedPositiveIntegerField(choices=(
        (TeamStatus.VISIBLE, _('Active')),
        (TeamStatus.PENDING_DELETION, _('Pending Deletion')),
        (TeamStatus.DELETION_IN_PROGRESS, _('Deletion in Progress')),
    ), default=TeamStatus.VISIBLE)
    date_added = models.DateTimeField(default=timezone.now, null=True)

    # Manager caches lookups by primary key and slug.
    objects = TeamManager(cache_fields=(
        'pk',
        'slug',
    ))

    class Meta:
        app_label = 'sentry'
        db_table = 'sentry_team'
        unique_together = (('organization', 'slug'),)

    __repr__ = sane_repr('slug', 'owner_id', 'name')

    def __unicode__(self):
        return u'%s (%s)' % (self.name, self.slug)

    def save(self, *args, **kwargs):
        # Derive a slug from the team name (unique per organization) on
        # first save.
        if not self.slug:
            slugify_instance(self, self.name, organization=self.organization)
        super(Team, self).save(*args, **kwargs)

    def get_absolute_url(self):
        return absolute_uri(reverse('sentry-team-dashboard', args=[
            self.organization.slug,
            self.slug,
        ]))

    def get_owner_name(self):
        """Best-effort display name for the owning user: first name, then
        the email local part, then the username."""
        if not self.owner:
            return None
        if self.owner.first_name:
            return self.owner.first_name
        if self.owner.email:
            return self.owner.email.split('@', 1)[0]
        return self.owner.username

    @property
    def member_set(self):
        # Active organization members that either belong to this team
        # explicitly or hold org-wide (global) access.
        return self.organization.member_set.filter(
            Q(teams=self) | Q(has_global_access=True),
            user__is_active=True,
        )

    def has_access(self, user, access=None):
        queryset = self.member_set.filter(user=user)
        if access is not None:
            queryset = queryset.filter(type__lte=access)
        return queryset.exists()

    def get_audit_log_data(self):
        return {
            'slug': self.slug,
            'name': self.name,
            'status': self.status,
        }
from unittest import mock
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.contrib.sessions.backends.base import SessionBase
from django.http import HttpRequest, HttpResponse
from utm_tracker.middleware import LeadSourceMiddleware, UtmSessionMiddleware
from utm_tracker.session import SESSION_KEY_UTM_PARAMS
User = get_user_model()
class TestUtmSessionMiddleware:
    """UtmSessionMiddleware should stash parsed UTM/click-id query params
    in the session under SESSION_KEY_UTM_PARAMS."""

    @mock.patch("utm_tracker.middleware.parse_qs")
    def test_middleware(self, mock_utm):
        expected = {
            "utm_medium": "medium",
            "utm_source": "source",
            "utm_campaign": "campaign",
            "utm_term": "term",
            "utm_content": "content",
            "gclid": "1C5CHFA_enGB874GB874",
            "aclk": "2C5CHFA_enGB874GB874",
            "msclkid": "3C5CHFA_enGB874GB874",
            "twclid": "4C5CHFA_enGB874GB874",
            "fbclid": "5C5CHFA_enGB874GB874",
        }
        mock_utm.return_value = dict(expected)
        request = mock.Mock(spec=HttpRequest)
        request.session = SessionBase()
        middleware = UtmSessionMiddleware(lambda r: HttpResponse())
        middleware(request)
        stored = request.session[SESSION_KEY_UTM_PARAMS]
        # Exactly one capture dict per request, containing every param.
        assert len(stored) == 1
        for key, value in expected.items():
            assert stored[0][key] == value

    @mock.patch("utm_tracker.middleware.parse_qs")
    def test_middleware__no_params(self, mock_utm):
        mock_utm.return_value = {}
        request = mock.Mock(spec=HttpRequest)
        request.session = SessionBase()
        middleware = UtmSessionMiddleware(lambda r: HttpResponse())
        middleware(request)
        # Nothing is written to the session when no params were parsed.
        assert SESSION_KEY_UTM_PARAMS not in request.session
class TestLeadSourceMiddleware:
    """LeadSourceMiddleware should flush session UTM params for
    authenticated users only, and tolerate flush failures.

    FIX: the final assert line was fused with non-Python residue; restored.
    """

    @mock.patch("utm_tracker.middleware.dump_utm_params")
    def test_middleware__unauthenticated(self, mock_flush):
        # Anonymous users must not trigger a flush.
        request = mock.Mock(spec=HttpRequest, user=AnonymousUser())
        assert not request.user.is_authenticated
        middleware = LeadSourceMiddleware(lambda r: HttpResponse())
        middleware(request)
        assert mock_flush.call_count == 0

    @mock.patch("utm_tracker.middleware.dump_utm_params")
    def test_middleware__authenticated(self, mock_flush):
        session = mock.Mock(SessionBase)
        request = mock.Mock(spec=HttpRequest, user=User(), session=session)
        middleware = LeadSourceMiddleware(lambda r: HttpResponse())
        middleware(request)
        assert mock_flush.call_count == 1
        mock_flush.assert_called_once_with(request.user, session)

    @mock.patch("utm_tracker.middleware.dump_utm_params")
    def test_middleware__error(self, mock_flush):
        # A failing flush must be swallowed by the middleware, not raised.
        session = mock.Mock(SessionBase)
        request = mock.Mock(spec=HttpRequest, user=User(), session=session)
        mock_flush.side_effect = Exception("Panic")
        middleware = LeadSourceMiddleware(lambda r: HttpResponse())
        middleware(request)
        assert mock_flush.call_count == 1
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.contrib.sessions.backends.base import SessionBase
from django.http import HttpRequest, HttpResponse
from utm_tracker.middleware import LeadSourceMiddleware, UtmSessionMiddleware
from utm_tracker.session import SESSION_KEY_UTM_PARAMS
User = get_user_model()
class TestUtmSessionMiddleware:
    """UtmSessionMiddleware should stash parsed UTM/click-id query params
    in the session under SESSION_KEY_UTM_PARAMS."""

    @mock.patch("utm_tracker.middleware.parse_qs")
    def test_middleware(self, mock_utm):
        request = mock.Mock(spec=HttpRequest)
        request.session = SessionBase()
        # parse_qs is patched to return every tracked parameter at once.
        mock_utm.return_value = {
            "utm_medium": "medium",
            "utm_source": "source",
            "utm_campaign": "campaign",
            "utm_term": "term",
            "utm_content": "content",
            "gclid": "1C5CHFA_enGB874GB874",
            "aclk": "2C5CHFA_enGB874GB874",
            "msclkid": "3C5CHFA_enGB874GB874",
            "twclid": "4C5CHFA_enGB874GB874",
            "fbclid": "5C5CHFA_enGB874GB874",
        }
        middleware = UtmSessionMiddleware(lambda r: HttpResponse())
        middleware(request)
        # Exactly one capture dict per request, containing every param.
        assert len(request.session[SESSION_KEY_UTM_PARAMS]) == 1
        utm_params = request.session[SESSION_KEY_UTM_PARAMS][0]
        assert utm_params["utm_medium"] == "medium"
        assert utm_params["utm_source"] == "source"
        assert utm_params["utm_campaign"] == "campaign"
        assert utm_params["utm_term"] == "term"
        assert utm_params["utm_content"] == "content"
        assert utm_params["gclid"] == "1C5CHFA_enGB874GB874"
        assert utm_params["aclk"] == "2C5CHFA_enGB874GB874"
        assert utm_params["msclkid"] == "3C5CHFA_enGB874GB874"
        assert utm_params["twclid"] == "4C5CHFA_enGB874GB874"
        assert utm_params["fbclid"] == "5C5CHFA_enGB874GB874"

    @mock.patch("utm_tracker.middleware.parse_qs")
    def test_middleware__no_params(self, mock_utm):
        request = mock.Mock(spec=HttpRequest)
        request.session = SessionBase()
        mock_utm.return_value = {}
        middleware = UtmSessionMiddleware(lambda r: HttpResponse())
        middleware(request)
        # Nothing is written to the session when no params were parsed.
        assert SESSION_KEY_UTM_PARAMS not in request.session
class TestLeadSourceMiddleware:
    """LeadSourceMiddleware should flush session UTM params for
    authenticated users only, and tolerate flush failures.

    FIX: the final assert line was fused with non-Python residue; restored.
    """

    @mock.patch("utm_tracker.middleware.dump_utm_params")
    def test_middleware__unauthenticated(self, mock_flush):
        # Anonymous users must not trigger a flush.
        request = mock.Mock(spec=HttpRequest, user=AnonymousUser())
        assert not request.user.is_authenticated
        middleware = LeadSourceMiddleware(lambda r: HttpResponse())
        middleware(request)
        assert mock_flush.call_count == 0

    @mock.patch("utm_tracker.middleware.dump_utm_params")
    def test_middleware__authenticated(self, mock_flush):
        session = mock.Mock(SessionBase)
        request = mock.Mock(spec=HttpRequest, user=User(), session=session)
        middleware = LeadSourceMiddleware(lambda r: HttpResponse())
        middleware(request)
        assert mock_flush.call_count == 1
        mock_flush.assert_called_once_with(request.user, session)

    @mock.patch("utm_tracker.middleware.dump_utm_params")
    def test_middleware__error(self, mock_flush):
        # A failing flush must be swallowed by the middleware, not raised.
        session = mock.Mock(SessionBase)
        request = mock.Mock(spec=HttpRequest, user=User(), session=session)
        mock_flush.side_effect = Exception("Panic")
        middleware = LeadSourceMiddleware(lambda r: HttpResponse())
        middleware(request)
        assert mock_flush.call_count == 1
import torch
from PIL import Image
from torchvision import transforms
from .pose_resnet import *
from operator import itemgetter
import copy
import matplotlib.pyplot as plt
import cv2
import numpy as np
import time
def get_detached(x):
    """Return *x* as a deep-copied NumPy array, detached from the autograd
    graph and moved to CPU.

    FIX: converted from a lambda assignment (PEP 8 discourages binding
    lambdas to names) into a documented def; behavior is unchanged.
    """
    return copy.deepcopy(x.cpu().detach().numpy())


def get_keypoints(pose_layers):
    """For each heatmap in *pose_layers*, yield ``(max_value, (x, y))`` of
    its peak, taken from ``cv2.minMaxLoc`` (items 1 and 3 of its result)."""
    return map(itemgetter(1, 3), [cv2.minMaxLoc(pose_layer) for pose_layer in pose_layers])
# Joint names indexed by heatmap channel (16 keypoints; presumably the MPII
# ordering -- confirm against the training dataset).
JOINTS = ['r-ankle', 'r-knee', 'r-hip', 'l-hip',
          'l-knee', 'l-ankle', 'pelvis', 'thorax',
          'upper-neck', 'head-top', 'r-wrist', 'r-elbow',
          'r-shoulder', 'l-shoulder', 'l-elbow', 'l-wrist']

# Skeleton edges as (from_joint, to_joint) index pairs into JOINTS.
POSE_PAIRS = [
    # UPPER BODY
    [9, 8],
    [8, 7],
    [7, 3],
    [7, 2],
    # LOWER BODY
    [6, 2],
    [2, 1],
    [1, 0],
    [6, 3],
    [3, 4],
    [4, 5],
    # ARMS
    [8, 12],
    [12, 11],
    [11, 10],
    [8, 13],
    [13, 14],
    [14, 15]
]

# One BGR colour per skeleton edge (parallel to POSE_PAIRS; used with cv2).
POSE_PAIRS_COL = [
    # UPPER BODY
    (65, 190, 115),  # Green
    (50, 55, 235),  # Red
    (110, 230, 255),  # Yellow
    (195, 125, 40),  # Blue
    # LOWER BODY
    (180, 75, 160),  # Purple
    (255, 225, 110),  # Cyan
    (65, 190, 115),  # Green
    (180, 75, 160),  # Purple
    (50, 55, 240),  # Red
    (195, 125, 40),  # Blue
    # ARMS
    (180, 75, 160),  # Purple
    (110, 230, 255),  # Yellow
    (195, 125, 40),  # Blue
    (255, 225, 110),  # Cyan
    (50, 55, 240),  # Red
    (65, 190, 115)  # Green
]
class HPEInference():
    """Inference wrapper around a pose-ResNet for 2D human pose estimation.

    Loads weights from ``cfg.TEST.MODEL_FILE`` onto the CPU and provides
    heatmap visualisation, skeleton overlay rendering and ONNX export.
    """

    def __init__(self, cfg):
        self.model = get_pose_net(cfg, is_train=False)
        self.model.load_state_dict(
            torch.load(cfg.TEST.MODEL_FILE, map_location=torch.device('cpu')))
        self.IMAGE_SIZE = cfg.MODEL.IMAGE_SIZE
        # HEATMAP_SIZE is unpacked as (width, height); the rest of the class
        # relies on this order.
        self.OUT_WIDTH, self.OUT_HEIGHT = cfg.MODEL.EXTRA.HEATMAP_SIZE
        self.transform = transforms.Compose([
            transforms.Resize(self.IMAGE_SIZE),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])

    def gen_output(self, img):
        """Run the network on a PIL image and return the per-joint heatmap
        tensor with the batch dim removed, i.e. (num_joints, out_h, out_w).

        FIX: removed an unused shape unpacking and a per-call debug print.
        """
        tr_img = self.transform(img)
        output = self.model(tr_img.unsqueeze(0))
        return output.squeeze(0)

    def heat_map(self, img):
        """Show a 4x4 grid of the 16 joint heatmaps blended over the image."""
        since = time.time()
        plt.figure(figsize=(15, 15))
        output = self.gen_output(img)
        for idx, pose_layer in enumerate(get_detached(output)):
            plt.subplot(4, 4, idx + 1)
            plt.title(f'{idx} - {JOINTS[idx]}')
            plt.imshow(img.resize((self.OUT_WIDTH, self.OUT_HEIGHT)),
                       cmap='gray', interpolation='bicubic')
            plt.imshow(pose_layer, alpha=0.5, cmap='jet',
                       interpolation='bicubic')
            plt.axis('off')
        plt.show()
        time_elapsed = time.time() - since
        print('Inference complete in {:4.2f}ms'.format(time_elapsed * 1000))

    def vis_pose(self, img, threshold=0.5):
        """Overlay the predicted skeleton on *img*; returns a PIL Image.

        Joints whose heatmap peak is below *threshold* are not drawn.
        """
        since = time.time()
        output = self.gen_output(img)
        image_p = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
        IMG_HEIGHT, IMG_WIDTH, _ = image_p.shape
        # BUGFIX: scale x by the heatmap WIDTH and y by the heatmap HEIGHT.
        # The old code indexed OUT_SHAPE=(h, w) the wrong way round -- a
        # latent bug, invisible while the heatmap is square.
        scale_x = IMG_WIDTH / self.OUT_WIDTH
        scale_y = IMG_HEIGHT / self.OUT_HEIGHT
        pose_layers = get_detached(x=output)
        key_points = list(get_keypoints(pose_layers=pose_layers))
        key_points = [(thres, (int(x * scale_x), int(y * scale_y)))
                      for thres, (x, y) in key_points]
        # One colour per skeleton edge; POSE_PAIRS and POSE_PAIRS_COL are
        # parallel lists, so the edge index selects the colour.
        for i, (from_j, to_j) in enumerate(POSE_PAIRS):
            from_thr, (from_x_j, from_y_j) = key_points[from_j]
            to_thr, (to_x_j, to_y_j) = key_points[to_j]
            if from_thr > threshold and to_thr > threshold:
                # this is a joint connection, plot a line
                cv2.line(image_p, (from_x_j, from_y_j), (to_x_j, to_y_j),
                         POSE_PAIRS_COL[i], 3)
        for thres, (x, y) in key_points:
            if thres > threshold:
                # this is a joint
                cv2.ellipse(image_p, (x, y), (5, 5), 0, 0, 360,
                            (255, 255, 255), cv2.FILLED)
        time_elapsed = time.time() - since
        print('Inference complete in {:4.2f}ms'.format(time_elapsed * 1000))
        # RGB2BGR and BGR2RGB are the identical channel swap, so this
        # correctly converts the BGR canvas back to RGB for PIL.
        return Image.fromarray(cv2.cvtColor(image_p, cv2.COLOR_RGB2BGR))

    def export_onnx_model(self, model_name="simple_pose_estimation.onnx",
                          quantization=False):
        """Export the network to ONNX; optionally also write an 8-bit
        quantized copy alongside it."""
        torch_model = copy.deepcopy(self.model)
        batch_size = 1
        rand_inp = torch.randn(batch_size, 3, *self.IMAGE_SIZE,
                               requires_grad=True)
        torch.onnx.export(
            torch_model,            # model being run
            rand_inp,               # model input
            model_name,             # output file
            export_params=True,     # store trained weights in the file
            opset_version=10,
            do_constant_folding=True,
            input_names=['input'],
            output_names=['output'],
            # variable-length batch axis
            dynamic_axes={'input': {0: 'batch_size'},
                          'output': {0: 'batch_size'}})
        if quantization:
            # Lazy imports: onnx/onnxruntime are only needed on this path.
            import onnx
            from onnxruntime.quantization import quantize
            onnx_model = onnx.load(model_name)
            quantized_model = quantize(onnx_model)
            onnx.save(quantized_model,
                      model_name.replace(".onnx", ".8bit_quantized.onnx"))
from PIL import Image
from torchvision import transforms
from .pose_resnet import *
from operator import itemgetter
import copy
import matplotlib.pyplot as plt
import cv2
import numpy as np
import time
# Detach a tensor from the autograd graph and return a deep-copied NumPy array.
get_detached = lambda x: copy.deepcopy(x.cpu().detach().numpy())
# For each heatmap, yield (max_value, (x, y)) of its peak via cv2.minMaxLoc
# (items 1 and 3 of the minMaxLoc result).
get_keypoints = lambda pose_layers: map(itemgetter(1, 3), [cv2.minMaxLoc(pose_layer) for pose_layer in pose_layers])
# Joint names indexed by heatmap channel (16 keypoints; presumably the MPII
# ordering -- confirm against the training dataset).
JOINTS = ['r-ankle', 'r-knee', 'r-hip', 'l-hip',
          'l-knee', 'l-ankle', 'pelvis', 'thorax',
          'upper-neck', 'head-top', 'r-wrist', 'r-elbow',
          'r-shoulder', 'l-shoulder', 'l-elbow', 'l-wrist']

# Skeleton edges as (from_joint, to_joint) index pairs into JOINTS.
POSE_PAIRS = [
    # UPPER BODY
    [9, 8],
    [8, 7],
    [7, 3],
    [7, 2],
    # LOWER BODY
    [6, 2],
    [2, 1],
    [1, 0],
    [6, 3],
    [3, 4],
    [4, 5],
    # ARMS
    [8, 12],
    [12, 11],
    [11, 10],
    [8, 13],
    [13, 14],
    [14, 15]
]

# One BGR colour per skeleton edge (parallel to POSE_PAIRS; used with cv2).
POSE_PAIRS_COL = [
    # UPPER BODY
    (65, 190, 115),  # Green
    (50, 55, 235),  # Red
    (110, 230, 255),  # Yellow
    (195, 125, 40),  # Blue
    # LOWER BODY
    (180, 75, 160),  # Purple
    (255, 225, 110),  # Cyan
    (65, 190, 115),  # Green
    (180, 75, 160),  # Purple
    (50, 55, 240),  # Red
    (195, 125, 40),  # Blue
    # ARMS
    (180, 75, 160),  # Purple
    (110, 230, 255),  # Yellow
    (195, 125, 40),  # Blue
    (255, 225, 110),  # Cyan
    (50, 55, 240),  # Red
    (65, 190, 115)  # Green
]
class HPEInference():
""" Docstring
"""
    def __init__(self,cfg):
        """Build the pose network and load CPU weights from cfg.TEST.MODEL_FILE."""
        self.model = get_pose_net(cfg, is_train=False)
        self.model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE, map_location=torch.device('cpu')))
        self.IMAGE_SIZE = cfg.MODEL.IMAGE_SIZE
        # HEATMAP_SIZE is unpacked as (width, height) here -- confirm the
        # config stores it in that order.
        self.OUT_WIDTH,self.OUT_HEIGHT = cfg.MODEL.EXTRA.HEATMAP_SIZE
        # Standard ImageNet normalization applied to every input image.
        self.transform = transforms.Compose([
            transforms.Resize(self.IMAGE_SIZE),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
        ])
    def gen_output(self,img):
        """Run the network on a PIL image and return the per-joint heatmap
        tensor with the batch dim removed, i.e. (num_joints, out_h, out_w)."""
        tr_img = self.transform(img)
        output = self.model(tr_img.unsqueeze(0))
        output = output.squeeze(0)
        # NOTE(review): these locals are unused and the print is debug
        # leftover; kept as-is in this documentation-only pass.
        _, OUT_HEIGHT, OUT_WIDTH = output.shape
        print(output.shape)
        return output
    def heat_map(self,img):
        """Show a 4x4 grid of the 16 joint heatmaps blended over the image."""
        since = time.time()
        plt.figure(figsize=(15, 15))
        output = self.gen_output(img)
        for idx, pose_layer in enumerate(get_detached(output)):
            # print(pose_layer.shape)
            plt.subplot(4, 4, idx + 1)
            plt.title(f'{idx} - {JOINTS[idx]}')
            # Resize the input to heatmap resolution so both layers align.
            plt.imshow(img.resize((self.OUT_WIDTH, self.OUT_HEIGHT)), cmap='gray', interpolation='bicubic')
            plt.imshow(pose_layer, alpha=0.5, cmap='jet', interpolation='bicubic')
            plt.axis('off')
        plt.show()
        time_elapsed = time.time() - since
        print('Inference complete in {:4.2f}ms'.format(
            time_elapsed*1000))
def vis_pose(self,img,threshold = 0.5):
since = time.time()
output = self.gen_output(img)
THRESHOLD = threshold
image_p = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
OUT_SHAPE = (self.OUT_HEIGHT, self.OUT_WIDTH)
IMG_HEIGHT, IMG_WIDTH, _ = image_p.shape
scale_x = IMG_WIDTH / OUT_SHAPE[0]
scale_y = IMG_HEIGHT / OUT_SHAPE[1]
pose_layers = get_detached(x=output)
key_points = list(get_keypoints(pose_layers=pose_layers))
key_points = [(thres,(int(x*scale_x),int(y*scale_y))) for thres,(x,y) in key_points]
i=0
for from_j, to_j in POSE_PAIRS:
from_thr, (from_x_j, from_y_j) = key_points[from_j]
to_thr, (to_x_j, to_y_j) = key_points[to_j]
if from_thr > THRESHOLD and to_thr > THRESHOLD:
# this is a joint connection, plot a line
cv2.line(image_p, (from_x_j, from_y_j), (to_x_j, to_y_j), POSE_PAIRS_COL[i], 3)
i+=1
for thres,(x,y) in key_points:
if thres > THRESHOLD:
# this is a joint
cv2.ellipse(image_p, (x, y), (5, 5), 0, 0, 360, (255, 255, 255), cv2.FILLED)
time_elapsed = time.time() - since
print('Inference complete in {:4.2f}ms'.format(
time_elapsed*1000))
return Image.fromarray(cv2.cvtColor(image_p, cv2.COLOR_RGB2BGR))
def export_onnx_model(self, model_name = "simple_pose_estimation.onnx",quantization = False):
torch_model = copy.deepcopy(self.model)
batch_size = 1
rand_inp = torch.randn(batch_size, 3, *self.IMAGE_SIZE, requires_grad=True)
# Export the model
torch.onnx.export(torch_model, # model being run
rand_inp, # model input (or a tuple for multiple inputs)
model_name, # where to save the model (can be a file or file-like object)
export_params=True, # store the trained parameter weights inside the model file
opset_version=10, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names = ['input'], # the model's input names
output_names = ['output'], # the model's output names
dynamic_axes={'input' : {0 : 'batch_size'}, # variable lenght axes
'output' : {0 : 'batch_size'}})
if quantization:
import onnx
from onnxruntime.quantization import quantize
onnx_model = onnx.load(model_name)
quantized_model = quantize(onnx_model)
onnx.save(quantized_model, model_name.replace(".onnx",".8bit_quantized.onnx")) | 0.490236 | 0.322419 |
from sklearn.tree import DecisionTreeRegressor as SKDecisionTreeRegressor
from skopt.space import Integer
from blocktorch.model_family import ModelFamily
from blocktorch.pipelines.components.estimators import Estimator
from blocktorch.problem_types import ProblemTypes
from .blockwise_voting_regressor import BlockwiseVotingRegressor
class DecisionTreeRegressor(Estimator):
    """Decision Tree Regressor.

    Args:
        criterion ({"mse", "friedman_mse", "mae", "poisson"}): Function used to
            measure the quality of a split. Supported criteria:

            - "mse": mean squared error (equal to variance reduction as the
              feature-selection criterion); minimizes the L2 loss using the
              mean of each terminal node.
            - "friedman_mse": mean squared error with Friedman's improvement
              score for potential splits.
            - "mae": mean absolute error; minimizes the L1 loss using the
              median of each terminal node.
            - "poisson": reduction in Poisson deviance to find splits.
        max_features (int, float or {"auto", "sqrt", "log2"}): Number of
            features considered when looking for the best split:

            - If int, consider max_features features at each split.
            - If float, max_features is a fraction and
              int(max_features * n_features) features are considered.
            - If "auto" or "sqrt", max_features=sqrt(n_features).
            - If "log2", max_features=log2(n_features).
            - If None, max_features=n_features.

            The search for a split does not stop until at least one valid
            partition of the node samples is found, even if it requires
            inspecting more than max_features features.
        max_depth (int): Maximum depth of the tree. Defaults to 6.
        min_samples_split (int or float): Minimum number of samples required
            to split an internal node:

            - If int, treat min_samples_split as the minimum count.
            - If float, min_samples_split is a fraction and
              ceil(min_samples_split * n_samples) is the minimum per split.

            Defaults to 2.
        min_weight_fraction_leaf (float): Minimum weighted fraction of the sum
            total of weights (of all input samples) required at a leaf node.
            Defaults to 0.0.
        random_seed (int): Seed for the random number generator. Defaults to 0.
    """

    name = "Decision Tree Regressor"
    hyperparameter_ranges = {
        "criterion": ["mse", "friedman_mse", "mae"],
        "max_features": ["auto", "sqrt", "log2"],
        "max_depth": Integer(4, 10),
    }
    """{
        "criterion": ["mse", "friedman_mse", "mae"],
        "max_features": ["auto", "sqrt", "log2"],
        "max_depth": Integer(4, 10),
    }"""
    model_family = ModelFamily.DECISION_TREE
    """ModelFamily.DECISION_TREE"""
    supported_problem_types = [
        ProblemTypes.REGRESSION,
        ProblemTypes.TIME_SERIES_REGRESSION,
    ]
    """[
        ProblemTypes.REGRESSION,
        ProblemTypes.TIME_SERIES_REGRESSION,
    ]"""

    def __init__(
        self,
        criterion="mse",
        max_features="auto",
        max_depth=6,
        min_samples_split=2,
        min_weight_fraction_leaf=0.0,
        random_seed=0,
        **kwargs,
    ):
        # Collect the named hyperparameters, then let any extra kwargs
        # override or extend them.
        parameters = {
            "criterion": criterion,
            "max_features": max_features,
            "max_depth": max_depth,
            "min_samples_split": min_samples_split,
            "min_weight_fraction_leaf": min_weight_fraction_leaf,
        }
        parameters.update(kwargs)
        # Build the underlying sklearn tree and wrap it in the project's
        # blockwise voting ensemble wrapper.
        base_tree = SKDecisionTreeRegressor(random_state=random_seed,
                                            **parameters)
        component = BlockwiseVotingRegressor(base_tree)
        super().__init__(
            parameters=parameters,
            component_obj=component,
            random_seed=random_seed,
        )
from skopt.space import Integer
from blocktorch.model_family import ModelFamily
from blocktorch.pipelines.components.estimators import Estimator
from blocktorch.problem_types import ProblemTypes
from .blockwise_voting_regressor import BlockwiseVotingRegressor
class DecisionTreeRegressor(Estimator):
    """Decision Tree Regressor.

    Args:
        criterion ({"mse", "friedman_mse", "mae", "poisson"}): Function used to
            measure the quality of a split. Supported criteria:

            - "mse": mean squared error (equal to variance reduction as the
              feature-selection criterion); minimizes the L2 loss using the
              mean of each terminal node.
            - "friedman_mse": mean squared error with Friedman's improvement
              score for potential splits.
            - "mae": mean absolute error; minimizes the L1 loss using the
              median of each terminal node.
            - "poisson": reduction in Poisson deviance to find splits.
        max_features (int, float or {"auto", "sqrt", "log2"}): Number of
            features considered when looking for the best split:

            - If int, consider max_features features at each split.
            - If float, max_features is a fraction and
              int(max_features * n_features) features are considered.
            - If "auto" or "sqrt", max_features=sqrt(n_features).
            - If "log2", max_features=log2(n_features).
            - If None, max_features=n_features.

            The search for a split does not stop until at least one valid
            partition of the node samples is found, even if it requires
            inspecting more than max_features features.
        max_depth (int): Maximum depth of the tree. Defaults to 6.
        min_samples_split (int or float): Minimum number of samples required
            to split an internal node:

            - If int, treat min_samples_split as the minimum count.
            - If float, min_samples_split is a fraction and
              ceil(min_samples_split * n_samples) is the minimum per split.

            Defaults to 2.
        min_weight_fraction_leaf (float): Minimum weighted fraction of the sum
            total of weights (of all input samples) required at a leaf node.
            Defaults to 0.0.
        random_seed (int): Seed for the random number generator. Defaults to 0.
    """

    name = "Decision Tree Regressor"
    hyperparameter_ranges = {
        "criterion": ["mse", "friedman_mse", "mae"],
        "max_features": ["auto", "sqrt", "log2"],
        "max_depth": Integer(4, 10),
    }
    """{
        "criterion": ["mse", "friedman_mse", "mae"],
        "max_features": ["auto", "sqrt", "log2"],
        "max_depth": Integer(4, 10),
    }"""
    model_family = ModelFamily.DECISION_TREE
    """ModelFamily.DECISION_TREE"""
    supported_problem_types = [
        ProblemTypes.REGRESSION,
        ProblemTypes.TIME_SERIES_REGRESSION,
    ]
    """[
        ProblemTypes.REGRESSION,
        ProblemTypes.TIME_SERIES_REGRESSION,
    ]"""

    def __init__(
        self,
        criterion="mse",
        max_features="auto",
        max_depth=6,
        min_samples_split=2,
        min_weight_fraction_leaf=0.0,
        random_seed=0,
        **kwargs,
    ):
        # Collect the named hyperparameters, then let any extra kwargs
        # override or extend them.
        parameters = {
            "criterion": criterion,
            "max_features": max_features,
            "max_depth": max_depth,
            "min_samples_split": min_samples_split,
            "min_weight_fraction_leaf": min_weight_fraction_leaf,
        }
        parameters.update(kwargs)
        # Build the underlying sklearn tree and wrap it in the project's
        # blockwise voting ensemble wrapper.
        base_tree = SKDecisionTreeRegressor(random_state=random_seed,
                                            **parameters)
        component = BlockwiseVotingRegressor(base_tree)
        super().__init__(
            parameters=parameters,
            component_obj=component,
            random_seed=random_seed,
        )
"""Compute minibatch blobs for training a Fast R-CNN network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import numpy.random as npr
from scipy.misc import imread
from model.utils.config import cfg
from model.utils.blob import prep_im_for_blob, im_list_to_blob
import pdb
def get_minibatch(roidb, num_classes, target_size):
    """Given a roidb, construct a minibatch sampled from it."""
    n_images = len(roidb)
    # The configured batch size must be a multiple of the images per batch.
    assert cfg.TRAIN.BATCH_SIZE % n_images == 0, \
        'num_images ({}) must divide BATCH_SIZE ({})'. \
        format(n_images, cfg.TRAIN.BATCH_SIZE)
    # Build the image blob (formatted for the network) and per-image scales.
    im_blob, im_scales = _get_image_blob(roidb, target_size)
    assert len(im_scales) == 1, "Single batch only"
    assert len(roidb) == 1, "Single batch only"
    entry = roidb[0]
    # gt boxes: (x1, y1, x2, y2, cls)
    if cfg.TRAIN.USE_ALL_GT:
        # Include all ground truth boxes.
        gt_inds = np.where(entry['gt_classes'] != 0)[0]
    else:
        # For COCO ground truth, exclude boxes flagged as ''iscrowd''.
        gt_inds = np.where(
            (entry['gt_classes'] != 0)
            & np.all(entry['gt_overlaps'].toarray() > -1.0, axis=1))[0]
    gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
    gt_boxes[:, 0:4] = entry['boxes'][gt_inds, :] * im_scales[0]
    gt_boxes[:, 4] = entry['gt_classes'][gt_inds]
    return {
        'data': im_blob,
        'gt_boxes': gt_boxes,
        'im_info': np.array(
            [[im_blob.shape[1], im_blob.shape[2], im_scales[0]]],
            dtype=np.float32),
        'img_id': entry['img_id'],
    }
def _get_image_blob(roidb, target_size):
    """Builds an input blob from the images in the roidb at the specified
    scales.
    """
    processed_ims = []
    im_scales = []
    for i, entry in enumerate(roidb):
        im = imread(entry['image'])
        if im.ndim == 2:
            # Grayscale image: replicate the single channel three times.
            im = np.concatenate([im[:, :, np.newaxis]] * 3, axis=2)
        # Flip the channel order (rgb -> bgr) to match the cv2 convention
        # the pretrained weights expect.
        im = im[:, :, ::-1]
        if entry['flipped']:
            # Horizontally mirrored sample from data augmentation.
            im = im[:, ::-1, :]
        im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size[i],
                                        cfg.TRAIN.MAX_SIZE)
        im_scales.append(im_scale)
        processed_ims.append(im)
    # Stack the processed images into a single network-input blob.
    return im_list_to_blob(processed_ims), im_scales
"""Compute minibatch blobs for training a Fast R-CNN network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import numpy.random as npr
from scipy.misc import imread
from model.utils.config import cfg
from model.utils.blob import prep_im_for_blob, im_list_to_blob
import pdb
def get_minibatch(roidb, num_classes, target_size):
    """Given a roidb, construct a minibatch sampled from it."""
    n_images = len(roidb)
    # The configured batch size must be a multiple of the images per batch.
    assert cfg.TRAIN.BATCH_SIZE % n_images == 0, \
        'num_images ({}) must divide BATCH_SIZE ({})'. \
        format(n_images, cfg.TRAIN.BATCH_SIZE)
    # Build the image blob (formatted for the network) and per-image scales.
    im_blob, im_scales = _get_image_blob(roidb, target_size)
    assert len(im_scales) == 1, "Single batch only"
    assert len(roidb) == 1, "Single batch only"
    entry = roidb[0]
    # gt boxes: (x1, y1, x2, y2, cls)
    if cfg.TRAIN.USE_ALL_GT:
        # Include all ground truth boxes.
        gt_inds = np.where(entry['gt_classes'] != 0)[0]
    else:
        # For COCO ground truth, exclude boxes flagged as ''iscrowd''.
        gt_inds = np.where(
            (entry['gt_classes'] != 0)
            & np.all(entry['gt_overlaps'].toarray() > -1.0, axis=1))[0]
    gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
    gt_boxes[:, 0:4] = entry['boxes'][gt_inds, :] * im_scales[0]
    gt_boxes[:, 4] = entry['gt_classes'][gt_inds]
    return {
        'data': im_blob,
        'gt_boxes': gt_boxes,
        'im_info': np.array(
            [[im_blob.shape[1], im_blob.shape[2], im_scales[0]]],
            dtype=np.float32),
        'img_id': entry['img_id'],
    }
def _get_image_blob(roidb, target_size):
    """Builds an input blob from the images in the roidb at the specified
    scales.
    """
    processed_ims = []
    im_scales = []
    for i, entry in enumerate(roidb):
        im = imread(entry['image'])
        if im.ndim == 2:
            # Grayscale image: replicate the single channel three times.
            im = np.concatenate([im[:, :, np.newaxis]] * 3, axis=2)
        # Flip the channel order (rgb -> bgr) to match the cv2 convention
        # the pretrained weights expect.
        im = im[:, :, ::-1]
        if entry['flipped']:
            # Horizontally mirrored sample from data augmentation.
            im = im[:, ::-1, :]
        im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size[i],
                                        cfg.TRAIN.MAX_SIZE)
        im_scales.append(im_scale)
        processed_ims.append(im)
    # Stack the processed images into a single network-input blob.
    return im_list_to_blob(processed_ims), im_scales
# Standard library imports
from collections import OrderedDict
import os
import sys
from typing import Union, Optional, Tuple, List, Dict
import uuid
# Third part imports
from qtpy.QtCore import QEvent, QObject, QSize, Qt
from qtpy.QtWidgets import (
QAction, QProxyStyle, QStyle, QToolBar, QToolButton, QWidget)
# Local imports
from spyder.api.exceptions import SpyderAPIError
from spyder.api.translations import get_translation
from spyder.utils.icon_manager import ima
from spyder.utils.qthelpers import SpyderAction
from spyder.utils.stylesheet import (
APP_TOOLBAR_STYLESHEET, PANES_TOOLBAR_STYLESHEET)
# Translations
_ = get_translation('spyder')

# Generic type annotations
# An entry that can be placed on a toolbar: a Spyder action or any Qt widget.
ToolbarItem = Union[SpyderAction, QWidget]
# (item, section, before, before_section) tuple queued in
# SpyderToolbar._pending_items until the `before` anchor item is added.
ToolbarItemEntry = Tuple[ToolbarItem, Optional[str], Optional[str],
                         Optional[str]]
# ---- Constants
# ----------------------------------------------------------------------------
class ToolbarLocation:
    """Qt dock areas where an application toolbar can be placed."""
    Top = Qt.TopToolBarArea
    Bottom = Qt.BottomToolBarArea
# ---- Event filters
# ----------------------------------------------------------------------------
class ToolTipFilter(QObject):
    """
    Filter tool tip events on toolbuttons.
    """

    def eventFilter(self, obj, event):
        """Intercept ToolTip events for toolbuttons whose action has no tip."""
        event_type = event.type()
        # Only toolbuttons expose a default action to inspect.
        action = obj.defaultAction() if isinstance(obj, QToolButton) else None
        if event_type == QEvent.ToolTip and action is not None:
            if action.tip is None:
                # NOTE(review): returns the `text_beside_icon` attribute
                # rather than an explicit bool — presumably relying on its
                # truthiness to mark the event handled (suppressing the
                # tooltip); confirm intent.
                return action.text_beside_icon
        return QObject.eventFilter(self, obj, event)
# ---- Styles
# ----------------------------------------------------------------------------
class ToolbarStyle(QProxyStyle):
    """Proxy style that adjusts pixel metrics for Spyder toolbars."""

    # The toolbar type. This can be 'Application' or 'MainWidget'
    TYPE = None

    def pixelMetric(self, pm, option, widget):
        """
        Adjust size of toolbar extension button (in pixels).
        From https://stackoverflow.com/a/27042352/438386
        """
        # Important: These values need to be updated in case we change the size
        # of our toolbar buttons in utils/stylesheet.py. That's because Qt only
        # allow to set them in pixels here, not em's.
        if pm == QStyle.PM_ToolBarExtensionExtent:
            if self.TYPE == 'Application':
                # Per-platform extension-button width for the app toolbar.
                if os.name == 'nt':
                    return 40
                elif sys.platform == 'darwin':
                    return 54
                else:
                    return 57
            elif self.TYPE == 'MainWidget':
                # Slightly narrower buttons on pane (dockable-widget) toolbars.
                if os.name == 'nt':
                    return 36
                elif sys.platform == 'darwin':
                    return 42
                else:
                    return 44
            else:
                # Unexpected TYPE: warn and fall through to the default metric.
                print("Unknown toolbar style type")  # spyder: test-skip
        # All other metrics (and unknown TYPE) defer to the wrapped style.
        return super().pixelMetric(pm, option, widget)
# ---- Toolbars
# ----------------------------------------------------------------------------
class SpyderToolbar(QToolBar):
    """
    Spyder Toolbar.
    This class provides toolbars with some predefined functionality.
    """

    def __init__(self, parent, title):
        super().__init__(parent=parent)
        # Ordered mapping: section id -> list of actions/widgets in it.
        self._section_items = OrderedDict()
        # item id -> item, used for `before` lookups and removal.
        self._item_map = {}  # type: Dict[str, ToolbarItem]
        # Entries whose `before` anchor has not been added yet, keyed by the
        # anchor's id; they are replayed once the anchor arrives (see the
        # end of add_item).
        self._pending_items = {}  # type: Dict[str, List[ToolbarItemEntry]]
        self._title = title
        self._default_section = "default_section"
        self.setWindowTitle(title)
        # Set icon for extension button.
        # From https://stackoverflow.com/a/55412455/438386
        ext_button = self.findChild(QToolButton, "qt_toolbar_ext_button")
        ext_button.setIcon(ima.icon('toolbar_ext_button'))
        ext_button.setToolTip(_("More"))

    def add_item(self, action_or_widget: ToolbarItem,
                 section: Optional[str] = None, before: Optional[str] = None,
                 before_section: Optional[str] = None, omit_id: bool = False):
        """
        Add action or widget item to given toolbar `section`.
        Parameters
        ----------
        item: SpyderAction or QWidget
            The item to add to the `toolbar`.
        toolbar_id: str or None
            The application toolbar unique string identifier.
        section: str or None
            The section id in which to insert the `item` on the `toolbar`.
        before: str or None
            Make the item appear before another given item.
        before_section: str or None
            Make the item defined section appear before another given section
            (must be already defined).
        omit_id: bool
            If True, then the toolbar will check if the item to add declares an
            id, False otherwise. This flag exists only for items added on
            Spyder 4 plugins. Default: False
        """
        # Resolve the item's unique id: SpyderActions carry `action_id`,
        # widgets carry an `ID` class attribute.
        item_id = None
        if (isinstance(action_or_widget, SpyderAction) or
                hasattr(action_or_widget, 'action_id')):
            item_id = action_or_widget.action_id
        elif hasattr(action_or_widget, 'ID'):
            item_id = action_or_widget.ID
        if not omit_id and item_id is None and action_or_widget is not None:
            raise SpyderAPIError(
                f'Item {action_or_widget} must declare an ID attribute.')
        if before is not None:
            if before not in self._item_map:
                # Anchor not added yet: queue this entry until it shows up.
                before_pending_items = self._pending_items.get(before, [])
                before_pending_items.append(
                    (action_or_widget, section, before, before_section))
                self._pending_items[before] = before_pending_items
                return
            else:
                # Replace the anchor id with the anchor item itself.
                before = self._item_map[before]
        if section is None:
            section = self._default_section
        action_or_widget._section = section
        if before is not None:
            # Inherit the anchor's section when none was given explicitly.
            if section == self._default_section:
                action_or_widget._section = before._section
                section = before._section
        if section not in self._section_items:
            self._section_items[section] = [action_or_widget]
        else:
            if before is not None:
                # Rebuild the section list, inserting the new item just
                # before its anchor.
                new_actions_or_widgets = []
                for act_or_wid in self._section_items[section]:
                    if act_or_wid == before:
                        new_actions_or_widgets.append(action_or_widget)
                    new_actions_or_widgets.append(act_or_wid)
                self._section_items[section] = new_actions_or_widgets
            else:
                self._section_items[section].append(action_or_widget)
        if (before_section is not None and
                before_section in self._section_items):
            # Reorder sections so this item's section precedes before_section.
            new_sections_keys = []
            for sec in self._section_items.keys():
                if sec == before_section:
                    new_sections_keys.append(section)
                if sec != section:
                    new_sections_keys.append(sec)
            self._section_items = OrderedDict(
                (section_key, self._section_items[section_key])
                for section_key in new_sections_keys)
        if item_id is not None:
            self._item_map[item_id] = action_or_widget
        if item_id in self._pending_items:
            # Replay any entries that were waiting for this item as anchor.
            item_pending = self._pending_items.pop(item_id)
            for item, section, before, before_section in item_pending:
                self.add_item(item, section=section, before=before,
                              before_section=before_section)

    def remove_item(self, item_id: str):
        """Remove action or widget from toolbar by id."""
        item = self._item_map.pop(item_id)
        for section in list(self._section_items.keys()):
            section_items = self._section_items[section]
            if item in section_items:
                section_items.remove(item)
                # Drop the section entirely once it becomes empty.
                if len(section_items) == 0:
                    self._section_items.pop(section)
        # Rebuild the toolbar from the updated section map.
        self.clear()
        self._render()

    def _render(self):
        """
        Create the toolbar taking into account sections and locations.
        This method is called once on widget setup.
        """
        sec_items = []
        for sec, items in self._section_items.items():
            for item in items:
                sec_items.append([sec, item])
            # Separator after each section.
            sep = QAction(self)
            sep.setSeparator(True)
            sec_items.append((None, sep))
        if sec_items:
            # Drop the trailing separator.
            sec_items.pop()
        for (sec, item) in sec_items:
            if isinstance(item, QAction):
                add_method = super().addAction
            else:
                add_method = super().addWidget
            add_method(item)
            if isinstance(item, QAction):
                text_beside_icon = getattr(item, 'text_beside_icon', False)
                widget = self.widgetForAction(item)
                if text_beside_icon:
                    widget.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
                if item.isCheckable():
                    widget.setCheckable(True)
class ApplicationToolbar(SpyderToolbar):
    """
    Spyder Main application Toolbar.
    """

    ID = None
    """
    Unique string toolbar identifier.
    This is used by Qt to be able to save and restore the state of widgets.
    """

    def __init__(self, parent, title):
        super().__init__(parent=parent, title=title)
        # Apply the 'Application' pixel metrics (see ToolbarStyle) and the
        # app-toolbar stylesheet.
        self._style = ToolbarStyle(None)
        self._style.TYPE = 'Application'
        self._style.setParent(self)
        self.setStyle(self._style)
        self.setStyleSheet(str(APP_TOOLBAR_STYLESHEET))
class MainWidgetToolbar(SpyderToolbar):
    """
    Spyder Widget toolbar class.
    A toolbar used in Spyder dockable plugins to add internal toolbars
    to their interface.
    """

    ID = None
    """
    Unique string toolbar identifier.
    """

    def __init__(self, parent=None, title=None):
        super().__init__(parent, title=title or '')
        self._icon_size = QSize(16, 16)
        # Setup
        # Unique object name so Qt can persist each toolbar's state.
        self.setObjectName("main_widget_toolbar_{}".format(
            str(uuid.uuid4())[:8]))
        self.setFloatable(False)
        self.setMovable(False)
        self.setContextMenuPolicy(Qt.PreventContextMenu)
        self.setIconSize(self._icon_size)
        # Apply the 'MainWidget' pixel metrics (see ToolbarStyle) and the
        # pane-toolbar stylesheet.
        self._style = ToolbarStyle(None)
        self._style.TYPE = 'MainWidget'
        self._style.setParent(self)
        self.setStyle(self._style)
        self.setStyleSheet(str(PANES_TOOLBAR_STYLESHEET))
        # Filter used to suppress tooltips for actions without a tip.
        self._filter = ToolTipFilter()

    def set_icon_size(self, icon_size):
        # Keep the cached size and the widget's actual icon size in sync.
        self._icon_size = icon_size
        self.setIconSize(icon_size)

    def _render(self):
        """
        Create the toolbar taking into account the sections and locations.
        This method is called once on widget setup.
        """
        sec_items = []
        for sec, items in self._section_items.items():
            for item in items:
                sec_items.append([sec, item])
            # Separator after each section.
            sep = QAction(self)
            sep.setSeparator(True)
            sec_items.append((None, sep))
        if sec_items:
            # Drop the trailing separator.
            sec_items.pop()
        for (sec, item) in sec_items:
            if isinstance(item, QAction):
                add_method = super().addAction
            else:
                add_method = super().addWidget
            add_method(item)
            if isinstance(item, QAction):
                widget = self.widgetForAction(item)
                # Install the tooltip filter on the backing toolbutton.
                widget.installEventFilter(self._filter)
                text_beside_icon = getattr(item, 'text_beside_icon', False)
                if text_beside_icon:
                    widget.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
                if item.isCheckable():
                    widget.setCheckable(True)
from collections import OrderedDict
import os
import sys
from typing import Union, Optional, Tuple, List, Dict
import uuid
# Third part imports
from qtpy.QtCore import QEvent, QObject, QSize, Qt
from qtpy.QtWidgets import (
QAction, QProxyStyle, QStyle, QToolBar, QToolButton, QWidget)
# Local imports
from spyder.api.exceptions import SpyderAPIError
from spyder.api.translations import get_translation
from spyder.utils.icon_manager import ima
from spyder.utils.qthelpers import SpyderAction
from spyder.utils.stylesheet import (
APP_TOOLBAR_STYLESHEET, PANES_TOOLBAR_STYLESHEET)
# Translations
_ = get_translation('spyder')

# Generic type annotations
# An entry that can be placed on a toolbar: a Spyder action or any Qt widget.
ToolbarItem = Union[SpyderAction, QWidget]
# (item, section, before, before_section) tuple queued in
# SpyderToolbar._pending_items until the `before` anchor item is added.
ToolbarItemEntry = Tuple[ToolbarItem, Optional[str], Optional[str],
                         Optional[str]]
# ---- Constants
# ----------------------------------------------------------------------------
class ToolbarLocation:
    """Qt dock areas where an application toolbar can be placed."""
    Top = Qt.TopToolBarArea
    Bottom = Qt.BottomToolBarArea
# ---- Event filters
# ----------------------------------------------------------------------------
class ToolTipFilter(QObject):
    """
    Filter tool tip events on toolbuttons.
    """

    def eventFilter(self, obj, event):
        """Intercept ToolTip events for toolbuttons whose action has no tip."""
        event_type = event.type()
        # Only toolbuttons expose a default action to inspect.
        action = obj.defaultAction() if isinstance(obj, QToolButton) else None
        if event_type == QEvent.ToolTip and action is not None:
            if action.tip is None:
                # NOTE(review): returns the `text_beside_icon` attribute
                # rather than an explicit bool — presumably relying on its
                # truthiness to mark the event handled (suppressing the
                # tooltip); confirm intent.
                return action.text_beside_icon
        return QObject.eventFilter(self, obj, event)
# ---- Styles
# ----------------------------------------------------------------------------
class ToolbarStyle(QProxyStyle):
    """Proxy style that adjusts pixel metrics for Spyder toolbars."""

    # The toolbar type. This can be 'Application' or 'MainWidget'
    TYPE = None

    def pixelMetric(self, pm, option, widget):
        """
        Adjust size of toolbar extension button (in pixels).
        From https://stackoverflow.com/a/27042352/438386
        """
        # Important: These values need to be updated in case we change the size
        # of our toolbar buttons in utils/stylesheet.py. That's because Qt only
        # allow to set them in pixels here, not em's.
        if pm == QStyle.PM_ToolBarExtensionExtent:
            if self.TYPE == 'Application':
                # Per-platform extension-button width for the app toolbar.
                if os.name == 'nt':
                    return 40
                elif sys.platform == 'darwin':
                    return 54
                else:
                    return 57
            elif self.TYPE == 'MainWidget':
                # Slightly narrower buttons on pane (dockable-widget) toolbars.
                if os.name == 'nt':
                    return 36
                elif sys.platform == 'darwin':
                    return 42
                else:
                    return 44
            else:
                # Unexpected TYPE: warn and fall through to the default metric.
                print("Unknown toolbar style type")  # spyder: test-skip
        # All other metrics (and unknown TYPE) defer to the wrapped style.
        return super().pixelMetric(pm, option, widget)
# ---- Toolbars
# ----------------------------------------------------------------------------
class SpyderToolbar(QToolBar):
    """
    Spyder Toolbar.
    This class provides toolbars with some predefined functionality.
    """

    def __init__(self, parent, title):
        super().__init__(parent=parent)
        # Ordered mapping: section id -> list of actions/widgets in it.
        self._section_items = OrderedDict()
        # item id -> item, used for `before` lookups and removal.
        self._item_map = {}  # type: Dict[str, ToolbarItem]
        # Entries whose `before` anchor has not been added yet, keyed by the
        # anchor's id; they are replayed once the anchor arrives (see the
        # end of add_item).
        self._pending_items = {}  # type: Dict[str, List[ToolbarItemEntry]]
        self._title = title
        self._default_section = "default_section"
        self.setWindowTitle(title)
        # Set icon for extension button.
        # From https://stackoverflow.com/a/55412455/438386
        ext_button = self.findChild(QToolButton, "qt_toolbar_ext_button")
        ext_button.setIcon(ima.icon('toolbar_ext_button'))
        ext_button.setToolTip(_("More"))

    def add_item(self, action_or_widget: ToolbarItem,
                 section: Optional[str] = None, before: Optional[str] = None,
                 before_section: Optional[str] = None, omit_id: bool = False):
        """
        Add action or widget item to given toolbar `section`.
        Parameters
        ----------
        item: SpyderAction or QWidget
            The item to add to the `toolbar`.
        toolbar_id: str or None
            The application toolbar unique string identifier.
        section: str or None
            The section id in which to insert the `item` on the `toolbar`.
        before: str or None
            Make the item appear before another given item.
        before_section: str or None
            Make the item defined section appear before another given section
            (must be already defined).
        omit_id: bool
            If True, then the toolbar will check if the item to add declares an
            id, False otherwise. This flag exists only for items added on
            Spyder 4 plugins. Default: False
        """
        # Resolve the item's unique id: SpyderActions carry `action_id`,
        # widgets carry an `ID` class attribute.
        item_id = None
        if (isinstance(action_or_widget, SpyderAction) or
                hasattr(action_or_widget, 'action_id')):
            item_id = action_or_widget.action_id
        elif hasattr(action_or_widget, 'ID'):
            item_id = action_or_widget.ID
        if not omit_id and item_id is None and action_or_widget is not None:
            raise SpyderAPIError(
                f'Item {action_or_widget} must declare an ID attribute.')
        if before is not None:
            if before not in self._item_map:
                # Anchor not added yet: queue this entry until it shows up.
                before_pending_items = self._pending_items.get(before, [])
                before_pending_items.append(
                    (action_or_widget, section, before, before_section))
                self._pending_items[before] = before_pending_items
                return
            else:
                # Replace the anchor id with the anchor item itself.
                before = self._item_map[before]
        if section is None:
            section = self._default_section
        action_or_widget._section = section
        if before is not None:
            # Inherit the anchor's section when none was given explicitly.
            if section == self._default_section:
                action_or_widget._section = before._section
                section = before._section
        if section not in self._section_items:
            self._section_items[section] = [action_or_widget]
        else:
            if before is not None:
                # Rebuild the section list, inserting the new item just
                # before its anchor.
                new_actions_or_widgets = []
                for act_or_wid in self._section_items[section]:
                    if act_or_wid == before:
                        new_actions_or_widgets.append(action_or_widget)
                    new_actions_or_widgets.append(act_or_wid)
                self._section_items[section] = new_actions_or_widgets
            else:
                self._section_items[section].append(action_or_widget)
        if (before_section is not None and
                before_section in self._section_items):
            # Reorder sections so this item's section precedes before_section.
            new_sections_keys = []
            for sec in self._section_items.keys():
                if sec == before_section:
                    new_sections_keys.append(section)
                if sec != section:
                    new_sections_keys.append(sec)
            self._section_items = OrderedDict(
                (section_key, self._section_items[section_key])
                for section_key in new_sections_keys)
        if item_id is not None:
            self._item_map[item_id] = action_or_widget
        if item_id in self._pending_items:
            # Replay any entries that were waiting for this item as anchor.
            item_pending = self._pending_items.pop(item_id)
            for item, section, before, before_section in item_pending:
                self.add_item(item, section=section, before=before,
                              before_section=before_section)

    def remove_item(self, item_id: str):
        """Remove action or widget from toolbar by id."""
        item = self._item_map.pop(item_id)
        for section in list(self._section_items.keys()):
            section_items = self._section_items[section]
            if item in section_items:
                section_items.remove(item)
                # Drop the section entirely once it becomes empty.
                if len(section_items) == 0:
                    self._section_items.pop(section)
        # Rebuild the toolbar from the updated section map.
        self.clear()
        self._render()

    def _render(self):
        """
        Create the toolbar taking into account sections and locations.
        This method is called once on widget setup.
        """
        sec_items = []
        for sec, items in self._section_items.items():
            for item in items:
                sec_items.append([sec, item])
            # Separator after each section.
            sep = QAction(self)
            sep.setSeparator(True)
            sec_items.append((None, sep))
        if sec_items:
            # Drop the trailing separator.
            sec_items.pop()
        for (sec, item) in sec_items:
            if isinstance(item, QAction):
                add_method = super().addAction
            else:
                add_method = super().addWidget
            add_method(item)
            if isinstance(item, QAction):
                text_beside_icon = getattr(item, 'text_beside_icon', False)
                widget = self.widgetForAction(item)
                if text_beside_icon:
                    widget.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
                if item.isCheckable():
                    widget.setCheckable(True)
class ApplicationToolbar(SpyderToolbar):
    """
    Spyder Main application Toolbar.
    """

    ID = None
    """
    Unique string toolbar identifier.
    This is used by Qt to be able to save and restore the state of widgets.
    """

    def __init__(self, parent, title):
        super().__init__(parent=parent, title=title)
        # Apply the 'Application' pixel metrics (see ToolbarStyle) and the
        # app-toolbar stylesheet.
        self._style = ToolbarStyle(None)
        self._style.TYPE = 'Application'
        self._style.setParent(self)
        self.setStyle(self._style)
        self.setStyleSheet(str(APP_TOOLBAR_STYLESHEET))
class MainWidgetToolbar(SpyderToolbar):
    """
    Spyder Widget toolbar class.
    A toolbar used in Spyder dockable plugins to add internal toolbars
    to their interface.
    """

    ID = None
    """
    Unique string toolbar identifier.
    """

    def __init__(self, parent=None, title=None):
        super().__init__(parent, title=title or '')
        self._icon_size = QSize(16, 16)
        # Setup
        # Unique object name so Qt can persist each toolbar's state.
        self.setObjectName("main_widget_toolbar_{}".format(
            str(uuid.uuid4())[:8]))
        self.setFloatable(False)
        self.setMovable(False)
        self.setContextMenuPolicy(Qt.PreventContextMenu)
        self.setIconSize(self._icon_size)
        # Apply the 'MainWidget' pixel metrics (see ToolbarStyle) and the
        # pane-toolbar stylesheet.
        self._style = ToolbarStyle(None)
        self._style.TYPE = 'MainWidget'
        self._style.setParent(self)
        self.setStyle(self._style)
        self.setStyleSheet(str(PANES_TOOLBAR_STYLESHEET))
        # Filter used to suppress tooltips for actions without a tip.
        self._filter = ToolTipFilter()

    def set_icon_size(self, icon_size):
        # Keep the cached size and the widget's actual icon size in sync.
        self._icon_size = icon_size
        self.setIconSize(icon_size)

    def _render(self):
        """
        Create the toolbar taking into account the sections and locations.
        This method is called once on widget setup.
        """
        sec_items = []
        for sec, items in self._section_items.items():
            for item in items:
                sec_items.append([sec, item])
            # Separator after each section.
            sep = QAction(self)
            sep.setSeparator(True)
            sec_items.append((None, sep))
        if sec_items:
            # Drop the trailing separator.
            sec_items.pop()
        for (sec, item) in sec_items:
            if isinstance(item, QAction):
                add_method = super().addAction
            else:
                add_method = super().addWidget
            add_method(item)
            if isinstance(item, QAction):
                widget = self.widgetForAction(item)
                # Install the tooltip filter on the backing toolbutton.
                widget.installEventFilter(self._filter)
                text_beside_icon = getattr(item, 'text_beside_icon', False)
                if text_beside_icon:
                    widget.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
                if item.isCheckable():
                    widget.setCheckable(True)
# Platform flag: True targets the Windows NAS mounts, False the macOS
# volume mounts below.
windows = False
if windows:
    directory = '//nas.sala.ubc.ca/ELabs/100_personal/nm/Databases/'
    bca_dir = '//nas.sala.ubc.ca/ELabs/50_projects/16_PICS/07_BCA data/'
else:
    directory = '/Volumes/Samsung_T5/Databases/'
    bca_dir = '/Volumes/ELabs/50_projects/16_PICS/07_BCA data/'

# Travel modes, catchment radii (presumably metres -- TODO confirm) and
# number of random seeds used by the analyses.
modes = ['transit', 'bike', 'walk', 'drive']
radii = [1200, 400]
r_seeds = 6

# Average trip length per mode (presumably km -- TODO confirm units).
trip_length = {'transit': 10.2, 'drive': 10.4, 'bike': 5.5, 'walk': 0.9}

# Experiment code -> horizon year, per study area.  Disabled experiments
# are kept commented out for reference.
ss_experiments = {
    'e0': 2020,
    # 'e1': 2030,
    # 'e2': 2030,
    'e3': 2040,
    # 'e4': 2040,
    'e5': 2040,
    'e6': 2040,
    # 'e7': 2050,
    # 'e8': 2050
}
wb_experiments = {
    'e0': 2020,
    'e5': 2050,
    'e6': 2050,
    'e7': 2050,
}
hq_experiments = {
    'e0': 2020,
    'e1': 2040,
    'e2': 2040,
    'e3': 2040
}

# Study area -> [containing region string, experiment table].
experiments = {
    # 'West Bowl': ['Prince George, British Columbia', wb_experiments],
    # 'Hillside Quadra': ['Capital Regional District, British Columbia', hq_experiments],
    'Sunset': ['Metro Vancouver, British Columbia', ss_experiments],
}

# Region hierarchy derived from the active experiments (region name is the
# part of the region string before the first comma).
regions = {
    'Canada': {
        'British Columbia': [v[0].split(',')[0] for v in experiments.values()]
    }
}

# GIS layer -> list of attribute columns consumed from that layer.
network_layers = {
    'network_stops': ["frequency"],
    # 'network_nodes': ["elevation"],
    'network_walk': ["walk_length"],
    'network_bike': ["bike_length"],
    # 'network_drive': ["drive_length"],
    # 'land_assessment_parcels': ["area_sqkm"],
    'land_dissemination_area': ["population, 2016", "population density per square kilometre, 2016", "n_dwellings"],
    'land_assessment_fabric': ["n_use", "number_of_bedrooms"],
}

# Raw column name -> (display label, category) used when presenting results.
rename_dict = {
    'mob_network_stops_ct': ('Public transit stops', 'network'),
    'frequency': ('Transit frequency', 'network'),
    'network_nodes': ('Number of intersections', 'network'),
    'elevation': ('Elevation', 'network'),
    'connectivity': ('Axial connectivity', 'network'),
    'axial_closeness': ('Axial closeness centrality', 'network'),
    'axial_betweenness': ('Axial betweenness centrality', 'network'),
    'axial_n_betweenness': ('Normalized axial betweenness centrality', 'network'),
    'axial_length': ('Axial line length', 'network'),
    'axial_eigenvector': ('Axial eigenvector centrality', 'network'),
    'axial_katz': ('Axial katz centrality', 'network'),
    'axial_pagerank': ('Axial page rank centrality', 'network'),
    'axial_hits1': ('Axial hits centrality', 'network'),
    'axial_degree': ('Axial degree centrality', 'network'),
    'network_walk_ct': ('Intensity of walkable Network', 'network'),
    'network_bike_ct': ('Intensity of bikeable Network', 'network'),
    'network_drive_ct': ('Intensity of driveable Network', 'network'),
    'walk_length': ('Length of walkable Network', 'network'),
    'bike_length': ('Length of bikeable Network', 'network'),
    'drive_length': ('Length of driveable Network', 'network'),
    'walk_straight': ('Straightness of walkable Network', 'network'),
    'bike_straight': ('Straightness of bikeable Network', 'network'),
    'drive_straight': ('Straightness of driveable Network', 'network'),
    'land_assessment_fabric_ct': ('Number of units', 'density'),
    'n_use': ('Use diversity', 'landuse'),
    'CM': ('Commercial', 'landuse'),
    'SFD': ('Single-Family Detached', 'landuse'),
    'SFA': ('Single-Family Attached', 'landuse'),
    'MFL': ('Multi-Family Low-Rise', 'landuse'),
    'MFH': ('Multi-Family High-Rise', 'landuse'),
    'total_finished_area': ('Total finished area', 'density'),
    'gross_building_area': ('Gross building area', 'density'),
    'number_of_bedrooms': ('Number of bedrooms', 'density'),
    'land_assessment_parcels_ct': ('Number of parcels', 'density'),
    'area_sqkm': ('Parcel size', 'density'),
    'n_size': ('Parcel diversity', 'density'),
    'population density per square kilometre, 2016': ('Population density', 'density'),
    'n_dwellings': ('Number of dwellings', 'density'),
    'population, 2016': ('Population', 'density'),
}

"""
# MACC Curve
network_bus = {
    'network_stops': ["frequency"],
    'land_assessment_fabric': ["n_use", "total_finished_area"],  # "gross_building_area", "year_built", "number_of_bedrooms"],
    'land_assessment_parcels': ["area_sqkm", "n_size"],
    'land_dissemination_area': ["population, 2016", "population density per square kilometre, 2016"],  # "n_dwellings"]
}
network_bike = {
    'network_cycle': ["cycle_length"],
    'land_assessment_fabric': ["n_use", "total_finished_area"],  # "gross_building_area", "year_built", "number_of_bedrooms"],
    'land_assessment_parcels': ["area_sqkm", "n_size"],
    'land_dissemination_area': ["population, 2016", "population density per square kilometre, 2016"],  # "n_dwellings"]
}
"""

DIRECTORY = "/Volumes/SALA/Research/eLabs/50_projects/20_City_o_Vancouver/SSHRC Partnership Engage/Sandbox/shp/MainSt/Experiment"
if windows:
directory = '//nas.sala.ubc.ca/ELabs/100_personal/nm/Databases/'
bca_dir = '//nas.sala.ubc.ca/ELabs/50_projects/16_PICS/07_BCA data/'
else:
directory = '/Volumes/Samsung_T5/Databases/'
bca_dir = '/Volumes/ELabs/50_projects/16_PICS/07_BCA data/'
modes = ['transit', 'bike', 'walk', 'drive']
radii = [1200, 400]
r_seeds = 6
trip_length = {'transit': 10.2, 'drive': 10.4, 'bike': 5.5, 'walk': 0.9}
ss_experiments = {
'e0': 2020,
# 'e1': 2030,
# 'e2': 2030,
'e3': 2040,
# 'e4': 2040,
'e5': 2040,
'e6': 2040,
# 'e7': 2050,
# 'e8': 2050
}
wb_experiments = {
'e0': 2020,
'e5': 2050,
'e6': 2050,
'e7': 2050,
}
hq_experiments = {
'e0': 2020,
'e1': 2040,
'e2': 2040,
'e3': 2040
}
experiments = {
# 'West Bowl': ['Prince George, British Columbia', wb_experiments],
# 'Hillside Quadra': ['Capital Regional District, British Columbia', hq_experiments],
'Sunset': ['Metro Vancouver, British Columbia', ss_experiments],
}
regions = {
'Canada': {
'British Columbia': [v[0].split(',')[0] for i, v in experiments.items()]
}
}
network_layers = {
'network_stops': ["frequency"],
# 'network_nodes': ["elevation"],
'network_walk': ["walk_length"],
'network_bike': ["bike_length"],
# 'network_drive': ["drive_length"],
# 'land_assessment_parcels': ["area_sqkm"],
'land_dissemination_area': ["population, 2016", "population density per square kilometre, 2016", "n_dwellings"],
'land_assessment_fabric': ["n_use", "number_of_bedrooms"],
}
rename_dict = {
'mob_network_stops_ct': ('Public transit stops', 'network'),
'frequency': ('Transit frequency', 'network'),
'network_nodes': ('Number of intersections', 'network'),
'elevation': ('Elevation', 'network'),
'connectivity': ('Axial connectivity', 'network'),
'axial_closeness': ('Axial closeness centrality', 'network'),
'axial_betweenness': ('Axial betweenness centrality', 'network'),
'axial_n_betweenness': ('Normalized axial betweenness centrality', 'network'),
'axial_length': ('Axial line length', 'network'),
'axial_eigenvector': ('Axial eigenvector centrality', 'network'),
'axial_katz': ('Axial katz centrality', 'network'),
'axial_pagerank': ('Axial page rank centrality', 'network'),
'axial_hits1': ('Axial hits centrality', 'network'),
'axial_degree': ('Axial degree centrality', 'network'),
'network_walk_ct': ('Intensity of walkable Network', 'network'),
'network_bike_ct': ('Intensity of bikeable Network', 'network'),
'network_drive_ct': ('Intensity of driveable Network', 'network'),
'walk_length': ('Length of walkable Network', 'network'),
'bike_length': ('Length of bikeable Network', 'network'),
'drive_length': ('Length of driveable Network', 'network'),
'walk_straight': ('Straightness of walkable Network', 'network'),
'bike_straight': ('Straightness of bikeable Network', 'network'),
'drive_straight': ('Straightness of driveable Network', 'network'),
'land_assessment_fabric_ct': ('Number of units', 'density'),
'n_use': ('Use diversity', 'landuse'),
'CM': ('Commercial', 'landuse'),
'SFD': ('Single-Family Detached', 'landuse'),
'SFA': ('Single-Family Attached', 'landuse'),
'MFL': ('Multi-Family Low-Rise', 'landuse'),
'MFH': ('Multi-Family High-Rise', 'landuse'),
'total_finished_area': ('Total finished area', 'density'),
'gross_building_area': ('Gross building area', 'density'),
'number_of_bedrooms': ('Number of bedrooms', 'density'),
'land_assessment_parcels_ct': ('Number of parcels', 'density'),
'area_sqkm': ('Parcel size', 'density'),
'n_size': ('Parcel diversity', 'density'),
'population density per square kilometre, 2016': ('Population density', 'density'),
'n_dwellings': ('Number of dwellings', 'density'),
'population, 2016': ('Population', 'density'),
}
"""
# MACC Curve
network_bus = {
'network_stops': ["frequency"],
'land_assessment_fabric': ["n_use", "total_finished_area"], # "gross_building_area", "year_built", "number_of_bedrooms"],
'land_assessment_parcels': ["area_sqkm", "n_size"],
'land_dissemination_area': ["population, 2016", "population density per square kilometre, 2016"], # "n_dwellings"]
}
network_bike = {
'network_cycle': ["cycle_length"],
'land_assessment_fabric': ["n_use", "total_finished_area"], # "gross_building_area", "year_built", "number_of_bedrooms"],
'land_assessment_parcels': ["area_sqkm", "n_size"],
'land_dissemination_area': ["population, 2016", "population density per square kilometre, 2016"], # "n_dwellings"]
}
"""
DIRECTORY = "/Volumes/SALA/Research/eLabs/50_projects/20_City_o_Vancouver/SSHRC Partnership Engage/Sandbox/shp/MainSt/Experiment" | 0.363421 | 0.359336 |
import pprint
import re # noqa: F401
import six
class PaymentRequestLivingAddress(object):
    """Customer living (home) address attached to a payment request.

    NOTE: This class was originally produced by the swagger code generator;
    its public interface (attributes, property validation, helper methods
    and error messages) is preserved.  The five copy-pasted length checks
    are centralized in _check_length, and the Python 2 six.iteritems shim
    is replaced by plain dict iteration.
    """

    # Attribute name -> declared swagger type.
    swagger_types = {
        "address": "str",
        "city": "str",
        "country": "str",
        "state": "str",
        "zip": "str",
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        "address": "address",
        "city": "city",
        "country": "country",
        "state": "state",
        "zip": "zip",
    }

    def __init__(
        self, address=None, city=None, country=None, state=None, zip=None
    ):  # noqa: E501
        """PaymentRequestLivingAddress - a model defined in Swagger.

        All fields are optional; keyword arguments are routed through the
        property setters so they get the same validation as later writes.
        """
        self._address = None
        self._city = None
        self._country = None
        self._state = None
        self._zip = None
        self.discriminator = None
        if address is not None:
            self.address = address
        if city is not None:
            self.city = city
        if country is not None:
            self.country = country
        if state is not None:
            self.state = state
        if zip is not None:
            self.zip = zip

    @staticmethod
    def _check_length(name, value, max_len, min_len=0):
        """Raise ValueError unless len(value) is within [min_len, max_len].

        None is accepted unchanged (all fields are optional).  Error message
        text matches the generated originals exactly.
        """
        if value is None:
            return
        if len(value) > max_len:
            raise ValueError(
                "Invalid value for `%s`, length must be "
                "less than or equal to `%d`" % (name, max_len)
            )
        if len(value) < min_len:
            raise ValueError(
                "Invalid value for `%s`, length must be "
                "greater than or equal to `%d`" % (name, min_len)
            )

    @property
    def address(self):
        """Customer home address (str, at most 100 characters)."""
        return self._address

    @address.setter
    def address(self, address):
        """Set the address after validating its length."""
        self._check_length("address", address, 100)
        self._address = address

    @property
    def city(self):
        """Customer city (str, at most 20 characters)."""
        return self._city

    @city.setter
    def city(self, city):
        """Set the city after validating its length."""
        self._check_length("city", city, 20)
        self._city = city

    @property
    def country(self):
        """ISO 3166-1 country code: 2 or 3 latin letters or numeric code."""
        return self._country

    @country.setter
    def country(self, country):
        """Set the country after validating its length (2-3 chars)."""
        self._check_length("country", country, 3, min_len=2)
        self._country = country

    @property
    def state(self):
        """Living state or province (str, at most 20 characters)."""
        return self._state

    @state.setter
    def state(self, state):
        """Set the state after validating its length."""
        self._check_length("state", state, 20)
        self._state = state

    @property
    def zip(self):
        """Customer postal code (str, at most 17 characters)."""
        return self._zip

    @zip.setter
    def zip(self, zip):
        """Set the postal code after validating its length."""
        self._check_length("zip", zip, 17)
        self._zip = zip

    def to_dict(self):
        """Returns the model properties as a dict (None values omitted)."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    x.to_dict() if hasattr(x, "to_dict") else x for x in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                if value is not None:
                    result[attr] = value
        # Generated convention: if the model ever subclasses dict, include
        # its own items as well.
        if issubclass(PaymentRequestLivingAddress, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PaymentRequestLivingAddress):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
import re # noqa: F401
import six
class PaymentRequestLivingAddress(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
"address": "str",
"city": "str",
"country": "str",
"state": "str",
"zip": "str",
}
attribute_map = {
"address": "address",
"city": "city",
"country": "country",
"state": "state",
"zip": "zip",
}
def __init__(
self, address=None, city=None, country=None, state=None, zip=None
): # noqa: E501
"""PaymentRequestLivingAddress - a model defined in Swagger""" # noqa: E501
self._address = None
self._city = None
self._country = None
self._state = None
self._zip = None
self.discriminator = None
if address is not None:
self.address = address
if city is not None:
self.city = city
if country is not None:
self.country = country
if state is not None:
self.state = state
if zip is not None:
self.zip = zip
@property
def address(self):
"""Gets the address of this PaymentRequestLivingAddress. # noqa: E501
Customer home address # noqa: E501
:return: The address of this PaymentRequestLivingAddress. # noqa: E501
:rtype: str
"""
return self._address
@address.setter
def address(self, address):
"""Sets the address of this PaymentRequestLivingAddress.
Customer home address # noqa: E501
:param address: The address of this PaymentRequestLivingAddress. # noqa: E501
:type: str
"""
if address is not None and len(address) > 100:
raise ValueError(
"Invalid value for `address`, length must be less than or equal to `100`"
) # noqa: E501
if address is not None and len(address) < 0:
raise ValueError(
"Invalid value for `address`, length must be greater than or equal to `0`"
) # noqa: E501
self._address = address
@property
def city(self):
"""Gets the city of this PaymentRequestLivingAddress. # noqa: E501
Customer city. # noqa: E501
:return: The city of this PaymentRequestLivingAddress. # noqa: E501
:rtype: str
"""
return self._city
@city.setter
def city(self, city):
"""Sets the city of this PaymentRequestLivingAddress.
Customer city. # noqa: E501
:param city: The city of this PaymentRequestLivingAddress. # noqa: E501
:type: str
"""
if city is not None and len(city) > 20:
raise ValueError(
"Invalid value for `city`, length must be less than or equal to `20`"
) # noqa: E501
if city is not None and len(city) < 0:
raise ValueError(
"Invalid value for `city`, length must be greater than or equal to `0`"
) # noqa: E501
self._city = city
@property
def country(self):
"""Gets the country of this PaymentRequestLivingAddress. # noqa: E501
ISO 3166-1 code of country: 2 or 3 latin letters or numeric code. # noqa: E501
:return: The country of this PaymentRequestLivingAddress. # noqa: E501
:rtype: str
"""
return self._country
@country.setter
def country(self, country):
"""Sets the country of this PaymentRequestLivingAddress.
ISO 3166-1 code of country: 2 or 3 latin letters or numeric code. # noqa: E501
:param country: The country of this PaymentRequestLivingAddress. # noqa: E501
:type: str
"""
if country is not None and len(country) > 3:
raise ValueError(
"Invalid value for `country`, length must be less than or equal to `3`"
) # noqa: E501
if country is not None and len(country) < 2:
raise ValueError(
"Invalid value for `country`, length must be greater than or equal to `2`"
) # noqa: E501
self._country = country
@property
def state(self):
"""Gets the state of this PaymentRequestLivingAddress. # noqa: E501
Living state or province. # noqa: E501
:return: The state of this PaymentRequestLivingAddress. # noqa: E501
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this PaymentRequestLivingAddress.
Living state or province. # noqa: E501
:param state: The state of this PaymentRequestLivingAddress. # noqa: E501
:type: str
"""
if state is not None and len(state) > 20:
raise ValueError(
"Invalid value for `state`, length must be less than or equal to `20`"
) # noqa: E501
if state is not None and len(state) < 0:
raise ValueError(
"Invalid value for `state`, length must be greater than or equal to `0`"
) # noqa: E501
self._state = state
@property
def zip(self):
"""Gets the zip of this PaymentRequestLivingAddress. # noqa: E501
Customer postal code # noqa: E501
:return: The zip of this PaymentRequestLivingAddress. # noqa: E501
:rtype: str
"""
return self._zip
@zip.setter
def zip(self, zip):
"""Sets the zip of this PaymentRequestLivingAddress.
Customer postal code # noqa: E501
:param zip: The zip of this PaymentRequestLivingAddress. # noqa: E501
:type: str
"""
if zip is not None and len(zip) > 17:
raise ValueError(
"Invalid value for `zip`, length must be less than or equal to `17`"
) # noqa: E501
if zip is not None and len(zip) < 0:
raise ValueError(
"Invalid value for `zip`, length must be greater than or equal to `0`"
) # noqa: E501
self._zip = zip
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
if value is not None:
result[attr] = value
if issubclass(PaymentRequestLivingAddress, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PaymentRequestLivingAddress):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | 0.778355 | 0.150934 |
import collections
import collections.abc
import datetime
import json
import random
import re

import core
from core import localization
from core.helpers import Comparisons
''' Config
Config is a simple json object that is loaded into core.CONFIG as a dict
All sections and subsections must be capitalized. All keys must be lower-case.
No spaces, underscores, or hyphens.
Be terse but descriptive.
'''
# Path of the shipped config template, read by new_config() and
# merge_new_options() below.
base_file = 'core/base_config.cfg'
def default_profile():
    """Return the name of the first quality profile flagged as default.

    Raises IndexError when no profile has 'default' set.
    """
    profiles = core.CONFIG['Quality']['Profiles']
    defaults = [name for name, profile in profiles.items() if profile.get('default')]
    return defaults[0]
def lang_names(lang):
    """Return the configured alternate names for *lang* as a list.

    The config stores them as a comma-separated string; an empty or
    missing value yields an empty list.
    """
    names = core.CONFIG['Languages'][lang]
    if not names:
        return []
    return re.split(',[ ]*', names)
def new_config():
    ''' Copies base_file to the config directory.

    Loads the base template, randomizes the scheduled task times (search,
    update install, popular-movies sync) and the api key, installs the
    default quality profile, then writes the result to core.CONF_FILE.

    Returns dict of newly created config
    '''
    with open(base_file, 'r') as f:
        config = json.load(f)

    # Randomize task times so installs don't all hit remote services at
    # the same moment.  The randint call order matches the original code.
    for section, key, top in (
            ('Search', 'searchtimehr', 23),
            ('Search', 'searchtimemin', 59),
            ('Server', 'installupdatehr', 23),
            ('Server', 'installupdatemin', 59),
            ('Search', 'popularmovieshour', 23),
            ('Search', 'popularmoviesmin', 59),
    ):
        config[section][key] = random.randint(0, top)

    config['Server']['apikey'] = '%06x' % random.randint(0, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
    config['Quality']['Profiles']['Default'] = base_profile

    with open(core.CONF_FILE, 'w') as f:
        json.dump(config, f, indent=4, sort_keys=True)
    return config
def write(data):
    ''' Writes a dict to the config file.

    data (dict): config section(s) with nested dicts of keys and values:
        {'Section': {'key': 'val', 'key2': 'val2'}, 'Section2': {'key': 'val'}}

    MUST contain fully populated sections or data will be lost.
    Only modifies supplied sections.

    After updating the config file, pushes the result to core.CONFIG via
    load(), restarts any scheduler tasks affected by the change, and
    re-installs localization when the language changed.

    Does not return.
    '''
    diff = Comparisons.compare_dict(data, core.CONFIG)
    core.CONFIG.update(data)

    with open(core.CONF_FILE, 'w') as f:
        json.dump(core.CONFIG, f, indent=4, sort_keys=True)
    load(config=core.CONFIG)

    if diff:
        restart_scheduler(diff)

    new_lang = diff.get('Server', {}).get('language')
    if new_lang:
        localization.install(new_lang)
def merge_new_options():
    ''' Merges new options in base_config with the user's config

    Opens the base template and the user's config file, merges them with
    the user's values taking priority, migrates legacy option formats,
    and writes the result back to core.CONF_FILE.

    Does not return
    '''
    with open(base_file, 'r') as f:
        base_config = json.load(f)
    with open(core.CONF_FILE, 'r') as f:
        config = json.load(f)

    new_config = _merge(base_config, config)

    # Migration: the imdb watchlist used to be a comma-separated string.
    if isinstance(new_config['Search']['Watchlists']['imdbrss'], str):
        new_config['Search']['Watchlists']['imdbrss'] = new_config['Search']['Watchlists']['imdbrss'].split(',')

    # Migration: predb-only verifying became a multiple-choice option.
    if new_config['Search'].get('predbcheck') is True:
        new_config['Search']['verifyreleases'] = 'predb'
        del new_config['Search']['predbcheck']

    # Migration: the hardlink toggle became the 'movermethod' choice.
    if new_config['Postprocessing'].get('createhardlink') is True:
        new_config['Postprocessing']['movermethod'] = 'hardlink'
        del new_config['Postprocessing']['createhardlink']

    # Guarantee at least one quality profile exists...
    if not new_config['Quality']['Profiles']:
        new_config['Quality']['Profiles']['Default'] = base_profile

    # ...and that at least one of them is flagged as the default.
    if not any(prof.get('default') for prof in new_config['Quality']['Profiles'].values()):
        target = 'Default' if new_config['Quality']['Profiles'].get('Default') else list(new_config['Quality']['Profiles'].keys())[0]
        print('Default Quality Profile not specified, setting *{}* to Default.'.format(target))
        new_config['Quality']['Profiles'][target]['default'] = True

    # Migration: torrent indexers used to be stored as a bare enabled bool.
    # Replacing values for existing keys is safe while iterating items().
    for indexer, setting in new_config['Indexers']['Torrent'].items():
        if isinstance(setting, bool):
            new_config['Indexers']['Torrent'][indexer] = {'enabled': setting, 'url': ''}

    with open(core.CONF_FILE, 'w') as f:
        json.dump(new_config, f, indent=4, sort_keys=True)
def _merge(d, u):
''' Deep merge dictionaries
d (dict): base dict to merge into
u (dict): dict to update from
If any k:v pair in u is not in d, adds k:v pair.
Will not overwrite any values in d, nor will it remove
any k:v pairs in d.
Returns dict
'''
for k, v in u.items():
if isinstance(v, collections.Mapping):
r = _merge(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
def dump(config=None):
    ''' Writes config to file

    config (dict): config contents to write to disk
        <optional - defaults to the live core.CONFIG>

    Opposite of load -- writes config to disk.

    The default is now resolved at call time: the original signature
    `dump(config=core.CONFIG)` bound the dict object once at import, so
    after load() rebound core.CONFIG a no-arg dump() wrote a stale object.

    Returns bool
    '''
    if config is None:
        config = core.CONFIG
    try:
        with open(core.CONF_FILE, 'w') as f:
            json.dump(config, f, indent=4, sort_keys=True)
    except Exception:
        # Best-effort write: callers only need success/failure.
        return False
    return True
def load(config=None):
    ''' Stores entire config as dict to core.CONFIG

    config (dict): config file contents <optional>

    When *config* is not supplied, reads the config from disk.  Callers
    in this module that just wrote the file pass the in-memory dict so
    it is not re-read.

    Sanitizes input when necessary.

    Opposite of dump -- loads config into memory.

    Does not return
    '''
    if not config:
        with open(core.CONF_FILE, 'r') as f:
            config = json.load(f)

    # The illegal-character replacement must itself be legal in filenames.
    if config['Postprocessing']['replaceillegal'] in {'"', '*', '?', '<', '>', '|', ':'}:
        config['Postprocessing']['replaceillegal'] = ''

    core.CONFIG = config
def restart_scheduler(diff):
    ''' Restarts scheduled tasks affected by diff

    diff (dict): modified sections/keys of the config file

    Reads diff and reloads every scheduler task whose timing or
    enabled-state settings changed.  Next-run times are generally
    anchored to "now" plus the configured frequency.

    Does not return
    '''
    now = datetime.datetime.today()
    if 'Server' in diff:
        d = diff['Server'].keys()
        if any(i in d for i in ('checkupdates', 'checkupdatefrequency')):
            # Next update check is one full frequency period from now.
            # NOTE: 'min' shadows the builtin throughout; kept as-is.
            hr = now.hour + core.CONFIG['Server']['checkupdatefrequency']
            min = now.minute
            interval = core.CONFIG['Server']['checkupdatefrequency'] * 3600
            auto_start = core.CONFIG['Server']['checkupdates']
            core.scheduler_plugin.task_list['Update Checker'].reload(hr, min, interval, auto_start=auto_start)
    if 'Search' in diff:
        d = diff['Search'].keys()
        if 'rsssyncfrequency' in d:
            hr = now.hour
            min = now.minute + core.CONFIG['Search']['rsssyncfrequency']
            interval = core.CONFIG['Search']['rsssyncfrequency'] * 60
            auto_start = True
            core.scheduler_plugin.task_list['Movie Search'].reload(hr, min, interval, auto_start=auto_start)
        if 'Watchlists' in d:
            # 'd' is deliberately rebound to the nested Watchlists keys.
            d = diff['Search']['Watchlists'].keys()
            if any(i in d for i in ('imdbfrequency', 'imdbsync')):
                hr = now.hour
                min = now.minute + core.CONFIG['Search']['Watchlists']['imdbfrequency']
                interval = core.CONFIG['Search']['Watchlists']['imdbfrequency'] * 60
                auto_start = core.CONFIG['Search']['Watchlists']['imdbsync']
                core.scheduler_plugin.task_list['IMDB Sync'].reload(hr, min, interval, auto_start=auto_start)
            if any(i in d for i in ('popularmoviessync', 'popularmovieshour', 'popularmoviesmin')):
                # Daily task at the configured wall-clock time.
                hr = core.CONFIG['Search']['Watchlists']['popularmovieshour']
                min = core.CONFIG['Search']['Watchlists']['popularmoviesmin']
                interval = 24 * 3600
                auto_start = core.CONFIG['Search']['Watchlists']['popularmoviessync']
                core.scheduler_plugin.task_list['PopularMovies Sync'].reload(hr, min, interval, auto_start=auto_start)
            if any(i in d for i in ('traktsync', 'traktfrequency')):
                hr = now.hour
                min = now.minute + core.CONFIG['Search']['Watchlists']['traktfrequency']
                interval = core.CONFIG['Search']['Watchlists']['traktfrequency'] * 60
                auto_start = core.CONFIG['Search']['Watchlists']['traktsync']
                core.scheduler_plugin.task_list['Trakt Sync'].reload(hr, min, interval, auto_start=auto_start)
    if 'Postprocessing' in diff:
        d = diff['Postprocessing'].get('Scanner', {})
        if any(i in d for i in ('interval', 'enabled')):
            hr = now.hour
            min = now.minute + core.CONFIG['Postprocessing']['Scanner']['interval']
            interval = core.CONFIG['Postprocessing']['Scanner']['interval'] * 60
            auto_start = core.CONFIG['Postprocessing']['Scanner']['enabled']
            core.scheduler_plugin.task_list['PostProcessing Scan'].reload(hr, min, interval, auto_start=auto_start)
    if 'System' in diff:
        # NOTE(review): assumes diff['System'] always contains
        # 'FileManagement' -- raises KeyError otherwise; confirm callers.
        d = diff['System']['FileManagement'].keys()
        if any(i in d for i in ('scanmissingfiles', 'scanmissinghour', 'scanmissingmin')):
            hr = core.CONFIG['System']['FileManagement']['scanmissinghour']
            min = core.CONFIG['System']['FileManagement']['scanmissingmin']
            interval = 24 * 3600
            auto_start = core.CONFIG['System']['FileManagement']['scanmissingfiles']
            core.scheduler_plugin.task_list['Missing Files Scan'].reload(hr, min, interval, auto_start=auto_start)
    if 'Downloader' in diff and 'Torrent' in diff['Downloader']:
        # Find the first enabled torrent client whose settings require the
        # periodic status-check task (Deluge clients cannot remove
        # torrents, so 'removetorrents' is ignored for them).
        auto_start = False
        client = None
        if core.CONFIG['Downloader']['Sources']['torrentenabled']:
            for name, config in core.CONFIG['Downloader']['Torrent'].items():
                ignore_remove_torrents = name == 'DelugeRPC' or name == 'DelugeWeb'
                if config['enabled'] and (not ignore_remove_torrents and config.get('removetorrents') or config.get('removestalledfor')):
                    auto_start = True
                    client = name
                    break
        if auto_start:
            d = diff['Downloader']['Torrent'].get(client, {}).keys()
            setting_keys = ['enabled', 'removestalledfor']
            if client != 'DelugeRPC' and client != 'DelugeWeb':
                setting_keys.append('removetorrents')
            if any(i in d for i in setting_keys):
                # Hourly check starting one hour from now.
                hr = (now.hour + 1) % 24
                min = now.minute
                core.scheduler_plugin.task_list['Torrents Status Check'].reload(hr, min, 3600, auto_start=True)
        else:
            # No client needs the check: park the task disabled.
            core.scheduler_plugin.task_list['Torrents Status Check'].reload(0, 0, 3600, auto_start=False)
'''
base_profile is used as the template quality profile if none is present.
'''
# Defined as a plain literal instead of json.loads() on a hard-coded JSON
# string: same resulting object, no parse at import time.  Each source maps
# to [enabled, priority] (lower priority number = preferred).
base_profile = {
    "Sources": {
        "BluRay-1080P": [True, 1],
        "BluRay-4K": [False, 0],
        "BluRay-720P": [True, 2],
        "CAM-SD": [False, 13],
        "DVD-SD": [False, 9],
        "Screener-1080P": [False, 10],
        "Screener-720P": [False, 11],
        "Telesync-SD": [False, 12],
        "WebDL-1080P": [True, 4],
        "WebDL-4K": [False, 3],
        "WebDL-720P": [True, 5],
        "WebRip-1080P": [True, 7],
        "WebRip-4K": [False, 6],
        "WebRip-720P": [True, 8],
        "Unknown": [False, 14],
    },
    "ignoredwords": "subs,german,dutch,french,truefrench,danish,swedish,spanish,italian,korean,dubbed,swesub,korsub,dksubs,vain,HC,blurred",
    "preferredwords": "",
    "prefersmaller": False,
    "requiredwords": "",
    "scoretitle": True,
    "default": True,
}
import random
import datetime
import core
import collections
import re
from core import localization
from core.helpers import Comparisons
''' Config
Config is a simple json object that is loaded into core.CONFIG as a dict
All sections and subsections must be capitalized. All keys must be lower-case.
No spaces, underscores, or hyphens.
Be terse but descriptive.
'''
# Path (relative to the app root) of the shipped config template.
base_file = 'core/base_config.cfg'
def default_profile():
    ''' Returns the name of the quality profile flagged as default.
    Raises IndexError if no profile has 'default' set.
    '''
    defaults = [name for name, profile in core.CONFIG['Quality']['Profiles'].items()
                if profile.get('default')]
    return defaults[0]
def lang_names(lang):
    ''' Returns the list of alternate names configured for language *lang*.
    The config value is a comma-separated string; an empty/falsy value
    yields an empty list.
    '''
    names = core.CONFIG['Languages'][lang]
    if not names:
        return []
    return re.split(',[ ]*', names)
def new_config():
    ''' Copies base_file to config directory.
    Automatically assigns random values to searchtimehr, searchtimemin,
    installupdatehr, installupdatemin, and apikey.
    Config template is stored as core/base_config.cfg
    When adding keys to the base config:
    Keys will have no spaces, hyphens, underscores or other substitutions
    for a space. Simply crush everything into one word.
    Keys that access another dictionary should be capitalized. This can
    be done in the way that makes most sense in context, but should
    try to mimic camel case.
    Keys that access a non-dictionary value should be lowercase.
    Returns dict of newly created config
    '''
    with open(base_file, 'r') as f:
        config = json.load(f)
    # Randomize scheduled-task times so every install doesn't hit remote
    # services at the same moment.
    config['Search']['searchtimehr'] = random.randint(0, 23)
    config['Search']['searchtimemin'] = random.randint(0, 59)
    config['Server']['installupdatehr'] = random.randint(0, 23)
    config['Server']['installupdatemin'] = random.randint(0, 59)
    config['Search']['popularmovieshour'] = random.randint(0, 23)
    config['Search']['popularmoviesmin'] = random.randint(0, 59)
    # Up to 32 hex chars; '%06x' only guarantees a minimum width of 6.
    # NOTE(review): random.randint is not cryptographically secure --
    # consider the secrets module for API-key generation.
    apikey = '%06x' % random.randint(0, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
    config['Server']['apikey'] = apikey
    config['Quality']['Profiles']['Default'] = base_profile
    with open(core.CONF_FILE, 'w') as f:
        json.dump(config, f, indent=4, sort_keys=True)
    return config
def write(data):
    ''' Writes a dict to the config file.
    data (dict): Config section with nested dict of keys and values:
    {'Section': {'key': 'val', 'key2': 'val2'}, 'Section2': {'key': 'val'}}
    MUST contain fully populated sections or data will be lost.
    Only modifies supplied section.
    After updating config file, copies to core.CONFIG via load()
    Does not return.
    '''
    # Diff must be computed before core.CONFIG is mutated below.
    diff = Comparisons.compare_dict(data, core.CONFIG)
    core.CONFIG.update(data)
    with open(core.CONF_FILE, 'w') as f:
        json.dump(core.CONFIG, f, indent=4, sort_keys=True)
    load(config=core.CONFIG)
    if diff:
        # Only restart scheduled tasks whose settings actually changed.
        restart_scheduler(diff)
        l = diff.get('Server', {}).get('language')
        if l:
            # UI language changed -- reinstall translations.
            localization.install(l)
    return
def merge_new_options():
    ''' Merges new options in base_config with config
    Opens base_config and config, then saves them merged with config taking priority.
    Also migrates legacy settings formats to their current shape.
    Does not return
    '''
    # NOTE(review): this initial value is unused -- overwritten by _merge() below.
    new_config = {}
    with open(base_file, 'r') as f:
        base_config = json.load(f)
    with open(core.CONF_FILE, 'r') as f:
        config = json.load(f)
    new_config = _merge(base_config, config)
    # Convert imdb str to list (legacy format stored a comma-separated string)
    if type(new_config['Search']['Watchlists']['imdbrss']) == str:
        new_config['Search']['Watchlists']['imdbrss'] = new_config['Search']['Watchlists']['imdbrss'].split(',')
    # Convert from predb-only verifying to multiple choice
    if new_config['Search'].get('predbcheck') is True:
        new_config['Search']['verifyreleases'] = 'predb'
        del new_config['Search']['predbcheck']
    # Convert from hardlink option to multiple choice
    if new_config['Postprocessing'].get('createhardlink') is True:
        new_config['Postprocessing']['movermethod'] = 'hardlink'
        del new_config['Postprocessing']['createhardlink']
    # Add Default profile if there are none
    if len(new_config['Quality']['Profiles']) == 0:
        new_config['Quality']['Profiles']['Default'] = base_profile
    # Set first profile to 'default': True if none are set
    d = [prof.get('default') for prof in new_config['Quality']['Profiles'].values()]
    if not any(d):
        target = 'Default' if new_config['Quality']['Profiles'].get('Default') else list(new_config['Quality']['Profiles'].keys())[0]
        print('Default Quality Profile not specified, setting *{}* to Default.'.format(target))
        new_config['Quality']['Profiles'][target]['default'] = True
    # Migrate bare-bool torrent indexer entries to the dict format.
    for indexer, setting in new_config['Indexers']['Torrent'].items():
        if isinstance(setting, bool):
            new_config['Indexers']['Torrent'][indexer] = {'enabled': setting, 'url': ''}
    with open(core.CONF_FILE, 'w') as f:
        json.dump(new_config, f, indent=4, sort_keys=True)
    return
def _merge(d, u):
''' Deep merge dictionaries
d (dict): base dict to merge into
u (dict): dict to update from
If any k:v pair in u is not in d, adds k:v pair.
Will not overwrite any values in d, nor will it remove
any k:v pairs in d.
Returns dict
'''
for k, v in u.items():
if isinstance(v, collections.Mapping):
r = _merge(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
def dump(config=None):
    ''' Writes config to file
    config (dict): config contents to write to disk <optional - default core.CONFIG>
    Opposite of load -- writes config to disk.
    The default is resolved at call time: the previous signature
    `config=core.CONFIG` captured the dict object that existed at import
    time, so after load() rebound core.CONFIG to a new dict, dump() with
    no argument silently wrote stale data.
    Returns bool: True on success, False if the file could not be written
    '''
    if config is None:
        config = core.CONFIG
    try:
        with open(core.CONF_FILE, 'w') as f:
            json.dump(config, f, indent=4, sort_keys=True)
    except Exception:
        # Best-effort persistence: failure is reported via the return
        # value rather than raising.
        return False
    return True
def load(config=None):
    ''' Stores entire config as dict to core.CONFIG
    config (dict): config file contents <optional>
    Reads the config from disk when no dict is supplied; callers in this
    module that just wrote the file pass the dict instead to avoid an
    immediate re-read.
    Sanitizes input when necessary before publishing to core.CONFIG.
    Opposite of dump -- loads config into memory.
    Does not return
    '''
    if not config:
        with open(core.CONF_FILE, 'r') as f:
            config = json.load(f)
    # A replacement character that is itself illegal in filenames is
    # discarded rather than used.
    postprocessing = config['Postprocessing']
    if postprocessing['replaceillegal'] in {'"', '*', '?', '<', '>', '|', ':'}:
        postprocessing['replaceillegal'] = ''
    core.CONFIG = config
def restart_scheduler(diff):
    ''' Restarts any scheduled tasks whose settings appear in diff
    diff (dict): modified keys in config file
    Reads diff and reloads the affected scheduler tasks with their new
    times/intervals, enabling or disabling them as configured.
    Does not return
    '''
    now = datetime.datetime.today()
    # -- Update Checker --
    if 'Server' in diff:
        d = diff['Server'].keys()
        if any(i in d for i in ('checkupdates', 'checkupdatefrequency')):
            hr = now.hour + core.CONFIG['Server']['checkupdatefrequency']
            min = now.minute
            interval = core.CONFIG['Server']['checkupdatefrequency'] * 3600
            auto_start = core.CONFIG['Server']['checkupdates']
            core.scheduler_plugin.task_list['Update Checker'].reload(hr, min, interval, auto_start=auto_start)
    # -- Search / watchlist syncs --
    if 'Search' in diff:
        d = diff['Search'].keys()
        if 'rsssyncfrequency' in d:
            hr = now.hour
            min = now.minute + core.CONFIG['Search']['rsssyncfrequency']
            interval = core.CONFIG['Search']['rsssyncfrequency'] * 60
            auto_start = True
            core.scheduler_plugin.task_list['Movie Search'].reload(hr, min, interval, auto_start=auto_start)
        if 'Watchlists' in d:
            # d is rebound to the Watchlists sub-diff for the checks below.
            d = diff['Search']['Watchlists'].keys()
            if any(i in d for i in ('imdbfrequency', 'imdbsync')):
                hr = now.hour
                min = now.minute + core.CONFIG['Search']['Watchlists']['imdbfrequency']
                interval = core.CONFIG['Search']['Watchlists']['imdbfrequency'] * 60
                auto_start = core.CONFIG['Search']['Watchlists']['imdbsync']
                core.scheduler_plugin.task_list['IMDB Sync'].reload(hr, min, interval, auto_start=auto_start)
            if any(i in d for i in ('popularmoviessync', 'popularmovieshour', 'popularmoviesmin')):
                hr = core.CONFIG['Search']['Watchlists']['popularmovieshour']
                min = core.CONFIG['Search']['Watchlists']['popularmoviesmin']
                interval = 24 * 3600
                auto_start = core.CONFIG['Search']['Watchlists']['popularmoviessync']
                core.scheduler_plugin.task_list['PopularMovies Sync'].reload(hr, min, interval, auto_start=auto_start)
            if any(i in d for i in ('traktsync', 'traktfrequency')):
                hr = now.hour
                min = now.minute + core.CONFIG['Search']['Watchlists']['traktfrequency']
                interval = core.CONFIG['Search']['Watchlists']['traktfrequency'] * 60
                auto_start = core.CONFIG['Search']['Watchlists']['traktsync']
                core.scheduler_plugin.task_list['Trakt Sync'].reload(hr, min, interval, auto_start=auto_start)
    # -- Post-processing directory scanner --
    if 'Postprocessing' in diff:
        d = diff['Postprocessing'].get('Scanner', {})
        if any(i in d for i in ('interval', 'enabled')):
            hr = now.hour
            min = now.minute + core.CONFIG['Postprocessing']['Scanner']['interval']
            interval = core.CONFIG['Postprocessing']['Scanner']['interval'] * 60
            auto_start = core.CONFIG['Postprocessing']['Scanner']['enabled']
            core.scheduler_plugin.task_list['PostProcessing Scan'].reload(hr, min, interval, auto_start=auto_start)
    # -- Daily missing-files scan --
    if 'System' in diff:
        d = diff['System']['FileManagement'].keys()
        if any(i in d for i in ('scanmissingfiles', 'scanmissinghour', 'scanmissingmin')):
            hr = core.CONFIG['System']['FileManagement']['scanmissinghour']
            min = core.CONFIG['System']['FileManagement']['scanmissingmin']
            interval = 24 * 3600
            auto_start = core.CONFIG['System']['FileManagement']['scanmissingfiles']
            core.scheduler_plugin.task_list['Missing Files Scan'].reload(hr, min, interval, auto_start=auto_start)
    # -- Torrent status checker: enabled only when some enabled client has
    #    a removal/stall setting that makes polling useful. DelugeRPC and
    #    DelugeWeb don't support removetorrents, so only removestalledfor
    #    counts for them.
    if 'Downloader' in diff and 'Torrent' in diff['Downloader']:
        auto_start = False
        client = None
        if core.CONFIG['Downloader']['Sources']['torrentenabled']:
            for name, config in core.CONFIG['Downloader']['Torrent'].items():
                ignore_remove_torrents = name == 'DelugeRPC' or name == 'DelugeWeb'
                if config['enabled'] and (not ignore_remove_torrents and config.get('removetorrents') or config.get('removestalledfor')):
                    auto_start = True
                    client = name
                    break
        if auto_start:
            d = diff['Downloader']['Torrent'].get(client, {}).keys()
            setting_keys = ['enabled', 'removestalledfor']
            if client != 'DelugeRPC' and client != 'DelugeWeb':
                setting_keys.append('removetorrents')
            if any(i in d for i in setting_keys):
                # First check one hour from now, then hourly.
                hr = (now.hour + 1) % 24
                min = now.minute
                core.scheduler_plugin.task_list['Torrents Status Check'].reload(hr, min, 3600, auto_start=True)
        else:
            core.scheduler_plugin.task_list['Torrents Status Check'].reload(0, 0, 3600, auto_start=False)
'''
base_profile is used as the template quality profile if none is present.
'''
# Template quality profile, parsed from JSON so its structure matches
# base_config.cfg exactly. Each "Sources" entry appears to be
# [enabled, rank]; the rank presumably orders preference --
# TODO(review): confirm against the search/scoring code.
base_profile = json.loads('''
{"Sources": {
    "BluRay-1080P": [
        true,
        1
    ],
    "BluRay-4K": [
        false,
        0
    ],
    "BluRay-720P": [
        true,
        2
    ],
    "CAM-SD": [
        false,
        13
    ],
    "DVD-SD": [
        false,
        9
    ],
    "Screener-1080P": [
        false,
        10
    ],
    "Screener-720P": [
        false,
        11
    ],
    "Telesync-SD": [
        false,
        12
    ],
    "WebDL-1080P": [
        true,
        4
    ],
    "WebDL-4K": [
        false,
        3
    ],
    "WebDL-720P": [
        true,
        5
    ],
    "WebRip-1080P": [
        true,
        7
    ],
    "WebRip-4K": [
        false,
        6
    ],
    "WebRip-720P": [
        true,
        8
    ],
    "Unknown": [
        false,
        14
    ]
},
"ignoredwords": "subs,german,dutch,french,truefrench,danish,swedish,spanish,italian,korean,dubbed,swesub,korsub,dksubs,vain,HC,blurred",
"preferredwords": "",
"prefersmaller": false,
"requiredwords": "",
"scoretitle": true,
"default": true
}
''')
import json
import DeepInstinct
import demistomock as demisto
params = {
"apikey": "key",
"base_url": "https://demisto.poc.deepinstinctweb.com",
"after_id": 0
}
mock_device = {
"id": 1,
"os": "WINDOWS",
"osv": "Windows",
"ip_address": "192.168.88.80",
"mac_address": "00:00:00:00:00:00",
"hostname": "Mock_2020-04-09 17:49:39.408405_1",
"domain": "",
"scanned_files": 0,
"tag": "",
"connectivity_status": "OFFLINE",
"deployment_status": "REGISTERED",
"last_registration": "2020-04-09T14:49:39.722292Z",
"last_contact": "2020-04-09T14:49:39.711487Z",
"distinguished_name": "OU=Organizations & Sites,DC=bancshares,DC=mib",
"group_name": "Windows Default Group",
"group_id": 3,
"policy_name": "Windows Default Policy",
"policy_id": 3,
"log_status": "NA",
"agent_version": "2.3.1.12",
"brain_version": "115wt",
"msp_name": "MSP 1",
"msp_id": 1,
"tenant_name": "Tenant 1",
"tenant_id": 1
}
mock_groups = [
{
"name": "Android Default Group",
"os": "ANDROID",
"policy_id": 1,
"id": 1,
"is_default_group": True,
"msp_name": "MSP 1",
"msp_id": 1
},
{
"name": "iOS Default Group",
"os": "IOS",
"policy_id": 2,
"id": 2,
"is_default_group": True,
"msp_name": "MSP 1",
"msp_id": 1
}
]
mock_policies = [
{
"id": 2,
"os": "IOS",
"name": "iOS Default Policy",
"is_default_policy": True,
"msp_name": "MSP 1",
"msp_id": 1
},
{
"id": 3,
"os": "WINDOWS",
"name": "Windows Default Policy",
"is_default_policy": True,
"msp_name": "MSP 1",
"msp_id": 1
}
]
mock_events = {
"last_id": 2,
"events":
[
{
"file_type": "ZIP",
"file_hash": "d1838b541ff7ffe6489d120d89dfa855665fd2c708491f336c7267069387053f",
"file_archive_hash": "d1838b541ff7ffe6489d120d89dfa855665fd2c708491f336c7267069387053f",
"path": "c:\\temp\\file.exe",
"file_size": 18127052,
"threat_severity": "NONE",
"certificate_thumbprint": None,
"certificate_vendor_name": None,
"deep_classification": None,
"file_status": "NOT_UPLOADED",
"sandbox_status": "NOT_READY_TO_GENERATE",
"model": "FileEvent",
"id": 1,
"device_id": 1,
"type": "STATIC_ANALYSIS",
"trigger": "BRAIN",
"action": "PREVENTED",
"timestamp": "2020-04-09T14:49:41.154850Z",
"insertion_timestamp": "2020-04-09T14:49:41.170331Z",
"close_timestamp": "2020-04-12T14:11:39.145856Z",
"close_trigger": "CLOSED_BY_ADMIN",
"reoccurrence_count": 0,
"last_reoccurrence": None,
"last_action": None,
"status": "CLOSED",
"comment": None,
"recorded_device_info": {
"os": "WINDOWS",
"mac_address": "00:00:00:00:00:00",
"hostname": "Mock_2020-04-09 17:49:39.408405_1",
"tag": "",
"group_name": "Windows Default Group",
"policy_name": "Windows Default Policy",
"tenant_name": "Tenant 1"
},
"msp_name": "MSP 1",
"msp_id": 1,
"tenant_name": "Tenant 1",
"tenant_id": 1
},
{
"file_type": "ZIP",
"file_hash": "edf34902ff17838b4bc709ff15b5265dd49f652ee75a1adf69df9ae5bc52f960",
"file_archive_hash": "edf34902ff17838b4bc709ff15b5265dd49f652ee75a1adf69df9ae5bc52f960",
"path": "c:\\temp\\file2.exe",
"file_size": 15090736,
"threat_severity": "NONE",
"certificate_thumbprint": None,
"certificate_vendor_name": None,
"deep_classification": None,
"file_status": "NOT_UPLOADED",
"sandbox_status": "NOT_READY_TO_GENERATE",
"model": "FileEvent",
"id": 2,
"device_id": 2,
"type": "STATIC_ANALYSIS",
"trigger": "BRAIN",
"action": "PREVENTED",
"timestamp": "2020-04-09T14:49:41.805228Z",
"insertion_timestamp": "2020-04-09T14:49:41.810047Z",
"close_timestamp": None,
"close_trigger": None,
"reoccurrence_count": 0,
"last_reoccurrence": None,
"last_action": None,
"status": "OPEN",
"comment": None,
"recorded_device_info": {
"os": "WINDOWS",
"mac_address": "00:00:00:00:00:00",
"hostname": "Mock_2020-04-09 17:49:41.170765_1",
"tag": "",
"group_name": "Windows Default Group",
"policy_name": "Windows Default Policy",
"tenant_name": "Tenant 1"
},
"msp_name": "MSP 1",
"msp_id": 1,
"tenant_name": "Tenant 1",
"tenant_id": 1
}
]
}
def test_get_device_command(requests_mock, mocker):
mocker.patch.object(demisto, 'params', return_value=params)
mocker.patch.object(demisto, 'args', return_value={'device_id': mock_device['id']})
requests_mock.get("{0}/api/v1/devices/{1}".format(params['base_url'], mock_device['id']), json=mock_device)
mocker.patch.object(demisto, 'results')
DeepInstinct.get_specific_device()
result = demisto.results.call_args[0][0]
assert result['Contents'] == mock_device
def test_get_all_groups(requests_mock, mocker):
mocker.patch.object(demisto, 'params', return_value=params)
requests_mock.get("{0}/api/v1/groups".format(params['base_url']), json=mock_groups)
mocker.patch.object(demisto, 'results')
DeepInstinct.get_all_groups()
result = demisto.results.call_args[0][0]
assert result['Contents'] == mock_groups
def test_get_all_policies(requests_mock, mocker):
mocker.patch.object(demisto, 'params', return_value=params)
requests_mock.get("{0}/api/v1/policies".format(params['base_url']), json=mock_policies)
mocker.patch.object(demisto, 'results')
DeepInstinct.get_all_policies()
result = demisto.results.call_args[0][0]
assert result['Contents'] == mock_policies
def test_get_events(requests_mock, mocker):
mocker.patch.object(demisto, 'params', return_value=params)
mocker.patch.object(demisto, 'args', return_value={'first_event_id': 0})
requests_mock.get("{0}/api/v1/events/?after_id=0".format(params['base_url']), json=mock_events)
mocker.patch.object(demisto, 'results')
DeepInstinct.get_events()
result = demisto.results.call_args[0][0]
assert result['Contents'] == mock_events['events']
def test_fetch_incidents(requests_mock, mocker):
mocker.patch.object(demisto, 'params', return_value=params)
mocker.patch.object(demisto, 'args', return_value={'first_fetch_id': 0})
mocker.patch.object(demisto, 'getLastRun', return_value={'last_id': 0})
requests_mock.get("{0}/api/v1/events/?after_id=0".format(params['base_url']), json=mock_events)
requests_mock.get("{0}/api/v1/events/?after_id=2".format(params['base_url']), json={})
mocker.patch.object(demisto, "incidents")
DeepInstinct.fetch_incidents()
incidents = demisto.incidents.call_args[0][0]
assert len(incidents) == len(mock_events['events'])
assert incidents[0]['rawJSON'] == json.dumps(mock_events['events'][0])
assert incidents[1]['rawJSON'] == json.dumps(mock_events['events'][1]) | Packs/DeepInstinct/Integrations/DeepInstinct/DeepInstinct_test.py | import json
import DeepInstinct
import demistomock as demisto
# Shared fixtures for the DeepInstinct integration tests below.
# Integration instance parameters fed to demisto.params().
params = {
    "apikey": "key",
    "base_url": "https://demisto.poc.deepinstinctweb.com",
    "after_id": 0
}
# Single device payload as returned by GET /api/v1/devices/<id>.
mock_device = {
    "id": 1,
    "os": "WINDOWS",
    "osv": "Windows",
    "ip_address": "192.168.88.80",
    "mac_address": "00:00:00:00:00:00",
    "hostname": "Mock_2020-04-09 17:49:39.408405_1",
    "domain": "",
    "scanned_files": 0,
    "tag": "",
    "connectivity_status": "OFFLINE",
    "deployment_status": "REGISTERED",
    "last_registration": "2020-04-09T14:49:39.722292Z",
    "last_contact": "2020-04-09T14:49:39.711487Z",
    "distinguished_name": "OU=Organizations & Sites,DC=bancshares,DC=mib",
    "group_name": "Windows Default Group",
    "group_id": 3,
    "policy_name": "Windows Default Policy",
    "policy_id": 3,
    "log_status": "NA",
    "agent_version": "2.3.1.12",
    "brain_version": "115wt",
    "msp_name": "MSP 1",
    "msp_id": 1,
    "tenant_name": "Tenant 1",
    "tenant_id": 1
}
# Group list payload for GET /api/v1/groups.
mock_groups = [
    {
        "name": "Android Default Group",
        "os": "ANDROID",
        "policy_id": 1,
        "id": 1,
        "is_default_group": True,
        "msp_name": "MSP 1",
        "msp_id": 1
    },
    {
        "name": "iOS Default Group",
        "os": "IOS",
        "policy_id": 2,
        "id": 2,
        "is_default_group": True,
        "msp_name": "MSP 1",
        "msp_id": 1
    }
]
# Policy list payload for GET /api/v1/policies.
mock_policies = [
    {
        "id": 2,
        "os": "IOS",
        "name": "iOS Default Policy",
        "is_default_policy": True,
        "msp_name": "MSP 1",
        "msp_id": 1
    },
    {
        "id": 3,
        "os": "WINDOWS",
        "name": "Windows Default Policy",
        "is_default_policy": True,
        "msp_name": "MSP 1",
        "msp_id": 1
    }
]
# Paged events payload for GET /api/v1/events/?after_id=N; 'last_id' is
# the cursor the integration uses for the next page.
mock_events = {
    "last_id": 2,
    "events":
    [
        {
            "file_type": "ZIP",
            "file_hash": "d1838b541ff7ffe6489d120d89dfa855665fd2c708491f336c7267069387053f",
            "file_archive_hash": "d1838b541ff7ffe6489d120d89dfa855665fd2c708491f336c7267069387053f",
            "path": "c:\\temp\\file.exe",
            "file_size": 18127052,
            "threat_severity": "NONE",
            "certificate_thumbprint": None,
            "certificate_vendor_name": None,
            "deep_classification": None,
            "file_status": "NOT_UPLOADED",
            "sandbox_status": "NOT_READY_TO_GENERATE",
            "model": "FileEvent",
            "id": 1,
            "device_id": 1,
            "type": "STATIC_ANALYSIS",
            "trigger": "BRAIN",
            "action": "PREVENTED",
            "timestamp": "2020-04-09T14:49:41.154850Z",
            "insertion_timestamp": "2020-04-09T14:49:41.170331Z",
            "close_timestamp": "2020-04-12T14:11:39.145856Z",
            "close_trigger": "CLOSED_BY_ADMIN",
            "reoccurrence_count": 0,
            "last_reoccurrence": None,
            "last_action": None,
            "status": "CLOSED",
            "comment": None,
            "recorded_device_info": {
                "os": "WINDOWS",
                "mac_address": "00:00:00:00:00:00",
                "hostname": "Mock_2020-04-09 17:49:39.408405_1",
                "tag": "",
                "group_name": "Windows Default Group",
                "policy_name": "Windows Default Policy",
                "tenant_name": "Tenant 1"
            },
            "msp_name": "MSP 1",
            "msp_id": 1,
            "tenant_name": "Tenant 1",
            "tenant_id": 1
        },
        {
            "file_type": "ZIP",
            "file_hash": "edf34902ff17838b4bc709ff15b5265dd49f652ee75a1adf69df9ae5bc52f960",
            "file_archive_hash": "edf34902ff17838b4bc709ff15b5265dd49f652ee75a1adf69df9ae5bc52f960",
            "path": "c:\\temp\\file2.exe",
            "file_size": 15090736,
            "threat_severity": "NONE",
            "certificate_thumbprint": None,
            "certificate_vendor_name": None,
            "deep_classification": None,
            "file_status": "NOT_UPLOADED",
            "sandbox_status": "NOT_READY_TO_GENERATE",
            "model": "FileEvent",
            "id": 2,
            "device_id": 2,
            "type": "STATIC_ANALYSIS",
            "trigger": "BRAIN",
            "action": "PREVENTED",
            "timestamp": "2020-04-09T14:49:41.805228Z",
            "insertion_timestamp": "2020-04-09T14:49:41.810047Z",
            "close_timestamp": None,
            "close_trigger": None,
            "reoccurrence_count": 0,
            "last_reoccurrence": None,
            "last_action": None,
            "status": "OPEN",
            "comment": None,
            "recorded_device_info": {
                "os": "WINDOWS",
                "mac_address": "00:00:00:00:00:00",
                "hostname": "Mock_2020-04-09 17:49:41.170765_1",
                "tag": "",
                "group_name": "Windows Default Group",
                "policy_name": "Windows Default Policy",
                "tenant_name": "Tenant 1"
            },
            "msp_name": "MSP 1",
            "msp_id": 1,
            "tenant_name": "Tenant 1",
            "tenant_id": 1
        }
    ]
}
def test_get_device_command(requests_mock, mocker):
    """get_specific_device returns the raw device payload in Contents."""
    device_id = mock_device['id']
    device_url = "{0}/api/v1/devices/{1}".format(params['base_url'], device_id)
    requests_mock.get(device_url, json=mock_device)
    mocker.patch.object(demisto, 'params', return_value=params)
    mocker.patch.object(demisto, 'args', return_value={'device_id': device_id})
    mocker.patch.object(demisto, 'results')
    DeepInstinct.get_specific_device()
    (entry,), _ = demisto.results.call_args
    assert entry['Contents'] == mock_device
def test_get_all_groups(requests_mock, mocker):
    """get_all_groups surfaces the /api/v1/groups payload unchanged."""
    groups_url = "{0}/api/v1/groups".format(params['base_url'])
    requests_mock.get(groups_url, json=mock_groups)
    mocker.patch.object(demisto, 'params', return_value=params)
    mocker.patch.object(demisto, 'results')
    DeepInstinct.get_all_groups()
    (entry,), _ = demisto.results.call_args
    assert entry['Contents'] == mock_groups
def test_get_all_policies(requests_mock, mocker):
    """get_all_policies surfaces the /api/v1/policies payload unchanged."""
    policies_url = "{0}/api/v1/policies".format(params['base_url'])
    requests_mock.get(policies_url, json=mock_policies)
    mocker.patch.object(demisto, 'params', return_value=params)
    mocker.patch.object(demisto, 'results')
    DeepInstinct.get_all_policies()
    (entry,), _ = demisto.results.call_args
    assert entry['Contents'] == mock_policies
def test_get_events(requests_mock, mocker):
    """get_events returns the 'events' list from the paged endpoint."""
    events_url = "{0}/api/v1/events/?after_id=0".format(params['base_url'])
    requests_mock.get(events_url, json=mock_events)
    mocker.patch.object(demisto, 'params', return_value=params)
    mocker.patch.object(demisto, 'args', return_value={'first_event_id': 0})
    mocker.patch.object(demisto, 'results')
    DeepInstinct.get_events()
    (entry,), _ = demisto.results.call_args
    assert entry['Contents'] == mock_events['events']
def test_fetch_incidents(requests_mock, mocker):
    """fetch_incidents pages through events and emits one incident per event.

    The first page (after_id=0) returns two events; the follow-up page
    (after_id=2, the mock's last_id) returns an empty payload to stop
    the pagination loop.
    """
    mocker.patch.object(demisto, 'params', return_value=params)
    mocker.patch.object(demisto, 'args', return_value={'first_fetch_id': 0})
    mocker.patch.object(demisto, 'getLastRun', return_value={'last_id': 0})
    requests_mock.get("{0}/api/v1/events/?after_id=0".format(params['base_url']), json=mock_events)
    requests_mock.get("{0}/api/v1/events/?after_id=2".format(params['base_url']), json={})
    mocker.patch.object(demisto, "incidents")
    DeepInstinct.fetch_incidents()
    incidents = demisto.incidents.call_args[0][0]
    assert len(incidents) == len(mock_events['events'])
    # Each incident carries the originating event serialized as JSON.
    assert incidents[0]['rawJSON'] == json.dumps(mock_events['events'][0])
    assert incidents[1]['rawJSON'] == json.dumps(mock_events['events'][1])
import asyncio
from tt_web import postgresql as db
from . import objects
def impact_from_row(row):
    """Build an objects.Impact from a row of the impacts table."""
    actor = objects.Object(row['actor_type'], row['actor'])
    target = objects.Object(row['target_type'], row['target'])
    # Timestamps are stored tz-aware; the API layer works with naive UTC.
    created_at = row['created_at'].replace(tzinfo=None)
    return objects.Impact(transaction=row['transaction'],
                          actor=actor,
                          target=target,
                          amount=row['amount'],
                          turn=row['created_at_turn'],
                          time=created_at)
def actor_impact_from_row(row):
    """Build an objects.ActorImpact from a row of the actors_impacts table."""
    updated_at = row['updated_at'].replace(tzinfo=None)
    return objects.ActorImpact(actor=objects.Object(row['actor_type'], row['actor']),
                               target=objects.Object(row['target_type'], row['target']),
                               amount=row['amount'],
                               turn=row['updated_at_turn'],
                               time=updated_at)
def target_impact_from_row(row):
    """Build an objects.TargetImpact from a row holding target/amount columns."""
    updated_at = row['updated_at'].replace(tzinfo=None)
    return objects.TargetImpact(target=objects.Object(row['target_type'], row['target']),
                                amount=row['amount'],
                                turn=row['updated_at_turn'],
                                time=updated_at)
async def add_impacts(impacts):
    ''' Persists a batch of impacts atomically (single DB transaction). '''
    await db.transaction(_add_impacts, {'impacts': impacts})
async def _add_impacts(execute, arguments):
    ''' Transaction body: writes the raw impact log plus both aggregates.
    execute: transaction-bound query callable supplied by db.transaction
    arguments (dict): {'impacts': iterable of objects.Impact}
    '''
    # The order of the executed queries and the sorting of items in each
    # pass are required to prevent blocking between concurrent
    # add_impacts and scale_impacts transactions: both must touch rows
    # in the same global order.
    impacts = list(arguments['impacts'])
    for impact in impacts:
        await _add_impact(execute, impact)
    impacts.sort(key=lambda impact: (impact.actor.type, impact.actor.id, impact.target.type, impact.target.id))
    for impact in impacts:
        await _add_actor_impact(execute, impact)
    impacts.sort(key=lambda impact: (impact.target.type, impact.target.id))
    for impact in impacts:
        await _add_target_impact(execute, impact)
async def _add_impact(execute, impact):
    ''' Inserts one row into the append-only impacts log. '''
    sql = '''INSERT INTO impacts (actor_type, actor, target_type, target, amount, transaction, created_at_turn, created_at)
    VALUES (%(actor_type)s, %(actor)s, %(target_type)s, %(target)s, %(amount)s, %(transaction)s, %(turn)s, NOW())'''
    await execute(sql, {'actor_type': impact.actor.type,
                        'actor': impact.actor.id,
                        'target_type': impact.target.type,
                        'target': impact.target.id,
                        'amount': impact.amount,
                        'transaction': impact.transaction,
                        'turn': impact.turn})
async def _add_actor_impact(execute, impact):
    ''' Upserts the per-(actor, target) aggregate: amounts accumulate and
    the update time/turn keep the newest value. '''
    sql = '''INSERT INTO actors_impacts (actor_type, actor, target_type, target, amount, created_at, updated_at, updated_at_turn)
    VALUES (%(actor_type)s, %(actor)s, %(target_type)s, %(target)s, %(amount)s, NOW(), NOW(), %(turn)s)
    ON CONFLICT (actor_type, actor, target_type, target) DO UPDATE
    SET amount = actors_impacts.amount + %(amount)s,
    updated_at = GREATEST(actors_impacts.updated_at, NOW()),
    updated_at_turn = GREATEST(actors_impacts.updated_at_turn, %(turn)s)'''
    await execute(sql, {'actor_type': impact.actor.type,
                        'actor': impact.actor.id,
                        'target_type': impact.target.type,
                        'target': impact.target.id,
                        'amount': impact.amount,
                        'turn': impact.turn})
async def _add_target_impact(execute, impact):
    ''' Upserts the per-target aggregate: amounts accumulate and the
    update time/turn keep the newest value. '''
    sql = '''INSERT INTO targets_impacts (target_type, target, amount, created_at, updated_at, updated_at_turn)
    VALUES (%(target_type)s, %(target)s, %(amount)s, NOW(), NOW(), %(turn)s)
    ON CONFLICT (target_type, target) DO UPDATE
    SET amount = targets_impacts.amount + %(amount)s,
    updated_at = GREATEST(targets_impacts.updated_at, NOW()),
    updated_at_turn = GREATEST(targets_impacts.updated_at_turn, %(turn)s)'''
    await execute(sql, {'target_type': impact.target.type,
                        'target': impact.target.id,
                        'amount': impact.amount,
                        'turn': impact.turn})
async def last_impacts(limit):
    ''' Returns the *limit* most recently created Impact records. '''
    results = await db.sql('SELECT * FROM impacts ORDER BY created_at DESC LIMIT %(limit)s', {'limit': limit})
    return [impact_from_row(row) for row in results]
async def last_actor_impacts(actor, limit):
    ''' Returns the *limit* newest Impact records produced by *actor*. '''
    results = await db.sql('''SELECT * FROM impacts
    WHERE actor_type=%(actor_type)s AND actor=%(actor)s
    ORDER BY created_at DESC LIMIT %(limit)s''',
                           {'limit': limit,
                            'actor_type': actor.type,
                            'actor': actor.id})
    return [impact_from_row(row) for row in results]
async def last_target_impacts(target, limit):
    ''' Returns the *limit* newest Impact records received by *target*. '''
    results = await db.sql('''SELECT * FROM impacts
    WHERE target_type=%(target_type)s AND target=%(target)s
    ORDER BY created_at DESC LIMIT %(limit)s''',
                           {'limit': limit,
                            'target_type': target.type,
                            'target': target.id})
    return [impact_from_row(row) for row in results]
async def last_actor_target_impacts(actor, target, limit):
    ''' Returns the *limit* newest Impact records from *actor* on *target*. '''
    results = await db.sql('''SELECT * FROM impacts
    WHERE actor_type=%(actor_type)s AND
    actor=%(actor)s AND
    target_type=%(target_type)s AND
    target=%(target)s
    ORDER BY created_at DESC LIMIT %(limit)s''',
                           {'limit': limit,
                            'actor_type': actor.type,
                            'actor': actor.id,
                            'target_type': target.type,
                            'target': target.id})
    return [impact_from_row(row) for row in results]
async def get_targets_impacts(targets):
    ''' Returns TargetImpact records for the given targets.
    targets (iterable of objects.Object)
    With an empty *targets* every targets_impacts row is returned
    (preserves the original TRUE-condition behaviour).
    '''
    # Values are passed as query parameters instead of being interpolated
    # into the SQL text: the original .format() call produced unquoted
    # (invalid) SQL for string ids and was an injection vector.
    conditions = []
    arguments = {}
    for i, target in enumerate(targets):
        conditions.append('(target_type=%(target_type_{0})s AND target=%(target_{0})s)'.format(i))
        arguments['target_type_{0}'.format(i)] = target.type
        arguments['target_{0}'.format(i)] = target.id
    sql = 'SELECT * FROM targets_impacts WHERE {conditions}'.format(
        conditions=' OR '.join(conditions) if conditions else 'TRUE')
    results = await db.sql(sql, arguments)
    return [target_impact_from_row(row) for row in results]
async def get_impacters_ratings(targets, actor_types, limit):
    ''' Returns {target: [ActorImpact, ...]} top impacters per target.
    targets (iterable of objects.Object)
    actor_types (iterable): actor types to include
    limit (int): maximum impacters returned per target
    Returns {} when either targets or actor_types is empty.
    '''
    if not targets:
        return {}
    if not actor_types:
        return {}
    targets = frozenset(targets)
    # asyncio.gather propagates the first exception directly; the old
    # asyncio.wait(FIRST_EXCEPTION) + task.result() pattern left pending
    # tasks running on failure, and passing bare coroutines to
    # asyncio.wait() is deprecated (removed in modern Python).
    pairs = await asyncio.gather(*(get_impacters_target_ratings(target, actor_types, limit)
                                   for target in targets))
    return dict(pairs)
async def get_actor_impacts(actor, target_types):
    ''' Returns this actor's accumulated impact per target.
    actor (objects.Object)
    target_types (iterable): target types to include
    NOTE(review): rows come from actors_impacts but are converted with
    target_impact_from_row, so the actor fields are dropped (the actor is
    implied by the argument) -- confirm this is intended rather than a
    mixed-up converter.
    '''
    sql = '''SELECT * FROM actors_impacts
    WHERE actor_type = %(actor_type)s AND
    actor = %(actor_id)s AND
    target_type IN %(target_types)s'''
    results = await db.sql(sql, {'actor_type': actor.type,
                                 'actor_id': actor.id,
                                 'target_types': tuple(target_types)})
    return [target_impact_from_row(row) for row in results]
async def get_impacters_target_ratings(target, actor_types, limit):
    ''' Returns (target, [ActorImpact, ...]) -- the top *limit* impacters
    of *target* among the given actor types, ordered by amount descending.
    The (target, list) pair shape lets callers build a dict directly.
    '''
    sql = '''SELECT * FROM actors_impacts
    WHERE actor_type IN %(actor_types)s AND
    target_type = %(target_type)s AND
    target = %(target_id)s
    ORDER BY amount DESC LIMIT %(limit)s'''
    results = await db.sql(sql, {'target_type': target.type,
                                 'target_id': target.id,
                                 'actor_types': tuple(actor_types),
                                 'limit': limit})
    return target, [actor_impact_from_row(row) for row in results]
async def scale_impacts(target_types, scale):
    ''' Multiplies every stored aggregate amount for the given target
    types by *scale* in a single transaction. No-op for empty input. '''
    if not target_types:
        return
    await db.transaction(_scale_impacts, {'target_types': target_types,
                                          'scale': scale})
async def _scale_impacts(execute, arguments):
    ''' Transaction body for scale_impacts: scales both aggregate tables.
    The CEIL/FLOOR pair rounds the scaled amount toward zero for both
    negative and positive values.
    '''
    target_types = tuple(arguments['target_types'])
    scale = arguments['scale']
    # The order of the executed queries and the ORDER BY ... FOR UPDATE
    # row locking are required to prevent blocking between concurrent
    # add_impacts and scale_impacts transactions: both must lock rows in
    # the same global order.
    await execute('''UPDATE actors_impacts
    SET amount =
    CASE
    WHEN amount < 0 THEN CEIL(amount * %(scale)s)
    ELSE FLOOR(amount * %(scale)s)
    END
    WHERE id IN (SELECT id FROM actors_impacts
    WHERE target_type IN %(target_types)s
    ORDER BY (actor_type, actor, target_type, target)
    FOR UPDATE)''',
                  {'target_types': target_types,
                   'scale': scale})
    await execute('''UPDATE targets_impacts
    SET amount =
    CASE
    WHEN amount < 0 THEN CEIL(amount * %(scale)s)
    ELSE FLOOR(amount * %(scale)s)
    END
    WHERE id IN (SELECT id FROM targets_impacts
    WHERE target_type IN %(target_types)s
    ORDER BY (target_type, target)
    FOR UPDATE)''',
                  {'target_types': target_types,
                   'scale': scale})
async def clean_database():
    ''' Removes every impact record: both aggregate tables and the raw
    impacts log. Intended for tests/maintenance.
    '''
    await db.sql('DELETE FROM targets_impacts')
    await db.sql('DELETE FROM actors_impacts')
    # The original last statement was fused with dataset-row junk
    # ("| src/... | import asyncio"), which made the line invalid Python;
    # restored to the plain DELETE.
    await db.sql('DELETE FROM impacts')
from tt_web import postgresql as db
from . import objects
def impact_from_row(row):
return objects.Impact(transaction=row['transaction'],
actor=objects.Object(row['actor_type'], row['actor']),
target=objects.Object(row['target_type'], row['target']),
amount=row['amount'],
turn=row['created_at_turn'],
time=row['created_at'].replace(tzinfo=None))
def actor_impact_from_row(row):
return objects.ActorImpact(actor=objects.Object(row['actor_type'], row['actor']),
target=objects.Object(row['target_type'], row['target']),
amount=row['amount'],
turn=row['updated_at_turn'],
time=row['updated_at'].replace(tzinfo=None))
def target_impact_from_row(row):
return objects.TargetImpact(target=objects.Object(row['target_type'], row['target']),
amount=row['amount'],
turn=row['updated_at_turn'],
time=row['updated_at'].replace(tzinfo=None))
async def add_impacts(impacts):
await db.transaction(_add_impacts, {'impacts': impacts})
async def _add_impacts(execute, arguments):
# order of executed queries and sorting of items in quiries
# required to prevent blocking with add_impacts and scale_impacts functions
impacts = list(arguments['impacts'])
for impact in impacts:
await _add_impact(execute, impact)
impacts.sort(key=lambda impact: (impact.actor.type, impact.actor.id, impact.target.type, impact.target.id))
for impact in impacts:
await _add_actor_impact(execute, impact)
impacts.sort(key=lambda impact: (impact.target.type, impact.target.id))
for impact in impacts:
await _add_target_impact(execute, impact)
async def _add_impact(execute, impact):
sql = '''INSERT INTO impacts (actor_type, actor, target_type, target, amount, transaction, created_at_turn, created_at)
VALUES (%(actor_type)s, %(actor)s, %(target_type)s, %(target)s, %(amount)s, %(transaction)s, %(turn)s, NOW())'''
await execute(sql, {'actor_type': impact.actor.type,
'actor': impact.actor.id,
'target_type': impact.target.type,
'target': impact.target.id,
'amount': impact.amount,
'transaction': impact.transaction,
'turn': impact.turn})
async def _add_actor_impact(execute, impact):
sql = '''INSERT INTO actors_impacts (actor_type, actor, target_type, target, amount, created_at, updated_at, updated_at_turn)
VALUES (%(actor_type)s, %(actor)s, %(target_type)s, %(target)s, %(amount)s, NOW(), NOW(), %(turn)s)
ON CONFLICT (actor_type, actor, target_type, target) DO UPDATE
SET amount = actors_impacts.amount + %(amount)s,
updated_at = GREATEST(actors_impacts.updated_at, NOW()),
updated_at_turn = GREATEST(actors_impacts.updated_at_turn, %(turn)s)'''
await execute(sql, {'actor_type': impact.actor.type,
'actor': impact.actor.id,
'target_type': impact.target.type,
'target': impact.target.id,
'amount': impact.amount,
'turn': impact.turn})
async def _add_target_impact(execute, impact):
sql = '''INSERT INTO targets_impacts (target_type, target, amount, created_at, updated_at, updated_at_turn)
VALUES (%(target_type)s, %(target)s, %(amount)s, NOW(), NOW(), %(turn)s)
ON CONFLICT (target_type, target) DO UPDATE
SET amount = targets_impacts.amount + %(amount)s,
updated_at = GREATEST(targets_impacts.updated_at, NOW()),
updated_at_turn = GREATEST(targets_impacts.updated_at_turn, %(turn)s)'''
await execute(sql, {'target_type': impact.target.type,
'target': impact.target.id,
'amount': impact.amount,
'turn': impact.turn})
async def last_impacts(limit):
results = await db.sql('SELECT * FROM impacts ORDER BY created_at DESC LIMIT %(limit)s', {'limit': limit})
return [impact_from_row(row) for row in results]
async def last_actor_impacts(actor, limit):
results = await db.sql('''SELECT * FROM impacts
WHERE actor_type=%(actor_type)s AND actor=%(actor)s
ORDER BY created_at DESC LIMIT %(limit)s''',
{'limit': limit,
'actor_type': actor.type,
'actor': actor.id})
return [impact_from_row(row) for row in results]
async def last_target_impacts(target, limit):
results = await db.sql('''SELECT * FROM impacts
WHERE target_type=%(target_type)s AND target=%(target)s
ORDER BY created_at DESC LIMIT %(limit)s''',
{'limit': limit,
'target_type': target.type,
'target': target.id})
return [impact_from_row(row) for row in results]
async def last_actor_target_impacts(actor, target, limit):
results = await db.sql('''SELECT * FROM impacts
WHERE actor_type=%(actor_type)s AND
actor=%(actor)s AND
target_type=%(target_type)s AND
target=%(target)s
ORDER BY created_at DESC LIMIT %(limit)s''',
{'limit': limit,
'actor_type': actor.type,
'actor': actor.id,
'target_type': target.type,
'target': target.id})
return [impact_from_row(row) for row in results]
async def get_targets_impacts(targets):
sql = 'SELECT * FROM targets_impacts WHERE {conditions}'
conditions = []
for target in targets:
conditions.append('(target_type={} AND target={})'.format(target.type, target.id))
sql = sql.format(conditions=' OR '.join(conditions) if conditions else 'TRUE')
results = await db.sql(sql)
return [target_impact_from_row(row) for row in results]
async def get_impacters_ratings(targets, actor_types, limit):
if not targets:
return {}
if not actor_types:
return {}
targets = frozenset(targets)
tasks = [get_impacters_target_ratings(target, actor_types, limit) for target in targets]
done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_EXCEPTION)
return dict(task.result() for task in done)
async def get_actor_impacts(actor, target_types):
sql = '''SELECT * FROM actors_impacts
WHERE actor_type = %(actor_type)s AND
actor = %(actor_id)s AND
target_type IN %(target_types)s'''
results = await db.sql(sql, {'actor_type': actor.type,
'actor_id': actor.id,
'target_types': tuple(target_types)})
return [target_impact_from_row(row) for row in results]
async def get_impacters_target_ratings(target, actor_types, limit):
sql = '''SELECT * FROM actors_impacts
WHERE actor_type IN %(actor_types)s AND
target_type = %(target_type)s AND
target = %(target_id)s
ORDER BY amount DESC LIMIT %(limit)s'''
results = await db.sql(sql, {'target_type': target.type,
'target_id': target.id,
'actor_types': tuple(actor_types),
'limit': limit})
return target, [actor_impact_from_row(row) for row in results]
async def scale_impacts(target_types, scale):
if not target_types:
return
await db.transaction(_scale_impacts, {'target_types': target_types,
'scale': scale})
async def _scale_impacts(execute, arguments):
target_types = tuple(arguments['target_types'])
scale = arguments['scale']
# order of executed queries and sorting of items in quiries
# required to prevent blocking with add_impacts and scale_impacts functions
await execute('''UPDATE actors_impacts
SET amount =
CASE
WHEN amount < 0 THEN CEIL(amount * %(scale)s)
ELSE FLOOR(amount * %(scale)s)
END
WHERE id IN (SELECT id FROM actors_impacts
WHERE target_type IN %(target_types)s
ORDER BY (actor_type, actor, target_type, target)
FOR UPDATE)''',
{'target_types': target_types,
'scale': scale})
await execute('''UPDATE targets_impacts
SET amount =
CASE
WHEN amount < 0 THEN CEIL(amount * %(scale)s)
ELSE FLOOR(amount * %(scale)s)
END
WHERE id IN (SELECT id FROM targets_impacts
WHERE target_type IN %(target_types)s
ORDER BY (target_type, target)
FOR UPDATE)''',
{'target_types': target_types,
'scale': scale})
async def clean_database():
await db.sql('DELETE FROM targets_impacts')
await db.sql('DELETE FROM actors_impacts')
await db.sql('DELETE FROM impacts') | 0.385375 | 0.177063 |
import argparse
import functools
import json
import os
import random
import subprocess
import sys
from multiprocessing import Pool
from pathlib import Path
from typing import Dict, Iterable, List, NamedTuple
import typing
import test_target
from test_target import TestTarget
import testvm
from test_config import CRATE_OPTIONS, TestOption, BUILD_FEATURES
from check_code_hygiene import (
has_platform_dependent_code,
has_crlf_line_endings,
)
USAGE = """\
Runs tests for crosvm locally, in a vm or on a remote device.
To build and run all tests locally:
$ ./tools/run_tests --target=host
To cross-compile tests for aarch64 and run them on a built-in VM:
$ ./tools/run_tests --target=vm:aarch64
The VM will be automatically set up and booted. It will remain running between
test runs and can be managed with `./tools/aarch64vm`.
Tests can also be run on a remote device via SSH. However it is your
responsiblity that runtime dependencies of crosvm are provided.
$ ./tools/run_tests --target=ssh:hostname
The default test target can be managed with `./tools/set_test_target`
To see full build and test output, add the `-v` or `--verbose` flag.
"""
Arch = test_target.Arch
# Print debug info. Overriden by -v
VERBOSE = False
# Timeouts for tests to prevent them from running too long.
TEST_TIMEOUT_SECS = 60
LARGE_TEST_TIMEOUT_SECS = 120
# Double the timeout if the test is running in an emulation environment, which will be
# significantly slower than native environments.
EMULATION_TIMEOUT_MULTIPLIER = 2
# Number of parallel processes for executing tests.
PARALLELISM = 4
CROSVM_ROOT = Path(__file__).parent.parent.parent.resolve()
COMMON_ROOT = CROSVM_ROOT / "common"
class ExecutableResults(object):
"""Container for results of a test executable."""
def __init__(self, name: str, success: bool, test_log: str):
self.name = name
self.success = success
self.test_log = test_log
class Executable(NamedTuple):
"""Container for info about an executable generated by cargo build/test."""
binary_path: Path
crate_name: str
cargo_target: str
kind: str
is_test: bool
is_fresh: bool
arch: Arch
@property
def name(self):
return f"{self.crate_name}:{self.cargo_target}"
class Crate(NamedTuple):
"""Container for info about crate."""
name: str
path: Path
def get_workspace_excludes(target_arch: Arch):
for crate, options in CRATE_OPTIONS.items():
if TestOption.DO_NOT_BUILD in options:
yield crate
elif TestOption.DO_NOT_BUILD_X86_64 in options and target_arch == "x86_64":
yield crate
elif TestOption.DO_NOT_BUILD_AARCH64 in options and target_arch == "aarch64":
yield crate
elif TestOption.DO_NOT_BUILD_ARMHF in options and target_arch == "armhf":
yield crate
def should_run_executable(executable: Executable, target_arch: Arch):
options = CRATE_OPTIONS.get(executable.crate_name, [])
if TestOption.DO_NOT_RUN in options:
return False
if TestOption.DO_NOT_RUN_X86_64 in options and target_arch == "x86_64":
return False
if TestOption.DO_NOT_RUN_AARCH64 in options and target_arch == "aarch64":
return False
if TestOption.DO_NOT_RUN_ARMHF in options and target_arch == "armhf":
return False
if TestOption.DO_NOT_RUN_ON_FOREIGN_KERNEL in options and target_arch != executable.arch:
return False
return True
def list_common_crates(target_arch: Arch):
excluded_crates = list(get_workspace_excludes(target_arch))
for path in COMMON_ROOT.glob("**/Cargo.toml"):
if not path.parent.name in excluded_crates:
yield Crate(name=path.parent.name, path=path.parent)
def exclude_crosvm(target_arch: Arch):
return "crosvm" in get_workspace_excludes(target_arch)
def cargo(
cargo_command: str, cwd: Path, flags: list[str], env: dict[str, str], build_arch: Arch
) -> Iterable[Executable]:
"""
Executes a cargo command and returns the list of test binaries generated.
The build log will be hidden by default and only printed if the build
fails. In VERBOSE mode the output will be streamed directly.
Note: Exits the program if the build fails.
"""
cmd = [
"cargo",
cargo_command,
"--message-format=json-diagnostic-rendered-ansi",
*flags,
]
if VERBOSE:
print("$", " ".join(cmd))
process = subprocess.Popen(
cmd,
cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
env=env,
)
messages: List[str] = []
# Read messages as cargo is running.
assert process.stdout
for line in iter(process.stdout.readline, ""):
# any non-json line is a message to print
if not line.startswith("{"):
if VERBOSE:
print(line.rstrip())
messages.append(line.rstrip())
continue
json_line = json.loads(line)
# 'message' type lines will be printed
if json_line.get("message"):
message = json_line.get("message").get("rendered")
if VERBOSE:
print(message)
messages.append(message)
# Collect info about test executables produced
elif json_line.get("executable"):
yield Executable(
Path(json_line.get("executable")),
crate_name=json_line.get("package_id", "").split(" ")[0],
cargo_target=json_line.get("target").get("name"),
kind=json_line.get("target").get("kind")[0],
is_test=json_line.get("profile", {}).get("test", False),
is_fresh=json_line.get("fresh", False),
arch=build_arch,
)
if process.wait() != 0:
if not VERBOSE:
for message in messages:
print(message)
sys.exit(-1)
def cargo_build_executables(
flags: list[str],
build_arch: Arch,
cwd: Path = Path("."),
env: Dict[str, str] = {},
) -> Iterable[Executable]:
"""Build all test binaries for the given list of crates."""
# Run build first, to make sure compiler errors of building non-test
# binaries are caught.
yield from cargo("build", cwd, flags, env, build_arch)
# Build all tests and return the collected executables
yield from cargo("test", cwd, ["--no-run", *flags], env, build_arch)
def build_common_crate(build_env: dict[str, str], build_arch: Arch, crate: Crate):
print(f"Building tests for: common/{crate.name}")
return list(cargo_build_executables([], build_arch, env=build_env, cwd=crate.path))
def build_all_binaries(target: TestTarget, build_arch: Arch):
"""Discover all crates and build them."""
build_env = os.environ.copy()
build_env.update(test_target.get_cargo_env(target, build_arch))
print("Building crosvm workspace")
yield from cargo_build_executables(
[
"--features=" + BUILD_FEATURES[build_arch],
"--verbose",
"--workspace",
*[f"--exclude={crate}" for crate in get_workspace_excludes(build_arch)],
],
build_arch,
cwd=CROSVM_ROOT,
env=build_env,
)
with Pool(PARALLELISM) as pool:
for executables in pool.imap(
functools.partial(build_common_crate, build_env, build_arch),
list_common_crates(build_arch),
):
yield from executables
def is_emulated(target: TestTarget, executable: Executable) -> bool:
if target.is_host:
# User-space emulation can run foreing-arch executables on the host.
return executable.arch != target.arch
elif target.vm:
return target.vm == "aarch64"
return False
def get_test_timeout(target: TestTarget, executable: Executable):
large = TestOption.LARGE in CRATE_OPTIONS.get(executable.crate_name, [])
timeout = LARGE_TEST_TIMEOUT_SECS if large else TEST_TIMEOUT_SECS
if is_emulated(target, executable):
return timeout * EMULATION_TIMEOUT_MULTIPLIER
else:
return timeout
def execute_test(target: TestTarget, executable: Executable):
"""
Executes a single test on the given test targed
Note: This function is run in a multiprocessing.Pool.
Test output is hidden unless the test fails or VERBOSE mode is enabled.
"""
options = CRATE_OPTIONS.get(executable.crate_name, [])
args: list[str] = []
if TestOption.SINGLE_THREADED in options:
args += ["--test-threads=1"]
# proc-macros and their tests are executed on the host.
if executable.kind == "proc-macro":
target = TestTarget("host")
if VERBOSE:
print(f"Running test {executable.name} on {target}...")
try:
# Pipe stdout/err to be printed in the main process if needed.
test_process = test_target.exec_file_on_target(
target,
executable.binary_path,
args=args,
timeout=get_test_timeout(target, executable),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
return ExecutableResults(
executable.name,
test_process.returncode == 0,
test_process.stdout,
)
except subprocess.TimeoutExpired as e:
# Append a note about the timeout to the stdout of the process.
msg = f"\n\nProcess timed out after {e.timeout}s\n"
return ExecutableResults(
executable.name,
False,
e.stdout.decode("utf-8") + msg,
)
def execute_all(
executables: list[Executable],
target: test_target.TestTarget,
repeat: int,
):
"""Executes all tests in the `executables` list in parallel."""
executables = [e for e in executables if should_run_executable(e, target.arch)]
if repeat > 1:
executables = executables * repeat
random.shuffle(executables)
sys.stdout.write(f"Running {len(executables)} test binaries on {target}")
sys.stdout.flush()
with Pool(PARALLELISM) as pool:
for result in pool.imap(functools.partial(execute_test, target), executables):
if not result.success or VERBOSE:
msg = "passed" if result.success else "failed"
print()
print("--------------------------------")
print("-", result.name, msg)
print("--------------------------------")
print(result.test_log)
else:
sys.stdout.write(".")
sys.stdout.flush()
yield result
print()
def find_crosvm_binary(executables: list[Executable]):
for executable in executables:
if not executable.is_test and executable.cargo_target == "crosvm":
return executable
raise Exception("Cannot find crosvm executable")
def main():
parser = argparse.ArgumentParser(usage=USAGE)
parser.add_argument(
"--verbose",
"-v",
action="store_true",
default=False,
help="Print all test output.",
)
parser.add_argument(
"--target",
help="Execute tests on the selected target. See ./tools/set_test_target",
)
parser.add_argument(
"--arch",
choices=typing.get_args(Arch),
help="Target architecture to build for.",
)
parser.add_argument(
"--build-only",
action="store_true",
)
parser.add_argument(
"--repeat",
type=int,
default=1,
help="Repeat each test N times to check for flakes.",
)
args = parser.parse_args()
global VERBOSE
VERBOSE = args.verbose # type: ignore
os.environ["RUST_BACKTRACE"] = "1"
target = (
test_target.TestTarget(args.target) if args.target else test_target.TestTarget.default()
)
print("Test target:", target)
build_arch = args.arch or target.arch
print("Building for architecture:", build_arch)
# Start booting VM while we build
if target.vm:
testvm.build_if_needed(target.vm)
testvm.up(target.vm)
hygiene, error = has_platform_dependent_code(Path("common/sys_util_core"))
if not hygiene:
print("Error: Platform dependent code not allowed in sys_util_core crate.")
print("Offending line: " + error)
sys.exit(-1)
crlf_endings = has_crlf_line_endings()
if crlf_endings:
print("Error: Following files have crlf(dos) line encodings")
print(*crlf_endings)
sys.exit(-1)
executables = list(build_all_binaries(target, build_arch))
if args.build_only:
print("Not running tests as requested.")
sys.exit(0)
# Upload dependencies plus the main crosvm binary for integration tests if the
# crosvm binary is not excluded from testing.
extra_files = (
[find_crosvm_binary(executables).binary_path] if not exclude_crosvm(build_arch) else []
)
test_target.prepare_target(target, extra_files=extra_files)
# Execute all test binaries
test_executables = [e for e in executables if e.is_test]
all_results = list(execute_all(test_executables, target, repeat=args.repeat))
failed = [r for r in all_results if not r.success]
if len(failed) == 0:
print("All tests passed.")
sys.exit(0)
else:
print(f"{len(failed)} of {len(all_results)} tests failed:")
for result in failed:
print(f" {result.name}")
sys.exit(-1)
if __name__ == "__main__":
try:
main()
except subprocess.CalledProcessError as e:
print("Command failed:", e.cmd)
print(e.stdout)
print(e.stderr)
sys.exit(-1) | tools/impl/test_runner.py |
import argparse
import functools
import json
import os
import random
import subprocess
import sys
from multiprocessing import Pool
from pathlib import Path
from typing import Dict, Iterable, List, NamedTuple
import typing
import test_target
from test_target import TestTarget
import testvm
from test_config import CRATE_OPTIONS, TestOption, BUILD_FEATURES
from check_code_hygiene import (
has_platform_dependent_code,
has_crlf_line_endings,
)
USAGE = """\
Runs tests for crosvm locally, in a vm or on a remote device.
To build and run all tests locally:
$ ./tools/run_tests --target=host
To cross-compile tests for aarch64 and run them on a built-in VM:
$ ./tools/run_tests --target=vm:aarch64
The VM will be automatically set up and booted. It will remain running between
test runs and can be managed with `./tools/aarch64vm`.
Tests can also be run on a remote device via SSH. However it is your
responsiblity that runtime dependencies of crosvm are provided.
$ ./tools/run_tests --target=ssh:hostname
The default test target can be managed with `./tools/set_test_target`
To see full build and test output, add the `-v` or `--verbose` flag.
"""
Arch = test_target.Arch
# Print debug info. Overriden by -v
VERBOSE = False
# Timeouts for tests to prevent them from running too long.
TEST_TIMEOUT_SECS = 60
LARGE_TEST_TIMEOUT_SECS = 120
# Double the timeout if the test is running in an emulation environment, which will be
# significantly slower than native environments.
EMULATION_TIMEOUT_MULTIPLIER = 2
# Number of parallel processes for executing tests.
PARALLELISM = 4
CROSVM_ROOT = Path(__file__).parent.parent.parent.resolve()
COMMON_ROOT = CROSVM_ROOT / "common"
class ExecutableResults(object):
"""Container for results of a test executable."""
def __init__(self, name: str, success: bool, test_log: str):
self.name = name
self.success = success
self.test_log = test_log
class Executable(NamedTuple):
"""Container for info about an executable generated by cargo build/test."""
binary_path: Path
crate_name: str
cargo_target: str
kind: str
is_test: bool
is_fresh: bool
arch: Arch
@property
def name(self):
return f"{self.crate_name}:{self.cargo_target}"
class Crate(NamedTuple):
"""Container for info about crate."""
name: str
path: Path
def get_workspace_excludes(target_arch: Arch):
for crate, options in CRATE_OPTIONS.items():
if TestOption.DO_NOT_BUILD in options:
yield crate
elif TestOption.DO_NOT_BUILD_X86_64 in options and target_arch == "x86_64":
yield crate
elif TestOption.DO_NOT_BUILD_AARCH64 in options and target_arch == "aarch64":
yield crate
elif TestOption.DO_NOT_BUILD_ARMHF in options and target_arch == "armhf":
yield crate
def should_run_executable(executable: Executable, target_arch: Arch):
options = CRATE_OPTIONS.get(executable.crate_name, [])
if TestOption.DO_NOT_RUN in options:
return False
if TestOption.DO_NOT_RUN_X86_64 in options and target_arch == "x86_64":
return False
if TestOption.DO_NOT_RUN_AARCH64 in options and target_arch == "aarch64":
return False
if TestOption.DO_NOT_RUN_ARMHF in options and target_arch == "armhf":
return False
if TestOption.DO_NOT_RUN_ON_FOREIGN_KERNEL in options and target_arch != executable.arch:
return False
return True
def list_common_crates(target_arch: Arch):
excluded_crates = list(get_workspace_excludes(target_arch))
for path in COMMON_ROOT.glob("**/Cargo.toml"):
if not path.parent.name in excluded_crates:
yield Crate(name=path.parent.name, path=path.parent)
def exclude_crosvm(target_arch: Arch):
return "crosvm" in get_workspace_excludes(target_arch)
def cargo(
cargo_command: str, cwd: Path, flags: list[str], env: dict[str, str], build_arch: Arch
) -> Iterable[Executable]:
"""
Executes a cargo command and returns the list of test binaries generated.
The build log will be hidden by default and only printed if the build
fails. In VERBOSE mode the output will be streamed directly.
Note: Exits the program if the build fails.
"""
cmd = [
"cargo",
cargo_command,
"--message-format=json-diagnostic-rendered-ansi",
*flags,
]
if VERBOSE:
print("$", " ".join(cmd))
process = subprocess.Popen(
cmd,
cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
env=env,
)
messages: List[str] = []
# Read messages as cargo is running.
assert process.stdout
for line in iter(process.stdout.readline, ""):
# any non-json line is a message to print
if not line.startswith("{"):
if VERBOSE:
print(line.rstrip())
messages.append(line.rstrip())
continue
json_line = json.loads(line)
# 'message' type lines will be printed
if json_line.get("message"):
message = json_line.get("message").get("rendered")
if VERBOSE:
print(message)
messages.append(message)
# Collect info about test executables produced
elif json_line.get("executable"):
yield Executable(
Path(json_line.get("executable")),
crate_name=json_line.get("package_id", "").split(" ")[0],
cargo_target=json_line.get("target").get("name"),
kind=json_line.get("target").get("kind")[0],
is_test=json_line.get("profile", {}).get("test", False),
is_fresh=json_line.get("fresh", False),
arch=build_arch,
)
if process.wait() != 0:
if not VERBOSE:
for message in messages:
print(message)
sys.exit(-1)
def cargo_build_executables(
flags: list[str],
build_arch: Arch,
cwd: Path = Path("."),
env: Dict[str, str] = {},
) -> Iterable[Executable]:
"""Build all test binaries for the given list of crates."""
# Run build first, to make sure compiler errors of building non-test
# binaries are caught.
yield from cargo("build", cwd, flags, env, build_arch)
# Build all tests and return the collected executables
yield from cargo("test", cwd, ["--no-run", *flags], env, build_arch)
def build_common_crate(build_env: dict[str, str], build_arch: Arch, crate: Crate):
print(f"Building tests for: common/{crate.name}")
return list(cargo_build_executables([], build_arch, env=build_env, cwd=crate.path))
def build_all_binaries(target: TestTarget, build_arch: Arch):
"""Discover all crates and build them."""
build_env = os.environ.copy()
build_env.update(test_target.get_cargo_env(target, build_arch))
print("Building crosvm workspace")
yield from cargo_build_executables(
[
"--features=" + BUILD_FEATURES[build_arch],
"--verbose",
"--workspace",
*[f"--exclude={crate}" for crate in get_workspace_excludes(build_arch)],
],
build_arch,
cwd=CROSVM_ROOT,
env=build_env,
)
with Pool(PARALLELISM) as pool:
for executables in pool.imap(
functools.partial(build_common_crate, build_env, build_arch),
list_common_crates(build_arch),
):
yield from executables
def is_emulated(target: TestTarget, executable: Executable) -> bool:
if target.is_host:
# User-space emulation can run foreing-arch executables on the host.
return executable.arch != target.arch
elif target.vm:
return target.vm == "aarch64"
return False
def get_test_timeout(target: TestTarget, executable: Executable):
large = TestOption.LARGE in CRATE_OPTIONS.get(executable.crate_name, [])
timeout = LARGE_TEST_TIMEOUT_SECS if large else TEST_TIMEOUT_SECS
if is_emulated(target, executable):
return timeout * EMULATION_TIMEOUT_MULTIPLIER
else:
return timeout
def execute_test(target: TestTarget, executable: Executable):
"""
Executes a single test on the given test targed
Note: This function is run in a multiprocessing.Pool.
Test output is hidden unless the test fails or VERBOSE mode is enabled.
"""
options = CRATE_OPTIONS.get(executable.crate_name, [])
args: list[str] = []
if TestOption.SINGLE_THREADED in options:
args += ["--test-threads=1"]
# proc-macros and their tests are executed on the host.
if executable.kind == "proc-macro":
target = TestTarget("host")
if VERBOSE:
print(f"Running test {executable.name} on {target}...")
try:
# Pipe stdout/err to be printed in the main process if needed.
test_process = test_target.exec_file_on_target(
target,
executable.binary_path,
args=args,
timeout=get_test_timeout(target, executable),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
return ExecutableResults(
executable.name,
test_process.returncode == 0,
test_process.stdout,
)
except subprocess.TimeoutExpired as e:
# Append a note about the timeout to the stdout of the process.
msg = f"\n\nProcess timed out after {e.timeout}s\n"
return ExecutableResults(
executable.name,
False,
e.stdout.decode("utf-8") + msg,
)
def execute_all(
executables: list[Executable],
target: test_target.TestTarget,
repeat: int,
):
"""Executes all tests in the `executables` list in parallel."""
executables = [e for e in executables if should_run_executable(e, target.arch)]
if repeat > 1:
executables = executables * repeat
random.shuffle(executables)
sys.stdout.write(f"Running {len(executables)} test binaries on {target}")
sys.stdout.flush()
with Pool(PARALLELISM) as pool:
for result in pool.imap(functools.partial(execute_test, target), executables):
if not result.success or VERBOSE:
msg = "passed" if result.success else "failed"
print()
print("--------------------------------")
print("-", result.name, msg)
print("--------------------------------")
print(result.test_log)
else:
sys.stdout.write(".")
sys.stdout.flush()
yield result
print()
def find_crosvm_binary(executables: list[Executable]):
for executable in executables:
if not executable.is_test and executable.cargo_target == "crosvm":
return executable
raise Exception("Cannot find crosvm executable")
def main():
parser = argparse.ArgumentParser(usage=USAGE)
parser.add_argument(
"--verbose",
"-v",
action="store_true",
default=False,
help="Print all test output.",
)
parser.add_argument(
"--target",
help="Execute tests on the selected target. See ./tools/set_test_target",
)
parser.add_argument(
"--arch",
choices=typing.get_args(Arch),
help="Target architecture to build for.",
)
parser.add_argument(
"--build-only",
action="store_true",
)
parser.add_argument(
"--repeat",
type=int,
default=1,
help="Repeat each test N times to check for flakes.",
)
args = parser.parse_args()
global VERBOSE
VERBOSE = args.verbose # type: ignore
os.environ["RUST_BACKTRACE"] = "1"
target = (
test_target.TestTarget(args.target) if args.target else test_target.TestTarget.default()
)
print("Test target:", target)
build_arch = args.arch or target.arch
print("Building for architecture:", build_arch)
# Start booting VM while we build
if target.vm:
testvm.build_if_needed(target.vm)
testvm.up(target.vm)
hygiene, error = has_platform_dependent_code(Path("common/sys_util_core"))
if not hygiene:
print("Error: Platform dependent code not allowed in sys_util_core crate.")
print("Offending line: " + error)
sys.exit(-1)
crlf_endings = has_crlf_line_endings()
if crlf_endings:
print("Error: Following files have crlf(dos) line encodings")
print(*crlf_endings)
sys.exit(-1)
executables = list(build_all_binaries(target, build_arch))
if args.build_only:
print("Not running tests as requested.")
sys.exit(0)
# Upload dependencies plus the main crosvm binary for integration tests if the
# crosvm binary is not excluded from testing.
extra_files = (
[find_crosvm_binary(executables).binary_path] if not exclude_crosvm(build_arch) else []
)
test_target.prepare_target(target, extra_files=extra_files)
# Execute all test binaries
test_executables = [e for e in executables if e.is_test]
all_results = list(execute_all(test_executables, target, repeat=args.repeat))
failed = [r for r in all_results if not r.success]
if len(failed) == 0:
print("All tests passed.")
sys.exit(0)
else:
print(f"{len(failed)} of {len(all_results)} tests failed:")
for result in failed:
print(f" {result.name}")
sys.exit(-1)
if __name__ == "__main__":
try:
main()
except subprocess.CalledProcessError as e:
print("Command failed:", e.cmd)
print(e.stdout)
print(e.stderr)
sys.exit(-1) | 0.666388 | 0.209844 |
import math
import torch
from catalyst.utils.metrics import average_precision
def test_average_precision_base():
"""
Tests for catalyst.utils.metrics.average_precision metric.
"""
outputs = torch.Tensor([0.1, 0.4, 0.35, 0.8])
targets = torch.Tensor([0, 0, 1, 1])
assert torch.isclose(
average_precision(outputs, targets)[0], torch.tensor(0.8333), atol=1e-3
)
def test_average_precision_weighted():
"""
Tests for catalyst.utils.metrics.average_precision metric.
"""
target = torch.Tensor([0, 1, 0, 1])
output = torch.Tensor([0.1, 0.2, 0.3, 4])
weight = torch.Tensor([0.5, 1.0, 2.0, 0.1])
ap = average_precision(outputs=output, targets=target, weights=weight)
val = (1 * 0.1 / 0.1 + 0 * 2.0 / 2.1 + 1.1 * 1 / 3.1 + 0 * 1 / 4) / 2.0
assert math.fabs(ap[0] - val) < 0.01, "ap test1 failed"
ap = average_precision(outputs=output, targets=target, weights=None)
val = (1 * 1.0 / 1.0 + 0 * 1.0 / 2.0 + 2 * 1.0 / 3.0 + 0 * 1.0 / 4.0) / 2.0
assert math.fabs(ap[0] - val) < 0.01, "ap test2 failed"
target = torch.Tensor([0, 1, 0, 1])
output = torch.Tensor([4, 3, 2, 1])
weight = torch.Tensor([1, 2, 3, 4])
ap = average_precision(outputs=output, targets=target, weights=weight)
val = (
0 * 1.0 / 1.0 + 1.0 * 2.0 / 3.0 + 2.0 * 0 / 6.0 + 6.0 * 1.0 / 10.0
) / 2.0
assert math.fabs(ap[0] - val) < 0.01, "ap test3 failed"
ap = average_precision(outputs=output, targets=target, weights=None)
val = (0 * 1.0 + 1 * 1.0 / 2.0 + 0 * 1.0 / 3.0 + 2 * 1.0 / 4.0) / 2.0
assert math.fabs(ap[0] - val) < 0.01, "ap test4 failed"
target = torch.Tensor([0, 1, 0, 1])
output = torch.Tensor([1, 4, 2, 3])
weight = torch.Tensor([1, 2, 3, 4])
ap = average_precision(outputs=output, targets=target, weights=weight)
val = (
4 * 1.0 / 4.0 + 6 * 1.0 / 6.0 + 0 * 6.0 / 9.0 + 0 * 6.0 / 10.0
) / 2.0
assert math.fabs(ap[0] - val) < 0.01, "ap test5 failed"
ap = average_precision(outputs=output, targets=target, weights=None)
val = (1 * 1.0 + 2 * 1.0 / 2.0 + 0 * 1.0 / 3.0 + 0 * 1.0 / 4.0) / 2.0
assert math.fabs(ap[0] - val) < 0.01, "ap test6 failed"
target = torch.Tensor([0, 0, 0, 0])
output = torch.Tensor([1, 4, 2, 3])
weight = torch.Tensor([1.0, 0.1, 0.0, 0.5])
ap = average_precision(outputs=output, targets=target, weights=weight)
val = 0.0
assert math.fabs(ap[0] - val) < 0.01, "ap test7 failed"
ap = average_precision(outputs=output, targets=target, weights=None)
val = 0.0
assert math.fabs(ap[0] - val) < 0.01, "ap test8 failed"
target = torch.Tensor([1, 1, 0])
output = torch.Tensor([3, 1, 2])
weight = torch.Tensor([1, 0.1, 3])
ap = average_precision(outputs=output, targets=target, weights=weight)
val = (1 * 1.0 / 1.0 + 1 * 0.0 / 4.0 + 1.1 / 4.1) / 2.0
assert math.fabs(ap[0] - val) < 0.01, "ap test9 failed"
ap = average_precision(outputs=output, targets=target, weights=None)
val = (1 * 1.0 + 0 * 1.0 / 2.0 + 2 * 1.0 / 3.0) / 2.0
assert math.fabs(ap[0] - val) < 0.01, "ap test10 failed"
# Test multiple K's
target = torch.Tensor([[0, 1, 0, 1], [0, 1, 0, 1]]).transpose(0, 1)
output = torch.Tensor([[0.1, 0.2, 0.3, 4], [4, 3, 2, 1]]).transpose(0, 1)
weight = torch.Tensor([[1.0, 0.5, 2.0, 3.0]]).transpose(0, 1)
ap = average_precision(outputs=output, targets=target, weights=weight)
assert (
math.fabs(
ap[0].sum()
- torch.Tensor(
[
(
1 * 3.0 / 3.0
+ 0 * 3.0 / 5.0
+ 3.5 * 1 / 5.5
+ 0 * 3.5 / 6.5
)
/ 2.0,
(
0 * 1.0 / 1.0
+ 1 * 0.5 / 1.5
+ 0 * 0.5 / 3.5
+ 1 * 3.5 / 6.5
)
/ 2.0,
]
).sum()
)
< 0.01
), "ap test11 failed"
ap = average_precision(outputs=output, targets=target, weights=None)
assert (
math.fabs(
ap[0].sum()
- torch.Tensor(
[
(1 * 1.0 + 0 * 1.0 / 2.0 + 2 * 1.0 / 3 + 0 * 1.0 / 4.0)
/ 2.0,
(0 * 1.0 + 1 * 1.0 / 2.0 + 0 * 1.0 / 3.0 + 2.0 * 1.0 / 4.0)
/ 2.0,
]
).sum()
)
< 0.01
), "ap test12 failed" | catalyst/utils/metrics/tests/test_average_precision.py | import math
import torch
from catalyst.utils.metrics import average_precision
def test_average_precision_base():
    """
    Sanity-check catalyst.utils.metrics.average_precision on a small
    binary-ranking example.
    """
    scores = torch.Tensor([0.1, 0.4, 0.35, 0.8])
    labels = torch.Tensor([0, 0, 1, 1])
    ap_value = average_precision(scores, labels)[0]
    # Relevant items land at ranks 1 and 3 (by descending score), so
    # AP = (1/1 + 2/3) / 2 = 0.8333...
    assert torch.isclose(ap_value, torch.tensor(0.8333), atol=1e-3)
def test_average_precision_weighted():
    """
    Tests for catalyst.utils.metrics.average_precision metric.

    Each case sets (target, output, weight), computes the expected AP by hand
    in `val`, and checks both the weighted and the unweighted (weights=None)
    results.
    """
    # Case A: relevant items (target=1) sit at ranks 1 and 3 when items are
    # sorted by descending score.
    target = torch.Tensor([0, 1, 0, 1])
    output = torch.Tensor([0.1, 0.2, 0.3, 4])
    weight = torch.Tensor([0.5, 1.0, 2.0, 0.1])
    ap = average_precision(outputs=output, targets=target, weights=weight)
    val = (1 * 0.1 / 0.1 + 0 * 2.0 / 2.1 + 1.1 * 1 / 3.1 + 0 * 1 / 4) / 2.0
    assert math.fabs(ap[0] - val) < 0.01, "ap test1 failed"
    # Same data, unweighted: weights=None behaves like all-ones weights.
    ap = average_precision(outputs=output, targets=target, weights=None)
    val = (1 * 1.0 / 1.0 + 0 * 1.0 / 2.0 + 2 * 1.0 / 3.0 + 0 * 1.0 / 4.0) / 2.0
    assert math.fabs(ap[0] - val) < 0.01, "ap test2 failed"
    # Case B: scores strictly decreasing, so the ranking equals input order.
    target = torch.Tensor([0, 1, 0, 1])
    output = torch.Tensor([4, 3, 2, 1])
    weight = torch.Tensor([1, 2, 3, 4])
    ap = average_precision(outputs=output, targets=target, weights=weight)
    val = (
        0 * 1.0 / 1.0 + 1.0 * 2.0 / 3.0 + 2.0 * 0 / 6.0 + 6.0 * 1.0 / 10.0
    ) / 2.0
    assert math.fabs(ap[0] - val) < 0.01, "ap test3 failed"
    ap = average_precision(outputs=output, targets=target, weights=None)
    val = (0 * 1.0 + 1 * 1.0 / 2.0 + 0 * 1.0 / 3.0 + 2 * 1.0 / 4.0) / 2.0
    assert math.fabs(ap[0] - val) < 0.01, "ap test4 failed"
    # Case C: both relevant items outrank both irrelevant ones.
    target = torch.Tensor([0, 1, 0, 1])
    output = torch.Tensor([1, 4, 2, 3])
    weight = torch.Tensor([1, 2, 3, 4])
    ap = average_precision(outputs=output, targets=target, weights=weight)
    val = (
        4 * 1.0 / 4.0 + 6 * 1.0 / 6.0 + 0 * 6.0 / 9.0 + 0 * 6.0 / 10.0
    ) / 2.0
    assert math.fabs(ap[0] - val) < 0.01, "ap test5 failed"
    ap = average_precision(outputs=output, targets=target, weights=None)
    val = (1 * 1.0 + 2 * 1.0 / 2.0 + 0 * 1.0 / 3.0 + 0 * 1.0 / 4.0) / 2.0
    assert math.fabs(ap[0] - val) < 0.01, "ap test6 failed"
    # Case D: no relevant items at all -> AP is exactly 0, with or without
    # weights.
    target = torch.Tensor([0, 0, 0, 0])
    output = torch.Tensor([1, 4, 2, 3])
    weight = torch.Tensor([1.0, 0.1, 0.0, 0.5])
    ap = average_precision(outputs=output, targets=target, weights=weight)
    val = 0.0
    assert math.fabs(ap[0] - val) < 0.01, "ap test7 failed"
    ap = average_precision(outputs=output, targets=target, weights=None)
    val = 0.0
    assert math.fabs(ap[0] - val) < 0.01, "ap test8 failed"
    # Case E: three items with uneven weights.
    target = torch.Tensor([1, 1, 0])
    output = torch.Tensor([3, 1, 2])
    weight = torch.Tensor([1, 0.1, 3])
    ap = average_precision(outputs=output, targets=target, weights=weight)
    val = (1 * 1.0 / 1.0 + 1 * 0.0 / 4.0 + 1.1 / 4.1) / 2.0
    assert math.fabs(ap[0] - val) < 0.01, "ap test9 failed"
    ap = average_precision(outputs=output, targets=target, weights=None)
    val = (1 * 1.0 + 0 * 1.0 / 2.0 + 2 * 1.0 / 3.0) / 2.0
    assert math.fabs(ap[0] - val) < 0.01, "ap test10 failed"
    # Test multiple K's
    # Two score columns share one weight column; ap[0] holds one AP per
    # column, so the expected values are summed for comparison.
    target = torch.Tensor([[0, 1, 0, 1], [0, 1, 0, 1]]).transpose(0, 1)
    output = torch.Tensor([[0.1, 0.2, 0.3, 4], [4, 3, 2, 1]]).transpose(0, 1)
    weight = torch.Tensor([[1.0, 0.5, 2.0, 3.0]]).transpose(0, 1)
    ap = average_precision(outputs=output, targets=target, weights=weight)
    assert (
        math.fabs(
            ap[0].sum()
            - torch.Tensor(
                [
                    (
                        1 * 3.0 / 3.0
                        + 0 * 3.0 / 5.0
                        + 3.5 * 1 / 5.5
                        + 0 * 3.5 / 6.5
                    )
                    / 2.0,
                    (
                        0 * 1.0 / 1.0
                        + 1 * 0.5 / 1.5
                        + 0 * 0.5 / 3.5
                        + 1 * 3.5 / 6.5
                    )
                    / 2.0,
                ]
            ).sum()
        )
        < 0.01
    ), "ap test11 failed"
    ap = average_precision(outputs=output, targets=target, weights=None)
    assert (
        math.fabs(
            ap[0].sum()
            - torch.Tensor(
                [
                    (1 * 1.0 + 0 * 1.0 / 2.0 + 2 * 1.0 / 3 + 0 * 1.0 / 4.0)
                    / 2.0,
                    (0 * 1.0 + 1 * 1.0 / 2.0 + 0 * 1.0 / 3.0 + 2.0 * 1.0 / 4.0)
                    / 2.0,
                ]
            ).sum()
        )
        < 0.01
    ), "ap test12 failed" | 0.844152 | 0.761405
from datetime import datetime
from unittest.mock import ANY
from unittest.mock import MagicMock
import bs4
import pytest
from fakeparser import Parser
from reader import Content
from reader import HighlightedString
from reader import InvalidSearchQueryError
from reader import SearchError
from reader import StorageError
from reader._search import Search
from reader._search import strip_html
from reader._sqlite_utils import DBError
from reader._sqlite_utils import require_version
STRIP_HTML_DATA = [(i, i) for i in [None, 10, 11.2, b'aabb', b'aa<br>bb']] + [
('aabb', 'aabb'),
('aa<br>bb', 'aa\nbb'),
('aa<p>bb', 'aa\nbb'),
('<script>ss</script>bb', 'bb'),
('<noscript>ss</noscript>bb', 'bb'),
('<style>ss</style>bb', 'bb'),
('<title>ss</title>bb', 'bb'),
('aa<script>ss</script>bb', 'aa\nbb'),
('aa<noscript>ss</noscript>bb', 'aa\nbb'),
('aa<style>ss</style>bb', 'aa\nbb'),
('aa<title>tt</title>bb', 'aa\nbb'),
('<head><script>ss</script></head>bb', 'bb'),
('<head><noscript>ss</noscript>bb', 'bb'),
('<head><style>ss</style></head>bb', 'bb'),
('<head><title>tt</title>bb', 'bb'),
('<head>aa<script>ss</script>bb', 'aa\nbb'),
('<head>aa<noscript>ss</noscript></head>bb', 'aa\nbb'),
('<head>aa<style>ss</style>bb', 'aa\nbb'),
('<head>aa<title>tt</title></head>bb', 'aa\nbb'),
(
"""
<head>
aa
<title>tt</title>
<p>bb
<script>ss</script>
<b>cc
<noscript>nn</noscript>
<style>ss</style>
dd
</head>
ee
""",
'aa\nbb\ncc\ndd\nee',
),
]
@pytest.mark.parametrize('input, expected_output', STRIP_HTML_DATA)
# We test all bs4 parsers, since we don't know/care what the user has installed.
@pytest.mark.parametrize(
    'features',
    [
        None,
        pytest.param('lxml', marks=pytest.mark.requires_lxml),
        'html.parser',
        'html5lib',
    ],
)
def test_strip_html(input, expected_output, features):
    """strip_html drops script/style/title/noscript content and turns block
    boundaries into newlines; non-strings pass through unchanged."""
    result = strip_html(input, features)
    if isinstance(result, str):
        # Normalize whitespace so the expectations are indentation-agnostic.
        result = '\n'.join(result.split())

    # Special-case different <noscript> handling by html5lib.
    # https://www.crummy.com/software/BeautifulSoup/bs4/doc/#differences-between-parsers
    default_builder_module = type(bs4.BeautifulSoup('').builder).__module__
    uses_html5lib = features == 'html5lib' or (
        features is None and 'html5lib' in default_builder_module
    )

    if uses_html5lib and isinstance(input, str) and '<noscript>' in input:
        assert '<noscript>' not in result
    else:
        assert result == expected_output
def enable_search(storage, _, __):
    """Locking-test driver: turn search on for the given storage."""
    search = Search(storage)
    search.enable()
def disable_search(storage, _, __):
    """Locking-test driver: turn search off for the given storage."""
    search = Search(storage)
    search.disable()
def is_search_enabled(storage, _, __):
    """Locking-test driver: query the enabled flag (result intentionally
    discarded — only the database access matters here)."""
    search = Search(storage)
    search.is_enabled()
def update_search(storage, _, __):
    """Locking-test driver: reindex the stored entries."""
    search = Search(storage)
    search.update()
def search_entries_chunk_size_0(storage, _, __):
    """Locking-test driver: run an unchunked (chunk_size=0) search and
    drain the resulting iterable."""
    page = Search(storage).search_entries_page(
        'entry', datetime(2010, 1, 1), chunk_size=0
    )
    list(page)
def search_entries_chunk_size_1(storage, _, __):
    """Locking-test driver: run a chunked (chunk_size=1) search and drain
    the resulting iterable."""
    page = Search(storage).search_entries_page(
        'entry', datetime(2010, 1, 1), chunk_size=1
    )
    list(page)
def search_entry_counts(storage, _, __):
    """Locking-test driver: compute entry counts for a fixed query/time."""
    search = Search(storage)
    search.search_entry_counts('entry', now=datetime(2010, 1, 1))
def search_entry_last(storage, feed, entry):
    """Locking-test driver: look up the last match for one (feed, entry)."""
    search = Search(storage)
    search.search_entry_last('entry', (feed.url, entry.id))
@pytest.mark.slow
@pytest.mark.parametrize(
    'pre_stuff, do_stuff',
    [
        (None, enable_search),
        (None, disable_search),
        (None, is_search_enabled),
        (enable_search, update_search),
        (enable_search, search_entries_chunk_size_0),
        (enable_search, search_entries_chunk_size_1),
        (enable_search, search_entry_counts),
        (enable_search, search_entry_last),
    ],
)
def test_errors_locked(db_path, pre_stuff, do_stuff):
    """All methods should raise SearchError when the database is locked."""
    # The shared harness lives in test_storage; imported inside the test so
    # this module can be imported on its own.
    from test_storage import check_errors_locked

    check_errors_locked(db_path, pre_stuff, do_stuff, SearchError)
def enable_and_update_search(storage):
    """Pre-step for the iter tests: enable search, then index everything."""
    s = Search(storage)
    s.enable()
    s.update()
def iter_search_entries_chunk_size_0(storage):
    """Return (without consuming) an unchunked search-results iterable."""
    search = Search(storage)
    return search.search_entries_page(
        'entry', datetime(2010, 1, 1), chunk_size=0
    )
def iter_search_entries_chunk_size_1(storage):
    """Return (without consuming) a chunk_size=1 search-results iterable."""
    search = Search(storage)
    return search.search_entries_page(
        'entry', datetime(2010, 1, 1), chunk_size=1
    )
def iter_search_entries_chunk_size_2(storage):
    """Return (without consuming) a chunk_size=2 search-results iterable."""
    search = Search(storage)
    return search.search_entries_page(
        'entry', datetime(2010, 1, 1), chunk_size=2
    )
def iter_search_entries_chunk_size_3(storage):
    """Return (without consuming) a chunk_size=3 search-results iterable."""
    search = Search(storage)
    return search.search_entries_page(
        'entry', datetime(2010, 1, 1), chunk_size=3
    )
@pytest.mark.slow
@pytest.mark.parametrize(
    'iter_stuff',
    [
        # chunk_size=0 (no chunking) is expected to keep the storage locked
        # while iterating, hence the strict xfail on StorageError.
        pytest.param(
            iter_search_entries_chunk_size_0,
            marks=pytest.mark.xfail(raises=StorageError, strict=True),
        ),
        iter_search_entries_chunk_size_1,
        iter_search_entries_chunk_size_2,
        iter_search_entries_chunk_size_3,
    ],
)
def test_iter_locked(db_path, iter_stuff):
    """Methods that return an iterable shouldn't block the underlying storage
    if the iterable is not consumed.
    """
    # Shared harness from test_storage; imported lazily so this module can be
    # imported on its own.
    from test_storage import check_iter_locked

    check_iter_locked(db_path, enable_and_update_search, iter_stuff)
class ActuallyOK(Exception):
    # Sentinel exception raised by the call_* helpers below when the query
    # was accepted, so pytest.raises has something to catch on the
    # "no error occurred" path.
    pass
def call_search_entries(search, query):
    """Run a search; raise ActuallyOK if it finishes without any error
    (i.e. the query was accepted but matched nothing)."""
    results = search.search_entries(query, datetime(2010, 1, 1))
    try:
        next(results)
    except StopIteration:
        raise ActuallyOK
def call_search_entry_counts(search, query):
    # Compute counts for the query; if no exception surfaced, signal the
    # "query was accepted" outcome via ActuallyOK.
    search.search_entry_counts(query, datetime(2010, 1, 1))
    raise ActuallyOK
@pytest.mark.parametrize(
    'query, exc_type',
    [
        ('\x00', InvalidSearchQueryError),
        ('"', InvalidSearchQueryError),
        # For some reason, on CPython * works when the filtering is inside
        # the CTE (it didn't when it was outside), hence the ActuallyOK.
        # On PyPy 7.3.1 we still get a InvalidSearchQueryError.
        # We're fine as long as we don't get another exception.
        ('*', (ActuallyOK, InvalidSearchQueryError)),
        ('O:', InvalidSearchQueryError),
        ('*p', InvalidSearchQueryError),
    ],
)
@pytest.mark.parametrize(
    'call_method',
    [
        call_search_entries,
        call_search_entry_counts,
    ],
)
def test_invalid_search_query_error(storage, query, exc_type, call_method):
    """Invalid queries raise InvalidSearchQueryError (or complete cleanly,
    signalled as ActuallyOK) — never an unrelated exception."""
    # We're not testing this in test_reader_search.py because
    # the invalid query strings are search-provider-dependent.
    search = Search(storage)
    search.enable()
    with pytest.raises(exc_type) as excinfo:
        call_method(search, query)
    if isinstance(exc_type, tuple) and ActuallyOK in exc_type:
        return
    assert excinfo.value.message
    # The underlying error is translated, not chained, into the reader one.
    assert excinfo.value.__cause__ is None
def test_minimum_sqlite_version(storage, monkeypatch):
    """Search.update() surfaces a failed SQLite version check as SearchError."""
    # Force the version check to fail regardless of the SQLite actually used.
    mock = MagicMock(wraps=require_version, side_effect=DBError('version'))
    monkeypatch.setattr('reader._search.require_version', mock)
    search = Search(storage)
    search.enable()
    with pytest.raises(SearchError) as excinfo:
        search.update()
    assert 'version' in excinfo.value.message
    # The minimum version required by search is (3, 18).
    mock.assert_called_with(ANY, (3, 18))
# TODO: test FTS5 column names | tests/test_search.py | from datetime import datetime
from unittest.mock import ANY
from unittest.mock import MagicMock
import bs4
import pytest
from fakeparser import Parser
from reader import Content
from reader import HighlightedString
from reader import InvalidSearchQueryError
from reader import SearchError
from reader import StorageError
from reader._search import Search
from reader._search import strip_html
from reader._sqlite_utils import DBError
from reader._sqlite_utils import require_version
STRIP_HTML_DATA = [(i, i) for i in [None, 10, 11.2, b'aabb', b'aa<br>bb']] + [
('aabb', 'aabb'),
('aa<br>bb', 'aa\nbb'),
('aa<p>bb', 'aa\nbb'),
('<script>ss</script>bb', 'bb'),
('<noscript>ss</noscript>bb', 'bb'),
('<style>ss</style>bb', 'bb'),
('<title>ss</title>bb', 'bb'),
('aa<script>ss</script>bb', 'aa\nbb'),
('aa<noscript>ss</noscript>bb', 'aa\nbb'),
('aa<style>ss</style>bb', 'aa\nbb'),
('aa<title>tt</title>bb', 'aa\nbb'),
('<head><script>ss</script></head>bb', 'bb'),
('<head><noscript>ss</noscript>bb', 'bb'),
('<head><style>ss</style></head>bb', 'bb'),
('<head><title>tt</title>bb', 'bb'),
('<head>aa<script>ss</script>bb', 'aa\nbb'),
('<head>aa<noscript>ss</noscript></head>bb', 'aa\nbb'),
('<head>aa<style>ss</style>bb', 'aa\nbb'),
('<head>aa<title>tt</title></head>bb', 'aa\nbb'),
(
"""
<head>
aa
<title>tt</title>
<p>bb
<script>ss</script>
<b>cc
<noscript>nn</noscript>
<style>ss</style>
dd
</head>
ee
""",
'aa\nbb\ncc\ndd\nee',
),
]
@pytest.mark.parametrize('input, expected_output', STRIP_HTML_DATA)
# We test all bs4 parsers, since we don't know/care what the user has installed.
@pytest.mark.parametrize(
'features',
[
None,
pytest.param('lxml', marks=pytest.mark.requires_lxml),
'html.parser',
'html5lib',
],
)
def test_strip_html(input, expected_output, features):
output = strip_html(input, features)
if isinstance(output, str):
output = '\n'.join(output.split())
# Special-case different <noscript> handling by html5lib.
# https://www.crummy.com/software/BeautifulSoup/bs4/doc/#differences-between-parsers
is_html5lib = any(
[
features == 'html5lib',
features is None
and 'html5lib' in type(bs4.BeautifulSoup('').builder).__module__,
]
)
if is_html5lib and isinstance(input, str) and '<noscript>' in input:
assert '<noscript>' not in output
return
assert output == expected_output
def enable_search(storage, _, __):
Search(storage).enable()
def disable_search(storage, _, __):
Search(storage).disable()
def is_search_enabled(storage, _, __):
Search(storage).is_enabled()
def update_search(storage, _, __):
Search(storage).update()
def search_entries_chunk_size_0(storage, _, __):
list(
Search(storage).search_entries_page('entry', datetime(2010, 1, 1), chunk_size=0)
)
def search_entries_chunk_size_1(storage, _, __):
list(
Search(storage).search_entries_page('entry', datetime(2010, 1, 1), chunk_size=1)
)
def search_entry_counts(storage, _, __):
Search(storage).search_entry_counts('entry', now=datetime(2010, 1, 1))
def search_entry_last(storage, feed, entry):
Search(storage).search_entry_last('entry', (feed.url, entry.id))
@pytest.mark.slow
@pytest.mark.parametrize(
'pre_stuff, do_stuff',
[
(None, enable_search),
(None, disable_search),
(None, is_search_enabled),
(enable_search, update_search),
(enable_search, search_entries_chunk_size_0),
(enable_search, search_entries_chunk_size_1),
(enable_search, search_entry_counts),
(enable_search, search_entry_last),
],
)
def test_errors_locked(db_path, pre_stuff, do_stuff):
"""All methods should raise SearchError when the database is locked."""
from test_storage import check_errors_locked
check_errors_locked(db_path, pre_stuff, do_stuff, SearchError)
def enable_and_update_search(storage):
search = Search(storage)
search.enable()
search.update()
def iter_search_entries_chunk_size_0(storage):
return Search(storage).search_entries_page(
'entry', datetime(2010, 1, 1), chunk_size=0
)
def iter_search_entries_chunk_size_1(storage):
return Search(storage).search_entries_page(
'entry', datetime(2010, 1, 1), chunk_size=1
)
def iter_search_entries_chunk_size_2(storage):
return Search(storage).search_entries_page(
'entry', datetime(2010, 1, 1), chunk_size=2
)
def iter_search_entries_chunk_size_3(storage):
return Search(storage).search_entries_page(
'entry', datetime(2010, 1, 1), chunk_size=3
)
@pytest.mark.slow
@pytest.mark.parametrize(
'iter_stuff',
[
pytest.param(
iter_search_entries_chunk_size_0,
marks=pytest.mark.xfail(raises=StorageError, strict=True),
),
iter_search_entries_chunk_size_1,
iter_search_entries_chunk_size_2,
iter_search_entries_chunk_size_3,
],
)
def test_iter_locked(db_path, iter_stuff):
"""Methods that return an iterable shouldn't block the underlying storage
if the iterable is not consumed.
"""
from test_storage import check_iter_locked
check_iter_locked(db_path, enable_and_update_search, iter_stuff)
class ActuallyOK(Exception):
pass
def call_search_entries(search, query):
try:
next(search.search_entries(query, datetime(2010, 1, 1)))
except StopIteration:
raise ActuallyOK
def call_search_entry_counts(search, query):
search.search_entry_counts(query, datetime(2010, 1, 1))
raise ActuallyOK
@pytest.mark.parametrize(
'query, exc_type',
[
('\x00', InvalidSearchQueryError),
('"', InvalidSearchQueryError),
# For some reason, on CPython * works when the filtering is inside
# the CTE (it didn't when it was outside), hence the ActuallyOK.
# On PyPy 7.3.1 we still get a InvalidSearchQueryError.
# We're fine as long as we don't get another exception.
('*', (ActuallyOK, InvalidSearchQueryError)),
('O:', InvalidSearchQueryError),
('*p', InvalidSearchQueryError),
],
)
@pytest.mark.parametrize(
'call_method',
[
call_search_entries,
call_search_entry_counts,
],
)
def test_invalid_search_query_error(storage, query, exc_type, call_method):
# We're not testing this in test_reader_search.py because
# the invalid query strings are search-provider-dependent.
search = Search(storage)
search.enable()
with pytest.raises(exc_type) as excinfo:
call_method(search, query)
if isinstance(exc_type, tuple) and ActuallyOK in exc_type:
return
assert excinfo.value.message
assert excinfo.value.__cause__ is None
def test_minimum_sqlite_version(storage, monkeypatch):
mock = MagicMock(wraps=require_version, side_effect=DBError('version'))
monkeypatch.setattr('reader._search.require_version', mock)
search = Search(storage)
search.enable()
with pytest.raises(SearchError) as excinfo:
search.update()
assert 'version' in excinfo.value.message
mock.assert_called_with(ANY, (3, 18))
# TODO: test FTS5 column names | 0.660939 | 0.370453 |
from google.cloud import bigquery
import pandas as pd
from datasources.covid_tracking_project_metadata import CtpMetadata
from datasources.data_source import DataSource
import ingestion.gcs_to_bq_util as gcs_to_bq_util
import ingestion.standardized_columns as col_std
from ingestion.standardized_columns import Race
# Covid Tracking Project race data by state from covidtracking.com/race
class CovidTrackingProject(DataSource):
    """Data source for Covid Tracking Project race data by state
    (covidtracking.com/race)."""

    @staticmethod
    def get_id():
        """Returns this data source's unique id."""
        return "COVID_TRACKING_PROJECT"

    @staticmethod
    def get_table_name():
        """Returns the base BigQuery table name for this data source."""
        return "covid_tracking_project"

    @staticmethod
    def get_standard_columns():
        """Returns a dict containing conversions from Covid Tracking Project's
        race categories to their standardized values. Unlike other datasets,
        CTP doesn't use the race category id because it is not known at this
        stage, until it is joined with CTP metadata. Instead it uses the race
        column plus the includes_hispanic column."""
        return {
            "aian": Race.AIAN.race,
            "asian": Race.ASIAN.race,
            "black": Race.BLACK.race,
            "nhpi": Race.NHPI.race,
            "white": Race.WHITE.race,
            "multiracial": Race.MULTI.race,
            "other": Race.OTHER_NONSTANDARD.race,
            "unknown": Race.UNKNOWN.race,
            "ethnicity_hispanic": Race.HISP.race,
            "ethnicity_nonhispanic": Race.NH.race,
            "ethnicity_unknown": Race.ETHNICITY_UNKNOWN.race,
            "total": Race.TOTAL.race,
        }

    def write_to_bq(self, dataset, gcs_bucket, **attrs):
        """Loads the raw CTP csv from GCS, standardizes it, merges it with the
        CTP metadata table, and writes one BigQuery table per variable type.

        Args:
            dataset: BigQuery dataset to write to (also where metadata is read).
            gcs_bucket: GCS bucket holding the raw csv.
            attrs: Must contain "filename", the csv object name in the bucket.

        Raises:
            RuntimeError: If the metadata table is empty."""
        filename = self.get_attr(attrs, "filename")
        df = gcs_to_bq_util.load_csv_as_dataframe(
            gcs_bucket, filename, parse_dates=["Date"], thousands=",")
        df = self.standardize(df)
        # Get the metadata table; fail loudly rather than writing unmerged data.
        metadata = self._download_metadata(dataset)
        if metadata.empty:
            raise RuntimeError("BigQuery call to {} returned 0 rows".format(dataset))
        merged = CovidTrackingProject.merge_with_metadata(df, metadata)
        # Split into separate tables by variable type
        for variable_type in ["cases", "deaths", "tests", "hosp"]:
            # Filter first, then copy: avoids duplicating the full frame per
            # iteration and sidesteps chained-assignment warnings on the
            # in-place rename/drop below.
            result = merged.loc[merged["variable_type"] == variable_type].copy()
            result.rename(columns={"value": variable_type}, inplace=True)
            result.drop("variable_type", axis="columns", inplace=True)
            # Write to BQ
            gcs_to_bq_util.add_dataframe_to_bq(
                result, dataset, self.get_table_name() + "_" + variable_type)

    def standardize(self, df: pd.DataFrame) -> pd.DataFrame:
        """Reformats data into the standard format.

        Args:
            df: DataFrame representing the raw CTP data

        Returns:
            A pandas.DataFrame containing the standardized and formatted data."""
        self.clean_frame_column_names(df)
        # The *_latinx columns duplicate the ethnicity_* breakdowns; drop them.
        df.drop(
            columns=["cases_latinx", "deaths_latinx", "hosp_latinx", "tests_latinx"],
            inplace=True)
        df = df.melt(id_vars=["date", "state"])
        # Melted column names look like "<variable_type>_<race>"; split on the
        # first underscore only. Note: `n` must be passed by keyword — the
        # positional form was removed in pandas 2.0.
        df[["variable_type", col_std.RACE_COL]] = df.variable.str.split(
            "_", n=1, expand=True)
        df.drop("variable", axis=1, inplace=True)
        df.rename(columns={"state": col_std.STATE_POSTAL_COL}, inplace=True)
        df.replace({col_std.RACE_COL: self.get_standard_columns()}, inplace=True)
        df["date"] = df["date"].map(lambda ts: ts.strftime("%Y-%m-%d"))
        return df

    @staticmethod
    def merge_with_metadata(df: pd.DataFrame, metadata: pd.DataFrame) -> pd.DataFrame:
        """Merges the standardized CTP data with the CTP metadata.

        Args:
            df: a pandas.DataFrame containing the standardized CTP data.
            metadata: a pandas.DataFrame containing the CTP metadata.

        Returns:
            A pandas.DataFrame that contains the merged CTP data and metadata."""
        # Merge the tables
        merged = pd.merge(
            df, metadata, how="left", on=[col_std.STATE_POSTAL_COL, "variable_type"])
        # Rename combined race categories where the metadata flags indicate the
        # state reports them combined (API = Asian+Pacific Islander, etc.).
        CovidTrackingProject._rename_race_category(
            merged, "reports_api", Race.ASIAN, Race.API)
        CovidTrackingProject._rename_race_category(
            merged, "reports_ind", Race.AIAN, Race.INDIGENOUS)
        merged.drop(columns=["reports_api", "reports_ind"], inplace=True)
        return merged

    @staticmethod
    def _download_metadata(dataset: str) -> pd.DataFrame:
        """Downloads the metadata table from BigQuery by executing a query.

        Args:
            dataset: Name of the dataset to request metadata from

        Returns:
            A pandas.DataFrame containing the contents of the requested table."""
        client = bigquery.Client()
        job_config = bigquery.QueryJobConfig(
            default_dataset=client.get_dataset(dataset))
        sql = """
        SELECT *
        FROM {};
        """.format(CtpMetadata.get_table_name())
        return client.query(sql, job_config=job_config).to_dataframe()

    @staticmethod
    def _rename_race_category(
        df: pd.DataFrame, indicator_column: str, old_name: Race, new_name: Race
    ):
        """Renames values in df.race_and_ethnicity from old_name to new_name
        based on indicator_column.

        Args:
            df: pandas.DataFrame to modify
            indicator_column: Name of the column to be used to decide whether
                to rename the race value. Values should be Boolean.
            old_name: The race category to change
            new_name: The race category to rename to"""
        # Vectorized equivalent of the previous row-wise apply: a NaN
        # indicator (from the left merge) compares unequal to 1, so those
        # rows are left untouched, matching the old behavior.
        mask = (df[indicator_column] == 1) & (df[col_std.RACE_COL] == old_name.race)
        df.loc[mask, col_std.RACE_COL] = new_name.race

    @staticmethod
    def _replace_value(
        row: pd.Series, indicator_column: str, old_name: Race, new_name: Race
    ):
        """Helper retained for backward compatibility (no longer used by
        _rename_race_category). Conditionally replaces the race value for a
        given row.

        Args:
            row: A single row (pandas.Series) to modify
            indicator_column: Name of the column that indicates whether to modify
                the race value.
            old_name: The race category to change
            new_name: The race category to rename to"""
        if row[indicator_column] == 1 and row[col_std.RACE_COL] == old_name.race:
            return new_name.race
        return row[col_std.RACE_COL]
return row[col_std.RACE_COL] | python/datasources/covid_tracking_project.py | from google.cloud import bigquery
import pandas as pd
from datasources.covid_tracking_project_metadata import CtpMetadata
from datasources.data_source import DataSource
import ingestion.gcs_to_bq_util as gcs_to_bq_util
import ingestion.standardized_columns as col_std
from ingestion.standardized_columns import Race
# Covid Tracking Project race data by state from covidtracking.com/race
class CovidTrackingProject(DataSource):
@staticmethod
def get_id():
return "COVID_TRACKING_PROJECT"
@staticmethod
def get_table_name():
return "covid_tracking_project"
@staticmethod
def get_standard_columns():
"""Returns a dict containing conversions from Covid Tracking Project's
race categories to their standardized values. Unlike other datasets,
CTP doesn't use the race category id because it is not known at this
stage, until it is joined with CTP metadata. Instead it uses the race
column plus the includes_hispanic column."""
return {
"aian": Race.AIAN.race,
"asian": Race.ASIAN.race,
"black": Race.BLACK.race,
"nhpi": Race.NHPI.race,
"white": Race.WHITE.race,
"multiracial": Race.MULTI.race,
"other": Race.OTHER_NONSTANDARD.race,
"unknown": Race.UNKNOWN.race,
"ethnicity_hispanic": Race.HISP.race,
"ethnicity_nonhispanic": Race.NH.race,
"ethnicity_unknown": Race.ETHNICITY_UNKNOWN.race,
"total": Race.TOTAL.race,
}
def write_to_bq(self, dataset, gcs_bucket, **attrs):
filename = self.get_attr(attrs, "filename")
df = gcs_to_bq_util.load_csv_as_dataframe(
gcs_bucket, filename, parse_dates=["Date"], thousands=",")
df = self.standardize(df)
# Get the metadata table
metadata = self._download_metadata(dataset)
if len(metadata.index) == 0:
raise RuntimeError("BigQuery call to {} returned 0 rows".format(dataset))
merged = CovidTrackingProject.merge_with_metadata(df, metadata)
# Split into separate tables by variable type
for variable_type in ["cases", "deaths", "tests", "hosp"]:
result = merged.copy()
result = result.loc[result["variable_type"] == variable_type]
result.rename(columns={"value": variable_type}, inplace=True)
result.drop("variable_type", axis="columns", inplace=True)
# Write to BQ
gcs_to_bq_util.add_dataframe_to_bq(
result, dataset, self.get_table_name() + "_" + variable_type)
def standardize(self, df: pd.DataFrame) -> pd.DataFrame:
"""Reformats data into the standard format.
Args:
df: DataFrame representing the raw CTP data
Returns:
A pandas.DataFrame containing the standardized and formatted data."""
self.clean_frame_column_names(df)
df.drop(
columns=["cases_latinx", "deaths_latinx", "hosp_latinx", "tests_latinx"],
inplace=True)
df = df.melt(id_vars=["date", "state"])
df[["variable_type", col_std.RACE_COL]] = df.variable.str.split(
"_", 1, expand=True)
df.drop("variable", axis=1, inplace=True)
df.rename(columns={"state": col_std.STATE_POSTAL_COL}, inplace=True)
df.replace({col_std.RACE_COL: self.get_standard_columns()}, inplace=True)
df["date"] = df["date"].map(lambda ts: ts.strftime("%Y-%m-%d"))
return df
@staticmethod
def merge_with_metadata(df: pd.DataFrame, metadata: pd.DataFrame) -> pd.DataFrame:
"""Merges the standardized CTP data with the CTP metadata.
Args:
df: a pandas.DataFrame containing the standardized CTP data.
metadata: a pandas.DataFrame containing the CTP metadata.
Returns:
A pandas.DataFrame that contains the merged CTP data and metadata."""
# Merge the tables
merged = pd.merge(
df, metadata, how="left", on=[col_std.STATE_POSTAL_COL, "variable_type"])
# Rename combined race categories
CovidTrackingProject._rename_race_category(
merged, "reports_api", Race.ASIAN, Race.API)
CovidTrackingProject._rename_race_category(
merged, "reports_ind", Race.AIAN, Race.INDIGENOUS)
merged.drop(columns=["reports_api", "reports_ind"], inplace=True)
return merged
@staticmethod
def _download_metadata(dataset: str) -> pd.DataFrame:
"""Downloads the metadata table from BigQuery by executing a query.
Args:
dataset: Name of the dataset to request metadata from
Returns:
A pandas.DataFrame containing the contents of the requested table."""
client = bigquery.Client()
job_config = bigquery.QueryJobConfig(
default_dataset=client.get_dataset(dataset))
sql = """
SELECT *
FROM {};
""".format(CtpMetadata.get_table_name())
return client.query(sql, job_config=job_config).to_dataframe()
@staticmethod
def _rename_race_category(
df: pd.DataFrame, indicator_column: str, old_name: Race, new_name: Race
):
"""Renames values in df.race_and_ethnicity from old_name to new_name
based on indicator_column.
Args:
df: pandas.DataFrame to modify
indicator_column: Name of the column to be used to decide whether
to rename the race value. Values should be Boolean.
old_name: The race category to change
new_name: The race category to rename to"""
df[col_std.RACE_COL] = df.apply(
CovidTrackingProject._replace_value,
axis=1,
args=(indicator_column, old_name, new_name),
)
@staticmethod
def _replace_value(
row: pd.Series, indicator_column: str, old_name: Race, new_name: Race
):
"""Helper method for _rename_race_category. Conditionally replaces
the race value for a given row.
Args:
row: A single row (pandas.Series) to modify
indicator_column: Name of the column that indicates whether to modify
the race value.
old_name: The race category to change
new_name: The race category to rename to"""
if row[indicator_column] == 1 and row[col_std.RACE_COL] == old_name.race:
return new_name.race
return row[col_std.RACE_COL] | 0.807802 | 0.371707 |
from functools import partial
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from config import config
from base_model import resnet50
from seg_opr.seg_oprs import ConvBnRelu
from seg_opr.loss_opr import AntimagnetLoss
class CPNet(nn.Module):
    """Segmentation network: dilated ResNet-50 backbone (output stride 8),
    an ObjectContext aggregation head, and an auxiliary head on the
    penultimate backbone stage. Returns the loss in training mode and
    log-softmax class scores in inference mode."""

    def __init__(self, out_planes, criterion, pretrained_model=None,
                 norm_layer=nn.BatchNorm2d):
        """
        Args:
            out_planes: number of output classes.
            criterion: per-pixel loss (applied to main and aux heads).
            pretrained_model: optional backbone checkpoint path/weights.
            norm_layer: normalization layer class (swap for sync-BN etc.).
        """
        super(CPNet, self).__init__()
        self.backbone = resnet50(pretrained_model, norm_layer=norm_layer,
                                 bn_eps=config.bn_eps,
                                 bn_momentum=config.bn_momentum,
                                 deep_stem=True, stem_width=64)
        # Remove the stride-2 downsampling of the last two stages and replace
        # it with dilation, keeping the output stride at 8.
        self.generate_dilation(self.backbone.layer3, dilation=2)
        self.generate_dilation(self.backbone.layer4, dilation=4)

        # Layers trained from scratch (as opposed to the pretrained backbone);
        # presumably consumed by the trainer for per-group learning rates —
        # TODO confirm against the training script.
        self.business_layer = []

        self.context = ObjectContext(2048, 512, norm_layer)

        # Main head on backbone output + aggregated context (2048 + 512 ch).
        self.head_layer = nn.Sequential(
            ConvBnRelu(2048 + 512, 512, 3, 1, 1,
                       has_bn=True,
                       has_relu=True, has_bias=False, norm_layer=norm_layer),
            nn.Dropout2d(0.1, inplace=False),
            nn.Conv2d(512, out_planes, kernel_size=1)
        )
        # Auxiliary head on the penultimate stage (1024 channels).
        self.aux_layer = nn.Sequential(
            ConvBnRelu(1024, 512, 3, 1, 1,
                       has_bn=True,
                       has_relu=True, has_bias=False, norm_layer=norm_layer),
            nn.Dropout2d(0.1, inplace=False),
            nn.Conv2d(512, out_planes, kernel_size=1)
        )
        self.business_layer.append(self.context)
        self.business_layer.append(self.head_layer)
        self.business_layer.append(self.aux_layer)

        self.criterion = criterion
        # self.bce_criterion = nn.BCELoss(reduction='mean')
        # self.antimagnet_criterion = AntimagnetLoss(reduction='mean')

    def forward(self, data, label=None):
        """Returns the combined loss when `label` is given (training),
        otherwise log-softmax scores upsampled to input resolution."""
        blocks = self.backbone(data)

        fm = self.context(blocks[-1])
        fm = self.head_layer(fm)
        # Output stride is 8, so upsample x8 back to the input size.
        fm = F.interpolate(fm, scale_factor=8, mode='bilinear',
                           align_corners=True)
        softmax_fm = F.log_softmax(fm, dim=1)

        aux_fm = self.aux_layer(blocks[-2])
        aux_fm = F.interpolate(aux_fm, scale_factor=8, mode='bilinear',
                               align_corners=True)

        if label is not None:
            main_loss = self.criterion(fm, label)
            aux_loss = self.criterion(aux_fm, label)
            # intra_sim_loss = self.bce_criterion(intra_sim_map, aux_label)
            # antimagnet_loss = self.antimagnet_criterion(intra_sim_map,
            #                                             aux_label)
            # Aux head weighted 0.4, as in standard deep-supervision setups.
            loss = main_loss + 0.4 * aux_loss # + intra_sim_loss + antimagnet_loss
            return loss

        return softmax_fm

    def generate_dilation(self, module, dilation, multi_grid=None):
        """Converts every block in `module` to use dilated convolutions.

        NOTE(review): with multi_grid set, `dilation` is multiplied
        cumulatively across blocks (dilation *= grid each iteration) rather
        than computed per block — confirm this matches the intended
        multi-grid scheme.
        """
        for idx, block in enumerate(module):
            if multi_grid is None:
                grid = 1
            else:
                grid = multi_grid[idx % len(multi_grid)]
            dilation = dilation * grid
            block.apply(partial(self._nostride_dilate, dilate=dilation))

    @staticmethod
    def _nostride_dilate(m, dilate):
        # For each conv: drop the stride-2 downsampling and compensate with
        # dilation/padding so the receptive field is preserved.
        if isinstance(m, nn.Conv2d):
            if m.stride == (2, 2):
                m.stride = (1, 1)
                if m.kernel_size == (3, 3):
                    m.dilation = (dilate // 2, dilate // 2)
                    m.padding = (dilate // 2, dilate // 2)
            else:
                if m.kernel_size == (3, 3):
                    m.dilation = (dilate, dilate)
                    m.padding = (dilate, dilate)
class SymmetricConv(nn.Module):
    """Factorized ksize x ksize convolution: the sum of a (ksize,1)->(1,ksize)
    branch and a (1,ksize)->(ksize,1) branch, followed by shared BN + ReLU.
    Spatial size is preserved (padding = ksize // 2)."""

    def __init__(self, in_channels, ksize, norm_layer=nn.BatchNorm2d):
        super(SymmetricConv, self).__init__()
        pad = ksize // 2
        # Layer creation order (t1, t2, p1, p2, bn) is kept stable so weight
        # init is reproducible under a fixed RNG seed.
        self.t1 = nn.Conv2d(in_channels, in_channels, kernel_size=(ksize, 1),
                            stride=1, padding=(pad, 0))
        self.t2 = nn.Conv2d(in_channels, in_channels, kernel_size=(1, ksize),
                            stride=1, padding=(0, pad))
        self.p1 = nn.Conv2d(in_channels, in_channels, kernel_size=(1, ksize),
                            stride=1, padding=(0, pad))
        self.p2 = nn.Conv2d(in_channels, in_channels, kernel_size=(ksize, 1),
                            stride=1, padding=(pad, 0))
        self.bn = norm_layer(in_channels)
        self.relu = nn.ReLU()

    def forward(self, x):
        tall_then_wide = self.t2(self.t1(x))
        wide_then_tall = self.p2(self.p1(x))
        return self.relu(self.bn(tall_then_wide + wide_then_tall))
class ObjectContext(nn.Module):
    """Aggregates per-pixel context through a learned (HW x HW) affinity map
    and concatenates it to the input features.

    NOTE(review): the affinity branch outputs a fixed 3600 channels, i.e. it
    assumes a 60x60 feature map (e.g. a 480 crop at output stride 8) —
    confirm the training crop size matches.
    """

    def __init__(self, in_channels, inner_channel, norm_layer=nn.BatchNorm2d):
        super(ObjectContext, self).__init__()
        self.in_channels = in_channels
        self.inner_channel = inner_channel
        # 3x3 conv reducing backbone channels (e.g. 2048 -> 512).
        self.reduce_conv = ConvBnRelu(self.in_channels, self.inner_channel,
                                      3, 1, 1,
                                      has_bn=True, has_relu=True,
                                      has_bias=False, norm_layer=norm_layer)
        # Predicts, for every pixel, an affinity vector over all positions.
        self.intra_similarity_branch = nn.Sequential(
            SymmetricConv(self.inner_channel, 7, norm_layer),
            nn.Conv2d(self.inner_channel, 3600, 1, 1, 0, groups=16, bias=False),
            norm_layer(3600)
        )
        self.intra_post_conv = ConvBnRelu(self.inner_channel,
                                          self.inner_channel,
                                          1, 1, 0, has_bn=True, has_relu=True,
                                          has_bias=False, norm_layer=norm_layer)
        # Currently unused: the inter-context path below is commented out.
        self.inter_post_conv = ConvBnRelu(self.inner_channel,
                                          self.inner_channel,
                                          1, 1, 0, has_bn=True, has_relu=True,
                                          has_bias=False, norm_layer=norm_layer)

    def forward(self, x):
        # w is unused directly; spatial size is recovered from x.size() below.
        b, h, w = x.size(0), x.size(2), x.size(3)
        value = self.reduce_conv(x)

        # (b, 3600, h, w) -> (b, 3600, h*w) -> sigmoid affinities in [0, 1].
        intra_similarity_map = self.intra_similarity_branch(value)
        intra_similarity_map = intra_similarity_map.view(b, h * w, -1)
        intra_similarity_map = intra_similarity_map.permute(0, 2, 1)
        intra_similarity_map = torch.sigmoid(intra_similarity_map)

        # inter_similarity_map = 1 - intra_similarity_map

        value = value.view(b, self.inner_channel, -1)
        value = value.permute(0, 2, 1)

        # Affinity-weighted sum of all pixel features for every position,
        # normalized by the fixed position count (3600 — see class note).
        intra_context = torch.bmm(intra_similarity_map, value)
        intra_context = intra_context.div(3600)
        intra_context = intra_context.permute(0, 2, 1).contiguous()
        intra_context = intra_context.view(b, self.inner_channel, *x.size()[2:])
        intra_context = self.intra_post_conv(intra_context)

        # inter_context = torch.bmm(inter_similarity_map, value)
        # inter_context = inter_context.div(3600)
        # inter_context = inter_context.permute(0, 2, 1).contiguous()
        # inter_context = inter_context.view(b, self.inner_channel, *x.size()[2:])
        # inter_context = self.inter_post_conv(inter_context)

        # Concatenate original features with the aggregated context.
        output = torch.cat([x, intra_context], dim=1)

        return output
if __name__ == "__main__":
model = PSPNet(150, None)
print(model) | model/cpn/ablation_study/ade.cpn.R50_v1c.lcm/network.py | from functools import partial
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from config import config
from base_model import resnet50
from seg_opr.seg_oprs import ConvBnRelu
from seg_opr.loss_opr import AntimagnetLoss
class CPNet(nn.Module):
    """Context Prior segmentation network with a ResNet-50 backbone.

    Layers 3/4 of the backbone are dilated (output stride 8), an
    ObjectContext module aggregates per-pixel context, and a main head plus
    an auxiliary head (on the layer-3 features) produce class logits.
    """
    def __init__(self, out_planes, criterion, pretrained_model=None,
                 norm_layer=nn.BatchNorm2d):
        super(CPNet, self).__init__()
        self.backbone = resnet50(pretrained_model, norm_layer=norm_layer,
                                 bn_eps=config.bn_eps,
                                 bn_momentum=config.bn_momentum,
                                 deep_stem=True, stem_width=64)
        # Replace strides with dilations so features stay at 1/8 resolution.
        self.generate_dilation(self.backbone.layer3, dilation=2)
        self.generate_dilation(self.backbone.layer4, dilation=4)
        # Modules trained from scratch (as opposed to the pretrained
        # backbone) -- presumably used for a separate optimizer/LR group.
        self.business_layer = []
        self.context = ObjectContext(2048, 512, norm_layer)
        self.head_layer = nn.Sequential(
            ConvBnRelu(2048 + 512, 512, 3, 1, 1,
                       has_bn=True,
                       has_relu=True, has_bias=False, norm_layer=norm_layer),
            nn.Dropout2d(0.1, inplace=False),
            nn.Conv2d(512, out_planes, kernel_size=1)
        )
        self.aux_layer = nn.Sequential(
            ConvBnRelu(1024, 512, 3, 1, 1,
                       has_bn=True,
                       has_relu=True, has_bias=False, norm_layer=norm_layer),
            nn.Dropout2d(0.1, inplace=False),
            nn.Conv2d(512, out_planes, kernel_size=1)
        )
        self.business_layer.append(self.context)
        self.business_layer.append(self.head_layer)
        self.business_layer.append(self.aux_layer)
        self.criterion = criterion
        # self.bce_criterion = nn.BCELoss(reduction='mean')
        # self.antimagnet_criterion = AntimagnetLoss(reduction='mean')
    def forward(self, data, label=None):
        # Training (label given): returns the weighted main + aux loss.
        # Inference: returns log-softmax scores at input resolution.
        blocks = self.backbone(data)
        fm = self.context(blocks[-1])
        fm = self.head_layer(fm)
        fm = F.interpolate(fm, scale_factor=8, mode='bilinear',
                           align_corners=True)
        softmax_fm = F.log_softmax(fm, dim=1)
        aux_fm = self.aux_layer(blocks[-2])
        aux_fm = F.interpolate(aux_fm, scale_factor=8, mode='bilinear',
                               align_corners=True)
        if label is not None:
            main_loss = self.criterion(fm, label)
            aux_loss = self.criterion(aux_fm, label)
            # intra_sim_loss = self.bce_criterion(intra_sim_map, aux_label)
            # antimagnet_loss = self.antimagnet_criterion(intra_sim_map,
            #                                             aux_label)
            loss = main_loss + 0.4 * aux_loss  # + intra_sim_loss + antimagnet_loss
            return loss
        return softmax_fm
    def generate_dilation(self, module, dilation, multi_grid=None):
        # Walk the residual blocks, optionally applying a multi-grid factor,
        # converting strided convs into dilated ones.
        for idx, block in enumerate(module):
            if multi_grid is None:
                grid = 1
            else:
                grid = multi_grid[idx % len(multi_grid)]
            dilation = dilation * grid
            block.apply(partial(self._nostride_dilate, dilate=dilation))
    @staticmethod
    def _nostride_dilate(m, dilate):
        # Remove downsampling from stride-2 convs; compensate with dilation
        # (half the rate right after the removed stride).
        if isinstance(m, nn.Conv2d):
            if m.stride == (2, 2):
                m.stride = (1, 1)
                if m.kernel_size == (3, 3):
                    m.dilation = (dilate // 2, dilate // 2)
                    m.padding = (dilate // 2, dilate // 2)
            else:
                if m.kernel_size == (3, 3):
                    m.dilation = (dilate, dilate)
                    m.padding = (dilate, dilate)
class SymmetricConv(nn.Module):
    """Two separable-convolution branches summed, then BN + ReLU.

    One branch factorises the ksize x ksize kernel as (ksize x 1)(1 x ksize),
    the other as (1 x ksize)(ksize x 1). Channels and spatial size are
    preserved via symmetric padding of ksize // 2.
    """
    def __init__(self, in_channels, ksize, norm_layer=nn.BatchNorm2d):
        super(SymmetricConv, self).__init__()
        pad = ksize // 2
        tall = (ksize, 1)
        wide = (1, ksize)
        self.t1 = nn.Conv2d(in_channels, in_channels, kernel_size=tall,
                            stride=1, padding=(pad, 0))
        self.t2 = nn.Conv2d(in_channels, in_channels, kernel_size=wide,
                            stride=1, padding=(0, pad))
        self.p1 = nn.Conv2d(in_channels, in_channels, kernel_size=wide,
                            stride=1, padding=(0, pad))
        self.p2 = nn.Conv2d(in_channels, in_channels, kernel_size=tall,
                            stride=1, padding=(pad, 0))
        self.bn = norm_layer(in_channels)
        self.relu = nn.ReLU()
    def forward(self, x):
        branch_a = self.t2(self.t1(x))
        branch_b = self.p2(self.p1(x))
        return self.relu(self.bn(branch_a + branch_b))
class ObjectContext(nn.Module):
    """Aggregate per-position context through a learned affinity map and
    concatenate it to the input features.

    NOTE(review): the fixed affinity size of 3600 implies H*W == 3600
    (e.g. a 60x60 feature map) -- confirm against the training crop size.
    """
    def __init__(self, in_channels, inner_channel, norm_layer=nn.BatchNorm2d):
        super(ObjectContext, self).__init__()
        self.in_channels = in_channels
        self.inner_channel = inner_channel
        self.reduce_conv = ConvBnRelu(self.in_channels, self.inner_channel,
                                      3, 1, 1,
                                      has_bn=True, has_relu=True,
                                      has_bias=False, norm_layer=norm_layer)
        self.intra_similarity_branch = nn.Sequential(
            SymmetricConv(self.inner_channel, 7, norm_layer),
            nn.Conv2d(self.inner_channel, 3600, 1, 1, 0, groups=16, bias=False),
            norm_layer(3600)
        )
        self.intra_post_conv = ConvBnRelu(self.inner_channel,
                                          self.inner_channel,
                                          1, 1, 0, has_bn=True, has_relu=True,
                                          has_bias=False, norm_layer=norm_layer)
        # Retained for parity with the (disabled) inter-context path.
        self.inter_post_conv = ConvBnRelu(self.inner_channel,
                                          self.inner_channel,
                                          1, 1, 0, has_bn=True, has_relu=True,
                                          has_bias=False, norm_layer=norm_layer)
    def forward(self, x):
        batch = x.size(0)
        value = self.reduce_conv(x)
        # Affinity logits: (B, 3600, H, W) -> (B, H*W, 3600) -> transposed,
        # squashed to (0, 1) with a sigmoid.
        affinity = self.intra_similarity_branch(value)
        affinity = affinity.view(batch, x.size(2) * x.size(3), -1)
        affinity = torch.sigmoid(affinity.permute(0, 2, 1))
        # Value tensor flattened over space: (B, H*W, C).
        flat_value = value.view(batch, self.inner_channel, -1).permute(0, 2, 1)
        # Weighted aggregation, averaged over the 3600 affinity entries.
        context = torch.bmm(affinity, flat_value).div(3600)
        context = context.permute(0, 2, 1).contiguous()
        context = context.view(batch, self.inner_channel, *x.size()[2:])
        context = self.intra_post_conv(context)
        return torch.cat([x, context], dim=1)
if __name__ == "__main__":
model = PSPNet(150, None)
print(model) | 0.951006 | 0.274771 |
from typing import Dict, List, Optional, Tuple
from ee.clickhouse.client import sync_execute
from ee.clickhouse.models.cohort import format_cohort_table_name
from ee.clickhouse.sql.cohort import COHORT_DISTINCT_ID_FILTER_SQL
from ee.clickhouse.sql.events import EVENT_PROP_CLAUSE, SELECT_PROP_VALUES_SQL, SELECT_PROP_VALUES_SQL_WITH_FILTER
from posthog.models.cohort import Cohort
from posthog.models.property import Property
from posthog.models.team import Team
def parse_filter(filters: List[Property]) -> Tuple[str, Dict]:
    """Build an AND-joined key/value match clause plus its query params.

    Each filter contributes an `ep.key = %(kN)s AND trim(...) = %(vN)s`
    fragment; values are compared with their JSON quotes trimmed off.
    """
    clauses = []
    params: Dict = {}
    for idx, prop in enumerate(filters):
        clauses.append(
            "(ep.key = %(k{idx})s) AND (trim(BOTH '\"' FROM ep.value) = %(v{idx})s)".format(idx=idx)
        )
        params["k{}".format(idx)] = prop.key
        params["v{}".format(idx)] = prop.value
    return " AND ".join(clauses), params
def parse_prop_clauses(key: str, filters: List[Property], team: Team, prepend: str = "") -> Tuple[str, Dict]:
    # Build "AND <key> IN (subquery)" fragments for each property filter.
    # Cohort filters match distinct_ids against the cohort table; all other
    # filters match key/value pairs in the event-properties table.
    final = ""
    params = {}
    for idx, prop in enumerate(filters):
        if prop.type == "cohort":
            cohort = Cohort.objects.get(pk=prop.value)
            clause = COHORT_DISTINCT_ID_FILTER_SQL.format(table_name=format_cohort_table_name(cohort))
            final += "{cond} ({clause}) ".format(cond="AND distinct_id IN", clause=clause)
        else:
            # NOTE(review): `filter` shadows the builtin of the same name.
            filter = "(ep.key = %(k{prepend}_{idx})s) AND (ep.value {operator} %(v{prepend}_{idx})s)".format(
                idx=idx, operator=get_operator(prop.operator), prepend=prepend
            )
            clause = EVENT_PROP_CLAUSE.format(team_id=team.pk, filters=filter)
            final += "{cond} ({clause}) ".format(cond="AND {key} IN".format(key=key), clause=clause)
            # Values are stored JSON-encoded, hence the quote padding.
            params.update(
                {"k{}_{}".format(prepend, idx): prop.key, "v{}_{}".format(prepend, idx): _pad_value(prop.value)}
            )
    return final, params
def _pad_value(val: str):
if not val.startswith('"'):
val = '"' + val
if not val.endswith('"'):
val = val + '"'
return val
# TODO: handle all operators
def get_operator(operator: Optional[str]):
    """Map a PostHog filter operator name to its ClickHouse SQL operator.

    Unknown operators -- and the regex ones, which are not yet supported
    (see TODO above) -- fall back to plain equality.
    """
    operator_to_sql = {
        "is_not": "!=",
        "icontains": "LIKE",
        "not_icontains": "NOT LIKE",
        "regex": "=",
        "not_regex": "=",
        "gt": ">",
        "lt": "<",
    }
    return operator_to_sql.get(operator, "=")
def get_property_values_for_key(key: str, team: Team, value: Optional[str] = None):
    # Fetch distinct stored values for a property key; `value`, if given,
    # narrows the result with a substring (LIKE %value%) filter.
    if value:
        return sync_execute(
            SELECT_PROP_VALUES_SQL_WITH_FILTER, {"team_id": team.pk, "key": key, "value": "%{}%".format(value)}
        )
    return sync_execute(SELECT_PROP_VALUES_SQL, {"team_id": team.pk, "key": key}) | ee/clickhouse/models/property.py | from typing import Dict, List, Optional, Tuple
from ee.clickhouse.client import sync_execute
from ee.clickhouse.models.cohort import format_cohort_table_name
from ee.clickhouse.sql.cohort import COHORT_DISTINCT_ID_FILTER_SQL
from ee.clickhouse.sql.events import EVENT_PROP_CLAUSE, SELECT_PROP_VALUES_SQL, SELECT_PROP_VALUES_SQL_WITH_FILTER
from posthog.models.cohort import Cohort
from posthog.models.property import Property
from posthog.models.team import Team
def parse_filter(filters: List[Property]) -> Tuple[str, Dict]:
    # Build an AND-joined key/value match clause over the event-properties
    # table, plus the matching %(kN)s/%(vN)s query parameters.
    result = ""
    params = {}
    for idx, prop in enumerate(filters):
        # The stored value is JSON-encoded, hence trimming the quotes.
        result += "{cond}(ep.key = %(k{idx})s) AND (trim(BOTH '\"' FROM ep.value) = %(v{idx})s)".format(
            idx=idx, cond=" AND " if idx > 0 else ""
        )
        params.update({"k{}".format(idx): prop.key, "v{}".format(idx): prop.value})
    return result, params
def parse_prop_clauses(key: str, filters: List[Property], team: Team, prepend: str = "") -> Tuple[str, Dict]:
    """Translate property filters into SQL fragments plus query params.

    Cohort filters become a distinct_id subquery; everything else becomes a
    key/value match against the event-properties table.
    """
    final = ""
    params: Dict = {}
    for idx, prop in enumerate(filters):
        if prop.type == "cohort":
            cohort = Cohort.objects.get(pk=prop.value)
            clause = COHORT_DISTINCT_ID_FILTER_SQL.format(table_name=format_cohort_table_name(cohort))
            final += "{cond} ({clause}) ".format(cond="AND distinct_id IN", clause=clause)
        else:
            # Renamed from `filter` to avoid shadowing the builtin.
            prop_filter = "(ep.key = %(k{prepend}_{idx})s) AND (ep.value {operator} %(v{prepend}_{idx})s)".format(
                idx=idx, operator=get_operator(prop.operator), prepend=prepend
            )
            clause = EVENT_PROP_CLAUSE.format(team_id=team.pk, filters=prop_filter)
            final += "{cond} ({clause}) ".format(cond="AND {key} IN".format(key=key), clause=clause)
            # Values are stored JSON-encoded, hence the quote padding.
            params["k{}_{}".format(prepend, idx)] = prop.key
            params["v{}_{}".format(prepend, idx)] = _pad_value(prop.value)
    return final, params
def _pad_value(val: str):
if not val.startswith('"'):
val = '"' + val
if not val.endswith('"'):
val = val + '"'
return val
# TODO: handle all operators
def get_operator(operator: Optional[str]):
    """Map a PostHog filter operator name to its ClickHouse SQL operator.

    Unknown operators -- and the regex ones, which are not yet supported
    (see TODO above) -- fall back to plain equality.
    """
    known = (
        ("is_not", "!="),
        ("icontains", "LIKE"),
        ("not_icontains", "NOT LIKE"),
        ("regex", "="),
        ("not_regex", "="),
        ("gt", ">"),
        ("lt", "<"),
    )
    for name, sql in known:
        if operator == name:
            return sql
    return "="
def get_property_values_for_key(key: str, team: Team, value: Optional[str] = None):
    # Fetch distinct stored values for a property key; `value`, if given,
    # narrows the result with a substring (LIKE %value%) filter.
    if value:
        return sync_execute(
            SELECT_PROP_VALUES_SQL_WITH_FILTER, {"team_id": team.pk, "key": key, "value": "%{}%".format(value)}
        )
    return sync_execute(SELECT_PROP_VALUES_SQL, {"team_id": team.pk, "key": key}) | 0.547948 | 0.179153 |
import numpy as nm
from sfepy.discrete.fem import Mesh
def refine_2_3(mesh_in):
    """
    Refines mesh out of triangles by cutting each edge in half
    and making 4 new finer triangles out of one coarser one.
    """
    cmesh = mesh_in.cmesh
    # Unique edge centres.
    e_centres = cmesh.get_centroids(cmesh.dim - 1)
    # New coordinates after the original ones.
    coors = nm.r_[mesh_in.coors, e_centres]
    # Index of the first midpoint node appended to `coors`.
    o1 = mesh_in.n_nod
    # Cell -> edge connectivity: the three edge midpoints per triangle.
    cc = cmesh.get_conn(cmesh.dim, cmesh.dim - 1)
    conn = mesh_in.get_conn('2_3')
    n_el = conn.shape[0]
    e_nodes = cc.indices.reshape((n_el, 3)) + o1
    # Rows 0-2 of `c` are corners, 3-5 are edge midpoints.
    c = nm.c_[conn, e_nodes].T
    # Three corner triangles plus the central triangle per coarse element.
    new_conn = nm.vstack([c[0], c[3], c[5],
                          c[3], c[4], c[5],
                          c[1], c[4], c[3],
                          c[2], c[5], c[4]]).T
    new_conn = new_conn.reshape((4 * n_el, 3))
    new_mat_id = cmesh.cell_groups.repeat(4)
    mesh = Mesh.from_data(mesh_in.name + '_r', coors, None, [new_conn],
                          [new_mat_id], mesh_in.descs )
    return mesh
def refine_2_4(mesh_in):
    """
    Refines mesh out of quadrilaterals by cutting each edge in
    half and making 4 new finer quadrilaterals out of one coarser one.
    """
    cmesh = mesh_in.cmesh
    # Unique edge centres.
    e_centres = cmesh.get_centroids(cmesh.dim - 1)
    # Unique element centres.
    centres = cmesh.get_centroids(cmesh.dim)
    # New coordinates after the original ones.
    coors = nm.r_[mesh_in.coors, e_centres, centres]
    # Offsets of the midpoint and centre nodes in `coors`.
    o1 = mesh_in.n_nod
    o2 = o1 + e_centres.shape[0]
    # Cell -> edge connectivity: the four edge midpoints per cell.
    cc = cmesh.get_conn(cmesh.dim, cmesh.dim - 1)
    conn = mesh_in.get_conn('2_4')
    n_el = conn.shape[0]
    e_nodes = cc.indices.reshape((n_el, 4)) + o1
    nodes = nm.arange(n_el) + o2
    # Rows 0-3 of `c` are corners, 4-7 edge midpoints, 8 the cell centre.
    c = nm.c_[conn, e_nodes, nodes].T
    # Four sub-quads, each keeping one original corner.
    new_conn = nm.vstack([c[0], c[4], c[8], c[7],
                          c[1], c[5], c[8], c[4],
                          c[2], c[6], c[8], c[5],
                          c[3], c[7], c[8], c[6]]).T
    new_conn = new_conn.reshape((4 * n_el, 4))
    new_mat_id = cmesh.cell_groups.repeat(4)
    mesh = Mesh.from_data(mesh_in.name + '_r', coors, None, [new_conn],
                          [new_mat_id], mesh_in.descs )
    return mesh
def refine_3_4(mesh_in):
    """
    Refines tetrahedra by cutting each edge in half and making 8 new
    finer tetrahedra out of one coarser one. Old nodal coordinates come
    first in `coors`, then the new ones. The new tetrahedra are similar
    to the old one, no degeneration is supposed to occur as at most 3
    congruence classes of tetrahedra appear, even when re-applied
    iteratively (provided that `conns` are not modified between two
    applications - ordering of vertices in tetrahedra matters not only
    for positivity of volumes).
    References:
    - J. Bey: Simplicial grid refinement: on Freudenthal s algorithm and
      the optimal number of congruence classes, Numer.Math. 85 (2000),
      no. 1, 1--29, or
    - J. Bey: Tetrahedral grid refinement, Computing 55 (1995),
      no. 4, 355--378, or
      http://citeseer.ist.psu.edu/bey95tetrahedral.html
    """
    cmesh = mesh_in.cmesh
    # Unique edge centres (edges are dim - 2 entities in 3D).
    e_centres = cmesh.get_centroids(cmesh.dim - 2)
    # New coordinates after the original ones.
    coors = nm.r_[mesh_in.coors, e_centres]
    # Index of the first midpoint node appended to `coors`.
    o1 = mesh_in.n_nod
    # Cell -> edge connectivity: the six edge midpoints per tetrahedron.
    cc = cmesh.get_conn(cmesh.dim, cmesh.dim - 2)
    conn = mesh_in.get_conn('3_4')
    n_el = conn.shape[0]
    e_nodes = cc.indices.reshape((n_el, 6)) + o1
    # Rows 0-3 of `c` are corners, 4-9 are edge midpoints.
    c = nm.c_[conn, e_nodes].T
    # Four corner tets plus four tets subdividing the inner octahedron.
    new_conn = nm.vstack([c[0], c[4], c[6], c[7],
                          c[4], c[1], c[5], c[8],
                          c[6], c[5], c[2], c[9],
                          c[7], c[8], c[9], c[3],
                          c[4], c[6], c[7], c[8],
                          c[4], c[6], c[8], c[5],
                          c[6], c[7], c[8], c[9],
                          c[6], c[5], c[9], c[8]]).T
    new_conn = new_conn.reshape((8 * n_el, 4))
    new_mat_id = cmesh.cell_groups.repeat(8)
    mesh = Mesh.from_data(mesh_in.name + '_r', coors, None, [new_conn],
                          [new_mat_id], mesh_in.descs )
    return mesh
def refine_3_8(mesh_in):
    """
    Refines hexahedral mesh by cutting each edge in half and
    making 8 new finer hexahedrons out of one coarser one.
    """
    cmesh = mesh_in.cmesh
    # Unique edge centres.
    e_centres = cmesh.get_centroids(cmesh.dim - 2)
    # Unique face centres.
    f_centres = cmesh.get_centroids(cmesh.dim - 1)
    # Unique element centres.
    centres = cmesh.get_centroids(cmesh.dim)
    # New coordinates after the original ones.
    coors = nm.r_[mesh_in.coors, e_centres, f_centres, centres]
    # Offsets of the edge, face and cell-centre nodes in `coors`.
    o1 = mesh_in.n_nod
    o2 = o1 + e_centres.shape[0]
    o3 = o2 + f_centres.shape[0]
    # Cell -> edge and cell -> face connectivities.
    ecc = cmesh.get_conn(cmesh.dim, cmesh.dim - 2)
    fcc = cmesh.get_conn(cmesh.dim, cmesh.dim - 1)
    conn = mesh_in.get_conn('3_8')
    n_el = conn.shape[0]
    st = nm.vstack
    e_nodes = ecc.indices.reshape((n_el, 12)) + o1
    f_nodes = fcc.indices.reshape((n_el, 6)) + o2
    nodes = nm.arange(n_el) + o3
    # Rows of `c`: 0-7 corners, 8-19 edge midpoints, 20-25 face centres,
    # 26 the cell centre.
    c = nm.c_[conn, e_nodes, f_nodes, nodes].T
    # Eight sub-hexahedra, one per original corner.
    new_conn = st([c[0], c[8], c[20], c[11], c[16], c[22], c[26], c[21],
                   c[1], c[9], c[20], c[8], c[17], c[24], c[26], c[22],
                   c[2], c[10], c[20], c[9], c[18], c[25], c[26], c[24],
                   c[3], c[11], c[20], c[10], c[19], c[21], c[26], c[25],
                   c[4], c[15], c[23], c[12], c[16], c[21], c[26], c[22],
                   c[5], c[12], c[23], c[13], c[17], c[22], c[26], c[24],
                   c[6], c[13], c[23], c[14], c[18], c[24], c[26], c[25],
                   c[7], c[14], c[23], c[15], c[19], c[25], c[26], c[21]]).T
    new_conn = new_conn.reshape((8 * n_el, 8))
    new_mat_id = cmesh.cell_groups.repeat(8)
    mesh = Mesh.from_data(mesh_in.name + '_r', coors, None, [new_conn],
                          [new_mat_id], mesh_in.descs )
    return mesh
def refine_reference(geometry, level):
"""
Refine reference element given by `geometry`.
Notes
-----
The error edges must be generated in the order of the connectivity
of the previous (lower) level.
"""
from sfepy.discrete.fem import FEDomain
from sfepy.discrete.fem.geometry_element import geometry_data
gcoors, gconn = geometry.coors, geometry.conn
if level == 0:
return gcoors, gconn, None
gd = geometry_data[geometry.name]
conn = nm.array([gd.conn], dtype=nm.int32)
mat_id = conn[:, 0].copy()
mat_id[:] = 0
mesh = Mesh.from_data('aux', gd.coors, None, [conn],
[mat_id], [geometry.name])
domain = FEDomain('aux', mesh)
for ii in range(level):
domain = domain.refine()
coors = domain.mesh.coors
conn = domain.get_conn()
n_el = conn.shape[0]
if geometry.name == '2_3':
aux_conn = conn.reshape((n_el / 4, 4, 3))
ir = [[0, 1, 2], [2, 2, 3], [3, 3, 0]]
ic = [[0, 0, 0], [0, 1, 0], [0, 1, 0]]
elif geometry.name == '2_4':
aux_conn = conn.reshape((n_el / 4, 4, 4))
ir = [[0, 0, 1], [1, 1, 2], [2, 2, 3], [3, 3, 0], [0, 0, 2], [3, 3, 1]]
ic = [[0, 1, 0], [0, 1, 0], [0, 1, 0], [0, 1, 0], [1, 2, 1], [1, 2, 1]]
elif geometry.name == '3_4':
aux_conn = conn.reshape((n_el / 8, 8, 4))
ir = [[0, 0, 1], [1, 1, 2], [2, 0, 0], [3, 1, 1], [3, 2, 2], [3, 0, 0]]
ic = [[0, 1, 1], [1, 2, 2], [2, 2, 0], [3, 3, 1], [3, 3, 2], [3, 3, 0]]
elif geometry.name == '3_8':
aux_conn = conn.reshape((n_el / 8, 8, 8))
ir = [[0, 0, 1], [1, 1, 2], [2, 2, 3], [3, 0, 0], [0, 0, 2], [0, 0, 1],
[0, 0, 1], [1, 1, 2], [2, 2, 3], [3, 0, 0], [0, 0, 2], [0, 0, 1],
[4, 4, 5], [5, 5, 6], [6, 6, 7], [7, 4, 4], [4, 4, 6], [4, 4, 5],
[0, 0, 4], [1, 1, 5], [2, 2, 6], [3, 3, 7],
[0, 0, 4], [1, 1, 5], [2, 2, 6], [0, 0, 4],
[0, 0, 4]]
ic = [[0, 1, 0], [0, 1, 0], [0, 1, 0], [0, 3, 0], [1, 2, 1], [3, 2, 1],
[4, 5, 4], [4, 5, 4], [4, 5, 4], [4, 7, 4], [5, 6, 5], [7, 6, 5],
[0, 3, 0], [0, 3, 0], [0, 3, 0], [0, 1, 0], [3, 2, 3], [1, 2, 3],
[0, 4, 0], [0, 4, 0], [0, 4, 0], [0, 4, 0],
[1, 5, 3], [1, 5, 3], [1, 5, 3], [3, 7, 1],
[2, 6, 2]]
else:
raise ValueError('unsupported geometry! (%s)' % geometry.name)
conn = nm.array(conn, dtype=nm.int32)
error_edges = aux_conn[:, ir, ic]
return coors, conn, error_edges | sfepy/discrete/fem/refine.py | import numpy as nm
from sfepy.discrete.fem import Mesh
def refine_2_3(mesh_in):
    """
    Refine a triangular mesh: each edge is split in half and every coarse
    triangle is replaced by four finer ones.
    """
    cmesh = mesh_in.cmesh
    # Edge midpoints, appended after the original vertices.
    edge_mids = cmesh.get_centroids(cmesh.dim - 1)
    coors = nm.r_[mesh_in.coors, edge_mids]
    offset = mesh_in.n_nod
    cell_edges = cmesh.get_conn(cmesh.dim, cmesh.dim - 1)
    conn = mesh_in.get_conn('2_3')
    n_el = conn.shape[0]
    edge_nodes = cell_edges.indices.reshape((n_el, 3)) + offset
    # Rows 0-2: corners, 3-5: edge midpoints.
    c = nm.c_[conn, edge_nodes].T
    # Three corner triangles plus the central triangle per coarse element.
    sub_tris = [c[0], c[3], c[5],
                c[3], c[4], c[5],
                c[1], c[4], c[3],
                c[2], c[5], c[4]]
    new_conn = nm.vstack(sub_tris).T.reshape((4 * n_el, 3))
    new_mat_id = cmesh.cell_groups.repeat(4)
    return Mesh.from_data(mesh_in.name + '_r', coors, None, [new_conn],
                          [new_mat_id], mesh_in.descs)
def refine_2_4(mesh_in):
    """
    Refine a quadrilateral mesh: each edge is split in half and every coarse
    cell is replaced by four finer quadrilaterals.
    """
    cmesh = mesh_in.cmesh
    # Edge midpoints and cell centres, appended after the original vertices.
    edge_mids = cmesh.get_centroids(cmesh.dim - 1)
    cell_mids = cmesh.get_centroids(cmesh.dim)
    coors = nm.r_[mesh_in.coors, edge_mids, cell_mids]
    edge_offset = mesh_in.n_nod
    cell_offset = edge_offset + edge_mids.shape[0]
    cell_edges = cmesh.get_conn(cmesh.dim, cmesh.dim - 1)
    conn = mesh_in.get_conn('2_4')
    n_el = conn.shape[0]
    edge_nodes = cell_edges.indices.reshape((n_el, 4)) + edge_offset
    centre_nodes = nm.arange(n_el) + cell_offset
    # Rows 0-3: corners, 4-7: edge midpoints, 8: cell centre.
    c = nm.c_[conn, edge_nodes, centre_nodes].T
    # Four sub-quads, each keeping one original corner.
    sub_quads = [c[0], c[4], c[8], c[7],
                 c[1], c[5], c[8], c[4],
                 c[2], c[6], c[8], c[5],
                 c[3], c[7], c[8], c[6]]
    new_conn = nm.vstack(sub_quads).T.reshape((4 * n_el, 4))
    new_mat_id = cmesh.cell_groups.repeat(4)
    return Mesh.from_data(mesh_in.name + '_r', coors, None, [new_conn],
                          [new_mat_id], mesh_in.descs)
def refine_3_4(mesh_in):
    """
    Refine a tetrahedral mesh: each edge is split in half and every coarse
    tetrahedron is replaced by eight finer, similar ones. At most three
    congruence classes appear under repeated refinement (provided `conns`
    are not modified between applications - vertex ordering matters).
    References:
    - J. Bey: Simplicial grid refinement: on Freudenthal s algorithm and
      the optimal number of congruence classes, Numer. Math. 85 (2000),
      no. 1, 1--29, or
    - J. Bey: Tetrahedral grid refinement, Computing 55 (1995),
      no. 4, 355--378, or
      http://citeseer.ist.psu.edu/bey95tetrahedral.html
    """
    cmesh = mesh_in.cmesh
    # Edge midpoints (edges are dim - 2 entities in 3D), appended after
    # the original vertices.
    edge_mids = cmesh.get_centroids(cmesh.dim - 2)
    coors = nm.r_[mesh_in.coors, edge_mids]
    offset = mesh_in.n_nod
    cell_edges = cmesh.get_conn(cmesh.dim, cmesh.dim - 2)
    conn = mesh_in.get_conn('3_4')
    n_el = conn.shape[0]
    edge_nodes = cell_edges.indices.reshape((n_el, 6)) + offset
    # Rows 0-3: corners, 4-9: edge midpoints.
    c = nm.c_[conn, edge_nodes].T
    # Four corner tets plus four tets subdividing the inner octahedron.
    sub_tets = [c[0], c[4], c[6], c[7],
                c[4], c[1], c[5], c[8],
                c[6], c[5], c[2], c[9],
                c[7], c[8], c[9], c[3],
                c[4], c[6], c[7], c[8],
                c[4], c[6], c[8], c[5],
                c[6], c[7], c[8], c[9],
                c[6], c[5], c[9], c[8]]
    new_conn = nm.vstack(sub_tets).T.reshape((8 * n_el, 4))
    new_mat_id = cmesh.cell_groups.repeat(8)
    return Mesh.from_data(mesh_in.name + '_r', coors, None, [new_conn],
                          [new_mat_id], mesh_in.descs)
def refine_3_8(mesh_in):
    """
    Refine a hexahedral mesh: each edge is split in half and every coarse
    cell is replaced by eight finer hexahedra.
    """
    cmesh = mesh_in.cmesh
    # Edge midpoints, face centres and cell centres, appended after the
    # original vertices in that order.
    edge_mids = cmesh.get_centroids(cmesh.dim - 2)
    face_mids = cmesh.get_centroids(cmesh.dim - 1)
    cell_mids = cmesh.get_centroids(cmesh.dim)
    coors = nm.r_[mesh_in.coors, edge_mids, face_mids, cell_mids]
    edge_offset = mesh_in.n_nod
    face_offset = edge_offset + edge_mids.shape[0]
    cell_offset = face_offset + face_mids.shape[0]
    cell_edges = cmesh.get_conn(cmesh.dim, cmesh.dim - 2)
    cell_faces = cmesh.get_conn(cmesh.dim, cmesh.dim - 1)
    conn = mesh_in.get_conn('3_8')
    n_el = conn.shape[0]
    edge_nodes = cell_edges.indices.reshape((n_el, 12)) + edge_offset
    face_nodes = cell_faces.indices.reshape((n_el, 6)) + face_offset
    centre_nodes = nm.arange(n_el) + cell_offset
    # Rows 0-7: corners, 8-19: edge midpoints, 20-25: face centres,
    # 26: cell centre.
    c = nm.c_[conn, edge_nodes, face_nodes, centre_nodes].T
    # Eight sub-hexahedra, one per original corner.
    sub_hexas = [c[0], c[8], c[20], c[11], c[16], c[22], c[26], c[21],
                 c[1], c[9], c[20], c[8], c[17], c[24], c[26], c[22],
                 c[2], c[10], c[20], c[9], c[18], c[25], c[26], c[24],
                 c[3], c[11], c[20], c[10], c[19], c[21], c[26], c[25],
                 c[4], c[15], c[23], c[12], c[16], c[21], c[26], c[22],
                 c[5], c[12], c[23], c[13], c[17], c[22], c[26], c[24],
                 c[6], c[13], c[23], c[14], c[18], c[24], c[26], c[25],
                 c[7], c[14], c[23], c[15], c[19], c[25], c[26], c[21]]
    new_conn = nm.vstack(sub_hexas).T.reshape((8 * n_el, 8))
    new_mat_id = cmesh.cell_groups.repeat(8)
    return Mesh.from_data(mesh_in.name + '_r', coors, None, [new_conn],
                          [new_mat_id], mesh_in.descs)
def refine_reference(geometry, level):
"""
Refine reference element given by `geometry`.
Notes
-----
The error edges must be generated in the order of the connectivity
of the previous (lower) level.
"""
from sfepy.discrete.fem import FEDomain
from sfepy.discrete.fem.geometry_element import geometry_data
gcoors, gconn = geometry.coors, geometry.conn
if level == 0:
return gcoors, gconn, None
gd = geometry_data[geometry.name]
conn = nm.array([gd.conn], dtype=nm.int32)
mat_id = conn[:, 0].copy()
mat_id[:] = 0
mesh = Mesh.from_data('aux', gd.coors, None, [conn],
[mat_id], [geometry.name])
domain = FEDomain('aux', mesh)
for ii in range(level):
domain = domain.refine()
coors = domain.mesh.coors
conn = domain.get_conn()
n_el = conn.shape[0]
if geometry.name == '2_3':
aux_conn = conn.reshape((n_el / 4, 4, 3))
ir = [[0, 1, 2], [2, 2, 3], [3, 3, 0]]
ic = [[0, 0, 0], [0, 1, 0], [0, 1, 0]]
elif geometry.name == '2_4':
aux_conn = conn.reshape((n_el / 4, 4, 4))
ir = [[0, 0, 1], [1, 1, 2], [2, 2, 3], [3, 3, 0], [0, 0, 2], [3, 3, 1]]
ic = [[0, 1, 0], [0, 1, 0], [0, 1, 0], [0, 1, 0], [1, 2, 1], [1, 2, 1]]
elif geometry.name == '3_4':
aux_conn = conn.reshape((n_el / 8, 8, 4))
ir = [[0, 0, 1], [1, 1, 2], [2, 0, 0], [3, 1, 1], [3, 2, 2], [3, 0, 0]]
ic = [[0, 1, 1], [1, 2, 2], [2, 2, 0], [3, 3, 1], [3, 3, 2], [3, 3, 0]]
elif geometry.name == '3_8':
aux_conn = conn.reshape((n_el / 8, 8, 8))
ir = [[0, 0, 1], [1, 1, 2], [2, 2, 3], [3, 0, 0], [0, 0, 2], [0, 0, 1],
[0, 0, 1], [1, 1, 2], [2, 2, 3], [3, 0, 0], [0, 0, 2], [0, 0, 1],
[4, 4, 5], [5, 5, 6], [6, 6, 7], [7, 4, 4], [4, 4, 6], [4, 4, 5],
[0, 0, 4], [1, 1, 5], [2, 2, 6], [3, 3, 7],
[0, 0, 4], [1, 1, 5], [2, 2, 6], [0, 0, 4],
[0, 0, 4]]
ic = [[0, 1, 0], [0, 1, 0], [0, 1, 0], [0, 3, 0], [1, 2, 1], [3, 2, 1],
[4, 5, 4], [4, 5, 4], [4, 5, 4], [4, 7, 4], [5, 6, 5], [7, 6, 5],
[0, 3, 0], [0, 3, 0], [0, 3, 0], [0, 1, 0], [3, 2, 3], [1, 2, 3],
[0, 4, 0], [0, 4, 0], [0, 4, 0], [0, 4, 0],
[1, 5, 3], [1, 5, 3], [1, 5, 3], [3, 7, 1],
[2, 6, 2]]
else:
raise ValueError('unsupported geometry! (%s)' % geometry.name)
conn = nm.array(conn, dtype=nm.int32)
error_edges = aux_conn[:, ir, ic]
return coors, conn, error_edges | 0.795539 | 0.602179 |
import yaml
import sys
import re
class Constants:
    """Keys of the generator YAML plus shell-statement templates."""
    # Top-level keys of the generator configuration YAML.
    KEY_CONFIG_FILES = "config_files"
    KEY_BASE_DOCKERFILE_TEMPLATE = "base_dockerfile_template"
    KEY_CONFIG_LOADER_SH_TEMPLATE = "config_loader_sh_template"
    KEY_OUTPUT_DIR = "output_dir"
    # Per-config-file entry keys.
    KEY_PATH = "path"
    KEY_FILENAME = "filename"
    # Shell statements emitted into config_loader.sh.
    CONFIG_LOADER_STD_STATEMENT_FMT = "load_config \"{property}\" \"${{{env_var_name}:={env_var_default_value}}}\" \"{config_filename}\""
    CONFIG_LOADER_SUBST_STATEMENT_FMT = "load_config \"{property}\" \"{substitution}\" \"{config_filename}\""
    CONFIG_LOADER_OPT_SUBST_STATEMENT_FMT = "load_config_with_opt \"{property}\" \"{check}\" \"{substitution_not_null}\" \"{substitution_null}\" \"{config_filename}\""
    CONFIG_LOADER_SECTION_STATEMENT_FMT = "\nprintf \"\\n{section}\\n\" {op} \"${{AIRFLOW_CONF_DIR}}/{config_filename}\""
    # Matches "<section>" marker keys in the config YAML.
    CONFIGURATION_SECTION_KEY_REGEX = re.compile("<[a-z_A-Z]+>")
class ConfigLoaderGenerator:
    """Generates a `config_loader.sh` script from a YAML description.

    The top-level YAML lists config files; each config file's YAML maps
    properties (optionally nested, with <section> markers) to default
    values or substitution directives (`=...` literal, `?check,a,b`
    conditional).
    """
    def __init__(self, config_filename):
        # Fixed: the file handle was previously opened and never closed.
        with open(config_filename, "r") as fh:
            self.__config = yaml.safe_load(fh)
    def __get_infos(self, data, config_filename, init_section=True, property_prefix=None, env_var_prefix=None):
        # Recursively turn the nested property mapping into a flat list of
        # shell `load_config*` statements.
        load_fn_calls = []
        for property, value in data.items():
            env_var_name = env_var_prefix + "_" + property if env_var_prefix else property
            property_name = property_prefix + "_" + property \
                if property_prefix and not Constants.CONFIGURATION_SECTION_KEY_REGEX.match(property_prefix) \
                else property
            env_var_name = env_var_name.upper().replace("<", "").replace(">", "").replace("-", "_")
            property_name = property_name[:-1] if property_name[-1] == "_" else property_name
            if Constants.CONFIGURATION_SECTION_KEY_REGEX.match(property_name):
                # <section> keys emit an INI-style "[section]" header; the
                # first one truncates the target file, later ones append.
                load_fn_calls.append(
                    Constants.CONFIG_LOADER_SECTION_STATEMENT_FMT.format(
                        section=property_name.replace("<", "[").replace(">", "]"),
                        op=">" if init_section else ">>",
                        config_filename=config_filename
                    )
                )
                init_section = False
            if isinstance(value, dict):
                load_fn_calls.extend(
                    self.__get_infos(value, config_filename, init_section, property_name, env_var_name)
                )
            elif value.startswith("="):  # literal substitution statement
                load_fn_calls.append(Constants.CONFIG_LOADER_SUBST_STATEMENT_FMT.format(
                    property=property_name, substitution=value[1:], config_filename=config_filename)
                )
            elif value.startswith("?"):  # conditional substitution statement
                tokens = value.split(",")
                load_fn_calls.append(Constants.CONFIG_LOADER_OPT_SUBST_STATEMENT_FMT.format(
                    property=property_name, check=tokens[0][1:].strip(),
                    substitution_not_null=tokens[1].strip(),
                    substitution_null=tokens[2].strip(),
                    config_filename=config_filename
                ))
            else:  # plain env-var-with-default statement
                load_fn_calls.append(Constants.CONFIG_LOADER_STD_STATEMENT_FMT.format(
                    property=property_name,
                    env_var_name=env_var_name,
                    env_var_default_value=value,
                    config_filename=config_filename
                ))
        return load_fn_calls
    def generate(self):
        # Collect the statements for every configured file and render the
        # shell-script template.
        load_fn_calls = []
        for config_info in self.__config[Constants.KEY_CONFIG_FILES]:
            config_file_path = config_info[Constants.KEY_PATH]
            config_filename = config_info[Constants.KEY_FILENAME]
            with open(config_file_path, "r") as fh:
                file_data = yaml.safe_load(fh)
            load_fn_calls.extend(self.__get_infos(file_data, config_filename))
        out_path = "{loc}/config_loader.sh".format(loc=self.__config[Constants.KEY_OUTPUT_DIR])
        # Fixed: the output handle was previously never closed, so the write
        # was not deterministically flushed.
        with open(out_path, "w") as out:
            out.write(
                self.__config[Constants.KEY_CONFIG_LOADER_SH_TEMPLATE].format(
                    load_fn_calls="\n".join(load_fn_calls))
            )
# Script entry point: path to the generator YAML is the first CLI argument.
if __name__ == "__main__":
    ConfigLoaderGenerator(sys.argv[1]).generate() | src/utils/config_loader_generator/config_loader_generator.py |
import yaml
import sys
import re
class Constants:
    """Keys of the generator YAML plus shell-statement templates."""
    # Top-level keys of the generator configuration YAML.
    KEY_CONFIG_FILES = "config_files"
    KEY_BASE_DOCKERFILE_TEMPLATE = "base_dockerfile_template"
    KEY_CONFIG_LOADER_SH_TEMPLATE = "config_loader_sh_template"
    KEY_OUTPUT_DIR = "output_dir"
    # Per-config-file entry keys.
    KEY_PATH = "path"
    KEY_FILENAME = "filename"
    # Shell statements emitted into config_loader.sh.
    CONFIG_LOADER_STD_STATEMENT_FMT = "load_config \"{property}\" \"${{{env_var_name}:={env_var_default_value}}}\" \"{config_filename}\""
    CONFIG_LOADER_SUBST_STATEMENT_FMT = "load_config \"{property}\" \"{substitution}\" \"{config_filename}\""
    CONFIG_LOADER_OPT_SUBST_STATEMENT_FMT = "load_config_with_opt \"{property}\" \"{check}\" \"{substitution_not_null}\" \"{substitution_null}\" \"{config_filename}\""
    CONFIG_LOADER_SECTION_STATEMENT_FMT = "\nprintf \"\\n{section}\\n\" {op} \"${{AIRFLOW_CONF_DIR}}/{config_filename}\""
    # Matches "<section>" marker keys in the config YAML.
    CONFIGURATION_SECTION_KEY_REGEX = re.compile("<[a-z_A-Z]+>")
class ConfigLoaderGenerator:
    """Generates a `config_loader.sh` script from a YAML description.

    The top-level YAML lists config files; each config file's YAML maps
    properties (optionally nested, with <section> markers) to default
    values or substitution directives (`=...` literal, `?check,a,b`
    conditional).
    """
    def __init__(self, config_filename):
        # Fixed: the file handle was previously opened and never closed.
        with open(config_filename, "r") as fh:
            self.__config = yaml.safe_load(fh)
    def __get_infos(self, data, config_filename, init_section=True, property_prefix=None, env_var_prefix=None):
        # Recursively turn the nested property mapping into a flat list of
        # shell `load_config*` statements.
        load_fn_calls = []
        for property, value in data.items():
            env_var_name = env_var_prefix + "_" + property if env_var_prefix else property
            property_name = property_prefix + "_" + property \
                if property_prefix and not Constants.CONFIGURATION_SECTION_KEY_REGEX.match(property_prefix) \
                else property
            env_var_name = env_var_name.upper().replace("<", "").replace(">", "").replace("-", "_")
            property_name = property_name[:-1] if property_name[-1] == "_" else property_name
            if Constants.CONFIGURATION_SECTION_KEY_REGEX.match(property_name):
                # <section> keys emit an INI-style "[section]" header; the
                # first one truncates the target file, later ones append.
                load_fn_calls.append(
                    Constants.CONFIG_LOADER_SECTION_STATEMENT_FMT.format(
                        section=property_name.replace("<", "[").replace(">", "]"),
                        op=">" if init_section else ">>",
                        config_filename=config_filename
                    )
                )
                init_section = False
            if isinstance(value, dict):
                load_fn_calls.extend(
                    self.__get_infos(value, config_filename, init_section, property_name, env_var_name)
                )
            elif value.startswith("="):  # literal substitution statement
                load_fn_calls.append(Constants.CONFIG_LOADER_SUBST_STATEMENT_FMT.format(
                    property=property_name, substitution=value[1:], config_filename=config_filename)
                )
            elif value.startswith("?"):  # conditional substitution statement
                tokens = value.split(",")
                load_fn_calls.append(Constants.CONFIG_LOADER_OPT_SUBST_STATEMENT_FMT.format(
                    property=property_name, check=tokens[0][1:].strip(),
                    substitution_not_null=tokens[1].strip(),
                    substitution_null=tokens[2].strip(),
                    config_filename=config_filename
                ))
            else:  # plain env-var-with-default statement
                load_fn_calls.append(Constants.CONFIG_LOADER_STD_STATEMENT_FMT.format(
                    property=property_name,
                    env_var_name=env_var_name,
                    env_var_default_value=value,
                    config_filename=config_filename
                ))
        return load_fn_calls
    def generate(self):
        # Collect the statements for every configured file and render the
        # shell-script template.
        load_fn_calls = []
        for config_info in self.__config[Constants.KEY_CONFIG_FILES]:
            config_file_path = config_info[Constants.KEY_PATH]
            config_filename = config_info[Constants.KEY_FILENAME]
            with open(config_file_path, "r") as fh:
                file_data = yaml.safe_load(fh)
            load_fn_calls.extend(self.__get_infos(file_data, config_filename))
        out_path = "{loc}/config_loader.sh".format(loc=self.__config[Constants.KEY_OUTPUT_DIR])
        # Fixed: the output handle was previously never closed, so the write
        # was not deterministically flushed.
        with open(out_path, "w") as out:
            out.write(
                self.__config[Constants.KEY_CONFIG_LOADER_SH_TEMPLATE].format(
                    load_fn_calls="\n".join(load_fn_calls))
            )
if __name__ == "__main__":
ConfigLoaderGenerator(sys.argv[1]).generate() | 0.269133 | 0.051035 |
import unittest
from adjudicator.state import data_to_state
from adjudicator import order
from adjudicator.territory import CoastalTerritory, InlandTerritory, \
SeaTerritory
class TestDataToState(unittest.TestCase):
def test_sea_territory(self):
data = {
'orders': [],
'pieces': [],
'named_coasts': [],
'territories': [
{
'_id': 1,
'type': 'sea',
'name': 'Adriatic Sea',
'neighbour_ids': [2, 3, 4, 5],
'shared_coast_ids': None,
}
]
}
state = data_to_state(data)
self.assertEqual(type(state.territories[0]), SeaTerritory)
self.assertEqual(state.territories[0].name, 'Adriatic Sea')
self.assertEqual(state.territories[0].id, 1)
def test_coastal_territory(self):
data = {
'orders': [],
'pieces': [],
'named_coasts': [],
'territories': [
{
'_id': 1,
'type': 'coastal',
'name': 'Brest',
'nationality': 1,
'controlled_by': 1,
'supply_center': True,
'neighbour_ids': [2, 3, 4, 5],
'shared_coast_ids': [2, 3],
}
]
}
state = data_to_state(data)
self.assertEqual(type(state.territories[0]), CoastalTerritory)
self.assertEqual(state.territories[0].name, 'Brest')
self.assertEqual(state.territories[0].nationality, 1)
self.assertEqual(state.territories[0].controlled_by, 1)
self.assertEqual(state.territories[0].supply_center, True)
self.assertEqual(state.territories[0].id, 1)
def test_inland_territory(self):
data = {
'orders': [],
'pieces': [],
'named_coasts': [],
'territories': [
{
'_id': 1,
'type': 'inland',
'name': 'Paris',
'nationality': 1,
'controlled_by': 1,
'supply_center': True,
'neighbour_ids': [2, 3, 4, 5],
'shared_coast_ids': None,
}
]
}
state = data_to_state(data)
self.assertEqual(type(state.territories[0]), InlandTerritory)
self.assertEqual(state.territories[0].name, 'Paris')
self.assertEqual(state.territories[0].nationality, 1)
self.assertEqual(state.territories[0].controlled_by, 1)
self.assertEqual(state.territories[0].supply_center, True)
self.assertEqual(state.territories[0].id, 1)
def test_named_coast(self):
data = {
'orders': [],
'pieces': [],
'named_coasts': [
{
'_id': 1,
'name': 'Spain South Coast',
'territory_id': 1,
'neighbour_ids': [2],
}
],
'territories': [
{
'_id': 1,
'type': 'coastal',
'name': 'Spain',
'nationality': None,
'controlled_by': None,
'supply_center': True,
'neighbour_ids': [2, 3, 4, 5],
'shared_coast_ids': [2, 3],
}
]
}
state = data_to_state(data)
self.assertEqual(type(state.named_coasts[0].parent), CoastalTerritory)
self.assertEqual(state.named_coasts[0].name, 'Spain South Coast')
self.assertEqual(state.named_coasts[0].id, 1)
def test_piece(self):
data = {
'orders': [],
'pieces': [
{
'_id': 0,
'type': 'army',
'nation': 1,
'territory_id': 1,
}
],
'named_coasts': [],
'territories': [
{
'_id': 1,
'type': 'coastal',
'name': 'Spain',
'nationality': None,
'controlled_by': None,
'supply_center': True,
'neighbour_ids': [2, 3, 4, 5],
'shared_coast_ids': [2, 3],
}
]
}
state = data_to_state(data)
self.assertEqual(type(state.pieces[0].territory), CoastalTerritory)
self.assertEqual(state.pieces[0].__class__.__name__, 'Army')
self.assertEqual(state.pieces[0].nation, 1)
def test_hold(self):
data = {
'orders': [
{
'_id': 1,
'type': 'hold',
'nation': 1,
'source_id': 1,
'piece_type': None,
}
],
'pieces': [],
'named_coasts': [],
'territories': [
{
'_id': 1,
'type': 'coastal',
'name': 'Spain',
'nationality': None,
'controlled_by': None,
'supply_center': True,
'neighbour_ids': [2, 3, 4, 5],
'shared_coast_ids': [2, 3],
},
]
}
state = data_to_state(data)
self.assertEqual(type(state.orders[0].source), CoastalTerritory)
self.assertEqual(type(state.orders[0]), order.Hold)
self.assertEqual(state.orders[0].nation, 1)
def test_move(self):
data = {
'orders': [
{
'_id': 1,
'type': 'move',
'nation': 1,
'source_id': 1,
'target_id': 2,
'piece_type': None,
}
],
'pieces': [],
'named_coasts': [],
'territories': [
{
'_id': 1,
'type': 'coastal',
'name': 'Spain',
'nationality': None,
'controlled_by': None,
'supply_center': True,
'neighbour_ids': [2, 3, 4, 5],
'shared_coast_ids': [2, 3],
},
{
'_id': 2,
'type': 'coastal',
'name': 'Portugal',
'nationality': None,
'controlled_by': None,
'supply_center': True,
'neighbour_ids': [1, 3, 4, 5],
'shared_coast_ids': [1, 3],
}
]
}
state = data_to_state(data)
self.assertEqual(type(state.orders[0].source), CoastalTerritory)
self.assertEqual(type(state.orders[0].target), CoastalTerritory)
self.assertEqual(type(state.orders[0]), order.Move)
self.assertEqual(state.orders[0].nation, 1)
def test_support(self):
data = {
'orders': [
{
'_id': 1,
'type': 'support',
'nation': 1,
'source_id': 1,
'target_id': 2,
'aux_id': 2,
'piece_type': None,
}
],
'pieces': [],
'named_coasts': [],
'territories': [
{
'_id': 1,
'type': 'coastal',
'name': 'Spain',
'nationality': None,
'controlled_by': None,
'supply_center': True,
'neighbour_ids': [2, 3, 4, 5],
'shared_coast_ids': [2, 3],
},
{
'_id': 2,
'type': 'coastal',
'name': 'Portugal',
'nationality': None,
'controlled_by': None,
'supply_center': True,
'neighbour_ids': [1, 3, 4, 5],
'shared_coast_ids': [1, 3],
}
]
}
state = data_to_state(data)
self.assertEqual(type(state.orders[0].source), CoastalTerritory)
self.assertEqual(type(state.orders[0].target), CoastalTerritory)
self.assertEqual(type(state.orders[0].aux), CoastalTerritory)
self.assertEqual(type(state.orders[0]), order.Support)
self.assertEqual(state.orders[0].nation, 1)
def test_convoy(self):
data = {
'orders': [
{
'_id': 1,
'type': 'convoy',
'nation': 1,
'source_id': 1,
'target_id': 2,
'aux_id': 2,
'piece_type': None,
}
],
'pieces': [],
'named_coasts': [],
'territories': [
{
'_id': 1,
'type': 'coastal',
'name': 'Spain',
'nationality': None,
'controlled_by': None,
'supply_center': True,
'neighbour_ids': [2, 3, 4, 5],
'shared_coast_ids': [2, 3],
},
{
'_id': 2,
'type': 'coastal',
'name': 'Portugal',
'nationality': None,
'controlled_by': None,
'supply_center': True,
'neighbour_ids': [1, 3, 4, 5],
'shared_coast_ids': [1, 3],
}
]
}
state = data_to_state(data)
self.assertEqual(type(state.orders[0].source), CoastalTerritory)
self.assertEqual(type(state.orders[0].target), CoastalTerritory)
self.assertEqual(type(state.orders[0].aux), CoastalTerritory)
self.assertEqual(type(state.orders[0]), order.Convoy)
self.assertEqual(state.orders[0].nation, 1) | adjudicator/tests/test_data_to_state.py | import unittest
from adjudicator.state import data_to_state
from adjudicator import order
from adjudicator.territory import CoastalTerritory, InlandTerritory, \
SeaTerritory
class TestDataToState(unittest.TestCase):
def test_sea_territory(self):
data = {
'orders': [],
'pieces': [],
'named_coasts': [],
'territories': [
{
'_id': 1,
'type': 'sea',
'name': 'Adriatic Sea',
'neighbour_ids': [2, 3, 4, 5],
'shared_coast_ids': None,
}
]
}
state = data_to_state(data)
self.assertEqual(type(state.territories[0]), SeaTerritory)
self.assertEqual(state.territories[0].name, 'Adriatic Sea')
self.assertEqual(state.territories[0].id, 1)
def test_coastal_territory(self):
data = {
'orders': [],
'pieces': [],
'named_coasts': [],
'territories': [
{
'_id': 1,
'type': 'coastal',
'name': 'Brest',
'nationality': 1,
'controlled_by': 1,
'supply_center': True,
'neighbour_ids': [2, 3, 4, 5],
'shared_coast_ids': [2, 3],
}
]
}
state = data_to_state(data)
self.assertEqual(type(state.territories[0]), CoastalTerritory)
self.assertEqual(state.territories[0].name, 'Brest')
self.assertEqual(state.territories[0].nationality, 1)
self.assertEqual(state.territories[0].controlled_by, 1)
self.assertEqual(state.territories[0].supply_center, True)
self.assertEqual(state.territories[0].id, 1)
def test_inland_territory(self):
data = {
'orders': [],
'pieces': [],
'named_coasts': [],
'territories': [
{
'_id': 1,
'type': 'inland',
'name': 'Paris',
'nationality': 1,
'controlled_by': 1,
'supply_center': True,
'neighbour_ids': [2, 3, 4, 5],
'shared_coast_ids': None,
}
]
}
state = data_to_state(data)
self.assertEqual(type(state.territories[0]), InlandTerritory)
self.assertEqual(state.territories[0].name, 'Paris')
self.assertEqual(state.territories[0].nationality, 1)
self.assertEqual(state.territories[0].controlled_by, 1)
self.assertEqual(state.territories[0].supply_center, True)
self.assertEqual(state.territories[0].id, 1)
def test_named_coast(self):
data = {
'orders': [],
'pieces': [],
'named_coasts': [
{
'_id': 1,
'name': 'Spain South Coast',
'territory_id': 1,
'neighbour_ids': [2],
}
],
'territories': [
{
'_id': 1,
'type': 'coastal',
'name': 'Spain',
'nationality': None,
'controlled_by': None,
'supply_center': True,
'neighbour_ids': [2, 3, 4, 5],
'shared_coast_ids': [2, 3],
}
]
}
state = data_to_state(data)
self.assertEqual(type(state.named_coasts[0].parent), CoastalTerritory)
self.assertEqual(state.named_coasts[0].name, 'Spain South Coast')
self.assertEqual(state.named_coasts[0].id, 1)
def test_piece(self):
data = {
'orders': [],
'pieces': [
{
'_id': 0,
'type': 'army',
'nation': 1,
'territory_id': 1,
}
],
'named_coasts': [],
'territories': [
{
'_id': 1,
'type': 'coastal',
'name': 'Spain',
'nationality': None,
'controlled_by': None,
'supply_center': True,
'neighbour_ids': [2, 3, 4, 5],
'shared_coast_ids': [2, 3],
}
]
}
state = data_to_state(data)
self.assertEqual(type(state.pieces[0].territory), CoastalTerritory)
self.assertEqual(state.pieces[0].__class__.__name__, 'Army')
self.assertEqual(state.pieces[0].nation, 1)
def test_hold(self):
data = {
'orders': [
{
'_id': 1,
'type': 'hold',
'nation': 1,
'source_id': 1,
'piece_type': None,
}
],
'pieces': [],
'named_coasts': [],
'territories': [
{
'_id': 1,
'type': 'coastal',
'name': 'Spain',
'nationality': None,
'controlled_by': None,
'supply_center': True,
'neighbour_ids': [2, 3, 4, 5],
'shared_coast_ids': [2, 3],
},
]
}
state = data_to_state(data)
self.assertEqual(type(state.orders[0].source), CoastalTerritory)
self.assertEqual(type(state.orders[0]), order.Hold)
self.assertEqual(state.orders[0].nation, 1)
def test_move(self):
data = {
'orders': [
{
'_id': 1,
'type': 'move',
'nation': 1,
'source_id': 1,
'target_id': 2,
'piece_type': None,
}
],
'pieces': [],
'named_coasts': [],
'territories': [
{
'_id': 1,
'type': 'coastal',
'name': 'Spain',
'nationality': None,
'controlled_by': None,
'supply_center': True,
'neighbour_ids': [2, 3, 4, 5],
'shared_coast_ids': [2, 3],
},
{
'_id': 2,
'type': 'coastal',
'name': 'Portugal',
'nationality': None,
'controlled_by': None,
'supply_center': True,
'neighbour_ids': [1, 3, 4, 5],
'shared_coast_ids': [1, 3],
}
]
}
state = data_to_state(data)
self.assertEqual(type(state.orders[0].source), CoastalTerritory)
self.assertEqual(type(state.orders[0].target), CoastalTerritory)
self.assertEqual(type(state.orders[0]), order.Move)
self.assertEqual(state.orders[0].nation, 1)
def test_support(self):
data = {
'orders': [
{
'_id': 1,
'type': 'support',
'nation': 1,
'source_id': 1,
'target_id': 2,
'aux_id': 2,
'piece_type': None,
}
],
'pieces': [],
'named_coasts': [],
'territories': [
{
'_id': 1,
'type': 'coastal',
'name': 'Spain',
'nationality': None,
'controlled_by': None,
'supply_center': True,
'neighbour_ids': [2, 3, 4, 5],
'shared_coast_ids': [2, 3],
},
{
'_id': 2,
'type': 'coastal',
'name': 'Portugal',
'nationality': None,
'controlled_by': None,
'supply_center': True,
'neighbour_ids': [1, 3, 4, 5],
'shared_coast_ids': [1, 3],
}
]
}
state = data_to_state(data)
self.assertEqual(type(state.orders[0].source), CoastalTerritory)
self.assertEqual(type(state.orders[0].target), CoastalTerritory)
self.assertEqual(type(state.orders[0].aux), CoastalTerritory)
self.assertEqual(type(state.orders[0]), order.Support)
self.assertEqual(state.orders[0].nation, 1)
def test_convoy(self):
data = {
'orders': [
{
'_id': 1,
'type': 'convoy',
'nation': 1,
'source_id': 1,
'target_id': 2,
'aux_id': 2,
'piece_type': None,
}
],
'pieces': [],
'named_coasts': [],
'territories': [
{
'_id': 1,
'type': 'coastal',
'name': 'Spain',
'nationality': None,
'controlled_by': None,
'supply_center': True,
'neighbour_ids': [2, 3, 4, 5],
'shared_coast_ids': [2, 3],
},
{
'_id': 2,
'type': 'coastal',
'name': 'Portugal',
'nationality': None,
'controlled_by': None,
'supply_center': True,
'neighbour_ids': [1, 3, 4, 5],
'shared_coast_ids': [1, 3],
}
]
}
state = data_to_state(data)
self.assertEqual(type(state.orders[0].source), CoastalTerritory)
self.assertEqual(type(state.orders[0].target), CoastalTerritory)
self.assertEqual(type(state.orders[0].aux), CoastalTerritory)
self.assertEqual(type(state.orders[0]), order.Convoy)
self.assertEqual(state.orders[0].nation, 1) | 0.433862 | 0.57081 |
import re
from resolv.shared import ResolverError, TechnicalError, unescape, Task
class PutlockerTask(Task):
result_type = "video"
name = "PutLocker"
author = "<NAME>"
author_url = "http://cryto.net/~joepie91"
def run(self):
try:
import mechanize
except ImportError:
self.state = "failed"
raise TechnicalError("The Python mechanize module is required to resolve PutLocker URLs.")
matches = re.search("https?:\/\/(www\.)?putlocker\.com\/(file|embed)\/([A-Z0-9]+)", self.url)
if matches is None:
self.state = "invalid"
raise ResolverError("The provided URL is not a valid PutLocker URL.")
video_id = matches.group(3)
try:
browser = mechanize.Browser()
browser.set_handle_robots(False)
browser.open("http://putlocker.com/embed/%s" % video_id)
except:
self.state = "failed"
raise TechnicalError("The PutLocker site could not be reached.")
try:
browser.select_form(nr=0)
result = browser.submit()
page = result.read()
except Exception, e:
self.state = "nonexistent"
raise ResolverError("The file was removed, or the URL is incorrect.")
matches = re.search("playlist: '([^']+)'", page)
if matches is None:
self.state = "failed"
raise ResolverError("No playlist was found on the given URL; the PutLocker server for this file may be in maintenance mode, or the given URL may not be a video file. The PutLocker resolver currently only supports video links.")
playlist = matches.group(1)
try:
browser.open("http://www.putlocker.com%s" % playlist)
except:
self.state = "failed"
raise TechnicalError("The playlist file for the given URL could not be loaded.")
matches = re.search("url=\"([^\"]+)\" type=\"video\/x-flv\"", browser.response().read())
if matches is None:
self.state = "failed"
raise ResolverError("The playlist file does not contain any video URLs. The PutLocker resolver currently only supports video links.")
video_file = matches.group(1)
try:
video_title = unescape(re.search('<a href="\/file\/[^"]+"[^>]*><strong>([^<]*)<\/strong><\/a>', page).group(1))
except:
self.state = "failed"
raise TechnicalError("Could not find the video title.")
stream_dict = {
'url' : video_file,
'method' : "GET",
'quality' : "unknown",
'priority' : 1,
'format' : "unknown"
}
self.results = {
'title': video_title,
'videos': [stream_dict]
}
self.state = "finished"
return self | resolv/resolvers/putlocker.py | import re
from resolv.shared import ResolverError, TechnicalError, unescape, Task
class PutlockerTask(Task):
result_type = "video"
name = "PutLocker"
author = "<NAME>"
author_url = "http://cryto.net/~joepie91"
def run(self):
try:
import mechanize
except ImportError:
self.state = "failed"
raise TechnicalError("The Python mechanize module is required to resolve PutLocker URLs.")
matches = re.search("https?:\/\/(www\.)?putlocker\.com\/(file|embed)\/([A-Z0-9]+)", self.url)
if matches is None:
self.state = "invalid"
raise ResolverError("The provided URL is not a valid PutLocker URL.")
video_id = matches.group(3)
try:
browser = mechanize.Browser()
browser.set_handle_robots(False)
browser.open("http://putlocker.com/embed/%s" % video_id)
except:
self.state = "failed"
raise TechnicalError("The PutLocker site could not be reached.")
try:
browser.select_form(nr=0)
result = browser.submit()
page = result.read()
except Exception, e:
self.state = "nonexistent"
raise ResolverError("The file was removed, or the URL is incorrect.")
matches = re.search("playlist: '([^']+)'", page)
if matches is None:
self.state = "failed"
raise ResolverError("No playlist was found on the given URL; the PutLocker server for this file may be in maintenance mode, or the given URL may not be a video file. The PutLocker resolver currently only supports video links.")
playlist = matches.group(1)
try:
browser.open("http://www.putlocker.com%s" % playlist)
except:
self.state = "failed"
raise TechnicalError("The playlist file for the given URL could not be loaded.")
matches = re.search("url=\"([^\"]+)\" type=\"video\/x-flv\"", browser.response().read())
if matches is None:
self.state = "failed"
raise ResolverError("The playlist file does not contain any video URLs. The PutLocker resolver currently only supports video links.")
video_file = matches.group(1)
try:
video_title = unescape(re.search('<a href="\/file\/[^"]+"[^>]*><strong>([^<]*)<\/strong><\/a>', page).group(1))
except:
self.state = "failed"
raise TechnicalError("Could not find the video title.")
stream_dict = {
'url' : video_file,
'method' : "GET",
'quality' : "unknown",
'priority' : 1,
'format' : "unknown"
}
self.results = {
'title': video_title,
'videos': [stream_dict]
}
self.state = "finished"
return self | 0.134378 | 0.108566 |
import typing as _typing
import apache.thrift.metadata.lite_types as _fbthrift_metadata
import folly.iobuf as _fbthrift_iobuf
from thrift.py3lite.client import (
AsyncClient as _fbthrift_py3lite_AsyncClient,
SyncClient as _fbthrift_py3lite_SyncClient,
Client as _fbthrift_py3lite_Client,
)
import thrift.py3lite.exceptions as _fbthrift_py3lite_exceptions
import thrift.py3lite.types as _fbthrift_py3lite_types
import module.lite_types
import module.lite_metadata
class MyService(_fbthrift_py3lite_Client["MyService.Async", "MyService.Sync"]):
@staticmethod
def __get_thrift_name__() -> str:
return "module.MyService"
@staticmethod
def __get_metadata__() -> _fbthrift_metadata.ThriftMetadata:
return module.lite_metadata.gen_metadata_service_MyService()
class Async(_fbthrift_py3lite_AsyncClient):
@staticmethod
def __get_thrift_name__() -> str:
return "module.MyService"
@staticmethod
def __get_metadata__() -> _fbthrift_metadata.ThriftMetadata:
return module.lite_metadata.gen_metadata_service_MyService()
async def ping(
self
) -> None:
resp = await self._send_request(
"MyService",
"ping",
module.lite_types._fbthrift_MyService_ping_args(),
module.lite_types._fbthrift_MyService_ping_result,
)
async def getRandomData(
self
) -> str:
resp = await self._send_request(
"MyService",
"getRandomData",
module.lite_types._fbthrift_MyService_getRandomData_args(),
module.lite_types._fbthrift_MyService_getRandomData_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
async def sink(
self,
sink: int
) -> None:
resp = await self._send_request(
"MyService",
"sink",
module.lite_types._fbthrift_MyService_sink_args(
sink=sink,),
module.lite_types._fbthrift_MyService_sink_result,
)
async def putDataById(
self,
id: int,
data: str
) -> None:
resp = await self._send_request(
"MyService",
"putDataById",
module.lite_types._fbthrift_MyService_putDataById_args(
id=id,
data=data,),
module.lite_types._fbthrift_MyService_putDataById_result,
)
async def hasDataById(
self,
id: int
) -> bool:
resp = await self._send_request(
"MyService",
"hasDataById",
module.lite_types._fbthrift_MyService_hasDataById_args(
id=id,),
module.lite_types._fbthrift_MyService_hasDataById_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
async def getDataById(
self,
id: int
) -> str:
resp = await self._send_request(
"MyService",
"getDataById",
module.lite_types._fbthrift_MyService_getDataById_args(
id=id,),
module.lite_types._fbthrift_MyService_getDataById_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
async def deleteDataById(
self,
id: int
) -> None:
resp = await self._send_request(
"MyService",
"deleteDataById",
module.lite_types._fbthrift_MyService_deleteDataById_args(
id=id,),
module.lite_types._fbthrift_MyService_deleteDataById_result,
)
async def lobDataById(
self,
id: int,
data: str
) -> None:
resp = await self._send_request(
"MyService",
"lobDataById",
module.lite_types._fbthrift_MyService_lobDataById_args(
id=id,
data=data,),
None,
)
class Sync(_fbthrift_py3lite_SyncClient):
@staticmethod
def __get_thrift_name__() -> str:
return "module.MyService"
@staticmethod
def __get_metadata__() -> _fbthrift_metadata.ThriftMetadata:
return module.lite_metadata.gen_metadata_service_MyService()
def ping(
self
) -> None:
resp = self._send_request(
"MyService",
"ping",
module.lite_types._fbthrift_MyService_ping_args(),
module.lite_types._fbthrift_MyService_ping_result,
)
def getRandomData(
self
) -> str:
resp = self._send_request(
"MyService",
"getRandomData",
module.lite_types._fbthrift_MyService_getRandomData_args(),
module.lite_types._fbthrift_MyService_getRandomData_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def sink(
self,
sink: int
) -> None:
resp = self._send_request(
"MyService",
"sink",
module.lite_types._fbthrift_MyService_sink_args(
sink=sink,),
module.lite_types._fbthrift_MyService_sink_result,
)
def putDataById(
self,
id: int,
data: str
) -> None:
resp = self._send_request(
"MyService",
"putDataById",
module.lite_types._fbthrift_MyService_putDataById_args(
id=id,
data=data,),
module.lite_types._fbthrift_MyService_putDataById_result,
)
def hasDataById(
self,
id: int
) -> bool:
resp = self._send_request(
"MyService",
"hasDataById",
module.lite_types._fbthrift_MyService_hasDataById_args(
id=id,),
module.lite_types._fbthrift_MyService_hasDataById_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def getDataById(
self,
id: int
) -> str:
resp = self._send_request(
"MyService",
"getDataById",
module.lite_types._fbthrift_MyService_getDataById_args(
id=id,),
module.lite_types._fbthrift_MyService_getDataById_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def deleteDataById(
self,
id: int
) -> None:
resp = self._send_request(
"MyService",
"deleteDataById",
module.lite_types._fbthrift_MyService_deleteDataById_args(
id=id,),
module.lite_types._fbthrift_MyService_deleteDataById_result,
)
def lobDataById(
self,
id: int,
data: str
) -> None:
resp = self._send_request(
"MyService",
"lobDataById",
module.lite_types._fbthrift_MyService_lobDataById_args(
id=id,
data=data,),
None,
)
class DbMixedStackArguments(_fbthrift_py3lite_Client["DbMixedStackArguments.Async", "DbMixedStackArguments.Sync"]):
@staticmethod
def __get_thrift_name__() -> str:
return "module.DbMixedStackArguments"
@staticmethod
def __get_metadata__() -> _fbthrift_metadata.ThriftMetadata:
return module.lite_metadata.gen_metadata_service_DbMixedStackArguments()
class Async(_fbthrift_py3lite_AsyncClient):
@staticmethod
def __get_thrift_name__() -> str:
return "module.DbMixedStackArguments"
@staticmethod
def __get_metadata__() -> _fbthrift_metadata.ThriftMetadata:
return module.lite_metadata.gen_metadata_service_DbMixedStackArguments()
async def getDataByKey0(
self,
key: str
) -> bytes:
resp = await self._send_request(
"DbMixedStackArguments",
"getDataByKey0",
module.lite_types._fbthrift_DbMixedStackArguments_getDataByKey0_args(
key=key,),
module.lite_types._fbthrift_DbMixedStackArguments_getDataByKey0_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
async def getDataByKey1(
self,
key: str
) -> bytes:
resp = await self._send_request(
"DbMixedStackArguments",
"getDataByKey1",
module.lite_types._fbthrift_DbMixedStackArguments_getDataByKey1_args(
key=key,),
module.lite_types._fbthrift_DbMixedStackArguments_getDataByKey1_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
class Sync(_fbthrift_py3lite_SyncClient):
@staticmethod
def __get_thrift_name__() -> str:
return "module.DbMixedStackArguments"
@staticmethod
def __get_metadata__() -> _fbthrift_metadata.ThriftMetadata:
return module.lite_metadata.gen_metadata_service_DbMixedStackArguments()
def getDataByKey0(
self,
key: str
) -> bytes:
resp = self._send_request(
"DbMixedStackArguments",
"getDataByKey0",
module.lite_types._fbthrift_DbMixedStackArguments_getDataByKey0_args(
key=key,),
module.lite_types._fbthrift_DbMixedStackArguments_getDataByKey0_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def getDataByKey1(
self,
key: str
) -> bytes:
resp = self._send_request(
"DbMixedStackArguments",
"getDataByKey1",
module.lite_types._fbthrift_DbMixedStackArguments_getDataByKey1_args(
key=key,),
module.lite_types._fbthrift_DbMixedStackArguments_getDataByKey1_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
) | thrift/compiler/test/fixtures/basic/gen-py3lite/module/lite_clients.py |
import typing as _typing
import apache.thrift.metadata.lite_types as _fbthrift_metadata
import folly.iobuf as _fbthrift_iobuf
from thrift.py3lite.client import (
AsyncClient as _fbthrift_py3lite_AsyncClient,
SyncClient as _fbthrift_py3lite_SyncClient,
Client as _fbthrift_py3lite_Client,
)
import thrift.py3lite.exceptions as _fbthrift_py3lite_exceptions
import thrift.py3lite.types as _fbthrift_py3lite_types
import module.lite_types
import module.lite_metadata
class MyService(_fbthrift_py3lite_Client["MyService.Async", "MyService.Sync"]):
@staticmethod
def __get_thrift_name__() -> str:
return "module.MyService"
@staticmethod
def __get_metadata__() -> _fbthrift_metadata.ThriftMetadata:
return module.lite_metadata.gen_metadata_service_MyService()
class Async(_fbthrift_py3lite_AsyncClient):
@staticmethod
def __get_thrift_name__() -> str:
return "module.MyService"
@staticmethod
def __get_metadata__() -> _fbthrift_metadata.ThriftMetadata:
return module.lite_metadata.gen_metadata_service_MyService()
async def ping(
self
) -> None:
resp = await self._send_request(
"MyService",
"ping",
module.lite_types._fbthrift_MyService_ping_args(),
module.lite_types._fbthrift_MyService_ping_result,
)
async def getRandomData(
self
) -> str:
resp = await self._send_request(
"MyService",
"getRandomData",
module.lite_types._fbthrift_MyService_getRandomData_args(),
module.lite_types._fbthrift_MyService_getRandomData_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
async def sink(
self,
sink: int
) -> None:
resp = await self._send_request(
"MyService",
"sink",
module.lite_types._fbthrift_MyService_sink_args(
sink=sink,),
module.lite_types._fbthrift_MyService_sink_result,
)
async def putDataById(
self,
id: int,
data: str
) -> None:
resp = await self._send_request(
"MyService",
"putDataById",
module.lite_types._fbthrift_MyService_putDataById_args(
id=id,
data=data,),
module.lite_types._fbthrift_MyService_putDataById_result,
)
async def hasDataById(
self,
id: int
) -> bool:
resp = await self._send_request(
"MyService",
"hasDataById",
module.lite_types._fbthrift_MyService_hasDataById_args(
id=id,),
module.lite_types._fbthrift_MyService_hasDataById_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
async def getDataById(
self,
id: int
) -> str:
resp = await self._send_request(
"MyService",
"getDataById",
module.lite_types._fbthrift_MyService_getDataById_args(
id=id,),
module.lite_types._fbthrift_MyService_getDataById_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
async def deleteDataById(
self,
id: int
) -> None:
resp = await self._send_request(
"MyService",
"deleteDataById",
module.lite_types._fbthrift_MyService_deleteDataById_args(
id=id,),
module.lite_types._fbthrift_MyService_deleteDataById_result,
)
async def lobDataById(
self,
id: int,
data: str
) -> None:
resp = await self._send_request(
"MyService",
"lobDataById",
module.lite_types._fbthrift_MyService_lobDataById_args(
id=id,
data=data,),
None,
)
class Sync(_fbthrift_py3lite_SyncClient):
@staticmethod
def __get_thrift_name__() -> str:
return "module.MyService"
@staticmethod
def __get_metadata__() -> _fbthrift_metadata.ThriftMetadata:
return module.lite_metadata.gen_metadata_service_MyService()
def ping(
self
) -> None:
resp = self._send_request(
"MyService",
"ping",
module.lite_types._fbthrift_MyService_ping_args(),
module.lite_types._fbthrift_MyService_ping_result,
)
def getRandomData(
self
) -> str:
resp = self._send_request(
"MyService",
"getRandomData",
module.lite_types._fbthrift_MyService_getRandomData_args(),
module.lite_types._fbthrift_MyService_getRandomData_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def sink(
self,
sink: int
) -> None:
resp = self._send_request(
"MyService",
"sink",
module.lite_types._fbthrift_MyService_sink_args(
sink=sink,),
module.lite_types._fbthrift_MyService_sink_result,
)
def putDataById(
self,
id: int,
data: str
) -> None:
resp = self._send_request(
"MyService",
"putDataById",
module.lite_types._fbthrift_MyService_putDataById_args(
id=id,
data=data,),
module.lite_types._fbthrift_MyService_putDataById_result,
)
def hasDataById(
self,
id: int
) -> bool:
resp = self._send_request(
"MyService",
"hasDataById",
module.lite_types._fbthrift_MyService_hasDataById_args(
id=id,),
module.lite_types._fbthrift_MyService_hasDataById_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def getDataById(
self,
id: int
) -> str:
resp = self._send_request(
"MyService",
"getDataById",
module.lite_types._fbthrift_MyService_getDataById_args(
id=id,),
module.lite_types._fbthrift_MyService_getDataById_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def deleteDataById(
self,
id: int
) -> None:
resp = self._send_request(
"MyService",
"deleteDataById",
module.lite_types._fbthrift_MyService_deleteDataById_args(
id=id,),
module.lite_types._fbthrift_MyService_deleteDataById_result,
)
def lobDataById(
self,
id: int,
data: str
) -> None:
resp = self._send_request(
"MyService",
"lobDataById",
module.lite_types._fbthrift_MyService_lobDataById_args(
id=id,
data=data,),
None,
)
class DbMixedStackArguments(_fbthrift_py3lite_Client["DbMixedStackArguments.Async", "DbMixedStackArguments.Sync"]):
@staticmethod
def __get_thrift_name__() -> str:
return "module.DbMixedStackArguments"
@staticmethod
def __get_metadata__() -> _fbthrift_metadata.ThriftMetadata:
return module.lite_metadata.gen_metadata_service_DbMixedStackArguments()
class Async(_fbthrift_py3lite_AsyncClient):
@staticmethod
def __get_thrift_name__() -> str:
return "module.DbMixedStackArguments"
@staticmethod
def __get_metadata__() -> _fbthrift_metadata.ThriftMetadata:
return module.lite_metadata.gen_metadata_service_DbMixedStackArguments()
async def getDataByKey0(
self,
key: str
) -> bytes:
resp = await self._send_request(
"DbMixedStackArguments",
"getDataByKey0",
module.lite_types._fbthrift_DbMixedStackArguments_getDataByKey0_args(
key=key,),
module.lite_types._fbthrift_DbMixedStackArguments_getDataByKey0_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
async def getDataByKey1(
self,
key: str
) -> bytes:
resp = await self._send_request(
"DbMixedStackArguments",
"getDataByKey1",
module.lite_types._fbthrift_DbMixedStackArguments_getDataByKey1_args(
key=key,),
module.lite_types._fbthrift_DbMixedStackArguments_getDataByKey1_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
class Sync(_fbthrift_py3lite_SyncClient):
@staticmethod
def __get_thrift_name__() -> str:
return "module.DbMixedStackArguments"
@staticmethod
def __get_metadata__() -> _fbthrift_metadata.ThriftMetadata:
return module.lite_metadata.gen_metadata_service_DbMixedStackArguments()
def getDataByKey0(
self,
key: str
) -> bytes:
resp = self._send_request(
"DbMixedStackArguments",
"getDataByKey0",
module.lite_types._fbthrift_DbMixedStackArguments_getDataByKey0_args(
key=key,),
module.lite_types._fbthrift_DbMixedStackArguments_getDataByKey0_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def getDataByKey1(
self,
key: str
) -> bytes:
resp = self._send_request(
"DbMixedStackArguments",
"getDataByKey1",
module.lite_types._fbthrift_DbMixedStackArguments_getDataByKey1_args(
key=key,),
module.lite_types._fbthrift_DbMixedStackArguments_getDataByKey1_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
) | 0.433622 | 0.051678 |
from a10sdk.common.A10BaseClass import A10BaseClass
class SslModuleStats(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param total_enabled_crypto_engines: {"type": "number", "description": "Number of enabled crypto engines", "format": "number"}
:param requests_handled: {"type": "number", "description": "Number of requests handled", "format": "number"}
:param ssl_modules_index: {"type": "number", "description": "SSL module index", "format": "number"}
:param total_available_crypto_engines: {"type": "number", "description": "Number of available crypto engines", "format": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "ssl-module-stats"
self.DeviceProxy = ""
self.total_enabled_crypto_engines = ""
self.requests_handled = ""
self.ssl_modules_index = ""
self.total_available_crypto_engines = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Oper(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param ssl_modules_count: {"type": "number", "description": "SSL module count", "format": "number"}
:param max_ssl_contexts: {"type": "number", "description": "Maximum SSL contexts", "format": "number"}
:param client_cert_auth_fail: {"type": "number", "description": "SSL client certificate authorization failed", "format": "number"}
:param thales_hsm_status: {"enum": ["Enabled", "Disabled"], "type": "string", "description": "Thales HSM Status", "format": "enum"}
:param current_serverside_connections: {"type": "number", "description": "Current serverside SSL connections", "format": "number"}
:param failed_crypto: {"type": "number", "description": "Failed crypto operations", "format": "number"}
:param total_reuse_server_ssl: {"type": "number", "description": "Total times of reusing SSL sessions(IDs) in server ssl", "format": "number"}
:param hw_ring_full: {"type": "number", "description": "HW ring full", "format": "number"}
:param current_clientside_connections: {"type": "number", "description": "Current clientside SSL connections", "format": "number"}
:param total_clientside_connections: {"type": "number", "description": "Total clientside SSL connections", "format": "number"}
:param clientssl_context_malloc_fail: {"type": "number", "description": "Total client ssl context malloc failures", "format": "number"}
:param hw_context_alloc_fail: {"type": "number", "description": "HW Context Memory alloc failed", "format": "number"}
:param ssl_memory_usage: {"type": "number", "description": "SSL memory usage", "format": "number"}
:param hw_context_total: {"type": "number", "description": "HW Context Memory Total Count", "format": "number"}
:param ssl_module_type: {"type": "string", "description": "SSL module", "format": "string"}
:param curr_ssl_contexts: {"type": "number", "description": "Current SSL contexts in use", "format": "number"}
:param server_cert_errors: {"type": "number", "description": "SSL server certificate errors", "format": "number"}
:param total_reuse_client_ssl: {"type": "number", "description": "Total times of reusing SSL sessions(IDs) in client ssl", "format": "number"}
:param failed_handshakes: {"type": "number", "description": "Failed SSL handshakes", "format": "number"}
:param record_too_big: {"type": "number", "description": "Record too big", "format": "number"}
:param config_module_type: {"type": "string", "description": "Number of SSL modules", "format": "string"}
:param hw_context_usage: {"type": "number", "description": "HW Context Memory In Use", "format": "number"}
:param total_serverside_connections: {"type": "number", "description": "Total serverside SSL connections", "format": "number"}
:param ca_verification_failures: {"type": "number", "description": "SSL fail CA verification", "format": "number"}
:param ssl_module_stats: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "total-enabled-crypto-engines": {"type": "number", "description": "Number of enabled crypto engines", "format": "number"}, "requests-handled": {"type": "number", "description": "Number of requests handled", "format": "number"}, "ssl-modules-index": {"type": "number", "description": "SSL module index", "format": "number"}, "total-available-crypto-engines": {"type": "number", "description": "Number of available crypto engines", "format": "number"}}}]}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "oper"
self.DeviceProxy = ""
self.ssl_modules_count = ""
self.max_ssl_contexts = ""
self.client_cert_auth_fail = ""
self.thales_hsm_status = ""
self.current_serverside_connections = ""
self.failed_crypto = ""
self.total_reuse_server_ssl = ""
self.hw_ring_full = ""
self.current_clientside_connections = ""
self.total_clientside_connections = ""
self.clientssl_context_malloc_fail = ""
self.hw_context_alloc_fail = ""
self.ssl_memory_usage = ""
self.hw_context_total = ""
self.ssl_module_type = ""
self.curr_ssl_contexts = ""
self.server_cert_errors = ""
self.total_reuse_client_ssl = ""
self.failed_handshakes = ""
self.record_too_big = ""
self.config_module_type = ""
self.hw_context_usage = ""
self.total_serverside_connections = ""
self.ca_verification_failures = ""
self.ssl_module_stats = []
for keys, value in kwargs.items():
setattr(self,keys, value)
class SslStats(A10BaseClass):
"""Class Description::
Operational Status for the object ssl-stats.
Class ssl-stats supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/slb/ssl-stats/oper`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "ssl-stats"
self.a10_url="/axapi/v3/slb/ssl-stats/oper"
self.DeviceProxy = ""
self.oper = {}
for keys, value in kwargs.items():
setattr(self,keys, value) | a10sdk/core/slb/slb_ssl_stats_oper.py | from a10sdk.common.A10BaseClass import A10BaseClass
class SslModuleStats(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param total_enabled_crypto_engines: {"type": "number", "description": "Number of enabled crypto engines", "format": "number"}
:param requests_handled: {"type": "number", "description": "Number of requests handled", "format": "number"}
:param ssl_modules_index: {"type": "number", "description": "SSL module index", "format": "number"}
:param total_available_crypto_engines: {"type": "number", "description": "Number of available crypto engines", "format": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "ssl-module-stats"
self.DeviceProxy = ""
self.total_enabled_crypto_engines = ""
self.requests_handled = ""
self.ssl_modules_index = ""
self.total_available_crypto_engines = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Oper(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param ssl_modules_count: {"type": "number", "description": "SSL module count", "format": "number"}
:param max_ssl_contexts: {"type": "number", "description": "Maximum SSL contexts", "format": "number"}
:param client_cert_auth_fail: {"type": "number", "description": "SSL client certificate authorization failed", "format": "number"}
:param thales_hsm_status: {"enum": ["Enabled", "Disabled"], "type": "string", "description": "Thales HSM Status", "format": "enum"}
:param current_serverside_connections: {"type": "number", "description": "Current serverside SSL connections", "format": "number"}
:param failed_crypto: {"type": "number", "description": "Failed crypto operations", "format": "number"}
:param total_reuse_server_ssl: {"type": "number", "description": "Total times of reusing SSL sessions(IDs) in server ssl", "format": "number"}
:param hw_ring_full: {"type": "number", "description": "HW ring full", "format": "number"}
:param current_clientside_connections: {"type": "number", "description": "Current clientside SSL connections", "format": "number"}
:param total_clientside_connections: {"type": "number", "description": "Total clientside SSL connections", "format": "number"}
:param clientssl_context_malloc_fail: {"type": "number", "description": "Total client ssl context malloc failures", "format": "number"}
:param hw_context_alloc_fail: {"type": "number", "description": "HW Context Memory alloc failed", "format": "number"}
:param ssl_memory_usage: {"type": "number", "description": "SSL memory usage", "format": "number"}
:param hw_context_total: {"type": "number", "description": "HW Context Memory Total Count", "format": "number"}
:param ssl_module_type: {"type": "string", "description": "SSL module", "format": "string"}
:param curr_ssl_contexts: {"type": "number", "description": "Current SSL contexts in use", "format": "number"}
:param server_cert_errors: {"type": "number", "description": "SSL server certificate errors", "format": "number"}
:param total_reuse_client_ssl: {"type": "number", "description": "Total times of reusing SSL sessions(IDs) in client ssl", "format": "number"}
:param failed_handshakes: {"type": "number", "description": "Failed SSL handshakes", "format": "number"}
:param record_too_big: {"type": "number", "description": "Record too big", "format": "number"}
:param config_module_type: {"type": "string", "description": "Number of SSL modules", "format": "string"}
:param hw_context_usage: {"type": "number", "description": "HW Context Memory In Use", "format": "number"}
:param total_serverside_connections: {"type": "number", "description": "Total serverside SSL connections", "format": "number"}
:param ca_verification_failures: {"type": "number", "description": "SSL fail CA verification", "format": "number"}
:param ssl_module_stats: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "total-enabled-crypto-engines": {"type": "number", "description": "Number of enabled crypto engines", "format": "number"}, "requests-handled": {"type": "number", "description": "Number of requests handled", "format": "number"}, "ssl-modules-index": {"type": "number", "description": "SSL module index", "format": "number"}, "total-available-crypto-engines": {"type": "number", "description": "Number of available crypto engines", "format": "number"}}}]}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "oper"
self.DeviceProxy = ""
self.ssl_modules_count = ""
self.max_ssl_contexts = ""
self.client_cert_auth_fail = ""
self.thales_hsm_status = ""
self.current_serverside_connections = ""
self.failed_crypto = ""
self.total_reuse_server_ssl = ""
self.hw_ring_full = ""
self.current_clientside_connections = ""
self.total_clientside_connections = ""
self.clientssl_context_malloc_fail = ""
self.hw_context_alloc_fail = ""
self.ssl_memory_usage = ""
self.hw_context_total = ""
self.ssl_module_type = ""
self.curr_ssl_contexts = ""
self.server_cert_errors = ""
self.total_reuse_client_ssl = ""
self.failed_handshakes = ""
self.record_too_big = ""
self.config_module_type = ""
self.hw_context_usage = ""
self.total_serverside_connections = ""
self.ca_verification_failures = ""
self.ssl_module_stats = []
for keys, value in kwargs.items():
setattr(self,keys, value)
class SslStats(A10BaseClass):
"""Class Description::
Operational Status for the object ssl-stats.
Class ssl-stats supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/slb/ssl-stats/oper`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "ssl-stats"
self.a10_url="/axapi/v3/slb/ssl-stats/oper"
self.DeviceProxy = ""
self.oper = {}
for keys, value in kwargs.items():
setattr(self,keys, value) | 0.822403 | 0.327668 |
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks import workflows \
as network_workflows
LOG = logging.getLogger(__name__)
class CreateSubnetInfoAction(network_workflows.CreateSubnetInfoAction):
with_subnet = forms.BooleanField(initial=True, required=False,
widget=forms.HiddenInput())
class Meta:
name = _("Subnet")
help_text = _('You can create a subnet associated with the '
'network. Advanced configuration are available '
'at "Subnet Detail" tab.')
def clean(self):
cleaned_data = workflows.Action.clean(self)
self._check_subnet_data(cleaned_data)
return cleaned_data
class CreateSubnetInfo(network_workflows.CreateSubnetInfo):
action_class = CreateSubnetInfoAction
depends_on = ("network_id",)
class CreateSubnet(network_workflows.CreateNetwork):
slug = "create_subnet"
name = _("Create Subnet")
finalize_button_name = _("Create")
success_message = _('Created subnet "%s".')
failure_message = _('Unable to create subnet "%s".')
default_steps = (CreateSubnetInfo,
network_workflows.CreateSubnetDetail)
def format_status_message(self, message):
name = self.context.get('subnet_name') or self.context.get('subnet_id')
return message % name
def get_success_url(self):
return reverse("horizon:project:networks:detail",
args=(self.context.get('network_id'),))
def get_failure_url(self):
return reverse("horizon:project:networks:detail",
args=(self.context.get('network_id'),))
def handle(self, request, data):
subnet = self._create_subnet(request, data)
return True if subnet else False
class UpdateSubnetInfoAction(CreateSubnetInfoAction):
cidr = forms.IPField(label=_("Network Address"),
required=False,
initial="",
widget=forms.TextInput(
attrs={'readonly': 'readonly'}),
help_text=_("Network address in CIDR format "
"(e.g. 192.168.0.0/24)"),
version=forms.IPv4 | forms.IPv6,
mask=True)
# NOTE(amotoki): When 'disabled' attribute is set for the ChoiceField
# and ValidationError is raised for POST request, the initial value of
# the ip_version ChoiceField is not set in the re-displayed form
# As a result, 'IPv4' is displayed even when IPv6 is used if
# ValidationError is detected. In addition 'required=True' check complains
# when re-POST since the value of the ChoiceField is not set.
# Thus now I use HiddenInput for the ip_version ChoiceField as a work
# around.
ip_version = forms.ChoiceField(choices=[(4, 'IPv4'), (6, 'IPv6')],
widget=forms.HiddenInput(),
label=_("IP Version"))
gateway_ip = forms.IPField(
label=_("Gateway IP (optional)"),
required=False,
initial="",
help_text=_("IP address of Gateway (e.g. 192.168.0.254). "
"You need to specify an explicit address "
"to set the gateway. "
"If you want to use no gateway, "
"check 'Disable Gateway' below."),
version=forms.IPv4 | forms.IPv6,
mask=False)
no_gateway = forms.BooleanField(label=_("Disable Gateway"),
initial=False, required=False)
class Meta:
name = _("Subnet")
help_text = _('You can update a subnet associated with the '
'network. Advanced configuration are available '
'at "Subnet Detail" tab.')
def clean(self):
cleaned_data = workflows.Action.clean(self)
self._check_subnet_data(cleaned_data, is_create=False)
return cleaned_data
class UpdateSubnetInfo(CreateSubnetInfo):
action_class = UpdateSubnetInfoAction
depends_on = ("network_id", "subnet_id")
class UpdateSubnetDetailAction(network_workflows.CreateSubnetDetailAction):
allocation_pools = forms.CharField(widget=forms.HiddenInput(),
required=False)
class Meta:
name = _("Subnet Detail")
help_text = _('You can specify additional attributes for the subnet.')
class UpdateSubnetDetail(network_workflows.CreateSubnetDetail):
action_class = UpdateSubnetDetailAction
class UpdateSubnet(network_workflows.CreateNetwork):
slug = "update_subnet"
name = _("Edit Subnet")
finalize_button_name = _("Save")
success_message = _('Updated subnet "%s".')
failure_message = _('Unable to update subnet "%s".')
success_url = "horizon:project:networks:detail"
failure_url = "horizon:project:networks:detail"
default_steps = (UpdateSubnetInfo,
UpdateSubnetDetail)
def format_status_message(self, message):
name = self.context.get('subnet_name') or self.context.get('subnet_id')
return message % name
def get_success_url(self):
return reverse(self.success_url,
args=(self.context.get('network_id'),))
def _update_subnet(self, request, data):
network_id = self.context.get('network_id')
try:
subnet_id = self.context.get('subnet_id')
params = {}
params['name'] = data['subnet_name']
if data['no_gateway']:
params['gateway_ip'] = None
elif data['gateway_ip']:
params['gateway_ip'] = data['gateway_ip']
# We should send gateway_ip only when it is changed, because
# updating gateway_ip is prohibited when the ip is used.
# See bug 1227268.
subnet = api.neutron.subnet_get(request, subnet_id)
if params['gateway_ip'] == subnet.gateway_ip:
del params['gateway_ip']
self._setup_subnet_parameters(params, data, is_create=False)
subnet = api.neutron.subnet_update(request, subnet_id, **params)
msg = _('Subnet "%s" was successfully updated.') % data['cidr']
LOG.debug(msg)
return subnet
except Exception as e:
msg = (_('Failed to update subnet "%(sub)s": '
' %(reason)s') %
{"sub": data['cidr'], "reason": e})
redirect = reverse(self.failure_url, args=(network_id,))
exceptions.handle(request, msg, redirect=redirect)
return False
def handle(self, request, data):
subnet = self._update_subnet(request, data)
return True if subnet else False | openstack_dashboard/dashboards/project/networks/subnets/workflows.py |
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks import workflows \
as network_workflows
LOG = logging.getLogger(__name__)
class CreateSubnetInfoAction(network_workflows.CreateSubnetInfoAction):
with_subnet = forms.BooleanField(initial=True, required=False,
widget=forms.HiddenInput())
class Meta:
name = _("Subnet")
help_text = _('You can create a subnet associated with the '
'network. Advanced configuration are available '
'at "Subnet Detail" tab.')
def clean(self):
cleaned_data = workflows.Action.clean(self)
self._check_subnet_data(cleaned_data)
return cleaned_data
class CreateSubnetInfo(network_workflows.CreateSubnetInfo):
action_class = CreateSubnetInfoAction
depends_on = ("network_id",)
class CreateSubnet(network_workflows.CreateNetwork):
slug = "create_subnet"
name = _("Create Subnet")
finalize_button_name = _("Create")
success_message = _('Created subnet "%s".')
failure_message = _('Unable to create subnet "%s".')
default_steps = (CreateSubnetInfo,
network_workflows.CreateSubnetDetail)
def format_status_message(self, message):
name = self.context.get('subnet_name') or self.context.get('subnet_id')
return message % name
def get_success_url(self):
return reverse("horizon:project:networks:detail",
args=(self.context.get('network_id'),))
def get_failure_url(self):
return reverse("horizon:project:networks:detail",
args=(self.context.get('network_id'),))
def handle(self, request, data):
subnet = self._create_subnet(request, data)
return True if subnet else False
class UpdateSubnetInfoAction(CreateSubnetInfoAction):
cidr = forms.IPField(label=_("Network Address"),
required=False,
initial="",
widget=forms.TextInput(
attrs={'readonly': 'readonly'}),
help_text=_("Network address in CIDR format "
"(e.g. 192.168.0.0/24)"),
version=forms.IPv4 | forms.IPv6,
mask=True)
# NOTE(amotoki): When 'disabled' attribute is set for the ChoiceField
# and ValidationError is raised for POST request, the initial value of
# the ip_version ChoiceField is not set in the re-displayed form
# As a result, 'IPv4' is displayed even when IPv6 is used if
# ValidationError is detected. In addition 'required=True' check complains
# when re-POST since the value of the ChoiceField is not set.
# Thus now I use HiddenInput for the ip_version ChoiceField as a work
# around.
ip_version = forms.ChoiceField(choices=[(4, 'IPv4'), (6, 'IPv6')],
widget=forms.HiddenInput(),
label=_("IP Version"))
gateway_ip = forms.IPField(
label=_("Gateway IP (optional)"),
required=False,
initial="",
help_text=_("IP address of Gateway (e.g. 192.168.0.254). "
"You need to specify an explicit address "
"to set the gateway. "
"If you want to use no gateway, "
"check 'Disable Gateway' below."),
version=forms.IPv4 | forms.IPv6,
mask=False)
no_gateway = forms.BooleanField(label=_("Disable Gateway"),
initial=False, required=False)
class Meta:
name = _("Subnet")
help_text = _('You can update a subnet associated with the '
'network. Advanced configuration are available '
'at "Subnet Detail" tab.')
def clean(self):
cleaned_data = workflows.Action.clean(self)
self._check_subnet_data(cleaned_data, is_create=False)
return cleaned_data
class UpdateSubnetInfo(CreateSubnetInfo):
action_class = UpdateSubnetInfoAction
depends_on = ("network_id", "subnet_id")
class UpdateSubnetDetailAction(network_workflows.CreateSubnetDetailAction):
allocation_pools = forms.CharField(widget=forms.HiddenInput(),
required=False)
class Meta:
name = _("Subnet Detail")
help_text = _('You can specify additional attributes for the subnet.')
class UpdateSubnetDetail(network_workflows.CreateSubnetDetail):
action_class = UpdateSubnetDetailAction
class UpdateSubnet(network_workflows.CreateNetwork):
slug = "update_subnet"
name = _("Edit Subnet")
finalize_button_name = _("Save")
success_message = _('Updated subnet "%s".')
failure_message = _('Unable to update subnet "%s".')
success_url = "horizon:project:networks:detail"
failure_url = "horizon:project:networks:detail"
default_steps = (UpdateSubnetInfo,
UpdateSubnetDetail)
def format_status_message(self, message):
name = self.context.get('subnet_name') or self.context.get('subnet_id')
return message % name
def get_success_url(self):
return reverse(self.success_url,
args=(self.context.get('network_id'),))
def _update_subnet(self, request, data):
network_id = self.context.get('network_id')
try:
subnet_id = self.context.get('subnet_id')
params = {}
params['name'] = data['subnet_name']
if data['no_gateway']:
params['gateway_ip'] = None
elif data['gateway_ip']:
params['gateway_ip'] = data['gateway_ip']
# We should send gateway_ip only when it is changed, because
# updating gateway_ip is prohibited when the ip is used.
# See bug 1227268.
subnet = api.neutron.subnet_get(request, subnet_id)
if params['gateway_ip'] == subnet.gateway_ip:
del params['gateway_ip']
self._setup_subnet_parameters(params, data, is_create=False)
subnet = api.neutron.subnet_update(request, subnet_id, **params)
msg = _('Subnet "%s" was successfully updated.') % data['cidr']
LOG.debug(msg)
return subnet
except Exception as e:
msg = (_('Failed to update subnet "%(sub)s": '
' %(reason)s') %
{"sub": data['cidr'], "reason": e})
redirect = reverse(self.failure_url, args=(network_id,))
exceptions.handle(request, msg, redirect=redirect)
return False
def handle(self, request, data):
subnet = self._update_subnet(request, data)
return True if subnet else False | 0.516108 | 0.086323 |
import json
import os
from typing import Any, Dict, List, Optional, Tuple, Union
import structlog
from celery import group
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.core.cache import cache
from django.db.models import Q
from django.db.models.expressions import F
from django.utils import timezone
from sentry_sdk import capture_exception
from statshog.defaults.django import statsd
from posthog.celery import update_cache_item_task
from posthog.constants import (
INSIGHT_FUNNELS,
INSIGHT_PATHS,
INSIGHT_RETENTION,
INSIGHT_STICKINESS,
INSIGHT_TRENDS,
TRENDS_STICKINESS,
FunnelVizType,
)
from posthog.decorators import CacheType
from posthog.models import Dashboard, Filter, Insight, Team
from posthog.models.filters.stickiness_filter import StickinessFilter
from posthog.models.filters.utils import get_filter
from posthog.types import FilterType
from posthog.utils import generate_cache_key
PARALLEL_INSIGHT_CACHE = int(os.environ.get("PARALLEL_DASHBOARD_ITEM_CACHE", 5))
logger = structlog.get_logger(__name__)
from ee.clickhouse.queries.funnels import ClickhouseFunnelTimeToConvert, ClickhouseFunnelTrends
from ee.clickhouse.queries.funnels.utils import get_funnel_order_class
from ee.clickhouse.queries.paths import ClickhousePaths
from ee.clickhouse.queries.retention.clickhouse_retention import ClickhouseRetention
from ee.clickhouse.queries.stickiness.clickhouse_stickiness import ClickhouseStickiness
from ee.clickhouse.queries.trends.clickhouse_trends import ClickhouseTrends
CACHE_TYPE_TO_INSIGHT_CLASS = {
CacheType.TRENDS: ClickhouseTrends,
CacheType.STICKINESS: ClickhouseStickiness,
CacheType.RETENTION: ClickhouseRetention,
CacheType.PATHS: ClickhousePaths,
}
def update_cache_item(key: str, cache_type: CacheType, payload: dict) -> List[Dict[str, Any]]:
timer = statsd.timer("update_cache_item_timer").start()
result: Optional[Union[List, Dict]] = None
filter_dict = json.loads(payload["filter"])
team_id = int(payload["team_id"])
filter = get_filter(data=filter_dict, team=Team(pk=team_id))
# Doing the filtering like this means we'll update _all_ Insights with the same filters hash
dashboard_items = Insight.objects.filter(team_id=team_id, filters_hash=key)
dashboard_items.update(refreshing=True)
try:
if cache_type == CacheType.FUNNEL:
result = _calculate_funnel(filter, key, team_id)
else:
result = _calculate_by_filter(filter, key, team_id, cache_type)
cache.set(
key, {"result": result, "type": cache_type, "last_refresh": timezone.now()}, settings.CACHED_RESULTS_TTL
)
except Exception as e:
timer.stop()
statsd.incr("update_cache_item_error")
dashboard_items.filter(refresh_attempt=None).update(refresh_attempt=0)
dashboard_items.update(refreshing=False, refresh_attempt=F("refresh_attempt") + 1)
raise e
timer.stop()
statsd.incr("update_cache_item_success")
dashboard_items.update(last_refresh=timezone.now(), refreshing=False, refresh_attempt=0)
return result
def update_dashboard_item_cache(dashboard_item: Insight, dashboard: Optional[Dashboard]) -> List[Dict[str, Any]]:
cache_key, cache_type, payload = dashboard_item_update_task_params(dashboard_item, dashboard)
result = update_cache_item(cache_key, cache_type, payload)
dashboard_item.refresh_from_db()
return result
def get_cache_type(filter: FilterType) -> CacheType:
if filter.insight == INSIGHT_FUNNELS:
return CacheType.FUNNEL
elif filter.insight == INSIGHT_PATHS:
return CacheType.PATHS
elif filter.insight == INSIGHT_RETENTION:
return CacheType.RETENTION
elif (
filter.insight == INSIGHT_TRENDS
and isinstance(filter, StickinessFilter)
and filter.shown_as == TRENDS_STICKINESS
) or filter.insight == INSIGHT_STICKINESS:
return CacheType.STICKINESS
else:
return CacheType.TRENDS
def update_cached_items() -> None:
tasks = []
items = (
Insight.objects.filter(
Q(Q(dashboard__is_shared=True) | Q(dashboard__last_accessed_at__gt=timezone.now() - relativedelta(days=7)))
)
.exclude(dashboard__deleted=True)
.exclude(refreshing=True)
.exclude(deleted=True)
.exclude(refresh_attempt__gt=2)
.exclude(filters={})
.order_by(F("last_refresh").asc(nulls_first=True))
)
for item in items[0:PARALLEL_INSIGHT_CACHE]:
try:
cache_key, cache_type, payload = dashboard_item_update_task_params(item)
if item.filters_hash != cache_key:
item.save() # force update if the saved key is different from the cache key
tasks.append(update_cache_item_task.s(cache_key, cache_type, payload))
except Exception as e:
item.refresh_attempt = (item.refresh_attempt or 0) + 1
item.save()
capture_exception(e)
logger.info("Found {} items to refresh".format(len(tasks)))
taskset = group(tasks)
taskset.apply_async()
statsd.gauge("update_cache_queue_depth", items.count())
def dashboard_item_update_task_params(
item: Insight, dashboard: Optional[Dashboard] = None
) -> Tuple[str, CacheType, Dict]:
filter = get_filter(data=item.dashboard_filters(dashboard), team=item.team)
cache_key = generate_cache_key("{}_{}".format(filter.toJSON(), item.team_id))
cache_type = get_cache_type(filter)
payload = {"filter": filter.toJSON(), "team_id": item.team_id}
return cache_key, cache_type, payload
def _calculate_by_filter(filter: FilterType, key: str, team_id: int, cache_type: CacheType) -> List[Dict[str, Any]]:
insight_class = CACHE_TYPE_TO_INSIGHT_CLASS[cache_type]
if cache_type == CacheType.PATHS:
result = insight_class(filter, Team(pk=team_id)).run(filter, Team(pk=team_id))
else:
result = insight_class().run(filter, Team(pk=team_id))
return result
def _calculate_funnel(filter: Filter, key: str, team_id: int) -> List[Dict[str, Any]]:
team = Team(pk=team_id)
if filter.funnel_viz_type == FunnelVizType.TRENDS:
result = ClickhouseFunnelTrends(team=team, filter=filter).run()
elif filter.funnel_viz_type == FunnelVizType.TIME_TO_CONVERT:
result = ClickhouseFunnelTimeToConvert(team=team, filter=filter).run()
else:
funnel_order_class = get_funnel_order_class(filter)
result = funnel_order_class(team=team, filter=filter).run()
return result | posthog/tasks/update_cache.py | import json
import os
from typing import Any, Dict, List, Optional, Tuple, Union
import structlog
from celery import group
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.core.cache import cache
from django.db.models import Q
from django.db.models.expressions import F
from django.utils import timezone
from sentry_sdk import capture_exception
from statshog.defaults.django import statsd
from posthog.celery import update_cache_item_task
from posthog.constants import (
INSIGHT_FUNNELS,
INSIGHT_PATHS,
INSIGHT_RETENTION,
INSIGHT_STICKINESS,
INSIGHT_TRENDS,
TRENDS_STICKINESS,
FunnelVizType,
)
from posthog.decorators import CacheType
from posthog.models import Dashboard, Filter, Insight, Team
from posthog.models.filters.stickiness_filter import StickinessFilter
from posthog.models.filters.utils import get_filter
from posthog.types import FilterType
from posthog.utils import generate_cache_key
PARALLEL_INSIGHT_CACHE = int(os.environ.get("PARALLEL_DASHBOARD_ITEM_CACHE", 5))
logger = structlog.get_logger(__name__)
from ee.clickhouse.queries.funnels import ClickhouseFunnelTimeToConvert, ClickhouseFunnelTrends
from ee.clickhouse.queries.funnels.utils import get_funnel_order_class
from ee.clickhouse.queries.paths import ClickhousePaths
from ee.clickhouse.queries.retention.clickhouse_retention import ClickhouseRetention
from ee.clickhouse.queries.stickiness.clickhouse_stickiness import ClickhouseStickiness
from ee.clickhouse.queries.trends.clickhouse_trends import ClickhouseTrends
CACHE_TYPE_TO_INSIGHT_CLASS = {
CacheType.TRENDS: ClickhouseTrends,
CacheType.STICKINESS: ClickhouseStickiness,
CacheType.RETENTION: ClickhouseRetention,
CacheType.PATHS: ClickhousePaths,
}
def update_cache_item(key: str, cache_type: CacheType, payload: dict) -> List[Dict[str, Any]]:
timer = statsd.timer("update_cache_item_timer").start()
result: Optional[Union[List, Dict]] = None
filter_dict = json.loads(payload["filter"])
team_id = int(payload["team_id"])
filter = get_filter(data=filter_dict, team=Team(pk=team_id))
# Doing the filtering like this means we'll update _all_ Insights with the same filters hash
dashboard_items = Insight.objects.filter(team_id=team_id, filters_hash=key)
dashboard_items.update(refreshing=True)
try:
if cache_type == CacheType.FUNNEL:
result = _calculate_funnel(filter, key, team_id)
else:
result = _calculate_by_filter(filter, key, team_id, cache_type)
cache.set(
key, {"result": result, "type": cache_type, "last_refresh": timezone.now()}, settings.CACHED_RESULTS_TTL
)
except Exception as e:
timer.stop()
statsd.incr("update_cache_item_error")
dashboard_items.filter(refresh_attempt=None).update(refresh_attempt=0)
dashboard_items.update(refreshing=False, refresh_attempt=F("refresh_attempt") + 1)
raise e
timer.stop()
statsd.incr("update_cache_item_success")
dashboard_items.update(last_refresh=timezone.now(), refreshing=False, refresh_attempt=0)
return result
def update_dashboard_item_cache(dashboard_item: Insight, dashboard: Optional[Dashboard]) -> List[Dict[str, Any]]:
cache_key, cache_type, payload = dashboard_item_update_task_params(dashboard_item, dashboard)
result = update_cache_item(cache_key, cache_type, payload)
dashboard_item.refresh_from_db()
return result
def get_cache_type(filter: FilterType) -> CacheType:
if filter.insight == INSIGHT_FUNNELS:
return CacheType.FUNNEL
elif filter.insight == INSIGHT_PATHS:
return CacheType.PATHS
elif filter.insight == INSIGHT_RETENTION:
return CacheType.RETENTION
elif (
filter.insight == INSIGHT_TRENDS
and isinstance(filter, StickinessFilter)
and filter.shown_as == TRENDS_STICKINESS
) or filter.insight == INSIGHT_STICKINESS:
return CacheType.STICKINESS
else:
return CacheType.TRENDS
def update_cached_items() -> None:
tasks = []
items = (
Insight.objects.filter(
Q(Q(dashboard__is_shared=True) | Q(dashboard__last_accessed_at__gt=timezone.now() - relativedelta(days=7)))
)
.exclude(dashboard__deleted=True)
.exclude(refreshing=True)
.exclude(deleted=True)
.exclude(refresh_attempt__gt=2)
.exclude(filters={})
.order_by(F("last_refresh").asc(nulls_first=True))
)
for item in items[0:PARALLEL_INSIGHT_CACHE]:
try:
cache_key, cache_type, payload = dashboard_item_update_task_params(item)
if item.filters_hash != cache_key:
item.save() # force update if the saved key is different from the cache key
tasks.append(update_cache_item_task.s(cache_key, cache_type, payload))
except Exception as e:
item.refresh_attempt = (item.refresh_attempt or 0) + 1
item.save()
capture_exception(e)
logger.info("Found {} items to refresh".format(len(tasks)))
taskset = group(tasks)
taskset.apply_async()
statsd.gauge("update_cache_queue_depth", items.count())
def dashboard_item_update_task_params(
item: Insight, dashboard: Optional[Dashboard] = None
) -> Tuple[str, CacheType, Dict]:
filter = get_filter(data=item.dashboard_filters(dashboard), team=item.team)
cache_key = generate_cache_key("{}_{}".format(filter.toJSON(), item.team_id))
cache_type = get_cache_type(filter)
payload = {"filter": filter.toJSON(), "team_id": item.team_id}
return cache_key, cache_type, payload
def _calculate_by_filter(filter: FilterType, key: str, team_id: int, cache_type: CacheType) -> List[Dict[str, Any]]:
insight_class = CACHE_TYPE_TO_INSIGHT_CLASS[cache_type]
if cache_type == CacheType.PATHS:
result = insight_class(filter, Team(pk=team_id)).run(filter, Team(pk=team_id))
else:
result = insight_class().run(filter, Team(pk=team_id))
return result
def _calculate_funnel(filter: Filter, key: str, team_id: int) -> List[Dict[str, Any]]:
team = Team(pk=team_id)
if filter.funnel_viz_type == FunnelVizType.TRENDS:
result = ClickhouseFunnelTrends(team=team, filter=filter).run()
elif filter.funnel_viz_type == FunnelVizType.TIME_TO_CONVERT:
result = ClickhouseFunnelTimeToConvert(team=team, filter=filter).run()
else:
funnel_order_class = get_funnel_order_class(filter)
result = funnel_order_class(team=team, filter=filter).run()
return result | 0.645679 | 0.109539 |
import os
from pathlib import Path
import json
import attr
from ..utils.messenger import send_message, make_message, gen_uuid, now, AuditFlag
from .helpers import ensure_list, gather_runtime_info
class Audit:
"""Handle provenance tracking and resource utilization."""
def __init__(self, audit_flags, messengers, messenger_args, develop=None):
"""
Initialize the auditing functionality.
Parameters
----------
audit_flags : :class:`AuditFlag`
Base configuration of auditing.
messengers : :obj:`pydra.util.messenger.Messenger` or list of :class:`pydra.util.messenger.Messenger`, optional
Specify types of messenger used by Audit to send a message.
Could be `PrintMessenger`, `FileMessenger`, or `RemoteRESTMessenger`.
messenger_args : :obj:`dict`, optional
Optional arguments for the `Messenger.send` method.
develop : :obj:`bool`, optional
If True, the local context.jsonld file is used, otherwise the one from github is used.
"""
self.audit_flags = audit_flags
self.messengers = ensure_list(messengers)
self.messenger_args = messenger_args
self.develop = develop
def start_audit(self, odir):
"""
Start recording provenance.
Monitored information is not sent until directory is created,
in case message directory is inside task output directory.
Parameters
----------
odir : :obj:`os.pathlike`
Message output directory.
"""
self.odir = odir
if self.audit_check(AuditFlag.PROV):
self.aid = f"uid:{gen_uuid()}"
start_message = {"@id": self.aid, "@type": "task", "startedAtTime": now()}
os.chdir(self.odir)
if self.audit_check(AuditFlag.PROV):
self.audit_message(start_message, AuditFlag.PROV)
if self.audit_check(AuditFlag.RESOURCE):
from ..utils.profiler import ResourceMonitor
self.resource_monitor = ResourceMonitor(os.getpid(), logdir=self.odir)
def monitor(self):
"""Start resource monitoring."""
if self.audit_check(AuditFlag.RESOURCE):
self.resource_monitor.start()
if self.audit_check(AuditFlag.PROV):
self.mid = f"uid:{gen_uuid()}"
self.audit_message(
{
"@id": self.mid,
"@type": "monitor",
"startedAtTime": now(),
"wasStartedBy": self.aid,
},
AuditFlag.PROV,
)
def finalize_audit(self, result):
"""End auditing."""
if self.audit_check(AuditFlag.RESOURCE):
self.resource_monitor.stop()
result.runtime = gather_runtime_info(self.resource_monitor.fname)
if self.audit_check(AuditFlag.PROV):
self.audit_message(
{"@id": self.mid, "endedAtTime": now(), "wasEndedBy": self.aid},
AuditFlag.PROV,
)
# audit resources/runtime information
self.eid = f"uid:{gen_uuid()}"
entity = attr.asdict(result.runtime)
entity.update(
**{
"@id": self.eid,
"@type": "runtime",
"prov:wasGeneratedBy": self.aid,
}
)
self.audit_message(entity, AuditFlag.PROV)
self.audit_message(
{
"@type": "prov:Generation",
"entity_generated": self.eid,
"hadActivity": self.mid,
},
AuditFlag.PROV,
)
self.resource_monitor = None
if self.audit_check(AuditFlag.PROV):
# audit outputs
self.audit_message(
{"@id": self.aid, "endedAtTime": now(), "errored": result.errored},
AuditFlag.PROV,
)
def audit_message(self, message, flags=None):
"""
Send auditing message.
Parameters
----------
message : :obj:`dict`
A message in Pydra is a JSON-LD message object.
flags : :obj:`bool`, optional
If True and self.audit_flag, the message is sent.
"""
if self.develop:
with open(
Path(os.path.dirname(__file__)) / ".." / "schema/context.jsonld"
) as fp:
context = json.load(fp)
else:
context = {
"@context": "https://raw.githubusercontent.com/nipype/pydra"
"/master/pydra/schema/context.jsonld"
}
if self.audit_flags & flags:
if self.messenger_args:
send_message(
make_message(message, context=context),
messengers=self.messengers,
**self.messenger_args,
)
else:
send_message(
make_message(message, context=context), messengers=self.messengers
)
def audit_check(self, flag):
"""
Determine whether auditing is enabled for a particular flag.
Parameters
----------
flag : :obj: `bool`
The flag that is checked.
Returns
-------
bool
Boolean AND for self.oudit_flags and flag
"""
return self.audit_flags & flag | pydra/engine/audit.py | import os
from pathlib import Path
import json
import attr
from ..utils.messenger import send_message, make_message, gen_uuid, now, AuditFlag
from .helpers import ensure_list, gather_runtime_info
class Audit:
"""Handle provenance tracking and resource utilization."""
def __init__(self, audit_flags, messengers, messenger_args, develop=None):
"""
Initialize the auditing functionality.
Parameters
----------
audit_flags : :class:`AuditFlag`
Base configuration of auditing.
messengers : :obj:`pydra.util.messenger.Messenger` or list of :class:`pydra.util.messenger.Messenger`, optional
Specify types of messenger used by Audit to send a message.
Could be `PrintMessenger`, `FileMessenger`, or `RemoteRESTMessenger`.
messenger_args : :obj:`dict`, optional
Optional arguments for the `Messenger.send` method.
develop : :obj:`bool`, optional
If True, the local context.jsonld file is used, otherwise the one from github is used.
"""
self.audit_flags = audit_flags
self.messengers = ensure_list(messengers)
self.messenger_args = messenger_args
self.develop = develop
def start_audit(self, odir):
"""
Start recording provenance.
Monitored information is not sent until directory is created,
in case message directory is inside task output directory.
Parameters
----------
odir : :obj:`os.pathlike`
Message output directory.
"""
self.odir = odir
if self.audit_check(AuditFlag.PROV):
self.aid = f"uid:{gen_uuid()}"
start_message = {"@id": self.aid, "@type": "task", "startedAtTime": now()}
os.chdir(self.odir)
if self.audit_check(AuditFlag.PROV):
self.audit_message(start_message, AuditFlag.PROV)
if self.audit_check(AuditFlag.RESOURCE):
from ..utils.profiler import ResourceMonitor
self.resource_monitor = ResourceMonitor(os.getpid(), logdir=self.odir)
def monitor(self):
"""Start resource monitoring."""
if self.audit_check(AuditFlag.RESOURCE):
self.resource_monitor.start()
if self.audit_check(AuditFlag.PROV):
self.mid = f"uid:{gen_uuid()}"
self.audit_message(
{
"@id": self.mid,
"@type": "monitor",
"startedAtTime": now(),
"wasStartedBy": self.aid,
},
AuditFlag.PROV,
)
def finalize_audit(self, result):
"""End auditing."""
if self.audit_check(AuditFlag.RESOURCE):
self.resource_monitor.stop()
result.runtime = gather_runtime_info(self.resource_monitor.fname)
if self.audit_check(AuditFlag.PROV):
self.audit_message(
{"@id": self.mid, "endedAtTime": now(), "wasEndedBy": self.aid},
AuditFlag.PROV,
)
# audit resources/runtime information
self.eid = f"uid:{gen_uuid()}"
entity = attr.asdict(result.runtime)
entity.update(
**{
"@id": self.eid,
"@type": "runtime",
"prov:wasGeneratedBy": self.aid,
}
)
self.audit_message(entity, AuditFlag.PROV)
self.audit_message(
{
"@type": "prov:Generation",
"entity_generated": self.eid,
"hadActivity": self.mid,
},
AuditFlag.PROV,
)
self.resource_monitor = None
if self.audit_check(AuditFlag.PROV):
# audit outputs
self.audit_message(
{"@id": self.aid, "endedAtTime": now(), "errored": result.errored},
AuditFlag.PROV,
)
def audit_message(self, message, flags=None):
"""
Send auditing message.
Parameters
----------
message : :obj:`dict`
A message in Pydra is a JSON-LD message object.
flags : :obj:`bool`, optional
If True and self.audit_flag, the message is sent.
"""
if self.develop:
with open(
Path(os.path.dirname(__file__)) / ".." / "schema/context.jsonld"
) as fp:
context = json.load(fp)
else:
context = {
"@context": "https://raw.githubusercontent.com/nipype/pydra"
"/master/pydra/schema/context.jsonld"
}
if self.audit_flags & flags:
if self.messenger_args:
send_message(
make_message(message, context=context),
messengers=self.messengers,
**self.messenger_args,
)
else:
send_message(
make_message(message, context=context), messengers=self.messengers
)
def audit_check(self, flag):
"""
Determine whether auditing is enabled for a particular flag.
Parameters
----------
flag : :obj: `bool`
The flag that is checked.
Returns
-------
bool
Boolean AND for self.oudit_flags and flag
"""
return self.audit_flags & flag | 0.677367 | 0.170335 |
from argparse import ArgumentParser
import yaml
from pathlib import Path
import dir_search
import yaml_utils
def parser():
# parser information setup
prog='parse_dir'
description = 'Parse directories ' + \
'to create yaml file read on database creation'
usage = 'usage: python3 {} ' .format(__file__)
usage += '[-t target(dir or design_hier)][-o outfile]'
parse = ArgumentParser(
prog=prog,
description=description,
usage=usage,
add_help=True
)
# Target Flow
parse.add_argument(
'-t',
'--target',
type=str,
action='store',
required=True,
help='target yaml configuration file (dir, design_hier, incdir, srcdir)'
)
# Directory parsing configuration file
parse.add_argument(
'-c',
'--config',
type=str,
action='store',
default='config.yaml',
help='Set directory parsing configuration (Default: config.yaml)'
)
# Output Filename
parse.add_argument(
'-o',
'--output',
type=str,
action='store',
default='main.yaml',
help='Set output yaml file name (Default: main.yaml)'
)
return parse.parse_args()
if __name__ == '__main__' :
options = parser()
conf_file = options.config
target = options.target
out_file = options.output
# directory parse configutaion
config = yaml_utils.read_yaml(conf_file)
rtl_ext = config['verilog'] + config['systemverilog']
src_ext = config['verilog-src'] + config['systemverilog-src']
inc_ext = config['verilog-header'] + config['systemverilog-header']
skip = config['skip']
# path instance
path = Path('..')
# process switch
list_files = False
if target == "dir" :
print("Directory structure analysis mode")
search_ext = rtl_ext
elif target == "incdir" :
print("Include directory search mode")
search_ext = inc_ext
elif target == "srcdir" :
print("Source directory search mode")
search_ext = src_ext
elif target == "files" :
print("Source files search mode")
search_ext = rtl_ext
list_files = True
else :
print("invalid target mode: " + target)
# search directory
dir_list = dir_search.dir_parse(path, search_ext, list_files, skip)
top_dir, conf_list = dir_search.dir_analyze(dir_list, list_files)
# dump output
print('Output yaml: ' + out_file)
with open(out_file, 'w') as outf:
outf.write('# Top Directory: ' + str(top_dir) + '\n')
yaml.dump(conf_list, outf) | doc/scripts/parse_dir.py | from argparse import ArgumentParser
import yaml
from pathlib import Path
import dir_search
import yaml_utils
def parser():
# parser information setup
prog='parse_dir'
description = 'Parse directories ' + \
'to create yaml file read on database creation'
usage = 'usage: python3 {} ' .format(__file__)
usage += '[-t target(dir or design_hier)][-o outfile]'
parse = ArgumentParser(
prog=prog,
description=description,
usage=usage,
add_help=True
)
# Target Flow
parse.add_argument(
'-t',
'--target',
type=str,
action='store',
required=True,
help='target yaml configuration file (dir, design_hier, incdir, srcdir)'
)
# Directory parsing configuration file
parse.add_argument(
'-c',
'--config',
type=str,
action='store',
default='config.yaml',
help='Set directory parsing configuration (Default: config.yaml)'
)
# Output Filename
parse.add_argument(
'-o',
'--output',
type=str,
action='store',
default='main.yaml',
help='Set output yaml file name (Default: main.yaml)'
)
return parse.parse_args()
if __name__ == '__main__' :
options = parser()
conf_file = options.config
target = options.target
out_file = options.output
# directory parse configutaion
config = yaml_utils.read_yaml(conf_file)
rtl_ext = config['verilog'] + config['systemverilog']
src_ext = config['verilog-src'] + config['systemverilog-src']
inc_ext = config['verilog-header'] + config['systemverilog-header']
skip = config['skip']
# path instance
path = Path('..')
# process switch
list_files = False
if target == "dir" :
print("Directory structure analysis mode")
search_ext = rtl_ext
elif target == "incdir" :
print("Include directory search mode")
search_ext = inc_ext
elif target == "srcdir" :
print("Source directory search mode")
search_ext = src_ext
elif target == "files" :
print("Source files search mode")
search_ext = rtl_ext
list_files = True
else :
print("invalid target mode: " + target)
# search directory
dir_list = dir_search.dir_parse(path, search_ext, list_files, skip)
top_dir, conf_list = dir_search.dir_analyze(dir_list, list_files)
# dump output
print('Output yaml: ' + out_file)
with open(out_file, 'w') as outf:
outf.write('# Top Directory: ' + str(top_dir) + '\n')
yaml.dump(conf_list, outf) | 0.386995 | 0.077413 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from requests import codes
from time import time
from uber_rides.errors import ClientError
from uber_rides.errors import UberIllegalState
from uber_rides.utils import auth
EXPIRES_THRESHOLD_SECONDS = 500
class Session(object):
"""A class to store credentials.
A Session can be initialized with a Server Token or with a set of
OAuth 2.0 Credentials, but not with both. A Session uses credentials
to properly construct requests to Uber and access protected resources.
"""
def __init__(
self,
server_token=None,
oauth2credential=None,
):
"""Initialize a Session.
Parameters
sever_token (str)
Your application's server token. Available at
developer.uber.com.
oauth2credential (OAuth2Credential)
Access token and additional OAuth 2.0 credentials used
to access protected resources.
Raises
UberIllegalState (APIError)
Raised if there is an attempt to create session with
both server token and access token.
"""
if server_token and oauth2credential:
message = (
'Session cannot have both Server '
'and OAuth 2.0 Credentials.'
)
raise UberIllegalState(message)
if server_token is None and oauth2credential is None:
message = (
'Session must have either Server '
'Token or OAuth 2.0 Credentials.'
)
raise UberIllegalState(message)
if server_token:
self.server_token = server_token
self.token_type = auth.SERVER_TOKEN_TYPE
self.oauth2credential = None
elif oauth2credential:
self.oauth2credential = oauth2credential
self.token_type = auth.OAUTH_TOKEN_TYPE
self.server_token = None
class OAuth2Credential(object):
"""A class to store OAuth 2.0 credentials.
OAuth 2.0 credentials are used to properly construct requests
to Uber and access protected resources. The class also stores
app information (such as client_id) to refresh or request new
access tokens if they expire or are revoked.
"""
def __init__(
self,
client_id,
access_token,
expires_in_seconds,
scopes,
grant_type,
redirect_url=None,
client_secret=None,
refresh_token=None,
):
"""Initialize an OAuth2Credential.
Parameters
client_id (str)
Your app's Client ID.
access_token (str)
Access token received from OAuth 2.0 Authorization.
expires_in_seconds (int)
Seconds after initial grant when access token will expire.
scopes (set)
Set of permission scopes to request.
(e.g. {'profile', 'history'}) Keep this list minimal so
users feel safe granting your app access to their information.
grant_type (str)
Type of OAuth 2.0 Grant used to obtain access token.
(e.g. 'authorization_code')
redirect_url (str)
The URL that the Uber server will redirect to.
client_secret (str)
Your app's Client Secret.
refresh_token (str)
Optional refresh token used to get a new access token.
Only used for Authorization Code Grant.
"""
self.client_id = client_id
self.access_token = access_token
self.expires_in_seconds = self._now() + int(expires_in_seconds)
self.scopes = scopes
self.grant_type = grant_type
self.redirect_url = redirect_url
self.client_secret = client_secret
self.refresh_token = refresh_token
@classmethod
def make_from_response(
cls,
response,
grant_type,
client_id,
client_secret=None,
redirect_url=None,
):
"""Alternate constructor for OAuth2Credential().
Create an OAuth2Credential from an HTTP Response.
Parameters
response (Response)
HTTP Response containing OAuth 2.0 credentials.
grant_type (str)
Type of OAuth 2.0 Grant used to obtain access token.
(e.g. 'authorization_code')
client_id (str)
Your app's Client ID.
client_secret (str)
Your app's Client Secret.
redirect_url (str)
The URL that the Uber server will redirect to.
Returns
(OAuth2Credential)
Raises
ClientError (APIError)
Raised if the response is invalid.
"""
if response.status_code != codes.ok:
message = 'Error with Access Token Request: {}'
message = message.format(response.reason)
raise ClientError(response, message)
response = response.json()
# convert space delimited string to set
scopes = response.get('scope')
scopes_set = {scope for scope in scopes.split()}
return cls(
client_id=client_id,
client_secret=client_secret,
redirect_url=redirect_url,
access_token=response.get('access_token'),
expires_in_seconds=response.get('expires_in'),
scopes=scopes_set,
grant_type=grant_type,
refresh_token=response.get('refresh_token', None),
)
def is_stale(self):
"""Check whether the session's current access token is about to expire.
Returns
(bool)
True if access_token expires within threshold
"""
expires_in_seconds = self.expires_in_seconds - self._now()
return expires_in_seconds < EXPIRES_THRESHOLD_SECONDS
def _now(self):
return int(time()) | venv/lib/python2.7/site-packages/uber_rides/session.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from requests import codes
from time import time
from uber_rides.errors import ClientError
from uber_rides.errors import UberIllegalState
from uber_rides.utils import auth
EXPIRES_THRESHOLD_SECONDS = 500
class Session(object):
"""A class to store credentials.
A Session can be initialized with a Server Token or with a set of
OAuth 2.0 Credentials, but not with both. A Session uses credentials
to properly construct requests to Uber and access protected resources.
"""
def __init__(
self,
server_token=None,
oauth2credential=None,
):
"""Initialize a Session.
Parameters
sever_token (str)
Your application's server token. Available at
developer.uber.com.
oauth2credential (OAuth2Credential)
Access token and additional OAuth 2.0 credentials used
to access protected resources.
Raises
UberIllegalState (APIError)
Raised if there is an attempt to create session with
both server token and access token.
"""
if server_token and oauth2credential:
message = (
'Session cannot have both Server '
'and OAuth 2.0 Credentials.'
)
raise UberIllegalState(message)
if server_token is None and oauth2credential is None:
message = (
'Session must have either Server '
'Token or OAuth 2.0 Credentials.'
)
raise UberIllegalState(message)
if server_token:
self.server_token = server_token
self.token_type = auth.SERVER_TOKEN_TYPE
self.oauth2credential = None
elif oauth2credential:
self.oauth2credential = oauth2credential
self.token_type = auth.OAUTH_TOKEN_TYPE
self.server_token = None
class OAuth2Credential(object):
"""A class to store OAuth 2.0 credentials.
OAuth 2.0 credentials are used to properly construct requests
to Uber and access protected resources. The class also stores
app information (such as client_id) to refresh or request new
access tokens if they expire or are revoked.
"""
def __init__(
self,
client_id,
access_token,
expires_in_seconds,
scopes,
grant_type,
redirect_url=None,
client_secret=None,
refresh_token=None,
):
"""Initialize an OAuth2Credential.
Parameters
client_id (str)
Your app's Client ID.
access_token (str)
Access token received from OAuth 2.0 Authorization.
expires_in_seconds (int)
Seconds after initial grant when access token will expire.
scopes (set)
Set of permission scopes to request.
(e.g. {'profile', 'history'}) Keep this list minimal so
users feel safe granting your app access to their information.
grant_type (str)
Type of OAuth 2.0 Grant used to obtain access token.
(e.g. 'authorization_code')
redirect_url (str)
The URL that the Uber server will redirect to.
client_secret (str)
Your app's Client Secret.
refresh_token (str)
Optional refresh token used to get a new access token.
Only used for Authorization Code Grant.
"""
self.client_id = client_id
self.access_token = access_token
self.expires_in_seconds = self._now() + int(expires_in_seconds)
self.scopes = scopes
self.grant_type = grant_type
self.redirect_url = redirect_url
self.client_secret = client_secret
self.refresh_token = refresh_token
@classmethod
def make_from_response(
cls,
response,
grant_type,
client_id,
client_secret=None,
redirect_url=None,
):
"""Alternate constructor for OAuth2Credential().
Create an OAuth2Credential from an HTTP Response.
Parameters
response (Response)
HTTP Response containing OAuth 2.0 credentials.
grant_type (str)
Type of OAuth 2.0 Grant used to obtain access token.
(e.g. 'authorization_code')
client_id (str)
Your app's Client ID.
client_secret (str)
Your app's Client Secret.
redirect_url (str)
The URL that the Uber server will redirect to.
Returns
(OAuth2Credential)
Raises
ClientError (APIError)
Raised if the response is invalid.
"""
if response.status_code != codes.ok:
message = 'Error with Access Token Request: {}'
message = message.format(response.reason)
raise ClientError(response, message)
response = response.json()
# convert space delimited string to set
scopes = response.get('scope')
scopes_set = {scope for scope in scopes.split()}
return cls(
client_id=client_id,
client_secret=client_secret,
redirect_url=redirect_url,
access_token=response.get('access_token'),
expires_in_seconds=response.get('expires_in'),
scopes=scopes_set,
grant_type=grant_type,
refresh_token=response.get('refresh_token', None),
)
def is_stale(self):
"""Check whether the session's current access token is about to expire.
Returns
(bool)
True if access_token expires within threshold
"""
expires_in_seconds = self.expires_in_seconds - self._now()
return expires_in_seconds < EXPIRES_THRESHOLD_SECONDS
def _now(self):
return int(time()) | 0.847306 | 0.118131 |
import os
from cStringIO \
import \
StringIO
from bento.core \
import \
PackageDescription, PackageOptions
from bento.core.pkg_objects \
import \
Extension, CompiledLibrary
from bento.core.package \
import \
raw_parse, raw_to_pkg_kw, build_ast_from_raw_dict, PackageDescription
from bento.commands.configure \
import \
ConfigureCommand, _setup_options_parser
from bento.commands.build \
import \
BuildCommand
from bento.commands.options \
import \
OptionsContext
from bento.commands.context \
import \
ConfigureYakuContext, BuildYakuContext
DUMMY_C = r"""\
#include <Python.h>
#include <stdio.h>
static PyObject*
hello(PyObject *self, PyObject *args)
{
printf("Hello from C\n");
Py_INCREF(Py_None);
return Py_None;
}
static PyMethodDef HelloMethods[] = {
{"hello", hello, METH_VARARGS, "Print a hello world."},
{NULL, NULL, 0, NULL} /* Sentinel */
};
PyMODINIT_FUNC
init%(name)s(void)
{
(void) Py_InitModule("%(name)s", HelloMethods);
}
"""
DUMMY_CLIB = r"""\
int hello(void)
{
return 0;
}
"""
class FakeGlobalContext(object):
def __init__(self):
self._cmd_opts = {}
def add_option(self, command_name, option, group=None):
self._cmd_opts[command_name].add_option(option, group)
def prepare_configure(run_node, bento_info, context_klass=ConfigureYakuContext, cmd_argv=None):
if cmd_argv is None:
cmd_argv = []
top_node = run_node._ctx.srcnode
top_node.make_node("bento.info").safe_write(bento_info)
package = PackageDescription.from_string(bento_info)
package_options = PackageOptions.from_string(bento_info)
configure = ConfigureCommand()
opts = OptionsContext.from_command(configure)
# FIXME: this emulates the big ugly hack inside bentomaker.
_setup_options_parser(opts, package_options)
context = context_klass(cmd_argv, opts, package, run_node)
context.package_options = package_options
return context, configure
def prepare_options(cmd_name, cmd, context_klass):
opts = OptionsContext.from_command(cmd)
g_context = FakeGlobalContext()
g_context._cmd_opts[cmd_name] = opts
# FIXME: the way new options are registered for custom contexts sucks:
# there should be a context class independent way to do it
if context_klass.__name__ == "BuildWafContext":
from bento.commands.extras.waf import register_options
register_options(g_context)
return opts
def prepare_build(run_node, pkg, context_klass=BuildYakuContext):
build = BuildCommand()
opts = prepare_options("build", build, context_klass)
bld = context_klass([], opts, pkg, run_node)
return bld, build
def create_fake_package_from_bento_info(top_node, bento_info):
from bento.core.package import raw_parse, raw_to_pkg_kw
d = raw_parse(bento_info)
_kw, files = raw_to_pkg_kw(d, {}, None)
kw = {}
if "extensions" in _kw:
kw["extensions"] = _kw["extensions"].values()
if "py_modules" in _kw:
kw["modules"] = _kw["py_modules"]
if "packages" in _kw:
kw["packages"] = _kw["packages"]
if "compiled_libraries" in _kw:
kw["compiled_libraries"] = _kw["compiled_libraries"]
return create_fake_package(top_node, **kw)
def create_fake_package_from_bento_infos(top_node, bento_infos, bscripts=None):
    """Materialize dummy sources for a root bento.info plus its subpackage
    bento.info files (and optional bscript files) under ``top_node``.

    ``bento_infos`` maps relative paths to file content and must contain a
    "bento.info" root entry.  Returns the result of ``create_fake_package``.
    """
    if bscripts is None:
        bscripts = {}
    # Write every bento.info / bscript file to disk first, so the subpackage
    # loop below can resolve them through top_node.search.
    # NOTE(review): `iteritems` means this module targets Python 2 (consistent
    # with the cStringIO import at the top of the file).
    for loc, content in bento_infos.iteritems():
        n = top_node.make_node(loc)
        n.parent.mkdir()
        n.write(content)
    for loc, content in bscripts.iteritems():
        n = top_node.make_node(loc)
        n.parent.mkdir()
        n.write(content)
    d = raw_parse(bento_infos["bento.info"])
    _kw, files = raw_to_pkg_kw(d, {}, None)
    subpackages = _kw.get("subpackages", {})
    py_modules = _kw.get("py_modules", [])
    if "extensions" in _kw:
        extensions = _kw["extensions"].values()
    else:
        extensions = []
    if "compiled_libraries" in _kw:
        compiled_libraries = _kw["compiled_libraries"].values()
    else:
        compiled_libraries = []
    packages = _kw.get("packages", [])
    # Fold every subpackage's modules / extensions / libraries / packages into
    # the top-level lists, rebasing names and paths onto top_node.
    for name, spkg in subpackages.iteritems():
        n = top_node.search(name)
        n.write(bento_infos[name])
        d = n.parent
        for py_module in spkg.py_modules:
            m = d.make_node(py_module)
            py_modules.append(m.path_from(top_node))
        extensions.extend(flatten_extensions(top_node, spkg))
        compiled_libraries.extend(flatten_compiled_libraries(top_node, spkg))
        packages.extend(flatten_packages(top_node, spkg))
    return create_fake_package(top_node, packages, py_modules, extensions, compiled_libraries)
def _write_dummy_sources(top_node, item, template):
    # First listed source gets the rendered dummy code; the rest are empty
    # stub files (only the main source's parent directory is created, matching
    # long-standing behavior).
    main_node = top_node.make_node(item.sources[0])
    main_node.parent.mkdir()
    main_node.write(template % {"name": item.name.split(".")[-1]})
    for stub in item.sources[1:]:
        top_node.make_node(stub).write("")

def create_fake_package(top_node, packages=None, modules=None, extensions=None, compiled_libraries=None):
    """Create a skeleton source tree under ``top_node``.

    Packages get empty ``__init__.py`` files, modules get empty ``.py``
    files, and each extension / compiled library gets dummy C code for its
    first source (remaining sources are written empty).
    """
    packages = [] if packages is None else packages
    modules = [] if modules is None else modules
    extensions = [] if extensions is None else extensions
    compiled_libraries = [] if compiled_libraries is None else compiled_libraries
    for package in packages:
        package_dir = top_node.make_node(package.replace(".", os.sep))
        package_dir.mkdir()
        package_dir.make_node("__init__.py").write("")
    for module in modules:
        module_node = top_node.make_node("%s.py" % module.replace(".", os.sep))
        module_node.parent.mkdir()
        module_node.write("")
    for extension in extensions:
        _write_dummy_sources(top_node, extension, DUMMY_C)
    for library in compiled_libraries:
        _write_dummy_sources(top_node, library, DUMMY_CLIB)
# FIXME: Those flatten extensions are almost redundant with the ones in
# bento.core.subpackages. Here, we do not ensure that the nodes actually exist
# on the fs (make_node vs find_node). But maybe we do not need to check file
# existence in bento.core.subpackages either (do it at another layer)
def flatten_extensions(top_node, subpackage):
    """Return the subpackage's extensions with dotted names and source paths
    rebased onto ``top_node``."""
    subpkg_dir = top_node.find_dir(subpackage.rdir)
    # "foo/bar" -> "foo.bar" prefix for the fully qualified extension name.
    prefix = ".".join(subpackage.rdir.split("/"))
    return [
        Extension(
            prefix + ".%s" % extension.name,
            [subpkg_dir.make_node(s).path_from(top_node) for s in extension.sources],
        )
        for extension in subpackage.extensions.values()
    ]
def flatten_compiled_libraries(top_node, subpackage):
    """Return the subpackage's compiled libraries with dotted names and
    source paths rebased onto ``top_node``."""
    subpkg_dir = top_node.find_dir(subpackage.rdir)
    prefix = ".".join(subpackage.rdir.split("/"))
    return [
        CompiledLibrary(
            prefix + ".%s" % library.name,
            [subpkg_dir.make_node(s).path_from(top_node) for s in library.sources],
        )
        for library in subpackage.compiled_libraries.values()
    ]
def flatten_packages(top_node, subpackage):
    """Return the subpackage's package names prefixed with its parent package.

    Args:
        top_node: unused; kept for signature symmetry with the other
            flatten_* helpers.
        subpackage: subpackage description with ``rdir`` and ``packages``.

    Returns:
        list of fully qualified package names ("parent.pkg").
    """
    # Removed dead code: an unused `ret = {}` accumulator and an unused
    # `top_node.find_dir(...)` lookup that the sibling flatten_* helpers
    # actually need but this one never read.
    parent_pkg = ".".join(subpackage.rdir.split("/"))
    return ["%s.%s" % (parent_pkg, p) for p in subpackage.packages]
# Super ugly stuff to make waf and nose happy: nose happily overrides
# sys.stdout/sys.stderr, and waf expects real files (with encoding etc.). We
# fake them until waf is happy.
class EncodedStringIO(object):
    """StringIO wrapper exposing an ``encoding`` attribute.

    waf expects stdout/stderr-like objects to carry an encoding; nose swaps
    in plain StringIO objects, so tests route output through this shim.
    """
    def __init__(self):
        self._data = StringIO()
        # waf reads this attribute from its stdout/stderr replacements.
        self.encoding = "ascii"
    def read(self):
        return self._data.read()
    def write(self, data):
        # NOTE(review): the trailing "| bento/... | import os" below looks
        # like dataset-extraction residue fused onto this line — verify
        # against the upstream file before cleaning it up.
        return self._data.write(data) | bento/commands/tests/utils.py | import os
from cStringIO \
import \
StringIO
from bento.core \
import \
PackageDescription, PackageOptions
from bento.core.pkg_objects \
import \
Extension, CompiledLibrary
from bento.core.package \
import \
raw_parse, raw_to_pkg_kw, build_ast_from_raw_dict, PackageDescription
from bento.commands.configure \
import \
ConfigureCommand, _setup_options_parser
from bento.commands.build \
import \
BuildCommand
from bento.commands.options \
import \
OptionsContext
from bento.commands.context \
import \
ConfigureYakuContext, BuildYakuContext
DUMMY_C = r"""\
#include <Python.h>
#include <stdio.h>
static PyObject*
hello(PyObject *self, PyObject *args)
{
printf("Hello from C\n");
Py_INCREF(Py_None);
return Py_None;
}
static PyMethodDef HelloMethods[] = {
{"hello", hello, METH_VARARGS, "Print a hello world."},
{NULL, NULL, 0, NULL} /* Sentinel */
};
PyMODINIT_FUNC
init%(name)s(void)
{
(void) Py_InitModule("%(name)s", HelloMethods);
}
"""
DUMMY_CLIB = r"""\
int hello(void)
{
return 0;
}
"""
class FakeGlobalContext(object):
    """Test stand-in for bentomaker's global context: records one options
    context per command and forwards ``add_option`` to it."""
    def __init__(self):
        # Maps command name -> OptionsContext-like object.
        self._cmd_opts = {}
    def add_option(self, command_name, option, group=None):
        # Raises KeyError if the command was never registered.
        self._cmd_opts[command_name].add_option(option, group)
def prepare_configure(run_node, bento_info, context_klass=ConfigureYakuContext, cmd_argv=None):
    """Write ``bento_info`` to disk and return a ``(configure context,
    configure command)`` pair ready for running the configure step."""
    if cmd_argv is None:
        cmd_argv = []
    top_node = run_node._ctx.srcnode
    # Materialize the bento.info file before parsing it.
    top_node.make_node("bento.info").safe_write(bento_info)
    package = PackageDescription.from_string(bento_info)
    package_options = PackageOptions.from_string(bento_info)
    configure = ConfigureCommand()
    opts = OptionsContext.from_command(configure)
    # FIXME: this emulates the big ugly hack inside bentomaker.
    _setup_options_parser(opts, package_options)
    context = context_klass(cmd_argv, opts, package, run_node)
    context.package_options = package_options
    return context, configure
def prepare_options(cmd_name, cmd, context_klass):
    """Build an OptionsContext for ``cmd`` and let context-specific code
    register extra options on it."""
    opts = OptionsContext.from_command(cmd)
    g_context = FakeGlobalContext()
    g_context._cmd_opts[cmd_name] = opts
    # FIXME: the way new options are registered for custom contexts sucks:
    # there should be a context class independent way to do it
    # Compared by class name — presumably so the waf backend is only imported
    # when actually needed (TODO confirm).
    if context_klass.__name__ == "BuildWafContext":
        from bento.commands.extras.waf import register_options
        register_options(g_context)
    return opts
def prepare_build(run_node, pkg, context_klass=BuildYakuContext):
    """Create a ``(build context, build command)`` pair for tests."""
    build = BuildCommand()
    opts = prepare_options("build", build, context_klass)
    bld = context_klass([], opts, pkg, run_node)
    return bld, build
def create_fake_package_from_bento_info(top_node, bento_info):
    """Materialize dummy source files under ``top_node`` for one bento.info
    string (``py_modules`` is renamed to ``modules`` for
    ``create_fake_package``)."""
    from bento.core.package import raw_parse, raw_to_pkg_kw
    d = raw_parse(bento_info)
    _kw, files = raw_to_pkg_kw(d, {}, None)
    kw = {}
    # Extensions are stored as a mapping; only the values are forwarded.
    if "extensions" in _kw:
        kw["extensions"] = _kw["extensions"].values()
    if "py_modules" in _kw:
        kw["modules"] = _kw["py_modules"]
    if "packages" in _kw:
        kw["packages"] = _kw["packages"]
    if "compiled_libraries" in _kw:
        kw["compiled_libraries"] = _kw["compiled_libraries"]
    return create_fake_package(top_node, **kw)
def create_fake_package_from_bento_infos(top_node, bento_infos, bscripts=None):
    """Materialize dummy sources for a root bento.info plus its subpackage
    bento.info files (and optional bscripts) under ``top_node``.

    ``bento_infos`` maps relative paths to content and must contain a
    "bento.info" root entry.
    """
    if bscripts is None:
        bscripts = {}
    # Write every bento.info / bscript file to disk first.
    # NOTE(review): `iteritems` => Python 2 code (matches cStringIO import).
    for loc, content in bento_infos.iteritems():
        n = top_node.make_node(loc)
        n.parent.mkdir()
        n.write(content)
    for loc, content in bscripts.iteritems():
        n = top_node.make_node(loc)
        n.parent.mkdir()
        n.write(content)
    d = raw_parse(bento_infos["bento.info"])
    _kw, files = raw_to_pkg_kw(d, {}, None)
    subpackages = _kw.get("subpackages", {})
    py_modules = _kw.get("py_modules", [])
    if "extensions" in _kw:
        extensions = _kw["extensions"].values()
    else:
        extensions = []
    if "compiled_libraries" in _kw:
        compiled_libraries = _kw["compiled_libraries"].values()
    else:
        compiled_libraries = []
    packages = _kw.get("packages", [])
    # Fold every subpackage's contents into the top-level lists, rebasing
    # names and paths onto top_node.
    for name, spkg in subpackages.iteritems():
        n = top_node.search(name)
        n.write(bento_infos[name])
        d = n.parent
        for py_module in spkg.py_modules:
            m = d.make_node(py_module)
            py_modules.append(m.path_from(top_node))
        extensions.extend(flatten_extensions(top_node, spkg))
        compiled_libraries.extend(flatten_compiled_libraries(top_node, spkg))
        packages.extend(flatten_packages(top_node, spkg))
    return create_fake_package(top_node, packages, py_modules, extensions, compiled_libraries)
def create_fake_package(top_node, packages=None, modules=None, extensions=None, compiled_libraries=None):
    """Create a skeleton source tree under ``top_node``.

    Packages get empty ``__init__.py`` files, modules get empty ``.py``
    files, and the first source of each extension / compiled library gets
    dummy C code (remaining sources are written empty).
    """
    if packages is None:
        packages = []
    if modules is None:
        modules = []
    if extensions is None:
        extensions = []
    if compiled_libraries is None:
        compiled_libraries = []
    for p in packages:
        d = p.replace(".", os.sep)
        n = top_node.make_node(d)
        n.mkdir()
        init = n.make_node("__init__.py")
        init.write("")
    for m in modules:
        d = m.replace(".", os.sep)
        n = top_node.make_node("%s.py" % d)
        n.parent.mkdir()
        n.write("")
    for extension in extensions:
        # First source carries the dummy extension code, the rest are stubs.
        main = extension.sources[0]
        n = top_node.make_node(main)
        n.parent.mkdir()
        n.write(DUMMY_C % {"name": extension.name.split(".")[-1]})
        for s in extension.sources[1:]:
            n = top_node.make_node(s)
            n.write("")
    for library in compiled_libraries:
        main = library.sources[0]
        n = top_node.make_node(main)
        n.parent.mkdir()
        n.write(DUMMY_CLIB % {"name": library.name.split(".")[-1]})
        for s in library.sources[1:]:
            n = top_node.make_node(s)
            n.write("")
# FIXME: Those flatten extensions are almost redundant with the ones in
# bento.core.subpackages. Here, we do not ensure that the nodes actually exist
# on the fs (make_node vs find_node). But maybe we do not need to check file
# existence in bento.core.subpackages either (do it at another layer)
def flatten_extensions(top_node, subpackage):
    """Return the subpackage's extensions with dotted names and source
    paths rebased onto ``top_node``."""
    ret = []
    d = top_node.find_dir(subpackage.rdir)
    # "foo/bar" -> "foo.bar" prefix for fully qualified names.
    root_name = ".".join(subpackage.rdir.split("/"))
    for extension in subpackage.extensions.values():
        sources = [d.make_node(s).path_from(top_node) for s in extension.sources]
        full_name = root_name + ".%s" % extension.name
        ret.append(Extension(full_name, sources))
    return ret
def flatten_compiled_libraries(top_node, subpackage):
    """Return the subpackage's compiled libraries with dotted names and
    source paths rebased onto ``top_node``."""
    ret = []
    d = top_node.find_dir(subpackage.rdir)
    root_name = ".".join(subpackage.rdir.split("/"))
    for library in subpackage.compiled_libraries.values():
        sources = [d.make_node(s).path_from(top_node) for s in library.sources]
        full_name = root_name + ".%s" % library.name
        ret.append(CompiledLibrary(full_name, sources))
    return ret
def flatten_packages(top_node, subpackage):
    """Return the subpackage's package names prefixed with its parent package.

    Args:
        top_node: unused; kept for signature symmetry with the other
            flatten_* helpers.
        subpackage: subpackage description with ``rdir`` and ``packages``.

    Returns:
        list of fully qualified package names ("parent.pkg").
    """
    # Removed dead code: unused `ret = {}` and an unused find_dir lookup.
    parent_pkg = ".".join(subpackage.rdir.split("/"))
    return ["%s.%s" % (parent_pkg, p) for p in subpackage.packages]
# Super ugly stuff to make waf and nose happy: nose happily overrides
# sys.stdout/sys.stderr, and waf expects real files (with encoding etc.). We
# fake them until waf is happy.
class EncodedStringIO(object):
    """StringIO wrapper exposing an ``encoding`` attribute.

    waf expects stdout/stderr-like objects to carry an encoding; nose swaps
    in plain StringIO objects, so tests route output through this shim.
    """
    def __init__(self):
        self._data = StringIO()
        # waf reads this attribute from its stdout/stderr replacements.
        self.encoding = "ascii"
    def read(self):
        return self._data.read()
    def write(self, data):
        # NOTE(review): the trailing "| 0.19… | 0.10…" below looks like
        # dataset-extraction residue fused onto this line — verify.
        return self._data.write(data) | 0.197987 | 0.105441
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class AddRemove(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Enum indicating if a user is adding or removing a favorite lab."""

    ADD = "Add"  #: Indicates that a user is adding a favorite lab.
    REMOVE = "Remove"  #: Indicates that a user is removing a favorite lab.
class ConfigurationState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Describes the user's progress in configuring their environment setting."""

    NOT_APPLICABLE = "NotApplicable"  #: User either hasn't started configuring their template or they haven't started the configuration process.
    COMPLETED = "Completed"  #: User is finished modifying the template.
class LabUserAccessMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Lab user access mode (open to all vs. restricted to those listed on the lab)."""

    RESTRICTED = "Restricted"  #: Only users registered with the lab can access VMs.
    OPEN = "Open"  #: Any user can register with the lab and access its VMs.
class ManagedLabVmSize(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The size category of a managed lab VM."""

    BASIC = "Basic"  #: The base VM size.
    STANDARD = "Standard"  #: The standard or default VM size.
    PERFORMANCE = "Performance"  #: The most performant VM size.
class PublishingState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Describes the readiness of this environment setting."""

    DRAFT = "Draft"  #: Initial state of an environment setting.
    PUBLISHING = "Publishing"  #: Currently provisioning resources.
    PUBLISHED = "Published"  #: All resources are currently provisioned.
    PUBLISH_FAILED = "PublishFailed"  #: Failed to provision all the necessary resources.
    SCALING = "Scaling"  #: Currently provisioning resources without recreating VM image.
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class AddRemove(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Enum indicating if a user is adding or removing a favorite lab."""

    ADD = "Add"  #: Indicates that a user is adding a favorite lab.
    REMOVE = "Remove"  #: Indicates that a user is removing a favorite lab.
class ConfigurationState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Describes the user's progress in configuring their environment setting."""

    NOT_APPLICABLE = "NotApplicable"  #: User either hasn't started configuring their template or they haven't started the configuration process.
    COMPLETED = "Completed"  #: User is finished modifying the template.
class LabUserAccessMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Lab user access mode (open to all vs. restricted to those listed on the lab)."""

    RESTRICTED = "Restricted"  #: Only users registered with the lab can access VMs.
    OPEN = "Open"  #: Any user can register with the lab and access its VMs.
class ManagedLabVmSize(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The size category of a managed lab VM."""

    BASIC = "Basic"  #: The base VM size.
    STANDARD = "Standard"  #: The standard or default VM size.
    PERFORMANCE = "Performance"  #: The most performant VM size.
class PublishingState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Describes the readiness of this environment setting."""

    DRAFT = "Draft"  #: Initial state of an environment setting.
    PUBLISHING = "Publishing"  #: Currently provisioning resources.
    PUBLISHED = "Published"  #: All resources are currently provisioned.
    PUBLISH_FAILED = "PublishFailed"  #: Failed to provision all the necessary resources.
    SCALING = "Scaling"  #: Currently provisioning resources without recreating VM image.
from typing import Callable, Dict, Optional, Sequence, Tuple
from functools import partial
import numpy as np
import torch
from torch import Tensor
from torch.nn import functional as F
# @TODO:
# after full classification metrics re-implementation, make a reference to
# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics
# as a baseline
def process_multiclass_components(
    outputs: torch.Tensor,
    targets: torch.Tensor,
    argmax_dim: int = -1,
    num_classes: Optional[int] = None,
) -> Tuple[torch.Tensor, torch.Tensor, int]:
    """
    Preprocess input in case multiclass classification task.

    Args:
        outputs: estimated targets as predicted by a model
            with shape [bs; ..., (num_classes or 1)]
        targets: ground truth (correct) target values
            with shape [bs; ..., 1]
        argmax_dim: int, that specifies dimension for argmax transformation
            in case of scores/probabilities in ``outputs``
        num_classes: int, that specifies number of classes if it known

    Returns:
        preprocessed outputs (shape [bs; 1]), targets (shape [bs; 1])
        and num_classes
    """
    # @TODO: better multiclass preprocessing, label -> class_id mapping
    if not torch.is_tensor(outputs):
        outputs = torch.from_numpy(np.array(outputs))
    if not torch.is_tensor(targets):
        targets = torch.from_numpy(np.array(targets))
    if outputs.dim() == targets.dim() + 1:
        # looks like we have scores/probabilities in our outputs
        # let's convert them to final model predictions
        num_classes = max(
            outputs.shape[argmax_dim], int(targets.max().detach().item() + 1)
        )
        outputs = torch.argmax(outputs, dim=argmax_dim)
    if num_classes is None:
        # as far as we expect the outputs/targets tensors to be int64
        # we could find number of classes as max available number
        num_classes = max(
            int(outputs.max().detach().item() + 1),
            int(targets.max().detach().item() + 1),
        )
    if outputs.dim() == 1:
        outputs = outputs.view(-1, 1)
    elif outputs.dim() == 2 and outputs.size(0) == 1:
        # transpose case: [1; bs] -> [bs; 1]
        # BUGFIX: ``permute`` returns a new tensor; the result was previously
        # discarded, making the transpose a no-op.
        outputs = outputs.permute(1, 0)
    else:
        assert outputs.size(1) == 1 and outputs.dim() == 2, (
            "Wrong `outputs` shape, "
            "expected 1D or 2D with size 1 in the second dim "
            "got {}".format(outputs.shape)
        )
    if targets.dim() == 1:
        targets = targets.view(-1, 1)
    elif targets.dim() == 2 and targets.size(0) == 1:
        # transpose case: [1; bs] -> [bs; 1]
        # BUGFIX: assign the permuted tensor (was a no-op before).
        targets = targets.permute(1, 0)
    else:
        # BUGFIX: message said "outputs" for the targets check.
        assert targets.size(1) == 1 and targets.dim() == 2, (
            "Wrong `targets` shape, "
            "expected 1D or 2D with size 1 in the second dim"
        )
    return outputs, targets, num_classes
def process_multilabel_components(
    outputs: torch.Tensor,
    targets: torch.Tensor,
    weights: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """General preprocessing for multi-label-based metrics.

    Args:
        outputs: NxK tensor that for each of the N examples
            indicates the probability of the example belonging to each of
            the K classes, according to the model.
        targets: binary NxK tensor that encodes which of the K
            classes are associated with the N-th input
            (eg: a row [0, 1, 0, 1] indicates that the example is
            associated with classes 2 and 4)
        weights: importance for each sample (1D, length N), optional

    Returns:
        processed ``outputs``, ``targets`` (both [batch_size; num_classes])
        and ``weights`` (unchanged apart from squeeze, or None)
    """
    # Accept numpy arrays as well as tensors.
    if not torch.is_tensor(outputs):
        outputs = torch.from_numpy(outputs)
    if not torch.is_tensor(targets):
        targets = torch.from_numpy(targets)
    if weights is not None:
        if not torch.is_tensor(weights):
            weights = torch.from_numpy(weights)
        weights = weights.squeeze()
    if outputs.dim() == 1:
        outputs = outputs.view(-1, 1)
    else:
        assert outputs.dim() == 2, (
            "wrong `outputs` size "
            "(should be 1D or 2D with one column per class)"
        )
    if targets.dim() == 1:
        if outputs.shape[1] > 1:
            # multi-class case: expand class-index targets to one-hot rows
            num_classes = outputs.shape[1]
            targets = F.one_hot(targets, num_classes).float()
        else:
            # binary case
            targets = targets.view(-1, 1)
    else:
        assert targets.dim() == 2, (
            "wrong `targets` size "
            "(should be 1D or 2D with one column per class)"
        )
    if weights is not None:
        assert weights.dim() == 1, "Weights dimension should be 1"
        assert weights.numel() == targets.size(
            0
        ), "Weights dimension 1 should be the same as that of target"
        assert torch.min(weights) >= 0, "Weight should be non-negative only"
    # x**2 == x holds element-wise iff every entry is 0 or 1.
    assert torch.equal(
        targets ** 2, targets
    ), "targets should be binary (0 or 1)"
    return outputs, targets, weights
def get_binary_statistics(
    outputs: Tensor, targets: Tensor, label: int = 1,
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
    """
    Count true negatives, false positives, false negatives, true positives
    and support for a binary classification problem for a given label.

    Args:
        outputs: estimated targets as predicted by a model
            with shape [bs; ..., 1]
        targets: ground truth (correct) target values
            with shape [bs; ..., 1]
        label: integer, that specifies label of interest for statistics compute

    Returns:
        Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: stats

    Example:
        >>> y_pred = torch.tensor([[0, 0, 1, 1, 0, 1, 0, 1]])
        >>> y_true = torch.tensor([[0, 1, 0, 1, 0, 0, 1, 1]])
        >>> tn, fp, fn, tp, support = get_binary_statistics(y_pred, y_true)
        tensor(2) tensor(2) tensor(2) tensor(2) tensor(4)
    """
    predicted_pos = outputs == label
    predicted_neg = outputs != label
    actual_pos = targets == label
    actual_neg = targets != label
    tn = (predicted_neg * actual_neg).to(torch.long).sum()
    fp = (predicted_pos * actual_neg).to(torch.long).sum()
    fn = (predicted_neg * actual_pos).to(torch.long).sum()
    tp = (predicted_pos * actual_pos).to(torch.long).sum()
    support = actual_pos.to(torch.long).sum()
    return tn, fp, fn, tp, support
def get_multiclass_statistics(
    outputs: Tensor,
    targets: Tensor,
    argmax_dim: int = -1,
    num_classes: Optional[int] = None,
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
    """
    Compute per-class tn/fp/fn/tp/support for a multi-class problem.

    Args:
        outputs: estimated targets as predicted by a model
            with shape [bs; ..., (num_classes or 1)]
        targets: ground truth (correct) target values
            with shape [bs; ..., 1]
        argmax_dim: int, that specifies dimension for argmax transformation
            in case of scores/probabilities in ``outputs``
        num_classes: int, that specifies number of classes if it known

    Returns:
        Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: stats

    Example:
        >>> y_pred = torch.tensor([1, 2, 3, 0])
        >>> y_true = torch.tensor([1, 3, 4, 0])
        >>> tn, fp, fn, tp, support = get_multiclass_statistics(y_pred, y_true)
        tensor([3., 3., 3., 2., 3.]), tensor([0., 0., 1., 1., 0.]),
        tensor([0., 0., 0., 1., 1.]), tensor([1., 1., 0., 0., 0.]),
        tensor([1., 1., 0., 1., 1.])
    """
    outputs, targets, num_classes = process_multiclass_components(
        outputs=outputs,
        targets=targets,
        argmax_dim=argmax_dim,
        num_classes=num_classes,
    )
    # Five float accumulators (one slot per class), matching torch.zeros'
    # default dtype so the returned tensors stay float.
    accumulators = [
        torch.zeros((num_classes,), device=outputs.device) for _ in range(5)
    ]
    for label in range(num_classes):
        per_label = get_binary_statistics(
            outputs=outputs, targets=targets, label=label
        )
        for accumulator, value in zip(accumulators, per_label):
            accumulator[label] = value
    tn, fp, fn, tp, support = accumulators
    return tn, fp, fn, tp, support
def get_multilabel_statistics(
    outputs: Tensor, targets: Tensor,
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
    """
    Compute per-class tn/fp/fn/tp/support for a multi-label problem.

    Args:
        outputs: estimated targets as predicted by a model
            with shape [bs; ..., (num_classes or 1)]
        targets: ground truth (correct) target values
            with shape [bs; ..., 1]

    Returns:
        Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: stats

    Example:
        >>> y_pred = torch.tensor([[0, 0, 1, 1], [0, 1, 0, 1]])
        >>> y_true = torch.tensor([[0, 1, 0, 1], [0, 0, 1, 1]])
        >>> tn, fp, fn, tp, support = get_multilabel_statistics(y_pred, y_true)
        tensor([2., 0., 0., 0.]) tensor([0., 1., 1., 0.]),
        tensor([0., 1., 1., 0.]) tensor([0., 0., 0., 2.]),
        tensor([0., 1., 1., 2.])

        >>> y_pred = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        >>> y_true = torch.tensor([0, 1, 2])
        >>> tn, fp, fn, tp, support = get_multilabel_statistics(y_pred, y_true)
        tensor([2., 2., 2.]) tensor([0., 0., 0.])
        tensor([0., 0., 0.]) tensor([1., 1., 1.])
        tensor([1., 1., 1.])
    """
    outputs, targets, _ = process_multilabel_components(
        outputs=outputs, targets=targets
    )
    assert outputs.shape == targets.shape
    num_classes = outputs.shape[-1]
    # Five float accumulators (one slot per class).
    accumulators = [
        torch.zeros((num_classes,), device=outputs.device) for _ in range(5)
    ]
    for class_index in range(num_classes):
        # Each column is an independent binary problem with label 1.
        per_class = get_binary_statistics(
            outputs=outputs[..., class_index],
            targets=targets[..., class_index],
            label=1,
        )
        for accumulator, value in zip(accumulators, per_class):
            accumulator[class_index] = value
    tn, fp, fn, tp, support = accumulators
    return tn, fp, fn, tp, support
def get_default_topk_args(num_classes: int) -> Sequence[int]:
    """Calculate list params for ``Accuracy@k`` and ``mAP@k``.

    Args:
        num_classes: number of classes (may be None)

    Returns:
        iterable: array of accuracy arguments

    Examples:
        >>> get_default_topk_args(num_classes=4)
        [1, 3]
        >>> get_default_topk_args(num_classes=8)
        [1, 3, 5]
    """
    topk = [1]
    if num_classes is not None:
        # Add k=3 / k=5 only when there are strictly more classes than k.
        for candidate in (3, 5):
            if num_classes > candidate:
                topk.append(candidate)
    return topk
def wrap_class_metric2dict(
    metric_fn: Callable, class_args: Sequence[str] = None
) -> Callable:
    """# noqa: D202
    Logging wrapper for metrics returning a torch.Tensor of shape
    [num_classes].

    The wrapper computes the metric, pairs each element with a class label
    and adds the mean under "/mean".

    Args:
        metric_fn: metric function to compute
        class_args: class names for logging.
            default: None - class indexes will be used.

    Returns:
        wrapped metric function with Dict output
    """

    def class_metric_with_dict_output(*args, **kwargs):
        per_class = metric_fn(*args, **kwargs)
        # Fall back to "/class_NN" labels when none were supplied.
        labels = class_args or [
            f"/class_{i:02}" for i in range(len(per_class))
        ]
        result = {
            label: value.item() for label, value in zip(labels, per_class)
        }
        result["/mean"] = torch.mean(per_class).item()
        return result

    return class_metric_with_dict_output
def wrap_topk_metric2dict(
    metric_fn: Callable, topk_args: Sequence[int]
) -> Callable:
    """
    Logging wrapper for metrics with
    Sequence[Union[torch.Tensor, int, float, Dict]] output.
    Computes the metric and sync each element from the output sequence
    with passed `topk` argument.

    Args:
        metric_fn: metric function to compute
        topk_args: topk args to sync outputs with

    Returns:
        wrapped metric function with Dict output

    Raises:
        NotImplementedError: if metrics returned values are out of
            torch.Tensor, int, float, Dict union.
    """
    metric_fn = partial(metric_fn, topk=topk_args)

    def topk_metric_with_dict_output(*args, **kwargs):
        output: Sequence = metric_fn(*args, **kwargs)
        if isinstance(output[0], (int, float, torch.Tensor)):
            output = {
                f"{topk_key:02}": metric_value
                for topk_key, metric_value in zip(topk_args, output)
            }
        elif isinstance(output[0], dict):
            # BUGFIX: the original built a *set* of dicts here
            # (`{ {...} for ... }`), which raises TypeError because dicts are
            # unhashable; flatten into a single dict keyed "metricNN" instead.
            output = {
                f"{metric_key}{topk_key:02}": metric_value
                for topk_key, metric_dict_value in zip(topk_args, output)
                for metric_key, metric_value in metric_dict_value.items()
            }
        else:
            raise NotImplementedError()
        return output

    return topk_metric_with_dict_output
# Public names exported by this module.
# NOTE(review): `process_multiclass_components` is defined above but not
# exported here — confirm whether the omission is intentional.
# NOTE(review): the trailing "| catalyst/... |" text on the closing line is
# dataset-extraction residue fused onto the bracket — verify upstream.
__all__ = [
    "process_multilabel_components",
    "get_binary_statistics",
    "get_multiclass_statistics",
    "get_multilabel_statistics",
    "get_default_topk_args",
    "wrap_topk_metric2dict",
    "wrap_class_metric2dict",
] | catalyst/metrics/functional.py | from typing import Callable, Dict, Optional, Sequence, Tuple
from functools import partial
import numpy as np
import torch
from torch import Tensor
from torch.nn import functional as F
# @TODO:
# after full classification metrics re-implementation, make a reference to
# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics
# as a baseline
def process_multiclass_components(
    outputs: torch.Tensor,
    targets: torch.Tensor,
    argmax_dim: int = -1,
    num_classes: Optional[int] = None,
) -> Tuple[torch.Tensor, torch.Tensor, int]:
    """
    Preprocess input in case multiclass classification task.

    Args:
        outputs: estimated targets as predicted by a model
            with shape [bs; ..., (num_classes or 1)]
        targets: ground truth (correct) target values
            with shape [bs; ..., 1]
        argmax_dim: int, that specifies dimension for argmax transformation
            in case of scores/probabilities in ``outputs``
        num_classes: int, that specifies number of classes if it known

    Returns:
        preprocessed outputs (shape [bs; 1]), targets (shape [bs; 1])
        and num_classes
    """
    # @TODO: better multiclass preprocessing, label -> class_id mapping
    if not torch.is_tensor(outputs):
        outputs = torch.from_numpy(np.array(outputs))
    if not torch.is_tensor(targets):
        targets = torch.from_numpy(np.array(targets))
    if outputs.dim() == targets.dim() + 1:
        # looks like we have scores/probabilities in our outputs
        # let's convert them to final model predictions
        num_classes = max(
            outputs.shape[argmax_dim], int(targets.max().detach().item() + 1)
        )
        outputs = torch.argmax(outputs, dim=argmax_dim)
    if num_classes is None:
        # as far as we expect the outputs/targets tensors to be int64
        # we could find number of classes as max available number
        num_classes = max(
            int(outputs.max().detach().item() + 1),
            int(targets.max().detach().item() + 1),
        )
    if outputs.dim() == 1:
        outputs = outputs.view(-1, 1)
    elif outputs.dim() == 2 and outputs.size(0) == 1:
        # transpose case: [1; bs] -> [bs; 1]
        # BUGFIX: ``permute`` returns a new tensor; the result was previously
        # discarded, making the transpose a no-op.
        outputs = outputs.permute(1, 0)
    else:
        assert outputs.size(1) == 1 and outputs.dim() == 2, (
            "Wrong `outputs` shape, "
            "expected 1D or 2D with size 1 in the second dim "
            "got {}".format(outputs.shape)
        )
    if targets.dim() == 1:
        targets = targets.view(-1, 1)
    elif targets.dim() == 2 and targets.size(0) == 1:
        # transpose case: [1; bs] -> [bs; 1]
        # BUGFIX: assign the permuted tensor (was a no-op before).
        targets = targets.permute(1, 0)
    else:
        # BUGFIX: message said "outputs" for the targets check.
        assert targets.size(1) == 1 and targets.dim() == 2, (
            "Wrong `targets` shape, "
            "expected 1D or 2D with size 1 in the second dim"
        )
    return outputs, targets, num_classes
def process_multilabel_components(
    outputs: torch.Tensor,
    targets: torch.Tensor,
    weights: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """General preprocessing for multi-label-based metrics.

    Args:
        outputs: NxK tensor that for each of the N examples
            indicates the probability of the example belonging to each of
            the K classes, according to the model.
        targets: binary NxK tensor that encodes which of the K
            classes are associated with the N-th input
            (eg: a row [0, 1, 0, 1] indicates that the example is
            associated with classes 2 and 4)
        weights: importance for each sample (1D, length N), optional

    Returns:
        processed ``outputs``, ``targets`` (both [batch_size; num_classes])
        and ``weights`` (unchanged apart from squeeze, or None)
    """
    # Accept numpy arrays as well as tensors.
    if not torch.is_tensor(outputs):
        outputs = torch.from_numpy(outputs)
    if not torch.is_tensor(targets):
        targets = torch.from_numpy(targets)
    if weights is not None:
        if not torch.is_tensor(weights):
            weights = torch.from_numpy(weights)
        weights = weights.squeeze()
    if outputs.dim() == 1:
        outputs = outputs.view(-1, 1)
    else:
        assert outputs.dim() == 2, (
            "wrong `outputs` size "
            "(should be 1D or 2D with one column per class)"
        )
    if targets.dim() == 1:
        if outputs.shape[1] > 1:
            # multi-class case: expand class-index targets to one-hot rows
            num_classes = outputs.shape[1]
            targets = F.one_hot(targets, num_classes).float()
        else:
            # binary case
            targets = targets.view(-1, 1)
    else:
        assert targets.dim() == 2, (
            "wrong `targets` size "
            "(should be 1D or 2D with one column per class)"
        )
    if weights is not None:
        assert weights.dim() == 1, "Weights dimension should be 1"
        assert weights.numel() == targets.size(
            0
        ), "Weights dimension 1 should be the same as that of target"
        assert torch.min(weights) >= 0, "Weight should be non-negative only"
    # x**2 == x holds element-wise iff every entry is 0 or 1.
    assert torch.equal(
        targets ** 2, targets
    ), "targets should be binary (0 or 1)"
    return outputs, targets, weights
def get_binary_statistics(
    outputs: Tensor, targets: Tensor, label: int = 1,
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
    """
    Count true negatives, false positives, false negatives, true positives
    and support for a binary classification problem for a given label.

    Args:
        outputs: estimated targets as predicted by a model
            with shape [bs; ..., 1]
        targets: ground truth (correct) target values
            with shape [bs; ..., 1]
        label: integer, that specifies label of interest for statistics compute

    Returns:
        Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: stats

    Example:
        >>> y_pred = torch.tensor([[0, 0, 1, 1, 0, 1, 0, 1]])
        >>> y_true = torch.tensor([[0, 1, 0, 1, 0, 0, 1, 1]])
        >>> tn, fp, fn, tp, support = get_binary_statistics(y_pred, y_true)
        tensor(2) tensor(2) tensor(2) tensor(2) tensor(4)
    """
    predicted_pos = outputs == label
    predicted_neg = outputs != label
    actual_pos = targets == label
    actual_neg = targets != label
    tn = (predicted_neg * actual_neg).to(torch.long).sum()
    fp = (predicted_pos * actual_neg).to(torch.long).sum()
    fn = (predicted_neg * actual_pos).to(torch.long).sum()
    tp = (predicted_pos * actual_pos).to(torch.long).sum()
    support = actual_pos.to(torch.long).sum()
    return tn, fp, fn, tp, support
def get_multiclass_statistics(
    outputs: Tensor,
    targets: Tensor,
    argmax_dim: int = -1,
    num_classes: Optional[int] = None,
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
    """
    Computes the number of true negatives, false positives,
    false negatives, true positives and support
    for a multi-class classification problem.

    Args:
        outputs: estimated targets as predicted by a model
            with shape [bs; ..., (num_classes or 1)]
        targets: ground truth (correct) target values
            with shape [bs; ..., 1]
        argmax_dim: int, that specifies dimension for argmax transformation
            in case of scores/probabilities in ``outputs``
        num_classes: int, that specifies number of classes if it known

    Returns:
        Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: stats
            (tn, fp, fn, tp, support), each of shape [num_classes]

    Example:
        >>> y_pred = torch.tensor([1, 2, 3, 0])
        >>> y_true = torch.tensor([1, 3, 4, 0])
        >>> tn, fp, fn, tp, support = get_multiclass_statistics(y_pred, y_true)
        tensor([3., 3., 3., 2., 3.]), tensor([0., 0., 1., 1., 0.]),
        tensor([0., 0., 0., 1., 1.]), tensor([1., 1., 0., 0., 0.]),
        tensor([1., 1., 0., 1., 1.])
    """
    # Normalize the inputs (argmax over scores if needed, infer num_classes).
    outputs, targets, num_classes = process_multiclass_components(
        outputs=outputs,
        targets=targets,
        argmax_dim=argmax_dim,
        num_classes=num_classes,
    )
    # One slot per class; filled by treating each class as a one-vs-rest
    # binary problem via get_binary_statistics.
    tn = torch.zeros((num_classes,), device=outputs.device)
    fp = torch.zeros((num_classes,), device=outputs.device)
    fn = torch.zeros((num_classes,), device=outputs.device)
    tp = torch.zeros((num_classes,), device=outputs.device)
    support = torch.zeros((num_classes,), device=outputs.device)
    for class_index in range(num_classes):
        (
            tn[class_index],
            fp[class_index],
            fn[class_index],
            tp[class_index],
            support[class_index],
        ) = get_binary_statistics(
            outputs=outputs, targets=targets, label=class_index
        )
    return tn, fp, fn, tp, support
def get_multilabel_statistics(
    outputs: Tensor, targets: Tensor,
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
    """
    Computes the number of true negatives, false positives,
    false negatives, true positives and support
    for a multi-label classification problem.

    Args:
        outputs: estimated targets as predicted by a model
            with shape [bs; ..., (num_classes or 1)]
        targets: ground truth (correct) target values
            with shape [bs; ..., 1]

    Returns:
        Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: stats

    Example:
        >>> y_pred = torch.tensor([[0, 0, 1, 1], [0, 1, 0, 1]])
        >>> y_true = torch.tensor([[0, 1, 0, 1], [0, 0, 1, 1]])
        >>> tn, fp, fn, tp, support = get_multilabel_statistics(y_pred, y_true)
        tensor([2., 0., 0., 0.]) tensor([0., 1., 1., 0.]),
        tensor([0., 1., 1., 0.]) tensor([0., 0., 0., 2.]),
        tensor([0., 1., 1., 2.])
    """
    outputs, targets, _ = process_multilabel_components(
        outputs=outputs, targets=targets
    )
    assert outputs.shape == targets.shape
    num_classes = outputs.shape[-1]

    # Allocate one accumulator per statistic, each with a slot per class.
    stats = tuple(
        torch.zeros((num_classes,), device=outputs.device) for _ in range(5)
    )
    tn, fp, fn, tp, support = stats
    # Each label column is an independent binary problem with positive label 1.
    for class_index in range(num_classes):
        per_class = get_binary_statistics(
            outputs=outputs[..., class_index],
            targets=targets[..., class_index],
            label=1,
        )
        for accumulator, value in zip(stats, per_class):
            accumulator[class_index] = value
    return tn, fp, fn, tp, support
def get_default_topk_args(num_classes: int) -> Sequence[int]:
    """Calculate list params for ``Accuracy@k`` and ``mAP@k``.

    Args:
        num_classes: number of classes (``None`` yields just ``[1]``)

    Returns:
        iterable: array of accuracy arguments

    Examples:
        >>> get_default_topk_args(num_classes=4)
        [1, 3]
        >>> get_default_topk_args(num_classes=8)
        [1, 3, 5]
    """
    if num_classes is None:
        return [1]
    # k=1 is always reported; 3 and 5 only when there are enough classes.
    return [k for k in (1, 3, 5) if k == 1 or num_classes > k]
def wrap_class_metric2dict(
    metric_fn: Callable, class_args: Sequence[str] = None
) -> Callable:
    """# noqa: D202
    Logging wrapper for metrics with torch.Tensor output
    and [num_classes] shape.
    Computes the metric and syncs each element of the output tensor
    with the corresponding per-class key, plus an overall "/mean" entry.

    Args:
        metric_fn: metric function to compute
        class_args: class names for logging.
            default: None - class indexes will be used.

    Returns:
        wrapped metric function with Dict output
    """

    def class_metric_with_dict_output(*args, **kwargs):
        per_class = metric_fn(*args, **kwargs)
        keys = class_args or [
            f"/class_{idx:02}" for idx in range(len(per_class))
        ]
        result = {key: value.item() for key, value in zip(keys, per_class)}
        result["/mean"] = torch.mean(per_class).item()
        return result

    return class_metric_with_dict_output
def wrap_topk_metric2dict(
    metric_fn: Callable, topk_args: Sequence[int]
) -> Callable:
    """
    Logging wrapper for metrics with
    Sequence[Union[torch.Tensor, int, float, Dict]] output.
    Computes the metric and syncs each element from the output sequence
    with the passed `topk` argument.

    Args:
        metric_fn: metric function to compute
        topk_args: topk args to sync outputs with

    Returns:
        wrapped metric function with Dict output

    Raises:
        NotImplementedError: if metrics returned values are out of
            torch.Tensor, int, float, Dict union.
    """
    metric_fn = partial(metric_fn, topk=topk_args)

    def topk_metric_with_dict_output(*args, **kwargs):
        output: Sequence = metric_fn(*args, **kwargs)

        if isinstance(output[0], (int, float, torch.Tensor)):
            output = {
                f"{topk_key:02}": metric_value
                for topk_key, metric_value in zip(topk_args, output)
            }
        elif isinstance(output[0], dict):
            # Flatten the per-topk dicts into one flat dict keyed by
            # "<metric_name><topk>". (The previous implementation built a
            # *set* of dicts here, which raised TypeError: dict is unhashable.)
            output = {
                f"{metric_key}{topk_key:02}": metric_value
                for topk_key, metric_dict_value in zip(topk_args, output)
                for metric_key, metric_value in metric_dict_value.items()
            }
        else:
            raise NotImplementedError()

        return output

    return topk_metric_with_dict_output
# Public API of this module. (Removed trailing dataset-extraction residue
# that had been fused onto the closing bracket and broke the file.)
__all__ = [
    "process_multilabel_components",
    "get_binary_statistics",
    "get_multiclass_statistics",
    "get_multilabel_statistics",
    "get_default_topk_args",
    "wrap_topk_metric2dict",
    "wrap_class_metric2dict",
]
import sys
from jsonschema import ValidationError
from .error import MetadataNotFoundError
from .metadata_app_utils import AppBase, CliOption, Flag, SchemaProperty, MetadataSchemaProperty
from .metadata import Metadata
from .manager import MetadataManager
from .schema import SchemaManager
class NamespaceBase(AppBase):
    """Simple attribute-only base class for the various namespace subcommand classes """

    # These will be set on class creation when subcommand creates the namespace-specific class
    description = None  # help text for this namespace subcommand
    namespace = None    # metadata namespace this subcommand operates on
    schemas = None      # mapping of schema_name -> schema for the namespace
    options = []        # CliOption/Flag instances; subclasses override

    def print_help(self):
        # Base help first, then one help entry per registered option.
        super(NamespaceBase, self).print_help()
        print()
        print("Options")
        print("-------")
        print()
        for option in self.options:
            option.print_help()

    def start(self):
        # Process client options since all subclasses are option processer
        self.process_cli_options(self.options)
class NamespaceList(NamespaceBase):
    """Handles the 'list' subcommand functionality for a specific namespace."""

    json_flag = Flag("--json", name='json',
                     description='List complete instances as JSON', default_value=False)

    valid_only_flag = Flag("--valid-only", name='valid-only',
                           description='Only list valid instances (default includes invalid instances)',
                           default_value=False)

    # 'List' flags
    options = [json_flag, valid_only_flag]

    def __init__(self, **kwargs):
        super(NamespaceList, self).__init__(**kwargs)
        self.metadata_manager = MetadataManager(namespace=self.namespace)

    def start(self):
        """List the namespace's instances, either as raw JSON or as a padded table."""
        self.process_cli_options(self.options)  # process options

        include_invalid = not self.valid_only_flag.value
        try:
            metadata_instances = self.metadata_manager.get_all(include_invalid=include_invalid)
        except MetadataNotFoundError:
            metadata_instances = None

        if self.json_flag.value:
            if metadata_instances is None:
                metadata_instances = []
            print(metadata_instances)
        else:
            if not metadata_instances:
                print("No metadata instances found for {}".format(self.namespace))
                return

            validity_clause = "includes invalid" if include_invalid else "valid only"
            print("Available metadata instances for {} ({}):".format(self.namespace, validity_clause))

            sorted_instances = sorted(metadata_instances, key=lambda inst: (inst.schema_name, inst.name))
            # pad to width of longest instance
            max_schema_name_len = len('Schema')
            max_name_len = len('Instance')
            max_resource_len = len('Resource')
            for instance in sorted_instances:
                max_schema_name_len = max(len(instance.schema_name), max_schema_name_len)
                max_name_len = max(len(instance.name), max_name_len)
                max_resource_len = max(len(instance.resource), max_resource_len)

            print()
            print("%s %s %s " % ('Schema'.ljust(max_schema_name_len),
                                 'Instance'.ljust(max_name_len),
                                 'Resource'.ljust(max_resource_len)))
            print("%s %s %s " % ('------'.ljust(max_schema_name_len),
                                 '--------'.ljust(max_name_len),
                                 '--------'.ljust(max_resource_len)))
            for instance in sorted_instances:
                invalid = ""
                # Instances carrying a non-empty `reason` are flagged as invalid.
                if instance.reason and len(instance.reason) > 0:
                    invalid = "**INVALID** ({})".format(instance.reason)
                print("%s %s %s %s" % (instance.schema_name.ljust(max_schema_name_len),
                                       instance.name.ljust(max_name_len),
                                       instance.resource.ljust(max_resource_len),
                                       invalid))
class NamespaceRemove(NamespaceBase):
    """Handles the 'remove' subcommand functionality for a specific namespace."""

    name_option = CliOption("--name", name='name',
                            description='The name of the metadata instance to remove', required=True)

    # 'Remove' options
    options = [name_option]

    def __init__(self, **kwargs):
        super(NamespaceRemove, self).__init__(**kwargs)
        self.metadata_manager = MetadataManager(namespace=self.namespace)

    def start(self):
        """Confirm the instance exists (valid or not), then remove it."""
        super(NamespaceRemove, self).start()  # process options

        name = self.name_option.value
        try:
            # Fetch first so a missing instance produces a clear error before removal.
            self.metadata_manager.get(name)
        except MetadataNotFoundError as mnfe:
            self.log_and_exit(mnfe)
        except ValidationError:  # Probably deleting invalid instance
            pass

        self.metadata_manager.remove(name)
        print("Metadata instance '{}' removed from namespace '{}'.".format(name, self.namespace))
class NamespaceInstall(NamespaceBase):
    """Handles the 'install' subcommand functionality for a specific namespace."""

    # Known options, others will be derived from schema based on schema_name...
    replace_flag = Flag("--replace", name='replace',
                        description='Replace existing instance', default_value=False)

    schema_name_option = CliOption("--schema_name", name='schema_name',
                                   description='The schema_name of the metadata instance to install', required=True)

    name_option = CliOption("--name", name='name',
                            description='The name of the metadata instance to install', required=True)

    # 'Install' options
    options = [replace_flag, schema_name_option, name_option]

    def __init__(self, **kwargs):
        super(NamespaceInstall, self).__init__(**kwargs)
        self.metadata_manager = MetadataManager(namespace=self.namespace)

        # First, process the schema_name option so we can then load the appropriate schema
        # file to build the schema-based options. If help is requested, give it to them.
        self.process_cli_option(self.schema_name_option, check_help=True)

        schema_name = self.schema_name_option.value

        # If schema is not registered in the set of schemas for this namespace, bail.
        if schema_name not in self.schemas:
            self.log_and_exit("Schema name '{}' not found in {} schemas!".format(schema_name, self.namespace))

        # Schema appears to be a valid name, convert its properties to options and continue
        schema = self.schemas[schema_name]
        self.schema_options = NamespaceInstall.schema_to_options(schema)
        # Rebind to a new instance-level list instead of extending in place:
        # `options` is a class attribute shared across instances (and across the
        # per-namespace subclasses created by SubcommandBase), so extend() would
        # accumulate schema options into every later install invocation.
        self.options = self.options + self.schema_options

    def start(self):
        """Process all options, build the Metadata instance, and persist it."""
        super(NamespaceInstall, self).start()  # process options

        # Get known options, then gather display_name and build metadata dict.
        name = self.name_option.value
        schema_name = self.schema_name_option.value
        display_name = None

        metadata = {}
        # Walk the options looking for SchemaProperty instances. Any MetadataSchemaProperty instances go
        # into the metadata dict.
        for option in self.options:
            if isinstance(option, SchemaProperty):
                if option.name == 'display_name':  # Be sure we have a display_name
                    display_name = option.value
                    continue
            if isinstance(option, MetadataSchemaProperty):
                # skip adding any non required properties that have no value (unless its a null type).
                if not option.required and not option.value and option.type != 'null':
                    continue
                metadata[option.name] = option.value

        if display_name is None:
            self.log_and_exit("Could not determine display_name from schema '{}'".format(schema_name))

        instance = Metadata(schema_name=schema_name, name=name,
                            display_name=display_name, metadata=metadata)

        ex_msg = None
        new_instance = None
        try:
            if self.replace_flag.value:
                new_instance = self.metadata_manager.update(name, instance)
            else:
                new_instance = self.metadata_manager.create(name, instance)
        except Exception as ex:
            ex_msg = str(ex)

        if new_instance:
            print("Metadata instance '{}' for schema '{}' has been written to: {}"
                  .format(name, schema_name, new_instance.resource))
        else:
            if ex_msg:
                self.log_and_exit("The following exception occurred saving metadata instance '{}' for schema '{}': {}"
                                  .format(name, schema_name, ex_msg), display_help=True)
            else:
                self.log_and_exit("A failure occurred saving metadata instance '{}' for schema '{}'."
                                  .format(name, schema_name), display_help=True)
class SubcommandBase(AppBase):
    """Handles building the appropriate subcommands based on existing namespaces."""

    # NOTE: the misspelling 'desciption' is load-bearing — subclasses set this
    # exact attribute name, so it must not be "fixed" in isolation.
    subcommand_desciption = None  # Overridden in subclass
    namespace_base_class = None  # Overridden in subclass

    def __init__(self, **kwargs):
        super(SubcommandBase, self).__init__(**kwargs)
        self.namespace_schemas = kwargs['namespace_schemas']

        # For each namespace in current schemas, add a corresponding subcommand
        # This requires a new subclass of the NamespaceList class with an appropriate description
        self.subcommands = {}
        for namespace, schemas in self.namespace_schemas.items():
            subcommand_desciption = self.subcommand_desciption.format(namespace=namespace)
            # Create the appropriate namespace class, initialized with its description,
            # namespace, and corresponding schemas as attributes,
            namespace_class = type(namespace, (self.namespace_base_class,),
                                   {'description': subcommand_desciption,
                                    'namespace': namespace,
                                    'schemas': schemas})
            self.subcommands[namespace] = (namespace_class, namespace_class.description)

    def start(self):
        # Dispatch to the namespace-specific subcommand instance.
        subcommand = self.get_subcommand()
        if subcommand is None:
            self.exit_no_subcommand()

        subinstance = subcommand[0](argv=self.argv, namespace_schemas=self.namespace_schemas)
        return subinstance.start()

    def print_help(self):
        super(SubcommandBase, self).print_help()
        self.print_subcommands()
class List(SubcommandBase):
    """Top-level 'list' subcommand; delegates to a per-namespace NamespaceList."""

    description = "List metadata instances for a given namespace."
    subcommand_desciption = "List installed metadata for {namespace}."
    namespace_base_class = NamespaceList
class Remove(SubcommandBase):
    """Top-level 'remove' subcommand; delegates to a per-namespace NamespaceRemove."""

    description = "Remove a metadata instance from a given namespace."
    subcommand_desciption = "Remove a metadata instance from namespace '{namespace}'."
    namespace_base_class = NamespaceRemove
class Install(SubcommandBase):
    """Top-level 'install' subcommand; delegates to a per-namespace NamespaceInstall."""

    description = "Install a metadata instance into a given namespace."
    subcommand_desciption = "Install a metadata instance into namespace '{namespace}'."
    namespace_base_class = NamespaceInstall
class MetadataApp(AppBase):
    """Lists, installs and removes metadata for a given namespace."""

    name = "elyra-metadata"
    description = """Manage Elyra metadata."""

    # Top-level subcommand table: cli token -> (handler class, one-line help).
    subcommands = {
        'list': (List, List.description.splitlines()[0]),
        'install': (Install, Install.description.splitlines()[0]),
        'remove': (Remove, Remove.description.splitlines()[0]),
    }

    @classmethod
    def main(cls):
        """CLI entry point: build the app from argv and run it."""
        elyra_metadata = cls(argv=sys.argv[1:])
        elyra_metadata.start()

    def __init__(self, **kwargs):
        super(MetadataApp, self).__init__(**kwargs)
        self.namespace_schemas = SchemaManager.load_namespace_schemas()

    def start(self):
        # Dispatch to the selected subcommand, passing the loaded schemas along.
        subcommand = self.get_subcommand()
        if subcommand is None:
            self.exit_no_subcommand()

        subinstance = subcommand[0](argv=self.argv, namespace_schemas=self.namespace_schemas)
        return subinstance.start()

    def print_help(self):
        super(MetadataApp, self).print_help()
        self.print_subcommands()
# Script entry point. (Removed dataset-extraction residue that had been
# fused onto this line and made it invalid Python.)
if __name__ == '__main__':
    MetadataApp.main()
from jsonschema import ValidationError
from .error import MetadataNotFoundError
from .metadata_app_utils import AppBase, CliOption, Flag, SchemaProperty, MetadataSchemaProperty
from .metadata import Metadata
from .manager import MetadataManager
from .schema import SchemaManager
class NamespaceBase(AppBase):
    """Simple attribute-only base class for the various namespace subcommand classes """

    # These will be set on class creation when subcommand creates the namespace-specific class
    description = None  # help text for this namespace subcommand
    namespace = None    # metadata namespace this subcommand operates on
    schemas = None      # mapping of schema_name -> schema for the namespace
    options = []        # CliOption/Flag instances; subclasses override

    def print_help(self):
        # Base help first, then one help entry per registered option.
        super(NamespaceBase, self).print_help()
        print()
        print("Options")
        print("-------")
        print()
        for option in self.options:
            option.print_help()

    def start(self):
        # Process client options since all subclasses are option processer
        self.process_cli_options(self.options)
class NamespaceList(NamespaceBase):
    """Handles the 'list' subcommand functionality for a specific namespace."""

    json_flag = Flag("--json", name='json',
                     description='List complete instances as JSON', default_value=False)

    valid_only_flag = Flag("--valid-only", name='valid-only',
                           description='Only list valid instances (default includes invalid instances)',
                           default_value=False)

    # 'List' flags
    options = [json_flag, valid_only_flag]

    def __init__(self, **kwargs):
        super(NamespaceList, self).__init__(**kwargs)
        self.metadata_manager = MetadataManager(namespace=self.namespace)

    def start(self):
        """List the namespace's instances, either as raw JSON or as a padded table."""
        self.process_cli_options(self.options)  # process options

        include_invalid = not self.valid_only_flag.value
        try:
            metadata_instances = self.metadata_manager.get_all(include_invalid=include_invalid)
        except MetadataNotFoundError:
            metadata_instances = None

        if self.json_flag.value:
            if metadata_instances is None:
                metadata_instances = []
            print(metadata_instances)
        else:
            if not metadata_instances:
                print("No metadata instances found for {}".format(self.namespace))
                return

            validity_clause = "includes invalid" if include_invalid else "valid only"
            print("Available metadata instances for {} ({}):".format(self.namespace, validity_clause))

            sorted_instances = sorted(metadata_instances, key=lambda inst: (inst.schema_name, inst.name))
            # pad to width of longest instance
            max_schema_name_len = len('Schema')
            max_name_len = len('Instance')
            max_resource_len = len('Resource')
            for instance in sorted_instances:
                max_schema_name_len = max(len(instance.schema_name), max_schema_name_len)
                max_name_len = max(len(instance.name), max_name_len)
                max_resource_len = max(len(instance.resource), max_resource_len)

            print()
            print("%s %s %s " % ('Schema'.ljust(max_schema_name_len),
                                 'Instance'.ljust(max_name_len),
                                 'Resource'.ljust(max_resource_len)))
            print("%s %s %s " % ('------'.ljust(max_schema_name_len),
                                 '--------'.ljust(max_name_len),
                                 '--------'.ljust(max_resource_len)))
            for instance in sorted_instances:
                invalid = ""
                # Instances carrying a non-empty `reason` are flagged as invalid.
                if instance.reason and len(instance.reason) > 0:
                    invalid = "**INVALID** ({})".format(instance.reason)
                print("%s %s %s %s" % (instance.schema_name.ljust(max_schema_name_len),
                                       instance.name.ljust(max_name_len),
                                       instance.resource.ljust(max_resource_len),
                                       invalid))
class NamespaceRemove(NamespaceBase):
    """Handles the 'remove' subcommand functionality for a specific namespace."""

    name_option = CliOption("--name", name='name',
                            description='The name of the metadata instance to remove', required=True)

    # 'Remove' options
    options = [name_option]

    def __init__(self, **kwargs):
        super(NamespaceRemove, self).__init__(**kwargs)
        self.metadata_manager = MetadataManager(namespace=self.namespace)

    def start(self):
        """Confirm the instance exists (valid or not), then remove it."""
        super(NamespaceRemove, self).start()  # process options

        name = self.name_option.value
        try:
            # Fetch first so a missing instance produces a clear error before removal.
            self.metadata_manager.get(name)
        except MetadataNotFoundError as mnfe:
            self.log_and_exit(mnfe)
        except ValidationError:  # Probably deleting invalid instance
            pass

        self.metadata_manager.remove(name)
        print("Metadata instance '{}' removed from namespace '{}'.".format(name, self.namespace))
class NamespaceInstall(NamespaceBase):
    """Handles the 'install' subcommand functionality for a specific namespace."""

    # Known options, others will be derived from schema based on schema_name...
    replace_flag = Flag("--replace", name='replace',
                        description='Replace existing instance', default_value=False)

    schema_name_option = CliOption("--schema_name", name='schema_name',
                                   description='The schema_name of the metadata instance to install', required=True)

    name_option = CliOption("--name", name='name',
                            description='The name of the metadata instance to install', required=True)

    # 'Install' options
    options = [replace_flag, schema_name_option, name_option]

    def __init__(self, **kwargs):
        super(NamespaceInstall, self).__init__(**kwargs)
        self.metadata_manager = MetadataManager(namespace=self.namespace)

        # First, process the schema_name option so we can then load the appropriate schema
        # file to build the schema-based options. If help is requested, give it to them.
        self.process_cli_option(self.schema_name_option, check_help=True)

        schema_name = self.schema_name_option.value

        # If schema is not registered in the set of schemas for this namespace, bail.
        if schema_name not in self.schemas:
            self.log_and_exit("Schema name '{}' not found in {} schemas!".format(schema_name, self.namespace))

        # Schema appears to be a valid name, convert its properties to options and continue
        schema = self.schemas[schema_name]
        self.schema_options = NamespaceInstall.schema_to_options(schema)
        # Rebind to a new instance-level list instead of extending in place:
        # `options` is a class attribute shared across instances (and across the
        # per-namespace subclasses created by SubcommandBase), so extend() would
        # accumulate schema options into every later install invocation.
        self.options = self.options + self.schema_options

    def start(self):
        """Process all options, build the Metadata instance, and persist it."""
        super(NamespaceInstall, self).start()  # process options

        # Get known options, then gather display_name and build metadata dict.
        name = self.name_option.value
        schema_name = self.schema_name_option.value
        display_name = None

        metadata = {}
        # Walk the options looking for SchemaProperty instances. Any MetadataSchemaProperty instances go
        # into the metadata dict.
        for option in self.options:
            if isinstance(option, SchemaProperty):
                if option.name == 'display_name':  # Be sure we have a display_name
                    display_name = option.value
                    continue
            if isinstance(option, MetadataSchemaProperty):
                # skip adding any non required properties that have no value (unless its a null type).
                if not option.required and not option.value and option.type != 'null':
                    continue
                metadata[option.name] = option.value

        if display_name is None:
            self.log_and_exit("Could not determine display_name from schema '{}'".format(schema_name))

        instance = Metadata(schema_name=schema_name, name=name,
                            display_name=display_name, metadata=metadata)

        ex_msg = None
        new_instance = None
        try:
            if self.replace_flag.value:
                new_instance = self.metadata_manager.update(name, instance)
            else:
                new_instance = self.metadata_manager.create(name, instance)
        except Exception as ex:
            ex_msg = str(ex)

        if new_instance:
            print("Metadata instance '{}' for schema '{}' has been written to: {}"
                  .format(name, schema_name, new_instance.resource))
        else:
            if ex_msg:
                self.log_and_exit("The following exception occurred saving metadata instance '{}' for schema '{}': {}"
                                  .format(name, schema_name, ex_msg), display_help=True)
            else:
                self.log_and_exit("A failure occurred saving metadata instance '{}' for schema '{}'."
                                  .format(name, schema_name), display_help=True)
class SubcommandBase(AppBase):
    """Handles building the appropriate subcommands based on existing namespaces."""

    # NOTE: the misspelling 'desciption' is load-bearing — subclasses set this
    # exact attribute name, so it must not be "fixed" in isolation.
    subcommand_desciption = None  # Overridden in subclass
    namespace_base_class = None  # Overridden in subclass

    def __init__(self, **kwargs):
        super(SubcommandBase, self).__init__(**kwargs)
        self.namespace_schemas = kwargs['namespace_schemas']

        # For each namespace in current schemas, add a corresponding subcommand
        # This requires a new subclass of the NamespaceList class with an appropriate description
        self.subcommands = {}
        for namespace, schemas in self.namespace_schemas.items():
            subcommand_desciption = self.subcommand_desciption.format(namespace=namespace)
            # Create the appropriate namespace class, initialized with its description,
            # namespace, and corresponding schemas as attributes,
            namespace_class = type(namespace, (self.namespace_base_class,),
                                   {'description': subcommand_desciption,
                                    'namespace': namespace,
                                    'schemas': schemas})
            self.subcommands[namespace] = (namespace_class, namespace_class.description)

    def start(self):
        # Dispatch to the namespace-specific subcommand instance.
        subcommand = self.get_subcommand()
        if subcommand is None:
            self.exit_no_subcommand()

        subinstance = subcommand[0](argv=self.argv, namespace_schemas=self.namespace_schemas)
        return subinstance.start()

    def print_help(self):
        super(SubcommandBase, self).print_help()
        self.print_subcommands()
class List(SubcommandBase):
    """Top-level 'list' subcommand; delegates to a per-namespace NamespaceList."""

    description = "List metadata instances for a given namespace."
    subcommand_desciption = "List installed metadata for {namespace}."
    namespace_base_class = NamespaceList
class Remove(SubcommandBase):
    """Top-level 'remove' subcommand; delegates to a per-namespace NamespaceRemove."""

    description = "Remove a metadata instance from a given namespace."
    subcommand_desciption = "Remove a metadata instance from namespace '{namespace}'."
    namespace_base_class = NamespaceRemove
class Install(SubcommandBase):
    """Top-level 'install' subcommand; delegates to a per-namespace NamespaceInstall."""

    description = "Install a metadata instance into a given namespace."
    subcommand_desciption = "Install a metadata instance into namespace '{namespace}'."
    namespace_base_class = NamespaceInstall
class MetadataApp(AppBase):
    """Lists, installs and removes metadata for a given namespace."""

    name = "elyra-metadata"
    description = """Manage Elyra metadata."""

    # Top-level subcommand table: cli token -> (handler class, one-line help).
    subcommands = {
        'list': (List, List.description.splitlines()[0]),
        'install': (Install, Install.description.splitlines()[0]),
        'remove': (Remove, Remove.description.splitlines()[0]),
    }

    @classmethod
    def main(cls):
        """CLI entry point: build the app from argv and run it."""
        elyra_metadata = cls(argv=sys.argv[1:])
        elyra_metadata.start()

    def __init__(self, **kwargs):
        super(MetadataApp, self).__init__(**kwargs)
        self.namespace_schemas = SchemaManager.load_namespace_schemas()

    def start(self):
        # Dispatch to the selected subcommand, passing the loaded schemas along.
        subcommand = self.get_subcommand()
        if subcommand is None:
            self.exit_no_subcommand()

        subinstance = subcommand[0](argv=self.argv, namespace_schemas=self.namespace_schemas)
        return subinstance.start()

    def print_help(self):
        super(MetadataApp, self).print_help()
        self.print_subcommands()
# Script entry point. (Removed dataset-extraction residue that had been
# fused onto this line and made it invalid Python.)
if __name__ == '__main__':
    MetadataApp.main()
import dgl
import numpy as np
from pathlib import Path
import torch
from deepstochlog.term import Term, List
from deepstochlog.context import ContextualizedTerm, Context
from deepstochlog.dataset import ContextualizedTermDataset
root_path = Path(__file__).parent

# Load (downloading on first use) the Citeseer citation graph; dataset[0] is
# the single graph it contains.
dataset = dgl.data.CiteseerGraphDataset()
g = dataset[0]

# get node feature
documents = g.ndata['feat']

# get data split
train_ids = np.where(g.ndata['train_mask'].numpy())[0]
val_ids = np.where(g.ndata['val_mask'].numpy())[0]
test_ids = np.where(g.ndata['test_mask'].numpy())[0]

# get labels
labels = g.ndata['label'].numpy()

edges = []
# Features + labels restricted to the training split (for pretraining).
pretraining_data = documents[train_ids], torch.tensor(labels[train_ids])

# Render every directed edge as a Prolog-style "cite(a, b)." fact.
citations = []
for eid in range(g.num_edges()):
    a, b = g.find_edges(eid)
    a, b = a.numpy().tolist()[0], b.numpy().tolist()[0],
    edges.append((a,b))
    citations.append("cite(%d, %d)." % (a,b))
citations = "\n".join(citations)
def queries_from_ids(ids, labels, is_test = False):
    # NOTE(review): this function appears unfinished/dead — it only builds an
    # empty list and implicitly returns None; CiteseerDataset below provides
    # the query construction. Confirm it is unused before removing.
    queries = []
class CiteseerDataset(ContextualizedTermDataset):
    """Dataset of contextualized ``s(Label, [DocId])`` terms over Citeseer.

    Each item pairs a ground-truth query term with a context shared by all
    items, mapping document ids to feature vectors and class terms to
    label-index tensors.
    """

    def __init__(
            self,
            split: str,
            labels,
            documents):
        # Select node ids for the requested split (module-level mask arrays).
        if split == "train":
            self.ids = train_ids
        elif split == "valid":
            self.ids = val_ids
        elif split == "test":
            self.ids = test_ids
        else:
            # Fixed typo in the error message ("Unkonw" -> "Unknown").
            raise Exception("Unknown split %s" % split)
        self.labels = labels
        self.is_test = True if split in ("test", "valid") else False
        self.documents = documents

        self.dataset = []
        # Shared context: every document's features plus one tensor per class.
        context = {Term(str(i)): d for i, d in enumerate(self.documents)}
        for i in range(6):  # Citeseer has 6 document classes
            context[Term("class" + str(i))] = torch.tensor([i])
        context = Context(context)
        self.queries_for_model = []
        for did in self.ids:
            label = Term("class" + str(self.labels[did]))
            # Ground-truth query (with label) for training/evaluation...
            query = ContextualizedTerm(
                context=context,
                term=Term("s", label, List(did)))
            self.dataset.append(query)
            # ...and the label-free query the model must be able to answer.
            query_model = Term("s", Term("_"), List(did))
            self.queries_for_model.append(query_model)

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, item):
        # Slices are served lazily as a generator over the sliced range.
        if type(item) is slice:
            return (self[i] for i in range(*item.indices(len(self))))
        return self.dataset[item]
train_dataset = CiteseerDataset(split="train", documents=documents, labels=labels)
valid_dataset = CiteseerDataset(split="valid", documents=documents, labels=labels)
test_dataset = CiteseerDataset(split="test", documents=documents, labels=labels)

# All label-free queries the model must support, across every split.
# (Removed dataset-extraction residue fused onto this line.)
queries_for_model = train_dataset.queries_for_model + valid_dataset.queries_for_model + test_dataset.queries_for_model
import numpy as np
from pathlib import Path
import torch
from deepstochlog.term import Term, List
from deepstochlog.context import ContextualizedTerm, Context
from deepstochlog.dataset import ContextualizedTermDataset
root_path = Path(__file__).parent

# Load (downloading on first use) the Citeseer citation graph; dataset[0] is
# the single graph it contains.
dataset = dgl.data.CiteseerGraphDataset()
g = dataset[0]

# get node feature
documents = g.ndata['feat']

# get data split
train_ids = np.where(g.ndata['train_mask'].numpy())[0]
val_ids = np.where(g.ndata['val_mask'].numpy())[0]
test_ids = np.where(g.ndata['test_mask'].numpy())[0]

# get labels
labels = g.ndata['label'].numpy()

edges = []
# Features + labels restricted to the training split (for pretraining).
pretraining_data = documents[train_ids], torch.tensor(labels[train_ids])

# Render every directed edge as a Prolog-style "cite(a, b)." fact.
citations = []
for eid in range(g.num_edges()):
    a, b = g.find_edges(eid)
    a, b = a.numpy().tolist()[0], b.numpy().tolist()[0],
    edges.append((a,b))
    citations.append("cite(%d, %d)." % (a,b))
citations = "\n".join(citations)
def queries_from_ids(ids, labels, is_test = False):
    # NOTE(review): this function appears unfinished/dead — it only builds an
    # empty list and implicitly returns None; CiteseerDataset below provides
    # the query construction. Confirm it is unused before removing.
    queries = []
class CiteseerDataset(ContextualizedTermDataset):
    """Dataset of contextualized ``s(Label, [DocId])`` terms over Citeseer.

    Each item pairs a ground-truth query term with a context shared by all
    items, mapping document ids to feature vectors and class terms to
    label-index tensors.
    """

    def __init__(
            self,
            split: str,
            labels,
            documents):
        # Select node ids for the requested split (module-level mask arrays).
        if split == "train":
            self.ids = train_ids
        elif split == "valid":
            self.ids = val_ids
        elif split == "test":
            self.ids = test_ids
        else:
            # Fixed typo in the error message ("Unkonw" -> "Unknown").
            raise Exception("Unknown split %s" % split)
        self.labels = labels
        self.is_test = True if split in ("test", "valid") else False
        self.documents = documents

        self.dataset = []
        # Shared context: every document's features plus one tensor per class.
        context = {Term(str(i)): d for i, d in enumerate(self.documents)}
        for i in range(6):  # Citeseer has 6 document classes
            context[Term("class" + str(i))] = torch.tensor([i])
        context = Context(context)
        self.queries_for_model = []
        for did in self.ids:
            label = Term("class" + str(self.labels[did]))
            # Ground-truth query (with label) for training/evaluation...
            query = ContextualizedTerm(
                context=context,
                term=Term("s", label, List(did)))
            self.dataset.append(query)
            # ...and the label-free query the model must be able to answer.
            query_model = Term("s", Term("_"), List(did))
            self.queries_for_model.append(query_model)

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, item):
        # Slices are served lazily as a generator over the sliced range.
        if type(item) is slice:
            return (self[i] for i in range(*item.indices(len(self))))
        return self.dataset[item]
train_dataset = CiteseerDataset(split="train", documents=documents, labels=labels)
valid_dataset = CiteseerDataset(split="valid", documents=documents, labels=labels)
test_dataset = CiteseerDataset(split="test", documents=documents, labels=labels)
queries_for_model = train_dataset.queries_for_model + valid_dataset.queries_for_model + test_dataset.queries_for_model | 0.301362 | 0.47171 |
import numpy as np
import matplotlib.pyplot as plt
## Performs LU factorization with partial pivoting
def lup(A):
m = A.shape[0]
U = A.copy()
L, P = np.eye(m), np.eye(m)
for k in range(m-1):
# Selects the index of the max element and swaps the required rows
i = k + np.argmax(abs(U[k:,k]))
U[[k,i],k:] = U[[i,k],k:]
L[[k,i],:k] = L[[i,k],:k]
P[[k,i],:] = P[[i,k],:]
for j in range(k+1,m):
L[j,k] = U[j,k]/U[k,k]
U[j,k:] = U[j,k:] - L[j,k] * U[k,k:]
return P, L, U
## Solves Ax=b using the partial pivoting LU decomposition as done above
def solveLup(P,L,U,b):
# From Ax=b => PAx = Pb
m = P.shape[0]
b = P @ b
# From PAx=b => LUx = b
# Taking Ux=w => Lw = b
# Solving the above equation to get w
w = np.zeros(m)
for i in range(m):
rhs = b[i]
for j in range(i):
rhs = rhs - L[i,j] * w[j]
w[i] = (rhs * 1.)/(L[i,i] * 1.)
# Solving Ux = w to get x
x = np.zeros(m)
i = m-1
while i>=0:
rhs = w[i]
for j in range(i+1,m):
rhs = rhs - U[i,j] * x[j]
x[i] = (rhs * 1.)/(U[i,i] * 1.)
i = i-1
return x.reshape(-1,1)
## Matrix A for which catastrophic rounding results occur
def instabilityMatrix(m):
A = np.eye(m)
A[:,m-1] = 1.
for i in range(1,m):
A[i,:i] = -1.
return A
## Performs rook pivoting of the matrix U
def rook_pivot(U,row,col,k):
row_pivot = k + np.argmax(abs(U[k:,col]))
col_pivot = k + np.argmax(abs(U[row_pivot,k:]))
if row_pivot==row and col_pivot==col:
return row,col
else:
return rook_pivot(U,row_pivot,col_pivot,k)
## Performs LU factorization with rook pivoting
def lupq(A):
m = A.shape[0]
U = A.copy()
L, P, Q = np.eye(m), np.eye(m), np.eye(m)
for k in range(m-1):
# Selects the index of the max element as obtained by rook pivoting
i,l = rook_pivot(U,k,k,k)
# Swaps the k,i rows and k,l columns of U
U[[k,i],k:] = U[[i,k],k:]
U[:,[k,l]] = U[:,[l,k]]
# Swaps the k,i rows of L
L[[k,i],:k] = L[[i,k],:k]
# Swaps the k,i rows of P and k,l columns of Q
P[[k,i],:] = P[[i,k],:]
Q[:,[k,l]] = Q[:,[l,k]]
for j in range(k+1,m):
L[j,k] = U[j,k]/U[k,k]
U[j,k:] = U[j,k:] - L[j,k] * U[k,k:]
return P, Q, L, U
## Solves Ax=b using the rook pivoting LU decomposition as done above
def solveLupq(P,Q,L,U,b):
# Making use of the solveLup function to get an x
x = solveLup(P,L,U,b)
# Now, making use of the Q matrix to get x=Qx
x = Q @ x
return x.reshape(-1,1)
## Generates the plots for growth-factor and backward-error
def get_plots(strin):
m = range(1,61)
rho_1, rho_2 = [],[]
error_1, error_2 = [],[]
for i in m:
A = instabilityMatrix(i)
b = np.random.randn(i,1)
P, L, U = lup(A)
x = (solveLup(P,L,U,b))
rho_1.append((abs(U).max())/(abs(A).max()))
error_1.append((np.linalg.norm((A @ x) - b,2))/(np.linalg.norm(b,2)))
P, Q, L, U = lupq(A)
x = (solveLupq(P, Q, L, U, b))
rho_2.append((abs(U).max())/(abs(A).max()))
error_2.append((np.linalg.norm((A @ x) - b,2))/(np.linalg.norm(b,2)))
if strin=='growth':
plt.plot(m,rho_1,'r',m,rho_2,'b')
plt.xlabel('The dimensions of the matrix')
plt.ylabel('The growth factor corresponding to the Instability matrix')
plt.legend(['LU with partial pivoting','LU with rook pivoting'])
plt.yscale('log')
plt.title('Semi-log plot of growth factor v/s matrix dimensions')
plt.savefig('q6_plot_growth.png')
else:
plt.plot(m,error_1,'r',m,error_2,'b')
plt.xlabel('The dimensions of the matrix')
plt.ylabel('The relative backward error for solving Ax=b')
plt.legend(['LU with partial pivoting','LU with rook pivoting'])
plt.yscale('log')
plt.title('Semi-log plot of Relative Backward Error v/s matrix dimensions')
plt.savefig('q6_plot_error.png')
## Checks for the correctness of the algorithms implemented
def check_correctness():
A = instabilityMatrix(5)
b = np.array([1.0, 2.0, 13.0,4.0,-2.0]).reshape(-1,1)
print('Solved with the decomposition: PA = LU')
P, L, U = lup(A)
print(solveLup(P,L,U,b))
print()
print('Solved with the decomposition: PAQ = LU')
P, Q, L, U = lupq(A)
print(solveLupq(P, Q, L, U, b))
print()
print('Solved using the numpy solver')
print(np.linalg.solve(A,b))
print()
if __name__ == '__main__':
get_plots('error') | A3/Q6.py | import numpy as np
import matplotlib.pyplot as plt
## Performs LU factorization with partial pivoting
def lup(A):
m = A.shape[0]
U = A.copy()
L, P = np.eye(m), np.eye(m)
for k in range(m-1):
# Selects the index of the max element and swaps the required rows
i = k + np.argmax(abs(U[k:,k]))
U[[k,i],k:] = U[[i,k],k:]
L[[k,i],:k] = L[[i,k],:k]
P[[k,i],:] = P[[i,k],:]
for j in range(k+1,m):
L[j,k] = U[j,k]/U[k,k]
U[j,k:] = U[j,k:] - L[j,k] * U[k,k:]
return P, L, U
## Solves Ax=b using the partial pivoting LU decomposition as done above
def solveLup(P,L,U,b):
# From Ax=b => PAx = Pb
m = P.shape[0]
b = P @ b
# From PAx=b => LUx = b
# Taking Ux=w => Lw = b
# Solving the above equation to get w
w = np.zeros(m)
for i in range(m):
rhs = b[i]
for j in range(i):
rhs = rhs - L[i,j] * w[j]
w[i] = (rhs * 1.)/(L[i,i] * 1.)
# Solving Ux = w to get x
x = np.zeros(m)
i = m-1
while i>=0:
rhs = w[i]
for j in range(i+1,m):
rhs = rhs - U[i,j] * x[j]
x[i] = (rhs * 1.)/(U[i,i] * 1.)
i = i-1
return x.reshape(-1,1)
## Matrix A for which catastrophic rounding results occur
def instabilityMatrix(m):
A = np.eye(m)
A[:,m-1] = 1.
for i in range(1,m):
A[i,:i] = -1.
return A
## Performs rook pivoting of the matrix U
def rook_pivot(U,row,col,k):
row_pivot = k + np.argmax(abs(U[k:,col]))
col_pivot = k + np.argmax(abs(U[row_pivot,k:]))
if row_pivot==row and col_pivot==col:
return row,col
else:
return rook_pivot(U,row_pivot,col_pivot,k)
## Performs LU factorization with rook pivoting
def lupq(A):
m = A.shape[0]
U = A.copy()
L, P, Q = np.eye(m), np.eye(m), np.eye(m)
for k in range(m-1):
# Selects the index of the max element as obtained by rook pivoting
i,l = rook_pivot(U,k,k,k)
# Swaps the k,i rows and k,l columns of U
U[[k,i],k:] = U[[i,k],k:]
U[:,[k,l]] = U[:,[l,k]]
# Swaps the k,i rows of L
L[[k,i],:k] = L[[i,k],:k]
# Swaps the k,i rows of P and k,l columns of Q
P[[k,i],:] = P[[i,k],:]
Q[:,[k,l]] = Q[:,[l,k]]
for j in range(k+1,m):
L[j,k] = U[j,k]/U[k,k]
U[j,k:] = U[j,k:] - L[j,k] * U[k,k:]
return P, Q, L, U
## Solves Ax=b using the rook pivoting LU decomposition as done above
def solveLupq(P,Q,L,U,b):
# Making use of the solveLup function to get an x
x = solveLup(P,L,U,b)
# Now, making use of the Q matrix to get x=Qx
x = Q @ x
return x.reshape(-1,1)
## Generates the plots for growth-factor and backward-error
def get_plots(strin):
m = range(1,61)
rho_1, rho_2 = [],[]
error_1, error_2 = [],[]
for i in m:
A = instabilityMatrix(i)
b = np.random.randn(i,1)
P, L, U = lup(A)
x = (solveLup(P,L,U,b))
rho_1.append((abs(U).max())/(abs(A).max()))
error_1.append((np.linalg.norm((A @ x) - b,2))/(np.linalg.norm(b,2)))
P, Q, L, U = lupq(A)
x = (solveLupq(P, Q, L, U, b))
rho_2.append((abs(U).max())/(abs(A).max()))
error_2.append((np.linalg.norm((A @ x) - b,2))/(np.linalg.norm(b,2)))
if strin=='growth':
plt.plot(m,rho_1,'r',m,rho_2,'b')
plt.xlabel('The dimensions of the matrix')
plt.ylabel('The growth factor corresponding to the Instability matrix')
plt.legend(['LU with partial pivoting','LU with rook pivoting'])
plt.yscale('log')
plt.title('Semi-log plot of growth factor v/s matrix dimensions')
plt.savefig('q6_plot_growth.png')
else:
plt.plot(m,error_1,'r',m,error_2,'b')
plt.xlabel('The dimensions of the matrix')
plt.ylabel('The relative backward error for solving Ax=b')
plt.legend(['LU with partial pivoting','LU with rook pivoting'])
plt.yscale('log')
plt.title('Semi-log plot of Relative Backward Error v/s matrix dimensions')
plt.savefig('q6_plot_error.png')
## Checks for the correctness of the algorithms implemented
def check_correctness():
A = instabilityMatrix(5)
b = np.array([1.0, 2.0, 13.0,4.0,-2.0]).reshape(-1,1)
print('Solved with the decomposition: PA = LU')
P, L, U = lup(A)
print(solveLup(P,L,U,b))
print()
print('Solved with the decomposition: PAQ = LU')
P, Q, L, U = lupq(A)
print(solveLupq(P, Q, L, U, b))
print()
print('Solved using the numpy solver')
print(np.linalg.solve(A,b))
print()
if __name__ == '__main__':
get_plots('error') | 0.400046 | 0.513485 |
import dataclasses
from typing import Collection
import structlog
from covidactnow.datapublic.common_fields import CommonFields
from covidactnow.datapublic.common_fields import PdFields
from libs.datasets import timeseries
import pandas as pd
_log = structlog.get_logger()
DROPPING_TIMESERIES_WITH_ONLY_ZEROS = "Dropping timeseries with only zeros"
def drop_all_zero_timeseries(
ds_in: timeseries.MultiRegionDataset, fields: Collection[CommonFields]
) -> timeseries.MultiRegionDataset:
"""Returns a dataset with `fields` timeseries dropped if they contain only NA and 0.
When first built this is dropping a timeseries in Loving County, TX which has so few people
that the all-zero timeseries is likely accurate. It may be worth only applying this to
locations with a population over some threshold. Or perhaps an automatic filter isn't worth
the trouble after all :-(
"""
ts_wide = ds_in.timeseries_wide_dates()
# Separate into timeseries in `fields` and all others.
variable_mask = ts_wide.index.get_level_values(PdFields.VARIABLE).isin(fields)
ts_wide_other_variables = ts_wide.loc[~variable_mask]
ts_wide_variables = ts_wide.loc[variable_mask]
# Keep rows/timeseries that have at least one value that is not 0 or NA
to_keep_mask = ts_wide_variables.replace(pd.NA, 0).any(axis=1)
to_drop = ts_wide_variables.loc[~to_keep_mask].index
if not to_drop.empty:
# Maybe add filtering to not log about the known bad data in OH counties and Loving
# County Texas using a RegionMask(level=County, state=OH) and some kind of RegionMask
# representing counties with a small population.
_log.info(DROPPING_TIMESERIES_WITH_ONLY_ZEROS, dropped=to_drop)
ts_wide_kept = ts_wide_variables.loc[to_keep_mask]
ts_wide_out = pd.concat([ts_wide_kept, ts_wide_other_variables])
# Make a new dataset without the dropped timeseries. This does not drop the tags of the
# dropped timeseries but keeping the provenance tags doesn't seem to be a problem. Maybe it'd
# be cleaner to add a method 'MultiRegionDataset.drop_timeseries' similar to 'remove_regions' or
# move this into 'MultiRegionDataset' similar to 'drop_stale_timeseries'.
return dataclasses.replace(
ds_in, timeseries=ts_wide_out.stack().unstack(PdFields.VARIABLE).sort_index()
) | libs/datasets/sources/zeros_filter.py | import dataclasses
from typing import Collection
import structlog
from covidactnow.datapublic.common_fields import CommonFields
from covidactnow.datapublic.common_fields import PdFields
from libs.datasets import timeseries
import pandas as pd
_log = structlog.get_logger()
DROPPING_TIMESERIES_WITH_ONLY_ZEROS = "Dropping timeseries with only zeros"
def drop_all_zero_timeseries(
ds_in: timeseries.MultiRegionDataset, fields: Collection[CommonFields]
) -> timeseries.MultiRegionDataset:
"""Returns a dataset with `fields` timeseries dropped if they contain only NA and 0.
When first built this is dropping a timeseries in Loving County, TX which has so few people
that the all-zero timeseries is likely accurate. It may be worth only applying this to
locations with a population over some threshold. Or perhaps an automatic filter isn't worth
the trouble after all :-(
"""
ts_wide = ds_in.timeseries_wide_dates()
# Separate into timeseries in `fields` and all others.
variable_mask = ts_wide.index.get_level_values(PdFields.VARIABLE).isin(fields)
ts_wide_other_variables = ts_wide.loc[~variable_mask]
ts_wide_variables = ts_wide.loc[variable_mask]
# Keep rows/timeseries that have at least one value that is not 0 or NA
to_keep_mask = ts_wide_variables.replace(pd.NA, 0).any(axis=1)
to_drop = ts_wide_variables.loc[~to_keep_mask].index
if not to_drop.empty:
# Maybe add filtering to not log about the known bad data in OH counties and Loving
# County Texas using a RegionMask(level=County, state=OH) and some kind of RegionMask
# representing counties with a small population.
_log.info(DROPPING_TIMESERIES_WITH_ONLY_ZEROS, dropped=to_drop)
ts_wide_kept = ts_wide_variables.loc[to_keep_mask]
ts_wide_out = pd.concat([ts_wide_kept, ts_wide_other_variables])
# Make a new dataset without the dropped timeseries. This does not drop the tags of the
# dropped timeseries but keeping the provenance tags doesn't seem to be a problem. Maybe it'd
# be cleaner to add a method 'MultiRegionDataset.drop_timeseries' similar to 'remove_regions' or
# move this into 'MultiRegionDataset' similar to 'drop_stale_timeseries'.
return dataclasses.replace(
ds_in, timeseries=ts_wide_out.stack().unstack(PdFields.VARIABLE).sort_index()
) | 0.750644 | 0.312213 |
from pprint import pformat
from six import iteritems
import re
class Subnet(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'enabled': 'bool',
'gateway': 'str',
'interfaces': 'list[Reference]',
'link_aggregation_group': 'Reference',
'mtu': 'int',
'prefix': 'str',
'services': 'list[str]',
'vlan': 'int'
}
attribute_map = {
'id': 'id',
'name': 'name',
'enabled': 'enabled',
'gateway': 'gateway',
'interfaces': 'interfaces',
'link_aggregation_group': 'link_aggregation_group',
'mtu': 'mtu',
'prefix': 'prefix',
'services': 'services',
'vlan': 'vlan'
}
def __init__(self, id=None, name=None, enabled=None, gateway=None, interfaces=None, link_aggregation_group=None, mtu=None, prefix=None, services=None, vlan=None):
"""
Subnet - a model defined in Swagger
"""
self._id = None
self._name = None
self._enabled = None
self._gateway = None
self._interfaces = None
self._link_aggregation_group = None
self._mtu = None
self._prefix = None
self._services = None
self._vlan = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if enabled is not None:
self.enabled = enabled
if gateway is not None:
self.gateway = gateway
if interfaces is not None:
self.interfaces = interfaces
if link_aggregation_group is not None:
self.link_aggregation_group = link_aggregation_group
if mtu is not None:
self.mtu = mtu
if prefix is not None:
self.prefix = prefix
if services is not None:
self.services = services
if vlan is not None:
self.vlan = vlan
@property
def id(self):
"""
Gets the id of this Subnet.
A non-modifiable, globally unique ID chosen by the system.
:return: The id of this Subnet.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this Subnet.
A non-modifiable, globally unique ID chosen by the system.
:param id: The id of this Subnet.
:type: str
"""
self._id = id
@property
def name(self):
"""
Gets the name of this Subnet.
The name of the object (e.g., a file system or snapshot).
:return: The name of this Subnet.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Subnet.
The name of the object (e.g., a file system or snapshot).
:param name: The name of this Subnet.
:type: str
"""
self._name = name
@property
def enabled(self):
"""
Gets the enabled of this Subnet.
Indicates if subnet is enabled (true) or disabled (false). Enabled by default.
:return: The enabled of this Subnet.
:rtype: bool
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""
Sets the enabled of this Subnet.
Indicates if subnet is enabled (true) or disabled (false). Enabled by default.
:param enabled: The enabled of this Subnet.
:type: bool
"""
self._enabled = enabled
@property
def gateway(self):
"""
Gets the gateway of this Subnet.
The IPv4 or IPv6 address of the gateway through which the specified subnet is to communicate with the network.
:return: The gateway of this Subnet.
:rtype: str
"""
return self._gateway
@gateway.setter
def gateway(self, gateway):
"""
Sets the gateway of this Subnet.
The IPv4 or IPv6 address of the gateway through which the specified subnet is to communicate with the network.
:param gateway: The gateway of this Subnet.
:type: str
"""
self._gateway = gateway
@property
def interfaces(self):
"""
Gets the interfaces of this Subnet.
List of network interfaces associated with this subnet.
:return: The interfaces of this Subnet.
:rtype: list[Reference]
"""
return self._interfaces
@interfaces.setter
def interfaces(self, interfaces):
"""
Sets the interfaces of this Subnet.
List of network interfaces associated with this subnet.
:param interfaces: The interfaces of this Subnet.
:type: list[Reference]
"""
self._interfaces = interfaces
@property
def link_aggregation_group(self):
"""
Gets the link_aggregation_group of this Subnet.
reference of the associated LAG.
:return: The link_aggregation_group of this Subnet.
:rtype: Reference
"""
return self._link_aggregation_group
@link_aggregation_group.setter
def link_aggregation_group(self, link_aggregation_group):
"""
Sets the link_aggregation_group of this Subnet.
reference of the associated LAG.
:param link_aggregation_group: The link_aggregation_group of this Subnet.
:type: Reference
"""
self._link_aggregation_group = link_aggregation_group
@property
def mtu(self):
"""
Gets the mtu of this Subnet.
Maximum message transfer unit (packet) size for the subnet in bytes. MTU setting cannot exceed the MTU of the corresponding physical interface. 1500 by default.
:return: The mtu of this Subnet.
:rtype: int
"""
return self._mtu
@mtu.setter
def mtu(self, mtu):
"""
Sets the mtu of this Subnet.
Maximum message transfer unit (packet) size for the subnet in bytes. MTU setting cannot exceed the MTU of the corresponding physical interface. 1500 by default.
:param mtu: The mtu of this Subnet.
:type: int
"""
if mtu is not None and mtu > 9216:
raise ValueError("Invalid value for `mtu`, must be a value less than or equal to `9216`")
if mtu is not None and mtu < 1280:
raise ValueError("Invalid value for `mtu`, must be a value greater than or equal to `1280`")
self._mtu = mtu
@property
def prefix(self):
"""
Gets the prefix of this Subnet.
The IPv4 or IPv6 address to be associated with the specified subnet.
:return: The prefix of this Subnet.
:rtype: str
"""
return self._prefix
@prefix.setter
def prefix(self, prefix):
"""
Sets the prefix of this Subnet.
The IPv4 or IPv6 address to be associated with the specified subnet.
:param prefix: The prefix of this Subnet.
:type: str
"""
self._prefix = prefix
@property
def services(self):
"""
Gets the services of this Subnet.
The services provided by this subnet, as inherited from all of its interfaces.
:return: The services of this Subnet.
:rtype: list[str]
"""
return self._services
@services.setter
def services(self, services):
"""
Sets the services of this Subnet.
The services provided by this subnet, as inherited from all of its interfaces.
:param services: The services of this Subnet.
:type: list[str]
"""
self._services = services
@property
def vlan(self):
"""
Gets the vlan of this Subnet.
VLAN ID
:return: The vlan of this Subnet.
:rtype: int
"""
return self._vlan
@vlan.setter
def vlan(self, vlan):
"""
Sets the vlan of this Subnet.
VLAN ID
:param vlan: The vlan of this Subnet.
:type: int
"""
self._vlan = vlan
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Subnet):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other | purity_fb/purity_fb_1dot9/models/subnet.py | from pprint import pformat
from six import iteritems
import re
class Subnet(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'enabled': 'bool',
'gateway': 'str',
'interfaces': 'list[Reference]',
'link_aggregation_group': 'Reference',
'mtu': 'int',
'prefix': 'str',
'services': 'list[str]',
'vlan': 'int'
}
attribute_map = {
'id': 'id',
'name': 'name',
'enabled': 'enabled',
'gateway': 'gateway',
'interfaces': 'interfaces',
'link_aggregation_group': 'link_aggregation_group',
'mtu': 'mtu',
'prefix': 'prefix',
'services': 'services',
'vlan': 'vlan'
}
def __init__(self, id=None, name=None, enabled=None, gateway=None, interfaces=None, link_aggregation_group=None, mtu=None, prefix=None, services=None, vlan=None):
"""
Subnet - a model defined in Swagger
"""
self._id = None
self._name = None
self._enabled = None
self._gateway = None
self._interfaces = None
self._link_aggregation_group = None
self._mtu = None
self._prefix = None
self._services = None
self._vlan = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if enabled is not None:
self.enabled = enabled
if gateway is not None:
self.gateway = gateway
if interfaces is not None:
self.interfaces = interfaces
if link_aggregation_group is not None:
self.link_aggregation_group = link_aggregation_group
if mtu is not None:
self.mtu = mtu
if prefix is not None:
self.prefix = prefix
if services is not None:
self.services = services
if vlan is not None:
self.vlan = vlan
@property
def id(self):
"""
Gets the id of this Subnet.
A non-modifiable, globally unique ID chosen by the system.
:return: The id of this Subnet.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this Subnet.
A non-modifiable, globally unique ID chosen by the system.
:param id: The id of this Subnet.
:type: str
"""
self._id = id
@property
def name(self):
"""
Gets the name of this Subnet.
The name of the object (e.g., a file system or snapshot).
:return: The name of this Subnet.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Subnet.
The name of the object (e.g., a file system or snapshot).
:param name: The name of this Subnet.
:type: str
"""
self._name = name
@property
def enabled(self):
"""
Gets the enabled of this Subnet.
Indicates if subnet is enabled (true) or disabled (false). Enabled by default.
:return: The enabled of this Subnet.
:rtype: bool
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""
Sets the enabled of this Subnet.
Indicates if subnet is enabled (true) or disabled (false). Enabled by default.
:param enabled: The enabled of this Subnet.
:type: bool
"""
self._enabled = enabled
@property
def gateway(self):
"""
Gets the gateway of this Subnet.
The IPv4 or IPv6 address of the gateway through which the specified subnet is to communicate with the network.
:return: The gateway of this Subnet.
:rtype: str
"""
return self._gateway
@gateway.setter
def gateway(self, gateway):
"""
Sets the gateway of this Subnet.
The IPv4 or IPv6 address of the gateway through which the specified subnet is to communicate with the network.
:param gateway: The gateway of this Subnet.
:type: str
"""
self._gateway = gateway
@property
def interfaces(self):
"""
Gets the interfaces of this Subnet.
List of network interfaces associated with this subnet.
:return: The interfaces of this Subnet.
:rtype: list[Reference]
"""
return self._interfaces
@interfaces.setter
def interfaces(self, interfaces):
"""
Sets the interfaces of this Subnet.
List of network interfaces associated with this subnet.
:param interfaces: The interfaces of this Subnet.
:type: list[Reference]
"""
self._interfaces = interfaces
@property
def link_aggregation_group(self):
"""
Gets the link_aggregation_group of this Subnet.
reference of the associated LAG.
:return: The link_aggregation_group of this Subnet.
:rtype: Reference
"""
return self._link_aggregation_group
@link_aggregation_group.setter
def link_aggregation_group(self, link_aggregation_group):
"""
Sets the link_aggregation_group of this Subnet.
reference of the associated LAG.
:param link_aggregation_group: The link_aggregation_group of this Subnet.
:type: Reference
"""
self._link_aggregation_group = link_aggregation_group
@property
def mtu(self):
"""
Gets the mtu of this Subnet.
Maximum message transfer unit (packet) size for the subnet in bytes. MTU setting cannot exceed the MTU of the corresponding physical interface. 1500 by default.
:return: The mtu of this Subnet.
:rtype: int
"""
return self._mtu
@mtu.setter
def mtu(self, mtu):
"""
Sets the mtu of this Subnet.
Maximum message transfer unit (packet) size for the subnet in bytes. MTU setting cannot exceed the MTU of the corresponding physical interface. 1500 by default.
:param mtu: The mtu of this Subnet.
:type: int
"""
if mtu is not None and mtu > 9216:
raise ValueError("Invalid value for `mtu`, must be a value less than or equal to `9216`")
if mtu is not None and mtu < 1280:
raise ValueError("Invalid value for `mtu`, must be a value greater than or equal to `1280`")
self._mtu = mtu
@property
def prefix(self):
"""
Gets the prefix of this Subnet.
The IPv4 or IPv6 address to be associated with the specified subnet.
:return: The prefix of this Subnet.
:rtype: str
"""
return self._prefix
@prefix.setter
def prefix(self, prefix):
"""
Sets the prefix of this Subnet.
The IPv4 or IPv6 address to be associated with the specified subnet.
:param prefix: The prefix of this Subnet.
:type: str
"""
self._prefix = prefix
@property
def services(self):
"""
Gets the services of this Subnet.
The services provided by this subnet, as inherited from all of its interfaces.
:return: The services of this Subnet.
:rtype: list[str]
"""
return self._services
@services.setter
def services(self, services):
"""
Sets the services of this Subnet.
The services provided by this subnet, as inherited from all of its interfaces.
:param services: The services of this Subnet.
:type: list[str]
"""
self._services = services
@property
def vlan(self):
"""
Gets the vlan of this Subnet.
VLAN ID
:return: The vlan of this Subnet.
:rtype: int
"""
return self._vlan
@vlan.setter
def vlan(self, vlan):
"""
Sets the vlan of this Subnet.
VLAN ID
:param vlan: The vlan of this Subnet.
:type: int
"""
self._vlan = vlan
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Subnet):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other | 0.69987 | 0.126677 |
import base64
import logging
import os
import sys
import urllib.parse
import uuid
# Installed packages
import boto3
from boto3.dynamodb.conditions import Key
from flask import Blueprint
from flask import Flask
from flask import request
from flask import Response
from prometheus_flask_exporter import PrometheusMetrics
import simplejson as json
# The application
app = Flask(__name__)
metrics = PrometheusMetrics(app)
metrics.info('app_info', 'Database process')
bp = Blueprint('app', __name__)
# default to us-east-1 if no region is specified
# (us-east-1 is the default/only supported region for a starter account)
region = os.getenv('AWS_REGION', 'us-east-1')
# these must be present; if they are missing, we should probably bail now
access_key = os.getenv('AWS_ACCESS_KEY_ID')
secret_access_key = os.getenv('AWS_SECRET_ACCESS_KEY')
# Must be presented to authorize call to `/load`
loader_token = os.getenv('SVC_LOADER_TOKEN')
# In some testing contexts, we pass in the DynamoDB URL
dynamodb_url = os.getenv('DYNAMODB_URL', '')
if dynamodb_url == '':
dynamodb = boto3.resource(
'dynamodb',
region_name=region,
aws_access_key_id=access_key,
aws_secret_access_key=secret_access_key)
else:
# See
# https://stackoverflow.com/questions/31948742/localhost-endpoint-to-dynamodb-local-with-boto3
dynamodb = boto3.resource(
'dynamodb',
endpoint_url=dynamodb_url,
region_name=region,
aws_access_key_id=access_key,
aws_secret_access_key=secret_access_key)
# Change the implementation of this: you should probably have a separate
# driver class for interfacing with a db like dynamodb in a different file.
@bp.route('/update', methods=['PUT'])
def update():
headers = request.headers # noqa: F841
# check header here
content = request.get_json()
objtype = urllib.parse.unquote_plus(request.args.get('objtype'))
objkey = urllib.parse.unquote_plus(request.args.get('objkey'))
table_name = objtype.capitalize()+"-ZZ-REG-ID"
table_id = objtype + "_id"
table = dynamodb.Table(table_name)
expression = 'SET '
x = 1
attrvals = {}
for k in content.keys():
expression += k + ' = :val' + str(x) + ', '
attrvals[':val' + str(x)] = content[k]
x += 1
expression = expression[:-2]
response = table.update_item(Key={table_id: objkey},
UpdateExpression=expression,
ExpressionAttributeValues=attrvals)
return response
@bp.route('/read', methods=['GET'])
def read():
headers = request.headers # noqa: F841
# check header here
objtype = urllib.parse.unquote_plus(request.args.get('objtype'))
objkey = urllib.parse.unquote_plus(request.args.get('objkey'))
table_name = objtype.capitalize()+"-ZZ-REG-ID"
table_id = objtype + "_id"
table = dynamodb.Table(table_name)
response = table.query(Select='ALL_ATTRIBUTES',
KeyConditionExpression=Key(table_id).eq(objkey))
return response
@bp.route('/write', methods=['POST'])
def write():
headers = request.headers # noqa: F841
# check header here
content = request.get_json()
table_name = content['objtype'].capitalize()+"-ZZ-REG-ID"
objtype = content['objtype']
table_id = objtype + "_id"
payload = {table_id: str(uuid.uuid4())}
del content['objtype']
for k in content.keys():
payload[k] = content[k]
table = dynamodb.Table(table_name)
response = table.put_item(Item=payload)
returnval = ''
if response['ResponseMetadata']['HTTPStatusCode'] != 200:
returnval = {"message": "fail"}
return json.dumps(
({table_id: payload[table_id]}, returnval)['returnval' in globals()])
def decode_auth_token(token):
'''Given an auth token in Base64 encoding, return the original string'''
return base64.standard_b64decode(token).decode()
def load_auth(headers):
'''Return True if caller authorized to do a `/load` '''
global loader_token
if 'Authorization' not in headers:
return False
# Auth string is 'Basic ' concatenated with base64 encoding of uname:passwd
auth_string = headers['Authorization'].split()[1]
name, pwd = decode_auth_token(auth_string).split(':')
if name != 'svc-loader' or pwd != loader_token:
return False
return True
@bp.route('/load', methods=['POST'])
def load():
    '''
    Load a value into the database.

    Unlike write():
    1. The caller supplies the UUID in the body; a 400-style object is
       returned when 'uuid' is missing.
    2. An "Authorization" header accepted by load_auth() is required;
       failures get an HTTP 401 response.
    3. A non-200 database status is reported as
       {http_status_code: status}.
    '''
    if not load_auth(request.headers):
        body = json.dumps({"http_status_code": 401,
                           "reason": "Invalid authorization for /load"})
        return Response(body, status=401, mimetype='application/json')
    content = request.get_json()
    if 'uuid' not in content:
        return json.dumps({"http_status_code": 400, "reason": 'Missing uuid'})
    # 'objtype' picks the table and key column; it is not stored itself.
    objtype = content.pop('objtype')
    table_id = objtype + "_id"
    item = {table_id: content.pop('uuid')}
    item.update(content)
    table = dynamodb.Table(objtype.capitalize() + "-ZZ-REG-ID")
    response = table.put_item(Item=item)
    status = response['ResponseMetadata']['HTTPStatusCode']
    if status != 200:
        return json.dumps({"http_status_code": status})
    return json.dumps({table_id: item[table_id]})
@bp.route('/delete', methods=['DELETE'])
def delete():
    """Remove one item, addressed by objtype/objkey query parameters."""
    headers = request.headers  # noqa: F841
    # check header here
    objtype = urllib.parse.unquote_plus(request.args.get('objtype'))
    objkey = urllib.parse.unquote_plus(request.args.get('objkey'))
    key_name = objtype + "_id"
    table = dynamodb.Table(objtype.capitalize() + "-ZZ-REG-ID")
    return table.delete_item(Key={key_name: objkey})
@bp.route('/health')
@metrics.do_not_track()
def health():
    """Liveness probe: always 200 with an empty JSON body."""
    return Response(response="", status=200, mimetype="application/json")
@bp.route('/readiness')
@metrics.do_not_track()
def readiness():
    """Readiness probe: always 200 with an empty JSON body."""
    return Response(response="", status=200, mimetype="application/json")
# All database calls will have this prefix. Prometheus metric
# calls will not---they will have route '/metrics'. This is
# the conventional organization.
# (e.g. read() above is actually served at /api/v1/datastore/read)
app.register_blueprint(bp, url_prefix='/api/v1/datastore/')
if __name__ == '__main__':
    if len(sys.argv) < 2:
        logging.error("missing port arg 1")
        sys.exit(-1)
    # argv[1] is the TCP port this service listens on
    p = int(sys.argv[1])
    # Do not set debug=True---that will disable the Prometheus metrics
app.run(host='0.0.0.0', port=p, threaded=True) | db/app-tpl.py | import base64
import logging
import os
import sys
import urllib.parse
import uuid
# Installed packages
import boto3
from boto3.dynamodb.conditions import Key
from flask import Blueprint
from flask import Flask
from flask import request
from flask import Response
from prometheus_flask_exporter import PrometheusMetrics
import simplejson as json
# The application
app = Flask(__name__)
# Exposes request metrics on /metrics for Prometheus scraping.
metrics = PrometheusMetrics(app)
metrics.info('app_info', 'Database process')
bp = Blueprint('app', __name__)
# default to us-east-1 if no region is specified
# (us-east-1 is the default/only supported region for a starter account)
region = os.getenv('AWS_REGION', 'us-east-1')
# these must be present; if they are missing, we should probably bail now
access_key = os.getenv('AWS_ACCESS_KEY_ID')
secret_access_key = os.getenv('AWS_SECRET_ACCESS_KEY')
# Must be presented to authorize call to `/load`
loader_token = os.getenv('SVC_LOADER_TOKEN')
# In some testing contexts, we pass in the DynamoDB URL
dynamodb_url = os.getenv('DYNAMODB_URL', '')
if dynamodb_url == '':
    # Production path: real AWS endpoint for the configured region.
    dynamodb = boto3.resource(
        'dynamodb',
        region_name=region,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_access_key)
else:
    # Local/test path (e.g. DynamoDB Local). See
    # https://stackoverflow.com/questions/31948742/localhost-endpoint-to-dynamodb-local-with-boto3
    dynamodb = boto3.resource(
        'dynamodb',
        endpoint_url=dynamodb_url,
        region_name=region,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_access_key)
# Change the implementation of this: you should probably have a separate
# driver class for interfacing with a db like dynamodb in a different file.
@bp.route('/update', methods=['PUT'])
def update():
    """Update attributes of an existing item.

    Query params objtype/objkey select the item; the JSON body supplies
    the attributes to SET. Returns the raw DynamoDB response.
    """
    headers = request.headers  # noqa: F841
    # check header here
    content = request.get_json()
    objtype = urllib.parse.unquote_plus(request.args.get('objtype'))
    objkey = urllib.parse.unquote_plus(request.args.get('objkey'))
    table = dynamodb.Table(objtype.capitalize() + "-ZZ-REG-ID")
    table_id = objtype + "_id"
    # Build "SET k1 = :val1, k2 = :val2, ..." with a join instead of the
    # old trailing-comma slicing, which produced the malformed expression
    # "SE" when the body was empty.
    attrvals = {}
    clauses = []
    for i, (k, v) in enumerate(content.items(), start=1):
        clauses.append(k + ' = :val' + str(i))
        attrvals[':val' + str(i)] = v
    expression = 'SET ' + ', '.join(clauses)
    response = table.update_item(Key={table_id: objkey},
                                 UpdateExpression=expression,
                                 ExpressionAttributeValues=attrvals)
    return response
@bp.route('/read', methods=['GET'])
def read():
    """Fetch every attribute of one item, addressed by objtype/objkey.

    Query params: objtype (selects table and key column), objkey (the id).
    Returns the raw DynamoDB query response.
    """
    headers = request.headers  # noqa: F841
    # check header here
    objtype = urllib.parse.unquote_plus(request.args.get('objtype'))
    objkey = urllib.parse.unquote_plus(request.args.get('objkey'))
    key_name = objtype + "_id"
    table = dynamodb.Table(objtype.capitalize() + "-ZZ-REG-ID")
    return table.query(Select='ALL_ATTRIBUTES',
                       KeyConditionExpression=Key(key_name).eq(objkey))
@bp.route('/write', methods=['POST'])
def write():
    """Create a new item with a server-generated UUID primary key.

    Body: JSON with 'objtype' plus arbitrary attributes.
    Returns JSON {<objtype>_id: <uuid>} on success, or
    {"message": "fail"} when DynamoDB reports a non-200 status.
    """
    headers = request.headers  # noqa: F841
    # check header here
    content = request.get_json()
    objtype = content['objtype']
    table_name = objtype.capitalize() + "-ZZ-REG-ID"
    table_id = objtype + "_id"
    payload = {table_id: str(uuid.uuid4())}
    del content['objtype']
    for k in content.keys():
        payload[k] = content[k]
    table = dynamodb.Table(table_name)
    response = table.put_item(Item=payload)
    # BUG FIX: the old code chose the return value with
    # `('returnval' in globals())`, which is always False because
    # returnval is a *local*, so failures were reported as success.
    if response['ResponseMetadata']['HTTPStatusCode'] != 200:
        return json.dumps({"message": "fail"})
    return json.dumps({table_id: payload[table_id]})
def decode_auth_token(token):
    """Decode a Base64-encoded auth token back into its original string."""
    raw_bytes = base64.standard_b64decode(token)
    return raw_bytes.decode()
def load_auth(headers):
    '''Return True if caller authorized to do a `/load`.

    Expects 'Authorization: Basic <base64(uname:passwd)>' and accepts
    only the svc-loader user with the configured loader token.
    '''
    if 'Authorization' not in headers:
        return False
    parts = headers['Authorization'].split()
    if len(parts) != 2:
        # BUG FIX: a header without a credential part previously raised
        # IndexError; treat malformed headers as "not authorized".
        return False
    try:
        # maxsplit=1 tolerates passwords that themselves contain ':'
        name, pwd = decode_auth_token(parts[1]).split(':', 1)
    except ValueError:
        # Covers invalid base64 (binascii.Error subclasses ValueError),
        # undecodable bytes, and a decoded string with no ':' separator.
        return False
    return name == 'svc-loader' and pwd == loader_token
@bp.route('/load', methods=['POST'])
def load():
    '''
    Load a value into the database.

    Unlike write():
    1. The caller supplies the UUID in the body; a 400-style object is
       returned when 'uuid' is missing.
    2. An "Authorization" header accepted by load_auth() is required;
       failures get an HTTP 401 response.
    3. A non-200 database status is reported as
       {http_status_code: status}.
    '''
    if not load_auth(request.headers):
        body = json.dumps({"http_status_code": 401,
                           "reason": "Invalid authorization for /load"})
        return Response(body, status=401, mimetype='application/json')
    content = request.get_json()
    if 'uuid' not in content:
        return json.dumps({"http_status_code": 400, "reason": 'Missing uuid'})
    # 'objtype' picks the table and key column; it is not stored itself.
    objtype = content.pop('objtype')
    table_id = objtype + "_id"
    item = {table_id: content.pop('uuid')}
    item.update(content)
    table = dynamodb.Table(objtype.capitalize() + "-ZZ-REG-ID")
    response = table.put_item(Item=item)
    status = response['ResponseMetadata']['HTTPStatusCode']
    if status != 200:
        return json.dumps({"http_status_code": status})
    return json.dumps({table_id: item[table_id]})
@bp.route('/delete', methods=['DELETE'])
def delete():
    """Remove one item, addressed by objtype/objkey query parameters."""
    headers = request.headers  # noqa: F841
    # check header here
    objtype = urllib.parse.unquote_plus(request.args.get('objtype'))
    objkey = urllib.parse.unquote_plus(request.args.get('objkey'))
    key_name = objtype + "_id"
    table = dynamodb.Table(objtype.capitalize() + "-ZZ-REG-ID")
    return table.delete_item(Key={key_name: objkey})
@bp.route('/health')
@metrics.do_not_track()
def health():
    """Liveness probe: always 200 with an empty JSON body."""
    return Response(response="", status=200, mimetype="application/json")
@bp.route('/readiness')
@metrics.do_not_track()
def readiness():
    """Readiness probe: always 200 with an empty JSON body."""
    return Response(response="", status=200, mimetype="application/json")
# All database calls will have this prefix. Prometheus metric
# calls will not---they will have route '/metrics'. This is
# the conventional organization.
# (e.g. read() above is actually served at /api/v1/datastore/read)
app.register_blueprint(bp, url_prefix='/api/v1/datastore/')
if __name__ == '__main__':
    if len(sys.argv) < 2:
        logging.error("missing port arg 1")
        sys.exit(-1)
    # argv[1] is the TCP port this service listens on
    p = int(sys.argv[1])
    # Do not set debug=True---that will disable the Prometheus metrics
app.run(host='0.0.0.0', port=p, threaded=True) | 0.384103 | 0.061848 |
from untils import until, Event
from Addon import Addon, middelware, addon_init
from Template import str_back
# Russian language name -> translation-service language code
# (keys are what users type; values are passed to event.translate()).
L = {'азербайджанский': 'az', 'малаялам': 'ml', 'албанский': 'sq', 'мальтийский': 'mt', 'амхарский': 'am',
     'македонский': 'mk', 'английский': 'en', 'маори': 'mi', 'арабский': 'ar', 'маратхи': 'mr',
     'армянский': 'hy', 'марийский': 'mhr', 'африкаанс': 'af', 'монгольский': 'mn', 'баскский': 'eu',
     'немецкий': 'de', 'башкирский': 'ba', 'непальский': 'ne', 'белорусский': 'be', 'норвежский': 'no',
     'бенгальский': 'bn', 'панджаби': 'pa', 'бирманский': 'my', 'папьяменто': 'pap', 'болгарский': 'bg',
     'персидский': 'fa', 'боснийский': 'bs', 'польский': 'pl', 'валлийский': 'cy', 'португальский': 'pt',
     'венгерский': 'hu', 'румынский': 'ro', 'вьетнамский': 'vi', 'русский': 'ru', 'гаитянский': 'ht',
     'себуанский': 'ceb', 'галисийский': 'gl', 'сербский': 'sr', 'голландский': 'nl', 'сингальский': 'si',
     'горномарийский': 'mrj', 'словацкий': 'sk', 'греческий': 'el', 'словенский': 'sl', 'грузинский': 'ka',
     'суахили': 'sw', 'гуджарати': 'gu', 'сунданский': 'su', 'датский': 'da', 'таджикский': 'tg',
     'иврит': 'he', 'тайский': 'th', 'идиш': 'yi', 'тагальский': 'tl', 'индонезийский': 'id',
     'тамильский': 'ta', 'ирландский': 'ga', 'татарский': 'tt', 'итальянский': 'it', 'телугу': 'te',
     'исландский': 'is', 'турецкий': 'tr', 'испанский': 'es', 'удмуртский': 'udm', 'казахский': 'kk',
     'узбекский': 'uz', 'каннада': 'kn', 'украинский': 'uk', 'каталанский': 'ca', 'урду': 'ur',
     'киргизский': 'ky', 'финский': 'fi', 'китайский': 'zh', 'французский': 'fr', 'корейский': 'ko',
     'хинди': 'hi', 'коса': 'xh', 'хорватский': 'hr', 'кхмерский': 'km', 'чешский': 'cs', 'лаосский': 'lo',
     'шведский': 'sv', 'латынь': 'la', 'шотландский': 'gd', 'латышский': 'lv', 'эстонский': 'et',
     'литовский': 'lt', 'эсперанто': 'eo', 'люксембургский': 'lb', 'яванский': 'jv', 'малагасийский': 'mg',
     'японский': 'ja', 'малайский': 'ms'}
# Chat keyboard layouts (the '%b' suffix presumably marks a button
# style in the Addon framework -- TODO confirm).
keyb = ['!Все языки%b', str_back]
keyb2 = ['!Сменить язык%b', '!Быстрая команда%b', str_back]
# Dialog state-machine steps for Translator.
NotWork = 0
Start = 1
ChangeLang = 2
@addon_init(["!ПЕРЕВОДЧИК", 'ПЕРЕВОД'], '🇬🇧', True, 3)
class Translator(Addon):
    """Chat-bot addon translating text between two user-chosen languages.

    Per-user persisted state: LANG ('<src>-<dst>' code pair), now_lang
    (the human-readable language names) and a first-run flag.
    """
    __slots__ = 'LANG', 'now_lang', 'first'

    def __init__(self, username, user_id):
        super(Translator, self).__init__(username, user_id)
        self.lock = 1
        # Restore persisted state (falls back to the given defaults).
        self.LANG = self.state('LANG', 'en-ru')
        self.now_lang = self.state('now_lang', ['Английского', 'Русский'])
        self.first = self.state('first_translator', 1, return_value=0)

    def get_lang_list(self):
        # All supported language names (the keys of L).
        return list(L.keys())

    def state(self, key, value, update=False, return_value=-1):
        # Thin wrapper over the per-user key/value store.
        return self.set_condition(self.user_id, key, value, update, return_value)

    async def choise_lang(self, event: Event) -> Event:
        """Parse '<src> <dst>' from the message and persist the pair.

        Tolerates small typos (edit distance <= 2) when the first letter
        matches a known language name.
        """
        message = event.text.lower().split()
        if len(message) != 2:
            return event.answer('Нужно указать 2 слова через пробел\n\nПример: японский английский')
        m1, m2 = message[0], message[1]
        a = L.get(m1, 0)
        b = L.get(m2, 0)
        if a == 0 or b == 0:
            # Fuzzy match: recover from small typos in the names.
            lang_names = self.get_lang_list()
            for name in lang_names:
                if name[0] == m1[0]:
                    if event.distance(name, m1) <= 2:
                        a = name
                if name[0] == m2[0]:
                    if event.distance(name, m2) <= 2:
                        b = name
            m1, m2 = a, b
            a = L.get(a, 0)
            b = L.get(b, 0)
        if a == 0 or b == 0:
            return event.answer(f'Видимо такого языка у меня нету в базе или ты '
                                f'{event.gender("допустил", "допустила")}'
                                f' ошибку...\n\nМожно узнать какие языки я '
                                f'поддерживаю командой !Все языки')
        self.LANG = a + '-' + b
        if self.step:
            self.setstep(Start)
        self.now_lang = [m1, m2]
        self.state('now_lang', self.now_lang, update=True)
        self.state('LANG', self.LANG, update=True)
        if self.step:
            return event.answer(f'Ты {event.gender("выбрал", "выбрала")} {m1} и {m2}')
        else:
            return event

    async def get_translate(self, event: Event) -> Event:
        """Translate event.text using the stored language pair."""
        return event.answer(f'Перевод:\n{await event.translate(event.text, self.LANG)}')

    @middelware
    async def mainapp(self, event: Event) -> Event:
        """Entry point: dispatch commands and dialog-state steps."""
        if event.check('!ВСЕ ЯЗЫКИ', '/ВСЕ ЯЗЫКИ') and self.step >= 1:
            a = ''
            for i in list(L.keys()):
                a += i + '\n'
            return event.answer(a).keyboard(*keyb)
        if event.check('!БЫСТРАЯ КОМАНДА'):
            return event.answer('Доступна быстрая команда, пример:\n\n'
                                '?? [языки] [текст для перевода]\n'
                                'или если языки уже выбраны\n'
                                '?? [текст для перевода]'
                                ).keyboard(*keyb2).attachment('photo-168691465_457250877')
        # BUG FIX: a missing comma concatenated '/СМЕНИТЬ ЯЗЫК' and
        # '!OTHER LANGUAGE' into one unmatchable command string, so
        # neither variant ever triggered.
        if event.check('!СМЕНИТЬ ЯЗЫК', '/СМЕНИТЬ ЯЗЫК', '!OTHER LANGUAGE'):
            self.setstep(ChangeLang)
            return event.answer('С какого на какой язык перевести? '
                                'Напиши пару языков например:\n\n'
                                'русский английский').keyboard(*keyb)
        if self.isstep(NotWork, Start):
            # NOTE(review): presumably isstep(cur, next) also advances the
            # step, otherwise Start would never reach the branch below --
            # confirm against the Addon base class.
            if self.first == 0:
                self.first = 1
                return event.answer(f'{self.username}, я могу превести с {self.now_lang[0]} на '
                                    f'{self.now_lang[1]}, что перевести?\n\n'
                                    f'Сменить языки командой - !Сменить язык'
                                    ).keyboard(*keyb2)
            else:
                return event.answer(f'Выбраны языки:\n{self.now_lang[0]} - '
                                    f'{self.now_lang[1]}\nЧто перевести?\n\n'
                                    f'➡ !Сменить язык\n'
                                    f'➡ !Быстрая команда'
                                    ).keyboard(*keyb2)
        if self.isstep(Start):
            event.keyboard(*keyb2)
            if event.text:
                return await self.get_translate(event)
            else:
                return event.answer('Нету текста для перевода')
        if self.isstep(ChangeLang):
            event.keyboard(*keyb)
return await self.choise_lang(event) | addons/translator/translator.py | from untils import until, Event
from Addon import Addon, middelware, addon_init
from Template import str_back
# Russian language name -> translation-service language code
# (keys are what users type; values are passed to event.translate()).
L = {'азербайджанский': 'az', 'малаялам': 'ml', 'албанский': 'sq', 'мальтийский': 'mt', 'амхарский': 'am',
     'македонский': 'mk', 'английский': 'en', 'маори': 'mi', 'арабский': 'ar', 'маратхи': 'mr',
     'армянский': 'hy', 'марийский': 'mhr', 'африкаанс': 'af', 'монгольский': 'mn', 'баскский': 'eu',
     'немецкий': 'de', 'башкирский': 'ba', 'непальский': 'ne', 'белорусский': 'be', 'норвежский': 'no',
     'бенгальский': 'bn', 'панджаби': 'pa', 'бирманский': 'my', 'папьяменто': 'pap', 'болгарский': 'bg',
     'персидский': 'fa', 'боснийский': 'bs', 'польский': 'pl', 'валлийский': 'cy', 'португальский': 'pt',
     'венгерский': 'hu', 'румынский': 'ro', 'вьетнамский': 'vi', 'русский': 'ru', 'гаитянский': 'ht',
     'себуанский': 'ceb', 'галисийский': 'gl', 'сербский': 'sr', 'голландский': 'nl', 'сингальский': 'si',
     'горномарийский': 'mrj', 'словацкий': 'sk', 'греческий': 'el', 'словенский': 'sl', 'грузинский': 'ka',
     'суахили': 'sw', 'гуджарати': 'gu', 'сунданский': 'su', 'датский': 'da', 'таджикский': 'tg',
     'иврит': 'he', 'тайский': 'th', 'идиш': 'yi', 'тагальский': 'tl', 'индонезийский': 'id',
     'тамильский': 'ta', 'ирландский': 'ga', 'татарский': 'tt', 'итальянский': 'it', 'телугу': 'te',
     'исландский': 'is', 'турецкий': 'tr', 'испанский': 'es', 'удмуртский': 'udm', 'казахский': 'kk',
     'узбекский': 'uz', 'каннада': 'kn', 'украинский': 'uk', 'каталанский': 'ca', 'урду': 'ur',
     'киргизский': 'ky', 'финский': 'fi', 'китайский': 'zh', 'французский': 'fr', 'корейский': 'ko',
     'хинди': 'hi', 'коса': 'xh', 'хорватский': 'hr', 'кхмерский': 'km', 'чешский': 'cs', 'лаосский': 'lo',
     'шведский': 'sv', 'латынь': 'la', 'шотландский': 'gd', 'латышский': 'lv', 'эстонский': 'et',
     'литовский': 'lt', 'эсперанто': 'eo', 'люксембургский': 'lb', 'яванский': 'jv', 'малагасийский': 'mg',
     'японский': 'ja', 'малайский': 'ms'}
# Chat keyboard layouts (the '%b' suffix presumably marks a button
# style in the Addon framework -- TODO confirm).
keyb = ['!Все языки%b', str_back]
keyb2 = ['!Сменить язык%b', '!Быстрая команда%b', str_back]
# Dialog state-machine steps for Translator.
NotWork = 0
Start = 1
ChangeLang = 2
@addon_init(["!ПЕРЕВОДЧИК", 'ПЕРЕВОД'], '🇬🇧', True, 3)
class Translator(Addon):
    """Chat-bot addon translating text between two user-chosen languages.

    Per-user persisted state: LANG ('<src>-<dst>' code pair), now_lang
    (the human-readable language names) and a first-run flag.
    """
    __slots__ = 'LANG', 'now_lang', 'first'

    def __init__(self, username, user_id):
        super(Translator, self).__init__(username, user_id)
        self.lock = 1
        # Restore persisted state (falls back to the given defaults).
        self.LANG = self.state('LANG', 'en-ru')
        self.now_lang = self.state('now_lang', ['Английского', 'Русский'])
        self.first = self.state('first_translator', 1, return_value=0)

    def get_lang_list(self):
        # All supported language names (the keys of L).
        return list(L.keys())

    def state(self, key, value, update=False, return_value=-1):
        # Thin wrapper over the per-user key/value store.
        return self.set_condition(self.user_id, key, value, update, return_value)

    async def choise_lang(self, event: Event) -> Event:
        """Parse '<src> <dst>' from the message and persist the pair.

        Tolerates small typos (edit distance <= 2) when the first letter
        matches a known language name.
        """
        message = event.text.lower().split()
        if len(message) != 2:
            return event.answer('Нужно указать 2 слова через пробел\n\nПример: японский английский')
        m1, m2 = message[0], message[1]
        a = L.get(m1, 0)
        b = L.get(m2, 0)
        if a == 0 or b == 0:
            # Fuzzy match: recover from small typos in the names.
            lang_names = self.get_lang_list()
            for name in lang_names:
                if name[0] == m1[0]:
                    if event.distance(name, m1) <= 2:
                        a = name
                if name[0] == m2[0]:
                    if event.distance(name, m2) <= 2:
                        b = name
            m1, m2 = a, b
            a = L.get(a, 0)
            b = L.get(b, 0)
        if a == 0 or b == 0:
            return event.answer(f'Видимо такого языка у меня нету в базе или ты '
                                f'{event.gender("допустил", "допустила")}'
                                f' ошибку...\n\nМожно узнать какие языки я '
                                f'поддерживаю командой !Все языки')
        self.LANG = a + '-' + b
        if self.step:
            self.setstep(Start)
        self.now_lang = [m1, m2]
        self.state('now_lang', self.now_lang, update=True)
        self.state('LANG', self.LANG, update=True)
        if self.step:
            return event.answer(f'Ты {event.gender("выбрал", "выбрала")} {m1} и {m2}')
        else:
            return event

    async def get_translate(self, event: Event) -> Event:
        """Translate event.text using the stored language pair."""
        return event.answer(f'Перевод:\n{await event.translate(event.text, self.LANG)}')

    @middelware
    async def mainapp(self, event: Event) -> Event:
        """Entry point: dispatch commands and dialog-state steps."""
        if event.check('!ВСЕ ЯЗЫКИ', '/ВСЕ ЯЗЫКИ') and self.step >= 1:
            a = ''
            for i in list(L.keys()):
                a += i + '\n'
            return event.answer(a).keyboard(*keyb)
        if event.check('!БЫСТРАЯ КОМАНДА'):
            return event.answer('Доступна быстрая команда, пример:\n\n'
                                '?? [языки] [текст для перевода]\n'
                                'или если языки уже выбраны\n'
                                '?? [текст для перевода]'
                                ).keyboard(*keyb2).attachment('photo-168691465_457250877')
        # BUG FIX: a missing comma concatenated '/СМЕНИТЬ ЯЗЫК' and
        # '!OTHER LANGUAGE' into one unmatchable command string, so
        # neither variant ever triggered.
        if event.check('!СМЕНИТЬ ЯЗЫК', '/СМЕНИТЬ ЯЗЫК', '!OTHER LANGUAGE'):
            self.setstep(ChangeLang)
            return event.answer('С какого на какой язык перевести? '
                                'Напиши пару языков например:\n\n'
                                'русский английский').keyboard(*keyb)
        if self.isstep(NotWork, Start):
            # NOTE(review): presumably isstep(cur, next) also advances the
            # step, otherwise Start would never reach the branch below --
            # confirm against the Addon base class.
            if self.first == 0:
                self.first = 1
                return event.answer(f'{self.username}, я могу превести с {self.now_lang[0]} на '
                                    f'{self.now_lang[1]}, что перевести?\n\n'
                                    f'Сменить языки командой - !Сменить язык'
                                    ).keyboard(*keyb2)
            else:
                return event.answer(f'Выбраны языки:\n{self.now_lang[0]} - '
                                    f'{self.now_lang[1]}\nЧто перевести?\n\n'
                                    f'➡ !Сменить язык\n'
                                    f'➡ !Быстрая команда'
                                    ).keyboard(*keyb2)
        if self.isstep(Start):
            event.keyboard(*keyb2)
            if event.text:
                return await self.get_translate(event)
            else:
                return event.answer('Нету текста для перевода')
        if self.isstep(ChangeLang):
            event.keyboard(*keyb)
return await self.choise_lang(event) | 0.169578 | 0.399929 |
import os
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
class TestRule4c:
    """Fixture class for a static-analysis rule about hard-coded PBKDF2 salts.

    Methods prefixed ``p_`` are POSITIVE examples the rule must flag
    (a constant salt reaches PBKDF2HMAC through increasingly indirect
    paths); ``n_`` methods are NEGATIVE examples that must not be
    flagged. The weak crypto here (constant salts, AES-ECB) is
    intentional test data -- do not "fix" it.
    """

    def __init__(self):
        self.g_backend = default_backend()
        # Constant salts stored as attributes (flagged via attribute access).
        self.g_salt1 = b"12345678"
        self.g_salt2 = bytes("12345678", "utf8")

    # --- salt hard-coded directly in the PBKDF2HMAC call ---
    def p_example1_hard_coded1(self, password, data):
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=b"12345678", iterations=1000,
                         backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    def p_example2_hard_coded2(self, password, data):
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=bytes("12345678", "utf8"), iterations=1000,
                         backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    # --- constant salt held in a local variable ---
    def p_example3_local_variable1(self, password, data):
        salt = b"12345678"
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=salt, iterations=1000, backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    def p_example4_local_variable2(self, password, data):
        salt = bytes("12345678", "utf8")
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=salt, iterations=1000, backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    # --- constant salt propagated through a chain of local variables ---
    def p_example5_nested_local_variable1(self, password, data):
        salt1 = b"12345678"
        salt2 = salt1
        salt3 = salt2
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=salt3, iterations=1000, backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    def p_example6_nested_local_variable2(self, password, data):
        salt1 = bytes("12345678", "utf8")
        salt2 = salt1
        salt3 = salt2
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=salt3, iterations=1000, backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    # --- helpers: the salt arrives as a parameter (rule must trace callers) ---
    def p_example_method_call(self, password, salt, data):
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=salt, iterations=1000, backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    def p_example_nested_method_call(self, password, salt, data):
        return self.p_example_method_call(password, salt, data)

    # NOTE(review): b"<PASSWORD>" looks like a data-anonymization artifact
    # (probably b"12345678" originally) -- confirm against the upstream corpus.
    def p_example7_direct_method_call1(self, password, data):
        salt = b"<PASSWORD>"
        return self.p_example_method_call(password, salt, data)

    def p_example8_direct_method_call2(self, password, data):
        salt = bytes("12345678", "utf8")
        return self.p_example_method_call(password, salt, data)

    def p_example9_nested_method_call1(self, password, data):
        salt = b"<PASSWORD>"
        return self.p_example_nested_method_call(password, salt, data)

    def p_example10_nested_method_call2(self, password, data):
        salt = bytes("12345678", "utf8")
        return self.p_example_nested_method_call(password, salt, data)

    # --- constant salt read from instance attributes ---
    def p_example11_direct_g_variable_access1(self, password, data):
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=self.g_salt1, iterations=1000,
                         backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    def p_example12_direct_g_variable_access2(self, password, data):
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=self.g_salt2, iterations=1000,
                         backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    def p_example13_indirect_g_variable_access1(self, password, data):
        salt = self.g_salt1
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=salt, iterations=1000, backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    def p_example14_indirect_g_variable_access2(self, password, data):
        salt = self.g_salt2
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=salt, iterations=1000, backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    # --- salt not resolvable from this translation unit: rule should warn ---
    def p_example15_warning_parameter_not_resolvable(self, password, salt, data):
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=salt, iterations=1000, backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    # --- negative example: random salt, must NOT be flagged ---
    def n_example1_random_salt(self, password, data):
        salt = os.urandom(8)  # Recommended by the cryptography developers
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=salt, iterations=1000, backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
return cipher_text | resources/test_cases/python/cryptography/TestRule4c.py | import os
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
class TestRule4c:
    """Fixture class for a static-analysis rule about hard-coded PBKDF2 salts.

    Methods prefixed ``p_`` are POSITIVE examples the rule must flag
    (a constant salt reaches PBKDF2HMAC through increasingly indirect
    paths); ``n_`` methods are NEGATIVE examples that must not be
    flagged. The weak crypto here (constant salts, AES-ECB) is
    intentional test data -- do not "fix" it.
    """

    def __init__(self):
        self.g_backend = default_backend()
        # Constant salts stored as attributes (flagged via attribute access).
        self.g_salt1 = b"12345678"
        self.g_salt2 = bytes("12345678", "utf8")

    # --- salt hard-coded directly in the PBKDF2HMAC call ---
    def p_example1_hard_coded1(self, password, data):
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=b"12345678", iterations=1000,
                         backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    def p_example2_hard_coded2(self, password, data):
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=bytes("12345678", "utf8"), iterations=1000,
                         backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    # --- constant salt held in a local variable ---
    def p_example3_local_variable1(self, password, data):
        salt = b"12345678"
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=salt, iterations=1000, backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    def p_example4_local_variable2(self, password, data):
        salt = bytes("12345678", "utf8")
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=salt, iterations=1000, backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    # --- constant salt propagated through a chain of local variables ---
    def p_example5_nested_local_variable1(self, password, data):
        salt1 = b"12345678"
        salt2 = salt1
        salt3 = salt2
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=salt3, iterations=1000, backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    def p_example6_nested_local_variable2(self, password, data):
        salt1 = bytes("12345678", "utf8")
        salt2 = salt1
        salt3 = salt2
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=salt3, iterations=1000, backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    # --- helpers: the salt arrives as a parameter (rule must trace callers) ---
    def p_example_method_call(self, password, salt, data):
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=salt, iterations=1000, backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    def p_example_nested_method_call(self, password, salt, data):
        return self.p_example_method_call(password, salt, data)

    # NOTE(review): b"<PASSWORD>" looks like a data-anonymization artifact
    # (probably b"12345678" originally) -- confirm against the upstream corpus.
    def p_example7_direct_method_call1(self, password, data):
        salt = b"<PASSWORD>"
        return self.p_example_method_call(password, salt, data)

    def p_example8_direct_method_call2(self, password, data):
        salt = bytes("12345678", "utf8")
        return self.p_example_method_call(password, salt, data)

    def p_example9_nested_method_call1(self, password, data):
        salt = b"<PASSWORD>"
        return self.p_example_nested_method_call(password, salt, data)

    def p_example10_nested_method_call2(self, password, data):
        salt = bytes("12345678", "utf8")
        return self.p_example_nested_method_call(password, salt, data)

    # --- constant salt read from instance attributes ---
    def p_example11_direct_g_variable_access1(self, password, data):
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=self.g_salt1, iterations=1000,
                         backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    def p_example12_direct_g_variable_access2(self, password, data):
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=self.g_salt2, iterations=1000,
                         backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    def p_example13_indirect_g_variable_access1(self, password, data):
        salt = self.g_salt1
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=salt, iterations=1000, backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    def p_example14_indirect_g_variable_access2(self, password, data):
        salt = self.g_salt2
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=salt, iterations=1000, backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    # --- salt not resolvable from this translation unit: rule should warn ---
    def p_example15_warning_parameter_not_resolvable(self, password, salt, data):
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=salt, iterations=1000, backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
        return cipher_text

    # --- negative example: random salt, must NOT be flagged ---
    def n_example1_random_salt(self, password, data):
        salt = os.urandom(8)  # Recommended by the cryptography developers
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=salt, iterations=1000, backend=self.g_backend)
        key = kdf.derive(password)
        cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=self.g_backend)
        encryptor = cipher.encryptor()
        cipher_text = encryptor.update(data) + encryptor.finalize()
return cipher_text | 0.712932 | 0.232735 |
from __future__ import division
from __future__ import print_function
from builtins import range
import numpy as np
import os
import gzip
import argparse
try:
import pickle
except ImportError:
import cPickle as pickle
from singa import initializer
from singa import optimizer
from singa import device
from singa import tensor
def load_train_data(file_path):
f = gzip.open(file_path, 'rb')
train_set, valid_set, test_set = pickle.load(f)
traindata = train_set[0].astype(np.float32)
validdata = valid_set[0].astype(np.float32)
print(traindata.shape, validdata.shape)
return traindata, validdata
def train(data_file, use_gpu, num_epoch=10, batch_size=100):
print('Start intialization............')
lr = 0.1 # Learning rate
weight_decay = 0.0002
hdim = 1000
vdim = 784
tweight = tensor.Tensor((vdim, hdim))
tweight.gaussian(0.0, 0.1)
tvbias = tensor.from_numpy(np.zeros(vdim, dtype=np.float32))
thbias = tensor.from_numpy(np.zeros(hdim, dtype=np.float32))
opt = optimizer.SGD(momentum=0.5, weight_decay=weight_decay)
print('Loading data ..................')
train_x, valid_x = load_train_data(data_file)
if use_gpu:
dev = device.create_cuda_gpu()
else:
dev = device.get_default_device()
for t in [tweight, tvbias, thbias]:
t.to_device(dev)
num_train_batch = train_x.shape[0] // batch_size
print("num_train_batch = %d " % (num_train_batch))
for epoch in range(num_epoch):
trainerrorsum = 0.0
print('Epoch %d' % epoch)
for b in range(num_train_batch):
# positive phase
tdata = tensor.from_numpy(
train_x[(b * batch_size):((b + 1) * batch_size), :])
tdata.to_device(dev)
tposhidprob = tensor.mult(tdata, tweight)
tposhidprob.add_row(thbias)
tposhidprob = tensor.sigmoid(tposhidprob)
tposhidrandom = tensor.Tensor(tposhidprob.shape, dev)
tposhidrandom.uniform(0.0, 1.0)
tposhidsample = tensor.gt(tposhidprob, tposhidrandom)
# negative phase
tnegdata = tensor.mult(tposhidsample, tweight.T())
tnegdata.add_row(tvbias)
tnegdata = tensor.sigmoid(tnegdata)
tneghidprob = tensor.mult(tnegdata, tweight)
tneghidprob.add_row(thbias)
tneghidprob = tensor.sigmoid(tneghidprob)
error = tensor.sum(tensor.square((tdata - tnegdata)))
trainerrorsum = error + trainerrorsum
tgweight = tensor.mult(tnegdata.T(), tneghidprob) \
- tensor.mult(tdata.T(), tposhidprob)
tgvbias = tensor.sum(tnegdata, 0) - tensor.sum(tdata, 0)
tghbias = tensor.sum(tneghidprob, 0) - tensor.sum(tposhidprob, 0)
opt.apply_with_lr(epoch, lr / batch_size, tgweight, tweight, 'w')
opt.apply_with_lr(epoch, lr / batch_size, tgvbias, tvbias, 'vb')
opt.apply_with_lr(epoch, lr / batch_size, tghbias, thbias, 'hb')
print('training erroraverage = %f' %
(tensor.to_numpy(trainerrorsum) / train_x.shape[0]))
tvaliddata = tensor.from_numpy(valid_x)
tvaliddata.to_device(dev)
tvalidposhidprob = tensor.mult(tvaliddata, tweight)
tvalidposhidprob.add_row(thbias)
tvalidposhidprob = tensor.sigmoid(tvalidposhidprob)
tvalidposhidrandom = tensor.Tensor(tvalidposhidprob.shape, dev)
initializer.uniform(tvalidposhidrandom, 0.0, 1.0)
tvalidposhidsample = tensor.gt(tvalidposhidprob, tvalidposhidrandom)
tvalidnegdata = tensor.mult(tvalidposhidsample, tweight.T())
tvalidnegdata.add_row(tvbias)
tvalidnegdata = tensor.sigmoid(tvalidnegdata)
validerrorsum = tensor.sum(tensor.square((tvaliddata - tvalidnegdata)))
print('valid erroraverage = %f' %
(tensor.to_numpy(validerrorsum) / valid_x.shape[0]))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train RBM over MNIST')
parser.add_argument('file', type=str, help='the dataset path')
parser.add_argument('--use_gpu', action='store_true')
args = parser.parse_args()
assert os.path.exists(args.file), 'Pls download the MNIST dataset from' \
'https://github.com/mnielsen/neural-networks-and-deep-learning/raw/master/data/mnist.pkl.gz'
train(args.file, args.use_gpu) | examples/mnist/train.py | from __future__ import division
from __future__ import print_function
from builtins import range
import numpy as np
import os
import gzip
import argparse
try:
import pickle
except ImportError:
import cPickle as pickle
from singa import initializer
from singa import optimizer
from singa import device
from singa import tensor
def load_train_data(file_path):
f = gzip.open(file_path, 'rb')
train_set, valid_set, test_set = pickle.load(f)
traindata = train_set[0].astype(np.float32)
validdata = valid_set[0].astype(np.float32)
print(traindata.shape, validdata.shape)
return traindata, validdata
def train(data_file, use_gpu, num_epoch=10, batch_size=100):
print('Start intialization............')
lr = 0.1 # Learning rate
weight_decay = 0.0002
hdim = 1000
vdim = 784
tweight = tensor.Tensor((vdim, hdim))
tweight.gaussian(0.0, 0.1)
tvbias = tensor.from_numpy(np.zeros(vdim, dtype=np.float32))
thbias = tensor.from_numpy(np.zeros(hdim, dtype=np.float32))
opt = optimizer.SGD(momentum=0.5, weight_decay=weight_decay)
print('Loading data ..................')
train_x, valid_x = load_train_data(data_file)
if use_gpu:
dev = device.create_cuda_gpu()
else:
dev = device.get_default_device()
for t in [tweight, tvbias, thbias]:
t.to_device(dev)
num_train_batch = train_x.shape[0] // batch_size
print("num_train_batch = %d " % (num_train_batch))
for epoch in range(num_epoch):
trainerrorsum = 0.0
print('Epoch %d' % epoch)
for b in range(num_train_batch):
# positive phase
tdata = tensor.from_numpy(
train_x[(b * batch_size):((b + 1) * batch_size), :])
tdata.to_device(dev)
tposhidprob = tensor.mult(tdata, tweight)
tposhidprob.add_row(thbias)
tposhidprob = tensor.sigmoid(tposhidprob)
tposhidrandom = tensor.Tensor(tposhidprob.shape, dev)
tposhidrandom.uniform(0.0, 1.0)
tposhidsample = tensor.gt(tposhidprob, tposhidrandom)
# negative phase
tnegdata = tensor.mult(tposhidsample, tweight.T())
tnegdata.add_row(tvbias)
tnegdata = tensor.sigmoid(tnegdata)
tneghidprob = tensor.mult(tnegdata, tweight)
tneghidprob.add_row(thbias)
tneghidprob = tensor.sigmoid(tneghidprob)
error = tensor.sum(tensor.square((tdata - tnegdata)))
trainerrorsum = error + trainerrorsum
tgweight = tensor.mult(tnegdata.T(), tneghidprob) \
- tensor.mult(tdata.T(), tposhidprob)
tgvbias = tensor.sum(tnegdata, 0) - tensor.sum(tdata, 0)
tghbias = tensor.sum(tneghidprob, 0) - tensor.sum(tposhidprob, 0)
opt.apply_with_lr(epoch, lr / batch_size, tgweight, tweight, 'w')
opt.apply_with_lr(epoch, lr / batch_size, tgvbias, tvbias, 'vb')
opt.apply_with_lr(epoch, lr / batch_size, tghbias, thbias, 'hb')
print('training erroraverage = %f' %
(tensor.to_numpy(trainerrorsum) / train_x.shape[0]))
tvaliddata = tensor.from_numpy(valid_x)
tvaliddata.to_device(dev)
tvalidposhidprob = tensor.mult(tvaliddata, tweight)
tvalidposhidprob.add_row(thbias)
tvalidposhidprob = tensor.sigmoid(tvalidposhidprob)
tvalidposhidrandom = tensor.Tensor(tvalidposhidprob.shape, dev)
initializer.uniform(tvalidposhidrandom, 0.0, 1.0)
tvalidposhidsample = tensor.gt(tvalidposhidprob, tvalidposhidrandom)
tvalidnegdata = tensor.mult(tvalidposhidsample, tweight.T())
tvalidnegdata.add_row(tvbias)
tvalidnegdata = tensor.sigmoid(tvalidnegdata)
validerrorsum = tensor.sum(tensor.square((tvaliddata - tvalidnegdata)))
print('valid erroraverage = %f' %
(tensor.to_numpy(validerrorsum) / valid_x.shape[0]))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train RBM over MNIST')
parser.add_argument('file', type=str, help='the dataset path')
parser.add_argument('--use_gpu', action='store_true')
args = parser.parse_args()
assert os.path.exists(args.file), 'Pls download the MNIST dataset from' \
'https://github.com/mnielsen/neural-networks-and-deep-learning/raw/master/data/mnist.pkl.gz'
train(args.file, args.use_gpu) | 0.581065 | 0.315169 |
import pathlib
import pickle
import time
from flexcache import DiskCacheByHash
# These sleep time is needed when run on GitHub Actions
# If not given or too short, some mtime changes are not visible.
FS_SLEEP = 0.010
def parser(p: pathlib.Path):
return p.read_bytes()
def test_file_changed(tmp_path):
# Generate a definition file
dfile = tmp_path / "definitions.txt"
dfile.write_bytes(b"1234")
dc = DiskCacheByHash(tmp_path)
content = dc.load(dfile)[0]
assert len(tuple(tmp_path.glob("*.pickle"))) == 0
assert len(tuple(tmp_path.glob("*.json"))) == 0
time.sleep(FS_SLEEP)
# First, the cache should be missed
assert content is None
dc.save(pickle.dumps(dfile.read_bytes()), dfile)
# There should be a cache file now
assert len(tuple(tmp_path.glob("*.pickle"))) == 1
assert len(tuple(tmp_path.glob("*.json"))) == 1
content = pickle.loads(dc.load(dfile)[0])
# Now, the cache should be hit
assert content == b"1234"
# Modify the definition file
# Add some sleep to make sure that the time stamp difference is significant.
dfile.write_bytes(b"1235")
# Verify that the cache was not loaded as the content of the original file
# has changed.
assert dc.load(dfile)[0] is None
def test_cache_miss(tmp_path):
# Generate a definition file
dfile = tmp_path / "definitions.txt"
dfile.write_bytes(b"1234")
dc = DiskCacheByHash(tmp_path)
content = dc.load(dfile)[0]
assert len(tuple(tmp_path.glob("*.pickle"))) == 0
assert len(tuple(tmp_path.glob("*.json"))) == 0
time.sleep(FS_SLEEP)
# First, the cache should be missed
assert content is None
dc.save(pickle.dumps(dfile.read_bytes()), dfile)
# There should be a cache file now
assert len(tuple(tmp_path.glob("*.pickle"))) == 1
assert len(tuple(tmp_path.glob("*.json"))) == 1
content = pickle.loads(dc.load(dfile)[0])
# Now, the cache should be hit
assert content == b"1234"
# Modify the definition file
# Add some sleep to make sure that the time stamp difference is significant.
time.sleep(FS_SLEEP)
dfile.write_bytes(b"1235")
# Verify that the cached was not loaded
content = dc.load(dfile)[0]
assert content is None
def test_func(tmp_path):
# Generate a definition file
dfile = tmp_path / "definitions.txt"
dfile.write_bytes(b"1234")
dc = DiskCacheByHash(tmp_path)
assert dc.load(dfile, converter=parser)[0] == b"1234"
# There should be a cache file now
assert len(tuple(tmp_path.glob("*.pickle"))) == 1
assert len(tuple(tmp_path.glob("*.json"))) == 1
# Modify the definition file
# Add some sleep to make sure that the time stamp difference is significant.
dfile.write_bytes(b"1235")
# Verify that the cache was not loaded as the content of the original file
# has changed.
assert dc.load(dfile, converter=parser)[0] == b"1235"
# There should be TWO cache files now
assert len(tuple(tmp_path.glob("*.pickle"))) == 2
assert len(tuple(tmp_path.glob("*.json"))) == 2 | flexcache/testsuite/test_byhash.py | import pathlib
import pickle
import time
from flexcache import DiskCacheByHash
# These sleep time is needed when run on GitHub Actions
# If not given or too short, some mtime changes are not visible.
FS_SLEEP = 0.010
def parser(p: pathlib.Path):
return p.read_bytes()
def test_file_changed(tmp_path):
# Generate a definition file
dfile = tmp_path / "definitions.txt"
dfile.write_bytes(b"1234")
dc = DiskCacheByHash(tmp_path)
content = dc.load(dfile)[0]
assert len(tuple(tmp_path.glob("*.pickle"))) == 0
assert len(tuple(tmp_path.glob("*.json"))) == 0
time.sleep(FS_SLEEP)
# First, the cache should be missed
assert content is None
dc.save(pickle.dumps(dfile.read_bytes()), dfile)
# There should be a cache file now
assert len(tuple(tmp_path.glob("*.pickle"))) == 1
assert len(tuple(tmp_path.glob("*.json"))) == 1
content = pickle.loads(dc.load(dfile)[0])
# Now, the cache should be hit
assert content == b"1234"
# Modify the definition file
# Add some sleep to make sure that the time stamp difference is significant.
dfile.write_bytes(b"1235")
# Verify that the cache was not loaded as the content of the original file
# has changed.
assert dc.load(dfile)[0] is None
def test_cache_miss(tmp_path):
# Generate a definition file
dfile = tmp_path / "definitions.txt"
dfile.write_bytes(b"1234")
dc = DiskCacheByHash(tmp_path)
content = dc.load(dfile)[0]
assert len(tuple(tmp_path.glob("*.pickle"))) == 0
assert len(tuple(tmp_path.glob("*.json"))) == 0
time.sleep(FS_SLEEP)
# First, the cache should be missed
assert content is None
dc.save(pickle.dumps(dfile.read_bytes()), dfile)
# There should be a cache file now
assert len(tuple(tmp_path.glob("*.pickle"))) == 1
assert len(tuple(tmp_path.glob("*.json"))) == 1
content = pickle.loads(dc.load(dfile)[0])
# Now, the cache should be hit
assert content == b"1234"
# Modify the definition file
# Add some sleep to make sure that the time stamp difference is significant.
time.sleep(FS_SLEEP)
dfile.write_bytes(b"1235")
# Verify that the cached was not loaded
content = dc.load(dfile)[0]
assert content is None
def test_func(tmp_path):
# Generate a definition file
dfile = tmp_path / "definitions.txt"
dfile.write_bytes(b"1234")
dc = DiskCacheByHash(tmp_path)
assert dc.load(dfile, converter=parser)[0] == b"1234"
# There should be a cache file now
assert len(tuple(tmp_path.glob("*.pickle"))) == 1
assert len(tuple(tmp_path.glob("*.json"))) == 1
# Modify the definition file
# Add some sleep to make sure that the time stamp difference is significant.
dfile.write_bytes(b"1235")
# Verify that the cache was not loaded as the content of the original file
# has changed.
assert dc.load(dfile, converter=parser)[0] == b"1235"
# There should be TWO cache files now
assert len(tuple(tmp_path.glob("*.pickle"))) == 2
assert len(tuple(tmp_path.glob("*.json"))) == 2 | 0.398758 | 0.36591 |
import py
from pypy.lang.smalltalk import squeakimage
from pypy.lang.smalltalk.squeakimage import chrs2int
from pypy.lang.smalltalk import objspace
space = objspace.ObjSpace()
# ----- helpers ----------------------------------------------
def ints2str(*ints):
import struct
return struct.pack(">" + "i" * len(ints), *ints)
def joinbits(values, lengths):
result = 0
for each, length in reversed(zip(values, lengths)):
result = result << length
result += each
return result
def imagereader_mock(string):
import StringIO
f = StringIO.StringIO(string)
stream = squeakimage.Stream(f)
return squeakimage.ImageReader(space, stream)
# ----- tests ------------------------------------------------
def test_chrs2int():
assert 1 == chrs2int('\x00\x00\x00\x01')
assert -1 == chrs2int('\xFF\xFF\xFF\xFF')
def test_stream():
stream = imagereader_mock('\x00\x00\x19\x66').stream
n = stream.peek()
assert n == 6502
n = stream.next()
assert n == 6502
py.test.raises(IndexError, lambda: stream.next())
def test_stream_swap():
stream = imagereader_mock('\x66\x19\x00\x00').stream
stream.swap = True
first = stream.next()
assert first == 6502
py.test.raises(IndexError, lambda: stream.next())
def test_stream_many():
stream = imagereader_mock('\x00\x00\x19\x66' * 5).stream
for each in range(5):
first = stream.peek()
assert first == 6502
value = stream.next()
assert value == 6502
py.test.raises(IndexError, lambda: stream.next())
def test_stream_skipbytes():
stream = imagereader_mock('\xFF\xFF\xFF\x00\x00\x19\x66').stream
stream.skipbytes(3)
value = stream.next()
assert value == 6502
py.test.raises(IndexError, lambda: stream.next())
def test_stream_count():
stream = imagereader_mock('\xFF' * 20).stream
stream.next()
stream.next()
stream.reset_count()
assert stream.count == 0
stream.next()
assert stream.count == 4
stream.next()
assert stream.count == 8
def test_simple_joinbits():
assert 0x01010101 == joinbits(([1] * 4), [8,8,8,8])
assert 0xFfFfFfFf == joinbits([255] * 4, [8,8,8,8])
def test_fancy_joinbits():
assert 0x01020304 == joinbits([4,3,2,1], [8,8,8,8])
assert 0x3Ff == joinbits([1,3,7,15], [1,2,3,4])
def test_ints2str():
assert "\x00\x00\x00\x02" == ints2str(2)
assert '\x00\x00\x19\x66\x00\x00\x00\x02' == ints2str(6502,2)
def test_freeblock():
r = imagereader_mock("\x00\x00\x00\x02")
py.test.raises(squeakimage.CorruptImageError, lambda: r.read_object())
def test_1wordobjectheader():
s = ints2str(joinbits([3, 1, 2, 3, 4], [2,6,4,5,12]))
r = imagereader_mock(s)
assert (squeakimage.ImageChunk(space, 1, 2, 3, 4), 0) == r.read_1wordobjectheader()
def test_1wordobjectheader2():
s = ints2str(joinbits([3, 1, 2, 3, 4], [2,6,4,5,12]))
r = imagereader_mock(s * 3)
assert (squeakimage.ImageChunk(space, 1, 2, 3, 4), 0) == r.read_1wordobjectheader()
assert (squeakimage.ImageChunk(space, 1, 2, 3, 4), 4) == r.read_1wordobjectheader()
assert (squeakimage.ImageChunk(space, 1, 2, 3, 4), 8) == r.read_1wordobjectheader()
def test_2wordobjectheader():
s = ints2str(4200 + 1, joinbits([1, 1, 2, 3, 4], [2,6,4,5,12]))
r = imagereader_mock(s)
assert (squeakimage.ImageChunk(space, 1, 2, 4200, 4), 4) == r.read_2wordobjectheader()
def test_3wordobjectheader():
s = ints2str(1701 << 2, 4200 + 0, joinbits([0, 1, 2, 3, 4], [2,6,4,5,12]))
r = imagereader_mock(s)
assert (squeakimage.ImageChunk(space, 1701, 2, 4200, 4), 8) == r.read_3wordobjectheader()
def test_read3wordheaderobject():
size = 42
s = ints2str(size << 2, 4200 + 0, joinbits([0, 1, 2, 3, 4], [2,6,4,5,12]))
r = imagereader_mock(s + '\x00\x00\x19\x66' * (size - 1))
chunk, pos = r.read_object()
chunk0 = squeakimage.ImageChunk(space, size, 2, 4200, 4)
chunk0.data = [6502] * (size - 1)
assert pos == 8
assert chunk0 == chunk | pypy/lang/smalltalk/test/test_squeakimage.py | import py
from pypy.lang.smalltalk import squeakimage
from pypy.lang.smalltalk.squeakimage import chrs2int
from pypy.lang.smalltalk import objspace
space = objspace.ObjSpace()
# ----- helpers ----------------------------------------------
def ints2str(*ints):
import struct
return struct.pack(">" + "i" * len(ints), *ints)
def joinbits(values, lengths):
result = 0
for each, length in reversed(zip(values, lengths)):
result = result << length
result += each
return result
def imagereader_mock(string):
import StringIO
f = StringIO.StringIO(string)
stream = squeakimage.Stream(f)
return squeakimage.ImageReader(space, stream)
# ----- tests ------------------------------------------------
def test_chrs2int():
assert 1 == chrs2int('\x00\x00\x00\x01')
assert -1 == chrs2int('\xFF\xFF\xFF\xFF')
def test_stream():
stream = imagereader_mock('\x00\x00\x19\x66').stream
n = stream.peek()
assert n == 6502
n = stream.next()
assert n == 6502
py.test.raises(IndexError, lambda: stream.next())
def test_stream_swap():
stream = imagereader_mock('\x66\x19\x00\x00').stream
stream.swap = True
first = stream.next()
assert first == 6502
py.test.raises(IndexError, lambda: stream.next())
def test_stream_many():
stream = imagereader_mock('\x00\x00\x19\x66' * 5).stream
for each in range(5):
first = stream.peek()
assert first == 6502
value = stream.next()
assert value == 6502
py.test.raises(IndexError, lambda: stream.next())
def test_stream_skipbytes():
stream = imagereader_mock('\xFF\xFF\xFF\x00\x00\x19\x66').stream
stream.skipbytes(3)
value = stream.next()
assert value == 6502
py.test.raises(IndexError, lambda: stream.next())
def test_stream_count():
stream = imagereader_mock('\xFF' * 20).stream
stream.next()
stream.next()
stream.reset_count()
assert stream.count == 0
stream.next()
assert stream.count == 4
stream.next()
assert stream.count == 8
def test_simple_joinbits():
assert 0x01010101 == joinbits(([1] * 4), [8,8,8,8])
assert 0xFfFfFfFf == joinbits([255] * 4, [8,8,8,8])
def test_fancy_joinbits():
assert 0x01020304 == joinbits([4,3,2,1], [8,8,8,8])
assert 0x3Ff == joinbits([1,3,7,15], [1,2,3,4])
def test_ints2str():
assert "\x00\x00\x00\x02" == ints2str(2)
assert '\x00\x00\x19\x66\x00\x00\x00\x02' == ints2str(6502,2)
def test_freeblock():
r = imagereader_mock("\x00\x00\x00\x02")
py.test.raises(squeakimage.CorruptImageError, lambda: r.read_object())
def test_1wordobjectheader():
s = ints2str(joinbits([3, 1, 2, 3, 4], [2,6,4,5,12]))
r = imagereader_mock(s)
assert (squeakimage.ImageChunk(space, 1, 2, 3, 4), 0) == r.read_1wordobjectheader()
def test_1wordobjectheader2():
s = ints2str(joinbits([3, 1, 2, 3, 4], [2,6,4,5,12]))
r = imagereader_mock(s * 3)
assert (squeakimage.ImageChunk(space, 1, 2, 3, 4), 0) == r.read_1wordobjectheader()
assert (squeakimage.ImageChunk(space, 1, 2, 3, 4), 4) == r.read_1wordobjectheader()
assert (squeakimage.ImageChunk(space, 1, 2, 3, 4), 8) == r.read_1wordobjectheader()
def test_2wordobjectheader():
s = ints2str(4200 + 1, joinbits([1, 1, 2, 3, 4], [2,6,4,5,12]))
r = imagereader_mock(s)
assert (squeakimage.ImageChunk(space, 1, 2, 4200, 4), 4) == r.read_2wordobjectheader()
def test_3wordobjectheader():
s = ints2str(1701 << 2, 4200 + 0, joinbits([0, 1, 2, 3, 4], [2,6,4,5,12]))
r = imagereader_mock(s)
assert (squeakimage.ImageChunk(space, 1701, 2, 4200, 4), 8) == r.read_3wordobjectheader()
def test_read3wordheaderobject():
size = 42
s = ints2str(size << 2, 4200 + 0, joinbits([0, 1, 2, 3, 4], [2,6,4,5,12]))
r = imagereader_mock(s + '\x00\x00\x19\x66' * (size - 1))
chunk, pos = r.read_object()
chunk0 = squeakimage.ImageChunk(space, size, 2, 4200, 4)
chunk0.data = [6502] * (size - 1)
assert pos == 8
assert chunk0 == chunk | 0.382141 | 0.682529 |
import tensorflow as tf
from ...utils.utils import merge_two_last_dims, shape_list
class TimeReduction(tf.keras.layers.Layer):
def __init__(self, factor: int, name: str = "TimeReduction", **kwargs):
super(TimeReduction, self).__init__(name=name, **kwargs)
self.time_reduction_factor = factor
def padding(self, time):
new_time = tf.math.ceil(time / self.time_reduction_factor) * self.time_reduction_factor
return tf.cast(new_time, dtype=tf.int32) - time
def call(self, inputs, **kwargs):
shape = shape_list(inputs)
outputs = tf.pad(inputs, [[0, 0], [0, self.padding(shape[1])], [0, 0]])
outputs = tf.reshape(outputs, [shape[0], -1, shape[-1] * self.time_reduction_factor])
return outputs
def get_config(self):
config = super(TimeReduction, self).get_config()
config.update({"factor": self.time_reduction_factor})
return config
class VggSubsampling(tf.keras.layers.Layer):
def __init__(self,
filters: tuple or list = (32, 64),
kernel_size: int or list or tuple = 3,
strides: int or list or tuple = 2,
kernel_regularizer=None,
bias_regularizer=None,
name="VggSubsampling",
**kwargs):
super(VggSubsampling, self).__init__(name=name, **kwargs)
self.conv1 = tf.keras.layers.Conv2D(
filters=filters[0], kernel_size=kernel_size, strides=1,
padding="same", name=f"{name}_conv_1",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer
)
self.conv2 = tf.keras.layers.Conv2D(
filters=filters[0], kernel_size=kernel_size, strides=1,
padding="same", name=f"{name}_conv_2",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer
)
self.maxpool1 = tf.keras.layers.MaxPool2D(
pool_size=strides,
padding="same", name=f"{name}_maxpool_1"
)
self.conv3 = tf.keras.layers.Conv2D(
filters=filters[1], kernel_size=kernel_size, strides=1,
padding="same", name=f"{name}_conv_3",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer
)
self.conv4 = tf.keras.layers.Conv2D(
filters=filters[1], kernel_size=kernel_size, strides=1,
padding="same", name=f"{name}_conv_4",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer
)
self.maxpool2 = tf.keras.layers.MaxPool2D(
pool_size=strides,
padding="same", name=f"{name}_maxpool_2"
)
self.time_reduction_factor = self.maxpool1.pool_size[0] + self.maxpool2.pool_size[0]
def call(self, inputs, training=False, **kwargs):
outputs = self.conv1(inputs, training=training)
outputs = tf.nn.relu(outputs)
outputs = self.conv2(outputs, training=training)
outputs = tf.nn.relu(outputs)
outputs = self.maxpool1(outputs, training=training)
outputs = self.conv3(outputs, training=training)
outputs = tf.nn.relu(outputs)
outputs = self.conv4(outputs, training=training)
outputs = tf.nn.relu(outputs)
outputs = self.maxpool2(outputs, training=training)
return merge_two_last_dims(outputs)
def get_config(self):
conf = super(VggSubsampling, self).get_config()
conf.update(self.conv1.get_config())
conf.update(self.conv2.get_config())
conf.update(self.maxpool1.get_config())
conf.update(self.conv3.get_config())
conf.update(self.conv4.get_config())
conf.update(self.maxpool2.get_config())
return conf
class Conv2dSubsampling(tf.keras.layers.Layer):
def __init__(self,
filters: int,
strides: list or tuple or int = 2,
kernel_size: int or list or tuple = 3,
kernel_regularizer=None,
bias_regularizer=None,
name="Conv2dSubsampling",
**kwargs):
super(Conv2dSubsampling, self).__init__(name=name, **kwargs)
self.conv1 = tf.keras.layers.Conv2D(
filters=filters, kernel_size=kernel_size,
strides=strides, padding="same", name=f"{name}_1",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer
)
self.conv2 = tf.keras.layers.Conv2D(
filters=filters, kernel_size=kernel_size,
strides=strides, padding="same", name=f"{name}_2",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer
)
self.time_reduction_factor = self.conv1.strides[0] + self.conv2.strides[0]
def call(self, inputs, training=False, **kwargs):
outputs = self.conv1(inputs, training=training)
outputs = tf.nn.relu(outputs)
outputs = self.conv2(outputs, training=training)
outputs = tf.nn.relu(outputs)
return merge_two_last_dims(outputs)
def get_config(self):
conf = super(Conv2dSubsampling, self).get_config()
conf.update(self.conv1.get_config())
conf.update(self.conv2.get_config())
return conf | tensorflow_asr/models/layers/subsampling.py |
import tensorflow as tf
from ...utils.utils import merge_two_last_dims, shape_list
class TimeReduction(tf.keras.layers.Layer):
def __init__(self, factor: int, name: str = "TimeReduction", **kwargs):
super(TimeReduction, self).__init__(name=name, **kwargs)
self.time_reduction_factor = factor
def padding(self, time):
new_time = tf.math.ceil(time / self.time_reduction_factor) * self.time_reduction_factor
return tf.cast(new_time, dtype=tf.int32) - time
def call(self, inputs, **kwargs):
shape = shape_list(inputs)
outputs = tf.pad(inputs, [[0, 0], [0, self.padding(shape[1])], [0, 0]])
outputs = tf.reshape(outputs, [shape[0], -1, shape[-1] * self.time_reduction_factor])
return outputs
def get_config(self):
config = super(TimeReduction, self).get_config()
config.update({"factor": self.time_reduction_factor})
return config
class VggSubsampling(tf.keras.layers.Layer):
def __init__(self,
filters: tuple or list = (32, 64),
kernel_size: int or list or tuple = 3,
strides: int or list or tuple = 2,
kernel_regularizer=None,
bias_regularizer=None,
name="VggSubsampling",
**kwargs):
super(VggSubsampling, self).__init__(name=name, **kwargs)
self.conv1 = tf.keras.layers.Conv2D(
filters=filters[0], kernel_size=kernel_size, strides=1,
padding="same", name=f"{name}_conv_1",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer
)
self.conv2 = tf.keras.layers.Conv2D(
filters=filters[0], kernel_size=kernel_size, strides=1,
padding="same", name=f"{name}_conv_2",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer
)
self.maxpool1 = tf.keras.layers.MaxPool2D(
pool_size=strides,
padding="same", name=f"{name}_maxpool_1"
)
self.conv3 = tf.keras.layers.Conv2D(
filters=filters[1], kernel_size=kernel_size, strides=1,
padding="same", name=f"{name}_conv_3",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer
)
self.conv4 = tf.keras.layers.Conv2D(
filters=filters[1], kernel_size=kernel_size, strides=1,
padding="same", name=f"{name}_conv_4",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer
)
self.maxpool2 = tf.keras.layers.MaxPool2D(
pool_size=strides,
padding="same", name=f"{name}_maxpool_2"
)
self.time_reduction_factor = self.maxpool1.pool_size[0] + self.maxpool2.pool_size[0]
def call(self, inputs, training=False, **kwargs):
outputs = self.conv1(inputs, training=training)
outputs = tf.nn.relu(outputs)
outputs = self.conv2(outputs, training=training)
outputs = tf.nn.relu(outputs)
outputs = self.maxpool1(outputs, training=training)
outputs = self.conv3(outputs, training=training)
outputs = tf.nn.relu(outputs)
outputs = self.conv4(outputs, training=training)
outputs = tf.nn.relu(outputs)
outputs = self.maxpool2(outputs, training=training)
return merge_two_last_dims(outputs)
def get_config(self):
conf = super(VggSubsampling, self).get_config()
conf.update(self.conv1.get_config())
conf.update(self.conv2.get_config())
conf.update(self.maxpool1.get_config())
conf.update(self.conv3.get_config())
conf.update(self.conv4.get_config())
conf.update(self.maxpool2.get_config())
return conf
class Conv2dSubsampling(tf.keras.layers.Layer):
def __init__(self,
filters: int,
strides: list or tuple or int = 2,
kernel_size: int or list or tuple = 3,
kernel_regularizer=None,
bias_regularizer=None,
name="Conv2dSubsampling",
**kwargs):
super(Conv2dSubsampling, self).__init__(name=name, **kwargs)
self.conv1 = tf.keras.layers.Conv2D(
filters=filters, kernel_size=kernel_size,
strides=strides, padding="same", name=f"{name}_1",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer
)
self.conv2 = tf.keras.layers.Conv2D(
filters=filters, kernel_size=kernel_size,
strides=strides, padding="same", name=f"{name}_2",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer
)
self.time_reduction_factor = self.conv1.strides[0] + self.conv2.strides[0]
def call(self, inputs, training=False, **kwargs):
outputs = self.conv1(inputs, training=training)
outputs = tf.nn.relu(outputs)
outputs = self.conv2(outputs, training=training)
outputs = tf.nn.relu(outputs)
return merge_two_last_dims(outputs)
def get_config(self):
conf = super(Conv2dSubsampling, self).get_config()
conf.update(self.conv1.get_config())
conf.update(self.conv2.get_config())
return conf | 0.953264 | 0.468608 |
from qtpy.QtWidgets import QProgressBar, QVBoxLayout, QHBoxLayout, QLabel, QSlider, QWidget, QSpacerItem, QSizePolicy
from qtpy import QtCore
import pyqtgraph as pg
import numpy as np
from __code._utilities.parent import Parent
from __code.radial_profile.event_handler import EventHandler
class Initialization(Parent):
def pyqtgraph(self):
self.parent.ui.image_view = pg.ImageView(view=pg.PlotItem())
self.parent.ui.image_view.ui.roiBtn.hide()
self.parent.ui.image_view.ui.menuBtn.hide()
bottom_layout = QHBoxLayout()
# file index slider
label_1 = QLabel("File Index")
self.parent.ui.slider = QSlider(QtCore.Qt.Horizontal)
self.parent.ui.slider.setMaximum(len(self.parent.list_images) - 1)
self.parent.ui.slider.setMinimum(0)
self.parent.ui.slider.valueChanged.connect(self.parent.file_index_changed)
# spacer
spacer = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
bottom_layout.addWidget(label_1)
bottom_layout.addWidget(self.parent.ui.slider)
bottom_layout.addItem(spacer)
bottom_widget = QWidget()
bottom_widget.setLayout(bottom_layout)
vertical_layout = QVBoxLayout()
vertical_layout.addWidget(self.parent.ui.image_view)
vertical_layout.addWidget(bottom_widget)
self.parent.ui.widget.setLayout(vertical_layout)
# profile
self.parent.ui.profile_plot = pg.PlotWidget()
vertical_layout = QVBoxLayout()
vertical_layout.addWidget(self.parent.ui.profile_plot)
self.parent.ui.widget_profile.setLayout(vertical_layout)
def crosshair(self):
x0 = float(str(self.parent.ui.circle_x.text()))
y0 = float(str(self.parent.ui.circle_y.text()))
self.parent.vLine = pg.InfiniteLine(pos=x0, angle=90, movable=True)
self.parent.hLine = pg.InfiniteLine(pos=y0, angle=0, movable=True)
self.parent.vLine.sigDragged.connect(self.parent.manual_circle_center_changed)
self.parent.hLine.sigDragged.connect(self.parent.manual_circle_center_changed)
self.parent.ui.image_view.addItem(self.parent.vLine, ignoreBounds=False)
self.parent.ui.image_view.addItem(self.parent.hLine, ignoreBounds=False)
def widgets(self):
# self.parent.ui.circle_y.setText(str(np.int(self.parent.width / 2)))
self.parent.ui.circle_y.setText(str(600))
self.parent.ui.circle_x.setText(str(np.int(self.parent.height / 2)))
# self.parent.ui.lineEdit.setText(str(self.parent.grid_size))
self.parent.ui.guide_red_slider.setValue(self.parent.guide_color_slider['red'])
self.parent.ui.guide_green_slider.setValue(self.parent.guide_color_slider['green'])
self.parent.ui.guide_blue_slider.setValue(self.parent.guide_color_slider['blue'])
self.parent.ui.guide_alpha_slider.setValue(self.parent.guide_color_slider['alpha'])
self.parent.ui.sector_from_value.setText(str(self.parent.sector_range['from']))
self.parent.ui.sector_to_value.setText(str(self.parent.sector_range['to']))
self.parent.ui.sector_from_units.setText(u"\u00B0")
self.parent.ui.sector_to_units.setText(u"\u00B0")
self.parent.ui.from_angle_slider.setValue(self.parent.sector_range['from'])
self.parent.ui.to_angle_slider.setValue(self.parent.sector_range['to'])
self.parent.sector_radio_button_changed()
# defines the maximum value of the radius slider
o_event = EventHandler(parent=self.parent)
max_radius = o_event.retrieve_max_radius_possible()
self.parent.ui.max_radius_slider.setMaximum(max_radius)
self.parent.ui.max_radius_slider.setValue(np.int(max_radius/2))
    def statusbar(self):
        """Install a progress bar in the status bar; hidden until an event runs."""
        self.parent.eventProgress = QProgressBar(self.parent.ui.statusbar)
        self.parent.eventProgress.setMinimumSize(20, 14)
        self.parent.eventProgress.setMaximumSize(540, 100)
        # Invisible by default; long-running handlers toggle visibility.
        self.parent.eventProgress.setVisible(False)
        self.parent.ui.statusbar.addPermanentWidget(self.parent.eventProgress) | notebooks/__code/radial_profile/initialization.py | from qtpy.QtWidgets import QProgressBar, QVBoxLayout, QHBoxLayout, QLabel, QSlider, QWidget, QSpacerItem, QSizePolicy
from qtpy import QtCore
import pyqtgraph as pg
import numpy as np
from __code._utilities.parent import Parent
from __code.radial_profile.event_handler import EventHandler
class Initialization(Parent):
    """One-time UI setup for the radial-profile notebook window.

    Each method wires a distinct part of self.parent's UI: the pyqtgraph
    image/profile views, the crosshair, widget default values, and the
    status-bar progress indicator.
    """

    def pyqtgraph(self):
        """Create the image view (with file-index slider) and the profile plot."""
        self.parent.ui.image_view = pg.ImageView(view=pg.PlotItem())
        # Hide the built-in ROI / menu buttons; the notebook drives these itself.
        self.parent.ui.image_view.ui.roiBtn.hide()
        self.parent.ui.image_view.ui.menuBtn.hide()
        bottom_layout = QHBoxLayout()
        # file index slider
        label_1 = QLabel("File Index")
        self.parent.ui.slider = QSlider(QtCore.Qt.Horizontal)
        self.parent.ui.slider.setMaximum(len(self.parent.list_images) - 1)
        self.parent.ui.slider.setMinimum(0)
        self.parent.ui.slider.valueChanged.connect(self.parent.file_index_changed)
        # spacer
        spacer = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        bottom_layout.addWidget(label_1)
        bottom_layout.addWidget(self.parent.ui.slider)
        bottom_layout.addItem(spacer)
        bottom_widget = QWidget()
        bottom_widget.setLayout(bottom_layout)
        vertical_layout = QVBoxLayout()
        vertical_layout.addWidget(self.parent.ui.image_view)
        vertical_layout.addWidget(bottom_widget)
        self.parent.ui.widget.setLayout(vertical_layout)
        # profile
        self.parent.ui.profile_plot = pg.PlotWidget()
        vertical_layout = QVBoxLayout()
        vertical_layout.addWidget(self.parent.ui.profile_plot)
        self.parent.ui.widget_profile.setLayout(vertical_layout)

    def crosshair(self):
        """Add the draggable crosshair marking the circle center."""
        x0 = float(str(self.parent.ui.circle_x.text()))
        y0 = float(str(self.parent.ui.circle_y.text()))
        self.parent.vLine = pg.InfiniteLine(pos=x0, angle=90, movable=True)
        self.parent.hLine = pg.InfiniteLine(pos=y0, angle=0, movable=True)
        # Dragging either line recomputes the manual circle center.
        self.parent.vLine.sigDragged.connect(self.parent.manual_circle_center_changed)
        self.parent.hLine.sigDragged.connect(self.parent.manual_circle_center_changed)
        self.parent.ui.image_view.addItem(self.parent.vLine, ignoreBounds=False)
        self.parent.ui.image_view.addItem(self.parent.hLine, ignoreBounds=False)

    def widgets(self):
        """Populate widget defaults and size the max-radius slider."""
        # NOTE(review): circle_y is hard-coded to 600; an earlier version used
        # width / 2 — confirm which is intended.
        self.parent.ui.circle_y.setText(str(600))
        # np.int was removed in NumPy 1.24; builtin int is the documented
        # replacement and behaves identically here.
        self.parent.ui.circle_x.setText(str(int(self.parent.height / 2)))
        self.parent.ui.guide_red_slider.setValue(self.parent.guide_color_slider['red'])
        self.parent.ui.guide_green_slider.setValue(self.parent.guide_color_slider['green'])
        self.parent.ui.guide_blue_slider.setValue(self.parent.guide_color_slider['blue'])
        self.parent.ui.guide_alpha_slider.setValue(self.parent.guide_color_slider['alpha'])
        self.parent.ui.sector_from_value.setText(str(self.parent.sector_range['from']))
        self.parent.ui.sector_to_value.setText(str(self.parent.sector_range['to']))
        # Degree sign for the sector angle unit labels.
        self.parent.ui.sector_from_units.setText(u"\u00B0")
        self.parent.ui.sector_to_units.setText(u"\u00B0")
        self.parent.ui.from_angle_slider.setValue(self.parent.sector_range['from'])
        self.parent.ui.to_angle_slider.setValue(self.parent.sector_range['to'])
        self.parent.sector_radio_button_changed()
        # defines the maximum value of the radius slider
        o_event = EventHandler(parent=self.parent)
        max_radius = o_event.retrieve_max_radius_possible()
        self.parent.ui.max_radius_slider.setMaximum(max_radius)
        self.parent.ui.max_radius_slider.setValue(int(max_radius / 2))

    def statusbar(self):
        """Install a progress bar in the status bar; hidden until an event runs."""
        self.parent.eventProgress = QProgressBar(self.parent.ui.statusbar)
        self.parent.eventProgress.setMinimumSize(20, 14)
        self.parent.eventProgress.setMaximumSize(540, 100)
        self.parent.eventProgress.setVisible(False)
        self.parent.ui.statusbar.addPermanentWidget(self.parent.eventProgress) | 0.530236 | 0.192084
import tensorflow.compat.v1 as tf
from model_pruning.python import pruning_hook
class MockPruningObject(object):
  """Test double that records every global_step passed to run_update_step()."""

  def __init__(self):
    # One entry per run_update_step() call, in call order.
    self.logged_steps = []

  def run_update_step(self, session, global_step):  # pylint: disable=unused-argument
    """Record *global_step*; the *session* argument is accepted but ignored."""
    self.logged_steps.append(global_step)
class PruningHookTest(tf.test.TestCase):
  """Tests for ModelPruningHook's listener-invocation schedule."""

  def test_prune_after_session_creation(self):
    """The hook fires its listeners exactly once right after session creation."""
    every_steps = 10
    pruning_obj = MockPruningObject()
    listener = pruning_hook.ModelPruningListener(pruning_obj)
    hook = pruning_hook.ModelPruningHook(every_steps=every_steps,
                                         listeners=[listener])
    # Constructing the MonitoredSession is what triggers the hook.
    mon_sess = tf.train.MonitoredSession(hooks=[hook])  # pylint: disable=unused-variable.
    self.evaluate(tf.global_variables_initializer())
    # One update, recorded at global step 0.
    self.assertEqual(len(pruning_obj.logged_steps), 1)
    self.assertEqual(pruning_obj.logged_steps[0], 0)

  def test_prune_every_n_steps(self):
    """The hook fires at step 0 and then once every `every_steps` global steps."""
    every_steps = 10
    pruning_obj = MockPruningObject()
    with tf.Graph().as_default():
      listener = pruning_hook.ModelPruningListener(pruning_obj)
      hook = pruning_hook.ModelPruningHook(every_steps=every_steps,
                                           listeners=[listener])
      global_step = tf.train.get_or_create_global_step()
      train_op = tf.constant(0)
      global_step_increment_op = tf.assign_add(global_step, 1)
      with tf.train.MonitoredSession(tf.train.ChiefSessionCreator(),
                                     hooks=[hook]) as mon_sess:
        mon_sess.run(tf.global_variables_initializer())
        mon_sess.run(train_op)
        mon_sess.run(global_step_increment_op)
        # ModelPruningHook runs once after session creation, at step 0.
        self.assertEqual(len(pruning_obj.logged_steps), 1)
        self.assertEqual(pruning_obj.logged_steps[0], 0)
        # Advance to global step `every_steps`: the hook fires a second time.
        for _ in range(every_steps-1):
          mon_sess.run(train_op)
          mon_sess.run(global_step_increment_op)
        self.assertEqual(len(pruning_obj.logged_steps), 2)
        self.assertSameElements(pruning_obj.logged_steps, [0, every_steps])
        # Steps every_steps+1 .. 2*every_steps-1: no new firing expected
        # until the next multiple of every_steps is reached.
        for _ in range(every_steps-1):
          mon_sess.run(train_op)
          mon_sess.run(global_step_increment_op)
        self.assertEqual(len(pruning_obj.logged_steps), 2)
        self.assertSameElements(pruning_obj.logged_steps, [0, every_steps])
if __name__ == '__main__':
tf.test.main() | model_pruning/python/pruning_hook_test.py | import tensorflow.compat.v1 as tf
from model_pruning.python import pruning_hook
class MockPruningObject(object):
  """Test double that records every global_step passed to run_update_step()."""

  def __init__(self):
    # One entry per run_update_step() call, in call order.
    self.logged_steps = []

  def run_update_step(self, session, global_step):  # pylint: disable=unused-argument
    """Record *global_step*; the *session* argument is accepted but ignored."""
    self.logged_steps.append(global_step)
class PruningHookTest(tf.test.TestCase):
  """Tests for ModelPruningHook's listener-invocation schedule."""

  def test_prune_after_session_creation(self):
    """The hook fires its listeners exactly once right after session creation."""
    every_steps = 10
    pruning_obj = MockPruningObject()
    listener = pruning_hook.ModelPruningListener(pruning_obj)
    hook = pruning_hook.ModelPruningHook(every_steps=every_steps,
                                         listeners=[listener])
    # Constructing the MonitoredSession is what triggers the hook.
    mon_sess = tf.train.MonitoredSession(hooks=[hook])  # pylint: disable=unused-variable.
    self.evaluate(tf.global_variables_initializer())
    # One update, recorded at global step 0.
    self.assertEqual(len(pruning_obj.logged_steps), 1)
    self.assertEqual(pruning_obj.logged_steps[0], 0)

  def test_prune_every_n_steps(self):
    """The hook fires at step 0 and then once every `every_steps` global steps."""
    every_steps = 10
    pruning_obj = MockPruningObject()
    with tf.Graph().as_default():
      listener = pruning_hook.ModelPruningListener(pruning_obj)
      hook = pruning_hook.ModelPruningHook(every_steps=every_steps,
                                           listeners=[listener])
      global_step = tf.train.get_or_create_global_step()
      train_op = tf.constant(0)
      global_step_increment_op = tf.assign_add(global_step, 1)
      with tf.train.MonitoredSession(tf.train.ChiefSessionCreator(),
                                     hooks=[hook]) as mon_sess:
        mon_sess.run(tf.global_variables_initializer())
        mon_sess.run(train_op)
        mon_sess.run(global_step_increment_op)
        # ModelPruningHook runs once after session creation, at step 0.
        self.assertEqual(len(pruning_obj.logged_steps), 1)
        self.assertEqual(pruning_obj.logged_steps[0], 0)
        # Advance to global step `every_steps`: the hook fires a second time.
        for _ in range(every_steps-1):
          mon_sess.run(train_op)
          mon_sess.run(global_step_increment_op)
        self.assertEqual(len(pruning_obj.logged_steps), 2)
        self.assertSameElements(pruning_obj.logged_steps, [0, every_steps])
        # No additional firing until the next multiple of every_steps.
        for _ in range(every_steps-1):
          mon_sess.run(train_op)
          mon_sess.run(global_step_increment_op)
        self.assertEqual(len(pruning_obj.logged_steps), 2)
        self.assertSameElements(pruning_obj.logged_steps, [0, every_steps])
if __name__ == '__main__':
tf.test.main() | 0.803058 | 0.401864 |
import errno
from abc import abstractmethod
import os
import struct
from twitter.common import log
from twitter.common.lang import Compatibility, Interface
from .filelike import FileLike
class RecordIO(object):
  """Length-prefixed ("framed") record streams.

  Wire format: every record is a 4-byte big-endian unsigned length header
  followed by that many bytes of codec-encoded payload.
  """
  class Error(Exception): pass
  class PrematureEndOfStream(Error): pass
  class RecordSizeExceeded(Error): pass
  class InvalidTypeException(Error): pass
  class InvalidFileHandle(Error): pass
  class InvalidArgument(Error): pass
  class InvalidCodec(Error): pass

  # Size of the '>L' length prefix written before every record.
  RECORD_HEADER_SIZE = 4
  # Hard cap on a single record's payload: 64 MiB.
  MAXIMUM_RECORD_SIZE = 64 * 1024 * 1024

  class Codec(Interface):
    """
    An encoder/decoder interface for bespoke RecordReader/Writers.
    """
    @abstractmethod
    def encode(self, blob):
      """
      Given: blob in custom format
      Return: serialized byte data
      Raises: InvalidTypeException if a bad blob type is supplied
      """

    @abstractmethod
    def decode(self, blob):
      """
      Given: deserialized byte data
      Return: blob in custom format
      Raises: InvalidTypeException if a bad blob type is supplied
      """

  class _Stream(object):
    """
    Shared initialization functionality for Reader/Writer
    """
    def __init__(self, fp, codec):
      # FileLike.get normalizes filenames / raw file objects into one API.
      try:
        self._fp = FileLike.get(fp)
      except ValueError as err:
        raise RecordIO.InvalidFileHandle(err)
      if not isinstance(codec, RecordIO.Codec):
        raise RecordIO.InvalidCodec("Codec must be subclass of RecordIO.Codec")
      self._codec = codec

    def close(self):
      """
      Close the underlying filehandle of the RecordIO stream.
      """
      self._fp.close()

  class Reader(_Stream):
    def __init__(self, fp, codec):
      """
      Initialize a Reader from file-like fp, with RecordIO.Codec codec
      """
      RecordIO._Stream.__init__(self, fp, codec)
      # Write-only handles ('w'/'a' without '+') cannot be read from.
      if ('w' in self._fp.mode or 'a' in self._fp.mode) and '+' not in self._fp.mode:
        raise RecordIO.InvalidFileHandle(
          'Filehandle supplied to RecordReader does not appear to be readable!')

    def __iter__(self):
      """
      Return an iterator over the entire contents of the underlying file handle.
      May raise:
        RecordIO.Error or subclasses
      """
      # Iterate over a dup'd handle so this reader's own offset is untouched.
      try:
        dup_fp = self._fp.dup()
      except self._fp.Error:
        log.error('Failed to dup %r' % self._fp)
        return
      try:
        while True:
          blob = RecordIO.Reader.do_read(dup_fp, self._codec)
          # NOTE(review): a falsy decoded record (e.g. an empty string) stops
          # iteration the same way end-of-stream does — confirm intended.
          if blob:
            yield blob
          else:
            break
      finally:
        dup_fp.close()

    @staticmethod
    def do_read(fp, decoder):
      """
      Read a single record from the given filehandle and decode using the supplied decoder.
      May raise:
        RecordIO.PrematureEndOfStream if the stream is truncated in the middle of
          an expected message
        RecordIO.RecordSizeExceeded if the message exceeds RecordIO.MAXIMUM_RECORD_SIZE
      """
      # read header
      header = fp.read(RecordIO.RECORD_HEADER_SIZE)
      if len(header) == 0:
        log.debug("%s has no data (current offset = %d)" % (fp.name, fp.tell()))
        # Reset EOF (appears to be only necessary on OS X)
        fp.seek(fp.tell())
        return None
      elif len(header) != RecordIO.RECORD_HEADER_SIZE:
        raise RecordIO.PrematureEndOfStream(
          "Expected %d bytes in header, got %d" % (RecordIO.RECORD_HEADER_SIZE, len(header)))
      # Big-endian unsigned 32-bit payload length.
      blob_len = struct.unpack('>L', header)[0]
      if blob_len > RecordIO.MAXIMUM_RECORD_SIZE:
        raise RecordIO.RecordSizeExceeded("Record exceeds maximum allowable size")
      # read frame
      read_blob = fp.read(blob_len)
      if len(read_blob) != blob_len:
        raise RecordIO.PrematureEndOfStream(
          'Expected %d bytes in frame, got %d' % (blob_len, len(read_blob)))
      return decoder.decode(read_blob)

    def read(self):
      """
      Read a single record from this stream.  Updates the file position on both
      success and failure (unless no data is available, in which case the file
      position is unchanged and None is returned.)
      Returns string blob or None if no data available.
      May raise:
        RecordIO.PrematureEndOfStream if the stream is truncated in the middle of
          an expected message
        RecordIO.RecordSizeExceeded if the message exceeds RecordIO.MAXIMUM_RECORD_SIZE
      """
      return RecordIO.Reader.do_read(self._fp, self._codec)

    def try_read(self):
      """
      Attempt to read a single record from the stream.  Only updates the file position
      if a read was successful.
      Returns string blob or None if no data available.
      May raise:
        RecordIO.RecordSizeExceeded
      """
      # Remember the offset so a torn read can be rolled back.
      pos = self._fp.tell()
      try:
        return self.read()
      except RecordIO.PrematureEndOfStream as e:
        log.debug('Got premature end of stream [%s], skipping - %s' % (self._fp.name, e))
        self._fp.seek(pos)
        return None

  class Writer(_Stream):
    def __init__(self, fp, codec, sync=False):
      """
      Initialize a Writer from the FileLike fp, with RecordIO.Codec codec.
      If sync=True is supplied, the stream is flushed after every write (note:
      flush(), not fsync() -- data reaches the OS but is not forced to disk);
      otherwise standard filesystem buffering is employed.
      """
      RecordIO._Stream.__init__(self, fp, codec)
      # The handle must be opened for writing ('w', 'a', or any '+' mode).
      if 'w' not in self._fp.mode and 'a' not in self._fp.mode and '+' not in self._fp.mode:
        raise RecordIO.InvalidFileHandle(
          'Filehandle supplied to RecordWriter does not appear to be writeable!')
      self.set_sync(sync)

    def set_sync(self, value):
      # Coerce to bool so later `if sync:` checks are unambiguous.
      self._sync = bool(value)

    @staticmethod
    def do_write(fp, record, codec, sync=False):
      """
      Write a record to the specified fp using the supplied codec.
      Returns True on success, False on any filesystem failure.
      """
      blob = codec.encode(record)
      header = struct.pack(">L", len(blob))
      # NOTE(review): if the header write succeeds but the payload write
      # fails, the stream is left with a torn frame -- confirm acceptable.
      try:
        fp.write(header)
        fp.write(blob)
      except (IOError, OSError) as e:
        log.debug("Got exception in write(%s): %s" % (fp.name, e))
        return False
      if sync:
        fp.flush()
      return True

    @staticmethod
    def append(filename, record, codec):
      """
      Given a filename stored in RecordIO format, open the file, append a
      record to it and close.
      Returns True if it succeeds, or False if it fails for any reason.
      Raises IOError, OSError if there is a problem opening filename for appending.
      """
      if not isinstance(codec, RecordIO.Codec):
        raise RecordIO.InvalidCodec("append called with an invalid codec!")
      if not os.path.exists(filename):
        return False
      # do_write swallows IOError/OSError itself, so exceptions reaching the
      # handler below come either from open() (fp still None => re-raise, per
      # the docstring) or from the implicit close() at `with` exit (fp set =>
      # log and report failure).
      try:
        fp = None
        with open(filename, "a+") as fp:
          return RecordIO.Writer.do_write(fp, record, codec)
      except (IOError, OSError) as e:
        if fp:
          log.debug("Unexpected exception (%s), but continuing" % e)
          return False
        else:
          raise

    def write(self, blob):
      """
      Append the blob to the current RecordWriter.
      Returns True on success, False on any filesystem failure.
      """
      return RecordIO.Writer.do_write(self._fp, blob, self._codec, sync=self._sync)
class StringCodec(RecordIO.Codec):
  """Identity codec for string payloads.

  Performs no transformation at all: encode/decode merely assert that the
  payload is a string and hand it back unchanged.
  """
  @staticmethod
  def _validate(blob):
    # Pass strings through untouched; reject everything else.
    if isinstance(blob, Compatibility.string):
      return blob
    raise RecordIO.InvalidTypeException("blob (type=%s) not StringType!" % type(blob))

  def encode(self, blob):
    # Encoding is a no-op beyond type validation.
    return self._validate(blob)

  def decode(self, blob):
    # Decoding mirrors encode: validate and pass through.
    return self._validate(blob)
class StringRecordReader(RecordIO.Reader):
  """
  Simple RecordReader that deserializes strings.
  """
  def __init__(self, fp):
    # Fixes the codec to the pass-through StringCodec.
    RecordIO.Reader.__init__(self, fp, StringCodec())
class StringRecordWriter(RecordIO.Writer):
  """
  Write framed string records to a stream.
  Max record size is 64MB for the sake of sanity.
  """
  def __init__(self, fp, sync=False):
    # `sync` is forwarded to RecordIO.Writer (flush after every write).
    # It defaults to False, preserving the historical behavior for
    # existing single-argument callers.
    RecordIO.Writer.__init__(self, fp, StringCodec(), sync=sync)

  @staticmethod
  def append(filename, blob, codec=None):
    """Append one string record to *filename*; see RecordIO.Writer.append.

    The previous signature used `codec=StringCodec()`, a default evaluated
    once at definition time and shared by every call (the mutable-default
    anti-pattern).  StringCodec is stateless so behavior is unchanged, but a
    None sentinel with a per-call instance is the safe idiom.
    """
    return RecordIO.Writer.append(
      filename, blob, codec if codec is not None else StringCodec())
# Backwards-compatible aliases: the string flavors are the default Reader/Writer.
RecordReader = StringRecordReader
RecordWriter = StringRecordWriter | src/python/twitter/common/recordio/recordio.py | import errno
from abc import abstractmethod
import os
import struct
from twitter.common import log
from twitter.common.lang import Compatibility, Interface
from .filelike import FileLike
class RecordIO(object):
  """Length-prefixed ("framed") record streams.

  Wire format: every record is a 4-byte big-endian unsigned length header
  followed by that many bytes of codec-encoded payload.
  """
  class Error(Exception): pass
  class PrematureEndOfStream(Error): pass
  class RecordSizeExceeded(Error): pass
  class InvalidTypeException(Error): pass
  class InvalidFileHandle(Error): pass
  class InvalidArgument(Error): pass
  class InvalidCodec(Error): pass

  # Size of the '>L' length prefix written before every record.
  RECORD_HEADER_SIZE = 4
  # Hard cap on a single record's payload: 64 MiB.
  MAXIMUM_RECORD_SIZE = 64 * 1024 * 1024

  class Codec(Interface):
    """
    An encoder/decoder interface for bespoke RecordReader/Writers.
    """
    @abstractmethod
    def encode(self, blob):
      """
      Given: blob in custom format
      Return: serialized byte data
      Raises: InvalidTypeException if a bad blob type is supplied
      """

    @abstractmethod
    def decode(self, blob):
      """
      Given: deserialized byte data
      Return: blob in custom format
      Raises: InvalidTypeException if a bad blob type is supplied
      """

  class _Stream(object):
    """
    Shared initialization functionality for Reader/Writer
    """
    def __init__(self, fp, codec):
      # FileLike.get normalizes filenames / raw file objects into one API.
      try:
        self._fp = FileLike.get(fp)
      except ValueError as err:
        raise RecordIO.InvalidFileHandle(err)
      if not isinstance(codec, RecordIO.Codec):
        raise RecordIO.InvalidCodec("Codec must be subclass of RecordIO.Codec")
      self._codec = codec

    def close(self):
      """
      Close the underlying filehandle of the RecordIO stream.
      """
      self._fp.close()

  class Reader(_Stream):
    def __init__(self, fp, codec):
      """
      Initialize a Reader from file-like fp, with RecordIO.Codec codec
      """
      RecordIO._Stream.__init__(self, fp, codec)
      # Write-only handles ('w'/'a' without '+') cannot be read from.
      if ('w' in self._fp.mode or 'a' in self._fp.mode) and '+' not in self._fp.mode:
        raise RecordIO.InvalidFileHandle(
          'Filehandle supplied to RecordReader does not appear to be readable!')

    def __iter__(self):
      """
      Return an iterator over the entire contents of the underlying file handle.
      May raise:
        RecordIO.Error or subclasses
      """
      # Iterate over a dup'd handle so this reader's own offset is untouched.
      try:
        dup_fp = self._fp.dup()
      except self._fp.Error:
        log.error('Failed to dup %r' % self._fp)
        return
      try:
        while True:
          blob = RecordIO.Reader.do_read(dup_fp, self._codec)
          # NOTE(review): a falsy decoded record (e.g. an empty string) stops
          # iteration the same way end-of-stream does — confirm intended.
          if blob:
            yield blob
          else:
            break
      finally:
        dup_fp.close()

    @staticmethod
    def do_read(fp, decoder):
      """
      Read a single record from the given filehandle and decode using the supplied decoder.
      May raise:
        RecordIO.PrematureEndOfStream if the stream is truncated in the middle of
          an expected message
        RecordIO.RecordSizeExceeded if the message exceeds RecordIO.MAXIMUM_RECORD_SIZE
      """
      # read header
      header = fp.read(RecordIO.RECORD_HEADER_SIZE)
      if len(header) == 0:
        log.debug("%s has no data (current offset = %d)" % (fp.name, fp.tell()))
        # Reset EOF (appears to be only necessary on OS X)
        fp.seek(fp.tell())
        return None
      elif len(header) != RecordIO.RECORD_HEADER_SIZE:
        raise RecordIO.PrematureEndOfStream(
          "Expected %d bytes in header, got %d" % (RecordIO.RECORD_HEADER_SIZE, len(header)))
      # Big-endian unsigned 32-bit payload length.
      blob_len = struct.unpack('>L', header)[0]
      if blob_len > RecordIO.MAXIMUM_RECORD_SIZE:
        raise RecordIO.RecordSizeExceeded("Record exceeds maximum allowable size")
      # read frame
      read_blob = fp.read(blob_len)
      if len(read_blob) != blob_len:
        raise RecordIO.PrematureEndOfStream(
          'Expected %d bytes in frame, got %d' % (blob_len, len(read_blob)))
      return decoder.decode(read_blob)

    def read(self):
      """
      Read a single record from this stream.  Updates the file position on both
      success and failure (unless no data is available, in which case the file
      position is unchanged and None is returned.)
      Returns string blob or None if no data available.
      May raise:
        RecordIO.PrematureEndOfStream if the stream is truncated in the middle of
          an expected message
        RecordIO.RecordSizeExceeded if the message exceeds RecordIO.MAXIMUM_RECORD_SIZE
      """
      return RecordIO.Reader.do_read(self._fp, self._codec)

    def try_read(self):
      """
      Attempt to read a single record from the stream.  Only updates the file position
      if a read was successful.
      Returns string blob or None if no data available.
      May raise:
        RecordIO.RecordSizeExceeded
      """
      # Remember the offset so a torn read can be rolled back.
      pos = self._fp.tell()
      try:
        return self.read()
      except RecordIO.PrematureEndOfStream as e:
        log.debug('Got premature end of stream [%s], skipping - %s' % (self._fp.name, e))
        self._fp.seek(pos)
        return None

  class Writer(_Stream):
    def __init__(self, fp, codec, sync=False):
      """
      Initialize a Writer from the FileLike fp, with RecordIO.Codec codec.
      If sync=True is supplied, the stream is flushed after every write (note:
      flush(), not fsync() -- data reaches the OS but is not forced to disk);
      otherwise standard filesystem buffering is employed.
      """
      RecordIO._Stream.__init__(self, fp, codec)
      # The handle must be opened for writing ('w', 'a', or any '+' mode).
      if 'w' not in self._fp.mode and 'a' not in self._fp.mode and '+' not in self._fp.mode:
        raise RecordIO.InvalidFileHandle(
          'Filehandle supplied to RecordWriter does not appear to be writeable!')
      self.set_sync(sync)

    def set_sync(self, value):
      # Coerce to bool so later `if sync:` checks are unambiguous.
      self._sync = bool(value)

    @staticmethod
    def do_write(fp, record, codec, sync=False):
      """
      Write a record to the specified fp using the supplied codec.
      Returns True on success, False on any filesystem failure.
      """
      blob = codec.encode(record)
      header = struct.pack(">L", len(blob))
      # NOTE(review): if the header write succeeds but the payload write
      # fails, the stream is left with a torn frame -- confirm acceptable.
      try:
        fp.write(header)
        fp.write(blob)
      except (IOError, OSError) as e:
        log.debug("Got exception in write(%s): %s" % (fp.name, e))
        return False
      if sync:
        fp.flush()
      return True

    @staticmethod
    def append(filename, record, codec):
      """
      Given a filename stored in RecordIO format, open the file, append a
      record to it and close.
      Returns True if it succeeds, or False if it fails for any reason.
      Raises IOError, OSError if there is a problem opening filename for appending.
      """
      if not isinstance(codec, RecordIO.Codec):
        raise RecordIO.InvalidCodec("append called with an invalid codec!")
      if not os.path.exists(filename):
        return False
      # do_write swallows IOError/OSError itself, so exceptions reaching the
      # handler below come either from open() (fp still None => re-raise, per
      # the docstring) or from the implicit close() at `with` exit (fp set =>
      # log and report failure).
      try:
        fp = None
        with open(filename, "a+") as fp:
          return RecordIO.Writer.do_write(fp, record, codec)
      except (IOError, OSError) as e:
        if fp:
          log.debug("Unexpected exception (%s), but continuing" % e)
          return False
        else:
          raise

    def write(self, blob):
      """
      Append the blob to the current RecordWriter.
      Returns True on success, False on any filesystem failure.
      """
      return RecordIO.Writer.do_write(self._fp, blob, self._codec, sync=self._sync)
class StringCodec(RecordIO.Codec):
  """Identity codec for string payloads.

  Performs no transformation at all: encode/decode merely assert that the
  payload is a string and hand it back unchanged.
  """
  @staticmethod
  def _validate(blob):
    # Pass strings through untouched; reject everything else.
    if isinstance(blob, Compatibility.string):
      return blob
    raise RecordIO.InvalidTypeException("blob (type=%s) not StringType!" % type(blob))

  def encode(self, blob):
    # Encoding is a no-op beyond type validation.
    return self._validate(blob)

  def decode(self, blob):
    # Decoding mirrors encode: validate and pass through.
    return self._validate(blob)
class StringRecordReader(RecordIO.Reader):
  """
  Simple RecordReader that deserializes strings.
  """
  def __init__(self, fp):
    # Fixes the codec to the pass-through StringCodec.
    RecordIO.Reader.__init__(self, fp, StringCodec())
class StringRecordWriter(RecordIO.Writer):
  """
  Write framed string records to a stream.
  Max record size is 64MB for the sake of sanity.
  """
  def __init__(self, fp, sync=False):
    # `sync` is forwarded to RecordIO.Writer (flush after every write).
    # It defaults to False, preserving the historical behavior for
    # existing single-argument callers.
    RecordIO.Writer.__init__(self, fp, StringCodec(), sync=sync)

  @staticmethod
  def append(filename, blob, codec=None):
    """Append one string record to *filename*; see RecordIO.Writer.append.

    The previous signature used `codec=StringCodec()`, a default evaluated
    once at definition time and shared by every call (the mutable-default
    anti-pattern).  StringCodec is stateless so behavior is unchanged, but a
    None sentinel with a per-call instance is the safe idiom.
    """
    return RecordIO.Writer.append(
      filename, blob, codec if codec is not None else StringCodec())
# Backwards-compatible aliases: the string flavors are the default Reader/Writer.
RecordReader = StringRecordReader
RecordWriter = StringRecordWriter | 0.630685 | 0.186206
from fatartifacts.database import base
from datetime import datetime
from pony import orm
from typing import *
import threading
def declare_entities(db):
  """Declare the Location/Object Pony entities on *db*.

  Locations form a tree rooted at a single nameless root row; an Object is
  the payload optionally attached to a leaf Location.
  """
  class Location(db.Entity):
    name = orm.Optional(str)
    parent = orm.Optional('Location', reverse='children')
    children = orm.Set('Location', cascade_delete=True)
    metadata = orm.Required(orm.Json)
    # NOTE(review): timestamps are naive datetime.utcnow() values; callers
    # must treat them as UTC -- confirm before mixing with aware datetimes.
    date_created = orm.Required(datetime)
    date_updated = orm.Required(datetime)
    object = orm.Optional('Object', cascade_delete=True)
    # (name, parent) lookups drive get_by_db_location; index them together.
    orm.composite_index(name, parent)

    @staticmethod
    def get_root() -> 'Location':
      """Return the singleton nameless root row, creating it on first use."""
      root = Location.get(name='', parent=None)
      if not root:
        now = datetime.utcnow()
        root = Location(name='', parent=None, metadata={}, date_created=now,
                        date_updated=now)
      return root

    @classmethod
    def get_by_db_location(cls, loc:base.Location) -> Optional['Location']:
      """Walk the tree from the root along *loc*; None if any level is missing."""
      current = cls.get_root()
      for i in range(len(loc)):
        current = cls.get(name=loc[i], parent=current)
        if not current:
          return None
      return current

    @classmethod
    def from_db_location(cls, loc:base.Location, metadata:Dict) -> 'Location':
      """Create the entity for *loc*; its parent chain must already exist."""
      now = datetime.utcnow()
      parent = cls.get_by_db_location(loc.parent)
      if not parent:
        raise base.LocationDoesNotExist(loc.parent)
      entity = cls(name=loc[-1], parent=parent, metadata=metadata,
                   date_created=now, date_updated=now)
      return entity

    def as_db_location(self) -> base.Location:
      """Rebuild the base.Location path by walking parents up to the root."""
      parts = []
      while self:
        if not self.name:
          # Only the root row may have an empty name (enforced by validate()).
          assert self == Location.get_root()
          break
        parts.append(self.name)
        self = self.parent
      return base.Location(reversed(parts))

    def as_db_location_info(self) -> base.LocationInfo:
      return base.LocationInfo(
        self.as_db_location(),
        self.metadata,
        self.date_created,
        self.date_updated)

    def collect_objects(self) -> Iterable['Object']:
      """Yield this location's object (if any) and all descendants' objects."""
      if self.object: yield self.object
      for child in self.children:
        yield from child.collect_objects()

    def validate(self):
      # Only the root (parent=None) is allowed an empty name.
      if not self.name and self.parent:
        raise ValueError('non-root level can not have a zero-length name')

    def before_insert(self):
      # Pony hook: runs just before the INSERT is flushed.
      self.validate()

    def before_update(self):
      # Pony hook: refresh the modification timestamp on every UPDATE.
      self.date_updated = datetime.utcnow()
      self.validate()

  class Object(db.Entity):
    # One object per leaf location; sharing the PK enforces the 1:1 link.
    location = orm.PrimaryKey(Location)
    filename = orm.Required(str)
    mime = orm.Required(str)
    uri = orm.Required(str)

    @classmethod
    def from_db_location(cls, loc:base.Location, metadata:Dict,
                         filename: str, mime: str, uri: str) -> 'Object':
      """Create the Location for *loc* and attach a new Object to it."""
      location = Location.from_db_location(loc, metadata)
      # (A previously assigned `now = datetime.utcnow()` local was unused and
      # has been removed; Location.from_db_location stamps the timestamps.)
      entity = cls(location=location, filename=filename, mime=mime, uri=uri)
      return entity

    def as_db_object_info(self) -> base.ObjectInfo:
      return base.ObjectInfo(
        self.location.as_db_location(),
        self.location.metadata,
        self.location.date_created,
        self.location.date_updated,
        self.filename,
        self.mime,
        self.uri)
class PonyDatabase(base.Database):
def __init__(self, num_levels):
self._num_levels = num_levels
self._db = orm.Database()
declare_entities(self._db)
def connect(self, *args, **kwargs):
create_tables = kwargs.pop('create_tables', True)
self._db.bind(*args, **kwargs)
self._db.generate_mapping(create_tables=create_tables)
with orm.db_session():
self._db.Location.get_root() # ensure that the root exists.
def num_levels(self):
return self._num_levels
def query_context(self):
return orm.db_session
def get_location(self, location):
if len(location) >= self._num_levels:
raise base.InvalidLocationQuery(location)
entity = self._db.Location.get_by_db_location(location)
if not entity:
raise base.LocationDoesNotExist(location)
return entity.as_db_location_info()
def get_object(self, location):
if len(location) != self._num_levels:
raise base.InvalidLocationQuery(location)
entity = self._db.Location.get_by_db_location(location)
if not entity:
raise base.LocationDoesNotExist(location)
return entity.object.as_db_object_info()
def list_location(self, location, filter=None):
# XXX Implement filter.
if len(location) >= self._num_levels:
raise base.InvalidLocationQuery(location)
entity = self._db.Location.get_by_db_location(location)
if not entity:
raise base.LocationDoesNotExist(location)
return (x.as_db_location_info() for x in entity.children)
def list_objects(self, location, filter=None):
# XXX Implement filter.
if len(location) not in (self._num_levels, self._num_levels - 1):
raise base.InvalidLocationQuery(location)
entity = self._db.Location.get_by_db_location(location)
if not entity:
raise base.LocationDoesNotExist(location)
if len(location) == self._num_levels:
yield entity.object.as_db_object_info()
else:
yield from (x.object.as_db_object_info() for x in entity.children if x.object)
def create_location(self, info, update_if_exists=False):
if len(info.location) > (self._num_levels - 1):
raise base.InvalidLocationQuery(info.location)
entity = self._db.Location.get_by_db_location(info.location)
if entity and not update_if_exists:
raise base.LocationAlreadyExists(info.location)
if entity:
if info.metadata is not None:
entity.metadata = info.metadata
return False # updated
else:
entity = self._db.Location.from_db_location(
info.location,
metadata=info.metadata or {})
assert entity.as_db_location() == info.location, (entity.as_db_location(), info.location)
return True # newly created location
def create_object(self, info, update_if_exists=False):
if len(info.location) != self._num_levels:
raise base.InvalidLocationQuery(info.location)
entity = self._db.Location.get_by_db_location(info.location)
if entity and not update_if_exists:
raise base.LocationAlreadyExists(info.location)
if entity:
if info.metadata is not None:
entity.metadata = info.metadata
if entity.object:
entity.object.filename = info.filename
entity.object.mime = info.mime
entity.object.uri = info.uri
else:
entity.object = self._db.Object(location=entity,
filename=info.filename, uri=info.uri, mime=info.mime)
return False # updated
else:
entity = self._db.Object.from_db_location(
info.location,
metadata=info.metadata or {},
filename=info.filename,
mime=info.mime,
uri=info.uri)
assert entity.location.as_db_location() == info.location, (entity.as_db_location(), info.location)
return True # newly created location
def delete_location(self, location, recursive):
if len(location) > self._num_levels:
raise base.InvalidLocationQuery(location)
entity = self._db.Location.get_by_db_location(location)
if not entity:
raise base.LocationDoesNotExist(location)
if entity.children and not recursive:
raise base.LocationHasChildren(location)
objects = [x.as_db_object_info() for x in entity.collect_objects()]
if len(location) == 0:
# The root location can not be deleted, but it's children can be.
entity.children.select().delete()
else:
if not entity:
raise base.LocationDoesNotExist(location)
entity.delete()
return objects | fatartifacts/database/ponyorm.py | from fatartifacts.database import base
from datetime import datetime
from pony import orm
from typing import *
import threading
def declare_entities(db):
  """Declare the Location/Object Pony entities on *db*.

  Locations form a tree rooted at a single nameless root row; an Object is
  the payload optionally attached to a leaf Location.
  """
  class Location(db.Entity):
    name = orm.Optional(str)
    parent = orm.Optional('Location', reverse='children')
    children = orm.Set('Location', cascade_delete=True)
    metadata = orm.Required(orm.Json)
    # NOTE(review): timestamps are naive datetime.utcnow() values; callers
    # must treat them as UTC -- confirm before mixing with aware datetimes.
    date_created = orm.Required(datetime)
    date_updated = orm.Required(datetime)
    object = orm.Optional('Object', cascade_delete=True)
    # (name, parent) lookups drive get_by_db_location; index them together.
    orm.composite_index(name, parent)

    @staticmethod
    def get_root() -> 'Location':
      """Return the singleton nameless root row, creating it on first use."""
      root = Location.get(name='', parent=None)
      if not root:
        now = datetime.utcnow()
        root = Location(name='', parent=None, metadata={}, date_created=now,
                        date_updated=now)
      return root

    @classmethod
    def get_by_db_location(cls, loc:base.Location) -> Optional['Location']:
      """Walk the tree from the root along *loc*; None if any level is missing."""
      current = cls.get_root()
      for i in range(len(loc)):
        current = cls.get(name=loc[i], parent=current)
        if not current:
          return None
      return current

    @classmethod
    def from_db_location(cls, loc:base.Location, metadata:Dict) -> 'Location':
      """Create the entity for *loc*; its parent chain must already exist."""
      now = datetime.utcnow()
      parent = cls.get_by_db_location(loc.parent)
      if not parent:
        raise base.LocationDoesNotExist(loc.parent)
      entity = cls(name=loc[-1], parent=parent, metadata=metadata,
                   date_created=now, date_updated=now)
      return entity

    def as_db_location(self) -> base.Location:
      """Rebuild the base.Location path by walking parents up to the root."""
      parts = []
      while self:
        if not self.name:
          # Only the root row may have an empty name (enforced by validate()).
          assert self == Location.get_root()
          break
        parts.append(self.name)
        self = self.parent
      return base.Location(reversed(parts))

    def as_db_location_info(self) -> base.LocationInfo:
      return base.LocationInfo(
        self.as_db_location(),
        self.metadata,
        self.date_created,
        self.date_updated)

    def collect_objects(self) -> Iterable['Object']:
      """Yield this location's object (if any) and all descendants' objects."""
      if self.object: yield self.object
      for child in self.children:
        yield from child.collect_objects()

    def validate(self):
      # Only the root (parent=None) is allowed an empty name.
      if not self.name and self.parent:
        raise ValueError('non-root level can not have a zero-length name')

    def before_insert(self):
      # Pony hook: runs just before the INSERT is flushed.
      self.validate()

    def before_update(self):
      # Pony hook: refresh the modification timestamp on every UPDATE.
      self.date_updated = datetime.utcnow()
      self.validate()

  class Object(db.Entity):
    # One object per leaf location; sharing the PK enforces the 1:1 link.
    location = orm.PrimaryKey(Location)
    filename = orm.Required(str)
    mime = orm.Required(str)
    uri = orm.Required(str)

    @classmethod
    def from_db_location(cls, loc:base.Location, metadata:Dict,
                         filename: str, mime: str, uri: str) -> 'Object':
      """Create the Location for *loc* and attach a new Object to it."""
      location = Location.from_db_location(loc, metadata)
      # (A previously assigned `now = datetime.utcnow()` local was unused and
      # has been removed; Location.from_db_location stamps the timestamps.)
      entity = cls(location=location, filename=filename, mime=mime, uri=uri)
      return entity

    def as_db_object_info(self) -> base.ObjectInfo:
      return base.ObjectInfo(
        self.location.as_db_location(),
        self.location.metadata,
        self.location.date_created,
        self.location.date_updated,
        self.filename,
        self.mime,
        self.uri)
class PonyDatabase(base.Database):
  def __init__(self, num_levels):
    """Create a wrapper with *num_levels* location levels; call connect() before use."""
    self._num_levels = num_levels
    self._db = orm.Database()
    declare_entities(self._db)
  def connect(self, *args, **kwargs):
    """Bind the Pony database; extra args go to Database.bind().

    Pass create_tables=False to skip DDL generation.
    """
    create_tables = kwargs.pop('create_tables', True)
    self._db.bind(*args, **kwargs)
    self._db.generate_mapping(create_tables=create_tables)
    with orm.db_session():
      self._db.Location.get_root()  # ensure that the root exists.
def num_levels(self):
return self._num_levels
def query_context(self):
return orm.db_session
def get_location(self, location):
if len(location) >= self._num_levels:
raise base.InvalidLocationQuery(location)
entity = self._db.Location.get_by_db_location(location)
if not entity:
raise base.LocationDoesNotExist(location)
return entity.as_db_location_info()
def get_object(self, location):
if len(location) != self._num_levels:
raise base.InvalidLocationQuery(location)
entity = self._db.Location.get_by_db_location(location)
if not entity:
raise base.LocationDoesNotExist(location)
return entity.object.as_db_object_info()
def list_location(self, location, filter=None):
# XXX Implement filter.
if len(location) >= self._num_levels:
raise base.InvalidLocationQuery(location)
entity = self._db.Location.get_by_db_location(location)
if not entity:
raise base.LocationDoesNotExist(location)
return (x.as_db_location_info() for x in entity.children)
def list_objects(self, location, filter=None):
# XXX Implement filter.
if len(location) not in (self._num_levels, self._num_levels - 1):
raise base.InvalidLocationQuery(location)
entity = self._db.Location.get_by_db_location(location)
if not entity:
raise base.LocationDoesNotExist(location)
if len(location) == self._num_levels:
yield entity.object.as_db_object_info()
else:
yield from (x.object.as_db_object_info() for x in entity.children if x.object)
def create_location(self, info, update_if_exists=False):
if len(info.location) > (self._num_levels - 1):
raise base.InvalidLocationQuery(info.location)
entity = self._db.Location.get_by_db_location(info.location)
if entity and not update_if_exists:
raise base.LocationAlreadyExists(info.location)
if entity:
if info.metadata is not None:
entity.metadata = info.metadata
return False # updated
else:
entity = self._db.Location.from_db_location(
info.location,
metadata=info.metadata or {})
assert entity.as_db_location() == info.location, (entity.as_db_location(), info.location)
return True # newly created location
def create_object(self, info, update_if_exists=False):
if len(info.location) != self._num_levels:
raise base.InvalidLocationQuery(info.location)
entity = self._db.Location.get_by_db_location(info.location)
if entity and not update_if_exists:
raise base.LocationAlreadyExists(info.location)
if entity:
if info.metadata is not None:
entity.metadata = info.metadata
if entity.object:
entity.object.filename = info.filename
entity.object.mime = info.mime
entity.object.uri = info.uri
else:
entity.object = self._db.Object(location=entity,
filename=info.filename, uri=info.uri, mime=info.mime)
return False # updated
else:
entity = self._db.Object.from_db_location(
info.location,
metadata=info.metadata or {},
filename=info.filename,
mime=info.mime,
uri=info.uri)
assert entity.location.as_db_location() == info.location, (entity.as_db_location(), info.location)
return True # newly created location
def delete_location(self, location, recursive):
if len(location) > self._num_levels:
raise base.InvalidLocationQuery(location)
entity = self._db.Location.get_by_db_location(location)
if not entity:
raise base.LocationDoesNotExist(location)
if entity.children and not recursive:
raise base.LocationHasChildren(location)
objects = [x.as_db_object_info() for x in entity.collect_objects()]
if len(location) == 0:
# The root location can not be deleted, but it's children can be.
entity.children.select().delete()
else:
if not entity:
raise base.LocationDoesNotExist(location)
entity.delete()
return objects | 0.647687 | 0.120801 |
from tornado.escape import json_encode
from tornado.web import RequestHandler
from webspider import constants
from webspider.exceptions import BaseException, ResourceNotFoundWebException
from webspider.web.formatter import Formatter
from webspider.utils.sql import remove_sessions
class BaseApiHandler(RequestHandler):
def write_error(self, status_code, **kwargs):
exception = kwargs['exc_info'][1]
# TODO 后端改成纯 API 后,删除其逻辑
# 生产环境下, 且请求非 API 接口, 渲染错误页面
if not constants.DEBUG and isinstance(self, BasePageHandler):
self._handler_production_page_error(exception)
return
if isinstance(exception, BaseException):
self.render_exception(exception)
else:
RequestHandler.write_error(self, status_code=status_code, **kwargs)
def auto_render(self, data):
formatted_dict = Formatter.format(data)
self.render_json(formatted_dict)
def _handler_production_page_error(self, exception):
"""处理生产环境下页面的错误"""
if isinstance(exception, ResourceNotFoundWebException):
self.render('404.html')
else:
self.render('500.html')
def render_exception(self, exception):
self.set_status(
status_code=exception.STATUS_CODE,
reason=exception.message
)
error_dict = {
'error': {
'code': exception.code,
'name': exception.__class__.__name__,
'message': exception.message,
'data': exception.data if exception.data else '',
'debug_message': exception.debug_message if exception.data else ''
}
}
self.render_json(error_dict)
def render_json(self, data):
self.set_header('Content-Type', 'application/json')
self.finish(json_encode(data))
def on_finish(self):
remove_sessions()
# TODO page to api
class BasePageHandler(BaseApiHandler):
"""前后端代码混合型的页面 Handler"""
pass | webspider/web/handlers/base.py | from tornado.escape import json_encode
from tornado.web import RequestHandler
from webspider import constants
from webspider.exceptions import BaseException, ResourceNotFoundWebException
from webspider.web.formatter import Formatter
from webspider.utils.sql import remove_sessions
class BaseApiHandler(RequestHandler):
def write_error(self, status_code, **kwargs):
exception = kwargs['exc_info'][1]
# TODO 后端改成纯 API 后,删除其逻辑
# 生产环境下, 且请求非 API 接口, 渲染错误页面
if not constants.DEBUG and isinstance(self, BasePageHandler):
self._handler_production_page_error(exception)
return
if isinstance(exception, BaseException):
self.render_exception(exception)
else:
RequestHandler.write_error(self, status_code=status_code, **kwargs)
def auto_render(self, data):
formatted_dict = Formatter.format(data)
self.render_json(formatted_dict)
def _handler_production_page_error(self, exception):
"""处理生产环境下页面的错误"""
if isinstance(exception, ResourceNotFoundWebException):
self.render('404.html')
else:
self.render('500.html')
def render_exception(self, exception):
self.set_status(
status_code=exception.STATUS_CODE,
reason=exception.message
)
error_dict = {
'error': {
'code': exception.code,
'name': exception.__class__.__name__,
'message': exception.message,
'data': exception.data if exception.data else '',
'debug_message': exception.debug_message if exception.data else ''
}
}
self.render_json(error_dict)
def render_json(self, data):
self.set_header('Content-Type', 'application/json')
self.finish(json_encode(data))
def on_finish(self):
remove_sessions()
# TODO page to api
class BasePageHandler(BaseApiHandler):
"""前后端代码混合型的页面 Handler"""
pass | 0.245899 | 0.061171 |
from typing import cast, List, Optional, Tuple
import argparse
import asyncio
from dataclasses import dataclass
import itertools
import random
import sys
import numpy as np
import cirq
def build_circuit() -> Tuple[cirq.Circuit, List[cirq.Qid]]:
# Builds an arbitrary circuit to test. Do not include a measurement gate.
# The circuit need not be Clifford, but if it is, simulations will be
# faster.
qubits: List[cirq.Qid] = cast(List[cirq.Qid], cirq.LineQubit.range(3))
circuit: cirq.Circuit = cirq.Circuit(cirq.CNOT(qubits[0], qubits[2]),
cirq.Z(qubits[0]), cirq.H(qubits[2]),
cirq.CNOT(qubits[2], qubits[1]),
cirq.X(qubits[0]), cirq.X(qubits[1]),
cirq.CNOT(qubits[0], qubits[2]))
print('Circuit used:')
print(circuit)
return circuit, qubits
def compute_characteristic_function(circuit: cirq.Circuit,
pauli_string: cirq.PauliString,
qubits: List[cirq.Qid],
density_matrix: np.ndarray):
n_qubits = len(qubits)
d = 2**n_qubits
qubit_map = dict(zip(qubits, range(n_qubits)))
# rho_i or sigma_i in https://arxiv.org/abs/1104.3835
trace = pauli_string.expectation_from_density_matrix(
density_matrix, qubit_map)
assert np.isclose(trace.imag, 0.0, atol=1e-6)
trace = trace.real
prob = trace * trace / d # Pr(i) in https://arxiv.org/abs/1104.3835
return trace, prob
async def estimate_characteristic_function(circuit: cirq.Circuit,
pauli_string: cirq.PauliString,
qubits: List[cirq.Qid],
sampler: cirq.Sampler,
samples_per_term: int):
"""
Estimates the characteristic function using a (noisy) circuit simulator by
sampling the results.
Args:
circuit: The circuit to run the simulation on.
pauli_string: The Pauli string.
qubits: The list of qubits.
sampler: Either a noisy simulator or an engine.
samples_per_term: An integer greater than 0, the number of samples.
Returns:
The estimated characteristic function.
"""
p = cirq.PauliSumCollector(circuit=circuit,
observable=pauli_string,
samples_per_term=samples_per_term)
await p.collect_async(sampler=sampler)
sigma_i = p.estimated_energy()
assert np.isclose(sigma_i.imag, 0.0, atol=1e-6)
sigma_i = sigma_i.real
return sigma_i
def _randomly_sample_from_stabilizer_bases(
stabilizer_basis: List[cirq.DensePauliString],
n_measured_operators: int, n_qubits: int):
"""
Given a stabilizer basis, randomly creates Pauli states by including the
basis vector or not.
Args:
stabilizer_basis: A list of Pauli strings that is the stabilizer basis
to sample from.
n_measured_operators: The total number of Pauli measurements, or None to
explore each Pauli state once.
n_qubits: An integer that is the number of qubits.
Returns:
A list of Pauli strings that is the Pauli states built.
"""
dense_pauli_strings = []
for _ in range(n_measured_operators):
# Build the Pauli string as a random sample of the basis elements.
dense_pauli_string = cirq.DensePauliString.eye(n_qubits)
for stabilizer in stabilizer_basis:
if np.random.randint(2) == 1:
dense_pauli_string *= stabilizer
dense_pauli_strings.append(dense_pauli_string)
return dense_pauli_strings
def _enumerate_all_from_stabilizer_bases(
stabilizer_basis: List[cirq.DensePauliString], n_qubits: int):
"""
Given a stabilizer basis, creates the exhaustive list of Pauli states that
are spanned by the basis.
Args:
stabilizer_basis: A list of Pauli strings that is the stabilizer basis
to build all the Pauli strings.
n_qubits: An integer that is the number of qubits.
Returns:
A list of Pauli strings that is the Pauli states built.
"""
dense_pauli_strings = []
for coefficients in itertools.product([False, True], repeat=n_qubits):
dense_pauli_string = cirq.DensePauliString.eye(n_qubits)
for (keep, stabilizer) in zip(coefficients, stabilizer_basis):
if keep:
dense_pauli_string *= stabilizer
dense_pauli_strings.append(dense_pauli_string)
return dense_pauli_strings
@dataclass
class PauliTrace:
"""
A class that contains the Pauli states as described on page 2 of:
https://arxiv.org/abs/1104.3835
"""
# Pauli string.
P_i: cirq.PauliString
# Coefficient of the ideal pure state expanded in the Pauli basis scaled by
# sqrt(dim H), formally defined at bottom of left column of page 2.
rho_i: float
# A probablity (between 0.0 and 1.0) that is the relevance distribution,
# formally defined at top of right column of page 2.
Pr_i: float
def _estimate_pauli_traces_clifford(n_qubits: int,
clifford_state: cirq.CliffordState,
n_measured_operators: Optional[int]
) -> List[PauliTrace]:
"""
Estimates the Pauli traces in case the circuit is Clifford. When we have a
Clifford circuit, there are 2**n Pauli traces that have probability 1/2**n
and all the other traces have probability 0. In addition, there is a fast
way to compute find out what the traces are. See the documentation of
cirq.CliffordState for more detail. This function uses the speedup to sample
the Pauli states with non-zero probability.
Args:
n_qubits: An integer that is the number of qubits.
clifford_state: The basis of the Pauli states with non-zero probability.
n_measured_operators: The total number of Pauli measurements, or None to
explore each Pauli state once.
Returns:
A list of Pauli states (represented as tuples of Pauli string, rho_i,
and probability.
"""
# When the circuit consists of Clifford gates only, we can sample the
# Pauli states more efficiently as described on page 4 of:
# https://arxiv.org/abs/1104.4695
d = 2**n_qubits
# The stabilizers_basis variable only contains basis vectors. For
# example, if we have n=3 qubits, then we should have 2**n=8 Pauli
# states that we can sample, but the basis will still have 3 entries. We
# must flip a coin for each, whether or not to include them.
stabilizer_basis: List[cirq.DensePauliString] = clifford_state.stabilizers()
if n_measured_operators is not None:
dense_pauli_strings = _randomly_sample_from_stabilizer_bases(
stabilizer_basis, n_measured_operators, n_qubits)
assert len(dense_pauli_strings) == n_measured_operators
else:
dense_pauli_strings = _enumerate_all_from_stabilizer_bases(
stabilizer_basis, n_qubits)
assert len(dense_pauli_strings) == 2**n_qubits
pauli_traces: List[PauliTrace] = []
for dense_pauli_string in dense_pauli_strings:
# The code below is equivalent to calling
# clifford_state.wave_function() and then calling
# compute_characteristic_function() on the results (albeit with a
# wave function instead of a density matrix). It is, however,
# unncessary to do so. Instead we directly obtain the scalar rho_i.
rho_i = dense_pauli_string.coefficient
assert np.isclose(rho_i.imag, 0.0, atol=1e-6)
rho_i = rho_i.real
dense_pauli_string *= rho_i
assert np.isclose(abs(rho_i), 1.0, atol=1e-6)
Pr_i = 1.0 / d
pauli_traces.append(
PauliTrace(P_i=dense_pauli_string.sparse(), rho_i=rho_i, Pr_i=Pr_i))
return pauli_traces
def _estimate_pauli_traces_general(qubits: List[cirq.Qid],
circuit: cirq.Circuit,
n_measured_operators: Optional[int]
) -> List[PauliTrace]:
"""
Estimates the Pauli traces in case the circuit is not Clifford. In this case
we cannot use the speedup implemented in the function
_estimate_pauli_traces_clifford() above, and so do a slow, density matrix
simulation.
Args:
qubits: The list of qubits.
circuit: The (non Clifford) circuit.
n_measured_operators: The total number of Pauli measurements, or None to
explore each Pauli state once.
Returns:
A list of Pauli states (represented as tuples of Pauli string, rho_i,
and probability.
"""
n_qubits = len(qubits)
dense_simulator = cirq.DensityMatrixSimulator()
# rho in https://arxiv.org/abs/1104.3835
clean_density_matrix = cast(
cirq.DensityMatrixTrialResult,
dense_simulator.simulate(circuit)).final_density_matrix
all_operators = itertools.product([cirq.I, cirq.X, cirq.Y, cirq.Z],
repeat=n_qubits)
if n_measured_operators is not None:
dense_operators = random.sample(tuple(all_operators),
n_measured_operators)
else:
dense_operators = list(all_operators)
pauli_traces: List[PauliTrace] = []
for P_i in dense_operators:
pauli_string = cirq.PauliString(dict(zip(qubits, P_i)))
rho_i, Pr_i = compute_characteristic_function(circuit, pauli_string,
qubits,
clean_density_matrix)
pauli_traces.append(PauliTrace(P_i=pauli_string, rho_i=rho_i,
Pr_i=Pr_i))
return pauli_traces
@dataclass
class TrialResult:
"""
Contains the results of a trial, either by simulator or actual run
"""
# The Pauli trace that was measured
pauli_trace: PauliTrace
# Coefficient of the measured/simulated pure state expanded in the Pauli
# basis scaled by sqrt(dim H), formally defined at bottom of left column of
# second page of https://arxiv.org/abs/1104.3835
sigma_i: float
@dataclass
class DFEIntermediateResult:
"""
A container for the various debug and run data from calling the function
direct_fidelity_estimation(). This is useful when running a long-computation
on an actual computer, which is expensive. This way, runs can be more easily
debugged offline.
"""
# If the circuit is Clifford, the Clifford state from which we can extract
# a list of Pauli strings for a basis of the stabilizers.
clifford_state: Optional[cirq.CliffordState]
# The list of Pauli traces we can sample from.
pauli_traces: List[PauliTrace]
# Measurement results from sampling the circuit.
trial_results: List[TrialResult]
def direct_fidelity_estimation(circuit: cirq.Circuit, qubits: List[cirq.Qid],
sampler: cirq.Sampler,
n_measured_operators: Optional[int],
samples_per_term: int):
"""
Implementation of direct fidelity estimation, as per 'Direct Fidelity
Estimation from Few Pauli Measurements' https://arxiv.org/abs/1104.4695 and
'Practical characterization of quantum devices without tomography'
https://arxiv.org/abs/1104.3835.
Args:
circuit: The circuit to run the simulation on.
qubits: The list of qubits.
sampler: Either a noisy simulator or an engine.
n_measured_operators: The total number of Pauli measurements, or None to
explore each Pauli state once.
samples_per_term: if set to 0, we use the 'sampler' parameter above as
a noise (must be of type cirq.DensityMatrixSimulator) and
simulate noise in the circuit. If greater than 0, we instead use the
'sampler' parameter directly to estimate the characteristic
function.
Returns:
The estimated fidelity and a log of the run.
"""
# n_measured_operators is upper-case N in https://arxiv.org/abs/1104.3835
# Number of qubits, lower-case n in https://arxiv.org/abs/1104.3835
n_qubits = len(qubits)
clifford_circuit = True
clifford_state: Optional[cirq.CliffordState] = None
try:
clifford_state = cirq.CliffordState(
qubit_map={qubits[i]: i for i in range(len(qubits))})
for gate in circuit.all_operations():
clifford_state.apply_unitary(gate)
except ValueError:
clifford_circuit = False
# Computes for every \hat{P_i} of https://arxiv.org/abs/1104.3835
# estimate rho_i and Pr(i). We then collect tuples (rho_i, Pr(i), \hat{Pi})
# inside the variable 'pauli_traces'.
if clifford_circuit:
assert clifford_state is not None
pauli_traces = _estimate_pauli_traces_clifford(
n_qubits, cast(cirq.CliffordState, clifford_state),
n_measured_operators)
else:
pauli_traces = _estimate_pauli_traces_general(qubits, circuit,
n_measured_operators)
p = np.asarray([x.Pr_i for x in pauli_traces])
if n_measured_operators is None:
# Since we enumerate all the possible traces, the probs should add to 1.
assert np.isclose(np.sum(p), 1.0, atol=1e-6)
p /= np.sum(p)
fidelity = 0.0
if samples_per_term == 0:
# sigma in https://arxiv.org/abs/1104.3835
if not isinstance(sampler, cirq.DensityMatrixSimulator):
raise TypeError('sampler is not a cirq.DensityMatrixSimulator '
'but samples_per_term is zero.')
noisy_simulator = cast(cirq.DensityMatrixSimulator, sampler)
noisy_density_matrix = cast(
cirq.DensityMatrixTrialResult,
noisy_simulator.simulate(circuit)).final_density_matrix
if clifford_circuit and n_measured_operators is None:
# In case the circuit is Clifford and we compute an exhaustive list of
# Pauli traces, instead of sampling we can simply enumerate them because
# they all have the same probability.
measured_pauli_traces = pauli_traces
else:
# Otherwise, randomly sample as per probability.
measured_pauli_traces = np.random.choice(pauli_traces,
size=len(pauli_traces),
p=p)
trial_results: List[TrialResult] = []
for pauli_trace in measured_pauli_traces:
measure_pauli_string: cirq.PauliString = pauli_trace.P_i
rho_i = pauli_trace.rho_i
if samples_per_term > 0:
sigma_i = asyncio.get_event_loop().run_until_complete(
estimate_characteristic_function(circuit, measure_pauli_string,
qubits, sampler,
samples_per_term))
else:
sigma_i, _ = compute_characteristic_function(
circuit, measure_pauli_string, qubits, noisy_density_matrix)
trial_results.append(
TrialResult(pauli_trace=pauli_trace, sigma_i=sigma_i))
fidelity += sigma_i / rho_i
estimated_fidelity = fidelity / len(pauli_traces)
dfe_intermediate_result = DFEIntermediateResult(
clifford_state=clifford_state,
pauli_traces=pauli_traces,
trial_results=trial_results)
return estimated_fidelity, dfe_intermediate_result
def parse_arguments(args):
"""Helper function that parses the given arguments."""
parser = argparse.ArgumentParser('Direct fidelity estimation.')
# TODO(#2802): Offer some guidance on how to set this flag. Maybe have an
# option to do an exhaustive sample and do numerical studies to know which
# choice is the best.
parser.add_argument('--n_measured_operators',
default=10,
type=int,
help='Numbers of measured operators (Pauli strings). '
'If the circuit is Clifford, these operators are '
'computed by sampling for the basis of stabilizers. If '
'the circuit is not Clifford, this is a random sample '
'all the possible operators. If the value of this '
'parameter is None, we enumerate all the operators '
'which is 2**n_qubit for Clifford circuits and '
'4**n_qubits otherwise.')
parser.add_argument('--samples_per_term',
default=0,
type=int,
help='Number of samples per trial or 0 if no sampling.')
return vars(parser.parse_args(args))
def main(*, n_measured_operators: Optional[int], samples_per_term: int):
circuit, qubits = build_circuit()
noise = cirq.ConstantQubitNoiseModel(cirq.depolarize(0.1))
print('Noise model: %s' % (noise))
noisy_simulator = cirq.DensityMatrixSimulator(noise=noise)
estimated_fidelity, _ = direct_fidelity_estimation(
circuit,
qubits,
noisy_simulator,
n_measured_operators=n_measured_operators,
samples_per_term=samples_per_term)
print('Estimated fidelity: %f' % (estimated_fidelity))
if __name__ == '__main__':
main(**parse_arguments(sys.argv[1:])) | examples/direct_fidelity_estimation.py | from typing import cast, List, Optional, Tuple
import argparse
import asyncio
from dataclasses import dataclass
import itertools
import random
import sys
import numpy as np
import cirq
def build_circuit() -> Tuple[cirq.Circuit, List[cirq.Qid]]:
# Builds an arbitrary circuit to test. Do not include a measurement gate.
# The circuit need not be Clifford, but if it is, simulations will be
# faster.
qubits: List[cirq.Qid] = cast(List[cirq.Qid], cirq.LineQubit.range(3))
circuit: cirq.Circuit = cirq.Circuit(cirq.CNOT(qubits[0], qubits[2]),
cirq.Z(qubits[0]), cirq.H(qubits[2]),
cirq.CNOT(qubits[2], qubits[1]),
cirq.X(qubits[0]), cirq.X(qubits[1]),
cirq.CNOT(qubits[0], qubits[2]))
print('Circuit used:')
print(circuit)
return circuit, qubits
def compute_characteristic_function(circuit: cirq.Circuit,
pauli_string: cirq.PauliString,
qubits: List[cirq.Qid],
density_matrix: np.ndarray):
n_qubits = len(qubits)
d = 2**n_qubits
qubit_map = dict(zip(qubits, range(n_qubits)))
# rho_i or sigma_i in https://arxiv.org/abs/1104.3835
trace = pauli_string.expectation_from_density_matrix(
density_matrix, qubit_map)
assert np.isclose(trace.imag, 0.0, atol=1e-6)
trace = trace.real
prob = trace * trace / d # Pr(i) in https://arxiv.org/abs/1104.3835
return trace, prob
async def estimate_characteristic_function(circuit: cirq.Circuit,
pauli_string: cirq.PauliString,
qubits: List[cirq.Qid],
sampler: cirq.Sampler,
samples_per_term: int):
"""
Estimates the characteristic function using a (noisy) circuit simulator by
sampling the results.
Args:
circuit: The circuit to run the simulation on.
pauli_string: The Pauli string.
qubits: The list of qubits.
sampler: Either a noisy simulator or an engine.
samples_per_term: An integer greater than 0, the number of samples.
Returns:
The estimated characteristic function.
"""
p = cirq.PauliSumCollector(circuit=circuit,
observable=pauli_string,
samples_per_term=samples_per_term)
await p.collect_async(sampler=sampler)
sigma_i = p.estimated_energy()
assert np.isclose(sigma_i.imag, 0.0, atol=1e-6)
sigma_i = sigma_i.real
return sigma_i
def _randomly_sample_from_stabilizer_bases(
stabilizer_basis: List[cirq.DensePauliString],
n_measured_operators: int, n_qubits: int):
"""
Given a stabilizer basis, randomly creates Pauli states by including the
basis vector or not.
Args:
stabilizer_basis: A list of Pauli strings that is the stabilizer basis
to sample from.
n_measured_operators: The total number of Pauli measurements, or None to
explore each Pauli state once.
n_qubits: An integer that is the number of qubits.
Returns:
A list of Pauli strings that is the Pauli states built.
"""
dense_pauli_strings = []
for _ in range(n_measured_operators):
# Build the Pauli string as a random sample of the basis elements.
dense_pauli_string = cirq.DensePauliString.eye(n_qubits)
for stabilizer in stabilizer_basis:
if np.random.randint(2) == 1:
dense_pauli_string *= stabilizer
dense_pauli_strings.append(dense_pauli_string)
return dense_pauli_strings
def _enumerate_all_from_stabilizer_bases(
stabilizer_basis: List[cirq.DensePauliString], n_qubits: int):
"""
Given a stabilizer basis, creates the exhaustive list of Pauli states that
are spanned by the basis.
Args:
stabilizer_basis: A list of Pauli strings that is the stabilizer basis
to build all the Pauli strings.
n_qubits: An integer that is the number of qubits.
Returns:
A list of Pauli strings that is the Pauli states built.
"""
dense_pauli_strings = []
for coefficients in itertools.product([False, True], repeat=n_qubits):
dense_pauli_string = cirq.DensePauliString.eye(n_qubits)
for (keep, stabilizer) in zip(coefficients, stabilizer_basis):
if keep:
dense_pauli_string *= stabilizer
dense_pauli_strings.append(dense_pauli_string)
return dense_pauli_strings
@dataclass
class PauliTrace:
"""
A class that contains the Pauli states as described on page 2 of:
https://arxiv.org/abs/1104.3835
"""
# Pauli string.
P_i: cirq.PauliString
# Coefficient of the ideal pure state expanded in the Pauli basis scaled by
# sqrt(dim H), formally defined at bottom of left column of page 2.
rho_i: float
# A probablity (between 0.0 and 1.0) that is the relevance distribution,
# formally defined at top of right column of page 2.
Pr_i: float
def _estimate_pauli_traces_clifford(n_qubits: int,
clifford_state: cirq.CliffordState,
n_measured_operators: Optional[int]
) -> List[PauliTrace]:
"""
Estimates the Pauli traces in case the circuit is Clifford. When we have a
Clifford circuit, there are 2**n Pauli traces that have probability 1/2**n
and all the other traces have probability 0. In addition, there is a fast
way to compute find out what the traces are. See the documentation of
cirq.CliffordState for more detail. This function uses the speedup to sample
the Pauli states with non-zero probability.
Args:
n_qubits: An integer that is the number of qubits.
clifford_state: The basis of the Pauli states with non-zero probability.
n_measured_operators: The total number of Pauli measurements, or None to
explore each Pauli state once.
Returns:
A list of Pauli states (represented as tuples of Pauli string, rho_i,
and probability.
"""
# When the circuit consists of Clifford gates only, we can sample the
# Pauli states more efficiently as described on page 4 of:
# https://arxiv.org/abs/1104.4695
d = 2**n_qubits
# The stabilizers_basis variable only contains basis vectors. For
# example, if we have n=3 qubits, then we should have 2**n=8 Pauli
# states that we can sample, but the basis will still have 3 entries. We
# must flip a coin for each, whether or not to include them.
stabilizer_basis: List[cirq.DensePauliString] = clifford_state.stabilizers()
if n_measured_operators is not None:
dense_pauli_strings = _randomly_sample_from_stabilizer_bases(
stabilizer_basis, n_measured_operators, n_qubits)
assert len(dense_pauli_strings) == n_measured_operators
else:
dense_pauli_strings = _enumerate_all_from_stabilizer_bases(
stabilizer_basis, n_qubits)
assert len(dense_pauli_strings) == 2**n_qubits
pauli_traces: List[PauliTrace] = []
for dense_pauli_string in dense_pauli_strings:
# The code below is equivalent to calling
# clifford_state.wave_function() and then calling
# compute_characteristic_function() on the results (albeit with a
# wave function instead of a density matrix). It is, however,
# unncessary to do so. Instead we directly obtain the scalar rho_i.
rho_i = dense_pauli_string.coefficient
assert np.isclose(rho_i.imag, 0.0, atol=1e-6)
rho_i = rho_i.real
dense_pauli_string *= rho_i
assert np.isclose(abs(rho_i), 1.0, atol=1e-6)
Pr_i = 1.0 / d
pauli_traces.append(
PauliTrace(P_i=dense_pauli_string.sparse(), rho_i=rho_i, Pr_i=Pr_i))
return pauli_traces
def _estimate_pauli_traces_general(qubits: List[cirq.Qid],
circuit: cirq.Circuit,
n_measured_operators: Optional[int]
) -> List[PauliTrace]:
"""
Estimates the Pauli traces in case the circuit is not Clifford. In this case
we cannot use the speedup implemented in the function
_estimate_pauli_traces_clifford() above, and so do a slow, density matrix
simulation.
Args:
qubits: The list of qubits.
circuit: The (non Clifford) circuit.
n_measured_operators: The total number of Pauli measurements, or None to
explore each Pauli state once.
Returns:
A list of Pauli states (represented as tuples of Pauli string, rho_i,
and probability.
"""
n_qubits = len(qubits)
dense_simulator = cirq.DensityMatrixSimulator()
# rho in https://arxiv.org/abs/1104.3835
clean_density_matrix = cast(
cirq.DensityMatrixTrialResult,
dense_simulator.simulate(circuit)).final_density_matrix
all_operators = itertools.product([cirq.I, cirq.X, cirq.Y, cirq.Z],
repeat=n_qubits)
if n_measured_operators is not None:
dense_operators = random.sample(tuple(all_operators),
n_measured_operators)
else:
dense_operators = list(all_operators)
pauli_traces: List[PauliTrace] = []
for P_i in dense_operators:
pauli_string = cirq.PauliString(dict(zip(qubits, P_i)))
rho_i, Pr_i = compute_characteristic_function(circuit, pauli_string,
qubits,
clean_density_matrix)
pauli_traces.append(PauliTrace(P_i=pauli_string, rho_i=rho_i,
Pr_i=Pr_i))
return pauli_traces
@dataclass
class TrialResult:
"""
Contains the results of a trial, either by simulator or actual run
"""
# The Pauli trace that was measured
pauli_trace: PauliTrace
# Coefficient of the measured/simulated pure state expanded in the Pauli
# basis scaled by sqrt(dim H), formally defined at bottom of left column of
# second page of https://arxiv.org/abs/1104.3835
sigma_i: float
@dataclass
class DFEIntermediateResult:
"""
A container for the various debug and run data from calling the function
direct_fidelity_estimation(). This is useful when running a long-computation
on an actual computer, which is expensive. This way, runs can be more easily
debugged offline.
"""
# If the circuit is Clifford, the Clifford state from which we can extract
# a list of Pauli strings for a basis of the stabilizers.
clifford_state: Optional[cirq.CliffordState]
# The list of Pauli traces we can sample from.
pauli_traces: List[PauliTrace]
# Measurement results from sampling the circuit.
trial_results: List[TrialResult]
def direct_fidelity_estimation(circuit: cirq.Circuit, qubits: List[cirq.Qid],
                               sampler: cirq.Sampler,
                               n_measured_operators: Optional[int],
                               samples_per_term: int):
    """
    Implementation of direct fidelity estimation, as per 'Direct Fidelity
    Estimation from Few Pauli Measurements' https://arxiv.org/abs/1104.4695 and
    'Practical characterization of quantum devices without tomography'
    https://arxiv.org/abs/1104.3835.

    Args:
        circuit: The circuit to run the simulation on.
        qubits: The list of qubits.
        sampler: Either a noisy simulator or an engine.
        n_measured_operators: The total number of Pauli measurements, or None to
            explore each Pauli state once.
        samples_per_term: if set to 0, we use the 'sampler' parameter above as
            a noise (must be of type cirq.DensityMatrixSimulator) and
            simulate noise in the circuit. If greater than 0, we instead use the
            'sampler' parameter directly to estimate the characteristic
            function.

    Returns:
        The estimated fidelity and a log of the run (a DFEIntermediateResult).

    Raises:
        TypeError: If samples_per_term is 0 but 'sampler' is not a
            cirq.DensityMatrixSimulator.
    """
    # n_measured_operators is upper-case N in https://arxiv.org/abs/1104.3835

    # Number of qubits, lower-case n in https://arxiv.org/abs/1104.3835
    n_qubits = len(qubits)

    # Attempt to replay the circuit onto a stabilizer (Clifford) state. If any
    # operation is not Clifford, apply_unitary raises ValueError and we fall
    # back to the general (non-Clifford) estimation path below.
    clifford_circuit = True
    clifford_state: Optional[cirq.CliffordState] = None
    try:
        clifford_state = cirq.CliffordState(
            qubit_map={qubits[i]: i for i in range(len(qubits))})
        for gate in circuit.all_operations():
            clifford_state.apply_unitary(gate)
    except ValueError:
        clifford_circuit = False

    # Computes for every \hat{P_i} of https://arxiv.org/abs/1104.3835
    # estimate rho_i and Pr(i). We then collect tuples (rho_i, Pr(i), \hat{Pi})
    # inside the variable 'pauli_traces'.
    if clifford_circuit:
        assert clifford_state is not None
        pauli_traces = _estimate_pauli_traces_clifford(
            n_qubits, cast(cirq.CliffordState, clifford_state),
            n_measured_operators)
    else:
        pauli_traces = _estimate_pauli_traces_general(qubits, circuit,
                                                      n_measured_operators)

    # Sampling probabilities, one per Pauli trace.
    p = np.asarray([x.Pr_i for x in pauli_traces])

    if n_measured_operators is None:
        # Since we enumerate all the possible traces, the probs should add to 1.
        assert np.isclose(np.sum(p), 1.0, atol=1e-6)
    # Renormalize so np.random.choice below receives a valid distribution.
    p /= np.sum(p)

    fidelity = 0.0

    if samples_per_term == 0:
        # sigma in https://arxiv.org/abs/1104.3835
        if not isinstance(sampler, cirq.DensityMatrixSimulator):
            raise TypeError('sampler is not a cirq.DensityMatrixSimulator '
                            'but samples_per_term is zero.')
        noisy_simulator = cast(cirq.DensityMatrixSimulator, sampler)
        # The noisy density matrix is computed once and reused for every
        # Pauli trace in the loop below.
        noisy_density_matrix = cast(
            cirq.DensityMatrixTrialResult,
            noisy_simulator.simulate(circuit)).final_density_matrix

    if clifford_circuit and n_measured_operators is None:
        # In case the circuit is Clifford and we compute an exhaustive list of
        # Pauli traces, instead of sampling we can simply enumerate them because
        # they all have the same probability.
        measured_pauli_traces = pauli_traces
    else:
        # Otherwise, randomly sample as per probability.
        measured_pauli_traces = np.random.choice(pauli_traces,
                                                 size=len(pauli_traces),
                                                 p=p)

    trial_results: List[TrialResult] = []
    for pauli_trace in measured_pauli_traces:
        measure_pauli_string: cirq.PauliString = pauli_trace.P_i
        rho_i = pauli_trace.rho_i

        if samples_per_term > 0:
            # Estimate sigma_i by sampling the circuit via 'sampler'
            # (asynchronous helper, driven to completion here).
            sigma_i = asyncio.get_event_loop().run_until_complete(
                estimate_characteristic_function(circuit, measure_pauli_string,
                                                 qubits, sampler,
                                                 samples_per_term))
        else:
            # Compute sigma_i exactly from the precomputed noisy density
            # matrix; the second return value (probability) is unused here.
            sigma_i, _ = compute_characteristic_function(
                circuit, measure_pauli_string, qubits, noisy_density_matrix)

        trial_results.append(
            TrialResult(pauli_trace=pauli_trace, sigma_i=sigma_i))

        # Accumulate the fidelity estimator: mean of sigma_i / rho_i.
        fidelity += sigma_i / rho_i

    estimated_fidelity = fidelity / len(pauli_traces)

    dfe_intermediate_result = DFEIntermediateResult(
        clifford_state=clifford_state,
        pauli_traces=pauli_traces,
        trial_results=trial_results)

    return estimated_fidelity, dfe_intermediate_result
def _int_or_none(value: str) -> Optional[int]:
    """argparse converter: parses an int, or the literal string 'None' as None."""
    return None if value == 'None' else int(value)


def parse_arguments(args):
    """Helper function that parses the given arguments.

    Args:
        args: The command-line argument strings (e.g. sys.argv[1:]).

    Returns:
        A dict with keys 'n_measured_operators' and 'samples_per_term'.
    """
    parser = argparse.ArgumentParser('Direct fidelity estimation.')

    # TODO(#2802): Offer some guidance on how to set this flag. Maybe have an
    # option to do an exhaustive sample and do numerical studies to know which
    # choice is the best.
    # Note: the converter accepts the literal 'None' so that the exhaustive
    # enumeration mode documented in the help text is actually reachable from
    # the command line (plain type=int would reject it).
    parser.add_argument('--n_measured_operators',
                        default=10,
                        type=_int_or_none,
                        help='Numbers of measured operators (Pauli strings). '
                        'If the circuit is Clifford, these operators are '
                        'computed by sampling for the basis of stabilizers. If '
                        'the circuit is not Clifford, this is a random sample '
                        'all the possible operators. If the value of this '
                        'parameter is None, we enumerate all the operators '
                        'which is 2**n_qubit for Clifford circuits and '
                        '4**n_qubits otherwise.')

    parser.add_argument('--samples_per_term',
                        default=0,
                        type=int,
                        help='Number of samples per trial or 0 if no sampling.')

    return vars(parser.parse_args(args))
def main(*, n_measured_operators: Optional[int], samples_per_term: int):
    """Runs direct fidelity estimation on the example circuit under noise.

    Builds the example circuit, wraps it in a depolarizing noise model, and
    prints the estimated fidelity.

    Args:
        n_measured_operators: The total number of Pauli measurements, or None
            to explore each Pauli state once.
        samples_per_term: Number of samples per trial, or 0 to compute the
            characteristic function from the density matrix directly.
    """
    circuit, qubits = build_circuit()

    noise = cirq.ConstantQubitNoiseModel(cirq.depolarize(0.1))
    # f-strings instead of dated %-formatting; output is unchanged.
    print(f'Noise model: {noise}')
    noisy_simulator = cirq.DensityMatrixSimulator(noise=noise)

    estimated_fidelity, _ = direct_fidelity_estimation(
        circuit,
        qubits,
        noisy_simulator,
        n_measured_operators=n_measured_operators,
        samples_per_term=samples_per_term)
    # '{:f}' reproduces the old '%f' fixed-point formatting exactly.
    print(f'Estimated fidelity: {estimated_fidelity:f}')
# Script entry point: forward CLI flags to main() as keyword arguments.
# (Removed dataset-extraction residue that was fused onto this line.)
if __name__ == '__main__':
    main(**parse_arguments(sys.argv[1:]))
import pytest
import pandas as pd
from pandas import Series, TimedeltaIndex
class TestTimedeltaIndexRendering:
    """Rendering tests for TimedeltaIndex: repr/str, Series repr, _summary.

    The final assert line previously had dataset-extraction residue fused onto
    it; this version restores the clean statement.
    """

    @pytest.mark.parametrize("method", ["__repr__", "__str__"])
    def test_representation(self, method):
        # Cover empty, 1/2/3-element regular-frequency, and irregular indexes.
        idx1 = TimedeltaIndex([], freq="D")
        idx2 = TimedeltaIndex(["1 days"], freq="D")
        idx3 = TimedeltaIndex(["1 days", "2 days"], freq="D")
        idx4 = TimedeltaIndex(["1 days", "2 days", "3 days"], freq="D")
        idx5 = TimedeltaIndex(["1 days 00:00:01", "2 days", "3 days"])

        exp1 = "TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"
        exp2 = "TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', freq='D')"
        exp3 = "TimedeltaIndex(['1 days', '2 days'], dtype='timedelta64[ns]', freq='D')"
        exp4 = (
            "TimedeltaIndex(['1 days', '2 days', '3 days'], "
            "dtype='timedelta64[ns]', freq='D')"
        )
        exp5 = (
            "TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
            "'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)"
        )

        # Widen the display so the reprs are not wrapped across lines.
        with pd.option_context("display.width", 300):
            for idx, expected in zip(
                [idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5]
            ):
                result = getattr(idx, method)()
                assert result == expected

    def test_representation_to_series(self):
        idx1 = TimedeltaIndex([], freq="D")
        idx2 = TimedeltaIndex(["1 days"], freq="D")
        idx3 = TimedeltaIndex(["1 days", "2 days"], freq="D")
        idx4 = TimedeltaIndex(["1 days", "2 days", "3 days"], freq="D")
        idx5 = TimedeltaIndex(["1 days 00:00:01", "2 days", "3 days"])

        exp1 = """Series([], dtype: timedelta64[ns])"""
        exp2 = "0   1 days\ndtype: timedelta64[ns]"
        exp3 = "0   1 days\n1   2 days\ndtype: timedelta64[ns]"
        exp4 = "0   1 days\n1   2 days\n2   3 days\ndtype: timedelta64[ns]"
        exp5 = (
            "0   1 days 00:00:01\n"
            "1   2 days 00:00:00\n"
            "2   3 days 00:00:00\n"
            "dtype: timedelta64[ns]"
        )

        with pd.option_context("display.width", 300):
            for idx, expected in zip(
                [idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5]
            ):
                result = repr(Series(idx))
                assert result == expected

    def test_summary(self):
        # GH#9116
        idx1 = TimedeltaIndex([], freq="D")
        idx2 = TimedeltaIndex(["1 days"], freq="D")
        idx3 = TimedeltaIndex(["1 days", "2 days"], freq="D")
        idx4 = TimedeltaIndex(["1 days", "2 days", "3 days"], freq="D")
        idx5 = TimedeltaIndex(["1 days 00:00:01", "2 days", "3 days"])

        exp1 = "TimedeltaIndex: 0 entries\nFreq: D"
        exp2 = "TimedeltaIndex: 1 entries, 1 days to 1 days\nFreq: D"
        exp3 = "TimedeltaIndex: 2 entries, 1 days to 2 days\nFreq: D"
        exp4 = "TimedeltaIndex: 3 entries, 1 days to 3 days\nFreq: D"
        exp5 = "TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days 00:00:00"

        for idx, expected in zip(
            [idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5]
        ):
            result = idx._summary()
            assert result == expected
import pandas as pd
from pandas import Series, TimedeltaIndex
class TestTimedeltaIndexRendering:
    """Rendering tests for TimedeltaIndex: repr/str, Series repr, _summary.

    The final assert line previously had dataset-extraction residue fused onto
    it; this version restores the clean statement.
    """

    @pytest.mark.parametrize("method", ["__repr__", "__str__"])
    def test_representation(self, method):
        # Cover empty, 1/2/3-element regular-frequency, and irregular indexes.
        idx1 = TimedeltaIndex([], freq="D")
        idx2 = TimedeltaIndex(["1 days"], freq="D")
        idx3 = TimedeltaIndex(["1 days", "2 days"], freq="D")
        idx4 = TimedeltaIndex(["1 days", "2 days", "3 days"], freq="D")
        idx5 = TimedeltaIndex(["1 days 00:00:01", "2 days", "3 days"])

        exp1 = "TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"
        exp2 = "TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', freq='D')"
        exp3 = "TimedeltaIndex(['1 days', '2 days'], dtype='timedelta64[ns]', freq='D')"
        exp4 = (
            "TimedeltaIndex(['1 days', '2 days', '3 days'], "
            "dtype='timedelta64[ns]', freq='D')"
        )
        exp5 = (
            "TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
            "'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)"
        )

        # Widen the display so the reprs are not wrapped across lines.
        with pd.option_context("display.width", 300):
            for idx, expected in zip(
                [idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5]
            ):
                result = getattr(idx, method)()
                assert result == expected

    def test_representation_to_series(self):
        idx1 = TimedeltaIndex([], freq="D")
        idx2 = TimedeltaIndex(["1 days"], freq="D")
        idx3 = TimedeltaIndex(["1 days", "2 days"], freq="D")
        idx4 = TimedeltaIndex(["1 days", "2 days", "3 days"], freq="D")
        idx5 = TimedeltaIndex(["1 days 00:00:01", "2 days", "3 days"])

        exp1 = """Series([], dtype: timedelta64[ns])"""
        exp2 = "0   1 days\ndtype: timedelta64[ns]"
        exp3 = "0   1 days\n1   2 days\ndtype: timedelta64[ns]"
        exp4 = "0   1 days\n1   2 days\n2   3 days\ndtype: timedelta64[ns]"
        exp5 = (
            "0   1 days 00:00:01\n"
            "1   2 days 00:00:00\n"
            "2   3 days 00:00:00\n"
            "dtype: timedelta64[ns]"
        )

        with pd.option_context("display.width", 300):
            for idx, expected in zip(
                [idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5]
            ):
                result = repr(Series(idx))
                assert result == expected

    def test_summary(self):
        # GH#9116
        idx1 = TimedeltaIndex([], freq="D")
        idx2 = TimedeltaIndex(["1 days"], freq="D")
        idx3 = TimedeltaIndex(["1 days", "2 days"], freq="D")
        idx4 = TimedeltaIndex(["1 days", "2 days", "3 days"], freq="D")
        idx5 = TimedeltaIndex(["1 days 00:00:01", "2 days", "3 days"])

        exp1 = "TimedeltaIndex: 0 entries\nFreq: D"
        exp2 = "TimedeltaIndex: 1 entries, 1 days to 1 days\nFreq: D"
        exp3 = "TimedeltaIndex: 2 entries, 1 days to 2 days\nFreq: D"
        exp4 = "TimedeltaIndex: 3 entries, 1 days to 3 days\nFreq: D"
        exp5 = "TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days 00:00:00"

        for idx, expected in zip(
            [idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5]
        ):
            result = idx._summary()
            assert result == expected