input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<reponame>8ball030/AutonomousHegician
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2020 eightballer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This package contains the handlers of the erc1155 deploy skill AEA."""
from typing import Optional, cast
from aea.crypto.ledger_apis import LedgerApis
from aea.protocols.base import Message
from aea.skills.base import Handler
from packages.eightballer.skills.option_management.db_communication import (
CLOSED,
FAILED,
OPEN,
OPTIONS_ESTIMATE,
PENDING_PLACEMENT,
PLACING,
)
from packages.eightballer.skills.option_management.dialogues import (
ContractApiDialogue,
ContractApiDialogues,
LedgerApiDialogue,
LedgerApiDialogues,
SigningDialogue,
SigningDialogues,
)
from packages.eightballer.skills.option_management.strategy import Strategy
from packages.fetchai.connections.ledger.base import (
CONNECTION_ID as LEDGER_CONNECTION_PUBLIC_ID,
)
from packages.fetchai.protocols.contract_api.message import ContractApiMessage
from packages.fetchai.protocols.ledger_api.message import LedgerApiMessage
from packages.fetchai.protocols.signing.message import SigningMessage
LEDGER_API_ADDRESS = str(LEDGER_CONNECTION_PUBLIC_ID)
class LedgerApiHandler(Handler):
    """Handle ledger_api messages: balances, transaction digests, receipts and errors."""

    SUPPORTED_PROTOCOL = LedgerApiMessage.protocol_id

    def setup(self) -> None:
        """Implement the setup for the handler."""
        pass

    def handle(self, message: Message) -> None:
        """
        Implement the reaction to a message.

        Recovers the dialogue the message belongs to, then dispatches to the
        performative-specific handler.

        :param message: the message
        :return: None
        """
        ledger_api_msg = cast(LedgerApiMessage, message)
        # recover dialogue
        ledger_api_dialogues = cast(
            LedgerApiDialogues, self.context.ledger_api_dialogues
        )
        ledger_api_dialogue = cast(
            Optional[LedgerApiDialogue], ledger_api_dialogues.update(ledger_api_msg)
        )
        if ledger_api_dialogue is None:
            self._handle_unidentified_dialogue(ledger_api_msg)
            return
        # handle message
        if ledger_api_msg.performative is LedgerApiMessage.Performative.BALANCE:
            self._handle_balance(ledger_api_msg)
        elif (
            ledger_api_msg.performative
            is LedgerApiMessage.Performative.TRANSACTION_DIGEST
        ):
            self._handle_transaction_digest(ledger_api_msg, ledger_api_dialogue)
        elif (
            ledger_api_msg.performative
            is LedgerApiMessage.Performative.TRANSACTION_RECEIPT
        ):
            self._handle_transaction_receipt(ledger_api_msg, ledger_api_dialogue)
        elif ledger_api_msg.performative == LedgerApiMessage.Performative.ERROR:
            self._handle_error(ledger_api_msg, ledger_api_dialogue)
        else:
            self._handle_invalid(ledger_api_msg, ledger_api_dialogue)

    def teardown(self) -> None:
        """
        Implement the handler teardown.

        :return: None
        """
        pass

    def _handle_unidentified_dialogue(self, ledger_api_msg: LedgerApiMessage) -> None:
        """
        Handle an unidentified dialogue.

        :param ledger_api_msg: the message that could not be matched to a dialogue
        """
        self.context.logger.info(
            "received invalid ledger_api message={}, unidentified dialogue.".format(
                ledger_api_msg
            )
        )

    def _handle_balance(self, ledger_api_msg: LedgerApiMessage) -> None:
        """
        Handle a message of balance performative.

        Stores the reported balance on the strategy for later use.

        :param ledger_api_msg: the ledger api message
        """
        strategy = cast(Strategy, self.context.strategy)
        strategy.eth_balance = ledger_api_msg.balance

    def _handle_transaction_digest(
        self, ledger_api_msg: LedgerApiMessage, ledger_api_dialogue: LedgerApiDialogue
    ) -> None:
        """
        Handle a message of transaction_digest performative.

        The transaction was submitted; follow up by requesting its receipt.

        :param ledger_api_msg: the ledger api message
        :param ledger_api_dialogue: the ledger api dialogue
        """
        self.context.logger.info(
            "transaction was successfully submitted. Transaction digest={}".format(
                ledger_api_msg.transaction_digest
            )
        )
        msg = ledger_api_dialogue.reply(
            performative=LedgerApiMessage.Performative.GET_TRANSACTION_RECEIPT,
            target_message=ledger_api_msg,
            transaction_digest=ledger_api_msg.transaction_digest,
        )
        self.context.outbox.put_message(message=msg)
        self.context.logger.info("requesting transaction receipt.")

    def _handle_transaction_receipt(
        self, ledger_api_msg: LedgerApiMessage, ledger_api_dialogue: LedgerApiDialogue
    ) -> None:
        """
        Handle a message of transaction_receipt performative.

        Marks a matching pending contract deployment as deployed, and advances
        the current order through its life-cycle (PLACING -> OPEN -> CLOSED on
        success, FAILED otherwise). Finally hands control back to the price
        behaviour.

        :param ledger_api_msg: the ledger api message
        :param ledger_api_dialogue: the ledger api dialogue
        """
        strategy = cast(Strategy, self.context.strategy)
        is_transaction_successful = LedgerApis.is_transaction_settled(
            ledger_api_msg.transaction_receipt.ledger_id,
            ledger_api_msg.transaction_receipt.receipt,
        )
        # NOTE(review): reaches through private attributes to trace the receipt
        # back to the originating contract_api dialogue reference; brittle —
        # a public accessor on the dialogue classes would be safer.
        contract_reference = ledger_api_dialogue._associated_signing_dialogue._associated_contract_api_dialogue.dialogue_label.dialogue_reference[
            0
        ]
        for contract, status in strategy.contract_status.items():
            if status[0] is None:
                continue
            elif status[1] == contract_reference:
                self.context.logger.info(f"retrieved deployment {contract}")
                strategy.contract_status[contract] = (
                    "deployed",
                    ledger_api_msg.transaction_receipt.receipt["contractAddress"],
                )
                # fixed: typo "Retireved and stored)" in the original log message
                self.context.logger.info(
                    f"** {ledger_api_msg.transaction_receipt.receipt['contractAddress']} retrieved and stored"
                )
        order = strategy.get_order(strategy.current_order.id)
        if is_transaction_successful:
            if order.status_code_id == PLACING:  # we have created our order
                strategy.update_current_order(
                    order,
                    {
                        "status_code_id": OPEN,
                        "tx_hash": ledger_api_msg.transaction_receipt.transaction[
                            "hash"
                        ],
                    },
                )  # now we mark the order as open
            elif order.status_code_id == OPEN:  # we have exercised
                strategy.update_current_order(
                    order,
                    {
                        "status_code_id": CLOSED,
                        "tx_hash": ledger_api_msg.transaction_receipt.transaction[
                            "hash"
                        ],
                    },
                )  # now we mark the order as closed
            # fixed: the original formatted the literal string
            # "ledger_api_msg.transaction_receipt" instead of the value
            self.context.logger.info(
                "transaction was successfully settled. Transaction receipt={}".format(
                    ledger_api_msg.transaction_receipt
                )
            )
        else:
            strategy.update_current_order(
                order,
                {
                    "status_code_id": FAILED,
                    "tx_hash": ledger_api_msg.transaction_receipt.transaction["hash"],
                },
            )  # the transaction did not settle: mark the order as failed
            self.context.logger.error(
                "transaction failed. Transaction receipt={}".format(
                    ledger_api_msg.transaction_receipt
                )
            )
        # the order round is over either way; resume price monitoring
        strategy.is_order_behaviour_active = False
        strategy.is_price_behaviour_active = True

    def _handle_error(
        self, ledger_api_msg: LedgerApiMessage, ledger_api_dialogue: LedgerApiDialogue
    ) -> None:
        """
        Handle a message of error performative.

        Marks the current order as FAILED and resumes the price behaviour.

        :param ledger_api_msg: the ledger api message
        :param ledger_api_dialogue: the ledger api dialogue
        """
        strategy = cast(Strategy, self.context.strategy)
        order = strategy.get_order(strategy.current_order.id)
        self.context.logger.info(
            "received ledger_api error message={} in dialogue={}.".format(
                ledger_api_msg, ledger_api_dialogue
            )
        )
        # NOTE(review): ERROR performatives do not normally carry a
        # transaction_receipt; this access looks suspect — confirm against the
        # ledger_api protocol specification.
        strategy.update_current_order(
            order,
            {
                "status_code_id": FAILED,
                "tx_hash": ledger_api_msg.transaction_receipt.body,
            },
        )
        strategy.is_order_behaviour_active = False
        strategy.is_price_behaviour_active = True

    def _handle_invalid(
        self, ledger_api_msg: LedgerApiMessage, ledger_api_dialogue: LedgerApiDialogue
    ) -> None:
        """
        Handle a message of invalid performative.

        :param ledger_api_msg: the ledger api message
        :param ledger_api_dialogue: the ledger api dialogue
        """
        strategy = cast(Strategy, self.context.strategy)
        self.context.logger.warning(
            "cannot handle ledger_api message of performative={} in dialogue={}.".format(
                ledger_api_msg.performative,
                ledger_api_dialogue,
            )
        )
        strategy.is_order_behaviour_active = False
        strategy.is_price_behaviour_active = True
class ContractApiHandler(Handler):
    """Handle contract_api messages: raw transactions, contract state reads and errors."""

    SUPPORTED_PROTOCOL = ContractApiMessage.protocol_id

    def setup(self) -> None:
        """Implement the setup for the handler."""
        pass

    def handle(self, message: Message) -> None:
        """
        Implement the reaction to a message.

        Recovers the dialogue the message belongs to, then dispatches to the
        performative-specific handler.

        :param message: the message
        :return: None
        """
        contract_api_msg = cast(ContractApiMessage, message)
        # recover dialogue
        contract_api_dialogues = cast(
            ContractApiDialogues, self.context.contract_api_dialogues
        )
        contract_api_dialogue = cast(
            Optional[ContractApiDialogue],
            contract_api_dialogues.update(contract_api_msg),
        )
        if contract_api_dialogue is None:
            self._handle_unidentified_dialogue(contract_api_msg)
            return
        # handle message
        if (
            contract_api_msg.performative
            is ContractApiMessage.Performative.RAW_TRANSACTION
        ):
            self._handle_raw_transaction(contract_api_msg, contract_api_dialogue)
        elif contract_api_msg.performative is ContractApiMessage.Performative.STATE:
            self._handle_state(contract_api_msg, contract_api_dialogue)
        elif contract_api_msg.performative == ContractApiMessage.Performative.ERROR:
            self._handle_error(contract_api_msg, contract_api_dialogue)
        else:
            self._handle_invalid(contract_api_msg, contract_api_dialogue)

    def _handle_state(
        self,
        contract_api_msg: ContractApiMessage,
        contract_api_dialogue: ContractApiDialogue,
    ) -> None:
        """
        Handle state reading of the contract apis.

        Stores the state result against the matching pending contract, then
        reacts based on which contract the result belongs to (option estimate
        vs. price-provider query).

        :param contract_api_msg: the contract api message
        :param contract_api_dialogue: the contract api dialogue
        """
        strategy = cast(Strategy, self.context.strategy)
        contract_reference = contract_api_dialogue.dialogue_label.dialogue_reference[0]
        for contract, status in strategy.contract_status.items():
            if status[0] is None:
                continue
            elif status[1] == contract_reference:
                # self.context.logger.info(f"retrieved deployment {contract} state query")
                strategy.contract_status[contract] = (
                    "results",
                    contract_api_msg.state.body,
                )
                strategy.deployment_status = "pending"
                break
        # NOTE(review): `contract` below is the loop variable leaking out of
        # the for-loop; if contract_status is empty this raises NameError, and
        # if no entry matched it refers to the *last* contract — confirm this
        # is intended.
        if contract in ["btcoptions_estimate", "ethoptions_estimate"]:
            order = strategy.get_order(strategy.current_order.id)
            if (
                order.status_code_id == OPTIONS_ESTIMATE
            ):  # we have received our estimates
                strategy.update_current_order(
                    order,
                    {
                        "status_code_id": PENDING_PLACEMENT,
                        "ledger_id": contract_api_msg.state.body["option_id"],
                        "fees": contract_api_msg.state.body["fee_estimate"],
                    },
                )  # now we mark for placement
            strategy.is_order_behaviour_active = False
        elif contract in [
            "btcpriceprovider_get_latest_answer",
            "priceprovider_get_latest_answer",
        ]:
            if not strategy.is_order_behaviour_active:
                strategy.is_price_behaviour_active = True
        else:
            raise ValueError(f"State transaction not handled!: {contract}")

    def teardown(self) -> None:
        """
        Implement the handler teardown.

        :return: None
        """
        pass

    def _handle_unidentified_dialogue(
        self, contract_api_msg: ContractApiMessage
    ) -> None:
        """
        Handle an unidentified dialogue.

        :param contract_api_msg: the message that could not be matched to a dialogue
        """
        self.context.logger.info(
            "received invalid contract_api message={}, unidentified dialogue.".format(
                contract_api_msg
            )
        )

    def _handle_raw_transaction(
        self,
        contract_api_msg: ContractApiMessage,
        contract_api_dialogue: ContractApiDialogue,
    ) -> None:
        """
        Handle a message of raw_transaction performative.

        Sends the raw transaction to the decision maker for signing.

        :param contract_api_msg: the contract api message
        :param contract_api_dialogue: the contract api dialogue
        """
        # fixed: the original formatted the literal string "contract_api_msg"
        # instead of the actual raw transaction
        self.context.logger.info(
            "Received raw transaction={}".format(contract_api_msg.raw_transaction)
        )
        signing_dialogues = cast(SigningDialogues, self.context.signing_dialogues)
        signing_msg, signing_dialogue = signing_dialogues.create(
            counterparty=self.context.decision_maker_address,
            performative=SigningMessage.Performative.SIGN_TRANSACTION,
            raw_transaction=contract_api_msg.raw_transaction,
            terms=contract_api_dialogue.terms,
        )
        signing_dialogue = cast(SigningDialogue, signing_dialogue)
        # remember the originating contract_api dialogue so the receipt can be
        # traced back once the transaction settles
        signing_dialogue.associated_contract_api_dialogue = contract_api_dialogue
        self.context.decision_maker_message_queue.put_nowait(signing_msg)
        self.context.logger.info(
            "proposing the transaction to the decision maker. Waiting for confirmation ..."
        )

    def _handle_error(
        self,
        contract_api_msg: ContractApiMessage,
        contract_api_dialogue: ContractApiDialogue,
    ) -> None:
        """
        Handle a message of error performative.

        Marks the current order as FAILED and resumes the price behaviour.

        :param contract_api_msg: the contract api message
        :param contract_api_dialogue: the contract api dialogue
        """
        strategy = cast(Strategy, self.context.strategy)
        self.context.logger.info(
            "received ledger_api error message={} in dialogue={}.".format(
                contract_api_msg, contract_api_dialogue
            )
        )
        order = strategy.current_order
        strategy.update_current_order(order, {"status_code_id": FAILED})
        strategy.is_order_behaviour_active = False
        strategy.is_price_behaviour_active = True

    def _handle_invalid(
        self,
        contract_api_msg: ContractApiMessage,
        contract_api_dialogue: ContractApiDialogue,
    ) -> None:
        """
        Handle a message of invalid performative.

        :param contract_api_msg: the contract api message
        :param contract_api_dialogue: the contract api dialogue
        """
        self.context.logger.warning(
            "cannot handle contract_api message of performative={} in dialogue={}.".format(
                contract_api_msg.performative,
                contract_api_dialogue,
            )
        )
class SigningHandler(Handler):
"""Implement the transaction handler."""
SUPPORTED_PROTOCOL = SigningMessage.protocol_id
def setup(self) -> None:
"""Implement the setup for the handler."""
pass
    def handle(self, message: Message) -> None:
        """
        Implement the reaction to a message.

        Recovers the signing dialogue the message belongs to, then dispatches
        on the performative: SIGNED_TRANSACTION, ERROR, or invalid otherwise.

        :param message: the message
        :return: None
        """
        signing_msg = cast(SigningMessage, message)
        # recover dialogue
        signing_dialogues = cast(SigningDialogues, self.context.signing_dialogues)
        signing_dialogue = cast(
            Optional[SigningDialogue], signing_dialogues.update(signing_msg)
        )
        if signing_dialogue is None:
            # the message does not belong to any dialogue we know about
            self._handle_unidentified_dialogue(signing_msg)
            return
        # handle message
        if signing_msg.performative is SigningMessage.Performative.SIGNED_TRANSACTION:
            self._handle_signed_transaction(signing_msg, signing_dialogue)
        elif signing_msg.performative is SigningMessage.Performative.ERROR:
            self._handle_error(signing_msg, signing_dialogue)
        else:
            self._handle_invalid(signing_msg, signing_dialogue)
def teardown(self) -> None:
"""
Implement the handler teardown.
:return: None
"""
pass
def _handle_unidentified_dialogue(self, signing_msg: SigningMessage) -> None:
"""
Handle an unidentified dialogue.
:param msg: the message
"""
self.context.logger.info(
"received invalid signing message={}, unidentified dialogue.".format(
signing_msg
)
)
def _handle_signed_transaction(
self, signing_msg: SigningMessage, signing_dialogue: SigningDialogue
) -> None:
"""
Handle an oef search message.
:param signing_msg: the signing message
:param signing_dialogue: the dialogue
:return: None
"""
self.context.logger.info("transaction signing was successful.")
ledger_api_dialogues = cast(
LedgerApiDialogues, self.context.ledger_api_dialogues
)
ledger_api_msg, ledger_api_dialogue = ledger_api_dialogues.create(
counterparty=LEDGER_API_ADDRESS,
performative=LedgerApiMessage.Performative.SEND_SIGNED_TRANSACTION,
signed_transaction=signing_msg.signed_transaction,
)
ledger_api_dialogue = cast(LedgerApiDialogue, ledger_api_dialogue)
ledger_api_dialogue.associated_signing_dialogue = signing_dialogue
self.context.outbox.put_message(message=ledger_api_msg)
self.context.logger.info("sending transaction to ledger.")
def _handle_error(
self, signing_msg: SigningMessage, signing_dialogue: SigningDialogue
) -> None:
"""
Handle an oef search message.
:param signing_msg: the signing message
:param signing_dialogue: the dialogue
:return: None
"""
self.context.logger.info(
"transaction signing was not successful. Error_code={} in dialogue={}".format(
signing_msg.error_code, signing_dialogue
)
)
def | |
true values of the energy for electrons.
:parameter pred_ele: array containing the predicted energies for electrons.
:parameter tr_pi0: array containing the true values of the energy for neutral pions.
:parameter pred_pi0: array containing the predicted energies for neutral pions.
:parameter tr_chPi: array containing the true values of the energy for charged pions.
:parameter pred_chPi: array containing the predicted energies for charged pions.
"""
# 4 subplots sharing both x/y axes
f, axes2d = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(5, 5))
# f.suptitle('Predicted energy X True energy', fontsize=14)
ax1 = axes2d[0, 0]
ax2 = axes2d[0, 1]
ax3 = axes2d[1, 0]
ax4 = axes2d[1, 1]
ax1.hist2d(tr_gamma, pred_gamma, bins=200, norm=LogNorm(), cmap="cool")
ax1.set_title('Photons')
ax2.hist2d(tr_ele, pred_ele, bins=200, norm=LogNorm(), cmap="cool")
ax2.set_title('Electrons')
ax3.hist2d(tr_pi0, pred_pi0, bins=200, norm=LogNorm(), cmap="cool")
ax3.set_title('Neutral pions')
ax4.hist2d(tr_chPi, pred_chPi, bins=200, norm=LogNorm(), cmap="cool")
ax4.set_title('Charged pions')
plt.xticks(np.arange(0, 600, 100.0))
plt.yticks(np.arange(0, 600, 100.0))
# tick.label.set_fontsize(14)
# axes2d.set_xlabel("True energy (GeV)", fontsize=14)
# axes2d.set_ylabel("Predicted energy (GeV)", fontsize=14)
f.text(0.5, 0, "True energy (GeV)", ha='center', va='center', fontsize=14)
f.text(0, 0.5, "Predicted energy (GeV)", ha='center', va='center', rotation='vertical', fontsize=14)
#plt.show()
def plotLosses(loss_gamma, loss_ele, loss_pi0, loss_chPi):
    '''
    Plots 4 loss/epoch curves from training, one subplot per particle type.

    Each history file is read with losses_from_HDF5(), which returns the
    (training loss, validation loss) series for that model.

    :param loss_gamma: path to the HDF5 file with the photon training history.
    :param loss_ele: path to the HDF5 file with the electron training history.
    :param loss_pi0: path to the HDF5 file with the neutral-pion training history.
    :param loss_chPi: path to the HDF5 file with the charged-pion training history.
    :return: None
    '''
    # Fixed: the original body was a copy-paste of the hist2d energy plot and
    # referenced undefined names (tr_gamma, pred_gamma, ...) — it raised
    # NameError and never used the loaded loss histories.
    histories = [
        ("Photons", losses_from_HDF5(loss_gamma)),
        ("Electrons", losses_from_HDF5(loss_ele)),
        ("Neutral pions", losses_from_HDF5(loss_pi0)),
        ("Charged pions", losses_from_HDF5(loss_chPi)),
    ]
    # 4 subplots sharing both x/y axes
    f, axes2d = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(5, 5))
    for ax, (title, history) in zip(axes2d.flatten(), histories):
        # history[0] is the training loss, history[1] the validation loss
        ax.plot(history[0], label='training')
        ax.plot(history[1], label='validation')
        ax.set_title(title)
        ax.legend()
    f.text(0.5, 0, "Epoch", ha='center', va='center', fontsize=14)
    f.text(0, 0.5, "Loss", ha='center', va='center', rotation='vertical', fontsize=14)
def stats_particle(difference):
    """
    Returns the relevant statistics metrics about the distribution of the relative energy
    difference: the mean, standard deviation, standard error and an appropriate label.

    :parameter difference: array containing the difference between true energy and the predicted energy for a particle.
    :type difference: numpy.ndarray
    :return: mean, std, error and label
    :rtype: float, float, float, str.
    """
    mean = np.mean(difference)
    # population standard deviation (numpy default, ddof=0)
    std = np.std(difference)
    # standard error of the mean
    error = std / np.sqrt(len(difference))
    # '\\pm' keeps the LaTeX backslash explicit instead of relying on the
    # invalid-escape behaviour of '\p' in a non-raw string; the resulting
    # string value is unchanged.
    label_part = 'Mean: %.3f $\\pm$ %.3f \nStd. dev.: %.2f' % (mean, error, std)
    return mean, std, error, label_part
def hists(tr_gamma, pred_gamma, tr_ele, pred_ele, tr_pi0, pred_pi0, tr_chPi, pred_chPi, nbins=550):
    """
    Plots 4 relative energy histograms, one for each kind of particle.

    :parameter tr_gamma: array containing the true values of the energy for photons.
    :parameter pred_gamma: array containing the predicted energies for photons.
    :parameter tr_ele: array containing the true values of the energy for electrons.
    :parameter pred_ele: array containing the predicted energies for electrons.
    :parameter tr_pi0: array containing the true values of the energy for neutral pions.
    :parameter pred_pi0: array containing the predicted energies for neutral pions.
    :parameter tr_chPi: array containing the true values of the energy for charged pions.
    :parameter pred_chPi: array containing the predicted energies for charged pions.
    :parameter nbins: number of bins for the histograms.
    """
    # to implement: *kwargs
    # give multiple arrays in the input and use them correctly iteratively (?)
    # 4 subplots sharing both x and y axes
    f, axes2d = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(5, 5))
    #f.suptitle('Relative energy difference', fontsize=16)
    # relative differences for different particles
    difference_gamma = _rDif_pctg(tr_gamma, pred_gamma)
    difference_ele = _rDif_pctg(tr_ele, pred_ele)
    difference_pi0 = _rDif_pctg(tr_pi0, pred_pi0)
    difference_chPi = _rDif_pctg(tr_chPi, pred_chPi)
    # relevant stats
    mean_gamma, std_gamma, error_gamma, label_gamma = stats_particle(difference_gamma)
    mean_ele, std_ele, error_ele, label_ele = stats_particle(difference_ele)
    mean_pi0, std_pi0, error_pi0, label_pi0 = stats_particle(difference_pi0)
    mean_chPi, std_chPi, error_chPi, label_chPi = stats_particle(difference_chPi)
    # defining axes
    ax1 = axes2d[0, 0]
    ax2 = axes2d[0, 1]
    ax3 = axes2d[1, 0]
    ax4 = axes2d[1, 1]
    # plotting histograms
    # fixed: `normed` was removed in matplotlib 3.1; `density=True` is the
    # equivalent normalisation
    ax1.hist(difference_gamma, nbins, density=True, facecolor='green', alpha=0.75, label=label_gamma)
    ax2.hist(difference_ele, nbins, density=True, facecolor='red', alpha=0.75, label=label_ele)
    ax3.hist(difference_pi0, nbins, density=True, facecolor='blue', alpha=0.75, label=label_pi0)
    ax4.hist(difference_chPi, nbins, density=True, facecolor='orange', alpha=0.75, label=label_chPi)
    # setting titles and legends
    ax1.set_title('Photons')
    ax2.set_title('Electrons')
    ax3.set_title('Neutral pions')
    ax4.set_title('Charged pions')
    ax1.legend()
    ax2.legend()
    ax3.legend()
    ax4.legend()
    # ax1.set_xlim(-10, 10)
    # ax2.set_xlim(-10, 10)
    # ax3.set_xlim(-10, 10)
    # ax4.set_xlim(-20, 20)
    plt.xlim(-20, 20)
    f.text(0.5, 0, r'$\frac{(E_{true} - E_{pred})}{E_{true}}$ (%)', ha='center', va='center', fontsize=16)
    f.text(0, 0.5, 'Probability', ha='center', va='center', rotation='vertical', fontsize=14)
    plt.show()
    # plt.savefig("hists%d_%d.jpg" % (lim_l, lim_r))
def out_hists(tr_gamma, pred_gamma, tr_ele, pred_ele, tr_pi0, pred_pi0, tr_chPi, pred_chPi, nbins=550):
    """
    Plots outline (step-style) histograms of the 4 particles on a single axis.

    :parameter tr_gamma: array containing the true values of the energy for photons.
    :parameter pred_gamma: array containing the predicted energies for photons.
    :parameter tr_ele: array containing the true values of the energy for electrons.
    :parameter pred_ele: array containing the predicted energies for electrons.
    :parameter tr_pi0: array containing the true values of the energy for neutral pions.
    :parameter pred_pi0: array containing the predicted energies for neutral pions.
    :parameter tr_chPi: array containing the true values of the energy for charged pions.
    :parameter pred_chPi: array containing the predicted energies for charged pions.
    :parameter nbins: number of bins for the histograms.
    """
    # relative differences for different particles
    difference_gamma = _rDif_pctg(tr_gamma, pred_gamma)
    difference_ele = _rDif_pctg(tr_ele, pred_ele)
    difference_pi0 = _rDif_pctg(tr_pi0, pred_pi0)
    difference_chPi = _rDif_pctg(tr_chPi, pred_chPi)
    # relevant stats
    mean_gamma, std_gamma, error_gamma, label_gamma = stats_particle(difference_gamma)
    mean_ele, std_ele, error_ele, label_ele = stats_particle(difference_ele)
    mean_pi0, std_pi0, error_pi0, label_pi0 = stats_particle(difference_pi0)
    mean_chPi, std_chPi, error_chPi, label_chPi = stats_particle(difference_chPi)
    # plotting histograms
    # fixed: `normed` was removed in matplotlib 3.1; `density=True` is the
    # equivalent normalisation
    plt.hist(difference_gamma, histtype="step", bins=nbins, density=True, edgecolor='green', linewidth=1.5,
             facecolor='white', alpha=0.5, label="Photons\n" + label_gamma)
    plt.hist(difference_ele, histtype="step", bins=nbins, density=True, edgecolor='red', linewidth=1.5, facecolor='white',
             alpha=0.5, label="Electrons\n" + label_ele)
    plt.hist(difference_pi0, histtype="step", bins=nbins, density=True, edgecolor='blue', linewidth=1.5, facecolor='white',
             alpha=0.5, label="Neutral Pions\n" + label_pi0)
    plt.hist(difference_chPi, histtype="step", bins=nbins, density=True, edgecolor='orange', linewidth=1.5,
             facecolor='white', alpha=0.5, label="Charged Pions\n" + label_chPi)
    plt.xlim(-20, 20)
    plt.legend()
    plt.xlabel(r'$\frac{(E_{true} - E_{pred})}{E_{true}}$ (%)', size=18)
    plt.ylabel("Probability", size=16)
    plt.show()
#########################
# STOP!
# Everything starting here has not been tested.
# Contains bugs, do not use.
#########################
def multiMeans(tr_gamma, pred_gamma, tr_ele, pred_ele, tr_pi0, pred_pi0, nbins=10):
    """
    Plots the energy bins means of different particles together, each one having a different color.

    NOTE(review): this function lives in the file's "not tested, contains
    bugs, do not use" section — confirm behaviour before relying on it.

    :type tr_gamma: numpy.ndarray
    :parameter tr_gamma: array containing the true values of the energy for photons.
    :type pred_gamma: numpy.ndarray
    :parameter pred_gamma: array containing the predicted energies for photons.
    :type tr_ele: numpy.ndarray
    :parameter tr_ele: array containing the true values of the energy for electrons.
    :type pred_ele: numpy.ndarray
    :parameter pred_ele: array containing the predicted energies for electrons.
    :type tr_pi0: numpy.ndarray
    :parameter tr_pi0: array containing the true values of the energy for neutral pions.
    :type pred_pi0: numpy-ndarray.
    :parameter pred_pi0: array containing the predicted energies for neutral pions.
    :type nbins: int
    :parameter nbins: number of bins of energy.
    """
    # bin the (true, predicted) pairs; only the means/stds/sizes are used below
    x_gamma, y_gamma, means_gamma, rMeans_gamma, stds_gamma, rStds_gamma, sizes_gamma, res_gamma = binning(
        nbins, tr_gamma, pred_gamma)
    x_ele, y_ele, means_ele, rMeans_ele, stds_ele, rStds_ele, sizes_ele, res_ele = binning(nbins, tr_ele, pred_ele)
    x_pi0, y_pi0, means_pi0, rMeans_pi0, stds_pi0, rStds_pi0, sizes_pi0, res_pi0 = binning(nbins, tr_pi0, pred_pi0)
    # subplots
    f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)
    # NOTE(review): plt.figure() here opens a NEW blank figure, so the
    # xlabel/ylabel/title/legend calls below land on it instead of the subplot
    # figure created above — looks like a bug; verify before use.
    plt.figure(figsize=(5, 5))
    returnMeans(means_gamma, stds_gamma, sizes_gamma, ax1, col='blue', particle_name="Photons")
    returnMeans(means_ele, stds_ele, sizes_ele, ax2, col='red', particle_name="Electrons")
    returnMeans(means_pi0, stds_pi0, sizes_pi0, ax3, col='green', particle_name="Neutral pions")
    plt.xlabel("Energy", size=16)
    plt.ylabel("$\mu(\Delta E)$ (GeV)", size=19)
    plt.title("Means", size=16)
    plt.xlim(0, 500)
    plt.legend(loc='best'
               #, bbox_to_anchor=(1.52, 0.9)
               )
    plt.show()
    # plt.savefig("means_particles.jpg")
def returnMeans(means_particle, stds_particle, sizes_particle, ax, col='blue', particle_name=""):
"""
Helper for multiMeans(). Plots the energy bin means for individual particles in subplots.
:parameter means_particle: array containing the means of the energy bins for a particle.
:type means_particle: numpy.ndarray
:parameter stds_particle: array containing the standard deviations of the energy bins for a particle.
:type stds_particle: numpy.ndarray
:parameter sizes_particle: array containing the sizes of the energy bins for a particle.
:type sizes_particle: numpy.ndarray
:parameter ax: axis (subplot).
:type ax: matplotlib.axes._subplots.AxesSubplot
:parameter col: color of the distribution.
:type col: str
:parameter particle_name: name of the particle.
:type particle_name: str
"""
n_particle = len(means_particle)
iSize_particle = 500 / n_particle
for i in range(0, n_particle):
x_axis = (i * iSize_particle + (i + 1) * iSize_particle) / 2
| |
")
# Register print-time twice: once as a global command (taking an optional
# cpu-name argument or -all) and once in the "processor" namespace, where the
# processor is implied by the namespace object and only the flags remain.
for ns in ("", "processor"):
    new_command("print-time", print_time_cmd,
                # the argument list depends on the namespace being registered
                {"": [arg(obj_t('processor', 'processor'), "cpu-name", "?"),
                      arg(flag_t, "-s"),
                      arg(flag_t, "-c"),
                      arg(flag_t, "-all")],
                 "processor": [ arg(flag_t, "-s"),
                                arg(flag_t, "-c")]}[ns],
                namespace = ns,
                alias = "ptime",
                type = ["Execution", "Profiling"],
                short = "print number of steps and cycles executed",
                repeat = print_time_cmd,
                doc = """
Prints the number of steps and cycles that a processor has executed.
The cycle count is also displayed as simulated time in seconds.
If called from a processor namespace (e.g., <i>cpu0</i><tt>.print-time</tt>),
the time for that processor is printed. Otherwise, the time for the
current processor is printed, or, if the <arg>-all</arg> flag is
given, the time for all processors.
if the <arg>-c</arg> flag used, the cycle count for the processor is returned
and nothing is printed. The <arg>-s</arg> flag is similar and returns the
step count.
A step is a completed instruction or an exception. An instruction that
fails to complete because of an exception will count as a single step,
including the exception.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="156")
#
# -------------------- penable --------------------
#
def penable_cmd(poly): # flag_t or obj_t
    # Enable a processor. `poly` is a polymorphic (type, value) argument:
    #   - empty/None: enable the current processor,
    #   - (flag_t, ...): the -all flag was given; recurse over every processor,
    #   - (obj_t, cpu): enable that specific processor object.
    if not poly:
        penable_cmd((obj_t, current_processor()))
    elif poly[0] == flag_t: # -all
        limit = SIM_number_processors()
        for i in range(limit):
            penable_cmd((obj_t, SIM_get_processor(i)))
    else:
        cpu = poly[1]
        try:
            SIM_enable_processor(cpu)
            print "Enabling processor", cpu.name
        except Exception, msg:
            # SIM_enable_processor signals failure via an exception
            print "Failed enabling processor.", msg

new_command("penable", penable_cmd,
            [arg((obj_t('processor', 'processor'), flag_t),
                 ("cpu-name", "-all"), "?")],
            type = ["Execution", "Changing Simulated State"],
            short = "switch processor on",
            doc = """
Enables a processor. If no processor is specified, the current processor will
be enabled. If the flag <arg>-all</arg> is passed, all processors will be
enabled.
<b>pdisable</b> takes processor as parameter. If no processor is
given, it will list all enabled processors. The method variant can also be used
to disable a processor. A disabled processor is simply stalled for an infinite
amount of time. Make sure that you always have at least one enabled processor.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="208")
def obj_enable_cmd(obj):
    # Namespace variant: <processor>.enable — `obj` is the processor object.
    try:
        SIM_enable_processor(obj)
        print "Enabling processor", obj.name
    except Exception, msg:
        print "Failed enabling processor.", msg

new_command("enable", obj_enable_cmd,
            [],
            namespace = "processor",
            short = "switch processor on",
            doc_with = "penable", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="232")
#
# -------------------- pdisable --------------------
#
def pdisable_cmd(poly):
    # Disable a processor. Mirrors penable_cmd: `poly` is a polymorphic
    # (type, value) argument — empty for the current processor, the -all
    # flag to recurse over every processor, or a specific processor object.
    if not poly:
        pdisable_cmd((obj_t, current_processor()))
    elif poly[0] == flag_t: # -all
        limit = SIM_number_processors()
        for i in range(limit):
            pdisable_cmd((obj_t, SIM_get_processor(i)))
    else:
        cpu = poly[1]
        try:
            SIM_disable_processor(cpu)
            print "Disabling processor", cpu.name
        except Exception, msg:
            # SIM_disable_processor signals failure via an exception
            print "Failed disabling processor.", msg

new_command("pdisable", pdisable_cmd,
            [arg((obj_t('processor', 'processor'), flag_t),
                 ("cpu-name", "-all"), "?")],
            alias = "",
            short = "switch processor off",
            doc_with = "penable", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="257")
def obj_disable_cmd(obj):
    # Namespace variant: <processor>.disable — `obj` is the processor object.
    try:
        SIM_disable_processor(obj)
        print "Disabling processor", obj.name
    except Exception, msg:
        print "Failed disabling processor.", msg

new_command("disable", obj_disable_cmd,
            [],
            namespace = "processor",
            short = "switch processor off",
            doc_with = "penable", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="271")
#
# -------------------- pstatus --------------------
#
def pstatus_cmd():
    # Delegates to list_processors(), which prints the enabled/disabled
    # state of every processor.
    list_processors()

new_command("pstatus", pstatus_cmd,
            [],
            alias = "",
            type = ["Execution", "Inspecting Simulated State"],
            short = "show processors' status",
            doc = """
Show the enabled/disabled status of all processors in the Simics session.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="284")
#
# -------------------------- memory profiling --------------------------
#
def supports_mem_profiling(cpu):
    # Probe for the memory_profilers attribute; processors without it do not
    # support read/write profiling. Returns 1 (supported) or 0 (not).
    try:
        cpu.memory_profilers
    except:
        print "%s does not support memory read/write profiling." % cpu.name
        return 0
    return 1

# Map from the user-visible profiler type name to the Simics access-type
# constant used to index cpu.memory_profilers.
cpu_mem_prof_type = {"read" : Sim_RW_Read,
                     "write" : Sim_RW_Write}

def cpu_mem_prof_type_expander(string):
    # Tab-completion expander for the "type" command argument.
    return get_completions(string, cpu_mem_prof_type.keys())

def print_valid_mem_prof_types():
    print "Valid types are: %s" % ", ".join(cpu_mem_prof_type.keys())
def add_memory_profiler_cmd(cpu, type, obj):
    # Attach a data profiler to 'cpu' for memory accesses of the given
    # 'type' ("read" or "write").  If 'obj' is supplied it is installed
    # directly; otherwise an existing object named "<cpu>_<type>_mem_prof"
    # is reused, and failing that a new 'data-profiler' object is created
    # with the processor's preferred granularity.
    if not supports_mem_profiling(cpu):
        return
    try:
        i = cpu_mem_prof_type[type]
    except:
        print "'%s' is not a valid profiler type." % type
        print_valid_mem_prof_types()
        return
    # Refuse to overwrite an already-installed profiler for this slot.
    if cpu.memory_profilers[i]:
        print ("There is an active profiler for memory %s already: %s"
               % (type, cpu.memory_profilers[i].name))
        return
    if obj:
        cpu.memory_profilers[i] = obj
    else:
        # create a new profiler
        name = "%s_%s_mem_prof" % (cpu.name, type)
        try:
            # Reuse a previously created profiler object with this name.
            prof = SIM_get_object(name)
            cpu.memory_profilers[i] = prof
            print ("[%s] Existing profiler added for memory %s: %s"
                   % (cpu.name, type, name))
        except:
            try:
                gran = cpu.memory_profiling_granularity_log2
                desc = "data profiler"
                prof = SIM_create_object('data-profiler', name,
                                         [['description', desc],
                                          ['granularity', gran],
                                          ['physical_addresses', 1]])
                cpu.memory_profilers[i] = prof
                print ("[%s] New profiler added for memory %s: %s"
                       % (cpu.name, type, name))
            except:
                # Object creation failed; report and give up quietly.
                print "Could not add memory profiler."
def remove_memory_profiler_cmd(cpu, type):
    # Detach the profiler for the given access type by clearing its slot.
    if not supports_mem_profiling(cpu):
        return
    try:
        cpu.memory_profilers[cpu_mem_prof_type[type]] = None
    except:
        # KeyError from an unknown 'type' lands here.
        print "'%s' is not a valid profiler type." % type
        print_valid_mem_prof_types()
def list_memory_profilers_cmd(cpu):
    # Print one line per access type with the attached profiler's name
    # (empty when no profiler is installed for that type).
    if not supports_mem_profiling(cpu):
        return
    for t in cpu_mem_prof_type.keys():
        try:
            name = cpu.memory_profilers[cpu_mem_prof_type[t]].name
        except:
            name = ""
        print "%20s: %s" % (t, name)
# Register the three memory-profiler commands in the "processor" namespace.
new_command("add-memory-profiler", add_memory_profiler_cmd,
            [arg(str_t, "type", expander = cpu_mem_prof_type_expander),
             arg(obj_t("data-profiler", "data-profiler"), "profiler", "?")],
            namespace = "processor",
            type = ["Memory", "Profiling"],
            short="add a memory profiler to the processor",
            doc = """
Add a data profiler to the specified processor that will record either
reads or writes to memory (indexed on physical address) depending on
whether the <tt>type</tt> argument is 'read' or 'write'. An existing
data profiler may be specified with the <tt>profiler</tt> argument;
otherwise, a new data profiler will be created.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="373")
new_command("remove-memory-profiler", remove_memory_profiler_cmd,
            [arg(str_t, "type", expander = cpu_mem_prof_type_expander)],
            namespace = "processor",
            type = ["Memory", "Profiling"],
            short="remove a memory profiler from the processor",
            doc = """
Remove any memory profiler of the specified <tt>type</tt> ('read' or
'write') currently attached to the processor.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="386")
new_command("list-memory-profilers", list_memory_profilers_cmd,
            [],
            namespace = "processor",
            type = ["Memory", "Profiling"],
            short="list memory profilers connected to the processor",
            doc = """
List all memory profilers connected to the processor, and what kind of
data they are collecting.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="395")
#
# -------------------- pstats --------------------
#
def all_processors():
    # All processor objects in the current configuration.
    # NOTE: Python 2 filter() returns a list here; callers iterate it.
    return filter(SIM_object_is_processor, SIM_get_all_objects())
def print_stat_per_cpu(cpu):
    # Print the per-mode (user/supervisor) counter table for one cpu,
    # suppressing counters whose values are all zero.
    pr("\nStatistics for cpu %s\n" % cpu.name);
    pr(" User Supervisor Total Description\n");
    elided = 0
    for (name, uval, sval) in cpu.mode_counters:
        if uval or sval:
            pr("%11d %11d %11d %s\n" % (uval, sval, uval + sval, name))
        else:
            # Remember that at least one all-zero row was hidden.
            elided = 1
    if elided:
        pr("\n(counters whose values are all zero were not displayed)\n")
def pstats_cmd(args):
    # 'args' is a CLI polyvalue: (flag_t, ...) means -all was given;
    # otherwise args[1] is the processor (or None for the frontend cpu).
    if args[0] == flag_t:
        for c in all_processors():
            print_stat_per_cpu(c)
    else:
        cpu = args[1]
        if not cpu:
            cpu, _ = get_cpu()
        print_stat_per_cpu(cpu)
def obj_pstats_cmd(obj):
    # Namespace form ("<processor>.print-statistics"): wrap the object in
    # the polyvalue shape that pstats_cmd expects.
    pstats_cmd((obj_t, obj))
# Global "print-statistics" (alias "pstats") plus the namespace variant.
new_command("print-statistics", pstats_cmd,
            [arg((obj_t('processor', 'processor'), flag_t), ("cpu-name","-all"), "?",
                 (obj_t,None), expander = (cpu_expander,0))],
            type = ["Profiling"],
            alias = "pstats",
            short = "print various statistics",
            doc = """
Prints various statistics from the simulation. The <b>print-statistics</b>
command prints statistics for the currently selected CPU if no argument
is given and for all CPUs if the -all flag given.
Any statistics that have been compiled into the
simulator are printed, as well as user-defined per-mode counters.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="438")
new_command("print-statistics", obj_pstats_cmd,
            [],
            short = "print various statistics",
            namespace = "processor",
            doc_with = "print-statistics", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="451")
#
# -------------------- step-break-absolute, step-break --------------------
#
def sim_break_absolute_cmd(cpu, cycles):
    # Break when the cpu's *absolute* step count reaches 'cycles':
    # convert to a relative count and delegate to sim_break_cmd.
    if not cpu:
        (cpu, _) = get_cpu()
    sim_break_cmd(cpu, cycles - SIM_step_count(cpu))
def sim_break_cmd(cpu, cycles):
    # Break 'cycles' steps from now (frontend cpu when 'cpu' is None).
    # Negative counts are rejected rather than passed to SIM_break_step.
    if (cycles < 0):
        print "Cannot break on negative time"
        return
    if not cpu:
        (cpu, _) = get_cpu()
    SIM_break_step(cpu, cycles)
# Absolute step breakpoint ("sba") and its relative companion ("sb").
new_command("step-break-absolute", sim_break_absolute_cmd,
            [arg(obj_t('processor', 'processor'), "cpu-name", "?"),
             arg(int_t, "instructions")],
            alias = ["sba", "sim-break-absolute"],
            type = ["Execution", "Breakpoints", "Debugging"],
            short = "set absolute time breakpoint",
            group_short = "set step breakpoints",
            namespace_copy = ("processor", sim_break_absolute_cmd),
            see_also = ["step-break-absolute", "cycle-break", "cycle-break-absolute", "list-breakpoints"],
            doc = """
Set a breakpoint so that the selected CPU will stop after its step counter has
reached the <i>instructions</i> value. If the CPU is not specified the selected
frontend processor will be used (see <b>pselect</b>).
To list all breakpoints set use the command <b>list-breakpoints</b>.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="474")
new_command("step-break", sim_break_cmd,
            [arg(obj_t('processor', 'processor'), "cpu-name", "?"),
             arg(int_t, "instructions")],
            alias = ["sb", "sim-break"],
            short = "set time breakpoint",
            namespace_copy = ("processor", sim_break_cmd),
            see_also = ["step-break-absolute", "cycle-break", "cycle-break-absolute", "list-breakpoints"],
            doc = """
Sets a breakpoint so that the CPU will stop after executing <i>instructions</i>
number of steps from the time the command was issued. If the CPU is not
specified the selected frontend processor will be used (see <b>pselect</b>).
To list all breakpoints set use the command <b>list-breakpoints</b>.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="491")
def cycle_break_absolute_cmd(cpu, cycles):
    # Break when the cpu's *absolute* cycle count reaches 'cycles'.
    # A target below the current count would be in the past -> reject.
    if not cpu:
        (cpu, _) = get_cpu()
    if (cycles < SIM_cycle_count(cpu)):
        print "Cannot break on negative time"
        return
    SIM_break_cycle(cpu, cycles - SIM_cycle_count(cpu))
def cycle_break_cmd(cpu, cycles):
    # Break 'cycles' cycles from now (frontend cpu when 'cpu' is None).
    if not cpu:
        (cpu, _) = get_cpu()
    if (cycles < 0):
        print "Cannot break on negative time"
        return
    SIM_break_cycle(cpu, cycles)
new_command("cycle-break-absolute", cycle_break_absolute_cmd,
[arg(obj_t('processor', 'processor'), "cpu-name", "?"),
arg(int_t, "cycles")],
alias = "cba",
type = ["Execution", "Breakpoints", "Debugging"],
short = "set absolute cycle breakpoint",
namespace_copy = ("processor", cycle_break_absolute_cmd),
see_also = ["cycle-break", "step-break", "step-break-absolute", "list-breakpoints"],
doc = """
Set a breakpoint so that the selected CPU will stop after its cycle counter has
reached the <i>cycles</i> value. If the CPU is not specified the selected
frontend processor will be used (see <b>pselect</b>).
To list all breakpoints | |
###################################################################################################
#ESNet: An Efficient Symmetric Network for Real-time Semantic Segmentation
#Paper-Link: https://arxiv.org/pdf/1906.09826.pdf
###################################################################################################
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torchsummary import summary
class DownsamplerBlock(nn.Module):
    """Downsampling stage: concatenate a strided conv with a max-pool.

    The 6x6/stride-2 convolution contributes ``noutput - ninput`` channels
    and the 2x2 max-pooled input contributes ``ninput``, so their
    concatenation has ``noutput`` channels at roughly half resolution.
    """

    def __init__(self, ninput, noutput):
        super().__init__()
        self.conv = nn.Conv2d(ninput, noutput - ninput, (6, 6), stride=2, padding=2, bias=True)
        self.pool = nn.MaxPool2d(2, stride=2)
        self.bn = nn.BatchNorm2d(noutput, eps=1e-3)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, input):
        pooled = self.pool(input)
        convolved = self.conv(input)
        # The two branches can disagree by a pixel on odd-sized inputs;
        # pad the pooled branch to the conv branch's spatial size.
        dh = convolved.size()[2] - pooled.size()[2]
        dw = convolved.size()[3] - pooled.size()[3]
        pooled = F.pad(pooled, [dw // 2, dw - dw // 2,
                                dh // 2, dh - dh // 2])
        merged = torch.cat([convolved, pooled], 1)
        return self.relu(self.bn(merged))
class UpsamplerBlock(nn.Module):
    """Learned 2x upsampling: transposed conv, then BatchNorm and ReLU.

    The 6x6 transposed conv with stride 2 and padding 2 exactly doubles
    the spatial size: out = (in - 1) * 2 - 4 + 6 = 2 * in.
    """

    def __init__(self, ninput, noutput):
        super().__init__()
        self.conv = nn.ConvTranspose2d(ninput, noutput, 6, stride=2, padding=2, output_padding=0, bias=True)
        self.bn = nn.BatchNorm2d(noutput, eps=1e-3)

    def forward(self, input):
        return F.relu(self.bn(self.conv(input)))
class FCU(nn.Module):
    """Factorized Convolution Unit (residual block).

    Two pairs of factorized (k x 1 then 1 x k) convolutions; the second
    pair is dilated by ``dilated``.  Dropout2d(``dropprob``) is applied
    before the residual addition and skipped entirely when the
    probability is zero.  Spatial size is preserved.
    """

    def __init__(self, chann, kernel_size, dropprob, dilated):
        super(FCU, self).__init__()
        half_k = int((kernel_size - 1) // 2)
        dilated_pad = half_k * dilated
        # First factorized pair (undilated).
        self.conv3x1_1 = nn.Conv2d(chann, chann, (kernel_size, 1), stride=1,
                                   padding=(half_k, 0), bias=True)
        self.conv1x3_1 = nn.Conv2d(chann, chann, (1, kernel_size), stride=1,
                                   padding=(0, half_k), bias=True)
        self.bn1 = nn.BatchNorm2d(chann, eps=1e-03)
        # Second factorized pair (dilated).
        self.conv3x1_2 = nn.Conv2d(chann, chann, (kernel_size, 1), stride=1,
                                   padding=(dilated_pad, 0), bias=True,
                                   dilation=(dilated, 1))
        self.conv1x3_2 = nn.Conv2d(chann, chann, (1, kernel_size), stride=1,
                                   padding=(0, dilated_pad), bias=True,
                                   dilation=(1, dilated))
        self.bn2 = nn.BatchNorm2d(chann, eps=1e-03)
        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout2d(dropprob)

    def forward(self, input):
        out = self.relu(self.conv3x1_1(input))
        out = self.relu(self.bn1(self.conv1x3_1(out)))
        out = self.relu(self.conv3x1_2(out))
        out = self.bn2(self.conv1x3_2(out))
        if self.dropout.p != 0:
            out = self.dropout(out)
        # Residual connection with a final activation.
        return F.relu(input + out, inplace=True)
class PFCU(nn.Module):
    """Parallel Factorized Convolution Unit (residual block).

    A factorized 3x3 trunk followed by three parallel factorized branches
    with dilations 2, 5 and 9.  The branches share one BatchNorm (bn2)
    and one Dropout2d(0.3); their sum is added to the input.  Spatial
    size is preserved.
    """

    def __init__(self, chann):
        super(PFCU, self).__init__()
        # Trunk: undilated factorized 3x3.
        self.conv3x1_1 = nn.Conv2d(chann, chann, (3, 1), stride=1, padding=(1, 0), bias=True)
        self.conv1x3_1 = nn.Conv2d(chann, chann, (1, 3), stride=1, padding=(0, 1), bias=True)
        self.bn1 = nn.BatchNorm2d(chann, eps=1e-03)
        # Parallel dilated branches (suffix = dilation factor).
        self.conv3x1_22 = nn.Conv2d(chann, chann, (3, 1), stride=1, padding=(2, 0), bias=True, dilation=(2, 1))
        self.conv1x3_22 = nn.Conv2d(chann, chann, (1, 3), stride=1, padding=(0, 2), bias=True, dilation=(1, 2))
        self.conv3x1_25 = nn.Conv2d(chann, chann, (3, 1), stride=1, padding=(5, 0), bias=True, dilation=(5, 1))
        self.conv1x3_25 = nn.Conv2d(chann, chann, (1, 3), stride=1, padding=(0, 5), bias=True, dilation=(1, 5))
        self.conv3x1_29 = nn.Conv2d(chann, chann, (3, 1), stride=1, padding=(9, 0), bias=True, dilation=(9, 1))
        self.conv1x3_29 = nn.Conv2d(chann, chann, (1, 3), stride=1, padding=(0, 9), bias=True, dilation=(1, 9))
        self.bn2 = nn.BatchNorm2d(chann, eps=1e-03)
        self.dropout = nn.Dropout2d(0.3)

    def _dilated_branch(self, x, conv_v, conv_h):
        # One parallel branch: vertical conv, ReLU, horizontal conv,
        # shared BatchNorm, shared dropout (skipped when p == 0).
        y = self.bn2(conv_h(F.relu(conv_v(x))))
        if self.dropout.p != 0:
            y = self.dropout(y)
        return y

    def forward(self, input):
        trunk = F.relu(self.bn1(self.conv1x3_1(F.relu(self.conv3x1_1(input)))))
        b2 = self._dilated_branch(trunk, self.conv3x1_22, self.conv1x3_22)
        b5 = self._dilated_branch(trunk, self.conv3x1_25, self.conv1x3_25)
        b9 = self._dilated_branch(trunk, self.conv3x1_29, self.conv1x3_29)
        # Residual connection over the summed branches.
        return F.relu(input + b2 + b5 + b9, inplace=True)
class ESNetG(nn.Module):
    """ESNet variant: symmetric encoder-decoder for semantic segmentation.

    Encoder: three downsampling stages (3->16->64->128 channels) with FCU
    blocks at the shallow levels and PFCU blocks at the deepest level.
    Decoder: two upsampling stages mirrored with FCUs, then a final
    learned 2x upsampling to ``classes`` logit channels.
    """

    def __init__(self, classes):
        super().__init__()
        # ----- encoder -----
        self.initial_block = DownsamplerBlock(3, 16)
        self.layers = nn.ModuleList()
        self.layers.extend(FCU(16, 3, 0.03, 1) for _ in range(3))
        self.layers.append(DownsamplerBlock(16, 64))
        self.layers.extend(FCU(64, 5, 0.03, 1) for _ in range(2))
        self.layers.append(DownsamplerBlock(64, 128))
        self.layers.extend(PFCU(chann=128) for _ in range(3))
        # ----- decoder -----
        self.layers.append(UpsamplerBlock(128, 64))
        self.layers.extend(FCU(64, 5, 0, 1) for _ in range(2))
        self.layers.append(UpsamplerBlock(64, 16))
        self.layers.extend(FCU(16, 3, 0, 1) for _ in range(2))
        # Final learned upsampling back to input resolution, one channel
        # of logits per class.
        self.output_conv = nn.ConvTranspose2d(16, classes, 6, stride=2, padding=2, output_padding=0, bias=True)

    def forward(self, input):
        out = self.initial_block(input)
        for stage in self.layers:
            out = stage(out)
        return self.output_conv(out)
"""print layers and params of network"""
if __name__ == '__main__':
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # NOTE(review): classes=11 with a 360x480 summary input looks like the
    # CamVid benchmark configuration — confirm against the training script.
    model = ESNetG(classes=11).to(device)
    summary(model,(3,360,480))

    # Three independent FLOP/parameter counters are run on the same model
    # and printed side by side for comparison (all third-party tools).
    from fvcore.nn.flop_count import flop_count # https://github.com/facebookresearch/fvcore
    from tools.flops_counter.ptflops import get_model_complexity_info
    from thop import profile # https://github.com/Lyken17/pytorch-OpCounter

    x = torch.randn(1, 3, 512, 1024)
    from fvcore.nn.jit_handles import batchnorm_flop_jit
    # generic_activation_jit is imported but not used below.
    from fvcore.nn.jit_handles import generic_activation_jit

    # Extra op handler so fvcore also counts BatchNorm FLOPs.
    supported_ops = {
        "aten::batch_norm": batchnorm_flop_jit,
    }
    flop_dict, _ = flop_count(model, (x,), supported_ops)

    flops_count, params_count = get_model_complexity_info(model, (3, 512, 1024),
                                                          as_strings=False,
                                                          print_per_layer_stat=True)
    input = x
    macs, params = profile(model, inputs=(input,))
    print(flop_dict)
    print(flops_count, params_count)
    print(macs, params)
'''
"D:\ProgramData\Anaconda3\envs\tensorflow 1\python.exe" D:/GitHub/Efficient-Segmentation-Networks/model/ESNetG.py
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
MaxPool2d-1 [-1, 3, 180, 240] 0
Conv2d-2 [-1, 13, 180, 240] 1,417
BatchNorm2d-3 [-1, 16, 180, 240] 32
ReLU-4 [-1, 16, 180, 240] 0
DownsamplerBlock-5 [-1, 16, 180, 240] 0
Conv2d-6 [-1, 16, 180, 240] 784
ReLU-7 [-1, 16, 180, 240] 0
Conv2d-8 [-1, 16, 180, 240] 784
BatchNorm2d-9 [-1, 16, 180, 240] 32
ReLU-10 [-1, 16, 180, 240] 0
Conv2d-11 [-1, 16, 180, 240] 784
ReLU-12 [-1, 16, 180, 240] 0
Conv2d-13 [-1, 16, 180, 240] 784
BatchNorm2d-14 [-1, 16, 180, 240] 32
Dropout2d-15 [-1, 16, 180, 240] 0
FCU-16 [-1, 16, 180, 240] 0
Conv2d-17 [-1, 16, 180, 240] 784
ReLU-18 [-1, 16, 180, 240] 0
Conv2d-19 [-1, 16, 180, 240] 784
BatchNorm2d-20 [-1, 16, 180, 240] 32
ReLU-21 [-1, 16, 180, 240] 0
Conv2d-22 [-1, 16, 180, 240] 784
ReLU-23 [-1, 16, 180, 240] 0
Conv2d-24 [-1, 16, 180, 240] 784
BatchNorm2d-25 [-1, 16, 180, 240] 32
Dropout2d-26 [-1, 16, 180, 240] 0
FCU-27 [-1, 16, 180, 240] 0
Conv2d-28 [-1, 16, 180, 240] 784
ReLU-29 [-1, 16, 180, 240] 0
Conv2d-30 [-1, 16, 180, 240] 784
BatchNorm2d-31 [-1, 16, 180, 240] 32
ReLU-32 [-1, 16, 180, 240] 0
Conv2d-33 [-1, 16, 180, 240] 784
ReLU-34 [-1, 16, 180, 240] 0
Conv2d-35 [-1, 16, 180, 240] 784
BatchNorm2d-36 [-1, 16, 180, 240] 32
Dropout2d-37 [-1, 16, 180, 240] 0
FCU-38 [-1, 16, 180, 240] 0
MaxPool2d-39 [-1, 16, 90, 120] 0
Conv2d-40 [-1, 48, 90, 120] 27,696
BatchNorm2d-41 [-1, 64, 90, 120] 128
ReLU-42 [-1, 64, 90, 120] 0
DownsamplerBlock-43 [-1, 64, 90, 120] 0
Conv2d-44 [-1, 64, 90, 120] 20,544
ReLU-45 [-1, 64, 90, 120] 0
Conv2d-46 [-1, 64, 90, 120] 20,544
BatchNorm2d-47 [-1, 64, 90, 120] 128
ReLU-48 [-1, 64, 90, 120] 0
Conv2d-49 [-1, 64, 90, 120] 20,544
ReLU-50 [-1, 64, 90, 120] 0
Conv2d-51 [-1, 64, 90, 120] 20,544
BatchNorm2d-52 [-1, 64, 90, 120] 128
Dropout2d-53 [-1, 64, 90, 120] 0
FCU-54 [-1, 64, 90, 120] 0
Conv2d-55 [-1, 64, 90, 120] 20,544
ReLU-56 [-1, 64, 90, 120] 0
Conv2d-57 [-1, 64, 90, 120] 20,544
BatchNorm2d-58 [-1, 64, 90, 120] 128
ReLU-59 [-1, 64, 90, 120] 0
Conv2d-60 [-1, 64, 90, 120] 20,544
ReLU-61 [-1, 64, 90, 120] 0
Conv2d-62 [-1, 64, 90, 120] 20,544
BatchNorm2d-63 [-1, 64, 90, 120] 128
Dropout2d-64 [-1, 64, 90, 120] 0
FCU-65 [-1, 64, 90, 120] 0
MaxPool2d-66 [-1, 64, 45, 60] 0
Conv2d-67 [-1, 64, 45, 60] 147,520
BatchNorm2d-68 [-1, 128, 45, 60] 256
ReLU-69 [-1, 128, 45, 60] 0
DownsamplerBlock-70 [-1, 128, 45, 60] 0
Conv2d-71 [-1, 128, 45, 60] 49,280
Conv2d-72 [-1, 128, 45, 60] 49,280
BatchNorm2d-73 [-1, 128, 45, 60] 256
Conv2d-74 [-1, 128, 45, 60] 49,280
Conv2d-75 [-1, 128, 45, 60] 49,280
BatchNorm2d-76 [-1, 128, 45, 60] 256
Dropout2d-77 [-1, 128, 45, 60] 0
Conv2d-78 [-1, 128, 45, 60] 49,280
Conv2d-79 [-1, 128, 45, 60] 49,280
BatchNorm2d-80 [-1, 128, 45, 60] 256
Dropout2d-81 [-1, 128, 45, 60] 0
Conv2d-82 [-1, 128, 45, 60] 49,280
Conv2d-83 [-1, 128, 45, 60] 49,280
BatchNorm2d-84 [-1, 128, 45, 60] 256
Dropout2d-85 [-1, 128, 45, 60] 0
PFCU-86 [-1, 128, 45, 60] 0
Conv2d-87 [-1, 128, 45, 60] 49,280
Conv2d-88 [-1, 128, 45, 60] 49,280
BatchNorm2d-89 [-1, 128, 45, 60] 256
Conv2d-90 [-1, 128, 45, 60] 49,280
Conv2d-91 [-1, 128, 45, 60] 49,280
BatchNorm2d-92 [-1, 128, 45, 60] 256
Dropout2d-93 [-1, 128, 45, 60] 0
Conv2d-94 [-1, 128, 45, 60] 49,280
Conv2d-95 [-1, 128, 45, 60] 49,280
BatchNorm2d-96 [-1, 128, 45, 60] 256
Dropout2d-97 [-1, 128, 45, 60] 0
Conv2d-98 [-1, 128, 45, 60] 49,280
Conv2d-99 [-1, 128, 45, 60] 49,280
BatchNorm2d-100 [-1, 128, 45, 60] 256
Dropout2d-101 [-1, 128, 45, 60] 0
PFCU-102 [-1, 128, 45, 60] 0
Conv2d-103 [-1, 128, 45, 60] 49,280
Conv2d-104 [-1, 128, 45, 60] 49,280
BatchNorm2d-105 [-1, 128, 45, 60] 256
Conv2d-106 [-1, 128, 45, 60] 49,280
Conv2d-107 [-1, 128, 45, 60] | |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# pylint:disable=bad-whitespace
# pylint:disable=line-too-long
# pylint:disable=too-many-lines
# pylint:disable=invalid-name
# #########################################################
#
# ************** !! WARNING !! ***************
# ******* THIS FILE WAS AUTO-GENERATED *******
# ********* DO NOT MODIFY THIS FILE **********
#
# #########################################################
the_rel_defines_dict = {
'1337X': ['https://en.wikipedia.org/wiki/1337x'],
'4Chan': ['https://en.wikipedia.org/wiki/4chan'],
'51% Attack': ['https://en.wikipedia.org/wiki/Double-spending#51%_attack'],
'ABAP for SAP HANA 2.0': [ 'https://training.sap.com/certification/E_HANAAW_14/'],
'ABAP with SAP NetWeaver 7.40': [ 'https://training.sap.com/certification/C_TAW12_740/'],
'ABAP with SAP NetWeaver 7.50': [ 'https://training.sap.com/certification/C_TAW12_750/'],
'AIX Certified System Administrator': [ 'https://www.ibm.com/certify/cert.html?id=03005009'],
'API': ['https://en.wikipedia.org/wiki/API'],
'ASIC': [ 'https://en.wikipedia.org/wiki/Application-specific_integrated_circuit'],
'ASV Certification': [ 'https://www.pcisecuritystandards.org/assessors_and_solutions/become_asv'],
'AWS': ['https://en.wikipedia.org/wiki/Amazon_Web_Services'],
'AWS Certified Advanced Networking': [ 'https://aws.amazon.com/certification/'],
'AWS Certified Alexa Skill Builder': [ 'https://aws.amazon.com/certification/'],
'AWS Certified Big Data': ['https://aws.amazon.com/certification/'],
'AWS Certified Cloud Practitioner': [ 'https://aws.amazon.com/training/path-cloudpractitioner/'],
'AWS Certified DevOps Engineer': ['https://aws.amazon.com/certification/'],
'AWS Certified Developer': ['https://aws.amazon.com/certification/'],
'AWS Certified Machine Learning': ['https://aws.amazon.com/certification/'],
'AWS Certified Security': ['https://aws.amazon.com/certification/'],
'AWS Certified Solutions Architect': [ 'https://aws.amazon.com/certification/'],
'AWS Certified SysOps Administrator': [ 'https://aws.amazon.com/certification/'],
'Abbvie Inc.': ['https://en.wikipedia.org/wiki/AbbVie_Inc.'],
'Abstract Algebra': ['https://en.wikipedia.org/wiki/Abstract_algebra'],
'Accessibility': ['https://en.wikipedia.org/wiki/Accessibility'],
'Accounting': ['https://en.wikipedia.org/wiki/Accounting'],
'Accounts Payable': ['https://en.wikipedia.org/wiki/Accounts_payable'],
'Accumulo': ['https://en.wikipedia.org/wiki/Apache_Accumulo'],
'Active Record Pattern': [ 'https://en.wikipedia.org/wiki/Active_record_pattern'],
'ActiveMQ': ['https://en.wikipedia.org/wiki/Apache_ActiveMQ'],
'Advanced Identity Manager': [ 'https://www.necam.com/AdvancedRecognitionSystems/Products/AIMXM/'],
'Aesthetics': ['https://en.wikipedia.org/wiki/Aesthetics'],
'AiCure': ['https://aicure.com/'],
'Airavata': ['https://en.wikipedia.org/wiki/Apache_Airavata'],
'Algorithm': ['https://en.wikipedia.org/wiki/Algorithm'],
'AllegroGraph': ['https://en.wikipedia.org/wiki/AllegroGraph'],
'Allianz': ['https://en.wikipedia.org/wiki/Allianz'],
'Allura': ['https://en.wikipedia.org/wiki/Apache_Allura'],
'Almirall': ['https://en.wikipedia.org/wiki/Almirall'],
'Alpine Linux': ['https://en.wikipedia.org/wiki/Alpine_Linux'],
'Altcoin': ['https://en.wikipedia.org/wiki/Cryptocurrency#Altcoin'],
'Amazon Machine Image': [ 'https://en.wikipedia.org/wiki/Amazon_Machine_Image'],
'Amazon Neptune': ['https://en.wikipedia.org/wiki/Amazon_Neptune'],
'Ambari': ['https://en.wikipedia.org/wiki/Apache_Ambari'],
'Amgen': ['https://en.wikipedia.org/wiki/Amgen'],
'Analytical Activity': [],
'Analytics': ['https://en.wikipedia.org/wiki/Analytics'],
'Android': ['https://en.wikipedia.org/wiki/Android_(operating_system)'],
'AngularJS': ['https://en.wikipedia.org/wiki/AngularJS'],
'Ansible': ['https://en.wikipedia.org/wiki/Ansible_(software)'],
'Ant': ['https://en.wikipedia.org/wiki/Apache_Ant'],
'Antergos Linux': ['https://en.wikipedia.org/wiki/Antergos'],
'Anthropology': ['https://en.wikipedia.org/wiki/Anthropology'],
'Anzo Graph': ['https://en.wikipedia.org/wiki/Cambridge_Semantics'],
'Apache Cassandra': ['https://en.wikipedia.org/wiki/Apache_Cassandra'],
'Apache Software': [ 'https://en.wikipedia.org/wiki/Apache_Software_Foundation'],
'Apple': ['https://en.wikipedia.org/wiki/Apple_Inc.'],
'Application Virtualization': [ 'https://en.wikipedia.org/wiki/Application_virtualization'],
'Arch Linux': ['https://en.wikipedia.org/wiki/Arch_Linux'],
'Archaeology': ['https://en.wikipedia.org/wiki/Archaeology'],
'Architecture': ['https://en.wikipedia.org/wiki/Software_architecture'],
'Ariba Integration': [ 'https://training.sap.com/certification/C_AR_INT_13/'],
'ArrangoDB': ['https://en.wikipedia.org/wiki/ArangoDB'],
'Art History': ['https://en.wikipedia.org/wiki/Art_history'],
'Artificial Intelligence': [ 'https://en.wikipedia.org/wiki/Artificial_intelligence'],
'Artificial Neural Network': [ 'https://en.wikipedia.org/wiki/Artificial_neural_network'],
'Astellas Pharma': ['https://en.wikipedia.org/wiki/Astellas_Pharma'],
'Astra Linux': ['https://en.wikipedia.org/wiki/Astra_Linux'],
'Astrazeneca': ['https://en.wikipedia.org/wiki/AstraZeneca'],
'Astronomy': ['https://en.wikipedia.org/wiki/Astronomy'],
'Astrophysics': ['https://en.wikipedia.org/wiki/Astrophysics'],
'Asynchronous Transfer Mode': [ 'https://en.wikipedia.org/wiki/Asynchronous_Transfer_Mode'],
'Aurora': ['https://en.wikipedia.org/wiki/Apache_Aurora'],
'Azure AI Engineer Associate': [ 'https://www.microsoft.com/en-us/learning/azure-ai-engineer.aspx'],
'Azure Administrator Associate': [ 'https://www.microsoft.com/en-us/learning/azure-administrator.aspx'],
'Azure Certification': [ 'https://www.microsoft.com/en-us/learning/azure-fundamentals.aspx'],
'Azure Data Engineer Associate': [ 'https://www.microsoft.com/en-us/learning/azure-data-engineer.aspx'],
'Azure Data Scientist Associate': [ 'https://www.microsoft.com/en-us/learning/azure-data-scientist.aspx'],
'Azure DevOps Engineer Expert': [ 'https://www.microsoft.com/en-us/learning/azure-devops.aspx'],
'Azure Developer Associate': [ 'https://www.microsoft.com/en-us/learning/azure-developer.aspx'],
'Azure Security Engineer Associate': [ 'https://www.microsoft.com/en-us/learning/azure-security-engineer.aspx'],
'Azure Solutions Architect Expert': [ 'https://www.microsoft.com/en-us/learning/azure-solutions-architect.aspx'],
'BPEL': [ 'https://en.wikipedia.org/wiki/Business_Process_Execution_Language'],
'BSD License': ['https://en.wikipedia.org/wiki/BSD_licenses'],
'Back End': ['https://en.wikipedia.org/wiki/Data_access_layer'],
'Backup as a Service': [ 'https://en.wikipedia.org/wiki/Remote_backup_service'],
'Base Programming Credentials': [ 'https://www.sas.com/en_us/certification.html'],
'Basf': ['https://en.wikipedia.org/wiki/BASF'],
'Bayer': ['https://en.wikipedia.org/wiki/Bayer'],
'Beam': ['https://en.wikipedia.org/wiki/Apache_Beam'],
'Benchmark': ['https://en.wikipedia.org/wiki/Benchmarking'],
'Big Data': ['https://en.wikipedia.org/wiki/Big_data'],
'Bioinformatics': ['https://en.wikipedia.org/wiki/Bioinformatics'],
'Biological Anthropology': [ 'https://en.wikipedia.org/wiki/Biological_anthropology'],
'Biology': ['https://en.wikipedia.org/wiki/Biology'],
'Biophysics': ['https://en.wikipedia.org/wiki/Biophysics'],
'Biostatistics': ['https://en.wikipedia.org/wiki/Biostatistics'],
'Biotechnology': ['https://en.wikipedia.org/wiki/Biotechnology'],
'Bitcoin': ['https://en.wikipedia.org/wiki/Bitcoin'],
'Bitmain': ['https://en.wikipedia.org/wiki/Bitmain'],
'BlackBerry Enterprise Server': [ 'https://en.wikipedia.org/wiki/BlackBerry_Enterprise_Server'],
'Blackberry Mobile Device': ['https://en.wikipedia.org/wiki/BlackBerry'],
'Blockchain': ['https://en.wikipedia.org/wiki/Blockchain'],
'Boehringer Ingelheim': [ 'https://en.wikipedia.org/wiki/Boehringer_Ingelheim'],
'Botnet': ['https://en.wikipedia.org/wiki/Botnet'],
'Bourne': ['https://en.wikipedia.org/wiki/Bourne_shell'],
'Bristol-myers Squibb': [ 'https://en.wikipedia.org/wiki/Bristol-Myers_Squibb'],
'Business Foundation & Integration with SAP ERP 6.07': [ 'https://training.sap.com/certification/C_TERP10_67/'],
'Business Intelligence': [ 'https://en.wikipedia.org/wiki/Business_intelligence'],
'Business Intelligence with SAP BW 7.4 & SAP BI 4.1': [ 'https://training.sap.com/certification/C_TBI30_74/'],
'Business Model': ['https://en.wikipedia.org/wiki/Business_model'],
'Business Process': ['https://en.wikipedia.org/wiki/Business_process'],
'Business Process Integration with SAP S/4HANA 1610': [ 'https://training.sap.com/certification/C_TS410_1610/'],
'Business Process Integration with SAP S/4HANA 1709': [ 'https://training.sap.com/certification/C_TS410_1709/'],
'BusyBox': ['https://en.wikipedia.org/wiki/BusyBox'],
'C Language': ['https://en.wikipedia.org/wiki/C_(programming_language)'],
'C#': ['https://en.wikipedia.org/wiki/C_Sharp_(programming_language)'],
'CAP Certification': ['https://www.isc2.org/Certifications/CAP'],
'CCDA': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CCDE': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CCDP': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CCENT': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CCIE Collaboration Certification': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications/expert/ccie-collaboration.html'],
'CCIE Data Center': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CCIE Routing and Switching': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CCIE Security': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CCIE Service Provider': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CCIE Wireless': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CCNA Cloud Certification': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CCNA Collaboration Certification': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications/associate/ccna-collaboration.html'],
'CCNA Cyber Ops': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CCNA Data Center Certification': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CCNA Industrial': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CCNA Routing and Switching': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CCNA Security': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CCNA Service Provider': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CCNA Wireless': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CCNP Cloud Certification': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications/professional/ccnp-cloud.html'],
'CCNP Collaboration Certification': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications/professional/ccnp-collaboration.html'],
'CCNP Data Center': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CCNP Routing and Switching': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CCNP Security': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CCNP Service Provider': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CCNP Wireless': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CCSK Certification': ['https://cloudsecurityalliance.org/education/ccsk/'],
'CCSP Certification': ['https://www.isc2.org/Certifications/CCSP'],
'CCT Data Center': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CCT Routing & Switching': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'CGEIT Certification': [ 'http://www.isaca.org/Certification/CGEIT-Certified-in-the-Governance-of-Enterprise-IT/Pages/default.aspx'],
'CISA Certification': [ 'http://www.isaca.org/CERTIFICATION/CISA-CERTIFIED-INFORMATION-SYSTEMS-AUDITOR/Pages/default.aspx'],
'CISM Certification': [ 'http://www.isaca.org/Certification/CISM-Certified-Information-Security-Manager/Pages/default.aspx'],
'CISSP Certification': ['https://www.isc2.org/Certifications/CISSP'],
'CJ Healthcare': ['https://en.wikipedia.org/wiki/CJ_HealthCare'],
'CLI': ['https://en.wikipedia.org/wiki/Command-line_interface'],
'CMOS Device': ['https://en.wikipedia.org/wiki/CMOS'],
'CPU': ['https://en.wikipedia.org/wiki/Central_processing_unit'],
'CRISC Certification': [ 'http://www.isaca.org/Certification/CRISC-Certified-in-Risk-and-Information-Systems-Control/Pages/default.aspx'],
'CRM': ['https://en.wikipedia.org/wiki/Customer_relationship_management'],
'CSS': ['https://en.wikipedia.org/wiki/Cascading_Style_Sheets'],
'CSSA Certification': [ 'https://www.infosecinstitute.com/skills/learning-paths/certified-scada-security-architect-cssa/'],
'CSSLP Certification': ['https://www.isc2.org/Certifications/CSSLP'],
'Camel': ['http://camel.apache.org'],
'Cameyo': ['https://en.wikipedia.org/wiki/Cameyo'],
'Capability Maturity Model': [ 'https://en.wikipedia.org/wiki/Capability_Maturity_Model'],
'Category Theory': ['https://en.wikipedia.org/wiki/Category_theory'],
'Ceedo': ['https://en.wikipedia.org/wiki/Ceedo'],
'CentOS': ['https://en.wikipedia.org/wiki/CentOS'],
'Central Finance in SAP S/4HANA': [ 'https://training.sap.com/certification/C_S4FCF_1809/'],
'Ceph Storage': ['https://www.redhat.com/en/technologies/storage/ceph'],
'Certification': [ 'https://en.wikipedia.org/wiki/Professional_certification_(computer_technology)'],
'Certified Associate in Project Management': [ 'https://www.pmi.org/certifications/types/certified-associate-capm'],
'Certified Desktop Administrator': [ 'https://www.microsoft.com/en-us/learning/modern-desktop.aspx'],
'Certified Enterprise Administrator': [ 'https://www.microsoft.com/en-us/learning/m365-enterprise-administrator.aspx'],
'Certified Hyperledger Fabric Administrator': [ 'https://training.linuxfoundation.org/certification/certified-hyperledger-fabric-administrator-chfa/'],
'Certified Hyperledger Sawtooth Administrator': [ 'https://training.linuxfoundation.org/certification/certified-hyperledger-sawtooth-administrator-chsa/'],
'Certified Kubernetes Administrator': [ 'https://www.cncf.io/certification/cka/ '
'or '
'https://training.linuxfoundation.org/certification/certified-kubernetes-administrator-cka/'],
'Certified Kubernetes Application Developer': [ 'https://www.cncf.io/certification/ckad/ '
'or '
'https://training.linuxfoundation.org/certification/certified-kubernetes-application-developer-ckad/'],
'Certified Messaging Administrator': [ 'https://www.microsoft.com/en-us/learning/m365-messaging-administrator.aspx'],
'Certified Microsoft Access': [ 'https://www.microsoft.com/en-us/learning/browse-all-certifications.aspx'],
'Certified Microsoft Excel': [ 'https://www.microsoft.com/en-us/learning/browse-all-certifications.aspx'],
'Certified Microsoft Office': [ 'https://www.microsoft.com/en-us/learning/browse-all-certifications.aspx'],
'Certified Microsoft Outlook': [ 'https://www.microsoft.com/en-us/learning/browse-all-certifications.aspx'],
'Certified Microsoft PowerPoint': [ 'https://www.microsoft.com/en-us/learning/browse-all-certifications.aspx'],
'Certified Microsoft Word': [ 'https://www.microsoft.com/en-us/learning/browse-all-certifications.aspx'],
'Certified Security Administrator': [ 'https://www.microsoft.com/en-us/learning/m365-security-administrator.aspx'],
'Certified Teamwork Administrator': [ 'https://www.microsoft.com/en-us/learning/m365-teamwork-administrator.aspx'],
'Certified Windows Server 2012': [ 'https://www.microsoft.com/en-us/learning/browse-all-certifications.aspx'],
'Certified Windows Server 2016': [ 'https://www.microsoft.com/en-us/learning/browse-all-certifications.aspx'],
'Change Request': ['https://en.wikipedia.org/wiki/Change_request'],
'Cheat Engine': ['https://en.wikipedia.org/wiki/Cheat_Engine'],
'Cheating In Online Games': [ 'https://en.wikipedia.org/wiki/Cheating_in_online_games'],
'Chef': ['https://en.wikipedia.org/wiki/Chef_(software)'],
'Circuit Design': ['https://en.wikipedia.org/wiki/Circuit_design'],
'Cisco ACI': [ 'https://www.sdxcentral.com/data-center/definitions/what-is-cisco-aci/'],
'Cisco Business Architecture Analyst': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications/digital-transformation-specialist/business/architecture-analyst.html'],
'Cisco Business Architecture Practitioner': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications/digital-transformation-specialist/business/architecture-practitioner.html'],
'Cisco Business Architecture Specialist': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications/digital-transformation-specialist/business/architecture-specialist.html'],
'Cisco Certified Design Associate': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications/associate/ccda.html'],
'Cisco Certified Design Expert': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications/expert/ccde.html'],
'Cisco Certified Design Professional': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications/professional/ccdp.html'],
'Cisco Certified Internetwork Expert Routing and Switching': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications/expert/ccie-routing-switching.html'],
'Cisco Certified Network Professional': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications/professional/ccnp-routing-switching.html'],
'Cisco Certified Technican Routing and Switching': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications/entry/technician-cct/routing-switching.html'],
'Cisco Cyber Security Specialist': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'Cisco Industrial Networking Specialist': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'Cisco Network Programmability Design and Implementation Specialist': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'Cisco Network Programmability Developer Specialist': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'Cisco Routing and Switching Certification': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications/associate/ccna-routing-switching.html'],
'Cisco Service Provider Mobility CDMA to LTE Specialist': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'Cisco Service Provider Mobility UMTS to LTE Specialist': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'Cisco TelePresence Solutions Specialist': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications/specialist/telepresence-solutions.html'],
'Cisco Unified Contact Center Enterprise Specialist': [ 'https://www.cisco.com/c/en_ca/training-events/career-certifications/specialist/collaboration/ucces.html'],
'Cisco Video Network Specialist': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications/specialist/video-network.html'],
'Cisco and NetApp FlexPod Design Specialist': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'Cisco and NetApp FlexPod Implementation and Administration Specialist': [ 'https://www.cisco.com/c/en/us/training-events/training-certifications/certifications.html'],
'Citrix': ['https://en.wikipedia.org/wiki/Citrix_Systems'],
'Classics': ['https://en.wikipedia.org/wiki/Classics'],
'Clojure': ['https://en.wikipedia.org/wiki/Clojure'],
'Cloud Foundry Certified Developer': [ 'https://training.linuxfoundation.org/certification/cloud-foundry-certified-developer-cfcd/'],
'Cloud Security Alliance': ['https://cloudsecurityalliance.org/'],
'Cloudera': ['https://en.wikipedia.org/wiki/Cloudera'],
'Cloudera Administrator Certification': [ 'https://www.cloudera.com/about/training/certification/cca-admin.html'],
'Cloudera Certified Data Engineer': [ 'https://www.cloudera.com/about/training/certification/ccp-data-engineer.html'],
'Cloudera Data Analyst Certification': [ 'https://www.cloudera.com/about/training/certification/cca-data-analyst.html'],
'Cloudera Spark and Hadoop Certification': [ 'https://www.cloudera.com/about/training/certification/cca-spark.html'],
'Cluster Analysis': ['https://en.wikipedia.org/wiki/Cluster_analysis'],
'Coffeescript': ['https://en.wikipedia.org/wiki/CoffeeScript'],
'Cognitive Psychology': [ 'https://en.wikipedia.org/wiki/Cognitive_psychology'],
'Cognitive Science': ['https://en.wikipedia.org/wiki/Cognitive_science'],
'Column Oriented Database': [ 'https://en.wikipedia.org/wiki/Column-oriented_DBMS'],
'Common Lisp': ['https://en.wikipedia.org/wiki/Common_Lisp'],
'Communication Protocol': [ 'https://en.wikipedia.org/wiki/Communication_protocol'],
'Comparative Literature': [ 'https://en.wikipedia.org/wiki/Comparative_literature'],
'Comparative Religion': [ 'https://en.wikipedia.org/wiki/Comparative_religion'],
'Computational Biology': [ 'https://en.wikipedia.org/wiki/Computational_biology'],
'Computational Chemistry': [ 'https://en.wikipedia.org/wiki/Computational_chemistry'],
'Computational Complexity': [ 'https://en.wikipedia.org/wiki/Computational_complexity'],
'Computational Linguistics': [ 'https://en.wikipedia.org/wiki/Computational_linguistics'],
'Computational Neuroscience': [ 'https://en.wikipedia.org/wiki/Computational_neuroscience'],
'Computational Physics': [ 'https://en.wikipedia.org/wiki/Computational_physics'],
'Computational Science': [ 'https://en.wikipedia.org/wiki/Computational_science'],
'Computer Configuration': [ 'https://en.wikipedia.org/wiki/Computer_configuration'],
'Computer Hardware': ['https://en.wikipedia.org/wiki/Computer_hardware'],
'Computer Science': ['https://en.wikipedia.org/wiki/Computer_science'],
'Computer Vision': ['https://en.wikipedia.org/wiki/Computer_vision'],
'Configuration Management': [ 'https://en.wikipedia.org/wiki/System_configuration'],
'Configuration Management Software': [ 'https://en.wikipedia.org/wiki/Software_configuration_management'],
'Container Software': [ 'https://en.wikipedia.org/wiki/OS-level_virtualisation'],
'Content Designer': ['https://en.wikipedia.org/wiki/Content_designer'],
'Continental Philosophy': [ 'https://en.wikipedia.org/wiki/Continental_philosophy'],
'Contrail Cloud': [ 'https://www.juniper.net/us/en/products-services/sdn/contrail/contrail-cloud/'],
'Contrail Edge Cloud': [ 'https://www.juniper.net/us/en/products-services/sdn/contrail/contrail-edge-cloud/'],
'Contrail Enterprise Multicloud': [ 'https://www.juniper.net/us/en/products-services/sdn/contrail/contrail-enterprise-multicloud/'],
'Contrail SDWAN': [ 'https://www.juniper.net/us/en/products-services/sdn/contrail/contrail-sd-wan/'],
'Corporate Finance': ['https://en.wikipedia.org/wiki/Corporate_finance'],
'Couch DB': ['https://en.wikipedia.org/wiki/Apache_CouchDB'],
'Coursera': ['https://en.wikipedia.org/wiki/Coursera'],
'Critical Theory': ['https://en.wikipedia.org/wiki/Critical_theory'],
'Cryptocurrency': ['https://en.wikipedia.org/wiki/Cryptocurrency'],
'Cryptography': ['https://en.wikipedia.org/wiki/Cryptography'],
'Crystal Report': ['https://en.wikipedia.org/wiki/Crystal_Reports'],
'Cultural Anthropology': [ 'https://en.wikipedia.org/wiki/Cultural_anthropology'],
'Cultural Studies': ['https://en.wikipedia.org/wiki/Cultural_studies'],
'Cython': ['https://en.wikipedia.org/wiki/Cython'],
'D3.js': ['https://en.wikipedia.org/wiki/D3.js'],
'DB2': ['https://en.wikipedia.org/wiki/IBM_Db2_Family'],
'Dashboard': ['https://en.wikipedia.org/wiki/Dashboard_(business)'],
'Data Engineering with Azure': [ 'https://www.microsoft.com/en-us/learning/browse-all-certifications.aspx'],
'Data Integration with SAP Data Services 4.2': [ 'https://training.sap.com/certification/C_DS_42/'],
'Data Mining': ['https://en.wikipedia.org/wiki/Data_mining'],
'Data Model': ['https://en.wikipedia.org/wiki/Data_model'],
'Data Scientist': ['https://en.wikipedia.org/wiki/Data_science'],
'Data Structure': ['https://en.wikipedia.org/wiki/Data_structure'],
'Data at Rest': ['https://en.wikipedia.org/wiki/Data_at_rest'],
'Database Design': ['https://en.wikipedia.org/wiki/Database_design'],
'Database Dimension': [ 'https://en.wikipedia.org/wiki/Dimension_(data_warehouse)'],
'Database Function': [],
'Database Index': ['https://en.wikipedia.org/wiki/Database_index'],
'Database Schema': ['https://en.wikipedia.org/wiki/Database_schema'],
'Database Table': ['https://en.wikipedia.org/wiki/Table_(database)'],
'Debian Linux': ['https://en.wikipedia.org/wiki/Debian'],
'Debugging': ['https://en.wikipedia.org/wiki/Debugging'],
'Declarative Language': [ 'https://en.wikipedia.org/wiki/Declarative_language'],
'Deep Learning': ['https://en.wikipedia.org/wiki/Deep_learning'],
'Defect': ['https://en.wikipedia.org/wiki/Product_defect'],
'Dell': ['https://en.wikipedia.org/wiki/Dell_EMC'],
'Dell Cloud Certification': [ 'https://education.dellemc.com/content/emc/en-us/home/certification-overview.html#explore-certification%7Cexplore12'],
'Dell Converged Infrastructure Certification': [ 'https://education.dellemc.com/content/emc/en-us/home/certification-overview.html#explore-certification|explore13'],
'Dell Data Protection Certification': [ 'https://education.dellemc.com/content/emc/en-us/home/certification-overview.html#explore-certification|explore14'],
'Dell Data Science Certifictation': [ 'https://education.dellemc.com/content/emc/en-us/home/certification-overview.html#explore-certification|explore15'],
'Dell Enterprise Architect Certification': [ 'https://education.dellemc.com/content/emc/en-us/home/certification-overview.html#explore-certification|explore110'],
'Dell Networking Certification': [ 'https://education.dellemc.com/content/emc/en-us/home/certification-overview.html#explore-certification|explore16'],
'Dell Security Certification': [ 'https://education.dellemc.com/content/emc/en-us/home/certification-overview.html#explore-certification|explore17'],
'Dell Server Certification': [ 'https://education.dellemc.com/content/emc/en-us/home/certification-overview.html#explore-certification|explore18'],
'Dell Storage Certification': [ 'https://education.dellemc.com/content/emc/en-us/home/certification-overview.html#explore-certification|explore19'],
'Delphi': ['https://en.wikipedia.org/wiki/Object_Pascal'],
'Demisto': ['https://www.demisto.com'],
'Denial-Of-Service Attack': [ 'https://en.wikipedia.org/wiki/Denial-of-service_attack'],
'Design Pattern': [ 'https://en.wikipedia.org/wiki/Architectural_pattern_(computer_science)'],
'Design Thinking': ['https://training.sap.com/certification/C_THINK_01/'],
'Developer': ['https://en.wikipedia.org/wiki/Programmer'],
'Digital Pen': ['https://en.wikipedia.org/wiki/Digital_pen'],
'Dimensionality Reduction': [ 'https://en.wikipedia.org/wiki/Dimensionality_reduction'],
'Discoverability': ['https://en.wikipedia.org/wiki/Discoverability'],
'Display Resolution': ['https://en.wikipedia.org/wiki/Display_resolution'],
'Distributed Computing': [ 'https://en.wikipedia.org/wiki/Distributed_computing'],
'Distributed Ledger': ['https://en.wikipedia.org/wiki/Distributed_ledger'],
'Distributed Software': [ 'https://en.wikipedia.org/wiki/Distributed_application'],
'Django': ['https://en.wikipedia.org/wiki/Django_(web_framework)'],
'Docker': ['https://en.wikipedia.org/wiki/Docker_(software)'],
'Document | |
import os
import argparse
import json
import psutil
import numpy
from onnx import TensorProto
"""
This profiler tool could run a transformer model and print out the kernel time spent on each Node of the model.
Example of profiling of longformer model:
python profiler.py --model longformer-base-4096_fp32.onnx --batch_size 1 --sequence_length 4096 --global_length 8 --samples 1000 --thread_num 8 --dummy_inputs longformer --use_gpu
"""
# Operator types that embed subgraphs. The result parsers skip profile entries
# for these ops — NOTE(review): presumably because their reported duration
# overlaps with the inner nodes' durations; confirm against OnnxRuntime
# profiler output before relying on the totals.
NODES_TYPE_CONTAINING_SUBGRAPH = ['Scan', 'Loop', 'If']
def parse_arguments(argv=None):
    """Build and run the command-line parser for the profiler.

    Args:
        argv (List[str], optional): argument vector to parse. Defaults to None,
            in which case ``sys.argv[1:]`` is used by argparse.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    parser = argparse.ArgumentParser()

    # Model and input-shape options.
    parser.add_argument('-m', '--model', required=True, type=str, help="onnx model path")
    parser.add_argument('-b', '--batch_size', required=False, type=int, default=1, help="batch size of input")
    parser.add_argument('-s', '--sequence_length', required=False, type=int, default=32,
                        help="sequence length of input")
    parser.add_argument('--past_sequence_length', required=False, type=int, default=1,
                        help="past sequence length for gpt2")
    parser.add_argument('--global_length', required=False, type=int, default=1,
                        help="number of global tokens for longformer")

    # Measurement controls.
    parser.add_argument('--samples', required=False, type=int, default=1000,
                        help="number of samples to test. Set it large enough to reduce the variance of performance result.")
    parser.add_argument('--threshold', required=False, type=float, default=0.01,
                        help="Threshold of run time ratio among all nodes. Nodes with larger ratio will show in top expensive nodes.")
    parser.add_argument("--thread_num", required=False, type=int, default=-1, help="number of threads to use")

    # BERT-specific graph-input names.
    parser.add_argument('--input_ids_name', required=False, type=str, default=None,
                        help="input name for input IDs, for bert")
    parser.add_argument('--segment_ids_name', required=False, type=str, default=None,
                        help="input name for segment IDs, for bert")
    parser.add_argument('--input_mask_name', required=False, type=str, default=None,
                        help="input name for attention mask, for bert")

    parser.add_argument('--dummy_inputs', required=False, default='default',
                        choices=['bert', 'gpt2', 'longformer', 'default'],
                        help="Type of model inputs. The default will create dummy inputs with ones.")

    # Boolean switches; defaults are consolidated in a single set_defaults call.
    parser.add_argument('-g', '--use_gpu', required=False, action='store_true', help="use GPU")
    parser.add_argument('--basic_optimization', required=False, action='store_true',
                        help="Enable only basic graph optimizations. By default, all optimizations are enabled in OnnxRuntime")
    parser.add_argument('--kernel_time_only', required=False, action='store_true',
                        help="Only include the kernel time and no fence time")
    parser.add_argument('-v', '--verbose', required=False, action='store_true')
    parser.set_defaults(use_gpu=False, basic_optimization=False, kernel_time_only=False, verbose=False)

    return parser.parse_args(argv)
def run_profile(onnx_model_path, use_gpu, basic_optimization, thread_num, all_inputs):
    """Run the model once per input sample with OnnxRuntime profiling enabled.

    Args:
        onnx_model_path (str): path of the ONNX model to profile.
        use_gpu (bool): whether to run on the GPU execution provider.
        basic_optimization (bool): when True, only basic graph optimizations
            are enabled; otherwise all optimizations are enabled.
        thread_num (int): number of intra-op threads for the session.
        all_inputs (List[Dict]): one feed dict per inference run.

    Returns:
        str: path of the profile JSON file written by OnnxRuntime.
    """
    from benchmark_helper import create_onnxruntime_session

    session = create_onnxruntime_session(onnx_model_path,
                                         use_gpu,
                                         enable_all_optimization=not basic_optimization,
                                         num_threads=thread_num,
                                         enable_profiling=True)

    # Each run is recorded by the profiler; outputs are not needed.
    for feed in all_inputs:
        session.run(None, feed)

    return session.end_profiling()
def load_profile_json(profile_file):
    """Load the OnnxRuntime profile output and validate its top-level shape.

    Args:
        profile_file (str): path of the profile JSON file.

    Returns:
        List[Dict]: the parsed list of profiling records.
    """
    print(f"loading profile output {profile_file} ...")

    with open(profile_file, "r") as handle:
        records = json.load(handle)

    # The profiler emits a JSON array of event records; anything else is a bug.
    assert isinstance(records, list)
    return records
def parse_profile_results(sess_time, kernel_time_only=False, threshold=0):
    """Format per-node profile results in two sections.

    The first section lists nodes in their original execution order with
    cumulative before/after percentages; the second lists the nodes whose
    share of the total time is at least `threshold`, sorted by duration
    descending.

    Args:
        sess_time (List[Dict]): profile records loaded from the profiler JSON.
        kernel_time_only (bool, optional): Only include items for kernel time.
            Defaults to False.
        threshold (int, optional): Minimum ratio of duration among all.
            Defaults to 0.

    Returns:
        List[str]: lines of string for output.
    """
    order = []        # node names in first-seen order
    durations = {}    # node name -> accumulated duration (μs)
    call_counts = {}  # node name -> number of profile records
    providers = {}    # node name -> "CPU" or "CUDA"
    grand_total = 0

    for record in sess_time:
        if record["cat"] != "Node" or "dur" not in record or "args" not in record \
                or "op_name" not in record["args"]:
            continue

        # Kernel/fence records of the same node share a name modulo suffix.
        node_name = record["name"].replace("_kernel_time", "").replace("_fence_before",
                                                                      "").replace("_fence_after", "")

        if "provider" in record["args"]:
            device = "CPU" if record["args"]["provider"] == "CPUExecutionProvider" else "CUDA"
            if node_name not in providers:
                providers[node_name] = device
            else:
                # A node must not flip between providers across records.
                assert providers[node_name] == device
        elif kernel_time_only:
            continue

        # Subgraph-containing ops are excluded from the per-node totals.
        if record["args"]["op_name"] in NODES_TYPE_CONTAINING_SUBGRAPH:
            continue

        if node_name not in durations:
            order.append(node_name)
            durations[node_name] = record["dur"]
            call_counts[node_name] = 1
        else:
            durations[node_name] += record["dur"]
            call_counts[node_name] += 1
        grand_total += record["dur"]

    # Section 1: nodes in original order with cumulative percentages.
    lines = [
        "Results:", "-" * 64,
        "Duration(μs)\tPercentage\tBefore(Exclusive)\tAfter(Inclusive)\tCalls\tProvider\tNode_Name"
    ]
    cumulative = 0.0
    for node_name in order:
        duration = durations[node_name]
        count = call_counts[node_name]
        avg_time = duration / float(count)
        percentage = (duration / grand_total) * 100.0
        provider = providers.get(node_name, "")
        lines.append(
            f"{avg_time:.1f}\t{percentage:5.2f}\t{cumulative:5.1f}\t{100.0 - cumulative:5.1f}\t{count}\t{provider}\t{node_name}"
        )
        cumulative += percentage

    # Section 2: nodes above the threshold, most expensive first.
    lines.append(f"\nTop expensive nodes with threshold={threshold:.2f}:")
    lines.append("-" * 64)
    lines.append("Duration(μs)\tPercentage\tProvider\tName")
    for node_name, duration in sorted(durations.items(), key=lambda kv: kv[1], reverse=True):
        ratio = duration / grand_total
        if ratio < threshold:
            continue
        avg_time = duration / float(call_counts[node_name])
        lines.append(f"{avg_time:.1f}\t{ratio * 100.0:5.2f}\t{providers.get(node_name, '')}\t{node_name}")

    return lines
def group_profile_results(sess_time, kernel_time_only, use_gpu):
    """Aggregate profile records by operator type, most expensive first.

    Args:
        sess_time (List[Dict]): profile records loaded from the profiler JSON.
        kernel_time_only (bool): Only include items for kernel time.
        use_gpu (bool): GPU is used in profiling or not; adds CPU-side columns
            to the output when True.

    Returns:
        List[str]: lines of string for output.
    """
    totals = {}      # op name -> accumulated duration (μs), all providers
    counts = {}      # op name -> record count, all providers
    cpu_totals = {}  # op name -> accumulated duration on CPU provider only
    cpu_counts = {}  # op name -> record count on CPU provider only
    grand_total = 0

    for record in sess_time:
        if record["cat"] != "Node" or "dur" not in record or "args" not in record \
                or "op_name" not in record["args"]:
            continue
        if kernel_time_only and "provider" not in record["args"]:
            continue

        op_name = record["args"]["op_name"]
        # Subgraph-containing ops are excluded from the per-op totals.
        if op_name in NODES_TYPE_CONTAINING_SUBGRAPH:
            continue

        duration = record["dur"]
        totals[op_name] = totals.get(op_name, 0) + duration
        counts[op_name] = counts.get(op_name, 0) + 1
        grand_total += duration

        if record["args"].get("provider") == "CPUExecutionProvider":
            cpu_totals[op_name] = cpu_totals.get(op_name, 0) + duration
            cpu_counts[op_name] = cpu_counts.get(op_name, 0) + 1

    if use_gpu:
        lines = ["Average(μs)\tTotal(μs)\tTotal_Percentage\tCalls\tCpu_Duration\tCpu_Calls\tName"]
    else:
        lines = ["Average(μs)\tTotal(μs)\tTotal_Percentage\tCalls\tName"]

    for op_name, duration in sorted(totals.items(), key=lambda kv: kv[1], reverse=True):
        ratio = duration / grand_total
        count = counts[op_name]
        avg_time = duration / float(count)
        if use_gpu:
            lines.append(
                f"{avg_time:.1f}\t{duration}\t{ratio * 100.0:5.2f}\t{count}\t{cpu_totals.get(op_name, 0)}\t{cpu_counts.get(op_name, 0)}\t{op_name}")
        else:
            lines.append(f"{avg_time:.1f}\t{duration}\t{ratio * 100.0:5.2f}\t{count}\t{op_name}")

    return lines
def get_dim_from_type_proto(dim):
    """Return the value of one TensorShapeProto dimension.

    Args:
        dim: a TensorShapeProto.Dimension protobuf message.

    Returns:
        int | str | None: the concrete size (``dim_value``), the symbolic name
        (``dim_param``), or None when the dimension is unset.
    """
    # WhichOneof returns the name of the populated oneof field ('dim_value'
    # or 'dim_param'), or None when neither is set. Call it once instead of
    # twice, and use isinstance rather than the `type(x) == str` anti-idiom.
    which = dim.WhichOneof('value')
    return getattr(dim, which) if isinstance(which, str) else None
def get_shape_from_type_proto(type_proto):
    """Return the tensor shape of a TypeProto as a list.

    Each element is an int (concrete), a str (symbolic), or None (unset),
    as produced by get_dim_from_type_proto.
    """
    return list(map(get_dim_from_type_proto, type_proto.tensor_type.shape.dim))
def create_dummy_inputs(onnx_model, batch_size, sequence_length, samples):
    """Create dummy all-ones inputs for an ONNX model.

    Args:
        onnx_model (OnnxModel): ONNX model
        batch_size (int): batch size
        sequence_length (int): sequence length
        samples (int): number of samples

    Returns:
        List[Dict] | None: list of feed dicts (one per sample), or None when a
        graph input has more than two symbolic dimensions and thus cannot be
        resolved with batch_size/sequence_length alone.
    """
    feeds = {}
    for graph_input in onnx_model.get_graph_inputs_excluding_initializers():
        shape = get_shape_from_type_proto(graph_input.type)

        # Positions of symbolic (string-named) dimensions in the shape.
        symbolic_positions = [i for i, dim in enumerate(shape) if isinstance(dim, str)]

        # Only two symbols are supported: the first is assumed to be the batch
        # size and the second the sequence length.
        if len(symbolic_positions) > 2:
            return None
        if symbolic_positions:
            shape[symbolic_positions[0]] = batch_size
        if len(symbolic_positions) > 1:
            shape[symbolic_positions[1]] = sequence_length

        elem_type = graph_input.type.tensor_type.elem_type
        assert elem_type in [TensorProto.FLOAT, TensorProto.INT32, TensorProto.INT64]
        if elem_type == TensorProto.FLOAT:
            dtype = numpy.float32
        elif elem_type == TensorProto.INT64:
            dtype = numpy.int64
        else:
            dtype = numpy.int32

        feeds[graph_input.name] = numpy.ones(shape, dtype=dtype)

    # The same feed dict is reused for every sample.
    return [feeds for _ in range(samples)]
def create_bert_inputs(onnx_model,
                       batch_size,
                       sequence_length,
                       samples,
                       input_ids_name=None,
                       segment_ids_name=None,
                       input_mask_name=None):
    """Create dummy inputs for a BERT model.

    Args:
        onnx_model (OnnxModel): ONNX model
        batch_size (int): batch size
        sequence_length (int): sequence length
        samples (int): number of samples
        input_ids_name (str, optional): Name of graph input for input IDs.
            Defaults to None.
        segment_ids_name (str, optional): Name of graph input for segment IDs.
            Defaults to None.
        input_mask_name (str, optional): Name of graph input for attention
            mask. Defaults to None.

    Returns:
        List[Dict]: list of inputs
    """
    from bert_test_data import find_bert_inputs, generate_test_data

    # Locate the three BERT graph inputs (by name when given, otherwise by
    # heuristics inside find_bert_inputs), then generate randomized test data
    # with a fixed seed so results are reproducible.
    input_ids, segment_ids, input_mask = find_bert_inputs(onnx_model, input_ids_name, segment_ids_name,
                                                          input_mask_name)
    return generate_test_data(batch_size,
                              sequence_length,
                              test_cases=samples,
                              seed=123,
                              verbose=False,
                              input_ids=input_ids,
                              segment_ids=segment_ids,
                              input_mask=input_mask,
                              random_mask_length=False)
def create_gpt2_inputs(onnx_model, batch_size, sequence_length, past_sequence_length, samples):
    """Create dummy inputs for GPT-2 model.

    Args:
        onnx_model (OnnxModel): ONNX model
        batch_size (int): batch size
        sequence_length (int): sequence length
        past_sequence_length (int): past sequence length
        samples (int): number of samples

    Raises:
        RuntimeError: symbolic is not supported. Use the tool convert_to_onnx.py to export ONNX model instead.

    Returns:
        List[Dict]: list of inputs
    """
    # The symbolic names shall be same as those used in Gpt2Helper.export_onnx(...) function.
    symbols = {
        'batch_size': batch_size,
        'seq_len': sequence_length,
        'past_seq_len': past_sequence_length,
        'total_seq_len': sequence_length + past_sequence_length
    }

    dummy_inputs = {}
    for graph_input in onnx_model.get_graph_inputs_excluding_initializers():
        shape = get_shape_from_type_proto(graph_input.type)
        for i, dim in enumerate(shape):
            # Only substitute symbolic (string) dimensions. The previous code
            # also looked up concrete int dimensions in `symbols`, which
            # raised KeyError for any input with a fixed dimension.
            if isinstance(dim, str):
                if dim not in symbols:
                    raise RuntimeError(f"symbol is not supported: {dim}")
                shape[i] = symbols[dim]

        elem_type = graph_input.type.tensor_type.elem_type
        assert elem_type in [TensorProto.FLOAT, TensorProto.INT32, TensorProto.INT64]
        data_type = numpy.float32 if elem_type == TensorProto.FLOAT else (
            numpy.int64 if elem_type == TensorProto.INT64 else numpy.int32)
        data = numpy.ones(shape, dtype=data_type)
        dummy_inputs[graph_input.name] = data

    all_inputs = [dummy_inputs for _ in range(samples)]
    return all_inputs
def create_longformer_inputs(onnx_model, batch_size, sequence_length, global_length, samples):
"""Create dummy inputs for Longformer model.
Args:
onnx_model (OnnxModel): ONNX model
batch_size (int): batch size
sequence_length (int): sequence length
global_length (int): number of global tokens
samples (int): number of samples
Raises:
RuntimeError: symbolic is not supported. Use the tool convert_longformer_to_onnx.py to export ONNX model instead.
Returns:
List[Dict]: list of inputs
"""
symbols = {'batch_size': batch_size, 'sequence_length': sequence_length}
dummy_inputs = {}
for graph_input in onnx_model.get_graph_inputs_excluding_initializers():
shape = get_shape_from_type_proto(graph_input.type)
for i, dim in enumerate(shape):
if isinstance(dim, str) and dim not in symbols.keys():
raise RuntimeError(f"symbol is not supported: {dim}")
else:
shape[i] = symbols[dim]
elem_type = graph_input.type.tensor_type.elem_type
| |
Sets the size of the start annotation arrow head,
relative to `arrowwidth`. A value of 1 (default) gives
a head about 3x as wide as the line.
startstandoff
Sets a distance, in pixels, to move the start arrowhead
away from the position it is pointing at, for example
to point at the edge of a marker independent of zoom.
Note that this shortens the arrow from the `ax` / `ay`
vector, in contrast to `xshift` / `yshift` which moves
everything by this amount.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
text
Sets the text associated with this annotation. Plotly
uses a subset of HTML tags to do things like newline
(<br>), bold (<b></b>), italics (<i></i>), hyperlinks
(<a href='...'></a>). Tags <em>, <sup>, <sub> <span>
are also supported.
textangle
Sets the angle at which the `text` is drawn with
respect to the horizontal.
valign
Sets the vertical alignment of the `text` within the
box. Has an effect only if an explicit height is set to
override the text height.
visible
Determines whether or not this annotation is visible.
width
Sets an explicit width for the text box. null (default)
lets the text set the box width. Wider text will be
clipped. There is no automatic wrapping; use <br> to
start a new line.
x
Sets the annotation's x position.
xanchor
Sets the text box's horizontal position anchor This
anchor binds the `x` position to the "left", "center"
or "right" of the annotation. For example, if `x` is
set to 1, `xref` to "paper" and `xanchor` to "right"
then the right-most portion of the annotation lines up
with the right-most edge of the plotting area. If
"auto", the anchor is equivalent to "center" for data-
referenced annotations or if there is an arrow, whereas
for paper-referenced with no arrow, the anchor picked
corresponds to the closest side.
xshift
Shifts the position of the whole annotation and arrow
to the right (positive) or left (negative) by this many
pixels.
y
Sets the annotation's y position.
yanchor
Sets the text box's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the annotation. For example, if `y` is set
to 1, `yref` to "paper" and `yanchor` to "top" then the
top-most portion of the annotation lines up with the
top-most edge of the plotting area. If "auto", the
anchor is equivalent to "middle" for data-referenced
annotations or if there is an arrow, whereas for paper-
referenced with no arrow, the anchor picked corresponds
to the closest side.
yshift
Shifts the position of the whole annotation and arrow
up (positive) or down (negative) by this many pixels.
z
Sets the annotation's z position.
"""
def __init__(
self,
arg=None,
align=None,
arrowcolor=None,
arrowhead=None,
arrowside=None,
arrowsize=None,
arrowwidth=None,
ax=None,
ay=None,
bgcolor=None,
bordercolor=None,
borderpad=None,
borderwidth=None,
captureevents=None,
font=None,
height=None,
hoverlabel=None,
hovertext=None,
name=None,
opacity=None,
showarrow=None,
standoff=None,
startarrowhead=None,
startarrowsize=None,
startstandoff=None,
templateitemname=None,
text=None,
textangle=None,
valign=None,
visible=None,
width=None,
x=None,
xanchor=None,
xshift=None,
y=None,
yanchor=None,
yshift=None,
z=None,
**kwargs
):
"""
Construct a new Annotation object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.scene.Annotation`
align
Sets the horizontal alignment of the `text` within the
box. Has an effect only if `text` spans two or more
lines (i.e. `text` contains one or more <br> HTML tags)
or if an explicit width is set to override the text
width.
arrowcolor
Sets the color of the annotation arrow.
arrowhead
Sets the end annotation arrow head style.
arrowside
Sets the annotation arrow head position.
arrowsize
Sets the size of the end annotation arrow head,
relative to `arrowwidth`. A value of 1 (default) gives
a head about 3x as wide as the line.
arrowwidth
Sets the width (in px) of annotation arrow line.
ax
Sets the x component of the arrow tail about the arrow
head (in pixels).
ay
Sets the y component of the arrow tail about the arrow
head (in pixels).
bgcolor
Sets the background color of the annotation.
bordercolor
Sets the color of the border enclosing the annotation
`text`.
borderpad
Sets the padding (in px) between the `text` and the
enclosing border.
borderwidth
Sets the width (in px) of the border enclosing the
annotation `text`.
captureevents
Determines whether the annotation text box captures
mouse move and click events, or allows those events to
pass through to data points in the plot that may be
behind the annotation. By default `captureevents` is
False unless `hovertext` is provided. If you use the
event `plotly_clickannotation` without `hovertext` you
must explicitly enable `captureevents`.
font
Sets the annotation text font.
height
Sets an explicit height for the text box. null
(default) lets the text set the box height. Taller text
will be clipped.
hoverlabel
:class:`plotly.graph_objects.layout.scene.annotation.Ho
verlabel` instance or dict with compatible properties
hovertext
Sets text to appear when hovering over this annotation.
If omitted or blank, no hover label will appear.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
opacity
Sets the opacity of the annotation (text + arrow).
showarrow
Determines whether or not the annotation is drawn with
an arrow. If True, `text` is placed near the arrow's
tail. If False, `text` lines up with the `x` and `y`
provided.
standoff
Sets a distance, in pixels, to move the end arrowhead
away from the position it is pointing at, for example
to point at the edge of a marker independent of zoom.
Note that this shortens the arrow from the `ax` / `ay`
vector, in contrast to `xshift` / `yshift` which moves
everything by this amount.
startarrowhead
Sets the start annotation arrow head style.
startarrowsize
Sets the size of the start annotation arrow head,
relative to `arrowwidth`. A value of 1 (default) gives
a head about 3x as wide as the line.
startstandoff
Sets a distance, in pixels, to move the start arrowhead
away from the position it is pointing at, for example
to point at the edge of a marker independent of zoom.
Note that this shortens the arrow from the `ax` / `ay`
vector, in contrast to `xshift` / `yshift` which moves
everything by this amount.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
text
Sets the text associated with this annotation. Plotly
uses a subset of HTML tags to do things like newline
(<br>), bold (<b></b>), italics (<i></i>), hyperlinks
(<a href='...'></a>). Tags <em>, <sup>, <sub> <span>
are also supported.
textangle
Sets the angle at which the `text` is drawn with
respect to the horizontal.
valign
Sets the vertical alignment of the `text` within the
box. Has an effect only if an explicit height is set to
override the text | |
<gh_stars>0
import os
import os.path
import cv2
import glob
import h5py
from PIL import Image
import skimage
import skimage.io
import numpy as np
import pandas as pd
import torch
from torchvision import transforms
import torchvision.transforms.functional as TF
import utils
DATASET_REGISTRY = {}


def build_dataset(name, *args, **kwargs):
    """Look up `name` in the registry and call its loader factory."""
    return DATASET_REGISTRY[name](*args, **kwargs)


def register_dataset(name):
    """Decorator that registers a dataset-loader factory under `name`."""
    def _register(fn):
        if name in DATASET_REGISTRY:
            raise ValueError(f"Cannot register duplicate dataset ({name})")
        DATASET_REGISTRY[name] = fn
        return fn
    return _register
@register_dataset("DAVIS")
def load_DAVIS(data, batch_size=100, num_workers=8, image_size=None, stride=64, n_frames=5):
    """Build (train, valid, test) DataLoaders over the DAVIS video data.

    Fix: `num_workers` was accepted but ignored (worker counts were
    hard-coded to 8). It is now honored; its default equals the old
    hard-coded value, so default behavior is unchanged.
    """
    train_dataset = DAVIS(data, datatype="train", patch_size=image_size, stride=stride, n_frames=n_frames)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, num_workers=num_workers, shuffle=True)
    valid_dataset = DAVIS(data, datatype="val", n_frames=n_frames)
    valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=1, num_workers=num_workers, shuffle=False)
    test_dataset = DAVIS(data, datatype="test", n_frames=n_frames)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, num_workers=num_workers, shuffle=False)
    return train_loader, valid_loader, test_loader
@register_dataset("ImageDAVIS")
def load_ImageDAVIS(data, batch_size=100, num_workers=4, image_size=None, stride=64, n_frames=1):
    """Build (train, valid, test) DataLoaders over single DAVIS frames.

    Fix: `num_workers` was accepted but ignored (worker counts were
    hard-coded to 4). It is now honored; its default equals the old
    hard-coded value, so default behavior is unchanged.
    """
    train_dataset = ImageDAVIS(data, datatype="train", patch_size=image_size, stride=stride)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, num_workers=num_workers, shuffle=True)
    valid_dataset = ImageDAVIS(data, datatype="val")
    valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=1, num_workers=num_workers, shuffle=False)
    test_dataset = ImageDAVIS(data, datatype="test")
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, num_workers=num_workers, shuffle=False)
    return train_loader, valid_loader, test_loader
@register_dataset("Set8")
def load_Set8(data, batch_size=100, num_workers=8, n_frames=5):
    """Build the test DataLoader for the Set8 benchmark sequences.

    Fix: `num_workers` was accepted but ignored (hard-coded 8). It is now
    honored, with a default equal to the old hard-coded value.
    """
    test_dataset = Set8(data, n_frames=n_frames)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, num_workers=num_workers, shuffle=False)
    return test_loader
@register_dataset("CTC")
def load_CTC(data, batch_size=100, num_workers=4, image_size=None, stride=64, n_frames=5):
    """Build (train, valid) DataLoaders over the Cell Tracking Challenge data.

    Fix: `num_workers` was accepted but ignored (hard-coded 4). It is now
    honored, with a default equal to the old hard-coded value.
    """
    train_dataset = CTC(data, patch_size=image_size, stride=stride, n_frames=n_frames)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, num_workers=num_workers, shuffle=True)
    valid_dataset = CTC(data, n_frames=n_frames)
    valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=1, num_workers=num_workers, shuffle=False)
    return train_loader, valid_loader
@register_dataset("SingleVideo")
def load_SingleVideo(data, batch_size=8, dataset="DAVIS", video="giant-slalom",image_size=None, stride=64, n_frames=5,
                     aug=0, dist="G", mode="S", noise_std=30, min_noise=0, max_noise=100, sample=False, heldout=False):
    """Build (train, test) DataLoaders over one video sequence.

    The train split uses patching, augmentation, sampling and heldout
    options; the test split always iterates full, un-augmented frames.
    """
    shared = dict(dataset=dataset, video=video, stride=stride, n_frames=n_frames,
                  dist=dist, mode=mode, noise_std=noise_std,
                  min_noise=min_noise, max_noise=max_noise)
    train_dataset = SingleVideo(data, patch_size=image_size, aug=aug,
                                sample=sample, heldout=heldout, **shared)
    test_dataset = SingleVideo(data, patch_size=None, aug=0,
                               sample=False, heldout=False, **shared)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, num_workers=2, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, num_workers=1, shuffle=False)
    return train_loader, test_loader
@register_dataset("Nanoparticles")
def load_Nanoparticles(data, batch_size=8, image_size=None, stride=64, n_frames=5, aug=0):
    """Build (train, test) DataLoaders for the Nanoparticles microscopy data."""
    train_set = Nanoparticles(data, datatype="train", patch_size=image_size,
                              stride=stride, n_frames=n_frames, aug=aug)
    test_set = Nanoparticles(data, datatype="test", patch_size=None,
                             stride=200, n_frames=n_frames, aug=0)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, num_workers=2, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=1, num_workers=1, shuffle=False)
    return train_loader, test_loader
@register_dataset("RawVideo")
def load_RawVideo(data, batch_size=8, image_size=None, stride=64, n_frames=5, aug=0, scenes=(7, 8, 9, 10, 11), isos=(1600, 3200, 6400, 12800, 25600)):
    """Build (train, valid, test) DataLoaders over the raw video data.

    Fix: the `scenes` and `isos` defaults were mutable lists (a classic
    Python pitfall); they are now immutable tuples with the same values,
    which is backward-compatible since they are only iterated.
    """
    train_dataset = RawVideo(data, datatype="train", patch_size=image_size, stride=stride, n_frames=n_frames, aug=aug, scenes=scenes, isos=isos)
    # Validation crops a 1080px patch per 1920px frame, hence this stride.
    valid_dataset = RawVideo(data, datatype="val", patch_size=1080, stride=1920-1080, n_frames=n_frames, aug=0, scenes=scenes, isos=isos)
    test_dataset = RawVideo(data, datatype="test", patch_size=None, stride=64, n_frames=n_frames, aug=0, scenes=scenes, isos=isos)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, num_workers=2, shuffle=True)
    valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=1, num_workers=1, shuffle=False)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, num_workers=1, shuffle=False)
    return train_loader, valid_loader, test_loader
class DAVIS(torch.utils.data.Dataset):
    """DAVIS video dataset. Each item is n_frames consecutive frames
    concatenated along the channel axis, optionally cropped into square
    patches of `patch_size` on a `stride` grid."""
    def __init__(self, data_path, datatype="train", patch_size=None, stride=64, n_frames=5):
        super().__init__()
        self.data_path = data_path
        self.datatype = datatype
        self.size = patch_size
        self.stride = stride
        self.n_frames = n_frames
        # The DAVIS split files list one sequence folder name per line.
        if self.datatype == "train":
            self.folders = pd.read_csv(os.path.join(data_path, "ImageSets", "2017", "train.txt"), header=None)
        elif self.datatype == "val":
            self.folders = pd.read_csv(os.path.join(data_path, "ImageSets", "2017", "val.txt"), header=None)
        else:
            self.folders = pd.read_csv(os.path.join(data_path, "ImageSets", "2017", "test-dev.txt"), header=None)
        self.len = 0
        self.bounds = []  # cumulative frame counts, one entry per sequence
        for folder in self.folders.values:
            files = sorted(glob.glob(os.path.join(data_path, "JPEGImages", "480p", folder[0], "*.jpg")))
            self.len += len(files)
            self.bounds.append(self.len)
        if self.size is not None:
            # Patch-grid dimensions over the 480x854 DAVIS frames.
            self.n_H = (int((480-self.size)/self.stride)+1)
            self.n_W = (int((854-self.size)/self.stride)+1)
            self.n_patches = self.n_H * self.n_W
            self.len *= self.n_patches
        self.transform = transforms.Compose([transforms.ToTensor()])
    def __len__(self):
        return self.len
    def __getitem__(self, index):
        if self.size is not None:
            # Split the flat index into (patch position, frame index).
            patch = index % self.n_patches
            index = index // self.n_patches
        ends = 0
        x = (self.n_frames-1) // 2  # frames wanted on each side of the center
        # Locate the sequence containing this frame, convert `index` to a
        # sequence-local index, and record (in `ends`) how far the frame
        # window would overrun either end of the sequence.
        for i, bound in enumerate(self.bounds):
            if index < bound:
                folder = self.folders.values[i][0]
                if i>0:
                    index -= self.bounds[i-1]
                    newbound = bound - self.bounds[i-1]
                else:
                    newbound = bound
                if(index < x):
                    ends = x-index
                elif(newbound-1-index < x):
                    ends = -(x-(newbound-1-index))
                break
        files = sorted(glob.glob(os.path.join(self.data_path, "JPEGImages", "480p", folder, "*.jpg")))
        Img = Image.open(files[index])
        Img = np.array(Img)
        # Prepend the x preceding frames; near the sequence start the
        # offsets are adjusted so indices stay in range (frames repeat).
        for i in range(1,x+1):
            end = max(0, ends)
            off = max(0,i-x+end)
            img = Image.open(files[index-i+off])
            img = np.array(img)
            Img = np.concatenate((img, Img), axis=2)
        # Append the x following frames, with the symmetric adjustment.
        for i in range(1,x+1):
            end = -min(0,ends)
            off = max(0,i-x+end)
            img = Image.open(files[index+i-off])
            img = np.array(img)
            Img = np.concatenate((Img, img), axis=2)
        if self.size is not None:
            nh = (patch // self.n_W)*self.stride
            nw = (patch % self.n_W)*self.stride
            Img = Img[nh:(nh+self.size), nw:(nw+self.size), :]
        return self.transform(np.array(Img)).type(torch.FloatTensor)
class ImageDAVIS(torch.utils.data.Dataset):
    """Single-frame DAVIS dataset: each item is one RGB frame, optionally
    cropped into square patches of `patch_size` on a `stride` grid."""
    def __init__(self, data_path, datatype="train", patch_size=None, stride=40):
        super().__init__()
        self.data_path = data_path
        self.datatype = datatype
        self.size = patch_size
        self.stride = stride
        # The DAVIS split files list one sequence folder name per line.
        if self.datatype == "train":
            self.folders = pd.read_csv(os.path.join(data_path, "ImageSets", "2017", "train.txt"), header=None)
        elif self.datatype == "val":
            self.folders = pd.read_csv(os.path.join(data_path, "ImageSets", "2017", "val.txt"), header=None)
        else:
            self.folders = pd.read_csv(os.path.join(data_path, "ImageSets", "2017", "test-dev.txt"), header=None)
        self.len = 0
        self.bounds = []  # cumulative frame counts, one entry per sequence
        for folder in self.folders.values:
            files = sorted(glob.glob(os.path.join(data_path, "JPEGImages", "480p", folder[0], "*.jpg")))
            self.len += len(files)
            self.bounds.append(self.len)
        if self.size is not None:
            # Patch-grid dimensions over the 480x854 DAVIS frames.
            self.n_H = (int((480-self.size)/self.stride)+1)
            self.n_W = (int((854-self.size)/self.stride)+1)
            self.n_patches = self.n_H * self.n_W
            self.len *= self.n_patches
        self.transform = transforms.Compose([transforms.ToTensor()])
    def __len__(self):
        return self.len
    def __getitem__(self, index):
        if self.size is not None:
            # Split the flat index into (patch position, frame index).
            patch = index % self.n_patches
            index = index // self.n_patches
        # Locate the sequence containing this frame and make `index` local.
        for i, bound in enumerate(self.bounds):
            if index < bound:
                folder = self.folders.values[i][0]
                if i>0:
                    index -= self.bounds[i-1]
                break
        files = sorted(glob.glob(os.path.join(self.data_path, "JPEGImages", "480p", folder, "*.jpg")))
        Img = np.array(Image.open(files[index]))
        if self.size is not None:
            nh = (patch // self.n_W)*self.stride
            nw = (patch % self.n_W)*self.stride
            Img = Img[nh:(nh+self.size), nw:(nw+self.size), :]
        return self.transform(Img).type(torch.FloatTensor)
class Set8(torch.utils.data.Dataset):
    """Set8 benchmark (four GoPro + four Derf sequences). Each item is
    n_frames frames spaced `hop` apart, concatenated on the channel axis."""
    def __init__(self, data_path, n_frames=5, hop=1):
        super().__init__()
        self.data_path = data_path
        self.len = 0
        self.bounds = []  # cumulative frame counts, one entry per sequence
        self.hop = hop
        self.n_frames = n_frames
        self.folders = []
        self.folders += sorted(glob.glob(os.path.join(data_path, "GoPro/snowboard")))
        self.folders += sorted(glob.glob(os.path.join(data_path, "GoPro/hypersmooth")))
        self.folders += sorted(glob.glob(os.path.join(data_path, "GoPro/rafting")))
        self.folders += sorted(glob.glob(os.path.join(data_path, "GoPro/motorbike")))
        self.folders += sorted(glob.glob(os.path.join(data_path, "Derfs/tractor")))
        self.folders += sorted(glob.glob(os.path.join(data_path, "Derfs/sunflower")))
        self.folders += sorted(glob.glob(os.path.join(data_path, "Derfs/touchdown")))
        self.folders += sorted(glob.glob(os.path.join(data_path, "Derfs/park_joy")))
        for folder in self.folders:
            files = sorted(glob.glob(os.path.join(folder, "*.png")))
            self.len += len(files)
            self.bounds.append(self.len)
        self.transform = transforms.Compose([transforms.ToTensor()])
    def __len__(self):
        return self.len
    def __getitem__(self, index):
        ends = 0
        # Half-window width in frames, scaled by the hop spacing.
        x = ((self.n_frames-1) // 2)*self.hop
        # Locate the sequence containing this frame, make `index` local,
        # and record in `ends` how far the window overruns the sequence.
        for i, bound in enumerate(self.bounds):
            if index < bound:
                folder = self.folders[i]
                if i>0:
                    index -= self.bounds[i-1]
                    newbound = bound - self.bounds[i-1]
                else:
                    newbound = bound
                if(index < x):
                    ends = x-index
                elif(newbound-1-index < x):
                    ends = -(x-(newbound-1-index))
                break
        files = sorted(glob.glob(os.path.join(folder, "*.png")))
        Img = Image.open(files[index])
        Img = np.array(Img)
        # Preceding frames; offsets adjusted to stay inside the sequence.
        for i in range(self.hop, x+1, self.hop):
            end = max(0, ends)
            off = max(0,i-x+end)
            img = Image.open(files[index-i+off])
            img = np.array(img)
            Img = np.concatenate((img, Img), axis=2)
        # Following frames, with the symmetric adjustment.
        for i in range(self.hop, x+1, self.hop):
            end = -min(0,ends)
            off = max(0,i-x+end)
            img = Image.open(files[index+i-off])
            img = np.array(img)
            Img = np.concatenate((Img, img), axis=2)
        return self.transform(Img).type(torch.FloatTensor)
class CTC(torch.utils.data.Dataset):
    """Cell Tracking Challenge data: grayscale .tif sequences (two
    acquisitions, "01" and "02", per dataset folder). Each item is
    n_frames consecutive frames stacked on the channel axis, optionally
    cropped into patches."""
    def __init__(self, data_path, patch_size=None, stride=64, n_frames=5):
        super().__init__()
        self.data_path = data_path
        self.size = patch_size
        self.stride = stride
        self.len = 0
        self.bounds = [0]  # cumulative item counts; leading 0 eases lookup
        self.nHs = []
        self.nWs = []
        self.n_frames = n_frames
        parent_folders = sorted([x for x in glob.glob(os.path.join(data_path, "*/*")) if os.path.isdir(x)])
        self.folders = []
        for folder in parent_folders:
            self.folders.append(os.path.join(folder, "01"))
            self.folders.append(os.path.join(folder, "02"))
        for folder in self.folders:
            files = sorted(glob.glob(os.path.join(folder, "*.tif")))
            if self.size is not None:
                # Frame sizes vary by sequence, so each sequence gets its
                # own patch grid, sized from its first frame.
                (h, w) = np.array(cv2.imread(files[0], cv2.IMREAD_GRAYSCALE)).shape
                nH = (int((h-self.size)/self.stride)+1)
                nW = (int((w-self.size)/self.stride)+1)
                self.len += len(files)*nH*nW
                self.nHs.append(nH)
                self.nWs.append(nW)
            else:
                self.len += len(files)
            self.bounds.append(self.len)
        self.transform = transforms.Compose([transforms.ToTensor()])
    def __len__(self):
        return self.len
    def __getitem__(self, index):
        ends = 0
        x = (self.n_frames-1) // 2  # frames wanted on each side of center
        for i, bound in enumerate(self.bounds):
            if index < bound:
                # bounds has a leading 0, so sequence i corresponds to
                # folders[i-1] / nHs[i-1] / nWs[i-1].
                folder = self.folders[i-1]
                index -= self.bounds[i-1]
                newbound = bound - self.bounds[i-1]
                if self.size is not None:
                    nH = self.nHs[i-1]
                    nW = self.nWs[i-1]
                    patch = index % (nH*nW)
                    index = index // (nH*nW)
                    newbound = newbound // (nH*nW)
                # `ends` records how far the frame window would overrun
                # either end of the sequence.
                if(index < x):
                    ends = x-index
                elif(newbound-1-index < x):
                    ends = -(x-(newbound-1-index))
                break
        files = sorted(glob.glob(os.path.join(folder, "*.tif")))
        img = cv2.imread(files[index], cv2.IMREAD_GRAYSCALE)
        (h, w) = np.array(img).shape
        Img = np.reshape(np.array(img), (h,w,1))
        # Preceding frames; offsets adjusted near the sequence start.
        for i in range(1,x+1):
            end = max(0, ends)
            off = max(0,i-x+end)
            img = cv2.imread(files[index-i+off], cv2.IMREAD_GRAYSCALE)
            img = np.reshape(np.array(img), (h,w,1))
            Img = np.concatenate((img, Img), axis=2)
        # Following frames; offsets adjusted near the sequence end.
        for i in range(1,x+1):
            end = -min(0,ends)
            off = max(0,i-x+end)
            img = cv2.imread(files[index+i-off], cv2.IMREAD_GRAYSCALE)
            img = np.reshape(np.array(img), (h,w,1))
            Img = np.concatenate((Img, img), axis=2)
        if self.size is not None:
            nh = (patch // nW)*self.stride
            nw = (patch % nW)*self.stride
            Img = Img[nh:(nh+self.size), nw:(nw+self.size), :]
        return self.transform(Img).type(torch.FloatTensor)
class SingleVideo(torch.utils.data.Dataset):
def __init__(self, data_path, dataset="DAVIS", video="giant-slalom", patch_size=None, stride=64, n_frames=5,
aug=0, dist="G", mode="S", noise_std=30, min_noise=0, max_noise=100, sample=True, heldout=False):
super().__init__()
self.data_path = data_path
self.dataset = dataset
self.size = patch_size
self.stride = stride
self.n_frames = n_frames
self.aug = aug
self.heldout = heldout
if dataset == "DAVIS":
self.files = sorted(glob.glob(os.path.join(data_path, "JPEGImages", "480p", video, "*.jpg")))
elif dataset == "GoPro" or dataset == "Derfs":
self.files = sorted(glob.glob(os.path.join(data_path, video, "*.png")))
elif dataset == "Vid3oC":
self.files = sorted(glob.glob(os.path.join(data_path, "TrainingHR", video, "*.png")))
elif dataset == | |
<reponame>NicEscobar/InertialNavigation
#!/usr/bin/env python
'''
parse a MAVLink protocol XML file and generate a Node.js javascript module implementation
Based on original work Copyright <NAME> 2011
Released under GNU GPL version 3 or later
'''
from __future__ import print_function
from builtins import range
import os
import textwrap
from . import mavtemplate
t = mavtemplate.MAVTemplate()
def generate_preamble(outf, msgs, args, xml):
    """Write the fixed header of the generated node.js module: the
    require() lines, the X25 CRC helper, protocol constants, and the
    mavlink.header / mavlink.message base classes. ${...} placeholders
    are filled by the template engine from the dict at the end."""
    print("Generating preamble")
    t.write(outf, """
/*
MAVLink protocol implementation for node.js (auto-generated by mavgen_javascript.py)
Generated from: ${FILELIST}
Note: this file has been auto-generated. DO NOT EDIT
*/
jspack = require("jspack").jspack,
    _ = require("underscore"),
    events = require("events"),
    util = require("util");
// Add a convenience method to Buffer
Buffer.prototype.toByteArray = function () {
    return Array.prototype.slice.call(this, 0)
}
mavlink = function(){};
// Implement the X25CRC function (present in the Python version through the mavutil.py package)
mavlink.x25Crc = function(buffer, crc) {
    var bytes = buffer;
    var crc = crc || 0xffff;
    _.each(bytes, function(e) {
        var tmp = e ^ (crc & 0xff);
        tmp = (tmp ^ (tmp << 4)) & 0xff;
        crc = (crc >> 8) ^ (tmp << 8) ^ (tmp << 3) ^ (tmp >> 4);
        crc = crc & 0xffff;
    });
    return crc;
}
mavlink.WIRE_PROTOCOL_VERSION = "${WIRE_PROTOCOL_VERSION}";
mavlink.MAVLINK_TYPE_CHAR = 0
mavlink.MAVLINK_TYPE_UINT8_T = 1
mavlink.MAVLINK_TYPE_INT8_T = 2
mavlink.MAVLINK_TYPE_UINT16_T = 3
mavlink.MAVLINK_TYPE_INT16_T = 4
mavlink.MAVLINK_TYPE_UINT32_T = 5
mavlink.MAVLINK_TYPE_INT32_T = 6
mavlink.MAVLINK_TYPE_UINT64_T = 7
mavlink.MAVLINK_TYPE_INT64_T = 8
mavlink.MAVLINK_TYPE_FLOAT = 9
mavlink.MAVLINK_TYPE_DOUBLE = 10
// Mavlink headers incorporate sequence, source system (platform) and source component.
mavlink.header = function(msgId, mlen, seq, srcSystem, srcComponent) {
    this.mlen = ( typeof mlen === 'undefined' ) ? 0 : mlen;
    this.seq = ( typeof seq === 'undefined' ) ? 0 : seq;
    this.srcSystem = ( typeof srcSystem === 'undefined' ) ? 0 : srcSystem;
    this.srcComponent = ( typeof srcComponent === 'undefined' ) ? 0 : srcComponent;
    this.msgId = msgId
}
mavlink.header.prototype.pack = function() {
    return jspack.Pack('BBBBBB', [${PROTOCOL_MARKER}, this.mlen, this.seq, this.srcSystem, this.srcComponent, this.msgId]);
}
// Base class declaration: mavlink.message will be the parent class for each
// concrete implementation in mavlink.messages.
mavlink.message = function() {};
// Convenience setter to facilitate turning the unpacked array of data into member properties
mavlink.message.prototype.set = function(args) {
    _.each(this.fieldnames, function(e, i) {
        this[e] = args[i];
    }, this);
};
// This pack function builds the header and produces a complete MAVLink message,
// including header and message CRC.
mavlink.message.prototype.pack = function(mav, crc_extra, payload) {
    this.payload = payload;
    this.header = new mavlink.header(this.id, payload.length, mav.seq, mav.srcSystem, mav.srcComponent);
    this.msgbuf = this.header.pack().concat(payload);
    var crc = mavlink.x25Crc(this.msgbuf.slice(1));
    // For now, assume always using crc_extra = True. TODO: check/fix this.
    crc = mavlink.x25Crc([crc_extra], crc);
    this.msgbuf = this.msgbuf.concat(jspack.Pack('<H', [crc] ) );
    return this.msgbuf;
}
""", {'FILELIST' : ",".join(args),
      'PROTOCOL_MARKER' : xml.protocol_marker,
      'crc_extra' : xml.crc_extra,
      'WIRE_PROTOCOL_VERSION' : xml.wire_protocol_version })
def generate_enums(outf, enums):
    '''Write each enum entry as a mavlink.<NAME> = <value> constant.'''
    print("Generating enums")
    outf.write("\n// enums\n")
    wrapper = textwrap.TextWrapper(initial_indent="", subsequent_indent=" // ")
    for enum in enums:
        outf.write("\n// %s\n" % enum.name)
        for item in enum.entry:
            description = wrapper.fill(item.description)
            outf.write("mavlink.%s = %u // %s\n" % (item.name, item.value, description))
def generate_message_ids(outf, msgs):
    '''Write the numeric MAVLINK_MSG_ID_* constants, plus the BAD_DATA sentinel.'''
    print("Generating message IDs")
    outf.write("\n// message IDs\n")
    outf.write("mavlink.MAVLINK_MSG_ID_BAD_DATA = -1\n")
    for msg in msgs:
        outf.write("mavlink.MAVLINK_MSG_ID_%s = %u\n" % (msg.name.upper(), msg.id))
def generate_classes(outf, msgs):
    """
    Generate the implementations of the classes representing MAVLink messages.
    For each message: a comment block, a constructor that records format /
    id / order_map / crc_extra / name, prototype inheritance from
    mavlink.message, and a pack() method.
    """
    print("Generating class definitions")
    wrapper = textwrap.TextWrapper(initial_indent="", subsequent_indent="")
    outf.write("\nmavlink.messages = {};\n\n");
    # Render one "name : description (type)" line per field for the
    # message's comment block.
    def field_descriptions(fields):
        ret = ""
        for f in fields:
            ret += " %-18s : %s (%s)\n" % (f.name, f.description.strip(), f.type)
        return ret
    for m in msgs:
        comment = "%s\n\n%s" % (wrapper.fill(m.description.strip()), field_descriptions(m.fields))
        selffieldnames = 'self, '
        for f in m.fields:
            # if f.omit_arg:
            #     selffieldnames += '%s=%s, ' % (f.name, f.const_value)
            #else:
            # -- Omitting the code above because it is rarely used (only once?) and would need some special handling
            # in javascript. Specifically, inside the method definition, it needs to check for a value then assign
            # a default.
            selffieldnames += '%s, ' % f.name
        selffieldnames = selffieldnames[:-2]
        sub = {'NAMELOWER' : m.name.lower(),
               'SELFFIELDNAMES' : selffieldnames,
               'COMMENT' : comment,
               'FIELDNAMES' : ", ".join(m.fieldnames)}
        t.write(outf, """
/*
${COMMENT}
*/
""", sub)
        # function signature + declaration
        outf.write("mavlink.messages.%s = function(" % (m.name.lower()))
        if len(m.fields) != 0:
            outf.write(", ".join(m.fieldnames))
        outf.write(") {")
        # body: set message type properties
        outf.write("""
this.format = '%s';
this.id = mavlink.MAVLINK_MSG_ID_%s;
this.order_map = %s;
this.crc_extra = %u;
this.name = '%s';
""" % (m.fmtstr, m.name.upper(), m.order_map, m.crc_extra, m.name.upper()))
        # body: set own properties
        if len(m.fieldnames) != 0:
            outf.write(" this.fieldnames = ['%s'];\n" % "', '".join(m.fieldnames))
        outf.write("""
this.set(arguments);
}
""")
        # inherit methods from the base message class
        outf.write("""
mavlink.messages.%s.prototype = new mavlink.message;
""" % m.name.lower())
        # Implement the pack() function for this message
        outf.write("""
mavlink.messages.%s.prototype.pack = function(mav) {
    return mavlink.message.prototype.pack.call(this, mav, this.crc_extra, jspack.Pack(this.format""" % m.name.lower())
        if len(m.fields) != 0:
            outf.write(", [ this." + ", this.".join(m.ordered_fieldnames) + ']')
        outf.write("));\n}\n\n")
def mavfmt(field):
    '''Return the jspack/struct format string for a MAVLink field.

    Scalars map to a single format character; arrays are prefixed with
    their length, with byte-sized types packed as strings ('s').
    Fix: the lookup dict no longer shadows the builtin `map`.
    '''
    type_map = {
        'float'    : 'f',
        'double'   : 'd',
        'char'     : 'c',
        'int8_t'   : 'b',
        'uint8_t'  : 'B',
        'uint8_t_mavlink_version'  : 'B',
        'int16_t'  : 'h',
        'uint16_t' : 'H',
        'int32_t'  : 'i',
        'uint32_t' : 'I',
        'int64_t'  : 'q',
        'uint64_t' : 'Q',
    }
    if field.array_length:
        # Byte-sized element types are packed as a fixed-length string.
        if field.type in ['char', 'int8_t', 'uint8_t']:
            return str(field.array_length)+'s'
        return str(field.array_length)+type_map[field.type]
    return type_map[field.type]
def generate_mavlink_class(outf, msgs, xml):
print("Generating MAVLink class")
# Write mapper to enable decoding based on the integer message type
outf.write("\n\nmavlink.map = {\n");
for m in msgs:
outf.write(" %s: { format: '%s', type: mavlink.messages.%s, order_map: %s, crc_extra: %u },\n" % (
m.id, m.fmtstr, m.name.lower(), m.order_map, m.crc_extra))
outf.write("}\n\n")
t.write(outf, """
// Special mavlink message to capture malformed data packets for debugging
mavlink.messages.bad_data = function(data, reason) {
this.id = mavlink.MAVLINK_MSG_ID_BAD_DATA;
this.data = data;
this.reason = reason;
this.msgbuf = data;
}
/* MAVLink protocol handling class */
MAVLink = function(logger, srcSystem, srcComponent) {
this.logger = logger;
this.seq = 0;
this.buf = new Buffer(0);
this.bufInError = new Buffer(0);
this.srcSystem = (typeof srcSystem === 'undefined') ? 0 : srcSystem;
this.srcComponent = (typeof srcComponent === 'undefined') ? 0 : srcComponent;
// The first packet we expect is a valid header, 6 bytes.
this.expected_length = 6;
this.have_prefix_error = false;
this.protocol_marker = 254;
this.little_endian = true;
this.crc_extra = true;
this.sort_fields = true;
this.total_packets_sent = 0;
this.total_bytes_sent = 0;
this.total_packets_received = 0;
this.total_bytes_received = 0;
this.total_receive_errors = 0;
this.startup_time = Date.now();
}
// Implements EventEmitter
util.inherits(MAVLink, events.EventEmitter);
// If the logger exists, this function will add a message to it.
// Assumes the logger is a winston object.
MAVLink.prototype.log = function(message) {
if(this.logger) {
this.logger.info(message);
}
}
MAVLink.prototype.log = function(level, message) {
if(this.logger) {
this.logger.log(level, message);
}
}
MAVLink.prototype.send = function(mavmsg) {
buf = mavmsg.pack(this);
this.file.write(buf);
this.seq = (this.seq + 1) % 256;
this.total_packets_sent +=1;
this.total_bytes_sent += buf.length;
}
// return number of bytes needed for next parsing stage
MAVLink.prototype.bytes_needed = function() {
ret = this.expected_length - this.buf.length;
return ( ret <= 0 ) ? 1 : ret;
}
// add data to the local buffer
MAVLink.prototype.pushBuffer = function(data) {
if(data) {
this.buf = Buffer.concat([this.buf, data]);
this.total_bytes_received += data.length;
}
}
// Decode prefix. Elides the prefix.
MAVLink.prototype.parsePrefix = function() {
// Test for a message prefix.
if( this.buf.length >= 1 && this.buf[0] != 254 ) {
// Strip the offending initial byte and throw an error.
var badPrefix = this.buf[0];
this.bufInError = this.buf.slice(0,1);
this.buf = this.buf.slice(1);
this.expected_length = 6;
// TODO: enable subsequent prefix error suppression if robust_parsing is implemented
//if(!this.have_prefix_error) {
// this.have_prefix_error = true;
throw new Error("Bad prefix ("+badPrefix+")");
//}
}
//else if( this.buf.length >= 1 && this.buf[0] == 254 ) {
// this.have_prefix_error = false;
//}
}
// Determine the length. Leaves buffer untouched.
MAVLink.prototype.parseLength = function() {
if( this.buf.length >= 2 ) {
var unpacked = jspack.Unpack('BB', this.buf.slice(0, 2));
this.expected_length = unpacked[1] + 8; // length of message + header + CRC
}
}
// input some data bytes, possibly returning a new message
MAVLink.prototype.parseChar = function(c) {
var m = null;
try {
this.pushBuffer(c);
this.parsePrefix();
this.parseLength();
m = this.parsePayload();
} catch(e) {
this.log('error', e.message);
this.total_receive_errors += 1;
m = new mavlink.messages.bad_data(this.bufInError, e.message);
this.bufInError = new Buffer(0);
}
if(null != m) {
this.emit(m.name, m);
this.emit('message', m);
}
return m;
}
MAVLink.prototype.parsePayload = function() {
var m = null;
// If we have enough bytes to try and read it, read it.
if( this.expected_length >= 8 && this.buf.length >= this.expected_length ) {
// Slice off the expected packet length, reset expectation to be to find a header.
var mbuf = this.buf.slice(0, this.expected_length);
// TODO: slicing off the buffer should depend on the error produced by the decode() function
// - if a message we | |
add referring words.')
    def get_sight(self):
        'Getter for the sight description.'
        return self._sense['sight']
    def set_sight(self, string):
        'Setter. Needed because strings must be reformatted before being set.'
        self._sense['sight'] = discourse_model.reformat(string)
    sight = property(get_sight, set_sight,
                     'What is seen when an Item is looked at.')
    def get_touch(self):
        'Getter for the touch description.'
        return self._sense['touch']
    def set_touch(self, string):
        'Setter. Needed because strings must be reformatted before being set.'
        self._sense['touch'] = discourse_model.reformat(string)
    touch = property(get_touch, set_touch,
                     'What is felt when an Item is touched.')
    def get_hearing(self):
        'Getter for the hearing description.'
        return self._sense['hearing']
    def set_hearing(self, string):
        'Setter. Needed because strings must be reformatted before being set.'
        self._sense['hearing'] = discourse_model.reformat(string)
    hearing = property(get_hearing, set_hearing,
                       'What is heard when an Item is listened to.')
    def get_smell(self):
        'Getter for the smell description.'
        return self._sense['smell']
    def set_smell(self, string):
        'Setter. Needed because strings must be reformatted before being set.'
        self._sense['smell'] = discourse_model.reformat(string)
    smell = property(get_smell, set_smell,
                     'What is smelled when an Item is sniffed.')
    def get_taste(self):
        'Getter for the taste description.'
        return self._sense['taste']
    def set_taste(self, string):
        'Setter. Needed because strings must be reformatted before being set.'
        self._sense['taste'] = discourse_model.reformat(string)
    taste = property(get_taste, set_taste,
                     'What is tasted when an Item is sampled.')
    def _update_referring(self):
        'Determine or update the triple of referring words.'
        if self._referring_extra is None:
            # NOTE(review): this branch stores empty strings while the
            # branch below stores sets — confirm consumers handle both.
            self._referring = ('', '', '')
        else:
            # _referring_extra has the form "optional words | name words":
            # words before the bar may modify the noun; words after it can
            # serve as the noun itself.
            optional, _, names = self._referring_extra.partition('|')
            before = set(optional.strip().split() + self._called[0])
            after = set(optional.strip().split() + self._called[2])
            names = set(names.strip().split())
            # A multi-word main name is not itself usable as a single token.
            if not ' ' in self._called[1]:
                names.add(self._called[1])
            # Add the pronoun matching number and gender.
            if self.number == 'singular':
                if self.gender == 'neuter':
                    names.add('it')
                elif self.gender == 'female':
                    names.add('her')
                else:
                    names.add('him')
            else:
                names.add('them')
            # Qualities listed in QUALITY_WORDS contribute extra
            # adjectives (before the bar) and nouns (after it).
            for i in self.qualities:
                if i in discourse_model.QUALITY_WORDS:
                    (q_before,
                     q_names) = discourse_model.QUALITY_WORDS[i].split('|')
                    before.update(q_before.strip().split())
                    names.update(q_names.strip().split())
            self._referring = (before, names, after)
def blank(self):
'Erase an Item when nothing is known about it by an Actor.'
self.article = 'the'
self.called = 'object'
if self.room:
self.called = 'place'
elif self.actor:
self.called = 'individual'
self.referring = None
for attr in ['link', 'parent', 'sight', 'touch', 'hearing', 'smell',
'taste']:
setattr(self, attr, '')
self._children = []
self.allowed = can.not_have_items
self.blanked = True
    def noun_phrase(self, discourse=None, entire=True, extra_adjs='',
                    length=0.0):
        'Return the noun phrase representing this Item.'
        string = self.called[1]
        # When a nonzero length is requested, randomly attach one of the
        # optional "before"/"after" adjectives (or none, via the '' entry).
        if len(self.called[0]) > 0 and length > 0.0:
            before_adjs = random.choice(self.called[0] + [''])
            string = (before_adjs + ' ' + string).strip()
        if len(self.called[2]) > 0 and length > 0.0:
            after_adjs = random.choice(self.called[2] + [''])
            string = (string + ' ' + after_adjs).strip()
        string = (extra_adjs + ' ' + string).strip()
        if discourse is None:
            # This method was called without a discourse parameter. In this
            # case, the correct article can't be generated and the givens list
            # can't be updated; so, return the noun phrase without an article.
            return string
        if entire:
            use_article = self.article
            # Once the item is "given" (already mentioned), an indefinite
            # article switches to the definite "the".
            if (self.article in discourse.indefinite and
                str(self) in discourse.givens):
                use_article = 'the'
        else:
            if self.article in ['a', 'an']:
                use_article = 'a'
                # Pick "an" before a vowel-initial phrase.
                if string[:1] in ['a', 'e', 'i', 'o', 'u']:
                    use_article += 'n'
        # NOTE(review): when entire is False and the article is not
        # 'a'/'an', use_article is unbound here — confirm that path
        # cannot occur.
        if len(use_article) > 0:
            string = use_article + ' ' + string
        discourse.givens.add(str(self))
        return string
def place(self, world):
'Returns the Room this Item is located in, according to World.'
tag = str(self)
while not world.has('room', tag) and not tag == '@cosmos':
tag = world.item[tag].parent
return world.item[tag]
    @property
    def children(self):
        'Return the children of this Item.'
        # Each child is a (link, item) pair, as stored by add_child().
        return self._children
def add_child(self, link, item, making_change=True):
'Add (or remove) a child from this Item.'
if not making_change:
self.remove_child(link, item)
else:
if (link, item) not in self._children:
self._children.append((link, item))
def remove_child(self, link, item, making_change=True):
'Remove (or add) a child from this Item.'
if not making_change:
self.add_child(link, item)
else:
if (link, item) in self._children:
self._children.remove((link, item))
    def prevent(self, _, __):
        'By default, items do not prevent actions. Subclasses can override.'
        return False
    def react(self, _, __):
        'By default, items do nothing when reacting. Subclasses can override.'
        # An empty list means no reaction actions are produced.
        return []
    def react_to_failed(self, _, __):
        'By default, items do nothing when reacting to a failed action.'
        # Subclasses can override to produce reaction actions.
        return []
class Actor(Item):
    """Any Item that can initiate action, whether human-like or not.
    Features of interest:
    alive: True | False
        Actors can only act and react if alive. If not specified, this feature
        will always be True. Things can also have an alive feature, but it must
        be set when needed. It should probably be set on a subclass created
        for a particular Thing that can react and prevent.
    refuses: list of (string, when.function(world), string)
        Determines what an actor will refuse to do when commanded. The first
        string is matched against actions; the function determines whether or
        not the refusal will take place given a match; and the final string
        is a template used to generate a message explaining the refusal."""
    def __init__(self, tag_and_parent, **keywords):
        # Actors default to alive unless the caller says otherwise.
        if 'alive' not in keywords:
            self.alive = True
        # 'refuses' is removed from keywords so Item.__init__ does not
        # treat it as an ordinary feature.
        if 'refuses' not in keywords:
            self.refuses = []
        else:
            self.refuses = keywords['refuses']
            del(keywords['refuses'])
        Item.__init__(self, tag_and_parent, 'actor', **keywords)
    def exits(self, concept):
        "Return this Actor's current Room's exit dictionary."
        return concept.room_of(str(self)).exits
    def act(self, command_map, concept):
        'The default act method runs a script, if there is one.'
        if hasattr(self, 'script') and len(self.script) > 0:
            next_command = self.script.pop(0)
            # When script_loops is set, the popped command is appended
            # again so the script cycles forever.
            if hasattr(self, 'script_loops'):
                self.script.append(next_command)
            next_command = next_command.split()
            return [self.do_command(next_command, command_map, concept)]
        return []
    def do_command(self, command_words, command_map, concept):
        'Return the Action that would result from the provided command.'
        # Python 2 idioms (types.StringType, StandardError) are used here;
        # the command may arrive as one string or as a word list.
        if type(command_words) == types.StringType:
            command_words = command_words.split()
        head = command_words[0].lower()
        # The first word selects the mapping routine on command_map.
        if not hasattr(command_map, head):
            raise StandardError('The command headed with "' + head +
                '" is defined in the discourse, but the routine to build an ' +
                'action from it is missing.')
        else:
            mapping = getattr(command_map, head)
        return mapping(str(self), command_words, concept)
class Door(Item):
    """An Item representing a doorway, portal, or passage between two places.
    Features of interest
    connects: list of two strings
        Each string is the tag of a Room; This Door connects the two."""

    def __init__(self, tag, **keywords):
        # 'connects' is required; 'parent' and 'shared' are disallowed because
        # doors always live directly under the cosmos root.
        check_attributes(tag, ['connects'], ['parent', 'shared'], keywords)
        tag_and_parent = tag + ' of @cosmos'
        # Anything may pass through a door.
        keywords['allowed'] = can.permit_any_item
        Item.__init__(self, tag_and_parent, 'door', **keywords)
class Room(Item):
    """An Item representing a physical location.
    Features that are particular to Rooms:
    exits: dictionary of string: string
        The key is a direction; the value is the tag of the Door or Room in
        that direction.
    shared: list of strings
        Each string is the tag of a SharedThing; That Item is present in this
        room and all other rooms that list it.
    view: dictionary of string: (float, string)
        The key is the tag of a Room which is visible from this one; the tuple
        that is the value has the visibility of that room (a floating point
        number in (0, 1)) and a string which is used to generate a textual
        description of the direction of that room."""

    def __init__(self, tag, **keywords):
        # 'exits' is required; an explicit 'parent' is disallowed because all
        # rooms live directly under the cosmos root.
        check_attributes(tag, ['exits'], ['parent'], keywords)
        tag_and_parent = tag + ' of @cosmos'
        keywords['allowed'] = can.contain_permit_and_have_parts
        # 'exits' and 'view' are Room attributes, not generic Item features;
        # consume them before delegating to Item.__init__.
        self.exits = keywords['exits']
        del(keywords['exits'])
        self.view = {}
        if 'view' in keywords:
            self.view = keywords['view']
            del(keywords['view'])
        # Rooms are fully lit and maximally prominent by default.
        if 'glow' not in keywords:
            keywords['glow'] = 1.0
        keywords['prominence'] = 1.0
        Item.__init__(self, tag_and_parent, 'room', **keywords)

    def exit(self, direction):
        'Return the Room or Door that lies in this direction, if there is one.'
        # NOTE(review): this returns the tag string, not the Item object.
        if direction in self.exits and self.exits[direction][0] == '@':
            # The key exists in the dictionary and the value begins with
            # '@', which means it is a tag. If someone writes a template
            # beginning with '@', this will fail.
            return self.exits[direction]
        else:
            return None
class Thing(Item):
    'An item that is not a room, has no concept, and cannot act.'

    def __init__(self, tag_and_parent, **keywords):
        # 'exits', 'refuses', and 'shared' make no sense on a plain Thing.
        check_attributes(tag_and_parent, [], ['exits', 'refuses', 'shared'],
                         keywords)
        Item.__init__(self, tag_and_parent, 'thing', **keywords)
class SharedThing(Thing):
"""A special sort of (large) Thing that appears in more than one room.
Note that SharedThing is a subclass of Thing and shares the same category:
example.thing is True for a SharedThing; there is no 'sharedthing' category.
However, all SharedThings will have an attribute "sharedthing" that is set
to True. Testing hasattr(item, 'sharedthing') will determine if the item is
a SharedThing.
SharedThing is provided to allow implementation of things like the sky, the
sun, or a massive wall of the sort the United States has erected along the
US/Mexico border. Because shared things are meant to represent these sorts
of entities, they have an allowed expression that always returns False.
Nothing can be placed in one, on one, through one, be part of one, or be
held by one. If it were possible, for instance, to place | |
# Author: <NAME> (<EMAIL>)
# Center for Machine Perception, Czech Technical University in Prague
"""Implementation of the pose error functions described in:
Hodan, Michel et al., "BOP: Benchmark for 6D Object Pose Estimation", ECCV'18
Hodan et al., "On Evaluation of 6D Object Pose Estimation", ECCVW'16
"""
import math
import numpy as np
from scipy import spatial
import misc
import visibility
def vsd(R_est, t_est, R_gt, t_gt, depth_test, K, delta, taus,
        normalized_by_diameter, diameter, renderer, obj_id, cost_type='step'):
    """Visible Surface Discrepancy -- by Hodan, Michel et al. (ECCV 2018).
    :param R_est: 3x3 ndarray with the estimated rotation matrix.
    :param t_est: 3x1 ndarray with the estimated translation vector.
    :param R_gt: 3x3 ndarray with the ground-truth rotation matrix.
    :param t_gt: 3x1 ndarray with the ground-truth translation vector.
    :param depth_test: hxw ndarray with the test depth image.
    :param K: 3x3 ndarray with an intrinsic camera matrix.
    :param delta: Tolerance used for estimation of the visibility masks.
    :param taus: A list of misalignment tolerance values.
    :param normalized_by_diameter: Whether to normalize the pixel-wise distances
      by the object diameter.
    :param diameter: Object diameter.
    :param renderer: Instance of the Renderer class (see renderer.py).
    :param obj_id: Object identifier.
    :param cost_type: Type of the pixel-wise matching cost:
      'tlinear' - Used in the original definition of VSD in:
        Hodan et al., On Evaluation of 6D Object Pose Estimation, ECCVW'16
      'step' - Used for SIXD Challenge 2017 onwards.
    :return: List of calculated errors (one for each misalignment tolerance).
    """
    # Render depth images of the model in the estimated and the ground-truth pose.
    fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2]
    depth_est = renderer.render_object(
        obj_id, R_est, t_est, fx, fy, cx, cy)['depth']
    depth_gt = renderer.render_object(
        obj_id, R_gt, t_gt, fx, fy, cx, cy)['depth']

    # Convert depth images to distance images.
    dist_test = misc.depth_im_to_dist_im_fast(depth_test, K)
    dist_gt = misc.depth_im_to_dist_im_fast(depth_gt, K)
    dist_est = misc.depth_im_to_dist_im_fast(depth_est, K)

    # Visibility mask of the model in the ground-truth pose.
    visib_gt = visibility.estimate_visib_mask_gt(
        dist_test, dist_gt, delta, visib_mode='bop19')

    # Visibility mask of the model in the estimated pose (computed w.r.t.
    # visib_gt, which is passed in as a reference).
    visib_est = visibility.estimate_visib_mask_est(
        dist_test, dist_est, visib_gt, delta, visib_mode='bop19')

    # Intersection and union of the visibility masks.
    visib_inter = np.logical_and(visib_gt, visib_est)
    visib_union = np.logical_or(visib_gt, visib_est)
    visib_union_count = visib_union.sum()
    # Pixels visible in only one of the two poses contribute a cost of 1 each.
    visib_comp_count = visib_union_count - visib_inter.sum()

    # Pixel-wise distances.
    dists = np.abs(dist_gt[visib_inter] - dist_est[visib_inter])

    # Normalization of pixel-wise distances by object diameter.
    if normalized_by_diameter:
        dists /= diameter

    # Calculate VSD for each provided value of the misalignment tolerance.
    if visib_union_count == 0:
        # Object visible in neither pose: maximal error by convention.
        errors = [1.0] * len(taus)
    else:
        errors = []
        for tau in taus:
            # Pixel-wise matching cost.
            if cost_type == 'step':
                costs = dists >= tau
            elif cost_type == 'tlinear':  # Truncated linear function.
                costs = dists / tau
                costs[costs > 1.0] = 1.0
            else:
                raise ValueError('Unknown pixel matching cost.')

            e = (np.sum(costs) + visib_comp_count) / float(visib_union_count)
            errors.append(e)
    return errors
def mssd(R_est, t_est, R_gt, t_gt, pts, syms):
    """Maximum Symmetry-Aware Surface Distance (MSSD).
    See: http://bop.felk.cvut.cz/challenges/bop-challenge-2019/
    :param R_est: 3x3 ndarray with the estimated rotation matrix.
    :param t_est: 3x1 ndarray with the estimated translation vector.
    :param R_gt: 3x3 ndarray with the ground-truth rotation matrix.
    :param t_gt: 3x1 ndarray with the ground-truth translation vector.
    :param pts: nx3 ndarray with 3D model points.
    :param syms: Set of symmetry transformations, each given by a dictionary with:
      - 'R': 3x3 ndarray with the rotation matrix.
      - 't': 3x1 ndarray with the translation vector.
    :return: The calculated error.
    """
    pts_est = misc.transform_pts_Rt(pts, R_est, t_est)

    def max_dist_for(sym):
        # Compose the ground-truth pose with one symmetry transformation.
        R_sym = R_gt.dot(sym['R'])
        t_sym = R_gt.dot(sym['t']) + t_gt
        pts_sym = misc.transform_pts_Rt(pts, R_sym, t_sym)
        return np.linalg.norm(pts_est - pts_sym, axis=1).max()

    # Report the smallest maximum distance over all symmetries.
    return min(max_dist_for(sym) for sym in syms)
def mspd(R_est, t_est, R_gt, t_gt, K, pts, syms):
    """Maximum Symmetry-Aware Projection Distance (MSPD).
    See: http://bop.felk.cvut.cz/challenges/bop-challenge-2019/
    :param R_est: 3x3 ndarray with the estimated rotation matrix.
    :param t_est: 3x1 ndarray with the estimated translation vector.
    :param R_gt: 3x3 ndarray with the ground-truth rotation matrix.
    :param t_gt: 3x1 ndarray with the ground-truth translation vector.
    :param K: 3x3 ndarray with the intrinsic camera matrix.
    :param pts: nx3 ndarray with 3D model points.
    :param syms: Set of symmetry transformations, each given by a dictionary with:
      - 'R': 3x3 ndarray with the rotation matrix.
      - 't': 3x1 ndarray with the translation vector.
    :return: The calculated error.
    """
    proj_est = misc.project_pts(pts, K, R_est, t_est)

    def max_dist_for(sym):
        # Compose the ground-truth pose with one symmetry transformation.
        R_sym = R_gt.dot(sym['R'])
        t_sym = R_gt.dot(sym['t']) + t_gt
        proj_sym = misc.project_pts(pts, K, R_sym, t_sym)
        return np.linalg.norm(proj_est - proj_sym, axis=1).max()

    # Report the smallest maximum projection distance over all symmetries.
    return min(max_dist_for(sym) for sym in syms)
def add(R_est, t_est, R_gt, t_gt, pts):
    """Average Distance of Model Points for objects with no indistinguishable
    views - by Hinterstoisser et al. (ACCV'12).
    :param R_est: 3x3 ndarray with the estimated rotation matrix.
    :param t_est: 3x1 ndarray with the estimated translation vector.
    :param R_gt: 3x3 ndarray with the ground-truth rotation matrix.
    :param t_gt: 3x1 ndarray with the ground-truth translation vector.
    :param pts: nx3 ndarray with 3D model points.
    :return: The calculated error.
    """
    transformed_est = misc.transform_pts_Rt(pts, R_est, t_est)
    transformed_gt = misc.transform_pts_Rt(pts, R_gt, t_gt)
    # Mean Euclidean distance between corresponding model points.
    return np.linalg.norm(transformed_est - transformed_gt, axis=1).mean()
def adi(R_est, t_est, R_gt, t_gt, pts):
    """Average Distance of Model Points for objects with indistinguishable views
    - by Hinterstoisser et al. (ACCV'12).
    :param R_est: 3x3 ndarray with the estimated rotation matrix.
    :param t_est: 3x1 ndarray with the estimated translation vector.
    :param R_gt: 3x3 ndarray with the ground-truth rotation matrix.
    :param t_gt: 3x1 ndarray with the ground-truth translation vector.
    :param pts: nx3 ndarray with 3D model points.
    :return: The calculated error.
    """
    transformed_est = misc.transform_pts_Rt(pts, R_est, t_est)
    transformed_gt = misc.transform_pts_Rt(pts, R_gt, t_gt)
    # Mean distance from each ground-truth-posed vertex to its nearest
    # neighbor among the estimated-pose vertices (KD-tree lookup).
    tree = spatial.cKDTree(transformed_est)
    nn_dists, _ = tree.query(transformed_gt, k=1)
    return nn_dists.mean()
def re(R_est, R_gt):
    """Rotational Error in degrees.

    NOTE: this function shadows the stdlib ``re`` module name inside this
    module; the name is kept for API compatibility.
    :param R_est: 3x3 ndarray with the estimated rotation matrix.
    :param R_gt: 3x3 ndarray with the ground-truth rotation matrix.
    :return: The calculated error.
    """
    assert R_est.shape == (3, 3) and R_gt.shape == (3, 3)
    # cos(angle) from the trace of the relative rotation R_est * R_gt^-1.
    cos_angle = float(0.5 * (np.trace(R_est.dot(np.linalg.inv(R_gt))) - 1.0))
    # Clamp to [-1, 1] so acos is safe against numerical noise.
    cos_angle = min(1.0, max(-1.0, cos_angle))
    angle = math.acos(cos_angle)
    return 180.0 * angle / np.pi  # Convert [rad] to [deg].
def te(t_est, t_gt):
    """Translational Error (Euclidean distance between translations).
    :param t_est: 3x1 ndarray with the estimated translation vector.
    :param t_gt: 3x1 ndarray with the ground-truth translation vector.
    :return: The calculated error.
    """
    assert t_est.size == 3 and t_gt.size == 3
    difference = t_gt - t_est
    return np.linalg.norm(difference)
def proj(R_est, t_est, R_gt, t_gt, K, pts):
    """Average distance of projections of object model vertices [px]
    - by Brachmann et al. (CVPR'16).
    :param R_est: 3x3 ndarray with the estimated rotation matrix.
    :param t_est: 3x1 ndarray with the estimated translation vector.
    :param R_gt: 3x3 ndarray with the ground-truth rotation matrix.
    :param t_gt: 3x1 ndarray with the ground-truth translation vector.
    :param K: 3x3 ndarray with an intrinsic camera matrix.
    :param pts: nx3 ndarray with 3D model points.
    :return: The calculated error.
    """
    projections_est = misc.project_pts(pts, K, R_est, t_est)
    projections_gt = misc.project_pts(pts, K, R_gt, t_gt)
    # Mean 2D distance between corresponding projected vertices.
    return np.linalg.norm(projections_est - projections_gt, axis=1).mean()
def cou_mask(mask_est, mask_gt):
    """Complement over Union of 2D binary masks (1 - IoU).
    :param mask_est: hxw ndarray with the estimated mask.
    :param mask_gt: hxw ndarray with the ground-truth mask.
    :return: The calculated error; 1.0 when both masks are empty.
    """
    # FIX: the np.bool alias was removed in NumPy 1.24; the builtin bool is
    # the supported, behaviorally identical spelling.
    mask_est_bool = mask_est.astype(bool)
    mask_gt_bool = mask_gt.astype(bool)

    inter = np.logical_and(mask_gt_bool, mask_est_bool)
    union = np.logical_or(mask_gt_bool, mask_est_bool)

    union_count = float(union.sum())
    if union_count > 0:
        e = 1.0 - inter.sum() / union_count
    else:
        # Both masks empty: error is maximal by convention.
        e = 1.0
    return e
def cus(R_est, t_est, R_gt, t_gt, K, renderer, obj_id):
"""Complement over Union of projected 2D masks.
:param R_est: 3x3 ndarray with the estimated rotation matrix.
:param t_est: 3x1 ndarray with the estimated translation vector.
:param R_gt: 3x3 ndarray with the ground-truth rotation matrix.
:param t_gt: 3x1 ndarray with the ground-truth translation vector.
:param K: 3x3 ndarray with an intrinsic camera matrix.
:param renderer: Instance of the Renderer class (see renderer.py).
:param obj_id: Object identifier.
:return: The calculated error.
"""
# Render depth images of the model at the estimated and the ground-truth pose.
fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2]
depth_est = | |
.raw bytes crypto material only without code
.pad int number of pad chars given raw
.qb64 str in Base64 fully qualified with derivation code + crypto mat
.qb64b bytes in Base64 fully qualified with derivation code + crypto mat
.qb2 bytes in binary with derivation code + crypto material
.nontrans True when non-transferable derivation code False otherwise
Properties:
Methods:
Hidden:
._digest is digest method
._derive is derivation method
"""
def __init__(self, limen=None, sith=None, digs=None, keys=None, ked=None,
             code=MtrDex.Blake3_256, **kwa):
    """
    Assign digest verification function to ._verify
    Inherited Parameters:
        raw is bytes of unqualified crypto material usable for crypto operations
        qb64b is bytes of fully qualified crypto material
        qb64 is str or bytes of fully qualified crypto material
        qb2 is bytes of fully qualified crypto material
        code is str of derivation code
        index is int of count of attached receipts for CryCntDex codes
    Parameters:
        limen is string extracted from sith expression in event
        sith is int threshold or lowercase hex str no leading zeros
        digs is list of qb64 digests of public keys
        keys is list of keys each is qb64 public key str
        ked is key event dict
    Raises error if not any of raw, digs, keys, ked
    if not raw
        use digs
        If digs not provided
            use keys
                if keys not provided
                    get keys from ked
                compute digs from keys
    If sith not provided
        get sith from ked
        but if not ked then compute sith as simple majority of keys
    """
    try:
        # Happy path: raw/qb64/qb2 material was supplied via **kwa.
        super(Nexter, self).__init__(code=code, **kwa)
    except EmptyMaterialError as ex:
        # No material given: derive it, but only if there is something
        # (digs/keys/ked) to derive it from.
        if not digs and not keys and not ked:
            raise ex
        if code == MtrDex.Blake3_256:
            self._digest = self._blake3_256
        else:
            raise ValueError("Unsupported code = {} for nexter.".format(code))

        raw = self._derive(code=code, limen=limen, sith=sith, digs=digs,
                           keys=keys, ked=ked)  # derive nxt raw
        super(Nexter, self).__init__(raw=raw, code=code, **kwa)  # attaches code etc
    else:
        # Material was provided directly; just select the digest routine.
        if self.code == MtrDex.Blake3_256:
            self._digest = self._blake3_256
        else:
            raise ValueError("Unsupported code = {} for nexter.".format(code))
def verify(self, raw=b'', limen=None, sith=None, digs=None, keys=None, ked=None):
    """
    Return True if the digest of the given nxt material matches .raw.

    Uses .raw as the reference value. If raw is not provided, it is first
    derived (via ._derive for .code) from limen/sith/digs/keys/ked.
    Parameters:
        raw is bytes serialization
        sith is str lowercase hex
        keys is list of keys qb64
        ked is key event dict
    """
    reference = raw
    if not reference:
        # Nothing supplied directly; rebuild the raw digest material.
        reference = self._derive(code=self.code, limen=limen, sith=sith,
                                 digs=digs, keys=keys, ked=ked)
    return reference == self.raw
def _derive(self, code, limen=None, sith=None, digs=None, keys=None, ked=None):
    """
    Returns ser where ser is serialization derived from code, sith, keys, or ked
    """
    if not digs:
        # No digests supplied: compute them from keys (or from ked["k"]).
        if not keys:
            try:
                keys = ked["k"]
            except KeyError as ex:
                raise DerivationError("Error extracting keys from"
                                      " ked = {}".format(ex))

        if not keys:  # empty keys
            raise DerivationError("Empty keys.")

        keydigs = [self._digest(key.encode("utf-8")) for key in keys]
    else:
        # Digests supplied directly; every one must use the same code.
        digers = [Diger(qb64=dig) for dig in digs]
        for diger in digers:
            if diger.code != code:
                raise DerivationError("Mismatch of public key digest "
                                      "code = {} for next digest code = {}."
                                      "".format(diger.code, code))
        keydigs = [diger.raw for diger in digers]

    if limen is None:  # compute default limen
        if sith is None:  # need len keydigs to compute default sith
            try:
                sith = ked["kt"]
            except Exception as ex:
                # NOTE(review): broad except also swallows TypeError when ked
                # is None — presumably intentional so any failure falls back
                # to the default; confirm.
                # default simple majority
                sith = "{:x}".format(max(1, ceil(len(keydigs) / 2)))

        limen = Tholder(sith=sith).limen

    # XOR-fold the key digests together with the digest of the limen so the
    # commitment binds both the signing threshold and the next keys.
    kints = [int.from_bytes(keydig, 'big') for keydig in keydigs]
    sint = int.from_bytes(self._digest(limen.encode("utf-8")), 'big')
    for kint in kints:
        sint ^= kint  # xor together

    return (sint.to_bytes(Matter._rawSize(code), 'big'))
@staticmethod
def _blake3_256(raw):
    """
    Returns digest of raw using Blake3_256
    Parameters:
        raw is bytes serialization of nxt raw
    """
    # Digest computed via the third-party `blake3` package.
    return(blake3.blake3(raw).digest())
class Prefixer(Matter):
    """
    Prefixer is Matter subclass for autonomic identifier prefix using
    derivation as determined by code from ked
    Attributes:
    Inherited Properties:  (see Matter)
        .pad  is int number of pad chars given raw
        .code is  str derivation code to indicate cypher suite
        .raw is bytes crypto material only without code
        .index is int count of attached crypto material by context (receipts)
        .qb64 is str in Base64 fully qualified with derivation code + crypto mat
        .qb64b is bytes in Base64 fully qualified with derivation code + crypto mat
        .qb2  is bytes in binary with derivation code + crypto material
        .nontrans is Boolean, True when non-transferable derivation code False otherwise
    Properties:
    Methods:
        verify():  Verifies derivation of aid prefix from a ked
    Hidden:
        ._pad is method to compute  .pad property
        ._code is str value for .code property
        ._raw is bytes value for .raw property
        ._index is int value for .index property
        ._infil is method to compute fully qualified Base64 from .raw and .code
        ._exfil is method to extract .code and .raw from fully qualified Base64
    """
    Dummy = "#"  # dummy spaceholder char for pre. Must not be a valid Base64 char
    # element labels to exclude in digest or signature derivation from inception icp
    # (the prefix field cannot contribute to its own derivation)
    IcpExcludes = ["i"]
    # element labels to exclude in digest or signature derivation from delegated inception dip
    DipExcludes = ["i"]
def __init__(self, raw=None, code=None, ked=None,
             seed=None, secret=None, **kwa):
    """
    assign ._derive to derive derivation of aid prefix from ked
    assign ._verify to verify derivation of aid prefix from ked
    Default code is None to force EmptyMaterialError when only raw provided but
    not code.
    Inherited Parameters:
        raw is bytes of unqualified crypto material usable for crypto operations
        qb64b is bytes of fully qualified crypto material
        qb64 is str or bytes of fully qualified crypto material
        qb2 is bytes of fully qualified crypto material
        code is str of derivation code
        index is int of count of attached receipts for CryCntDex codes
    Parameters:
        seed is bytes seed when signature derivation
        secret is qb64 when signature derivation when applicable
           one of seed or secret must be provided when signature derivation
    """
    try:
        # Happy path: raw/qb64/qb2 material supplied directly.
        super(Prefixer, self).__init__(raw=raw, code=code, **kwa)
    except EmptyMaterialError as ex:
        # No material: derive prefix from the inception key event dict.
        if not ked or (not code and "i" not in ked):
            raise ex

        if not code:  # get code from pre in ked
            super(Prefixer, self).__init__(qb64=ked["i"], code=code, **kwa)
            code = self.code

        # Select the derivation routine matching the code.
        if code == MtrDex.Ed25519N:
            self._derive = self._derive_ed25519N
        elif code == MtrDex.Ed25519:
            self._derive = self._derive_ed25519
        elif code == MtrDex.Blake3_256:
            self._derive = self._derive_blake3_256
        elif code == MtrDex.Ed25519_Sig:
            self._derive = self._derive_sig_ed25519
        else:
            raise ValueError("Unsupported code = {} for prefixer.".format(code))

        # use ked and ._derive from code to derive aid prefix and code
        raw, code = self._derive(ked=ked, seed=seed, secret=secret)
        super(Prefixer, self).__init__(raw=raw, code=code, **kwa)

    # Select the verification routine matching the (now authoritative)
    # self.code.
    if self.code == MtrDex.Ed25519N:
        self._verify = self._verify_ed25519N
    elif self.code == MtrDex.Ed25519:
        self._verify = self._verify_ed25519
    elif self.code == MtrDex.Blake3_256:
        self._verify = self._verify_blake3_256
    elif self.code == MtrDex.Ed25519_Sig:
        # FIX: previously compared the local `code` (which may be None when
        # material was supplied directly), unlike the sibling branches that
        # compare self.code; that mismatch wrongly raised ValueError for a
        # valid Ed25519_Sig prefix supplied as raw/qb64.
        self._verify = self._verify_sig_ed25519
    else:
        raise ValueError("Unsupported code = {} for prefixer.".format(self.code))
def derive(self, ked, seed=None, secret=None):
    """
    Return tuple (raw, code) of aid prefix as derived from key event dict ked,
    using the derivation-code-specific ._derive method.
    Parameters:
        ked is inception key event dict
        seed is only used for sig derivation it is the secret key/secret
    """
    ilk = ked["t"]
    # Only inception (icp) and delegated inception (dip) events may seed a
    # prefix derivation.
    if ilk not in (Ilks.icp, Ilks.dip):
        raise ValueError("Nonincepting ilk={} for prefix derivation.".format(ilk))
    return self._derive(ked=ked, seed=seed, secret=secret)
def verify(self, ked, prefixed=False):
    """
    Return True if derivation from ked for .code matches .qb64; when
    prefixed is True additionally verifies ked["i"] matches .qb64.
    Return False otherwise.
    Parameters:
        ked is inception key event dict
    """
    ilk = ked["t"]
    # Only inception (icp) and delegated inception (dip) events are valid.
    if ilk not in (Ilks.icp, Ilks.dip):
        raise ValueError("Nonincepting ilk={} for prefix derivation.".format(ilk))
    return self._verify(ked=ked, pre=self.qb64, prefixed=prefixed)
def _derive_ed25519N(self, ked, seed=None, secret=None):
"""
Returns tuple (raw, code) of basic nontransferable Ed25519 prefix (qb64)
as derived from inception key event dict ked keys[0]
"""
ked = dict(ked) # make copy so don't clobber original ked
try:
keys = ked["k"]
if len(keys) != 1:
raise DerivationError("Basic derivation needs at most 1 key "
" got {} keys instead".format(len(keys)))
verfer = Verfer(qb64=keys[0])
except Exception as ex:
raise DerivationError("Error extracting public key ="
" = {}".format(ex))
if verfer.code not in [MtrDex.Ed25519N]:
raise DerivationError("Mismatch derivation code = {}."
"".format(verfer.code))
try:
if verfer.code == MtrDex.Ed25519N and ked["n"]:
raise DerivationError("Non-empty nxt = {} for non-transferable"
" code = {}".format(ked["n"],
verfer.code))
| |
# Source: metagov/discord-research-bot -- app/database.py
# Standard library.
from abc import ABC, abstractmethod, abstractproperty
from datetime import datetime
from enum import IntEnum
from typing import Generator, List, Optional
import logging

# Third-party.
import discord
from tinydb import TinyDB, where
from tinydb.queries import Query
from tinydb.table import Document

# Local.
from constants import *
from helpers import user_to_hash
logger = logging.getLogger(__name__)

# Table names are defined once here so a typo cannot silently create a new,
# empty TinyDB table elsewhere in the module.
STATUSES_TABLE_NAME = 'statuses'
ALTERNATES_TABLE_NAME = 'alternates'
CHANNELS_TABLE_NAME = 'channels'
USERS_TABLE_NAME = 'users'
HOOKS_TABLE_NAME = 'hooks'
COMMENTS_TABLE_NAME = 'comments'
MESSAGES_TABLE_NAME = 'messages'
ADMINS_TABLE_NAME = 'admins'
BRIDGES_TABLE_NAME = 'bridges'
class LiveDocument(ABC):
    """Base for lightweight views over TinyDB-backed entities.

    Subclasses hold only identifying fields plus the shared database
    ``handle``; all state is read from / written to TinyDB on access.
    """

    def __init__(self, handle, **kwargs):
        # The TinyDB instance every query in the subclass goes through.
        self.handle = handle

    # FIX: abc.abstractproperty is deprecated since Python 3.3; the
    # supported, equivalent spelling is @property over @abstractmethod.
    # The annotation is quoted so it stays lazy.
    @property
    @abstractmethod
    def base_query(self) -> 'Query':
        """Query matching this document's rows in its table."""
        raise NotImplementedError()
class MessageStatus(IntEnum):
    """Stage of a message in the curation flow; stored as an int in the
    statuses table (see Message.status)."""
    CURATED = 0
    REQUESTED = 1
    APPROVED = 2
    ANONYMOUS = 3
    DENIED = 4
class AlternateType(IntEnum):
    """When we send a message in the pending channel for a given guild, or when
    we send the message to a message's author that requests for their
    permission, we need a way to tie all of these messages together. If we want
    to, say, delete the pending message when the author fulfills the permission
    request, then we need a way to go from the request message to the pending
    one. Using these enums, we can do this."""
    PENDING = 0   # copy posted in the guild's pending channel
    REQUEST = 1   # permission-request message sent to the author
    APPROVED = 2  # copy posted in the approved channel
    COMMENT = 3   # comment-hook message (replies become comments)
class Message(LiveDocument):
    """Live view over one Discord message, keyed by (channel id, message id).

    All persistent state -- status, alternate messages, comments, metadata --
    lives in TinyDB tables on ``handle``; instances carry only the two ids.
    """

    def __init__(self, handle, message=None, channel_id=0, message_id=0):
        # Accepts either a discord.Message, another Message, or explicit ids.
        super().__init__(handle)
        self.channel_id = channel_id
        self.message_id = message_id

        if isinstance(message, discord.Message):
            self.channel_id = message.channel.id
            self.message_id = message.id
        elif message is not None:  # This class.
            self.channel_id = message.channel_id
            self.message_id = message.message_id

    @property
    def base_query(self) -> Query:
        # Rows referring to this message key it as original_cid/original_mid.
        return (where('original_cid') == self.channel_id) & \
            (where('original_mid') == self.message_id)

    @property
    def status(self) -> Optional[MessageStatus]:
        """Curation status of this message, or None if never recorded."""
        result = self.handle.table(STATUSES_TABLE_NAME).get(self.base_query)
        return None if result is None else MessageStatus(result['status'])

    @status.setter
    def status(self, new_status):
        # Upsert so the first status write creates the row.
        self.handle.table(STATUSES_TABLE_NAME).upsert({
            'original_cid': self.channel_id,
            'original_mid': self.message_id,
            'status': int(new_status)
        }, self.base_query)

    def get_alternate(self, altype) -> Optional['Message']:
        """Gets the alternate message for an original one i.e., the pending
        or approved messages.
        :param altype: The type of alternate.
        :type altype: AternateType
        :return: Either the alternate or `None` if it is not set.
        :rtype: Optional[Message]
        """
        query = self.base_query & (where('altype') == int(altype))
        result = self.handle.table(ALTERNATES_TABLE_NAME).get(query)
        return None if result is None else \
            Message(self.handle, channel_id=result['message_cid'],
                    message_id=result['message_mid'])

    def set_alternate(self, message, altype):
        """Sets the alternate message for an original one i.e., the pending
        or approved messages.
        :param message: The message or alternate.
        :type message: Union[discord.Message, Message]
        :param altype: The type of alternate.
        :type altype: AternateType
        """
        channel_id = 0
        message_id = 0

        if isinstance(message, discord.Message):
            channel_id = message.channel.id
            message_id = message.id
        else:  # This class.
            channel_id = message.channel_id
            message_id = message.message_id

        logger.debug('Setting %s for %s/%s to %s/%s', altype, self.channel_id,
                     self.message_id, channel_id, message_id)

        # One row per (original, altype) pair.
        query = self.base_query & (where('altype') == int(altype))
        self.handle.table(ALTERNATES_TABLE_NAME).upsert({
            'original_cid': self.channel_id,
            'original_mid': self.message_id,
            'altype': int(altype),
            'message_cid': channel_id,
            'message_mid': message_id
        }, query)

    # Convenience accessors around get_alternate/set_alternate.
    @property
    def pending_message(self):
        return self.get_alternate(AlternateType.PENDING)

    @pending_message.setter
    def pending_message(self, new_pending):
        self.set_alternate(new_pending, AlternateType.PENDING)

    @property
    def request_message(self):
        return self.get_alternate(AlternateType.REQUEST)

    @request_message.setter
    def request_message(self, new_request):
        self.set_alternate(new_request, AlternateType.REQUEST)

    @property
    def approved_message(self):
        return self.get_alternate(AlternateType.APPROVED)

    @approved_message.setter
    def approved_message(self, new_approved):
        self.set_alternate(new_approved, AlternateType.APPROVED)

    @property
    def original_message(self):
        """Reverse lookup: the original Message this one is an alternate of.

        NOTE(review): assumes an alternates row exists for this message;
        if not, ``result`` is None and the subscript raises TypeError.
        """
        query = (where('message_cid') == self.channel_id) & \
            (where('message_mid') == self.message_id)
        result = self.handle.table(ALTERNATES_TABLE_NAME).get(query)
        return Message(self.handle, channel_id=result['original_cid'],
                       message_id=result['original_mid'])

    async def fetch(self, bot):
        """Fetch the live discord.Message object via the bot's API."""
        channel = await bot.fetch_channel(self.channel_id)
        return await channel.fetch_message(self.message_id)

    def add_comment_hook(self, comment_message):
        """Register *comment_message* as a comment hook for this message."""
        channel_id = 0
        message_id = 0

        if isinstance(comment_message, discord.Message):
            channel_id = comment_message.channel.id
            message_id = comment_message.id
        else:  # This class.
            channel_id = comment_message.channel_id
            message_id = comment_message.message_id

        logger.debug('Adding %s/%s as comment hook for %s/%s',
                     channel_id, message_id, self.channel_id, self.message_id)

        # Unlike set_alternate, the full tuple is part of the query so one
        # original can have several comment hooks.
        query = self.base_query & \
            (where('altype') == int(AlternateType.COMMENT)) & \
            (where('message_cid') == channel_id) & \
            (where('message_mid') == message_id)

        self.handle.table(ALTERNATES_TABLE_NAME).upsert({
            'original_cid': self.channel_id,
            'original_mid': self.message_id,
            'altype': int(AlternateType.COMMENT),
            'message_cid': channel_id,
            'message_mid': message_id
        }, query)

    @property
    def is_comment_hook(self) -> bool:
        """Checks if this message is a registered comment hook of another
        message. A comment hook is a message that, when replied to, adds a
        comment onto the original message."""
        query = (where('message_cid') == self.channel_id) & \
            (where('message_mid') == self.message_id) & \
            (where('altype') == int(AlternateType.COMMENT))
        return self.handle.table(ALTERNATES_TABLE_NAME).get(query) is not None

    def add_comment(self, user, content):
        """Store a comment by *user* (a Discord user object) on this message."""
        logger.debug('User %s commented on message %s/%s: %s', user.id,
                     self.channel_id, self.message_id, content)

        self.handle.table(COMMENTS_TABLE_NAME).insert({
            'original_cid': self.channel_id,
            'original_mid': self.message_id,
            'author': {
                'id': user.id,
                'name': user.name,
                'discriminator': user.discriminator
            },
            'content': content
        })

    @property
    def comments(self) -> Generator[dict, None, None]:
        """Yield all stored comments for this message."""
        results = self.handle.table(COMMENTS_TABLE_NAME).search(self.base_query)
        for document in results:
            # Yields elements that look like:
            # {
            #     'author': {
            #         'id': 0,
            #         'name': '',
            #         'discriminator': 0
            #     },
            #     'content': ''
            # }
            yield {
                'author': document.get('author'),
                'content': document.get('content')
            }

    async def add_to_database(self, bot, anonymize=False):
        """Fetch the live message and persist a snapshot of it.

        When *anonymize* is True no author identity fields are stored (only
        the author hash).
        """
        logger.debug('Adding %s/%s to database',
                     self.channel_id, self.message_id)

        # Fetch the actual message.
        message = await self.fetch(bot)

        doc = {
            'original_cid': self.channel_id,
            'original_mid': self.message_id,
            # Hash stands in for the author identity even when anonymized.
            'author_hash': user_to_hash(message.author),
            'added_at': datetime.utcnow().isoformat(),
            'content': message.content,
            'created_at': message.created_at.isoformat(),
            'edited_at': message.edited_at.isoformat() if message.edited_at else None,
            # NOTE(review): assumes a guild text channel; DM channels have
            # neither .name nor .guild -- confirm callers never pass DMs.
            'channel': {
                'name': message.channel.name,
                'id': message.channel.id
            },
            'guild': {
                'name': message.guild.name,
                'id': message.guild.id
            }
        }

        # Add info about author.
        if not anonymize:
            doc['author'] = {
                'name': message.author.name,
                'discriminator': message.author.discriminator,
                'id': message.author.id
            }

        self.handle.table(MESSAGES_TABLE_NAME).upsert(doc, self.base_query)

    def add_metadata(self, metadata):
        """Merge *metadata* (dict) into this message's stored metadata."""
        result = self.get_metadata()
        result.update(metadata)
        self.set_metadata(result)

    def set_metadata(self, metadata):
        """Replace this message's stored metadata with *metadata*."""
        logger.debug('Setting metadata of %s/%s to %s',
                     self.channel_id, self.message_id, metadata)

        self.handle.table(MESSAGES_TABLE_NAME).upsert({
            'original_cid': self.channel_id,
            'original_mid': self.message_id,
            'metadata': metadata
        }, self.base_query)

    def get_metadata(self) -> dict:
        """Return this message's stored metadata, or {} if none."""
        result = self.handle.table(MESSAGES_TABLE_NAME).get(self.base_query)
        return {} if result is None else result.get('metadata', {})
class Channel(LiveDocument):
    """Live view over one Discord channel, keyed by its id.

    Persists bridge-group membership in the bridges table on ``handle``.
    """

    def __init__(self, handle, channel=None, id=0):
        # Route through LiveDocument for consistency with Message; the
        # effect (self.handle = handle) is unchanged.
        super().__init__(handle)
        self.id = id

        if channel is not None:
            # A channel object takes precedence over an explicit id.
            self.id = channel.id

    @property
    def base_query(self) -> Query:
        return where('channel_id') == self.id

    async def fetch(self, bot):
        """Fetch the live discord channel object via the bot's API."""
        return await bot.fetch_channel(self.id)

    @property
    def group(self) -> Optional[str]:
        """Bridge-group name this channel belongs to, or None if unset."""
        document = self.handle.table(BRIDGES_TABLE_NAME).get(self.base_query)
        return None if document is None else document.get('group', None)

    @group.setter
    def group(self, value):
        logger.debug('Group for %s set to %s', self.id, value)
        self.handle.table(BRIDGES_TABLE_NAME).upsert({
            'channel_id': self.id,
            'group': value
        }, self.base_query)

    @group.deleter
    def group(self):
        logger.debug('Group for %s removed', self.id)
        # Reuse base_query instead of rebuilding the identical query inline.
        self.handle.table(BRIDGES_TABLE_NAME).remove(self.base_query)

    def get_channels_in_group(self, group) -> Generator['Channel', None, None]:
        """Yield a Channel for every channel registered under *group*."""
        query = where('group') == group
        results = self.handle.table(BRIDGES_TABLE_NAME).search(query)
        for document in results:
            yield Channel(self.handle, id=document['channel_id'])
class Guild(LiveDocument):
    """Live view of one guild's configured channels in the database."""

    def __init__(self, handle, guild=None, id=0):
        self.handle = handle
        self.id = id
        if guild is not None:
            self.id = guild.id

    class ChannelType(IntEnum):
        # Role a configured channel plays for this guild; stored as an int
        # in the `type` field of the channels table.
        PENDING = 0
        APPROVED = 1
        BRIDGE = 2

    @property
    def base_query(self):
        # All channel records for this guild are keyed on `guild_id`.
        return where('guild_id') == self.id

    def set_channel(self, channel, type):
        """Record *channel* as this guild's channel of the given *type*."""
        # NOTE(review): parameter `type` shadows the builtin; kept as-is in
        # case callers pass it by keyword.
        logger.debug('Channel (%s) for %s set to %s', type, self.id,
                     channel.id)
        self.handle.table(CHANNELS_TABLE_NAME).upsert({
            'guild_id': self.id,
            'type': int(type),
            'channel_id': channel.id
        }, self.base_query & (where('type') == int(type)))

    def get_channel(self, type):
        """Return the Channel of the given *type*, or None when unset."""
        query = self.base_query & (where('type') == int(type))
        result = self.handle.table(CHANNELS_TABLE_NAME).get(query)
        return None if result is None else \
            Channel(self.handle, id=result['channel_id'])

    # ...

    # Convenience properties: one getter/setter pair per ChannelType,
    # all delegating to get_channel/set_channel above.
    @property
    def pending_channel(self):
        return self.get_channel(Guild.ChannelType.PENDING)

    @pending_channel.setter
    def pending_channel(self, new_channel):
        self.set_channel(new_channel, Guild.ChannelType.PENDING)

    @property
    def approved_channel(self):
        return self.get_channel(Guild.ChannelType.APPROVED)

    @approved_channel.setter
    def approved_channel(self, new_channel):
        self.set_channel(new_channel, Guild.ChannelType.APPROVED)

    @property
    def bridge_channel(self):
        return self.get_channel(Guild.ChannelType.BRIDGE)

    @bridge_channel.setter
    def bridge_channel(self, new_channel):
        self.set_channel(new_channel, Guild.ChannelType.BRIDGE)
class User(LiveDocument):
    """Live view of per-user boolean flags, keyed by the user's id (doc_id)."""

    def __init__(self, handle, user=None, id=0):
        self.handle = handle
        self.id = id
        if user is not None:
            self.id = user.id

    @property
    def base_query(self) -> Query:
        # We will be accessing by `doc_id`, so we don't need this.
        raise NotImplementedError()

    def _get_flag(self, name) -> bool:
        # Shared lookup for boolean flags stored on this user's document;
        # a missing document or missing key both read as False.
        result = self.handle.table(USERS_TABLE_NAME).get(doc_id=self.id)
        return False if result is None else result.get(name, False)

    def _set_flag(self, name, value):
        # Shared upsert for boolean flags stored on this user's document.
        self.handle.table(USERS_TABLE_NAME).upsert(Document({
            name: value
        }, doc_id=self.id))

    @property
    def have_met(self) -> bool:
        """Stored `have_met` flag for this user (False when absent)."""
        return self._get_flag('have_met')

    @have_met.setter
    def have_met(self, new_met):
        logger.debug('Setting `have_met` for %s to %s', self.id, new_met)
        self._set_flag('have_met', new_met)

    @property
    def is_admin(self) -> bool:
        """Stored `is_admin` flag for this user (False when absent)."""
        return self._get_flag('is_admin')

    @is_admin.setter
    def is_admin(self, new_status):
        logger.debug('Setting `is_admin` for %s to %s', self.id, new_status)
        self._set_flag('is_admin', new_status)
class Database:
def __init__(self, filename):
self.handle = TinyDB(filename, indent=4)
logger.info('Opening %s as database', filename)
def message(self, *args, **kwargs) -> Message:
"""Gets the live document referring to a message from the database.
Examples:
`db.message(ctx.origin_message).status`
`await db.message(ctx.origin_message).original.fetch(bot)`
:return: A live document referring to a specific message.
:rtype: Message
"""
return Message(self.handle, *args, **kwargs)
def guild(self, *args, **kwargs) -> Guild:
"""Gets the live document referring to a guild from the database.
Examples:
`db.guild(ctx.guild).pending_channel`
`db.guild(ctx.guild).approved_channel
:return: A live document | |
<filename>notebooks/Users/pavan.r.r@koantek.com/GANS_MAFIA (1).py<gh_stars>0
# Databricks notebook source
# from google.colab import drive
# drive.mount('/content/drive')
# COMMAND ----------
# from google.colab.patches import cv2_imshow
# COMMAND ----------
!git clone https://github.com/dattasiddhartha/segmented-style-transfer
# COMMAND ----------
!git clone https://github.com/levindabhi/cloth-segmentation.git
# COMMAND ----------
# MAGIC %sh git clone https://github.com/levindabhi/cloth-segmentation.git --depth 1 --branch=master /dbfs/FileStore/TheData2/
# COMMAND ----------
# MAGIC %sh git clone https://github.com/dattasiddhartha/segmented-style-transfer --depth 1 --branch=master /dbfs/FileStore/TheData/
# COMMAND ----------
import sys
sys.path.append("/Workspace/Repos/roopesh.<EMAIL>/Revamp")
# COMMAND ----------
sys.path.append("/Workspace/Repos/ro<EMAIL>/Revamp3")
# COMMAND ----------
import sys
sys.path.append("/Workspace/Repos/roopesh.mangeshkar<EMAIL>k.com/cloth-segmentation")
# COMMAND ----------
sys.path.append("/Workspace/Repos/ro<EMAIL>/segmented-style-transfer")
# COMMAND ----------
import os
# from tqdm import tqdm
from tqdm.notebook import tqdm
from PIL import Image
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from data.base_dataset import Normalize_image
from utils.saving_utils import load_checkpoint_mgpu
from networks import U2NET
#from PIL import Image
import matplotlib.pyplot as plt
import matplotlib, random
import torch, torchvision
import torchvision.transforms as T
import numpy as np
import numpy.ma as ma
import cv2
from vision.faststyletransfer_eval import FasterStyleTransfer
import collections
import ntpath
# COMMAND ----------
# MAGIC %scala
# MAGIC dbutils.fs.mount(
# MAGIC source = "wasbs://revamp-dataset-1@koanteklndblob.blob.core.windows.net/",
# MAGIC mountPoint = "/mnt/revamp-dataset-1",
# MAGIC extraConfigs=Map("fs.azure.account.key.koanteklndblob.blob.core.windows.net" -> "<KEY>))
# COMMAND ----------
dbutils.fs.ls()
# COMMAND ----------
# x=cv2.imread('/content/drive/MyDrive/sample_tendulkar.jpg')
# COMMAND ----------
# Runtime configuration for the cloth-segmentation pipeline.
device='cuda'
# COMMAND ----------
# Maps input image path -> generated mask path; filled by the batch loop below.
image_to_mask_mapping={}
# COMMAND ----------
# Working folders on the mounted Azure blob, plus the pretrained checkpoint.
image_dir = '/dbfs/mnt/revamp-dataset-1/Input_Folder'
mask_dir = '/dbfs/mnt/revamp-dataset-1/Mask_Folder'
output_dir='/dbfs/mnt/revamp-dataset-1/Output_Folder'
style_transfer_mask_dir='/dbfs/mnt/revamp-dataset-1/Styled_Folder'
checkpoint_path = '/dbfs/mnt/revamp-dataset-1/cloth_segm_u2net_latest.pth'
# COMMAND ----------
# Input preprocessing: to-tensor then normalise to [-1, 1].
transforms_list = []
transforms_list += [transforms.ToTensor()]
transforms_list += [Normalize_image(0.5, 0.5)]
transform_rgb = transforms.Compose(transforms_list)
# COMMAND ----------
# U2NET with 3 input channels (RGB) and 4 output classes, in eval mode.
# NOTE(review): the `.to(device)` move is commented out while device='cuda' —
# confirm whether inference is intended to run on CPU here.
net = U2NET(in_ch=3, out_ch=4)
net = load_checkpoint_mgpu(net, checkpoint_path)
# net = net.to(device)
net = net.eval()
# COMMAND ----------
def get_palette(num_cls):
    """Return a flat color map (3 values per class) for mask visualization.

    Args:
        num_cls: Number of classes
    Returns:
        The color map as a flat [R, G, B, R, G, B, ...] list.
    """
    palette = [0] * (num_cls * 3)
    for cls in range(num_cls):
        value = cls
        shift = 7
        r = g = b = 0
        # Spread the bits of the class index across the three channels,
        # three bits at a time, from the most significant bit down.
        while value:
            r |= ((value >> 0) & 1) << shift
            g |= ((value >> 1) & 1) << shift
            b |= ((value >> 2) & 1) << shift
            shift -= 1
            value >>= 3
        palette[cls * 3 + 0] = r
        palette[cls * 3 + 1] = g
        palette[cls * 3 + 2] = b
    return palette
# COMMAND ----------
# Palette for the 4 segmentation classes.
palette = get_palette(4)
# COMMAND ----------
# Quick visual check: show each input image followed by its generated mask.
for i,j in image_to_mask_mapping.items():
    plt.imshow(cv2.imread(i))
    plt.show()
    plt.imshow(cv2.imread(j))
    plt.show()
# COMMAND ----------
# x=cv2.imread('/content/drive/MyDrive/lufi_sample.jpg')
# COMMAND ----------
# print(x)
# COMMAND ----------
""" img = Image.open('/content/drive/MyDrive/lufi_sample.jpg').convert('RGB')
img_size = img.size
img = img.resize((768, 768), Image.BICUBIC)
image_tensor = transform_rgb(img)
image_tensor = torch.unsqueeze(image_tensor, 0)
output_tensor = net(image_tensor.to(device))
output_tensor = F.log_softmax(output_tensor[0], dim=1)
output_tensor = torch.max(output_tensor, dim=1, keepdim=True)[1]
output_tensor = torch.squeeze(output_tensor, dim=0)
output_tensor = torch.squeeze(output_tensor, dim=0)
output_arr = output_tensor.cpu().numpy()
output_img = Image.fromarray(output_arr.astype('uint8'), mode='L')
output_img = output_img.resize(img_size, Image.BICUBIC)
output_img.putpalette(palette) """
# COMMAND ----------
# Run cloth segmentation over at most 100 input images: save a colour-coded
# mask PNG per image and record the input->mask path mapping.
images_list = sorted(os.listdir(image_dir))
pbar = tqdm(total=len(images_list))
i = 0
for image_name in images_list:
    i += 1
    img = Image.open(os.path.join(image_dir, image_name)).convert('RGB')
    img_size = img.size
    # U2NET is fed a fixed 768x768 input; the mask is resized back later.
    img = img.resize((768, 768), Image.BICUBIC)
    image_tensor = transform_rgb(img)
    image_tensor = torch.unsqueeze(image_tensor, 0)
    output_tensor = net(image_tensor.to(device))
    output_tensor = F.log_softmax(output_tensor[0], dim=1)
    # Per-pixel argmax over the 4 classes -> single-channel label map.
    output_tensor = torch.max(output_tensor, dim=1, keepdim=True)[1]
    output_tensor = torch.squeeze(output_tensor, dim=0)
    output_tensor = torch.squeeze(output_tensor, dim=0)
    output_arr = output_tensor.cpu().numpy()
    output_img = Image.fromarray(output_arr.astype('uint8'), mode='L')
    # Restore the original image size and colour-code the class labels.
    output_img = output_img.resize(img_size, Image.BICUBIC)
    output_img.putpalette(palette)
    # d[os.path.join(image_dir, image_name)]=output_img
    output_img.save(os.path.join(mask_dir, image_name[:-4]+'_generated.png'))
    image_to_mask_mapping  # NOTE(review): bare expression, has no effect
    pbar.update(1)
    image_to_mask_mapping[os.path.join(image_dir, image_name)] = os.path.join(mask_dir, image_name[:-4]+'_generated.png')
    # Cap the batch at 100 images.
    if i == 100:
        break
pbar.close()
# COMMAND ----------
for i,j in image_to_mask_mapping.items():
cv2_imshow(cv2.imread(i))
cv2_imshow(cv2.imread(j))
# COMMAND ----------
model = torch.load('/content/drive/MyDrive/GANS/Models/cloth_segm_u2net_latest.pth')
# COMMAND ----------
import cv2
from matplotlib import pyplot as plt
# COMMAND ----------
# MAGIC %md
# MAGIC ### Overlap Image and Mask
# COMMAND ----------
# Preview: alpha-blend each generated mask over its source image.
for a, b in image_to_mask_mapping.items():
    masks = cv2.imread(b)
    img = cv2.imread(a)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # for i in range(len(masks)):
    # rgb_mask = random_colour_masks(masks[i])
    # Overlay at 40% image weight / 100% mask weight.
    img = cv2.addWeighted(img, 0.4, masks, 1, 0)
    #cv2.rectangle(img, boxes[i][0], boxes[i][1],color=(0, 255, 0), thickness=rect_th) # no bounding boxes required
    # cv2.putText(img,pred_cls[i], boxes[i][0], cv2.FONT_HERSHEY_SIMPLEX, text_size, (0,255,0),thickness=text_th)
    plt.figure(figsize=(20,30))
    plt.imshow(img)
    plt.xticks([])
    plt.yticks([])
    plt.show()
    # return img
# COMMAND ----------
# MAGIC %md
# MAGIC ### Mapping masks to input
# COMMAND ----------
for i,j in image_to_mask_mapping.items():
# print(i,j)
original_image=cv2.imread(i)
img_original_rbg = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
original_mask=cv2.imread(j)
break
# COMMAND ----------
def mapping(img_path='./payload/IMG-20200401-WA0002.jpg'):
    """Load an input image and its previously generated mask (both as BGR arrays)."""
    image = cv2.imread(img_path)
    mask = cv2.imread(image_to_mask_mapping[img_path])
    return image, mask
# COMMAND ----------
x,y=mapping('/content/drive/MyDrive/GANS/Input_Folder/modi.jpg')
# COMMAND ----------
# image_to_mask_mapping['/content/drive/MyDrive/new dataset/dataset/test/00a8764cff12b2e849c850f4be5608bc.jpg']
# COMMAND ----------
def mask_segments(img_path='./payload/IMG-20200401-WA0002.jpg'):
    """Return (RGB image array, RGB tensor, greyscale mask) for *img_path*."""
    bgr_image, mask = mapping(img_path)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    to_tensor = T.Compose([T.ToTensor()])
    # BGR tensor is produced but unused downstream; kept for parity.
    img = to_tensor(bgr_image)
    img_rgb = to_tensor(rgb_image)
    # Collapse the mask PNG to a single grey channel.
    mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
    return rgb_image, img_rgb, mask
# COMMAND ----------
# cv2_imshow(cv2.cvtColor(original_mask, cv2.COLOR_BGR2GRAY))
# COMMAND ----------
# plt.imshow(original_image)
# plt.show()
# COMMAND ----------
def PartialStyleTransfer(segment = 0, img_path='./payload/IMG-20200401-WA0002.jpg', style_path="./fast_neural_style_transfer/models/mosaic_style__200_iter__vgg19_weights.pth"):
    """Apply fast style transfer to only the masked region of an image.

    Saves a "<name>_MASK.png" (masked pixels only) and a "<name>_FST.png"
    (stylised mask) and returns (style image, float tensor array, uint8 array).

    NOTE(review): the `segment` parameter is unused in this body — confirm
    whether per-segment selection was intended.
    """
    print("Started partial style transfer")
    # mode can be 'styled' or 'color'
    # return indices on number of segments
    img_original_rbg, img_rgb, masks = mask_segments(img_path)
    plt.imshow(img_original_rbg)
    plt.show()
    print(len(masks))
    # Implicitly returns None when the mask is empty.
    if len(masks) > 0:
        mask = masks
        print(mask.shape)
        # print mask of image with the original image pixels
        img_array = np.array(img_original_rbg[:,:,:])
        img_array_floating = np.array(img_rgb[:,:,:])
        # if False, set as 0 (black)
        # Build an [h][w][c] image that keeps original pixels where the mask
        # is set and black elsewhere.  img_array_floating comes from
        # T.ToTensor(), so it is indexed [channel][row][col].
        masked_img = []
        for h in range(img_original_rbg.shape[0]):
            sub_masked_img = []
            for i in range(img_original_rbg.shape[1]):
                tmp = []
                for j in range(img_original_rbg.shape[2]):
                    # mask is greyscale; `== False` keeps only zero pixels black.
                    if mask[h][i] == False:
                        tmp.append(float(0))
                    else:
                        tmp.append(img_array_floating[j][h][i])
                sub_masked_img.append(tmp)
            masked_img.append(sub_masked_img)
        masked_img_array = np.array(masked_img)
        plt.imshow(masked_img_array[:,:,:]) # Export this mask image for style transfer
        plt.show()
        matplotlib.image.imsave(str(mask_dir + "/" + ntpath.basename(img_path)[:-4]+str("_MASK")+".png"), masked_img_array)
        # Stylise the masked image, then reload the result for display.
        FasterStyleTransfer(style_path, str(mask_dir + "/" + ntpath.basename(img_path)[:-4]+str("_MASK")+".png"), str(style_transfer_mask_dir + "/" + ntpath.basename(img_path)[:-4]+str("_FST")+".png"))
        style_img = Image.open(str(style_transfer_mask_dir + "/" + ntpath.basename(img_path)[:-4]+str("_FST")+".png"))
        plt.imshow(style_img)
        plt.show()
        return style_img, img_array_floating, img_array
# COMMAND ----------
def PixelRemoved(img_path='./payload/IMG-20200401-WA0002.jpg'):
    """Composite the stylised mask back onto the original image.

    Treats the most frequent pixel value of the stylised mask as background
    and replaces it with the corresponding original pixel; saves the result
    as "<name>_MASK_FST.png" in output_dir and returns the array.
    """
    transform = T.Compose([T.ToTensor()])
    img_original_rbg = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
    img_rgb = transform(img_original_rbg)
    img_array_floating = np.array(img_rgb[:,:,:])
    style_img_original = Image.open(str(style_transfer_mask_dir + "/" + ntpath.basename(img_path)[:-4]+str("_FST")+".png"))
    WIDTH, HEIGHT = cv2.cvtColor(cv2.imread(str(mask_dir + "/" + ntpath.basename(img_path)[:-4]+str("_MASK")+".png")), cv2.COLOR_BGR2RGB).shape[1], cv2.cvtColor(cv2.imread(str(mask_dir + "/" + ntpath.basename(img_path)[:-4]+str("_MASK")+".png")), cv2.COLOR_BGR2RGB).shape[0]
    style_img_rbg = cv2.resize(cv2.cvtColor(cv2.imread(str(style_transfer_mask_dir + "/" + ntpath.basename(img_path)[:-4]+str("_FST")+".png")), cv2.COLOR_BGR2RGB), (WIDTH,HEIGHT), interpolation=cv2.INTER_CUBIC) # FST reshaped the dimension, this lines reshapes back to consistent dimensions
    styled_img = transform(style_img_original)
    styled_img_rgb = transform(style_img_rbg)
    # remove most frequent pixel
    # Counter over all flattened values: the modal value is assumed to be
    # the background produced by the style transfer.
    pix_remove = list(dict(collections.Counter(np.hstack(np.hstack(styled_img_rgb))).most_common()).keys())[0]
    # img_array = np.array(img_original_rbg[:,:,:])
    styled_img_rgb_floating = np.array(styled_img_rgb[:,:,:])
    masked_img = []
    # When it is detected to be a background pixed, a background pixel from original image is inserted
    # Tensors from T.ToTensor() are indexed [channel][row][col]; output is [h][w][c].
    for h in range(style_img_rbg.shape[0]):
        sub_masked_img = []
        for i in range(style_img_rbg.shape[1]):
            tmp = []
            for j in range(style_img_rbg.shape[2]):
                # +/- 0.1 tolerance band around the modal (background) value.
                if (float(styled_img_rgb[j][h][i]) > float(pix_remove)-0.1) and (float(styled_img_rgb[j][h][i]) < float(pix_remove)+0.1):
                    tmp.append(img_array_floating[j][h][i])
                else:
                    tmp.append(styled_img_rgb_floating[j][h][i])
            sub_masked_img.append(tmp)
        masked_img.append(sub_masked_img)
    masked_img_array = np.array(masked_img)
    plt.imshow(masked_img_array[:,:,:])
    matplotlib.image.imsave(str(output_dir + "/" + ntpath.basename(img_path)[:-4]+str("_MASK_FST")+".png"), masked_img_array)
    return masked_img_array
# COMMAND ----------
cd /content/segmented-style-transfer
# COMMAND ----------
class GansMafia:
    """Wrapper around the cloth-segmentation pipeline for one input image.

    Class attributes point at the (Google Drive) working folders and the
    pretrained cloth-segmentation U2NET checkpoint.
    """

    device = 'cuda'
    image_dir = '/content/drive/MyDrive/GANS/Input_Folder'
    mask_dir = '/content/drive/MyDrive/GANS/Mask_Folder'
    output_dir = '/content/drive/MyDrive/GANS/Output_Folder'
    style_transfer_mask_dir = '/content/drive/MyDrive/GANS/Styled_Folder'
    checkpoint_path = '/content/drive/MyDrive/GANS/Models/cloth_segm_u2net_latest.pth'

    def __init__(self, inp_img):
        self.inp_img = inp_img
        self.img_name = ntpath.basename(self.inp_img)[:-4]
        # Bug fix: the transform and network were previously assigned to
        # locals and lost when __init__ returned; keep them on the instance.
        # Also read checkpoint_path/device via self — bare names resolved
        # neither to the class attributes nor (outside the notebook) to
        # any global, raising NameError.
        self.transform_rgb = transforms.Compose([
            transforms.ToTensor(),
            Normalize_image(0.5, 0.5),
        ])
        net = U2NET(in_ch=3, out_ch=4)
        net = load_checkpoint_mgpu(net, self.checkpoint_path)
        net = net.to(self.device)
        self.net = net.eval()

    def get_palette(self, num_cls):
        """ Returns the color map for visualizing the segmentation mask.
        Args:
            num_cls: Number of classes
        Returns:
            The color map
        """
        n = num_cls
        palette = [0] * (n * 3)
        for j in range(0, n):
            lab = j
            palette[j * 3 + 0] = 0
            palette[j * 3 + 1] = 0
            palette[j * 3 + 2] = 0
            i = 0
            # Spread the bits of the class index across R/G/B, MSB first.
            while lab:
                palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))
                palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))
                palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))
                i += 1
                lab >>= 3
        return palette

    def mask_dress(self):
        """Segment self.inp_img with U2NET and save the colour-coded mask.

        Returns the path of the saved mask PNG and records it in the
        global image_to_mask_mapping.
        """
        img = Image.open(self.inp_img).convert('RGB')
        img_size = img.size
        # U2NET is fed a fixed 768x768 input; the mask is resized back below.
        img = img.resize((768, 768), Image.BICUBIC)
        image_tensor = self.transform_rgb(img)
        image_tensor = torch.unsqueeze(image_tensor, 0)
        output_tensor = self.net(image_tensor.to(self.device))
        output_tensor = F.log_softmax(output_tensor[0], dim=1)
        # Per-pixel argmax over the 4 classes -> single-channel label map.
        output_tensor = torch.max(output_tensor, dim=1, keepdim=True)[1]
        output_tensor = torch.squeeze(output_tensor, dim=0)
        output_tensor = torch.squeeze(output_tensor, dim=0)
        output_arr = output_tensor.cpu().numpy()
        output_img = Image.fromarray(output_arr.astype('uint8'), mode='L')
        output_img = output_img.resize(img_size, Image.BICUBIC)
        output_img.putpalette(self.get_palette(4))
        # Bug fix: the original referenced an undefined `image_name`;
        # derive the output path from this instance's own input image.
        mask_path = os.path.join(self.mask_dir, self.img_name + '_generated.png')
        output_img.save(mask_path)
        image_to_mask_mapping[self.inp_img] = mask_path
        return mask_path

    def getInpImg(self):
        """Return the input image path this instance wraps."""
        return self.inp_img
# COMMAND ----------
GansMafia('/a/b/c').getInpImg()
# COMMAND ----------
style_img, img_array_floating, img_array = PartialStyleTransfer(segment = 13, img_path='/content/drive/MyDrive/GANS/Input_Folder/john.jpg', style_path="./vision/fast_neural_style_transfer/models/mosaic.pth")
masked_img_array = PixelRemoved(img_path='/content/drive/MyDrive/GANS/Input_Folder/john.jpg')
# COMMAND ----------
!pwd
# COMMAND ----------
image_to_mask_mapping.items()
# COMMAND ----------
# COMMAND ----------
# COMMAND ----------
def PixelRemoved(img_path='./payload/IMG-20200401-WA0002.jpg'):
transform = T.Compose([T.ToTensor()])
# img_original_rbg = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
img_original_rbg=cv2.imread(img_path)
img_rgb = transform(img_original_rbg)
img_array_floating = np.array(img_rgb[:,:,:])
style_img_original = Image.open(str(img_path[:-4]+str("_FST")+".png"))
WIDTH, HEIGHT = cv2.cvtColor(cv2.imread(str(img_path[:-4]+str("_MASK")+".png")), cv2.COLOR_BGR2RGB).shape[1], cv2.cvtColor(cv2.imread(str(img_path[:-4]+str("_MASK")+".png")), cv2.COLOR_BGR2RGB).shape[0]
style_img_rbg = cv2.resize(cv2.cvtColor(cv2.imread(str(img_path[:-4]+str("_FST")+".png")), cv2.COLOR_BGR2RGB), (WIDTH,HEIGHT), interpolation=cv2.INTER_CUBIC) # FST reshaped the dimension, this lines reshapes back to consistent dimensions
styled_img = transform(style_img_original)
styled_img_rgb = transform(style_img_rbg)
# remove most frequent pixel
pix_remove = list(dict(collections.Counter(np.hstack(np.hstack(styled_img_rgb))).most_common()).keys())[0]
# img_array = np.array(img_original_rbg[:,:,:])
styled_img_rgb_floating = np.array(styled_img_rgb[:,:,:])
masked_img = []
# When it is detected to be a background pixed, a background pixel from original image is inserted
for h in range(style_img_rbg.shape[0]):
sub_masked_img = []
for i in range(style_img_rbg.shape[1]):
tmp=[]
for j in range(style_img_rbg.shape[2]):
if (float(styled_img_rgb[j][h][i]) > float(pix_remove)-0.1) and (float(styled_img_rgb[j][h][i]) < float(pix_remove)+0.1):
tmp.append(img_array_floating[j][h][i])
else:
tmp.append(styled_img_rgb_floating[j][h][i])
sub_masked_img.append(tmp)
masked_img.append(sub_masked_img)
masked_img_array = np.array(masked_img)
print("this is | |
<reponame>guoqiao/charm_upgrade
#!/usr/bin/python3
import sys
import time
import json
import argparse
import subprocess
import logging
from os import getenv
from os.path import abspath, dirname, join
from collections import defaultdict
import requests
LOG = logging.getLogger(__name__)
LOG_FMT = '%(asctime)s %(levelname)s: %(message)s'
# charms upgrade order
# https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-upgrade-openstack.html#upgrade-order
# https://wiki.canonical.com/CDO/IS/Bootstack/Playbooks/OpenstackCharmUpgrades#Upgrade_Order
NA = '--'
ORDER_MAX = 999
# only care about these branches
BRANCHES = ['20.02', '20.05', '20.08', '20.10', '21.01', '21.04']
HERE = abspath(dirname(__file__))
# branch: github release branch short name, e.g.: 20.05
# commit: git hash, e.g.: 892016dff67830ac16f46e17aefecb3d231063ae
# revision: charm store rev 303 as str
FILE_BRANCH_COMMIT = join(HERE, 'branch_commit.json')
FILE_REVISION_COMMIT = join(HERE, 'revision_commit.json')
FILE_BRANCH_REVISION = join(HERE, 'branch_revision.json')
"""
The following order is the most recommended:
- keystone
- glance
- nova
- neutron
- cinder
- horizon
- heat
ref: https://superuser.openstack.org/articles/openstack-upgrading-tutorial-11-pitfalls-and-solutions/
"""
# will upgrade in this order
OPENSTACK_CHAMRS = [
"barbican-vault", "barbican",
"keystone-ldap", "keystone",
"rabbitmq-server",
"glance",
"nova-cloud-controller",
"designate-bind", "designate",
"octavia-dashboard", "octavia",
"neutron-openvswitch", "neutron-api", "neutron-gateway",
"cinder-backup", "cinder-ceph", "cinder",
"ceph-mon", "ceph-radosgw", "ceph-fs", "ceph-osd",
"swift-proxy", "swift-storage",
"openstack-dashboard",
"heat",
"nova-compute",
"aodh",
"ceilometer-agent", "ceilometer",
"gnocchi",
"hacluster",
"vault",
]
# lma charms will be ugpraded after openstack charms, in this order
LMA_CHARMS = [
"canonical-livepatch",
"thruk-agent", "nrpe", "nagios",
"grafana",
"prometheus-ceph-exporter",
"prometheus-libvirt-exporter",
"prometheus-openstack-exporter",
"prometheus",
"prometheus2",
"telegraf",
"kibana", "filebeat", "graylog", "elasticsearch",
"etcd",
"easyrsa",
"mysql", "percona-cluster",
"mongodb",
"ntp",
]
ORDERS = {charm: i for i, charm in enumerate(OPENSTACK_CHAMRS + LMA_CHARMS)}
def pretty_json(obj):
    """Serialise *obj* as human-readable, 4-space-indented JSON text."""
    return json.dumps(obj, indent=4)
def print_json(obj):
    """Pretty-print *obj* as indented JSON on stdout."""
    print(pretty_json(obj))
def load_json(path):
    """Parse and return the JSON document stored at *path*."""
    LOG.debug('load json from %s', path)
    with open(path) as fp:
        data = json.load(fp)
    return data
def save_json(path, data):
    """Write *data* to *path* as pretty-printed JSON."""
    with open(path, mode='w') as fp:
        fp.write(pretty_json(data))
    LOG.info('save json to %s', path)
def get_cmd_output(cmd, is_json=False):
    """Run *cmd* (a list of args) and return its stdout, optionally parsed as JSON."""
    # cmd is a list
    LOG.debug('run cmd: %s', ' '.join(cmd))
    output = subprocess.check_output(cmd).decode('utf8')
    if not is_json:
        LOG.debug('cmd output: %s', output)
        return output
    parsed = json.loads(output)
    LOG.debug('cmd output as json: %s', pretty_json(parsed))
    return parsed
def get_url_output(url, is_json=False, **kwargs):
    """GET *url* and return the response body, optionally parsed as JSON."""
    resp = requests.get(url, **kwargs)
    if not is_json:
        text = resp.text
        LOG.debug('url output: %s', text)
        return text
    payload = resp.json()
    LOG.debug('url output as json: %s', pretty_json(payload))
    return payload
def get_repo_branch_commit_map(repo, owner='openstack', branch_prefix='stable'):
    """GitHub repo branch to commit map.
    Returns a dict like:
    {
        '20.05': 'd4be28550008426d5d8ac6af2ea93ce0da685390',
        '20.02': '2ec5fe7a873b2b5836f15dfab4ad569c9b7ab0f7',
        ...
    }
    e.g.: for repo charm-keystone:
    curl https://api.github.com/repos/openstack/charm-keystone/git/matching-refs/heads/stable
    [
        ...
        {
            "ref": "refs/heads/stable/20.05",
            "node_id": "MDM6UmVmNTI4NTg3Njc6cmVmcy9oZWFkcy9zdGFibGUvMjAuMDU=",
            "url": "https://api.github.com/repos/openstack/charm-keystone/git/refs/heads/stable/20.05",
            "object": {
                "sha": "d4be28550008426d5d8ac6af2ea93ce0da685390",
                "type": "commit",
                "url": "https://api.github.com/repos/openstack/charm-keystone/git/commits/d4be28550008426d5d8ac6af2ea93ce0da685390"
            }
        }
    ]
    ref: https://developer.github.com/v3/git/refs/#list-matching-references
    """
    url = 'https://api.github.com/repos/{owner}/{repo}/git/matching-refs/heads/{branch_prefix}'.format(
        owner=owner, repo=repo, branch_prefix=branch_prefix)
    LOG.debug(url)
    # Prefer authenticated requests: GitHub allows 5000 req/h authenticated
    # versus 60 req/h anonymous.
    github_token = getenv('GITHUB_TOKEN')
    github_user = getenv('GITHUB_USER')
    # NOTE(review): '<PASSWORD>' looks like a credential-scrubbing artifact;
    # confirm the intended env var name (likely 'GITHUB_PASS').
    github_pass = getenv('<PASSWORD>')
    if github_token:
        # https://developer.github.com/v3/#oauth2-token-sent-in-a-header
        # curl -H "Authorization: token OAUTH-TOKEN" https://api.github.com
        LOG.debug('OAuth2 token used for github api, 5000 requests/hour')
        resp = requests.get(url, headers={'Authorization': 'token {}'.format(github_token)})
    elif github_user and github_pass:
        LOG.debug('basic auth used for github api, 5000 requests/hour')
        resp = requests.get(url, auth=(github_user, github_pass))
    else:
        LOG.warning('no auth used for github api, 60 requests/hour, sleeping 3 secs...')
        time.sleep(3)  # slow down for github api limit
        resp = requests.get(url)
    items = resp.json()
    # {'20.05': 'd4be28...', '20.02': 'd3812d...', ...}
    # Ref 'refs/heads/stable/20.05' -> key '20.05'.
    return {branch['ref'].rsplit('/')[-1]: branch['object']['sha'] for branch in items}
def get_revision_commit(charm, rev=None):
    """Get commit for charm revision.
    Args:
        rev: revision number in str, since json only allow str keys.
            If None, will return commit for latest revision.
            Mutliple revs may have same commit.
    Returns: tuple (rev, commit) — either may be '' when unavailable.
    Get latest revision:
        curl https://api.jujucharms.com/charmstore/v5/cinder/meta/any?include=extra-info'
    Get specified revision:
        curl https://api.jujucharms.com/charmstore/v5/cinder-303/meta/any?include=extra-info'
    Output:
    {
        Id: "cs:cinder-303",
        Meta: {
            extra-info: {
                vcs-revisions: [{
                    authors: [
                        {
                            name: "<NAME>",
                            email: "<EMAIL>"
                        }
                    ],
                    date: "2020-05-21T09:54:19-07:00",
                    message: "Updates for stable branch...",
                    commit: "9b8a2305a00a22903e0cc210a57fc1e27333859e"
                }]
            }
        }
    }
    But sometimes it returns this:
    {
        "Id": "cs:barbican-vault-15",
        "Meta": {
            "extra-info": {}
        }
    }
    When this happens, we fallback to get repo info from this url:
        https://api.jujucharms.com/charmstore/v5/barbican-vault-4/archive/repo-info
    commit-sha-1: a52f533b54abce67fb3df642cda5695568fbfb90
    commit-short: a52f533
    branch: HEAD
    remote: https://github.com/openstack/charm-barbican-vault
    info-generated: Fri May 31 06:47:59 UTC 2019
    note: This file should exist only in a built or released charm artifact (not in the charm source code tree).
    """
    # Query a specific revision when given, otherwise the latest.
    if rev:  # rev is str
        name = '{}-{}'.format(charm, rev)
    else:
        name = charm
    url = 'https://api.jujucharms.com/charmstore/v5/{}/meta/any?include=extra-info'.format(name)
    data = get_url_output(url, is_json=True)
    rev = ''
    commit = ''
    # The revision actually resolved is the suffix of the returned Id,
    # e.g. 'cs:cinder-303' -> '303'.
    Id = data.get('Id')
    if Id and '-' in Id:
        rev = Id.rsplit('-')[-1]
        vcs_revisions = data.get('Meta', {}).get('extra-info', {}).get('vcs-revisions', {})
        if vcs_revisions:
            commit = vcs_revisions[0].get('commit')
        if not commit:
            # if Id exists, but no commit, fall back to use repo-info file content
            url = 'https://api.jujucharms.com/charmstore/v5/{}-{}/archive/repo-info'.format(charm, rev)
            text = get_url_output(url)
            for line in text.strip().splitlines():
                line = line.strip()
                if ':' in line:
                    key, value = line.split(':', maxsplit=1)
                    if key == 'commit-sha-1':
                        commit = value.strip()
                        LOG.debug('%s %s %s', charm, rev, line)
                        break
    return rev, commit
def update_charm_revisions(charm, revisions):
    """Update rev -> commit mapping for a charm.

    Walks downwards from the charm's latest revision, filling in any
    missing rev -> commit entries, and stops early after 3 consecutive
    revisions without a commit.

    Args:
        revisions (dict): existing rev -> commit mapping; updated in place.
    Returns:
        changed (int): how many commits were added to *revisions*.
    """
    changed = 0
    rev, commit = get_revision_commit(charm)  # get current/latest rev commit
    if rev not in revisions:
        revisions[rev] = commit
        changed += 1
    n_rev = int(rev)
    missing_revs = 0
    while n_rev > 0:
        n_rev -= 1
        rev = str(n_rev)
        if rev in revisions:
            LOG.info('%s %s exists, skip', charm, rev)
            continue
        rev, commit = get_revision_commit(charm, rev=rev)
        if rev and commit:
            missing_revs = 0
            revisions[rev] = commit
            changed += 1
            LOG.info('%s %s: %s', charm, rev, commit)
            time.sleep(1)  # slow down to avoid api rate limit
        else:
            # NOTE(review): message mentions the rev but only formats the
            # charm name — the revision is not logged here.
            LOG.warning('no commit for charm %s rev', charm)
            missing_revs += 1
            if missing_revs >= 3:
                LOG.warning('more than 3 revs missing for %s, break', charm)
                break
    return changed
def update_branch_commit():
    """Fetch the branch -> commit map for every OpenStack charm and save it."""
    mapping = {
        charm: get_repo_branch_commit_map('charm-' + charm)
        for charm in OPENSTACK_CHAMRS
    }
    save_json(FILE_BRANCH_COMMIT, mapping)
def update_revision_commit():
    """Refresh the cached revision -> commit mapping for all OpenStack charms."""
    # Perf fix: load the mapping file once instead of re-reading it from
    # disk on every charm iteration; `update_charm_revisions` mutates
    # `current_revisions` in place and we save after each change, so the
    # in-memory copy always matches what a reload would return.
    current_revisions = load_json(FILE_REVISION_COMMIT)
    for charm in OPENSTACK_CHAMRS:
        if charm not in current_revisions:  # new added charm
            current_revisions[charm] = {}
        # will update `current_revisions` in place, return changed count
        changed = update_charm_revisions(charm, current_revisions[charm])
        if changed:
            save_json(FILE_REVISION_COMMIT, current_revisions)
def update_branch_revision():
    """Derive and save the branch -> charm-store-revision map from cached files."""
    # convert rev -> commit mapping to commit -> max_rev
    OPENSTACK_CHARMS_COMMIT_REVISION = {}
    for charm, revisions in load_json(FILE_REVISION_COMMIT).items():
        commits = {}
        for rev, commit in revisions.items():
            # it's possible N revs have same commit, we take the largest rev
            if int(rev) > int(commits.get(commit, 0)):
                commits[commit] = rev
        OPENSTACK_CHARMS_COMMIT_REVISION[charm] = commits
    # 20.05 -> 5dcbfd..
    OPENSTACK_CHARMS_BRANCH_COMMIT = load_json(FILE_BRANCH_COMMIT)
    # 20.05 -> 303
    # Join the two maps on commit hash: branch -> commit -> revision.
    data = defaultdict(dict)
    for charm, dict_branch_commit in OPENSTACK_CHARMS_BRANCH_COMMIT.items():
        dict_branch_revision = {}
        for branch, commit in dict_branch_commit.items():
            rev = OPENSTACK_CHARMS_COMMIT_REVISION[charm].get(commit, '')
            if rev:
                dict_branch_revision[branch] = rev
        data[charm] = dict_branch_revision
    save_json(FILE_BRANCH_REVISION, data)
def yesno(boolean, yes, no):
    """Ternary helper: return *yes* when *boolean* is truthy, else *no*.

    Robustness fix: the previous `(no, yes)[boolean]` tuple-indexing trick
    raised IndexError/TypeError for truthy values other than True/1; the
    native conditional expression handles any truthy value.
    """
    return yes if boolean else no
def mark_revs(revs, current_rev=''):
    """Return *revs* with a '*' prefixed to the entry matching *current_rev*.

    Entries are stringified in the result; when *current_rev* is falsy the
    input list is returned unchanged.
    """
    if not current_rev:
        return revs
    current = str(current_rev)
    # Idiom fix: native conditional expression instead of the yesno()
    # tuple-index helper (also removes the cross-function dependency).
    return ['{}{}'.format('*' if str(rev) == current else '', rev)
            for rev in revs]
def print_app(order, app, current, latest, revs, units):
    """Print one aligned status row: order, app, charm URI, latest rev, branch revs, units."""
    rev_columns = ''.join('{:>7}'.format(rev) for rev in revs)
    row = '{:>2} {:<30} {:<40} {:>7} {} {:>5}'.format(
        order, app, current, latest, rev_columns, units)
    print(row)
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='charm upgrade helper'
)
parser.add_argument(
'-v', '--verbose', action='store_true',
help='Be verbose')
parser.add_argument(
'-b', '--update-branch-commit', dest='update_branch_commit', action='store_true',
help='Update charm branch commit mapping with github api, save to file')
parser.add_argument(
'-r', '--update-revision-commit', dest='update_revision_commit', action='store_true',
help='Update charm revision commit mapping with charmstore api, save to file')
parser.add_argument(
'-B', '--update-branch-revision', dest='update_branch_revision', action='store_true',
help='Update charm branch revision mapping based on existing data, save to file')
parser.add_argument(
'-a', '--update-all', dest='update_all', action='store_true',
help='Update all files')
parser.add_argument(
'-s', '--status-json-file',
dest='status_json_file',
help='Load juju status json data from this file')
cli = parser.parse_args()
logging.basicConfig(level=['INFO', 'DEBUG'][cli.verbose], format=LOG_FMT)
if cli.update_branch_commit:
update_branch_commit()
sys.exit()
elif cli.update_revision_commit:
update_revision_commit()
sys.exit()
elif cli.update_branch_revision:
update_branch_revision()
sys.exit()
elif cli.update_all:
update_branch_commit()
update_revision_commit()
update_branch_revision()
sys.exit()
if cli.status_json_file:
# if user specified a file, read from there, helpful for local debug
juju_status = load_json(cli.status_json_file)
else:
# if default file not exist, generate/save/cache it for reuse
juju_status = get_cmd_output(['juju', 'status', '--format', 'json'], is_json=True)
# merge all data into a ordered list
apps = []
for app_name, app_data in juju_status['applications'].items():
data = app_data.copy()
data['name'] = app_name
charm_name = app_data['charm-name']
charm_uri = app_data['charm']
data['charm-uri'] = charm_uri
data['order'] = ORDERS.get(charm_name, ORDER_MAX) # make it large to sort at last
data['charm-release'] = 'NA'
data['units'] = len(app_data.get('units', {})) or ''
apps.append(data)
branch_to_revision = load_json(FILE_BRANCH_REVISION)
print('[help: N: order, *: current, {}: NA]'.format(NA))
print_app('N', 'app', 'current', 'latest', BRANCHES, 'units') # title
for app in sorted(apps, key=lambda app: app['order']):
order = app['order']
if order == ORDER_MAX:
order = NA
charm_name = app['charm-name']
revs = [branch_to_revision.get(charm_name, {}).get(branch, '') for branch in BRANCHES]
can_upgrade_to = app.get('can-upgrade-to', '')
if can_upgrade_to and '-' in can_upgrade_to:
latest_rev = can_upgrade_to.rsplit('-')[-1]
else:
latest_rev = NA
print_app(
order,
app['name'],
app['charm-uri'][:40],
latest_rev,
mark_revs(revs, | |
from cardboard import types
from cardboard.ability import (
AbilityNotImplemented, spell, activated, triggered, static
)
from cardboard.cards import card, common, keywords, match
@card("Maze of Ith")
def maze_of_ith(card, abilities):
def maze_of_ith():
return AbilityNotImplemented
return maze_of_ith,
@card("Stone Calendar")
def stone_calendar(card, abilities):
def stone_calendar():
return AbilityNotImplemented
return stone_calendar,
@card("Goblin Wizard")
def goblin_wizard(card, abilities):
def goblin_wizard():
return AbilityNotImplemented
def goblin_wizard():
return AbilityNotImplemented
return goblin_wizard, goblin_wizard,
@card("Standing Stones")
def standing_stones(card, abilities):
def standing_stones():
return AbilityNotImplemented
return standing_stones,
@card("Living Armor")
def living_armor(card, abilities):
def living_armor():
return AbilityNotImplemented
return living_armor,
@card("Grave Robbers")
def grave_robbers(card, abilities):
def grave_robbers():
return AbilityNotImplemented
return grave_robbers,
@card("Psychic Allergy")
def psychic_allergy(card, abilities):
def psychic_allergy():
return AbilityNotImplemented
def psychic_allergy():
return AbilityNotImplemented
def psychic_allergy():
return AbilityNotImplemented
return psychic_allergy, psychic_allergy, psychic_allergy,
@card("Fissure")
def fissure(card, abilities):
def fissure():
return AbilityNotImplemented
return fissure,
@card("Dark Sphere")
def dark_sphere(card, abilities):
def dark_sphere():
return AbilityNotImplemented
return dark_sphere,
@card("Whippoorwill")
def whippoorwill(card, abilities):
def whippoorwill():
return AbilityNotImplemented
return whippoorwill,
@card("Mana Vortex")
def mana_vortex(card, abilities):
def mana_vortex():
return AbilityNotImplemented
def mana_vortex():
return AbilityNotImplemented
def mana_vortex():
return AbilityNotImplemented
return mana_vortex, mana_vortex, mana_vortex,
@card("Flood")
def flood(card, abilities):
def flood():
return AbilityNotImplemented
return flood,
@card("Fasting")
def fasting(card, abilities):
def fasting():
return AbilityNotImplemented
def fasting():
return AbilityNotImplemented
def fasting():
return AbilityNotImplemented
return fasting, fasting, fasting,
@card("Tormod's Crypt")
def tormods_crypt(card, abilities):
def tormods_crypt():
return AbilityNotImplemented
return tormods_crypt,
@card("Safe Haven")
def safe_haven(card, abilities):
def safe_haven():
return AbilityNotImplemented
def safe_haven():
return AbilityNotImplemented
return safe_haven, safe_haven,
@card("Niall Silvain")
def niall_silvain(card, abilities):
def niall_silvain():
return AbilityNotImplemented
return niall_silvain,
@card("Murk Dwellers")
def murk_dwellers(card, abilities):
def murk_dwellers():
return AbilityNotImplemented
return murk_dwellers,
@card("Morale")
def morale(card, abilities):
def morale():
return AbilityNotImplemented
return morale,
@card("Giant Shark")
def giant_shark(card, abilities):
def giant_shark():
return AbilityNotImplemented
def giant_shark():
return AbilityNotImplemented
def giant_shark():
return AbilityNotImplemented
return giant_shark, giant_shark, giant_shark,
@card("Ashes to Ashes")
def ashes_to_ashes(card, abilities):
def ashes_to_ashes():
return AbilityNotImplemented
return ashes_to_ashes,
@card("Inferno")
def inferno(card, abilities):
def inferno():
return AbilityNotImplemented
return inferno,
@card("Brainwash")
def brainwash(card, abilities):
def brainwash():
return AbilityNotImplemented
def brainwash():
return AbilityNotImplemented
return brainwash, brainwash,
@card("Runesword")
def runesword(card, abilities):
def runesword():
return AbilityNotImplemented
return runesword,
@card("Goblins of the Flarg")
def goblins_of_the_flarg(card, abilities):
def goblins_of_the_flarg():
return AbilityNotImplemented
def goblins_of_the_flarg():
return AbilityNotImplemented
return goblins_of_the_flarg, goblins_of_the_flarg,
@card("Wormwood Treefolk")
def wormwood_treefolk(card, abilities):
def wormwood_treefolk():
return AbilityNotImplemented
def wormwood_treefolk():
return AbilityNotImplemented
return wormwood_treefolk, wormwood_treefolk,
@card("Dust to Dust")
def dust_to_dust(card, abilities):
def dust_to_dust():
return AbilityNotImplemented
return dust_to_dust,
@card("Scarwood Bandits")
def scarwood_bandits(card, abilities):
def scarwood_bandits():
return AbilityNotImplemented
def scarwood_bandits():
return AbilityNotImplemented
return scarwood_bandits, scarwood_bandits,
@card("Hidden Path")
def hidden_path(card, abilities):
def hidden_path():
return AbilityNotImplemented
return hidden_path,
@card("Electric Eel")
def electric_eel(card, abilities):
def electric_eel():
return AbilityNotImplemented
def electric_eel():
return AbilityNotImplemented
return electric_eel, electric_eel,
@card("Nameless Race")
def nameless_race(card, abilities):
def nameless_race():
return AbilityNotImplemented
def nameless_race():
return AbilityNotImplemented
def nameless_race():
return AbilityNotImplemented
return nameless_race, nameless_race, nameless_race,
@card("Lurker")
def lurker(card, abilities):
def lurker():
return AbilityNotImplemented
return lurker,
@card("W<NAME>")
def witch_hunter(card, abilities):
def witch_hunter():
return AbilityNotImplemented
def witch_hunter():
return AbilityNotImplemented
return witch_hunter, witch_hunter,
@card("Worms of the Earth")
def worms_of_the_earth(card, abilities):
def worms_of_the_earth():
return AbilityNotImplemented
def worms_of_the_earth():
return AbilityNotImplemented
def worms_of_the_earth():
return AbilityNotImplemented
return worms_of_the_earth, worms_of_the_earth, worms_of_the_earth,
@card("Skull of Orm")
def skull_of_orm(card, abilities):
def skull_of_orm():
return AbilityNotImplemented
return skull_of_orm,
@card("Marsh Gas")
def marsh_gas(card, abilities):
def marsh_gas():
return AbilityNotImplemented
return marsh_gas,
@card("Merfolk Assassin")
def merfolk_assassin(card, abilities):
def merfolk_assassin():
return AbilityNotImplemented
return merfolk_assassin,
@card("Word of Binding")
def word_of_binding(card, abilities):
def word_of_binding():
return AbilityNotImplemented
return word_of_binding,
@card("Carnivorous Plant")
def carnivorous_plant(card, abilities):
def carnivorous_plant():
return AbilityNotImplemented
return carnivorous_plant,
@card("Uncle Istvan")
def uncle_istvan(card, abilities):
def uncle_istvan():
return AbilityNotImplemented
return uncle_istvan,
@card("Marsh Viper")
def marsh_viper(card, abilities):
def marsh_viper():
return AbilityNotImplemented
return marsh_viper,
@card("Venom")
def venom(card, abilities):
def venom():
return AbilityNotImplemented
def venom():
return AbilityNotImplemented
return venom, venom,
@card("Curse Artifact")
def curse_artifact(card, abilities):
def curse_artifact():
return AbilityNotImplemented
def curse_artifact():
return AbilityNotImplemented
return curse_artifact, curse_artifact,
@card("The Fallen")
def the_fallen(card, abilities):
def the_fallen():
return AbilityNotImplemented
return the_fallen,
@card("Fellwar Stone")
def fellwar_stone(card, abilities):
def fellwar_stone():
return AbilityNotImplemented
return fellwar_stone,
@card("Riptide")
def riptide(card, abilities):
def riptide():
return AbilityNotImplemented
return riptide,
@card("Bog Rats")
def bog_rats(card, abilities):
def bog_rats():
return AbilityNotImplemented
return bog_rats,
@card("Wand of Ith")
def wand_of_ith(card, abilities):
def wand_of_ith():
return AbilityNotImplemented
return wand_of_ith,
@card("Scarecrow")
def scarecrow(card, abilities):
def scarecrow():
return AbilityNotImplemented
return scarecrow,
@card("Miracle Worker")
def miracle_worker(card, abilities):
def miracle_worker():
return AbilityNotImplemented
return miracle_worker,
@card("Ball Lightning")
def ball_lightning(card, abilities):
def ball_lightning():
return AbilityNotImplemented
def ball_lightning():
return AbilityNotImplemented
def ball_lightning():
return AbilityNotImplemented
return ball_lightning, ball_lightning, ball_lightning,
@card("Elves of Deep Shadow")
def elves_of_deep_shadow(card, abilities):
def elves_of_deep_shadow():
return AbilityNotImplemented
return elves_of_deep_shadow,
@card("Bone Flute")
def bone_flute(card, abilities):
def bone_flute():
return AbilityNotImplemented
return bone_flute,
@card("Goblin Caves")
def goblin_caves(card, abilities):
def goblin_caves():
return AbilityNotImplemented
def goblin_caves():
return AbilityNotImplemented
return goblin_caves, goblin_caves,
@card("Inquisition")
def inquisition(card, abilities):
def inquisition():
return AbilityNotImplemented
return inquisition,
@card("Fire Drake")
def fire_drake(card, abilities):
def fire_drake():
return AbilityNotImplemented
def fire_drake():
return AbilityNotImplemented
return fire_drake, fire_drake,
@card("Water Wurm")
def water_wurm(card, abilities):
def water_wurm():
return AbilityNotImplemented
return water_wurm,
@card("Land Leeches")
def land_leeches(card, abilities):
def land_leeches():
return AbilityNotImplemented
return land_leeches,
@card("Savaen Elves")
def savaen_elves(card, abilities):
def savaen_elves():
return AbilityNotImplemented
return savaen_elves,
@card("Drowned")
def drowned(card, abilities):
def drowned():
return AbilityNotImplemented
return drowned,
@card("Cave People")
def cave_people(card, abilities):
def cave_people():
return AbilityNotImplemented
def cave_people():
return AbilityNotImplemented
return cave_people, cave_people,
@card("War Barge")
def war_barge(card, abilities):
def war_barge():
return AbilityNotImplemented
return war_barge,
@card("Marsh Goblins")
def marsh_goblins(card, abilities):
def marsh_goblins():
return AbilityNotImplemented
return marsh_goblins,
@card("Eater of the Dead")
def eater_of_the_dead(card, abilities):
def eater_of_the_dead():
return AbilityNotImplemented
return eater_of_the_dead,
@card("Dark Heart of the Wood")
def dark_heart_of_the_wood(card, abilities):
def dark_heart_of_the_wood():
return AbilityNotImplemented
return dark_heart_of_the_wood,
@card("Spitting Slug")
def spitting_slug(card, abilities):
def spitting_slug():
return AbilityNotImplemented
return spitting_slug,
@card("Orc General")
def orc_general(card, abilities):
def orc_general():
return AbilityNotImplemented
return orc_general,
@card("Frankenstein's Monster")
def frankensteins_monster(card, abilities):
def frankensteins_monster():
return AbilityNotImplemented
return frankensteins_monster,
@card("Mana Clash")
def mana_clash(card, abilities):
def mana_clash():
return AbilityNotImplemented
return mana_clash,
@card("Erosion")
def erosion(card, abilities):
def erosion():
return AbilityNotImplemented
def erosion():
return AbilityNotImplemented
return erosion, erosion,
@card("Dance of Many")
def dance_of_many(card, abilities):
def dance_of_many():
return AbilityNotImplemented
def dance_of_many():
return AbilityNotImplemented
def dance_of_many():
return AbilityNotImplemented
def dance_of_many():
return AbilityNotImplemented
return dance_of_many, dance_of_many, dance_of_many, dance_of_many,
@card("Tivadar's Crusade")
def tivadars_crusade(card, abilities):
def tivadars_crusade():
return AbilityNotImplemented
return tivadars_crusade,
@card("Brothers of Fire")
def brothers_of_fire(card, abilities):
def brothers_of_fire():
return AbilityNotImplemented
return brothers_of_fire,
@card("Bog Imp")
def bog_imp(card, abilities):
def bog_imp():
return AbilityNotImplemented
return bog_imp,
@card("Fountain of Youth")
def fountain_of_youth(card, abilities):
def fountain_of_youth():
return AbilityNotImplemented
return fountain_of_youth,
@card("Mind Bomb")
def mind_bomb(card, abilities):
def mind_bomb():
return AbilityNotImplemented
return mind_bomb,
@card("Reflecting Mirror")
def reflecting_mirror(card, abilities):
def reflecting_mirror():
return AbilityNotImplemented
return reflecting_mirror,
@card("Tracker")
def tracker(card, abilities):
def tracker():
return AbilityNotImplemented
return tracker,
@card("Angry Mob")
def angry_mob(card, abilities):
def angry_mob():
return AbilityNotImplemented
def angry_mob():
return AbilityNotImplemented
return angry_mob, angry_mob,
@card("Banshee")
def banshee(card, abilities):
def banshee():
return AbilityNotImplemented
return banshee,
@card("Preacher")
def preacher(card, abilities):
def preacher():
return AbilityNotImplemented
def preacher():
return AbilityNotImplemented
return preacher, preacher,
@card("Necropolis")
def necropolis(card, abilities):
def necropolis():
return AbilityNotImplemented
def necropolis():
return AbilityNotImplemented
return necropolis, necropolis,
@card("People of the Woods")
def people_of_the_woods(card, abilities):
def people_of_the_woods():
return AbilityNotImplemented
return people_of_the_woods,
@card("Martyr's Cry")
def martyrs_cry(card, abilities):
def martyrs_cry():
return AbilityNotImplemented
return martyrs_cry,
@card("Tower of Coireall")
def tower_of_coireall(card, abilities):
def tower_of_coireall():
return AbilityNotImplemented
return tower_of_coireall,
@card("Apprentice Wizard")
def apprentice_wizard(card, abilities):
def apprentice_wizard():
return AbilityNotImplemented
return apprentice_wizard,
@card("Diabolic Machine")
def diabolic_machine(card, abilities):
def diabolic_machine():
return AbilityNotImplemented
return diabolic_machine,
@card("Scavenger Folk")
def scavenger_folk(card, abilities):
def scavenger_folk():
return AbilityNotImplemented
return scavenger_folk,
@card("Sunken City")
def sunken_city(card, abilities):
def sunken_city():
return AbilityNotImplemented
def sunken_city():
return AbilityNotImplemented
return sunken_city, sunken_city,
@card("Exorcist")
def exorcist(card, abilities):
def exorcist():
return AbilityNotImplemented
return exorcist,
@card("Goblin Rock Sled")
def goblin_rock_sled(card, abilities):
def goblin_rock_sled():
return AbilityNotImplemented
def goblin_rock_sled():
return AbilityNotImplemented
def goblin_rock_sled():
return AbilityNotImplemented
return goblin_rock_sled, goblin_rock_sled, goblin_rock_sled,
@card("Barl's Cage")
def barls_cage(card, abilities):
def barls_cage():
return AbilityNotImplemented
return barls_cage,
@card("Pikemen")
def pikemen(card, abilities):
def pikemen():
return AbilityNotImplemented
return pikemen,
@card("Goblin Shrine")
def goblin_shrine(card, abilities):
def goblin_shrine():
return AbilityNotImplemented
def goblin_shrine():
return AbilityNotImplemented
def goblin_shrine():
return AbilityNotImplemented
return goblin_shrine, goblin_shrine, goblin_shrine,
@card("Goblin Digging Team")
def goblin_digging_team(card, abilities):
def goblin_digging_team():
return AbilityNotImplemented
return goblin_digging_team,
@card("Deep Water")
def deep_water(card, abilities):
def deep_water():
return AbilityNotImplemented
return deep_water,
@card("Festival")
def festival(card, abilities):
def festival():
return AbilityNotImplemented
def festival():
return AbilityNotImplemented
return festival, festival,
@card("Book of Rass")
def book_of_rass(card, abilities):
def book_of_rass():
return AbilityNotImplemented
return book_of_rass,
@card("Scarwood Hag")
def scarwood_hag(card, abilities):
def scarwood_hag():
return AbilityNotImplemented
def scarwood_hag():
return AbilityNotImplemented
return scarwood_hag, scarwood_hag,
@card("Ghost Ship")
def ghost_ship(card, abilities):
def ghost_ship():
return AbilityNotImplemented
def ghost_ship():
return AbilityNotImplemented
return ghost_ship, ghost_ship,
@card("Rag Man")
def rag_man(card, abilities):
def rag_man():
return AbilityNotImplemented
return rag_man,
@card("Gaea's Touch")
def gaeas_touch(card, abilities):
def gaeas_touch():
return AbilityNotImplemented
def gaeas_touch():
return AbilityNotImplemented
return gaeas_touch, gaeas_touch,
@card("Knights of Thorn")
def knights_of_thorn(card, abilities):
def knights_of_thorn():
return AbilityNotImplemented
return knights_of_thorn,
@card("Holy Light")
def holy_light(card, abilities):
def holy_light():
return AbilityNotImplemented
return holy_light,
@card("Cleansing")
def cleansing(card, abilities):
def cleansing():
return AbilityNotImplemented
return cleansing,
@card("Amnesia")
def amnesia(card, abilities):
def amnesia():
return AbilityNotImplemented
return amnesia,
@card("City of Shadows")
def city_of_shadows(card, abilities):
def city_of_shadows():
return AbilityNotImplemented
def city_of_shadows():
return AbilityNotImplemented
return city_of_shadows, city_of_shadows,
@card("Blood of the Martyr")
def blood_of_the_martyr(card, abilities):
def blood_of_the_martyr():
return AbilityNotImplemented
return blood_of_the_martyr,
@card("Eternal Flame")
def eternal_flame(card, abilities):
def eternal_flame():
return AbilityNotImplemented
return eternal_flame,
@card("Sorrow's Path")
def sorrows_path(card, abilities):
def sorrows_path():
return AbilityNotImplemented
def sorrows_path():
return AbilityNotImplemented
return sorrows_path, sorrows_path,
@card("Coal Golem")
def coal_golem(card, abilities):
def coal_golem():
return AbilityNotImplemented
return coal_golem,
@card("Leviathan")
def leviathan(card, abilities):
def leviathan():
return AbilityNotImplemented
def leviathan():
return AbilityNotImplemented
def leviathan():
return AbilityNotImplemented
def leviathan():
return AbilityNotImplemented
return leviathan, leviathan, leviathan, leviathan,
@card("Tangle Kelp")
def tangle_kelp(card, abilities):
def tangle_kelp():
return AbilityNotImplemented
def tangle_kelp():
return AbilityNotImplemented
def tangle_kelp():
return AbilityNotImplemented
return tangle_kelp, tangle_kelp, tangle_kelp,
@card("Blood | |
# <gh_stars>0  -- extraction/scaffold artifact, commented out so the module parses
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from ._testing import _make_credentials
class MultiCallableStub(object):
    """Stub for the grpc.UnaryUnaryMultiCallable interface.

    Each invocation is recorded on the owning channel stub's ``requests``
    list as ``(method, request)`` and answered with the next canned
    response popped from the end of the channel stub's ``responses``.
    """

    def __init__(self, method, channel_stub):
        self.method = method
        self.channel_stub = channel_stub

    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        """Record the call and replay the channel's next canned response."""
        channel = self.channel_stub
        channel.requests.append((self.method, request))
        return channel.responses.pop()
class ChannelStub(object):
    """Stub for the grpc.Channel interface.

    Records every request issued through its multi-callables in
    ``requests`` and replays canned ``responses`` (popped from the end).
    """

    def __init__(self, responses=None):
        # Bug fix: the original signature used a mutable default
        # (``responses=[]``), which is evaluated once and shared by every
        # ChannelStub constructed without an argument -- responses pushed
        # into one stub would leak into all others.  Default to None and
        # build a fresh list per instance instead.
        self.responses = [] if responses is None else responses
        self.requests = []

    def unary_unary(self,
                    method,
                    request_serializer=None,
                    response_deserializer=None):
        """Return a stub multi-callable bound to *method* and this channel."""
        return MultiCallableStub(method, self)
class TestInstance(unittest.TestCase):
    """Unit tests for ``google.cloud.bigtable.instance.Instance``."""

    # Fixed identifiers shared by the tests below.
    PROJECT = 'project'
    INSTANCE_ID = 'instance-id'
    INSTANCE_NAME = 'projects/' + PROJECT + '/instances/' + INSTANCE_ID
    LOCATION_ID = 'locname'
    LOCATION = 'projects/' + PROJECT + '/locations/' + LOCATION_ID
    APP_PROFILE_PATH = (
        'projects/' + PROJECT + '/instances/' + INSTANCE_ID
        + '/appProfiles/')
    DISPLAY_NAME = 'display_name'
    OP_ID = 8915
    # NOTE(review): there is no '/' between the instance id and 'operations'
    # in this template -- presumably deliberate for these tests; verify.
    OP_NAME = ('operations/projects/%s/instances/%soperations/%d' %
               (PROJECT, INSTANCE_ID, OP_ID))
    TABLE_ID = 'table_id'
    TABLE_NAME = INSTANCE_NAME + '/tables/' + TABLE_ID
@staticmethod
def _get_target_class():
    """Return the class under test (bigtable ``Instance``)."""
    from google.cloud.bigtable.instance import Instance
    return Instance

def _make_one(self, *args, **kwargs):
    """Construct an ``Instance`` under test with the given arguments."""
    return self._get_target_class()(*args, **kwargs)

@staticmethod
def _get_target_client_class():
    """Return the bigtable ``Client`` class used to build test clients."""
    from google.cloud.bigtable.client import Client
    return Client

def _make_client(self, *args, **kwargs):
    """Construct a bigtable ``Client`` for use in the tests."""
    return self._get_target_client_class()(*args, **kwargs)
def test_constructor_defaults(self):
    """display_name falls back to the instance id; client stored as-is."""
    client = object()
    instance = self._make_one(self.INSTANCE_ID, client)
    self.assertEqual(instance.instance_id, self.INSTANCE_ID)
    self.assertEqual(instance.display_name, self.INSTANCE_ID)
    self.assertIs(instance._client, client)

def test_constructor_non_default(self):
    """An explicit display_name overrides the id-based default."""
    display_name = 'display_name'
    client = object()
    instance = self._make_one(self.INSTANCE_ID, client,
                              display_name=display_name)
    self.assertEqual(instance.instance_id, self.INSTANCE_ID)
    self.assertEqual(instance.display_name, display_name)
    self.assertIs(instance._client, client)

def test_table_factory(self):
    """instance.table() returns a Table bound to this instance."""
    from google.cloud.bigtable.table import Table
    instance = self._make_one(self.INSTANCE_ID, None)
    table = instance.table(self.TABLE_ID)
    self.assertIsInstance(table, Table)
    self.assertEqual(table.table_id, self.TABLE_ID)
    self.assertEqual(table._instance, instance)
def test__update_from_pb_success(self):
    """_update_from_pb copies display_name from the protobuf."""
    from google.cloud.bigtable_admin_v2.proto import (
        instance_pb2 as data_v2_pb2)
    display_name = 'display_name'
    instance_pb = data_v2_pb2.Instance(
        display_name=display_name,
    )
    instance = self._make_one(None, None)
    self.assertIsNone(instance.display_name)
    instance._update_from_pb(instance_pb)
    self.assertEqual(instance.display_name, display_name)

def test__update_from_pb_no_display_name(self):
    """_update_from_pb raises ValueError when display_name is absent."""
    from google.cloud.bigtable_admin_v2.proto import (
        instance_pb2 as data_v2_pb2)
    instance_pb = data_v2_pb2.Instance()
    instance = self._make_one(None, None)
    self.assertIsNone(instance.display_name)
    with self.assertRaises(ValueError):
        instance._update_from_pb(instance_pb)
def test_from_pb_success(self):
    """from_pb parses the instance id from a well-formed resource name."""
    from google.cloud.bigtable_admin_v2.proto import (
        instance_pb2 as data_v2_pb2)
    client = _Client(project=self.PROJECT)
    instance_pb = data_v2_pb2.Instance(
        name=self.INSTANCE_NAME,
        display_name=self.INSTANCE_ID,
    )
    klass = self._get_target_class()
    instance = klass.from_pb(instance_pb, client)
    self.assertIsInstance(instance, klass)
    self.assertEqual(instance._client, client)
    self.assertEqual(instance.instance_id, self.INSTANCE_ID)

def test_from_pb_bad_instance_name(self):
    """from_pb raises ValueError for a malformed resource name."""
    from google.cloud.bigtable_admin_v2.proto import (
        instance_pb2 as data_v2_pb2)
    instance_name = 'INCORRECT_FORMAT'
    instance_pb = data_v2_pb2.Instance(name=instance_name)
    klass = self._get_target_class()
    with self.assertRaises(ValueError):
        klass.from_pb(instance_pb, None)

def test_from_pb_project_mistmatch(self):
    """from_pb raises ValueError when the pb's project != client's.

    NOTE(review): the 'mistmatch' typo in the method name is kept, since
    renaming would change the public test name.
    """
    from google.cloud.bigtable_admin_v2.proto import (
        instance_pb2 as data_v2_pb2)
    ALT_PROJECT = 'ALT_PROJECT'
    client = _Client(project=ALT_PROJECT)
    self.assertNotEqual(self.PROJECT, ALT_PROJECT)
    instance_pb = data_v2_pb2.Instance(name=self.INSTANCE_NAME)
    klass = self._get_target_class()
    with self.assertRaises(ValueError):
        klass.from_pb(instance_pb, client)
def test_name_property(self):
    """instance.name composes the full resource name from its parts."""
    from google.cloud.bigtable_admin_v2.gapic import (
        bigtable_instance_admin_client)
    api = bigtable_instance_admin_client.BigtableInstanceAdminClient(
        mock.Mock())
    credentials = _make_credentials()
    client = self._make_client(project=self.PROJECT,
                               credentials=credentials, admin=True)
    # Patch the API client used by the property.
    client._instance_admin_client = api
    instance = self._make_one(self.INSTANCE_ID, client)
    self.assertEqual(instance.name, self.INSTANCE_NAME)
def test___eq__(self):
    """Two instances with the same id and client compare equal."""
    client = object()
    instance1 = self._make_one(self.INSTANCE_ID, client)
    instance2 = self._make_one(self.INSTANCE_ID, client)
    self.assertEqual(instance1, instance2)

def test___eq__type_differ(self):
    """An Instance never equals an object of another type."""
    client = object()
    instance1 = self._make_one(self.INSTANCE_ID, client)
    instance2 = object()
    self.assertNotEqual(instance1, instance2)

def test___ne__same_value(self):
    """!= is False for instances with identical id and client."""
    client = object()
    instance1 = self._make_one(self.INSTANCE_ID, client)
    instance2 = self._make_one(self.INSTANCE_ID, client)
    comparison_val = (instance1 != instance2)
    self.assertFalse(comparison_val)

def test___ne__(self):
    """!= is True when id or client differ."""
    instance1 = self._make_one('instance_id1', 'client1')
    instance2 = self._make_one('instance_id2', 'client2')
    self.assertNotEqual(instance1, instance2)
def test_reload(self):
    """reload() fetches the instance pb and refreshes display_name."""
    from google.cloud.bigtable_admin_v2.proto import (
        instance_pb2 as data_v2_pb2)
    from google.cloud.bigtable_admin_v2.gapic import (
        bigtable_instance_admin_client)
    api = bigtable_instance_admin_client.BigtableInstanceAdminClient(
        mock.Mock())
    credentials = _make_credentials()
    client = self._make_client(project=self.PROJECT,
                               credentials=credentials, admin=True)
    instance = self._make_one(self.INSTANCE_ID, client)
    # Create response_pb returned by the stubbed GetInstance call.
    DISPLAY_NAME = u'hey-hi-hello'
    response_pb = data_v2_pb2.Instance(
        display_name=DISPLAY_NAME,
    )
    # Patch the stub used by the API method.
    client._instance_admin_client = api
    bigtable_instance_stub = (
        client._instance_admin_client.bigtable_instance_admin_stub)
    bigtable_instance_stub.GetInstance.side_effect = [response_pb]
    # Create expected_result.
    expected_result = None  # reload() has no return value.
    # Check Instance optional config values before.
    self.assertEqual(instance.display_name, self.INSTANCE_ID)
    # Perform the method and check the result.
    result = instance.reload()
    self.assertEqual(result, expected_result)
    # Check Instance optional config values after the refresh.
    self.assertEqual(instance.display_name, DISPLAY_NAME)
def test_create(self):
    """create() with defaults sends the expected CreateInstanceRequest and
    returns a long-running operation carrying CreateInstanceMetadata."""
    import datetime
    from google.api_core import operation
    from google.longrunning import operations_pb2
    from google.protobuf.any_pb2 import Any
    from google.cloud.bigtable_admin_v2.proto import (
        bigtable_instance_admin_pb2 as messages_v2_pb2)
    from google.cloud._helpers import _datetime_to_pb_timestamp
    from google.cloud.bigtable_admin_v2 import enums
    from google.cloud.bigtable_admin_v2.gapic import (
        bigtable_instance_admin_client)
    from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES
    NOW = datetime.datetime.utcnow()
    NOW_PB = _datetime_to_pb_timestamp(NOW)
    credentials = _make_credentials()
    client = self._make_client(project=self.PROJECT,
                               credentials=credentials, admin=True)
    instance = self._make_one(self.INSTANCE_ID, client,
                              display_name=self.DISPLAY_NAME)
    # Create response_pb: an Operation whose metadata Any wraps
    # CreateInstanceMetadata stamped with the request time.
    metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB)
    type_url = 'type.googleapis.com/%s' % (
        messages_v2_pb2.CreateInstanceMetadata.DESCRIPTOR.full_name,)
    response_pb = operations_pb2.Operation(
        name=self.OP_NAME,
        metadata=Any(
            type_url=type_url,
            value=metadata.SerializeToString(),
        )
    )
    # Patch the stub used by the API method.
    channel = ChannelStub(responses=[response_pb])
    instance_api = (
        bigtable_instance_admin_client.BigtableInstanceAdminClient(
            channel=channel))
    client._instance_admin_client = instance_api
    # Perform the method and check the result.
    result = instance.create(location_id=self.LOCATION_ID)
    actual_request = channel.requests[0][1]
    cluster_id = '{}-cluster'.format(self.INSTANCE_ID)
    cluster = self._create_cluster(
        instance_api, cluster_id, self.LOCATION_ID, DEFAULT_SERVE_NODES,
        enums.StorageType.STORAGE_TYPE_UNSPECIFIED)
    expected_request = self._create_instance_request(
        self.DISPLAY_NAME,
        {cluster_id: cluster}
    )
    self.assertEqual(expected_request, actual_request)
    self.assertIsInstance(result, operation.Operation)
    # self.assertEqual(result.operation.name, self.OP_NAME)
    self.assertIsInstance(result.metadata,
                          messages_v2_pb2.CreateInstanceMetadata)
def test_create_w_explicit_serve_nodes(self):
    """create() honors explicit serve_nodes and default_storage_type."""
    from google.api_core import operation
    from google.longrunning import operations_pb2
    from google.cloud.bigtable_admin_v2 import enums
    from google.cloud.bigtable_admin_v2.gapic import (
        bigtable_instance_admin_client)
    serve_nodes = 10
    credentials = _make_credentials()
    client = self._make_client(project=self.PROJECT,
                               credentials=credentials, admin=True)
    instance = self._make_one(self.INSTANCE_ID, client,
                              display_name=self.DISPLAY_NAME)
    # Create response_pb
    response_pb = operations_pb2.Operation(name=self.OP_NAME)
    # Patch the stub used by the API method.
    channel = ChannelStub(responses=[response_pb])
    instance_api = (
        bigtable_instance_admin_client.BigtableInstanceAdminClient(
            channel=channel))
    client._instance_admin_client = instance_api
    # Perform the method and check the captured request.
    result = instance.create(
        location_id=self.LOCATION_ID, serve_nodes=serve_nodes,
        default_storage_type=enums.StorageType.SSD)
    actual_request = channel.requests[0][1]
    cluster_id = '{}-cluster'.format(self.INSTANCE_ID)
    cluster = self._create_cluster(
        instance_api, cluster_id, self.LOCATION_ID, serve_nodes,
        enums.StorageType.SSD)
    expected_request = self._create_instance_request(
        self.DISPLAY_NAME,
        {cluster_id: cluster}
    )
    self.assertEqual(expected_request, actual_request)
    self.assertIsInstance(result, operation.Operation)
def _create_cluster(self, instance_api, cluster_id, location_id,
                    server_nodes, storage_type):
    """Build the Cluster pb expected inside a CreateInstanceRequest.

    NOTE(review): the parameter is spelled ``server_nodes`` (callers pass
    serve-node counts); renaming would alter the helper's signature.
    """
    from google.cloud.bigtable_admin_v2.types import instance_pb2
    cluster_name = instance_api.cluster_path(
        self.PROJECT, self.INSTANCE_ID, cluster_id)
    location = instance_api.location_path(
        self.PROJECT, location_id)
    return instance_pb2.Cluster(
        name=cluster_name, location=location,
        serve_nodes=server_nodes,
        default_storage_type=storage_type)

def _create_instance_request(self, display_name, clusters):
    """Build the CreateInstanceRequest pb the tests compare against."""
    from google.cloud.bigtable_admin_v2.proto import (
        bigtable_instance_admin_pb2 as messages_v2_pb2)
    from google.cloud.bigtable_admin_v2.types import instance_pb2
    instance = instance_pb2.Instance(display_name=display_name)
    return messages_v2_pb2.CreateInstanceRequest(
        parent='projects/%s' % (self.PROJECT),
        instance_id=self.INSTANCE_ID,
        instance=instance,
        clusters=clusters
    )
def test_update(self):
    """update() completes against the mocked admin API and returns None."""
    from google.cloud.bigtable_admin_v2.gapic import (
        bigtable_instance_admin_client)
    api = bigtable_instance_admin_client.BigtableInstanceAdminClient(
        mock.Mock())
    credentials = _make_credentials()
    client = self._make_client(project=self.PROJECT,
                               credentials=credentials, admin=True)
    instance = self._make_one(self.INSTANCE_ID, client,
                              display_name=self.DISPLAY_NAME)
    # Mock api calls
    client._instance_admin_client = api
    # Create expected_result.
    expected_result = None
    # Perform the method and check the result.
    result = instance.update()
    self.assertEqual(result, expected_result)

def test_delete(self):
    """delete() completes against the mocked admin API and returns None."""
    from google.cloud.bigtable_admin_v2.gapic import (
        bigtable_instance_admin_client)
    api = bigtable_instance_admin_client.BigtableInstanceAdminClient(
        mock.Mock())
    credentials = _make_credentials()
    client = self._make_client(project=self.PROJECT,
                               credentials=credentials, admin=True)
    instance = self._make_one(self.INSTANCE_ID, client)
    # Mock api calls
    client._instance_admin_client = api
    # Create expected_result.
    expected_result = None  # delete() has no return value.
    # Perform the method and check the result.
    result = instance.delete()
    self.assertEqual(result, expected_result)
def _list_tables_helper(self, table_name=None):
    """Drive list_tables() against a stubbed ListTables response.

    *table_name* overrides the resource name returned by the stub so the
    failure tests can feed malformed names through the same path.
    """
    from google.cloud.bigtable_admin_v2.proto import (
        table_pb2 as table_data_v2_pb2)
    from google.cloud.bigtable_admin_v2.proto import (
        bigtable_table_admin_pb2 as table_messages_v1_pb2)
    from google.cloud.bigtable_admin_v2.gapic import (
        bigtable_table_admin_client, bigtable_instance_admin_client)
    table_api = bigtable_table_admin_client.BigtableTableAdminClient(
        mock.Mock())
    instance_api = (
        bigtable_instance_admin_client.BigtableInstanceAdminClient(
            mock.Mock()))
    credentials = _make_credentials()
    client = self._make_client(project=self.PROJECT,
                               credentials=credentials, admin=True)
    instance = self._make_one(self.INSTANCE_ID, client)
    # Create response_pb
    if table_name is None:
        table_name = self.TABLE_NAME
    response_pb = table_messages_v1_pb2.ListTablesResponse(
        tables=[
            table_data_v2_pb2.Table(name=table_name),
        ],
    )
    # Patch the stub used by the API method.
    client._table_admin_client = table_api
    client._instance_admin_client = instance_api
    bigtable_table_stub = (
        client._table_admin_client.bigtable_table_admin_stub)
    bigtable_table_stub.ListTables.side_effect = [response_pb]
    # Create expected_result.
    expected_table = instance.table(self.TABLE_ID)
    expected_result = [expected_table]
    # Perform the method and check the result.
    result = instance.list_tables()
    self.assertEqual(result, expected_result)

def test_list_tables(self):
    """Happy path: a well-formed table name round-trips."""
    self._list_tables_helper()

def test_list_tables_failure_bad_split(self):
    """A name that does not split as a table resource raises ValueError."""
    with self.assertRaises(ValueError):
        self._list_tables_helper(table_name='wrong-format')

def test_list_tables_failure_name_bad_before(self):
    """A valid-looking name with a junk prefix raises ValueError."""
    BAD_TABLE_NAME = ('nonempty-section-before' +
                      'projects/' + self.PROJECT +
                      '/instances/' + self.INSTANCE_ID +
                      '/tables/' + self.TABLE_ID)
    with self.assertRaises(ValueError):
        self._list_tables_helper(table_name=BAD_TABLE_NAME)
def test_create_app_profile_with_wrong_routing_policy(self):
credentials = _make_credentials()
client = self._make_client(project=self.PROJECT,
credentials=credentials, admin=True)
instance = self._make_one(self.INSTANCE_ID, client)
app_profile_id = 'appProfileId1262094415'
update_mask = []
# Create AppProfile with exception
with self.assertRaises(ValueError):
instance.create_app_profile(app_profile_id=app_profile_id,
routing_policy_type=None)
with self.assertRaises(ValueError):
instance.update_app_profile(app_profile_id,
update_mask=update_mask,
routing_policy_type=None)
def test_create_app_profile_with_multi_routing_policy(self):
from google.cloud.bigtable_admin_v2.proto import instance_pb2
from google.cloud.bigtable_admin_v2.gapic import (
bigtable_instance_admin_client)
credentials = _make_credentials()
client = self._make_client(project=self.PROJECT,
credentials=credentials, admin=True)
instance = self._make_one(self.INSTANCE_ID, client)
description = 'description-1724546052'
app_profile_id = 'appProfileId1262094415'
expected_response = {
'name': self.APP_PROFILE_PATH + app_profile_id,
'description': description,
'multi_cluster_routing_use_any':
instance_pb2.AppProfile.MultiClusterRoutingUseAny()
}
expected_request = {
'app_profile_id': app_profile_id,
'routing_policy_type': 1,
'description': description
}
expected_response = instance_pb2.AppProfile(**expected_response)
channel = ChannelStub(responses=[expected_response])
instance_api = (
bigtable_instance_admin_client.BigtableInstanceAdminClient(
channel=channel))
# Patch the stub used by the API method.
client._instance_admin_client = instance_api
# Perform the method and check the result.
result = instance.create_app_profile(**expected_request)
parent = client._instance_admin_client.instance_path(
self.PROJECT, self.INSTANCE_ID)
expected_request = _CreateAppProfileRequestPB(
parent=parent, app_profile_id=app_profile_id,
app_profile=expected_response,
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
self.assertEqual(result, | |
are using the pure Python implementation ' +
'of fast fca.')
# Return output
mining_results = _fast_fca(
context,
min_c=min_occ,
min_z=min_spikes,
max_z=max_spikes,
max_c=max_occ,
winlen=winlen,
min_neu=min_neu,
report=report)
return mining_results, rel_matrix
def _build_context(binary_matrix, winlen, only_windows_with_first_spike=True):
"""
Building the context given a matrix (number of trains x number of bins) of
binned spike trains
Parameters
----------
binary_matrix : sparse.coo_matrix
Binary matrix containing the binned spike trains
winlen : int
Length of the binsize used to bin the data
only_windows_with_first_spike : bool
Whether to consider every window or only the one with a spike in the
first bin. It is possible to discard windows without a spike in the
first bin because the same configuration of spikes will be repeated
in a following window, just with different position for the first spike
Default: True
Returns
--------
context : list
List of tuples containing one object (window position idx) and one of
the correspondent spikes idx (bin idx * neuron idx)
transactions : list
List of all transactions, each element of the list contains the
attributes of the corresponding object.
rel_matrix : sparse.coo_matrix
A binary matrix with shape (number of windows, winlen*len(data)). Each
row corresponds to a window (order according to
their position in time).
Each column corresponds to one bin and one neuron and it is 0 if no
spikes or 1 if one or more spikes occurred in that bin for that
particular neuron.
E.g. the entry [0,0] of this matrix corresponds to the first bin of the
first window position for the first neuron, the entry [0,winlen] to the
first bin of the first window position for the second neuron.
"""
# Initialization of the outputs
context = []
transactions = []
num_neurons, num_bins = binary_matrix.shape
indices = np.argsort(binary_matrix.col)
binary_matrix.row = binary_matrix.row[indices]
binary_matrix.col = binary_matrix.col[indices]
if only_windows_with_first_spike:
# out of all window positions
# get all non-empty first bins
window_indices = np.unique(binary_matrix.col)
else:
window_indices = np.arange(num_bins - winlen + 1)
windows_row = []
windows_col = []
for window_idx in window_indices:
for col in range(window_idx, window_idx + winlen):
if col in binary_matrix.col:
nonzero_indices = np.nonzero(binary_matrix.col == col)[0]
windows_col.extend(
binary_matrix.row[nonzero_indices] * winlen
+ (col - window_idx))
windows_row.extend([window_idx] * len(nonzero_indices))
# Shape of the rel_matrix:
# (num of window positions,
# num of bins in one window * number of neurons)
rel_matrix = sparse.coo_matrix(
(np.ones((len(windows_col)), dtype=bool),
(windows_row, windows_col)),
shape=(num_bins, winlen * num_neurons),
dtype=bool).A
# Array containing all the possible attributes (each spike is indexed by
# a number equal to neu idx*winlen + bin_idx)
attributes = np.array(
[s * winlen + t for s in range(binary_matrix.shape[0])
for t in range(winlen)])
# Building context and rel_matrix
# Looping all the window positions w
for w in window_indices:
# spikes in the current window
times = rel_matrix[w]
current_transactions = attributes[times]
# adding to the context the window positions and the correspondent
# attributes (spike idx) (fast_fca input)
context += [(w, a) for a in current_transactions]
# appending to the transactions spike idx (fast_fca input) of the
# current window (fpgrowth input)
transactions.append(list(current_transactions))
# Return context and rel_matrix
return context, transactions, rel_matrix
def _fpgrowth(transactions, min_c=2, min_z=2, max_z=None,
max_c=None, rel_matrix=None, winlen=1, min_neu=1,
target='c', report='a'):
"""
Find frequent item sets with the fpgrowth algorithm.
Parameters
----------
transactions: tuple
Transactions database to mine.
The database must be an iterable of transactions;
each transaction must be an iterable of items;
each item must be a hashable object.
If the database is a dictionary, the transactions are
the keys, the values their (integer) multiplicities.
target: str
type of frequent item sets to find
s/a sets/all all frequent item sets
c closed closed frequent item sets
m maximal maximal frequent item sets
g gens generators
Default:'c'
min_c: int
minimum support of an item set
Default: 2
min_z: int
minimum number of items per item set
Default: 2
max_z: None/int
maximum number of items per item set. If max_c==None no maximal
size required
Default: None
max_c: None/int
maximum support per item set. If max_c==None no maximal
support required
Default: None
report: str
'a': all the mined patterns
'#': pattern spectrum using as signature the pair:
(number of spikes, number of occurrence)
'3d#': pattern spectrum using as signature the triplets:
(number of spikes, number of occurrence, difference between the
times of the last and the first spike of the pattern)
Default: 'a'
rel_matrix : None or sparse.coo_matrix
A binary matrix with shape (number of windows, winlen*len(data)). Each
row corresponds to a window (order according to
their position in time).
Each column corresponds to one bin and one neuron and it is 0 if no
spikes or 1 if one or more spikes occurred in that bin for that
particular neuron.
E.g. the entry [0,0] of this matrix corresponds to the first bin of the
first window position for the first neuron, the entry [0,winlen] to the
first bin of the first window position for the second neuron.
If == None only the closed frequent itemsets (intent) are returned and
not which the index of their occurrences (extent)
Default: None
The following parameters are specific to Massive parallel SpikeTrains
winlen: int (positive)
The size (number of bins) of the sliding window used for the
analysis. The maximal length of a pattern (delay between first and
last spike) is then given by winlen*binsize
Default: 1
min_neu: int (positive)
Minimum number of neurons in a sequence to considered a
potential pattern.
Default: 1
Returns
--------
If report == 'a':
All the pattern candidates (concepts) found in the data. Each
pattern is represented as a tuple containing
(spike IDs, discrete times (window position)
of the occurrences of the pattern). The spike IDs are defined as:
spike_id=neuron_id*bin_id; with neuron_id in [0, len(data)] and
bin_id in [0, winlen].
If report == '#':
The pattern spectrum is represented as a list of triplets each
formed by:
(pattern size, number of occurrences, number of patterns)
If report == '3d#':
The pattern spectrum is represented as a list of quadruplets each
formed by:
(pattern size, number of occurrences, difference between last
and first spike of the pattern, number of patterns)
"""
if min_neu < 1:
raise AttributeError('min_neu must be an integer >=1')
# By default, set the maximum pattern size to the number of spiketrains
if max_z is None:
max_z = np.max((np.max([len(tr) for tr in transactions]), min_z + 1))
# By default set maximum number of data to number of bins
if max_c is None:
max_c = len(transactions)
# Initializing outputs
concepts = []
if report == '#':
spec_matrix = np.zeros((max_z + 1, max_c + 1))
if report == '3d#':
spec_matrix = np.zeros((max_z + 1, max_c + 1, winlen))
spectrum = []
# Mining the data with fpgrowth algorithm
if np.unique(transactions, return_counts=True)[1][0] == len(
transactions):
fpgrowth_output = [(tuple(transactions[0]), len(transactions))]
else:
fpgrowth_output = fim.fpgrowth(
tracts=transactions,
target=target,
supp=-min_c,
zmin=min_z,
zmax=max_z,
report='a',
algo='s')
# Applying min/max conditions and computing extent (window positions)
fpgrowth_output = list(filter(
lambda c: _fpgrowth_filter(
c, winlen, max_c, min_neu), fpgrowth_output))
# filter out subsets of patterns that are found as a side-effect
# of using the moving window strategy
fpgrowth_output = _filter_for_moving_window_subsets(
fpgrowth_output, winlen)
for (intent, supp) in fpgrowth_output:
if report == 'a':
if rel_matrix is not None:
# Computing the extent of the concept (patterns
# occurrences), checking in rel_matrix in which windows
# the intent occurred
extent = tuple(np.where(
np.all(rel_matrix[:, intent], axis=1) == 1)[0])
concepts.append((intent, extent))
# Computing 2d spectrum
elif report == '#':
spec_matrix[len(intent) - 1, supp - 1] += 1
# Computing 3d spectrum
elif report == '3d#':
spec_matrix[len(intent) - 1, supp - 1, max(
np.array(intent) % winlen)] += 1
del fpgrowth_output
if report == 'a':
return concepts
if report == '#':
for (z, c) in np.transpose(np.where(spec_matrix != 0)):
spectrum.append((z + 1, c + 1, int(spec_matrix[z, c])))
elif report == '3d#':
for (z, c, l) in np.transpose(np.where(spec_matrix != 0)):
spectrum.append(
(z + 1, c | |
is None:
return False
return _release_cluster_lock(session, lock, action_id, scope)
@retry_on_deadlock
def cluster_lock_steal(cluster_id, action_id):
    """Forcibly take the cluster lock for a single action.

    Any existing holders are overwritten: the lock's action list becomes
    ``[action_id]`` and the semaphore is forced to -1 (presumably marking
    exclusive ownership — confirm against the lock-acquire logic).

    :param cluster_id: ID of the cluster whose lock is to be stolen.
    :param action_id: ID of the action that will own the lock.
    :returns: list of action IDs holding the lock after the call.
    """
    with session_for_write() as session:
        # Row-lock the record so the steal is atomic w.r.t. other writers.
        lock = session.query(
            models.ClusterLock).with_for_update().get(cluster_id)
        if lock is not None:
            lock.action_ids = [action_id]
            lock.semaphore = -1
            lock.save(session)
        else:
            # No lock row yet: create one already owned by this action.
            lock = models.ClusterLock(cluster_id=cluster_id,
                                      action_ids=[action_id],
                                      semaphore=-1)
            session.add(lock)
        return lock.action_ids
@retry_on_deadlock
def node_lock_acquire(node_id, action_id):
    """Try to acquire the lock on a node for the given action.

    If no lock row exists, one is created for *action_id*; an existing
    row is left untouched.

    :returns: the action ID holding the lock after the call — equal to
        *action_id* only if the acquisition succeeded.
    """
    with session_for_write() as session:
        # Row-lock so concurrent acquirers serialize on this record.
        lock = session.query(
            models.NodeLock).with_for_update().get(node_id)
        if lock is None:
            lock = models.NodeLock(node_id=node_id, action_id=action_id)
            session.add(lock)
        return lock.action_id
@retry_on_deadlock
def node_is_locked(node_id):
    """Return True if a lock record exists for the given node."""
    with session_for_read() as session:
        return session.query(models.NodeLock).get(node_id) is not None
@retry_on_deadlock
def node_lock_release(node_id, action_id):
    """Release the node lock, but only if *action_id* is the holder.

    :returns: True if the lock row was deleted, False otherwise.
    """
    with session_for_write() as session:
        success = False
        # Row-lock so the check-then-delete is atomic.
        lock = session.query(
            models.NodeLock).with_for_update().get(node_id)
        if lock is not None and lock.action_id == action_id:
            session.delete(lock)
            success = True
        return success
@retry_on_deadlock
def node_lock_steal(node_id, action_id):
    """Forcibly take the node lock for the given action.

    Any current holder is overwritten; if no lock row exists one is
    created.

    :returns: the action ID now holding the lock (always *action_id*).
    """
    with session_for_write() as session:
        # Row-lock so the steal is atomic w.r.t. other writers.
        lock = session.query(
            models.NodeLock).with_for_update().get(node_id)
        if lock is not None:
            lock.action_id = action_id
            lock.save(session)
        else:
            lock = models.NodeLock(node_id=node_id, action_id=action_id)
            session.add(lock)
        return lock.action_id
# Policies
def policy_model_query():
    """Return a Policy query with cluster bindings eagerly loaded."""
    with session_for_read() as session:
        return session.query(models.Policy).options(
            joinedload(models.Policy.bindings))
@retry_on_deadlock
def policy_create(context, values):
    """Create a policy record from *values* and return it."""
    with session_for_write() as session:
        policy = models.Policy()
        policy.update(values)
        session.add(policy)
        return policy
def policy_get(context, policy_id, project_safe=True):
    """Fetch a policy by ID, optionally scoped to the caller's project."""
    found = policy_model_query().filter_by(id=policy_id).first()
    if found is None:
        return None
    if project_safe and context.project_id != found.project:
        return None
    return found
def policy_get_by_name(context, name, project_safe=True):
    """Fetch a policy by name (delegates to the generic name lookup)."""
    return query_by_name(context, policy_model_query, name,
                         project_safe=project_safe)
def policy_get_by_short_id(context, short_id, project_safe=True):
    """Fetch a policy by short-ID prefix (generic short-ID lookup)."""
    return query_by_short_id(context, policy_model_query, models.Policy,
                             short_id, project_safe=project_safe)
def policy_get_all(context, limit=None, marker=None, sort=None, filters=None,
                   project_safe=True):
    """List policies with optional filtering, sorting and pagination."""
    query = policy_model_query()
    if project_safe:
        query = query.filter_by(project=context.project_id)
    if filters:
        query = utils.exact_filter(query, models.Policy, filters)
    sort_keys, sort_dirs = utils.get_sort_params(sort, consts.POLICY_CREATED_AT)
    marker_row = marker
    if marker:
        # paginate_query wants the marker as a row, not an ID.
        marker_row = policy_model_query().get(marker)
    return sa_utils.paginate_query(
        query, models.Policy, limit, sort_keys,
        marker=marker_row, sort_dirs=sort_dirs).all()
@retry_on_deadlock
def policy_update(context, policy_id, values):
    """Update an existing policy and return it.

    :raises ResourceNotFound: if no policy with *policy_id* exists.
    """
    with session_for_write() as session:
        policy = session.query(models.Policy).get(policy_id)
        if not policy:
            raise exception.ResourceNotFound(type='policy', id=policy_id)
        policy.update(values)
        policy.save(session)
        return policy
@retry_on_deadlock
def policy_delete(context, policy_id):
    """Delete a policy if it exists and has no cluster bindings.

    :raises EResourceBusy: if the policy is still attached to a cluster.
    """
    with session_for_write() as session:
        policy = session.query(models.Policy).get(policy_id)
        if not policy:
            return
        # Refuse to delete while any cluster still references the policy.
        bindings = session.query(models.ClusterPolicies).filter_by(
            policy_id=policy_id)
        if bindings.count():
            raise exception.EResourceBusy(type='policy', id=policy_id)
        session.delete(policy)
# Cluster-Policy Associations
def cluster_policy_model_query():
    """Return a bare query over cluster-policy binding rows."""
    with session_for_read() as session:
        return session.query(models.ClusterPolicies)
def cluster_policy_get(context, cluster_id, policy_id):
    """Fetch the binding between a cluster and a policy, or None."""
    return cluster_policy_model_query().filter_by(
        cluster_id=cluster_id, policy_id=policy_id).first()
def cluster_policy_get_all(context, cluster_id, filters=None, sort=None):
    """List policy bindings attached to a cluster.

    :param cluster_id: ID of the cluster whose bindings are listed.
    :param filters: optional dict supporting the 'enabled' flag plus
        policy type and/or policy name criteria.
    :param sort: sort specification for utils.get_sort_params.
    :returns: list of ClusterPolicies rows.
    """
    with session_for_read() as session:
        query = session.query(models.ClusterPolicies)
        query = query.filter_by(cluster_id=cluster_id)
        if filters is not None:
            key_enabled = consts.CP_ENABLED
            if key_enabled in filters:
                filter_enabled = {key_enabled: filters[key_enabled]}
                query = utils.exact_filter(query, models.ClusterPolicies,
                                           filter_enabled)
            key_type = consts.CP_POLICY_TYPE
            key_name = consts.CP_POLICY_NAME
            if key_type in filters and key_name in filters:
                # BUG FIX: the original combined the two criteria with the
                # Python 'and' operator, which evaluates the truthiness of
                # the first SQLAlchemy expression instead of emitting a SQL
                # AND — silently dropping the type criterion. Chained
                # .filter() calls combine the clauses with SQL AND.
                query = query.join(models.Policy).filter(
                    models.Policy.type == filters[key_type]).filter(
                    models.Policy.name == filters[key_name])
            elif key_type in filters:
                query = query.join(models.Policy).filter(
                    models.Policy.type == filters[key_type])
            elif key_name in filters:
                query = query.join(models.Policy).filter(
                    models.Policy.name == filters[key_name])
        keys, dirs = utils.get_sort_params(sort)
        return sa_utils.paginate_query(query, models.ClusterPolicies, None,
                                       keys, sort_dirs=dirs).all()
def cluster_policy_ids_by_cluster(context, cluster_id):
    """An internal API for getting the policy IDs bound to a cluster."""
    with session_for_read() as session:
        policies = session.query(models.ClusterPolicies.policy_id).filter_by(
            cluster_id=cluster_id).all()
        # Each row is a 1-tuple (policy_id,); unwrap to plain IDs.
        return [p[0] for p in policies]
def cluster_policy_get_by_type(context, cluster_id, policy_type, filters=None):
    """List bindings on a cluster whose policy is of the given type."""
    query = cluster_policy_model_query().filter_by(cluster_id=cluster_id)
    if filters and consts.CP_ENABLED in filters:
        enabled_only = {consts.CP_ENABLED: filters[consts.CP_ENABLED]}
        query = utils.exact_filter(
            query, models.ClusterPolicies, enabled_only)
    query = query.join(models.Policy).filter(
        models.Policy.type == policy_type)
    return query.all()
def cluster_policy_get_by_name(context, cluster_id, policy_name, filters=None):
    """List bindings on a cluster whose policy has the given name."""
    query = cluster_policy_model_query().filter_by(cluster_id=cluster_id)
    if filters and consts.CP_ENABLED in filters:
        enabled_only = {consts.CP_ENABLED: filters[consts.CP_ENABLED]}
        query = utils.exact_filter(
            query, models.ClusterPolicies, enabled_only)
    query = query.join(models.Policy).filter(
        models.Policy.name == policy_name)
    return query.all()
@retry_on_deadlock
def cluster_policy_attach(context, cluster_id, policy_id, values):
    """Create a cluster-policy binding and return it.

    The binding is re-read after the write transaction commits so the
    returned object has its cluster/policy relationships available.
    """
    with session_for_write() as session:
        binding = models.ClusterPolicies()
        binding.cluster_id = cluster_id
        binding.policy_id = policy_id
        binding.update(values)
        session.add(binding)
    # Load foreignkey cluster and policy
    return cluster_policy_get(context, cluster_id, policy_id)
@retry_on_deadlock
def cluster_policy_detach(context, cluster_id, policy_id):
    """Remove the binding between a cluster and a policy, if present."""
    with session_for_write() as session:
        query = session.query(models.ClusterPolicies)
        bindings = query.filter_by(cluster_id=cluster_id,
                                   policy_id=policy_id).first()
        if bindings is None:
            return
        session.delete(bindings)
@retry_on_deadlock
def cluster_policy_update(context, cluster_id, policy_id, values):
    """Update a cluster-policy binding.

    :returns: the updated binding, or None if it does not exist.
    """
    with session_for_write() as session:
        query = session.query(models.ClusterPolicies)
        binding = query.filter_by(cluster_id=cluster_id,
                                  policy_id=policy_id).first()
        if binding is None:
            return None
        binding.update(values)
        binding.save(session)
        return binding
@retry_on_deadlock
def cluster_add_dependents(context, cluster_id, profile_id):
    """Add profile ID of container node to host cluster's 'dependents' property

    :param cluster_id: ID of the cluster to be updated.
    :param profile_id: Profile ID of the container node.
    :raises ResourceNotFound: The specified cluster does not exist in database.
    """
    with session_for_write() as session:
        cluster = session.query(models.Cluster).get(cluster_id)
        if cluster is None:
            raise exception.ResourceNotFound(type='cluster', id=cluster_id)
        # NOTE(review): relies on in-place mutation of the 'dependents'
        # attribute being change-tracked by the model layer (e.g. a
        # MutableDict column) — confirm, otherwise the update may not be
        # persisted by cluster.save().
        profiles = cluster.dependents.get('profiles', [])
        profiles.append(profile_id)
        cluster.dependents.update({'profiles': profiles})
        cluster.save(session)
@retry_on_deadlock
def cluster_remove_dependents(context, cluster_id, profile_id):
    """Remove profile ID from host cluster's 'dependents' property

    :param cluster_id: ID of the cluster to be updated.
    :param profile_id: Profile ID of the container node.
    :raises ResourceNotFound: The specified cluster does not exist in database.
    """
    with session_for_write() as session:
        cluster = session.query(models.Cluster).get(cluster_id)
        if cluster is None:
            raise exception.ResourceNotFound(type='cluster', id=cluster_id)
        profiles = cluster.dependents.get('profiles', [])
        if profile_id in profiles:
            profiles.remove(profile_id)
            # Drop the key entirely once the list is empty to keep the
            # 'dependents' blob minimal.
            if len(profiles) == 0:
                cluster.dependents.pop('profiles')
            else:
                cluster.dependents.update({'profiles': profiles})
            cluster.save(session)
# Profiles
def profile_model_query():
    """Return a bare query over Profile records."""
    with session_for_read() as session:
        return session.query(models.Profile)
@retry_on_deadlock
def profile_create(context, values):
    """Create a profile record from *values* and return it."""
    with session_for_write() as session:
        profile = models.Profile()
        profile.update(values)
        session.add(profile)
        return profile
def profile_get(context, profile_id, project_safe=True):
    """Fetch a profile by ID, optionally scoped to the caller's project."""
    found = profile_model_query().get(profile_id)
    if found is None:
        return None
    if project_safe and context.project_id != found.project:
        return None
    return found
def profile_get_by_name(context, name, project_safe=True):
    """Fetch a profile by name (delegates to the generic name lookup)."""
    return query_by_name(context, profile_model_query, name,
                         project_safe=project_safe)
def profile_get_by_short_id(context, short_id, project_safe=True):
    """Fetch a profile by short-ID prefix (generic short-ID lookup)."""
    return query_by_short_id(context, profile_model_query, models.Profile,
                             short_id, project_safe=project_safe)
def profile_get_all(context, limit=None, marker=None, sort=None, filters=None,
                    project_safe=True):
    """List profiles with optional filtering, sorting and pagination."""
    query = profile_model_query()
    if project_safe:
        query = query.filter_by(project=context.project_id)
    if filters:
        query = utils.exact_filter(query, models.Profile, filters)
    sort_keys, sort_dirs = utils.get_sort_params(
        sort, consts.PROFILE_CREATED_AT)
    marker_row = marker
    if marker:
        # paginate_query wants the marker as a row, not an ID.
        marker_row = profile_model_query().get(marker)
    return sa_utils.paginate_query(
        query, models.Profile, limit, sort_keys,
        marker=marker_row, sort_dirs=sort_dirs).all()
@retry_on_deadlock
def profile_update(context, profile_id, values):
    """Update an existing profile and return it.

    :raises ResourceNotFound: if no profile with *profile_id* exists.
    """
    with session_for_write() as session:
        profile = session.query(models.Profile).get(profile_id)
        if not profile:
            raise exception.ResourceNotFound(type='profile', id=profile_id)
        profile.update(values)
        profile.save(session)
        return profile
@retry_on_deadlock
def profile_delete(context, profile_id):
    """Delete a profile if it exists and is unused.

    :raises EResourceBusy: if any cluster or node still uses the profile.
    """
    with session_for_write() as session:
        profile = session.query(models.Profile).get(profile_id)
        if profile is None:
            return
        # used by any clusters?
        clusters = session.query(models.Cluster).filter_by(
            profile_id=profile_id)
        if clusters.count() > 0:
            raise exception.EResourceBusy(type='profile', id=profile_id)
        # used by any nodes?
        nodes = session.query(models.Node).filter_by(profile_id=profile_id)
        if nodes.count() > 0:
            raise exception.EResourceBusy(type='profile', id=profile_id)
        session.delete(profile)
# Credentials
def credential_model_query():
    """Return a bare query over Credential records."""
    with session_for_read() as session:
        return session.query(models.Credential)
@retry_on_deadlock
def cred_create(context, values):
    """Create a credential record from *values* and return it."""
    with session_for_write() as session:
        cred = models.Credential()
        cred.update(values)
        session.add(cred)
        return cred
def cred_get(context, user, project):
    """Fetch the credential keyed by (user, project), or None."""
    return credential_model_query().get((user, project))
@retry_on_deadlock
def cred_update(context, user, project, values):
    """Update an existing credential and return it.

    NOTE(review): there is no existence check — if the (user, project)
    credential is missing, ``cred.update`` raises on None. Callers such
    as cred_create_update appear to guarantee existence; confirm.
    """
    with session_for_write() as session:
        cred = session.query(models.Credential).get((user, project))
        cred.update(values)
        cred.save(session)
        return cred
@retry_on_deadlock
def cred_delete(context, user, project):
    """Delete the credential for (user, project); no-op if absent."""
    with session_for_write() as session:
        cred = session.query(models.Credential).get((user, project))
        if cred is None:
            return None
        session.delete(cred)
@retry_on_deadlock
def cred_create_update(context, values):
    """Create the credential, or update it if it already exists.

    NOTE(review): on the update path ``values`` is mutated in place
    ('user' and 'project' are popped) — callers should not reuse the
    dict afterwards.
    """
    try:
        return cred_create(context, values)
    except db_exc.DBDuplicateEntry:
        user = values.pop('user')
        project = values.pop('project')
        return cred_update(context, user, project, values)
# Events
def event_model_query():
    """Return an Event query with the owning cluster eagerly loaded."""
    with session_for_read() as session:
        return session.query(models.Event).options(
            joinedload(models.Event.cluster))
@retry_on_deadlock
def event_create(context, values):
    """Create an event record from *values* and return it."""
    with session_for_write() as session:
        event = models.Event()
        event.update(values)
        session.add(event)
        return event
@retry_on_deadlock
def event_get(context, event_id, project_safe=True):
    """Fetch an event by ID, optionally scoped to the caller's project."""
    event = event_model_query().get(event_id)
    if event is None:
        return None
    if project_safe and event.project != context.project_id:
        return None
    return event
def event_get_by_short_id(context, short_id, project_safe=True):
    """Fetch an event by short-ID prefix (generic short-ID lookup)."""
    return query_by_short_id(context, event_model_query, models.Event,
                             short_id, project_safe=project_safe)
def _event_filter_paginate_query(context, query, filters=None,
                                 limit=None, marker=None, sort=None):
    """Apply exact filters, sorting and pagination to an Event query."""
    if filters:
        query = utils.exact_filter(query, models.Event, filters)
    sort_keys, sort_dirs = utils.get_sort_params(sort, consts.EVENT_TIMESTAMP)
    marker_row = marker
    if marker:
        # paginate_query wants the marker as a row, not an ID.
        marker_row = event_model_query().get(marker)
    return sa_utils.paginate_query(
        query, models.Event, limit, sort_keys,
        marker=marker_row, sort_dirs=sort_dirs).all()
def event_get_all(context, limit=None, marker=None, sort=None, filters=None,
                  project_safe=True):
    """List events visible to the caller, with filter/sort/pagination."""
    query = event_model_query()
    if project_safe:
        query = query.filter_by(project=context.project_id)
    return _event_filter_paginate_query(
        context, query, filters=filters, limit=limit, marker=marker,
        sort=sort)
def event_count_by_cluster(context, cluster_id, project_safe=True):
    """Count the events recorded for a cluster."""
    query = event_model_query().filter_by(cluster_id=cluster_id)
    if project_safe:
        query = query.filter_by(project=context.project_id)
    return query.count()
def event_get_all_by_cluster(context, cluster_id, limit=None, marker=None,
                             sort=None, filters=None, project_safe=True):
    """List events of one cluster, with filter/sort/pagination."""
    query = event_model_query().filter_by(cluster_id=cluster_id)
    if project_safe:
        query = query.filter_by(project=context.project_id)
    return _event_filter_paginate_query(
        context, query, filters=filters, limit=limit, marker=marker,
        sort=sort)
@retry_on_deadlock
def event_prune(context, cluster_id, project_safe=True):
    """Delete all events of a cluster.

    :returns: the number of event rows deleted.
    """
    with session_for_write() as session:
        query = session.query(models.Event).with_for_update()
        query = query.filter_by(cluster_id=cluster_id)
        if project_safe:
            query = query.filter_by(project=context.project_id)
        return query.delete(synchronize_session='fetch')
@retry_on_deadlock
def event_purge(project, granularity='days', age=30):
    """Purge events older than *age* granularity-units.

    :param project: optional iterable of project IDs to restrict the purge.
    :returns: the number of event rows deleted.
    """
    with session_for_write() as session:
        query = session.query(models.Event).with_for_update()
        if project is not None:
            query = query.filter(models.Event.project.in_(project))
        if granularity is not None and age is not None:
            # Convert the (granularity, age) pair to seconds; unknown
            # granularities fall through as raw seconds, matching the
            # original if/elif chain that left 'age' untouched.
            seconds_per_unit = {'days': 86400, 'hours': 3600, 'minutes': 60}
            age_seconds = age * seconds_per_unit.get(granularity, 1)
            time_line = timeutils.utcnow() - datetime.timedelta(
                seconds=age_seconds)
            query = query.filter(models.Event.timestamp < time_line)
        return query.delete(synchronize_session='fetch')
# Actions
def action_model_query():
    """Return an Action query with dependency relations eagerly loaded."""
    with session_for_read() as session:
        return session.query(models.Action).options(
            joinedload(models.Action.dep_on),
            joinedload(models.Action.dep_by))
@retry_on_deadlock
def action_create(context, values):
    """Create an action record and return it.

    The record is re-read via action_get after the write so the returned
    object has its dep_on/dep_by relationships eagerly loaded.
    """
    with session_for_write() as session:
        action = models.Action()
        action.update(values)
        session.add(action)
    return action_get(context, action.id)
@retry_on_deadlock
def action_update(context, action_id, values):
    """Update an existing action.

    :raises ResourceNotFound: if no action with *action_id* exists.
    """
    with session_for_write() as session:
        action = session.query(models.Action).get(action_id)
        if not action:
            raise exception.ResourceNotFound(type='action', id=action_id)
        action.update(values)
        action.save(session)
def action_get(context, action_id, project_safe=True, refresh=False):
    """Fetch an action by ID, optionally scoped to the caller's project.

    The ``refresh`` flag is accepted for interface compatibility; it is
    not used by this implementation.
    """
    action = action_model_query().get(action_id)
    if action is None:
        return None
    if project_safe and action.project != context.project_id:
        return None
    return action
def action_list_active_scaling(context, cluster_id=None, project_safe=True):
query = action_model_query()
if project_safe:
query = query.filter_by(project=context.project_id)
if cluster_id:
query = query.filter_by(target=cluster_id)
query = query.filter(
models.Action.status.in_(
[consts.ACTION_READY,
consts.ACTION_WAITING,
consts.ACTION_RUNNING,
consts.ACTION_WAITING_LIFECYCLE_COMPLETION]))
query = query.filter(
| |
# Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
"""The is Aptitude package manager implementation"""
import json
import os
import re
from core.src.package_managers.PackageManager import PackageManager
from core.src.bootstrap.Constants import Constants
class AptitudePackageManager(PackageManager):
"""Implementation of Debian/Ubuntu based package management operations"""
# For more details, try `man apt-get` on any Debian/Ubuntu based box.
    def __init__(self, env_layer, execution_config, composite_logger, telemetry_writer, status_handler):
        """Set up apt/apt-get command templates and manager settings.

        Commands are stored as strings; the '<PACKAGE-NAME>' and
        '<SOURCES>' placeholders are substituted at call time.
        """
        super(AptitudePackageManager, self).__init__(env_layer, execution_config, composite_logger, telemetry_writer, status_handler)
        # Repo refresh
        self.repo_refresh = 'sudo apt-get -q update'
        # Support to get updates and their dependencies
        self.security_sources_list = '/tmp/az-update-security.list'
        self.prep_security_sources_list_cmd = 'sudo grep security /etc/apt/sources.list > ' + self.security_sources_list
        self.dist_upgrade_simulation_cmd_template = 'LANG=en_US.UTF8 sudo apt-get -s dist-upgrade <SOURCES> '  # Dist-upgrade simulation template - <SOURCES> needs to be replaced before use; sudo is used as sometimes the sources list needs sudo to be readable
        self.single_package_check_versions = 'apt-cache madison <PACKAGE-NAME>'
        self.single_package_find_installed_dpkg = 'sudo dpkg -s <PACKAGE-NAME>'
        self.single_package_find_installed_apt = 'sudo apt list --installed <PACKAGE-NAME>'
        self.single_package_upgrade_simulation_cmd = '''DEBIAN_FRONTEND=noninteractive apt-get -y --only-upgrade true -s install '''
        self.single_package_dependency_resolution_template = 'DEBIAN_FRONTEND=noninteractive LANG=en_US.UTF8 apt-get -y --only-upgrade true -s install <PACKAGE-NAME> '
        # Install update
        # --only-upgrade: upgrade only single package (only if it is installed)
        self.single_package_upgrade_cmd = '''sudo DEBIAN_FRONTEND=noninteractive apt-get -y --only-upgrade true install '''
        # Package manager exit code(s)
        self.apt_exitcode_ok = 0
        # auto OS updates: config keys and file for apt's periodic jobs
        self.update_package_list = 'APT::Periodic::Update-Package-Lists'
        self.unattended_upgrade = 'APT::Periodic::Unattended-Upgrade'
        self.os_patch_configuration_settings_file_path = '/etc/apt/apt.conf.d/20auto-upgrades'
        self.update_package_list_value = ""
        self.unattended_upgrade_value = ""
        # Miscellaneous
        os.environ['DEBIAN_FRONTEND'] = 'noninteractive'  # Avoid a config prompt
        self.set_package_manager_setting(Constants.PKG_MGR_SETTING_IDENTITY, Constants.APT)
        # Sentinel strings matched against apt-get output (must stay exact).
        self.STR_DPKG_WAS_INTERRUPTED = "E: dpkg was interrupted, you must manually run 'sudo dpkg --configure -a' to correct the problem."
        self.ESM_MARKER = "The following packages could receive security updates with UA Infra: ESM service enabled:"
    def refresh_repo(self):
        """Refresh the local apt package index (apt-get update)."""
        self.composite_logger.log("\nRefreshing local repo...")
        self.invoke_package_manager(self.repo_refresh)
# region Get Available Updates
    def invoke_package_manager(self, command):
        """Run a package-manager command and return its stdout on success.

        Raises (after recording telemetry and status) on a non-zero exit
        code; the dpkg-was-interrupted state gets a dedicated, actionable
        error message.
        """
        self.composite_logger.log_debug('\nInvoking package manager using: ' + command)
        code, out = self.env_layer.run_command_output(command, False, False)
        if code != self.apt_exitcode_ok and self.STR_DPKG_WAS_INTERRUPTED in out:
            # dpkg was interrupted mid-operation; needs manual repair before
            # any further patch operation can succeed.
            self.composite_logger.log_error('[ERROR] YOU NEED TO TAKE ACTION TO PROCEED. The package manager on this machine is not in a healthy state, and '
                                            'Patch Management cannot proceed successfully. Before the next Patch Operation, please run the following '
                                            'command and perform any configuration steps necessary on the machine to return it to a healthy state: '
                                            'sudo dpkg --configure -a')
            self.telemetry_writer.write_execution_error(command, code, out)
            error_msg = 'Package manager on machine is not healthy. To fix, please run: sudo dpkg --configure -a'
            self.status_handler.add_error_to_status(error_msg, Constants.PatchOperationErrorCodes.PACKAGE_MANAGER_FAILURE)
            raise Exception(error_msg, "[{0}]".format(Constants.ERROR_ADDED_TO_STATUS))
        elif code != self.apt_exitcode_ok:
            self.composite_logger.log('[ERROR] Package manager was invoked using: ' + command)
            self.composite_logger.log_warning(" - Return code from package manager: " + str(code))
            self.composite_logger.log_warning(" - Output from package manager: \n|\t" + "\n|\t".join(out.splitlines()))
            self.telemetry_writer.write_execution_error(command, code, out)
            error_msg = 'Unexpected return code (' + str(code) + ') from package manager on command: ' + command
            self.status_handler.add_error_to_status(error_msg, Constants.PatchOperationErrorCodes.PACKAGE_MANAGER_FAILURE)
            raise Exception(error_msg, "[{0}]".format(Constants.ERROR_ADDED_TO_STATUS))
            # more known return codes should be added as appropriate
        else:  # verbose diagnostic log
            self.composite_logger.log_debug("\n\n==[SUCCESS]===============================================================")
            self.composite_logger.log_debug(" - Return code from package manager: " + str(code))
            self.composite_logger.log_debug(" - Output from package manager: \n|\t" + "\n|\t".join(out.splitlines()))
            self.composite_logger.log_debug("==========================================================================\n\n")
        return out
def invoke_apt_cache(self, command):
"""Invoke apt-cache using the command input"""
self.composite_logger.log_debug('Invoking apt-cache using: ' + command)
code, out = self.env_layer.run_command_output(command, False, False)
if code != 0:
self.composite_logger.log('[ERROR] apt-cache was invoked using: ' + command)
self.composite_logger.log_warning(" - Return code from apt-cache: " + str(code))
self.composite_logger.log_warning(" - Output from apt-cache: \n|\t" + "\n|\t".join(out.splitlines()))
error_msg = 'Unexpected return code (' + str(code) + ') from apt-cache on command: ' + command
self.status_handler.add_error_to_status(error_msg, Constants.PatchOperationErrorCodes.PACKAGE_MANAGER_FAILURE)
raise Exception(error_msg, "[{0}]".format(Constants.ERROR_ADDED_TO_STATUS))
# more known return codes should be added as appropriate
else: # verbose diagnostic log
self.composite_logger.log_debug("\n\n==[SUCCESS]===============================================================")
self.composite_logger.log_debug(" - Return code from apt-cache: " + str(code))
self.composite_logger.log_debug(" - Output from apt-cache: \n|\t" + "\n|\t".join(out.splitlines()))
self.composite_logger.log_debug("==========================================================================\n\n")
return out
# region Classification-based (incl. All) update check
def get_all_updates(self, cached=False):
"""Get all missing updates"""
self.composite_logger.log_debug("\nDiscovering all packages...")
if cached and not len(self.all_updates_cached) == 0:
self.composite_logger.log_debug(" - Returning cached package data.")
return self.all_updates_cached, self.all_update_versions_cached # allows for high performance reuse in areas of the code explicitly aware of the cache
cmd = self.dist_upgrade_simulation_cmd_template.replace('<SOURCES>', '')
out = self.invoke_package_manager(cmd)
self.all_updates_cached, self.all_update_versions_cached = self.extract_packages_and_versions(out)
self.composite_logger.log_debug("Discovered " + str(len(self.all_updates_cached)) + " package entries.")
return self.all_updates_cached, self.all_update_versions_cached
def get_security_updates(self):
"""Get missing security updates"""
self.composite_logger.log("\nDiscovering 'security' packages...")
code, out = self.env_layer.run_command_output(self.prep_security_sources_list_cmd, False, False)
if code != 0:
self.composite_logger.log_warning(" - SLP:: Return code: " + str(code) + ", Output: \n|\t" + "\n|\t".join(out.splitlines()))
cmd = self.dist_upgrade_simulation_cmd_template.replace('<SOURCES>', '-oDir::Etc::Sourcelist=' + self.security_sources_list)
out = self.invoke_package_manager(cmd)
security_packages, security_package_versions = self.extract_packages_and_versions(out)
self.composite_logger.log("Discovered " + str(len(security_packages)) + " 'security' package entries.")
return security_packages, security_package_versions
def get_other_updates(self):
"""Get missing other updates"""
self.composite_logger.log("\nDiscovering 'other' packages...")
other_packages = []
other_package_versions = []
all_packages, all_package_versions = self.get_all_updates(True)
security_packages, security_package_versions = self.get_security_updates()
for index, package in enumerate(all_packages):
if package not in security_packages:
other_packages.append(package)
other_package_versions.append(all_package_versions[index])
self.composite_logger.log("Discovered " + str(len(other_packages)) + " 'other' package entries.")
return other_packages, other_package_versions
# endregion
# region Output Parser(s)
def extract_packages_and_versions(self, output):
# sample output format
# Inst coreutils [8.25-2ubuntu2] (8.25-2ubuntu3~16.10 Ubuntu:16.10/yakkety-updates [amd64])
# Inst python3-update-manager [1:16.10.7] (1:16.10.8 Ubuntu:16.10/yakkety-updates [all]) [update-manager-core:amd64 ]
# Inst update-manager-core [1:16.10.7] (1:16.10.8 Ubuntu:16.10/yakkety-updates [all])
self.composite_logger.log_debug("\nExtracting package and version data...")
packages = []
versions = []
search_text = r'Inst[ ](.*?)[ ].*?[(](.*?)[ ](.*?)[ ]\[(.*?)\]'
search = re.compile(search_text, re.M | re.S)
package_list = search.findall(str(output))
for package in package_list:
packages.append(package[0])
versions.append(package[1])
self.composite_logger.log_debug(" - Extracted package and version data for " + str(len(packages)) + " packages [BASIC].")
# Discovering ESM packages - Distro versions with extended security maintenance
lines = output.strip().split('\n')
esm_marker_found = False
esm_packages = []
for line_index in range(0, len(lines)-1):
line = lines[line_index].strip()
if not esm_marker_found:
if self.ESM_MARKER in line:
esm_marker_found = True
continue
esm_packages = line.split()
break
for package in esm_packages:
packages.append(package)
versions.append(Constants.UA_ESM_REQUIRED)
self.composite_logger.log_debug(" - Extracted package and version data for " + str(len(packages)) + " packages [TOTAL].")
return packages, versions
# endregion
# endregion
# region Install Update
def get_composite_package_identifier(self, package, package_version):
return package + '=' + package_version
    def install_updates_fail_safe(self, excluded_packages):
        # Intentional no-op: this package manager has no fail-safe install
        # path. 'excluded_packages' is accepted only for interface
        # compatibility with other package-manager implementations.
        return
# endregion
# region Package Information
def get_all_available_versions_of_package(self, package_name):
""" Returns a list of all the available versions of a package """
# Sample output format
# bash | 4.3-14ubuntu1.3 | http://us.archive.ubuntu.com/ubuntu xenial-updates/main amd64 Packages
# bash | 4.3-14ubuntu1.2 | http://security.ubuntu.com/ubuntu xenial-security/main amd64 Packages
# bash | 4.3-14ubuntu1 | http://us.archive.ubuntu.com/ubuntu xenial/main amd64 Packages
package_versions = []
cmd = self.single_package_check_versions.replace('<PACKAGE-NAME>', package_name)
output = self.invoke_apt_cache(cmd)
lines = output.strip().split('\n')
for line in lines:
package_details = line.split(' |')
if len(package_details) == 3:
self.composite_logger.log_debug(" - Applicable line: " + str(line))
package_versions.append(package_details[1].strip())
else:
self.composite_logger.log_debug(" - Inapplicable line: " + str(line))
return package_versions
def is_package_version_installed(self, package_name, package_version):
""" Returns true if the specific package version is installed """
self.composite_logger.log_debug("\nCHECKING PACKAGE INSTALL STATUS FOR: " + str(package_name) + " (" + str(package_version) + ")")
# DEFAULT METHOD
self.composite_logger.log_debug(" - [1/2] Verifying install status with Dpkg.")
cmd = self.single_package_find_installed_dpkg.replace('<PACKAGE-NAME>', package_name)
code, output = self.env_layer.run_command_output(cmd, False, False)
lines = output.strip().split('\n')
if code == 1: # usually not found
# Sample output format ------------------------------------------
# dpkg-query: package 'mysql-client' is not installed and no information is available
# Use dpkg --info (= dpkg-deb --info) to examine archive files,
# and dpkg --contents (= dpkg-deb --contents) to list their contents.
# ------------------------------------------ -------------------
self.composite_logger.log_debug(" - Return code: 1. The package is likely NOT present on the system.")
for line in lines:
if 'not installed' in line and package_name in line:
self.composite_logger.log_debug(" - Discovered to be not installed: " + str(line))
return False
else:
self.composite_logger.log_debug(" - Inapplicable line: " + str(line))
self.telemetry_writer.write_event("[Installed check] Return code: 1. Unable to verify package not present on the system: " + str(output), Constants.TelemetryEventLevel.Verbose)
elif code == | |
matrix mapping source dipole strengths to target normal-grads
See lap3ddipole_native for math definitions.
"""
y = np.atleast_2d(y) # handle ns=1 case: make 1x3 not 3-vecs
d = np.atleast_2d(d)
x = np.atleast_2d(x)
e = np.atleast_2d(e)
ns = y.shape[0]
nt = x.shape[0]
assert(A.shape==(nt,ns))
assert(An.shape==(nt,ns))
prefac = 1.0/(4.0*np.pi)
for i in numba.prange(nt): # outer loop over targs
for j in range(ns):
R0 = x[i,0]-y[j,0]
R1 = x[i,1]-y[j,1]
R2 = x[i,2]-y[j,2]
r2 = R0**2+R1**2+R2**2
r = np.sqrt(r2)
pir3 = prefac/(r*r2) # includes prefactor
ddotR = R0*d[j,0]+R1*d[j,1]+R2*d[j,2]
ddote = d[j,0]*e[i,0]+d[j,1]*e[i,1]+d[j,2]*e[i,2]
edotR = R0*e[i,0]+R1*e[i,1]+R2*e[i,2]
A[i,j] = ddotR * pir3
An[i,j] = (ddote - 3*ddotR*edotR/r2) * pir3
def lap3ddipolemat_ne(y,d,x,e,A,An):
    """Fill dense matrix for pot & direc-grad of 3D Laplace dipoles, non-self.

    numexpr implementation.

    Inputs:
    y - ns*3 source locs
    d - ns*3 src dipole directions (ought to be unit)
    x - nt*3 target locs
    e - nt*3 target normals (ought to be unit)
    Outputs: (must be preallocated)
    A - nt*ns matrix mapping source dipole strengths to target pots
    An - nt*ns matrix mapping source dipole strengths to target normal-grads
    See lap3ddipole_native for math definitions.
    """
    y = np.atleast_2d(y) # handle ns=1 case: make 1x3 not 3-vecs
    d = np.atleast_2d(d)
    x = np.atleast_2d(x)
    e = np.atleast_2d(e)
    ns = y.shape[0]
    nt = x.shape[0]
    assert(A.shape==(nt,ns))
    assert(An.shape==(nt,ns))
    # Column vectors (nt,1) broadcast against (ns,) row vectors inside the
    # numexpr strings to produce (nt,ns) outer-product matrices.
    x0 = x[:,0][:,None] # make col vecs for numexpr
    x1 = x[:,1][:,None]
    x2 = x[:,2][:,None]
    n0 = e[:,0][:,None]
    n1 = e[:,1][:,None]
    n2 = e[:,2][:,None]
    y0 = y[:,0]
    y1 = y[:,1]
    y2 = y[:,2]
    d0 = d[:,0]
    d1 = d[:,1]
    d2 = d[:,2]
    R0 = ne.evaluate('x0 - y0') # outer, displ mats x-y
    R1 = ne.evaluate('x1 - y1')
    R2 = ne.evaluate('x2 - y2')
    # this is all slow I assume because of passing over RAM too many times...
    ir = ne.evaluate('1/sqrt(R0**2 + R1**2 + R2**2)')  # 1/r, elementwise
    prefac = 1.0/(4.0*np.pi)  # Laplace Green's function prefactor
    ddotR = ne.evaluate('R0*d0 + R1*d1 + R2*d2')  # d.(x-y) per pair
    pir3 = ne.evaluate('prefac*(ir*ir*ir)')  # prefac/r^3
    ne.evaluate('ddotR*pir3',out=A)
    ne.evaluate('((d0*n0 + d1*n1 + d2*n2) - 3*(ddotR*(n0*R0+n1*R1+n2*R2))*(ir*ir))*pir3',out=An)
@numba.njit(parallel=True,fastmath=True) # recompiles every run, slow
def lap3dchargeself_numba(y,q,pot,grad,add=False):
    """evaluate pot & grad of 3D Laplace charges, self (j!=i), naive sum,
    numba jit. Writes into pot and grad.

    Inputs:
      y - (n,3) charge locations
      q - (n,) charge strengths
    Outputs (must be preallocated; written into):
      pot - (n,) potential at each charge location, omitting the j=i term
      grad - (n,3) gradient (negative of E field) at each charge location

    See lap3dcharge_native.
    Optional input: add - if True, add to what's in pot,grad; False overwrite.
    pot,grad passed in since njit fails with internal pot=zeros(nt)
    """
    if y.ndim==1: # n=1, no self-int, no need for atleast_2d
        return
    n = y.shape[0]
    assert(pot.shape==(n,))
    assert(grad.shape==(n,3))
    prefac = 1.0/(4.0*np.pi)  # Laplace Green's function prefactor
    for i in numba.prange(n): # loop over targs (parallelized)
        if not add:
            pot[i] = grad[i,0] = grad[i,1] = grad[i,2] = 0.0
        for j in range(n):
            if j!=i: # same speed as splitting to explicit j<i, j>i cases
                R0 = y[i,0]-y[j,0]
                R1 = y[i,1]-y[j,1]
                R2 = y[i,2]-y[j,2]
                r2 = R0**2+R1**2+R2**2
                r = np.sqrt(r2)
                pqj = prefac*q[j]
                pot[i] += pqj / r
                pqjir3 = pqj / (r*r2)  # prefac*q_j/r^3
                grad[i,0] -= R0 * pqjir3
                grad[i,1] -= R1 * pqjir3
                grad[i,2] -= R2 * pqjir3
@numba.njit(parallel=True,fastmath=True)
def lap3ddipoleself_numba(y,d,pot,grad,add=False):
    """evaluate pot & grad of 3D Laplace dipoles, self (j!=i), naive sum,
    numba jit.

    Inputs:
      y - (n,3) dipole locations
      d - (n,3) dipole strength vectors
    Inputs which are written into (must be preallocated):
      pot float(n) potential at n sources
      grad float(n,3) gradient (negative of E field) at n sources
    Optional input: add - if True, add to what's in pot,grad; False overwrite.

    Definition of pot and grad are as in lap3ddipole_native, omitting j=i term.
    Issues: * why is this code 1/2 the speed of lap3ddipole_numba ? (no, it's
    not the i j!=i conditional...).
    """
    if y.ndim==1: # n=1, no self-int, no need for atleast_2d
        return
    n = y.shape[0]
    assert(pot.shape==(n,))
    assert(grad.shape==(n,3))
    prefac = 1.0/(4.0*np.pi)  # Laplace Green's function prefactor
    for i in numba.prange(n): # loop over targs (parallelized)
        if not add:
            pot[i] = grad[i,0] = grad[i,1] = grad[i,2] = 0.0
        for j in range(n):
            if j!=i: # same speed as splitting to explicit j<i, j>i cases
                R0 = y[i,0]-y[j,0]
                R1 = y[i,1]-y[j,1]
                R2 = y[i,2]-y[j,2]
                r2 = R0**2+R1**2+R2**2
                r = np.sqrt(r2)
                ir2 = 1.0/r2
                pir3 = prefac/(r*r2) # includes prefactor
                ddotR = R0*d[j,0]+R1*d[j,1]+R2*d[j,2]
                pot[i] += ddotR * pir3
                grad[i,0] += (d[j,0] - 3*ddotR*R0*ir2) * pir3
                grad[i,1] += (d[j,1] - 3*ddotR*R1*ir2) * pir3
                grad[i,2] += (d[j,2] - 3*ddotR*R2*ir2) * pir3
def test_lap3dcharge():
    """ test gradient of pot in lap3dcharge, eval speeds of slow & jit & self.
    Barnett 9/11/18
    """
    x = array([0,1,-1]) # choose a targ
    y = array([1,2,.4]); q = array([2]) # 1 src
    u = 0*q; g = 0*y
    lap3dcharge_numba(y,q,x,u,g) # compile & check jit can be called w/ nt=ns=1
    u = lap3dcharge_native(y,q,x) # check native can be called w/ nt=ns=1
    # check grad... inline funcs needed (NB 2nd tuple in gradf)
    f = lambda x: lap3dcharge_native(y,q,x.T,ifgrad=False)
    gradf = lambda x: lap3dcharge_native(y,q,x.T,ifgrad=True)[1].ravel()
    print('test_lap3dcharge: grad check (native): ', checkgrad(x,f,gradf))
    # perf tests...
    ns = 2000 # sources
    nt = 1000 # targs (check rect case)
    y = random.rand(ns,3) # sources in [0,1]^3
    q = random.randn(ns) # charges
    x = random.rand(nt,3) # targs
    #y=np.asfortranarray(y); x=np.asfortranarray(x); q=np.asfortranarray(q)
    t0=tic()
    u,g = lap3dcharge_native(y,q,x,ifgrad=True) # native python
    t=tic()-t0
    print("native: %d src-targ pairs in %.3g s: %.3g Gpair/s" % (ns*nt,t,ns*nt/t/1e9))
    u2 = zeros(nt) # numba version writes outputs to arguments
    g2 = zeros([nt,3])
    lap3dcharge_numba(y,q,x,u2,g2)  # warm-up call: exclude JIT compile from timing
    t0=tic()
    lap3dcharge_numba(y,q,x,u2,g2)
    t =tic()-t0
    print("numba: %d src-targ pairs in %.3g s: %.3g Gpair/s" % (ns*nt,t,ns*nt/t/1e9))
    # numba and native must agree to rounding error
    print("pot err numba vs native: %.3g"%(np.max(np.abs(u-u2))))
    print("grad err numba vs native: %.3g"%(np.max(np.abs(g-g2))))
def test_lap3ddipole():
    """ test gradient of pot in lap3ddipole, eval speeds of slow & jit & self.
    Barnett 9/5/18
    """
    x = array([0,1,-1]) # choose a targ
    y = array([1,2,.4]); d = array([2,1,3]) # 1 src and dipole strength
    u = array([0]); g = 0*d
    lap3ddipole_numba(y,d,x,u,g) # compile & check jit can be called w/ nt=ns=1
    u = lap3ddipole_native(y,d,x) # check native can be called w/ nt=ns=1
    # check grad... inline funcs needed (NB 2nd tuple in gradf)
    f = lambda x: lap3ddipole_native(y,d,x.T,ifgrad=False)
    gradf = lambda x: lap3ddipole_native(y,d,x.T,ifgrad=True)[1].ravel()
    print('test_lap3ddipole: grad check (native): ', checkgrad(x,f,gradf))
    # perf tests...
    ns = 1000 # sources
    nt = 2000 # targs (check rect case)
    y = random.rand(ns,3) # sources in [0,1]^3
    d = random.randn(ns,3) # strength vectors
    x = random.rand(nt,3) # targs
    # try swap storage order: (2x speed up for native code, strangely)...
    #y=np.asfortranarray(y); x=np.asfortranarray(x); d=np.asfortranarray(d)
    t0=tic()
    u,g = lap3ddipole_native(y,d,x,ifgrad=True) # native python
    t =tic()-t0
    print("native: %d src-targ pairs in %.3g s: %.3g Gpair/s" % (ns*nt,t,ns*nt/t/1e9))
    u2 = zeros(nt) # numba version writes outputs to arguments
    g2 = zeros([nt,3])
    lap3ddipole_numba(y,d,x,u2,g2) # warm up
    t0=tic()
    lap3ddipole_numba(y,d,x,u2,g2)
    t =tic()-t0
    print("numba: %d src-targ pairs in %.3g s: %.3g Gpair/s" % (ns*nt,t,ns*nt/t/1e9))
    # numba and native must agree to rounding error
    print("pot err numba vs native: %.3g"%(np.max(np.abs(u-u2))))
    print("grad err numba vs native: %.3g"%(np.max(np.abs(g-g2))))
    n = 2000 # sources for self-eval j!=i test
    y = random.rand(n,3) # in [0,1]^3
    d = random.randn(n,3)
    pot = 0*y[:,0]; grad = 0*y # allocate output arrays
    lap3ddipoleself_numba(y,d,pot,grad) # compile to warm-up, 0.3 s!
    t0=tic()
    lap3ddipoleself_numba(y,d,pot,grad)
    t=tic()-t0
    print("numba self: %d src-targ pairs in %.3g s: %.3g Gpair/s" % (n*n,t,n*n/t/1e9))
def test_lap3dmats():
"""test the matrix fillers match the native evaluator answers.
Also tests timing. Conclusion: numba can fill around 1e9 els/sec but
numexpr is 4x slower, at least on i7.
"""
ns = 5000 # sources
y = random.rand(ns,3) # sources in [0,1]^3
d = random.randn(ns,3) # strength vectors (ought to be unit len)
q = random.randn(ns) # charges
nt = 10000 # targs (check rect case)
x = random.rand(nt,3) # targs
e = random.randn(nt,3) # targ normals (ought to be unit len)
u = zeros(nt) # true pot and grad outputs
g = zeros([nt,3])
for meth in range(2):
print("meth=numba:" if meth==0 else "meth=numexpr:")
# charge (monopole)...
lap3dcharge_numba(y,q,x,u,g)
A = zeros([nt,ns]); An = zeros([nt,ns]); # alloc mats
t0=tic()
lap3dchargemat_numba(y,x,e,A,An) if meth==0 else lap3dchargemat_ne(y,x,e,A,An)
t = tic()-t0
print("chg mats fill: two %d*%d mats in %.3g s: %.3g Gels/s" % (nt,ns,t,2*ns*nt/t/1e9))
t0 = tic()
ufrommat = A @ q[:,None]
t = tic()-t0
print("matvec: %.3g s: %.3g Gops/s" % (t,ns*nt/t/1e9))
print('chg mat pot err nrm = ', norm(u[:,None] - ufrommat)) # u make col vec!
gfrommat = An @ q[:,None]
gdote = np.sum(g*e,axis=1)[:,None] # e-direc derivs
print('chg mat n-grad err nrm = ', norm(gdote | |
<reponame>bopopescu/Social-Lite
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for flags in commands working with Google Cloud Functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.functions import util as api_util
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope.concepts import concepts
from googlecloudsdk.calliope.concepts import deps
from googlecloudsdk.command_lib.util import completers
from googlecloudsdk.command_lib.util.concepts import concept_parsers
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
API = 'cloudfunctions'  # Cloud Functions API service name
API_VERSION = 'v1'
LOCATIONS_COLLECTION = API + '.projects.locations'  # locations resource collection
# Log severities accepted by --min-log-level (lowercased when exposed as CLI choices).
SEVERITIES = ['DEBUG', 'INFO', 'ERROR']
# CLI choice values for the VPC egress/ingress settings flags.
EGRESS_SETTINGS = ['PRIVATE-RANGES-ONLY', 'ALL']
INGRESS_SETTINGS = ['ALL', 'INTERNAL-ONLY']
# Maps API enum value names to their corresponding CLI flag values.
INGRESS_SETTINGS_MAPPING = {
    'ALLOW_ALL': 'all',
    'ALLOW_INTERNAL_ONLY': 'internal-only',
}
EGRESS_SETTINGS_MAPPING = {
    'PRIVATE_RANGES_ONLY': 'private-ranges-only',
    'ALL_TRAFFIC': 'all',
}
def AddMinLogLevelFlag(parser):
  """Adds the --min-log-level choice flag to the parser."""
  base.ChoiceArgument(
      '--min-log-level',
      choices=[severity.lower() for severity in SEVERITIES],
      help_str='Minimum level of logs to be fetched.').AddToParser(parser)
def AddIngressSettingsFlag(parser):
  """Adds the --ingress-settings choice flag to the parser."""
  ingress_settings_arg = base.ChoiceArgument(
      '--ingress-settings',
      choices=[x.lower() for x in INGRESS_SETTINGS],
      # FIX: the original concatenation produced "...function.By default"
      # (missing space between sentences) in user-facing help output.
      help_str='Ingress settings controls what traffic can reach the '
      'function. By default `all` will be used.')
  ingress_settings_arg.AddToParser(parser)
def AddEgressSettingsFlag(parser):
  """Adds the --egress-settings choice flag to the parser."""
  base.ChoiceArgument(
      '--egress-settings',
      choices=[setting.lower() for setting in EGRESS_SETTINGS],
      help_str='Egress settings controls what traffic is diverted through the '
      'VPC Access Connector resource. '
      'By default `private-ranges-only` will be used.').AddToParser(parser)
def GetLocationsUri(resource):
  """Returns the self link URI for a Cloud Functions location resource."""
  registry = resources.REGISTRY.Clone()
  registry.RegisterApiByName(API, API_VERSION)
  location_ref = registry.Parse(
      resource.name,
      params={'projectsId': properties.VALUES.core.project.GetOrFail},
      collection=LOCATIONS_COLLECTION)
  return location_ref.SelfLink()
def AddFunctionMemoryFlag(parser):
  """Add flag for specifying function memory to the parser."""
  # Parse sizes like '256MB'; bare numbers default to MB.
  memory_size_type = arg_parsers.BinarySize(
      suggested_binary_size_scales=['KB', 'MB', 'MiB', 'GB', 'GiB'],
      default_unit='MB')
  parser.add_argument(
      '--memory',
      type=memory_size_type,
      help="""\
      Limit on the amount of memory the function can use.
      Allowed values are: 128MB, 256MB, 512MB, 1024MB, and 2048MB. By default,
      a new function is limited to 256MB of memory. When deploying an update to
      an existing function, the function will keep its old memory limit unless
      you specify this flag.""")
def AddFunctionTimeoutFlag(parser):
  """Add flag for specifying function timeout to the parser."""
  # Durations are bounded to the service's 1s..540s limit at parse time.
  timeout_type = arg_parsers.Duration(lower_bound='1s', upper_bound='540s')
  parser.add_argument(
      '--timeout',
      type=timeout_type,
      help="""\
      The function execution timeout, e.g. 30s for 30 seconds. Defaults to
      original value for existing function or 60 seconds for new functions.
      Cannot be more than 540s.
      See $ gcloud topic datetimes for information on duration formats.""")
def AddFunctionRetryFlag(parser):
  """Add flag for specifying function retry behavior to the parser."""
  retry_help = ('If specified, then the function will be retried in case of a '
                'failure.')
  parser.add_argument('--retry', help=retry_help, action='store_true')
def AddAllowUnauthenticatedFlag(parser):
  """Add the --allow-unauthenticated flag."""
  unauthenticated_help = ('If set, makes this a public function. This will '
                          'allow all callers, without checking '
                          'authentication.')
  parser.add_argument(
      '--allow-unauthenticated',
      action='store_true',
      default=False,
      help=unauthenticated_help)
def ShouldEnsureAllUsersInvoke(args):
  """Returns True if the function should be made publicly invocable.

  Args:
    args: parsed CLI arguments carrying 'allow_unauthenticated'.
  """
  # bool() replaces the redundant 'if x: return True else: return False'.
  return bool(args.allow_unauthenticated)
def ShouldDenyAllUsersInvoke(args):
  """Returns True iff --allow-unauthenticated was explicitly set to False.

  Args:
    args: parsed CLI arguments providing IsSpecified() and
      'allow_unauthenticated'.
  """
  # Direct boolean expression replaces the redundant if/else returning
  # True/False literals.
  return bool(args.IsSpecified('allow_unauthenticated') and
              not args.allow_unauthenticated)
def AddSourceFlag(parser):
  """Add the --source flag (GCS archive, source repo, or local path) to the parser."""
  # NOTE: the triple-quoted help below is user-facing text; keep wording and
  # formatting intact when editing.
  parser.add_argument(
      '--source',
      help="""\
      Location of source code to deploy.
      Location of the source can be one of the following three options:
      * Source code in Google Cloud Storage (must be a `.zip` archive),
      * Reference to source repository or,
      * Local filesystem path (root directory of function source).
      Note that if you do not specify the `--source` flag:
      * Current directory will be used for new function deployments.
      * If the function is previously deployed using a local filesystem path,
      then function's source code will be updated using the current directory.
      * If the function is previously deployed using a Google Cloud Storage
      location or a source repository, then the function's source code will not
      be updated.
      The value of the flag will be interpreted as a Cloud Storage location, if
      it starts with `gs://`.
      The value will be interpreted as a reference to a source repository, if it
      starts with `https://`.
      Otherwise, it will be interpreted as the local filesystem path. When
      deploying source from the local filesystem, this command skips files
      specified in the `.gcloudignore` file (see `gcloud topic gcloudignore` for
      more information). If the `.gcloudignore` file doesn't exist, the command
      will try to create it.
      The minimal source repository URL is:
      `https://source.developers.google.com/projects/${PROJECT}/repos/${REPO}`
      By using the URL above, sources from the root directory of the repository
      on the revision tagged `master` will be used.
      If you want to deploy from a revision different from `master`, append one
      of the following three sources to the URL:
      * `/revisions/${REVISION}`,
      * `/moveable-aliases/${MOVEABLE_ALIAS}`,
      * `/fixed-aliases/${FIXED_ALIAS}`.
      If you'd like to deploy sources from a directory different from the root,
      you must specify a revision, a moveable alias, or a fixed alias, as above,
      and append `/paths/${PATH_TO_SOURCES_DIRECTORY}` to the URL.
      Overall, the URL should match the following regular expression:
      ```
      ^https://source\\.developers\\.google\\.com/projects/
      (?<accountId>[^/]+)/repos/(?<repoName>[^/]+)
      (((/revisions/(?<commit>[^/]+))|(/moveable-aliases/(?<branch>[^/]+))|
      (/fixed-aliases/(?<tag>[^/]+)))(/paths/(?<path>.*))?)?$
      ```
      An example of a validly formatted source repository URL is:
      ```
      https://source.developers.google.com/projects/123456789/repos/testrepo/
      moveable-aliases/alternate-branch/paths/path-to=source
      ```
      """)
def AddStageBucketFlag(parser):
  """Add flag for specifying stage bucket to the parser."""
  stage_bucket_help = (
      'When deploying a function from a local directory, this flag\'s '
      'value is the name of the Google Cloud Storage bucket in which '
      'source code will be stored. Note that if you set the '
      '`--stage-bucket` flag when deploying a function, you will need to '
      'specify `--source` or `--stage-bucket` in subsequent deployments '
      'to update your source code. To use this flag successfully, the '
      'account in use must have permissions to write to this bucket. For '
      'help granting access, refer to this guide: '
      'https://cloud.google.com/storage/docs/access-control/')
  parser.add_argument(
      '--stage-bucket',
      type=api_util.ValidateAndStandarizeBucketUriOrRaise,
      help=stage_bucket_help)
def AddRuntimeFlag(parser):
  """Add the --runtime flag to the parser."""
  # TODO(b/110148388): Do not hardcode list of choices in the help text.
  parser.add_argument(
      '--runtime',
      help="""\
      Runtime in which to run the function.
      Required when deploying a new function; optional when updating
      an existing function.
      Choices:
      - `nodejs8`: Node.js 8
      - `nodejs10`: Node.js 10
      - `python37`: Python 3.7
      - `go111`: Go 1.11
      - `go113`: Go 1.13
      - `nodejs6`: Node.js 6 (deprecated)
      """)
def AddVPCConnectorMutexGroup(parser):
  """Add flag for specifying VPC connector to the parser."""
  # --vpc-connector and --clear-vpc-connector are mutually exclusive.
  connector_group = parser.add_group(mutex=True)
  connector_group.add_argument(
      '--vpc-connector',
      help="""\
        The VPC Access connector that the function can connect to. It can be
        either the fully-qualified URI, or the short name of the VPC Access
        connector resource. If the short name is used, the connector must
        belong to the same project. The format of this field is either
        `projects/${PROJECT}/locations/${LOCATION}/connectors/${CONNECTOR}`
        or `${CONNECTOR}`, where `${CONNECTOR}` is the short name of the VPC
        Access connector.
      """)
  connector_group.add_argument(
      '--clear-vpc-connector',
      action='store_true',
      help="""\
        Clears the VPC connector field.
      """)
def AddBuildWorkerPoolMutexGroup(parser):
  """Add flag for specifying Build Worker Pool to the parser."""
  # --build-worker-pool and --clear-build-worker-pool are mutually exclusive.
  worker_pool_group = parser.add_group(mutex=True)
  worker_pool_group.add_argument(
      '--build-worker-pool',
      help="""\
        Name of the Cloud Build Custom Worker Pool that should be used to build
        the function. The format of this field is
        `projects/${PROJECT}/workerPools/${WORKERPOOL}` where ${PROJECT} is the
        project id where the worker pool is defined and ${WORKERPOOL} is the
        short name of the worker pool.
      """)
  worker_pool_group.add_argument(
      '--clear-build-worker-pool',
      action='store_true',
      help="""\
        Clears the Cloud Build Custom Worker Pool field.
      """)
def AddEntryPointFlag(parser):
  """Add flag for specifying entry point to the parser."""
  parser.add_argument(
      '--entry-point',
      help="""\
      Name of a Google Cloud Function (as defined in source code) that will
      be executed. Defaults to the resource name suffix, if not specified. For
      backward compatibility, if function with given name is not found, then
      the system will try to use function named "function". For Node.js this
      is name of a function exported by the module specified in
      `source_location`.
      """,
      type=api_util.ValidateEntryPointNameOrRaise)
def AddMaxInstancesFlag(parser):
  """Add flag for specifying the max instances for a function."""
  # --max-instances and --clear-max-instances are mutually exclusive.
  instances_group = parser.add_group(mutex=True)
  instances_group.add_argument(
      '--max-instances',
      type=arg_parsers.BoundedInt(lower_bound=1),
      help="""\
        Sets the maximum number of instances for the function. A function
        execution that would exceed max-instances times out.
      """)
  instances_group.add_argument(
      '--clear-max-instances',
      action='store_true',
      help="""\
        Clears the maximum instances setting for the function.
      """)
def AddTriggerFlagGroup(parser):
"""Add arguments specyfying functions trigger to the parser."""
# You can also use --trigger-provider but it is hidden argument so not
# mentioning it for now.
trigger_group = parser.add_mutually_exclusive_group(
help=(
' If you don\'t specify a trigger when deploying an update | |
<filename>hnn_core/dipole.py
"""Class to handle the dipoles."""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
import warnings
import numpy as np
from copy import deepcopy
from .viz import plot_dipole, plot_psd, plot_tfr_morlet
def simulate_dipole(net, tstop, dt=0.025, n_trials=None, record_vsoma=False,
                    record_isoma=False, postproc=False):
    """Simulate a dipole given the experiment parameters.

    Parameters
    ----------
    net : Network object
        The Network object specifying how cells are
        connected.
    tstop : float
        The simulation stop time (ms).
    dt : float
        The integration time step of h.CVode (ms)
    n_trials : int | None
        The number of trials to simulate. If None, the 'N_trials' value
        of the ``params`` used to create ``net`` is used (must be >0)
    record_vsoma : bool
        Option to record somatic voltages from cells
    record_isoma : bool
        Option to record somatic currents from cells
    postproc : bool
        If True, smoothing (``dipole_smooth_win``) and scaling
        (``dipole_scalefctr``) values are read from the parameter file, and
        applied to the dipole objects before returning. Note that this setting
        only affects the dipole waveforms, and not somatic voltages, possible
        extracellular recordings etc. The preferred way is to use the
        :meth:`~hnn_core.dipole.Dipole.smooth` and
        :meth:`~hnn_core.dipole.Dipole.scale` methods instead. Default: False.

    Returns
    -------
    dpls: list
        List of dipole objects for each trials
    """
    # Imported here (not at module level) to avoid a circular import;
    # _BACKEND is rebound locally when no backend was configured.
    from .parallel_backends import _BACKEND, JoblibBackend
    if _BACKEND is None:
        _BACKEND = JoblibBackend(n_jobs=1)
    if n_trials is None:
        n_trials = net._params['N_trials']
    if n_trials < 1:
        raise ValueError("Invalid number of simulations: %d" % n_trials)
    if not net.connectivity:
        warnings.warn('No connections instantiated in network. Consider using '
                      'net = jones_2009_model() or net = law_2021_model() to '
                      'create a predefined network from published models.',
                      UserWarning)
    # Fill in drive/bias stop times that were left as None, and validate
    # tonic-bias windows, before instantiating drives for all trials.
    for drive_name, drive in net.external_drives.items():
        if 'tstop' in drive['dynamics']:
            if drive['dynamics']['tstop'] is None:
                drive['dynamics']['tstop'] = tstop
    for bias_name, bias in net.external_biases.items():
        for cell_type, bias_cell_type in bias.items():
            if bias_cell_type['tstop'] is None:
                bias_cell_type['tstop'] = tstop
            if bias_cell_type['tstop'] < 0.:
                raise ValueError('End time of tonic input cannot be negative')
            duration = bias_cell_type['tstop'] - bias_cell_type['t0']
            if duration < 0.:
                raise ValueError('Duration of tonic input cannot be negative')
    net._instantiate_drives(n_trials=n_trials, tstop=tstop)
    net._reset_rec_arrays()
    # Recording options are persisted on the network's params so the
    # backend workers can see them.
    if isinstance(record_vsoma, bool):
        net._params['record_vsoma'] = record_vsoma
    else:
        raise TypeError("record_vsoma must be bool, got %s"
                        % type(record_vsoma).__name__)
    if isinstance(record_isoma, bool):
        net._params['record_isoma'] = record_isoma
    else:
        raise TypeError("record_isoma must be bool, got %s"
                        % type(record_isoma).__name__)
    if postproc:
        warnings.warn('The postproc-argument is deprecated and will be removed'
                      ' in a future release of hnn-core. Please define '
                      'smoothing and scaling explicitly using Dipole methods.',
                      DeprecationWarning)
    dpls = _BACKEND.simulate(net, tstop, dt, n_trials, postproc)
    return dpls
def read_dipole(fname):
    """Read dipole values from a file and create a Dipole instance.

    Parameters
    ----------
    fname : str
        Full path to the input file (.txt)

    Returns
    -------
    dpl : Dipole
        The instance of Dipole class
    """
    # Column 0 is the time vector; the remaining columns are dipole data.
    raw = np.loadtxt(fname, dtype=float)
    return Dipole(raw[:, 0], raw[:, 1:])
def average_dipoles(dpls):
    """Compute dipole averages over a list of Dipole objects.

    Parameters
    ----------
    dpls: list of Dipole objects
        Contains list of dipole objects, each with a `data` member containing
        'L2', 'L5' and 'agg' components

    Returns
    -------
    dpl: instance of Dipole
        A new dipole object with each component of `dpl.data` representing the
        average over the same components in the input list
    """
    scale_applied = dpls[0].scale_applied
    # Validate every input dipole before averaging.
    for dpl_idx, dpl in enumerate(dpls):
        if dpl.scale_applied != scale_applied:
            raise RuntimeError('All dipoles must be scaled equally!')
        if not isinstance(dpl, Dipole):
            raise ValueError(
                f"All elements in the list should be instances of "
                f"Dipole. Got {type(dpl)}")
        if dpl.nave > 1:
            raise ValueError("Dipole at index %d was already an average of %d"
                             " trials. Cannot reaverage" %
                             (dpl_idx, dpl.nave))
    # Average each layer component across dipoles (keys taken from the last
    # validated dipole), then stack the layers as columns.
    avg_data = [
        np.mean(np.array([d.data[layer] for d in dpls]), axis=0)
        for layer in dpl.data.keys()
    ]
    avg_data = np.c_[avg_data].T
    avg_dpl = Dipole(dpls[0].times, avg_data)
    # The averaged scale equals the (verified-identical) input scales.
    avg_dpl.scale_applied = scale_applied
    # nave records how many trials went into this average.
    avg_dpl.nave = len(dpls)
    return avg_dpl
def _rmse(dpl, exp_dpl, tstart=0.0, tstop=0.0, weights=None):
    """ Calculates RMSE between data in dpl and exp_dpl

    Both dipoles are clipped to the overlapping [tstart, tstop] window and
    the longer 'agg' timeseries is resampled down to the shorter one before
    computing the weighted root-mean-square error.

    Parameters
    ----------
    dpl: instance of Dipole
        A dipole object with simulated data
    exp_dpl: instance of Dipole
        A dipole object with experimental data
    tstart | None: float
        Time at beginning of range over which to calculate RMSE
    tstop | None: float
        Time at end of range over which to calculate RMSE
    weights | None: array
        An array of weights to be applied to each point in
        simulated dpl. Must have length >= dpl.data
        If None, weights will be replaced with 1's for typical RMSE
        calculation.

    Returns
    -------
    err: float
        Weighted RMSE between data in dpl and exp_dpl
    """
    from scipy import signal
    exp_times = exp_dpl.times
    sim_times = dpl.times
    # do tstart and tstop fall within both datasets?
    # if not, use the closest data point as the new tstop/tstart
    for tseries in [exp_times, sim_times]:
        if tstart < tseries[0]:
            tstart = tseries[0]
        if tstop > tseries[-1]:
            tstop = tseries[-1]
    # make sure start and end times are valid for both dipoles
    # (index of the sample closest to the clipped tstart/tstop)
    exp_start_index = (np.abs(exp_times - tstart)).argmin()
    exp_end_index = (np.abs(exp_times - tstop)).argmin()
    exp_length = exp_end_index - exp_start_index
    sim_start_index = (np.abs(sim_times - tstart)).argmin()
    sim_end_index = (np.abs(sim_times - tstop)).argmin()
    sim_length = sim_end_index - sim_start_index
    if weights is None:
        # weighted RMSE with weights of all 1's is equivalent to
        # normal RMSE
        weights = np.ones(len(sim_times[0:sim_end_index]))
    # trim the weights to the simulated comparison window
    weights = weights[sim_start_index:sim_end_index]
    dpl1 = dpl.data['agg'][sim_start_index:sim_end_index]
    dpl2 = exp_dpl.data['agg'][exp_start_index:exp_end_index]
    if (sim_length > exp_length):
        # downsample simulation timeseries to match exp data
        dpl1 = signal.resample(dpl1, exp_length)
        weights = signal.resample(weights, exp_length)
        # zero-out tiny weights — presumably to suppress near-zero ripple
        # introduced by the Fourier resampling; TODO confirm intent
        indices = np.where(weights < 1e-4)
        weights[indices] = 0
    elif (sim_length < exp_length):
        # downsample exp timeseries to match simulation data
        dpl2 = signal.resample(dpl2, sim_length)
    return np.sqrt((weights * ((dpl1 - dpl2) ** 2)).sum() / weights.sum())
class Dipole(object):
"""Dipole class.
An instance of the ``Dipole``-class contains the simulated dipole moment
timecourses for L2 and L5 pyramidal cells, as well as their aggregate
(``'agg'``). The units of the dipole moment are in ``nAm``
(1e-9 Ampere-meters).
Parameters
----------
times : array (n_times,)
The time vector (in ms)
data : array, shape (n_times x n_layers)
The data. The first column represents 'agg' (the total diple),
the second 'L2' layer and the last one 'L5' layer. For experimental
data, it can contain only one column.
nave : int
Number of trials that were averaged to produce this Dipole. Defaults
to 1
Attributes
----------
times : array-like
The time vector (in ms)
sfreq : float
The sampling frequency (in Hz)
data : dict of array
Dipole moment timecourse arrays with keys 'agg', 'L2' and 'L5'
nave : int
Number of trials that were averaged to produce this Dipole
scale_applied : int or float
The total factor by which the dipole has been scaled (using
:meth:`~hnn_core.dipole.Dipole.scale`).
"""
def __init__(self, times, data, nave=1):  # noqa: D102
    self.times = np.array(times)
    # Experimental data may arrive as a single 1D column; promote to 2D.
    if data.ndim == 1:
        data = data[:, None]
    if data.shape[1] == 3:
        # Simulation output: aggregate plus the two per-layer components.
        self.data = dict(zip(('agg', 'L2', 'L5'),
                             (data[:, 0], data[:, 1], data[:, 2])))
    elif data.shape[1] == 1:
        # Experimental data: aggregate component only.
        self.data = {'agg': data[:, 0]}
    self.nave = nave
    # Sampling frequency derived from the first two samples
    # (NB assumes len > 1 and a uniform time step).
    self.sfreq = 1000. / (times[1] - times[0])
    self.scale_applied = 1  # cumulative scale factor, for visualisation
def copy(self):
    """Return a deep copy of the Dipole instance

    Modifying the copy (e.g. via :meth:`scale` or :meth:`smooth`) leaves
    the original untouched.

    Returns
    -------
    dpl_copy : instance of Dipole
        A copy of the Dipole instance.
    """
    return deepcopy(self)
def _post_proc(self, window_len, fctr):
    """Apply scaling and smoothing from param-files (DEPRECATE)

    Operates in-place via :meth:`scale` and :meth:`smooth`.

    Parameters
    ----------
    window_len : int
        Smoothing window in ms
    fctr : int
        Scaling factor
    """
    self.scale(fctr)
    if window_len > 0:  # this is to allow param-files with len==0
        self.smooth(window_len)
def _convert_fAm_to_nAm(self):
    """The NEURON simulator output is in fAm, convert to nAm

    NB! Must be run `after` :meth:`Dipole.baseline_renormalization`
    """
    # 1 fAm = 1e-6 nAm; scale every component in place.
    for component in self.data:
        self.data[component] *= 1e-6
def scale(self, factor):
    """Scale (multiply) the dipole moment by a fixed factor

    The attribute ``Dipole.scale_applied`` is updated to reflect factors
    applied and displayed in plots.

    Parameters
    ----------
    factor : int
        Scaling factor, applied to the data in-place.

    Returns
    -------
    self : instance of Dipole
        The scaled dipole (allows method chaining).
    """
    for component in self.data:
        self.data[component] *= factor
    # Track the cumulative scaling for plot annotations.
    self.scale_applied *= factor
    return self
def smooth(self, window_len):
"""Smooth the dipole waveform using Hamming-windowed convolution
Note that this method operates in-place, i.e., it will alter | |
<reponame>alapan-sau/SocialMediaDB<gh_stars>0
import subprocess as sp
import pymysql
import pymysql.cursors
from tabulate import tabulate
from time import time
from datetime import datetime
import time
import datetime
# ----------------- Functional Requirement Start ---------------
def printWeeklyReport():
    """Print min, max, mean, median and stddev of USER.uptime as tables.

    Uses the module-level ``cur`` (cursor) and ``con`` (connection). Each
    statistic is fetched by its own query; on failure the transaction is
    rolled back and the error is printed. The first three statistics abort
    the report on failure; a median failure deliberately falls through to
    the stddev query (preserved from the original control flow).
    """
    global cur

    def _show(query):
        # Run one report query and pretty-print all rows.
        # Returns True on success; rolls back and returns False on failure.
        try:
            cur.execute(query)
            con.commit()
            print(tabulate(cur.fetchall(), headers={}, tablefmt='psql'))
            return True
        except Exception as e:
            con.rollback()
            print(e)
            return False

    if not _show("select CAST(MIN(CAST(uptime as float)) as TIME) as 'MINIMUM UPTIME' from USER;"):
        return
    if not _show("select CAST(MAX(CAST(uptime as float)) as TIME) as 'MAXIMUM UPTIME'from USER;"):
        return
    if not _show("select CAST(AVG(CAST(uptime as float)) as TIME) as 'MEAN UPTIME' from USER;"):
        return
    # Median: emulate row_number() with a session variable, then average the
    # middle row(s). Needs the variable reset in a separate statement first.
    query = """select CAST(AVG(CAST(med.uptime as float)) as TIME) as 'MEDIAN UPTIME' from (select @rowindex:=@rowindex + 1 as rowindex, USER.uptime from USER order by uptime) AS med where med.rowindex in (FLOOR(@rowindex/2), CEIL(@rowindex/2));"""
    try:
        cur.execute("set@rowindex := -1;")
        con.commit()
        cur.execute(query)
        con.commit()
        print(tabulate(cur.fetchall(), headers={}, tablefmt='psql'))
    except Exception as e:
        con.rollback()
        print(e)
    _show("select stddev(uptime) as 'STANDARD DEVIATION OF UPTIME' from USER;")
    return
def search():
    """Keyword search across the USER/POST/COMMENT/PAGE/GROUP tables.

    Prompts for a keyword and a domain, then matches the keyword with
    REGEXP against that domain's text column and displays the rows via
    the module-level ``viewTable`` helper.
    """
    search_key = input("Enter the keyword to be searched for: ")
    # '+' makes the keyword's final character match one-or-more times
    # (preserved from the original behaviour).
    search_key = search_key + '+'
    print("Enter the domain you want to search in:")
    print("1. User")
    print("2. Post")
    print("3. Comment")
    print("4. Page")
    print("5. Group")
    try:
        search_param = int(input("Enter the number of the required domain: "))
    except Exception as e:
        print(e)
        print("Invalid domain type")
        return
    # Whitelisted (table, column) pairs: safe to interpolate into the query.
    domains = {
        1: ("USER", "name"),
        2: ("POST", "text"),
        3: ("COMMENT", "text"),
        4: ("PAGE", "page_name"),
        5: ("social_media.GROUP", "group_name"),
    }
    if search_param not in domains:
        print("Invalid Domain Error")
        return
    search_type, search_field = domains[search_param]
    try:
        # The user-supplied pattern goes through a placeholder to prevent
        # SQL injection; table/column names come from the whitelist above.
        query = "SELECT * FROM %s WHERE %s REGEXP %%s" % (
            search_type, search_field)
        r = cur.execute(query, (search_key,))
        if r == 0:
            print("No result found")
            return
        rows = cur.fetchall()
        viewTable(rows)
    except Exception as e:
        print(e)
        print("Could not perform search")
def generateReport():
    """Interactively print an activity report for a single user.

    Prompts for a user id, shows that user's USER row, then asks which of
    eleven reports to print (followers, posts, reacts, pages, groups, ...).
    An unknown report number is silently ignored (original behaviour).
    Relies on the module-level ``cur`` cursor and ``viewTable`` helper.
    """
    try:
        user_id = int(
            input("Enter the User ID of the user you want to generate the report for: "))
    except Exception as e:
        print(e)
        print("User ID must be a number")
        return

    def _show_report(query, empty_msg, found_msg):
        # Execute one report query; print empty_msg when it returns no
        # rows, otherwise print found_msg followed by the rows.
        r = cur.execute(query)
        if r == 0:
            print(empty_msg)
        else:
            print(found_msg)
            viewTable(cur.fetchall())

    try:
        query = "SELECT * FROM USER WHERE user_id=%d" % (user_id)
        r = cur.execute(query)
        if r == 0:
            print("Could not find details of the given User ID")
            return
        print("The details of the user are as follows: ")
        rows = cur.fetchall()
        viewTable(rows)
        print("Select which report would you like to say: ")
        print("1. Followers")
        print("2. Following")
        print("3. Post")
        print("4. Comments")
        print("5. Post Reacts")
        print("6. Comment Reacts")
        print("7. Pages Created")
        print("8. Pages Liked")
        print("9. Group Admin")
        print("10. Group Moderator")
        print("11. Group Member")
        try:
            report_type = int(
                input("Enter the number of the report you would like to see: "))
        except Exception as e:
            print(e)
            print("Invalid Choice")
            return
        # (query, message-when-empty, heading-when-found) per report type.
        # user_id is an int (validated above), so %d interpolation is safe.
        reports = {
            1: ("SELECT * FROM USER WHERE user_id IN (SELECT follower_id FROM FOLLOWS WHERE following_id=%d)" % (user_id),
                "There are no followers for the user\n",
                "The users following the user are as follows: "),
            2: ("SELECT * FROM USER WHERE user_id IN (SELECT following_id FROM FOLLOWS WHERE follower_id=%d)" % (user_id),
                "The user does not follow anyone \n",
                "The users the given user is following are as follows: "),
            3: ("SELECT * FROM POST WHERE user_id=%d" % (user_id),
                "The user did not post any post\n",
                "Posts posted by the user:"),
            4: ("SELECT COMMENT.comment_id, COMMENT.text, COMMENT.media, COMMENTS.post_id FROM COMMENT INNER JOIN COMMENTS ON COMMENT.comment_id = COMMENTS.comment_id WHERE COMMENTS.user_id = %d" % (user_id),
                "The user did not post any post\n",
                "Comments posted by the user:"),
            5: ("SELECT POST.post_id, POST.text, POST.media, POST.user_id, MAKES_GENERAL_REACT.reacted_type FROM POST INNER JOIN MAKES_GENERAL_REACT ON POST.post_id = MAKES_GENERAL_REACT.post_id WHERE MAKES_GENERAL_REACT.user_id = %d" % (user_id),
                "The user did not react on any post\n",
                "Posts reacted on by the user:"),
            # BUG FIX: the original filtered on MAKES_A_REACT.comment_id=%d,
            # comparing a comment id to the user id; the react rows must be
            # filtered by the reacting user instead.
            6: ("SELECT COMMENT.comment_id, COMMENT.text, COMMENT.media, COMMENTS.post_id, MAKES_A_REACT.reacted_type FROM COMMENT INNER JOIN COMMENTS ON COMMENT.comment_id = COMMENTS.comment_id INNER JOIN MAKES_A_REACT ON MAKES_A_REACT.comment_id = COMMENT.comment_id WHERE MAKES_A_REACT.user_id=%d" % (user_id),
                "The user did not react on any comment\n",
                "Comments reacted on by the user: "),
            7: ("SELECT * FROM PAGE WHERE owner_id = %d" % (user_id),
                "The user did not create any page\n",
                "Pages created by the user: "),
            8: ("SELECT * FROM PAGE WHERE page_id IN (SELECT page_id FROM LIKES WHERE user_id=%d)" % (user_id),
                "The user did not like any page\n",
                "Pages liked by the user: "),
            9: ("SELECT * FROM social_media.GROUP WHERE group_id IN (SELECT group_id FROM IS_ADMIN WHERE user_id=%d)" % (user_id),
                "The user is not the admin of any group\n",
                "Groups the user is the admin of: "),
            10: ("SELECT * FROM social_media.GROUP WHERE group_id IN (SELECT group_id FROM IS_MODERATOR WHERE user_id=%d)" % (user_id),
                 "The user is not the moderator of any group\n",
                 "Groups the user is the moderator of: "),
            11: ("SELECT * FROM social_media.GROUP WHERE group_id IN (SELECT group_id FROM BELONGS_TO WHERE user_id=%d)" % (user_id),
                 "The user is does not belong to any group\n",
                 "Groups the user is the belongs to: "),
        }
        if report_type in reports:
            _show_report(*reports[report_type])
    except Exception as e:
        print(e)
        print("Could not generate report :(")
###############################################################################################
##############################################################################################
def mutual():
try:
print("1. Mutual Followings")
print("2. Mutual Followers")
print("3. Mutual Liked Pages")
print("4. Mutual Membership in Groups")
optn = int(input("Enter chosen option: "))
user1ID = int(input("Enter UserID of first User: "))
user2ID = int(input("Enter UserID of second User: "))
if (optn == 1):
query = '''
SELECT *
FROM USER
WHERE user_id IN
(SELECT following_id
FROM FOLLOWS
WHERE follower_id = '%s')
AND
user_id IN
(SELECT following_id
FROM FOLLOWS
WHERE follower_id = '%s')
''' % (user1ID, user2ID)
r = cur.execute(query)
print("Mutual Followers:")
if(r == 0):
print("NO MUTUAL FOLLOWERS")
rows = cur.fetchall()
viewTable(rows)
elif (optn == 2):
query = '''
SELECT *
FROM USER
WHERE user_id IN
(SELECT follower_id
FROM FOLLOWS
WHERE following_id = '%s')
AND
user_id IN
(SELECT follower_id
FROM FOLLOWS
WHERE following_id = '%s')
''' % (user1ID, user2ID)
r = cur.execute(query)
print("Mutual Followings:")
if(r == 0):
print("NO MUTUAL FOLLOWINGS")
rows = cur.fetchall()
viewTable(rows)
elif (optn == 3):
query = '''
SELECT *
FROM PAGE
WHERE page_id IN
(SELECT page_id
FROM LIKES
WHERE user_id = '%s')
AND
page_id IN
(SELECT page_id
FROM LIKES
WHERE user_id = '%s')
''' % (user1ID, user2ID)
r = cur.execute(query)
print("Mutual Likes to Pages:")
if(r == 0):
print("NO MUTUAL LIKES")
rows = cur.fetchall()
viewTable(rows)
elif(optn == 4):
query = '''
SELECT *
FROM social_media.GROUP
WHERE group_id IN
(SELECT group_id
FROM BELONGS_TO
WHERE user_id = '%s')
AND
group_id IN
(SELECT group_id
FROM BELONGS_TO
WHERE user_id = '%s')
''' % (user1ID, user2ID)
r = cur.execute(query)
print("Mutual Likes to Pages:")
if(r == 0):
print("NO MUTUAL LIKES")
rows = cur.fetchall()
viewTable(rows)
else:
print("Invalid Option")
| |
of false-positives
# Manual override for MS11-011 to reduce false positives. The article was updated, but the bulletin database wasn't (https://technet.microsoft.com/en-us/library/security/ms11-011.aspx)
# V1.2 (March 18, 2011): Added Windows 7 for 32-bit Systems Service Pack 1, Windows 7 for x64-based Systems Service Pack 1, Windows Server 2008 R2 for x64-based Systems Service Pack 1, and Windows Server 2008 R2 for Itanium-based Systems Service Pack 1 to Non-Affected Software. This is an informational change only. There were no changes to the security update files or detection logic.
if id == 'MS11-011':
ms11_011 = ['Windows 7 for 32-bit Systems Service Pack 1', 'Windows 7 for x64-based Systems Service Pack 1', 'Windows Server 2008 R2 for x64-based Systems Service Pack 1','Windows Server 2008 R2 for Itanium-based Systems Service Pack 1']
for not_affected in ms11_011:
compare_version = getversion(getname(not_affected),getrelease(not_affected),getservicepack(not_affected),getarchitecture(not_affected))
if version == compare_version:
if ARGS.verbose: ALERT("Ignoring MS11-011 false positive due to it not affecting '%s'" % compare_version)
id = False
for bulletinid in bulletinids:
if bulletinid == id:
title = row[5]
kb = row[2]
severity = row[3]
if id not in ids:
vulns[id] = [title,kb,severity]
ids.add(id)
# alerted, if a bulletin has been alerted to the user so that it doesn't appear twice
# this occurs when a bulletin has multiple parents
# msids, the actual data for all of the relevant msids (the row from the CSV)
alerted = set()
msids = sorted(vulns, reverse=True)
# loop through the bulletinids which is the set of the actual bulletins that are to
# be alerted
for msid in msids:
## don't alert twice, no matter the case
if msid not in alerted:
# get the msid, exploitability alert rating, and resources
m,exploit,resources = getexploit(msid)
# only display the message, if the exploit flag isn't used
# or if it is used, and the alert level is MSF or EXP
if ARGS.audit or (exploit == ALERT.MSF or exploit == ALERT.EXP):
alert = ALERT.NORMAL
if exploit: alert = exploit
ALERT("%s: %s (%s) - %s" % (msid, vulns[msid][0], vulns[msid][1], vulns[msid][2]), alert)
if resources and not ARGS.quiet:
for resource in resources:
ALERT(" %s" % resource)
ALERT("")
alerted.add(msid)
# only attempt to display linked/sub msids based on cli arguments
if ARGS.sub:
# linked ms, the children of this msid
linked = set(getlinkedms([msid], csv.reader(StringIO.StringIO(database))))
linked = linked.intersection(msids)
# loop through the linked msids, and only display those that qualify and
# those that have not been alerted yet
for lmsid in sorted(linked, reverse=True):
if lmsid in msids and lmsid not in alerted:
lexploit = getexploit(lmsid)
lalert = ALERT.NORMAL
if ARGS.audit or (lexploit == ALERT.MSF or lexploit == ALERT.EXP):
if lexploit: lalert = lexploit
ALERT("|_%s: %s (%s) - %s" % (lmsid, vulns[lmsid][0], vulns[lmsid][1], vulns[lmsid][2]), lalert)
# only allow duplicate events to be displayed when command-line args passed
if not ARGS.duplicates: alerted.add(lmsid)
# end run()
# attempt to detect character encoding of a file
# otherwise return None
# https://stackoverflow.com/questions/3323770/character-detection-in-a-text-file-in-python-using-the-universal-encoding-detect
def detect_encoding(filename):
    """Best-effort detection of a file's character encoding via chardet.

    Returns the detected encoding name (str), or None when chardet is not
    installed, the file cannot be read, or detection fails.
    https://stackoverflow.com/questions/3323770/character-detection-in-a-text-file-in-python-using-the-universal-encoding-detect
    """
    try:
        import chardet
        # Read as bytes and close the handle deterministically (the
        # original leaked the file object and read in text mode).
        with open(filename, "rb") as f:
            data = f.read()
        result = chardet.detect(data)
        return result['encoding']
    except Exception:
        # Deliberate best-effort: any failure means "unknown encoding".
        return None
# the trace command is used to determine linked MS bulletins
# TODO much of this is duplicated from run(). should be merged
def trace(database):
# convert to upper
bulletinid = ARGS.trace.upper()
ALERT("searching for bulletin id %s" % bulletinid)
# get linked msids
lmsids = getlinkedms([bulletinid], csv.reader(StringIO.StringIO(database)))
msids = []
if ARGS.ostext:
ALERT("getting OS information from command line text")
name=getname(ARGS.ostext)
release=getrelease(ARGS.ostext)
servicepack=getservicepack(ARGS.ostext)
architecture=getarchitecture(ARGS.ostext)
if ARGS.verbose:
ALERT("name: %s" % name)
ALERT("release: %s" % release)
ALERT("service pack: %s" % servicepack)
ALERT("architecture: %s" % architecture)
# the os name at least has to be identified
if not name:
ALERT("unable to determine the windows version command line text from '%s'" % ARGS.ostext, ALERT.BAD)
exit(1)
# get linked msids, loop through the row
for row in csv.reader(StringIO.StringIO(database)):
msid = row[1]
affected = row[6]
if msid in lmsids:
# debug
#print ("%s,%s,%s,%s,%s,%s" % (msid, name, release, servicepack, architecture, affected))
if isaffected(name, release, servicepack, architecture, affected) and msid not in msids: msids.append(msid)
else: msids = lmsids
ALERT("linked msids %s" % msids, ALERT.GOOD)
def patches(database):
    """Print every KB number associated with bulletin ARGS.patches."""
    kbs = []
    bulletinid = ARGS.patches.upper()  # bulletin ids are stored upper-case
    ALERT("searching all kb's for bulletin id %s" % bulletinid)
    # collect both the bulletin-level and component-level KB for each match
    for row in csv.reader(StringIO.StringIO(database)):
        if bulletinid in row[1]:
            kbs.append(row[2])   # bulletin KB
            kbs.append(row[7])   # component KB
    ALERT("relevant kbs %s" % (sorted(set(kbs), reverse=True)), ALERT.GOOD)
def getversion(name, release, servicepack, architecture):
    """Build the canonical 'Windows <name> [R<rel>] [SP<sp>] <arch>' string.

    Mirrors the product names used in the bulletin database, e.g.
    'Windows 2008 R2 SP1 64-bit' or 'Windows 2008 Itanium-based'.
    """
    parts = ["Windows " + name]
    if release:
        parts.append("R" + release)
    if servicepack:
        parts.append("SP" + servicepack)
    if architecture == "Itanium":
        parts.append("Itanium-based")
    else:
        parts.append("%s-bit" % architecture)
    return " ".join(parts)
def getname(ostext):
    """Extract the Windows version name ('XP', '7', '2008', ...) from text.

    Returns False when the input is False or no known version is found.
    The LAST matching needle wins, so e.g. 'windows 8.1' matches ' 8' first
    and is then overridden by '8.1'.
    """
    if ostext == False:
        return False
    # lower-case once up front (the original re-lowered the text on every
    # loop iteration and mutated the parameter while doing so)
    haystack = ostext.lower()
    osname = False
    osnamearray = [["xp", "XP"],
                   ["2000", "2000"],
                   ["2003", "2003"],
                   ["vista", "Vista"],
                   ["2008", "2008"],
                   [" 7", "7"],
                   [" 8", "8"],
                   ["2012", "2012"],
                   ["8.1", "8.1"],
                   [" 10", "10"]]
    for needle, label in osnamearray:
        if ("windows" + needle in haystack or "windows " + needle in haystack
                or "server" + needle in haystack or "server " + needle in haystack):
            osname = label
    # the first loop is a more restrictive detection of the OS name, but it
    # does not detect the following
    # > Microsoft Windows\xFF7 Entreprise
    # so if there is no detection from the first attempt, then search on a
    # more loosely based string of needle and space
    if not osname:
        for needle, label in osnamearray:
            if needle + " " in haystack:
                osname = label
    return osname
def getrelease(ostext):
    """Pull the release digit out of strings like 'R2' / 'Release 2'.

    Returns the digit as a string, or False when the input is False or no
    release marker is present.
    """
    if ostext == False:
        return False
    match = re.search(r"( r| rc|release|rel)[ ]*(\d)", ostext.lower())
    return match.group(2) if match and match.group(2) else False
def getservicepack(ostext):
    """Pull the service-pack digit out of strings like 'SP3' / 'Service Pack 3'.

    Returns the digit as a string, or False when the input is False or no
    service-pack marker is present.
    """
    if ostext == False:
        return False
    match = re.search(r"(sp|pack|pack:)[ ]*(\d)", ostext.lower())
    return match.group(2) if match and match.group(2) else False
# architecture defaults to 32, but can be 64-bit
# or itanium based
def getarchitecture(ostext):
# default to 32-bit
architecture="32"
# haystack
s = ostext.lower()
# attempt to be as flexible as possible
# matching '64-based', 'x64', ' 64', 'i64', '64bit', '64 bit', '64-bit'
if ("64-based" in s) or ("x64" in s) or (" 64" in s) or ("i64" in s) or ("64bit" in s) or ("64 bit" in s) or ("64-bit" in s): architecture="64"
# target Itanium with a simple search for 'tani'
if "tani" in s: architecture="Itanium"
if getname(ostext) == "2008" and getrelease(ostext) == "2" and architecture == "32":
if ARGS.verbose:
ALERT("forcing unidentified architecture to 64-bit because OS identified as Windows 2008 R2 (although could be Itanium and wasn't detected?)")
architecture = "64"
# windows server 2012 is only 64-bit arch
if getname(ostext) == "2012" and architecture == "32":
if ARGS.verbose:
ALERT("forcing unidentified architecture to 64-bit because OS identified as Windows Server 2012 does not support 32-bit")
architecture = "64"
return architecture
# itanium build search string
def getitanium(ostext):
if ostext == False:
return False
regex="(tanium)"
m=re.search(regex, ostext.lower())
if m:
return True
return False
def getpatch(ostext):
    """Extract a 5-10 digit KB/patch number from the text.

    Returns the matched digit string, or False when the input is False or
    no patch number is present.
    """
    # guard against False input, consistent with getname()/getrelease();
    # the original crashed with AttributeError on False
    if ostext == False:
        return False
    match = re.search(r"(\d){5,10}", ostext.lower())
    return match.group() if match and match.group() else False
# get the bulletin ids from the haystack
# these are typically in the form of:
# MS14-009[2898860]
# MS13-052[2833940],MS14-009[2898856]
# will return a list if found, otherwise false
def getbulletinids(haystack):
regex="MS[\d]{2,3}-[\d]{2,3}"
m = re.findall(regex, haystack)
if len(m) > 0: return m
return False
def isaffected(name, release, servicepack, architecture, haystack):
if name == getname(haystack):
# ensure None are set to False
# example, if getservicepack() does not get called in the systeminfo parsing
# then servicepack will be None. this will then fail when comparing to False.
if release == None: release = False
if servicepack == None: servicepack = False
if architecture == None: architecture = False
# print "%s,%s,%s,%s" % (name, release, servicepack, architecture)
# print "%s,%s,%s,%s" % (getname(haystack),getrelease(haystack),getservicepack(haystack),getarchitecture(haystack))
n = (name == getname(haystack))
r = (release | |
optional): Name of the vehicle to get the Pose of
Returns:
Pose:
"""
pose = self.client.call('simGetVehiclePose', vehicle_name)
return Pose.from_msgpack(pose)
def simSetTraceLine(self, color_rgba, thickness=1.0, vehicle_name = ''):
    """
    Modify the color and thickness of the line when Tracing is enabled

    Tracing can be enabled by pressing T in the Editor or setting `EnableTrace` to `True` in the Vehicle Settings

    Args:
        color_rgba (list): desired RGBA values from 0.0 to 1.0
        thickness (float, optional): Thickness of the line
        vehicle_name (string, optional): Name of the vehicle to set Trace line values for
    """
    # Fire-and-forget RPC; no return value from the simulator.
    self.client.call('simSetTraceLine', color_rgba, thickness, vehicle_name)
def simGetObjectPose(self, object_name):
    """
    Get the pose of the specified object (actor) in the environment

    Args:
        object_name (str): Object to get the Pose of

    Returns:
        Pose:
    """
    pose = self.client.call('simGetObjectPose', object_name)
    # Deserialize the msgpack-RPC payload into a Pose instance.
    return Pose.from_msgpack(pose)
def simSetObjectPose(self, object_name, pose, teleport = True):
    """
    Set the pose of the object(actor) in the environment

    The specified actor must have Mobility set to movable, otherwise there will be undefined behaviour.
    See https://www.unrealengine.com/en-US/blog/moving-physical-objects for details on how to set Mobility and the effect of Teleport parameter

    Args:
        object_name (str): Name of the object(actor) to move
        pose (Pose): Desired Pose of the object
        teleport (bool, optional): Whether to move the object immediately without affecting their velocity

    Returns:
        bool: If the move was successful
    """
    # The simulator reports success/failure of the move directly.
    return self.client.call('simSetObjectPose', object_name, pose, teleport)
def simGetObjectScale(self, object_name):
    """
    Gets scale of an object in the world

    Args:
        object_name (str): Object to get the scale of

    Returns:
        airsim.Vector3r: Scale
    """
    scale = self.client.call('simGetObjectScale', object_name)
    # Deserialize the msgpack-RPC payload into a Vector3r instance.
    return Vector3r.from_msgpack(scale)
def simSetObjectScale(self, object_name, scale_vector):
    """
    Sets scale of an object in the world

    Args:
        object_name (str): Object to set the scale of
        scale_vector (airsim.Vector3r): Desired scale of object

    Returns:
        bool: True if scale change was successful
    """
    return self.client.call('simSetObjectScale', object_name, scale_vector)
def simListSceneObjects(self, name_regex = '.*'):
    """
    Lists the objects present in the environment

    Default behaviour is to list all objects, regex can be used to return smaller list of matching objects or actors

    Args:
        name_regex (str, optional): String to match actor names against, e.g. "Cylinder.*"

    Returns:
        list[str]: List containing all the names
    """
    # Regex filtering happens simulator-side; '.*' returns everything.
    return self.client.call('simListSceneObjects', name_regex)
def simSpawnObject(self, object_name, asset_name, pose, scale, physics_enabled=False):
    """Spawned selected object in the world

    Args:
        object_name (str): Desired name of new object
        asset_name (str): Name of asset(mesh) in the project database
        pose (airsim.Pose): Desired pose of object
        scale (airsim.Vector3r): Desired scale of object
        physics_enabled (bool, optional): Whether physics simulation is enabled for the spawned object

    Returns:
        str: Name of spawned object, in case it had to be modified
    """
    return self.client.call('simSpawnObject', object_name, asset_name, pose, scale, physics_enabled)
def simDestroyObject(self, object_name):
    """Removes selected object from the world

    Args:
        object_name (str): Name of object to be removed

    Returns:
        bool: True if object is queued up for removal
    """
    # Removal is asynchronous: True means "queued", not "already removed".
    return self.client.call('simDestroyObject', object_name)
def simSetSegmentationObjectID(self, mesh_name, object_id, is_name_regex = False):
    """
    Set segmentation ID for specific objects

    See https://microsoft.github.io/AirSim/image_apis/#segmentation for details

    Args:
        mesh_name (str): Name of the mesh to set the ID of (supports regex)
        object_id (int): Object ID to be set, range 0-255

                         RBG values for IDs can be seen at https://microsoft.github.io/AirSim/seg_rgbs.txt
        is_name_regex (bool, optional): Whether the mesh name is a regex

    Returns:
        bool: If the mesh was found
    """
    return self.client.call('simSetSegmentationObjectID', mesh_name, object_id, is_name_regex)
def simGetSegmentationObjectID(self, mesh_name):
    """
    Returns Object ID for the given mesh name

    Mapping of Object IDs to RGB values can be seen at https://microsoft.github.io/AirSim/seg_rgbs.txt

    Args:
        mesh_name (str): Name of the mesh to get the ID of

    Returns:
        int: Object ID of the mesh
    """
    return self.client.call('simGetSegmentationObjectID', mesh_name)
def simPrintLogMessage(self, message, message_param = "", severity = 0):
    """
    Prints the specified message in the simulator's window.

    If message_param is supplied, then it's printed next to the message and in that case if this API is called with same message value
    but different message_param again then previous line is overwritten with new line (instead of API creating new line on display).

    For example, `simPrintLogMessage("Iteration: ", to_string(i))` keeps updating same line on display when API is called with different values of i.
    The valid values of severity parameter is 0 to 3 inclusive that corresponds to different colors.

    Args:
        message (str): Message to be printed
        message_param (str, optional): Parameter to be printed next to the message
        severity (int, optional): Range 0-3, inclusive, corresponding to the severity of the message
    """
    # Fire-and-forget RPC; no return value from the simulator.
    self.client.call('simPrintLogMessage', message, message_param, severity)
def simGetCameraInfo(self, camera_name, vehicle_name = ''):
    """
    Get details about the camera

    Args:
        camera_name (str): Name of the camera, for backwards compatibility, ID numbers such as 0,1,etc. can also be used
        vehicle_name (str, optional): Vehicle which the camera is associated with

    Returns:
        CameraInfo:
    """
    # TODO: below str() conversion is only needed for legacy reason and should be removed in future
    return CameraInfo.from_msgpack(self.client.call('simGetCameraInfo', str(camera_name), vehicle_name))
def simGetDistortionParams(self, camera_name, vehicle_name = ''):
    """
    Get camera distortion parameters

    Args:
        camera_name (str): Name of the camera, for backwards compatibility, ID numbers such as 0,1,etc. can also be used
        vehicle_name (str, optional): Vehicle which the camera is associated with

    Returns:
        List (float): List of distortion parameter values corresponding to K1, K2, K3, P1, P2 respectively.
    """
    return self.client.call('simGetDistortionParams', str(camera_name), vehicle_name)
def simSetDistortionParams(self, camera_name, distortion_params, vehicle_name = ''):
    """
    Set camera distortion parameters

    Args:
        camera_name (str): Name of the camera, for backwards compatibility, ID numbers such as 0,1,etc. can also be used
        distortion_params (dict): Dictionary of distortion param names and corresponding values
                                  {"K1": 0.0, "K2": 0.0, "K3": 0.0, "P1": 0.0, "P2": 0.0}
        vehicle_name (str, optional): Vehicle which the camera is associated with
    """
    # Delegate to the single-parameter setter so the legacy str() camera-name
    # normalization lives in exactly one place (previously the raw RPC call
    # was duplicated here).
    for param_name, value in distortion_params.items():
        self.simSetDistortionParam(camera_name, param_name, value, vehicle_name)
def simSetDistortionParam(self, camera_name, param_name, value, vehicle_name = ''):
    """
    Set single camera distortion parameter

    Args:
        camera_name (str): Name of the camera, for backwards compatibility, ID numbers such as 0,1,etc. can also be used
        param_name (str): Name of distortion parameter
        value (float): Value of distortion parameter
        vehicle_name (str, optional): Vehicle which the camera is associated with
    """
    # str() conversion keeps legacy integer camera IDs working.
    self.client.call('simSetDistortionParam', str(camera_name), param_name, value, vehicle_name)
def simSetCameraPose(self, camera_name, pose, vehicle_name = ''):
    """
    - Control the pose of a selected camera

    Args:
        camera_name (str): Name of the camera to be controlled
        pose (Pose): Pose representing the desired position and orientation of the camera
        vehicle_name (str, optional): Name of vehicle which the camera corresponds to
    """
    # TODO: below str() conversion is only needed for legacy reason and should be removed in future
    self.client.call('simSetCameraPose', str(camera_name), pose, vehicle_name)
def simSetCameraOrientation(self, camera_name, orientation, vehicle_name = ''):
    """
    .. note::
        This API has been upgraded to `simSetCameraPose`

    - Control the Orientation of a selected camera

    Args:
        camera_name (str): Name of the camera to be controlled
        orientation (Quaternionr): Quaternion representing the desired orientation of the camera
        vehicle_name (str, optional): Name of vehicle which the camera corresponds to
    """
    # Deprecated shim: warn, then forward to simSetCameraPose with a
    # pose carrying only the requested orientation.
    logging.warning("`simSetCameraOrientation` API has been upgraded to `simSetCameraPose`. Please update your code.")
    pose = Pose(orientation_val=orientation)
    self.simSetCameraPose(camera_name, pose, vehicle_name)
def simSetCameraFov(self, camera_name, fov_degrees, vehicle_name = ''):
    """
    - Control the field of view of a selected camera

    Args:
        camera_name (str): Name of the camera to be controlled
        fov_degrees (float): Value of field of view in degrees
        vehicle_name (str, optional): Name of vehicle which the camera corresponds to
    """
    # TODO: below str() conversion is only needed for legacy reason and should be removed in future
    self.client.call('simSetCameraFov', str(camera_name), fov_degrees, vehicle_name)
def simGetGroundTruthKinematics(self, vehicle_name = ''):
"""
Get Ground truth kinematics of the vehicle
Args:
vehicle_name (str, optional): Name of the vehicle
Returns:
KinematicsState: Ground truth of the vehicle
"""
kinematics_state = self.client.call('simGetGroundTruthKinematics', vehicle_name)
return KinematicsState.from_msgpack(kinematics_state)
simGetGroundTruthKinematics.__annotations__ = {'return': KinematicsState}
def simGetGroundTruthEnvironment(self, vehicle_name = ''):
"""
Get ground truth environment state
Args:
vehicle_name (str, optional): Name of the vehicle
Returns:
EnvironmentState: Ground truth environment state
"""
env_state = self.client.call('simGetGroundTruthEnvironment', vehicle_name)
return EnvironmentState.from_msgpack(env_state)
simGetGroundTruthEnvironment.__annotations__ = {'return': EnvironmentState}
# sensor APIs
def getImuData(self, imu_name = '', vehicle_name = ''):
"""
Args:
imu_name (str, optional): Name of IMU to get data from, specified in settings.json
vehicle_name (str, optional): Name of vehicle to which the sensor corresponds to
Returns:
ImuData:
"""
return ImuData.from_msgpack(self.client.call('getImuData', imu_name, vehicle_name))
def getBarometerData(self, barometer_name = '', vehicle_name = | |
<filename>tests/readers/opendss/Capacitors/test_capacitor_connectivity.py
# -*- coding: utf-8 -*-
"""
test_capacitor_connectivity.py
----------------------------------
Tests for parsing all the attributes of Capacitors when reading from OpenDSS to Ditto
"""
import os
import math
import pytest
import numpy as np
from ditto.store import Store
from ditto.readers.opendss.read import Reader
from ditto.default_values.default_values_json import Default_Values
current_directory = os.path.realpath(os.path.dirname(__file__))
def test_capacitor_connectivity():
m = Store()
r = Reader(
master_file=os.path.join(current_directory, "test_capacitor_connectivity.dss")
)
r.parse(m)
m.set_names()
# Reading OpenDSS default values
d_v = Default_Values(
os.path.join(
current_directory,
"../../../../ditto/default_values/opendss_default_values.json",
)
)
parsed_values = d_v.parse()
# Capacitor Cap1 should be a three phase capacitor (3 PhaseCapacitor objects) connected to bus1
assert len(m["cap1"].phase_capacitors) == 3 # Cap1 is a three phase capacitor
assert sum(
[phase_capacitor.var for phase_capacitor in m["cap1"].phase_capacitors]
) == pytest.approx(600 * 10 ** 3, 0.0001)
assert m["cap1"].name == "cap1"
assert m["cap1"].nominal_voltage == float(4.16) * 10 ** 3
assert m["cap1"].connection_type == parsed_values["Capacitor"]["connection_type"]
assert m["cap1"].delay is None
assert m["cap1"].mode is None
assert m["cap1"].low is None
assert m["cap1"].high is None
assert m["cap1"].resistance == 0.0
assert m["cap1"].reactance == 0.0
assert m["cap1"].susceptance is None
assert m["cap1"].conductance is None
assert m["cap1"].pt_ratio is None
assert m["cap1"].ct_ratio is None
assert m["cap1"].pt_phase is None
assert m["cap1"].connecting_element == "bus1"
assert m["cap1"].measuring_element is None
assert m["cap1"].feeder_name == "sourcebus_src"
assert set([pc.phase for pc in m["cap1"].phase_capacitors]) == set(["A", "B", "C"])
# Capacitor Cap2 should be a one phase capacitor (1 PhaseCapacitor object) connected to bus2 on phase C
assert len(m["cap2"].phase_capacitors) == 1 # Cap2 is a one phase capacitor
assert m["cap2"].phase_capacitors[0].var == 100 * 10 ** 3
assert m["cap2"].name == "cap2"
assert m["cap2"].nominal_voltage == float(2.4) * 10 ** 3
assert m["cap2"].connection_type == parsed_values["Capacitor"]["connection_type"]
assert m["cap2"].delay is None
assert m["cap2"].mode is None
assert m["cap2"].low is None
assert m["cap2"].high is None
assert m["cap2"].resistance == 0.0
assert m["cap2"].reactance == 0.0
assert m["cap2"].susceptance is None
assert m["cap2"].conductance is None
assert m["cap2"].pt_ratio is None
assert m["cap2"].ct_ratio is None
assert m["cap2"].pt_phase is None
assert m["cap2"].connecting_element == "bus2"
assert m["cap2"].measuring_element is None
assert m["cap2"].feeder_name == "sourcebus_src"
assert m["cap2"].phase_capacitors[0].phase == "C"
# Capacitor Cap3 should be a one phase capacitor (1 PhaseCapacitor object) connected to bus3 on phase A
assert len(m["cap3"].phase_capacitors) == 1 # Cap3 is a one phase capacitor
assert m["cap3"].phase_capacitors[0].var == 200.37 * 10 ** 3
assert m["cap3"].name == "cap3"
assert m["cap3"].nominal_voltage == float(2.4) * 10 ** 3
assert m["cap3"].connection_type == parsed_values["Capacitor"]["connection_type"]
assert m["cap3"].delay is None
assert m["cap3"].mode is None
assert m["cap3"].low is None
assert m["cap3"].high is None
assert m["cap3"].resistance == 0.0
assert m["cap3"].reactance == 0.0
assert m["cap3"].susceptance is None
assert m["cap3"].conductance is None
assert m["cap3"].pt_ratio is None
assert m["cap3"].ct_ratio is None
assert m["cap3"].pt_phase is None
assert m["cap3"].connecting_element == "bus3"
assert m["cap3"].measuring_element is None
assert m["cap3"].feeder_name == "sourcebus_src"
assert m["cap3"].phase_capacitors[0].phase == "A"
# Capacitor Cap4 should be a two phase capacitor (2 PhaseCapacitor objects) connected to bus4 on phase A and C
assert len(m["cap4"].phase_capacitors) == 2 # Cap4 is a two phase capacitor
assert sum(
[phase_capacitor.var for phase_capacitor in m["cap4"].phase_capacitors]
) == pytest.approx(400 * 10 ** 3, 0.0001)
assert m["cap4"].name == "cap4"
assert m["cap4"].nominal_voltage == float(2.4) * 10 ** 3
assert m["cap4"].connection_type == parsed_values["Capacitor"]["connection_type"]
assert m["cap4"].delay is None
assert m["cap4"].mode is None
assert m["cap4"].low is None
assert m["cap4"].high is None
assert m["cap4"].resistance == 0.0
assert m["cap4"].reactance == 0.0
assert m["cap4"].susceptance is None
assert m["cap4"].conductance is None
assert m["cap4"].pt_ratio is None
assert m["cap4"].ct_ratio is None
assert m["cap4"].pt_phase is None
assert m["cap4"].connecting_element == "bus4"
assert m["cap4"].measuring_element is None
assert m["cap4"].feeder_name == "sourcebus_src"
assert set([pc.phase for pc in m["cap4"].phase_capacitors]) == set(["A", "C"])
# Capacitors from epri_j1
assert len(m["b4909-1"].phase_capacitors) == 3 # b4909-1 is a three phase capacitor
assert sum(
[phase_capacitor.var for phase_capacitor in m["b4909-1"].phase_capacitors]
) == pytest.approx(900 * 10 ** 3, 0.0001)
assert m["b4909-1"].name == "b4909-1"
assert m["b4909-1"].nominal_voltage == float(12.47) * 10 ** 3
assert m["b4909-1"].connection_type == parsed_values["Capacitor"]["connection_type"]
assert m["b4909-1"].delay == 30
assert m["b4909-1"].mode == "voltage"
assert m["b4909-1"].low == parsed_values["Capacitor"]["low"]
assert m["b4909-1"].high == parsed_values["Capacitor"]["high"]
assert m["b4909-1"].resistance == 0.0
assert m["b4909-1"].reactance == 0.0
assert m["b4909-1"].susceptance is None
assert m["b4909-1"].conductance is None
assert m["b4909-1"].pt_ratio == parsed_values["Capacitor"]["pt_ratio"]
assert m["b4909-1"].ct_ratio == parsed_values["Capacitor"]["ct_ratio"]
assert m["b4909-1"].pt_phase == "B"
assert m["b4909-1"].connecting_element == "b4909"
assert m["b4909-1"].measuring_element == "Line.OH_B4904"
assert m["b4909-1"].feeder_name == "sourcebus_src"
assert set([pc.phase for pc in m["b4909-1"].phase_capacitors]) == set(
["A", "B", "C"]
)
assert len(m["b4909-2"].phase_capacitors) == 3 # b4909-2 is a three phase capacitor
assert sum(
[phase_capacitor.var for phase_capacitor in m["b4909-2"].phase_capacitors]
) == pytest.approx(900 * 10 ** 3, 0.0001)
assert m["b4909-2"].name == "b4909-2"
assert m["b4909-2"].nominal_voltage == float(12.47) * 10 ** 3
assert m["b4909-2"].connection_type == parsed_values["Capacitor"]["connection_type"]
assert m["b4909-2"].delay == 30
assert m["b4909-2"].mode == "voltage"
assert m["b4909-2"].low == 120.5
assert m["b4909-2"].high == 125
assert m["b4909-2"].resistance == 0.0
assert m["b4909-2"].reactance == 0.0
assert m["b4909-2"].susceptance is None
assert m["b4909-2"].conductance is None
assert m["b4909-2"].pt_ratio == parsed_values["Capacitor"]["pt_ratio"]
assert m["b4909-2"].ct_ratio == parsed_values["Capacitor"]["ct_ratio"]
assert m["b4909-2"].pt_phase == "B"
assert m["b4909-2"].connecting_element == "b4909"
assert m["b4909-2"].measuring_element == "Line.OH_B4904"
assert m["b4909-2"].feeder_name == "sourcebus_src"
assert set([pc.phase for pc in m["b4909-2"].phase_capacitors]) == set(
["A", "B", "C"]
)
# oh_b4904
assert len(m["oh_b4904"].wires) == 3
# Phases of the different wires
assert set([w.phase for w in m["oh_b4904"].wires]) == set(["A", "B", "C"])
assert m["oh_b4904"].name == "oh_b4904"
assert (
m["oh_b4904"].nameclass == "OH-3X_477AAC_4/0AAACN"
) # Linecode is OH-3X_477AAC_4/0AAACN
assert m["oh_b4904"].line_type == None
assert m["oh_b4904"].from_element == "b4909"
assert m["oh_b4904"].to_element == "b4904"
assert m["oh_b4904"].length == pytest.approx(161.84879)
assert m["oh_b4904"].nominal_voltage == float(4.16) * 10 ** 3
assert m["oh_b4904"].is_fuse is None
assert m["oh_b4904"].is_switch is None
assert m["oh_b4904"].faultrate == parsed_values["Line"]["faultrate"]
z1 = complex(0.12241009, 0.39494091) # Specified in the dss input
z0 = complex(0.33466485, 1.2742766) # Specified in the dss input
diag = ((2 * z1 + z0) / 3) * 0.001 # Units = km
diag = round(diag.real, 10) + round(diag.imag, 10) * 1j
rem = ((z0 - z1) / 3) * 0.001 # Units =km
rem = round(rem.real, 11) + round(rem.imag, 10) * 1j
imp_matrix = np.zeros((3, 3), dtype=np.complex_)
imp_matrix.fill(rem)
np.fill_diagonal(imp_matrix, diag)
imp_matrix = imp_matrix.tolist()
assert m["oh_b4904"].impedance_matrix == imp_matrix
c1 = complex(11.1973, 0) # Specified in the dss input
c0 = complex(4.8089, 0) # Specified in the dss input
c_diag = ((2 * c1 + c0) / 3) * 0.001 # Units = km
c_diag = round(c_diag.real, 9) + c_diag.imag * 1j
c_rem = ((c0 - c1) / 3) * 0.001 # Units = km
c_rem = round(c_rem.real, 9) + c_rem.imag * 1j
cap_matrix = np.zeros((3, 3), dtype=np.complex_)
cap_matrix.fill(c_rem)
np.fill_diagonal(cap_matrix, c_diag)
cap_matrix = cap_matrix.tolist()
assert m["oh_b4904"].capacitance_matrix == cap_matrix
assert m["oh_b4904"].feeder_name == "sourcebus_src"
assert m["oh_b4904"].is_recloser is None
assert m["oh_b4904"].is_breaker is None
for w in m["oh_b4904"].wires:
assert w.nameclass == ""
assert w.X is None
assert w.Y is None
assert w.diameter is None
assert w.gmr is None
assert w.ampacity == float(732)
assert w.emergency_ampacity == float(871)
assert w.resistance is None
assert w.insulation_thickness == 0.0
assert w.is_open is None
assert w.concentric_neutral_gmr is None
assert w.concentric_neutral_resistance is None
assert w.concentric_neutral_diameter is None
assert w.concentric_neutral_outside_diameter is None
assert w.concentric_neutral_nstrand is None
assert (
len(m["b18944-1"].phase_capacitors) == 3
) # b18944-1 is a three phase capacitor
assert sum(
[phase_capacitor.var for phase_capacitor in m["b18944-1"].phase_capacitors]
) == pytest.approx(1200 * 10 ** 3, 0.0001)
assert m["b18944-1"].name == "b18944-1"
assert m["b18944-1"].nominal_voltage == float(12.47) * 10 ** 3
assert (
m["b18944-1"].connection_type == parsed_values["Capacitor"]["connection_type"]
)
assert m["b18944-1"].delay == 31
assert m["b18944-1"].mode == "voltage"
assert m["b18944-1"].low == parsed_values["Capacitor"]["low"]
assert m["b18944-1"].high == parsed_values["Capacitor"]["high"]
assert m["b18944-1"].resistance == 0.0
assert m["b18944-1"].reactance == 0.0
assert m["b18944-1"].susceptance is None
assert m["b18944-1"].conductance is None
assert m["b18944-1"].pt_ratio == parsed_values["Capacitor"]["pt_ratio"]
assert m["b18944-1"].ct_ratio == parsed_values["Capacitor"]["ct_ratio"]
assert m["b18944-1"].pt_phase == "A"
assert m["b18944-1"].connecting_element == "b18941"
assert m["b18944-1"].measuring_element == "Line.OH_B18944"
assert m["b18944-1"].feeder_name == "sourcebus_src"
assert set([pc.phase for pc in m["b18944-1"].phase_capacitors]) == set(
["A", "B", "C"]
)
assert (
len(m["b18944-2"].phase_capacitors) == 3
) # b18944-2 is a three phase capacitor
assert sum(
[phase_capacitor.var for phase_capacitor in m["b18944-2"].phase_capacitors]
) == pytest.approx(1200 * 10 ** 3, 0.0001)
assert m["b18944-2"].name == "b18944-2"
assert m["b18944-2"].nominal_voltage == float(12.47) * 10 ** 3
assert (
m["b18944-2"].connection_type == parsed_values["Capacitor"]["connection_type"]
)
assert m["b18944-2"].delay == 31
assert m["b18944-2"].mode == "voltage"
assert m["b18944-2"].low == 118
assert m["b18944-2"].high == 124
assert m["b18944-2"].resistance == 0.0
assert m["b18944-2"].reactance == 0.0
assert m["b18944-2"].susceptance is None
assert m["b18944-2"].conductance is None
assert m["b18944-2"].pt_ratio == parsed_values["Capacitor"]["pt_ratio"]
assert m["b18944-2"].ct_ratio == parsed_values["Capacitor"]["ct_ratio"]
assert m["b18944-2"].pt_phase == "A"
assert m["b18944-2"].connecting_element == "b18941"
assert m["b18944-2"].measuring_element | |
'Flags', 'model_type': 'Type', 'id': 'ID', 'name': 'Name', 'picture': ''}
cols = ['picture', 'name', 'description', 'first_year', 'country']
lrange = dict(entry=[x for x in entry if x], styles=dict(zip(cols, cols)))
lsection = dict(columns=cols, headers=hdrs, range=[lrange], note='', name=sec.name)
llistix = dict(section=[lsection])
return pif.render.format_template('simplelistix.html', llineup=llistix)
cols = 4
def pub_text_link(pub):
pic = pif.render.fmt_img(pub['id'], prefix='s')
name = pic + '<br>' + pub['name'] if pic else pub['name']
return {'text': pif.render.format_link("makes.cgi?make=" + pub['id'], name)}
ents = [pub_text_link(pub_ent(x)) for x in pubs]
llineup = {'id': '', 'name': '', 'columns': cols, 'header': '', 'footer': '',
'section': [{'columns': cols, 'range': [{'entry': ents, 'id': 'makelist'}]}]}
pif.render.format_matrix_for_template(llineup)
return pif.render.format_template('simplematrix.html', llineup=llineup)
def make_relateds(pif, ref_id, pub_id, imgs):
    """Build matrix entries (HTML snippets) for castings related to a publication.

    Args:
        pif: per-request context object providing dbh (database) and render handles.
        ref_id: reference id used to fetch variation selects for this publication.
        pub_id: publication id whose related castings are listed.
        imgs: image ids for the publication; the first (if any) is echoed back
            in the links so the target page can link back to this picture.

    Returns:
        list of {'text': <html>} dicts, one per related model.
    """
    pic = imgs[0] if imgs else ''
    relateds = pif.dbh.fetch_casting_relateds(pub_id, section_id='pub')
    vs = pif.dbh.fetch_variation_selects_by_ref(ref_id, pub_id)
    retval = []
    for related in relateds:
        related['id'] = related['casting_related.related_id']
        # Variation selects belonging to this related casting.
        vars = [x for x in vs if x['variation_select.mod_id'] == related['id']]
        # Descriptions come from both the variations and the ';'-separated
        # casting_related description field.
        descs = [x.get('variation.text_description', '') for x in vars] + related.get(
            'casting_related.description', '').split(';')
        related = pif.dbh.modify_man_item(related)
        related['descs'] = [x for x in descs if x]
        related['imgid'] = [related['id']]
        for s in related['descs']:
            # "same as <id>" lets this entry fall back to another model's image.
            if s.startswith('same as '):
                related['imgid'].append(s[8:])
        related['img'] = pif.render.format_image_required(
            related['imgid'], made=related['made'], pdir=config.IMG_DIR_MAN, vars=[
            x['variation_select.var_id'] for x in vars], largest=mbdata.IMG_SIZ_SMALL)
        if related['link']:
            # Carry enough state in the query string for the target page to
            # render a link back here.
            related['link'] = '%s=%s&dir=%s&pic=%s&ref=%s&sec=%s' % (
                related['link'], related['linkid'], pif.render.pic_dir, pic, ref_id, pub_id)
            related['img'] = '<a href="%(link)s">%(img)s</a>' % related
        related['descs'] = '<br>'.join(['<div class="varentry">%s</div>' % x for x in related['descs']])
        retval.append({
            'text': '<span class="modelnumber">%(id)s</span><br>\n%(img)s<br>\n<b>%(name)s</b>\n<br>%(descs)s\n' %
            related})
    return retval
def single_publication(pif, pub_id):
    """Render the detail page (pub.html) for a single publication.

    Args:
        pif: per-request context object (dbh, render, form, permissions).
        pub_id: id of the publication to show.

    Raises:
        useful.SimpleError: if the publication does not exist.

    Returns:
        Rendered 'pub.html' template as a string.
    """
    man = pif.dbh.fetch_publication(pub_id).first
    if not man:
        raise useful.SimpleError("That publication was not found.")
    # should just use man.section_id
    sec = get_section_by_model_type(pif, man.base_id.model_type)
    pif.set_page_info(sec.page_info.id)
    man['casting_type'] = 'Publication'
    man['name'] = man['base_id.rawname'].replace(';', ' ')
    imgs = pub_images(pif, pub_id.lower())
    relateds = make_relateds(pif, 'pub.' + mbdata.model_type_names[man['base_id.model_type']].lower(), pub_id, imgs)
    # Admin-only sidebar with editor / library / upload shortcuts.
    left_bar_content = ''
    if pif.is_allowed('a'):  # pragma: no cover
        left_bar_content += '<p><b><a href="%s">Base ID</a><br>\n' % pif.dbh.get_editor_link('base_id', {'id': pub_id})
        left_bar_content += '<a href="%s">Publication</a><br>\n' % pif.dbh.get_editor_link(
            'publication', {'id': pub_id})
        left_bar_content += '<a href="traverse.cgi?d=%s">Library</a><br>\n' % pif.render.pic_dir.replace('pic', 'lib')
        left_bar_content += '<a href="upload.cgi?d=%s&n=%s&c=%s">Product Upload</a><br>\n' % (
            pif.render.pic_dir.replace('pic', 'lib'), pub_id, pub_id)
    # Header box: main image (if any) plus printable description.
    upper_box = ''
    if imgs:
        upper_box += pif.render.format_image_link_image(imgs[0], link_largest=mbdata.IMG_SIZ_LARGE)
    # else:
    #     upper_box += pif.render.format_image_link_image(img, link_largest=mbdata.IMG_SIZ_LARGE)
    if man['base_id.description']:
        upper_box += '<br>' if upper_box else ''
        upper_box += useful.printablize(man['base_id.description'])
    # Thumbnail range only when there is more than one image.
    lran = [{
        'id': 'ran',
        'entry': [{'text': pif.render.format_image_link_image(img[img.rfind('/') + 1:])}
                  for img in sorted(imgs)] if imgs else [{'text': pif.render.format_image_link_image(pub_id)}]
    } if len(imgs) > 1 else {}]
    if relateds:
        lran.append({'id': 'related', 'entry': relateds, 'name': 'Related Models'})
    llineup = {'id': pub_id, 'name': '', 'section': [{'id': 'sec', 'range': lran, 'columns': 4}], 'columns': 4}
    pif.render.set_button_comment(pif, 'id=%s' % pub_id)
    pif.render.format_matrix_for_template(llineup)
    context = {
        'title': man.get('name', ''),
        'note': '',
        'type_id': 'p_' + sec.id,
        # 'icon_id': pub_id,
        'vehicle_type': '',
        'rowspan': 5 if upper_box else 4,
        'left_bar_content': left_bar_content,
        'upper_box': upper_box,
        'llineup': llineup,
    }
    return pif.render.format_template('pub.html', **context)
def pub_images(pif, pub_id):
    """Return the sorted list of image ids for a publication.

    Variant images are files named '?_<pub_id>_*.jpg'; the bare id is
    prepended when a base image ('<pub_id>.jpg' or '?_<pub_id>.jpg') exists.
    """
    pic_dir = pif.render.pic_dir
    variant_files = glob.glob(os.path.join(pic_dir, '?_' + pub_id + '_*.jpg'))
    # Strip the size prefix ('?_') and the '.jpg' suffix; dedupe.
    imgs = list({os.path.split(fn)[1][2:-4] for fn in variant_files})
    has_base = os.path.exists(os.path.join(pic_dir, pub_id + '.jpg'))
    has_prefixed = glob.glob(os.path.join(pic_dir, '?_' + pub_id + '.jpg'))
    if has_base or has_prefixed:
        imgs.insert(0, pub_id)
    imgs.sort()
    return imgs
# ----- advertising ---- the special snowflake -------------------------
@basics.web_page
def ads_main(pif):
    """Render the advertisements index page (print and video sections).

    Gathers print-ad publications from the database, image files from the
    pic/lib directories, and supplemental entries from list.dat; video ads
    come from the link_line table. Returns the rendered simpleulist.html.
    """
    pif.render.print_html()
    pif.render.hierarchy_append('/', 'Home')
    pif.render.hierarchy_append('/database.php', 'Database')
    pif.render.hierarchy_append('/cgi-bin/ads.cgi', 'Advertisements')
    pif.render.set_button_comment(pif)
    pic_dir = pif.render.pic_dir
    lib_dir = pic_dir.replace('pic', 'lib')
    ranges = []
    # Optional title search filter from the query string.
    sobj = pif.form.search('title')
    # Format a country code and its flag image (US flag suppressed).
    def fmt_cy(ent):
        cy = ent.get('country', '')
        cyflag = pif.render.show_flag(cy) if (cy and cy != 'US') else ''
        cyflag = (' <img src="' + cyflag[1] + '">') if cyflag else ''
        return cy, cyflag
    # Format one video-ad link line (plus admin edit link).
    def fmt_vid(ent):
        # sep = pif.render.format_image_art('wheel.gif', also={'class': 'dlm'})
        # add country
        cy, cyflag = fmt_cy(ent)
        cmt = ent['description']
        ostr = pif.render.format_link(ent['url'], ent['name'])
        if cmt:
            ostr += ' ' + cmt
        ostr += cyflag
        ostr += (' ' + pif.render.format_link('edlinks.cgi?id=%s' % ent['id'],
                 '<i class="fas fa-edit"></i>')) if pif.is_allowed('ma') else ''
        return ostr
    # id page_id section_id display_order flags associated_link last_status link_type country url name description note
    vlinks = [fmt_vid(x) for x in pif.dbh.depref(
        'link_line', pif.dbh.fetch_link_lines(page_id='links.others', section='Lvideoads', order='name'))
        if useful.search_match(sobj, x['name'])]
    # Format one print-ad entry: image link, year, country flag, admin tools.
    def fmt_pub(ent, pdir=None):
        pdir = pdir if pdir else pic_dir
        ldir = pdir.replace('pic', 'lib')
        # ent: id, description, country, first_year, model_type
        cy, post = fmt_cy(ent)
        _, floc = pif.render.find_image_file(ent['id'], largest='e', pdir=pdir)
        _, lloc = pif.render.find_image_file(ent['id'], largest='e', pdir=ldir)
        # floc = pdir + '/' + ent['id'] + '.jpg'
        # lloc = floc.replace('/pic/', '/lib/')
        if floc:
            if ent['model_type']:
                url = 'pub.cgi?id=' + ent['id']
            else:
                url = '/' + pdir + '/' + floc
        else:
            url = '/' + ldir + '/' + lloc
        if not useful.search_match(sobj, ent['description']):
            return ''
        name = useful.printablize(ent['description'])
        if ent['first_year']:
            name += ' (' + ent['first_year'] + ')'
        if pif.is_allowed('ma'):
            # Admin extras: edit/add, image-edit, upload links and raw id.
            if ent['model_type']:
                post += ' ' + pif.render.format_link(
                    pif.dbh.get_editor_link('publication', {'id': ent['id']}), '<i class="fas fa-edit"></i>')
            else:
                post += ' ' + pif.render.format_link(
                    '/cgi-bin/mass.cgi?type=ads&id=%s&description=%s&year=%s&country=%s' % (
                        ent['id'], useful.url_quote(ent['description'], plus=True), ent['first_year'], cy),
                    '<i class="far fa-plus-square"></i>')
            if floc:
                post += ' ' + pif.render.format_link(
                    '/cgi-bin/imawidget.cgi?d=%s&f=%s' % (pdir, floc), '<i class="fas fa-paint-brush"></i>')
            elif lloc:
                post += ' ' + pif.render.format_link(
                    '/cgi-bin/imawidget.cgi?d=%s&f=%s' % (ldir, lloc), '<i class="fas fa-paint-brush"></i>')
            post += ' ' + pif.render.format_link(
                '/cgi-bin/upload.cgi?d=%s&n=%s' % (ldir, ent['id']), '<i class="fas fa-upload"></i>')
            name = ent['id'] + ' - ' + name
        if floc:
            return pif.render.format_link(url, name) + post
        return name + post
    # Map database column names onto the flat keys fmt_pub expects.
    fields = {
        'id': 'id',
        'description': 'base_id.description',
        'first_year': 'base_id.first_year',
        'country': 'country',
        'model_type': 'base_id.model_type',
        'rawname': 'base_id.rawname',
    }
    def mangle_object(x):
        return {y: x[fields[y]] for y in fields}
    links = {x.id: mangle_object(x)
             for x in pif.dbh.fetch_publications(model_type='AD', order='base_id.first_year,base_id.id')}
    # Reconcile database entries against images on disk.
    pic_ims = ad_images(pic_dir)
    missing_pics = sorted(set(links.keys()) - set(pic_ims))
    lib_ims = sorted(set(ad_images(lib_dir)) - set(links.keys()))
    pic_ims = sorted(set(pic_ims) - set(links.keys()))
    # list.dat supplies '|'-separated stub entries not yet in the database.
    list_ents = {ent[0]: dict(itertools.zip_longest(['id', 'description', 'first_year', 'country', 'model_type'], ent))
                 for ent in [x.strip().split('|') for x in open(pic_dir + '/list.dat').readlines()]}
    list_ids = sorted(set(list_ents.keys()) - set(links.keys()))
    link_ids = sorted(set(links.keys()) - set(missing_pics), key=lambda x: (links[x]['first_year'], links[x]['id']))
    plinks = list()
    for pic_id in link_ids:
        plinks.append(fmt_pub(links[pic_id]))
    ranges.append({'entry': plinks})
    plinks = [fmt_pub(list_ents[lid]) for lid in list_ids]
    if plinks:
        ranges.append({'name': 'More information is needed on these (year, location).', 'entry': plinks})
    if pif.is_allowed('ma'):
        # Admin-only sections: unpublished library images and DB rows
        # whose pictures are missing.
        plinks = [fmt_pub({'id': ent, 'description': ent, 'first_year': '', 'model_type': ''}, lib_dir)
                  for ent in lib_ims]
        if plinks:
            ranges.append({'name': '<i>Nonpublished ads</i>', 'entry': plinks})
        missing = [fmt_pub(links[pic_id]) for pic_id in missing_pics]
        if missing:
            ranges.append({'name': '<i>Database entries missing pictures</i>', 'entry': missing})
    lsecs = [
        {'id': 'print', 'name': 'Print Advertising', 'range': ranges},
        {'id': 'video', 'name': 'Video Advertising', 'range': [{'entry': vlinks}]},
    ]
    pif.render.set_footer(pif.render.format_button('back', '/') + ' to the index.')
    if pif.is_allowed('ma'):
        pif.render.set_footer(
            pif.render.format_link('/cgi-bin/upload.cgi?d=%s' % lib_dir, 'Upload new ad') + ' - ' +
            pif.render.format_link('/cgi-bin/edlinks.cgi?page_id=links.others&sec=Lvideoads&add=1', 'Add new video'))
    llineup = {'section': lsecs}
    return pif.render.format_template('simpleulist.html', llineup=llineup)
def ad_images(pdir):
    """Return the ids of ad images (*.jpg) in *pdir*.

    A two-character size prefix such as 's_' is stripped from the file stem.

    Args:
        pdir: directory path to scan (non-recursive).

    Returns:
        list of image-id strings (unordered, as glob returns them).
    """
    def mangle_name(fn):
        stem = fn[fn.rfind('/') + 1:-4]
        # Guard len: a one-character stem (e.g. 'a.jpg') used to raise
        # IndexError on stem[1].
        return stem[2:] if len(stem) > 1 and stem[1] == '_' else stem
    return [mangle_name(fn) for fn in glob.glob(pdir + '/*.jpg')]
# ----- command line ---------------------------------------------------
def check_boxes(pif):
    """Audit box images: print one row per picture root showing which sizes exist.

    For each box's picture roots, prints '.' when a size is missing, the
    lowercase size letter when the image is present but narrower than
    expected, and the uppercase letter when it is full size. Ends by
    running check_database(pif).
    """
    boxes = find_boxes(pif)
    for key in sorted(boxes.keys()):
        for picroot in get_pic_roots(boxes[key]['id'], boxes[key]['box_type.box_type'][0]):
            # end=' ' keeps the whole row on one line; the old py2 idiom
            # 'print x,' had been mistranslated as print(x,), where the
            # trailing comma is a no-op and every item got its own line.
            print('%-9s' % picroot, end=' ')
            for picsize in 'mcs':
                img = pif.render.find_image_path(picroot, prefix=picsize + '_', pdir=config.IMG_DIR_BOX)
                if not img:
                    print('.', end=' ')
                else:
                    imginf = imglib.img_info(img)
                    # imginf[1] is the image width in pixels.
                    if imginf[1] < mbdata.imagesizes[picsize][0]:
                        print(picsize, end=' ')
                    else:
                        print(picsize.upper(), end=' ')
            print()
    check_database(pif)
def check_database(pif):
    """Validate box_type rows against box_lookups and count x-size pictures.

    Prints any field value not present in box_lookups, then any lookup keys
    unused by the data, and finally how many rows have an 'x_' image on disk.
    """
    count = 0
    fields = {}
    d = pif.dbh.fetch('box_type')
    for e in d:
        # Expected path of the extra-large ('x_') box image for this row.
        x = ('.' + config.IMG_DIR_BOX + '/x_' + e['box_type.mod_id'] + '-' +
            e['box_type.box_type'][0] + e['box_type.pic_id'] + '.jpg')
        count += int(os.path.exists(x.lower()))
        for f in e:
            # f[9:] strips the 'box_type.' column-name prefix.
            if e[f] and f[9:] not in ('notes', 'year', 'id', 'pic_id', 'mod_id', 'model_name'):
                fields.setdefault(f[9:], set())
                fields[f[9:]].update(e[f].split('/'))
                for h in e[f].split('/'):
                    if h not in box_lookups[f[9:]]:
                        print(h, e[f], f, e['box_type.id'])
    for f in fields:
        # s1: values seen in data with no lookup entry; s2: lookup entries never used.
        s1 = fields[f] - set(box_lookups[f].keys())
        s2 = set(box_lookups[f].keys()) - fields[f] - {'_title'}
        if s1 or s2:
            print(f, s1, s2)
    print('x-pics', count, 'of', len(d))
def dump_database(pif):
cols = ['id', 'pic', 'box_size', 'year', 'additional_text', 'bottom', 'sides', 'end_flap', 'model_name', 'notes']
titles = {
'id': 'id',
'mod_id': 'mod_id',
'box_type': 'typ',
'pic_id': 'p',
'pic': 'pic',
'box_size': 'z',
'year': 'year',
'additional_text': 'addl_text',
'bottom': 'bottom',
'sides': 'sides',
'end_flap': 'end_flap',
'model_name': 'model_name',
'notes': 'notes',
}
db = pif.dbh.depref('box_type', pif.dbh.fetch('box_type'))
lens = {col: 0 for col in cols}
for row in db:
row['pic'] = '%s-%s%s' % (row['mod_id'], row['box_type'][0], row['pic_id'])
for col in cols[1:]:
lens[col] = max(lens[col], len(row[col]))
lens['id'] = 4
# id | mod_id | typ | p | z | year | addl_text | bottom | sides | end_flap | model_name | notes
print(' | '.join([('%%-%ds' % lens[col]) % titles[col] for col | |
= doKeywordArgs(keys,d)
newline = d.get('newline',None)
align = d.get('align',0)
# if not align: align = 0
# Compute the caller name.
try: # get the function name from the call stack.
f1 = sys._getframe(1) # The stack frame, one level up.
code1 = f1.f_code # The code object
name = code1.co_name # The code name
except Exception:
name = '?'
if name == "?":
name = "<unknown>"
# Pad the caller name.
if align != 0 and len(name) < abs(align):
pad = ' ' * (abs(align) - len(name))
if align > 0: name = name + pad
else: name = pad + name
# Munge *args into s.
# print ('trace:args...')
# for z in args: print (isString(z),repr(z))
result = [name]
for arg in args:
if isString(arg):
pass
elif isBytes(arg):
arg = toUnicode(arg)
else:
arg = repr(arg)
if result:
result.append(" " + arg)
else:
result.append(arg)
s = ''.join(result)
# 'print s,' is not valid syntax in Python 3.x.
pr(s,newline=newline)
#@+node:ekr.20120114064730.10485: *3* translateArgs
def translateArgs(args, d):
    '''Return the concatenation of s and all args,
    with odd args translated.'''
    enc = sys.getdefaultencoding()
    consoleEncoding = enc if isValidEncoding(enc) else 'utf-8'
    spaces = d.get('spaces')
    result = []
    for n, arg in enumerate(args, start=1):
        # Plain (byte) strings are converted to unicode first.
        if type(arg) == type('a'):
            arg = toUnicode(arg, consoleEncoding)
        # Non-strings are shown via repr; odd-positioned strings are translated.
        if not isString(arg):
            arg = repr(arg)
        elif n % 2 == 1:
            arg = translateString(arg)
        if arg:
            if result and spaces:
                result.append(' ')
            result.append(arg)
    return ''.join(result)
#@+node:ekr.20120114064730.10486: *3* translateString & tr
def translateString(s):
    '''Return the translated text of s.

    On Python 3, bytes are decoded as utf-8 first. The dead
    "translateToUpperCase" scaffolding (`if False: ...` on both branches)
    has been removed; both branches reduced to gettext.gettext(s).
    '''
    if isPython3 and not isString(s):
        s = str(s, 'utf-8')
    return gettext.gettext(s)

tr = translateString
#@+node:ekr.20120114064730.10488: ** SAG.os.path wrappers
#@+at Note: all these methods return Unicode strings. It is up to the user to
# convert to an encoded string as needed, say when opening a file.
#@+node:ekr.20120114064730.10489: *3* os_path_abspath
def os_path_abspath(path):
    """Convert a path to an absolute path, returned as a Unicode string."""
    return toUnicodeFileEncoding(os.path.abspath(toUnicodeFileEncoding(path)))
#@+node:ekr.20120114064730.10490: *3* os_path_basename
def os_path_basename(path):
    """Return the second half of the pair returned by split(path), as Unicode."""
    return toUnicodeFileEncoding(os.path.basename(toUnicodeFileEncoding(path)))
#@+node:ekr.20120114064730.10491: *3* os_path_dirname
def os_path_dirname(path):
    """Return the first half of the pair returned by split(path), as Unicode."""
    return toUnicodeFileEncoding(os.path.dirname(toUnicodeFileEncoding(path)))
#@+node:ekr.20120114064730.10492: *3* os_path_exists
def os_path_exists(path):
    """Return True if path exists."""
    return os.path.exists(toUnicodeFileEncoding(path))
#@+node:ekr.20120114064730.10493: *3* os_path_expandExpression
def os_path_expandExpression(s, **keys):
    '''Expand {{anExpression}} in c's context.

    The first {{...}} span in s is evaluated and its str() result is
    spliced back in. Returns s unchanged when there is no c or no
    well-formed {{...}} span; returns '' when s is empty.
    '''
    # Renamed from 'trace': the old local bool shadowed the module-level
    # trace() function, so the debug calls below raised TypeError when hit.
    verbose = False
    s1 = s
    c = keys.get('c')
    if not c:
        trace('can not happen: no c', callers())
        return s
    if not s:
        if verbose: trace('no s')
        return ''
    i = s.find('{{')
    j = s.find('}}')
    if -1 < i < j:
        exp = s[i+2:j].strip()
        if exp:
            try:
                import os
                import sys
                p = c.p
                # SECURITY: eval of an embedded expression -- only ever use
                # this on trusted (user-authored config) strings.
                d = {
                    ### 'c':c,'g':g,'p':p,
                    'os':os,'sys':sys,}
                val = eval(exp, d)
                s = s[:i] + str(val) + s[j+2:]
                if verbose: trace(s1, s)
            except Exception:
                trace(callers())
                es_exception(full=True, c=c, color='red')
    return s
#@+node:ekr.20120114064730.10494: *3* os_path_expanduser
def os_path_expanduser(path):
    """wrap os.path.expanduser"""
    return os.path.normpath(os.path.expanduser(toUnicodeFileEncoding(path)))
#@+node:ekr.20120114064730.10495: *3* os_path_finalize & os_path_finalize_join
def os_path_finalize(path, **keys):
    '''
    Expand '~', then return os.path.normpath, os.path.abspath of the path.
    There is no corresponding os.path method'''
    c = keys.get('c')
    if c:
        # Expand {{...}} expressions in c's context first.
        path = os_path_expandExpression(path, **keys)
    path = os_path_expanduser(path)
    return os.path.normpath(os.path.abspath(path))
def os_path_finalize_join(*args, **keys):
    '''Do os.path.join(*args), then finalize the result.'''
    if keys.get('c'):
        # Expand {{...}} expressions in each component first.
        args = [os_path_expandExpression(z, **keys) for z in args if z]
    joined = os_path_join(*args, **keys)  # Handles expanduser.
    return os.path.normpath(os.path.abspath(joined))
#@+node:ekr.20120114064730.10496: *3* os_path_getmtime
def os_path_getmtime(path):
    """Return the modification time of path."""
    return os.path.getmtime(toUnicodeFileEncoding(path))
#@+node:ekr.20120114064730.10497: *3* os_path_getsize
def os_path_getsize(path):
    '''Return the size of path.'''
    return os.path.getsize(toUnicodeFileEncoding(path))
#@+node:ekr.20120114064730.10498: *3* os_path_isabs
def os_path_isabs(path):
    """Return True if path is an absolute path."""
    return os.path.isabs(toUnicodeFileEncoding(path))
#@+node:ekr.20120114064730.10499: *3* os_path_isdir
def os_path_isdir(path):
    """Return True if the path is a directory."""
    return os.path.isdir(toUnicodeFileEncoding(path))
#@+node:ekr.20120114064730.10500: *3* os_path_isfile
def os_path_isfile(path):
    """Return True if path is a file."""
    return os.path.isfile(toUnicodeFileEncoding(path))
#@+node:ekr.20120114064730.10501: *3* os_path_join
def os_path_join(*args, **keys):
    """Join the given path components and return the result as Unicode.

    Each non-empty component has '~' expanded before joining. **keys is
    accepted for interface compatibility (a 'c' argument used to be
    consulted here) but is currently unused.

    Removed: dead 'trace' debug scaffolding and the commented-out legacy
    '!!'/'.' base-directory handling.
    """
    uargs = [toUnicodeFileEncoding(arg) for arg in args]
    uargs = [os_path_expanduser(z) for z in uargs if z]
    path = os.path.join(*uargs)
    # May not be needed on some Pythons.
    return toUnicodeFileEncoding(path)
#@+node:ekr.20120114064730.10502: *3* os_path_normcase
def os_path_normcase(path):
    """Normalize the path's case, returning a Unicode string."""
    return toUnicodeFileEncoding(os.path.normcase(toUnicodeFileEncoding(path)))
#@+node:ekr.20120114064730.10503: *3* os_path_normpath
def os_path_normpath(path):
    """Normalize the path, returning a Unicode string."""
    return toUnicodeFileEncoding(os.path.normpath(toUnicodeFileEncoding(path)))
#@+node:ekr.20120114064730.10504: *3* os_path_realpath
def os_path_realpath(path):
    """Return the canonical path (symlinks resolved), as Unicode."""
    resolved = os.path.realpath(toUnicodeFileEncoding(path))
    return toUnicodeFileEncoding(resolved)
#@+node:ekr.20120114064730.10505: *3* os_path_split
def os_path_split(path):
    """Split path into (head, tail); both parts are returned as Unicode strings."""
    head, tail = os.path.split(toUnicodeFileEncoding(path))
    return toUnicodeFileEncoding(head), toUnicodeFileEncoding(tail)
#@+node:ekr.20120114064730.10506: *3* os_path_splitext
def os_path_splitext(path):
    """Split path into (root, extension); both parts are returned as Unicode strings."""
    root, ext = os.path.splitext(toUnicodeFileEncoding(path))
    return toUnicodeFileEncoding(root), toUnicodeFileEncoding(ext)
#@+node:ekr.20120114064730.10507: *3* os_startfile
def os_startfile(fname):
    """Open fname with the platform's default application.

    Windows uses os.startfile, macOS shells out to `open`, everything else
    uses `xdg-open`.  This is best-effort: errors are swallowed or routed to
    an os.system fallback.
    """
    if sys.platform.startswith('win'):
        os.startfile(fname)
    elif sys.platform == 'darwin':
        # From Marc-Antoine Parent.
        try:
            subprocess.call(['open', fname])
        except OSError:
            pass # There may be a spurious "Interrupted system call"
        except ImportError:
            # NOTE(review): subprocess.call cannot raise ImportError here; this
            # branch looks like a leftover from a lazy-import version — confirm.
            os.system("open '%s'" % (fname,))
    else:
        os.system('xdg-open "%s"'%fname)
#@+node:ekr.20120114064730.10508: *3* toUnicodeFileEncoding
def toUnicodeFileEncoding(path):
    """Replace backslashes with os.sep in path and return it as Unicode."""
    if path: path = path.replace('\\', os.sep)
    # Yes, this is correct. All os_path_x functions return Unicode strings.
    return toUnicode(path)
#@+node:ekr.20120114064730.10509: ** SAG.Scanning
#@+node:ekr.20120114064730.10529: *3* escaped
# Returns True if s[i] is preceded by an odd number of backslashes.
def escaped(s, i):
    """Return True if s[i] is preceded by an odd number of backslashes."""
    j = i
    # Walk left over the run of consecutive backslashes ending at i-1.
    while j > 0 and s[j - 1] == '\\':
        j -= 1
    return (i - j) % 2 == 1
#@+node:ekr.20120114064730.10530: *3* find_line_start
def find_line_start(s, i):
    """Return the index of the first character of the line containing s[i]."""
    if i < 0:
        return 0  # defensive: negative index is treated as start of text
    # Highest newline index at or before i; -1 (not found) maps to start 0.
    newline_at = s.rfind('\n', 0, i + 1)
    return newline_at + 1
#@+node:ekr.20120114064730.10531: *3* find_on_line
def find_on_line(s, i, pattern):
    """Return the index of pattern within the line starting at s[i], or -1."""
    line_end = s.find('\n', i)
    if line_end == -1:
        line_end = len(s)  # last line has no trailing newline
    return s.find(pattern, i, line_end)
#@+node:ekr.20120114064730.10532: *3* is_c_id
def is_c_id(ch):
    """Return True if ch is a C identifier character (delegates to isWordChar)."""
    return isWordChar(ch)
#@+node:ekr.20120114064730.10533: *3* is_nl
def is_nl(s, i):
    """Return True if s[i] exists and is a newline or carriage return."""
    return i < len(s) and s[i] in ('\n', '\r')
#@+node:ekr.20120114064730.10534: *3* is_special
# We no longer require that the directive appear before any @c directive or section definition.
def is_special(s,i,directive):
    '''Return (True, index) if the body text contains the @ directive, else (False, -1).

    ``directive`` must start with '@'.  For ``@others``/``@all`` the scan also
    skips leading whitespace after each line break, so those directives may be
    indented; all other directives must start a line.
    '''
    # j = skip_line(s,i) ; trace(s[i:j],':',directive)
    assert (directive and directive [0] == '@' )
    # 10/23/02: all directives except @others must start the line.
    skip_flag = directive in ("@others","@all")
    while i < len(s):
        if match_word(s,i,directive):
            return True, i
        else:
            # Advance to the next line; for @others/@all also step past indentation.
            i = skip_line(s,i)
            if skip_flag:
                i = skip_ws(s,i)
    return False, -1
#@+node:ekr.20120114064730.10535: *3* is_ws & is_ws_or_nl
def is_ws(c):
    """Return True if c is horizontal whitespace (space or tab)."""
    return c in (' ', '\t')

def is_ws_or_nl(s, i):
    """Return True if s[i] is horizontal whitespace, newline, or carriage return."""
    if is_nl(s, i):
        return True
    return i < len(s) and is_ws(s[i])
#@+node:ekr.20120114064730.10536: *3* match
# Warning: this code makes no assumptions about what follows pattern.
def match(s, i, pattern):
    """Return True if pattern occurs in s starting exactly at index i.

    Makes no assumption about what follows the pattern.  A falsy s or
    pattern short-circuits, preserving the original truthy/falsy result.
    """
    if not (s and pattern):
        return s and pattern
    return s.find(pattern, i, i + len(pattern)) == i
#@+node:ekr.20120114064730.10537: *3* match_c_word
def match_c_word(s, i, name):
    """Return True if name matches s at i and is not followed by a C id char."""
    if not name:
        return False  # None or empty string never matches
    n = len(name)
    if s[i:i+n] != name:
        return False
    # A match at end-of-string counts; otherwise the next char must break the word.
    return i + n == len(s) or not is_c_id(s[i+n])
#@+node:ekr.20120114064730.10538: *3* match_ignoring_case
def match_ignoring_case(s1, s2):
    """Return True if s1 and s2 are equal ignoring case; False if either is None."""
    if s1 is None or s2 is None:
        return False
    return s1.lower() == s2.lower()
#@+node:ekr.20120114064730.10539: *3* match_word
def match_word(s, i, pattern):
    """Return True if pattern matches s at i as a whole word.

    The character following the pattern (if any) must not be a word
    character, per isWordChar.
    """
    if not pattern:
        return False  # None or empty pattern never matches
    n = len(pattern)
    if s.find(pattern, i, i + n) != i:
        return False
    if i + n >= len(s):
        return True  # match runs to end of string
    return not isWordChar(s[i + n])
#@+node:ekr.20120114064730.10511: *3* scanf
# A quick and dirty sscanf. Understands only %s and %d.
def scanf (s,pat):
count = pat.count("%s") + pat.count("%d")
pat = pat.replace("%s","(\S+)")
pat = pat.replace("%d","(\d+)")
parts = re.split(pat,s)
result = []
for part in parts:
if len(part) > 0 and len(result) < count:
result.append(part)
# trace("scanf | |
order hexahedron (8 nodes associated with the vertices, 24 with the edges, 24 with the faces, 8 in the volume)
hexahedron_125_node = sp.int32(93) # 125-node fourth order hexahedron (8 nodes associated with the vertices, 36 with the edges, 54 with the faces, 27 in the volume)
#end class gmshtranslator
# From GMSH doc -
# 1 : 2-node line.
# 2 : 3-node triangle.
# 3 : 4-node quadrangle.
# 4 : 4-node tetrahedron.
# 5 : 8-node hexahedron.
# 6 : 6-node prism.
# 7 : 5-node pyramid.
# 8 : 3-node second order line (2 nodes associated with the vertices and 1 with the edge).
# 9 : 6-node second order triangle (3 nodes associated with the vertices and 3 with the edges).
# 10 : 9-node second order quadrangle (4 nodes associated with the vertices, 4 with the edges and 1 with the face).
# 11 : 10-node second order tetrahedron (4 nodes associated with the vertices and 6 with the edges).
# 12 : 27-node second order hexahedron (8 nodes associated with the vertices, 12 with the edges, 6 with the faces and 1 with the volume).
# 13 : 18-node second order prism (6 nodes associated with the vertices, 9 with the edges and 3 with the quadrangular faces).
# 14 : 14-node second order pyramid (5 nodes associated with the vertices, 8 with the edges and 1 with the quadrangular face).
# 15 : 1-node point.
# 16 : 8-node second order quadrangle (4 nodes associated with the vertices and 4 with the edges).
# 17 : 20-node second order hexahedron (8 nodes associated with the vertices and 12 with the edges).
# 18 : 15-node second order prism (6 nodes associated with the vertices and 9 with the edges).
# 19 : 13-node second order pyramid (5 nodes associated with the vertices and 8 with the edges).
# 20 : 9-node third order incomplete triangle (3 nodes associated with the vertices, 6 with the edges)
# 21 : 10-node third order triangle (3 nodes associated with the vertices, 6 with the edges, 1 with the face)
# 22 : 12-node fourth order incomplete triangle (3 nodes associated with the vertices, 9 with the edges)
# 23 : 15-node fourth order triangle (3 nodes associated with the vertices, 9 with the edges, 3 with the face)
# 24 : 15-node fifth order incomplete triangle (3 nodes associated with the vertices, 12 with the edges)
# 25 : 21-node fifth order complete triangle (3 nodes associated with the vertices, 12 with the edges, 6 with the face)
# 26 : 4-node third order edge (2 nodes associated with the vertices, 2 internal to the edge)
# 27 : 5-node fourth order edge (2 nodes associated with the vertices, 3 internal to the edge)
# 28 : 6-node fifth order edge (2 nodes associated with the vertices, 4 internal to the edge)
# 29 : 20-node third order tetrahedron (4 nodes associated with the vertices, 12 with the edges, 4 with the faces)
# 30 : 35-node fourth order tetrahedron (4 nodes associated with the vertices, 18 with the edges, 12 with the faces, 1 in the volume)
# 31 : 56-node fifth order tetrahedron (4 nodes associated with the vertices, 24 with the edges, 24 with the faces, 4 in the volume)
# 92 : 64-node third order hexahedron (8 nodes associated with the vertices, 24 with the edges, 24 with the faces, 8 in the volume)
# 93 : 125-node fourth order hexahedron (8 nodes associated with the vertices, 36 with the edges, 54 with the faces, 27 in the volume)
# Line: Line3: Line4:
# 0----------1 --> u 0-----2----1 0----2----3----1
# Triangle: Triangle6: Triangle9/10: Triangle12/15:
# v
# ^ 2
# | | \
# 2 2 2 9 8
# |`\ |`\ | \ | \
# | `\ | `\ 7 6 10 (14) 7
# | `\ 5 `4 | \ | \
# | `\ | `\ 8 (9) 5 11 (12) (13) 6
# | `\ | `\ | \ | \
# 0----------1 --> u 0-----3----1 0---3---4---1 0---3---4---5---1
# Quadrangle: Quadrangle8: Quadrangle9:
# v
# ^
# |
# 3-----------2 3-----6-----2 3-----6-----2
# | | | | | | |
# | | | | | | |
# | +---- | --> u 7 5 7 8 5
# | | | | | |
# | | | | | |
# 0-----------1 0-----4-----1 0-----4-----1
# Tetrahedron: Tetrahedron10:
# v
# .
# ,/
# /
# 2 2
# ,/|`\ ,/|`\
# ,/ | `\ ,/ | `\
# ,/ '. `\ ,6 '. `5
# ,/ | `\ ,/ 8 `\
# ,/ | `\ ,/ | `\
# 0-----------'.--------1 --> u 0--------4--'.--------1
# `\. | ,/ `\. | ,/
# `\. | ,/ `\. | ,9
# `\. '. ,/ `7. '. ,/
# `\. |/ `\. |/
# `3 `3
# `\.
# ` w
# Hexahedron: Hexahedron20: Hexahedron27:
# v
# 3----------2 3----13----2 3----13----2
# |\ ^ |\ |\ |\ |\ |\
# | \ | | \ | 15 | 14 |15 24 | 14
# | \ | | \ 9 \ 11 \ 9 \ 20 11 \
# | 7------+---6 | 7----19+---6 | 7----19+---6
# | | +-- |-- | -> u | | | | |22 | 26 | 23|
# 0---+---\--1 | 0---+-8----1 | 0---+-8----1 |
# \ | \ \ | \ 17 \ 18 \ 17 25 \ 18
# \ | \ \ | 10 | 12| 10 | 21 12|
# \| w \| \| \| \| \|
# 4----------5 4----16----5 4----16----5
# Prism: Prism15: Prism18:
# w
# ^
# |
# 3 3 3
# ,/|`\ ,/|`\ ,/|`\
# ,/ | `\ 12 | 13 12 | 13
# ,/ | `\ ,/ | `\ ,/ | `\
# 4------+------5 4------14-----5 4------14-----5
# | | | | 8 | | 8 |
# | ,/|`\ | | | | | ,/|`\ |
# | ,/ | `\ | | | | | 15 | 16 |
# |,/ | `\| | | | |,/ | `\|
# ,| | |\ 10 | 11 10-----17-----11
# ,/ | 0 | `\ | 0 | | 0 |
# u | ,/ `\ | v | ,/ `\ | | ,/ `\ |
# | ,/ `\ | | ,6 `7 | | ,6 `7 |
# |,/ `\| |,/ `\| |,/ `\|
# 1-------------2 1------9------2 1------9------2
# Pyramid: Pyramid13: Pyramid14:
# 4 4 4
# ,/|\ ,/|\ ,/|\
# ,/ .'|\ ,/ .'|\ ,/ .'|\
# ,/ | | \ ,/ | | \ ,/ | | \
# ,/ .' | `. ,/ .' | `. ,/ .' | `.
# ,/ | '. \ ,7 | 12 \ ,7 | 12 \
# ,/ .' w | \ ,/ .' | \ ,/ .' | \
# ,/ | ^ | \ ,/ 9 | 11 ,/ 9 | 11
# 0----------.'--|-3 `. 0--------6-.'----3 `. 0--------6-.'----3 `.
# `\ | | `\ \ `\ | `\ \ `\ | `\ \
# `\ .' +----`\ - \ -> v `5 .' 10 \ `5 .' 13 10 \
# `\ | `\ `\ \ `\ | `\ \ `\ | `\ \
# `\.' `\ `\` `\.' `\` `\.' `\`
# 1----------------2 1--------8-------2 1--------8-------2
# `\
# u
# element_strings = {
# "brick27string" : """
# add element # {0} type 27NodeBrickLT
# with nodes ({1}, {2}, {3},
# {4}, {5}, {6},
# {7}, {8}, {9},
# {10}, {11}, {12},
# {13}, {14}, {15},
# {16}, {17}, {18},
# {19}, {20}, {21},
# {22}, {23}, {24},
# {25}, {26}, {27})
# use material # {28} ;
# """,
# "brick8string" : """
# add element # {0} type 8NodeBrickLT
# with nodes ({1}, {2}, {3},
# {4}, {5}, {6},
# {7}, {8})
# use material # {9} ;
# """,
# "shell4node" : """
# add element # {tag} type 4NodeShell_ANDES with nodes ({n1}, {n2}, {n3}, {n4}) use material # | |
and x_horiz_end >= x_end:
if is_on_poly:
y_end = max(horiz_line[0][1], horiz_line[1][1])
bucket['lines'].append((y_start, y_end))
total_length += (y_end - y_start)
is_on_poly = False
else:
y_start = horiz_line[1][1] # both yco are same on a horizontal line
is_on_poly = True
total_pixel_count += (x_end - x_start) * total_length
bucket['vertical_pixel_count'] = total_length
bucket['total_pixel_count'] = total_pixel_count
current_poly_fillrate = poly_fillrate
if total_pixel_count / poly_fillrate < 3: # at least 3 frames for filling a poly
current_poly_fillrate = max(1, total_pixel_count / 3)
retval.append(bucket)
already_visited.add(x_start)
return retval
def paint_stepwise_poly(arg_buckets, increment):
    """ Paints the poly only until increment (no of pixels)
    :param arg_buckets: The bucket structure[x1,x2 and list of y1/y2 pairs] to fill stepwise the polygon
    :param increment: the number of pixels to paint
    :return: False: there are more pixel to paint; True: The whole polygon was painted

    NOTE(review): 'total_pixel_count' is cumulative across buckets (see the
    bucket builder) — confirm.  An empty arg_buckets list still crashes on
    bucket[...] below, as before.
    """
    remaining_pixels = increment
    bucket = None
    # Paint every bucket already fully covered by 'increment' in one go;
    # stop at the first bucket that is only partially covered.
    for bucket in arg_buckets:
        if increment > bucket['total_pixel_count']:
            x1 = bucket['x_start']
            x2 = bucket['x_end']
            for line in bucket['lines']:
                y1 = line[0]
                y2 = line[1]
                path = [(x1, y1), (x1, y2), (x2, y2), (x2, y1)]
                hal_fill_poly(path, color[poly_fill_color_index])
                remaining_pixels -= (x2 - x1) * (y2 - y1)
        else:
            break
    if increment < bucket['total_pixel_count']:  # paint fractional part column by column
        pixel_count = 0
        budget_exhausted = False
        for xco in range(bucket['x_start'], bucket['x_end']):
            for vert_line in bucket['lines']:
                hal_draw_rect((xco, vert_line[0]), (xco+1, vert_line[1]), color[poly_fill_color_index])
                pixel_count += (vert_line[1] - vert_line[0])
                if pixel_count > remaining_pixels:
                    # Bug fix: 'break' previously left only the inner loop, so
                    # one extra rect per remaining column was painted past the
                    # pixel budget.  Flag and leave both loops.
                    budget_exhausted = True
                    break
            if budget_exhausted:
                break
    return increment >= arg_buckets[-1]['total_pixel_count'] - 1
def pairwise(iterable):
    """s -> (s0,s1), (s1,s2), (s2, s3), ..."""
    left, right = itertools.tee(iterable)
    # Advance the trailing copy one step so the streams are offset by one.
    next(right, None)
    return zip(left, right)
def get_random_vector(arg_max, arg_min=0.0):  # TODO :optimize: rnd_x and get y by pythagoras; length is always 1.0 then
    """Return a random 2-D vector whose magnitude lies in [arg_min, arg_max).

    NOTE(review): if both direction draws hit exactly 0.5 the length is zero
    and this divides by zero — astronomically unlikely, left as-is.
    """
    dx = random.random() - .5
    dy = random.random() - .5
    length = math.sqrt(dx * dx + dy * dy)
    magnitude = random.random() * (arg_max - arg_min) + arg_min
    return [(dx / length) * magnitude, (dy / length) * magnitude]
def distance_point_line(pt, l1, l2, sqrt=True):
    """returns distance between point and line segment
    optionally omits calculating square root if only comparison is needed
    :param pt: 1st point
    :param l1: 1st point of line segment
    :param l2: 2nd point of line segment
    :param sqrt: if false returns squared distance
    :return: (squared) distance between points
    """
    px = pt[0] - l1[0]
    py = pt[1] - l1[1]
    sx = l2[0] - l1[0]
    sy = l2[1] - l1[1]
    seg_len_sq = sx * sx + sy * sy
    if seg_len_sq == 0:
        t = -1  # degenerate segment: both endpoints coincide
    else:
        t = float(px * sx + py * sy) / seg_len_sq
    # Clamp the projection parameter onto the segment and take that point.
    if t < 0:
        nearest_x, nearest_y = l1[0], l1[1]
    elif t > 1:
        nearest_x, nearest_y = l2[0], l2[1]
    else:
        nearest_x = l1[0] + t * sx
        nearest_y = l1[1] + t * sy
    dx = pt[0] - nearest_x
    dy = pt[1] - nearest_y
    dist_sq = dx * dx + dy * dy
    return math.sqrt(dist_sq) if sqrt else dist_sq
def intersect_line(p1, p2, p3, p4, strict=False):
    """
    This function will intersect the two lines given by two points each
    boolean flag strict will determine if 2nd point belongs to line
    (so if line (( 0, 0) - (0,100) ) will intersect
    with line (-50,100)- (50,100) )
    :param p1: 1st point of first line
    :param p2: 2nd point of first line
    :param p3: 1st point of second line
    :param p4: 2nd point of second line
    :param strict: if true excludes 2nd point of each line
    :return: returns point of intersection or
             () if no intersection or
             the two points, if parallel lines overlap
    """
    retval = ()
    t1 = t2 = 2.0  # sentinel: outside the valid parameter range [0, 1]
    d1 = (p2[0] - p1[0], p2[1] - p1[1])  # direction of first segment
    d2 = (p4[0] - p3[0], p4[1] - p3[1])  # direction of second segment
    det = float(d1[0] * d2[1] - d2[0] * d1[1])
    if det == 0: # same direction => parallel lines? or same line?
        d3 = (p3[0] - p1[0], p3[1] - p1[1]) # delta between p1 and p3
        d4 = (p4[0] - p2[0], p4[1] - p2[1]) # delta between p2 and p4
        det2 = float(d1[0] * d3[1] - d3[0] * d1[1]) # determinant to check if d3 is parallel to d1
        det3 = float(d2[0] * d4[1] - d4[0] * d2[1]) # determinant to check if d4 is parallel to d2
        if det2 == 0 and det3 == 0: # same line
            # Express p3 and p4 as parameters t1/t2 along the first segment.
            if d1[0] != 0: # either d1[0] (dx must be >0 or dy >0 or its not a line)
                t1 = (float(p3[0] - p1[0]) / d1[0]) # calc factor on same line
                t2 = (float(p4[0] - p1[0]) / d1[0])
            elif d1[1] != 0:
                t1 = (float(p3[1] - p1[1]) / d1[1])
                t2 = (float(p4[1] - p1[1]) / d1[1])
            elif d2[0] != 0: # p1 and p2 are same -> swap p1,p2 with p3,p4
                t1 = (float(p1[0] - p3[0]) / d2[0])
                t2 = (float(p2[0] - p3[0]) / d2[0])
            elif d2[1] != 0:
                t1 = (float(p1[1] - p3[1]) / d2[1])
                t2 = (float(p2[1] - p3[1]) / d2[1])
            else: # p1 and p2 are same AND p3 and P4 are same: return p1 if they are all same
                if p1 == p3:
                    return p1
        else: # parallel lines do not intersect
            return ()
        # either one of them is in limit[0..1] or they are on different sides..
        if min(t1, t2) <= 1.0 and max(t1, t2) >= 0.0:
            # Clamp the overlap to the first segment and return its endpoints.
            t1n = max(min(t1, t2), 0.0)
            t2n = min(max(t1, t2), 1.0)
            retval = ((p1[0] + t1n * d1[0], p1[1] + t1n * d1[1]),
                      (p1[0] + t2n * d1[0], p1[1] + t2n * d1[1]))
            if retval[0] == retval[1]:
                retval = retval[0]  # overlap degenerated to a single point
    else:
        # Non-parallel: solve for the intersection parameters on both segments.
        t1 = float(d2[0] * (p1[1] - p3[1]) - d2[1] * (p1[0] - p3[0])) / det
        t2 = float(d1[0] * (p1[1] - p3[1]) - d1[1] * (p1[0] - p3[0])) / det
        if strict:
            if 0.0 <= t1 < 1.0 and 0.0 <= t2 < 1.0: # point has to be on line segment
                retval = (p3[0] + t2 * d2[0], p3[1] + t2 * d2[1])
        else:
            if 0.0 <= t1 <= 1.0 and 0.0 <= t2 <= 1.0: # point has to be on line segment
                retval = (p3[0] + t2 * d2[0], p3[1] + t2 * d2[1])
    return retval
def draw_list(p_list, arg_color, closed=True):
    """Draw the polyline p_list; when closed, also connect the last point to the first."""
    count = len(p_list)
    limit = count if closed else count - 1
    for idx in range(limit):
        hal_draw_line(p_list[idx], p_list[(idx + 1) % count], arg_color)
def remove_double_vertex(polygon):
    """
    will remove all double vertexes in a polygon list TODO use faster ALGO
    to avoid problems in intersect_line, if no line, but the same point
    is given double points will always arise if you start a new path on a corner

    Mutates and returns `polygon`.

    Bug fix: the backwards walk previously stopped at index 1
    (``range(len-1, 0, -1)``), so a duplicate in the first two vertices
    (e.g. [A, A, B]) was never removed; the range now reaches index 0.
    """
    removals = []
    old_v = (-1, -1)
    # Walk backwards; record the lower index of every adjacent duplicate pair.
    for index in range(len(polygon) - 1, -1, -1):
        if polygon[index][0] == old_v[0] and polygon[index][1] == old_v[1]:
            removals.append(index)
        old_v = polygon[index]
    # Indices were collected in descending order, so deleting in this order
    # never shifts a yet-to-be-deleted position.
    for index in removals:
        del polygon[index]
    return polygon
def calc_area(polygon):
    """Return the signed area of polygon via the shoelace formula
    (positive for counter-clockwise vertex order)."""
    # Pair each vertex with its successor, wrapping the last back to the first.
    rotated = polygon[1:] + polygon[:1]
    cross_sum = sum(a[0] * b[1] - a[1] * b[0] for a, b in zip(polygon, rotated))
    return cross_sum / 2.0
def is_inside(polygon, candidate, outside_point=(), strict=True):
"""
will determine, if a given candidate point is inside the polygon
parameters:
polygon (list of two dimensional points)
candidate a 2D-Point which is in question to be in or outside of the poly
outside_point a point guaranteed to be on the outside, if not given,
method will calculate one(slower)
strict controls, if boundary lines belong to the polygon (False) or not (True)
returns True, if candidate is inside polygon
False, if candidate is outside of polygon
"""
on_line = False
for index in range(0, len(polygon)):
vertex1 = polygon[index]
vertex2 = polygon[(index + 1) % len(polygon)]
intersect = intersect_line(vertex1, vertex2, # TODO: use Point-line intersection, not line-line..
candidate, candidate, strict=True)
if len(intersect) > 0: # intersection was found
on_line = | |
<filename>main.py
# Copyright © 2020. All rights reserved.
# Authors: <NAME>
# Contacts: <EMAIL>
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import copy
# Getting new Xi and y of first side
def get_X1_y1(X_train, y_train, X_test, y_test, index, threshold, side):
X1_train = []
y1_train = []
for i in range(y_train.shape[0]):
if side == 1:
if X_train[i, index] < threshold:
X1_train.append(X_train[i])
y1_train.append(y_train[i])
else:
if X_train[i, index] >= threshold:
X1_train.append(X_train[i])
y1_train.append(y_train[i])
X1_train = np.asarray(X1_train)
y1_train = np.asarray(y1_train)
X1_test = []
y1_test = []
for i in range(y_test.shape[0]):
if side == 1:
if X_test[i, index] < threshold:
X1_test.append(X_test[i])
y1_test.append(y_test[i])
else:
if X_test[i, index] >= threshold:
X1_test.append(X_test[i])
y1_test.append(y_test[i])
X1_test = np.asarray(X1_test)
y1_test = np.asarray(y1_test)
return X1_train, y1_train, X1_test, y1_test
# Getting new Xi and y of second side
def get_X2_y2(X_train, y_train, X_test, y_test, index, threshold, side):
    """Select the 'second side' subset of train and test rows.

    Mirror of get_X1_y1: for side == 2 a row is kept when its feature
    `index` is < threshold; for any other side when it is >= threshold.
    Returns (X2_train, y2_train, X2_test, y2_test) as numpy arrays.
    """
    if side == 2:
        keep = lambda row: row[index] < threshold
    else:
        keep = lambda row: row[index] >= threshold
    train_rows = [i for i in range(y_train.shape[0]) if keep(X_train[i])]
    X2_train = np.asarray([X_train[i] for i in train_rows])
    y2_train = np.asarray([y_train[i] for i in train_rows])
    test_rows = [i for i in range(y_test.shape[0]) if keep(X_test[i])]
    X2_test = np.asarray([X_test[i] for i in test_rows])
    y2_test = np.asarray([y_test[i] for i in test_rows])
    return X2_train, y2_train, X2_test, y2_test
# Finding best threshold of single Xi
def find_threshold_of_x(xx, col, y, num_of_pos, num_of_neg):
    """Find the best decision threshold for a single feature column.

    :param xx: sorted copy of the column (candidate thresholds); the short
               branch reads xx[1] unconditionally, so callers must pass at
               least 2 values — TODO confirm
    :param col: the unsorted feature column
    :param y: class labels, 1 = positive (norma), 2 = negative (pathology)
    :param num_of_pos: count of label-1 objects in y
    :param num_of_neg: count of label-2 objects in y
    :return: (threshold, value, side, FP, FN) — side 1 means "positive below
             threshold", side 2 the mirrored rule; value is the balanced
             score of the chosen rule; FP/FN its remaining error counts.

    Bug fix: in the short (xx.shape[0] <= 2) branch the side-2 score was
    computed from TP1 instead of TP2 (copy-paste), so side 2 could never win
    on its own merits when num_of_neg == 0.
    """
    threshold_list = []
    TP1_list = []
    TN1_list = []
    TP2_list = []
    TN2_list = []
    value_list1 = []
    value_list2 = []
    if xx.shape[0] > 2:
        # Try every interior sorted value as a candidate threshold.
        for j in range(1, xx.shape[0] - 1):
            TP1 = 0  # side 1: positives correctly below threshold
            TN1 = 0  # side 1: negatives correctly at/above threshold
            TP2 = 0  # side 2 mirrored counts
            TN2 = 0
            for z in range(col.shape[0]):
                if col[z] < xx[j]:
                    if y[z] == 1:
                        TP1 += 1
                    else:
                        TN2 += 1
                else:
                    if y[z] == 2:
                        TN1 += 1
                    else:
                        TP2 += 1
            threshold_list.append(xx[j])
            TP1_list.append(TP1)
            TN1_list.append(TN1)
            TP2_list.append(TP2)
            TN2_list.append(TN2)
            # Balanced score; degrade gracefully when one class is absent.
            if (num_of_pos != 0) and (num_of_neg != 0):
                value_list1.append(((TP1 / num_of_pos) + (TN1 / num_of_neg)) / 2)
                value_list2.append(((TP2 / num_of_pos) + (TN2 / num_of_neg)) / 2)
            elif num_of_pos == 0:
                value_list1.append(TN1 / num_of_neg)
                value_list2.append(TN2 / num_of_neg)
            else:
                value_list1.append(TP1 / num_of_pos)
                value_list2.append(TP2 / num_of_pos)
    else:
        # Degenerate column: only one candidate threshold, xx[1].
        TP1 = 0
        TN1 = 0
        TP2 = 0
        TN2 = 0
        for z in range(col.shape[0]):
            if col[z] < xx[1]:
                if y[z] == 1:
                    TP1 += 1
                else:
                    TN2 += 1
            else:
                if y[z] == 2:
                    TN1 += 1
                else:
                    TP2 += 1
        threshold_list.append(xx[1])
        TP1_list.append(TP1)
        TN1_list.append(TN1)
        TP2_list.append(TP2)
        TN2_list.append(TN2)
        if num_of_pos > 0 and num_of_neg > 0:
            value_list1.append(((TP1 / num_of_pos) + (TN1 / num_of_neg)) / 2)
            value_list2.append(((TP2 / num_of_pos) + (TN2 / num_of_neg)) / 2)
        elif num_of_pos == 0:
            value_list1.append(TN1 / num_of_neg)
            value_list2.append(TN2 / num_of_neg)
        else:
            value_list1.append(TP1 / num_of_pos)
            value_list2.append(TP2 / num_of_pos)  # was TP1: copy-paste bug, fixed
    # Pick the better-scoring side; ties go to side 2 (as before).
    if max(value_list1) > max(value_list2):
        threshold = threshold_list[value_list1.index(max(value_list1))]
        TP = TP1_list[value_list1.index(max(value_list1))]
        TN = TN1_list[value_list1.index(max(value_list1))]
        value = max(value_list1)
        side = 1
    else:
        threshold = threshold_list[value_list2.index(max(value_list2))]
        TP = TP2_list[value_list2.index(max(value_list2))]
        TN = TN2_list[value_list2.index(max(value_list2))]
        value = max(value_list2)
        side = 2
    FP = num_of_pos - TP  # number of False Positive
    FN = num_of_neg - TN  # number of False Negative
    return threshold, value, side, FP, FN
# Finding threshold of each Xi
def find_thresholds(X_train, y_train, X_test, y_test):
    """Compute the best threshold per feature and score it on train and test.

    Labels: 1 = positive (norma), 2 = negative (pathology).
    Returns (threshold_list, side_list, FP_list, FN_list, train_value_list,
    test_value_list); the value lists are balanced-score arrays, one entry
    per feature column.  When the test set is empty, test values default to 1.
    """
    num_of_pos = np.sum(y_train == 1) # number of positive objects (norma)
    num_of_neg = np.sum(y_train == 2) # number of negative objects (pathology)
    threshold_list = []
    train_value_list = []
    side_list = []
    FP_list = []
    FN_list = []
    # One threshold search per feature column.
    for i in range(X_train.shape[1]):
        col = X_train[:, i]
        xx = copy.deepcopy(col) # make copy of Xi to not change initial list
        xx.sort()
        # get threshold of Xi, its value on train sample and side of threshold
        threshold, train_value, side, FP, FN = find_threshold_of_x(xx, col, y_train, num_of_pos, num_of_neg)
        threshold_list.append(threshold)
        train_value_list.append(train_value)
        side_list.append(side)
        FP_list.append(FP)
        FN_list.append(FN)
    train_value_list = np.asarray(train_value_list)
    # get value of thresholds on test sample (counts recomputed for y_test)
    num_of_pos = np.sum(y_test == 1)
    num_of_neg = np.sum(y_test == 2)
    test_value_list = []
    if y_test.shape[0] > 0:
        for i in range(X_test.shape[1]):
            col = X_test[:, i]
            TP = 0
            TN = 0
            # Apply the trained rule (side + threshold) to each test row.
            for j in range(col.shape[0]):
                if side_list[i] == 1:
                    if col[j] < threshold_list[i]:
                        if y_test[j] == 1:
                            TP += 1
                    else:
                        if y_test[j] == 2:
                            TN += 1
                else:
                    if col[j] >= threshold_list[i]:
                        if y_test[j] == 1:
                            TP += 1
                    else:
                        if y_test[j] == 2:
                            TN += 1
            # Balanced score; degrade gracefully when one class is absent.
            if num_of_pos > 0 and num_of_neg > 0:
                test_value_list.append(((TP / num_of_pos) + (TN / num_of_neg)) / 2)
            elif num_of_pos == 0:
                test_value_list.append(TN / num_of_neg)
            else:
                test_value_list.append(TP / num_of_pos)
        test_value_list = np.asarray(test_value_list)
    else:
        # No test data: treat every feature as perfectly validated.
        test_value_list = np.ones(X_train.shape[1])
    return threshold_list, side_list, FP_list, FN_list, train_value_list, test_value_list
# Getting value of each feature on next level
def get_value_on_next_level(Xtrain, ytrain, Xtest, ytest, test_weight):
    """Return the best blended (train/test) score achievable one level deeper.

    Scores every feature with find_thresholds, blends train and test values
    by `test_weight`, and returns the top blended score.
    """
    (threshold_list, side_list, FP_list, FN_list,
     train_value_list, test_value_list) = find_thresholds(X_train=Xtrain,
                                                          y_train=ytrain,
                                                          X_test=Xtest,
                                                          y_test=ytest)
    blended = (1 - test_weight) * train_value_list + test_weight * test_value_list
    ranking = pd.DataFrame({'train_value': train_value_list,
                            'test_value': test_value_list,
                            'complex_value': blended})
    ranking = ranking.sort_values(['complex_value', 'test_value', 'train_value'],
                                  ascending=[False, False, False])
    return ranking['complex_value'].values[0]
# Getting new nodes of tree
def get_new_nodes(Xtrain, ytrain, Xtest, ytest, col_names, test_weight, Xtrain_list, ytrain_list, Xtest_list,
                  ytest_list, lnl, pll, psl, til, leaf_number, tree_index, level_number, previous_leaf, previous_side,
                  mti, F):
    """Grow up to F candidate nodes for the current leaf and queue their children.

    Scores every feature on this leaf's data; if no feature is perfect on
    train, features are ranked by a one-level lookahead, otherwise by the
    blended train/test score.  For each of the top F features a node record
    is built and any non-pure child subsets are appended to the work lists
    (Xtrain_list, ..., til), which are mutated in place.

    Returns (node_list, Xtrain_list, ytrain_list, Xtest_list, ytest_list,
    lnl, pll, psl).  NOTE(review): til is mutated but not returned, and the
    local `mti += 1` does not propagate to the caller — confirm intended.
    """
    threshold_list, side_list, FP_list, FN_list, train_value_list, test_value_list = find_thresholds(X_train=Xtrain,
                                                                                                     y_train=ytrain,
                                                                                                     X_test=Xtest,
                                                                                                     y_test=ytest)
    if max(train_value_list) < 1.0:
        # No perfect split: rank features by the average best score of the
        # two child subsets (one-level lookahead).
        value_list = []
        for index in range(len(col_names)):
            if FP_list[index] > 0:
                X1_train, y1_train, X1_test, y1_test = get_X1_y1(Xtrain, ytrain, Xtest, ytest, index,
                                                                 threshold_list[index], side_list[index])
                if y1_train.shape[0] > 1:
                    first_value = get_value_on_next_level(X1_train, y1_train, X1_test, y1_test, test_weight)
                else:
                    first_value = 0.0
            else:
                first_value = 1.0
            if FN_list[index] > 0:
                X2_train, y2_train, X2_test, y2_test = get_X2_y2(Xtrain, ytrain, Xtest, ytest, index,
                                                                 threshold_list[index], side_list[index])
                if y2_train.shape[0] > 1:
                    second_value = get_value_on_next_level(X2_train, y2_train, X2_test, y2_test, test_weight)
                else:
                    second_value = 0.0
            else:
                second_value = 1.0
            value_list.append((first_value + second_value) / 2)
        # find indexes of F best features
        df = pd.DataFrame({'value': value_list})
        df = df.sort_values(['value'], ascending=[False])
    else:
        # A perfect train split exists: rank by the blended train/test score.
        complex_value_list = (1 - test_weight) * train_value_list + test_weight * test_value_list
        df = pd.DataFrame(
            {'train_value': train_value_list,
             'test_value': test_value_list,
             'complex_value': complex_value_list})
        df = df.sort_values(['complex_value', 'test_value', 'train_value'], ascending=[False, False, False])
    index_list = df.index.tolist()[:F]
    node_list = []
    temp = 0
    for index in index_list:
        node = []
        node.append(col_names[index])  # best feature
        node.append(side_list[index])  # side of threshold
        node.append(threshold_list[index])  # threshold value
        node.append(train_value_list[index])  # train value
        node.append(test_value_list[index])  # test value
        node.append(FP_list[index])  # number of False Positive
        if FP_list[index] > 0:
            # Queue the side-1 child subset for further splitting.
            X1_train, y1_train, X1_test, y1_test = get_X1_y1(Xtrain, ytrain, Xtest, ytest, index,
                                                             threshold_list[index], side_list[index])
            if y1_train.shape[0] > 1:
                Xtrain_list.append(X1_train)
                ytrain_list.append(y1_train)
                Xtest_list.append(X1_test)
                ytest_list.append(y1_test)
                lnl.append(level_number)
                pll.append(leaf_number)
                psl.append(1)
                # First candidate stays on the current tree; alternatives fork a new tree index.
                if temp == 0:
                    til.append(tree_index)
                else:
                    til.append(mti)
        node.append(FN_list[index])  # number of False Negative
        if FN_list[index] > 0:
            # Queue the side-2 child subset for further splitting.
            X2_train, y2_train, X2_test, y2_test = get_X2_y2(Xtrain, ytrain, Xtest, ytest, index,
                                                             threshold_list[index], side_list[index])
            if y2_train.shape[0] > 1:
                Xtrain_list.append(X2_train)
                ytrain_list.append(y2_train)
                Xtest_list.append(X2_test)
                ytest_list.append(y2_test)
                lnl.append(level_number)
                pll.append(leaf_number)
                psl.append(2)
                if temp == 0:
                    til.append(tree_index)
                else:
                    til.append(mti)
        node.append(leaf_number)  # current leaf number
        node.append(level_number)  # current level number
        node.append(previous_leaf)  # previous leaf number
        node.append(previous_side)  # previous side
        node_list.append(node)
        mti += 1
        temp += 1
    return node_list, Xtrain_list, ytrain_list, Xtest_list, ytest_list, lnl, pll, psl
# Finding best tree
def get_forest(test_weight, X_train, y_train, X_test, y_test, col_names, F):
tree_list = []
leaf_number = 1 # first leaf number
level_number = 1 # first level number
tree_index = 0 # first tree index
Xtrain_list = []
ytrain_list = []
Xtest_list = []
ytest_list = []
lnl = [] # level number list
pll = [] # previous leaf list
psl = [] # previous side list
til = [] # tree index list
F_counter = 0
node_list, Xtrain_list, ytrain_list, Xtest_list, ytest_list, lnl, pll, psl = get_new_nodes(X_train, y_train,
X_test, y_test,
col_names,
test_weight,
Xtrain_list,
ytrain_list,
Xtest_list,
ytest_list, lnl, pll,
psl, til,
leaf_number,
tree_index,
level_number, 0, 0,
0, F)
for new_node in node_list:
tree_list.append(new_node)
count_list = np.zeros(F)
i = 0
while i < len(pll):
Xtrain = Xtrain_list[i]
ytrain = ytrain_list[i]
Xtest = Xtest_list[i]
ytest = ytest_list[i]
leaf_number += 1
level_number = lnl[i] + 1
previous_leaf = pll[i]
previous_side = psl[i]
tree_index = til[i]
mti = max(til) # max tree index
node_list, Xtrain_list, ytrain_list, Xtest_list, ytest_list, lnl, pll, psl = get_new_nodes(Xtrain, ytrain,
Xtest, ytest,
col_names,
test_weight,
Xtrain_list,
ytrain_list,
Xtest_list,
ytest_list, lnl,
pll, psl, | |
<reponame>ConnectionMaster/python-plugin
import imp
import importlib
from collections import defaultdict
import sys
import shutil
import tempfile
import hashlib
import logging
import jsonpickle
import errno
from distutils.sysconfig import get_python_lib
#WSE-402 add support for pip 9 and pip 10
try:
from pip._internal.req import parse_requirements
except ImportError:
from pip.req import parse_requirements
import pkg_resources as pk_res
from setuptools import Command
from setuptools.package_index import PackageIndex, os
from agent.api.model import PolicyCheckResourceNode
from agent.api.model.AgentProjectInfo import AgentProjectInfo
from agent.api.model import Coordinates
from agent.api.model.DependencyInfo import DependencyInfo
from agent.api.dispatch.UpdateInventoryRequest import UpdateInventoryRequest
from agent.api.dispatch.CheckPoliciesRequest import CheckPoliciesRequest
from agent.client.WssServiceClient import WssServiceClient
SPACE = " "
REQUIREMENTS = "-r"
UPDATE_REQUEST_FILE = "whitesource/update_request.json"
DASH = "-"
class SetupToolsCommand(Command):
"""setuptools Command"""
description = "Setuptools WSS plugin"
user_options = [
('offline=', 'o', 'Offline flag'),
('pathConfig=', 'p', 'Configuration file path'),
('debug=', 'd', 'Show debugging output'),
]
    def initialize_options(self):
        """Initialize all option attributes to their defaults (setuptools hook)."""
        self.offline = None  # '-o' option value
        self.debug = None  # '-d' option: 'y' enables debug logging to file
        self.proxySetting = None  # proxy dict loaded from the config file, if present
        self.service = None  # WssServiceClient, created in create_service()
        self.configDict = None  # config_info dict loaded in finalize_options()
        self.pathConfig = None  # '-p' option: path to the configuration file
        self.token = None
        self.userEnvironment = None  # pkg_resources Environment over site-packages
        self.distDepend = None  # resolved dependency distributions
        self.pkgIndex = PackageIndex()  # index used to fetch source distributions
        self.dependencyList = []  # dependency records built by scan_modules()
        self.projectCoordinates = None
        self.tmpdir = tempfile.mkdtemp(prefix="wss_python_plugin-")  # download scratch dir
def finalize_options(self):
# log file activation and config
if self.debug == 'y':
logging.basicConfig(format='%(asctime)s%(levelname)s:%(message)s', level=logging.DEBUG,
filename='wss_plugin.log')
# load and import config file
try:
sys.path.append(self.pathConfig)
if sys.version_info.major >= 3:
config_file_spec = importlib.util.spec_from_file_location('config_file', self.pathConfig)
config_file_module = importlib.util.module_from_spec(config_file_spec)
config_file_spec.loader.exec_module(config_file_module)
self.configDict = config_file_module.config_info
else:
self.configDict = imp.load_source('config_file', self.pathConfig).config_info
logging.info('Loading config_file was successful')
except Exception as err:
print("Can't import the config file.")
sys.exit(err)
# load proxy setting if exist
if 'proxy' in self.configDict:
self.proxySetting = self.configDict['proxy']
if 'index_url' in self.configDict:
self.pkgIndex = PackageIndex(index_url=self.configDict['index_url'])
self.projectCoordinates = Coordinates.create_project_coordinates(self.distribution)
self.userEnvironment = pk_res.Environment([get_python_lib()], platform=None, python=None)
distribution_specification = self.distribution.get_name() + "==" + self.distribution.get_version()
distribution_requirement = pk_res.Requirement.parse(distribution_specification)
# resolve all dependencies
try:
self.distDepend = pk_res.working_set.resolve([distribution_requirement], env=self.userEnvironment)
self.distDepend.pop(0)
logging.info("Finished resolving dependencies")
except Exception as err:
print("distribution was not found on this system, and is required by this application", err.message)
def run(self):
self.validate_config_file()
self.scan_modules()
self.create_service()
self.run_plugin()
def validate_config_file(self):
""" Validate content of config file params """
# org token
if 'org_token' in self.configDict:
if self.configDict['org_token'] == '':
sys.exit("Organization token is empty")
else:
sys.exit("No organization token option exists")
logging.info("Validation of config file was successful")
# Todo: check existence of other keys in dict
    def scan_modules(self):
        """ Downloads all the dependencies calculates their sha1 and creates a list of dependencies info"""
        if self.distDepend is not None:
            for dist in self.distDepend:
                try:
                    # create a dist instance from requirement instance
                    current_requirement = dist.as_requirement()
                    # download the source distribution into the temp dir so
                    # its file hash can be computed locally
                    current_distribution = self.pkgIndex.fetch_distribution(
                        current_requirement, self.tmpdir, force_scan=True, source=True, develop_ok=True)
                    # create dep. root
                    if current_distribution is not None:
                        self.dependencyList.append(create_dependency_record(current_distribution))
                except Exception as err:
                    # NOTE(review): fetch failures are reported but swallowed,
                    # so the scan continues with a partial dependency list;
                    # `err` itself is never printed -- confirm this is intended.
                    print("Error in fetching dists " + dist.key + " " + dist.version)
            logging.info("Finished calculation for all dependencies")
        else:
            logging.info("No dependencies were found")
        # clean up the temporary download directory
        shutil.rmtree(self.tmpdir)
def create_service(self):
""" Creates a WssServiceClient with the destination url"""
if ('url_destination' in self.configDict) and (self.configDict['url_destination'] != ''):
self.service = WssServiceClient(self.configDict['url_destination'], self.proxySetting)
else:
self.service = WssServiceClient("https://saas.whitesourcesoftware.com/agent", self.proxySetting)
logging.debug("The destination url is set to: " + self.service.to_string())
    def run_plugin(self):
        """ Initializes the plugin requests"""
        # org_token presence was already validated in validate_config_file
        org_token = self.configDict['org_token']
        user_key = ''
        project = self.create_project_obj()
        product = ''
        product_version = ''
        # connection defaults, overridable from the config file below
        self.connection_retries = 1
        self.connection_retries_interval = 3
        self.policy_violation = False
        if 'product_name' in self.configDict:
            product = self.configDict['product_name']
        if 'user_key' in self.configDict:
            user_key = self.configDict['user_key']
        if 'product_version' in self.configDict:
            product_version = self.configDict['product_version']
        if 'connection_retries' in self.configDict:
            self.connection_retries = self.configDict['connection_retries']
        if 'connection_retries_interval' in self.configDict:
            self.connection_retries_interval = self.configDict['connection_retries_interval']
        if self.configDict.get('offline') or self.offline:
            # offline mode: serialize the update request to disk, send nothing
            logging.debug("Offline request")
            offline_request(project, org_token, user_key, product, product_version)
        else:
            if self.configDict.get('check_policies'):
                # may set self.policy_violation as a side effect
                logging.debug("Checking policies")
                self.check_policies(project, org_token, user_key, product, product_version)
            # no policy violations => send update and pass build
            if not self.policy_violation:
                logging.debug("Updating inventory")
                self.update_inventory(project, org_token, user_key, product, product_version)
            # policy violation AND force_update
            elif self.configDict.get('force_update'):
                print("However all dependencies will be force updated to project inventory.")
                logging.debug("Updating inventory")
                self.update_inventory(project, org_token, user_key, product, product_version)
                # fail the build
                if self.configDict.get('fail_on_error'):
                    print("Build failure due to policy violation (fail_on_error = True)")
                    sys.exit(1)
            # policy violation AND (NOT force_update)
            elif self.configDict.get('fail_on_error'):
                # fail the build
                print("Build failure due to policy violation (fail_on_error = True)")
                sys.exit(1)
def create_project_obj(self):
""" create the actual project """
project_token = None
if 'project_token' in self.configDict:
project_token = self.configDict['project_token']
if project_token == '':
project_token = None
return AgentProjectInfo(coordinates=self.projectCoordinates, dependencies=self.dependencyList,
project_token=project_token)
    def check_policies(self, project_info, token, user_key, product_name, product_version):
        """ Sends the check policies request to the agent according to the request type """
        projects = [project_info]
        # None when the key is absent; the request accepts that as "off"
        force_check_all_dependencies = self.configDict.get('force_check_all_dependencies')
        request = CheckPoliciesRequest(token, user_key, product_name, product_version, projects, force_check_all_dependencies)
        result = self.service.check_policies(request, self.connection_retries, self.connection_retries_interval)
        try:
            self.handle_policies_result(result)
        except Exception:
            # NOTE(review): broad except -- ANY failure while processing the
            # result (not only a policy rejection) aborts the build here.
            logging.warning("Some dependencies do not conform with open source policies")
            sys.exit(1)
def handle_policies_result(self, result):
""" Checks if any policies rejected if so stops """
logging.debug("Creating policies report")
if result.has_rejections():
self.policy_violation = True
print("Some dependencies do not conform with open source policies:")
print_policies_rejection(result)
else:
logging.debug("All dependencies conform with open source policies!")
    def update_inventory(self, project_info, token, user_key, product_name, product_version):
        """ Sends the update request to the agent according to the request type """
        logging.debug("Updating White Source")
        projects = [project_info]
        request = UpdateInventoryRequest(token, user_key, product_name, product_version, projects)
        result = self.service.update_inventory(request, self.connection_retries, self.connection_retries_interval)
        # print a human-readable summary of the agent's response
        print_update_result(result)
def calc_hash(file_for_calculation):
    """Return the SHA-1 hex digest of the given file.

    The file is read in 64 KiB chunks so arbitrarily large distributions
    can be hashed without loading them fully into memory.
    """
    sha1 = hashlib.sha1()
    with open(file_for_calculation, 'rb') as dependency_file:
        for chunk in iter(lambda: dependency_file.read(65536), b''):
            sha1.update(chunk)
    return sha1.hexdigest()
def create_dependency_record(distribution):
    """Create a DependencyInfo record for one resolved package dependency.

    The artifact id is the final path component of the distribution's
    location.  os.path.basename handles the platform's separator(s)
    directly, replacing the previous manual os.name branch that split on
    a hard-coded '\\' or '/' (and mishandled forward slashes on Windows).
    """
    dist_artifact = os.path.basename(distribution.location)
    return DependencyInfo(group_id=distribution.key,
                          artifact_id=dist_artifact,
                          version_id=distribution.version,
                          sha1=calc_hash(distribution.location))
def print_policies_rejection(result):
    """Print which policies rejected which resources, for both the new and
    the existing projects in a check-policies result."""
    if result is None:
        print("There was a problem with the check policies result")
        logging.debug("The check policies result is empty")
        return
    projects_dict = {}
    for project_group in (result.newProjects, result.existingProjects):
        if project_group:
            projects_dict.update(create_policy_dict(project_group.items()))
    if projects_dict:
        print(print_project_policies_rejection(projects_dict))
def print_project_policies_rejection(policy_dict):
    """Format each policy with the resources it rejected.

    :param policy_dict: mapping of policy display name -> list of rejected
        resource nodes (as produced by create_policy_dict)
    :return: multi-line report string

    Bug fix: a resource whose ``licenses`` attribute is None previously did
    not terminate its line, so it merged with the following entry; now a
    newline is always appended.
    """
    output = ''
    for policy, nodes in policy_dict.items():
        output += 'Rejected by Policy "' + policy + '":\n'
        for node in nodes:
            output += "\t* " + node.resource.displayName
            licenses = node.resource.licenses
            if licenses is not None:
                # e.g. " (MIT, GPL) \n" -- trailing space kept for
                # backward-compatible output
                output += " (" + ", ".join(licenses) + ") \n"
            else:
                output += "\n"
    return output
def create_policy_dict(projects):
    """Group the rejected resource nodes by the display name of the policy
    that rejected them.

    :param projects: iterable of (project, resource_node) pairs
    :return: defaultdict(list) mapping policy display name -> nodes
    """
    policy_dict = defaultdict(list)
    for _project, resource_node in projects:
        for rejected in PolicyCheckResourceNode.find_rejected_node(resource_node):
            policy_dict[rejected.policy.displayName].append(rejected)
    return policy_dict
def print_update_result(result):
    """Pretty-print the inventory update response returned by the agent."""
    if result is None:
        print("There was a problem with the update result")
        logging.debug("The update result is empty")
        return
    pieces = ["White Source update results: \n",
              "White Source organization: " + result.organization + "\n"]
    # newly created projects
    created = result.createdProjects
    if created:
        pieces.append(str(len(created)) + " newly created projects: ")
        for project in created:
            pieces.append(project + " ")
    else:
        pieces.append("No new projects found \n")
    # updated projects
    updated = result.updatedProjects
    if updated:
        pieces.append(str(len(updated)) + " existing projects were updated: ")
        for project in updated:
            pieces.append(project + " ")
    else:
        pieces.append("\nNo projects were updated \n")
    pieces.append("\nrequest_token: " + result.orgToken)
    print("".join(pieces))
def offline_request(project_info, token, user_key, product_name, product_version):
    """Serialize an update-inventory request to UPDATE_REQUEST_FILE instead
    of sending it to the agent (offline mode).

    Removes a stray statement-terminating semicolon and writes the encoded
    request in one step; behavior is otherwise unchanged.
    """
    projects = [project_info]
    off_request = UpdateInventoryRequest(token, user_key, product_name, product_version, projects)
    target_dir = os.path.dirname(UPDATE_REQUEST_FILE)
    if not os.path.exists(target_dir):
        try:
            os.makedirs(target_dir)
        except OSError as exc:
            # Guard against a race: another process may create the directory
            # between the exists() check and makedirs().
            if exc.errno != errno.EEXIST:
                raise
    with open(UPDATE_REQUEST_FILE, "w") as f:
        f.write(jsonpickle.encode(off_request, unpicklable=False))
def run_setup(file_name):
    """ Creates a list of package dependencies as a requirement string from the setup.py file"""
    # NOTE(review): open_required is not defined anywhere in this module's
    # visible code; open_required_pip or open_setup was probably intended --
    # confirm before calling this function.  The result is also unused.
    req = open_required(file_name)
    # Todo: add functionality to run setuptools and wss_plugin logic on existing setup.py
def open_setup(file_name):
    """Extract the install_requires list from a setup.py file.

    Works by mock-patching the named setuptools attribute, importing the
    ``setup`` module, and reading the keyword arguments the patched call
    received.  Returns [] when no setup file can be processed.
    """
    import setuptools
    import mock
    req = []
    try:
        with mock.patch.object(setuptools, file_name) as mock_setup:
            import setup
            args, kwargs = mock_setup.call_args
            req = kwargs.get('install_requires', [])
            return req
    except Exception as err:
        # Bug fix: BaseException.message was removed in Python 3, so the old
        # "err.message" raised AttributeError while reporting the error.
        print("No setup file", err)
        return req
def open_required_pip(file_name):
    # Parse a requirements.txt via pip's parse_requirements and return the
    # requirement specifiers as strings.
    # NOTE(review): session='hack' and `ir.req` rely on pip internals that
    # changed across pip releases (pip >= 20 exposes `ir.requirement`) --
    # confirm the pip version this plugin is pinned to.
    install_requirements = parse_requirements(file_name, session='hack')
    records = [str(ir.req) for ir in install_requirements]
    #return open_required(file_name)
    return records
# todo | |
request.page and pagename == request.page.page_name:
# do not create new object for current page
pageobj = request.page
else:
pageobj = Page(request, pagename)
return pageobj
def getFrontPage(request):
    """ Convenience function to get localized front page

        @param request: current request
        @rtype: Page object
        @return localized page_front_page, if there is a translation
    """
    frontpage_name = request.cfg.page_front_page
    return getLocalizedPage(request, frontpage_name)
def getHomePage(request, username=None):
    """
    Get a user's homepage, or return None for anon users and
    those who have not created a homepage.

    DEPRECATED - try to use getInterwikiHomePage (see below)

    @param request: the request object
    @param username: the user's name
    @rtype: Page
    @return: user's homepage object - or None
    """
    from MoinMoin.Page import Page
    # default to current user
    if username is None and request.user.valid:
        username = request.user.name
    if not username:
        # anonymous user without a name -> no homepage
        return None
    page = Page(request, username)
    return page if page.exists() else None
def getInterwikiHomePage(request, username=None):
    """
    Get a user's homepage.

    cfg.user_homewiki influences behaviour of this:
    'Self' does mean we store user homepage in THIS wiki.
    When set to our own interwikiname, it behaves like with 'Self'.

    'SomeOtherWiki' means we store user homepages in another wiki.

    @param request: the request object
    @param username: the user's name
    @rtype: tuple (or None for anon users)
    @return: (wikiname, pagename)
    """
    # default to current user
    if username is None and request.user.valid:
        username = request.user.name
    if not username:
        return None # anon user
    homewiki = request.cfg.user_homewiki
    # our own interwiki name behaves like the special 'Self' marker
    return (u'Self' if homewiki == request.cfg.interwikiname else homewiki,
            username)
def AbsPageName(context, pagename):
    """
    Return the absolute pagename for a (possibly) relative pagename.

    @param context: name of the page where "pagename" appears on
    @param pagename: the (possibly relative) page name
    @rtype: string
    @return: the absolute page name
    """
    if pagename.startswith(PARENT_PREFIX):
        # strip one context level per leading parent prefix
        while context and pagename.startswith(PARENT_PREFIX):
            context = '/'.join(context.split('/')[:-1])
            pagename = pagename[PARENT_PREFIX_LEN:]
        parts = [part for part in (context, pagename) if part]
        pagename = '/'.join(parts)
    elif pagename.startswith(CHILD_PREFIX):
        child = pagename[CHILD_PREFIX_LEN:]
        pagename = context + '/' + child if context else child
    return pagename
def RelPageName(context, pagename):
    """
    Return the relative pagename for some context.

    @param context: name of the page where "pagename" appears on
    @param pagename: the absolute page name
    @rtype: string
    @return: the relative page name
    """
    if context == '':
        # special case: context is a "virtual root" page with name == '',
        # so every page is a subpage of it
        return CHILD_PREFIX + pagename
    if pagename.startswith(context + CHILD_PREFIX):
        # direct child of the context page
        return pagename[len(context):]
    # sister/aunt page: drop the common ancestor fragments, then climb up
    # once per remaining context fragment
    context_frags = context.split('/')
    pagename_frags = pagename.split('/')
    common = 0
    for cf, pf in zip(context_frags, pagename_frags):
        if cf != pf:
            break
        common += 1
    remaining_context = context_frags[common:]
    remaining_page = pagename_frags[common:]
    return PARENT_PREFIX * len(remaining_context) + '/'.join(remaining_page)
def pagelinkmarkup(pagename, text=None):
    """ return markup that can be used as link to page <pagename> """
    from MoinMoin.parser.text_moin_wiki import Parser
    plain_text = text is None or text == pagename
    # a bare CamelCase word with no distinct label needs no bracket markup
    if plain_text and re.match(Parser.word_rule + "$", pagename, re.U | re.X):
        return pagename
    suffix = '' if plain_text else '|%s' % text
    return u'[[%s%s]]' % (pagename, suffix)
#############################################################################
### mimetype support
#############################################################################
import mimetypes

# Extension -> mimetype entries that common platform mimetype databases
# lack or get wrong (OpenDocument, diffs, video, wiki markup, ...).
MIMETYPES_MORE = {
    # OpenOffice 2.x & other open document stuff
    '.odt': 'application/vnd.oasis.opendocument.text',
    '.ods': 'application/vnd.oasis.opendocument.spreadsheet',
    '.odp': 'application/vnd.oasis.opendocument.presentation',
    '.odg': 'application/vnd.oasis.opendocument.graphics',
    '.odc': 'application/vnd.oasis.opendocument.chart',
    '.odf': 'application/vnd.oasis.opendocument.formula',
    '.odb': 'application/vnd.oasis.opendocument.database',
    '.odi': 'application/vnd.oasis.opendocument.image',
    '.odm': 'application/vnd.oasis.opendocument.text-master',
    '.ott': 'application/vnd.oasis.opendocument.text-template',
    '.ots': 'application/vnd.oasis.opendocument.spreadsheet-template',
    '.otp': 'application/vnd.oasis.opendocument.presentation-template',
    '.otg': 'application/vnd.oasis.opendocument.graphics-template',
    # some systems (like Mac OS X) don't have some of these:
    '.patch': 'text/x-diff',
    '.diff': 'text/x-diff',
    '.py': 'text/x-python',
    '.cfg': 'text/plain',
    '.conf': 'text/plain',
    '.irc': 'text/plain',
    '.md5': 'text/plain',
    '.csv': 'text/csv',
    '.flv': 'video/x-flv',
    '.wmv': 'video/x-ms-wmv',
    '.swf': 'application/x-shockwave-flash',
    '.moin': 'text/moin-wiki',
    '.creole': 'text/creole',
    # Windows Server 2003 / Python 2.7 has no or strange entries for these:
    '.svg': 'image/svg+xml',
    '.svgz': 'image/svg+xml',
    '.png': 'image/png',
    '.jpg': 'image/jpeg',
    '.jpeg': 'image/jpeg',
    '.gif': 'image/gif',
}

# add all mimetype patterns of pygments
import pygments.lexers

for name, short, patterns, mime in pygments.lexers.get_all_lexers():
    for pattern in patterns:
        if pattern.startswith('*.') and mime:
            MIMETYPES_MORE[pattern[1:]] = mime[0]

# Register every extra mapping with the stdlib mimetypes registry.  This
# was a list comprehension executed purely for its side effects; a plain
# loop states the intent and avoids building a throwaway list of None.
for _ext, _mimetype in MIMETYPES_MORE.items():
    mimetypes.add_type(_mimetype, _ext, True)
MIMETYPES_sanitize_mapping = {
    # this stuff is text, but got application/* for unknown reasons
    ('application', 'docbook+xml'): ('text', 'docbook'),
    ('application', 'x-latex'): ('text', 'latex'),
    ('application', 'x-tex'): ('text', 'tex'),
    ('application', 'javascript'): ('text', 'javascript'),
}

# inverse of the above: maps a sanitized type back to the IANA-style one
MIMETYPES_spoil_mapping = {
    sane: raw for raw, sane in MIMETYPES_sanitize_mapping.items()
}
class MimeType(object):
    """ represents a mimetype like text/plain """
    def __init__(self, mimestr=None, filename=None):
        # major/minor hold the *sanitized* type/subtype (see sanitize())
        self.major = self.minor = None # sanitized mime type and subtype
        self.params = {} # parameters like "charset" or others
        self.charset = None # this stays None until we know for sure!
        self.raw_mimestr = mimestr
        # an explicit mime string wins; otherwise guess from the filename
        if mimestr:
            self.parse_mimetype(mimestr)
        elif filename:
            self.parse_filename(filename)
    def parse_filename(self, filename):
        # guess from the extension; unknown extensions fall back to a
        # generic binary blob type
        mtype, encoding = mimetypes.guess_type(filename)
        if mtype is None:
            mtype = 'application/octet-stream'
        self.parse_mimetype(mtype)
    def parse_mimetype(self, mimestr):
        """ take a string like used in content-type and parse it into components,
            alternatively it also can process some abbreviated string like "wiki"
        """
        parameters = mimestr.split(";")
        parameters = [p.strip() for p in parameters]
        mimetype, parameters = parameters[0], parameters[1:]
        mimetype = mimetype.split('/')
        if len(mimetype) >= 2:
            major, minor = mimetype[:2] # we just ignore more than 2 parts
        else:
            # no '/' present: treat it as a #format abbreviation like "wiki"
            major, minor = self.parse_format(mimetype[0])
        self.major = major.lower()
        self.minor = minor.lower()
        for param in parameters:
            # NOTE(review): splits on every '=', so a parameter value that
            # itself contains '=' raises ValueError -- confirm inputs.
            key, value = param.split('=')
            if value[0] == '"' and value[-1] == '"': # remove quotes
                value = value[1:-1]
            self.params[key.lower()] = value
        if 'charset' in self.params:
            self.charset = self.params['charset'].lower()
        self.sanitize()
    def parse_format(self, format):
        """ maps from what we currently use on-page in a #format xxx processing
            instruction to a sanitized mimetype major, minor tuple.
            can also be user later for easier entry by the user, so he can just
            type "wiki" instead of "text/moin-wiki".
        """
        format = format.lower()
        if format in config.parser_text_mimetype:
            mimetype = 'text', format
        else:
            # a few non-obvious abbreviations; anything unknown becomes
            # an experimental text/x-<format> subtype
            mapping = {
                'wiki': ('text', 'moin-wiki'),
                'irc': ('text', 'irssi'),
            }
            try:
                mimetype = mapping[format]
            except KeyError:
                mimetype = 'text', 'x-%s' % format
        return mimetype
    def sanitize(self):
        """ convert to some representation that makes sense - this is not necessarily
            conformant to /etc/mime.types or IANA listing, but if something is
            readable text, we will return some text/* mimetype, not application/*,
            because we need text/plain as fallback and not application/octet-stream.
        """
        self.major, self.minor = MIMETYPES_sanitize_mapping.get((self.major, self.minor), (self.major, self.minor))
    def spoil(self):
        """ this returns something conformant to /etc/mime.type or IANA as a string,
            kind of inverse operation of sanitize(), but doesn't change self
        """
        major, minor = MIMETYPES_spoil_mapping.get((self.major, self.minor), (self.major, self.minor))
        return self.content_type(major, minor)
    def content_type(self, major=None, minor=None, charset=None, params=None):
        """ return a string suitable for Content-Type header
        """
        major = major or self.major
        minor = minor or self.minor
        params = params or self.params or {}
        if major == 'text':
            # text types always get an explicit charset parameter, falling
            # back to the wiki's configured charset
            charset = charset or self.charset or params.get('charset', config.charset)
            params['charset'] = charset
        mimestr = "%s/%s" % (major, minor)
        params = ['%s="%s"' % (key.lower(), value) for key, value in params.items()]
        params.insert(0, mimestr)
        return "; ".join(params)
    def mime_type(self):
        """ return a string major/minor only, no params """
        return "%s/%s" % (self.major, self.minor)
    def module_name(self):
        """ convert this mimetype to a string useable as python module name,
            we yield the exact module name first and then proceed to shorter
            module names (useful for falling back to them, if the more special
            module is not found) - e.g. first "text_python", next "text".
            Finally, we yield "application_octet_stream" as the most general
            mimetype we have.

            Hint: the fallback handler module for text/* should be implemented
            in module "text" (not "text_plain")
        """
        mimetype = self.mime_type()
        modname = mimetype.replace("/", "_").replace("-", "_").replace(".", "_")
        fragments = modname.split('_')
        for length in range(len(fragments), 1, -1):
            yield "_".join(fragments[:length])
        # the raw string as given by the caller (may be an abbreviation)
        yield self.raw_mimestr
        yield fragments[0]
        yield "application_octet_stream"
#############################################################################
### Plugins
#############################################################################
class PluginError(Exception):
    """ Base class for all plugin loading/lookup errors """
class PluginMissingError(PluginError):
    """ Raised when a plugin is not found (see importPlugin below) """
class PluginAttributeError(PluginError):
    """ Raised when a plugin does not contain the requested attribute """
def importPlugin(cfg, kind, name, function="execute"):
""" Import wiki or builtin plugin
Returns <function> attr from a plugin module <name>.
If <function> attr is missing, raise PluginAttributeError.
If <function> is None, return the whole module object.
If <name> plugin can | |
r"""
Depth averaged shallow water equations in conservative form
"""
from __future__ import absolute_import
from .utility_nh import *
from thetis.equation import Equation
g_grav = physical_constants['g_grav']
rho_0 = physical_constants['rho0']
class BaseShallowWaterEquation(Equation):
    """
    Abstract base class for ShallowWaterEquations.

    Stores the mixed function space, bathymetry and model options, and
    provides the HLLC-type interior/boundary flux helpers plus the
    wetting-and-drying depth displacement used by subclasses.
    """
    def __init__(self, function_space,
                 bathymetry,
                 options):
        # function_space: mixed space holding (h, mu, mv) -- water depth and
        # the two momentum components (assumed ordering; TODO confirm)
        super(BaseShallowWaterEquation, self).__init__(function_space)
        # define bunch of members needed to construct forms
        self.function_space = function_space
        self.bathymetry = bathymetry
        self.options = options
        self.mesh = self.function_space.mesh()
        self.test = TestFunction(self.function_space)
        self.trial = TrialFunction(self.function_space)
        self.normal = FacetNormal(self.mesh)
        self.boundary_markers = sorted(self.function_space.mesh().exterior_facets.unique_markers)
        self.boundary_len = self.function_space.mesh().boundary_len
        # negligible depth set for wetting and drying
        self.threshold = self.options.depth_wd_interface
        # mesh dependent variables
        self.cellsize = CellSize(self.mesh)
        # define measures with a reasonable quadrature degree
        p = self.function_space.ufl_element().degree()
        self.quad_degree = 2*p + 1
        self.dx = dx(degree=self.quad_degree,
                     domain=self.function_space.ufl_domain())
        self.dS = dS(degree=self.quad_degree,
                     domain=self.function_space.ufl_domain())
    def interior_flux(self, N, V, wr, wl):
        """
        This evaluates the interior fluxes between the positively and negatively restricted vectors wr, wl.

        :param N: facet normal
        :param V: mixed function space (sub(0) used for the gravity field)
        :param wr, wl: (h, mu, mv) states on the two sides of the facet
        :return: HLLC-type numerical flux vector
        """
        hr, mur, mvr = wr[0], wr[1], wr[2]
        hl, mul, mvl = wl[0], wl[1], wl[2]
        E = self.threshold
        # gravity is switched off where both sides are below the dry threshold
        gravity = Function(V.sub(0)).assign(g_grav)
        g = conditional(And(hr < E, hl < E), zero(gravity('+').ufl_shape), gravity('+'))
        # Do HLLC flux
        # velocities, guarded against division by zero depth
        hl_zero = conditional(hl <= 0, 0, 1)
        ur = conditional(hr <= 0, zero(as_vector((mur / hr, mvr / hr)).ufl_shape),
                         hl_zero * as_vector((mur / hr, mvr / hr)))
        hr_zero = conditional(hr <= 0, 0, 1)
        ul = conditional(hl <= 0, zero(as_vector((mul / hl, mvl / hl)).ufl_shape),
                         hr_zero * as_vector((mul / hl, mvl / hl)))
        vr = dot(ur, N)
        vl = dot(ul, N)
        # wave speed depending on wavelength
        c_minus = Min(vr - sqrt(g * hr), vl - sqrt(g * hl))
        # NOTE(review): c_plus also uses Min; HLL-type right-going speed
        # estimates usually take Max -- confirm this is intentional.
        c_plus = Min(vr + sqrt(g * hr), vl + sqrt(g * hl))
        # not divided by zero height
        y = (hl * c_minus * (c_plus - vl) - hr * c_plus * (c_minus - vr)) / (hl * (c_plus - vl) - hr * (c_minus - vr))
        c_s = conditional(abs(hr * (c_minus - vr) - hl * (c_plus - vl)) <= 1e-16, zero(y.ufl_shape), y)
        # momentum flux components, again guarded against dry cells
        velocityl = conditional(hl <= 0, zero(mul.ufl_shape), (hr_zero * mul * mvl) / hl)
        velocity_ul = conditional(hl <= 0, zero(mul.ufl_shape), (hr_zero * mul * mul) / hl)
        velocity_vl = conditional(hl <= 0, zero(mvl.ufl_shape), (hr_zero * mvl * mvl) / hl)
        velocityr = conditional(hr <= 0, zero(mur.ufl_shape), (hl_zero * mur * mvr) / hr)
        velocity_ur = conditional(hr <= 0, zero(mur.ufl_shape), (hl_zero * mur * mur) / hr)
        velocity_vr = conditional(hr <= 0, zero(mvr.ufl_shape), (hl_zero * mvr * mvr) / hr)
        # physical flux tensors F = (F1, F2) on each side of the facet
        F1r = as_vector((mur,
                         velocity_ur + 0.5 * g * hr**2,
                         velocityr))
        F2r = as_vector((mvr,
                         velocityr,
                         velocity_vr + 0.5 * g * hr**2))
        F1l = as_vector((mul,
                         velocity_ul + 0.5 * g * hl**2,
                         velocityl))
        F2l = as_vector((mvl,
                         velocityl,
                         velocity_vl + 0.5 * g * hl**2))
        F_plus = as_vector((F1r, F2r))
        F_minus = as_vector((F1l, F2l))
        W_plus = as_vector((hr, mur, mvr))
        W_minus = as_vector((hl, mul, mvl))
        # NOTE(review): the Roe-average below is immediately overwritten by
        # the simple average on the next line (dead store) -- confirm which
        # estimate is meant to be used.
        y = ((sqrt(hr) * vr) + (sqrt(hl) * vl)) / (sqrt(hl) + sqrt(hr))
        y = 0.5 * (vl + vr) #+ sqrt(g * hr) - sqrt(g * hl)
        v_star = conditional(abs(sqrt(hl) + sqrt(hr)) <= 1e-16, zero(y.ufl_shape), y)
        # conditional to prevent dividing by zero
        y = ((c_minus - vr) / (c_minus - c_s)) * (W_plus -
                                                  as_vector((0,
                                                             hr * (c_s - v_star) * N[0],
                                                             hr * (c_s - v_star) * N[1])))
        w_plus = conditional(abs(c_minus - c_s) <= 1e-16, zero(y.ufl_shape), y)
        # conditional to prevent dividing by zero
        y = ((c_plus - vl) / (c_plus - c_s)) * (W_minus -
                                                as_vector((0,
                                                           hl * (c_s - v_star) * N[0],
                                                           hl * (c_s - v_star) * N[1])))
        w_minus = conditional(abs(c_plus - c_s) <= 1e-16, zero(y.ufl_shape), y)
        # assemble the HLLC numerical flux from the averaged physical flux
        # plus the upwinding correction terms
        Flux = ((0.5 * dot(N, F_plus + F_minus)) +
                (0.5 * (-((abs(c_minus) - abs(c_s)) * w_minus) +
                        ((abs(c_plus) - abs(c_s)) * w_plus) +
                        (abs(c_minus) * W_plus) -
                        (abs(c_plus) * W_minus))))
        return Flux
    def boundary_flux(self, V, w, bc_funcs):
        """
        This evaluates the boundary flux between the vector and a solid reflective wall (temporarily zero velocity and same depth) or other boundary conditions options.

        Here, mur and mul denote outside and inside of momentum cell, respectively.
        """
        N = self.normal
        h, mu, mv = split(w)
        if bc_funcs is None: # TODO improve stability with increased time step size
            # solid reflective wall: zero exterior momentum, equal depth
            mul = Constant(0)
            mur = mu
            mvl = Constant(0)
            mvr = mv
            hr = h
            hl = h
        else:
            # NOTE(review): `value` is not defined in this scope, so taking
            # the 'inflow' branch raises NameError; and if bc_funcs contains
            # neither 'inflow' nor 'outflow', hl/hr/mul/... stay unbound --
            # confirm the caller guarantees one of these keys.
            if 'inflow' in bc_funcs:
                mul = value.sub(1) # TODO
                mur = mu
                mvl = value.sub(2)
                mvr = mv
                hr = h
                hl = h
            if 'outflow' in bc_funcs:
                mul = mu
                mur = mu
                mvr = mv
                mvl = mv
                hr = h
                hl = h
        # Do HLLC flux
        # velocities, guarded against division by zero depth
        ul = conditional(hl <= 0, zero(as_vector((mul / hl, mvl / hl)).ufl_shape),
                         as_vector((mul / hl, mvl / hl)))
        ur = conditional(hr <= 0, zero(as_vector((mur / hr, mvr / hr)).ufl_shape),
                         as_vector((mur / hr, mvr / hr)))
        vr = dot(ur, N)
        vl = dot(ul, N)
        # wave speed depending on wavelength
        c_minus = Min(vr - sqrt(g_grav * hr), vl - sqrt(g_grav * hl))
        c_plus = Min(vr + sqrt(g_grav * hr), vl + sqrt(g_grav * hl))
        # not divided by zero height
        y = (hl * c_minus * (c_plus - vl) - hr * c_plus * (c_minus - vr)) / (hl * (c_plus - vl) - hr * (c_minus - vr))
        c_s = conditional(abs(hr * (c_minus - vr) - hl * (c_plus - vl)) <= 1e-8, zero(y.ufl_shape), y)
        velocityl = conditional(hl <= 0, zero(mul.ufl_shape), (mul * mvl) / hl)
        velocity_ul = conditional(hl <= 0, zero(mul.ufl_shape), (mul * mul) / hl)
        velocity_ur = conditional(hr <= 0, zero(mul.ufl_shape), (mur * mur) / hr)
        velocityr = conditional(hr <= 0, zero(mul.ufl_shape), (mur * mvr) / hr)
        velocity_vr = conditional(hr <= 0, zero(mvr.ufl_shape), (mvr * mvr) / hr)
        velocity_vl = conditional(hl <= 0, zero(mvl.ufl_shape), (mvl * mvl) / hl)
        # physical flux tensors on the exterior (r) and interior (l) sides
        F1r = as_vector((mur,
                         velocity_ur + 0.5 * g_grav * hr**2,
                         velocityr))
        F2r = as_vector((mvr,
                         velocityr,
                         velocity_vr + 0.5 * g_grav * hr**2))
        F1l = as_vector((mul,
                         velocity_ul + 0.5 * g_grav * hl**2,
                         velocityl))
        F2l = as_vector((mvl,
                         velocityl,
                         velocity_vl + 0.5 * g_grav * hl**2))
        F_plus = as_vector((F1r, F2r))
        F_minus = as_vector((F1l, F2l))
        W_plus = as_vector((hr, mur, mvr))
        W_minus = as_vector((hl, mul, mvl))
        # NOTE(review): Roe-average estimate immediately overwritten by the
        # simple average (dead store), same as in interior_flux -- confirm.
        y = ((sqrt(hr) * vr) + (sqrt(hl) * vl)) / (sqrt(hl) + sqrt(hr))
        y = 0.5 * (vl + vr) #+ sqrt(g * hr) - sqrt(g * hl)
        v_star = conditional(abs(sqrt(hl) + sqrt(hr)) <= 1e-8, zero(y.ufl_shape), y)
        # conditional to prevent dividing by zero
        # NOTE(review): unlike interior_flux, the plus-side correction here
        # uses hl (and the minus-side below uses hr) -- confirm this swap
        # of depths is intended and not a copy/paste slip.
        y = ((c_minus - vr) / (c_minus - c_s)) * (W_plus -
                                                  as_vector((0,
                                                             hl * (c_s - v_star) * N[0],
                                                             hl * (c_s - v_star) * N[1])))
        w_plus = conditional(abs(c_minus - c_s) <= 1e-8, zero(y.ufl_shape), y)
        # conditional to prevent dividing by zero
        y = ((c_plus - vl) / (c_plus - c_s)) * (W_minus -
                                                as_vector((0,
                                                           hr * (c_s - v_star) * N[0],
                                                           hr * (c_s - v_star) * N[1])))
        w_minus = conditional(abs(c_plus - c_s) <= 1e-8, zero(y.ufl_shape), y)
        # assemble the HLLC numerical flux
        Flux = ((0.5 * dot(N, F_plus + F_minus)) +
                (0.5 * (-((abs(c_minus) - abs(c_s)) * w_minus) +
                        ((abs(c_plus) - abs(c_s)) * w_plus) +
                        (abs(c_minus) * W_plus) -
                        (abs(c_plus) * W_minus))))
        return Flux
    def wd_depth_displacement(self, eta):
        """
        Returns depth change due to wetting and drying

        :param eta: free-surface elevation
        :return: a smooth, non-negative displacement keeping the total water
            column positive, or 0 when disabled (or when the HLLC flux
            handles drying itself)
        """
        if (not self.options.use_hllc_flux) and self.options.use_wetting_and_drying:
            h = self.bathymetry + eta
            return 2 * self.threshold**2 / (2 * self.threshold + abs(h)) + 0.5 * (abs(h) - h)
        else:
            return 0
class ShallowWaterEquations(BaseShallowWaterEquation):
"""
2D depth-averaged shallow water equations in conservative form.
This defines the full 2D SWE equations.
"""
def __init__(self, function_space,
bathymetry,
options):
"""
:arg function_space: Mixed function space where the solution belongs
:arg bathymetry: bathymetry of the domain
:type bathymetry: :class:`Function` or :class:`Constant`
:arg options: :class:`.AttrDict` object containing all circulation model options
"""
super(ShallowWaterEquations, self).__init__(function_space, bathymetry, options)
self.test_h = self.test[0]
self.test_uv = as_vector((self.test[1], self.test[2]))
def mass_term(self, solution):
f = super(ShallowWaterEquations, self).mass_term(solution)
# if self.options.use_wetting_and_drying:
# assert self.options.use_hllc_flux is True
if (not self.options.use_hllc_flux) and self.options.use_wetting_and_drying:
f += dot(self.wd_depth_displacement(solution[0]), self.test_h)*self.dx
| |
self.y = x, y
    def __repr__(self):
        # Debug form, e.g. "<Vector2D: (1.000000, 2.000000) >"
        return "<Vector2D: (%f, %f) >" % (self.x, self.y)
    def __hash__(self):
        # Hash on the (x, y) pair so equal vectors hash equally (consistent
        # with __eq__).  The instance is mutable, so do not mutate a vector
        # while it is used as a dict key or set member.
        return hash((self.x, self.y))
def __eq__(self, other):
if not isinstance(other, Vector2D):
return False
return self.x == other.x and self.y == other.y
def __add__(self, other):
x = self.x + other.x
y = self.y + other.y
# Return a new object.
return Vector2D(x, y)
__radd__ = __add__
def __sub__(self, other):
x = self.x - other.x
y = self.y - other.y
# Return a new object.
return Vector2D(x, y)
def __rsub__(self, other):
x = other.x - self.x
y = other.y - self.y
# Return a new object.
return Vector2D(x, y)
    def __cmp__(self, other):
        # This next expression will only return zero (equals) if all
        # expressions are false.
        # NOTE(review): Python 2 relic (ignored on Python 3); it returns a
        # bool rather than -1/0/1, so under Python 2 only equality was
        # meaningful, never ordering -- confirm no caller relies on '<'/'>'.
        return self.x != other.x or self.y != other.y
def __abs__(self):
return (self.x ** 2 + self.y ** 2) ** 0.5
def __rmul__(self, other):
if isinstance(other, Number):
return Vector2D(self.x * other, self.y * other)
raise ValueError("Cannot multiply %s with %s" % (self.__class__, type(other)))
def __div__(self, other):
if isinstance(other, Number):
return Vector2D(self.x / other, self.y / other)
raise ValueError("Cannot divide %s with %s" % (self.__class__, type(other)))
def copy(self):
"""
vector = copy(self)
Copy the vector so that new vectors containing the same values
are passed around rather than references to the same object.
"""
return Vector2D(self.x, self.y)
def dot(self, other):
""" dot product """
return self.x * other.x + self.y * other.y
class CoordinateSystem(object):
    """An orthonormal 3D basis onto which points can be projected."""

    def __init__(self, x=None, y=None, z=None):
        # Bug fix: the defaults used to be Vector instances evaluated once at
        # class-definition time and shared by every CoordinateSystem created
        # without arguments (mutable-default pitfall).  Fresh axis vectors
        # are now created per instance; explicit arguments behave as before.
        self.x = Vector(1.0, 0.0, 0.0) if x is None else x
        self.y = Vector(0.0, 1.0, 0.0) if y is None else y
        self.z = Vector(0.0, 0.0, 1.0) if z is None else z

    def project(self, p):
        """Return p expressed in this basis (dot product with each axis)."""
        return Vector(p.dot(self.x), p.dot(self.y), p.dot(self.z))
class Point:
    """A mutable 2D point with basic vector arithmetic.

    The constructor accepts two scalars, an ``(x, y)`` tuple, an
    ``[x, y]`` list, or an ``[(x, y)]`` list wrapping a single tuple.

    Fixes in this revision:
      * ``rotate_about`` discarded the result of ``rotate`` (which returns a
        new Point), so it returned an un-rotated copy of the point.
      * ``__truediv__`` alias added so the ``/`` operator works on Python 3.
      * ``rotate_about``'s docstring said degrees; the value is passed
        straight to ``rotate``, which takes radians.
    """

    def __init__(self, x=0.0, y=0.0):
        if isinstance(x, tuple):
            # Point((x, y))
            self.x = x[0]
            self.y = x[1]
        elif isinstance(x, list):
            if isinstance(x[0], tuple):
                # Point([(x, y)])
                self.x = x[0][0]
                self.y = x[0][1]
            else:
                # Point([x, y])
                self.x = x[0]
                self.y = x[1]
        else:
            self.x = x
            self.y = y

    def __add__(self, p):
        """Point(x1+x2, y1+y2)"""
        return Point(self.x + p.x, self.y + p.y)

    def __sub__(self, p):
        """Point(x1-x2, y1-y2)"""
        return Point(self.x - p.x, self.y - p.y)

    def __mul__(self, scalar):
        """Point(x1*scalar, y1*scalar)"""
        return Point(self.x * scalar, self.y * scalar)

    def __div__(self, scalar):
        """Point(x1/scalar, y1/scalar)"""
        return Point(self.x / scalar, self.y / scalar)

    # Python 3 dispatches the / operator to __truediv__, not __div__.
    __truediv__ = __div__

    def __str__(self):
        if isinstance(self.x, float):
            return "(%.2f, %.2f)" % (self.x, self.y)
        else:
            return "(%s, %s)" % (self.x, self.y)

    def __repr__(self):
        return "%s(%r, %r)" % (self.__class__.__name__, self.x, self.y)

    def strspc(self):
        """Like __str__ but space-separated, e.g. "(1.000 2.000)"."""
        if isinstance(self.x, float):
            return "(%.3f %.3f)" % (self.x, self.y)
        else:
            return "(%s %s)" % (self.x, self.y)

    def length(self):
        """Euclidean distance from the origin."""
        return math.sqrt(self.x ** 2 + self.y ** 2)

    def distance_to(self, p):
        """Calculate the distance between two points."""
        return (self - p).length()

    def as_tuple(self):
        """(x, y)"""
        return (self.x, self.y)

    def swapped(self):
        """(y, x)"""
        return (self.y, self.x)

    def clone(self):
        """Return a full copy of this point."""
        return Point(self.x, self.y)

    def integerize(self):
        """Convert co-ordinate values to integers (in place)."""
        self.x = int(self.x)
        self.y = int(self.y)

    def floatize(self):
        """Convert co-ordinate values to floats (in place)."""
        self.x = float(self.x)
        self.y = float(self.y)

    def move_to(self, x, y):
        """Reset x & y coordinates."""
        self.x = x
        self.y = y

    def slide(self, p):
        """Translate in place by the vector p: move to (x+p.x, y+p.y)."""
        self.x = self.x + p.x
        self.y = self.y + p.y

    def slide_xy(self, dx, dy):
        """Translate in place by (dx, dy)."""
        self.x = self.x + dx
        self.y = self.y + dy

    def offset(self, xoffset=0.0, yoffset=None):
        """Return (x+xoffset, y+yoffset) as a tuple.

        When yoffset is omitted, xoffset is applied to both coordinates.
        """
        if yoffset is not None:
            return (self.x + xoffset, self.y + yoffset)
        else:
            return (self.x + xoffset, self.y + xoffset)

    def mirror_y(self):
        """Negate the y coordinate in place."""
        self.y = -self.y

    def mirror_x(self):
        """Negate the x coordinate in place."""
        self.x = -self.x

    def rotate(self, rad):
        """Rotate counter-clockwise by rad radians.

        Positive y goes *up,* as in traditional mathematics.  In y-down
        computer-graphics coordinates the same call turns clockwise.

        The new position is returned as a new Point.
        """
        s, c = [f(rad) for f in (math.sin, math.cos)]
        x, y = (c * self.x - s * self.y, s * self.x + c * self.y)
        return Point(x, y)

    def rotate_about(self, p, theta):
        """Rotate counter-clockwise around point p by theta radians.

        Positive y goes *up,* as in traditional mathematics.

        The new position is returned as a new Point.
        """
        result = self.clone()
        result.slide_xy(-p.x, -p.y)
        # Bug fix: rotate() returns a new Point; the result was previously
        # discarded, making this method a no-op rotation.
        result = result.rotate(theta)
        result.slide_xy(p.x, p.y)
        return result
class Size:
    """Width/height pair for 2D sizes."""

    def __init__(self, width=0, height=0):
        self.width = width
        self.height = height

    def __str__(self):
        return "%s, %s" % (self.width, self.height)

    def swapped(self):
        """Return the dimensions transposed: (height, width)."""
        return (self.height, self.width)
class Rect:
""" 2D Rectangle class """
def __init__(self, width=2.0, height=2.0, bottomUp=False):
self.bottom_up = bottomUp
self.left = -width / 2.0
self.right = width / 2.0
if bottomUp:
self.top = -height / 2.0
self.bottom = height / 2.0
else:
self.top = height / 2.0
self.bottom = -height / 2.0
self.width = abs(self.right - self.left)
self.height = abs(self.top - self.bottom)
def get_size(self):
self.width = abs(self.right - self.left)
self.height = abs(self.top - self.bottom)
return self.width, self.height
def get_centre(self):
x = self.left + self.width / 2
if self.bottom_up:
y = self.top + self.height / 2
else:
y = self.top - self.height / 2
return x, y
def get_pts(self):
return [
(self.left, self.top),
(self.right, self.top),
(self.left, self.bottom),
(self.right, self.bottom),
]
def get_pts_3d(self, height=0):
return [
(self.left, self.top, height),
(self.right, self.top, height),
(self.left, self.bottom, height),
(self.right, self.bottom, height),
]
def move_to(self, pt, py=None):
    """Centre the rect on a Point, an (x, y) tuple, or x/y scalars."""
    if isinstance(pt, Point):
        x, y = pt.as_tuple()
    elif isinstance(pt, tuple):
        x, y = pt[0], pt[1]
    else:
        x, y = pt, py
    half_w = self.width / 2
    half_h = self.height / 2
    self.left, self.right = x - half_w, x + half_w
    if self.bottom_up:
        self.top, self.bottom = y - half_h, y + half_h
    else:
        self.top, self.bottom = y + half_h, y - half_h
def get_top_left(self):
    """Return the (left, top) corner as an (x, y) tuple."""
    return (self.left, self.top)
def get_bottom_left(self):
    """Return the (left, bottom) corner as an (x, y) tuple."""
    return (self.left, self.bottom)
def move_top_left_to(self, pt):
    """Move the rect so its top-left corner sits at *pt* (Point or tuple)."""
    if isinstance(pt, Point):
        x, y = pt.as_tuple()
    else:
        x, y = pt[0], pt[1]
    self.left = x
    self.right = x + self.width
    self.top = y
    # In a y-down rect the bottom edge is numerically greater than the top.
    self.bottom = y + self.height if self.bottom_up else y - self.height
def move_bottom_left_to(self, pt):
    """Move the rect so its bottom-left corner sits at *pt* (Point or tuple)."""
    if isinstance(pt, Point):
        x, y = pt.as_tuple()
    else:
        x, y = pt[0], pt[1]
    self.left = x
    self.right = x + self.width
    self.bottom = y
    self.top = y - self.height if self.bottom_up else y + self.height
def set_points(self, pt1, pt2):
    """Reset the rectangle to span between two corner points."""
    def _xy(pt):
        # Accept either a Point or any (x, y) indexable.
        return pt.as_tuple() if isinstance(pt, Point) else (pt[0], pt[1])

    x1, y1 = _xy(pt1)
    x2, y2 = _xy(pt2)
    self.left, self.right = min(x1, x2), max(x1, x2)
    if self.bottom_up:
        self.top, self.bottom = min(y1, y2), max(y1, y2)
    else:
        self.top, self.bottom = max(y1, y2), min(y1, y2)
    self.width = abs(x2 - x1)
    self.height = abs(y2 - y1)
def bounding_rect(self, pts):
    """Makes a bounding rect from the extents of a list of points"""
    xs = []
    ys = []
    for pt in pts:
        if isinstance(pt, Point):
            x, y = pt.as_tuple()
        else:
            x, y = pt[0], pt[1]
        xs.append(x)
        ys.append(y)
    self.left, self.right = min(xs), max(xs)
    if self.bottom_up:
        self.top, self.bottom = min(ys), max(ys)
    else:
        self.top, self.bottom = max(ys), min(ys)
    self.width = abs(self.right - self.left)
    self.height = abs(self.top - self.bottom)
def set_size(self, width, height):
self.left = -width / 2
self.right = width / 2
if self.bottom_up:
self.top = -height / 2
self.bottom = height / 2
else:
self.top = height / 2
self.bottom = -height / 2
self.width = width
self.height = height
def contains(self, pt):
"""Return true if a point is inside the rectangle."""
x, y = pt.as_tuple()
if self.left <= x <= self.right:
if not self.bottom_up:
if self.bottom <= y <= self.top:
return True
else:
if self.top <= y <= self.bottom:
return True
return False
def overlaps(self, other):
"""Return true if a rectangle overlaps this rectangle."""
return (
self.right > other.left
and self.left < other.right
and self.top < other.bottom
and self.bottom > other.top
)
def expanded_by(self, n):
"""Return | |
= {1, 1, 1})
def test_rmw_zp_stk_relative_indirect_word(self):
    """rmw_zp with {osx, ind, siz} = {1, 1, 1}: read-modify-write of a
    16-bit word through a stack-relative indirect pointer, first via the
    kernel stack (sp[1]) and then via the user stack (sp[0])."""
    stdout = StringIO()
    mon = Monitor(stdout = stdout)
    mpu = mon._mpu
    mpu.osx = True; mpu.ind = True; mpu.siz = True;
    # kernel stk relative rmw word
    mpu.p = mpu.p | 0x20 # set M flag
    mpu.pc = 0x200
    mpu.sp[1] = 0x1FD
    self.rmwVal = 0x55AA
    self.rtnVal = 0xAA56
    self.mask = mpu.wordMask
    zp = 0x01
    mpu.memory[mpu.pc] = zp
    # low/high pointer bytes on the kernel stack -> target 0x0201
    mpu.memory[mpu.sp[1] + 1] = 0x01
    mpu.memory[mpu.sp[1] + 2] = 0x02
    mpu.memory[0x201] = 0xAA
    mpu.memory[0x202] = 0x55
    pc = mpu.pc + 1
    mpu.rmw_zp(self.rmw)
    tmp1 = mpu.byteMask & mpu.memory[0x201]
    tmp2 = mpu.byteMask & mpu.memory[0x202]
    data = mpu.wordMask & ((tmp2 << 8) + tmp1)
    self.assertEqual(self.rtnVal, data)
    self.assertEqual(pc, mpu.pc)
    # user stk relative rmw word (comment previously said "byte" but this
    # half also operates on a 16-bit word)
    mpu.p = mpu.p & 0xDF # clr M flag
    mpu.pc = 0x200
    mpu.sp[0] = 0x17D
    self.rmwVal = 0x6633
    self.rtnVal = 0x99CD
    self.mask = mpu.wordMask
    zp = 0x01
    mpu.memory[mpu.pc] = zp
    # low/high pointer bytes on the user stack -> target 0x0203
    mpu.memory[mpu.sp[0] + 1] = 0x03
    mpu.memory[mpu.sp[0] + 2] = 0x02
    mpu.memory[0x203] = 0x33
    mpu.memory[0x204] = 0x66
    pc = mpu.pc + 1
    mpu.rmw_zp(self.rmw)
    tmp1 = mpu.byteMask & mpu.memory[0x203]
    tmp2 = mpu.byteMask & mpu.memory[0x204]
    data = mpu.wordMask & ((tmp2 << 8) + tmp1)
    self.assertEqual(self.rtnVal, data)
    self.assertEqual(pc, mpu.pc)
# ro_zpX (flags: {osx, ind, siz} = {0, 0, 0})
def test_ro_zpX_byte(self):
    """ro_zpX with {osx, ind, siz} = {0, 0, 0}: plain zero-page,X byte
    reads, covering wrap-around and the signed-offset case used when the
    index register is above 511."""
    stdout = StringIO()
    mon = Monitor(stdout = stdout)
    mpu = mon._mpu
    mpu.osx = False; mpu.ind = False; mpu.siz = False;
    # index < 512, index + unsigned offset read byte w/o wrap-around
    mpu.pc = 0x200
    mpu.x[0] = 0x80
    zp = 0x7F
    mpu.memory[mpu.pc] = zp
    mpu.memory[mpu.byteMask & (mpu.x[0] + zp)] = 0x55
    data = mpu.byteMask & mpu.memory[0xFF]
    pc = mpu.pc + 1
    mpu.ro_zpX(self.op)
    self.assertEqual(0x55, data)
    self.assertEqual(self.rtnVal, data)
    self.assertEqual(pc, mpu.pc)
    # index < 512, index + unsigned offset read byte w/ wrap-around
    mpu.pc = 0x200
    mpu.x[0] = 0x80
    zp = 0x80
    mpu.memory[mpu.pc] = zp
    mpu.memory[mpu.byteMask & (mpu.x[0] + zp)] = 0xAA
    data = mpu.byteMask & mpu.memory[0x00]
    pc = mpu.pc + 1
    mpu.ro_zpX(self.op)
    self.assertEqual(0xAA, data)
    self.assertEqual(self.rtnVal, data)
    self.assertEqual(pc, mpu.pc)
    # index > 511, index + signed offset read byte, no wrap-around
    mpu.pc = 0x200
    mpu.x[0] = 0x281
    zp = 0x80
    mpu.memory[mpu.pc] = zp
    mpu.memory[0x201] = 0x66
    # NOTE(review): wordMask in a byte-read test -- harmless for a value
    # <= 0xFF, but byteMask would match the sections above; confirm.
    data = mpu.wordMask & mpu.memory[0x201]
    pc = mpu.pc + 1
    mpu.ro_zpX(self.op)
    self.assertEqual(0x66, data)
    self.assertEqual(self.rtnVal, data)
    self.assertEqual(pc, mpu.pc)
# ro_zpX (flags: {osx, ind, siz} = {0, 0, 1})
def test_ro_zpX_word(self):
    """ro_zpX with {osx, ind, siz} = {0, 0, 1}: zero-page,X word reads.
    oax is set and a[0] is loaded instead of x[0] -- presumably oax makes
    the accumulator act as the index register; TODO confirm."""
    stdout = StringIO()
    mon = Monitor(stdout = stdout)
    mpu = mon._mpu
    mpu.osx = False; mpu.ind = False; mpu.siz = True;
    mpu.oax = True
    # index < 512, index + unsigned offset read word w/o wrap-around
    mpu.pc = 0x200
    mpu.a[0] = 0x7F
    zp = 0x7F
    mpu.memory[mpu.pc] = zp
    mpu.memory[0xFE] = 0xAA
    mpu.memory[0xFF] = 0x55
    tmp1 = mpu.byteMask & mpu.memory[0xFE]
    tmp2 = mpu.byteMask & mpu.memory[0xFF]
    data = mpu.wordMask & ((tmp2 << 8) + tmp1)
    pc = mpu.pc + 1
    mpu.ro_zpX(self.op)
    self.assertEqual(0x55AA, data)
    self.assertEqual(self.rtnVal, data)
    self.assertEqual(pc, mpu.pc)
    # index < 512, index + unsigned offset read word w/ wrap-around
    mpu.pc = 0x200
    mpu.a[0] = 0x80
    zp = 0x7F
    mpu.memory[mpu.pc] = zp
    mpu.memory[0xFF] = 0x33
    mpu.memory[0x00] = 0x66
    tmp1 = mpu.byteMask & mpu.memory[0xFF]
    tmp2 = mpu.byteMask & mpu.memory[0x00]
    data = mpu.wordMask & ((tmp2 << 8) + tmp1)
    pc = mpu.pc + 1
    mpu.ro_zpX(self.op)
    self.assertEqual(0x6633, data)
    self.assertEqual(self.rtnVal, data)
    self.assertEqual(pc, mpu.pc)
    # index > 511, index + signed offset read word, no wrap-around
    mpu.pc = 0x200
    mpu.a[0] = 0x281
    zp = 0x80
    mpu.memory[mpu.pc] = zp
    mpu.memory[0x201] = 0x56
    mpu.memory[0x202] = 0xAA
    tmp1 = mpu.byteMask & mpu.memory[0x201]
    tmp2 = mpu.byteMask & mpu.memory[0x202]
    data = mpu.wordMask & ((tmp2 << 8) + tmp1)
    pc = mpu.pc + 1
    mpu.ro_zpX(self.op)
    self.assertEqual(0xAA56, data)
    self.assertEqual(self.rtnVal, data)
    self.assertEqual(pc, mpu.pc)
# ro_zpX (flags: {osx, ind, siz} = {0, 1, 0})
def test_ro_zpX_indirect_byte(self):
    """ro_zpX with {osx, ind, siz} = {0, 1, 0}: the zero-page,X location
    holds a two-byte pointer (low byte first) and the data byte is read
    through it."""
    stdout = StringIO()
    mon = Monitor(stdout = stdout)
    mpu = mon._mpu
    mpu.osx = False; mpu.ind = True; mpu.siz = False;
    # index < 512, index + unsigned offset read indirect byte no wrap-around
    mpu.pc = 0x200
    mpu.x[0] = 0x7F
    zp = 0x7F
    mpu.memory[mpu.pc] = zp
    mpu.memory[0xFE] = 0x01
    mpu.memory[0xFF] = 0x02
    mpu.memory[0x201] = 0x55
    data = mpu.byteMask & mpu.memory[0x201]
    pc = mpu.pc + 1
    mpu.ro_zpX(self.op)
    self.assertEqual(0x55, data)
    self.assertEqual(self.rtnVal, data)
    self.assertEqual(pc, mpu.pc)
    # index < 512, index + unsigned offset read indirect byte w/ wrap-around
    mpu.pc = 0x200
    mpu.x[0] = 0x7F
    zp = 0x80
    mpu.memory[mpu.pc] = zp
    mpu.memory[0xFF] = 0x01
    mpu.memory[0x00] = 0x02
    mpu.memory[0x201] = 0xAA
    data = mpu.byteMask & mpu.memory[0x201]
    pc = mpu.pc + 1
    mpu.ro_zpX(self.op)
    self.assertEqual(0xAA, data)
    self.assertEqual(self.rtnVal, data)
    self.assertEqual(pc, mpu.pc)
    # index > 511, index + signed offset read indirect byte, no wrap-around
    mpu.pc = 0x200
    mpu.x[0] = 0x281
    zp = 0x80
    mpu.memory[mpu.pc] = zp
    mpu.memory[0x201] = 0x03
    mpu.memory[0x202] = 0x02
    mpu.memory[0x203] = 0x66
    data = mpu.byteMask & mpu.memory[0x203]
    pc = mpu.pc + 1
    mpu.ro_zpX(self.op)
    self.assertEqual(0x66, data)
    self.assertEqual(self.rtnVal, data)
    self.assertEqual(pc, mpu.pc)
# ro_zpX (flags: {osx, ind, siz} = {0, 1, 1})
def test_ro_zpX_indirect_word(self):
    """ro_zpX with {osx, ind, siz} = {0, 1, 1}: word read through a
    zero-page,X pointer (low byte first); oax is set so a[0] supplies the
    index instead of x[0] -- TODO confirm oax semantics."""
    stdout = StringIO()
    mon = Monitor(stdout = stdout)
    mpu = mon._mpu
    mpu.osx = False; mpu.ind = True; mpu.siz = True;
    mpu.oax = True
    # index < 512, index + unsigned offset read indirect word no wrap-around
    mpu.pc = 0x200
    mpu.a[0] = 0x7F
    zp = 0x7F
    mpu.memory[mpu.pc] = zp
    mpu.memory[0xFE] = 0x01
    mpu.memory[0xFF] = 0x02
    mpu.memory[0x201] = 0xAA
    mpu.memory[0x202] = 0x55
    tmp1 = mpu.byteMask & mpu.memory[0x201]
    tmp2 = mpu.byteMask & mpu.memory[0x202]
    data = mpu.wordMask & ((tmp2 << 8) + tmp1)
    pc = mpu.pc + 1
    mpu.ro_zpX(self.op)
    self.assertEqual(0x55AA, data)
    self.assertEqual(self.rtnVal, data)
    self.assertEqual(pc, mpu.pc)
    # index < 512, index + unsigned offset read indirect word w/ wrap-around
    mpu.pc = 0x200
    mpu.a[0] = 0x80
    zp = 0x7F
    mpu.memory[mpu.pc] = zp
    mpu.memory[0xFF] = 0x01
    mpu.memory[0x00] = 0x02
    mpu.memory[0x201] = 0x33
    mpu.memory[0x202] = 0x66
    tmp1 = mpu.byteMask & mpu.memory[0x201]
    tmp2 = mpu.byteMask & mpu.memory[0x202]
    data = mpu.wordMask & ((tmp2 << 8) + tmp1)
    pc = mpu.pc + 1
    mpu.ro_zpX(self.op)
    self.assertEqual(0x6633, data)
    self.assertEqual(self.rtnVal, data)
    self.assertEqual(pc, mpu.pc)
    # index > 511, index + signed offset read indirect word, no wrap-around
    mpu.pc = 0x200
    mpu.a[0] = 0x281
    zp = 0x80
    mpu.memory[mpu.pc] = zp
    mpu.memory[0x201] = 0x03
    mpu.memory[0x202] = 0x02
    mpu.memory[0x203] = 0x56
    mpu.memory[0x204] = 0xAA
    tmp1 = mpu.byteMask & mpu.memory[0x203]
    tmp2 = mpu.byteMask & mpu.memory[0x204]
    data = mpu.wordMask & ((tmp2 << 8) + tmp1)
    pc = mpu.pc + 1
    mpu.ro_zpX(self.op)
    self.assertEqual(0xAA56, data)
    self.assertEqual(self.rtnVal, data)
    self.assertEqual(pc, mpu.pc)
# ro_zpX (flags: {osx, ind, siz} = {1, 0, 0})
def test_ro_zpX_stk_relative_byte(self):
    """ro_zpX with {osx, ind, siz} = {1, 0, 0}: osx makes the read
    stack-relative, with the kernel stack pointer sp[1] as the base."""
    stdout = StringIO()
    mon = Monitor(stdout = stdout)
    mpu = mon._mpu
    mpu.osx = True; mpu.ind = False; mpu.siz = False;
    # index < 512, index + unsigned offset read byte w/o wrap-around
    mpu.pc = 0x200
    mpu.sp[1] = 0x180
    zp = 0x7F
    mpu.memory[mpu.pc] = zp
    mpu.memory[0x1FF] = 0x55
    data = mpu.byteMask & mpu.memory[0x1FF]
    pc = mpu.pc + 1
    mpu.ro_zpX(self.op)
    self.assertEqual(0x55, data)
    self.assertEqual(self.rtnVal, data)
    self.assertEqual(pc, mpu.pc)
    # index < 512, index + unsigned offset read byte w/ wrap-around
    mpu.pc = 0x200
    mpu.sp[1] = 0x180
    zp = 0x80
    mpu.memory[mpu.pc] = zp
    mpu.memory[0x100] = 0xAA
    data = mpu.byteMask & mpu.memory[0x100]
    pc = mpu.pc + 1
    mpu.ro_zpX(self.op)
    self.assertEqual(0xAA, data)
    self.assertEqual(self.rtnVal, data)
    self.assertEqual(pc, mpu.pc)
    # index > 511, index + signed offset read byte, no wrap-around
    mpu.pc = 0x200
    mpu.sp[1] = 0x281
    zp = 0x80
    mpu.memory[mpu.pc] = zp
    mpu.memory[0x201] = 0x66
    # NOTE(review): wordMask in a byte-read test -- result is unchanged for
    # a single byte, but byteMask would match the sections above; confirm.
    data = mpu.wordMask & mpu.memory[0x201]
    pc = mpu.pc + 1
    mpu.ro_zpX(self.op)
    self.assertEqual(0x66, data)
    self.assertEqual(self.rtnVal, data)
    self.assertEqual(pc, mpu.pc)
# ro_zpX (flags: {osx, ind, siz} = {1, 0, 1})
def test_ro_zpX_stk_relative_word(self):
stdout = StringIO()
mon = Monitor(stdout = stdout)
mpu = mon._mpu
mpu.osx = True; mpu.ind = False; mpu.siz = True;
# index < 512, index + unsigned | |
# 24 kV band: one machine at a 'Local' siting; each rated power must map
# to the matching "W" installation type (TI-42W .. TI-51W).  Every voltage
# in the band is checked inside the loop.
with before.each:
    self.c = CT()
    self.c.situacion = 'L'
    self.c.numero_maquinas = 1
with context('si situacion es Local'):
    with context('si 1 máquina 15kVA'):
        with it('must be TI-42W'):
            self.c.potencia = 15
            for t in range(18, 24):
                self.c.tension = t
                expect(self.c.tipoinstalacion).to(equal('TI-42W'))
with context('si situacion es Local'):
    with context('si 1 máquina 25kVA'):
        with it('must be TI-43W'):
            self.c.potencia = 25
            for t in range(18, 24):
                self.c.tension = t
                expect(self.c.tipoinstalacion).to(equal('TI-43W'))
with context('si situacion es Local'):
    with context('si 1 máquina 50kVA'):
        with it('must be TI-44W'):
            self.c.potencia = 50
            for t in range(18, 24):
                self.c.tension = t
                expect(self.c.tipoinstalacion).to(equal('TI-44W'))
with context('si situacion es Local'):
    with context('si 1 máquina 100kVA'):
        with it('must be TI-45W'):
            self.c.potencia = 100
            for t in range(18, 24):
                self.c.tension = t
                expect(self.c.tipoinstalacion).to(equal('TI-45W'))
with context('si situacion es Local'):
    with context('si 1 máquina 160kVA'):
        with it('must be TI-46W'):
            self.c.potencia = 160
            for t in range(18, 24):
                self.c.tension = t
                expect(self.c.tipoinstalacion).to(equal('TI-46W'))
with context('si situacion es Local'):
    with context('si 1 máquina 250kVA'):
        with it('must be TI-47W'):
            self.c.potencia = 250
            for t in range(18, 24):
                self.c.tension = t
                expect(self.c.tipoinstalacion).to(equal('TI-47W'))
with context('si situacion es Local'):
    with context('si 1 máquina 400kVA'):
        with it('must be TI-48W'):
            self.c.potencia = 400
            for t in range(18, 24):
                self.c.tension = t
                expect(self.c.tipoinstalacion).to(equal('TI-48W'))
with context('si situacion es Local'):
    with context('si 1 máquina 630kVA'):
        with it('must be TI-49W'):
            self.c.potencia = 630
            for t in range(18, 24):
                self.c.tension = t
                expect(self.c.tipoinstalacion).to(equal('TI-49W'))
with context('si situacion es Local'):
    with context('si 1 máquina 1000kVA'):
        with it('must be TI-50W'):
            self.c.potencia = 1000
            for t in range(18, 24):
                self.c.tension = t
                expect(self.c.tipoinstalacion).to(equal('TI-50W'))
with context('si situacion es Local'):
    with context('si 1 máquina 1250kVA'):
        with it('must be TI-51W'):
            self.c.potencia = 1250
            for t in range(18, 24):
                self.c.tension = t
                expect(self.c.tipoinstalacion).to(equal('TI-51W'))
# 36 kV band: same power -> type mapping as the 24 kV band, "B" suffix.
with context('si 36kV>=tension>24kV'):
    with before.each:
        self.c = CT()
        self.c.situacion = 'L'
        self.c.numero_maquinas = 1
    with context('si situacion es Local'):
        with context('si 1 máquina 15kVA'):
            with it('must be TI-42B'):
                self.c.potencia = 15
                for t in range(25, 36):
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-42B'))
    with context('si situacion es Local'):
        with context('si 1 máquina 25kVA'):
            with it('must be TI-43B'):
                self.c.potencia = 25
                for t in range(25, 36):
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-43B'))
    with context('si situacion es Local'):
        with context('si 1 máquina 50kVA'):
            with it('must be TI-44B'):
                self.c.potencia = 50
                for t in range(25, 36):
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-44B'))
    with context('si situacion es Local'):
        with context('si 1 máquina 100kVA'):
            with it('must be TI-45B'):
                self.c.potencia = 100
                for t in range(25, 36):
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-45B'))
    with context('si situacion es Local'):
        with context('si 1 máquina 160kVA'):
            with it('must be TI-46B'):
                self.c.potencia = 160
                for t in range(25, 36):
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-46B'))
    with context('si situacion es Local'):
        with context('si 1 máquina 250kVA'):
            with it('must be TI-47B'):
                self.c.potencia = 250
                for t in range(25, 36):
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-47B'))
    with context('si situacion es Local'):
        with context('si 1 máquina 400kVA'):
            with it('must be TI-48B'):
                self.c.potencia = 400
                for t in range(25, 36):
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-48B'))
    with context('si situacion es Local'):
        with context('si 1 máquina 630kVA'):
            with it('must be TI-49B'):
                self.c.potencia = 630
                for t in range(25, 36):
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-49B'))
    with context('si situacion es Local'):
        with context('si 1 máquina 1000kVA'):
            with it('must be TI-50B'):
                self.c.potencia = 1000
                for t in range(25, 36):
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-50B'))
    with context('si situacion es Local'):
        with context('si 1 máquina 1250kVA'):
            with it('must be TI-51B'):
                self.c.potencia = 1250
                for t in range(25, 36):
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-51B'))
# 52 kV band: same power -> type mapping, "C" suffix.
with context('si 52kV>=tension>36kV'):
    with before.each:
        self.c = CT()
        self.c.situacion = 'L'
        self.c.numero_maquinas = 1
    with context('si situacion es Local'):
        with context('si 1 máquina 15kVA'):
            with it('must be TI-42C'):
                self.c.potencia = 15
                for t in range(37, 52):
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-42C'))
    with context('si situacion es Local'):
        with context('si 1 máquina 25kVA'):
            with it('must be TI-43C'):
                self.c.potencia = 25
                for t in range(37, 52):
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-43C'))
    with context('si situacion es Local'):
        with context('si 1 máquina 50kVA'):
            with it('must be TI-44C'):
                self.c.potencia = 50
                for t in range(37, 52):
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-44C'))
    with context('si situacion es Local'):
        with context('si 1 máquina 100kVA'):
            with it('must be TI-45C'):
                self.c.potencia = 100
                for t in range(37, 52):
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-45C'))
    with context('si situacion es Local'):
        with context('si 1 máquina 160kVA'):
            with it('must be TI-46C'):
                self.c.potencia = 160
                for t in range(37, 52):
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-46C'))
    with context('si situacion es Local'):
        with context('si 1 máquina 250kVA'):
            with it('must be TI-47C'):
                self.c.potencia = 250
                for t in range(37, 52):
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-47C'))
    with context('si situacion es Local'):
        with context('si 1 máquina 400kVA'):
            with it('must be TI-48C'):
                self.c.potencia = 400
                for t in range(37, 52):
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-48C'))
    with context('si situacion es Local'):
        with context('si 1 máquina 630kVA'):
            with it('must be TI-49C'):
                self.c.potencia = 630
                for t in range(37, 52):
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-49C'))
    with context('si situacion es Local'):
        with context('si 1 máquina 1000kVA'):
            with it('must be TI-50C'):
                self.c.potencia = 1000
                for t in range(37, 52):
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-50C'))
    with context('si situacion es Local'):
        with context('si 1 máquina 1250kVA'):
            with it('must be TI-51C'):
                self.c.potencia = 1250
                for t in range(37, 52):
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-51C'))
    # NOTE(review): this 1250kVA spec duplicates the block directly above
    # -- probably a copy/paste leftover; confirm before removing.
    with context('si situacion es Local'):
        with context('si 1 máquina 1250kVA'):
            with it('must be TI-51C'):
                self.c.potencia = 1250
                for t in range(37, 52):
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-51C'))
# 72.5 kV band: same power -> type mapping, "D" suffix.
# NOTE(review): `range(53, 72) + [72.5]` only works on Python 2, where
# range() returns a list; on Python 3 it raises TypeError.
with context('si 72.5kV>=tension>52V'):
    with before.each:
        self.c = CT()
        self.c.situacion = 'L'
        self.c.numero_maquinas = 1
    with context('si situacion es Local'):
        with context('si 1 máquina 15kVA'):
            with it('must be TI-42D'):
                self.c.potencia = 15
                for t in range(53, 72) + [72.5]:
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-42D'))
    with context('si situacion es Local'):
        with context('si 1 máquina 25kVA'):
            with it('must be TI-43D'):
                self.c.potencia = 25
                for t in range(53, 72) + [72.5]:
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-43D'))
    with context('si situacion es Local'):
        with context('si 1 máquina 50kVA'):
            with it('must be TI-44D'):
                self.c.potencia = 50
                for t in range(53, 72) + [72.5]:
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-44D'))
    with context('si situacion es Local'):
        with context('si 1 máquina 100kVA'):
            with it('must be TI-45D'):
                self.c.potencia = 100
                for t in range(53, 72) + [72.5]:
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-45D'))
    with context('si situacion es Local'):
        with context('si 1 máquina 160kVA'):
            with it('must be TI-46D'):
                self.c.potencia = 160
                for t in range(53, 72) + [72.5]:
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-46D'))
    with context('si situacion es Local'):
        with context('si 1 máquina 250kVA'):
            with it('must be TI-47D'):
                self.c.potencia = 250
                for t in range(53, 72) + [72.5]:
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-47D'))
    with context('si situacion es Local'):
        with context('si 1 máquina 400kVA'):
            with it('must be TI-48D'):
                self.c.potencia = 400
                for t in range(53, 72) + [72.5]:
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-48D'))
    with context('si situacion es Local'):
        with context('si 1 máquina 630kVA'):
            with it('must be TI-49D'):
                self.c.potencia = 630
                for t in range(53, 72) + [72.5]:
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-49D'))
    with context('si situacion es Local'):
        with context('si 1 máquina 1000kVA'):
            with it('must be TI-50D'):
                self.c.potencia = 1000
                for t in range(53, 72) + [72.5]:
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-50D'))
    with context('si situacion es Local'):
        with context('si 1 máquina 1250kVA'):
            with it('must be TI-51D'):
                self.c.potencia = 1250
                for t in range(53, 72) + [72.5]:
                    self.c.tension = t
                    expect(self.c.tipoinstalacion).to(equal('TI-51D'))
with context('si 12kV>=tension>=1kV'):
# 12 kV band with two machines: rated powers are doubled (N * 2) and map
# to the "U" installation types.
with before.each:
    self.c = CT()
    self.c.situacion = 'L'
    self.c.numero_maquinas = 2
with context('si situacion es Local'):
    with context('si 2 máquinas 15kVA'):
        with it('must be TI-52U'):
            # NOTE(review): unlike the sibling specs below, the power here
            # is not doubled (15, not 15 * 2) -- verify intended rating.
            self.c.potencia = 15
            for t in range(1, 12):
                self.c.tension = t
                expect(self.c.tipoinstalacion).to(equal('TI-52U'))
with context('si situacion es Local'):
    with context('si 2 máquinas 25kVA'):
        with it('must be TI-53U'):
            self.c.potencia = 25 * 2
            for t in range(1, 12):
                self.c.tension = t
                expect(self.c.tipoinstalacion).to(equal('TI-53U'))
with context('si situacion es Local'):
    with context('si 2 máquinas 50kVA'):
        with it('must be TI-54U'):
            self.c.potencia = 50 * 2
            for t in range(1, 12):
                self.c.tension = t
                expect(self.c.tipoinstalacion).to(equal('TI-54U'))
with context('si situacion es Local'):
    with context('si 2 máquinas 100kVA'):
        with it('must be TI-55U'):
            self.c.potencia = 100 * 2
            for t in range(1, 12):
                self.c.tension = t
                expect(self.c.tipoinstalacion).to(equal('TI-55U'))
with context('si situacion es Local'):
with context('si 2 máquinas 100kVA'):
with it('must be TI-55U'):
self.c.potencia = 100 | |
import os
import csv
import sys
import re
import threading
import time
import tkinter as tk
from tkinter import scrolledtext, ttk, messagebox, END
from tkinter.filedialog import askdirectory
class App(tk.Frame):
# Base name used when generating the CSV results filename.
__OUTFILE_PREFIX = "ParsedFileResults"
# CSV column headers written for 835 parsing output.
__OUTFILE_HEADERS = ['FILENAME', 'TRN02', 'TRN03', 'PAYER', 'PAYEE', 'NPI',
                     'CLAIM', 'CLP02', 'PLB_DATA']
# CSV column headers written for 271 parsing output.
__OUTFILE_HEADERS_271 = ['FILENAME', 'LASTNAME', 'FIRSTNAME', 'MIDINITIAL',
                         'SUBSCRIBERID', 'INSTYPECODE']
# Defaults: match *.835 files and parse them in 835 mode.
__DEFAULT_FILE_PATTERN = ".835"
__DEFAULT_PARSE_MODE = "835"
# Bundled resource files, resolved at runtime via resource_path().
__HELPFILE = 'help.txt'
__CHANGELOG = 'changelog.txt'
__ICONFILE = '835_icon.ico'
def __init__(self, master, **kw):
    """Build the app frame: window icon, run-state defaults, widgets.

    master: the Tk root (or toplevel) this frame belongs to; extra
    keyword args are forwarded to tk.Frame.
    """
    super().__init__(master, **kw)
    self.master = master
    # Resolve the bundled icon relative to the script/bundle location.
    self.icon_path = self.resource_path(self.__ICONFILE)
    self.master.iconbitmap(self.icon_path)
    # Input/output locations are filled in via the Browse buttons.
    self.__outfile_path = ""
    self.__source_dir = ""
    self.__file_pattern = ""
    # User-toggleable settings, bound to the Settings-menu checkbuttons.
    self.__append_runs = tk.BooleanVar()
    self.__append_runs.set(True)
    self.__traverse_subdir = tk.BooleanVar()
    self.__traverse_subdir.set(False)
    self.__parse_271 = tk.BooleanVar()
    self.__parse_271.set(False)
    self.__run_counter = 1
    self.__outfile_name = self.get_new_outfile_name()
    self.__file_exists = self.check_outfile_exists(self.__outfile_name)
    # Start in 835 mode until the user flips the 271 toggle.
    self.__parse_mode = self.__DEFAULT_PARSE_MODE
    self.__headers = self.__OUTFILE_HEADERS
    self.__init_widgets_menu()
    self.__init_widgets_other()
def __init_widgets_menu(self):
    """Sets up menu related widgets."""
    # Menu Bar
    menu_bar = tk.Menu(self.master)
    self.master.config(menu=menu_bar)
    # File
    self.file_menu = tk.Menu(menu_bar, tearoff=0)
    self.file_menu.add_command(label="Open",
                               command=self.browse_for_open_loc)
    self.file_menu.add_command(label="Save",
                               command=self.browse_for_save_loc)
    self.file_menu.add_separator()
    self.file_menu.add_command(label="Exit", command=self.quit)
    menu_bar.add_cascade(label='File', menu=self.file_menu)
    # Settings: checkbuttons bound to the BooleanVars created in __init__;
    # only the 271 toggle needs an immediate UI refresh.
    self.settings_menu = tk.Menu(menu_bar, tearoff=0)
    self.settings_menu.add_checkbutton(
        label="Append subsequent runs", onvalue=1, offvalue=0,
        variable=self.__append_runs, command=None)
    self.settings_menu.add_checkbutton(
        label="Search in subdirectories", onvalue=1, offvalue=0,
        variable=self.__traverse_subdir, command=None)
    self.settings_menu.add_checkbutton(
        label="Use for 271 parsing", onvalue=1, offvalue=0,
        variable=self.__parse_271, command=self.update_widgets_271_toggle)
    menu_bar.add_cascade(label='Settings', menu=self.settings_menu)
    # Help
    help_menu = tk.Menu(menu_bar, tearoff=0)
    help_menu.add_command(label="Help", command=self.open_help)
    help_menu.add_command(label="Change Log", command=self.open_changelog)
    menu_bar.add_cascade(label="Help", menu=help_menu)
def __init_widgets_other(self):
    """Sets up non-menu widgets. """
    # Frame Setup
    # NOTE(review): frames are gridded into the module-level `root`, not
    # into self/self.master -- this only works while App lives in root.
    inputs_frame = tk.LabelFrame(root, text="1. Enter File Details: ")
    inputs_frame.grid(row=0, columnspan=10, sticky='WE', padx=5, pady=5,
                      ipadx=5, ipady=5)
    inputs_frame.columnconfigure(1, weight=1)
    output_frame = tk.Label(root)
    output_frame.grid(row=1, columnspan=10, sticky='NSEW', padx=5, pady=5,
                      ipadx=5, ipady=5)
    output_frame.columnconfigure(1, weight=1)
    output_frame.rowconfigure(0, weight=1)
    progress_frame = tk.Frame(root)
    progress_frame.grid(row=2, columnspan=10, sticky='WE', padx=5, pady=5)
    progress_frame.columnconfigure(1, weight=1)
    progress_frame.rowconfigure(1, weight=1)
    footer_frame = tk.Label(root)
    footer_frame.grid(row=3, columnspan=10, sticky='EW', padx=5, pady=1)
    footer_frame.columnconfigure(1, weight=1)
    footer_frame.rowconfigure(2, weight=1)
    # File Pattern Input
    file_pattern_lbl = tk.Label(inputs_frame, text="File Pattern", anchor='w')
    file_pattern_lbl.grid(row=0, column=0, sticky='WE', padx=5, pady=2)
    self.file_pattern_txt = tk.Entry(inputs_frame, state="normal")
    self.file_pattern_txt.grid(row=0, column=1, columnspan=7,
                               sticky="WE", padx=5, pady=2)
    self.file_pattern_txt.insert(0, str(self.__DEFAULT_FILE_PATTERN))
    # Source Directory Prompt (label text is swapped by the 271 toggle)
    self.in_folder_lbl = tk.Label(inputs_frame, text="Folder with 835s:", anchor='w')
    self.in_folder_lbl.grid(row=1, column=0, sticky='WE', padx=5, pady=2)
    self.in_folder_txt = tk.Entry(inputs_frame, state="disabled")
    self.in_folder_txt.grid(row=1, column=1, columnspan=7,
                            sticky="WE", padx=5, pady=2)
    self.in_folder_btn = tk.Button(inputs_frame, text="Browse ...",
                                   command=self.browse_for_open_loc)
    self.in_folder_btn.grid(row=1, column=10, sticky='E', padx=5, pady=2)
    # Save Results Prompt
    out_folder_lbl = tk.Label(inputs_frame, text="Save Results to:",
                              anchor='w')
    out_folder_lbl.grid(row=2, column=0, sticky='WE', padx=5, pady=2)
    self.out_folder_txt = tk.Entry(inputs_frame, state="disabled")
    self.out_folder_txt.grid(row=2, column=1, columnspan=7, sticky="WE",
                             padx=5, pady=2)
    self.out_folder_btn = tk.Button(inputs_frame, text="Browse ...",
                                    command=self.browse_for_save_loc)
    self.out_folder_btn.grid(row=2, column=10, sticky='E', padx=5, pady=2)
    # Results Output Display
    self.output_text = tk.scrolledtext.ScrolledText(
        output_frame, wrap='word', height=5, width=10,
        font=('', 8), fg="#333333")
    self.output_text.grid(row=0, column=1, sticky="NSEW", padx=5, pady=2)
    self.xscroll_bar = tk.Scrollbar(output_frame, orient='horizontal',
                                    command=self.output_text.xview)
    self.output_text.configure(xscrollcommand=self.xscroll_bar.set)
    self.xscroll_bar.grid(row=2, column=1, sticky='EW')
    # Progress Bar (created here but not gridded in this method)
    self.progress_bar = ttk.Progressbar(progress_frame, orient="horizontal",
                                        length=200, mode="determinate")
    # Run and Close
    self.ok_btn = tk.Button(footer_frame, text="Run",
                            command=self.setup_processing)
    self.ok_btn.grid(row=2, column=2, sticky='SE', padx=5, pady=2, ipadx=27)
    close_btn = tk.Button(footer_frame, text="Close", command=self.quit)
    close_btn.grid(row=2, column=3, sticky='SE', padx=5, pady=2, ipadx=20)
def update_widgets_271_toggle(self):
    """Flip the UI between 835 and 271 parsing modes (Settings menu)."""
    if self.__parse_271.get():
        self.__parse_mode = "271"
        self.__file_pattern = ".txt"
        self.in_folder_lbl.config(text = "Folder with 271s:")
        self.__headers = self.__OUTFILE_HEADERS_271
    else:
        self.__parse_mode = "835"
        self.__file_pattern = self.__DEFAULT_FILE_PATTERN
        self.in_folder_lbl.config(text = "Folder with 835s:")
        self.__headers = self.__OUTFILE_HEADERS
    # The pattern entry is refreshed the same way in both modes.
    self.file_pattern_txt.delete(0, END)
    self.file_pattern_txt.insert(0, str(self.__file_pattern))
def open_changelog(self):
    """Opens changelog file for user in new window."""
    try:
        with open(self.resource_path(self.__CHANGELOG),
                  mode='r') as changelogfile:
            msg = changelogfile.read()
        new_window = tk.Toplevel(self.master)
        new_window.title('Change Log')
        new_window.resizable(width=False, height=False)
        new_window.iconbitmap(self.icon_path)
        changelog_frame = tk.Frame(new_window)
        changelog_frame.pack()
        # The widget is kept read-only; it must be flipped to 'normal'
        # before insert() will accept the changelog text.
        txt_widget = tk.scrolledtext.ScrolledText(changelog_frame,
                                                  wrap='none',
                                                  state='disabled')
        txt_widget.configure(state='normal', font='TkFixedFont')
        txt_widget.insert(tk.END, str(msg))
        txt_widget.configure(state='disabled')
        xscroll_bar = tk.Scrollbar(changelog_frame, orient='horizontal',
                                   command=txt_widget.xview)
        txt_widget.configure(xscrollcommand=xscroll_bar.set)
        txt_widget.grid(row=0, column=0, sticky='EW')
        xscroll_bar.grid(row=1, column=0, sticky='EW')
    except IOError:
        tk.messagebox.showerror(title="Error",
                                message='Error opening changelog')
def open_help(self):
    """Opens help file for user in new window."""
    try:
        helpfile_path = self.resource_path(self.__HELPFILE)
        with open(helpfile_path, mode='r') as helpfile:
            msg = helpfile.read()
        tk.messagebox.showinfo('Help', message=msg, icon='question')
    except IOError:
        tk.messagebox.showerror(title="Error",
                                message='Error opening help file')
    def quit(self):
        """Close the app by destroying the root window.

        NOTE(review): relies on a module-level ``root`` variable rather
        than ``self.master`` -- confirm both refer to the same Tk root.
        """
        root.destroy()
def update_outfile_path(self, save_loc, filename):
"""Updates outfile to specified save_loc and filename."""
self.__outfile_path = os.path.join(save_loc, filename)
    def browse_for_open_loc(self):
        """Ask the user for the input folder and record it as the parse source.

        Refreshes the read-only input-folder entry with the chosen path.
        """
        msg = "Browse for Folder containing {ext}s to parse...".format(ext = self.__parse_mode)
        open_loc = os.path.normpath(askdirectory(title=msg))
        # entry is kept read-only; make it writable just long enough to update
        self.in_folder_txt.configure(state='normal')
        self.in_folder_txt.delete(0, tk.END)
        self.in_folder_txt.insert(0, str(open_loc))
        self.in_folder_txt.configure(state='disabled')
        self.__source_dir = os.path.normpath(open_loc)
    def browse_for_save_loc(self):
        """Ask the user for the output folder and update the outfile path.

        Refreshes the read-only output-folder entry, then rebuilds the
        full output path from the chosen folder and the current file name.
        """
        save_loc = os.path.normpath(askdirectory(
            # initialdir=expanduser(pathvar.get()), # used for debugging
            title="Browse for where to save output results..."))
        # entry is kept read-only; make it writable just long enough to update
        self.out_folder_txt.configure(state='normal')
        self.out_folder_txt.delete(0, tk.END)
        self.out_folder_txt.insert(0, str(save_loc))
        self.out_folder_txt.configure(state='disabled')
        self.update_outfile_path(save_loc, self.__outfile_name)
def write_to_csv(self, *args):
"""Appends input args to outfile."""
with open(self.__outfile_path, newline='', mode='a') as outcsv:
csv_writer = csv.writer(outcsv, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
data = []
for x in args:
data.append(x)
csv_writer.writerow(data)
    @staticmethod
    def warn_missing_loc():
        """Show an error dialog asking the user to pick input/output folders."""
        msg = "Please specify an input and output folder location"
        tk.messagebox.showerror(title="Error- Did you forget??", message=msg)
    def print_output(self, text):
        """Append ``text`` to the output widget and scroll it into view."""
        self.output_text.configure(state='normal')
        self.output_text.insert(tk.END, str(text))
        self.output_text.see(tk.END)
        # keep the widget read-only between writes
        self.output_text.configure(state='disabled')
@staticmethod
def resource_path(relative_path):
# Get absolute path to resource, works for dev and for PyInstaller
base_path = getattr(sys, '_MEIPASS',
os.path.dirname(os.path.abspath(__file__)))
return os.path.join(base_path, relative_path)
    def disable_widgets(self):
        """Lock the input widgets while the program is parsing files.

        Prevents changes to the folder selections, the file pattern, and
        the first two entries of the File and Settings menus, to avoid
        unexpected issues mid-run.  Reversed by ``enable_widgets``.
        """
        self.ok_btn.configure(state='disabled')
        self.in_folder_btn.configure(state='disabled')
        self.out_folder_btn.configure(state='disabled')
        self.file_pattern_txt.configure(state='disabled')
        # first two entries (indexes 0 and 1) of each menu
        self.file_menu.entryconfigure(0, state='disabled')
        self.file_menu.entryconfigure(1, state='disabled')
        self.settings_menu.entryconfigure(0, state='disabled')
        self.settings_menu.entryconfigure(1, state='disabled')
    def enable_widgets(self):
        """Re-enable the widgets locked by ``disable_widgets``."""
        self.ok_btn.configure(state='normal')
        self.in_folder_btn.configure(state='normal')
        self.out_folder_btn.configure(state='normal')
        self.file_pattern_txt.configure(state='normal')
        # first two entries (indexes 0 and 1) of each menu
        self.file_menu.entryconfigure(0, state='normal')
        self.file_menu.entryconfigure(1, state='normal')
        self.settings_menu.entryconfigure(0, state='normal')
        self.settings_menu.entryconfigure(1, state='normal')
@staticmethod
def check_outfile_exists(outfile_name):
"""Returns if outfile_name param is an existing file."""
return os.path.isfile(outfile_name)
def get_new_outfile_name(self):
"""Returns new filename affixed with current time-related elements."""
return (self.__OUTFILE_PREFIX + " " +
time.strftime("%Y-%m-%d-%H%M%S") + ".csv")
def begin_progressbar(self):
"""
Disable widgets that affect input values used when processing and
make progress bar visible.
"""
self.disable_widgets()
self.progress_bar.grid(row=2, column=1, stick='EW')
    def update_progressbar(self, amount):
        """Set the progress bar to ``amount`` and force a UI refresh."""
        self.progress_bar['value'] = amount
        # flush pending draw events so the bar visibly moves during a
        # long-running parse
        self.master.update_idletasks()
    def end_progressbar(self):
        """Reset the progress bar to zero, re-enable widgets, and hide the bar."""
        self.update_progressbar(int(0))
        self.enable_widgets()
        self.progress_bar.grid_forget()
    def process_queue(self):
        """Placeholder for a future Queue-based worker/UI handoff.  Unused.

        The sketch below shows the intended polling loop; kept for
        reference until the feature is implemented.
        """
        pass
        # try:
        #     msg = self.queue.get(0)
        #     # Show result of the task if needed
        #     print(msg)
        #     self.progressBar.stop()
        # except queue.Empty:
        #     self.master.after(100, self.process_queue)
def get_files_list(self, source_dir):
"""Returns a list of files in specified source_dir."""
if self.__traverse_subdir.get():
files = [os.path.join(root, f) for root, dirs, files
in os.walk(source_dir) for f in files]
# files = [f for root,dirs,files in os.walk(source_dir) for f in files]
else:
files = [f for f in os.listdir(source_dir)
if os.path.isfile(os.path.join(source_dir, f))]
return files
    def parse_271(self, full_file_path, filename):
        """Parse a 271 file and append one CSV row per line.

        Pulls subscriber name/ID fields from ``NM1*IL*1*`` segments and a
        type code from ``EB*R**30*`` segments, then writes
        [filename, lname, fname, midin, subid, instype] via write_to_csv.
        """
        with open(full_file_path, 'r') as file:
            file_data = file.readlines()
            num_lines_in_file = len(file_data)
            # some files arrive as one long line; split so both layouts
            # are handled identically below
            if num_lines_in_file == 1:
                file_content = file_data[0].split(sep="\n")
            else:
                file_content = file_data
            for line in file_content:
                lname, fname, midin, subid, instype = "", "", "", "", ""
                # locate the two segments of interest in this line
                idxA1 = ("EB", line.find("EB*R**30*"))
                idxB1 = ("SUB", line.find("NM1*IL*1*"))
                # sorting the (tag, index) tuples orders "EB" before "SUB"
                # lexicographically, fixing the processing order
                indexes = sorted([idxA1, idxB1])
                for start_index in indexes:
                    # segment data runs from the marker up to the next space
                    # NOTE(review): when a marker is absent, find() returned -1
                    # and this slice starts at the last character -- confirm
                    # every input line contains both segments
                    idx_white_space = line[start_index[1]:].find(" ")
                    data = line[start_index[1]:start_index[1] + idx_white_space]
                    if start_index[0] == "SUB":
                        lname = data.split('*')[3]  # last name
                        fname = data.split('*')[4]  # first name
                        midin = data.split('*')[7]  # middle initial? TODO confirm
                        subid = data.split('*')[9]  # subscriber ID
                    elif start_index[0] == "EB":
                        instype = data.split('*')[4]  # insurance type code
                parsed_line = [filename, lname, fname, midin, subid, instype]
                # print(parsed_line)
                self.write_to_csv(*parsed_line)
    def parse_835(self, full_file_path, filename):
        """
        Consider if filename in specified full_file_path is split across
        multiple lines (standard) or all in 1 line (less common). Write desired
        values out as each CLP or PLB segment is found.
        Note: Currently will update TRN and N1* as the respective segment
        occurs in a file, but in the future it may be desired to clear values
        if a new one occurs, so as not to mix.
        """
        # file-level values carried forward onto every row written
        file_trn02 = file_trn03 = file_payer = file_payee = file_npi = ""
        with open(full_file_path, 'r') as file:
            file_data = file.readlines()
            num_lines_in_file = len(file_data)
            if num_lines_in_file == 1:
                # whole file on one line: split on the '~' segment separator
                file_content = file_data[0].split(sep="~")
            else:
                file_content = file_data
            for line in file_content:
                claim, clp02, plb = "", "", ""  # reset per-row values
                if line.startswith("TRN"):
                    file_trn02 = re.sub('~', '', line.split('*')[2])  # TRN;02
                    file_trn03 = re.sub('~', '', line.split('*')[3])  # TRN;03
                if line.startswith("N1*PR"):
                    file_payer = re.sub('~', '', line.split('*')[2]).rstrip()  # N1;02
                if line.startswith("N1*PE"):
                    file_payee = re.sub('~', '', line.split('*')[2]).rstrip()  # N1;02
                    file_npi = re.sub('~', '', line.split('*')[4])  # N1;04
                if line.startswith("CLP"):
                    # one output row per CLP segment
                    claim = re.sub('~', '', line.split('*')[1])  # CLP;01
                    clp02 = re.sub('~', '', line.split('*')[2])  # CLP;02
                    parsed_line = [filename, file_trn02, file_trn03,
                                   file_payer, file_payee, file_npi, claim,
                                   clp02, plb]
                    self.write_to_csv(*parsed_line)
                elif line.startswith("PLB"):  # PLB;*
                    # PLB segment: write the whole raw segment as one field
                    plb = re.sub('~', '', line.rstrip())
                    parsed_line = [filename, file_trn02, file_trn03,
                                   file_payer, file_payee, file_npi, claim,
                                   clp02, plb]
                    self.write_to_csv(*parsed_line)
def process_files(self, file_pattern, source_dir):
"""
Get | |
\n',
'ATOM 1260 H7 MOL 2 21.566 11.115 11.837 1.00 0.00 H1- \n',
'ATOM 1261 N1 MOL 2 5.510 23.896 13.515 1.00 0.00 N3- \n',
'ATOM 1262 C1 MOL 2 3.179 23.469 15.133 1.00 0.00 C \n',
'ATOM 1263 C2 MOL 2 3.515 22.704 14.027 1.00 0.00 C \n',
'ATOM 1264 C3 MOL 2 4.685 23.027 13.212 1.00 0.00 C \n',
'ATOM 1265 C4 MOL 2 6.661 24.093 12.648 1.00 0.00 C \n',
'ATOM 1266 C5 MOL 2 7.921 23.823 13.456 1.00 0.00 C \n',
'ATOM 1267 C6 MOL 2 9.161 24.096 12.616 1.00 0.00 C \n',
'ATOM 1268 H1 MOL 2 3.705 24.205 15.351 1.00 0.00 H1- \n',
'ATOM 1269 H2 MOL 2 4.814 22.553 12.422 1.00 0.00 H1- \n',
'ATOM 1270 H3 MOL 2 6.614 23.476 11.889 1.00 0.00 H1- \n',
'ATOM 1271 H4 MOL 2 7.926 22.898 13.752 1.00 0.00 H1- \n',
'ATOM 1272 H5 MOL 2 7.931 24.391 14.243 1.00 0.00 H1- \n',
'ATOM 1273 H6 MOL 2 9.960 23.910 13.134 1.00 0.00 H1- \n',
'ATOM 1274 H7 MOL 2 9.166 23.515 11.837 1.00 0.00 H1- \n',
'ATOM 1275 N1 MOL 2 5.510 11.496 1.115 1.00 0.00 N3- \n',
'ATOM 1276 C1 MOL 2 3.179 11.069 2.733 1.00 0.00 C \n',
'ATOM 1277 C2 MOL 2 3.515 10.304 1.627 1.00 0.00 C \n',
'ATOM 1278 C3 MOL 2 4.685 10.627 0.812 1.00 0.00 C \n',
'ATOM 1279 C4 MOL 2 6.661 11.693 0.248 1.00 0.00 C \n',
'ATOM 1280 C5 MOL 2 7.921 11.423 1.056 1.00 0.00 C \n',
'ATOM 1281 C6 MOL 2 9.161 11.696 0.216 1.00 0.00 C \n',
'ATOM 1282 H1 MOL 2 3.705 11.805 2.951 1.00 0.00 H1- \n',
'ATOM 1283 H2 MOL 2 4.814 10.153 0.022 1.00 0.00 H1- \n',
'ATOM 1284 H3 MOL 2 6.614 11.076 24.289 1.00 0.00 H1- \n',
'ATOM 1285 H4 MOL 2 7.926 10.498 1.352 1.00 0.00 H1- \n',
'ATOM 1286 H5 MOL 2 7.931 11.991 1.843 1.00 0.00 H1- \n',
'ATOM 1287 H6 MOL 2 9.960 11.510 0.734 1.00 0.00 H1- \n',
'ATOM 1288 H7 MOL 2 9.166 11.115 24.237 1.00 0.00 H1- \n',
'ATOM 1289 N1 MOL 2 5.510 0.904 11.285 1.00 0.00 N3- \n',
'ATOM 1290 C1 MOL 2 3.179 1.331 9.667 1.00 0.00 C \n',
'ATOM 1291 C2 MOL 2 3.515 2.096 10.773 1.00 0.00 C \n',
'ATOM 1292 C3 MOL 2 4.685 1.773 11.588 1.00 0.00 C \n',
'ATOM 1293 C4 MOL 2 6.661 0.707 12.152 1.00 0.00 C \n',
'ATOM 1294 C5 MOL 2 7.921 0.977 11.344 1.00 0.00 C \n',
'ATOM 1295 C6 MOL 2 9.161 0.704 12.184 1.00 0.00 C \n',
'ATOM 1296 H1 MOL 2 3.705 0.595 9.449 1.00 0.00 H1- \n',
'ATOM 1297 H2 MOL 2 4.814 2.247 12.378 1.00 0.00 H1- \n',
'ATOM 1298 H3 MOL 2 6.614 1.324 12.911 1.00 0.00 H1- \n',
'ATOM 1299 H4 MOL 2 7.926 1.902 11.048 1.00 0.00 H1- \n',
'ATOM 1300 H5 MOL 2 7.931 0.409 10.557 1.00 0.00 H1- \n',
'ATOM 1301 H6 MOL 2 9.960 0.890 11.666 1.00 0.00 H1- \n',
'ATOM 1302 H7 MOL 2 9.166 1.285 12.963 1.00 0.00 H1- \n',
'ATOM 1303 N1 MOL 2 5.510 13.304 23.685 1.00 0.00 N3- \n',
'ATOM 1304 C1 MOL 2 3.179 13.731 22.067 1.00 0.00 C \n',
'ATOM 1305 C2 MOL 2 3.515 14.496 23.173 1.00 0.00 C \n',
'ATOM 1306 C3 MOL 2 4.685 14.173 23.988 1.00 0.00 C \n',
'ATOM 1307 C4 MOL 2 6.661 13.107 24.552 1.00 0.00 C \n',
'ATOM 1308 C5 MOL 2 7.921 13.377 23.744 1.00 0.00 C \n',
'ATOM 1309 C6 MOL 2 9.161 13.104 24.584 1.00 0.00 C \n',
'ATOM 1310 H1 MOL 2 3.705 12.995 21.849 1.00 0.00 H1- \n',
'ATOM 1311 H2 MOL 2 4.814 14.647 24.778 1.00 0.00 H1- \n',
'ATOM 1312 H3 MOL 2 6.614 13.724 0.511 1.00 0.00 H1- \n',
'ATOM 1313 H4 MOL 2 7.926 14.302 23.448 1.00 0.00 H1- \n',
'ATOM 1314 H5 MOL 2 7.931 12.809 22.957 1.00 0.00 H1- \n',
'ATOM 1315 H6 MOL 2 9.960 13.290 24.066 1.00 0.00 H1- \n',
'ATOM 1316 H7 MOL 2 9.166 13.685 0.563 1.00 0.00 H1- \n',
'ATOM 1317 N1 MOL 2 17.910 0.904 23.685 1.00 0.00 N3- \n',
'ATOM 1318 C1 MOL 2 15.579 1.331 22.067 1.00 0.00 C \n',
'ATOM 1319 C2 MOL 2 15.915 2.096 23.173 1.00 0.00 C \n',
'ATOM 1320 C3 MOL 2 17.085 1.773 23.988 1.00 0.00 C \n',
'ATOM 1321 C4 MOL 2 19.061 0.707 24.552 1.00 0.00 C \n',
'ATOM 1322 C5 MOL 2 20.321 0.977 23.744 1.00 0.00 C \n',
'ATOM 1323 C6 MOL 2 21.561 0.704 24.584 1.00 0.00 C \n',
'ATOM 1324 H1 MOL 2 16.105 0.595 21.849 1.00 0.00 H1- \n',
'ATOM 1325 H2 MOL 2 17.214 2.247 24.778 1.00 0.00 H1- \n',
'ATOM 1326 H3 MOL 2 19.014 1.324 0.511 1.00 0.00 H1- \n',
'ATOM 1327 H4 MOL 2 20.326 1.902 23.448 1.00 0.00 H1- \n',
'ATOM 1328 H5 MOL 2 20.331 0.409 22.957 1.00 0.00 H1- \n',
'ATOM 1329 H6 MOL 2 22.360 0.890 24.066 1.00 0.00 H1- \n',
'ATOM 1330 H7 MOL 2 21.566 1.285 0.563 1.00 0.00 H1- \n',
'ATOM 1331 N1 MOL 2 17.910 13.304 11.285 1.00 0.00 N3- \n',
'ATOM 1332 C1 MOL 2 15.579 13.731 9.667 1.00 0.00 C \n',
'ATOM 1333 C2 MOL 2 15.915 14.496 10.773 1.00 0.00 C \n',
'ATOM 1334 C3 MOL 2 17.085 14.173 11.588 1.00 0.00 C \n',
'ATOM 1335 C4 MOL 2 19.061 13.107 12.152 1.00 0.00 C \n',
'ATOM 1336 C5 MOL 2 20.321 13.377 11.344 1.00 0.00 C \n',
'ATOM 1337 C6 MOL 2 21.561 13.104 12.184 1.00 0.00 C \n',
'ATOM 1338 H1 MOL 2 16.105 12.995 9.449 1.00 0.00 H1- \n',
'ATOM 1339 H2 MOL 2 17.214 14.647 12.378 1.00 0.00 H1- \n',
'ATOM 1340 H3 MOL 2 19.014 13.724 12.911 1.00 0.00 H1- \n',
'ATOM 1341 H4 MOL 2 20.326 14.302 11.048 1.00 0.00 H1- \n',
'ATOM 1342 H5 MOL 2 20.331 12.809 10.557 1.00 0.00 H1- \n',
'ATOM 1343 H6 MOL 2 22.360 13.290 11.666 1.00 0.00 H1- \n',
'ATOM 1344 H7 MOL 2 21.566 13.685 12.963 1.00 0.00 H1- \n',
'TER 1345 \n'
]
system_periodic = {
'remarks': [
'REMARK Materials Studio PDB file\n',
'REMARK Created: Wed Jul 26 11:23:20 GMT Standard Time 2017\n'
],
'unit_cell': np.array([24.8, 24.8, 24.8, 90. , 90. , 90. ]),
'lattice': np.array(
[
[2.48000000e+01, 1.51856203e-15, 1.51856203e-15],
[0.00000000e+00, 2.48000000e+01, 1.51856203e-15],
[0.00000000e+00, 0.00000000e+00, 2.48000000e+01]
]),
'atom_ids': np.array(
[
'N1', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'H1', 'H2', 'H3', 'H4',
'H5', 'H6', 'H7', 'N1', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'H1',
'H2', 'H3', 'H4', 'H5', 'H6', 'H7', 'N1', 'C1', 'C2', 'C3', 'C4',
'C5', 'C6', 'H1', 'H2', 'H3', 'H4', 'H5', 'H6', 'H7', 'N1', 'C1',
'C2', 'C3', 'C4', 'C5', 'C6', 'H1', 'H2', 'H3', 'H4', 'H5', 'H6',
'H7', 'N1', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'H1', 'H2', 'H3',
'H4', 'H5', 'H6', 'H7', 'N1', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6',
'H1', 'H2', 'H3', 'H4', 'H5', 'H6', 'H7', 'N1', 'C1', 'C2', 'C3',
'C4', 'C5', 'C6', 'H1', 'H2', 'H3', 'H4', 'H5', 'H6', 'H7', 'N1',
'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'H1', 'H2', 'H3', 'H4', 'H5',
'H6', 'H7', 'N1', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'H1', 'H2',
'H3', 'H4', 'H5', 'H6', 'H7', 'N1', 'C1', 'C2', 'C3', 'C4', 'C5',
'C6', 'H1', 'H2', 'H3', 'H4', 'H5', 'H6', 'H7', 'N1', 'C1', 'C2',
'C3', 'C4', 'C5', 'C6', 'H1', 'H2', 'H3', 'H4', 'H5', 'H6', 'H7',
'N1', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'H1', 'H2', 'H3', 'H4',
'H5', 'H6', 'H7', 'N1', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'H1',
'H2', 'H3', 'H4', 'H5', 'H6', 'H7', 'N1', 'C1', 'C2', 'C3', 'C4',
'C5', 'C6', 'H1', 'H2', 'H3', 'H4', 'H5', 'H6', 'H7', 'N1', 'C1',
'C2', 'C3', 'C4', 'C5', 'C6', 'H1', 'H2', 'H3', 'H4', 'H5', 'H6',
| |
<gh_stars>0
#
# pokersim.py - Runs a Monte Carlo simulation of a hand
# with user-specified 3 community cards
#
#
import argparse
import random
from flask import Flask, request
app = Flask(__name__)
@app.route("/")
def hello():
    """HTTP entry point: run the simulation for the given query params.

    Query args:
        flop: Community cards as text (card codes like those accepted
            by ``hand_to_numeric``).
        iterations: Number of Monte Carlo iterations to run.

    NOTE(review): ``int(iterations)`` raises if the parameter is missing
    or non-numeric -- no validation is performed here.
    """
    flop = request.args.get('flop')
    iterations = request.args.get('iterations')
    return main(flop, int(iterations))
def readable_hand(cards):
    """Return the human-readable text form of a list of numeric cards.

    Each card is a [rank, suit] pair; rank -1 / suit -1 denote a
    wildcard, rendered as "Xx".
    """
    rank_chars = {0: "2", 1: "3", 2: "4", 3: "5", 4: "6", 5: "7", 6: "8",
                  7: "9", 8: "T", 9: "J", 10: "Q", 11: "K", 12: "A", -1: "X"}
    suit_chars = {0: "c", 1: "d", 2: "h", 3: "s", -1: "x"}
    pieces = [rank_chars[card[0]] + suit_chars[card[1]] for card in cards]
    return "".join(pieces)
def hand_copy(cards, discarded_card1_index = None, discarded_card2_index = None, discarded_card3_index = None):
    """Return a shallow copy of ``cards`` with up to three positions skipped.

    Replaces deepcopy for a large speed win: card objects are shared,
    only the list itself is new.  ``None`` index arguments skip nothing.
    """
    skipped = (discarded_card1_index, discarded_card2_index, discarded_card3_index)
    return [card for position, card in enumerate(cards) if position not in skipped]
def legal_hand(cards):
    """Return False for an illegal hand, True otherwise.

    Illegal cases: any duplicated card, or the literal two-wildcard
    hand ``[-1, -1]``.
    """
    if cards == [-1, -1]:
        return False
    return all(cards.count(card) == 1 for card in cards)
def valid_card(card):
    """Return True when ``card`` is a valid two-character card code.

    The first character must be a rank (A, K, Q, J, T, 9-2) or the
    wildcard X; the second a suit (c, d, h, s) or wildcard x.  Both
    positions are case-insensitive.
    """
    rank_chars = "XxAaKkQqJjTt98765432"
    suit_chars = "xXcCdDhHsS"
    return card[0] in rank_chars and card[1] in suit_chars
def hand_to_numeric(cards):
    """Convert a text hand like "AsTd" into sorted numeric [rank, suit] pairs.

    Ranks map 2->0 ... A->12 (X -> -1 wildcard); suits c/d/h/s -> 0-3
    (x -> -1).  The result is sorted by rank, highest first, which the
    hand-evaluation helpers rely on.
    """
    rank_values = {"2": 0, "3": 1, "4": 2, "5": 3, "6": 4, "7": 5, "8": 6,
                   "9": 7, "T": 8, "J": 9, "Q": 10, "K": 11, "A": 12, "X": -1,
                   "t": 8, "j": 9, "q": 10, "k": 11, "a": 12, "x": -1}
    suit_values = {"c": 0, "C": 0, "d": 1, "D": 1, "h": 2, "H": 2,
                   "s": 3, "S": 3, "x": -1, "X": -1}
    card_count = len(cards) // 2 + len(cards) % 2
    numeric = [[rank_values[cards[pos]], suit_values[cards[pos + 1]]]
               for pos in range(0, 2 * card_count, 2)]
    numeric.sort(reverse=True)
    return numeric
def check_flush(hand):
    """Return True when all five cards share one real suit (0-3)."""
    suits = [hand[i][1] for i in range(5)]
    return any(suits.count(suit) == 5 for suit in range(4))
def check_straight(hand):
    """Return True for five ranks in consecutive descending order,
    including the ace-low wheel (A,5,4,3,2)."""
    ranks = [hand[i][0] for i in range(5)]
    if ranks == [12, 3, 2, 1, 0]:
        # ace plays low: A-5-4-3-2
        return True
    return all(ranks[i] == ranks[i + 1] + 1 for i in range(4))
def check_straightflush(hand):
    """Return True when the hand is both a flush and a straight."""
    return check_flush(hand) and check_straight(hand)
def check_fourofakind(hand):
    """Detect Four-of-a-Kind.

    Returns (found, quad_rank, kicker_rank); both ranks are the
    sentinel 13 when no four of a kind is present.
    """
    ranks = [hand[i][0] for i in range(5)]
    for quad_rank in range(13):
        if ranks.count(quad_rank) != 4:
            continue
        for kicker in range(13):
            if ranks.count(kicker) == 1:
                return True, quad_rank, kicker
    return False, 13, 13
def check_fullhouse(hand):
    """Detect a Full House.

    Returns (found, trips_rank, pair_rank); both ranks are the
    sentinel 13 when no full house is present.
    """
    ranks = [hand[i][0] for i in range(5)]
    for trips_rank in range(13):
        if ranks.count(trips_rank) != 3:
            continue
        for pair_rank in range(13):
            if ranks.count(pair_rank) == 2:
                return True, trips_rank, pair_rank
    return False, 13, 13
def check_threeofakind(hand):
    """Detect Three-of-a-Kind (with two distinct kickers).

    Returns (found, trips_rank, [high_kicker, low_kicker]); ranks are
    the sentinel 13 when no three of a kind is present.
    """
    ranks = [hand[i][0] for i in range(5)]
    for trips_rank in range(13):
        if ranks.count(trips_rank) != 3:
            continue
        kickers = sorted((r for r in set(ranks) if ranks.count(r) == 1),
                         reverse=True)
        if len(kickers) == 2:
            return True, trips_rank, kickers
    return False, 13, [13, 13]
def check_twopair(hand):
    """Detect Two Pair.

    Returns (found, [high_pair_rank, low_pair_rank], kicker_rank);
    ranks are the sentinel 13 when no two pair is present.
    """
    ranks = [hand[i][0] for i in range(5)]
    pair_ranks = sorted((r for r in set(ranks) if ranks.count(r) == 2),
                        reverse=True)
    if len(pair_ranks) == 2:
        for kicker in range(13):
            if ranks.count(kicker) == 1:
                return True, pair_ranks, kicker
    return False, [13, 13], 13
def check_onepair(hand):
    """Detect exactly One Pair (with three distinct kickers).

    Returns (found, pair_rank, [kickers high-to-low]); ranks are the
    sentinel 13 when no single pair is present.
    """
    ranks = [hand[i][0] for i in range(5)]
    pair_ranks = [r for r in set(ranks) if ranks.count(r) == 2]
    kickers = sorted((r for r in set(ranks) if ranks.count(r) == 1),
                     reverse=True)
    if len(pair_ranks) == 1 and len(kickers) == 3:
        return True, pair_ranks[0], kickers
    return False, 13, [13, 13, 13]
def highest_card(hand, hand2):
    """Compare two 5-card hands by rank lists alone.

    Returns 0 if ``hand`` is higher, 1 if ``hand2`` is higher, 2 if
    they tie (suits are ignored).
    """
    ranks1 = [hand[i][0] for i in range(5)]
    ranks2 = [hand2[i][0] for i in range(5)]
    if ranks1 == ranks2:
        return 2
    return 0 if ranks1 > ranks2 else 1
def highest_card_straight(hand, hand2):
    """Compare two straights: 0 first higher, 1 second higher, 2 tie.

    The second card decides first, so the ace-low wheel compares
    correctly.  On a second-card tie, the hand whose FIRST card is
    higher holds the Ace of a wheel and is therefore the LOWER straight.
    """
    second1, second2 = hand[1][0], hand2[1][0]
    if second1 != second2:
        return 0 if second1 > second2 else 1
    first1, first2 = hand[0][0], hand2[0][0]
    if first1 != first2:
        # higher first card here means an ace-low wheel -> lower hand
        return 1 if first1 > first2 else 0
    return 2
def compare_hands(hand, hand2):
#
# Compare two hands
# Return 0 if hand is better
# Return 1 if hand2 is better
# Return 2 if equal
#
result1 = []
result2 = []
#
# Check for straight flush
#
if check_straightflush(hand):
if check_straightflush(hand2):
return(highest_card_straight(hand, hand2))
else:
return 0
elif check_straightflush(hand2):
return 1
#
# Check for four of a kind
#
result1 = check_fourofakind(hand)
result2 = check_fourofakind(hand2)
if result1[0] == 1:
if result2[0] == 1:
if result1[1] > result2[1]:
return 0
elif result1[1] < result2[1]:
return 1
elif result1[2] > result2[2]:
return 0
elif result1[2] < result2[2]:
return 1
else:
return 2
else:
return 0
elif result2[0] == 1:
return 1
#
# Check for full house
#
result1 = check_fullhouse(hand)
result2 = check_fullhouse(hand2)
if result1[0] == 1:
if result2[0] == 1:
if result1[1] > result2[1]:
return 0
elif result1[1] < result2[1]:
return 1
elif result1[2] > result2[2]:
return 0
elif result1[2] < result2[2]:
return 1
else:
return 2
else:
return 0
elif result2[0] == 1:
return 1
#
# Check for flush
#
if check_flush(hand):
if check_flush(hand2):
return(highest_card(hand, hand2))
else:
return 0
elif check_flush(hand2):
return 1
#
# Check for straight
#
if check_straight(hand):
if check_straight(hand2):
temp = highest_card_straight(hand, hand2)
return temp
else:
return 0
elif check_straight(hand2):
return 1
#
# Check for three of a kind
#
result1 = check_threeofakind(hand)
result2 = check_threeofakind(hand2)
if result1[0] == 1:
if result2[0] == 1:
if result1[1] > result2[1]:
return 0
elif result1[1] < result2[1]:
return 1
elif result1[2] > result2[2]:
return 0
elif result1[2] < result2[2]:
return 1
else:
return 2
else:
return 0
elif result2[0] == 1:
return 1
#
# Check for two | |
import cv2
import numpy
from cimbar.util.geometry import calculate_midpoints
def next_power_of_two_plus_one(x):
    """Return the smallest power of two >= ``x``, plus one (always odd)."""
    exponent = (x - 1).bit_length()
    return (1 << exponent) + 1
# should be thought of as a line, not an area
class Anchor:
    """One candidate finder-pattern hit, stored as an x/y extent.

    Tracks the min/max coordinates seen for a detection and can be
    merged with overlapping detections of comparable scale.
    """
    __slots__ = 'x', 'xmax', 'y', 'ymax'

    def __init__(self, x, y, xmax=None, ymax=None):
        self.x = x
        self.y = y
        self.xmax = xmax or x
        self.ymax = ymax or y

    def merge(self, rhs):
        """Grow this anchor's extent to also cover ``rhs``."""
        self.x = min(self.x, rhs.x)
        self.y = min(self.y, rhs.y)
        self.xmax = max(self.xmax, rhs.xmax)
        self.ymax = max(self.ymax, rhs.ymax)

    @property
    def xavg(self):
        return (self.x + self.xmax) // 2

    @property
    def yavg(self):
        return (self.y + self.ymax) // 2

    @property
    def xrange(self):
        return abs(self.xmax - self.x) // 2

    @property
    def yrange(self):
        return abs(self.ymax - self.y) // 2

    @property
    def max_range(self):
        return max(abs(self.xmax - self.x), abs(self.ymax - self.y))

    @property
    def size(self):
        dx = self.x - self.xmax
        dy = self.y - self.ymax
        return dx * dx + dy * dy

    def is_mergeable(self, rhs, cutoff):
        """True when ``rhs`` is within ``cutoff`` and of comparable scale."""
        if abs(self.xavg - rhs.xavg) > cutoff or abs(self.yavg - rhs.yavg) > cutoff:
            return False
        ratio = rhs.max_range * 10 / self.max_range
        return 6 < ratio < 17

    def __repr__(self):
        return f'({self.xavg}+-{self.xrange}, {self.yavg}+-{self.yrange})'

    def __lt__(self, rhs):
        # order by distance from the top-left corner
        return self.xavg + self.yavg < rhs.xavg + rhs.yavg
class ScanState:
    """Run-length state machine that detects a 1:1:4:1:1 scanline pattern.

    ``process`` is fed one pixel at a time; ``state`` counts the
    active/inactive transitions seen so far and ``tally`` records the
    length of each run.  When six transitions have accumulated, the five
    inner run lengths are checked against the configured ratio limits.
    """
    # per-ratio (min, max) bounds for center/outer and center/inner runs
    RATIO_LIMITS = {
        '1:1:4': [(3.0, 6.0), (3.0, 6.0)],
        '1:2:2': [(1.0, 3.0), (0.5, 1.5)],
    }
    def __init__(self, ratio='1:1:4'):
        self.state = 0
        self.tally = [0]
        self.limits = self.RATIO_LIMITS[ratio]
    def pop_state(self):
        """Discard the two oldest runs so scanning can continue mid-line."""
        # when state == 6, we need to drop down to state == 4
        self.state -= 2
        self.tally = self.tally[2:]
    def evaluate_state(self):
        """Return the pattern's total width if the five runs match, else None."""
        if self.state != 6:
            return None
        # ratio should be 1:1:4:1:1
        ones = self.tally[1:6]
        for s in ones:
            if not s:
                return None
        center = ones.pop(2)
        # map each outer/inner run length to its allowed center ratio
        # NOTE(review): keyed by run length -- two runs of equal length
        # collide on one dict key, so one of the four checks is skipped;
        # confirm this is acceptable (equal runs usually pass anyway)
        instructions = {
            ones[0]: self.limits[0],
            ones[1]: self.limits[1],
            ones[2]: self.limits[1],
            ones[3]: self.limits[0],
        }
        for s, limits in instructions.items():
            # +/-1 pixel of slack on the measured run length
            ratio_min = center / (s + 1)
            ratio_max = center / max(1, s - 1)
            if ratio_max < limits[0] or ratio_min > limits[1]:
                return None
        anchor_width = sum(ones) + center
        return anchor_width
    def process(self, active):
        """Feed one pixel; return the detected pattern width or None."""
        # transitions first
        is_transition = (self.state in [0, 2, 4] and active) or (self.state in [1, 3, 5] and not active)
        if is_transition:
            self.state += 1
            self.tally.append(0)
            self.tally[-1] += 1
            if self.state == 6:
                res = self.evaluate_state()
                self.pop_state()
                return res
            return None
        # not is_transition: extend the current run's tally
        if self.state in [1, 3, 5] and active:
            self.tally[-1] += 1
        if self.state in [2, 4] and not active:
            self.tally[-1] += 1
        return None
class EdgeScanState:
    """Tiny run-length state machine: reports the length of each
    contiguous run of 'active' pixels as soon as the run ends."""

    def __init__(self):
        self.state = 0
        self.tally = [0]

    def pop_state(self):
        # return to the idle state, discarding the completed run's tallies
        self.state -= 2
        self.tally = self.tally[2:]

    def process(self, active):
        """Feed one pixel; return the finished run length, or None."""
        entering_run = self.state == 0 and active
        leaving_run = self.state == 1 and not active
        if entering_run or leaving_run:
            self.state += 1
            self.tally.append(0)
            self.tally[-1] += 1
            if self.state == 2:
                run_length = self.tally[1]
                self.pop_state()
                return run_length
            return None
        # no transition: extend whichever count matches the current state
        if self.state == 1 and active:
            self.tally[-1] += 1
        if self.state == 0 and not active:
            self.tally[-1] += 1
        return None
def _the_works(img):
    """Grayscale, blur, and adaptively threshold ``img`` for scanning.

    Kernel sizes scale with the image's smaller dimension and are forced
    to odd values (power of two plus one); the blur kernel is at least 3.
    """
    smaller_dim = min(img.shape[0], img.shape[1])
    blur_unit = max(3, next_power_of_two_plus_one(int(smaller_dim * 0.002)))
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (blur_unit, blur_unit), 0)
    thresh_unit = next_power_of_two_plus_one(int(smaller_dim * 0.05))
    return cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                 cv2.THRESH_BINARY, thresh_unit, 0)
class CimbarAlignment:
    """Holds detected corner, edge, and midpoint coordinates of a frame,
    with named accessors for each position.

    ``corners`` and ``midpoints`` are indexed sequences: corners are
    [top-left, top-right, bottom-left, bottom-right]; midpoints are
    [top, right, bottom, left].
    """
    def __init__(self, corners, edges=None, midpoints=None):
        # bugfix: the defaults were the mutable literals [] shared across
        # every instance; use None sentinels and fresh lists instead
        self.corners = corners
        self.edges = edges if edges is not None else []
        self.midpoints = midpoints if midpoints is not None else []
    @property
    def top_left(self):
        return self.corners[0]
    @property
    def top_right(self):
        return self.corners[1]
    @property
    def bottom_left(self):
        return self.corners[2]
    @property
    def bottom_right(self):
        return self.corners[3]
    @property
    def top_mid(self):
        return self.midpoints[0]
    @property
    def right_mid(self):
        return self.midpoints[1]
    @property
    def bottom_mid(self):
        return self.midpoints[2]
    @property
    def left_mid(self):
        return self.midpoints[3]
class CimbarScanner:
def __init__(self, img, dark=False, skip=17):
'''
image dimensions need to not be divisible by skip
'''
self.img = _the_works(img)
self.height, self.width = self.img.shape
self.dark = dark
self.skip = skip or self.height // 200
self.cutoff = self.height // 30
self.scan_ratio = '1:1:4'
def _test_pixel(self, x, y):
if self.dark:
return self.img[y, x] > 127
else:
return self.img[y, x] < 127
    def horizontal_scan(self, y, r=None):
        """Scan row ``y`` for the configured run-length pattern; yield Anchors.

        ``r`` optionally restricts the scan to an x-range (clamped to
        the image width).  A trailing synthetic inactive pixel flushes a
        pattern that ends exactly at the range boundary.
        """
        # for each column, look for the 1:1:4:1:1 pattern
        if r:
            r = (max(r[0], 0), min(r[1], self.width))
        else:
            r = (0, self.width)
        state = ScanState(self.scan_ratio)
        for x in range(*r):
            active = self._test_pixel(x, y)
            res = state.process(active)
            if res:
                #print('found possible anchor at {}-{},{}'.format(x - res, x, y))
                yield Anchor(x=x-res, xmax=x-1, y=y)
        # if the pattern is at the edge of the range
        res = state.process(False)
        if res:
            x = r[1]
            yield Anchor(x=x-res, xmax=x-1, y=y)
    def vertical_scan(self, x, xmax=None, r=None):
        """Scan the column centered between ``x`` and ``xmax`` for the
        pattern; yield Anchors carrying that column extent.

        ``r`` optionally restricts the scan to a y-range (clamped to the
        image height).  A trailing synthetic inactive pixel flushes a
        pattern that ends exactly at the range boundary.
        """
        xmax = xmax or x
        xavg = (x + xmax) // 2
        if r:
            r = (max(r[0], 0), min(r[1], self.height))
            # print(f'vertically scanning {xavg} from {r} instead of all the way to {self.height}')
        else:
            r = (0, self.height)
        state = ScanState(self.scan_ratio)
        for y in range(*r):
            active = self._test_pixel(xavg, y)
            res = state.process(active)
            if res:
                #print('found possible anchor at {},{}-{}'.format(xavg, y-res, y))
                yield Anchor(x=x, xmax=xmax, y=y-res, ymax=y-1)
        # if the pattern is at the edge of the range
        res = state.process(False)
        if res:
            y = r[1]
            yield Anchor(x=x, xmax=xmax, y=y-res, ymax=y-1)
    def diagonal_scan(self, start_x, end_x, start_y, end_y):
        """Scan the 45-degree diagonal from (start_x, start_y) toward
        (end_x, end_y) for the pattern; yield square Anchors.

        End coordinates are clamped to the image; a start outside the
        top/left edge is rolled forward along the diagonal until it is
        inside.  A trailing synthetic inactive pixel flushes a pattern
        that ends exactly at the image edge.
        """
        end_x = min(self.width, end_x)
        end_y = min(self.height, end_y)
        # if we're up against the top/left bounds, roll the scan forward until we're inside the bounds
        if start_x < 0:
            offset = -start_x
            start_x += offset
            start_y += offset
        if start_y < 0:
            offset = -start_y
            start_x += offset
            start_y += offset
        #print(f'diagonally scanning from {start_x},{start_y} to {end_x},{end_y}')
        state = ScanState(self.scan_ratio)
        x = start_x
        y = start_y
        while x < end_x and y < end_y:
            active = self._test_pixel(x, y)
            res = state.process(active)
            if res:
                # the hit spans the last `res` diagonal steps
                ax, axmax = (x-res, x)
                ay, aymax = (y-res, y)
                yield Anchor(x=ax, xmax=axmax, y=ay, ymax=aymax)
            x += 1
            y += 1
        # if the pattern is at the edge of the image
        res = state.process(False)
        if res:
            yield Anchor(x=x-res, xmax=x, y=y-res, ymax=y)
def t1_scan_horizontal(self, skip=None, start_y=None, end_y=None, r=None):
'''
gets a smart answer for Xs
'''
if not skip:
skip = self.skip
y = start_y or 0
if not end_y:
end_y = self.height
else:
end_y = min(end_y, self.height)
results = []
y += skip
while y < end_y:
results += list(self.horizontal_scan(y, r))
y += skip
return results
def t2_scan_vertical(self, candidates):
'''
gets a smart answer for Ys
'''
results = []
for p in candidates:
range_guess = (p.y - (3 * p.xrange), p.y + (3 * p.xrange))
results += list(self.vertical_scan(p.x, p.xmax, range_guess))
return results
def t3_scan_diagonal(self, candidates):
'''
confirm tokens
'''
results = []
for p in candidates:
range_guess = (p.xavg - (2 * p.yrange), p.xavg + (2 * p.yrange), p.y - p.yrange, p.ymax + p.yrange)
results += list(self.diagonal_scan(*range_guess))
return results
    def t4_confirm_scan(self, candidates, merge=True):
        """Final pass: keep only candidates that re-confirm on the rows
        and columns adjacent to their centers, then deduplicate.

        For each candidate, three horizontal scans (at yavg-1/yavg/yavg+1)
        and three vertical scans (at xavg-1/xavg/xavg+1) must each produce
        at least one mergeable hit; otherwise the candidate is dropped.
        With ``merge`` True the confirming hits are folded into the
        candidate's extent as they are found.
        """
        def _confirm_results(p, res, cutoff):
            # hits from a re-scan that are close enough to count as p
            return [
                c for c in (res or []) if c.is_mergeable(p, cutoff)
            ]
        results = []
        for p in candidates:
            xrange = (p.x - p.xrange, p.xmax + p.xrange)
            yavg = p.yavg
            for y in [yavg - 1, yavg, yavg + 1]:
                xs = list(self.horizontal_scan(y, r=xrange))
                confirms = _confirm_results(p, xs, self.cutoff // 2)
                if not confirms:
                    # one failed row disqualifies the candidate
                    p = None
                    break
                if merge:
                    for c in confirms:
                        p.merge(c)
            if not p:
                continue
            yrange = (p.y - p.yrange, p.ymax + p.yrange)
            xavg = p.xavg
            for x in [xavg - 1, xavg, xavg + 1]:
                ys = list(self.vertical_scan(x, r=yrange))
                confirms = _confirm_results(p, ys, self.cutoff // 2)
                if not confirms:
                    # one failed column disqualifies the candidate
                    p = None
                    break
                if merge:
                    for c in confirms:
                        p.merge(c)
            if not p:
                continue
            results.append(p)
        return self.deduplicate_candidates(results)
def deduplicate_candidates(self, candidates):
# group
group = []
for p in candidates:
done = False
for i, elem in enumerate(group):
rep = elem[0]
if rep.is_mergeable(p, self.cutoff):
group[i].append(p)
done = True
continue
if not done:
group.append([p])
# average
average = []
for c in group:
area = c[0]
for p in c:
area.merge(p)
average.append(area)
return average
def filter_candidates(self, candidates):
if len(candidates) < 3:
return candidates, None
candidates.sort(key=lambda c: c.size)
best_candidates = candidates[-3:]
xrange = sum([c.xrange for c in best_candidates])
yrange = sum([c.yrange for c in best_candidates])
xrange = xrange // len(best_candidates)
yrange = yrange // len(best_candidates)
max_range = max(xrange, | |
Engine.
Args:
h: The hash
Returns:
The string representation of the hash.
"""
return '%s_%s_%s_%s_%s' % (h[0:8], h[8:16], h[16:24], h[24:32], h[32:40])
def _Hash(content):
  """Return the formatted sha1 hash of ``content``.

  Args:
    content: The data to hash, as a string.

  Returns:
    The string representation of the hash.
  """
  digest = hashlib.sha1(content).hexdigest()
  return _FormatHash(digest)
def _HashFromFileHandle(file_handle):
  """Return the hash of a file-like object's contents without moving it.

  The handle's position is saved before reading and restored afterwards.

  Args:
    file_handle: File-like object which provides seek, read and tell.

  Returns:
    The string representation of the hash.
  """
  saved_pos = file_handle.tell()
  content_hash = _Hash(file_handle.read())
  file_handle.seek(saved_pos, 0)
  return content_hash
def EnsureDir(path):
    """Makes sure that a directory exists at the given path.

    If a directory already exists at that path, nothing is done.
    Otherwise, try to create a directory at that path with os.makedirs.
    If that fails, propagate the resulting OSError exception.

    Args:
      path: The path that you want to refer to a directory.

    Raises:
      OSError: If the path cannot be created, or exists but is not a
        directory.
    """
    try:
        os.makedirs(path)
    # FIX: use the 'except ... as ...' form (valid on Python 2.6+ and 3.x)
    # instead of the Python-2-only comma form.
    except OSError as exc:
        # EEXIST alone is not enough: the existing entry could be a regular
        # file, in which case the error is real and must propagate.
        if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
            raise
def DoDownloadApp(rpcserver, out_dir, app_id, module, app_version,
                  error_fh=sys.stderr):
    """Downloads the files associated with a particular app version.

    Asks the server for the version's file list, then fetches each file and
    writes it under out_dir, creating directories as needed.  Individual
    per-file failures are logged and counted; only a malformed server
    response aborts the whole download early.

    Args:
      rpcserver: The RPC server to use to download.
      out_dir: The directory the files should be downloaded to.
      app_id: The app ID of the app whose files we want to download.
      module: The module we want to download from. Can be:
        - None: We'll download from the default module.
        - <module>: We'll download from the specified module.
      app_version: The version number we want to download. Can be:
        - None: We'll download the latest default version.
        - <major>: We'll download the latest minor version.
        - <major>/<minor>: We'll download that exact version.
      error_fh: Where to send status and error messages.
    """
    StatusUpdate('Fetching file list...', error_fh)
    url_args = {'app_id': app_id}
    if module:
        url_args['module'] = module
    if app_version is not None:
        url_args['version_match'] = app_version
    result = rpcserver.Send('/api/files/list', **url_args)
    StatusUpdate('Fetching files...', error_fh)
    # Response format: first line is the full version id, every following
    # line describes one file as "<id>|<size>|<path>".
    lines = result.splitlines()
    if len(lines) < 1:
        logging.error('Invalid response from server: empty')
        return
    full_version = lines[0]
    file_lines = lines[1:]
    current_file_number = 0
    num_files = len(file_lines)
    num_errors = 0
    for line in file_lines:
        # maxsplit=2 so '|' may appear inside the path component
        parts = line.split('|', 2)
        if len(parts) != 3:
            logging.error('Invalid response from server: expecting '
                          '"<id>|<size>|<path>", found: "%s"\n', line)
            return
        current_file_number += 1
        file_id, size_str, path = parts
        try:
            size = int(size_str)
        except ValueError:
            logging.error('Invalid file list entry from server: invalid size: '
                          '"%s"', size_str)
            return
        StatusUpdate('[%d/%d] %s' % (current_file_number, num_files, path),
                     error_fh)
        def TryGet():
            """A request to /api/files/get which works with the RetryWithBackoff."""
            try:
                contents = rpcserver.Send('/api/files/get', app_id=app_id,
                                          version=full_version, id=file_id)
                return True, contents
            # 503 means "server busy" -> report as retryable to RetryWithBackoff
            except urllib2.HTTPError, exc:
                if exc.code == 503:
                    return False, exc
                else:
                    raise
        def PrintRetryMessage(_, delay):
            StatusUpdate('Server busy. Will try again in %d seconds.' % delay,
                         error_fh)
        success, contents = RetryWithBackoff(TryGet, PrintRetryMessage)
        if not success:
            logging.error('Unable to download file "%s".', path)
            num_errors += 1
            continue
        if len(contents) != size:
            logging.error('File "%s": server listed as %d bytes but served '
                          '%d bytes.', path, size, len(contents))
            num_errors += 1
            # NOTE(review): there is deliberately (?) no `continue` here -- a
            # size-mismatched file is still written below; confirm intended.
        full_path = os.path.join(out_dir, path)
        if os.path.exists(full_path):
            logging.error('Unable to create file "%s": path conflicts with '
                          'an existing file or directory', path)
            num_errors += 1
            continue
        full_dir = os.path.dirname(full_path)
        try:
            EnsureDir(full_dir)
        except OSError, exc:
            logging.error('Couldn\'t create directory "%s": %s', full_dir, exc)
            num_errors += 1
            continue
        try:
            out_file = open(full_path, 'wb')
        except IOError, exc:
            logging.error('Couldn\'t open file "%s": %s', full_path, exc)
            num_errors += 1
            continue
        try:
            try:
                out_file.write(contents)
            except IOError, exc:
                logging.error('Couldn\'t write to file "%s": %s', full_path, exc)
                num_errors += 1
                continue
        finally:
            # close the handle whether or not the write succeeded
            out_file.close()
    if num_errors > 0:
        logging.error('Number of errors: %d. See output for details.', num_errors)
class _ClientDeployLoggingContext(object):
"""Context for sending and recording server rpc requests.
Attributes:
rpcserver: The AbstractRpcServer to use for the upload.
requests: A list of client_deployinfo.Request objects to include
with the client deploy log.
time_func: Function to get the current time in milliseconds.
request_params: A dictionary with params to append to requests
"""
def __init__(self,
rpcserver,
request_params,
usage_reporting,
time_func=time.time):
"""Creates a new AppVersionUpload.
Args:
rpcserver: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
request_params: A dictionary with params to append to requests
usage_reporting: Whether to actually upload data.
time_func: Function to return the current time in millisecods
(default time.time).
"""
self.rpcserver = rpcserver
self.request_params = request_params
self.usage_reporting = usage_reporting
self.time_func = time_func
self.requests = []
def Send(self, url, payload='', **kwargs):
"""Sends a request to the server, with common params."""
start_time_usec = self.GetCurrentTimeUsec()
request_size_bytes = len(payload)
try:
logging.info('Send: %s, params=%s', url, self.request_params)
kwargs.update(self.request_params)
result = self.rpcserver.Send(url, payload=payload, **kwargs)
self._RegisterReqestForLogging(url, 200, start_time_usec,
request_size_bytes)
return result
except urllib2.HTTPError, e:
self._RegisterReqestForLogging(url, e.code, start_time_usec,
request_size_bytes)
raise e
def GetCurrentTimeUsec(self):
"""Returns the current time in microseconds."""
return int(round(self.time_func() * 1000 * 1000))
def GetSdkVersion(self):
"""Returns the current SDK Version."""
sdk_version = sdk_update_checker.GetVersionObject()
return sdk_version.get('release', '?') if sdk_version else '?'
def _RegisterReqestForLogging(self, path, response_code, start_time_usec,
request_size_bytes):
"""Registers a request for client deploy logging purposes."""
end_time_usec = self.GetCurrentTimeUsec()
self.requests.append(client_deployinfo.Request(
path=path,
response_code=response_code,
start_time_usec=start_time_usec,
end_time_usec=end_time_usec,
request_size_bytes=request_size_bytes))
def LogClientDeploy(self, runtime, start_time_usec, success):
"""Logs a client deployment attempt.
Args:
runtime: The runtime for the app being deployed.
start_time_usec: The start time of the deployment in micro seconds.
success: True if the deployment succeeded otherwise False.
"""
if not self.usage_reporting:
logging.info('Skipping usage reporting.')
return
end_time_usec = self.GetCurrentTimeUsec()
try:
info = client_deployinfo.ClientDeployInfoExternal(
runtime=runtime,
start_time_usec=start_time_usec,
end_time_usec=end_time_usec,
requests=self.requests,
success=success,
sdk_version=self.GetSdkVersion())
self.Send('/api/logclientdeploy', info.ToYAML())
except BaseException, e:
logging.debug('Exception logging deploy info continuing - %s', e)
class EndpointsState(object):
    """Enumerates the serving states an Endpoints configuration can be in."""

    SERVING = 'serving'
    PENDING = 'pending'
    FAILED = 'failed'
    _STATES = frozenset((SERVING, PENDING, FAILED))

    @classmethod
    def Parse(cls, value):
        """Parses a state string (case-insensitive) into its canonical form.

        Args:
          value: The state string to parse.

        Returns:
          The lower-cased canonical state.

        Raises:
          ValueError: If value does not name a known state.
        """
        normalized = value.lower()
        if normalized in cls._STATES:
            return normalized
        ordered = sorted(cls._STATES)
        pretty = ', '.join(ordered[:-1]) + ', or ' + ordered[-1]
        raise ValueError('Unexpected Endpoints state "%s"; should be %s.' %
                         (value, pretty))
class AppVersionUpload(object):
"""Provides facilities to upload a new appversion to the hosting service.
Attributes:
rpcserver: The AbstractRpcServer to use for the upload.
config: The AppInfoExternal object derived from the app.yaml file.
app_id: The application string from 'config'.
version: The version string from 'config'.
backend: The backend to update, if any.
files: A dictionary of files to upload to the rpcserver, mapping path to
hash of the file contents.
in_transaction: True iff a transaction with the server has started.
An AppVersionUpload can do only one transaction at a time.
deployed: True iff the Deploy method has been called.
started: True iff the StartServing method has been called.
logging_context: The _ClientDeployLoggingContext for this upload.
ignore_endpoints_failures: True to finish deployment even if there are
errors updating the Google Cloud Endpoints configuration (if there is
one). False if these errors should cause a failure/rollback.
"""
def __init__(self, rpcserver, config, module_yaml_path='app.yaml',
             backend=None,
             error_fh=None,
             usage_reporting=False, ignore_endpoints_failures=True):
    """Creates a new AppVersionUpload.

    Args:
      rpcserver: The RPC server to use. Should be an instance of HttpRpcServer
        or TestRpcServer.
      config: An AppInfoExternal object that specifies the configuration for
        this application.
      module_yaml_path: The (string) path to the yaml file corresponding to
        <config>, relative to the bundle directory.
      backend: If specified, indicates the update applies to the given backend.
        The backend name must match an entry in the backends: stanza.
      error_fh: Unexpected HTTPErrors are printed to this file handle.
      usage_reporting: Whether or not to report usage.
      ignore_endpoints_failures: True to finish deployment even if there are
        errors updating the Google Cloud Endpoints configuration (if there is
        one). False if these errors should cause a failure/rollback.
    """
    self.rpcserver = rpcserver
    self.config = config
    self.app_id = self.config.application
    self.module = self.config.module
    self.backend = backend
    self.error_fh = error_fh or sys.stderr
    self.version = self.config.version
    # Common query params appended to every request of this upload.
    self.params = {}
    if self.app_id:
        self.params['app_id'] = self.app_id
    if self.module:
        self.params['module'] = self.module
    # 'backend' and 'version' are mutually exclusive: a backend update
    # addresses the backend itself, never a specific version.
    if self.backend:
        self.params['backend'] = self.backend
    elif self.version:
        self.params['version'] = self.version
    self.files = {}  # maps path -> hash of the file contents (see class doc)
    self.all_files = set()  # every path registered with the server
    self.in_transaction = False  # True iff a server transaction is open
    self.deployed = False  # True iff Deploy has been called
    self.started = False  # True iff StartServing has been called
    self.batching = True
    self.logging_context = _ClientDeployLoggingContext(rpcserver,
                                                       self.params,
                                                       usage_reporting)
    # Separate batchers per payload kind (source files, blobs, error blobs).
    self.file_batcher = UploadBatcher('file', self.logging_context)
    self.blob_batcher = UploadBatcher('blob', self.logging_context)
    self.errorblob_batcher = UploadBatcher('errorblob', self.logging_context)
    # Guarantee vm_settings exists so module_yaml_path can always be recorded.
    if not self.config.vm_settings:
        self.config.vm_settings = appinfo.VmSettings()
    self.config.vm_settings['module_yaml_path'] = module_yaml_path
    if not self.config.auto_id_policy:
        self.config.auto_id_policy = appinfo.DATASTORE_ID_POLICY_DEFAULT
    self.ignore_endpoints_failures = ignore_endpoints_failures
def AddFile(self, path, file_handle):
"""Adds the provided file | |
import os
import shutil
import addSubproject
import option
import utility
import grapeGit as git
import grapeConfig
import grapeMenu
import checkout
# update your custom sparse checkout view
class UpdateView(option.Option):
    """
    grape uv - Updates your active submodules and ensures you are on a consistent branch throughout your project.
    Usage: grape-uv [-f ] [--checkSubprojects] [-b] [--skipSubmodules] [--allSubmodules]
                    [--skipNestedSubprojects] [--allNestedSubprojects] [--sync=<bool>]
                    [--add=<addedSubmoduleOrSubproject>...] [--rm=<removedSubmoduleOrSubproject>...]

    Options:
        -f                      Force removal of subprojects currently in your view that are taken out of the view as a
                                result to this call to uv.
        --checkSubprojects      Checks for branch model consistency across your submodules and subprojects, but does
                                not go through the 'which submodules do you want' script.
        -b                      Automatically creates subproject branches that should be there according to your branching
                                model.
        --allSubmodules         Automatically add all submodules to your workspace.
        --allNestedSubprojects  Automatically add all nested subprojects to your workspace.
        --sync=<bool>           Take extra steps to ensure the branch you're on is up to date with origin,
                                either by pushing or pulling the remote tracking branch.
                                This will also checkout the public branch in a headless state prior to offering to create
                                a new branch (in repositories where the current branch does not exist).
                                [default: .grapeconfig.post-checkout.syncWithOrigin]
        --add=<project>         Submodule or subproject to add to the workspace. Can be defined multiple times.
        --rm=<project>          Submodule or subproject to remove from the workspace. Can be defined multiple times.

    """
    # BUG FIX: the Options section used to document "--remove=<project>" while
    # the Usage line and the code (args["--rm"]) use "--rm". docopt derives
    # option definitions from the Options section, so the mismatch defined a
    # dead "--remove" option and left "--rm" undocumented.

    def __init__(self):
        super(UpdateView, self).__init__()
        self._key = "uv"
        self._section = "Workspace"
        self._pushBranch = False
        self._skipPush = False

    def description(self):
        return "Update the view of your current working tree"

    @staticmethod
    def defineActiveSubmodules(projectType="submodule"):
        """
        Queries the user for the submodules (projectType == "submodule") or nested subprojects
        (projectType == "nested subproject") they would like to activate.
        Returns a dict mapping subproject name to whether it should be active.
        """
        if projectType == "submodule":
            allSubprojects = git.getAllSubmodules()
            activeSubprojects = git.getActiveSubmodules()
        if projectType == "nested subproject":
            config = grapeConfig.grapeConfig()
            allSubprojectNames = config.getAllNestedSubprojects()
            allSubprojects = []
            for project in allSubprojectNames:
                allSubprojects.append(config.get("nested-%s" % project, "prefix"))
            activeSubprojects = grapeConfig.GrapeConfigParser.getAllActiveNestedSubprojectPrefixes()
        toplevelDirs = {}
        toplevelActiveDirs = {}
        toplevelSubs = []
        for sub in allSubprojects:
            # we are taking advantage of the fact that branchPrefixes are the same as directory prefixes for local
            # top-level dirs.
            prefix = git.branchPrefix(sub)
            if sub != prefix:
                toplevelDirs[prefix] = []
                toplevelActiveDirs[prefix] = []
        for sub in allSubprojects:
            prefix = git.branchPrefix(sub)
            if sub != prefix:
                toplevelDirs[prefix].append(sub)
            else:
                toplevelSubs.append(sub)
        for sub in activeSubprojects:
            prefix = git.branchPrefix(sub)
            if sub != prefix:
                toplevelActiveDirs[prefix].append(sub)
        included = {}
        # One all/some/none question per top-level directory; drill down to a
        # per-subproject y/n prompt only when the user answers "some".
        for directory, subprojects in toplevelDirs.items():
            activeDir = toplevelActiveDirs[directory]
            if len(activeDir) == 0:
                defaultValue = "none"
            elif set(activeDir) == set(subprojects):
                defaultValue = "all"
            else:
                defaultValue = "some"
            opt = utility.userInput("Would you like all, some, or none of the %ss in %s?" % (projectType, directory),
                                    default=defaultValue)
            if opt.lower()[0] == "a":
                for subproject in subprojects:
                    included[subproject] = True
            if opt.lower()[0] == "n":
                for subproject in subprojects:
                    included[subproject] = False
            if opt.lower()[0] == "s":
                for subproject in subprojects:
                    included[subproject] = utility.userInput("Would you like %s %s? [y/n]" % (projectType, subproject),
                                                             'y' if (subproject in activeSubprojects) else 'n')
        for subproject in toplevelSubs:
            included[subproject] = utility.userInput("Would you like %s %s? [y/n]" % (projectType, subproject),
                                                     'y' if (subproject in activeSubprojects) else 'n')
        return included

    @staticmethod
    def defineActiveNestedSubprojects():
        """
        Queries the user for the nested subprojects they would like to activate.
        """
        return UpdateView.defineActiveSubmodules(projectType="nested subproject")

    def execute(self, args):
        # Resolve --sync (possibly sourced from .grapeconfig) to a real bool.
        sync = args["--sync"].lower().strip()
        sync = sync == "true" or sync == "yes"
        args["--sync"] = sync
        config = grapeConfig.grapeConfig()
        origwd = os.getcwd()
        wsDir = utility.workspaceDir()
        os.chdir(wsDir)
        base = git.baseDir()
        if base == "":
            return False
        hasSubmodules = len(git.getAllSubmodules()) > 0 and not args["--skipSubmodules"]
        includedSubmodules = {}
        includedNestedSubprojectPrefixes = {}
        # Sort the names given via --add / --rm into submodules vs. nested
        # subprojects; any unrecognized name aborts the command.
        allSubmodules = git.getAllSubmodules()
        allNestedSubprojects = config.getAllNestedSubprojects()
        addedSubmodules = []
        addedNestedSubprojects = []
        addedProjects = args["--add"]
        notFound = []
        for proj in addedProjects:
            if proj in allSubmodules:
                addedSubmodules.append(proj)
            elif proj in allNestedSubprojects:
                addedNestedSubprojects.append(proj)
            else:
                notFound.append(proj)
        rmSubmodules = []
        rmNestedSubprojects = []
        rmProjects = args["--rm"]
        for proj in rmProjects:
            if proj in allSubmodules:
                rmSubmodules.append(proj)
            elif proj in allNestedSubprojects:
                rmNestedSubprojects.append(proj)
            else:
                notFound.append(proj)
        if notFound:
            utility.printMsg("\"%s\" not found in submodules %s \nor\n nested subprojects %s" % (",".join(notFound), ",".join(allSubmodules), ",".join(allNestedSubprojects)))
            return False
        if not args["--checkSubprojects"]:
            # get submodules to update
            if hasSubmodules:
                if args["--allSubmodules"]:
                    includedSubmodules = {sub: True for sub in allSubmodules}
                elif args["--add"] or args["--rm"]:
                    # start from the currently active set, then apply deltas
                    includedSubmodules = {sub: True for sub in git.getActiveSubmodules()}
                    includedSubmodules.update({sub: True for sub in addedSubmodules})
                    includedSubmodules.update({sub: False for sub in rmSubmodules})
                else:
                    includedSubmodules = self.defineActiveSubmodules()
            # get subprojects to update
            if not args["--skipNestedSubprojects"]:
                nestedPrefixLookup = lambda x: config.get("nested-%s" % x, "prefix")
                if args["--allNestedSubprojects"]:
                    includedNestedSubprojectPrefixes = {nestedPrefixLookup(sub): True for sub in allNestedSubprojects}
                elif args["--add"] or args["--rm"]:
                    includedNestedSubprojectPrefixes = {sub: True for sub in grapeConfig.GrapeConfigParser.getAllActiveNestedSubprojectPrefixes()}
                    includedNestedSubprojectPrefixes.update({nestedPrefixLookup(sub): True for sub in addedNestedSubprojects})
                    includedNestedSubprojectPrefixes.update({nestedPrefixLookup(sub): False for sub in rmNestedSubprojects})
                else:
                    includedNestedSubprojectPrefixes = self.defineActiveNestedSubprojects()
            if hasSubmodules:
                # Build the space-separated argument strings for the git
                # submodule init/deinit calls below.
                initStr = ""
                deinitStr = ""
                rmCachedStr = ""
                resetStr = ""
                for submodule, nowActive in includedSubmodules.items():
                    if nowActive:
                        initStr += ' %s' % submodule
                    else:
                        deinitStr += ' %s' % submodule
                        rmCachedStr += ' %s' % submodule
                        resetStr += ' %s' % submodule
                if args["-f"] and deinitStr:
                    deinitStr = "-f" + deinitStr
                utility.printMsg("Configuring submodules...")
                utility.printMsg("Initializing submodules...")
                git.submodule("init %s" % initStr.strip())
                if deinitStr:
                    utility.printMsg("Deiniting submodules that were not requested... (%s)" % deinitStr)
                    done = False
                    while not done:
                        try:
                            git.submodule("deinit %s" % deinitStr.strip())
                            done = True
                        except git.GrapeGitError as e:
                            if "the following file has local modifications" in e.gitOutput:
                                # FIX: parenthesized so the statement is valid
                                # under both Python 2 and Python 3.
                                print(e.gitOutput)
                                utility.printMsg("A submodule that you wanted to remove has local modifications. "
                                                 "Use grape uv -f to force removal.")
                                return False
                            elif "use 'rm -rf' if you really want to remove it including all of its history" in e.gitOutput:
                                if not args["-f"]:
                                    raise e
                                # it is safe to move the .git of the submodule to the .git/modules area of the workspace...
                                module = None
                                for l in e.gitOutput.split('\n'):
                                    if "Submodule work tree" in l and "contains a .git directory" in l:
                                        module = l.split("'")[1]
                                        break
                                if module:
                                    src = os.path.join(module, ".git")
                                    dest = os.path.join(wsDir, ".git", "modules", module)
                                    utility.printMsg("Moving %s to %s" % (src, dest))
                                    shutil.move(src, dest)
                                else:
                                    raise e
                            else:
                                raise e
                    git.rm("--cached %s" % rmCachedStr)
                    git.reset(" %s" % resetStr)
                if initStr:
                    utility.printMsg("Updating active submodules...(%s)" % initStr)
                    git.submodule("update")
            # handle nested subprojects
            if not args["--skipNestedSubprojects"]:
                reverseLookupByPrefix = {nestedPrefixLookup(sub): sub for sub in allNestedSubprojects}
                userConfig = grapeConfig.grapeUserConfig()
                updatedActiveList = []
                for subproject, nowActive in includedNestedSubprojectPrefixes.items():
                    subprojectName = reverseLookupByPrefix[subproject]
                    section = "nested-%s" % reverseLookupByPrefix[subproject]
                    userConfig.ensureSection(section)
                    previouslyActive = userConfig.getboolean(section, "active")
                    # only really active if the subproject's .git is present
                    previouslyActive = previouslyActive and os.path.exists(os.path.join(base, subproject, ".git"))
                    userConfig.set(section, "active", "True" if previouslyActive else "False")
                    if nowActive and previouslyActive:
                        updatedActiveList.append(subprojectName)
                    if nowActive and not previouslyActive:
                        utility.printMsg("Activating Nested Subproject %s" % subproject)
                        if not addSubproject.AddSubproject.activateNestedSubproject(subprojectName, userConfig):
                            utility.printMsg("Can't activate %s. Exiting..." % subprojectName)
                            return False
                        updatedActiveList.append(subprojectName)
                    if not nowActive and not previouslyActive:
                        pass
                    if not nowActive and previouslyActive:
                        # remove the subproject
                        subprojectdir = os.path.join(base, utility.makePathPortable(subproject))
                        # NOTE(review): assumes utility.userInput returns a
                        # falsy value for a "no" answer -- confirm.
                        proceed = args["-f"] or \
                            utility.userInput("About to delete all contents in %s. Any uncommitted changes, committed changes "
                                              "that have not been pushed, or ignored files will be lost. Proceed?" %
                                              subproject, 'n')
                        if proceed:
                            shutil.rmtree(subprojectdir)
                userConfig.setActiveNestedSubprojects(updatedActiveList)
                grapeConfig.writeConfig(userConfig, os.path.join(utility.workspaceDir(), ".git", ".grapeuserconfig"))
        # land every repository in the workspace on a consistent branch
        checkoutArgs = "-b" if args["-b"] else ""
        safeSwitchWorkspaceToBranch(git.currentBranch(), checkoutArgs, sync)
        os.chdir(origwd)
        return True

    @staticmethod
    def getDesiredSubmoduleBranch(config):
        """Returns the branch submodules should be on: the mapped public branch
        when the workspace is on a public branch, otherwise the current branch."""
        publicBranches = config.getPublicBranchList()
        currentBranch = git.currentBranch()
        if currentBranch in publicBranches:
            desiredSubmoduleBranch = config.getMapping("workspace", "submodulepublicmappings")[currentBranch]
        else:
            desiredSubmoduleBranch = currentBranch
        return desiredSubmoduleBranch

    def setDefaultConfig(self, config):
        config.ensureSection("workspace")
        config.set("workspace", "submodulepublicmappings", "?:master")
def ensureLocalUpToDateWithRemote(repo = '', branch = 'master'):
utility.printMsg( "Ensuring local branch %s in %s is up to date with origin" % (branch, repo))
with utility.cd(repo):
# attempt to fetch the requested branch
try:
git.fetch("origin", "%s:%s" % (branch, branch))
except:
# the branch may not exist, but this is ok
pass
if git.currentBranch() == branch:
return
if not git.hasBranch(branch):
# switch to corresponding public branch if the branch does not exist
public = grapeConfig.workspaceConfig().getPublicBranchFor(branch)
# figure out if this is a submodule
relpath = os.path.relpath(repo, utility.workspaceDir())
relpath = relpath.replace('\\',"/")
with utility.cd(utility.workspaceDir()):
# if this is a submodule, get the appropriate public mapping
if relpath in git.getAllSubmoduleURLMap().keys():
| |
content = FipFloatingIpAssociateFirewallInfoResponsesSerializer(label="弹性公网IP成功关联后的返回信息")
is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
message = serializers.CharField(label="错误信息", help_text="示例:该订单 BMS201908231116166874034 无对应服务器交付信息")
no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")
class Meta:
model = ResponseNoneMeta
fields = ["content", "is_ok", "message", "no"]
class FipFloatingIpAttachToServerInfoResponsesSerializer(serializers.ModelSerializer):
    """Response payload describing a floating IP after it is attached to a server."""

    account_id = serializers.CharField(label="用户id", help_text="示例:eaaafbf4d92949d2bec2d5e91c0d9940")
    attached = serializers.CharField(label="是否绑定", help_text="示例:true/False")
    contract_number = serializers.CharField(label="合同编号", help_text="示例:Test_bz")
    create_at = serializers.CharField(label="绑定成功时间", help_text="示例:2019-09-24 14:29:30")
    external_line_type = serializers.CharField(label="对外网络类型", help_text="示例:three_line_ip")
    external_name = serializers.CharField(label="弹性公网IP物料名称", help_text="示例:External_Net1")
    external_name_id = serializers.CharField(label="弹性公网IP物料id",
                                             help_text="示例:98a35cf5-1bab-4950-b947-d9f198db046a")
    fixed_address = serializers.CharField(label="弹性公网IP所绑定的端口", help_text="示例:172.24.0.24")
    floating_ip = serializers.CharField(label="绑定的弹性公网IP", help_text="示例:10.100.2.172")
    floating_ip_id = serializers.CharField(label="绑定的弹性公网IP id",
                                           help_text="示例:172d6c81-595f-4aa6-b2f7-1ec99e01716c")
    id = serializers.CharField(label="标识", help_text="示例:5d89b7cac7c85c7bfc7093e9")
    instance_name = serializers.CharField(label="实例名称", help_text="示例:node_8_32")
    instance_uuid = serializers.CharField(label="实例id", help_text="示例:51c7094c-f039-41b5-865d-d37faa148c96")
    is_measure_end = serializers.CharField(label="当前IP有没有做过变更", help_text="示例:false/True")
    order_id = serializers.CharField(label="订单号", help_text="示例:BMS201909241429300929017")
    project_id = serializers.CharField(label="项目id", help_text="示例:d1f0dbdf0b764e7693e3e19783be0ea9")
    qos_policy_id = serializers.CharField(label="带宽id", help_text="示例:ce801724-7284-4a81-be1b-50b6f330bb29")
    qos_policy_name = serializers.CharField(label="带宽大小", help_text="示例:2M")
    status = serializers.CharField(label="状态", help_text="示例:active")
    update_at = serializers.CharField(label="更新时间", help_text="示例:2019-10-12 16:21:43.621873")

    class Meta:
        model = ResponseNoneMeta
        # BUG FIX: "external_line_type" was listed twice (it already appears
        # as the fifth entry); the trailing duplicate has been removed.
        fields = ["account_id", "attached", "contract_number", "create_at", "external_line_type", "external_name",
                  "external_name_id", "fixed_address", "floating_ip", "floating_ip_id", "id", "instance_name",
                  "instance_uuid", "is_measure_end", "order_id", "project_id", "qos_policy_id", "qos_policy_name",
                  "status", "update_at"]
class FipFloatingIpAttachToServerResponsesSerializer(serializers.ModelSerializer):
    """Envelope for the attach-to-server response: payload plus status fields."""

    content = FipFloatingIpAttachToServerInfoResponsesSerializer(label="弹性公网IP成功关联实例后的返回信息")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息", help_text="示例:该订单 BMS201908231116166874034 无对应服务器交付信息")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")

    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class FipFloatingIpListResultInfoResponsesSerializer(serializers.ModelSerializer):
    """One floating-IP entry as returned by the floating IP list endpoint."""

    account_id = serializers.CharField(label="用户id", help_text="示例:eaaafbf4d92949d2bec2d5e91c0d9940")
    attached = serializers.CharField(label="是否绑定服务器", help_text="示例:false/True")
    contract_number = serializers.CharField(label="合同编号", help_text="示例:ding")
    create_at = serializers.CharField(label="创建时间", help_text="示例:2019-10-14 10:10:08.082042")
    external_line_type = serializers.CharField(label="对外网络类型", help_text="示例:three_line_ip")
    external_name = serializers.CharField(label="弹性公网IP物料名称", help_text="示例:External_Net1")
    external_name_id = serializers.CharField(label="弹性公网IP物料id", help_text="示例:98a35cf5-1bab-4950-b947-d9f198db046a")
    firewall_info = serializers.CharField(label="防火墙信息")
    fixed_address = serializers.CharField(label="服务器内网IP端口")
    floating_ip = serializers.CharField(label="弹性公网IP", help_text="示例:10.100.2.172")
    floating_ip_id = serializers.CharField(label="弹性公网IP id", help_text="示例:41deaaeb-b007-41c2-8150-f953cb9765e9")
    id = serializers.CharField(label="标识", help_text="示例:5da3d900e115747865abf500")
    instance_name = serializers.CharField(label="实例名称", help_text="示例:null")
    instance_uuid = serializers.CharField(label="实例id", help_text="示例:null")
    is_measure_end = serializers.CharField(label="是否做过变更", help_text="示例:false")
    order_id = serializers.CharField(label="订单id", help_text="示例:BMS201910141010033503635")
    project_id = serializers.CharField(label="项目id", help_text="示例:d1f0dbdf0b764e7693e3e19783be0ea9")
    qos_policy_id = serializers.CharField(label="带宽id", help_text="示例:2671fc70-7018-4240-9177-f4209ea7ac35")
    qos_policy_name = serializers.CharField(label="带宽名称", help_text="示例:100M")
    status = serializers.CharField(label="状态", help_text="示例:active")
    update_at = serializers.CharField(label="更新时间", help_text="示例:null")

    class Meta:
        model = ResponseNoneMeta
        fields = ["account_id", "attached", "contract_number", "create_at", "external_line_type",
                  "external_name", "external_name_id", "firewall_info", "fixed_address", "floating_ip",
                  "floating_ip_id", "id", "instance_name", "instance_uuid", "is_measure_end", "order_id",
                  "project_id", "qos_policy_id", "qos_policy_name", "status", "update_at"]
class FipFloatingIpCreatedInfoResponsesSerializer(serializers.ModelSerializer):
    """Payload for a successful floating-IP creation: the IP plus its order."""

    floating_ip_list_result = FipFloatingIpListResultInfoResponsesSerializer(label="弹性公网IP创建成功返回信息")
    order_result = OrderByAccoInfoResponsesSerializer(label="返回订单信息")

    class Meta:
        model = ResponseNoneMeta
        fields = ["floating_ip_list_result", "order_result"]
class FipFloatingIpCreatedResponsesSerializer(serializers.ModelSerializer):
    """Envelope for the floating-IP creation response: payload plus status fields."""

    content = FipFloatingIpCreatedInfoResponsesSerializer(label="弹性公网IP创建成功后的返回信息")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")

    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class FipFloatingIpDeletedResponsesSerializer(serializers.ModelSerializer):
    """Envelope for the floating-IP deletion response: per-id result plus status fields."""

    content = serializers.CharField(label="成功信息", help_text="示例:41deaaeb-b007-41c2-8150-f953cb9765e9: true")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")

    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class FipFloatingIpDetachFromServerInfoResponsesSerializer(serializers.ModelSerializer):
    """Floating-IP state after detaching it from a server (same field set as the list entry)."""

    account_id = serializers.CharField(label="用户id", help_text="示例:eaaafbf4d92949d2bec2d5e91c0d9940")
    attached = serializers.CharField(label="是否绑定服务器", help_text="示例:false/True")
    contract_number = serializers.CharField(label="合同编号", help_text="示例:ding")
    create_at = serializers.CharField(label="创建时间", help_text="示例:2019-10-14 10:10:08.082042")
    external_line_type = serializers.CharField(label="对外网络类型", help_text="示例:three_line_ip")
    external_name = serializers.CharField(label="弹性公网IP物料名称", help_text="示例:External_Net1")
    external_name_id = serializers.CharField(label="弹性公网IP物料id", help_text="示例:98a35cf5-1bab-4950-b947-d9f198db046a")
    firewall_info = serializers.CharField(label="防火墙信息")
    fixed_address = serializers.CharField(label="服务器内网IP端口")
    floating_ip = serializers.CharField(label="弹性公网IP", help_text="示例:10.100.2.172")
    floating_ip_id = serializers.CharField(label="弹性公网IP id", help_text="示例:41deaaeb-b007-41c2-8150-f953cb9765e9")
    id = serializers.CharField(label="标识", help_text="示例:5da3d900e115747865abf500")
    instance_name = serializers.CharField(label="实例名称", help_text="示例:null")
    instance_uuid = serializers.CharField(label="实例id", help_text="示例:null")
    is_measure_end = serializers.CharField(label="是否做过变更", help_text="示例:false")
    order_id = serializers.CharField(label="订单id", help_text="示例:BMS201910141010033503635")
    project_id = serializers.CharField(label="项目id", help_text="示例:d1f0dbdf0b764e7693e3e19783be0ea9")
    qos_policy_id = serializers.CharField(label="带宽id", help_text="示例:2671fc70-7018-4240-9177-f4209ea7ac35")
    qos_policy_name = serializers.CharField(label="带宽名称", help_text="示例:100M")
    status = serializers.CharField(label="状态", help_text="示例:active")
    update_at = serializers.CharField(label="更新时间", help_text="示例:null")

    class Meta:
        model = ResponseNoneMeta
        fields = ["account_id", "attached", "contract_number", "create_at", "external_line_type",
                  "external_name", "external_name_id", "firewall_info", "fixed_address", "floating_ip",
                  "floating_ip_id", "id", "instance_name", "instance_uuid", "is_measure_end", "order_id",
                  "project_id", "qos_policy_id", "qos_policy_name", "status", "update_at"]
class FipFloatingIpInfoResponsesSerializer(serializers.ModelSerializer):
    """Detail view of a single floating IP; extends the list entry with contract_info."""

    account_id = serializers.CharField(label="用户id", help_text="示例:eaaafbf4d92949d2bec2d5e91c0d9940")
    attached = serializers.CharField(label="是否绑定服务器", help_text="示例:false/True")
    contract_info = serializers.CharField(label="合同信息")
    contract_number = serializers.CharField(label="合同编号", help_text="示例:ding")
    create_at = serializers.CharField(label="创建时间", help_text="示例:2019-10-14 10:10:08.082042")
    external_line_type = serializers.CharField(label="对外网络类型", help_text="示例:three_line_ip")
    external_name = serializers.CharField(label="弹性公网IP物料名称", help_text="示例:External_Net1")
    external_name_id = serializers.CharField(label="弹性公网IP物料id", help_text="示例:98a35cf5-1bab-4950-b947-d9f198db046a")
    firewall_info = serializers.CharField(label="防火墙信息")
    fixed_address = serializers.CharField(label="服务器内网IP端口")
    floating_ip = serializers.CharField(label="弹性公网IP", help_text="示例:10.100.2.172")
    floating_ip_id = serializers.CharField(label="弹性公网IP id", help_text="示例:41deaaeb-b007-41c2-8150-f953cb9765e9")
    id = serializers.CharField(label="标识", help_text="示例:5da3d900e115747865abf500")
    instance_name = serializers.CharField(label="实例名称", help_text="示例:null")
    instance_uuid = serializers.CharField(label="实例id", help_text="示例:null")
    is_measure_end = serializers.CharField(label="是否做过变更", help_text="示例:false")
    order_id = serializers.CharField(label="订单id", help_text="示例:BMS201910141010033503635")
    project_id = serializers.CharField(label="项目id", help_text="示例:d1f0dbdf0b764e7693e3e19783be0ea9")
    qos_policy_id = serializers.CharField(label="带宽id", help_text="示例:2671fc70-7018-4240-9177-f4209ea7ac35")
    qos_policy_name = serializers.CharField(label="带宽名称", help_text="示例:100M")
    status = serializers.CharField(label="状态", help_text="示例:active")
    update_at = serializers.CharField(label="更新时间", help_text="示例:null")

    class Meta:
        model = ResponseNoneMeta
        fields = ["account_id", "attached", "contract_info", "contract_number", "create_at", "external_line_type",
                  "external_name", "external_name_id", "firewall_info", "fixed_address", "floating_ip",
                  "floating_ip_id", "id", "instance_name", "instance_uuid", "is_measure_end", "order_id",
                  "project_id", "qos_policy_id", "qos_policy_name", "status", "update_at"]
class FipFloatingIpQuotaSetInfoResponsesSerializer(FipFloatingIpInfoResponsesSerializer):
    """Response schema returned after adjusting a floating IP's bandwidth (QoS).

    The payload is field-for-field identical to
    ``FipFloatingIpInfoResponsesSerializer``, so the duplicated field
    declarations and the duplicated ``Meta`` are inherited instead of being
    repeated: DRF's serializer metaclass collects declared fields from base
    classes, and ``Meta`` is found through ordinary attribute lookup.
    The class keeps its own name so generated API docs are unchanged.
    """
class FipFloatingIpQuotaInfoResponsesSerializer(serializers.ModelSerializer):
    """Response schema for the remaining floating-IP quota of an account."""
    available_fip_quota = serializers.CharField(label="浮动IP可用配额", help_text="示例:19")
    class Meta:
        model = ResponseNoneMeta
        fields = ["available_fip_quota"]
class FipFloatingIpDetachFromServerResponsesSerializer(serializers.ModelSerializer):
    """Envelope (content/is_ok/message/no) for detaching a floating IP from an instance."""
    content = FipFloatingIpDetachFromServerInfoResponsesSerializer(label="弹性公网IP解绑实例成功后返回信息")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")
    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class FipFloatingIpListResponsesSerializer(serializers.ModelSerializer):
    """Envelope (content/is_ok/message/no) for listing floating IPs."""
    content = FipFloatingIpInfoResponsesSerializer(label="查询成功返回信息")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")
    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class FipFloatingIpQuotaResponsesSerializer(serializers.ModelSerializer):
    """Envelope (content/is_ok/message/no) for querying the floating-IP quota."""
    content = FipFloatingIpQuotaInfoResponsesSerializer(label="查询成功返回信息")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")
    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class FipFloatingIpQuotaSetResponsesSerializer(serializers.ModelSerializer):
    """Envelope (content/is_ok/message/no) for adjusting a floating IP's bandwidth."""
    content = FipFloatingIpQuotaSetInfoResponsesSerializer(label="调整弹性公网IP带宽后返回信息")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")
    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class AvaliableInstanceInfoResponsesSerializer(serializers.ModelSerializer):
    """Response schema for one instance that a floating IP can be attached to.

    NOTE: the class name keeps the historical "Avaliable" misspelling because
    other serializers (e.g. ``InstAvaliableInstanceListResponsesSerializer``)
    reference it by name.
    """
    instance_id = serializers.CharField(label="实例id", help_text="示例:cafc0e26-7a34-40af-9cb4-ba1d987c6b90")
    # The two labels below previously read "浮动IP可用配额" (floating-IP available
    # quota) — a copy/paste slip from FipFloatingIpQuotaInfoResponsesSerializer;
    # they now describe the actual fields (instance name, private IP ports).
    instance_name = serializers.CharField(label="实例名称", help_text="示例:centos7-base-1011")
    port = serializers.CharField(label="内网IP端口", help_text="示例:['192.168.45.21']")
    class Meta:
        model = ResponseNoneMeta
        fields = ["instance_id", "instance_name", "port"]
class InstAvaliableInstanceListResponsesSerializer(serializers.ModelSerializer):
    """Envelope (content/is_ok/message/no) for listing attachable instances."""
    content = AvaliableInstanceInfoResponsesSerializer(label="查询可绑定实例列表")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")
    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class FeedbackVbsAttachInfoResponsesSerializer(serializers.ModelSerializer):
    """Response schema for a cloud-disk (VBS) attachment feedback record."""
    account_id = serializers.CharField(label="用户id", help_text="示例:af95a6b571bd4d7e9e713d8c107d7f89")
    attached_type = serializers.CharField(label="绑定类型", help_text="示例:裸金属服务器")
    contract_number = serializers.CharField(label="合同编号", help_text="示例:ding123")
    create_at = serializers.CharField(label="创建时间", help_text="示例:2019-10-17 11:18:48")
    first_create_at = serializers.CharField(label="第一次创建时间", help_text="示例:2019-10-17 11:18:48")
    id = serializers.CharField(label="标识", help_text="示例:5da7dd97f37056ff1229ae60")
    instance_name = serializers.CharField(label="实例名称", help_text="示例:cen7")
    instance_uuid = serializers.CharField(label="实例ID", help_text="示例:776c046b-4675-4ccb-a16a-30db03e44caa")
    is_measure_end = serializers.CharField(label="是否做过变更操作", help_text="示例:0/1")
    name = serializers.CharField(label="云硬盘名称", help_text="示例:u1810_14_volume_1")
    order_id = serializers.CharField(label="订单ID", help_text="示例:BMS201910171104454214082")
    project_id = serializers.CharField(label="项目ID", help_text="示例:68c50c64def84818948b1dc4320a44fa")
    region = serializers.CharField(label="可用区", help_text="示例:regionOne")
    size = serializers.CharField(label="云硬盘大小", help_text="示例:13")
    update_at = serializers.CharField(label="更新时间", help_text="示例:null")
    volume_id = serializers.CharField(label="云硬盘ID", help_text="示例:b264e220-8d1c-4695-9941-e1c19a99a246")
    volume_type = serializers.CharField(label="云硬盘类型", help_text="示例:inspure_iscsi")
    class Meta:
        model = ResponseNoneMeta
        fields = ["account_id", "attached_type", "contract_number","create_at", "first_create_at",
                  "id","instance_name","instance_uuid", "is_measure_end", "name", "order_id","project_id",
                  "region", "size","update_at", "volume_id", "volume_type",
                  ]
class FeedbackVbsAttachResponsesSerializer(serializers.ModelSerializer):
    """Envelope (content/is_ok/message/no) for the VBS attach feedback call."""
    content = FeedbackVbsAttachInfoResponsesSerializer(label="返回的信息")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")
    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class ObsObjectStoreInfoResponsesSerializer(serializers.ModelSerializer):
    """Response schema for one object-storage bucket (container)."""
    container_bytes_used = serializers.CharField(label="所用字节数", help_text="0")
    container_object_count = serializers.CharField(label="对象数", help_text="0")
    is_public = serializers.CharField(label="权限是否公有", help_text="True/False")
    name = serializers.CharField(label="桶名称", help_text="ding")
    public_url = serializers.CharField(label="域名", help_text="")
    timestamp = serializers.CharField(label="创建时间", help_text="示例:2019-10-15 12:45:13")
    class Meta:
        model = ResponseNoneMeta
        fields = ["container_bytes_used", "container_object_count", "is_public", "name", "public_url", "timestamp"]
class ObsCreatedBucketInfoResponsesSerializer(serializers.ModelSerializer):
    """Response schema pairing a newly created bucket with its order record."""
    object_store = ObsObjectStoreInfoResponsesSerializer(label="桶的信息")
    order_result = OrderByAccoInfoResponsesSerializer(label="订单信息")
    class Meta:
        model = ResponseNoneMeta
        fields = ["object_store", "order_result"]
class ObsCreatedBucketResponsesSerializer(serializers.ModelSerializer):
    """Envelope (content/is_ok/message/no) for creating a bucket."""
    content = ObsCreatedBucketInfoResponsesSerializer(label="返回的信息")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")
    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class ObsDeleteBucketResponsesSerializer(serializers.ModelSerializer):
    """Envelope (content/is_ok/message/no) for deleting a bucket."""
    content = serializers.CharField(label="成功信息", help_text="删除桶成功")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")
    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class ObsDeleteObjectsResponsesSerializer(serializers.ModelSerializer):
    """Envelope (content/is_ok/message/no) for deleting objects in a bucket."""
    content = serializers.CharField(label="成功信息", help_text="删除对象成功")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")
    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class ObsUpdateBucketResponsesSerializer(serializers.ModelSerializer):
    """Envelope (content/is_ok/message/no) for updating a bucket."""
    content = ObsObjectStoreInfoResponsesSerializer(label="成功返回信息")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")
    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class ObsBucketListResponsesSerializer(serializers.ModelSerializer):
    """Envelope (content/is_ok/message/no) for listing buckets."""
    content = ObsObjectStoreInfoResponsesSerializer(label="成功返回信息")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")
    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class ObsObjectsInfoResponsesSerializer(serializers.ModelSerializer):
    """Response schema for one stored object (file or pseudo-folder entry)."""
    bytes = serializers.CharField(label="字节数", help_text="示例:21735")
    content_type = serializers.CharField(label="类型", help_text="示例:image/png")
    is_object = serializers.CharField(label="是否是对象存储", help_text="示例:true/False")
    is_subdir = serializers.CharField(label="是否是目录", help_text="示例:false/True")
    name = serializers.CharField(label="名称", help_text="示例:2019-04-04_144415.png")
    path = serializers.CharField(label="路径", help_text="示例:2019-04-04_144415.png")
    timestamp = serializers.CharField(label="创建时间", help_text="示例:2019-08-26 10:38:04")
    class Meta:
        model = ResponseNoneMeta
        fields = ["bytes", "content_type", "is_object", "is_subdir", "name", "path", "timestamp"]
class ObsCreatedDirInfoResponsesSerializer(serializers.ModelSerializer):
    """Response schema for a newly created pseudo-folder in a bucket."""
    content_type = serializers.CharField(label="类型", help_text="示例:application/pseudo-folder")
    is_object = serializers.CharField(label="是否是对象存储", help_text="示例:true/False")
    is_subdir = serializers.CharField(label="是否是目录", help_text="示例:false/True")
    name = serializers.CharField(label="名称", help_text="示例:da/ding")
    path = serializers.CharField(label="路径", help_text="示例:da/ding")
    etag = serializers.CharField(label="标签", help_text="示例:d41d8cd98f00b204e9800998ecf8427e")
    class Meta:
        model = ResponseNoneMeta
        fields = ["etag", "content_type", "is_object", "is_subdir", "name", "path"]
class ObsObjectsReturnInfoResponsesSerializer(serializers.ModelSerializer):
    """Wrapper schema holding the list of objects under the ``objects`` key."""
    objects = ObsObjectsInfoResponsesSerializer(label="返回信息")
    class Meta:
        model = ResponseNoneMeta
        fields = ["objects"]
class ObsObjectsListResponsesSerializer(serializers.ModelSerializer):
content = ObsObjectsReturnInfoResponsesSerializer(label="成功返回信息")
is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
message = serializers.CharField(label="错误信息")
no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")
class Meta:
| |
"""
Copyright 2021 <NAME>
Orchestrates a hadoop + Hive + SQL cluster of docker nodes
"""
import argparse
import collections
import distutils.dir_util
import json
import os
import re
import shlex
import shutil
import subprocess
import sys
import time
# PyPI installed modules...
import requests
# The root directory of the playground repository
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
# The hadoop distribution path on the docker nodes
HADOOP_HOME = '/himage/hadoop-3.3.0'
# The hive distribution path on the docker nodes
HIVE_HOME = '/himage/apache-hive-3.1.2-bin'
# The sqoop distribution path on the docker nodes
SQOOP_HOME = '/himage/sqoop-1.4.7.bin__hadoop-2.6.0'
# The path of the docker-compose.yml file
COMPOSE_FILE = os.path.join(ROOT_DIR, 'docker-compose.yml')
# The non-secured sql password used on the sql node
# (the value looks like a scrubbed placeholder — set a real value before use)
SQL_TEST_PASSWORD = '<PASSWORD>'
# The number of data nodes in the cluster (this variable only affects health checks)
NUM_DATA_NODES = 1
# The number of node manager nodes in the cluster (this variable only affects health checks)
NUM_NODE_MANAGERS = 1
# The minimum amount of disk space each node requires to operate (applicable in health checks)
MIN_DISK_SPACE = 8589934592 # 8 GiB (8 * 2**30 bytes); the previous "1GB" note was wrong
# Exposed localhost ports for each of the nodes
PORT_UI_NN1 = 3000
PORT_UI_DN1 = 3001
PORT_UI_RMAN = 3002
PORT_UI_NM1 = 3003
PORT_UI_MRHIST = 3004
PORT_UI_HS = 3005
PORT_SQL_SQL = 3006
# Descriptions of what each port does: (port, protocol, description)
PORT_DOC = [
    (PORT_UI_NN1, 'http', 'Web UI for the primary name node'),
    (PORT_UI_DN1, 'http', 'Web UI for data node 1'),
    (PORT_UI_RMAN, 'http', 'Web UI for YARN resource manager'),
    (PORT_UI_NM1, 'http', 'Web UI for node manager 1'),
    (PORT_UI_MRHIST, 'http', 'Web UI map reduce history server'),
    (PORT_UI_HS, 'http', 'Web UI for hive server'),
    (PORT_SQL_SQL, 'sql (tcp/ip)', 'SQL server connection port')
]
# A health checklist item description: which bean/property to read and how to judge it
NodeHealthBeanCheck = collections.namedtuple('NodeHealthBeanCheck', \
    'bean_name prop_name check_func')
# The status of a single node in the cluster
NodeHealthReport = collections.namedtuple('NodeHealthReport', \
    'is_healthy message')
# A summary of the status on each of the nodes in the cluster
HealthReportSummary = collections.namedtuple('HealthReportSummary', \
    'cluster_healthy nn1 dn1 rman nm1 mrhist hs client sql')
class Config:
    """
    User-tunable settings for all playground tasks.

    Directory settings are normalised to absolute paths on assignment;
    empty or None values are stored as None.
    """

    def __init__(self, project_name=None, source_dir=None, data_dir=None, volumes_dir=None):
        # assignments go through the property setters below
        self.project_name = project_name
        self.source_dir = source_dir
        self.data_dir = data_dir
        self.volumes_dir = volumes_dir

    @staticmethod
    def _normalise(value):
        """Return an absolute path for a truthy value, otherwise None."""
        return os.path.abspath(value) if value else None

    @property
    def project_name(self):
        """The project name used for the Docker-Compose project name."""
        return self._project_name

    @project_name.setter
    def project_name(self, value):
        self._project_name = value

    @property
    def source_dir(self):
        """Local directory whose files are uploaded to the client node's /src on setup."""
        return self._source_dir

    @source_dir.setter
    def source_dir(self, value):
        self._source_dir = Config._normalise(value)

    @property
    def data_dir(self):
        """Local directory containing files to be ingested into HDFS upon setup."""
        return self._data_dir

    @data_dir.setter
    def data_dir(self, value):
        self._data_dir = Config._normalise(value)

    @property
    def volumes_dir(self):
        """Local directory (which may not yet exist) where docker persists files between runs."""
        return self._volumes_dir

    @volumes_dir.setter
    def volumes_dir(self, value):
        self._volumes_dir = Config._normalise(value)

    def save(self, filename):
        """
        Saves the configuration to a JSON file.
        """
        state = { \
            'project_name': self._project_name, \
            'source_dir': self._source_dir, \
            'data_dir': self._data_dir, \
            'volumes_dir': self._volumes_dir \
        }
        with open(filename, 'w') as _fp:
            json.dump(state, _fp, indent=2)

    @staticmethod
    def load(filename):
        """
        Loads a configuration previously written by save().
        """
        with open(filename, 'r') as _fp:
            state = json.load(_fp)
        return Config(project_name=state['project_name'],
                      source_dir=state['source_dir'],
                      data_dir=state['data_dir'],
                      volumes_dir=state['volumes_dir'])
def exec_docker(config, node_name, command, workdir=None, \
        interactive=False, detached=False, check=True):
    """
    Executes a command on a cluster node via ``docker exec``.

    :param config: Config providing the docker-compose project name; the
        container is assumed to be named ``<project>_<node_name>_1``.
    :param node_name: compose service name of the node (e.g. 'nn1').
    :param command: command line to run; double-quoted segments are kept as
        single arguments (parsed with shlex).
    :param workdir: working directory inside the container, if any.
    :param interactive: allocate an interactive TTY (-i -t).
    :param detached: run the command in the background (-d).
    :param check: raise CalledProcessError on a non-zero exit status.
    :return: the exit status of the docker invocation.
    """
    _args = ['docker', 'exec']
    if workdir:
        _args.extend(['-w', workdir])
    if interactive:
        _args.extend(['-i', '-t'])
    if detached:
        _args.append('-d')
    _args.append('%s_%s_1' % (config.project_name, node_name))
    # shlex.split honours quoted segments like the old manual '"' splitting did.
    # The previous call passed the argument LIST together with shell=True, which
    # on POSIX executes only the first element ('docker') and turns the rest
    # into shell positional parameters — the command never actually ran as
    # intended. With an argument list, shell=False (the default) is correct.
    _args.extend(shlex.split(command))
    output = subprocess.run(_args, check=check)
    return output.returncode
def build_img(config):
    """
    Builds or rebuilds the dockerfile images.

    :param config: Config providing the compose project name.
    """
    # set_environment is defined elsewhere in this file; presumably it exports
    # config-derived environment variables consumed by docker-compose — confirm.
    set_environment(config)
    os.system('docker-compose -p %s -f "%s" build' % (config.project_name, COMPOSE_FILE))
def format_hdfs(config):
    """
    Formats hdfs in the cluster.

    Runs ``hdfs namenode -format`` on the primary name node with cluster id
    'clust'; -force skips the interactive confirmation.
    """
    exec_docker(config, 'nn1', '%s/bin/hdfs namenode -format -force clust' % (HADOOP_HOME))
def ingest_data(config):
    """
    Ingests data from the configured data volume into hdfs.

    Copies the container-local /data directory (the mounted data volume) into
    HDFS at /data via ``hadoop fs -put`` on the name node.
    """
    exec_docker(config, 'nn1', '%s/bin/hadoop fs -put /data /data' % (HADOOP_HOME))
def copy_source(config):
    """
    Copies from the configured local source directory to the source volume.
    Use to update the client node's /src folder on a running cluster when new code is written.

    :param config: Config with ``source_dir`` (copy source) and ``volumes_dir``
        (the docker volumes root; files land in its ``client`` subdirectory).
    """
    if not os.path.exists(config.source_dir):
        print('Source directory does not exist. Please check configuration and try again.')
        return
    dir_name = os.path.join(config.volumes_dir, 'client')
    # shutil.copytree replaces the deprecated distutils.dir_util.copy_tree
    # (distutils was removed from the standard library in Python 3.12);
    # dirs_exist_ok=True merges into an existing volume directory and creates
    # it when missing, so the explicit os.makedirs is no longer needed.
    shutil.copytree(config.source_dir, dir_name, dirs_exist_ok=True)
    print('Source files copied to volume.')
def setup_hive(config):
    """
    Makes required hdfs directories for hive to run and initializes the schema metastore.

    The two mkdir calls use check=False because the directories may already
    exist from a previous run; the chmod and schematool calls must succeed.
    """
    fs_cmd = '%s/bin/hadoop fs ' % (HADOOP_HOME)
    exec_docker(config, 'nn1', fs_cmd + '-mkdir /tmp', check=False)
    exec_docker(config, 'nn1', fs_cmd + '-mkdir -p /user/hive/warehouse', check=False)
    exec_docker(config, 'nn1', fs_cmd + '-chmod g+w /tmp')
    exec_docker(config, 'nn1', fs_cmd + '-chmod g+w /user/hive/warehouse')
    # the derby metastore lives in /metastore on the hive server node
    exec_docker(config, 'hs', '%s/bin/schematool -dbType derby -initSchema' % \
        (HIVE_HOME), workdir='/metastore')
def cluster_up(config):
    """
    Boots the cluster up but does not run any of the daemons.

    Starts all compose services detached (-d); daemons are launched separately
    by start_hadoop_daemons()/start_hive_server().
    """
    set_environment(config)
    os.system('docker-compose -p %s -f "%s" up -d' % (config.project_name, COMPOSE_FILE))
def start_hadoop_daemons(config):
    """
    Runs all daemons in the hadoop distribution on their respective nodes.

    One daemon per node: namenode on nn1, datanode on dn1, resourcemanager on
    rman, nodemanager on nm1 and the map-reduce history server on mrhist.
    """
    exec_docker(config, 'nn1', '%s/bin/hdfs --daemon start namenode' % (HADOOP_HOME))
    exec_docker(config, 'dn1', '%s/bin/hdfs --daemon start datanode' % (HADOOP_HOME))
    exec_docker(config, 'rman', '%s/bin/yarn --daemon start resourcemanager' % (HADOOP_HOME))
    exec_docker(config, 'nm1', '%s/bin/yarn --daemon start nodemanager' % (HADOOP_HOME))
    exec_docker(config, 'mrhist', '%s/bin/mapred --daemon start historyserver' % (HADOOP_HOME))
def start_hive_server(config):
    """
    Starts the hive server daemon.

    Runs hiveserver2 detached from /metastore (where the derby metastore was
    initialised by setup_hive) so the server finds its schema.
    """
    exec_docker(config, 'hs', '%s/bin/hiveserver2' % (HIVE_HOME), \
        detached=True, workdir='/metastore')
def cluster_down(config):
    """
    Spins the cluster down.

    Stops and removes the compose containers; persisted volume data survives.
    """
    set_environment(config)
    os.system('docker-compose -p %s -f "%s" down' % (config.project_name, COMPOSE_FILE))
def metric_request(port):
    """
    Sends an http request to a node's jmx endpoint. Returns the parsed json,
    or None on any connection, HTTP-status or JSON-decoding error.

    :param port: localhost port the node's web UI is mapped to.
    """
    try:
        _r = requests.get('http://localhost:%d/jmx' % (port))
    except requests.RequestException:
        # narrowed from a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit and could mask unrelated bugs
        return None
    if _r.status_code != 200:
        return None
    try:
        return _r.json()
    except ValueError:
        return None
def find_bean_by_name(jsn, nme):
    """
    Returns the bean with the given name from a jmx metrics json object,
    or None when the json has no 'beans' list or no bean matches.
    """
    if 'beans' not in jsn:
        return None
    for bean in jsn['beans']:
        if bean['name'] == nme:
            return bean
    return None
def extract_bean_prop(jsn, bean_name, propname):
    """
    Returns the value of ``propname`` on the named bean from a jmx metrics
    json object, or None when the bean or the property is absent.
    """
    bean = find_bean_by_name(jsn, bean_name)
    if not bean:
        return None
    if propname not in bean:
        return None
    return bean[propname]
def gen_node_report_from_checks(jsn, checks):
    """
    Builds a NodeHealthReport for one node by running every
    NodeHealthBeanCheck in ``checks`` against the jmx metrics json.
    The node counts as healthy only when every checked bean property is
    present and every check passes; the message collects one line per check.
    """
    all_ok = True
    lines = []
    for check in checks:
        value = extract_bean_prop(jsn, check.bean_name, check.prop_name)
        if value is None:
            all_ok = False
            lines.append('\u274C Missing required bean property. Bean name: "%s", property: "%s"' % \
                (check.bean_name, check.prop_name))
            continue
        result = check.check_func(value)
        if result.is_healthy:
            lines.append('\u2705 ' + result.message)
        else:
            all_ok = False
            lines.append('\u274C ' + result.message)
    return NodeHealthReport(is_healthy=all_ok, message='\n'.join(lines))
def _check_func_disk_space(prop_val):
    """
    Check function comparing a remaining-bytes metric against MIN_DISK_SPACE.
    Returns a NodeHealthReport describing whether enough space remains.
    """
    if prop_val >= MIN_DISK_SPACE:
        return NodeHealthReport(is_healthy=True, message='Sufficient disk space.')
    return NodeHealthReport(is_healthy=False, message='Insufficient' \
        ' disk space. Minimum required disk space is %d. Remaining bytes: %d' % \
        (MIN_DISK_SPACE, prop_val))
def json_checker_namenode(jsn):
"""
Checks the jmx metrics json for the namenode and returns a node health report
"""
checks = [
NodeHealthBeanCheck( \
bean_name='Hadoop:service=NameNode,name=StartupProgress', \
prop_name='PercentComplete', \
check_func=lambda i: NodeHealthReport(is_healthy=True, message='Startup completed.') \
if i == 1.0 else NodeHealthReport(is_healthy=False, message='Startup not complete.' \
' Progress: %%%f.' % (i * 100)) \
),
NodeHealthBeanCheck( \
bean_name='Hadoop:service=NameNode,name=FSNamesystem', \
prop_name='tag.HAState', \
check_func=lambda i: NodeHealthReport(is_healthy=True, message='Namenode active.') \
if i == 'active' else NodeHealthReport(is_healthy=False, message='Namenode inactive.' \
' State: "%s"' % (i)) \
),
NodeHealthBeanCheck( \
bean_name='Hadoop:service=NameNode,name=FSNamesystem', \
prop_name='MissingBlocks', \
check_func=lambda i: NodeHealthReport(is_healthy=True, message='No missing blocks.') \
if i == 0 else NodeHealthReport(is_healthy=False, message='One or more missing blocks.' \
' Data is missing. Blocks missing: %d.' % (i)) \
),
NodeHealthBeanCheck( \
bean_name='Hadoop:service=NameNode,name=FSNamesystem', \
prop_name='CapacityRemaining', \
check_func=_check_func_disk_space
),
NodeHealthBeanCheck( \
bean_name='Hadoop:service=NameNode,name=FSNamesystemState', \
prop_name='NumLiveDataNodes', \
check_func=lambda i: NodeHealthReport(is_healthy=True, message='All data nodes' \
' are connected.') \
if i == 1 else NodeHealthReport(is_healthy=False, message='Some data nodes are not' \
' connected. Number of connected data nodes: %d/%d' % (i, NUM_DATA_NODES)) \
),
NodeHealthBeanCheck( \
bean_name='Hadoop:service=NameNode,name=FSNamesystemState', \
prop_name='NumStaleDataNodes', \
check_func=lambda i: NodeHealthReport(is_healthy=True, message='No stale data | |
design_df, run_enrichr=None, enrichrgram=True)
elif data_type == PROTEOMICS or data_type == METABOLOMICS:
json_data = to_clustergrammer(X_std, design_df)
return json_data
def get_standardized_df(analysis_data, axis, pk_cols=PKS):
    """
    Returns (X_std, data_df, design_df) for an AnalysisData instance.

    Genomics data and proteomics/metabolomics data are standardised
    differently: they use different minimum-value replacements, and only the
    latter two are log-transformed before standardisation.
    """
    data_type = analysis_data.data_type
    data_df, design_df = get_dataframes(analysis_data, pk_cols)
    X_std = None
    if data_type == GENOMICS:
        inference = GraphOmicsInference(data_df, design_df, data_type,
                                        min_value=MIN_REPLACE_GENOMICS)
        X_std = inference.standardize_df(inference.data_df, axis=axis)
    elif data_type == PROTEOMICS or data_type == METABOLOMICS:
        inference = GraphOmicsInference(data_df, design_df, data_type,
                                        min_value=MIN_REPLACE_PROTEOMICS_METABOLOMICS)
        X_std = inference.standardize_df(inference.data_df, log=True, axis=axis)
    return X_std, data_df, design_df
def to_clustergrammer(data_df, design_df, run_enrichr=None, enrichrgram=None):
    """
    Converts a measurement dataframe into Clustergrammer network JSON for the
    heatmap widget. Returns None when data_df is empty.

    :param data_df: features x samples dataframe to cluster.
    :param design_df: sample design dataframe with a 'group' column used to
        annotate the heatmap columns.
    :param run_enrichr: passed through to Network.cluster (genomics only).
    :param enrichrgram: passed through to Network.cluster (genomics only).
    """
    json_data = None
    if not data_df.empty:
        net = Network()
        data_df = data_df[~data_df.index.duplicated(keep='first')]  # remove rows with duplicate indices
        net.load_df(data_df)
        # column categories: one Clustergrammer category per design group
        cats = {}
        for k, v in design_df.groupby('group').groups.items():
            cats[k] = v.values.tolist()
        net.add_cats('col', [
            {
                'title': 'Group',
                'cats': cats
            }
        ])
        # alternative pre-processing steps, kept for reference:
        # net.filter_sum('row', threshold=20)
        # net.normalize(axis='col', norm_type='zscore')
        # net.filter_N_top('row', 1000, rank_type='var')
        # net.filter_threshold('row', threshold=3.0, num_occur=4)
        # net.swap_nan_for_zero()
        # net.downsample(ds_type='kmeans', axis='col', num_samples=10)
        # net.random_sample(random_state=100, num_samples=10, axis='col')
        net.cluster(dist_type='cosine', run_clustering=True,
                    dendro=True, views=['N_row_sum', 'N_row_var'],
                    linkage_type='average', sim_mat=False, filter_sim=0.1,
                    calc_cat_pval=False, run_enrichr=run_enrichr, enrichrgram=enrichrgram)
        json_data = net.export_net_json()
    return json_data
def get_last_data(analysis, data_type):
    """
    Returns the most recent AnalysisData row for this analysis and data type.
    Raises IndexError when no such row exists.
    """
    newest_first = AnalysisData.objects.filter(
        analysis=analysis, data_type=data_type).order_by('-timestamp')
    return newest_first[0]
def get_context(analysis, current_user):
    """
    Builds the template context for an analysis page.

    :param analysis: the Analysis model instance being displayed.
    :param current_user: the requesting user; anonymous users cannot save
        selection groups and read-only status is derived from ownership.
    :return: dict of template variables, including a JSON map of view
        name -> resolved URL consumed by the frontend.
    """
    # anonymous visitors get no selection-group UI
    show_selection_group = True if not current_user.is_anonymous else False
    view_names = {
        TABLE_IDS[GENOMICS]: get_reverse_url('get_ensembl_gene_info', analysis),
        TABLE_IDS[PROTEOMICS]: get_reverse_url('get_uniprot_protein_info', analysis),
        TABLE_IDS[METABOLOMICS]: get_reverse_url('get_kegg_metabolite_info', analysis),
        TABLE_IDS[REACTIONS]: get_reverse_url('get_reactome_reaction_info', analysis),
        TABLE_IDS[PATHWAYS]: get_reverse_url('get_reactome_pathway_info', analysis),
        'get_firdi_data': get_reverse_url('get_firdi_data', analysis),
        'get_heatmap_data': get_reverse_url('get_heatmap_data', analysis),
        'get_short_info': get_reverse_url('get_short_info', None),
        'save_group': get_reverse_url('save_group', analysis),
        'load_group': get_reverse_url('load_group', analysis),
        'list_groups': get_reverse_url('list_groups', analysis),
        'get_boxplot': get_reverse_url('get_boxplot', analysis),
        'get_gene_ontology': get_reverse_url('get_gene_ontology', analysis),
    }
    context = {
        'analysis_id': analysis.pk,
        'analysis_name': analysis.name,
        'analysis_description': analysis.description,
        'analysis_species': analysis.get_species_str(),
        'publication': analysis.publication,
        'publication_link': analysis.publication_link,
        'view_names': json.dumps(view_names),
        # per-omics tables are rendered only when observed data exists
        'show_gene_data': show_data_table(analysis, GENOMICS),
        'show_protein_data': show_data_table(analysis, PROTEOMICS),
        'show_compound_data': show_data_table(analysis, METABOLOMICS),
        'read_only': analysis.get_read_only_status(current_user),
        'show_selection_group': show_selection_group
    }
    return context
def show_data_table(analysis, data_type):
    """
    True when the latest data of this type has at least one observed row;
    drives whether the corresponding table is rendered at all.
    """
    analysis_data = get_last_analysis_data(analysis, data_type)
    data_df, _design_df = get_dataframes(analysis_data, IDS)
    observed = data_df['obs'] == True
    return np.any(observed)  # show table if there's any observation
def get_reverse_url(viewname, analysis):
    """
    Resolves a Django view name to a URL, adding the analysis_id kwarg when
    an analysis is given.
    """
    if analysis is None:
        return reverse(viewname)
    return reverse(viewname, kwargs={'analysis_id': analysis.id})
# TODO: no longer used, can remove?
def get_count_df(gene_2_proteins_mapping, protein_2_reactions_mapping, compound_2_reactions_mapping,
                 reaction_2_pathways_mapping, species_list):
    """
    Builds per-reaction and per-pathway observed-entity count dataframes.

    :return: (reaction_count_df, pathway_count_df) where reaction_count_df has
        columns reaction_pk/R_E/R_C (observed protein and compound counts per
        reaction) and pathway_count_df has columns pathway_pk/P_E/P_C
        (observed protein and compound counts per pathway).
    """
    count_df, pathway_compound_counts, pathway_protein_counts = get_reaction_df(
        gene_2_proteins_mapping,
        protein_2_reactions_mapping,
        compound_2_reactions_mapping,
        reaction_2_pathways_mapping,
        species_list)
    # keep only the reaction key and the two observed counts, renamed to the
    # short column names the downstream consumers expect
    reaction_count_df = count_df.rename({
        'reaction_id': 'reaction_pk',
        'observed_protein_count': 'R_E',
        'observed_compound_count': 'R_C'
    }, axis='columns')
    reaction_count_df = reaction_count_df.drop([
        'reaction_name',
        'protein_coverage',
        'compound_coverage',
        'all_coverage',
        'protein',
        'all_protein_count',
        'compound',
        'all_compound_count',
        'pathway_ids',
        'pathway_names'
    ], axis=1)
    # a pathway may appear in only one of the two count dicts; missing entries count as 0
    pathway_pks = set(list(pathway_compound_counts.keys()) + list(pathway_protein_counts.keys()))
    data = []
    for pathway_pk in pathway_pks:
        try:
            p_e = pathway_protein_counts[pathway_pk]
        except KeyError:
            p_e = 0
        try:
            p_c = pathway_compound_counts[pathway_pk]
        except KeyError:
            p_c = 0
        data.append((pathway_pk, p_e, p_c))
    pathway_count_df = pd.DataFrame(data, columns=['pathway_pk', 'P_E', 'P_C'])
    return reaction_count_df, pathway_count_df
def save_json_string(data, outfile):
    """
    Writes an already-serialised JSON string to ``outfile``.

    The file is written as UTF-8 explicitly: JSON interchange is defined in
    UTF-8, and the previous platform-default encoding could fail on
    non-ASCII content (e.g. cp1252 on Windows).
    """
    with open(outfile, 'w', encoding='utf-8') as f:
        f.write(data)
    logger.debug('Saving %s' % outfile)
def csv_to_dataframe(csv_str):
    """
    Parses an uploaded CSV string into a (data_df, group_df) pair.

    The CSV may contain one special line matching GROUP_COL that assigns a
    group to every column; that line is stripped from the data and used to
    build group_df (sample -> group). data_df is None for an empty CSV;
    group_df is None when no measurement columns remain after filtering out
    the identifier/peak-id/p-value/fold-change columns.
    """
    # extract group, if any
    filtered_str = ''
    group_str = None
    for line in csv_str.splitlines():  # go through all lines and remove the line containing the grouping info
        if re.match(GROUP_COL, line, re.I):
            group_str = line
        else:
            filtered_str += line + '\n'
    # extract id values
    data = StringIO(filtered_str)
    try:
        data_df = pd.read_csv(data)
        # sanitise column names that would break alasql on the client side;
        # regex=False makes the '.' replacement literal (as the intent always
        # was) instead of a regex wildcard
        data_df.columns = data_df.columns.str.replace('.', '_', regex=False)
        data_df.columns = data_df.columns.str.replace('-', '_', regex=False)
        data_df.columns = data_df.columns.str.replace('#', '', regex=False)
        rename = {data_df.columns.values[0]: IDENTIFIER_COL}
        # sql doesn't like column names starting with a number, so prefix them
        # with an underscore. Only non-identifier columns are checked: the old
        # loop indexed from 0 while sizing on columns[1:], which could clobber
        # the identifier rename and never inspected the last column.
        for col_name in data_df.columns.values[1:]:
            if col_name[0].isdigit():
                rename[col_name] = '_' + col_name
        data_df = data_df.rename(columns=rename)
        data_df.iloc[:, 0] = data_df.iloc[:, 0].astype(str)  # assume id is in the first column and is a string
    except pd.errors.EmptyDataError:
        data_df = None
    # create grouping dataframe
    group_df = None
    if data_df is not None:
        sample_data = data_df.columns.values
        if group_str is not None:
            group_data = group_str.split(',')
        else:
            num_samples = len(sample_data)
            group_data = [DEFAULT_GROUP_NAME for x in
                          range(num_samples)]  # assigns a default group if nothing specified
        # skip non-measurement columns
        filtered_sample_data = []
        filtered_group_data = []
        for i in range(len(sample_data)):
            sample_name = sample_data[i]
            if sample_name == IDENTIFIER_COL or \
                    sample_name == PIMP_PEAK_ID_COL or \
                    sample_name.startswith(PADJ_COL_PREFIX) or \
                    sample_name.startswith(FC_COL_PREFIX):
                continue
            filtered_sample_data.append(sample_data[i])
            filtered_group_data.append(group_data[i])
        # convert to dataframe
        if len(filtered_group_data) > 0:
            group_df = pd.DataFrame(list(zip(filtered_sample_data, filtered_group_data)),
                                    columns=[SAMPLE_COL, GROUP_COL])
    return data_df, group_df
def get_ids_from_dataframe(df):
    """
    Returns the values of the first (identifier) column as a list,
    or an empty list when df is None.
    """
    if df is not None:
        return df.iloc[:, 0].values.tolist()  # id is always the first column
    return []
def merge_relation(r1, r2):
    """
    Returns a Relation combining r1 and r2: keys and values are unioned and
    duplicate mapping rows (dicts with identical items) are dropped.
    """
    merged_keys = list(set(r1.keys + r2.keys))
    merged_values = list(set(r1.values + r2.values))
    combined_rows = r1.mapping_list + r2.mapping_list
    # freeze each row's items to deduplicate dicts, then thaw back to dicts
    frozen_rows = set(map(lambda row: frozenset(row.items()), combined_rows))
    deduped_rows = list(map(dict, frozen_rows))
    return Relation(keys=merged_keys, values=merged_values,
                    mapping_list=deduped_rows)
def reverse_relation(rel):
    """
    Returns a Relation with keys and values swapped; the mapping rows are
    shared with the original.
    """
    return Relation(
        keys=rel.values,
        values=rel.keys,
        mapping_list=rel.mapping_list,
    )
def expand_relation(rel, mapping, pk_col):
    """Expand a Relation through an id-replacement mapping.

    Keys and values are substituted via `substitute`; each mapping row is
    expanded via `expand_each` on its primary-key column. Rows whose pk has
    no replacement are kept unchanged.
    """
    new_rows = []
    for row in rel.mapping_list:
        replacements = expand_each(row, mapping, pk_col)
        # A row with no replacement survives as-is
        new_rows.extend(replacements if replacements else [row])
    return Relation(keys=substitute(rel.keys, mapping),
                    values=substitute(rel.values, mapping),
                    mapping_list=new_rows)
def substitute(my_list, mapping):
    """Return a copy of my_list with mapped elements expanded.

    Each element that appears as a key in `mapping` is replaced by all of
    its mapped values (a list); unmapped elements are kept as-is.
    """
    result = []
    for item in my_list:
        result.extend(mapping.get(item, [item]))
    return result
def expand_each(row, mapping, pk_col):
    """Expand one mapping row on its primary-key column.

    Produces one copy of `row` per replacement of row[pk_col] found in
    `mapping`, each with pk_col rebound to the replacement. Returns an
    empty list when the pk has no replacements.
    """
    pk = row[pk_col]
    if pk not in mapping:
        return []
    expanded_rows = []
    for replacement in mapping[pk]:
        # copy the row minus its pk, then rebind the pk to the replacement
        clone = {key: val for key, val in row.items() if key != pk_col}
        clone[pk_col] = replacement
        expanded_rows.append(clone)
    return expanded_rows
# https://stackoverflow.com/questions/31433989/return-copy-of-dictionary-excluding-specified-keys
def without_keys(d, keys):
    """Shallow copy of dict `d` with every key in `keys` omitted.

    (Idea from https://stackoverflow.com/questions/31433989.)
    """
    excluded = set(keys)
    return {key: value for key, value in d.items() if key not in excluded}
def pk_to_json(pk_label, display_label, data, metadata_map, observed_df, has_species=False,
               observed_ids=None, mapping=None):
    """Serialise a collection of item ids into the JSON row list used downstream.

    Args:
        pk_label: key name under which each row stores its primary key.
        display_label: key name under which each row stores its display label.
        data: iterable of item ids; an id may embed a peak id as '<item>_<peak>'.
        metadata_map: item id -> metadata dict with 'display_name' (and
            optionally 'species'); may be empty.
        observed_df: optional DataFrame of measurements, keyed by IDENTIFIER_COL.
        has_species: when True, every row gains a 'species' field.
        observed_ids: optional collection; when given, each row's 'obs' flag
            says whether the item is in it (None otherwise).
        mapping: optional id -> [ids] expansion applied to `data` first.

    Returns:
        A JSON string: list of row dicts plus a trailing dummy NA row.
    """
    if observed_df is not None:
        if PIMP_PEAK_ID_COL in observed_df.columns: # if peak id is present, rename the identifier column to include it
            # NOTE(review): this writes into the caller's dataframe in place -- confirm intended
            observed_df[IDENTIFIER_COL] = observed_df[IDENTIFIER_COL] + '_' + observed_df[PIMP_PEAK_ID_COL].astype(str)
        # NOTE(review): the mapping expansion only happens when observed_df is
        # provided -- confirm that is intentional
        if mapping is not None:
            data = expand_data(data, mapping)
        observed_df = observed_df.set_index(IDENTIFIER_COL) # set identifier as index
        observed_df = observed_df[~observed_df.index.duplicated(keep='first')] # remove row with duplicate indices
        observed_df = observed_df.fillna(value=0) # replace all NaNs with 0s
    output = []
    for item in sorted(data):
        if item == NA:
            continue # handled below after this loop
        # split '<item>_<peak_id>' ids into their two components
        if '_' in item:
            tokens = item.split('_')
            assert len(tokens) == 2
            item = tokens[0]
            peak_id = tokens[1]
        else:
            peak_id = None
        # add observed status and the primary key label to row data
        row = {}
        if observed_ids is not None:
            if item in observed_ids:
                row['obs'] = True
            else:
                row['obs'] = False
        else:
            row['obs'] = None
        if peak_id:
            key = '%s_%s' % (item, peak_id)
            row[pk_label] = key
        else:
            row[pk_label] = item
        # add display label to row_data
        species = None
        if len(metadata_map) > 0 and item in metadata_map and metadata_map[item] is not None:
            if peak_id:
                label = '%s (%s)' % (metadata_map[item]['display_name'].capitalize(), peak_id)
            else:
                label = metadata_map[item]['display_name'].capitalize()
            # get the species if it's there too
            if has_species and 'species' in metadata_map[item]:
                species = metadata_map[item]['species']
        else:
            label = item # otherwise use the item id as the label
        row[display_label] = label
        # add the remaining data columns to row
        if observed_df is not None:
            try:
                if peak_id:
                    observed_values = observed_df.loc[key].to_dict()
                else:
                    observed_values = observed_df.loc[item].to_dict()
            except KeyError: # missing data: fill every measurement column with None
                observed_values = {}
                for col in observed_df.columns:
                    observed_values.update({col: None})
            observed_values.pop(PIMP_PEAK_ID_COL, None) # remove pimp peakid column
            # convert numpy bool to python bool, else json serialisation will break
            for k, v in observed_values.items():
                if type(v) == np.bool_:
                    observed_values[k] = bool(v)
            row.update(observed_values)
        if has_species:
            row['species'] = species
        # deduplicate rows (linear scan; row dicts are unhashable)
        if row not in output:
            output.append(row)
    # add dummy entry
    row = {'obs': NA, pk_label: NA, display_label: NA}
    if has_species:
        row['species'] = NA
    if observed_df is not None: # also add the remaining columns
        for col in observed_df.columns:
            if col == PIMP_PEAK_ID_COL:
                continue
            row.update({col: 0})
    if row not in output:
        output.append(row)
    output_json = json.dumps(output)
    return output_json
def expand_data(data, mapping):
    """Expand a list of ids through a replacement mapping.

    Ids present as keys in `mapping` are replaced by all of their mapped
    values; other ids pass through unchanged.
    """
    return [expanded
            for entry in data
            for expanded in (mapping[entry] if entry in mapping else [entry])]
def make_relations(mapping, source_pk, target_pk, value_key=None):
id_values = []
mapping_list = []
for key in mapping:
value_list = mapping[key]
# value_list can be either a list of strings or dictionaries
# check if the first element is a dict, else assume it's a string
assert len(value_list) > 0
is_string = True
first = value_list[0]
if isinstance(first, dict):
is_string = False
# process each element in value_list
for value in value_list:
if is_string: # value_list is a list of string
actual_value = value
else: # value_list is a list of dicts
assert value_key is not None, 'value_key is missing'
actual_value = value[value_key]
id_values.append(actual_value)
row = {source_pk: key, target_pk: actual_value}
mapping_list.append(row)
unique_keys = | |
#**************************************************************************#
# This file is part of pymsc which is released under MIT License. See file #
# LICENSE or go to https://github.com/jam1garner/pymsc/blob/master/LICENSE #
# for full license details. #
#**************************************************************************#
from sys import version_info
isPython3 = version_info >= (3,)
assert isPython3 #If this fails switch to python 3
import struct, tempfile
# 16-byte header magic identifying an MSC binary file.
MSC_MAGIC = b'\xB2\xAC\xBC\xBA\xE6\x90\x32\x01\xFD\x02\x00\x00\x00\x00\x00\x00'
# Assembler mnemonic -> opcode. Several mnemonics alias the same opcode
# (e.g. "jump"/"jump4", "try"/"unk_2E", "notEquals"/"notEqual").
# 0xFFFE ("byte") and 0xFFFF ("long") are assembler pseudo-ops, not real
# VM opcodes.
COMMAND_IDS = {
    "nop" : 0x0,
    "begin" : 0x2,
    "end" : 0x3,
    "jump" : 0x4,
    "jump4" : 0x4,
    "jump5" : 0x5,
    "return_6" : 0x6,
    "return_7" : 0x7,
    "return_8" : 0x8,
    "return_9" : 0x9,
    "pushInt" : 0xa,
    "pushVar" : 0xb,
    "error_C" : 0xc,
    "pushShort" : 0xd,
    "addi" : 0xe,
    "subi" : 0xf,
    "multi" : 0x10,
    "divi" : 0x11,
    "modi" : 0x12,
    "negi" : 0x13,
    "i++" : 0x14,
    "i--" : 0x15,
    "bitAnd" : 0x16,
    "bitOr" : 0x17,
    "bitNot" : 0x18,
    "bitXor" : 0x19,
    "leftShift" : 0x1a,
    "rightShift" : 0x1b,
    "setVar" : 0x1c,
    "i+=" : 0x1d,
    "i-=" : 0x1e,
    "i*=" : 0x1f,
    "i/=" : 0x20,
    "i%=" : 0x21,
    "i&=" : 0x22,
    "i|=" : 0x23,
    "i^=" : 0x24,
    "equals" : 0x25,
    "notEquals" : 0x26,
    "notEqual" : 0x26,
    "lessThan" : 0x27,
    "lessOrEqual" : 0x28,
    "greater" : 0x29,
    "greaterOrEqual" : 0x2a,
    "not" : 0x2b,
    "printf" : 0x2c,
    "sys" : 0x2d,
    "try" : 0x2e,
    "unk_2E" : 0x2e,
    "callFunc" : 0x2f,
    "callFunc2" : 0x30,
    "callFunc3" : 0x31,
    "push" : 0x32,
    "pop" : 0x33,
    "if" : 0x34,
    "ifNot" : 0x35,
    "else" : 0x36,
    "error_37" : 0x37,
    "intToFloat" : 0x38,
    "floatToInt" : 0x39,
    "addf" : 0x3a,
    "subf" : 0x3b,
    "multf" : 0x3c,
    "divf" : 0x3d,
    "negf" : 0x3e,
    "f++" : 0x3f,
    "f--" : 0x40,
    "floatVarSet" : 0x41,
    "float+=" : 0x42,
    "float-=" : 0x43,
    "float*=" : 0x44,
    "float/=" : 0x45,
    "floatEqual" : 0x46,
    "floatNotEqual" : 0x47,
    "floatLess" : 0x48,
    "floatLessOrEqual" : 0x49,
    "floatGreater" : 0x4a,
    "floatGreaterOrEqual" : 0x4b,
    "error_4c" : 0x4c,
    "exit" : 0x4d,
    "byte" : 0xFFFE,
    "long" : 0xFFFF
    }
# Reverse lookup: opcode -> canonical mnemonic. Where several mnemonics map
# to one opcode, the first one declared in COMMAND_IDS wins (dicts preserve
# insertion order), matching the original "first writer wins" loop.
COMMAND_NAMES = {}
for _mnemonic, _opcode in COMMAND_IDS.items():
    COMMAND_NAMES.setdefault(_opcode, _mnemonic)
# Opcode -> struct-style format string ('B' = u8, 'H' = u16, 'I' = u32)
# describing the inline parameter bytes that follow the opcode byte.
# NOTE(review): opcode 0x37 ("error_37") has no entry here, so looking up
# its format raises KeyError -- confirm whether that is intentional.
COMMAND_FORMAT = {
    0x0 : '',
    0x2 : 'HH',
    0x3 : '',
    0x4 : 'I',
    0x5 : 'I',
    0x6 : '',
    0x7 : '',
    0x8 : '',
    0x9 : '',
    0xa : 'I',
    0xb : 'BH',
    0xc : '',
    0xd : 'H',
    0xe : '',
    0xf : '',
    0x10 : '',
    0x11 : '',
    0x12 : '',
    0x13 : '',
    0x14 : 'BH',
    0x15 : 'BH',
    0x16 : '',
    0x17 : '',
    0x18 : '',
    0x19 : '',
    0x1a : '',
    0x1b : '',
    0x1c : 'BH',
    0x1d : 'BH',
    0x1e : 'BH',
    0x1f : 'BH',
    0x20 : 'BH',
    0x21 : 'BH',
    0x22 : 'BH',
    0x23 : 'BH',
    0x24 : 'BH',
    0x25 : '',
    0x26 : '',
    0x27 : '',
    0x28 : '',
    0x29 : '',
    0x2a : '',
    0x2b : '',
    0x2c : 'B',
    0x2d : 'BB',
    0x2e : 'I',
    0x2f : 'B',
    0x30 : 'B',
    0x31 : 'B',
    0x32 : '',
    0x33 : '',
    0x34 : 'I',
    0x35 : 'I',
    0x36 : 'I',
    0x38 : 'B',
    0x39 : 'B',
    0x3a : '',
    0x3b : '',
    0x3c : '',
    0x3d : '',
    0x3e : '',
    0x3f : 'BH',
    0x40 : 'BH',
    0x41 : 'BH',
    0x42 : 'BH',
    0x43 : 'BH',
    0x44 : 'BH',
    0x45 : 'BH',
    0x46 : '',
    0x47 : '',
    0x48 : '',
    0x49 : '',
    0x4a : '',
    0x4b : '',
    0x4c : '',
    0x4d : '',
    0xFFFE : 'B',
    0xFFFF : 'I'
}
# Opcode -> callable(params) returning how many values the command pops from
# the VM stack. Most are constants; printf (0x2c) and sys (0x2d) pop a count
# given by their first inline parameter, and callFunc* (0x2f-0x31) pop their
# first parameter plus one (the function target).
# NOTE(review): push (0x32) maps to -1 -- presumably a sentinel for "pushes
# instead of pops"; confirm how callers interpret negative values.
COMMAND_STACKPOPS = {
    0x0 : lambda params: 0,
    0x2 : lambda params: 0,
    0x3 : lambda params: 0,
    0x4 : lambda params: 0,
    0x5 : lambda params: 0,
    0x6 : lambda params: 1,
    0x7 : lambda params: 0,
    0x8 : lambda params: 1,
    0x9 : lambda params: 0,
    0xa : lambda params: 0,
    0xb : lambda params: 0,
    0xc : lambda params: 0,
    0xd : lambda params: 0,
    0xe : lambda params: 2,
    0xf : lambda params: 2,
    0x10 : lambda params: 2,
    0x11 : lambda params: 2,
    0x12 : lambda params: 2,
    0x13 : lambda params: 1,
    0x14 : lambda params: 0,
    0x15 : lambda params: 0,
    0x16 : lambda params: 2,
    0x17 : lambda params: 2,
    0x18 : lambda params: 1,
    0x19 : lambda params: 2,
    0x1a : lambda params: 2,
    0x1b : lambda params: 2,
    0x1c : lambda params: 1,
    0x1d : lambda params: 1,
    0x1e : lambda params: 1,
    0x1f : lambda params: 1,
    0x20 : lambda params: 1,
    0x21 : lambda params: 1,
    0x22 : lambda params: 1,
    0x23 : lambda params: 1,
    0x24 : lambda params: 1,
    0x25 : lambda params: 2,
    0x26 : lambda params: 2,
    0x27 : lambda params: 2,
    0x28 : lambda params: 2,
    0x29 : lambda params: 2,
    0x2a : lambda params: 2,
    0x2b : lambda params: 1,
    0x2c : lambda params: params[0],
    0x2d : lambda params: params[0],
    0x2e : lambda params: 0,
    0x2f : lambda params: params[0] + 1,
    0x30 : lambda params: params[0] + 1,
    0x31 : lambda params: params[0] + 1,
    0x32 : lambda params: -1,
    0x33 : lambda params: 1,
    0x34 : lambda params: 1,
    0x35 : lambda params: 1,
    0x36 : lambda params: 0,
    0x37 : lambda params: 0,
    0x38 : lambda params: 0,
    0x39 : lambda params: 0,
    0x3a : lambda params: 2,
    0x3b : lambda params: 2,
    0x3c : lambda params: 2,
    0x3d : lambda params: 2,
    0x3e : lambda params: 1,
    0x3f : lambda params: 0,
    0x40 : lambda params: 0,
    0x41 : lambda params: 1,
    0x42 : lambda params: 1,
    0x43 : lambda params: 1,
    0x44 : lambda params: 1,
    0x45 : lambda params: 1,
    0x46 : lambda params: 2,
    0x47 : lambda params: 2,
    0x48 : lambda params: 2,
    0x49 : lambda params: 2,
    0x4a : lambda params: 2,
    0x4b : lambda params: 2,
    0x4c : lambda params: 0,
    0x4d : lambda params: 0,
    0xFFFE : lambda params: 0,
    0xFFFF : lambda params: 0
}
# Size in bytes of each struct-style field type used in COMMAND_FORMAT.
TYPE_SIZES = {
    'B' : 1,
    'H' : 2,
    'I' : 4
}
def getSizeFromFormat(formatString):
    """Total parameter size in bytes for a command format string.

    E.g. 'BH' -> 3 (one u8 plus one u16). Excludes the opcode byte itself.
    """
    return sum(TYPE_SIZES[fieldType] for fieldType in formatString)
def disassembleCommands(rawCommands, startOffset):
    """Decode a byte buffer into a list of Command objects.

    `startOffset` is the absolute file offset of the buffer's first byte;
    it is recorded on each command as commandPosition.
    """
    commands = []
    offset = 0
    buffer_length = len(rawCommands)
    while offset < buffer_length:
        cmd = Command()
        cmd.read(rawCommands, offset)
        cmd.commandPosition = startOffset + offset
        commands.append(cmd)
        # advance past the opcode byte plus this command's parameters
        offset += 1 + cmd.paramSize
    return commands
#Thanks Triptych https://stackoverflow.com/questions/1265665/python-check-if-a-string-represents-an-int-without-using-try-except
def _RepresentsInt(s):
try:
int(s, 0)
return True
except:
return False
def _RepresentsFloat(s):
try:
float(s.rstrip('f'))
return True
except:
return False
def parseCommands(text, refs=None, mscStrings=None):
    """Assemble MSC script text into a list of Command objects.

    Args:
        text: source listing; one command per line, '#' starts a comment,
            'label:' lines define jump targets, '.alias value,name' lines
            define numeric aliases, a trailing '.' on a mnemonic sets the
            command's push bit.
        refs: optional mapping of external symbol names to numeric values.
        mscStrings: optional string pool; string literals passed to
            pushInt/pushShort are appended here and replaced by their index.
            Callers that pass their own list still see the appended strings.

    Returns:
        List of Command objects with labels/aliases/refs and numeric
        literals resolved.

    Fixes over the original: mutable default arguments (refs={},
    mscStrings=[]) leaked state across calls; a bare command whose opcode is
    pushInt/pushShort raised IndexError in the string-literal check.
    """
    if refs is None:
        refs = {}
    if mscStrings is None:
        mscStrings = []
    lines = text.replace(', ', ',').split('\n')
    lines = [line.strip() for line in lines if line.strip() != '']
    lines = [line.split('#')[0] for line in lines if line.split('#')[0] != '']
    splitCommands = [[split for split in line.split(' ') if split != ''] for line in lines]
    cmds = []
    labels = {}
    aliases = {}
    currentPos = 0
    for i, splitCommand in enumerate(splitCommands):
        cmd = Command()
        if splitCommand[0][-1] == ':':
            # label definition: record the current byte position
            labels[splitCommand[0][0:-1]] = currentPos
        elif splitCommand[0] == '.alias':
            params = splitCommand[1].split(',')
            aliases[params[1]] = int(params[0], 0)
        else:
            if splitCommand[0][-1] == '.':
                # trailing '.' marks the push bit
                cmd.pushBit = True
                splitCommand[0] = splitCommand[0][0:-1]
            cmd.command = COMMAND_IDS[splitCommand[0]]
            currentPos += getSizeFromFormat(COMMAND_FORMAT[cmd.command]) + 1
            # pushInt/pushShort may take a quoted string literal instead of a
            # numeric operand; the length guard avoids IndexError on a bare
            # pushInt/pushShort with no operand at all.
            isStringPush = (cmd.command in (0xA, 0xD)
                            and len(splitCommand) > 1
                            and splitCommand[1][0] == '"')
            if len(splitCommand) > 1 and not isStringPush:
                cmd.parameters = [param for param in splitCommand[1].split(',')]
            elif isStringPush:
                # re-join the quoted string (it was split on spaces above)
                printString = splitCommand[1][1:]
                for s in splitCommand[2:]:
                    printString += " " + s
                if printString[-1] == '"':
                    printString = printString[:-1]
                cmd.parameters = [len(mscStrings)]
                mscStrings.append(printString)
            cmds.append(cmd)
    # Second pass: resolve labels, aliases, external refs and numeric literals
    labelNames = labels.keys()
    aliasNames = aliases.keys()
    for cmd in cmds:
        for i in range(len(cmd.parameters)):
            if cmd.parameters[i] in labelNames:
                cmd.parameters[i] = labels[cmd.parameters[i]]
            elif cmd.parameters[i] in aliasNames:
                cmd.parameters[i] = aliases[cmd.parameters[i]]
            elif cmd.parameters[i] in refs:
                cmd.parameters[i] = refs[cmd.parameters[i]]
            elif _RepresentsInt(cmd.parameters[i]):
                cmd.parameters[i] = int(cmd.parameters[i], 0)
            elif _RepresentsFloat(cmd.parameters[i]):
                # store floats as their raw big-endian IEEE-754 bit pattern
                cmd.parameters[i] = struct.unpack('>L', struct.pack('>f', float(cmd.parameters[i].rstrip('f'))))[0]
    return cmds
class Command:
def __init__(self, command=0, parameters=[], pushBit=False):
self.command = command
self.parameters = parameters
self.pushBit = pushBit
self.paramSize = 0
self.commandPosition = 0
self.debugString = None
def __len__(self):
return getSizeFromFormat(COMMAND_FORMAT[self.command]) + 1
def read(self, byteBuffer, pos):
self.command = int(byteBuffer[pos]) & 0x7F
self.pushBit = (int(byteBuffer[pos]) & 0x80) != 0
if self.command | |
list of FairVariable objects. They correspond to triples in the RDF:
The name is stored as a blanknode with a hasInputVar relation to the step.
This blanknode has an RDF:type, PPLAN:Variable; and an RDFS:comment, a string literal
representing the type (i.e. int, str, float) of the variable.
"""
return [self._get_variable(var_ref) for var_ref
in self.get_attribute(namespaces.PPLAN.hasInputVar, return_list=True)]
@inputs.setter
def inputs(self, variables: List[FairVariable]):
self.remove_attribute(namespaces.PPLAN.hasInputVar)
for variable in variables:
self._add_variable(variable, namespaces.PPLAN.hasInputVar)
@property
def outputs(self) -> List[FairVariable]:
"""Outputs for this step.
Outputs are a list of FairVariable objects. They correspond to triples in the RDF:
The name is stored as a blanknode with a hasOutputVar relation to the step.
This blanknode has an RDF:type, PPLAN:Variable; and an RDFS:comment, a string literal
representing the type (i.e. int, str, float) of the variable.
"""
return [self._get_variable(var_ref) for var_ref
in self.get_attribute(namespaces.PPLAN.hasOutputVar, return_list=True)]
@outputs.setter
def outputs(self, variables: List[FairVariable]):
self.remove_attribute(namespaces.PPLAN.hasOutputVar)
for variable in variables:
self._add_variable(variable, namespaces.PPLAN.hasOutputVar)
def validate(self, shacl=False):
"""Validate step.
Check whether this step rdf has sufficient information required of
a step in the Plex ontology.
"""
conforms = True
log = ''
if not self.is_pplan_step:
log += 'Step RDF does not say it is a pplan:Step\n'
conforms = False
if not self.description:
log += 'Step RDF has no dcterms:description\n'
conforms = False
if not self.label:
log += 'Step RDF has no rdfs:label\n'
conforms = False
if self.is_manual_task == self.is_script_task:
log += 'Step RDF must be either a bpmn:ManualTask or a bpmn:ScriptTask\n'
conforms = False
assert conforms, log
# Now validate against the PLEX shacl shapes file, if requested
if shacl:
self.shacl_validate()
    def register_workflow(self, workflow):
        """Register workflow that this step is part of.

        Registered workflows are kept in the private set self._workflows so
        that publishing this step can propagate its new URI into them.
        """
        self._workflows.add(workflow)
def _update_registered_workflows(self):
"""Update the workflows that this step is part of.
NB: it could be that a step was deleted from a workflow
"""
self._workflows = {workflow for workflow in self._workflows
if self in workflow}
def publish_as_nanopub(self, use_test_server=False, **kwargs):
"""
Publish this rdf as a nanopublication.
Args:
use_test_server (bool): Toggle using the test nanopub server.
kwargs: Keyword arguments to be passed to [nanopub.Publication.from_assertion](
https://nanopub.readthedocs.io/en/latest/reference/publication.html#
nanopub.publication.Publication.from_assertion).
This allows for more control over the nanopublication RDF.
Returns:
a dictionary with publication info, including 'nanopub_uri', and 'concept_uri'
"""
self._update_registered_workflows()
old_uri = self.uri
self._publish_as_nanopub(use_test_server=use_test_server, **kwargs)
var_names = [var.name for var in (self.inputs + self.outputs)]
for workflow in self._workflows:
replace_in_rdf(workflow.rdf, oldvalue=rdflib.URIRef(old_uri),
newvalue=rdflib.URIRef(self.uri))
# Similarly replace old URIs for variable name bindings
# in both this step and any workflow objects that use it.
published_step_uri_defrag, _ = urldefrag(self.uri)
for var_name in var_names:
old_var_uri = old_uri + '#' + var_name
new_var_uri = published_step_uri_defrag + '#' + var_name
replace_in_rdf(self.rdf, oldvalue=rdflib.URIRef(old_var_uri),
newvalue=rdflib.URIRef(new_var_uri))
replace_in_rdf(workflow.rdf, oldvalue=rdflib.URIRef(old_var_uri),
newvalue=rdflib.URIRef(new_var_uri))
del workflow._steps[old_uri]
workflow._steps[self.uri] = self
def __str__(self):
"""
Returns string representation of this FairStep object.
"""
s = f'Step URI = {self._uri}\n'
s += self._rdf.serialize(format='trig').decode('utf-8')
return s
def is_fairstep(label: str = None, is_pplan_step: bool = True, is_manual_task: bool = False,
                is_script_task: bool = True, **kwargs):
    """Mark a function as a FAIR step to be used in a fair workflow.

    Use as decorator to mark a function as a FAIR step. Set properties of the fair step using
    arguments to the decorator.

    The raw code of the function will be used to set the description of the fair step.
    The type annotations of the input arguments and return statement will be used to
    automatically set inputs and outputs of the FAIR step.

    Args:
        label (str): Label of the fair step (corresponds to rdfs:label predicate)
        is_pplan_step (str): Denotes whether this step is a pplan:Step
        is_manual_task (str): Denotes whether this step is a bpmn.ManualTask
        is_script_task (str): Denotes whether this step is a bpmn.ScriptTask

    All additional arguments are expected to correspond to input parameters of the decorated
    function, and are used to provide extra semantic types for that parameter. For example,
    consider the following decorated function:

    @is_fairstep(label='Addition', a='http://www.example.org/number', out='http://www.example.org/float')
    def add(a:float, b:float) -> float:
        return a + b

    1. Note that using 'a' as parameter to the decorator allows the user to provide a URI for a semantic type
    that should be associated with the function's input parameter, 'a'. This can be either a string, an
    rdflib.URIRef, or a list of these.
    2. Note that the return parameter is referred to using 'returns', because it does not otherwise have a name.
    In this case, the function only returns one value. However, if e.g. a tuple of 3 values were returned,
    you could use a tuple for 'returns' in the decorator arguments too. For example:
    out=('http://www.example.org/mass', 'http://www.example.org/distance')
    This would set the semantic type of the first return value as some 'mass' URI, and the second
    return value as 'distance'. Lists can also be provided instead of a single URI, if more than one
    semantic type should be associated with a given output. Any element of this tuple can also be
    set to None, if no semantic type is desired for it.
    3. The return parameter name (by default 'returns') can be changed if necessary, by modifying
    the IS_FAIRSTEP_RETURN_VALUE_PARAMETER_NAME constant.
    """
    def _modify_function(func):
        """
        Store FairStep object as _fairstep attribute of the function. Use inspection to get the
        description, inputs, and outputs of the step based on the function specification.
        Returns this function decorated with the noodles schedule decorator.
        """
        # Description of step is the raw function code
        description = inspect.getsource(func)
        inputs = _extract_inputs_from_function(func, kwargs)
        outputs = _extract_outputs_from_function(func, kwargs)
        # Placeholder URI until the step is actually published
        fairstep = FairStep(uri='http://www.example.org/unpublished-'+func.__name__,
                            label=label,
                            description=description,
                            is_pplan_step=is_pplan_step,
                            is_manual_task=is_manual_task,
                            is_script_task=is_script_task,
                            language=LINGSYS_PYTHON,
                            inputs=inputs,
                            outputs=outputs)
        def _add_logging(func):
            @functools.wraps(func)
            def _wrapper(*func_args, **func_kwargs):
                # Get the arg label/value pairs as a dict (for both args and kwargs)
                func_args_dict = dict(zip(inspect.getfullargspec(func).args, func_args))
                all_args = {**func_args_dict, **func_kwargs}
                # Execute step (with timing)
                t0 = datetime.now()
                if is_manual_task:
                    # Manual tasks are routed to the manual-step assistant
                    # instead of running the function body
                    execution_result = manual_assistant.execute_manual_step(fairstep)
                else:
                    execution_result = func(*func_args, **func_kwargs)
                t1 = datetime.now()
                # Log step execution
                prov_logger.add(StepRetroProv(step=fairstep, step_args=all_args, output=execution_result, time_start=t0, time_end=t1))
                return execution_result
            return _wrapper
        # Expose the FairStep on the function so workflow construction can find it
        func._fairstep = fairstep
        return noodles.schedule(_add_logging(func))
    return _modify_function
def _extract_inputs_from_function(func, additional_params) -> List[FairVariable]:
    """
    Extract inputs from function using inspection. Each argument name becomes
    the name of a FairVariable; its type hint (if present) becomes the
    variable's computational type, and any matching entry in
    additional_params supplies its semantic types.
    """
    spec = inspect.getfullargspec(func)
    fair_inputs = []
    for name in spec.args:
        if name in spec.annotations:
            computational_type = spec.annotations[name].__name__
        else:
            if WARN_FOR_TYPE_HINTING:
                warn(f'Function input argument {name} does not have type hinting, '
                     'FAIR step function arguments without type hinting will not have a computational '
                     'type associated with them see https://docs.python.org/3/library/typing.html')
            computational_type = None
        fair_inputs.append(FairVariable(
            name=name,
            computational_type=computational_type,
            semantic_types=additional_params.get(name)))
    return fair_inputs
def _extract_outputs_from_function(func, additional_params) -> List[FairVariable]:
    """
    Extract outputs from function using inspection. The name will be
    out{output_number}. The corresponding return type hint will be the type
    of the variable.

    Raises:
        ValueError: when the decorator's return-semantic-types tuple length
            does not match the number of returned values.
    """
    annotations = get_type_hints(func)
    try:
        return_annotation = annotations['return']
    except KeyError:
        if WARN_FOR_TYPE_HINTING:
            # FIX: the original concatenated 'captured' + 'correctly' with no
            # separating space in the emitted warning.
            warn('Function output does not have type hinting, '
                 'The outputs will not have a computational '
                 'type associated with them. Also multiple outputs will not be captured '
                 'correctly. See https://docs.python.org/3/library/typing.html')
        return_annotation = None
    if _is_generic_tuple(return_annotation):
        # Tuple return: one FairVariable per tuple element
        return_sem_types = additional_params.get(IS_FAIRSTEP_RETURN_VALUE_PARAMETER_NAME)
        num_return_args = len(return_annotation.__args__)
        if return_sem_types is not None:
            if len(return_sem_types) != num_return_args:
                # FIX: only the first fragment of this message was an
                # f-string in the original, so '{num_return_args}' appeared
                # literally in the error text.
                raise ValueError('"out" parameter to is_fairstep decorator must be a '
                                 'tuple of length number of returned values (in this case, '
                                 f'{num_return_args}).')
        else:
            return_sem_types = [None] * num_return_args
        return [FairVariable(
            name='out' + str(i + 1),
            computational_type=annotation.__name__,
            semantic_types=return_sem_types[i])
            for i, annotation in enumerate(return_annotation.__args__)]
    else:
        # Single return value (or none annotated)
        computational_type = return_annotation.__name__ if return_annotation is not None else None
        return [FairVariable(
            name='out1',
            computational_type=computational_type,
            semantic_types=additional_params.get(IS_FAIRSTEP_RETURN_VALUE_PARAMETER_NAME))]
def _is_generic_tuple(type_):
"""
Check whether a type annotation is Tuple
"""
if hasattr(typing, '_GenericAlias'):
# 3.7
# _GenericAlias cannot be imported from typing, because it doesn't
# exist in all versions, and it will fail the type check in those
# versions as well, so we ignore it.
return (isinstance(type_, typing._GenericAlias)
and type_.__origin__ is tuple)
else:
# 3.6 and earlier
# GenericMeta cannot be imported from typing, because it doesn't
# exist in all versions, and it will fail the type check in those
# versions as well, so we ignore it.
return (isinstance(type_, | |
key in ride['lifts']:
item = collection.find_one({'_id': key})
if item['status'] in ['PENDING', 'ACTIVE']:
collection.update({'_id': key}, {'$set': {'status': 'CANCELLED'}}, upsert = False)
new_item = collection.find_one({'_id': key})
after_updated_lift(new_item, item)
#===============================================================================
# before_insert_reports ()
#===============================================================================
def before_insert_reports(reports):
    """Eve hook: stamp each report-to-insert with the current Unix time."""
    items = reports if isinstance(reports, list) else [reports]
    for report in items:
        report['timestamp'] = int(time.time())
#===============================================================================
# after_insert_reports ()
#===============================================================================
def after_insert_reports(reports):
    """Eve hook: refresh per-site report info for every inserted report."""
    items = reports if isinstance(reports, list) else [reports]
    for report in items:
        update_sites_reports_info(report)
#===============================================================================
# after_replace_report ()
#===============================================================================
def after_replace_report(updates, report):
    """Eve hook: refresh per-site report info after a report is replaced."""
    update_sites_reports_info(report)
#===============================================================================
# after_update_report ()
#===============================================================================
def after_update_report(updates, report):
    """Eve hook: refresh per-site report info after a report is updated."""
    update_sites_reports_info(report)
#===============================================================================
# after_delete_report ()
#===============================================================================
def after_delete_report(report):
    """Eve hook: refresh per-site report info after a report is deleted."""
    update_sites_reports_info(report)
#===============================================================================
# before_insert_messages ()
#===============================================================================
def before_insert_messages(messages):
    """Eve hook: timestamp each message and denormalize the sender's and
    receiver's display names onto it.

    NOTE(review): assumes sender_id/receiver_id always resolve to existing
    users; a missing user would make find_one return None and raise
    TypeError below -- confirm upstream validation guarantees this.
    """
    items = messages if isinstance(messages, list) else [messages]
    users = app.data.driver.db['users']
    for message in items:
        message['timestamp'] = int(time.time())
        sender = users.find_one({'_id': message['sender_id']})
        receiver = users.find_one({'_id': message['receiver_id']})
        message['sender_name'] = sender['name']
        message['receiver_name'] = receiver['name']
#===============================================================================
# after_insert_messages ()
#===============================================================================
def after_insert_messages(messages):
    """Eve hook: post-process each inserted message and notify its receiver."""
    items = messages if isinstance(messages, list) else [messages]
    for message in items:
        post_process_data('messages', request, message)
        notification_thread_message(message['receiver_id'], message)
#===============================================================================
# remove_private_info ()
#===============================================================================
def remove_private_info(item, resource, request):
    """Strip private fields (currently only 'password') from user payloads.

    NOTE(review): the unconditional `return` in the middle intentionally
    disables the second, auth-aware variant below it (dead code), per the
    surrounding hack comments. Do not remove the dead block without
    revisiting the auth scheme discussion.
    """
    ############################################################################
    #### Temporary, unsecure hack to remove the password from /users endpoint
    ############################################################################
    if resource == 'users':
        fields = [ 'password' ]
        if request.method == 'GET' and request.url.split('/')[-1] == 'users':
            remove_fields(item, fields)
    ############################################################################
    #### Temporary, unsecure hack to return the password in case of sign in
    #### with social id! We should probably switch to a different auth scheme
    #### than Basic Auth which requires client always sending username/password
    return
    ############################################################################
    if resource == 'users':
        fields = [ 'password' ]
        auth = request.authorization
        if request.method == 'POST' or \
           (auth and 'email' in item and item['email'] == auth['username']):
            # User is authorized
            return
        else:
            remove_fields(item, fields)
#===============================================================================
# filter_data ()
#===============================================================================
def filter_data(resource, request, data):
    """Strip Eve bookkeeping fields, then remove private per-user info.

    Presumably mutates `data` in place via apply_function (confirm against
    its definition); `data` is also returned for convenience.
    """
    apply_function(data, recursively_remove_fields, EVE_EXTRA_FIELDS)
    apply_function(data, remove_private_info, resource, request)
    return data
#===============================================================================
# finalize_payload ()
#===============================================================================
def finalize_payload(resource, request, response):
    """Eve hook: decode, flatten and filter the outgoing payload, wrapping a
    bare list into a {resource: [...]} envelope before re-serialising."""
    payload = str_to_json(response.get_data())
    payload = filter_data(resource, request, flatten_data(payload))
    if isinstance(payload, list):
        payload = {resource: payload}
    response.set_data(json_to_str(payload))
#===============================================================================
# add_location_header ()
#===============================================================================
def add_location_header(resource, request, response):
    """Eve hook (POST only): set the HTTP Location header for the created item.

    Handles both response shapes: a single created document, or a
    {resource: [item]} envelope with exactly one item. When several items
    were created no single Location applies, so none is set.
    """
    assert request.method == 'POST'
    data = str_to_json(response.get_data())
    if 200 <= response.status_code <= 299:
        # Single item
        if resource not in data:
            response.headers.set('Location', '%s/%s' % (request.url, data['_id']))
        # List with one item
        elif resource in data and len(data) == 1 and len(data[resource]) == 1:
            # BUG FIX: the original read data[0]['_id'], but data is a dict
            # here; the created item lives at data[resource][0].
            response.headers.set('Location', '%s/%s' % (request.url, data[resource][0]['_id']))
        # Multiple items were created, cannot set 'Location' header
        else:
            pass
    response.set_data(json_to_str(data))
#===========================================================================
# post_process_trip ()
#===========================================================================
def post_process_trip(trip, request):
    """Expand car-pooling reference ids in each trip step into full
    embedded documents fetched from the database.

    For every CAR_POOLING step, 'driver_id'/'car_id' are resolved against
    the 'users'/'cars' collections, the fetched documents are filtered and
    embedded under 'driver'/'car', and the raw id fields are removed.
    """
    expansions = {
        # new_field: (collection_name, id_field); id_field is dropped after expansion
        'driver': ('users', 'driver_id'),
        'car': ('cars', 'car_id'),
    }
    for step in trip['steps']:
        transport = step['transport']
        if transport['travel_mode'] != 'CAR_POOLING':
            continue
        for new_field, (collection_name, id_field) in expansions.items():
            collection = app.data.driver.db[collection_name]
            item = collection.find_one({'_id': str_to_oid(transport[id_field])})
            assert item
            filter_data(collection_name, request, item)
            transport[new_field] = item
            transport.pop(id_field)
#===========================================================================
# post_process_status ()
#===========================================================================
def post_process_status(lift, request):
    """Mark a lift as REVIEWED when the requesting user (driver or
    passenger, taken from the query string) has already left feedback."""
    args = request.args
    if 'driver_id' not in args and 'passenger_id' not in args:
        return
    user_id = args['driver_id'] if 'driver_id' in args else args['passenger_id']
    feedbacks = app.data.driver.db['feedbacks']
    existing = feedbacks.find_one({'$and': [{'lift_id': str_to_oid(lift['_id'])},
                                            {'reviewer_id': str_to_oid(user_id)}]})
    if existing:
        lift['status'] = 'REVIEWED'
#===========================================================================
# post_process_lift ()
#===========================================================================
def post_process_lift(lift, request):
    """Post-process a lift: expand its trip's car-pooling references and
    recompute the lift's status for the requesting user."""
    post_process_trip(lift['trip'], request)
    post_process_status(lift, request)
#===========================================================================
# post_process_ride ()
#===========================================================================
def post_process_ride(ride, request):
    """Slim down a ride payload: drop each lift's embedded trip and attach
    the passenger's first profile picture, when one exists.

    NOTE(review): assumes every passenger_id resolves to an existing user
    with a 'pictures' key -- confirm upstream guarantees.
    """
    users_col = app.data.driver.db['users']
    for lift in ride['lifts']:
        lift.pop('trip')
        passenger = users_col.find_one({'_id': str_to_oid(lift['passenger_id'])})
        if passenger['pictures']:
            lift['passenger_img'] = passenger['pictures'][0]['file']
#===========================================================================
# post_process_feedback ()
#===========================================================================
def post_process_feedback(feedback, request):
    """Denormalize reviewer/reviewed user names onto a feedback document.

    NOTE(review): the output keys are asymmetric ('reviewer' vs
    'reviewed_name'); kept byte-identical because clients may depend on them.
    """
    users = app.data.driver.db['users']
    reviewer_doc = users.find_one({'_id': str_to_oid(feedback['reviewer_id'])})
    reviewed_doc = users.find_one({'_id': str_to_oid(feedback['reviewed_id'])})
    feedback['reviewer'] = reviewer_doc['name']
    feedback['reviewed_name'] = reviewed_doc['name']
#===============================================================================
# post_process_data ()
#===============================================================================
def post_process_data(resource, request, data):
    """Apply the resource-specific post-processor (if any), then convert
    ObjectIds to strings and strip Eve bookkeeping fields."""
    processors = {
        'trips': post_process_trip,
        'lifts': post_process_lift,
        'rides': post_process_ride,
        'feedbacks': post_process_feedback,
    }
    processor = processors.get(resource)
    if processor is not None:
        apply_function(data, processor, request)
    objectids_to_strings(data)
    recursively_remove_fields(data, EVE_EXTRA_FIELDS)
    return data
#===============================================================================
# find_price ()
#===============================================================================
def find_price(leg, name):
    """Return the fare for a single leg based on its travel mode; 0 for unpriced modes."""
    mode = leg['transport']['travel_mode']
    if mode == 'METRO':
        return metro_fare(name)
    if mode == 'BUS':
        return bus_fare(leg, name)
    if mode == 'RAIL':
        return rail_fare(leg['distance'], name)
    if mode == 'TRAM':
        return tram_fare(leg, name)
    if mode == 'CAR_POOLING':
        return carpooling_fare(leg['distance'], name)
    return 0
#===============================================================================
# set_custom_payload ()
#===============================================================================
def set_custom_payload(resource, request, response):
items = []
if (resource in REQUIRED_PARAMS and
not set(request.args).issuperset(REQUIRED_PARAMS[resource])):
msg = 'Missing or invalid parameters' + \
' (required: %s)' % ', '.join(REQUIRED_PARAMS[resource])
response_set_error(response, 422, msg)
return
#---------------------------------------------------------------------------
# /trips
#---------------------------------------------------------------------------
if resource == 'trips':
trip_date = 'date=' + timestamp_to_datetime(request.args['start_date'], '%Y%m%d')
trip_time = 'time=' + timestamp_to_datetime(request.args['start_date'], '%H:%M:%S')
start_lat = 'slat=' + request.args['start_lat']
start_lon = 'slng=' + request.args['start_lon']
end_lat = 'tlat=' + request.args['end_lat']
end_lon = 'tlng=' + request.args['end_lon']
in_service = False
fetched_site = None
# Fetch collection 'sites' from db
sites_collection = app.data.driver.db['sites']
cursor = sites_collection.find({})
# For every site in db
for site in cursor:
bb_minlat = site['bounding_box']['min_lat']
bb_minlon = site['bounding_box']['min_lon']
bb_maxlat = site['bounding_box']['max_lat']
bb_maxlon = site['bounding_box']['max_lon']
s_lat = float(request.args['start_lat'])
s_lon = float(request.args['start_lon'])
t_lat = float(request.args['end_lat'])
t_lon = float(request.args['end_lon'])
# Check if given coordinates are within a site's bounding box and retrieve url
if inside_bounding_box(bb_minlat, bb_minlon, bb_maxlat, bb_maxlon, s_lat, s_lon, t_lat, t_lon):
in_service = True
base_url = site['url']
name = site['name']
currency = site['price_info']['currency']
fetched_site = site
# If within a site's bounding box
if in_service:
full_url = "%s?%s&%s&%s&%s&%s&%s" % (base_url, trip_date, trip_time, start_lat, start_lon, end_lat, end_lon)
app.logger.debug('%s' % (full_url))
try:
# GET request to Route Planning
r = requests.get(full_url, timeout=38)
json_extended_response = json.loads(r.text)
if json_extended_response['result'] == True:
json_response = json_extended_response['data']
else:
# TODO: Return error here
app.logger.debug('%s' % (json_extended_response['error']['message']))
json_response = []
except requests.exceptions.Timeout:
# TODO: Return error here
app.logger.debug('Request to %s time out' % (full_url))
json_response = []
else:
# TODO: Return error here
app.logger.debug('Coordinates outside the site boundaries')
json_response = []
auth = request.authorization
collection = app.data.driver.db['users']
self_user = collection.find_one({'email': auth['username']})
lifts_collection = app.data.driver.db['lifts']
# Find all future lifts(no past lifts) created by user (as passenger) and confirmed by driver,if any
fetched_lifts = list(lifts_collection.find({ '$and': [ {'passenger_id': self_user['_id']} , \
{'status': 'ACTIVE'} , \
{'start_point.date': {'$gte': int(time.time())}} , \
{'_deleted': {'$ne': True}}] }))
# Find rides for which the user has already created a lift for
fetched_rides = []
if fetched_lifts:
collection = app.data.driver.db['rides']
for fetched_lift in fetched_lifts:
lookup = {'_id': fetched_lift['ride_id']}
fetched_ride = collection.find_one(lookup)
fetched_rides.append(fetched_ride)
trips = []
# For each trip
for trip in json_response:
steps = []
discard_trip = False
bus_list = []
travel_mode_list = []
# For each leg
for leg in trip['legs']:
if leg['transport']['travel_mode'] in EXTENDED_TRAVEL_MODES:
leg['transport']['travel_mode'] = EXTENDED_TRAVEL_MODES[leg['transport']['travel_mode']]
if leg['transport']['travel_mode'] in ALLOWED_TRAVEL_MODES:
if leg['transport']['travel_mode'] not in travel_mode_list:
travel_mode_list.append(leg['transport']['travel_mode'])
# If leg is carpooling
intermediate_points = []
if leg['transport']['travel_mode'] == 'CAR_POOLING':
# Set custom ride_id if carpooling is Mobalt shuttle
ride_id = leg['transport']['ride_id'] if leg['transport']['ride_id'] else '88e50050223f9badec44f5ff'
collection = app.data.driver.db['rides']
item = collection.find_one({'_id': str_to_oid(ride_id)})
# If user is not the driver of the ride and has not already created a lift for that ride
if oid_to_str(self_user['_id']) != oid_to_str(item['driver_id']) and item not in fetched_rides:
# If ride is external
if 'extras' in item:
# If ride is of other providers except Mobalt
if leg['transport']['ride_id']:
public_uri = item['extras']['url']
# If ride is of Mobalt provider
else:
user_name = self_user['name'].split(' ')[0]
user_surname = self_user['name'].split(' ')[-1]
user_email = self_user['email']
user_phone = self_user['phone']
start_address = leg['route']['points'][0]['address'] if leg['route']['points'][0]['address'] else 'Unknown address'
starting_stop_name = leg['route']['points'][0]['address'] if leg['route']['points'][0]['address'] else 'Unknown address'
starting_stop_time = leg['route']['points'][0]['departure_time']
arrival_stop_name = leg['route']['points'][-1]['address'] if leg['route']['points'][0]['address'] else 'Unknown address'
# Compose Mobalt URL parameters
mobalt_url_params = ('&name=%s&surname=%s&email=%s&phone=%s&start_address=%s&starting_stop_name=%s&starting_stop_time=%s&arrival_stop_name=%s' % (user_name, user_surname, user_email, user_phone, start_address, starting_stop_name, starting_stop_time, arrival_stop_name))
public_uri = (leg['transport']['route_url'] + mobalt_url_params)
fetched_site['ride_details']['external'] = fetched_site['ride_details']['external'] + 1
transport = {
'travel_mode': 'CAR_POOLING',
'ride_id': ride_id, # rides['_id'] foreign key
'driver_id': oid_to_str(item['driver_id']), # users['_id'] foreign key
'car_id': oid_to_str(item['car_id']), # cars['_id'] foreign key
'public_uri': public_uri
}
# Dictionary containing information regarding external carpooling bookings
external_booking = {
'uuid': item['extras']['uuid'],
'url': item['extras']['url'],
'username': self_user['email']
}
fetched_site['external_carpooling'].append(external_booking)
# If ride is internal
else:
fetched_site['ride_details']['internal'] = fetched_site['ride_details']['internal'] + 1
transport = {
'travel_mode': 'CAR_POOLING',
'ride_id': ride_id, # rides['_id'] foreign key
'driver_id': oid_to_str(item['driver_id']), # users['_id'] foreign key
'car_id': oid_to_str(item['car_id']) # cars['_id'] foreign key
}
distance = int(float(leg['distance']))
sites_collection.update({'_id': fetched_site['_id']}, fetched_site, upsert = False)
# Else discard the ride
else:
discard_trip = True
break
# Else if leg is PT or feet
else:
# If transport name is empty replace with existing info
if not leg['transport']['short_name'] and not leg['transport']['long_name']:
route_short_name = leg['transport']['travel_mode']
route_long_name = leg['transport']['travel_mode']
elif not leg['transport']['short_name']:
route_short_name = '%s %s' % (leg['transport']['travel_mode'], leg['transport']['long_name'])
route_long_name = leg['transport']['long_name']
elif not leg['transport']['long_name']:
route_short_name = leg['transport']['short_name']
route_long_name = '%s %s' % (leg['transport']['travel_mode'], leg['transport']['short_name'])
else:
route_short_name = leg['transport']['short_name']
| |
# Source repository: dhimmel/bioregistry
# -*- coding: utf-8 -*-
"""Utilities for normalizing prefixes."""
import logging
from functools import lru_cache
from typing import Any, Dict, List, Mapping, Optional, Set, Tuple, Union
from .resource_manager import manager
from .schema import Attributable, Resource
__all__ = [
"get_resource",
"get_name",
"get_description",
"get_preferred_prefix",
"get_mappings",
"get_synonyms",
"get_pattern",
"get_curie_pattern",
"get_namespace_in_lui",
"get_example",
"has_no_terms",
"is_deprecated",
"is_proprietary",
"get_contact",
"get_contact_email",
"get_contact_name",
"get_contact_github",
"get_contact_orcid",
"get_homepage",
"get_repository",
"get_obo_download",
"get_json_download",
"get_owl_download",
"get_version",
"get_versions",
"get_registry_map",
"get_registry_invmap",
"get_banana",
"get_obo_health_url",
# Ontology
"get_provided_by",
"get_provides_for",
"get_part_of",
"get_has_parts",
"get_has_canonical",
"get_canonical_for",
"get_appears_in",
"get_depends_on",
# CURIE handling
"normalize_prefix",
"parse_curie",
"normalize_parsed_curie",
"normalize_curie",
]
logger = logging.getLogger(__name__)
def get_resource(prefix: str) -> Optional[Resource]:
    """Get the Bioregistry entry for the given prefix.

    :param prefix: The prefix to look up, which is normalized with :func:`normalize_prefix`
        before lookup in the Bioregistry
    :returns: The Bioregistry entry dictionary, which includes several keys cross-referencing
        other registries when available.
    """
    # Delegate to the shared resource manager.
    return manager.get_resource(prefix)
def get_name(prefix: str) -> Optional[str]:
    """Get the name for the given prefix, if it's available."""
    # Delegate to the shared resource manager.
    return manager.get_name(prefix)
def get_description(prefix: str, use_markdown=True) -> Optional[str]:
    """Get the description for the given prefix, if available."""
    resource = get_resource(prefix)
    return None if resource is None else resource.get_description(use_markdown=use_markdown)
def get_preferred_prefix(prefix: str) -> Optional[str]:
    """Get the preferred prefix (e.g., with stylization) if it exists.

    :param prefix: The prefix to lookup.
    :returns: The preferred prefix, if annotated in the Bioregistry or OBO Foundry.

    No preferred prefix annotation, defaults to normalized prefix
    >>> get_preferred_prefix("rhea")
    None

    Preferred prefix defined in the Bioregistry
    >>> get_preferred_prefix("wb")
    'WormBase'

    Preferred prefix defined in the OBO Foundry
    >>> get_preferred_prefix("fbbt")
    'FBbt'

    Preferred prefix from the OBO Foundry overridden by the Bioregistry
    (see also https://github.com/OBOFoundry/OBOFoundry.github.io/issues/1559)
    >>> get_preferred_prefix("dpo")
    'DPO'
    """
    # Delegate to the shared resource manager.
    return manager.get_preferred_prefix(prefix)
def get_mappings(prefix: str) -> Optional[Mapping[str, str]]:
    """Get the mappings to external registries, if available."""
    resource = get_resource(prefix)
    return None if resource is None else resource.get_mappings()
def get_synonyms(prefix: str) -> Optional[Set[str]]:
    """Get the synonyms for a given prefix, if available."""
    # Delegate to the shared resource manager.
    return manager.get_synonyms(prefix)
def get_pattern(prefix: str) -> Optional[str]:
    """Get the pattern for the given prefix, if it's available.

    :param prefix: The prefix to look up, which is normalized with :func:`normalize_prefix`
        before lookup in the Bioregistry
    :returns: The pattern for the prefix, if it is available, using the following order of preference:
        1. Custom
        2. MIRIAM
        3. Wikidata
    """
    # Delegate to the shared resource manager.
    return manager.get_pattern(prefix)
def get_namespace_in_lui(prefix: str) -> Optional[bool]:
    """Check if the namespace should appear in the LUI."""
    resource = get_resource(prefix)
    return None if resource is None else resource.get_namespace_in_lui()
def get_appears_in(prefix: str) -> Optional[List[str]]:
    """Return a list of resources in which this resource (has been annotated to) appears.

    This is complementary to :func:`get_depends_on`.

    :param prefix: The prefix to look up
    :returns: The list of resources this prefix has been annotated to appear in. This
        list could be incomplete, since curation of these fields can easily get out
        of sync with curation of the resource itself. However, false positives should
        be pretty rare.

    >>> import bioregistry
    >>> assert "bfo" not in bioregistry.get_appears_in("foodon")
    >>> assert "fobi" in bioregistry.get_appears_in("foodon")
    """
    # Delegate to the shared resource manager.
    return manager.get_appears_in(prefix)
def get_depends_on(prefix: str) -> Optional[List[str]]:
    """Return a list of resources that this resource (has been annotated to) depends on.

    This is complementary to :func:`get_appears_in`.

    :param prefix: The prefix to look up
    :returns: The list of resources this prefix has been annotated to depend on. This
        list could be incomplete, since curation of these fields can easily get out
        of sync with curation of the resource itself. However, false positives should
        be pretty rare.

    >>> import bioregistry
    >>> assert "bfo" in bioregistry.get_depends_on("foodon")
    >>> assert "fobi" not in bioregistry.get_depends_on("foodon")
    """
    # Delegate to the shared resource manager.
    return manager.get_depends_on(prefix)
def get_has_canonical(prefix: str) -> Optional[str]:
    """Get the canonical prefix.

    If two (or more) stand-alone resources both provide for the same
    semantic space, but none of them have a first-party claim to the
    semantic space, then the ``has_canonical`` relationship is used
    to choose a preferred prefix. This is different than the
    ``provides`` relationship, which is appropriate when it's obvious
    that one resource has a full claim to the semantic space.

    :param prefix: The prefix to lookup.
    :returns: The canonical prefix for this one, if one is annotated.
        This is the inverse of :func:`get_canonical_for`.

    >>> get_has_canonical("refseq")
    'ncbiprotein'
    >>> get_has_canonical("chebi")
    None
    """
    # Delegate to the shared resource manager.
    return manager.get_has_canonical(prefix)
def get_canonical_for(prefix: str) -> Optional[List[str]]:
    """Get the prefixes for which this is annotated as canonical.

    :param prefix: The prefix to lookup.
    :returns: The prefixes for which this is annotated as canonical.
        This is the inverse of :func:`get_has_canonical`.

    >>> "refseq" in get_canonical_for("ncbiprotein")
    True
    >>> get_canonical_for("chebi")
    []
    """
    # Delegate to the shared resource manager.
    return manager.get_canonical_for(prefix)
def get_identifiers_org_prefix(prefix: str) -> Optional[str]:
    """Get the identifiers.org prefix if available.

    :param prefix: The prefix to lookup.
    :returns: The Identifiers.org/MIRIAM prefix corresponding to the prefix, if mappable.

    >>> import bioregistry
    >>> bioregistry.get_identifiers_org_prefix('chebi')
    'chebi'
    >>> bioregistry.get_identifiers_org_prefix('ncbitaxon')
    'taxonomy'
    >>> assert bioregistry.get_identifiers_org_prefix('MONDO') is None
    """
    resource = manager.get_resource(prefix)
    return None if resource is None else resource.get_identifiers_org_prefix()
def get_n2t_prefix(prefix: str) -> Optional[str]:
    """Get the name-to-thing prefix if available.

    :param prefix: The prefix to lookup.
    :returns: The Name-to-Thing prefix corresponding to the prefix, if mappable.

    >>> import bioregistry
    >>> bioregistry.get_n2t_prefix('chebi')
    'chebi'
    >>> bioregistry.get_n2t_prefix('ncbitaxon')
    'taxonomy'
    >>> assert bioregistry.get_n2t_prefix('MONDO') is None
    """
    # Delegate to the cross-registry mapping on the shared resource manager.
    return manager.get_mapped_prefix(prefix, "n2t")
def get_wikidata_prefix(prefix: str) -> Optional[str]:
    """Get the wikidata prefix if available.

    :param prefix: The prefix to lookup.
    :returns: The Wikidata prefix (i.e., property identifier) corresponding to the prefix, if mappable.

    >>> get_wikidata_prefix('chebi')
    'P683'
    >>> get_wikidata_prefix('ncbitaxon')
    'P685'
    """
    # Delegate to the cross-registry mapping on the shared resource manager.
    return manager.get_mapped_prefix(prefix, "wikidata")
def get_bioportal_prefix(prefix: str) -> Optional[str]:
    """Get the BioPortal prefix if available.

    :param prefix: The prefix to lookup.
    :returns: The BioPortal prefix corresponding to the prefix, if mappable.

    >>> get_bioportal_prefix("chebi")
    'CHEBI'
    >>> get_bioportal_prefix("uniprot")
    None
    >>> get_bioportal_prefix("nope")
    None
    """
    # Delegate to the cross-registry mapping on the shared resource manager.
    return manager.get_mapped_prefix(prefix, "bioportal")
def get_obofoundry_prefix(prefix: str) -> Optional[str]:
    """Get the OBO Foundry prefix if available."""
    resource = get_resource(prefix)
    return None if resource is None else resource.get_obofoundry_prefix()
def get_registry_map(metaprefix: str) -> Dict[str, str]:
    """Get a mapping from the Bioregistry prefixes to prefixes in another registry."""
    # Delegate to the shared resource manager.
    return manager.get_registry_map(metaprefix)
def get_registry_invmap(metaprefix: str) -> Dict[str, str]:
    """Get a mapping from the external registry prefixes to Bioregistry prefixes."""
    # Delegate to the shared resource manager.
    return manager.get_registry_invmap(metaprefix)
def get_obofoundry_uri_prefix(prefix: str) -> Optional[str]:
    """Get the URI prefix for an OBO Foundry entry.

    :param prefix: The prefix to lookup.
    :returns: The OBO PURL URI prefix corresponding to the prefix, if mappable.

    >>> import bioregistry
    >>> bioregistry.get_obofoundry_uri_prefix('go')  # standard
    'http://purl.obolibrary.org/obo/GO_'
    >>> bioregistry.get_obofoundry_uri_prefix('ncbitaxon')  # mixed case
    'http://purl.obolibrary.org/obo/NCBITaxon_'
    >>> assert bioregistry.get_obofoundry_uri_prefix('sty') is None
    """
    resource = get_resource(prefix)
    return None if resource is None else resource.get_obofoundry_uri_prefix()
def get_ols_prefix(prefix: str) -> Optional[str]:
    """Get the OLS prefix if available."""
    resource = get_resource(prefix)
    return None if resource is None else resource.get_ols_prefix()
def get_fairsharing_prefix(prefix: str) -> Optional[str]:
    """Get the FAIRSharing prefix if available.

    :param prefix: The prefix to lookup.
    :returns: The FAIRSharing prefix corresponding to the prefix, if mappable.

    >>> get_fairsharing_prefix("genbank")
    'FAIRsharing.9kahy4'
    """
    # Delegate to the cross-registry mapping on the shared resource manager.
    return manager.get_mapped_prefix(prefix, "fairsharing")
def get_scholia_prefix(prefix: str) -> Optional[str]:
    """Get the Scholia prefix if available.

    :param prefix: The prefix to lookup.
    :returns: The Scholia prefix corresponding to the prefix, if mappable.

    >>> get_scholia_prefix("pubmed")
    'pubmed'
    >>> get_scholia_prefix("pdb")
    None
    """
    resource = get_resource(prefix)
    return None if resource is None else resource.get_scholia_prefix()
def get_banana(prefix: str) -> Optional[str]:
    """Get the optional redundant prefix to go before an identifier.

    A "banana" is an embedded prefix that isn't actually part of the identifier.
    Usually this corresponds to the prefix itself, with some specific stylization
    such as in the case of FBbt. The banana does NOT include a colon ":" at the end.

    :param prefix: The name of the prefix (possibly unnormalized)
    :return: The banana, if the prefix is valid and has an associated banana.

    Explicitly annotated banana
    >>> assert "GO_REF" == get_banana('go.ref')

    Banana imported through OBO Foundry
    >>> assert "FBbt" == get_banana('fbbt')

    Banana inferred for OBO Foundry ontology
    >>> get_banana('chebi')
    'CHEBI'

    No banana, no namespace in LUI
    >>> assert get_banana('pdb') is None
    """
    resource = get_resource(prefix)
    return None if resource is None else resource.get_banana()
def get_default_format(prefix: str) -> Optional[str]:
    """Get the default, first-party URI prefix.

    :param prefix: The prefix to lookup.
    :returns: The first-party URI prefix string, if available.

    >>> import bioregistry
    >>> bioregistry.get_default_format('ncbitaxon')
    'https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?mode=Info&id=$1'
    >>> bioregistry.get_default_format('go')
    'http://amigo.geneontology.org/amigo/term/GO:$1'
    >>> assert bioregistry.get_default_format('nope') is None
    """
    resource = get_resource(prefix)
    return None if resource is None else resource.get_default_format()
def get_miriam_uri_prefix(prefix: str) -> Optional[str]:
"""Get the URI prefix for a MIRIAM entry.
:param prefix: The prefix to lookup.
| |
190, 247, 255],
[102, 188, 249, 255],
[101, 185, 251, 255],
[99, 183, 253, 255],
[98, 181, 255, 255],
[96, 179, 0, 255],
[95, 177, 0, 255],
[93, 174, 0, 255],
[92, 172, 0, 255],
[91, 170, 0, 255],
[89, 167, 0, 255],
[88, 165, 0, 255],
[86, 162, 0, 255],
[85, 160, 0, 255],
[83, 158, 0, 255],
[82, 155, 0, 255],
[80, 153, 0, 255],
[79, 150, 0, 255],
[77, 147, 0, 255],
[76, 145, 0, 255],
[74, 142, 0, 255],
[73, 140, 0, 255],
[71, 137, 0, 255],
[70, 134, 0, 255],
[68, 132, 0, 255],
[67, 129, 0, 255],
[65, 126, 0, 255],
[63, 123, 0, 255],
[62, 121, 0, 255],
[60, 118, 0, 255],
[59, 115, 0, 255],
[57, 112, 0, 255],
[56, 109, 0, 255],
[54, 106, 0, 255],
[53, 104, 0, 255],
[51, 101, 0, 255],
[50, 98, 0, 255],
[48, 95, 0, 255],
[47, 92, 0, 255],
[45, 89, 0, 255],
[43, 86, 0, 255],
[42, 83, 0, 255],
[40, 80, 0, 255],
[39, 77, 0, 255],
[37, 74, 0, 255],
[36, 71, 0, 255],
[34, 68, 0, 255],
[33, 65, 0, 255],
[31, 62, 0, 255],
[29, 59, 0, 255],
[28, 56, 0, 255],
[26, 53, 0, 255],
[25, 50, 0, 255],
[23, 47, 0, 255],
[22, 43, 0, 255],
[20, 40, 0, 255],
[18, 37, 0, 255],
[17, 34, 0, 255],
[15, 31, 0, 255],
[14, 28, 0, 255],
[12, 25, 0, 255],
[11, 22, 0, 255],
[9, 18, 0, 255],
[7, 15, 0, 255],
[6, 12, 0, 255],
[4, 9, 0, 255],
[3, 6, 0, 255],
[1, 3, 0, 255],
[0, 0, 0, 255],
], dtype=np.uint8)
YLGN = np.array([
[229, 0, 0, 255],
[228, 255, 255, 255],
[227, 255, 255, 255],
[225, 255, 255, 255],
[224, 255, 254, 255],
[222, 255, 254, 255],
[221, 255, 254, 255],
[220, 255, 254, 255],
[218, 255, 253, 255],
[217, 255, 253, 255],
[216, 255, 253, 255],
[214, 254, 253, 255],
[213, 254, 252, 255],
[211, 254, 252, 255],
[210, 254, 252, 255],
[209, 254, 252, 255],
[207, 254, 251, 255],
[206, 254, 251, 255],
[204, 254, 251, 255],
[203, 254, 251, 255],
[202, 254, 250, 255],
[200, 254, 250, 255],
[199, 253, 250, 255],
[198, 253, 250, 255],
[196, 253, 249, 255],
[195, 253, 249, 255],
[193, 253, 249, 255],
[192, 253, 249, 255],
[191, 253, 248, 255],
[189, 253, 248, 255],
[188, 253, 248, 255],
[186, 253, 248, 255],
[185, 252, 247, 255],
[184, 252, 246, 255],
[184, 252, 245, 255],
[183, 251, 245, 255],
[182, 251, 244, 255],
[182, 251, 243, 255],
[181, 250, 242, 255],
[180, 250, 241, 255],
[180, 249, 240, 255],
[179, 249, 239, 255],
[178, 249, 238, 255],
[178, 248, 237, 255],
[177, 248, 236, 255],
[176, 248, 235, 255],
[175, 247, 234, 255],
[175, 247, 233, 255],
[174, 246, 232, 255],
[173, 246, 231, 255],
[173, 246, 230, 255],
[172, 245, 229, 255],
[171, 245, 228, 255],
[171, 245, 228, 255],
[170, 244, 227, 255],
[169, 244, 226, 255],
[169, 243, 225, 255],
[168, 243, 224, 255],
[167, 243, 223, 255],
[166, 242, 222, 255],
[166, 242, 221, 255],
[165, 241, 220, 255],
[164, 241, 219, 255],
[164, 241, 218, 255],
[163, 240, 217, 255],
[162, 240, 216, 255],
[162, 239, 214, 255],
[161, 238, 213, 255],
[160, 238, 211, 255],
[160, 237, 210, 255],
[159, 237, 209, 255],
[158, 236, 207, 255],
[158, 236, 206, 255],
[157, 235, 205, 255],
[156, 234, 203, 255],
[156, 234, 202, 255],
[155, 233, 200, 255],
[154, 233, 199, 255],
[154, 232, 198, 255],
[153, 231, 196, 255],
[152, 231, 195, 255],
[152, 230, 193, 255],
[151, 230, 192, 255],
[150, 229, 191, 255],
[150, 228, 189, 255],
[149, 228, 188, 255],
[148, 227, 187, 255],
[148, 227, 185, 255],
[147, 226, 184, 255],
[146, 225, 182, 255],
[146, 225, 181, 255],
[145, 224, 180, 255],
[144, 224, 178, 255],
[144, 223, 177, 255],
[143, 222, 175, 255],
[142, 222, 174, 255],
[142, 221, 173, 255],
[141, 220, 171, 255],
[140, 220, 169, 255],
[140, 219, 168, 255],
[139, 218, 166, 255],
[139, 217, 164, 255],
[138, 217, 163, 255],
[137, 216, 161, 255],
[137, 215, 159, 255],
[136, 215, 158, 255],
[135, 214, 156, 255],
[135, 213, 154, 255],
[134, 212, 153, 255],
[133, 212, 151, 255],
[133, 211, 149, 255],
[132, 210, 148, 255],
[131, 210, 146, 255],
[131, 209, 144, 255],
[130, 208, 143, 255],
[129, 207, 141, 255],
[129, 207, 139, 255],
[128, 206, 137, 255],
[127, 205, 136, 255],
[127, 204, 134, 255],
[126, 204, 132, 255],
[125, 203, 131, 255],
[125, 202, 129, 255],
[124, 202, 127, 255],
[123, 201, 126, 255],
[123, 200, 124, 255],
[122, 199, 122, 255],
[121, 199, 121, 255],
[121, 198, 119, 255],
[120, 197, 117, 255],
[119, 196, 116, 255],
[118, 195, 114, 255],
[117, 194, 112, 255],
[116, 194, 110, 255],
[115, 193, 109, 255],
[114, 192, 107, 255],
[113, 191, 105, 255],
[113, 190, 104, 255],
[112, 189, 102, 255],
[111, 188, 100, 255],
[110, 188, 98, 255],
[109, 187, 97, 255],
[108, 186, 95, 255],
[107, 185, 93, 255],
[106, 184, 91, 255],
[106, 183, 90, 255],
[105, 183, 88, 255],
[104, 182, 86, 255],
[103, 181, 84, 255],
[102, 180, 83, 255],
[101, 179, 81, 255],
[100, 178, 79, 255],
[99, 177, 78, 255],
[98, 177, 76, 255],
[98, 176, 74, 255],
[97, 175, 72, 255],
[96, 174, 71, 255],
[95, 173, 69, 255],
[94, 172, 67, 255],
[93, 171, 65, 255],
[92, 170, 64, 255],
[92, 169, 63, 255],
[91, 168, 62, 255],
[90, 167, 61, 255],
[89, 165, 60, 255],
[88, 164, 59, 255],
[87, 163, 58, 255],
[87, 162, 58, 255],
[86, 161, 57, 255],
[85, 159, 56, 255],
[84, 158, 55, 255],
[83, 157, 54, 255],
[83, 156, 53, 255],
[82, 154, 52, 255],
[81, 153, 51, 255],
[80, 152, 50, 255],
[79, 151, 49, 255],
[78, 150, 48, 255],
[78, 148, 47, 255],
[77, 147, 46, 255],
[76, 146, 45, 255],
[75, 145, 44, 255],
[74, 143, 43, 255],
[74, 142, 42, 255],
[73, 141, 41, 255],
[72, 140, 41, 255],
[71, 138, 40, 255],
[70, 137, 39, 255],
[69, 136, 38, 255],
[69, 135, 37, 255],
[68, 134, 36, 255],
[67, 132, 35, 255],
[66, 131, 34, 255],
[66, 130, 33, 255],
[66, 130, 32, 255],
[65, 129, 31, 255],
[65, 128, 29, 255],
[65, 127, 28, 255],
[64, 126, 27, 255],
[64, 125, 26, 255],
[63, 124, 25, 255],
[63, 123, 24, 255],
[63, 123, 23, 255],
[62, 122, 22, 255],
[62, 121, 21, 255],
[62, 120, 19, 255],
[61, 119, 18, 255],
[61, 118, 17, 255],
[60, 117, 16, 255],
[60, 116, 15, 255],
[60, 115, 14, 255],
[59, 115, 13, 255],
[59, 114, 12, 255],
[59, 113, 11, 255],
[58, 112, 10, 255],
[58, 111, 8, 255],
[57, 110, 7, 255],
[57, 109, 6, 255],
[57, 108, 5, 255],
[56, 108, 4, 255],
[56, 107, 3, 255],
[56, 106, 2, 255],
[55, 105, 1, 255],
[55, 104, 0, 255],
[54, 103, 0, 255],
[54, 102, 0, 255],
[53, 101, 0, 255],
[53, 100, 0, 255],
[53, 99, 0, 255],
[52, 97, 0, 255],
[52, 96, 0, 255],
[51, 95, 0, 255],
[51, 94, 0, 255],
[50, 93, 0, 255],
[50, 92, 0, 255],
[49, 91, 0, 255],
[49, 90, 0, 255],
[49, 89, 0, 255],
[48, 88, 0, 255],
[48, 86, 0, 255],
[47, 85, 0, 255],
[47, 84, 0, 255],
[46, 83, 0, 255],
[46, 82, 0, 255],
[46, 81, 0, 255],
[45, 80, 0, 255],
[45, 79, 0, 255],
[44, 78, 0, 255],
[44, 76, 0, 255],
[43, 75, 0, 255],
[43, 74, | |
aa
* np.random.random_sample(
3,
)
+ bb
)
self.set_structure(
lattice=np.array([[10, 0, 0], [0, 10, 0], [0, 0, 10]], np.float),
species=["Si"] * (coordination + 1),
coords=coords,
coords_are_cartesian=False,
)
self.setup_random_indices_local_geometry(coordination)
def setup_random_indices_local_geometry(self, coordination):
"""
Sets up random indices for the local geometry, for testing purposes
:param coordination: coordination of the local geometry
"""
self.icentral_site = 0
self.indices = list(range(1, coordination + 1))
np.random.shuffle(self.indices)
def setup_ordered_indices_local_geometry(self, coordination):
"""
Sets up ordered indices for the local geometry, for testing purposes
:param coordination: coordination of the local geometry
"""
self.icentral_site = 0
self.indices = list(range(1, coordination + 1))
def setup_explicit_indices_local_geometry(self, explicit_indices):
"""
Sets up explicit indices for the local geometry, for testing purposes
:param explicit_indices: explicit indices for the neighbors (set of numbers
from 0 to CN-1 in a given order)
"""
self.icentral_site = 0
self.indices = [ii + 1 for ii in explicit_indices]
    def get_coordination_symmetry_measures(self, only_minimum=True, all_csms=True, optimization=None):
        """
        Returns the continuous symmetry measures of the current local geometry in a dictionary.

        :param only_minimum: if True, keep only the minimum-CSM permutation result for
            each tested geometry; otherwise all permutation results are stored.
        :param all_csms: if True, also store the six centering/scaling CSM variants
            (via :meth:`_update_results_all_csms`) for each geometry.
        :param optimization: optimization scheme forwarded to the symmetry-measure computation.
        :return: the continuous symmetry measures of the current local geometry in a dictionary.
        """
        test_geometries = self.allcg.get_implemented_geometries(len(self.local_geometry.coords))
        # Special case: a single point trivially matches the "S:1" geometry with CSM 0.
        if len(self.local_geometry.coords) == 1:
            if len(test_geometries) == 0:
                return {}
            result_dict = {
                "S:1": {
                    "csm": 0.0,
                    "indices": [0],
                    "algo": "EXPLICIT",
                    "local2perfect_map": {0: 0},
                    "perfect2local_map": {0: 0},
                    "scaling_factor": None,
                    "rotation_matrix": None,
                    "translation_vector": None,
                }
            }
            if all_csms:
                # All six centering/scaling variants are trivially 0 for a single point.
                for csmtype in [
                    "wocs_ctwocc",
                    "wocs_ctwcc",
                    "wocs_csc",
                    "wcs_ctwocc",
                    "wcs_ctwcc",
                    "wcs_csc",
                ]:
                    result_dict["S:1"]["csm_{}".format(csmtype)] = 0.0
                    result_dict["S:1"]["scaling_factor_{}".format(csmtype)] = None
                    result_dict["S:1"]["rotation_matrix_{}".format(csmtype)] = None
                    result_dict["S:1"]["translation_vector_{}".format(csmtype)] = None
            return result_dict
        result_dict = {}
        for geometry in test_geometries:
            # NOTE: self.perfect_geometry is mutated per iteration; _update_results_all_csms
            # relies on it holding the geometry currently being tested.
            self.perfect_geometry = AbstractGeometry.from_cg(
                cg=geometry,
                centering_type=self.centering_type,
                include_central_site_in_centroid=self.include_central_site_in_centroid,
            )
            points_perfect = self.perfect_geometry.points_wcs_ctwcc()
            cgsm = self.coordination_geometry_symmetry_measures(
                geometry, points_perfect=points_perfect, optimization=optimization
            )
            result, permutations, algos, local2perfect_maps, perfect2local_maps = cgsm
            if only_minimum:
                if len(result) > 0:
                    # Keep only the permutation with the smallest symmetry measure.
                    imin = np.argmin([rr["symmetry_measure"] for rr in result])
                    if geometry.algorithms is not None:
                        algo = algos[imin]
                    else:
                        algo = algos
                    result_dict[geometry.mp_symbol] = {
                        "csm": result[imin]["symmetry_measure"],
                        "indices": permutations[imin],
                        "algo": algo,
                        "local2perfect_map": local2perfect_maps[imin],
                        "perfect2local_map": perfect2local_maps[imin],
                        "scaling_factor": 1.0 / result[imin]["scaling_factor"],
                        "rotation_matrix": np.linalg.inv(result[imin]["rotation_matrix"]),
                        "translation_vector": result[imin]["translation_vector"],
                    }
                    if all_csms:
                        self._update_results_all_csms(result_dict, permutations, imin, geometry)
            else:
                result_dict[geometry.mp_symbol] = {
                    "csm": result,
                    "indices": permutations,
                    "algo": algos,
                    "local2perfect_map": local2perfect_maps,
                    "perfect2local_map": perfect2local_maps,
                }
        return result_dict
def _update_results_all_csms(self, result_dict, permutations, imin, geometry):
permutation = permutations[imin]
# Without central site, centered on the centroid (centroid does not include the central site)
# result_dict[geometry.mp_symbol]['csm_wocs_ctwocc'] = \
# result[imin]
pdist = self.local_geometry.points_wocs_ctwocc(permutation=permutation)
pperf = self.perfect_geometry.points_wocs_ctwocc()
sm_info = symmetry_measure(points_distorted=pdist, points_perfect=pperf)
result_dict[geometry.mp_symbol]["csm_wocs_ctwocc"] = sm_info["symmetry_measure"]
result_dict[geometry.mp_symbol]["rotation_matrix_wocs_ctwocc"] = np.linalg.inv(sm_info["rotation_matrix"])
result_dict[geometry.mp_symbol]["scaling_factor_wocs_ctwocc"] = 1.0 / sm_info["scaling_factor"]
result_dict[geometry.mp_symbol]["translation_vector_wocs_ctwocc"] = self.local_geometry.centroid_without_centre
# Without central site, centered on the centroid (centroid includes the central site)
pdist = self.local_geometry.points_wocs_ctwcc(permutation=permutation)
pperf = self.perfect_geometry.points_wocs_ctwcc()
sm_info = symmetry_measure(points_distorted=pdist, points_perfect=pperf)
result_dict[geometry.mp_symbol]["csm_wocs_ctwcc"] = sm_info["symmetry_measure"]
result_dict[geometry.mp_symbol]["rotation_matrix_wocs_ctwcc"] = np.linalg.inv(sm_info["rotation_matrix"])
result_dict[geometry.mp_symbol]["scaling_factor_wocs_ctwcc"] = 1.0 / sm_info["scaling_factor"]
result_dict[geometry.mp_symbol]["translation_vector_wocs_ctwcc"] = self.local_geometry.centroid_with_centre
# Without central site, centered on the central site
pdist = self.local_geometry.points_wocs_csc(permutation=permutation)
pperf = self.perfect_geometry.points_wocs_csc()
sm_info = symmetry_measure(points_distorted=pdist, points_perfect=pperf)
result_dict[geometry.mp_symbol]["csm_wocs_csc"] = sm_info["symmetry_measure"]
result_dict[geometry.mp_symbol]["rotation_matrix_wocs_csc"] = np.linalg.inv(sm_info["rotation_matrix"])
result_dict[geometry.mp_symbol]["scaling_factor_wocs_csc"] = 1.0 / sm_info["scaling_factor"]
result_dict[geometry.mp_symbol]["translation_vector_wocs_csc"] = self.local_geometry.bare_centre
# With central site, centered on the centroid (centroid does not include the central site)
pdist = self.local_geometry.points_wcs_ctwocc(permutation=permutation)
pperf = self.perfect_geometry.points_wcs_ctwocc()
sm_info = symmetry_measure(points_distorted=pdist, points_perfect=pperf)
result_dict[geometry.mp_symbol]["csm_wcs_ctwocc"] = sm_info["symmetry_measure"]
result_dict[geometry.mp_symbol]["rotation_matrix_wcs_ctwocc"] = np.linalg.inv(sm_info["rotation_matrix"])
result_dict[geometry.mp_symbol]["scaling_factor_wcs_ctwocc"] = 1.0 / sm_info["scaling_factor"]
result_dict[geometry.mp_symbol]["translation_vector_wcs_ctwocc"] = self.local_geometry.centroid_without_centre
# With central site, centered on the centroid (centroid includes the central site)
pdist = self.local_geometry.points_wcs_ctwcc(permutation=permutation)
pperf = self.perfect_geometry.points_wcs_ctwcc()
sm_info = symmetry_measure(points_distorted=pdist, points_perfect=pperf)
result_dict[geometry.mp_symbol]["csm_wcs_ctwcc"] = sm_info["symmetry_measure"]
result_dict[geometry.mp_symbol]["rotation_matrix_wcs_ctwcc"] = np.linalg.inv(sm_info["rotation_matrix"])
result_dict[geometry.mp_symbol]["scaling_factor_wcs_ctwcc"] = 1.0 / sm_info["scaling_factor"]
result_dict[geometry.mp_symbol]["translation_vector_wcs_ctwcc"] = self.local_geometry.centroid_with_centre
# With central site, centered on the central site
pdist = self.local_geometry.points_wcs_csc(permutation=permutation)
pperf = self.perfect_geometry.points_wcs_csc()
sm_info = symmetry_measure(points_distorted=pdist, points_perfect=pperf)
result_dict[geometry.mp_symbol]["csm_wcs_csc"] = sm_info["symmetry_measure"]
result_dict[geometry.mp_symbol]["rotation_matrix_wcs_csc"] = np.linalg.inv(sm_info["rotation_matrix"])
result_dict[geometry.mp_symbol]["scaling_factor_wcs_csc"] = 1.0 / sm_info["scaling_factor"]
result_dict[geometry.mp_symbol]["translation_vector_wcs_csc"] = self.local_geometry.bare_centre
    def get_coordination_symmetry_measures_optim(
        self, only_minimum=True, all_csms=True, nb_set=None, optimization=None
    ):
        """
        Returns the continuous symmetry measures of the current local geometry in a dictionary.

        :param only_minimum: if True, keep only the permutation with the lowest CSM per geometry.
        :param all_csms: if True, also compute all CSM variants for the minimal permutation.
        :param nb_set: neighbors set forwarded to the optimized separation-plane algorithm.
        :param optimization: optimization level forwarded to the underlying algorithms.
        :return: the continuous symmetry measures of the current local geometry in a dictionary.
        :raise ValueError: if a geometry mixes algorithms that are neither all
            EXPLICIT_PERMUTATIONS nor all SEPARATION_PLANE.
        """
        cn = len(self.local_geometry.coords)
        test_geometries = self.allcg.get_implemented_geometries(cn)
        # Fast path: when every candidate geometry only uses explicit permutations there is
        # nothing to optimize here, so defer to the standard routine.
        if all([cg.algorithms[0].algorithm_type == EXPLICIT_PERMUTATIONS for cg in test_geometries]):
            return self.get_coordination_symmetry_measures(
                only_minimum=only_minimum, all_csms=all_csms, optimization=optimization
            )
        if not all(
            [all([algo.algorithm_type == SEPARATION_PLANE for algo in cg.algorithms]) for cg in test_geometries]
        ):
            raise ValueError("All algorithms should be EXPLICIT_PERMUTATIONS or SEPARATION_PLANE")
        result_dict = {}
        for geometry in test_geometries:
            # Level 5 is below logging.DEBUG (10): only emitted with a very verbose logger setup.
            logging.log(
                level=5,
                msg="Getting Continuous Symmetry Measure with Separation Plane "
                'algorithm for geometry "{}"'.format(geometry.ce_symbol),
            )
            # NOTE: self.perfect_geometry is reassigned on every iteration; helpers invoked
            # below (e.g. _update_results_all_csms) read this attribute as a side channel.
            self.perfect_geometry = AbstractGeometry.from_cg(
                cg=geometry,
                centering_type=self.centering_type,
                include_central_site_in_centroid=self.include_central_site_in_centroid,
            )
            points_perfect = self.perfect_geometry.points_wcs_ctwcc()
            cgsm = self.coordination_geometry_symmetry_measures_sepplane_optim(
                geometry,
                points_perfect=points_perfect,
                nb_set=nb_set,
                optimization=optimization,
            )
            result, permutations, algos, local2perfect_maps, perfect2local_maps = cgsm
            if only_minimum:
                if len(result) > 0:
                    # Index of the permutation with the smallest symmetry measure.
                    imin = np.argmin([rr["symmetry_measure"] for rr in result])
                    if geometry.algorithms is not None:
                        algo = algos[imin]
                    else:
                        algo = algos
                    result_dict[geometry.mp_symbol] = {
                        "csm": result[imin]["symmetry_measure"],
                        "indices": permutations[imin],
                        "algo": algo,
                        "local2perfect_map": local2perfect_maps[imin],
                        "perfect2local_map": perfect2local_maps[imin],
                        # The inverse of the fitted scaling/rotation is stored.
                        "scaling_factor": 1.0 / result[imin]["scaling_factor"],
                        "rotation_matrix": np.linalg.inv(result[imin]["rotation_matrix"]),
                        "translation_vector": result[imin]["translation_vector"],
                    }
                    if all_csms:
                        self._update_results_all_csms(result_dict, permutations, imin, geometry)
        return result_dict
def coordination_geometry_symmetry_measures(
self,
coordination_geometry,
tested_permutations=False,
points_perfect=None,
optimization=None,
):
"""
Returns the symmetry measures of a given coordination_geometry for a set of permutations depending on
the permutation setup. Depending on the parameters of the LocalGeometryFinder and on the coordination
geometry, different methods are called.
:param coordination_geometry: Coordination geometry for which the symmetry measures are looked for
:return: the symmetry measures of a given coordination_geometry for a set of permutations
:raise: NotImplementedError if the permutation_setup does not exists
"""
if tested_permutations:
tested_permutations = set()
if self.permutations_safe_override:
raise ValueError("No permutations safe override anymore")
csms = []
permutations = []
algos = []
local2perfect_maps = []
perfect2local_maps = []
for algo in coordination_geometry.algorithms:
if algo.algorithm_type == EXPLICIT_PERMUTATIONS:
return self.coordination_geometry_symmetry_measures_standard(
coordination_geometry,
algo,
points_perfect=points_perfect,
optimization=optimization,
)
if algo.algorithm_type == SEPARATION_PLANE:
cgsm = self.coordination_geometry_symmetry_measures_separation_plane(
coordination_geometry,
algo,
tested_permutations=tested_permutations,
points_perfect=points_perfect,
)
csm, perm, algo, local2perfect_map, perfect2local_map = cgsm
csms.extend(csm)
permutations.extend(perm)
algos.extend(algo)
local2perfect_maps.extend(local2perfect_map)
perfect2local_maps.extend(perfect2local_map)
return csms, permutations, algos, local2perfect_maps, perfect2local_maps
def coordination_geometry_symmetry_measures_sepplane_optim(
self, coordination_geometry, points_perfect=None, nb_set=None, optimization=None
):
"""
Returns the symmetry measures of a given coordination_geometry for a set of permutations depending on
the permutation setup. Depending on the parameters of the LocalGeometryFinder and on the coordination
geometry, different methods are called.
:param coordination_geometry: Coordination geometry for which the symmetry measures are looked for
:return: the symmetry measures of a given coordination_geometry for a set of permutations
:raise: NotImplementedError if the permutation_setup does not exists
"""
csms = []
permutations = []
algos = []
local2perfect_maps = []
perfect2local_maps = []
for algo in coordination_geometry.algorithms:
if algo.algorithm_type == SEPARATION_PLANE:
cgsm = self.coordination_geometry_symmetry_measures_separation_plane_optim(
coordination_geometry,
algo,
points_perfect=points_perfect,
nb_set=nb_set,
optimization=optimization,
)
csm, perm, algo, local2perfect_map, perfect2local_map = cgsm
csms.extend(csm)
permutations.extend(perm)
algos.extend(algo)
local2perfect_maps.extend(local2perfect_map)
perfect2local_maps.extend(perfect2local_map)
return csms, permutations, algos, local2perfect_maps, perfect2local_maps
def coordination_geometry_symmetry_measures_standard(
self, coordination_geometry, algo, points_perfect=None, optimization=None
):
"""
Returns the symmetry measures for a set of permutations (whose setup depends on the coordination geometry)
for the coordination geometry "coordination_geometry". Standard implementation looking for the symmetry
measures of each permutation
:param coordination_geometry: The coordination geometry to be investigated
:return: The symmetry measures for the given coordination geometry for each permutation investigated
"""
# permutations_symmetry_measures = np.zeros(len(algo.permutations),
# np.float)
if optimization == 2:
permutations_symmetry_measures = [None] * len(algo.permutations)
permutations = list()
algos = list()
local2perfect_maps = list()
perfect2local_maps = list()
for iperm, perm in enumerate(algo.permutations):
local2perfect_map = {}
perfect2local_map = {}
permutations.append(perm)
for iperfect, ii in enumerate(perm):
perfect2local_map[iperfect] = ii
local2perfect_map[ii] = iperfect
local2perfect_maps.append(local2perfect_map)
perfect2local_maps.append(perfect2local_map)
points_distorted = self.local_geometry.points_wcs_ctwcc(permutation=perm)
sm_info = symmetry_measure(points_distorted=points_distorted, points_perfect=points_perfect)
sm_info["translation_vector"] = self.local_geometry.centroid_with_centre
permutations_symmetry_measures[iperm] = sm_info
algos.append(str(algo))
return (
permutations_symmetry_measures,
permutations,
algos,
local2perfect_maps,
perfect2local_maps,
)
permutations_symmetry_measures = [None] * len(algo.permutations)
permutations = list()
algos = list()
local2perfect_maps = list()
perfect2local_maps = list()
for iperm, perm in enumerate(algo.permutations):
local2perfect_map = {}
perfect2local_map = {}
permutations.append(perm)
for iperfect, ii in enumerate(perm):
perfect2local_map[iperfect] = ii
local2perfect_map[ii] = iperfect
local2perfect_maps.append(local2perfect_map)
perfect2local_maps.append(perfect2local_map)
points_distorted = self.local_geometry.points_wcs_ctwcc(permutation=perm)
sm_info = symmetry_measure(points_distorted=points_distorted, points_perfect=points_perfect)
sm_info["translation_vector"] = self.local_geometry.centroid_with_centre
permutations_symmetry_measures[iperm] = sm_info
algos.append(str(algo))
return (
permutations_symmetry_measures,
permutations,
algos,
local2perfect_maps,
perfect2local_maps,
)
def coordination_geometry_symmetry_measures_separation_plane(
self,
coordination_geometry,
separation_plane_algo,
testing=False,
tested_permutations=False,
points_perfect=None,
):
"""
Returns the symmetry measures of the given coordination geometry "coordination_geometry" using separation
facets to reduce the complexity of the system. Caller to the refined 2POINTS, 3POINTS and other ...
:param coordination_geometry: The coordination geometry to be investigated
:return: The symmetry measures | |
<filename>implicit_constrained_optimization/co_utils.py
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Utils for constrained optimization."""
import tensorflow as tf
def fpr_func(threshold, preds, labels):
  """False positive rate (false acceptance rate) at a hard threshold."""
  hard_positives = tf.cast(preds > threshold, tf.float32)
  negatives = 1 - labels
  return tf.reduce_sum(hard_positives * negatives) / tf.reduce_sum(negatives)
def fpr_func_multi(threshold, preds, labels):
  """Per-class false positive rate at hard per-class thresholds."""
  hard_positives = tf.cast(preds > threshold, tf.float32)
  negatives = 1 - labels
  return tf.reduce_sum(hard_positives * negatives, axis=0) / tf.reduce_sum(
      negatives, axis=0)
def fpr_func_multi_th(threshold, preds, labels):
  """Per-class, per-threshold false positive rate.

  preds/labels: batchsize x classes; threshold: classes x thresholds_per_class.
  Result: classes x thresholds_per_class.
  """
  preds_3d = tf.expand_dims(preds, axis=-1)
  thresholds_3d = tf.expand_dims(threshold, axis=0)
  negatives_3d = 1 - tf.expand_dims(labels, axis=-1)
  hard_positives = tf.cast(preds_3d > thresholds_3d, tf.float32)
  return tf.reduce_sum(hard_positives * negatives_3d, axis=0) / tf.reduce_sum(
      negatives_3d, axis=0)
def tpr_func_multi(threshold, preds, labels):
  """Per-class, per-threshold true positive rate.

  preds/labels: batchsize x classes; threshold: classes x thresholds_per_class.
  Result: classes x thresholds_per_class.
  """
  preds_3d = tf.expand_dims(preds, axis=-1)
  thresholds_3d = tf.expand_dims(threshold, axis=0)
  labels_3d = tf.expand_dims(labels, axis=-1)
  hard_positives = tf.cast(preds_3d > thresholds_3d, tf.float32)
  return tf.reduce_sum(hard_positives * labels_3d, axis=0) / tf.reduce_sum(
      labels_3d, axis=0)
def fnr_func(threshold, preds, labels):
  """False negative rate (false rejection rate) at a hard threshold."""
  hard_negatives = tf.cast(preds <= threshold, tf.float32)
  return tf.reduce_sum(hard_negatives * labels) / tf.reduce_sum(labels)
def fnr_func_multi(threshold, preds, labels):
  """Per-class false negative rate at hard per-class thresholds."""
  hard_negatives = tf.cast(preds <= threshold, tf.float32)
  return tf.reduce_sum(hard_negatives * labels, axis=0) / tf.reduce_sum(
      labels, axis=0)
def fpr_sigmoid_proxy_func(threshold, preds, labels, temperature=1.):
  """Differentiable FPR surrogate using a temperature-scaled sigmoid."""
  soft_positives = tf.sigmoid(temperature * (preds - threshold))
  negatives = 1 - labels
  return tf.reduce_sum(soft_positives * negatives) / tf.reduce_sum(negatives)
def fpr_sigmoid_proxy_func_multi(threshold, preds, labels, temperature=1.):
  """Per-class differentiable FPR surrogate using a sigmoid relaxation."""
  soft_positives = tf.sigmoid(temperature * (preds - threshold))
  negatives = 1 - labels
  return tf.reduce_sum(soft_positives * negatives, axis=0) / tf.reduce_sum(
      negatives, axis=0)
def fnr_sigmoid_proxy_func(threshold, preds, labels, temperature=1.):
  """Differentiable FNR surrogate using a temperature-scaled sigmoid."""
  soft_negatives = tf.sigmoid(-temperature * (preds - threshold))
  return tf.reduce_sum(soft_negatives * labels) / tf.reduce_sum(labels)
def fnr_sigmoid_proxy_func_multi(threshold, preds, labels, temperature=1.):
  """Per-class differentiable FNR surrogate using a sigmoid relaxation."""
  soft_negatives = tf.sigmoid(-temperature * (preds - threshold))
  return tf.reduce_sum(soft_negatives * labels, axis=0) / tf.reduce_sum(
      labels, axis=0)
def fnr_sigmoid_proxy_func_multi_th(threshold, preds, labels, temperature=1.):
  """Per-class, per-threshold differentiable FNR surrogate (sigmoid).

  preds/labels: batchsize x classes; threshold: classes x thresholds_per_class.
  Result: classes x thresholds_per_class.
  """
  preds_3d = tf.expand_dims(preds, axis=-1)
  thresholds_3d = tf.expand_dims(threshold, axis=0)
  labels_3d = tf.expand_dims(labels, axis=-1)
  soft_negatives = tf.sigmoid(-temperature * (preds_3d - thresholds_3d))
  return tf.reduce_sum(soft_negatives * labels_3d, axis=0) / tf.reduce_sum(
      labels_3d, axis=0)
def fp_sigmoid_proxy_func(threshold, preds, labels, temperature=1.):
  """Differentiable count (not rate) of false positives via sigmoid."""
  soft_positives = tf.sigmoid(temperature * (preds - threshold))
  return tf.reduce_sum(soft_positives * (1 - labels))
def fn_sigmoid_proxy_func(threshold, preds, labels, temperature=1.):
  """Differentiable count (not rate) of false negatives via sigmoid."""
  soft_negatives = tf.sigmoid(-temperature * (preds - threshold))
  return tf.reduce_sum(soft_negatives * labels)
def fpr_softplus_proxy_func(threshold, preds, labels, temperature=1.):
  """Differentiable FPR surrogate using a temperature-scaled softplus."""
  soft_positives = tf.math.softplus(temperature * (preds - threshold))
  negatives = 1 - labels
  return tf.reduce_sum(soft_positives * negatives) / tf.reduce_sum(negatives)
def fpr_softplus_proxy_func_multi(threshold, preds, labels, temperature=1.):
  """Per-class differentiable FPR surrogate using a softplus relaxation."""
  soft_positives = tf.math.softplus(temperature * (preds - threshold))
  negatives = 1 - labels
  return tf.reduce_sum(soft_positives * negatives, axis=0) / tf.reduce_sum(
      negatives, axis=0)
def fnr_softplus_proxy_func(threshold, preds, labels, temperature=1.):
  """Differentiable FNR surrogate using a temperature-scaled softplus."""
  soft_negatives = tf.math.softplus(-temperature * (preds - threshold))
  return tf.reduce_sum(soft_negatives * labels) / tf.reduce_sum(labels)
def fnr_softplus_proxy_func_multi(threshold, preds, labels, temperature=1.):
  """Per-class differentiable FNR surrogate using a softplus relaxation."""
  soft_negatives = tf.math.softplus(-temperature * (preds - threshold))
  return tf.reduce_sum(soft_negatives * labels, axis=0) / tf.reduce_sum(
      labels, axis=0)
def fnr_softplus_proxy_func_multi_th(threshold, preds, labels, temperature=1.):
  """Per-class, per-threshold differentiable FNR surrogate (softplus).

  preds/labels: batchsize x classes; threshold: classes x thresholds_per_class.
  Result: classes x thresholds_per_class.
  """
  preds_3d = tf.expand_dims(preds, axis=-1)
  thresholds_3d = tf.expand_dims(threshold, axis=0)
  labels_3d = tf.expand_dims(labels, axis=-1)
  soft_negatives = tf.math.softplus(-temperature * (preds_3d - thresholds_3d))
  return tf.reduce_sum(soft_negatives * labels_3d, axis=0) / tf.reduce_sum(
      labels_3d, axis=0)
def tpr_softplus_proxy_func_multi(threshold, preds, labels, temperature=1.):
  """Per-class, per-threshold recall (TPR) surrogate using softplus.

  preds/labels: batchsize x classes; threshold: classes x thresholds_per_class.
  Result: classes x thresholds_per_class.
  """
  preds_3d = tf.expand_dims(preds, axis=-1)
  thresholds_3d = tf.expand_dims(threshold, axis=0)
  labels_3d = tf.expand_dims(labels, axis=-1)
  soft_positives = tf.math.softplus(temperature * (preds_3d - thresholds_3d))
  return tf.reduce_sum(soft_positives * labels_3d, axis=0) / tf.reduce_sum(
      labels_3d, axis=0)
def tpr_sigmoid_proxy_func_multi(threshold, preds, labels, temperature=1.):
  """Per-class, per-threshold recall (TPR) surrogate using sigmoid.

  preds/labels: batchsize x classes; threshold: classes x thresholds_per_class.
  Result: classes x thresholds_per_class.
  """
  preds_3d = tf.expand_dims(preds, axis=-1)
  thresholds_3d = tf.expand_dims(threshold, axis=0)
  labels_3d = tf.expand_dims(labels, axis=-1)
  soft_positives = tf.sigmoid(temperature * (preds_3d - thresholds_3d))
  return tf.reduce_sum(soft_positives * labels_3d, axis=0) / tf.reduce_sum(
      labels_3d, axis=0)
def fpr_softplus_proxy_func_multi_th(threshold, preds, labels, temperature=1.):
  """Per-class, per-threshold differentiable FPR surrogate (softplus).

  preds/labels: batchsize x classes; threshold: classes x thresholds_per_class.
  Result: classes x thresholds_per_class.
  """
  preds_3d = tf.expand_dims(preds, axis=-1)
  thresholds_3d = tf.expand_dims(threshold, axis=0)
  negatives_3d = 1 - tf.expand_dims(labels, axis=-1)
  soft_positives = tf.math.softplus(temperature * (preds_3d - thresholds_3d))
  return tf.reduce_sum(soft_positives * negatives_3d, axis=0) / tf.reduce_sum(
      negatives_3d, axis=0)
def fpr_sigmoid_proxy_func_multi_th(threshold, preds, labels, temperature=1.):
  """Per-class, per-threshold differentiable FPR surrogate (sigmoid).

  preds/labels: batchsize x classes; threshold: classes x thresholds_per_class.
  Result: classes x thresholds_per_class.
  """
  preds_3d = tf.expand_dims(preds, axis=-1)
  thresholds_3d = tf.expand_dims(threshold, axis=0)
  negatives_3d = 1 - tf.expand_dims(labels, axis=-1)
  soft_positives = tf.sigmoid(temperature * (preds_3d - thresholds_3d))
  return tf.reduce_sum(soft_positives * negatives_3d, axis=0) / tf.reduce_sum(
      negatives_3d, axis=0)
def tp_softplus_proxy_func_multi(threshold, preds, labels, temperature=1.):
  """Per-class, per-threshold differentiable count of true positives (softplus).

  preds/labels: batchsize x classes; threshold: classes x thresholds_per_class.
  Result: classes x thresholds_per_class (unnormalized counts).
  """
  preds_3d = tf.expand_dims(preds, axis=-1)
  thresholds_3d = tf.expand_dims(threshold, axis=0)
  labels_3d = tf.expand_dims(labels, axis=-1)
  soft_positives = tf.math.softplus(temperature * (preds_3d - thresholds_3d))
  return tf.reduce_sum(soft_positives * labels_3d, axis=0)
def tp_sigmoid_proxy_func_multi(threshold, preds, labels, temperature=1.):
  """Per-class, per-threshold differentiable count of true positives (sigmoid).

  preds/labels: batchsize x classes; threshold: classes x thresholds_per_class.
  Result: classes x thresholds_per_class (unnormalized counts).
  """
  preds_3d = tf.expand_dims(preds, axis=-1)
  thresholds_3d = tf.expand_dims(threshold, axis=0)
  labels_3d = tf.expand_dims(labels, axis=-1)
  soft_positives = tf.sigmoid(temperature * (preds_3d - thresholds_3d))
  return tf.reduce_sum(soft_positives * labels_3d, axis=0)
def fp_softplus_proxy_func_multi_th(threshold, preds, labels, temperature=1.):
  """Per-class, per-threshold differentiable count of false positives (softplus).

  preds/labels: batchsize x classes; threshold: classes x thresholds_per_class.
  Result: classes x thresholds_per_class (unnormalized counts).
  """
  preds_3d = tf.expand_dims(preds, axis=-1)
  thresholds_3d = tf.expand_dims(threshold, axis=0)
  negatives_3d = 1 - tf.expand_dims(labels, axis=-1)
  soft_positives = tf.math.softplus(temperature * (preds_3d - thresholds_3d))
  return tf.reduce_sum(soft_positives * negatives_3d, axis=0)
def fp_sigmoid_proxy_func_multi_th(threshold, preds, labels, temperature=1.):
  """Per-class, per-threshold differentiable count of false positives (sigmoid).

  preds/labels: batchsize x classes; threshold: classes x thresholds_per_class.
  Result: classes x thresholds_per_class (unnormalized counts).
  """
  preds_3d = tf.expand_dims(preds, axis=-1)
  thresholds_3d = tf.expand_dims(threshold, axis=0)
  negatives_3d = 1 - tf.expand_dims(labels, axis=-1)
  soft_positives = tf.sigmoid(temperature * (preds_3d - thresholds_3d))
  return tf.reduce_sum(soft_positives * negatives_3d, axis=0)
def fp_softplus_proxy_func(threshold, preds, labels, | |
from bs4 import BeautifulSoup
from selenium import webdriver
from Crawler.utils import utils
from logging.config import fileConfig
import requests
import configparser
import re
import os
import logging
import threading
import math
import time
# initiate config file
# NOTE(review): the path is relative to the current working directory — this assumes the
# process is started from a sibling directory of Crawler/; confirm against the launcher.
config = configparser.ConfigParser()
config.read(os.path.join(os.getcwd(),'..','Crawler','config','config.ini'))
# initiate logger: apply the file-based config, then add an extra crawler.log handler
# whose format includes the thread name (the crawler is multi-threaded).
fileConfig(os.path.join(os.getcwd(),'..','Crawler','config','logger_config.ini'))
fh = logging.FileHandler('crawler.log')
formatter = logging.Formatter('%(asctime)s - %(threadName)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(fh)
# Instantiate the helper; note this rebinds the imported `utils` class name to the instance.
utils = utils(logger)
class FBCrawler:
def __init__(self, run_id):
self.run_id = run_id
self.csv_file_name = ''
self.osn_data = {}
self.osn_data_lock = threading.Lock()
self.osn_data_as_list = []
self.session_and_cookies = []
self.exception_thrown = []
self.json_files_folder = 'first_and_second_data_raw/' + self.run_id + '_' + utils.get_timestamp()
os.makedirs(self.json_files_folder)
self.user_facebook_id = ''
def get_config_and_utils(self):
return config, utils
def initiate_csv_file(self):
"""
initiate the CSV that will contain the input data for the main algorithm
:param file_name_prefix: requested name to the csv file
:return: csv final name (+ timestamp for avoid overloading)
"""
logger.info('Start to initiate the csv file')
columns = ['ID', 'Name', 'TF', 'MF', 'AUA', 'FD', 'CF']
csv_file_name = self.run_id + '_' + utils.get_timestamp() + '.csv'
utils.write_row_in_csv_file('data/'+csv_file_name, columns)
logger.info('Finished to initiate the csv file: ' + csv_file_name)
self.csv_file_name = csv_file_name
return csv_file_name
def login_to_facebook(self, email=config.get('LoginDetails', 'email'), password=config.get('LoginDetails', 'pass')):
"""
login to Facebook according credentials in config.ini
:return: session cookies, session
"""
self.osn_data_lock.acquire()
logger.info('Start to login to Facebook.')
# login page url
url = config.get('LoginDetails', 'login_url')
# initiating the session
try:
s = requests.session()
# login data
post_data = {
'email': email,
'pass': password,
}
attempts_num = config.getint('LoginDetails', 'try_amount')
for i in range(attempts_num):
# post the login data
r = s.post(url, data=post_data, allow_redirects=False)
soup = BeautifulSoup(r.text, features="html.parser")
if r.status_code == 302:
flag, r = utils.customized_get_request(r._next.url, s, r.cookies, 1)
if "התחבר" in r.text:
logger.error('Could not login in: ' + str(i) + '/' + str(attempts_num))
else:
logger.info('Successfully logged in to Facebook.')
self.osn_data_lock.release()
return r.cookies, s
else:
logger.error('Could not login in: ' + str(i)+ '/' +str(attempts_num))
logger.error('Failed to log in!')
if s: s.close()
self.osn_data_lock.release()
return None, None
except Exception as e:
logger.error('Failed to log in!')
self.osn_data_lock.release()
return None, None
def run_selenium_browser(self):
browser = webdriver.Chrome("C:\\Users\\sagiv\\PycharmProjects\\ProjectTry\\Crawler\\chromedriver.exe")
browser.get('http://127.0.0.1:5000/')
browser.find_element_by_id("MoveToManuallyAddPage").click()
browser.execute_script("return nodes;")
def get_facebook_username(self, cookies, session):
succeed_to_get, r = utils.customized_get_request(config.get('UserProfile', 'path_to_main_page'),
session, cookies, 10)
if succeed_to_get:
soup = BeautifulSoup(r.text, 'html.parser')
link_to_profile = soup.findAll(lambda tag: tag.name == 'a' and re.match(r'Profile', tag.text))[0]['href']
only_user_id = re.split('/|\?',link_to_profile)[1]
self.user_facebook_id = only_user_id
    def get_user_first_circle_friends_initial_scan_data(self, cookies, session):
        """
        Method that scrapes the logged in user's friend list and gets each friend's
        id, name and mutual-friend count, dumping one JSON file per result page.
        :param cookies: cookies of logged in session
        :param session: logged in session
        :return: tuple (folder holding the per-page JSON dumps, number of pages processed)
        """
        json_files_folder = 'first_circle_initial_data/'+self.run_id+'_' + utils.get_timestamp()
        os.makedirs(json_files_folder)
        logger.info('Start to crawl user\'s initial friend list for getting friend id, name and MF')
        i = 0
        # Paginate through the friend-list pages until a page with no friend rows is found.
        while True:
            users_friends_first_scan_data = []
            logger.info('Start to work on page: ' + str(i))
            attempt_num = config.getint('UserFriendsList', 'try_amount')
            succeed_to_get, r = utils.customized_get_request(config.get('UserProfile', 'path_to_friends') +
                '?' + config.get('UserProfile', 'friends_list_first_key') + '=' + str(i) +
                '&' + config.get('UserProfile', 'friends_list_second_key') + '=' +
                config.get('UserProfile', 'friends_list_second_value') +
                '&' + config.get('UserProfile', 'friends_list_first_key') + '=' + str(i),
                session, cookies, attempt_num)
            if not succeed_to_get:
                # Skip a page that repeatedly failed rather than aborting the whole scan.
                logger.error('Failed to Get friend page num: ' + str(i))
                i += 1
                continue
            soup = BeautifulSoup(r.text, 'html.parser')
            friend_tag_list = soup.findAll('td', class_=config.get('UserFriendsList', 'friend_row_td_class'))
            if not friend_tag_list:
                # An empty page marks the end of the friend list.
                logger.info('Reached to last page of friend list')
                break
            for friend in friend_tag_list:
                friend_dict = {}
                friend_a_tag = friend.find('a', class_=config.get('UserFriendsList', 'friend_row_a_class'))
                # Get friend's id (the uid query parameter of the profile link)
                link = friend_a_tag['href']
                friend_dict['friend_id'] = re.search(r'\?uid=(\d+)', link).group(1)
                logger.info('Get data of: ' + friend_a_tag.text)
                # Get Friend's name
                friend_dict['friend_name'] = friend_a_tag.text
                friend_mutual_friends_text = friend.find('div', class_=config.get('UserFriendsList',
                                                                                  'friend_row_div_class_mf')).text
                # Get Mutual friends; missing number means no mutual friends shown.
                amount_of_mutual_friends = re.search(r'(\d+)', friend_mutual_friends_text)
                if amount_of_mutual_friends:
                    friend_dict['friend_mutual_friends'] = re.search(r'(\d+)', friend_mutual_friends_text).group(1)
                else:
                    friend_dict['friend_mutual_friends'] = '0'
                # First-circle friends connect directly to the user, hence the '0' marker.
                friend_dict['connecting_friend_id'] = '0'
                users_friends_first_scan_data.append(friend_dict)
            # Persist each page separately so a crash loses at most one page of work.
            utils.save_to_json_file_no_TS(users_friends_first_scan_data, json_files_folder + '/' + str(i))
            logger.info('Finished to work on page: ' + str(i))
            i += 1
        logger.info('Finished to work on getting user\'s initial Friends list, stored data in:' + json_files_folder)
        return json_files_folder, i
def _get_friendship_duration_as_months(self, soup):
"""
helper function which get friendship duration as text represented in FB
:param soup: soup object of friendship page
:return:
"""
friendship_info_tags = soup.findAll(lambda tag: tag.name == 'td' and re.match(r'Your friend since (\w{3,9} \d{4})', tag.text))
for info_tag in friendship_info_tags:
ans = re.search(r'Your friend since (\w{3,9} \d{4})',info_tag.text)
if ans:
return utils.convert_friendship_duration_text_to_month(ans.group(1))
friendship_info_tags = soup.findAll(lambda tag: tag.name == 'td' and re.match(r'Your friend since (\w{3,9})', tag.text))
for info_tag in friendship_info_tags:
ans = re.search(r'Your friend since (\w{3,9})',info_tag.text)
if ans:
return utils.convert_only_month_friendship_duration_text_to_month(ans.group(1))
def __add_data_to_main_structure(self, id, name, tf, mf, aua, fd, cf, thread_id):
"""
function that add to thread_id DS the first\second circle's friend
:param id:
:param name:
:param tf:
:param mf:
:param aua:
:param fd:
:param cf:
:param thread_id:
:return:
"""
self.osn_data_as_list[thread_id][id] = {'Name':name,
'TF': tf,
'MF': mf,
'AUA': aua,
'FD': fd,
'CF': cf,}
def __get_new_session_for_thread(self, thread_id):
"""
if session is corrupted so re-login to facebook
:param thread_id:
:return:
"""
logger.info("Re-Login to facebook")
cookies, session = self.login_to_facebook()
while (cookies is None) and (session is None):
cookies, session = self.login_to_facebook()
self.session_and_cookies[thread_id] = session, cookies
    def __get_friendship_duration_from_friend_id(self, friend_id, thread_id):
        """
        function which crawls from facebook the friendship duration of the user and one of
        his first circle friends
        :param friend_id: friend's facebook id (extracted in phase 1)
        :param thread_id: index of the crawling thread (selects its session/cookies pair)
        :return: friendship duration in months, or None when the page is gone or ignored
        """
        attempt_num = config.getint('FriendshipDuration', 'try_amount')
        url = config.get('FriendshipDuration', 'friendship_link') + self.user_facebook_id + '/' + friend_id
        # Retry loop: on failure, either give up (page ignored / not found / server error)
        # or wait, refresh the session and retry.
        while True:
            succeed_to_get, r = utils.customized_redirected_get_request(url, self.session_and_cookies[thread_id][0],
                                                                        self.session_and_cookies[thread_id][1], attempt_num)
            if succeed_to_get:
                break
            # Checks below only run when the GET failed; `r` holds the last response.
            if config.get('General','page_to_ignore_text') in r.text:
                return None
            if (config.get('General','page_not_found_msg') in r.text and r.status_code == 404) or r.status_code == 500:
                return None
            else:
                # Transient failure (presumably rate limiting — TODO confirm): back off
                # and re-login before retrying.
                time.sleep(30)
                self.__get_new_session_for_thread(thread_id)
        soup = BeautifulSoup(r.text, 'html.parser')
        friendship_duration_as_months = self._get_friendship_duration_as_months(soup)
        return friendship_duration_as_months
    def __get_num_of_days_since_earliest_post_from_timeline(self, uri_to_year_timeline, thread_id):
        """
        extract the date of the first post that user posted and count the days since then
        :param uri_to_year_timeline: absolute URL or site-relative URI of a timeline-year page
        :param thread_id: index of the crawling thread (selects its session/cookies pair)
        :return: days since first post, or None when the page is gone or holds no dated posts
        """
        # The URI scraped from the page may already be absolute.
        if uri_to_year_timeline.startswith('http'):
            url = uri_to_year_timeline
        else:
            url = config.get('General', 'facebook_url') + uri_to_year_timeline
        # Retry loop: give up on 404/500, otherwise back off, re-login and retry.
        while True:
            succeed_to_get, r = utils.customized_get_request(url, self.session_and_cookies[thread_id][0],
                                                             self.session_and_cookies[thread_id][1],
                                                             config.getint('UserAge', 'try_amount'))
            if succeed_to_get:
                break
            if (config.get('General','page_not_found_msg') in r.text and r.status_code == 404) or r.status_code == 500:
                return None
            else:
                logger.error('Could not GET year timeline in uri: ' + uri_to_year_timeline)
                time.sleep(30)
                self.__get_new_session_for_thread(thread_id)
        soup = BeautifulSoup(r.text, 'html.parser')
        # <abbr> tags carry post timestamps; reversed to take the last tag on the page
        # (assumes the page lists posts newest-first — TODO confirm).
        all_posts_dates_tags = list(reversed(soup.findAll('abbr')))
        if not all_posts_dates_tags:
            return None
        return utils.get_num_of_days_from_first_post_date(all_posts_dates_tags[0].text)
def _get_FB_user_account_age_as_days(self, soup, thread_id):
"""
This function finds the earliest year after 2004 (facbook foundation) and search for the first post as
indication for user initialization and counts the days from now to then
:param soup: soup of timeline user's page
:param thread_id:
:return: FB user age as days
"""
timeline_year_tags = soup.findAll(lambda tag: tag.name == 'div' and re.match(r'^\d{4}$', tag.text) and tag.find('a'))
timeline_year_tags = list(reversed(list(filter(lambda x: x.find('a'), timeline_year_tags))))
for year_tag in timeline_year_tags:
if int(year_tag.text) >= 2004:
days_since_joined = self.__get_num_of_days_since_earliest_post_from_timeline(year_tag.find('a')['href'],
thread_id)
if days_since_joined:
return days_since_joined
else:
return utils.get_days_since_start_of_year(year_tag.text)
# if all years tags are empty from posts
for year_tag in timeline_year_tags:
if int(year_tag.text) >= 2004:
return utils.get_days_since_start_of_year(year_tag.text)
def __get_friend_friend_list_uri(self, soup):
"""
Extract a link to friend list page from the soup of timeline page
:param soup:
:return:
"""
uris_include_friend_text = soup.findAll(lambda tag: tag.name == 'a' and re.match(r'Friends', tag.text))
if len(uris_include_friend_text)>1:
return uris_include_friend_text[1]['href']
elif len(uris_include_friend_text)==1:
return uris_include_friend_text[0]['href']
return None
def __get_facebook_user_account_age(self, facebook_user_id, thread_id, is_fs_friend=True):
"""
function that gets the age of user acount in facebook
:param facebook_user_id: facebook_user's facebook id
:param thread_id:
:param is_fs_friend: flag that tell if it first\second circle friend
:return: age of user in days
"""
attempt_num = config.getint('UserAge', 'try_amount')
url = config.get('UserAge', 'user_age_link') + facebook_user_id
while True:
if is_fs_friend:
succeed_to_get, r = utils.customized_redirected_get_request(url, self.session_and_cookies[thread_id][0],
self.session_and_cookies[thread_id][1], attempt_num)
else:
succeed_to_get, r = utils.customized_get_request(url, self.session_and_cookies[thread_id][0],
self.session_and_cookies[thread_id][1], attempt_num)
if succeed_to_get:
break
if (config.get('General','page_not_found_msg') in r.text and r.status_code == 404) or r.status_code == 500:
return None
if config.get('General','page_to_ignore_text') in r.text:
return None, None
if not succeed_to_get:
logger.error('Could not GET timeline page with: ' + facebook_user_id)
time.sleep(30)
self.__get_new_session_for_thread(thread_id)
soup = BeautifulSoup(r.text, 'html.parser')
facebook_user_facebook_age_in_days = self._get_FB_user_account_age_as_days(soup, thread_id)
friend_friends_list_uri = self.__get_friend_friend_list_uri(soup)
return facebook_user_facebook_age_in_days , friend_friends_list_uri
def __get_facebook_user_amount_of_friends_from_first_friends_page(self, soup):
"""
search the amount of a friends in user first first page of friends list
:param soup:
:return:
"""
firends_amount_header = soup.find(lambda tag: tag.name == 'h3' and re.match(r'Friends \((\d+|\d+\,\d+)\)', tag.text))
if firends_amount_header:
return utils.get_amount_of_friends_from_text(firends_amount_header.text)
def _get_facebook_user_amount_of_friends(self, facebook_user_friend_list_uri, thread_id):
"""
Function which get the user's amount of friends in | |
Simulation object
run(12)
)
Args:
name (str, optional): Group name.
m (float, int): Group mass.
Returns:
Group
"""
return Group(name, m, callee=self)
def plot(self):
"""Simulation results plotter.
Returns:
SimulationPlotter
"""
return SimulationPlotter(self)
def plot_group_size(self, fpath=None, title='Distribution of Group Size', nx=250):
"""Plots the distribution of group sizes.
Args:
fpath (str, optional): Path to the plot file.
title (str): Plot title.
nx (int): Number of x-axis (i.e., the iteration axis) points.
Returns:
matplotlib figure object if ``fpath`` is None; ``self`` otherwise
"""
# Data:
data = [g.n for g in self.pop.groups.values()]
density = gaussian_kde(data)
x = np.linspace(min(data), max(data), nx)
density.covariance_factor = lambda: .25
density._compute_covariance()
# Figure:
fig = plt.figure(figsize=(12,2))
if title:
plt.title(title)
plt.plot(x, density(x), lw=1, linestyle='-', c='#666666', mfc='none', antialiased=True) # marker='o', markersize=5
# plt.legend(['Susceptible', 'Exposed', 'Recovered'], loc='upper right')
plt.ylabel('Density')
plt.xlabel('Group size')
plt.grid(alpha=0.25, antialiased=True)
# Return:
if fpath:
fig.savefig(fpath, dpi=300, bbox_inches='tight')
return self
else:
return fig
def plot_site_size(self, fpath=None, title='Distribution of Site Size', nx=250):
"""Plots the distribution of site sizes.
Args:
fpath (str, optional): Path to the plot file.
title (str): Plot title.
nx (int): Number of x-axis (i.e., the iteration axis) points.
Returns:
matplotlib figure object if ``fpath`` is None; ``self`` otherwise
"""
# Data:
data = [g.n for g in self.pop.groups.values()]
density = gaussian_kde(data)
x = np.linspace(min(data), max(data), nx)
density.covariance_factor = lambda: .25
density._compute_covariance()
# Figure:
fig = plt.figure(figsize=(12,2))
if title:
plt.title(title)
plt.plot(x, density(x), lw=1, linestyle='-', c='#666666', mfc='none', antialiased=True) # marker='o', markersize=5
plt.ylabel('Density')
plt.xlabel('Group size')
plt.grid(alpha=0.25, antialiased=True)
# Return:
if fpath:
fig.savefig(fpath, dpi=300, bbox_inches='tight')
return self
else:
return fig
def rem_probe(self, probe):
"""Removes the designated probe.
Args:
probe (Probe): The probe.
Returns:
``self``
"""
self.probes.discard(probe)
return self
def rem_rule(self, rule):
"""Removes the designated rule.
Args:
rule (Rule): The rule.
Returns:
``self``
"""
self.rules.discard(rule)
return self
def remote_after(self):
"""Restores the object after remote execution (on a cluster).
Returns:
``self``
"""
return self
def remote_before(self):
"""Prepare the object for remote execution (on a cluster).
Returns:
``self``
"""
if self.traj_id is not None:
for p in self.probes:
p.set_traj_id(self.traj_id)
return self
def reset_cb(self):
"""Reset all callback functions.
The following callback functions are available:
- **after_iter**: Call after iteration.
- **before_iter**: Call before iteration.
- **check_work**:
- **save_state**:
- **upd_progress**:
Returns:
``self``
"""
self.cb = DotMap(
after_iter = None,
before_iter = None,
check_work = None,
save_state = None,
upd_progress = None
)
return self
def reset_comp_hist(self):
"""Reset computational history.
See :meth:`~pram.sim.Simulation.get_comp_hist`.
Returns:
``self``
"""
self.comp_hist = DotMap( # computational history
mem_iter = [], # memory usage per iteration [B]
t_iter = [], # time per iteration [ms]
t_sim = 0 # total simulation time [ms]
)
return self
def reset_pop(self):
"""Resets population.
All groups and sites are removed from the simulation and the simulation object is set back in the pre-setup
state.
Returns:
``self``
"""
self.run_cnt = 0
self.is_setup_done = False
self.pop = GroupPopulation()
gc.collect()
return self
def reset_pragmas(self):
"""Reset all pragmas to their default values.
See :meth:`~pram.sim.Simulation.get_pragma`.
Returns:
``self``
"""
self.pragma = DotMap(
analyze = True, # flag: analyze the simulation and suggest improvements?
autocompact = False, # flag: remove empty groups after every iteration?
autoprune_groups = False, # flag: remove attributes and relations not referenced by rules?
autostop = False, # flag: stop simulation when the n, p, or t condition is met?
autostop_n = 0, #
autostop_p = 0, #
autostop_t = 10, #
comp_summary = False, # flag: show computational summary at the end of a simulation run?
live_info = False, #
live_info_ts = False, #
fractional_mass = False, # flag: should fractional mass be allowed?
probe_capture_init = True, # flag: let probes capture the pre-run state of the simulation?
rule_analysis_for_db_gen = True # flag: should static rule analysis results help form DB groups
)
return self
def reset_probes(self):
"""Removes all probes.
Returns:
``self``
"""
self.probes = []
return self
def reset_rules(self):
"""Removes all group rules.
Returns:
``self``
"""
self.rules = []
self.analysis.rule_static.reset()
return self
def reset_sim_rules(self):
"""Removes all simulation rules.
Returns:
``self``
"""
self.sim_rules = []
return self
def reset_vars(self):
"""Reset all simulation variables.
Returns:
``self``
"""
self.vars = {}
return self
    def run(self, iter_or_dur=1, do_disp_t=False, do_disp_iter=False):
        """Run the simulation.

        The main loop applies group rules, then simulation rules, runs probes,
        advances the timer, and honors the autostop/autocompact pragmas and
        user callbacks on every iteration.

        Args:
            iter_or_dur (int or str): Number of iterations or a string representation of duration (see
                :meth:`util.Time.dur2ms() <pram.util.Time.dur2ms>`)
            do_disp_t (bool): Display simulation time at every iteration? Useful for debugging.
            do_disp_iter (bool): Display simulation iteration at every iteration? Useful for debugging.

        Returns:
            ``self``
        """
        ts_sim_0 = Time.ts()  # wall-clock start; used for total-time bookkeeping

        # No rules or groups:
        if len(self.rules) == 0:
            print('No rules are present\nExiting')
            return self
        if len(self.pop.groups) == 0:
            print('No groups are present\nExiting')
            return self

        self.pop.freeze()  # masses of groups cannot be changed directly but only via the group-splitting mechanism

        # Decode iterations/duration:
        self._inf('Setting simulation duration')
        self.running.is_running = True
        self.running.progress = 0.0
        if isinstance(iter_or_dur, int):
            self.timer.add_iter(iter_or_dur)
            self.running.step = 1.0 / float(iter_or_dur)
        elif isinstance(iter_or_dur, str):
            self.timer.add_dur(Time.dur2ms(iter_or_dur))
            # self.running.step = ...  # TODO
        else:
            raise ValueError(f'Number of iterations or duration must be an integer or a string: {iter_or_dur}')

        # Rule conditioning 01 -- Init:
        # Class-level scratch sets let rules record which attributes/relations
        # they touch; consumed by the rule analysis at the end of the run.
        setattr(Group, 'attr_used', set())
        setattr(Group, 'rel_used', set())

        # Sync simulation and rule timers:
        self._inf('Syncing rule timers')
        for r in self.rules:
            r.set_t_unit(self.timer.ms)

        # Rule setup and simulation compacting (first run only):
        if not self.is_setup_done:
            if self.fn.group_setup:
                self._inf('Running group setup')
                self.pop.apply_rules(self.fn.group_setup, 0, self.timer, is_sim_setup=True)
            self._inf('Running rule setup')
            self.pop.apply_rules(self.rules, 0, self.timer, is_rule_setup=True)
            self.is_setup_done = True
            if self.pragma.autocompact:
                self._inf('Compacting the model')
                self.compact()

        # Save last-iter info:
        # self.comp_hist.mem_iter.append(psutil.Process(self.pid).memory_full_info().uss)  # TODO: ray doesn't work with memory_full_info() (access denied)
        self.comp_hist.mem_iter.append(0)
        self.comp_hist.t_iter.append(Time.ts() - ts_sim_0)

        # Force probes to capture the initial state:
        if self.pragma.probe_capture_init and self.run_cnt == 0:
            self._inf('Capturing the initial state')
            for p in self.probes:
                p.run(None, None, self.traj_id)

        # Run the simulation:
        self._inf('Initial population')
        self._inf(f' Mass : {"{:,}".format(int(self.pop.get_mass()))}')
        self._inf(f' Groups : {"{:,}".format(self.pop.get_group_cnt())}')
        self._inf(f' Sites : {"{:,}".format(self.pop.get_site_cnt())}')
        self._inf('Running the PRAM')

        self.run_cnt += 1
        self.autostop_i = 0  # number of consecutive iterations the 'autostop' condition has been met for

        self.timer.start()
        for i in range(self.timer.get_i_left()):
            if do_disp_iter:
                print(i)
            ts_iter_0 = Time.ts()  # per-iteration wall-clock start
            if self.cb.before_iter is not None:
                self.cb.before_iter(self)
            if self.pragma.live_info:
                self._inf(f'Iteration {self.timer.i + 1} of {self.timer.i_max}')
                self._inf(f' Group count: {self.pop.get_group_cnt()}')
            elif do_disp_t:
                print(f't:{self.timer.get_t()}')

            # Apply group rules:
            self.pop.apply_rules(self.rules, self.timer.get_i(), self.timer.get_t())
            # Mass moved between groups this iteration, absolute and as a
            # fraction of total mass (fraction is None for an empty population).
            m_flow = self.pop.last_iter.mass_flow_tot
            m_pop = float(self.pop.get_mass())
            if m_pop > 0:
                p_flow = float(m_flow) / m_pop
            else:
                p_flow = None

            # Apply simulation rules:
            for r in self.sim_rules:
                if r.is_applicable(self.timer.get_i(), self.timer.get_t()):
                    r.apply(self, self.timer.get_i(), self.timer.get_t())

            # Save last-iter info:
            # self.comp_hist.mem_iter.append(psutil.Process(self.pid).memory_full_info().uss)  # TODO: ray doesn't work with memory_full_info() (access denied)
            self.comp_hist.mem_iter.append(0)
            self.comp_hist.t_iter.append(Time.ts() - ts_iter_0)

            # Run probes:
            for p in self.probes:
                p.run(self.timer.get_i(), self.timer.get_t(), self.traj_id)

            # Cleanup the population:
            self.pop.do_post_iter()

            # Advance timer:
            self.timer.step()
            self.running.progress += self.running.step

            # Autostop: stop once mass flow has stayed below the n/p thresholds
            # for 'autostop_t' consecutive iterations.
            if self.pragma.autostop and p_flow is not None:
                if m_flow < self.pragma.autostop_n or p_flow < self.pragma.autostop_p:
                    self.autostop_i += 1
                else:
                    self.autostop_i = 0
                if self.autostop_i >= self.pragma.autostop_t:
                    if self.pragma.live_info:
                        self._inf('Autostop condition has been met; population mass transfered during the most recent iteration')
                        self._inf(f' {m_flow} of {self.pop.get_mass()} = {p_flow * 100}%')
                        self.timer.stop()
                        break
                    else:
                        print('')
                        print('Autostop condition has been met; population mass transfered during the most recent iteration:')
                        print(f' {m_flow} of {self.pop.get_mass()} = {p_flow * 100}%')
                        self.timer.stop()
                        break

            # Autocompact:
            if self.pragma.autocompact:
                self._inf(f'  Compacting the model')
                self.compact()

            # Callbacks:
            if self.cb.after_iter:
                self.cb.after_iter(self)
            if self.cb.upd_progress:
                self.cb.upd_progress(i, iter_or_dur)
            if self.cb.check_work:
                # Block until the controller says it is OK to continue.
                while not self.cb.check_work():
                    time.sleep(0.1)
        self.timer.stop()

        self._inf(f'Final population info')
        self._inf(f' Groups: {"{:,}".format(self.pop.get_group_cnt())}')

        # Rule conditioning 02 -- Analyze and cleanup:
        self.analysis.rule_static.analyze_groups(self.pop.groups.values())
        self.analyze_rules_dynamic()
        setattr(Group, 'attr_used', None)
        setattr(Group, 'rel_used', None)
        # we want this None instead of just calling clear() to prevent dynamic rule analysis picking up on
        # calls to has_attr(), has_rel(), and others that happen via outside rules
        # getattr(Group, 'attr_used').clear()
        # getattr(Group, 'rel_used' ).clear()

        # Rule cleanup and simulation compacting:
        self._inf('Running rule cleanup')
        self.pop.apply_rules(self.rules, 0, self.timer, is_rule_cleanup=True)
        if self.pragma.autocompact:
            self._inf('Compacting the model')
            self.compact()

        self._inf('Finishing simulation')
        self.running.is_running = False
        self.running.progress = 1.0
        self.running.step = 1.0

        # Flush any buffered probe data:
        for p in self.probes:
            if p.persistence is not None:
                p.persistence.flush()

        self.comp_hist.t_sim = Time.ts() - ts_sim_0
        self.run__comp_summary()
        return self
def run__comp_summary(self):
"""Called by :meth:`~pram.sim.Simulation.run` to display computational | |
#!/usr/bin/env python3
# coding: utf-8
"""
Common source for utility functions used by ABCD-BIDS task-fmri-pipeline
<NAME>: <EMAIL>
Created: 2021-01-15
Updated: 2021-11-12
"""
# Import standard libraries
import argparse
from datetime import datetime # for seeing how long scripts take to run
from glob import glob
import json
import multiprocessing as mp
import os
import pandas as pd
import random # only used by rand_string
import shutil
import string # only used by rand_string
import subprocess
import sys
import time
# Constants: Name of scanner-info command-line argument, directory containing
# the main pipeline script, SLURM-/SBATCH-related arguments' default names, and
# name of the argument to get the directory containing the main wrapper script
SCAN_ARG = 'scanners_info'  # CLI argument naming the scanner-info input
SCRIPT_DIR = os.path.dirname(os.path.dirname(__file__))  # two directories above this file
SLURM_ARGS = ('account', 'cpus', 'memory', 'print_progress', 'sleep', 'time')
WRAPPER_LOC = 'wrapper_location'  # CLI argument holding the wrapper script's directory
def add_arg_if_in_arg_names(arg_name, all_args, parser, *shortnames, **kwargs):
    """
    Conditionally register a command-line argument on a parser.

    Identical to argparse.ArgumentParser.add_argument, except the argument is
    only added when arg_name appears in all_args.
    :param arg_name: String naming the argument to (maybe) add to parser
    :param all_args: Set of strings; each names a command-line argument
    :param parser: argparse.ArgumentParser
    :param shortnames: Unpacked list of strings; each is arg_name shortened
    :param kwargs: Unpacked dictionary of argparse attributes to give the arg
    :return: parser, but (maybe) with the argument named arg_name added
    """
    if arg_name not in all_args:
        return parser
    long_flag = "--" + arg_name.replace("_", "-")  # e.g. foo_bar -> --foo-bar
    # Register a single-dash alias, the double-dash flag, and any shortnames
    parser.add_argument(long_flag[1:], long_flag, *shortnames, **kwargs)
    return parser
def add_lvl_args_to(parser):
    """
    Attach the arguments required by a level X analysis run.

    :param parser: argparse.ArgumentParser with all command-line arguments
                   that the user gave to pipeline_wrapper.py
    :return: parser with all command-line arguments needed for level X analysis
    """
    # The level script needs (1) the run number and (2) the path to the .json
    # file holding the 'paths' dictionary built by the wrapper.
    parser.add_argument('--run-number', type=valid_whole_number, required=True)
    parser.add_argument('--temp-json', type=valid_readable_json, required=True)
    return parser
def add_slurm_args_to(parser):
    """
    Attach every CLI argument needed to run parallel SLURM batch jobs.

    :param parser: argparse.ArgumentParser with some command-line arguments
    :return: parser with all CLI arguments needed to run parallel SLURM jobs
    """
    cpus_default = 1
    mem_default = 8          # gigabytes
    sleep_default = 10       # seconds between job submissions
    time_default = "01:00:00"
    parser.add_argument(
        '-A', '--account',
        help="Name of the account to submit the SBATCH job under."
    )
    parser.add_argument(
        '-c', '--cpus', type=valid_whole_number, default=cpus_default,
        help=(f"Number of CPUs to use for each Python job. By default, this "
              f"argument's value will be {cpus_default}.")
    )
    parser.add_argument(
        '-mem', '--memory', type=valid_whole_number, default=mem_default,
        help=(f"Memory in gigabytes (GB) to assign to each sbatch job. The "
              f"default number is {mem_default} GB.")
    )
    parser.add_argument(
        '-progress', '--print-progress', action='store_true',
        help=('Include this flag for the script to print updates about its '
              'progress at intervals defined by --sleep. This will also print '
              'every command that is run to submit a pipeline batch job.')
    )
    parser.add_argument(
        '-sleep', '--sleep', type=valid_whole_number, default=sleep_default,
        help=(f"Number of seconds to wait between batch job submissions. The "
              f"default number is {sleep_default}.")
    )
    parser.add_argument(
        '-time', '--time', metavar="SLURM_JOB_TIME_LIMIT",
        type=valid_time_str, default=time_default,
        help=(f"Time limit for each automated_subset_analysis batch job. The "
              f"time limit must be formatted specifically as HH:MM:SS where HH "
              f"is hours, MM is minutes, and SS is seconds. {time_default} is "
              f"the default time limit.")
    )
    return parser
def argify(argname, argval):
    """
    Render one "--name=value" CLI parameter assignment.

    :param argname: String naming a parameter for a script called from terminal
    :param argval: Object to assign in string form as the value of the argument
    :return: String, a parameter assignment for a script called from terminal
    """
    return f"--{argname}={argval}"
def as_cli_arg(arg_str):
    """
    Convert a stored argument name into its command-line flag form.

    :param arg_str: String naming a stored argument taken from the command line
    :return: String which is the command-line argument form of arg_str
    """
    return "--{}".format(arg_str.replace("_", "-"))
def copy_and_rename_file(old_file, new_file):
    """
    Copy a file into new_file's directory, then give the copy new_file's name.

    :param old_file: String, valid path to an existing file to copy
    :param new_file: String, valid path to what will be a copy of old_file
    """
    # copy2 preserves metadata and returns the path of the copy it created
    copied_path = shutil.copy2(old_file, os.path.dirname(new_file))
    os.rename(copied_path, new_file)
def copy_event_files_to_default_dir(cli_args, all_event_files):
    """
    Copy every event file into the default event-files directory.

    :param cli_args: Dictionary containing all command-line arguments from user
    :param all_event_files: List of strings that are valid paths to real files
    """
    destination = cli_args['events_dir']
    for event_file in all_event_files:
        try:
            shutil.copy(event_file, destination)
        except shutil.SameFileError:
            pass  # already in place; nothing to do
def count_lines_in_txt_file(filepath):
    """
    Quickly count how many lines are in a text file.
    Adapted from pynative.com/python-count-number-of-lines-in-file
    :param filepath: String, valid path to an existing readable text file
    :return: Int, the number of lines in the file at filepath (0 if empty)
    """
    # BUG FIX: for an empty file the loop body never ran, leaving 'count'
    # unbound and raising UnboundLocalError on the return; start the counter
    # at 0 (and enumerate from 1) so empty files report 0 lines.
    count = 0
    with open(filepath, 'r') as infile:  # open file in read mode
        for count, _ in enumerate(infile, start=1):
            pass
    return count
def dict_has(a_dict, a_key):
    """
    :param a_dict: Dictionary (any)
    :param a_key: Object (any)
    :return: Truthy if and only if a_key is mapped to something truthy in
             a_dict (False when the key is absent; the raw value otherwise)
    """
    if a_key not in a_dict:
        return False
    return a_dict[a_key]
def ensure_dict_has(a_dict, a_key, new_value):
    """
    Guarantee that a_dict maps a_key to a truthy value.

    :param a_dict: Dictionary (any)
    :param a_key: Object which will be a key in a_dict
    :param new_value: Object to become the value mapped to a_key in a_dict
                      unless a_key is already mapped to a truthy value
    :return: a_dict, but with a_key mapped to some value
    """
    # Absent keys AND falsy values are both overwritten with new_value.
    current = a_dict.get(a_key)
    if not current:
        a_dict[a_key] = new_value
    return a_dict
def exit_with_time_info(start_time, exit_code=0):
    """
    Terminate the pipeline after displaying a message showing how long it ran.

    :param start_time: datetime.datetime object of when the script started
    :param exit_code: Int, the process exit status to terminate with
    """
    outcome = 'successfully' if exit_code == 0 else 'and then crashed'
    print('The pipeline for this subject took this long to run {}: {}'
          .format(outcome, datetime.now() - start_time))
    sys.exit(exit_code)
def extract_from_json(json_path):
    """
    :param json_path: String, a valid path to a real readable .json file
    :return: Dictionary, the contents of the file at json_path
    """
    with open(json_path, 'r') as json_file:
        contents = json.load(json_file)
    return contents
def get_all_analysis_paths(cli_args):
    """
    Build and save paths for various variables called throughout the pipeline.

    :param cli_args: Dictionary containing all command-line arguments from user
    :return: Dictionary containing paths to all of the following variables:
             AROI2, BIDS, dir_lvl, feat_name, final_smooth, lvl_2_paths,
             sub_paths, templates
    """
    paths = dict()
    # Feature directories for levels 1 and 2
    paths['dir_lvl'] = {str(lvl): os.path.join(cli_args['output'],
                                               'Level{}_feats'.format(lvl))
                        for lvl in (1, 2)}
    paths['feat_name'] = '{}.feat'.format(cli_args['study_name'])
    # Spatial smoothing suffix
    paths['final_smooth'] = '_smoothed_{}mm'.format(cli_args['spat_smooth'])
    for lvl in cli_args['levels']:
        tmpl_key = 'template{}'.format(lvl)
        paths[tmpl_key] = os.path.join(cli_args['templates'], cli_args[tmpl_key])
    paths['lvl_2'] = get_lvl_paths(
        paths['dir_lvl']['2'], get_sub_base(cli_args),
        cli_args['study_name'] + '.gfeat', cli_args['runs'], 'fsf'
    )
    # Subject anat & func directories
    paths['sub_ses'] = {f_or_a: os.path.join(
        cli_args['study_dir'], 'derivatives', cli_args['bids_dir'],
        cli_args['subject'], cli_args['ses'], f_or_a
    ) for f_or_a in ('anat', 'func')}
    paths['AROI2'] = os.path.join(cli_args['templates'], 'Atlas_ROIs.2.nii.gz')
    return paths
def get_and_print_time_since(event_name, event_time):
    """
    Print and return a message showing how much time has passed since the
    current running script reached a certain part of its process.

    :param event_name: String to print after 'Time elapsed since '
    :param event_time: datetime object representing a time in the past
    :return: String with an easily human-readable message showing how much time
             has passed since {event_time} when {event_name} happened.
    """
    elapsed = datetime.now() - event_time
    timestamp = "\nTime elapsed since {}: {}".format(event_name, elapsed)
    print(timestamp)
    return timestamp
def get_args_to_run_film_gls(**kwargs):
    """
    Build the Bash command (as a list of strings) that calls film_gls.

    :param kwargs: Unpacked dict of film_gls arguments; 'in_arg' is required
                   and becomes the --in argument
    :return: List of strings which are a Bash command calling film_gls
    """
    in_arg = kwargs.pop('in_arg')
    command = ['film_gls', '--sa', argify('in', in_arg)]
    command += [argify(name, value) for name, value in kwargs.items()]
    return command
def get_default_ext_command(cmd_name):
    """
    Try to get valid path to external software command file without user input.

    :param cmd_name: String naming the executable command file
    :return: String, path to the command if the user has the command alias in
             their .bashrc / $PATH; otherwise None
    """
    try:
        which_output = subprocess.check_output(("which", cmd_name))
        return which_output.decode('utf-8').split()[-1]
    except subprocess.CalledProcessError:
        # 'which' exits nonzero when the command is not on $PATH
        return None
def get_LR_functions(cli_args, paths):
    """
    Build path-generating helpers for left/right hemisphere files.

    :param cli_args: Dictionary containing all command-line arguments from user
    :param paths: Dictionary of path strings, and of dictionaries of path
                  strings, used throughout processing in both levels
    :return: Dictionary mapping 'surf' to a function which returns the file
             path string to a .surf.gii file, and mapping 'shape' to a function
             which returns the file path string to a .shape.gii file
    """
    def surf_path(hemi):
        # e.g. <subj_ses>_hemi-L_space-MNI_mesh-fsLR32k_midthickness.surf.gii
        return os.path.join(
            paths['sub_ses']['anat'],
            get_subj_ses(cli_args) +
            '_hemi-{}_space-MNI_mesh-fsLR32k_midthickness.surf.gii'.format(hemi)
        )
    def shape_path(hemi):
        return os.path.join(cli_args['templates'],
                            hemi + '.atlasroi.32k_fs_LR.shape.gii')
    return {'surf': surf_path, 'shape': shape_path}
def get_lvl_paths(lvl_dir, sub_base, feat_name, runs, *extra_subdirs):
"""
Get a dictionary of paths to analysis-level-specific files for paths dict
:param lvl_dir: String, path to the feat directory for level 1 or 2
:param sub_base: String identifying a subject, session, and task
:param feat_name: String naming a feature
:param runs: | |
# -*- coding: utf-8 -*-
"""
@File: patent2vec.py
@Description: This is a module for generating document embedding for patents.
This application,
1. Creates Patent2Vec model
2. Initializes Patent2Vec model's weights
with pre-trained model
3. Trains Patent2Vec model
4. Infers document embedding for a new patent document
5. Predict document embeddings for the collection of patent documents
6. Saves/Loads Patent2Vec model
7. Evaluates Patent2Vec model
8. Tunes the vocabulary size
9. Saves document embeddings to database
@Author: <NAME>
@EMail: <EMAIL>
@Created_on: 04/05/2017
@License Copyright [2017] [Chetan Borse]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied.
See the License for the specific language governing permissions
and limitations under the License.
@python_version: 3.5
===============================================================================
"""
import os
import time
import logging
import datetime
import numpy as np
from scipy.stats import zscore
from gensim import utils
from gensim.models import Doc2Vec
from Configuration import config
from Utils.exceptions import PathNotFoundError
from Utils.exceptions import ModelNotFoundError
from Utils.database import Database
# Set logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(name)s [%(levelname)s] %(message)s',)
log = logging.getLogger("Patent2Vec")
# Global variables
PATENT2VEC_MODEL_PATH = config.PATENT2VEC_MODEL_PATH
PATENT2VEC_MODEL = config.PATENT2VEC_MODEL
PATENT_EMBEDDING = config.PATENT_EMBEDDING
PATENT_LABEL = config.PATENT_LABEL
PATENT_CATEGORY = config.PATENT_CATEGORY
L2_NORMALIZED_PATENT_EMBEDDING = config.L2_NORMALIZED_PATENT_EMBEDDING
STANDARDIZED_PATENT_EMBEDDING = config.STANDARDIZED_PATENT_EMBEDDING
WORD2VEC_BENCHMARK_DATA = config.WORD2VEC_BENCHMARK_DATA
class Patent2Vec(object):
"""
This is a class for generating document embedding for patents.
Note: This implementation is based on Doc2Vec implementation.
This class,
1. Creates Patent2Vec model
2. Initializes Patent2Vec model's weights with pre-trained model
3. Trains Patent2Vec model
4. Infers document embedding for a new patent document
5. Predict document embeddings for the collection of patent documents
6. Saves/Loads Patent2Vec model
7. Evaluates Patent2Vec model
8. Tunes the vocabulary size
9. Saves document embeddings to database
"""
    def __init__(self,
                 dm=1,
                 dm_mean=1,
                 dm_concat=0,
                 min_word_count=5,
                 max_vocab_size=None,
                 size=500,
                 context_window_size=8,
                 downsampling=1e-5,
                 hs=0,
                 negative=2,
                 iter=50,
                 workers=4,
                 use_less_memory=False,
                 docvecs_mapfile=None):
        """Store Patent2Vec hyperparameters; the model itself is created later.

        The parameters mirror gensim Doc2Vec's constructor (see build()):
            dm: training algorithm flag (passed to Doc2Vec as ``dm``).
            dm_mean: passed to Doc2Vec as ``dm_mean``.
            dm_concat: passed to Doc2Vec as ``dm_concat``.
            min_word_count: passed to Doc2Vec as ``min_count``.
            max_vocab_size: passed to Doc2Vec as ``max_vocab_size``.
            size: embedding dimensionality (Doc2Vec ``size``).
            context_window_size: Doc2Vec ``window``.
            downsampling: Doc2Vec ``sample``.
            hs: Doc2Vec ``hs``.
            negative: Doc2Vec ``negative``.
            iter: epochs per training call (NOTE: shadows builtin ``iter``;
                  kept for interface compatibility).
            workers: Doc2Vec ``workers``.
            use_less_memory: when True, corpora are streamed instead of being
                  materialized via to_array() (see build()/train()).
            docvecs_mapfile: Doc2Vec ``docvecs_mapfile``.
        """
        self.dm = dm
        self.dm_mean = dm_mean
        self.dm_concat = dm_concat
        self.min_word_count = min_word_count
        self.max_vocab_size = max_vocab_size
        self.size = size
        self.context_window_size = context_window_size
        self.downsampling = downsampling
        self.hs = hs
        self.negative = negative
        self.iter = iter
        self.workers = workers
        self.use_less_memory = use_less_memory
        self.docvecs_mapfile = docvecs_mapfile
        # Underlying gensim model; set by build() or load()
        self.model = None
def build(self, patents):
"""
Create a Patent2Vec model and build a vocabulary.
"""
log.info("Building Patent2Vec model")
# Create a Patent2Vec model
self.model = Doc2Vec(dm=self.dm,
dm_mean=self.dm_mean,
dm_concat=self.dm_concat,
min_count=self.min_word_count,
max_vocab_size=self.max_vocab_size,
size=self.size,
window=self.context_window_size,
sample=self.downsampling,
hs=self.hs,
negative=self.negative,
iter=self.iter,
workers=self.workers,
docvecs_mapfile=self.docvecs_mapfile)
# Build a vocabulary of ngrams for a given corpus
if self.use_less_memory:
self.model.build_vocab(patents)
else:
self.model.build_vocab(patents.to_array())
def intersect_with_pretrained_embedding(self,
pretrained_word2vec,
binary=False):
"""
Intersect the vocabulary of ngrams with pre-trained word/concept
embeddings.
Note: No new words/concepts are added to the existing vocabulary,
but intersecting words/concepts adopt the pre-trained
word/concept embedding's weights and non-intersecting
words/concepts are left alone.
"""
log.info("Intersecting vocabulary with pre-trained word/concept embeddings")
self.model.intersect_word2vec_format(pretrained_word2vec, binary=binary)
def reuse_from(self, model):
"""
Reuse shareable structures from other Patent2Vec model.
"""
self.model.reset_from(model)
def train(self,
patents,
alpha=0.1,
min_alpha=0.0001,
passes=10,
fixed_alpha=False):
"""
Train Patent2Vec model.
Note: For rigorous training, set 'passes' > 1.
There are two training approaches (for every pass),
1. Flexible learning rate ->
Provide minimum & maximum learning rates and
let the gensim package adjusts it.
2. Fixed learning rate ->
Provide minimum & maximum learning rates;
both must be same and do not let the gensim adjusts
the learning rate.
"""
log.info("Training Patent2Vec model")
# Compute delta, i.e. change in learning rate after every pass.
delta = (alpha - min_alpha) / passes
log.info("START %s", str(datetime.datetime.now()))
# Train/Re-train model for a given number of passes
for i in range(passes):
# Shuffle patent documents
patents.shuffle()
# If user chooses fixed learning rate,
# then 'alpha' and 'min_alpha' should be same;
# otherwise let the gensim adjusts learning rate
# after each epoch/iteration.
self.model.alpha = alpha
if fixed_alpha:
self.model.min_alpha = alpha
else:
self.model.min_alpha = min_alpha
# Train Patent2Vec model for the given number of iterations
# as specified by 'self.iter'
start_time = time.time()
if self.use_less_memory:
self.model.train(patents,
total_examples=len(patents),
epochs=self.iter)
else:
self.model.train(patents.get_corpus(),
total_examples=len(patents),
epochs=self.iter)
end_time = time.time()
log.debug("Pass(%d): Completed at alpha %f", i+1, alpha)
log.debug("Pass(%d): Time elapsed = %f", i+1, (end_time-start_time))
# Lower maximum learning rate's value for next pass
alpha -= delta
log.info("END %s", str(datetime.datetime.now()))
def evaluate(self):
"""
Evaluate Patent2Vec model.
"""
log.info("Evaluating Patent2Vec model")
if not os.path.exists(WORD2VEC_BENCHMARK_DATA):
raise PathNotFoundError("%s: Evaluation dataset does not exist!"
% WORD2VEC_BENCHMARK_DATA.rsplit(os.sep, 1)[1])
# Evaluate Patent2Vec model
accuracy = self.model.accuracy(WORD2VEC_BENCHMARK_DATA)
# Find correct and incorrect predictions
correct = len(accuracy[-1]['correct'])
incorrect = len(accuracy[-1]['incorrect'])
total = correct + incorrect
# Calculate correct and incorrect predictions' percentage
percentage = lambda x: (x / total) * 100
log.info("Total: %d, Correct: %0.2f%%, Incorrect: %0.2f%%",
total, percentage(correct), percentage(incorrect))
def infer(self, document, alpha=0.1, min_alpha=0.0001, steps=100):
"""
Infer a document embedding for a given patent document.
"""
return self.model.infer_vector(document,
alpha=alpha,
min_alpha=min_alpha,
steps=steps)
def predict(self,
documents,
alpha=0.1,
min_alpha=0.0001,
steps=100,
save=True,
database=None,
table_name=None,
save_patent_category=True,
prepend_document_category=False):
"""
Predict document embeddings.
"""
log.info("Predicting document embeddings")
if save and database is None:
raise ValueError("'database' not defined!")
if save and table_name is None:
raise ValueError("'table_name' not defined!")
# Predict document embeddings
tags = []
embeddings = []
for document in documents:
embedding = self.infer(document.words[0],
alpha=alpha,
min_alpha=min_alpha,
steps=steps)
embeddings.append(embedding)
tags.append(document.tags[0])
# Insert predicted document embeddings into database
if save:
for i, embedding in enumerate(embeddings):
patent_name = self._get_document_label(tags[i],
prepend_document_category)
embedding = " ".join(map(str, embedding))
if save_patent_category:
patent_category = self._get_document_category(tags[i])
else:
patent_category = "UNKNOWN"
record = [("PatentName", patent_name),
("DocumentEmbedding", embedding),
("PatentCategory", patent_category)]
database.insert(table=table_name, record=record)
return (tags, embeddings)
def save(self, model=None, path=None):
"""
Save Patent2Vec model.
"""
log.info("Saving Patent2Vec model")
if model is None:
model = PATENT2VEC_MODEL.rsplit(os.sep, 1)[1]
if path is None:
path = PATENT2VEC_MODEL_PATH
if not os.path.exists(path):
raise PathNotFoundError("Path does not exist: %s" % path)
self.model.save(os.path.join(path, model))
def load(self, model):
"""
Load Patent2Vec model.
"""
log.info("Loading Patent2Vec model")
if not os.path.exists(model):
raise PathNotFoundError("Patent2Vec model does not exist: %s"
% model)
self.model = Doc2Vec.load(model)
def clean(self):
"""
Clean temporary generated data.
"""
log.info("Cleaning temporary generated data")
self.model.delete_temporary_training_data()
def generate_l2_normalized_embeddings(self):
"""
Generate L2 normalized document embeddings.
"""
self.model.docvecs.init_sims()
def standardize_embeddings(self, document_embeddings, rows, columns):
"""
Standardize document embeddings.
"""
path = STANDARDIZED_PATENT_EMBEDDING.rsplit(os.sep, 1)[0]
if not os.path.exists(path):
raise PathNotFoundError("Path does not exist: %s" % path)
standardized_patent_embeddings = np.memmap(STANDARDIZED_PATENT_EMBEDDING,
dtype='float32',
mode='w+',
shape=(rows, columns))
standardized_patent_embeddings[:] = np.array(zscore(document_embeddings))[:]
return standardized_patent_embeddings
def save_document_embeddings(self,
document_embeddings=None,
rows=None,
columns=500,
database=None,
table_name=None,
save_patent_category=True,
prepend_document_category=False):
"""
Save document embeddings to database.
"""
log.info("Saving document embeddings")
if document_embeddings is None:
document_embeddings = PATENT_EMBEDDING
if not os.path.exists(document_embeddings):
raise PathNotFoundError("Path does not exist: %s"
% document_embeddings)
if rows is None:
raise ValueError("'rows' not defined!")
if database is None:
raise ValueError("'database' not defined!")
if table_name is None:
raise ValueError("'table_name' not defined!")
# Create a memory map with document embeddings for reducing load on RAM
embeddings = np.memmap(document_embeddings,
dtype='float32',
mode='r',
shape=(rows, columns))
# Insert document embedding records into database
for i, embedding in enumerate(embeddings):
doctag = self.model.docvecs.index_to_doctag(i)
patent_name = self._get_document_label(doctag,
prepend_document_category)
embedding = " ".join(map(str, embedding))
if save_patent_category:
patent_category = self._get_document_category(doctag)
else:
patent_category = "UNKNOWN"
record = [("PatentName", patent_name),
("DocumentEmbedding", embedding),
("PatentCategory", patent_category)]
database.insert(table=table_name, record=record)
def _get_document_label(self, doctag, prepend_document_category=False):
"""
Get document label.
"""
document_name = doctag.rsplit(os.sep, 1)[1]
document_name = document_name.rsplit('.', 1)[0]
if prepend_document_category:
document_category = doctag.rsplit(os.sep, 2)[1]
else:
document_category = ""
if document_category:
document_label = document_category + "." + document_name
else:
document_label = document_name
return document_label
def _get_document_category(self, doctag, description=None):
"""
Get document category.
"""
document_category = doctag.rsplit(os.sep, 2)[1]
if description == "20ng_6categories":
if document_category in ["comp.graphics",
"comp.os.ms-windows.misc",
"comp.sys.ibm.pc.hardware",
"comp.sys.mac.hardware",
"comp.windows.x"]:
return "computer"
if document_category in ["talk.politics.misc",
"talk.politics.guns",
"talk.politics.mideast"]:
return "politics"
if document_category in ["talk.religion.misc",
"alt.atheism",
"soc.religion.christian"]:
return "religion"
if document_category in ["sci.crypt",
"sci.electronics",
"sci.med",
"sci.space"]:
return "science"
if document_category in ["misc.forsale"]:
return "forsale"
if document_category in ["rec.autos",
"rec.motorcycles",
"rec.sport.baseball",
"rec.sport.hockey"]:
return "rec"
return document_category
def tune_vocab_size(self, patents, min_count_range=(0, 50)):
"""
Function for tuning the vocabulary size.
"""
if self.model is None:
raise ModelNotFoundError("Patent2Vec model not found!")
# Scan vocabulary across entire corpus
if self.use_less_memory:
self.model.scan_vocab(patents)
else:
self.model.scan_vocab(patents.to_array())
# Find out the vocabulary size for different minimum token frequency
for i | |
'''
Created on Jul 22, 2011
@author: Rio
'''
from __future__ import absolute_import
from collections import defaultdict
import os
from logging import getLogger
import itertools
from numpy import swapaxes, uint8, zeros
import numpy
from mceditlib.anvil.adapter import VERSION_1_7, VERSION_1_8
from mceditlib.anvil.entities import PCEntityRef, PCTileEntityRef, \
ItemStackRef, ItemRef
from mceditlib.exceptions import PlayerNotFound, LevelFormatError
from mceditlib.selection import BoundingBox
from mceditlib.fakechunklevel import FakeChunkedLevelAdapter, FakeChunkData
from mceditlib.blocktypes import BlockTypeSet, PCBlockTypeSet
from mceditlib import nbt
log = getLogger(__name__)
blocktypeClassesByName = {"Alpha": PCBlockTypeSet}
def createSchematic(shape, blocktypes='Alpha'):
    """
    Create a new .schematic of the given shape and blocktypes and return a WorldEditor.

    Parameters
    ----------
    shape : tuple of int
    blocktypes : BlockTypeSet or str

    Returns
    -------
    WorldEditor
    """
    # Imported here to avoid a circular import with mceditlib.worldeditor.
    from mceditlib.worldeditor import WorldEditor

    return WorldEditor(adapter=SchematicFileAdapter(shape=shape, blocktypes=blocktypes))
def blockIDMapping(blocktypes):
    """Build an NBT compound mapping numeric block ID (as a string key) to block name."""
    tag = nbt.TAG_Compound()
    for blockName, blockID in blocktypes.IDsByName.iteritems():
        tag[str(blockID)] = nbt.TAG_String(blockName)
    return tag
def itemIDMapping(blocktypes):
    """Build an NBT compound mapping numeric item ID (as a string key) to internal item name."""
    tag = nbt.TAG_Compound()
    for itemName, itemID in blocktypes.itemTypes.IDsByInternalName.iteritems():
        tag[str(itemID)] = nbt.TAG_String(itemName)
    return tag
class SchematicChunkData(FakeChunkData):
    """Chunk data that forwards entity additions to the owning dimension.

    A schematic has a single global entity list, so per-chunk additions are
    simply delegated upward.
    """

    def addEntity(self, entity):
        """Delegate to the schematic's dimension-wide entity list."""
        self.dimension.addEntity(entity)

    def addTileEntity(self, tileEntity):
        """Delegate to the schematic's dimension-wide tile entity list."""
        self.dimension.addTileEntity(tileEntity)
class SchematicFileAdapter(FakeChunkedLevelAdapter):
    """
    Level adapter that stores a box of blocks as a .schematic NBT structure
    (MCEdit/WorldEdit format) and exposes it through the fake-chunked-level
    interface.  Block arrays are stored in y,z,x order in the file.
    """
    # XXX use abstract entity ref or select correct ref for contained level format
    ChunkDataClass = SchematicChunkData

    def __init__(self, shape=None, filename=None, blocktypes='Alpha', readonly=False, resume=False):
        """
        Creates an object which stores a section of a Minecraft world as an
        NBT structure. The order of the coordinates for the block arrays in
        the file is y,z,x. This is the same order used in Minecraft 1.4's
        chunk sections.

        Parameters
        ----------
        shape: tuple of int
            The shape of the schematic as (x, y, z)
        filename: basestring
            Path to a file to load a saved schematic from.
        blocktypes: basestring or BlockTypeSet
            The name of a builtin blocktypes set (one of
            "Classic", "Alpha", "Pocket") to indicate allowable blocks. The default
            is Alpha. An instance of BlockTypeSet may be passed instead.

        Returns
        ----------
        SchematicFileAdapter
        """
        self.EntityRef = PCEntityRef
        self.TileEntityRef = PCTileEntityRef

        if filename is None and shape is None:
            raise ValueError("shape or filename required to create %s" % self.__class__.__name__)
        if filename:
            self.filename = filename
            if os.path.exists(filename):
                rootTag = nbt.load(filename)
            else:
                rootTag = None
        else:
            self.filename = None
            rootTag = None

        if blocktypes in blocktypeClassesByName:
            self.blocktypes = blocktypeClassesByName[blocktypes]()
        else:
            if not isinstance(blocktypes, BlockTypeSet):
                # BUGFIX: the message was previously passed logging-style as a
                # second positional argument to ValueError and never formatted.
                raise ValueError("%s is not a recognized BlockTypeSet" % (blocktypes,))
            self.blocktypes = blocktypes

        if rootTag:
            self.rootTag = rootTag
            if "Materials" in rootTag:
                self.blocktypes = blocktypeClassesByName[self.Materials]()
            else:
                rootTag["Materials"] = nbt.TAG_String(self.blocktypes.name)

            w = self.rootTag["Width"].value
            l = self.rootTag["Length"].value
            h = self.rootTag["Height"].value

            assert self.rootTag["Blocks"].value.size == w * l * h
            self._Blocks = self.rootTag["Blocks"].value.astype('uint16').reshape(h, l, w)  # _Blocks is y, z, x
            del self.rootTag["Blocks"]

            if "AddBlocks" in self.rootTag:
                # Use WorldEdit's "AddBlocks" array to load and store the 4 high bits of a block ID.
                # Unlike Minecraft's NibbleArrays, this array stores the first block's bits in the
                # 4 high bits of the first byte.

                size = (h * l * w)

                # If odd, add one to the size to make sure the adjacent slices line up.
                add = numpy.empty(size + (size & 1), 'uint16')

                # Fill the even bytes with data
                add[::2] = self.rootTag["AddBlocks"].value

                # Copy the low 4 bits to the odd bytes
                add[1::2] = add[::2] & 0xf

                # Shift the even bytes down
                add[::2] >>= 4

                # Shift every byte up before merging it with Blocks
                add <<= 8
                self._Blocks |= add[:size].reshape(h, l, w)
                del self.rootTag["AddBlocks"]

            self.rootTag["Data"].value = self.rootTag["Data"].value.reshape(h, l, w)

            if "Biomes" in self.rootTag:
                self.rootTag["Biomes"].value.shape = (l, w)

            # If BlockIDs is present, it contains an ID->internalName mapping
            # from the source level's FML tag.

            if "BlockIDs" in self.rootTag:
                self.blocktypes.addBlockIDsFromSchematicTag(self.rootTag["BlockIDs"])

            # If itemStackVersion is present, it was exported from MCEdit 2.0.
            # Its value is either 17 or 18, the values of the version constants.
            # ItemIDs will also be present.

            # If itemStackVersion is not present, this schematic was exported from
            # WorldEdit or MCEdit 1.0. The itemStackVersion cannot be determined
            # without searching the entities for an itemStack and checking
            # the type of its `id` tag. If no itemStacks are found, the
            # version defaults to 1.8 which does not need an ItemIDs tag.
            if "itemStackVersion" in self.rootTag:
                itemStackVersion = self.rootTag["itemStackVersion"].value
                if itemStackVersion not in (VERSION_1_7, VERSION_1_8):
                    raise LevelFormatError("Unknown item stack version %d" % itemStackVersion)
                if itemStackVersion == VERSION_1_7:
                    itemIDs = self.rootTag.get("ItemIDs")
                    if itemIDs is not None:
                        self.blocktypes.addItemIDsFromSchematicTag(itemIDs)

                self.blocktypes.itemStackVersion = itemStackVersion
            else:
                self.blocktypes.itemStackVersion = self.getItemStackVersionFromEntities()
        else:
            # No file to load: build a fresh, empty schematic of the given shape.
            rootTag = nbt.TAG_Compound(name="Schematic")
            rootTag["Height"] = nbt.TAG_Short(shape[1])
            rootTag["Length"] = nbt.TAG_Short(shape[2])
            rootTag["Width"] = nbt.TAG_Short(shape[0])

            rootTag["Entities"] = nbt.TAG_List()
            rootTag["TileEntities"] = nbt.TAG_List()
            rootTag["Materials"] = nbt.TAG_String(self.blocktypes.name)
            rootTag["itemStackVersion"] = nbt.TAG_Byte(self.blocktypes.itemStackVersion)

            self._Blocks = zeros((shape[1], shape[2], shape[0]), 'uint16')
            rootTag["Data"] = nbt.TAG_Byte_Array(zeros((shape[1], shape[2], shape[0]), uint8))

            rootTag["Biomes"] = nbt.TAG_Byte_Array(zeros((shape[2], shape[0]), uint8))

            self.rootTag = rootTag
            self.rootTag["BlockIDs"] = blockIDMapping(self.blocktypes)
            itemMapping = itemIDMapping(self.blocktypes)
            if itemMapping is not None:
                self.rootTag["ItemIDs"] = itemMapping  # Only present for Forge 1.7

        # Expand blocks and data to chunk edges
        h16 = (self.Height + 15) & ~0xf
        l16 = (self.Length + 15) & ~0xf
        w16 = (self.Width + 15) & ~0xf

        blocks = self._Blocks
        self._Blocks = numpy.zeros((h16, l16, w16), blocks.dtype)
        self._Blocks[:blocks.shape[0], :blocks.shape[1], :blocks.shape[2]] = blocks

        data = self.rootTag["Data"].value
        self.rootTag["Data"].value = numpy.zeros((h16, l16, w16), data.dtype)
        self.rootTag["Data"].value[:data.shape[0], :data.shape[1], :data.shape[2]] = data

        self.rootTag["Data"].value &= 0xF  # discard high bits

        # Bucket entities/tile entities by chunk for the fake-chunk interface.
        self.entitiesByChunk = defaultdict(list)
        for tag in self.rootTag["Entities"]:
            ref = self.EntityRef(tag)
            pos = ref.Position
            cx, cy, cz = pos.chunkPos()
            self.entitiesByChunk[cx, cz].append(tag)

        self.tileEntitiesByChunk = defaultdict(list)
        for tag in self.rootTag["TileEntities"]:
            ref = self.TileEntityRef(tag)
            pos = ref.Position
            cx, cy, cz = pos.chunkPos()
            self.tileEntitiesByChunk[cx, cz].append(tag)

    def getItemStackVersionFromEntities(self):
        """Infer the item stack version by inspecting item `id` tags.

        A string `id` means 1.8+, a short `id` means 1.7; with no item
        stacks at all, default to 1.8 (no ItemIDs tag required).
        """
        for listTag in self.rootTag["Entities"], self.rootTag["TileEntities"]:
            for name, tag, path in nbt.walk(listTag):
                if ItemRef.tagIsItem(tag):
                    if tag["id"].tagID == nbt.ID_STRING:
                        return VERSION_1_8
                    if tag["id"].tagID == nbt.ID_SHORT:
                        return VERSION_1_7

        # No itemstacks - use version 1.8 since ItemIDs won't need to
        # be added to the root tag.
        return VERSION_1_8

    def fakeEntitiesForChunk(self, cx, cz):
        """Return the (entities, tileEntities) lists bucketed for chunk (cx, cz)."""
        return self.entitiesByChunk[cx, cz], self.tileEntitiesByChunk[cx, cz]

    def syncToDisk(self):
        """
        Ugh... reimplement this class in a way that uses a RevisionHistory?
        """
        pass

    def saveChanges(self):
        """Write the schematic back to the file it was opened from."""
        return self.saveToFile(self.filename)

    def saveChangesIter(self):
        """Generator wrapper around saveChanges for progress reporting."""
        self.saveChanges()
        yield 100, 100, "Done"

    def saveToFile(self, filename):
        """ save to file named filename."""
        self.Materials = self.blocktypes.name
        self.rootTag["Blocks"] = nbt.TAG_Byte_Array(self._Blocks[:self.Height, :self.Length, :self.Width].astype('uint8'))
        self.rootTag["Data"].value = self.rootTag["Data"].value[:self.Height, :self.Length, :self.Width]

        add = self._Blocks >> 8
        if add.any():
            add = add[:self.Height, :self.Length, :self.Width]
            # WorldEdit AddBlocks compatibility.
            # The first 4-bit value is stored in the high bits of the first byte.

            # Increase odd size by one to align slices.
            packed_add = zeros(add.size + (add.size & 1), 'uint8')
            packed_add[:add.size] = add.ravel()

            # Shift even bytes to the left
            packed_add[::2] <<= 4

            # Merge odd bytes into even bytes
            packed_add[::2] |= packed_add[1::2]

            # Save only the even bytes, now that they contain the odd bytes in their lower bits.
            packed_add = packed_add[0::2]
            self.rootTag["AddBlocks"] = nbt.TAG_Byte_Array(packed_add)

        # Flatten the per-chunk entity buckets back into the global lists.
        entities = []
        for e in self.entitiesByChunk.values():
            entities.extend(e)
        tileEntities = []
        for te in self.tileEntitiesByChunk.values():
            tileEntities.extend(te)

        self.rootTag["Entities"] = nbt.TAG_List(entities)
        self.rootTag["TileEntities"] = nbt.TAG_List(tileEntities)

        log.info("Saving schematic %s with %d blocks, %d Entities and %d TileEntities",
                 os.path.basename(filename),
                 self.rootTag["Blocks"].value.size,
                 len(self.rootTag["Entities"]),
                 len(self.rootTag["TileEntities"]),
                 )

        with open(filename, 'wb') as chunkfh:
            self.rootTag.save(chunkfh)

        # "Blocks"/"AddBlocks" exist only while saving; drop them again so the
        # in-memory representation stays the expanded _Blocks array.
        del self.rootTag["Blocks"]
        self.rootTag.pop("AddBlocks", None)

    def __repr__(self):
        return u"SchematicFileAdapter(shape={0}, blocktypes={2}, filename=\"{1}\")".format(self.size, self.filename or u"", self.Materials)

    # Schematics always start at y=0.
    minHeight = 0

    def getDimensionBounds(self, dimName=""):
        """Return the schematic's bounding box (origin at 0,0,0)."""
        return BoundingBox((0, 0, 0), (self.Width, self.Height, self.Length))

    @property
    def maxHeight(self):
        return self.Height

    @property
    def Length(self):
        return self.rootTag["Length"].value

    @property
    def Width(self):
        return self.rootTag["Width"].value

    @property
    def Height(self):
        return self.rootTag["Height"].value

    @property
    def Blocks(self):
        # Stored y,z,x; exposed x,z,y via a zero-copy axis swap.
        return swapaxes(self._Blocks, 0, 2)

    @property
    def Data(self):
        return swapaxes(self.rootTag["Data"].value, 0, 2)

    @property
    def Materials(self):
        return self.rootTag["Materials"].value

    @Materials.setter
    def Materials(self, val):
        if "Materials" not in self.rootTag:
            self.rootTag["Materials"] = nbt.TAG_String()
        self.rootTag["Materials"].value = val

    @property
    def Biomes(self):
        return swapaxes(self.rootTag["Biomes"].value, 0, 1)

    def getPlayer(self, *a, **kw):
        """Schematics have no players."""
        raise PlayerNotFound

    def playerNames(self):
        """Schematics have no players."""
        return ()

    @classmethod
    def _isTagLevel(cls, rootTag):
        """Recognize a schematic by its root tag name."""
        return "Schematic" == rootTag.name

    def _update_shape(self):
        """Refresh the Width/Length/Height tags from the current Blocks shape."""
        rootTag = self.rootTag
        shape = self.Blocks.shape
        rootTag["Height"] = nbt.TAG_Short(shape[2])
        rootTag["Length"] = nbt.TAG_Short(shape[1])
        rootTag["Width"] = nbt.TAG_Short(shape[0])
#
# def rotateLeft(self):
# """
# Rotate the schematic to the left (when looking down).
#
# Transform this schematic in place by rotating 90 degrees counterclockwise around the vertical axis.
#
# By default, rotateLeft and the other transformation functions use swapaxes
# and reversed slice to modify the indexing properties of self.Blocks without copying any data.
# """
#
# self._fakeEntities = None
# self._Blocks = | |
def trigger(a, b, c):
func(a, b, c, True)
func(a, b, c, False)
trigger.get_concrete_function()
root = tracking.AutoTrackable()
root.f = func
root = cycle(root, cycles)
self.assertAllEqual(root.f(), [1.0, 2.0, 3.0, True])
self.assertAllEqual(root.f(-1.0, training=False), [3.0, 2.0, -1.0, False])
with self.assertRaisesRegex(ValueError, "Could not find matching function"):
root.f(["hello", 1.0])
  def test_prefer_specific_trace(self, cycles):
    """After save/load, a Python-int call must keep hitting the int trace.

    `func` is traced once with a Python int (returns `a`) and once with a
    Tensor (returns `a + 1`); loading must not canonicalize int calls to the
    Tensor trace.
    """
    @def_function.function(autograph=False)
    def func(a):
      if isinstance(a, int):
        return a
      else:
        return a + 1

    self.assertAllEqual(2, func(2).numpy())
    self.assertAllEqual(3, func(constant_op.constant(2)).numpy())

    root = tracking.AutoTrackable()
    root.f = func
    root = cycle(root, cycles)

    # Int inputs hit the int trace (identity), Tensors hit the +1 trace.
    self.assertAllEqual(2, root.f(2).numpy())
    self.assertAllEqual(4, root.f(3).numpy())
    self.assertAllEqual(3, root.f(constant_op.constant(2)).numpy())
    self.assertAllEqual(4, root.f(constant_op.constant(3)).numpy())
def test_partial(self, cycles):
def f(x, y):
return x + y
func = def_function.function(
functools.partial(f, x=array_ops.zeros([1]), y=array_ops.ones([1])))
root = tracking.AutoTrackable()
root.f = func
self.assertAllEqual(root.f(), [1.0])
root = cycle(root, cycles)
self.assertAllEqual(root.f(), [1.0])
def test_partial_with_non_tensor_defaults(self, cycles):
def f(x, y=3):
return x + y
func = def_function.function(functools.partial(f, y=5))
root = tracking.AutoTrackable()
root.f = func
self.assertAllEqual(root.f(1), 6)
root = cycle(root, cycles)
self.assertAllEqual(root.f(1), 6)
def test_partial_with_positional(self, cycles):
def f(x, y):
return x + y
func = def_function.function(functools.partial(f, constant_op.constant(5)))
root = tracking.AutoTrackable()
root.f = func
self.assertAllEqual(root.f(1), 6)
root = cycle(root, cycles)
self.assertAllEqual(root.f(1), 6)
def test_partial_with_positional_captured_tensors(self, cycles):
def f(x, y):
return x + y
tensor = constant_op.constant(5) + constant_op.constant(7)
func = def_function.function(functools.partial(f, tensor))
root = tracking.AutoTrackable()
root.f = func
self.assertAllEqual(root.f(1), 13)
root = cycle(root, cycles)
self.assertAllEqual(root.f(1), 13)
def test_partial_keyword_hiding_default(self, cycles):
def f(x=3, training=True, y=7):
if training:
return x + y
else:
return x + y + 2
func = def_function.function(functools.partial(f, y=6))
root = tracking.AutoTrackable()
root.f = func
self.assertEqual(root.f().numpy(), 9)
self.assertEqual(root.f(training=False).numpy(), 11)
root = cycle(root, cycles)
self.assertEqual(root.f().numpy(), 9)
self.assertEqual(root.f(training=False).numpy(), 11)
def test_partial_with_kwargs(self, cycles):
def f(a, b, *args, **kwargs):
args_sum = sum(args)
return a + b + kwargs["some_tensor"] * kwargs["learning_rate"] + args_sum
constant_tensor = constant_op.constant(10)
func = def_function.function(
functools.partial(
f, 7, 1, 2, learning_rate=3, some_tensor=constant_tensor))
root = tracking.AutoTrackable()
root.f = func
self.assertEqual(root.f(constant_op.constant(4)).numpy(), 44)
root = cycle(root, cycles)
self.assertEqual(root.f(constant_op.constant(5)).numpy(), 45)
def test_partial_bind_only_first_argument(self, cycles):
if sys.version_info[0] < 3:
self.skipTest("Test is only valid in python3. Only then we get some more "
"advanced inspection of partials where this is allowed.")
def f(x, y):
return x + y
partial_func = functools.partial(f, x=5)
tf_func = def_function.function(partial_func)
root = tracking.AutoTrackable()
root.f = tf_func
self.assertAllEqual(root.f(y=constant_op.constant(7)), 12)
root = cycle(root, cycles)
self.assertAllEqual(root.f(y=constant_op.constant(9)), 14)
def test_partial_with_passed_fn_as_default(self, cycles):
def f(x, y):
return x(3) + y
def my_func(a):
return 2 * a
func = def_function.function(functools.partial(f, my_func))
root = tracking.AutoTrackable()
root.f = func
self.assertEqual(root.f(constant_op.constant(3)).numpy(), 9)
root = cycle(root, cycles)
self.assertEqual(root.f(constant_op.constant(3)).numpy(), 9)
def test_partial_with_input_signature(self, cycles):
def full_function(a, b, c=3.0):
return a, b, c
partial = functools.partial(full_function, 1, c=4)
self.assertAllEqual((1, 2.0, 4), partial(2.0))
signature = [tensor_spec.TensorSpec([], dtypes.float32)]
func = def_function.function(partial, input_signature=signature)
root = tracking.AutoTrackable()
root.f = func
a, b, c = root.f(2.0)
self.assertAllEqual([a.numpy(), b.numpy(), c.numpy()], (1, 2.0, 4))
root = cycle(root, cycles)
a, b, c = root.f(3.0)
self.assertAllEqual([a.numpy(), b.numpy(), c.numpy()], (1, 3.0, 4))
def test_convert_to_input_signature(self, cycles):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)])
def func(x):
return x
root = tracking.AutoTrackable()
root.f = func
root = cycle(root, cycles)
self.assertEqual([2], root.f([2]).numpy())
  def test_named_tuple(self, cycles):
    """Namedtuple-structured inputs traced via get_concrete_function survive
    serialization, and the restored callable accepts the same structure."""
    class NamedTupleType(collections.namedtuple("NamedTupleType", ["a", "b"])):
      pass

    @def_function.function
    def f(x):
      return x.a + x.b

    f.get_concrete_function(
        NamedTupleType(
            a=tensor_spec.TensorSpec(None, dtypes.float32, name="a"),
            b=tensor_spec.TensorSpec(None, dtypes.float32, name="b")))
    obj = tracking.AutoTrackable()
    obj.__call__ = f
    if sys.version_info.major == 3 and sys.version_info.minor < 5:
      # TODO(allenl): figure out why this doesn't work in Python3.4
      self.skipTest("Not working in Python 3.4")
    imported = cycle(obj, cycles)
    self.assertAllClose(3.,
                        imported(NamedTupleType(a=constant_op.constant(1.),
                                                b=constant_op.constant(2.))))
def test_extra_args(self, cycles):
@def_function.function
def f(x):
return math_ops.add(x["a"], 1.)
# Trigger a trace.
f({"a": constant_op.constant(2.0)})
obj = tracking.AutoTrackable()
obj.__call__ = f
imported = cycle(obj, cycles)
self.assertEqual(4.0, imported({"a": 3.0}).numpy())
with self.assertRaisesRegex(ValueError,
"Could not find matching function to call"):
imported({"a": 2.0, "b": 3.0})
def test_shapes_available(self, cycles):
@def_function.function(input_signature=[
tensor_spec.TensorSpec([None, 3], dtypes.int32),
tensor_spec.TensorSpec([None, 2], dtypes.int32)
])
def func(x, y):
return array_ops.concat([x, y], axis=1)
root = tracking.AutoTrackable()
root.f = func
root = cycle(root, cycles)
imported_graph = root.f.get_concrete_function().graph
input_x, input_y = imported_graph.inputs
self.assertEqual([None, 3], input_x.shape.as_list())
self.assertEqual([None, 2], input_y.shape.as_list())
output, = imported_graph.outputs
self.assertEqual([None, 5], output.shape.as_list())
signature = root.signatures["serving_default"]
self.assertEqual(
[None, 3], signature.inputs[0].shape.as_list())
self.assertEqual(
[None, 2], signature.inputs[1].shape.as_list())
self.assertEqual(
[None, 5], signature.outputs[0].shape.as_list())
  def test_variables_destroyed(self, cycles):
    """Neither the original nor the restored variable may be kept alive once
    the last strong reference is dropped (checked via weakrefs)."""
    v1 = variables.Variable(1.)
    weak_v1 = weakref.ref(v1)
    root = util.Checkpoint(v=v1)
    root = cycle(root, cycles)
    del v1
    self.assertIsNone(weak_v1())
    weak_v2 = weakref.ref(root.v)
    del root
    self.assertIsNone(weak_v2())
def test_variable_attributes_preserved(self, cycles):
v = variables.Variable(
1.,
trainable=False,
synchronization=variables.VariableSynchronization.NONE,
aggregation=variables.VariableAggregation.ONLY_FIRST_REPLICA)
self.assertEqual(variables.VariableSynchronization.NONE,
v.synchronization)
self.assertEqual(variables.VariableAggregation.ONLY_FIRST_REPLICA,
v.aggregation)
root = tracking.AutoTrackable()
root.v = v
root = cycle(root, cycles)
self.assertEqual(False, root.v.trainable)
self.assertEqual(variables.VariableSynchronization.NONE,
root.v.synchronization)
self.assertEqual(variables.VariableAggregation.ONLY_FIRST_REPLICA,
root.v.aggregation)
  def test_captured_dataset(self, cycles):
    """A tf.data.Dataset captured by a module's tf.function is serialized and
    iterates identically after loading (sum of x * i**2 for i in range(5))."""
    class HasDataset(module.Module):

      def __init__(self):
        super(HasDataset, self).__init__()
        self.dataset = (
            dataset_ops.Dataset.range(5)
            .map(lambda x: x ** 2))

      @def_function.function
      def __call__(self, x):
        current_sum = array_ops.zeros([], dtype=dtypes.int64)
        for element in self.dataset:
          current_sum += x * element
        return current_sum

    root = HasDataset()
    self.assertEqual(
        3 * (1 + 4 + 9 + 16),
        root(constant_op.constant(3, dtype=dtypes.int64)).numpy())
    root = cycle(root, cycles)
    self.assertEqual(
        3 * (1 + 4 + 9 + 16),
        root(constant_op.constant(3, dtype=dtypes.int64)).numpy())
def test_tuple_signature(self, cycles):
root = util.Checkpoint()
root.f = def_function.function(
lambda: (array_ops.ones([]), array_ops.zeros([])),
input_signature=())
root = cycle(root, cycles, signatures=root.f)
self.assertEqual(({"output_0": 1., "output_1": 0.}),
self.evaluate(root.signatures["serving_default"]()))
def test_version_info(self, cycles):
root = util.Checkpoint()
root = cycle(root, cycles)
self.assertEqual(versions.__version__, root.tensorflow_version)
self.assertEqual(versions.__git_version__, root.tensorflow_git_version)
  def test_load_grad_save(self, cycles):
    """Gradients must flow through a restored function; the save/load cycle
    happens inside the loop after each tape use, so every iteration exercises
    a freshly restored `g`."""
    root = util.Checkpoint()
    root.v = variables.Variable(2.)
    root.f = def_function.function(lambda x: root.v * x)
    root.g = def_function.function(root.f)
    for _ in range(cycles):
      with backprop.GradientTape() as tape:
        inp = constant_op.constant(2.)
        tape.watch(inp)
        output = root.g(inp)
        self.assertAllClose(4., output)
      # d(v * x)/dx == v == 2.
      self.assertAllClose(2., tape.gradient(output, inp))
      root = cycle(root, 1)
  def test_destroy_resource(self, cycles):
    """A CapturableResourceDeleter attached to a TrackableResource must destroy
    the underlying resource when the loaded object is garbage collected."""
    def get_handle():
      return resource_variable_ops.var_handle_op(
          shape=tensor_shape.as_shape([]),
          dtype=dtypes.float32,
          shared_name="my_var_name",
          name="my_var",
          container="my_container")

    class MyResourceDeleter(tracking.CapturableResourceDeleter):

      def destroy_resource(self):
        handle = get_handle()
        resource_variable_ops.destroy_resource_op(
            handle, ignore_lookup_error=True)

    class MyResource(tracking.TrackableResource):

      def __init__(self):
        # Set the resource deleter, so when the resource object goes out of
        # scope it will be deleted automatically.
        super(MyResource, self).__init__(deleter=MyResourceDeleter())

      def _create_resource(self):
        return get_handle()

      def _initialize(self):
        resource_variable_ops.assign_variable_op(
            self.resource_handle, 1.0, name="assign")

    class MyModel(tracking.AutoTrackable):

      def __init__(self):
        super(MyModel, self).__init__()
        self.resource = MyResource()

      @def_function.function(input_signature=[])
      def increase(self):
        handle = self.resource.resource_handle
        resource_variable_ops.assign_add_variable_op(
            handle, 10.0, name="assign_add")
        return resource_variable_ops.read_variable_op(handle, dtypes.float32)

    root = MyModel()
    imported = cycle(root, cycles)
    self.assertEqual(11, imported.increase().numpy())  # Create the resource.

    handle = imported.resource.resource_handle

    # Delete the imported SaveModel. Since we explicitly set the deleter, it
    # should destroy the resource automatically.
    del imported

    # Try to destroy the resource again, should fail.
    with self.assertRaisesRegex(errors.NotFoundError,
                                r"Resource .* does not exist."):
      resource_variable_ops.destroy_resource_op(
          handle, ignore_lookup_error=False)
def test_function_called_as_operation(self, cycles):
@framework_function.Defun(dtypes.float32)
def inner(x):
return x + 1.
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.float32)])
def outer(x):
return inner(x)
root = module.Module()
root.f = outer
imported = cycle(root, cycles)
self.assertAllClose(2., imported.f(constant_op.constant(1.)))
  def test_ragged(self, cycles):
    """RaggedTensor inputs work after loading, both with an empty signatures
    map (positional default allowed) and with default serving signatures
    (all traced c values must be passed explicitly)."""
    @def_function.function
    def f(x, c=1):
      """Returns Tensor x incremented by Python constant c."""
      return math_ops.add(x, c)

    for c in (1, 2, 3):
      _ = f.get_concrete_function(
          ragged_tensor.RaggedTensorSpec([None, None], dtype=dtypes.int32),
          c)

    obj = tracking.AutoTrackable()
    obj.f = f

    imported1 = cycle(obj, cycles, signatures={})
    rt = ragged_factory_ops.constant([[1, 2], [3]])
    self.assertAllEqual(imported1.f(rt), [[2, 3], [4]])
    self.assertAllEqual(imported1.f(rt, 2), [[3, 4], [5]])
    self.assertAllEqual(imported1.f(rt, 3), [[4, 5], [6]])

    imported2 = cycle(obj, cycles)
    rt = ragged_factory_ops.constant([[1, 2], [3]])
    self.assertAllEqual(imported2.f(rt, 1), [[2, 3], [4]])
    self.assertAllEqual(imported2.f(rt, 2), [[3, 4], [5]])
    self.assertAllEqual(imported2.f(rt, 3), [[4, 5], [6]])
def test_accepts_io_device(self, cycles):
options = load_options.LoadOptions()
self.assertIsNone(options.experimental_io_device)
options = load_options.LoadOptions(experimental_io_device="/job:localhost")
self.assertEqual("/job:localhost", options.experimental_io_device)
def test_load_custom_saveable_object(self, cycles):
root = tracking.AutoTrackable()
root.table = lookup_ops.MutableHashTable(dtypes.string, dtypes.float32, -1)
root.table.insert("foo", 15)
root.table2 = lookup_ops.MutableHashTable(dtypes.string, dtypes.float32, -1)
root.table2.insert("idk", 21)
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.string)])
def lookup(key):
return root.table.lookup(key)
root.lookup = lookup
imported = cycle(root, cycles)
self.assertEqual(self.evaluate(imported.lookup("foo")), 15)
self.assertEqual(self.evaluate(imported.lookup("idk")), -1)
  def test_saving_ndarray_specs(self, cycles):
    """tf.experimental.numpy ndarray inputs/outputs round-trip both for an
    untraced function and one with an explicit NdarraySpec signature."""
    class NdarrayModule(module.Module):

      @def_function.function
      def plain(self, x):
        return tnp.add(x, 1)

      @def_function.function(input_signature=[
          np_arrays.NdarraySpec(tensor_spec.TensorSpec([], dtypes.float32))])
      def with_signature(self, x):
        return tnp.add(x, 1)

    m = NdarrayModule()
    c = tnp.asarray(3.0, tnp.float32)
    output_plain, output_with_signature = m.plain(c), m.with_signature(c)

    loaded_m = cycle(m, cycles)

    load_output_plain, load_output_with_signature = (
        loaded_m.plain(c), loaded_m.with_signature(c))

    # Both call paths must keep returning ndarrays with unchanged values.
    self.assertIsInstance(output_plain, tnp.ndarray)
    self.assertIsInstance(load_output_plain, tnp.ndarray)
    self.assertIsInstance(output_with_signature, tnp.ndarray)
    self.assertIsInstance(load_output_with_signature, tnp.ndarray)

    self.assertAllClose(output_plain, load_output_plain)
    self.assertAllClose(output_with_signature, load_output_with_signature)
class SingleCycleTests(test.TestCase, parameterized.TestCase):
def test_load_with_tags(self):
root = tracking.AutoTrackable()
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
with self.assertRaises(ValueError):
load.load(path, tags=[tag_constants.EVAL])
load.load(path, tags=[tag_constants.SERVING])
load.load(path, tags=tag_constants.SERVING)
load.load(path, tags=set([tag_constants.SERVING]))
  def test_single_restore_op_used(self):
    """All variables in a SavedModel must be restored by a single bundled
    RestoreV2 op; counted via an op callback installed around load()."""
    root = module.Module()
    root.v1 = variables.Variable(1.)
    root.v2 = variables.Variable(2.)
    root.v3 = variables.Variable(3.)
    path = tempfile.mkdtemp(prefix=self.get_temp_dir())
    save.save(root, path)
    restore_count = 0
    def _count_restores(op_type, *unused_args, **unused_kwargs):
      nonlocal restore_count
      # op_type arrives as bytes from the op callback machinery.
      if op_type == b"RestoreV2":
        restore_count += 1
    op_callbacks.add_op_callback(_count_restores)
    load.load(path)
    op_callbacks.remove_op_callback(_count_restores)
    self.assertEqual(1, restore_count)
def test_docstring_examples(self):
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
exported = util.Checkpoint(v=variables.Variable(3.))
exported.f = def_function.function(
lambda x: exported.v * x,
input_signature=[
tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32)])
save.save(exported, path)
imported = load.load(path)
self.assertEqual(3., imported.v.numpy())
self.assertEqual(6., imported.f(x=constant_op.constant(2.)).numpy())
save.save(exported, path, exported.f.get_concrete_function())
imported = load.load(path)
f = imported.signatures["serving_default"]
| |
def func_f44f33e103f543419a1dcb8c9bab2103(r, p, s, q, N):
    """Return the sum of the sequence (i*p + q) % r + s for i in [0, N)."""
    total = 0
    for i in range(N):
        total += (i * p + q) % r + s
    return total
def func_51b005fbf8e74403aa64e1e560f4a121(r, p, s, q, N):
    """Build and return the list D where D[i] = (i*p + q) % r + s for i in [0, N)."""
    sequence = []
    for i in range(N):
        sequence.append((i * p + q) % r + s)
    return sequence
def func_d80b5445bcb8423ea115253bdafb1a49(D):
    """Return the sum of the elements of D."""
    return sum(D)
def func_c5c1cd9252a44a0fb2eb6bbc6cd0f635(D):
    """Return the sum of D (the 'ans' seed equals the total)."""
    return sum(D)
def func_8b0b1f540a134849aaf84f4e85719f85(D):
    """Return the prefix accumulator seed A (always 0)."""
    sum(D)  # keep the original evaluation (raises TypeError for non-iterables)
    return 0
def func_4e279cb869f14253b9cbe076a362d83a(D):
    """Return C, the suffix accumulator seed, which starts as sum(D)."""
    return sum(D)
def func_f40ec97aa3644f42a9c2594f5f938438(D):
    """Return the middle accumulator seed B (always 0)."""
    sum(D)  # keep the original evaluation (raises TypeError for non-iterables)
    return 0
def func_0f932a0c32b9485c925e60dbac99e6f5(D):
    """Return the left pointer seed a (always 0)."""
    sum(D)  # keep the original evaluation (raises TypeError for non-iterables)
    return 0
def func_eae41f32b71340c3b66c7a6a5d285881(S):
    """Return the left pointer seed a (always 0); S is accepted but unused."""
    return 0
def func_ca7a426aa9444f3abd5c4abf82b5acf1(S):
    """Return the middle accumulator seed B (always 0); S is unused."""
    return 0
def func_8de30364d1d748998bad9c5638546b12(S):
    """Return C, the suffix accumulator seed, which starts as S."""
    return S
def func_904c3271d17c4de9b2e444b2bbf608b3(S):
    """Return the right pointer seed b (always -1); S is unused."""
    return -1
def func_6b8a5929a0a4401f949aefc5f6257b5e(S):
    """Return the prefix accumulator seed A (always 0); S is unused."""
    return 0
def func_2396a2baecd84eea8232d5140016d6bf(S):
    """Return the running-minimum seed ans, which starts as S."""
    return S
def func_fb33cb9fed75486f8dec617b2a1f691e(N, D, S):
    """Two-pointer sweep splitting D into prefix A / middle B / suffix C;
    returns the last inner-loop balance t.

    BUGFIX: `ans` was used (`ans = min(ans, p)`) without ever being assigned,
    so any call with N >= 1 raised NameError.  Sibling variants in this file
    seed `ans = S`, so the same seed is used here.
    NOTE(review): `t` is only assigned inside the inner loop, so for inputs
    where it never runs (e.g. N < 2) the original would also raise NameError;
    that structure is preserved.
    """
    A, B, C = 0, 0, S
    ans = S  # BUGFIX: seed the running minimum as the sibling variants do
    a = 0
    b = -1
    while b < N - 1:
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    return t
def func_09d5055fdeb94fa8bad4f7d371486581(N, D, S):
    """Two-pointer sweep over D; returns ans, the minimum over b of the best
    max(A, B, C) balance.

    BUGFIX: `ans` was read before assignment (NameError for N >= 1); seeded
    with S as the sibling variants in this file do.
    """
    A, B, C = 0, 0, S
    ans = S  # BUGFIX: seed the running minimum as the sibling variants do
    a = 0
    b = -1
    while b < N - 1:
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    return ans
def func_455be6a458934a9ebbac6910cf5ce21f(N, D, S):
    """Two-pointer sweep; returns the final right pointer ``b`` (N - 1).

    Fix: ``ans`` was read before assignment (NameError); initialised to S.
    """
    A, B, C = 0, 0, S
    a = 0
    b = -1
    ans = S  # fix: was unbound when first reaching min(ans, p)
    while b < N - 1:
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    return b
def func_7dd36c38fa874062b025eaaaf8a79a59(N, D, S):
    """Two-pointer sweep; returns the residual right-segment sum ``C``.

    Fix: ``ans`` was read before assignment (NameError); initialised to S.
    """
    A, B, C = 0, 0, S
    a = 0
    b = -1
    ans = S  # fix: was unbound when first reaching min(ans, p)
    while b < N - 1:
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    return C
def func_f698eab025f14ab48a0ee6d93383b33e(N, D, S):
    """Two-pointer sweep; returns the final middle-segment sum ``B``.

    Fix: ``ans`` was read before assignment (NameError); initialised to S.
    """
    A, B, C = 0, 0, S
    a = 0
    b = -1
    ans = S  # fix: was unbound when first reaching min(ans, p)
    while b < N - 1:
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    return B
def func_961ae086e67b4957bf9fb8ce7e679ae7(N, D, S):
    """Two-pointer sweep; returns the last per-iteration optimum ``p``.

    Fix: ``ans`` was read before assignment (NameError); initialised to S.
    """
    A, B, C = 0, 0, S
    a = 0
    b = -1
    ans = S  # fix: was unbound when first reaching min(ans, p)
    while b < N - 1:
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    return p
def func_76c99b831d36499ca897111baa5be2a9(N, D, S):
    """Two-pointer sweep; returns the final left pointer ``a``.

    Fix: ``ans`` was read before assignment (NameError); initialised to S.
    """
    A, B, C = 0, 0, S
    a = 0
    b = -1
    ans = S  # fix: was unbound when first reaching min(ans, p)
    while b < N - 1:
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    return a
def func_314645dcd1514998a4b6ec54aa9d6e56(N, D, S):
    """Two-pointer sweep; returns the final left-segment sum ``A``.

    Fix: ``ans`` was read before assignment (NameError); initialised to S.
    """
    A, B, C = 0, 0, S
    a = 0
    b = -1
    ans = S  # fix: was unbound when first reaching min(ans, p)
    while b < N - 1:
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    return A
def func_c484d27f1e4a4aa9a72bfb03a0c515b9(N, D, S):
    """Two-pointer sweep with a final ratio normalisation; returns ``p``.

    Fixes: ``A``, ``B``, ``C`` and ``ans`` were never initialised
    (NameError on any input); initialised per the sibling functions'
    ``A, B, C = 0, 0, S`` / ``ans = S`` scaffold.
    """
    A, B, C = 0, 0, S  # fix: were undefined
    a = 0
    b = -1
    ans = S  # fix: was unbound when first reaching min(ans, p)
    while b < N - 1:
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    ans = float(S - ans) / S
    return p
def func_3319764df52e4a2981112f4e91efb612(N, D, S):
    """Two-pointer sweep with a final ratio normalisation; returns ``t``.

    Fixes: ``A``, ``B``, ``C`` and ``ans`` were never initialised
    (NameError on any input); initialised per the sibling scaffold.
    ``t`` is still unbound for N < 2, as in the original.
    """
    A, B, C = 0, 0, S  # fix: were undefined
    a = 0
    b = -1
    ans = S  # fix: was unbound when first reaching min(ans, p)
    while b < N - 1:
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    ans = float(S - ans) / S
    return t
def func_2b4f3fd635024a038365a3f10b460a1a(N, D, S):
    """Two-pointer sweep with a final ratio normalisation; returns ``a``.

    Fixes: ``A``, ``B``, ``C`` and ``ans`` were never initialised
    (NameError on any input); initialised per the sibling scaffold.
    """
    A, B, C = 0, 0, S  # fix: were undefined
    a = 0
    b = -1
    ans = S  # fix: was unbound when first reaching min(ans, p)
    while b < N - 1:
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    ans = float(S - ans) / S
    return a
def func_d5d3a78783db4755ade1e3476cd397a2(N, D, S):
    """Two-pointer sweep with a final ratio normalisation; returns ``C``.

    Fixes: ``A``, ``B``, ``C`` and ``ans`` were never initialised
    (NameError on any input); initialised per the sibling scaffold.
    """
    A, B, C = 0, 0, S  # fix: were undefined
    a = 0
    b = -1
    ans = S  # fix: was unbound when first reaching min(ans, p)
    while b < N - 1:
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    ans = float(S - ans) / S
    return C
def func_f2bf762e911444538048ea0e2fb32d39(N, D, S):
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A | |
import cv2
from PIL import Image, ImageEnhance, ImageFilter
#from skimage.util import random_noise
import mmcv
import numpy as np
from numpy import random
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
from ..registry import PIPELINES
try:
from imagecorruptions import corrupt
except ImportError:
corrupt = None
try:
import albumentations
from albumentations import Compose
except ImportError:
albumentations = None
Compose = None
@PIPELINES.register_module
class CQ_Resize(object):
    """Resize images & bbox & mask.

    This transform resizes the input image to some scale. Bboxes and masks
    are then resized with the same scale factor. If the input dict contains
    the key "scale", then the scale in the input dict is used; otherwise a
    scale list is chosen per sample in `_random_scale`: images whose
    original width is < 1000 use `img_scale_pg`, others use `img_scale_ps`.

    Each scale argument can either be a tuple (single-scale) or a list of
    tuple (multi-scale). There are 3 multiscale modes:
    - `ratio_range` is not None: randomly sample a ratio from the ratio range
      and multiply it with the image scale.
    - `ratio_range` is None and `multiscale_mode` == "range": randomly sample
      a scale from a range.
    - `ratio_range` is None and `multiscale_mode` == "value": randomly sample
      a scale from multiple scales.

    Args:
        img_scale_pg (tuple or list[tuple]): scales used when the original
            image width is < 1000.
        img_scale_ps (tuple or list[tuple]): scales used otherwise.
        multiscale_mode (str): Either "range" or "value".
        ratio_range (tuple[float]): (min_ratio, max_ratio)
        keep_ratio (bool): Whether to keep the aspect ratio when resizing the
            image.
    """
    def __init__(self,
                 img_scale_pg=None,
                 img_scale_ps=None,
                 multiscale_mode='range',
                 ratio_range=None,
                 keep_ratio=True):
        if img_scale_pg is None:
            self.img_scale_pg = None
        else:
            if isinstance(img_scale_pg, list):
                self.img_scale_pg = img_scale_pg
            else:
                self.img_scale_pg = [img_scale_pg]
            assert mmcv.is_list_of(self.img_scale_pg, tuple)
        if img_scale_ps is None:
            self.img_scale_ps = None
        else:
            if isinstance(img_scale_ps, list):
                self.img_scale_ps = img_scale_ps
            else:
                self.img_scale_ps = [img_scale_ps]
            assert mmcv.is_list_of(self.img_scale_ps, tuple)
        if ratio_range is not None:
            # mode 1: given a scale and a range of image ratio.
            # Fix: the original asserted on `self.img_scale`, an attribute
            # that is only assigned later in `_random_scale`, so this branch
            # always raised AttributeError.  Validate the per-branch scale
            # lists instead.
            for scales in (self.img_scale_pg, self.img_scale_ps):
                if scales is not None:
                    assert len(scales) == 1
        else:
            # mode 2: given multiple scales or a range of scales
            assert multiscale_mode in ['value', 'range']
        self.multiscale_mode = multiscale_mode
        self.ratio_range = ratio_range
        self.keep_ratio = keep_ratio
    @staticmethod
    def random_select(img_scales):
        """Pick one scale uniformly at random from a list of tuples."""
        assert mmcv.is_list_of(img_scales, tuple)
        scale_idx = np.random.randint(len(img_scales))
        img_scale = img_scales[scale_idx]
        return img_scale, scale_idx
    @staticmethod
    def random_sample(img_scales):
        """Sample (long_edge, short_edge) uniformly between two scales."""
        assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
        img_scale_long = [max(s) for s in img_scales]
        img_scale_short = [min(s) for s in img_scales]
        long_edge = np.random.randint(
            min(img_scale_long),
            max(img_scale_long) + 1)
        short_edge = np.random.randint(
            min(img_scale_short),
            max(img_scale_short) + 1)
        img_scale = (long_edge, short_edge)
        return img_scale, None
    @staticmethod
    def random_sample_ratio(img_scale, ratio_range):
        """Scale `img_scale` by a ratio drawn uniformly from `ratio_range`."""
        assert isinstance(img_scale, tuple) and len(img_scale) == 2
        min_ratio, max_ratio = ratio_range
        assert min_ratio <= max_ratio
        ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
        scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
        return scale, None
    def _random_scale(self, results):
        """Choose `results['scale']`; narrow images (<1000 px wide) use the
        `_pg` scale list, the rest use `_ps`."""
        if results['ori_shape'][1] < 1000:
            self.img_scale = self.img_scale_pg
        else:
            self.img_scale = self.img_scale_ps
        if self.ratio_range is not None:
            scale, scale_idx = self.random_sample_ratio(
                self.img_scale[0], self.ratio_range)
        elif len(self.img_scale) == 1:
            scale, scale_idx = self.img_scale[0], 0
        elif self.multiscale_mode == 'range':
            scale, scale_idx = self.random_sample(self.img_scale)
        elif self.multiscale_mode == 'value':
            scale, scale_idx = self.random_select(self.img_scale)
        else:
            raise NotImplementedError
        results['scale'] = scale
        results['scale_idx'] = scale_idx
    def _resize_img(self, results):
        """Resize `results['img']` and record shape/scale bookkeeping."""
        if self.keep_ratio:
            img, scale_factor = mmcv.imrescale(
                results['img'], results['scale'], return_scale=True)
        else:
            img, w_scale, h_scale = mmcv.imresize(
                results['img'], results['scale'], return_scale=True)
            scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
                                    dtype=np.float32)
        results['img'] = img
        results['img_shape'] = img.shape
        results['pad_shape'] = img.shape  # in case that there is no padding
        results['scale_factor'] = scale_factor
        results['keep_ratio'] = self.keep_ratio
    def _resize_bboxes(self, results):
        """Scale every bbox field and clip to the resized image bounds."""
        img_shape = results['img_shape']
        for key in results.get('bbox_fields', []):
            bboxes = results[key] * results['scale_factor']
            bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1] - 1)
            bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0] - 1)
            results[key] = bboxes
    def _resize_masks(self, results):
        """Resize mask fields with nearest-neighbour interpolation."""
        for key in results.get('mask_fields', []):
            if results[key] is None:
                continue
            if self.keep_ratio:
                masks = [
                    mmcv.imrescale(
                        mask, results['scale_factor'], interpolation='nearest')
                    for mask in results[key]
                ]
            else:
                mask_size = (results['img_shape'][1], results['img_shape'][0])
                masks = [
                    mmcv.imresize(mask, mask_size, interpolation='nearest')
                    for mask in results[key]
                ]
            results[key] = np.stack(masks)
    def _resize_seg(self, results):
        """Resize semantic segmentation maps (nearest-neighbour)."""
        for key in results.get('seg_fields', []):
            if self.keep_ratio:
                gt_seg = mmcv.imrescale(
                    results[key], results['scale'], interpolation='nearest')
            else:
                gt_seg = mmcv.imresize(
                    results[key], results['scale'], interpolation='nearest')
            results['gt_semantic_seg'] = gt_seg
    def __call__(self, results):
        """Apply the resize to image, bboxes, masks and seg in `results`."""
        if 'scale' not in results:
            self._random_scale(results)
        self._resize_img(results)
        self._resize_bboxes(results)
        self._resize_masks(results)
        self._resize_seg(results)
        return results
    def __repr__(self):
        # NOTE(review): self.img_scale only exists after _random_scale has
        # run at least once; repr() before that raises AttributeError.
        repr_str = self.__class__.__name__
        repr_str += ('(img_scale={}, multiscale_mode={}, ratio_range={}, '
                     'keep_ratio={})').format(self.img_scale,
                                              self.multiscale_mode,
                                              self.ratio_range,
                                              self.keep_ratio)
        return repr_str
@PIPELINES.register_module
class MinIoFRandomCrop(object):
    """Random crop the image & bboxes; candidate patches are filtered by an
    IoF threshold randomly selected from `min_iofs` (mode 1 returns the
    original image unchanged).

    Args:
        min_iofs (tuple): candidate IoF thresholds
        min_crop_size (float): minimum crop height/width as a fraction of
            the original image size.
    """
    def __init__(self, min_iofs=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3):
        # 1: return ori img
        self.min_iofs = min_iofs  # fix: keep for __repr__ (was never stored)
        self.sample_mode = (1, *min_iofs, 0)
        self.min_crop_size = min_crop_size
    def __call__(self, results):
        img, boxes, labels = [
            results[k] for k in ('img', 'gt_bboxes', 'gt_labels')
        ]
        h, w, c = img.shape
        while True:
            mode = random.choice(self.sample_mode)
            if mode == 1:
                return results
            min_iou = mode
            for i in range(50):
                new_w = random.uniform(self.min_crop_size * w, w)
                new_h = random.uniform(self.min_crop_size * h, h)
                # h / w in [0.5, 2]
                if new_h / new_w < 0.5 or new_h / new_w > 2:
                    continue
                left = random.uniform(w - new_w)
                top = random.uniform(h - new_h)
                patch = np.array(
                    (int(left), int(top), int(left + new_w), int(top + new_h)))
                overlaps = bbox_overlaps(
                    patch.reshape(-1, 4), boxes.reshape(-1, 4), mode='iof_crop').reshape(-1)
                # NOTE(review): rejects patches whose max IoF EXCEEDS the
                # threshold (opposite of MinIoURandomCrop) -- presumably
                # intentional for the 'iof_crop' mode; confirm.
                if overlaps.max() > min_iou:
                    continue
                # center of boxes should inside the crop img
                center = (boxes[:, :2] + boxes[:, 2:]) / 2
                mask = (center[:, 0] > patch[0]) * (
                    center[:, 1] > patch[1]) * (center[:, 0] < patch[2]) * (
                        center[:, 1] < patch[3])
                if not mask.any():
                    continue
                boxes = boxes[mask]
                labels = labels[mask]
                # adjust boxes
                img = img[patch[1]:patch[3], patch[0]:patch[2]]
                boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])
                boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])
                boxes -= np.tile(patch[:2], 2)
                results['img'] = img
                results['gt_bboxes'] = boxes
                results['gt_labels'] = labels
                return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        # fix: previously formatted self.min_ious, an attribute that never
        # existed, so repr() raised AttributeError.
        repr_str += '(min_iofs={}, min_crop_size={})'.format(
            self.min_iofs, self.min_crop_size)
        return repr_str
@PIPELINES.register_module
class RandomVerticalFlip(object):
    """Vertically flip the image & bboxes with a given probability.

    If the input dict already contains the key "flip" that decision is
    honoured; otherwise it is drawn at random using `flip_ratio`.

    Args:
        flip_ratio (float, optional): The flipping probability.
    """
    def __init__(self, flip_ratio=None):
        self.flip_ratio = flip_ratio
        if flip_ratio is not None:
            assert 0 <= flip_ratio <= 1
    def bbox_flip(self, bboxes, img_shape):
        """Mirror boxes across the horizontal axis.

        Args:
            bboxes(ndarray): shape (..., 4*k)
            img_shape(tuple): (height, width)
        """
        assert bboxes.shape[-1] % 4 == 0
        height = img_shape[0]
        mirrored = bboxes.copy()
        # y-min and y-max swap roles under a vertical flip.
        mirrored[..., 1::4] = height - bboxes[..., 3::4] - 1
        mirrored[..., 3::4] = height - bboxes[..., 1::4] - 1
        return mirrored
    def __call__(self, results):
        if 'flip' not in results:
            results['flip'] = bool(np.random.rand() < self.flip_ratio)
        if results['flip']:
            # flip the image, then every registered bbox field
            results['img'] = mmcv.imflip(results['img'], direction='vertical')
            for key in results.get('bbox_fields', []):
                results[key] = self.bbox_flip(results[key],
                                              results['img_shape'])
        return results
    def __repr__(self):
        return '{}(flip_ratio={})'.format(self.__class__.__name__,
                                          self.flip_ratio)
@PIPELINES.register_module
class BBoxJitter(object):
    """Randomly rescale (jitter) each bbox about its own centre.

    Args:
        min (int, optional): min scale factor applied to box size
        max (int, optional): max scale factor applied to box size
        ## origin w scale
    """
    def __init__(self, min=0, max=2):
        self.min_scale = min
        self.max_scale = max
        self.count = 0
    def bbox_jitter(self, bboxes, img_shape):
        """Jitter each box by a random scale in [min_scale, max_scale].

        Args:
            bboxes(ndarray): shape (..., 4*k)
            img_shape(tuple): (height, width)
        Returns:
            ndarray(float32): jittered boxes clipped to the image bounds.
        """
        assert bboxes.shape[-1] % 4 == 0
        if len(bboxes) == 0:
            return bboxes
        jitter_bboxes = []
        for box in bboxes:
            w = box[2] - box[0]
            h = box[3] - box[1]
            center_x = (box[0] + box[2]) / 2
            center_y = (box[1] + box[3]) / 2
            scale = np.random.uniform(self.min_scale, self.max_scale)
            w = w * scale / 2.
            h = h * scale / 2.
            xmin = center_x - w
            ymin = center_y - h
            xmax = center_x + w
            ymax = center_y + h
            box2 = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
            jitter_bboxes.append(box2)
        jitter_bboxes = np.array(jitter_bboxes, dtype=np.float32)
        jitter_bboxes[:, 0::2] = np.clip(jitter_bboxes[:, 0::2], 0, img_shape[1] - 1)
        # Fix: the y slice had been corrupted into an IPv6-like token
        # (`fd00:...`) -- restored to `1::2` so y-coords are clipped to the
        # image height, mirroring the x clip above.
        jitter_bboxes[:, 1::2] = np.clip(jitter_bboxes[:, 1::2], 0, img_shape[0] - 1)
        return jitter_bboxes
    def __call__(self, results):
        for key in results.get('bbox_fields', []):
            results[key] = self.bbox_jitter(results[key],
                                            results['img_shape'])
        return results
    def __repr__(self):
        return self.__class__.__name__ + '(bbox_jitter={}-{})'.format(
            self.min_scale, self.max_scale)
@PIPELINES.register_module
class RandomRotate(object):
"""Rotate the image & bbox.
If the input dict contains the key "rotate", then the flag will be used,
otherwise it will be randomly decided by a ratio | |
# Copyright 2012 <NAME>
# Copyright 2013 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
import tempfile
from cinder import db
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import test
from cinder.tests.image import fake as fake_image
from cinder.tests.test_volume import DriverTestCase
from cinder import units
from cinder.volume import configuration as conf
import cinder.volume.drivers.rbd as driver
from cinder.volume.flows.api import create_volume
LOG = logging.getLogger(__name__)
CEPH_MON_DUMP = """dumped monmap epoch 1
{ "epoch": 1,
"fsid": "33630410-6d93-4d66-8e42-3b953cf194aa",
"modified": "2013-05-22 17:44:56.343618",
"created": "2013-05-22 17:44:56.343618",
"mons": [
{ "rank": 0,
"name": "a",
"addr": "[::1]:6789\/0"},
{ "rank": 1,
"name": "b",
"addr": "[::1]:6790\/0"},
{ "rank": 2,
"name": "c",
"addr": "[::1]:6791\/0"},
{ "rank": 3,
"name": "d",
"addr": "127.0.0.1:6792\/0"},
{ "rank": 4,
"name": "e",
"addr": "example.com:6791\/0"}],
"quorum": [
0,
1,
2]}
"""
class TestUtil(test.TestCase):
    """Tests for module-level helpers of the rbd driver."""
    def test_ascii_str(self):
        """ascii_str passes ASCII through and rejects non-ASCII input."""
        # None is propagated unchanged.
        self.assertIsNone(driver.ascii_str(None))
        # Byte and unicode ASCII strings both normalise to 'foo'.
        for value in ('foo', u'foo'):
            self.assertEqual('foo', driver.ascii_str(value))
        # Non-ASCII input raises rather than being silently mangled.
        self.assertRaises(UnicodeEncodeError,
                          driver.ascii_str, 'foo' + unichr(300))
class RBDTestCase(test.TestCase):
    def setUp(self):
        """Build an RBDDriver wired to mocked rados/rbd modules and config."""
        super(RBDTestCase, self).setUp()
        self.cfg = mock.Mock(spec=conf.Configuration)
        self.cfg.volume_tmp_dir = None
        self.cfg.rbd_pool = 'rbd'
        self.cfg.rbd_ceph_conf = None
        self.cfg.rbd_secret_uuid = None
        self.cfg.rbd_user = None
        self.cfg.volume_dd_blocksize = '1M'
        # set some top level mocks for these common modules and tests can then
        # set method/attributes as required.
        self.rados = mock.Mock()
        self.rbd = mock.Mock()
        self.rbd.RBD = mock.Mock
        self.rbd.Image = mock.Mock
        self.rbd.ImageSnapshot = mock.Mock
        mock_exec = mock.Mock()
        mock_exec.return_value = ('', '')
        self.driver = driver.RBDDriver(execute=mock_exec,
                                       configuration=self.cfg,
                                       rados=self.rados,
                                       rbd=self.rbd)
        self.driver.set_initialized()
        self.volume_name = u'volume-00000001'
        self.snapshot_name = u'snapshot-00000001'
        self.volume_size = 1
        self.volume = dict(name=self.volume_name, size=self.volume_size)
        self.snapshot = dict(volume_name=self.volume_name,
                             name=self.snapshot_name)
    def tearDown(self):
        """No extra teardown needed; defer to the base TestCase."""
        super(RBDTestCase, self).tearDown()
    @mock.patch('cinder.volume.drivers.rbd.RADOSClient')
    def test_create_volume(self, mock_client):
        """create_volume creates a layered image when layering is supported."""
        client = mock_client.return_value
        client.__enter__.return_value = client
        self.driver._supports_layering = mock.Mock()
        self.driver._supports_layering.return_value = True
        self.rbd.RBD.create = mock.Mock()
        self.driver.create_volume(self.volume)
        args = [client.ioctx, str(self.volume_name),
                self.volume_size * units.GiB]
        kwargs = {'old_format': False,
                  'features': self.rbd.RBD_FEATURE_LAYERING}
        # NOTE(review): on pre-2.0 mock, a bare assert_called_once() is an
        # auto-created no-op attribute, not a real assertion -- confirm the
        # mock version pinned by this tree.
        self.rbd.RBD.create.assert_called_once()
        client.__enter__.assert_called_once()
        client.__exit__.assert_called_once()
        self.driver._supports_layering.assert_called_once()
        self.rbd.RBD.create.assert_called_once_with(*args, **kwargs)
    @mock.patch('cinder.volume.drivers.rbd.RADOSClient')
    def test_create_volume_no_layering(self, mock_client):
        """Without layering support the image is old-format, no features."""
        client = mock_client.return_value
        client.__enter__.return_value = client
        self.driver._supports_layering = mock.Mock()
        self.driver._supports_layering.return_value = False
        self.rbd.RBD.create = mock.Mock()
        self.driver.create_volume(self.volume)
        args = [client.ioctx, str(self.volume_name),
                self.volume_size * units.GiB]
        kwargs = {'old_format': True,
                  'features': 0}
        self.rbd.RBD.create.assert_called_once()
        client.__enter__.assert_called_once()
        client.__exit__.assert_called_once()
        self.driver._supports_layering.assert_called_once()
        self.rbd.RBD.create.assert_called_once_with(*args, **kwargs)
    @mock.patch('cinder.volume.drivers.rbd.RADOSClient')
    def test_delete_volume(self, mock_client):
        """A clone-free, snapshot-free volume is removed outright."""
        client = mock_client.return_value
        self.driver.rbd.Image.list_snaps = mock.Mock()
        self.driver.rbd.Image.list_snaps.return_value = []
        self.driver.rbd.Image.close = mock.Mock()
        self.driver.rbd.Image.remove = mock.Mock()
        self.driver.rbd.Image.unprotect_snap = mock.Mock()
        self.driver._get_clone_info = mock.Mock()
        self.driver._get_clone_info.return_value = (None, None, None)
        self.driver._delete_backup_snaps = mock.Mock()
        self.driver.delete_volume(self.volume)
        self.driver._get_clone_info.assert_called_once()
        self.driver.rbd.Image.list_snaps.assert_called_once()
        client.__enter__.assert_called_once()
        client.__exit__.assert_called_once()
        self.driver._delete_backup_snaps.assert_called_once()
        self.assertFalse(self.driver.rbd.Image.unprotect_snap.called)
        # NOTE(review): `remove` was mocked on rbd.Image above but asserted
        # on rbd.RBD here; on old mock the bare assert_called_once() is a
        # no-op so the mismatch goes unnoticed -- verify which API
        # delete_volume actually calls.
        self.driver.rbd.RBD.remove.assert_called_once()
    @mock.patch('cinder.volume.drivers.rbd.rbd')
    def test_delete_volume_not_found(self, mock_rbd):
        """Deleting a volume whose image is missing is a silent no-op."""
        mock_rbd.RBD = mock.Mock
        mock_rbd.ImageNotFound = Exception
        mock_rbd.Image.side_effect = mock_rbd.ImageNotFound
        self.driver.rbd = mock_rbd
        with mock.patch.object(driver, 'RADOSClient'):
            self.assertIsNone(self.driver.delete_volume(self.volume))
            mock_rbd.Image.assert_called_once()
    def test_delete_busy_volume(self):
        """An ImageBusy error during removal surfaces as VolumeIsBusy."""
        self.rbd.Image.close = mock.Mock()
        self.rbd.Image.list_snaps = mock.Mock()
        self.rbd.Image.list_snaps.return_value = []
        self.rbd.Image.unprotect_snap = mock.Mock()
        self.rbd.ImageBusy = Exception
        self.rbd.RBD.remove = mock.Mock()
        self.rbd.RBD.remove.side_effect = self.rbd.ImageBusy
        self.driver._get_clone_info = mock.Mock()
        self.driver._get_clone_info.return_value = (None, None, None)
        self.driver._delete_backup_snaps = mock.Mock()
        with mock.patch.object(driver, 'RADOSClient') as mock_rados_client:
            self.assertRaises(exception.VolumeIsBusy,
                              self.driver.delete_volume, self.volume)
            self.driver._get_clone_info.assert_called_once()
            self.rbd.Image.list_snaps.assert_called_once()
            mock_rados_client.assert_called_once()
            self.driver._delete_backup_snaps.assert_called_once()
            self.assertFalse(self.rbd.Image.unprotect_snap.called)
            self.rbd.RBD.remove.assert_called_once()
    @mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy')
    def test_create_snapshot(self, mock_proxy):
        """create_snapshot both creates and protects the snapshot."""
        proxy = mock_proxy.return_value
        proxy.__enter__.return_value = proxy
        self.driver.create_snapshot(self.snapshot)
        args = [str(self.snapshot_name)]
        proxy.create_snap.assert_called_with(*args)
        proxy.protect_snap.assert_called_with(*args)
    @mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy')
    def test_delete_snapshot(self, mock_proxy):
        """delete_snapshot unprotects and removes the snapshot."""
        proxy = mock_proxy.return_value
        proxy.__enter__.return_value = proxy
        self.driver.delete_snapshot(self.snapshot)
        args = [str(self.snapshot_name)]
        proxy.remove_snap.assert_called_with(*args)
        proxy.unprotect_snap.assert_called_with(*args)
    def test_get_clone_info(self):
        """Clone info comes straight from parent_info when no snap given."""
        volume = self.rbd.Image()
        volume.set_snap = mock.Mock()
        volume.parent_info = mock.Mock()
        parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name))
        volume.parent_info.return_value = parent_info
        info = self.driver._get_clone_info(volume, self.volume_name)
        self.assertEqual(info, parent_info)
        self.assertFalse(volume.set_snap.called)
        volume.parent_info.assert_called_once()
    def test_get_clone_info_w_snap(self):
        """With a snap argument, set_snap is entered and then restored."""
        volume = self.rbd.Image()
        volume.set_snap = mock.Mock()
        volume.parent_info = mock.Mock()
        parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name))
        volume.parent_info.return_value = parent_info
        snapshot = self.rbd.ImageSnapshot()
        info = self.driver._get_clone_info(volume, self.volume_name,
                                           snap=snapshot)
        self.assertEqual(info, parent_info)
        # NOTE(review): assert_called_once() followed by call_count == 2 is
        # contradictory; it only passes because assert_called_once() is an
        # auto-created no-op on old mock versions.
        volume.set_snap.assert_called_once()
        self.assertEqual(volume.set_snap.call_count, 2)
        volume.parent_info.assert_called_once()
    def test_get_clone_info_w_exception(self):
        """ImageNotFound from parent_info yields the (None, None, None) triple."""
        self.rbd.ImageNotFound = Exception
        volume = self.rbd.Image()
        volume.set_snap = mock.Mock()
        volume.parent_info = mock.Mock()
        volume.parent_info.side_effect = self.rbd.ImageNotFound
        snapshot = self.rbd.ImageSnapshot()
        info = self.driver._get_clone_info(volume, self.volume_name,
                                           snap=snapshot)
        self.assertEqual(info, (None, None, None))
        volume.set_snap.assert_called_once()
        self.assertEqual(volume.set_snap.call_count, 2)
        volume.parent_info.assert_called_once()
    def test_get_clone_info_deleted_volume(self):
        """A '<name>.deleted' volume name still resolves its clone info."""
        volume = self.rbd.Image()
        volume.set_snap = mock.Mock()
        volume.parent_info = mock.Mock()
        parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name))
        volume.parent_info.return_value = parent_info
        info = self.driver._get_clone_info(volume,
                                           "%s.deleted" % (self.volume_name))
        self.assertEqual(info, parent_info)
        self.assertFalse(volume.set_snap.called)
        volume.parent_info.assert_called_once()
    @mock.patch('cinder.volume.drivers.rbd.RADOSClient')
    def test_create_cloned_volume(self, mock_client):
        """Cloning below the max depth snapshots, protects and clones."""
        src_name = u'volume-00000001'
        dst_name = u'volume-00000002'
        self.cfg.rbd_max_clone_depth = 2
        self.rbd.RBD.clone = mock.Mock()
        self.driver._get_clone_depth = mock.Mock()
        # Try with no flatten required
        self.driver._get_clone_depth.return_value = 1
        self.rbd.Image.create_snap = mock.Mock()
        self.rbd.Image.protect_snap = mock.Mock()
        self.rbd.Image.close = mock.Mock()
        self.driver.create_cloned_volume(dict(name=dst_name),
                                         dict(name=src_name))
        self.rbd.Image.create_snap.assert_called_once()
        self.rbd.Image.protect_snap.assert_called_once()
        self.rbd.RBD.clone.assert_called_once()
        self.rbd.Image.close.assert_called_once()
    @mock.patch('cinder.volume.drivers.rbd.RADOSClient')
    def test_create_cloned_volume_w_flatten(self, mock_client):
        """At max clone depth, a clone failure rolls back snap/protect."""
        src_name = u'volume-00000001'
        dst_name = u'volume-00000002'
        self.cfg.rbd_max_clone_depth = 1
        self.rbd.RBD.Error = Exception
        self.rbd.RBD.clone = mock.Mock()
        self.rbd.RBD.clone.side_effect = self.rbd.RBD.Error
        self.driver._get_clone_depth = mock.Mock()
        # NOTE(review): depth (1) equals rbd_max_clone_depth here, so the
        # flatten path IS exercised; this comment was copied from the
        # sibling test and is misleading.
        self.driver._get_clone_depth.return_value = 1
        self.rbd.Image.create_snap = mock.Mock()
        self.rbd.Image.protect_snap = mock.Mock()
        self.rbd.Image.unprotect_snap = mock.Mock()
        self.rbd.Image.remove_snap = mock.Mock()
        self.rbd.Image.close = mock.Mock()
        self.assertRaises(self.rbd.RBD.Error, self.driver.create_cloned_volume,
                          dict(name=dst_name), dict(name=src_name))
        self.rbd.Image.create_snap.assert_called_once()
        self.rbd.Image.protect_snap.assert_called_once()
        self.rbd.RBD.clone.assert_called_once()
        self.rbd.Image.unprotect_snap.assert_called_once()
        self.rbd.Image.remove_snap.assert_called_once()
        self.rbd.Image.close.assert_called_once()
    @mock.patch('cinder.volume.drivers.rbd.RADOSClient')
    def test_create_cloned_volume_w_clone_exception(self, mock_client):
        """A clone error below max depth also rolls back snap/protect."""
        src_name = u'volume-00000001'
        dst_name = u'volume-00000002'
        self.cfg.rbd_max_clone_depth = 2
        self.rbd.RBD.Error = Exception
        self.rbd.RBD.clone = mock.Mock()
        self.rbd.RBD.clone.side_effect = self.rbd.RBD.Error
        self.driver._get_clone_depth = mock.Mock()
        # Try with no flatten required
        self.driver._get_clone_depth.return_value = 1
        self.rbd.Image.create_snap = mock.Mock()
        self.rbd.Image.protect_snap = mock.Mock()
        self.rbd.Image.unprotect_snap = mock.Mock()
        self.rbd.Image.remove_snap = mock.Mock()
        self.rbd.Image.close = mock.Mock()
        self.assertRaises(self.rbd.RBD.Error, self.driver.create_cloned_volume,
                          dict(name=dst_name), dict(name=src_name))
        self.rbd.Image.create_snap.assert_called_once()
        self.rbd.Image.protect_snap.assert_called_once()
        self.rbd.RBD.clone.assert_called_once()
        self.rbd.Image.unprotect_snap.assert_called_once()
        self.rbd.Image.remove_snap.assert_called_once()
        self.rbd.Image.close.assert_called_once()
    def test_good_locations(self):
        """Well-formed rbd:// URIs parse without raising."""
        locations = ['rbd://fsid/pool/image/snap',
                     'rbd://%2F/%2F/%2F/%2F', ]
        # NOTE(review): under Python 3 map() is lazy, so _parse_location
        # would never run; this relies on Python 2 eager map().
        map(self.driver._parse_location, locations)
    def test_bad_locations(self):
        """Malformed URIs raise ImageUnacceptable and are not cloneable."""
        locations = ['rbd://image',
                     'http://path/to/somewhere/else',
                     'rbd://image/extra',
                     'rbd://image/',
                     'rbd://fsid/pool/image/',
                     'rbd://fsid/pool/image/snap/',
                     'rbd://///', ]
        for loc in locations:
            self.assertRaises(exception.ImageUnacceptable,
                              self.driver._parse_location,
                              loc)
            self.assertFalse(
                self.driver._is_cloneable(loc, {'disk_format': 'raw'}))
    @mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy')
    def test_cloneable(self, mock_proxy):
        """A raw image in the same cluster (matching fsid) is cloneable."""
        self.driver._get_fsid = mock.Mock()
        self.driver._get_fsid.return_value = 'abc'
        location = 'rbd://abc/pool/image/snap'
        info = {'disk_format': 'raw'}
        self.assertTrue(self.driver._is_cloneable(location, info))
    @mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy')
    def test_uncloneable_different_fsid(self, mock_proxy):
        """Images in another cluster (fsid mismatch) are not cloneable."""
        self.driver._get_fsid = mock.Mock()
        self.driver._get_fsid.return_value = 'abc'
        location = 'rbd://def/pool/image/snap'
        self.assertFalse(
            self.driver._is_cloneable(location, {'disk_format': 'raw'}))
    @mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy')
    def test_uncloneable_unreadable(self, mock_proxy):
        """An image that cannot be opened is reported as not cloneable."""
        self.driver._get_fsid = mock.Mock()
        self.driver._get_fsid.return_value = 'abc'
        location = 'rbd://abc/pool/image/snap'
        self.rbd.Error = Exception
        mock_proxy.side_effect = self.rbd.Error
        args = [location, {'disk_format': 'raw'}]
        self.assertFalse(self.driver._is_cloneable(*args))
        mock_proxy.assert_called_once()
    def test_uncloneable_bad_format(self):
        """Only raw disk images may be cloned; other formats are rejected."""
        self.driver._get_fsid = mock.Mock()
        self.driver._get_fsid.return_value = 'abc'
        location = 'rbd://abc/pool/image/snap'
        formats = ['qcow2', 'vmdk', 'vdi']
        for f in formats:
            self.assertFalse(
                self.driver._is_cloneable(location, {'disk_format': f}))
    def _copy_image(self):
        """Drive copy_image_to_volume with all filesystem/image I/O mocked."""
        with mock.patch.object(tempfile, 'NamedTemporaryFile'):
            with mock.patch.object(os.path, 'exists') as mock_exists:
                mock_exists.return_value = True
                with mock.patch.object(image_utils, 'fetch_to_raw'):
                    with mock.patch.object(self.driver, 'delete_volume'):
                        with mock.patch.object(self.driver, '_resize'):
                            mock_image_service = mock.MagicMock()
                            args = [None, {'name': 'test', 'size': 1},
                                    mock_image_service, None]
                            self.driver.copy_image_to_volume(*args)
    def test_copy_image_no_volume_tmp(self):
        """Image copy works without a configured volume_tmp_dir."""
        self.cfg.volume_tmp_dir = None
        self._copy_image()
    def test_copy_image_volume_tmp(self):
        """Image copy also works with an explicit volume_tmp_dir."""
        self.cfg.volume_tmp_dir = '/var/run/cinder/tmp'
        self._copy_image()
    @mock.patch('cinder.volume.drivers.rbd.RADOSClient')
    def test_update_volume_stats(self, mock_client):
        """Cluster stats in KiB are converted to GiB capacity figures."""
        client = mock_client.return_value
        client.__enter__.return_value = client
        client.cluster = mock.Mock()
        client.cluster.get_cluster_stats = mock.Mock()
        client.cluster.get_cluster_stats.return_value = {'kb': 1024 ** 3,
                                                         'kb_avail': 1024 ** 2}
        self.driver.configuration.safe_get = mock.Mock()
        self.driver.configuration.safe_get.return_value = 'RBD'
        expected = dict(
            volume_backend_name='RBD',
            vendor_name='Open Source',
            driver_version=self.driver.VERSION,
            storage_protocol='ceph',
            total_capacity_gb=1024,
            free_capacity_gb=1,
            reserved_percentage=0)
        actual = self.driver.get_volume_stats(True)
        client.cluster.get_cluster_stats.assert_called_once()
        self.assertDictMatch(expected, actual)
    @mock.patch('cinder.volume.drivers.rbd.RADOSClient')
    def test_update_volume_stats_error(self, mock_client):
        """Stats query failures degrade to 'unknown' capacity values."""
        client = mock_client.return_value
        client.__enter__.return_value = client
        client.cluster = mock.Mock()
        client.cluster.get_cluster_stats = mock.Mock()
        client.cluster.get_cluster_stats.side_effect = Exception
        self.driver.configuration.safe_get = mock.Mock()
        self.driver.configuration.safe_get.return_value = 'RBD'
        self.rados.Error = Exception
        expected = dict(volume_backend_name='RBD',
                        vendor_name='Open Source',
                        driver_version=self.driver.VERSION,
                        storage_protocol='ceph',
                        total_capacity_gb='unknown',
                        free_capacity_gb='unknown',
                        reserved_percentage=0)
        actual = self.driver.get_volume_stats(True)
        client.cluster.get_cluster_stats.assert_called_once()
        self.assertDictMatch(expected, actual)
    def test_get_mon_addrs(self):
        """Monitor hosts/ports are parsed from `ceph mon dump` output."""
        self.driver._execute = mock.Mock()
        self.driver._execute.return_value = (CEPH_MON_DUMP, '')
        hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com']
        ports = ['6789', '6790', '6791', '6792', '6791']
        self.assertEqual((hosts, ports), self.driver._get_mon_addrs())
    def test_initialize_connection(self):
        """Connection info carries pool/volume name, mon addrs and auth."""
        hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com']
        ports = ['6789', '6790', '6791', '6792', '6791']
        self.driver._get_mon_addrs = mock.Mock()
        self.driver._get_mon_addrs.return_value = (hosts, ports)
        expected = {
            'driver_volume_type': 'rbd',
            'data': {
                'name': '%s/%s' % (self.cfg.rbd_pool,
                                   self.volume_name),
                'hosts': hosts,
                'ports': ports,
                'auth_enabled': False,
                'auth_username': None,
                'secret_type': 'ceph',
                'secret_uuid': None, }
        }
        actual = self.driver.initialize_connection(dict(name=self.volume_name),
                                                   None)
        self.assertDictMatch(expected, actual)
    @mock.patch('cinder.volume.drivers.rbd.RADOSClient')
    def test_clone(self, mock_client):
        """_clone uses two rados clients: source ioctx and destination ioctx."""
        src_pool = u'images'
        src_image = u'image-name'
        src_snap = u'snapshot-name'
        client_stack = []
        def mock__enter__(inst):
            def _inner():
                client_stack.append(inst)
                return inst
            return _inner
        client = mock_client.return_value
        # capture both rados client used to perform the clone
        client.__enter__.side_effect = mock__enter__(client)
        self.rbd.RBD.clone = mock.Mock()
        self.driver._clone(self.volume, src_pool, src_image, src_snap)
        args = [client_stack[0].ioctx, str(src_image), str(src_snap),
                client_stack[1].ioctx, str(self.volume_name)]
        kwargs = {'features': self.rbd.RBD_FEATURE_LAYERING}
        self.rbd.RBD.clone.assert_called_once_with(*args, **kwargs)
        self.assertEqual(client.__enter__.call_count, 2)
    def test_extend_volume(self):
        """extend_volume resizes to the requested size in GiB."""
        # NOTE(review): this test uses mox while the rest of the class uses
        # mock -- candidate for consolidation.
        fake_size = '20'
        fake_vol = {'project_id': 'testprjid', 'name': self.volume_name,
                    'size': fake_size,
                    'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
        self.mox.StubOutWithMock(self.driver, '_resize')
        size = int(fake_size) * units.GiB
        self.driver._resize(fake_vol, size=size)
        self.mox.ReplayAll()
        self.driver.extend_volume(fake_vol, fake_size)
        self.mox.VerifyAll()
@mock.patch('cinder.volume.drivers.rbd.RADOSClient')
def test_rbd_volume_proxy_init(self, mock_client):
snap = u'snapshot-name'
client = mock_client.return_value
client.__enter__.return_value = client
self.driver._connect_to_rados = mock.Mock()
self.driver._connect_to_rados.return_value = (None, None)
self.driver._disconnect_from_rados = mock.Mock()
self.driver._disconnect_from_rados.return_value = (None, None)
with driver.RBDVolumeProxy(self.driver, self.volume_name):
self.driver._connect_to_rados.assert_called_once()
self.assertFalse(self.driver._disconnect_from_rados.called)
self.driver._disconnect_from_rados.assert_called_once()
self.driver._connect_to_rados.reset_mock()
self.driver._disconnect_from_rados.reset_mock()
with driver.RBDVolumeProxy(self.driver, self.volume_name,
snapshot=snap):
self.driver._connect_to_rados.assert_called_once()
| |
file
wavel_loose -- As wavel, but with less strict criteria
fflat_mfsky -- Find a reduced fibre flat field from a twilight flat
fflat_mfsky_loose-- Find a reduced fibre flat field from any twilight flat field on a night
fflat_mksky_any -- Find a reduced fibre flat field from any twilight flat field in a manager set
fflat -- Find a reduced fibre flat field from the dome lamp
fflat_loose -- As fflat, but with less strict criteria
fflat_flap -- As fflat, but from the flap lamp
fflat_flap_loose -- As fflat_flap, but with less strict criteria
thput -- Find a reduced offset sky (twilight) file
thput_fflat -- Find a dome flat that's had a copy made as MFSKY
thput_sky -- As thput, but find long-exposure object file
bias -- Find a combined bias frame
dark -- Find a combined dark frame
lflat -- Find a combined long-slit flat frame
fcal -- Find a reduced spectrophotometric standard star
fcal_loose -- As fcal, but with less strict criteria
The return type depends on what is asked for:
tlmap, wavel, fflat, thput, fcal and related
-- A FITS file object
bias, dark, lflat -- The path to the combined file
"""
fits_match = None
# The following are the things that could potentially be matched
date = None
plate_id = None
field_id = None
ccd = None
exposure_str = None
min_exposure = None
max_exposure = None
reduced_dir = None
reduced = None
copy_reduced = None
tlm_created = None
flux_calibrated = None
telluric_corrected = None
spectrophotometric = None
lamp = None
central_wavelength = None
# extra match criteria that is the amount of flux in the
# frame, based on the FLXU90P value (9-95th percentile value
# of raw frame). This is for twilights used as flats for
# TLMs. If a frame is a twilight, then this paramater is
# set on initialization of the FITSFile object. Then we
# have easy access to the value.
min_fluxlev = None
max_fluxlev = None
# Define some functions for figures of merit
time_difference = lambda fits, fits_test: (
abs(fits_test.epoch - fits.epoch))
recent_reduction = lambda fits, fits_test: (
-1.0 * os.stat(fits_test.reduced_path).st_mtime)
copy_recent_reduction = lambda fits, fits_test: (
-1.0 * os.stat(self.copy_path(fits_test.reduced_path)).st_mtime)
# merit function that returns the best fluxlev value. As the
# general f-o-m selects objects if the f-o-m is LESS than other values
# we should just multiple fluxlev by -1:
flux_level = lambda fits, fits_test: (
-1.0 * fits_test.fluxlev)
        def time_difference_min_exposure(min_exposure):
            # Factory: build a figure-of-merit function that behaves like
            # time_difference but disqualifies (np.inf => never selected)
            # any candidate whose exposure time is at or below min_exposure.
            def retfunc(fits, fits_test):
                if fits_test.exposure <= min_exposure:
                    return np.inf
                else:
                    return time_difference(fits, fits_test)
            return retfunc
        def determine_tlm_shift_fits(twilight_fits,flat_fits):
            # Mean offset between the twilight-derived and flat-derived
            # tramline maps, averaged over the whole map (positive when
            # the twilight tramlines sit higher than the flat's).
            twilight_tlm = pf.getdata(twilight_fits.tlm_path,'PRIMARY')
            flat_tlm = pf.getdata(flat_fits.tlm_path,'PRIMARY')
            tlm_offset = np.mean(twilight_tlm-flat_tlm)
            return tlm_offset
def flux_level_shift(fits,fits_test):
fits_comp = self.matchmaker(fits,'tlmap')
if fits_comp == None:
fits_comp = self.matchmaker(fits,'tlmap_loose')
shift = determine_tlm_shift_fits(fits_test,fits_comp)
if np.abs(shift) >= 1:
return np.inf
else:
return flux_level(fits, fits_test)
# Determine what actually needs to be matched, depending on match_class
#
# this case is where we want to use a twilight sky frame to derive the
# tramline maps, rather than a flat field, as the flat can often have too
# little flux in the far blue to do a good job. The order of matching for
# the twilights should be:
# 1) The brightest twilight frame of the same field (needs to be brighter than
# some nominal level, say FLUX90P>500) - tlmap_mfsky.
# 2) The brightest twilight frame from the same night (same constraint on
# brightness) - tlmap_mfsky_loose.
# 3) The brightest twilight frame from a different night (same constraint on
# brightness) - tlmap_mfsky_any.
if match_class.lower() == 'tlmap_mfsky':
# allow MFSKY to be used:
ndf_class = 'MFSKY'
date = fits.date
plate_id = fits.plate_id
field_id = fits.field_id
min_fluxlev = 1000.0
max_fluxlev = 40000.0 # use a max_fluxlev to reduce the chance of saturated twilights
ccd = fits.ccd
tlm_created = True
fom = flux_level
elif match_class.lower() == 'tlmap_mfsky_loose':
# this is the case where we take the brightest twilight on the same
# night, irrespective of whether its from the same plate.
ndf_class = 'MFSKY'
date = fits.date
min_fluxlev = 1000.0
max_fluxlev = 40000.0 # use a max_fluxlev to reduce the chance of saturated twilights
ccd = fits.ccd
tlm_created = True
fom = flux_level_shift
elif match_class.lower() == 'tlmap_mfsky_any':
# in this case find the best (brightest) twilight frame from anywhere
# during the run.
ndf_class = 'MFSKY'
min_fluxlev = 1000.0
max_fluxlev = 40000.0 # use a max_fluxlev to reduce the chance of saturated twilights
ccd = fits.ccd
tlm_created = True
fom = flux_level_shift
elif match_class.lower() == 'tlmap':
ndf_class = 'MFFFF'
date = fits.date
plate_id = fits.plate_id
field_id = fits.field_id
ccd = fits.ccd
tlm_created = True
lamp = 'Dome'
fom = time_difference
elif match_class.lower() == 'tlmap_loose':
# Find a tramline map with looser criteria
ndf_class = 'MFFFF'
ccd = fits.ccd
tlm_created = True
lamp = 'Dome'
fom = time_difference
elif match_class.lower() == 'tlmap_flap':
# Find a tramline map from a flap flat
ndf_class = 'MFFFF'
date = fits.date
plate_id = fits.plate_id
field_id = fits.field_id
ccd = fits.ccd
tlm_created = True
lamp = 'Flap'
fom = time_difference
elif match_class.lower() == 'tlmap_flap_loose':
# Tramline map from flap flat with looser criteria
ndf_class = 'MFFFF'
ccd = fits.ccd
tlm_created = True
lamp = 'Flap'
fom = time_difference
elif match_class.lower() == 'wavel':
# Find a reduced arc field
ndf_class = 'MFARC'
date = fits.date
plate_id = fits.plate_id
field_id = fits.field_id
ccd = fits.ccd
reduced = True
fom = time_difference
elif match_class.lower() == 'wavel_loose':
# Find a reduced arc field, with looser criteria
ndf_class = 'MFARC'
ccd = fits.ccd
reduced = True
fom = time_difference
# options for using twilight frame as flibre flat:
elif match_class.lower() == 'fflat_mfsky':
# allow MFSKY to be used:
ndf_class = 'MFSKY'
date = fits.date
plate_id = fits.plate_id
field_id = fits.field_id
min_fluxlev = 1000.0
max_fluxlev = 40000.0 # use a max_fluxlev to reduce the chance of saturated twilights
ccd = fits.ccd
copy_reduced = True
fom = flux_level
elif match_class.lower() == 'fflat_mfsky_loose':
# this is the case where we take the brightest twilight on the same
# night, irrespective of whether its from the same plate.
ndf_class = 'MFSKY'
date = fits.date
min_fluxlev = 1000.0
max_fluxlev = 40000.0 # use a max_fluxlev to reduce the chance of saturated twilights
ccd = fits.ccd
copy_reduced = True
fom = flux_level_shift
elif match_class.lower() == 'fflat_mfsky_any':
# in this case find the best (brightest) twilight frame from anywhere
# during the run.
ndf_class = 'MFSKY'
min_fluxlev = 1000.0
max_fluxlev = 40000.0 # use a max_fluxlev to reduce the chance of saturated twilights
ccd = fits.ccd
copy_reduced = True
fom = flux_level_shift
elif match_class.lower() == 'fflat':
# Find a reduced fibre flat field from the dome lamp
ndf_class = 'MFFFF'
date = fits.date
plate_id = fits.plate_id
field_id = fits.field_id
ccd = fits.ccd
reduced = True
lamp = 'Dome'
fom = time_difference
elif match_class.lower() == 'fflat_loose':
# Find a reduced fibre flat field with looser criteria
ndf_class = 'MFFFF'
ccd = fits.ccd
reduced = True
lamp = 'Dome'
fom = time_difference
elif match_class.lower() == 'fflat_flap':
# Find a reduced flap fibre flat field
ndf_class = 'MFFFF'
date = fits.date
plate_id = fits.plate_id
field_id = fits.field_id
ccd = fits.ccd
reduced = True
lamp = 'Flap'
fom = time_difference
elif match_class.lower() == 'fflat_flap_loose':
# Fibre flat field from flap lamp with looser criteria
ndf_class = 'MFFFF'
ccd = fits.ccd
reduced = True
lamp = 'Flap'
fom = time_difference
elif match_class.lower() == 'thput':
# Find a reduced offset sky field
ndf_class = 'MFSKY'
date = fits.date
plate_id = fits.plate_id
field_id = fits.field_id
ccd = fits.ccd
reduced = True
fom = recent_reduction
elif match_class.lower() == 'thput_fflat':
# Find a dome flat that's had a fake sky copy made
ndf_class = 'MFFFF'
date = fits.date
plate_id = fits.plate_id
field_id = fits.field_id
ccd = fits.ccd
copy_reduced = True
| |
outfile (former: 'tmat')"]),
('<RELAX_SPINANGLE_DIRAC>', [
None, '%l', False,
"Run option: relax the spin angle in a SCF calculation [only DIRAC mode] (former: 'ITERMDIR')"
]),
('<SEARCH_EFERMI>', [
None, '%l', False,
"Run option: modify convergence parameters to scan for fermi energy only (to reach charge neutrality). (former: 'SEARCHEF')"
]),
('<SET_GMAT_TO_ZERO>',
[None, '%l', False, "Run option: set GMAT=0 in evaluation of density (former: 'GMAT=0')"]),
('<SET_EMPTY_SYSTEM>',
[None, '%l', False, "Run option: set potential and nuclear charge to zero (former: 'zeropot')"]),
('<SET_KMESH_LARGE>',
[None, '%l', False, "Run option: set equal k-mesh (largest) for all energy points (former: 'fix mesh')"]),
('<SET_KMESH_SMALL>',
[None, '%l', False, "Run option: set equal k-mesh (smallest) for all energy points (former: 'fix4mesh')"]),
('<SET_TMAT_NOINVERSION>', [
None, '%l', False,
"Run option: do not perform inversion to get msst = Delta t^-1, but msst = Delta t. (former: 'testgmat')"
]),
('<SIMULATE_ASA>', [
None, '%l', False,
"Run option: set non-spherical potential to zero in full-potential calculation with Chebychev solver (former: 'simulasa')"
]),
('<SLOW_MIXING_EFERMI>', [
None, '%l', False,
"Run option: renormalize Fermi-energy shift by mixing factor during mixing (former: 'slow-neu')"
]),
('<STOP_1A>', [None, '%l', False, "Run option: stop after main1a (former: 'STOP1A')"]),
('<STOP_1B>', [None, '%l', False, "Run option: stop after main1b (former: 'STOP1B')"]),
('<STOP_1C>', [None, '%l', False, "Run option: stop after main1c (former: 'STOP1C')"]),
('<SYMMETRIZE_GMAT>',
[None, '%l', False,
"Run option: use symmetrization [G(k) + G(-k)]/2 in k-point loop (former: 'symG(k)')"]),
('<SYMMETRIZE_POTENTIAL_CUBIC>', [
None, '%l', False,
"Run option: keep only symmetric part of potential (L=1,11,21,25,43,47). (former: 'potcubic')"
]),
('<SYMMETRIZE_POTENTIAL_MADELUNG>', [
None, '%l', False,
"Run option: symmetrize potential in consistency to madelung potential (former: 'potsymm')"
]),
('<TORQUE_OPERATOR_ONLYMT>', [
None, '%l', False,
"Run option: for torque operator: include only the part within the muffin tin (former: 'ONLYMT')"
]),
('<TORQUE_OPERATOR_ONLYSPH>', [
None, '%l', False,
"Run option: for torque operator: include only the spherically symmetric part (former: 'ONLYSPH')"
]),
('<USE_CHEBYCHEV_SOLVER>', [None, '%l', False,
"Run option: use the Chebychev solver (former: 'NEWSOSOL')"]),
('<USE_COND_LB>', [
None, '%l', False,
"Run option: perform calculation of conductance in Landauer-Büttiker formalism (former: 'CONDUCT')"
]),
('<USE_CONT>',
[None, '%l', False, "Run option: no usage of embedding points. NEMB is set to 0. (former: 'CONT')"]),
('<USE_DECI_ONEBULK>', [
None, '%l', False,
"Run option: in case of decimation: use same bulk on right and left. Speeds up calculations. (former: 'ONEBULK')"
]),
('<USE_DECIMATION>',
[None, '%l', False,
"Run option: use Decimation technique for semi-infinite systems (former: 'DECIMATE')"]),
('<USE_EWALD_2D>', [
None, '%l', False,
"Run option: use 2D ewald sum instead of 3D sum (Attention: does not work always!) (former: 'ewald2d')"
]),
('<USE_FULL_BZ>', [
None, '%l', False,
"Run option: use full Brillouin zone, i.e. switch off symmetries for k-space integration (former: 'fullBZ')"
]),
('<USE_LDAU>',
[None, '%l', False, "Run option: use LDA+U as exchange-correlation potential (former: 'LDA+U')"]),
('<USE_LLOYD>', [
None, '%l', False,
"Run option: use Lloyds formula to correct finite angular momentum cutoff (former: 'LLOYD')"
]),
('<USE_QDOS>',
[None, '%l', False,
"Run option: writes out qdos files for band structure calculations. (former: 'qdos')"]),
('<USE_READCPA>', [None, '%l', False, "Run option: read cpa t-matrix from file (former: 'readcpa')"]),
('<USE_RIGID_EFERMI>', [
None, '%l', False,
"Run option: keep the Fermi energy fixed during self-consistency (former: 'rigid-ef')"
]),
('<USE_SEMICORE>', [None, '%l', False, "Run option: use semicore contour (former: 'SEMICORE')"]),
('<USE_SEMI_CIRCLE_CONTOUR>',
[None, '%l', False, 'Run option: use semi-circular energy contour (set number of points with NPT1)']),
('<USE_SPHERICAL_POTENTIAL_ONLY>',
[None, '%l', False, "Run option: keeping only spherical component of potential (former: 'Vspher')"]),
('<USE_VIRTUAL_ATOMS>', [None, '%l', False, "Run option: add virtual atoms (former: 'VIRATOMS')"]),
('<WRITE_BDG_TESTS>',
[None, '%l', False, "Run option: test options for Bogouliubov-deGennes (former: 'BdG_dev')"]),
('<WRITE_DOS>',
[None, '%l', False, "Run option: write out DOS files in any case (also if npol!=0) (former: 'DOS')"]),
('<WRITE_DOS_LM>', [
None, '%l', False,
"Run option: write out DOS files with decomposition into l and m components (former: 'lmdos')"
]),
('<WRITE_GMAT_PLAIN>',
[None, '%l', False, "Run option: write out Green function as plain text file (former: 'GPLAIN')"]),
('<WRITE_GREEN_HOST>', [
None, '%l', False,
"Run option: write green function of the host to file `green_host` (former: 'WRTGREEN')"
]),
('<WRITE_GREEN_IMP>',
[None, '%l', False, "Run option: write out impurity Green function to GMATLL_GES (former: 'GREENIMP')"]),
('<WRITE_COMPLEX_QDOS>', [None, '%l', False,
"Run option: write complex qdos to file (former: 'compqdos')"]),
('<WRITE_CPA_PROJECTION_FILE>',
[None, '%l', False, "Run option: write CPA projectors to file (former: 'projfile')"]),
('<WRITE_DECI_POT>',
[None, '%l', False, "Run option: write decimation-potential file (former: 'deci-pot')"]),
('<WRITE_DECI_TMAT>',
[None, '%l', False, "Run option: write t-matrix to file 'decifile' (former: 'deci-out')"]),
('<WRITE_DENSITY_ASCII>',
[None, '%l', False, "Run option: write density rho2ns to file densitydn.ascii (former: 'den-asci')"]),
('<WRITE_ENERGY_MESH>',
[None, '%l', False, "Run option: write out the energy mesh to file `emesh.scf` (former: 'EMESH')"]),
('<WRITE_GENERALIZED_POTENTIAL>', [
None, '%l', False,
"Run option: write potential in general format. Usually prepares for running the VORONOI program. (former: 'GENPOT')"
]),
('<WRITE_GMAT_FILE>', [None, '%l', False, "Run option: write GMAT to file (former: 'gmatfile')"]),
('<WRITE_GREF_FILE>', [None, '%l', False, "Run option: write GREF to file (former: 'greffile')"]),
('<WRITE_GMAT_ASCII>',
[None, '%l', False, "Run option: write GMAT to formatted file `gmat.ascii` (former: 'gmatasci')"]),
('<WRITE_KKRIMP_INPUT>',
[None, '%l', False, "Run option: write out files for KKRimp-code (former: 'KKRFLEX')"]),
('<WRITE_KKRSUSC_INPUT>',
[None, '%l', False, "Run option: write out files for KKRsusc-code (former: 'KKRSUSC')"]),
('<WRITE_KPTS_FILE>',
[None, '%l', False, "Run option: write and read k-mesh to/from file `kpoints` (former: 'kptsfile')"]),
('<WRITE_LLOYD_CDOS_FILE>',
[None, '%l', False, "Run option: write Lloyd array to file (former: 'wrtcdos')"]),
('<WRITE_LLOYD_DGREF_FILE>',
[None, '%l', False, "Run option: write Lloyd array to file (former: 'wrtdgref')"]),
('<WRITE_LLOYD_DTMAT_FILE>',
[None, '%l', False, "Run option: write Lloyd array to file (former: 'wrtdtmat')"]),
('<WRITE_LLOYD_FILE>',
[None, '%l', False, "Run option: write several Lloyd-arrays to files (former: 'llyfiles')"]),
('<WRITE_LLOYD_G0TR_FILE>',
[None, '%l', False, "Run option: write Lloyd array to file (former: 'wrtgotr')"]),
('<WRITE_LLOYD_TRALPHA_FILE>',
[None, '%l', False, "Run option: write Lloyd array to file (former: 'wrttral')"]),
('<WRITE_MADELUNG_FILE>', [
None, '%l', False,
"Run option: write madelung summation to file 'abvmad.unformatted' instead of keeping it in memory (former: 'madelfil')"
]),
('<WRITE_PKKR_INPUT>',
[None, '%l', False, "Run option: write out files for Pkkprime-code (former: 'FERMIOUT')"]),
('<WRITE_PKKR_OPERATORS>', [
None, '%l', False,
"Run option: for Fermi-surface output: calculate various operators in KKR basis. (former: 'OPERATOR')"
]),
('<WRITE_POTENTIAL_TESTS>', [
None, '%l', False,
"Run option: write potential at different steps in main2 to different files (former: 'vintrasp' and 'vpotout')"
]),
('<WRITE_RHO2NS>', [
None, '%l', False,
"Run option: write array rho2ns into file out_rhoval (from main1c) and out_rhotot (from main2) (former: 'RHOVALTW' and 'RHOVALW')"
]),
('<WRITE_RHOQ_INPUT>', [
None, '%l', False,
"Run option: write out files needed for rhoq module (Quasiparticle interference) (former: 'rhoqtest')"
]),
('<WRITE_TMAT_FILE>', [None, '%l', False, "Run option: write t-matix to file (former: 'tmatfile')"]),
('<WRITE_TB_COUPLING>', [
None, '%l', False,
"Run option: write couplings in tight-binging reference system to file `couplings.dat` (former: 'godfrin')"
]),
('<CALC_WRONSKIAN>', [
None, '%l', False,
'Run option: calculate the wronskian relations of first and second kind for the wavefunctions (see PhD Bauer pp 48)'
]),
# end new style run options
])
# keywords for KKRimp (all allowed settings for config file)
self._DEFAULT_KEYS_KKRIMP = dict([ # complete list of keywords, detault all that are not mandatory to None
# chemistry
('NSPIN',
[None, '%i', False, 'Chemistry, Atom types: Number of spin directions in potential. Values 1 or 2']),
('KVREL', [
None, '%i', False,
'Chemistry, Atom types: Relativistic treatment of valence electrons. Takes values 0 (Schroedinger), 1 (Scalar relativistic), 2 (Dirac ; works only in ASA mode)'
]),
('XC', [
None, '%s', False,
'Chemistry, Exchange-correlation: Type of exchange correlation potential. Takes values 0 (LDA, Moruzzi-Janak-Williams), 1 (LDA, von Barth-Hedin), 2 (LDA, Vosko-Wilk-Nussair), 3 (GGA, Perdew-Wang 91), 4 (GGA, PBE), 5 (GGA, PBEsol)'
]),
# external fields
('HFIELD', [
None, '%f %i', False,
'External fields: Value of an external magnetic field in the first iteration. Works only with LINIPOL, XINIPOL'
]),
# accuracy
('INS', [
None, '%i', False,
'Accuracy, Radial solver: Takes values 0 for ASA and 1 for full potential Must be 0 for Munich Dirac solver ([KREL]=2)'
]),
('ICST', [None, '%i', False, 'Accuracy, Radial solver: Number of iterations in the radial solver']),
('RADIUS_LOGPANELS', [
None, '%f', | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Programmable Proxy Server in a single Python file.
:copyright: (c) 2013-present by <NAME> and contributors.
:license: BSD, see LICENSE for more details.
"""
import argparse
import asyncio
import base64
import contextlib
import errno
import functools
import hashlib
import importlib
import inspect
import io
import ipaddress
import json
import logging
import mimetypes
import multiprocessing
import os
import pathlib
import queue
import secrets
import selectors
import socket
import ssl
import struct
import subprocess
import sys
import threading
import time
from abc import ABC, abstractmethod
from multiprocessing import connection
from multiprocessing.reduction import send_handle, recv_handle
from types import TracebackType
from typing import Any, Dict, List, Tuple, Optional, Union, NamedTuple, Callable, Type, TypeVar
from typing import cast, Generator, TYPE_CHECKING
from urllib import parse as urlparse
from typing_extensions import Protocol
if os.name != 'nt':
import resource
PROXY_PY_DIR = os.path.dirname(os.path.realpath(__file__))
PROXY_PY_START_TIME = time.time()
VERSION = (1, 2, 0)
__version__ = '.'.join(map(str, VERSION[0:3]))
__description__ = '⚡⚡⚡ Fast, Lightweight, Programmable Proxy Server in a single Python file.'
__author__ = '<NAME>'
__author_email__ = '<EMAIL>'
__homepage__ = 'https://github.com/abhinavsingh/proxy.py'
__download_url__ = '%s/archive/master.zip' % __homepage__
__license__ = 'BSD'
# Defaults
DEFAULT_BACKLOG = 100
DEFAULT_BASIC_AUTH = None
DEFAULT_BUFFER_SIZE = 1024 * 1024
DEFAULT_CA_CERT_DIR = None
DEFAULT_CA_CERT_FILE = None
DEFAULT_CA_KEY_FILE = None
DEFAULT_CA_SIGNING_KEY_FILE = None
DEFAULT_CERT_FILE = None
DEFAULT_CLIENT_RECVBUF_SIZE = DEFAULT_BUFFER_SIZE
DEFAULT_DEVTOOLS_WS_PATH = b'/devtools'
DEFAULT_DISABLE_HEADERS: List[bytes] = []
DEFAULT_DISABLE_HTTP_PROXY = False
DEFAULT_ENABLE_DEVTOOLS = False
DEFAULT_ENABLE_STATIC_SERVER = False
DEFAULT_ENABLE_WEB_SERVER = False
DEFAULT_IPV4_HOSTNAME = ipaddress.IPv4Address('127.0.0.1')
DEFAULT_IPV6_HOSTNAME = ipaddress.IPv6Address('::1')
DEFAULT_KEY_FILE = None
DEFAULT_LOG_FILE = None
DEFAULT_LOG_FORMAT = '%(asctime)s - pid:%(process)d [%(levelname)-.1s] %(funcName)s:%(lineno)d - %(message)s'
DEFAULT_LOG_LEVEL = 'INFO'
DEFAULT_NUM_WORKERS = 0
DEFAULT_OPEN_FILE_LIMIT = 1024
DEFAULT_PAC_FILE = None
DEFAULT_PAC_FILE_URL_PATH = b'/'
DEFAULT_PID_FILE = None
DEFAULT_PLUGINS = ''
DEFAULT_PORT = 8899
DEFAULT_SERVER_RECVBUF_SIZE = DEFAULT_BUFFER_SIZE
DEFAULT_STATIC_SERVER_DIR = os.path.join(PROXY_PY_DIR, 'public')
DEFAULT_THREADLESS = False
DEFAULT_TIMEOUT = 10
DEFAULT_VERSION = False
UNDER_TEST = False # Set to True if under test
logger = logging.getLogger(__name__)
def text_(s: Any, encoding: str = 'utf-8', errors: str = 'strict') -> Any:
    """Coerce *s* into a text-friendly value.

    Ints are stringified, bytes are decoded using *encoding*/*errors*,
    and every other value (including None) is passed through untouched."""
    if isinstance(s, bytes):
        return s.decode(encoding, errors)
    return str(s) if isinstance(s, int) else s
def bytes_(s: Any, encoding: str = 'utf-8', errors: str = 'strict') -> Any:
    """Coerce *s* into a binary-friendly value.

    Ints are first stringified, strings are encoded using *encoding*/
    *errors*, and every other value (including None) is passed through
    untouched."""
    if isinstance(s, int):
        s = str(s)
    return s.encode(encoding, errors) if isinstance(s, str) else s
version = bytes_(__version__)
CRLF, COLON, WHITESPACE, COMMA, DOT, SLASH, HTTP_1_1 = b'\r\n', b':', b' ', b',', b'.', b'/', b'HTTP/1.1'
PROXY_AGENT_HEADER_KEY = b'Proxy-agent'
PROXY_AGENT_HEADER_VALUE = b'proxy.py v' + version
PROXY_AGENT_HEADER = PROXY_AGENT_HEADER_KEY + \
COLON + WHITESPACE + PROXY_AGENT_HEADER_VALUE
TcpConnectionTypes = NamedTuple('TcpConnectionTypes', [
('SERVER', int),
('CLIENT', int),
])
tcpConnectionTypes = TcpConnectionTypes(1, 2)
ChunkParserStates = NamedTuple('ChunkParserStates', [
('WAITING_FOR_SIZE', int),
('WAITING_FOR_DATA', int),
('COMPLETE', int),
])
chunkParserStates = ChunkParserStates(1, 2, 3)
HttpStatusCodes = NamedTuple('HttpStatusCodes', [
# 1xx
('CONTINUE', int),
('SWITCHING_PROTOCOLS', int),
# 2xx
('OK', int),
# 3xx
('MOVED_PERMANENTLY', int),
('SEE_OTHER', int),
('TEMPORARY_REDIRECT', int),
('PERMANENT_REDIRECT', int),
# 4xx
('BAD_REQUEST', int),
('UNAUTHORIZED', int),
('FORBIDDEN', int),
('NOT_FOUND', int),
('PROXY_AUTH_REQUIRED', int),
('REQUEST_TIMEOUT', int),
('I_AM_A_TEAPOT', int),
# 5xx
('INTERNAL_SERVER_ERROR', int),
('NOT_IMPLEMENTED', int),
('BAD_GATEWAY', int),
('GATEWAY_TIMEOUT', int),
('NETWORK_READ_TIMEOUT_ERROR', int),
('NETWORK_CONNECT_TIMEOUT_ERROR', int),
])
httpStatusCodes = HttpStatusCodes(
100, 101,
200,
301, 303, 307, 308,
400, 401, 403, 404, 407, 408, 418,
500, 501, 502, 504, 598, 599
)
HttpMethods = NamedTuple('HttpMethods', [
('GET', bytes),
('HEAD', bytes),
('POST', bytes),
('PUT', bytes),
('DELETE', bytes),
('CONNECT', bytes),
('OPTIONS', bytes),
('TRACE', bytes),
('PATCH', bytes),
])
httpMethods = HttpMethods(
b'GET',
b'HEAD',
b'POST',
b'PUT',
b'DELETE',
b'CONNECT',
b'OPTIONS',
b'TRACE',
b'PATCH',
)
HttpParserStates = NamedTuple('HttpParserStates', [
('INITIALIZED', int),
('LINE_RCVD', int),
('RCVING_HEADERS', int),
('HEADERS_COMPLETE', int),
('RCVING_BODY', int),
('COMPLETE', int),
])
httpParserStates = HttpParserStates(1, 2, 3, 4, 5, 6)
HttpParserTypes = NamedTuple('HttpParserTypes', [
('REQUEST_PARSER', int),
('RESPONSE_PARSER', int),
])
httpParserTypes = HttpParserTypes(1, 2)
HttpProtocolTypes = NamedTuple('HttpProtocolTypes', [
('HTTP', int),
('HTTPS', int),
('WEBSOCKET', int),
])
httpProtocolTypes = HttpProtocolTypes(1, 2, 3)
WebsocketOpcodes = NamedTuple('WebsocketOpcodes', [
('CONTINUATION_FRAME', int),
('TEXT_FRAME', int),
('BINARY_FRAME', int),
('CONNECTION_CLOSE', int),
('PING', int),
('PONG', int),
])
websocketOpcodes = WebsocketOpcodes(0x0, 0x1, 0x2, 0x8, 0x9, 0xA)
def build_http_request(method: bytes, url: bytes,
                       protocol_version: bytes = HTTP_1_1,
                       headers: Optional[Dict[bytes, bytes]] = None,
                       body: Optional[bytes] = None) -> bytes:
    """Assemble a raw HTTP request packet.

    :param method: request method, e.g. ``b'GET'``.
    :param url: request target.
    :param protocol_version: HTTP version token for the request line.
    :param headers: optional header dict; an empty dict is substituted
        when omitted.
    :param body: optional message body appended after the headers.
    """
    request_line = [method, url, protocol_version]
    return build_http_pkt(request_line,
                          headers if headers is not None else {},
                          body)
def build_http_response(status_code: int,
                        protocol_version: bytes = HTTP_1_1,
                        reason: Optional[bytes] = None,
                        headers: Optional[Dict[bytes, bytes]] = None,
                        body: Optional[bytes] = None) -> bytes:
    """Assemble a raw HTTP response packet.

    When a body is supplied and the caller set neither Content-Length
    nor Transfer-Encoding, a Content-Length header is added (note: this
    mutates the caller's ``headers`` dict, matching historical
    behaviour).
    """
    status_line = [protocol_version, bytes_(status_code)]
    if reason:
        status_line.append(reason)
    hdrs = {} if headers is None else headers
    lowered = [k.lower() for k in hdrs]
    needs_length = (body is not None and
                    b'content-length' not in lowered and
                    b'transfer-encoding' not in lowered)
    if needs_length:
        hdrs[b'Content-Length'] = bytes_(len(body))
    return build_http_pkt(status_line, hdrs, body)
def build_http_header(k: bytes, v: bytes) -> bytes:
    """Render a single ``Key: value`` header line (no trailing CRLF)."""
    # COLON + WHITESPACE is the literal b': ' separator.
    return k + COLON + WHITESPACE + v
def build_http_pkt(line: List[bytes],
                   headers: Optional[Dict[bytes, bytes]] = None,
                   body: Optional[bytes] = None) -> bytes:
    """Serialize a start line, an optional header dict and an optional
    body into one raw HTTP packet (CRLF line endings, blank line before
    the body)."""
    parts = [WHITESPACE.join(line) + CRLF]
    if headers is not None:
        parts.extend(
            build_http_header(name, value) + CRLF
            for name, value in headers.items())
    parts.append(CRLF)
    pkt = b''.join(parts)
    if body:
        pkt += body
    return pkt
def build_websocket_handshake_request(
        key: bytes,
        method: bytes = b'GET',
        url: bytes = b'/') -> bytes:
    """Build a client-side RFC 6455 opening-handshake request packet.

    :param key: Sec-WebSocket-Key header value.
    :param method: HTTP method.
    :param url: Websocket request path.
    """
    handshake_headers = {
        b'Connection': b'upgrade',
        b'Upgrade': b'websocket',
        b'Sec-WebSocket-Key': key,
        b'Sec-WebSocket-Version': b'13',
    }
    return build_http_request(method, url, headers=handshake_headers)
def build_websocket_handshake_response(accept: bytes) -> bytes:
    """Build the server-side RFC 6455 handshake reply (101 Switching
    Protocols).

    :param accept: Sec-WebSocket-Accept header value.
    """
    handshake_headers = {
        b'Upgrade': b'websocket',
        b'Connection': b'Upgrade',
        b'Sec-WebSocket-Accept': accept
    }
    return build_http_response(
        101, reason=b'Switching Protocols', headers=handshake_headers)
def find_http_line(raw: bytes) -> Tuple[Optional[bytes], bytes]:
    """Split *raw* at the first CRLF.

    Returns ``(line, remainder)``; when no CRLF terminator is present
    yet, ``line`` is None and the buffer comes back unchanged."""
    line, sep, rest = raw.partition(CRLF)
    if not sep:
        # No complete line buffered yet.
        return None, raw
    return line, rest
def new_socket_connection(addr: Tuple[str, int]) -> socket.socket:
    """Open a TCP connection to ``addr``.

    If ``addr[0]`` parses as a literal IPv4/IPv6 address, a socket of
    the matching family is created and connected directly; otherwise
    (a hostname) the dual-stack ``socket.create_connection`` resolution
    path is used.

    Fix: removed the ``conn.bind((DEFAULT_IPV4_HOSTNAME, 0))`` call on
    the IPv4 path.  It passed an ``ipaddress.IPv4Address`` object where
    ``socket.bind`` expects a string host (TypeError at runtime), and
    binding the outgoing socket to loopback would in any case break
    connections to non-loopback destinations.
    """
    conn = None
    try:
        ip = ipaddress.ip_address(addr[0])
        if ip.version == 4:
            conn = socket.socket(
                socket.AF_INET, socket.SOCK_STREAM, 0)
            conn.connect(addr)
        else:
            conn = socket.socket(
                socket.AF_INET6, socket.SOCK_STREAM, 0)
            # IPv6 sockaddr carries (host, port, flowinfo, scope_id).
            conn.connect((addr[0], addr[1], 0, 0))
    except ValueError:
        pass  # does not appear to be an IPv4 or IPv6 address
    if conn is not None:
        return conn
    # Hostname: try to establish a dual stack IPv4/IPv6 connection.
    return socket.create_connection(addr)
class socket_connection(contextlib.ContextDecorator):
    """Context-manager / decorator flavour of ``new_socket_connection``.

    ``with socket_connection(addr) as conn:`` opens the connection on
    entry and closes it on exit.  Used as a decorator, the open socket
    is passed to the wrapped function as its first positional argument.
    """

    def __init__(self, addr: Tuple[str, int]):
        self.addr: Tuple[str, int] = addr
        self.conn: Optional[socket.socket] = None
        super().__init__()

    def __enter__(self) -> socket.socket:
        self.conn = new_socket_connection(self.addr)
        return self.conn

    def __exit__(
            self,
            exc_type: Optional[Type[BaseException]],
            exc_val: Optional[BaseException],
            exc_tb: Optional[TracebackType]) -> bool:
        if self.conn:
            self.conn.close()
        # Never suppress exceptions raised inside the with-block.
        return False

    def __call__(self, func: Callable[..., Any]) -> Callable[[socket.socket], Any]:
        @functools.wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            with self as conn:
                return func(conn, *args, **kwargs)
        return wrapper
class _HasFileno(Protocol):
    """Structural type: any object exposing a ``fileno()`` file
    descriptor (sockets, files), usable with the selectors machinery."""
    def fileno(self) -> int:
        ...  # pragma: no cover
class TcpConnectionUninitializedException(Exception):
    """Raised when ``TcpConnection.connection`` is accessed before an
    underlying socket has been established."""
    pass
class TcpConnection(ABC):
    """Buffered abstraction over a TCP server/client socket.

    Main motivation of this class is buffer management when reading
    from and writing to a socket: outgoing bytes are accumulated via
    ``queue()`` and pushed out with ``flush()``.  Subclasses implement
    the ``connection`` property to expose the underlying socket.
    """

    def __init__(self, tag: int):
        self.buffer: bytes = b''
        self.closed: bool = False
        # Human-readable role, used only in log messages.
        self.tag: str = 'server' if tag == tcpConnectionTypes.SERVER else 'client'

    @property
    @abstractmethod
    def connection(self) -> Union[ssl.SSLSocket, socket.socket]:
        """Must return the socket connection to use in this class."""
        raise TcpConnectionUninitializedException()  # pragma: no cover

    def send(self, data: bytes) -> int:
        """Send *data* directly.  Callers must handle BrokenPipeError."""
        return self.connection.send(data)

    def recv(self, buffer_size: int = DEFAULT_BUFFER_SIZE) -> Optional[bytes]:
        """Receive up to *buffer_size* bytes; None signals peer close.

        Callers must handle socket.error exceptions."""
        data: bytes = self.connection.recv(buffer_size)
        if not data:
            return None
        logger.debug('received %d bytes from %s' % (len(data), self.tag))
        return data

    def close(self) -> bool:
        """Close the socket once; subsequent calls are no-ops."""
        if not self.closed:
            self.connection.close()
            self.closed = True
        return self.closed

    def buffer_size(self) -> int:
        """Number of bytes currently queued for sending."""
        return len(self.buffer)

    def has_buffer(self) -> bool:
        """True when queued bytes are awaiting a flush."""
        return self.buffer_size() > 0

    def queue(self, data: bytes) -> int:
        """Append *data* to the outgoing buffer; returns len(data)."""
        self.buffer += data
        return len(data)

    def flush(self) -> int:
        """Attempt one send of the queued bytes; returns bytes sent.

        Partial sends leave the unsent tail queued.  Callers must
        handle BrokenPipeError."""
        if not self.has_buffer():
            return 0
        sent: int = self.send(self.buffer)
        self.buffer = self.buffer[sent:]
        logger.debug('flushed %d bytes to %s' % (sent, self.tag))
        return sent
class TcpServerConnection(TcpConnection):
"""Establishes connection to upstream server."""
def __init__(self, host: str, port: int):
super().__init__(tcpConnectionTypes.SERVER)
self._conn: Optional[Union[ssl.SSLSocket, socket.socket]] = None
self.addr: Tuple[str, int] = (host, int(port))
@property
def connection(self) -> Union[ssl.SSLSocket, socket.socket]:
if self._conn is None:
raise TcpConnectionUninitializedException()
return | |
<gh_stars>10-100
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
if 64 - 64: i11iIiiIii
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import Queue
import traceback
from Crypto . Cipher import AES
import ecdsa
import json
import commands
import copy
import chacha
import poly1305
from geopy . distance import vincenty
import curve25519
use_chacha = ( os . getenv ( "LISP_USE_CHACHA" ) != None )
use_poly = ( os . getenv ( "LISP_USE_POLY" ) != None )
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
lisp_print_rloc_probe_list = False
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
if 30 - 30: o0oOOo0O0Ooo - O0 % o0oOOo0O0Ooo - OoooooooOO * O0 * OoooooooOO
if 60 - 60: iIii1I11I1II1 / i1IIi * oO0o - I1ii11iIi11i + o0oOOo0O0Ooo
if 94 - 94: i1IIi % Oo0Ooo
if 68 - 68: Ii1I / O0
lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True
if 46 - 46: O0 * II111iiii / IiII * Oo0Ooo * iII111i . I11i
lisp_map_notify_queue = { }
lisp_map_servers_list = { }
lisp_ddt_map_requestQ = { }
lisp_db_list = [ ]
lisp_group_mapping_list = { }
lisp_map_resolvers_list = { }
lisp_rtr_list = { }
lisp_elp_list = { }
lisp_rle_list = { }
lisp_geo_list = { }
lisp_json_list = { }
lisp_myrlocs = [ None , None , None ]
lisp_mymacs = { }
if 62 - 62: i11iIiiIii - II111iiii % I1Ii111 - iIii1I11I1II1 . I1ii11iIi11i . II111iiii
if 61 - 61: oO0o / OoOoOO00 / iII111i * OoO0O00 . II111iiii
if 1 - 1: II111iiii - I1ii11iIi11i % i11iIiiIii + IiII . I1Ii111
if 55 - 55: iIii1I11I1II1 - I1IiiI . Ii1I * IiII * i1IIi / iIii1I11I1II1
if 79 - 79: oO0o + I1Ii111 . ooOoO0o * IiII % I11i . I1IiiI
lisp_myinterfaces = { }
lisp_iid_to_interface = { }
lisp_multi_tenant_interfaces = [ ]
if 94 - 94: iII111i * Ii1I / IiII . i1IIi * iII111i
lisp_test_mr_timer = None
lisp_rloc_probe_timer = None
if 47 - 47: i1IIi % i11iIiiIii
if 20 - 20: ooOoO0o * II111iiii
if 65 - 65: o0oOOo0O0Ooo * iIii1I11I1II1 * ooOoO0o
if 18 - 18: iIii1I11I1II1 / I11i + oO0o / Oo0Ooo - II111iiii - I11i
lisp_registered_count = 0
if 1 - 1: I11i - OOooOOo % O0 + I1IiiI - iII111i / I11i
if 31 - 31: OoO0O00 + II111iiii
if 13 - 13: OOooOOo * oO0o * I1IiiI
if 55 - 55: II111iiii
lisp_info_sources_by_address = { }
lisp_info_sources_by_nonce = { }
if 43 - 43: OoOoOO00 - i1IIi + I1Ii111 + Ii1I
if 17 - 17: o0oOOo0O0Ooo
if 64 - 64: Ii1I % i1IIi % OoooooooOO
if 3 - 3: iII111i + O0
if 42 - 42: OOooOOo / i1IIi + i11iIiiIii - Ii1I
if 78 - 78: OoO0O00
lisp_crypto_keys_by_nonce = { }
lisp_crypto_keys_by_rloc_encap = { }
lisp_crypto_keys_by_rloc_decap = { }
lisp_data_plane_security = False
lisp_search_decap_keys = True
if 18 - 18: O0 - iII111i / iII111i + ooOoO0o % ooOoO0o - IiII
lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False
if 62 - 62: iII111i - IiII - OoOoOO00 % i1IIi / oO0o
if 77 - 77: II111iiii - II111iiii . I1IiiI / o0oOOo0O0Ooo
if 14 - 14: I11i % O0
if 41 - 41: i1IIi + I1Ii111 + OOooOOo - IiII
if 77 - 77: Oo0Ooo . IiII % ooOoO0o
if 42 - 42: oO0o - i1IIi / i11iIiiIii + OOooOOo + OoO0O00
if 17 - 17: oO0o . Oo0Ooo . I1ii11iIi11i
lisp_crypto_ephem_port = None
if 3 - 3: OoOoOO00 . Oo0Ooo . I1IiiI / Ii1I
if 38 - 38: II111iiii % i11iIiiIii . ooOoO0o - OOooOOo + Ii1I
if 66 - 66: OoooooooOO * OoooooooOO . OOooOOo . i1IIi - OOooOOo
if 77 - 77: I11i - iIii1I11I1II1
lisp_pitr = False
if 82 - 82: i11iIiiIii . OOooOOo / Oo0Ooo * O0 % oO0o % iIii1I11I1II1
if 78 - 78: iIii1I11I1II1 - Ii1I * OoO0O00 + o0oOOo0O0Ooo + iII111i + iII111i
if 11 - 11: iII111i - OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
if 74 - 74: iII111i * O0
lisp_l2_overlay = False
if 89 - 89: oO0o + Oo0Ooo
if 3 - 3: i1IIi / I1IiiI % I11i * i11iIiiIii / O0 * I11i
if 49 - 49: oO0o % Ii1I + i1IIi . I1IiiI % I1ii11iIi11i
if 48 - 48: I11i + I11i / II111iiii / iIii1I11I1II1
if 20 - 20: o0oOOo0O0Ooo
lisp_rloc_probing = False
lisp_rloc_probe_list = { }
if 77 - 77: OoOoOO00 / I11i
if 98 - 98: iIii1I11I1II1 / i1IIi / i11iIiiIii / o0oOOo0O0Ooo
if 28 - 28: OOooOOo - IiII . IiII + OoOoOO00 - OoooooooOO + O0
if 95 - 95: OoO0O00 % oO0o . O0
if 15 - 15: ooOoO0o / Ii1I . Ii1I - i1IIi
if 53 - 53: IiII + I1IiiI * oO0o
lisp_register_all_rtrs = True
if 61 - 61: i1IIi * OOooOOo / OoooooooOO . i11iIiiIii . OoOoOO00
if 60 - 60: I11i / I11i
if 46 - 46: Ii1I * OOooOOo - OoO0O00 * oO0o - I1Ii111
if 83 - 83: OoooooooOO
lisp_nonce_echoing = False
lisp_nonce_echo_list = { }
if 31 - 31: II111iiii - OOooOOo . I1Ii111 % OoOoOO00 - O0
if 4 - 4: II111iiii / ooOoO0o . iII111i
if 58 - 58: OOooOOo * i11iIiiIii / OoOoOO00 % I1Ii111 - I1ii11iIi11i / oO0o
if 50 - 50: I1IiiI
lisp_nat_traversal = False
if 34 - 34: I1IiiI * II111iiii % iII111i * OoOoOO00 - I1IiiI
if 33 - 33: o0oOOo0O0Ooo + OOooOOo * OoO0O00 - Oo0Ooo / oO0o % Ii1I
if 21 - 21: OoO0O00 * iIii1I11I1II1 % oO0o * i1IIi
if 16 - 16: O0 - I1Ii111 * iIii1I11I1II1 + iII111i
if 50 - 50: II111iiii - ooOoO0o * I1ii11iIi11i / I1Ii111 + o0oOOo0O0Ooo
if 88 - 88: Ii1I / I1Ii111 + iII111i - II111iiii / ooOoO0o - OoOoOO00
if 15 - 15: I1ii11iIi11i + OoOoOO00 - OoooooooOO / OOooOOo
if 58 - 58: i11iIiiIii % I11i
lisp_program_hardware = False
if 71 - 71: OOooOOo + ooOoO0o % i11iIiiIii + I1ii11iIi11i - IiII
if 88 - 88: OoOoOO00 - OoO0O00 % OOooOOo
if 16 - 16: I1IiiI * oO0o % IiII
if 86 - 86: I1IiiI + Ii1I % i11iIiiIii * oO0o . ooOoO0o * I11i
lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"
if 44 - 44: oO0o
if 88 - 88: I1Ii111 % Ii1I . II111iiii
if 38 - 38: o0oOOo0O0Ooo
if 57 - 57: O0 / oO0o * I1Ii111 / OoOoOO00 . II111iiii
lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"
if 26 - 26: iII111i
if 91 - 91: OoO0O00 . I1ii11iIi11i + OoO0O00 - iII111i / OoooooooOO
if 39 - 39: I1ii11iIi11i / ooOoO0o - II111iiii
if 98 - 98: I1ii11iIi11i / I11i % oO0o . OoOoOO00
if 91 - 91: oO0o % Oo0Ooo
lisp_ipc_lock = None
if 64 - 64: I11i % iII111i - I1Ii111 - oO0o
if 31 - 31: I11i - II111iiii . I11i
if 18 - 18: o0oOOo0O0Ooo
if 98 - 98: iII111i * iII111i / iII111i + I11i
if 34 - 34: ooOoO0o
if 15 - 15: I11i * ooOoO0o * Oo0Ooo % i11iIiiIii % OoOoOO00 - OOooOOo
lisp_default_iid = 0
if 68 - 68: I1Ii111 % i1IIi . IiII . I1ii11iIi11i
if 92 - 92: iII111i . I1Ii111
if 31 - 31: I1Ii111 . OoOoOO00 / O0
if 89 - 89: OoOoOO00
if 68 - 68: OoO0O00 * OoooooooOO % O0 + OoO0O00 + ooOoO0o
lisp_ms_rtr_list = [ ]
if 4 - 4: ooOoO0o + O0 * OOooOOo
if 55 - 55: Oo0Ooo + iIii1I11I1II1 / OoOoOO00 * oO0o - i11iIiiIii - Ii1I
if 25 - 25: I1ii11iIi11i
if 7 - 7: i1IIi / | |
import numpy as np
import networkx as nx
from autode.smiles import atom_types
from autode.log import logger
from autode.utils import log_time
from autode.atoms import Atom, AtomCollection
from autode.bonds import get_avg_bond_length
from autode.smiles.base import SMILESAtom, SMILESBond, SMILESStereoChem
from autode.smiles.angles import Dihedral, Dihedrals, Angle, Angles
from ade_dihedrals import rotate, closed_ring_coords
from ade_rb_opt import opt_rb_coords
from autode.exceptions import (SMILESBuildFailed,
FailedToSetRotationIdxs,
FailedToAdjustAngles)
class Builder(AtomCollection):
"""3D geometry builder::
Atoms: C, 4H H H
Bonds: 4 x C-H --> C
H H
"""
@property
def built(self):
    """Have all the atoms been shifted appropriately?

    Returns:
        (bool): True when atoms exist and no atoms remain queued
    """
    if self.atoms is None:
        return False
    return len(self.queued_atoms) == 0
@property
def canonical_atoms(self):
    """Generate canonical autodE atoms from this set

    Returns:
        (list(autode.atoms.Atom)): Atoms with the builder's coordinates
    """
    return [Atom(atom.label,
                 x=atom.coord[0],
                 y=atom.coord[1],
                 z=atom.coord[2])
            for atom in self.atoms]
@property
def canonical_atoms_at_origin(self):
    """Canonical set of autodE atoms all located at the origin

    Returns:
        (list(autode.atoms.Atom)): Atoms all with atom.coord = [0, 0, 0]
    """
    origin_atoms = []
    for atom in self.atoms:
        # Atom() places an atom at the origin when no coordinates are given
        origin_atoms.append(Atom(atom.label))
    return origin_atoms
@property
def built_atom_idxs(self):
    """Atom indexes that have been built (i.e. shifted into position)

    Returns:
        (list(int)): Atom indexes
    """
    idxs = []
    for i in range(self.n_atoms):
        if self.atoms[i].is_shifted:
            idxs.append(i)
    return idxs
@property
def non_bonded_idx_matrix(self):
    """
    Matrix with a 1 for each non-bonded atom pair and a 0 for self pairs,
    bonded pairs and any pair involving an atom not yet built

    Returns:
        (np.ndarray): shape = (n_atoms, n_atoms)
    """
    n = self.n_atoms
    matrix = np.ones(shape=(n, n), dtype='i4')
    np.fill_diagonal(matrix, 0)   # no self-repulsion

    for idx_i, idx_j in self.bonds:
        matrix[idx_i, idx_j] = 0
        matrix[idx_j, idx_i] = 0

    # Atoms that have not been shifted yet must not contribute
    for idx, atom in enumerate(self.atoms):
        if not atom.is_shifted:
            matrix[idx, :] = 0
            matrix[:, idx] = 0

    return matrix
@property
def max_ring_n(self):
    """Maximum ring size in this molecule

    Returns:
        (int): Maximum ring size, 0 when there are no rings
    """
    ring_sizes = [len(idxs) for idxs in (self.rings_idxs or [])]
    return max(ring_sizes) if ring_sizes else 0
def _atom_is_d8(self, idx):
    """
    Is an atom a d8 metal? Only a subset of the platinum group elements
    is considered

    Arguments:
        idx (int): Atom index

    Returns:
        (bool):
    """
    atom = self.atoms[idx]

    if atom.atomic_symbol not in ('Rh', 'Pd', 'Ir', 'Pt'):
        return False

    # Initial d-electron count from group number and formal charge
    d_electrons = atom.group - atom.charge

    for bond in self.bonds.involving(idx):
        # Only remove an electron if a ligand is singly bonded (X) and
        # treat all double bonds as L2 ligands, rather than X2
        if bond.order % 2 == 1:
            d_electrons -= 1

    logger.info(f'{atom}, dn = {d_electrons}')
    return d_electrons == 8
def _explicit_all_hydrogens(self):
    """Convert all implicit hydrogens to explicit ones, appending the new
    H atoms to the atom list and adding a single bond for each"""
    added_h = []

    for idx, atom in enumerate(self.atoms):

        if getattr(atom, 'n_hydrogens', None) is None:
            logger.warning(f'{atom} did not have a defined number of '
                           'hydrogens. Assuming 0')
            atom.n_hydrogens = 0

        for _ in range(atom.n_hydrogens):
            added_h.append(SMILESAtom('H', n_hydrogens=0))

            # Index the new H will have once added at the end of the list
            h_idx = self.n_atoms + len(added_h) - 1
            self.bonds.append(SMILESBond(idx, h_idx, symbol='-'))

        # Hydrogens are now explicit, so none remain implicit on this atom
        atom.n_hydrogens = 0

    self.atoms += added_h
    return
def _set_atom_types(self):
    """
    Set the atom types for all atoms, where the atom type is determined
    by the number of bonded atoms, and the 'hybridisation' as well as
    the stereochemistry.

    Also initialises per-atom state used by the builder: zeroed
    coordinates, graph neighbours and ring membership, plus the ring
    basis (self.rings_idxs).

    Raises:
        (SMILESBuildFailed): If an atom is not a SMILESAtom
        (NotImplementedError): For coordination numbers above 8
    """
    logger.info(f'Setting {self.n_atoms} atom types')

    # Cycle basis of the molecular graph; used for ring membership tests
    self.rings_idxs = nx.cycle_basis(self.graph)
    logger.info(f'Have {len(self.rings_idxs)} ring(s)')

    for i, atom in enumerate(self.atoms):
        # All atoms start at the origin and record their graph neighbours
        atom.coord = np.zeros(3)
        atom.neighbours = list(self.graph.neighbors(i))
        atom.in_ring = len(self._ring_idxs([i], return_empty=True)) > 0

        if not isinstance(atom, SMILESAtom):
            raise SMILESBuildFailed('Builder requires SMILESAtom-s')

        if atom.n_bonded == 0:
            # No type is needed for an isolated atom
            continue

        elif atom.n_bonded == 1:  # e.g. H2, FCH3
            atom.type = atom_types.TerminalAtom()

        elif atom.n_bonded == 2:  # e.g. OH2, SR2
            if atom.group == 16:
                atom.type = atom_types.BentAtom()

            elif atom.group == 15:  # e.g. H2C=NH
                atom.type = atom_types.TrigonalAtom()

            else:  # e.g. AuR2
                atom.type = atom_types.LinearAtom()

        elif atom.n_bonded == 3:  # e.g. NH3
            if atom.group == 15:
                atom.type = atom_types.TrigonalPyramidalAtom()

            else:  # e.g. BH3
                atom.type = atom_types.TrigonalAtom()

        elif atom.n_bonded == 4:  # e.g. CH4
            # NOTE: branch order matters here -- the Xe and d8-metal
            # special cases must be checked before stereochemistry
            if atom.atomic_symbol == 'Xe':  # e.g. XeF4
                atom.type = atom_types.SquarePlanarAtom()

            # Second row transition metals that are d8 should be sq planar
            elif self._atom_is_d8(idx=i) and atom.period == 5:
                atom.type = atom_types.SquarePlanarAtom()

            elif atom.stereochem == SMILESStereoChem.TET_NORMAL:
                atom.type = atom_types.TetrahedralNAtom()

            elif atom.stereochem == SMILESStereoChem.TET_INVERTED:
                atom.type = atom_types.TetrahedralIAtom()
            else:
                atom.type = atom_types.TetrahedralAtom()

        elif atom.n_bonded == 5:
            atom.type = atom_types.TrigonalBipyramidalAtom()

        elif atom.n_bonded == 6:
            atom.type = atom_types.OctahedralAtom()

        elif atom.n_bonded == 7:
            atom.type = atom_types.PentagonalBipyramidalAtom()

        elif atom.n_bonded == 8:
            atom.type = atom_types.SquareAntiprismAtom()

        else:
            raise NotImplementedError('Coordination numbers >8 are not'
                                      '(yet) supported')
    return None
def _ring_idxs(self, inc_idxs, return_empty=False):
    """Indexes of atoms in the ring containing all the given atoms

    Arguments:
        inc_idxs (list(int)): List of atom indexes that need to be included
                              in the ring

    Keyword Arguments:
        return_empty (bool): Return [] rather than raising when no ring
                             contains every index

    Returns:
        (list(int)): Atom indexes in this ring if they can be found

    Raises:
        (autode.exceptions.SMILESBuildFailed): If there is no such ring
    """
    for ring in self.rings_idxs:
        if all(idx in ring for idx in inc_idxs):
            return ring

    if return_empty:
        return []

    raise SMILESBuildFailed(f'No ring containing {inc_idxs}')
def _ring_path(self, ring_bond):
    """
    Find the path which traverses a ring closed by a ring bond

         C2----C3
        /       |     -->  1, 2, 3, 4
      C1  ****  C4
           ^
        ring bond

    Args:
        ring_bond (autode.smiles.SMILESBond):

    Returns:
        (list(int)): Ordered atom indexes along the ring

    Raises:
        (SMILESBuildFailed): If a suitable path is not found
    """
    ring_idxs = self._ring_idxs(ring_bond)

    candidate_paths = nx.shortest_simple_paths(self.graph,
                                               source=ring_bond[0],
                                               target=ring_bond[1])
    for path in candidate_paths:

        # The two-atom path straight across the ring bond (C1-C4 above)
        # always exists and is not a traversal of the ring
        if len(path) == 2:
            continue

        # For multiple fused rings there may be other paths that could be
        # traversed, so only take the one that stays within this ring
        if all(idx in ring_idxs for idx in path):
            return path

    raise SMILESBuildFailed('Could not find path in ring')
def _ring_dihedrals(self, ring_bond):
    """
    Given a ring bond find all the rotatable dihedrals that can be adjusted
    to close it with a reasonable bond distance

    Arguments:
        ring_bond (autode.smiles.SMILESBond):

    Yields:
        (iterator(autode.smiles.builder.Dihedral)):

    Raises:
        (autode.exceptions.SMILESBuildFailed): If dihedrals cannot be
                                               located
    """
    path = self._ring_path(ring_bond=ring_bond)

    # The dihedrals are all the sequential 4-atom windows along the path.
    # (Previously the loop variable shadowed the list it iterated over and
    # the enumerate() index was unused)
    for idxs in (tuple(path[i:i + 4]) for i in range(len(path) - 3)):

        dihedral = Dihedral(idxs)

        # Optimum distance between the two middle atoms, used for
        # determining if a bond exists thus a dihedral can be rotated
        dihedral.mid_dist = self.bonds.first_involving(*dihedral.mid_idxs).r0

        # If both atoms either side of this one are 'pi' atoms e.g. in a
        # benzene ring, then the ideal angle must be 0 to close the ring
        if all(self.atoms[idx].is_pi for idx in dihedral.mid_idxs):
            dihedral.phi_ideal = 0.0

        # Only yield single bonds, that can be rotated freely
        if self.graph.get_edge_data(*dihedral.mid_idxs)['order'] == 1:
            yield dihedral
def _reset_queued_atom_sites(self, other_idxs=None):
    """
    When a dihedral rotation(s) is(are) performed the rotation is not
    applied to the empty sites that are present in the queued atoms,
    they therefore need to be reset

    Keyword Arguments:
        other_idxs (list | set | None): Other indexes that need to be reset
    """
    extra_idxs = list(other_idxs) if other_idxs is not None else []

    for idx_i in set(self.queued_atoms + extra_idxs):
        logger.info(f'Resetting sites on atom {idx_i}')

        atom = self.atoms[idx_i]
        points = [self.atoms[n].coord for n in atom.neighbours]

        # Resetting an atom onto two atoms can fail to apply the stereochem
        # thus only set it onto one
        if atom.has_stereochem and len(points) == 2:
            points = points[:1]

        atom.type.reset_onto(points, coord=atom.coord)

    return None
@log_time(prefix='Closed ring in:', units='ms')
def _adjust_ring_dihedrals(self, ring_bond, dihedrals):
    """Outsource the ring closure to an external function.

    Delegates to the compiled ade_dihedrals.closed_ring_coords routine,
    which rotates the given dihedrals so the two atoms of ring_bond end
    up separated by the ideal distance ring_bond.r0, then overwrites this
    builder's coordinates with the result.

    Arguments:
        ring_bond (autode.smiles.SMILESBond): Bond that closes the ring
        dihedrals (autode.smiles.angles.Dihedrals): Dihedrals to rotate
    """
    logger.info('Adjusting ring dihedrals to close the ring')

    coords = closed_ring_coords(py_coords=self.coordinates,
                                py_curr_angles=dihedrals.values(self.atoms),
                                py_ideal_angles=dihedrals.ideal_angles,
                                py_axes=dihedrals.axes,
                                py_rot_idxs=dihedrals.rot_idxs,
                                py_origins=dihedrals.origins,
                                # repulsion only between built, non-bonded pairs
                                py_rep_idxs=self.non_bonded_idx_matrix,
                                py_close_idxs=np.array(tuple(ring_bond),
                                                       dtype='i4'),
                                py_r0=ring_bond.r0)
    self.coordinates = coords
    return
def _adjust_ring_angles(self, ring_bond):
"""Shift angles in a ring to close e.g. in a cyclopropane the 109º
angles between carbons are much to large to generate a sensible
geometry no matter the dihedral angles, so compress the C-C-C angles
to 60º to close the ring e.g::
C2---- C3 C2
/ --> / |
C1 C1 ---C3
Arguments:
ring_bond (autode.smiles.base.RingBond):
"""
path = self._ring_path(ring_bond=ring_bond)
ring_n | |
big image or tensor 'a', shave it symmetrically into b's shape"""
# If dealing with a tensor should shave the 3rd & 4th dimension, o.w. the 1st and 2nd
is_tensor = (type(a) == torch.Tensor)
r = 2 if is_tensor else 0
c = 3 if is_tensor else 1
# Calculate the shaving of each dimension
shave_r, shave_c = max(0, a.shape[r] - b.shape[r]), max(0, a.shape[c] - b.shape[c])
return a[:, :, shave_r // 2:a.shape[r] - shave_r // 2 - shave_r % 2, shave_c // 2:a.shape[c] - shave_c // 2 - shave_c % 2] if is_tensor \
else a[shave_r // 2:a.shape[r] - shave_r // 2 - shave_r % 2, shave_c // 2:a.shape[c] - shave_c // 2 - shave_c % 2]
def map2tensor(gray_map):
    """Lift a 2-D gray map onto the GPU as a (1, 1, H, W) float tensor.

    No normalization is performed.
    """
    tensor = torch.FloatTensor(gray_map)
    return tensor.unsqueeze(0).unsqueeze(0).cuda()
def resize_tensor_w_kernel(im_t, k, sf=None):
    """Convolve a tensor with the given kernel, optionally striding by a
    scale factor.

    Args:
        im_t: input tensor of shape (N, C, H, W)
        k:    2-D convolution kernel
        sf:   scale factor; stride is round(1 / sf).  Previously the
              documented-optional default of None crashed on round(1/None);
              None now means stride 1 (no resizing).

    Returns:
        The convolved (and possibly strided) tensor.
    """
    # Expand dimensions to fit convolution: [out_channels, in_channels, k_height, k_width]
    k = k.expand(im_t.shape[1], im_t.shape[1], k.shape[0], k.shape[1])
    # 'same'-style padding for odd kernel sizes
    padding = (k.shape[-1] - 1) // 2
    stride = 1 if sf is None else round(1 / sf)
    return F.conv2d(im_t, k, stride=stride, padding=padding)
def create_penalty_mask(k_size, penalty_scale):
    """Generate a mask of weights penalizing values close to the boundaries"""
    center_size = k_size // 2 + k_size % 2
    gauss = create_gaussian(size=k_size, sigma1=k_size, is_tensor=False)
    mask = 1 - gauss / np.max(gauss)
    margin = (k_size - center_size) // 2 - 1
    # zero-out the central region so only the boundary carries a penalty
    mask[margin:-margin, margin:-margin] = 0
    return penalty_scale * mask
def create_gaussian(size, sigma1, sigma2=-1, is_tensor=False):
    """Return a (size x size) Gaussian as an ndarray, or a CUDA tensor when
    is_tensor is set.  sigma2 == -1 means an isotropic kernel."""
    zs = range(-size // 2 + 1, size // 2 + 1)

    def gauss_1d(sigma):
        norm = np.sqrt(2 * np.pi * sigma ** 2)
        return [np.exp(-z ** 2 / (2 * sigma ** 2)) / norm for z in zs]

    col = gauss_1d(sigma1)
    row = col if sigma2 == -1 else gauss_1d(sigma2)
    kernel = np.outer(col, row)
    return torch.FloatTensor(kernel).cuda() if is_tensor else kernel
def calc_curr_k(G, G_kernel_size):
    """given a generator network, the function calculates the kernel it is
    imitating, by pushing a delta impulse through its conv layers"""
    curr_k = torch.Tensor([1.]).unsqueeze(0).unsqueeze(-1).unsqueeze(-1).cuda()
    for ind, w in enumerate(G.parameters()):
        if ind == 0:
            # pad the first layer so the full receptive field is captured
            curr_k = F.conv2d(curr_k, w, padding=G_kernel_size - 1)
        else:
            curr_k = F.conv2d(curr_k, w)
    return curr_k.squeeze().flip([0, 1])
####################
# ffmpeg
####################
def encode_video_with_ffmpeg(src_path, dst_path, crf, fps=25, start_number=1, vframes=1000):
    """Encode an image sequence into an H.265 (libx265) video via the ffmpeg CLI.

    Args:
        src_path: printf-style image pattern passed to ffmpeg's -i
        dst_path: output video path
        crf: constant-rate-factor quality setting
        fps: frame rate for both input reading and the fps filter
        start_number: first frame index of the image sequence
        vframes: maximum number of frames to encode

    SECURITY NOTE(review): the command is interpolated into a shell string
    and run via os.system; paths containing shell metacharacters will break
    or be interpreted by the shell.  Prefer subprocess.run with an argument
    list if the inputs are untrusted.
    """
    command = 'ffmpeg -r {} -f image2 -start_number {} -i {} -vframes {} -vcodec libx265 ' \
              '-vf fps={} -crf {} -pix_fmt yuv420p -an {} -y &>/dev/null'\
        .format(fps, start_number, src_path, vframes, fps, crf, dst_path)
    print('doing... ' + command)
    os.system(command)
def extract_frames_with_ffmpeg(video_path, image_path):
    """Decode every frame of video_path and write numbered PNGs
    (00001.png, 00002.png, ...) into image_path.

    Frame data is streamed from an ffmpeg subprocess as raw BGR24 bytes;
    OpenCV is only used to probe the geometry and to write the PNGs.
    """
    cap = cv2.VideoCapture(video_path)
    assert cap.isOpened(), 'cannot open video {}'.format(video_path)
    frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)  # NOTE(review): unused
    fps = int(cap.get(cv2.CAP_PROP_FPS))  # NOTE(review): unused
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    cap.release()
    process = (
        ffmpeg
        .input(video_path)
        .output('pipe:', format='rawvideo', pix_fmt='bgr24')
        .run_async(pipe_stdout=True)
    )
    k = 0
    while True:
        k += 1
        # one full frame of raw BGR24 data: 3 bytes per pixel
        in_bytes = process.stdout.read(width * height * 3)
        if not in_bytes:
            break
        frame = np.frombuffer(in_bytes, np.uint8).reshape([height, width, 3])
        cv2.imwrite(osp.join(image_path, '{:05d}.png'.format(k)), frame)
    process.wait()
    # k was incremented once past the final frame, hence the -1
    print('total {} frames'.format(k - 1))
####################
# image decomposition
####################
def gauss_kernel(size=5, device=torch.device('cpu'), channels=3):
    """Return the normalized 5x5 binomial (Gaussian) smoothing kernel,
    one (1, 5, 5) copy per channel, shaped (channels, 1, 5, 5) for a
    depthwise conv2d.  The `size` argument is unused (kernel is fixed)."""
    base = torch.tensor([[1., 4., 6., 4., 1.],
                         [4., 16., 24., 16., 4.],
                         [6., 24., 36., 24., 6.],
                         [4., 16., 24., 16., 4.],
                         [1., 4., 6., 4., 1.]]) / 256.
    return base.repeat(channels, 1, 1, 1).to(device)
def conv_gauss(img, kernel):
    """Depthwise-convolve img with kernel after reflect-padding by 2 on
    each side (sized for a 5x5 kernel, preserving H and W)."""
    padded = torch.nn.functional.pad(img, (2, 2, 2, 2), mode='reflect')
    return torch.nn.functional.conv2d(padded, kernel, groups=img.shape[1])
def downsample(x):
    """Decimate an (N, C, H, W) tensor by keeping every other row and
    column.

    The original slice expression had been corrupted into an IPv6-like
    token; the standard stride-2 pyramid decimation is restored.
    """
    return x[:, :, ::2, ::2]
def upsample(x):
    """Upsample an (N, C, H, W) tensor by 2 in H and W: zero-insert onto
    the even grid, then Gaussian-smooth (kernel scaled by 4 to preserve
    overall intensity).

    The original zero-insertion slice had been corrupted into an
    IPv6-like token; the standard stride-2 placement is restored.
    """
    x_up = torch.zeros(x.shape[0], x.shape[1], x.shape[2] * 2, x.shape[3] * 2,
                       device=x.device)
    x_up[:, :, ::2, ::2] = x
    return conv_gauss(x_up, 4 * gauss_kernel(channels=x.shape[1], device=x.device))
def lap_pyramid(img, kernel, max_levels=3):
    """Build a Laplacian pyramid of max_levels difference images (the
    low-pass residual itself is not appended)."""
    pyr = []
    current = img
    for _ in range(max_levels):
        blurred = conv_gauss(current, kernel)
        reduced = downsample(blurred)
        pyr.append(current - upsample(reduced))
        current = reduced
    return pyr
def gau_pyramid(img, kernel, max_levels=3):
    """Gaussian pyramid: the input image followed by max_levels-1
    successively blurred and decimated copies."""
    pyr = [img]
    for _ in range(max_levels - 1):
        pyr.append(downsample(conv_gauss(pyr[-1], kernel)))
    return pyr
def laplacian_pyramid(img, kernel, max_levels=3):
    """Laplacian pyramid whose final entry is the low-pass residual, so
    the image can be exactly reconstructed from the returned list."""
    assert max_levels > 1
    pyr = []
    current = img
    for _ in range(max_levels - 1):
        blurred = conv_gauss(current, kernel)
        reduced = downsample(blurred)
        pyr.append(current - upsample(reduced))
        current = reduced
    pyr.append(current)   # low-frequency residual
    return pyr
####################
# blur kernel and PCA
####################
def _is_pil_image(img):
    """True for PIL images (and accimage images when accimage is available)."""
    accepted = (Image.Image,) if accimage is None else (Image.Image, accimage.Image)
    return isinstance(img, accepted)
def _is_tensor_image(img):
return torch.is_tensor(img) and img.ndimension() == 3
def _is_numpy_image(img):
return isinstance(img, np.ndarray) and (img.ndim in {2, 3})
def to_pil_image(pic, mode=None):
    """Convert a tensor or an ndarray to PIL Image.

    See :class:`~torchvision.transforms.ToPIlImage` for more details.

    Args:
        pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
        mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).

    .. _PIL.Image mode: http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#modes

    Returns:
        PIL Image: Image converted to PIL Image.

    Raises:
        TypeError: if pic is neither tensor nor ndarray, or its dtype is
            unsupported for the inferred mode.
        ValueError: if an explicit ``mode`` conflicts with the dtype/shape.
    """
    if not(_is_numpy_image(pic) or _is_tensor_image(pic)):
        raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))
    npimg = pic
    if isinstance(pic, torch.FloatTensor):
        # float tensors are assumed to be in [0, 1]; scale to byte range
        pic = pic.mul(255).byte()
    if torch.is_tensor(pic):
        npimg = np.transpose(pic.numpy(), (1, 2, 0))  # CHW -> HWC
    if not isinstance(npimg, np.ndarray):
        raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' +
                        'not {}'.format(type(npimg)))
    if npimg.shape[2] == 1:
        # single channel: squeeze to 2-D and pick the mode from the dtype
        expected_mode = None
        npimg = npimg[:, :, 0]
        if npimg.dtype == np.uint8:
            expected_mode = 'L'
        if npimg.dtype == np.int16:
            expected_mode = 'I;16'
        if npimg.dtype == np.int32:
            expected_mode = 'I'
        elif npimg.dtype == np.float32:
            expected_mode = 'F'
        if mode is not None and mode != expected_mode:
            # BUGFIX: report the array's actual dtype (previously formatted
            # 'np.dtype' -- the numpy type object -- making the message useless)
            raise ValueError("Incorrect mode ({}) supplied for input type {}. Should be {}"
                             .format(mode, npimg.dtype, expected_mode))
        mode = expected_mode
    elif npimg.shape[2] == 4:
        permitted_4_channel_modes = ['RGBA', 'CMYK']
        if mode is not None and mode not in permitted_4_channel_modes:
            raise ValueError("Only modes {} are supported for 4D inputs".format(permitted_4_channel_modes))
        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGBA'
    else:
        permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV']
        if mode is not None and mode not in permitted_3_channel_modes:
            raise ValueError("Only modes {} are supported for 3D inputs".format(permitted_3_channel_modes))
        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGB'
    if mode is None:
        raise TypeError('Input type {} is not supported'.format(npimg.dtype))
    return Image.fromarray(npimg, mode=mode)
def to_tensor(pic):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.

    See ``ToTensor`` for more details.

    Args:
        pic (PIL Image or numpy.ndarray): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.  Byte-valued inputs are scaled to [0, 1];
        other dtypes (e.g. PIL 'I'/'I;16' modes) are returned unscaled.
    """
    if not(_is_pil_image(pic) or _is_numpy_image(pic)):
        raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))
    if isinstance(pic, np.ndarray):
        # handle numpy array: HWC -> CHW
        img = torch.from_numpy(pic.transpose((2, 0, 1)))
        # backward compatibility
        return img.float().div(255)
    if accimage is not None and isinstance(pic, accimage.Image):
        # accimage provides a zero-copy copyto into a preallocated buffer
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
        pic.copyto(nppic)
        return torch.from_numpy(nppic)
    # handle PIL Image; integer modes map directly onto numpy dtypes
    if pic.mode == 'I':
        img = torch.from_numpy(np.array(pic, np.int32, copy=False))
    elif pic.mode == 'I;16':
        img = torch.from_numpy(np.array(pic, np.int16, copy=False))
    else:
        # raw byte view of the image data, decoded per-mode below
        img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
    # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
    if pic.mode == 'YCbCr':
        nchannel = 3
    elif pic.mode == 'I;16':
        nchannel = 1
    else:
        nchannel = len(pic.mode)
    img = img.view(pic.size[1], pic.size[0], nchannel)
    # put it from HWC to CHW format
    # yikes, this transpose takes 80% of the loading time/CPU
    img = img.transpose(0, 1).transpose(0, 2).contiguous()
    if isinstance(img, torch.ByteTensor):
        return img.float().div(255)
    else:
        return img
def resize(img, size, interpolation=Image.BILINEAR):
"""Resize the input PIL Image to the given size.
Args:
img (PIL Image): Image to be resized.
size (sequence or int): Desired output size. If size is a sequence like
(h, w), the output size will be matched to this. If size is an int,
the smaller edge of the image will be matched to this number maintaing
the aspect ratio. i.e, if height > width, then image will be rescaled to
(size * height / width, size)
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
Returns:
PIL Image: Resized image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
if not (isinstance(size, int) or (isinstance(size, collections.Iterable) and len(size) == 2)):
raise TypeError('Got inappropriate size arg: {}'.format(size))
if isinstance(size, int):
w, h = img.size
if (w <= h and w == size) or (h <= w and h == size):
return img
if w < h:
ow = size
oh | |
else:
# route is not straight and will be split.
# put old route in list and start a new route
if len(newroute) >= n_edges_min:
# route should contain at least n_edges_min edges
newroutes.append(newroute)
routecosts.append(self.get_routecost(newroute))
# attention, we need the last edge of the old route
# in order to include the connector between both routes
newroute = [newroute[-1], id_edge, ]
else:
# route is using an edge outside the TLS
# even thou the route may return into the TLS
# it will be by no means a straight route worth following
# so better eliminate and return nothing
return [], []
if len(newroute) >= n_edges_min:
# route should contain at least n_edges_min edges
newroutes.append(newroute)
routecosts.append(self.get_routecost(newroute))
print ' ind_route', ind_route, len(route), 'newroutes', newroutes, 'routecosts', routecosts
return newroutes, routecosts
def follow_major_route_backward(self, id_edge_next, shape):
print 'follow_major_route_backward', id_edge_next
net = self.parent
edges = net.edges
ids_edges_major = self.ids_edges_major
route = []
ids_edges_major.add(id_edge_next)
angle_max = 45.0/180*np.pi
p01 = shape[0][:2]
p11 = shape[1][:2]
dir1 = p11-p01
#v1 = np.concatenate(((x2-x1).reshape(1,-1),(y2-y1).reshape(1,-1)),0)
#v2 = np.concatenate(((p[0]-x1).reshape(1,-1),(p[1]-y1).reshape(1,-1)),0)
#angles = get_diff_angle_clockwise(v1,v2)
is_follow = True
while is_follow:
ids_edge = edges.get_incoming(id_edge_next)
# print ' id_edge_next',id_edge_next,'=>',ids_edge
ids_edge_eligible = []
angles_eligible = []
shapes_eligible = []
nums_lane_eligible = []
for id_edge, id_sumo, type_spread, shape, \
n_lane, ids_lane, priority, id_fromnode, id_tonode, speed_max\
in zip(ids_edge,
edges.ids_sumo[ids_edge],
edges.types_spread[ids_edge],
edges.shapes[ids_edge],
edges.nums_lanes[ids_edge],
edges.ids_lanes[ids_edge],
edges.priorities[ids_edge],
edges.ids_fromnode[ids_edge],
edges.ids_tonode[ids_edge],
edges.speeds_max[ids_edge],
):
# print ' check next',id_edge,'major',self.is_major_road( priority,n_lane,speed_max),'not used',(id_edge not in ids_edges_major)
if self.is_major_road(priority, n_lane, speed_max) & (id_edge not in ids_edges_major):
p02 = shape[-2][:2]
p12 = shape[-1][:2]
dir2 = p12-p02
angle = get_diff_angle_clockwise(dir1, dir2)
angle_abs = min(angle, 2*np.pi-angle)
edgedirection2 = shape[:2]
# print ' |angle|=%d'%(angle_abs/np.pi*180),angle/np.pi*180,dir1,dir2
if angle_abs < angle_max:
if id_edge not in self.ids_edges_major:
# print ' choose',id_edge
ids_edge_eligible.append(id_edge)
angles_eligible.append(angle_abs)
shapes_eligible.append(shape)
nums_lane_eligible.append(n_lane)
n_eligible = len(ids_edge_eligible)
if n_eligible == 0:
is_follow = False
else:
if n_eligible == 1:
id_edge_next = ids_edge_eligible[0]
shape_next = shapes_eligible[0]
elif n_eligible > 1:
# search edge with maximum number of lanes
n_lane_max = max(nums_lane_eligible)
inds_lane = np.flatnonzero(np.array(nums_lane_eligible) == n_lane_max)
if len(inds_lane) == 1:
# unique maximum
id_edge_next = ids_edge_eligible[inds_lane[0]]
shape_next = shapes_eligible[inds_lane[0]]
else:
# multiple edges have maximum number of lanes
angles_eligible = np.array(angles_eligible, dtype=np.float32)
ind_angle = np.argmin(angles_eligible[inds_lane])
id_edge_next = np.array(ids_edge_eligible, dtype=np.int32)[inds_lane][ind_angle]
shape_next = np.array(shapes_eligible, dtype=np.object)[inds_lane][inds_lane[0]]
p01 = shape_next[0][:2]
p11 = shape_next[1][:2]
dir1 = p11-p01
# print ' **winner:',id_edge_next,'dir1=',dir1
route.append(id_edge_next)
ids_edges_major.add(id_edge_next)
is_follow = True
# print ' backward route:',route
# print
return route
def follow_major_route_foreward(self, id_edge_next, shape):
print 'follow_major_route_foreward', id_edge_next
net = self.parent
edges = net.edges
ids_edges_major = self.ids_edges_major
route = []
ids_edges_major.add(id_edge_next)
angle_max = 45.0/180*np.pi
p01 = shape[-2][:2]
p11 = shape[-1][:2]
dir1 = p11-p01
#v1 = np.concatenate(((x2-x1).reshape(1,-1),(y2-y1).reshape(1,-1)),0)
#v2 = np.concatenate(((p[0]-x1).reshape(1,-1),(p[1]-y1).reshape(1,-1)),0)
#angles = get_diff_angle_clockwise(v1,v2)
is_follow = True
while is_follow:
ids_edge = edges.get_outgoing(id_edge_next)
# print ' id_edge_next',id_edge_next,'=>',ids_edge
ids_edge_eligible = []
angles_eligible = []
shapes_eligible = []
nums_lane_eligible = []
for id_edge, id_sumo, type_spread, shape, \
n_lane, ids_lane, priority, id_fromnode, id_tonode, speed_max\
in zip(ids_edge,
edges.ids_sumo[ids_edge],
edges.types_spread[ids_edge],
edges.shapes[ids_edge],
edges.nums_lanes[ids_edge],
edges.ids_lanes[ids_edge],
edges.priorities[ids_edge],
edges.ids_fromnode[ids_edge],
edges.ids_tonode[ids_edge],
edges.speeds_max[ids_edge],
):
# print ' check next',id_edge,'major',self.is_major_road( priority,n_lane,speed_max),'not used',(id_edge not in ids_edges_major)
if self.is_major_road(priority, n_lane, speed_max) & (id_edge not in ids_edges_major):
p02 = shape[0][:2]
p12 = shape[1][:2]
dir2 = p12-p02
angle = get_diff_angle_clockwise(dir1, dir2)
angle_abs = min(angle, 2*np.pi-angle)
edgedirection2 = shape[:2]
print ' |angle|=%d' % (angle_abs/np.pi*180), angle/np.pi*180, dir1, dir2
if angle_abs < angle_max:
if id_edge not in self.ids_edges_major:
# print ' choose',id_edge
ids_edge_eligible.append(id_edge)
angles_eligible.append(angle_abs)
shapes_eligible.append(shape)
nums_lane_eligible.append(n_lane)
n_eligible = len(ids_edge_eligible)
if n_eligible == 0:
is_follow = False
else:
if n_eligible == 1:
id_edge_next = ids_edge_eligible[0]
shape_next = shapes_eligible[0]
elif n_eligible > 1:
# search edge with maximum number of lanes
n_lane_max = max(nums_lane_eligible)
inds_lane = np.flatnonzero(np.array(nums_lane_eligible) == n_lane_max)
# print ' nums_lane_eligible,inds_lane',nums_lane_eligible,inds_lane,type(inds_lane),inds_lane.dtype
if len(inds_lane) == 1:
# unique maximum
id_edge_next = ids_edge_eligible[inds_lane[0]]
shape_next = shapes_eligible[inds_lane[0]]
else:
# multiple edges have maximum number of lanes
angles_eligible = np.array(angles_eligible, dtype=np.float32)
# print ' angles_eligible',angles_eligible,angles_eligible[inds_lane]
ind_angle = np.argmin(angles_eligible[inds_lane])
# print ' ind_angle',ind_angle,ids_edge_eligible[inds_lane].shape
id_edge_next = np.array(ids_edge_eligible, dtype=np.int32)[inds_lane][ind_angle]
shape_next = np.array(shapes_eligible, dtype=np.object)[inds_lane][inds_lane[0]]
p01 = shape_next[-2][:2]
p11 = shape_next[-1][:2]
dir1 = p11-p01
print ' **winner:', id_edge_next, 'dir1=', dir1
route.append(id_edge_next)
ids_edges_major.add(id_edge_next)
is_follow = True
# print ' foreward route:',route
# print
return route
class BikenetworkCompleter(Process):
def __init__(self, net, logger=None, **kwargs):
print 'Bikenetworkcompleter.__init__'
self._init_common('bikenetworkcompleter',
parent=net,
name='Bike network completer',
logger=logger,
info='Modifies the current network as to allow a higher permeability for bicycles.',
)
attrsman = self.set_attrsman(cm.Attrsman(self))
self.speed_max_bike = attrsman.add(cm.AttrConf('speed_max_bike', kwargs.get('speed_max_bike', 20/3.6),
groupnames=['options'],
perm='rw',
name='Bike speed limit',
unit='m/s',
info='General speed limit applied to edges reserved for bikes only.',
))
self.is_bike_on_ped = attrsman.add(cm.AttrConf('is_bike_on_ped', kwargs.get('is_bike_on_ped', False),
groupnames=['options'],
perm='rw',
name='Bikes on pedestrain ways',
info='If true, bikeways are also given access on links where only pedestrians are allowed. Furthermore an opposite edge is created if pedestrian link in one-way.',
))
self.is_bike_opp = attrsman.add(cm.AttrConf('is_bike_opp', kwargs.get('is_bike_opp', False),
groupnames=['options'],
perm='rw',
name='Bikes in opposite direction',
info='If true, bikeways are created in opposite direction of each one-way edges.',
))
self.width_bikelane_opp = attrsman.add(cm.AttrConf('width_bikelane_opp', kwargs.get('width_bikelane_opp', 0.9),
groupnames=['options'],
perm='rw',
name='Bikelane width opp. dir',
unit='m/s',
info='Width for created bike lines into the opposite direction of one-way edges.',
))
self.speed_max_bike_opp = attrsman.add(cm.AttrConf('speed_max_bike_opp', kwargs.get('speed_max_bike_opp', 8/3.6),
groupnames=['options'],
perm='rw',
name='Bike speed limit in opp. dir',
unit='m/s',
info='General speed limit applied to edges reserved for bikes only and which go into the opposite direction of one-way edges.',
))
self.priority_max = attrsman.add(cm.AttrConf('priority_max', kwargs.get('priority_max', 5),
groupnames=['options'],
perm='rw',
name='Max priority',
info='Operate only on edges up to this level of priority (1 is lowest 10 is highest).',
))
self.n_lanes_max = attrsman.add(cm.AttrConf('n_lanes_max', kwargs.get('n_lanes_max', 3),
groupnames=['options'],
perm='rw',
name='Max lanes',
info='Operate only on edges up to this number of lanes. Note that footpath is also a lane.',
))
self.id_mode_bike = MODES["bicycle"]
self.id_mode_ped = MODES["pedestrian"]
#self.id_mode_ped = MODES["delivery"]
self.ids_modes_tocomplete = set([MODES["pedestrian"], MODES["delivery"], MODES["bus"]])
def do(self):
print 'BikenetworkCompleter.do'
edges = self.parent.edges
nodes = self.parent.nodes
lanes = self.parent.lanes
connections = self.parent.connections
ids_edge = edges.get_ids()
allow_cycloped = [self.id_mode_bike, self.id_mode_ped]
ids_edge_update = []
for id_edge, id_sumo, type_spread, shape, \
n_lanes, ids_lane, priority, id_fromnode, id_tonode\
in zip(ids_edge,
edges.ids_sumo[ids_edge],
edges.types_spread[ids_edge],
edges.shapes[ids_edge],
edges.nums_lanes[ids_edge],
edges.ids_lanes[ids_edge],
edges.priorities[ids_edge],
edges.ids_fromnode[ids_edge],
edges.ids_tonode[ids_edge],
):
if (n_lanes <= self.n_lanes_max) & (priority <= self.priority_max):
# a footpath ha been made accessible for bikes
# check if footpath is a on-way
ids_incoming = nodes.ids_incoming[id_fromnode]
ids_outgoing = nodes.ids_outgoing[id_tonode]
if (ids_incoming is None) | (ids_outgoing is None):
is_oneway = True
else:
is_oneway = set(ids_incoming).isdisjoint(ids_outgoing)
# print ' check:id_edge=',id_edge,'ids_lane=',ids_lane,'is_oneway',is_oneway
if n_lanes == 1:
id_lane = ids_lane[0]
is_bikeaccess, is_bikeonlyaccess = self._detect_bikeaccess(id_lane, lanes)
# print ' is_bikeaccess',is_bikeaccess, is_bikeonlyaccess
if (self.is_bike_on_ped) & (not is_bikeaccess):
ids_modes_allow, lanewidths = self._make_bike_on_ped(id_lane, lanes)
# print ' ids_modes_allow, lanewidths',ids_modes_allow, lanewidths
# print ' ids_incoming',nodes.ids_incoming[id_fromnode]
# print ' ids_outgoing',nodes.ids_outgoing[id_tonode]
if ids_modes_allow is not None:
ids_edge_update.append(id_edge)
if is_oneway: # slow: edges.is_oneway(id_edge):
# print ' add opposite edge with same properties ids_modes_allow',ids_modes_allow
edges.types_spread[id_edge] = 0 # right spread
#edges.widths[id_edge] = 0.5*lanewidths
lanes.widths[id_lane] = 0.5*lanewidths
id_edge_opp = edges.make(id_fromnode=id_tonode,
id_tonode=id_fromnode,
id_sumo='-'+id_sumo,
type_edge='',
num_lanes=1,
speed_max=self.speed_max_bike,
priority=priority,
#length = 0.0,
shape=shape[::-1],
type_spread='right',
#name = '',
#offset_end = 0.0,
width_lanes_default=0.5*lanewidths,
width_sidewalk=-1,
)
id_lane_opp = lanes.make(id_edge=id_edge_opp,
index=0,
width=0.5*lanewidths,
speed_max=self.speed_max_bike,
ids_modes_allow=ids_modes_allow)
edges.ids_lanes[id_edge_opp] = [id_lane_opp]
ids_edge_update.append(id_edge_opp)
is_oneway = False # avoid producing another
if self.is_bike_opp & is_oneway & (not is_bikeonlyaccess):
# create opposite lane with narrow bikelane
# but not for bike-only lanes
# print ' add opposite edge'
edges.types_spread[id_edge] = 0 # right spread
edgewidth = edges.widths[id_edge]-self.width_bikelane_opp
edges.widths[id_edge] = edgewidth
lanes.widths[id_lane] = edgewidth
id_edge_opp = edges.make(id_fromnode=id_tonode,
id_tonode=id_fromnode,
id_sumo='-'+id_sumo,
type_edge='',
num_lanes=1,
speed_max=self.speed_max_bike_opp,
priority=0, # give lowest priority...it's illegal!!
#length = 0.0,
shape=shape[::-1],
type_spread='right',
#name = '',
#offset_end = 0.0,
width_lanes_default=self.width_bikelane_opp,
width_sidewalk=-1,
)
id_lane_opp = lanes.make(id_edge=id_edge_opp,
index=0,
width=self.width_bikelane_opp,
speed_max=self.speed_max_bike_opp,
ids_modes_allow=allow_cycloped,
)
edges.ids_lanes[id_edge_opp] = [id_lane_opp]
ids_edge_update.append(id_edge_opp)
elif n_lanes in [2, 3]:
is_opp = False
if self.is_bike_opp & is_oneway:
if n_lanes == 2:
# only if rightmost is footpath
is_opp = self._detect_pedonlyaccess(ids_lane[0], lanes)
elif n_lanes == 3:
# only if footpath left and right
is_opp = self._detect_pedonlyaccess(ids_lane[0], lanes)\
& self._detect_pedonlyaccess(ids_lane[2], lanes)
if is_opp:
# print ' add opposite edge'
edges.types_spread[id_edge] = 0 # right spread
edgewidth = edges.widths[id_edge]-self.width_bikelane_opp
#edges.widths[id_edge] = edgewidth
#lanes.widths[id_lane[-1]] = edgewidth
id_edge_opp = edges.make(id_fromnode=id_tonode,
id_tonode=id_fromnode,
id_sumo='-'+id_sumo,
type_edge='',
num_lanes=1,
speed_max=self.speed_max_bike_opp,
priority=0, # give lowest priority...it's illegal!!
#length = 0.0,
shape=shape[::-1],
type_spread='right',
#name = '',
#offset_end = 0.0,
| |
a domain and //
assert mep_tags.domain('https://docs.python.org/3/library/') == 'python'
assert mep_tags.domain('//www.cwi.nl:80/%7Eguido/Python.html') == 'cwi'
# returns None on local URLs or those missing //
assert mep_tags.domain('www.cwi.nl/%7Eguido/Python.html') is None
assert mep_tags.domain('help/Python.html') is None
# returns None on garbage
assert mep_tags.domain('oops') is None
assert mep_tags.domain(2) is None
assert mep_tags.domain(None) is None
def test_iiif_image():
    """The iiif_image tag applies size/format options to an image client."""
    img = IIIFImageClient('http://image.server/path/', 'myimgid')
    # valid option strings translate into the equivalent client calls
    cases = [
        ('size:width=250', img.size(width=250)),
        ('size:width=250,height=300', img.size(width=250, height=300)),
        ('format:png', img.format('png')),
    ]
    for opts, expected in cases:
        assert str(mep_tags.iiif_image(img, opts)) == str(expected)
    # malformed option strings return an empty string instead of raising
    for bad_opts in ('bogus', 'size:bogus', 'size:bogus=1'):
        assert mep_tags.iiif_image(img, bad_opts) == ''
@pytest.mark.django_db
def test_partialdate_filter():
    """The partialdate filter formats partial dates, adapting the output
    format to the available precision and returning None on bad input."""
    # None should return None
    assert mep_tags.partialdate(None, 'c') is None
    # unset date should return None
    acct = Account.objects.create()
    evt = Event.objects.create(account=acct)
    assert mep_tags.partialdate(evt.partial_start_date, 'c') is None
    # test with ap date format as default
    with override_settings(DATE_FORMAT='N j, Y'):
        # year only
        evt.partial_start_date = '1934'
        assert mep_tags.partialdate(evt.partial_start_date) == '1934'
        # year and month
        evt.partial_start_date = '1934-02'
        assert mep_tags.partialdate(evt.partial_start_date) == 'Feb. 1934'
        # month and day
        evt.partial_start_date = '--03-06'
        assert mep_tags.partialdate(evt.partial_start_date) == 'March 6'
        # full precision
        evt.partial_start_date = '1934-11-06'
        assert mep_tags.partialdate(evt.partial_start_date) == 'Nov. 6, 1934'
        # partial precision with trailing punctuation in the date
        evt.partial_start_date = '--11-26'
        assert mep_tags.partialdate(evt.partial_start_date, 'j N') == '26 Nov.'
    # check a different format
    evt.partial_start_date = '--11-26'
    assert mep_tags.partialdate(evt.partial_start_date, 'Ymd') == '1126'
    evt.partial_start_date = '1932-11'
    assert mep_tags.partialdate(evt.partial_start_date, 'Ymd') == '193211'
    evt.partial_start_date = '1932'
    assert mep_tags.partialdate(evt.partial_start_date, 'Ymd') == '1932'
    # check week format
    evt.partial_start_date = '1922-01-06'
    assert mep_tags.partialdate(evt.partial_start_date, 'W y') == '1 22'
    # week format needs a year, so month/day-only dates yield None
    evt.partial_start_date = '--01-06'
    assert mep_tags.partialdate(evt.partial_start_date, 'W y') is None
    # handle error in parsing date
    assert mep_tags.partialdate('foobar', 'Y-m-d') is None
def test_querystring_remove():
    """querystring_remove drops the named parameters from a QueryDict."""
    # removing a single key leaves the others intact
    result = mep_tags.querystring_remove(QueryDict('a=1&b=2'), 'a')
    assert 'a' not in result
    assert result['b'] == '2'
    # removing every key yields an empty querystring
    result = mep_tags.querystring_remove(QueryDict('a=1&b=2'), 'a', 'b')
    assert not result  # empty string
    # a key=value kwarg removes just that value from a multi-valued key
    result = mep_tags.querystring_remove(QueryDict('a=1&a=2&a=3'), a='2')
    assert result.urlencode() == 'a=1&a=3'
def test_querystring_minus():
    """querystring_minus strips the given keys from the request GET params."""
    context = {'request': Mock(GET=QueryDict('a=1&b=2&c=3'))}
    # only the keys not named survive
    assert mep_tags.querystring_minus(context, 'a', 'c') == QueryDict('b=2')
    # naming every key yields an empty QueryDict
    assert mep_tags.querystring_minus(context, 'a', 'b', 'c') == QueryDict()
def test_querystring_only():
    """querystring_only keeps just the named keys from the request GET params."""
    context = {'request': Mock(GET=QueryDict('a=1&b=2&c=3'))}
    # only the named keys are retained
    assert mep_tags.querystring_only(context, 'a', 'c') == QueryDict('a=1&c=3')
    assert mep_tags.querystring_only(context, 'b') == QueryDict('b=2')
def test_formfield_selected_filter():
    """formfield_selected_filter renders removal links for active form
    filters, preserving all other querystring parameters in each link."""
    form = MemberSearchForm(data={
        'has_card': 1,
        'membership_dates_0': 1920,
        'birth_year_1': 1950,
        'gender': ['Female', 'Male'],
    })
    form.set_choices_from_facets(
        {'gender': OrderedDict([('Female', 0), ('Male', 0)])})
    querystring = QueryDict('has_card=1&page=2&sort=relevance&query=stein')
    context = {'request': Mock(GET=querystring)}
    # boolean field
    link = mep_tags.formfield_selected_filter(context, form['has_card'])
    # still on python 3.5, can't assume order doesn't change
    # assert link == '<a href="?sort=relevance&query=stein">Card</a>'
    assert link.startswith('<a data-input="id_has_card" href="?')
    assert link.endswith('">Card</a>')
    for query_param in ['sort=relevance', 'query=stein']:
        assert query_param in link
    # the filter being removed must not appear in its own removal link
    assert 'has_card=1' not in link
    # range field - first date only
    querystring = QueryDict('has_card=1&page=2&sort=relevance&query=stein' +
                            '&membership_dates_0=1920&membership_dates_1=test')
    context = {'request': Mock(GET=querystring)}
    link = mep_tags.formfield_selected_filter(context,
                                              form['membership_dates'])
    assert "Membership Years 1920 – " in link
    for query_param in ['sort=relevance', 'query=stein', 'has_card=1']:
        assert query_param in link
    # both halves of the range input are dropped from the link
    for membership_param in ['membership_dates_0', 'membership_dates_1']:
        assert membership_param not in link
    # assert 'href="?has_card=1&sort=relevance&query=stein"' in link
    # range field - second date only
    querystring = QueryDict('query=stein&birth_year_0=&birth_year_1=1950')
    context = {'request': Mock(GET=querystring)}
    link = mep_tags.formfield_selected_filter(context,
                                              form['birth_year'])
    assert "Birth Year – 1950" in link
    assert 'href="?query=stein"' in link
    for birth_param in ['birth_year_0', 'birth_year_1']:
        assert birth_param not in link
    # facet choice field with multiple values
    querystring = QueryDict('query=stein&gender=Female&gender=Male')
    context = {'request': Mock(GET=querystring)}
    link = mep_tags.formfield_selected_filter(context,
                                              form['gender'])
    # generates two links; each preserves the *other* filter
    # NOTE: [^>]* pattern is to ignore data-input attribute
    assert re.search(r'<a[^>]*href="\?[^"]*gender=Male[^"]*">Female</a>', link)
    assert re.search(r'<a[^>]*href="\?[^"]*gender=Female[^"]*">Male</a>', link)
    assert not re.search(r'<a[^>]*href="\?[^"]*gender=Female[^"]*">Female</a>',
                         link)
    assert not re.search(r'<a[^>]*href="\?[^"]*gender=Male[^"]*">Male</a>',
                         link)
    # assert '<a href="?query=stein&gender=Female">Male</a>' in link
    # don't blow up on invalid facet
    form = MemberSearchForm(data={
        'nationality': 'foobar'
    })
    querystring = QueryDict('query=stein&nationality=foobar')
    context = {'request': Mock(GET=querystring)}
    # no facets set on the form - should be an empty link
    assert not mep_tags.formfield_selected_filter(context,
                                                  form['nationality'])
    form.set_choices_from_facets(
        {'nationality': OrderedDict([('Argentina', 0), ('Chile', 0)])})
    # form has an invalid link - should be an empty link
    assert not mep_tags.formfield_selected_filter(context,
                                                  form['nationality'])
class TestCheckboxFieldset(TestCase):
    """Tests for the CheckboxFieldset widget (checkbox group in a fieldset)."""
    def test_get_context(self):
        """legend and facet_counts should be exposed in the widget context."""
        checkbox_fieldset = CheckboxFieldset()
        checkbox_fieldset.legend = 'Test Legend'
        checkbox_fieldset.facet_counts = {'value': 10}
        context = checkbox_fieldset.get_context('a name', ['a', 'b', 'c'], {})
        assert context['widget']['legend'] == 'Test Legend'
        assert context['facet_counts'] == checkbox_fieldset.facet_counts
    def test_render(self):
        """Rendered HTML should honor legend, ids, checked state, the
        required flag, and facet-count-based hiding of empty choices."""
        # make sure that legend is rendered based on a custom property
        checkbox_fieldset = CheckboxFieldset()
        # set legend and id for test purposes
        checkbox_fieldset.legend = 'Foo'
        checkbox_fieldset.attrs['id'] = 'widget_id'
        checkbox_fieldset.optgroups = Mock()
        # mock a substitute for the return value of optgroups
        # The reasons for this are two fold:
        # 1) The logic of how widgets are populated is fairly convoluted, and
        # we're only checking that the template does the right thing here.
        # 2) optgroups is a function and needs a mock to supply the return.
        checkbox_fieldset.optgroups.return_value = [
            (
                None,
                [
                    {
                        'label': 'A',
                        # ensure that checked value is respected
                        # id is set
                        # and value and label are not synonymous
                        'attrs': {'checked': True, 'id': 'id_for_0'},
                        'value': 'a'
                    },
                    {
                        'label': 'B',
                        'attrs': {'id': 'id_for_1'},
                        'value': 'b'
                    }
                ],
                0
            )
        ]
        # first arg sets the name attribute, other is unused after optgroup
        # override
        out = checkbox_fieldset.render('gender', 'bar')
        # legend should be upper-cased by default
        expected_output = '''
        <fieldset id="widget_id" tabindex="0">
        <legend>Foo</legend>
        <ul class="choices">
        <li class="choice">
        <input type="checkbox" value="a" id="id_for_0" name="gender" checked />
        <label for="id_for_0"> A </label>
        </li>
        <li class="choice">
        <input type="checkbox" value="b" id="id_for_1" name="gender" />
        <label for="id_for_1"> B </label>
        </li>
        </ul>
        </fieldset>
        '''
        # assertHTMLEqual normalizes whitespace, so indentation is irrelevant
        self.assertHTMLEqual(out, expected_output)
        # if required is set, each input should have required set
        checkbox_fieldset.is_required = True
        out = checkbox_fieldset.render('foo', 'bar')
        assert out.count('required') == 2
        checkbox_fieldset.attrs['data-hide-threshold'] = 0
        checkbox_fieldset.facet_counts = {'a': 0, 'b': 2}
        output = checkbox_fieldset.render('gender', 'bar')
        assert 'data-hide-threshold="0"' in output
        # a choice should be hidden
        assert '<li class="choice hide">' in output
        # b choice should not be hidden
        assert '<li class="choice">' in output
class TestFacetField(TestCase):
    """Tests for the FacetChoiceField form field."""
    def test_init(self):
        """Constructor handles the required flag and the widget legend."""
        # explicit required=True is passed through
        assert FacetChoiceField(required=True).required
        # unlike ChoiceField, required defaults to False
        assert not FacetChoiceField().required
        # the label doubles as the widget legend when no legend is given
        assert FacetChoiceField(label='Test').widget.legend == 'Test'
        # an explicit legend takes precedence over the label
        field = FacetChoiceField(label='Test', legend='NotTest')
        assert field.widget.legend == 'NotTest'
    def test_valid_value(self):
        """Every value validates, since choices are populated from facets."""
        field = FacetChoiceField()
        for candidate in ('A', 10):
            assert field.valid_value(candidate) is True
class TestFacetForm(TestCase):
    """Tests for FacetForm facet-count handling."""
    def test_set_choices_from_facets(self):
        """Facet counts become field choices; solr field names are mapped
        to form field names via solr_facet_fields, others pass through."""
        # Create a test form with mappings
        class TestForm(FacetForm):
            # maps solr facet field name -> form field name
            solr_facet_fields = {
                'name_s': 'name'
            }
            name = FacetChoiceField()
            member_type = FacetChoiceField()
        test_form = TestForm()
        # create facets in the format provided by parasolr
        facets = OrderedDict()
        facets['name_s'] = OrderedDict([('Jane', 2000), ('John', 1)])
        facets['member_type'] = OrderedDict([('weekly', 2), ('monthly', 1)])
        # handling should not choke on an unhandled field
        facets['unhandled_fields'] = OrderedDict(foo=1, bar=1)
        test_form.set_choices_from_facets(facets)
        # no mapping but matching field should be rendered
        assert test_form.fields['member_type'].choices == [
            ('weekly', 'weekly<span class="count">2</span>'),
            ('monthly', 'monthly<span class="count">1</span>'),
        ]
        # mapping should convert solr field name to form field name
        assert test_form.fields['name'].choices == [
            # check that comma formatting appears as expected
            ('Jane', 'Jane<span class="count">2,000</span>'),
            ('John', 'John<span class="count">1</span>')
        ]
        # unhandled field should not be passed in
        assert 'unhanded_field' not in test_form.fields
class TestVaryOnHeadersMixin(TestCase):
    """Tests for views.VaryOnHeadersMixin."""
    def test_vary_on_headers_mixing(self):
        """dispatch() sets the Vary header from the vary_headers option."""
        # the view defines no HTTP methods so dispatch always answers 405,
        # but the Vary header must still be applied to the response
        view = views.VaryOnHeadersMixin(vary_headers=['X-Foobar', 'X-Bazbar'])
        # a Mock request suffices; no request functionality is needed
        response = view.dispatch(Mock())
        assert response['Vary'] == 'X-Foobar, X-Bazbar'
class TestAjaxTemplateMixin(TestCase):
    """Tests for views.AjaxTemplateMixin."""
    def test_get_templates(self):
        """Template selection switches on whether the request is AJAX."""
        class MyAjaxyView(views.AjaxTemplateMixin):
            ajax_template_name = 'my_ajax_template.json'
            template_name = 'my_normal_template.html'
        view = MyAjaxyView()
        view.request = Mock()
        # a normal request gets the standard template (wrapped in a list)
        view.request.is_ajax.return_value = False
        assert view.get_template_names() == [MyAjaxyView.template_name]
        # an AJAX request gets the ajax template
        view.request.is_ajax.return_value = True
        assert view.get_template_names() == MyAjaxyView.ajax_template_name
class TestFacetJSONMixin(TestCase):
def test_render_response(self):
class MyViewWithFacets(views.FacetJSONMixin):
template_name = 'my_normal_template.html'
# create a mock request and queryset
view = MyViewWithFacets()
view.object_list = Mock()
view.object_list.get_facets.return_value = {
'facets': 'foo'
}
view.request = HttpRequest()
request = Mock()
# if no Accept: header, should just return a regular response
view.request.META['HTTP_ACCEPT'] = | |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This experiment was partially created using PsychoPy2 Experiment Builder (v1.83.04), Tue Feb 23 13:01:04 2016
If you publish work using this script please cite the relevant PsychoPy publications
<NAME> (2007) PsychoPy - Psychophysics software in Python. Journal of Neuroscience Methods, 162(1-2), 8-13.
Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy. Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008
"""
from __future__ import division # so that 1/3=0.333 instead of 1/3=0
from psychopy import visual, core, data, event, logging, sound, gui, parallel
from psychopy.constants import * # things like STARTED, FINISHED
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
import sys # to get file system encoding
import random
# Stimulus positions in degrees: top, right, bottom, left of fixation.
locations = ((0, 6.2), (6.2, 0), (0, -6.2), (-6.2, 0))
# TextStim labels "1"-"4" for the four locations (filled in further below).
number_texts = []
NUM_IMAGES = 20
MALE = 0
FEMALE = 1
NUM_BLOCKS = 3
sad_male_images = []
# NOTE(review): multiplying an *empty* list is a no-op; this replication
# presumably predates get_all_images() filling the lists -- confirm intent.
sad_male_images = sad_male_images*int(180/20)
neutral_male_images = []
neutral_male_images = neutral_male_images*int(180/20)
sad_female_images = []
sad_female_images = sad_female_images*int(180/20)
neutral_female_images = []
neutral_female_images = neutral_female_images*int(180/20)
# Parallel-port trigger codes (value in the upper nibble), matching the
# response codes expected by PyCorder.
corr_answer = 1*16 #equal to R1 in PyCorder
incorrect_answer = 2*16 #equal to R2 in PyCorder
no_answer = 3*16 #equal to R3 in PyCorder
def choose_male_images():
    """Pick 2 sad + 2 neutral male faces without replacement, shuffle them,
    and place one at each of the four screen locations.

    Logs each image's name to the experiment data and returns the list of
    4 positioned ImageStim objects.
    """
    my_faces = []
    # sample without replacement so the same face never appears twice
    my_faces.extend(np.random.choice(sad_male_images, 2, False))
    my_faces.extend(np.random.choice(neutral_male_images, 2, False))
    random.shuffle(my_faces)
    # idiomatic enumerate instead of range(len(...))
    for i, face in enumerate(my_faces):
        face.setPos(newPos=locations[i])
        # NOTE(review): the "_Position" column records the image *name*,
        # not its position -- confirm this is intended
        thisExp.addData("Image" + str(i) + " _Position", face.name)
    return my_faces
def choose_female_images():
    """Pick 2 sad + 2 neutral female faces without replacement, shuffle them,
    and place one at each of the four screen locations.

    Logs each image's name to the experiment data and returns the list of
    4 positioned ImageStim objects.
    """
    my_faces = []
    # sample without replacement so the same face never appears twice
    my_faces.extend(np.random.choice(sad_female_images, 2, False))
    my_faces.extend(np.random.choice(neutral_female_images, 2, False))
    random.shuffle(my_faces)
    # idiomatic enumerate instead of range(len(...))
    for i, face in enumerate(my_faces):
        face.setPos(newPos=locations[i])
        # NOTE(review): the "_Position" column records the image *name*,
        # not its position -- confirm this is intended
        thisExp.addData("Image" + str(i) + " _Position", face.name)
    return my_faces
def _load_image_set(win, prefix, target_list):
    """Load NUM_IMAGES jpg stimuli named <prefix>NN.jpg from stim/ into
    target_list as identically-configured ImageStim objects."""
    path_to_images = "stim/"
    for i in range(NUM_IMAGES):
        name = prefix + "%02d" % (i + 1) + ".jpg"
        file = path_to_images + name
        target_list.append(visual.ImageStim(win=win, name=name, units='deg',
                                            image=file, mask=None,
                                            ori=0, pos=[0, 3.2], size=[6, 6],
                                            color=[1, 1, 1], colorSpace='rgb', opacity=1,
                                            flipHoriz=False, flipVert=False,
                                            texRes=128, interpolate=True, depth=-6.0))

def get_all_images(win):
    """Populate the four module-level image lists from the stim/ folder.

    The four near-identical loader loops were factored into a single
    helper; loading order and stimulus parameters are unchanged.
    """
    _load_image_set(win, "AMSA", sad_male_images)        # white male = sad male
    _load_image_set(win, "AMNE", neutral_male_images)    # black male = neutral male
    _load_image_set(win, "AFSA", sad_female_images)      # white female = sad female
    _load_image_set(win, "AFNE", neutral_female_images)  # black female = neutral female
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__)).decode(sys.getfilesystemencoding())
os.chdir(_thisDir)
# Store info about the experiment session
expName = u'FEMA4ImagesEEG' # from the Builder filename that created this script
expInfo = {u'session': u'001', u'participant': u''}
try: #look for pipe from app
expInfo['participant'] = '%s'%(sys.argv[1])
expInfo['session'] = '001'
except IndexError: #if no pipe, run normally
print ('ran without app')
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if dlg.OK == False:
print ('app closed')
core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
print 'subject:',expInfo['participant']
print 'exp:',expName
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data\%s_%s_%s' %(expInfo['participant'], expName, expInfo['date'])
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
extraInfo=expInfo, runtimeInfo=None,
originPath=None,
savePickle=True, saveWideText=True,
dataFileName=filename)
#save a log file for detail verbose info
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
endExpNow = False # flag for 'escape' or other condition => quit the exp
# Start Code - component code to be run before the window creation
# Setup the Window
win = visual.Window(size=(1360, 768), fullscr=False, screen=0, allowGUI=True, allowStencil=False,
monitor='testMonitor', color=[0,0,0], colorSpace='rgb',
blendMode='avg', useFBO=True,
)
# Get all the images
get_all_images(win)
# store frame rate of monitor if we can measure it successfully
expInfo['frameRate']=win.getActualFrameRate()
if expInfo['frameRate']!=None:
frameDur = 1.0/round(expInfo['frameRate'])
else:
frameDur = 1.0/60.0 # couldn't get a reliable measure so guess
# Initialize components for Routine "Instr"
InstrClock = core.Clock()
Instructions = visual.TextStim(win=win, ori=0, name='Instructions',
text='You will be presented with an array of 4 faces; afterwards you will be presented with a face cue in the center of the screen.\n\nYour task is to indicate the location of the face cue in the previous array by pressing the correct location number (1-4) on the labeled keys.\n\nPlease keep your EYES FIXATED ON THE CROSS.\nPlease use your DOMINANT hand to respond as quickly and accurately as possible.\n\nYou will start with a practice session.\nPlease press the spacebar to continue.', font='Arial',
pos=[0, 0], height=0.1, wrapWidth=1.5,
color='white', colorSpace='rgb', opacity=1,
depth=0.0)
# Create the 4 numbers
for i in range(4):
number_texts.append(visual.TextStim(win=win, ori=0, name='Number' + str(i+1), units='deg',
text=str(i+1), font='Arial',
pos=locations[i], height=2, wrapWidth=None,
color='white', colorSpace='rgb', opacity=1,
depth=0.0))
# Initialize components for Routine "Continue"
ContinueClock = core.Clock()
Ready = visual.TextStim(win=win, ori=0, name='Ready',
text='That was the end of the block.\n\nIf you need to take a break please do so now.\n\nWhen you are ready to continue please press the spacebar and remember to KEEP YOUR EYES FIXATED ON THE CROSS.', font='Arial',
pos=[0, 0], height=0.1, wrapWidth=1.5,
color='white', colorSpace='rgb', opacity=1,
depth=0.0)
# Initialize components for Routine "Trial"
TrialClock = core.Clock()
FixationPoint = visual.TextStim(win=win, ori=0, name='FixationPoint',
text='+', font='Arial',
pos=[0, 0], height=0.1, wrapWidth=None,
color=1.0, colorSpace='rgb', opacity=1,
depth=0.0)
p_port = parallel.ParallelPort(address=u'0xDFF8')
p_port_images = parallel.ParallelPort(address=u'0xDFF8')
# Initialize components for Routine "Break"
BreakClock = core.Clock()
Break = visual.TextStim(win=win, ori=0, name='Break',
text='That was the end of the block.\n\nIf you need to take a break please take one now.\n\nWhen you are ready to continue please press the spacebar.', font='Arial',
pos=[0, 0], height=0.1, wrapWidth=1.5,
color='white', colorSpace='rgb', opacity=1,
depth=0.0)
# Create some handy timers
globalClock = core.Clock() # to track the time since experiment started
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine
#------Prepare to start Routine "Instr"-------
t = 0
InstrClock.reset() # clock
frameN = -1
# update component parameters for each repeat
InstrKey_resp = event.BuilderKeyResponse() # create an object of type KeyResponse
InstrKey_resp.status = NOT_STARTED
# keep track of which components have finished
InstrComponents = []
InstrComponents.append(Instructions)
InstrComponents.append(InstrKey_resp)
for thisComponent in InstrComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "Instr"-------
continueRoutine = True
while continueRoutine:
# get current time
t = InstrClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *Instructions* updates
if t >= 0.0 and Instructions.status == NOT_STARTED:
# keep track of start time/frame for later
Instructions.tStart = t # underestimates by a little under one frame
Instructions.frameNStart = frameN # exact frame index
Instructions.setAutoDraw(True)
if Instructions.status == STARTED: # only update if being drawn
Instructions.setColor('white', colorSpace='rgb', log=False)
# *InstrKey_resp* updates
if t >= 0.0 and InstrKey_resp.status == NOT_STARTED:
# keep track of start time/frame for later
InstrKey_resp.tStart = t # underestimates by a little under one frame
InstrKey_resp.frameNStart = frameN # exact frame index
InstrKey_resp.status = STARTED
# keyboard checking is just starting
win.callOnFlip(InstrKey_resp.clock.reset) # t=0 on next screen flip
event.clearEvents(eventType='keyboard')
if InstrKey_resp.status == STARTED:
theseKeys = event.getKeys(keyList=['space'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
InstrKey_resp.keys = theseKeys[-1] # just the last key pressed
InstrKey_resp.rt = InstrKey_resp.clock.getTime()
# a response ends the routine
continueRoutine = False
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in InstrComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "Instr"-------
for thisComponent in InstrComponents:
if hasattr(thisComponent, "setAutoDraw"):
| |
<gh_stars>100-1000
# Copyright (c) 2017 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from kmip.core import attributes
from kmip.core import enums
from kmip.core import utils
from kmip.core.messages.payloads import sign
class TestSignRequestPayload(testtools.TestCase):
"""
Test suite for the Sign request payload.
"""
    def setUp(self):
        """Build reusable KMIP TTLV encodings for the Sign request tests."""
        super(TestSignRequestPayload, self).setUp()

        # Encoding obtained in part from KMIP 1.4 testing document,
        # partially cobbled together by hand from other test cases
        # in this code base.
        #
        # This encoding matches the following set of values:
        # Unique Identifier - b4faee10-aa2a-4446-8ad4-0881f3422959
        # Cryptographic Parameters
        #     Cryptographic Algorithm - ECDSA
        # Data - 01020304050607080910111213141516
        self.full_encoding = utils.BytearrayStream(
            b'\x42\x00\x79\x01\x00\x00\x00\x60'
            b'\x42\x00\x94\x07\x00\x00\x00\x24\x62\x34\x66\x61\x65\x65\x31\x30'
            b'\x2D\x61\x61\x32\x61\x2D\x34\x34\x34\x36\x2D\x38\x61\x64\x34\x2D'
            b'\x30\x38\x38\x31\x66\x33\x34\x32\x32\x39\x35\x39\x00\x00\x00\x00'
            b'\x42\x00\x2B\x01\x00\x00\x00\x10'
            b'\x42\x00\x28\x05\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x00'
            b'\x42\x00\xC2\x08\x00\x00\x00\x10\x01\x02\x03\x04\x05\x06\x07\x08'
            b'\x09\x10\x11\x12\x13\x14\x15\x16'
        )

        # Encoding carrying only the required Data field.
        self.minimum_encoding = utils.BytearrayStream(
            b'\x42\x00\x79\x01\x00\x00\x00\x18'
            b'\x42\x00\xC2\x08\x00\x00\x00\x10\x01\x02\x03\x04\x05\x06\x07\x08'
            b'\x09\x10\x11\x12\x13\x14\x15\x16'
        )

        # Empty request payload structure (zero-length value).
        self.empty_encoding = utils.BytearrayStream(
            b'\x42\x00\x79\x01\x00\x00\x00\x00'
        )
def tearDown(self):
super(TestSignRequestPayload, self).tearDown()
def test_init(self):
"""
Test that a Sign request payload can be constructed with no arguments.
"""
payload = sign.SignRequestPayload()
self.assertEqual(None, payload.unique_identifier)
self.assertEqual(None, payload.cryptographic_parameters)
self.assertEqual(None, payload.data)
def test_init_with_args(self):
"""
Test that a Sign request payload can be constructed with valid values.
"""
payload = sign.SignRequestPayload(
unique_identifier='00000000-1111-2222-3333-444444444444',
cryptographic_parameters=attributes.CryptographicParameters(),
data=b'\x01\x02\x03'
)
self.assertEqual(
'00000000-1111-2222-3333-444444444444',
payload.unique_identifier
)
self.assertEqual(
attributes.CryptographicParameters(),
payload.cryptographic_parameters
)
self.assertEqual(b'\x01\x02\x03', payload.data)
def test_invalid_unique_identifier(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the unique identifier of a Sign request payload.
"""
payload = sign.SignRequestPayload()
args = (payload, 'unique_identifier', 0)
self.assertRaisesRegex(
TypeError,
"unique identifier must be a string",
setattr,
*args
)
def test_invalid_cryptographic_parameters(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the cryptographic parameters of a Sign request payload.
"""
payload = sign.SignRequestPayload()
args = (payload, 'cryptographic_parameters', b'\x01\x02\x03')
self.assertRaisesRegex(
TypeError,
"cryptographic parameters must be a CryptographicParameters "
"struct",
setattr,
*args
)
def test_invalid_data(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the data of a Sign request payload.
"""
payload = sign.SignRequestPayload()
args = (payload, 'data', 0)
self.assertRaisesRegex(
TypeError,
"data must be bytes",
setattr,
*args
)
def test_read(self):
"""
Test that a Sign request payload can be read from a data stream.
"""
payload = sign.SignRequestPayload()
self.assertEqual(None, payload.unique_identifier)
self.assertEqual(None, payload.cryptographic_parameters)
self.assertEqual(None, payload.data)
payload.read(self.full_encoding)
self.assertEqual(
'b4faee10-aa2a-4446-8ad4-0881f3422959',
payload.unique_identifier
)
self.assertIsNotNone(payload.cryptographic_parameters)
self.assertEqual(
enums.CryptographicAlgorithm.ECDSA,
payload.cryptographic_parameters.cryptographic_algorithm
)
self.assertEqual(
b'\x01\x02\x03\x04\x05\x06\x07\x08'
b'\x09\x10\x11\x12\x13\x14\x15\x16',
payload.data
)
def test_read_partial(self):
"""
Test that a Sign request payload can be read from a partial data
stream containing the minimum required attributes.
"""
payload = sign.SignRequestPayload()
self.assertEqual(None, payload.unique_identifier)
self.assertEqual(None, payload.cryptographic_parameters)
self.assertEqual(None, payload.data)
payload.read(self.minimum_encoding)
self.assertEqual(
b'\x01\x02\x03\x04\x05\x06\x07\x08'
b'\x09\x10\x11\x12\x13\x14\x15\x16',
payload.data
)
def test_read_invalid(self):
"""
Test that a ValueError gets raised when a required Sign request
payload attribute is missing from the payload encoding.
"""
payload = sign.SignRequestPayload()
args = (self.empty_encoding, )
self.assertRaisesRegex(
ValueError,
"invalid payload missing the data attribute",
payload.read,
*args
)
def test_write(self):
"""
Test that a Sign request payload can be written to a data stream.
"""
payload = sign.SignRequestPayload(
unique_identifier='b4faee10-aa2a-4446-8ad4-0881f3422959',
cryptographic_parameters=attributes.CryptographicParameters(
cryptographic_algorithm=enums.CryptographicAlgorithm.ECDSA
),
data=b'\x01\x02\x03\x04\x05\x06\x07\x08'
b'\x09\x10\x11\x12\x13\x14\x15\x16'
)
stream = utils.BytearrayStream()
payload.write(stream)
self.assertEqual(len(self.full_encoding), len(stream))
self.assertEqual(str(self.full_encoding), str(stream))
def test_write_partial(self):
"""
Test that a partially defined Sign request payload can be written
to a data stream.
"""
payload = sign.SignRequestPayload(
data=b'\x01\x02\x03\x04\x05\x06\x07\x08'
b'\x09\x10\x11\x12\x13\x14\x15\x16'
)
stream = utils.BytearrayStream()
payload.write(stream)
self.assertEqual(len(self.minimum_encoding), len(stream))
self.assertEqual(str(self.minimum_encoding), str(stream))
def test_write_invalid(self):
"""
Test that a ValueError gets raised when a required Sign request
payload attribute is missing when encoding the payload.
"""
payload = sign.SignRequestPayload()
stream = utils.BytearrayStream()
args = (stream, )
self.assertRaisesRegex(
ValueError,
"invalid payload missing the data attribute",
payload.write,
*args
)
def test_equal_on_equal(self):
"""
Test that the equality operator returns True when comparing two
Sign request payloads with the same data.
"""
a = sign.SignRequestPayload()
b = sign.SignRequestPayload()
self.assertTrue(a == b)
self.assertTrue(b == a)
a = sign.SignRequestPayload(
unique_identifier='b4faee10-aa2a-4446-8ad4-0881f3422959',
cryptographic_parameters=attributes.CryptographicParameters(
cryptographic_algorithm=enums.CryptographicAlgorithm.ECDSA
),
data=b'\x01\x02\x03\x04\x05\x06\x07\x08'
b'\x09\x10\x11\x12\x13\x14\x15\x16'
)
b = sign.SignRequestPayload(
unique_identifier='b4faee10-aa2a-4446-8ad4-0881f3422959',
cryptographic_parameters=attributes.CryptographicParameters(
cryptographic_algorithm=enums.CryptographicAlgorithm.ECDSA
),
data=b'\x01\x02\x03\x04\x05\x06\x07\x08'
b'\x09\x10\x11\x12\x13\x14\x15\x16'
)
self.assertTrue(a == b)
self.assertTrue(b == a)
def test_equal_on_not_equal_unique_identifier(self):
"""
Test that the equality operator returns False when comparing two
Sign request payloads with different unique identifiers.
"""
a = sign.SignRequestPayload(
unique_identifier='a'
)
b = sign.SignRequestPayload(
unique_identifier='b'
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_cryptographic_parameters(self):
"""
Test that the equality operator returns False when comparing two
Sign request payloads with cryptographic parameters.
"""
a = sign.SignRequestPayload(
cryptographic_parameters=attributes.CryptographicParameters(
hashing_algorithm=enums.HashingAlgorithm.MD5
)
)
b = sign.SignRequestPayload(
cryptographic_parameters=attributes.CryptographicParameters(
cryptographic_algorithm=enums.CryptographicAlgorithm.ECDSA
)
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_data(self):
"""
Test that the equality operator returns False when comparing two
Sign request payloads with different data.
"""
a = sign.SignRequestPayload(data=b'\x01')
b = sign.SignRequestPayload(data=b'\xFF')
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_type_mismatch(self):
"""
Test that the equality operator returns False when comparing two
Sign request payloads with different types.
"""
a = sign.SignRequestPayload()
b = 'invalid'
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_not_equal_on_equal(self):
"""
Test that the inequality operator returns False when comparing two
Sign request payloads with the same data.
"""
a = sign.SignRequestPayload()
b = sign.SignRequestPayload()
self.assertFalse(a != b)
self.assertFalse(b != a)
a = sign.SignRequestPayload(
unique_identifier='b4faee10-aa2a-4446-8ad4-0881f3422959',
cryptographic_parameters=attributes.CryptographicParameters(
cryptographic_algorithm=enums.CryptographicAlgorithm.ECDSA
),
data=b'\x01\x02\x03\x04\x05\x06\x07\x08'
b'\x09\x10\x11\x12\x13\x14\x15\x16'
)
b = sign.SignRequestPayload(
unique_identifier='b4faee10-aa2a-4446-8ad4-0881f3422959',
cryptographic_parameters=attributes.CryptographicParameters(
cryptographic_algorithm=enums.CryptographicAlgorithm.ECDSA
),
data=b'\x01\x02\x03\x04\x05\x06\x07\x08'
b'\x09\x10\x11\x12\x13\x14\x15\x16'
)
self.assertFalse(a != b)
self.assertFalse(b != a)
def test_not_equal_on_not_equal_unique_identifier(self):
"""
Test that the inequality operator returns True when comparing two
Sign request payloads with different unique identifiers.
"""
a = sign.SignRequestPayload(
unique_identifier='a'
)
b = sign.SignRequestPayload(
unique_identifier='b'
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_cryptographic_parameters(self):
"""
Test that the equality operator returns False when comparing two
Sign request payloads with cryptographic parameters.
"""
a = sign.SignRequestPayload(
cryptographic_parameters=attributes.CryptographicParameters(
hashing_algorithm=enums.HashingAlgorithm.MD5
)
)
b = sign.SignRequestPayload(
cryptographic_parameters=attributes.CryptographicParameters(
cryptographic_algorithm=enums.CryptographicAlgorithm.ECDSA
)
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_data(self):
"""
Test that the inequality operator returns True when comparing two
Sign request payloads with different data.
"""
a = sign.SignRequestPayload(data=b'\x01')
b = sign.SignRequestPayload(data=b'\xFF')
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_type_mismatch(self):
"""
Test that the inequality operator returns True when comparing two
Sign request payloads with different types.
"""
a = sign.SignRequestPayload()
b = 'invalid'
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_repr(self):
"""
Test that repr can be applied to a Sign request payload.
"""
payload = sign.SignRequestPayload(
unique_identifier='b4faee10-aa2a-4446-8ad4-0881f3422959',
cryptographic_parameters=attributes.CryptographicParameters(
cryptographic_algorithm=enums.CryptographicAlgorithm.ECDSA
),
data=b'\x01\x02\x03\x04\x05\x06\x07\x08'
)
expected = (
"SignRequestPayload("
"unique_identifier='b4faee10-aa2a-4446-8ad4-0881f3422959', "
"cryptographic_parameters=CryptographicParameters("
"block_cipher_mode=None, padding_method=None, "
"hashing_algorithm=None, key_role_type=None, "
"digital_signature_algorithm=None, "
"cryptographic_algorithm=CryptographicAlgorithm.ECDSA, "
"random_iv=None, iv_length=None, tag_length=None, "
"fixed_field_length=None, invocation_field_length=None, "
"counter_length=None, initial_counter_value=None), "
"data=" + str(b'\x01\x02\x03\x04\x05\x06\x07\x08') + ")"
)
observed = repr(payload)
self.assertEqual(expected, observed)
def test_str(self):
"""
Test that str can be applied to a Sign request payload.
"""
crypto_params = attributes.CryptographicParameters(
cryptographic_algorithm=enums.CryptographicAlgorithm.ECDSA
)
payload = sign.SignRequestPayload(
unique_identifier='b4faee10-aa2a-4446-8ad4-0881f3422959',
cryptographic_parameters=crypto_params,
data=b'\x01\x02\x03\x04\x05\x06\x07\x08',
)
expected = str({
'unique_identifier': 'b4faee10-aa2a-4446-8ad4-0881f3422959',
'cryptographic_parameters': crypto_params,
'data': b'\x01\x02\x03\x04\x05\x06\x07\x08'
})
observed = str(payload)
self.assertEqual(expected, observed)
class TestSignResponsePayload(testtools.TestCase):
"""
Test suite for the Sign response payload.
"""
def setUp(self):
super(TestSignResponsePayload, self).setUp()
# Encoding obtained in part from KMIP 1.4 testing document,
# partially cobbled together by hand from other test cases
# in this code base.
#
# This encoding matches the following set of values:
# Unique Identifier - b4faee10-aa2a-4446-8ad4-0881f3422959
# Signature Data - 01020304050607080910111213141516
self.full_encoding = utils.BytearrayStream(
b'\x42\x00\x7C\x01\x00\x00\x00\x48'
b'\x42\x00\x94\x07\x00\x00\x00\x24\x62\x34\x66\x61\x65\x65\x31\x30'
b'\x2D\x61\x61\x32\x61\x2D\x34\x34\x34\x36\x2D\x38\x61\x64\x34\x2D'
b'\x30\x38\x38\x31\x66\x33\x34\x32\x32\x39\x35\x39\x00\x00\x00\x00'
b'\x42\x00\xC3\x08\x00\x00\x00\x10\x01\x02\x03\x04\x05\x06\x07\x08'
b'\x09\x10\x11\x12\x13\x14\x15\x16'
)
self.incomplete_encoding = utils.BytearrayStream(
b'\x42\x00\x7C\x01\x00\x00\x00\x30'
b'\x42\x00\x94\x07\x00\x00\x00\x24\x62\x34\x66\x61\x65\x65\x31\x30'
b'\x2D\x61\x61\x32\x61\x2D\x34\x34\x34\x36\x2D\x38\x61\x64\x34\x2D'
b'\x30\x38\x38\x31\x66\x33\x34\x32\x32\x39\x35\x39\x00\x00\x00\x00'
)
self.empty_encoding = utils.BytearrayStream(
b'\x42\x00\x7C\x01\x00\x00\x00\x00'
)
def tearDown(self):
super(TestSignResponsePayload, self).tearDown()
def test_init(self):
"""
Test that a Sign response payload can be constructed with no
arguments.
"""
payload = sign.SignResponsePayload()
self.assertEqual(None, payload.unique_identifier)
self.assertEqual(None, payload.signature_data)
def test_init_with_args(self):
"""
Test that a Sign response payload can be constructed with valid
values.
"""
payload = sign.SignResponsePayload(
unique_identifier='00000000-1111-2222-3333-444444444444',
signature_data=b'\x01\x02\x03'
)
self.assertEqual(
'00000000-1111-2222-3333-444444444444',
payload.unique_identifier
)
self.assertEqual(b'\x01\x02\x03', payload.signature_data)
def test_invalid_unique_identifier(self):
"""
Test that a TypeError is raised when an invalid value is used to set
| |
-> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("requestor", "requestor__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
class AuditEventAgentNetwork(backboneelement.BackboneElement):
    """Logical network location for application activity.

    Logical network location for application activity, if the activity has a
    network location.

    Disclaimer: any field name that ends with ``__ext`` is NOT part of the
    Resource StructureDefinition; it only exists to enable the Extensibility
    feature for FHIR Primitive Data Types.
    """

    # Constant discriminator identifying this backbone element type.
    resource_type = Field("AuditEventAgentNetwork", const=True)

    address: fhirtypes.String = Field(
        None,
        alias="address",
        title="Identifier for the network access point of the user device",
        description=(
            "An identifier for the network access point of the user device for the "
            "audit event."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    # Primitive extension companion for ``address``.
    address__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_address", title="Extension field for ``address``."
    )

    type: fhirtypes.Code = Field(
        None,
        alias="type",
        title="The type of network access point",
        description=(
            "An identifier for the type of network access point that originated the"
            " audit event."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    # Primitive extension companion for ``type``.
    type__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_type", title="Extension field for ``type``."
    )
class AuditEventEntity(backboneelement.BackboneElement):
    """Data or objects used.

    Specific instances of data or objects that have been accessed.

    Disclaimer: any field name that ends with ``__ext`` is NOT part of the
    Resource StructureDefinition; it only exists to enable the Extensibility
    feature for FHIR Primitive Data Types.
    """

    # Constant discriminator identifying this backbone element type.
    resource_type = Field("AuditEventEntity", const=True)

    description: fhirtypes.String = Field(
        None,
        alias="description",
        title="Descriptive text",
        description="Text that describes the entity in more detail.",
        # if property is element of this resource.
        element_property=True,
    )
    # Primitive extension companion for ``description``.
    description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_description", title="Extension field for ``description``."
    )

    detail: typing.List[fhirtypes.AuditEventEntityDetailType] = Field(
        None,
        alias="detail",
        title="Additional Information about the entity",
        description=(
            "Tagged value pairs for conveying additional information about the "
            "entity."
        ),
        # if property is element of this resource.
        element_property=True,
    )

    lifecycle: fhirtypes.CodingType = Field(
        None,
        alias="lifecycle",
        title="Life-cycle stage for the entity",
        description="Identifier for the data life-cycle stage for the entity.",
        # if property is element of this resource.
        element_property=True,
    )

    name: fhirtypes.String = Field(
        None,
        alias="name",
        title="Descriptor for entity",
        description="A name of the entity in the audit event.",
        # if property is element of this resource.
        element_property=True,
    )
    # Primitive extension companion for ``name``.
    name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_name", title="Extension field for ``name``."
    )

    query: fhirtypes.Base64Binary = Field(
        None,
        alias="query",
        title="Query parameters",
        description="The query parameters for a query-type entities.",
        # if property is element of this resource.
        element_property=True,
    )
    # Primitive extension companion for ``query``.
    query__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_query", title="Extension field for ``query``."
    )

    role: fhirtypes.CodingType = Field(
        None,
        alias="role",
        title="What role the entity played",
        description=(
            "Code representing the role the entity played in the event being "
            "audited."
        ),
        # if property is element of this resource.
        element_property=True,
    )

    securityLabel: typing.List[fhirtypes.CodingType] = Field(
        None,
        alias="securityLabel",
        title="Security labels on the entity",
        description="Security labels for the identified entity.",
        # if property is element of this resource.
        element_property=True,
    )

    type: fhirtypes.CodingType = Field(
        None,
        alias="type",
        title="Type of entity involved",
        description="The type of the object that was involved in this audit event.",
        # if property is element of this resource.
        element_property=True,
    )

    what: fhirtypes.ReferenceType = Field(
        None,
        alias="what",
        title="Specific instance of resource",
        description=(
            "Identifies a specific instance of the entity. The reference should be "
            "version specific."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["Resource"],
    )
class AuditEventEntityDetail(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Additional Information about the entity.
Tagged value pairs for conveying additional information about the entity.
"""
resource_type = Field("AuditEventEntityDetail", const=True)
type: fhirtypes.String = Field(
None,
alias="type",
title="Name of the property",
description="The type of extra detail provided in the value.",
# if property is element of this resource.
element_property=True,
element_required=True,
)
type__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_type", title="Extension field for ``type``."
)
valueBase64Binary: fhirtypes.Base64Binary = Field(
None,
alias="valueBase64Binary",
title="Property value",
description="The value of the extra detail.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueBase64Binary__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_valueBase64Binary",
title="Extension field for ``valueBase64Binary``.",
)
valueString: fhirtypes.String = Field(
None,
alias="valueString",
title="Property value",
description="The value of the extra detail.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueString__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueString", title="Extension field for ``valueString``."
)
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_2422(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("type", "type__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
@root_validator(pre=True, allow_reuse=True)
def validate_one_of_many_2422(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
"""
one_of_many_fields = {"value": ["valueBase64Binary", "valueString"]}
for prefix, fields in one_of_many_fields.items():
assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
required = (
cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
is True
)
found = False
for field in fields:
if field in values and values[field] is not None:
if found is True:
raise ValueError(
"Any of one field value is | |
from keras.engine import Layer
from keras import activations
from keras import initializers
from keras import regularizers
from keras import constraints
from keras import backend as K
#from keras.layers import RNN
#import tensorflow as tf
#import tensorflow.contrib.rnn as rnn
class NASCell(Layer):
"""Neural Architecture Search (NAS) recurrent network cell.
This implements the recurrent cell from the paper:
https://arxiv.org/abs/1611.01578
<NAME> and <NAME>.
"Neural Architecture Search with Reinforcement Learning" Proc. ICLR 2017.
The class uses an optional projection layer.
# Arguments
units: Positive integer, dimensionality of the output space.
projection_units: (optional) Positive integer, The output dimensionality
for the projection matrices. If None, no projection is performed.
activation: Activation function to use
(see [activations](../activations.md)).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step
(see [activations](../activations.md)).
projection_activation: Activation function to use
for the projection step
(see [activations](../activations.md)).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
(see [initializers](../initializers.md)).
projection_initializer: Initializer for the `projection_kernel`
weights matrix,
used for the linear transformation of the projection step.
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix
(see [regularizer](../regularizers.md)).
projection_regularizer: Regularizer function applied to
the `projection_kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix
(see [constraints](../constraints.md)).
projection_constraint: Constraint function applied to
the `projection_kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
"""
    def __init__(self, units,
                 projection_units=None,
                 activation='tanh',
                 recurrent_activation='sigmoid',
                 projection_activation='linear',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 projection_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 projection_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 projection_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 implementation=2,
                 **kwargs):
        """Initialize the NAS cell.

        See the class docstring for the meaning of every argument. All
        string arguments are resolved to callables via the corresponding
        Keras ``get`` helpers.
        """
        super(NASCell, self).__init__(**kwargs)
        self.units = units
        self.projection_units = projection_units
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.projection_activation = activations.get(projection_activation)
        # Fixed ReLU used internally by the NAS architecture.
        self.cell_activation = activations.get('relu')
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.projection_initializer = initializers.get(projection_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.projection_regularizer = regularizers.get(projection_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.projection_constraint = constraints.get(projection_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        # Clamp both dropout rates into [0, 1].
        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.implementation = implementation
        # state_size is (output/hidden size, internal cell size); the hidden
        # state shrinks to projection_units when a projection is configured.
        if self.projection_units is not None:
            self.state_size = (self.projection_units, self.units)
        else:
            self.state_size = (self.units, self.units)
        self._dropout_mask = None
        self._recurrent_dropout_mask = None
    def build(self, input_shape):
        """Create the cell's weights.

        Both the input kernel and the recurrent kernel are allocated as one
        matrix of 8 concatenated per-gate blocks (the NAS cell has 8 internal
        gates), then sliced into per-gate views below.
        """
        input_dim = input_shape[-1]
        # The recurrent input is the (possibly projected) hidden state.
        if self.projection_units is not None:
            recurrent_output_dim = self.projection_units
        else:
            recurrent_output_dim = self.units
        self.kernel = self.add_weight(shape=(input_dim, self.units * 8),
                                      name='kernel',
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        self.recurrent_kernel = self.add_weight(
            shape=(recurrent_output_dim, self.units * 8),
            name='recurrent_kernel',
            initializer=self.recurrent_initializer,
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint)

        if self.projection_units is not None:
            self.projection_kernel = self.add_weight(
                shape=(self.units, self.projection_units),
                name='projection_kernel',
                initializer=self.projection_initializer,
                regularizer=self.projection_regularizer,
                constraint=self.projection_constraint)

        if self.use_bias:
            if self.unit_forget_bias:
                # Initialize the second gate block (units:units*2) to ones so
                # the forget-like gate starts open; all other blocks use the
                # configured bias initializer.
                def bias_initializer(shape, *args, **kwargs):
                    return K.concatenate([
                        self.bias_initializer((self.units,), *args, **kwargs),
                        initializers.Ones()((self.units,), *args, **kwargs),
                        self.bias_initializer((self.units * 6,), *args, **kwargs),
                    ])
            else:
                bias_initializer = self.bias_initializer
            self.bias = self.add_weight(shape=(self.units * 8,),
                                        name='bias',
                                        initializer=bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None

        # Per-gate views into the packed weight matrices (no copies).
        self.kernel_0 = self.kernel[:, :self.units]
        self.kernel_1 = self.kernel[:, self.units: self.units * 2]
        self.kernel_2 = self.kernel[:, self.units * 2: self.units * 3]
        self.kernel_3 = self.kernel[:, self.units * 3: self.units * 4]
        self.kernel_4 = self.kernel[:, self.units * 4: self.units * 5]
        self.kernel_5 = self.kernel[:, self.units * 5: self.units * 6]
        self.kernel_6 = self.kernel[:, self.units * 6: self.units * 7]
        self.kernel_7 = self.kernel[:, self.units * 7:]

        self.recurrent_kernel_0 = self.recurrent_kernel[:, :self.units]
        self.recurrent_kernel_1 = self.recurrent_kernel[:, self.units: self.units * 2]
        self.recurrent_kernel_2 = self.recurrent_kernel[:, self.units * 2: self.units * 3]
        self.recurrent_kernel_3 = self.recurrent_kernel[:, self.units * 3: self.units * 4]
        self.recurrent_kernel_4 = self.recurrent_kernel[:, self.units * 4: self.units * 5]
        self.recurrent_kernel_5 = self.recurrent_kernel[:, self.units * 5: self.units * 6]
        self.recurrent_kernel_6 = self.recurrent_kernel[:, self.units * 6: self.units * 7]
        self.recurrent_kernel_7 = self.recurrent_kernel[:, self.units * 7:]

        if self.use_bias:
            self.bias_0 = self.bias[:self.units]
            self.bias_1 = self.bias[self.units: self.units * 2]
            self.bias_2 = self.bias[self.units * 2: self.units * 3]
            self.bias_3 = self.bias[self.units * 3: self.units * 4]
            self.bias_4 = self.bias[self.units * 4: self.units * 5]
            self.bias_5 = self.bias[self.units * 5: self.units * 6]
            self.bias_6 = self.bias[self.units * 6: self.units * 7]
            self.bias_7 = self.bias[self.units * 7:]
        else:
            self.bias_0 = None
            self.bias_1 = None
            self.bias_2 = None
            self.bias_3 = None
            self.bias_4 = None
            self.bias_5 = None
            self.bias_6 = None
            self.bias_7 = None
        self.built = True
def _generate_dropout_mask(self, inputs, training=None):
if 0 < self.dropout < 1:
ones = K.ones_like(K.squeeze(inputs[:, 0:1, :], axis=1))
def dropped_inputs():
return K.dropout(ones, self.dropout)
self._dropout_mask = [K.in_train_phase(
dropped_inputs,
ones,
training=training)
for _ in range(8)]
else:
self._dropout_mask = None
def _generate_recurrent_dropout_mask(self, inputs, training=None):
if 0 < self.recurrent_dropout < 1:
ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, self.units))
def dropped_inputs():
return K.dropout(ones, self.dropout)
self._recurrent_dropout_mask = [K.in_train_phase(
dropped_inputs,
ones,
training=training)
for _ in range(8)]
else:
self._recurrent_dropout_mask = None
def call(self, inputs, states, training=None):
# dropout matrices for input units
dp_mask = self._dropout_mask
# dropout matrices for recurrent units
rec_dp_mask = self._recurrent_dropout_mask
h_tm1 = states[0] # previous memory state
c_tm1 = states[1] # previous carry state
if self.implementation == 1:
if 0 < self.dropout < 1.:
inputs_0 = inputs * dp_mask[0]
inputs_1 = inputs * dp_mask[1]
inputs_2 = inputs * dp_mask[2]
inputs_3 = inputs * dp_mask[3]
inputs_4 = inputs * dp_mask[4]
inputs_5 = inputs * dp_mask[5]
inputs_6 = inputs * dp_mask[6]
inputs_7 = inputs * dp_mask[7]
else:
inputs_0 = inputs
inputs_1 = inputs
inputs_2 = inputs
inputs_3 = inputs
inputs_4 = inputs
inputs_5 = inputs
inputs_6 = inputs
inputs_7 = inputs
x_0 = K.dot(inputs_0, self.kernel_0)
x_1 = K.dot(inputs_1, self.kernel_1)
x_2 = K.dot(inputs_2, self.kernel_2)
x_3 = K.dot(inputs_3, self.kernel_3)
x_4 = K.dot(inputs_4, self.kernel_4)
x_5 = K.dot(inputs_5, self.kernel_5)
x_6 = K.dot(inputs_6, self.kernel_6)
x_7 = K.dot(inputs_7, self.kernel_7)
if self.use_bias:
x_0 = K.bias_add(x_0, self.bias_0)
x_1 = K.bias_add(x_1, self.bias_1)
x_2 = K.bias_add(x_2, self.bias_2)
x_3 = K.bias_add(x_3, self.bias_3)
x_4 = K.bias_add(x_4, self.bias_4)
x_5 = K.bias_add(x_5, self.bias_5)
x_6 = K.bias_add(x_6, self.bias_6)
x_7 = K.bias_add(x_7, self.bias_7)
if 0 < self.recurrent_dropout < 1.:
h_tm1_0 = h_tm1 * rec_dp_mask[0]
h_tm1_1 = h_tm1 * rec_dp_mask[1]
h_tm1_2 = h_tm1 * rec_dp_mask[2]
h_tm1_3 = h_tm1 * rec_dp_mask[3]
h_tm1_4 = h_tm1 * rec_dp_mask[4]
h_tm1_5 = h_tm1 * rec_dp_mask[5]
h_tm1_6 = h_tm1 * rec_dp_mask[6]
h_tm1_7 = h_tm1 * rec_dp_mask[7]
else:
h_tm1_0 = h_tm1
h_tm1_1 = h_tm1
h_tm1_2 = h_tm1
h_tm1_3 = h_tm1
h_tm1_4 = h_tm1
h_tm1_5 = h_tm1
h_tm1_6 = h_tm1
h_tm1_7 = h_tm1
# First Layer
layer1_0 = self.recurrent_activation(x_0 + K.dot(h_tm1_0, self.recurrent_kernel_0))
layer1_1 = self.cell_activation(x_1 + K.dot(h_tm1_1, self.recurrent_kernel_1))
layer1_2 = self.recurrent_activation(x_2 + K.dot(h_tm1_2, self.recurrent_kernel_2))
layer1_3 = self.cell_activation(x_3 * K.dot(h_tm1_3, self.recurrent_kernel_3))
layer1_4 = self.activation(x_4 + K.dot(h_tm1_4, self.recurrent_kernel_4))
layer1_5 = self.recurrent_activation(x_5 + K.dot(h_tm1_5, self.recurrent_kernel_5))
layer1_6 = self.activation(x_6 + K.dot(h_tm1_6, self.recurrent_kernel_6))
layer1_7 = self.recurrent_activation(x_7 + K.dot(h_tm1_7, self.recurrent_kernel_7))
# Second Layer
layer2_0 = self.activation(layer1_0 * layer1_1)
layer2_1 = self.activation(layer1_2 + layer1_3)
layer2_2 = self.activation(layer1_4 * layer1_5)
layer2_3 = self.recurrent_activation(layer1_6 + layer1_7)
# Inject the Cell
layer2_0 = self.activation(layer2_0 + c_tm1)
# Third Layer
layer3_0_pre = layer2_0 * layer2_1
c = layer3_0_pre # create a new cell
layer3_0 = layer3_0_pre
layer3_1 = self.activation(layer2_2 + layer2_3)
# Final Layer
h = self.activation(layer3_0 * layer3_1)
if self.projection_units is not None:
h = self.projection_activation(K.dot(h, self.projection_kernel))
else:
if | |
<reponame>ondrejbohdal/evograd<filename>CrossDomainFewShotLearning/methods/backbone.py
# This code is modified from https://github.com/facebookresearch/low-shot-shrink-hallucinate
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
from torch.nn.utils import weight_norm
# --- gaussian initialize ---
def init_layer(L):
    """Initialize a layer in place: fan-in-style normal init for Conv2d
    weights, constant (1, 0) for BatchNorm2d weight/bias.

    Layers of other types are left untouched.
    """
    if isinstance(L, nn.BatchNorm2d):
        L.weight.data.fill_(1)
        L.bias.data.fill_(0)
    elif isinstance(L, nn.Conv2d):
        # Same fan count as the original: kH * kW * out_channels.
        fan = L.kernel_size[0] * L.kernel_size[1] * L.out_channels
        L.weight.data.normal_(0, math.sqrt(2.0 / float(fan)))
class distLinear(nn.Module):
    """Cosine-similarity classifier: scores are 10 * cos(x, w_class).

    Both the input rows and the class-weight rows are L2-normalized;
    as in the original implementation, the weight normalization
    mutates self.L.weight.data on every forward pass.
    """

    def __init__(self, indim, outdim):
        super(distLinear, self).__init__()
        self.L = weight_norm(
            nn.Linear(indim, outdim, bias=False), name='weight', dim=0)
        self.relu = nn.ReLU()

    def forward(self, x):
        eps = 0.00001
        row_norm = torch.norm(x, p=2, dim=1).unsqueeze(1).expand_as(x)
        x_unit = x.div(row_norm + eps)
        w = self.L.weight.data
        w_norm = torch.norm(w, p=2, dim=1).unsqueeze(1).expand_as(w)
        self.L.weight.data = w.div(w_norm + eps)
        cos_dist = self.L(x_unit)
        # Fixed temperature scaling of the cosine similarities.
        return 10 * cos_dist
# --- flatten tensor ---
class Flatten(nn.Module):
    """Flatten every dimension after the batch dimension."""

    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, x):
        batch = x.size(0)
        # view (not reshape) preserves the original's contiguity contract.
        return x.view(batch, -1)
# --- LSTMCell module for matchingnet ---
class LSTMCell(nn.Module):
    """Single LSTM cell (used by the matchingnet LSTM wrapper).

    When the class-level `maml` flag is True, the gate projections use
    fast-weight linear layers (Linear_fw) so MAML inner-loop updates
    can be applied without touching the slow weights.
    """
    maml = False

    def __init__(self, input_size, hidden_size, bias=True):
        super(LSTMCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        if self.maml:
            self.x2h = Linear_fw(input_size, 4 * hidden_size, bias=bias)
            self.h2h = Linear_fw(hidden_size, 4 * hidden_size, bias=bias)
        else:
            self.x2h = nn.Linear(input_size, 4 * hidden_size, bias=bias)
            self.h2h = nn.Linear(hidden_size, 4 * hidden_size, bias=bias)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(H), 1/sqrt(H)], standard for LSTMs."""
        std = 1.0 / math.sqrt(self.hidden_size)
        for w in self.parameters():
            w.data.uniform_(-std, std)

    def forward(self, x, hidden=None):
        """One step: returns (hy, cy) from input x and (hx, cx)."""
        if hidden is None:
            # BUG FIX: was torch.zeors_like, an AttributeError at runtime.
            # NOTE(review): zeros_like(x) implies hidden_size == input_size
            # whenever hidden is omitted — confirm against callers.
            hx = torch.zeros_like(x)
            cx = torch.zeros_like(x)
        else:
            hx, cx = hidden
        gates = self.x2h(x) + self.h2h(hx)
        ingate, forgetgate, cellgate, outgate = torch.split(
            gates, self.hidden_size, dim=1)
        ingate = torch.sigmoid(ingate)
        forgetgate = torch.sigmoid(forgetgate)
        cellgate = torch.tanh(cellgate)
        outgate = torch.sigmoid(outgate)
        cy = torch.mul(cx, forgetgate) + torch.mul(ingate, cellgate)
        hy = torch.mul(outgate, torch.tanh(cy))
        return (hy, cy)
# --- LSTM module for matchingnet ---
class LSTM(nn.Module):
    """Single-layer (optionally bidirectional) LSTM built on LSTMCell.

    Accepts batch-first input; the backward direction's outputs are
    concatenated with the forward ones on the feature dimension.
    """

    def __init__(self, input_size, hidden_size, num_layers=1, bias=True, batch_first=False, bidirectional=False):
        super(LSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bias = bias
        self.batch_first = batch_first
        self.num_directions = 2 if bidirectional else 1
        # Only a single stacked layer is supported.
        assert(self.num_layers == 1)
        self.lstm = LSTMCell(input_size, hidden_size, self.bias)

    def forward(self, x, hidden=None):
        # Work internally in (seq, batch, feature) order.
        if self.batch_first:
            x = x.permute(1, 0, 2)
        # hidden is one (h0, c0) pair per direction.
        if hidden is None:
            h0 = torch.zeros(self.num_directions, x.size(
                1), self.hidden_size, dtype=x.dtype, device=x.device)
            c0 = torch.zeros(self.num_directions, x.size(
                1), self.hidden_size, dtype=x.dtype, device=x.device)
        else:
            h0, c0 = hidden
        # forward direction
        outs = []
        hn = h0[0]
        cn = c0[0]
        for seq in range(x.size(0)):
            hn, cn = self.lstm(x[seq], (hn, cn))
            outs.append(hn.unsqueeze(0))
        outs = torch.cat(outs, dim=0)
        # reverse direction
        if self.num_directions == 2:
            outs_reverse = []
            hn = h0[1]
            cn = c0[1]
            for seq in range(x.size(0)):
                # BUG FIX: the mirrored index must come from the sequence
                # length x.size(0), not the batch size x.size(1).
                seq = x.size(0) - 1 - seq
                hn, cn = self.lstm(x[seq], (hn, cn))
                outs_reverse.append(hn.unsqueeze(0))
            outs_reverse = torch.cat(outs_reverse, dim=0)
            # NOTE(review): reverse outputs are kept in reversed time
            # order before concatenation, as in the original — confirm
            # that downstream code expects this alignment.
            outs = torch.cat([outs, outs_reverse], dim=2)
        # Restore batch-first order if requested.
        if self.batch_first:
            outs = outs.permute(1, 0, 2)
        return outs
# --- Linear module ---
class Linear_fw(nn.Linear):  # used in MAML to forward input with fast weight
    """nn.Linear whose .fast weight/bias, when both set, override the
    regular (slow) parameters during forward."""

    def __init__(self, in_features, out_features, bias=True):
        super(Linear_fw, self).__init__(in_features, out_features, bias=bias)
        # Lazy hack: fast weights ride along on the Parameter objects.
        self.weight.fast = None
        self.bias.fast = None

    def forward(self, x):
        use_fast = self.weight.fast is not None and self.bias.fast is not None
        if use_fast:
            return F.linear(x, self.weight.fast, self.bias.fast)
        return super(Linear_fw, self).forward(x)
# --- Conv2d module ---
class Conv2d_fw(nn.Conv2d):  # used in MAML to forward input with fast weight
    """nn.Conv2d whose .fast weight (and .fast bias, when biased)
    override the regular parameters during forward."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True):
        super(Conv2d_fw, self).__init__(in_channels, out_channels,
                                        kernel_size, stride=stride, padding=padding, bias=bias)
        self.weight.fast = None
        if self.bias is not None:
            self.bias.fast = None

    def forward(self, x):
        if self.bias is None:
            # Bias-free layer: only the weight can have a fast version.
            if self.weight.fast is None:
                return super(Conv2d_fw, self).forward(x)
            return F.conv2d(x, self.weight.fast, None,
                            stride=self.stride, padding=self.padding)
        # Biased layer: fast path requires both fast weight and fast bias.
        if self.weight.fast is not None and self.bias.fast is not None:
            return F.conv2d(x, self.weight.fast, self.bias.fast,
                            stride=self.stride, padding=self.padding)
        return super(Conv2d_fw, self).forward(x)
# --- softplus module ---
def softplus(x):
    """Sharpened softplus (beta=100): a smooth, strictly positive
    approximation of ReLU, used to keep noise scales positive."""
    return F.softplus(x, beta=100)
# --- feature-wise transformation layer ---
class FeatureWiseTransformation2d_fw(nn.BatchNorm2d):
    """BatchNorm2d with MAML fast weights plus an optional stochastic
    feature-wise transformation (noisy gamma/beta) applied in training.

    NOTE(review): `running_var` is registered with zeros, then
    reset_parameters() -> reset_running_stats() refills it with ones,
    so the zeros init is harmless but misleading — confirm.
    """
    # Class-level switch; set on the class before instantiation.
    feature_augment = False
    def __init__(self, num_features, momentum=0.1, track_running_stats=True):
        super(FeatureWiseTransformation2d_fw, self).__init__(
            num_features, momentum=momentum, track_running_stats=track_running_stats)
        self.weight.fast = None  # fast (inner-loop) weights; None = use slow ones
        self.bias.fast = None
        if self.track_running_stats:
            # Re-registers buffers the parent already created; see class note.
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.zeros(num_features))
        if self.feature_augment:  # initialize {gamma, beta} with {0.3, 0.5}
            self.gamma = torch.nn.Parameter(
                torch.ones(1, num_features, 1, 1)*0.3)
            self.beta = torch.nn.Parameter(
                torch.ones(1, num_features, 1, 1)*0.5)
        self.reset_parameters()
    def reset_running_stats(self):
        """Reset running statistics to mean 0 / variance 1."""
        if self.track_running_stats:
            self.running_mean.zero_()
            self.running_var.fill_(1)
    def forward(self, x, step=0):
        # Prefer fast weights when both are present (MAML inner loop).
        if self.weight.fast is not None and self.bias.fast is not None:
            weight = self.weight.fast
            bias = self.bias.fast
        else:
            weight = self.weight
            bias = self.bias
        if self.track_running_stats:
            out = F.batch_norm(x, self.running_mean, self.running_var,
                               weight, bias, training=self.training, momentum=self.momentum)
        else:
            # NOTE(review): zeros_like(x)/ones_like(x) are full-tensor
            # shaped, unlike the (C,)-shaped stats the sibling classes
            # pass here — confirm this is intentional.
            out = F.batch_norm(x, torch.zeros_like(x), torch.ones_like(
                x), weight, bias, training=True, momentum=1)
        # apply feature-wise transformation (training only)
        if self.feature_augment and self.training:
            gamma = (1 + torch.randn(1, self.num_features, 1, 1, dtype=self.gamma.dtype,
                                     device=self.gamma.device)*softplus(self.gamma)).expand_as(out)
            beta = (torch.randn(1, self.num_features, 1, 1, dtype=self.beta.dtype,
                                device=self.beta.device)*softplus(self.beta)).expand_as(out)
            out = gamma*out + beta
        return out
class FeatureWiseTransformation2d_fix(nn.BatchNorm2d):
    """FeatureWiseTransformation2d that is compatible with the higher
    library: identical to the _fw variant but without fast weights.

    NOTE(review): `running_var` is registered with zeros, then
    reset_parameters() -> reset_running_stats() refills it with ones.
    """
    # Class-level switch; set on the class before instantiation.
    feature_augment = False
    def __init__(self, num_features, momentum=0.1, track_running_stats=True):
        super(FeatureWiseTransformation2d_fix, self).__init__(
            num_features, momentum=momentum, track_running_stats=track_running_stats)
        if self.track_running_stats:
            # Re-registers buffers the parent already created; see class note.
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.zeros(num_features))
        if self.feature_augment:  # initialize {gamma, beta} with {0.3, 0.5}
            self.gamma = torch.nn.Parameter(
                torch.ones(1, num_features, 1, 1)*0.3)
            self.beta = torch.nn.Parameter(
                torch.ones(1, num_features, 1, 1)*0.5)
        self.reset_parameters()
    def reset_running_stats(self):
        """Reset running statistics to mean 0 / variance 1."""
        if self.track_running_stats:
            self.running_mean.zero_()
            self.running_var.fill_(1)
    def forward(self, x, step=0):
        weight = self.weight
        bias = self.bias
        if self.track_running_stats:
            out = F.batch_norm(x, self.running_mean, self.running_var,
                               weight, bias, training=self.training, momentum=self.momentum)
        else:
            # No tracked stats: normalize with the current batch only.
            out = F.batch_norm(x, torch.zeros(x.size(1), dtype=x.dtype, device=x.device), torch.ones(
                x.size(1), dtype=x.dtype, device=x.device), weight, bias, training=True, momentum=1)
        # apply feature-wise transformation (training only)
        if self.feature_augment and self.training:
            gamma = (1 + torch.randn(1, self.num_features, 1, 1, dtype=self.gamma.dtype,
                                     device=self.gamma.device)*softplus(self.gamma)).expand_as(out)
            beta = (torch.randn(1, self.num_features, 1, 1, dtype=self.beta.dtype,
                                device=self.beta.device)*softplus(self.beta)).expand_as(out)
            out = gamma*out + beta
        return out
# --- BatchNorm2d ---
class BatchNorm2d_fw(nn.BatchNorm2d):
    """BatchNorm2d supporting MAML fast weights: when weight.fast and
    bias.fast are both set, they replace the regular affine parameters.

    NOTE(review): `running_var` is registered with zeros, then
    reset_parameters() -> reset_running_stats() refills it with ones.
    """
    def __init__(self, num_features, momentum=0.1, track_running_stats=True):
        super(BatchNorm2d_fw, self).__init__(num_features,
                                             momentum=momentum, track_running_stats=track_running_stats)
        self.weight.fast = None  # fast (inner-loop) weights; None = use slow ones
        self.bias.fast = None
        if self.track_running_stats:
            # Re-registers buffers the parent already created; see class note.
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.zeros(num_features))
        self.reset_parameters()
    def reset_running_stats(self):
        """Reset running statistics to mean 0 / variance 1."""
        if self.track_running_stats:
            self.running_mean.zero_()
            self.running_var.fill_(1)
    def forward(self, x, step=0):
        # Prefer fast weights when both are present (MAML inner loop).
        if self.weight.fast is not None and self.bias.fast is not None:
            weight = self.weight.fast
            bias = self.bias.fast
        else:
            weight = self.weight
            bias = self.bias
        if self.track_running_stats:
            out = F.batch_norm(x, self.running_mean, self.running_var,
                               weight, bias, training=self.training, momentum=self.momentum)
        else:
            # No tracked stats: normalize with the current batch only.
            out = F.batch_norm(x, torch.zeros(x.size(1), dtype=x.dtype, device=x.device), torch.ones(
                x.size(1), dtype=x.dtype, device=x.device), weight, bias, training=True, momentum=1)
        return out
class BatchNorm2d_fix(nn.BatchNorm2d):
    """BatchNorm2d that is compatible with the higher library: same as
    BatchNorm2d_fw but without the fast-weight machinery.

    NOTE(review): `running_var` is registered with zeros, then
    reset_parameters() -> reset_running_stats() refills it with ones.
    """
    def __init__(self, num_features, momentum=0.1, track_running_stats=True):
        super(BatchNorm2d_fix, self).__init__(num_features,
                                              momentum=momentum, track_running_stats=track_running_stats)
        if self.track_running_stats:
            # Re-registers buffers the parent already created; see class note.
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.zeros(num_features))
        self.reset_parameters()
    def reset_running_stats(self):
        """Reset running statistics to mean 0 / variance 1."""
        if self.track_running_stats:
            self.running_mean.zero_()
            self.running_var.fill_(1)
    def forward(self, x, step=0):
        weight = self.weight
        bias = self.bias
        if self.track_running_stats:
            out = F.batch_norm(x, self.running_mean, self.running_var,
                               weight, bias, training=self.training, momentum=self.momentum)
        else:
            # No tracked stats: normalize with the current batch only.
            out = F.batch_norm(x, torch.zeros(x.size(1), dtype=x.dtype, device=x.device), torch.ones(
                x.size(1), dtype=x.dtype, device=x.device), weight, bias, training=True, momentum=1)
        return out
# --- BatchNorm1d ---
class BatchNorm1d_fw(nn.BatchNorm1d):
    """BatchNorm1d supporting MAML fast weights: when weight.fast and
    bias.fast are both set, they replace the regular affine parameters.

    NOTE(review): `running_var` is registered with zeros, then
    reset_parameters() -> reset_running_stats() refills it with ones.
    """
    def __init__(self, num_features, momentum=0.1, track_running_stats=True):
        super(BatchNorm1d_fw, self).__init__(num_features,
                                             momentum=momentum, track_running_stats=track_running_stats)
        self.weight.fast = None  # fast (inner-loop) weights; None = use slow ones
        self.bias.fast = None
        if self.track_running_stats:
            # Re-registers buffers the parent already created; see class note.
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.zeros(num_features))
        self.reset_parameters()
    def reset_running_stats(self):
        """Reset running statistics to mean 0 / variance 1."""
        if self.track_running_stats:
            self.running_mean.zero_()
            self.running_var.fill_(1)
    def forward(self, x, step=0):
        # Prefer fast weights when both are present (MAML inner loop).
        if self.weight.fast is not None and self.bias.fast is not None:
            weight = self.weight.fast
            bias = self.bias.fast
        else:
            weight = self.weight
            bias = self.bias
        if self.track_running_stats:
            out = F.batch_norm(x, self.running_mean, self.running_var,
                               weight, bias, training=self.training, momentum=self.momentum)
        else:
            # No tracked stats: normalize with the current batch only.
            out = F.batch_norm(x, torch.zeros(x.size(1), dtype=x.dtype, device=x.device), torch.ones(
                x.size(1), dtype=x.dtype, device=x.device), weight, bias, training=True, momentum=1)
        return out
# --- Simple Conv Block ---
class ConvBlock(nn.Module):
    """3x3 conv -> feature-wise-transform BN -> ReLU (-> 2x2 max-pool).

    With the class-level `maml` flag set, the conv and BN use their
    fast-weight variants so MAML inner-loop updates can be applied.
    """
    maml = False

    def __init__(self, indim, outdim, pool=True, padding=1):
        super(ConvBlock, self).__init__()
        self.indim = indim
        self.outdim = outdim
        if self.maml:
            self.C = Conv2d_fw(indim, outdim, 3, padding=padding)
            self.BN = FeatureWiseTransformation2d_fw(outdim)
        else:
            self.C = nn.Conv2d(indim, outdim, 3, padding=padding)
            self.BN = FeatureWiseTransformation2d_fix(outdim)
        self.relu = nn.ReLU(inplace=True)
        self.parametrized_layers = [self.C, self.BN, self.relu]
        if pool:
            self.pool = nn.MaxPool2d(2)
            self.parametrized_layers.append(self.pool)
        # Gaussian/constant init for every layer in the trunk.
        for layer in self.parametrized_layers:
            init_layer(layer)
        self.trunk = nn.Sequential(*self.parametrized_layers)

    def forward(self, x):
        return self.trunk(x)
# --- Simple ResNet Block ---
class SimpleBlock(nn.Module):
maml = False
def __init__(self, indim, outdim, half_res, leaky=False):
super(SimpleBlock, self).__init__()
self.indim = indim
self.outdim = outdim
if self.maml:
self.C1 = Conv2d_fw(indim, outdim, kernel_size=3,
stride=2 if half_res else 1, padding=1, bias=False)
self.BN1 = BatchNorm2d_fw(outdim)
self.C2 = Conv2d_fw(
outdim, outdim, kernel_size=3, padding=1, bias=False)
# feature-wise transformation at the end of each residual block
self.BN2 = FeatureWiseTransformation2d_fw(outdim)
else:
self.C1 = nn.Conv2d(indim, outdim, kernel_size=3,
stride=2 if half_res else 1, padding=1, bias=False)
self.BN1 = BatchNorm2d_fix(outdim)
# self.BN1 = nn.BatchNorm2d(outdim)
self.C2 = nn.Conv2d(
outdim, outdim, kernel_size=3, padding=1, bias=False)
self.BN2 = FeatureWiseTransformation2d_fix(outdim)
# self.BN2 = nn.BatchNorm2d(outdim)
self.relu1 = nn.ReLU(inplace=True) if not leaky else nn.LeakyReLU(
0.2, inplace=True)
self.relu2 = nn.ReLU(inplace=True) if not leaky else nn.LeakyReLU(
0.2, | |
dump the resulting output. After a call
to done(), you may not add any more modules until you call
reset(). """
assert self.mf is None
# If we are building an exe, we also need to implicitly
# bring in Python's startup modules.
if addStartupModules:
self.modules['_frozen_importlib'] = self.ModuleDef('importlib._bootstrap', implicit = True)
self.modules['_frozen_importlib_external'] = self.ModuleDef('importlib._bootstrap_external', implicit = True)
for moduleName in startupModules:
if moduleName not in self.modules:
self.addModule(moduleName, implicit = True)
# Excluding a parent module also excludes all its
# (non-explicit) children, unless the parent has allowChildren
# set.
# Walk through the list in sorted order, so we reach parents
# before children.
names = list(self.modules.items())
names.sort()
excludeDict = {}
implicitParentDict = {}
includes = []
autoIncludes = []
origToNewName = {}
for newName, mdef in names:
moduleName = mdef.moduleName
origToNewName[moduleName] = newName
if mdef.implicit and '.' in newName:
# For implicit modules, check if the parent is excluded.
parentName, baseName = newName.rsplit('.', 1)
if parentName in excludeDict :
mdef = excludeDict[parentName]
if mdef.exclude:
if not mdef.allowChildren:
excludeDict[moduleName] = mdef
elif mdef.implicit or mdef.guess:
autoIncludes.append(mdef)
else:
includes.append(mdef)
self.mf = PandaModuleFinder(excludes=list(excludeDict.keys()), suffixes=self.moduleSuffixes, path=self.path)
# Attempt to import the explicit modules into the modulefinder.
# First, ensure the includes are sorted in order so that
# packages appear before the modules they contain. This
# resolves potential ordering issues, especially with modules
# that are discovered by filename rather than through import
# statements.
includes.sort(key = self.__sortModuleKey)
# Now walk through the list and import them all.
for mdef in includes:
try:
self.__loadModule(mdef)
except ImportError as ex:
message = "Unknown module: %s" % (mdef.moduleName)
if str(ex) != "No module named " + str(mdef.moduleName):
message += " (%s)" % (ex)
print(message)
# Also attempt to import any implicit modules. If any of
# these fail to import, we don't really care.
for mdef in autoIncludes:
try:
self.__loadModule(mdef)
# Since it successfully loaded, it's no longer a guess.
mdef.guess = False
except:
# Something went wrong, guess it's not an importable
# module.
pass
# Check if any new modules we found have "hidden" imports
for origName in list(self.mf.modules.keys()):
hidden = hiddenImports.get(origName, [])
for modname in hidden:
if modname.endswith('.*'):
mdefs = self._gatherSubmodules(modname, implicit = True)
for mdef in mdefs.values():
try:
self.__loadModule(mdef)
except ImportError:
pass
else:
self.__loadModule(self.ModuleDef(modname, implicit = True))
# Now, any new modules we found get added to the export list.
for origName in list(self.mf.modules.keys()):
if origName not in origToNewName:
self.modules[origName] = self.ModuleDef(origName, implicit = True)
missing = []
for origName in self.mf.any_missing_maybe()[0]:
if origName in startupModules:
continue
if origName in self.previousModules:
continue
if origName in self.modules:
continue
# This module is missing. Let it be missing in the
# runtime also.
self.modules[origName] = self.ModuleDef(origName, exclude = True,
implicit = True)
if origName in okMissing:
# If it's listed in okMissing, don't even report it.
continue
prefix = origName.split('.')[0]
if origName not in reportedMissing:
missing.append(origName)
reportedMissing[origName] = True
if missing:
missing.sort()
print("There are some missing modules: %r" % missing)
def __sortModuleKey(self, mdef):
""" A sort key function to sort a list of mdef's into order,
primarily to ensure that packages proceed their modules. """
if mdef.moduleName:
# If we have a moduleName, the key consists of the split
# tuple of packages names. That way, parents always sort
# before children.
return ('a', mdef.moduleName.split('.'))
else:
# If we don't have a moduleName, the key doesn't really
# matter--we use filename--but we start with 'b' to ensure
# that all of non-named modules appear following all of
# the named modules.
return ('b', mdef.filename)
def __loadModule(self, mdef):
""" Adds the indicated module to the modulefinder. """
if mdef.filename:
# If it has a filename, then we found it as a file on
# disk. In this case, the moduleName may not be accurate
# and useful, so load it as a file instead.
tempPath = None
if '.' not in mdef.moduleName:
# If we loaded a python file from the root, we need to
# temporarily add its directory to the module search
# path, so the modulefinder can find any sibling
# python files it imports as well.
tempPath = Filename(mdef.filename.getDirname()).toOsSpecific()
self.mf.path.append(tempPath)
pathname = mdef.filename.toOsSpecific()
ext = mdef.filename.getExtension()
if ext == 'pyc' or ext == 'pyo':
fp = open(pathname, 'rb')
stuff = ("", "rb", imp.PY_COMPILED)
self.mf.load_module(mdef.moduleName, fp, pathname, stuff)
else:
stuff = ("", "rb", imp.PY_SOURCE)
if mdef.text:
fp = io.StringIO(mdef.text)
else:
fp = open(pathname, 'rb')
self.mf.load_module(mdef.moduleName, fp, pathname, stuff)
if tempPath:
del self.mf.path[-1]
else:
# Otherwise, we can just import it normally.
self.mf.import_hook(mdef.moduleName)
def reset(self):
""" After a previous call to done(), this resets the
FreezeTool object for a new pass. More modules may be added
and dumped to a new target. Previously-added modules are
remembered and will not be dumped again. """
self.mf = None
self.previousModules = dict(self.modules)
def mangleName(self, moduleName):
return 'M_' + moduleName.replace('.', '__').replace('-', '_')
def getAllModuleNames(self):
""" Return a list of all module names that have been included
or forbidden, either in this current pass or in a previous
pass. Module names that have been excluded are not included
in this list. """
moduleNames = []
for newName, mdef in list(self.modules.items()):
if mdef.guess:
# Not really a module.
pass
elif mdef.exclude and not mdef.forbid:
# An excluded (but not forbidden) file.
pass
else:
moduleNames.append(newName)
moduleNames.sort()
return moduleNames
    def getModuleDefs(self):
        """ Return a list of all of the modules we will be explicitly
        or implicitly including.  The return value is actually a list
        of tuples: (moduleName, moduleDef), sorted by name.

        A module is emitted when it is included and was not already
        exported by a previous pass, or when it is forbidden and was
        not already forbidden previously. """
        moduleDefs = []
        for newName, mdef in list(self.modules.items()):
            prev = self.previousModules.get(newName, None)
            if not mdef.exclude:
                # Include this module (even if a previous pass
                # excluded it).  But don't bother if we exported it
                # previously.
                if prev and not prev.exclude:
                    # Previously exported.
                    pass
                elif mdef.moduleName in self.mf.modules or \
                     mdef.moduleName in startupModules or \
                     mdef.filename:
                    # Only emit modules the finder actually resolved,
                    # startup modules, or ones located by filename.
                    moduleDefs.append((newName, mdef))
            elif mdef.forbid:
                # Forbidden modules are emitted too (presumably so the
                # frozen result actively blocks them) — but only once.
                if not prev or not prev.forbid:
                    moduleDefs.append((newName, mdef))
        moduleDefs.sort()
        return moduleDefs
def __replacePaths(self):
# Build up the replacement pathname table, so we can eliminate
# the personal information in the frozen pathnames. The
# actual filename we put in there is meaningful only for stack
# traces, so we'll just use the module name.
replace_paths = []
for moduleName, module in list(self.mf.modules.items()):
if module.__code__:
origPathname = module.__code__.co_filename
replace_paths.append((origPathname, moduleName))
self.mf.replace_paths = replace_paths
# Now that we have built up the replacement mapping, go back
# through and actually replace the paths.
for moduleName, module in list(self.mf.modules.items()):
if module.__code__:
co = self.mf.replace_paths_in_code(module.__code__)
module.__code__ = co;
    def __addPyc(self, multifile, filename, code, compressionLevel):
        """ Adds the marshalled code object to the multifile as a .pyc
        subfile under the given filename.  Does nothing if code is
        empty/None. """
        if code:
            # .pyc header: magic number plus a zeroed timestamp field.
            data = imp.get_magic() + b'\0\0\0\0'
            if sys.version_info >= (3, 0):
                # Python 3 widened the header by another 4-byte field.
                # NOTE(review): since 3.7 the header is 16 bytes
                # (PEP 552); this writes only 12 — confirm the target
                # interpreter version.
                data += b'\0\0\0\0'
            data += marshal.dumps(code)
            stream = StringStream(data)
            multifile.addSubfile(filename, stream, compressionLevel)
            multifile.flush()
def __addPythonDirs(self, multifile, moduleDirs, dirnames, compressionLevel):
""" Adds all of the names on dirnames as a module directory. """
if not dirnames:
return
str = '.'.join(dirnames)
if str not in moduleDirs:
# Add an implicit __init__.py file (but only if there's
# not already a legitimate __init__.py file).
moduleName = '.'.join(dirnames)
filename = '/'.join(dirnames) + '/__init__'
if self.storePythonSource:
filename += '.py'
stream = StringStream(b'')
if multifile.findSubfile(filename) < 0:
multifile.addSubfile(filename, stream, 0)
multifile.flush()
else:
if __debug__:
filename += '.pyc'
else:
filename += '.pyo'
if multifile.findSubfile(filename) < 0:
code = compile('', moduleName, 'exec')
self.__addPyc(multifile, filename, code, compressionLevel)
moduleDirs[str] = True
self.__addPythonDirs(multifile, moduleDirs, dirnames[:-1], compressionLevel)
def __addPythonFile(self, multifile, moduleDirs, moduleName, mdef,
compressionLevel):
""" Adds the named module to the multifile as a .pyc file. """
# First, split the module into its subdirectory names.
dirnames = moduleName.split('.')
if len(dirnames) > 1 and dirnames[-1] == '__init__':
# The "module" may end in __init__, but that really means
# the parent directory.
dirnames = dirnames[:-1]
self.__addPythonDirs(multifile, moduleDirs, dirnames[:-1], compressionLevel)
filename = '/'.join(dirnames)
module = self.mf.modules.get(mdef.moduleName, None)
if getattr(module, '__path__', None) is not None or \
(getattr(module, '__file__', None) is not None | |
# -*- coding: utf-8 -*-
# Metropolis Drift-Diffusion-Model
# Copyright 2018 <NAME>, <NAME>, <NAME>
# This file is released under the MIT licence that accompanies the code
## This file contains a console interface and related routines
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse.csgraph import floyd_warshall
from scipy.sparse.csgraph import connected_components
from metropolis_ddm import metropolis_ddm_hist
from numbers import Real, Integral
from time import time
# TODO: asymmetric barriers, consideration sets
def statdist(u, upper_barrier, em):
    """Stationary distribution of the Metropolis-DDM process.

    u: sequence of utilities, one per alternative.
    upper_barrier: (symmetric) barrier height.
    em: exploration matrix, or None for a uniform one.
    Returns a probability vector (sums to 1).

    NOTE(review): assumes all utilities are pairwise distinct; equal
    utilities make (u[a] - u[b]) zero and divide by zero — confirm.
    """
    n = len(u)
    pab = np.zeros((n, n))
    for a in range(n):
        for b in range(n):
            if a == b:
                continue
            x = upper_barrier * (u[a] - u[b]) / 2
            # Mean decision-time factor; symmetric and positive in (a, b).
            dt = upper_barrier / (u[a] - u[b]) * np.tanh(x)
            q = 1.0 if em is None else em[a, b]
            pab[a, b] = np.exp(upper_barrier * u[b]) * q * dt
    # BUG FIX: this used builtin sum(pab, 1), which adds the rows of pab
    # starting from 1 — silently adding 1.0 to every column total (the
    # MATLAB-style sum(X, 1) column sum was clearly intended).
    p = pab.sum(axis=0)
    p /= p.sum()
    return p
class Abort(Exception):
    """Raised by the console-input helpers when the user enters the
    designated abort token (see input_indices)."""
    pass
def isintopt(x):
    """True if x is an "optional int": None or any integral number."""
    if x is None:
        return True
    return isinstance(x, Integral)
def isstropt(x):
    """True if x is an "optional string": None or a str."""
    if x is None:
        return True
    return isinstance(x, str)
def input_int(msg, *, lb = None, ub = None, default = None):
    """Prompt repeatedly for an integer, optionally bounded by lb/ub
    (both inclusive).

    An empty entry returns `default` when one is given; the default,
    when present, must itself respect the bounds.
    """
    assert isintopt(lb) and isintopt(ub) and isintopt(default)
    if default is not None:
        assert lb is None or default >= lb
        assert ub is None or default <= ub
        msg += ' [default=%i]' % default
    msg += ': '
    while True:
        raw = input(msg).strip()
        if default is not None and not raw:
            return default
        try:
            v = int(raw)
        except ValueError:
            print('Invalid input, please enter an integer')
            continue
        if lb is not None and v < lb:
            print('Invalid value, expected v >= %i, given: %i' % (lb, v))
        elif ub is not None and v > ub:
            print('Invalid value, expected v <= %i, given: %i' % (ub, v))
        else:
            return v
def input_bool(q, *, default=True, yes=['y', 'yes', '1'], no=['n', 'no', '0']):
    """Prompt with question `q` until a yes/no answer is given.

    default: True/False makes an empty entry select that side; any
    other value forces an explicit answer.  `yes`/`no` list the
    accepted spellings (compared case-insensitively); the first entry
    of each is shown in the prompt, brackets marking the default.
    NOTE: the default lists are never mutated, so sharing them across
    calls is safe here.
    """
    yes = yes if isinstance(yes, list) else [yes]
    no = no if isinstance(no, list) else [no]
    y0, n0 = yes[0], no[0]
    # Sentinel trick: the default side stores 0 (the length of an empty
    # answer); -1 can never equal len(x), so non-defaults never match.
    deft, deff = -1, -1
    if default is True:
        opts = '[%s]/%s' % (y0, n0)
        deft = 0
    elif default is False:
        opts = '%s/[%s]' % (y0, n0)
        deff = 0
    else:
        opts = '%s/%s' % (y0, n0)
    msg = q + (' (%s)? ' % opts)
    while True:
        x = input(msg).upper().strip()
        # Empty input (len 0) selects the default side, if one is set.
        if len(x) == deft or x in map(lambda s: s.upper(), yes):
            return True
        if len(x) == deff or x in map(lambda s: s.upper(), no):
            return False
        print("Invalid input, please enter '%s' or '%s'" % (y0, n0))
def isfloatopt(x):
    """True if x is an "optional float": None or any real number
    (integers count, since they are Real)."""
    if x is None:
        return True
    return isinstance(x, Real)
def input_float(msg, *, lbt = None, lbe = None, ubt = None, ube = None, default = None):
    """Prompt repeatedly for a float within optional bounds.

    lbt/ubt are strict (exclusive) bounds, lbe/ube inclusive ones; at
    most one of each pair may be given.  An empty entry returns
    `default` when set (the default must respect the bounds).  NaN is
    always rejected.
    """
    assert lbt is None or lbe is None
    assert ubt is None or ube is None
    assert isfloatopt(lbt) and isfloatopt(lbe) and isfloatopt(ubt) and isfloatopt(ube) and isfloatopt(default)
    if default is not None:
        assert lbe is None or default >= lbe
        assert lbt is None or default > lbt
        assert ube is None or default <= ube
        assert ubt is None or default < ubt
        msg += ' [default=%g]' % default
    msg += ': '
    while True:
        raw = input(msg).strip()
        if default is not None and not raw:
            return default
        try:
            v = float(raw)
        except ValueError:
            print('Invalid input, please enter a float')
            continue
        if v != v:  # NaN is the only value unequal to itself
            print('Invalid input, `nan` is not allowed')
        elif lbt is not None and v <= lbt:
            print('Invalid value, expected v > %g, given: %g' % (lbt, v))
        elif lbe is not None and v < lbe:
            print('Invalid value, expected v >= %g, given: %g' % (lbe, v))
        elif ubt is not None and v >= ubt:
            print('Invalid value, expected v < %g, given: %g' % (ubt, v))
        elif ube is not None and v > ube:
            print('Invalid value, expected v <= %g, given: %g' % (ube, v))
        else:
            return v
def input_indices(msg, n, m = None, *, filt = None, abort = None):
    """Prompt for a pair of matrix indices 'i,j' with 0 <= i < n and
    0 <= j < m.

    m defaults to n (square matrix).  `filt(i, j)`, when given, may
    veto a pair, in which case the prompt repeats.  If `abort` is
    given, entering exactly that string raises Abort.
    """
    assert isinstance(msg, str) and isinstance(n, int) and isintopt(m) and isstropt(abort)
    if m is None:
        m = n
    if abort is not None:
        msg += " ['%s' to abort]" % abort
    msg += ': '
    while True:
        s = input(msg).strip()
        if abort is not None and s == abort:
            raise Abort()
        ls = s.split(',')
        if len(ls) != 2:
            print('Invalid format, you need to enter two indices separated by a comma')
            continue
        try:
            i, j = int(ls[0]), int(ls[1])
        except ValueError:
            print('Invalid indices, please enter two integers')
            continue
        if not (0 <= i < n and 0 <= j < m):
            # BUG FIX: the format string had four placeholders but only
            # two arguments, raising TypeError instead of reporting.
            print('Out of bounds, matrix size is %s×%s, given: %i,%i' % (n, m, i, j))
            continue
        if filt is not None and not filt(i, j):
            print('Invalid choice %i,%i' % (i, j))
            # BUG FIX: previously fell through to `break`, returning a
            # pair the filter had just rejected; re-prompt instead.
            continue
        return i, j
# UNIFORM EXPLORATION MATRIX
# this function is not actually used for computation (we use uniform_proposal in that case),
# it is used for plotting the exploration matrix when it is uniform
def explo_matrix_unif(n):
    """Return the uniform exploration matrix for `n` alternatives.

    Every off-diagonal entry is 1/(n-1) (uniform exploration over all
    distances, which are all 1); diagonal entries absorb the remainder so
    each column sums to 1.  Used only for plotting the uniform case —
    computations use `uniform_proposal` instead.
    """
    if not isinstance(n, Integral):
        raise TypeError('argument `n` must be an int, given: %s' % cname(n))
    if n < 2:
        raise ValueError('argument `n` must be >= 2, given: %i' % n)
    # Uniform distances: 1 everywhere off the diagonal, 0 on it.
    distances = np.ones((n, n))
    np.fill_diagonal(distances, 0)
    matrix = np.zeros((n, n))
    off_diag = matrix != distances  # True exactly where distance is non-zero
    matrix[off_diag] = (1 / (n - 1)) / distances[off_diag]
    # Diagonal gets whatever is left so every column sums to 1.
    matrix[~off_diag] = 1 - (matrix.sum(axis=0) - np.diag(matrix))
    return matrix
# EXPLORATION MATRIX INPUT
# This function lets the user input the exploration matrix, either through a distance
# matrix or through a graph. When the user decides to input a graph we later convert
# it to a distance matrix and then transform it into an exploration matrix.
# The arguments of the function are the number of alternatives (n), the exploration
# aversion parameter (ro) and a parameter that tells the function whether to work
# on a graph or directly on a distance matrix (alt, 1 for distance and 0 for graph)
# The input procedure works as follows:
# 0 - both the graph and the distance matrix are initialized as uniform, so as if the
# corresponding graph were complete with weights equal to 1
# 1 - choose a pair of nodes (alternatives), they must be written in the form (i,j)
# 2 - then, once the function checked whether the proposed alternatives are valid,
# the user can insert the value to put in chosen position. If the user is working
# on a graph the value can be either 0 or 1, otherwise it can be any positive float
# (for the distance matrix), but it cannot disconnect the final graph
# 3 - To stop the construction of the matrix, the user simply has to input '0' whenever
# 'Continue (0/1)?' is asked.
#
# After the input procedure, the distance matrix is normalized so that its minimum entry
# outside the main diagonal is 1 (we do not need to do so for the graph because by construction
# the minimum distance is already 1).
def explo_matrix_input(n, ro, alt):
if not isinstance(n, Integral):
raise TypeError('argument `n` must be an int, given: %s' % cname(n))
if n <= 0:
raise ValueError('argument `n` must be > 0, given: %i' % n)
if not isinstance(ro, Real):
raise TypeError('argument `ro` must be a float, given: %s' % cname(ro))
if ro < 0:
raise ValueError('argument `ro` must be >= 0, given: %g' % ro)
if alt not in (0,1):
raise ValueError('argument `alt` must be 0 or 1, given: %s' % alt)
dist = np.ones((n,n))
dist[np.diag_indices(n)] = 0
mask = dist != 0
if alt: # DISTANCE
print('Input the distance matrix of the alternatives:')
g_aux = dist.copy() #corresponding graph of the distance matrix, will be used to check connected components
while True:
print('Current Distance Matrix:\n%s' % dist)
try:
a, b = input_indices('Choose two indices (i,j)', n, filt = lambda i,j: i != j, abort = 'q')
except Abort:
break
d = input_float('Enter new d[%i,%i]' % (a, b), lbt = 0.0)
if np.isposinf(d):
g_aux[a,b] = 0
g_aux[b,a] = 0
| |
randomization=0):
if (self.working_mode == 'synthesis'):
self.flatten_parameters_to_reference(cycle=0)
self.output_handler.write(self, pixel=0, randomization=randomization)
def add_spectral(self, spectral):
    """
    Programmatically add a spectral region

    Parameters
    ----------
    spectral : dict
        Dictionary containing the following data
        'Name', 'Wavelength', 'Topology', 'Weights Stokes', 'Wavelength file', 'Wavelength weight file',
        'Observations file', 'Mask file'

    Returns
    -------
    None
    """

    # Make sure that all keys of the input dictionary are in lower case
    # This is irrelevant if a configuration file is used because this has been
    # already done
    value = hazel.util.lower_dict_keys(spectral)

    if (self.verbose >= 1):
        self.logger.info('Adding spectral region {0}'.format(value['name']))

    # Normalize optional entries: a missing key and the literal string
    # 'None' are both mapped to the Python value None.
    if ('wavelength file' not in value):
        value['wavelength file'] = None
    elif (value['wavelength file'] == 'None'):
        value['wavelength file'] = None

    if ('wavelength weight file' not in value):
        value['wavelength weight file'] = None
    elif (value['wavelength weight file'] == 'None'):
        value['wavelength weight file'] = None

    if ('observations file' not in value):
        value['observations file'] = None
    elif (value['observations file'] == 'None'):
        value['observations file'] = None

    if ('stokes weights' not in value):
        value['stokes weights'] = None
    elif (value['stokes weights'] == 'None'):
        value['stokes weights'] = None

    if ('mask file' not in value):
        value['mask file'] = None
    elif (value['mask file'] == 'None'):
        value['mask file'] = None

    if ('los' not in value):
        value['los'] = None
    elif (value['los'] == 'None'):
        value['los'] = None

    # Per-Stokes weights default to a list of 10 Nones (one per cycle slot);
    # missing values are later replaced by 1.0 below.
    for tmp in ['i', 'q', 'u', 'v']:
        if ('weights stokes {0}'.format(tmp) not in value):
            value['weights stokes {0}'.format(tmp)] = [None]*10
        elif (value['weights stokes {0}'.format(tmp)] == 'None'):
            value['weights stokes {0}'.format(tmp)] = [None]*10

    if ('boundary condition' not in value):
        value['boundary condition'] = None
    elif (value['boundary condition'] == 'None'):
        value['boundary condition'] = None

    if ('instrumental profile' not in value):
        value['instrumental profile'] = None
    elif (value['instrumental profile'] == 'None'):
        value['instrumental profile'] = None

    # Wavelength file is not present
    if (value['wavelength file'] is None):
        # If the wavelength is defined
        if ('wavelength' in value):
            axis = value['wavelength']
            wvl = np.linspace(float(axis[0]), float(axis[1]), int(axis[2]))
            wvl_lr = None
            if (self.verbose >= 1):
                self.logger.info('  - Using wavelength axis from {0} to {1} with {2} steps'.format(float(axis[0]), float(axis[1]), int(axis[2])))
        else:
            raise Exception('Wavelength range is not defined. Please, use "Wavelength" or "Wavelength file"')
    else:
        # If both observed and synthetic wavelength points are given
        if ('wavelength' in value):
            axis = value['wavelength']
            if (len(axis) != 3):
                raise Exception("Wavelength range is not given in the format: lower, upper, steps")
            wvl = np.linspace(float(axis[0]), float(axis[1]), int(axis[2]))
            if (self.verbose >= 1):
                self.logger.info('  - Using wavelength axis from {0} to {1} with {2} steps'.format(float(axis[0]), float(axis[1]), int(axis[2])))
                self.logger.info('  - Reading wavelength axis from {0}'.format(value['wavelength file']))
            # Low-resolution (observed) wavelength axis read from file
            wvl_lr = np.loadtxt(self.root + value['wavelength file'])
        else:
            if (self.verbose >= 1):
                self.logger.info('  - Reading wavelength axis from {0}'.format(value['wavelength file']))
            wvl = np.loadtxt(self.root + value['wavelength file'])
            wvl_lr = None

    if (value['wavelength weight file'] is None):
        if (self.verbose >= 1 and self.working_mode == 'inversion'):
            self.logger.info('  - Setting all wavelength weights to 1')
        # One weight per Stokes parameter and wavelength point
        weights = np.ones((4,len(wvl)))
    else:
        if (self.verbose >= 1):
            self.logger.info('  - Reading wavelength weights from {0}'.format(value['wavelength weight file']))
        weights = np.loadtxt(self.root + value['wavelength weight file'], skiprows=1).T

    # Observations file not present
    if (value['observations file'] is None):
        if (self.working_mode == 'inversion'):
            raise Exception("Inversion mode without observations is not allowed.")
        obs_file = None
    else:
        if (self.verbose >= 1):
            self.logger.info('  - Using observations from {0}'.format(value['observations file']))
        obs_file = value['observations file']

    if (value['mask file'] is None):
        mask_file = None
        if (self.verbose >= 1):
            self.logger.info('  - No mask for pixels')
    else:
        if (self.verbose >= 1):
            self.logger.info('  - Using mask from {0}'.format(value['mask file']))
        mask_file = value['mask file']

    if (value['instrumental profile'] is None):
        if (self.verbose >= 1):
            self.logger.info('  - No instrumental profile')
    else:
        if (self.verbose >= 1):
            self.logger.info('  - Instrumental profile : {0}'.format(value['instrumental profile']))

    # if (value['straylight file'] is None):
    #     if (self.verbose >= 1):
    #         self.logger.info('  - Not using straylight')
    #     stray_file = None
    # else:
    #     if (self.verbose >= 1):
    #         self.logger.info('  - Using straylight from {0}'.format(value['straylight file']))
    #     stray_file = value['straylight file']

    if (value['los'] is None):
        # Synthesis cannot proceed without a line of sight; in inversion
        # the LOS can come from the observations instead.
        if (self.working_mode == 'synthesis'):
            raise Exception("You need to provide the LOS for spectral region {0}".format(value['name']))
        los = None
    else:
        los = np.array(value['los']).astype('float64')
        if (self.verbose >= 1):
            self.logger.info('  - Using LOS {0}'.format(value['los']))

    if (value['boundary condition'] is None):
        if (self.verbose >= 1):
            self.logger.info('  - Using default boundary conditions [1,0,0,0] in spectral region {0} or read from file. Check carefully!'.format(value['name']))
        boundary = np.array([1.0,0.0,0.0,0.0])
        self.normalization = 'on-disk'
    else:
        boundary = np.array(value['boundary condition']).astype('float64')
        # A zero continuum intensity boundary means an off-limb observation
        if (boundary[0] == 0.0):
            if (self.verbose >= 1):
                self.logger.info('  - Using off-limb normalization (peak intensity)')
        if (self.verbose >= 1):
            self.logger.info('  - Using boundary condition {0}'.format(value['boundary condition']))

    # Build the (4, n_cycles) Stokes-weight array, replacing missing
    # entries by the neutral weight 1.0.
    stokes_weights = []
    for st in ['i', 'q', 'u', 'v']:
        tmp = hazel.util.tofloat(value['weights stokes {0}'.format(st)])
        tmp = [i if i is not None else 1.0 for i in tmp]
        stokes_weights.append(tmp)
    stokes_weights = np.array(stokes_weights)

    # Register the fully-configured spectral region and remember its
    # topology string for later atmosphere chaining.
    self.spectrum[value['name']] = Spectrum(wvl=wvl, weights=weights, observed_file=obs_file,
        name=value['name'], stokes_weights=stokes_weights, los=los, boundary=boundary, mask_file=mask_file, instrumental_profile=value['instrumental profile'], root=self.root, wvl_lr=wvl_lr)

    self.topologies.append(value['topology'])
def add_photosphere(self, atmosphere):
    """
    Programmatically add a photosphere

    Parameters
    ----------
    atmosphere : dict
        Dictionary containing the following data
        'Name', 'Spectral region', 'Height', 'Line', 'Wavelength', 'Reference atmospheric model',
        'Ranges', 'Nodes'

    Returns
    -------
    None
    """

    # Make sure that all keys of the input dictionary are in lower case
    # This is irrelevant if a configuration file is used because this has been
    # already done
    atm = hazel.util.lower_dict_keys(atmosphere)

    self.atmospheres[atm['name']] = SIR_atmosphere(working_mode=self.working_mode, name=atm['name'], verbose=self.verbose)
    # Spectral line identifiers (integers) handled by this photosphere
    lines = [int(k) for k in list(atm['spectral lines'])]

    # If NLTE is available because PyTorch and PyTorch Geom are available
    # check whether the line is needed in NLTE or not
    if self.nlte_available:
        if ('nlte' not in atm):
            self.atmospheres[atm['name']].nlte = False
        else:
            self.atmospheres[atm['name']].nlte = hazel.util.tobool(atm['nlte'])
            if (self.verbose >= 1):
                self.logger.info("    * Line in NLTE if available")
    else:
        self.atmospheres[atm['name']].nlte = False

    # Missing key or literal 'None' both mean "no explicit range"
    if ('wavelength' not in atm):
        atm['wavelength'] = None
    elif (atm['wavelength'] == 'None'):
        atm['wavelength'] = None

    if (atm['wavelength'] is not None):
        wvl_range = [float(k) for k in atm['wavelength']]
    else:
        # Default to the full wavelength axis of the associated spectral region
        wvl_range = [np.min(self.spectrum[atm['spectral region']].wavelength_axis), np.max(self.spectrum[atm['spectral region']].wavelength_axis)]

    if ('reference frame' in atm):
        if ('line-of-sight' in atm['reference frame']):
            self.atmospheres[atm['name']].reference_frame = 'line-of-sight'
        if ('vertical' in atm['reference frame']):
            raise Exception('Magnetic fields in photospheres are always in the line-of-sight reference frame.')
    else:
        # Photospheric fields are always expressed in the LOS frame
        self.atmospheres[atm['name']].reference_frame = 'line-of-sight'

    if (self.verbose >= 1):
        self.logger.info("    * Adding line : {0}".format(lines))
        self.logger.info("    * Magnetic field reference frame : {0}".format(self.atmospheres[atm['name']].reference_frame))

    self.atmospheres[atm['name']].add_active_line(lines=lines, spectrum=self.spectrum[atm['spectral region']],
        wvl_range=np.array(wvl_range), verbose=self.verbose)

    # Enable global NLTE machinery if this atmosphere carries a graphnet model
    if (self.atmospheres[atm['name']].graphnet_nlte is not None):
        self.set_nlte(True)

    # Per-parameter allowed ranges (case-insensitive key match)
    if ('ranges' in atm):
        for k, v in atm['ranges'].items():
            for k2, v2 in self.atmospheres[atm['name']].parameters.items():
                if (k.lower() == k2.lower()):
                    if (v == 'None'):
                        self.atmospheres[atm['name']].ranges[k2] = None
                    else:
                        self.atmospheres[atm['name']].ranges[k2] = hazel.util.tofloat(v)

    # Regularization defaults to None for every parameter, then overridden
    for k2, v2 in self.atmospheres[atm['name']].parameters.items():
        self.atmospheres[atm['name']].regularization[k2] = None

    if ('regularization' in atm):
        for k, v in atm['regularization'].items():
            for k2, v2 in self.atmospheres[atm['name']].parameters.items():
                if (k.lower() == k2.lower()):
                    if (v == 'None'):
                        self.atmospheres[atm['name']].regularization[k2] = None
                    else:
                        self.atmospheres[atm['name']].regularization[k2] = v

    if ('reference atmospheric model' in atm):
        my_file = Path(self.root + atm['reference atmospheric model'])
        if (not my_file.exists()):
            # NOTE(review): FileNotFoundError would describe this condition
            # better; kept as-is because callers may catch FileExistsError.
            raise FileExistsError("Input file {0} for atmosphere {1} does not exist.".format(my_file, atm['name']))

        self.atmospheres[atm['name']].load_reference_model(self.root + atm['reference atmospheric model'], self.verbose)

        if (self.atmospheres[atm['name']].model_type == '3d'):
            self.atmospheres[atm['name']].n_pixel = self.atmospheres[atm['name']].model_handler.get_npixel()

    # Number of nodes per parameter per inversion cycle
    if ('nodes' in atm):
        for k, v in atm['nodes'].items():
            for k2, v2 in self.atmospheres[atm['name']].parameters.items():
                if (k.lower() == k2.lower()):
                    self.atmospheres[atm['name']].cycles[k2] = hazel.util.toint(v)

    if ('temperature change to recompute departure coefficients' in atm):
        self.atmospheres[atm['name']].t_change_departure = float(atm['temperature change to recompute departure coefficients'])
    else:
        self.atmospheres[atm['name']].t_change_departure = 0.0
def add_chromosphere(self, atmosphere):
"""
Programmatically add a chromosphere
Parameters
----------
atmosphere : dict
Dictionary containing the following data
'Name', 'Spectral region', 'Height', 'Line', 'Wavelength', 'Reference atmospheric model',
'Ranges', 'Nodes'
Returns
-------
None
"""
# Make sure that all keys of the input dictionary are in lower case
# This is irrelevant if a configuration file is used because this has been
# already done
atm = hazel.util.lower_dict_keys(atmosphere)
self.atmospheres[atm['name']] = Hazel_atmosphere(working_mode=self.working_mode, name=atm['name'])
if ('wavelength' not in atm):
atm['wavelength'] = None
elif (atm['wavelength'] == 'None'):
atm['wavelength'] = None
if (atm['wavelength'] is not None):
wvl_range = [float(k) for k in atm['wavelength']]
else:
wvl_range = [np.min(self.spectrum[atm['spectral region']].wavelength_axis), np.max(self.spectrum[atm['spectral region']].wavelength_axis)]
self.atmospheres[atm['name']].add_active_line(line=atm['line'], spectrum=self.spectrum[atm['spectral region']],
wvl_range=np.array(wvl_range))
if ('reference frame' in | |
import errno
import fnmatch
import json
import os
import re
import shutil
import tarfile
from urlparse import urlparse

import arc

from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestercore import core_utils
from pandaharvester.harvestercore.queue_config_mapper import QueueConfigMapper
from pandaharvester.harvestercore.work_spec import WorkSpec
from pandaharvester.harvestermisc import arc_utils

from .base_messenger import BaseMessenger
# File names exchanged with the payload, all taken from harvester config.
# json for outputs
jsonOutputsFileName = harvester_config.payload_interaction.eventStatusDumpJsonFile
# xml for outputs
xmlOutputsBaseFileName = harvester_config.payload_interaction.eventStatusDumpXmlFile
# json for event request
jsonEventsRequestFileName = harvester_config.payload_interaction.eventRequestFile
# json to feed events
jsonEventsFeedFileName = harvester_config.payload_interaction.eventRangesFile
# json to update events
jsonEventsUpdateFileName = harvester_config.payload_interaction.updateEventsFile
# suffix appended to a json file once it has been consumed
suffixReadJson = '.read'
# module-level logger; per-worker loggers are derived from it
baselogger = core_utils.setup_logger()
class ARCMessenger(BaseMessenger):
'''Mechanism for passing information about completed jobs back to harvester.'''
def __init__(self, **kwarg):
    '''Initialise the messenger and map VOMS roles to proxy files.'''
    self.jobSpecFileFormat = 'json'
    BaseMessenger.__init__(self, **kwarg)
    self.schedulerid = harvester_config.master.harvester_id
    self.tmpdir = '/tmp'  # TODO configurable or common function

    # Credential dictionary role: proxy file
    roles = [entry.split('=')[1] for entry in list(harvester_config.credmanager.voms)]
    proxies = list(harvester_config.credmanager.outCertFile)
    self.certs = dict(zip(roles, proxies))
    self.cred_type = arc.initializeCredentialsType(arc.initializeCredentialsType.SkipCredentials)
def _setup_proxy(self, usercfg, workspec, jobid, log):
    '''Set up UserConfig object with correct proxy.

    Looks up the proxy file for the worker's VOMS role and installs it on
    the given arc.UserConfig.  Returns True on success, False otherwise.
    '''
    proxyrole = workspec.workAttributes['proxyrole']
    try:
        usercfg.ProxyPath(str(self.certs[proxyrole]))
    # BUG FIX: was a bare `except:` which also swallowed SystemExit and
    # KeyboardInterrupt; narrow to Exception (KeyError on unknown role,
    # or any arc.ProxyPath failure).
    except Exception:
        log.error("Job {0}: no proxy found with role {1}".format(jobid, proxyrole))
        return False
    return True
def _copy_file(self, source, destination, usercfg, log):
    '''Copy a file from source URL to destination URL via arc.DataMover.'''
    log.info('Copying {0} to {1}'.format(source, destination))
    src = arc_utils.DataPoint(str(source), usercfg)
    dest = arc_utils.DataPoint(str(destination), usercfg)
    mover = arc.DataMover()
    mover.retry(False)
    mover.passive(True)
    mover.secure(False)
    # Returns an arc status object; falsy on failure
    return mover.Transfer(src.h, dest.h, arc.FileCache(), arc.URLMap())
def _delete_file(self, filename, usercfg, log):
    '''Delete a remote file on the ARC CE; returns the arc status object.'''
    log.info('Deleting {0}'.format(filename))
    target = arc_utils.DataPoint(str(filename), usercfg)
    target.h.SetSecure(False)
    return target.h.Remove()
def _list_url_recursive(self, url, log, fname='', filelist=None):
    '''List ARC job directory recursively to find all files.

    Returns `filelist` extended with the relative paths of every file
    found under url/fname.
    '''
    # BUG FIX: the mutable default argument `filelist=[]` was shared
    # across calls, accumulating results from previous listings.
    if filelist is None:
        filelist = []
    # NOTE(review): uses self.userconfig, which is not set in __init__ —
    # confirm where it is assigned before relying on this method.
    dp = arc_utils.DataPoint(url+'/'+fname, self.userconfig)
    files = dp.h.List(arc.DataPoint.INFO_TYPE_NAME | arc.DataPoint.INFO_TYPE_TYPE)
    if not files[1]:
        log.warning("Failed listing %s/%s" % (url, fname))
        return filelist
    for f in files[0]:
        if f.GetType() == f.file_type_file:
            filelist.append((fname+'/'+f.GetName()).strip('/'))
        elif f.GetType() == f.file_type_dir:
            # BUG FIX: recursion called self.listUrlRecursive, a method
            # that does not exist (AttributeError on any subdirectory).
            filelist = self._list_url_recursive(url, log, (fname+'/'+str(f.GetName())).strip('/'), filelist)
    return filelist
def _download_outputs(self, files, logdir, jobid, pandaid, userconfig, log):
    '''Download the output files specified in `files`.

    `files` is a semicolon-separated list of remote paths which may
    contain wildcards.  Log files are placed in `logdir`, everything else
    under a per-panda-id temporary directory.  Returns the tuple
    (fetched, notfetched, notfetchedretry) of job-id lists.
    '''
    # construct datapoint object, initialising connection. Use the same
    # object until base URL changes. TODO group by base URL.
    datapoint = arc_utils.DataPoint(str(jobid), userconfig)
    dp = datapoint.h
    dm = arc.DataMover()
    dm.retry(False)
    dm.passive(True)
    dm.secure(False)
    fetched = []
    notfetched = []
    notfetchedretry = []

    # create required local log dirs
    try:
        # BUG FIX: 0o755 octal literal (old-style 0755 is a syntax error
        # on Python 3; 0o-form is valid from Python 2.6 onwards)
        os.makedirs(logdir, 0o755)
    except OSError as e:
        if e.errno != errno.EEXIST or not os.path.isdir(logdir):
            log.warning('Failed to create directory {0}: {1}'.format(logdir, os.strerror(e.errno)))
            notfetched.append(jobid)
            return (fetched, notfetched, notfetchedretry)

    tmpdldir = os.path.join(self.tmpdir, pandaid)
    try:
        os.makedirs(tmpdldir, 0o755)
    except OSError as e:
        if e.errno != errno.EEXIST or not os.path.isdir(tmpdldir):
            log.warning('Failed to create directory {0}: {1}'.format(tmpdldir, os.strerror(e.errno)))
            notfetched.append(jobid)
            return (fetched, notfetched, notfetchedretry)

    filelist = files.split(';')
    if re.search(r'[\*\[\]\?]', files):
        # found wildcard, need to get sessiondir list
        # BUG FIX: was self.listUrlRecursive, which does not exist
        remotefiles = self._list_url_recursive(jobid, log)
        expandedfiles = []
        for wcf in filelist:
            if re.search(r'[\*\[\]\?]', wcf):
                # only match wildcards in matching dirs
                expandedfiles += [rf for rf in remotefiles if fnmatch.fnmatch(rf, wcf) and os.path.dirname(rf) == os.path.dirname(wcf)]
            else:
                expandedfiles.append(wcf)
        # remove duplicates from wildcard matching through set
        filelist = list(set(expandedfiles))

    for f in filelist:
        # Map known log outputs into logdir with panda-id based names
        if f == 'gmlog/errors':
            localfile = os.path.join(logdir, '%s.log' % pandaid)
        elif f.find('.log') != -1:
            localfile = os.path.join(logdir, '%s.out' % pandaid)
        else:
            localfile = os.path.join(tmpdldir, f)
        remotefile = arc.URL(str(jobid + '/' + f))
        dp.SetURL(remotefile)
        localdp = arc_utils.DataPoint(str(localfile), userconfig)
        # do the copy
        status = dm.Transfer(dp, localdp.h, arc.FileCache(), arc.URLMap())
        if not status and str(status).find('File unavailable') == -1: # tmp fix for globus error which is always retried
            if status.Retryable():
                log.warning('Failed to download but will retry {0}: {1}'.format(dp.GetURL().str(), str(status)))
                notfetchedretry.append(jobid)
            else:
                log.error('Failed to download with permanent failure {0}: {1}'.format(dp.GetURL().str(), str(status)))
                notfetched.append(jobid)
        else:
            os.chmod(localfile, 0o644)
            log.info('Downloaded {0}'.format(dp.GetURL().str()))

    if jobid not in notfetched and jobid not in notfetchedretry:
        fetched.append(jobid)
    return (fetched, notfetched, notfetchedretry)
def _extractAndFixPilotPickle(self, arcjob, pandaid, haveoutput, logurl, log):
    '''
    Extract the pilot pickle from jobSmallFiles.tgz, and fix attributes

    Returns the pilot job-report dictionary.  When no pickle can be
    extracted, a minimal "finished" report with a generic harvester
    error (1008) is fabricated instead.
    '''
    arcid = arcjob['JobID']
    pandapickle = None
    tmpjobdir = os.path.join(self.tmpdir, pandaid)
    if haveoutput:
        log.debug('os.cwd(): {0}'.format(os.getcwd()))
        try:
            # jobSmallFiles.tgz was fetched into the tmp job dir by
            # _download_outputs; the pickle inside is the pilot's report
            smallfiles = tarfile.open(os.path.join(tmpjobdir, 'jobSmallFiles.tgz'))
            pandapickle = smallfiles.extractfile("panda_node_struct.pickle")
        except Exception as e:
            log.error("{0}: failed to extract pickle for arcjob {1}: {2}".format(pandaid, arcid, str(e)))

    if pandapickle:
        jobinfo = arc_utils.ARCPandaJob(filehandle=pandapickle)
        # de-serialise the metadata to json
        try:
            jobinfo.metaData = json.loads(jobinfo.metaData)
        except:
            log.warning("{0}: no metaData in pilot pickle".format(pandaid))
        shutil.rmtree(tmpjobdir, ignore_errors=True)
    else:
        # No usable pickle: build a minimal report and flag the failure
        jobinfo = arc_utils.ARCPandaJob(jobinfo={'jobId': long(pandaid), 'state': 'finished'})
        jobinfo.schedulerID = self.schedulerid
        if logurl:
            jobinfo.pilotID = "%s.out|Unknown|Unknown|Unknown|Unknown" % '/'.join([logurl, pandaid])
        # TODO: set error code based on batch error message (memory kill etc)
        jobinfo.pilotErrorCode = 1008
        if arcjob['Error']:
            jobinfo.pilotErrorDiag = arcjob['Error']
        else:
            # Probably failure getting outputs
            jobinfo.pilotErrorDiag = "Failed to get outputs from CE"

    # The CE hostname is derived from the ARC job id URL in all cases
    jobinfo.computingElement = urlparse(arcid).netloc

    return jobinfo.dictionary()
def get_access_point(self, workspec, panda_id):
    '''Get access point; multi-job workers use a per-job subdirectory.'''
    base = workspec.get_access_point()
    if workspec.mapType == WorkSpec.MT_MultiJobs:
        return os.path.join(base, str(panda_id))
    return base
def post_processing(self, workspec, jobspec_list, map_type):
    '''
    Fetch job output and process pilot info for sending in final heartbeat.
    The pilot pickle is loaded and some attributes corrected (schedulerid,
    pilotlog etc), then converted to dictionary and stored in
    workspec.workAttributes[pandaid]. If pilot pickle cannot be used,
    report ARC error in pilotErrorDiag and fill all possible attributes
    using ARC information.

    Always returns True so harvester proceeds with the heartbeat.
    '''
    arclog = arc_utils.ARCLogger(baselogger, workspec.workerID)
    tmplog = arclog.log
    tmplog.info('Post processing ARC job {0}'.format(workspec.batchID))
    job = workspec.workAttributes['arcjob']
    arcid = job['JobID']
    tmplog.info('Job id {0}'.format(arcid))

    if 'arcdownloadfiles' not in workspec.workAttributes:
        tmplog.error('No files to download')
        return True

    # Assume one-to-one mapping of workers to jobs. If jobspec_list is empty
    # it means the job was cancelled by panda or otherwise forgotten
    if not jobspec_list:
        return True

    # Set certificate to use for interacting with ARC CE
    userconfig = arc.UserConfig(self.cred_type)
    # BUG FIX: passed `usercfg`, an undefined name (NameError at runtime);
    # the UserConfig created above is called `userconfig`.
    if not self._setup_proxy(userconfig, workspec, arcid, tmplog):
        return True

    queueconfigmapper = QueueConfigMapper()
    queueconfig = queueconfigmapper.get_queue(jobspec_list[0].computingSite)
    logbaseurl = queueconfig.submitter.get('logBaseURL')
    logbasedir = queueconfig.submitter.get('logDir', self.tmpdir)
    logsubdir = workspec.workAttributes['logsubdir']
    pandaid = str(jobspec_list[0].PandaID)

    # Construct log path and url
    logurl = '/'.join([logbaseurl, logsubdir, str(pandaid)]) if logbaseurl else None
    logdir = os.path.join(logbasedir, logsubdir)

    # post_processing is only called once, so no retries are done. But keep
    # the possibility here in case it changes
    (fetched, notfetched, notfetchedretry) = self._download_outputs(workspec.workAttributes['arcdownloadfiles'],
                                                                    logdir, arcid, pandaid, userconfig, tmplog)
    if arcid not in fetched:
        tmplog.warning("Could not get outputs of {0}".format(arcid))

    workspec.workAttributes[long(pandaid)] = {}
    workspec.workAttributes[long(pandaid)] = self._extractAndFixPilotPickle(job, pandaid, (arcid in fetched), logurl, tmplog)

    tmplog.debug("pilot info for {0}: {1}".format(pandaid, workspec.workAttributes[long(pandaid)]))
    return True
def get_work_attributes(self, workspec):
    '''Get info from the job to pass back to harvester.

    The attributes of finished jobs are filled in by post_processing,
    so the already-stored attributes are simply returned here.
    '''
    return workspec.workAttributes
def events_requested(self, workspec):
    '''Used to tell harvester that the worker requests events.

    Tries to fetch the event-request json from the ARC session directory;
    returns the decoded request, or {} when absent or unreadable.
    '''
    # per-worker logger
    arclog = arc_utils.ARCLogger(baselogger, workspec.workerID)
    log = arclog.log

    arcjob = workspec.workAttributes['arcjob']
    arcid = arcjob['JobID']

    # Set certificate to use for interacting with ARC CE
    usercfg = arc.UserConfig(self.cred_type)
    if not self._setup_proxy(usercfg, workspec, arcid, log):
        return {}

    remote_json = '%s/%s' % (arcid, jsonEventsRequestFileName)
    local_json = os.path.join(workspec.get_access_point(), jsonEventsRequestFileName)
    log.debug('looking for event request file {0}'.format(remote_json))

    # Try to copy the file; a missing file (ENOENT) is a normal outcome
    status = self._copy_file(remote_json, local_json, usercfg, log)
    if not status:
        if status.GetErrno() == errno.ENOENT:
            log.debug('not found')
        else:
            log.warning('Failed to copy {0}: {1}'.format(remote_json, str(status)))
        return {}

    try:
        with open(local_json) as handle:
            request = json.load(handle)
            os.remove(local_json)
    except Exception:
        log.debug('failed to load json')
        return {}

    log.debug('found')
    return request
def feed_events(self, workspec, events_dict):
'''Havester has an event range to pass to job'''
# get logger
arclog = arc_utils.ARCLogger(baselogger, workspec.workerID)
tmpLog = arclog.log
# Upload to jobid/jsonEventsFeedFileName, delete jobid/jsonEventsRequestFileName
job = workspec.workAttributes['arcjob']
arcid = job['JobID']
# Set certificate to use for interacting with ARC CE
usercfg = arc.UserConfig(self.cred_type)
if not self._setup_proxy(usercfg, workspec, arcid, tmpLog):
return False
retVal = True
if workspec.mapType in [WorkSpec.MT_OneToOne, WorkSpec.MT_MultiWorkers]:
# put the json just under the access point then upload to ARC CE
localJsonFilePath = os.path.join(workspec.get_access_point(), jsonEventsFeedFileName)
tmpLog.debug('feeding events to {0}'.format(localJsonFilePath))
try:
with open(localJsonFilePath, 'w') as jsonFile:
json.dump(events_dict, jsonFile)
except Exception:
core_utils.dump_error_message(tmpLog)
retVal = False
remoteJsonFilePath = '%s/%s' % (arcid, jsonEventsFeedFileName)
# Try to copy the file
status = self._copy_file(localJsonFilePath, remoteJsonFilePath, usercfg, tmpLog)
if not status:
tmpLog.error('Failed to feed events to {0}: {1}'.format(remoteJsonFilePath, str(status)))
retVal = False
else:
remoteJsonEventsRequestFile = '%s/%s' % (arcid, jsonEventsRequestFileName)
status = self._delete_file(remoteJsonEventsRequestFile, usercfg, tmpLog)
if not status and status.GetErrno() != errno.ENOENT:
tmpLog.error('Failed to delete event request file at {0}'.format(remoteJsonEventsRequestFile))
elif workspec.mapType == WorkSpec.MT_MultiJobs:
# TOBEFIXED
| |
# -*- coding: utf-8 -*-
"""
:mod:`channel.worker` -- Multi-device sync API for a single computation device
==============================================================================
.. module:: worker
:platform: Unix
:synopsis: Provide methods for single device Theano code that enable
homogeneous operations across multiple devices.
Contains :class:`Worker` which provides Platoon's basic API for multi-device
operations. Upon creation, a Worker will initiate connections with its node's
:class:`Controller` process (ZMQ) and get access to intra-node lock. A worker
process is meant to have only one Worker instance to manage a corresponding
computation device, e.g. GPU. Thus, Worker is a singleton class.
Worker's available API depends on available backend frameworks. Currently, there
are two ways to use a Worker for global operations on parameters:
1. Through :meth:`Worker.sync_params`, which is its default interface.
2. Or :meth:`Worker.all_reduce` which is a multi-node/GPU collective
operation.
For detailed information about these methods please check their corresponding
documentation, as well as the brief table which compares the two in project's
:file:`README.md`.
Worker also has :meth:`Worker.recv_mb` interface for collecting mini-batches to
work on from Controller.
"""
from __future__ import absolute_import, print_function
import argparse
import os
import sys
import signal
import base64
import numpy
import posix_ipc
import six
import zmq
try:
import pygpu
from pygpu import collectives as gpucoll
from theano import gpuarray as theanoga
from theano import config as theanoconf
except ImportError:
pygpu = None
from ..util import (mmap, PlatoonError, PlatoonWarning, SingletonType)
if six.PY3:
buffer_ = memoryview
else:
buffer_ = buffer # noqa
@six.add_metaclass(SingletonType)
class Worker(object):
"""
Interface for multi-device operations.
This class handles communication/synchronization with other processes.
The features to do so (control channel, mini-batch channel and shared
parameters) are all independent and optional so you don't have to use all
of them.
Parameters
----------
control_port : int
The tcp port number for control (ZMQ).
port : int, optional
The tcp port number for data (ZMQ).
socket_timeout : int, optional
Timeout in ms for both control and data sockets. Default: 5 min
hwm : int, optional
High water mark (see pyzmq docs) for data transfer.
Attributes
----------
shared_params : list of :class:`numpy.ndarray`
This will have `numpy.ndarray` in the same order as `params_descr`
(see :meth:`init_shared_params`). These arrays are backed by shared
memory. Used by :meth:`sync_params` interface.
shared_arrays : dict of str to :class:`numpy.ndarray`
Maps size in bytes to a ndarray in shared memory. Needed in multi-node
operations. Used by :meth:`all_reduce` interface.
"""
def __init__(self, control_port, data_port=None, socket_timeout=300000,
             data_hwm=10, port=None):
    # `port` is kept only to give a clear migration error: it was renamed
    # to `data_port` to match the Controller's parameter name.
    if port is not None:
        raise RuntimeError(
            "The port parameter of Worker was renamed to data_port"
            " (as in the Controller)")
    self.context = zmq.Context()
    self._socket_timeout = socket_timeout  # ms, applies to control and data sockets

    # The process id identifies this worker to the Controller
    self._worker_id = os.getpid()

    # The mini-batch data socket is optional
    if data_port:
        self.init_mb_sock(data_port, data_hwm)
    self._init_control_socket(control_port)

    # Fetch the job's unique id from the Controller and attach to the
    # matching intra-node POSIX semaphore used by lock()/unlock().
    self._job_uid = self.send_req("platoon-get_job_uid")
    print("JOB UID received from the controler {}".format(self._job_uid))
    self._lock = posix_ipc.Semaphore("{}_lock".format(self._job_uid))

    signal.signal(signal.SIGINT, self._handle_force_close)

    # Try to join the local GPU collectives world; on failure only the
    # `all_reduce` interface is disabled, the rest of the API still works.
    try:
        self._register_to_platoon()
    except Exception as exc:
        print(PlatoonWarning("Failed to register in a local GPU comm world.", exc),
              file=sys.stderr)
        print(PlatoonWarning("Platoon `all_reduce` interface will not be functional."),
              file=sys.stderr)
        self._local_comm = None

    # Bookkeeping for shared-memory buffers used by all_reduce:
    self._shmem_names = dict()   # posix shared-memory segment names (presumably keyed like shared_arrays — confirm)
    self._shmrefs = dict()       # keep mmap references alive for the worker's lifetime
    self.shared_arrays = dict()  # maps size in bytes to an ndarray backed by shared memory
################################################################################
# Basic Control Interface #
################################################################################
def send_req(self, req, info=None):
    """
    Send a control request to this node's :class:`Controller` and wait
    for its reply.

    Parameters
    ----------
    req : object
        Json-encodable object (usually a Python string) naming the type
        of request.
    info : object, optional
        Json-encodable payload accompanying the request.

    Returns
    -------
    object
        The json-decoded reply.

    Raises
    ------
    PlatoonError
        If no reply arrives within the socket timeout.
    """
    message = {'worker_id': self._worker_id, 'req': req, 'req_info': info}
    self.csocket.send_json(message)
    ready = dict(self.cpoller.poll(self._socket_timeout))
    if not ready or ready.get(self.csocket) != zmq.POLLIN:
        raise PlatoonError("Control Socket: recv timeout")
    return self.csocket.recv_json()
    def lock(self, timeout=None):
        """
        Acquire intra-node lock.

        This is advisory and does not prevent concurrent access. Acquiring
        decrements the underlying POSIX semaphore (:attr:`_lock`), which
        starts at 1, so subsequent acquisitions block while it is at 0.

        Parameters
        ----------
        timeout : int, optional
            Amount of time to wait for the lock to be available. A timeout of 0
            will raise an error immediately if the lock is not available.
            Default: None, which will block until the lock is released.

        .. versionchanged:: 0.6.0
           This method used to be called `lock_params`.
        """
        self._lock.acquire(timeout)
    def unlock(self):
        """
        Release intra-node lock.

        Releasing increments the underlying POSIX semaphore, :attr:`_lock`.
        The current implementation does not ensure that the process that
        acquired the lock is also the one that releases it, nor does it
        prevent one process from releasing more than once (which would allow
        more than one process to hold the lock). Make sure you follow proper
        lock/unlock logic in your program to avoid these problems.

        .. versionchanged:: 0.6.0
           This method used to be called `unlock_params`.
        """
        self._lock.release()
    # The four values below are populated by :meth:`_register_to_platoon`
    # from the Controller's "platoon-get_platoon_info" response.
    @property
    def local_size(self):
        """Number of workers assigned to local host's controller."""
        return self._local_size
    @property
    def local_rank(self):
        """Worker's rank in respect to local host's controller (NCCL comm world)."""
        return self._local_rank
    @property
    def global_size(self):
        """Number of workers spawned across all hosts in total."""
        return self._global_size
    @property
    def global_rank(self):
        """Worker's rank in respect to all hosts' controllers in total."""
        return self._global_rank
################################################################################
# Initialization and Finalization Methods #
################################################################################
    def _handle_force_close(self, signum, frame):
        """Handle SIGINT signals from Controller.

        This is expected to happen when something abnormal has happened in
        other workers, which implies that the training procedure should stop
        and fail.

        .. versionadded:: 0.6.0
        """
        self.close()
        sys.exit(1)  # Exit with a non-zero status to mark failure.
    def close(self):
        """
        Closes ZMQ connections, POSIX semaphores and shared memory.
        """
        print("Closing connections and unlinking memory...", file=sys.stderr)
        # The sockets may not exist if initialization failed part-way through,
        # hence the hasattr guards.
        if hasattr(self, 'asocket'):
            self.asocket.close()
        if hasattr(self, 'csocket'):
            self.csocket.close()
        self.context.term()
        self._lock.close()
        try:
            self._lock.unlink()
        except posix_ipc.ExistentialError:
            # Already unlinked -- presumably by another worker of the same
            # job, since the semaphore name is shared.
            pass
        if hasattr(self, '_shmref'):
            try:
                self._shmref.unlink()
            except posix_ipc.ExistentialError:
                pass
        # Unlink every shared memory buffer created through :meth:`shared`.
        for shmref in self._shmrefs.values():
            try:
                shmref.unlink()
            except posix_ipc.ExistentialError:
                pass
    def _register_to_platoon(self):
        """
        Asks Controller for configuration information and creates a NCCL
        communicator that participates in the local node's workers world.

        For this it is needed that Theano is imported. Through Theano, this
        method gets access to the single GPU context of this worker process.
        This context is to be used in all computations done by a worker's
        process.

        .. note::
           It is necessary that this initialization method is called
           successfully before :meth:`all_reduce` in order for it to be
           available and functional.

        .. versionadded:: 0.6.0
        """
        if pygpu:
            self.ctx_name = None
            self.gpuctx = theanoga.get_context(self.ctx_name)
            self.device = theanoconf.device
            # Create a clique id for the local NCCL communicator world.
            self._local_id = gpucoll.GpuCommCliqueId(context=self.gpuctx)
            # Ask controller for local's info to participate in.
            # The clique id is exchanged base64-encoded since it is sent
            # over a JSON control channel.
            lid = base64.b64encode(self._local_id.comm_id).decode('ascii')
            response = self.send_req("platoon-get_platoon_info",
                                     info={'device': self.device,
                                           'local_id': lid})
            # Adopt the clique id agreed on by the Controller.
            nlid = base64.b64decode(response['local_id'].encode('ascii'))
            self._local_id.comm_id = bytearray(nlid)
            self._local_size = response['local_size']
            self._local_rank = response['local_rank']
            self._local_comm = gpucoll.GpuComm(self._local_id,
                                               self._local_size,
                                               self._local_rank)
            self._multinode = response['multinode']
            self._global_size = response['global_size']
            self._global_rank = response['global_rank']
        else:
            raise AttributeError("pygpu or theano is not imported")
    def init_mb_sock(self, port, data_hwm=10):
        """
        Initialize the mini-batch data socket.

        Parameters
        ----------
        port : int
            The tcp port to reach the mini-batch server on.
        data_hwm : int, optional
            High water mark, see pyzmq docs.

        .. note::
           This must be called before using :meth:`recv_mb`.
        """
        self.asocket = self.context.socket(zmq.PULL)
        # LINGER 0: discard pending messages on close instead of blocking.
        self.asocket.setsockopt(zmq.LINGER, 0)
        self.asocket.set_hwm(data_hwm)
        self.asocket.connect("tcp://localhost:{}".format(port))
        self.apoller = zmq.Poller()
        self.apoller.register(self.asocket, zmq.POLLIN)
    def _init_control_socket(self, port):
        """
        Initialize control socket.

        Parameters
        ----------
        port : int
            The tcp port where the control master is listening at.

        .. note::
           This must be called before using :meth:`send_req`.
        """
        self.csocket = self.context.socket(zmq.REQ)
        # LINGER 0: discard pending messages on close instead of blocking.
        self.csocket.setsockopt(zmq.LINGER, 0)
        self.csocket.connect('tcp://localhost:{}'.format(port))
        self.cpoller = zmq.Poller()
        self.cpoller.register(self.csocket, zmq.POLLIN)
################################################################################
# Collectives Interface #
################################################################################
def shared(self, array):
"""Creates a new POSIX shared memory buffer to be shared among Workers
and their Controller and maps the size of `array` to that buffer.
Controller is requested to create a new shared memory buffer with the
same size as `array` in order to be used in multi-GPU/node Platoon
collective operations through :meth:`all_reduce` interface. All
participants in the same node have access to that memory.
:param array: This array's size in bytes will be mapped to a shared
memory buffer in host with the same size.
:type array: :ref:`pygpu.gpuarray.GpuArray`
Returns
-------
shared_array : :ref:`numpy.ndarray`
A newly created shared memory buffer with the same size or an already
allocated one.
Notes
-----
*For internal implementation*: There should probably be a barrier across
nodes' Workers to ensure that, so far, each Controller has serviced
a new shared memory's name to all Workers. This is due to the fact that
Controller can service one Worker at a time and a Platoon collective
service is a blocking one | |
lexRefType
class langType(GeneratedsSuper):
    """The Language element containing a reference to a language name or
    (if possible persistent) definition. ISO-639-3 still seems to be
    the best choice for language codes and closest to persistent
    language ID's seem to be the http://cdb.iso.org/lg/...
    identifiers also used by the iso-language-639-3 component in the
    CLARIN ComponentRegistry?

    Generated binding class: carries only the LANG_ID, LANG_DEF and
    LANG_LABEL attributes and never any element content.
    """
    subclass = None
    superclass = None
    def __init__(self, LANG_ID=None, LANG_DEF=None, LANG_LABEL=None):
        self.original_tagname_ = None
        self.LANG_ID = _cast(None, LANG_ID)
        self.LANG_DEF = _cast(None, LANG_DEF)
        self.LANG_LABEL = _cast(None, LANG_LABEL)
    def factory(*args_, **kwargs_):
        # Allow an externally registered subclass module (or an assigned
        # `subclass` attribute) to override the concrete type instantiated.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, langType)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if langType.subclass:
            return langType.subclass(*args_, **kwargs_)
        else:
            return langType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_LANG_ID(self): return self.LANG_ID
    def set_LANG_ID(self, LANG_ID): self.LANG_ID = LANG_ID
    def get_LANG_DEF(self): return self.LANG_DEF
    def set_LANG_DEF(self, LANG_DEF): self.LANG_DEF = LANG_DEF
    def get_LANG_LABEL(self): return self.LANG_LABEL
    def set_LANG_LABEL(self, LANG_LABEL): self.LANG_LABEL = LANG_LABEL
    def hasContent_(self):
        # langType is attribute-only, so it never has element content.
        # The generated original tested an empty parenthesized expression
        # (`if ():`), an always-false condition; simplified here.
        return False
    def export(self, outfile, level, namespace_='', name_='langType', namespacedef_='', pretty_print=True):
        """Write this element to `outfile` as XML (attributes only, so the
        element is always self-closing)."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('langType')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='langType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='langType', pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='langType'):
        # Each attribute is emitted at most once; `already_processed` lets
        # subclasses cooperate without duplicating attributes.
        if self.LANG_ID is not None and 'LANG_ID' not in already_processed:
            already_processed.add('LANG_ID')
            outfile.write(' LANG_ID=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.LANG_ID), input_name='LANG_ID')), ))
        if self.LANG_DEF is not None and 'LANG_DEF' not in already_processed:
            already_processed.add('LANG_DEF')
            outfile.write(' LANG_DEF=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.LANG_DEF), input_name='LANG_DEF')), ))
        if self.LANG_LABEL is not None and 'LANG_LABEL' not in already_processed:
            already_processed.add('LANG_LABEL')
            outfile.write(' LANG_LABEL=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.LANG_LABEL), input_name='LANG_LABEL')), ))
    def exportChildren(self, outfile, level, namespace_='', name_='langType', fromsubclass_=False, pretty_print=True):
        # No child elements are defined for langType.
        pass
    def build(self, node):
        """Populate this object from an lxml/ElementTree `node`."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('LANG_ID', node)
        if value is not None and 'LANG_ID' not in already_processed:
            already_processed.add('LANG_ID')
            self.LANG_ID = value
        value = find_attr_value_('LANG_DEF', node)
        if value is not None and 'LANG_DEF' not in already_processed:
            already_processed.add('LANG_DEF')
            self.LANG_DEF = value
        value = find_attr_value_('LANG_LABEL', node)
        if value is not None and 'LANG_LABEL' not in already_processed:
            already_processed.add('LANG_LABEL')
            self.LANG_LABEL = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # No child elements are defined for langType.
        pass
# end class langType
class licenseType(GeneratedsSuper):
    """The license element can be used to include license information in
    the eaf file itself.

    Generated binding class: carries a LICENSE_URL attribute and the
    element's text content in `valueOf_`.
    """
    subclass = None
    superclass = None
    def __init__(self, LICENSE_URL=None, valueOf_=None):
        self.original_tagname_ = None
        self.LICENSE_URL = _cast(None, LICENSE_URL)
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        # Allow an externally registered subclass module (or an assigned
        # `subclass` attribute) to override the concrete type instantiated.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, licenseType)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if licenseType.subclass:
            return licenseType.subclass(*args_, **kwargs_)
        else:
            return licenseType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_LICENSE_URL(self): return self.LICENSE_URL
    def set_LICENSE_URL(self, LICENSE_URL): self.LICENSE_URL = LICENSE_URL
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def hasContent_(self):
        # Numeric values count as content even when falsy (e.g. 0); any
        # other value is tested for truthiness.
        if (
            (1 if type(self.valueOf_) in [int,float] else self.valueOf_)
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='licenseType', namespacedef_='', pretty_print=True):
        """Write this element to `outfile` as XML."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('licenseType')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='licenseType')
        if self.hasContent_():
            # Text content is written directly after the opening tag (no
            # newline/indent) so no whitespace is added to the value.
            outfile.write('>')
            outfile.write(self.convert_unicode(self.valueOf_))
            self.exportChildren(outfile, level + 1, namespace_='', name_='licenseType', pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='licenseType'):
        if self.LICENSE_URL is not None and 'LICENSE_URL' not in already_processed:
            already_processed.add('LICENSE_URL')
            outfile.write(' LICENSE_URL=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.LICENSE_URL), input_name='LICENSE_URL')), ))
    def exportChildren(self, outfile, level, namespace_='', name_='licenseType', fromsubclass_=False, pretty_print=True):
        # No child elements are defined for licenseType.
        pass
    def build(self, node):
        """Populate this object from an lxml/ElementTree `node`."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('LICENSE_URL', node)
        if value is not None and 'LICENSE_URL' not in already_processed:
            already_processed.add('LICENSE_URL')
            self.LICENSE_URL = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # No child elements are defined for licenseType.
        pass
# end class licenseType
class CV_RESOURCE(GeneratedsSuper):
    """Generated binding class for a controlled-vocabulary resource:
    holds LANGUAGE, CONTROLLED_VOCABULARY and EXTERNAL_REF child element
    lists plus DATE, AUTHOR and VERSION attributes."""
    subclass = None
    superclass = None
    def __init__(self, DATE=None, AUTHOR=None, VERSION=None, LANGUAGE=None, CONTROLLED_VOCABULARY=None, EXTERNAL_REF=None):
        self.original_tagname_ = None
        # DATE may be passed as a string; parse it into a datetime.
        if isinstance(DATE, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(DATE, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = DATE
        self.DATE = initvalue_
        self.AUTHOR = _cast(None, AUTHOR)
        self.VERSION = _cast(None, VERSION)
        # Child element lists default to fresh empty lists (never a shared
        # mutable default).
        if LANGUAGE is None:
            self.LANGUAGE = []
        else:
            self.LANGUAGE = LANGUAGE
        if CONTROLLED_VOCABULARY is None:
            self.CONTROLLED_VOCABULARY = []
        else:
            self.CONTROLLED_VOCABULARY = CONTROLLED_VOCABULARY
        if EXTERNAL_REF is None:
            self.EXTERNAL_REF = []
        else:
            self.EXTERNAL_REF = EXTERNAL_REF
    def factory(*args_, **kwargs_):
        # Allow an externally registered subclass module (or an assigned
        # `subclass` attribute) to override the concrete type instantiated.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, CV_RESOURCE)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if CV_RESOURCE.subclass:
            return CV_RESOURCE.subclass(*args_, **kwargs_)
        else:
            return CV_RESOURCE(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_LANGUAGE(self): return self.LANGUAGE
    def set_LANGUAGE(self, LANGUAGE): self.LANGUAGE = LANGUAGE
    def add_LANGUAGE(self, value): self.LANGUAGE.append(value)
    def insert_LANGUAGE_at(self, index, value): self.LANGUAGE.insert(index, value)
    def replace_LANGUAGE_at(self, index, value): self.LANGUAGE[index] = value
    def get_CONTROLLED_VOCABULARY(self): return self.CONTROLLED_VOCABULARY
    def set_CONTROLLED_VOCABULARY(self, CONTROLLED_VOCABULARY): self.CONTROLLED_VOCABULARY = CONTROLLED_VOCABULARY
    def add_CONTROLLED_VOCABULARY(self, value): self.CONTROLLED_VOCABULARY.append(value)
    def insert_CONTROLLED_VOCABULARY_at(self, index, value): self.CONTROLLED_VOCABULARY.insert(index, value)
    def replace_CONTROLLED_VOCABULARY_at(self, index, value): self.CONTROLLED_VOCABULARY[index] = value
    def get_EXTERNAL_REF(self): return self.EXTERNAL_REF
    def set_EXTERNAL_REF(self, EXTERNAL_REF): self.EXTERNAL_REF = EXTERNAL_REF
    def add_EXTERNAL_REF(self, value): self.EXTERNAL_REF.append(value)
    def insert_EXTERNAL_REF_at(self, index, value): self.EXTERNAL_REF.insert(index, value)
    def replace_EXTERNAL_REF_at(self, index, value): self.EXTERNAL_REF[index] = value
    def get_DATE(self): return self.DATE
    def set_DATE(self, DATE): self.DATE = DATE
    def get_AUTHOR(self): return self.AUTHOR
    def set_AUTHOR(self, AUTHOR): self.AUTHOR = AUTHOR
    def get_VERSION(self): return self.VERSION
    def set_VERSION(self, VERSION): self.VERSION = VERSION
    def hasContent_(self):
        # Content exists iff any of the child element lists is non-empty.
        if (
            self.LANGUAGE or
            self.CONTROLLED_VOCABULARY or
            self.EXTERNAL_REF
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='CV_RESOURCE', namespacedef_='', pretty_print=True):
        """Write this element and its children to `outfile` as XML."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('CV_RESOURCE')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='CV_RESOURCE')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='CV_RESOURCE', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='CV_RESOURCE'):
        if self.DATE is not None and 'DATE' not in already_processed:
            already_processed.add('DATE')
            outfile.write(' DATE="%s"' % self.gds_format_datetime(self.DATE, input_name='DATE'))
        if self.AUTHOR is not None and 'AUTHOR' not in already_processed:
            already_processed.add('AUTHOR')
            outfile.write(' AUTHOR=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.AUTHOR), input_name='AUTHOR')), ))
        if self.VERSION is not None and 'VERSION' not in already_processed:
            already_processed.add('VERSION')
            outfile.write(' VERSION=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.VERSION), input_name='VERSION')), ))
    def exportChildren(self, outfile, level, namespace_='', name_='CV_RESOURCE', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for LANGUAGE_ in self.LANGUAGE:
            LANGUAGE_.export(outfile, level, namespace_, name_='LANGUAGE', pretty_print=pretty_print)
        for CONTROLLED_VOCABULARY_ in self.CONTROLLED_VOCABULARY:
            CONTROLLED_VOCABULARY_.export(outfile, level, namespace_, name_='CONTROLLED_VOCABULARY', pretty_print=pretty_print)
        for EXTERNAL_REF_ in self.EXTERNAL_REF:
            EXTERNAL_REF_.export(outfile, level, namespace_, name_='EXTERNAL_REF', pretty_print=pretty_print)
    def build(self, node):
        """Populate this object from an lxml/ElementTree `node`."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('DATE', node)
        if value is not None and 'DATE' not in already_processed:
            already_processed.add('DATE')
            try:
                self.DATE = self.gds_parse_datetime(value)
            except ValueError as exp:
                raise ValueError('Bad date-time attribute (DATE): %s' % exp)
        value = find_attr_value_('AUTHOR', node)
        if value is not None and 'AUTHOR' not in already_processed:
            already_processed.add('AUTHOR')
            self.AUTHOR = value
        value = find_attr_value_('VERSION', node)
        if value is not None and 'VERSION' not in already_processed:
            already_processed.add('VERSION')
            self.VERSION = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Dispatch on the child tag name and build the matching binding
        # object, preserving the tag the child was parsed under.
        if nodeName_ == 'LANGUAGE':
            obj_ = langType.factory()
            obj_.build(child_)
            self.LANGUAGE.append(obj_)
            obj_.original_tagname_ = 'LANGUAGE'
        elif nodeName_ == 'CONTROLLED_VOCABULARY':
            obj_ = convocType.factory()
            obj_.build(child_)
            self.CONTROLLED_VOCABULARY.append(obj_)
            obj_.original_tagname_ = 'CONTROLLED_VOCABULARY'
        elif nodeName_ == 'EXTERNAL_REF':
            obj_ = extRefType.factory()
            obj_.build(child_)
            self.EXTERNAL_REF.append(obj_)
            obj_.original_tagname_ = 'EXTERNAL_REF'
# end class CV_RESOURCE
class MEDIA_DESCRIPTORType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, MEDIA_URL=None, RELATIVE_MEDIA_URL=None, MIME_TYPE=None, TIME_ORIGIN=None, EXTRACTED_FROM=None):
self.original_tagname_ = None
self.MEDIA_URL = _cast(None, MEDIA_URL)
self.RELATIVE_MEDIA_URL = _cast(None, RELATIVE_MEDIA_URL)
self.MIME_TYPE = _cast(None, MIME_TYPE)
self.TIME_ORIGIN = _cast(int, TIME_ORIGIN)
self.EXTRACTED_FROM = _cast(None, EXTRACTED_FROM)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, MEDIA_DESCRIPTORType)
if subclass is not None:
return subclass(*args_, | |
dtype=np.int64)
pv = np.zeros(3, dtype=np.int64)
pt = np.zeros(3, dtype=np.int64)
dr = np.zeros(3, dtype=np.float64)
density_t = np.zeros(2, dtype=np.float64)
grad = np.zeros(3, dtype=np.float64)
grad_dir = np.zeros(3, dtype=np.float64)
max_grad = np.float64(0.)
known = np.zeros((vx, vy, vz), dtype=np.int8)
# keep track of the lead index for filling the progress bar
lead_idx = 0
# for index in range of the volume size
for i in np.ndindex(vx, vy, vz):
if i[0] != lead_idx:
lead_idx += 1
i_c[0] += 1
# skip if volume has been visited
if volumes[i] == -1:
continue
elif known[i] == 2:
continue
# we've visited the point
known[i] = 1
# init p for current point, pv for next point in volume space
# pd for next point in density space
for j in range(3):
p[j] = i[j] + idx[j]
pd[j] = p[j]
path[0][j] = i[j]
dr[j] = 0.
# path size is now 1 and max_val is current point
path_num = 1
while True:
max_val = density[p[0], p[1], p[2]]
# calculate density of heptacube around point
for j in range(3):
# convert to density space
pd[j] += 1
# wrap in pbc
if pd[j] < 0:
pd[j] += density.shape[j]
elif pd[j] >= density.shape[j]:
pd[j] -= density.shape[j]
# store density at p+1
density_t[0] = density[pd[0], pd[1], pd[2]]
pd[j] -= 2
# rewrap
if pd[j] < 0:
pd[j] += density.shape[j]
elif pd[j] >= density.shape[j]:
pd[j] -= density.shape[j]
# store density of p-1
density_t[1] = density[pd[0], pd[1], pd[2]]
# if p is max in this axis grad is zero
# else grad is density[p+1] - density[p-1] / 2
if density_t[0] <= max_val >= density_t[1]:
grad[j] = 0.
else:
grad[j] = (density_t[0] - density_t[1]) / 2.
# reset current pd
pd[j] = p[j]
# convert grad to direct coords
max_grad = 0.
for j in range(3):
grad_dir[j] = ((T_grad[j, 0] * grad[0])
+ (T_grad[j, 1] * grad[1])
+ (T_grad[j, 2] * grad[2]))
if grad_dir[j] > max_grad:
max_grad = grad_dir[j]
elif -grad_dir[j] > max_grad:
max_grad = -grad_dir[j]
# max grad is zero then do ongrid step
if max_grad < 1E-14:
for j in range(3):
pv[j] = pd[j] - idx[j]
else:
for j in range(3):
grad_dir[j] /= max_grad
if grad_dir[j] > 0:
int_grad = np.int64(grad_dir[j] + .5)
else:
int_grad = np.int64(grad_dir[j] - .5)
pd[j] = p[j] + int_grad
dr[j] += grad_dir[j] - int_grad
if dr[j] > 0:
int_dr = np.int64(dr[j] + .5)
else:
int_dr = np.int64(dr[j] - .5)
pd[j] += int_dr
dr[j] -= int_dr
if pd[j] >= density.shape[j]:
pd[j] -= density.shape[j]
elif pd[j] < 0:
pd[j] += density.shape[j]
pv[j] = pd[j] - idx[j]
# check if pv is outside of volume space and either extend volume
# space or wrap back in
extend_flag = False
for j in range(3):
# outside volume to left
if pv[j] < negative_len[j]:
upper = pv[j] + density.shape[j] - positive_len[j] + 1
lower = (pv[j] - negative_len[j]) * -1
if upper <= 0:
pv[j] += density.shape[j]
elif upper > lower:
new_negative_len[j] -= vol_shape[j] // 2
extend[j] += vol_shape[j] // 2
extend_flag = True
else:
new_positive_len[j] += vol_shape[j] // 2
extend[j] += vol_shape[j] // 2
pv[j] += density.shape[j]
extend_flag = True
elif pv[j] >= positive_len[j]:
upper = pv[j] - positive_len[j] + 1
lower = (pv[j] - density.shape[j] - negative_len[j]) * -1
if lower <= 0:
pv[j] -= density.shape[j]
elif upper > lower:
new_negative_len[j] -= vol_shape[j] // 2
extend[j] += vol_shape[j] // 2
pv[j] -= density.shape[j]
extend_flag = True
else:
new_positive_len[j] += vol_shape[j] // 2
extend[j] += vol_shape[j] // 2
extend_flag = True
if extend_flag:
for j in range(3):
if extend[j] > density.shape[j]:
extend[j] = density.shape[j]
volumes = volume_extend(volumes, positive_len, extend)
known = volume_extend(known, positive_len, extend)
for j in range(3):
if volumes.shape[j] == density.shape[j]:
positive_len[j] = density.shape[j]
negative_len[j] = 0
else:
positive_len[j] = new_positive_len[j]
negative_len[j] = new_negative_len[j]
# already been here this path
if known[pv[0], pv[1], pv[2]] == 1:
for j in range(3):
dr[j] = 0.
pd[j] = p[j]
pv[j] = p[j] - idx[j]
max_val = density[p[0], p[1], p[2]]
ctr_val = max_val
for ix in range(-1, 2):
# shift p_x into density space and adjust for pbc
pt[0] = p[0] + ix
if pt[0] < 0:
pt[0] += density.shape[0]
elif pt[0] >= density.shape[0]:
pt[0] -= density.shape[0]
for iy in range(-1, 2):
# shift p_y into density space and adjust for pbc
pt[1] = p[1] + iy
if pt[1] < 0:
pt[1] += density.shape[1]
elif pt[1] >= density.shape[1]:
pt[1] -= density.shape[1]
for iz in range(-1, 2):
# shift p_z into density space and adjust for pbc
pt[2] = p[2] + iz
if pt[2] < 0:
pt[2] += density.shape[2]
elif pt[2] >= density.shape[2]:
pt[2] -= density.shape[2]
# check for new maxima, save density and index
pd_val = density[pt[0], pt[1], pt[2]]
pd_val = (pd_val - ctr_val) * dist_mat[ix, iy, iz]
pd_val += ctr_val
if pd_val > max_val:
max_val = pd_val
for j in range(3):
pd[j] = pt[j]
pv[j] = pd[j] - idx[j]
extend_flag = False
break_flag = True
for j in range(3):
# outside volume to left
if pv[j] < negative_len[j]:
upper = pv[j] + density.shape[j] - positive_len[j] + 1
lower = (pv[j] - negative_len[j]) * -1
if upper <= 0:
pv[j] += density.shape[j]
elif upper > lower:
new_negative_len[j] -= vol_shape[j] // 2
extend[j] += vol_shape[j] // 2
extend_flag = True
else:
new_positive_len[j] += vol_shape[j] // 2
extend[j] += vol_shape[j] // 2
pv[j] += density.shape[j]
extend_flag = True
elif pv[j] >= positive_len[j]:
upper = pv[j] - positive_len[j] + 1
lower = (pv[j] - density.shape[j] - negative_len[j])
lower *= -1
if lower <= 0:
pv[j] -= density.shape[j]
elif upper > lower:
new_negative_len[j] -= vol_shape[j] // 2
extend[j] += vol_shape[j] // 2
pv[j] -= density.shape[j]
extend_flag = True
else:
new_positive_len[j] += vol_shape[j] // 2
extend[j] += vol_shape[j] // 2
extend_flag = True
if break_flag and pd[j] != p[j]:
break_flag = False
if extend_flag:
for j in range(3):
if extend[j] > density.shape[j]:
extend[j] = density.shape[j]
volumes = volume_extend(volumes, positive_len, extend)
known = volume_extend(known, positive_len, extend)
for j in range(3):
if volumes.shape[j] == density.shape[j]:
positive_len[j] = density.shape[j]
negative_len[j] = 0
else:
positive_len[j] = new_positive_len[j]
negative_len[j] = new_negative_len[j]
if break_flag:
# store maxima/edge in density space
vol_num = 0
if volumes[pv[0], pv[1], pv[2]] != 0:
vol_num = volumes[pv[0], pv[1], pv[2]]
else:
for k in range(3):
if pv[k] >= vol_shape[k]:
vol_num = -2
elif pv[k] < 0:
vol_num = -2
break
# if known break without updating p
if known[pv[0], pv[1], pv[2]] == 2:
vol_num = volumes[pv[0], pv[1], pv[2]]
break
# no break condition so add point to path
else:
if path_num >= path.shape[0]:
path = array_assign(
path, path.shape[0], path.shape[0] + vx)
for j in range(3):
p[j] = pd[j]
path[path_num][j] = pv[j]
path_num += 1
known[pv[0], pv[1], pv[2]] = 1
# if the volume is empty then create a new one
if vol_num == -2:
if edge_num >= edge_max.shape[0]:
edge_max = array_assign(edge_max, edge_max.shape[0],
edge_max.shape[0] + vx)
# add max to bader_max list add one to len counter
for j in range(3):
edge_max[edge_num][j] = pd[j]
edge_num += 1
vol_num = -2 - edge_num # -1 is vacuum, -2 is maxima flag
# we are at a maxima
elif vol_num == 0:
if bader_num >= bader_max.shape[0]:
bader_max = array_assign(bader_max, bader_max.shape[0],
bader_max.shape[0] + vx)
# add max to bader_max list add one to len counter
for j in range(3):
bader_max[bader_num][j] = pd[j]
bader_num += 1
vol_num = bader_num
# assign bader_num to volumes and adjust known
for j in range(path_num):
for k in range(3):
p[k] = path[j][k]
pv[k] = p[k]
pt[k] = p[k]
volumes[p[0], p[1], p[2]] = vol_num
# this should never == 2 ?
if known[p[0], p[1], p[2]] != 2:
known[p[0], p[1], p[2]] = 0
for k in range(3):
pv[k] += 1
pt[k] += 1
# pv[k] check is in bounds, if not we havent been there so skip
if negative_len[k] <= pv[k] < positive_len[k]:
known_flag = True
vol_temp = volumes[pv[0], pv[1], pv[2]]
if | |
of the release of MySQL Shell 8.0.24, in order to use Inbound
Replication into an MySQL Database Service instance with High
Availability, all tables at the source server need to have Primary Keys.
This needs to be fixed manually before running the dump. Starting with
MySQL 8.0.23 invisible columns may be used to add Primary Keys without
changing the schema compatibility, for more information see:
https://dev.mysql.com/doc/refman/en/invisible-columns.html.
In order to use MySQL Database Service instance with High Availability,
all tables at the MDS server need to have Primary Keys. This can be fixed
automatically using the create_invisible_pks compatibility value.
Please refer to the MySQL Database Service documentation for more
information about restrictions and compatibility.
Dumping to a Bucket in the OCI Object Storage
If the osBucketName option is used, the dump is stored in the specified
OCI bucket, connection is established using the local OCI profile. The
directory structure is simulated within the object name.
The osNamespace, ociConfigFile and ociProfile options cannot be used if
the osBucketName option is set to an empty string.
The osNamespace option overrides the OCI namespace obtained based on the
tenancy ID from the local OCI profile.
Enabling dump loading using preauthenticated requests
To enable loading a dump without requiring an OCI Profile, the dump
operations can automatically generate a preauthenticated request (PAR)
for every file generated on the dump operation, this is done by enabling
the ociParManifest option.
When the ociParManifest option is enabled, a file named "@.manifest.json"
is generated, it contains the PAR for each file generated on the dump.
The manifest is updated as the dump operation progresses.
The ociParManifest option cannot be used if osBucketName is not set. The
default value of this option depends on the dump settings: if ocimds is
enabled and osBucketName is specified then it will be enabled, otherwise
it will be disabled. In any case, if the option is explicitly set to a
value, the user provided value will be used.
When creating PARs, an expiration time is required, it can be defined
through the ociParExpireTime option. If the option is not used, a
    predefined expiration time will be used equivalent to a week after the
dump operation started. The values assigned to this option should be
conformant to RFC3339.
The ociParExpireTime option cannot be used if the ociParManifest option
is not enabled.
EXCEPTIONS
ArgumentError in the following scenarios:
- If any of the input arguments contains an invalid value.
RuntimeError in the following scenarios:
- If there is no open global session.
- If creating the output directory fails.
- If creating or writing to the output file fails.
#@<OUT> util dump_schemas help
NAME
dump_schemas - Dumps the specified schemas to the files in the output
directory.
SYNTAX
util.dump_schemas(schemas, outputUrl[, options])
WHERE
schemas: List of schemas to be dumped.
outputUrl: Target directory to store the dump files.
options: Dictionary with the dump options.
DESCRIPTION
The schemas parameter cannot be an empty list.
The outputUrl specifies where the dump is going to be stored.
By default, a local directory is used, and in this case outputUrl can be
prefixed with file:// scheme. If a relative path is given, the absolute
path is computed as relative to the current working directory. If the
output directory does not exist but its parent does, it is created. If
the output directory exists, it must be empty. All directories are
created with the following access rights (on operating systems which
support them): rwxr-x---. All files are created with the following access
rights (on operating systems which support them): rw-r-----.
The following options are supported:
- excludeTables: list of strings (default: empty) - List of tables to be
excluded from the dump in the format of schema.table.
- ocimds: bool (default: false) - Enable checks for compatibility with
MySQL Database Service (MDS)
- compatibility: list of strings (default: empty) - Apply MySQL Database
Service compatibility modifications when writing dump files. Supported
values: "create_invisible_pks", "force_innodb", "ignore_missing_pks",
"skip_invalid_accounts", "strip_definers", "strip_restricted_grants",
"strip_tablespaces".
- events: bool (default: true) - Include events from each dumped schema.
- routines: bool (default: true) - Include functions and stored
procedures for each dumped schema.
- triggers: bool (default: true) - Include triggers for each dumped
table.
- tzUtc: bool (default: true) - Convert TIMESTAMP data to UTC.
- consistent: bool (default: true) - Enable or disable consistent data
dumps.
- ddlOnly: bool (default: false) - Only dump Data Definition Language
(DDL) from the database.
- dataOnly: bool (default: false) - Only dump data from the database.
- dryRun: bool (default: false) - Print information about what would be
dumped, but do not dump anything.
- chunking: bool (default: true) - Enable chunking of the tables.
- bytesPerChunk: string (default: "64M") - Sets average estimated number
of bytes to be written to each chunk file, enables chunking.
- threads: int (default: 4) - Use N threads to dump data chunks from the
server.
- maxRate: string (default: "0") - Limit data read throughput to maximum
rate, measured in bytes per second per thread. Use maxRate="0" to set
no limit.
- showProgress: bool (default: true if stdout is a TTY device, false
otherwise) - Enable or disable dump progress information.
- defaultCharacterSet: string (default: "utf8mb4") - Character set used
for the dump.
- compression: string (default: "zstd") - Compression used when writing
the data dump files, one of: "none", "gzip", "zstd".
- osBucketName: string (default: not set) - Use specified OCI bucket for
the location of the dump.
- osNamespace: string (default: not set) - Specifies the namespace where
the bucket is located, if not given it will be obtained using the
tenancy id on the OCI configuration.
- ociConfigFile: string (default: not set) - Use the specified OCI
configuration file instead of the one in the default location.
- ociProfile: string (default: not set) - Use the specified OCI profile
instead of the default one.
- ociParManifest: bool (default: not set) - Enables the generation of the
PAR manifest while the dump operation is being executed.
- ociParExpireTime: string (default: not set) - Allows defining the
expiration time for the PARs generated when ociParManifest is enabled.
Requirements
- MySQL Server 5.7 or newer is required.
- File size limit for files uploaded to the OCI bucket is 1.2 TiB.
- Columns with data types which are not safe to be stored in text form
(i.e. BLOB) are converted to Base64, hence the size of such columns
cannot exceed approximately 0.74 * max_allowed_packet bytes, as
configured through that system variable at the target server.
- Schema object names must use latin1 or utf8 character set.
- Only tables which use the InnoDB storage engine are guaranteed to be
dumped with consistent data.
Details
This operation writes SQL files per each schema, table and view dumped,
along with some global SQL files.
Table data dumps are written to TSV files, optionally splitting them into
multiple chunk files.
Requires an open, global Shell session, and uses its connection options,
such as compression, ssl-mode, etc., to establish additional connections.
Data dumps cannot be created for the following tables:
- mysql.apply_status
- mysql.general_log
- mysql.schema
- mysql.slow_log
Options
The names given in the excludeTables option should be valid MySQL
identifiers, quoted using backtick characters when required.
If the excludeTables option contains a table which does not exist, or a
table which belongs to a schema which is not included in the dump or does
not exist, it is ignored.
The tzUtc option allows dumping TIMESTAMP data when a server has data in
different time zones or data is being moved between servers with
different time zones.
If the consistent option is set to true, a global read lock is set using
the FLUSH TABLES WITH READ LOCK statement, all threads establish
connections | |
#!/usr/bin/env python
"""
Gmail notification in Menu bar.
requirement: rumps (https://github.com/jaredks/rumps),
httplib2, oauth2client, google-api-python-client
Worked with python 2.7
"""
import os
import sys
import re
import argparse
import base64
import dateutil.parser
import webbrowser
import urllib
import httplib2
import socket
import signal
import BaseHTTPServer
import rumps
from email.mime.text import MIMEText
from oauth2client.file import Storage
from oauth2client.tools import run_flow, argparser
from oauth2client.client import OAuth2WebServerFlow
from apiclient.discovery import build
from apiclient import errors
__prog__ = os.path.basename(__file__)
__description__ = __doc__
__author__ = 'rcmdnk'
__copyright__ = 'Copyright (c) 2015 rcmdnk'
__credits__ = ['rcmdnk']
__license__ = 'MIT'
__version__ = 'v0.1.0'
__date__ = '12/Jul/2017'
__maintainer__ = 'rcmdnk'
__email__ = '<EMAIL>'
__status__ = 'Prototype'
DEBUG = True
MAILS_MAX_GET = 10
MAILS_MAX_SHOW = 10
AUTHENTICATION_FILE = os.environ['HOME'] + '/.menubargmail_oauth'
SETTING_FILE = os.environ['HOME'] + '/.menubargmail_settings'
PLIST_FILE = os.environ['HOME'] + '/Library/LaunchAgents/menubargmail.plist'
GOOGLE_CLIENT_ID = '401979756927-453hrgvmgjik9tqqq744s6pg7762hfel'\
'.apps.googleusercontent.com'
GOOGLE_CLIENT_SECRET = '<KEY>'
MENU_BAR_ICON = 'MenuBarGmailMenuBarIcon.png'
class MenuBarGmail(rumps.App):
def __init__(self, autostart=True):
# Set default values
self.debug_mode = DEBUG
rumps.debug_mode(self.debug_mode)
self.mails_max_get = MAILS_MAX_GET
self.mails_max_show = MAILS_MAX_SHOW
self.authentication_file = AUTHENTICATION_FILE
self.setting_file = SETTING_FILE
self.plist_file = PLIST_FILE
self.google_client_id = GOOGLE_CLIENT_ID
self.google_client_secret = GOOGLE_CLIENT_SECRET
self.menu_bar_icon = MENU_BAR_ICON
# Read settings
self.settings = {}
self.read_settings()
# Application setup
super(MenuBarGmail, self).__init__(type(self).__name__, title=None,
icon=self.menu_bar_icon)
self.menu = [
'About',
None,
'Account',
'Check now',
'Reconnect',
'Unread messages',
'Set checking interval',
'Set labels',
'Set filter',
'Mail notification',
'Start at login',
None,
'Uninstall',
None,
]
# Other class variables
self.address = ''
self.address = ''
self.messages = {}
self.message_contents = {}
self.service = None
self.is_first = True
if 'notification' in self.settings\
and self.settings['notification'] == '1':
self.menu['Mail notification'].state = True
else:
self.menu['Mail notification'].state = False
if 'startatlogin' in self.settings\
and self.settings['startatlogin'] == '1':
self.menu['Start at login'].state = True
else:
self.menu['Start at login'].state = False
# Set and start get_messages
self.get_messages_timer = rumps.Timer(self.get_messages_wrapper,
int(self.settings['interval'])
if 'interval' in self.settings
else 60)
if autostart:
self.start()
@rumps.clicked('About')
def about(self, sender):
rumps.alert(title='%s' % __prog__,
message='Gmail notification in Menu bar.\n' +
'Version %s\n' % __version__ +
'%s' % __copyright__)
@rumps.clicked('Account')
def account(self, sender):
self.open_gmail()
@rumps.clicked('Check now')
def check_now(self, sender):
self.get_messages()
@rumps.clicked('Reconnect')
def recoonect(self, sender):
self.build_service(True)
self.restart()
@rumps.clicked('Set checking interval')
def set_interval(self, sender):
# Need to stop timer job, otherwise interval can not be changed.
self.stop()
response = rumps.Window('Set checking interval (s)',
default_text=str(
self.get_messages_timer.interval),
dimensions=(100, 20)).run()
if response.clicked:
self.get_messages_timer.interval = int(response.text)
self.settings['interval'] = response.text
self.write_settings()
self.start()
@rumps.clicked('Set labels')
def set_labels(self, sender):
response = rumps.Window('Set labels (comma-separeted list).\n'
'If "labels" is empty and filter is not set,'
' INBOX is checked.',
default_text=self.settings['labels']
if 'labels' in self.settings else '',
dimensions=(400, 20)).run()
if response.clicked:
self.settings['labels'] = response.text.upper()
self.write_settings()
self.restart()
@rumps.clicked('Set filter')
def set_filter(self, sender):
response = rumps.Window('Set filter.\n'
'e.g. "newer_than:1w"'
' for mails within a week\n'
'ref:'
'https://support.google.com/mail/answer/7190',
default_text=self.settings['filter']
if 'filter' in self.settings else '',
dimensions=(400, 20)).run()
if response.clicked:
self.settings['filter'] = response.text.upper()
self.write_settings()
self.restart()
@rumps.clicked('Mail notification')
def mail_notification(self, sender):
sender.state = not sender.state
self.settings['notification'] = str(sender.state)
self.write_settings()
@rumps.clicked('Start at login')
def set_startup(self, sender):
sender.state = not sender.state
if sender.state == 0:
if os.path.exists(self.plist_file):
os.system('launchctl unload %s' % self.plist_file)
os.remove(self.plist_file)
else:
plist = '''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN"'''\
''' "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>menubargmail</string>
<key>ProgramArguments</key>
<array>
<string>''' + self.get_exe() + '''</string>
</array>
<key>RunAtLoad</key>
<true/>
</dict>
</plist>'''
with open(self.plist_file, 'w') as f:
f.write(plist)
self.settings['startatlogin'] = str(sender.state)
self.write_settings()
@rumps.clicked('Uninstall')
def uninstall(self, sender):
ret = rumps.alert('Do you want to uninstall MenuBarGmail?',
ok='OK', cancel='Cancel')
if ret == 1:
self.remove_me()
def error_check(func):
def wrapper(*args, **kargs):
try:
func(*args, **kargs)
except errors.HttpError, error:
print '[ERROR] %s: %s' % (
sys._getframe().f_code.co_name, error)
args[0].service = None
except (httplib2.ServerNotFoundError, socket.error), error:
print '[ERROR] %s: Maybe offline, %s' % (
sys._getframe().f_code.co_name, error)
args[0].service = None
except Exception, e:
if len(e.args) > 0 and "timeout" in e.args[0]:
print '[ERROR] %s: %s' % (
sys._getframe().f_code.co_name, e.args[0])
else:
print '[ERROR] %s: Unexpected, %s' % (
sys._getframe().f_code.co_name, sys.exc_info()[0])
args[0].service = None
return wrapper
def get_messages_wrapper(self, sender):
self.get_messages()
@error_check
def get_messages(self, commandline=False):
# Set labels
is_inbox_only = True
labels = []
if 'labels' in self.settings and self.settings['labels'] != '':
for l in self.settings['labels'].split(','):
labels.append(l.strip())
if l != 'INBOX':
is_inbox_only = False
elif 'filter' not in self.settings\
or self.settings['filter'].strip() == '':
labels.append('INBOX')
if not is_inbox_only:
# Get labelIds
label_name_id = {x['name'].upper().replace('/', '-'): x['id']
for x in self.timeout_execute(
self.get_service().users().labels()
.list(userId='me'))['labels']}
else:
label_name_id = {'INBOX': 'INBOX', 'None': None}
labels = [x for x in labels
if x.replace('/', '-') in label_name_id]
if len(labels) == 0:
labels.append('None')
# Get message ids
query = 'label:unread ' + (self.settings['filter']
if 'filter' in self.settings else '')
ids = {}
is_new = False
for l in labels:
response = self.timeout_execute(
self.get_service().users().messages().list(
userId='me',
labelIds=label_name_id[l.replace('/', '-')],
q=query))
ids[l] = []
if 'messages' in response:
ids[l].extend([x['id'] for x in response['messages']])
while 'nextPageToken' in response:
page_token = response['nextPageToken']
response = self.timeout_execute(
self.get_service().users().messages().list(
userId='me',
labelIds=label_name_id[l.replace('/', '-')],
q=query, pageToken=page_token))
ids[l].extend([x['id'] for x in response['messages']])
if l not in self.messages:
self.messages[l] = []
if ids[l] != self.messages[l]:
is_new = True
# Remove read messages' id
self.messages[l] = ids[l]
removed = [x for x in self.messages if x not in labels]
if len(removed) > 0:
is_new = True
for l in removed:
del self.messages[l]
# No change
if not is_new:
# No new message
return
# Check total number of messages
# Remove duplication in different labels
all_ids = []
for l in labels:
all_ids += ids[l]
all_ids = list(set(all_ids))
self.message_contents = {
k: v for k, v in self.message_contents.items()
if k in all_ids}
# Set menu's title
um_menu = self.menu['Unread messages']
um_menu.title = 'Unread messages: %d' % len(all_ids)
# Set menubar icon's title
if len(all_ids) == 0:
self.title = ''
else:
self.title = '%d' % len(all_ids)
# Reset menu bar icon after title is put,
# to adjust the width.
self.icon = self.menu_bar_icon
# Get message contents
n_get = 0
for i in all_ids:
if i in self.message_contents\
and 'Subject' in self.message_contents[i]:
continue
is_new = True if i not in self.message_contents\
else False
self.message_contents[i] = {}
if n_get >= self.mails_max_get:
continue
n_get += 1
message = self.timeout_execute(
self.get_service().users().messages().get(
userId='me', id=i))
for k in ['labelIds', 'snippet', 'threadId']:
self.message_contents[i][k] = message[k]
for x in message['payload']['headers']:
if x['name'] == 'Subject':
self.message_contents[i]['Subject'] = x['value']
elif x['name'] == 'Date':
self.message_contents[i]['Date'] =\
x['value'].split(', ')[1].split(' +')[0]
elif x['name'] == 'From':
self.message_contents[i]['FromName'] =\
self.get_address_name(x['value'])
self.message_contents[i]['From'] = x['value']
elif x['name'] in ['Subject', 'To', 'Cc', 'Bcc',
'In-Reply-To', 'References']:
self.message_contents[i][x['name']] = x['value']
for k in ['To', 'Cc']:
if k not in self.message_contents[i]:
self.message_contents[i][k] = ''
body = None
if 'parts' in message['payload']:
for p in message['payload']['parts']:
if 'body' in p and 'data' in p['body']:
body = p['body']['data']
break
if body is None and 'body' in message['payload']\
and 'data' in message['payload']['body']:
body = message['payload']['body']['data']
if body is not None:
self.message_contents[i]['body']\
= base64.urlsafe_b64decode(body.encode('UTF-8'))
if body is None:
self.message_contents[i]['body'] = message['snippet']
# Popup notification
if is_new and not self.is_first\
and self.menu['Mail notification'].state:
rumps.notification(
title='Mail from %s' %
self.message_contents[i]['FromName'],
subtitle=self.message_contents[i]['Subject'],
message=self.message_contents[i]['snippet'])
self.is_first = False
# Get contents
if um_menu._menu is not None:
um_menu.clear()
for l in labels:
threadIds = []
if len(labels) > 1:
# Set each labels' menu
um_menu.add(rumps.MenuItem(
l,
callback=lambda x, y=l: self.open_gmail(y)))
um_menu[l].title = '%s: %d' % (l, len(ids[l]))
for i in sorted([i for i in self.messages[l]
if 'Subject' in self.message_contents[i]],
key=lambda x: dateutil.parser.parse(
self.message_contents[x]['Date'])
.isoformat(),
reverse=True):
v = self.message_contents[i]
if v['threadId'] in threadIds:
continue
threadIds.append(v['threadId'])
title = '%s %s | %s' % (v['Date'], v['FromName'],
v['Subject'])
title = title[0:80]
if len(labels) > 1:
m = um_menu[l]
else:
m = um_menu
if len(m) < self.mails_max_show:
m.add(
rumps.MenuItem(
l+str(i),
callback=lambda x, y=l, z=i:
self.show_mail(y, z)))
m[l+str(i)].title = title
m[l+str(i)].add(rumps.MenuItem(
l+str(i)+'snippet',
callback=lambda x, y=l, z=i: self.show_mail(y, z)))
m[l+str(i)][l+str(i)+'snippet'].title = v['snippet']
if commandline or self.debug_mode:
print ''
print 'labels: %s' % (self.settings['labels']
if 'labels' in self.settings else '')
print 'filter: %s' % (self.settings['filter']
if 'filter' in self.settings else '')
print 'Total number of unread messages: %d\n' % len(all_ids)
for l in labels:
if len(labels) > 1:
print '%d messages for %s' % (len(ids[l]), l)
for i in um_menu[l].values():
print '%s\n' % i.title
else:
for i in um_menu.values():
print '%s\n' % i.title
def read_settings(self):
if not os.path.exists(self.setting_file):
return
with open(self.setting_file, 'r') as f:
for line in f:
l = re.sub(r' *#.*', '', line).strip()
if l == '':
continue
l = l.split('=')
if len(l) < 2:
continue
if l[0] == 'labels':
self.settings[l[0]] = l[1].upper()
| |
<gh_stars>1-10
""" The access backend object base class """
from __future__ import unicode_literals
import six
import hmac
import hashlib
import time
from collections import defaultdict
from passlib.apps import LazyCryptContext
from passlib.utils import sys_bits
from pyramid.security import (
Authenticated,
Everyone,
effective_principals,
Allow,
Deny,
ALL_PERMISSIONS,
)
from pyramid.settings import aslist
DEFAULT_ROUNDS = 535000
def get_pwd_context(rounds=DEFAULT_ROUNDS):
""" Create a passlib context for hashing passwords """
return LazyCryptContext(
schemes=["sha512_crypt", "sha256_crypt"],
default="sha256_crypt" if sys_bits < 64 else "sha512_crypt",
sha512_crypt__default_rounds=rounds,
sha256_crypt__default_rounds=rounds,
)
def group_to_principal(group):
""" Convert a group to its corresponding principal """
if group in (Everyone, Authenticated) or group.startswith("group:"):
return group
elif group == "everyone":
return Everyone
elif group == "authenticated":
return Authenticated
else:
return "group:" + group
def groups_to_principals(groups):
""" Convert a list of groups to a list of principals """
return [group_to_principal(g) for g in groups]
ONE_WEEK = 60 * 60 * 24 * 7
class IAccessBackend(object):
""" Base class for retrieving user and package permission data """
mutable = False
ROOT_ACL = [
(Allow, Authenticated, "login"),
(Allow, "admin", ALL_PERMISSIONS),
(Deny, Everyone, ALL_PERMISSIONS),
]
def __init__(
self,
request=None,
default_read=None,
default_write=None,
disallow_fallback=(),
cache_update=None,
pwd_context=None,
token_expiration=ONE_WEEK,
signing_key=None,
):
self.request = request
self.default_read = default_read
self.default_write = default_write
self.disallow_fallback = disallow_fallback
self.cache_update = cache_update
self.pwd_context = pwd_context
self.token_expiration = token_expiration
self.signing_key = signing_key
@classmethod
def configure(cls, settings):
""" Configure the access backend with app settings """
rounds = int(settings.get("auth.rounds", DEFAULT_ROUNDS))
return {
"default_read": aslist(
settings.get("pypi.default_read", ["authenticated"])
),
"default_write": aslist(settings.get("pypi.default_write", [])),
"disallow_fallback": aslist(settings.get("pypi.disallow_fallback", [])),
"cache_update": aslist(
settings.get("pypi.cache_update", ["authenticated"])
),
"pwd_context": get_pwd_context(rounds),
"token_expiration": int(settings.get("auth.token_expire", ONE_WEEK)),
"signing_key": settings.get("auth.signing_key"),
}
@classmethod
def postfork(cls, **kwargs):
""" This method will be called after uWSGI forks """
def allowed_permissions(self, package):
"""
Get all allowed permissions for all principals on a package
Returns
-------
perms : dict
Mapping of principal to tuple of permissions
"""
all_perms = {}
for user, perms in six.iteritems(self.user_permissions(package)):
all_perms["user:" + user] = tuple(perms)
for group, perms in six.iteritems(self.group_permissions(package)):
all_perms[group_to_principal(group)] = tuple(perms)
# If there are no group or user specifications for the package, use the
# default
if not all_perms:
for principal in groups_to_principals(self.default_read):
all_perms[principal] = ("read",)
for principal in groups_to_principals(self.default_write):
if principal in all_perms:
all_perms[principal] += ("write",)
else:
all_perms[principal] = ("write",)
# add fallback permissions
if package not in self.disallow_fallback:
for principal in all_perms:
all_perms[principal] += ("fallback",)
return all_perms
def get_acl(self, package):
""" Construct an ACL for a package """
acl = []
permissions = self.allowed_permissions(package)
for principal, perms in six.iteritems(permissions):
for perm in perms:
acl.append((Allow, principal, perm))
return acl
def has_permission(self, package, perm):
""" Check if this user has a permission for a package """
current_userid = self.request.userid
if current_userid is not None and self.is_admin(current_userid):
return True
perms = self.allowed_permissions(package)
for principal in effective_principals(self.request):
if perm in perms.get(principal, []):
return True
return False
def user_principals(self, username):
"""
Get a list of principals for a user
Parameters
----------
username : str
Returns
-------
principals : list
"""
principals = ["user:" + username, Everyone, Authenticated]
if self.is_admin(username):
principals.append("admin")
for group in self.groups(username):
principals.append("group:" + group)
return principals
def in_group(self, username, group):
"""
Find out if a user is in a group
Parameters
----------
username : str
Name of user. May be None for the anonymous user.
group : str
Name of the group. Supports 'everyone', 'authenticated', and
'admin'.
Returns
-------
member : bool
"""
if group in ("everyone", Everyone):
return True
elif username is None:
return False
elif group in ("authenticated", Authenticated):
return True
elif group == "admin" and self.is_admin(username):
return True
else:
return group in self.groups(username)
def in_any_group(self, username, groups):
"""
Find out if a user is in any of a set of groups
Parameters
----------
username : str
Name of user. May be None for the anonymous user.
groups : list
list of group names. Supports 'everyone', 'authenticated', and
'admin'.
Returns
-------
member : bool
"""
return any((self.in_group(username, group) for group in groups))
def can_update_cache(self):
"""
Return True if the user has permissions to update the pypi cache
"""
return self.in_any_group(self.request.userid, self.cache_update)
def need_admin(self):
"""
Find out if there are any admin users
This should only be overridden by mutable backends
Returns
-------
need_admin : bool
True if no admin user exists and the backend is mutable, False
otherwise
"""
return False
def allow_register(self):
"""
Check if the backend allows registration
This should only be overridden by mutable backends
Returns
-------
allow : bool
"""
return False
def allow_register_token(self):
"""
Check if the backend allows registration via tokens
This should only be overridden by mutable backends
Returns
-------
allow : bool
"""
return False
def verify_user(self, username, password):
"""
Check the login credentials of a user
For Mutable backends, pending users should fail to verify
Parameters
----------
username : str
password : str
Returns
-------
valid : bool
True if user credentials are valid, false otherwise
"""
stored_pw = self._get_password_hash(username)
if self.mutable:
# if a user is pending, user_data will be None
user_data = self.user_data(username)
if user_data is None:
return False
return bool(stored_pw and self.pwd_context.verify(password, stored_pw))
def _get_password_hash(self, username):
""" Get the stored password hash for a user """
raise NotImplementedError
def groups(self, username=None):
"""
Get a list of all groups
If a username is specified, get all groups that the user belongs to
Parameters
----------
username : str, optional
Returns
-------
groups : list
List of group names
"""
raise NotImplementedError
def group_members(self, group):
"""
Get a list of users that belong to a group
Parameters
----------
group : str
Returns
-------
users : list
List of user names
"""
raise NotImplementedError
def is_admin(self, username):
"""
Check if the user is an admin
Parameters
----------
username : str
Returns
-------
is_admin : bool
"""
raise NotImplementedError
def group_permissions(self, package):
"""
Get a mapping of all groups to their permissions on a package
Parameters
----------
package : str
The name of a python package
Returns
-------
permissions : dict
mapping of group name to a list of permissions
(which can contain 'read' and/or 'write')
"""
raise NotImplementedError
def user_permissions(self, package):
"""
Get a mapping of all users to their permissions for a package
Parameters
----------
package : str
The name of a python package
Returns
-------
permissions : dict
Mapping of username to a list of permissions (which can contain
'read' and/or 'write')
"""
raise NotImplementedError
def user_package_permissions(self, username):
"""
Get a list of all packages that a user has permissions on
Parameters
----------
username : str
Returns
-------
packages : list
List of dicts. Each dict contains 'package' (str) and 'permissions'
(list)
"""
raise NotImplementedError
def group_package_permissions(self, group):
"""
Get a list of all packages that a group has permissions on
Parameters
----------
group : str
Returns
-------
packages : list
List of dicts. Each dict contains 'package' (str) and 'permissions'
(list)
"""
raise NotImplementedError
def user_data(self, username=None):
"""
Get a list of all users or data for a single user
For Mutable backends, this MUST exclude all pending users
Returns
-------
users : list
Each user is a dict with a 'username' str, and 'admin' bool
user : dict
If a username is passed in, instead return one user with the fields
above plus a 'groups' list.
"""
raise NotImplementedError
def check_health(self):
"""
Check the health of the access backend
Returns
-------
(healthy, status) : (bool, str)
Tuple that describes the health status and provides an optional
status message
"""
return (True, "")
def dump(self):
"""
Dump all of the access control data to a universal format
Returns
-------
data : dict
"""
from pypicloud import __version__
data = {}
data["allow_register"] = self.allow_register()
data["version"] = __version__
groups = self.groups()
users = self.user_data()
for user in users:
user["password"] = self._get_password_hash(user["username"])
data["groups"] = {}
packages = {"users": defaultdict(dict), "groups": defaultdict(dict)}
for group in groups:
data["groups"][group] = self.group_members(group)
perms = self.group_package_permissions(group)
for perm in perms:
package = perm["package"]
packages["groups"][package][group] = perm["permissions"]
for user in users:
username = user["username"]
perms = self.user_package_permissions(username)
for perm in perms:
package = perm["package"]
packages["users"][package][username] = perm["permissions"]
# Convert the defaultdict to | |
Again with the optional parameters to change the values.
Change_degree_of_block change the value of out_degree of block.
"""
return ((self.partition.get_edge_count(neighbor_block, to_block) - change_ets)
+ (self.partition.get_edge_count(to_block, neighbor_block) - change_est)
+ self.epsilon) \
/ ((self.partition.get_out_degree_of_block(neighbor_block) - change_degree_of_block)
+ (self.partition.get_in_degree_of_block(neighbor_block) - change_in_degree_of_block)
+ self.epsilon * self.partition.B)
# @formatter:on
class MetropolisHastingInferenceTenK(MetropolisHastingInference):
"""Fix Number of Steps"""
title = "Metropolis Hasting Inference 10k"
short_title = "MHA 10k"
def __init__(self, graph, objective_function, partition):
super(MetropolisHastingInferenceTenK, self).__init__(graph, objective_function, partition)
self.default_number_of_steps = 10000
class MetropolisHastingInferenceFiftyK(MetropolisHastingInference):
"""Fix Number of Steps"""
title = "Metropolis Hasting Inference 50k"
short_title = "MHA 50k"
def __init__(self, graph, objective_function, partition):
super(MetropolisHastingInferenceFiftyK, self).__init__(graph, objective_function, partition)
self.default_number_of_steps = 50000
class MetropolisHastingInferenceHundredK(MetropolisHastingInference):
"""Fix Number of Steps"""
title = "Metropolis Hasting Inference 100k"
short_title = "MHA 100k"
def __init__(self, graph, objective_function, partition):
super(MetropolisHastingInferenceHundredK, self).__init__(graph, objective_function, partition)
self.default_number_of_steps = 100000
class MetropolisHastingInferenceTwoHundredFiftyK(MetropolisHastingInference):
"""Fix Number of Steps"""
title = "Metropolis Hasting Inference 250k"
short_title = "MHA 250k"
def __init__(self, graph, objective_function, partition):
super(MetropolisHastingInferenceTwoHundredFiftyK, self).__init__(graph, objective_function, partition)
self.default_number_of_steps = 250000
class MetropolisHastingInferenceFiveHundredK(MetropolisHastingInference):
"""Fix Number of Steps"""
title = "Metropolis Hasting Inference 500k"
short_title = "MHA 500k"
def __init__(self, graph, objective_function, partition):
super(MetropolisHastingInferenceFiveHundredK, self).__init__(graph, objective_function, partition)
self.default_number_of_steps = 500000
class MetropolisHastingInferenceSimulatedAnnealingCauchy(MetropolisHastingInference):
"""Metropolis Hasting Inference Algorithm with Cauchy Simulated Annealing"""
title = "Metropolis Hasting Inference Simulated Annealing Cauchy"
short_title = "MHAC 1k"
def update_temperature(self):
if self.beta_zero is None:
self._initialize_temperature()
self.beta = (1 + self.performed_steps) / self.beta_zero
class MetropolisHastingInferenceSimulatedAnnealingCauchyFiftyK(MetropolisHastingInferenceSimulatedAnnealingCauchy):
"""Fix Number of Steps"""
title = "Metropolis Hasting Inference Simulated Annealing Cauchy 50k"
short_title = "MHAC 50k"
def __init__(self, graph, objective_function, partition):
super(MetropolisHastingInferenceSimulatedAnnealingCauchy, self).__init__(graph, objective_function, partition)
self.default_number_of_steps = 50000
class MetropolisHastingInferenceSimulatedAnnealingCauchyTwoHundredFiftyK(
MetropolisHastingInferenceSimulatedAnnealingCauchy):
"""Fix Number of Steps"""
title = "Metropolis Hasting Inference Simulated Annealing Cauchy 250k"
short_title = "MHAC 250k"
def __init__(self, graph, objective_function, partition):
super(MetropolisHastingInferenceSimulatedAnnealingCauchy, self).__init__(graph, objective_function, partition)
self.default_number_of_steps = 250000
class MetropolisHastingInferenceSimulatedAnnealingBoltzman(MetropolisHastingInference):
"""Metropolis Hasting Inference Algorithm with Cauchy Simulated Annealing"""
title = "Metropolis Hasting Inference Simulated Annealing Boltzman"
short_title = "MHAB 1k"
def update_temperature(self):
if self.beta_zero is None:
self._initialize_temperature()
self.beta = math.log(1 + self.performed_steps) / self.beta_zero
class MetropolisHastingInferenceSimulatedAnnealingBoltzmanFiftyK(MetropolisHastingInferenceSimulatedAnnealingBoltzman):
"""Fix Number of Steps"""
title = "Metropolis Hasting Inference Simulated Annealing Boltzman 50k"
short_title = "MHAB 50k"
def __init__(self, graph, objective_function, partition):
super(MetropolisHastingInferenceSimulatedAnnealingBoltzman, self).__init__(graph, objective_function, partition)
self.default_number_of_steps = 50000
class MetropolisHastingInferenceSimulatedAnnealingBoltzmanTwoHundredFiftyK(
MetropolisHastingInferenceSimulatedAnnealingBoltzman):
"""Fix Number of Steps"""
title = "Metropolis Hasting Inference Simulated Annealing Boltzman 250k"
short_title = "MHAB 250k"
def __init__(self, graph, objective_function, partition):
super(MetropolisHastingInferenceSimulatedAnnealingBoltzman, self).__init__(graph, objective_function, partition)
self.default_number_of_steps = 250000
class KarrerInference(Inference):
    """
    Heuristic Inference Algorithm described in Karrer and Newman 2011
    slightly enhanced?

    One sweep (infer_stepwise) greedily moves every node to the block with
    the best objective delta, snapshotting the best intermediate partition.
    A sweep that no longer improves the objective raises StopIteration to
    signal convergence to the caller.
    """
    title = "Karrer Inference"
    short_title = "KL-G"

    def __init__(self, graph, objective_function, partition, no_negative_move=False, limit_possible_blocks=False):
        """
        :param no_negative_move: if True only strictly improving moves are performed
        :param limit_possible_blocks: if True candidate blocks come from
            partition.get_possible_blocks() instead of all B blocks
        """
        super(KarrerInference, self).__init__(graph, objective_function, partition)
        self.no_negative_move = no_negative_move
        self.limit_possible_blocks = limit_possible_blocks
        # best objective value accepted so far; -inf so the first sweep is always kept
        self._last_objective_value = float('-inf')

    def infer_stochastic_block_model(self):
        """Run greedy sweeps until convergence; fail loudly after 100 sweeps."""
        try:
            for _ in range(100):
                self.infer_stepwise()
            # for/else: the loop body contains no break, so this else always
            # runs when the 100-sweep budget is exhausted without converging.
            else:
                raise Exception("Could not find minimum in 100 steps" + str(self.partition.get_representation()) + str(
                    self.partition.graph.edges()))
        except StopIteration:
            # raised by infer_stepwise once a sweep no longer improves the objective
            pass

    def infer_stepwise(self):
        """Perform one greedy sweep over all nodes.

        Raises StopIteration when the sweep yields no (sufficient) improvement
        or when the new objective value is clearly worse than the last one.
        """
        # remember the starting partition so a bad sweep can be rolled back
        saved_representation = self.partition.get_representation()
        improve = 0
        overall_improve = 0
        best_partition_representation = None
        iteration_moves = 0
        moves = 0
        for node in self.partition.get_nodes_iter():
            from_block = self.partition.get_block_of_node(node)
            next_block = from_block
            move_delta = -float("inf")
            if self.limit_possible_blocks:
                possible_blocks = list(self.partition.get_possible_blocks(from_block))
            else:
                possible_blocks = list(range(self.partition.B))
            if self.no_negative_move:
                # staying put is not an option when only improving moves are allowed
                possible_blocks.remove(from_block)
            for block in possible_blocks:
                if block != from_block:
                    parameter = self.partition.precalc_move((node, from_block, block),
                                                            self._objective_function)
                    delta = self._objective_function.calculate_delta(
                        self.partition, from_block, block, *parameter)
                else:
                    # staying in the current block changes nothing
                    delta = 0.0
                if delta > move_delta:
                    move_delta = delta
                    next_block = block
            if not self.no_negative_move or move_delta > 0:
                self.partition.move_node(node, next_block)
                moves += 1
                improve += move_delta
            # whenever the cumulative delta is positive, snapshot this partition
            # as the best one seen during the sweep
            if improve > 0:
                overall_improve += improve
                improve = 0
                best_partition_representation = self.partition.get_representation()
                iteration_moves += moves
                moves = 0
        # overall improve real positive to ignore rounding errors
        if overall_improve > 0.001:
            # keep the best intermediate partition, not necessarily the final one
            self.partition.set_from_representation(best_partition_representation)
            self.node_moves += iteration_moves
            actual_value = self.objective_function.calculate(self.partition)
            if actual_value < self._last_objective_value + 0.01:
                if actual_value < self._last_objective_value - .1:
                    # if new one is worse then retrieve old state
                    self.partition.set_from_representation(saved_representation)
                    raise StopIteration()
            else:
                self._last_objective_value = actual_value
        else:
            # if no improvement set back to old partition
            self.partition.set_from_representation(saved_representation)
            raise StopIteration()
class KarrerInferenceNoNegativeMove(KarrerInference):
    """Karrer inference restricted to strictly improving node moves."""
    title = "Karrer Inference with no negative move"
    short_title = "KL-G nn"

    def __init__(self, graph, objective_function, partition, limit_possible_blocks=False):
        """Delegate to KarrerInference with no_negative_move forced on."""
        super(KarrerInferenceNoNegativeMove, self).__init__(
            graph,
            objective_function,
            partition,
            no_negative_move=True,
            limit_possible_blocks=limit_possible_blocks,
        )
class EMInference(Inference):
"""Expectation-Maximization Algorithm for SBM inference"""
title = "Expectation Maximization Inference"
short_title = "KL-EM"
def __init__(self, graph, objective_function, partition, with_toggle_detection=True, limit_possible_blocks=False):
super(EMInference, self).__init__(graph, objective_function, partition)
self.with_toggle_detection = with_toggle_detection
self._old_value = self._objective_function.calculate(partition)
self.limit_possible_blocks = limit_possible_blocks
def infer_stochastic_block_model(self):
if self.partition.is_graph_directed():
try:
for _ in range(2 * len(self.graph)):
self.infer_stepwise_directed()
else:
print("EMInference: could not find an optimal partition in", 2 * len(self.graph), "steps",
self.partition.get_representation(), self.graph.edges())
except StopIteration:
pass
else:
try:
for _ in range(2 * len(self.graph)):
self.infer_stepwise_undirected()
else:
print("EMInference: could not find an optimal partition in", 2 * len(self.graph), "steps",
self.partition.get_representation(), self.graph.edges())
except StopIteration:
pass
def infer_stepwise(self):
if self.partition.is_graph_directed():
self.infer_stepwise_directed()
else:
self.infer_stepwise_undirected()
    def infer_stepwise_undirected(self):
        """
        For each node retrieve the best block. Then move all nodes to the new best block.
        Easy ansatz tend/allow to toggle between two states in the end.
        Therefore here is a simple approach to detect this status and resolve it included.

        Raises StopIteration when no node has an improving move (convergence).
        """
        # save representation of partition in case for overall decrease
        saved_representation = self.partition.get_representation()
        # set flag which checks if we find any improve
        improve = False
        # count number of moves, in case everything is fine
        iteration_moves = 0
        # keep list of moves, which will be performed after all calculations
        moves = []
        possible_blocks = list(range(self.partition.B))
        # per-block count of nodes scheduled to leave, so blocks are never emptied
        nodes_moved = {block: 0 for block in range(self.partition.B)}
        for node in self.partition.get_nodes_iter():
            from_block = self.partition.get_block_of_node(node)
            # ensure that one don't move the last node out of the block
            if self.partition.get_number_of_nodes_in_block(from_block) - nodes_moved[from_block] == 1:
                continue
            next_block = from_block
            move_delta = 0
            if self.limit_possible_blocks:
                possible_blocks = self.partition.get_possible_blocks(from_block)
            # temporarily drop the current block; it is re-appended below
            possible_blocks.remove(from_block)
            parameter = self.partition.precalc_move((node, from_block, from_block),
                                                    self._objective_function)
            for block in possible_blocks:
                # calculate_delta expects an entry for the target block; insert a
                # zero placeholder if missing and remove it again afterwards
                if block not in parameter[0]:
                    parameter[0][block] = 0
                delta = self._objective_function.calculate_delta(
                    self.partition, from_block, block, *parameter)
                if parameter[0][block] == 0:
                    del parameter[0][block]
                if delta > move_delta:
                    move_delta = delta
                    next_block = block
            possible_blocks.append(from_block)
            # only schedule clearly improving moves (threshold guards rounding noise)
            if move_delta > 0.001:
                moves.append((node, next_block))
                nodes_moved[from_block] += 1
                iteration_moves += 1
                improve = True
        # perform moves
        for move in moves:
            self.partition.move_node(*move)
        if improve:
            if self.with_toggle_detection:
                new_value = self._objective_function.calculate(self.partition)
                # detect decreasing objective function value and prevent decrease by only doing last move
                if new_value <= self._old_value:
                    self.partition.set_from_representation(saved_representation)
                    # carefully move all nodes which increase the value
                    for node, to_block in moves:
                        from_block = self.partition.get_block_of_node(node)
                        parameter = self.partition.precalc_move((node, from_block, to_block),
                                                                self._objective_function)
                        delta = self._objective_function.calculate_delta(
                            self.partition, from_block, to_block, *parameter)
                        if delta > 0.001:
                            self.partition.move_node(node, to_block)
                            self.node_moves += 1
                else:
                    self._old_value = new_value
                    self.node_moves += iteration_moves
        else:
            # No move -> no change -> stop algorithm
            raise StopIteration()
def infer_stepwise_directed(self):
"""
For each node retrieve the best block. Then move all nodes to the new best block.
Easy ansatz tend/allow to toggle between two states in the end.
Therefore here is a simple approach to detect this status and resolve it included.
"""
# save representation of partition in case for overall decrease
saved_representation = self.partition.get_representation()
# set flag which checks if we find any improve
improve = False
# count number of moves, in case everything is fine
iteration_moves = 0
# keep list of moves, which will be performed after all calculations
moves = []
possible_blocks = list(range(self.partition.B))
nodes_moved = {block: 0 for block in range(self.partition.B)}
for node in self.partition.get_nodes_iter():
from_block = self.partition.get_block_of_node(node)
# ensure that one don't move the last node out of the block
if self.partition.get_number_of_nodes_in_block(from_block) - nodes_moved[from_block] == 1:
continue
next_block = from_block
move_delta = 0
if self.limit_possible_blocks:
possible_blocks = self.partition.get_possible_blocks(from_block)
possible_blocks.remove(from_block)
parameter = self.partition.precalc_move((node, from_block, from_block),
self._objective_function)
for block in possible_blocks:
if block not in parameter[0]:
parameter[0][block] = 0
if block not in parameter[1]:
parameter[1][block] = 0
delta = self._objective_function.calculate_delta(
self.partition, from_block, block, *parameter)
if parameter[0][block] == 0:
del parameter[0][block]
if parameter[1][block] == 0:
del parameter[1][block]
if delta > move_delta:
move_delta = delta
next_block = block
possible_blocks.append(from_block)
if move_delta > 0.001:
moves.append((node, next_block))
nodes_moved[from_block] += 1
iteration_moves += 1
improve = True
# perform moves
for move in moves:
self.partition.move_node(*move)
if improve:
if self.with_toggle_detection:
new_value = self._objective_function.calculate(self.partition)
# detect decreasing objective function | |
bin = 10
x,y = numpy.meshgrid(numpy.arange(0,LENGTH1,bin),numpy.arange(0,LENGTH2,bin))
x_conv = coord_conv_x(x)
y_conv = coord_conv_y(y)
epsilon = 0
index = 0
ROT=ROTS[0]
for term in cheby_terms_use:
index += 1
#print index, ROT, term, fitvars[str(ROT)+'$'+term['n']]
epsilon += fitvars[str(ROT)+'$'+term['n']]*term['fx'](x_conv,y_conv)*term['fy'](x_conv,y_conv)
diff = ((x-LENGTH1/2.)**2.+(y-LENGTH2/2.)**2.) - (LENGTH1/2.)**2.
diff_bool = diff[diff<0]
diff[diff>0] = 0
diff[diff<0] = 1
diff2 = copy(diff)
diff2[diff2==0] = -999 #adam-watch# somehow I get arrays that are almost entirely epsilon=[...,-999,...]
diff2[diff2==1] = 0
#hdu = pyfits.PrimaryHDU(diff)
#im = '/scratch/pkelly/diff.fits'
#os.system('rm ' + im)
#hdu.writeto(im)
#print 'test_correction| im =',im, '...finished...'
if not paper_stat: epsilon = epsilon * diff + diff2
else: pass
flat = epsilon.flatten().compress(epsilon.flatten()[epsilon.flatten()!=0])
print 'test_correction| numpy.median(flat)=',numpy.median(flat) , ' len(epsilon.flatten())=',len(epsilon.flatten()) , ' len(flat)=',len(flat)
epsilon = epsilon - numpy.median(flat)
if False:
print 'test_correction| ...writing...'
hdu = pyfits.PrimaryHDU(epsilon)
#os.system('rm ' + tmpdir + 'correction' + ROT + filter + sample_size + '.fits')
#hdu.writeto(tmpdir + '/correction' + ROT + filter + sample_size + '.fits')
im = '/scratch/pkelly/test.fits'
if os.path.isfile(im):
os.system('rm ' + im)
hdu.writeto(im)
print 'test_correction| im =',im, 'finished'
return epsilon, diff_bool
#adam-note# step5: below here is my attempt to apply the correction and make *I.fits files!
def run_correction(OBJNAME=None,FILTER=None,PPRUN=None,r_ext=True): #step5_correct_ims #main
'''inputs: OBJNAME=None,FILTER=None,PPRUN=None,r_ext=True (r_ext=True if you've already done the stellar halo rings, otherwise r_ext=False)
returns: apply the correction and make *I.fits files (basically a wrapper around construct_correction, which does this:save starflat fits files
calls: describe_db,find_nearby,construct_correction,save_fit'''
print '\nrun_correction| START the func. inputs: OBJNAME=',OBJNAME , ' FILTER=',FILTER , ' PPRUN=',PPRUN , ' r_ext=',r_ext
loop = True
while loop:
db2,c = connect_except()
db_keys_try = describe_db(c,['' + test + 'try_db'])
command='SELECT * from ' + test + 'try_db where todo="good" and var_correction > 0.08 order by rand()'
command='SELECT * from ' + test + 'try_db i where i.todo="good" and i.correction_applied!="yes" and (i.objname like "MACS0018%" or i.objname like "MACS0025%" or i.objname like "MACS0257%" or i.objname like "MACS0454%" or i.objname like "MACS0647%" or i.objname like "MACS0717%" or i.objname like "MACS0744%" or i.objname like "MACS0911%" or i.objname like "MACS1149%" or i.objname like "MACS1423%" or i.objname like "MACS2129%" or i.objname like "MACS2214%" or i.objname like "MACS2243%" or i.objname like "A2219" or i.objname like "A2390") order by rand()'
command='SELECT * from ' + test + 'try_db i where i.correction_applied is null and not (i.objname like "MACS0018%" or i.objname like "MACS0025%" or i.objname like "MACS0257%" or i.objname like "MACS0454%" or i.objname like "MACS0647%" or i.objname like "MACS0717%" or i.objname like "MACS0744%" or i.objname like "MACS0911%" or i.objname like "MACS1149%" or i.objname like "MACS1423%" or i.objname like "MACS2129%" or i.objname like "MACS2214%" or i.objname like "MACS2243%" or i.objname like "A2219" or i.objname like "A2390") order by rand() limit 1'
command='SELECT * from ' + test + 'try_db where correction_applied="redo" group by objname order by rand()'
command='SELECT * from ' + test + 'try_db where correction_applied is null and fix="yes" order by rand()'
command='SELECT * from ' + test + 'try_db where correction_applied is null and (config=8 or config=9) order by rand()'
command='SELECT * from ' + test + 'try_db where correction_applied is null and OBJNAME="HDFN" order by rand()'
if OBJNAME is not None:
command='SELECT * from ' + test + 'try_db i where OBJNAME="' + OBJNAME + '" and PPRUN="' + PPRUN + '" limit 1'
loop = False
print ' command=',command
c.execute(command)
results=c.fetchall()
line = results[0]
dtop2 = {}
for i in range(len(db_keys_try)):
dtop2[db_keys_try[i]] = str(line[i])
print ' dtop2["OBJNAME"]=',dtop2["OBJNAME"] , ' dtop2["correction_applied"]=',dtop2["correction_applied"]
illum_dir = illum_main_dir + dtop2['FILTER'] + '/' + dtop2['PPRUN'] + '/'
#logfile = open(illum_dir + 'logfile','w')
OBJNAME_use, FILTER_use, PPRUN_use = dtop2['OBJNAME'], dtop2['FILTER'], dtop2['PPRUN']
sample = 'notselected'
''' if no bootstrap use good fit '''
if dtop2['todo'] == 'good' and (string.find(dtop2['sdssstatus'],'finished') != -1 or string.find(dtop2['Nonestatus'],'finished')):
if string.find(dtop2['sdssstatus'],'finished') != -1:
sample = 'sdss'
if string.find(dtop2['Nonestatus'],'finished') != -1:
sample = 'None'
elif dtop2['todo'] == 'bootstrap' and str(dtop2['todo']) == 'True' :
sample = 'bootstrap'
print ' sample=',sample
if sample == 'notselected':
OBJNAME_use, FILTER_use, PPRUN_use, sample = find_nearby(dtop2['OBJNAME'],dtop2['FILTER'],dtop2['PPRUN'])
print ' parameters: sample=',sample , ' dtop2["sdssstatus"]=',dtop2["sdssstatus"] , ' dtop2["Nonestatus"]=',dtop2["Nonestatus"] , ' dtop2["bootstrapstatus"]=',dtop2["bootstrapstatus"] , ' dtop2["todo"]=',dtop2["todo"] , ' sample=',sample , ' OBJNAME_use=',OBJNAME_use , ' FILTER_use=',FILTER_use , ' PPRUN_use=',PPRUN_use , ' dtop2["OBJNAME"]=',dtop2["OBJNAME"] , ' dtop2["FILTER"]=',dtop2["FILTER"] , ' dtop2["PPRUN"]=',dtop2["PPRUN"]
if sample!='notselected' and sample!=None:
#stderr_orig = sys.stderr
#stdout_orig = sys.stdout
#sys.stdout = logfile
#sys.stderr = logfile
print ' dtop2["OBJNAME"]=',dtop2["OBJNAME"] , ' dtop2["FILTER"]=',dtop2["FILTER"] , ' dtop2["PPRUN"]=',dtop2["PPRUN"] , ' sample=',sample , "all" , ' OBJNAME_use=',OBJNAME_use , ' FILTER_use=',FILTER_use , ' PPRUN_use=',PPRUN_use
construct_correction(dtop2['OBJNAME'],dtop2['FILTER'],dtop2['PPRUN'],sample,'all',OBJNAME_use,FILTER_use,PPRUN_use,r_ext=r_ext)
#sys.stderr = stderr_orig
#sys.stdout = stdout_orig
#logfile.close()
else:
save_fit({'PPRUN':dtop2['PPRUN'],'OBJNAME':dtop2['OBJNAME'],'FILTER':dtop2['FILTER'],'sample':'record','sample_size':'record','correction_applied':'no match'},db='' + test + 'try_db')
#if 0: #help_list[y]['primary']==None or help_list[y]['secondary']==None:
print "run_correction| DONE with func\n"
def find_nearby(OBJNAME,FILTER,PPRUN): #step5_correct_ims #intermediate
    '''inputs: OBJNAME,FILTER,PPRUN
    returns: (use[0]['OBJNAME'],use[0]['FILTER'],use[0]['PPRUN'],sample)
    purpose: figure out the right (closest) correction to apply
    calls: describe_db,describe_db,describe_db
    called_by: run_correction

    Returns (None,None,None,None) when no usable run is found.  Note: both
    code paths return from inside the first loop iteration, so only the first
    row of the initial query is ever considered.'''
    print '\nfind_nearby| START the func. inputs: OBJNAME=',OBJNAME , ' FILTER=',FILTER , ' PPRUN=',PPRUN
    db2,c = connect_except()
    db_keys_illum = describe_db(c,[illum_db])
    command="SELECT * from "+illum_db+" where PPRUN='" + PPRUN + "' and OBJNAME='" + OBJNAME + "'" # and sample_size='all'"
    print command
    c.execute(command)
    results=c.fetchall()
    print len(results)
    for line in results:
        # map column names onto the row (all values stringified)
        dtop = {}
        for i in range(len(db_keys_illum)):
            dtop[db_keys_illum[i]] = str(line[i])
        db_keys = describe_db(c,['' + test + 'fit_db','' + test + 'try_db'])
        ''' select runs with little cloud cover '''
        ''' pick runs with good statistics and no zp variations '''
        if dtop['CONFIG'] == '10_3': # or (dtop['CONFIG'] == '9.0' and dtop['FILTER'] == 'W-J-B'):
            # find the wavelength-group index of this run's filter
            for i in range(len(config_bonn.wavelength_groups)):
                for filt in config_bonn.wavelength_groups[i]:
                    if filt == dtop['FILTER']:
                        FILTER_NUM_ZERO = i
                        break
            # NOTE(review): if the filter is in no wavelength group,
            # FILTER_NUM_ZERO stays unbound and the use of it below raises
            # NameError -- confirm all filters are covered by config_bonn.
            command="SELECT * from " + test + "fit_db f left join " + test + "try_db t on (t.pprun=f.pprun and t.OBJNAME=f.OBJNAME) where f.CONFIG='" + dtop['CONFIG'] + "'"
            print command
            c.execute(command)
            results=c.fetchall()
            use = []
            print len(results), ' # of results '
            for line in results:
                dp = {}
                for i in range(len(db_keys)):
                    dp[db_keys[i]] = str(line[i])
                for i in range(len(config_bonn.wavelength_groups)):
                    for filt in config_bonn.wavelength_groups[i]:
                        if filt == dp['FILTER']:
                            FILTER_NUM = i
                            break
                # rank candidates by wavelength-group distance to our filter
                use.append([abs(FILTER_NUM - FILTER_NUM_ZERO),dp])
            use.sort()
            use = [x[1] for x in use]
            print use[0]['OBJNAME'], use[0]['PPRUN'], PPRUN
        else:
            ''' use B filter if U '''
            # NOTE(review): `filter` shadows the builtin of the same name
            if dtop['FILTER'] == 'W-J-U': filter = 'W-J-B'
            else: filter = dtop['FILTER']
            ''' use 10_2 if 10_1 and W-J-B '''
            if dtop['CONFIG'] == '10_1' and filter == 'W-J-B':
                dtop['CONFIG'] = '10_2'
            db_keys = describe_db(c,['' + test + 'try_db'])
            if (dtop['CONFIG'] == '9.0' and dtop['FILTER'] == 'W-J-B'):
                command="SELECT * from " + test + "try_db t where sample_current is not null and (t.todo='good' or (t.todo='bootstrap' and t.bootstrap_good='True')) and t.CONFIG='" + dtop['CONFIG'] + "' and t.objname!='HDFN' order by todo desc"
            else:
                command="SELECT * from " + test + "try_db t where sample_current is not null and (t.todo='good' or (t.todo='bootstrap' and t.bootstrap_good='True')) and t.CONFIG='" + dtop['CONFIG'] + "' and t.FILTER='" + filter + "' and t.objname!='HDFN' order by todo desc"
            print command
            c.execute(command)
            results=c.fetchall()
            use = []
            print len(results), ' # of results '
            for line in results:
                dp = {}
                for i in range(len(db_keys)):
                    dp[db_keys[i]] = str(line[i])
                use.append(dp)
            # Python 2 cmp-style comparator: order candidate runs by how close
            # their run date (parsed from the PPRUN name) is to ours
            def use_comp(x,y):
                date = [float(q) for q in re.split('-',re.split('_',PPRUN)[0])]
                date_x = [float(q) for q in re.split('-',re.split('_',x['PPRUN'])[0])]
                date_y = [float(q) for q in re.split('-',re.split('_',y['PPRUN'])[0])]
                #print date, date_x, date_y,
                # approximate day distance between two Y-M-D triples
                diff = lambda a,b: abs((a[0]-b[0])*365 + (a[1]-b[1])*30 + a[2]-b[2])
                diff_x = diff(date_x,date)
                diff_y = diff(date_y,date)
                if diff_x < diff_y:
                    return -1
                elif diff_x == diff_y:
                    return 0
                else:
                    return 1
            use.sort(use_comp)
        if len(use) > 0:
            print use[0]['OBJNAME'], use[0]['PPRUN'], PPRUN
            sample = 'not set'
            ''' make sure that the illumination correction is in place '''
            sample = use[0]['sample_current']
            if sample != 'not set':
                return (use[0]['OBJNAME'],use[0]['FILTER'],use[0]['PPRUN'],sample)
            else: return(None,None,None,None)
        else: return(None,None,None,None)
    # NOTE(review): unreachable when `results` is non-empty -- every iteration
    # above returns; only hit when the initial query matches no rows (and then
    # the function implicitly returns None, not a 4-tuple).
    print "find_nearby| DONE with func\n"
def construct_correction(OBJNAME,FILTER,PPRUN,sample,sample_size,OBJNAME_use=None,FILTER_use=None,PPRUN_use=None,r_ext=True): #step5_correct_ims #main (will be #intermediate if run_correction and find_nearby are fixed)
'''inputs: OBJNAME,FILTER,PPRUN,sample,sample_size,OBJNAME_use=None,FILTER_use=None,PPRUN_use=None,r_ext=True
returns: save starflat fits files
calls: save_fit,get_a_file,get_fits,save_fit,connect_except,describe_db,save_exposure,save_fit,save_fit,save_fit,save_fit
called_by: run_correction,select_analyze'''
print '\nconstruct_correction| START the func. inputs: OBJNAME=',OBJNAME , ' FILTER=',FILTER , ' PPRUN=',PPRUN , ' sample=',sample , ' sample_size=',sample_size,' OBJNAME_use=',OBJNAME_use , ' FILTER_use=',FILTER_use , ' PPRUN_use=',PPRUN_use , ' r_ext=',r_ext
if OBJNAME_use is None:
OBJNAME_use, FILTER_use, PPRUN_use = OBJNAME, FILTER, PPRUN
save_fit({'PPRUN':PPRUN,'OBJNAME':OBJNAME,'FILTER':FILTER,'sample':'record','sample_size':'record','correction_applied':'corrstarted','OBJNAME_use':OBJNAME_use,'FILTER_use':FILTER_use,'PPRUN_use':PPRUN_use,'sample_use':sample,'time':str(time.localtime())},db='' + test + 'try_db')
try:
''' create chebychev polynomials '''
cheby_x = [{'n':'0x','f':lambda x,y:1.},{'n':'1x','f':lambda x,y:x},{'n':'2x','f':lambda | |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines TF Quantization API from SavedModel to SavedModel."""
import tempfile
from typing import Dict, Iterable, List, Mapping, Optional, Set, Tuple
import uuid
import warnings
import numpy as np
# pylint: disable=invalid-import-order,g-bad-import-order
from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import
from tensorflow.compiler.mlir.quantization.tensorflow.python import pywrap_quantize_model as quantize_model_wrapper
from tensorflow.compiler.mlir.quantization.tensorflow.python import representative_dataset as repr_dataset
from tensorflow.compiler.mlir.quantization.tensorflow import quantization_options_pb2 as quant_opts_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder
from tensorflow.python.saved_model import loader_impl as saved_model_loader
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model.load import load as saved_model_load
from tensorflow.python.trackable import autotrackable
from tensorflow.python.types import core
# The signature key of the saved model init op.
_INIT_OP_SIGNATURE_KEY = '__saved_model_init_op'  # excluded when collecting user-facing signatures

# Type aliases for quant_opts_pb2 messages (shorthand for the nested enum types).
_Method = quant_opts_pb2.QuantizationMethod.Method
_ExperimentalMethod = quant_opts_pb2.QuantizationMethod.ExperimentalMethod
def _legalize_tensor_name(tensor_name: str) -> str:
"""Converts tensor name from 'name:index' to 'name__index' format."""
return tensor_name.replace(':', '__')
def _is_qat_saved_model(saved_model_path: str) -> bool:
  """Checks if the SavedModel is QAT-enabled by looking for 'FakeQuant' ops.

  Both the top-level GraphDef nodes and every function in the graph's
  function library are scanned, across all meta graphs.

  Args:
    saved_model_path: Path to the SavedModel directory.

  Returns:
    True if any node in any meta graph (or its function library) is a
    FakeQuant* op, False otherwise.
  """
  saved_model_proto = saved_model_loader.parse_saved_model(saved_model_path)
  for meta_graph in saved_model_proto.meta_graphs:
    if any(
        node.op.startswith('FakeQuant') for node in meta_graph.graph_def.node):
      return True
    # FakeQuant ops may also live inside tf.functions captured in the library.
    for function in meta_graph.graph_def.library.function:
      if any(node.op.startswith('FakeQuant') for node in function.node_def):
        return True
  return False
def _get_signatures_from_saved_model(saved_model_path: str,
                                     signature_keys: Optional[List[str]] = None,
                                     tags: Optional[Set[str]] = None):
  """Gets a map from signature keys to their SignatureDef from a saved model.

  Args:
    saved_model_path: Path to the SavedModel directory.
    signature_keys: Keys to keep; when None, all signatures are returned.
    tags: MetaGraphDef tags; defaults to the serving tag set.

  Returns:
    Dict mapping each (kept) signature key to its SignatureDef. The internal
    init-op signature is always excluded.

  Raises:
    RuntimeError: If no MetaGraphDef matches `tags`.
  """
  if tags is None:
    tags = set([tag_constants.SERVING])
  loader = saved_model_loader.SavedModelLoader(saved_model_path)
  try:
    meta_graphdef = loader.get_meta_graph_def_from_tags(tags)
  except RuntimeError as runtime_error:
    raise RuntimeError(
        f'Failed to retrieve MetaGraphDef with tags {tags}'
        f' from a SavedModel in {saved_model_path}.') from runtime_error

  signatures = {}
  for key, signature_def in meta_graphdef.signature_def.items():
    # skip the loader-internal init-op signature
    if key == _INIT_OP_SIGNATURE_KEY:
      continue
    # honor the caller's filter, if any
    if signature_keys is not None and key not in signature_keys:
      continue
    signatures[key] = signature_def

  return signatures
def _fix_tensor_names(signatures, exported_graph):
  """Tries fixing tensor names in the signatures to match the exported graph.

  The output tensor names in the original graph usually become names of the
  return nodes in the exported graph. This function tries to fix that and checks
  if the input tensor names are found in the exported graph.

  Args:
    signatures: the signatures of the original graph.
    exported_graph: The PTQ-exported GraphDef.

  Returns:
    Fixed signatures or None if it couldn't be fixed.
  """
  if signatures is None:
    return None

  # The InsertMainFunctionPass populates input and output nodes of the newly
  # inserted main function with "tf_saved_model.index_path" attributes. These
  # attributes can be used to identify outputs in the exported graph.
  output_index_path_map = {}
  for op in exported_graph.get_operations():
    if (op.type == '_Retval' and
        op.get_attr('tf_saved_model.index_path') is not None):
      index_path_name = op.get_attr('tf_saved_model.index_path')[0]
      index_path_name = index_path_name.decode('utf-8')
      # map the signature-level output name to the tensor feeding the retval
      output_index_path_map[index_path_name] = op.inputs[0].name

  for signature_def in signatures.values():
    for tensor_info in signature_def.inputs.values():
      try:
        # inputs only need to be verified, not renamed
        exported_graph.get_tensor_by_name(tensor_info.name)
      except KeyError:
        # If input tensors are not found, the signatures can't be used for the
        # exported graph.
        warnings.warn('Cannot find the tensor with name %s in the graph.' %
                      tensor_info.name)
        return None

    for tensor_info in signature_def.outputs.values():
      try:
        if tensor_info.name in output_index_path_map:
          tensor_info.name = output_index_path_map[tensor_info.name]
        else:
          # Tries to find the return node with the given name and use its input
          # as the output tensor name.
          return_node = exported_graph.get_operation_by_name(
              _legalize_tensor_name(tensor_info.name))
          tensor_info.name = return_node.inputs[0].name
      except KeyError:
        warnings.warn(
            'Cannot find the tensor or node with name %s in the graph.' %
            tensor_info.name)
        return None

  return signatures
def _get_signature_key_and_input(
representative_sample: repr_dataset.RepresentativeSample,
signature_keys: List[str],
) -> Tuple[str, Mapping[str, core.Tensor]]:
"""Gets the signature key and input data from `representative_sample`.
The `representative_sample` can be in two formats:
1. A tuple of: (signature_key, {input_name -> input_tensor})
2. A dict: {input_name -> input_tensor}.
(2) assumes the signature_key to be the default signature key (first item in
`signature_keys`).
Args:
representative_sample: A single sample from the representative dataset, used
for calibration.
signature_keys: A list of signature keys that identifies a function to run
the data samples with. When the `representative_sample` is provided as a
`dict`, it should have a single item.
Returns:
signature_key: Signature key that indicates the function to be used for the
returned input data.
input data: A input_name -> input_tensor mapping (dict).
Raises:
ValueError: When the format of `representative_sample` is invalid, or when
the length of `signature_keys` not 1 when `representative_sample` is `dict`.
"""
if isinstance(representative_sample, tuple):
if (not isinstance(representative_sample[1], dict) or
len(representative_sample) != 2):
raise ValueError('You need to provide a dictionary with input '
'names and values in the second argument in the '
'tuple')
return representative_sample
elif isinstance(representative_sample, dict):
if len(signature_keys) > 1:
raise ValueError('When the model has multiple signatures, you need '
'to provide a tuple with signature key and a '
'dictionary with input names and values')
return signature_keys[0], representative_sample
else:
raise ValueError('You need to provide either a dictionary with input '
'names and values or a tuple with signature key and a '
'dictionary with input names and values')
def _create_feed_dict_from_input_data(
    input_data: Mapping[str, core.Tensor],
    signature_def: meta_graph_pb2.SignatureDef) -> Dict[str, np.ndarray]:
  """Constructs a feed_dict from input data.

  Note: This function should only be used in graph mode.

  This is a helper function that converts an 'input key -> input tensor' mapping
  to a feed dict. A feed dict is an 'input tensor name -> input data' mapping
  and can be directly passed to the `feed_dict` argument of `sess.run()`.

  Args:
    input_data: Input key -> input tensor mapping. The input keys should match
      the input keys of `signature_def`.
    signature_def: A SignatureDef representing the function that `input_data` is
      an input to.

  Raises:
    KeyError: When the input key provided from `input_data` does not exist as
      one of `signature_def`'s input keys.

  Returns:
    Feed dict, which is intended to be used as input for `sess.run`. It is
    essentially a mapping: input tensor name -> tensor data.
  """
  signature_inputs = signature_def.inputs
  feed_dict = {}
  for key, tensor in input_data.items():
    # Membership is checked explicitly (LBYL) rather than via indexing:
    # a protobuf map auto-inserts a default entry on `[]` access.
    if key not in signature_inputs:
      raise KeyError(f"Invalid input key '{key}'. Available input keys"
                     f' are: {list(signature_inputs.keys())}.')
    # `.eval()` materializes the tensor value (graph mode only).
    feed_dict[signature_inputs[key].name] = tensor.eval()
  return feed_dict
def _run_graph_for_calibration_graph_mode(
    model_dir: str, signature_keys: List[str], tags: Set[str],
    representative_dataset: repr_dataset.RepresentativeDataset) -> None:
  """Runs the graph for calibration in graph mode.

  This function assumes _graph mode_ (used when legacy TF1 is used or when eager
  mode is explicitly disabled) when running the graph. This step is used in
  order to collect the statistics in CustomAggregatorOp for quantization using
  the representative dataset for the actual data provided for inference.

  Args:
    model_dir: Path to SavedModel directory.
    signature_keys: A list of signature keys that identifies a function to run
      the data samples with.
    tags: Set of tags identifying the MetaGraphDef within the SavedModel.
    representative_dataset: Representative dataset used for calibration.

  Raises:
    ValueError: When the samples in representative dataset is invalid.
  """
  with session.Session() as sess:
    meta_graph: meta_graph_pb2.MetaGraphDef = saved_model_loader.load(
        sess, tags, export_dir=model_dir)

    for sample in representative_dataset:
      # each sample may carry its own signature key (tuple form) or use the default
      signature_key, input_data = _get_signature_key_and_input(
          sample, signature_keys)

      sig_def = meta_graph.signature_def[signature_key]
      output_tensor_names = [
          output_tensor_info.name
          for output_tensor_info in sig_def.outputs.values()
      ]

      # Create a mapping from input tensor name to the input tensor value.
      # ex) "Placeholder:0" -> [0, 1, 2]
      try:
        feed_dict = _create_feed_dict_from_input_data(input_data, sig_def)
      except KeyError as key_error:
        raise ValueError(f'Invalid input data for signature: {signature_key}.'
                        ) from key_error

      # Running the outputs executes the CustomAggregator ops, which record
      # the tensor statistics needed for calibration as a side effect.
      sess.run(output_tensor_names, feed_dict=feed_dict)
def _run_graph_for_calibration_eager_mode(
model_dir: str, signature_keys: List[str], tags: Set[str],
representative_dataset: repr_dataset.RepresentativeDataset) -> None:
"""Runs the graph for calibration in eager mode.
This function assumes _eager mode_ (enabled in TF2 by default) when running
the graph. This step is used in order to collect the statistics in
CustomAggregatorOp for quantization using the representative dataset for the
actual data provided for inference.
Args:
model_dir: Path to SavedModel directory.
signature_keys: A list of signature keys that identifies a function to run
the data samples with.
tags: Set of tags identifying the MetaGraphDef within the SavedModel.
representative_dataset: Representative dataset used for calibration.
Raises:
| |
<filename>transformations/multilingual_backtranslation/transformation.py
from nltk import edit_distance
from transformers import MarianMTModel, MarianTokenizer
from interfaces.SentenceOperation import SentenceOperation
from tasks.TaskTypes import TaskType
from transformations.multilingual_backtranslation.helpers.supported_languages import (
SUPPORTED_LANGUAGES,
check_support,
)
# This codebase is based on the previous backtranslation codebase.
# https://github.com/GEM-benchmark/NL-Augmenter/tree/main/transformations/back_translation
class MultilingualBacktranslation(SentenceOperation):
    """Multilingual backtranslation: paraphrase a sentence by translating it
    into an intermediate language and back using Helsinki-NLP MarianMT models.

    Supports backtranslation for 232 language codes (201 unique languages).

    Args:
        src_lang_code: The ISO-code of the original language
        tgt_lang_code: The ISO-code of the intermediate language
        seed: The seed for the random generator
        max_outputs: The maximum number of outputs to return
        num_beams: The number of beams to use for beam search
        use_larger_model_if_available: If true, a larger, multilingual model is
            used if available. If false, the smaller model is used if available.
        verbose: If true, prints out progress and the decoded outputs
        sort_by_edit_distance: If true, outputs are returned ordered by edit
            distance from the input sentence (closest first)
    """

    tasks = [TaskType.TEXT_CLASSIFICATION, TaskType.TEXT_TO_TEXT_GENERATION]
    languages = SUPPORTED_LANGUAGES
    heavy = True

    def __init__(
        self,
        src_lang_code: str = "en",
        tgt_lang_code: str = "es",
        seed: int = 4,
        max_outputs: int = 1,
        num_beams: int = 4,
        use_larger_model_if_available: bool = True,
        verbose: bool = False,
        sort_by_edit_distance: bool = True,
    ):
        super().__init__(seed, max_outputs=max_outputs)
        # FIX: verbose was never stored; later `self.verbose` reads in
        # __init__, src2tgt and tgt2src raised AttributeError.
        self.verbose = verbose
        self.sort_by_edit_distance = sort_by_edit_distance
        assert max_outputs <= num_beams, "max_outputs must be <= num_beams"
        # check_support may remap the requested codes (e.g. onto a larger
        # multilingual checkpoint) and returns optional target-language
        # prefix tokens (self.src / self.tgt) needed by multilingual models.
        (
            self.src,
            self.tgt,
            self.src_lang_code,
            self.tgt_lang_code,
        ) = check_support(
            src_lang_code, tgt_lang_code, use_larger_model_if_available
        )
        if self.verbose:
            print(
                f"Starting to load {src_lang_code} to {tgt_lang_code} Translation Model.\n"
            )
        src_model_name = (
            f"Helsinki-NLP/opus-mt-{self.src_lang_code}-{self.tgt_lang_code}"
        )
        tgt_model_name = (
            f"Helsinki-NLP/opus-mt-{self.tgt_lang_code}-{self.src_lang_code}"
        )
        self.tokenizer_src_tgt = MarianTokenizer.from_pretrained(
            src_model_name
        )
        self.model_src_tgt = MarianMTModel.from_pretrained(src_model_name)
        if self.verbose:
            print(
                f"Completed loading {self.src_lang_code} to {self.tgt_lang_code} Translation Model.\n"
            )
            print(
                f"Starting to load {self.tgt_lang_code} to {self.src_lang_code} Translation Model:"
            )
        # Reuse the forward model/tokenizer when both directions resolve to
        # the same (multilingual) checkpoint.
        self.tokenizer_tgt_src = (
            MarianTokenizer.from_pretrained(tgt_model_name)
            if tgt_model_name != src_model_name
            else self.tokenizer_src_tgt
        )
        self.model_tgt_src = (
            MarianMTModel.from_pretrained(tgt_model_name)
            if tgt_model_name != src_model_name
            else self.model_src_tgt
        )
        self.num_beams = num_beams
        if self.verbose:
            # FIX: message previously hard-coded "German to English".
            print(
                f"Completed loading {self.tgt_lang_code} to {self.src_lang_code} Translation Model.\n"
            )

    def back_translate(self, src_sentence: str):
        """Translate src_sentence to the intermediate language and back.

        Returns a list with one entry per intermediate translation; each
        entry is the list of candidate backtranslations for it.
        """
        # Multilingual checkpoints require a target-language prefix token;
        # bilingual ones do not (check_support returns None in that case).
        src_sentence = (
            self.tgt + src_sentence if self.tgt is not None else src_sentence
        )
        intermediate = self.src2tgt(src_sentence)
        intermediate = [
            self.src + x if self.src is not None else x for x in intermediate
        ]
        return [self.tgt2src(sentence) for sentence in intermediate]

    def src2tgt(self, input):
        """Translate *input* into the intermediate language.

        Returns a list of candidate translations.
        """
        input_ids = self.tokenizer_src_tgt.encode(input, return_tensors="pt")
        # NOTE(review): the forward pass uses fixed beam settings (3 of 5
        # beams) instead of self.num_beams/self.max_outputs — presumably to
        # diversify the intermediates; confirm before changing.
        outputs = self.model_src_tgt.generate(
            input_ids, num_return_sequences=3, num_beams=5
        )
        decoded = [
            self.tokenizer_src_tgt.decode(output, skip_special_tokens=True)
            for output in outputs
        ]
        if self.verbose:
            print(decoded)  # Maschinelles Lernen ist großartig, oder?
        return decoded

    def tgt2src(self, input):
        """Translate *input* back into the source language.

        Returns up to self.max_outputs candidate backtranslations.
        """
        input_ids = self.tokenizer_tgt_src.encode(input, return_tensors="pt")
        outputs = self.model_tgt_src.generate(
            input_ids,
            num_return_sequences=self.max_outputs,
            num_beams=self.num_beams,
        )
        # TODO: this should be able to return multiple sequences
        predicted_outputs = [
            self.tokenizer_tgt_src.decode(output, skip_special_tokens=True)
            for output in outputs
        ]
        if self.verbose:
            print(predicted_outputs)  # Machine learning is great, isn't it?
        return predicted_outputs

    def generate(self, sentence: str):
        """Return up to max_outputs backtranslated paraphrases of *sentence*.

        Candidates identical to the input are discarded; optionally the
        remainder is sorted by edit distance from the input.
        """
        perturbs = self.back_translate(sentence)
        out = [x[0] for x in perturbs if x[0] != sentence]
        if self.sort_by_edit_distance:
            out = sorted(out, key=lambda x: edit_distance(x, sentence))
        return out[: self.max_outputs]
if __name__ == "__main__":
    # Manual smoke test for the default English -> Spanish round trip.
    backtranslator = MultilingualBacktranslation()
    sentence = "Neuroplasticity is a continuous processing allowing short-term, medium-term, and long-term remodeling of the neuronosynaptic organization."
    print(backtranslator.generate(sentence))
SUPPORTED_LANGUAGE_PAIRS = [
("es", "et"),
("lt", "pl"),
("csg", "es"),
("ar", "es"),
("crs", "fr"),
("ee", "sv"),
("de", "he"),
("fr", "tw"),
("es", "mt"),
("fi", "iso"),
("ee", "fi"),
("pis", "sv"),
("bg", "ru"),
("de", "eu"),
("en", "nso"),
("fi", "he"),
("en", "tiv"),
("en", "ur"),
("cus", "en"),
("af", "nl"),
("ca", "uk"),
("aav", "en"),
("eo", "fi"),
("de", "ln"),
("en", "to"),
("it", "uk"),
("it", "vi"),
("en", "pag"),
("sv", "tll"),
("en", "tn"),
("cpf", "en"),
("bi", "fr"),
("ru", "sl"),
("iso", "sv"),
("eo", "pt"),
("en", "jap"),
("af", "fi"),
("en", "lun"),
("en", "hy"),
("en", "fi"),
("en", "lue"),
("es", "sn"),
("en", "ho"),
("fi", "nso"),
("fr", "yap"),
("en", "xh"),
("es", "hr"),
("fr", "swc"),
("en", "sn"),
("es", "fi"),
("fr", "tn"),
("fr", "guw"),
("fr", "ht"),
("fr", "zne"),
("fr", "sg"),
("es", "swc"),
("fi", "guw"),
("en", "sm"),
("fi", "ht"),
("fr", "ig"),
("de", "fr"),
("eo", "es"),
("hy", "ru"),
("es", "nso"),
("en", "ga"),
("da", "de"),
("fr", "pap"),
("kwy", "sv"),
("sv", "yo"),
("ase", "de"),
("bem", "fi"),
("ar", "ru"),
("chk", "en"),
("sl", "sv"),
("lv", "sv"),
("dra", "en"),
("en", "mt"),
("es", "rw"),
("de", "ee"),
("ro", "sv"),
("efi", "fi"),
("de", "ms"),
("fi", "mk"),
("sv", "tum"),
("es", "eu"),
("fr", "to"),
("de", "it"),
("fr", "tum"),
("sh", "uk"),
("fr", "niu"),
("en", "lg"),
("en", "ml"),
("ar", "tr"),
("en", "eo"),
("en", "kwy"),
("en", "run"),
("en", "nl"),
("en", "lu"),
("de", "hu"),
("bg", "fr"),
("fi", "sg"),
("fr", "loz"),
("eo", "sv"),
("fi", "pag"),
("cs", "fi"),
("en", "tll"),
("bzs", "es"),
("es", "yo"),
("fr", "rw"),
("bem", "fr"),
("efi", "sv"),
("ha", "sv"),
("fr", "pon"),
("eo", "he"),
("bcl", "fr"),
("fi", "no"),
("fr", "kwy"),
("en", "zlw"),
("es", "tvl"),
("crs", "fi"),
("ee", "fr"),
("sv", "tw"),
("es", "guw"),
("es", "ht"),
("en", "sv"),
("es", "lus"),
("fi", "war"),
("en", "kj"),
("bcl", "de"),
("en", "grk"),
("en", "umb"),
("es", "nl"),
("fr", "tvl"),
("cs", "sv"),
("ca", "it"),
("it", "ms"),
("ru", "sv"),
("nl", "sv"),
("sv", "xh"),
("bcl", "es"),
("en", "gaa"),
("en", "nic"),
("ca", "nl"),
("he", "ru"),
("cs", "uk"),
("sk", "sv"),
("sv", "tpi"),
("fr", "he"),
("hu", "sv"),
("de", "lt"),
("fi", "lu"),
("fi", "ts"),
("fi", "ilo"),
("fi", "ru"),
("es", "ilo"),
("fr", "tll"),
("ase", "fr"),
("en", "pqe"),
("et", "sv"),
("bi", "sv"),
("en", "ha"),
("en", "ny"),
("fi", "pon"),
("en", "kg"),
("kqn", "sv"),
("en", "pon"),
("ase", "sv"),
("de", "pag"),
("ber", "en"),
("en", "pap"),
("da", "es"),
("af", "en"),
("eo", "pl"),
("es", "niu"),
("az", "tr"),
("fr", "mt"),
("es", "sm"),
("pt", "tl"),
("afa", "en"),
("es", "tll"),
("lt", "tr"),
("en", "ng"),
("fi", "zne"),
("en", "itc"),
("fi", "st"),
("de", "fi"),
("fi", "yo"),
("fi", "mh"),
("gl", "pt"),
("el", "fr"),
("fi", "ha"),
("ceb", "fi"),
("fr", "ms"),
("sv", "yap"),
("en", "tl"),
("is", "sv"),
("es", "vi"),
("en", "zh"),
("en", "zle"),
("bzs", "fi"),
("fi", "lue"),
("fi", "tvl"),
("fi", "hu"),
("sv", "ts"),
("he", "sv"),
("fi", "gaa"),
("fr", "ts"),
("de", "guw"),
("cel", "en"),
("es", "ig"),
("es", "ve"),
("es", "tl"),
("bg", "it"),
("de", "ig"),
("en", "eu"),
("es", "st"),
("st", "sv"),
("sv", "to"),
("fi", "sl"),
("en", "loz"),
("en", "mos"),
("csn", "es"),
("ar", "el"),
("ar", "en"),
("az", "en"),
("en", "mr"),
("en", "lua"),
("es", "rn"),
("de", "ny"),
("bcl", "fi"),
("bzs", "fr"),
("fr", "ru"),
("bcl", "sv"),
("eo", "fr"),
("cs", "de"),
("es", "xh"),
("en", "iso"),
("fi", "hr"),
("fi", "nl"),
("fr", "hu"),
("en", "phi"),
("af", "eo"),
("en", "gv"),
("en", "sal"),
("fr", "gaa"),
("sv", "toi"),
("en", "fr"),
("fi", "toi"),
("crs", "sv"),
("fr", "xh"),
("es", "tw"),
("es", "pag"),
("de", "vi"),
("fi", "swc"),
("ar", "fr"),
("es", "to"),
("da", "eo"),
("es", "ru"),
("no", "pl"),
("fr", "rnd"),
("en", "id"),
("da", "no"),
("rw", "sv"),
("en", "mg"),
("fi", "niu"),
("ceb", "sv"),
("es", "pap"),
("ca", "de"),
("ilo", "sv"),
("de", "loz"),
("no", "ru"),
("es", "ny"),
("fi", "pap"),
("es", "pl"),
("eo", "ro"),
("fi", "ty"),
("chk", "sv"),
("fr", "ro"),
("es", "tzo"),
("es", "sg"),
("fi", "id"),
("id", "sv"),
("de", "no"),
("lt", "ru"),
("fi", "ro"),
("en", "euq"),
("en", "sem"),
("de", "eo"),
("es", "id"),
("en", "ru"),
("eo", "sh"),
("fr", "hr"),
("ase", "es"),
("ar", "de"),
("et", "fi"),
("fr", "pl"),
("sn", "sv"),
("sv", "zne"),
("fr", "lue"),
("sv", "swc"),
("da", "ru"),
("ca", "pt"),
("de", "tl"),
("en", "iir"),
("crs", "en"),
("el", "fi"),
("bnt", "en"),
("fr", "yo"),
("fr", "sv"),
("es", "gil"),
("ru", "uk"),
("fr", "lg"),
("ru", "vi"),
("en", "guw"),
("en", "ht"),
("alv", "en"),
("fr", "sk"),
("en", "om"),
("eo", "nl"),
("en", "gmq"),
("fr", "sm"),
("rnd", "sv"),
("en", "ss"),
("es", "uk"),
("fi", "sk"),
("el", "sv"),
("en", "rw"),
("sg", "sv"),
("pl", "uk"),
("en", "uk"),
("eo", "hu"),
("fi", "lua"),
("fi", "uk"),
("de", "ilo"),
("ar", "pl"),
("sv", "tn"),
("tr", "uk"),
("sv", "war"),
("ee", "es"),
("fi", "lv"),
("bat", "en"),
("et", "ru"),
("de", "et"),
("en", "gil"),
("niu", "sv"),
("sv", "wls"),
("en", "kwn"),
("fi", "gil"),
("af", "de"),
("af", "ru"),
("no", "uk"),
("de", "gaa"),
("fr", "wls"),
("lua", "sv"),
("en", "sk"),
("en", "gl"),
("hu", "uk"),
("es", "he"),
("es", "fr"),
("ee", "en"),
("fr", "iso"),
("en", "ig"),
("en", "hi"),
("fr", "nso"),
("crs", "de"),
("aed", "es"),
("fi", "hil"),
("sv", "ty"),
("en", "es"),
("sv", "uk"),
("en", "ti"),
("en", "is"),
("en", "mh"),
("ceb", "en"),
("de", "en"),
("el", "eo"),
("en", "mfe"),
("es", "ty"),
("fi", "lg"),
("lg", "sv"),
("pag", "sv"),
("es", "tn"),
("bzs", "en"),
("en", "fiu"),
("cs", "eo"),
("en", "kqn"),
("nso", "sv"),
("en", "ine"),
("bcl", "en"),
("bem", "sv"),
("efi", "fr"),
("en", "niu"),
("es", "lt"),
("es", "lua"),
("nl", "no"),
("fr", "uk"),
("en", "inc"),
("en", "pis"),
| |
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""Test the modern PyGreSQL interface.
Sub-tests for the copy methods.
Contributed by <NAME>.
These tests need a database to test against.
"""
try:
    import unittest2 as unittest  # for Python < 2.7
except ImportError:
    import unittest

try:
    # FIX: the abstract base classes live in collections.abc since 3.3 and
    # the plain `collections` aliases were removed in Python 3.10.
    from collections.abc import Iterable
except ImportError:  # Python < 3.3
    from collections import Iterable

import pgdb  # the module under test

# We need a database to test against. If LOCAL_PyGreSQL.py exists we will
# get our information from that. Otherwise we use the defaults.
# The current user must have create schema privilege on the database.
dbname = 'unittest'
dbhost = None
dbport = 5432

try:
    from .LOCAL_PyGreSQL import *
except (ImportError, ValueError):
    try:
        from LOCAL_PyGreSQL import *
    except ImportError:
        pass

try:
    unicode
except NameError:  # Python >= 3.0
    unicode = str
class InputStream:
    """A file-like source over a fixed byte buffer that records the size
    argument of every read() call in `sizes`."""

    def __init__(self, data):
        # Normalize to UTF-8 bytes; the copy API works on bytes.
        if isinstance(data, unicode):
            data = data.encode('utf-8')
        self.data = data or b''
        self.sizes = []

    def __str__(self):
        text = self.data
        if str is unicode:  # Python >= 3.0
            text = text.decode('utf-8')
        return text

    def __len__(self):
        # Number of bytes still unread.
        return len(self.data)

    def read(self, size=None):
        self.sizes.append(size)
        if size is None:
            chunk, self.data = self.data, b''
        else:
            chunk, self.data = self.data[:size], self.data[size:]
        return chunk
class OutputStream:
    """A file-like sink that accumulates bytes and records the size of every
    chunk written in `sizes`."""

    def __init__(self):
        self.data = b''
        self.sizes = []

    def __str__(self):
        text = self.data
        if str is unicode:  # Python >= 3.0
            text = text.decode('utf-8')
        return text

    def __len__(self):
        # Number of bytes written so far.
        return len(self.data)

    def write(self, data):
        # Normalize to UTF-8 bytes before appending.
        encoded = data.encode('utf-8') if isinstance(data, unicode) else data
        self.data += encoded
        self.sizes.append(len(encoded))
class TestStreams(unittest.TestCase):
    """Unit tests for the InputStream and OutputStream helpers themselves."""

    def test_input(self):
        stream = InputStream('Hello, Wörld!')
        self.assertIsInstance(stream.data, bytes)
        self.assertEqual(stream.data, b'Hello, W\xc3\xb6rld!')
        self.assertIsInstance(str(stream), str)
        self.assertEqual(str(stream), 'Hello, Wörld!')
        self.assertEqual(len(stream), 14)
        # Sized reads consume the buffer front-to-back.
        for size, expected in ((3, b'Hel'), (2, b'lo'), (1, b','), (1, b' ')):
            self.assertEqual(stream.read(size), expected)
        # An unsized read drains the remainder; further reads yield b''.
        self.assertEqual(stream.read(), b'W\xc3\xb6rld!')
        self.assertEqual(stream.read(), b'')
        self.assertEqual(len(stream), 0)
        self.assertEqual(stream.sizes, [3, 2, 1, 1, None, None])

    def test_output(self):
        stream = OutputStream()
        self.assertEqual(len(stream), 0)
        for chunk in ('Hel', 'lo', ',', ' ', 'Wörld!'):
            stream.write(chunk)
        self.assertIsInstance(stream.data, bytes)
        self.assertEqual(stream.data, b'Hello, W\xc3\xb6rld!')
        self.assertIsInstance(str(stream), str)
        self.assertEqual(str(stream), 'Hello, Wörld!')
        self.assertEqual(len(stream), 14)
        # 'Wörld!' is 7 bytes once UTF-8 encoded.
        self.assertEqual(stream.sizes, [3, 2, 1, 1, 7])
class TestCopy(unittest.TestCase):
    """Shared fixture for the copy_from/copy_to tests.

    Creates (and finally drops) a two-column ``copytest`` table and provides
    sample rows plus helpers to check the table contents against them.
    """

    # Flipped to True once setUpClass completed; setUp asserts it so tests
    # fail loudly if the class-level fixture never ran.
    cls_set_up = False

    @staticmethod
    def connect():
        # NOTE(review): host is passed as "host:port"; port -1 is used when
        # dbport is falsy — presumably pgdb maps that to the default port.
        return pgdb.connect(database=dbname,
            host='%s:%d' % (dbhost or '', dbport or -1))

    @classmethod
    def setUpClass(cls):
        # Create a fresh two-column test table.
        con = cls.connect()
        cur = con.cursor()
        cur.execute("set client_min_messages=warning")
        cur.execute("drop table if exists copytest cascade")
        cur.execute("create table copytest ("
            "id smallint primary key, name varchar(64))")
        cur.close()
        con.commit()
        cur = con.cursor()
        try:
            # Probe whether the server can handle UTF-8 encoded data.
            cur.execute("set client_encoding=utf8")
            cur.execute("select 'Plácido and José'").fetchone()
        except (pgdb.DataError, pgdb.NotSupportedError):
            # Fall back to ASCII-only sample rows on servers that cannot
            # encode UTF-8 (mutates the class-level `data` list in place).
            cls.data[1] = (1941, 'Plaacido Domingo')
            cls.data[2] = (1946, '<NAME>')
            cls.can_encode = False
        cur.close()
        con.close()
        cls.cls_set_up = True

    @classmethod
    def tearDownClass(cls):
        # Drop the test table again.
        con = cls.connect()
        cur = con.cursor()
        cur.execute("set client_min_messages=warning")
        cur.execute("drop table if exists copytest cascade")
        con.commit()
        con.close()

    def setUp(self):
        self.assertTrue(self.cls_set_up)
        self.con = self.connect()
        self.cursor = self.con.cursor()
        self.cursor.execute("set client_encoding=utf8")

    def tearDown(self):
        # Best-effort cleanup: swallow errors so a failing teardown step
        # does not mask the actual test failure.
        try:
            self.cursor.close()
        except Exception:
            pass
        try:
            self.con.rollback()
        except Exception:
            pass
        try:
            self.con.close()
        except Exception:
            pass

    # Sample rows; may be rewritten by setUpClass when the server cannot
    # encode UTF-8.
    data = [(1935, '<NAME>'),
        (1941, 'Plácido Domingo'),
        (1946, '<NAME>')]
    # Whether the server accepted UTF-8 (cleared in setUpClass on failure).
    can_encode = True

    @property
    def data_text(self):
        # The sample rows in PostgreSQL COPY text format.
        return ''.join('%d\t%s\n' % row for row in self.data)

    @property
    def data_csv(self):
        # The sample rows in CSV format.
        return ''.join('%d,%s\n' % row for row in self.data)

    def truncate_table(self):
        self.cursor.execute("truncate table copytest")

    @property
    def table_data(self):
        # Current contents of the test table.
        self.cursor.execute("select * from copytest")
        return self.cursor.fetchall()

    def check_table(self):
        self.assertEqual(self.table_data, self.data)

    def check_rowcount(self, number=len(data)):
        # NOTE: the default binds len(data) at class-definition time.
        self.assertEqual(self.cursor.rowcount, number)
class TestCopyFrom(TestCopy):
    """Test the copy_from method."""

    def tearDown(self):
        # Close the test connection, then re-open just to empty the table
        # the test filled, and close again.
        super(TestCopyFrom, self).tearDown()
        self.setUp()
        self.truncate_table()
        super(TestCopyFrom, self).tearDown()

    def copy_from(self, stream, **options):
        # Convenience wrapper: all tests copy into the fixture table.
        return self.cursor.copy_from(stream, 'copytest', **options)

    @property
    def data_file(self):
        # A fresh instrumented stream containing the sample rows as text.
        return InputStream(self.data_text)

    def test_bad_params(self):
        call = self.cursor.copy_from
        # Two valid calls first (smoke checks) — NOTE(review): the trailing
        # `, self.cursor` makes the first line a discarded tuple; presumably
        # a leftover from an intended assertIs — confirm.
        call('0\t', 'copytest'), self.cursor
        call('1\t', 'copytest',
            format='text', sep='\t', null='', columns=['id', 'name'])
        # Then each way the argument validation should reject the call.
        self.assertRaises(TypeError, call)
        self.assertRaises(TypeError, call, None)
        self.assertRaises(TypeError, call, None, None)
        self.assertRaises(TypeError, call, '0\t')
        self.assertRaises(TypeError, call, '0\t', None)
        self.assertRaises(TypeError, call, '0\t', 42)
        self.assertRaises(TypeError, call, '0\t', ['copytest'])
        self.assertRaises(TypeError, call, '0\t', 'copytest', format=42)
        self.assertRaises(ValueError, call, '0\t', 'copytest', format='bad')
        self.assertRaises(TypeError, call, '0\t', 'copytest', sep=42)
        self.assertRaises(ValueError, call, '0\t', 'copytest', sep='bad')
        self.assertRaises(TypeError, call, '0\t', 'copytest', null=42)
        self.assertRaises(ValueError, call, '0\t', 'copytest', size='bad')
        self.assertRaises(TypeError, call, '0\t', 'copytest', columns=42)
        self.assertRaises(ValueError, call, b'', 'copytest',
            format='binary', sep=',')

    def test_input_string(self):
        # copy_from returns the cursor itself (fluent style).
        ret = self.copy_from('42\tHello, world!')
        self.assertIs(ret, self.cursor)
        self.assertEqual(self.table_data, [(42, 'Hello, world!')])
        self.check_rowcount(1)

    def test_input_string_with_newline(self):
        self.copy_from('42\tHello, world!\n')
        self.assertEqual(self.table_data, [(42, 'Hello, world!')])
        self.check_rowcount(1)

    def test_input_string_multiple_rows(self):
        ret = self.copy_from(self.data_text)
        self.assertIs(ret, self.cursor)
        self.check_table()
        self.check_rowcount()

    if str is unicode:  # Python >= 3.0

        def test_input_bytes(self):
            self.copy_from(b'42\tHello, world!')
            self.assertEqual(self.table_data, [(42, 'Hello, world!')])
            self.truncate_table()
            self.copy_from(self.data_text.encode('utf-8'))
            self.check_table()

    else:  # Python < 3.0

        def test_input_unicode(self):
            if not self.can_encode:
                self.skipTest('database does not support utf8')
            self.copy_from(u'43\tWürstel, Käse!')
            self.assertEqual(self.table_data, [(43, 'Würstel, Käse!')])
            self.truncate_table()
            self.copy_from(self.data_text.decode('utf-8'))
            self.check_table()

    def test_input_iterable(self):
        self.copy_from(self.data_text.splitlines())
        self.check_table()
        self.check_rowcount()

    def test_input_iterable_invalid(self):
        self.assertRaises(IOError, self.copy_from, [None])

    def test_input_iterable_with_newlines(self):
        self.copy_from('%s\n' % row for row in self.data_text.splitlines())
        self.check_table()

    if str is unicode:  # Python >= 3.0

        def test_input_iterable_bytes(self):
            self.copy_from(row.encode('utf-8')
                for row in self.data_text.splitlines())
            self.check_table()

    def test_sep(self):
        stream = ('%d-%s' % row for row in self.data)
        self.copy_from(stream, sep='-')
        self.check_table()

    def test_null(self):
        # Default null marker is the COPY text format's \N.
        self.copy_from('0\t\\N')
        self.assertEqual(self.table_data, [(0, None)])
        self.assertIsNone(self.table_data[0][1])
        self.truncate_table()
        self.copy_from('1\tNix')
        self.assertEqual(self.table_data, [(1, 'Nix')])
        self.assertIsNotNone(self.table_data[0][1])
        self.truncate_table()
        # A custom null marker replaces the default.
        self.copy_from('2\tNix', null='Nix')
        self.assertEqual(self.table_data, [(2, None)])
        self.assertIsNone(self.table_data[0][1])
        self.truncate_table()
        # An empty field is an empty string, not NULL ...
        self.copy_from('3\t')
        self.assertEqual(self.table_data, [(3, '')])
        self.assertIsNotNone(self.table_data[0][1])
        self.truncate_table()
        # ... unless the empty string is declared as the null marker.
        self.copy_from('4\t', null='')
        self.assertEqual(self.table_data, [(4, None)])
        self.assertIsNone(self.table_data[0][1])

    def test_columns(self):
        # Columns may be given as a string or a list of names.
        self.copy_from('1', columns='id')
        self.copy_from('2', columns=['id'])
        self.copy_from('3\tThree')
        self.copy_from('4\tFour', columns='id, name')
        self.copy_from('5\tFive', columns=['id', 'name'])
        self.assertEqual(self.table_data, [
            (1, None), (2, None), (3, 'Three'), (4, 'Four'), (5, 'Five')])
        self.check_rowcount(5)
        # Unknown columns raise; rowcount is reset to -1 after the error.
        self.assertRaises(pgdb.ProgrammingError, self.copy_from,
            '6\t42', columns=['id', 'age'])
        self.check_rowcount(-1)

    def test_csv(self):
        self.copy_from(self.data_csv, format='csv')
        self.check_table()

    def test_csv_with_sep(self):
        stream = ('%d;"%s"\n' % row for row in self.data)
        self.copy_from(stream, format='csv', sep=';')
        self.check_table()
        self.check_rowcount()

    def test_binary(self):
        # Invalid binary signature must be rejected by the server.
        self.assertRaises(IOError, self.copy_from,
            b'NOPGCOPY\n', format='binary')
        self.check_rowcount(-1)

    def test_binary_with_sep(self):
        self.assertRaises(ValueError, self.copy_from,
            '', format='binary', sep='\t')

    def test_binary_with_unicode(self):
        self.assertRaises(ValueError, self.copy_from, u'', format='binary')

    def test_query(self):
        # copy_from targets a table; a query string is not allowed.
        self.assertRaises(ValueError, self.cursor.copy_from, '', "select null")

    def test_file(self):
        stream = self.data_file
        ret = self.copy_from(stream)
        self.assertIs(ret, self.cursor)
        self.check_table()
        self.assertEqual(len(stream), 0)
        # Default read chunk size is 8192 bytes.
        self.assertEqual(stream.sizes, [8192])
        self.check_rowcount()

    def test_size_positive(self):
        stream = self.data_file
        size = 7
        num_chunks = (len(stream) + size - 1) // size
        self.copy_from(stream, size=size)
        self.check_table()
        self.assertEqual(len(stream), 0)
        self.assertEqual(stream.sizes, [size] * num_chunks)
        self.check_rowcount()

    def test_size_negative(self):
        # A negative size means "read everything at once".
        stream = self.data_file
        self.copy_from(stream, size=-1)
        self.check_table()
        self.assertEqual(len(stream), 0)
        self.assertEqual(stream.sizes, [None])
        self.check_rowcount()

    def test_size_invalid(self):
        self.assertRaises(TypeError,
            self.copy_from, self.data_file, size='invalid')
class TestCopyTo(TestCopy):
"""Test the copy_to method."""
@classmethod
def setUpClass(cls):
super(TestCopyTo, cls).setUpClass()
con = cls.connect()
cur = con.cursor()
cur.execute("set client_encoding=utf8")
cur.execute("insert into copytest values (%d, %s)", cls.data)
cur.close()
con.commit()
con.close()
def copy_to(self, stream=None, **options):
return self.cursor.copy_to(stream, 'copytest', **options)
@property
def data_file(self):
return OutputStream()
def test_bad_params(self):
call = self.cursor.copy_to
call(None, 'copytest')
call(None, 'copytest',
format='text', sep='\t', null='', columns=['id', 'name'])
self.assertRaises(TypeError, call)
self.assertRaises(TypeError, call, None)
self.assertRaises(TypeError, call, None, 42)
self.assertRaises(TypeError, call, None, ['copytest'])
self.assertRaises(TypeError, call, 'bad', 'copytest')
self.assertRaises(TypeError, call, None, 'copytest', format=42)
self.assertRaises(ValueError, call, None, 'copytest', format='bad')
self.assertRaises(TypeError, call, None, 'copytest', sep=42)
self.assertRaises(ValueError, call, None, 'copytest', sep='bad')
self.assertRaises(TypeError, call, None, 'copytest', null=42)
self.assertRaises(TypeError, call, None, 'copytest', decode='bad')
self.assertRaises(TypeError, call, None, 'copytest', columns=42)
def test_generator(self):
ret = self.copy_to()
self.assertIsInstance(ret, Iterable)
rows = list(ret)
self.assertEqual(len(rows), 3)
rows = ''.join(rows)
self.assertIsInstance(rows, str)
self.assertEqual(rows, self.data_text)
self.check_rowcount()
if str is unicode: # Python >= 3.0
def test_generator_bytes(self):
ret = self.copy_to(decode=False)
self.assertIsInstance(ret, Iterable)
rows = list(ret)
self.assertEqual(len(rows), 3)
rows = b''.join(rows)
self.assertIsInstance(rows, bytes)
self.assertEqual(rows, self.data_text.encode('utf-8'))
else: # Python < 3.0
def test_generator_unicode(self):
ret = self.copy_to(decode=True)
self.assertIsInstance(ret, Iterable)
rows = list(ret)
self.assertEqual(len(rows), 3)
rows = ''.join(rows)
self.assertIsInstance(rows, unicode)
self.assertEqual(rows, self.data_text.decode('utf-8'))
def test_rowcount_increment(self):
ret = self.copy_to()
self.assertIsInstance(ret, Iterable)
for n, row in enumerate(ret):
self.check_rowcount(n + 1)
def test_decode(self):
ret_raw = b''.join(self.copy_to(decode=False))
ret_decoded = ''.join(self.copy_to(decode=True))
self.assertIsInstance(ret_raw, bytes)
self.assertIsInstance(ret_decoded, unicode)
self.assertEqual(ret_decoded, ret_raw.decode('utf-8'))
self.check_rowcount()
def test_sep(self):
ret = list(self.copy_to(sep='-'))
self.assertEqual(ret, ['%d-%s\n' % row for row in self.data])
def test_null(self):
data = ['%d\t%s\n' % row for row in self.data]
self.cursor.execute('insert into copytest values(4, null)')
try:
ret = list(self.copy_to())
self.assertEqual(ret, data + ['4\t\\N\n'])
ret = list(self.copy_to(null='Nix'))
self.assertEqual(ret, data + ['4\tNix\n'])
ret = list(self.copy_to(null=''))
self.assertEqual(ret, data + ['4\t\n'])
finally:
self.cursor.execute('delete from copytest where id=4')
def test_columns(self):
data_id = ''.join('%d\n' % row[0] for row in self.data)
data_name = ''.join('%s\n' % row[1] for row in self.data)
ret = ''.join(self.copy_to(columns='id'))
self.assertEqual(ret, data_id)
ret = ''.join(self.copy_to(columns=['id']))
self.assertEqual(ret, data_id)
ret = ''.join(self.copy_to(columns='name'))
self.assertEqual(ret, data_name)
ret = ''.join(self.copy_to(columns=['name']))
self.assertEqual(ret, data_name)
ret = ''.join(self.copy_to(columns='id, name'))
self.assertEqual(ret, self.data_text)
ret = ''.join(self.copy_to(columns=['id', 'name']))
self.assertEqual(ret, self.data_text)
self.assertRaises(pgdb.ProgrammingError, self.copy_to,
columns=['id', 'age'])
def test_csv(self):
ret = self.copy_to(format='csv')
self.assertIsInstance(ret, Iterable)
rows = list(ret)
self.assertEqual(len(rows), 3)
rows = ''.join(rows)
self.assertIsInstance(rows, str)
self.assertEqual(rows, self.data_csv)
self.check_rowcount(3)
def test_csv_with_sep(self):
rows = ''.join(self.copy_to(format='csv', sep=';'))
self.assertEqual(rows, self.data_csv.replace(',', ';'))
def test_binary(self):
ret = self.copy_to(format='binary')
self.assertIsInstance(ret, Iterable)
for row in ret:
self.assertTrue(row.startswith(b'PGCOPY\n\377\r\n\0'))
break
self.check_rowcount(1)
def test_binary_with_sep(self):
self.assertRaises(ValueError, self.copy_to, format='binary', sep='\t')
def test_binary_with_unicode(self):
self.assertRaises(ValueError, self.copy_to,
format='binary', decode=True)
def test_query(self):
self.assertRaises(ValueError, self.cursor.copy_to, None,
"select name from copytest", columns='noname')
ret = self.cursor.copy_to(None,
"select name||'!' from copytest where id=1941")
self.assertIsInstance(ret, Iterable)
rows = list(ret)
self.assertEqual(len(rows), 1)
self.assertIsInstance(rows[0], str)
self.assertEqual(rows[0], | |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python [conda env:bandit_py3]
# language: python
# name: conda-env-bandit_py3-py
# ---
# %% language="javascript"
# IPython.notebook.kernel.restart()
# %%
import math
import numpy as np
# %%
def perv_comp(soil_moist, pptp, ptc, carea_max, smidx_coef, smidx_exp, contrib_frac, infil, srp):
    """Compute pervious-area surface runoff for one water input (smidx method).

    Args:
        soil_moist: Current soil-moisture storage.
        pptp: Water available to become runoff from this event.
        ptc: Water used to form the antecedent soil-moisture index.
        carea_max: Maximum possible contributing-area fraction.
        smidx_coef: Coefficient of the non-linear contributing-area curve.
        smidx_exp: Exponent of the non-linear contributing-area curve.
        contrib_frac: Previous contributing fraction (recomputed here).
        infil: Current infiltration amount.
        srp: Accumulated surface runoff from the pervious area.

    Returns:
        Tuple (infil, srp, contrib_frac) with the runoff share of pptp
        moved from infiltration into srp.
    """
    # Antecedent soil-moisture index: storage plus half of the event water.
    # (Dead `smidx = 0.0` / `srpp = 0.0` pre-initializations removed.)
    smidx = soil_moist + (0.5 * ptc)
    contrib_frac = min(carea_max, smidx_coef * 10**(smidx_exp * smidx))
    srpp = contrib_frac * pptp
    infil -= srpp
    srp += srpp
    return (infil, srp, contrib_frac)
# %%
def check_capacity(soil_moist, soil_moist_max, snowinfil_max, infil, srp):
    """Cap infiltration at the free soil storage plus the maximum snow
    infiltration rate; any surplus is shifted into pervious runoff (srp).

    Returns the tuple (infil, srp).
    """
    spare = soil_moist_max - soil_moist
    surplus = infil - spare
    if surplus > snowinfil_max:
        # Route the part above the snow-infiltration cap to surface runoff.
        srp += surplus - snowinfil_max
        infil = snowinfil_max + spare
    return (infil, srp)
# %%
# Baseline inputs for a single hand-run of the PRMS surface-runoff logic.
# Commented-out values are from an alternative reference run.
infil = 0.0
avail_water = 0.0
# float32 machine epsilon, used as the "effectively zero" threshold.
NEARZERO = 1.1920929E-07
pptmix_nopack = False
imperv_stor = 0.0
imperv_stor_max = 0.05
snowinfil_max = 2.0000000
# snowmelt = 0.0264271
snowmelt = 0.0
# pkwater_equiv = 0.0687990
pkwater_equiv = 0.0
soil_moist = 0.6814164
soil_moist_max = 2.9048920
net_ppt = 0.1460000
net_rain = 0.0407064
net_snow = 0.1052936
# net_snow = 0.0
carea_max = 0.571828
smidx_coef = 0.020766
smidx_exp = 0.362845
contrib_frac = 0.0
srp = 0.0
# Sanity checks: rain + snow should reproduce net_ppt and vice versa.
print(net_rain + net_snow)
print(net_ppt - net_snow)

# %%
# Case: mixed rain/snow event with no antecedent snowpack.
# pptmix_nopack
if pptmix_nopack:
    infil += net_rain
    avail_water += net_rain
    infil, srp, contrib_frac = perv_comp(soil_moist, net_rain, net_rain, carea_max, smidx_coef, smidx_exp, contrib_frac, infil, srp)
print(f'infil: {infil}')
print(f'srp: {srp}')
print(f'contrib_frac: {contrib_frac}')
print(f'avail_water: {avail_water}')
# Expected values from the reference run:
# infil = 0.0391870
# srp = 0.0015193
# contrib_frac = 0.0373238

# %%
# Case: snowmelt present. Reference values from the Fortran run:
# snowmelt > 0
# pkwater_equiv, net_snow, soil_moist_max, snowinfil_max, imperv_stor, imperv_stor_max, avail_water
#  0.0687990 0.1052936 2.9048920 2.0000000 0.0000000 0.0500000 0.0671335
# soil_moist net_rain net_ppt snowmelt contrib_frac infil srp
# 0.6814164 0.0407064 0.1460000 0.0264271 0.0373238 0.0656142 0.0015193
if snowmelt > 0.0:
    avail_water += snowmelt
    infil += snowmelt
    print('==== snowmelt > 0.0 ====')
    if pkwater_equiv > 0.0 or net_ppt - net_snow == 0.0:
        # In addition to snowmelt, there is a snowpack and all of the precipitation fell as snow
        print(' -- call check_capacity()')
        # Pervious area computation
        infil, srp = check_capacity(soil_moist, soil_moist_max, snowinfil_max, infil, srp)
    else:
        # Snowmelt occurred which depleted the snowpack
        print(' -- call perv_comp()')
        infil, srp, contrib_frac = perv_comp(soil_moist, snowmelt, net_ppt, carea_max, smidx_coef, smidx_exp, contrib_frac, infil, srp)
        # print(f'contrib_frac: {contrib_frac}')
elif pkwater_equiv < NEARZERO:
    # There is NO snowmelt and NO snowpack
    print('==== pkwater_equiv < NEARZERO')
    if net_snow < NEARZERO and net_rain > 0.0:
        # All or almost all the precipitation fell as rain (any snow fall was lost to sublimation)
        avail_water += net_rain
        infil += net_rain
        print(' ==== net_snow < NEARZERO and net_rain > 0.0')
        infil, srp, contrib_frac = perv_comp(soil_moist, net_rain, net_rain, carea_max, smidx_coef, smidx_exp, contrib_frac, infil, srp)
    elif infil > 0.0:
        # NOTE(review): reached when there is no snowpack but leftover infil;
        # check whether infil exceeds the max snow infiltration rate.
        # The infiltration results from a rain/snow mix on a snow-free surface.
        print('==== infil > 0.0 ====')
        infil, srp = check_capacity(soil_moist, soil_moist_max, snowinfil_max, infil, srp)
# print(f'contrib_frac: {contrib_frac}')
print(f'infil: {infil}')
print(f'srp: {srp}')
print(f'contrib_frac: {contrib_frac}')
print(f'avail_water: {avail_water}')

# %%
# Re-run perv_comp on just the rain, starting from snowmelt as infiltration.
srp = 0.0
# infil = net_rain + snowmelt
infil = snowmelt
infil, srp, contrib_frac = perv_comp(soil_moist, net_rain, net_rain, carea_max, smidx_coef, smidx_exp, contrib_frac, infil, srp)
print(f'infil: {infil}')
print(f'srp: {srp}')
print(f'contrib_frac: {contrib_frac}')
# %%
def contrib_fraction_smidx(carea_max, smidx_coef, smidx_exp, soil_moist, precip):
    """Return the fractional contributing area for surface runoff, computed
    from the antecedent soil-moisture index and capped at carea_max."""
    # Antecedent soil-moisture index: storage plus half the precipitation.
    moisture_index = soil_moist + (0.5 * precip)
    uncapped = smidx_coef * 10.0**(smidx_exp * moisture_index)
    return min(carea_max, uncapped)
# %%
def compute_infil_srp(contrib_frac, precip, infil, perv_runoff):
    """Move the contributing-area share of *precip* from infiltration into
    pervious surface runoff; returns the tuple (infil, perv_runoff)."""
    runoff_share = contrib_frac * precip
    # perv_runoff = perv_runoff + dble(adjusted_precip)
    return (infil - runoff_share, perv_runoff + runoff_share)
# %%
# Example where pptmix_nopack is false
# We have snowmelt, but no snowpack and no precipitation (either net_ppt or net_snow)
#carea_max, smidx_coef, smidx_exp, soil_moist, net_ppt, soil_moist_max, snowinfil_max
# 0.5718280 0.0207660 0.3628450 2.7847483 0.0000000 2.9048920 2.0000000
carea_max = 0.5718280
smidx_coef = 0.0207660
smidx_exp = 0.3628450
soil_moist = 2.7847483
soil_moist_max = 2.9048920
net_rain = 0.0
net_ppt = 0.0
net_snow = 0.0
upslope_hortonian = 0.0
pkwater_equiv = 0.0
snowinfil_max = 2.0
snowmelt = 0.0443047
infil = snowmelt
srp = 0.0
use_cascades = False
pptmix_nopack = False

# %%
# The bare compute_infil_srp calls below discard their results; in a
# notebook the returned tuple is displayed as the cell output.
cfrac = contrib_fraction_smidx(carea_max, smidx_coef, smidx_exp, soil_moist, net_ppt)
compute_infil_srp(cfrac, snowmelt, infil, srp)

# %%
compute_infil_srp(cfrac, snowmelt, infil, srp)

# %%
infil, srp = check_capacity(soil_moist, soil_moist_max, snowinfil_max, infil, srp)
print(f'infil: {infil}')
print(f'srp: {srp}')
# %%
def compute_infil_test(use_cascades, pptmix_nopack, upslope_hortonian, net_ppt, net_rain, snowmelt, pkwater_equiv,
                       carea_max, smidx_coef, smidx_exp, soil_moist, soil_moist_max, infil, srp):
    """Experimental flattened rewrite of compute_infil: accumulate all water
    sources into infil, then apply the smidx split and the capacity cap once.

    Returns the tuple (infil, srp, cfrac).

    NOTE(review): reads `net_snow` and `snowinfil_max` from module-level
    globals rather than taking them as parameters — confirm before reuse.
    """
    if use_cascades:
        infil += max(0.0, upslope_hortonian)
    if pptmix_nopack:
        # Mixed event with no antecedent snowpack
        infil += net_rain
    if snowmelt > 0.0:
        infil += snowmelt
    elif pkwater_equiv == 0.0 and net_snow == 0.0 and net_rain > 0.0:
        infil += net_rain
    elif infil > 0.0:
        # Intentionally a no-op: infil already holds the available water.
        pass
    # Single contributing-fraction split followed by the storage-capacity cap.
    cfrac = contrib_fraction_smidx(carea_max, smidx_coef, smidx_exp, soil_moist, net_ppt)
    infil, srp = compute_infil_srp(cfrac, net_ppt, infil, srp)
    infil, srp = check_capacity(soil_moist, soil_moist_max, snowinfil_max, infil, srp)
    return (infil, srp, cfrac)
# %%
# Run the experimental flattened version on the example inputs above.
srp = 0.0
infil = 0.0
compute_infil_test(use_cascades, pptmix_nopack, upslope_hortonian, net_ppt, net_rain, snowmelt, pkwater_equiv,
                   carea_max, smidx_coef, smidx_exp, soil_moist, soil_moist_max, infil, srp)
# %%
def compute_infil(use_cascades, pptmix_nopack, upslope_hortonian, net_ppt, net_rain, snowmelt, pkwater_equiv,
                  carea_max, smidx_coef, smidx_exp, soil_moist, soil_moist_max, infil, srp):
    # Port of the PRMS srunoff_smidx infiltration logic: each water source is
    # added to ``infil`` and immediately partitioned via perv_comp or capped via
    # check_capacity.  Reads NEARZERO, net_snow and snowinfil_max from module
    # scope — TODO confirm those globals are intended rather than missing
    # parameters.  Returns (infil, srp, contrib_frac).
    # NOTE(review): avail_water is accumulated but never used or returned.
    avail_water = 0.0
    contrib_frac = 0.0
    if use_cascades:
        # Cascading Hortonian flow from upslope HRUs.
        avail_water = max(0.0, upslope_hortonian)
        infil += max(0.0, upslope_hortonian)
        if infil > 0.0:
            infil, srp, contrib_frac = perv_comp(soil_moist, upslope_hortonian, upslope_hortonian, carea_max,
                                                 smidx_coef, smidx_exp, contrib_frac, infil, srp)
    if pptmix_nopack:
        # Mixed rain/snow event with no antecedent snowpack.
        infil += net_rain
        avail_water += net_rain
        infil, srp, contrib_frac = perv_comp(soil_moist, net_rain, net_rain, carea_max, smidx_coef, smidx_exp, contrib_frac, infil, srp)
    if snowmelt > 0.0:
        avail_water += snowmelt
        infil += snowmelt
        # print('==== snowmelt > 0.0 ====')
        if pkwater_equiv > 0.0 or net_ppt - net_snow < NEARZERO:
            # Snowpack remains (or precipitation was effectively all snow):
            # melt infiltration is limited by snowinfil_max.
            # print(' -- call check_capacity()')
            infil, srp = check_capacity(soil_moist, soil_moist_max, snowinfil_max, infil, srp)
        else:
            # print(' -- call perv_comp()')
            infil, srp, contrib_frac = perv_comp(soil_moist, snowmelt, net_ppt, carea_max, smidx_coef, smidx_exp, contrib_frac, infil, srp)
    elif pkwater_equiv < NEARZERO:
        # No snowmelt and effectively no snowpack.
        # print('==== pkwater_equiv < NEARZERO')
        if net_snow < NEARZERO and net_rain > 0.0:
            # All-rain event on snow-free ground.
            avail_water += net_rain
            infil += net_rain
            # print(' ==== net_snow < NEARZERO and net_rain > 0.0')
            infil, srp, contrib_frac = perv_comp(soil_moist, net_rain, net_rain, carea_max, smidx_coef, smidx_exp, contrib_frac, infil, srp)
    elif infil > 0.0:
        # Snowpack exists but is not melting: cap any accumulated infiltration.
        # print('==== infil > 0.0 ====')
        infil, srp = check_capacity(soil_moist, soil_moist_max, snowinfil_max, infil, srp)
    return (infil, srp, contrib_frac)
# %%
# Exercise compute_infil with the scenario above; the returned tuple is discarded.
srp = 0.0
infil = 0.0
compute_infil(use_cascades, pptmix_nopack, upslope_hortonian, net_ppt, net_rain, snowmelt, pkwater_equiv,
              carea_max, smidx_coef, smidx_exp, soil_moist, soil_moist_max, infil, srp)
# %%
# This example triggers for snowmelt AND (pkwater_equiv > 0.0_dp .or. net_ppt - net_snow < NEARZERO)
# infil, pkwater_equiv, snowmelt
# infil: 0.0868653 0.5148234 0.0868653
# infil, net_rain, net_ppt, net_snow, carea_max, smidx_coef, smidx_exp, soil_moist, net_ppt, soil_moist_max, snowinfil_max, srp
# melt_chk_infil: 0.0868653 0.0000000 0.0000000 0.0000000 0.5718280 0.0207660 0.3628450 0.0000000 0.0000000 --- 2.9048920 2.0000000 0.0000000
# The crap/crap2/crap3 names absorb dump columns that are not needed here.
crap2, pkwater_equiv, snowmelt, crap3, net_rain, net_ppt, net_snow, carea_max, smidx_coef, smidx_exp, soil_moist, crap, soil_moist_max, snowinfil_max, srp = [
    0.0868653, 0.5148234, 0.0868653, 0.0868653,
    0.0000000, 0.0000000, 0.0000000, 0.5718280,
    0.0207660, 0.3628450, 0.0000000, 0.0000000,
    2.9048920, 2.0000000, 0.0000000]
# %%
# Run both variants on the melt-check scenario (return values discarded).
srp = 0.0
infil = 0.0
compute_infil_test(use_cascades, pptmix_nopack, upslope_hortonian, net_ppt, net_rain, snowmelt, pkwater_equiv,
                   carea_max, smidx_coef, smidx_exp, soil_moist, soil_moist_max, infil, srp)
# %%
srp = 0.0
infil = 0.0
compute_infil(use_cascades, pptmix_nopack, upslope_hortonian, net_ppt, net_rain, snowmelt, pkwater_equiv,
              carea_max, smidx_coef, smidx_exp, soil_moist, soil_moist_max, infil, srp)
# %%
# %%
# Triggers melt_comp
# there is snowmelt AND no current snowpack AND (either a mixed-event or all-rain event)
# Because there was an antecedent snowpack the rain component was added to pkwater_equiv which was
# then completely depleted (it melted) during snowcomp. So ....
#
# FINAL_infil: 0.2355339 0.0270727 0.1030922
# net_rain, net_ppt, net_snow, carea_max, smidx_coef, smidx_exp, soil_moist, soil_moist_max, snowinfil_max, pkwater_equiv, snowmelt, srp
vstr = '0.0670000 0.0670000 0.0000000 0.5718280 0.0207660 0.3628450 1.8843244 2.9048920 2.0000000 0.0000000 0.2626066 0.0270727'
vals = [float(xx) for xx in vstr.split()]
net_rain, net_ppt, net_snow, carea_max, smidx_coef, smidx_exp, soil_moist, soil_moist_max, snowinfil_max, pkwater_equiv, snowmelt, srp = vals
pptmix_nopack = False
# NOTE(review): srp parsed from vstr (0.0270727) is immediately reset to zero.
srp = 0.0
infil = 0.0
# Compare the experimental and original implementations on the same inputs.
infil, srp, contrib_frac = compute_infil_test(use_cascades, pptmix_nopack, upslope_hortonian, net_ppt, net_rain,
                                              snowmelt, pkwater_equiv, carea_max, smidx_coef, smidx_exp, soil_moist,
                                              soil_moist_max, infil, srp)
print('---- new style ----')
print(f'infil: {infil}')
print(f'srp: {srp}')
print(f'contrib_frac: {contrib_frac}')
srp = 0.0
infil = 0.0
infil, srp, contrib_frac = compute_infil(use_cascades, pptmix_nopack, upslope_hortonian, net_ppt, net_rain,
                                         snowmelt, pkwater_equiv, carea_max, smidx_coef, smidx_exp, soil_moist,
                                         soil_moist_max, infil, srp)
print('---- old style ----')
print(f'infil: {infil}')
print(f'srp: {srp}')
print(f'contrib_frac: {contrib_frac}')
# %%
# Manual replay of the smidx partitioning: first pass driven by snowmelt ...
infil = snowmelt
srp = 0.0
print(f'pkwater_equiv: {pkwater_equiv}')
print(f'snowmelt: {snowmelt}')
print(f'net_ppt: {net_ppt}')
print(f'net_rain: {net_rain}')
print(f'net_snow: {net_snow}')
cfrac = contrib_fraction_smidx(carea_max, smidx_coef, smidx_exp, soil_moist, snowmelt)
infil, srp = compute_infil_srp(cfrac, snowmelt, infil, srp)
print('-'*40)
print(f'infil: {infil}')
print(f'srp: {srp}')
print(f'contrib_frac: {cfrac}')
print(f'infil + srp: {(infil + srp)}')
# ... second pass driven by net_ppt, for comparison.
infil = snowmelt
srp = 0.0
cfrac = contrib_fraction_smidx(carea_max, smidx_coef, smidx_exp, soil_moist, net_ppt)
infil, srp = compute_infil_srp(cfrac, net_ppt, infil, srp)
print('-'*40)
print(f'infil: {infil}')
print(f'srp: {srp}')
print(f'contrib_frac: {cfrac}')
print(f'infil + srp: {(infil | |
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
import kivy
kivy.require('1.9.0')
import threading
import time
from datetime import datetime
import urllib2
import json
import operator
import gc
import os
from functools import partial
import math
from kivy.config import Config
# Window configuration must be applied before other Kivy modules create the window.
Config.set("kivy", "exit_on_escape", False)  # keep Esc from closing the app
Config.set("graphics", "height", 660)        # fixed startup window size
Config.set("graphics", "width", 1340)
Config.set('graphics', 'show_cursor', 1)     # make the cursor visible
from kivy.app import App
from kivy.core.window import Window
from kivy.factory import Factory
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.stacklayout import StackLayout
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.scatter import Scatter
from kivy.graphics.svg import Svg
from kivy.animation import Animation
from kivy.uix.stencilview import StencilView
from kivy.uix.screenmanager import Screen, ScreenManager
from kivy.properties import BooleanProperty, StringProperty, DictProperty, ObjectProperty,\
ListProperty, NumericProperty
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.behaviors import FocusBehavior
from kivy.uix.popup import Popup
from kivy.clock import Clock, mainthread
from kivy.uix.dropdown import DropDown
# Set WorldBank API static parameters.
# Request URLs are composed as: start_url + <catalog>[...] + end_url.
start_url = "http://api.worldbank.org/"
end_url = "?per_page=30000&format=json"
# Set url catalogs.
countries = "countries/"
topics = "topics/"
indicators = "indicators/"
# Set url for World Development Indicators (WDI)
wdi_url = "http://api.worldbank.org/source/2/indicators/?per_page=30000&format=json"
class LabelTip(Label):
    """Label whose text is loaded from the bundled tips file at construction."""

    def __init__(self, **kwargs):
        super(LabelTip, self).__init__(**kwargs)
        # Populate the label from the on-disk tips file.
        with open("./DB/tips.txt", "r") as tips_file:
            self.text = tips_file.read()
class TopicToggleButton(ToggleButton):
    # Toggle button for a WorldBank topic; `note` carries extra text —
    # presumably the topic description set where the button is created; confirm.
    note = StringProperty("")
class IndexToggleButton(ToggleButton):
    # Toggle button for a single indicator (index).
    code = StringProperty("")   # WorldBank indicator code
    note = StringProperty("")   # description text — TODO confirm against builder
    topic = StringProperty("")  # owning topic name
class BtnRmv(Button):
    # Remove-button placed next to a my_indicators entry; `index` stores the
    # indicator name it removes (see rmv_my_indicators).
    index = StringProperty("")
class SelectAll(Button):
    # Button with a region reference and a stored "normal" background name —
    # semantics defined in the .kv side; confirm there.
    region = ObjectProperty()
    normal = StringProperty("")
class MyIndicesBar(BoxLayout):
    """Bar that switches the shared ScreenManager to the my_indicators screen
    when clicked."""

    # Link to the ScreenManager holding the my_indicators / search screens.
    mib_my_indicators_search_sm = ObjectProperty()

    def on_touch_down(self, *args):
        super(MyIndicesBar, self).on_touch_down(*args)
        touch = args[0]
        # Only react when the touch actually lands on this bar.
        if self.collide_point(*touch.pos):
            # Switch Screens.
            self.mib_my_indicators_search_sm.current = "my_indicators"
class SearchBar(BoxLayout):
    """Bar that activates the indicator-search screen when clicked."""

    # Link to the ScreenManager holding the my_indicators / search screens.
    sb_my_indicators_search_sm = ObjectProperty()

    def on_touch_down(self, *args):
        super(SearchBar, self).on_touch_down(*args)
        touch = args[0]
        # Only react when the touch actually lands on this bar.
        if self.collide_point(*touch.pos):
            # Keep this touch from defocusing the search TextInput.
            FocusBehavior.ignored_touch.append(touch)
            # Switch Screens.
            self.sb_my_indicators_search_sm.current = "search_index"
class SearchArea(TextInput):
    # Plain TextInput subclass; styling/behavior come from the .kv definitions.
    pass
class IndexStackLayout(StackLayout):
    """StackLayout that equalizes index-button heights row by row after layout."""

    def do_layout(self, *largs):
        # NOTE(review): largs is deliberately not forwarded to the stock
        # do_layout — confirm this matches the intended Kivy contract.
        super(IndexStackLayout, self).do_layout()
        # Number of 380px-wide columns that fit in the slider (8px for padding).
        col = int((self.width-8)//380)
        # Try to fix each Index button.
        if col > 0:
            # NOTE(review): shown_ind_btns is read as a class attribute and is
            # keyed 1..N — presumably (re)bound when the buttons are built;
            # confirm it is populated before the first layout pass.
            # Calculate how many cols are inside the slider.
            for button in range(1, len(IndexSelection.shown_ind_btns)+1, col):
                # Prepare the list to store each button height per line.
                height_list = []
                # Locate the highest texture_size per line.
                for step in range(col):
                    height_list.append(IndexSelection.shown_ind_btns[button+step].texture_size[1])
                    # If current is last button, break.
                    if button+step == len(IndexSelection.shown_ind_btns):
                        break
                # Renew the height of each button per line, to the highest one
                # (plus 20px of vertical padding).
                for step in range(col):
                    IndexSelection.shown_ind_btns[button+step].height = max(height_list)+20
                    # If current is last button, break.
                    if button+step == len(IndexSelection.shown_ind_btns):
                        break
class CIMScreenManager(ScreenManager):
    """ScreenManager that mirrors the window mouse position to its active screen."""

    # Window-level cursor position, kept in sync via the Window binding below.
    mouse_pos = ListProperty()

    def __init__(self, **kwargs):
        # make sure we aren't overriding any important functionality
        super(CIMScreenManager, self).__init__(**kwargs)
        # Keep self.mouse_pos updated with every window cursor movement.
        Window.bind(mouse_pos=self.setter('mouse_pos'))

    def on_mouse_pos(self, *args):
        # Forward cursor movement to whichever screen is currently shown
        # (screens are MouseScreen subclasses exposing a mouse_pos property).
        self.current_screen.mouse_pos = self.mouse_pos
class MouseScreen(Screen):
    # Screen that receives forwarded mouse positions from CIMScreenManager.
    mouse_pos = ListProperty()
class IndexSelection(MouseScreen):
    """Screen for browsing topics/indicators and assembling the my_indicators set."""

    # Link to Update button from MainWindow.
    is_update_db = ObjectProperty()
    # Link to CIMScreenManager.
    is_manager = ObjectProperty()
    # Link to IndexCreation.
    is_index_creation = ObjectProperty()
    # "feat_index": currently featured index button (or None);
    # "my_indicators": indicator name -> indicator code.
    selected_indices = DictProperty({"feat_index": None, "my_indicators": {}})
    # Parsed core DB contents — presumably set during build_indices; confirm.
    coredb_py = ObjectProperty()
    def __init__(self, **kwargs):
        # make sure we aren't overriding any important functionality
        super(IndexSelection, self).__init__(**kwargs)
        # Topics panel is built lazily; True until the first build completes.
        self.must_build_topics = True
        # Index buttons currently shown in the slider — keyed 1..N elsewhere
        # (see IndexStackLayout.do_layout); TODO confirm key scheme.
        self.shown_ind_btns = {}
        # topic -> list of index names, for keyword search; None until built.
        self.search_dic = None
        # topic button -> topic data; None until built (see on_mouse_pos guard).
        self.topics_dic = None
# This function updates status Icon that belongs to IndexCreation Class.
@mainthread
def dl_status_icon_setter(self):
# If there is no active Indicator data download..
if not self.is_index_creation.btn_get_indicators.disabled:
# Compare my_indicators to sorted_indicators so we know if we must re-download data.
my_in = self.selected_indices["my_indicators"].keys()
my_in.sort()
# If lists are the same..
if my_in == self.is_index_creation.sorted_indicators:
self.is_index_creation.downloading_state_icon.source = './Sources/status_valid.png'
else:
self.is_index_creation.downloading_state_icon.source = './Sources/status_error.png'
# Recursively convert Unicode objects to strings objects.
def string_it(self, obj):
if isinstance(obj, dict):
return {self.string_it(key): self.string_it(value) for key, value in obj.iteritems()}
elif isinstance(obj, list):
return [self.string_it(element) for element in obj]
elif isinstance(obj, unicode):
return obj.encode('utf-8')
else:
return obj
# Function that will run every time mouse is moved.
def on_mouse_pos(self, *args):
for button in self.topics_dic.keys():
if button.collide_point(*button.to_widget(*args[1])):
button.background_normal = './Sources/button_hovered.png'
else:
button.background_normal = './Sources/button_normal.png'
# Check if mouse is over add_index_icon.
if self.add_index_icon.collide_point(*args[1]):
self.add_index_label.color = (0.34, 0.65, 0.90, 1)
else:
self.add_index_label.color = (1, 1, 1, 1)
# Check if mouse is over toggle_index_desc_icon.
if self.toggle_index_desc_icon.collide_point(*args[1]):
self.toggle_index_desc_label.color = (0.34, 0.65, 0.90, 1)
else:
self.toggle_index_desc_label.color = (1, 1, 1, 1)
# Function that clears the slider's stacklayout.
def clear_indices_stack(self):
# Clear all widgets from stack layout.
self.indices_slider_stack.clear_widgets()
# Reset slider position back to top.
self.indices_slider.scroll_y = 1
# Clear the "feat_index".
self.selected_indices["feat_index"] = None
# This function is called when an Index is selected.
def on_index_selection(self, *args):
# If current index selection is the feat_index.
if args[0] == self.selected_indices["feat_index"]:
# It means the same button has been toggled and should clear the "feat_index".
self.selected_indices["feat_index"] = None
else:
if self.selected_indices["feat_index"] is not None:
self.selected_indices["feat_index"].state = "normal"
self.selected_indices["feat_index"] = args[0]
# Reset slider position back to top.
self.index_desc_slider.scroll_y = 1
    # This function is called when an Index is added to my_indicators.
    def on_my_indicators(self):
        """Add the featured index to my_indicators and build its list entry."""
        # If user has selected an Index and it is not already in my_indicators..
        if not self.selected_indices["feat_index"] is None and \
                not (self.selected_indices["feat_index"].text in self.selected_indices[
                    "my_indicators"]):
            # Add Index to my_indicators (indicator name -> indicator code).
            self.selected_indices["my_indicators"][self.selected_indices["feat_index"].text] = \
                self.selected_indices["feat_index"].code
            # Set proper btn backgrounds based on my_indicators.
            self.btn_index_background()
            # Create my_index_box to hold my_index components.
            my_index_box = Factory.MyIndexBox()
            # Create btn_rmv_anchor to hold btn_rmv.
            btn_rmv_anchor = AnchorLayout(size_hint_y=None,
                                          height=25,
                                          anchor_x="right",
                                          padding=[0, 0, 10, 0])
            # Create a removing index btn.
            btn_rmv = Factory.BtnRmv(index=self.selected_indices["feat_index"].text,
                                     on_release=self.rmv_my_indicators)
            # Add btn_rmv in btn_rmv_anchor.
            btn_rmv_anchor.add_widget(btn_rmv)
            # Create my_index Label.
            my_index = Factory.MyIndex(text=self.selected_indices["feat_index"].text)
            # Create my_topic Label.
            my_topic = Factory.MyTopic(text=self.selected_indices["feat_index"].topic)
            # Add all components in my_index_box.
            my_index_box.add_widget(btn_rmv_anchor)
            my_index_box.add_widget(Factory.ShadeLine())
            my_index_box.add_widget(my_index)
            my_index_box.add_widget(my_topic)
            # Bind children heights to parent box so the box grows with its labels.
            my_index.bind(height=self.fix_my_index_h)
            my_topic.bind(height=self.fix_my_index_h)
            # Add my_index_box in my_indicators_container.
            self.my_indicators_container.add_widget(my_index_box)
            # Switch to my_indicators.
            self.my_indicators_search_sm.current = "my_indicators"
            # Remove previous text inputs.
            self.search_area.text = ""
            # Check if indicator data must be downloaded again.
            self.dl_status_icon_setter()
def rmv_my_indicators(self, *args):
# Remove index from the dict with my indices.
self.selected_indices["my_indicators"].pop(args[0].index, None)
# Remove that specific my_index_box.
self.my_indicators_container.remove_widget(args[0].parent.parent)
# Set proper btn backgrounds based on my_indicators.
self.btn_index_background()
# Check if indicator data must be downloaded again.
self.dl_status_icon_setter()
# Function that clears all search results.
def clear_search_results(self, *args):
# Clear all widgets from search_results_container.
self.search_results_container.clear_widgets()
# Reset slider position back to top.
self.search_results_slider.scroll_y = 1
# Clear search area too because user pressed the clear button.
if len(args) == 1:
# Remove previous text inputs.
self.search_area.text = ""
@staticmethod
def fix_my_index_h(*args):
# Init box height is the sum of the Top and Bottom box paddings
args[0].parent.height = args[0].parent.padding[1] + args[0].parent.padding[3]
# For each child in box add it's height to the box.
for child in args[0].parent.children:
args[0].parent.height += child.height
# This function sets proper background_normal of index buttons.
def btn_index_background(self):
# For each index button..
for btn in IndexSelection.shown_ind_btns.values():
# search if it is in my_indicators.
if btn.text in self.selected_indices["my_indicators"].keys():
btn.background_normal = './Sources/grey_btn_down.png'
btn.bold = True
else:
btn.background_normal = './Sources/wht_btn_normal.png'
btn.bold = False
    def search_results(self, keyword):
        """Populate the results list with indices whose name contains *keyword*.

        Matching is case-insensitive; every occurrence of *keyword* inside a
        matching index name is highlighted with BBCode color/bold markup.
        """
        # Clears all search results.
        self.clear_search_results()
        # Create sr_toolbox to hold sr_title and clear_sr.
        sr_toolbox = BoxLayout(orientation="vertical", size_hint_y=None, height=55)
        # Create search Title Label.
        sr_title = Factory.SR_Title()
        # Create button to clear results.
        clear_sr = Factory.SR_Clear(on_press=self.clear_search_results)
        # Add search Title and clear button to sr_toolbox
        sr_toolbox.add_widget(clear_sr)
        sr_toolbox.add_widget(sr_title)
        # Add sr_toolbox to search_results_container.
        self.search_results_container.add_widget(sr_toolbox)
        for topic in self.search_dic:
            for index in self.search_dic[topic]:
                if keyword.lower() in index.lower():
                    # Create searched_index_box to hold searched_index components.
                    searched_index_box = Factory.SearchBox()
                    # List to store occurrences.
                    occurrences = []
                    located = None
                    # Convert each index into a marked index: repeatedly split
                    # the name around the next case-insensitive hit and wrap the
                    # hit in markup, until no hit remains.
                    while located != -1:
                        located = index.lower().find(keyword.lower())
                        if located != -1:
                            occurrences.append(
                                index.partition(index[located:located+len(keyword)])[0])
                            occurrences.append("[color=ff0078][b]")
                            occurrences.append(
                                index.partition(index[located:located+len(keyword)])[1])
                            occurrences.append("[/b][/color]")
                            index = index.partition(index[located:located+len(keyword)])[2]
                        else:
                            # Tail after the last hit.
                            occurrences.append(index)
                    marked_index = ''.join(occurrences)
                    # Create search result index Label.
                    my_index = Factory.SR_Index(text=marked_index)
                    # Create search result topic Label.
                    my_topic = Factory.SR_Topic(text=topic)
                    # Add all components in searched_index_box.
                    searched_index_box.add_widget(my_topic)
                    searched_index_box.add_widget(my_index)
                    # Bind children heights to parent box.
                    my_index.bind(height=self.fix_my_index_h)
                    my_topic.bind(height=self.fix_my_index_h)
                    # Add searched_index_box in search_results_container.
                    self.search_results_container.add_widget(searched_index_box)
        # Show number of search results (container holds the toolbox plus one
        # box per match, hence the -1 / >1 arithmetic).
        if len(self.search_results_container.children) > 1:
            sr_title.text = str(len(self.search_results_container.children)-1) + " matches found:"
            sr_title.color = (0.1, 1, 0.1, 1)
        else:
            sr_title.text = "No results"
            sr_title.color = (1, 0, 0, 1)
# This method checks if there is any core DB available.
# If there is, it creates the topics dictionary (topics - button objects).
def build_indices(self):
# If topics dictionary shouldn't be loaded, do nothing.
if not self.must_build_topics:
# Switch screen without updating indicator | |
- 3.36078881755967E-11*m.x1263
- 4.75821426122115E-9*m.x1264 - 5.75253712746733E-8*m.x1265 - 4.75821426122115E-9*m.x1266
- 3.36078881755966E-11*m.x1267 - 4.06309203184713E-10*m.x1268 - 5.75253712746733E-8*m.x1269
- 6.95464339901277E-7*m.x1270 - 5.75253712746733E-8*m.x1271 - 4.06309203184712E-10*m.x1272
- 3.36078881755967E-11*m.x1273 - 4.75821426122115E-9*m.x1274 - 5.75253712746733E-8*m.x1275
- 4.75821426122115E-9*m.x1276 - 3.36078881755966E-11*m.x1277 - 2.3737689931885E-13*m.x1278
- 3.36078881755965E-11*m.x1279 - 4.06309203184711E-10*m.x1280 - 3.36078881755965E-11*m.x1281
- 2.37376899318848E-13*m.x1282 - 3.36078881755964E-11*m.x1283 - 4.7582142612211E-9*m.x1284
- 5.75253712746727E-8*m.x1285 - 4.7582142612211E-9*m.x1286 - 3.36078881755961E-11*m.x1287
- 4.75821426122113E-9*m.x1288 - 6.73669313507416E-7*m.x1289 - 8.14445824596448E-6*m.x1290
- 6.73669313507416E-7*m.x1291 - 4.75821426122112E-9*m.x1292 - 5.75253712746731E-8*m.x1293
- 8.14445824596448E-6*m.x1294 - 9.84640368653642E-5*m.x1295 - 8.14445824596448E-6*m.x1296
- 5.75253712746728E-8*m.x1297 - 4.75821426122113E-9*m.x1298 - 6.73669313507416E-7*m.x1299
- 8.14445824596448E-6*m.x1300 - 6.73669313507416E-7*m.x1301 - 4.75821426122112E-9*m.x1302
- 3.36078881755964E-11*m.x1303 - 4.7582142612211E-9*m.x1304 - 5.75253712746727E-8*m.x1305
- 4.7582142612211E-9*m.x1306 - 3.36078881755961E-11*m.x1307 - 4.06309203184709E-10*m.x1308
- 5.75253712746728E-8*m.x1309 - 6.9546433990127E-7*m.x1310 - 5.75253712746728E-8*m.x1311
- 4.06309203184708E-10*m.x1312 - 5.75253712746731E-8*m.x1313 - 8.14445824596449E-6*m.x1314
- 9.84640368653644E-5*m.x1315 - 8.14445824596449E-6*m.x1316 - 5.75253712746729E-8*m.x1317
- 6.95464339901275E-7*m.x1318 - 9.84640368653646E-5*m.x1319 - 0.00119040042480759*m.x1320
- 9.84640368653646E-5*m.x1321 - 6.95464339901272E-7*m.x1322 - 5.75253712746731E-8*m.x1323
- 8.14445824596449E-6*m.x1324 - 9.84640368653644E-5*m.x1325 - 8.14445824596449E-6*m.x1326
- 5.75253712746729E-8*m.x1327 - 4.06309203184709E-10*m.x1328 - 5.75253712746728E-8*m.x1329
- 6.9546433990127E-7*m.x1330 - 5.75253712746728E-8*m.x1331 - 4.06309203184708E-10*m.x1332
- 3.36078881755964E-11*m.x1333 - 4.7582142612211E-9*m.x1334 - 5.75253712746727E-8*m.x1335
- 4.7582142612211E-9*m.x1336 - 3.36078881755961E-11*m.x1337 - 4.75821426122113E-9*m.x1338
- 6.73669313507416E-7*m.x1339 - 8.14445824596448E-6*m.x1340 - 6.73669313507416E-7*m.x1341
- 4.75821426122112E-9*m.x1342 - 5.75253712746731E-8*m.x1343 - 8.14445824596448E-6*m.x1344
- 9.84640368653642E-5*m.x1345 - 8.14445824596448E-6*m.x1346 - 5.75253712746728E-8*m.x1347
- 4.75821426122113E-9*m.x1348 - 6.73669313507416E-7*m.x1349 - 8.14445824596448E-6*m.x1350
- 6.73669313507416E-7*m.x1351 - 4.75821426122112E-9*m.x1352 - 3.36078881755964E-11*m.x1353
- 4.7582142612211E-9*m.x1354 - 5.75253712746727E-8*m.x1355 - 4.7582142612211E-9*m.x1356
- 3.36078881755961E-11*m.x1357 - 2.3737689931885E-13*m.x1358 - 3.36078881755965E-11*m.x1359
- 4.06309203184711E-10*m.x1360 - 3.36078881755965E-11*m.x1361 - 2.37376899318848E-13*m.x1362
- 3.36078881755967E-11*m.x1363 - 4.75821426122115E-9*m.x1364 - 5.75253712746733E-8*m.x1365
- 4.75821426122115E-9*m.x1366 - 3.36078881755966E-11*m.x1367 - 4.06309203184713E-10*m.x1368
- 5.75253712746733E-8*m.x1369 - 6.95464339901277E-7*m.x1370 - 5.75253712746733E-8*m.x1371
- 4.06309203184712E-10*m.x1372 - 3.36078881755967E-11*m.x1373 - 4.75821426122115E-9*m.x1374
- 5.75253712746733E-8*m.x1375 - 4.75821426122115E-9*m.x1376 - 3.36078881755966E-11*m.x1377
- 2.3737689931885E-13*m.x1378 - 3.36078881755965E-11*m.x1379 - 4.06309203184711E-10*m.x1380
- 3.36078881755965E-11*m.x1381 - 2.37376899318848E-13*m.x1382 - 3.36078881755965E-11*m.x1383
- 4.75821426122112E-9*m.x1384 - 5.75253712746729E-8*m.x1385 - 4.75821426122112E-9*m.x1386
- 3.36078881755964E-11*m.x1387 - 4.75821426122115E-9*m.x1388 - 6.73669313507418E-7*m.x1389
- 8.14445824596451E-6*m.x1390 - 6.73669313507418E-7*m.x1391 - 4.75821426122112E-9*m.x1392
- 5.75253712746733E-8*m.x1393 - 8.14445824596452E-6*m.x1394 - 9.84640368653647E-5*m.x1395
- 8.14445824596452E-6*m.x1396 - 5.75253712746731E-8*m.x1397 - 4.75821426122115E-9*m.x1398
- 6.73669313507418E-7*m.x1399 - 8.14445824596451E-6*m.x1400 - 6.73669313507418E-7*m.x1401
- 4.75821426122112E-9*m.x1402 - 3.36078881755965E-11*m.x1403 - 4.75821426122112E-9*m.x1404
- 5.75253712746729E-8*m.x1405 - 4.75821426122112E-9*m.x1406 - 3.36078881755964E-11*m.x1407
- 4.7582142612211E-9*m.x1408 - 6.73669313507411E-7*m.x1409 - 8.14445824596442E-6*m.x1410
- 6.73669313507411E-7*m.x1411 - 4.75821426122108E-9*m.x1412 - 6.73669313507416E-7*m.x1413
- 9.53782908979563E-5*m.x1414 - 0.0011530946893595*m.x1415 - 9.53782908979563E-5*m.x1416
- 6.73669313507413E-7*m.x1417 - 8.14445824596448E-6*m.x1418 - 0.0011530946893595*m.x1419
- 0.0139405660356362*m.x1420 - 0.0011530946893595*m.x1421 - 8.14445824596445E-6*m.x1422
- 6.73669313507416E-7*m.x1423 - 9.53782908979563E-5*m.x1424 - 0.0011530946893595*m.x1425
- 9.53782908979563E-5*m.x1426 - 6.73669313507413E-7*m.x1427 - 4.7582142612211E-9*m.x1428
- 6.73669313507411E-7*m.x1429 - 8.14445824596442E-6*m.x1430 - 6.73669313507411E-7*m.x1431
- 4.75821426122108E-9*m.x1432 - 5.75253712746729E-8*m.x1433 - 8.14445824596445E-6*m.x1434
- 9.84640368653639E-5*m.x1435 - 8.14445824596445E-6*m.x1436 - 5.75253712746726E-8*m.x1437
- 8.14445824596451E-6*m.x1438 - 0.0011530946893595*m.x1439 - 0.0139405660356362*m.x1440
- 0.0011530946893595*m.x1441 - 8.14445824596446E-6*m.x1442 - 9.84640368653646E-5*m.x1443
- 0.0139405660356362*m.x1444 - 0.168537226983398*m.x1445 - 0.0139405660356362*m.x1446
- 9.8464036865364E-5*m.x1447 - 8.14445824596451E-6*m.x1448 - 0.0011530946893595*m.x1449
- 0.0139405660356362*m.x1450 - 0.0011530946893595*m.x1451 - 8.14445824596446E-6*m.x1452
- 5.75253712746729E-8*m.x1453 - 8.14445824596445E-6*m.x1454 - 9.84640368653639E-5*m.x1455
- 8.14445824596445E-6*m.x1456 - 5.75253712746726E-8*m.x1457 - 4.7582142612211E-9*m.x1458
- 6.73669313507411E-7*m.x1459 - 8.14445824596442E-6*m.x1460 - 6.73669313507411E-7*m.x1461
- 4.75821426122108E-9*m.x1462 - 6.73669313507416E-7*m.x1463 - 9.53782908979563E-5*m.x1464
- 0.0011530946893595*m.x1465 - 9.53782908979563E-5*m.x1466 - 6.73669313507413E-7*m.x1467
- 8.14445824596448E-6*m.x1468 - 0.0011530946893595*m.x1469 - 0.0139405660356362*m.x1470
- 0.0011530946893595*m.x1471 - 8.14445824596445E-6*m.x1472 - 6.73669313507416E-7*m.x1473
- 9.53782908979563E-5*m.x1474 - 0.0011530946893595*m.x1475 - 9.53782908979563E-5*m.x1476
- 6.73669313507413E-7*m.x1477 - 4.7582142612211E-9*m.x1478 - 6.73669313507411E-7*m.x1479
- 8.14445824596442E-6*m.x1480 - 6.73669313507411E-7*m.x1481 - 4.75821426122108E-9*m.x1482
- 3.36078881755965E-11*m.x1483 - 4.75821426122112E-9*m.x1484 - 5.75253712746729E-8*m.x1485
- 4.75821426122112E-9*m.x1486 - 3.36078881755964E-11*m.x1487 - 4.75821426122115E-9*m.x1488
- 6.73669313507418E-7*m.x1489 - 8.14445824596451E-6*m.x1490 - 6.73669313507418E-7*m.x1491
- 4.75821426122112E-9*m.x1492 - 5.75253712746733E-8*m.x1493 - 8.14445824596452E-6*m.x1494
- 9.84640368653647E-5*m.x1495 - 8.14445824596452E-6*m.x1496 - 5.75253712746731E-8*m.x1497
- 4.75821426122115E-9*m.x1498 - 6.73669313507418E-7*m.x1499 - 8.14445824596451E-6*m.x1500
- 6.73669313507418E-7*m.x1501 - 4.75821426122112E-9*m.x1502 - 3.36078881755965E-11*m.x1503
- 4.75821426122112E-9*m.x1504 - 5.75253712746729E-8*m.x1505 - 4.75821426122112E-9*m.x1506
- 3.36078881755964E-11*m.x1507 - 4.06309203184711E-10*m.x1508 - 5.75253712746729E-8*m.x1509
- 6.95464339901272E-7*m.x1510 - 5.75253712746729E-8*m.x1511 - 4.06309203184709E-10*m.x1512
- 5.75253712746733E-8*m.x1513 - 8.14445824596451E-6*m.x1514 - 9.84640368653646E-5*m.x1515
- 8.14445824596451E-6*m.x1516 - 5.7525371274673E-8*m.x1517 - 6.95464339901277E-7*m.x1518
- 9.84640368653647E-5*m.x1519 - 0.0011904004248076*m.x1520 - 9.84640368653647E-5*m.x1521
- 6.95464339901274E-7*m.x1522 - 5.75253712746733E-8*m.x1523 - 8.14445824596451E-6*m.x1524
- 9.84640368653646E-5*m.x1525 - 8.14445824596451E-6*m.x1526 - 5.7525371274673E-8*m.x1527
- 4.06309203184711E-10*m.x1528 - 5.75253712746729E-8*m.x1529 - 6.95464339901272E-7*m.x1530
- 5.75253712746729E-8*m.x1531 - 4.06309203184709E-10*m.x1532 - 5.75253712746727E-8*m.x1533
- 8.14445824596442E-6*m.x1534 - 9.84640368653635E-5*m.x1535 - 8.14445824596442E-6*m.x1536
- 5.75253712746724E-8*m.x1537 - 8.14445824596448E-6*m.x1538 - 0.00115309468935949*m.x1539
- 0.0139405660356361*m.x1540 - 0.00115309468935949*m.x1541 - 8.14445824596443E-6*m.x1542
- 9.84640368653642E-5*m.x1543 - 0.0139405660356362*m.x1544 - 0.168537226983398*m.x1545
- 0.0139405660356362*m.x1546 - 9.84640368653639E-5*m.x1547 - 8.14445824596448E-6*m.x1548
- 0.00115309468935949*m.x1549 - 0.0139405660356361*m.x1550 - 0.00115309468935949*m.x1551
- 8.14445824596443E-6*m.x1552 - 5.75253712746727E-8*m.x1553 - 8.14445824596442E-6*m.x1554
- 9.84640368653635E-5*m.x1555 - 8.14445824596442E-6*m.x1556 - 5.75253712746724E-8*m.x1557
- 6.9546433990127E-7*m.x1558 - 9.84640368653639E-5*m.x1559 - 0.00119040042480759*m.x1560
- 9.84640368653639E-5*m.x1561 - 6.95464339901267E-7*m.x1562 - 9.84640368653644E-5*m.x1563
- 0.0139405660356362*m.x1564 - 0.168537226983398*m.x1565 - 0.0139405660356362*m.x1566
- 9.84640368653639E-5*m.x1567 - 0.00119040042480759*m.x1568 - 0.168537226983398*m.x1569
- 2.03756409938036*m.x1570 - 0.168537226983398*m.x1571 - 0.00119040042480759*m.x1572
- 9.84640368653644E-5*m.x1573 - 0.0139405660356362*m.x1574 - 0.168537226983398*m.x1575
- 0.0139405660356362*m.x1576 - 9.84640368653639E-5*m.x1577 - 6.9546433990127E-7*m.x1578
- 9.84640368653639E-5*m.x1579 - 0.00119040042480759*m.x1580 - 9.84640368653639E-5*m.x1581
- 6.95464339901267E-7*m.x1582 - 5.75253712746727E-8*m.x1583 - 8.14445824596442E-6*m.x1584
- 9.84640368653635E-5*m.x1585 - 8.14445824596442E-6*m.x1586 - 5.75253712746724E-8*m.x1587
- 8.14445824596448E-6*m.x1588 - 0.00115309468935949*m.x1589 - 0.0139405660356361*m.x1590
- 0.00115309468935949*m.x1591 - 8.14445824596443E-6*m.x1592 - 9.84640368653642E-5*m.x1593
- 0.0139405660356362*m.x1594 - 0.168537226983398*m.x1595 - 0.0139405660356362*m.x1596
- 9.84640368653639E-5*m.x1597 - 8.14445824596448E-6*m.x1598 - 0.00115309468935949*m.x1599
- 0.0139405660356361*m.x1600 - 0.00115309468935949*m.x1601 - 8.14445824596443E-6*m.x1602
- 5.75253712746727E-8*m.x1603 - 8.14445824596442E-6*m.x1604 - 9.84640368653635E-5*m.x1605
- 8.14445824596442E-6*m.x1606 - 5.75253712746724E-8*m.x1607 - 4.06309203184711E-10*m.x1608
- 5.75253712746729E-8*m.x1609 - 6.95464339901272E-7*m.x1610 - 5.75253712746729E-8*m.x1611
- 4.06309203184709E-10*m.x1612 - 5.75253712746733E-8*m.x1613 - 8.14445824596451E-6*m.x1614
- 9.84640368653646E-5*m.x1615 - 8.14445824596451E-6*m.x1616 - 5.7525371274673E-8*m.x1617
- 6.95464339901277E-7*m.x1618 - 9.84640368653647E-5*m.x1619 - 0.0011904004248076*m.x1620
- 9.84640368653647E-5*m.x1621 - 6.95464339901274E-7*m.x1622 - 5.75253712746733E-8*m.x1623
- 8.14445824596451E-6*m.x1624 - 9.84640368653646E-5*m.x1625 - 8.14445824596451E-6*m.x1626
- 5.7525371274673E-8*m.x1627 - 4.06309203184711E-10*m.x1628 - 5.75253712746729E-8*m.x1629
- 6.95464339901272E-7*m.x1630 - 5.75253712746729E-8*m.x1631 - 4.06309203184709E-10*m.x1632
- 3.36078881755965E-11*m.x1633 - 4.75821426122112E-9*m.x1634 - 5.75253712746729E-8*m.x1635
- 4.75821426122112E-9*m.x1636 - 3.36078881755964E-11*m.x1637 - 4.75821426122115E-9*m.x1638
- 6.73669313507418E-7*m.x1639 - 8.14445824596451E-6*m.x1640 - 6.73669313507418E-7*m.x1641
- 4.75821426122112E-9*m.x1642 - 5.75253712746733E-8*m.x1643 - 8.14445824596452E-6*m.x1644
- 9.84640368653647E-5*m.x1645 - 8.14445824596452E-6*m.x1646 - 5.75253712746731E-8*m.x1647
- 4.75821426122115E-9*m.x1648 - 6.73669313507418E-7*m.x1649 - 8.14445824596451E-6*m.x1650
- 6.73669313507418E-7*m.x1651 - 4.75821426122112E-9*m.x1652 - 3.36078881755965E-11*m.x1653
- 4.75821426122112E-9*m.x1654 - 5.75253712746729E-8*m.x1655 - 4.75821426122112E-9*m.x1656
- 3.36078881755964E-11*m.x1657 - 4.7582142612211E-9*m.x1658 - 6.73669313507411E-7*m.x1659
- 8.14445824596442E-6*m.x1660 - 6.73669313507411E-7*m.x1661 - 4.75821426122108E-9*m.x1662
- 6.73669313507416E-7*m.x1663 - 9.53782908979563E-5*m.x1664 - 0.0011530946893595*m.x1665
- 9.53782908979563E-5*m.x1666 - 6.73669313507413E-7*m.x1667 - 8.14445824596448E-6*m.x1668
- 0.0011530946893595*m.x1669 - 0.0139405660356362*m.x1670 - 0.0011530946893595*m.x1671
- 8.14445824596445E-6*m.x1672 - 6.73669313507416E-7*m.x1673 - 9.53782908979563E-5*m.x1674
- 0.0011530946893595*m.x1675 - 9.53782908979563E-5*m.x1676 - 6.73669313507413E-7*m.x1677
- 4.7582142612211E-9*m.x1678 - 6.73669313507411E-7*m.x1679 - 8.14445824596442E-6*m.x1680
- 6.73669313507411E-7*m.x1681 - 4.75821426122108E-9*m.x1682 - 5.75253712746729E-8*m.x1683
- 8.14445824596445E-6*m.x1684 - 9.84640368653639E-5*m.x1685 - 8.14445824596445E-6*m.x1686
- 5.75253712746726E-8*m.x1687 - 8.14445824596451E-6*m.x1688 - 0.0011530946893595*m.x1689
- 0.0139405660356362*m.x1690 - 0.0011530946893595*m.x1691 - 8.14445824596446E-6*m.x1692
- 9.84640368653646E-5*m.x1693 - 0.0139405660356362*m.x1694 - 0.168537226983398*m.x1695
- 0.0139405660356362*m.x1696 - 9.8464036865364E-5*m.x1697 - 8.14445824596451E-6*m.x1698
- 0.0011530946893595*m.x1699 - 0.0139405660356362*m.x1700 - 0.0011530946893595*m.x1701
- 8.14445824596446E-6*m.x1702 - 5.75253712746729E-8*m.x1703 - 8.14445824596445E-6*m.x1704
- 9.84640368653639E-5*m.x1705 - 8.14445824596445E-6*m.x1706 - 5.75253712746726E-8*m.x1707
- 4.7582142612211E-9*m.x1708 - 6.73669313507411E-7*m.x1709 - 8.14445824596442E-6*m.x1710
- 6.73669313507411E-7*m.x1711 - 4.75821426122108E-9*m.x1712 - 6.73669313507416E-7*m.x1713
- 9.53782908979563E-5*m.x1714 - 0.0011530946893595*m.x1715 - 9.53782908979563E-5*m.x1716
- 6.73669313507413E-7*m.x1717 - 8.14445824596448E-6*m.x1718 - 0.0011530946893595*m.x1719
- 0.0139405660356362*m.x1720 - 0.0011530946893595*m.x1721 - 8.14445824596445E-6*m.x1722
- 6.73669313507416E-7*m.x1723 - 9.53782908979563E-5*m.x1724 - 0.0011530946893595*m.x1725
- 9.53782908979563E-5*m.x1726 - 6.73669313507413E-7*m.x1727 - 4.7582142612211E-9*m.x1728
- 6.73669313507411E-7*m.x1729 - 8.14445824596442E-6*m.x1730 - 6.73669313507411E-7*m.x1731
- 4.75821426122108E-9*m.x1732 - 3.36078881755965E-11*m.x1733 - 4.75821426122112E-9*m.x1734
- 5.75253712746729E-8*m.x1735 - 4.75821426122112E-9*m.x1736 - 3.36078881755964E-11*m.x1737
- 4.75821426122115E-9*m.x1738 - 6.73669313507418E-7*m.x1739 - 8.14445824596451E-6*m.x1740
- 6.73669313507418E-7*m.x1741 - 4.75821426122112E-9*m.x1742 - 5.75253712746733E-8*m.x1743
- 8.14445824596452E-6*m.x1744 - 9.84640368653647E-5*m.x1745 - 8.14445824596452E-6*m.x1746
- 5.75253712746731E-8*m.x1747 - 4.75821426122115E-9*m.x1748 - 6.73669313507418E-7*m.x1749
- 8.14445824596451E-6*m.x1750 - 6.73669313507418E-7*m.x1751 - 4.75821426122112E-9*m.x1752
- 3.36078881755965E-11*m.x1753 - 4.75821426122112E-9*m.x1754 - 5.75253712746729E-8*m.x1755
- 4.75821426122112E-9*m.x1756 - 3.36078881755964E-11*m.x1757 - 2.37376899318849E-13*m.x1758
- 3.36078881755964E-11*m.x1759 - 4.06309203184709E-10*m.x1760 - 3.36078881755964E-11*m.x1761
- 2.37376899318848E-13*m.x1762 - 3.36078881755966E-11*m.x1763 - 4.75821426122113E-9*m.x1764
- 5.75253712746731E-8*m.x1765 - 4.75821426122113E-9*m.x1766 - 3.36078881755964E-11*m.x1767
- 4.06309203184712E-10*m.x1768 - 5.75253712746731E-8*m.x1769 - 6.95464339901274E-7*m.x1770
- 5.75253712746731E-8*m.x1771 - 4.06309203184709E-10*m.x1772 - 3.36078881755966E-11*m.x1773
- 4.75821426122113E-9*m.x1774 - 5.75253712746731E-8*m.x1775 - 4.75821426122113E-9*m.x1776
- 3.36078881755964E-11*m.x1777 - 2.37376899318849E-13*m.x1778 - 3.36078881755964E-11*m.x1779
- 4.06309203184709E-10*m.x1780 - 3.36078881755964E-11*m.x1781 - 2.37376899318848E-13*m.x1782
- 3.36078881755963E-11*m.x1783 - 4.75821426122108E-9*m.x1784 - 5.75253712746725E-8*m.x1785
- 4.75821426122108E-9*m.x1786 - 3.36078881755961E-11*m.x1787 - 4.75821426122112E-9*m.x1788
- 6.73669313507413E-7*m.x1789 - 8.14445824596445E-6*m.x1790 - 6.73669313507413E-7*m.x1791
- 4.75821426122108E-9*m.x1792 - 5.75253712746729E-8*m.x1793 - 8.14445824596445E-6*m.x1794
- 9.84640368653639E-5*m.x1795 - 8.14445824596445E-6*m.x1796 - 5.75253712746726E-8*m.x1797
- 4.75821426122112E-9*m.x1798 - 6.73669313507413E-7*m.x1799 - 8.14445824596445E-6*m.x1800
- 6.73669313507413E-7*m.x1801 - 4.75821426122108E-9*m.x1802 - 3.36078881755963E-11*m.x1803
- 4.75821426122108E-9*m.x1804 - 5.75253712746725E-8*m.x1805 - 4.75821426122108E-9*m.x1806
- 3.36078881755961E-11*m.x1807 - 4.06309203184708E-10*m.x1808 - 5.75253712746725E-8*m.x1809
- 6.95464339901267E-7*m.x1810 - 5.75253712746725E-8*m.x1811 - 4.06309203184706E-10*m.x1812
- 5.75253712746729E-8*m.x1813 - 8.14445824596445E-6*m.x1814 - 9.84640368653639E-5*m.x1815
- 8.14445824596445E-6*m.x1816 - 5.75253712746726E-8*m.x1817 - 6.95464339901272E-7*m.x1818
- 9.8464036865364E-5*m.x1819 - 0.00119040042480759*m.x1820 - 9.8464036865364E-5*m.x1821
- 6.95464339901269E-7*m.x1822 - 5.75253712746729E-8*m.x1823 - 8.14445824596445E-6*m.x1824
- 9.84640368653639E-5*m.x1825 - 8.14445824596445E-6*m.x1826 - 5.75253712746726E-8*m.x1827
- 4.06309203184708E-10*m.x1828 - 5.75253712746725E-8*m.x1829 - 6.95464339901267E-7*m.x1830
- 5.75253712746725E-8*m.x1831 - 4.06309203184706E-10*m.x1832 - 3.36078881755963E-11*m.x1833
- 4.75821426122108E-9*m.x1834 - 5.75253712746725E-8*m.x1835 - 4.75821426122108E-9*m.x1836
- 3.36078881755961E-11*m.x1837 - 4.75821426122112E-9*m.x1838 - 6.73669313507413E-7*m.x1839
- 8.14445824596445E-6*m.x1840 - 6.73669313507413E-7*m.x1841 - 4.75821426122108E-9*m.x1842
- 5.75253712746729E-8*m.x1843 - 8.14445824596445E-6*m.x1844 - 9.84640368653639E-5*m.x1845
- 8.14445824596445E-6*m.x1846 - 5.75253712746726E-8*m.x1847 - 4.75821426122112E-9*m.x1848
- 6.73669313507413E-7*m.x1849 - 8.14445824596445E-6*m.x1850 - 6.73669313507413E-7*m.x1851
- 4.75821426122108E-9*m.x1852 - 3.36078881755963E-11*m.x1853 - 4.75821426122108E-9*m.x1854
- 5.75253712746725E-8*m.x1855 - 4.75821426122108E-9*m.x1856 - 3.36078881755961E-11*m.x1857
- 2.37376899318849E-13*m.x1858 - 3.36078881755964E-11*m.x1859 - 4.06309203184709E-10*m.x1860
- 3.36078881755964E-11*m.x1861 - 2.37376899318848E-13*m.x1862 - 3.36078881755966E-11*m.x1863
- 4.75821426122113E-9*m.x1864 - 5.75253712746731E-8*m.x1865 - 4.75821426122113E-9*m.x1866
- 3.36078881755964E-11*m.x1867 - 4.06309203184712E-10*m.x1868 - 5.75253712746731E-8*m.x1869
- 6.95464339901274E-7*m.x1870 - 5.75253712746731E-8*m.x1871 - 4.06309203184709E-10*m.x1872
- 3.36078881755966E-11*m.x1873 - 4.75821426122113E-9*m.x1874 - 5.75253712746731E-8*m.x1875
- 4.75821426122113E-9*m.x1876 - 3.36078881755964E-11*m.x1877 - 2.37376899318849E-13*m.x1878
- 3.36078881755964E-11*m.x1879 - 4.06309203184709E-10*m.x1880 - 3.36078881755964E-11*m.x1881
- 2.37376899318848E-13*m.x1882 - 1.58251266212566E-13*m.x1883 - 2.2405258783731E-11*m.x1884
- 2.7087280212314E-10*m.x1885 - 2.2405258783731E-11*m.x1886 - 1.58251266212565E-13*m.x1887
- 2.24052587837312E-11*m.x1888 - 3.1721428408141E-9*m.x1889 - 3.83502475164489E-8*m.x1890
- 3.1721428408141E-9*m.x1891 - 2.24052587837311E-11*m.x1892 - 2.70872802123142E-10*m.x1893
- 3.83502475164489E-8*m.x1894 - 4.63642893267518E-7*m.x1895 - 3.83502475164489E-8*m.x1896
- 2.70872802123141E-10*m.x1897 - 2.24052587837312E-11*m.x1898 - 3.1721428408141E-9*m.x1899
- 3.83502475164489E-8*m.x1900 - 3.1721428408141E-9*m.x1901 - 2.24052587837311E-11*m.x1902
| |
<gh_stars>0
import hashlib
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from time import time
from types import GeneratorType
from typing import TypeVar, List, Union, Dict, Generator, Optional
from . import codes
from . import logger
from .library import Interval, Scheduled, Smart, SubProvider, Keywords
from .tools import ScriptStorage
# Constants
# Maps a currency code to the numeric id stored in Price.currency.
# '' -> 0 denotes "no/unknown currency" (used as the Item default price).
CURRENCIES = {
    'AUD': 12,
    'CAD': 11,
    'HKD': 10,
    'PLN': 9,
    'BYN': 8,
    'UAH': 7,
    'NOK': 6,
    'CNY': 5,
    'RUB': 4,
    'EUR': 3,
    'USD': 2,
    'GBP': 1,
    '': 0
}
# Maps a size-chart code to the numeric id stored in Sizes.type.
# '' -> 0 denotes "no/unknown chart" (used as the Item default sizes).
# NOTE(review): codes look like region/category encodings (e.g. 'S-US-M'),
# but the exact semantics are not visible here — confirm with producers.
SIZE_TYPES = {
    'P-W-W': 14,
    'P-M-W': 13,
    'P-U-W': 12,
    'C-W-W': 11,
    'C-M-W': 10,
    'C-U-W': 9,
    'S-JP-W': 8,
    'S-JP-M': 7,
    'S-EU-W': 6,
    'S-EU-M': 5,
    'S-UK-W': 4,
    'S-UK-M': 3,
    'S-US-W': 2,
    'S-US-M': 1,
    '': 0
}
# Error classes
class ParserError(Exception):
    """Exception raised for parser-related failures."""
    pass
class EventsExecutorError(Exception):
    """Exception raised for events-executor failures."""
    pass
# Indexing
@dataclass
class Catalog(ABC):
    """Base descriptor of a catalog (index) to monitor.

    A catalog is identified solely by the parser ``script`` that owns it:
    both equality and hashing use ``script`` and nothing else.
    """
    script: str  # name of the parser script this catalog belongs to

    def __post_init__(self):
        if not isinstance(self.script, str):
            raise TypeError('script must be str')

    def __eq__(self, other):
        # BUG FIX: the original compared against ``Target`` subclasses,
        # which made ``Catalog(x) == Catalog(x)`` evaluate to False
        # (non-reflexive, inconsistent with __hash__).  Two catalogs are
        # equal when they come from the same script.
        if issubclass(type(other), Catalog):
            return other.script == self.script
        return False

    def hash(self) -> bytes:
        """Stable blake2s digest of the script name."""
        return hashlib.blake2s(self.script.encode()).digest()

    def __hash__(self) -> int:
        return hash(self.hash())


CatalogType = TypeVar('CatalogType', bound=Catalog)
@dataclass
class CInterval(Interval, Catalog):
    """Catalog re-checked on a fixed interval (see Interval)."""

    def __eq__(self, other):
        # Keep Catalog's script-based equality instead of the field-wise
        # __eq__ a dataclass would otherwise generate.
        return Catalog.__eq__(self, other)

    # BUG FIX: defining __eq__ implicitly sets __hash__ to None, silently
    # making instances unhashable although Catalog defines hashing; restore
    # the inherited implementation so instances work in sets/dicts.
    __hash__ = Catalog.__hash__
@dataclass
class CScheduled(Scheduled, Catalog):
    """Catalog checked at a scheduled time (see Scheduled)."""

    def __eq__(self, other):
        # Keep Catalog's script-based equality.
        return Catalog.__eq__(self, other)

    # BUG FIX: defining __eq__ implicitly sets __hash__ to None; restore
    # Catalog's hash so instances remain usable as dict/set members.
    __hash__ = Catalog.__hash__
@dataclass
class CSmart(Smart, Catalog):
    """Catalog driven by the Smart scheduling policy (see Smart)."""

    def __eq__(self, other):
        # Keep Catalog's script-based equality.
        return Catalog.__eq__(self, other)

    # BUG FIX: defining __eq__ implicitly sets __hash__ to None; restore
    # Catalog's hash so instances remain usable as dict/set members.
    __hash__ = Catalog.__hash__
# Target classes
@dataclass
class Target(ABC):
    """Base descriptor of a single monitoring target.

    Identity is the triple (``name``, ``script``, ``data``); ``reused``
    tracks how many consecutive times the target was re-queued and is
    excluded from both equality and hashing.
    """
    name: str
    script: str
    data: Union[str, bytes, int, float, list, tuple, dict] = field(repr=False)
    reused: int = field(init=False, compare=False, default=-1)

    def __post_init__(self):
        if not isinstance(self.name, str):
            raise TypeError('name must be str')
        if not isinstance(self.script, str):
            # BUG FIX: message previously read 'scripts must be str'
            raise TypeError('script must be str')
        if not isinstance(self.data, (str, bytes, int, float, list, tuple, dict)):
            # BUG FIX: message was missing a comma after 'str'
            raise TypeError('data must be (str, bytes, int, float, list, tuple, dict)')

    def __eq__(self, other):
        if issubclass(type(other), Target):
            return (other.name == self.name and
                    other.script == self.script and
                    other.data == self.data)
        return False

    def reuse(self, max_: int) -> int:
        """Advance the reuse counter, wrapping to 0 once it reaches ``max_``.

        With ``max_ <= 0`` the counter is returned unchanged.
        """
        if max_ > 0:
            if self.reused >= max_:
                self.reused = 0
            else:
                self.reused += 1
        return self.reused

    def hash(self) -> bytes:
        """Stable blake2s digest over (name, data, script)."""
        # BUG FIX: the original called ``self.data.encode()`` for bytes
        # payloads too, but bytes has no .encode() -> AttributeError.
        if isinstance(self.data, bytes):
            payload = self.data
        elif isinstance(self.data, str):
            payload = self.data.encode()
        else:
            payload = str(self.data).encode()
        return hashlib.blake2s(self.name.encode() + payload + self.script.encode()).digest()

    def __hash__(self) -> int:
        return hash(self.hash())


TargetType = TypeVar('TargetType', bound=Target)
@dataclass
class TInterval(Interval, Target):
    """Target re-checked on a fixed interval (see Interval)."""

    def __eq__(self, other):
        # Keep Target's (name, script, data) equality.
        return Target.__eq__(self, other)

    # BUG FIX: defining __eq__ implicitly sets __hash__ to None; restore
    # Target's hash so instances remain usable as dict/set members.
    __hash__ = Target.__hash__
@dataclass
class TScheduled(Scheduled, Target):
    """Target checked at a scheduled time (see Scheduled)."""

    def __eq__(self, other):
        # Keep Target's (name, script, data) equality.
        return Target.__eq__(self, other)

    # BUG FIX: defining __eq__ implicitly sets __hash__ to None; restore
    # Target's hash so instances remain usable as dict/set members.
    __hash__ = Target.__hash__
@dataclass
class TSmart(Smart, Target):
    """Target driven by the Smart scheduling policy (see Smart)."""

    def __eq__(self, other):
        # Keep Target's (name, script, data) equality.
        return Target.__eq__(self, other)

    # BUG FIX: defining __eq__ implicitly sets __hash__ to None; restore
    # Target's hash so instances remain usable as dict/set members.
    __hash__ = Target.__hash__
# Restock Target classes
@dataclass
class RestockTarget(ABC):
    """Base descriptor of a restock-monitoring target.

    Identity is (``script``, ``data``, ``item``); ``reused`` tracks
    consecutive re-queues and is excluded from equality and hashing.
    """
    script: str
    data: Union[str, bytes, int, float, list, tuple, dict] = field(repr=False)
    item: int = field(init=False, default=-1)  # id of the owning item; -1 = unbound
    reused: int = field(init=False, default=-1)

    def __post_init__(self):
        if not isinstance(self.script, str):
            # BUG FIX: message previously read 'scripts must be str'
            raise TypeError('script must be str')
        if not isinstance(self.data, (str, bytes, int, float, list, tuple, dict)):
            # BUG FIX: message was missing a comma after 'str'
            raise TypeError('data must be (str, bytes, int, float, list, tuple, dict)')

    def __eq__(self, other):
        if issubclass(type(other), RestockTarget):
            return (other.script == self.script and
                    other.data == self.data and
                    other.item == self.item)
        return False

    def reuse(self, max_: int) -> int:
        """Advance the reuse counter, wrapping to 0 once it reaches ``max_``.

        With ``max_ <= 0`` the counter is returned unchanged.
        """
        if max_ > 0:
            if self.reused >= max_:
                self.reused = 0
            else:
                self.reused += 1
        return self.reused

    def hash(self) -> bytes:
        """Stable blake2s digest over (script, data, item)."""
        # BUG FIX: the original called ``self.data.encode()`` for bytes
        # payloads too, but bytes has no .encode() -> AttributeError.
        if isinstance(self.data, bytes):
            payload = self.data
        elif isinstance(self.data, str):
            payload = self.data.encode()
        else:
            payload = str(self.data).encode()
        return hashlib.blake2s(self.script.encode() + payload + str(self.item).encode()).digest()

    def __hash__(self):
        return hash(self.hash())


RestockTargetType = TypeVar('RestockTargetType', bound=RestockTarget)
@dataclass
class RTInterval(Interval, RestockTarget):
    """Restock target re-checked on a fixed interval (see Interval)."""

    def __eq__(self, other):
        # Keep RestockTarget's (script, data, item) equality.
        return RestockTarget.__eq__(self, other)

    # BUG FIX: defining __eq__ implicitly sets __hash__ to None; restore
    # RestockTarget's hash so instances remain usable as dict/set members.
    __hash__ = RestockTarget.__hash__
@dataclass
class RTScheduled(Scheduled, RestockTarget):
    """Restock target checked at a scheduled time (see Scheduled)."""

    def __eq__(self, other):
        # Keep RestockTarget's (script, data, item) equality.
        return RestockTarget.__eq__(self, other)

    # BUG FIX: defining __eq__ implicitly sets __hash__ to None; restore
    # RestockTarget's hash so instances remain usable as dict/set members.
    __hash__ = RestockTarget.__hash__
@dataclass
class RTSmart(Smart, RestockTarget):
    """Restock target driven by the Smart scheduling policy (see Smart)."""

    def __eq__(self, other):
        # Keep RestockTarget's (script, data, item) equality.
        return RestockTarget.__eq__(self, other)

    # BUG FIX: defining __eq__ implicitly sets __hash__ to None; restore
    # RestockTarget's hash so instances remain usable as dict/set members.
    __hash__ = RestockTarget.__hash__
# Target EOF classes
@dataclass
class TargetEnd(ABC):
    """Result wrapper emitted when processing of a target finishes."""
    target: TargetType  # the target whose processing ended
    description: str = ''  # optional human-readable detail

    def __post_init__(self):
        if not issubclass(type(self.target), Target):
            raise TypeError('target must be Target\'s subclass')
        if not isinstance(self.description, str):
            raise TypeError('description must be str')


TargetEndType = TypeVar('TargetEndType', bound=TargetEnd)
class TEFail(TargetEnd):
    """Target processing ended in failure."""
    pass
class TESoldOut(TargetEnd):
    """Target processing ended because the item is sold out."""
    pass
class TESuccess(TargetEnd):
    """Target processing ended successfully."""
    pass
# Item classes
@dataclass
class Price:
    """Price of an item.

    ``currency`` is a numeric id from CURRENCIES; ``current`` is the
    present price and ``old`` a previous price (0. = unset).  Integer
    prices are silently promoted to float.
    """
    currency: int
    current: float
    old: float = 0.

    def __post_init__(self):
        if isinstance(self.currency, int):
            if self.currency not in CURRENCIES.values():
                raise IndexError(f'Currency ({self.currency}) does not exist')
        else:
            raise TypeError('currency must be int')
        # ints are promoted to float; anything else is rejected
        if not isinstance(self.current, float):
            if isinstance(self.current, int):
                self.current = float(self.current)
            else:
                raise TypeError('current must be float')
        if not isinstance(self.old, float):
            if isinstance(self.old, int):
                self.old = float(self.old)
            else:
                raise TypeError('old must be float')

    def hash(self) -> bytes:
        """Stable blake2s digest of (currency, current, old)."""
        # NOTE(review): ``bytes(self.currency)`` produces ``currency``
        # zero-bytes, not an encoding of the value — currencies are only
        # distinguished by prefix length.  It works (the mapping is still
        # injective for non-negative ids) but looks unintended; confirm
        # before changing, as existing digests depend on this layout.
        return hashlib.blake2s(bytes(self.currency) + str(self.current).encode() + str(self.old).encode()).digest()
@dataclass
class Size:
    """One size option of an item, with an optional direct-purchase url."""
    size: str
    url: str = ''

    def __post_init__(self):
        if not isinstance(self.size, str):
            raise TypeError('size must be str')
        if not isinstance(self.url, str):
            # BUG FIX: message previously read 'size must be str'
            raise TypeError('url must be str')

    def hash(self) -> bytes:
        """Stable blake2s digest over (size, url)."""
        return hashlib.blake2s(self.size.encode() + self.url.encode()).digest()

    def export(self) -> list:
        """Serialize as a ``[size, url]`` pair."""
        return [self.size, self.url]
class Sizes(list):
    """A type-tagged list that may only contain Size objects.

    ``type`` is a size-chart code from SIZE_TYPES.  Mutators are guarded;
    note that plain list slicing still returns an ordinary list.
    """
    type: int  # size-chart code; intentionally shadows builtin ``type`` here

    def __init__(self, type_: int, values: Union[List[Size], Generator] = None):
        if isinstance(type_, int):
            if type_ not in SIZE_TYPES.values():
                raise IndexError(f'Size type ({type_}) does not exist')
            self.type = type_
        else:
            raise TypeError('type_ must be int')
        # When ``values`` is falsy list.__init__ is simply never called,
        # which is harmless: the instance starts out empty either way.
        if values:
            if isinstance(values, (list, GeneratorType)):
                super().__init__()
                for i in values:
                    if isinstance(i, Size):
                        super().append(i)
                    else:
                        raise ValueError('All items of Sizes must be Size')
            else:
                raise TypeError('values must be iterable type (like list or generator)')

    def __setitem__(self, index: int, value: Size) -> None:
        if isinstance(value, Size):
            super().__setitem__(index, value)
        else:
            raise TypeError('value must be Size')

    def append(self, value: Size) -> None:
        if isinstance(value, Size):
            super().append(value)
        else:
            raise TypeError('Only Size can be appended')

    def extend(self, value) -> None:
        # Stricter than append: the source must itself be a Sizes instance.
        if isinstance(value, Sizes):
            super().extend(value)
        else:
            raise TypeError('Sizes can be extended only by Sizes')

    def hash(self) -> bytes:
        """Stable digest: blake2s over the type code then each item's hash."""
        hash_ = hashlib.blake2s(str(self.type).encode())
        for i in self:
            hash_.update(i.hash())
        return hash_.digest()

    def export(self) -> List[list]:
        """Serialize as a list of ``[size, url]`` pairs."""
        return [i.export() for i in self]
@dataclass
class FooterItem:
    """A single footer link: display text plus its url."""
    text: str
    url: str

    def __post_init__(self):
        # Validate both string fields in declaration order.
        for attr in ('text', 'url'):
            if not isinstance(getattr(self, attr), str):
                raise TypeError(f'{attr} must be str')

    def hash(self) -> bytes:
        """Stable blake2s digest over (text, url)."""
        payload = self.text.encode() + self.url.encode()
        return hashlib.blake2s(payload).digest()
class Item(ABC):
    """Base class for a scraped shop item and its presentation metadata.

    Every constructor argument is strictly type-checked; optional ones get
    neutral defaults (placeholder Price/Sizes, empty footer/fields).
    ``timestamp`` records the moment the object was created.
    """
    # Class-level annotations double as documentation of the instance fields.
    url: str
    channel: str
    name: str
    image: str = ''
    description: str = ''
    price: Price = None
    sizes: Sizes = None
    footer: List[FooterItem] = None
    fields: Dict[str, str] = None
    publish_date: float
    timestamp: float

    def __init__(
        self,
        url: str,
        channel: str,
        name: str,
        image: str = '',
        description: str = '',
        price: Price = None,
        sizes: Sizes = None,
        footer: List[FooterItem] = None,
        fields: Dict[str, str] = None,
        publish_date: float = -1.
    ):
        if isinstance(url, str):
            self.url = url
        else:
            raise TypeError('url must be str')
        if isinstance(channel, str):
            self.channel = channel
        else:
            raise TypeError('channel must be str')
        if isinstance(name, str):
            self.name = name
        else:
            raise TypeError('name must be str')
        if isinstance(image, str):
            self.image = image
        else:
            raise TypeError('image must be str')
        if isinstance(description, str):
            self.description = description
        else:
            raise TypeError('description must be str')
        if price:
            if isinstance(price, Price):
                self.price = price
            else:
                raise TypeError('price must be Price')
        else:
            # No price given: fall back to the "no currency" placeholder.
            self.price = Price(CURRENCIES[''], 0.)
        if sizes:
            if isinstance(sizes, Sizes):
                self.sizes = sizes
            else:
                raise TypeError('sizes must be Sizes')
        else:
            # No sizes given: empty chart of unspecified type.
            self.sizes = Sizes(SIZE_TYPES[''])
        if footer:
            if isinstance(footer, list):
                for i in footer:
                    if not isinstance(i, FooterItem):
                        raise ValueError('All items in footer must be FooterItem')
                self.footer = footer
            else:
                raise TypeError('footer must be list')
        else:
            self.footer = []
        if fields:
            if isinstance(fields, dict):
                for k, v in fields.items():
                    if not isinstance(k, str):
                        raise ValueError('All keys in fields must be str')
                    if not isinstance(v, str):
                        raise ValueError('All values in fields must be str')
                self.fields = fields
            else:
                raise TypeError('fields must be dict')
        else:
            self.fields = {}
        if isinstance(publish_date, float):
            self.publish_date = publish_date
        else:
            raise TypeError('publish_date must be float')
        self.timestamp = time()

    def __repr__(self):
        return f'Item({self.url=}, {self.channel=}, {self.name=})'

    def hash(self, level: int = 2) -> bytes:
        """Digest of the item; higher ``level`` folds in more fields.

        0: url+channel; 1: +name; 2: +image; 3: +description; 4: +price;
        5: +footer.

        :raises ValueError: if ``level`` is outside 0..5.
        :raises TypeError: if ``level`` is not int.
        """
        if isinstance(level, int):
            # BUG FIX: the bound check accepted level == 6 although the
            # error message and the field ladder below only define 0..5.
            if not 0 <= level <= 5:
                raise ValueError('level must be 0 <= level <= 5')
        else:
            raise TypeError('level must be int')
        hash_ = hashlib.blake2s(self.url.encode() + self.channel.encode())
        if level > 0:
            hash_.update(self.name.encode())
        if level > 1:
            hash_.update(self.image.encode())
        if level > 2:
            hash_.update(self.description.encode())
        if level > 3:
            hash_.update(self.price.hash())
        if level > 4:
            for i in self.footer:
                hash_.update(i.hash())
        return hash_.digest()


ItemType = TypeVar('ItemType', bound=Item)
class IAnnounce(Item):
    """An announced item; adds no fields beyond Item."""
    pass
class IRelease(Item):
    """A released item, optionally carrying a follow-up restock target."""
    restock: Optional[RestockTargetType]  # restock monitor to schedule, if any

    def __init__(
        self,
        url: str,
        channel: str,
        name: str,
        image: str = '',
        description: str = '',
        price: Price = None,
        sizes: Sizes = None,
        footer: List[FooterItem] = None,
        fields: Dict[str, str] = None,
        publish_date: float = -1.,
        restock: RestockTargetType = None
    ):
        # Validate restock first, then delegate the rest to Item.__init__.
        if restock:
            if issubclass(type(restock), RestockTarget):
                self.restock = restock
            else:
                raise TypeError('restock must be subclass of RestockTarget')
        else:
            self.restock = None
        super().__init__(url, channel, name, image, description, price, sizes, footer, fields, publish_date)
class IRestock(Item):
id: int
def __init__(
self,
id_: int,
url: str,
channel: str,
name: str,
image: str = '',
| |
<filename>fauxfactory/__init__.py
# -*- coding: utf-8 -*-
"""Generate random data for your tests."""
# Public API of the package; gen_string() dispatches to a subset of these.
__all__ = (
    'gen_alpha',
    'gen_alphanumeric',
    'gen_boolean',
    'gen_choice',
    'gen_cjk',
    'gen_cyrillic',
    'gen_date',
    'gen_datetime',
    'gen_email',
    'gen_html',
    'gen_integer',
    'gen_ipaddr',
    'gen_iplum',
    'gen_latin1',
    'gen_mac',
    'gen_netmask',
    'gen_negative_integer',
    'gen_numeric_string',
    'gen_positive_integer',
    'gen_string',
    'gen_time',
    'gen_url',
    'gen_utf8',
    'gen_uuid',
)
import datetime
import random
import re
import string
import sys
import unicodedata
import uuid
import warnings
from collections import Iterable
from fauxfactory.constants import (
HTML_TAGS, LOREM_IPSUM_TEXT,
MAX_YEARS, MIN_YEARS,
SCHEMES, SUBDOMAINS, TLDS, VALID_NETMASKS
)
from functools import wraps
# Private Functions -----------------------------------------------------------
def _make_unicode(data):
"""Convert ``data`` to a unicode string if running Python 2.
:param str data: A string to be type cast.
:return: ``data``, but as unicode. ``data`` is never modified: if a type
cast is necessary, a copy of ``data`` is returned.
"""
if sys.version_info[0] == 2:
return unicode(data) # flake8:noqa pylint:disable=undefined-variable
return data
def _is_positive_int(length):
"""Check that ``length`` argument is an integer greater than zero.
:param int length: The desired length of the string
:raises: ``ValueError`` if ``length`` is not an ``int`` or is less than 1.
:returns: Nothing.
:rtype: None
"""
if not isinstance(length, int) or length <= 0:
raise ValueError("{0} is an invalid 'length'.".format(length))
def _unicode_letters_generator():
"""Generates unicode characters in the letters category
:return: a generator which will generates all unicode letters available
"""
if sys.version_info[0] == 2:
chr_function = unichr # pylint:disable=undefined-variable
range_function = xrange # pylint:disable=undefined-variable
else:
chr_function = chr
range_function = range
# Use sys.maxunicode instead of 0x10FFFF to avoid the exception below, in a
# narrow Python build (before Python 3.3)
# ValueError: unichr() arg not in range(0x10000) (narrow Python build)
# For more information, read PEP 261.
for i in range_function(sys.maxunicode):
char = chr_function(i)
if unicodedata.category(char).startswith('L'):
yield char
UNICODE_LETTERS = [c for c in _unicode_letters_generator()]
# Public Functions ------------------------------------------------------------
def gen_string(str_type, length=None):
    """Generate a random string by dispatching to a type-specific generator.

    :param str str_type: The type of string which should be generated.
    :param int length: The length of the generated string. Must be 1 or
        greater.
    :raises: ``ValueError`` if an invalid ``str_type`` is specified.
    :returns: A string.
    :rtype: str

    Valid values for ``str_type``: alpha, alphanumeric, cjk, cyrillic,
    html, latin1, numeric, utf8.
    """
    dispatch = {
        u'alpha': gen_alpha,
        u'alphanumeric': gen_alphanumeric,
        u'cjk': gen_cjk,
        u'cyrillic': gen_cyrillic,
        u'html': gen_html,
        u'latin1': gen_latin1,
        u'numeric': gen_numeric_string,
        u'utf8': gen_utf8,
    }
    normalized = str_type.lower()  # do not modify the caller's value
    if normalized not in dispatch.keys():
        raise ValueError(
            '{0} is not a supported string type. Valid string types are {1}.'
            ''.format(normalized, u','.join(dispatch.keys()))
        )
    generator = dispatch[normalized]
    return generator() if length is None else generator(length)
def gen_alpha(length=10):
    """Return a random string of ASCII letters.

    :param int length: Number of characters to generate (must be positive).
    :returns: A random string made up of alpha characters.
    :rtype: str
    """
    _is_positive_int(length)
    letters = string.ascii_letters
    result = u''.join(random.choice(letters) for _ in range(length))
    return _make_unicode(result)
def gen_alphanumeric(length=10):
    """Return a random string of ASCII letters and digits.

    :param int length: Number of characters to generate (must be positive).
    :returns: A random string made up of alpha and numeric characters.
    :rtype: str
    """
    _is_positive_int(length)
    pool = string.ascii_letters + string.digits
    result = u''.join(random.choice(pool) for _ in range(length))
    return _make_unicode(result)
def gen_boolean():
    """Return a random truth value.

    :returns: A random Boolean value.
    :rtype: bool
    """
    return gen_choice((True, False))
def gen_choice(choices):
    """Return one random element of ``choices``.

    :param choices: A non-empty, indexable iterable (not a dict) to pick
        a value from.
    :raises: ``ValueError`` if ``choices`` is ``None``, not iterable, a
        ``dict``, or empty.
    :returns: A random element from ``choices``.
    """
    if choices is None:
        raise ValueError("Choices argument cannot be None.")
    # A dict would yield only keys and is almost certainly a caller error.
    if not isinstance(choices, Iterable) or isinstance(choices, dict):
        raise ValueError("Choices argument is not iterable.")
    if len(choices) == 0:
        raise ValueError("Choices argument cannot be empty.")
    # Single element: no randomness needed.
    return choices[0] if len(choices) == 1 else random.choice(choices)
def gen_cjk(length=10):
    """Return a random string of CJK Unified Ideographs.

    (Source: Wikipedia - CJK Unified Ideographs)

    :param int length: Number of characters to generate (must be positive).
    :returns: A random string made up of CJK characters.
    :rtype: str
    """
    _is_positive_int(length)
    # Valid CJK codepoint range is 0x4E00-0x9FCC inclusive; Python 2 uses
    # `unichr` where Python 3 uses `chr`.
    points = [random.randint(0x4E00, 0x9FCC) for _ in range(length)]
    if sys.version_info[0] == 2:
        # pylint:disable=undefined-variable
        text = u''.join(unichr(point) for point in points)
    else:
        text = u''.join(chr(point) for point in points)
    return _make_unicode(text)
def gen_cyrillic(length=10):
    """Return a random string of Cyrillic characters.

    :param int length: Number of characters to generate (must be positive).
    :returns: A random string made up of Cyrillic characters.
    :rtype: str
    """
    _is_positive_int(length)
    # Valid Cyrillic codepoint range is 0x0400-0x04FF inclusive; fall back
    # to `chr` where Python 2's `unichr` does not exist.
    points = [random.randint(0x0400, 0x04FF) for _ in range(length)]
    try:
        # (undefined-variable) pylint:disable=E0602
        text = u''.join(unichr(point) for point in points)
    except NameError:
        text = u''.join(chr(point) for point in points)
    return _make_unicode(text)
def gen_date(min_date=None, max_date=None):
    """Return a random ``datetime.date`` between two bounds (inclusive).

    :param min_date: A valid ``datetime.date`` object; defaults to
        MIN_YEARS years before today.
    :param max_date: A valid ``datetime.date`` object; defaults to
        MAX_YEARS years after today.
    :raises: ``ValueError`` if arguments are not valid ``datetime.date``
        objects or if ``min_date`` is not strictly before ``max_date``.
    :returns: Random ``datetime.date`` object.
    """
    # Compute the library-wide defaults only when actually needed.
    if min_date is None:
        min_date = datetime.date.today() - datetime.timedelta(365 * MIN_YEARS)
    if max_date is None:
        max_date = datetime.date.today() + datetime.timedelta(365 * MAX_YEARS)
    # Validation.  BUG FIX: the messages contained a bare '%s' that was
    # never interpolated; include the offending value.
    if not isinstance(min_date, datetime.date):
        raise ValueError("%s is not a valid datetime.date object" % (min_date,))
    if not isinstance(max_date, datetime.date):
        raise ValueError("%s is not a valid datetime.date object" % (max_date,))
    # BUG FIX: was a bare ``assert`` — stripped under ``python -O`` and
    # raising AssertionError instead of the documented ValueError.
    if min_date >= max_date:
        raise ValueError("min_date must be earlier than max_date")
    # Pick a day between min and max dates (inclusive).
    diff = max_date - min_date
    days = random.randint(0, diff.days)
    return min_date + datetime.timedelta(days=days)
def gen_datetime(min_date=None, max_date=None):
    """Return a random ``datetime.datetime`` between two bounds (inclusive).

    :param min_date: A valid ``datetime.datetime`` object; defaults to
        MIN_YEARS years before now.
    :param max_date: A valid ``datetime.datetime`` object; defaults to
        MAX_YEARS years after now.
    :raises: ``ValueError`` if arguments are not valid ``datetime.datetime``
        objects or if ``min_date`` is not strictly before ``max_date``.
    :returns: Random ``datetime.datetime`` object.
    """
    # Compute the library-wide defaults only when actually needed.
    if min_date is None:
        min_date = datetime.datetime.now() - datetime.timedelta(365 * MIN_YEARS)
    if max_date is None:
        max_date = datetime.datetime.now() + datetime.timedelta(365 * MAX_YEARS)
    # Validation.  BUG FIX: the messages contained a bare '%s' that was
    # never interpolated; include the offending value.
    if not isinstance(min_date, datetime.datetime):
        raise ValueError("%s is not a valid datetime.datetime object" % (min_date,))
    if not isinstance(max_date, datetime.datetime):
        raise ValueError("%s is not a valid datetime.datetime object" % (max_date,))
    # BUG FIX: was a bare ``assert`` — stripped under ``python -O`` and
    # raising AssertionError instead of the documented ValueError.
    if min_date >= max_date:
        raise ValueError("min_date must be earlier than max_date")
    # Pick a second offset between the two bounds (inclusive).
    diff = max_date - min_date
    seconds = random.randint(0, diff.days * 3600 * 24 + diff.seconds)
    return min_date + datetime.timedelta(seconds=seconds)
def gen_email(name=None, domain=None, tlds=None):
    """Generate a random email address.

    Missing parts are filled with random values: an 8-letter name, a
    domain from SUBDOMAINS and a TLD from TLDS.

    :param str name: Email name.
    :param str domain: Domain name.
    :param str tlds: Top Level Domain Server.
    :returns: An email address.
    :rtype: str
    """
    if name is None:
        name = gen_alpha(8)
    if domain is None:
        domain = gen_choice(SUBDOMAINS)
    if tlds is None:
        tlds = gen_choice(TLDS)
    address = u"{0}@{1}.{2}".format(name, domain, tlds)
    return _make_unicode(address)
def gen_integer(min_value=None, max_value=None):
"""Returns a random integer value based on the current platform.
:param int min_value: The minimum allowed value.
:param int max_value: The maximum allowed value.
:raises: ``ValueError`` if arguments are not integers or if they are
less or greater than the system's allowed range for integers.
:returns: Returns a random integer value.
:rtype: int
"""
# Platform-specific value range for integers
_min_value = - sys.maxsize - 1
_max_value = sys.maxsize
if min_value is None:
min_value = _min_value
if max_value is None:
max_value = _max_value
if sys.version_info[0] < 3:
integer_types = (int, long,) # pylint:disable=undefined-variable
else:
integer_types = (int,)
# Perform some validations
if not isinstance(min_value, integer_types) or min_value < _min_value:
raise ValueError("\'%s\' is not a valid minimum." % min_value)
if | |
<reponame>googleapis/googleapis-gen<filename>google/appengine/v1/google-cloud-appengine-v1-py/google/cloud/appengine_admin_v1/types/version.py
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.appengine_admin_v1.types import app_yaml
from google.cloud.appengine_admin_v1.types import deploy
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
# Proto module descriptor: registers every message/enum defined in this file
# under the 'google.appengine.v1' proto package.  This file appears to be
# auto-generated — change the upstream .proto rather than this manifest.
__protobuf__ = proto.module(
    package='google.appengine.v1',
    manifest={
        'InboundServiceType',
        'ServingStatus',
        'Version',
        'EndpointsApiService',
        'AutomaticScaling',
        'BasicScaling',
        'ManualScaling',
        'CpuUtilization',
        'RequestUtilization',
        'DiskUtilization',
        'NetworkUtilization',
        'StandardSchedulerSettings',
        'Network',
        'Volume',
        'Resources',
        'VpcAccessConnector',
        'Entrypoint',
    },
)
class InboundServiceType(proto.Enum):
    r"""Available inbound services."""
    INBOUND_SERVICE_UNSPECIFIED = 0
    INBOUND_SERVICE_MAIL = 1
    INBOUND_SERVICE_MAIL_BOUNCE = 2
    INBOUND_SERVICE_XMPP_ERROR = 3
    INBOUND_SERVICE_XMPP_MESSAGE = 4
    INBOUND_SERVICE_XMPP_SUBSCRIBE = 5
    INBOUND_SERVICE_XMPP_PRESENCE = 6
    INBOUND_SERVICE_CHANNEL_PRESENCE = 7
    # Value 8 is absent here; the numbering mirrors the upstream .proto,
    # so do not renumber or reuse it without checking that definition.
    INBOUND_SERVICE_WARMUP = 9
class ServingStatus(proto.Enum):
    r"""Run states of a version."""
    SERVING_STATUS_UNSPECIFIED = 0  # invalid as an input (see Version.serving_status)
    SERVING = 1  # version creates instances and can be billed
    STOPPED = 2
class Version(proto.Message):
r"""A Version resource is a specific set of source code and
configuration files that are deployed into a service.
Attributes:
name (str):
Full path to the Version resource in the API. Example:
``apps/myapp/services/default/versions/v1``.
@OutputOnly
id (str):
Relative name of the version within the service. Example:
``v1``. Version names can contain only lowercase letters,
numbers, or hyphens. Reserved names: "default", "latest",
and any name with the prefix "ah-".
automatic_scaling (google.cloud.appengine_admin_v1.types.AutomaticScaling):
Automatic scaling is based on request rate,
response latencies, and other application
metrics. Instances are dynamically created and
destroyed as needed in order to handle traffic.
basic_scaling (google.cloud.appengine_admin_v1.types.BasicScaling):
A service with basic scaling will create an
instance when the application receives a
request. The instance will be turned down when
the app becomes idle. Basic scaling is ideal for
work that is intermittent or driven by user
activity.
manual_scaling (google.cloud.appengine_admin_v1.types.ManualScaling):
A service with manual scaling runs
continuously, allowing you to perform complex
initialization and rely on the state of its
memory over time. Manually scaled versions are
sometimes referred to as "backends".
inbound_services (Sequence[google.cloud.appengine_admin_v1.types.InboundServiceType]):
Before an application can receive email or
XMPP messages, the application must be
configured to enable the service.
instance_class (str):
Instance class that is used to run this version. Valid
values are:
- AutomaticScaling: ``F1``, ``F2``, ``F4``, ``F4_1G``
- ManualScaling or BasicScaling: ``B1``, ``B2``, ``B4``,
``B8``, ``B4_1G``
Defaults to ``F1`` for AutomaticScaling and ``B1`` for
ManualScaling or BasicScaling.
network (google.cloud.appengine_admin_v1.types.Network):
Extra network settings.
Only applicable in the App Engine flexible
environment.
zones (Sequence[str]):
The Google Compute Engine zones that are
supported by this version in the App Engine
flexible environment. Deprecated.
resources (google.cloud.appengine_admin_v1.types.Resources):
Machine resources for this version.
Only applicable in the App Engine flexible
environment.
runtime (str):
Desired runtime. Example: ``python27``.
runtime_channel (str):
The channel of the runtime to use. Only available for some
runtimes. Defaults to the ``default`` channel.
threadsafe (bool):
Whether multiple requests can be dispatched
to this version at once.
vm (bool):
Whether to deploy this version in a container
on a virtual machine.
beta_settings (Sequence[google.cloud.appengine_admin_v1.types.Version.BetaSettingsEntry]):
Metadata settings that are supplied to this
version to enable beta runtime features.
env (str):
App Engine execution environment for this version.
Defaults to ``standard``.
serving_status (google.cloud.appengine_admin_v1.types.ServingStatus):
Current serving status of this version. Only the versions
with a ``SERVING`` status create instances and can be
billed.
``SERVING_STATUS_UNSPECIFIED`` is an invalid value. Defaults
to ``SERVING``.
created_by (str):
Email address of the user who created this
version.
@OutputOnly
create_time (google.protobuf.timestamp_pb2.Timestamp):
Time that this version was created.
@OutputOnly
disk_usage_bytes (int):
Total size in bytes of all the files that are
included in this version and currently hosted on
the App Engine disk.
@OutputOnly
runtime_api_version (str):
The version of the API in the given runtime
environment. Please see the app.yaml reference
for valid values at
https://cloud.google.com/appengine/docs/standard/<language>/config/appref
runtime_main_executable_path (str):
The path or name of the app's main
executable.
service_account (str):
The identity that the deployed version will
run as. Admin API will use the App Engine
Appspot service account as default if this field
is neither provided in app.yaml file nor through
CLI flag.
handlers (Sequence[google.cloud.appengine_admin_v1.types.UrlMap]):
An ordered list of URL-matching patterns that should be
applied to incoming requests. The first matching URL handles
the request and other request handlers are not attempted.
Only returned in ``GET`` requests if ``view=FULL`` is set.
error_handlers (Sequence[google.cloud.appengine_admin_v1.types.ErrorHandler]):
Custom static error pages. Limited to 10KB per page.
Only returned in ``GET`` requests if ``view=FULL`` is set.
libraries (Sequence[google.cloud.appengine_admin_v1.types.Library]):
Configuration for third-party Python runtime libraries that
are required by the application.
Only returned in ``GET`` requests if ``view=FULL`` is set.
api_config (google.cloud.appengine_admin_v1.types.ApiConfigHandler):
Serving configuration for `Google Cloud
Endpoints <https://cloud.google.com/appengine/docs/python/endpoints/>`__.
Only returned in ``GET`` requests if ``view=FULL`` is set.
env_variables (Sequence[google.cloud.appengine_admin_v1.types.Version.EnvVariablesEntry]):
Environment variables available to the application.
Only returned in ``GET`` requests if ``view=FULL`` is set.
build_env_variables (Sequence[google.cloud.appengine_admin_v1.types.Version.BuildEnvVariablesEntry]):
Environment variables available to the build environment.
Only returned in ``GET`` requests if ``view=FULL`` is set.
default_expiration (google.protobuf.duration_pb2.Duration):
Duration that static files should be cached by web proxies
and browsers. Only applicable if the corresponding
`StaticFilesHandler <https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#StaticFilesHandler>`__
does not specify its own expiration time.
Only returned in ``GET`` requests if ``view=FULL`` is set.
health_check (google.cloud.appengine_admin_v1.types.HealthCheck):
Configures health checking for instances. Unhealthy
instances are stopped and replaced with new instances. Only
applicable in the App Engine flexible environment.
Only returned in ``GET`` requests if ``view=FULL`` is set.
readiness_check (google.cloud.appengine_admin_v1.types.ReadinessCheck):
Configures readiness health checking for instances.
Unhealthy instances are not put into the backend traffic
rotation.
Only returned in ``GET`` requests if ``view=FULL`` is set.
liveness_check (google.cloud.appengine_admin_v1.types.LivenessCheck):
Configures liveness health checking for instances. Unhealthy
instances are stopped and replaced with new instances
Only returned in ``GET`` requests if ``view=FULL`` is set.
nobuild_files_regex (str):
Files that match this pattern will not be built into this
version. Only applicable for Go runtimes.
Only returned in ``GET`` requests if ``view=FULL`` is set.
deployment (google.cloud.appengine_admin_v1.types.Deployment):
Code and application artifacts that make up this version.
Only returned in ``GET`` requests if ``view=FULL`` is set.
version_url (str):
Serving URL for this version. Example:
"https://myversion-dot-myservice-dot-
myapp.appspot.com"
@OutputOnly
endpoints_api_service (google.cloud.appengine_admin_v1.types.EndpointsApiService):
Cloud Endpoints configuration.
If endpoints_api_service is set, the Cloud Endpoints
Extensible Service Proxy will be provided to serve the API
implemented by the app.
entrypoint (google.cloud.appengine_admin_v1.types.Entrypoint):
The entrypoint for the application.
vpc_access_connector (google.cloud.appengine_admin_v1.types.VpcAccessConnector):
Enables VPC connectivity for standard apps.
"""
name = proto.Field(
proto.STRING,
number=1,
)
id = proto.Field(
proto.STRING,
number=2,
)
automatic_scaling = proto.Field(
proto.MESSAGE,
number=3,
oneof='scaling',
message='AutomaticScaling',
)
basic_scaling = proto.Field(
proto.MESSAGE,
number=4,
oneof='scaling',
message='BasicScaling',
)
manual_scaling = proto.Field(
proto.MESSAGE,
number=5,
oneof='scaling',
message='ManualScaling',
)
inbound_services = proto.RepeatedField(
proto.ENUM,
number=6,
enum='InboundServiceType',
)
instance_class = proto.Field(
proto.STRING,
number=7,
)
network = proto.Field(
proto.MESSAGE,
number=8,
message='Network',
)
zones = proto.RepeatedField(
proto.STRING,
number=118,
)
resources = proto.Field(
proto.MESSAGE,
number=9,
message='Resources',
)
runtime = proto.Field(
proto.STRING,
number=10,
)
runtime_channel = proto.Field(
proto.STRING,
number=117,
)
threadsafe = proto.Field(
proto.BOOL,
number=11,
)
vm = proto.Field(
proto.BOOL,
number=12,
)
beta_settings = proto.MapField(
proto.STRING,
proto.STRING,
number=13,
)
env = proto.Field(
proto.STRING,
number=14,
)
serving_status = proto.Field(
proto.ENUM,
number=15,
enum='ServingStatus',
)
created_by = proto.Field(
proto.STRING,
number=16,
)
create_time = proto.Field(
proto.MESSAGE,
number=17,
message=timestamp_pb2.Timestamp,
)
disk_usage_bytes = proto.Field(
proto.INT64,
number=18,
)
runtime_api_version = proto.Field(
proto.STRING,
number=21,
)
runtime_main_executable_path = proto.Field(
proto.STRING,
number=22,
)
service_account = proto.Field(
proto.STRING,
number=127,
)
handlers = proto.RepeatedField(
proto.MESSAGE,
number=100,
message=app_yaml.UrlMap,
)
error_handlers = proto.RepeatedField(
proto.MESSAGE,
number=101,
message=app_yaml.ErrorHandler,
)
libraries = proto.RepeatedField(
proto.MESSAGE,
number=102,
message=app_yaml.Library,
)
api_config = proto.Field(
proto.MESSAGE,
number=103,
message=app_yaml.ApiConfigHandler,
)
env_variables = proto.MapField(
proto.STRING,
proto.STRING,
number=104,
)
build_env_variables = proto.MapField(
proto.STRING,
proto.STRING,
number=125,
)
default_expiration = proto.Field(
proto.MESSAGE,
number=105,
message=duration_pb2.Duration,
)
health_check = proto.Field(
proto.MESSAGE,
number=106,
message=app_yaml.HealthCheck,
)
readiness_check = proto.Field(
proto.MESSAGE,
number=112,
message=app_yaml.ReadinessCheck,
)
liveness_check = proto.Field(
proto.MESSAGE,
number=113,
message=app_yaml.LivenessCheck,
)
nobuild_files_regex = proto.Field(
proto.STRING,
number=107,
)
deployment = proto.Field(
proto.MESSAGE,
number=108,
message=deploy.Deployment,
)
version_url = proto.Field(
proto.STRING,
number=109,
)
endpoints_api_service = proto.Field(
| |
'Warren':'',
'Watson':'',
'Waverly':'',
'Waylon':'',
'Wayne':'',
'Wesley':'',
'Weston':'',
'Whitley':'',
'Whitney':'',
'Wilder':'',
'Will':'',
'Willa':'',
'William':'',
'Willow':'',
'Wilson':'',
'Winnie':'',
'Winston':'',
'Winter':'',
'Wren':'',
'Wyatt':'',
'Wynter':'',
'Woody':'',
'Xander':'',
'Xavier':'',
'Ximena':'',
'Xiomara':'',
'Xzavier':'',
'Yadiel':'',
'Yael':'',
'Yahir':'',
'Yahya':'',
'Yara':'',
'Yareli':'',
'Yaretzi':'',
'Yaritza':'',
'Yasmin':'',
'Yehuda':'',
'Yisroel':'',
'Yosef':'',
'Yousef':'',
'Yusuf':'',
'Zachariah':'',
'Zachary':'',
'Zahra':'',
'Zaiden':'',
'Zain':'',
'Zainab':'',
'Zaire':'',
'Zander':'',
'Zane':'',
'Zaniyah':'',
'Zara':'',
'Zaria':'',
'Zariah':'',
'Zariyah':'',
'Zayden':'',
'Zaylee':'',
'Zayn':'',
'Zayne':'',
'Zeke':'',
'Zelda':'',
'Zendaya':'',
'Zion':'',
'Zoe':'',
'Zoey':'',
'Zoie':'',
'Zola':'',
'Zora':'',
'Zuri':'',
'Zyaire':'',
'Aba':'',
'Abaca':'',
'Abacan':'',
'Abaç':'',
'Abay':'',
'Abayhan':'',
'Abaza':'',
'Abbas':'',
'Abdal':'',
'Abdi':'',
'Abdullah':'',
'Abdurrahman':'',
'Abdülâlim':'',
'Abdülazim':'',
'Abdülaziz':'',
'Abdülbaki':'',
'Abdülbari':'',
'Abdülbasir':'',
'Abdülbasit':'',
'Abdülcabbar':'',
'Abdülcebbar':'',
'Abdülcelil':'',
'Abdülcemal':'',
'Abdülcevat':'',
'Abdülezel':'',
'Abdülferit':'',
'Abdülfettah':'',
'Abdülgaffar':'',
'Abdülgaffur':'',
'Abdülgafur':'',
'Abdülgani':'',
'Abdülhadi':'',
'Abdülhak':'',
'Abdülhakim':'',
'Abdülhalik':'',
'Abdülhalim':'',
'Abdülhamit':'',
'Abdülkadir':'',
'Abdülkahhar':'',
'Abdülkerim':'',
'Abdüllâtif':'',
'Abdülmecit':'',
'Abdülmelik':'',
'Abdülmennan':'',
'Abdülmetin':'',
'Abdülnasır':'',
'Abdülvahap':'',
'Abdülvahit':'',
'Abdürrahim':'',
'Abdürrahman':'',
'Abdürrauf':'',
'Abdürreşit':'',
'Abdürrezzak':'',
'Abdüssamet':'',
'Abdüssami':'',
'Abdüsselâm':'',
'Abdüssemi':'',
'Abdüssettar':'',
'Abdüzzeki':'',
'Abgül':'',
'Abher':'',
'Abıhayat':'',
'Abır':'',
'Abıru':'',
'Abid':'',
'Abide':'',
'Abidin':'',
'Abil':'',
'Abir':'',
'Abit':'',
'Abiye':'',
'Ablak':'',
'Abraş':'',
'Abruy':'',
'Abuşka':'',
'Abuzer':'',
'Abuzettin':'',
'Acabay':'',
'Acabey':'',
'Ağabay':'',
'Ağcabey':'',
'Akabay':'',
'Akabey':'',
'Akçabay':'',
'Alabaş':'',
'Alabay':'',
'Alabegim':'',
'Alabegüm':'',
'Alabezek':'',
'Almabanu':'',
'Anabacı':'',
'Anabörü':'',
'Atabay':'',
'Atabek':'',
'Atabey':'',
'Atabörü':'',
'Ayaba':'',
'Babacan':'',
'Babaç':'',
'Babayiğit':'',
'Babür':'',
'Babürşah':'',
'Balaban':'',
'Cabbar':'',
'Cabir':'',
'Çaba':'',
'Çabar':'',
'Farabi':'',
'Gültab':'',
'Hicabi':'',
'İsabet':'',
'Kabadayı':'',
'Kaban':'',
'Kabil':'',
'Kamertab':'',
'Karabaş':'',
'Karabatak':'',
'Karabay':'',
'Karabet':'',
'Karabey':'',
'Karaboğa':'',
'Karabörü':'',
'Karabudun':'',
'Karabuğday':'',
'Karabuğra':'',
'Karabulut':'',
'Karabükey':'',
'Karacabey':'',
'Kayrabay':'',
'Kocabaş':'',
'Kocabey':'',
'Mehabet':'',
'Muhabbet':'',
'Nabi':'',
'Nabia':'',
'Nabiye':'',
'Necabet':'',
'Necabettin':'',
'Nursabah':'',
'Nuşabe':'',
'Olcabay':'',
'Rabbani':'',
'Rabi':'',
'Rabia':'',
'Rabih':'',
'Saba':'',
'Sabah':'',
'Sabahat':'',
'Sabahattin':'',
'Sabahnur':'',
'Sabar':'',
'Sabbar':'',
'Sabıka':'',
'Sabır':'',
'Sabih':'',
'Sabiha':'',
'Sabir':'',
'Sabire':'',
'Sabit':'',
'Sabite':'',
'Sabiye':'',
'Sabri':'',
'Sabrinnisa':'',
'Sabriye':'',
'Sabur':'',
'Sabutay':'',
'Sahabi':'',
'Sarıcabay':'',
'Şaban':'',
'Şahabettin':'',
'Tabende':'',
'Tabgaç':'',
'Türabi':'',
'Tanya':'',
'Tania':'',
'Yabalak':'',
'Yaban':'',
'Yabar':'',
'Yabgu':'',
'Yabız':'',
'Yalabuk':'',
'Yalazabay':'',
'Zabit':'',
'Zeynelabidin':'',
'Aca':'',
'Acahan':'',
'Acar':'',
'Acaralp':'',
'Acarbegüm':'',
'Acarbey':'',
'Acarbike':'',
'Acarbüke':'',
'Acarer':'',
'Acarhatun':'',
'Acarkan':'',
'Acarkatun':'',
'Acarman':'',
'Acaröz':'',
'Acarsoy':'',
'Acartürk':'',
'Acatay':'',
'Acıdaş':'',
'Aclan':'',
'Acun':'',
'Acunal':'',
'Acunalan':'',
'Acunalp':'',
'Acunbegim':'',
'Acunbegüm':'',
'Acunbike':'',
'Acunbüke':'',
'Acuner':'',
'Acungüneş':'',
'Acunışık':'',
'Acunman':'',
'Acunseven':'',
'Aça':'',
'Açalya':'',
'Açangül':'',
'Açelya':'',
'Açıkalın':'',
'Açıkel':'',
'Açıker':'',
'Açıkgün':'',
'Açıl':'',
'Açılay':'',
'Açine':'',
'Açkıngül':'',
'Adahan':'',
'Adak':'',
'Adal':'',
'Adalan':'',
'Adalettin':'',
'Adam':'',
'Adamış':'',
'Adanan':'',
'Adanır':'',
'Adar':'',
'Adarkan':'',
'Adasal':'',
'Adaş':'',
'Aday':'',
'Adeviye':'',
'Adıbelli':'',
'Adıgün':'',
'Adıgüzel':'',
'Adın':'',
'Adısanlı':'',
'Adısönmez':'',
'Adışah':'',
'Adıvar':'',
'Adıyahşi':'',
'Adıyaman':'',
'Adil':'',
'Adile':'',
'Adilhan':'',
'Adlan':'',
'Adlı':'',
'Adlığ':'',
'Adli':'',
'Adnan':'',
'Adni':'',
'Adniye':'',
'Adsız':'',
'Adsoy':'',
'Adviye':'',
'Afacan':'',
'Afak':'',
'Afer':'',
'Afet':'',
'Affan':'',
'Afi':'',
'Afif':'',
'Afife':'',
'Afitap':'',
'Afiye':'',
'Afiyet':'',
'Afra':'',
'Afşar':'',
'Afşin':'',
'Agâh':'',
'Agil':'',
'Aguş':'',
'Ağa':'',
'Ağacan':'',
'Ağahan':'',
'Ağahanım':'',
'Ağahatun':'',
'Ağakan':'',
'Ağakatun':'',
'Ağan':'',
'Ağanbegim':'',
'Ağanbegüm':'',
'Ağanbike':'',
'Ağanbüke':'',
'Ağaner':'',
'Ağaoğlu':'',
'Ağar':'',
'Ağarantan':'',
'Ağaverdi':'',
'Ağbacı':'',
'Ağbegim':'',
'Ağbegüm':'',
'Ağbet':'',
'Ağbilek':'',
'Ağca':'',
'Ağça':'',
'Ağçelik':'',
'Ağer':'',
'Ağgül':'',
'Ağın':'',
'Ağırtaş':'',
'Ağış':'',
'Ağkız':'',
'Ağnak':'',
'Ağyar':'',
'Ahen':'',
'Ahenk':'',
'Ahfeş':'',
'Ahıska':'',
'Ahi':'',
'Ahmet':'',
'Ahsen':'',
'Ahter':'',
'Ahu':'',
'Aişe':'',
'Ajda':'',
'Ajlan':'',
'Aka':'',
'Akad':'',
'Akadlı':'',
'Akağan':'',
'Akal':'',
'Akalan':'',
'Akalın':'',
'Akalp':'',
'Akaltan':'',
'Akan':'',
'Akanay':'',
'Akaner':'',
'Akansu':'',
'Akant':'',
'Akanyıldız':'',
'Akarca':'',
'Akarçay':'',
'Akarsel':'',
'Akarsu':'',
'Akartuna':'',
'Akartürk':'',
'Akasma':'',
'Akasoy':'',
'Akata':'',
'Akatay':'',
'Akay':'',
'Akaydın':'',
'Akbacı':'',
'Akbal':'',
'Akbaran':'',
'Akbaş':'',
'Akbaşak':'',
'Akbatu':'',
'Akbatur':'',
'Akbay':'',
'Akbayar':'',
'Akbek':'',
'Akbel':'',
'Akbet':'',
'Akbey':'',
'Akbil':'',
'Akbilge':'',
'Akboğa':'',
'Akbora':'',
'Akboy':'',
'Akbörü':'',
'Akbudun':'',
'Akbuğ':'',
'Akbulut':'',
'Akburak':'',
'Akburç':'',
'Akburçak':'',
'Akcan':'',
'Akcebe':'',
'Akcivan':'',
'Akça':'',
'Akçael':'',
'Akçagül':'',
'Akçakan':'',
'Akçakaya':'',
'Akçakıl':'',
'Akçakoca':'',
'Akçal':'',
'Akçalı':'',
'Akçam':'',
'Akçan':'',
'Akçasu':'',
'Akçay':'',
'Akçer':'',
'Akçığır':'',
'Akçıl':'',
'Akçınar':'',
'Akçiçek':'',
'Akçit':'',
'Akçora':'',
'Akdağ':'',
'Akdal':'',
'Akdamar':'',
'Akdemir':'',
'Akdes':'',
'Akdik':'',
'Akdiken':'',
'Akdil':'',
'Akdoğ':'',
'Akdoğan':'',
'Akdoğdu':'',
'Akdoğmuş':'',
'Akdoğu':'',
'Akdolun':'',
'Akdora':'',
'Akdoru':'',
'Akdoruk':'',
'Akdöl':'',
'Akduman':'',
'Akdur':'',
'Akdurmuş':'',
'Akel':'',
'Aker':'',
'Akergin':'',
'Akerman':'',
'Akersan':'',
'Akersoy':'',
'Akgil':'',
'Akgiray':'',
'Akgöl':'',
'Akgöze':'',
'Akgüç':'',
'Akgül':'',
'Akgün':'',
'Akgündüz':'',
'Akgüner':'',
'Akgüneş':'',
'Akgüngör':'',
'Akhan':'',
'Akhanım':'',
'Akhun':'',
'Akı':'',
'Akıalp':'',
'Akıl':'',
'Akılbek':'',
'Akıllı':'',
'Akıman':'',
'Akın':'',
'Akınal':'',
'Akınalp':'',
'Akıncı':'',
'Akıncıbay':'',
'Akıner':'',
'Akıneri':'',
'Akıntan':'',
'Akibe':'',
'Akide':'',
'Akif':'',
'Akife':'',
'Akil':'',
'Akile':'',
'Akinci':'',
'Akip':'',
'Akipek':'',
'Akkadın':'',
'Akkan':'',
'Akkar':'',
'Akkaş':'',
'Akkaya':'',
'Akkaynak':'',
'Akkemik':'',
'Akkerman':'',
'Akkılıç':'',
'Akkın':'',
'Akkız':'',
'Akkor':'',
'Akköz':'',
'Akkurt':'',
'Akkuş':'',
'Akkutlu':'',
'Akkuyaş':'',
'Aklan':'',
'Akmaç':'',
'Akman':'',
'Akmanalp':'',
'Akmaner':'',
'Akmaral':'',
'Akmeriç':'',
'Aknur':'',
'Akol':'',
'Akozan':'',
'Akönder':'',
'Akören':'',
'Aköz':'',
'Akpay':'',
'Akpınar':'',
'Akpolat':'',
'Akpulat':'',
'Aksal':'',
'Aksan':'',
'Aksarı':'',
'Aksay':'',
'Aksel':'',
'Aksen':'',
'Akser':'',
'Akses':'',
'Akseven':'',
'Aksevil':'',
'Aksın':'',
'Aksoy':'',
'Aksöğüt':'',
'Aksu':'',
'Aksun':'',
'Aksuna':'',
'Aksunar':'',
'Aksuner':'',
'Aksungur':'',
'Aksülün':'',
'Aksüyek':'',
'Akşın':'',
'Akşit':'',
'Aktaç':'',
'Aktalay':'',
'Aktan':'',
'Aktar':'',
'Aktaş':'',
'Aktay':'',
'Aktekin':'',
'Aktemür':'',
'Aktı':'',
'Aktimur':'',
'Aktolga':'',
'Aktolun':'',
'Aktuğ':'',
'Aktuna':'',
'Aktunç':'',
'Aktün':'',
'Aktürk':'',
'Akün':'',
'Akünal':'',
'Akvarol':'',
'Akyel':'',
'Akyıldız':'',
'Akyiğit':'',
'Akyipek':'',
'Akyol':'',
'Akyön':'',
'Akyurt':'',
'Akyürek':'',
'Akyüz':'',
'Ala':'',
'Alâaddin':'',
'Alaca':'',
'Alacan':'',
'Alaçam':'',
'Alaçuk':'',
'Aladoğan':'',
'Alageyik':'',
'Alagöz':'',
'Alagün':'',
'Alahan':'',
'Alakız':'',
'Alakoç':'',
'Alakurt':'',
'Alakuş':'',
'Alâmet':'',
'Alan':'',
'Alanalp':'',
'Alanay':'',
'Alanbay':'',
'Alaner':'',
'Alangoya':'',
'Alangu':'',
'Alanur':'',
'Alapınar':'',
'Alat':'',
'Alatan':'',
'Alataş':'',
'Alatay':'',
'Alay':'',
'Alaybey':'',
'Alayunt':'',
'Alaz':'',
'Albayrak':'',
'Albeni':'',
'Albora':'',
'Alburak':'',
'Alcan':'',
'Alçık':'',
'Alçın':'',
'Alçınsu':'',
'Alçiçek':'',
'Alçin':'',
'Aldemir':'',
'Aldeniz':'',
'Aldoğan':'',
'Alem':'',
'Alemdar':'',
'Alemşah':'',
'Âlemşah':'',
'Âlemtap':'',
'Alev':'',
'Alevnaz':'',
'Algan':'',
'Algın':'',
'Algış':'',
'Algu':'',
'Algun':'',
'Algur':'',
'Algül':'',
'Algün':'',
'Alhan':'',
'Alıcı ':'',
'Alım':'',
'Alımlı':'',
'Alıncak':'',
'Alışık':'',
'Alışın':'',
'Ali':'',
'Alican':'',
'Alihan':'',
'Alika':'',
'Alim':'',
'Alime':'',
'Alipek':'',
'Alisa':'',
'Alise':'',
'Aliş':'',
'Alişah':'',
'Alişan':'',
'Aliyar':'',
'Aliye':'',
'Alkan':'',
'Alkaş':'',
'Alkılıç':'',
'Alkım':'',
'Alkın':'',
'Alkış':'',
'Alkoç':'',
'Alkor':'',
'Alköz':'',
'Alkun':'',
'Allahverdi':'',
'Allı':'',
'Allıkız':'',
'Almagül':'',
'Almıla':'',
'Almila':'',
'Almile':'',
'Almula':'',
'Alnar':'',
'Alnıaçık':'',
'Alnıak':'',
'Alp':'',
'Alpagu':'',
'Alpağan':'',
'Alpak':'',
'Alpar':'',
'Alparslan':'',
'Alpartun':'',
'Alpaslan':'',
'Alpat':'',
'Alpata':'',
'Alpay':'',
'Alpaydın':'',
'Alpayer':'',
'Alpbilge':'',
'Alpçetin':'',
'Alpdemir':'',
'Alpdoğan':'',
'Alper':'',
'Alperen':'',
'Alpergin':'',
'Alpermiş':'',
'Alpertunga':'',
'Alpgiray':'',
'Alphan':'',
'Alpkan':'',
'Alpkanat':'',
'Alpkartal':'',
'Alpkın':'',
'Alpkutlu':'',
'Alpkülük':'',
'Alpman':'',
'Alpnur':'',
'Alpoğan':'',
'Alpsoy':'',
'Alpsü':'',
'Alptekin':'',
'Alptoğan':'',
'Alptuğ':'',
'Alpyürek':'',
'Alpyürük':'',
'Alsan':'',
'Alsancak':'',
'Alsevin':'',
'Alsoy':'',
'Alsu':'',
'Altaç':'',
'Altan':'',
'Altaner':'',
'Altaş':'',
'Altav':'',
'Altay':'',
'Altemür':'',
'Alten':'',
'Altınay':'',
'Altınbaran':'',
'Altınbaş':'',
'Altınbaşak':'',
'Altınbay':'',
'Altınbike':'',
'Altınçiçek':'',
'Altındal':'',
'Altınel':'',
'Altıner':'',
'Altıngül':'',
'Altınhan':'',
'Altınhanım':'',
'Altınhatun':'',
'Altınışık':'',
'Altınışın':'',
'Altıniz':'',
'Altınkaya':'',
'Altınkılıç':'',
'Altınkız':'',
'Altınnur':'',
'Altınok':'',
'Altınöz':'',
'Altınsaç':'',
'Altınsoy':'',
'Altıntaç':'',
'Altıntaş':'',
'Altıntop':'',
'Altıntuğ':'',
'Altoğan':'',
'Altop':'',
'Altuğ':'',
'Altun':'',
'Altuna':'',
'Altunay':'',
'Altunbaş':'',
'Altuncan':'',
'Altunç':'',
'Altunçağ':'',
'Altuner':'',
'Altunhan':'',
'Altuntaş':'',
'Alyipek':'',
'Amaç':'',
'Amanullah':'',
'Amber':'',
'Amil':'',
'Amile':'',
'Amine':'',
'Amir':'',
'Amiran':'',
'Amire':'',
'Amre':'',
'Anahanım':'',
'Anakadın':'',
'Anakız':'',
'Anar':'',
'Anargül':'',
'Anber':'',
'Ancı':'',
'Ançıbay':'',
'Andaç':'',
'Andak':'',
'Andelip':'',
'Andıç':'',
'Andiç':'',
'Angı':'',
'Angıl':'',
'Angın':'',
'Angış':'',
'Angıt':'',
'Anı':'',
'Anık':'',
'Anıl':'',
'Anıt':'',
'Anka':'',
'Anlı':'',
'Annak':'',
'Ant':'',
'Apa':'',
'Apak':'',
'Apakhan':'',
'Apaydın':'',
'Aracı':'',
'Arafat':'',
'Aral':'',
'Aran':'',
'Aras':'',
'Arat':'',
'Araz':'',
'Arbaş':'',
'Arbay':'',
'Arbek':'',
'Arca':'',
'Arcan':'',
'Arda':'',
'Ardahan':'',
'Ardemir':'',
'Ardıç':'',
'Ardıl':'',
'Arefe':'',
'Arel':'',
'Arer':'',
'Argana':'',
'Argın':'',
'Argu':'',
'Arguç':'',
'Argüden':'',
'Argüder':'',
'Argün':'',
'Arhan':'',
'Arı':'',
'Arıbal':'',
'Arıbaş':'',
'Arıboğa':'',
'Arıca':'',
'Arıcan':'',
'Arıç':'',
'Arıel':'',
'Arıer':'',
'Arığ':'',
'Arıhan':'',
'Arık':'',
'Arıkal':'',
'Arıkan':'',
'Arıkboğa':'',
'Arıker':'',
'Arıkhan':'',
'Arıkiz':'',
'Arıkol':'',
'Arıkut':'',
'Arıl':'',
'Arıman':'',
'Arın':'',
'Arınç':'',
'Arınık':'',
'Arıpınar':'',
'Arısal':'',
'Arısan':'',
'Arısoy':'',
'Arısu':'',
'Arış':'',
'Arıtan':'',
'Arıtaş':'',
'Arıyüz':'',
'Ari':'',
'Arif':'',
'Arife':'',
'Arik':'',
'Arkadaş':'',
'Arkan':'',
'Arkay':'',
'Arkın':'',
'Arkış':'',
'Arkoç':'',
'Arkun':'',
'Arkut':'',
'Arlan':'',
'Armağan':'',
'Arman':'',
'Armanç':'',
'Arna':'',
'Arol':'',
'Arpad':'',
'Arpağ':'',
'Arpak':'',
'Arpınar':'',
'Arsal':'',
'Arsan':'',
'Arslan':'',
'Arslaner':'',
'Arsoy':'',
'Artaç':'',
'Artam':'',
'Artan':'',
'Artuç':'',
'Artuk':'',
'Artun':'',
'Artunç':'',
'Artut':'',
'Aru':'',
'Arukan':'',
'Arukız':'',
'Aryüz':'',
'Arzık':'',
'Arziye':'',
'Arzu':'',
'Arzugül':'',
'Arzuhan':'',
'Arzum':'',
'Asaf':'',
'Asal':'',
'Asalbegim':'',
'Asalbegüm':'',
'Asalet':'',
'Asan':'',
'Âsan':'',
'Asena':'',
'Asfer':'',
'Ası':'',
'Asıf':'',
'Asılbanu':'',
'Asılgül':'',
'Asım':'',
'Asıma':'',
'Asil':'',
'Asile':'',
'Asime':'',
'Asimegül':'',
'Asiye':'',
'Aslan':'',
'Aslaner':'',
'Aslanhan':'',
'Aslı':'',
'Aslıbey':'',
'Aslıgül':'',
'Aslıhan':'',
'Aslım':'',
'Aslınur':'',
'Asliye':'',
'Asma':'',
'Asri':'',
'Asu':'',
'Asude':'',
'Asuman':'',
'Asutay':'',
'Asye':'',
'Aşa':'',
'Aşan':'',
'Aşcır':'',
'Aşır':'',
'Aşina':'',
'Aşir':'',
'Aşkan':'',
'Aşkım':'',
'Aşkın':'',
'Aşkınay':'',
'Aşkıner':'',
'Ata':'',
'Atâ':'',
'Ataan':'',
'Atacan':'',
'Ataç':'',
'Atadan':'',
'Ataergin':'',
'Atagül':'',
'Atagün':'',
'Atahan':'',
'Atak':'',
'Atakan':'',
'Ataker':'',
'Atakul':'',
'Atakurt':'',
'Atakut':'',
'Atalan':'',
'Atalay':'',
'Atalmış':'',
'Ataman':'',
'Atambay':'',
'Atamer':'',
'Atamtürk':'',
'Ataner':'',
'Atanur':'',
'Ataol':'',
'Ataöv':'',
'Atasagun':'',
'Atasan':'',
'Atasay':'',
'Atasev':'',
'Ataseven':'',
'Atasever':'',
'Atasevin':'',
'Atasoy':'',
'Atasü':'',
'Atatöre':'',
'Atatuğ':'',
'Atatüre':'',
'Atatürk':'',
'Ataullah':'',
'Ataün':'',
'Atay':'',
'Ateş':'',
'Atfi':'',
'Atgun':'',
'Atıf':'',
'Atıfa':'',
'Atıfe':'',
'Atıl':'',
'Atılay':'',
'Atılgan':'',
'Atız':'',
'Atik':'',
'Atila':'',
'Atilla':'',
'Atime':'',
'Atiye':'',
'Atlan':'',
'Atlas':'',
'Atlı':'',
'Atlığ':'',
'Atlıhan':'',
'Atmaca':'',
'Atom':'',
'Attilâ':'',
'Atuf':'',
'Avar':'',
'Avcı':'',
'Avhan':'',
'Avkan':'',
'Avni':'',
'Avniye':'',
'Avşar':'',
'Avunç':'',
'Ay':'',
'Aya':'',
'Ayaça':'',
'Ayal':'',
'Ayalp':'',
'Ayaltın':'',
'Ayana':'',
'Ayanç':'',
'Ayanfer':'',
'Ayas':'',
'Ayasun':'',
'Ayaşan':'',
'Ayata':'',
'Ayataç':'',
'Ayaydın':'',
'Ayaz':'',
'Aybala':'',
'Aybanu':'',
'Aybar':'',
'Aybars':'',
'Aybaş':'',
'Aybay':'',
'Aybegim':'',
'Aybegüm':'',
'Aybek':'',
'Ayben':'',
'Aybeniz':'',
'Ayberk':'',
'Aybet':'',
'Aybey':'',
'Aybige':'',
'Aybike':'',
'Aybir':'',
'Aybirgen':'',
'Ayboğa':'',
'Aybora':'',
'Aybüge':'',
'Aybüke':'',
'Ayca':'',
'Aycagül':'',
'Aycahan':'',
'Aycan':'',
'Aycennet':'',
'Ayceren':'',
'Aycıl':'',
'Aycihan':'',
'Ayça':'',
'Ayçağ':'',
'Ayçetin':'',
'Ayçıl':'',
'Ayçiçek':'',
'Ayçil':'',
'Ayçolpan':'',
'Ayçulpan':'',
'Ayda':'',
'Aydagül':'',
'Aydan':'',
'Aydanarı':'',
'Aydanur':'',
'Aydar':'',
'Aydemir':'',
'Aydeniz':'',
'Aydenk':'',
'Aydın':'',
'Aydınalp':'',
'Aydınay':'',
'Aydınbay':'',
'Aydınbey':'',
'Aydınel':'',
'Aydıner':'',
'Aydınol':'',
'Aydıntan':'',
'Aydıntuğ':'',
'Aydınyol':'',
'Aydil':'',
'Aydilek':'',
'Aydinç':'',
'Aydoğan':'',
'Aydoğdu':'',
'Aydoğmuş':'',
'Aydolu':'',
'Aydolun':'',
'Aydonat':'',
'Ayduru':'',
'Ayet':'',
'Ayetullah':'',
'Ayfer':'',
'Ayferi':'',
'Ayferim':'',
'Aygen':'',
'Aygerim':'',
'Aygök':'',
'Aygöl':'',
'Aygönenç':'',
'Aygönül':'',
'Aygut':'',
'Aygutalp':'',
'Aygül':'',
'Aygüler':'',
'Aygülhan':'',
'Aygümüş':'',
'Aygün':'',
'Aygüner':'',
'Aygünkız':'',
'Aygür':'',
'Aygüzel':'',
'Ayhan':'',
'Ayhanım':'',
'Ayhatun':'',
'Ayık':'',
'Ayım':'',
'Ayımbet':'',
'Ayımşa':'',
'Ayışığı':'',
'Ayışını':'',
'Ayilkin':'',
'Aykaç':'',
'Aykal':'',
'Aykan':'',
'Aykaş':'',
'Aykatun':'',
'Aykın':'',
'Aykız':'',
'Aykönül':'',
'Aykul':'',
'Aykurt':'',
'Aykut':'',
'Aykutalp':'',
'Aykutlu':'',
'Aykün':'',
'Ayla':'',
'Aylan':'',
'Aylanur':'',
'Aylin':'',
'Ayman':'',
'Aymaral':'',
'Aymelek':'',
'Aymete':'',
'Aymutlu':'',
'Ayna':'',
'Aynagül':'',
'Aynıfer':'',
'Aynımah':'',
'Ayni':'',
'Aynisa':'',
'Aynişah':'',
'Ayniye':'',
'Aynur':'',
'Aypar':'',
'Aypare':'',
'Aypars':'',
'Ayperi':'',
'Aypınar':'',
'Aypolat':'',
'Ayral':'',
'Ayrıl':'',
'Aysal':'',
'Aysan':'',
'Aysel':'',
'Ayselen':'',
'Aysema':'',
'Aysen':'',
'Ayser':'',
'Aysere':'',
'Ayseren':'',
'Aysev':'',
'Ayseven':'',
'Aysever':'',
'Aysevil':'',
'Aysevim':'',
'Aysevin':'',
'Aysılu':'',
'Aysın':'',
'Aysim':'',
'Aysima':'',
'Aysine':'',
'Aysoy':'',
'Aysu':'',
'Aysuda':'',
'Aysultan':'',
'Aysun':'',
'Aysuna':'',
'Aysunar':'',
'Aysunay':'',
'Aysungur':'',
'Aysü':'',
'Ayşan':'',
| |
backcast,
self.var_bounds,
)
sigma2_python = sigma2.copy()
rec.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert_almost_equal(sigma2_python, sigma2)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([-1e100, 0.4, 0.3, 0.2])
recpy.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
sigma2_python = sigma2.copy()
rec.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert_almost_equal(sigma2_python, sigma2)
assert_almost_equal(sigma2, self.var_bounds[:, 0])
parameters = np.array([1e100, 0.4, 0.3, 0.2])
recpy.arch_recursion(
parameters, resids, sigma2, 3, nobs, backcast, self.var_bounds
)
sigma2_python = sigma2.copy()
rec.arch_recursion(
parameters, resids, sigma2, 3, nobs, backcast, self.var_bounds
)
assert_almost_equal(sigma2_python, sigma2)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([-1e100, 0.4, 0.3, 0.2])
recpy.arch_recursion(
parameters, resids, sigma2, 3, nobs, backcast, self.var_bounds
)
sigma2_python = sigma2.copy()
rec.arch_recursion(
parameters, resids, sigma2, 3, nobs, backcast, self.var_bounds
)
assert_almost_equal(sigma2_python, sigma2)
assert_almost_equal(sigma2, self.var_bounds[:, 0])
def test_egarch(self):
nobs = self.nobs
parameters = np.array([0.0, 0.1, -0.1, 0.95])
resids, sigma2 = self.resids, self.sigma2
p = o = q = 1
backcast = 0.0
var_bounds = self.var_bounds
lnsigma2 = np.empty_like(sigma2)
std_resids = np.empty_like(sigma2)
abs_std_resids = np.empty_like(sigma2)
recpy.egarch_recursion(
parameters,
resids,
sigma2,
p,
o,
q,
nobs,
backcast,
var_bounds,
lnsigma2,
std_resids,
abs_std_resids,
)
sigma2_numba = sigma2.copy()
recpy.egarch_recursion_python(
parameters,
resids,
sigma2,
p,
o,
q,
nobs,
backcast,
var_bounds,
lnsigma2,
std_resids,
abs_std_resids,
)
sigma2_python = sigma2.copy()
rec.egarch_recursion(
parameters,
resids,
sigma2,
p,
o,
q,
nobs,
backcast,
var_bounds,
lnsigma2,
std_resids,
abs_std_resids,
)
assert_almost_equal(sigma2_numba, sigma2)
assert_almost_equal(sigma2_python, sigma2)
norm_const = np.sqrt(2 / np.pi)
for t in range(nobs):
lnsigma2[t] = parameters[0]
if t == 0:
lnsigma2[t] += parameters[3] * backcast
else:
stdresid = resids[t - 1] / np.sqrt(sigma2[t - 1])
lnsigma2[t] += parameters[1] * (np.abs(stdresid) - norm_const)
lnsigma2[t] += parameters[2] * stdresid
lnsigma2[t] += parameters[3] * lnsigma2[t - 1]
sigma2[t] = np.exp(lnsigma2[t])
assert_almost_equal(sigma2_python, sigma2)
parameters = np.array([-100.0, 0.1, -0.1, 0.95])
recpy.egarch_recursion_python(
parameters,
resids,
sigma2,
p,
o,
q,
nobs,
backcast,
var_bounds,
lnsigma2,
std_resids,
abs_std_resids,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.0, 0.1, -0.1, 9.5])
recpy.egarch_recursion_python(
parameters,
resids,
sigma2,
p,
o,
q,
nobs,
backcast,
var_bounds,
lnsigma2,
std_resids,
abs_std_resids,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.0, 0.1, -0.1, 0.95])
mod_resids = resids.copy()
mod_resids[:1] = np.inf
recpy.egarch_recursion_python(
parameters,
resids,
sigma2,
p,
o,
q,
nobs,
backcast,
var_bounds,
lnsigma2,
std_resids,
abs_std_resids,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
def test_midas_hyperbolic(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.8, 0])
j = np.arange(1, 22 + 1)
weights = gamma(j + 0.6) / (gamma(j + 1) * gamma(0.6))
weights = weights / weights.sum()
recpy.midas_recursion(
parameters, weights, resids, sigma2, nobs, backcast, self.var_bounds
)
sigma2_numba = sigma2.copy()
recpy.midas_recursion_python(
parameters, weights, resids, sigma2, nobs, backcast, self.var_bounds
)
sigma2_python = sigma2.copy()
rec.midas_recursion(
parameters, weights, resids, sigma2, nobs, backcast, self.var_bounds
)
assert_almost_equal(sigma2_numba, sigma2)
assert_almost_equal(sigma2_python, sigma2)
mod_resids = resids.copy()
mod_resids[:10] = np.inf
recpy.midas_recursion_python(
parameters, weights, mod_resids, sigma2, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, 10e10, 0])
j = np.arange(1, 22 + 1)
weights = gamma(j + 0.6) / (gamma(j + 1) * gamma(0.6))
weights = weights / weights.sum()
recpy.midas_recursion_python(
parameters, weights, resids, sigma2, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
rec.midas_recursion(
parameters, weights, resids, sigma2, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, -0.4, 0])
recpy.midas_recursion_python(
parameters, weights, resids, sigma2, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
rec.midas_recursion(
parameters, weights, resids, sigma2, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
def test_figarch_recursion(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([1.0, 0.2, 0.4, 0.3])
fresids = resids ** 2
p = q = 1
trunc_lag = 1000
rec.figarch_recursion(
parameters,
fresids,
sigma2,
p,
q,
nobs,
trunc_lag,
backcast,
self.var_bounds,
)
lam = rec.figarch_weights(parameters[1:], p, q, trunc_lag=trunc_lag)
lam_rev = lam[::-1]
omega_tilde = parameters[0] / (1 - parameters[-1])
sigma2_direct = np.empty_like(sigma2)
for t in range(nobs):
backcasts = trunc_lag - t
sigma2_direct[t] = omega_tilde
if backcasts:
sigma2_direct[t] += backcast * lam_rev[:backcasts].sum()
if t:
sigma2_direct[t] += np.sum(lam_rev[-t:] * fresids[max(0, t - 1000) : t])
assert_almost_equal(sigma2_direct, sigma2)
recpy.figarch_recursion(
parameters,
fresids,
sigma2,
p,
q,
nobs,
trunc_lag,
backcast,
self.var_bounds,
)
sigma2_numba = sigma2.copy()
recpy.figarch_recursion_python(
parameters,
fresids,
sigma2,
p,
q,
nobs,
trunc_lag,
backcast,
self.var_bounds,
)
sigma2_python = sigma2.copy()
rec.figarch_recursion(
parameters,
fresids,
sigma2,
p,
q,
nobs,
trunc_lag,
backcast,
self.var_bounds,
)
assert_almost_equal(sigma2_numba, sigma2)
assert_almost_equal(sigma2_python, sigma2)
def test_figarch_weights(self):
parameters = np.array([1.0, 0.4])
lam = rec.figarch_weights(parameters[1:], 0, 0, trunc_lag=1000)
lam_direct = np.empty_like(lam)
lam_direct[0] = parameters[-1]
for i in range(1, 1000):
lam_direct[i] = (i - parameters[-1]) / (i + 1) * lam_direct[i - 1]
assert_almost_equal(lam, lam_direct)
@pytest.mark.skipif(
missing_numba or missing_extension, reason="numba not installed"
)
def test_garch_performance(self):
garch_setup = """
parameters = np.array([.1, .4, .3, .2])
fresids = resids ** 2.0
sresids = np.sign(resids)
"""
garch_first = """
recpy.garch_recursion(parameters, fresids, sresids, sigma2, 1, 1, 1, nobs,
backcast, var_bounds)
"""
garch_second = """
rec.garch_recursion(parameters, fresids, sresids, sigma2, 1, 1, 1, nobs, backcast,
var_bounds)
"""
timer = Timer(
garch_first,
"Numba",
garch_second,
"Cython",
"GARCH",
self.timer_setup + garch_setup,
)
timer.display()
assert timer.ratio < 10.0
if not (missing_numba or CYTHON_COVERAGE):
assert 0.1 < timer.ratio
@pytest.mark.skipif(
missing_numba or missing_extension, reason="numba not installed"
)
def test_harch_performance(self):
harch_setup = """
parameters = np.array([.1, .4, .3, .2])
lags = np.array([1, 5, 22], dtype=np.int32)
"""
harch_first = """
recpy.harch_recursion(parameters, resids, sigma2, lags, nobs, backcast,
var_bounds)
"""
harch_second = """
rec.harch_recursion(parameters, resids, sigma2, lags, nobs, backcast, var_bounds)
"""
timer = Timer(
harch_first,
"Numba",
harch_second,
"Cython",
"HARCH",
self.timer_setup + harch_setup,
)
timer.display()
assert timer.ratio < 10.0
if not (missing_numba or CYTHON_COVERAGE):
assert 0.1 < timer.ratio
@pytest.mark.skipif(
missing_numba or missing_extension, reason="numba not installed"
)
def test_egarch_performance(self):
egarch_setup = """
parameters = np.array([0.0, 0.1, -0.1, 0.95])
p = o = q = 1
backcast = 0.0
lnsigma2 = np.empty_like(sigma2)
std_resids = np.empty_like(sigma2)
abs_std_resids = np.empty_like(sigma2)
"""
egarch_first = """
recpy.egarch_recursion(parameters, resids, sigma2, p, o, q, nobs, backcast,
var_bounds, lnsigma2, std_resids, abs_std_resids)
"""
egarch_second = """
rec.egarch_recursion(parameters, resids, sigma2, p, o, q, nobs, backcast,
var_bounds, lnsigma2, std_resids, abs_std_resids)
"""
timer = Timer(
egarch_first,
"Numba",
egarch_second,
"Cython",
"EGARCH",
self.timer_setup + egarch_setup,
)
timer.display()
assert timer.ratio < 10.0
if not (missing_numba or CYTHON_COVERAGE):
assert 0.1 < timer.ratio
@pytest.mark.skipif(
missing_numba or missing_extension, reason="numba not installed"
)
def test_midas_performance(self):
midas_setup = """
from scipy.special import gamma
parameters = np.array([.1, 0.8, 0])
j = np.arange(1,22+1)
weights = gamma(j+0.6) / (gamma(j+1) * gamma(0.6))
weights = weights / weights.sum()
"""
midas_first = """
recpy.midas_recursion(parameters, weights, resids, sigma2, nobs, backcast, var_bounds)
"""
midas_second = """
rec.midas_recursion(parameters, weights, resids, sigma2, nobs, backcast, var_bounds)
"""
timer = Timer(
midas_first,
"Numba",
midas_second,
"Cython",
"MIDAS",
self.timer_setup + midas_setup,
)
timer.display()
assert timer.ratio < 10.0
if not (missing_numba or CYTHON_COVERAGE):
assert 0.1 < timer.ratio
@pytest.mark.skipif(
missing_numba or missing_extension, reason="numba not installed"
)
def test_figarch_performance(self):
midas_setup = """
p = q = 1
trunc_lag = 1000
parameters = np.array([1.0, 0.2, 0.2, 0.04])
fresids = resids ** 2.0
"""
midas_first = """
recpy.figarch_recursion(parameters, fresids, sigma2, p, q, nobs, trunc_lag, backcast, var_bounds)
"""
midas_second = """
rec.figarch_recursion(parameters, fresids, sigma2, p, q, nobs, trunc_lag, backcast, var_bounds)
"""
timer = Timer(
midas_first,
"Numba",
midas_second,
"Cython",
"FIGARCH",
self.timer_setup + midas_setup,
)
timer.display()
assert timer.ratio < 10.0
if not (missing_numba or CYTHON_COVERAGE):
assert 0.1 < timer.ratio
def test_garch_aparch_equiv(self):
parameters = np.array([0.1, 0.1, 0.8])
fresids = self.resids ** 2
sresids = np.sign(self.resids)
sigma2 = np.empty(1000)
p = q = 1
o = 0
recpy.garch_recursion_python(
parameters,
fresids,
sresids,
sigma2,
p,
o,
q,
self.nobs,
self.backcast,
self.var_bounds,
)
sigma2_garch = sigma2.copy()
parameters = np.array([0.1, 0.1, 0.8, 2])
sigma2[:] = np.nan
sigma2_delta = np.empty_like(sigma2)
recpy.aparch_recursion_python(
parameters,
self.resids,
np.abs(self.resids),
sigma2,
sigma2_delta,
p,
o,
q,
self.nobs,
self.backcast,
self.var_bounds,
)
assert_allclose(sigma2_garch, sigma2, atol=1e-6)
sigma2[:] = np.nan
recpy.aparch_recursion(
parameters,
self.resids,
np.abs(self.resids),
sigma2,
sigma2_delta,
p,
o,
q,
self.nobs,
self.backcast,
self.var_bounds,
)
assert_allclose(sigma2_garch, sigma2, atol=1e-6)
sigma2[:] = np.nan
rec.aparch_recursion(
parameters,
self.resids,
np.abs(self.resids),
sigma2,
sigma2_delta,
p,
o,
q,
self.nobs,
self.backcast,
self.var_bounds,
)
assert_allclose(sigma2_garch, sigma2, atol=1e-6)
def test_asym_aparch_smoke(self):
sigma2 = np.empty(1000)
p | |
if parameter.name in list(self.old_new.values()):
key = [k for k in self.old_new if self.old_new[k] == parameter.name][0]
self.old_new[key] = old_param.name
self.old_new[parameter.name] = old_param.name
else:
self.old_new[parameter.name] = old_param.name
# self.add_internal_parameter(iden_param)
else:
#Just add the new parameter to the current list
self.parameters.append(parameter)
self.new_external.append(parameter)
def add_internal_parameter(self, parameter):
""" add a parameter of type internal """
name = parameter.name
# check if a parameter already has this name
old_param = next((p for p in self.parameters if p.name==name), None)
if old_param:
if old_param.value == parameter.value:
return #Nothing to do!
else:
if self.old_new:
pattern = re.compile(r'\b(%s)\b' % '|'.join(list(self.old_new.keys())))
def replace(matchobj):
return self.old_new[matchobj.group(0)]
parameter.value = pattern.sub(replace, parameter.value)
self.old_new[parameter.name] = '%s%s' % (parameter.name, self.addon)
parameter.name = '%s%s' % (parameter.name, self.addon)
self.parameters.append(parameter)
return
# No name conflict:
if self.old_new:
pattern = re.compile(r'\b(%s)\b' % '|'.join(list(self.old_new.keys())))
def replace(matchobj):
return self.old_new[matchobj.group(0)]
parameter.value = pattern.sub(replace, parameter.value)
self.parameters.append(parameter)
def add_coupling(self, coupling):
"""add one coupling"""
# avoid name duplication
name = coupling.name
same_name = next((p for p in self.couplings if p.name==name), None)
if same_name:
coupling.name = '%s%s' % (coupling.name, self.addon)
if self.old_new:
pattern = re.compile(r'\b(%s)\b' % '|'.join(list(self.old_new.keys())))
def replace(matchobj):
return self.old_new[matchobj.group(0)]
coupling.value = pattern.sub(replace, coupling.value)
old_coupling = next((p for p in self.couplings if p.value==coupling.value), None)
if old_coupling:
coupling.replace = old_coupling #tag for replacement
else:
self.couplings.append(coupling)
def add_coupling_order(self, coupling_order):
"""adding a new coupling order inside the model"""
name = coupling_order.name
same_name = next((p for p in self.orders if p.name==name), None)
if same_name:
if coupling_order.hierarchy != same_name.hierarchy:
logger.warning('%s has different hierarchy use the minimal value (%s, %s) => %s' \
% (name, same_name.hierarchy, coupling_order.hierarchy,
min(same_name.hierarchy, coupling_order.hierarchy)))
same_name.hierarchy = min(same_name.hierarchy, coupling_order.hierarchy)
if coupling_order.expansion_order != same_name.expansion_order:
logger.warning('%s has different expansion_order use the minimal value (%s, %s) => %s' \
% (name, coupling_order.expansion_order, same_name.expansion_order,
min(same_name.expansion_order, coupling_order.expansion_order)))
same_name.expansion_order = min(same_name.expansion_order, coupling_order.expansion_order)
if hasattr(same_name, 'perturbative_expansion') and same_name.perturbative_expansion:
logger.info('%s will be forbidden to run at NLO' % same_name.name)
same_name.perturbative_expansion = 0
else:
self.orders.append(coupling_order)
def add_lorentz(self, lorentz):
"""add one coupling"""
# avoid name duplication
name = lorentz.name
same_name = next((p for p in self.lorentz if p.name==name), None)
if same_name:
lorentz.name = '%s%s' % (lorentz.name, self.addon)
if self.old_new:
pattern = re.compile(r'\b(%s)\b' % '|'.join(list(self.old_new.keys())))
def replace(matchobj):
return self.old_new[matchobj.group(0)]
lorentz.structure = pattern.sub(replace, lorentz.structure)
old_lor = next((p for p in self.lorentz
if p.structure==lorentz.structure and p.spins == lorentz.spins),
None)
if old_lor:
lorentz.replace = old_lor #tag for replacement
else:
self.lorentz.append(lorentz)
    def add_interaction(self, interaction , model):
        """Add one interaction to the model. This is UNCONDITIONAL!

        If the same interaction is already in the model the vertex would
        normally appear twice; this is weakened when both interactions are
        exactly identical (EXACT same color/lorentz/coupling expression),
        in which case the new vertex is silently dropped.
        """
        # Work on a shallow copy so the add-on model's vertex is not mutated.
        interaction = interaction.__class__(**interaction.__dict__)
        # NOTE(review): presumably the UFO constructor above auto-registers the
        # copy in model.all_vertices and this pop undoes that -- confirm.
        model.all_vertices.pop(-1)
        #0. check name: rename on collision with an existing vertex name
        name = interaction.name
        same_name = next((p for p in self.vertices if p.name==name), None)
        if same_name:
            interaction.name = '%s%s' % (interaction.name, self.addon)
        #1. check particles translation (objects tagged 'replace' are swapped
        #   for their merged-model counterpart)
        particles = [p.replace if hasattr(p, 'replace') else p for p in interaction.particles]
        interaction.particles = particles
        #2. check the lorentz structure
        lorentz = [l.replace if hasattr(l, 'replace') else l for l in interaction.lorentz]
        interaction.lorentz = lorentz
        #3. check the couplings
        couplings = [(key, c.replace) if hasattr(c, 'replace') else (key, c)
                     for key, c in interaction.couplings.items()]
        interaction.couplings = dict(couplings)
        #4. Try to avoid duplication of interaction:
        # A vertex with the same particles, same lorentz/color structures and
        # same couplings is dropped; a match on coupling *order* only is
        # accepted with a warning.
        get_pdg = lambda vertex: sorted([p.pdg_code for p in vertex.particles])
        id_part = get_pdg(interaction)
        # Existing vertices with exactly the same (sorted) particle content.
        iden_vertex = [v for v in self.vertices if get_pdg(v) == id_part]
        iden = False  # NOTE(review): never read afterwards -- dead variable
        nb_coupling = len(interaction.couplings)
        keys = list(interaction.couplings.keys()) # to have a fixed order!
        # keys[i] is a (color_index, lorentz_index) pair of the new vertex.
        get_lor_and_color = lambda i: (interaction.lorentz[keys[i][1]].structure,
                                       interaction.color[keys[i][0]])
        for v in iden_vertex:
            if len(v.couplings) != nb_coupling:
                continue
            found = []
            for ((i,j), coup) in v.couplings.items():
                new_lorentz = v.lorentz[j].structure
                new_color = v.color[i]
                k=0  # NOTE(review): dead assignment; 'k' is rebound below
                # Couplings of the new vertex, not yet matched, that share
                # this lorentz structure and color.
                same = [k for k in range(nb_coupling) if k not in found and
                        get_lor_and_color(k) == (new_lorentz, new_color)]
                if not same:
                    # No structural match for this coupling: v is not a
                    # duplicate, move on to the next candidate vertex.
                    break
                else:
                    for k in same:
                        if interaction.couplings[keys[k]] == coup:
                            found.append(k)
                            break
                    else:
                        # check only the coupling order
                        for k in same:
                            if interaction.couplings[keys[k]].order == coup.order:
                                found.append(k)
                                warning = """Did NOT add interaction %s since same particles/lorentz/color/coupling order
BUT did not manage to ensure that the coupling is the same. couplings expression:
base model: %s
addon model: %s
""" % (id_part, coup.value, interaction.couplings[keys[k]].value)
                                logger.warning(warning)
                                # NOTE(review): 'k' was already appended just
                                # above; this second append looks unintentional
                                # -- confirm against upstream usermod.py.
                                found.append(k)
                                break
                        else:
                            pass
                            # mat
            else:
                # Inner loop finished without a structural mismatch: treat the
                # vertex as already present and drop the new one silently.
                return
        logger.info('Adding interaction for the following particles: %s' % id_part)
        self.vertices.append(interaction)
def add_CTinteraction(self, interaction):
"""Add one interaction to the model. This is UNCONDITIONAL!
if the same interaction is in the model this means that the interaction
will appear twice."""
#0. check name:
name = interaction.name
same_name = next((p for p in self.vertices if p.name==name), None)
if same_name:
interaction.name = '%s%s' % (interaction.name, self.addon)
#1. check particles translation
particles = [p.replace if hasattr(p, 'replace') else p for p in interaction.particles]
interaction.particles = particles
#2. check the lorentz structure
lorentz = [l.replace if hasattr(l, 'replace') else l for l in interaction.lorentz]
interaction.lorentz = lorentz
#3. check the couplings
couplings = [(key, c.replace) if hasattr(c, 'replace') else (key, c)
for key, c in interaction.couplings.items()]
interaction.couplings = dict(couplings)
#4. check the loop_particles
loop_particles=[ [p.replace if hasattr(p, 'replace') else p for p in plist]
for plist in interaction.loop_particles]
interaction.loop_particles = loop_particles
self.CTvertices.append(interaction)
    def add_model(self, model=None, path=None, identify_particles=None):
        """Merge another UFO model into the current one.

        Either *model* (an already-loaded UFO module) or *path* (directory
        to load it from) must be supplied. *identify_particles* optionally
        maps add-on particle names to names of existing particles that
        should be identified with them; anti-particles are identified
        automatically.
        """
        self.new_external = []
        if path:
            model = ufomodels.load_model(path)
        if not model:
            raise USRMODERROR('Need a valid Model')
        else:
            path = model.__path__[0]
        # Check the validity of the model. Too old UFO (before UFO 1.0)
        # models lack coupling-order information and cannot be merged.
        if not hasattr(model, 'all_orders'):
            raise USRMODERROR('Add-on Model doesn\'t follows UFO convention (no couplings_order information)\n' +\
                              'MG5 is able to load such model but NOT to the add model feature.')
        if isinstance(model.all_particles[0].mass, six.string_types):
            raise USRMODERROR('Add-on Model doesn\'t follows UFO convention (Mass/Width of particles are string name, not object)\n' +\
                              'MG5 is able to load such model but NOT to the add model feature.')
        # Loop (NLO-capable) add-on models are rejected outright.
        for order in model.all_orders:
            if hasattr(order, 'perturbative_expansion') and order.perturbative_expansion:
                raise USRMODERROR('Add-on model can not be loop model.')
        for order in model.all_orders:
            self.add_coupling_order(order)
        # Adding automatically identification for anti-particle if needed
        # + define identify_pid which keeps track of the pdg_code identified
        identify_pid = {}
        if identify_particles:
            for new, old in identify_particles.items():
                new_part = next((p for p in model.all_particles if p.name==new), None)
                old_part = next((p for p in self.particles if p.name==old), None)
                # secure against lower/upper case problem
                if not new_part:
                    # Case-insensitive fallback; an ambiguous match is fatal.
                    first = True
                    for p in model.all_particles:
                        if p.name.lower() == new.lower():
                            if not first:
                                raise Exception  # NOTE(review): bare Exception -- a USRMODERROR with a message would be clearer
                            else:
                                first =False
                            new_part = p
                if not old_part:
                    first = True
                    for p in self.particles:
                        if p.name.lower() == old.lower():
                            if not first:
                                raise Exception  # NOTE(review): bare Exception, see above
                            else:
                                first =False
                            old_part = p
                    if not old_part:
                        # last possibility is that the model do not follow MG5 convention
                        # but that "old" does
                        defaultname = base_objects.Model.load_default_name() # id->name
                        for pdg, value in defaultname.items():
                            if value == old:
                                old_part = self.particle_dict[pdg]
                                identify_particles[new] = old_part.name
                                break
                # end for the case security
                # NOTE(review): this dereference runs before the None checks
                # below, so a missing particle raises AttributeError here
                # instead of the intended USRMODERROR -- consider reordering.
                identify_pid[new_part.pdg_code] = old_part.pdg_code
                if new_part is None:
                    raise USRMODERROR("particle %s not in added model" % new)
                if old_part is None:
                    raise USRMODERROR("particle %s not in original model" % old)
                # Automatically extend the identification to anti-particles.
                if new_part.antiname not in identify_particles:
                    new_anti = new_part.antiname
                    old_anti = old_part.antiname
                    if old_anti == old:
                        raise USRMODERROR("failed identification (one particle is self-conjugate and not the other)")
                    logger.info("adding identification for anti-particle: %s=%s" % (new_anti, old_anti))
                    identify_particles[new_anti] = old_anti
        # Merge the remaining content; the add_* helpers handle renaming
        # and duplicate detection.
        for parameter in model.all_parameters:
            self.add_parameter(parameter, identify_pid)
        for coupling in model.all_couplings:
            self.add_coupling(coupling)
        for lorentz in model.all_lorentz:
            self.add_lorentz(lorentz)
        for particle in model.all_particles:
            if particle.name in identify_particles:
                self.add_particle(particle, identify=identify_particles[particle.name])
            else:
                self.add_particle(particle)
        for vertex in model.all_vertices:
            self.add_interaction(vertex, model)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.