hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1788929e564ed8994c128d8d524cf0d5338069ba | 15,107 | py | Python | src/qrl/core/p2p/p2pPeerManager.py | cyyber/QRL | f33699501a72a6a58f1cc53499e9d85c3058451c | [
"MIT"
] | 3 | 2020-07-11T15:33:11.000Z | 2021-11-17T11:22:55.000Z | src/qrl/core/p2p/p2pPeerManager.py | cyyber/QRL | f33699501a72a6a58f1cc53499e9d85c3058451c | [
"MIT"
] | null | null | null | src/qrl/core/p2p/p2pPeerManager.py | cyyber/QRL | f33699501a72a6a58f1cc53499e9d85c3058451c | [
"MIT"
] | null | null | null | # coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
import os
from enum import Enum
from typing import Callable, Set, List
from ipaddress import IPv4Address
import simplejson as json
from pyqryptonight.pyqryptonight import UInt256ToString
from qrl.core import config
from qrl.core.misc import logger, ntp
from qrl.core.misc.expiring_set import ExpiringSet
from qrl.core.notification.Observable import Observable
from qrl.core.notification.ObservableEvent import ObservableEvent
from qrl.core.p2p.IPMetadata import IPMetadata
from qrl.core.p2p.p2pObserver import P2PBaseObserver
from qrl.core.p2p.p2pprotocol import P2PProtocol
from qrl.generated import qrllegacy_pb2, qrl_pb2
class P2PPeerManager(P2PBaseObserver):
    class EventType(Enum):
        """Observable event types emitted by the peer manager."""
        NO_PEERS = 1

    def __init__(self):
        """Initialise peer bookkeeping and the on-disk banned-peer set."""
        super().__init__()
        # Twisted delayed-call handles; presumably armed by the protocol layer — TODO confirm.
        self._ping_callLater = None
        self._disconnect_callLater = None
        # Currently connected channels (P2PProtocol instances).
        self._channels = []
        # channel -> last NodeChainState received from that peer.
        self._peer_node_status = dict()
        # Full addresses ("ip:port") of peers we know about, persisted via peers_path.
        self._known_peers = set()
        self.peers_path = os.path.join(config.user.data_dir,
                                       config.dev.peers_filename)
        self.banned_peers_filename = os.path.join(config.user.wallet_dir, config.dev.banned_peers_filename)
        # Banned IPs expire automatically after config.user.ban_minutes.
        self._banned_peer_ips = ExpiringSet(expiration_time=config.user.ban_minutes * 60,
                                            filename=self.banned_peers_filename)
        self._observable = Observable(self)
        self._p2p_factory = None

    def register(self, message_type: EventType, func: Callable):
        """Subscribe *func* to notifications of *message_type*."""
        self._observable.register(message_type, func)

    def set_p2p_factory(self, p2p_factory):
        """Late-bind the P2P factory (set after construction to break a dependency cycle)."""
        self._p2p_factory = p2p_factory

    @property
    def known_peer_addresses(self):
        # Set of known peer full addresses ("ip:port").
        return self._known_peers
def trusted_peer(self, channel: P2PProtocol):
if self.is_banned(channel.peer):
return False
if channel.valid_message_count < config.dev.trust_min_msgcount:
return False
if channel.connection_time < config.dev.trust_min_conntime:
return False
return True
    @property
    def trusted_addresses(self):
        """Full addresses of currently connected peers that pass trusted_peer()."""
        return set([peer.peer.full_address for peer in self._p2p_factory.connections if self.trusted_peer(peer)])

    @property
    def peer_node_status(self):
        # Mapping channel -> last received NodeChainState.
        return self._peer_node_status
def load_known_peers(self) -> List[str]:
known_peers = []
try:
logger.info('Loading known peers')
with open(self.peers_path, 'r') as infile:
known_peers = json.load(infile)
except Exception as e:
logger.info("Could not open known_peers list")
return [IPMetadata.canonical_full_address(fa) for fa in known_peers]
    def save_known_peers(self, known_peers: List[str]):
        """Persist up to 3*max_peers_limit peer addresses to disk as JSON."""
        # Cap the stored list so the file cannot grow without bound.
        tmp = list(known_peers)[:3 * config.user.max_peers_limit]
        config.create_path(config.user.data_dir)
        with open(self.peers_path, 'w') as outfile:
            json.dump(tmp, outfile)

    def load_peer_addresses(self) -> None:
        """Populate self._known_peers from disk plus the configured peer list."""
        known_peers = self.load_known_peers()
        self._known_peers = self.combine_peer_lists(known_peers, config.user.peer_list, )
        logger.info('Loaded known peers: %s', self._known_peers)
        self.save_known_peers(self._known_peers)

    def extend_known_peers(self, new_peer_addresses: set) -> None:
        """Merge new addresses into the known set, connect to the unseen ones, persist."""
        new_addresses = set(new_peer_addresses) - self._known_peers
        if self._p2p_factory is not None:
            self._p2p_factory.connect_peer(new_addresses)
        self._known_peers |= set(new_peer_addresses)
        self.save_known_peers(list(self._known_peers))
@staticmethod
def combine_peer_lists(peer_ips, sender_full_addresses: List, check_global=False) -> Set[IPMetadata]:
tmp_list = list(peer_ips)
tmp_list.extend(sender_full_addresses)
answer = set()
for item in tmp_list:
try:
answer.add(IPMetadata.canonical_full_address(item, check_global))
except: # noqa
logger.warning("Invalid Peer Address {}".format(item))
return answer
def get_better_difficulty(self, current_cumulative_difficulty):
best_cumulative_difficulty = int(UInt256ToString(current_cumulative_difficulty))
local_best = best_cumulative_difficulty
best_channel = None
for channel in self._peer_node_status:
node_chain_state = self._peer_node_status[channel]
node_cumulative_difficulty = int(UInt256ToString(node_chain_state.cumulative_difficulty))
if node_cumulative_difficulty > best_cumulative_difficulty:
best_cumulative_difficulty = node_cumulative_difficulty
best_channel = channel
logger.debug('Local Best Diff : %s', local_best)
logger.debug('Remote Best Diff : %s', best_cumulative_difficulty)
return best_channel
    def insert_to_last_connected_peer(self, ip_public_port, connected_peer=False):
        """Reorder the persisted peer list around *ip_public_port*.

        A connected peer is moved to the end of the connected section; a
        disconnected one is moved to the front of the disconnected section.
        """
        known_peers = self.load_known_peers()
        connection_set = set()
        if self._p2p_factory is not None:
            # Prepare set of connected peers
            # NOTE(review): reaches into the factory's private _peer_connections — confirm API.
            for conn in self._p2p_factory._peer_connections:
                connection_set.add(conn.ip_public_port)
        # Move the current peer to the last position of connected peers
        # or to the start position of disconnected peers
        try:
            index = 0
            if connected_peer:
                if ip_public_port in known_peers:
                    known_peers.remove(ip_public_port)
            else:
                # ValueError from index() is caught below: unknown peers are ignored.
                index = known_peers.index(ip_public_port)
                del known_peers[index]
                # Skip forward past peers that are currently connected.
                while index < len(known_peers):
                    if known_peers[index] not in connection_set:
                        break
                    index += 1
            known_peers.insert(index, ip_public_port)
            self.save_known_peers(known_peers)
        except ValueError:
            pass
    def remove_channel(self, channel):
        """Forget a disconnected channel and demote it in the persisted peer list."""
        self.insert_to_last_connected_peer(channel.ip_public_port)
        if channel in self._channels:
            self._channels.remove(channel)
        if channel in self._peer_node_status:
            del self._peer_node_status[channel]

    def new_channel(self, channel):
        """Track a freshly connected channel and wire up its message handlers."""
        self._channels.append(channel)
        # Start with a zeroed chain state until the peer reports one.
        self._peer_node_status[channel] = qrl_pb2.NodeChainState(block_number=0,
                                                                 header_hash=b'',
                                                                 cumulative_difficulty=b'\x00' * 32,
                                                                 timestamp=ntp.getTime())
        channel.register(qrllegacy_pb2.LegacyMessage.VE, self.handle_version)
        channel.register(qrllegacy_pb2.LegacyMessage.PL, self.handle_peer_list)
        channel.register(qrllegacy_pb2.LegacyMessage.CHAINSTATE, self.handle_chain_state)
        channel.register(qrllegacy_pb2.LegacyMessage.SYNC, self.handle_sync)
        channel.register(qrllegacy_pb2.LegacyMessage.P2P_ACK, self.handle_p2p_acknowledgement)
def _get_version_compatibility(self, version) -> bool:
# Ignore compatibility test on Testnet
if config.dev.hard_fork_heights == config.dev.testnet_hard_fork_heights:
return True
if self._p2p_factory is None:
return True
if self._p2p_factory.chain_height >= config.dev.hard_fork_heights[0]:
try:
major_version = version.split(".")[0]
if int(major_version) < 2:
return False
except Exception:
return False
return True
    def handle_version(self, source, message: qrllegacy_pb2.LegacyMessage):
        """
        Version
        If version is empty, it sends the version & genesis_prev_headerhash.
        Otherwise, processes the content of data.
        In case of mismatches, it disconnects from the peer
        """
        self._validate_message(message, qrllegacy_pb2.LegacyMessage.VE)
        if not message.veData.version:
            # Empty version == request: reply with our own version info.
            msg = qrllegacy_pb2.LegacyMessage(
                func_name=qrllegacy_pb2.LegacyMessage.VE,
                veData=qrllegacy_pb2.VEData(version=config.dev.version,
                                            genesis_prev_hash=config.user.genesis_prev_headerhash,
                                            rate_limit=config.user.peer_rate_limit))
            source.send(msg)
            return
        logger.info('%s version: %s | genesis prev_headerhash %s',
                    source.peer.ip,
                    message.veData.version,
                    message.veData.genesis_prev_hash)
        if not self._get_version_compatibility(message.veData.version):
            logger.warning("Disconnecting from Peer %s running incompatible node version %s",
                           source.peer.ip,
                           message.veData.version)
            source.loseConnection()
            return
        # Never exceed our own configured rate limit.
        source.rate_limit = min(config.user.peer_rate_limit, message.veData.rate_limit)
        if message.veData.genesis_prev_hash != config.user.genesis_prev_headerhash:
            # Different genesis == different chain: drop the connection.
            logger.warning('%s genesis_prev_headerhash mismatch', source.peer)
            logger.warning('Expected: %s', config.user.genesis_prev_headerhash)
            logger.warning('Found: %s', message.veData.genesis_prev_hash)
            source.loseConnection()

    def handle_peer_list(self, source, message: qrllegacy_pb2.LegacyMessage):
        """Process a peer-list (PL) message and queue any new peers for connection."""
        P2PBaseObserver._validate_message(message, qrllegacy_pb2.LegacyMessage.PL)
        if not config.user.enable_peer_discovery:
            return
        if not message.plData.peer_ips:
            return
        # If public port is invalid, ignore rest of the data
        if not (0 < message.plData.public_port < 65536):
            return
        source.set_public_port(message.plData.public_port)
        self.insert_to_last_connected_peer(source.ip_public_port, True)
        sender_peer = IPMetadata(source.peer.ip, message.plData.public_port)
        # Check if peer list contains global ip, if it was sent by peer from a global ip address
        new_peers = self.combine_peer_lists(message.plData.peer_ips,
                                            [sender_peer.full_address],
                                            check_global=IPv4Address(source.peer.ip).is_global)
        logger.info('%s peers data received: %s', source.peer.ip, new_peers)
        if self._p2p_factory is not None:
            self._p2p_factory.add_new_peers_to_peer_q(new_peers)

    def handle_sync(self, source, message: qrllegacy_pb2.LegacyMessage):
        """Answer an empty SYNC probe with our synced state, if we are synced."""
        P2PBaseObserver._validate_message(message, qrllegacy_pb2.LegacyMessage.SYNC)
        if message.syncData.state == '':
            if source.factory.synced:
                source.send_sync(synced=True)
    @staticmethod
    def send_node_chain_state(dest_channel, node_chain_state: qrl_pb2.NodeChainState):
        # FIXME: Not sure this belongs to peer management
        # Wrap our chain state in a CHAINSTATE legacy message and send it.
        msg = qrllegacy_pb2.LegacyMessage(func_name=qrllegacy_pb2.LegacyMessage.CHAINSTATE,
                                          chainStateData=node_chain_state)
        dest_channel.send(msg)

    def monitor_chain_state(self):
        # FIXME: Not sure this belongs to peer management
        """Drop channels that have not reported chain state within the timeout."""
        current_timestamp = ntp.getTime()
        for channel in self._channels:
            if channel not in self._peer_node_status:
                channel.loseConnection()
                continue
            delta = current_timestamp - self._peer_node_status[channel].timestamp
            if delta > config.user.chain_state_timeout:
                del self._peer_node_status[channel]
                logger.debug('>>>> No State Update [%18s] %2.2f (TIMEOUT)', channel.peer, delta)
                channel.loseConnection()

    def broadcast_chain_state(self, node_chain_state: qrl_pb2.NodeChainState):
        # FIXME: Not sure this belongs to peer management
        # TODO: Verify/Disconnect problematic channels
        # Ping all channels
        for channel in self._channels:
            self.send_node_chain_state(channel, node_chain_state)
        # NOTE(review): NO_PEERS is notified on every broadcast, even when
        # channels exist — confirm that observers expect this.
        self._observable.notify(ObservableEvent(self.EventType.NO_PEERS))
def handle_chain_state(self, source, message: qrllegacy_pb2.LegacyMessage):
P2PBaseObserver._validate_message(message, qrllegacy_pb2.LegacyMessage.CHAINSTATE)
message.chainStateData.timestamp = ntp.getTime() # Receiving time
try:
UInt256ToString(message.chainStateData.cumulative_difficulty)
except ValueError:
logger.warning('Invalid Cumulative Difficulty sent by peer')
source.loseConnection()
return
self._peer_node_status[source] = message.chainStateData
if not self._get_version_compatibility(message.chainStateData.version):
logger.warning("Disconnecting from Peer %s running incompatible node version %s",
source.peer.ip,
message.veData.version)
source.loseConnection()
return
    def handle_p2p_acknowledgement(self, source, message: qrllegacy_pb2.LegacyMessage):
        """Credit the peer's processed bytes against our outstanding count and
        continue sending; a negative balance indicates a misbehaving peer."""
        P2PBaseObserver._validate_message(message, qrllegacy_pb2.LegacyMessage.P2P_ACK)
        source.bytes_sent -= message.p2pAckData.bytes_processed
        if source.bytes_sent < 0:
            # Peer acknowledged more bytes than we ever sent.
            logger.warning('Disconnecting Peer %s', source.peer)
            logger.warning('Reason: negative bytes_sent value')
            logger.warning('bytes_sent %s', source.bytes_sent)
            logger.warning('Ack bytes_processed %s', message.p2pAckData.bytes_processed)
            source.loseConnection()
        source.send_next()

    ####################################################
    ####################################################
    ####################################################
    ####################################################

    def is_banned(self, peer: IPMetadata):
        """True when the peer's IP is currently in the (expiring) ban set."""
        return peer.ip in self._banned_peer_ips

    def ban_channel(self, channel: P2PProtocol):
        """Ban the channel's IP (expires after config.user.ban_minutes) and disconnect it."""
        self._banned_peer_ips.add(channel.peer.ip)
        logger.warning('Banned %s', channel.peer.ip)
        channel.loseConnection()

    def get_peers_stat(self) -> list:
        """Return a qrl_pb2.PeerStat snapshot for every peer with known chain state."""
        peers_stat = []
        # Copying the list of keys, to avoid any change by other thread
        for source in list(self.peer_node_status.keys()):
            try:
                peer_stat = qrl_pb2.PeerStat(peer_ip=source.peer.ip.encode(),
                                             port=source.peer.port,
                                             node_chain_state=self.peer_node_status[source])
                peers_stat.append(peer_stat)
            except KeyError:
                # Ignore in case the key is deleted by other thread causing KeyError
                continue
        return peers_stat
| 41.61708 | 113 | 0.63977 |
0e355f91937ae38454f290fccbd23dba735ae52a | 5,080 | py | Python | Python_code/Boston_BNN_1hidden.py | mraabo/Dissertation--Bayesian-Neural-Networks | 629b1c5f4bbdb80ef1d1037b4a0a1b7f95ac710b | [
"MIT"
] | null | null | null | Python_code/Boston_BNN_1hidden.py | mraabo/Dissertation--Bayesian-Neural-Networks | 629b1c5f4bbdb80ef1d1037b4a0a1b7f95ac710b | [
"MIT"
] | null | null | null | Python_code/Boston_BNN_1hidden.py | mraabo/Dissertation--Bayesian-Neural-Networks | 629b1c5f4bbdb80ef1d1037b4a0a1b7f95ac710b | [
"MIT"
] | null | null | null | # # ----------------------------- INFO ---------------------------
# In this python script we implement and run a BNN for predicting house prices
# in Boston. The sampler is based on the NUTS sampler
# # ----------------------------- IMPORTS ---------------------------
import warnings
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
import sys
import time
from keras.datasets import boston_housing
from sklearn import metrics
import numpy as np
import pymc3 as pm
import theano
import arviz as az
from arviz.utils import Numba
import theano.tensor as tt
Numba.disable_numba()
Numba.numba_flag
floatX = theano.config.floatX
# seaborn for vizualzing
sns.set_style("white")
# # ----------------------------- Print versions ---------------------------
print("Running on Python version %s" % sys.version)
print(f"Running on PyMC3 version{pm.__version__}")
print("Running on Theano version %s" % theano.__version__)
print("Running on Arviz version %s" % az.__version__)
print("Running on Numpy version %s" % np.__version__)
# Ignore warnings - NUTS provide many runtimeWarning
warnings.filterwarnings("ignore", category=RuntimeWarning)
tf.random.set_seed(42)
# # ----------------------------- Loading Boston data ---------------------------
(X_train, y_train), (X_test, y_test) = boston_housing.load_data(seed=3030)
# pad Xs with 1's to add bias
ones_train = np.ones(X_train.shape[0])
ones_test = np.ones(X_test.shape[0])
X_train = np.insert(X_train, 0, ones_train, axis=1)
X_test = np.insert(X_test, 0, ones_test, axis=1)
# # ----------------------------- Implementing a BNN function ---------------------------
def construct_bnn(ann_input, ann_output, n_hidden, prior_std):
    """Build a one-hidden-layer Bayesian neural network as a PyMC3 model.

    Args:
        ann_input: training design matrix (n_examples x n_features).
        ann_output: training targets (n_examples,).
        n_hidden: number of units in the hidden layer.
        prior_std: std-dev of the Gaussian priors on all weights.

    Returns:
        A pm.Model with shared Data containers "ann_input"/"ann_output"
        (swappable later via pm.set_data for test-time prediction).

    Bug fix: the original body ignored the ann_input/ann_output parameters
    and read the module-level globals X_train/y_train instead, so the
    function could never be used with any other dataset.
    """
    n_features = ann_input.shape[1]
    # Initialize random weights between each layer
    init_1 = np.random.randn(n_features, n_hidden).astype(floatX) * prior_std
    init_out = np.random.randn(n_hidden, 1).astype(floatX) * prior_std
    with pm.Model() as bayesian_neural_network:
        model_input = pm.Data("ann_input", ann_input)
        model_output = pm.Data("ann_output", ann_output)
        # Input -> Layer 1
        weights_1 = pm.Normal('w_1', mu=0, sd=prior_std,
                              shape=(n_features, n_hidden),
                              testval=init_1)
        acts_1 = tt.nnet.relu(tt.dot(model_input, weights_1))
        # Layer 1 -> Output Layer
        weights_out = pm.Normal('w_out', mu=0, sd=prior_std,
                                shape=(n_hidden, 1),
                                testval=init_out)
        acts_out = tt.dot(acts_1, weights_out)
        # Define likelihood (unit observation noise)
        out = pm.Normal('out', mu=acts_out[:, 0], sd=1, observed=model_output)
    return bayesian_neural_network
# # ----------------------------- Sampling from posterior ---------------------------
# Start time
tic = time.perf_counter() # for timing
bayesian_neural_network_NUTS = construct_bnn(
X_train, y_train, n_hidden=10, prior_std=.1)
# Sample from the posterior using the NUTS samplper
with bayesian_neural_network_NUTS:
trace = pm.sample(draws=3000, tune=1000, chains=3,
target_accept=.9, random_seed=42)
# # ----------------------------- Making predictions on training data ---------------------------
ppc1 = pm.sample_posterior_predictive(
trace, model=bayesian_neural_network_NUTS, random_seed=42)
# Taking the mean over all samples to generate a prediction
y_train_pred = ppc1['out'].mean(axis=0)
# Replace shared variables with testing set
pm.set_data(new_data={"ann_input": X_test, "ann_output": y_test},
model=bayesian_neural_network_NUTS)
# # ----------------------------- Making predictions on test data ---------------------------
ppc2 = pm.sample_posterior_predictive(
trace, model=bayesian_neural_network_NUTS, random_seed=42)
# Taking the mean over all samples to generate a prediction
y_test_pred = ppc2['out'].mean(axis=0)
# End time
toc = time.perf_counter()
print(f"Run time {toc - tic:0.4f} seconds")
# Printing the performance measures
print('MSE (NUTS) on training data:',
metrics.mean_squared_error(y_train, y_train_pred))
print('MSE (NUTS) on test data:', metrics.mean_squared_error(y_test, y_test_pred))
# -------------------------------- Plots ------------------------------------------
# Vizualize uncertainty
# Define examples for which you want to examine the posterior predictive:
example_vec = np.array([10, 15, 68, 72])
for example in example_vec:
plt_hist_array = np.array(ppc2['out'])
plt.hist(plt_hist_array[:, example], density=1,
color="lightsteelblue", bins=30)
plt.xlabel(f"Predicted value for example {example}", fontsize=13)
plt.ylabel("Density", fontsize=13)
# plt.savefig(f'Python_code/Boston_BNN_1hidden_postpred_{example}.pdf')
plt.show()
# Printing standard deviation
for example in example_vec:
output_array = np.array(ppc2['out'])
print(
f"Standard deviation for example {example}: {np.std(output_array[:, example])}")
print(
f"Mean for example {example}: {np.mean(output_array[:, example])}")
| 36.028369 | 97 | 0.633661 |
7b26132c0d8b78762b805dd6438fa5d2c8d060b1 | 13,370 | py | Python | plotting/utils.py | plai-group/amortized-rejection-sampling | 1e85253ae1e6ef1c939e1c488e55f9d95ee48355 | [
"MIT"
] | null | null | null | plotting/utils.py | plai-group/amortized-rejection-sampling | 1e85253ae1e6ef1c939e1c488e55f9d95ee48355 | [
"MIT"
] | null | null | null | plotting/utils.py | plai-group/amortized-rejection-sampling | 1e85253ae1e6ef1c939e1c488e55f9d95ee48355 | [
"MIT"
] | null | null | null | import numpy as np
import torch
from tqdm import tqdm
import matplotlib as mpl
# https://gist.github.com/thriveth/8560036
color_cycle = ['#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']
labels_dict = {"ic": "IC",
"prior": "Prior",
"ars-1": r"$\mathrm{ARS}_{M=1}$",
"ars-2": r"$\mathrm{ARS}_{M=2}$",
"ars-5": r"$\mathrm{ARS}_{M=5}$",
"ars-10": r"$\mathrm{ARS}_{M=10}$",
"ars-20": r"$\mathrm{ARS}_{M=20}$",
"ars-50": r"$\mathrm{ARS}_{M=50}$",
"biased": "Biased",
"gt": "Groundtruth",
"is": "IS",
"collapsed": "Collapsed"}
color_dict = {'gt': color_cycle[0],
'prior': color_cycle[5],
'ic': color_cycle[2],
'biased': color_cycle[3],
'ars-1': color_cycle[4],
'ars-2': color_cycle[1],
'ars-5': color_cycle[7],
'ars-10': color_cycle[6],
'ars-100': color_cycle[8],
'ars-50': color_cycle[8],
'is': color_cycle[8],
'ars-20': "C1",
"collapsed": color_cycle[7]}
########################################
## matplotlib style and configs ##
########################################
def setup_matplotlib():
    """Configure seaborn/matplotlib for publication-quality (LaTeX) figures."""
    import seaborn as sns
    # mpl.use('Agg')
    # plt.style.use('classic')
    # sns.set(font_scale=1.5)
    sns.set_style('white')
    sns.color_palette('colorblind')
    nice_fonts = {
        # Use LaTeX to write all text
        "text.usetex": True,
        'text.latex.preamble': r'\usepackage{amsfonts}',
        "font.family": "serif",
        # Use 10pt font in plots, to match 10pt font in document
        "axes.labelsize": 10,
        "font.size": 10,
        # Make the legend/label fonts a little smaller
        "legend.fontsize": 8,
        "xtick.labelsize": 7,
        "ytick.labelsize": 7,
    }
    mpl.rcParams.update(nice_fonts)
def set_size(width, fraction=1, subplots=(1, 1)):
    # https://jwalton.info/Embed-Publication-Matplotlib-Latex/
    """ Set aesthetic figure dimensions to avoid scaling in latex.

    Parameters
    ----------
    width: float or str
        Width in pts, or one of the named document widths
        ('thesis', 'beamer', 'pnas', 'aistats22').
    fraction: float
        Fraction of the width which you wish the figure to occupy
    subplots: array-like, optional
        The number of rows and columns of subplots.

    Returns
    -------
    fig_dim: tuple
        Dimensions of figure in inches
    """
    # Named document widths in pts; unrecognised values pass through unchanged.
    named_widths = {
        'thesis': 426.79135,
        'beamer': 307.28987,
        'pnas': 246.09686,
        'aistats22': 487.8225,
    }
    width_pt = named_widths.get(width, width)

    pt_to_inch = 1 / 72.27
    # Golden ratio to set aesthetic figure height
    golden = (5**.5 - 1) / 2

    fig_width_in = (width_pt * fraction) * pt_to_inch
    fig_height_in = fig_width_in * golden * (subplots[0] / subplots[1])

    return (fig_width_in, fig_height_in)
class OOMFormatter(mpl.ticker.ScalarFormatter):
    """OrderOfMagnitude formatter
    Source:
    https://stackoverflow.com/questions/42656139/set-scientific-notation-with-fixed-exponent-and-significant-digits-for-multiple
    """
    def __init__(self, order=0, fformat="%1.1f", *args, **kwargs):
        # Fixed exponent and tick-label format, instead of auto-selection.
        self.oom = order
        self.fformat = fformat
        mpl.ticker.ScalarFormatter.__init__(self,*args, **kwargs)

    def _set_order_of_magnitude(self):
        # Let the parent compute, then force our fixed order of magnitude.
        super()._set_order_of_magnitude()
        self.orderOfMagnitude = self.oom

def add_center_aligned_legend(fig, handles, ncol, **kwargs):
    """Draw a figure legend in `ncol` columns with a centered, partial last row."""
    nlines = len(handles)
    # First legend holds the complete rows.
    leg1 = fig.legend(handles=handles[:nlines//ncol*ncol], ncol=ncol, **kwargs)
    if nlines % ncol != 0:
        fig.add_artist(leg1)
        # Second legend holds the leftover handles; its handle box is grafted
        # into the first legend so the last row appears centered.
        leg2 = fig.legend(handles=handles[nlines//ncol*ncol:], ncol=nlines-nlines//ncol*ncol)
        leg2.remove()
        leg1._legend_box._children.append(leg2._legend_handle_box)
        leg1._legend_box.stale = True
########################################
## Loading from disk ##
########################################
def load_log_weights(log_weights_root, iw_mode):
    """Loads the log_weights from the disk. It assumes a file structure of <log_weights_root>/<iw_mode>/*.npy
    of mulyiple npy files. This function loads all the weights in a single numpy array, concatenating all npy files.
    Finally, it caches the result in a file stored at <log_weights_root>/<iw_mode>.npy
    In the further calls, it reuses the cached file.
    Args:
        log_weights_root (str or pathlib.Path)
        iw_mode (str)
    Returns:
        np.ndarray: log importance weights
    """
    agg_weights_file = log_weights_root / f"{iw_mode}.npy"
    agg_weights_dir = log_weights_root / iw_mode
    assert agg_weights_dir.exists() or agg_weights_file.exists()
    if not agg_weights_file.exists():
        # First call: concatenate the per-run files and cache the result.
        log_weights = np.concatenate(
            [np.load(weight_file) for weight_file in agg_weights_dir.glob("*.npy")])
        np.save(agg_weights_file, log_weights)
    else:
        log_weights = np.load(agg_weights_file)
    print(f"{log_weights_root} / {iw_mode} has {len(log_weights):,} traces")
    return log_weights
########################################
## Estimators and metrics ##
########################################
def _compute_estimator_helper(log_weights, dx, estimator_func, **kwargs):
"""A helper function for computing the plotting data. It generates the
x-values and y-values of the plot. x-values is an increasing sequence of
integers, with incremens of dx and ending with N. y-values is a TxK tensor
where T is the number of trials and K is the size of x-values. The j-th
column of y-values is the estimator applied to the log_weights up to the
corresponding x-value.
Args:
log_weights (torch.FloatTensor of shape TxN): All the log importance weights
of a particular experiment.
dx (int): different between points of evaluating the estimator.
estimator_func (function): the estimator function that operates on a tensor
of shape Txn where n <= N.
**kwargs: optional additional arguments to the estimator function
"""
(T, N) = log_weights.shape
xvals = _get_xvals(end=N, dx=dx)
yvals_all = [estimator_func(log_weights[:, :x], **kwargs) for x in xvals]
yvals_all = torch.stack(yvals_all, dim=1)
return xvals, yvals_all
def _get_xvals(end, dx):
"""Returns a integer numpy array of x-values incrementing by "dx"
and ending with "end".
Args:
end (int)
dx (int)
"""
arange = np.arange(0, end-1+dx, dx, dtype=int)
xvals = arange[1:]
return xvals
def _log_evidence_func(arr):
"""Returns an estimate of the log evidence from a set of log importance wegiths
in arr. arr has shape TxN where T is the number of trials and N is the number
of samples for estimation.
Args:
arr (torch.FloatTensor of shape TxN): log importance weights
Returns:
A tensor of shape (T,) representing the estimates for each set of sampels.
"""
T, N = arr.shape
log_evidence = torch.logsumexp(arr, dim=1) - np.log(N)
return log_evidence
def _ess_func(arr):
"""Effective sample size (ESS)"""
a = torch.logsumexp(arr, dim=1) * 2
b = torch.logsumexp(2 * arr, dim=1)
return torch.exp(a - b)
def _ess_inf_func(arr):
"""ESS-infinity (Q_n)"""
a = torch.max(arr, dim=1)[0]
b = torch.logsumexp(arr, dim=1)
return torch.exp(a - b)
def get_evidence_estimate(log_weights, dx):
    """Evidence estimate (exp of the log-evidence) vs. sample count."""
    return _compute_estimator_helper(log_weights, estimator_func=lambda x: _log_evidence_func(x).exp(), dx=dx)

def get_log_evidence_estimate(log_weights, dx):
    """Log-evidence estimate vs. sample count."""
    return _compute_estimator_helper(log_weights, estimator_func=_log_evidence_func, dx=dx)

def get_ess(log_weights, dx):
    """Effective sample size vs. sample count."""
    return _compute_estimator_helper(log_weights, estimator_func=_ess_func, dx=dx)

def get_ness(log_weights, dx):
    """Normalized ESS (ESS / N)"""
    xvals, yvals = get_ess(log_weights, dx=dx)
    return xvals, yvals / xvals

def get_qn(log_weights, dx):
    """Q_n diagnostic (ESS-infinity) vs. sample count."""
    return _compute_estimator_helper(log_weights, estimator_func=_ess_inf_func, dx=dx)
########################################
## Plotting functions ##
########################################
def _lineplot_helper(*, name, func, ax, log_weights_dict, iw_mode_list, dx, bias=None, **kwargs):
    """A helper function for making the line functions of the paper.
    Args:
        name (string): Metric name. Used for logging only.
        func (function): The metric computation function. Should be a function that takes in log_weights and dx
            and returns x-values and y-values. Any additional arguments in kwargs will be passed to this function.
        ax (matplotlib.axes): A matrplotlib ax object in which the plot should be drawn.
        log_weights_dict (dict): A dictionary of the form {iw_mode: log_imprtance_weights as a TxN tensor}
        iw_mode_list (list): An ordered list of iw modes specifying the order of drawing the lines.
        dx (int): The distance between consequent x-values.
        bias (float, optional): If not None, shifts all the line's y-values according to it. Defaults to None.
    """
    for iw_mode in tqdm(iw_mode_list, desc=name):
        if iw_mode not in log_weights_dict:
            print(f"Skipping {iw_mode}.")
            continue
        log_weights = torch.tensor(log_weights_dict[iw_mode])
        label = labels_dict[iw_mode]
        color = color_dict[iw_mode]
        xs, ys_all = func(log_weights, dx=dx)
        # Mean line with a +-1 std band across trials.
        means = ys_all.mean(dim=0)
        stds = ys_all.std(dim=0)
        if bias is not None:
            means -= bias
        ax.plot(xs, means, color=color, label=label)
        ax.fill_between(xs, means - stds, means + stds, color=color, alpha=0.2)
        print(f"> ({name}) {iw_mode, means[-1].item(), stds[-1].item()}")
def plot_evidence(**kwargs):
    """Line plot of the evidence estimate; kwargs forwarded to _lineplot_helper."""
    _lineplot_helper(name="Evidence plot", func=get_evidence_estimate, **kwargs)

def plot_log_evidence(**kwargs):
    """Line plot of the log-evidence estimate; kwargs forwarded to _lineplot_helper."""
    _lineplot_helper(name="Evidence plot", func=get_log_evidence_estimate, **kwargs)

def plot_ness(**kwargs):
    """Line plot of the normalized ESS; kwargs forwarded to _lineplot_helper."""
    _lineplot_helper(name="NESS plot", func=get_ness, **kwargs)

def plot_qn(**kwargs):
    """Line plot of the Q_n diagnostic; kwargs forwarded to _lineplot_helper."""
    _lineplot_helper(name="Qn plot", func=get_qn, **kwargs)
def plot_convergence(ax, log_weights_dict, dx, iw_mode_list,
                     qn_threshold, n_splits=10):
    """Box plot of the sample count at which Q_n first drops below
    qn_threshold, averaged over n_splits groups of trials per mode."""
    plot_labels = []
    plot_x = []
    for iw_mode in tqdm(iw_mode_list, desc="Convergence plot"):
        if iw_mode not in log_weights_dict:
            print(f"Skipping {iw_mode}.")
            continue
        log_weights = torch.tensor(log_weights_dict[iw_mode])
        label = labels_dict[iw_mode]
        xs, qns_all = get_qn(log_weights, dx=dx)
        assert qns_all.shape[0] % n_splits == 0, f"The number of trials ({qns_all.shape[0]}) should be divisible by {n_splits}"
        # Average Q_n over groups of trials to smooth the convergence point.
        qns_all = qns_all.reshape(n_splits, qns_all.shape[0] // n_splits, -1)
        qn_means = qns_all.mean(dim=0)
        print(f"> (Convergence plot) {iw_mode, qn_means.mean(dim=0)[-1].item()} out of {log_weights.shape[-1]} samples")
        converged = (qn_means < qn_threshold).cpu().numpy()
        plot_labels.append(label)
        if not converged.any(axis=-1).all():  # Some of them are not converged ever
            plot_x.append([])
        else:
            # argmax on a boolean array gives the first True index.
            plot_x.append(converged.argmax(axis=-1) * dx)
    ax.boxplot(plot_x, labels=plot_labels, showmeans=True, meanline=True)
def plot_convergence_2(ax, log_weights_dict, dx, iw_mode_list, qn_threshold, n_splits=10):
    """Errorbar plot (mean/std plus min/max whiskers) of the sample count at
    which Q_n first drops below qn_threshold.

    Improvements over the original: the hard-coded split count 10 is now the
    parameter `n_splits` (default 10, matching plot_convergence), and empty
    entries are detected with len() instead of the ambiguous `ndarray != []`
    comparison.
    """
    # Source: https://stackoverflow.com/questions/33328774/box-plot-with-min-max-average-and-standard-deviation/33330997
    plot_labels = []
    plot_x = []
    for iw_mode in tqdm(iw_mode_list, desc="Convergence plot"):
        if iw_mode not in log_weights_dict:
            print(f"Skipping {iw_mode}.")
            continue
        log_weights = torch.tensor(log_weights_dict[iw_mode])
        label = labels_dict[iw_mode]
        xs, qns_all = get_qn(log_weights, dx=dx)
        assert qns_all.shape[0] % n_splits == 0
        # Average Q_n over groups of trials to smooth the convergence point.
        qns_all = qns_all.reshape(n_splits, qns_all.shape[0] // n_splits, -1)
        qn_means = qns_all.mean(dim=0)
        converged = (qn_means < qn_threshold).cpu().numpy()
        plot_labels.append(label)
        if not converged.any(axis=-1).all():  # Some of them are not converged ever
            plot_x.append([])
        else:
            plot_x.append(converged.argmax(axis=-1) * dx)
    # Keep only modes that converged; len() works for both [] and ndarrays.
    xvals = [i for i, x in enumerate(plot_x) if len(x) > 0]
    x = np.stack([x for x in plot_x if len(x) > 0])
    mins = x.min(axis=1)
    maxes = x.max(axis=1)
    means = x.mean(axis=1)
    std = x.std(axis=1)
    # create stacked errorbars:
    ax.errorbar(xvals, means, std, fmt='ok', lw=3)
    ax.errorbar(xvals, means, [means - mins, maxes - means],
                fmt='.k', ecolor='gray', lw=1)
    ax.set_xticks(np.arange(len(plot_x)))
    ax.set_xticklabels(plot_labels)
b2c2f70efc01b11f80e3db4fcd445773f80526a7 | 14,071 | py | Python | ctfpwn/api.py | takeshixx/ctfpwn | b4ae4e51164ce420c96f3f06b7aa115b8329055c | [
"Apache-2.0"
] | null | null | null | ctfpwn/api.py | takeshixx/ctfpwn | b4ae4e51164ce420c96f3f06b7aa115b8329055c | [
"Apache-2.0"
] | null | null | null | ctfpwn/api.py | takeshixx/ctfpwn | b4ae4e51164ce420c96f3f06b7aa115b8329055c | [
"Apache-2.0"
] | null | null | null | import asyncio
import os.path
import aiohttp.web
import jinja2
import bson
import bson.json_util
import subprocess
from ctfpwn.db import CtfDb
from ctfpwn.shared import Service, load_ctf_config
from ctfpwn.exploitservice.worker import ExploitWorkerProtocol
dashboard_html = """<html>
<head>
<title>CTF-PWN Dashboard</title>
</head>
<body>
<h2>Services</h2>
<table border="1" cellpadding="5" cellspacing="5">
<tr><th>Name</th><th>Type</th><th>Port</th><th>URL</th><th>Meta</th></tr>
{% for service in services %}
<tr><td>{{ service.name }}</td><td>{{ service.type }}</td><td>{{ service.port }}</td><td>{{ service.url }}</td><td>{{ service.meta }}</td><td><a href="/api/services/{{ service._id }}/delete">delete</a></td></tr>
{% endfor %}
</table>
<h2>Create New Service</h2>
{% if service_create_error %}
<span style="border: 3px solid red">{{ service_create_error }}</span>
{% endif %}
{% if service_message %}
<span style="border: 3px solid green">{{ service_message }}</span>
{% endif %}
<table>
<form action="/api/services" method="post">
<tr>
<td><label for="name">Name</label></td>
<td><input name="name" /></td>
</tr>
<tr>
<td><label for="type">Type</label></td>
<td><input name="type" /></td>
</tr>
<tr>
<td><label for="port">Port</label></td>
<td><input name="port" /></td>
</tr>
<tr>
<td><label for="url">URL</label></td>
<td><input name="url" /></td>
</tr>
<tr>
<td><label for="meta">Meta</label></td>
<td><input name="meta" /></td>
</tr>
<tr>
<td><input type="submit" /></td>
</tr>
</form>
</table>
<hr />
<h2>Exploits</h2>
<table border="1" cellpadding="5" cellspacing="5">
<tr><th>Service</th><th>Exploit</th><th>Port</th><th>Enabled</th></tr>
{% for exploit in exploits %}
<tr><td>{{ exploit.service }}</td><td>{{ exploit.exploit }}</td><td>{{ exploit.port }}</td><td>{{ exploit.enabled }}</td><td><a href="/api/exploits/{{ exploit._id }}/enable">enable</a></td><td><a href="/api/exploits/{{ exploit._id }}/disable">disable</a></td><td><a href="/api/exploits/{{ exploit._id }}/delete">delete</a></td><td><a href="/api/exploits/{{ exploit._id }}/check">check</a></td></tr>
{% endfor %}
</table>
<h2>Create New Exploit</h2>
{% if exploit_create_error %}
<span style="border: 3px solid red">{{ exploit_create_error }}</span>
{% endif %}
{% if exploit_message %}
<span style="border: 3px solid green">{{ exploit_message }}</span>
{% endif %}
<table>
<form action="/api/exploits" method="post">
<tr>
<td><label for="service">Service</label></td>
<td><input name="service" /></td>
</tr>
<tr>
<td><label for="exploit">Exploit</label></td>
<td><input name="exploit" /></td>
</tr>
<tr>
<td><label for="port">Port</label></td>
<td><input name="port" /></td>
</tr>
<tr>
<td><input type="submit" /></td>
</tr>
</form>
</table>
</body>
</html>"""
def cast_objectid(ilist):
    """Return copies of MongoDB documents with ``_id`` rendered as a string.

    Makes documents JSON-dumpable: ``str(ObjectId)`` yields the 24-char hex
    representation.  Documents without an ``_id`` key are silently dropped.
    The input documents are not mutated.

    :param ilist: iterable of dict-like MongoDB documents.
    :return: list of new dicts with a stringified ``_id``.
    """
    out = []
    for doc in ilist:
        if '_id' not in doc:
            continue
        converted = dict(doc)
        # ``_id`` comes from MongoDB and is already an ObjectId (or its hex
        # string); stringifying directly avoids the redundant
        # ``str(bson.ObjectId(...))`` round-trip of the original.
        converted['_id'] = str(converted['_id'])
        out.append(converted)
    return out
async def dashboard(request):
    """Render the HTML admin dashboard listing all services and exploits."""
    service_docs = await db.select_all_services()
    exploit_docs = await db.select_exploits()
    template = jinja2.Environment().from_string(dashboard_html)
    html = template.render(services=service_docs, exploits=exploit_docs)
    return aiohttp.web.Response(text=html, content_type='text/html')
async def index(request):
    """API root: enumerate the available sub-endpoints as JSON."""
    endpoints = ['/exploits', '/targets', '/services']
    return aiohttp.web.json_response({'endpoints': endpoints})
async def exploits(request):
    """List all exploits, or fetch a single exploit by its ObjectId.

    BUG FIX: the route ``/api/exploits/{exploit_id}`` maps here, but the
    original handler only handled the no-id case and implicitly returned
    ``None`` (an aiohttp server error) when an id was supplied.
    """
    exploit_id = request.match_info.get('exploit_id')
    if not exploit_id:
        all_exploits = cast_objectid(await db.select_exploits())
        return aiohttp.web.json_response(all_exploits, dumps=bson.json_util.dumps)
    # select_exploit_id() returns a (possibly empty) list of matches.
    matching = await db.select_exploit_id(exploit_id)
    if not matching:
        return aiohttp.web.Response(status=404, text='Exploit not found')
    return aiohttp.web.json_response(cast_objectid(matching), dumps=bson.json_util.dumps)
async def create_exploit(request):
    """Create (or update) an exploit entry from a form POST.

    Required form fields: ``service``, ``exploit`` (path to an existing
    script on disk), ``port`` (numeric).  Optional ``enabled`` accepts the
    usual truthy/falsy spellings and defaults to False when absent.

    Returns 201 on success, 400 on validation failure, 500 on DB failure.
    """
    body = await request.post()
    service = body.get('service')
    exploit = body.get('exploit')
    port = body.get('port')
    enabled = body.get('enabled')
    if not service or not exploit or not port:
        ret = aiohttp.web.json_response(
            {'error': {'required arguments': ['service', 'exploit', 'port']}})
        ret.set_status(400)
        return ret
    try:
        port = int(port)
    except ValueError:
        ret = aiohttp.web.json_response(
            {'error': {'value error': 'port is not numeric'}})
        ret.set_status(400)
        return ret
    # Normalise the ``enabled`` flag; an absent field means disabled.
    if str(enabled).lower() in ['true', 'yes', 'on', '1']:
        enabled = True
    elif enabled is None or str(enabled).lower() in ['false', 'no', 'off', '0']:
        enabled = False
    else:
        ret = aiohttp.web.json_response(
            {'error': {'value error': 'invalid value for enabled'}})
        ret.set_status(400)
        return ret
    if not os.path.isfile(exploit):
        ret = aiohttp.web.json_response(
            {'error': {'value error': 'exploit path is invalid'}})
        ret.set_status(400)
        return ret
    result = await db.update_exploit(service, exploit, port, enabled)
    # ``nModified`` > 0 means an existing entry was updated; ``ok`` > 0
    # signals a successful (possibly upserting) write.
    if result['nModified'] > 0 or result['ok'] > 0:
        return aiohttp.web.Response(status=201, text='Successfully created exploit')
    else:
        return aiohttp.web.Response(status=500, text='Exploit creation failed')
async def delete_exploit(request):
    """Delete an exploit by id (DELETE /api/exploits/{exploit_id}).

    BUG FIX: the original returned ``None`` (an aiohttp server error) when
    ``exploit_id`` was missing; respond 400 explicitly instead.
    """
    exploit_id = request.match_info.get('exploit_id')
    if not exploit_id:
        return aiohttp.web.Response(status=400, text='Exploit ID not set')
    result = await db.delete_exploit(exploit_id)
    if result and result.get('ok') > 0:
        # ``n`` is the number of documents removed.
        if result.get('n') > 0:
            return aiohttp.web.Response(status=200, text='Successfully deleted exploit')
        else:
            return aiohttp.web.Response(status=404, text='Exploit not found')
    else:
        return aiohttp.web.Response(status=500, text='Exploit deletion failed')
async def modify_exploit(request):
    """Dispatch an action on a single exploit.

    Supported ``exploit_action`` values:
      * ``enable`` / ``disable`` — toggle the exploit's enabled flag, then
        redirect back to the dashboard.
      * ``delete`` — remove the exploit document from the database.
      * ``check`` — verify the exploit file exists, is readable and
        executable, then dry-run it against a dummy target (1.2.3.4:12345)
        via the exploit worker subprocess protocol.
    """
    exploit_id = request.match_info.get('exploit_id')
    if not exploit_id:
        return aiohttp.web.Response(status=400, text='Exploit ID not set')
    exploit_action = request.match_info.get('exploit_action')
    if not exploit_action:
        return aiohttp.web.Response(status=400, text='Exploit action not set')
    exploit = await db.select_exploit_id(exploit_id)
    if not exploit:
        return aiohttp.web.Response(status=404, text='Exploit not found')
    # select_exploit_id() returns a list of matching documents; take the first.
    exploit = exploit[0]
    if exploit_action == 'enable':
        await db.toggle_exploit(exploit_id, True)
        return aiohttp.web.HTTPFound('/')
    elif exploit_action == 'disable':
        await db.toggle_exploit(exploit_id, False)
        return aiohttp.web.HTTPFound('/')
    elif exploit_action == 'delete':
        result = await db.delete_exploit_id(exploit_id)
        if result and result.get('ok') > 0:
            # ``n`` is the number of documents removed.
            if result.get('n') > 0:
                return aiohttp.web.Response(status=200, text='Successfully deleted exploit')
            else:
                return aiohttp.web.Response(status=404, text='Exploit not found')
        else:
            return aiohttp.web.Response(status=500, text='Exploit deletion failed')
    elif exploit_action == 'check':
        # Sanity-check filesystem permissions before attempting to execute.
        if not os.access(exploit.get('exploit'), os.F_OK):
            return aiohttp.web.Response(status=500, text='Cannot access exploit file!')
        if not os.access(exploit.get('exploit'), os.R_OK):
            return aiohttp.web.Response(status=500, text='Cannot read exploit file!')
        if not os.access(exploit.get('exploit'), os.X_OK):
            return aiohttp.web.Response(status=500, text='Exploit is not executable!')
        loop = asyncio.get_event_loop()
        future = loop.create_future()
        # Dry-run against a dummy IP/port; ExploitWorkerProtocol resolves the
        # future when the subprocess exits (raising CalledProcessError on a
        # non-zero exit — presumably; confirm against the protocol source).
        cmd = [exploit.get('exploit'), '1.2.3.4', '12345']
        protocol_factory = lambda: ExploitWorkerProtocol(future, cmd)
        transport, protocol = await loop.subprocess_exec(protocol_factory, *cmd, stdin=None)
        try:
            await future
        except subprocess.CalledProcessError as e:
            ret = 'Exploit execution failed with exception:\n\n' + str(e)
            process_output = bytes(protocol.output)
            if process_output:
                ret += '\n\n\n\nProcess output:\n\n' + process_output.decode()
            return aiohttp.web.Response(status=500, text=ret)
        return aiohttp.web.Response(status=200, text='Everything is fine, ride on :)')
    else:
        return aiohttp.web.Response(status=400, text='Invalid exploit action')
async def targets(request):
    """List alive targets, optionally filtered to a single ObjectId.

    BUG FIX: the route is registered as ``/api/targets/{targets_id}`` but the
    original handler read ``target_id`` (always None) and then fell through,
    returning ``None`` for the per-id route.  Accept either key and filter
    the alive-target list by the stringified ``_id``.
    """
    target_id = request.match_info.get('target_id') or request.match_info.get('targets_id')
    alive = cast_objectid(await db.select_alive_targets())
    if target_id:
        alive = [t for t in alive if t['_id'] == target_id]
        if not alive:
            return aiohttp.web.Response(status=404, text='Target not found')
    return aiohttp.web.json_response(alive, dumps=bson.json_util.dumps)
async def services(request):
    """List all services, optionally filtered to a single ObjectId.

    BUG FIX: the route ``/api/services/{service_id}`` maps here, but the
    original handler only handled the no-id case and implicitly returned
    ``None`` (an aiohttp server error) when an id was supplied.
    """
    service_id = request.match_info.get('service_id')
    all_services = cast_objectid(await db.select_all_services())
    if service_id:
        all_services = [s for s in all_services if s['_id'] == service_id]
        if not all_services:
            return aiohttp.web.Response(status=404, text='Service not found')
    return aiohttp.web.json_response(all_services, dumps=bson.json_util.dumps)
async def create_service(request):
    """Create a new service from a form POST.

    Required fields: ``name``, ``type`` ('port' or 'url'), and at least one
    of ``port``/``url``.  A 'port'-type service must not define both.

    Returns 201 on success, 400 on validation failure, 500 on DB failure.
    """
    body = await request.post()
    name = body.get('name')
    service_type = body.get('type')
    port = body.get('port')
    url = body.get('url')
    meta = body.get('meta')
    if not name or not service_type or not (port or url):
        ret = aiohttp.web.json_response(
            {'error': {'required arguments': ['name', 'service_type', 'port', 'url']}})
        ret.set_status(400)
        return ret
    # BUG FIX: the original wrote ``if type == 'port'`` — comparing the
    # *builtin* ``type`` — so this mutual-exclusion check could never fire.
    if service_type == 'port' and (port and url):
        ret = aiohttp.web.json_response(
            {'error': {'invalid argument': 'either port or url should be defined (not both)'}})
        ret.set_status(400)
        return ret
    if port:
        try:
            port = int(port)
        except ValueError:
            ret = aiohttp.web.json_response(
                {'error': {'value error': 'port is not numeric'}})
            ret.set_status(400)
            return ret
    if service_type not in ['port', 'url']:
        ret = aiohttp.web.json_response(
            {'error': {'invalid type': 'type should be port or url'}})
        ret.set_status(400)
        return ret
    if service_type == 'port':
        service = Service(name, service_type, port=port, meta=meta)
    else:
        # URL-type services may still carry a port (e.g. non-default HTTP port).
        service = Service(name, 'url', url=url, port=port, meta=meta)
    result = await db.insert_service(service)
    if result['nModified'] > 0 or result['ok'] > 0:
        return aiohttp.web.Response(status=201, text='Successfully created service')
    else:
        return aiohttp.web.Response(status=500, text='Service creation failed')
async def delete_service(request):
    """Delete a service by id (DELETE /api/services/{service_id}).

    BUG FIX: the original returned ``None`` (an aiohttp server error) when
    ``service_id`` was missing; respond 400 explicitly instead.
    """
    service_id = request.match_info.get('service_id')
    if not service_id:
        return aiohttp.web.Response(status=400, text='Service ID not set')
    result = await db.delete_service(service_id)
    if result and result.get('ok') > 0:
        # ``n`` is the number of documents removed.
        if result.get('n') > 0:
            return aiohttp.web.Response(status=200, text='Successfully deleted service')
        else:
            return aiohttp.web.Response(status=404, text='Service not found')
    else:
        return aiohttp.web.Response(status=500, text='Service deletion failed')
async def modify_service(request):
    """Dispatch an action on a single service; only 'delete' is supported."""
    service_id = request.match_info.get('service_id')
    if not service_id:
        return aiohttp.web.Response(status=400, text='Service ID not set')
    service_action = request.match_info.get('service_action')
    if not service_action:
        return aiohttp.web.Response(status=400, text='Service action not set')
    if service_action != 'delete':
        return aiohttp.web.Response(status=400, text='Invalid service action')
    outcome = await db.delete_service_id(service_id)
    if not outcome or not outcome.get('ok') > 0:
        return aiohttp.web.Response(status=500, text='Service deletion failed')
    if not outcome.get('n') > 0:
        return aiohttp.web.Response(status=404, text='Service not found')
    return aiohttp.web.Response(status=200, text='Successfully deleted service')
def create_app():
    """Build the aiohttp application and register all dashboard/API routes."""
    app = aiohttp.web.Application()
    # GET routes, in match order.
    get_routes = [
        ('/', dashboard),
        ('/api', index),
        ('/api/exploits', exploits),
        ('/api/exploits/{exploit_id}', exploits),
        ('/api/exploits/{exploit_id}/{exploit_action}', modify_exploit),
        ('/api/targets', targets),
        ('/api/targets/{targets_id}', targets),
        ('/api/services', services),
        ('/api/services/{service_id}', services),
        ('/api/services/{service_id}/{service_action}', modify_service),
    ]
    for path, handler in get_routes:
        app.router.add_get(path, handler)
    # Mutating endpoints.
    app.router.add_post('/api/exploits', create_exploit)
    app.router.add_delete('/api/exploits/{exploit_id}', delete_exploit)
    app.router.add_post('/api/services', create_service)
    app.router.add_delete('/api/services/{service_id}', delete_service)
    return app
async def database():
    """Initialise the module-level ``db`` handle used by all request handlers."""
    global db
    db = await CtfDb.create()
def run_api(config=None):
    """Start the HTTP API/dashboard server (blocking).

    :param config: optional path to a CTF config file providing
        ``api_listening_host`` and ``api_listening_port``; defaults to
        127.0.0.1:8080 when omitted.
    """
    if config:
        # Keep the ``config`` argument intact; bind the parsed dict separately.
        settings = load_ctf_config(config)
        host = settings['api_listening_host']
        port = settings['api_listening_port']
    else:
        host = '127.0.0.1'
        port = 8080
    app = create_app()

    async def _init_db(app):
        # BUG FIX: the original scheduled database() via
        # ``asyncio.get_event_loop().create_task`` before run_app; on modern
        # aiohttp/Python run_app may drive a different loop, so the DB handle
        # was never initialised.  on_startup runs on the serving loop.
        await database()

    app.on_startup.append(_init_db)
    aiohttp.web.run_app(app, host=host, port=port)
# Allow running the API server directly: ``python api.py``.
if __name__ == '__main__':
    run_api()
| 38.656593 | 406 | 0.604932 |
0fa51395a84be5dc0cda8ecd52f8762f2ae5d98f | 3,731 | py | Python | service/src/normal_socket_plugin.py | hivesolutions/colony_plugins | cfd8fb2ac58037e01002966704b8a642feb37895 | [
"Apache-1.1"
] | 1 | 2016-10-30T09:51:06.000Z | 2016-10-30T09:51:06.000Z | service/src/normal_socket_plugin.py | hivesolutions/colony_plugins | cfd8fb2ac58037e01002966704b8a642feb37895 | [
"Apache-1.1"
] | 1 | 2015-12-29T18:51:07.000Z | 2015-12-29T18:51:07.000Z | service/src/normal_socket_plugin.py | hivesolutions/colony_plugins | cfd8fb2ac58037e01002966704b8a642feb37895 | [
"Apache-1.1"
] | 1 | 2018-01-26T12:54:13.000Z | 2018-01-26T12:54:13.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Colony Framework
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Colony Framework.
#
# Hive Colony Framework is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Colony Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Colony Framework. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import colony
class NormalSocketPlugin(colony.Plugin):
    """
    The main class for the Normal Socket plugin.

    Thin colony-framework facade: every public method delegates to the
    ``NormalSocket`` system instance created in :meth:`load_plugin`.  The
    class attributes below are plugin metadata introspected by the colony
    plugin manager.
    """

    id = "pt.hive.colony.plugins.service.normal_socket"
    name = "Normal Socket"
    description = "The plugin that offers the normal socket"
    version = "1.0.0"
    author = "Hive Solutions Lda. <development@hive.pt>"
    platforms = [
        colony.CPYTHON_ENVIRONMENT,
        colony.JYTHON_ENVIRONMENT,
        colony.IRON_PYTHON_ENVIRONMENT
    ]
    capabilities = [
        "socket_provider"
    ]
    main_modules = [
        "normal_socket"
    ]

    def load_plugin(self):
        """Load the plugin, instantiating the backing NormalSocket system."""
        colony.Plugin.load_plugin(self)
        # Imported lazily so the module is only required once the plugin loads.
        import normal_socket
        self.system = normal_socket.NormalSocket(self)

    def get_provider_name(self):
        """
        Retrieves the socket provider name.

        :rtype: String
        :return: The socket provider name.
        """

        return self.system.get_provider_name()

    def provide_socket(self):
        """
        Provides a new socket, configured with
        the default parameters.

        :rtype: Socket
        :return: The provided socket.
        """

        return self.system.provide_socket()

    def provide_socket_parameters(self, parameters):
        """
        Provides a new socket, configured with
        the given parameters.

        :type parameters: Dictionary
        :param parameters: The parameters for socket configuration.
        :rtype: Socket
        :return: The provided socket.
        """

        return self.system.provide_socket_parameters(parameters)

    def process_exception(self, socket, exception):
        """
        Processes the exception taking into account the severity of it,
        as for some exception a graceful handling is imposed.
        The provided socket object should comply with typical python
        interface for it.

        :type socket: Socket
        :param socket: The socket to be used in the exception processing.
        :type exception: Exception
        :param exception: The exception that is going to be handled/processed.
        :rtype: bool
        :return: The result of the processing, in case it's false a normal
        exception handling should be performed otherwise a graceful one is used.
        """

        return self.system.process_exception(socket, exception)
| 30.834711 | 81 | 0.649156 |
9edf9f8f6d3e36e5e0028a003f033962e75eedc2 | 29,967 | py | Python | src/dataset.py | tomvars/medical_adv_da | d2c81ee7951c580b62d853497672057b8cf7923e | [
"Apache-2.0"
] | 2 | 2021-08-13T07:21:00.000Z | 2021-09-15T08:52:20.000Z | src/dataset.py | tomvars/medical_adv_da | d2c81ee7951c580b62d853497672057b8cf7923e | [
"Apache-2.0"
] | null | null | null | src/dataset.py | tomvars/medical_adv_da | d2c81ee7951c580b62d853497672057b8cf7923e | [
"Apache-2.0"
] | null | null | null | import os
import nibabel as nib
from functools import reduce
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
from pathlib import Path
from src.utils import batch_adaptation, to_var_gpu
from monai.data.dataset import Dataset as MonaiDataset
from monai.data.nifti_saver import NiftiSaver
from monai.transforms import (LoadImaged,
Orientationd,
ToTensord,
Compose,
AddChanneld,
ResizeWithPadOrCropd,
ScaleIntensityd,
SpatialCropd,
BatchInverseTransform
)
import ants
class SliceDataset(Dataset):
    """2D FLAIR-slice dataset with WMH labels.

    This Dataset object expects a data_dir which contains the following structure:
    data_dir
    |
    ----flair
        |
        ----- FLAIR_<subject id as int>_slice_<slice id as int>.nii.gz
        ----- FLAIR_<subject id as int>_slice_<slice id as int>.nii.gz
        ----- FLAIR_<subject id as int>_slice_<slice id as int>.nii.gz
    ----labels
        |
        ----- wmh_<subject id as int>_slice_<slice id as int>.nii.gz
        ----- wmh_<subject id as int>_slice_<slice id as int>.nii.gz
        ----- wmh_<subject id as int>_slice_<slice id as int>.nii.gz

    Subject ids here are the ``FLAIR_<id>`` prefix of the filename and must
    match the ids listed in ``dataset_split_csv`` (two columns, no header:
    subject_id, split).
    """
    def __init__(self, data_dir, paddtarget, slice_selection_method, dataset_split_csv, split,
                 exclude_slices=None, synthesis=True, tumour_only=False):
        assert slice_selection_method in ['mask', 'none']
        assert isinstance(paddtarget, int)
        assert 'flair' in os.listdir(data_dir)
        assert 'labels' in os.listdir(data_dir)
        self.dataset_split_df = pd.read_csv(dataset_split_csv, names=['subject_id', 'split'])
        self.split = split
        self.paddtarget = paddtarget
        self.slice_selection_method = slice_selection_method
        self.synthesis = synthesis
        flair_filenames = os.listdir(os.path.join(data_dir, 'flair'))
        # e.g. 'FLAIR_12_slice_34.nii.gz' -> subject 'FLAIR_12', slice 34.
        subject_ids = [x.split('_slice')[0] for x in flair_filenames]
        slice_idx_arr = [int(x.split('_')[3].replace('.nii.gz', '')) for x in flair_filenames]
        label_paths = [os.path.join(data_dir, 'labels', x.replace('FLAIR', 'wmh')) for x in flair_filenames]
        flair_paths = [os.path.join(data_dir, 'flair', x) for x in flair_filenames]
        assert all([isinstance(x, int) for x in slice_idx_arr])
        # NOTE: the loop variables ``fp``/``lp`` are misleadingly named —
        # zip order makes fp the *label* path and lp the *flair* path, which
        # does line up with the column order below.  Do not "fix" the names
        # without also checking the column mapping.
        self.files_df = pd.DataFrame(
            data=[(subj, slice_idx, fp, lp) for subj, slice_idx, fp, lp in zip(subject_ids, slice_idx_arr,
                                                                               label_paths, flair_paths)],
            columns=['subject_id', 'slice_index', 'label_path', 'flair_path']
        )
        if exclude_slices is not None:
            self.files_df = self.files_df[~self.files_df['slice_index'].isin(exclude_slices)]
        # Apply split filter to images
        images_to_use = self.dataset_split_df[self.dataset_split_df['split'] == self.split]['subject_id'].values
        self.files_df = self.files_df[self.files_df['subject_id'].isin(images_to_use)]
    def __getitem__(self, index):
        flair_filepath = self.files_df['flair_path'].values[index]
        label_filepath = self.files_df['label_path'].values[index]
        # NOTE(review): nibabel's get_data() is deprecated (removed in
        # nibabel 5); get_fdata() is the replacement but always returns
        # float64 — confirm downstream dtype expectations before switching.
        flair_slice = nib.load(flair_filepath).get_data()
        label_slice = nib.load(label_filepath).get_data()
        # Shape dance: unsqueeze twice to (1, 1, H, W), then [:, 0, ...]
        # drops the channel dim again, yielding (1, H, W) GPU tensors.
        batch = {'inputs': torch.tensor(flair_slice).unsqueeze(dim=0).unsqueeze(dim=1).to(torch.float),
                 'labels': torch.tensor(label_slice).unsqueeze(dim=0).unsqueeze(dim=1).to(torch.float)}
        batch['inputs'] = to_var_gpu(batch['inputs'][:, 0, ...])
        batch['labels'] = to_var_gpu(batch['labels'][:, 0, ...])
        return batch
    def __len__(self):
        return len(self.files_df)
    def get_slice_indices_for_subject_ids(self, subject_ids):
        # Row indices (DataFrame index labels) of all slices belonging to
        # the given subjects — used e.g. for subject-level sampling.
        return self.files_df[self.files_df['subject_id'].isin(subject_ids)].index.values
class WholeVolumeDataset(Dataset):
    """Whole-volume FLAIR dataset with WMH labels; yields raw numpy arrays.

    This Dataset object expects a data_dir which contains the following structure:
    data_dir
    |
    ----flair
        |
        ----- FLAIR_<subject id as int>.nii.gz
        ----- FLAIR_<subject id as int>.nii.gz
        ----- FLAIR_<subject id as int>.nii.gz
    ----labels
        |
        ----- wmh_<subject id as int>.nii.gz
        ----- wmh_<subject id as int>.nii.gz
        ----- wmh_<subject id as int>.nii.gz

    Note: subject ids are the filename minus '.nii.gz' (so they include the
    'FLAIR_' prefix) and must match the ids in ``dataset_split_csv``.
    """
    def __init__(self, data_dir, paddtarget, dataset_split_csv, split, synthesis=True, tumour_only=False):
        # ``paddtarget``, ``synthesis`` and ``tumour_only`` are accepted for
        # interface parity with the other datasets but unused here.
        assert isinstance(paddtarget, int)
        assert 'flair' in os.listdir(data_dir)
        assert 'labels' in os.listdir(data_dir)
        self.dataset_split_df = pd.read_csv(dataset_split_csv, names=['subject_id', 'split'])
        self.split = split
        flair_filenames = os.listdir(os.path.join(data_dir, 'flair'))
        subject_ids = np.array([x.replace('.nii.gz', '') for x in flair_filenames])
        flair_paths = [os.path.join(data_dir, 'flair', x) for x in flair_filenames]
        label_paths = [os.path.join(data_dir, 'labels', x.replace('FLAIR', 'wmh')) for x in flair_filenames]
        # NOTE: the loop variables ``fp``/``lp`` are misleadingly named —
        # fp is the *label* path and lp the *flair* path, which matches the
        # column order below.  Keep the mapping in sync if renaming.
        self.files_df = pd.DataFrame(
            data=[(subj, fp, lp) for subj, fp, lp in zip(subject_ids, label_paths, flair_paths)],
            columns=['subject_id', 'label_path', 'flair_path']
        )
        # Apply split filter to images
        images_to_use = self.dataset_split_df[self.dataset_split_df['split'] == self.split]['subject_id'].values
        self.files_df = self.files_df[self.files_df['subject_id'].isin(images_to_use)]
    def __getitem__(self, index):
        flair_filepath = self.files_df['flair_path'].values[index]
        label_filepath = self.files_df['label_path'].values[index]
        # NOTE(review): get_data() is deprecated in nibabel; see SliceDataset.
        inputs = nib.load(flair_filepath).get_data()
        labels = nib.load(label_filepath).get_data()
        # Returns a plain (inputs, labels) tuple of numpy volumes — unlike
        # the slice datasets, no batching/GPU transfer happens here.
        # batch = {'inputs': inputs, 'labels': labels}
        return inputs, labels
    def __len__(self):
        return len(self.files_df)
    def get_subject_id_from_index(self, index):
        # Map a dataset index back to its subject id via the flair path.
        return self.files_df[
            self.files_df['flair_path'] == self.files_df['flair_path'].values[index]]['subject_id'].values[0]
class WholeVolumeDatasetTumour(Dataset):
    """Whole-volume multi-modal (FLAIR/T1c/T1/T2) tumour dataset.

    This Dataset object expects a data_dir which contains the following structure:
    data_dir
    |
    ----flair
        |
        ----- FLAIR_<subject id as int>.nii.gz
        ----- FLAIR_<subject id as int>.nii.gz
        ----- FLAIR_<subject id as int>.nii.gz
    ----T1c
        |
        ----- T1c_<subject id as int>.nii.gz
        ----- T1c_<subject id as int>.nii.gz
        ----- T1c_<subject id as int>.nii.gz
    ----T1
        |
        ----- T1_<subject id as int>.nii.gz
        ----- T1_<subject id as int>.nii.gz
        ----- T1_<subject id as int>.nii.gz
    ----T2
        |
        ----- T2_<subject id as int>.nii.gz
        ----- T2_<subject id as int>.nii.gz
        ----- T2_<subject id as int>.nii.gz
    ----labels
        |
        ----- bin_<subject id as int>.nii.gz
        ----- bin_<subject id as int>.nii.gz
        ----- bin_<subject id as int>.nii.gz
    """
    def __init__(self, data_dir, paddtarget, dataset_split_csv, split, tumour_only=False):
        assert isinstance(paddtarget, int)
        assert 'flair' in os.listdir(data_dir)
        assert 'labels' in os.listdir(data_dir)
        # Subject id = everything after the first '_' in the flair filename
        # (ids may themselves contain underscores).
        subject_id_arr = np.array(['_'.join(x.split('_')[1:]).replace('.nii.gz', '') for x in os.listdir(os.path.join(data_dir, 'flair'))])
        self.dataset_split_df = pd.read_csv(dataset_split_csv, names=['subject_id', 'split'], dtype={'subject_id': str})
        self.split = split
        self.tumour_only = tumour_only
        flair_paths = [os.path.join(data_dir, 'flair', 'FLAIR_' + str(id) + '.nii.gz') for id in subject_id_arr]
        t1c_paths = [os.path.join(data_dir, 't1c', 'T1c_' + str(id) + '.nii.gz') for id in subject_id_arr]
        t1_paths = [os.path.join(data_dir, 't1', 'T1_' + str(id) + '.nii.gz') for id in subject_id_arr]
        t2_paths = [os.path.join(data_dir, 't2', 'T2_' + str(id) + '.nii.gz') for id in subject_id_arr]
        # ``tumour_only`` swaps in an alternative label directory.
        label_dir = 'labels' if not tumour_only else 'labels_tumour_only'
        label_paths = [os.path.join(data_dir, label_dir, 'bin_' + str(id) + '.nii.gz') for id in subject_id_arr]
        self.files_df = pd.DataFrame(
            data=[tup for tup in zip(subject_id_arr, label_paths, flair_paths, t1c_paths, t1_paths, t2_paths)],
            columns=['subject_id', 'label_path', 'flair_path', 't1c_path', 't1_path', 't2_path']
        )
        # Apply split filter to images
        images_to_use = self.dataset_split_df[self.dataset_split_df['split'] == self.split]['subject_id'].values
        self.files_df = self.files_df[self.files_df['subject_id'].isin(images_to_use)]
    def __getitem__(self, index):
        flair_filepath = self.files_df['flair_path'].values[index]
        t1c_filepath = self.files_df['t1c_path'].values[index]
        t1_filepath = self.files_df['t1_path'].values[index]
        t2_filepath = self.files_df['t2_path'].values[index]
        label_filepath = self.files_df['label_path'].values[index]
        # NOTE(review): get_data() is deprecated in nibabel; see SliceDataset.
        flair = nib.load(flair_filepath).get_data()
        t1c = nib.load(t1c_filepath).get_data()
        t1 = nib.load(t1_filepath).get_data()
        t2 = nib.load(t2_filepath).get_data()
        labels = nib.load(label_filepath).get_data()
        # Stack modalities into channel dim: (4, H, W, D) numpy array.
        inputs = np.stack([flair, t1c, t1, t2], axis=0)
        batch = {'inputs': inputs, 'labels': labels}
        # NOTE(review): ``[:, 0, ...]`` here slices *numpy* volumes (keeping
        # only index 0 of the second axis) before to_var_gpu — looks
        # copy-pasted from the slice datasets; confirm this is intentional.
        batch['inputs'] = to_var_gpu(batch['inputs'][:, 0, ...])
        batch['labels'] = to_var_gpu(batch['labels'][:, 0, ...])
        return batch
    def __len__(self):
        return len(self.files_df)
    def get_subject_id_from_index(self, index):
        # Map a dataset index back to its subject id via the flair path.
        return self.files_df[
            self.files_df['flair_path'] == self.files_df['flair_path'].values[index]]['subject_id'].values[0]
class SliceDatasetTumour(Dataset):
    """2D multi-modal (FLAIR/T1c/T1/T2) tumour-slice dataset.

    This Dataset object expects a data_dir which contains the following structure:
    data_dir
    |
    ----flair
        |
        ----- FLAIR_<subject id as int>_slice_<slice id as int>.nii.gz
        ----- FLAIR_<subject id as int>_slice_<slice id as int>.nii.gz
        ----- FLAIR_<subject id as int>_slice_<slice id as int>.nii.gz
    ----T1c
        |
        ----- T1c_<subject id as int>_slice_<slice id as int>.nii.gz
        ----- T1c_<subject id as int>_slice_<slice id as int>.nii.gz
        ----- T1c_<subject id as int>_slice_<slice id as int>.nii.gz
    ----T1
        |
        ----- T1_<subject id as int>_slice_<slice id as int>.nii.gz
        ----- T1_<subject id as int>_slice_<slice id as int>.nii.gz
        ----- T1_<subject id as int>_slice_<slice id as int>.nii.gz
    ----T2
        |
        ----- T2_<subject id as int>_slice_<slice id as int>.nii.gz
        ----- T2_<subject id as int>_slice_<slice id as int>.nii.gz
        ----- T2_<subject id as int>_slice_<slice id as int>.nii.gz
    ----labels
        |
        ----- bin_<subject id as int>_slice_<slice id as int>.nii.gz
        ----- bin_<subject id as int>_slice_<slice id as int>.nii.gz
        ----- bin_<subject id as int>_slice_<slice id as int>.nii.gz
    """
    def __init__(self, data_dir, paddtarget, slice_selection_method, dataset_split_csv, split, tumour_only=False):
        assert slice_selection_method in ['mask', 'none']
        assert isinstance(paddtarget, int)
        assert 'flair' in os.listdir(data_dir)
        assert 'labels' in os.listdir(data_dir)
        self.paddtarget = paddtarget
        self.slice_selection_method = slice_selection_method
        self.dataset_split_df = pd.read_csv(dataset_split_csv, names=['subject_id', 'split'], dtype={'subject_id': str})
        self.split = split
        self.tumour_only = tumour_only
        # Basename without modality prefix, e.g. '<id>_slice_<n>.nii.gz'.
        subject_basenames = ['_'.join(p.split('_')[1:]) for p in os.listdir(os.path.join(data_dir, 'flair'))]
        # ``tumour_only`` swaps in an alternative label directory.
        label_dir = 'labels' if not tumour_only else 'labels_tumour_only'
        label_paths = [os.path.join(data_dir, label_dir, 'bin_' + x) for x in subject_basenames]
        flair_paths = [os.path.join(data_dir, 'flair', 'FLAIR_' + x) for x in subject_basenames]
        t1c_paths = [os.path.join(data_dir, 't1c', 'T1c_' + x) for x in subject_basenames]
        t1_paths = [os.path.join(data_dir, 't1', 'T1_' + x) for x in subject_basenames]
        t2_paths = [os.path.join(data_dir, 't2', 'T2_' + x) for x in subject_basenames]
        subject_id_arr = [x.split('_')[0] for x in subject_basenames]
        slice_idx_arr = [int(x.split('_')[2].replace('.nii.gz', '')) for x in subject_basenames]
        assert all([isinstance(x, int) for x in slice_idx_arr])
        self.files_df = pd.DataFrame(
            data=[tp for tp in zip(subject_id_arr, slice_idx_arr, label_paths,
                                   flair_paths,t1c_paths, t1_paths, t2_paths)],
            columns=['subject_id', 'slice_index', 'label_path', 'flair_path', 't1c_path', 't1_path', 't2_path']
        )
        # Apply split filter to images
        images_to_use = self.dataset_split_df[self.dataset_split_df['split'] == self.split]['subject_id'].values
        self.files_df = self.files_df[self.files_df['subject_id'].isin(images_to_use)]
    def __getitem__(self, index):
        flair_filepath = self.files_df['flair_path'].values[index]
        t1c_filepath = self.files_df['t1c_path'].values[index]
        t1_filepath = self.files_df['t1_path'].values[index]
        t2_filepath = self.files_df['t2_path'].values[index]
        label_filepath = self.files_df['label_path'].values[index]
        # NOTE(review): get_data() is deprecated in nibabel; see SliceDataset.
        flair_slice = nib.load(flair_filepath).get_data()
        t1c_slice = nib.load(t1c_filepath).get_data()
        t1_slice = nib.load(t1_filepath).get_data()
        t2_slice = nib.load(t2_filepath).get_data()
        label_slice = nib.load(label_filepath).get_data()
        # Stack modalities into channel dim: (4, H, W).
        image_slice = np.stack([flair_slice, t1c_slice, t1_slice, t2_slice], axis=0)
        # batch_adaptation pads/crops to ``paddtarget``.
        inputs, labels = batch_adaptation(torch.tensor(image_slice).unsqueeze(dim=0),
                                          torch.tensor(label_slice), self.paddtarget)
        batch = {'inputs': inputs, 'labels': labels}
        # NOTE(review): ``[:, 0, ...]`` keeps only channel 0 (FLAIR) of the
        # stacked modalities — confirm this is intentional and not a
        # copy-paste from the single-modality SliceDataset.
        batch['inputs'] = to_var_gpu(batch['inputs'][:, 0, ...])
        batch['labels'] = to_var_gpu(batch['labels'][:, 0, ...])
        return batch
    def __len__(self):
        return len(self.files_df)
    def get_slice_indices_for_subject_ids(self, subject_ids):
        # Row indices of all slices belonging to the given subjects.
        return self.files_df[self.files_df['subject_id'].isin(subject_ids)].index.values
class WholeVolumeDatasetMS(Dataset):
    """Whole-volume multi-modal (FLAIR/T1) MS-lesion dataset.

    This Dataset object expects a data_dir which contains the following structure:
    data_dir
    |
    ----flair
        |
        ----- FLAIR_<subject id as int>.nii.gz
        ----- FLAIR_<subject id as int>.nii.gz
        ----- FLAIR_<subject id as int>.nii.gz
    ----T1
        |
        ----- T1_<subject id as int>.nii.gz
        ----- T1_<subject id as int>.nii.gz
        ----- T1_<subject id as int>.nii.gz
    ----labels
        |
        ----- bin_<subject id as int>.nii.gz
        ----- bin_<subject id as int>.nii.gz
        ----- bin_<subject id as int>.nii.gz
    """
    def __init__(self, data_dir, paddtarget, dataset_split_csv, split):
        # ``paddtarget`` is accepted for interface parity but unused here.
        assert isinstance(paddtarget, int)
        assert 'flair' in os.listdir(data_dir)
        assert 'labels' in os.listdir(data_dir)
        # Subject id = everything after the first '_' in the flair filename.
        subject_id_arr = np.array(['_'.join(x.split('_')[1:]).replace('.nii.gz', '') for x in os.listdir(os.path.join(data_dir, 'flair'))])
        self.dataset_split_df = pd.read_csv(dataset_split_csv, names=['subject_id', 'split'], dtype={'subject_id': str})
        self.split = split
        flair_paths = [os.path.join(data_dir, 'flair', 'FLAIR_' + str(id) + '.nii.gz') for id in subject_id_arr]
        t1_paths = [os.path.join(data_dir, 't1', 'T1_' + str(id) + '.nii.gz') for id in subject_id_arr]
        label_paths = [os.path.join(data_dir, 'labels', 'bin_' + str(id) + '.nii.gz') for id in subject_id_arr]
        self.files_df = pd.DataFrame(
            data=[tup for tup in zip(subject_id_arr, label_paths, flair_paths, t1_paths)],
            columns=['subject_id', 'label_path', 'flair_path', 't1_path']
        )
        # Apply split filter to images
        images_to_use = self.dataset_split_df[self.dataset_split_df['split'] == self.split]['subject_id'].values
        self.files_df = self.files_df[self.files_df['subject_id'].isin(images_to_use)]
    def __getitem__(self, index):
        flair_filepath = self.files_df['flair_path'].values[index]
        t1_filepath = self.files_df['t1_path'].values[index]
        label_filepath = self.files_df['label_path'].values[index]
        # NOTE(review): get_data() is deprecated in nibabel; see SliceDataset.
        flair = nib.load(flair_filepath).get_data()
        t1 = nib.load(t1_filepath).get_data()
        labels = nib.load(label_filepath).get_data()
        # Stack modalities into channel dim: (2, H, W, D) numpy array.
        inputs = np.stack([flair, t1], axis=0)
        batch = {'inputs': inputs, 'labels': labels}
        # NOTE(review): ``[:, 0, ...]`` slices the *numpy* volumes (keeping
        # only index 0 of the second axis) before to_var_gpu — looks
        # copy-pasted from the slice datasets; confirm this is intentional.
        batch['inputs'] = to_var_gpu(batch['inputs'][:, 0, ...])
        batch['labels'] = to_var_gpu(batch['labels'][:, 0, ...])
        return batch
    def __len__(self):
        return len(self.files_df)
    def get_subject_id_from_index(self, index):
        # Map a dataset index back to its subject id via the flair path.
        return self.files_df[
            self.files_df['flair_path'] == self.files_df['flair_path'].values[index]]['subject_id'].values[0]
class SliceDatasetMS(Dataset):
    """2D-slice dataset for MS lesion segmentation.

    Expects ``data_dir`` to contain the following structure::

        data_dir/
            flair/FLAIR_<subject id>_slice_<slice id>.nii.gz
            t1/T1_<subject id>_slice_<slice id>.nii.gz
            labels/bin_<subject id>_slice_<slice id>.nii.gz

    Each item is a dict with 'inputs' (FLAIR+T1 slice, channel-first) and
    'labels' (binary lesion mask), padded/cropped to ``paddtarget`` and
    moved to the GPU.
    """
    def __init__(self, data_dir, paddtarget, slice_selection_method, dataset_split_csv, split):
        # 'mask' / 'none' are the only supported slice selection strategies.
        assert slice_selection_method in ['mask', 'none']
        assert isinstance(paddtarget, int)
        assert 'flair' in os.listdir(data_dir)
        assert 'labels' in os.listdir(data_dir)
        # NOTE(review): a 't1' sub-directory is also required below but is not
        # asserted here -- confirm it always exists alongside 'flair'.
        self.paddtarget = paddtarget
        self.slice_selection_method = slice_selection_method
        # subject_id is read as str so it compares equal to the string ids
        # parsed from the filenames below.
        self.dataset_split_df = pd.read_csv(dataset_split_csv, names=['subject_id', 'split'], dtype={'subject_id': str})
        self.split = split
        # Basenames are '<subject>_slice_<n>.nii.gz' (modality prefix stripped).
        subject_basenames = ['_'.join(p.split('_')[1:]) for p in os.listdir(os.path.join(data_dir, 'flair'))]
        label_paths = [os.path.join(data_dir, 'labels', 'bin_' + x) for x in subject_basenames]
        flair_paths = [os.path.join(data_dir, 'flair', 'FLAIR_' + x) for x in subject_basenames]
        t1_paths = [os.path.join(data_dir, 't1', 'T1_' + x) for x in subject_basenames]
        subject_id_arr = [x.split('_')[0] for x in subject_basenames]
        slice_idx_arr = [int(x.split('_')[2].replace('.nii.gz', '')) for x in subject_basenames]
        assert all([isinstance(x, int) for x in slice_idx_arr])
        self.files_df = pd.DataFrame(
            data=[tp for tp in zip(subject_id_arr, slice_idx_arr, label_paths,
                                   flair_paths, t1_paths)],
            columns=['subject_id', 'slice_index', 'label_path', 'flair_path', 't1_path']
        )
        # Apply split filter to images
        images_to_use = self.dataset_split_df[self.dataset_split_df['split'] == self.split]['subject_id'].values
        self.files_df = self.files_df[self.files_df['subject_id'].isin(images_to_use)]
    def __getitem__(self, index):
        """Load one FLAIR/T1 slice pair plus its label mask as a GPU batch dict."""
        flair_filepath = self.files_df['flair_path'].values[index]
        t1_filepath = self.files_df['t1_path'].values[index]
        label_filepath = self.files_df['label_path'].values[index]
        # NOTE(review): nibabel's get_data() is deprecated in favour of get_fdata().
        flair_slice = nib.load(flair_filepath).get_data()
        t1_slice = nib.load(t1_filepath).get_data()
        label_slice = nib.load(label_filepath).get_data()
        # Stack modalities channel-first: FLAIR first, then T1.
        image_slice = np.stack([flair_slice, t1_slice], axis=0)
        inputs, labels = batch_adaptation(torch.tensor(image_slice).unsqueeze(dim=0),
                                          torch.tensor(label_slice), self.paddtarget)
        batch = {'inputs': inputs, 'labels': labels}
        # Drop the singleton dim added by unsqueeze above, then move to GPU.
        batch['inputs'] = to_var_gpu(batch['inputs'][:, 0, ...])
        batch['labels'] = to_var_gpu(batch['labels'][:, 0, ...])
        return batch
    def __len__(self):
        # One row per slice after the split filter.
        return len(self.files_df)
    def get_slice_indices_for_subject_ids(self, subject_ids):
        # Dataframe index positions of every slice belonging to the given subjects.
        return self.files_df[self.files_df['subject_id'].isin(subject_ids)].index.values
class SubsetTumour(WholeVolumeDatasetTumour):
    """View onto a ``WholeVolumeDatasetTumour`` restricted to ``indices``.

    Arguments:
        dataset (Dataset): the full dataset being wrapped
        indices (sequence): positions of ``dataset`` exposed by this view
    """
    def __init__(self, dataset, indices):
        self.dataset = dataset
        self.indices = indices
    def __getitem__(self, idx):
        # Translate the local position into the wrapped dataset's own index.
        selected = self.indices[idx]
        return self.dataset[selected]
    def __len__(self):
        # The view is exactly as long as its index list.
        return len(self.indices)
class Subset(WholeVolumeDataset):
    """View onto a ``WholeVolumeDataset`` restricted to ``indices``.

    Arguments:
        dataset (Dataset): the full dataset being wrapped
        indices (sequence): positions of ``dataset`` exposed by this view
    """
    def __init__(self, dataset, indices):
        self.dataset = dataset
        self.indices = indices
    def __getitem__(self, idx):
        # Indirect lookup: local position -> underlying dataset index.
        return self.dataset[self.indices[idx]]
    def __len__(self):
        count = len(self.indices)
        return count
def get_monai_slice_dataset(data_dir, paddtarget, slice_selection_method, dataset_split_csv, split,
                            exclude_slices=None, synthesis=True, tumour_only=False):
    """Build a MONAI ``Dataset`` of 2D FLAIR slices with WMH labels.

    Expects ``data_dir`` to contain::

        data_dir/
            flair/FLAIR_<subject id>_slice_<slice id>.nii.gz
            labels/wmh_<subject id>_slice_<slice id>.nii.gz

    Args:
        data_dir: root folder with ``flair`` and ``labels`` sub-directories.
        paddtarget: padding target size (validated only; the transform chain
            below uses a fixed 256x256 pad -- kept for interface parity).
        slice_selection_method: 'mask' or 'none' (validated only).
        dataset_split_csv: headerless two-column CSV (subject_id, split).
        split: which split to keep (e.g. 'train').
        exclude_slices: optional iterable of slice indices to drop.
        synthesis, tumour_only: accepted for interface parity; unused here.

    Returns:
        monai.data.Dataset yielding dicts with 'inputs' and 'labels'.
    """
    assert slice_selection_method in ['mask', 'none']
    assert isinstance(paddtarget, int)
    assert 'flair' in os.listdir(data_dir)
    assert 'labels' in os.listdir(data_dir)
    # Read subject ids as strings so they compare equal to the string ids
    # derived from the filenames below (consistent with SliceDatasetMS).
    dataset_split_df = pd.read_csv(dataset_split_csv, names=['subject_id', 'split'], dtype={'subject_id': str})
    flair_filenames = os.listdir(os.path.join(data_dir, 'flair'))
    # NOTE(review): these ids keep the 'FLAIR_' filename prefix -- confirm
    # the split CSV uses the same convention.
    subject_ids = [x.split('_slice')[0] for x in flair_filenames]
    slice_idx_arr = [int(x.split('_')[3].replace('.nii.gz', '')) for x in flair_filenames]
    label_paths = [os.path.join(data_dir, 'labels', x.replace('FLAIR', 'wmh')) for x in flair_filenames]
    flair_paths = [os.path.join(data_dir, 'flair', x) for x in flair_filenames]
    # Previous version unpacked the zip into misleadingly swapped names
    # (fp bound to label paths, lp to flair paths); a plain zip avoids that.
    files_df = pd.DataFrame(
        data=list(zip(subject_ids, slice_idx_arr, label_paths, flair_paths)),
        columns=['subject_id', 'slice_index', 'label_path', 'flair_path']
    )
    if exclude_slices is not None:
        files_df = files_df[~files_df['slice_index'].isin(exclude_slices)]
    # Apply split filter to images
    images_to_use = dataset_split_df[dataset_split_df['split'] == split]['subject_id'].values
    files_df = files_df[files_df['subject_id'].isin(images_to_use)]
    # MONAI datasets expect a list of per-item dicts mapping keys to paths, e.g.
    # [{'inputs': 'image1.nii.gz', 'labels': 'label1.nii.gz'}, ...]
    monai_data_list = [{'inputs': row['flair_path'],
                        'labels': row['label_path']}
                       for _, row in files_df.iterrows()]
    transforms = Compose([LoadImaged(keys=['inputs', 'labels']),
                          Orientationd(keys=['inputs', 'labels'], axcodes='RAS'),
                          AddChanneld(keys=['inputs', 'labels']),
                          ToTensord(keys=['inputs', 'labels']),
                          ResizeWithPadOrCropd(keys=['inputs', 'labels'],
                                               spatial_size=(256, 256)),
                          SpatialCropd(keys=['inputs', 'labels'], roi_center=(127, 138), roi_size=(96, 96)),
                          ScaleIntensityd(keys=['inputs'], minv=0.0, maxv=1.0)
                          ])
    # Should normalise at the volume level
    return MonaiDataset(data=monai_data_list, transform=transforms)
def infer_on_subject(model, output_path, whole_volume_path, files_df, subject_id, batch_size=10):
    """
    Inner loop function to run inference on a single subject.

    Runs the model over the subject's 2D slices in order, inverts the MONAI
    preprocessing transforms, reassembles a 3D label volume, saves it in MNI
    space, then resamples/registers the prediction back into the subject's
    original image space for the submission folder.

    Args:
        model: object exposing ``inference_func`` and ``device``.
        output_path: path whose parent directory receives the outputs.
        whole_volume_path: the subject's full FLAIR volume (MNI space).
        files_df: dataframe with 'subject_id', 'slice_index', 'flair_path'
            and 'label_path' columns covering all subjects.
        subject_id: subject to process.
        batch_size: number of slices per inference batch.
    """
    subject_files_df = files_df[files_df['subject_id'] == subject_id]
    subject_files_df = subject_files_df.sort_values(by='slice_index')
    monai_data_list = [{'inputs': row['flair_path'],
                        'labels': row['label_path']}
                       for _, row in subject_files_df.iterrows()]
    transforms = Compose([LoadImaged(keys=['inputs', 'labels']),
                          Orientationd(keys=['inputs', 'labels'], axcodes='RAS'),
                          AddChanneld(keys=['inputs', 'labels']),
                          ToTensord(keys=['inputs', 'labels']),
                          ResizeWithPadOrCropd(keys=['inputs', 'labels'],
                                               spatial_size=(256, 256)),
                          SpatialCropd(keys=['inputs', 'labels'], roi_center=(127, 138), roi_size=(96, 96)),
                          ScaleIntensityd(keys=['inputs'], minv=0.0, maxv=1.0)
                          ])
    individual_subject_dataset = MonaiDataset(data=monai_data_list, transform=transforms)
    inference_dl = DataLoader(individual_subject_dataset, batch_size=batch_size, shuffle=False)
    inverse_op = BatchInverseTransform(transform=individual_subject_dataset.transform, loader=inference_dl)
    final_output = []
    # Loaded only for its affine, used when saving the reassembled volume.
    # (A dead ``NiftiSaver(resample=False)`` created here was immediately
    # overwritten below and has been removed.)
    img = nib.load(whole_volume_path)
    for idx, batch in enumerate(inference_dl):
        # Two-headed model: first head -> tumour mask, second -> cochlea mask.
        outputs, outputs2 = model.inference_func(batch['inputs'].to(model.device))
        tumour_preds = (torch.sigmoid(outputs) > 0.5).float().detach().cpu()
        cochlea_preds = (torch.sigmoid(outputs2) > 0.5).float().detach().cpu() * 2.0
        # Merge into one label map (0=background, 1=tumour, 2=cochlea); stored
        # under 'inputs' so the inverse transform chain is applied to it.  # hack
        batch['inputs'] = torch.clamp(tumour_preds + cochlea_preds, min=0, max=2)
        final_output.append(np.stack(
            [f['inputs'] for f in inverse_op(batch)]
        ))
    # Reassemble slices into a volume and flip axes to match the nifti layout.
    volume = np.einsum('ijkl->jkli', np.concatenate(final_output))[:, ::-1, ::-1, ...]
    nifti_saver = NiftiSaver(output_dir=Path(output_path).parent / 'mni_preds')
    print(Path(output_path).parent / 'mni_preds')
    print(Path(output_path).parent)
    print(Path(output_path).name)
    nifti_saver.save(volume, meta_data={'affine': img.affine, 'filename_or_obj': Path(output_path).name})
    # Now save it in the original space
    seg_path = str(Path(output_path).parent / 'mni_preds' / subject_id / f"{subject_id}_seg.nii.gz")
    img_path = whole_volume_path
    orig_img_path = whole_volume_path.replace('FLAIR_', 'orig_FLAIR_')
    orig_img = ants.image_read(orig_img_path)
    img = ants.image_read(img_path)
    seg = ants.image_read(seg_path)
    resampled_img = ants.resample_image_to_target(image=img, target=orig_img, interp_type='linear')
    resampled_seg = ants.resample_image_to_target(image=seg, target=orig_img, interp_type='nearestNeighbor')
    transforms = ants.registration(fixed=orig_img, moving=resampled_img, type_of_transform='Affine')
    transformed_seg = ants.apply_transforms(fixed=orig_img, moving=resampled_seg,
                                            transformlist=transforms['fwdtransforms'], interpolator='nearestNeighbor')
    transformed_img = ants.apply_transforms(fixed=orig_img, moving=resampled_img,
                                            transformlist=transforms['fwdtransforms'], interpolator='linear')
    # Challenge submission naming: crossmoda_<numeric id>_Label.nii.gz
    subject_id = subject_id.split('_')[-1]
    submission_filename = f"crossmoda_{subject_id}_Label.nii.gz"
    output_path = Path(output_path).parent / 'submission_folder'
    output_path.mkdir(parents=True, exist_ok=True)
    if not (output_path / submission_filename).exists():
        transformed_seg.to_file(str(output_path / submission_filename))
    else:
        # BUG FIX: this message was previously a bare string expression
        # (a no-op); report that the file already exists.
        print('File at {} already exists'.format(str(output_path / submission_filename)))
38b563cd62ec66f835f04d350a88f64b3583fadf | 2,171 | py | Python | maintenance/tests_old/test_parallel_opt.py | sfpd/rlreloaded | 650c64ec22ad45996c8c577d85b1a4f20aa1c692 | [
"MIT"
] | null | null | null | maintenance/tests_old/test_parallel_opt.py | sfpd/rlreloaded | 650c64ec22ad45996c8c577d85b1a4f20aa1c692 | [
"MIT"
] | null | null | null | maintenance/tests_old/test_parallel_opt.py | sfpd/rlreloaded | 650c64ec22ad45996c8c577d85b1a4f20aa1c692 | [
"MIT"
] | null | null | null | import argparse
import numpy as np
from cloud.cloud_interface import create_cloud,load_cloud_config,get_slave_names
from cloud.cluster_pool import ClusterPool
from cloud.slave_loop import slave_loop
from control3.common_util import chunk_slices
from control3.parallel import sum_count_reducer
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
class G: #pylint: disable=W0232
    # Module-level holder for the digits dataset shared by the pure loss
    # functions below; populated in the __main__ block.
    X = None
    y = None
######### Pure functions #######
L2COEFF = 1e-4
def fc(w, X):
    """Logistic sigmoid of the linear scores ``X.dot(w)``."""
    scores = X.dot(w)
    return np.reciprocal(1. + np.exp(-scores))
def floss(w, X, z):
    """L2-regularised negative log-likelihood of logistic regression.

    Returns a ``(loss, sample_count)`` pair so that per-worker results can
    be combined with a sum/count reducer.
    """
    N = X.shape[0]
    c = fc(w, X)
    EPSILON = 1e-30  # guards log(0)
    data_term = -(z * np.log(c + EPSILON) + (1 - z) * np.log(1 - c + EPSILON)).sum()
    reg_term = 0.5 * L2COEFF * N * w.dot(w)
    return data_term + reg_term, N
def fgradloss(w, X, z):
    """Gradient of ``floss`` with respect to ``w``, plus the sample count."""
    N = X.shape[0]
    residual = fc(w, X) - z
    grad = (residual.reshape(-1, 1) * X).sum(axis=0) + L2COEFF * N * w
    return grad, N
##################################
def f(args):
    """Map worker: loss of ``w`` on the slice ``sli`` of the global dataset.

    Takes a single ``(w, sli)`` tuple argument.  The previous Python-2-only
    tuple-parameter form ``def f((w, sli))`` is a syntax error on Python 3;
    explicit unpacking keeps the call signature identical.
    """
    w, sli = args
    return floss(w, G.X[sli], G.y[sli])
def gradf(args):
    """Map worker: gradient of the loss on the slice ``sli`` of the global data.

    Takes a single ``(w, sli)`` tuple argument; explicit unpacking replaces
    the Python-2-only tuple-parameter syntax while keeping calls unchanged.
    """
    w, sli = args
    return fgradloss(w, G.X[sli], G.y[sli])
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # When --slave_addr is given, this process runs as a worker; otherwise it
    # acts as the master that provisions the cluster and drives the map-reduce.
    parser.add_argument("--slave_addr",type=str)
    args = parser.parse_args()
    # The digits dataset
    digits = datasets.load_digits()
    G.X = digits['data']
    G.y = digits['target']
    if args.slave_addr:
        slave_loop(args.slave_addr)
    else:
        cloud_config = load_cloud_config()
        cloud = create_cloud(cloud_config)
        cluster = "testparopt"
        cloud.start_instances(instance_names=get_slave_names(3,instance_prefix=cluster))
        pool = ClusterPool(cloud,cluster,start_mode="the_prestige")
        # Partition the rows across the pool; each worker gets one slice.
        slis = chunk_slices(G.X.shape[0], pool.size())
        w = np.zeros(G.X.shape[1])
        wslis = [(w,sli) for sli in slis]
        # Distributed loss/gradient via sum-and-count reduction.
        loss,losscount = pool.mapreduce(f,sum_count_reducer,wslis)
        grad,gradcount = pool.mapreduce(gradf,sum_count_reducer,wslis)
        # Single-process reference over the full dataset for comparison.
        loss1,losscount1 = f((w,slice(0,None,None)))
        grad1,gradcount1 = gradf((w,slice(0,None,None)))
        # Distributed results must match the single-process computation.
        assert np.allclose(loss,loss1)
        assert np.allclose(losscount,losscount1)
        assert np.allclose(grad,grad1)
        assert np.allclose(gradcount,gradcount1)
9fa0ff0b6e0e149bccdca6a6c15816c2dfa25ff8 | 18,472 | py | Python | ucsmsdk/mometa/sysdebug/SysdebugAutoCoreFileExportTargetFsm.py | anoop1984/python_sdk | c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | [
"Apache-2.0"
] | null | null | null | ucsmsdk/mometa/sysdebug/SysdebugAutoCoreFileExportTargetFsm.py | anoop1984/python_sdk | c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | [
"Apache-2.0"
] | null | null | null | ucsmsdk/mometa/sysdebug/SysdebugAutoCoreFileExportTargetFsm.py | anoop1984/python_sdk | c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | [
"Apache-2.0"
] | null | null | null | """This module contains the general information for SysdebugAutoCoreFileExportTargetFsm ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class SysdebugAutoCoreFileExportTargetFsmConsts():
COMPLETION_TIME_ = ""
CURRENT_FSM_CONFIGURE = "Configure"
CURRENT_FSM_NOP = "nop"
FSM_STATUS_FAIL = "fail"
FSM_STATUS_IN_PROGRESS = "inProgress"
FSM_STATUS_NOP = "nop"
FSM_STATUS_PENDING = "pending"
FSM_STATUS_SKIP = "skip"
FSM_STATUS_SUCCESS = "success"
FSM_STATUS_THROTTLED = "throttled"
RMT_ERR_CODE_ERR_2FA_AUTH_RETRY = "ERR-2fa-auth-retry"
RMT_ERR_CODE_ERR_ACTIVATE_FAILED = "ERR-ACTIVATE-failed"
RMT_ERR_CODE_ERR_ACTIVATE_IN_PROGRESS = "ERR-ACTIVATE-in-progress"
RMT_ERR_CODE_ERR_ACTIVATE_RETRY = "ERR-ACTIVATE-retry"
RMT_ERR_CODE_ERR_BIOS_TOKENS_OLD_BIOS = "ERR-BIOS-TOKENS-OLD-BIOS"
RMT_ERR_CODE_ERR_BIOS_TOKENS_OLD_CIMC = "ERR-BIOS-TOKENS-OLD-CIMC"
RMT_ERR_CODE_ERR_BIOS_NETWORK_BOOT_ORDER_NOT_FOUND = "ERR-BIOS-network-boot-order-not-found"
RMT_ERR_CODE_ERR_BOARDCTRLUPDATE_IGNORE = "ERR-BOARDCTRLUPDATE-ignore"
RMT_ERR_CODE_ERR_DIAG_CANCELLED = "ERR-DIAG-cancelled"
RMT_ERR_CODE_ERR_DIAG_FSM_RESTARTED = "ERR-DIAG-fsm-restarted"
RMT_ERR_CODE_ERR_DIAG_TEST_FAILED = "ERR-DIAG-test-failed"
RMT_ERR_CODE_ERR_DNLD_AUTHENTICATION_FAILURE = "ERR-DNLD-authentication-failure"
RMT_ERR_CODE_ERR_DNLD_HOSTKEY_MISMATCH = "ERR-DNLD-hostkey-mismatch"
RMT_ERR_CODE_ERR_DNLD_INVALID_IMAGE = "ERR-DNLD-invalid-image"
RMT_ERR_CODE_ERR_DNLD_NO_FILE = "ERR-DNLD-no-file"
RMT_ERR_CODE_ERR_DNLD_NO_SPACE = "ERR-DNLD-no-space"
RMT_ERR_CODE_ERR_DNLD_USB_UNMOUNTED = "ERR-DNLD-usb-unmounted"
RMT_ERR_CODE_ERR_DNS_DELETE_ERROR = "ERR-DNS-delete-error"
RMT_ERR_CODE_ERR_DNS_GET_ERROR = "ERR-DNS-get-error"
RMT_ERR_CODE_ERR_DNS_SET_ERROR = "ERR-DNS-set-error"
RMT_ERR_CODE_ERR_DIAGNOSTICS_IN_PROGRESS = "ERR-Diagnostics-in-progress"
RMT_ERR_CODE_ERR_DIAGNOSTICS_MEMTEST_IN_PROGRESS = "ERR-Diagnostics-memtest-in-progress"
RMT_ERR_CODE_ERR_DIAGNOSTICS_NETWORK_IN_PROGRESS = "ERR-Diagnostics-network-in-progress"
RMT_ERR_CODE_ERR_FILTER_ILLEGAL_FORMAT = "ERR-FILTER-illegal-format"
RMT_ERR_CODE_ERR_FSM_NO_SUCH_STATE = "ERR-FSM-no-such-state"
RMT_ERR_CODE_ERR_HOST_FRU_IDENTITY_MISMATCH = "ERR-HOST-fru-identity-mismatch"
RMT_ERR_CODE_ERR_HTTP_SET_ERROR = "ERR-HTTP-set-error"
RMT_ERR_CODE_ERR_HTTPS_SET_ERROR = "ERR-HTTPS-set-error"
RMT_ERR_CODE_ERR_IBMC_ANALYZE_RESULTS = "ERR-IBMC-analyze-results"
RMT_ERR_CODE_ERR_IBMC_CONNECT_ERROR = "ERR-IBMC-connect-error"
RMT_ERR_CODE_ERR_IBMC_CONNECTOR_INFO_RETRIEVAL_ERROR = "ERR-IBMC-connector-info-retrieval-error"
RMT_ERR_CODE_ERR_IBMC_FRU_RETRIEVAL_ERROR = "ERR-IBMC-fru-retrieval-error"
RMT_ERR_CODE_ERR_IBMC_INVALID_END_POINT_CONFIG = "ERR-IBMC-invalid-end-point-config"
RMT_ERR_CODE_ERR_IBMC_RESULTS_NOT_READY = "ERR-IBMC-results-not-ready"
RMT_ERR_CODE_ERR_MAX_SUBSCRIPTIONS_ALLOWED_ERROR = "ERR-MAX-subscriptions-allowed-error"
RMT_ERR_CODE_ERR_MO_CONFIG_CHILD_OBJECT_CANT_BE_CONFIGURED = "ERR-MO-CONFIG-child-object-cant-be-configured"
RMT_ERR_CODE_ERR_MO_META_NO_SUCH_OBJECT_CLASS = "ERR-MO-META-no-such-object-class"
RMT_ERR_CODE_ERR_MO_PROPERTY_NO_SUCH_PROPERTY = "ERR-MO-PROPERTY-no-such-property"
RMT_ERR_CODE_ERR_MO_PROPERTY_VALUE_OUT_OF_RANGE = "ERR-MO-PROPERTY-value-out-of-range"
RMT_ERR_CODE_ERR_MO_ACCESS_DENIED = "ERR-MO-access-denied"
RMT_ERR_CODE_ERR_MO_DELETION_RULE_VIOLATION = "ERR-MO-deletion-rule-violation"
RMT_ERR_CODE_ERR_MO_DUPLICATE_OBJECT = "ERR-MO-duplicate-object"
RMT_ERR_CODE_ERR_MO_ILLEGAL_CONTAINMENT = "ERR-MO-illegal-containment"
RMT_ERR_CODE_ERR_MO_ILLEGAL_CREATION = "ERR-MO-illegal-creation"
RMT_ERR_CODE_ERR_MO_ILLEGAL_ITERATOR_STATE = "ERR-MO-illegal-iterator-state"
RMT_ERR_CODE_ERR_MO_ILLEGAL_OBJECT_LIFECYCLE_TRANSITION = "ERR-MO-illegal-object-lifecycle-transition"
RMT_ERR_CODE_ERR_MO_NAMING_RULE_VIOLATION = "ERR-MO-naming-rule-violation"
RMT_ERR_CODE_ERR_MO_OBJECT_NOT_FOUND = "ERR-MO-object-not-found"
RMT_ERR_CODE_ERR_MO_RESOURCE_ALLOCATION = "ERR-MO-resource-allocation"
RMT_ERR_CODE_ERR_NTP_DELETE_ERROR = "ERR-NTP-delete-error"
RMT_ERR_CODE_ERR_NTP_GET_ERROR = "ERR-NTP-get-error"
RMT_ERR_CODE_ERR_NTP_SET_ERROR = "ERR-NTP-set-error"
RMT_ERR_CODE_ERR_POWER_CAP_UNSUPPORTED = "ERR-POWER-CAP-UNSUPPORTED"
RMT_ERR_CODE_ERR_POWER_PROFILE_IN_PROGRESS = "ERR-POWER-PROFILE-IN-PROGRESS"
RMT_ERR_CODE_ERR_SERVER_MIS_CONNECT = "ERR-SERVER-mis-connect"
RMT_ERR_CODE_ERR_SWITCH_INVALID_IF_CONFIG = "ERR-SWITCH-invalid-if-config"
RMT_ERR_CODE_ERR_TOKEN_REQUEST_DENIED = "ERR-TOKEN-request-denied"
RMT_ERR_CODE_ERR_UNABLE_TO_FETCH_BIOS_SETTINGS = "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS"
RMT_ERR_CODE_ERR_UPDATE_FAILED = "ERR-UPDATE-failed"
RMT_ERR_CODE_ERR_UPDATE_IN_PROGRESS = "ERR-UPDATE-in-progress"
RMT_ERR_CODE_ERR_UPDATE_RETRY = "ERR-UPDATE-retry"
RMT_ERR_CODE_ERR_AAA_CONFIG_MODIFY_ERROR = "ERR-aaa-config-modify-error"
RMT_ERR_CODE_ERR_ACCT_REALM_SET_ERROR = "ERR-acct-realm-set-error"
RMT_ERR_CODE_ERR_ADMIN_PASSWD_SET = "ERR-admin-passwd-set"
RMT_ERR_CODE_ERR_AUTH_ISSUE = "ERR-auth-issue"
RMT_ERR_CODE_ERR_AUTH_REALM_GET_ERROR = "ERR-auth-realm-get-error"
RMT_ERR_CODE_ERR_AUTH_REALM_SET_ERROR = "ERR-auth-realm-set-error"
RMT_ERR_CODE_ERR_AUTHENTICATION = "ERR-authentication"
RMT_ERR_CODE_ERR_AUTHORIZATION_REQUIRED = "ERR-authorization-required"
RMT_ERR_CODE_ERR_CLI_SESSION_LIMIT_REACHED = "ERR-cli-session-limit-reached"
RMT_ERR_CODE_ERR_CREATE_KEYRING = "ERR-create-keyring"
RMT_ERR_CODE_ERR_CREATE_LOCALE = "ERR-create-locale"
RMT_ERR_CODE_ERR_CREATE_ROLE = "ERR-create-role"
RMT_ERR_CODE_ERR_CREATE_TP = "ERR-create-tp"
RMT_ERR_CODE_ERR_CREATE_USER = "ERR-create-user"
RMT_ERR_CODE_ERR_DELETE_LOCALE = "ERR-delete-locale"
RMT_ERR_CODE_ERR_DELETE_ROLE = "ERR-delete-role"
RMT_ERR_CODE_ERR_DELETE_SESSION = "ERR-delete-session"
RMT_ERR_CODE_ERR_DELETE_USER = "ERR-delete-user"
RMT_ERR_CODE_ERR_DOWNGRADE_FAIL = "ERR-downgrade-fail"
RMT_ERR_CODE_ERR_EFI_DIAGNOSTICS_IN_PROGRESS = "ERR-efi-Diagnostics--in-progress"
RMT_ERR_CODE_ERR_ENABLE_MGMT_CONN = "ERR-enable-mgmt-conn"
RMT_ERR_CODE_ERR_EP_SET_ERROR = "ERR-ep-set-error"
RMT_ERR_CODE_ERR_GET_MAX_HTTP_USER_SESSIONS = "ERR-get-max-http-user-sessions"
RMT_ERR_CODE_ERR_HTTP_INITIALIZING = "ERR-http-initializing"
RMT_ERR_CODE_ERR_INSUFFICIENTLY_EQUIPPED = "ERR-insufficiently-equipped"
RMT_ERR_CODE_ERR_INTERNAL_ERROR = "ERR-internal-error"
RMT_ERR_CODE_ERR_LDAP_DELETE_ERROR = "ERR-ldap-delete-error"
RMT_ERR_CODE_ERR_LDAP_GET_ERROR = "ERR-ldap-get-error"
RMT_ERR_CODE_ERR_LDAP_GROUP_MODIFY_ERROR = "ERR-ldap-group-modify-error"
RMT_ERR_CODE_ERR_LDAP_GROUP_SET_ERROR = "ERR-ldap-group-set-error"
RMT_ERR_CODE_ERR_LDAP_SET_ERROR = "ERR-ldap-set-error"
RMT_ERR_CODE_ERR_LOCALE_SET_ERROR = "ERR-locale-set-error"
RMT_ERR_CODE_ERR_MAX_USERID_SESSIONS_REACHED = "ERR-max-userid-sessions-reached"
RMT_ERR_CODE_ERR_MISSING_METHOD = "ERR-missing-method"
RMT_ERR_CODE_ERR_MODIFY_LOCALE = "ERR-modify-locale"
RMT_ERR_CODE_ERR_MODIFY_ROLE = "ERR-modify-role"
RMT_ERR_CODE_ERR_MODIFY_USER = "ERR-modify-user"
RMT_ERR_CODE_ERR_MODIFY_USER_LOCALE = "ERR-modify-user-locale"
RMT_ERR_CODE_ERR_MODIFY_USER_ROLE = "ERR-modify-user-role"
RMT_ERR_CODE_ERR_PROVIDER_GROUP_MODIFY_ERROR = "ERR-provider-group-modify-error"
RMT_ERR_CODE_ERR_PROVIDER_GROUP_SET_ERROR = "ERR-provider-group-set-error"
RMT_ERR_CODE_ERR_RADIUS_GET_ERROR = "ERR-radius-get-error"
RMT_ERR_CODE_ERR_RADIUS_GLOBAL_SET_ERROR = "ERR-radius-global-set-error"
RMT_ERR_CODE_ERR_RADIUS_GROUP_SET_ERROR = "ERR-radius-group-set-error"
RMT_ERR_CODE_ERR_RADIUS_SET_ERROR = "ERR-radius-set-error"
RMT_ERR_CODE_ERR_REQUEST_TIMEOUT = "ERR-request-timeout"
RMT_ERR_CODE_ERR_RESET_ADAPTER = "ERR-reset-adapter"
RMT_ERR_CODE_ERR_ROLE_SET_ERROR = "ERR-role-set-error"
RMT_ERR_CODE_ERR_SECONDARY_NODE = "ERR-secondary-node"
RMT_ERR_CODE_ERR_SERVICE_NOT_READY = "ERR-service-not-ready"
RMT_ERR_CODE_ERR_SESSION_CACHE_FULL = "ERR-session-cache-full"
RMT_ERR_CODE_ERR_SESSION_NOT_FOUND = "ERR-session-not-found"
RMT_ERR_CODE_ERR_SET_NETWORK = "ERR-set-network"
RMT_ERR_CODE_ERR_SET_PASSWORD_STRENGTH_CHECK = "ERR-set-password-strength-check"
RMT_ERR_CODE_ERR_SET_PORT_CHANNEL = "ERR-set-port-channel"
RMT_ERR_CODE_ERR_STORE_PRE_LOGIN_BANNER_MSG = "ERR-store-pre-login-banner-msg"
RMT_ERR_CODE_ERR_TACACS_ENABLE_ERROR = "ERR-tacacs-enable-error"
RMT_ERR_CODE_ERR_TACACS_GLOBAL_SET_ERROR = "ERR-tacacs-global-set-error"
RMT_ERR_CODE_ERR_TACACS_GROUP_SET_ERROR = "ERR-tacacs-group-set-error"
RMT_ERR_CODE_ERR_TACACS_PLUS_GET_ERROR = "ERR-tacacs-plus-get-error"
RMT_ERR_CODE_ERR_TACACS_SET_ERROR = "ERR-tacacs-set-error"
RMT_ERR_CODE_ERR_TEST_ERROR_1 = "ERR-test-error-1"
RMT_ERR_CODE_ERR_TEST_ERROR_2 = "ERR-test-error-2"
RMT_ERR_CODE_ERR_TIMEZONE_SET_ERROR = "ERR-timezone-set-error"
RMT_ERR_CODE_ERR_USER_ACCOUNT_EXPIRED = "ERR-user-account-expired"
RMT_ERR_CODE_ERR_USER_SET_ERROR = "ERR-user-set-error"
RMT_ERR_CODE_ERR_XML_PARSE_ERROR = "ERR-xml-parse-error"
RMT_ERR_CODE_NONE = "none"
class SysdebugAutoCoreFileExportTargetFsm(ManagedObject):
"""This is SysdebugAutoCoreFileExportTargetFsm class."""
consts = SysdebugAutoCoreFileExportTargetFsmConsts()
naming_props = set([])
mo_meta = MoMeta("SysdebugAutoCoreFileExportTargetFsm", "sysdebugAutoCoreFileExportTargetFsm", "fsm", VersionMeta.Version211a, "OutputOnly", 0xf, [], [""], [u'sysdebugAutoCoreFileExportTarget'], [u'sysdebugAutoCoreFileExportTargetFsmStage'], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"completion_time": MoPropertyMeta("completion_time", "completionTime", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [""], []),
"current_fsm": MoPropertyMeta("current_fsm", "currentFsm", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["Configure", "nop"], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"fsm_status": MoPropertyMeta("fsm_status", "fsmStatus", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, None, None, None, None, ["fail", "inProgress", "nop", "pending", "skip", "success", "throttled"], []),
"instance_id": MoPropertyMeta("instance_id", "instanceId", "uint", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"progress": MoPropertyMeta("progress", "progress", "byte", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], ["0-100"]),
"rmt_err_code": MoPropertyMeta("rmt_err_code", "rmtErrCode", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["ERR-2fa-auth-retry", "ERR-ACTIVATE-failed", "ERR-ACTIVATE-in-progress", "ERR-ACTIVATE-retry", "ERR-BIOS-TOKENS-OLD-BIOS", "ERR-BIOS-TOKENS-OLD-CIMC", "ERR-BIOS-network-boot-order-not-found", "ERR-BOARDCTRLUPDATE-ignore", "ERR-DIAG-cancelled", "ERR-DIAG-fsm-restarted", "ERR-DIAG-test-failed", "ERR-DNLD-authentication-failure", "ERR-DNLD-hostkey-mismatch", "ERR-DNLD-invalid-image", "ERR-DNLD-no-file", "ERR-DNLD-no-space", "ERR-DNLD-usb-unmounted", "ERR-DNS-delete-error", "ERR-DNS-get-error", "ERR-DNS-set-error", "ERR-Diagnostics-in-progress", "ERR-Diagnostics-memtest-in-progress", "ERR-Diagnostics-network-in-progress", "ERR-FILTER-illegal-format", "ERR-FSM-no-such-state", "ERR-HOST-fru-identity-mismatch", "ERR-HTTP-set-error", "ERR-HTTPS-set-error", "ERR-IBMC-analyze-results", "ERR-IBMC-connect-error", "ERR-IBMC-connector-info-retrieval-error", "ERR-IBMC-fru-retrieval-error", "ERR-IBMC-invalid-end-point-config", "ERR-IBMC-results-not-ready", "ERR-MAX-subscriptions-allowed-error", "ERR-MO-CONFIG-child-object-cant-be-configured", "ERR-MO-META-no-such-object-class", "ERR-MO-PROPERTY-no-such-property", "ERR-MO-PROPERTY-value-out-of-range", "ERR-MO-access-denied", "ERR-MO-deletion-rule-violation", "ERR-MO-duplicate-object", "ERR-MO-illegal-containment", "ERR-MO-illegal-creation", "ERR-MO-illegal-iterator-state", "ERR-MO-illegal-object-lifecycle-transition", "ERR-MO-naming-rule-violation", "ERR-MO-object-not-found", "ERR-MO-resource-allocation", "ERR-NTP-delete-error", "ERR-NTP-get-error", "ERR-NTP-set-error", "ERR-POWER-CAP-UNSUPPORTED", "ERR-POWER-PROFILE-IN-PROGRESS", "ERR-SERVER-mis-connect", "ERR-SWITCH-invalid-if-config", "ERR-TOKEN-request-denied", "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS", "ERR-UPDATE-failed", "ERR-UPDATE-in-progress", "ERR-UPDATE-retry", "ERR-aaa-config-modify-error", "ERR-acct-realm-set-error", 
"ERR-admin-passwd-set", "ERR-auth-issue", "ERR-auth-realm-get-error", "ERR-auth-realm-set-error", "ERR-authentication", "ERR-authorization-required", "ERR-cli-session-limit-reached", "ERR-create-keyring", "ERR-create-locale", "ERR-create-role", "ERR-create-tp", "ERR-create-user", "ERR-delete-locale", "ERR-delete-role", "ERR-delete-session", "ERR-delete-user", "ERR-downgrade-fail", "ERR-efi-Diagnostics--in-progress", "ERR-enable-mgmt-conn", "ERR-ep-set-error", "ERR-get-max-http-user-sessions", "ERR-http-initializing", "ERR-insufficiently-equipped", "ERR-internal-error", "ERR-ldap-delete-error", "ERR-ldap-get-error", "ERR-ldap-group-modify-error", "ERR-ldap-group-set-error", "ERR-ldap-set-error", "ERR-locale-set-error", "ERR-max-userid-sessions-reached", "ERR-missing-method", "ERR-modify-locale", "ERR-modify-role", "ERR-modify-user", "ERR-modify-user-locale", "ERR-modify-user-role", "ERR-provider-group-modify-error", "ERR-provider-group-set-error", "ERR-radius-get-error", "ERR-radius-global-set-error", "ERR-radius-group-set-error", "ERR-radius-set-error", "ERR-request-timeout", "ERR-reset-adapter", "ERR-role-set-error", "ERR-secondary-node", "ERR-service-not-ready", "ERR-session-cache-full", "ERR-session-not-found", "ERR-set-network", "ERR-set-password-strength-check", "ERR-set-port-channel", "ERR-store-pre-login-banner-msg", "ERR-tacacs-enable-error", "ERR-tacacs-global-set-error", "ERR-tacacs-group-set-error", "ERR-tacacs-plus-get-error", "ERR-tacacs-set-error", "ERR-test-error-1", "ERR-test-error-2", "ERR-timezone-set-error", "ERR-user-account-expired", "ERR-user-set-error", "ERR-xml-parse-error", "none"], ["0-4294967295"]),
"rmt_err_descr": MoPropertyMeta("rmt_err_descr", "rmtErrDescr", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rmt_rslt": MoPropertyMeta("rmt_rslt", "rmtRslt", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout),){0,32}(defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout){0,1}""", [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"completionTime": "completion_time",
"currentFsm": "current_fsm",
"descr": "descr",
"dn": "dn",
"fsmStatus": "fsm_status",
"instanceId": "instance_id",
"progress": "progress",
"rmtErrCode": "rmt_err_code",
"rmtErrDescr": "rmt_err_descr",
"rmtRslt": "rmt_rslt",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
    def __init__(self, parent_mo_or_dn, **kwargs):
        """Create the FSM managed object under ``parent_mo_or_dn``.

        All mapped properties start as ``None``; ``kwargs`` may pre-set any
        property listed in ``prop_map``.
        """
        self._dirty_mask = 0
        self.child_action = None
        self.completion_time = None
        self.current_fsm = None
        self.descr = None
        self.fsm_status = None
        self.instance_id = None
        self.progress = None
        self.rmt_err_code = None
        self.rmt_err_descr = None
        self.rmt_rslt = None
        self.sacl = None
        self.status = None
        ManagedObject.__init__(self, "SysdebugAutoCoreFileExportTargetFsm", parent_mo_or_dn, **kwargs)
| 87.961905 | 3,649 | 0.758175 |
fadf633a2c614176b4f06e13b7845ebc5554706f | 1,060 | py | Python | cogs/extra.py | Nakukai/Koha | 67b014f277742fbff0532c372d6b0f49db739528 | [
"Apache-2.0"
] | 2 | 2021-02-16T11:44:13.000Z | 2021-02-23T17:41:22.000Z | cogs/extra.py | Nakukai/Koha | 67b014f277742fbff0532c372d6b0f49db739528 | [
"Apache-2.0"
] | 5 | 2021-02-14T01:36:12.000Z | 2021-02-22T11:29:21.000Z | cogs/extra.py | Nakukai/Koha | 67b014f277742fbff0532c372d6b0f49db739528 | [
"Apache-2.0"
] | 1 | 2021-02-14T14:58:05.000Z | 2021-02-14T14:58:05.000Z | import discord
from discord.ext import commands
class Extra(commands.Cog):
    """Miscellaneous informational commands (invite / support links)."""

    def __init__(self, client):
        self.client = client

    @commands.command()
    async def invite(self, ctx):
        """Send an embed with the bot's OAuth2 invite link."""
        Invitation = discord.Embed(
            color=0xFFB6C1,
            description="**Want me to join your amazing server?**\n"
                        "[Tap here to invite me](%s)" %
                        ("https://discord.com/api/oauth2/authorize?client_id=783865415746977824&permissions=3533888&scope=bot"))
        await ctx.send(embed=Invitation)

    @commands.command()
    async def support(self, ctx):
        """Send an embed linking to the support server."""
        Mayuport = discord.Embed(
            color=0xFF0000,
            description="**Have you found any bugs or just need help with something?**\n"
                        "[Tap here to join](%s)" % ("https://discord.gg/5zpHR7JpZA"))
        await ctx.send(embed=Mayuport)

    @commands.command()
    async def supporter(self, ctx):  # BUG FIX: parameter was misspelled 'sef'
        """Placeholder for the upcoming supporter feature."""
        await ctx.send("**Coming in Mayu V 0.0.3**")
def setup(client):
    # Entry point used by discord.py's extension loader to register the cog.
    client.add_cog(Extra(client))
| 34.193548 | 128 | 0.600943 |
a3638429d2d4ff9a959b513c01503ff52e7a60bb | 3,838 | py | Python | test/algorithms/pes_samplers/test_bopes_sampler.py | davindratulsi/qiskit-nature | d5209bbfe6b83496dc2be327f65a24dcebdea9df | [
"Apache-2.0"
] | null | null | null | test/algorithms/pes_samplers/test_bopes_sampler.py | davindratulsi/qiskit-nature | d5209bbfe6b83496dc2be327f65a24dcebdea9df | [
"Apache-2.0"
] | null | null | null | test/algorithms/pes_samplers/test_bopes_sampler.py | davindratulsi/qiskit-nature | d5209bbfe6b83496dc2be327f65a24dcebdea9df | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests of BOPES Sampler."""
import unittest
from functools import partial
import numpy as np
from qiskit.algorithms import NumPyMinimumEigensolver
from qiskit.utils import algorithm_globals
from qiskit_nature.algorithms.ground_state_solvers import GroundStateEigensolver
from qiskit_nature.algorithms.pes_samplers.bopes_sampler import BOPESSampler
from qiskit_nature.algorithms.pes_samplers.potentials.morse_potential import MorsePotential
from qiskit_nature.drivers import Molecule, PySCFDriver
from qiskit_nature.mappers.second_quantization import ParityMapper
from qiskit_nature.operators.second_quantization.qubit_converter import QubitConverter
from qiskit_nature.problems.second_quantization.electronic import ElectronicStructureProblem
class TestBOPES(unittest.TestCase):
    """Tests of BOPES Sampler."""

    def test_h2_bopes_sampler(self):
        """Test BOPES Sampler on H2"""
        # Fix the global seed so the eigensolver results are reproducible.
        seed = 50
        algorithm_globals.random_seed = seed

        # Molecule: H2 with the H-H distance as the single degree of freedom.
        dof = partial(Molecule.absolute_distance, atom_pair=(1, 0))
        m = Molecule(geometry=[['H', [0., 0., 1.]],
                               ['H', [0., 0.45, 1.]]],
                     degrees_of_freedom=[dof])

        mapper = ParityMapper()
        # Two-qubit reduction is valid with the parity mapping.
        converter = QubitConverter(mapper=mapper, two_qubit_reduction=True)

        driver = PySCFDriver(molecule=m)
        problem = ElectronicStructureProblem(driver)

        solver = NumPyMinimumEigensolver()
        me_gss = GroundStateEigensolver(converter, solver)

        # BOPES sampler
        sampler = BOPESSampler(gss=me_gss)
        # absolute internuclear distance in Angstrom
        points = [0.7, 1.0, 1.3]
        results = sampler.sample(problem, points)

        points_run = results.points
        energies = results.energies
        # Reference energies from an exact diagonalization of the H2 problem.
        np.testing.assert_array_almost_equal(points_run, [0.7, 1.0, 1.3])
        np.testing.assert_array_almost_equal(energies,
                                             [-1.13618945, -1.10115033, -1.03518627], decimal=2)

    def test_potential_interface(self):
        """Tests potential interface."""
        seed = 50
        algorithm_globals.random_seed = seed

        stretch = partial(Molecule.absolute_distance, atom_pair=(1, 0))
        # H-H molecule near equilibrium geometry
        m = Molecule(geometry=[['H', [0., 0., 0.]],
                               ['H', [1., 0., 0.]],
                               ],
                     degrees_of_freedom=[stretch],
                     masses=[1.6735328E-27, 1.6735328E-27])

        mapper = ParityMapper()
        converter = QubitConverter(mapper=mapper)

        driver = PySCFDriver(molecule=m)
        problem = ElectronicStructureProblem(driver)

        solver = NumPyMinimumEigensolver()
        me_gss = GroundStateEigensolver(converter, solver)
        # Run BOPESSampler with exact eigensolution
        points = np.arange(0.45, 5.3, 0.3)
        sampler = BOPESSampler(gss=me_gss)

        res = sampler.sample(problem, points)

        # Fit a Morse potential to the sampled surface and check the
        # fitted physical parameters against reference values.
        pot = MorsePotential(m)
        pot.fit(res.points, res.energies)

        np.testing.assert_array_almost_equal([pot.alpha, pot.r_0], [2.235, 0.720], decimal=3)
        np.testing.assert_array_almost_equal([pot.d_e, pot.m_shift], [0.2107, -1.1419], decimal=3)
np.testing.assert_array_almost_equal([pot.d_e, pot.m_shift], [0.2107, -1.1419], decimal=3)
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>.py`.
    unittest.main()
| 35.869159 | 98 | 0.668838 |
1b270aebb5ab81b86aff0c689a5da103667d6ba6 | 6,637 | py | Python | zinnia/ping.py | Raekkeri/django-blog-zinnia | 1b7b03e1e0c27fdf2a84ff1fbf05aeeb5ac6f370 | [
"BSD-3-Clause"
] | 1 | 2018-05-28T08:46:41.000Z | 2018-05-28T08:46:41.000Z | zinnia/ping.py | elleven/zinnia_blog | 59270435c9f29f5c9672a41ea01fe34b228f8ef9 | [
"Apache-2.0"
] | null | null | null | zinnia/ping.py | elleven/zinnia_blog | 59270435c9f29f5c9672a41ea01fe34b228f8ef9 | [
"Apache-2.0"
] | null | null | null | """Pings utilities for Zinnia"""
import socket
from logging import getLogger
from threading import Thread
try:
from urllib.request import urlopen
from urllib.parse import urlsplit
from xmlrpc.client import Error
from xmlrpc.client import ServerProxy
except ImportError: # Python 2
from urllib2 import urlopen
from urlparse import urlsplit
from xmlrpclib import Error
from xmlrpclib import ServerProxy
from bs4 import BeautifulSoup
from django.contrib.sites.models import Site
from django.urls import reverse
from zinnia.flags import PINGBACK
from zinnia.settings import PROTOCOL
class URLRessources(object):
    """
    Object defining the ressources of the Website.

    Collects the current site's base URL, blog index URL and feed URL,
    resolved once at construction time.
    """
    # NOTE(review): "Ressources" looks like a misspelling of "Resources",
    # but renaming the class would break importers, so it is kept as-is.

    def __init__(self):
        self.current_site = Site.objects.get_current()
        self.site_url = '%s://%s' % (PROTOCOL, self.current_site.domain)
        self.blog_url = '%s%s' % (self.site_url,
                                  reverse('zinnia:entry_archive_index'))
        self.blog_feed = '%s%s' % (self.site_url,
                                   reverse('zinnia:entry_feed'))
class DirectoryPinger(Thread):
    """
    Threaded web directory pinger.

    Pings a list of entries to a weblog directory through the XML-RPC
    ``weblogUpdates`` API, in a background thread started at construction.
    """

    def __init__(self, server_name, entries, timeout=10):
        # server_name: URL of the XML-RPC directory endpoint
        # entries: iterable of entries to ping
        # timeout: socket timeout in seconds applied while pinging
        self.results = []
        self.timeout = timeout
        self.entries = entries
        self.server_name = server_name
        self.server = ServerProxy(self.server_name)
        self.ressources = URLRessources()
        super(DirectoryPinger, self).__init__()
        # The thread starts itself immediately.
        self.start()

    def run(self):
        """
        Ping entries to a directory in a thread.
        """
        logger = getLogger('zinnia.ping.directory')
        # Set the timeout globally for the duration of the pings, then
        # restore the default (no timeout) afterwards.
        socket.setdefaulttimeout(self.timeout)
        for entry in self.entries:
            reply = self.ping_entry(entry)
            self.results.append(reply)
            logger.info('%s : %s', self.server_name, reply['message'])
        socket.setdefaulttimeout(None)

    def ping_entry(self, entry):
        """
        Ping an entry to a directory.

        Tries the ``extendedPing`` API first, falls back to the basic
        ``ping``, and returns an error dict if both fail.
        """
        entry_url = '%s%s' % (self.ressources.site_url,
                              entry.get_absolute_url())
        categories = '|'.join([c.title for c in entry.categories.all()])
        try:
            reply = self.server.weblogUpdates.extendedPing(
                self.ressources.current_site.name,
                self.ressources.blog_url, entry_url,
                self.ressources.blog_feed, categories)
        except Exception:
            try:
                reply = self.server.weblogUpdates.ping(
                    self.ressources.current_site.name,
                    self.ressources.blog_url, entry_url,
                    categories)
            except Exception:
                reply = {'message': '%s is an invalid directory.' %
                         self.server_name,
                         'flerror': True}
        return reply
class ExternalUrlsPinger(Thread):
    """
    Threaded external URLs pinger.

    Finds external links in an entry's HTML content, discovers their
    pingback endpoints and sends pingback calls, all in a background
    thread started at construction.
    """

    def __init__(self, entry, timeout=10):
        # entry: the blog entry whose external links will be pinged
        # timeout: socket timeout in seconds applied while pinging
        self.results = []
        self.entry = entry
        self.timeout = timeout
        self.ressources = URLRessources()
        self.entry_url = '%s%s' % (self.ressources.site_url,
                                   self.entry.get_absolute_url())
        super(ExternalUrlsPinger, self).__init__()
        # The thread starts itself immediately.
        self.start()

    def run(self):
        """
        Ping external URLs in a Thread.
        """
        logger = getLogger('zinnia.ping.external_urls')
        socket.setdefaulttimeout(self.timeout)
        external_urls = self.find_external_urls(self.entry)
        external_urls_pingable = self.find_pingback_urls(external_urls)
        for url, server_name in external_urls_pingable.items():
            reply = self.pingback_url(server_name, url)
            self.results.append(reply)
            logger.info('%s : %s', url, reply)
        socket.setdefaulttimeout(None)

    def is_external_url(self, url, site_url):
        """
        Check if the URL is an external URL.

        A relative URL (no netloc) is not considered external.
        """
        url_splitted = urlsplit(url)
        if not url_splitted.netloc:
            return False
        return url_splitted.netloc != urlsplit(site_url).netloc

    def find_external_urls(self, entry):
        """
        Find external URLs in an entry.

        Parses the entry's rendered HTML and keeps the href of every
        anchor pointing outside the current site.
        """
        soup = BeautifulSoup(entry.html_content, 'html.parser')
        external_urls = [a['href'] for a in soup.find_all('a')
                         if self.is_external_url(
                             a['href'], self.ressources.site_url)]
        return external_urls

    def find_pingback_href(self, content):
        """
        Try to find LINK markups to pingback URL.

        Returns the href of the first ``<link rel="pingback">`` found,
        or None implicitly when there is none.
        """
        soup = BeautifulSoup(content, 'html.parser')
        for link in soup.find_all('link'):
            dict_attr = dict(link.attrs)
            if 'rel' in dict_attr and 'href' in dict_attr:
                for rel_type in dict_attr['rel']:
                    if rel_type.lower() == PINGBACK:
                        return dict_attr.get('href')

    def find_pingback_urls(self, urls):
        """
        Find the pingback URL for each URLs.

        First checks the ``X-Pingback`` HTTP header, then falls back to
        scanning the start of the HTML body for a pingback <link>.
        Relative pingback URLs are made absolute. Unreachable URLs are
        silently skipped.
        """
        pingback_urls = {}
        for url in urls:
            try:
                page = urlopen(url)
                headers = page.info()
                server_url = headers.get('X-Pingback')

                if not server_url:
                    content_type = headers.get('Content-Type', '').split(
                        ';')[0].strip().lower()
                    if content_type in ['text/html', 'application/xhtml+xml']:
                        # Only read the first 5 kB: the pingback <link>
                        # is expected in the document head.
                        server_url = self.find_pingback_href(
                            page.read(5 * 1024))

                if server_url:
                    server_url_splitted = urlsplit(server_url)
                    if not server_url_splitted.netloc:
                        url_splitted = urlsplit(url)
                        server_url = '%s://%s%s' % (url_splitted.scheme,
                                                    url_splitted.netloc,
                                                    server_url)
                    pingback_urls[url] = server_url
            except IOError:
                pass
        return pingback_urls

    def pingback_url(self, server_name, target_url):
        """
        Do a pingback call for the target URL.

        Returns the server's reply, or an error string if the XML-RPC
        call fails.
        """
        try:
            server = ServerProxy(server_name)
            reply = server.pingback.ping(self.entry_url, target_url)
        except (Error, socket.error):
            reply = '%s cannot be pinged.' % target_url
        return reply
| 33.351759 | 78 | 0.560494 |
8401273e3a9d39f598898a6b59ed5906121f5ce2 | 39,922 | py | Python | skrf/circuit.py | JAnderson419/scikit-rf | b505374b5bf0baa60773ccfc4ec41c2a627df2b2 | [
"BSD-3-Clause"
] | null | null | null | skrf/circuit.py | JAnderson419/scikit-rf | b505374b5bf0baa60773ccfc4ec41c2a627df2b2 | [
"BSD-3-Clause"
] | 1 | 2021-07-15T14:45:49.000Z | 2021-07-15T14:45:49.000Z | skrf/circuit.py | JAnderson419/scikit-rf | b505374b5bf0baa60773ccfc4ec41c2a627df2b2 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
circuit (:mod:`skrf.circuit`)
========================================
The Circuit class represents a circuit of arbitrary topology,
consisting of an arbitrary number of N-ports networks.
Like in an electronic circuit simulator, the circuit must have one or more ports
connected to the circuit. The Circuit object allows one retrieving the M-ports network,
where M is the number of ports defined.
The results are returned in :class:`~skrf.circuit.Circuit` object.
Building a Circuit
------------------
.. autosummary::
:toctree: generated/
Circuit
Circuit.Port
Circuit.Ground
Representing a Circuit
----------------------
.. autosummary::
:toctree: generated/
Circuit.plot_graph
Network Representations
-----------------------
.. autosummary::
:toctree: generated/
Circuit.network
Circuit.s
Circuit.s_external
Circuit.s_active
Circuit.z_active
Circuit.y_active
Circuit.vswr_active
Circuit.port_z0
Voltages and Currents
---------------------
.. autosummary::
:toctree: generated/
Circuit.voltages
Circuit.voltages_external
Circuit.currents
Circuit.currents_external
Circuit internals
------------------
.. autosummary::
:toctree: generated/
Circuit.networks_dict
Circuit.networks_list
Circuit.connections_nb
Circuit.connections_list
Circuit.nodes_nb
Circuit.dim
Circuit.intersections_dict
Circuit.port_indexes
Circuit.C
Circuit.X
Graph representation
--------------------
.. autosummary::
:toctree: generated/
Circuit.graph
Circuit.G
Circuit.edges
Circuit.edge_labels
"""
from numbers import Number
from . network import Network, a2s
from . media import media
from . constants import INF, NumberLike
import numpy as np
try:
import networkx as nx
except ImportError as e:
pass
from itertools import chain, product
from scipy.linalg import block_diag
from typing import Iterable, List, TYPE_CHECKING, Tuple
if TYPE_CHECKING:
from .frequency import Frequency
class Circuit():
"""
Creates a circuit made of a set of N-ports networks.
For instructions on how to create Circuit see :func:`__init__`.
A Circuit object is representation a circuit assembly of an arbitrary
number of N-ports networks connected together via an arbitrary topology.
The algorithm used to calculate the resultant network can be found in [#]_.
References
----------
.. [#] P. Hallbjörner, Microw. Opt. Technol. Lett. 38, 99 (2003).
"""
    def __init__(self, connections: List[List[Tuple]]) -> None:
        """
        Circuit constructor. Creates a circuit made of a set of N-ports networks.

        Parameters
        ----------
        connections : list of list of tuples
            Description of circuit connections.
            Each connection is a described by a list of tuple.
            Each tuple contains (network, network_port_nb).
            Port number indexing starts from zero.

        Examples
        --------
        Example of connections between two 1-port networks:
        ::
            connections = [
                [(network1, 0), (network2, 0)],
            ]

        Example of a connection between three 1-port networks connected
        to a single node:
        ::
            connections = [
                [(network1, 0), (network2, 0), (network3, 0)]
            ]

        Example of a connection between two 1-port networks (port1 and port2)
        and two 2-ports networks (ntw1 and ntw2):
        ::
            connections = [
                [(port1, 0), (ntw1, 0)],
                [(ntw1, 1), (ntw2, 0)],
                [(ntw2, 1), (port2, 0)]
            ]

        Example of a connection between three 1-port networks (port1, port2 and port3)
        and a 3-ports network (ntw):
        ::
            connections = [
                [(port1, 0), (ntw, 0)],
                [(port2, 0), (ntw, 1)],
                [(port3, 0), (ntw, 2)]
            ]

        NB1: Creating 1-port network to be used a port can be made with :func:`Port`

        NB2: The external ports indexing is defined by the order of appearance of
        the ports in the connections list. Thus, the first network identified
        as a port will be the 1st port of the resulting network (index 0),
        the second network identified as a port will be the second port (index 1),
        etc.
        """
        self.connections = connections

        # check if all networks have a name: names are used as dictionary
        # keys and graph node labels throughout the class
        for cnx in self.connections:
            for (ntw, _) in cnx:
                if not self._is_named(ntw):
                    raise AttributeError('All Networks must have a name. Faulty network:', ntw)

        # list of networks for initial checks
        ntws = self.networks_list()

        # check if all networks have same frequency
        ref_freq = ntws[0].frequency
        for ntw in ntws:
            if ntw.frequency != ref_freq:
                raise AttributeError('All Networks must have same frequencies')
        # All frequencies are the same, Circuit frequency can be any of the ntw
        self.frequency = ntws[0].frequency
def _is_named(self, ntw):
"""
Return True is the network has a name, False otherwise
"""
if not ntw.name or ntw.name == '':
return False
else:
return True
    @classmethod
    def Port(cls, frequency: 'Frequency', name: str, z0: float = 50) -> 'Network':
        """
        Return a 1-port Network to be used as a Circuit port.

        Passing the frequency and name is mandatory. Port name must include
        the word 'port' inside. (ex: 'Port1' or 'port_3')

        Parameters
        ----------
        frequency : :class:`~skrf.frequency.Frequency`
            Frequency common to all other networks in the circuit
        name : string
            Name of the port.
            Must include the word 'port' inside. (ex: 'Port1' or 'port_3')
        z0 : real, optional
            Characteristic impedance of the port. Default is 50 Ohm.

        Returns
        -------
        port : :class:`~skrf.network.Network` object
            1-port network

        Examples
        --------
        .. ipython::

            @suppress
            In [16]: import skrf as rf

            In [17]: freq = rf.Frequency(start=1, stop=2, npoints=101)

            In [18]: port1 = rf.Circuit.Port(freq, name='Port1')
        """
        # a matched load in a medium of characteristic impedance z0
        # behaves as an ideal (reflectionless) 1-port termination
        _media = media.DefinedGammaZ0(frequency, z0=z0)
        return _media.match(name=name)
    @classmethod
    def SeriesImpedance(cls, frequency: 'Frequency', Z: NumberLike, name: str, z0: float = 50) -> 'Network':
        """
        Return a 2-port network of a series impedance.

        Passing the frequency and name is mandatory.

        Parameters
        ----------
        frequency : :class:`~skrf.frequency.Frequency`
            Frequency common to all other networks in the circuit
        Z : complex array of shape n_freqs
            Impedance
        name : string
            Name of the series impedance
        z0 : real, optional
            Characteristic impedance of the port. Default is 50 Ohm.

        Returns
        -------
        serie_impedance : :class:`~skrf.network.Network` object
            2-port network

        Examples
        --------
        .. ipython::

            @suppress
            In [16]: import skrf as rf

            In [17]: freq = rf.Frequency(start=1, stop=2, npoints=101)

            In [18]: open = rf.Circuit.SeriesImpedance(freq, name='series_impedance')
        """
        # Build the ABCD matrix of a series impedance Z:
        #   [[1, Z], [0, 1]]
        # then convert it to S-parameters.
        A = np.zeros(shape=(len(frequency), 2, 2), dtype=complex)
        A[:, 0, 0] = 1
        A[:, 0, 1] = Z
        A[:, 1, 0] = 0
        A[:, 1, 1] = 1
        ntw = Network(frequency=frequency, z0=z0, name=name)
        ntw.s = a2s(A)
        return ntw
    @classmethod
    def ShuntAdmittance(cls, frequency: 'Frequency', Y: NumberLike, name: str, z0: float = 50) -> 'Network':
        """
        Return a 2-port network of a shunt admittance.

        Passing the frequency and name is mandatory.

        Parameters
        ----------
        frequency : :class:`~skrf.frequency.Frequency`
            Frequency common to all other networks in the circuit
        Y : complex array of shape n_freqs
            Admittance
        name : string
            Name of the shunt admittance
        z0 : real, optional
            Characteristic impedance of the port. Default is 50 Ohm.

        Returns
        -------
        shunt_admittance : :class:`~skrf.network.Network` object
            2-port network

        Examples
        --------
        .. ipython::

            @suppress
            In [16]: import skrf as rf

            In [17]: freq = rf.Frequency(start=1, stop=2, npoints=101)

            In [18]: open = rf.Circuit.ShuntAdmittance(freq, name='shunt_admittance')
        """
        # Build the ABCD matrix of a shunt admittance Y:
        #   [[1, 0], [Y, 1]]
        # then convert it to S-parameters.
        A = np.zeros(shape=(len(frequency), 2, 2), dtype=complex)
        A[:, 0, 0] = 1
        A[:, 0, 1] = 0
        A[:, 1, 0] = Y
        A[:, 1, 1] = 1
        ntw = Network(frequency=frequency, z0=z0, name=name)
        ntw.s = a2s(A)
        return ntw
    @classmethod
    def Ground(cls, frequency: 'Frequency', name: str, z0: float = 50) -> 'Network':
        """
        Return a 2-port network of a grounded link.

        Passing the frequency and a name is mandatory.

        The ground link is modelled as an infinite shunt admittance.

        Parameters
        ----------
        frequency : :class:`~skrf.frequency.Frequency`
            Frequency common to all other networks in the circuit
        name : string
            Name of the ground.
        z0 : real, optional
            Characteristic impedance of the port. Default is 50 Ohm.

        Returns
        -------
        ground : :class:`~skrf.network.Network` object
            2-port network

        Examples
        --------
        .. ipython::

            @suppress
            In [16]: import skrf as rf

            In [17]: freq = rf.Frequency(start=1, stop=2, npoints=101)

            In [18]: ground = rf.Circuit.Ground(freq, name='GND')
        """
        # An ideal ground is a shunt path of infinite admittance.
        return cls.ShuntAdmittance(frequency, Y=INF, name=name)
    @classmethod
    def Open(cls, frequency: 'Frequency', name: str, z0: float = 50) -> 'Network':
        """
        Return a 2-port network of an open link.

        Passing the frequency and name is mandatory.

        The open link is modelled as an infinite series impedance.

        Parameters
        ----------
        frequency : :class:`~skrf.frequency.Frequency`
            Frequency common to all other networks in the circuit
        name : string
            Name of the open.
        z0 : real, optional
            Characteristic impedance of the port. Default is 50 Ohm.

        Returns
        -------
        open : :class:`~skrf.network.Network` object
            2-port network

        Examples
        --------
        .. ipython::

            @suppress
            In [16]: import skrf as rf

            In [17]: freq = rf.Frequency(start=1, stop=2, npoints=101)

            In [18]: open = rf.Circuit.Open(freq, name='open')
        """
        # An ideal open is a series path of infinite impedance.
        return cls.SeriesImpedance(frequency, Z=INF, name=name)
def networks_dict(self, connections: List = None, min_nports: int = 1) -> dict:
"""
Return the dictionary of Networks from the connection setup X.
Parameters
----------
connections : List, optional
connections list, by default None (then uses the `self.connections`)
min_nports : int, optional
min number of ports, by default 1
Returns
-------
dict
Dictionnary of Networks
"""
if not connections:
connections = self.connections
ntws = []
for cnx in connections:
for (ntw, port) in cnx:
ntws.append(ntw)
return {ntw.name: ntw for ntw in ntws if ntw.nports >= min_nports}
def networks_list(self, connections: List = None, min_nports: int = 1) -> List:
"""
Return a list of unique networks (sorted by appearing order in connections).
Parameters
----------
connections : List, optional
connections list, by default None (then uses the `self.connections`)
min_nports : int, optional
min number of ports, by default 1
Returns
-------
list
List of unique networks
"""
if not connections:
connections = self.connections
ntw_dict = self.networks_dict(connections)
return [ntw for ntw in ntw_dict.values() if ntw.nports >= min_nports]
    @property
    def connections_nb(self) -> int:
        """
        Return the number of intersections in the circuit.

        Returns
        -------
        int
            Number of intersections (connection nodes).
        """
        return len(self.connections)
@property
def connections_list(self) -> list:
"""
Return the full list of connections, including intersections.
The resulting list if of the form:
::
[
[connexion_number, connexion],
[connexion_number, connexion],
...
]
"""
return [[idx_cnx, cnx] for (idx_cnx, cnx) in enumerate(chain.from_iterable(self.connections))]
    @property
    def networks_nb(self) -> int:
        """
        Return the number of connected networks (port excluded).

        Returns
        -------
        int
            Number of unique networks in the circuit.
        """
        return len(self.networks_list(self.connections))
    @property
    def nodes_nb(self) -> int:
        """
        Return the number of nodes in the circuit.

        Returns
        -------
        int
            A node is either an intersection or a network.
        """
        return self.connections_nb + self.networks_nb
    @property
    def dim(self) -> int:
        """
        Return the dimension of the C, X and global S matrices.

        It correspond to the sum of all connections.

        Returns
        -------
        int
            Total number of (network, port) pairs over all intersections.
        """
        return np.sum([len(cnx) for cnx in self.connections])
    @property
    def G(self):
        """
        Generate the graph of the circuit. Convenience shortname for :func:`graph`.

        Returns
        -------
        G : :class:`networkx.Graph`
        """
        return self.graph()
def graph(self):
"""
Generate the graph of the circuit.
Returns
-------
G: :class:`networkx.Graph`
graph object [#]_ .
References
----------
.. [#] https://networkx.github.io/
"""
try:
import networkx as nx
except ImportError as e:
raise ImportError('networkx package as not been installed and is required. ')
G = nx.Graph()
# Adding network nodes
G.add_nodes_from([it for it in self.networks_dict(self.connections)])
# Adding edges in the graph between connections and networks
for (idx, cnx) in enumerate(self.connections):
cnx_name = 'X'+str(idx)
# Adding connection nodes and edges
G.add_node(cnx_name)
for (ntw, ntw_port) in cnx:
ntw_name = ntw.name
G.add_edge(cnx_name, ntw_name)
return G
    def is_connected(self) -> bool:
        """
        Check if the circuit's graph is connected.

        Check if every pair of vertices in the graph is connected.

        Returns
        -------
        bool
        """
        # Get the circuit graph. Will raise an error if the networkx package
        # is not installed.
        G = self.G

        return nx.algorithms.components.is_connected(G)
@property
def intersections_dict(self) -> dict:
"""
Return a dictionary of all intersections with associated ports and z0:
::
{ k: [(ntw1_name, ntw1_port), (ntw1_z0, ntw2_name, ntw2_port), ntw2_z0], ... }
"""
inter_dict = {}
# for k in range(self.connections_nb):
# # get all edges connected to intersection Xk
# inter_dict[k] = list(nx.algorithms.boundary.edge_boundary(self.G, ('X'+str(k),) ))
for (k, cnx) in enumerate(self.connections):
inter_dict[k] = [(ntw, ntw_port, ntw.z0[0, ntw_port]) \
for (ntw, ntw_port) in cnx]
return inter_dict
    @property
    def edges(self) -> list:
        """
        Return the list of all circuit connections

        Returns
        -------
        list
            Edges of the circuit graph (network <-> intersection pairs).
        """
        return list(self.G.edges)
    @property
    def edge_labels(self) -> dict:
        """
        Return a dictionary describing the port and z0 of all graph edges.

        Dictionary is in the form:
        ::
            {('ntw1_name', 'X0'): '3 (50+0j)',
             ('ntw2_name', 'X0'): '0 (50+0j)',
             ('ntw2_name', 'X1'): '2 (50+0j)', ... }

        which can be used in `networkx.draw_networkx_edge_labels`
        """
        # for all intersections,
        # get the N interconnected networks and associated ports and z0
        # and forge the edge label dictionary containing labels between
        # two nodes
        edge_labels = {}
        for it in self.intersections_dict.items():
            k, cnx = it
            for idx in range(len(cnx)):
                ntw, ntw_port, ntw_z0 = cnx[idx]
                # label: "<port>\n<z0 rounded to 1 decimal>"
                edge_labels[(ntw.name, 'X'+str(k))] = str(ntw_port)+'\n'+\
                    str(np.round(ntw_z0, decimals=1))

        return edge_labels
def _Y_k(self, cnx: List[Tuple]) -> np.ndarray:
"""
Return the sum of the system admittances of each intersection.
Parameters
----------
cnx : list of tuples
each tuple contains (network, port)
Returns
-------
y_k : :class:`numpy.ndarray`
"""
y_ns = []
for (ntw, ntw_port) in cnx:
# formula (2)
y_ns.append(1/ntw.z0[:,ntw_port] )
y_k = np.array(y_ns).sum(axis=0) # shape: (nb_freq,)
return y_k
def _Xnn_k(self, cnx_k: List[Tuple]) -> np.ndarray:
"""
Return the reflection coefficients x_nn of the connection matrix [X]_k.
Parameters
----------
cnx_k : list of tuples
each tuple contains (network, port)
Returns
-------
X_nn : :class:`numpy.ndarray`
shape `f x n`
"""
X_nn = []
y_k = self._Y_k(cnx_k)
for (ntw, ntw_port) in cnx_k:
# formula (1)
X_nn.append( 2/(ntw.z0[:,ntw_port]*y_k) - 1)
return np.array(X_nn).T # shape: (nb_freq, nb_n)
    def _Xmn_k(self, cnx_k: List[Tuple]) -> np.ndarray:
        """
        Return the transmission coefficient X_mn of the mth column of
        intersection scattering matrix matrix [X]_k.

        Parameters
        ----------
        cnx_k : list of tuples
            each tuple contains (network, port)

        Returns
        -------
        X_mn : :class:`numpy.ndarray`
            shape `f x n`
        """
        # get the char.impedance of the n
        X_mn = []
        y_k = self._Y_k(cnx_k)

        # There is a problem in the case of two-ports connexion:
        # the formula (3) in P. Hallbjörner (2003) seems incorrect.
        # Instead of Z_n one should have sqrt(Z_1 x Z_2).
        # The formula works with respect to the example given in the paper
        # (3 port connection), but not with 2-port connections made with skrf
        if len(cnx_k) == 2:
            # special-cased 2-port node: use the geometric mean of the
            # two characteristic impedances
            z0s = []
            for (ntw, ntw_port) in cnx_k:
                z0s.append(ntw.z0[:, ntw_port])

            z0eq = np.array(z0s).prod(axis=0)

            for (ntw, ntw_port) in cnx_k:
                X_mn.append(2 / (np.sqrt(z0eq) * y_k))

        else:
            # formula (3)
            for (ntw, ntw_port) in cnx_k:
                X_mn.append(2 / (ntw.z0[:, ntw_port] * y_k))

        return np.array(X_mn).T  # shape: (nb_freq, nb_n)
    def _Xk(self, cnx_k: List[Tuple]) -> np.ndarray:
        """
        Return the scattering matrices [X]_k of the individual intersections k.

        Off-diagonal entries are the transmission coefficients X_mn and
        diagonal entries are the reflection coefficients X_nn.

        Parameters
        ----------
        cnx_k : list of tuples
            each tuple contains (network, port)

        Returns
        -------
        Xs : :class:`numpy.ndarray`
            shape `f x n x n`
        """
        Xnn = self._Xnn_k(cnx_k)  # shape: (nb_freq, nb_n)
        Xmn = self._Xmn_k(cnx_k)  # shape: (nb_freq, nb_n)

        # # for loop version
        # Xs = []
        # for (_Xnn, _Xmn) in zip(Xnn, Xmn):  # for all frequencies
        #     # repeat Xmn along the lines
        #     _X = np.tile(_Xmn, (len(_Xmn), 1))
        #     _X[np.diag_indices(len(_Xmn))] = _Xnn
        #     Xs.append(_X)
        # return np.array(Xs)  # shape : nb_freq, nb_n, nb_n

        # vectorized version: tile each frequency's Xmn row into a full
        # matrix, then overwrite the diagonal with Xnn
        nb_n = Xnn.shape[1]
        Xs = np.tile(Xmn, (nb_n, 1, 1)).swapaxes(1, 0)
        Xs[:, np.arange(nb_n), np.arange(nb_n)] = Xnn
        return Xs  # shape : nb_freq, nb_n, nb_n

        # TEST : Could we use media.splitter() instead ? -> does not work
        # _media = media.DefinedGammaZ0(frequency=self.frequency)
        # Xs = _media.splitter(len(cnx_k), z0=self._cnx_z0(cnx_k))
        # return Xs.s
    @property
    def X(self) -> np.ndarray:
        """
        Return the concatenated intersection matrix [X] of the circuit.

        It is composed of the individual intersection matrices [X]_k assembled
        by block diagonal.

        Returns
        -------
        X : :class:`numpy.ndarray`
            shape `f x dim x dim`

        Note
        ----
        There is a numerical bottleneck in this function,
        when creating the block diagonal matrice [X] from the [X]_k matrices.
        The difficulty comes from the fact that the shape of each [X]_k
        matrix is `fxnxn`, so there is a loop over the frequencies
        to create f times a block matrix [X]. I am still looking for a
        vectorized implementation but didn't find it.
        """
        # Xk = []
        # for cnx in self.connections:
        #     Xk.append(self._Xk(cnx))
        # Xk = np.array(Xk)

        # #X = np.zeros(len(C.frequency), )
        # Xf = []
        # for (idx, f) in enumerate(self.frequency):
        #     Xf.append(block_diag(*Xk[:,idx,:]))
        # return np.array(Xf)  # shape: (nb_frequency, nb_inter*nb_n, nb_inter*nb_n)

        # Slightly faster version
        Xks = [self._Xk(cnx) for cnx in self.connections]

        Xf = np.zeros((len(self.frequency), self.dim, self.dim), dtype='complex')
        # TODO: avoid this for loop which is a bottleneck for large frequencies
        for idx in np.nditer(np.arange(len(self.frequency))):
            mat_list = [Xk[idx, :] for Xk in Xks]
            # assemble the per-intersection blocks on the diagonal
            Xf[idx, :] = block_diag(*mat_list)  # bottleneck

        return Xf
    @property
    def C(self) -> np.ndarray:
        """
        Return the global scattering matrix of the networks.

        The individual networks' S-parameters are re-indexed so that the
        rows/columns follow the global connection ordering.

        Returns
        -------
        S : :class:`numpy.ndarray`
            Global scattering matrix of the networks.
            Shape `f x (nb_inter*nb_n) x (nb_inter*nb_n)`
        """
        # list all networks which are not considered as "ports",
        # that is which do not contain "port" in their network name
        ntws = {k: v for k, v in self.networks_dict().items() if 'port' not in k.lower()}

        # generate the port reordering indexes from each connections
        ntws_ports_reordering = {ntw: [] for ntw in ntws}
        for (idx_cnx, cnx) in self.connections_list:
            ntw, ntw_port = cnx
            if ntw.name in ntws.keys():
                ntws_ports_reordering[ntw.name].append([ntw_port, idx_cnx])

        # re-ordering scattering parameters
        S = np.zeros((len(self.frequency), self.dim, self.dim), dtype='complex')

        for (ntw_name, ntw_ports) in ntws_ports_reordering.items():
            # get the port re-ordering indexes (from -> to)
            ntw_ports = np.array(ntw_ports)
            # create the port permutations
            from_port = list(product(ntw_ports[:, 0], repeat=2))
            to_port = list(product(ntw_ports[:, 1], repeat=2))

            # print(ntw_name, from_port, to_port)
            for (_from, _to) in zip(from_port, to_port):
                # print(f'{_from} --> {_to}')
                S[:, _to[0], _to[1]] = ntws[ntw_name].s[:, _from[0], _from[1]]

        return S  # shape (nb_frequency, nb_inter*nb_n, nb_inter*nb_n)
    @property
    def s(self) -> np.ndarray:
        """
        Return the global scattering parameters of the circuit.

        Return the scattering parameters of both "inner" and "outer" ports.

        Returns
        -------
        S : :class:`numpy.ndarray`
            global scattering parameters of the circuit.
        """
        # transpose is necessary to get expected result
        # return np.transpose(self.X @ np.linalg.inv(np.identity(self.dim) - self.C @ self.X), axes=(0, 2, 1))
        # does not use the @ operator for backward Python version compatibility
        return np.transpose(np.matmul(self.X, np.linalg.inv(np.identity(self.dim) - np.matmul(self.C, self.X))), axes=(0, 2, 1))
@property
def port_indexes(self) -> list:
"""
Return the indexes of the "external" ports. These must be labelled "port".
Returns
-------
port_indexes : list
"""
port_indexes = []
for (idx_cnx, cnx) in enumerate(chain.from_iterable(self.connections)):
ntw, ntw_port = cnx
if 'port' in str.lower(ntw.name):
port_indexes.append(idx_cnx)
return port_indexes
def _cnx_z0(self, cnx_k: List[Tuple]) -> np.ndarray:
"""
Return the characteristic impedances of a specific connections.
Parameters
----------
cnx_k : list of tuples
each tuple contains (network, port)
Returns
-------
z0s : :class:`numpy.ndarray`
shape `f x nb_ports_at_cnx`
"""
z0s = []
for (ntw, ntw_port) in cnx_k:
z0s.append(ntw.z0[:,ntw_port])
return np.array(z0s).T # shape (nb_freq, nb_ports_at_cnx)
    @property
    def port_z0(self) -> np.ndarray:
        """
        Return the external port impedances.

        Returns
        -------
        z0s : :class:`numpy.ndarray`
            shape `f x nb_ports`
        """
        z0s = []
        for cnx in self.connections:
            for (ntw, ntw_port) in cnx:
                z0s.append(ntw.z0[:, ntw_port])

        # keep only the rows corresponding to "external" ports
        return np.array(z0s)[self.port_indexes, :].T  # shape (nb_freq, nb_ports)
    @property
    def s_external(self) -> np.ndarray:
        """
        Return the scattering parameters for the external ports.

        Returns
        -------
        S : :class:`numpy.ndarray`
            Scattering parameters of the circuit for the external ports.
            Shape `f x nb_ports x nb_ports`
        """
        port_indexes = self.port_indexes
        # select the sub-matrix of the global S corresponding to the
        # external port indexes only
        a, b = np.meshgrid(port_indexes, port_indexes)
        S_ext = self.s[:, a, b]
        return S_ext  # shape (nb_frequency, nb_ports, nb_ports)
    @property
    def network(self) -> 'Network':
        """
        Return the Network associated to external ports.

        Returns
        -------
        ntw : :class:`~skrf.network.Network`
            Network associated to external ports
        """
        ntw = Network()
        ntw.frequency = self.frequency
        ntw.z0 = self.port_z0
        ntw.s = self.s_external
        return ntw
    def s_active(self, a: NumberLike) -> np.ndarray:
        """
        Return "active" s-parameters of the circuit's network for a defined wave excitation `a`.

        The "active" s-parameter at a port is the reflection coefficients
        when other ports are excited. It is an important quantity for active
        phased array antennas.

        Active s-parameters are defined by [#]_:

        .. math::

            \mathrm{active}(s)_{mn} = \sum_i s_{mi} \\frac{a_i}{a_n}

        Parameters
        ----------
        a : complex array of shape (n_ports)
            forward wave complex amplitude (power-wave formulation [#]_)

        Returns
        -------
        s_act : complex array of shape (n_freqs, n_ports)
            active s-parameters for the excitation a

        References
        ----------
        .. [#] D. M. Pozar, IEEE Trans. Antennas Propag. 42, 1176 (1994).

        .. [#] D. Williams, IEEE Microw. Mag. 14, 38 (2013).
        """
        # delegate to the external-ports Network object
        return self.network.s_active(a)
    def z_active(self, a: NumberLike) -> np.ndarray:
        """
        Return the "active" Z-parameters of the circuit's network for a defined wave excitation a.

        The "active" Z-parameters are defined by:

        .. math::

            \mathrm{active}(z)_{m} = z_{0,m} \\frac{1 + \mathrm{active}(s)_m}{1 - \mathrm{active}(s)_m}

        where :math:`z_{0,m}` is the characteristic impedance and
        :math:`\mathrm{active}(s)_m` the active S-parameter of port :math:`m`.

        Parameters
        ----------
        a : complex array of shape (n_ports)
            forward wave complex amplitude

        Returns
        -------
        z_act : complex array of shape (nfreqs, nports)
            active Z-parameters for the excitation a

        See Also
        --------
            s_active : active S-parameters
            y_active : active Y-parameters
            vswr_active : active VSWR
        """
        # delegate to the external-ports Network object
        return self.network.z_active(a)
def y_active(self, a: NumberLike) -> np.ndarray:
    """Active Y-parameters of the circuit's network for excitation `a`.

    active(y)_m = y0_m * (1 - active(s)_m) / (1 + active(s)_m).

    Parameters
    ----------
    a : complex array of shape (n_ports)
        Forward wave complex amplitudes.

    Returns
    -------
    y_act : complex array of shape (nfreqs, nports)

    See Also
    --------
    s_active, z_active, vswr_active
    """
    return self.network.y_active(a)
def vswr_active(self, a: NumberLike) -> np.ndarray:
    """Active VSWR of the circuit's network for excitation `a`.

    active(vswr)_m = (1 + |active(s)_m|) / (1 - |active(s)_m|).

    Parameters
    ----------
    a : complex array of shape (n_ports)
        Forward wave complex amplitudes.

    Returns
    -------
    vswr_act : complex array of shape (nfreqs, nports)

    See Also
    --------
    s_active, z_active, y_active
    """
    return self.network.vswr_active(a)
@property
def z0(self) -> np.ndarray:
    """Characteristic impedances of all "internal" ports.

    Returns
    -------
    z0 : complex array of shape (nfreqs, nports)
        One column per entry of `connections_list`.
    """
    per_port = [ntw.z0[:, ntw_port]
                for _, (ntw, ntw_port) in self.connections_list]
    return np.array(per_port).T
@property
def connections_pair(self) -> List:
    """Connections grouped two by two.

    Each circuit connection joins a specific pair of (network, port, z0).

    Returns
    -------
    connections_pair : list
        List of consecutive pairs taken from `connections_list`.
    """
    items = self.connections_list
    return [items[k:k + 2] for k in range(0, len(items), 2)]
@property
def _currents_directions(self) -> np.ndarray:
    """Index array encoding the sign convention of internal currents.

    Currents are positive when entering an internal network. Used by the
    internal current/voltage calculations.

    Returns
    -------
    directions : array of int, shape (nports, 2)
    """
    directions = np.zeros((self.dim, 2), dtype='int')
    for (idx_a, _), (idx_b, _) in self.connections_pair:
        directions[idx_a, :] = idx_a, idx_b
        directions[idx_b, :] = idx_b, idx_a
    return directions
def _a(self, a_external: NumberLike) -> np.ndarray:
    """Expand an external excitation vector onto all internal ports.

    Parameters
    ----------
    a_external : array
        Power-wave input vector at the circuit's external ports.

    Returns
    -------
    a_internal : array
        Zero vector of length `self.dim` with the external values filled
        in at the external-port positions.
    """
    a_internal = np.zeros(self.dim, dtype='complex')
    a_internal[self.port_indexes] = a_external
    return a_internal
def _a_external(self, power: NumberLike, phase: NumberLike) -> np.ndarray:
    """Wave input vector at the circuit's external ports.

    Defined as a = sqrt(2*P) * exp(j*phi); the factor 2 makes `a` a
    peak-value quantity.

    Parameters
    ----------
    power : list or array
        Input power at external ports in Watts [W].
    phase : list or array
        Input phase at external ports in radian [rad].

    Returns
    -------
    a_external : array

    Raises
    ------
    ValueError
        If the power or phase array length does not match the port count.
    """
    nb_ports = len(self.port_indexes)
    if len(power) != nb_ports:
        raise ValueError('Length of power array does not match the number of ports of the circuit.')
    if len(phase) != nb_ports:
        raise ValueError('Length of phase array does not match the number of ports of the circuit.')
    magnitude = np.sqrt(2 * np.array(power))
    return magnitude * np.exp(1j * np.array(phase))
def _b(self, a_internal: NumberLike) -> np.ndarray:
    """Wave output vector at internal ports: b = S @ a.

    Parameters
    ----------
    a_internal : array
        Wave input vector at internal ports (see `_a`, which can build it
        from external power/phase excitations).

    Returns
    -------
    b_internal : array
        Wave output vector at internal ports.
    """
    return np.matmul(self.s, a_internal)
def currents(self, power: NumberLike, phase: NumberLike) -> np.ndarray:
    """Currents at internal ports.

    Current is counted positive when entering a node; external currents
    have the opposite sign of the matching internal ones, since internal
    currents flow into the "port" networks.

    Parameters
    ----------
    power : list or array
        Input power at external ports in Watts [W].
    phase : list or array
        Input phase at external ports in radian [rad].

    Returns
    -------
    I : complex array of shape (nfreqs, nports)
        Currents in Amperes [A] (peak) at internal ports.

    Raises
    ------
    NotImplementedError
        If any connection joins more than two ports: the calculation
        below only supports pairwise connections. A multi-way junction
        can always be modelled explicitly by wiring a splitting Network
        (tee, hybrid, ...) between the other networks instead.
    """
    for _, intersection in self.intersections_dict.items():
        if len(intersection) > 2:
            raise NotImplementedError('Connections between more than 2 ports are not supported (yet?)')

    a = self._a(self._a_external(power, phase))
    b = self._b(a)
    z0s = self.z0
    directions = self._currents_directions
    return (b[:, directions[:, 0]] - b[:, directions[:, 1]]) / np.sqrt(z0s)
def voltages(self, power: NumberLike, phase: NumberLike) -> np.ndarray:
    """Voltages at internal ports.

    Parameters
    ----------
    power : list or array
        Input power at external ports in Watts [W].
    phase : list or array
        Input phase at external ports in radian [rad].

    Returns
    -------
    V : complex array of shape (nfreqs, nports)
        Voltages in Volts [V] (peak) at internal ports.

    Raises
    ------
    NotImplementedError
        If any connection joins more than two ports (see `currents`).
    """
    for _, intersection in self.intersections_dict.items():
        if len(intersection) > 2:
            raise NotImplementedError('Connections between more than 2 ports are not supported (yet?)')

    a = self._a(self._a_external(power, phase))
    b = self._b(a)
    z0s = self.z0
    directions = self._currents_directions
    return (b[:, directions[:, 0]] + b[:, directions[:, 1]]) * np.sqrt(z0s)
def currents_external(self, power: NumberLike, phase: NumberLike) -> np.ndarray:
    """Currents at external ports (positive when entering the port).

    Parameters
    ----------
    power : list or array
        Input power at external ports in Watts [W].
    phase : list or array
        Input phase at external ports in radian [rad].

    Returns
    -------
    I : complex array of shape (nfreqs, nports)
        Currents in Amperes [A] (peak) at external ports.
    """
    a = self._a(self._a_external(power, phase))
    b = self._b(a)
    z0s = self.z0
    per_port = [(a[idx] - b[:, idx]) / np.sqrt(z0s[:, idx])
                for idx in self.port_indexes]
    return np.array(per_port).T
def voltages_external(self, power: NumberLike, phase: NumberLike) -> np.ndarray:
    """Voltages at external ports.

    Parameters
    ----------
    power : list or array
        Input power at external ports in Watts [W].
    phase : list or array
        Input phase at external ports in radian [rad].

    Returns
    -------
    V : complex array of shape (nfreqs, nports)
        Voltages in Volt [V] (peak) at ports.
    """
    a = self._a(self._a_external(power, phase))
    b = self._b(a)
    z0s = self.z0
    per_port = [(a[idx] + b[:, idx]) * np.sqrt(z0s[:, idx])
                for idx in self.port_indexes]
    return np.array(per_port).T
| 30.591571 | 126 | 0.557011 |
589a6ab68ded9181a1b56c64e9085578b5289752 | 2,140 | py | Python | tests/util_test.py | mcdobe100/arkouda | 499ecb502da214ee71e9923fda9487c65ec4d616 | [
"MIT"
] | null | null | null | tests/util_test.py | mcdobe100/arkouda | 499ecb502da214ee71e9923fda9487c65ec4d616 | [
"MIT"
] | null | null | null | tests/util_test.py | mcdobe100/arkouda | 499ecb502da214ee71e9923fda9487c65ec4d616 | [
"MIT"
] | null | null | null | from base_test import ArkoudaTest
from context import arkouda as ak
from arkouda.util import attach
class utilTest(ArkoudaTest):
    """Tests for arkouda.util.attach over the registerable array types."""

    def test_simple_attach(self):
        a = ak.array(["abc", "123", "def"])
        b = ak.arange(10)
        # Attach the Strings array and the pdarray to new objects, both
        # with and without an explicit type hint.
        a_attached = attach(a.name)
        a_typed_attach = attach(a.name, "strings")
        b_attached = attach(b.name)
        b_typed_attach = attach(b.name, "pdarray")

        self.assertTrue((a == a_attached).all())
        self.assertIsInstance(a_attached, ak.Strings)
        self.assertTrue((a == a_typed_attach).all())
        self.assertIsInstance(a_typed_attach, ak.Strings)
        self.assertTrue((b == b_attached).all())
        self.assertIsInstance(b_attached, ak.pdarray)
        self.assertTrue((b == b_typed_attach).all())
        self.assertIsInstance(b_typed_attach, ak.pdarray)

    def test_categorical_attach(self):
        strings = ak.array(["hurrah", ",", "hurrah", ",", "one", "by", "one",
                            "marching", "go", "ants", "the"])
        cat = ak.Categorical(strings)
        cat.register("catTest")

        attached = attach("catTest")
        self.assertTrue((cat == attached).all())
        self.assertIsInstance(attached, ak.Categorical)

        attached_typed = attach("catTest", "Categorical")
        self.assertTrue((cat == attached_typed).all())
        self.assertIsInstance(attached_typed, ak.Categorical)

    def test_segArray_attach(self):
        a = [10, 11, 12, 13, 14, 15]
        b = [20, 21]
        c = [30, 31, 32, 33]
        flat = a + b + c
        akflat = ak.array(flat)
        segments = ak.array([0, len(a), len(a) + len(b)])
        segarr = ak.SegArray(segments, akflat)
        segarr.register("segTest")

        attached = attach("segTest")
        self.assertTrue((segarr == attached).all())
        self.assertIsInstance(attached, ak.SegArray)

        attached_typed = attach("segTest", "SegArray")
        self.assertTrue((segarr == attached_typed).all())
        self.assertIsInstance(attached_typed, ak.SegArray)
11992f425ed30770f26b86064a641df89d475df2 | 6,483 | py | Python | tushare/util/formula.py | Federer1976/tushare | 1640f2beb200fa99e10dbba0fd19d0b7138f034e | [
"BSD-3-Clause"
] | 1 | 2019-02-15T12:59:02.000Z | 2019-02-15T12:59:02.000Z | tushare/util/formula.py | Federer1976/tushare | 1640f2beb200fa99e10dbba0fd19d0b7138f034e | [
"BSD-3-Clause"
] | null | null | null | tushare/util/formula.py | Federer1976/tushare | 1640f2beb200fa99e10dbba0fd19d0b7138f034e | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
def EMA(DF, N):
    """Exponential moving average of DF with span N (pandas ewm)."""
    return DF.ewm(span=N, min_periods=N - 1, adjust=True).mean()
def MA(DF, N):
    """Simple N-period rolling mean."""
    return DF.rolling(N).mean()
def SMA(DF, N, M):
    """Chinese-style smoothed moving average.

    Recurrence: Y[0] = X[0]; Y[t] = (X[t]*M + Y[t-1]*(N-M)) / N, with NaNs
    treated as 0 (as in the original recursive implementation).

    This is exactly an exponentially weighted mean with alpha = M/N and
    adjust=False, so it is computed vectorized; unlike the original loop it
    no longer assumes a 0..n-1 integer index and does not rely on
    element-by-element positional assignment.
    """
    return DF.fillna(0).ewm(alpha=float(M) / N, adjust=False).mean()
def ATR(DF, N):
    """Average True Range over N periods (uses module helpers MAX/ABS/REF/MA)."""
    close = DF['close']
    high = DF['high']
    low = DF['low']
    true_range = MAX(MAX((high - low), ABS(REF(close, 1) - high)),
                     ABS(REF(close, 1) - low))
    return MA(true_range, N)
def HHV(DF, N):
    """Highest value over a rolling N-period window."""
    return DF.rolling(N).max()
def LLV(DF, N):
    """Lowest value over a rolling N-period window."""
    return DF.rolling(N).min()
def SUM(DF, N):
    """Rolling N-period sum."""
    return DF.rolling(N).sum()
def ABS(DF):
    """Element-wise absolute value (works for Series and plain scalars)."""
    return abs(DF)
def MAX(A, B):
    """Element-wise maximum of A and B (delegates to IF)."""
    return IF(A > B, A, B)


def MIN(A, B):
    """Element-wise minimum of A and B (delegates to IF)."""
    return IF(A < B, A, B)
def IF(COND, V1, V2):
    """Element-wise conditional: V1 where COND is true, otherwise V2.

    Keeps the historical behaviour of writing the result back into V1 when
    V1 is a Series (callers use the returned object), and fixes the case of
    a scalar V1, which previously raised TypeError on item assignment
    (e.g. the inner `IF(STM == SBM, 0, ...)` call inside ADTM).
    """
    var = np.where(COND, V1, V2)
    if isinstance(V1, pd.Series):
        # Preserve the original in-place update of V1.
        V1[:] = var
        return V1
    # Scalar V1: build a fresh Series, aligned with COND when possible.
    return pd.Series(var, index=getattr(COND, 'index', None))
def REF(DF, N):
    """Value of DF N periods ago (leading entries become NaN).

    The original computed DF - DF.diff(N), which is algebraically exactly
    DF.shift(N).
    """
    return DF.shift(N)
def STD(DF, N):
    """Rolling N-period (sample) standard deviation."""
    return DF.rolling(N).std()
def MACD(DF, FAST, SLOW, MID):
    """MACD indicator: DIFF and DEA lines plus the MACD histogram."""
    diff_line = EMA(DF, FAST) - EMA(DF, SLOW)
    dea_line = EMA(diff_line, MID)
    macd_hist = (diff_line - dea_line) * 2
    return pd.DataFrame({'DIFF': diff_line, 'DEA': dea_line, 'MACD': macd_hist})


def KDJ(DF, N, M1, M2):
    """Stochastic KDJ oscillator."""
    close = DF['close']
    high = DF['high']
    low = DF['low']
    rsv = (close - LLV(low, N)) / (HHV(high, N) - LLV(low, N)) * 100
    k = SMA(rsv, M1, 1)
    d = SMA(k, M2, 1)
    j = 3 * k - 2 * d
    return pd.DataFrame({'KDJ_K': k, 'KDJ_D': d, 'KDJ_J': j})


def OSC(DF, N, M):
    """Oscillator: deviation of close from its N-period mean, with EMA."""
    close = DF['close']
    osc_line = (close - MA(close, N)) * 100
    return pd.DataFrame({'OSC': osc_line, 'MAOSC': EMA(osc_line, M)})


def BBI(DF, N1, N2, N3, N4):
    """Bull-and-bear index: mean of four moving averages of the close."""
    close = DF['close']
    bbi = (MA(close, N1) + MA(close, N2) + MA(close, N3) + MA(close, N4)) / 4
    return pd.DataFrame({'BBI': bbi})
def BBIBOLL(DF, N1, N2, N3, N4, N, M):
    """BBI Bollinger bands: BBI centre line +/- M rolling std deviations.

    Bug fix: BBI() returns a DataFrame, so the centre line must be its
    'BBI' column. The previous code did band arithmetic on the whole
    DataFrame and then tried to put that DataFrame into another DataFrame
    constructor, which fails.
    """
    bbiboll = BBI(DF, N1, N2, N3, N4)['BBI']
    UPER = bbiboll + M * STD(bbiboll, N)
    DOWN = bbiboll - M * STD(bbiboll, N)
    DICT = {'BBIBOLL': bbiboll, 'UPER': UPER, 'DOWN': DOWN}
    return pd.DataFrame(DICT)
def PBX(DF, N1, N2, N3, N4, N5, N6):
    """Waterfall lines: six averaged triple-EMA curves of the close."""
    close = DF['close']

    def _pbx(n):
        # Average of EMAs at periods n, 2n and 4n.
        return (EMA(close, n) + EMA(close, 2 * n) + EMA(close, 4 * n)) / 3

    return pd.DataFrame({'PBX1': _pbx(N1), 'PBX2': _pbx(N2),
                         'PBX3': _pbx(N3), 'PBX4': _pbx(N4),
                         'PBX5': _pbx(N5), 'PBX6': _pbx(N6)})


def BOLL(DF, N):
    """Bollinger bands: N-period mean +/- two standard deviations."""
    close = DF['close']
    mid = MA(close, N)
    spread = 2 * STD(close, N)
    return pd.DataFrame({'BOLL': mid, 'UB': mid + spread, 'LB': mid - spread})


def ROC(DF, N, M):
    """Rate of change of the close over N periods, with an M-period MA."""
    close = DF['close']
    roc_line = 100 * (close - REF(close, N)) / REF(close, N)
    return pd.DataFrame({'ROC': roc_line, 'MAROC': MA(roc_line, M)})


def MTM(DF, N, M):
    """Momentum: close minus close N periods ago, with an M-period MA."""
    close = DF['close']
    mtm_line = close - REF(close, N)
    return pd.DataFrame({'MTM': mtm_line, 'MTMMA': MA(mtm_line, M)})


def MFI(DF, N):
    """Money Flow Index over N periods."""
    close = DF['close']
    high = DF['high']
    low = DF['low']
    volume = DF['vol']
    typical = (close + high + low) / 3
    ratio = SUM(IF(typical > REF(typical, 1), typical * volume, 0), N) / \
        SUM(IF(typical < REF(typical, 1), typical * volume, 0), N)
    return pd.DataFrame({'MFI': 100 - (100 / (1 + ratio))})


def SKDJ(DF, N, M):
    """Slow stochastic KD oscillator."""
    close = DF['close']
    lowest = LLV(DF['low'], N)
    highest = HHV(DF['high'], N)
    rsv = EMA((close - lowest) / (highest - lowest) * 100, M)
    k = EMA(rsv, M)
    d = MA(k, M)
    return pd.DataFrame({'SKDJ_K': k, 'SKDJ_D': d})


def WR(DF, N, N1):
    """Williams %R for two lookback lengths."""
    high = DF['high']
    low = DF['low']
    close = DF['close']
    wr1 = 100 * (HHV(high, N) - close) / (HHV(high, N) - LLV(low, N))
    wr2 = 100 * (HHV(high, N1) - close) / (HHV(high, N1) - LLV(low, N1))
    return pd.DataFrame({'WR1': wr1, 'WR2': wr2})


def BIAS(DF, N1, N2, N3):
    """Bias ratios: percentage deviation of close from three MAs."""
    close = DF['close']

    def _bias(n):
        return (close - MA(close, n)) / MA(close, n) * 100

    return pd.DataFrame({'BIAS1': _bias(N1), 'BIAS2': _bias(N2),
                         'BIAS3': _bias(N3)})


def RSI(DF, N1, N2, N3):
    """Relative Strength Index for three periods, SMA-smoothed:
    RSI_n = SMA(max(C-LC, 0), n, 1) / SMA(|C-LC|, n, 1) * 100."""
    close = DF['close']
    prev_close = REF(close, 1)

    def _rsi(n):
        gains = SMA(MAX(close - prev_close, 0), n, 1)
        moves = SMA(ABS(close - prev_close), n, 1)
        return gains / moves * 100

    return pd.DataFrame({'RSI1': _rsi(N1), 'RSI2': _rsi(N2), 'RSI3': _rsi(N3)})
def ADTM(DF, N, M):
    """Dynamic buy/sell momentum indicator."""
    high = DF['high']
    low = DF['low']
    open_ = DF['open']
    dtm = IF(open_ <= REF(open_, 1), 0,
             MAX((high - open_), (open_ - REF(open_, 1))))
    dbm = IF(open_ >= REF(open_, 1), 0,
             MAX((open_ - low), (open_ - REF(open_, 1))))
    stm = SUM(dtm, N)
    sbm = SUM(dbm, N)
    adtm_line = IF(stm > sbm, (stm - sbm) / stm,
                   IF(stm == sbm, 0, (stm - sbm) / sbm))
    return pd.DataFrame({'ADTM': adtm_line, 'MAADTM': MA(adtm_line, M)})


def DDI(DF, N, N1, M, M1):
    """Directional standard-deviation (DDI) index."""
    high = DF['high']
    low = DF['low']
    dmz = IF((high + low) <= (REF(high, 1) + REF(low, 1)), 0,
             MAX(ABS(high - REF(high, 1)), ABS(low - REF(low, 1))))
    dmf = IF((high + low) >= (REF(high, 1) + REF(low, 1)), 0,
             MAX(ABS(high - REF(high, 1)), ABS(low - REF(low, 1))))
    diz = SUM(dmz, N) / (SUM(dmz, N) + SUM(dmf, N))
    dif = SUM(dmf, N) / (SUM(dmf, N) + SUM(dmz, N))
    ddi_line = diz - dif
    addi = SMA(ddi_line, N1, M)
    return pd.DataFrame({'DDI': ddi_line, 'ADDI': addi, 'AD': MA(addi, M1)})
| 24.83908 | 93 | 0.489588 |
6d419aba0ac21e540f99bd5f81077342d222f41f | 157 | py | Python | tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_PolyTrend_Seasonal_Hour_AR.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_PolyTrend_Seasonal_Hour_AR.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 1 | 2019-11-30T23:39:38.000Z | 2019-12-01T04:34:35.000Z | tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_PolyTrend_Seasonal_Hour_AR.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['PolyTrend'] , ['Seasonal_Hour'] , ['AR'] ); | 39.25 | 79 | 0.745223 |
cda58dfc7618d0aeb296932b58ee50b1ec94bbe9 | 303 | py | Python | python/reverse_a_string_using_function.py | mujib2953/general-codes | e944ceac62c89d3fc15bf7b0ed42cac8eb510296 | [
"Apache-2.0"
] | null | null | null | python/reverse_a_string_using_function.py | mujib2953/general-codes | e944ceac62c89d3fc15bf7b0ed42cac8eb510296 | [
"Apache-2.0"
] | null | null | null | python/reverse_a_string_using_function.py | mujib2953/general-codes | e944ceac62c89d3fc15bf7b0ed42cac8eb510296 | [
"Apache-2.0"
] | null | null | null | # Author: Mujib Ansari
# Date: Jan 21, 2021
# Problem Statement: WAP to reverse a string using function
def reverse_string(s):
    """Return the string reversed.

    Uses slicing instead of the original character-by-character prepend
    loop (which was O(n^2) and shadowed the builtin `str`).
    """
    return s[::-1]
# Demo: print a sample string and its reversal.
sample = "Arigato"
print("Original string : ", sample)
print("Reverse string : ", reverse_string(sample))
| 18.9375 | 60 | 0.60396 |
8df16a8574b726fe974d6ea9c51117fff3ef49ac | 31,389 | py | Python | third_party/depot_tools/git_common.py | gengleilei/wee8 | a7bff18685ddfc7f16de825c9d3a12432d4138d5 | [
"BSD-3-Clause"
] | 1 | 2020-10-10T02:27:19.000Z | 2020-10-10T02:27:19.000Z | third_party/depot_tools/git_common.py | gengleilei/wee8 | a7bff18685ddfc7f16de825c9d3a12432d4138d5 | [
"BSD-3-Clause"
] | null | null | null | third_party/depot_tools/git_common.py | gengleilei/wee8 | a7bff18685ddfc7f16de825c9d3a12432d4138d5 | [
"BSD-3-Clause"
] | 1 | 2020-11-03T15:15:26.000Z | 2020-11-03T15:15:26.000Z | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Monkeypatch IMapIterator so that Ctrl-C can kill everything properly.
# Derived from https://gist.github.com/aljungberg/626518
from __future__ import print_function
from __future__ import unicode_literals
import multiprocessing.pool
import sys
import threading
from multiprocessing.pool import IMapIterator
def wrapper(func):
    """Wrap IMapIterator.next so a missing/zero timeout becomes a huge one.

    Waiting with a (very long) timeout instead of blocking forever keeps
    the wait interruptible, which lets Ctrl-C kill everything properly.
    """
    def wrap(self, timeout=None):
        fallback = (1 << 31 if sys.version_info.major == 2
                    else threading.TIMEOUT_MAX)
        return func(self, timeout=timeout or fallback)
    return wrap
# Patch both the py2 (`next`) and py3 (`__next__`) iteration entry points so
# every IMapIterator wait is bounded (and therefore Ctrl-C friendly).
IMapIterator.next = wrapper(IMapIterator.next)
IMapIterator.__next__ = IMapIterator.next
# TODO(iannucci): Monkeypatch all other 'wait' methods too.
import binascii
import collections
import contextlib
import functools
import logging
import os
import re
import setup_color
import shutil
import signal
import tempfile
import textwrap
import subprocess2
from io import BytesIO
if sys.version_info.major == 2:
# On Python 3, BrokenPipeError is raised instead.
BrokenPipeError = IOError
ROOT = os.path.abspath(os.path.dirname(__file__))
IS_WIN = sys.platform == 'win32'
TEST_MODE = False
def win_find_git():
    """Locate git.exe or git.bat on %PATH% (Windows); raise when absent."""
    for directory in os.environ.get('PATH', '').split(os.pathsep):
        for basename in ('git.exe', 'git.bat'):
            candidate = os.path.join(directory, basename)
            if os.path.isfile(candidate):
                return candidate
    raise ValueError('Could not find Git on PATH.')
GIT_EXE = 'git' if not IS_WIN else win_find_git()
FREEZE = 'FREEZE'
FREEZE_SECTIONS = {
'indexed': 'soft',
'unindexed': 'mixed'
}
FREEZE_MATCHER = re.compile(r'%s.(%s)' % (FREEZE, '|'.join(FREEZE_SECTIONS)))
# NOTE: This list is DEPRECATED in favor of the Infra Git wrapper:
# https://chromium.googlesource.com/infra/infra/+/master/go/src/infra/tools/git
#
# New entries should be added to the Git wrapper, NOT to this list. "git_retry"
# is, similarly, being deprecated in favor of the Git wrapper.
#
# ---
#
# Retry a git operation if git returns a error response with any of these
# messages. It's all observed 'bad' GoB responses so far.
#
# This list is inspired/derived from the one in ChromiumOS's Chromite:
# <CHROMITE>/lib/git.py::GIT_TRANSIENT_ERRORS
#
# It was last imported from '7add3ac29564d98ac35ce426bc295e743e7c0c02'.
GIT_TRANSIENT_ERRORS = (
# crbug.com/285832
r'!.*\[remote rejected\].*\(error in hook\)',
# crbug.com/289932
r'!.*\[remote rejected\].*\(failed to lock\)',
# crbug.com/307156
r'!.*\[remote rejected\].*\(error in Gerrit backend\)',
# crbug.com/285832
r'remote error: Internal Server Error',
# crbug.com/294449
r'fatal: Couldn\'t find remote ref ',
# crbug.com/220543
r'git fetch_pack: expected ACK/NAK, got',
# crbug.com/189455
r'protocol error: bad pack header',
# crbug.com/202807
r'The remote end hung up unexpectedly',
# crbug.com/298189
r'TLS packet with unexpected length was received',
# crbug.com/187444
r'RPC failed; result=\d+, HTTP code = \d+',
# crbug.com/388876
r'Connection timed out',
# crbug.com/430343
# TODO(dnj): Resync with Chromite.
r'The requested URL returned error: 5\d+',
r'Connection reset by peer',
r'Unable to look up',
r'Couldn\'t resolve host',
)
GIT_TRANSIENT_ERRORS_RE = re.compile('|'.join(GIT_TRANSIENT_ERRORS),
re.IGNORECASE)
# git's for-each-ref command first supported the upstream:track token in its
# format string in version 1.9.0, but some usages were broken until 2.3.0.
# See git commit b6160d95 for more information.
MIN_UPSTREAM_TRACK_GIT_VERSION = (2, 3)
class BadCommitRefException(Exception):
    """Raised when one of the supplied refs does not resolve to a commit."""

    def __init__(self, refs):
        super(BadCommitRefException, self).__init__(
            'one of %s does not seem to be a valid commitref.' % str(refs))
def memoize_one(**kwargs):
    """Memoize a single-argument pure function; None results are not cached.

    Kwargs:
      threadsafe (bool) - REQUIRED. Whether cache access is guarded by a
        lock. Kept as an explicit kwarg so callers must verbosely pick
        True or False.

    The decorated function gains four methods:
      * get(key, default=None) - read a value from the cache.
      * set(key, value) - store a value in the cache.
      * clear() - drop the entire cache (useful for unittests).
      * update(other) - merge a dict into the cache.
    """
    assert 'threadsafe' in kwargs, 'Must specify threadsafe={True,False}'
    threadsafe = kwargs['threadsafe']

    if threadsafe:
        def guarded(lock, fn):
            def call_with_lock(*args, **kw):
                with lock:
                    return fn(*args, **kw)
            return call_with_lock
    else:
        def guarded(_lock, fn):
            return fn

    def decorator(f):
        # The lock is created per decorated function, so one
        # memoize_one(...) object can safely decorate several functions:
        #
        #   memoizer = memoize_one(threadsafe=True)
        #
        #   @memoizer
        #   def fn1(val): ...
        #
        #   @memoizer
        #   def fn2(val): ...
        lock = threading.Lock() if threadsafe else None
        cache = {}
        cache_get = guarded(lock, cache.get)
        cache_set = guarded(lock, cache.__setitem__)

        @functools.wraps(f)
        def inner(arg):
            value = cache_get(arg)
            if value is None:
                value = f(arg)
                if value is not None:
                    cache_set(arg, value)
            return value

        inner.get = cache_get
        inner.set = cache_set
        inner.clear = guarded(lock, cache.clear)
        inner.update = guarded(lock, cache.update)
        return inner

    return decorator
def _ScopedPool_initer(orig, orig_args): # pragma: no cover
"""Initializer method for ScopedPool's subprocesses.
This helps ScopedPool handle Ctrl-C's correctly.
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
if orig:
orig(*orig_args)
@contextlib.contextmanager
def ScopedPool(*args, **kwargs):
    """Context manager yielding a multiprocessing pool that always cleans
    up correctly, including when the body raises.

    *args - Arguments to multiprocessing.pool

    Kwargs:
      kind ('threads', 'procs') - The type of underlying coprocess to use.
      **etc - Arguments to multiprocessing.pool
    """
    if kwargs.pop('kind', None) == 'threads':
        pool = multiprocessing.pool.ThreadPool(*args, **kwargs)
    else:
        # Wrap any user initializer so worker processes ignore SIGINT.
        user_init = kwargs.get('initializer')
        user_init_args = kwargs.get('initargs', ())
        kwargs['initializer'] = _ScopedPool_initer
        kwargs['initargs'] = user_init, user_init_args
        pool = multiprocessing.pool.Pool(*args, **kwargs)

    try:
        yield pool
        pool.close()
    except:
        pool.terminate()
        raise
    finally:
        pool.join()
class ProgressPrinter(object):
    """Background thread that repeatedly re-prints a one-line counter status."""

    def __init__(self, fmt, enabled=None, fout=sys.stderr, period=0.5):
        """Create a ProgressPrinter.

        Use it as a context manager which produces a simple 'increment'
        method:

          with ProgressPrinter('(%%(count)d/%d)' % 1000) as inc:
            for i in xrange(1000):
              # do stuff
              if i % 10 == 0:
                inc(10)

        Args:
          fmt - String format with a single '%(count)d' where the counter
            value should go.
          enabled (bool) - If None, defaults to True when logging.getLogger()
            is set to INFO or more verbose.
          fout (file-like) - The stream to print status messages to.
          period (float) - Seconds the printer thread waits between prints.
        """
        self.fmt = fmt
        if enabled is None:  # pragma: no cover
            self.enabled = logging.getLogger().isEnabledFor(logging.INFO)
        else:
            self.enabled = enabled

        self._count = 0
        self._dead = False
        self._dead_cond = threading.Condition()
        self._stream = fout
        self._thread = threading.Thread(target=self._run)
        self._period = period

    def _emit(self, s):
        # '\r' rewrites the current terminal line in place.
        if self.enabled:
            self._stream.write('\r' + s)
            self._stream.flush()

    def _run(self):
        with self._dead_cond:
            while not self._dead:
                self._emit(self.fmt % {'count': self._count})
                self._dead_cond.wait(self._period)
            # Final print gets a newline so later output starts fresh.
            self._emit((self.fmt + '\n') % {'count': self._count})

    def inc(self, amount=1):
        self._count += amount

    def __enter__(self):
        self._thread.start()
        return self.inc

    def __exit__(self, _exc_type, _exc_value, _traceback):
        self._dead = True
        with self._dead_cond:
            self._dead_cond.notifyAll()
        self._thread.join()
        del self._thread
def once(function):
    """Decorate |function| so that it only performs its action once, no
    matter how many times the decorated |function| is called.

    Improvement: functools.wraps preserves the wrapped function's name and
    docstring, which the original wrapper lost.
    """
    # A one-element list instead of `nonlocal` keeps Python 2 compatibility.
    has_run = [False]

    @functools.wraps(function)
    def _wrapper(*args, **kwargs):
        if not has_run[0]:
            has_run[0] = True
            function(*args, **kwargs)

    return _wrapper
def unicode_repr(s):
    """repr() of a string with any Python 2 'u' prefix stripped."""
    rendered = repr(s)
    if rendered.startswith('u'):
        return rendered[1:]
    return rendered
## Git functions
def die(message, *args):
    """Print a dedented, %-formatted message to stderr and exit(1)."""
    print(textwrap.dedent(message % args), file=sys.stderr)
    sys.exit(1)
def blame(filename, revision=None, porcelain=False, abbrev=None, *_args):
    """Run `git blame` on |filename| and return its output."""
    cmd = ['blame']
    if porcelain:
        cmd.append('-p')
    if revision is not None:
        cmd.append(revision)
    if abbrev is not None:
        cmd.append('--abbrev=%d' % abbrev)
    cmd.extend(['--', filename])
    return run(*cmd)
def branch_config(branch, option, default=None):
    """Read `branch.<branch>.<option>` from git config."""
    return get_config('branch.%s.%s' % (branch, option), default=default)


def branch_config_map(option):
    """Return {branch: <|option| value>} for all branches."""
    try:
        matcher = re.compile(r'^branch\.(.*)\.%s$' % option)
        lines = get_config_regexp(matcher.pattern)
        return {matcher.match(k).group(1): v
                for k, v in (line.split() for line in lines)}
    except subprocess2.CalledProcessError:
        return {}
def branches(use_limit=True, *args):
    """Yield local branch names, dying when there are too many.

    The limit protects tools that iterate branches; it is configurable via
    the `depot-tools.branch-limit` config key.
    """
    NO_BRANCH = ('* (no branch', '* (detached', '* (HEAD detached')

    key = 'depot-tools.branch-limit'
    limit = get_config_int(key, 20)

    raw_branches = run('branch', *args).splitlines()

    num = len(raw_branches)

    if use_limit and num > limit:
        die("""\
Your git repo has too many branches (%d/%d) for this tool to work well.
You may adjust this limit by running:
git config %s <new_limit>
You may also try cleaning up your old branches by running:
git cl archive
""", num, limit, key)

    for line in raw_branches:
        if line.startswith(NO_BRANCH):
            continue
        yield line.split()[-1]
def get_config(option, default=None):
    """Read a single git config value, or |default| when unset."""
    try:
        return run('config', '--get', option) or default
    except subprocess2.CalledProcessError:
        return default


def get_config_int(option, default=0):
    """Read a git config value as an int, falling back to |default|."""
    assert isinstance(default, int)
    try:
        return int(get_config(option, default))
    except ValueError:
        return default


def get_config_list(option):
    """Read a multi-valued git config option as a (possibly empty) list."""
    try:
        return run('config', '--get-all', option).split()
    except subprocess2.CalledProcessError:
        return []


def get_config_regexp(pattern):
    """List config entries whose keys match |pattern|."""
    if IS_WIN:  # pragma: no cover
        # this madness is because we call git.bat which calls git.exe which
        # calls bash.exe (or something to that effect). Each layer divides
        # the number of ^'s by 2.
        pattern = pattern.replace('^', '^' * 8)
    return run('config', '--get-regexp', pattern).splitlines()


def current_branch():
    """Name of the checked-out branch, or None when HEAD is detached."""
    try:
        return run('rev-parse', '--abbrev-ref', 'HEAD')
    except subprocess2.CalledProcessError:
        return None


def del_branch_config(branch, option, scope='local'):
    """Unset `branch.<branch>.<option>` in the given config scope."""
    del_config('branch.%s.%s' % (branch, option), scope=scope)


def del_config(option, scope='local'):
    """Unset a config option, ignoring 'not found' errors."""
    try:
        run('config', '--' + scope, '--unset', option)
    except subprocess2.CalledProcessError:
        pass
def diff(oldrev, newrev, *args):
    """Return the output of `git diff oldrev newrev ...`."""
    return run('diff', oldrev, newrev, *args)
def freeze():
    """Commit all in-progress changes as FREEZE commits.

    Returns a human-readable status message, or None on full success.
    Refuses to run on unmerged (conflicted) checkouts, and dies when the
    amount of untracked+unignored data exceeds the configurable
    `depot-tools.freeze-size-limit` (in MB; <= 0 disables the check).
    """
    took_action = False
    key = 'depot-tools.freeze-size-limit'
    MB = 2**20
    limit_mb = get_config_int(key, 100)
    untracked_bytes = 0

    root_path = repo_root()

    # Scan the working tree: bail on conflicts, total up untracked bytes.
    for f, s in status():
        if is_unmerged(s):
            die("Cannot freeze unmerged changes!")
        if limit_mb > 0:
            if s.lstat == '?':
                untracked_bytes += os.lstat(os.path.join(root_path, f)).st_size
    if limit_mb > 0 and untracked_bytes > limit_mb * MB:
        die("""\
You appear to have too much untracked+unignored data in your git
checkout: %.1f / %d MB.
Run `git status` to see what it is.
In addition to making many git commands slower, this will prevent
depot_tools from freezing your in-progress changes.
You should add untracked data that you want to ignore to your repo's
.git/info/exclude
file. See `git help ignore` for the format of this file.
If this data is intended as part of your commit, you may adjust the
freeze limit by running:
git config %s <new_limit>
Where <new_limit> is an integer threshold in megabytes.""",
            untracked_bytes / (MB * 1.0), limit_mb, key)

    # First freeze whatever is already in the index...
    try:
        run('commit', '--no-verify', '-m', FREEZE + '.indexed')
        took_action = True
    except subprocess2.CalledProcessError:
        pass

    # ...then stage and freeze everything else.
    add_errors = False
    try:
        run('add', '-A', '--ignore-errors')
    except subprocess2.CalledProcessError:
        add_errors = True

    try:
        run('commit', '--no-verify', '-m', FREEZE + '.unindexed')
        took_action = True
    except subprocess2.CalledProcessError:
        pass

    ret = []
    if add_errors:
        ret.append('Failed to index some unindexed files.')
    if not took_action:
        ret.append('Nothing to freeze.')
    return ' '.join(ret) or None
def get_branch_tree():
    """Map each local branch to its upstream parent.

    Returns a tuple (skipped, branch_tree) where `skipped` is the set of
    branches with no upstream configured and `branch_tree` is
    {branch: parent}, compatible with topo_iter.
    """
    skipped = set()
    branch_tree = {}

    for branch in branches():
        parent = upstream(branch)
        if not parent:
            skipped.add(branch)
            continue
        branch_tree[branch] = parent

    return skipped, branch_tree
def get_or_create_merge_base(branch, parent=None):
    """Find (and cache) the merge base configured for |branch|.

    If |parent| is supplied, it is used instead of upstream(branch). A
    cached base is discarded when stale: recorded against a different
    upstream, no longer an ancestor of the branch, or older than the
    actual merge base.
    """
    base = branch_config(branch, 'base')
    base_upstream = branch_config(branch, 'base-upstream')
    parent = parent or upstream(branch)
    if parent is None or branch is None:
        return None
    actual_merge_base = run('merge-base', parent, branch)

    if base_upstream != parent:
        # The cached value belongs to a different upstream; ignore it.
        base = None
        base_upstream = None

    def is_ancestor(a, b):
        return run_with_retcode('merge-base', '--is-ancestor', a, b) == 0

    if base and base != actual_merge_base:
        if not is_ancestor(base, branch):
            logging.debug('Found WRONG pre-set merge-base for %s: %s', branch, base)
            base = None
        elif is_ancestor(base, actual_merge_base):
            logging.debug('Found OLD pre-set merge-base for %s: %s', branch, base)
            base = None
        else:
            logging.debug('Found pre-set merge-base for %s: %s', branch, base)

    if not base:
        base = actual_merge_base
        manual_merge_base(branch, base, parent)

    return base
def hash_multi(*reflike):
  """Resolve each commitref argument to its full hex hash.

  Returns a list of hashes, one per argument, in argument order.
  """
  resolved = run('rev-parse', *reflike)
  return resolved.splitlines()
def hash_one(reflike, short=False):
  """Resolve a single commitref to its hex hash.

  When short is True, asks git for the abbreviated (unique-prefix) form.
  """
  if short:
    return run('rev-parse', '--short', reflike)
  return run('rev-parse', reflike)
def in_rebase():
  """Return True if the repository currently has a rebase in progress.

  Git keeps a 'rebase-merge' or 'rebase-apply' directory inside the git dir
  for the duration of a rebase; either one present means we are mid-rebase.
  """
  git_dir = run('rev-parse', '--git-dir')
  markers = ('rebase-merge', 'rebase-apply')
  return any(os.path.exists(os.path.join(git_dir, m)) for m in markers)
def intern_f(f, kind='blob'):
  """Interns a file object into the git object store.

  Args:
    f (file-like object) - The file-like object to intern
    kind (git object type) - One of 'blob', 'commit', 'tree', 'tag'.

  Returns the git hash of the interned object (hex encoded).
  """
  # Close the file even when hash-object fails, so callers never leak the
  # handle on error (the original only closed it on the success path).
  try:
    return run('hash-object', '-t', kind, '-w', '--stdin', stdin=f)
  finally:
    f.close()
def is_dormant(branch):
  """Return True when the branch's 'dormant' config is set to any value
  other than the literal string 'false'."""
  # TODO(iannucci): Do an oldness check?
  dormant_flag = branch_config(branch, 'dormant', 'false')
  return dormant_flag != 'false'
def is_unmerged(stat_value):
  """Return True if a git-status entry describes an unmerged path.

  A path is unmerged when either side carries the 'U' (unmerged) state, or
  when both sides agree on an add/add ('AA') or delete/delete ('DD')
  conflict.
  """
  left, right = stat_value.lstat, stat_value.rstat
  if 'U' in (left, right):
    return True
  return left == right and left in 'AD'
def manual_merge_base(branch, base, parent):
  """Pin the merge base of a branch to a specific commit.

  Records both the base commit and the upstream it was computed against in
  the branch's git config.
  """
  for option, value in (('base', base), ('base-upstream', parent)):
    set_branch_config(branch, option, value)
def mktree(treedict):
  """Makes a git tree object and returns its hash.

  See |tree()| for the values of mode, type, and ref.

  Args:
    treedict - { name: (mode, type, ref) }
  """
  with tempfile.TemporaryFile() as f:
    # mktree -z expects NUL-terminated "<mode> <type> <ref>\t<name>" entries.
    entries = [
        ('%s %s %s\t%s\0' % (mode, typ, ref, name)).encode('utf-8')
        for name, (mode, typ, ref) in treedict.items()
    ]
    f.write(b''.join(entries))
    f.seek(0)
    return run('mktree', '-z', stdin=f)
def parse_commitrefs(*commitrefs):
  """Returns binary encoded commit hashes for one or more commitrefs.

  A commitref is anything which can resolve to a commit. Popular examples:
    * 'HEAD'
    * 'origin/master'
    * 'cool_branch~2'
  """
  try:
    hex_hashes = hash_multi(*commitrefs)
  except subprocess2.CalledProcessError:
    raise BadCommitRefException(commitrefs)
  return [binascii.unhexlify(h) for h in hex_hashes]
RebaseRet = collections.namedtuple('RebaseRet', 'success stdout stderr')


def rebase(parent, start, branch, abort=False):
  """Rebases |start|..|branch| onto the branch |parent|.

  Args:
    parent - The new parent ref for the rebased commits.
    start - The commit to start from
    branch - The branch to rebase
    abort - If True, will call git-rebase --abort in the event that the rebase
            doesn't complete successfully.

  Returns a RebaseRet namedtuple with fields:
    success - a boolean indicating that the rebase command completed
              successfully.
    stdout - the stdout of the failed rebase; empty string on success.
    stderr - the stderr of the failed rebase; empty string on success.
  """
  try:
    args = ['--onto', parent, start, branch]
    if TEST_MODE:
      # Deterministic committer dates keep test expectations stable.
      args.insert(0, '--committer-date-is-author-date')
    run('rebase', *args)
    return RebaseRet(True, '', '')
  except subprocess2.CalledProcessError as cpe:
    if abort:
      run_with_retcode('rebase', '--abort')  # ignore failure
    return RebaseRet(False, cpe.stdout.decode('utf-8', 'replace'),
                     cpe.stderr.decode('utf-8', 'replace'))
def remove_merge_base(branch):
  """Drop any pinned merge-base config recorded for the branch."""
  for option in ('base', 'base-upstream'):
    del_branch_config(branch, option)
def repo_root():
  """Return the absolute path of the working tree's top-level directory."""
  toplevel = run('rev-parse', '--show-toplevel')
  return toplevel
def upstream_default():
  """Returns the default branch name of the origin repository.

  Falls back to 'origin/master' when rev-parse fails, e.g. in clones where
  origin/HEAD is not configured.
  """
  try:
    return run('rev-parse', '--abbrev-ref', 'origin/HEAD')
  except subprocess2.CalledProcessError:
    return 'origin/master'
def root():
  """Return the configured depot-tools upstream ref, defaulting to the
  origin's default branch."""
  fallback = upstream_default()
  return get_config('depot-tools.upstream', fallback)
@contextlib.contextmanager
def less():  # pragma: no cover
  """Runs 'less' as context manager yielding its stdin as a PIPE.

  Automatically checks if sys.stdout is a non-TTY stream. If so, it avoids
  running less and just yields sys.stdout.

  The returned PIPE is opened on binary mode.
  """
  if not setup_color.IS_TTY:
    # On Python 3, sys.stdout doesn't accept bytes, and sys.stdout.buffer must
    # be used.
    yield getattr(sys.stdout, 'buffer', sys.stdout)
    return

  # Run with the same options that git uses (see setup_pager in git repo).
  # -F: Automatically quit if the output is less than one screen.
  # -R: Don't escape ANSI color codes.
  # -X: Don't clear the screen before starting.
  cmd = ('less', '-FRX')
  try:
    proc = subprocess2.Popen(cmd, stdin=subprocess2.PIPE)
    yield proc.stdin
  finally:
    try:
      proc.stdin.close()
    except BrokenPipeError:
      # BrokenPipeError is raised if proc has already completed,
      # e.g. the user quit less before all output was written.
      pass
    # Wait for less to exit before returning control of the terminal.
    proc.wait()
def run(*cmd, **kwargs):
  """The same as run_with_stderr, except it only returns stdout."""
  stdout, _ = run_with_stderr(*cmd, **kwargs)
  return stdout
def run_with_retcode(*cmd, **kwargs):
  """Run a command but only return the status code (0 on success)."""
  try:
    run(*cmd, **kwargs)
  except subprocess2.CalledProcessError as cpe:
    return cpe.returncode
  return 0
def run_stream(*cmd, **kwargs):
  """Runs a git command. Returns stdout as a PIPE (file-like object).

  stderr is dropped to avoid races if the process outputs to both stdout and
  stderr.
  """
  defaults = {
      'stderr': subprocess2.VOID,
      'stdout': subprocess2.PIPE,
      'shell': False,
  }
  for option, value in defaults.items():
    kwargs.setdefault(option, value)
  full_cmd = (GIT_EXE, '-c', 'color.ui=never') + cmd
  return subprocess2.Popen(full_cmd, **kwargs).stdout
@contextlib.contextmanager
def run_stream_with_retcode(*cmd, **kwargs):
  """Runs a git command as context manager yielding stdout as a PIPE.

  stderr is dropped to avoid races if the process outputs to both stdout and
  stderr.

  Raises subprocess2.CalledProcessError on nonzero return code.
  """
  kwargs.setdefault('stderr', subprocess2.VOID)
  kwargs.setdefault('stdout', subprocess2.PIPE)
  kwargs.setdefault('shell', False)
  cmd = (GIT_EXE, '-c', 'color.ui=never') + cmd
  try:
    proc = subprocess2.Popen(cmd, **kwargs)
    yield proc.stdout
  finally:
    # The return code can only be checked once the caller is done reading,
    # so the error (if any) is raised on context-manager exit.
    retcode = proc.wait()
    if retcode != 0:
      # stdout was handed to the caller and stderr was dropped, hence the
      # empty byte strings in the exception.
      raise subprocess2.CalledProcessError(retcode, cmd, os.getcwd(),
                                           b'', b'')
def run_with_stderr(*cmd, **kwargs):
  """Runs a git command.

  Returns (stdout, stderr) as a pair of strings.

  kwargs
    autostrip (bool) - Strip the output. Defaults to True.
    indata (str) - Specifies stdin data for the process.
    decode (bool) - Decode the output as utf-8 text. Defaults to True; when
        False, raw bytes are returned instead.
  """
  kwargs.setdefault('stdin', subprocess2.PIPE)
  kwargs.setdefault('stdout', subprocess2.PIPE)
  kwargs.setdefault('stderr', subprocess2.PIPE)
  kwargs.setdefault('shell', False)
  # These kwargs are consumed here and must not be forwarded to Popen.
  autostrip = kwargs.pop('autostrip', True)
  indata = kwargs.pop('indata', None)
  decode = kwargs.pop('decode', True)
  # color.ui=never keeps ANSI escape codes out of parsed output.
  cmd = (GIT_EXE, '-c', 'color.ui=never') + cmd

  proc = subprocess2.Popen(cmd, **kwargs)
  ret, err = proc.communicate(indata)
  retcode = proc.wait()
  if retcode != 0:
    raise subprocess2.CalledProcessError(retcode, cmd, os.getcwd(), ret, err)

  if autostrip:
    ret = (ret or b'').strip()
    err = (err or b'').strip()

  if decode:
    ret = ret.decode('utf-8', 'replace')
    err = err.decode('utf-8', 'replace')

  return ret, err
def set_branch_config(branch, option, value, scope='local'):
  """Set a branch-scoped git config option ('branch.<name>.<option>')."""
  full_option = 'branch.%s.%s' % (branch, option)
  set_config(full_option, value, scope=scope)
def set_config(option, value, scope='local'):
  """Set a git config option at the given scope ('local', 'global', ...)."""
  run('config', '--%s' % scope, option, value)
def get_dirty_files():
  """Returns the name-status listing of uncommitted changes versus HEAD
  ('git diff-index --name-status HEAD'); an empty string means clean."""
  # Make sure index is up-to-date before running diff-index.
  run_with_retcode('update-index', '--refresh', '-q')
  return run('diff-index', '--ignore-submodules', '--name-status', 'HEAD')
def is_dirty_git_tree(cmd):
  """Return True (printing a diagnostic to stderr) when the tree has
  uncommitted changes.

  Args:
    cmd - name of the command being attempted, used in the error message.
  """
  def warn(line):
    sys.stderr.write(line + "\n")

  dirty = get_dirty_files()
  if not dirty:
    return False
  warn('Cannot %s with a dirty tree. Commit, freeze or stash your changes first.'
       % cmd)
  warn('Uncommitted files: (git diff-index --name-status HEAD)')
  # Cap the listing so a huge diff doesn't flood the terminal.
  warn(dirty[:4096])
  if len(dirty) > 4096:  # pragma: no cover
    warn('... (run "git diff-index --name-status HEAD" to see full output).')
  return True
def status():
  """Returns a parsed version of git-status.

  Returns a generator of (current_name, (lstat, rstat, src)) pairs where:
    * current_name is the name of the file
    * lstat is the left status code letter from git-status
    * rstat is the right status code letter from git-status
    * src is the current name of the file, or the original name of the file
      if lstat == 'R'
  """
  stat_entry = collections.namedtuple('stat_entry', 'lstat rstat src')

  def tokenizer(stream):
    # Splits the NUL-delimited output of 'git status -z' into raw entries,
    # reading one byte at a time until the stream is exhausted.
    acc = BytesIO()
    c = None
    while c != b'':
      c = stream.read(1)
      if c in (None, b'', b'\0'):
        if len(acc.getvalue()):
          yield acc.getvalue()
          acc = BytesIO()
      else:
        acc.write(c)

  def parser(tokens):
    while True:
      try:
        status_dest = next(tokens).decode('utf-8')
      except StopIteration:
        return
      # Each entry has the form 'XY <path>'; a rename ('R') is followed by
      # an extra token holding the original path.
      stat, dest = status_dest[:2], status_dest[3:]
      lstat, rstat = stat
      if lstat == 'R':
        src = next(tokens).decode('utf-8')
      else:
        src = dest
      yield (dest, stat_entry(lstat, rstat, src))

  return parser(tokenizer(run_stream('status', '-z', bufsize=-1)))
def squash_current_branch(header=None, merge_base=None):
  """Squashes all commits on the current branch down to its merge base.

  Args:
    header - first line of the squash commit message; a default is generated
        when omitted.
    merge_base - commit to squash down to; defaults to the branch's computed
        merge base.

  Returns True when a squash commit was created, False when the squash
  produced no changes to commit.
  """
  header = header or 'git squash commit for %s.' % current_branch()
  merge_base = merge_base or get_or_create_merge_base(current_branch())
  log_msg = header + '\n'
  # NOTE(review): log_msg is always non-empty at this point, so this guard
  # is always taken; it merely separates the header from the appended log.
  if log_msg:
    log_msg += '\n'
  # Preserve the full history of the squashed commits in the message body.
  log_msg += run('log', '--reverse', '--format=%H%n%B', '%s..HEAD' % merge_base)
  run('reset', '--soft', merge_base)
  if not get_dirty_files():
    # Sometimes the squash can result in the same tree, meaning that there is
    # nothing to commit at this point.
    print('Nothing to commit; squashed branch is empty')
    return False
  run('commit', '--no-verify', '-a', '-F', '-', indata=log_msg.encode('utf-8'))
  return True
def tags(*args):
  """List tag names, forwarding any extra arguments to 'git tag'."""
  output = run('tag', *args)
  return output.splitlines()
def thaw():
  """Undo the commit(s) created by freeze(), restoring the index and the
  working tree. Returns a message string when there was nothing to thaw."""
  took_action = False
  for sha in run_stream('rev-list', 'HEAD').readlines():
    sha = sha.strip().decode('utf-8')
    # Each reset below moves HEAD, so re-inspect the current HEAD on every
    # iteration and stop at the first commit that is not a FREEZE commit.
    msg = run('show', '--format=%f%b', '-s', 'HEAD')
    match = FREEZE_MATCHER.match(msg)
    if not match:
      if not took_action:
        return 'Nothing to thaw.'
      break
    # FREEZE_SECTIONS maps the freeze commit's kind (captured by the regex)
    # to the reset mode used to undo it.
    run('reset', '--' + FREEZE_SECTIONS[match.group(1)], sha)
    took_action = True
def topo_iter(branch_tree, top_down=True):
  """Generates (branch, parent) in topographical order for a branch tree.

  Given a tree:

            A1
         B1      B2
       C1  C2    C3
                 D1

  branch_tree would look like: {
    'D1': 'C3',
    'C3': 'B2',
    'B2': 'A1',
    'C1': 'B1',
    'C2': 'B1',
    'B1': 'A1',
  }

  It is OK to have multiple 'root' nodes in your graph.

  if top_down is True, items are yielded from A->D. Otherwise they're yielded
  from D->A. Within a layer the branches will be yielded in sorted order.
  """
  remaining = dict(branch_tree)
  # TODO(iannucci): There is probably a more efficient way to do these.
  if top_down:
    while remaining:
      # A branch is ready once its parent has already been emitted (i.e. the
      # parent is no longer in the remaining tree).
      ready = sorted(
          (b, p) for b, p in remaining.items() if p not in remaining)
      assert ready, "Branch tree has cycles: %r" % remaining
      for branch, parent in ready:
        yield branch, parent
        del remaining[branch]
  else:
    children = collections.defaultdict(set)
    for branch, parent in remaining.items():
      children[parent].add(branch)
    while remaining:
      # Bottom-up: a branch is ready once all of its children were emitted.
      ready = sorted(
          (b, p) for b, p in remaining.items() if not children[b])
      assert ready, "Branch tree has cycles: %r" % remaining
      for branch, parent in ready:
        yield branch, parent
        children[parent].discard(branch)
        del remaining[branch]
def tree(treeref, recurse=False):
  """Returns a dict representation of a git tree object.

  Args:
    treeref (str) - a git ref which resolves to a tree (commits count as
      trees).
    recurse (bool) - include all of the tree's descendants too. File names
      will take the form of 'some/path/to/file'.

  Return format:
    { 'file_name': (mode, type, ref) }

    mode is an integer where:
      * 0040000 - Directory
      * 0100644 - Regular non-executable file
      * 0100664 - Regular non-executable group-writeable file
      * 0100755 - Regular executable file
      * 0120000 - Symbolic link
      * 0160000 - Gitlink

    type is a string where it's one of 'blob', 'commit', 'tree', 'tag'.

    ref is the hex encoded hash of the entry.
  """
  opts = ['ls-tree', '--full-tree']
  if recurse:
    opts.append('-r')
  opts.append(treeref)
  try:
    listing = run(*opts).splitlines()
  except subprocess2.CalledProcessError:
    # The ref did not resolve to a tree.
    return None
  entries = {}
  for line in listing:
    mode, typ, ref, name = line.split(None, 3)
    entries[name] = (mode, typ, ref)
  return entries
def get_remote_url(remote='origin'):
  """Return the configured URL for a remote, or None if it isn't set."""
  option = 'remote.%s.url' % remote
  try:
    return run('config', option)
  except subprocess2.CalledProcessError:
    return None
def upstream(branch):
  """Return the upstream ref of a branch, or None if none is configured."""
  ref = '%s@{upstream}' % branch
  try:
    return run('rev-parse', '--abbrev-ref', '--symbolic-full-name', ref)
  except subprocess2.CalledProcessError:
    return None
def get_git_version():
  """Returns a tuple that contains the numeric components of the current git
  version, e.g. (2, 31, 1). Returns an empty tuple (which compares lower
  than any real version) if the version string cannot be parsed."""
  version_string = run('--version')
  # The separator dot must be escaped: an unescaped '.' also matches any
  # character, so r'(\d+.)+' could swallow non-version text like '2a3'.
  version_match = re.search(r'(\d+\.)+(\d+)', version_string)
  if not version_match:
    # The original code would crash here with int('') on an unparsable
    # version string; an empty tuple is a safe "unknown/old" answer for
    # callers doing >= comparisons.
    return ()
  return tuple(int(x) for x in version_match.group().split('.'))
def get_branches_info(include_tracking_status):
  """Collects hash/upstream/commit-count info for every local branch.

  Args:
    include_tracking_status - when True (and git is new enough), also parse
        the tracking info (e.g. 'behind N') from for-each-ref.

  Returns a dict of {branch_name: BranchesInfo}; upstreams that are not
  themselves local branches appear as keys mapped to None.
  """
  format_string = (
      '--format=%(refname:short):%(objectname:short):%(upstream:short):')

  # This is not covered by the depot_tools CQ which only has git version 1.8.
  if (include_tracking_status and
      get_git_version() >= MIN_UPSTREAM_TRACK_GIT_VERSION):  # pragma: no cover
    format_string += '%(upstream:track)'

  info_map = {}
  data = run('for-each-ref', format_string, 'refs/heads')
  BranchesInfo = collections.namedtuple(
      'BranchesInfo', 'hash upstream commits behind')
  for line in data.splitlines():
    (branch, branch_hash, upstream_branch, tracking_status) = line.split(':')

    # Number of commits on the branch past its merge base; None when zero.
    commits = None
    base = get_or_create_merge_base(branch)
    if base:
      commits = int(run('rev-list', '--count', branch, '^%s' % base)) or None

    # 'behind N' comes from %(upstream:track) when it was requested above.
    behind_match = re.search(r'behind (\d+)', tracking_status)
    behind = int(behind_match.group(1)) if behind_match else None

    info_map[branch] = BranchesInfo(
        hash=branch_hash, upstream=upstream_branch, commits=commits,
        behind=behind)

  # Set None for upstreams which are not branches (e.g empty upstream, remotes
  # and deleted upstream branches).
  missing_upstreams = {}
  for info in info_map.values():
    if info.upstream not in info_map and info.upstream not in missing_upstreams:
      missing_upstreams[info.upstream] = None

  result = info_map.copy()
  result.update(missing_upstreams)
  return result
def make_workdir_common(repository, new_workdir, files_to_symlink,
                        files_to_copy, symlink=None):
  """Populate a new workdir from a repository's git directory.

  Args:
    repository - path of the source git directory.
    new_workdir - path of the workdir git directory to create.
    files_to_symlink - entries shared with the source repo via symlinks.
    files_to_copy - entries the new workdir gets its own copy of.
    symlink - alternative symlink operation; defaults to os.symlink.
  """
  symlink = symlink or os.symlink
  os.makedirs(new_workdir)
  planned = (
      [(entry, symlink) for entry in files_to_symlink] +
      [(entry, shutil.copy) for entry in files_to_copy])
  for entry, operation in planned:
    clone_file(repository, new_workdir, entry, operation)
def make_workdir(repository, new_workdir):
  """Create a new workdir that shares git metadata with a repository."""
  # Entries that are shared (symlinked) between all workdirs of a repository.
  shared_entries = [
      'config',
      'info',
      'hooks',
      'logs/refs',
      'objects',
      'packed-refs',
      'refs',
      'remotes',
      'rr-cache',
      'shallow',
  ]
  # HEAD is per-workdir state, so it is copied rather than linked.
  make_workdir_common(repository, new_workdir, shared_entries, ['HEAD'])
def clone_file(repository, new_workdir, link, operation):
  """Replicate one entry of a repository into a new workdir.

  Does nothing when the entry doesn't exist in the source repository.
  Parent directories of the destination are created as needed, and a
  symlinked source is resolved to its real path before |operation| (e.g.
  os.symlink or shutil.copy) is applied.
  """
  source = os.path.join(repository, link)
  if not os.path.exists(source):
    return
  destination = os.path.join(new_workdir, link)
  parent_dir = os.path.dirname(destination)
  if not os.path.exists(parent_dir):
    os.makedirs(parent_dir)
  if os.path.islink(source):
    source = os.path.realpath(source)
  operation(source, destination)
| 28.406335 | 80 | 0.664914 |
bea1cef1f0598d8981f23e210cb64658682a65f2 | 778 | py | Python | wbsync/util/uri_constants.py | mistermboy/wikibase-sync | 4be2738719c72020159f1b113e3c4ba61b6ca2f0 | [
"MIT"
] | 5 | 2021-03-30T06:16:33.000Z | 2021-04-17T09:11:32.000Z | wbsync/util/uri_constants.py | mistermboy/wikibase-sync | 4be2738719c72020159f1b113e3c4ba61b6ca2f0 | [
"MIT"
] | 5 | 2020-12-25T18:50:14.000Z | 2021-04-18T10:01:41.000Z | wbsync/util/uri_constants.py | weso/wikibase-sync | da17aa1e691cde4c1c66bd87bc3ca3d7b899c261 | [
"MIT"
] | 4 | 2020-09-01T10:47:39.000Z | 2021-07-14T11:38:21.000Z | ASIO_BASE = 'http://purl.org/hercules/asio/core#'
# RDF Schema (RDFS) vocabulary and its common annotation/hierarchy terms.
RDFS_BASE = 'http://www.w3.org/2000/01/rdf-schema#'
RDFS_LABEL = f'{RDFS_BASE}label'
RDFS_COMMENT = f'{RDFS_BASE}comment'
RDFS_SUBCLASSOF = f'{RDFS_BASE}subClassOf'
# Core RDF syntax namespace (rdf:type).
RDF_BASE = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
RDF_TYPE = f'{RDF_BASE}type'
# Web Ontology Language (OWL) terms.
OWL_BASE = 'http://www.w3.org/2002/07/owl#'
OWL_CLASS = f'{OWL_BASE}Class'
OWL_DISJOINT_WITH = f'{OWL_BASE}disjointWith'
# Schema.org naming/description terms.
SCHEMA_BASE = 'https://schema.org/'
SCHEMA_DESCRIPTION = f'{SCHEMA_BASE}description'
SCHEMA_NAME = f'{SCHEMA_BASE}name'
# SKOS labelling terms.
SKOS_BASE = 'http://www.w3.org/2004/02/skos/core#'
SKOS_ALTLABEL = f'{SKOS_BASE}altLabel'
SKOS_PREFLABEL = f'{SKOS_BASE}prefLabel'
# GeoSPARQL ontology namespace.
GEO_BASE = 'http://www.opengis.net/ont/geosparql#'
# XML Schema datatypes namespace.
XSD_BASE = 'http://www.w3.org/2001/XMLSchema#'
| 29.923077 | 56 | 0.736504 |
5737162827e75229e51148a41ab56187c2e08eb1 | 10,563 | py | Python | cltk/lemmatize/backoff.py | adeloucas/cltk | 38b82530c11e153a9bd39791b50895d3a5a06ac0 | [
"MIT"
] | 1 | 2020-05-01T08:21:22.000Z | 2020-05-01T08:21:22.000Z | cltk/lemmatize/backoff.py | ecomp-shONgit/cltk | 7bc3ffd1bbbfa5d036297395d7e51b99b25b81ea | [
"MIT"
] | null | null | null | cltk/lemmatize/backoff.py | ecomp-shONgit/cltk | 7bc3ffd1bbbfa5d036297395d7e51b99b25b81ea | [
"MIT"
] | null | null | null | """Lemmatization module—includes several classes for different
lemmatizing approaches--based on training data, regex pattern matching,
etc. These can be chained together using the backoff parameter. Also,
includes a pre-built chain that uses models in cltk_data.
The logic behind the backoff lemmatizer is based on backoff POS-tagging in
NLTK and repurposes several of the tagging classes for lemmatization
tasks. See here for more info on sequential backoff tagging in NLTK:
http://www.nltk.org/_modules/nltk/tag/sequential.html
"""
import os
import re
from typing import List, Dict, Tuple, Set, Any, Generator
import reprlib
from nltk.probability import ConditionalFreqDist
from nltk.tag.api import TaggerI
from nltk.tag.sequential import SequentialBackoffTagger, ContextTagger, DefaultTagger, NgramTagger, UnigramTagger, RegexpTagger
from cltk.utils.file_operations import open_pickle
# Unused for now
#def backoff_lemmatizer(train_sents, lemmatizer_classes, backoff=None):
# """From Python Text Processing with NLTK Cookbook."""
# for cls in lemmatizer_classes:
# backoff = cls(train_sents, backoff=backoff)
# return backoff
class SequentialBackoffLemmatizer(SequentialBackoffTagger):
    """
    Abstract base class for lemmatizers created as a subclass of
    NLTK's SequentialBackoffTagger. Lemmatizers in this class "[tag]
    words sequentially, left to right. Tagging of individual words is
    performed by the ``choose_tag()`` method, which should be defined
    by subclasses. If a tagger is unable to determine a tag for the
    specified token, then its backoff tagger is consulted."

    See: https://www.nltk.org/_modules/nltk/tag/sequential.html#SequentialBackoffTagger

    :type _taggers: list
    :ivar _taggers: A list of all the taggers in the backoff chain,
        inc. self.
    :type _repr: Repr object
    :ivar _repr: An instance of Repr() from reprlib to handle list
        and dict length in subclass __repr__'s
    """

    def __init__(self: object, backoff: object, verbose: bool = False):
        """
        Setup for SequentialBackoffLemmatizer

        :param backoff: Next lemmatizer in backoff chain
        :type verbose: bool
        :param verbose: Flag to include which lemmatizer assigned in
            a given tag in the return tuple
        """
        # The parent class is initialized without a backoff; the backoff
        # chain is wired up manually just below via ``_taggers``.
        SequentialBackoffTagger.__init__(self, backoff=None)

        # Setup backoff chain
        if backoff is None:
            self._taggers = [self]
        else:
            self._taggers = [self] + backoff._taggers

        self.VERBOSE = verbose
        # Truncated-repr helper used by subclass __repr__ implementations
        # (lists/dicts are cut to a single element).
        self.repr = reprlib.Repr()
        self.repr.maxlist = 1
        self.repr.maxdict = 1

    def tag(self: object, tokens: List[str]):
        """ Docs (mostly) inherited from TaggerI; cf.
        https://www.nltk.org/_modules/nltk/tag/api.html#TaggerI.tag

        Two tweaks:
        1. Properly handle 'verbose' listing of current tagger in
        the case of None (i.e. ``if tag: etc.``)
        2. Keep track of taggers and change return depending on
        'verbose' flag

        :rtype list
        :type tokens: list
        :param tokens: List of tokens to tag
        """
        tags = []
        taggers = []
        for i in range(len(tokens)):
            tag, tagger = self.tag_one(tokens, i, tags)
            tags.append(tag)
            # Record which lemmatizer produced the tag; None when no tagger
            # in the chain could handle the token.
            taggers.append(str(tagger)) if tag else taggers.append(None)
        if self.VERBOSE:
            # Verbose output includes the name of the assigning lemmatizer.
            return list(zip(tokens, tags, taggers))
        else:
            return list(zip(tokens, tags))

    def tag_one(self: object, tokens: List[str], index: int, history: List[str]):
        """
        Determine an appropriate tag for the specified token, and
        return that tag. If this tagger is unable to determine a tag
        for the specified token, then its backoff tagger is consulted.

        :rtype: tuple
        :type tokens: list
        :param tokens: The list of words that are being tagged.
        :type index: int
        :param index: The index of the word whose tag should be
            returned.
        :type history: list(str)
        :param history: A list of the tags for all words before index.
        """
        lemma = None
        # Walk the backoff chain until some lemmatizer returns a non-None
        # lemma; the tagger that produced it is returned alongside.
        for tagger in self._taggers:
            lemma = tagger.choose_tag(tokens, index, history)
            if lemma is not None:
                break
        return lemma, tagger

    def lemmatize(self: object, tokens: List[str]):
        """
        Transform tag method into custom method for lemmatizing
        tasks. Cf. ``tag`` method above.
        """
        return self.tag(tokens)
class DefaultLemmatizer(SequentialBackoffLemmatizer):
    """
    Lemmatizer that assigns the same lemma to every token. Useful as the final
    tagger in chain, e.g. to assign 'UNK' to all remaining unlemmatized tokens.

    :type lemma: str
    :param lemma: Lemma to assign to each token

    >>> from cltk.lemmatize.latin.backoff import DefaultLemmatizer
    >>> default_lemmatizer = DefaultLemmatizer('UNK')
    >>> list(default_lemmatizer.lemmatize('arma virumque cano'.split()))
    [('arma', 'UNK'), ('virumque', 'UNK'), ('cano', 'UNK')]
    """

    def __init__(self: object, lemma: str = None, backoff: object = None, verbose: bool = False):
        """
        :param lemma: Lemma assigned to every token.
        :param backoff: Next lemmatizer in the backoff chain.
        :param verbose: Include the assigning lemmatizer in the output tuples.
        """
        self.lemma = lemma
        # Fix: forward the caller-supplied ``backoff`` instead of hard-coding
        # ``backoff=None``. Previously a provided backoff was silently
        # dropped, so e.g. DefaultLemmatizer(lemma=None, backoff=...) never
        # consulted its backoff chain.
        SequentialBackoffLemmatizer.__init__(self, backoff=backoff, verbose=verbose)

    def choose_tag(self: object, tokens: List[str], index: int, history: List[str]):
        """Return the fixed lemma for any token (None defers to backoff)."""
        return self.lemma

    def __repr__(self: object):
        return f'<{type(self).__name__}: lemma={self.lemma}>'
class IdentityLemmatizer(SequentialBackoffLemmatizer):
    """
    Lemmatizer that returns a given token as its lemma. Like DefaultLemmatizer,
    useful as the final tagger in a chain, e.g. to assign a possible form to
    all remaining unlemmatized tokens, increasing the chance of a successful
    match.

    >>> from cltk.lemmatize.latin.backoff import IdentityLemmatizer
    >>> identity_lemmatizer = IdentityLemmatizer()
    >>> list(identity_lemmatizer.lemmatize('arma virumque cano'.split()))
    [('arma', 'arma'), ('virumque', 'virumque'), ('cano', 'cano')]
    """

    def __init__(self: object, backoff: object = None, verbose: bool = False):
        """
        :param backoff: Next lemmatizer in the backoff chain.
        :param verbose: Include the assigning lemmatizer in the output tuples.
        """
        # Fix: forward the caller-supplied ``backoff`` instead of hard-coding
        # ``backoff=None`` (consistent with the signature, which accepts a
        # backoff; previously it was silently dropped).
        SequentialBackoffLemmatizer.__init__(self, backoff=backoff, verbose=verbose)

    def choose_tag(self: object, tokens: List[str], index: int, history: List[str]):
        """Return the token itself as its lemma (always succeeds)."""
        return tokens[index]

    def __repr__(self: object):
        return f'<{type(self).__name__}>'
class DictLemmatizer(SequentialBackoffLemmatizer):
    """Dictionary-lookup lemmatizer.

    Standalone version of the 'model' function found in UnigramTagger; as its
    own class it is clearer that this lemmatizer is a plain dictionary lookup
    and does not use training data.
    """

    def __init__(self: object, lemmas: List[str], backoff: object = None, source: str = None, verbose: bool = False):
        """
        :type lemmas: dict
        :param lemmas: Mapping of the form {TOKEN: LEMMA} used for lookup.
        :param backoff: Next lemmatizer in the backoff chain.
        :param source: Human-readable label for this dictionary, used in repr.
        """
        SequentialBackoffLemmatizer.__init__(self, backoff, verbose=verbose)
        self.lemmas = lemmas
        self.source = source

    def choose_tag(self: object, tokens: List[str], index: int, history: List[str]):
        """Return the lemma for the current token, or None when unknown.

        :rtype: str
        :type tokens: list
        :param tokens: List of tokens to be lemmatized
        :type index: int
        :param index: Index of the current token
        :type history: list
        :param history: Previously assigned lemmas; NOT USED
        """
        # dict.get yields None for unknown tokens, which defers to backoff.
        return self.lemmas.get(tokens[index])

    def __repr__(self: object):
        label = self.source or self.repr.repr(self.lemmas)
        return f'<{type(self).__name__}: {label}>'
class UnigramLemmatizer(SequentialBackoffLemmatizer, UnigramTagger):
    """
    Standalone version of 'train' function found in UnigramTagger; by
    defining as its own class, it is clearer that this lemmatizer is
    based on training data and not on dictionary.
    """

    def __init__(self: object, train=None, model=None, backoff: object = None, source: str = None, cutoff=0, verbose: bool = False):
        """
        Setup for UnigramLemmatizer()

        :param train: Training data forwarded to UnigramTagger.
        :param model: Pre-built model forwarded to UnigramTagger.
        :param backoff: Next lemmatizer in the backoff chain.
        :param source: Human-readable label used by __repr__.
        :param cutoff: Evidence cutoff forwarded to UnigramTagger.
        """
        # NOTE(review): SequentialBackoffLemmatizer is initialized here
        # without the backoff; presumably UnigramTagger.__init__ (which
        # receives ``backoff``) wires up the chain via NLTK's tagger
        # machinery — confirm against nltk.tag.sequential.
        SequentialBackoffLemmatizer.__init__(self, backoff=None, verbose=verbose)
        UnigramTagger.__init__(self, train, model, backoff, cutoff)
        self.train = train
        self.source = source

    def __repr__(self: object):
        # Prefer the human-readable source label; otherwise show a truncated
        # repr of the training data.
        if self.source:
            return f'<{type(self).__name__}: {self.source}>'
        else:
            return f'<{type(self).__name__}: {self.repr.repr(self.train)}>'
class RegexpLemmatizer(SequentialBackoffLemmatizer, RegexpTagger):
    """Rules-based lemmatizer that rewrites word endings via regex patterns."""

    def __init__(self: object, regexps=None, source=None, backoff=None, verbose: bool = False):
        """Setup for RegexpLemmatizer()

        :type regexps: list
        :param regexps: List of tuples of form (PATTERN, REPLACEMENT)
        :param source: Human-readable label used by __repr__.
        :param backoff: Next lemmatizer in backoff chain.
        """
        # NOTE(review): SequentialBackoffLemmatizer is initialized without
        # the backoff; presumably RegexpTagger.__init__ (which receives
        # ``backoff``) wires up the chain via NLTK's tagger machinery —
        # confirm against nltk.tag.sequential.
        SequentialBackoffLemmatizer.__init__(self, backoff=None, verbose=verbose)
        RegexpTagger.__init__(self, regexps, backoff)
        self._regexs = regexps
        self.source = source

    def choose_tag(self: object, tokens: List[str], index: int, history: List[str]):
        """Use regular expressions for rules-based lemmatizing based on word
        endings; tokens are matched for patterns with the base kept as a
        group; a word-ending replacement is added to the (base) group.

        :rtype: str
        :type tokens: list
        :param tokens: List of tokens to be lemmatized
        :type index: int
        :param index: Int with current token
        :type history: list
        :param history: List with tokens that have already been lemmatized; NOT USED
        """
        # First matching pattern wins; falling through returns None, which
        # defers to the backoff chain.
        for pattern, replace in self._regexs:
            if re.search(pattern, tokens[index]):
                return re.sub(pattern, replace, tokens[index])

    def __repr__(self: object):
        if self.source:
            return f'<{type(self).__name__}: {self.source}>'
        else:
            return f'<{type(self).__name__}: {self.repr.repr(self._regexs)}>'
884b9ccd95c0ffab9690529445ccfda939f04ea9 | 90,010 | py | Python | src/redgrease/gears.py | wolkenarchitekt/redgrease | bf39651e6ad44774edac61b51a1444a668c49468 | [
"MIT"
] | 17 | 2021-02-26T23:03:39.000Z | 2022-01-26T11:21:49.000Z | src/redgrease/gears.py | wolkenarchitekt/redgrease | bf39651e6ad44774edac61b51a1444a668c49468 | [
"MIT"
] | 87 | 2021-02-16T08:54:59.000Z | 2021-08-18T07:21:39.000Z | src/redgrease/gears.py | wolkenarchitekt/redgrease | bf39651e6ad44774edac61b51a1444a668c49468 | [
"MIT"
] | 3 | 2021-04-21T07:57:43.000Z | 2021-10-04T09:13:14.000Z | # -*- coding: utf-8 -*-
# from __future__ import annotations
"""
GearsFunction and Operation definitions
"""
__author__ = "Anders Åström"
__contact__ = "anders@lyngon.com"
__copyright__ = "2021, Lyngon Pte. Ltd."
__licence__ = """The MIT License
Copyright © 2021 Lyngon Pte. Ltd.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the “Software”), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import numbers
import operator
from typing import Any, Dict, Generic, Hashable, Iterable, Optional, Type, TypeVar
import redgrease.sugar as sugar
import redgrease.utils
from redgrease.typing import (
Accumulator,
BatchReducer,
Expander,
Extractor,
Filterer,
InputRecord,
Key,
Mapper,
OutputRecord,
Processor,
Reducer,
Registrator,
)
T = TypeVar("T")
################################################################################
# Default Operands #
################################################################################
def _default_accumulator(acc, r):
acc = acc if isinstance(acc, list) else [acc]
acc.append(r)
return acc
def _default_extractor(r):
return hash(r)
def _default_reducer(_, acc, r):
    """Default keyed reducer: ignore the key and list-accumulate records."""
    return _default_accumulator(acc, r)
def _default_batch_reducer(_, records):
return len(records)
################################################################################
# Operations #
################################################################################
class Operation:
    """Base class for the operations that make up a RedisGears function.

    A Gear function is a pipeline of operations; each concrete subclass
    implements one pipeline step. Depending on the operation's type, its
    arguments may be plain values and/or function callbacks that control
    how the step behaves.

    Attributes:
        kwargs (dict):
            Extra keyword arguments that the specific operation did not
            consume explicitly.
    """

    def __init__(self, **kwargs):
        """Store any left-over keyword arguments for later use."""
        self.kwargs = kwargs

    def add_to(self, function):
        """Append this operation to the end of a Gear function.

        Concrete subclasses must override this; the base implementation
        always raises.

        Args:
            function (Union[Type, OpenGearFunction]):
                The "open" gear function to extend. For reader operations
                (always and only the first operation of a function), this is
                instead the GearsBuilder class itself.

        Raises:
            NotImplementedError:
                Always, unless a subclass overrides this method.
        """
        raise NotImplementedError(
            "Builder Operation has not implemented the `add_to` method: "
            f"'{self.__class__.__name__}'"
        )
class Nop(Operation):
    """An operation that leaves the Gear function completely untouched."""

    def add_to(self, function: "OpenGearFunction") -> "OpenGearFunction":
        """Return `function` as-is, without appending anything.

        Args:
            function (Union[Type, OpenGearFunction]):
                The "open" gear function this no-op would be appended to.

        Returns:
            OpenGearFunction:
                The very same function object, unmodified.
        """
        return function
class Reader(Operation):
    """Reader operation

    The Reader operation is always and only the first operation of any
    GearFunction. It defines which reader type to use and its arguments.

    Attributes:
        reader (str):
            The type of reader (https://oss.redislabs.com/redisgears/readers.html)

            - ``"KeysReader"``
            - ``"KeysOnlyReader"``
            - ``"StreamReader"``
            - ``"PythonReader"``
            - ``"ShardsIDReader"``
            - ``"CommandReader"``

        defaultArg (str):
            Argument that the reader may need. These are usually a key's name,
            prefix, glob-like or a regular expression. Its use depends on the
            function's reader type and action.

        desc (str):
            Function description.
    """

    def __init__(
        self,
        reader: str = sugar.ReaderType.KeysReader,
        defaultArg: str = "*",
        desc: Optional[str] = None,
        **kwargs,
    ) -> None:
        """Instantiate Reader operation

        Args:
            reader (str, optional):
                Reader type.
                Defaults to sugar.ReaderType.KeysReader.

            defaultArg (str, optional):
                Reader default arguments.
                Defaults to "*".

            desc (Optional[str], optional):
                Function description.
                Defaults to ``None``.
        """
        super().__init__(**kwargs)
        self.reader = reader
        self.defaultArg = defaultArg
        self.desc = desc

    def add_to(self, builder: Type) -> "OpenGearFunction":
        """Create a new gear function based on this reader information.

        Args:
            builder (Type):
                GearsBuilder class. Defines the constructor to use to create
                the Gear function.

        Returns:
            OpenGearFunction:
                Returns a minimal "open" gear function, consisting only of
                the reader.
        """
        # Unlike other operations, a Reader constructs a brand-new function
        # via the builder rather than extending an existing one; any
        # left-over kwargs are forwarded to the builder.
        return builder(self.reader, self.defaultArg, self.desc, **self.kwargs)
class Run(Operation):
    """Run action

    The Run action runs a Gear function as a batch.
    The function is executed once and exits once the data is exhausted by its reader.
    The Run action can only be the last operation of any GearFunction, and it
    effectively 'closes' it to further operations.
    Attributes:
        arg (str, optional):
            Argument that's passed to the reader, overriding its defaultArg.
            It means the following:
            - A glob-like pattern for the KeysReader and KeysOnlyReader readers.
            - A key name for the StreamReader reader.
            - A Python generator for the PythonReader reader.
        convertToStr (bool):
            When `True`, adds a map operation to the flow's end that stringifies
            records.
            NOTE(review): stored but not forwarded by `add_to`, which always
            passes ``False`` and pickles records instead — confirm intended.
        collect (bool):
            When `True`, adds a collect operation to flow's end.
    """
    def __init__(
        self,
        arg: Optional[str] = None,
        convertToStr: bool = True,
        collect: bool = True,
        **kwargs,
    ) -> None:
        """Instantiate a Run action
        Args:
            arg (Optional[str], optional):
                Optional argument that's passed to the reader, overriding its
                defaultArg.
                It means the following:
                - A glob-like pattern for the KeysReader and KeysOnlyReader readers.
                - A key name for the StreamReader reader.
                - A Python generator for the PythonReader reader.
                Defaults to ``None``.
            convertToStr (bool, optional):
                When `True`, adds a map operation to the flow's end that stringifies
                records.
                Defaults to ``True``.
            collect (bool, optional):
                When `True`, adds a collect operation to flow's end.
                Defaults to ``True``.
        """
        super().__init__(**kwargs)
        self.arg = arg
        self.convertToStr = convertToStr
        self.collect = collect
    def add_to(self, function: "OpenGearFunction") -> "ClosedGearFunction":
        """Closes a Gear function with the Run action.
        Args:
            function (OpenGearFunction):
                The "open" Gear function to close with the run action.
        Returns:
            ClosedGearFunction:
                A closed Gear batch function that is ready to run in RedisGears.
        """
        # Imported lazily so the module loads without cloudpickle installed.
        import cloudpickle

        # Records are cloudpickle-serialized instead of stringified; the
        # engine-side `convertToStr` is therefore explicitly disabled (False).
        return function.map(lambda x: cloudpickle.dumps(x, protocol=4)).run(
            self.arg, False, self.collect, **self.kwargs
        )
class Register(Operation):
    """Register action

    The Register action registers a function as an event handler.
    The function is executed each time an event arrives.
    Each time it is executed, the function operates on the event's data and once done
    it is suspended until its future invocations by new events.
    The Register action can only be the last operation of any GearFunction, and it
    effectively 'closes' it to further operations.
    Attributes:
        prefix (str):
            Key prefix pattern to match on.
            Not relevant for 'CommandReader' readers (see 'trigger').
        convertToStr (bool):
            When `True`, adds a map operation to the flow's end that stringifies
            records.
            NOTE(review): stored but not forwarded by `add_to`, which always
            passes ``False`` and pickles records instead — confirm intended.
        collect (bool):
            When `True`, adds a collect operation to flow's end.
        mode (str):
            The execution mode of the triggered function.
        onRegistered (Callable):
            A function callback that's called on each shard upon function
            registration.
    """
    def __init__(
        self,
        prefix: str = "*",
        convertToStr: bool = True,
        collect: bool = True,
        mode: str = sugar.TriggerMode.Async,
        onRegistered: Registrator = None,
        **kwargs,
    ) -> None:
        """Instantiate a Register action
        Args:
            prefix (str, optional):
                Key prefix pattern to match on.
                Not relevant for 'CommandReader' readers (see 'trigger').
                Defaults to '*'.
            convertToStr (bool, optional):
                When ``True`` adds a map operation to the flow's end that stringifies
                records.
                Defaults to ``True``.
            collect (bool, optional):
                When ``True`` adds a collect operation to flow's end.
                Defaults to ``True``.
            mode (str, optional):
                The execution mode of the function.
                Can be one of::
                    - ``"async"``:
                        Execution will be asynchronous across the entire cluster.
                    - ``"async_local"``:
                        Execution will be asynchronous and restricted to the handling shard.
                    - ``"sync"``:
                        Execution will be synchronous and local
                Defaults to `redgrease.TriggerMode.Async` (``"async"``)
            onRegistered (Registrator, optional):
                A function callback that's called on each shard upon function
                registration.
                It is a good place to initialize non-serializeable objects such as
                network connections.
                Defaults to ``None``.
        """
        super().__init__(**kwargs)
        self.prefix = prefix
        self.convertToStr = convertToStr
        self.collect = collect
        self.mode = mode
        self.onRegistered = onRegistered
    def add_to(self, function: "OpenGearFunction") -> "ClosedGearFunction":
        """Closes a Gear function with the Register action.
        Args:
            function (OpenGearFunction):
                The "open" Gear function to close with the register action.
        Returns:
            ClosedGearFunction:
                A closed "event-mode" Gear function that is ready to be registered on a
                RedisGears system.
        """
        # Imported lazily so the module loads without cloudpickle installed.
        import cloudpickle

        # Records are cloudpickle-serialized instead of stringified; the
        # engine-side `convertToStr` is therefore explicitly disabled (False).
        return function.map(lambda x: cloudpickle.dumps(x, protocol=4)).register(
            self.prefix,
            False,
            self.collect,
            mode=self.mode,
            onRegistered=self.onRegistered,
            **self.kwargs,
        )
################################################################################
# Operations #
################################################################################
class Map(Operation):
    """Local Map operation: one-to-one (1:1) transformation of records.

    Requires a single mapper function.

    Attributes:
        op (:data:`redgrease.typing.Mapper`):
            Mapper function applied to every input record.
    """

    def __init__(self, op: Mapper, **kwargs) -> None:
        """Create a Map operation.

        Args:
            op (:data:`redgrease.typing.Mapper`):
                Single-argument function; receives one input record and its
                return value becomes the corresponding output record.
        """
        super().__init__(**kwargs)
        self.op = op

    def add_to(self, function: "OpenGearFunction") -> "OpenGearFunction":
        """Append this map step to an "open" Gear function.

        Args:
            function (OpenGearFunction):
                The "open" gear function to extend.

        Returns:
            OpenGearFunction:
                The function with this operation appended at the end.
        """
        return function.map(self.op, **self.kwargs)
class FlatMap(Operation):
    """Local FlatMap operation: one-to-many (1:N) transformation of records.

    Requires a single expander function that turns one input record into
    potentially several output records. It works like Map, except that when
    the callback returns a sequence / iterator, every element of it becomes a
    separate output record.

    Attributes:
        op (:data:`redgrease.typing.Expander`):
            Expander function applied to every input record.
    """

    def __init__(self, op: Expander, **kwargs) -> None:
        """Create a FlatMap operation.

        Args:
            op (:data:`redgrease.typing.Expander`):
                Single-argument function; receives one input record and must
                return an iterable whose elements become the output records.
        """
        super().__init__(**kwargs)
        self.op = op

    def add_to(self, function: "OpenGearFunction") -> "OpenGearFunction":
        """Append this flatmap step to an "open" Gear function.

        Args:
            function (OpenGearFunction):
                The "open" gear function to extend.

        Returns:
            OpenGearFunction:
                The function with this operation appended at the end.
        """
        return function.flatmap(self.op, **self.kwargs)
class ForEach(Operation):
    """Local ForEach operation: one-to-the-same (1=1) pass-through mapping.

    Runs a processor function for its side effects on every input record.
    The output record is a copy of the input; whatever the callback returns
    is discarded.

    Attributes:
        op (:data:`redgrease.typing.Processor`):
            Processor function run on every input record.
    """

    def __init__(self, op: Processor, **kwargs) -> None:
        """Create a ForEach operation.

        Args:
            op (:data:`redgrease.typing.Processor`):
                Single-argument function; receives one input record and is
                expected to return nothing.
        """
        super().__init__(**kwargs)
        self.op = op

    def add_to(self, function: "OpenGearFunction") -> "OpenGearFunction":
        """Append this foreach step to an "open" Gear function.

        Args:
            function (OpenGearFunction):
                The "open" gear function to extend.

        Returns:
            OpenGearFunction:
                The function with this operation appended at the end.
        """
        return function.foreach(self.op, **self.kwargs)
class Filter(Operation):
    """Local Filter operation: one-to-zero-or-one (1:(0|1)) record filtering.

    Requires a filterer (predicate) function. Records for which the predicate
    is falsy are discarded; only truthy ones pass through.

    Attributes:
        op (:data:`redgrease.typing.Filterer`):
            Predicate function evaluated on every input record.
    """

    def __init__(self, op: Filterer, **kwargs) -> None:
        """Create a Filter operation.

        Args:
            op (:data:`redgrease.typing.Filterer`):
                Single-argument predicate; receives one input record and
                returns a bool. Records evaluating to ``True`` are kept.
        """
        super().__init__(**kwargs)
        self.op = op

    def add_to(self, function: "OpenGearFunction") -> "OpenGearFunction":
        """Append this filter step to an "open" Gear function.

        Args:
            function (OpenGearFunction):
                The "open" gear function to extend.

        Returns:
            OpenGearFunction:
                The function with this operation appended at the end.
        """
        return function.filter(self.op, **self.kwargs)
class Accumulate(Operation):
    """Local Accumulate operation: many-to-one (N:1) reduction of records.

    Requires a single accumulator function. When the input records are
    exhausted, the output is a single record holding the accumulator's value.

    Attributes:
        op (:data:`redgrease.typing.Accumulator`):
            Accumulator function folded over the input records.
    """

    def __init__(self, op: Accumulator, **kwargs) -> None:
        """Create an Accumulate operation.

        Args:
            op (:data:`redgrease.typing.Accumulator`):
                Two-argument function taking (a) the accumulator value kept
                between invocations and (b) an input record. It folds the
                record into the accumulator and returns the updated
                accumulator value.
        """
        super().__init__(**kwargs)
        self.op = op

    def add_to(self, function: "OpenGearFunction") -> "OpenGearFunction":
        """Append this accumulate step to an "open" Gear function.

        Args:
            function (OpenGearFunction):
                The "open" gear function to extend.

        Returns:
            OpenGearFunction:
                The function with this operation appended at the end.
        """
        return function.accumulate(self.op, **self.kwargs)
class LocalGroupBy(Operation):
    """Local LocalGroupBy operation: many-to-less (N:M) grouping of records.

    Requires two functions: an extractor and a reducer. Each output record
    consists of a grouping key and that group's reduced value.

    Attributes:
        extractor (:data:`redgrease.typing.Extractor`):
            Function extracting the grouping key from an input record.
        reducer (:data:`redgrease.typing.Reducer`):
            Function reducing each group's records to a single output record.
    """

    def __init__(
        self,
        extractor: Extractor,
        reducer: Reducer,
        **kwargs,
    ) -> None:
        """Create a LocalGroupBy operation.

        Args:
            extractor (:data:`redgrease.typing.Extractor`):
                Single-argument function; receives an input record and returns
                a string key. The key's value defines the group.
            reducer (:data:`redgrease.typing.Reducer`):
                Function reducing each group's records to one value. It takes
                (a) a key, (b) an input record and (c) an accumulator, and
                behaves like an accumulator callback except that one
                accumulator is maintained per key / group.
        """
        super().__init__(**kwargs)
        self.reducer = reducer
        self.extractor = extractor

    def add_to(self, function: "OpenGearFunction") -> "OpenGearFunction":
        """Append this localgroupby step to an "open" Gear function.

        Args:
            function (OpenGearFunction):
                The "open" gear function to extend.

        Returns:
            OpenGearFunction:
                The function with this operation appended at the end.
        """
        return function.localgroupby(self.extractor, self.reducer, **self.kwargs)
class Limit(Operation):
    """Local Limit operation: caps the number of records let through.

    Takes two numeric arguments: a starting offset into the input records
    and the maximum number of records to output.

    Attributes:
        start (int):
            Zero-based index of the first input record to let through.
        length (int):
            Maximum number of records to let through.
    """

    def __init__(self, length: int, start: int = 0, **kwargs) -> None:
        """Create a Limit operation.

        Args:
            length (int):
                Maximum number of records to output.
            start (int, optional):
                Index of the first input record to include.
                Defaults to 0.
        """
        super().__init__(**kwargs)
        self.start = start
        self.length = length

    def add_to(self, function: "OpenGearFunction") -> "OpenGearFunction":
        """Append this limit step to an "open" Gear function.

        Args:
            function (OpenGearFunction):
                The "open" gear function to extend.

        Returns:
            OpenGearFunction:
                The function with this operation appended at the end.
        """
        return function.limit(self.length, self.start, **self.kwargs)
class Collect(Operation):
    """Global Collect operation: gathers result records from every shard onto
    the originating one.
    """

    def __init__(self, **kwargs):
        """Create a Collect operation."""
        super().__init__(**kwargs)

    def add_to(self, function: "OpenGearFunction") -> "OpenGearFunction":
        """Append this collect step to an "open" Gear function.

        Args:
            function (OpenGearFunction):
                The "open" gear function to extend.

        Returns:
            OpenGearFunction:
                The function with this operation appended at the end.
        """
        return function.collect(**self.kwargs)
class Repartition(Operation):
    """Global Repartition operation: shuffles records between shards.

    Takes a single key-extractor function. The extracted key determines the
    record's new placement in the cluster (its hash slot), and the record is
    moved from its original shard to the new one.

    Attributes:
        extractor (redgrease.typing.Extractor):
            Function deciding the destination shard of an input record.
    """

    def __init__(self, extractor: Extractor, **kwargs) -> None:
        """Create a Repartition operation.

        Args:
            extractor (:data:`redgrease.typing.Extractor`):
                Single-argument function; receives an input record and returns
                a string key. The key's value determines the hash slot, and
                thereby the shard, the record should migrate to.
        """
        super().__init__(**kwargs)
        self.extractor = extractor

    def add_to(self, function: "OpenGearFunction") -> "OpenGearFunction":
        """Append this repartition step to an "open" Gear function.

        Args:
            function (OpenGearFunction):
                The "open" gear function to extend.

        Returns:
            OpenGearFunction:
                The function with this operation appended at the end.
        """
        return function.repartition(self.extractor, **self.kwargs)
class Aggregate(Operation):
    """Aggregate operation: many-to-one (N:1) reduction of records.

    A distribution-aware alternative to the local Accumulate operation.
    Records are aggregated locally before being collected, which usually
    makes it faster. It needs a zero value plus two accumulator functions —
    one local, one global. Steps::

        1. The local accumulator runs on each shard, seeded with the zero value.
        2. A global collect moves all partial results to the originating engine.
        3. The global accumulator runs locally on the originating engine.

    The output is one record holding the global accumulator value.

    Attributes:
        zero (Any):
            Initial / zero value for the accumulator variable.
        seqOp (:data:`redgrease.typing.Accumulator`):
            Local accumulator function, applied per shard.
        combOp (:data:`redgrease.typing.Accumulator`):
            Global accumulator function, applied to the local results.
    """

    def __init__(
        self,
        zero: Any,
        seqOp: Accumulator,
        combOp: Accumulator,
        **kwargs,
    ) -> None:
        """Create an Aggregate operation.

        Args:
            zero (Any):
                Initial / zero value of the accumulator variable.
            seqOp (:data:`redgrease.typing.Accumulator`):
                Applied to each input record, locally per shard. Takes
                (a) the accumulator value from previous calls and (b) an
                input record; folds the record into the accumulator and
                returns the updated accumulator value.
            combOp (:data:`redgrease.typing.Accumulator`):
                Applied to each local aggregation result (the output of
                `seqOp`). Same two-argument accumulator contract as `seqOp`.
        """
        super().__init__(**kwargs)
        self.combOp = combOp
        self.seqOp = seqOp
        self.zero = zero

    def add_to(self, function: "OpenGearFunction") -> "OpenGearFunction":
        """Append this aggregate step to an "open" Gear function.

        Args:
            function (OpenGearFunction):
                The "open" gear function to extend.

        Returns:
            OpenGearFunction:
                The function with this operation appended at the end.
        """
        return function.aggregate(self.zero, self.seqOp, self.combOp, **self.kwargs)
class AggregateBy(Operation):
    """AggregateBy operation: many-to-less (N:M) reduction of records, per key.

    Like Aggregate, but aggregating within groups. Needs an extractor, a zero
    value, and two reducer functions — local and global. Steps::

        1. Groups are derived with the extractor.
        2. The local reducer runs per shard, seeded with the zero value.
        3. A global repartition, keyed by the same extractor.
        4. The global reducer runs on each shard for its repartitioned keys.

    The output is one record per key, each holding the grouping key and that
    key's reduced value.

    Attributes:
        extractor (:data:`redgrease.typing.Extractor`):
            Function extracting the grouping key from an input record.
        zero (Any):
            Initial / zero value for the accumulator variable.
        seqOp (:data:`redgrease.typing.Accumulator`):
            Local accumulator function, applied per shard.
        combOp (:data:`redgrease.typing.Accumulator`):
            Global accumulator function, applied to the local results.
    """

    def __init__(
        self,
        extractor: Extractor,
        zero: Any,
        seqOp: Reducer,
        combOp: Reducer,
        **kwargs,
    ) -> None:
        """Create an AggregateBy operation.

        Args:
            extractor (:data:`redgrease.typing.Extractor`):
                Single-argument function; receives an input record and returns
                a string key. The key's value defines the group.
            zero (Any):
                Initial / zero value of the accumulator variable.
            seqOp (:data:`redgrease.typing.Accumulator`):
                Applied to each input record, locally per shard and group.
                Takes (a) the accumulator value from previous calls and (b) an
                input record; folds the record into the accumulator and
                returns the updated accumulator value.
            combOp (:data:`redgrease.typing.Accumulator`):
                Applied to each local aggregation result (the output of
                `seqOp`). Same two-argument accumulator contract as `seqOp`.
        """
        super().__init__(**kwargs)
        self.combOp = combOp
        self.seqOp = seqOp
        self.zero = zero
        self.extractor = extractor

    def add_to(self, function: "OpenGearFunction") -> "OpenGearFunction":
        """Append this aggregateby step to an "open" Gear function.

        Args:
            function (OpenGearFunction):
                The "open" gear function to extend.

        Returns:
            OpenGearFunction:
                The function with this operation appended at the end.
        """
        return function.aggregateby(
            self.extractor, self.zero, self.seqOp, self.combOp, **self.kwargs
        )
class GroupBy(Operation):
    """GroupBy operation: many-to-less (N:M) grouping of records.

    Similar to AggregateBy but with a single, global reducer; useful when the
    data cannot be reduced locally first. Needs two functions, an extractor
    and a reducer. Steps::

        1. A global repartition, keyed by the extractor.
        2. The reducer is invoked locally.

    The output is a locally reduced list with one record per key, each holding
    the grouping key and its accumulator value.

    Attributes:
        extractor (:data:`redgrease.typing.Extractor`):
            Function extracting the grouping key from an input record.
        reducer (:data:`redgrease.typing.Reducer`):
            Function reducing each group's records to a single value.
    """

    def __init__(
        self,
        extractor: Extractor,
        reducer: Reducer,
        **kwargs,
    ) -> None:
        """Create a GroupBy operation.

        Args:
            extractor (:data:`redgrease.typing.Extractor`):
                Single-argument function; receives an input record and returns
                a string key. The key's value defines the group.
            reducer (:data:`redgrease.typing.Reducer`):
                Function reducing each group's records to one value. It takes
                (a) a key, (b) an input record and (c) an accumulator, and
                behaves like an accumulator callback except that one
                accumulator is maintained per key / group.
        """
        super().__init__(**kwargs)
        self.reducer = reducer
        self.extractor = extractor

    def add_to(self, function: "OpenGearFunction") -> "OpenGearFunction":
        """Append this groupby step to an "open" Gear function.

        Args:
            function (OpenGearFunction):
                The "open" gear function to extend.

        Returns:
            OpenGearFunction:
                The function with this operation appended at the end.
        """
        return function.groupby(self.extractor, self.reducer, **self.kwargs)
class BatchGroupBy(Operation):
    """BatchGroupBy operation: many-to-less (N:M) grouping of records.

    Prefer the GroupBy Operation
    ----------------------------
    GroupBy is more efficient and performant; only use BatchGroupBy when the
    reducer genuinely needs the full list of records for each input key.

    Needs two functions, an extractor and a batch reducer. Steps::

        1. A global repartition, keyed by the extractor.
        2. A local localgroupby using the batch reducer.

    When done, one record per key is emitted locally with its accumulator
    value.

    Increased memory consumption
    ----------------------------
    This operation may substantially increase memory usage at runtime.

    Attributes:
        extractor (:data:`redgrease.typing.Extractor`):
            Function extracting the grouping key from an input record.
        reducer (:data:`redgrease.typing.Reducer`):
            Function reducing each group's records to a single value.
    """

    def __init__(
        self,
        extractor: Extractor,
        reducer: BatchReducer,
        **kwargs,
    ) -> None:
        """Create a BatchGroupBy operation.

        Args:
            extractor (:data:`redgrease.typing.Extractor`):
                Single-argument function; receives an input record and returns
                a string key. The key's value defines the group.
            reducer (:data:`redgrease.typing.Reducer`):
                Function reducing each group's records to one value. It takes
                (a) a key, (b) an input record and (c) an accumulator, and
                behaves like an accumulator callback except that one
                accumulator is maintained per key / group.
        """
        super().__init__(**kwargs)
        self.reducer = reducer
        self.extractor = extractor

    def add_to(self, function: "OpenGearFunction") -> "OpenGearFunction":
        """Append this batchgroupby step to an "open" Gear function.

        Args:
            function (OpenGearFunction):
                The "open" gear function to extend.

        Returns:
            OpenGearFunction:
                The function with this operation appended at the end.
        """
        return function.batchgroupby(self.extractor, self.reducer, **self.kwargs)
class Sort(Operation):
    """Sort operation: sorts the records, with configurable order.

    Steps::

        1. A global aggregate collects and combines all records.
        2. The list is sorted locally.
        3. The list is flatmapped back into records.

    Increased memory consumption
    ----------------------------
    May increase memory usage at runtime because the list is copied while
    sorting.

    Attributes:
        reverse (bool):
            ``True`` for descending order, ``False`` for ascending.
    """

    def __init__(self, reverse: bool = True, **kwargs) -> None:
        """Create a Sort operation.

        Args:
            reverse (bool, optional):
                Sort descending (higher to lower).
                Defaults to ``True``.
        """
        super().__init__(**kwargs)
        self.reverse = reverse

    def add_to(self, function: "OpenGearFunction") -> "OpenGearFunction":
        """Append this sort step to an "open" Gear function.

        Args:
            function (OpenGearFunction):
                The "open" gear function to extend.

        Returns:
            OpenGearFunction:
                The function with this operation appended at the end.
        """
        return function.sort(self.reverse, **self.kwargs)
class Distinct(Operation):
    """Distinct operation: keeps only distinct records. Takes no arguments.

    Steps::

        1. An aggregate locally reduces records to sets, which are collected
           and unionized globally.
        2. A local flatmap turns the set back into records.
    """

    def __init__(self, **kwargs):
        """Create a Distinct operation."""
        super().__init__(**kwargs)

    def add_to(self, function: "OpenGearFunction") -> "OpenGearFunction":
        """Append this distinct step to an "open" Gear function.

        Args:
            function (OpenGearFunction):
                The "open" gear function to extend.

        Returns:
            OpenGearFunction:
                The function with this operation appended at the end.
        """
        return function.distinct(**self.kwargs)
class Count(Operation):
    """Count operation: counts the input records. Takes no arguments.

    Implemented as an aggregate with a local counting accumulator and a
    global summing accumulator.
    """

    def __init__(self, **kwargs):
        """Create a Count operation."""
        super().__init__(**kwargs)

    def add_to(self, function: "OpenGearFunction") -> "OpenGearFunction":
        """Append this count step to an "open" Gear function.

        Args:
            function (OpenGearFunction):
                The "open" gear function to extend.

        Returns:
            OpenGearFunction:
                The function with this operation appended at the end.
        """
        return function.count(**self.kwargs)
class CountBy(Operation):
    """CountBy operation: counts records per grouping key.

    Takes a single extractor function; implemented as an aggregateby with a
    local counting accumulator and a global summing accumulator.

    Attributes:
        extractor (:data:`redgrease.typing.Extractor`):
            Function extracting the grouping key from an input record.
    """

    def __init__(self, extractor: Extractor, **kwargs) -> None:
        """Create a CountBy operation.

        Args:
            extractor (:data:`redgrease.typing.Extractor`):
                Single-argument function; receives an input record and returns
                a string key. The key's value defines the group.
        """
        super().__init__(**kwargs)
        self.extractor = extractor

    def add_to(self, function: "OpenGearFunction") -> "OpenGearFunction":
        """Append this countby step to an "open" Gear function.

        Args:
            function (OpenGearFunction):
                The "open" gear function to extend.

        Returns:
            OpenGearFunction:
                The function with this operation appended at the end.
        """
        return function.countby(self.extractor, **self.kwargs)
class Avg(Operation):
    """Avg operation: arithmetic average of the records.

    Takes an optional value extractor function. Steps::

        1. An aggregate locally reduces records to (sum, count) tuples that
           are combined globally.
        2. A local map computes the average from the global tuple.

    Attributes:
        extractor (:data:`redgrease.typing.Extractor`):
            Function extracting the value to average from an input record.
    """

    def __init__(self, extractor: Extractor, **kwargs) -> None:
        """Create an Avg operation.

        Args:
            extractor (:data:`redgrease.typing.Extractor`):
                Single-argument function; receives an input record and returns
                the (string) key / value used for the average.
        """
        super().__init__(**kwargs)
        self.extractor = extractor

    def add_to(self, function: "OpenGearFunction") -> "OpenGearFunction":
        """Append this avg step to an "open" Gear function.

        Args:
            function (OpenGearFunction):
                The "open" gear function to extend.

        Returns:
            OpenGearFunction:
                The function with this operation appended at the end.
        """
        return function.avg(self.extractor, **self.kwargs)
################################################################################
# GearFunctions #
################################################################################
class GearFunction(Generic[T]):
    """Abstract base of both "open" and "closed" Gear functions.

    Not meant to be instantiated directly by API users. A GearFunction is a
    chain of consecutive Operations.

    Attributes:
        operation (Operation):
            Last operation in the function's chain of operations.
        input_function (OpenGearFunction):
            Function (operation chain) feeding input records into `operation`.
            Two distinct GearFunctions may share one `input_function`.
        requirements (Iterable[str], optional):
            Requirements / dependencies (Python packages) needed to execute
            this function's operations.
    """

    def __init__(
        self,
        operation: Operation,
        input_function: "OpenGearFunction" = None,
        requirements: Optional[Iterable[str]] = None,
    ) -> None:
        """Create a GearFunction.

        Args:
            operation (Operation):
                Last operation in the function's chain of operations.
            input_function (OpenGearFunction, optional):
                Function (operation chain) feeding input records into
                `operation`.
                Defaults to ``None``.
            requirements (Optional[Iterable[str]], optional):
                Requirements / dependencies (Python packages) needed to
                execute the operation.
                Defaults to ``None``.
        """
        self.operation = operation
        self.input_function = input_function
        # Accumulate own requirements plus everything the upstream chain needs.
        reqs = set(requirements) if requirements else set()
        if input_function:
            reqs = reqs.union(input_function.requirements)
        self.requirements = reqs

    @property
    def reader(self) -> Optional[str]:
        """The reader type producing the function's initial input records.

        Returns:
            str:
                One of ``"KeysReader"``, ``"KeysOnlyReader"``,
                ``"StreamReader"``, ``"PythonReader"``, ``"ShardsIDReader"``,
                ``"CommandReader"``, or ``None`` when no reader is defined.
        """
        op = self.operation
        if isinstance(op, Reader):
            return op.reader
        return self.input_function.reader if self.input_function else None

    @property
    def supports_batch_mode(self) -> bool:
        """Whether the function can run in batch mode (closed with `run`).

        Returns:
            bool:
                ``True`` if batch mode is supported, otherwise ``False``.
        """
        batch_readers = (
            sugar.ReaderType.KeysReader,
            sugar.ReaderType.KeysOnlyReader,
            sugar.ReaderType.StreamReader,
            sugar.ReaderType.PythonReader,
            sugar.ReaderType.ShardsIDReader,
        )
        return self.reader in batch_readers

    @property
    def supports_event_mode(self) -> bool:
        """Whether the function can run in event mode (closed with `register`).

        Returns:
            bool:
                ``True`` if event mode is supported, otherwise ``False``.
        """
        event_readers = (
            sugar.ReaderType.KeysReader,
            sugar.ReaderType.StreamReader,
            sugar.ReaderType.CommandReader,
        )
        return self.reader in event_readers
class ClosedGearFunction(GearFunction[T]):
    """Closed Gear functions are GearsFunctions that have been "closed" with a
    :ref:`op_action_run` action or a :ref:`op_action_register` action.
    Closed Gear functions cannot add more :ref:`operations`, but can be executed in
    RedisGears.
    """
    def __init__(
        self,
        operation: Operation,
        input_function: "OpenGearFunction" = None,
        requirements: Optional[Iterable[str]] = None,
    ) -> None:
        """Instantiate a closed Gear function.

        Args:
            operation (Operation):
                The closing action (run or register) ending the chain.
            input_function (OpenGearFunction, optional):
                The function (chain of operations) that provides the input
                records to the `operation`. Defaults to ``None``.
            requirements (Optional[Iterable[str]], optional):
                Requirements / dependencies (Python packages) that the
                operation requires in order to execute. Defaults to ``None``.
        """
        super().__init__(
            operation, input_function=input_function, requirements=requirements
        )
    def on(
        self,
        gears_server,
        unblocking: bool = False,
        requirements: Iterable[str] = None,
        replace: bool = None,
        **kwargs,
    ):
        """Execute the function on a RedisGears.
        This is equivalent to passing the function to `Gears.pyexecute`
        Args:
            gears_server ([type]):
                Redis client / connection object.
            unblocking (bool, optional):
                Execute function unblocking, i.e. asynchronous.
                Defaults to ``False``.
            requirements (Iterable[str], optional):
                Additional requirements / dependency Python packages.
                Defaults to ``None``.
            replace (bool, optional):
                What to do if an identical trigger is already registered:
                ``None`` re-raises, ``False`` reuses the existing
                registration, ``True`` replaces it. Defaults to ``None``.
            **kwargs:
                Additional arguments forwarded to `Gears.pyexecute`.
        Returns:
            redgrease.data.ExecutionResult:
                The result of the function, just as `Gears.pyexecute`
        """
        # Accept either a plain Redis client (exposing `.gears`), a Gears
        # object, or any raw connection that Gears can wrap.
        if hasattr(gears_server, "gears"):
            gears_server = gears_server.gears
        if not hasattr(gears_server, "pyexecute"):
            from redgrease.client import Gears
            gears_server = Gears(gears_server)
        try:
            return gears_server.pyexecute(
                self, unblocking=unblocking, requirements=requirements, **kwargs
            )
        except Exception as ex:
            # TODO: This is ugly. just to keep 'redis' from being imported to "gears"
            # Matching by class name instead of isinstance avoids importing
            # the redis-dependent exception type into this module.
            if ex.__class__.__name__ != "DuplicateTriggerError":
                raise
            # If we get an error because the trigger already is registered,
            # then we check the 'replace' argument for what to do:
            # - `replace is ``None``` : Re-raise the error
            # - `replace is ``False``` : Ignore the error
            # - `replace is ``True``` : Unregister the previous and re-register the new
            if replace is None or "trigger" not in self.operation.kwargs:
                raise
            if replace is False:
                # Keep the existing registration and hand back a proxy for it.
                return gears_server._trigger_proxy(self.operation.kwargs["trigger"])
            # Find and replace the registered trigger.
            trigger = self.operation.kwargs["trigger"]
            regs = gears_server.dumpregistrations(trigger=trigger)
            if len(regs) != 1:
                # Zero or ambiguous matches: safer to re-raise than to guess.
                raise
            gears_server.unregister(regs[0].id)
            # Try registering again
            return gears_server.pyexecute(
                self, unblocking=unblocking, requirements=requirements, **kwargs
            )
class OpenGearFunction(GearFunction[InputRecord]):
"""An open Gear function is a Gear function that is not yet "closed" with a
:ref:`op_action_run` action or a :ref:`op_action_register` action.
Open Gear functions can be used to create new "open" gear functions by
applying :ref:`operations`, or it can create a closed Gear function by applying
either the :ref:`op_action_run` action or a :ref:`op_action_register` action.
"""
    def __init__(
        self,
        operation: Operation,
        input_function: "OpenGearFunction" = None,
        requirements: Optional[Iterable[str]] = None,
    ) -> None:
        """Instantiate an open (still extensible) Gear function.

        Args:
            operation (Operation):
                The last operation in the function's chain of operations.
            input_function (OpenGearFunction, optional):
                The function (chain of operations) that provides the input
                records to the `operation`. Defaults to ``None``.
            requirements (Optional[Iterable[str]], optional):
                Requirements / dependencies (Python packages) that the
                operation requires in order to execute. Defaults to ``None``.
        """
        super().__init__(
            operation, input_function=input_function, requirements=requirements
        )
def run(
self,
arg: str = None, # TODO: This can also be a Python generator
convertToStr: bool = True,
collect: bool = True,
# Helpers, all must be None
# Other Redgrease args
requirements: Iterable[str] = None,
on=None,
# Other Redis Gears args
**kwargs,
# TODO: Add all the Reader specific args here
) -> ClosedGearFunction[InputRecord]:
"""Create a "closed" function to be :ref:`op_action_run` as in "batch-mode".
Batch functions are executed once and exits once the data is
exhausted by its reader.
Args:
arg (str, optional):
An optional argument that's passed to the reader as its defaultArg.
It means the following:
- A glob-like pattern for the KeysReader and KeysOnlyReader readers.
- A key name for the StreamReader reader.
- A Python generator for the PythonReader reader.
Defaults to ``None``.
convertToStr (bool, optional):
When ``True``, adds a map operation to the flow's end that stringifies
records.
Defaults to ``False``.
collect (bool, optional):
When ``True`` adds a collect operation to flow's end.
Defaults to ``False``.
requirements (Iterable[str], optional):
Additional requirements / dependency Python packages.
Defaults to ``None``.
on (redis.Redis):
Immediately execute the function on this RedisGears system.
**kwargs:
Additional parameters to the run operation.
Returns:
Union[ClosedGearFunction, redgrease.data.ExecutionResult]:
A new closed batch function, if `on` is **not** specified.
An execution result, if `on` **is** specified.
Raises:
TypeError:
If the function does not support batch mode.
"""
if not self.supports_batch_mode:
raise TypeError(f"Batch mode (run) is not supported for '{self.reader}'")
gear_fun: ClosedGearFunction = ClosedGearFunction[InputRecord](
Run(arg=arg, convertToStr=convertToStr, collect=collect, **kwargs),
input_function=self,
requirements=requirements,
)
if redgrease.GEARS_RUNTIME:
return redgrease.runtime.run(gear_fun, redgrease.GearsBuilder)
if on:
return gear_fun.on(on)
return gear_fun
def register( # noqa: C901
self,
prefix: str = "*",
convertToStr: bool = True,
collect: bool = True,
# Helpers, all must be None
mode: str = None,
onRegistered: Registrator = None,
eventTypes: Iterable[str] = None,
keyTypes: Iterable[str] = None,
readValue: bool = None,
batch: int = None,
duration: int = None,
onFailedPolicy: str = None,
onFailedRetryInterval: int = None,
trimStream: bool = None,
trigger: str = None, # Reader Specific: CommandReader
# Other Redgrease args
requirements: Iterable[str] = None,
on=None,
# Other Redis Gears args
**kwargs,
# TODO: Add all the Reader specific args here
) -> ClosedGearFunction[InputRecord]:
"""Create a "closed" function to be :ref:`op_action_register` 'ed as an
event-triggered function.
Event functions are executed each time an event arrives.
Each time it is executed, the function operates on the event's
data and once done is suspended until its future invocations by
new events.
Args:
prefix (str, optional):
Key prefix pattern to match on.
Not relevant for 'CommandReader' readers (see 'trigger').
Defaults to ``"*"``.
convertToStr (bool, optional):
When ``True`` adds a map operation to the flow's end that stringifies
records.
Defaults to ``True``.
collect (bool, optional):
When ``True`` adds a collect operation to flow's end.
Defaults to ``False``.
mode (str, optional):
The execution mode of the function.
Can be one of:
- ``"async"``:
Execution will be asynchronous across the entire cluster.
- ``"async_local"``:
Execution will be asynchronous and restricted to the handling shard.
- ``"sync"``:
Execution will be synchronous and local.
Defaults to ``"async"``.
onRegistered (Registrator, optional):
A function that's called on each shard upon function registration.
It is a good place to initialize non-serializeable objects such as
network connections.
Defaults to ``None``.
eventTypes (Iterable[str], optional):
For KeysReader only.
A whitelist of event types that trigger execution when the KeysReader
are used. The list may contain one or more:
- Any Redis or module command
- Any Redis event
Defaults to ``None``.
keyTypes (Iterable[str], optional):
For KeysReader and KeysOnlyReader only.
A whitelist of key types that trigger execution when using the
KeysReader or KeysOnlyReader readers.
The list may contain one or more from the following:
- Redis core types:
``"string"``, ``"hash"``, ``"list"``, ``"set"``, ``"zset"`` or
``"stream"``
- Redis module types:
``"module"``
Defaults to ``None``.
readValue (bool, optional):
For KeysReader only.
When ``False`` the value will not be read, so the 'type' and 'value'
of the record will be set to ``None``.
Defaults to ``True``.
batch (int, optional):
For StreamReader only.
The number of new messages that trigger execution.
Defaults to 1.
duration (int, optional):
For StreamReader only.
The time to wait before execution is triggered, regardless of the batch
size (0 for no duration).
Defaults to 0.
onFailedPolicy (str, optional):
For StreamReader only.
The policy for handling execution failures.
May be one of:
- ``"continue"``:
Ignores a failure and continues to the next execution.
This is the default policy.
- ``"abort"``:
Stops further executions.
- ``"retry"``:
Retries the execution after an interval specified with
onFailedRetryInterval (default is one second).
Defaults to ``"continue"``.
onFailedRetryInterval (int, optional):
For StreamReader only.
The interval (in milliseconds) in which to retry in case onFailedPolicy
is 'retry'.
Defaults to 1.
trimStream (bool, optional):
For StreamReader only.
When ``True`` the stream will be trimmed after execution
Defaults to ``True``.
trigger (str):
For 'CommandReader' only, and mandatory.
The trigger string that will trigger the function.
requirements (Iterable[str], optional):
Additional requirements / dependency Python packages.
Defaults to ``None``.
on (redis.Redis):
Immediately execute the function on this RedisGears system.
**kwargs:
Additional parameters to the register operation.
Returns:
Union[ClosedGearFunction, redgrease.data.ExecutionResult]:
A new closed event function, if `on` is **not** specified.
An execution result, if `on` **is** specified.
Raises:
TypeError:
If the function does not support event mode.
"""
if mode is not None:
kwargs["mode"] = mode
if onRegistered is not None:
kwargs["onRegistered"] = onRegistered
if not self.supports_event_mode:
raise TypeError(f"Event mode (run) is not supported for '{self.reader}'")
if eventTypes is not None:
kwargs["eventTypes"] = list(eventTypes)
if keyTypes is not None:
kwargs["keyTypes"] = list(keyTypes)
if readValue is not None:
kwargs["readValue"] = readValue
if batch is not None:
kwargs["batch"] = batch
if duration is not None:
kwargs["duration"] = duration
if onFailedPolicy is not None:
kwargs["onFailedPolicy"] = onFailedPolicy
if onFailedRetryInterval is not None:
kwargs["onFailedRetryInterval"] = onFailedRetryInterval
if trimStream is not None:
kwargs["trimStream"] = trimStream
if trigger is not None:
kwargs["trigger"] = trigger
replace = kwargs.pop("replace", None)
gear_fun = ClosedGearFunction[InputRecord](
Register(
prefix=prefix,
convertToStr=convertToStr,
collect=collect,
**kwargs,
),
input_function=self,
requirements=requirements,
)
if redgrease.GEARS_RUNTIME:
return redgrease.runtime.run(gear_fun, redgrease.GearsBuilder)
if on:
return gear_fun.on(on, replace=replace)
return gear_fun
def map(
self,
op: Mapper[
InputRecord,
OutputRecord,
],
# Other Redgrease args
requirements: Iterable[str] = None,
# Other Redis Gears args
**kwargs,
) -> "OpenGearFunction[OutputRecord]":
"""Instance-local :ref:`op_map` operation that performs a one-to-one (1:1) mapping of
records.
Args:
op (:data:`redgrease.typing.Mapper`):
Function to map on the input records.
The function must take one argument as input (input record) and
return something as an output (output record).
requirements (Iterable[str], optional):
Additional requirements / dependency Python packages.
Defaults to ``None``.
**kwargs:
Additional parameters to the :ref:`op_map` operation.
Returns:
OpenGearFunction:
A new "open" gear function with a :ref:`op_map` operation as last step.
"""
op = redgrease.utils.passfun(op)
return OpenGearFunction(
Map(op=op, **kwargs),
input_function=self,
requirements=requirements,
)
def flatmap(
self,
op: Expander[InputRecord, OutputRecord] = None,
# Other Redgrease args
requirements: Iterable[str] = None,
# Other Redis Gears args
**kwargs,
) -> "OpenGearFunction[Iterable[OutputRecord]]":
"""Instance-local :ref:`op_flatmap` operation that performs one-to-many (1:N) mapping
of records.
Args:
op (:data:`redgrease.typing.Expander`, optional):
Function to map on the input records.
The function must take one argument as input (input record) and
return an iterable as an output (output records).
Defaults to the 'identity-function', I.e. if input is an iterable will
be expanded.
requirements (Iterable[str], optional):
Additional requirements / dependency Python packages.
Defaults to ``None``.
**kwargs:
Additional parameters to the :ref:`op_flatmap` operation.
Returns:
OpenGearFunction:
A new "open" gear function with a :ref:`op_flatmap` operation as last
step.
"""
op = redgrease.utils.passfun(op)
return OpenGearFunction(
FlatMap(op=op, **kwargs),
input_function=self,
requirements=requirements,
)
def foreach(
self,
op: Processor[InputRecord],
# Other Redgrease args
requirements: Iterable[str] = None,
# Other Redis Gears args
**kwargs,
) -> "OpenGearFunction[InputRecord]":
"""Instance-local :ref:`op_foreach` operation performs one-to-the-same (1=1) mapping.
Args:
op (:data:`redgrease.typing.Processor`):
Function to run on each of the input records.
The function must take one argument as input (input record) and
should not return anything.
requirements (Iterable[str], optional):
Additional requirements / dependency Python packages.
Defaults to ``None``.
**kwargs:
Additional parameters to the :ref:`op_foreach` operation.
Returns:
OpenGearFunction:
A new "open" gear function with a :ref:`op_foreach` operation as last
step.
"""
op = redgrease.utils.passfun(op)
return OpenGearFunction(
ForEach(op=op, **kwargs),
input_function=self,
requirements=requirements,
)
def filter(
self,
op: Filterer[InputRecord] = None,
# Other Redgrease args
requirements: Iterable[str] = None,
# Other Redis Gears args
**kwargs,
) -> "OpenGearFunction[InputRecord]":
"""Instance-local :ref:`op_filter` operation performs one-to-zero-or-one (1:bool)
filtering of records.
Args:
op (:data:`redgrease.typing.Filterer`, optional):
Function to apply on the input records, to decide which ones to keep.
The function must take one argument as input (input record) and
return a bool. The input records evaluated to ``True`` will be kept as
output records.
Defaults to the 'identity-function', i.e. records are filtered based on
their own trueness or falseness.
requirements (Iterable[str], optional):
Additional requirements / dependency Python packages.
Defaults to ``None``.
**kwargs:
Additional parameters to the :ref:`op_filter` operation.
Returns:
OpenGearFunction:
A new "open" gear function with a :ref:`op_filter` operation as last
step.
"""
op = redgrease.utils.passfun(op)
return OpenGearFunction(
Filter(op=op, **kwargs),
input_function=self,
requirements=requirements,
)
def accumulate(
self,
op: Accumulator[T, InputRecord] = None,
# Other Redgrease args
requirements: Iterable[str] = None,
# Other Redis Gears args
**kwargs,
) -> "OpenGearFunction[T]":
"""Instance-local :ref:`op_accumulate` operation performs many-to-one mapping (N:1) of
records.
Args:
op (:data:`redgrease.typing.Accumulator`, optional):
Function to to apply on the input records.
The function must take two arguments as input:
- An accumulator value, and
- The input record.
It should aggregate the input record into the accumulator variable,
which stores the state between the function's invocations.
The function must return the accumulator's updated value.
Defaults to a list accumulator, I.e. the output will be a list of
all inputs.
requirements (Iterable[str], optional):
Additional requirements / dependency Python packages.
Defaults to ``None``.
**kwargs:
Additional parameters to the :ref:`op_accumulate` operation.
Returns:
OpenGearFunction:
A new "open" gear function with :ref:`op_accumulate` operation as last
step.
"""
op = redgrease.utils.passfun(op, default=_default_accumulator)
return OpenGearFunction(
Accumulate(op=op, **kwargs),
input_function=self,
requirements=requirements,
)
def localgroupby(
self,
extractor: Extractor[InputRecord, Key] = None,
reducer: Reducer[Key, T, InputRecord] = None,
# Other Redgrease args
requirements: Iterable[str] = None,
# Other Redis Gears args
**kwargs,
) -> "OpenGearFunction[Dict[Key, T]]":
"""Instance-local :ref:`op_localgroupby` operation performs many-to-less mapping (N:M)
of records.
Args:
extractor (:data:`redgrease.typing.Extractor`, optional):
Function to apply on the input records, to extact the grouping key.
The function must take one argument as input (input record) and
return a string (key).
The groups are defined by the value of the key.
Defaults to the hash of the input.
reducer (:data:`redgrease.typing.Reducer`, optional):
Function to apply on the records of each group, to reduce to a single
value (per group).
The function must take (a) a key, (b) an input record and (c) a
variable that's called an accumulator.
It performs similarly to the accumulator callback, with the difference
being that it maintains an accumulator per reduced key / group.
Defaults to a list accumulator, I.e. the output will be a list of
all inputs, for each group.
requirements (Iterable[str], optional):
Additional requirements / dependency Python packages.
Defaults to ``None``.
**kwargs:
Additional parameters to the :ref:`op_localgroupby` operation.
Returns:
OpenGearFunction:
A new "open" gear function with a :ref:`op_localgroupby` operation as
last step.
"""
extractor = redgrease.utils.passfun(extractor, default=_default_extractor)
reducer = redgrease.utils.passfun(reducer, default=_default_reducer)
return OpenGearFunction(
LocalGroupBy(extractor=extractor, reducer=reducer, **kwargs),
input_function=self,
requirements=requirements,
)
def limit(
self,
length: int,
start: int = 0,
# Other Redis Gears args
**kwargs,
) -> "OpenGearFunction[InputRecord]":
"""Instance-local :ref:`op_limit` operation limits the number of records.
Args:
length (int):
The maximum number of records.
start (int, optional):
The index of the first input record.
Defaults to 0.
requirements (Iterable[str], optional):
Additional requirements / dependency Python packages.
Defaults to ``None``.
**kwargs:
Additional parameters to the :ref:`op_limit` operation.
Returns:
OpenGearFunction:
A new "open" gear function with a :ref:`op_limit` operation as last
step.
"""
return OpenGearFunction(
Limit(length=length, start=start, **kwargs),
input_function=self,
)
def collect(self, **kwargs) -> "OpenGearFunction[InputRecord]":
"""Cluster-global :ref:`op_collect` operation collects the result records.
Args:
**kwargs:
Additional parameters to the :ref:`op_collect` operation.
Returns:
OpenGearFunction:
A new "open" gear function with a :ref:`op_collect` operation as last
step.
"""
return OpenGearFunction(
Collect(**kwargs),
input_function=self,
)
def repartition(
self,
extractor: Extractor[InputRecord, Hashable],
# Other Redgrease args
requirements: Iterable[str] = None,
# Other Redis Gears args
**kwargs,
) -> "OpenGearFunction[InputRecord]":
"""Cluster-global :ref:`op_repartition` operation repartitions the records by
shuffling
them between shards.
Args:
extractor (:data:`Extractor`):
Function that takes a record and calculates a key that is used to
determine the hash slot, and consequently the shard, that the record
should migrate to to.
The function must take one argument as input (input record) and
return a string (key).
The hash slot, and consequently the destination shard, is determined by
the value of the key.
requirements (Iterable[str], optional):
Additional requirements / dependency Python packages.
Defaults to ``None``.
**kwargs:
Additional parameters to the :ref:`op_repartition` operation.
Returns:
OpenGearFunction:
A new "open" gear function with a :ref:`op_repartition` operation as
last step.
"""
return OpenGearFunction(
Repartition(extractor=extractor, **kwargs),
input_function=self,
requirements=requirements,
)
def aggregate(
self,
zero: T = None,
seqOp: Accumulator[T, InputRecord] = None,
combOp: Accumulator[T, T] = None,
# Other Redgrease args
requirements: Iterable[str] = None,
# Other Redis Gears args
**kwargs,
) -> "OpenGearFunction[T]":
"""Distributed :ref:`op_aggregate` operation perform an aggregation on local
data then a global aggregation on the local aggregations.
Args:
zero (Any, optional):
The initial / zero value of the accumulator variable.
Defaults to an empty list.
seqOp (:data:`redgrease.typing.Accumulator`, optional):
A function to be applied on each of the input records, locally per
shard.
It must take two parameters:
- an accumulator value, from previous calls
- an input record
The function aggregates the input into the accumulator variable,
which stores the state between the function's invocations.
The function must return the accumulator's updated value.
Defaults to addition, if 'zero' is a number and to a list accumulator
if 'zero' is a list.
combOp (:data:`redgrease.typing.Accumulator`, optional):
A function to be applied on each of the aggregated results of the local
aggregation (i.e. the output of `seqOp`).
It must take two parameters:
- an accumulator value, from previous calls
- an input record
The function aggregates the input into the accumulator variable,
which stores the state between the function's invocations.
The function must return the accumulator's updated value.
Defaults to re-use the `seqOp` function.
requirements (Iterable[str], optional):
Additional requirements / dependency Python packages.
Defaults to ``None``.
**kwargs:
Additional parameters to the ref:`op_aggregate` operation.
Returns:
OpenGearFunction:
A new "open" gear function with a ref:`op_aggregate` operation as last
step.
"""
_zero = zero if zero is not None else []
if not seqOp:
if isinstance(_zero, numbers.Number):
seqOp = operator.add
elif isinstance(_zero, list):
seqOp = _default_accumulator
combOp = combOp or operator.add
else:
raise ValueError(
"No operatod provided, and unable to deduce a reasonable default."
)
seqOp = redgrease.utils.passfun(seqOp)
combOp = redgrease.utils.passfun(combOp, default=seqOp)
return OpenGearFunction(
Aggregate(zero=_zero, seqOp=seqOp, combOp=combOp, **kwargs),
input_function=self,
requirements=requirements,
)
def aggregateby(
self,
extractor: Extractor[InputRecord, Key] = None,
zero: T = None,
seqOp: Reducer[Key, T, InputRecord] = None,
combOp: Reducer[Key, T, T] = None,
# Other Redgrease args
requirements: Iterable[str] = None,
# Other Redis Gears args
**kwargs,
) -> "OpenGearFunction[Dict[Key, T]]":
"""Distributed :ref:`op_aggregateby` operation, behaves like aggregate, but
separated on each key, extracted using the extractor.
Args:
extractor (:data:`redgrease.typing.Extractor`, optional):
Function to apply on the input records, to extact the grouping key.
The function must take one argument as input (input record) and
return a string (key).
The groups are defined by the value of the key.
Defaults to the hash of the input.
zero (Any, optional):
The initial / zero value of the accumulator variable.
Defaults to an empty list.
seqOp (:data:`redgrease.typing.Accumulator`, optional):
A function to be applied on each of the input records, locally per
shard and group.
It must take two parameters:
- an accumulator value, from previous calls
- an input record
The function aggregates the input into the accumulator variable,
which stores the state between the function's invocations.
The function must return the accumulator's updated value.
Defaults to a list reducer.
combOp (:data:`redgrease.typing.Accumulator`):
A function to be applied on each of the aggregated results of the local
aggregation (i.e. the output of `seqOp`).
It must take two parameters:
- an accumulator value, from previous calls
- an input record
The function aggregates the input into the accumulator variable,
which stores the state between the function's invocations.
The function must return the accumulator's updated value.
Defaults to re-use the `seqOp` function.
requirements (Iterable[str], optional):
Additional requirements / dependency Python packages.
Defaults to ``None``.
**kwargs:
Additional parameters to the :ref:`op_aggregateby` operation.
Returns:
OpenGearFunction:
A new "open" gear function with a :ref:`op_aggregateby` operation as
last step.
"""
_zero = zero if zero is not None else []
extractor = redgrease.utils.passfun(extractor, default=_default_extractor)
seqOp = redgrease.utils.passfun(seqOp, _default_reducer)
combOp = redgrease.utils.passfun(combOp, seqOp)
return OpenGearFunction(
AggregateBy(
extractor=extractor, zero=_zero, seqOp=seqOp, combOp=combOp, **kwargs
),
input_function=self,
requirements=requirements,
)
def groupby(
self,
extractor: Extractor[InputRecord, Key] = None,
reducer: Reducer[Key, T, InputRecord] = None,
# Other Redgrease args
requirements: Iterable[str] = None,
# Other Redis Gears args
**kwargs,
) -> "OpenGearFunction[Dict[Key, T]]":
"""Cluster-local :ref:`op_groupby` operation performing a many-to-less (N:M)
grouping of records.
Args:
extractor (:data:`redgrease.typing.Extractor`, optional):
Function to apply on the input records, to extact the grouping key.
The function must take one argument as input (input record) and
return a string (key).
The groups are defined by the value of the key.
Defaults to the hash of the input.
reducer (:data:`redgrease.typing.Reducer`, optional):
Function to apply on the records of each group, to reduce to a single
value (per group).
The function must take (a) a key, (b) an input record and (c) a
variable that's called an accumulator.
It performs similarly to the accumulator callback, with the difference
being that it maintains an accumulator per reduced key / group.
Defaults to a list reducer.
requirements (Iterable[str], optional):
Additional requirements / dependency Python packages.
Defaults to ``None``.
**kwargs:
Additional parameters to the :ref:`op_groupby` operation.
Returns:
OpenGearFunction:
A new "open" gear function with a :ref:`op_groupby` operation as last
step.
"""
extractor = redgrease.utils.passfun(extractor, default=_default_extractor)
reducer = redgrease.utils.passfun(reducer, default=_default_reducer)
return OpenGearFunction(
GroupBy(extractor=extractor, reducer=reducer, **kwargs),
input_function=self,
requirements=requirements,
)
def batchgroupby(
self,
extractor: Extractor[InputRecord, Key] = None,
reducer: BatchReducer[Key, T, InputRecord] = None,
# Other Redgrease args
requirements: Iterable[str] = None,
# Other Redis Gears args
**kwargs,
) -> "OpenGearFunction[Dict[Key, T]]":
"""Cluster-local :ref:`op_groupby` operation, performing a many-to-less (N:M)
grouping of records.
Note: Using this operation may cause a substantial increase in memory usage
during runtime. Consider using the GroupBy
Args:
extractor (:data:`redgrease.typing.Extractor`, optional):
Function to apply on the input records, to extact the grouping key.
The function must take one argument as input (input record) and
return a string (key).
The groups are defined by the value of the key.
Defaults to the hash of the input.
reducer (:data:`redgrease.typing.Reducer`):
Function to apply on the records of each group, to reduce to a single
value (per group).
The function must take (a) a key, (b) an input record and (c) a
variable that's called an accumulator.
It performs similarly to the accumulator callback, with the difference
being that it maintains an accumulator per reduced key / group.
Default is the length (`len`) of the input.
**kwargs:
Additional parameters to the :ref:`op_groupby` operation.
Returns:
OpenGearFunction:
A new "open" gear function with a :ref:`op_groupby` operation as last
step.
"""
extractor = redgrease.utils.passfun(extractor, default=_default_extractor)
reducer = redgrease.utils.passfun(reducer, default=_default_batch_reducer)
return OpenGearFunction(
BatchGroupBy(extractor=extractor, reducer=reducer, **kwargs),
input_function=self,
requirements=requirements,
)
def sort(
self,
reverse: bool = True,
# Other Redgrease args
requirements: Iterable[str] = None,
# Other Redis Gears args
**kwargs,
) -> "OpenGearFunction[InputRecord]":
""":ref:`op_sort` the records
Args:
reverse (bool, optional):
Sort in descending order (higher to lower).
Defaults to ``True``.
requirements (Iterable[str], optional):
Additional requirements / dependency Python packages.
Defaults to ``None``.
**kwargs:
Additional parameters to the :ref:`op_sort` operation.
Returns:
OpenGearFunction:
A new "open" gear function with a :ref:`op_sort` operation as last
step.
"""
return OpenGearFunction(
Sort(reverse=reverse, **kwargs),
input_function=self,
requirements=requirements,
)
def distinct(self, **kwargs) -> "OpenGearFunction[InputRecord]":
"""Keep only the :ref:`op_distinct` values in the data.
Args:
**kwargs:
Additional parameters to the :ref:`op_distinct` operation.
Returns:
OpenGearFunction:
A new "open" gear function with a :ref:`op_distinct` operation as last
step.
"""
return OpenGearFunction(
Distinct(**kwargs),
input_function=self,
)
def count(self, **kwargs) -> "OpenGearFunction[int]":
""":ref:`op_count` the number of records in the execution.
Args:
**kwargs:
Additional parameters to the :ref:`op_count` operation.
Returns:
OpenGearFunction:
A new "open" gear function with a :ref:`op_count` operation as last
step.
"""
return OpenGearFunction(
Count(**kwargs),
input_function=self,
)
def countby(
self,
extractor: Extractor[InputRecord, Hashable] = lambda x: str(x),
# Other Redgrease args
requirements: Iterable[str] = None,
# Other Redis Gears args
**kwargs,
) -> "OpenGearFunction[Dict[Hashable, int]]":
"""Distributed :ref:`op_countby` operation counting the records grouped by key.
Args:
extractor (:data:`redgrease.typing.Extractor`):
Function to apply on the input records, to extact the grouping key.
The function must take one argument as input (input record) and
return a string (key).
The groups are defined by the value of the key.
Defaults to ``lambda x: str(x)``.
requirements (Iterable[str], optional):
Additional requirements / dependency Python packages.
Defaults to ``None``.
**kwargs:
Additional parameters to the :ref:`op_countby` operation.
Returns:
OpenGearFunction:
A new "open" gear function with a :ref:`op_countby` operation as last
step.
"""
return OpenGearFunction(
CountBy(extractor=extractor, **kwargs),
input_function=self,
requirements=requirements,
)
def avg(
self,
extractor: Extractor[InputRecord, float] = lambda x: float(
x if isinstance(x, (int, float, str)) else str(x)
),
# Other Redgrease args
requirements: Iterable[str] = None,
# Other Redis Gears args
**kwargs,
) -> "OpenGearFunction[float]":
"""Distributed :ref:`op_avg` operation, calculating arithmetic average
of the records.
Args:
extractor (:data:`redgrease.typing.Extractor`):
Function to apply on the input records, to extact the grouping key.
The function must take one argument as input (input record) and
return a string (key).
The groups are defined by the value of the key.
Defaults to ``lambda x: float(x)``.
requirements (Iterable[str], optional):
Additional requirements / dependency Python packages.
Defaults to ``None``.
**kwargs:
Additional parameters to the :ref:`op_avg` operation.
Returns:
OpenGearFunction:
A new "open" gear function with an :ref:`op_avg` operation as last
step.
"""
return OpenGearFunction(
Avg(extractor=extractor, **kwargs),
input_function=self,
requirements=requirements,
)
| 35.577075 | 94 | 0.584002 |
e316011986842bd359ce24ef66689e5d67490fd6 | 18,018 | py | Python | rtcclient/template.py | sadikkuzu-mba/rtcclient | 831d9eef57f0daca86728ea38743925f6db017fc | [
"Apache-2.0"
] | 37 | 2015-07-31T03:21:55.000Z | 2021-08-04T10:30:13.000Z | rtcclient/template.py | sadikkuzu-mba/rtcclient | 831d9eef57f0daca86728ea38743925f6db017fc | [
"Apache-2.0"
] | 94 | 2015-07-29T14:27:13.000Z | 2022-03-10T16:53:30.000Z | rtcclient/template.py | sadikkuzu-mba/rtcclient | 831d9eef57f0daca86728ea38743925f6db017fc | [
"Apache-2.0"
] | 37 | 2015-11-11T15:06:39.000Z | 2022-03-01T12:21:48.000Z | from rtcclient.base import RTCBase
import logging
import xmltodict
import os
import jinja2
import jinja2.meta
from rtcclient import exception
from rtcclient import _search_path
import six
from rtcclient.utils import remove_empty_elements
from xml.sax.saxutils import escape
class Templater(RTCBase):
"""A wrapped class used to generate and render templates
from some copied workitems
:param rtc_obj: a reference to the
:class:`rtcclient.client.RTCClient` object
:param searchpath: the folder to store your templates.
If `None`, the default search path
(/your/site-packages/rtcclient/templates) will be loaded automatically.
"""
log = logging.getLogger("template.Templater")
def __init__(self, rtc_obj, searchpath=None):
self.rtc_obj = rtc_obj
RTCBase.__init__(self, self.rtc_obj.url)
if searchpath is None:
self.searchpath = _search_path
else:
self.searchpath = searchpath
self.loader = jinja2.FileSystemLoader(searchpath=self.searchpath)
self.environment = jinja2.Environment(loader=self.loader,
trim_blocks=True)
def __str__(self):
return "Templater for %s" % self.rtc_obj
def get_rtc_obj(self):
return self.rtc_obj
def render(self, template, **kwargs):
"""Renders the template
:param template: The template to render.
The template is actually a file, which is usually generated
by :class:`rtcclient.template.Templater.getTemplate`
and can also be modified by user accordingly.
:param kwargs: The `kwargs` dict is used to fill the template.
These two parameter are mandatory:
* description
* title
Some of below parameters (which may not be included in some
customized workitem type ) are mandatory if `keep` (parameter in
:class:`rtcclient.template.Templater.getTemplate`) is set to
`False`; Optional for otherwise.
* teamArea (Team Area)
* ownedBy (Owned By)
* plannedFor(Planned For)
* severity(Severity)
* priority(Priority)
* filedAgainst(Filed Against)
Actually all these needed keywords/attributes/fields can be
retrieved by :class:`rtcclient.template.Templater.listFields`
:return: the :class:`string` object
:rtype: string
"""
if kwargs.get("title", None) is not None:
kwargs["title"] = escape(kwargs["title"])
if kwargs.get("description", None) is not None:
kwargs["description"] = escape(kwargs["description"])
try:
temp = self.environment.get_template(template)
return temp.render(**kwargs)
except AttributeError:
err_msg = "Invalid value for 'template'"
self.log.error(err_msg)
raise exception.BadValue(err_msg)
def renderFromWorkitem(self, copied_from, keep=False,
encoding="UTF-8", **kwargs):
"""Render the template directly from some to-be-copied
:class:`rtcclient.workitem.Workitem` without saving to a file
:param copied_from: the to-be-copied
:class:`rtcclient.workitem.Workitem` id
:param keep (default is False): If `True`, some of the below fields
will remain unchangeable with the to-be-copied
:class:`rtcclient.workitem.Workitem`.
Otherwise for `False`.
* teamArea (Team Area)
* ownedBy (Owned By)
* plannedFor(Planned For)
* severity(Severity)
* priority(Priority)
* filedAgainst(Filed Against)
:param encoding (default is "UTF-8"): coding format
:param kwargs: The `kwargs` dict is used to fill the template.
These two parameter are mandatory:
* description
* title
Some of below parameters (which may not be included in some
customized workitem type ) are mandatory if `keep` is set to
`False`; Optional for otherwise.
* teamArea (Team Area)
* ownedBy (Owned By)
* plannedFor(Planned For)
* severity(Severity)
* priority(Priority)
* filedAgainst(Filed Against)
Actually all these needed keywords/attributes/fields can be
retrieved by
:class:`rtcclient.template.Templater.listFieldsFromWorkitem`
:return: the :class:`string` object
:rtype: string
"""
temp = jinja2.Template(self.getTemplate(copied_from,
template_name=None,
template_folder=None,
keep=keep,
encoding=encoding))
rendered_data = temp.render(**kwargs)
return remove_empty_elements(rendered_data)
def listFields(self, template):
"""List all the attributes to be rendered from the template file
:param template: The template to render.
The template is actually a file, which is usually generated
by :class:`rtcclient.template.Templater.getTemplate` and can also
be modified by user accordingly.
:return: a :class:`set` contains all the needed attributes
:rtype: set
"""
try:
temp_source = self.environment.loader.get_source(self.environment,
template)
return self.listFieldsFromSource(temp_source)
except AttributeError:
err_msg = "Invalid value for 'template'"
self.log.error(err_msg)
raise exception.BadValue(err_msg)
def listFieldsFromWorkitem(self, copied_from, keep=False):
"""List all the attributes to be rendered directly from some
to-be-copied :class:`rtcclient.workitem.Workitem`
:param copied_from: the to-be-copied
:class:`rtcclient.workitem.Workitem` id
:param keep: (default is False) If `True`, some of below parameters
(which will not be included in some customized
:class:`rtcclient.workitem.Workitem` type ) will remain
unchangeable with the to-be-copied
:class:`rtcclient.workitem.Workitem`.
Otherwise for `False`.
* teamArea (Team Area)
* ownedBy (Owned By)
* plannedFor(Planned For)
* severity(Severity)
* priority(Priority)
* filedAgainst(Filed Against)
:return: a :class:`set` contains all the needed attributes
:rtype: set
"""
temp_source = self.getTemplate(copied_from,
template_name=None,
template_folder=None,
keep=keep)
return self.listFieldsFromSource(temp_source)
def listFieldsFromSource(self, template_source):
"""List all the attributes to be rendered directly from template
source
:param template_source: the template source (usually represents the
template content in string format)
:return: a :class:`set` contains all the needed attributes
:rtype: set
"""
ast = self.environment.parse(template_source)
return jinja2.meta.find_undeclared_variables(ast)
def getTemplate(self, copied_from, template_name=None,
template_folder=None, keep=False, encoding="UTF-8"):
"""Get template from some to-be-copied
:class:`rtcclient.workitem.Workitem`
The resulting XML document is returned as a :class:`string`, but if
`template_name` (a string value) is specified,
it is written there instead.
:param copied_from: the to-be-copied
:class:`rtcclient.workitem.Workitem` id (integer or
equivalent string)
:param template_name: the template file name
:param template_folder: the folder to store template file
:param keep: (default is False) If `True`, some of below parameters
(which may not be included in some customized
:class:`rtcclient.workitem.Workitem` type ) will remain
unchangeable with the to-be-copied
:class:`rtcclient.workitem.Workitem`.
Otherwise for `False`.
* teamArea (Team Area)
* ownedBy (Owned By)
* plannedFor(Planned For)
* severity(Severity)
* priority(Priority)
* filedAgainst(Filed Against)
:param encoding: (default is "UTF-8") coding format
:return:
* a :class:`string` object: if `template_name` is not specified
* write the template to file `template_name`: if `template_name` is
specified
"""
try:
if isinstance(copied_from, bool) or isinstance(copied_from, float):
raise ValueError()
if isinstance(copied_from, six.string_types):
copied_from = int(copied_from)
if not isinstance(copied_from, int):
raise ValueError()
except ValueError:
err_msg = "Please input a valid workitem id you want to copy from"
self.log.error(err_msg)
raise exception.BadValue(err_msg)
self.log.info("Fetch the template from <Workitem %s> with [keep]=%s",
copied_from, keep)
if template_folder is None:
template_folder = self.searchpath
# identify whether output to a file
if template_name is not None:
template_file_path = os.path.join(template_folder,
template_name)
output = open(template_file_path, "w")
else:
template_file_path = None
output = None
workitem_url = "/".join([self.url,
"oslc/workitems/%s" % copied_from])
resp = self.get(workitem_url,
verify=False,
proxies=self.rtc_obj.proxies,
headers=self.rtc_obj.headers)
raw_data = xmltodict.parse(resp.content)
# pre-adjust the template:
# remove some attribute to avoid being overwritten, which will only be
# generated when the workitem is created
wk_raw_data = raw_data.get("oslc_cm:ChangeRequest")
self._remove_long_fields(wk_raw_data)
# Be cautious when you want to modify these fields
# These fields have been tested as must-removed one
remove_fields = ["@rdf:about",
"dc:created",
"dc:creator",
"dc:identifier",
"rtc_cm:contextId",
"rtc_cm:comments",
"rtc_cm:state",
"dc:type",
"rtc_cm:subscribers",
"dc:modified",
"rtc_cm:modifiedBy",
"rtc_cm:resolved",
"rtc_cm:resolvedBy",
"rtc_cm:resolution",
"rtc_cm:startDate",
"rtc_cm:timeSpent",
"rtc_cm:progressTracking",
"rtc_cm:projectArea",
"oslc_cm:relatedChangeManagement",
"oslc_cm:trackedWorkItem",
"oslc_cm:tracksWorkItem",
"rtc_cm:timeSheet",
"oslc_pl:schedule"]
for remove_field in remove_fields:
try:
wk_raw_data.pop(remove_field)
self.log.debug("Successfully remove field [%s] from the "
"template originated from <Workitem %s>",
remove_field,
copied_from)
except:
self.log.warning("No field named [%s] in this template "
"from <Workitem %s>", remove_field,
copied_from)
continue
wk_raw_data["dc:description"] = "{{ description }}"
wk_raw_data["dc:title"] = "{{ title }}"
if keep:
if template_file_path:
self.log.info("Writing the template to file %s",
template_file_path)
return xmltodict.unparse(raw_data, output=output,
encoding=encoding,
pretty=True)
replace_fields = [("rtc_cm:teamArea", "{{ teamArea }}"),
("rtc_cm:ownedBy", "{{ ownedBy }}"),
("rtc_cm:plannedFor", "{{ plannedFor }}"),
("rtc_cm:foundIn", "{{ foundIn }}"),
("oslc_cm:severity", "{{ severity }}"),
("oslc_cm:priority", "{{ priority }}"),
("rtc_cm:filedAgainst", "{{ filedAgainst }}")]
for field in replace_fields:
try:
if field[0] in wk_raw_data:
wk_raw_data[field[0]]["@rdf:resource"] = field[1]
self.log.debug("Successfully replace field [%s] with [%s]",
field[0], field[1])
except:
self.log.warning("Cannot replace field [%s]", field[0])
continue
if template_file_path:
self.log.info("Writing the template to file %s",
template_file_path)
return xmltodict.unparse(raw_data, output=output,
encoding=encoding)
def _remove_long_fields(self, wk_raw_data):
"""Remove long fields: These fields are can only customized after
the workitems are created
"""
match_str_list = ["rtc_cm:com.ibm.",
"calm:"]
keys = list(wk_raw_data.keys())
for key in keys:
for match_str in match_str_list:
if key.startswith(match_str):
try:
wk_raw_data.pop(key)
self.log.debug("Successfully remove field [%s] from "
"the template", key)
except:
self.log.warning("Cannot remove field [%s] from the "
"template", key)
continue
def getTemplates(self, workitems, template_folder=None,
template_names=None, keep=False, encoding="UTF-8"):
"""Get templates from a group of to-be-copied :class:`Workitems` and
write them to files named after the names in `template_names`
respectively.
:param workitems: a :class:`list`/:class:`tuple`/:class:`set`
contains the ids (integer or equivalent string) of some
to-be-copied :class:`Workitems`
:param template_names: a :class:`list`/:class:`tuple`/:class:`set`
contains the template file names for copied :class:`Workitems`.
If `None`, the new template files will be named after the
:class:`rtcclient.workitem.Workitem` id with "`.template`" as a
postfix
:param template_folder: refer to
:class:`rtcclient.template.Templater.getTemplate`
:param keep: (default is False) refer to
:class:`rtcclient.template.Templater.getTemplate`
:param encoding: (default is "UTF-8") refer to
:class:`rtcclient.template.Templater.getTemplate`
"""
if (not workitems or isinstance(workitems, six.string_types) or
isinstance(workitems, int) or
isinstance(workitems, float) or
not hasattr(workitems, "__iter__")):
error_msg = "Input parameter 'workitems' is not iterable"
self.log.error(error_msg)
raise exception.BadValue(error_msg)
if template_names is not None:
if not hasattr(template_names, "__iter__"):
error_msg = "Input parameter 'template_names' is not iterable"
self.log.error(error_msg)
raise exception.BadValue(error_msg)
if len(workitems) != len(template_names):
error_msg = "".join(["Input parameters 'workitems' and ",
"'template_names' have different length"])
self.log.error(error_msg)
raise exception.BadValue(error_msg)
for index, wk_id in enumerate(workitems):
try:
if template_names is not None:
template_name = template_names[index]
else:
template_name = ".".join([wk_id, "template"])
self.getTemplate(wk_id,
template_name=template_name,
template_folder=template_folder,
keep=keep,
encoding=encoding)
except Exception as excp:
self.log.error("Exception occurred when fetching"
"template from <Workitem %s>: %s",
str(wk_id), excp)
continue
self.log.info("Successfully fetch all the templates from "
"workitems: %s", workitems)
| 41.231121 | 79 | 0.544622 |
d18eefe2cfdb8f172c974eb82d7224b7b1de4189 | 8,643 | py | Python | main/main_cv.py | Lamiane/conv_qsar_fast | e495777293199a6b529dbcb7530b2240d2310e09 | [
"MIT"
] | 2 | 2020-07-07T14:21:30.000Z | 2020-10-22T10:11:43.000Z | main/main_cv.py | Lamiane/conv_qsar_fast | e495777293199a6b529dbcb7530b2240d2310e09 | [
"MIT"
] | null | null | null | main/main_cv.py | Lamiane/conv_qsar_fast | e495777293199a6b529dbcb7530b2240d2310e09 | [
"MIT"
] | null | null | null | import os
import sys
import datetime
import numpy as np
from distutils.util import strtobool
from conv_qsar_fast.main.core import build_model, train_model, save_model
from conv_qsar_fast.main.test import test_model, test_embeddings_demo
from conv_qsar_fast.main.data import get_data_full
from conv_qsar_fast.utils.parse_cfg import read_config
if __name__ == '__main__':
    # Entry point: read a .cfg file describing the data, architecture and
    # training schedule, then build, train, save and test one model per
    # cross-validation fold.
    if len(sys.argv) < 2:
        print('Usage: {} "settings.cfg"'.format(sys.argv[0]))
        quit(1)
    print(f"Running main_cv for config: {sys.argv[1]}")

    # Load settings
    try:
        config = read_config(sys.argv[1])
    # BUG FIX: was a bare `except:` (would also swallow SystemExit /
    # KeyboardInterrupt); narrowed to Exception.
    except Exception:
        print('Could not read config file {}'.format(sys.argv[1]))
        quit(1)

    # Get model label
    try:
        fpath = config['IO']['model_fpath']
    except KeyError:
        print('Must specify model_fpath in IO in config')
        quit(1)

    # Make the output directory; exist_ok replaces the old bare
    # try/except-pass (which also hid real OSErrors such as permissions).
    model_dir = os.path.dirname(fpath)
    if model_dir:
        os.makedirs(model_dir, exist_ok=True)

    ###################################################################################
    # # # DEFINE DATA
    ###################################################################################
    data_kwargs = config['DATA']
    if '__name__' in data_kwargs:
        del data_kwargs['__name__']  # injected by configparser
    # Coerce string config values to their real types.
    if 'molecular_attributes' in data_kwargs:
        data_kwargs['molecular_attributes'] = strtobool(data_kwargs['molecular_attributes'])
    if 'smiles_index' in data_kwargs:
        data_kwargs['smiles_index'] = int(data_kwargs['smiles_index'])
    if 'y_index' in data_kwargs:
        data_kwargs['y_index'] = int(data_kwargs['y_index'])
    if 'skipline' in data_kwargs:
        data_kwargs['skipline'] = strtobool(data_kwargs['skipline'])

    # Defining the data split: cross-validation or regular train/valid/test.
    if strtobool(data_kwargs['cv']):
        # Every "fold*" key names one fold file; each fold takes a turn as
        # the validation set while the remaining folds form the train set.
        fold_keys = [key for key in data_kwargs if "fold" in key]
        all_folds = []
        for key in fold_keys:
            all_folds.append(data_kwargs[key])
            del data_kwargs[key]
        trains = []
        vals = []
        for val_fold in all_folds:
            trains.append([fold for fold in all_folds if fold != val_fold])
            vals.append([val_fold, ])
        splits = list(zip(trains, vals))
    else:
        splits = list(zip([[data_kwargs['train'], ], ], [[data_kwargs['val'], ], ]))
        del data_kwargs['train']
        del data_kwargs['val']

    # The test set is always the same, regardless of fold.
    test_path = [data_kwargs['test'], ]
    del data_kwargs['test']
    del data_kwargs['cv']

    # Iterate through all folds
    ref_fpath = fpath
    for fold_idx, (train_paths, validation_path) in enumerate(splits):
        fpath = ref_fpath.replace('<this_fold>', str(1 + fold_idx))

        ###################################################################################
        # # # BUILD MODEL
        ###################################################################################
        print('...building model')
        try:
            kwargs = config['ARCHITECTURE']
            if '__name__' in kwargs:
                del kwargs['__name__']
            if 'embedding_size' in kwargs:
                kwargs['embedding_size'] = int(kwargs['embedding_size'])
            if 'hidden' in kwargs:
                kwargs['hidden'] = int(kwargs['hidden'])
            if 'hidden2' in kwargs:
                kwargs['hidden2'] = int(kwargs['hidden2'])
            if 'depth' in kwargs:
                kwargs['depth'] = int(kwargs['depth'])
            if 'dr1' in kwargs:
                kwargs['dr1'] = float(kwargs['dr1'])
            if 'dr2' in kwargs:
                kwargs['dr2'] = float(kwargs['dr2'])
            if 'output_size' in kwargs:
                kwargs['output_size'] = int(kwargs['output_size'])
            # NOTE: 'optimizer' is passed through as a string;
            # the original no-op self-assignment was removed.
            if 'lr' in kwargs:
                kwargs['lr'] = float(kwargs['lr'])
            if 'molecular_attributes' in config['DATA']:
                kwargs['molecular_attributes'] = config['DATA']['molecular_attributes']

            model = build_model(**kwargs)
            print('...built untrained model')
        except KeyboardInterrupt:
            print('User cancelled model building')
            quit(1)

        ###################################################################################
        # # # LOAD DATA
        ###################################################################################
        print(f"Using CV fold {1 + fold_idx}/{len(splits)}")
        data = get_data_full(train_paths=train_paths, validation_path=validation_path, test_path=test_path, **data_kwargs)

        ###################################################################################
        # # # LOAD WEIGHTS?
        ###################################################################################
        if 'weights_fpath' in config['IO']:
            weights_fpath = config['IO']['weights_fpath']
        else:
            weights_fpath = fpath + '.h5'

        try:
            use_old_weights = strtobool(config['IO']['use_existing_weights'])
        except KeyError:
            print('Must specify whether or not to use existing model weights')
            quit(1)

        if use_old_weights and os.path.isfile(weights_fpath):
            model.load_weights(weights_fpath)
            print('...loaded weight information')

            # Reset final dense layer to fresh random weights?
            if 'reset_final' in config['IO']:
                if config['IO']['reset_final'] in ['true', 'y', 'Yes', 'True', '1']:
                    layer = model.layers[-1]
                    layer.W.set_value((layer.init(layer.W.shape.eval()).eval()).astype(np.float32))
                    layer.b.set_value(np.zeros(layer.b.shape.eval(), dtype=np.float32))
        elif use_old_weights and not os.path.isfile(weights_fpath):
            print('Weights not found at specified path {}'.format(weights_fpath))
            quit(1)

        ###################################################################################
        # # # TRAIN THE MODEL
        ###################################################################################
        # NOTE(review): a KeyboardInterrupt before train_model() returns
        # leaves `loss`/`val_loss` undefined, so save_model below would
        # raise NameError -- behaviour kept from the original; confirm.
        try:
            print('...training model')
            kwargs = config['TRAINING']
            if '__name__' in kwargs:
                del kwargs['__name__']  # from configparser
            if 'nb_epoch' in kwargs:
                kwargs['nb_epoch'] = int(kwargs['nb_epoch'])
            if 'batch_size' in kwargs:
                kwargs['batch_size'] = int(kwargs['batch_size'])
            if 'patience' in kwargs:
                kwargs['patience'] = int(kwargs['patience'])

            (model, loss, val_loss) = train_model(model, data, **kwargs)
            print('...trained model')
        except KeyboardInterrupt:
            pass

        ###################################################################################
        # # # SAVE MODEL
        ###################################################################################
        # Timestamp used to tag the saved model and the test report.
        tstamp = datetime.datetime.utcnow().strftime('%m-%d-%Y_%H-%M')
        print('...saving model')
        save_model(model, loss, val_loss, fpath=fpath, config=config, tstamp=tstamp)
        print('...saved model')

        ###################################################################################
        # # # TEST MODEL
        ###################################################################################
        print('...testing model')
        test_kwargs = config['TEST']
        calculate_parity = False
        if 'calculate_parity' in test_kwargs:
            calculate_parity = strtobool(test_kwargs['calculate_parity'])
        calculate_rocauc = False
        if 'calculate_rocauc' in test_kwargs:
            calculate_rocauc = strtobool(test_kwargs['calculate_rocauc'])
        _ = test_model(model, data, fpath, tstamp=tstamp, batch_size=int(config['TRAINING']['batch_size']),
                       calculate_parity=calculate_parity, calculate_rocauc=calculate_rocauc)
        print('...tested model')

        ###################################################################################
        # # # TEST EMBEDDINGS?
        ###################################################################################
        try:
            if strtobool(config['TESTING']['test_embedding']):
                test_embeddings_demo(model, fpath)
        except KeyError:
            pass
09f35ceeddec41af26cc05239cee29050af07fb8 | 160 | py | Python | Chapter10/clean_sample.py | fbitti/Bioinformatics-with-Python-Cookbook-Second-Edition | 89281e5e3474cb79aeb87e75eb1a1849c338813a | [
"MIT"
] | 244 | 2018-06-28T05:05:01.000Z | 2022-03-28T07:59:19.000Z | Chapter10/clean_sample.py | fbitti/Bioinformatics-with-Python-Cookbook-Second-Edition | 89281e5e3474cb79aeb87e75eb1a1849c338813a | [
"MIT"
] | 22 | 2019-01-19T08:30:18.000Z | 2021-11-02T20:15:48.000Z | Chapter10/clean_sample.py | fbitti/Bioinformatics-with-Python-Cookbook-Second-Edition | 89281e5e3474cb79aeb87e75eb1a1849c338813a | [
"MIT"
] | 178 | 2018-05-12T10:05:29.000Z | 2022-03-31T03:01:59.000Z | import sys
sys.stdout.write('ID_1 ID_2 missing\n0 0 0 \n')
for line in sys.stdin:
ind = line.rstrip()
sys.stdout.write('%s %s 0\n' % (ind, ind))
| 17.777778 | 47 | 0.60625 |
4aa1299e971832221abff9ebf98dc3181628441e | 7,612 | py | Python | percolation.py | rafaelpleite/Algoritmos | 2f02ac89b4f71dd9780b8cbf0c054db64b68b7ae | [
"MIT"
] | null | null | null | percolation.py | rafaelpleite/Algoritmos | 2f02ac89b4f71dd9780b8cbf0c054db64b68b7ae | [
"MIT"
] | null | null | null | percolation.py | rafaelpleite/Algoritmos | 2f02ac89b4f71dd9780b8cbf0c054db64b68b7ae | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#------------------------------------------------------------------
# LEIA E PREENCHA O CABEÇALHO
# NÃO ALTERE OS NOMES DAS FUNÇÕES
# NÃO APAGUE OS DOCSTRINGS
# NÃO INCLUA NENHUM import ...
#------------------------------------------------------------------
'''
Nome: Rafael Prudêncio Leite
NUSP: ********
Ao preencher esse cabeçalho com o meu nome e o meu número USP,
declaro que todas as partes originais desse exercício programa (EP)
foram desenvolvidas e implementadas por mim e que portanto não
constituem desonestidade acadêmica ou plágio.
Declaro também que sou responsável por todas as cópias desse
programa e que não distribui ou facilitei a sua distribuição.
Estou ciente que os casos de plágio e desonestidade acadêmica
serão tratados segundo os critérios divulgados na página da
disciplina.
Entendo que EPs sem assinatura devem receber nota zero e, ainda
assim, poderão ser punidos por desonestidade acadêmica.
Abaixo descreva qualquer ajuda que você recebeu para fazer este
EP. Inclua qualquer ajuda recebida por pessoas (inclusive
monitores e colegas). Com exceção de material de MAC0110 e MAC0122,
caso você tenha utilizado alguma informação, trecho de código,...
indique esse fato abaixo para que o seu programa não seja
considerado plágio ou irregular.
Exemplo:
A monitora me explicou que eu devia utilizar a função int() quando
fazemos leitura de números inteiros.
A minha função quicksort() foi baseada na descrição encontrada na
página https://www.ime.usp.br/~pf/algoritmos/aulas/quick.html.
Descrição de ajuda ou indicação de fonte:
'''
import numpy as np
#--------------------------------------------------------------------------
# constants (site states)
BLOCKED = 0  # blocked site
OPEN = 1     # open site
FULL = 2     # full site (open and connected to the top row)


class Fila:
    """Minimal FIFO queue backed by a Python list."""

    def __init__(self):
        self.itens = []

    def vazia(self):
        """Return True when the queue holds no items."""
        return self.itens == []

    def insere(self, item):
        """Append *item* to the back of the queue."""
        self.itens.append(item)

    def remove(self):
        """Remove and return the item at the front of the queue.

        BUG FIX: the original did ``self.itens.pop[0]`` (subscripting the
        bound method instead of calling it), which raised TypeError.
        """
        return self.itens.pop(0)


class Percolation:
    '''
    Represents a grid with all sites initially blocked.

    `entrada` may be an int n (an n x n grid) or an explicit shape tuple.
    '''

    def __init__(self, entrada):
        self.perco = np.full((entrada, entrada), BLOCKED) if type(entrada) == int else np.full(entrada, BLOCKED)
        self.shape = self.perco.shape

    def _in_bounds(self, int1, int2):
        """True when (int1, int2) is a valid grid position.

        BUG FIX: the original checks used lexicographic tuple comparison
        (``(int1, int2) >= (0, 0)``) and ``len(...) - idx >= 0``, which
        accepted negative column indices (numpy wrap-around) and
        ``idx == len`` (IndexError).
        """
        rows, cols = self.shape
        return 0 <= int1 < rows and 0 <= int2 < cols

    def __str__(self):
        n_open = 0
        fig = '\n' + '+---' * self.perco.shape[1] + '+\n/ '
        for i in range(len(self.perco)):
            for j in range(len(self.perco[0])):
                if self.perco[i, j] == BLOCKED:
                    fig += '  / '
                elif self.perco[i, j] == OPEN:
                    fig += 'o / '
                    n_open += 1
                else:
                    fig += 'x / '
                    n_open += 1
            fig += '\n' + '+---' * self.perco.shape[1] + '+\n/ '
        return fig[:-2] + 'grade de dimensão: ' + str(self.perco.shape[0]) + 'x' + str(self.perco.shape[1]) + '\nNúmero de sítios abertos: ' + str(n_open) + '\npercolou: ' + str(perco(self))

    def is_open(self, int1, int2):
        """Return True if the site is OPEN or FULL, False if BLOCKED,
        or None (with a message) for an out-of-range position."""
        if not self._in_bounds(int1, int2):
            print('Valor inserido invalido.')
            return None
        if self.perco[int1, int2] == OPEN or self.perco[int1, int2] == FULL:
            return True
        return False

    def is_full(self, int1, int2):
        """Return True if the site is FULL, False otherwise,
        or None (with a message) for an out-of-range position."""
        if not self._in_bounds(int1, int2):
            print('Valor inserido invalido.')
            return None
        # Uses the FULL constant instead of the magic number 2.
        if self.perco[int1, int2] == FULL:
            return True
        return False

    def no_open(self):
        """Return the number of sites that are not BLOCKED (OPEN or FULL).

        Vectorized count instead of the original double Python loop.
        """
        return int(np.count_nonzero(self.perco != BLOCKED))

    def get_grid(self):
        """Return an independent copy of the underlying grid array."""
        return np.copy(self.perco)

    def open(self, int1, int2):
        """Open site (int1, int2) and propagate FULL status via look().

        BUG FIX: the original out-of-range test compared tuples
        lexicographically, so e.g. column == shape[1] could slip through
        and crash with IndexError.
        """
        if not self._in_bounds(int1, int2):
            print('A posição: [{},{}] está fora da grade.'.format(int1, int2))
            return None
        if self.perco[int1, int2] == BLOCKED:
            self.perco[int1, int2] = OPEN
        # Top-row sites become FULL immediately (connected to the source).
        if int1 == 0:
            self.perco[int1, int2] = FULL
        look(self, [int1, int2])

    def __setitem__(self, instance, value):
        self.perco[instance] = value

    def percolates(self):
        """True when the grid percolates (delegates to module-level perco)."""
        return perco(self)
def perco(matrix):
    """Return True when the grid percolates, i.e. when at least one site
    on the bottom row of *matrix* is FULL; False otherwise."""
    bottom_row = matrix.shape[0] - 1
    return any(
        matrix.perco[bottom_row, column] == FULL
        for column in range(matrix.shape[1])
    )
def look(matrix, pos):
    """Propagate FULL status through the open sites reachable from *pos*.

    Stack-based (depth-first) traversal. Each visited site is marked FULL
    as soon as one of its already-FULL neighbours is examined; neighbours
    are inspected in the fixed order down, up, right, left -- identical to
    the original implementation, so the final grid state is unchanged.

    PERF FIX: the original kept visited positions in a list of lists and
    did linear `not in` scans (quadratic overall); this version uses a set
    of tuples for O(1) membership while preserving the exact traversal.

    NOTE(review): a single pass in this order may not reach the flood-fill
    fixpoint for every configuration -- behaviour deliberately kept
    identical to the original; confirm against the grading harness.
    """
    stack = [pos]
    seen = set()
    while stack:
        row, col = stack.pop()
        seen.add((row, col))
        # (neighbour_row, neighbour_col, bounds_ok) in the original order.
        neighbours = (
            (row + 1, col, row + 2 <= matrix.shape[0]),  # down
            (row - 1, col, row > 0),                     # up
            (row, col + 1, col + 2 <= matrix.shape[1]),  # right
            (row, col - 1, col > 0),                     # left
        )
        for nrow, ncol, bounds_ok in neighbours:
            if not (bounds_ok and matrix.is_open(nrow, ncol)):
                continue
            # A FULL neighbour fills the current site.
            if matrix.is_full(nrow, ncol):
                matrix[row, col] = FULL
            if (nrow, ncol) not in seen:
                stack.append([nrow, ncol])
                seen.add((nrow, ncol))
'''
def look(matrix, pos): #array, tuple
if matrix.perco[pos[0], pos[1]] == OPEN:
if pos[0] == 0:
matrix[pos[0], pos[1]] = FULL
if pos[0] > 0: #olha de baixo p/ cima
if matrix.is_open(pos[0]-1, pos[1]) == True:
if matrix.is_full(pos[0]-1, pos[1]) == True:
matrix[pos[0], pos[1]] = FULL
look(matrix, (pos[0]-1, pos[1]))
if pos[1] + 2 <= matrix.shape[1]: #olha da esq p/ dir
if matrix.is_open(pos[0], pos[1]+1) == True:
if matrix.is_full(pos[0], pos[1]+1) == True:
matrix[pos[0], pos[1]] = FULL #TEM UM ELSE: AQUI EM BAIXO???
look(matrix, (pos[0], pos[1]+1))
if pos[1] > 0: #olha da dir p/ esq
if matrix.is_open(pos[0], pos[1]-1) == True:
if matrix.is_full(pos[0], pos[1]-1) == True:
matrix[pos[0], pos[1]] = FULL
look(matrix, (pos[0], pos[1]-1))
'''
| 32.391489 | 186 | 0.485155 |
e782bd8739b6d9d18e92766068ccef718f1c3cc0 | 16,672 | py | Python | google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py | dwkk-google/python-aiplatform | 016415122f93029cdd7703b6dfd6d9a431d1d571 | [
"Apache-2.0"
] | null | null | null | google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py | dwkk-google/python-aiplatform | 016415122f93029cdd7703b6dfd6d9a431d1d571 | [
"Apache-2.0"
] | null | null | null | google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py | dwkk-google/python-aiplatform | 016415122f93029cdd7703b6dfd6d9a431d1d571 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.aiplatform_v1beta1.types import index
from google.cloud.aiplatform_v1beta1.types import index_service
from google.longrunning import operations_pb2 # type: ignore
from .base import IndexServiceTransport, DEFAULT_CLIENT_INFO
class IndexServiceGrpcTransport(IndexServiceTransport):
"""gRPC backend transport for IndexService.
A service for creating and managing Vertex AI's Index
resources.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
    self,
    *,
    host: str = "aiplatform.googleapis.com",
    credentials: ga_credentials.Credentials = None,
    credentials_file: str = None,
    scopes: Sequence[str] = None,
    channel: grpc.Channel = None,
    api_mtls_endpoint: str = None,
    client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
    ssl_channel_credentials: grpc.ChannelCredentials = None,
    client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
    quota_project_id: Optional[str] = None,
    client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    always_use_jwt_access: Optional[bool] = False,
) -> None:
    """Instantiate the transport.

    Args:
        host (Optional[str]):
            The hostname to connect to.
        credentials (Optional[google.auth.credentials.Credentials]): The
            authorization credentials to attach to requests. These
            credentials identify the application to the service; if none
            are specified, the client will attempt to ascertain the
            credentials from the environment.
            This argument is ignored if ``channel`` is provided.
        credentials_file (Optional[str]): A file with credentials that can
            be loaded with :func:`google.auth.load_credentials_from_file`.
            This argument is ignored if ``channel`` is provided.
        scopes (Optional(Sequence[str])): A list of scopes. This argument is
            ignored if ``channel`` is provided.
        channel (Optional[grpc.Channel]): A ``Channel`` instance through
            which to make calls.
        api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
            If provided, it overrides the ``host`` argument and tries to create
            a mutual TLS channel with client SSL credentials from
            ``client_cert_source`` or application default SSL credentials.
        client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
            Deprecated. A callback to provide client SSL certificate bytes and
            private key bytes, both in PEM format. It is ignored if
            ``api_mtls_endpoint`` is None.
        ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
            for the grpc channel. It is ignored if ``channel`` is provided.
        client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
            A callback to provide client certificate bytes and private key bytes,
            both in PEM format. It is used to configure a mutual TLS channel. It is
            ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
        quota_project_id (Optional[str]): An optional project to use for billing
            and quota.
        client_info (google.api_core.gapic_v1.client_info.ClientInfo):
            The client info used to send a user-agent string along with
            API requests. If ``None``, then default info will be used.
            Generally, you only need to set this if you're developing
            your own client library.
        always_use_jwt_access (Optional[bool]): Whether self signed JWT should
            be used for service account credentials.

    Raises:
        google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
            creation failed for any reason.
        google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
            and ``credentials_file`` are passed.
    """
    self._grpc_channel = None
    self._ssl_channel_credentials = ssl_channel_credentials
    # Per-RPC stub cache, populated lazily by the RPC properties below.
    self._stubs: Dict[str, Callable] = {}
    # Long-running-operations client, created lazily by `operations_client`.
    self._operations_client: Optional[operations_v1.OperationsClient] = None

    # Both mTLS arguments are deprecated in favor of
    # ``client_cert_source_for_mtls`` / ``ssl_channel_credentials``.
    if api_mtls_endpoint:
        warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
    if client_cert_source:
        warnings.warn("client_cert_source is deprecated", DeprecationWarning)

    if channel:
        # Ignore credentials if a channel was passed.
        credentials = False
        # If a channel was explicitly provided, set it.
        self._grpc_channel = channel
        self._ssl_channel_credentials = None

    else:
        if api_mtls_endpoint:
            # Deprecated path: the mTLS endpoint replaces ``host``.
            host = api_mtls_endpoint

            # Create SSL credentials with client_cert_source or application
            # default SSL credentials.
            if client_cert_source:
                cert, key = client_cert_source()
                self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
            else:
                self._ssl_channel_credentials = SslCredentials().ssl_credentials

        else:
            # Non-deprecated path: only build mTLS credentials when the
            # caller did not already supply channel credentials.
            if client_cert_source_for_mtls and not ssl_channel_credentials:
                cert, key = client_cert_source_for_mtls()
                self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )

    # The base transport sets the host, credentials and scopes
    super().__init__(
        host=host,
        credentials=credentials,
        credentials_file=credentials_file,
        scopes=scopes,
        quota_project_id=quota_project_id,
        client_info=client_info,
        always_use_jwt_access=always_use_jwt_access,
    )

    if not self._grpc_channel:
        self._grpc_channel = type(self).create_channel(
            self._host,
            # use the credentials which are saved
            credentials=self._credentials,
            # Set ``credentials_file`` to ``None`` here as
            # the credentials that we saved earlier should be used.
            credentials_file=None,
            scopes=self._scopes,
            ssl_credentials=self._ssl_channel_credentials,
            quota_project_id=quota_project_id,
            # -1 lifts gRPC's default message-size limits in both directions.
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )

    # Wrap messages. This must be done after self._grpc_channel exists
    self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
    cls,
    host: str = "aiplatform.googleapis.com",
    credentials: ga_credentials.Credentials = None,
    credentials_file: str = None,
    scopes: Optional[Sequence[str]] = None,
    quota_project_id: Optional[str] = None,
    **kwargs,
) -> grpc.Channel:
    """Create and return a gRPC channel object.

    Args:
        host (Optional[str]): The host for the channel to use.
        credentials (Optional[~.Credentials]): The authorization
            credentials to attach to requests. These credentials identify
            this application to the service. If none are specified, the
            client will attempt to ascertain the credentials from the
            environment.
        credentials_file (Optional[str]): A file with credentials that can
            be loaded with :func:`google.auth.load_credentials_from_file`.
            This argument is mutually exclusive with credentials.
        scopes (Optional[Sequence[str]]): An optional list of scopes needed
            for this service. These are only used when credentials are not
            specified and are passed to :func:`google.auth.default`.
        quota_project_id (Optional[str]): An optional project to use for
            billing and quota.
        kwargs (Optional[dict]): Keyword arguments, which are passed to the
            channel creation.

    Returns:
        grpc.Channel: A gRPC channel object.

    Raises:
        google.api_core.exceptions.DuplicateCredentialArgs: If both
            ``credentials`` and ``credentials_file`` are passed.
    """
    # Gather the explicit settings; extra keyword arguments are forwarded
    # untouched (a duplicated key still raises TypeError, just as direct
    # keyword passing would).
    channel_args = {
        "credentials": credentials,
        "credentials_file": credentials_file,
        "quota_project_id": quota_project_id,
        "default_scopes": cls.AUTH_SCOPES,
        "scopes": scopes,
        "default_host": cls.DEFAULT_HOST,
    }
    return grpc_helpers.create_channel(host, **channel_args, **kwargs)
@property
def grpc_channel(self) -> grpc.Channel:
    """The gRPC channel this transport uses to reach the service."""
    channel = self._grpc_channel
    return channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
    """Client used to poll long-running operations.

    Created on first access and cached on the instance, so repeated
    reads return the same client.
    """
    client = self._operations_client
    if client is None:
        client = operations_v1.OperationsClient(self.grpc_channel)
        self._operations_client = client
    return client
@property
def create_index(
    self,
) -> Callable[[index_service.CreateIndexRequest], operations_pb2.Operation]:
    r"""Return a callable for the create index RPC.

    Creates an Index.

    Returns:
        Callable[[~.CreateIndexRequest],
            ~.Operation]:
        A function that, when called, will call the underlying RPC
        on the server.
    """
    # Build the stub lazily and cache it; gRPC performs (de)serialization,
    # so only the per-RPC codec callables need to be supplied here.
    stub = self._stubs.get("create_index")
    if stub is None:
        stub = self.grpc_channel.unary_unary(
            "/google.cloud.aiplatform.v1beta1.IndexService/CreateIndex",
            request_serializer=index_service.CreateIndexRequest.serialize,
            response_deserializer=operations_pb2.Operation.FromString,
        )
        self._stubs["create_index"] = stub
    return stub
@property
def get_index(self) -> Callable[[index_service.GetIndexRequest], index.Index]:
    r"""Return a callable for the get index RPC.

    Gets an Index.

    Returns:
        Callable[[~.GetIndexRequest],
            ~.Index]:
        A function that, when called, will call the underlying RPC
        on the server.
    """
    # Build the stub lazily and cache it; gRPC performs (de)serialization,
    # so only the per-RPC codec callables need to be supplied here.
    stub = self._stubs.get("get_index")
    if stub is None:
        stub = self.grpc_channel.unary_unary(
            "/google.cloud.aiplatform.v1beta1.IndexService/GetIndex",
            request_serializer=index_service.GetIndexRequest.serialize,
            response_deserializer=index.Index.deserialize,
        )
        self._stubs["get_index"] = stub
    return stub
@property
def list_indexes(
    self,
) -> Callable[
    [index_service.ListIndexesRequest], index_service.ListIndexesResponse
]:
    r"""Return a callable for the list indexes RPC.

    Lists Indexes in a Location.

    Returns:
        Callable[[~.ListIndexesRequest],
            ~.ListIndexesResponse]:
        A function that, when called, will call the underlying RPC
        on the server.
    """
    # Build the stub lazily and cache it; gRPC performs (de)serialization,
    # so only the per-RPC codec callables need to be supplied here.
    stub = self._stubs.get("list_indexes")
    if stub is None:
        stub = self.grpc_channel.unary_unary(
            "/google.cloud.aiplatform.v1beta1.IndexService/ListIndexes",
            request_serializer=index_service.ListIndexesRequest.serialize,
            response_deserializer=index_service.ListIndexesResponse.deserialize,
        )
        self._stubs["list_indexes"] = stub
    return stub
@property
def update_index(
    self,
) -> Callable[[index_service.UpdateIndexRequest], operations_pb2.Operation]:
    r"""Return a callable for the update index RPC.

    Updates an Index.

    Returns:
        Callable[[~.UpdateIndexRequest],
            ~.Operation]:
        A function that, when called, will call the underlying RPC
        on the server.
    """
    # Build the stub lazily and cache it; gRPC performs (de)serialization,
    # so only the per-RPC codec callables need to be supplied here.
    stub = self._stubs.get("update_index")
    if stub is None:
        stub = self.grpc_channel.unary_unary(
            "/google.cloud.aiplatform.v1beta1.IndexService/UpdateIndex",
            request_serializer=index_service.UpdateIndexRequest.serialize,
            response_deserializer=operations_pb2.Operation.FromString,
        )
        self._stubs["update_index"] = stub
    return stub
@property
def delete_index(
    self,
) -> Callable[[index_service.DeleteIndexRequest], operations_pb2.Operation]:
    r"""Return a callable for the delete index RPC.

    Deletes an Index. An Index can only be deleted when all its
    [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes]
    had been undeployed.

    Returns:
        Callable[[~.DeleteIndexRequest],
            ~.Operation]:
        A function that, when called, will call the underlying RPC
        on the server.
    """
    # Build the stub lazily and cache it; gRPC performs (de)serialization,
    # so only the per-RPC codec callables need to be supplied here.
    stub = self._stubs.get("delete_index")
    if stub is None:
        stub = self.grpc_channel.unary_unary(
            "/google.cloud.aiplatform.v1beta1.IndexService/DeleteIndex",
            request_serializer=index_service.DeleteIndexRequest.serialize,
            response_deserializer=operations_pb2.Operation.FromString,
        )
        self._stubs["delete_index"] = stub
    return stub
def close(self):
    """Shut down the transport by closing its underlying gRPC channel."""
    channel = self.grpc_channel
    channel.close()
__all__ = ("IndexServiceGrpcTransport",)
| 43.080103 | 87 | 0.633937 |
1a32a0f2ce07e26dbe1a452582baa1551b35f990 | 5,353 | py | Python | blobrl/agents/categorical_dqn.py | garaytc/reinforcement | e6af258bf2ac3b45c20e0ed3d2f58ca7bc2b232f | [
"Apache-2.0"
] | 12 | 2020-05-19T18:58:55.000Z | 2021-02-21T20:26:46.000Z | blobrl/agents/categorical_dqn.py | garaytc/reinforcement | e6af258bf2ac3b45c20e0ed3d2f58ca7bc2b232f | [
"Apache-2.0"
] | 39 | 2020-05-19T18:41:42.000Z | 2021-01-16T08:31:06.000Z | blobrl/agents/categorical_dqn.py | garaytc/reinforcement | e6af258bf2ac3b45c20e0ed3d2f58ca7bc2b232f | [
"Apache-2.0"
] | 2 | 2020-05-19T15:15:04.000Z | 2020-05-21T08:45:59.000Z | import torch
import torch.nn.functional as F
import torch.optim as optim
from gym.spaces import flatten
from blobrl.agents import DQN
from blobrl.memories import ExperienceReplay
from blobrl.networks import C51Network
class CategoricalDQN(DQN):
def __init__(self, observation_space, action_space, memory=None, network=None, num_atoms=51,
             r_min=-10, r_max=10, step_train=1, batch_size=32, gamma=1.0,
             optimizer=None, greedy_exploration=None, device=None):
    """Categorical (C51) DQN agent over a fixed value-distribution support.

    :param observation_space: observation space of the environment
    :param action_space: action space of the environment
    :param memory: replay memory; a fresh :class:`ExperienceReplay` is
        created when None. (The previous ``memory=ExperienceReplay()``
        default was a mutable default argument shared by every agent
        instance, so two agents would silently share one replay buffer.)
    :param network: distributional Q-network; when both ``network`` and
        ``optimizer`` are None, a default :class:`C51Network` with an Adam
        optimizer is built
    :param num_atoms: number of atoms of the distribution support
    :param r_min: lower bound of the support
    :param r_max: upper bound of the support
    :param step_train: training frequency in environment steps
    :param batch_size: mini-batch size sampled from memory
    :param gamma: discount factor
    :param optimizer: torch optimizer for the network
    :param greedy_exploration: exploration strategy
    :param device: torch device to run agent
    :type device: torch.device
    """
    # Build a fresh buffer per agent instead of sharing a module-level one.
    if memory is None:
        memory = ExperienceReplay()

    if network is None and optimizer is None:
        network = C51Network(observation_space=observation_space,
                             action_space=action_space)
        # The default network is built for 51 atoms; keep them consistent.
        num_atoms = 51
        optimizer = optim.Adam(network.parameters())

    super().__init__(observation_space=observation_space, action_space=action_space, memory=memory,
                     network=network, step_train=step_train, batch_size=batch_size, gamma=gamma,
                     loss=None, optimizer=optimizer, greedy_exploration=greedy_exploration, device=device)

    self.num_atoms = num_atoms
    self.r_min = r_min
    self.r_max = r_max

    # Fixed support z: num_atoms evenly spaced values in [r_min, r_max].
    self.delta_z = (r_max - r_min) / float(num_atoms - 1)
    self.z = torch.tensor([r_min + i * self.delta_z for i in range(num_atoms)], device=self.device)
def get_action(self, observation):
    """Return the action chosen by the agent for the given observation.

    During exploration a random action is sampled; otherwise the action
    maximising the expected return E[Z] = sum_i z_i * p_i of the
    predicted distribution is returned.

    :param observation: state of environment
    :type observation: gym.Space
    """
    # Exploration branch: let the environment's action space sample.
    if not self.greedy_exploration.be_greedy(self.step) and self.with_exploration:
        return self.action_space.sample()

    # Flatten the observation and add a batch dimension for the forward pass.
    observation = torch.tensor([flatten(self.observation_space, observation)], device=self.device).float()

    prediction = self.network.forward(observation)

    def return_values(values):
        # Networks may return nested lists of per-head distributions
        # (multi-discrete actions); recurse into them.
        if isinstance(values, list):
            return [return_values(v) for v in values]

        # Expected value per action: sum over atoms of z_i * p_i(a).
        q_values = values * self.z
        q_values = torch.sum(q_values, dim=2)
        return torch.argmax(q_values).detach().item()

    return return_values(prediction)
def apply_loss(self, next_prediction, prediction, actions, rewards, next_observations, dones, len_space):
    """Accumulate gradients of the categorical (C51) cross-entropy loss.

    Projects the Bellman-updated support ``r + gamma * z`` onto the fixed
    support and back-propagates the cross entropy between that projection
    and the predicted distribution of the taken actions.

    NOTE(review): only ``zero_grad`` and ``backward`` happen here; the
    ``optimizer.step()`` presumably happens in the caller — confirm
    against the base class.
    """
    if isinstance(next_prediction, list):
        # Multi-head (multi-discrete) case: apply the loss head by head,
        # moving the per-head axis of `actions` to the front.
        [self.apply_loss(n, p, a, rewards, next_observations, dones, c) for n, p, a, c in
         zip(next_prediction, prediction, actions.permute(1, 0, *[i for i in range(2, len(actions.shape))]),
             len_space)]
    else:
        # Greedy next action under the expected value of the next distribution.
        q_values_next = next_prediction * self.z
        q_values_next = torch.sum(q_values_next, dim=2)

        actions = F.one_hot(actions.long(), num_classes=len_space)
        actions_next = torch.argmax(q_values_next, dim=1)
        actions_next = F.one_hot(actions_next, num_classes=len_space)

        dones = dones.view(-1, 1)

        # Bellman-updated atoms Tz = r + gamma * z (no bootstrap at episode
        # end), clamped into the representable range [r_min, r_max].
        tz = rewards.view(-1, 1) + self.gamma * self.z * (1 - dones)
        tz = tz.clamp(self.r_min, self.r_max)

        # Fractional position of each Tz atom on the fixed support, with its
        # lower (floor) and upper (ceil) neighbouring bins.
        b = (tz - self.r_min) / self.delta_z
        l, u = b.floor().to(torch.int64), b.ceil().to(torch.int64)
        # When b is exactly integral l == u; nudge one bin apart so the
        # probability mass below is not dropped.
        l[(u > 0) * (l == u)] -= 1
        u[(l < (self.num_atoms - 1)) * (l == u)] += 1

        m_prob = torch.zeros((self.batch_size, len_space, self.num_atoms), device=self.device)

        # Distribution predicted for the greedy next actions.
        predictions_next = next_prediction[actions_next == 1, :]

        # Per-sample offsets to index the flattened (batch * atom) buffer.
        offset = torch.linspace(0, (self.batch_size - 1) * self.num_atoms, self.batch_size,
                                device=self.device).view(-1,
                                                         1)
        offset = offset.expand(self.batch_size, self.num_atoms)

        u_index = (u + offset).view(-1).to(torch.int64)
        l_index = (l + offset).view(-1).to(torch.int64)

        # Terminal transitions collapse the target onto the reward atom.
        predictions_next = (dones + (1 - dones) * predictions_next)

        # C51 projection: split each atom's mass between its two
        # neighbouring bins, proportionally to the distance.
        m_prob_action = m_prob[actions == 1, :].view(-1)
        m_prob_action.index_add_(0, u_index, (predictions_next * (u - b)).view(-1))
        m_prob_action.index_add_(0, l_index, (predictions_next * (b - l)).view(-1))
        m_prob[actions == 1, :] = m_prob_action.view(-1, self.num_atoms)

        self.optimizer.zero_grad()

        # Cross entropy between the projected target and the prediction.
        loss = - prediction.log() * m_prob
        loss.sum((1, 2)).mean().backward(retain_graph=True)
def __str__(self):
    """Dash-separated dump of the agent's configuration."""
    fields = [
        self.observation_space, self.action_space, self.network, self.memory,
        self.step_train, self.step, self.batch_size, self.gamma, self.loss,
        self.optimizer, self.greedy_exploration, self.num_atoms, self.r_min,
        self.r_max, self.delta_z, self.z,
    ]
    return "CategoricalDQN-" + "-".join(str(field) for field in fields)
| 40.862595 | 112 | 0.592004 |
600fa789512d9b2c1938a6bea93d8e4f9bd2e81a | 3,353 | py | Python | cbf_item_rec.py | tmscarla/polimi-recsys-2018 | 48f97e81b5a8353271ba73797376fdd84cb70af4 | [
"Apache-2.0"
] | 6 | 2019-02-15T12:22:23.000Z | 2020-10-27T14:29:25.000Z | cbf_item_rec.py | tmscarla/polimi-recsys-2018 | 48f97e81b5a8353271ba73797376fdd84cb70af4 | [
"Apache-2.0"
] | null | null | null | cbf_item_rec.py | tmscarla/polimi-recsys-2018 | 48f97e81b5a8353271ba73797376fdd84cb70af4 | [
"Apache-2.0"
] | 1 | 2021-12-03T21:29:17.000Z | 2021-12-03T21:29:17.000Z | import similaripy as sim
import scipy.sparse as sps
from evaluator import Evaluator
import numpy as np
import pandas as pd
from tqdm import tqdm
class CBFRecommender(object):
"""
A content-based recommender: scores tracks from item-content similarity and recommends 10 tracks for each target playlist.
"""
def __init__(self, datareader):
self.datareader = datareader
self.prediction = []
def __str__(self):
return "CBFRec"
def fit(self, mode="cosine", al_id=True, ar_id=True, top_k=100):
    """Build the item-item similarity model from the item content matrix.

    :param mode: similarity measure: "cosine", "as_cosine" or "dot".
        NOTE(review): any other value leaves ``self.model`` unset and
        ``recommend`` will fail — consider raising on unknown modes.
    :param al_id: NOTE(review): currently ignored — the ICM is always
        album-id features minus 0.45 * artist-id features (see the
        commented-out line below, which shows the original intent).
    :param ar_id: NOTE(review): currently ignored, see ``al_id``.
    :param top_k: number of nearest neighbours kept per item.
    """
    self.urm = self.datareader.get_urm()
    # self.icm = self.datareader.get_icm(alid=al_id, arid=ar_id)
    # Hard-coded (presumably tuned) feature blend: albums minus 0.45*artists.
    self.icm = self.datareader.get_icm(alid=True, arid=False)
    self.icm -= 0.45*self.datareader.get_icm(alid=False, arid=True)

    # Train the model
    print("["+mode+"]")
    if mode == "cosine":
        self.model = sim.cosine(self.icm,
                                k=top_k,
                                verbose=True)
    elif mode == "as_cosine":
        self. model = sim.asymmetric_cosine(self.icm,
                                            alpha=0.7,
                                            k=top_k,
                                            verbose=True)
    elif mode == "dot":
        self. model = sim.dot_product(self.icm,
                                      k=top_k,
                                      verbose=True)
def recommend(self, remove_seed=True):
    """
    Compute the top-10 recommendation for each target playlist and write
    them to '<str(self)>.csv'.
    :param remove_seed: if True, drop tracks already in the playlist
    :return: None (results are appended to self.prediction and saved to CSV)
    """
    # Score every track for each target playlist: URM x item-item similarity.
    user_recommendations = sim.dot_product(self.urm,
                                           self.model,
                                           target_rows=list(self.datareader.target_playlists),
                                           k=100,
                                           verbose=False)

    # Build the top-10 list for each target playlist.
    for t in self.datareader.target_playlists:
        scores = user_recommendations[t].toarray()[0]
        # Indices of the 100 best-scored tracks, highest first.
        tracks = scores.argsort()[-100:][::-1]

        if remove_seed:
            # Seed tracks of playlist t are the column indices of CSR row t.
            hold_ix = ~np.in1d(tracks, self.urm.indices[self.urm.indptr[t]:self.urm.indptr[t+1]])
            recommended_tracks = tracks[hold_ix]
            recommended_tracks = recommended_tracks[0:10]

            recommended_tracks_str = ' '.join([str(i) for i in recommended_tracks])
            self.prediction.append([t, recommended_tracks_str])
        else:
            recommended_tracks_str = ' '.join([str(i) for i in tracks[:10]])
            self.prediction.append([t, recommended_tracks_str])

    # Save CSV
    df = pd.DataFrame(self.prediction, columns=['playlist_id', 'track_ids'])
    df.to_csv(str(self) + '.csv', sep=',', index=False)
if __name__ == '__main__':
    # Smoke run: fit on the dataset, write the prediction CSV, then read it
    # back and print the evaluation score.
    from datareader import Datareader

    dr = Datareader()
    rec = CBFRecommender(dr)
    rec.fit(mode="as_cosine", al_id=True, ar_id=True, top_k=50)
    rec.recommend()

    ev = Evaluator()
    prova_da_valutare = pd.read_csv(str(rec) + '.csv')
    dict_tua_sol = ev.csv_to_dict(prova_da_valutare)
    print(ev.evaluate_dict( dict_tua_sol ))
| 32.240385 | 101 | 0.539219 |
fcc558e6af63886984baedefdb584ee6bf1f945b | 547 | py | Python | packages/python/plotly/plotly/validators/sunburst/marker/_coloraxis.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/sunburst/marker/_coloraxis.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/sunburst/marker/_coloraxis.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class ColoraxisValidator(_plotly_utils.basevalidators.SubplotidValidator):
    """Validator for the ``coloraxis`` property of ``sunburst.marker``."""

    def __init__(
        self, plotly_name="coloraxis", parent_name="sunburst.marker", **kwargs
    ):
        # Pull the validator settings out of kwargs first, falling back to
        # the defaults for this property, then hand everything to the base.
        dflt = kwargs.pop("dflt", None)
        edit_type = kwargs.pop("edit_type", "calc")
        regex = kwargs.pop("regex", "/^coloraxis([2-9]|[1-9][0-9]+)?$/")
        super(ColoraxisValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            dflt=dflt,
            edit_type=edit_type,
            regex=regex,
            **kwargs,
        )
| 34.1875 | 78 | 0.617916 |
2056e1196459c8c0b994eb30f92729d850c3fbcb | 8,945 | py | Python | application/analytics_project/settings.py | rendex85/analytics_backend-1 | 02ac45bde8754491795c99fbddcfc249d64a2fd3 | [
"MIT"
] | null | null | null | application/analytics_project/settings.py | rendex85/analytics_backend-1 | 02ac45bde8754491795c99fbddcfc249d64a2fd3 | [
"MIT"
] | null | null | null | application/analytics_project/settings.py | rendex85/analytics_backend-1 | 02ac45bde8754491795c99fbddcfc249d64a2fd3 | [
"MIT"
] | null | null | null | """
Django settings for analytics_project project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
env = environ.Env()
environ.Env.read_env() # reading .env file
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env('DEBUG')
DATA_UPLOAD_MAX_NUMBER_FIELDS = 100000
#ALLOWED_HOSTS = ['94.250.249.177', '94.250.249.177:8000', 'localhost', '127.0.0.1']
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'dataprocessing',
'django_summernote',
'rest_framework.authtoken',
'rest_framework_simplejwt',
'djoser',
'corsheaders',
'crispy_forms',
'workprogramsapp',
'django_tables2',
'django_filters',
'bootstrap_pagination',
'rest_framework_swagger',
'onlinecourse',
'records',
'gia_practice_app',
#'django_extensions',
# 'ckeditor',
# 'ckeditor_uploader',
#'oauth2_provider',
#'social_django',
#'rest_framework_social_oauth2',
# 'social_auth',
# 'social_django', # django social auth
# 'rest_social_auth', # this package
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
#'social_django.middleware.SocialAuthExceptionMiddleware',
#'django.middleware.common.BrokenLinkEmailsMiddleware',
#'django.middleware.common.CommonMiddleware',
#'dataprocessing.CorsMiddleware',
]
# MIDDLEWARE_CLASSES = [
# 'dataprocessing.CorsMiddleware',
# ]
ROOT_URLCONF = 'analytics_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
#'social_django.context_processors.backends',
#'social_django.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'analytics_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {'default': env.db('DATABASE_URL')}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static-backend/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static-backend')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
AUTH_USER_MODEL = 'dataprocessing.User'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAdminUser',
'rest_framework.permissions.AllowAny',
),
'PAGE_SIZE': 10,
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_simplejwt.authentication.JWTAuthentication',
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'EXCEPTION_HANDLER': 'rest_framework_json_api.exceptions.exception_handler',
'DEFAULT_PAGINATION_CLASS':
'rest_framework_json_api.pagination.PageNumberPagination',
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
# 'rest_framework_json_api.parsers.JSONParser',
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser'
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
# 'rest_framework_json_api.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_METADATA_CLASS': 'rest_framework_json_api.metadata.JSONAPIMetadata',
'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema',
'DEFAULT_FILTER_BACKENDS': ['django_filters.rest_framework.DjangoFilterBackend']
}
from datetime import timedelta
SIMPLE_JWT = {
# 'AUTH_HEADER_TYPES': ('JWT',),
'ACCESS_TOKEN_LIFETIME': timedelta(minutes=480),
'REFRESH_TOKEN_LIFETIME': timedelta(days=7),
}
AUTHENTICATION_BACKENDS = [
#'social_core.backends.github.GithubOAuth2',
'dataprocessing.social_auth_backend.FiwareAuth',
'social_core.backends.facebook.FacebookOAuth2',
'dataprocessing.itmo_backends.ItmoOAuth2',
'django.contrib.auth.backends.ModelBackend'
]
# SOCIAL_AUTH_ITMOOAUTH2_KEY = ''
# SOCIAL_AUTH_ITMOOAUTH2_SECRET = ''
# CLIENT = 'nexoVnlgoNJnTuZ3CNBcbHgayXmhRjJUYfOb'
# SECRET = 'GV4SDAMfv5pgE3jzblcW7HUcND5pywqQL4be'
#
# SOCIAL_AUTH_AUTH0_DOMAIN = os.getenv("SOCIAL_AUTH_AUTH0_DOMAIN")
# SOCIAL_AUTH_AUTH0_KEY = os.getenv("SOCIAL_AUTH_AUTH0_KEY")
# SOCIAL_AUTH_AUTH0_SECRET = os.getenv("SOCIAL_AUTH_AUTH0_SECRET")
# FIWARE_APP_ID = ''
# FIWARE_API_SECRET = ''
# FIWARE_IDM_ENDPOINT = 'https://login.itmo.ru/cas/oauth2.0/authorize'
#
# FIWARE_IDM_API_VERSION = 2
# FIWARE_KEYSTONE_ENDPOINT = 'http://cloud.lab.fiware.org:4731'
#
# SOCIAL_AUTH_ENABLED_BACKENDS = ('fiware',)
# SOCIAL_AUTH_RAISE_EXCEPTIONS = False
AUTHENTICATION_BACKENDS = [
#'social_core.backends.github.GithubOAuth2',
# 'dataprocessing.social_auth_backend.FiwareAuth',
# 'social_core.backends.facebook.FacebookOAuth2',
# 'dataprocessing.itmo_backends.ItmoOAuth2',
'django.contrib.auth.backends.ModelBackend'
]
CORS_ORIGIN_ALLOW_ALL = True
# #CORS_ALLOW_CREDENTIALS = True
# SESSION_COOKIE_SAMESITE = False
# CORS_ORIGIN_WHITELIST = [
# 'http://localhost:8080',
# ]
# CORS_ORIGIN_REGEX_WHITELIST = [
# 'http://localhost:8080',
# ]
DJOSER = {
'PASSWORD_RESET_CONFIRM_URL': '#/password/reset/confirm/{uid}/{token}',
'USERNAME_RESET_CONFIRM_URL': '#/username/reset/confirm/{uid}/{token}',
#'ACTIVATION_URL': '#/activate/{uid}/{token}',
#'SEND_ACTIVATION_EMAIL': True,
#'SERIALIZERS': {},
'SET_USERNAME_RETYPE': True,
'SERIALIZERS': {
#'user': 'dataprocessing.serializers.UserSerializer',
#'current_user': 'dataprocessing.serializers.UserSerializer',
#'user_create': 'dataprocessing.serializers.UserSerializer',
},
}
AUTH_USER_MODEL = 'dataprocessing.User'
SWAGGER_SETTINGS = {
'SECURITY_DEFINITIONS': {
'api_key': {
'type': 'apiKey',
'in': 'header',
'name': 'Authorization'
}
},
}
ISU = {
"ISU_CLIENT_ID": env('ISU_CLIENT_ID'),
"ISU_CLIENT_SECRET": env('ISU_CLIENT_SECRET'),
"ISU_REDIRECT_URI": env('ISU_REDIRECT_URI'),
"ISU_FINISH_URI": env('ISU_FINISH_URI_WITH_PROTOCOL'),
}
BARS = {
"BARS_LOGIN": env('BARS_LOGIN'),
"BARS_PASSWORD": env('BARS_PASSWORD'),
"BARS_URL":env('BARS_URL'),
}
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST_USER = env("EMAIL_HOST_USER")
EMAIL_HOST = env("EMAIL_HOST")
EMAIL_PORT = env("EMAIL_PORT")
EMAIL_USE_TLS = env("EMAIL_USE_TLS")
EMAIL_HOST_PASSWORD = env("EMAIL_HOST_PASSWORD")
SERVER_EMAIL = EMAIL_HOST_USER
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER | 29.916388 | 91 | 0.7109 |
450a340b74e753fbaa2b39d8aa34bab8eba52f6a | 25,125 | py | Python | tests/sphinx/test_base.py | dgehringer/pyiron_atomistics | f8f2d573a483e802c8e5840998a0769378b95e31 | [
"BSD-3-Clause"
] | null | null | null | tests/sphinx/test_base.py | dgehringer/pyiron_atomistics | f8f2d573a483e802c8e5840998a0769378b95e31 | [
"BSD-3-Clause"
] | 1 | 2022-02-24T09:51:10.000Z | 2022-02-24T09:51:10.000Z | tests/sphinx/test_base.py | srmnitc/pyiron_atomistics | 2c8052b082f2c4fb6f6291ac2b1f801ea7ab1567 | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import os
import numpy as np
import unittest
import warnings
import scipy.constants
from pyiron_atomistics.project import Project
from pyiron_atomistics.atomistics.structure.periodic_table import PeriodicTable
from pyiron_atomistics.atomistics.structure.atoms import Atoms
from pyiron_atomistics.sphinx.base import Group
BOHR_TO_ANGSTROM = (
scipy.constants.physical_constants["Bohr radius"][0] / scipy.constants.angstrom
)
HARTREE_TO_EV = scipy.constants.physical_constants["Hartree energy in eV"][0]
HARTREE_OVER_BOHR_TO_EV_OVER_ANGSTROM = HARTREE_TO_EV / BOHR_TO_ANGSTROM
class TestSphinx(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.file_location = os.path.dirname(os.path.abspath(__file__))
cls.project = Project(os.path.join(cls.file_location, "../static/sphinx"))
pt = PeriodicTable()
pt.add_element(parent_element="Fe", new_element="Fe_up", spin="0.5")
Fe_up = pt.element("Fe_up")
cls.basis = Atoms(
elements=[Fe_up, Fe_up],
scaled_positions=[[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]],
cell=2.6 * np.eye(3),
)
cls.sphinx = cls.project.create_job("Sphinx", "job_sphinx")
cls.sphinx_band_structure = cls.project.create_job("Sphinx", "sphinx_test_bs")
cls.sphinx_2_3 = cls.project.create_job("Sphinx", "sphinx_test_2_3")
cls.sphinx_2_5 = cls.project.create_job("Sphinx", "sphinx_test_2_5")
cls.sphinx_aborted = cls.project.create_job("Sphinx", "sphinx_test_aborted")
cls.sphinx.structure = cls.basis
cls.sphinx.fix_spin_constraint = True
cls.sphinx_band_structure.structure = cls.project.create_structure("Fe", "bcc", 2.81)
cls.sphinx_band_structure.structure = cls.sphinx_band_structure.structure.create_line_mode_structure()
cls.sphinx_2_3.structure = Atoms(
elements=["Fe", "Fe"],
scaled_positions=[[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]],
cell=2.6 * np.eye(3),
)
cls.sphinx_2_5.structure = Atoms(
elements=["Fe", "Ni"],
scaled_positions=[[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]],
cell=2.83 * np.eye(3),
)
cls.sphinx_2_5.structure.set_initial_magnetic_moments([2,2])
cls.sphinx_aborted.structure = Atoms(
elements=32 * ["Fe"],
scaled_positions=np.arange(32 * 3).reshape(-1, 3) / (32 * 3),
cell=3.5 * np.eye(3),
)
cls.sphinx_aborted.status.aborted = True
cls.current_dir = os.path.abspath(os.getcwd())
cls.sphinx._create_working_directory()
cls.sphinx_2_3._create_working_directory()
cls.sphinx.input["VaspPot"] = False
cls.sphinx.structure.add_tag(selective_dynamics=(True, True, True))
cls.sphinx.structure.selective_dynamics[1] = (False, False, False)
cls.sphinx.load_default_groups()
cls.sphinx.fix_symmetry = False
cls.sphinx.write_input()
cls.sphinx_2_3.to_hdf()
cls.sphinx_2_3.decompress()
cls.sphinx_2_5.decompress()
cls.sphinx_2_5.collect_output()
@classmethod
def tearDownClass(cls):
cls.sphinx_2_3.decompress()
cls.sphinx_2_5.decompress()
cls.file_location = os.path.dirname(os.path.abspath(__file__))
os.remove(
os.path.join(
cls.file_location,
"../static/sphinx/job_sphinx_hdf5/job_sphinx/input.sx",
)
)
os.remove(
os.path.join(
cls.file_location,
"../static/sphinx/job_sphinx_hdf5/job_sphinx/spins.in",
)
)
os.remove(
os.path.join(
cls.file_location,
"../static/sphinx/job_sphinx_hdf5/job_sphinx/Fe_GGA.atomicdata",
)
)
os.rmdir(
os.path.join(
cls.file_location, "../static/sphinx/job_sphinx_hdf5/job_sphinx"
)
)
os.rmdir(os.path.join(cls.file_location, "../static/sphinx/job_sphinx_hdf5"))
os.remove(
os.path.join(cls.file_location, "../static/sphinx/sphinx_test_2_3.h5")
)
def test_id_pyi_to_spx(self):
    # The pyiron->SPHInX and SPHInX->pyiron index maps must each cover
    # every atom of the job's structure.
    self.assertEqual(len(self.sphinx.id_pyi_to_spx), len(self.sphinx.structure))
    self.assertEqual(len(self.sphinx.id_spx_to_pyi), len(self.sphinx.structure))
def test_potential(self):
    # job_sphinx (custom Fe_up species) lists no potentials, while the
    # plain-Fe jobs both offer the Fe_GGA potential.
    self.assertEqual([], self.sphinx.list_potentials())
    self.assertEqual(['Fe_GGA'], self.sphinx_2_3.list_potentials())
    self.assertEqual(['Fe_GGA'], self.sphinx_2_5.list_potentials())
    # Both attribute-style and item-style assignment select a potential.
    self.sphinx_2_3.potential.Fe = 'Fe_GGA'
    self.sphinx_2_5.potential["Fe"] = 'Fe_GGA'
    self.assertEqual('Fe_GGA', list(self.sphinx_2_3.potential.to_dict().values())[0])
    self.assertEqual('Fe_GGA', list(self.sphinx_2_5.potential.to_dict().values())[0])
def test_write_input(self):
file_content = [
'//job_sphinx\n',
'//SPHInX input file generated by pyiron\n',
'\n',
'format paw;\n',
'include <parameters.sx>;\n',
'\n',
'pawPot {\n',
'\tspecies {\n',
'\t\tname = "Fe";\n',
'\t\tpotType = "AtomPAW";\n',
'\t\telement = "Fe";\n',
'\t\tpotential = "Fe_GGA.atomicdata";\n',
'\t}\n',
'}\n',
'structure {\n',
'\tcell = [[4.913287924027003, 0.0, 0.0], [0.0, 4.913287924027003, 0.0], [0.0, 0.0, 4.913287924027003]];\n',
'\tspecies {\n',
'\t\telement = "Fe";\n',
'\t\tatom {\n',
'\t\t\tlabel = "spin_0.5";\n',
'\t\t\tcoords = [0.0, 0.0, 0.0];\n',
'\t\t\tmovable;\n',
'\t\t}\n',
'\t\tatom {\n',
'\t\t\tlabel = "spin_0.5";\n',
'\t\t\tcoords = [2.4566439620135014, 2.4566439620135014, 2.4566439620135014];\n',
'\t\t}\n',
'\t}\n',
'\tsymmetry {\n',
'\t\toperator {\n',
'\t\t\tS = [[1,0,0],[0,1,0],[0,0,1]];\n',
'\t\t}\n',
'\t}\n',
'}\n',
'basis {\n',
'\teCut = 24.989539079445393;\n',
'\tkPoint {\n',
'\t\tcoords = [0.5, 0.5, 0.5];\n',
'\t\tweight = 1;\n',
'\t\trelative;\n',
'\t}\n',
'\tfolding = [4, 4, 4];\n',
'\tsaveMemory;\n',
'}\n',
'PAWHamiltonian {\n',
'\tnEmptyStates = 6;\n',
'\tekt = 0.2;\n',
'\tMethfesselPaxton = 1;\n',
'\txc = PBE;\n',
'\tspinPolarized;\n',
'}\n',
'initialGuess {\n',
'\twaves {\n',
'\t\tlcao {}\n',
'\t\tpawBasis;\n',
'\t}\n',
'\trho {\n',
'\t\tatomicOrbitals;\n',
'\t\tatomicSpin {\n',
'\t\t\tlabel = "spin_0.5";\n',
'\t\t\tspin = 0.5;\n',
'\t\t}\n',
'\t\tatomicSpin {\n',
'\t\t\tlabel = "spin_0.5";\n',
'\t\t\tspin = 0.5;\n',
'\t\t}\n',
'\t}\n',
'\tnoWavesStorage = false;\n',
'}\n',
'main {\n',
'\tscfDiag {\n',
'\t\trhoMixing = 1.0;\n',
'\t\tspinMixing = 1.0;\n',
'\t\tdEnergy = 3.674932217565499e-06;\n',
'\t\tmaxSteps = 100;\n',
'\t\tpreconditioner {\n',
'\t\t\ttype = KERKER;\n',
'\t\t\tscaling = 1.0;\n',
'\t\t\tspinScaling = 1.0;\n',
'\t\t}\n',
'\t\tblockCCG {}\n',
'\t}\n',
'\tevalForces {\n',
'\t\tfile = "relaxHist.sx";\n',
'\t}\n',
'}\n',
'spinConstraint {\n',
'\tfile = "spins.in";\n',
'}\n'
]
file_name = os.path.join(
self.file_location, "../static/sphinx/job_sphinx_hdf5/job_sphinx/input.sx"
)
with open(file_name) as input_sx:
lines = input_sx.readlines()
self.assertEqual(file_content, lines)
def test_plane_wave_cutoff(self):
with self.assertRaises(ValueError):
self.sphinx.plane_wave_cutoff = -1
with warnings.catch_warnings(record=True) as w:
self.sphinx.plane_wave_cutoff = 25
self.assertEqual(len(w), 1)
self.sphinx.plane_wave_cutoff = 340
self.assertEqual(self.sphinx.plane_wave_cutoff, 340)
def test_set_kpoints(self):
mesh = [2, 3, 4]
center_shift = [0.1, 0.1, 0.1]
trace = {"my_path": [("GAMMA", "H"), ("H", "N"), ("P", "H")]}
kpoints_group = Group({
'relative': True,
'from': {
'coords': np.array([0.0, 0.0, 0.0]),
'label': '"GAMMA"'
},
'to': [
{'coords': np.array([0.5, -0.5, 0.5]),
'nPoints': 20, 'label': '"H"'},
{'coords': np.array([0.0, 0.0, 0.5]),
'nPoints': 20, 'label': '"N"'},
{'coords': np.array([0.25, 0.25, 0.25]),
'nPoints': 0, 'label': '"P"'},
{'coords': np.array([0.5, -0.5, 0.5]),
'nPoints': 20, 'label': '"H"'},
]
})
with self.assertRaises(ValueError):
self.sphinx_band_structure.set_kpoints(symmetry_reduction="pyiron rules!")
with self.assertRaises(ValueError):
self.sphinx_band_structure.set_kpoints(scheme="no valid scheme")
with self.assertRaises(ValueError):
self.sphinx_band_structure.set_kpoints(scheme="Line", path_name="my_path")
self.sphinx_band_structure.structure.add_high_symmetry_path(trace)
with self.assertRaises(ValueError):
self.sphinx_band_structure.set_kpoints(scheme="Line", n_path=20)
with self.assertRaises(AssertionError):
self.sphinx_band_structure.set_kpoints(scheme="Line", path_name="wrong name", n_path=20)
self.sphinx_band_structure.set_kpoints(scheme="Line", path_name="my_path", n_path=20)
self.assertTrue("kPoint" not in self.sphinx_band_structure.input.sphinx.basis)
self.assertEqual(self.sphinx_band_structure.input.sphinx.to_sphinx(kpoints_group),
self.sphinx_band_structure.input.sphinx.basis.kPoints.to_sphinx())
self.sphinx_band_structure.set_kpoints(scheme="MP", mesh=mesh, center_shift=center_shift)
self.assertTrue("kPoints" not in self.sphinx_band_structure.input.sphinx.basis)
self.assertEqual(self.sphinx_band_structure.input.KpointFolding, mesh)
self.assertEqual(self.sphinx_band_structure.input.KpointCoords, center_shift)
self.assertEqual(self.sphinx_band_structure.get_k_mesh_by_cell(2 * np.pi / 2.81), [1, 1, 1])
def test_set_empty_states(self):
with self.assertRaises(ValueError):
self.sphinx.set_empty_states(-1)
self.sphinx.set_empty_states(666)
self.assertEqual(self.sphinx.input["EmptyStates"], 666)
self.sphinx.set_empty_states()
self.assertEqual(self.sphinx.input["EmptyStates"], "auto")
def test_fix_spin_constraint(self):
self.assertTrue(self.sphinx.fix_spin_constraint)
with self.assertRaises(ValueError):
self.sphinx.fix_spin_constraint = 3
self.sphinx.fix_spin_constraint = False
self.assertIsInstance(self.sphinx.fix_spin_constraint, bool)
def test_calc_static(self):
self.sphinx.calc_static(algorithm="wrong_algorithm")
self.assertFalse(
"keepRho"
in self.sphinx.input.sphinx.main.to_sphinx()
)
self.assertTrue(
"blockCCG"
in self.sphinx.input.sphinx.main.to_sphinx()
)
self.sphinx.restart_file_list.append("randomfile")
self.sphinx.calc_static(algorithm="ccg")
self.assertTrue(
"keepRho"
in self.sphinx.input.sphinx.main.to_sphinx()
)
self.assertEqual(self.sphinx.input["Estep"], 100)
self.assertTrue(
"CCG"
in self.sphinx.input.sphinx.main.to_sphinx()
)
def test_calc_minimize(self):
self.sphinx.calc_minimize(electronic_steps=100, ionic_steps=50)
self.assertEqual(self.sphinx.input["Estep"], 100)
self.assertEqual(self.sphinx.input["Istep"], 50)
self.assertEqual(self.sphinx.input.sphinx.main['ricQN']['maxSteps'], '50')
def test_get_scf_group(self):
with warnings.catch_warnings(record=True) as w:
test_scf = self.sphinx_band_structure.get_scf_group(algorithm="wrong")
self.assertEqual(len(w), 1)
ref_scf = {
'rhoMixing': '1.0',
'spinMixing': '1.0',
'dEnergy': 3.674932217565499e-06,
'preconditioner': {'type': 'KERKER', 'scaling': 1.0, 'spinScaling': 1.0},
'maxSteps': '100',
'blockCCG': {}}
self.assertEqual(test_scf, ref_scf)
ref_scf = {
'rhoMixing': '1.0',
'spinMixing': '1.0',
'nPulaySteps': '0',
'dEnergy': 3.674932217565499e-06,
'maxSteps': '100',
'preconditioner': {'type': 0},
'blockCCG': {
'maxStepsCCG': 0,
'blockSize': 0,
'nSloppy': 0},
'noWavesStorage': True
}
self.sphinx_band_structure.input["nPulaySteps"] = 0
self.sphinx_band_structure.input["preconditioner"] = 0
self.sphinx_band_structure.input["maxStepsCCG"] = 0
self.sphinx_band_structure.input["blockSize"] = 0
self.sphinx_band_structure.input["nSloppy"] = 0
self.sphinx_band_structure.input["WriteWaves"] = False
test_scf = self.sphinx_band_structure.get_scf_group()
self.assertEqual(test_scf, ref_scf)
def test_check_setup(self):
self.assertFalse(self.sphinx.check_setup())
self.sphinx_band_structure.load_default_groups()
self.sphinx_band_structure.input.sphinx.basis.kPoint = {"coords": "0.5, 0.5, 0.5"}
self.assertFalse(self.sphinx_band_structure.check_setup())
self.sphinx_band_structure.load_default_groups()
self.sphinx_band_structure.server.cores = 2000
self.assertFalse(self.sphinx_band_structure.check_setup())
self.sphinx_band_structure.input["EmptyStates"] = "auto"
self.assertFalse(self.sphinx_band_structure.check_setup())
self.sphinx_band_structure.structure.add_tag(spin=None)
for i in range(len(self.sphinx_band_structure.structure)):
self.sphinx_band_structure.structure.spin[i] = 4
self.assertFalse(self.sphinx_band_structure.check_setup())
def test_set_check_overlap(self):
self.assertRaises(TypeError, self.sphinx_band_structure.set_check_overlap, 0)
def test_set_occupancy_smearing(self):
self.assertRaises(
ValueError, self.sphinx_band_structure.set_occupancy_smearing, 0.1, 0.1
)
self.assertRaises(
ValueError, self.sphinx_band_structure.set_occupancy_smearing, "fermi", -0.1
)
self.sphinx_band_structure.set_occupancy_smearing("fermi", 0.1)
self.assertTrue('FermiDirac' in self.sphinx_band_structure.input)
self.sphinx_band_structure.set_occupancy_smearing("methfessel", 0.1)
self.assertTrue('MethfesselPaxton' in self.sphinx_band_structure.input)
def test_load_default_groups(self):
backup = self.sphinx_band_structure.structure.copy()
self.sphinx_band_structure.structure = None
self.assertRaises(
AssertionError, self.sphinx_band_structure.load_default_groups
)
self.sphinx_band_structure.structure = backup
def test_validate_ready_to_run(self):
backup = self.sphinx_band_structure.structure.copy()
self.sphinx_band_structure.structure = None
self.assertRaises(AssertionError, self.sphinx_band_structure.validate_ready_to_run)
self.sphinx_band_structure.structure = backup
self.sphinx_band_structure.input["THREADS"] = 20
self.sphinx_band_structure.server.cores = 10
self.assertRaises(AssertionError, self.sphinx_band_structure.validate_ready_to_run)
self.sphinx_band_structure.input.sphinx.main.clear()
self.assertRaises(AssertionError, self.sphinx_band_structure.validate_ready_to_run)
backup = self.sphinx.input.sphinx.basis.eCut
self.sphinx.input.sphinx.basis.eCut = 400
self.assertFalse(self.sphinx.validate_ready_to_run())
self.sphinx.input.sphinx.basis.eCut = backup
backup = self.sphinx.input.sphinx.basis.kPoint.copy()
self.sphinx.input.sphinx.basis.kPoint.clear()
self.sphinx.input.sphinx.basis.kPoint.coords = [0.5, 0.5, 0.25]
self.sphinx.input.sphinx.basis.kPoint.weight = 1
self.assertFalse(self.sphinx.validate_ready_to_run())
self.sphinx.input.sphinx.basis.kPoint = backup
backup = self.sphinx.input.sphinx.PAWHamiltonian.ekt
self.sphinx.input.sphinx.PAWHamiltonian.ekt = 0.0001
self.assertFalse(self.sphinx.validate_ready_to_run())
self.sphinx.input.sphinx.PAWHamiltonian.ekt = backup
backup = self.sphinx.input.sphinx.PAWHamiltonian.xc
self.sphinx.input.sphinx.PAWHamiltonian.xc = "Wrong"
self.assertFalse(self.sphinx.validate_ready_to_run())
self.sphinx.input.sphinx.PAWHamiltonian.xc = backup
backup = self.sphinx.input.sphinx.PAWHamiltonian.xc
self.sphinx.input.sphinx.PAWHamiltonian.xc = "Wrong"
self.assertFalse(self.sphinx.validate_ready_to_run())
self.sphinx.input.sphinx.PAWHamiltonian.xc = backup
backup = self.sphinx.input.sphinx.PAWHamiltonian.nEmptyStates
self.sphinx.input.sphinx.PAWHamiltonian.nEmptyStates = 100
self.assertFalse(self.sphinx.validate_ready_to_run())
self.sphinx.input.sphinx.PAWHamiltonian.nEmptyStates = backup
backup = self.sphinx.input.sphinx.structure.copy()
self.sphinx.input.sphinx.structure.cell = [[0,0,0],[0,0,0],[0,0,0]]
self.assertFalse(self.sphinx.validate_ready_to_run())
self.sphinx.input.sphinx.structure = backup
self.assertTrue(self.sphinx.validate_ready_to_run())
def test_set_mixing_parameters(self):
self.assertRaises(
ValueError, self.sphinx.set_mixing_parameters, "LDA", 7, 1.0, 1.0
)
self.assertRaises(
ValueError, self.sphinx.set_mixing_parameters, "PULAY", 7, -0.1, 1.0
)
self.assertRaises(
ValueError, self.sphinx.set_mixing_parameters, "PULAY", 7, 1.0, 2.0
)
self.assertRaises(
ValueError, self.sphinx.set_mixing_parameters, "PULAY", 7, 1.0, 1.0, -0.1, 0.5
)
self.assertRaises(
ValueError, self.sphinx.set_mixing_parameters, "PULAY", 7, 1.0, 1.0, 0.1, -0.5
)
self.sphinx.set_mixing_parameters(
method="PULAY",
n_pulay_steps=7,
density_mixing_parameter=0.5,
spin_mixing_parameter=0.2,
density_residual_scaling=0.1,
spin_residual_scaling=0.3,
)
self.assertEqual(self.sphinx.input["rhoMixing"], 0.5)
self.assertEqual(self.sphinx.input["spinMixing"], 0.2)
self.assertEqual(self.sphinx.input["rhoResidualScaling"], 0.1)
self.assertEqual(self.sphinx.input["spinResidualScaling"], 0.3)
def test_exchange_correlation_functional(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.sphinx.exchange_correlation_functional = "llda"
self.assertEqual(len(w), 1)
self.assertIsInstance(w[-1].message, SyntaxWarning)
self.sphinx.exchange_correlation_functional = "pbe"
self.assertEqual(self.sphinx.exchange_correlation_functional, "PBE")
def test_write_structure(self):
cell = (self.sphinx.structure.cell / BOHR_TO_ANGSTROM).tolist()
pos_2 = (self.sphinx.structure.positions[1] / BOHR_TO_ANGSTROM).tolist()
file_content = [
f'cell = {cell};\n',
'species {\n',
'\telement = "Fe";\n',
'\tatom {\n',
'\t\tlabel = "spin_0.5";\n',
'\t\tcoords = [0.0, 0.0, 0.0];\n',
'\t\tmovable;\n',
'\t}\n',
'\tatom {\n',
'\t\tlabel = "spin_0.5";\n',
'\t\tcoords = [2.4566439620135014, 2.4566439620135014, 2.4566439620135014];\n',
'\t}\n',
'}\n',
]
self.assertEqual(''.join(file_content), self.sphinx.input.sphinx.structure.to_sphinx())
def test_collect_aborted(self):
with self.assertRaises(AssertionError):
self.sphinx_aborted.collect_output()
def test_collect_2_5(self):
output = self.sphinx_2_5._output_parser
output.collect(directory=self.sphinx_2_5.working_directory)
self.assertTrue(all(np.diff(output.generic.dft.computation_time) > 0))
self.assertTrue(all(output.generic.dft.energy_free - output.generic.dft.energy_int < 0))
self.assertTrue(all(output.generic.dft.energy_free - output.generic.dft.energy_zero < 0))
list_values = [
"scf_energy_int",
"scf_energy_zero",
"scf_energy_free",
"scf_convergence",
"scf_electronic_entropy",
"atom_scf_spins",
]
for list_one in list_values:
for list_two in list_values:
self.assertEqual(
len(output.generic.dft[list_one]), len(output.generic.dft[list_two])
)
rho = self.sphinx_2_5._output_parser.charge_density
vel = self.sphinx_2_5._output_parser.electrostatic_potential
self.assertIsNotNone(rho.total_data)
self.assertIsNotNone(vel.total_data)
def test_check_band_occupancy(self):
self.assertTrue(self.sphinx_2_5.output.check_band_occupancy())
self.assertTrue(self.sphinx_2_5.nbands_convergence_check())
def test_collect_2_3(self):
file_location = os.path.join(
self.file_location, "../static/sphinx/sphinx_test_2_3_hdf5/sphinx_test_2_3/"
)
residue_lst = np.loadtxt(file_location + "residue.dat")[:, 1].reshape(1, -1)
residue_lst = (residue_lst).tolist()
energy_int_lst = np.loadtxt(file_location + "energy.dat")[:, 2].reshape(1, -1)
energy_int_lst = (energy_int_lst * HARTREE_TO_EV).tolist()
with open(file_location + "sphinx.log") as ffile:
energy_free_lst = [[float(line.split('=')[-1]) * HARTREE_TO_EV for line in ffile if line.startswith('F(')]]
eig_lst = [np.loadtxt(file_location + "eps.dat")[:, 1:].tolist()]
self.sphinx_2_3.collect_output()
self.assertEqual(
residue_lst, self.sphinx_2_3._output_parser.generic.dft["scf_residue"]
)
self.assertEqual(
energy_int_lst, self.sphinx_2_3._output_parser.generic.dft["scf_energy_int"]
)
self.assertEqual(
eig_lst,
self.sphinx_2_3._output_parser.generic.dft["bands_eigen_values"].tolist(),
)
self.assertEqual(
energy_free_lst,
self.sphinx_2_3._output_parser.generic.dft["scf_energy_free"],
)
self.assertEqual(
21.952 * BOHR_TO_ANGSTROM ** 3, self.sphinx_2_3._output_parser.generic["volume"]
)
def test_structure_parsing(self):
self.sphinx_2_3._output_parser.collect_relaxed_hist(
file_name="relaxedHist_2.sx", cwd=self.sphinx_2_3.working_directory
)
def test_density_of_states(self):
dos = self.sphinx_2_5.get_density_of_states()
self.assertLess(dos['grid'][dos['dos'][0].argmax()], 0)
def test_convergence_precision(self):
job = self.project.create_job(job_type='Sphinx', job_name='energy_convergence')
job.structure = self.project.create_ase_bulk('Al', 'fcc', 3.5)
job.set_convergence_precision(ionic_energy_tolerance=1e-5, electronic_energy=1e-8)
job.calc_minimize(ionic_steps=250, electronic_steps=200)
self.assertAlmostEqual(float(job.input.sphinx.main.ricQN.bornOppenheimer.scfDiag.dEnergy)*HARTREE_TO_EV, 1e-8)
self.assertAlmostEqual(float(job.input.sphinx.main.ricQN.dEnergy)*HARTREE_TO_EV, 1e-5)
self.assertEqual(int(job.input.sphinx.main.ricQN.maxSteps), 250)
self.assertEqual(int(job.input.sphinx.main.ricQN.bornOppenheimer.scfDiag.maxSteps), 200)
if __name__ == "__main__":
unittest.main()
| 42.01505 | 120 | 0.60597 |
b53bc853c8dd2914a35661cbff98d84c84ca1e87 | 969 | py | Python | hhttp/master.py | lemoi/hhttp | 8bd813e0d153985261e38a57590748fa55e79766 | [
"MIT"
] | null | null | null | hhttp/master.py | lemoi/hhttp | 8bd813e0d153985261e38a57590748fa55e79766 | [
"MIT"
] | null | null | null | hhttp/master.py | lemoi/hhttp | 8bd813e0d153985261e38a57590748fa55e79766 | [
"MIT"
] | null | null | null | import os, sys, socket, selectors
from .manager import Manager
from .consts import CONFIG_PARAM
#the most efficient select implementation available on the current platform
SELECT = selectors.DefaultSelector()
CPU_NUM = os.cpu_count()
#ipv4
AF = socket.AF_INET #AF_INET6
HOST = CONFIG_PARAM['host']
PORT = int(CONFIG_PARAM['port'])
def run():
SOCK = socket.socket(AF, socket.SOCK_STREAM)
SOCK.bind((HOST, PORT))
SOCK.listen(100)
SOCK.setblocking(False)
SELECT.register(SOCK, selectors.EVENT_READ)
M = Manager(CPU_NUM)
print('hhttp is running')
while True:
events = SELECT.select()
for key, _ in events:
if key.fileobj is SOCK:
conn, addr = SOCK.accept()
# conn.setblocking(False) 多线程 Resource temporarily unavailable 错误
SELECT.register(conn, selectors.EVENT_READ)
else:
SELECT.unregister(key.fd)
M.put(key.fileobj) | 30.28125 | 81 | 0.651187 |
fa5910dee91ae37002bfc2dfb1bd0c4d8a66869a | 2,687 | py | Python | azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/packet_capture_parameters.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/packet_capture_parameters.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/packet_capture_parameters.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2017-01-20T18:25:46.000Z | 2017-05-12T21:31:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PacketCaptureParameters(Model):
"""Parameters that define the create packet capture operation.
All required parameters must be populated in order to send to Azure.
:param target: Required. The ID of the targeted resource, only VM is
currently supported.
:type target: str
:param bytes_to_capture_per_packet: Number of bytes captured per packet,
the remaining bytes are truncated. Default value: 0 .
:type bytes_to_capture_per_packet: int
:param total_bytes_per_session: Maximum size of the capture output.
Default value: 1073741824 .
:type total_bytes_per_session: int
:param time_limit_in_seconds: Maximum duration of the capture session in
seconds. Default value: 18000 .
:type time_limit_in_seconds: int
:param storage_location: Required.
:type storage_location:
~azure.mgmt.network.v2016_09_01.models.PacketCaptureStorageLocation
:param filters:
:type filters:
list[~azure.mgmt.network.v2016_09_01.models.PacketCaptureFilter]
"""
_validation = {
'target': {'required': True},
'storage_location': {'required': True},
}
_attribute_map = {
'target': {'key': 'target', 'type': 'str'},
'bytes_to_capture_per_packet': {'key': 'bytesToCapturePerPacket', 'type': 'int'},
'total_bytes_per_session': {'key': 'totalBytesPerSession', 'type': 'int'},
'time_limit_in_seconds': {'key': 'timeLimitInSeconds', 'type': 'int'},
'storage_location': {'key': 'storageLocation', 'type': 'PacketCaptureStorageLocation'},
'filters': {'key': 'filters', 'type': '[PacketCaptureFilter]'},
}
def __init__(self, **kwargs):
super(PacketCaptureParameters, self).__init__(**kwargs)
self.target = kwargs.get('target', None)
self.bytes_to_capture_per_packet = kwargs.get('bytes_to_capture_per_packet', 0)
self.total_bytes_per_session = kwargs.get('total_bytes_per_session', 1073741824)
self.time_limit_in_seconds = kwargs.get('time_limit_in_seconds', 18000)
self.storage_location = kwargs.get('storage_location', None)
self.filters = kwargs.get('filters', None)
| 43.33871 | 95 | 0.665426 |
4f4d46774538aa09598577c7d9d0519392ef77d9 | 97,810 | py | Python | fkie_node_manager/src/fkie_node_manager/node_tree_model.py | srv/multimaster_fkie | a6b0e51c2f52a5ad9cc5baae3b8c46ee45e65372 | [
"BSD-3-Clause"
] | null | null | null | fkie_node_manager/src/fkie_node_manager/node_tree_model.py | srv/multimaster_fkie | a6b0e51c2f52a5ad9cc5baae3b8c46ee45e65372 | [
"BSD-3-Clause"
] | null | null | null | fkie_node_manager/src/fkie_node_manager/node_tree_model.py | srv/multimaster_fkie | a6b0e51c2f52a5ad9cc5baae3b8c46ee45e65372 | [
"BSD-3-Clause"
] | null | null | null | # Software License Agreement (BSD License)
#
# Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from python_qt_binding.QtCore import QFile, QRect, Qt, Signal
from python_qt_binding.QtGui import QIcon, QImage, QStandardItem, QStandardItemModel
try:
from python_qt_binding.QtGui import QItemDelegate
except Exception:
from python_qt_binding.QtWidgets import QItemDelegate
from datetime import datetime
import re
import roslib
import rospy
import traceback
from diagnostic_msgs.msg import KeyValue
from fkie_master_discovery.common import get_hostname, subdomain
from fkie_master_discovery.master_info import NodeInfo
from fkie_node_manager_daemon.common import sizeof_fmt, isstring, utf8
from fkie_node_manager_daemon import url as nmdurl
from fkie_node_manager.common import lnamespace, namespace, normns
from fkie_node_manager.name_resolution import NameResolution, MasterEntry
from fkie_node_manager.parameter_handler import ParameterHandler
import fkie_node_manager as nm
class CellItem(QStandardItem):
'''
Item for a cell. References to a node item.
'''
ITEM_TYPE = Qt.UserRole + 41
def __init__(self, name, item=None, parent=None):
'''
Initialize the CellItem object with given values.
:param name: the name of the group
:param parent: the parent item. In most cases this is the HostItem. The variable is used to determine the different columns of the NodeItem.
:type parent: :class:`QtGui.QStandardItem` <https://srinikom.github.io/pyside-docs/PySide/QtGui/QStandardItem.html>
'''
QStandardItem.__init__(self)
self.parent_item = parent
self._name = name
self.item = item
@property
def name(self):
'''
The name of this group.
:rtype: str
'''
return self._name
# ###############################################################################
# ############# GrouptItem ##############
# ###############################################################################
class GroupItem(QStandardItem):
'''
The GroupItem stores the information about a group of nodes.
'''
ITEM_TYPE = Qt.UserRole + 25
def __init__(self, name, parent=None, has_remote_launched_nodes=False, is_group=False):
'''
Initialize the GroupItem object with given values.
:param str name: the name of the group
:param parent: the parent item. In most cases this is the HostItem. The variable is used to determine the different columns of the NodeItem.
:type parent: :class:`QtGui.QStandardItem` <https://srinikom.github.io/pyside-docs/PySide/QtGui/QStandardItem.html>
:param bool is_group: True if this is a capability group. In other case it is a namespace group.
'''
dname = name
if dname.rfind('@') <= 0:
if is_group:
dname = '{' + dname + '}'
else:
dname = dname + '/'
QStandardItem.__init__(self, dname)
self.parent_item = parent
self._name = name
self.setIcon(nm.settings().icon('state_off.png'))
self.descr_type = self.descr_name = self.descr = ''
self.descr_images = []
self._capcabilities = dict()
self._has_remote_launched_nodes = has_remote_launched_nodes
self._remote_launched_nodes_updated = False
'''
:ivar: dict(config : dict(namespace: dict(group:dict('type' : str, 'images' : [str], 'description' : str, 'nodes' : [str]))))
'''
self._re_cap_nodes = dict()
self._is_group = is_group
self._state = NodeItem.STATE_OFF
self.diagnostic_level = 0
self.is_system_group = name == 'SYSTEM'
self._clearup_mark_delete = False
@property
def name(self):
'''
The name of this group.
:rtype: str
'''
return self._name
@name.setter
def name(self, new_name):
'''
Set the new name of this group and updates the displayed name of the item.
:param str new_name: The new name of the group. Used also to identify the group.
'''
self._name = new_name
if self._is_group:
self.setText('{' + self._name + '}')
else:
self.setText(self._name + '/')
@property
def state(self):
'''
The state of this group.
:rtype: int
'''
return self._state
@property
def is_group(self):
return self._is_group
@property
def cfgs(self):
return self.get_configs()
def get_namespace(self):
name = self._name
if type(self) == HostItem:
name = rospy.names.SEP
elif type(self) == GroupItem and self._is_group:
name = namespace(self._name)
result = name
if self.parent_item is not None:
result = normns(self.parent_item.get_namespace() + rospy.names.SEP) + normns(result + rospy.names.SEP)
return normns(result)
def count_nodes(self):
'''
:retrun: Returns count of nodes inside this group.
:rtype: int
'''
result = 0
for i in range(self.rowCount()):
item = self.child(i)
if isinstance(item, GroupItem):
result += item.count_nodes()
elif isinstance(item, NodeItem):
result += 1
return result
def is_in_cap_group(self, nodename, config, ns, groupname):
'''
Returns `True` if the group contains the node.
:param str nodename: the name of the node to test
:param str config: the configuration name
:param str ns: namespace
:param str groupname: the group name
:return: `True`, if the nodename is in the group
:rtype: bool
'''
try:
if self._re_cap_nodes[(config, ns, groupname)].match(nodename):
return True
except Exception:
pass
return False
def _create_cap_nodes_pattern(self, config, cap):
for ns, groups in cap.items():
for groupname, descr in groups.items():
try:
nodes = descr['nodes']
def_list = ['\A' + n.strip().replace('*', '.*') + '\Z' for n in nodes]
if def_list:
self._re_cap_nodes[(config, ns, groupname)] = re.compile('|'.join(def_list), re.I)
else:
self._re_cap_nodes[(config, ns, groupname)] = re.compile('\b', re.I)
except Exception:
rospy.logwarn("create_cap_nodes_pattern: %s" % traceback.format_exc(1))
def add_capabilities(self, config, capabilities, masteruri):
'''
Add new capabilities. Based on this capabilities the node are grouped. The
view will be updated.
:param str config: The name of the configuration containing this new capabilities.
:param str masteruri: The masteruri is used only used, if new nodes are created.
:param capabilities: The capabilities, which defines groups and containing nodes.
:type capabilities: dict(namespace: dict(group: dict('type': str, 'images': list(str), 'description': str, 'nodes': list(str))))
'''
self._capcabilities[config] = capabilities
self._create_cap_nodes_pattern(config, capabilities)
# update the view
for ns, groups in capabilities.items():
for group, descr in groups.items():
group_changed = False
# create nodes for each group
nodes = descr['nodes']
if nodes:
groupItem = self.get_group_item(roslib.names.ns_join(ns, group), nocreate=False)
groupItem.descr_name = group
if descr['type']:
groupItem.descr_type = descr['type']
if descr['description']:
groupItem.descr = descr['description']
if descr['images']:
groupItem.descr_images = list(descr['images'])
# move the nodes from host to the group
group_changed = self.move_nodes2group(groupItem, config, ns, group, self)
# create new or update existing items in the group
for node_name in nodes:
# do not add nodes with * in the name
if not re.search(r"\*", node_name):
items = groupItem.get_node_items_by_name(node_name)
if items:
for item in items:
item.add_config(config)
group_changed = True
else:
items = self.get_node_items_by_name(node_name)
if items:
# copy the state of the existing node
groupItem.add_node(items[0].node_info, config)
elif config:
groupItem.add_node(NodeInfo(node_name, masteruri), config)
group_changed = True
if group_changed:
groupItem.update_displayed_config()
groupItem.updateIcon()
def move_nodes2group(self, group_item, config, ns, groupname, host_item):
'''
Returns `True` if the group was changed by adding a new node.
:param GroupItem group_item: item to parse the children for nodes.
:param str config: the configuration name
:param str ns: namespace
:param str groupname: the group name
:param HostItem host_item: the host item contain the capability groups
:return: `True`, if the group was changed by adding a new node.
:rtype: bool
'''
self_changed = False
group_changed = False
for i in reversed(range(self.rowCount())):
item = self.child(i)
if isinstance(item, NodeItem):
if host_item.is_in_cap_group(item.name, config, ns, groupname):
row = self.takeRow(i)
group_item._add_row_sorted(row)
group_changed = True
elif isinstance(item, GroupItem) and not item.is_group:
group_changed = item.move_nodes2group(group_item, config, ns, groupname, host_item)
if self_changed:
self.update_displayed_config()
self.updateIcon()
return group_changed
def rem_capablities(self, config):
'''
Removes internal entry of the capability, so the new nodes are not grouped.
To update view :meth:`NodeTreeModel.remove_config` and :meth:`GroupItem.clearup`
must be called.
:param str config: The name of the configuration containing this new capabilities.
'''
try:
del self._capcabilities[config]
except Exception:
pass
else:
# todo update view?
pass
def get_capability_groups(self, node_name):
'''
Returns the names of groups, which contains the given node.
:param str node_name: The name of the node
:return: The name of the configuration containing this new capabilities.
:rtype: dict(config : list(str))
'''
result = dict() # dict(config : [group names])
try:
for cfg, cap in self._capcabilities.items():
for ns, groups in cap.items():
for group, _ in groups.items(): # _:=decription
if self.is_in_cap_group(node_name, cfg, ns, group):
if cfg not in result:
result[cfg] = []
result[cfg].append(roslib.names.ns_join(ns, group))
except Exception:
pass
return result
def exists_capability_group(self, ns, group_name):
'''
Returns True if the group exists in capability list.
:param str ns: Namespace of the group
:param str group_name: The name of the group
:return: True if the group exists in capability list.
:rtype: bool
'''
try:
if type(self) == HostItem:
# replace last namespace separator if it is not the only one
if len(ns) > 1:
ns = ns.rstrip(rospy.names.SEP)
for _cfg, cap in self._capcabilities.items():
for gns, groups in cap.items():
for group, _decription in groups.items():
if ns == gns and group == group_name:
return True
elif self.parent_item is not None:
return self.parent_item.exists_capability_group(ns, group_name)
except Exception:
pass
return False
def clear_multiple_screens(self):
    '''
    Resets the multiple-screens flag on all nodes below this group,
    descending recursively into subgroups.
    '''
    for idx in range(self.rowCount()):
        child = self.child(idx)
        if isinstance(child, GroupItem):
            child.clear_multiple_screens()
        elif isinstance(child, NodeItem):
            child.has_multiple_screens = False
def get_node_items_by_name(self, node_name, recursive=True):
    '''
    Since the same node can be included by different groups, this method
    collects all node items with the given name.
    :param str node_name: The name of the node
    :param bool recursive: Searches in (sub) groups
    :return: The list with node items.
    :rtype: list(:class:`QtGui.QStandardItem` <https://srinikom.github.io/pyside-docs/PySide/QtGui/QStandardItem.html>)
    '''
    found = []
    for idx in range(self.rowCount()):
        child = self.child(idx)
        if isinstance(child, GroupItem):
            if recursive:
                found.extend(child.get_node_items_by_name(node_name))
        elif isinstance(child, NodeItem) and child == node_name:
            # NodeItem.__eq__ compares against the node name; a direct child
            # match short-circuits the search
            return [child]
    return found
def get_node_items(self, recursive=True):
    '''
    Returns all nodes in this group and its subgroups.
    :param bool recursive: returns the nodes of the subgroups
    :return: The list with node items.
    :rtype: list(:class:`QtGui.QStandardItem` <https://srinikom.github.io/pyside-docs/PySide/QtGui/QStandardItem.html>)
    '''
    collected = []
    for idx in range(self.rowCount()):
        child = self.child(idx)
        if isinstance(child, GroupItem):
            if recursive:
                collected.extend(child.get_node_items())
        elif isinstance(child, NodeItem):
            collected.append(child)
    return collected
def get_group_item(self, group_name, is_group=True, nocreate=False):
    '''
    Returns a GroupItem with given name. If no group with this name exists, a
    new one will be created (unless `nocreate`). The given name is split at
    namespace separators and subgroups are created/descended recursively.
    :param str group_name: the name of the group
    :param bool is_group: True if it is a capability group. False if a namespace group. (Default: True)
    :param bool nocreate: avoid creation of new group if not exists. (Default: False)
    :return: The group with given name or None if `nocreate` is True and the group does not exist.
    :rtype: :class:`GroupItem` or None
    '''
    # split into leading namespace element (lns) and the rest (rns)
    lns, rns = group_name, ''
    if nm.settings().group_nodes_by_namespace:
        lns, rns = lnamespace(group_name)
        # on the host level skip the root separator element
        if lns == rospy.names.SEP and type(self) == HostItem:
            lns, rns = lnamespace(rns)
    if lns == rospy.names.SEP:
        return self
    for i in range(self.rowCount()):
        item = self.child(i)
        if isinstance(item, GroupItem):
            if item == lns and not item._clearup_mark_delete:
                # group already exists: descend into it for the remaining path
                if rns:
                    return item.get_group_item(rns, is_group, nocreate)
                return item
            elif item > lns and not nocreate:
                # children are kept sorted: insert the new group before the
                # first greater sibling
                items = []
                newItem = GroupItem(lns, self, is_group=(is_group and not rns))
                items.append(newItem)
                cfgitem = CellItem(group_name, newItem)
                items.append(cfgitem)
                self.insertRow(i, items)
                if rns:
                    return newItem.get_group_item(rns, is_group, nocreate)
                return newItem
    if nocreate:
        return None
    # no existing or greater sibling found: append at the end
    items = []
    newItem = GroupItem(lns, self, is_group=(is_group and not rns))
    items.append(newItem)
    cfgitem = CellItem(group_name, newItem)
    items.append(cfgitem)
    self.appendRow(items)
    if rns:
        return newItem.get_group_item(rns, is_group, nocreate)
    return newItem
def add_node(self, node, cfg=None):
    '''
    Adds a new node with given name. If the node belongs to capability
    groups it is added to each of them, otherwise to the matching
    namespace group (or directly below a non-host group).
    :param node: the NodeInfo of the node to create
    :type node: :class:`NodeInfo`
    :param str cfg: The configuration, which describes the node
    '''
    groups = self.get_capability_groups(node.name)
    if groups:
        for _, group_list in groups.items():
            for group_name in group_list:
                # insert in the group
                groupItem = self.get_group_item(group_name, is_group=True)
                groupItem.add_node(node, cfg)
    else:
        group_item = self
        if type(group_item) == HostItem:
            # a host groups its nodes by namespace
            group_item = self.get_group_item(namespace(node.name), is_group=False)
        # insert in order
        new_item_row = NodeItem.newNodeRow(node.name, node.masteruri)
        group_item._add_row_sorted(new_item_row)
        new_item_row[0].set_node_info(node)
        # an empty string is a valid configuration name
        if cfg or cfg == '':
            new_item_row[0].add_config(cfg)
        group_item.updateIcon()
def _add_row_sorted(self, row):
    '''
    Inserts *row* keeping the children sorted by name (the comparison
    operators of GroupItem/NodeItem define the order); appends when no
    greater sibling exists. Sets the parent reference of the first column.
    :param row: list of QStandardItem columns; the first column is the node/group item
    '''
    for idx in range(self.rowCount()):
        if self.child(idx) > row[0].name:
            self.insertRow(idx, row)
            row[0].parent_item = self
            return
    self.appendRow(row)
    row[0].parent_item = self
def clearup(self, fixed_node_names=None):
    '''
    Removes not running and not configured nodes, then removes group items
    which became empty or superfluous. The three steps must run in this order.
    :param list(str) fixed_node_names: If the list is not None, nodes not in the list are set to not running!
    '''
    self._clearup(fixed_node_names)
    self._mark_groups_to_delete()
    self._remove_marked_groups()
def _clearup(self, fixed_node_names=None):
    '''
    Removes not running and not configured nodes (recursively) and removes
    this group from its parent when it became empty.
    :param list(str) fixed_node_names: If the list is not None, nodes not in the list are set to not running!
    :return: True if at least one row was removed below this item
    :rtype: bool
    '''
    removed = False
    # iterate in reverse so row removal does not shift pending indices
    for i in reversed(range(self.rowCount())):
        item = self.child(i)
        if isinstance(item, NodeItem):
            # set the running state of the node to None
            if fixed_node_names is not None:
                if item.name not in fixed_node_names:
                    item.set_node_info(NodeInfo(item.name, item.node_info.masteruri))
            # drop nodes with no configuration, not running and no known topics/services
            if not (item.has_configs() or item.is_running() or item.published or item.subscribed or item.services):
                removed = True
                self._remove_row(i)
        else:  # if type(item) == GroupItem:
            removed = item._clearup(fixed_node_names) or removed
    if self.rowCount() == 0 and self.parent_item is not None:
        # group became empty: detach it from the parent
        self.parent_item._remove_group(self.name)
    elif removed:
        self.updateIcon()
    return removed
def _mark_groups_to_delete(self):
    '''
    Marks groups for deletion (`_clearup_mark_delete`) which are no longer
    backed by a capability, or which hold only a single child. HostItem
    itself is never marked.
    '''
    for i in range(self.rowCount()):
        item = self.child(i)
        if isinstance(item, NodeItem):
            # remove group if only one node is inside
            if not isinstance(self, HostItem):
                if self.parent_item is not None:
                    if self.is_group:
                        # check to remove capability group
                        if not self.exists_capability_group(self.parent_item.get_namespace(), self.name):
                            self._clearup_mark_delete = True
                    elif self.rowCount() == 1:
                        self._clearup_mark_delete = True
        else:
            item._mark_groups_to_delete()
            # NOTE(review): `item` here is the subgroup just visited; with a
            # single child the mark is propagated upwards from it
            if self.rowCount() == 1:
                # remove if subgroup marked to remove and this has only this group
                self._clearup_mark_delete = item._clearup_mark_delete
def _remove_marked_groups(self):
    '''
    Removes groups marked by :meth:`_mark_groups_to_delete` and re-inserts
    their node rows directly below this item, keeping the sort order.
    '''
    rows2add = []
    # reverse iteration: removing rows must not shift pending indices
    for i in reversed(range(self.rowCount())):
        item = self.child(i)
        if isinstance(item, GroupItem):
            if item._clearup_mark_delete:
                rows = self._take_node_rows(item)
                if rows:
                    rows2add = rows2add + rows
                    self._remove_row(i)
            else:
                item._remove_marked_groups()
    for row in rows2add:
        self._add_row_sorted(row)
        self.updateIcon()
def _take_node_rows(self, group):
    '''
    Detaches and collects all node rows below *group*, descending into
    subgroups. The rows are removed from the model via takeRow().
    :return: list of taken rows
    '''
    taken = []
    for idx in reversed(range(group.rowCount())):
        child = group.child(idx)
        if isinstance(child, NodeItem):
            taken.append(group.takeRow(idx))
        else:
            taken.extend(child._take_node_rows(child))
    return taken
def _remove_group(self, name):
    '''
    Removes the empty subgroup with the given name. Stops after the first
    match — at most one group with the same name can exist.
    :param str name: name of the group to remove
    '''
    for idx in range(self.rowCount()):
        child = self.child(idx)
        if type(child) == GroupItem and child == name and child.rowCount() == 0:
            self._remove_row(idx)
            return
def _remove_row(self, index):
    '''
    Removes the row at *index*, first breaking the parent/back references of
    the first-column item and its CellItem companion in the second column.
    :param int index: row index below this item
    '''
    item = self.child(index)
    item.parent_item = None
    try:
        # second column holds the CellItem companion; clear its references too
        cellitem = self.child(index, 1)
        cellitem.parent_item = None
        cellitem.item = None
    except Exception as e:
        rospy.logdebug_throttle(10, utf8(e))
    self.removeRow(index)
def reset_remote_launched_nodes(self):
    '''Clears the 'update received' flag for remotely launched nodes.'''
    self._remote_launched_nodes_updated = False

def remote_launched_nodes_updated(self):
    '''
    :return: True if this item has no remotely launched nodes, or an update
        for them was already received.
    :rtype: bool
    '''
    if not self._has_remote_launched_nodes:
        return True
    return self._remote_launched_nodes_updated
def update_running_state(self, nodes, create_nodes=True):
    '''
    Updates the running state of the nodes given in a dictionary.
    :param nodes: A dictionary with node names and their running state described by L{NodeInfo}.
        A plain list of names only triggers a cleanup.
    :type nodes: dict(str: :class:`fkie_master_discovery.NodeInfo` <http://docs.ros.org/kinetic/api/fkie_master_discovery/html/modules.html#fkie_master_discovery.master_info.NodeInfo>)
    :param bool create_nodes: create items for nodes not yet shown
    :return: a list with :class:`fkie_master_discovery.NodeInfo` items, which changed their PID or URI.
    '''
    updated_nodes = []
    if isinstance(nodes, dict):
        for (name, node) in nodes.items():
            # get the node items
            items = self.get_node_items_by_name(name)
            if items:
                for item in items:
                    # update the node item; True means PID/URI changed
                    run_changed = item.set_node_info(node)
                    if run_changed:
                        updated_nodes.append(node)
            elif create_nodes:
                # create the new node
                self.add_node(node)
                updated_nodes.append(node)
            if self._has_remote_launched_nodes:
                self._remote_launched_nodes_updated = True
        # drop items for nodes no longer present
        self.clearup(list(nodes.keys()))
    elif isinstance(nodes, list):
        self.clearup(nodes)
    return updated_nodes
def get_nodes_running(self):
    '''
    Returns the names of all running nodes. A running node is defined by his
    PID.
    :see: :class:`master_dicovery_fkie.NodeInfo` <http://docs.ros.org/kinetic/api/fkie_master_discovery/html/modules.html#fkie_master_discovery.master_info.NodeInfo>
    :return: A list with node names
    :rtype: list(str)
    '''
    names = []
    for idx in range(self.rowCount()):
        child = self.child(idx)
        if isinstance(child, GroupItem):
            names.extend(child.get_nodes_running())
        elif isinstance(child, NodeItem) and child.node_info.pid is not None:
            names.append(child.name)
    return names
def set_duplicate_nodes(self, running_nodes, is_sync_running=False):
    '''
    While a synchronization same node on different hosts have the same name, the
    nodes with the same name on other hosts are marked.
    :param running_nodes: The dictionary with names of running nodes and their masteruri
    :type running_nodes: dict(str: str)
    :param bool is_sync_running: If the master_sync is running, the nodes are marked
        as ghost nodes. So they are handled as running nodes, but has not run
        informations. This nodes are running on remote host, but are not
        syncronized because of filter or errors.
    '''
    # infrastructure nodes run on every host by design and are never duplicates
    ignore = ['/master_sync', '/master_discovery', '/node_manager', '/node_manager_daemon']
    for i in range(self.rowCount()):
        item = self.child(i)
        if isinstance(item, GroupItem):
            item.set_duplicate_nodes(running_nodes, is_sync_running)
        elif isinstance(item, NodeItem):
            if is_sync_running:
                # ghost: not running here, but running on its own origin master
                item.is_ghost = (item.node_info.uri is None and (item.name in running_nodes and running_nodes[item.name] == item.node_info.masteruri))
                # duplicate: not running here, but running on a foreign master
                item.has_running = (item.node_info.uri is None and item.name not in ignore and (item.name in running_nodes and running_nodes[item.name] != item.node_info.masteruri))
            else:
                if item.is_ghost:
                    item.is_ghost = False
                item.has_running = (item.node_info.uri is None and item.name not in ignore and (item.name in running_nodes))
def updateIcon(self):
    '''
    Recomputes the group state from the states of its direct children and
    updates the icon. WARNING wins immediately; otherwise diagnostics,
    duplicates, ghosts, mixed, off and run are checked in that priority.
    Changes propagate upwards to the parent group.
    '''
    if isinstance(self, HostItem):
        # skip the icon update on a host item
        return
    has_running = False
    has_off = False
    has_duplicate = False
    has_ghosts = False
    self.diagnostic_level = 0
    for i in range(self.rowCount()):
        item = self.child(i)
        if isinstance(item, (GroupItem, NodeItem)):
            if item.state == NodeItem.STATE_WARNING:
                # a warning overrides everything: set it and stop scanning
                self.setIcon(nm.settings().icon('crystal_clear_warning.png'))
                self._state = NodeItem.STATE_WARNING
                if self.parent_item is not None:
                    self.parent_item.updateIcon()
                return
            elif item.state == NodeItem.STATE_OFF:
                has_off = True
            elif item.state == NodeItem.STATE_RUN:
                has_running = True
            elif item.state == NodeItem.STATE_GHOST:
                has_ghosts = True
            elif item.state == NodeItem.STATE_DUPLICATE:
                has_duplicate = True
            elif item.state == NodeItem.STATE_PARTS:
                has_running = True
                has_off = True
            # track the highest diagnostic level of running nodes and subgroups
            if item.state == NodeItem.STATE_RUN or isinstance(item, GroupItem):
                if item.diagnostic_level > self.diagnostic_level:
                    self.diagnostic_level = item.diagnostic_level
    if self.diagnostic_level > 0:
        self.setIcon(NodeItem._diagnostic_level2icon(self.diagnostic_level))
    else:
        if has_duplicate:
            self._state = NodeItem.STATE_DUPLICATE
            self.setIcon(nm.settings().icon('imacadam_stop.png'))
        elif has_ghosts:
            self._state = NodeItem.STATE_GHOST
            self.setIcon(nm.settings().icon('state_ghost.png'))
        elif has_running and has_off:
            self._state = NodeItem.STATE_PARTS
            self.setIcon(nm.settings().icon('state_part.png'))
        elif not has_running:
            self._state = NodeItem.STATE_OFF
            self.setIcon(nm.settings().icon('state_off.png'))
        elif not has_off and has_running:
            self._state = NodeItem.STATE_RUN
            self.setIcon(nm.settings().icon('state_run.png'))
    if self.parent_item is not None:
        self.parent_item.updateIcon()
def _create_html_list(self, title, items):
result = ''
if items:
result += '<b><u>%s</u></b>' % title
if len(items) > 1:
result += ' <span style="color:gray;">[%d]</span>' % len(items)
result += '<ul><span></span><br>'
for i in items:
result += '<a href="node://%s">%s</a><br>' % (i, i)
result += '</ul>'
return result
def update_tooltip(self):
    '''
    Creates a tooltip description based on text set by :meth:`update_description`
    and all childs of this host with valid sensor description. The result is
    returned as a HTML part.
    :return: the tooltip description coded as a HTML part
    :rtype: str
    '''
    descr = self.generate_description(False)
    # fall back to the plain item name when no description is available
    self.setToolTip(descr or self.name)
    return descr
def generate_description(self, extended=True):
    '''
    Builds the HTML description of this group: robot description (if set),
    optionally rendered reStructuredText details, and the list of child nodes.
    :param bool extended: render the detailed reST description too
    :return: HTML snippet wrapped in a <div>
    :rtype: str
    '''
    tooltip = ''
    if self.descr_type or self.descr_name or self.descr:
        tooltip += '<h4>%s</h4><dl>' % self.descr_name
        if self.descr_type:
            tooltip += '<dt>Type: %s</dt></dl>' % self.descr_type
        if extended:
            try:
                # docutils renders the reST description to HTML
                from docutils import examples
                if self.descr:
                    tooltip += '<b><u>Detailed description:</u></b>'
                    tooltip += examples.html_body(utf8(self.descr))
            except Exception:
                rospy.logwarn("Error while generate description for a tooltip: %s", traceback.format_exc(1))
            tooltip += '<br>'
    # get nodes
    nodes = []
    for j in range(self.rowCount()):
        nodes.append(self.child(j).name)
    if nodes:
        tooltip += self._create_html_list('Nodes:', nodes)
    return '<div>%s</div>' % tooltip
def update_description(self, descr_type, descr_name, descr):
    '''
    Stores the description of the robot on this item. To refresh the visible
    tooltip afterwards use :meth:`update_tooltip`.
    :param str descr_type: the type of the robot
    :param str descr_name: the name of the robot
    :param str descr: the description of the robot as a reStructuredText<http://docutils.sourceforge.net/rst.html>
    '''
    self.descr = descr
    self.descr_name = descr_name
    self.descr_type = descr_type
def update_displayed_config(self):
    '''
    Updates the configuration representation in the second column: shows a
    '[n]' counter for multiple configurations and picks an icon depending on
    whether launch files and/or default configurations are present.
    '''
    if self.parent_item is not None:
        # collect the configurations of all direct children
        cfgs = []
        for j in range(self.rowCount()):
            if self.child(j).cfgs:
                cfgs[len(cfgs):] = self.child(j).cfgs
        if cfgs:
            cfgs = list(set(cfgs))
        cfg_col = self.parent_item.child(self.row(), NodeItem.COL_CFG)
        if cfg_col is not None and isinstance(cfg_col, QStandardItem):
            # show a counter only when more than one configuration exists
            cfg_col.setText('[%d]' % len(cfgs) if len(cfgs) > 1 else "")
            # set icons
            has_launches = NodeItem.has_launch_cfgs(cfgs)
            has_defaults = NodeItem.has_default_cfgs(cfgs)
            if has_launches and has_defaults:
                cfg_col.setIcon(nm.settings().icon('crystal_clear_launch_file_def_cfg.png'))
            elif has_launches:
                cfg_col.setIcon(nm.settings().icon('crystal_clear_launch_file.png'))
            elif has_defaults:
                cfg_col.setIcon(nm.settings().icon('default_cfg.png'))
            else:
                cfg_col.setIcon(QIcon())
def get_configs(self):
    '''
    Returns the set of launch configurations of all children, descending
    into subgroups.
    :rtype: set(str)
    '''
    configs = []
    for idx in range(self.rowCount()):
        child = self.child(idx)
        if isinstance(child, GroupItem):
            configs.extend(child.get_configs())
        elif child.cfgs:
            configs.extend(child.cfgs)
    return set(configs)
def get_count_mscreens(self):
    '''
    Returns the count of nodes below this group which own multiple screen
    sessions, descending into subgroups.
    '''
    count = 0
    for idx in range(self.rowCount()):
        child = self.child(idx)
        if isinstance(child, GroupItem):
            count += child.get_count_mscreens()
        elif child.has_multiple_screens:
            count += 1
    return count
def type(self):
    '''Qt item type id of group items.'''
    return GroupItem.ITEM_TYPE

def __eq__(self, item):
    '''
    Case-insensitive comparison by group name; accepts a string or another
    GroupItem, anything else compares unequal.
    '''
    if isstring(item):
        return self.name.lower() == item.lower()
    if item is not None and type(item) == GroupItem:
        return self.name.lower() == item.name.lower()
    return False

def __ne__(self, item):
    return not (self == item)
def __gt__(self, item):
    '''
    Compares the name of the group, case-insensitive. The group holding
    SYSTEM nodes always sorts after all other groups.
    :param item: group name (str) or GroupItem to compare against
    :return: True if this group sorts after *item*
    :rtype: bool
    '''
    if isstring(item):
        # put the group with SYSTEM nodes at the end
        if self.is_system_group:
            if self.name.lower() != item.lower():
                return True
        elif item.lower() == 'system':
            return False
        return self.name.lower() > item.lower()
    elif item is not None and type(item) == GroupItem:
        # put the group with SYSTEM nodes at the end. FIX: the original branch
        # called str.lower() on a GroupItem and referenced the misspelled
        # attribute 'is_syste_group', both raising AttributeError at runtime;
        # it is now the exact mirror of the string branch above.
        if self.is_system_group:
            if self.name.lower() != item.name.lower():
                return True
        elif item.is_system_group:
            return False
        return self.name.lower() > item.name.lower()
    return False
# ###############################################################################
# ############# HostItem ##############
# ###############################################################################
class HostItem(GroupItem):
    '''
    The HostItem stores the information about a host: its ROS master URI,
    address and diagnostics, and renders the host tooltip with action links.
    '''
    ITEM_TYPE = Qt.UserRole + 26

    def __init__(self, masteruri, address, local, master_entry, parent=None):
        '''
        Initialize the HostItem object with given values.
        :param str masteruri: URI of the ROS master assigned to the host
        :param str address: the address of the host
        :param bool local: is this host the localhost where the node_manager is running.
        :param master_entry: entry with resolved names/addresses of the ROS master
        :param parent: optional Qt parent item
        '''
        self._has_remote_launched_nodes = False
        self._masteruri = masteruri
        self._host = address
        self._mastername = address
        self._master_entry = master_entry
        self._local = None
        self._diagnostics = []  # DiagnosticStatus messages belonging to this host
        # create_host_description() may set _has_remote_launched_nodes as side effect
        name = self.create_host_description(master_entry)
        GroupItem.__init__(self, name, parent, has_remote_launched_nodes=self._has_remote_launched_nodes)
        self.descr_type = self.descr_name = self.descr = ''
        self.sysmon_state = False
        # assignment through the property also selects the host icon
        self.local = local

    @property
    def host(self):
        # address of the host
        return self._host

    @property
    def hostname(self):
        # return nm.nameres().hostname(self._host)
        return self._host

    @property
    def addresses(self):
        # all cached addresses resolved for this host
        return nm.nameres().resolve_cached(self._host)

    @property
    def masteruri(self):
        # URI of the ROS master assigned to the host
        return self._masteruri

    @property
    def mastername(self):
        # name of the ROS master; falls back to the hostname if not resolvable
        result = self._master_entry.get_mastername()
        if result is None or not result:
            result = self.hostname
        return result

    @property
    def local(self):
        # True if this host is the localhost running the node_manager
        return self._local

    @local.setter
    def local(self, islocal):
        # change the icon only on a real state change: prefer a configured
        # robot image, otherwise use the generic local/remote icons
        if self._local != islocal:
            self._local = islocal
            name = self.create_host_description(self._master_entry)
            image_file = nm.settings().robot_image_file(name)
            if QFile.exists(image_file):
                self.setIcon(QIcon(image_file))
            else:
                if self._local:
                    self.setIcon(nm.settings().icon('crystal_clear_miscellaneous.png'))
                else:
                    self.setIcon(nm.settings().icon('remote.png'))

    @property
    def diagnostics(self):
        # shallow copy of the stored host diagnostics
        return list(self._diagnostics)

    def update_system_diagnostics(self, diagnostics):
        '''
        Replaces the stored diagnostics by the entries of the given
        DiagnosticArray which belong to this host (matched by hardware_id)
        and refreshes tooltip and displayed name.
        '''
        del self._diagnostics[:]
        for diagnostic in diagnostics.status:
            if diagnostic.hardware_id == self.hostname:
                self._diagnostics.append(diagnostic)
        self.update_tooltip()
        self.name = self.create_host_description(self._master_entry)

    def create_host_description(self, master_entry):
        '''
        Returns the display name generated from mastername and address in the
        form 'name@hostname'. If the master URI resolves to a different host,
        the URI is appended and `_has_remote_launched_nodes` is set as a
        side effect.
        :param master_entry: entry with names/addresses of the ROS master
        '''
        name = master_entry.get_mastername()
        if not name:
            name = master_entry.get_address()
        hostname = master_entry.get_address()
        if not nm.settings().show_domain_suffix:
            name = subdomain(name)
        result = '%s@%s' % (name, hostname)
        maddr = get_hostname(master_entry.masteruri)
        mname = nm.nameres().hostname(maddr)
        if mname is None:
            mname = utf8(maddr)
        if mname != hostname:
            # master runs under another address: nodes were launched remotely
            result += '[%s]' % master_entry.masteruri
            self._has_remote_launched_nodes = True
        return result

    def update_tooltip(self):
        '''
        Creates a tooltip description based on text set by :meth:`update_description`
        and all childs of this host with valid sensor description. The result is
        returned as a HTML part.
        :return: the tooltip description coded as a HTML part
        :rtype: str
        '''
        tooltip = self.generate_description(False)
        self.setToolTip(tooltip if tooltip else self.name)
        return tooltip

    def generate_description(self, extended=True):
        '''
        Builds the HTML description of this host: robot description, master
        info and, when `extended`, action links, system monitoring controls
        and the collected diagnostics.
        :param bool extended: include links and diagnostics sections
        :return: HTML snippet wrapped in a <div>, or '' if empty
        :rtype: str
        '''
        from docutils import examples
        tooltip = ''
        if self.descr_type or self.descr_name or self.descr:
            tooltip += '<h4>%s</h4><dl>' % self.descr_name
            if self.descr_type:
                tooltip += '<dt>Type: %s</dt></dl>' % self.descr_type
            if extended:
                try:
                    if self.descr:
                        tooltip += '<b><u>Detailed description:</u></b>'
                        tooltip += examples.html_body(self.descr, input_encoding='utf8')
                except Exception:
                    rospy.logwarn("Error while generate description for a tooltip: %s", traceback.format_exc(1))
                tooltip += '<br>'
        tooltip += '<h3>%s</h3>' % self.mastername
        tooltip += '<font size="+1"><i>%s</i></font><br>' % self.masteruri
        tooltip += '<font size="+1">Host: <b>%s%s</b></font><br>' % (self.hostname, ' %s' % self.addresses if self.addresses else '')
        if extended:
            # action links are handled by the tree view's link handler
            tooltip += '<a href="open-sync-dialog://%s">open sync dialog</a>' % (utf8(self.masteruri).replace('http://', ''))
            tooltip += '<p>'
            tooltip += '<a href="show-all-screens://%s">show all screens</a>' % (utf8(self.masteruri).replace('http://', ''))
            tooltip += '<p>'
            tooltip += '<a href="rosclean://%s" title="calls `rosclean purge` at `%s`">rosclean purge</a>' % (self.masteruri.replace('http://', ''), self.hostname)
            tooltip += '<p>'
            tooltip += '<a href="poweroff://%s" title="calls `sudo poweroff` at `%s` via SSH">poweroff `%s`</a>' % (self.hostname, self.hostname, self.hostname)
            tooltip += '<p>'
            tooltip += '<a href="remove-all-launch-server://%s">kill all launch server</a>' % utf8(self.masteruri).replace('http://', '')
            tooltip += '<p>'
            if self.local:
                icon_path_settings = nm.settings().icon_path('crystal_clear_settings_24.png')
                sysmon_setup_str = '<a href="nmd-cfg://%s" title="Configure Daemon"><img src="%s" alt="configure"></a>' % (utf8(self.masteruri).replace('http://', ''), icon_path_settings)
                sysmon_state_str = 'disable' if self.sysmon_state else 'enable'
                sysmon_switch_str = '<a href="sysmon-switch://%s">%s</a>' % (utf8(self.masteruri).replace('http://', ''), sysmon_state_str)
                tooltip += '<h3>System Monitoring: (%s) %s</h3>' % (sysmon_switch_str, sysmon_setup_str)
            if self._diagnostics:
                for diag in self._diagnostics:
                    try:
                        free = None
                        free_percent = None
                        stamp = None
                        others = []
                        # pick the well-known keys, collect the rest for the listing
                        for val in diag.values:
                            if val.key == 'Free [%]':
                                free_percent = float(val.value)
                            elif val.key == 'Free':
                                free = sizeof_fmt(float(val.value))
                            elif val.key == 'Timestamp':
                                stamp = val.value
                            else:
                                others.append((val.key, val.value))
                        tooltip += '\n<b>%s:</b> <font color=grey>%s</font>' % (diag.name, stamp)
                        # NOTE(review): these replace() calls are no-ops; presumably the
                        # '&gt;'/'&lt;' HTML escapes were lost in transit -- confirm upstream.
                        if diag.level > 0:
                            tooltip += '\n<dt><font color="red">%s</font></dt>' % (diag.message.replace('>', '>').replace('<', '<'))
                        else:
                            tooltip += '\n<dt><font color="grey">%s</font></dt>' % (diag.message.replace('>', '>').replace('<', '<'))
                        if free is not None:
                            tooltip += '\n<dt><em>%s:</em> %s (%s%%)</dt>' % ('Free', free, free_percent)
                        cpu_processes = 3
                        for key, value in others:
                            key_fmt = key
                            val_fmt = value
                            # normalize unit suffixes in the key, format the value
                            if '[1s]' in key:
                                val_fmt = '%s/s' % sizeof_fmt(float(value))
                                key_fmt = key_fmt.replace(' [1s]', '')
                            elif '[%]' in key:
                                val_fmt = '%s%%' % value
                                key_fmt = key_fmt.replace(' [%]', '')
                            elif '[degree]' in key:
                                val_fmt = '%s°C' % value
                                key_fmt = key_fmt.replace(' [degree]', '')
                            if key == 'Process load':
                                kill_ref = ''
                                pid = self._pid_from_str(val_fmt)
                                if pid:
                                    kill_ref = ' <a href="kill-pid://pid%s">kill</a>' % pid
                                tooltip += '\n<dt><font color="red">%s</font>%s</dt>' % (val_fmt, kill_ref)
                                cpu_processes -= 1
                            else:
                                tooltip += '\n<dt><em>%s:</em> %s</dt>' % (key_fmt, val_fmt)
                        if cpu_processes > 0 and diag.name == 'CPU Load':
                            # pad the process list so the tooltip keeps a stable height
                            for _idx in range(cpu_processes):
                                tooltip += '\n<dt><font color="grey">%s</font></dt>' % ('--')
                    except Exception as err:
                        tooltip += '\n<dt><font color="red">%s</font></dt>' % (utf8(err))
            tooltip += '<br>'
        # get sensors
        capabilities = []
        for j in range(self.rowCount()):
            item = self.child(j)
            if isinstance(item, GroupItem):
                capabilities.append(item.name)
        if capabilities:
            tooltip += '<br>'
            tooltip += '<b><u>Capabilities:</u></b>'
            try:
                tooltip += examples.html_body('- %s' % ('\n- '.join(capabilities)), input_encoding='utf8')
            except Exception:
                rospy.logwarn("Error while generate description for a tooltip: %s", traceback.format_exc(1))
        return '<div>%s</div>' % tooltip if tooltip else ''

    def _pid_from_str(self, string):
        '''
        Extracts the first '[...]'-enclosed substring (a PID) or returns ''.
        NOTE(review): the pattern is recompiled on every call; hoisting would
        be a cheap optimization.
        '''
        re_if = re.compile(r".*\[(?P<pid>.*?)\]")
        for pid in re_if.findall(string):
            return pid
        return ''

    def type(self):
        # Qt item type id of host items
        return HostItem.ITEM_TYPE

    def __eq__(self, item):
        '''
        Compares the address of the masteruri. Accepts a (masteruri, host)
        tuple, a MasterEntry or another HostItem; string comparison is deprecated.
        '''
        if isstring(item):
            rospy.logwarn("compare HostItem with unicode deprecated")
            return False
        elif isinstance(item, tuple):
            return nmdurl.equal_uri(self.masteruri, item[0]) and self.host == item[1]
        elif isinstance(item, MasterEntry):
            return self._master_entry == item
        elif isinstance(item, HostItem):
            return self._master_entry == item._master_entry
        return False

    def __gt__(self, item):
        '''
        Compares the address of the masteruri. Accepts a (masteruri, host)
        tuple or another HostItem; string comparison is deprecated.
        '''
        if isstring(item):
            rospy.logwarn("compare HostItem with unicode deprecated")
            return False
        elif isinstance(item, tuple):
            return self.masteruri > item[0]
        elif isinstance(item, HostItem):
            return self.masteruri > item.masteruri
        return False
# ###############################################################################
# ############# NodeItem ##############
# ###############################################################################
class NodeItem(QStandardItem):
    '''
    The NodeItem stores the information about the node using the ExtendedNodeInfo
    class and represents it in a :class:`QTreeView`<https://srinikom.github.io/pyside-docs/PySide/QtGui/QTreeView.html> using the
    :class:`QStandardItemModel` <https://srinikom.github.io/pyside-docs/PySide/QtGui/QStandardItemModel.html>
    '''
    ITEM_TYPE = QStandardItem.UserType + 35
    NAME_ROLE = Qt.UserRole + 1
    COL_CFG = 1  # column index holding the configuration CellItem
    # COL_URI = 2
    # display states (see the `state` property and GroupItem.updateIcon()):
    STATE_OFF = 0        # not running
    STATE_RUN = 1        # running
    STATE_WARNING = 2    # warning condition, overrides all other states
    STATE_GHOST = 3      # runs on a remote master but is not synchronized
    STATE_DUPLICATE = 4  # same name running on another host
    STATE_PARTS = 5      # group aggregate: some children run, some are off
def __init__(self, node_info):
    '''
    Initialize the NodeItem instance.
    :param node_info: the node information
    :type node_info: :class:`fkie_master_discovery.NodeInfo` <http://docs.ros.org/kinetic/api/fkie_master_discovery/html/modules.html#fkie_master_discovery.master_info.NodeInfo>
    '''
    QStandardItem.__init__(self, node_info.name)
    self._parent_item = None
    # keep an own copy so remote updates do not mutate the displayed state
    self._node_info = node_info.copy()
    self._cfgs = []
    self.launched_cfg = None  # is used to store the last configuration to launch the node
    self.next_start_cfg = None  # is used to set the configuration for next start of the node
    self._std_config = None  # it's config with empty name. for default proposals
    self._is_ghost = False
    self._has_running = False
    self.setIcon(nm.settings().icon('state_off.png'))
    self._state = NodeItem.STATE_OFF
    self.diagnostic_array = []  # history of received DiagnosticStatus messages (capped at 15)
    self.nodelet_mngr = ''
    self.nodelets = []
    self.has_screen = True
    self.has_multiple_screens = False
    self._with_namespace = rospy.names.SEP in node_info.name
    self.kill_on_stop = False  # set from the deprecated 'kill_on_stop' ROS parameter
    self._kill_parameter_handler = ParameterHandler()
    self._kill_parameter_handler.parameter_values_signal.connect(self._on_kill_param_values)
@property
def state(self):
    # current display state: one of the STATE_* constants
    return self._state

@property
def name(self):
    # full ROS name of the node
    return self._node_info.name

@name.setter
def name(self, new_name):
    # changes only the displayed text; the underlying NodeInfo keeps its name
    self.setText(new_name)

@property
def masteruri(self):
    # URI of the ROS master the node was registered at
    return self._node_info.masteruri

@property
def published(self):
    # topics published by this node
    return self._node_info.publishedTopics

@property
def subscribed(self):
    # topics subscribed by this node
    return self._node_info.subscribedTopics

@property
def services(self):
    # services provided by this node
    return self._node_info.services
@property
def parent_item(self):
    # the GroupItem/HostItem this node is shown below, or None if detached
    return self._parent_item

@parent_item.setter
def parent_item(self, parent_item):
    self._parent_item = parent_item
    if parent_item is None:
        # detached: show the full node name again
        self.setText(self._node_info.name)
        self._with_namespace = rospy.names.SEP in self._node_info.name
    else:
        # below a group: strip the group's namespace prefix from the shown text
        new_name = self._node_info.name.replace(parent_item.get_namespace(), '', 1)
        self.setText(new_name)
        self._with_namespace = rospy.names.SEP in new_name
@property
def node_info(self):
    '''
    Returns the NodeInfo instance of this node.
    :rtype: :class:`fkie_master_discovery.NodeInfo` <http://docs.ros.org/kinetic/api/fkie_master_discovery/html/modules.html#fkie_master_discovery.master_info.NodeInfo>
    '''
    return self._node_info
def set_node_info(self, node_info):
    '''
    Sets the NodeInfo and updates the view, if needed. Pub/sub/service lists
    and the running state (PID/URI) are diffed against the stored copy.
    :return: True if the item was visibly updated, otherwise False
    '''
    abbos_changed = False
    run_changed = False
    if self._node_info.publishedTopics != node_info.publishedTopics:
        abbos_changed = True
        self._node_info._publishedTopics = list(node_info.publishedTopics)
    if self._node_info.subscribedTopics != node_info.subscribedTopics:
        abbos_changed = True
        self._node_info._subscribedTopics = list(node_info.subscribedTopics)
    if self._node_info.services != node_info.services:
        abbos_changed = True
        self._node_info._services = list(node_info.services)
    if self._node_info.pid != node_info.pid:
        self._node_info.pid = node_info.pid
        run_changed = True
    if self._node_info.uri != node_info.uri:
        self._node_info.uri = node_info.uri
        run_changed = True
    # delete diagnostics messages on stop or start nodes
    if run_changed:
        del self.diagnostic_array[:]
    # update the tooltip and icon
    # NOTE(review): `self.has_configs` lacks parentheses -- a bound method is
    # always truthy, so the subcondition reduces to `run_changed`; confirm intent.
    if run_changed and (self.is_running() or self.has_configs) or abbos_changed:
        self.has_screen = True
        self.update_dispayed_name()
        if self.parent_item is not None:
            self.parent_item.updateIcon()
        if run_changed and self.is_running():
            # 'kill_on_stop' is deprecated
            self._kill_parameter_handler.requestParameterValues(self.masteruri, [roslib.names.ns_join(self.name, 'kill_on_stop'), roslib.names.ns_join(self.name, 'nm/kill_on_stop')])
        return True
    return False
@property
def uri(self):
    # NOTE(review): a textual 'None' is normalized to a real None here --
    # presumably the URI arrives as a string from a remote source; confirm.
    if self._node_info.uri is not None:
        if self._node_info.uri == 'None':
            self._node_info.uri = None
    return self._node_info.uri

@property
def pid(self):
    # PID of the node process, or None if unknown / not running
    return self._node_info.pid
@property
def has_running(self):
    '''
    Returns `True`, if there are exists other nodes with the same name. This
    variable must be set manually!
    :rtype: bool
    '''
    return self._has_running

@has_running.setter
def has_running(self, state):
    '''
    Sets however other node with the same name are running or not (on other hosts)
    and updates the view of this item.
    '''
    if self._has_running != state:
        self._has_running = state
        if self.has_configs() or self.is_running():
            self.update_dispayed_name()
        # propagate the state change to the parent group icon
        if self.parent_item is not None and not isinstance(self.parent_item, HostItem):
            self.parent_item.updateIcon()
@property
def is_ghost(self):
    '''
    Returns `True`, if there are exists other runnig nodes with the same name. This
    variable must be set manually!
    :rtype: bool
    '''
    return self._is_ghost

@is_ghost.setter
def is_ghost(self, state):
    '''
    Sets however other node with the same name is running (on other hosts) and
    and the host showing this node the master_sync is running, but the node is
    not synchronized.
    '''
    if self._is_ghost != state:
        self._is_ghost = state
        if self.has_configs() or self.is_running():
            self.update_dispayed_name()
        # propagate the state change to the parent group icon
        if self.parent_item is not None and not isinstance(self.parent_item, HostItem):
            self.parent_item.updateIcon()
@property
def with_namespace(self):
    '''
    Returns `True` if the node name contains a '/' in his name
    :rtype: bool
    '''
    return self._with_namespace
@property
def host(self):
    '''
    Walks up the parent chain until the enclosing HostItem is found.
    :return: the host address of the enclosing HostItem, or None if detached
    '''
    ancestor = self.parent_item
    while ancestor is not None:
        if type(ancestor) == HostItem:
            return ancestor.host
        ancestor = ancestor.parent_item
    return None
    def append_diagnostic_status(self, diagnostic_status):
        '''
        Appends a diagnostic status message to the history of this node and
        refreshes the displayed name/icon. Consecutive messages with the same
        level and message text are ignored; the history keeps the last 15 entries.
        :param diagnostic_status: the received status message (presumably a
            diagnostic_msgs/DiagnosticStatus -- TODO confirm); it is modified in
            place by stamping the receive time.
        '''
        if self.diagnostic_array:
            last_item = self.diagnostic_array[-1]
            # drop duplicates of the most recent status (same level and message)
            if last_item.level == diagnostic_status.level:
                if last_item.message == diagnostic_status.message:
                    return
        # stamp the local receive time into a 'recvtime' key/value of the message
        dt_key = KeyValue()
        dt_key.key = 'recvtime'
        dt_key.value = datetime.now().strftime("%d.%m.%Y %H:%M:%S.%f")
        if diagnostic_status.values and diagnostic_status.values[-1].key == 'recvtime':
            diagnostic_status.values[-1].value = dt_key.value
        else:
            diagnostic_status.values.append(dt_key)
        self.diagnostic_array.append(diagnostic_status)
        self.update_dispayed_name()
        if self.parent_item is not None and not isinstance(self.parent_item, HostItem):
            self.parent_item.updateIcon()
        # bound the history: drop the oldest entry beyond 15
        if len(self.diagnostic_array) > 15:
            del self.diagnostic_array[0]
def data(self, role):
if role == self.NAME_ROLE:
return self.name
else:
return QStandardItem.data(self, role)
@staticmethod
def _diagnostic_level2icon(level):
if level == 1:
return nm.settings().icon('state_diag_warn.png')
elif level == 2:
return nm.settings().icon('state_diag_error.png')
elif level == 3:
return nm.settings().icon('state_diag_stale.png')
else:
return nm.settings().icon('state_diag_other.png')
@property
def diagnostic_level(self):
if self.diagnostic_array:
return self.diagnostic_array[-1].level
return 0
def _on_kill_param_values(self, masteruri, code, msg, params):
if code == 1:
# assumption: all parameter are 'kill_on_stop' parameter
for _p, (code_n, _msg_n, val) in params.items():
if code_n == 1:
self.kill_on_stop = val
    def update_dispayed_name(self):
        '''
        Updates the name representation of the Item: chooses the state
        (run/warning/ghost/duplicate/off), the icon and the tooltip from the
        current node info, diagnostics and duplicate/ghost flags.
        '''
        # assemble the common HTML tooltip header with node facts
        tooltip = '<h4>%s</h4><dl>' % self.node_info.name
        tooltip += '<dt><b>URI:</b> %s</dt>' % self.node_info.uri
        tooltip += '<dt><b>PID:</b> %s</dt>' % self.node_info.pid
        if self.nodelet_mngr:
            tooltip += '<dt><b>Nodelet manager</b>: %s</dt>' % self.nodelet_mngr
        if self.nodelets:
            tooltip += '<dt><b>This is nodelet manager for %d nodes</b></dt>' % len(self.nodelets)
        tooltip += '<dt><b>ORG.MASTERURI:</b> %s</dt></dl>' % self.node_info.masteruri
        master_discovered = nm.nameres().has_master(self.node_info.masteruri)
        # local = False
        # if not self.node_info.uri is None and not self.node_info.masteruri is None:
        # local = (get_hostname(self.node_info.uri) == get_hostname(self.node_info.masteruri))
        # a known PID means the node is running locally; prefer the diagnostic icon on warnings
        if self.node_info.pid is not None:
            self._state = NodeItem.STATE_RUN
            if self.diagnostic_array and self.diagnostic_array[-1].level > 0:
                level = self.diagnostic_array[-1].level
                self.setIcon(self._diagnostic_level2icon(level))
                self.setToolTip(self.diagnostic_array[-1].message)
            else:
                self.setIcon(nm.settings().icon('state_run.png'))
                self.setToolTip('')
        # remote node: has an URI but is not local -> always shown as running
        elif self.node_info.uri is not None and not self.node_info.isLocal:
            self._state = NodeItem.STATE_RUN
            self.setIcon(nm.settings().icon('state_unknown.png'))
            tooltip += '<dl><dt>(Remote nodes will not be ping, so they are always marked running)</dt></dl>'
            tooltip += '</dl>'
            self.setToolTip('<div>%s</div>' % tooltip)
        # elif not self.node_info.isLocal and not master_discovered and not self.node_info.uri is None:
        # # elif not local and not master_discovered and not self.node_info.uri is None:
        # self._state = NodeItem.STATE_RUN
        # self.setIcon(nm.settings().icon('state_run.png'))
        # tooltip = ''.join([tooltip, '<dl><dt>(Remote nodes will not be ping, so they are always marked running)</dt></dl>'])
        # tooltip = ''.join([tooltip, '</dl>'])
        # self.setToolTip(''.join(['<div>', tooltip, '</div>']))
        # no contact information at all, but registered topics/services exist -> warning
        elif self.node_info.pid is None and self.node_info.uri is None and (self.node_info.subscribedTopics or self.node_info.publishedTopics or self.node_info.services):
            self.setIcon(nm.settings().icon('crystal_clear_warning.png'))
            self._state = NodeItem.STATE_WARNING
            tooltip += '<dl><dt>Can\'t get node contact information, but there exists publisher, subscriber or services of this node.</dt></dl>'
            tooltip += '</dl>'
            self.setToolTip('<div>%s</div>' % tooltip)
        # URI known but no further information received -> warning
        elif self.node_info.uri is not None:
            self._state = NodeItem.STATE_WARNING
            self.setIcon(nm.settings().icon('crystal_clear_warning.png'))
            if not self.node_info.isLocal and master_discovered:
                tooltip = '<h4>%s is not local, however the ROS master on this host is discovered, but no information about this node received!</h4>' % self.node_info.name
                self.setToolTip('<div>%s</div>' % tooltip)
        # running remotely but not synchronized (filter or sync errors)
        elif self.is_ghost:
            self._state = NodeItem.STATE_GHOST
            self.setIcon(nm.settings().icon('state_ghost.png'))
            tooltip = '<h4>The node is running, but not synchronized because of filter or errors, see master_sync log.</h4>'
            self.setToolTip('<div>%s</div>' % tooltip)
        # a node with the same name runs on a remote host
        elif self.has_running:
            self._state = NodeItem.STATE_DUPLICATE
            self.setIcon(nm.settings().icon('imacadam_stop.png'))
            tooltip = '<h4>There are nodes with the same name on remote hosts running. These will be terminated, if you run this node! (Only if master_sync is running or will be started somewhere!)</h4>'
            self.setToolTip('<div>%s</div>' % tooltip)
        else:
            self._state = NodeItem.STATE_OFF
            self.setIcon(nm.settings().icon('state_off.png'))
            self.setToolTip('')
        # removed common tooltip for clarity !!!
        # self.setToolTip(''.join(['<div>', tooltip, '</div>']))
def update_displayed_url(self):
'''
Updates the URI representation in other column.
'''
if self.parent_item is not None:
uri_col = self.parent_item.child(self.row(), NodeItem.COL_URI)
if uri_col is not None and isinstance(uri_col, QStandardItem):
uri_col.setText(utf8(self.node_info.uri) if self.node_info.uri is not None else "")
def update_displayed_config(self):
'''
Updates the configuration representation in other column.
'''
if self.parent_item is not None:
cfg_col = self.parent_item.child(self.row(), NodeItem.COL_CFG)
if cfg_col is not None and isinstance(cfg_col, QStandardItem):
cfg_count = len(self._cfgs)
cfg_col.setText(utf8(''.join(['[', utf8(cfg_count), ']'])) if cfg_count > 1 else "")
# no tooltip for clarity !!!
# set icons
has_launches = NodeItem.has_launch_cfgs(self._cfgs)
has_defaults = NodeItem.has_default_cfgs(self._cfgs)
if has_launches and has_defaults:
cfg_col.setIcon(nm.settings().icon('crystal_clear_launch_file_def_cfg.png'))
elif has_launches:
cfg_col.setIcon(nm.settings().icon('crystal_clear_launch_file.png'))
elif has_defaults:
cfg_col.setIcon(nm.settings().icon('default_cfg.png'))
else:
cfg_col.setIcon(QIcon())
# the update of the group will be perform in node_tree_model to reduce calls
# if isinstance(self.parent_item, GroupItem):
# self.parent_item.update_displayed_config()
    @property
    def cfgs(self):
        '''
        Returns the list with all launch configurations assigned to this item.
        Note: the internal list itself is returned (not a copy).
        :rtype: list(str)
        '''
        return self._cfgs
def add_config(self, cfg):
'''
Add the given configurations to the node.
:param str cfg: the loaded configuration, which contains this node.
'''
if cfg == '':
self._std_config = cfg
if cfg and cfg not in self._cfgs:
self._cfgs.append(cfg)
self.update_displayed_config()
def rem_config(self, cfg):
'''
Remove the given configurations from the node.
:param str cfg: the loaded configuration, which contains this node.
'''
result = False
if cfg == '':
self._std_config = None
result = True
if cfg in self._cfgs:
self._cfgs.remove(cfg)
result = True
if result and (self.has_configs() or self.is_running()):
self.update_displayed_config()
return result
    def readd(self):
        '''
        Remove this node from current group and put it in new one, defined by namespace.
        This is only executed if parent_item is valid and the name of this node has namespace.
        '''
        if self.parent_item is not None and self.with_namespace:
            row = None
            # find and take the own row out of the current parent
            for i in reversed(range(self.parent_item.rowCount())):
                item = self.parent_item.child(i)
                if (type(item) == NodeItem) and item.name == self.name:
                    row = self.parent_item.takeRow(i)
                    break
            # NOTE(review): the loop variables 'item'/'row' are reused below;
            # if no matching child is found, 'row' stays None (and 'item' may be
            # unbound for an empty parent). Presumably this node is always among
            # the parent's children so the loop breaks -- TODO confirm.
            group_item = self.parent_item.get_group_item(namespace(item.name), is_group=False)
            group_item._add_row_sorted(row)
            group_item.updateIcon()
    def type(self):
        '''Qt item type identifier used to distinguish NodeItem from other item classes.'''
        return NodeItem.ITEM_TYPE
@classmethod
def newNodeRow(self, name, masteruri):
'''
Creates a new node row and returns it as a list with items. This list is
used for the visualization of node data as a table row.
:param str name: the node name
:param str masteruri: the URI or the ROS master assigned to this node.
:return: the list for the representation as a row list(node name, configuration)
:rtype: list(:class:`NodeItem`, :class:`QtGui.QStandardItem` <https://srinikom.github.io/pyside-docs/PySide/QtGui/QStandardItem.html>)
'''
items = []
item = NodeItem(NodeInfo(name, masteruri))
items.append(item)
cfgitem = CellItem(name, item)
items.append(cfgitem)
# uriitem = QStandardItem()
# items.append(uriitem)
return items
def has_configs(self):
return not (len(self._cfgs) == 0)
def is_running(self):
return not (self._node_info.pid is None and self._node_info.uri is None)
    def has_std_cfg(self):
        '''Returns True if the standard (empty-named) configuration is assigned to this node.'''
        return self._std_config == ''
def count_launch_cfgs(self):
result = 0
for c in self.cfgs:
if not self.is_default_cfg(c):
result += 1
return result
def count_default_cfgs(self):
result = 0
for c in self.cfgs:
if self.is_default_cfg(c):
result += 1
return result
@classmethod
def has_launch_cfgs(cls, cfgs):
for c in cfgs:
if not cls.is_default_cfg(c):
return True
return False
@classmethod
def has_default_cfgs(cls, cfgs):
for c in cfgs:
if cls.is_default_cfg(c):
return True
return False
    @classmethod
    def is_default_cfg(cls, cfg):
        '''Default configurations are represented as tuples, launch files as plain strings.'''
        return isinstance(cfg, tuple)
def __eq__(self, item):
'''
Compares the name of the node.
'''
if isstring(item):
return self.name == item
elif item is not None and type(item) == NodeItem:
return self.name == item.name
return False
def __gt__(self, item):
'''
Compares the name of the node.
'''
if isstring(item):
return self.name > item
elif item is not None and type(item) == NodeItem:
return self.name > item.name
return False
# ###############################################################################
# ############# NodeTreeModel ##############
# ###############################################################################
class NodeTreeModel(QStandardItemModel):
    '''
    The model to show the nodes running in a ROS system or loaded by a launch
    configuration.
    '''
    header = [('Name', 450),
              ('Info', -1)]
    # ('URI', -1)]
    hostInserted = Signal(HostItem)
    ''':ivar HostItem hostInserted: the Qt signal, which is emitted, if a new host was inserted. Parameter: :class:`QtCore.QModelIndex` <https://srinikom.github.io/pyside-docs/PySide/QtCore/QModelIndex.html> of the inserted host item'''

    def __init__(self, host_address, masteruri, parent=None):
        '''
        Initialize the model.
        '''
        super(NodeTreeModel, self).__init__(parent)
        self.setColumnCount(len(NodeTreeModel.header))
        self.setHorizontalHeaderLabels([label for label, _ in NodeTreeModel.header])
        self._local_host_address = host_address
        self._local_masteruri = masteruri
        # default capability description assigned to every new host item
        self._std_capabilities = {'/': {'SYSTEM': {'images': [],
                                                   'nodes': ['/rosout',
                                                             '/master_discovery',
                                                             '/zeroconf',
                                                             '/master_sync',
                                                             '/node_manager',
                                                             '/node_manager_daemon',
                                                             '/dynamic_reconfigure/*'],
                                                   'type': '',
                                                   'description': 'This group contains the system management nodes.'}}}
        # create a handler to request the parameter
        self.parameterHandler = ParameterHandler()
        # self.parameterHandler.parameter_list_signal.connect(self._on_param_list)
        self.parameterHandler.parameter_values_signal.connect(self._on_param_values)
        # self.parameterHandler.delivery_result_signal.connect(self._on_delivered_values)

    @property
    def local_addr(self):
        '''Address of the local host this model was created for.'''
        return self._local_host_address

    def flags(self, index):
        '''All valid items are enabled and selectable, but not editable.'''
        if not index.isValid():
            return Qt.NoItemFlags
        return Qt.ItemIsEnabled | Qt.ItemIsSelectable

    def _set_std_capabilities(self, host_item):
        '''
        Assigns the default capabilities (plus the host's default_cfg node) to
        the given host item and returns them. Without a host item a copy of the
        default capabilities is returned.
        '''
        if host_item is not None:
            cap = self._std_capabilities
            mastername = roslib.names.SEP.join(['', host_item.mastername, '*', 'default_cfg'])
            if mastername not in cap['/']['SYSTEM']['nodes']:
                cap['/']['SYSTEM']['nodes'].append(mastername)
            host_item.add_capabilities('', cap, host_item.masteruri)
            return cap
        return dict(self._std_capabilities)

    def get_hostitem(self, masteruri, address):
        '''
        Searches for the host item in the model. If no item is found a new one will
        created and inserted in sorted order.
        :param str masteruri: ROS master URI
        :param str address: the address of the host
        :return: the item associated with the given master
        :rtype: :class:`HostItem`
        '''
        if masteruri is None:
            return None
        master_entry = nm.nameres().get_master(masteruri, address)
        host = (masteruri, address)
        local = (self.local_addr in [address] + nm.nameres().resolve_cached(address) and
                 nmdurl.equal_uri(self._local_masteruri, masteruri))
        # find the host item by address
        root = self.invisibleRootItem()
        for i in range(root.rowCount()):
            host_item = root.child(i)
            if host_item == master_entry:
                host_item.local = local
                return host_item
            elif host_item > host:
                # keep the rows sorted: insert the new host before the first larger one
                items = []
                hostItem = HostItem(masteruri, address, local, master_entry)
                items.append(hostItem)
                cfgitem = CellItem(masteruri, hostItem)
                items.append(cfgitem)
                self.insertRow(i, items)
                self.hostInserted.emit(hostItem)
                self._set_std_capabilities(hostItem)
                return hostItem
        # no host found and none is larger: append at the end
        items = []
        hostItem = HostItem(masteruri, address, local, master_entry)
        items.append(hostItem)
        cfgitem = CellItem(masteruri, hostItem)
        items.append(cfgitem)
        self.appendRow(items)
        self.hostInserted.emit(hostItem)
        self._set_std_capabilities(hostItem)
        return hostItem

    def update_model_data(self, nodes, info_masteruri):
        '''
        Updates the model data.
        :param nodes: a dictionary with name and info objects of the nodes.
        :type nodes: dict(str: :class:`NodeInfo`)
        :param str info_masteruri: the ROS master URI the node info was received from
        :return: list of updated node items
        '''
        # separate into different hosts
        hosts = dict()
        muris = []
        addresses = []
        updated_nodes = []
        local_info = nmdurl.equal_uri(self._local_masteruri, info_masteruri)
        for i in reversed(range(self.invisibleRootItem().rowCount())):
            host = self.invisibleRootItem().child(i)
            host.reset_remote_launched_nodes()
        for (name, node) in nodes.items():
            addr = get_hostname(node.uri if node.uri is not None else node.masteruri)
            addresses.append(addr)
            muris.append(node.masteruri)
            host = self.get_hostitem(node.masteruri, addr)
            if host not in hosts:
                hosts[host] = dict()
            hosts[host][name] = node
        # update nodes for each host
        for (host_item, nodes_filtered) in hosts.items():
            # rename the host item if needed
            if host_item is not None:
                updated_nodes.extend(host_item.update_running_state(nodes_filtered, local_info))
            # request for all nodes in host the parameter capability_group
            self._requestCapabilityGroupParameter(host_item)
        # update nodes of the hosts, which are not more exists
        for i in reversed(range(self.invisibleRootItem().rowCount())):
            host = self.invisibleRootItem().child(i)
            # remove hosts if they are not updated
            if host.masteruri not in muris:
                host.update_running_state({}, local_info)
            # remove hosts which are connected to local master using ROS_MASTER_URI
            if (not host.local and host.host not in addresses):
                host.update_running_state({}, local_info)
        self._remove_empty_hosts()
        return updated_nodes
        # update the duplicate state
        # self.set_duplicate_nodes(self.get_nodes_running())

    def _requestCapabilityGroupParameter(self, host_item):
        '''Requests the 'capability_group' parameter for all unconfigured running nodes of the host.'''
        if host_item is not None:
            items = host_item.get_node_items()
            params = [roslib.names.ns_join(item.name, 'capability_group') for item in items if not item.has_configs() and item.is_running() and not host_item.is_in_cap_group(item.name, '', '/', 'SYSTEM')]
            if params:
                self.parameterHandler.requestParameterValues(host_item.masteruri, params)

    def _on_param_values(self, masteruri, code, msg, params):
        '''
        Updates the capability groups of nodes from ROS parameter server.
        :param str masteruri: The URI of the ROS parameter server
        :param int code: The return code of the request. If not 1, the message is set and the list can be ignored.
        :param str msg: The message of the result.
        :param params: The dictionary of parameter names and request result as tuple(code, statusMessage, parameterValue)
        :type params: dict(str : (int, str, str)))
        '''
        host = get_hostname(masteruri)
        hostItem = self.get_hostitem(masteruri, host)
        changed = False
        if hostItem is not None and code == 1:
            capabilities = self._set_std_capabilities(hostItem)
            available_ns = set(['/'])
            available_groups = set(['SYSTEM'])
            # assumption: all parameter are 'capability_group' parameter
            for p, (code_n, _, val) in params.items():  # _:=msg_n
                nodename = roslib.names.namespace(p).rstrip(roslib.names.SEP)
                ns = roslib.names.namespace(nodename).rstrip(roslib.names.SEP)
                if not ns:
                    ns = roslib.names.SEP
                available_ns.add(ns)
                if code_n == 1:
                    # add group
                    if val:
                        available_groups.add(val)
                        if ns not in capabilities:
                            capabilities[ns] = dict()
                        if val not in capabilities[ns]:
                            capabilities[ns][val] = {'images': [], 'nodes': [], 'type': '', 'description': 'This group is created from `capability_group` parameter of the node defined in ROS parameter server.'}
                        if nodename not in capabilities[ns][val]['nodes']:
                            capabilities[ns][val]['nodes'].append(nodename)
                            changed = True
                else:
                    try:
                        for group, _ in list(capabilities[ns].items()):
                            try:
                                # remove the config from item, if parameter was not foun on the ROS parameter server
                                groupItem = hostItem.get_group_item(roslib.names.ns_join(ns, group), nocreate=True)
                                if groupItem is not None:
                                    nodeItems = groupItem.get_node_items_by_name(nodename, True)
                                    for item in nodeItems:
                                        item.rem_config('')
                                    capabilities[ns][group]['nodes'].remove(nodename)
                                    # remove the group, if empty
                                    if not capabilities[ns][group]['nodes']:
                                        del capabilities[ns][group]
                                        if not capabilities[ns]:
                                            del capabilities[ns]
                                    groupItem.update_displayed_config()
                                    changed = True
                            except Exception:
                                pass
                    except Exception:
                        pass
            # clearup namespaces to remove empty groups
            for ns in list(capabilities.keys()):
                if ns and ns not in available_ns:
                    del capabilities[ns]
                    changed = True
                else:
                    for group in list(capabilities[ns].keys()):
                        if group and group not in available_groups:
                            del capabilities[ns][group]
                            changed = True
            # update the capabilities and the view
            if changed:
                if capabilities:
                    hostItem.add_capabilities('', capabilities, hostItem.masteruri)
                hostItem.clearup()
        else:
            rospy.logwarn("Error on retrieve \'capability group\' parameter from %s: %s", utf8(masteruri), msg)

    def update_system_diagnostics(self, masteruri, diagnostics):
        '''Forwards received system diagnostics to the host item of the given master.'''
        host = get_hostname(masteruri)
        host_item = self.get_hostitem(masteruri, host)
        if host_item is not None:
            host_item.update_system_diagnostics(diagnostics)

    def sysmon_set_state(self, masteruri, state):
        '''Stores the system-monitor pull state on the host item of the given master.'''
        host = get_hostname(masteruri)
        host_item = self.get_hostitem(masteruri, host)
        if host_item is not None:
            host_item.sysmon_state = state

    def set_std_capablilities(self, capabilities):
        '''
        Sets the default capabilities description, which is assigned to each new
        host.
        NOTE(review): the method name is misspelled, but kept for API compatibility.
        :param capabilities: the structure for capabilities
        :type capabilities: {namespace: {group: {'type': str, 'description': str, 'nodes': [str]}}}
        '''
        self._std_capabilities = capabilities

    def add_capabilities(self, masteruri, host_address, cfg, capabilities):
        '''
        Adds groups to the model
        :param str masteruri: ROS master URI
        :param str host_address: the address the host
        :param str cfg: the configuration name (launch file name)
        :param capabilities: the structure for capabilities
        :type capabilities: {namespace: {group: {'type': str, 'description': str, 'nodes': [str]}}}
        '''
        hostItem = self.get_hostitem(masteruri, host_address)
        if hostItem is not None:
            # add new capabilities
            hostItem.add_capabilities(cfg, capabilities, hostItem.masteruri)
        self._remove_empty_hosts()

    def append_config(self, masteruri, host_address, nodes):
        '''
        Adds nodes to the model. If the node is already in the model, only his
        configuration list will be extended.
        :param str masteruri: ROS master URI
        :param str host_address: the address the host
        :param nodes: a dictionary with node names and their configurations
        :type nodes: {str: str}
        '''
        hostItem = self.get_hostitem(masteruri, host_address)
        if hostItem is not None:
            groups = {}
            for (name, cfg) in nodes.items():
                items = hostItem.get_node_items_by_name(name)
                for item in items:
                    if item.parent_item is not None:
                        groups[item.parent_item.get_namespace()] = item.parent_item
                    item.add_config(cfg)
                    item.readd()
                if not items:
                    # create the new node
                    node_info = NodeInfo(name, masteruri)
                    hostItem.add_node(node_info, cfg)
                    # get the group of the added node to be able to update the group view, if needed
                    items = hostItem.get_node_items_by_name(name)
                    for item in items:
                        if item.parent_item is not None:
                            groups[item.parent_item.get_namespace()] = item.parent_item
            # update the changed groups
            for _name, g in groups.items():
                g.update_displayed_config()
            hostItem.clearup()
        self._remove_empty_hosts()
        # update the duplicate state
        # self.set_duplicate_nodes(self.get_nodes_running())

    def remove_config(self, cfg):
        '''
        Removes nodes from the model. If a node is running or contained in other
        launch or default configurations, only its configuration list will be
        reduced.
        :param str cfg: the name of the configuration to close
        '''
        for i in reversed(range(self.invisibleRootItem().rowCount())):
            host = self.invisibleRootItem().child(i)
            items = host.get_node_items()
            groups = {}
            for item in items:
                removed = item.rem_config(cfg)
                if removed and item.parent_item is not None:
                    groups[item.parent_item.get_namespace()] = item.parent_item
            for _name, g in groups.items():
                g.update_displayed_config()
            host.rem_capablities(cfg)
            host.clearup()
            if host.rowCount() == 0:
                self.invisibleRootItem().removeRow(i)
            elif groups:
                # request for all nodes in host the parameter capability_group
                self._requestCapabilityGroupParameter(host)

    def _remove_empty_hosts(self):
        # remove empty hosts
        for i in reversed(range(self.invisibleRootItem().rowCount())):
            host = self.invisibleRootItem().child(i)
            if host.rowCount() == 0:  # or not host.remote_launched_nodes_updated():
                self.invisibleRootItem().removeRow(i)

    def get_tree_node(self, node_name, masteruri):
        '''
        Since the same node can be included by different groups, this method searches
        for all nodes with given name and returns these items.
        :param str node_name: The name of the node
        :param str masteruri: restrict the search to this master (None: search all hosts)
        :return: The list with node items.
        :rtype: list(:class:`QtGui.QStandardItem` <https://srinikom.github.io/pyside-docs/PySide/QtGui/QStandardItem.html>)
        '''
        for i in reversed(range(self.invisibleRootItem().rowCount())):
            host = self.invisibleRootItem().child(i)
            if host is not None and (masteruri is None or nmdurl.equal_uri(host.masteruri, masteruri)):
                res = host.get_node_items_by_name(node_name)
                if res:
                    return res
        return []

    def clear_multiple_screens(self, masteruri):
        '''Resets the multiple-screens flag on all hosts of the given master (None: all hosts).'''
        for i in reversed(range(self.invisibleRootItem().rowCount())):
            host = self.invisibleRootItem().child(i)
            if host is not None and (masteruri is None or nmdurl.equal_uri(host.masteruri, masteruri)):
                host.clear_multiple_screens()

    def get_node_items_by_name(self, nodes, only_local=True):
        '''
        Returns a list with matched node items.
        :param nodes: list of node names to search for
        :param bool only_local: if True only local hosts are searched
        :rtype: list(:class:`NodeItem`)
        '''
        result = list()
        # # determine all running nodes
        for i in reversed(range(self.invisibleRootItem().rowCount())):
            host = self.invisibleRootItem().child(i)
            if (only_local and host is not None and host.local) or not only_local:
                for node in nodes:
                    # FIX: previously this was 'result[len(nodes):] = ...', which
                    # overwrote already collected items whenever len(result)
                    # differed from len(nodes); extend appends them instead.
                    result.extend(host.get_node_items_by_name(node))
        return result

    def get_nodes_running(self):
        '''
        Returns a list with all known running nodes.
        :rtype: list(str)
        '''
        running_nodes = list()
        # # determine all running nodes
        for i in reversed(range(self.invisibleRootItem().rowCount())):
            host = self.invisibleRootItem().child(i)
            if host is not None:  # should not occur
                running_nodes[len(running_nodes):] = host.get_nodes_running()
        return running_nodes

    def set_duplicate_nodes(self, running_nodes, is_sync_running=False):
        '''
        If there are a synchronization running, you have to avoid to running the
        node with the same name on different hosts. This method helps to find the
        nodes with same name running on other hosts and loaded by a configuration.
        The nodes loaded by a configuration will be inform about a currently running
        nodes, so a warning can be displayed!
        :param running_nodes: The dictionary with names of running nodes and their masteruri
        :type running_nodes: {str: str}
        :param bool is_sync_running: If the master_sync is running, the nodes are marked
            as ghost nodes. So they are handled as running nodes, but has not run
            informations. This nodes are running on remote host, but are not
            syncronized because of filter or errors.
        '''
        for i in reversed(range(self.invisibleRootItem().rowCount())):
            host = self.invisibleRootItem().child(i)
            if host is not None:  # should not occur
                host.set_duplicate_nodes(running_nodes, is_sync_running)

    def update_host_description(self, masteruri, host, descr_type, descr_name, descr):
        '''
        Updates the description of a host.
        :param str masteruri: ROS master URI of the host to update
        :param str host: host to update
        :param str descr_type: the type of the robot
        :param str descr_name: the name of the robot
        :param str descr: the description of the robot as a reStructuredText<http://docutils.sourceforge.net/rst.html>
        '''
        root = self.invisibleRootItem()
        for i in range(root.rowCount()):
            if root.child(i) == (utf8(masteruri), utf8(host)):
                h = root.child(i)
                h.update_description(descr_type, descr_name, descr)
                return h.update_tooltip()
# ###############################################################################
# ############# NodeInfoIconsDelegate ##############
# ###############################################################################
class NodeInfoIconsDelegate(QItemDelegate):
    '''
    Decorates the info column with small status icons (screens, launch files,
    nodelets and host diagnostics) and maintains the matching tooltip.
    '''

    def __init__(self, parent=None, *args):
        QItemDelegate.__init__(self, parent, *args)
        self._idx_icon = 1
        self._hspacing = 2
        self._vspacing = 2
        self._icon_size = 0
        self.IMAGES = {}

    def _scale_icons(self, icon_size):
        '''Rescales all status images to the given square icon size.'''
        self._icon_size = icon_size
        params = (self._icon_size, self._icon_size, Qt.IgnoreAspectRatio, Qt.SmoothTransformation)
        self.IMAGES = {'launchfile': nm.settings().image('crystal_clear_launch_file.png').scaled(*params),
                       'defaultcfg': nm.settings().image('default_cfg.png').scaled(*params),
                       'nodelet': nm.settings().image('crystal_clear_nodelet.png').scaled(*params),
                       'nodelet_mngr': nm.settings().image('crystal_clear_nodelet_mngr.png').scaled(*params),
                       'warning': nm.settings().image('crystal_clear_warning.png').scaled(*params),
                       'noscreen': nm.settings().image('crystal_clear_no_io.png').scaled(*params),
                       'misc': nm.settings().image('crystal_clear_miscellaneous.png').scaled(*params),
                       'group': nm.settings().image('crystal_clear_group.png').scaled(*params),
                       'mscreens': nm.settings().image('crystal_clear_mscreens.png').scaled(*params),
                       'sysmon': nm.settings().image('crystal_clear_get_parameter.png').scaled(*params),
                       'clock_warn': nm.settings().image('crystal_clear_xclock_fail.png').scaled(*params),
                       'cpu_warn': nm.settings().image('hight_load.png').scaled(*params),
                       'cpu_temp_warn': nm.settings().image('temperatur_warn.png').scaled(*params),
                       'hdd_warn': nm.settings().image('crystal_clear_hdd_warn.png').scaled(*params),
                       'net_warn': nm.settings().image('sekkyumu_net_warn.png').scaled(*params),
                       'mem_warn': nm.settings().image('mem_warn.png').scaled(*params)
                       }

    def paint(self, painter, option, index):
        # rescale the images if the row height changed
        if option.rect.height() - self._vspacing * 2 != self._icon_size:
            self._icon_size = option.rect.height() - self._vspacing * 2
            self._scale_icons(self._icon_size)
        painter.save()
        self._idx_icon = 1
        # we assume the model has an filter proxy installed
        model_index = index.model().mapToSource(index)
        item = model_index.model().itemFromIndex(model_index)
        if isinstance(item, CellItem):
            if isinstance(item.item, NodeItem):
                tooltip = ''
                if item.item.has_multiple_screens:
                    rect = self.calcDecorationRect(option.rect)
                    painter.drawImage(rect, self.IMAGES['mscreens'])
                    tooltip += 'multiple screens'
                if not item.item.has_screen:
                    rect = self.calcDecorationRect(option.rect)
                    painter.drawImage(rect, self.IMAGES['noscreen'])
                    # FIX: separate from a previous entry instead of concatenating
                    # directly ('multiple screensno screen')
                    tooltip += '%sno screen' % ('\n' if tooltip else '')
                lcfgs = item.item.count_launch_cfgs()
                if lcfgs > 0:
                    rect = self.calcDecorationRect(option.rect)
                    painter.drawImage(rect, self.IMAGES['launchfile'])
                    if lcfgs > 1:
                        painter.drawText(rect, Qt.AlignCenter, str(lcfgs))
                if item.item.nodelets:
                    rect = self.calcDecorationRect(option.rect)
                    painter.drawImage(rect, self.IMAGES['nodelet_mngr'])
                if item.item.nodelet_mngr:
                    rect = self.calcDecorationRect(option.rect)
                    painter.drawImage(rect, self.IMAGES['nodelet'])
                if item.item.nodelets:
                    # FIX: the conditional expression was not parenthesized, so the
                    # whole string (not just the separator) was dropped whenever the
                    # tooltip was still empty.
                    tooltip += "%sThis is a nodelet manager" % ('\n' if tooltip else '')
                elif item.item.nodelet_mngr:
                    tooltip += "%sThis is a nodelet for %s" % ('\n' if tooltip else '', item.item.nodelet_mngr)
                item.setToolTip(tooltip)
            elif isinstance(item.item, HostItem):
                tooltip = ''
                if item.item.sysmon_state:
                    tooltip += '<dt><font color="orange">%s</font></dt>' % ("active pull for system diagnostic is enabled")
                    rect = self.calcDecorationRect(option.rect)
                    painter.drawImage(rect, self.IMAGES['sysmon'])
                diagnostics = item.item.diagnostics
                for diag in diagnostics:
                    if diag.level > 0:
                        # FIX: restore the HTML escaping of the message; the previous
                        # identity replaces ('>' -> '>') were a no-op
                        tooltip += '\n<dt><font color="red">%s</font></dt>' % (diag.message.replace('>', '&gt;').replace('<', '&lt;'))
                        if 'Network Load' in diag.name:
                            rect = self.calcDecorationRect(option.rect)
                            painter.drawImage(rect, self.IMAGES['net_warn'])
                        if 'CPU Load' in diag.name:
                            rect = self.calcDecorationRect(option.rect)
                            painter.drawImage(rect, self.IMAGES['cpu_warn'])
                        if 'CPU Temperature' in diag.name:
                            rect = self.calcDecorationRect(option.rect)
                            painter.drawImage(rect, self.IMAGES['cpu_temp_warn'])
                        if 'Memory Usage' in diag.name:
                            rect = self.calcDecorationRect(option.rect)
                            painter.drawImage(rect, self.IMAGES['mem_warn'])
                        if 'HDD Usage' in diag.name:
                            rect = self.calcDecorationRect(option.rect)
                            painter.drawImage(rect, self.IMAGES['hdd_warn'])
                item.setToolTip(tooltip)
            elif isinstance(item.item, GroupItem):
                lcfgs = len(item.item.get_configs())
                rect = self.calcDecorationRect(option.rect)
                painter.drawImage(rect, self.IMAGES['group'])
                count_nodes = item.item.count_nodes()
                if count_nodes > 1:
                    painter.drawText(rect, Qt.AlignCenter, str(count_nodes))
                if lcfgs > 0:
                    rect = self.calcDecorationRect(option.rect)
                    painter.drawImage(rect, self.IMAGES['launchfile'])
                    if lcfgs > 1:
                        painter.drawText(rect, Qt.AlignCenter, str(lcfgs))
                mscreens = item.item.get_count_mscreens()
                if mscreens > 0:
                    rect = self.calcDecorationRect(option.rect)
                    painter.drawImage(rect, self.IMAGES['mscreens'])
                    # if mscreens > 1:
                    #     painter.drawText(rect, Qt.AlignCenter, str(mscreens))
        painter.restore()

    def calcDecorationRect(self, main_rect, image=True):
        '''Returns the rectangle for the next icon and advances the horizontal offset.'''
        rect = QRect()
        rect.setX(main_rect.x() + self._idx_icon + self._hspacing)
        rect.setY(main_rect.y() + self._vspacing)
        rect.setWidth(self._icon_size if image else main_rect.width() - self._idx_icon)
        rect.setHeight(self._icon_size)
        self._idx_icon += self._icon_size + self._hspacing
        return rect
| 43.723737 | 236 | 0.566588 |
982d148b1d7493298de788ebd9ee1db60b5b1494 | 4,367 | py | Python | ttpp/metrics.py | shchur/triangular-tpp | 19df15fb6f0701e0a2cb9f6afc8a5dfe87a80f40 | [
"MIT"
] | 19 | 2020-11-10T11:31:05.000Z | 2022-03-19T16:15:02.000Z | ttpp/metrics.py | shchur/triangular-tpp | 19df15fb6f0701e0a2cb9f6afc8a5dfe87a80f40 | [
"MIT"
] | null | null | null | ttpp/metrics.py | shchur/triangular-tpp | 19df15fb6f0701e0a2cb9f6afc8a5dfe87a80f40 | [
"MIT"
] | 5 | 2021-03-03T10:24:50.000Z | 2021-12-13T09:20:06.000Z | import numpy as np
from scipy.stats import wasserstein_distance
from typing import List
def counting_distance(x: np.ndarray, Y: np.ndarray, t_max: float):
"""Computes the distance between the counting process of x and y.
This computation is batched and expects a 1D array for x and a 2D array for Y.
From: https://arxiv.org/abs/1705.08051
Args:
x (np.ndarray): Event times for x
Y (np.ndarray): List of sequences
t_max (float): max time
Returns:
np.ndarray: distance
"""
x = x[None].repeat(Y.shape[0], 0)
x_len = (x < t_max).sum(-1)
y_len = (Y < t_max).sum(-1)
to_swap = x_len > y_len
x[to_swap], Y[to_swap] = x[to_swap], Y[to_swap]
mask_x = x < t_max
mask_y = Y < t_max
result = (np.abs(x - Y) * mask_x).sum(-1)
result += ((t_max - Y) * (~mask_x & mask_y)).sum(-1)
return result
def gaussian_kernel(x: np.ndarray, sigma2: float = 1):
    """Gaussian (RBF) kernel value exp(-x / (2 * sigma2)) for a precomputed distance x."""
    scale = 2 * sigma2
    return np.exp(-x / scale)
def match_shapes(X: List, Y: List, t_max: float):
    """Pad two lists of event sequences to a common width.

    Events at or after ``t_max`` are dropped; every row is right-padded with
    ``t_max`` so the two returned arrays share the same second dimension.

    Args:
        X (List): List of sequences
        Y (List): List of sequences
        t_max (float): max time

    Returns:
        (np.ndarray, np.ndarray): padded versions of X and Y
    """
    width = max(max((seq < t_max).sum() for seq in X),
                max((seq < t_max).sum() for seq in Y))

    def pad(seqs):
        # One row per sequence, pre-filled with the padding value t_max.
        out = np.full((len(seqs), width), t_max, dtype=float)
        for row, seq in enumerate(seqs):
            kept = seq[seq < t_max]
            out[row, :len(kept)] = kept
        return out

    return pad(X), pad(Y)
def MMD(X: List, Y: List, t_max: float, sample_size: int = None, sigma: float = None):
    """Computes the maximum mean discrepancy between the samples X and samples Y.

    MMD is defined as E[k(x, x)] - 2*E[k(x, y)] + E[k(y, y)]. We use a Gaussian kernel
    with the counting distance. k(x, y) = exp(-d(x, y)/(2*sigma2)) where d is the counting
    distance and sigma is either given or estimated as the median distance between all pairs.

    Args:
        X (List): List of sequences
        Y (List): List of sequences
        t_max (float): max time
        sample_size (int, optional): If given, MMD is only computed for random subsets of
            X and Y. This improves speed at the cost of accuracy. Defaults to None.
        sigma (float, optional): Sigma for the Gaussian kernel. If not given it is
            estimated via the median heuristic. Defaults to None.

    Returns:
        (float, float): (MMD(X, Y), sigma used for the kernel)
    """
    # Pad both sample sets to a common shape (rows padded with t_max).
    X, Y = match_shapes(X, Y, t_max)
    if sample_size is not None:
        # BUGFIX: subsample by ndarray indexing so X/Y stay 2D arrays.  The
        # previous list comprehensions produced Python lists, which made the
        # normalisation `X / t_max` below fail with a TypeError.
        X = X[np.random.choice(len(X), sample_size)]
        Y = Y[np.random.choice(len(Y), sample_size)]
    # Normalize the time axis to [0, 1].
    X = X / t_max
    Y = Y / t_max
    t_max = 1
    # Pairwise counting distances within X, across X/Y, and within Y.
    x_x_d = []
    for x1 in X:
        x_x_d.append(counting_distance(x1, X, t_max=t_max))
    x_x_d = np.concatenate(x_x_d)
    x_y_d = []
    for x in X:
        x_y_d.append(counting_distance(x, Y, t_max=t_max))
    x_y_d = np.concatenate(x_y_d)
    y_y_d = []
    for y1 in Y:
        y_y_d.append(counting_distance(y1, Y, t_max=t_max))
    y_y_d = np.concatenate(y_y_d)
    # Median heuristic for the kernel bandwidth when none was supplied.
    if sigma is None:
        sigma = np.median(np.concatenate([x_x_d, x_y_d, y_y_d]))
    sigma2 = sigma**2
    E_x_x = np.mean(gaussian_kernel(x_x_d, sigma2))
    E_x_y = np.mean(gaussian_kernel(x_y_d, sigma2))
    E_y_y = np.mean(gaussian_kernel(y_y_d, sigma2))
    # Clamp tiny negative values caused by floating-point cancellation so the
    # square root never produces NaN.
    mmd2 = E_x_x - 2 * E_x_y + E_y_y
    return np.sqrt(max(mmd2, 0.0)), sigma
def lengths_distribution_wasserstein_distance(X: List, Y: List, t_max: float, mean_number_items: float):
    """Wasserstein distance between the sequence-length distributions of X and Y.

    Args:
        X (List): List of sequences
        Y (List): List of sequences
        t_max (float): max time; only events strictly before t_max are counted
        mean_number_items (float): mean event count of the dataset, used to
            normalize both length distributions

    Returns:
        float: Wasserstein distance
    """
    def normalized_lengths(seqs):
        counts = [(seq < t_max).sum().item() for seq in seqs]
        return np.array(counts) / mean_number_items

    return wasserstein_distance(normalized_lengths(X), normalized_lengths(Y))
| 34.65873 | 115 | 0.622853 |
1dcbc23799952a4305c271253b09474d23fee4b0 | 3,084 | py | Python | huaweicloud-sdk-vpc/huaweicloudsdkvpc/v2/model/show_network_ip_availabilities_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-vpc/huaweicloudsdkvpc/v2/model/show_network_ip_availabilities_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-vpc/huaweicloudsdkvpc/v2/model/show_network_ip_availabilities_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowNetworkIpAvailabilitiesRequest:
    """Request model for querying the IP availability of a network.

    Attributes:
        openapi_types (dict): maps attribute name -> attribute type.
        attribute_map (dict): maps attribute name -> JSON key in the definition.
    """

    sensitive_list = []

    openapi_types = {
        'network_id': 'str'
    }

    attribute_map = {
        'network_id': 'network_id'
    }

    def __init__(self, network_id=None):
        """ShowNetworkIpAvailabilitiesRequest - a model defined in huaweicloud sdk"""
        self._network_id = None
        self.discriminator = None
        self.network_id = network_id

    @property
    def network_id(self):
        """Gets the network_id of this ShowNetworkIpAvailabilitiesRequest.

        Network ID

        :return: The network_id of this ShowNetworkIpAvailabilitiesRequest.
        :rtype: str
        """
        return self._network_id

    @network_id.setter
    def network_id(self, network_id):
        """Sets the network_id of this ShowNetworkIpAvailabilitiesRequest.

        Network ID

        :param network_id: The network_id of this ShowNetworkIpAvailabilitiesRequest.
        :type: str
        """
        self._network_id = network_id

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {
                    k: (v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in value.items()
                }
            elif name in self.sensitive_list:
                # Mask sensitive attributes in the serialized output.
                result[name] = "****"
            else:
                result[name] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if isinstance(other, ShowNetworkIpAvailabilitiesRequest):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 27.052632 | 85 | 0.565175 |
49ad8b4f4a5a01ef5ee0b715d24f1aa8e9a5a467 | 10,394 | py | Python | pywindowframes/elements.py | afmelin/pywindowframes | eab943940aff9924b013a07999a2abeee4a0b520 | [
"MIT"
] | 1 | 2021-02-16T18:25:15.000Z | 2021-02-16T18:25:15.000Z | pywindowframes/elements.py | afmelin/pywindowframes | eab943940aff9924b013a07999a2abeee4a0b520 | [
"MIT"
] | 5 | 2021-02-16T18:21:44.000Z | 2021-02-16T18:28:45.000Z | pywindowframes/elements.py | afmelin/pywindowframes | eab943940aff9924b013a07999a2abeee4a0b520 | [
"MIT"
] | null | null | null | import pygame as pg
from time import time
class BaseElement:
    """Base class for all window elements.

    Elements register themselves with their owning window on construction and
    are positioned/sized either in pixels (pos/size) or in window grid units
    (grid_pos/grid_size).
    """

    def __init__(self, name, window, pos=None, size=None, border=True, grid_pos=None, grid_size=None):
        """
        Using grid size and grid rects is a lot easier than using pos and size directly. Just set a window size
        to a certain grid_size and you will know exactly how much the window will fit, as long as all elements
        are also grid sized

        NOTE: Recommended use is to use grid_pos and grid_size!

        :param name: this is important, it is used as an identifier
        :param window: which window it belongs to
        :param pos: position in pixels. If set to None, grid_pos must be used instead
        :param size: size in pixels. If set to None, grid_size must be used instead
        :param border: bool, border around element
        :param grid_pos: x, y values corresponding to grid rect positions.
        :param grid_size: w, h values corresponding to grid rect sizes (w = 1 == grid_rect_size[0])
        """
        self.name = name  # this is used as identifier when posting events!
        # grid vars
        self.grid_size = grid_size
        self.grid_pos = grid_pos
        self.window = window
        self.window_size = self.window.size
        # Resolve pixel size/pos from either the explicit values or the grid values.
        self.size = self.internal_size(size)
        self.rect = pg.Rect((0, 0), self.size)
        self.pos = self.internal_pos(pos)
        self.pos_string = None
        # surface (transparent per default; (1, 1, 1) acts as the colorkey)
        self.surface = pg.Surface(self.size)
        self.surface.set_colorkey((1, 1, 1))
        self.surface.fill((1, 1, 1))
        # visual
        self.border = border
        self.border_color = (0, 0, 0)
        self.border_mouse_over_color = (255, 0, 0)
        # timings
        self.was_clicked_time = 0
        # click flags
        self.mouse_over = False
        self.clicked = False
        self.dragged = False  # why not? Might be useful
        # other flags
        self.has_changed = True
        # owner window list: the window iterates this to update/draw elements
        self.window.elements.append(self)

    def internal_pos(self, pos):
        """
        This is the position the element will attempt to assume.
        Coordinates are in window coordinates (excluding top border).

        :param pos: tuple/list of int/float (will be converted to int)
        :return: (int, int)
        """
        x = 0
        y = 0
        if not pos:
            # No explicit pixel position: derive it from the window grid.
            assert isinstance(self.grid_pos, tuple), "No element.pos or element.grid_pos found!" \
                                                     " Use one of them when instancing element"
            x = self.grid_pos[0] * self.window.grid_rect_size[0]
            y = self.grid_pos[1] * self.window.grid_rect_size[1]
        if isinstance(pos, (tuple, list)):
            assert isinstance(pos[0], (int, float)), "element.pos tuple/list must be (int or float, int or float)"
            assert isinstance(pos[1], (int, float)), "element.pos tuple/list must be (int or float, int or float)"
            x = pos[0]
            y = pos[1]
        # print(f"{self.name}, internal pos returned {int(x), int(y)}")
        return int(x), int(y)

    def internal_size(self, size):
        """
        This is the size (in pixels) the element will attempt to assume,
        derived from grid_size when no explicit size is given.

        :param size: tuple/list of int/float (will be converted to int)
        :return: (int, int)
        """
        x = 0
        y = 0
        if not size:
            # NOTE(review): the two concatenated message parts below are missing
            # a separating space ("...foundUse...") -- cosmetic assert-message bug.
            assert isinstance(self.grid_size, tuple), "No element.size or element.grid_size found" \
                                                      "Use one of them when instancing element"
            x = self.grid_size[0] * self.window.grid_rect_size[0]
            y = self.grid_size[1] * self.window.grid_rect_size[1]
        if isinstance(size, (tuple, list)):
            assert isinstance(size[0], (int, float)), "Position tuple/list must be (int or float, int or float)"
            assert isinstance(size[1], (int, float)), "Position tuple/list must be (int or float, int or float)"
            x = size[0]
            y = size[1]
        return int(x), int(y)

    def window_has_changed(self, window):
        """
        If the window the element is associated with is changed/reinstanced, this method
        must be called
        """
        self.window = window
        self.window_size = self.window.size
        print("Window instance changed, updating element", self.__class__)

    def set_mouse_over(self):
        """
        Call this for proper behavior, don't change attributes directly
        """
        self.mouse_over = True
        self.has_changed = True

    #@debdec
    def draw(self):
        """Redraw the element surface (border only in the base class)."""
        # print("Trying to draw element", self.name)
        # update rect
        self.rect = pg.Rect(self.pos, self.size)
        # mouse over color
        color = self.border_color
        if self.mouse_over:
            color = self.border_mouse_over_color
            # print(f"{self.name} is mouse over and changing color to {self.border_mouse_over_color}")
        # draw border if True
        if self.border:
            # Recreate the surface so stale content is cleared before drawing.
            self.surface = pg.Surface(self.size)
            self.surface.set_colorkey((1, 1, 1))
            self.surface.fill((1, 1, 1))
            pg.draw.rect(self.surface, color, (0, 0, self.size[0], self.size[1]), border_radius=10, width=1)

    def post_event(self, event):
        # Forward the event string to the owning window's event queue.
        self.window.add_window_event(event)

    #@debdec
    def remake_border(self, radius=0):
        """Redraw only the border onto the existing surface (no surface reset)."""
        color = self.border_color
        if self.mouse_over:
            color = self.border_mouse_over_color
        pg.draw.rect(self.surface, color, ((0, 0), self.size), width=1, border_radius=radius)

    # @debdec
    def on_click(self):
        """Mark the element clicked, record the time, run the subclass hook."""
        self.clicked = True
        self.was_clicked_time = time()
        self.custom_on_click()

    def custom_on_click(self):
        # override if custom behavior is wanted
        pass

    # @debdec
    def reset_flags(self):
        """
        Call this first of all methods when iterating through elements
        """
        if self.clicked:
            self.clicked = False
            self.has_changed = True
        if self.mouse_over:
            self.mouse_over = False
            self.has_changed = True
        self.dragged = False

    #@debdec
    def update(self):
        """Per-frame update: redraw when dirty, then run the subclass hook."""
        self.rect = pg.Rect(self.pos, self.size)
        if self.has_changed:
            # print(f"{self.name} has changed = True")
            self.draw()
            self.has_changed = False
        self.custom_update()

    def custom_update(self):
        # override for custom behavior
        pass
class Button(BaseElement):
    """Clickable element that renders a text label and posts a
    "<name>-was_clicked" window event when clicked."""

    def __init__(self, name, window, pos, size, text, border=True, grid_size=None, grid_pos=None):
        # BUGFIX: pass the grid arguments by keyword.  BaseElement.__init__
        # declares (..., grid_pos, grid_size); forwarding `grid_size, grid_pos`
        # positionally (as before) silently swapped the two values.
        super().__init__(name, window, pos, size, border, grid_pos=grid_pos, grid_size=grid_size)
        # button text
        pg.font.init()
        self.text = text
        self.text_color = (0, 0, 0)
        self.text_font = pg.font.Font(None, 32)
        self.text_surface = self.text_font.render(self.text, False, self.text_color, None)
        self.text_surface_pos = (0, 0)
        self.click_text_color = (255, 0, 0)
        self.text_surface_has_changed = True
        self.adjust_size_to_text()

    # override
    # @debdec
    def custom_on_click(self):
        """Post a "<name>-was_clicked" event to the owning window."""
        print(f"{self.name} was clicked")
        event = "-".join([self.name, "was_clicked"])
        self.post_event(event)

    def adjust_size_to_text(self):
        """Widen the button (plus a small margin) if the label would overflow it."""
        if self.size[0] < self.text_surface.get_size()[0]:
            original_y = self.size[1]
            self.size = self.text_surface.get_size()[0] + 5, original_y

    # @debdec
    def click_text_color_change(self):
        """Re-render the label; red while the button is in the clicked state."""
        color = self.click_text_color if self.clicked else self.text_color
        self.text_font = pg.font.Font(None, 32)
        self.text_surface = self.text_font.render(self.text, False, color, None)
        self.text_surface_pos = (0, 0)
        self.text_surface_has_changed = True

    def center_text(self):
        """Center the label surface inside the button."""
        self.text_surface_pos = (self.size[0] / 2 - self.text_surface.get_size()[0] / 2,
                                 self.size[1] / 2 - self.text_surface.get_size()[1] / 2)

    # @debdec
    def custom_update(self):
        """Refresh the label each frame and blit it onto the element surface."""
        self.click_text_color_change()
        if self.text_surface_has_changed:
            self.center_text()
            self.text_surface_has_changed = False
        self.surface.blit(self.text_surface, self.text_surface_pos)
class DynamicSurface(BaseElement):
    """
    Updates its surface from an externally supplied surface on a specified interval.
    """

    def __init__(self, name, window, pos=None, size=None, border=True,
                 surface_to_blit_function=None,
                 surface_update_interval=0,
                 grid_pos=None,
                 grid_size=None):
        super().__init__(name, window, pos, size, border, grid_pos, grid_size)
        # see update_surface for instructions
        self.surface_to_blit_position = 0, 0
        self.surface_to_blit_function = surface_to_blit_function
        self.surface_update_interval = surface_update_interval
        # Timestamp of the last refresh; 0 forces a refresh on the first update.
        self.last_update = 0

    def custom_update(self):
        self.check_interval()

    def check_interval(self):
        """Refresh the external surface once the update interval has elapsed."""
        now = time()
        if now > self.last_update + self.surface_update_interval:
            self.update_surface()
            # BUGFIX: record the refresh time.  Previously `last_update` was
            # never advanced, so `surface_update_interval` was ignored and the
            # surface was re-rendered on every single update.
            self.last_update = now

    # override
    def draw(self):
        # Intentionally disabled: the surface is produced by update_surface().
        ...

    def resize_to_surface(self):
        # Hook for subclasses that want to match the blitted surface's size.
        ...

    def update_surface(self):
        """
        When instancing this class, provide a reference (surface_to_blit_function)
        to a function complying with the following criteria:
        * No parameters
        * Returns a pygame.Surface
        This surface will be updated each frame or at the interval specified
        """
        surface_to_blit = pg.Surface((0, 0))
        if self.surface_to_blit_function:
            surface_to_blit = self.surface_to_blit_function()
        self.resize_to_surface()
        self.surface.blit(surface_to_blit, (0, 0))
        self.remake_border()
| 35.114865 | 115 | 0.585723 |
46f7731c0e5de3228f484d7ccd006c7137c2c7eb | 259 | py | Python | electrumsv/wallet_database/exceptions.py | electrumsv/electrumsv | a2d9027ccec338cadfca778888e6ef7f077b1651 | [
"MIT"
] | 136 | 2019-01-10T15:49:09.000Z | 2022-02-20T04:46:39.000Z | electrumsv/wallet_database/exceptions.py | electrumsv/electrumsv | a2d9027ccec338cadfca778888e6ef7f077b1651 | [
"MIT"
] | 790 | 2019-01-07T01:53:35.000Z | 2022-03-30T23:04:28.000Z | electrumsv/wallet_database/exceptions.py | electrumsv/electrumsv | a2d9027ccec338cadfca778888e6ef7f077b1651 | [
"MIT"
] | 65 | 2019-01-10T23:55:30.000Z | 2021-12-19T06:47:13.000Z | class DataPackingError(Exception):
pass
class DatabaseUpdateError(Exception):
    """Error raised when a database update cannot be applied."""
    pass


class KeyInstanceNotFoundError(Exception):
    """Error raised when a requested key instance does not exist."""
    pass


class TransactionAlreadyExistsError(Exception):
    """Error raised when inserting a transaction that is already stored."""
    pass


class TransactionRemovalError(Exception):
    """Error raised when a transaction cannot be removed."""
    pass
| 13.631579 | 47 | 0.776062 |
bc190ef54fbf1ea7cb678a2ed718df8c2cdd8110 | 691 | py | Python | misctests/strength_conversion_test.py | anttikantee/wbc | cf8c25f01eb3aeb96a82e39568bd7df52dc21c60 | [
"0BSD"
] | 7 | 2019-02-27T11:48:09.000Z | 2022-02-05T06:02:34.000Z | misctests/strength_conversion_test.py | anttikantee/brewcalc | 1e40773012a59510717a3b0fa426ea3137dde9d1 | [
"0BSD"
] | null | null | null | misctests/strength_conversion_test.py | anttikantee/brewcalc | 1e40773012a59510717a3b0fa426ea3137dde9d1 | [
"0BSD"
] | null | null | null | #!/usr/bin/env python
from WBC.wbc import Recipe
from WBC.units import Strength
def testme(vals, fun, ifun):
rv = []
for x in vals:
v = fun(x)
iv = ifun(v)
diff = abs(x - iv)
rv.append((x, v, iv, diff))
return rv
# NOTE: this file uses Python 2 print statements and is not Python 3 compatible.
def prt(v):
    # Print every (original, converted, round_tripped, diff) tuple from testme().
    for x in v:
        print x[0], x[1], x[2], x[3]
    # Report the entry with the largest round-trip error (sorted by diff, descending).
    sv = sorted(v, key=lambda x: x[3], reverse=True)
    print 'max diff at', sv[0][0], '(' + str(sv[0][3]) + ')'
if __name__ == '__main__':
    # Round-trip specific gravity 1.001..1.139 through SG -> Plato -> SG.
    r = [x / 1000.0 for x in range(1001, 1140)]
    print 'SG to Plato'
    v = testme(r, Strength.sg_to_plato, Strength.plato_to_sg)
    prt(v)
    print
    # Round-trip 0.0..29.8 degrees Plato (step 0.2) through Plato -> SG -> Plato.
    r = [x / 10.0 for x in range(0, 300, 2)]
    print 'Plato to SG'
    v = testme(r, Strength.plato_to_sg, Strength.sg_to_plato)
    prt(v)
| 20.939394 | 58 | 0.612156 |
924f4c1a3f3831dd98e2aba2ba3508eb153815b7 | 8,133 | py | Python | setup_ts.py | Arunachalam96/nni | c80ed3e904e74db7126cecf5e24b9fc7c895c401 | [
"MIT"
] | null | null | null | setup_ts.py | Arunachalam96/nni | c80ed3e904e74db7126cecf5e24b9fc7c895c401 | [
"MIT"
] | null | null | null | setup_ts.py | Arunachalam96/nni | c80ed3e904e74db7126cecf5e24b9fc7c895c401 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Script for building TypeScript modules.
This script is called by `setup.py` and common users should avoid using this directly.
It compiles TypeScript source files in `ts` directory,
and copies (or links) JavaScript output as well as dependencies to `nni_node`.
You can set environment `GLOBAL_TOOLCHAIN=1` to use global node and yarn, if you know what you are doing.
"""
from io import BytesIO
import json
import os
from pathlib import Path
import shutil
import subprocess
import sys
import tarfile
from zipfile import ZipFile
# Pinned toolchain versions fetched by download_toolchain().
node_version = 'v16.3.0'
yarn_version = 'v1.22.10'
def build(release):
    """Compile TypeScript modules and copy or symlink them into nni_node.

    `release` is the version number without the leading letter "v".
    If `release` is None or empty this is a development build, which uses
    symlinks on Linux/macOS; otherwise it is a release build and files are
    copied.  Windows always copies, because creating symlinks there requires
    extra privilege.
    """
    release_build = bool(release)
    # Release builds never rely on a globally installed toolchain.
    if release_build or not os.environ.get('GLOBAL_TOOLCHAIN'):
        download_toolchain()
    prepare_nni_node()
    compile_ts()
    use_copies = release_build or sys.platform == 'win32'
    if use_copies:
        copy_nni_node(release)
    else:
        symlink_nni_node()
def clean(clean_all=False):
    """Remove TypeScript-related intermediate files.

    Python intermediate files are not touched here.  With `clean_all` the
    downloaded toolchain directory is removed as well.
    """
    shutil.rmtree('nni_node', ignore_errors=True)
    for entry in map(Path, generated_files):
        if entry.is_symlink() or entry.is_file():
            entry.unlink()
        elif entry.is_dir():
            shutil.rmtree(entry)
    if clean_all:
        shutil.rmtree('toolchain', ignore_errors=True)
# Per-platform toolchain settings: executable names, download URLs, archive
# extractors, and the PATH environment separator.
if sys.platform == 'linux' or sys.platform == 'darwin':
    node_executable = 'node'
    node_spec = f'node-{node_version}-{sys.platform}-x64'
    node_download_url = f'https://nodejs.org/dist/{node_version}/{node_spec}.tar.xz'
    # node ships as .tar.xz on POSIX platforms
    node_extractor = lambda data: tarfile.open(fileobj=BytesIO(data), mode='r:xz')
    node_executable_in_tarball = 'bin/node'
    yarn_executable = 'yarn'
    yarn_download_url = f'https://github.com/yarnpkg/yarn/releases/download/{yarn_version}/yarn-{yarn_version}.tar.gz'
    path_env_seperator = ':'
elif sys.platform == 'win32':
    node_executable = 'node.exe'
    node_spec = f'node-{node_version}-win-x64'
    node_download_url = f'https://nodejs.org/dist/{node_version}/{node_spec}.zip'
    # node ships as .zip on Windows
    node_extractor = lambda data: ZipFile(BytesIO(data))
    node_executable_in_tarball = 'node.exe'
    yarn_executable = 'yarn.cmd'
    yarn_download_url = f'https://github.com/yarnpkg/yarn/releases/download/{yarn_version}/yarn-{yarn_version}.tar.gz'
    path_env_seperator = ';'
else:
    raise RuntimeError('Unsupported system')
def download_toolchain():
    """
    Download and extract node and yarn into the local ``toolchain`` directory.
    Skipped entirely when the node executable is already present.
    """
    if Path('toolchain/node', node_executable_in_tarball).is_file():
        return
    Path('toolchain').mkdir(exist_ok=True)
    import requests  # place it here so setup.py can install it before importing
    _print(f'Downloading node.js from {node_download_url}')
    resp = requests.get(node_download_url)
    resp.raise_for_status()
    _print('Extracting node.js')
    tarball = node_extractor(resp.content)
    tarball.extractall('toolchain')
    # Replace any stale node directory with the freshly extracted one.
    shutil.rmtree('toolchain/node', ignore_errors=True)
    Path('toolchain', node_spec).rename('toolchain/node')
    _print(f'Downloading yarn from {yarn_download_url}')
    resp = requests.get(yarn_download_url)
    resp.raise_for_status()
    _print('Extracting yarn')
    tarball = tarfile.open(fileobj=BytesIO(resp.content), mode='r:gz')
    tarball.extractall('toolchain')
    # Replace any stale yarn directory with the freshly extracted one.
    shutil.rmtree('toolchain/yarn', ignore_errors=True)
    Path(f'toolchain/yarn-{yarn_version}').rename('toolchain/yarn')
def prepare_nni_node():
    """Create a clean nni_node directory and copy the node runtime into it."""
    shutil.rmtree('nni_node', ignore_errors=True)
    target = Path('nni_node')
    target.mkdir()
    # Make the directory importable as a Python package.
    (target / '__init__.py').write_text('"""NNI node.js modules."""\n')
    shutil.copy(
        Path('toolchain/node', node_executable_in_tarball),
        target / node_executable,
    )
def compile_ts():
    """
    Use yarn to download dependencies and compile TypeScript code.
    """
    _print('Building NNI manager')
    _yarn('ts/nni_manager')           # install dependencies
    _yarn('ts/nni_manager', 'build')  # compile
    # todo: I don't think these should be here
    shutil.rmtree('ts/nni_manager/dist/config', ignore_errors=True)
    shutil.copytree('ts/nni_manager/config', 'ts/nni_manager/dist/config')
    _print('Building web UI')
    _yarn('ts/webui')
    _yarn('ts/webui', 'build')
    _print('Building NAS UI')
    _yarn('ts/nasui')
    _yarn('ts/nasui', 'build')
def symlink_nni_node():
    """
    Create symlinks to compiled JS files.
    If you manually modify and compile TS source files you don't need to install again.
    """
    _print('Creating symlinks')
    # Link every compiled artifact from the nni_manager dist directory.
    for path in Path('ts/nni_manager/dist').iterdir():
        _symlink(path, Path('nni_node', path.name))
    _symlink('ts/nni_manager/package.json', 'nni_node/package.json')
    _symlink('ts/nni_manager/node_modules', 'nni_node/node_modules')
    _symlink('ts/webui/build', 'nni_node/static')
    Path('nni_node/nasui').mkdir(exist_ok=True)
    _symlink('ts/nasui/build', 'nni_node/nasui/build')
    _symlink('ts/nasui/server.js', 'nni_node/nasui/server.js')
def copy_nni_node(version):
    """
    Copy compiled JS files to nni_node.
    This is meant for building a release package, so you need to provide a version string.
    The version will be written to `package.json` in the nni_node directory,
    while `package.json` in the ts directory will be left unchanged.
    """
    _print('Copying files')
    # copytree(..., dirs_exist_ok=True) is not supported by Python 3.6
    for path in Path('ts/nni_manager/dist').iterdir():
        if path.is_file():
            shutil.copyfile(path, Path('nni_node', path.name))
        else:
            shutil.copytree(path, Path('nni_node', path.name))
    package_json = json.load(open('ts/nni_manager/package.json'))
    if version:
        while len(version.split('.')) < 3:  # node.js semver requires at least three parts
            version = version + '.0'
        package_json['version'] = version
    json.dump(package_json, open('nni_node/package.json', 'w'), indent=2)
    # reinstall without development dependencies
    _yarn('ts/nni_manager', '--prod', '--cwd', str(Path('nni_node').resolve()))
    shutil.copytree('ts/webui/build', 'nni_node/static')
    Path('nni_node/nasui').mkdir(exist_ok=True)
    shutil.copytree('ts/nasui/build', 'nni_node/nasui/build')
    shutil.copyfile('ts/nasui/server.js', 'nni_node/nasui/server.js')
# Environment for yarn invocations: put our bundled node runtime first on PATH.
_yarn_env = dict(os.environ)
# `Path('nni_node').resolve()` does not work on Windows if the directory not exists
_yarn_env['PATH'] = str(Path().resolve() / 'nni_node') + path_env_seperator + os.environ['PATH']
# Absolute path to the downloaded yarn executable.
_yarn_path = Path().resolve() / 'toolchain/yarn/bin' / yarn_executable
def _yarn(path, *args):
    """Run yarn with `args` in directory `path`, raising on a non-zero exit.

    Uses the globally installed yarn when GLOBAL_TOOLCHAIN is set, otherwise
    the downloaded toolchain yarn with the prepared environment.
    """
    if os.environ.get('GLOBAL_TOOLCHAIN'):
        command = ['yarn', *args]
        extra = {}
    else:
        command = [str(_yarn_path), *args]
        extra = {'env': _yarn_env}
    subprocess.run(command, cwd=path, check=True, **extra)
def _symlink(target_file, link_location):
target = Path(target_file)
link = Path(link_location)
relative = os.path.relpath(target, link.parent)
link.symlink_to(relative, target.is_dir())
def _print(*args):
if sys.platform == 'win32':
print(*args, flush=True)
else:
print('\033[1;36m#', *args, '\033[0m', flush=True)
# Build artifacts removed by clean(); the toolchain/ directory is handled separately.
generated_files = [
    'ts/nni_manager/dist',
    'ts/nni_manager/node_modules',

    'ts/webui/build',
    'ts/webui/node_modules',

    'ts/nasui/build',
    'ts/nasui/node_modules',

    # unit test
    'ts/nni_manager/.nyc_output',
    'ts/nni_manager/exp_profile.json',
    'ts/nni_manager/htmlcov',
    'ts/nni_manager/metrics.json',
    'ts/nni_manager/trial_jobs.json',
]
| 32.532 | 118 | 0.689168 |
75aa0c5277836ab8982db1594d881c43429ea474 | 1,415 | py | Python | app/core/models.py | bowo-anakdesa/belajar-django-api | 3f2a456f29bbc18ec6e4383b929de41c2d2edbf5 | [
"MIT"
] | 1 | 2019-06-28T02:02:56.000Z | 2019-06-28T02:02:56.000Z | app/core/models.py | bowo-anakdesa/django-api | 3f2a456f29bbc18ec6e4383b929de41c2d2edbf5 | [
"MIT"
] | null | null | null | app/core/models.py | bowo-anakdesa/django-api | 3f2a456f29bbc18ec6e4383b929de41c2d2edbf5 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
class UserManager(BaseUserManager):
    """Manager that creates users keyed by email instead of a username."""

    def create_user(self, email, password=None, **extra_fields):
        """Create and save a new user"""
        if not email:
            raise ValueError('User must have an email address')
        normalized = self.normalize_email(email)
        new_user = self.model(email=normalized, **extra_fields)
        new_user.set_password(password)
        new_user.save(using=self._db)
        return new_user

    def create_superuser(self, email, password):
        """Create and saves a new super user"""
        superuser = self.create_user(email, password)
        superuser.is_staff = True
        superuser.is_superuser = True
        superuser.save(using=self._db)
        return superuser
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model that authenticates with an email address."""
    # NOTE(review): max_length=225 (not the conventional 255) -- confirm intended.
    email = models.EmailField(max_length=225, unique=True)
    name = models.CharField(max_length=225)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    # All user creation goes through the custom manager.
    objects = UserManager()
    # Log in with email instead of a username.
    USERNAME_FIELD = 'email'
class Tag(models.Model):
    """Tag to be used for a recipe"""
    name = models.CharField(max_length=255)
    # Owning user; deleting the user also deletes their tags (CASCADE).
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE
    )

    def __str__(self):
        # Display tags by name in admin/shell output.
        return self.name
| 28.877551 | 76 | 0.675618 |
6731e1c2e9310afe6a8e0f52fd74134649e5e7ca | 3,955 | py | Python | archive/sphinx.py | kennethpjdyer/bacch | 739575af1cf7135e8bc661415e782502276359af | [
"BSD-3-Clause"
] | 3 | 2015-02-26T05:02:08.000Z | 2015-04-11T23:41:05.000Z | archive/sphinx.py | dionysiac-endeavors/bacch | 739575af1cf7135e8bc661415e782502276359af | [
"BSD-3-Clause"
] | 11 | 2016-07-30T05:45:21.000Z | 2016-07-30T06:33:01.000Z | archive/sphinx.py | dionysiac-endeavors/bacch | 739575af1cf7135e8bc661415e782502276359af | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2017, Kenneth P. J. Dyer <kenneth@avoceteditors.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the name of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#from .builders import BacchBuilder, GnomonBuilder
from .build import BacchBuilder
#########################################
# Set Up the Module
def setup(app):
    """Set up the Bacch Sphinx extension: register builders and config values."""
    # Builders
    app.add_builder(BacchBuilder)
    #########################
    # Configuration
    show_titleformat = {
        "before": "",
        "after": ""}
    # Each tuple is (name, default, rebuild) and is passed verbatim to
    # app.add_config_value() below.
    # NOTE(review): several entries pass lists/dicts (e.g. show_titleformat)
    # as the `rebuild` argument, which Sphinx expects to be a rebuild
    # condition string/bool -- the default/rebuild fields may be swapped in
    # places; confirm against Sphinx's add_config_value API.
    configs = [
        # Project Setup
        ('bacch_masters', None, []),
        ('bacch_master_override', None, ''),
        ('bacch_pdfbuild', None, ''),
        ('bacch_pdfbuild_options', None, []),
        ('bacch_use_parts', False, False),
        ('gnomon_use_parts', False, False),
        ('gnomon', False, ''),
        # Document Configuration
        ('bacch_options', None, []),
        ('gnomon_options', None, []),
        ('bacch_gnomon_packages', {}, {}),
        ('bacch_packages', {}, {}),
        ('gnomon_packages', {}, {}),
        ('bacch_gnomon_config', [], []),
        ('bacch_config', [], []),
        ('gnomon_config', [], []),
        ('bacch_showtitlelist', False, False),
        ('gnomon_showtitlelist', False, False),
        ('bacch_showtitlelist_format', {}, show_titleformat),
        ('gnomon_showtitlelist_format', {}, show_titleformat),
        ('bacch_noindent', False, False),
        ('gnomon_noindent', False, False),
        ('bacch_lettrine', False, False),
        ('bacch_lettrine_conf', {}, {}),
        ('gnomon_lettrine', False, False),
        ('gnomon_lettrine_conf', {}, {}),
        ('bacch_numbers', False, False),
        ('gnomon_numbers', False, False),
        ('bacch_pdf', False, False),
        # Formatting
        ('bacch_titlepage', None, ''),
        ('gnomon_titlepage', None, ''),
        ('bacch_tocpage', False, False),
        ('gnomon_tocpage', False, False),
        ('bacch_author', '', ''),
        ('bacch_author_runner', '', ''),
        ('bacch_title_runner', '', ''),
        ('bacch_index', 0, ''),
        ('bacch_documents', None, True),
        ('bacch_latex_block', 'BLOCK', True),
        ('bacch_latex_var', 'VAR', True)
    ]
    # Register every configuration value with Sphinx.
    for (var, default, rebuild) in configs:
        app.add_config_value(var, default, rebuild)
| 38.77451 | 79 | 0.60708 |
20d9dfb1e5f6757c80cfe7a43477b3c63552c333 | 365 | py | Python | cracking-the-coding-interview/src/chapter1/palindrome.py | silphire/training-with-books | bd07f7376996828b6cb4000d654cdc5f53d1c589 | [
"MIT"
] | null | null | null | cracking-the-coding-interview/src/chapter1/palindrome.py | silphire/training-with-books | bd07f7376996828b6cb4000d654cdc5f53d1c589 | [
"MIT"
] | 4 | 2020-01-04T14:05:45.000Z | 2020-01-19T14:53:03.000Z | cracking-the-coding-interview/src/chapter1/palindrome.py | silphire/training-with-books | bd07f7376996828b6cb4000d654cdc5f53d1c589 | [
"MIT"
] | null | null | null | # 1.4
from collections import Counter
def palindrome(s: str) -> bool:
    """Return True if the characters of s (ignoring spaces) can be permuted
    into a palindrome, i.e. at most one character occurs an odd number of
    times.  Comparison is case-sensitive, as in the original.
    """
    stripped = s.replace(' ', '')
    if not stripped:
        return True
    odd_counts = sum(1 for occurrences in Counter(stripped).values() if occurrences % 2)
    return odd_counts <= 1
| 18.25 | 33 | 0.446575 |
4b315d99b885f67bca9bd8f9e32645470a5d8448 | 1,915 | py | Python | inference/online_inference/src/app.py | made-ml-in-prod-2021/marina-zav | 7b4b6e5f333707001e36dfb014dcd36bf975d969 | [
"FTL"
] | null | null | null | inference/online_inference/src/app.py | made-ml-in-prod-2021/marina-zav | 7b4b6e5f333707001e36dfb014dcd36bf975d969 | [
"FTL"
] | null | null | null | inference/online_inference/src/app.py | made-ml-in-prod-2021/marina-zav | 7b4b6e5f333707001e36dfb014dcd36bf975d969 | [
"FTL"
] | null | null | null | import logging
import sys
import time
from typing import List, Optional
import uvicorn
from fastapi import FastAPI
from fastapi.exceptions import RequestValidationError
from fastapi.responses import PlainTextResponse
from sklearn.pipeline import Pipeline
from src.entities import (
read_app_params,
HeartDiseaseModelRequest,
HeartDiseaseModelResponse,
)
from src.models import make_predict, load_model
# Module-level logger: INFO and above, echoed to stdout (container-friendly).
logger = logging.getLogger(__name__)
handler = logging.StreamHandler(sys.stdout)
logger.setLevel(logging.INFO)
logger.addHandler(handler)
# Default application config path; setup_app() reads host/port from it.
DEFAULT_CONFIG_PATH = "configs/app_config.yaml"
# Trained inference pipeline; None until load_app_model() runs at startup.
model: Optional[Pipeline] = None
app = FastAPI()
@app.exception_handler(RequestValidationError)
async def validation_exception_handler(request, exc):
    # Surface request-validation failures as plain-text 400 instead of
    # FastAPI's default 422 JSON payload.
    return PlainTextResponse(str(exc), status_code=400)
@app.get("/")
def main():
    """Root endpoint: static banner message doubling as a liveness check."""
    return "it is entry point of our predictor"
@app.on_event("startup")
def load_app_model():
    """Load the serialized pipeline into the module-level ``model``.

    Registered as a FastAPI startup hook; /healthz reports unhealthy until
    this completes.
    """
    # NOTE(review): fixed delay, presumably waiting for an external resource
    # (volume/sidecar) to become available -- confirm and consider replacing
    # with a real readiness check.
    time.sleep(30)
    # Use the module-level constant instead of duplicating the literal path
    # (the original hardcoded "configs/app_config.yaml" here).
    app_params = read_app_params(DEFAULT_CONFIG_PATH)
    logger.info("Start loading model")
    global model
    model = load_model(app_params.model_path)
    logger.info("Model loaded")
@app.get("/predict/", response_model=List[HeartDiseaseModelResponse])
def predict(request: HeartDiseaseModelRequest):
    """Score the supplied rows with the loaded pipeline."""
    # NOTE(review): a GET endpoint consuming a request body is unusual; some
    # clients drop GET payloads -- consider POST. Behavior left unchanged.
    return make_predict(request.data, request.features, model)
@app.get("/predict_new/", response_model=List[HeartDiseaseModelResponse])
def predict_new(request: HeartDiseaseModelRequest):
    """Duplicate of /predict/ exposed under /predict_new/.

    Renamed from ``predict``: the original re-used the module-level name
    ``predict`` (flake8 F811 redefinition). The HTTP route and behavior are
    unchanged.
    """
    # For checking new code version (new docker image)
    return make_predict(request.data, request.features, model)
@app.get("/healthz")
def health() -> bool:
    """Readiness probe: True once the model has been loaded at startup."""
    # Idiomatic form of the original `not (model is None)`.
    return model is not None
def setup_app():
    """Read host/port from the default config and serve the app with uvicorn."""
    app_params = read_app_params(DEFAULT_CONFIG_PATH)
    logger.info(f"Running app on {app_params.host} with port {app_params.port}")
    uvicorn.run(app, host=app_params.host, port=app_params.port)
# Allow running the service directly (python app.py) outside an ASGI launcher.
if __name__ == "__main__":
    setup_app()
| 26.232877 | 80 | 0.769191 |
e22c2b245026e9188f8c14a3c1eaaf2c45bd7fc6 | 19,144 | py | Python | aas_core_codegen/csharp/description.py | aas-core-works/aas-core-codegen | afec2cf363b6cb69816e7724a2b58626e2165869 | [
"MIT"
] | 5 | 2021-12-29T12:55:34.000Z | 2022-03-01T17:57:21.000Z | aas_core_codegen/csharp/description.py | aas-core-works/aas-core-codegen | afec2cf363b6cb69816e7724a2b58626e2165869 | [
"MIT"
] | 10 | 2021-12-29T02:15:55.000Z | 2022-03-09T11:04:22.000Z | aas_core_codegen/csharp/description.py | aas-core-works/aas-core-codegen | afec2cf363b6cb69816e7724a2b58626e2165869 | [
"MIT"
] | 2 | 2021-12-29T01:42:12.000Z | 2022-02-15T13:46:33.000Z | """Render descriptions to C# documentation comments."""
import io
import textwrap
import xml.sax.saxutils
from typing import Tuple, Optional, List
import docutils.nodes
import docutils.parsers.rst.roles
import docutils.utils
from icontract import require
from aas_core_codegen import intermediate
from aas_core_codegen.common import Stripped, Error, assert_never, Identifier
from aas_core_codegen.csharp import (
naming as csharp_naming,
)
from aas_core_codegen.csharp.common import INDENT as I
from aas_core_codegen.intermediate import (
doc as intermediate_doc,
rendering as intermediate_rendering,
_translate as intermediate_translate,
)
class _ElementRenderer(intermediate_rendering.DocutilsElementTransformer[str]):
    """
    Render descriptions as C# docstring XML.

    Every ``transform_*`` method returns a pair ``(rendered_text, errors)``
    where exactly one of the two is not None.
    """
    def transform_text(
        self, element: docutils.nodes.Text
    ) -> Tuple[Optional[str], Optional[List[str]]]:
        """Render a plain text node, XML-escaped for the doc comment."""
        return xml.sax.saxutils.escape(element.astext()), None
    def transform_symbol_reference_in_doc(
        self, element: intermediate_doc.SymbolReference
    ) -> Tuple[Optional[str], Optional[List[str]]]:
        """Render a reference to a meta-model symbol as ``<see cref=... />``."""
        name = None  # type: Optional[str]
        if isinstance(element.symbol, intermediate.Enumeration):
            name = csharp_naming.enum_name(element.symbol.name)
        elif isinstance(element.symbol, intermediate.ConstrainedPrimitive):
            # NOTE (mristin, 2021-12-17):
            # We do not generate a class for constrained primitives, but we
            # leave it as class name, as that is what we used for ``Verify*`` function.
            name = csharp_naming.class_name(element.symbol.name)
        elif isinstance(element.symbol, intermediate.Class):
            if isinstance(element.symbol, intermediate.AbstractClass):
                # NOTE (mristin, 2021-12-25):
                # We do not generate C# code for abstract classes, so we have to refer
                # to the interface.
                name = csharp_naming.interface_name(element.symbol.name)
            elif isinstance(element.symbol, intermediate.ConcreteClass):
                # NOTE (mristin, 2021-12-25):
                # Though a concrete class can have multiple descendants and the writer
                # might actually want to refer to the *interface* instead of
                # the concrete class, we do the best effort here and resolve it to the
                # name of the concrete class.
                name = csharp_naming.class_name(element.symbol.name)
            else:
                assert_never(element.symbol)
        else:
            # NOTE (mristin, 2022-03-30):
            # This is a very special case where we had problems with an interface.
            # We leave this check here, just in case the bug resurfaces.
            if isinstance(element.symbol, intermediate_translate._PlaceholderSymbol):
                return None, [
                    f"Unexpected placeholder for the symbol: {element.symbol}; "
                    f"this is a bug"
                ]
            assert_never(element.symbol)
        assert name is not None
        return f"<see cref={xml.sax.saxutils.quoteattr(name)} />", None
    def transform_attribute_reference_in_doc(
        self, element: intermediate_doc.AttributeReference
    ) -> Tuple[Optional[str], Optional[List[str]]]:
        """Render a property/enum-literal reference as ``<see cref=... />``."""
        cref = None  # type: Optional[str]
        if isinstance(element.reference, intermediate_doc.PropertyReference):
            symbol_name = None  # type: Optional[str]
            if isinstance(element.reference.cls, intermediate.AbstractClass):
                # We do not generate C# code for abstract classes, so we have to refer
                # to the interface.
                symbol_name = csharp_naming.interface_name(element.reference.cls.name)
            elif isinstance(element.reference.cls, intermediate.ConcreteClass):
                # NOTE (mristin, 2021-12-25):
                # Though a concrete class can have multiple descendants and the writer
                # might actually want to refer to the *interface* instead of
                # the concrete class, we do the best effort here and resolve it to the
                # name of the concrete class.
                symbol_name = csharp_naming.class_name(element.reference.cls.name)
            else:
                assert_never(element.reference.cls)
            prop_name = csharp_naming.property_name(element.reference.prop.name)
            assert symbol_name is not None
            cref = f"{symbol_name}.{prop_name}"
        elif isinstance(
            element.reference, intermediate_doc.EnumerationLiteralReference
        ):
            symbol_name = csharp_naming.enum_name(element.reference.symbol.name)
            literal_name = csharp_naming.enum_literal_name(
                element.reference.literal.name
            )
            cref = f"{symbol_name}.{literal_name}"
        else:
            # NOTE (mristin, 2022-03-30):
            # This is a very special case where we had problems with an interface.
            # We leave this check here, just in case the bug resurfaces.
            if isinstance(
                element.reference, intermediate_translate._PlaceholderAttributeReference
            ):
                return None, [
                    f"Unexpected placeholder "
                    f"for the attribute reference: {element.reference}; "
                    f"this is a bug"
                ]
            assert_never(element.reference)
        assert cref is not None
        return f"<see cref={xml.sax.saxutils.quoteattr(cref)} />", None
    def transform_argument_reference_in_doc(
        self, element: intermediate_doc.ArgumentReference
    ) -> Tuple[Optional[str], Optional[List[str]]]:
        """Render an argument reference as ``<paramref name=... />``."""
        arg_name = csharp_naming.argument_name(Identifier(element.reference))
        return f"<paramref name={xml.sax.saxutils.quoteattr(arg_name)} />", None
    def transform_constraint_reference_in_doc(
        self, element: intermediate_doc.ConstraintReference
    ) -> Tuple[Optional[str], Optional[List[str]]]:
        """Render a constraint reference as plain text (no C# cross-ref exists)."""
        return f"Constraint {element.reference}", None
    def transform_literal(
        self, element: docutils.nodes.literal
    ) -> Tuple[Optional[str], Optional[List[str]]]:
        """Render inline literal text as ``<c>...</c>``."""
        return f"<c>{xml.sax.saxutils.escape(element.astext())}</c>", None
    def transform_paragraph(
        self, element: docutils.nodes.paragraph
    ) -> Tuple[Optional[str], Optional[List[str]]]:
        """Render a paragraph by concatenating its transformed children."""
        parts = []  # type: List[str]
        for child in element.children:
            text, error = self.transform(child)
            if error is not None:
                # Fail fast on the first erroneous child.
                return None, error
            assert text is not None
            parts.append(text)
        return "".join(parts), None
    def transform_emphasis(
        self, element: docutils.nodes.emphasis
    ) -> Tuple[Optional[str], Optional[List[str]]]:
        """Render emphasized text wrapped in ``<em>...</em>``."""
        parts = []  # type: List[str]
        for child in element.children:
            text, error = self.transform(child)
            if error is not None:
                return None, error
            assert text is not None
            parts.append(text)
        return "<em>{}</em>".format("".join(parts)), None
    def transform_list_item(
        self, element: docutils.nodes.list_item
    ) -> Tuple[Optional[str], Optional[List[str]]]:
        """Render a list item wrapped in ``<li>...</li>``, collecting all errors."""
        parts = []  # type: List[str]
        errors = []  # type: List[str]
        for child in element.children:
            text, child_errors = self.transform(child)
            if child_errors is not None:
                errors.extend(child_errors)
            else:
                assert text is not None
                parts.append(text)
        if len(errors) > 0:
            return None, errors
        return "<li>{}</li>".format("".join(parts)), None
    def transform_bullet_list(
        self, element: docutils.nodes.bullet_list
    ) -> Tuple[Optional[str], Optional[List[str]]]:
        """Render a bullet list wrapped in ``<ul>...</ul>``."""
        parts = ["<ul>\n"]
        errors = []  # type: List[str]
        for child in element.children:
            text, child_errors = self.transform(child)
            if child_errors is not None:
                errors.extend(child_errors)
            else:
                assert text is not None
                parts.append(f"{text}\n")
        if len(errors) > 0:
            return None, errors
        parts.append("</ul>")
        return "".join(parts), None
    def transform_note(
        self, element: docutils.nodes.note
    ) -> Tuple[Optional[str], Optional[List[str]]]:
        """Render a note by concatenating its children (no extra markup)."""
        parts = []  # type: List[str]
        errors = []  # type: List[str]
        for child in element.children:
            text, child_errors = self.transform(child)
            if child_errors is not None:
                errors.extend(child_errors)
            else:
                assert text is not None
                parts.append(text)
        if len(errors) > 0:
            return None, errors
        return "".join(parts), None
    def transform_reference(
        self, element: docutils.nodes.reference
    ) -> Tuple[Optional[str], Optional[List[str]]]:
        """Render a reference by concatenating its children (same as notes)."""
        parts = []  # type: List[str]
        errors = []  # type: List[str]
        for child in element.children:
            text, child_errors = self.transform(child)
            if child_errors is not None:
                errors.extend(child_errors)
            else:
                assert text is not None
                parts.append(text)
        if len(errors) > 0:
            return None, errors
        return "".join(parts), None
    def _transform_children_joined_with_double_new_line(
        self, element: docutils.nodes.Element
    ) -> Tuple[Optional[str], Optional[List[str]]]:
        """Transform the ``element``'s children and join them with a double new-line."""
        if len(element.children) == 0:
            return "", None
        if len(element.children) == 1:
            return self.transform(element.children[0])
        parts = []  # type: List[str]
        errors = []  # type: List[str]
        for child in element.children:
            part, child_errors = self.transform(child)
            if child_errors is not None:
                errors.extend(child_errors)
            else:
                assert part is not None
                parts.append(part)
        if len(errors) > 0:
            return None, errors
        return "\n\n".join(parts), None
    def transform_field_body(
        self, element: docutils.nodes.field_body
    ) -> Tuple[Optional[str], Optional[List[str]]]:
        """Render a field body as paragraph blocks separated by blank lines."""
        return self._transform_children_joined_with_double_new_line(element=element)
    def transform_document(
        self, element: docutils.nodes.field_body
    ) -> Tuple[Optional[str], Optional[List[str]]]:
        """Render a whole document as blocks separated by blank lines."""
        # NOTE(review): the annotation says ``field_body`` but this hook
        # presumably receives a ``document`` node -- confirm against the
        # base transformer's signature before changing it.
        return self._transform_children_joined_with_double_new_line(element=element)
@require(lambda line: "\n" not in line)
def _slash_slash_slash_line(line: str) -> str:
    """Prefix the single ``line`` with the C# doc-comment marker ``///``."""
    # An empty line gets a bare marker without the trailing space.
    return f"/// {line}" if line else "///"
def _generate_summary_remarks(
    description: intermediate.SummaryRemarksDescription,
) -> Tuple[Optional[Stripped], Optional[List[Error]]]:
    """
    Generate the documentation comment for a summary-remarks description.

    (The original docstring wrongly said "summary-remarks-constraints";
    this function renders no constraints.)

    :param description: parsed summary and remarks of a meta-model element
    :return: the rendered ``///`` comment block, or the rendering errors
    """
    errors = []  # type: List[Error]
    renderer = _ElementRenderer()
    summary, summary_errors = renderer.transform(description.summary)
    if summary_errors is not None:
        errors.extend(
            Error(description.parsed.node, message) for message in summary_errors
        )
    remarks = []  # type: List[str]
    # Keep the source element and its rendering in separate names (the
    # original re-bound the loop variable ``remark``, shadowing the element).
    for remark_element in description.remarks:
        remark, remark_errors = renderer.transform(remark_element)
        if remark_errors is not None:
            errors.extend(
                Error(description.parsed.node, message) for message in remark_errors
            )
        else:
            assert remark is not None
            remarks.append(remark)
    if len(errors) > 0:
        return None, errors
    assert summary is not None
    # Don't use textwrap.dedent to preserve the formatting
    blocks = [
        Stripped(
            f"""\
<summary>
{summary}
</summary>"""
        )
    ]
    if len(remarks) > 0:
        remarks_joined = "\n\n".join(remarks)
        blocks.append(
            Stripped(
                f"""\
<remarks>
{remarks_joined}
</remarks>"""
            )
        )
    commented_lines = [
        _slash_slash_slash_line(line) for block in blocks for line in block.splitlines()
    ]
    return Stripped("\n".join(commented_lines)), None
def _generate_summary_remarks_constraints(
    description: intermediate.SummaryRemarksConstraintsDescription,
) -> Tuple[Optional[Stripped], Optional[List[Error]]]:
    """Generate the documentation comment for a summary-remarks-constraints."""
    errors = []  # type: List[Error]
    renderer = _ElementRenderer()
    summary, summary_errors = renderer.transform(description.summary)
    if summary_errors is not None:
        errors.extend(
            Error(description.parsed.node, message) for message in summary_errors
        )
    remarks = []  # type: List[str]
    # NOTE(review): the loop variable ``remark`` is re-bound to its rendering
    # (PLW2901) -- works, but consider separate names for clarity.
    for remark in description.remarks:
        remark, remark_errors = renderer.transform(remark)
        if remark_errors is not None:
            errors.extend(
                Error(description.parsed.node, message) for message in remark_errors
            )
        else:
            assert remark is not None
            remarks.append(remark)
    # Render each constraint as "Constraint <id>:" followed by its body.
    constraints = []  # type: List[str]
    for identifier, body_element in description.constraints_by_identifier.items():
        body, body_errors = renderer.transform(body_element)
        if body_errors is not None:
            errors.extend(
                Error(description.parsed.node, message) for message in body_errors
            )
        else:
            assert body is not None
            constraints.append(
                f"Constraint {xml.sax.saxutils.escape(identifier)}:\n{body}"
            )
    # Report all accumulated errors at once instead of failing on the first.
    if len(errors) > 0:
        return None, errors
    assert summary is not None
    # Don't use textwrap.dedent to preserve the formatting
    blocks = [
        Stripped(
            f"""\
<summary>
{summary}
</summary>"""
        )
    ]
    # Constraints are appended to the remarks as an HTML list.
    if len(constraints) > 0:
        constraints_writer = io.StringIO()
        constraints_writer.write("Constraints:\n<ul>\n")
        for constraint in constraints:
            constraints_writer.write(textwrap.indent(f"<li>\n{constraint}\n</li>\n", I))
        constraints_writer.write("</ul>")
        remarks.append(constraints_writer.getvalue())
    if len(remarks) > 0:
        remarks_joined = "\n\n".join(remarks)
        blocks.append(
            Stripped(
                f"""\
<remarks>
{remarks_joined}
</remarks>"""
            )
        )
    # Prefix every line of every block with the C# doc-comment marker.
    commented_lines = [
        _slash_slash_slash_line(line) for block in blocks for line in block.splitlines()
    ]
    return Stripped("\n".join(commented_lines)), None
def generate_meta_model_comment(
    description: intermediate.MetaModelDescription,
) -> Tuple[Optional[Stripped], Optional[List[Error]]]:
    """Generate the documentation comment for the given meta-model."""
    # Meta-model descriptions carry summary, remarks and constraints.
    return _generate_summary_remarks_constraints(description)
def generate_symbol_comment(
    description: intermediate.SymbolDescription,
) -> Tuple[Optional[Stripped], Optional[List[Error]]]:
    """Generate the documentation comment for the given symbol."""
    # Symbols (classes, enumerations, ...) document constraints as well.
    return _generate_summary_remarks_constraints(description)
def generate_property_comment(
    description: intermediate.PropertyDescription,
) -> Tuple[Optional[Stripped], Optional[List[Error]]]:
    """Generate the documentation comment for the given property."""
    # Properties document constraints as well.
    return _generate_summary_remarks_constraints(description)
def generate_enumeration_literal_comment(
    description: intermediate.EnumerationLiteralDescription,
) -> Tuple[Optional[Stripped], Optional[List[Error]]]:
    """Generate the documentation comment for the given enumeration literal."""
    # Literals carry only a summary and remarks -- no constraints.
    return _generate_summary_remarks(description)
def generate_signature_comment(
    description: intermediate.SignatureDescription,
) -> Tuple[Optional[Stripped], Optional[List[Error]]]:
    """
    Generate the documentation comment for the given signature.

    A signature, in this context, means a function or a method signature.
    Renders ``<summary>``, ``<remarks>``, ``<param>`` and ``<returns>``
    sections as a C# ``///`` comment block.
    """
    errors = []  # type: List[Error]
    renderer = _ElementRenderer()
    summary, summary_errors = renderer.transform(description.summary)
    if summary_errors is not None:
        errors.extend(
            Error(description.parsed.node, message) for message in summary_errors
        )
    remarks = []  # type: List[str]
    # NOTE(review): loop variable ``remark`` re-bound to its rendering
    # (PLW2901) -- works, but separate names would be clearer.
    for remark in description.remarks:
        remark, remark_errors = renderer.transform(remark)
        if remark_errors is not None:
            errors.extend(
                Error(description.parsed.node, message) for message in remark_errors
            )
        else:
            assert remark is not None
            remarks.append(remark)
    # One <param> element per documented argument, in declaration order.
    params = []  # type: List[Stripped]
    for name, body_element in description.arguments_by_name.items():
        body, body_errors = renderer.transform(body_element)
        if body_errors is not None:
            errors.extend(
                Error(description.parsed.node, message) for message in body_errors
            )
        else:
            assert body is not None
            # Don't use textwrap.dedent to preserve the formatting
            params.append(
                Stripped(
                    f"""\
<param name={xml.sax.saxutils.quoteattr(name)}>
{body}
</param>"""
                )
            )
    returns = None  # type: Optional[str]
    if description.returns is not None:
        # We need to help the type checker in PyCharm a bit.
        assert isinstance(description.returns, docutils.nodes.field_body)
        returns, returns_errors = renderer.transform(description.returns)
        if returns_errors is not None:
            errors.extend(
                Error(description.parsed.node, message) for message in returns_errors
            )
        else:
            assert returns is not None
    # All rendering errors are collected and reported together.
    if len(errors) > 0:
        return None, errors
    assert summary is not None
    # Don't use textwrap.dedent to preserve the formatting
    blocks = [
        Stripped(
            f"""\
<summary>
{summary}
</summary>"""
        )
    ]
    if len(remarks) > 0:
        remarks_joined = "\n\n".join(remarks)
        blocks.append(
            Stripped(
                f"""\
<remarks>
{remarks_joined}
</remarks>"""
            )
        )
    if len(params) > 0:
        params_joined = "\n".join(params)
        blocks.append(Stripped(params_joined))
    if returns is not None:
        blocks.append(
            Stripped(
                f"""\
<returns>
{returns}
</returns>"""
            )
        )
    # Prefix every line of every block with the C# doc-comment marker.
    commented_lines = [
        _slash_slash_slash_line(line) for block in blocks for line in block.splitlines()
    ]
    return Stripped("\n".join(commented_lines)), None
| 33.293913 | 88 | 0.617269 |
b75d8793911f64f3b879dd499516fed2435b57a0 | 11,768 | py | Python | aries-test-harness/features/steps/0453-issue-credential-v2.py | Matt-Spence/aries-agent-test-harness | a7868625d9675f2e54897b1a84b54fdb1b75dcaa | [
"Apache-2.0"
] | null | null | null | aries-test-harness/features/steps/0453-issue-credential-v2.py | Matt-Spence/aries-agent-test-harness | a7868625d9675f2e54897b1a84b54fdb1b75dcaa | [
"Apache-2.0"
] | null | null | null | aries-test-harness/features/steps/0453-issue-credential-v2.py | Matt-Spence/aries-agent-test-harness | a7868625d9675f2e54897b1a84b54fdb1b75dcaa | [
"Apache-2.0"
] | null | null | null | from behave import *
import json
from agent_backchannel_client import agent_backchannel_GET, agent_backchannel_POST, expected_agent_state
from agent_test_utils import format_cred_proposal_by_aip_version
from time import sleep
# Credential-format identifiers shared by all issue-credential-v2 steps.
CRED_FORMAT_INDY = "indy"
CRED_FORMAT_JSON_LD = "json-ld"
@given('"{issuer}" is ready to issue a "{cred_format}" credential')
def step_impl(context, issuer: str, cred_format: str = CRED_FORMAT_INDY):
    """Prepare *issuer* to issue a credential of the given format.

    Indy delegates to the legacy step; JSON-LD asks the issuer's backchannel
    to prepare a DID and records it in ``context.issuer_did_dict``.
    """
    if cred_format == CRED_FORMAT_INDY:
        # Call legacy indy ready to issue credential step
        context.execute_steps(f'''
            Given '"{issuer}" is ready to issue a credential'
        ''')
    elif cred_format == CRED_FORMAT_JSON_LD:
        issuer_url = context.config.userdata.get(issuer)
        data = {
            "did_method": context.did_method,
            "proof_type": context.proof_type
        }
        (resp_status, resp_text) = agent_backchannel_POST(issuer_url + "/agent/command/", "issue-credential-v2", operation="prepare-json-ld", data=data)
        assert resp_status == 200, f'issue-credential-v2/prepare-json-ld: resp_status {resp_status} is not 200; {resp_text}'
        resp_json = json.loads(resp_text)
        # TODO: it would be nice to not depend on the schema name for the issuer did dict
        if 'issuer_did_dict' in context:
            context.issuer_did_dict[context.schema['schema_name']] = resp_json["did"]
        else:
            context.issuer_did_dict = {context.schema['schema_name']: resp_json["did"]}
    else:
        raise Exception(f"Unknown credential format {cred_format}")
@given('"{holder}" proposes a "{cred_format}" credential to "{issuer}" with {credential_data}')
@when('"{holder}" proposes a "{cred_format}" credential to "{issuer}" with {credential_data}')
def step_impl(context, holder, cred_format, issuer, credential_data):
    """Have *holder* send an issue-credential-v2 proposal to *issuer* for every
    schema in ``context.schema_dict``, loading attributes (and, for AIP20,
    filters) from ``features/data/cred_data_<schema>.json``.

    Sets ``context.cred_thread_id`` (last schema wins) among other keys.
    """
    if "schema_dict" in context:
        for schema in context.schema_dict:
            data_path = 'features/data/cred_data_' + schema.lower() + '.json'
            try:
                # Use a context manager so the file handle is not leaked
                # (the original left the file open).
                with open(data_path) as credential_data_json_file:
                    credential_data_json = json.load(credential_data_json_file)
            except FileNotFoundError:
                # BUGFIX: the original did `FileNotFoundError + ': ...'`,
                # concatenating the exception *class* with a str, which raised
                # TypeError inside the handler. Log the missing path instead.
                # NOTE(review): execution continues and the lookups below will
                # fail on an undefined credential_data_json if the very first
                # file is missing -- confirm whether this should abort.
                print('FileNotFoundError: ' + data_path)
            if 'credential_data_dict' in context:
                context.credential_data_dict[schema] = credential_data_json[credential_data]['attributes']
            else:
                context.credential_data_dict = {schema: credential_data_json[credential_data]['attributes']}
            if "AIP20" in context.tags:
                if 'filters_dict' in context:
                    context.filters_dict[schema] = credential_data_json[credential_data]['filters']
                else:
                    context.filters_dict = {schema: credential_data_json[credential_data]['filters']}
    for schema in context.schema_dict:
        context.credential_data = context.credential_data_dict[schema]
        context.schema = context.schema_dict[schema]
        if "AIP20" in context.tags:
            context.filters = context.filters_dict[schema]
        holder_url = context.config.userdata.get(holder)
        # check for a schema template already loaded in the context. If it is, it was loaded from an external Schema, so use it.
        if "credential_data" in context:
            cred_data = context.credential_data
            if "AIP20" in context.tags:
                # We only want to send data for the cred format being used
                assert cred_format in context.filters, f"credential data has no filter for cred format {cred_format}"
                filters = {
                    cred_format: context.filters[cred_format]
                }
            # This call may need to be formated by cred_format instead of version. Reassess when more types are used.
            # NOTE(review): `filters` is unbound when "AIP20" is not in the
            # scenario tags, and `credential_offer` below is unbound when
            # "credential_data" is missing -- presumably these paths never
            # occur in practice; confirm.
            credential_offer = format_cred_proposal_by_aip_version(
                context, "AIP20", cred_data, context.connection_id_dict[holder][issuer], filters)
        (resp_status, resp_text) = agent_backchannel_POST(holder_url + "/agent/command/", "issue-credential-v2", operation="send-proposal", data=credential_offer)
        assert resp_status == 200, f'resp_status {resp_status} is not 200; {resp_text}'
        resp_json = json.loads(resp_text)
        # Check the State of the credential
        assert resp_json["state"] == "proposal-sent"
        # Get the thread ID from the response text.
        context.cred_thread_id = resp_json["thread_id"]
@given('"{issuer}" offers the "{cred_format}" credential')
@when('"{issuer}" offers the "{cred_format}" credential')
def step_impl(context, issuer, cred_format):
    """Have *issuer* send a credential offer, either starting the protocol
    fresh (no ``cred_thread_id`` yet) or continuing an existing thread.
    """
    issuer_url = context.config.userdata.get(issuer)
    # if context does not have the credential thread id then the proposal was not the starting point for the protocol.
    if not "cred_thread_id" in context:
        if "credential_data" in context:
            cred_data = context.credential_data
            # We only want to send data for the cred format being used
            assert cred_format in context.filters, f"credential data has no filter for cred format {cred_format}"
            filters = {
                cred_format: context.filters[cred_format]
            }
            credential_offer = format_cred_proposal_by_aip_version(context, "AIP20", cred_data, context.connection_id_dict[issuer][context.holder_name], filters)
        # NOTE(review): credential_offer is unbound if "credential_data" is
        # missing from the context -- presumably that never happens; confirm.
        (resp_status, resp_text) = agent_backchannel_POST(issuer_url + "/agent/command/", "issue-credential-v2", operation="send-offer", data=credential_offer)
        assert resp_status == 200, f'resp_status {resp_status} is not 200; {resp_text}'
        resp_json = json.loads(resp_text)
        context.cred_thread_id = resp_json["thread_id"]
    else:
        # If context has the credential thread id then the proposal was done.
        (resp_status, resp_text) = agent_backchannel_POST(issuer_url + "/agent/command/", "issue-credential-v2", operation="send-offer", id=context.cred_thread_id)
        assert resp_status == 200, f'resp_status {resp_status} is not 200; {resp_text}'
        resp_json = json.loads(resp_text)
    # Check the issuers State
    assert resp_json["state"] == "offer-sent"
    # Check the state of the holder after issuers call of send-offer
    assert expected_agent_state(context.holder_url, "issue-credential-v2", context.cred_thread_id, "offer-received")
@when('"{holder}" requests the "{cred_format}" credential')
def step_impl(context, holder, cred_format):
    """Have *holder* send a credential request on the existing thread and
    verify both agents reach the expected states.
    """
    holder_url = context.holder_url
    # # If @indy then we can be sure we cannot start the protocol from this command. We can be sure that we have previously
    # # received the thread_id.
    # if "Indy" in context.tags:
    #     sleep(1)
    (resp_status, resp_text) = agent_backchannel_POST(holder_url + "/agent/command/", "issue-credential-v2", operation="send-request", id=context.cred_thread_id)
    # # If we are starting from here in the protocol you won't have the cred_ex_id or the thread_id
    # else:
    #     (resp_status, resp_text) = agent_backchannel_POST(holder_url + "/agent/command/", "issue-credential-v2", operation="send-request", id=context.connection_id_dict[holder][context.issuer_name])
    assert resp_status == 200, f'resp_status {resp_status} is not 200; {resp_text}'
    resp_json = json.loads(resp_text)
    assert resp_json["state"] == "request-sent"
    # Verify issuer status
    assert expected_agent_state(context.issuer_url, "issue-credential-v2", context.cred_thread_id, "request-received")
@when('"{issuer}" issues the "{cred_format}" credential')
def step_impl(context, issuer, cred_format):
    """Have *issuer* issue the credential on the current thread and verify
    issuer and holder reach the expected states.
    """
    issuer_url = context.config.userdata.get(issuer)
    credential_issue = {
        "comment": "issuing credential"
    }
    (resp_status, resp_text) = agent_backchannel_POST(issuer_url + "/agent/command/", "issue-credential-v2", operation="issue", id=context.cred_thread_id, data=credential_issue)
    assert resp_status == 200, f'resp_status {resp_status} is not 200; {resp_text}'
    resp_json = json.loads(resp_text)
    assert resp_json["state"] == "credential-issued"
    # Verify holder status
    assert expected_agent_state(context.holder_url, "issue-credential-v2", context.cred_thread_id, "credential-received")
@when('"{holder}" acknowledges the "{cred_format}" credential issue')
def step_impl(context, holder, cred_format):
    """Have *holder* store the received credential, record its id in
    ``context.credential_id_dict``, and -- when revocation is supported --
    capture the revocation identifiers from the issuer's webhook response.
    """
    holder_url = context.config.userdata.get(holder)
    # a credential id shouldn't be needed with a cred_ex_id being passed
    # credential_id = {
    #     "credential_id": context.cred_thread_id,
    # }
    credential_id = {
        "comment": "storing credential"
    }
    # NOTE(review): fixed 1s pause before storing -- presumably waiting for
    # the agent to settle; confirm whether polling would be more robust.
    sleep(1)
    (resp_status, resp_text) = agent_backchannel_POST(holder_url + "/agent/command/", "issue-credential-v2", operation="store", id=context.cred_thread_id, data=credential_id)
    assert resp_status == 200, f'resp_status {resp_status} is not 200; {resp_text}'
    resp_json = json.loads(resp_text)
    assert resp_json["state"] == "done"
    # Re-bind credential_id from the request payload to the stored id.
    credential_id = resp_json[cred_format]["credential_id"]
    #credential_id = resp_json["cred_ex_record"]["cred_id_stored"]
    if 'credential_id_dict' in context:
        try:
            context.credential_id_dict[context.schema['schema_name']].append(credential_id)
        except KeyError:
            context.credential_id_dict[context.schema['schema_name']] = [credential_id]
    else:
        context.credential_id_dict = {context.schema['schema_name']: [credential_id]}
    # Verify issuer status
    # TODO This is returning none instead of Done. Should this be the case. Needs investigation.
    #assert expected_agent_state(context.issuer_url, "issue-credential-v2", context.cred_thread_id, "done")
    # if the credential supports revocation, get the Issuers webhook callback JSON from the store command
    # From that JSON save off the credential revocation identifier, and the revocation registry identifier.
    if "support_revocation" in context:
        if context.support_revocation:
            (resp_status, resp_text) = agent_backchannel_GET(context.config.userdata.get(context.issuer_name) + "/agent/response/", "revocation-registry", id=context.cred_thread_id)
            assert resp_status == 200, f'resp_status {resp_status} is not 200; {resp_text}'
            resp_json = json.loads(resp_text)
            context.cred_rev_id = resp_json["revocation_id"]
            context.rev_reg_id = resp_json["revoc_reg_id"]
@then('"{holder}" has the "{cred_format}" credential issued')
def step_impl(context, holder, cred_format):
    """Verify the most recently stored credential is present in *holder*'s
    wallet and its identifier matches what was recorded at store time.
    """
    holder_url = context.config.userdata.get(holder)
    # get the credential from the holders wallet
    (resp_status, resp_text) = agent_backchannel_GET(holder_url + "/agent/command/", "credential", id=context.credential_id_dict[context.schema['schema_name']][-1])
    assert resp_status == 200, f'resp_status {resp_status} is not 200; {resp_text}'
    resp_json = json.loads(resp_text)
    if cred_format == CRED_FORMAT_INDY:
        #assert resp_json["schema_id"] == context.issuer_schema_id_dict[context.schema["schema_name"]]
        #assert resp_json["cred_def_id"] == context.credential_definition_id_dict[context.schema["schema_name"]]
        assert resp_json["referent"] == context.credential_id_dict[context.schema['schema_name']][-1]
    elif cred_format == CRED_FORMAT_JSON_LD:
        # TODO: do not use schema name for credential_id_dict
        assert resp_json["credential_id"] == context.credential_id_dict[context.schema['schema_name']][-1]
    else:
        # BUGFIX: an unknown format previously fell through and the step
        # passed without checking anything. Fail loudly instead, consistent
        # with the "is ready to issue" step.
        raise Exception(f"Unknown credential format {cred_format}")
@when('"{holder}" negotiates the offer with another proposal of the "{cred_format}" credential to "{issuer}"')
def step_impl(context, holder, cred_format, issuer):
    """Send a counter-proposal by re-running the "proposes" step."""
    # f-string instead of the fragile ''' + var + ''' concatenation; the
    # resulting step text is byte-identical.
    # NOTE(review): the invoked text omits the trailing 'with {credential_data}'
    # that the "proposes" step pattern declares -- confirm it matches a
    # registered step definition.
    context.execute_steps(f'''
        When "{holder}" proposes a "{cred_format}" credential to "{issuer}"
    ''')
1417eaf111939fa332430aaf9b1dd4f35f49420f | 259 | py | Python | nydkcd11/projects/urls.py | asi14/nydkc11 | 4c40f7f3d0829ee3659a5edc9a03ffdb47b5bdc4 | [
"MIT"
] | null | null | null | nydkcd11/projects/urls.py | asi14/nydkc11 | 4c40f7f3d0829ee3659a5edc9a03ffdb47b5bdc4 | [
"MIT"
] | 9 | 2020-02-11T21:47:24.000Z | 2022-03-11T23:11:44.000Z | nydkcd11/projects/urls.py | asi14/nydkc11 | 4c40f7f3d0829ee3659a5edc9a03ffdb47b5bdc4 | [
"MIT"
] | 1 | 2018-04-26T04:37:10.000Z | 2018-04-26T04:37:10.000Z | from django.conf.urls import url
from . import views
# URL namespace for this app; reverse with e.g. 'projects:detail'.
app_name = 'projects'
urlpatterns = [
    # /<level_id>/<slug>/ -> views.detail
    url(r'^(?P<level_id>[0-9]+)/(?P<slug>[\w-]+)/$', views.detail, name = 'detail'),
    # /<level_id>/ -> views.detail_redirect (presumably redirects to the
    # slugged detail URL -- confirm against the view).
    url(r'^(?P<level_id>[0-9]+)/$', views.detail_redirect, name = 'detail_redirect'),
]
| 28.777778 | 82 | 0.637066 |
b83a2263058fd0903bb0366a0a6a4bb18d1952e2 | 14,627 | py | Python | autotabular/pipeline/components/base.py | jianzhnie/AutoTabular | fb407300adf97532a26d33f7442d2a606fa30512 | [
"Apache-2.0"
] | 48 | 2021-09-06T08:09:26.000Z | 2022-03-28T13:02:54.000Z | autotabular/pipeline/components/base.py | Fanxingye/Autotabular | d630c78290a52f8c73885afb16884e18135c34f6 | [
"Apache-2.0"
] | null | null | null | autotabular/pipeline/components/base.py | Fanxingye/Autotabular | d630c78290a52f8c73885afb16884e18135c34f6 | [
"Apache-2.0"
] | 7 | 2021-09-23T07:28:46.000Z | 2021-10-02T21:15:18.000Z | import importlib
import inspect
import pkgutil
import sys
from collections import OrderedDict
from autotabular.pipeline.constants import SPARSE
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_random_state
def find_components(package, directory, base_class):
    """Discover strict subclasses of *base_class* in the modules of *directory*.

    Modules already present in ``sys.modules`` and sub-packages are skipped;
    for each remaining module the last matching class wins.
    """
    found = OrderedDict()
    for _loader, module_name, is_package in pkgutil.iter_modules([directory]):
        full_module_name = '%s.%s' % (package, module_name)
        if is_package or full_module_name in sys.modules:
            continue
        module = importlib.import_module(full_module_name)
        for _member_name, member in inspect.getmembers(module, inspect.isclass):
            if issubclass(member, base_class) and member != base_class:
                # TODO test if the obj implements the interface
                # Keep in mind that this only instantiates the ensemble_wrapper,
                # but not the real target classifier
                found[module_name] = member
    return found
class ThirdPartyComponents(object):
    """Registry for externally supplied components.

    Accepted components must directly subclass ``base_class`` and declare
    exactly the expected set of properties via ``get_properties()``.
    """

    def __init__(self, base_class):
        self.base_class = base_class
        self.components = OrderedDict()

    def add_component(self, obj):
        """Validate *obj* and register it under its class name.

        Raises
        ------
        TypeError
            If *obj* is not a class directly derived from ``base_class``.
        ValueError
            If the declared properties are not exactly the expected set.
        """
        # Guard clause: only direct subclasses of the base class are accepted.
        if not (inspect.isclass(obj) and self.base_class in obj.__bases__):
            raise TypeError('add_component works only with a subclass of %s' %
                            str(self.base_class))
        name = obj.__name__
        classifier = obj
        should_be_there = {
            'shortname', 'name', 'handles_regression',
            'handles_classification', 'handles_multiclass',
            'handles_multilabel', 'handles_multioutput', 'is_deterministic',
            'input', 'output'
        }
        declared = set(classifier.get_properties())
        # Reject any property that is not part of the expected interface.
        for prop in declared:
            if prop not in should_be_there:
                raise ValueError('Property %s must not be specified for '
                                 'algorithm %s. Only the following properties '
                                 'can be specified: %s' %
                                 (prop, name, str(should_be_there)))
        # Every expected property must be declared.
        for prop in should_be_there:
            if prop not in declared:
                raise ValueError('Property %s not specified for algorithm %s' %
                                 (prop, name))
        self.components[name] = classifier
class AutotabularComponent(BaseEstimator):
    """Common interface that every autotabular pipeline component implements."""

    @staticmethod
    def get_properties(dataset_properties=None):
        """Describe the capabilities of the underlying algorithm.

        Find more information at :ref:`get_properties`

        Parameters
        ----------
        dataset_properties : dict, optional (default=None)

        Returns
        -------
        dict
        """
        raise NotImplementedError()

    @staticmethod
    def get_hyperparameter_search_space(dataset_properties=None):
        """Return the hyperparameter configuration space of this algorithm.

        Parameters
        ----------
        dataset_properties : dict, optional (default=None)

        Returns
        -------
        Configspace.configuration_space.ConfigurationSpace
            The configuration space of this algorithm.
        """
        raise NotImplementedError()

    def fit(self, X, y):
        """Fit the wrapped scikit-learn model and return ``self``.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Training data
        y : array-like, shape = (n_samples,) or shape = (n_sample, n_labels)
            Targets

        Returns
        -------
        self : returns an instance of self.
        """
        raise NotImplementedError()

    def set_hyperparameters(self, configuration, init_params=None):
        """Copy every value from *configuration* (and optionally from
        *init_params*) onto an identically named, pre-existing attribute.

        Raises
        ------
        ValueError
            If a name does not correspond to an existing attribute.
        """
        for name, value in configuration.get_dictionary().items():
            if not hasattr(self, name):
                raise ValueError('Cannot set hyperparameter %s for %s because '
                                 'the hyperparameter does not exist.' %
                                 (name, str(self)))
            setattr(self, name, value)
        if init_params is not None:
            for name, value in init_params.items():
                if not hasattr(self, name):
                    raise ValueError('Cannot set init param %s for %s because '
                                     'the init param does not exist.' %
                                     (name, str(self)))
                setattr(self, name, value)
        return self

    def __str__(self):
        """Human-readable identifier built from the component's name property."""
        return 'autotabular.pipeline %s' % self.get_properties()['name']
class IterativeComponent(AutotabularComponent):
    # Component trained incrementally: fit() calls iterative_fit() with an
    # exponentially growing iteration budget (2, 2, 4, 8, ...) until the
    # subclass reports the configuration as fully fitted.
    def fit(self, X, y, sample_weight=None):
        # NOTE(review): sample_weight is accepted but not forwarded to
        # iterative_fit here (unlike IterativeComponentWithSampleWeight) --
        # confirm that this is intentional.
        self.iterative_fit(X, y, n_iter=2, refit=True)
        iteration = 2
        while not self.configuration_fully_fitted():
            # double the budget each round: 2**iteration / 2 == 2**(iteration-1)
            n_iter = int(2**iteration / 2)
            self.iterative_fit(X, y, n_iter=n_iter, refit=False)
            iteration += 1
        return self
    @staticmethod
    def get_max_iter():
        """Return the maximum number of iterations (subclass responsibility)."""
        raise NotImplementedError()
    def get_current_iter(self):
        """Return the number of iterations performed so far (subclass responsibility)."""
        raise NotImplementedError()
class IterativeComponentWithSampleWeight(AutotabularComponent):
    # Same incremental-fitting scheme as IterativeComponent, but forwards
    # sample_weight to each iterative_fit call.
    def fit(self, X, y, sample_weight=None):
        self.iterative_fit(
            X, y, n_iter=2, refit=True, sample_weight=sample_weight)
        iteration = 2
        while not self.configuration_fully_fitted():
            n_iter = int(2**iteration / 2)
            # NOTE(review): unlike IterativeComponent, refit is not passed here,
            # so the subclass default for refit applies -- confirm intent.
            self.iterative_fit(
                X, y, n_iter=n_iter, sample_weight=sample_weight)
            iteration += 1
        return self
    @staticmethod
    def get_max_iter():
        """Return the maximum number of iterations (subclass responsibility)."""
        raise NotImplementedError()
    def get_current_iter(self):
        """Return the number of iterations performed so far (subclass responsibility)."""
        raise NotImplementedError()
class AutotabularClassificationAlgorithm(AutotabularComponent):
    """Provide an abstract interface for classification algorithms in auto-
    sklearn.
    See :ref:`extending` for more information.
    """
    def __init__(self):
        # the wrapped scikit-learn style estimator, set by subclasses on fit
        self.estimator = None
        # cached properties dict; not populated here
        self.properties = None
    def predict(self, X):
        """The predict function calls the predict function of the underlying
        scikit-learn model and returns an array with the predictions.
        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
        Returns
        -------
        array, shape = (n_samples,) or shape = (n_samples, n_labels)
            Returns the predicted values
        Notes
        -----
        Please see the `scikit-learn API documentation
        <https://scikit-learn.org/stable/developers/develop.html#apis-of-scikit-learn-objects>`_
        for further information.
        """
        raise NotImplementedError()
    def predict_proba(self, X):
        """Predict probabilities.
        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
        Returns
        -------
        array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)
        """
        raise NotImplementedError()
    def get_estimator(self):
        """Return the underlying estimator object.
        Returns
        -------
        estimator : the underlying estimator object
        """
        return self.estimator
class AutotabularPreprocessingAlgorithm(TransformerMixin,
                                        AutotabularComponent):
    """Provide an abstract interface for preprocessing algorithms in auto-
    sklearn.
    See :ref:`extending` for more information.
    """
    def __init__(self):
        # the wrapped scikit-learn style transformer, set by subclasses on fit
        self.preprocessor = None
    def transform(self, X):
        """The transform function calls the transform function of the
        underlying scikit-learn model and returns the transformed array.
        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
        Returns
        -------
        X : array
            Return the transformed training data
        Notes
        -----
        Please see the `scikit-learn API documentation
        <https://scikit-learn.org/stable/developers/develop.html#apis-of-scikit-learn-objects>`_
        for further information.
        """
        raise NotImplementedError()
    def get_preprocessor(self):
        """Return the underlying preprocessor object.
        Returns
        -------
        preprocessor : the underlying preprocessor object
        """
        return self.preprocessor
class AutotabularRegressionAlgorithm(AutotabularComponent):
    """Provide an abstract interface for regression algorithms in Auto-tabular.
    Make a subclass of this and put it into the directory `autotabular/pipeline/components/regression` to make it available.
    """
    def __init__(self):
        # the wrapped scikit-learn style estimator, set by subclasses on fit
        self.estimator = None
        # cached properties dict; not populated here
        self.properties = None
    def predict(self, X):
        """The predict function calls the predict function of the underlying
        scikit-learn model and returns an array with the predictions.
        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
        Returns
        -------
        array, shape = (n_samples,) or shape = (n_samples, n_targets)
            Returns the predicted values
        Notes
        -----
        Please see the `scikit-learn API documentation
        <https://scikit-learn.org/stable/developers/develop.html#apis-of-scikit-learn-objects>`_
        for further information.
        """
        raise NotImplementedError()
    def get_estimator(self):
        """Return the underlying estimator object.
        Returns
        -------
        estimator : the underlying estimator object
        """
        return self.estimator
class AutotabularChoice(object):
    # Selects one concrete component from a family (e.g. one classifier among
    # all registered classifiers) based on the '__choice__' hyperparameter.
    def __init__(self, dataset_properties, random_state=None):
        """
        Parameters
        ----------
        dataset_properties : dict
            Describes the dataset to work on, this can change the
            configuration space constructed by Auto-tabular. Mandatory
            properties are:
            * target_type: classification or regression
            Optional properties are:
            * multiclass: whether the dataset is a multiclass classification
            dataset.
            * multilabel: whether the dataset is a multilabel classification
            dataset
        """
        # Since all calls to get_hyperparameter_search_space will be done by the
        # pipeline on construction, it is not necessary to construct a
        # configuration space at this location!
        # self.configuration = self.get_hyperparameter_search_space(
        #     dataset_properties).get_default_configuration()
        if random_state is None:
            self.random_state = check_random_state(1)
        else:
            self.random_state = check_random_state(random_state)
        # Since the pipeline will initialize the hyperparameters, it is not
        # necessary to do this upon the construction of this object
        # self.set_hyperparameters(self.configuration)
        # the concrete, instantiated component; set by set_hyperparameters()
        self.choice = None
    # NOTE(review): defined without @classmethod although the parameter is
    # named `cls`; it is invoked as self.get_components(), so `cls` receives
    # the instance -- confirm intent.
    def get_components(cls):
        raise NotImplementedError()
    def get_available_components(self,
                                 dataset_properties=None,
                                 include=None,
                                 exclude=None):
        """Return an OrderedDict of selectable components, honoring the
        mutually exclusive include/exclude lists and sparse-data support."""
        if dataset_properties is None:
            dataset_properties = {}
        if include is not None and exclude is not None:
            raise ValueError(
                'The argument include and exclude cannot be used together.')
        available_comp = self.get_components()
        if include is not None:
            for incl in include:
                if incl not in available_comp:
                    raise ValueError('Trying to include unknown component: '
                                     '%s' % incl)
        components_dict = OrderedDict()
        for name in available_comp:
            if include is not None and name not in include:
                continue
            elif exclude is not None and name in exclude:
                continue
            if 'sparse' in dataset_properties and dataset_properties['sparse']:
                # In case the dataset is sparse, ignore
                # components that do not handle sparse data
                # Auto-tabular uses SPARSE constant as a mechanism
                # to indicate whether a component can handle sparse data.
                # If SPARSE is not in the input properties of the component, it
                # means SPARSE is not a valid input to this component, so filter it out
                if SPARSE not in available_comp[name].get_properties(
                )['input']:
                    continue
            components_dict[name] = available_comp[name]
        return components_dict
    def set_hyperparameters(self, configuration, init_params=None):
        """Instantiate the component named by '__choice__' with the remaining
        hyperparameters (their '<choice>:' name prefix stripped)."""
        new_params = {}
        params = configuration.get_dictionary()
        choice = params['__choice__']
        del params['__choice__']
        for param, value in params.items():
            # strip the "<choice>:" prefix from the nested hyperparameter name
            param = param.replace(choice, '').replace(':', '')
            new_params[param] = value
        if init_params is not None:
            for param, value in init_params.items():
                param = param.replace(choice, '').replace(':', '')
                new_params[param] = value
        new_params['random_state'] = self.random_state
        self.new_params = new_params
        self.choice = self.get_components()[choice](**new_params)
        return self
    def get_hyperparameter_search_space(self,
                                        dataset_properties=None,
                                        default=None,
                                        include=None,
                                        exclude=None):
        raise NotImplementedError()
    def fit(self, X, y, **kwargs):
        # Allows to use check_is_fitted on the choice object
        self.fitted_ = True
        # NOTE(review): **kwargs is always a dict, so this None check is
        # dead code -- confirm before removing.
        if kwargs is None:
            kwargs = {}
        return self.choice.fit(X, y, **kwargs)
    def predict(self, X):
        """Delegate prediction to the selected component."""
        return self.choice.predict(X)
| 32.649554 | 124 | 0.593697 |
7054c018c57d4ea9944d9d0540ac864e087f74bc | 3,339 | py | Python | tests/test_assign_to_workflow_seqlab_quantstudio.py | EdinburghGenomics/clarity_scripts | 472299fc4edd4e0a08895ecc7c5630b253322e28 | [
"MIT"
] | 2 | 2018-06-18T16:31:09.000Z | 2021-03-31T20:13:39.000Z | tests/test_assign_to_workflow_seqlab_quantstudio.py | EdinburghGenomics/clarity_scripts | 472299fc4edd4e0a08895ecc7c5630b253322e28 | [
"MIT"
] | 99 | 2016-02-15T16:21:51.000Z | 2022-03-11T23:43:26.000Z | tests/test_assign_to_workflow_seqlab_quantstudio.py | EdinburghGenomics/clarity_scripts | 472299fc4edd4e0a08895ecc7c5630b253322e28 | [
"MIT"
] | null | null | null | from unittest.mock import Mock, patch, PropertyMock, call
from scripts.assign_workflow_seqlab_quantstudio import AssignWorkflowSeqLabQuantStudio
from tests.test_common import TestEPP, fake_artifact
def fake_all_outputs(unique=False, resolve=False):
    """Return a tuple of mocked output artifacts, each wrapping mocked samples.

    The ``unique``/``resolve`` flags mirror the real API's signature; both are
    accepted but ignored by this fake.
    """
    # (output id, backing artifact id, sample id, sample UDFs)
    specs = (
        ('ao1', 'a1', 's1', {'Prep Workflow': 'TruSeq PCR-Free DNA Sample Prep',
                             'Species': 'Homo sapiens'}),
        ('ao2', 'a2', 's2', {'Prep Workflow': 'TruSeq Nano DNA Sample Prep'}),
        ('ao3', 'a3', 's3', {'Prep Workflow': 'TruSeq Nano DNA Sample Prep',
                             'Species': 'Homo sapiens', '2D Barcode': 'fluidX1'}),
        ('ao4', 'a4', 's4', {'Prep Workflow': 'KAPA DNA Sample Prep'}),
    )
    return tuple(
        Mock(id=out_id,
             samples=[Mock(artifact=fake_artifact(art_id), id=sample_id, udf=udf)])
        for out_id, art_id, sample_id, udf in specs
    )
class TestAssignWorkflowSeqLabQuantStudio(TestEPP):
    # Unit tests for the AssignWorkflowSeqLabQuantStudio EPP script: verifies
    # that artifacts are routed to the correct workflow stages based on each
    # sample's 'Prep Workflow' UDF.
    def setUp(self):
        # patch the EPP's `process` property to yield the fake output artifacts
        self.patched_process = patch.object(
            AssignWorkflowSeqLabQuantStudio,
            'process',
            new_callable=PropertyMock(return_value=Mock(all_outputs=fake_all_outputs))
        )
        # stage lookup always resolves to the same fake stage URI
        self.patched_get_workflow_stage = patch(
            'scripts.assign_workflow_seqlab_quantstudio.get_workflow_stage',
            return_value=Mock(uri='a_uri')
        )
        # FluidX artifact lookup returns a fixed artifact (id 'fx3')
        self.patch_find_art = patch(
            'scripts.assign_workflow_seqlab_quantstudio.find_newest_artifact_originating_from',
            return_value=Mock(id='fx3')
        )
        self.epp = AssignWorkflowSeqLabQuantStudio(self.default_argv)
    def test_assign(self):
        with self.patched_get_workflow_stage as pws, self.patched_lims, self.patched_process, self.patch_find_art:
            self.epp._run()
            # one stage lookup per prep workflow, plus the QuantStudio stage
            pws.assert_has_calls((
                call(self.epp.lims, 'TruSeq PCR-Free DNA Sample Prep', 'Visual QC'),
                call(self.epp.lims, 'TruSeq Nano DNA Sample Prep', 'Visual QC'),
                call(self.epp.lims, 'KAPA Non-Pooling Sample Prep EG 1.0 WF',
                     'Sequencing Plate Picogreen EG 1.0 ST'),
                call(self.epp.lims, 'QuantStudio EG1.0', 'QuantStudio Plate Preparation EG1.0'),
            ))
            # first routing (pcr free)
            route_args = self.epp.lims.route_artifacts.call_args_list[0]
            assert sorted([a.id for a in route_args[0][0]]) == ['ao1']
            assert self.epp.lims.route_artifacts.call_args[1] == {'stage_uri': 'a_uri'}
            # second routing (nano)
            route_args = self.epp.lims.route_artifacts.call_args_list[1]
            assert sorted([a.id for a in route_args[0][0]]) == ['ao2', 'ao3']
            # third routing (kapa)
            route_args = self.epp.lims.route_artifacts.call_args_list[2]
            assert sorted([a.id for a in route_args[0][0]]) == ['ao4']
            # fourth routing (quantstudio); 'fx3' comes from the patched
            # find_newest_artifact_originating_from for the FluidX sample
            route_args = self.epp.lims.route_artifacts.call_args_list[3]
            assert sorted([a.id for a in route_args[0][0]]) == ['a1', 'fx3']
| 49.102941 | 116 | 0.607966 |
35846e883b4d57821454f8b5a66685c746fe58df | 40,162 | py | Python | localstack/services/cloudformation/cloudformation_api.py | Shika9/localstack | a3af1b783595cf8b89e17a97bf5fa483b21ebe9c | [
"Apache-2.0"
] | null | null | null | localstack/services/cloudformation/cloudformation_api.py | Shika9/localstack | a3af1b783595cf8b89e17a97bf5fa483b21ebe9c | [
"Apache-2.0"
] | null | null | null | localstack/services/cloudformation/cloudformation_api.py | Shika9/localstack | a3af1b783595cf8b89e17a97bf5fa483b21ebe9c | [
"Apache-2.0"
] | null | null | null | import json
import logging
import traceback
from typing import Any, Dict, List, Optional, overload
import xmltodict
from flask import Flask, request
from requests.models import Response
from typing_extensions import Literal
from localstack.services.generic_proxy import RegionBackend
from localstack.utils.aws import aws_responses, aws_stack
from localstack.utils.aws.aws_responses import (
extract_url_encoded_param_list,
flask_error_response_xml,
requests_response_xml,
requests_to_flask_response,
)
from localstack.utils.cloudformation import template_deployer, template_preparer
from localstack.utils.cloudformation.template_preparer import prepare_template_body
from localstack.utils.common import (
clone,
clone_safe,
is_none_or_empty,
long_uid,
parse_request_data,
recurse_object,
select_attributes,
short_uid,
timestamp_millis,
)
APP_NAME = "cloudformation_api"
app = Flask(APP_NAME)
LOG = logging.getLogger(__name__)
XMLNS_CF = "http://cloudformation.amazonaws.com/doc/2010-05-15/"
class StackSet(object):
    """A stack set contains multiple stack instances."""

    def __init__(self, metadata=None):
        # raw request parameters / metadata describing this stack set
        self.metadata = {} if metadata is None else metadata
        # stack instances deployed for this stack set
        self.stack_instances = []
        # maps operation ID to stack set operation details
        self.operations = {}

    @property
    def stack_set_name(self):
        """Name of the stack set, or None if not present in the metadata."""
        return self.metadata.get("StackSetName")
class StackInstance(object):
    """A stack instance belongs to a stack set and is specific to a region / account ID."""

    def __init__(self, metadata=None):
        # raw metadata describing this instance (account, region, status, ...)
        self.metadata = {} if metadata is None else metadata
        # reference to the deployed stack belonging to this stack instance
        self.stack = None
class Stack(object):
    # In-memory representation of a CloudFormation stack: holds the parsed
    # template, request metadata, per-resource deployment states, emitted
    # stack events, and any change sets created against the stack.
    def __init__(self, metadata=None, template=None):
        if template is None:
            template = {}
        self.metadata = metadata or {}
        self.template = template or {}
        # pristine copies of the template, taken before deployment mutates it
        self._template_raw = clone_safe(self.template)
        self.template_original = clone_safe(self.template)
        # initialize resources
        for resource_id, resource in self.template_resources.items():
            resource["LogicalResourceId"] = self.template_original["Resources"][resource_id][
                "LogicalResourceId"
            ] = (resource.get("LogicalResourceId") or resource_id)
        # initialize stack template attributes
        self.template["StackId"] = self.metadata["StackId"] = self.metadata.get(
            "StackId"
        ) or aws_stack.cloudformation_stack_arn(self.stack_name, short_uid())
        self.template["Parameters"] = self.template.get("Parameters") or {}
        self.template["Outputs"] = self.template.get("Outputs") or {}
        # initialize metadata
        self.metadata["Parameters"] = self.metadata.get("Parameters") or []
        self.metadata["StackStatus"] = "CREATE_IN_PROGRESS"
        self.metadata["CreationTime"] = self.metadata.get("CreationTime") or timestamp_millis()
        # maps resource id to resource state
        self._resource_states = {}
        # list of stack events
        self.events = []
        # list of stack change sets
        self.change_sets = []
        # initialize parameters
        # parse URL-encoded request parameters of the form
        # "Parameters.member.<i>.ParameterKey/ParameterValue" (i starting at 1)
        for i in range(1, 100):
            key = "Parameters.member.%s.ParameterKey" % i
            value = "Parameters.member.%s.ParameterValue" % i
            key = self.metadata.get(key)
            value = self.metadata.get(value)
            if not key:
                break
            self.metadata["Parameters"].append({"ParameterKey": key, "ParameterValue": value})
    def describe_details(self):
        """Return this stack's attributes in DescribeStacks response shape."""
        attrs = [
            "StackId",
            "StackName",
            "Description",
            "StackStatusReason",
            "StackStatus",
            "Capabilities",
            "ParentId",
            "RootId",
            "RoleARN",
            "CreationTime",
            "DeletionTime",
            "LastUpdatedTime",
            "ChangeSetId",
        ]
        result = select_attributes(self.metadata, attrs)
        result["Tags"] = self.tags
        result["Outputs"] = self.outputs
        result["Parameters"] = self.stack_parameters()
        # ensure list-valued attributes are present even when empty
        for attr in ["Capabilities", "Tags", "Outputs", "Parameters"]:
            result[attr] = result.get(attr, [])
        return result
    def set_stack_status(self, status):
        """Set the stack status/reason and publish a matching stack event."""
        self.metadata["StackStatus"] = status
        self.metadata["StackStatusReason"] = "Deployment %s" % (
            "failed" if "FAILED" in status else "succeeded"
        )
        self.add_stack_event(self.stack_name, self.stack_id, status)
    def add_stack_event(self, resource_id: str, physical_res_id: str, status: str):
        """Prepend a stack event for the given resource to the event list."""
        event = {
            "EventId": long_uid(),
            "Timestamp": timestamp_millis(),
            "StackId": self.stack_id,
            "StackName": self.stack_name,
            "LogicalResourceId": resource_id,
            "PhysicalResourceId": physical_res_id,
            "ResourceStatus": status,
            "ResourceType": "AWS::CloudFormation::Stack",
        }
        # newest events first
        self.events.insert(0, event)
    def set_resource_status(self, resource_id: str, status: str, physical_res_id: str = None):
        """Update the deployment status of the given resource ID and publish a corresponding stack event."""
        self._set_resource_status_details(resource_id, physical_res_id=physical_res_id)
        state = self.resource_states.setdefault(resource_id, {})
        state["PreviousResourceStatus"] = state.get("ResourceStatus")
        state["ResourceStatus"] = status
        state["LastUpdatedTimestamp"] = timestamp_millis()
        self.add_stack_event(resource_id, physical_res_id, status)
    def _set_resource_status_details(self, resource_id: str, physical_res_id: str = None):
        """Helper function to ensure that the status details for the given resource ID are up-to-date."""
        resource = self.resources.get(resource_id)
        if resource is None:
            # make sure we delete the states for any non-existing/deleted resources
            self._resource_states.pop(resource_id, None)
            return
        state = self._resource_states.setdefault(resource_id, {})
        attr_defaults = (
            ("LogicalResourceId", resource_id),
            ("PhysicalResourceId", physical_res_id),
        )
        # fill in defaults on both the resource and its state record
        for res in [resource, state]:
            for attr, default in attr_defaults:
                res[attr] = res.get(attr) or default
        state["StackName"] = state.get("StackName") or self.stack_name
        state["StackId"] = state.get("StackId") or self.stack_id
        state["ResourceType"] = state.get("ResourceType") or self.resources[resource_id].get("Type")
        return state
    def resource_status(self, resource_id: str):
        """Return the status record for a resource; raises if unknown."""
        result = self._lookup(self.resource_states, resource_id)
        return result
    @property
    def resource_states(self):
        # refresh all state records (and drop states of deleted resources)
        for resource_id in list(self._resource_states.keys()):
            self._set_resource_status_details(resource_id)
        return self._resource_states
    @property
    def stack_name(self):
        return self.metadata["StackName"]
    @property
    def stack_id(self):
        return self.metadata["StackId"]
    # TODO: potential performance issues due to many stack_parameters calls (cache or limit actual invocations)
    @property
    def resources(self):  # TODO: not actually resources, split apart
        """Return dict of resources, parameters, conditions, and other stack metadata."""
        result = dict(self.template_resources)
        def add_params(defaults=True):
            # expose stack parameters as pseudo-resources of Type 'Parameter'
            for param in self.stack_parameters(defaults=defaults):
                if param["ParameterKey"] not in result:
                    resolved_value = param.get("ResolvedValue")
                    result[param["ParameterKey"]] = {
                        "Type": "Parameter",
                        "LogicalResourceId": param["ParameterKey"],
                        "Properties": {
                            "Value": (
                                resolved_value
                                if resolved_value is not None
                                else param["ParameterValue"]
                            )
                        },
                    }
        add_params(defaults=False)
        # TODO: conditions and mappings don't really belong here and should be handled separately
        for name, value in self.conditions.items():
            if name not in result:
                result[name] = {
                    "Type": "Parameter",
                    "LogicalResourceId": name,
                    "Properties": {"Value": value},
                }
        for name, value in self.mappings.items():
            if name not in result:
                result[name] = {
                    "Type": "Parameter",
                    "LogicalResourceId": name,
                    "Properties": {"Value": value},
                }
        add_params(defaults=True)
        return result
    @property
    def template_resources(self):
        return self.template.setdefault("Resources", {})
    @property
    def tags(self):
        return aws_responses.extract_tags(self.metadata)
    @property
    def imports(self):
        """Return the set of export names referenced via Fn::ImportValue."""
        def _collect(o, **kwargs):
            if isinstance(o, dict):
                import_val = o.get("Fn::ImportValue")
                if import_val:
                    result.add(import_val)
            return o
        result = set()
        recurse_object(self.resources, _collect)
        return result
    @property
    def outputs(self):
        """Return resolved stack outputs, including those of nested stacks."""
        result = []
        # first, fetch the outputs of nested child stacks
        for stack in self.nested_stacks:
            result.extend(stack.outputs)
        # now, fetch the outputs of this stack
        for k, details in self.template.get("Outputs", {}).items():
            value = None
            try:
                template_deployer.resolve_refs_recursively(self.stack_name, details, self.resources)
                value = details["Value"]
            except Exception as e:
                # unresolved references yield a None OutputValue instead of failing
                LOG.debug("Unable to resolve references in stack outputs: %s - %s", details, e)
            exports = details.get("Export") or {}
            export = exports.get("Name")
            export = template_deployer.resolve_refs_recursively(
                self.stack_name, export, self.resources
            )
            description = details.get("Description")
            entry = {
                "OutputKey": k,
                "OutputValue": value,
                "Description": description,
                "ExportName": export,
            }
            result.append(entry)
        return result
    # TODO: check if metadata already populated/resolved and use it if possible (avoid unnecessary re-resolving)
    def stack_parameters(self, defaults=True) -> List[Dict[str, Any]]:
        """Merge template defaults, request parameters, and change set
        parameters into a single parameter list (later sources win)."""
        result = {}
        # add default template parameter values
        if defaults:
            for key, value in self.template_parameters.items():
                param_value = value.get("Default")
                result[key] = {
                    "ParameterKey": key,
                    "ParameterValue": param_value,
                }
                # TODO: extract dynamic parameter resolving
                # TODO: support different types and refactor logic to use metadata (here not yet populated properly)
                param_type = value.get("Type", "")
                if not is_none_or_empty(param_type):
                    if param_type == "AWS::SSM::Parameter::Value<String>":
                        # dynamic parameter: resolve the value from SSM parameter store
                        ssm_client = aws_stack.connect_to_service("ssm")
                        resolved_value = ssm_client.get_parameter(Name=param_value)["Parameter"][
                            "Value"
                        ]
                        result[key]["ResolvedValue"] = resolved_value
                    elif param_type.startswith("AWS::"):
                        LOG.info(
                            f"Parameter Type '{param_type}' is currently not supported. Coming soon, stay tuned!"
                        )
                    else:
                        # lets assume we support the normal CFn parameters
                        pass
        # add stack parameters
        result.update({p["ParameterKey"]: p for p in self.metadata["Parameters"]})
        # add parameters of change sets
        for change_set in self.change_sets:
            result.update({p["ParameterKey"]: p for p in change_set.metadata["Parameters"]})
        result = list(result.values())
        return result
    @property
    def template_parameters(self):
        return self.template["Parameters"]
    @property
    def conditions(self):
        return self.template.get("Conditions", {})
    @property
    def mappings(self):
        return self.template.get("Mappings", {})
    @property
    def exports_map(self):
        """Map export name -> export entry, across all stacks in this region."""
        result = {}
        for export in CloudFormationRegion.get().exports:
            result[export["Name"]] = export
        return result
    @property
    def nested_stacks(self):
        """Return a list of nested stacks that have been deployed by this stack."""
        result = [
            r for r in self.template_resources.values() if r["Type"] == "AWS::CloudFormation::Stack"
        ]
        result = [find_stack(r["Properties"].get("StackName")) for r in result]
        # drop child stacks that cannot be found (e.g. not deployed yet)
        result = [r for r in result if r]
        return result
    @property
    def status(self):
        return self.metadata["StackStatus"]
    @property
    def resource_types(self):
        return [r.get("Type") for r in self.template_resources.values()]
    def resource(self, resource_id):
        """Return the resource definition for the given ID; raises if unknown."""
        return self._lookup(self.resources, resource_id)
    def _lookup(self, resource_map, resource_id):
        """Look up *resource_id* in *resource_map*, raising a descriptive
        error when the resource is not part of this stack."""
        resource = resource_map.get(resource_id)
        if not resource:
            raise Exception(
                'Unable to find details for resource "%s" in stack "%s"'
                % (resource_id, self.stack_name)
            )
        return resource
    def copy(self):
        """Return a new Stack with shallow copies of metadata and template."""
        return Stack(metadata=dict(self.metadata), template=dict(self.template))
class StackChangeSet(Stack):
    """Represents a CloudFormation change set created against an existing stack.

    The change set is itself modeled as a Stack (it has its own template and
    metadata) and additionally holds a reference to the parent stack it was
    created for.
    """

    def __init__(self, params=None, template=None):
        if template is None:
            template = {}
        if params is None:
            params = {}
        super(StackChangeSet, self).__init__(params, template)
        name = self.metadata["ChangeSetName"]
        # assign a generated change set ARN unless one was provided
        if not self.metadata.get("ChangeSetId"):
            self.metadata["ChangeSetId"] = aws_stack.cf_change_set_arn(
                name, change_set_id=short_uid()
            )
        # look up and link the parent stack this change set applies to
        stack = self.stack = find_stack(self.metadata["StackName"])
        self.metadata["StackId"] = stack.stack_id
        self.metadata["Status"] = "CREATE_PENDING"

    @property
    def change_set_id(self):
        return self.metadata["ChangeSetId"]

    @property
    def change_set_name(self):
        return self.metadata["ChangeSetName"]

    @property
    def resources(self):
        """Parent stack resources overlaid with this change set's own resources.

        Fix: the previous implementation called ``self.resources`` inside this
        property, which re-entered the property itself and always raised a
        RecursionError; the intended lookup is the inherited ``Stack.resources``.
        """
        result = dict(self.stack.resources)
        result.update(super(StackChangeSet, self).resources)
        return result

    @property
    def changes(self):
        # lazily initialize the 'Changes' list in the metadata
        result = self.metadata["Changes"] = self.metadata.get("Changes", [])
        return result
class CloudFormationRegion(RegionBackend):
    # Per-region CloudFormation state: all deployed stacks and stack sets.
    def __init__(self):
        # maps stack ID to stack details
        self.stacks: Dict[str, Stack] = {}
        # maps stack set ID to stack set details
        self.stack_sets: Dict[str, StackSet] = {}
    @property
    def exports(self):
        """Collect the exported outputs of all stacks in this region.

        Duplicate export names are logged as a warning; the first stack that
        exported the name wins (later duplicates are still appended, matching
        existing behavior).
        """
        exports = []
        # tracks which stack first exported each name, for duplicate detection
        output_keys = {}
        for stack_id, stack in self.stacks.items():
            for output in stack.outputs:
                export_name = output.get("ExportName")
                if not export_name:
                    continue
                if export_name in output_keys:
                    # TODO: raise exception on stack creation in case of duplicate exports
                    LOG.warning(
                        "Found duplicate export name %s in stacks: %s %s",
                        export_name,
                        output_keys[export_name],
                        stack.stack_id,
                    )
                entry = {
                    "ExportingStackId": stack.stack_id,
                    "Name": export_name,
                    "Value": output["OutputValue"],
                }
                exports.append(entry)
                output_keys[export_name] = stack.stack_id
        return exports
# --------------
# API ENDPOINTS
# --------------
def create_stack(req_params):
    """Create and deploy a new stack from the request's template body.

    Returns {'StackId': ...} on success, or an XML error response when a
    non-deleted stack with the same name exists or deployment fails.
    """
    state = CloudFormationRegion.get()
    template_deployer.prepare_template_body(req_params)  # TODO: avoid mutating req_params directly
    template = template_preparer.parse_template(req_params["TemplateBody"])
    stack_name = template["StackName"] = req_params.get("StackName")
    stack = Stack(req_params, template)
    # find existing stack with same name, and remove it if this stack is in DELETED state
    existing = ([s for s in state.stacks.values() if s.stack_name == stack_name] or [None])[0]
    if existing:
        if "DELETE" not in existing.status:
            return error_response(
                'Stack named "%s" already exists with status "%s"' % (stack_name, existing.status),
                code=400,
                code_string="ValidationError",
            )
        state.stacks.pop(existing.stack_id)
    state.stacks[stack.stack_id] = stack
    LOG.debug(
        'Creating stack "%s" with %s resources ...', stack.stack_name, len(stack.template_resources)
    )
    deployer = template_deployer.TemplateDeployer(stack)
    try:
        # TODO: create separate step to first resolve parameters
        deployer.deploy_stack()
    except Exception as e:
        # keep the failed stack registered so it can still be described/deleted
        stack.set_stack_status("CREATE_FAILED")
        msg = 'Unable to create stack "%s": %s' % (stack.stack_name, e)
        LOG.debug("%s %s", msg, traceback.format_exc())
        return error_response(msg, code=400, code_string="ValidationError")
    result = {"StackId": stack.stack_id}
    return result
def create_stack_set(req_params):
    """Create a new (empty) stack set and register it in the region state."""
    state = CloudFormationRegion.get()
    new_set = StackSet(req_params)
    new_set_id = short_uid()
    new_set.metadata["StackSetId"] = new_set_id
    state.stack_sets[new_set_id] = new_set
    return {"StackSetId": new_set_id}
def create_stack_instances(req_params):
    """Deploy one stack per (account, region) pair for the given stack set.

    Returns {'OperationId': ...} on success, or an XML "not found" error if
    the stack set does not exist.
    """
    state = CloudFormationRegion.get()
    set_name = req_params.get("StackSetName")
    stack_set = [sset for sset in state.stack_sets.values() if sset.stack_set_name == set_name]
    if not stack_set:
        return not_found_error('Stack set named "%s" does not exist' % set_name)
    stack_set = stack_set[0]
    op_id = req_params.get("OperationId") or short_uid()
    sset_meta = stack_set.metadata
    # accounts may be given directly or nested under DeploymentTargets
    accounts = extract_url_encoded_param_list(req_params, "Accounts.member.%s")
    accounts = accounts or extract_url_encoded_param_list(
        req_params, "DeploymentTargets.Accounts.member.%s"
    )
    regions = extract_url_encoded_param_list(req_params, "Regions.member.%s")
    stacks_to_await = []
    for account in accounts:
        for region in regions:
            # deploy new stack
            LOG.debug('Deploying instance for stack set "%s" in region "%s"', set_name, region)
            cf_client = aws_stack.connect_to_service("cloudformation", region_name=region)
            # pass along whichever template source the stack set was created with
            kwargs = select_attributes(sset_meta, "TemplateBody") or select_attributes(
                sset_meta, "TemplateURL"
            )
            stack_name = "sset-%s-%s" % (set_name, account)
            result = cf_client.create_stack(StackName=stack_name, **kwargs)
            stacks_to_await.append((stack_name, region))
            # store stack instance
            instance = {
                "StackSetId": sset_meta["StackSetId"],
                "OperationId": op_id,
                "Account": account,
                "Region": region,
                "StackId": result["StackId"],
                "Status": "CURRENT",
                "StackInstanceStatus": {"DetailedStatus": "SUCCEEDED"},
            }
            instance = StackInstance(instance)
            stack_set.stack_instances.append(instance)
    # wait for completion of stack
    for stack in stacks_to_await:
        aws_stack.await_stack_completion(stack[0], region_name=stack[1])
    # record operation
    operation = {
        "OperationId": op_id,
        "StackSetId": stack_set.metadata["StackSetId"],
        "Action": "CREATE",
        "Status": "SUCCEEDED",
    }
    stack_set.operations[op_id] = operation
    result = {"OperationId": op_id}
    return result
def delete_stack(req_params):
    """Delete the stack identified by the 'StackName' request parameter."""
    target = find_stack(req_params.get("StackName"))
    template_deployer.TemplateDeployer(target).delete_stack()
    return {}
def delete_stack_set(req_params):
    """Delete all stack instances belonging to the named stack set."""
    state = CloudFormationRegion.get()
    set_name = req_params.get("StackSetName")
    matching = [sset for sset in state.stack_sets.values() if sset.stack_set_name == set_name]
    if not matching:
        return not_found_error('Stack set named "%s" does not exist' % set_name)
    for instance in matching[0].stack_instances:
        template_deployer.TemplateDeployer(instance.stack).delete_stack()
    return {}
def update_stack(req_params):
    """Update an existing stack with the request's new template body.

    Returns {'StackId': ...} on success, or an XML error response when the
    stack does not exist or the update fails.
    """
    stack_name = req_params.get("StackName")
    stack = find_stack(stack_name)
    if not stack:
        return not_found_error('Unable to update non-existing stack "%s"' % stack_name)
    template_preparer.prepare_template_body(req_params)
    template = template_preparer.parse_template(req_params["TemplateBody"])
    new_stack = Stack(req_params, template)
    deployer = template_deployer.TemplateDeployer(stack)
    try:
        deployer.update_stack(new_stack)
    except Exception as e:
        stack.set_stack_status("UPDATE_FAILED")
        msg = 'Unable to update stack "%s": %s' % (stack_name, e)
        LOG.debug("%s %s", msg, traceback.format_exc())
        return error_response(msg, code=400, code_string="ValidationError")
    result = {"StackId": stack.stack_id}
    return result
def update_stack_set(req_params):
    """Merge the request parameters into the stack set's metadata and record
    a (trivially) successful UPDATE operation."""
    state = CloudFormationRegion.get()
    set_name = req_params.get("StackSetName")
    candidates = [sset for sset in state.stack_sets.values() if sset.stack_set_name == set_name]
    if not candidates:
        return not_found_error('Stack set named "%s" does not exist' % set_name)
    target = candidates[0]
    target.metadata.update(req_params)
    op_id = req_params.get("OperationId") or short_uid()
    target.operations[op_id] = {
        "OperationId": op_id,
        "StackSetId": target.metadata["StackSetId"],
        "Action": "UPDATE",
        "Status": "SUCCEEDED",
    }
    return {"OperationId": op_id}
def describe_stacks(req_params):
    """Describe all stacks, or only the one matching 'StackName' if given.

    'StackName' may be either the stack's name or its stack ID.
    """
    state = CloudFormationRegion.get()
    stack_name = req_params.get("StackName")
    matching = [
        s.describe_details()
        for s in state.stacks.values()
        if stack_name in [None, s.stack_name, s.stack_id]
    ]
    if stack_name and not matching:
        return error_response(
            "Stack with id %s does not exist" % stack_name,
            code=400,
            code_string="ValidationError",
        )
    return {"Stacks": matching}
def list_stacks(req_params):
    """Handle ListStacks: return stack summaries, optionally filtered by status."""
    state = CloudFormationRegion.get()
    status_filters = _get_status_filter_members(req_params)
    # Attributes exposed in a StackSummary (subset of the full description).
    summary_attrs = [
        "StackId",
        "StackName",
        "TemplateDescription",
        "CreationTime",
        "LastUpdatedTime",
        "DeletionTime",
        "StackStatus",
        "StackStatusReason",
        "ParentId",
        "RootId",
        "DriftInformation",
    ]
    summaries = []
    for stack in state.stacks.values():
        # An empty filter set means "no filtering".
        if status_filters and stack.status not in status_filters:
            continue
        summaries.append(select_attributes(stack.describe_details(), summary_attrs))
    return {"StackSummaries": summaries}
def describe_stack_resource(req_params):
    """Handle DescribeStackResource: return status details for one logical resource."""
    stack_name = req_params.get("StackName")
    resource_id = req_params.get("LogicalResourceId")
    stack = find_stack(stack_name)
    if not stack:
        return stack_not_found_error(stack_name)
    # resource_status() reports the deployment state of the given logical ID.
    details = stack.resource_status(resource_id)
    result = {"StackResourceDetail": details}
    return result
def describe_stack_resources(req_params):
    """Handle DescribeStackResources: list resource states of a stack.

    Callers must provide StackName or PhysicalResourceId, but not both.
    An optional LogicalResourceId narrows the result to a single resource.
    """
    stack_name = req_params.get("StackName")
    resource_id = req_params.get("LogicalResourceId")
    phys_resource_id = req_params.get("PhysicalResourceId")
    if phys_resource_id and stack_name:
        return error_response("Cannot specify both StackName and PhysicalResourceId", code=400)
    # TODO: filter stack by PhysicalResourceId!
    stack = find_stack(stack_name)
    if not stack:
        return stack_not_found_error(stack_name)
    # A resource_id of None matches every resource in the stack.
    statuses = [
        res_status
        for res_id, res_status in stack.resource_states.items()
        if resource_id in [res_id, None]
    ]
    return {"StackResources": statuses}
def list_stack_resources(req_params):
    """Handle ListStackResources: delegate to DescribeStackResources, renaming the key."""
    described = describe_stack_resources(req_params)
    if not isinstance(described, dict):
        # Propagate error responses unchanged.
        return described
    return {"StackResourceSummaries": described.pop("StackResources")}
def list_stack_instances(req_params):
    """Handle ListStackInstances: summarize instances of the named stack set."""
    state = CloudFormationRegion.get()
    set_name = req_params.get("StackSetName")
    candidates = [s for s in state.stack_sets.values() if s.stack_set_name == set_name]
    if not candidates:
        return not_found_error('Stack set named "%s" does not exist' % set_name)
    summaries = [instance.metadata for instance in candidates[0].stack_instances]
    return {"Summaries": summaries}
# The three change-set types accepted by the CreateChangeSet API.
ChangeSetTypes = Literal["CREATE", "UPDATE", "IMPORT"]
def create_change_set(req_params: Dict[str, Any]):
    """Handle CreateChangeSet: validate input, resolve the template, and
    attach a new change set to an existing (UPDATE) or new empty (CREATE) stack.

    IMPORT change sets are not implemented. Returns the new change set's
    StackId and Id, or an XML error response on validation failure.
    """
    change_set_type: ChangeSetTypes = req_params.get("ChangeSetType", "UPDATE")
    stack_name: Optional[str] = req_params.get("StackName")
    change_set_name: Optional[str] = req_params.get("ChangeSetName")
    template_body: Optional[str] = req_params.get("TemplateBody")
    # s3 or secretsmanager url
    template_url: Optional[str] = req_params.get("TemplateUrl") or req_params.get("TemplateURL")
    if is_none_or_empty(change_set_name):
        return error_response(
            "ChangeSetName required", 400, "ValidationError"
        )  # TODO: check proper message
    if is_none_or_empty(stack_name):
        return error_response(
            "StackName required", 400, "ValidationError"
        )  # TODO: check proper message
    stack: Optional[Stack] = find_stack(stack_name)
    # validate and resolve template: exactly one of body/url must be given
    if template_body and template_url:
        return error_response(
            "Specify exactly one of 'TemplateBody' or 'TemplateUrl'", 400, "ValidationError"
        )  # TODO: check proper message
    if not template_body and not template_url:
        return error_response(
            "Specify exactly one of 'TemplateBody' or 'TemplateUrl'", 400, "ValidationError"
        )  # TODO: check proper message
    prepare_template_body(req_params)  # TODO: function has too many unclear responsibilities
    template = template_preparer.parse_template(req_params["TemplateBody"])
    del req_params["TemplateBody"]  # TODO: stop mutating req_params
    template["StackName"] = stack_name
    template[
        "ChangeSetName"
    ] = change_set_name  # TODO: validate with AWS what this is actually doing?
    if change_set_type == "UPDATE":
        # add changeset to existing stack
        if stack is None:
            return error_response(
                f"Stack '{stack_name}' does not exist.", 400, "ValidationError"
            )  # stack should exist already
    elif change_set_type == "CREATE":
        # create new (empty) stack
        if stack is not None:
            return error_response(
                f"Stack {stack_name} already exists", 400, "ValidationError"
            )  # stack should not exist yet (TODO: check proper message)
        state = CloudFormationRegion.get()
        # The placeholder stack is registered with no resources until the
        # change set is executed.
        empty_stack_template = dict(template)
        empty_stack_template["Resources"] = {}
        req_params_copy = clone_stack_params(req_params)
        stack = Stack(req_params_copy, empty_stack_template)
        state.stacks[stack.stack_id] = stack
        stack.set_stack_status("REVIEW_IN_PROGRESS")
    elif change_set_type == "IMPORT":
        raise NotImplementedError()  # TODO: implement importing resources
    else:
        msg = f"1 validation error detected: Value '{change_set_type}' at 'changeSetType' failed to satisfy constraint: Member must satisfy enum value set: [IMPORT, UPDATE, CREATE]"
        return error_response(msg, code=400, code_string="ValidationError")
    change_set = StackChangeSet(req_params, template)
    # TODO: refactor the flow here
    deployer = template_deployer.TemplateDeployer(change_set)
    deployer.construct_changes(
        stack,
        change_set,
        change_set_id=change_set.change_set_id,
        append_to_changeset=True,
    )  # TODO: ignores return value (?)
    deployer.apply_parameter_changes(change_set, change_set)  # TODO: bandaid to populate metadata
    stack.change_sets.append(change_set)
    change_set.metadata[
        "Status"
    ] = "CREATE_COMPLETE"  # technically for some time this should first be CREATE_PENDING
    change_set.metadata[
        "ExecutionStatus"
    ] = "AVAILABLE"  # technically for some time this should first be UNAVAILABLE
    return {"StackId": change_set.stack_id, "Id": change_set.change_set_id}
def execute_change_set(req_params):
    """Handle ExecuteChangeSet: deploy a previously created change set."""
    stack_name = req_params.get("StackName")
    cs_name = req_params.get("ChangeSetName")
    change_set = find_change_set(cs_name, stack_name=stack_name)
    if not change_set:
        return not_found_error(
            'Unable to find change set "%s" for stack "%s"' % (cs_name, stack_name)
        )
    LOG.debug(
        'Executing change set "%s" for stack "%s" with %s resources ...',
        cs_name,
        stack_name,
        len(change_set.template_resources),
    )
    deployer = template_deployer.TemplateDeployer(change_set.stack)
    deployer.apply_change_set(change_set)
    # Record which change set produced the stack's current state.
    change_set.stack.metadata["ChangeSetId"] = change_set.change_set_id
    return {}
def list_change_sets(req_params):
    """Handle ListChangeSets: summarize all change sets of the given stack."""
    stack_name = req_params.get("StackName")
    stack = find_stack(stack_name)
    if not stack:
        return not_found_error('Unable to find stack "%s"' % stack_name)
    return {"Summaries": [change_set.metadata for change_set in stack.change_sets]}
def list_stack_sets(req_params):
    """Handle ListStackSets: summarize every stack set in the region."""
    state = CloudFormationRegion.get()
    return {"Summaries": [sset.metadata for sset in state.stack_sets.values()]}
def describe_change_set(req_params):
    """Handle DescribeChangeSet: return the metadata of the named change set."""
    stack_name = req_params.get("StackName")
    cs_name = req_params.get("ChangeSetName")
    found: Optional[StackChangeSet] = find_change_set(cs_name, stack_name=stack_name)
    if not found:
        return not_found_error(
            'Unable to find change set "%s" for stack "%s"' % (cs_name, stack_name)
        )
    return found.metadata
def describe_stack_set(req_params):
    """Handle DescribeStackSet: return metadata of the named stack set."""
    state = CloudFormationRegion.get()
    set_name = req_params.get("StackSetName")
    result = [
        sset.metadata for sset in state.stack_sets.values() if sset.stack_set_name == set_name
    ]
    if not result:
        return not_found_error('Unable to find stack set "%s"' % set_name)
    # Names are assumed unique; use the first (only) match.
    result = {"StackSet": result[0]}
    return result
def describe_stack_set_operation(req_params):
    """Handle DescribeStackSetOperation: look up an operation by stack set + ID."""
    state = CloudFormationRegion.get()
    set_name = req_params.get("StackSetName")
    stack_set = [sset for sset in state.stack_sets.values() if sset.stack_set_name == set_name]
    if not stack_set:
        return not_found_error('Unable to find stack set "%s"' % set_name)
    stack_set = stack_set[0]
    op_id = req_params.get("OperationId")
    result = stack_set.operations.get(op_id)
    if not result:
        # Log the known operation IDs to ease debugging of lookup failures.
        LOG.debug(
            'Unable to find operation ID "%s" for stack set "%s" in list: %s',
            op_id,
            set_name,
            list(stack_set.operations.keys()),
        )
        return not_found_error(
            'Unable to find operation ID "%s" for stack set "%s"' % (op_id, set_name)
        )
    result = {"StackSetOperation": result}
    return result
def list_exports(req_params):
    """Handle ListExports: return all exported outputs of this region."""
    state = CloudFormationRegion.get()
    return {"Exports": state.exports}
def list_imports(req_params):
    """Handle ListImports: return the names of stacks importing a given export."""
    state = CloudFormationRegion.get()
    export_name = req_params.get("ExportName")
    # Collect every stack whose imports reference the requested export name.
    importing_stack_names = [
        stack.stack_name for stack in state.stacks.values() if export_name in stack.imports
    ]
    return {"Imports": importing_stack_names}
def validate_template(req_params):
    """Handle ValidateTemplate: delegate validation and convert the XML reply.

    The validator returns an XML fragment; wrap it in a temporary root tag so
    xmltodict can parse it, then return the unwrapped payload.
    """
    try:
        result = template_preparer.validate_template(req_params)
        result = "<tmp>%s</tmp>" % result
        result = xmltodict.parse(result)["tmp"]
        return result
    except Exception as err:
        return error_response("Template Validation Error: %s" % err)
def describe_stack_events(req_params):
    """Handle DescribeStackEvents: collect events of the matching stack(s).

    With no StackName given, events from all stacks in the region are returned.
    """
    stack_name = req_params.get("StackName")
    state = CloudFormationRegion.get()
    events = []
    # Iterate values directly - the dict key was previously fetched but unused.
    for stack in state.stacks.values():
        if stack_name in [None, stack.stack_name, stack.stack_id]:
            events.extend(stack.events)
    return {"StackEvents": events}
def delete_change_set(req_params):
    """Handle DeleteChangeSet: remove the named change set from its stack."""
    stack_name = req_params.get("StackName")
    cs_name = req_params.get("ChangeSetName")
    change_set = find_change_set(cs_name, stack_name=stack_name)
    if not change_set:
        return not_found_error(
            'Unable to find change set "%s" for stack "%s"' % (cs_name, stack_name)
        )
    # Keep every change set except the one being deleted (matched by name).
    change_set.stack.change_sets = [
        cs for cs in change_set.stack.change_sets if cs.change_set_name != cs_name
    ]
    return {}
def get_template(req_params):
    """Handle GetTemplate: return the raw template of a stack or change set."""
    stack_name = req_params.get("StackName")
    cs_name = req_params.get("ChangeSetName")
    stack = find_stack(stack_name)
    if cs_name:
        # A change set carries its own template; prefer it when requested.
        stack = find_change_set(stack_name=stack_name, cs_name=cs_name)
    if not stack:
        return stack_not_found_error(stack_name)
    result = {"TemplateBody": json.dumps(stack._template_raw)}
    return result
def get_template_summary(req_params):
    """Handle GetTemplateSummary for an existing stack or an inline template.

    If StackName is given, the existing stack is summarized; otherwise the
    request's template body is parsed into a throwaway stack.
    """
    stack_name = req_params.get("StackName")
    stack = None
    if stack_name:
        stack = find_stack(stack_name)
        if not stack:
            return stack_not_found_error(stack_name)
    else:
        template_deployer.prepare_template_body(req_params)
        template = template_preparer.parse_template(req_params["TemplateBody"])
        req_params["StackName"] = "tmp-stack"
        stack = Stack(req_params, template)
    result = stack.describe_details()
    # Group logical resource IDs by their resource type.
    id_summaries = {}
    for resource_id, resource in stack.template_resources.items():
        id_summaries.setdefault(resource["Type"], []).append(resource_id)
    result["ResourceTypes"] = list(id_summaries.keys())
    result["ResourceIdentifierSummaries"] = [
        {"ResourceType": key, "LogicalResourceIds": values} for key, values in id_summaries.items()
    ]
    return result
# -----------------
# MAIN ENTRY POINT
# -----------------
def serve(port: int, quiet: bool = True):
    """Start the CloudFormation API as a Flask app on the given port.

    NOTE(review): the `quiet` parameter is not used in this body - confirm
    whether serve_flask_app should receive it.
    """
    from localstack.services import generic_proxy  # moved here to fix circular import errors
    return generic_proxy.serve_flask_app(app=app, port=port)
@app.route("/", methods=["POST"])
def handle_request():
    """Dispatch an incoming AWS query-API request to its Action handler."""
    data = request.get_data()
    req_params = parse_request_data(request.method, request.path, data)
    action = req_params.get("Action", "")
    func = ENDPOINTS.get(action)
    if not func:
        # Unknown or unsupported action.
        return "", 404
    result = func(req_params)
    # Serialize dict/str handler results into an XML Flask response.
    result = _response(action, result)
    return result
# Mapping of CloudFormation API "Action" names to their handler functions.
ENDPOINTS = {
    "CreateChangeSet": create_change_set,
    "CreateStack": create_stack,
    "CreateStackInstances": create_stack_instances,
    "CreateStackSet": create_stack_set,
    "DeleteChangeSet": delete_change_set,
    "DeleteStack": delete_stack,
    "DeleteStackSet": delete_stack_set,
    "DescribeChangeSet": describe_change_set,
    "DescribeStackEvents": describe_stack_events,
    "DescribeStackResource": describe_stack_resource,
    "DescribeStackResources": describe_stack_resources,
    "DescribeStacks": describe_stacks,
    "DescribeStackSet": describe_stack_set,
    "DescribeStackSetOperation": describe_stack_set_operation,
    "ExecuteChangeSet": execute_change_set,
    "GetTemplate": get_template,
    "GetTemplateSummary": get_template_summary,
    "ListChangeSets": list_change_sets,
    "ListExports": list_exports,
    "ListImports": list_imports,
    "ListStackInstances": list_stack_instances,
    "ListStacks": list_stacks,
    "ListStackResources": list_stack_resources,
    "ListStackSets": list_stack_sets,
    "UpdateStack": update_stack,
    "UpdateStackSet": update_stack_set,
    "ValidateTemplate": validate_template,
}
# ---------------
# UTIL FUNCTIONS
# ---------------
@overload
def error_response(msg: str, code: int, code_string: str, xmlns: str = XMLNS_CF):
    ...
def error_response(*args, **kwargs):
    """Build an XML error response, defaulting the namespace to CloudFormation's.

    NOTE(review): a single @overload stub is unusual typing usage (overloads
    normally come in pairs plus an implementation); behavior-neutral, kept as-is.
    """
    kwargs["xmlns"] = kwargs.get("xmlns") or XMLNS_CF
    return flask_error_response_xml(*args, **kwargs)
def not_found_error(message: str, error_type: str = None):
    """Return a 404 error response; error type defaults to ResourceNotFoundException."""
    return error_response(
        message, code=404, code_string=error_type or "ResourceNotFoundException"
    )
def stack_not_found_error(stack_name: str, error_type: str = None):
    """404 for a missing stack; AWS reports this case as a ValidationError."""
    return not_found_error(
        "Stack with id %s does not exist" % stack_name,
        error_type=error_type or "ValidationError",
    )
def clone_stack_params(stack_params):
    """Return a copy of the stack parameters, falling back to the original.

    Cloning can fail for non-serializable values; in that case the original
    (shared, mutable) dict is returned as a best effort.
    """
    try:
        return clone(stack_params)
    except Exception as e:
        LOG.info("Unable to clone stack parameters: %s", e)
        return stack_params
def find_stack(stack_name: str) -> Optional[Stack]:
    """Return the first stack whose name or ID matches, or None if absent."""
    state = CloudFormationRegion.get()
    # Short-circuit on the first match instead of materializing all matches.
    return next(
        (s for s in state.stacks.values() if stack_name in [s.stack_name, s.stack_id]),
        None,
    )
def find_change_set(cs_name: str, stack_name: Optional[str] = None) -> Optional[StackChangeSet]:
    """Find a change set by name or ID, optionally scoped to a single stack."""
    state = CloudFormationRegion.get()
    stack = find_stack(stack_name)
    # Search only the named stack when it exists, otherwise all stacks.
    search_space = [stack] if stack else state.stacks.values()
    for candidate_stack in search_space:
        for change_set in candidate_stack.change_sets:
            if cs_name in [change_set.change_set_id, change_set.change_set_name]:
                return change_set
    return None
def _response(action, result):
    """Convert a handler result into a Flask response.

    dict/str results are first serialized to the AWS XML format; `requests`
    Response objects are then translated into Flask responses.
    """
    if isinstance(result, (dict, str)):
        result = requests_response_xml(action, result, xmlns=XMLNS_CF)
    if isinstance(result, Response):
        result = requests_to_flask_response(result)
    return result
def _get_status_filter_members(req_params):
"""
Creates a set of status from the requests parameters
The API request params specify two parameters of the endpoint:
- NextToken: Token for next page
- StackStatusFilter.member.N: Status to use as a filter (it conforms a list)
StackStatusFilter.member.N abstracts the list of status in the request, and it is sent
as different parameters, as the N suggests.
Docs:
https://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ListStacks.html
Returns:
set: set of status to filter upon
"""
return {
value for param, value in req_params.items() if param.startswith("StackStatusFilter.member")
}
| 36.247292 | 181 | 0.642224 |
01dd6e1257107cb7bb076f9c343e9b06751c4593 | 6,954 | py | Python | graphics/VTK-7.0.0/Filters/General/Testing/Python/tableBasedClip.py | hlzz/dotfiles | 0591f71230c919c827ba569099eb3b75897e163e | [
"BSD-3-Clause"
] | 4 | 2016-03-30T14:31:52.000Z | 2019-02-02T05:01:32.000Z | graphics/VTK-7.0.0/Filters/General/Testing/Python/tableBasedClip.py | hlzz/dotfiles | 0591f71230c919c827ba569099eb3b75897e163e | [
"BSD-3-Clause"
] | null | null | null | graphics/VTK-7.0.0/Filters/General/Testing/Python/tableBasedClip.py | hlzz/dotfiles | 0591f71230c919c827ba569099eb3b75897e163e | [
"BSD-3-Clause"
] | 2 | 2019-08-30T23:36:13.000Z | 2019-11-08T16:52:01.000Z | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
import sys
class TestClip(Testing.vtkTest):
    """Regression tests for vtkTableBasedClipDataSet across dataset types."""

    def testImage2DScalar(self):
        # Clip 2D image slices (one per axis-aligned plane) by scalar value
        # and check cell counts of both the kept and clipped-away outputs.
        planes = ['XY', 'XZ', 'YZ']
        expectedNCells = [38, 46, 42]
        expectedNClippedCells = [104, 104, 106]
        for plane, nCells, nClippedCells in zip(planes,expectedNCells,expectedNClippedCells):
            r = vtk.vtkRTAnalyticSource()
            r.SetXFreq(600);
            r.SetYFreq(400);
            r.SetZFreq(900);
            if plane == 'XY':
                r.SetWholeExtent(-5, 5, -5, 5, 0, 0)
            elif plane == 'XZ':
                r.SetWholeExtent(-5, 5, 0, 0, -5, 5)
            else:
                r.SetWholeExtent(0, 0, -5, 5, -5, 5)
            r.Update()
            c = vtk.vtkTableBasedClipDataSet()
            c.SetInputConnection(r.GetOutputPort())
            c.SetUseValueAsOffset(0)
            c.SetValue(150)
            c.SetInsideOut(1)
            c.SetGenerateClippedOutput(1)
            c.Update()
            self.assertEqual(c.GetOutput().GetNumberOfCells(), nCells)
            self.assertEqual(c.GetClippedOutput().GetNumberOfCells(), nClippedCells)

    def testImage(self):
        # Clip a 3D image with an implicit sphere.
        r = vtk.vtkRTAnalyticSource()
        r.SetWholeExtent(-5, 5, -5, 5, -5, 5)
        r.Update()
        s = vtk.vtkSphere()
        s.SetRadius(2)
        s.SetCenter(0,0,0)
        c = vtk.vtkTableBasedClipDataSet()
        c.SetInputConnection(r.GetOutputPort())
        c.SetClipFunction(s)
        c.SetInsideOut(1)
        c.Update()
        self.assertEqual(c.GetOutput().GetNumberOfCells(), 64)

    def testRectilinear(self):
        # Build a rectilinear grid matching the image extents, then clip it
        # with the same sphere as testImage.
        rt = vtk.vtkRTAnalyticSource()
        rt.SetWholeExtent(-5, 5, -5, 5, -5, 5)
        rt.Update()
        i = rt.GetOutput()
        r = vtk.vtkRectilinearGrid()
        dims = i.GetDimensions()
        r.SetDimensions(dims)
        exts = i.GetExtent()
        orgs = i.GetOrigin()
        xs = vtk.vtkFloatArray()
        xs.SetNumberOfTuples(dims[0])
        for d in range(dims[0]):
            xs.SetTuple1(d, orgs[0] + exts[0] + d)
        r.SetXCoordinates(xs)
        ys = vtk.vtkFloatArray()
        ys.SetNumberOfTuples(dims[1])
        for d in range(dims[1]):
            ys.SetTuple1(d, orgs[1] + exts[2] + d)
        r.SetYCoordinates(ys)
        zs = vtk.vtkFloatArray()
        zs.SetNumberOfTuples(dims[2])
        for d in range(dims[2]):
            zs.SetTuple1(d, orgs[2] + exts[4] + d)
        r.SetZCoordinates(zs)
        s = vtk.vtkSphere()
        s.SetRadius(2)
        s.SetCenter(0,0,0)
        c = vtk.vtkTableBasedClipDataSet()
        c.SetInputData(r)
        c.SetClipFunction(s)
        c.SetInsideOut(1)
        c.Update()
        self.assertEqual(c.GetOutput().GetNumberOfCells(), 64)

    def testStructured2D(self):
        # Clip 2D structured grids (one per axis plane) with a rotated cylinder.
        planes = ['XY', 'XZ', 'YZ']
        expectedNCells = [42, 34, 68]
        for plane, nCells in zip(planes,expectedNCells):
            rt = vtk.vtkRTAnalyticSource()
            if plane == 'XY':
                rt.SetWholeExtent(-5, 5, -5, 5, 0, 0)
            elif plane == 'XZ':
                rt.SetWholeExtent(-5, 5, 0, 0, -5, 5)
            else:
                rt.SetWholeExtent(0, 0, -5, 5, -5, 5)
            rt.Update()
            i = rt.GetOutput()
            st = vtk.vtkStructuredGrid()
            st.SetDimensions(i.GetDimensions())
            nps = i.GetNumberOfPoints()
            ps = vtk.vtkPoints()
            ps.SetNumberOfPoints(nps)
            for idx in range(nps):
                ps.SetPoint(idx, i.GetPoint(idx))
            st.SetPoints(ps)
            cyl = vtk.vtkCylinder()
            cyl.SetRadius(2)
            cyl.SetCenter(0,0,0)
            transform = vtk.vtkTransform()
            transform.RotateWXYZ(45,20,1,10)
            cyl.SetTransform(transform)
            c = vtk.vtkTableBasedClipDataSet()
            c.SetInputData(st)
            c.SetClipFunction(cyl)
            c.SetInsideOut(1)
            c.Update()
            self.assertEqual(c.GetOutput().GetNumberOfCells(), nCells)

    def testStructured(self):
        # Clip a 3D structured grid (copied from the image points) with a sphere.
        rt = vtk.vtkRTAnalyticSource()
        rt.SetWholeExtent(-5, 5, -5, 5, -5, 5)
        rt.Update()
        i = rt.GetOutput()
        st = vtk.vtkStructuredGrid()
        st.SetDimensions(i.GetDimensions())
        nps = i.GetNumberOfPoints()
        ps = vtk.vtkPoints()
        ps.SetNumberOfPoints(nps)
        for idx in range(nps):
            ps.SetPoint(idx, i.GetPoint(idx))
        st.SetPoints(ps)
        s = vtk.vtkSphere()
        s.SetRadius(2)
        s.SetCenter(0,0,0)
        c = vtk.vtkTableBasedClipDataSet()
        c.SetInputData(st)
        c.SetClipFunction(s)
        c.SetInsideOut(1)
        c.Update()
        self.assertEqual(c.GetOutput().GetNumberOfCells(), 64)

    def testUnstructured(self):
        # First clip a thresholded unstructured grid with a sphere, then an
        # EnSight dataset with a plane, and finish with an image regression test.
        rt = vtk.vtkRTAnalyticSource()
        rt.SetWholeExtent(-5, 5, -5, 5, -5, 5)
        t = vtk.vtkThreshold()
        t.SetInputConnection(rt.GetOutputPort())
        t.ThresholdByUpper(-10)
        s = vtk.vtkSphere()
        s.SetRadius(2)
        s.SetCenter(0,0,0)
        c = vtk.vtkTableBasedClipDataSet()
        c.SetInputConnection(t.GetOutputPort())
        c.SetClipFunction(s)
        c.SetInsideOut(1)
        c.Update()
        self.assertEqual(c.GetOutput().GetNumberOfCells(), 64)
        eg = vtk.vtkEnSightGoldReader()
        eg.SetCaseFileName(VTK_DATA_ROOT + "/Data/EnSight/elements.case")
        eg.Update()
        pl = vtk.vtkPlane()
        pl.SetOrigin(3.5, 3.5, 0.5)
        pl.SetNormal(0, 0, 1)
        c.SetInputConnection(eg.GetOutputPort())
        c.SetClipFunction(pl)
        c.SetInsideOut(1)
        c.Update()
        data = c.GetOutputDataObject(0).GetBlock(0)
        self.assertEqual(data.GetNumberOfCells(), 75)
        # Render the clipped EnSight block and compare against the baseline image.
        rw = vtk.vtkRenderWindow()
        ren = vtk.vtkRenderer()
        rw.AddRenderer(ren)
        mapper = vtk.vtkDataSetMapper()
        mapper.SetInputData(data)
        actor = vtk.vtkActor()
        actor.SetMapper(mapper)
        ren.AddActor(actor)
        ac = ren.GetActiveCamera()
        ac.SetPosition(-7.9, 9.7, 14.6)
        ac.SetFocalPoint(3.5, 3.5, 0.5)
        ac.SetViewUp(0.08, 0.93, -0.34)
        rw.Render()
        ren.ResetCameraClippingRange()
        rtTester = vtk.vtkTesting()
        for arg in sys.argv[1:]:
            rtTester.AddArgument(arg)
        rtTester.AddArgument("-V")
        rtTester.AddArgument("tableBasedClip.png")
        rtTester.SetRenderWindow(rw)
        rw.Render()
        rtResult = rtTester.RegressionTest(10)
if __name__ == "__main__":
    # Delegate to VTK's test harness, which runs the TestClip case.
    Testing.main([(TestClip, 'test')])
| 29.591489 | 94 | 0.533937 |
d8d090a475c0e257b183fb8b55de1158d1c569b2 | 1,391 | py | Python | examples/examples.py | vdejager/pipetaxon | 2dcf2f4cf3fd9ca8677ba313c0191df45ccc3f77 | [
"MIT"
] | 18 | 2018-11-26T03:04:18.000Z | 2021-05-12T16:25:27.000Z | examples/examples.py | vdejager/pipetaxon | 2dcf2f4cf3fd9ca8677ba313c0191df45ccc3f77 | [
"MIT"
] | 13 | 2018-11-26T17:41:41.000Z | 2022-02-19T19:49:51.000Z | examples/examples.py | vdejager/pipetaxon | 2dcf2f4cf3fd9ca8677ba313c0191df45ccc3f77 | [
"MIT"
] | 3 | 2018-11-26T15:20:15.000Z | 2020-11-27T07:26:17.000Z | import requests
# Usage examples for the pipetaxon REST API, executed top-to-bottom.
# Each call performs a live HTTP request and prints the JSON response.

# Get a taxonomy from pipetaxon.voorloop.com anonymously
#
response = requests.get('http://pipetaxon.voorloop.com/api/taxonomy/4373')
print(response.json())
# Get a taxonomy from pipetaxon.voorloop.com using a token ( http://pipetaxon.voorloop.com/register/ )
#
token = 'Token 2ea7fa00-YOUR-REAL-KEY-HERE-bc2aa7caf531'
response = requests.get('http://pipetaxon.voorloop.com/api/taxonomy/4373', headers={'Authorization': token})
print(response.json())
# Get a taxonomy from your local pipetaxon instance
#
response = requests.get('http://localhost:8000/api/taxonomy/4373')
print(response.json())
# Retrieve the lineage of taxid 4373
#
response = requests.get('http://pipetaxon.voorloop.com/api/taxonomy/4373/lineage')
print(response.json())
# Get all taxonomies at rank 'Order'
#
response = requests.get('http://pipetaxon.voorloop.com/api/taxonomy/?rank=order')
print(response.json())
# Get all taxonomies at division 4
#
response = requests.get('http://pipetaxon.voorloop.com/api/taxonomy/?division=4')
print(response.json())
# Search for taxonomies named *Clostridium*
#
response = requests.get('http://pipetaxon.voorloop.com/api/taxonomy/?search=clostridium')
print(response.json())
# Execute an LCA (lowest common ancestor) query on a taxid list
#
response = requests.get('http://pipetaxon.voorloop.com/api/taxonomy/lca/?taxid_list=2363,37487,87346')
print(response.json())
| 23.982759 | 108 | 0.751258 |
74ef5f0c4733be5687c50fe2a444c53cadb70fca | 18,679 | py | Python | qa/rpc-tests/test_framework/comptool.py | citypayorg/utb | 2b9aff79fc19367c67d986ceae4511f7ef116e6a | [
"MIT"
] | 1 | 2020-04-09T09:23:12.000Z | 2020-04-09T09:23:12.000Z | qa/rpc-tests/test_framework/comptool.py | citypayorg/utb | 2b9aff79fc19367c67d986ceae4511f7ef116e6a | [
"MIT"
] | 1 | 2020-02-19T20:32:41.000Z | 2020-02-19T20:32:41.000Z | qa/rpc-tests/test_framework/comptool.py | citypayorg/utb | 2b9aff79fc19367c67d986ceae4511f7ef116e6a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Compare two or more utbds to each other.
To use, create a class that implements get_tests(), and pass it in
as the test generator to TestManager. get_tests() should be a python
generator that returns TestInstance objects. See below for definition.
TestNode behaves as follows:
Configure with a BlockStore and TxStore
on_inv: log the message but don't request
on_headers: log the chain tip
on_pong: update ping response map (for synchronization)
on_getheaders: provide headers via BlockStore
on_getdata: provide blocks via BlockStore
"""
from .mininode import *
from .blockstore import BlockStore, TxStore
from .util import p2p_port
import logging
logger=logging.getLogger("TestFramework.comptool")
global mininode_lock
class RejectResult(object):
    """Expected-rejection outcome for a transaction or block.

    An empty reason acts as a wildcard: any actual reason matches, since
    every bytestring starts with b''.
    """

    def __init__(self, code, reason=b''):
        self.code = code
        self.reason = reason

    def match(self, other):
        # Codes must be identical; our reason must be a prefix of theirs.
        return self.code == other.code and other.reason.startswith(self.reason)

    def __repr__(self):
        return '%i:%s' % (self.code, self.reason or '*')
class TestNode(NodeConnCB):
    """P2P callback handler backed by a BlockStore/TxStore.

    Logs inventory announcements, serves getheaders/getdata from the stores,
    records reject messages, and uses ping/pong nonces for synchronization.
    """

    def __init__(self, block_store, tx_store):
        NodeConnCB.__init__(self)
        self.conn = None
        self.bestblockhash = None
        self.block_store = block_store
        # Maps block hash -> True once the peer has requested it via getdata.
        self.block_request_map = {}
        self.tx_store = tx_store
        # Maps tx hash -> True once the peer has requested it via getdata.
        self.tx_request_map = {}
        # Maps object hash -> RejectResult received from the peer.
        self.block_reject_map = {}
        self.tx_reject_map = {}
        # When the pingmap is non-empty we're waiting for
        # a response
        self.pingMap = {}
        self.lastInv = []
        self.closed = False
    def on_close(self, conn):
        self.closed = True
    def add_connection(self, conn):
        self.conn = conn
    def on_headers(self, conn, message):
        # Track the peer's best known header (the last one announced).
        if len(message.headers) > 0:
            best_header = message.headers[-1]
            best_header.calc_sha256()
            self.bestblockhash = best_header.sha256
    def on_getheaders(self, conn, message):
        # Serve headers out of the shared block store.
        response = self.block_store.headers_for(message.locator, message.hashstop)
        if response is not None:
            conn.send_message(response)
    def on_getdata(self, conn, message):
        # Serve blocks/transactions from the stores and note what was requested.
        [conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
        [conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
        for i in message.inv:
            if i.type == 1:
                self.tx_request_map[i.hash] = True
            elif i.type == 2:
                self.block_request_map[i.hash] = True
    def on_inv(self, conn, message):
        self.lastInv = [x.hash for x in message.inv]
    def on_pong(self, conn, message):
        # A pong resolves the matching outstanding ping nonce.
        try:
            del self.pingMap[message.nonce]
        except KeyError:
            raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
    def on_reject(self, conn, message):
        if message.message == b'tx':
            self.tx_reject_map[message.data] = RejectResult(message.code, message.reason)
        if message.message == b'block':
            self.block_reject_map[message.data] = RejectResult(message.code, message.reason)
    def send_inv(self, obj):
        # inv type 2 = block, 1 = transaction.
        mtype = 2 if isinstance(obj, CBlock) else 1
        self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
    def send_getheaders(self):
        # We ask for headers from their last tip.
        m = msg_getheaders()
        m.locator = self.block_store.get_locator(self.bestblockhash)
        self.conn.send_message(m)
    def send_header(self, header):
        m = msg_headers()
        m.headers.append(header)
        self.conn.send_message(m)
    # This assumes BIP31
    def send_ping(self, nonce):
        self.pingMap[nonce] = True
        self.conn.send_message(msg_ping(nonce))
    def received_ping_response(self, nonce):
        return nonce not in self.pingMap
    def send_mempool(self):
        # Clear the last inventory so the mempool reply can be observed fresh.
        self.lastInv = []
        self.conn.send_message(msg_mempool())
# TestInstance:
#
# Instances of these are generated by the test generator, and fed into the
# comptool.
#
# "blocks_and_transactions" should be an array of
# [obj, True/False/None, hash/None]:
# - obj is either a CBlock, CBlockHeader, or a CTransaction, and
# - the second value indicates whether the object should be accepted
# into the blockchain or mempool (for tests where we expect a certain
# answer), or "None" if we don't expect a certain answer and are just
# comparing the behavior of the nodes being tested.
# - the third value is the hash to test the tip against (if None or omitted,
# use the hash of the block)
# - NOTE: if a block header, no test is performed; instead the header is
# just added to the block_store. This is to facilitate block delivery
# when communicating with headers-first clients (when withholding an
# intermediate block).
# sync_every_block: if True, then each block will be inv'ed, synced, and
# nodes will be tested based on the outcome for the block. If False,
# then inv's accumulate until all blocks are processed (or max inv size
# is reached) and then sent out in one inv message. Then the final block
# will be synced across all connections, and the outcome of the final
# block will be tested.
# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
# on the final tx is None, then contents of entire mempool are compared
# across all connections. (If outcome of final tx is specified as true
# or false, then only the last tx is tested against outcome.)
class TestInstance(object):
    """A batch of block/transaction test objects plus sync behavior flags."""

    def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False):
        # A falsy `objects` argument yields a fresh empty list.
        self.blocks_and_transactions = objects or []
        self.sync_every_block = sync_every_block
        self.sync_every_tx = sync_every_tx
class TestManager(object):
def __init__(self, testgen, datadir):
self.test_generator = testgen
self.connections = []
self.test_nodes = []
self.block_store = BlockStore(datadir)
self.tx_store = TxStore(datadir)
self.ping_counter = 1
def add_all_connections(self, nodes):
for i in range(len(nodes)):
# Create a p2p connection to each node
test_node = TestNode(self.block_store, self.tx_store)
self.test_nodes.append(test_node)
self.connections.append(NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node))
# Make sure the TestNode (callback class) has a reference to its
# associated NodeConn
test_node.add_connection(self.connections[-1])
def clear_all_connections(self):
self.connections = []
self.test_nodes = []
def wait_for_disconnections(self):
def disconnected():
return all(node.closed for node in self.test_nodes)
return wait_until(disconnected, timeout=10)
def wait_for_verack(self):
def veracked():
return all(node.verack_received for node in self.test_nodes)
return wait_until(veracked, timeout=10)
def wait_for_pings(self, counter, timeout=float('inf')):
def received_pongs():
return all(node.received_ping_response(counter) for node in self.test_nodes)
return wait_until(received_pongs, timeout=timeout)
# sync_blocks: Wait for all connections to request the blockhash given
# then send get_headers to find out the tip of each node, and synchronize
# the response by using a ping (and waiting for pong with same nonce).
def sync_blocks(self, blockhash, num_blocks):
def blocks_requested():
return all(
blockhash in node.block_request_map and node.block_request_map[blockhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(blocks_requested, attempts=20*num_blocks, sleep=0.1):
raise AssertionError("Not all nodes requested block")
# Send getheaders message
[ c.cb.send_getheaders() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter, timeout=300)
self.ping_counter += 1
# Analogous to sync_block (see above)
def sync_transaction(self, txhash, num_events):
# Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
def transaction_requested():
return all(
txhash in node.tx_request_map and node.tx_request_map[txhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(transaction_requested, attempts=20*num_events):
raise AssertionError("Not all nodes requested transaction")
# Get the mempool
[ c.cb.send_mempool() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Sort inv responses from each node
with mininode_lock:
[ c.cb.lastInv.sort() for c in self.connections ]
# Verify that the tip of each connection all agree with each other, and
# with the expected outcome (if given)
def check_results(self, blockhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
return False
elif isinstance(outcome, RejectResult): # Check that block was rejected w/ code
if c.cb.bestblockhash == blockhash:
return False
if blockhash not in c.cb.block_reject_map:
logger.error('Block not in reject map: %064x' % (blockhash))
return False
if not outcome.match(c.cb.block_reject_map[blockhash]):
logger.error('Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash))
return False
elif ((c.cb.bestblockhash == blockhash) != outcome):
return False
return True
# Either check that the mempools all agree with each other, or that
# txhash's presence in the mempool matches the outcome specified.
# This is somewhat of a strange comparison, in that we're either comparing
# a particular tx to an outcome, or the entire mempools altogether;
# perhaps it would be useful to add the ability to check explicitly that
# a particular tx's existence in the mempool is the same across all nodes.
def check_mempool(self, txhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
# Make sure the mempools agree with each other
if c.cb.lastInv != self.connections[0].cb.lastInv:
return False
elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code
if txhash in c.cb.lastInv:
return False
if txhash not in c.cb.tx_reject_map:
logger.error('Tx not in reject map: %064x' % (txhash))
return False
if not outcome.match(c.cb.tx_reject_map[txhash]):
logger.error('Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash))
return False
elif ((txhash in c.cb.lastInv) != outcome):
return False
return True
    def run(self):
        """Main driver: replay every TestInstance against all connections.

        Each test item is a block, block header or transaction paired with
        an expected outcome.  Items are delivered to every peer; depending
        on the instance's sync_every_block / sync_every_tx flags the chain
        tip and mempool are validated after every item or only once at the
        end.  Raises AssertionError on the first mismatch.
        """
        # Wait until verack is received
        self.wait_for_verack()
        test_number = 1
        for test_instance in self.test_generator.get_tests():
            # We use these variables to keep track of the last block
            # and last transaction in the tests, which are used
            # if we're not syncing on every block or every tx.
            [ block, block_outcome, tip ] = [ None, None, None ]
            [ tx, tx_outcome ] = [ None, None ]
            invqueue = []
            for test_obj in test_instance.blocks_and_transactions:
                b_or_t = test_obj[0]
                outcome = test_obj[1]
                # Determine if we're dealing with a block or tx
                if isinstance(b_or_t, CBlock): # Block test runner
                    block = b_or_t
                    block_outcome = outcome
                    tip = block.sha256
                    # each test_obj can have an optional third argument
                    # to specify the tip we should compare with
                    # (default is to use the block being tested)
                    if len(test_obj) >= 3:
                        tip = test_obj[2]
                    # Add to shared block_store, set as current block
                    # If there was an open getdata request for the block
                    # previously, and we didn't have an entry in the
                    # block_store, then immediately deliver, because the
                    # node wouldn't send another getdata request while
                    # the earlier one is outstanding.
                    first_block_with_hash = True
                    if self.block_store.get(block.sha256) is not None:
                        first_block_with_hash = False
                    with mininode_lock:
                        self.block_store.add_block(block)
                        for c in self.connections:
                            if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True:
                                # There was a previous request for this block hash
                                # Most likely, we delivered a header for this block
                                # but never had the block to respond to the getdata
                                c.send_message(msg_block(block))
                            else:
                                c.cb.block_request_map[block.sha256] = False
                    # Either send inv's to each node and sync, or add
                    # to invqueue for later inv'ing.
                    if (test_instance.sync_every_block):
                        # if we expect success, send inv and sync every block
                        # if we expect failure, just push the block and see what happens.
                        if outcome == True:
                            [ c.cb.send_inv(block) for c in self.connections ]
                            self.sync_blocks(block.sha256, 1)
                        else:
                            [ c.send_message(msg_block(block)) for c in self.connections ]
                            [ c.cb.send_ping(self.ping_counter) for c in self.connections ]
                            self.wait_for_pings(self.ping_counter, timeout=300)
                            self.ping_counter += 1
                        if (not self.check_results(tip, outcome)):
                            raise AssertionError("Test failed at test %d" % test_number)
                    else:
                        # Not syncing per block: announce via header only.
                        block_header = CBlockHeader(block)
                        [ c.cb.send_header(block_header) for c in self.connections ]
                elif isinstance(b_or_t, CBlockHeader):
                    block_header = b_or_t
                    self.block_store.add_header(block_header)
                    [ c.cb.send_header(block_header) for c in self.connections ]
                else: # Tx test runner
                    assert(isinstance(b_or_t, CTransaction))
                    tx = b_or_t
                    tx_outcome = outcome
                    # Add to shared tx store and clear map entry
                    with mininode_lock:
                        self.tx_store.add_transaction(tx)
                        for c in self.connections:
                            c.cb.tx_request_map[tx.sha256] = False
                    # Again, either inv to all nodes or save for later
                    if (test_instance.sync_every_tx):
                        [ c.cb.send_inv(tx) for c in self.connections ]
                        self.sync_transaction(tx.sha256, 1)
                        if (not self.check_mempool(tx.sha256, outcome)):
                            raise AssertionError("Test failed at test %d" % test_number)
                    else:
                        invqueue.append(CInv(1, tx.sha256))
                # Ensure we're not overflowing the inv queue
                if len(invqueue) == MAX_INV_SZ:
                    [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
                    invqueue = []
            # Do final sync if we weren't syncing on every block or every tx.
            if (not test_instance.sync_every_block and block is not None):
                if len(invqueue) > 0:
                    [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
                    invqueue = []
                self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions))
                if (not self.check_results(tip, block_outcome)):
                    raise AssertionError("Block test failed at test %d" % test_number)
            if (not test_instance.sync_every_tx and tx is not None):
                if len(invqueue) > 0:
                    [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
                    invqueue = []
                self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
                if (not self.check_mempool(tx.sha256, tx_outcome)):
                    raise AssertionError("Mempool test failed at test %d" % test_number)
            logger.info("Test %d: PASS" % test_number)
            test_number += 1
        # Tear down: drop all peer connections and close the shared stores.
        [ c.disconnect_node() for c in self.connections ]
        self.wait_for_disconnections()
        self.block_store.close()
        self.tx_store.close()
| 45.337379 | 149 | 0.601156 |
45206bbf15872f24a1fcaaf32e70f2afa10161b1 | 5,563 | py | Python | python/tests/test_standard_files.py | benjeffery/kastore | 186fa617b889bc88f86fab1559368934149e21e2 | [
"MIT"
] | 4 | 2019-01-23T00:49:35.000Z | 2021-12-03T15:39:28.000Z | python/tests/test_standard_files.py | benjeffery/kastore | 186fa617b889bc88f86fab1559368934149e21e2 | [
"MIT"
] | 168 | 2018-03-21T18:59:19.000Z | 2022-03-04T13:03:55.000Z | python/tests/test_standard_files.py | benjeffery/kastore | 186fa617b889bc88f86fab1559368934149e21e2 | [
"MIT"
] | 8 | 2019-03-09T14:30:28.000Z | 2021-03-26T07:17:13.000Z | """
Tests reading in the standard test files.
"""
import os.path
import unittest
import numpy as np
import kastore as kas
class StandardFilesMixin:
    """
    Read in the standard test files: malformed files must raise the
    expected errors and well-formed files must round-trip correctly.

    Subclasses supply ``engine`` (and optionally ``read_all``) class
    attributes selecting the kastore backend configuration under test.
    """

    @classmethod
    def setUpClass(cls):
        # Figure out where this is being run from and set the test data
        # path accordingly.
        cwd = os.getcwd()
        cls.test_data_path = "test-data"
        if cwd.endswith("python"):
            cls.test_data_path = "../test-data"

    def read_file(self, filename):
        """Load *filename* (relative to the test-data dir) with the configured engine."""
        full_path = os.path.join(self.test_data_path, filename)
        # Bug fix: read_all was hard-coded to False, so the *ReadAll
        # subclasses never actually exercised read_all=True.  Honour the
        # subclass attribute, defaulting to False when it is absent.
        return kas.load(
            full_path, engine=self.engine, read_all=getattr(self, "read_all", False)
        )

    def assert_format_error(self, *filenames):
        """Assert that reading each of *filenames* raises kas.FileFormatError."""
        for filename in filenames:
            self.assertRaises(kas.FileFormatError, self.read_file, filename)

    def test_empty_file(self):
        self.assertRaises(EOFError, self.read_file, "malformed/empty_file.kas")

    def test_bad_type(self):
        self.assert_format_error(
            "malformed/bad_type_11.kas",
            "malformed/bad_type_20.kas",
        )

    def test_bad_filesizes(self):
        # All combinations of a bad offset and a bad declared size.
        self.assert_format_error(
            *[
                f"malformed/bad_filesize_{offset}_{size}.kas"
                for offset in (0, 10)
                for size in (-1, 1, 1024)
            ]
        )

    def test_bad_magic_number(self):
        self.assert_format_error("malformed/bad_magic_number.kas")

    def test_version_0(self):
        self.assertRaises(
            kas.VersionTooOldError, self.read_file, "malformed/version_0.kas"
        )

    def test_version_100(self):
        self.assertRaises(
            kas.VersionTooNewError, self.read_file, "malformed/version_100.kas"
        )

    def test_truncated_file(self):
        self.assert_format_error("malformed/truncated_file.kas")

    def test_key_offset_outside_file(self):
        self.assert_format_error("malformed/key_offset_outside_file.kas")

    def test_array_offset_outside_file(self):
        self.assert_format_error("malformed/array_offset_outside_file.kas")

    def test_key_len_outside_file(self):
        self.assert_format_error("malformed/key_len_outside_file.kas")

    def test_array_len_outside_file(self):
        self.assert_format_error("malformed/array_len_outside_file.kas")

    def test_bad_array_start(self):
        self.assert_format_error(
            *[
                f"malformed/bad_array_start_{offset}.kas"
                for offset in (-8, -1, 1, 8)
            ]
        )

    def test_truncated_file_correct_size(self):
        self.assert_format_error(
            *[
                f"malformed/truncated_file_correct_size_{size}.kas"
                for size in (100, 128, 129, 200)
            ]
        )

    def test_all_types(self):
        # Every supported dtype must round-trip an arange of n elements.
        dtypes = [
            "int8",
            "uint8",
            "uint32",
            "int32",
            "uint64",
            "int64",
            "float32",
            "float64",
        ]
        for n in range(10):
            filename = f"v1/all_types_{n}_elements.kas"
            data = self.read_file(filename)
            for dtype in dtypes:
                self.assertTrue(np.array_equal(data[dtype], np.arange(n, dtype=dtype)))
class TestStandardFilesPyEngine(StandardFilesMixin, unittest.TestCase):
    # Standard files read with the pure-Python engine, streaming reads.
    engine = kas.PY_ENGINE
    read_all = False
class TestStandardFilesCEngine(StandardFilesMixin, unittest.TestCase):
    # Standard files read with the C engine, streaming reads.
    engine = kas.C_ENGINE
    read_all = False
class TestStandardFilesPyEngineReadAll(StandardFilesMixin, unittest.TestCase):
    # Pure-Python engine with read_all=True (whole file loaded up front).
    engine = kas.PY_ENGINE
    read_all = True
class TestStandardFilesCEngineReadAll(StandardFilesMixin, unittest.TestCase):
    # C engine with read_all=True (whole file loaded up front).
    engine = kas.C_ENGINE
    read_all = True
class TestStandardFilesLoads(StandardFilesMixin, unittest.TestCase):
    # Variant that bypasses kas.load: the raw bytes are read from disk and
    # decoded in memory with kas.loads().
    def read_file(self, filename):
        """Read *filename*'s bytes and decode them with kas.loads."""
        full_path = os.path.join(self.test_data_path, filename)
        with open(full_path, "rb") as f:
            encoded = f.read()
        return kas.loads(encoded)
| 30.398907 | 88 | 0.633831 |
9449d85876dd907000c8ed7ded20a7e188334300 | 83 | py | Python | nes/processors/cpu/instructions/load_store/ldy.py | Hexadorsimal/pynes | dbb3d40c1240fa27f70fa798bcec09188755eec2 | [
"MIT"
] | 1 | 2017-05-13T18:57:09.000Z | 2017-05-13T18:57:09.000Z | nes/processors/cpu/instructions/load_store/ldy.py | Hexadorsimal/py6502 | dbb3d40c1240fa27f70fa798bcec09188755eec2 | [
"MIT"
] | 7 | 2020-10-24T17:16:56.000Z | 2020-11-01T14:10:23.000Z | nes/processors/cpu/instructions/load_store/ldy.py | Hexadorsimal/pynes | dbb3d40c1240fa27f70fa798bcec09188755eec2 | [
"MIT"
] | null | null | null | from .load import LoadInstruction
class Ldy(LoadInstruction):
    # LDY - 6502 "load Y register" instruction.  The shared LoadInstruction
    # machinery loads into whichever CPU register is named here.
    register = 'y'
| 13.833333 | 33 | 0.73494 |
7168b44361eae44cacf4237d3e0064e3310b2f2e | 1,699 | py | Python | blog/migrations/0001_initial.py | k115626/myblog | 71df4a1c2e300f41ba2580200ec17e9b778de138 | [
"MIT"
] | null | null | null | blog/migrations/0001_initial.py | k115626/myblog | 71df4a1c2e300f41ba2580200ec17e9b778de138 | [
"MIT"
] | null | null | null | blog/migrations/0001_initial.py | k115626/myblog | 71df4a1c2e300f41ba2580200ec17e9b778de138 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.3 on 2021-01-01 23:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema for the blog app: Category, Tag and Post.
    # Applied migrations are a historical record - do not edit by hand; add a
    # follow-up migration for any change.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
            ],
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
            ],
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=68)),
                ('body', models.TextField()),
                ('created_time', models.DateTimeField()),
                ('modified_time', models.DateTimeField()),
                ('excerpt', models.CharField(blank=True, max_length=256)),
                # NOTE(review): on_delete expects a deletion handler such as
                # django.db.models.deletion.CASCADE, not True -- confirm and
                # correct in a follow-up migration.
                ('author', models.ForeignKey(on_delete=True, to=settings.AUTH_USER_MODEL)),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category')),
                ('tags', models.ManyToManyField(blank=True, to='blog.Tag')),
            ],
        ),
    ]
| 36.934783 | 114 | 0.57681 |
51abaee307242d5909b5e2b4cf4b4c2aa9423cba | 1,867 | py | Python | examples/tutorial.py | kaji-project/influxdb-python | b88454a598f1f638b9d1679fc2dd712fec30dc51 | [
"MIT"
] | 1 | 2021-11-12T23:51:31.000Z | 2021-11-12T23:51:31.000Z | examples/tutorial.py | kaji-project/influxdb-python | b88454a598f1f638b9d1679fc2dd712fec30dc51 | [
"MIT"
] | null | null | null | examples/tutorial.py | kaji-project/influxdb-python | b88454a598f1f638b9d1679fc2dd712fec30dc51 | [
"MIT"
] | 1 | 2020-12-18T14:26:53.000Z | 2020-12-18T14:26:53.000Z | import argparse
from influxdb import InfluxDBClient
def main(host='localhost', port=8086):
    """Walk through the InfluxDB API end to end.

    Creates an example database, manages a database user, writes sample
    points, runs a query, and finally drops the database again.

    Args:
        host: InfluxDB server hostname.
        port: InfluxDB HTTP API port.
    """
    user = 'root'
    password = 'root'
    dbname = 'example'
    dbuser = 'smly'
    dbuser_password = 'my_secret_password'
    query = 'select column_one from foo;'
    # Two sample points for series "foo" with three columns each.
    json_body = [{
        "points": [
            ["1", 1, 1.0],
            ["2", 2, 2.0]
        ],
        "name": "foo",
        "columns": ["column_one", "column_two", "column_three"]
    }]
    client = InfluxDBClient(host, port, user, password, dbname)
    print("Create database: " + dbname)
    client.create_database(dbname)
    dbusers = client.get_database_users()
    print("Get list of database users: {0}".format(dbusers))
    print("Add database user: " + dbuser)
    client.add_database_user(dbuser, dbuser_password)
    print("Make user a database admin")
    client.set_database_admin(dbuser)
    print("Remove admin privilege from user")
    client.unset_database_admin(dbuser)
    dbusers = client.get_database_users()
    print("Get list of database users again: {0}".format(dbusers))
    print("Switch user: " + dbuser)
    client.switch_user(dbuser, dbuser_password)
    print("Write points: {0}".format(json_body))
    client.write_points(json_body)
    # Typo fix: message previously read "Queying data".
    print("Querying data: " + query)
    result = client.query(query)
    print("Result: {0}".format(result))
    print("Switch user: " + user)
    client.switch_user(user, password)
    print("Delete database: " + dbname)
    client.delete_database(dbname)
def parse_args():
    """Parse the required --host/--port command line options."""
    arg_parser = argparse.ArgumentParser(
        description='example code to play with InfluxDB')
    arg_parser.add_argument('--host', type=str, required=True)
    arg_parser.add_argument('--port', type=int, required=True)
    return arg_parser.parse_args()
if __name__ == '__main__':
    # Script entry point: connection details come from --host/--port.
    args = parse_args()
    main(host=args.host, port=args.port)
| 26.295775 | 66 | 0.651312 |
c9f8c3b78a456335dcef78ce9af0cd8dd9327853 | 2,899 | py | Python | mmdet/core/bbox/samplers/ohem_sampler.py | yutliu/AerialDetection_val | cf84255c75e7710ab9aa24b3fa407a9a658256a0 | [
"Apache-2.0"
] | null | null | null | mmdet/core/bbox/samplers/ohem_sampler.py | yutliu/AerialDetection_val | cf84255c75e7710ab9aa24b3fa407a9a658256a0 | [
"Apache-2.0"
] | null | null | null | mmdet/core/bbox/samplers/ohem_sampler.py | yutliu/AerialDetection_val | cf84255c75e7710ab9aa24b3fa407a9a658256a0 | [
"Apache-2.0"
] | null | null | null | import torch
from .base_sampler import BaseSampler
from ..transforms import bbox2roi
class OHEMSampler(BaseSampler):
    """Online Hard Example Mining sampler.

    Instead of picking positives/negatives at random, candidates are scored
    with the current bbox head and the ones with the highest classification
    loss are kept.
    """

    def __init__(self,
                 num,
                 pos_fraction,
                 context,
                 neg_pos_ub=-1,
                 add_gt_as_proposals=True,
                 **kwargs):
        super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub,
                                          add_gt_as_proposals)
        # ``context`` is the detector providing the roi extractor and bbox
        # head; cascade detectors (which have ``num_stages``) expose one
        # extractor/head per stage, so pick the current stage's pair.
        if not hasattr(context, 'num_stages'):
            self.bbox_roi_extractor = context.bbox_roi_extractor
            self.bbox_head = context.bbox_head
        else:
            self.bbox_roi_extractor = context.bbox_roi_extractor[
                context.current_stage]
            self.bbox_head = context.bbox_head[context.current_stage]

    def hard_mining(self, inds, num_expected, bboxes, labels, feats):
        """Return the ``num_expected`` indices from ``inds`` whose boxes have
        the highest classification loss under the current bbox head.

        Runs under torch.no_grad() so the scoring forward pass does not
        contribute gradients.
        """
        with torch.no_grad():
            rois = bbox2roi([bboxes])
            bbox_feats = self.bbox_roi_extractor(
                feats[:self.bbox_roi_extractor.num_inputs], rois)
            cls_score, _ = self.bbox_head(bbox_feats)
            # Per-sample (unreduced) classification loss is the hardness score.
            all_loss = self.bbox_head.loss(
                cls_score=cls_score,
                bbox_pred=None,
                labels=labels,
                label_weights=cls_score.new_ones(cls_score.size(0)),
                bbox_targets=None,
                bbox_weights=None,
                reduce=False)
            # Rotated-bbox heads name their loss differently; fall back to it.
            if 'loss_cls' in all_loss:
                loss = all_loss['loss_cls']
            else:
                loss = all_loss['rbbox_loss_cls']
            _, topk_loss_inds = loss.topk(num_expected)
        return inds[topk_loss_inds]

    def _sample_pos(self,
                    assign_result,
                    num_expected,
                    bboxes=None,
                    feats=None,
                    **kwargs):
        # Sample some hard positive samples
        pos_inds = torch.nonzero(assign_result.gt_inds > 0)
        if pos_inds.numel() != 0:
            pos_inds = pos_inds.squeeze(1)
        if pos_inds.numel() <= num_expected:
            # Fewer candidates than requested: keep them all.
            return pos_inds
        else:
            return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds],
                                    assign_result.labels[pos_inds], feats)

    def _sample_neg(self,
                    assign_result,
                    num_expected,
                    bboxes=None,
                    feats=None,
                    **kwargs):
        # Sample some hard negative samples
        neg_inds = torch.nonzero(assign_result.gt_inds == 0)
        if neg_inds.numel() != 0:
            neg_inds = neg_inds.squeeze(1)
        if len(neg_inds) <= num_expected:
            # Fewer candidates than requested: keep them all.
            return neg_inds
        else:
            return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds],
                                    assign_result.labels[neg_inds], feats)
| 37.166667 | 77 | 0.540186 |
4cebd3983717a90b5572d9f4f68cc60423e9d1d0 | 2,903 | py | Python | test/functional/mempool_resurrect.py | ComputerCraftr/Xuez-Core | c7428cf4e51363eb6510e4541e7292c98a7d3a9c | [
"MIT"
] | 1 | 2022-01-18T21:37:41.000Z | 2022-01-18T21:37:41.000Z | test/functional/mempool_resurrect.py | ComputerCraftr/Xuez-Core | c7428cf4e51363eb6510e4541e7292c98a7d3a9c | [
"MIT"
] | null | null | null | test/functional/mempool_resurrect.py | ComputerCraftr/Xuez-Core | c7428cf4e51363eb6510e4541e7292c98a7d3a9c | [
"MIT"
] | 2 | 2021-02-24T04:17:06.000Z | 2021-11-30T23:41:27.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test resurrection of mined transactions when the blockchain is re-organized."""
from test_framework.blocktools import create_raw_transaction
from test_framework.test_framework import XUEZTestFramework
from test_framework.util import assert_equal
class MempoolCoinbaseTest(XUEZTestFramework):
    """Check that transactions mined in disconnected blocks re-enter the mempool."""

    def set_test_params(self):
        self.num_nodes = 1

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        node0_address = self.nodes[0].getnewaddress()
        # Spend block 1/2/3's coinbase transactions
        # Mine a block.
        # Create three more transactions, spending the spends
        # Mine another block.
        # ... make sure all the transactions are confirmed
        # Invalidate both blocks
        # ... make sure all the transactions are put back in the mempool
        # Mine a new block
        # ... make sure all the transactions are confirmed again.
        b = [self.nodes[0].getblockhash(n) for n in range(1, 4)]
        coinbase_txids = [self.nodes[0].getblock(h)['tx'][0] for h in b]
        spends1_raw = [create_raw_transaction(self.nodes[0], txid, node0_address, amount=49.99) for txid in coinbase_txids]
        spends1_id = [self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw]
        blocks = []
        blocks.extend(self.nodes[0].generate(1))
        # Second generation of spends chained off the first.
        spends2_raw = [create_raw_transaction(self.nodes[0], txid, node0_address, amount=49.98) for txid in spends1_id]
        spends2_id = [self.nodes[0].sendrawtransaction(tx) for tx in spends2_raw]
        blocks.extend(self.nodes[0].generate(1))
        # mempool should be empty, all txns confirmed
        assert_equal(set(self.nodes[0].getrawmempool()), set())
        for txid in spends1_id+spends2_id:
            tx = self.nodes[0].gettransaction(txid)
            assert tx["confirmations"] > 0
        # Use invalidateblock to re-org back
        for node in self.nodes:
            node.invalidateblock(blocks[0])
        # All txns should be back in mempool with 0 confirmations
        assert_equal(set(self.nodes[0].getrawmempool()), set(spends1_id+spends2_id))
        for txid in spends1_id+spends2_id:
            tx = self.nodes[0].gettransaction(txid)
            assert tx["confirmations"] == 0
        # Generate another block, they should all get mined
        self.nodes[0].generate(1)
        # mempool should be empty, all txns confirmed
        assert_equal(set(self.nodes[0].getrawmempool()), set())
        for txid in spends1_id+spends2_id:
            tx = self.nodes[0].gettransaction(txid)
            assert tx["confirmations"] > 0
if __name__ == '__main__':
MempoolCoinbaseTest().main()
| 40.887324 | 123 | 0.673786 |
c70631884d2468e74e9d159701d1a04f31586d95 | 654 | py | Python | quantipy/core/helpers/constants.py | encount/quantipy3 | 01fe350b79594ba162cd48ce91f6e547e74265fe | [
"MIT"
] | 67 | 2015-07-29T18:39:46.000Z | 2022-01-10T12:32:26.000Z | quantipy/core/helpers/constants.py | encount/quantipy3 | 01fe350b79594ba162cd48ce91f6e547e74265fe | [
"MIT"
] | 1,052 | 2015-07-10T15:14:17.000Z | 2021-11-14T11:14:58.000Z | quantipy/core/helpers/constants.py | encount/quantipy3 | 01fe350b79594ba162cd48ce91f6e547e74265fe | [
"MIT"
] | 15 | 2016-04-06T14:40:08.000Z | 2020-08-12T18:36:30.000Z | #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
''' Constant mapping appropriate quantipy types to pandas dtypes
'''
DTYPE_MAP = {
"float": ["float64", "float32", "float16"],
"int": ["int64", "int32", "int16", "int8", "int0", "float64", "float32", "float16"],
"string": ["object"],
"date": ["datetime64"],
"time": ["timedelta64"],
"bool": ["bool"],
"single": ["int64", "int32", "int16", "int8", "int0", "float64", "float32", "float16"],
"dichotomous set": [],
"categorical set": [],
"delimited set": ["object"],
"grid": []
}
MAPPED_PATTERN = "^[^@].*[@].*[^@]$" | 36.333333 | 92 | 0.455657 |
db32250e2ee6db08e45c90f1284709f55acdff6f | 517 | py | Python | project/client/crawlers/job_crawler.py | zubairalam/getyourjobdone | 5e3f37b7679180e45cfea31ab7d60f60e9e30f65 | [
"MIT"
] | null | null | null | project/client/crawlers/job_crawler.py | zubairalam/getyourjobdone | 5e3f37b7679180e45cfea31ab7d60f60e9e30f65 | [
"MIT"
] | null | null | null | project/client/crawlers/job_crawler.py | zubairalam/getyourjobdone | 5e3f37b7679180e45cfea31ab7d60f60e9e30f65 | [
"MIT"
] | null | null | null | import re
import requests
from lxml import html
import pymongo
client = pymongo.MongoClient("127.0.0.1", 27017)  # local MongoDB server
db = client.jobs  # 'jobs' database; scraped links land in db.jobpages
def scrap_jobpages():
    """Scrape naukri.com's top-skill job listing links into MongoDB.

    Fetches the overview page, extracts every anchor from the four-column
    skill list, and stores one document per link in the ``jobpages``
    collection: ``{'label': <anchor title>, 'url': <href>}``.
    """
    url = 'http://jobsearch.naukri.com/top-skill-jobs'
    response = requests.get(url)
    tree = html.fromstring(response.text)
    # Fix: the loop previously reused the name ``url``, shadowing the page
    # URL above; use distinct names for the page URL and each anchor.
    links = tree.xpath("//div[@class='multiColumn colCount_four']/a")
    for link in links:
        db.jobpages.insert({'label': link.attrib['title'], 'url': link.attrib['href']})
if __name__=="__main__":
    # Fix: the guarded body contained only a comment, which is a syntax
    # error in Python.  Keep the scrape call disabled as before, but make
    # the module importable/runnable.
    # scrap_jobpages()
    pass
| 23.5 | 85 | 0.682785 |
2b7035edb0dd91c834dbb2b115512f0aa9fb7a13 | 1,466 | py | Python | CyberPi/Python with CyberPi 015.py | nothingszpt/PythonWithHardware | 95b580824f1bb11e36c0a27fdcbd4aec07548b59 | [
"MIT"
] | 2 | 2020-08-15T02:49:19.000Z | 2020-08-15T02:49:31.000Z | CyberPi/Python with CyberPi 015.py | nothingszpt/PythonWithHardware | 95b580824f1bb11e36c0a27fdcbd4aec07548b59 | [
"MIT"
] | null | null | null | CyberPi/Python with CyberPi 015.py | nothingszpt/PythonWithHardware | 95b580824f1bb11e36c0a27fdcbd4aec07548b59 | [
"MIT"
] | 1 | 2022-02-24T05:30:30.000Z | 2022-02-24T05:30:30.000Z | """"
名称:015 想亮哪里亮哪里
硬件:童芯派
功能介绍:通过输入信息,控制童芯派灯光的颜色
难度:⭐⭐
支持的模式:在线
使用功能解读:
1. 双分支结构
if-条件1 :
----条件成立执行的代码
elif-条件2
----条件2成立时执行的代码
elif-条件3
----条件3成立时执行的代码
else:
----条件1 2 3 都不成立时执行的代码
在多分支结构当中,可以增设多个elif语句在里面。
- 表示空格
条件成立,返回值则为True,条件不成立则返回值False
"""
# ---------程序分割线----------------程序分割线----------------程序分割线----------
import cyberpi
cyberpi.led.off() # 熄灭所有LED灯,进行灯光初始化
led_num = int(input("请输入制定的LED的位置(1-5): ")) # 输入要打开的LED灯位置序号
if led_num == 1: # 用if判断light_num中的数据是否为1
cyberpi.led.on(255, 255, 255, 1) # 条件成立则打开第1个LED灯
elif led_num == 2: # 否则判断light_num中的数据是否为2
cyberpi.led.on(255, 255, 255, 2) # 条件成立则打开第2个LED灯
elif led_num == 3: # 否则判断light_num中的数据是否为3
cyberpi.led.on(255, 255, 255, 3) # 条件成立则打开第3个LED灯
elif led_num == 4: # 否则判断light_num中的数据是否为4
cyberpi.led.on(255, 255, 255, 4) # 条件成立则打开第4个LED灯
elif led_num == 5: # 否则判断light_num中的数据是否为5
cyberpi.led.on(255, 255, 255, 5) # 条件成立则打开第5个LED灯
else: # 否则
cyberpi.led.off() # 灯光保持关闭
# 拓展
# 1.你可以用数字来控制童芯派播放制定的音效吗?
| 29.32 | 82 | 0.45839 |
3fe7ee97d5f2fca0e78abb36b2d0eb238135c0e9 | 18,664 | py | Python | rosbridge_server/src/tornado/test/twisted_test.py | ipa-fxm/rosbridge_suite | 7c7fa43c00383f39f64945c68f0a6290b00a53fc | [
"BSD-2-Clause"
] | null | null | null | rosbridge_server/src/tornado/test/twisted_test.py | ipa-fxm/rosbridge_suite | 7c7fa43c00383f39f64945c68f0a6290b00a53fc | [
"BSD-2-Clause"
] | null | null | null | rosbridge_server/src/tornado/test/twisted_test.py | ipa-fxm/rosbridge_suite | 7c7fa43c00383f39f64945c68f0a6290b00a53fc | [
"BSD-2-Clause"
] | null | null | null | # Author: Ovidiu Predescu
# Date: July 2011
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unittest for the twisted-style reactor.
"""
from __future__ import absolute_import, division, with_statement
import os
import signal
import thread
import threading
import unittest
try:
import fcntl
import twisted
from twisted.internet.defer import Deferred
from twisted.internet.interfaces import IReadDescriptor, IWriteDescriptor
from twisted.internet.protocol import Protocol
from twisted.web.client import Agent
from twisted.web.resource import Resource
from twisted.web.server import Site
from twisted.python import log
from tornado.platform.twisted import TornadoReactor
from zope.interface import implements
except ImportError:
fcntl = None
twisted = None
IReadDescriptor = IWriteDescriptor = None
def implements(f):
pass
from tornado.httpclient import AsyncHTTPClient
from tornado.ioloop import IOLoop
from tornado.platform.auto import set_close_exec
from tornado.testing import get_unused_port
from tornado.util import import_object
from tornado.web import RequestHandler, Application
def save_signal_handlers():
    """Snapshot the current SIGINT/SIGTERM/SIGCHLD handlers.

    Returns a dict mapping signal number -> handler so that the handlers
    can be reinstated with restore_signal_handlers() after twisted has
    had a chance to replace them.
    """
    signums = (signal.SIGINT, signal.SIGTERM, signal.SIGCHLD)
    saved = dict((sig, signal.getsignal(sig)) for sig in signums)
    # Sanity check: twisted must not have installed its handlers yet.
    assert "twisted" not in repr(saved), repr(saved)
    return saved
def restore_signal_handlers(saved):
    """Reinstall the signal handlers captured by save_signal_handlers().

    Fix: uses dict.items() instead of the Python-2-only iteritems(), which
    behaves identically here on Python 2 and also works on Python 3.
    """
    for sig, handler in saved.items():
        signal.signal(sig, handler)
class ReactorTestCase(unittest.TestCase):
    """Base class: each test runs against a fresh IOLoop-backed TornadoReactor."""
    def setUp(self):
        self._saved_signals = save_signal_handlers()
        self._io_loop = IOLoop()
        self._reactor = TornadoReactor(self._io_loop)
    def tearDown(self):
        # Close all fds so a failed test cannot leak into the next one.
        self._io_loop.close(all_fds=True)
        restore_signal_handlers(self._saved_signals)
class ReactorWhenRunningTest(ReactorTestCase):
    """callWhenRunning callbacks fire, including ones scheduled from a callback."""
    def test_whenRunning(self):
        self._whenRunningCalled = False
        self._anotherWhenRunningCalled = False
        self._reactor.callWhenRunning(self.whenRunningCallback)
        self._reactor.run()
        self.assertTrue(self._whenRunningCalled)
        self.assertTrue(self._anotherWhenRunningCalled)
    def whenRunningCallback(self):
        # Schedule a second callback from within the first, then stop.
        self._whenRunningCalled = True
        self._reactor.callWhenRunning(self.anotherWhenRunningCallback)
        self._reactor.stop()
    def anotherWhenRunningCallback(self):
        self._anotherWhenRunningCalled = True
class ReactorCallLaterTest(ReactorTestCase):
    """callLater fires after its delay and is removed from getDelayedCalls."""
    def test_callLater(self):
        self._laterCalled = False
        self._now = self._reactor.seconds()
        self._timeout = 0.001
        dc = self._reactor.callLater(self._timeout, self.callLaterCallback)
        self.assertEqual(self._reactor.getDelayedCalls(), [dc])
        self._reactor.run()
        self.assertTrue(self._laterCalled)
        # The callback must not have fired before the requested delay.
        self.assertTrue(self._called - self._now > self._timeout)
        self.assertEqual(self._reactor.getDelayedCalls(), [])
    def callLaterCallback(self):
        self._laterCalled = True
        self._called = self._reactor.seconds()
        self._reactor.stop()
class ReactorTwoCallLaterTest(ReactorTestCase):
    """Two pending callLater calls both fire after their respective delays."""
    def test_callLater(self):
        self._later1Called = False
        self._later2Called = False
        self._now = self._reactor.seconds()
        self._timeout1 = 0.0005
        dc1 = self._reactor.callLater(self._timeout1, self.callLaterCallback1)
        self._timeout2 = 0.001
        dc2 = self._reactor.callLater(self._timeout2, self.callLaterCallback2)
        # getDelayedCalls() order is unspecified; accept either ordering.
        self.assertTrue(self._reactor.getDelayedCalls() == [dc1, dc2] or
                        self._reactor.getDelayedCalls() == [dc2, dc1])
        self._reactor.run()
        self.assertTrue(self._later1Called)
        self.assertTrue(self._later2Called)
        self.assertTrue(self._called1 - self._now > self._timeout1)
        self.assertTrue(self._called2 - self._now > self._timeout2)
        self.assertEqual(self._reactor.getDelayedCalls(), [])
    def callLaterCallback1(self):
        self._later1Called = True
        self._called1 = self._reactor.seconds()
    def callLaterCallback2(self):
        # Second (longer) timer stops the reactor once both have fired.
        self._later2Called = True
        self._called2 = self._reactor.seconds()
        self._reactor.stop()
class ReactorCallFromThreadTest(ReactorTestCase):
    """callFromThread marshals a callback from a worker thread onto the reactor thread."""
    def setUp(self):
        super(ReactorCallFromThreadTest, self).setUp()
        self._mainThread = thread.get_ident()
    def tearDown(self):
        self._thread.join()
        super(ReactorCallFromThreadTest, self).tearDown()
    def _newThreadRun(self):
        # Runs in the worker thread: must not be the main thread.
        self.assertNotEqual(self._mainThread, thread.get_ident())
        if hasattr(self._thread, 'ident'):  # new in python 2.6
            self.assertEqual(self._thread.ident, thread.get_ident())
        self._reactor.callFromThread(self._fnCalledFromThread)
    def _fnCalledFromThread(self):
        # Must execute back on the main (reactor) thread.
        self.assertEqual(self._mainThread, thread.get_ident())
        self._reactor.stop()
    def _whenRunningCallback(self):
        self._thread = threading.Thread(target=self._newThreadRun)
        self._thread.start()
    def testCallFromThread(self):
        self._reactor.callWhenRunning(self._whenRunningCallback)
        self._reactor.run()
class ReactorCallInThread(ReactorTestCase):
    """callInThread runs the function off the reactor thread."""
    def setUp(self):
        super(ReactorCallInThread, self).setUp()
        self._mainThread = thread.get_ident()
    def _fnCalledInThread(self, *args, **kwargs):
        # Runs in a worker thread; stop the reactor from the right thread.
        self.assertNotEqual(thread.get_ident(), self._mainThread)
        self._reactor.callFromThread(lambda: self._reactor.stop())
    def _whenRunningCallback(self):
        self._reactor.callInThread(self._fnCalledInThread)
    def testCallInThread(self):
        self._reactor.callWhenRunning(self._whenRunningCallback)
        self._reactor.run()
class Reader:
    """Minimal IReadDescriptor: invokes callback(fd) whenever fd is readable."""
    implements(IReadDescriptor)

    def __init__(self, fd, callback):
        self._fd = fd
        self._callback = callback
    def logPrefix(self):
        return "Reader"
    def close(self):
        self._fd.close()
    def fileno(self):
        return self._fd.fileno()
    def connectionLost(self, reason):
        self.close()
    def doRead(self):
        # Called by the reactor when the fd is ready for reading.
        self._callback(self._fd)
class Writer:
    """Minimal IWriteDescriptor: invokes callback(fd) whenever fd is writable."""
    implements(IWriteDescriptor)

    def __init__(self, fd, callback):
        self._fd = fd
        self._callback = callback
    def logPrefix(self):
        return "Writer"
    def close(self):
        self._fd.close()
    def fileno(self):
        return self._fd.fileno()
    def connectionLost(self, reason):
        self.close()
    def doWrite(self):
        # Called by the reactor when the fd is ready for writing.
        self._callback(self._fd)
class ReactorReaderWriterTest(ReactorTestCase):
    """addReader/addWriter wiring, exercised over a non-blocking pipe."""
    def _set_nonblocking(self, fd):
        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    def setUp(self):
        super(ReactorReaderWriterTest, self).setUp()
        # _p1 is the unbuffered read end of the pipe, _p2 the write end.
        r, w = os.pipe()
        self._set_nonblocking(r)
        self._set_nonblocking(w)
        set_close_exec(r)
        set_close_exec(w)
        self._p1 = os.fdopen(r, "rb", 0)
        self._p2 = os.fdopen(w, "wb", 0)
    def tearDown(self):
        super(ReactorReaderWriterTest, self).tearDown()
        self._p1.close()
        self._p2.close()
    def _testReadWrite(self):
        """
        In this test the writer writes an 'x' to its fd. The reader
        reads it, checks the value and ends the test.
        """
        self.shouldWrite = True
        def checkReadInput(fd):
            self.assertEquals(fd.read(), 'x')
            self._reactor.stop()
        def writeOnce(fd):
            if self.shouldWrite:
                self.shouldWrite = False
                fd.write('x')
        self._reader = Reader(self._p1, checkReadInput)
        self._writer = Writer(self._p2, writeOnce)
        self._reactor.addWriter(self._writer)
        # Test that adding the reader twice adds it only once to
        # IOLoop.
        self._reactor.addReader(self._reader)
        self._reactor.addReader(self._reader)
    def testReadWrite(self):
        self._reactor.callWhenRunning(self._testReadWrite)
        self._reactor.run()
    def _testNoWriter(self):
        """
        In this test we have no writer. Make sure the reader doesn't
        read anything.
        """
        def checkReadInput(fd):
            self.fail("Must not be called.")
        def stopTest():
            # Close the writer here since the IOLoop doesn't know
            # about it.
            self._writer.close()
            self._reactor.stop()
        self._reader = Reader(self._p1, checkReadInput)
        # We create a writer, but it should never be invoked.
        self._writer = Writer(self._p2, lambda fd: fd.write('x'))
        # Test that adding and removing the writer leaves us with no writer.
        self._reactor.addWriter(self._writer)
        self._reactor.removeWriter(self._writer)
        # Test that adding and removing the reader doesn't cause
        # unintended effects.
        self._reactor.addReader(self._reader)
        # Wake up after a moment and stop the test
        self._reactor.callLater(0.001, stopTest)
    def testNoWriter(self):
        self._reactor.callWhenRunning(self._testNoWriter)
        self._reactor.run()
# Test various combinations of twisted and tornado http servers,
# http clients, and event loop interfaces.
class CompatibilityTests(unittest.TestCase):
    """Cross-product compatibility matrix: twisted/tornado HTTP servers vs
    twisted/tornado HTTP clients, each driven by either the IOLoop or the
    TornadoReactor event-loop interface."""
    def setUp(self):
        self.saved_signals = save_signal_handlers()
        self.io_loop = IOLoop()
        # The reactor wraps (and shares) this IOLoop.
        self.reactor = TornadoReactor(self.io_loop)
    def tearDown(self):
        self.reactor.disconnectAll()
        self.io_loop.close(all_fds=True)
        restore_signal_handlers(self.saved_signals)
    def start_twisted_server(self):
        class HelloResource(Resource):
            isLeaf = True
            def render_GET(self, request):
                return "Hello from twisted!"
        site = Site(HelloResource())
        self.twisted_port = get_unused_port()
        self.reactor.listenTCP(self.twisted_port, site, interface='127.0.0.1')
    def start_tornado_server(self):
        class HelloHandler(RequestHandler):
            def get(self):
                self.write("Hello from tornado!")
        app = Application([('/', HelloHandler)],
                          log_function=lambda x: None)
        self.tornado_port = get_unused_port()
        app.listen(self.tornado_port, address='127.0.0.1', io_loop=self.io_loop)
    def run_ioloop(self):
        # Drive the test with the tornado IOLoop interface.
        self.stop_loop = self.io_loop.stop
        self.io_loop.start()
        self.reactor.fireSystemEvent('shutdown')
    def run_reactor(self):
        # Drive the test with the twisted reactor interface.
        self.stop_loop = self.reactor.stop
        self.stop = self.reactor.stop
        self.reactor.run()
    def tornado_fetch(self, url, runner):
        responses = []
        client = AsyncHTTPClient(self.io_loop)
        def callback(response):
            responses.append(response)
            self.stop_loop()
        client.fetch(url, callback=callback)
        runner()
        self.assertEqual(len(responses), 1)
        # Re-raise any error captured in the response object.
        responses[0].rethrow()
        return responses[0]
    def twisted_fetch(self, url, runner):
        # http://twistedmatrix.com/documents/current/web/howto/client.html
        chunks = []
        client = Agent(self.reactor)
        d = client.request('GET', url)
        class Accumulator(Protocol):
            def __init__(self, finished):
                self.finished = finished
            def dataReceived(self, data):
                chunks.append(data)
            def connectionLost(self, reason):
                self.finished.callback(None)
        def callback(response):
            finished = Deferred()
            response.deliverBody(Accumulator(finished))
            return finished
        d.addCallback(callback)
        def shutdown(ignored):
            self.stop_loop()
        d.addBoth(shutdown)
        runner()
        self.assertTrue(chunks)
        return ''.join(chunks)
    def testTwistedServerTornadoClientIOLoop(self):
        self.start_twisted_server()
        response = self.tornado_fetch(
            'http://localhost:%d' % self.twisted_port, self.run_ioloop)
        self.assertEqual(response.body, 'Hello from twisted!')
    def testTwistedServerTornadoClientReactor(self):
        self.start_twisted_server()
        response = self.tornado_fetch(
            'http://localhost:%d' % self.twisted_port, self.run_reactor)
        self.assertEqual(response.body, 'Hello from twisted!')
    def testTornadoServerTwistedClientIOLoop(self):
        self.start_tornado_server()
        response = self.twisted_fetch(
            'http://localhost:%d' % self.tornado_port, self.run_ioloop)
        self.assertEqual(response, 'Hello from tornado!')
    def testTornadoServerTwistedClientReactor(self):
        self.start_tornado_server()
        response = self.twisted_fetch(
            'http://localhost:%d' % self.tornado_port, self.run_reactor)
        self.assertEqual(response, 'Hello from tornado!')
if twisted is None:
    # Twisted is unavailable: remove the test classes so the test runner
    # does not try to collect them.
    del ReactorWhenRunningTest
    del ReactorCallLaterTest
    del ReactorTwoCallLaterTest
    del ReactorCallFromThreadTest
    del ReactorCallInThread
    del ReactorReaderWriterTest
    del CompatibilityTests
else:
    # Import and run as much of twisted's test suite as possible.
    # This is unfortunately rather dependent on implementation details,
    # but there doesn't appear to be a clean all-in-one conformance test
    # suite for reactors.
    #
    # This is a list of all test suites using the ReactorBuilder
    # available in Twisted 11.0.0 and 11.1.0 (and a blacklist of
    # specific test methods to be disabled).
    twisted_tests = {
        'twisted.internet.test.test_core.ObjectModelIntegrationTest': [],
        'twisted.internet.test.test_core.SystemEventTestsBuilder': [
            'test_iterate', # deliberately not supported
        ],
        'twisted.internet.test.test_fdset.ReactorFDSetTestsBuilder': [
            "test_lostFileDescriptor", # incompatible with epoll and kqueue
        ],
        'twisted.internet.test.test_process.ProcessTestsBuilder': [
            # Doesn't work on python 2.5
            'test_systemCallUninterruptedByChildExit',
            # Doesn't clean up its temp files
            'test_shebang',
        ],
        # Process tests appear to work on OSX 10.7, but not 10.6
        #'twisted.internet.test.test_process.PTYProcessTestsBuilder': [
        #    'test_systemCallUninterruptedByChildExit',
        #    ],
        'twisted.internet.test.test_tcp.TCPClientTestsBuilder': [
            'test_badContext', # ssl-related; see also SSLClientTestsMixin
        ],
        'twisted.internet.test.test_tcp.TCPPortTestsBuilder': [
            # These use link-local addresses and cause firewall prompts on mac
            'test_buildProtocolIPv6AddressScopeID',
            'test_portGetHostOnIPv6ScopeID',
            'test_serverGetHostOnIPv6ScopeID',
            'test_serverGetPeerOnIPv6ScopeID',
        ],
        'twisted.internet.test.test_tcp.TCPConnectionTestsBuilder': [],
        'twisted.internet.test.test_tcp.WriteSequenceTests': [],
        'twisted.internet.test.test_tcp.AbortConnectionTestCase': [],
        'twisted.internet.test.test_threads.ThreadTestsBuilder': [],
        'twisted.internet.test.test_time.TimeTestsBuilder': [],
        # Extra third-party dependencies (pyOpenSSL)
        #'twisted.internet.test.test_tls.SSLClientTestsMixin': [],
        'twisted.internet.test.test_udp.UDPServerTestsBuilder': [],
        'twisted.internet.test.test_unix.UNIXTestsBuilder': [
            # Platform-specific.  These tests would be skipped automatically
            # if we were running twisted's own test runner.
            'test_connectToLinuxAbstractNamespace',
            'test_listenOnLinuxAbstractNamespace',
        ],
        'twisted.internet.test.test_unix.UNIXDatagramTestsBuilder': [
            'test_listenOnLinuxAbstractNamespace',
        ],
        'twisted.internet.test.test_unix.UNIXPortTestsBuilder': [],
    }
    for test_name, blacklist in twisted_tests.iteritems():
        try:
            test_class = import_object(test_name)
        except (ImportError, AttributeError):
            # Suite does not exist in this twisted version; skip it.
            continue
        for test_func in blacklist:
            if hasattr(test_class, test_func):
                # The test_func may be defined in a mixin, so clobber
                # it instead of delattr()
                setattr(test_class, test_func, lambda self: None)
        def make_test_subclass(test_class):
            # Build a subclass pinned to the tornado-backed test reactor,
            # with signal-handler save/restore and eager fd cleanup.
            class TornadoTest(test_class):
                _reactors = ["tornado.platform.twisted._TestReactor"]
                def buildReactor(self):
                    self.__saved_signals = save_signal_handlers()
                    return test_class.buildReactor(self)
                def unbuildReactor(self, reactor):
                    test_class.unbuildReactor(self, reactor)
                    # Clean up file descriptors (especially epoll/kqueue
                    # objects) eagerly instead of leaving them for the
                    # GC.  Unfortunately we can't do this in reactor.stop
                    # since twisted expects to be able to unregister
                    # connections in a post-shutdown hook.
                    reactor._io_loop.close(all_fds=True)
                    restore_signal_handlers(self.__saved_signals)
            TornadoTest.__name__ = test_class.__name__
            return TornadoTest
        test_subclass = make_test_subclass(test_class)
        # Expose the generated test cases at module level for discovery.
        globals().update(test_subclass.makeTestCaseClasses())
    # Since we're not using twisted's test runner, it's tricky to get
    # logging set up well.  Most of the time it's easiest to just
    # leave it turned off, but while working on these tests you may want
    # to uncomment one of the other lines instead.
    log.defaultObserver.stop()
    #import sys; log.startLogging(sys.stderr, setStdout=0)
    #log.startLoggingWithObserver(log.PythonLoggingObserver().emit, setStdout=0)
if __name__ == "__main__":
    unittest.main()
| 34.820896 | 80 | 0.658165 |
b04cbf3f9f8bd35e3f9408ec02867b9373c2c132 | 7,126 | py | Python | Plugin for Interpretable Machine Learning /plot/ice.py | GraceRuisiGu/NLP | ac75052e4cd0edcaa228fdd41d6ab4b109e8a0f1 | [
"MIT"
] | null | null | null | Plugin for Interpretable Machine Learning /plot/ice.py | GraceRuisiGu/NLP | ac75052e4cd0edcaa228fdd41d6ab4b109e8a0f1 | [
"MIT"
] | null | null | null | Plugin for Interpretable Machine Learning /plot/ice.py | GraceRuisiGu/NLP | ac75052e4cd0edcaa228fdd41d6ab4b109e8a0f1 | [
"MIT"
] | null | null | null | from __future__ import division
import six
from matplotlib import colors, cm
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import h2o
def _get_grid_points(x, num_grid_points):
if num_grid_points is None:
return x.unique()
else:
# unique is necessary, because if num_grid_points is too much larger
# than x.shape[0], there will be duplicate quantiles (even with
# interpolation)
return x.quantile(np.linspace(0, 1, num_grid_points)).unique()
def _get_point_x_ilocs(grid_index, data_index):
data_level = 'data_{}'.format(grid_index.name)
return (np.abs(np.subtract
.outer(grid_index,
data_index.get_level_values(data_level)))
.argmin(axis=0))
def _get_quantiles(x):
return np.greater.outer(x, x).sum(axis=1) / x.size
def ice(data, column, model, num_grid_points=None):
    """
    Generate individual conditional expectation (ICE) curves for a model.
    :param data: the sample data from which to generate ICE curves
    :type data: ``pandas`` ``DataFrame``
    :param column: the name of the column in ``data`` that will be varied to
        generate ICE curves
    :type column: ``str``
    :param model: the H2O model used to generate predictions. Its ``predict``
        method must accept an ``H2OFrame`` with the same columns as ``data``.
    :param num_grid_points: the number of grid points to use for the independent
        variable of the ICE curves. The independent variable values for the
        curves will be quantiles of the data.
        If ``None``, the values of the independent variable will be the unique
        values of ``data[column]``.
    :type num_grid_points: ``None`` or ``int``
    :return: A ``DataFrame`` whose columns are ICE curves. The row index is the
        independent variable, and the column index is the original data point
        corresponding to that ICE curve.
    :rtype: ``pandas`` ``DataFrame``
    """
    # Materialize the (H2O) input frame as a pandas DataFrame.
    data = data.as_data_frame()
    x_s = _get_grid_points(data[column], num_grid_points)
    # Long-format frame: every original row repeated once per grid point,
    # with `column` replaced by the grid values; orig_column keeps the
    # original values for the eventual column index.
    ice_data, orig_column = _to_ice_data(data, column, x_s)
    hf = h2o.H2OFrame(ice_data)
    hfd = model.predict(hf)
    # NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0 — this
    # presumably targets an older pandas; confirm or switch to .to_numpy().
    ice_data['ice_y'] = hfd.as_data_frame()['predict'].as_matrix()
    ice_data['data_{}'.format(column)] = orig_column
    # Pivot so rows are grid values and columns identify the original point.
    other_columns = ['data_{}'.format(column)] + [col for col in data.columns if col != column]
    ice_data = ice_data.pivot_table(values='ice_y', index=other_columns, columns=column).T
    return ice_data
def ice_plot(ice_data, frac_to_plot=1.,
             plot_points=False, point_kwargs=None,
             x_quantile=False, plot_pdp=False,
             centered=False, centered_quantile=0.,
             color_by=None, cmap=None, figsize=(14,11),
             ax=None, pdp_kwargs=None, **kwargs):
    """
    Plot the ICE curves
    :param ice_data: the ICE data generated by :func:`pycebox.ice.ice`
    :type ice_data: ``pandas`` ``DataFrame``
    :param frac_to_plot: the fraction of ICE curves to plot. If less than one,
        randomly samples columns of ``ice_data`` to plot.
    :type frac_to_plot: ``float``
    :param plot_points: whether or not to plot the original data points on the
        ICE curves. In this case, ``point_kwargs`` is passed as keyword
        arguments to plot.
    :type plot_points: ``bool``
    :param x_quantile: if ``True``, the plotted x-coordinates are the quantiles of
        ``ice_data.index``
    :type x_quantile: ``bool``
    :param plot_pdp: if ``True``, plot the partial dependence plot. In this
        case, ``pdp_kwargs`` is passed as keyword arguments to ``plot``.
    :param centered: if ``True``, each ICE curve is centered to zero at the
        percentile closest to ``centered_quantile``.
    :type centered: ``bool``
    :param color_by: If a string, color the ICE curve by that level of the
        column index.
        If callable, color the ICE curve by its return value when applied to a
        ``DataFrame`` of the column index of ``ice_data``
    :type color_by: ``None``, ``str``, or callable
    :param cmap: colormap used to map ``color_by`` values to curve colors
    :type cmap: ``matplotlib`` ``Colormap``
    :param figsize: size of the figure
    :type figsize: tuple (width, height)
    :param ax: the ``Axes`` on which to plot the ICE curves
    :type ax: ``None`` or ``matplotlib`` ``Axes``
    Other keyword arguments are passed to ``plot``
    """
    if not ice_data.index.is_monotonic_increasing:
        ice_data = ice_data.sort_index()
    if centered:
        # Shift every curve so it is zero at the grid row whose empirical
        # quantile is closest to centered_quantile.
        quantiles = _get_quantiles(ice_data.index)
        centered_quantile_iloc = np.abs(quantiles - centered_quantile).argmin()
        ice_data = ice_data - ice_data.iloc[centered_quantile_iloc]
    if frac_to_plot < 1.:
        n_cols = ice_data.shape[1]
        # NOTE(review): size=frac_to_plot * n_cols is a float; modern numpy
        # requires an integer size for np.random.choice — confirm the pinned
        # numpy version or cast with int().
        icols = np.random.choice(n_cols, size=frac_to_plot * n_cols, replace=False)
        plot_ice_data = ice_data.iloc[:, icols]
    else:
        plot_ice_data = ice_data
    if x_quantile:
        x = _get_quantiles(ice_data.index)
    else:
        x = ice_data.index
    if plot_points:
        # Nearest grid position (and corresponding y) of each original point.
        point_x_ilocs = _get_point_x_ilocs(plot_ice_data.index, plot_ice_data.columns)
        point_x = x[point_x_ilocs]
        point_y = plot_ice_data.values[point_x_ilocs, np.arange(point_x_ilocs.size)]
    if ax is None:
        _, ax = plt.subplots(figsize=figsize)
    if color_by is not None:
        if isinstance(color_by, six.string_types):
            colors_raw = plot_ice_data.columns.get_level_values(color_by).values
        elif hasattr(color_by, '__call__'):
            col_df = pd.DataFrame(list(plot_ice_data.columns.values), columns=plot_ice_data.columns.names)
            colors_raw = color_by(col_df)
        else:
            raise ValueError('color_by must be a string or function')
        # Map raw color values through the colormap, one curve per column.
        norm = colors.Normalize(colors_raw.min(), colors_raw.max())
        m = cm.ScalarMappable(norm=norm, cmap=cmap)
        # NOTE(review): DataFrame.iteritems() was removed in pandas 2.0
        # (use .items()) — presumably an older pandas is assumed; verify.
        for color_raw, (_, ice_curve) in zip(colors_raw, plot_ice_data.iteritems()):
            c = m.to_rgba(color_raw)
            ax.plot(x, ice_curve, c=c, zorder=0, **kwargs)
    else:
        ax.plot(x, plot_ice_data, zorder=0, **kwargs)
    if plot_points:
        ax.scatter(point_x, point_y, zorder=10, **(point_kwargs or {}))
    if plot_pdp:
        pdp_kwargs = pdp_kwargs or {}
        pdp_data = pdp(ice_data)
        ax.plot(x, pdp_data, **pdp_kwargs)
    return ax
def pdp(ice_data):
    """Collapse ICE curves into a partial dependence plot curve.

    The PDP value at each grid point is the mean of all ICE curves (the
    columns of *ice_data*) at that point.

    :param ice_data: the ICE data generated by :func:`pycebox.ice.ice`
    :type ice_data: ``pandas`` ``DataFrame``
    :return: the partial dependence plot curve
    :rtype: ``pandas`` ``Series``
    """
    return ice_data.mean(axis='columns')
def _to_ice_data(data, column, x_s):
"""
Create the DataFrame necessary for ICE calculations
"""
ice_data = pd.DataFrame(np.repeat(data.values, x_s.size, axis=0), columns=data.columns)
data_column = ice_data[column].copy()
ice_data[column] = np.tile(x_s, data.shape[0])
return ice_data, data_column
| 34.425121 | 106 | 0.65689 |
973b778f95bc8e256743f17fec821e3fea23c5b5 | 589 | py | Python | camera.py | ender18g/CircleBot | 548d0de8eac7452ce442068f86d64efc1924808c | [
"MIT"
] | 5 | 2021-01-20T19:29:11.000Z | 2021-04-29T17:13:10.000Z | camera.py | ender18g/CircleBot | 548d0de8eac7452ce442068f86d64efc1924808c | [
"MIT"
] | null | null | null | camera.py | ender18g/CircleBot | 548d0de8eac7452ce442068f86d64efc1924808c | [
"MIT"
] | null | null | null | import cv2
from imutils.video.pivideostream import PiVideoStream
import imutils
import time
import numpy as np
class VideoCamera(object):
    """Raspberry Pi camera wrapper that serves JPEG-encoded frames.

    Frames are captured on a background thread by ``PiVideoStream``; callers
    pull the latest frame via :meth:`get_frame`.
    """
    def __init__(self, flip = False):
        # flip: when True, frames are flipped vertically before encoding.
        self.vs = PiVideoStream().start()
        self.flip = flip
        # Give the camera sensor time to warm up before serving frames.
        time.sleep(2.0)
    def __del__(self):
        # NOTE(review): relying on __del__ for cleanup is unreliable (it may
        # never run); an explicit stop()/close() method would be safer.
        self.vs.stop()
    def flip_if_needed(self, frame):
        # Flip along axis 0 (vertical) when configured to do so.
        if self.flip:
            return np.flip(frame, 0)
        return frame
    def get_frame(self):
        """Return the most recent camera frame encoded as JPEG bytes."""
        frame = self.flip_if_needed(self.vs.read())
        # `ret` (encode success flag) is ignored here.
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()
| 23.56 | 53 | 0.626486 |
991e6b60284f523fba36d5810d9bf75a9c678d36 | 1,712 | py | Python | multiprobe/run/resolve_index.py | h324yang/multiprobe | ee4a32cd8bc6d8fc2649830ecde55fe0b6ae9478 | [
"MIT"
] | null | null | null | multiprobe/run/resolve_index.py | h324yang/multiprobe | ee4a32cd8bc6d8fc2649830ecde55fe0b6ae9478 | [
"MIT"
] | null | null | null | multiprobe/run/resolve_index.py | h324yang/multiprobe | ee4a32cd8bc6d8fc2649830ecde55fe0b6ae9478 | [
"MIT"
] | 1 | 2020-09-17T17:32:34.000Z | 2020-09-17T17:32:34.000Z | from getpass import getpass
import argparse
import os
from tqdm import tqdm
from multiprobe.data import WikipediaIndex, PagePropertiesDatabase
from multiprobe.utils import chunk
def main():
    """Resolve Wikidata entity IDs (QIDs) for pages in a Wikipedia index.

    Loads the index from --data-dir for --language, looks up missing
    entity IDs in chunks via the page-properties MySQL database, writes the
    resolved IDs back into the index, reports the resolution rate, and saves.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data-dir', '-d', type=str, required=True)
    parser.add_argument('--language', '-l', type=str, required=True)
    parser.add_argument('--pickled', action='store_true')
    parser.add_argument('--chunk-size', type=int, default=200)
    parser.add_argument('--username', type=str, required=True)
    parser.add_argument('--password', '-p', action='store_true')
    parser.add_argument('--env-password', '-e', action='store_true')
    args = parser.parse_args()
    # Password precedence: MYSQL_PASSWORD env var (if -e) overrides the
    # interactive prompt (if -p); otherwise None.
    password = getpass() if args.password else None
    password = os.environ.get('MYSQL_PASSWORD') if args.env_password else password
    index = WikipediaIndex.from_dir(args.data_dir, args.language, True, pickled=args.pickled)
    db = PagePropertiesDatabase(args.username, password)
    num_resolved = 0
    num_total = 0
    for info_lst in tqdm(list(chunk(index.index_infos, args.chunk_size)), desc='Looking up entity IDs'):
        # Only query pages whose entity ID is still unknown; the rest count
        # as already resolved.
        infos = list(filter(lambda x: x.entity_id is None, info_lst))
        num_total += len(info_lst)
        num_resolved += len(info_lst) - len(infos)
        if not infos:
            continue
        qids = db.bulk_find_qid(args.language, [info.page_id for info in infos])
        for info, qid in zip(infos, qids):
            info.entity_id = qid
        num_resolved += len(list(filter(lambda x: x is not None, qids)))
    # NOTE(review): divides by num_total — raises ZeroDivisionError on an
    # empty index; confirm that case cannot occur.
    print(f'{100 * num_resolved / num_total:.4}% total resolved entities.')
    index.save()
main() | 39.813953 | 104 | 0.688084 |
3b274b3a3685011f4b7dc112755cbba064720072 | 1,762 | py | Python | scavenger.py | ehsan/scavenger2 | 9c0ced38e73d3cf2ef5aae55445bfb752d9602dd | [
"MIT"
] | 1 | 2021-11-30T16:33:29.000Z | 2021-11-30T16:33:29.000Z | scavenger.py | ehsan/scavenger2 | 9c0ced38e73d3cf2ef5aae55445bfb752d9602dd | [
"MIT"
] | null | null | null | scavenger.py | ehsan/scavenger2 | 9c0ced38e73d3cf2ef5aae55445bfb752d9602dd | [
"MIT"
] | null | null | null | from mrcc import CCJob
import csv
import urllib2
import hashlib
class Scavenger(CCJob):
  """Common Crawl MapReduce job that flags pages serving known
  cryptojacking scripts, identified by the SHA-1 of their first 2048 bytes."""
  # Class-level (shared) list of (name, sha1hex) fingerprints built in
  # configure_options from the published samples list.
  hashes = list()
  def configure_options(self):
    try:
      samples = "https://raw.githubusercontent.com/ehsan/scavenger/master/data/samples.txt"
      # NOTE(review): on Python 2 (this file imports urllib2) the object
      # returned by urllib2.urlopen is not a context manager, so this
      # `with` likely raises and is silently swallowed by the broad
      # `except` below, leaving `hashes` empty — verify.
      with urllib2.urlopen(samples) as file:
        reader = csv.reader([file.read()])
        for row in reader:
          name = row[0]
          url = row[1]
          try:
            response = urllib2.urlopen(url).read()
            # Try to use the first 2048 characters
            response = response[:2048]
            # `hash` shadows the builtin; fingerprint of the script prefix.
            hash = hashlib.sha1(response).hexdigest()
            self.hashes.append((name, hash))
          except Exception:
            # Skip samples that cannot be fetched.
            continue
    except Exception:
      # Best-effort: run without fingerprints if the list is unreachable.
      return
  def process_record(self, record):
    # WARC records have three different types:
    #  ["application/warc-fields", "application/javascript; msgtype=request", "application/javascript; msgtype=response"]
    # We're only interested in the HTTP responses
    if 'javascript' in record['Content-Type'] and \
        'msgtype=response' in record['Content-Type']:
      payload = record.payload.read()
      # Try to use the first 2048 characters
      content = payload[:2048]
      hash = hashlib.sha1(content).hexdigest()
      for entry in self.hashes:
        if hash == entry[1]:
          # Found a cryptojacker!
          yield entry[0], record.url
          self.increment_counter('commoncrawl', 'found_scripts', 1)
Scavenger.run()
| 35.959184 | 125 | 0.53916 |
6db6a5098b31764894692cb91bf5c45f87873266 | 449 | py | Python | test/async/test_backends.py | shuckc/hip | bf64a3d74976153b103bdd903fa8061160862346 | [
"Apache-2.0",
"MIT"
] | 70 | 2019-11-25T23:18:20.000Z | 2022-02-09T17:04:30.000Z | test/async/test_backends.py | shuckc/hip | bf64a3d74976153b103bdd903fa8061160862346 | [
"Apache-2.0",
"MIT"
] | 125 | 2018-04-02T09:28:22.000Z | 2019-11-25T18:56:25.000Z | test/async/test_backends.py | shuckc/hip | bf64a3d74976153b103bdd903fa8061160862346 | [
"Apache-2.0",
"MIT"
] | 13 | 2019-11-26T15:38:44.000Z | 2022-01-05T19:35:32.000Z | import asyncio
import curio
import trio
from hip._backends._loader import normalize_backend
def test_sniff_backends():
    """Check that backend auto-detection resolves correctly under each loop."""
    async def _test_sniff_async(expected_name):
        # backend=None asks the loader to sniff the currently running loop.
        backend = normalize_backend(None, async_mode=True)
        assert backend.name == expected_name
    # trio is detected as "trio"; curio and plain asyncio both resolve to
    # the "anyio" backend (per the expected names asserted here).
    trio.run(_test_sniff_async, "trio")
    curio.run(_test_sniff_async, "anyio")
    loop = asyncio.get_event_loop()
    loop.run_until_complete(_test_sniff_async("anyio"))
| 26.411765 | 58 | 0.755011 |
7bd66dae1850e961d56f48bf7e14ddbf1b890e71 | 7,431 | py | Python | python/dglke/infer_emb_sim.py | VoVAllen/dgl-ke | ae4f720253ba6d22ed7ad4b20a16c57d04aac018 | [
"Apache-2.0"
] | 1 | 2022-02-06T09:04:03.000Z | 2022-02-06T09:04:03.000Z | python/dglke/infer_emb_sim.py | aksnzhy/dgl-ke | 31fb9654584ab490e1a68b8f97370f297f3d2b7e | [
"Apache-2.0"
] | null | null | null | python/dglke/infer_emb_sim.py | aksnzhy/dgl-ke | 31fb9654584ab490e1a68b8f97370f297f3d2b7e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# infer_emb.py
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
import argparse
from .utils import load_entity_data, load_raw_emb_data, load_raw_emb_mapping
from .models.infer import EmbSimInfer
class ArgParser(argparse.ArgumentParser):
    """Command-line options for embedding-similarity inference.

    Defines inputs (embedding file, id mapping, object lists), the pairing
    format and execution mode, the similarity function, and output settings.
    """
    def __init__(self):
        super(ArgParser, self).__init__()

        self.add_argument('--mfile', type=str, default=None,
                          help='ID mapping file.')
        self.add_argument('--emb_file', type=str, default=None,
                          help='Numpy file containing the embeddings.')
        self.add_argument('--format', type=str,
                          help='The format of input data'\
                                'l_r: two list of objects are provided as left objects and right objects.\n' \
                                'l_*: one list of objects is provided as left objects list and treat all objects in emb_file as right objects\n'
                                '*_r: one list of objects is provided as right objects list and treat all objects in emb_file as left objects\n'
                                '*: treat all objects in the emb_file as both left objects and right objects')
        self.add_argument('--data_files', type=str, default=None, nargs='+',
                          help='A list of data file names. This is used to provide necessary files containing the requried data ' \
                               'according to the format, e.g., for l_r, two files are required as left_data and right_data; ' \
                               'for l_*, only one file is required; for *, no file is required')
        self.add_argument('--raw_data', default=False, action='store_true',
                          help='whether the data profiled in data_files is in the raw object naming space, e.g. string name of the entity' \
                               'or in DGL-KE converted integer id space \n' \
                               'If True, the data is in the original naming space and the inference program will do the id translation' \
                               'according to id mapping files generated during the training progress. \n' \
                               'If False, the data is just interger ids and it is assumed that user has already done the id translation')
        self.add_argument('--exec_mode', type=str, default='all',
                          help='How to calculate scores for element pairs and calculate topK: \n' \
                               'pairwise: both left and right objects are provided with the same length N, and we will calculate the similarity pair by pair:'\
                               'result = topK([score(l_i, r_i)]) for i in N, the result shape will be (K,)'
                               'all: both left and right objects are provided as L and R, and we calculate all possible combinations of (l_i, r_j):' \
                               'result = topK([[score(l_i, rj) for l_i in L] for r_j in R]), the result shape will be (K,)\n'
                               'batch_left: both left and right objects are provided as L and R,, and we calculate topK for each element in L:' \
                               'result = topK([score(l_i, r_j) for r_j in R]) for l_j in L, the result shape will be (sizeof(L), K)\n')
        self.add_argument('--topK', type=int, default=10,
                          help='How many results are returned')
        self.add_argument('--sim_func', type=str, default='cosine',
                          help='What kind of similarity function is used in ranking and will be output: \n' \
                               'cosine: use cosine similarity, score = $\frac{x \cdot y}{||x||_2||y||_2}$' \
                               'l2: use l2 similarity, score = -$||x - y||_2$ \n' \
                               'l1: use l1 similarity, score = -$||x - y||_1$ \n' \
                               'dot: use dot product similarity, score = $x \cdot y$ \n' \
                               'ext_jaccard: use extended jaccard similarity, score = $\frac{x \cdot y}{||x||_{2}^{2} + ||y||_{2}^{2} - x \cdot y}$ \n')
        self.add_argument('--output', type=str, default='result.tsv',
                          help='Where to store the result, should be a single file')
        self.add_argument('--gpu', type=int, default=-1,
                          help='GPU device to use in inference, -1 means CPU')
def main():
    """CLI entry point for embedding-similarity inference.

    Loads left/right object id lists according to ``--format`` (optionally
    translating raw object names to integer ids via the mapping file), ranks
    pairs with :class:`EmbSimInfer` using the requested similarity function
    and execution mode, and writes a tab-separated left/right/score file.
    """
    args = ArgParser().parse_args()
    assert args.emb_file is not None, 'emb_file should be provided for entity embeddings'

    data_files = args.data_files
    if args.format == 'l_r':
        # Both sides come from explicit lists.
        if args.raw_data:
            head, id2e_map, e2id_map = load_raw_emb_data(file=data_files[0],
                                                         map_f=args.mfile)
            # Reuse the mapping built for the left side.
            tail, _, _ = load_raw_emb_data(file=data_files[1],
                                           e2id_map=e2id_map)
        else:
            head = load_entity_data(data_files[0])
            tail = load_entity_data(data_files[1])
    elif args.format == 'l_*':
        # Left side from a list; right side is every object in emb_file.
        if args.raw_data:
            head, id2e_map, e2id_map = load_raw_emb_data(file=data_files[0],
                                                         map_f=args.mfile)
        else:
            head = load_entity_data(data_files[0])
        tail = load_entity_data()
    elif args.format == '*_r':
        # Right side from a list; left side is every object in emb_file.
        if args.raw_data:
            tail, id2e_map, e2id_map = load_raw_emb_data(file=data_files[0],
                                                         map_f=args.mfile)
        else:
            tail = load_entity_data(data_files[0])
        head = load_entity_data()
    elif args.format == '*':
        if args.raw_data:
            id2e_map = load_raw_emb_mapping(map_f=args.mfile)
        # Bug fix: head/tail were previously assigned only inside the
        # raw_data branch above, so running '*' with integer-id data raised
        # a NameError.  Build them unconditionally, mirroring the 'l_*' and
        # '*_r' branches.
        head = load_entity_data()
        tail = load_entity_data()
    else:
        # Fail loudly instead of hitting a NameError below.
        assert False, 'Unknown data format: {}'.format(args.format)

    if args.exec_mode == 'pairwise':
        pair_wise = True
        bcast = False
    elif args.exec_mode == 'batch_left':
        pair_wise = False
        bcast = True
    elif args.exec_mode == 'all':
        pair_wise = False
        bcast = False
    else:
        assert False, 'Unknown execution mode: {}'.format(args.exec_mode)

    model = EmbSimInfer(args.gpu, args.emb_file, args.sim_func)
    model.load_emb()
    result = model.topK(head, tail, bcast=bcast, pair_ws=pair_wise, k=args.topK)

    with open(args.output, 'w+') as f:
        f.write('left\tright\tscore\n')
        for res in result:
            hl, tl, sl = res
            hl = hl.tolist()
            tl = tl.tolist()
            sl = sl.tolist()
            for h, t, s in zip(hl, tl, sl):
                if args.raw_data:
                    # Translate integer ids back to the original names.
                    h = id2e_map[h]
                    t = id2e_map[t]
                f.write('{}\t{}\t{}\n'.format(h, t, s))
    print('Inference Done')
if __name__ == '__main__':
main() | 53.847826 | 159 | 0.560894 |
4f1f3236b68ac71fe44ae97b6454bd9a611e82c6 | 729 | py | Python | py-royalmail/setup.py | QwadwoNyamekye/purplship-carriers | ce34e3054de246e3d85ddf6928b607193d061ae2 | [
"MIT"
] | null | null | null | py-royalmail/setup.py | QwadwoNyamekye/purplship-carriers | ce34e3054de246e3d85ddf6928b607193d061ae2 | [
"MIT"
] | null | null | null | py-royalmail/setup.py | QwadwoNyamekye/purplship-carriers | ce34e3054de246e3d85ddf6928b607193d061ae2 | [
"MIT"
] | null | null | null | from setuptools import setup
# Packaging metadata for the Royal Mail data-domain library.
setup(name='py-royalmail',
      version='1.0-alpha',
      description='Royal Mail Python Data domain library',
      url='https://github.com/PurplShip/purplship-carriers/tree/master/py-royalmail',
      author='PurplShip',
      author_email='danielk.developer@gmail.com',
      license='MIT',
      packages=['pyroyalmail'],
      # Runtime dependencies are pinned to exact versions.
      install_requires=[
          'attrs==18.2.0',
          'jstruct==1.0.0'
      ],
      dependency_links=[
          'https://git.io/purplship',
      ],
      classifiers=[
          "Programming Language :: Python :: 3",
          "License :: OSI Approved :: MIT License",
          "Operating System :: OS Independent",
      ],
      zip_safe=False)
83bb1202a25827ca4161b62e2f94814750e71c8c | 860 | py | Python | juggler.py | singhpraveen2010/experimental | 5fff00d8137ae069af3a66e5c4ebaff39f7acf80 | [
"MIT"
] | 1 | 2018-11-17T19:00:04.000Z | 2018-11-17T19:00:04.000Z | juggler.py | singhpraveen2010/experimental | 5fff00d8137ae069af3a66e5c4ebaff39f7acf80 | [
"MIT"
] | 1 | 2017-12-28T04:53:56.000Z | 2018-02-15T07:54:42.000Z | juggler.py | sara-02/junk | 3c23165eafb600b7af2d15231fb9508e2f26bbc6 | [
"MIT"
] | null | null | null | #Source: http://www.geeksforgeeks.org/juggler-sequence/
from math import (sqrt,
floor)
def is_even(number):
    """Return True when *number* is divisible by two."""
    return not number % 2

def square_root(number):
    """Return the (float) square root of *number*."""
    return sqrt(number)

def pow_3(number):
    """Return *number* cubed."""
    return number * number * number

def floor_num(number):
    """Round *number* down to the nearest integer and return it as an int."""
    return int(floor(number))
def generate_juggler_series(number):
    # Print the juggler sequence starting at `number`: each term is
    # floor(sqrt(n)) for even n and floor(sqrt(n**3)) for odd n, ending at 1.
    # NOTE(review): `print temp,` below is Python 2 statement syntax — this
    # function is Python-2-only as written.
    print(number),
    while number != 1:
        temp = number
        if not is_even(number):
            # Odd terms are cubed before the square root.
            temp = pow_3(number)
        temp = floor_num(square_root(temp))
        print temp,
        number = temp
def main():
    # Prompt for a starting value and print its juggler sequence.
    # NOTE(review): raw_input is Python-2-only.
    number = int(raw_input("Enter the starting integer of series:: "))
    if number <= 0:
        # The sequence is only defined for positive integers.
        print("Only Works for positive integers.")
        return
    # In Python 2 `print` is a statement, so this line parses as
    # `print ("...").format(number)` and prints the formatted string.
    print("The juggler series of {} is").format(number)
    generate_juggler_series(number)
if __name__ == '__main__':
main()
| 19.545455 | 70 | 0.622093 |
27e2249cb3ac77006963c167394651d77b19ec39 | 1,142 | py | Python | src/applications/package_manager.py | VMoM/dotstar | 3448d0053796955d75bfaae28ae702aff449841f | [
"MIT"
] | null | null | null | src/applications/package_manager.py | VMoM/dotstar | 3448d0053796955d75bfaae28ae702aff449841f | [
"MIT"
] | null | null | null | src/applications/package_manager.py | VMoM/dotstar | 3448d0053796955d75bfaae28ae702aff449841f | [
"MIT"
] | null | null | null | class PackageManager:
"""
An object that contains the name used by dotstar and the true name of the PM (the one used by the OS, that dotstar
calls when installing).
dotstar don't uses the same name as the OS because snap has two mods of installation (sandbox and classic)
so we have to differentiate them.
Contains too the command shape, a string with a %s placeholder
"""
    def __init__(
            self,
            dotstar_name: str,
            system_name: str,
            command_shape: str,
            multiple_apps_query_support: bool
    ) -> None:
        """
        :param dotstar_name: the name of the PM that dotstar uses
        :param system_name: the name of the PM that the OS uses
        :param command_shape: the shape of the command. Must have a %s placeholder
        :param multiple_apps_query_support: if the PM supports query with multiple names (like "pacman -Sy atom gedit")
        """
        # dotstar's internal identifier (may differ from the OS name, e.g.
        # snap sandbox vs. classic modes share the same system binary).
        self.dotstar_name = dotstar_name
        # Name of the package-manager executable as the OS knows it.
        self.system_name = system_name
        # Install command template with a %s placeholder for package name(s).
        self.command_shape = command_shape
        # Whether one invocation may install several packages at once.
        self.multiple_apps_query_support = multiple_apps_query_support
326ea8ec35bcdca9eb7142a29bd57514a4e119a3 | 9,301 | py | Python | board.py | pirtim/mascarade_table | 21e0e463025291dc038c5be9f6c0676f0cfedfb3 | [
"MIT"
] | null | null | null | board.py | pirtim/mascarade_table | 21e0e463025291dc038c5be9f6c0676f0cfedfb3 | [
"MIT"
] | null | null | null | board.py | pirtim/mascarade_table | 21e0e463025291dc038c5be9f6c0676f0cfedfb3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#pylint: disable=W1202
from __future__ import division
import functools
import logging
import operator
from collections import OrderedDict, namedtuple
import cards
from inputs import input_for_choice, input_for_confirmation
from player import Player
# Add a named tuple for (player, val) — it will read more nicely.
PlayerVal = namedtuple('PlayerVal', ['name', 'val'])
# NOTE: OrderedDictPlayers is just an alias, so the two helpers attached
# below monkeypatch collections.OrderedDict itself (every OrderedDict in
# the process gains items_p / iteritems_p).  iteritems is Python-2-only.
OrderedDictPlayers = OrderedDict
OrderedDictPlayers.items_p = lambda self: [PlayerVal(key, val) for key, val in self.items()]
OrderedDictPlayers.iteritems_p = lambda self: (PlayerVal(key, val) for key, val in self.iteritems())
# PublicBoard = namedtuple('PublicBoard', ['players_with_gold'])
# http://stackoverflow.com/a/31174427
def rsetattr(obj, attr, val):
    """setattr supporting dotted paths: ``rsetattr(o, 'a.b.c', v)`` sets
    ``o.a.b.c = v`` (plain setattr when the path has no dots)."""
    parent_path, _, leaf = attr.rpartition('.')
    target = rgetattr(obj, parent_path) if parent_path else obj
    return setattr(target, leaf, val)
# Unique marker distinguishing "no default supplied" from a default of None.
sentinel = object()
def rgetattr(obj, attr, default=sentinel):
    """getattr supporting dotted paths: ``rgetattr(o, 'a.b.c')`` returns
    ``o.a.b.c``.  When *default* is supplied it is returned for any missing
    attribute along the path; otherwise AttributeError propagates."""
    if default is sentinel:
        lookup = getattr
    else:
        def lookup(target, name):
            return getattr(target, name, default)
    path = [obj] + attr.split('.')
    return functools.reduce(lookup, path)
def gen_next_players_list(players_list, index):
    """Yield every player except the one at *index*, in turn order:
    first those after *index*, then wrapping around to those before it."""
    after = players_list[index + 1:]
    before = players_list[:index]
    for player in after + before:
        yield player
class PublicBoard(object):
    """The portion of the game state every player is allowed to see:
    each player's gold total and the public event history."""
    def __init__(self, players_with_gold, public_history):
        self.public_history = public_history
        self.players_with_gold = players_with_gold
class GameHistory(OrderedDict):
    """Ordered record of game events, keyed by event name."""
    pass
class Board(object):
    """Full (private) game state: players, court gold, histories, turn order."""

    def __init__(self, players_num, types_of_players, players_names, cards_names, start_gold):
        """Creates the players from parallel (bot type, name, card name) lists.

        Args:
            players_num: number of players at the table.
            types_of_players: bot classes, one per player.
            players_names: player names, one per player.
            cards_names: starting card name for each player.
            start_gold: amount of gold every player begins with.
        """
        self.players_num = players_num
        self.types_of_players = types_of_players
        self.players_names = players_names
        self.cards_names = cards_names

        self.players = OrderedDict()
        for index, (Bot, name, card_name) in enumerate(zip(self.types_of_players, self.players_names, self.cards_names)):
            self.players[name] = Player(index, Bot(), name, cards.cards[card_name], start_gold)

        self.current_player = self.players.items()[0][1]
        self.court = 0
        self.round_num = 0

        self.public_history = GameHistory({})
        self.true_history = GameHistory({})
        self.public_board = PublicBoard(self.method_from_players('gold'), self.public_history)
        # Every bot shares a reference to the same public view of the board.
        for name, player in self.players.iteritems():
            player.bot.public_board = self.public_board

    def public_board_update(self):
        """Refreshes the publicly visible gold counts."""
        self.public_board.players_with_gold = self.method_from_players('gold')

    def reshufle_cards(self):
        # TODO: implement card reshuffling.
        return NotImplemented

    def next_step(self):
        """Runs a single turn for the current player.

        Returns:
            (True, true_history) when the game has ended, otherwise
            (False, public_history).
        """
        logging.info('Start of round number {}'.format(self.round_num))
        logging.debug('Players Cards: {}'.format(self.method_from_players('card.name')))
        logging.debug('Players Gold: {}'.format(self.method_from_players('gold')))

        question = '{}, what do you do?'.format(self.current_player.get_repr())
        choices = ['PEEK', 'ANNOUNCE', 'EXCHANGE']
        decision = input_for_choice(self.current_player, 'move', choices, question)

        if decision == 'PEEK':
            self.current_player.peek_card()
        elif decision == 'ANNOUNCE':
            self.announcement()
        elif decision == 'EXCHANGE':
            self.current_player.potential_exchange_handler(self.players, self.players_names)
        else:
            raise ValueError('Wrong decision. Get: \'{}\'. Should be one from: {}.'.format(
                decision, choices))

        # An announcement may already have recorded a result; only re-check
        # the end condition when none has been recorded yet.
        if 'game_result' not in self.true_history:
            self.check_end_condition()
        if 'game_result' in self.true_history:
            logging.info('End of game at round number {}'.format(self.round_num))
            logging.info('Player {} won!'.format('Not Implemented'))
            logging.debug('Players Gold: {}'.format(self.method_from_players('gold')))
            return True, self.true_history

        self.next_player()
        return False, self.public_history

    def announcement(self):
        """Handles the ANNOUNCE action: claims, counter-claims and payouts."""
        question = 'What do you announce?'
        choices = set(self.cards_names)
        what_declare = input_for_choice(self.current_player, 'what_announce', choices, question)
        logging.info('Player: ' + self.current_player.get_repr() + ' has declared '
                     + what_declare + '.')

        claimants = []
        for name in gen_next_players_list(self.players_names, self.current_player.index):
            question = '{}, do you claim {} yourself?'.format(
                self.players[name].get_repr(), what_declare)
            # NOTE(review): the confirmation is requested from current_player,
            # not from self.players[name] -- verify this is intended.
            claim = input_for_confirmation(self.current_player, 'claim', question)
            if claim:
                claimants.append(name)
                logging.info('{} has claimed {}.'.format(self.players[name].get_repr(), what_declare))

        if claimants:
            claimants = [self.current_player.name] + claimants
            claimants_with_cards = self.method_from_players('card.name', claimants)
            # First pass: every truthful claimant plays the declared card.
            for name, card_name in claimants_with_cards.iteritems():
                if card_name == what_declare:
                    self.players[name].play_card(self)
                    logging.info('{} said the truth. He is a {}.'.format(self.players[name].get_repr(), what_declare))
            # TODO: should the end-of-game condition be checked here as well?
            # Second pass: every liar pays one gold to the court.
            for name, card_name in claimants_with_cards.iteritems():
                if card_name != what_declare:
                    self.players[name].gold -= 1
                    self.court += 1
                    logging.info('{} lied. He really is a {}, not a {}.'.format(self.players[name].get_repr(), self.players[name].card.name, what_declare))
        else:
            # Nobody contested: current player plays the declared card's power.
            self.current_player.play_card(self, cards.cards[what_declare])

    def next_player(self):
        """Advances the turn to the next player (wrapping around the table)."""
        self.current_player = self.players.items()[(self.current_player.index + 1) % self.players_num][1]
        self.round_num += 1

    def method_from_players(self, method, players=None):
        """Collects a (dotted) attribute from the given players.

        Args:
            method: dotted attribute path resolved with rgetattr,
                e.g. 'gold' or 'card.name'.
            players: a mapping of players or a list of player names;
                defaults to all players.

        Returns:
            OrderedDictPlayers mapping player name -> attribute value.
        """
        if players is None:
            players = self.players
        if hasattr(players, 'iteritems'):
            return OrderedDictPlayers([(key, rgetattr(value, method)) for key, value in players.iteritems()])
        else:
            return OrderedDictPlayers([(name, rgetattr(self.players[name], method)) for name in players])

    def check_end_condition(self, cheat_player=None):
        """Records a 'game_result' entry in true_history when a win holds.

        TODO: this should end the game loop directly instead of indirectly
        via a flag that is checked later.
        """
        if cheat_player is not None:
            if cheat_player.gold >= 10:
                result = OrderedDict([
                    ('type_of_end', 'rich_win'),
                    ('name', cheat_player.name),
                    ('gold', cheat_player.gold),
                    ('info', 'cheat_win')
                ])
                self.true_history.update([('game_result', result)])
                return

        richest = self.max_rich_player()
        poorest = self.min_rich_player()
        if richest[0].val >= 13:
            result = OrderedDict([
                ('type_of_end', 'rich_win'),
                ('name', richest[0].name),
                ('gold', richest[0].val),
                ('info', None)
            ])
            self.true_history.update([('game_result', result)])
            return
        if poorest[0].val <= 0:
            # Bug fix: the poor_win result used to report the *richest*
            # player's name and gold; it must report the poorest one.
            result = OrderedDict([
                ('type_of_end', 'poor_win'),
                ('name', poorest[0].name),
                ('gold', poorest[0].val),
                ('info', None)
            ])
            self.true_history.update([('game_result', result)])
            return

    def max_rich_player(self, all_players=False):
        '''Returns list(tuple(richest_player1, his_gold),tuple(richest_player2, his_gold),...)'''
        gold = self.method_from_players('gold')
        gold_sorted = sorted(gold.iteritems_p(), key=lambda x: -x.val)
        if all_players:
            return gold_sorted
        else:
            return filter(lambda x: x.val == gold_sorted[0].val, gold_sorted)

    def min_rich_player(self, all_players=False):
        '''Returns list(tuple(poorest_player1, his_gold),tuple(poorest_player2, his_gold),...)'''
        gold = self.method_from_players('gold')
        gold_sorted = sorted(gold.iteritems_p(), key=lambda x: +x.val)
        if all_players:
            return gold_sorted
        else:
            return filter(lambda x: x.val == gold_sorted[0].val, gold_sorted)
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings when the file
    # is executed directly.
    import doctest
    doctest.testmod()
9b3575701ad5276c515ec4eaafe6cefc0e98be3e | 8,071 | py | Python | students/K33401/Do_Thien/Lr3/project_lw3/my_app/migrations/0001_initial.py | DoVanThien/ITMO_ICT_WebDevelopment_2021-2022 | b9951f4e0e419a6d07c7d38f48df876878424a04 | [
"MIT"
] | null | null | null | students/K33401/Do_Thien/Lr3/project_lw3/my_app/migrations/0001_initial.py | DoVanThien/ITMO_ICT_WebDevelopment_2021-2022 | b9951f4e0e419a6d07c7d38f48df876878424a04 | [
"MIT"
] | null | null | null | students/K33401/Do_Thien/Lr3/project_lw3/my_app/migrations/0001_initial.py | DoVanThien/ITMO_ICT_WebDevelopment_2021-2022 | b9951f4e0e419a6d07c7d38f48df876878424a04 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.9 on 2021-12-03 01:00
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial migration for the hotel-booking app: creates the
    # lookup tables (Floor, Status, Type), the people tables (Guest, Staff),
    # the Room/Schedule/Reservation tables and a custom User model.
    # NOTE: generated code -- edit only via new migrations.

    initial = True

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        # Simple lookup table: which floor a room is on.
        migrations.CreateModel(
            name='Floor',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('number', models.SmallIntegerField()),
            ],
        ),
        # Hotel guest with contact and identity details.
        migrations.CreateModel(
            name='Guest',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=30, verbose_name='First name')),
                ('last_name', models.CharField(max_length=30, verbose_name='Last name')),
                ('middle_name', models.CharField(blank=True, max_length=30, null=True, verbose_name='Middle name')),
                ('date_of_birth', models.DateField(verbose_name='Birthday')),
                ('address', models.CharField(default='VP 5/7', max_length=50, verbose_name='Address')),
                ('city', models.CharField(max_length=30, verbose_name='City')),
                ('email', models.CharField(blank=True, max_length=50, null=True, verbose_name='Email')),
                ('phone', models.CharField(default='897777777', max_length=20, verbose_name='Phone number')),
                ('passport', models.CharField(default='C5555555', max_length=15)),
            ],
        ),
        # Hotel employee (referenced by Schedule).
        migrations.CreateModel(
            name='Staff',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=30, verbose_name='First name')),
                ('last_name', models.CharField(max_length=30, verbose_name='Last name')),
                ('middle_name', models.CharField(blank=True, max_length=30, null=True, verbose_name='Middle name')),
                ('date_of_birth', models.DateField(null=True, verbose_name='Birthday')),
                ('phone', models.CharField(max_length=20, null=True, verbose_name='Phone number')),
                ('address', models.CharField(max_length=50, null=True, verbose_name='Address')),
            ],
        ),
        # Room occupancy status: 'e' = empty, 'f' = full.
        migrations.CreateModel(
            name='Status',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(choices=[('e', 'empty'), ('f', 'full')], max_length=1, verbose_name='Status')),
            ],
        ),
        # Room category with its nightly price.
        migrations.CreateModel(
            name='Type',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(choices=[('1', '1'), ('2', '2'), ('3', '3')], max_length=1, verbose_name='Type')),
                ('price', models.IntegerField(verbose_name='Price')),
            ],
        ),
        # Which staff member services which floor on which date.
        migrations.CreateModel(
            name='Schedule',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField(verbose_name='Date')),
                ('floor_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='my_app.floor')),
                ('staff_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='my_app.staff')),
            ],
        ),
        # A physical room, tied to its floor, status and type.
        migrations.CreateModel(
            name='Room',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('number', models.CharField(default='100', max_length=10, verbose_name='Room number')),
                ('phone', models.CharField(max_length=20, verbose_name='Phone number')),
                ('floor_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='my_app.floor')),
                ('status_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='my_app.status')),
                ('type_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='my_app.type')),
            ],
        ),
        # A guest's booking of a room for a date range.
        migrations.CreateModel(
            name='Reservation',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField(verbose_name='Date')),
                ('check_in', models.DateField(verbose_name='Check in')),
                ('check_out', models.DateField(verbose_name='Check out')),
                ('adults', models.SmallIntegerField()),
                ('children', models.SmallIntegerField(blank=True, null=True)),
                ('amount', models.IntegerField()),
                ('guest_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='my_app.guest')),
                ('room_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='my_app.room')),
            ],
        ),
        # Custom auth user model (standard AbstractUser fields plus 'tel').
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('tel', models.CharField(blank=True, max_length=15, null=True, verbose_name='Phone number')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
2fea29d961e56e53996bd63cfcd87b5d2ee7112b | 27,470 | py | Python | tensorflow/python/tools/saved_model_cli.py | digimatronics/tensorflow1 | b0ab95c7af7c051d78dd74bd0a3032ce35273ea6 | [
"Apache-2.0"
] | 1 | 2017-09-29T09:12:22.000Z | 2017-09-29T09:12:22.000Z | tensorflow/python/tools/saved_model_cli.py | digimatronics/tensorflow1 | b0ab95c7af7c051d78dd74bd0a3032ce35273ea6 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/tools/saved_model_cli.py | digimatronics/tensorflow1 | b0ab95c7af7c051d78dd74bd0a3032ce35273ea6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Command-line interface to inspect and execute a graph in a SavedModel.
If TensorFlow is installed on your system through pip, the 'saved_model_cli'
binary can be invoked directly from command line.
At a high level, SavedModel CLI allows users to both inspect and execute
computations on a MetaGraphDef in a SavedModel. These are done through `show`
and `run` commands. Following is the usage of the two commands. SavedModel
CLI will also display these information with -h option.
'show' command usage: saved_model_cli show [-h] --dir DIR [--tag_set TAG_SET]
[--signature_def SIGNATURE_DEF_KEY]
Examples:
To show all available tag-sets in the SavedModel:
$saved_model_cli show --dir /tmp/saved_model
To show all available SignatureDef keys in a MetaGraphDef specified by its
tag-set:
$saved_model_cli show --dir /tmp/saved_model --tag_set serve
For a MetaGraphDef with multiple tags in the tag-set, all tags must be passed
in, separated by ',':
$saved_model_cli show --dir /tmp/saved_model --tag_set serve,gpu
To show all inputs and outputs TensorInfo for a specific SignatureDef specified
by the SignatureDef key in a MetaGraphDef:
$saved_model_cli show --dir /tmp/saved_model --tag_set serve
--signature_def serving_default
Example output:
The given SavedModel SignatureDef contains the following input(s):
inputs['input0'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
inputs['input1'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
The given SavedModel SignatureDef contains the following output(s):
outputs['output'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
Method name is: tensorflow/serving/regress
To show all available information in the SavedModel:
$saved_model_cli show --dir /tmp/saved_model --all
usage: saved_model_cli run [-h] --dir DIR --tag_set TAG_SET --signature_def
SIGNATURE_DEF_KEY [--inputs INPUTS]
[--input_exprs INPUT_EXPRS] [--outdir OUTDIR]
[--overwrite] [--tf_debug]
Examples:
To run input tensors from files through a MetaGraphDef and save the output
tensors to files:
$saved_model_cli run --dir /tmp/saved_model --tag_set serve
--signature_def serving_default --inputs x=/tmp/124.npz
--input_exprs 'x2=np.ones((6,2))' --outdir /tmp/out
To observe the intermediate Tensor values in the runtime graph, use the
--tf_debug flag, e.g.:
$saved_model_cli run --dir /tmp/saved_model --tag_set serve
--signature_def serving_default --inputs 'x=/tmp/124.npz;x2=/tmp/123.npy'
--outdir /tmp/out --tf_debug
To build this tool from source, run:
$bazel build tensorflow/python/tools:saved_model_cli
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import re
import sys
import warnings
import numpy as np
from tensorflow.contrib.saved_model.python.saved_model import reader
from tensorflow.contrib.saved_model.python.saved_model import signature_def_utils
from tensorflow.core.framework import types_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.wrappers import local_cli_wrapper
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.platform import app
from tensorflow.python.saved_model import loader
def _show_tag_sets(saved_model_dir):
  """Prints every tag-set of the MetaGraphs stored in the SavedModel.

  Args:
    saved_model_dir: Directory containing the SavedModel to inspect.
  """
  print('The given SavedModel contains the following tag-sets:')
  for tags in sorted(reader.get_saved_model_tag_sets(saved_model_dir)):
    print(', '.join(sorted(tags)))
def _show_signature_def_map_keys(saved_model_dir, tag_set):
  """Prints every key of the SignatureDef map of one MetaGraphDef.

  Args:
    saved_model_dir: Directory containing the SavedModel to inspect.
    tag_set: Group of tag(s) of the MetaGraphDef to get SignatureDef map from,
      in string format, separated by ','. For tag-set contains multiple tags,
      all tags must be passed in.
  """
  keys = sorted(get_signature_def_map(saved_model_dir, tag_set).keys())
  print('The given SavedModel MetaGraphDef contains SignatureDefs with the '
        'following keys:')
  for signature_def_key in keys:
    print('SignatureDef key: \"%s\"' % signature_def_key)
def _get_inputs_tensor_info_from_meta_graph_def(meta_graph_def,
                                                signature_def_key):
  """Gets TensorInfo for all inputs of the SignatureDef.

  Args:
    meta_graph_def: MetaGraphDef protocol buffer with the SignatureDef map to
      look up SignatureDef key.
    signature_def_key: A SignatureDef key string.

  Returns:
    A dictionary that maps input tensor keys to TensorInfos.
  """
  signature_def = signature_def_utils.get_signature_def_by_key(
      meta_graph_def, signature_def_key)
  return signature_def.inputs
def _get_outputs_tensor_info_from_meta_graph_def(meta_graph_def,
                                                 signature_def_key):
  """Gets TensorInfos for all outputs of the SignatureDef.

  Args:
    meta_graph_def: MetaGraphDef protocol buffer with the SignatureDef map to
      look up signature_def_key.
    signature_def_key: A SignatureDef key string.

  Returns:
    A dictionary that maps output tensor keys to TensorInfos.
  """
  signature_def = signature_def_utils.get_signature_def_by_key(
      meta_graph_def, signature_def_key)
  return signature_def.outputs
def _show_inputs_outputs(saved_model_dir, tag_set, signature_def_key):
  """Prints input and output TensorInfos of one SignatureDef.

  Args:
    saved_model_dir: Directory containing the SavedModel to inspect.
    tag_set: Group of tag(s) of the MetaGraphDef, in string format, separated
      by ','. For tag-set contains multiple tags, all tags must be passed in.
    signature_def_key: A SignatureDef key string.
  """
  meta_graph_def = get_meta_graph_def(saved_model_dir, tag_set)

  print('The given SavedModel SignatureDef contains the following input(s):')
  inputs = _get_inputs_tensor_info_from_meta_graph_def(meta_graph_def,
                                                       signature_def_key)
  for key, tensor_info in sorted(inputs.items()):
    print('inputs[\'%s\'] tensor_info:' % key)
    _print_tensor_info(tensor_info)

  print('The given SavedModel SignatureDef contains the following output(s):')
  outputs = _get_outputs_tensor_info_from_meta_graph_def(meta_graph_def,
                                                         signature_def_key)
  for key, tensor_info in sorted(outputs.items()):
    print('outputs[\'%s\'] tensor_info:' % key)
    _print_tensor_info(tensor_info)

  print('Method name is: %s' %
        meta_graph_def.signature_def[signature_def_key].method_name)
def _print_tensor_info(tensor_info):
  """Prints the dtype, shape and name of the given TensorInfo.

  Args:
    tensor_info: TensorInfo object to be printed.
  """
  print('  dtype: ' + types_pb2.DataType.keys()[tensor_info.dtype])
  # Render the shape as a tuple-like string, or flag an unknown rank.
  if tensor_info.tensor_shape.unknown_rank:
    shape = 'unknown_rank'
  else:
    shape = '(' + ', '.join(
        str(dim.size) for dim in tensor_info.tensor_shape.dim) + ')'
  print('  shape: ' + shape)
  print('  name: ' + tensor_info.name)
def _show_all(saved_model_dir):
  """Prints every tag-set, SignatureDef and input/output in the SavedModel.

  Args:
    saved_model_dir: Directory containing the SavedModel to inspect.
  """
  for tags in sorted(reader.get_saved_model_tag_sets(saved_model_dir)):
    tag_set = ', '.join(tags)
    print('\nMetaGraphDef with tag-set: \'' + tag_set +
          '\' contains the following SignatureDefs:')
    signature_def_map = get_signature_def_map(saved_model_dir, tag_set)
    for signature_def_key in sorted(signature_def_map.keys()):
      print('\nsignature_def[\'' + signature_def_key + '\']:')
      _show_inputs_outputs(saved_model_dir, tag_set, signature_def_key)
def get_meta_graph_def(saved_model_dir, tag_set):
  """Gets the MetaGraphDef for the given tag-set from a SavedModel.

  Args:
    saved_model_dir: Directory containing the SavedModel to inspect or execute.
    tag_set: Group of tag(s) of the MetaGraphDef to load, in string format,
      separated by ','. For tag-set contains multiple tags, all tags must be
      passed in.

  Raises:
    RuntimeError: An error when the given tag-set does not exist in the
      SavedModel.

  Returns:
    A MetaGraphDef corresponding to the tag-set.
  """
  wanted_tags = set(tag_set.split(','))
  saved_model = reader.read_saved_model(saved_model_dir)
  for meta_graph_def in saved_model.meta_graphs:
    if set(meta_graph_def.meta_info_def.tags) == wanted_tags:
      return meta_graph_def
  raise RuntimeError('MetaGraphDef associated with tag-set ' + tag_set +
                     ' could not be found in SavedModel')
def get_signature_def_map(saved_model_dir, tag_set):
  """Gets the SignatureDef map of one MetaGraphDef in a SavedModel.

  Args:
    saved_model_dir: Directory containing the SavedModel to inspect or execute.
    tag_set: Group of tag(s) of the MetaGraphDef with the SignatureDef map, in
      string format, separated by ','. For tag-set contains multiple tags, all
      tags must be passed in.

  Returns:
    A SignatureDef map that maps from string keys to SignatureDefs.
  """
  return get_meta_graph_def(saved_model_dir, tag_set).signature_def
def run_saved_model_with_feed_dict(saved_model_dir, tag_set, signature_def_key,
                                   input_tensor_key_feed_dict, outdir,
                                   overwrite_flag, tf_debug=False):
  """Runs SavedModel and fetch all outputs.

  Runs the input dictionary through the MetaGraphDef within a SavedModel
  specified by the given tag_set and SignatureDef. Also save the outputs to file
  if outdir is not None.

  Args:
    saved_model_dir: Directory containing the SavedModel to execute.
    tag_set: Group of tag(s) of the MetaGraphDef with the SignatureDef map, in
      string format, separated by ','. For tag-set contains multiple tags, all
      tags must be passed in.
    signature_def_key: A SignatureDef key string.
    input_tensor_key_feed_dict: A dictionary maps input keys to numpy ndarrays.
    outdir: A directory to save the outputs to. If the directory doesn't exist,
      it will be created.
    overwrite_flag: A boolean flag to allow overwrite output file if file with
      the same name exists.
    tf_debug: A boolean flag to use TensorFlow Debugger (TFDBG) to observe the
      intermediate Tensor values and runtime GraphDefs while running the
      SavedModel.

  Raises:
    RuntimeError: An error when output file already exists and overwrite is not
      enabled.
  """
  # Get a list of output tensor names.
  meta_graph_def = get_meta_graph_def(saved_model_dir, tag_set)

  # Re-create feed_dict based on input tensor name instead of key as session.run
  # uses tensor name.
  inputs_tensor_info = _get_inputs_tensor_info_from_meta_graph_def(
      meta_graph_def, signature_def_key)
  inputs_feed_dict = {
      inputs_tensor_info[key].name: tensor
      for key, tensor in input_tensor_key_feed_dict.items()
  }
  # Get outputs
  outputs_tensor_info = _get_outputs_tensor_info_from_meta_graph_def(
      meta_graph_def, signature_def_key)
  # Sort to preserve order because we need to go from value to key later.
  output_tensor_keys_sorted = sorted(outputs_tensor_info.keys())
  output_tensor_names_sorted = [
      outputs_tensor_info[tensor_key].name
      for tensor_key in output_tensor_keys_sorted
  ]

  # Run the whole SignatureDef in a fresh graph/session so state from any
  # previous run cannot leak in.
  with session.Session(graph=ops_lib.Graph()) as sess:
    loader.load(sess, tag_set.split(','), saved_model_dir)

    if tf_debug:
      # Wrap the session so every sess.run drops into the TFDBG CLI.
      sess = local_cli_wrapper.LocalCLIDebugWrapperSession(sess)

    # Outputs come back in the same order as output_tensor_names_sorted,
    # so index i maps back to output_tensor_keys_sorted[i] below.
    outputs = sess.run(output_tensor_names_sorted, feed_dict=inputs_feed_dict)

    for i, output in enumerate(outputs):
      output_tensor_key = output_tensor_keys_sorted[i]
      print('Result for output key %s:\n%s' % (output_tensor_key, output))

      # Only save if outdir is specified.
      if outdir:
        # Create directory if outdir does not exist
        if not os.path.isdir(outdir):
          os.makedirs(outdir)
        output_full_path = os.path.join(outdir, output_tensor_key + '.npy')

        # If overwrite not enabled and file already exist, error out
        if not overwrite_flag and os.path.exists(output_full_path):
          raise RuntimeError(
              'Output file %s already exists. Add \"--overwrite\" to overwrite'
              ' the existing output files.' % output_full_path)

        np.save(output_full_path, output)
        print('Output %s is saved to %s' % (output_tensor_key,
                                            output_full_path))
def preprocess_inputs_arg_string(inputs_str):
  """Parses input arg into dictionary that maps input to file/variable tuple.

  Parses input string in the format of, for example,
  "input1=filename1[variable_name1],input2=filename2" into a
  dictionary looks like
  {'input_key1': (filename1, variable_name1),
   'input_key2': (file2, None)}
  , which maps input keys to a tuple of file name and variable name(None if
  empty).

  Args:
    inputs_str: A string that specified where to load inputs. Inputs are
      separated by semicolons.
        * For each input key:
          '<input_key>=<filename>' or
          '<input_key>=<filename>[<variable_name>]'
        * The optional 'variable_name' key will be set to None if not specified.

  Returns:
    A dictionary that maps input keys to a tuple of file name and variable name.

  Raises:
    RuntimeError: An error when the given input string is in a bad format.
  """
  input_dict = {}
  for input_raw in filter(bool, inputs_str.split(';')):  # skip empty strings
    # Try the 'input=filename[variable_name]' form first.
    match = re.match(r'([^=]+)=([^\[\]]+)\[([^\[\]]+)\]$', input_raw)
    if match:
      input_dict[match.group(1)] = (match.group(2), match.group(3))
      continue
    # Fall back to the plain 'input=filename' form.
    match = re.match(r'([^=]+)=([^\[\]]+)$', input_raw)
    if not match:
      raise RuntimeError(
          '--inputs "%s" format is incorrect. Please follow'
          '"<input_key>=<filename>", or'
          '"<input_key>=<filename>[<variable_name>]"' % input_raw)
    input_dict[match.group(1)] = (match.group(2), None)
  return input_dict
def preprocess_input_exprs_arg_string(input_exprs_str):
  """Parses input arg into dictionary that maps input key to python expression.

  Parses input string in the format of 'input_key=<python expression>' into a
  dictionary that maps each input_key to its python expression.

  Args:
    input_exprs_str: A string that specifies python expression for input keys.
      Each input is separated by semicolon. For each input key:
        'input_key=<python expression>'

  Returns:
    A dictionary that maps input keys to python expressions.

  Raises:
    RuntimeError: An error when the given input string is in a bad format.
  """
  input_dict = {}

  for input_raw in filter(bool, input_exprs_str.split(';')):
    # Bug fix: validate each item, not the whole string -- previously a
    # malformed item like 'a' in 'a;b=1' slipped past the check (because the
    # full string contained '=') and crashed on the unpack below.
    if '=' not in input_raw:
      raise RuntimeError('--input_exprs "%s" format is incorrect. Please follow'
                         '"<input_key>=<python expression>"' % input_exprs_str)
    # Split only on the first '=' so expressions containing '=' (e.g. 'a==b',
    # keyword arguments) are preserved intact.
    input_key, expr = input_raw.split('=', 1)
    input_dict[input_key] = expr
  return input_dict
def load_inputs_from_input_arg_string(inputs_str, input_exprs_str):
  """Parses input arg strings and create inputs feed_dict.

  Parses '--inputs' string for inputs to be loaded from file, and parses
  '--input_exprs' string for inputs to be evaluated from python expression.

  Args:
    inputs_str: A string that specified where to load inputs. Each input is
      separated by semicolon.
        * For each input key:
          '<input_key>=<filename>' or
          '<input_key>=<filename>[<variable_name>]'
        * The optional 'variable_name' key will be set to None if not specified.
        * File specified by 'filename' will be loaded using numpy.load. Inputs
            can be loaded from only .npy, .npz or pickle files.
        * The "[variable_name]" key is optional depending on the input file type
            as descripted in more details below.
        When loading from a npy file, which always contains a numpy ndarray, the
        content will be directly assigned to the specified input tensor. If a
        variable_name is specified, it will be ignored and a warning will be
        issued.
        When loading from a npz zip file, user can specify which variable within
        the zip file to load for the input tensor inside the square brackets. If
        nothing is specified, this function will check that only one file is
        included in the zip and load it for the specified input tensor.
        When loading from a pickle file, if no variable_name is specified in the
        square brackets, whatever that is inside the pickle file will be passed
        to the specified input tensor, else SavedModel CLI will assume a
        dictionary is stored in the pickle file and the value corresponding to
        the variable_name will be used.
    input_exprs_str: A string that specified python expressions for inputs.
        * In the format of: '<input_key>=<python expression>'.
        * numpy module is available as np.

  Returns:
    A dictionary that maps input tensor keys to numpy ndarrays.

  Raises:
    RuntimeError: An error when a key is specified, but the input file contains
      multiple numpy ndarrays, none of which matches the given key.
    RuntimeError: An error when no key is specified, but the input file contains
      more than one numpy ndarrays.
  """
  tensor_key_feed_dict = {}

  inputs = preprocess_inputs_arg_string(inputs_str)
  input_exprs = preprocess_input_exprs_arg_string(input_exprs_str)

  for input_tensor_key, (filename, variable_name) in inputs.items():
    # NOTE(review): newer numpy versions require allow_pickle=True to load
    # pickled objects -- verify against the numpy version this tool supports.
    data = np.load(filename)

    # When a variable_name key is specified for the input file
    if variable_name:
      # if file contains a single ndarray, ignore the input name
      if isinstance(data, np.ndarray):
        warnings.warn(
            'Input file %s contains a single ndarray. Name key \"%s\" ignored.'
            % (filename, variable_name))
        tensor_key_feed_dict[input_tensor_key] = data
      else:
        if variable_name in data:
          tensor_key_feed_dict[input_tensor_key] = data[variable_name]
        else:
          raise RuntimeError(
              'Input file %s does not contain variable with name \"%s\".' %
              (filename, variable_name))
    # When no key is specified for the input file.
    else:
      # Check if npz file only contains a single numpy ndarray.
      if isinstance(data, np.lib.npyio.NpzFile):
        variable_name_list = data.files
        if len(variable_name_list) != 1:
          raise RuntimeError(
              'Input file %s contains more than one ndarrays. Please specify '
              'the name of ndarray to use.' % filename)
        tensor_key_feed_dict[input_tensor_key] = data[variable_name_list[0]]
      else:
        # npy files and un-named pickle payloads land here and are fed as-is.
        tensor_key_feed_dict[input_tensor_key] = data

  # When input is a python expression:
  for input_tensor_key, py_expr in input_exprs.items():
    if input_tensor_key in tensor_key_feed_dict:
      # --input_exprs deliberately wins over --inputs for duplicate keys.
      warnings.warn(
          'input_key %s has been specified with both --inputs and --input_exprs'
          ' options. Value in --input_exprs will be used.' % input_tensor_key)

    # ast.literal_eval does not work with numpy expressions
    tensor_key_feed_dict[input_tensor_key] = eval(py_expr)  # pylint: disable=eval-used
  return tensor_key_feed_dict
def show(args):
  """Dispatch the `show` command to the right SavedModel inspection helper.

  Precedence: --all dumps everything; otherwise the amount of detail shown
  grows with how much of --tag_set/--signature_def was supplied.

  Args:
    args: A namespace parsed from command line.
  """
  if args.all:
    # --all wins: display every tag-set, SignatureDef key and TensorInfo.
    _show_all(args.dir)
  elif args.tag_set is None:
    # Nothing narrowed down yet: just list the available tag-sets.
    _show_tag_sets(args.dir)
  elif args.signature_def is None:
    # A tag-set but no signature key: list the SignatureDef keys in it.
    _show_signature_def_map_keys(args.dir, args.tag_set)
  else:
    # Fully specified: show input/output TensorInfo for one SignatureDef.
    _show_inputs_outputs(args.dir, args.tag_set, args.signature_def)
def run(args):
  """Dispatch the `run` command: load inputs, then execute the SavedModel.

  Args:
    args: A namespace parsed from command line.
  """
  # Build the tensor-key -> ndarray feed dict from --inputs/--input_exprs.
  feed_dict = load_inputs_from_input_arg_string(args.inputs, args.input_exprs)
  run_saved_model_with_feed_dict(
      args.dir, args.tag_set, args.signature_def, feed_dict,
      args.outdir, args.overwrite, tf_debug=args.tf_debug)
def create_parser():
  """Creates a parser that parses the command line arguments.

  Returns:
    An argparse.ArgumentParser with `show` and `run` subcommands configured;
    the parsed namespace carries the chosen handler function in `func`.
  """
  parser = argparse.ArgumentParser(
      description='saved_model_cli: Command-line interface for SavedModel')
  parser.add_argument('-v', '--version', action='version', version='0.1.0')

  subparsers = parser.add_subparsers(
      title='commands', description='valid commands', help='additional help')

  # show command
  show_msg = (
      'Usage examples:\n'
      'To show all tag-sets in a SavedModel:\n'
      '$saved_model_cli show --dir /tmp/saved_model\n'
      'To show all available SignatureDef keys in a '
      'MetaGraphDef specified by its tag-set:\n'
      '$saved_model_cli show --dir /tmp/saved_model --tag_set serve\n'
      'For a MetaGraphDef with multiple tags in the tag-set, all tags must be '
      'passed in, separated by \';\':\n'
      '$saved_model_cli show --dir /tmp/saved_model --tag_set serve,gpu\n\n'
      'To show all inputs and outputs TensorInfo for a specific'
      ' SignatureDef specified by the SignatureDef key in a'
      ' MetaGraph.\n'
      # Trailing space below keeps the two literals from fusing into
      # "serve--signature_def" in the rendered help text.
      '$saved_model_cli show --dir /tmp/saved_model --tag_set serve '
      '--signature_def serving_default\n\n'
      'To show all available information in the SavedModel:\n'
      '$saved_model_cli show --dir /tmp/saved_model --all')
  parser_show = subparsers.add_parser(
      'show',
      description=show_msg,
      formatter_class=argparse.RawTextHelpFormatter)
  parser_show.add_argument(
      '--dir',
      type=str,
      required=True,
      help='directory containing the SavedModel to inspect')
  parser_show.add_argument(
      '--all',
      action='store_true',
      help='if set, will output all information in given SavedModel')
  parser_show.add_argument(
      '--tag_set',
      type=str,
      default=None,
      help='tag-set of graph in SavedModel to show, separated by \',\'')
  parser_show.add_argument(
      '--signature_def',
      type=str,
      default=None,
      metavar='SIGNATURE_DEF_KEY',
      help='key of SignatureDef to display input(s) and output(s) for')
  parser_show.set_defaults(func=show)

  # run command
  run_msg = ('Usage example:\n'
             'To run input tensors from files through a MetaGraphDef and save'
             ' the output tensors to files:\n'
             # The example must use the `run` subcommand (it previously said
             # `show`, which does not accept --inputs/--input_exprs/--outdir).
             '$saved_model_cli run --dir /tmp/saved_model --tag_set serve '
             '--signature_def serving_default '
             '--inputs input1_key=/tmp/124.npz[x],input2_key=/tmp/123.npy '
             '--input_exprs \'input3_key=np.ones(2)\' --outdir=/out\n\n'
             'For more information about input file format, please see:\n'
             'https://www.tensorflow.org/programmers_guide/saved_model_cli\n')
  parser_run = subparsers.add_parser(
      'run', description=run_msg, formatter_class=argparse.RawTextHelpFormatter)
  parser_run.add_argument(
      '--dir',
      type=str,
      required=True,
      help='directory containing the SavedModel to execute')
  parser_run.add_argument(
      '--tag_set',
      type=str,
      required=True,
      help='tag-set of graph in SavedModel to load, separated by \',\'')
  parser_run.add_argument(
      '--signature_def',
      type=str,
      required=True,
      metavar='SIGNATURE_DEF_KEY',
      help='key of SignatureDef to run')
  msg = ('Loading inputs from files, in the format of \'<input_key>=<filename>,'
         ' or \'<input_key>=<filename>[<variable_name>]\', separated by \';\'.'
         ' The file format can only be from .npy, .npz or pickle.')
  parser_run.add_argument('--inputs', type=str, default='', help=msg)
  msg = ('Specifying inputs by python expressions, in the format of'
         ' "<input_key>=\'<python expression>\'", separated by \';\'. '
         'numpy module is available as \'np\'. '
         'Will override duplicate input_keys from --inputs option.')
  parser_run.add_argument('--input_exprs', type=str, default='', help=msg)
  parser_run.add_argument(
      '--outdir',
      type=str,
      default=None,
      help='if specified, output tensor(s) will be saved to given directory')
  parser_run.add_argument(
      '--overwrite',
      action='store_true',
      help='if set, output file will be overwritten if it already exists.')
  parser_run.add_argument(
      '--tf_debug',
      action='store_true',
      help='if set, will use TensorFlow Debugger (tfdbg) to watch the '
           'intermediate Tensors and runtime GraphDefs while running the '
           'SavedModel.')
  parser_run.set_defaults(func=run)

  return parser
def main():
  """CLI entry point: parse argv, validate `run` inputs, dispatch handler."""
  parser = create_parser()
  args = parser.parse_args()
  if not hasattr(args, 'func'):
    # No subcommand was given; argparse subcommands are optional on
    # Python 3, so guard instead of crashing on the missing attribute.
    parser.error('too few arguments')
  # Only the `run` subcommand defines --inputs/--input_exprs; the hasattr
  # guard keeps `show` from raising AttributeError on the missing fields.
  if hasattr(args, 'inputs') and not args.inputs and not args.input_exprs:
    # Namespace objects have no error() method; report usage errors via the
    # parser (prints usage and exits with status 2) rather than letting the
    # original `args.error(...)` call raise AttributeError.
    parser.error('At least one of --inputs and --input_exprs is required')
  args.func(args)


if __name__ == '__main__':
  sys.exit(main())
| 39.298999 | 87 | 0.702039 |
73c2a7e219b469c56cadb90ec0996607d34067a3 | 987 | py | Python | pw_bloat/py/setup.py | antmicro/pigweed | a308c3354a6131425e3f484f07f05a1813948860 | [
"Apache-2.0"
] | null | null | null | pw_bloat/py/setup.py | antmicro/pigweed | a308c3354a6131425e3f484f07f05a1813948860 | [
"Apache-2.0"
] | 1 | 2021-06-18T13:54:41.000Z | 2021-06-18T13:54:41.000Z | pw_bloat/py/setup.py | antmicro/pigweed | a308c3354a6131425e3f484f07f05a1813948860 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""pw_bloat"""
import setuptools # type: ignore
setuptools.setup(
name='pw_bloat',
version='0.0.1',
author='Pigweed Authors',
author_email='pigweed-developers@googlegroups.com',
description='Tools for generating binary size report cards',
packages=setuptools.find_packages(),
package_data={'pw_bloat': ['py.typed']},
zip_safe=False,
install_requires=['pw_cli'],
)
| 34.034483 | 79 | 0.734549 |
eed12e711199091f11e82b81487e7b1bac9841ea | 4,781 | py | Python | tests/integration/widgets/tables/test_data_table.py | samwill/bokeh | 228132eba4b696b91b2a77f7e9d07771ba868093 | [
"BSD-3-Clause"
] | 1 | 2021-05-03T15:19:05.000Z | 2021-05-03T15:19:05.000Z | tests/integration/widgets/tables/test_data_table.py | samwill/bokeh | 228132eba4b696b91b2a77f7e9d07771ba868093 | [
"BSD-3-Clause"
] | 3 | 2021-09-08T03:16:42.000Z | 2022-03-12T00:57:18.000Z | tests/integration/widgets/tables/test_data_table.py | samwill/bokeh | 228132eba4b696b91b2a77f7e9d07771ba868093 | [
"BSD-3-Clause"
] | 2 | 2021-01-12T18:22:24.000Z | 2021-10-30T00:32:02.000Z | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from bokeh._testing.util.selenium import RECORD, ButtonWrapper, get_table_cell
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, CustomJS, DataTable, TableColumn
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
# pytest loads this plugin module before running the tests in this file
# (it supplies project-level fixtures such as `bokeh_model_page`,
# presumably — confirm against bokeh._testing.plugins.project).
pytest_plugins = (
    "bokeh._testing.plugins.project",
)
@pytest.mark.selenium
class Test_CellEditor_Base:
    """Shared setup for cell-editor tests: builds a one-column editable table.

    Subclasses are expected to provide ``self.values`` and ``self.editor``.
    """

    def setup_method(self):
        data_source = ColumnDataSource({'values': self.values})
        value_column = TableColumn(field='values', title='values',
                                   editor=self.editor())
        self.table = DataTable(source=data_source, columns=[value_column],
                               editable=True, width=600)
        # Record the column's data every time the selection changes.
        callback = CustomJS(args=dict(s=data_source),
                            code=RECORD("values", "s.data.values"))
        data_source.selected.js_on_change('indices', callback)
@pytest.mark.selenium
class Test_DataTable:
    """Browser tests: a DataTable row carries the 'selected' CSS class exactly
    when its index is in the data source's selection (initial, UI, or JS)."""

    def test_row_highlights_reflect_no_initial_selection(self, bokeh_model_page) -> None:
        """With no selection set, neither row renders as selected."""
        source = ColumnDataSource({'values': [1, 2]})
        column = TableColumn(field='values', title='values')
        table = DataTable(source=source, columns=[column], editable=False, width=600)

        page = bokeh_model_page(table)

        row0 = get_table_cell(page.driver, 1, 1)
        assert 'selected' not in row0.get_attribute('class')

        row1 = get_table_cell(page.driver, 2, 1)
        assert 'selected' not in row1.get_attribute('class')

        assert page.has_no_console_errors()

    def test_row_highlights_reflect_initial_selection(self, bokeh_model_page) -> None:
        """A selection made before render highlights the matching row."""
        source = ColumnDataSource({'values': [1, 2]})
        source.selected.indices = [1]
        column = TableColumn(field='values', title='values')
        table = DataTable(source=source, columns=[column], editable=False, width=600)

        page = bokeh_model_page(table)

        row0 = get_table_cell(page.driver, 1, 1)
        assert 'selected' not in row0.get_attribute('class')

        row1 = get_table_cell(page.driver, 2, 1)
        assert 'selected' in row1.get_attribute('class')

        assert page.has_no_console_errors()

    def test_row_highlights_reflect_ui_selection(self, bokeh_model_page) -> None:
        """Clicking a cell in the browser selects (highlights) its row."""
        source = ColumnDataSource({'values': [1, 2]})
        column = TableColumn(field='values', title='values')
        table = DataTable(source=source, columns=[column], editable=False, width=600)

        page = bokeh_model_page(table)

        # Before the click, neither row is selected.
        row0 = get_table_cell(page.driver, 1, 1)
        assert 'selected' not in row0.get_attribute('class')

        row1 = get_table_cell(page.driver, 2, 1)
        assert 'selected' not in row1.get_attribute('class')

        cell = get_table_cell(page.driver, 2, 1)
        cell.click()

        # After the click, only the clicked row is selected.
        row0 = get_table_cell(page.driver, 1, 1)
        assert 'selected' not in row0.get_attribute('class')

        row1 = get_table_cell(page.driver, 2, 1)
        assert 'selected' in row1.get_attribute('class')

        assert page.has_no_console_errors()

    def test_row_highlights_reflect_js_selection(self, bokeh_model_page) -> None:
        """A selection made from a CustomJS callback highlights the row."""
        source = ColumnDataSource({'values': [1, 2]})
        col = TableColumn(field='values', title='values')
        table = DataTable(source=source, columns=[col], editable=False, width=600)
        # Button whose JS callback selects the second row entirely client-side.
        button = ButtonWrapper("Click", callback=CustomJS(args=dict(s=source), code="""
            s.selected.indices = [1]
        """))

        page = bokeh_model_page(column(button.obj, table))

        # Before the button press, neither row is selected.
        row0 = get_table_cell(page.driver, 1, 1)
        assert 'selected' not in row0.get_attribute('class')

        row1 = get_table_cell(page.driver, 2, 1)
        assert 'selected' not in row1.get_attribute('class')

        button.click(page.driver)

        # After the JS selection, only the second row is selected.
        row0 = get_table_cell(page.driver, 1, 1)
        assert 'selected' not in row0.get_attribute('class')

        row1 = get_table_cell(page.driver, 2, 1)
        assert 'selected' in row1.get_attribute('class')

        assert page.has_no_console_errors()
| 36.776923 | 118 | 0.585024 |
26c94055eb84c6a715605c54ed415631966f5656 | 159 | py | Python | app1/app/api/v2/hello/__init__.py | hzjsea/fastapi-admin | 6a72e83ad967d5a33a1c9d1e861685d68bef06b1 | [
"Apache-2.0"
] | null | null | null | app1/app/api/v2/hello/__init__.py | hzjsea/fastapi-admin | 6a72e83ad967d5a33a1c9d1e861685d68bef06b1 | [
"Apache-2.0"
] | null | null | null | app1/app/api/v2/hello/__init__.py | hzjsea/fastapi-admin | 6a72e83ad967d5a33a1c9d1e861685d68bef06b1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# encoding: utf-8
"""
@author: hzjsea
@file: __init__.py.py
@time: 2021/11/19 10:33 上午
"""
from .hello import router as hello_router
| 13.25 | 41 | 0.685535 |
6b8a1032d8978e039dc082ffaf4d99d4b0f633b2 | 8,900 | py | Python | vivisect/tests/testremote.py | sprout42/vivisect | bbb8072abd0dbe728f08f4f4d1e36a87c7b57c36 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | vivisect/tests/testremote.py | sprout42/vivisect | bbb8072abd0dbe728f08f4f4d1e36a87c7b57c36 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | vivisect/tests/testremote.py | sprout42/vivisect | bbb8072abd0dbe728f08f4f4d1e36a87c7b57c36 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2021-10-12T15:37:24.000Z | 2021-10-21T15:25:31.000Z | import os
import sys
import time
import tempfile
import unittest
import threading
import multiprocessing as mp
import vivisect
import vivisect.cli as vivcli
import vivisect.const as v_const
import vivisect.tests.helpers as helpers
import vivisect.remote.server as v_r_server
import envi.memcanvas as e_mcanvas
def runServer(name, port):
    '''
    Build a saved vivisect workspace for the firefox.exe test binary at
    *name*, then serve that workspace's directory on *port*.
    '''
    servdir = os.path.dirname(name)
    binpath = helpers.getTestPath('windows', 'amd64', 'firefox.exe')

    # Analyze a real (but modestly sized) binary so the served workspace
    # generates workspace events without making the test take forever.
    workspace = vivcli.VivCli()
    workspace.loadFromFile(binpath)
    workspace.setMeta('StorageName', name)
    workspace.saveWorkspace()

    v_r_server.runMainServer(servdir, port)
class VivisectRemoteTests(unittest.TestCase):
    '''
    So...what would be fun is basically a chain of remote workspaces all tied in interesting
    configurations.
    '''

    def test_basic(self):
        """End-to-end remote workspace test: spawn a server process, connect
        two clients, compare the mirrored analysis against a local workspace,
        then exercise the follow-the-leader API and its `do_leaders` CLI.

        The repeated retry/sleep loops exist because both the server startup
        and the client-side event consumption are asynchronous.
        """
        testfile = helpers.getTestPath('windows', 'amd64', 'firefox.exe')
        # Local reference workspace to compare the remote mirrors against.
        good = vivcli.VivCli()
        good.loadFromFile(testfile)

        host = 'localhost'
        port = 0x4097
        with tempfile.TemporaryDirectory() as tmpd:
            tmpf = tempfile.NamedTemporaryFile(dir=tmpd, delete=False)
            try:
                proc = mp.Process(target=runServer, args=(tmpf.name, port,))
                proc.daemon = True
                proc.start()
                # give the other process time to spin up
                time.sleep(0.5)

                # So...yea. The server could not be up yet, but I'm not waiting a million years to
                # wait for it.
                retry = 0
                conn = False
                while retry < 5:
                    try:
                        server = v_r_server.connectToServer(host, port)
                        conn = True
                        break
                    except:
                        retry += 1
                        time.sleep(0.2)

                if not conn:
                    self.fail('Could not connect to %s:%s' % (host, port))

                wslist = server.listWorkspaces()
                self.assertEqual(len(wslist), 1)
                self.assertEqual(server.getServerVersion(), 20130820)
                # Two independent client views of the same served workspace.
                rmtvw = v_r_server.getServerWorkspace(server, wslist[0])
                rmtvw2 = v_r_server.getServerWorkspace(server, wslist[0])

                # So the consumption of events from the server is *also* threaded, so I've got to do some blocking
                # to get us to wait on things
                retry = 0
                while retry < 5:
                    locs = rmtvw2.getLocations()
                    if len(locs) > 1388:
                        break
                    retry += 1
                    time.sleep(0.3)
                    sys.stderr.write('%d' % retry)

                # The mirror must match the locally analyzed workspace.
                self.assertEqual(len(rmtvw2.getLocations()), 1389)
                self.assertEqual(set(rmtvw2.getLocations()), set(good.getLocations()))
                self.assertEqual(set(rmtvw2.getXrefs()), set(good.getXrefs()))

                # test some of the follow-the-leader framework
                testuuid = 'uuid_of_my_dearest_friend_1'
                # first just create a leader session:
                rmtvw.iAmLeader(testuuid, "atlas' moving castle")
                retry = 0
                while retry < 5:
                    # only one session, so we'll run this once - local
                    ldrsess = rmtvw2.getLeaderSessions().get(testuuid)
                    if ldrsess is not None:
                        break
                    retry += 1
                    time.sleep(.1)
                    sys.stderr.write('%d' % retry)

                (user, fname) = ldrsess
                self.assertEqual(fname, "atlas' moving castle")

                # now let's move around a bit
                rmtvw.followTheLeader(testuuid, '0x31337')
                retry = 0
                while retry < 5:
                    # only one session, so we'll run this once - local
                    ldrloc = rmtvw2.getLeaderLoc(testuuid)
                    if ldrloc is not None:
                        break
                    retry += 1
                    time.sleep(.1)
                    sys.stderr.write('%d' % retry)

                self.assertEqual(ldrloc, '0x31337')

                # now let's rename things
                rmtvw.modifyLeaderSession(testuuid, 'rakuy0', "rakuy0's moving castle")
                retry = 0
                while retry < 5:
                    # only one session, so we'll run this once - local
                    ldrsess = list(rmtvw2.getLeaderSessions().items())[0]
                    uuid, (user, fname) = ldrsess
                    if user == 'rakuy0':
                        break
                    retry += 1
                    time.sleep(.1)
                    sys.stderr.write('%d' % retry)

                self.assertEqual(uuid, testuuid)
                self.assertEqual(user, 'rakuy0')
                self.assertEqual(fname, "rakuy0's moving castle")
                self.assertEqual(rmtvw2.getLeaderInfo(testuuid), ('rakuy0', "rakuy0's moving castle"))

                # test the CLI:
                # (a string canvas captures do_leaders output for inspection)
                rmtvw.canvas = e_mcanvas.StringMemoryCanvas(rmtvw)

                # empty leaders
                rmtvw.do_leaders('')
                output = rmtvw.canvas.strval
                self.assertIn('Manage Leader Sessions.', output)
                rmtvw.canvas.strval = ''

                # list leaders
                rmtvw.do_leaders('list')
                output = rmtvw.canvas.strval
                self.assertIn("rakuy0's moving castle", output)
                rmtvw.canvas.strval = ''

                # modify a leader session
                rmtvw.do_leaders('mod %s foo foo bar baz' % testuuid)
                retry = 0
                while retry < 5:
                    ldrsess = rmtvw2.getLeaderSessions().get(testuuid)
                    if ldrsess:
                        user, fname = ldrsess
                        if user == 'foo':
                            break
                    retry += 1
                    time.sleep(.1)
                    sys.stderr.write('%d' % retry)

                output = rmtvw.canvas.strval
                self.assertIn("foo bar baz", output)
                rmtvw.canvas.strval = ''

                # kill leader session
                rmtvw.do_leaders('kill %s' % testuuid)
                retry = 0
                while retry < 5:
                    ldrsess = rmtvw2.getLeaderSessions().get(testuuid)
                    if not ldrsess:
                        break
                    retry += 1
                    time.sleep(.1)
                    sys.stderr.write('%d' % retry)

                output = rmtvw.canvas.strval
                self.assertIn("*Ended: foo's session 'foo bar baz'", output)
                rmtvw.canvas.strval = ''

                # kill all (which means we need to add a few)
                # repopulate
                rmtvw.iAmLeader(testuuid+'1', "castle 1")
                rmtvw.iAmLeader(testuuid+'2', "castle 2")
                rmtvw.iAmLeader(testuuid+'3', "castle 3")
                retry = 0
                while retry < 5:
                    # only one session, so we'll run this once - local
                    ldrsess = rmtvw2.getLeaderSessions().get(testuuid+'3')
                    if ldrsess is not None:
                        break
                    retry += 1
                    time.sleep(.1)
                    sys.stderr.write('%d' % retry)

                # kill them all!
                rmtvw.do_leaders('killall')
                retry = 0
                while retry < 5:
                    ldrsessions = rmtvw2.getLeaderSessions()
                    if not len(ldrsessions):
                        break
                    retry += 1
                    time.sleep(.1)
                    sys.stderr.write('%d' % retry)

                output = rmtvw.canvas.strval
                self.assertIn("castle 3", output)
                self.assertEqual(len(rmtvw2.getLeaderSessions()), 0)
                rmtvw.canvas.strval = ''

                # close down tests
                try:
                    rmtvw.server = None
                    rmtvw2.server = None
                    q = rmtvw.chan_lookup.get(rmtvw.rchan)
                    if q:
                        # So it's not reeeealy auto analysis fini, but it's a good enough stand-in to get
                        # the server thread to shutdown cleaner
                        q.puts((v_const.VWE_AUTOANALFIN, None))
                    proc.terminate()
                    proc.close()
                except:
                    # Best-effort teardown; failures here shouldn't fail the test.
                    pass
            finally:
                tmpf.close()
                os.unlink(tmpf.name)
| 35.742972 | 114 | 0.479101 |
2c69ddc58d42f9329ec300976500526bfcc5cfcc | 7,888 | py | Python | exoatlas/visualizations/panels/ErrorPanel.py | zkbt/exopop | 5e8b9d391fe9e2d39c623d7ccd7eca8fd0f0f3f8 | [
"MIT"
] | 4 | 2020-06-24T16:38:27.000Z | 2022-01-23T01:57:19.000Z | exoatlas/visualizations/panels/ErrorPanel.py | zkbt/exopop | 5e8b9d391fe9e2d39c623d7ccd7eca8fd0f0f3f8 | [
"MIT"
] | 4 | 2018-09-20T23:12:30.000Z | 2019-05-15T15:31:58.000Z | exoatlas/visualizations/panels/ErrorPanel.py | zkbt/exopop | 5e8b9d391fe9e2d39c623d7ccd7eca8fd0f0f3f8 | [
"MIT"
] | null | null | null | from .Panel import *
from ..ink_errorbar import *
__all__ = ['ErrorPanel']
def remove_unit(x):
    '''
    A simple wrapper to help make sure that we're dealing
    just with numerical values, instead of astropy Quantitys
    with units.

    Parameters
    ----------
    x : astropy.units.Quantity or number
        The value to strip.

    Returns
    -------
    number or array
        ``x.value`` if *x* carries astropy units, otherwise *x* unchanged.
    '''
    # isinstance (rather than an exact `type(x) ==` comparison) also accepts
    # Quantity subclasses, which the original equality check would miss.
    if isinstance(x, u.quantity.Quantity):
        return x.value
    else:
        return x
class ErrorPanel(Panel):
    '''
    Error is a general wrapper for making scatter plots
    where planets are represented with 2D error bars, with
    their intensity scaled to some overall visual weight.
    '''

    @property
    def x_unc(self):
        # Symmetric x uncertainty: the mean of the lower and upper error bars.
        l, u = self.x_lowerupper
        #return np.sqrt(l*u)
        return 0.5*(l + u)

    @property
    def y_unc(self):
        # Symmetric y uncertainty: the mean of the lower and upper error bars.
        l, u = self.y_lowerupper
        #return np.sqrt(l*u)
        return 0.5*(l + u)

    def intensity(self, invisible_fraction=0.8,
                  x_power=1,
                  y_power=1):
        '''
        What visual intensity should each datapoint have?
        By default, this will be set by the product of the
        fractional uncertainties on both x and y.

        Parameters
        ----------
        invisible_fraction : float
            The aproximate 2D fractional uncertainty at which
            a point will fade to invisible. For example, if
            set to 0.7, then points will disappear when
            (sigma_x/x)**2 + (sigma_y/y)**2 > 0.7**2
        x_power : float
            The power to which x is raised in the quantity
            used to define the visual weight. For example,
            if we want visual weight to scale with the
            fractional uncertainty on density (mass/radius**3)
            for x showing mass and y showing radius, we'd
            choose x_power=1 and y_power=3.
        y_power : float
            (see `x_power` above)

        Returns
        -------
        intensity : np.array
            The intensity of the points for the current
            population, as a numeric value between 0 and 1.
            By default,
        '''
        # Fractional uncertainties, scaled by each quantity's exponent.
        dlnx = self.x_unc/self.x*np.abs(x_power)
        dlny = self.y_unc/self.y*np.abs(y_power)

        # things with bigger errors should have lower weight
        weight = (1 - np.sqrt(dlnx**2 + dlny**2)/invisible_fraction)

        # clip the weights above 1 (shouldn't exist?) or below zero
        # clipped = np.minimum(np.maximum(weight, 0), 1)

        # return the visual weight
        # NOTE(review): despite the docstring, weights are not actually
        # clipped to [0, 1] here (the clipping line is commented out).
        return remove_unit(weight)

    def plot(self, key, ax=None, labelkw={}, **kw):
        # NOTE(review): `labelkw={}` is a mutable default argument; it appears
        # to be passed through unmodified, but consider `labelkw=None` +
        # normalization to be safe.
        '''
        Add the points for a particular population to this panel.

        Parameters
        ----------
        key : str
            The item in the self.pops dictionary to add.
        ax :
            Into what ax should we place this plot?
            If None, use default.
        labelkw : dict
            Keywords for labeling the planet names.
        **kw : dict
            Any extra keywords will be passed on to `errorbar`
        '''

        # focus attention on that population
        self.point_at(key)

        # make sure we're plotting into the appropriate axes
        try:
            plt.sca(self.ax)
        except AttributeError:
            self.setup(ax=ax)

        # define the data we're trying to plot
        x = remove_unit(self.x)
        y = remove_unit(self.y)

        # set the base color to use throughout
        default_color = plt.scatter([],[]).get_facecolor()[0]
        color = self.pop.color or default_color
        marker = self.pop.marker or 'o'

        # if the entire population is exact (e.g., Solar System),
        # then don't include any errors when plotting
        if self.pop.exact:

            # define plotting keywords without errorbars
            plotkw = dict(color=color,
                          edgecolor=color,
                          marker=marker,
                          **kw)
            plotkw['alpha'] = 1
            plotkw['zorder'] = 1e9
            self.scattered[key] = plt.scatter(x, y, **plotkw)

            # FIXME, 5/25/20: I think BubblePanel is doing
            # something a little more clever with being able
            # to manage face and edge colors separately.
            # Perhaps we should set things up so that we might
            # inherit some of this skills here in ErrorPanel

        else:
            # define the error bars to be plotting
            xl, xu = self.x_lowerupper
            x_unc = remove_unit(np.vstack([xl, xu]))

            yl, yu = self.y_lowerupper
            y_unc = remove_unit(np.vstack([yl, yu]))

            width=1
            # NOTE(review): this rebinding discards the caller's **kw for the
            # errorbar code path — only the keywords below are used. Confirm
            # whether that is intentional.
            kw = dict(#marker='o',
                      linewidth=0,
                      elinewidth=width,
                      alpha=1.0,
                      #capthick=width,
                      #capsize=2,
                      #markersize=3)
                      #color=self.pop.color,
                      #markeredgecolor=self.pop.color,
                      )

            # define an Nx4 array of RGBA colors for the N points
            weights = self.intensity()

            # remove everything with infinite errorbars
            ok = (np.isfinite(xl) &
                  np.isfinite(xu) &
                  np.isfinite(x) &
                  np.isfinite(yl) &
                  np.isfinite(yu) &
                  np.isfinite(y) &
                  np.isfinite(weights))

            n_nouncertainty = sum(ok == False)
            self.speak(f'skipping {n_nouncertainty} planets that are missing data or uncertainties')

            # kludge to remove those that cross zero
            with np.errstate(invalid='ignore'):
                ok *= (self.x - xl) > 0
                ok *= (self.y - yl) > 0
            # FIXME, 5/25/2020: This kludge always useful on
            # logarithmic axes (which are the only that have
            # been defined so far), but at some point we
            # might want to use linear axes too, where we might
            # not want to throw out values that might go
            # negative.

            n_consistentwithzero = sum(ok == False) - n_nouncertainty
            self.speak(f'skipping {n_consistentwithzero} planets that are consistent with zero')

            if (len(x) > 1) & (self.pop.plotkw.get('ink', True)):
                self.speak('plotting inked errorbars, this may take a while')
                # FIXME, 5/25/2020: We should make the
                # "invisible" color be something more flexible
                # than white, in case we're plotting on a dark
                # background. Remember, things look messy if
                # we use alpha to do the visual weighting for
                # these errorbars, because it introduces many
                # more intersecting lines.
                self.scattered[key] = ink_errorbar(x[ok], y[ok],
                                                   yerr=y_unc[:, ok],
                                                   xerr=x_unc[:, ok],
                                                   c=weights[ok],
                                                   cmap=one2another(bottom='white',
                                                                    top=color,
                                                                    alphabottom=1.0,
                                                                    alphatop=1.0),
                                                   **kw)
            else:
                self.scattered[key] = self.ax.errorbar(x[ok], y[ok],
                                                       yerr=y_unc[:, ok],
                                                       xerr=x_unc[:, ok],
                                                       color=self.pop.color,
                                                       **kw)

        # set the scales, limits, labels
        self.finish_plot(labelkw=labelkw)
| 36.859813 | 100 | 0.49962 |
48370b29c0ee5eede9183f77075b69846fb7cfb7 | 5,642 | py | Python | tr/mainloop_test.py | DentonGentry/gfiber-catawampus | b01e4444f3c7f12b1af7837203b37060fd443bb7 | [
"Apache-2.0"
] | 2 | 2017-10-03T16:06:29.000Z | 2020-09-08T13:03:13.000Z | tr/mainloop_test.py | DentonGentry/gfiber-catawampus | b01e4444f3c7f12b1af7837203b37060fd443bb7 | [
"Apache-2.0"
] | null | null | null | tr/mainloop_test.py | DentonGentry/gfiber-catawampus | b01e4444f3c7f12b1af7837203b37060fd443bb7 | [
"Apache-2.0"
] | 1 | 2017-05-07T17:39:02.000Z | 2017-05-07T17:39:02.000Z | #!/usr/bin/python
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test app for mainloop stuff."""
__author__ = 'apenwarr@google.com (Avery Pennarun)'
import os
import select
import socket
from wvtest import unittest
import weakref
import google3
import tornado.ioloop
import garbage
import mainloop
# The single line of traffic the socket tests send and expect to read back.
REQUEST_STRING = 'this is the request\n'

# Invocation counters: idler[0] counts IdleFunc calls, idler[1] counts
# IdleClass.ClassIdleFunc calls; the tests reset and inspect these.
idler = [0, 0]


@mainloop.WaitUntilIdle
def IdleFunc():
  # Bump the module-level counter so tests can see how often this ran.
  print 'i0'
  idler[0] += 1
class IdleClass(object):
  """Holds a WaitUntilIdle-wrapped method, so tests can check how idle
  callbacks behave per instance (see testIdler)."""

  @mainloop.WaitUntilIdle
  def ClassIdleFunc(self):
    # Bump the second counter so tests can count invocations.
    print 'i1: %r' % self
    idler[1] += 1
class MainLoopTest(unittest.TestCase):
  """Tests for mainloop.MainLoop."""

  def setUp(self):
    # Track garbage so tests can detect reference leaks between runs.
    self.gccheck = garbage.GcChecker()

  def tearDown(self):
    self.gccheck.Done()

  def _GotLine(self, line):
    """Line-received callback: record the line and stop the ioloop."""
    print 'got line: %r' % (line,)
    tornado.ioloop.IOLoop.instance().stop()
    self.assertEqual(line, REQUEST_STRING)
    self.got += line

  def _MakeHandler(self, sock, request):
    """Accept callback: wrap the connection in a LineReader.

    Only a weakref to the reader is kept, so testMainLoop can verify the
    reader object is actually freed once its client disconnects.
    """
    lr = mainloop.LineReader(sock, request, self._GotLine)
    self.handler = weakref.ref(lr)

  def _SendRequest(self, stream):
    """Connect callback: write the canned request line, if connected."""
    if stream:
      stream.write(REQUEST_STRING)

  def testMainLoop(self):
    """End-to-end: listen, connect, send one line, then check cleanup."""
    self.got = ''
    loop = mainloop.MainLoop()
    listener = loop.ListenInet(('', 0), self._MakeHandler)
    stream = loop.Connect(listener.family, listener.address, self._SendRequest)
    loop.Start(timeout=5)
    print 'after loop 1'
    self.assertEqual(self.got, REQUEST_STRING)
    stream.close()
    print 'after close'
    loop.RunOnce(timeout=5)
    print 'after loop 2'
    # This slightly weird test ensures that the LineReader object actually
    # gets destroyed after its client closes. If it didn't, we would have
    # a memory leak. self.handler is itself a weakref so that its own
    # existence doesn't prevent the object from being destroyed, thus
    # defeating our test.
    self.assertEqual(self.handler(), None)

  def testMainLoop2(self):
    """Creating, running, and replacing a MainLoop must be safe."""
    loop = mainloop.MainLoop()
    loop.RunOnce()
    del loop
    loop = mainloop.MainLoop()
    loop.RunOnce()

  def testIdler(self):
    """WaitUntilIdle callbacks run exactly once per loop iteration:
    coalesced for the plain function, and once per instance for methods."""
    print
    print 'testIdler'
    loop = mainloop.MainLoop()
    loop.RunOnce()
    idler[0] = 0
    idler[1] = 0
    # Two queued calls coalesce into a single idle invocation.
    IdleFunc()
    IdleFunc()
    loop.RunOnce()
    self.assertEquals(idler, [1, 0])
    # Idle callbacks don't re-fire on later iterations.
    loop.RunOnce()
    self.assertEquals(idler, [1, 0])
    i1 = IdleClass()
    i2 = IdleClass()
    # Two calls per instance coalesce to one invocation per instance.
    i1.ClassIdleFunc()
    i1.ClassIdleFunc()
    i2.ClassIdleFunc()
    i2.ClassIdleFunc()
    loop.RunOnce()
    self.assertEquals(idler, [1, 2])

  def testReentrance(self):
    """An event re-queued for an already-removed handler is tolerated and
    the socket's data stays readable afterward."""
    print
    print 'testReentrance'
    loop = mainloop.MainLoop()
    loop.RunOnce()
    s1, s2 = socket.socketpair()
    s2.send('x')
    select.select([s1], [], [])
    # Now the 'x' has reached s1

    def Handler(fd, events):
      loop.ioloop.remove_handler(fd)
      # NOTE(apenwarr): This simulates a case where epoll (or something)
      # somehow returns an event to tornado even after the handler has
      # been unregistered for that fd. I don't see how that can possibly
      # happen, but apparently it does in the field. I can't find a way
      # to reproduce it normally, so we fake it by just adding the current
      # event back in.
      loop.ioloop._events[fd] = events
    loop.ioloop.add_handler(s1.fileno(), Handler, loop.ioloop.READ)
    loop.RunOnce()
    loop.RunOnce()
    self.assertEquals(s1.recv(1), 'x')

  def testFdReplacement(self):
    """After dup2()-replacing a handler's fd with an idle socket, no
    further events should fire for that handler."""
    print
    print 'testFdReplacement'
    loop = mainloop.MainLoop()
    loop.RunOnce()
    s1, s2 = socket.socketpair()
    s3, unused_s4 = socket.socketpair()
    fd = os.dup(s1.fileno())
    print 'fds are: %d %d %d' % (s1.fileno(), s2.fileno(), fd)
    count = [0]

    def Handler(fd, events):
      count[0] += 1
      print 'handler: %r %r count=%d' % (fd, events, count[0])
    loop.ioloop.add_handler(s1.fileno(), Handler, loop.ioloop.READ)
    loop.RunOnce()
    self.assertEquals(count[0], 0)
    s2.close()
    loop.RunOnce()
    self.assertEquals(count[0], 1)
    loop.RunOnce()
    self.assertEquals(count[0], 2)
    # so far so good. Now replace s1's fd with a totally different
    # (and not active) socket. s1's endpoint still exists as a copy at
    # 'fd', but s1's original fd, which is the one we're waiting on,
    # is no longer producing events.
    # epoll() and select() diverge in behaviour here; epoll weirdly
    # keeps returning events related to s1 but which report the original fd
    # (now owned by the non-eventful copy of s3). select() will return
    # nothing if you select on the original fd, because it sees s3, not s1.
    # Phew.
    # Unfortunately libcurl sometimes produces this behaviour (generally,
    # when it closes its http socket and immediately replaces it), so we
    # need to validate that weird things won't happen in that case.
    s1fn = s1.fileno()
    s1.close()
    os.dup2(s3.fileno(), s1fn)
    loop.ioloop.remove_handler(s1fn)
    loop.ioloop.add_handler(s1fn, Handler, loop.ioloop.READ)
    loop.RunOnce()
    self.assertEquals(count[0], 2)
if __name__ == '__main__':
  # Run all MainLoopTest cases when this file is invoked directly.
  unittest.main()
| 29.082474 | 79 | 0.672988 |
04b4c0941ac016d63925e5ab03caf16e2cfe111c | 1,301 | py | Python | test/python/classical_function_compiler/test_simulate.py | WiFisunset/qiskit-terra | e122c9c0cef78d1ba4ac57442cd03fb0363ba93c | [
"Apache-2.0"
] | 1 | 2021-06-09T11:22:21.000Z | 2021-06-09T11:22:21.000Z | test/python/classical_function_compiler/test_simulate.py | WiFisunset/qiskit-terra | e122c9c0cef78d1ba4ac57442cd03fb0363ba93c | [
"Apache-2.0"
] | 1 | 2019-10-03T12:22:41.000Z | 2019-10-03T12:22:41.000Z | test/python/classical_function_compiler/test_simulate.py | nonhermitian/qiskit-terra | 6a2602a9ecf9b1a3345de1516b873ac7b3da587f | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests LogicNetwork.simulate method."""
import unittest
from ddt import ddt, data
from qiskit.circuit.classicalfunction import classical_function as compile_classical_function
from qiskit.circuit.classicalfunction.classicalfunction import HAS_TWEEDLEDUM
from qiskit.test import QiskitTestCase
from .utils import get_truthtable_from_function, example_list
@ddt
class TestSimulate(QiskitTestCase):
    """Tests LogicNetwork.simulate method"""

    @data(*example_list())
    @unittest.skipUnless(HAS_TWEEDLEDUM, "tweedledum not available")
    def test_(self, a_callable):
        """Exhaustively compare simulate_all() against the example's truth table."""
        expected_table = get_truthtable_from_function(a_callable)
        compiled = compile_classical_function(a_callable)
        self.assertEqual(compiled.simulate_all(), expected_table)
| 38.264706 | 93 | 0.775557 |
8331d2146687494c4950e9244383130dbb08a23d | 5,254 | py | Python | network_devices_manager.py | Network-imperium/network-device-manager | ea922e36240369d60be8ac74ffa69d8a7919d289 | [
"MIT"
] | 1 | 2021-03-30T15:25:02.000Z | 2021-03-30T15:25:02.000Z | network_devices_manager.py | Network-imperium/network-device-manager | ea922e36240369d60be8ac74ffa69d8a7919d289 | [
"MIT"
] | null | null | null | network_devices_manager.py | Network-imperium/network-device-manager | ea922e36240369d60be8ac74ffa69d8a7919d289 | [
"MIT"
] | null | null | null | import json
import tkinter as tk
from datetime import datetime
from tkinter.font import Font
from ssh_executor import *
class Device:
    """A managed network device: an IPv4 address plus a display name."""

    def __init__(self, host, name):
        self.host = host  # IPv4 address used for SSH connections
        self.name = name  # human-readable label shown in the device list
class MainApp:
    """Tkinter front-end for managing network devices over SSH.

    The device list is persisted to ``ndm_config.json`` in the working
    directory; commands run against the selected device through
    ``Client`` from ``ssh_executor``.
    """

    def __init__(self, master):
        # Tk variables backing the entry widgets.
        self.host = tk.StringVar(value="IPv4 address")
        self.device_name = tk.StringVar(value="Device name")
        self.username = tk.StringVar(value="Auth username")
        self.password = tk.StringVar(value="Password")
        self.command = tk.StringVar(value="Device commands")
        default_font = tk.font.nametofont("TkFixedFont")
        default_font.configure(size=8)

        # App state.  ``client``/``current_device`` are only set by login();
        # initialise them to None so command handlers can test them safely
        # before any connection exists (previously execute_save_run raised
        # AttributeError when triggered before the first login).
        self.client = None
        self.current_device = None
        self.devices = []
        self.commands = ["Refresh", "Save run", "Save startup", "Save version", "Run terminal"]

        # Build the widgets.
        self.master = master
        self.master.option_add("*Font", default_font)
        self.master.title("Imperium - Tools - Network Devices Manager")
        self.entry_host = tk.Entry(master, textvariable=self.host)
        self.entry_name = tk.Entry(master, textvariable=self.device_name)
        self.entry_username = tk.Entry(master, textvariable=self.username)
        self.entry_password = tk.Entry(master, textvariable=self.password)
        self.button_add_device = tk.Button(master, text="Add", command=self.add_device)
        self.button_connect = tk.Button(master, text="Connect", command=self.login)
        self.button_remove_device = tk.Button(master, text="Delete", command=self.remove_device)
        self.scroll_devices = tk.Scrollbar(master)
        self.list_devices = tk.Listbox(master, selectmode=tk.SINGLE, xscrollcommand=self.scroll_devices.set)
        self.menu_commands = tk.OptionMenu(master, self.command, *self.commands)

        # Lay the widgets out on the grid.
        self.entry_host.grid(row=0, column=0, sticky=tk.W+tk.E)
        self.entry_name.grid(row=0, column=1, sticky=tk.W+tk.E)
        self.entry_username.grid(row=7, column=0, sticky=tk.W+tk.E)
        self.entry_password.grid(row=7, column=1, sticky=tk.W+tk.E)
        self.button_add_device.grid(row=0, column=2, sticky=tk.W+tk.E)
        self.button_connect.grid(row=7, column=2, sticky=tk.W+tk.E)
        self.button_remove_device.grid(row=7, column=3, columnspan=2, ipadx="40px", sticky=tk.W+tk.E)
        self.scroll_devices.grid(row=1, column=4, rowspan=6, sticky=tk.N+tk.S)
        self.list_devices.grid(row=1, columnspan=4, rowspan=6, sticky=tk.W+tk.E)
        self.menu_commands.grid(row=0, column=3, columnspan=2, sticky=tk.W+tk.E)

        # Initial state: header row, command dispatch hook, saved devices.
        self.list_devices.insert(0, 'IP'.center(16) + 'NAME'.center(24) + 'STATUS'.center(14))
        self.command.trace("w", self.execute_command)
        self.load_config_json()

    def load_config_json(self):
        """Populate the device list from ``ndm_config.json`` if it exists."""
        try:
            with open("ndm_config.json", "r+") as file:
                for device in json.loads(file.read()):
                    host = device['host']
                    name = device['name']
                    self.list_devices.insert(tk.END, host.ljust(16) + name.ljust(24))
                    self.devices.append(Device(host, name))
        except IOError:
            # First run / missing file is a normal condition.
            print("File not accessible")

    def update_config_json(self):
        """Persist the current device list to ``ndm_config.json``."""
        with open("ndm_config.json", "w") as file:
            file.write(json.dumps([device.__dict__ for device in self.devices]))

    def add_device(self):
        """Add the device described by the entry fields and persist the list."""
        host = str(self.host.get())
        name = str(self.device_name.get())
        self.list_devices.insert(tk.END, host.ljust(16) + name.ljust(24))
        self.devices.append(Device(host, name))
        self.update_config_json()

    def execute_command(self, *args):
        """Dispatch the command selected in the options menu, then reset the menu."""
        command = self.command.get()
        if command == self.commands[0]:
            self.refresh_status()
        if command == self.commands[1]:
            self.execute_save_run()
        self.command.set("Device commands")

    def login(self):
        """Connect to the selected device with the entered credentials.

        Returns the connected ``Client``, or None when no device row is
        selected (row 0 is the header line and is never selectable).
        """
        selection = self.list_devices.curselection()
        if selection and selection[0] != 0:
            self.current_device = self.devices[selection[0] - 1]
            username = self.username.get()
            password = self.password.get()
            self.client = Client(self.current_device.host, username, password)
            self.client._connect()
            return self.client
        return None

    def remove_device(self):
        """Delete the selected device from the list widget and the config file."""
        selection = self.list_devices.curselection()
        if selection and selection[0] != 0:  # never delete the header row
            self.list_devices.delete(selection[0])
            self.devices.pop(selection[0] - 1)
            self.update_config_json()

    def refresh_status(self):
        """Placeholder: device reachability refresh is not implemented yet."""
        pass

    def execute_save_run(self):
        """Dump the running config of the connected device to a dated file."""
        if self.client:
            config = [line.replace('\n', '') for line in self.client.execute("show run")]
            filename = self.current_device.name + '_' + str(datetime.now().date()).replace('-', '')
            with open(filename, "w") as file:
                file.write(''.join(config))
| 38.350365 | 108 | 0.619147 |
2c22b036f728e18b35a20d09205df104680211e8 | 2,849 | py | Python | pipeline/async_predict.py | CornellDataScience/NN_Workshop_SP2020 | da526e6ffdb51e1ea5b1e75e2581686eb46e7ede | [
"MIT"
] | null | null | null | pipeline/async_predict.py | CornellDataScience/NN_Workshop_SP2020 | da526e6ffdb51e1ea5b1e75e2581686eb46e7ede | [
"MIT"
] | null | null | null | pipeline/async_predict.py | CornellDataScience/NN_Workshop_SP2020 | da526e6ffdb51e1ea5b1e75e2581686eb46e7ede | [
"MIT"
] | null | null | null | from collections import deque
from pipeline.pipeline import Pipeline
from pipeline.libs.async_predictor import AsyncPredictor
class AsyncPredict(Pipeline):
    """Pipeline task to perform prediction asynchronously (in separate processes)."""

    def __init__(self, model_path, load_model_fn, num_cpus=1, queue_size=3, ordered=True):
        # Worker pool that loads the model in each subprocess and serves
        # predictions through its put()/get() queues.
        self.predictor = AsyncPredictor(model_path,
                                        load_model_fn,
                                        num_cpus=num_cpus,
                                        queue_size=queue_size,
                                        ordered=ordered)
        self.ordered = ordered
        # Max number of in-flight items before results start being drained.
        self.buffer_size = self.predictor.num_procs * queue_size
        super().__init__()

    def generator(self):
        # Dispatch on self.ordered: the serial variant pairs results with
        # inputs by FIFO order; the parallel variant pairs them by image_id.
        if self.ordered:
            return self.serial_generator()
        else:
            return self.parallel_generator()

    def serial_generator(self):
        # Keep up to buffer_size items in flight; inputs and results are
        # matched purely by submission order (deque is FIFO).
        buffer = deque()
        stop = False
        buffer_cnt = 0
        while self.has_next() and not stop:
            try:
                data = next(self.source)
                buffer.append(data)
                self.predictor.put(data["image"])
                buffer_cnt += 1
            except StopIteration:
                stop = True

            # Once the pipeline is primed, pull one result per submission.
            if buffer_cnt >= self.buffer_size:
                predictions = self.predictor.get()
                data = buffer.popleft()
                data["predictions"] = predictions
                if self.filter(data):
                    yield self.map(data)

        # Drain the remaining in-flight items after the source is exhausted.
        while len(buffer):
            predictions = self.predictor.get()
            data = buffer.popleft()
            data["predictions"] = predictions
            if self.filter(data):
                yield self.map(data)

    def parallel_generator(self):
        # Unordered variant: results may come back in any order, so inputs
        # are buffered in a dict keyed by image_id.
        buffer = {}
        stop = False
        buffer_cnt = 0
        while self.has_next() and not stop:
            try:
                data = next(self.source)
                buffer[data["image_id"]] = data
                self.predictor.put((data["image_id"], data["image"]))
                buffer_cnt += 1
            except StopIteration:
                stop = True

            if buffer_cnt >= self.buffer_size:
                image_id, predictions = self.predictor.get()
                data = buffer[image_id]
                data["predictions"] = predictions
                del buffer[image_id]
                if self.filter(data):
                    yield self.map(data)

        # Drain whatever is still pending once the source is exhausted.
        while len(buffer.keys()):
            image_id, predictions = self.predictor.get()
            data = buffer[image_id]
            data["predictions"] = predictions
            del buffer[image_id]
            if self.filter(data):
                yield self.map(data)

    def cleanup(self):
        # Terminate the worker processes.
        self.predictor.shutdown()
| 31.655556 | 90 | 0.523693 |
912bfa176bb63ed87da15d398becacc04e54c266 | 816 | py | Python | molpy/cli.py | OliverLemke/molpy | 1be180bf7ff44cfb2c90a6e3b8d4429955de6a53 | [
"MIT"
] | null | null | null | molpy/cli.py | OliverLemke/molpy | 1be180bf7ff44cfb2c90a6e3b8d4429955de6a53 | [
"MIT"
] | 6 | 2020-02-12T14:23:16.000Z | 2020-02-13T15:15:50.000Z | molpy/cli.py | OliverLemke/molpy | 1be180bf7ff44cfb2c90a6e3b8d4429955de6a53 | [
"MIT"
] | null | null | null | import argparse
from .util import read_xyz, distance
# Module-level CLI parser: an XYZ file plus two 0-based atom indices.
# Shared with main() below, which consumes the parsed arguments.
parser = argparse.ArgumentParser(description="Reads XYZ files and calculates the distance between two atoms at index1 and index2")
parser.add_argument('filename', type=str, help="The XYZ file to read")
parser.add_argument('index1', type=int, help="Index of the first atom")
parser.add_argument('index2', type=int, help="Index of the second atom")
def main():
    """CLI entry point: parse arguments, load the molecule, report the distance."""
    args = parser.parse_args()
    molecule = read_xyz(args.filename)
    print(f"Reading XYZ-file:{args.filename}")
    label_a = molecule["labels"][args.index1]
    label_b = molecule["labels"][args.index2]
    print(f"Calculating distance between {label_a} at index {args.index1} and {label_b} at index {args.index2}")
    geometry = molecule["geometry"]
    dist = distance(geometry[args.index1, :], geometry[args.index2, :])
    print(f"The distance is {dist:.3f} Angstrom")
| 35.478261 | 130 | 0.732843 |
06ef1ce64361c9ae9087eebabb52aee58056aeaf | 851 | py | Python | el-wrapper.py | bonifield/extractlinks | 2b7bdca0f42a8da70e95a7861aeff08e73d86b62 | [
"MIT"
] | null | null | null | el-wrapper.py | bonifield/extractlinks | 2b7bdca0f42a8da70e95a7861aeff08e73d86b62 | [
"MIT"
] | null | null | null | el-wrapper.py | bonifield/extractlinks | 2b7bdca0f42a8da70e95a7861aeff08e73d86b62 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# el-wrapper.py | jq '.'
import requests
from extractlinks import ExtractLinks as EL
# suppress warning
import urllib3
# TLS verification is disabled below (verify=False), so silence the
# matching urllib3 warning to keep the JSON output pipeable to jq.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
#URL = "http://google.com/"
URL = "http://cnn.com/"
# Minimal browser-like headers so the target serves regular HTML.
heady={
	'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:86.0) Gecko/20100101 Firefox/86.0',
	'Accept': 'text/html'
}
# NOTE(review): defined but never passed to requests.get below -- the
# request goes out directly, not through this local proxy.
proxies = {
	'http': 'http://127.0.0.1:8888',
	'https': 'https://127.0.0.1:8888'
}
# Fetch the page and hand the response to the link extractor; print the
# aggregated link report as JSON.
r = requests.get(URL, headers=heady, allow_redirects=True, verify=False)
e = EL(content=r)
print(e.json)
# Alternative outputs kept for exploration:
#print(e.links_all)
#for i in e.links_all:
#	print(i)
#for i in e.types_all:
#	print(i)
#for i in e.tags_all:
#	print(i)
#for i in e.attributes_all:
#	print(i)
#for d in e.urlbreakdown_generator_dict():
#	print(d)
#for j in e.urlbreakdown_generator_json():
# print(j) | 20.756098 | 96 | 0.695652 |
e106f6e245e680f88d801db283f49966d8447e6c | 615 | py | Python | test/test_05_delete_group.py | oostapenko84/python_training | 0308c2beae5a375a32c9972412f2920f1cc7d69d | [
"Apache-2.0"
] | null | null | null | test/test_05_delete_group.py | oostapenko84/python_training | 0308c2beae5a375a32c9972412f2920f1cc7d69d | [
"Apache-2.0"
] | null | null | null | test/test_05_delete_group.py | oostapenko84/python_training | 0308c2beae5a375a32c9972412f2920f1cc7d69d | [
"Apache-2.0"
] | null | null | null | __author__ = 'olga.ostapenko'
from model.group import Group
def test_delete_first_group(app):
    """Deleting the first group shrinks the list by one and keeps the rest intact."""
    app.session.login(username="admin", password="secret")
    # Precondition: at least one group must exist to be deleted.
    if app.group.count() == 0:
        app.group.create(Group(name="test", header="test", footer="test"))
    groups_before = app.group.get_group_list()
    app.group.delete_first_group()
    groups_after = app.group.get_group_list()
    assert len(groups_after) == len(groups_before) - 1
    del groups_before[0]
    assert groups_before == groups_after
    assert sorted(groups_before, key=Group.id_or_max) == sorted(groups_after, key=Group.id_or_max)
    app.session.logout()
| 34.166667 | 93 | 0.700813 |
9bbfe38a79f7fcebf6964f8cf12f5b46ebfc31c4 | 1,385 | py | Python | tests/conftest.py | bhanuvrat/rainy-day | 34a6b82a08f32a7e230d3434f658d6c271007993 | [
"MIT"
] | null | null | null | tests/conftest.py | bhanuvrat/rainy-day | 34a6b82a08f32a7e230d3434f658d6c271007993 | [
"MIT"
] | null | null | null | tests/conftest.py | bhanuvrat/rainy-day | 34a6b82a08f32a7e230d3434f658d6c271007993 | [
"MIT"
] | null | null | null | # Standard Library
import functools
# Third Party Stuff
import pytest
from unittest import mock
class PartialMethodCaller:
    """Proxy that pre-binds keyword arguments onto every method of ``obj``.

    Attribute access returns a ``functools.partial`` of the wrapped
    object's attribute with the stored keyword arguments applied.
    """

    def __init__(self, obj, **partial_params):
        self.obj = obj
        self.partial_params = partial_params

    def __getattr__(self, name):
        target = getattr(self.obj, name)
        return functools.partial(target, **self.partial_params)
@pytest.fixture(autouse=True, scope='function')
def cleared_cache():
    """Provide Django's cache, guaranteed empty at the start of every test.

    autouse=True means the cache is wiped before each and every test case.
    """
    from django.core.cache import cache as django_cache

    django_cache.clear()
    return django_cache
@pytest.fixture
def client():
    """API test client whose ``login`` can force-authenticate a given user."""
    from rest_framework.test import APIClient

    class _Client(APIClient):
        def login(self, user=None, backend="django.contrib.auth.backends.ModelBackend", **credentials):
            # Regular credential-based login when no user object is supplied.
            if user is None:
                return super(_Client, self).login(**credentials)
            # Otherwise short-circuit authentication so it returns ``user``.
            with mock.patch('django.contrib.auth.authenticate') as fake_authenticate:
                user.backend = backend
                fake_authenticate.return_value = user
                return super(_Client, self).login(**credentials)

        @property
        def json(self):
            # Convenience accessor: client.json.post(...) sends JSON bodies.
            return PartialMethodCaller(obj=self, content_type='application/json;charset="utf-8"')

    return _Client()
| 28.854167 | 103 | 0.675812 |
cc816ed6238b5be8a1f3698b9ef4e39c81f45150 | 5,524 | py | Python | Nonparametric_Regression/moving_mean.py | AlparslanErol/Course_Related | 9a59eb2857c525769b046b7b2a7706ec4a1cdba8 | [
"MIT"
] | null | null | null | Nonparametric_Regression/moving_mean.py | AlparslanErol/Course_Related | 9a59eb2857c525769b046b7b2a7706ec4a1cdba8 | [
"MIT"
] | null | null | null | Nonparametric_Regression/moving_mean.py | AlparslanErol/Course_Related | 9a59eb2857c525769b046b7b2a7706ec4a1cdba8 | [
"MIT"
] | null | null | null | # IMPORT LIBRARIES
# =============================================================================
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import warnings
# =============================================================================
# VERSION CHECK
# =============================================================================
# Pandas/seaborn display settings; warnings silenced globally for the demo.
pd.set_option('display.float_format', lambda x: '%.3f' % x)
sns.set(style='white', context='notebook', palette='deep')
warnings.filterwarnings('ignore')
sns.set_style('white')
# =============================================================================
# SET UP DATA
df = pd.read_csv("./hw04_data_set.csv")
# SPLIT DATA TRAIN AND TEST
# =============================================================================
num_train = 150
num_test = 122
train = df[0:num_train]
test = df[num_train:]
# =============================================================================
# SORTING BY ERUPTIONS
# =============================================================================
# Sorting matters: the binning functions below assume the samples arrive in
# increasing order of eruption time.
train = train.sort_values(by = ["eruptions"])
train = train.reset_index(drop=True)
test = test.sort_values(by = ["eruptions"])
test = test.reset_index(drop=True)
# NOTE(review): these numpy copies appear unused by the functions below.
train_np_erup = np.array(train["eruptions"])
train_np_wait = np.array(train["waiting"])
test_np_erup = np.array(test["eruptions"])
test_np_wait = np.array(test["waiting"])
# =============================================================================
# CONFIGS
# =============================================================================
# Bin width h (the "widht_param" spelling is kept -- renaming would break
# the functions below), left origin of the first bin, and the bin edges.
widht_param = 0.37
origin_param = 1.5
num_bins = 10
end_point = (origin_param) + (widht_param*num_bins)
bins = np.linspace(origin_param, end_point, num_bins+1)
# =============================================================================
# FUNCTIONS
# =============================================================================
# RETURN NUMBER OF INPUTS IN EACH BIN
def bin_(data):
    """Count how many (pre-sorted) eruption values fall into each bin."""
    counts = np.zeros(num_bins)
    current = 0
    for value in data["eruptions"]:
        lower = origin_param + current * widht_param
        upper = lower + widht_param
        # Move to the next bin once the value leaves the current interval
        # (data is sorted, so one step suffices), then count the sample.
        if not (lower < value <= upper):
            current += 1
        counts[current] += 1
    return counts
# RETURN MIDDLE VALUE OF EACH BIN
def binn_mid(data):
    """Half of each bin's sample count, rounded up (offset to a bin's middle sample)."""
    return np.ceil(bin_(data) / 2)
# DIGITIZE DATA WITH BIN NUMBERS
def split_bin(data):
    """Label each (pre-sorted) sample with its 1-based bin number.

    Returns an array parallel to ``data`` holding the bin index of every
    row, derived from the per-bin counts computed by ``bin_``.
    """
    counts = bin_(data)  # hoisted: previously recomputed on every loop iteration
    output = np.zeros(len(data))
    number = 1
    filled = 0
    for index in range(len(data)):
        output[index] = number
        filled += 1
        # Advance to the next bin once the current one is fully consumed.
        if filled == counts[number - 1]:
            number += 1
            filled = 0
            if number == (num_bins + 1):
                break
    return output
# RETURN MEAN OF EACH BIN
def bin_means(data):
    """Mean waiting time of the samples assigned to each bin."""
    assignments = split_bin(data)  # hoisted: previously recomputed for every bin
    return [data.waiting[assignments == i].mean() for i in range(1, len(bins))]
# ALGORITHM FOR REGRESSOGRAM
def regressogram(data):
    """Regressogram estimate: every sample gets its bin's mean waiting time.

    ``bin_means``/``bin_`` each scan the whole dataset; they were
    previously recomputed on every loop iteration and are hoisted here.
    """
    means = bin_means(data)
    counts = bin_(data)
    output = np.zeros(len(data))
    number = 1
    filled = 0
    for index in range(len(data)):
        output[index] = means[number - 1]
        filled += 1
        if filled == counts[number - 1]:
            number += 1
            filled = 0
            if number == (num_bins + 1):
                break
    return output
# FUNCTION FOR EVALUATION
def evaluate(data):
    """Print the RMSE of the running-mean smoother on ``data`` (rounded to 4 dp)."""
    residuals = moving_avg(data) - data["waiting"]
    rmse = np.sqrt((residuals ** 2).mean())
    rmse = float("{0:.4f}".format(rmse))
    print("Running Mean Smoother => RMSE is ", rmse, " when h is ", widht_param)
# IMPLEMENTATION OF RUNNING MEAN ALGORITHM
# Smooths each waiting time by averaging it with the waiting time of the
# current bin's "middle" sample (bin start index + half the bin count).
# NOTE(review): bin_()/binn_mid() rescan the whole dataset on every
# iteration, and the middle index is assumed to stay within range for the
# last bin -- confirm on datasets with sparse final bins.
def moving_avg(data):
    output = np.zeros(len(data))
    number = 1  # 1-based index of the current bin
    temp = 0    # samples consumed so far from the current bin
    tmp = 0     # 0-based index of the current bin
    dummy = 0   # index of the first sample of the current bin
    for index, value in enumerate(data.waiting):
        output[index] = (data.waiting[index] + data.waiting[dummy + binn_mid(data)[tmp]])/2
        temp = temp + 1
        if temp == bin_(data)[number - 1]:
            # Current bin exhausted: advance the bin bookkeeping.
            number = number + 1
            temp = 0
            dummy = np.sum(bin_(data)[:tmp+1])
            tmp = tmp + 1
            if number == (num_bins + 1):
                break
    return output
# =============================================================================
if __name__ == '__main__':
    # PLOTTING
    # =============================================================================
    # Scatter the train (blue-edged) vs test (red) points and overlay the
    # running-mean fit computed on the training split.
    a = plt.scatter(train["eruptions"], train["waiting"], edgecolors='b')
    b = plt.scatter(test["eruptions"], test["waiting"], color='r')
    plt.plot(train["eruptions"],moving_avg(train), linewidth = 3, color = 'k')
    plt.legend((a,b),
               ('train', 'test'),
               scatterpoints=1,
               loc='upper left',
               ncol=3,
               fontsize=10)
    plt.xlabel('Eruption time (min)')
    plt.ylabel('Waiting time to next eruption (min)')
    plt.title('h = 0.37')
    plt.show()
    # =============================================================================
    # EVALUATION WITH TEST DATA
    # =============================================================================
    # Report the RMSE of the running-mean smoother on the held-out split.
    evaluate(test)
    # =============================================================================
70c3be45ba353fb3ba350fc459dd20000eebc7e5 | 590 | py | Python | ddcz/forms/comments.py | jimmeak/graveyard | 4c0f9d5e8b6c965171d9dc228c765b662f5b7ab4 | [
"MIT"
] | 6 | 2018-06-10T09:47:50.000Z | 2022-02-13T12:22:07.000Z | ddcz/forms/comments.py | dracidoupe/graveyard | 706e17cc60879623115bc9b8e98c0b74a607c9b1 | [
"MIT"
] | 268 | 2018-05-30T21:54:50.000Z | 2022-01-08T21:00:03.000Z | ddcz/forms/comments.py | jimmeak/graveyard | 4c0f9d5e8b6c965171d9dc228c765b662f5b7ab4 | [
"MIT"
] | 4 | 2018-09-14T03:50:08.000Z | 2021-04-19T19:36:23.000Z | from enum import Enum
from django import forms
class CommentAction(Enum):
    """Marker for the hidden ``post_type`` field: add or delete a comment."""

    # Single-character codes posted by the forms below and dispatched on
    # by the consuming view.
    ADD = "a"
    DELETE = "d"
class PhorumCommentForm(forms.Form):
    """Form for posting a new phorum comment."""

    # Hidden action marker: carries CommentAction.ADD for the view's dispatch.
    post_type = forms.CharField(
        widget=forms.HiddenInput(attrs={"value": CommentAction.ADD.value})
    )
    # Comment body; label suppressed, styling applied via the CSS class.
    text = forms.CharField(
        label="", widget=forms.Textarea(attrs={"class": "comment__textarea"})
    )
class DeletePhorumCommentForm(forms.Form):
    """Form for deleting an existing phorum comment."""

    # Hidden action marker: carries CommentAction.DELETE for the view's dispatch.
    post_type = forms.CharField(
        widget=forms.HiddenInput(attrs={"value": CommentAction.DELETE.value})
    )
    # Identifier of the comment to delete, also hidden from the user.
    post_id = forms.CharField(widget=forms.HiddenInput)
| 24.583333 | 77 | 0.689831 |
5bfa3fc4d1dd47c4a6411f9d873a6e247ec52f77 | 15,576 | py | Python | nova/api/openstack/compute/contrib/cells.py | yuyuyu101/nova | 0f3776d036be5c2174eb848af1e312f7df35de26 | [
"Apache-2.0"
] | null | null | null | nova/api/openstack/compute/contrib/cells.py | yuyuyu101/nova | 0f3776d036be5c2174eb848af1e312f7df35de26 | [
"Apache-2.0"
] | null | null | null | nova/api/openstack/compute/contrib/cells.py | yuyuyu101/nova | 0f3776d036be5c2174eb848af1e312f7df35de26 | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The cells extension."""
from oslo.config import cfg
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.cells import rpc_driver
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
# Module-level logger and config; the cells options are declared in
# nova.cells.opts and imported so CONF.cells.name/capabilities resolve.
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')
# Policy-check helper for the 'compute:cells' extension.
authorize = extensions.extension_authorizer('compute', 'cells')
def make_cell(elem):
    """Attach the standard cell attributes, capabilities and capacities to *elem*."""
    for attr in ('name', 'username', 'type', 'rpc_host', 'rpc_port'):
        elem.set(attr)
    caps = xmlutil.SubTemplateElement(elem, 'capabilities',
                                      selector='capabilities')
    # Each capability dict entry is rendered as <key>value</key>.
    cap = xmlutil.SubTemplateElement(caps, xmlutil.Selector(0),
                                     selector=xmlutil.get_items)
    cap.text = 1
    make_capacity(elem)
def make_capacity(cell):
    """Add the <capacities> subtree (ram_free / disk_free with per-unit counts)."""
    def get_units_by_mb(capacity_info):
        return capacity_info['units_by_mb'].items()

    capacity = xmlutil.SubTemplateElement(cell, 'capacities',
                                          selector='capacities')

    def add_free_element(tag):
        # One <ram_free>/<disk_free> element: a total plus per-unit counts.
        free = xmlutil.SubTemplateElement(capacity, tag, selector=tag)
        free.set('total_mb', 'total_mb')
        unit = xmlutil.SubTemplateElement(free, 'unit_by_mb',
                                          selector=get_units_by_mb)
        unit.set('mb', 0)
        unit.set('unit', 1)

    add_free_element('ram_free')
    add_free_element('disk_free')
# Default XML namespace map shared by the cell templates below.
cell_nsmap = {None: wsgi.XMLNS_V10}
class CellTemplate(xmlutil.TemplateBuilder):
    """Serializer template for a single cell response."""

    def construct(self):
        cell_elem = xmlutil.TemplateElement('cell', selector='cell')
        make_cell(cell_elem)
        return xmlutil.MasterTemplate(cell_elem, 1, nsmap=cell_nsmap)
class CellsTemplate(xmlutil.TemplateBuilder):
    """Serializer template for a list of cells."""

    def construct(self):
        cells_root = xmlutil.TemplateElement('cells')
        cell_elem = xmlutil.SubTemplateElement(cells_root, 'cell', selector='cells')
        make_cell(cell_elem)
        return xmlutil.MasterTemplate(cells_root, 1, nsmap=cell_nsmap)
class CellDeserializer(wsgi.XMLDeserializer):
    """Deserializer to handle xml-formatted cell create requests."""

    def _extract_capabilities(self, cap_node):
        # Each child element of <capabilities> becomes a name -> text entry.
        caps = {}
        for cap in cap_node.childNodes:
            cap_name = cap.tagName
            caps[cap_name] = self.extract_text(cap)
        return caps

    def _extract_cell(self, node):
        # Build a dict from the children of <cell>; fields default to plain
        # text extraction, with per-field converters for the exceptions.
        cell = {}
        cell_node = self.find_first_child_named(node, 'cell')
        extract_fns = {
            'capabilities': self._extract_capabilities,
            'rpc_port': lambda child: int(self.extract_text(child)),
        }
        for child in cell_node.childNodes:
            name = child.tagName
            extract_fn = extract_fns.get(name, self.extract_text)
            cell[name] = extract_fn(child)
        return cell

    def default(self, string):
        """Deserialize an xml-formatted cell create request."""
        node = xmlutil.safe_minidom_parse_string(string)
        return {'body': {'cell': self._extract_cell(node)}}
def _filter_keys(item, keys):
"""
Filters all model attributes except for keys
item is a dict
"""
return dict((k, v) for k, v in item.iteritems() if k in keys)
def _fixup_cell_info(cell_info, keys):
    """
    If the transport_url is present in the cell, derive username,
    rpc_host, and rpc_port from it.

    Mutates cell_info in place.  NOTE(review): the happy path returns
    None while the ValueError path returns cell_info -- callers ignore
    the return value, so the inconsistency is harmless but confusing.
    """
    if 'transport_url' not in cell_info:
        return

    # Disassemble the transport URL
    transport_url = cell_info.pop('transport_url')
    try:
        transport = rpc_driver.parse_transport_url(transport_url)
    except ValueError:
        # Just go with None's
        for key in keys:
            cell_info.setdefault(key, None)
        return cell_info

    # Map the API field name to its transport-dict counterpart; other
    # keys (e.g. 'username') share the same name in both.
    transport_field_map = {'rpc_host': 'hostname', 'rpc_port': 'port'}
    for key in keys:
        if key in cell_info:
            continue
        transport_field = transport_field_map.get(key, key)
        cell_info[key] = transport[transport_field]
def _scrub_cell(cell, detail=False):
    """Build the API-visible dict for *cell*: whitelisted keys plus a 'type' field."""
    keys = ['name', 'username', 'rpc_host', 'rpc_port']
    if detail:
        keys = keys + ['capabilities']
    # transport_url is carried along so _fixup_cell_info can derive the
    # rpc fields from it, then it is dropped from the result.
    cell_info = _filter_keys(cell, keys + ['transport_url'])
    _fixup_cell_info(cell_info, keys)
    if cell['is_parent']:
        cell_info['type'] = 'parent'
    else:
        cell_info['type'] = 'child'
    return cell_info
class Controller(object):
    """Controller for Cell resources.

    Every handler pulls the request context from the WSGI environ and runs
    the 'compute:cells' policy check before touching the cells RPC API.
    """

    def __init__(self, ext_mgr):
        self.compute_api = compute.API()
        self.cells_rpcapi = cells_rpcapi.CellsAPI()
        self.ext_mgr = ext_mgr

    def _get_cells(self, ctxt, req, detail=False):
        """Return all cells."""
        # Ask the CellsManager for the most recent data
        items = self.cells_rpcapi.get_cell_info_for_neighbors(ctxt)
        # Honor the standard ?limit/?marker pagination parameters.
        items = common.limited(items, req)
        items = [_scrub_cell(item, detail=detail) for item in items]
        return dict(cells=items)

    @wsgi.serializers(xml=CellsTemplate)
    def index(self, req):
        """Return all cells in brief."""
        ctxt = req.environ['nova.context']
        authorize(ctxt)
        return self._get_cells(ctxt, req)

    @wsgi.serializers(xml=CellsTemplate)
    def detail(self, req):
        """Return all cells in detail."""
        ctxt = req.environ['nova.context']
        authorize(ctxt)
        return self._get_cells(ctxt, req, detail=True)

    @wsgi.serializers(xml=CellTemplate)
    def info(self, req):
        """Return name and capabilities for this cell."""
        context = req.environ['nova.context']
        authorize(context)
        cell_capabs = {}
        # CONF.cells.capabilities is a list of 'key=value' strings.
        my_caps = CONF.cells.capabilities
        for cap in my_caps:
            key, value = cap.split('=')
            cell_capabs[key] = value
        cell = {'name': CONF.cells.name,
                'type': 'self',
                'rpc_host': None,
                'rpc_port': 0,
                'username': None,
                'capabilities': cell_capabs}
        return dict(cell=cell)

    @wsgi.serializers(xml=CellTemplate)
    def capacities(self, req, id=None):
        """Return capacities for a given cell or all cells."""
        # TODO(kaushikc): return capacities as a part of cell info and
        # cells detail calls in v3, along with capabilities
        if not self.ext_mgr.is_loaded('os-cell-capacities'):
            raise exc.HTTPNotFound()

        context = req.environ['nova.context']
        authorize(context)
        try:
            # With id=None this reports aggregate capacities for all cells.
            capacities = self.cells_rpcapi.get_capacities(context,
                                                          cell_name=id)
        except exception.CellNotFound:
            msg = (_("Cell %(id)s not found.") % {'id': id})
            raise exc.HTTPNotFound(explanation=msg)

        return dict(cell={"capacities": capacities})

    @wsgi.serializers(xml=CellTemplate)
    def show(self, req, id):
        """Return data about the given cell name.  'id' is a cell name."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            cell = self.cells_rpcapi.cell_get(context, id)
        except exception.CellNotFound:
            raise exc.HTTPNotFound()
        return dict(cell=_scrub_cell(cell))

    def delete(self, req, id):
        """Delete a child or parent cell entry.  'id' is a cell name."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            num_deleted = self.cells_rpcapi.cell_delete(context, id)
        except exception.CellsUpdateUnsupported as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        if num_deleted == 0:
            raise exc.HTTPNotFound()
        # Empty body on success.
        return {}

    def _validate_cell_name(self, cell_name):
        """Validate cell name is not empty and doesn't contain '!' or '.'."""
        # '!' and '.' are the cell-routing path separators and therefore
        # cannot appear inside a single cell's name.
        if not cell_name:
            msg = _("Cell name cannot be empty")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        if '!' in cell_name or '.' in cell_name:
            msg = _("Cell name cannot contain '!' or '.'")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)

    def _validate_cell_type(self, cell_type):
        """Validate cell_type is 'parent' or 'child'."""
        if cell_type not in ['parent', 'child']:
            msg = _("Cell type must be 'parent' or 'child'")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)

    def _normalize_cell(self, cell, existing=None):
        """
        Normalize input cell data.  Mutates *cell* in place.  Normalizations
        include:

        * Converting cell['type'] to is_parent boolean.
        * Merging existing transport URL with transport information.
        """
        # Start with the cell type conversion
        if 'type' in cell:
            self._validate_cell_type(cell['type'])
            cell['is_parent'] = cell['type'] == 'parent'
            del cell['type']
        # Avoid cell type being overwritten to 'child'
        elif existing:
            cell['is_parent'] = existing['is_parent']
        else:
            cell['is_parent'] = False

        # Now we disassemble the existing transport URL...
        transport = {}
        if existing and 'transport_url' in existing:
            transport = rpc_driver.parse_transport_url(
                existing['transport_url'])

        # Copy over the input fields
        transport_field_map = {
            'username': 'username',
            'password': 'password',
            'hostname': 'rpc_host',
            'port': 'rpc_port',
            'virtual_host': 'rpc_virtual_host',
        }
        for key, input_field in transport_field_map.items():
            # Set the default value of the field; using setdefault()
            # lets us avoid overriding the existing transport URL
            transport.setdefault(key, None)

            # Only override the value if we're given an override
            if input_field in cell:
                transport[key] = cell.pop(input_field)

        # Now set the transport URL
        cell['transport_url'] = rpc_driver.unparse_transport_url(transport)

    @wsgi.serializers(xml=CellTemplate)
    @wsgi.deserializers(xml=CellDeserializer)
    def create(self, req, body):
        """Create a child cell entry."""
        context = req.environ['nova.context']
        authorize(context)
        if 'cell' not in body:
            msg = _("No cell information in request")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        cell = body['cell']
        if 'name' not in cell:
            msg = _("No cell name in request")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        self._validate_cell_name(cell['name'])
        self._normalize_cell(cell)
        try:
            cell = self.cells_rpcapi.cell_create(context, cell)
        except exception.CellsUpdateUnsupported as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        return dict(cell=_scrub_cell(cell))

    @wsgi.serializers(xml=CellTemplate)
    @wsgi.deserializers(xml=CellDeserializer)
    def update(self, req, id, body):
        """Update a child cell entry.  'id' is the cell name to update."""
        context = req.environ['nova.context']
        authorize(context)
        if 'cell' not in body:
            msg = _("No cell information in request")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        cell = body['cell']
        # The id is never updatable; drop it silently if supplied.
        cell.pop('id', None)
        if 'name' in cell:
            self._validate_cell_name(cell['name'])
        try:
            # NOTE(Vek): There is a race condition here if multiple
            #            callers are trying to update the cell
            #            information simultaneously.  Since this
            #            operation is administrative in nature, and
            #            will be going away in the future, I don't see
            #            it as much of a problem...
            existing = self.cells_rpcapi.cell_get(context, id)
        except exception.CellNotFound:
            raise exc.HTTPNotFound()
        self._normalize_cell(cell, existing)
        try:
            cell = self.cells_rpcapi.cell_update(context, id, cell)
        except exception.CellNotFound:
            raise exc.HTTPNotFound()
        except exception.CellsUpdateUnsupported as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        return dict(cell=_scrub_cell(cell))

    def sync_instances(self, req, body):
        """Tell all cells to sync instance info."""
        context = req.environ['nova.context']
        authorize(context)
        project_id = body.pop('project_id', None)
        deleted = body.pop('deleted', False)
        updated_since = body.pop('updated_since', None)
        # Anything left in the body after popping the known keys is invalid.
        if body:
            msg = _("Only 'updated_since', 'project_id' and 'deleted' are "
                    "understood.")
            raise exc.HTTPBadRequest(explanation=msg)
        if updated_since:
            try:
                timeutils.parse_isotime(updated_since)
            except ValueError:
                msg = _('Invalid changes-since value')
                raise exc.HTTPBadRequest(explanation=msg)
        self.cells_rpcapi.sync_instances(context, project_id=project_id,
                updated_since=updated_since, deleted=deleted)
class Cells(extensions.ExtensionDescriptor):
    """Enables cells-related functionality such as adding neighbor cells,
    listing neighbor cells, and getting the capabilities of the local cell.
    """

    name = "Cells"
    alias = "os-cells"
    namespace = "http://docs.openstack.org/compute/ext/cells/api/v1.1"
    updated = "2013-05-14T00:00:00+00:00"

    def get_resources(self):
        """Expose the os-cells resource with its extra collection/member actions."""
        collection_actions = {'detail': 'GET',
                              'info': 'GET',
                              'sync_instances': 'POST',
                              'capacities': 'GET'}
        member_actions = {'capacities': 'GET'}
        resource = extensions.ResourceExtension(
            'os-cells', Controller(self.ext_mgr),
            collection_actions=collection_actions,
            member_actions=member_actions)
        return [resource]
| 35.724771 | 78 | 0.615049 |
d9be4807b6e25c2f847cce96447d4e062bdbbc4a | 2,096 | py | Python | app.py | FurySwordXD/VNC | f515b95cbb64f21d730cd08fcb4dbad8aa51dc67 | [
"MIT"
] | 19 | 2019-04-09T05:38:57.000Z | 2022-03-13T14:08:29.000Z | app.py | csoni70/VNC | f515b95cbb64f21d730cd08fcb4dbad8aa51dc67 | [
"MIT"
] | 5 | 2021-03-18T22:50:19.000Z | 2022-03-11T23:42:16.000Z | app.py | csoni70/VNC | f515b95cbb64f21d730cd08fcb4dbad8aa51dc67 | [
"MIT"
] | 5 | 2019-08-02T09:48:01.000Z | 2022-03-20T19:19:30.000Z | import eel
from vnc import VNC
from threading import Thread
import atexit
import sys
from input_manager import InputManager
from vnc import VNC
# Global application mode: the string 'None' (idle), 'host' (sharing this
# machine's screen) or 'client' (viewing a remote screen).
# NOTE(review): these are the literal string 'None', not the None singleton.
status = 'None'
# Client-side connection state; set to 'active' once connect() succeeds.
connection = 'None'
vnc = VNC()
input_manager = InputManager()
# Serve the web UI assets from the local 'web' directory.
eel.init('web')
@eel.expose
def host():
    """Start sharing this machine's screen and accepting remote input."""
    global status, vnc, transmit_thread, input_manager
    print('Hosting...')
    status = 'host'
    # Screen frames are pushed from a background daemon thread.
    transmit_thread = Thread(target=vnc.transmit, daemon=True)
    transmit_thread.start()
    # A second daemon thread applies input events received from the viewer.
    input_thread = Thread(target=input_manager.receive_input, args=[],
                          daemon=True)
    input_thread.start()
@eel.expose
def stop_host():
    """Stop hosting: flip the global mode back to idle."""
    global status
    status = 'None'
    print("Stopping server...")
@eel.expose
def connect(ip):
    """Connect to a remote host at ``ip`` and begin receiving its screen.

    On success the global ``connection`` becomes 'active'; on failure the
    underlying error is reported (the previous version bound the exception
    but discarded it, hiding the failure reason).
    """
    global status
    global vnc
    global connection
    print('Connecting...')
    status = 'client'
    vnc.ip = ip
    input_manager.ip = ip
    try:
        vnc.start_receive()
        input_manager.connect_input()
    except Exception as e:
        # Report why the connection failed instead of discarding the error.
        print('Connection failed: %s' % e)
    else:
        connection = 'active'
@eel.expose
def transmit_input(data, event_type):
    """Forward a browser input event to the remote host (client mode only).

    ``event_type`` is one of 'keydown', 'keyup', 'mousemove', 'mousedown'
    or 'mouseup'.  ``data`` is the key name for key events, the mouse
    position for moves, or a dict with 'pos' and 'button' for mouse button
    events.  Unknown event types are ignored.

    Cleanup: removed the dead ``pass`` statements and commented-out debug
    prints; an early return replaces the enclosing ``if``.
    """
    if status != 'client':
        return
    if event_type == 'keydown':
        input_manager.transmit_input(keydown=data)
    elif event_type == 'keyup':
        input_manager.transmit_input(keyup=data)
    elif event_type == 'mousemove':
        input_manager.transmit_input(mouse_pos=data)
    elif event_type == 'mousedown':
        input_manager.transmit_input(mouse_pos=data['pos'],
                                     mouse_down=data['button'])
    elif event_type == 'mouseup':
        input_manager.transmit_input(mouse_pos=data['pos'],
                                     mouse_up=data['button'])
# Launch the UI without blocking so the loop below can drive screen updates.
eel.start('index.html', block=False, port=8080)
while True:
    if status == 'host':
        # Hosting: push this machine's screen to the UI preview.
        eel.updateScreen(vnc.image_serializer().decode())
    elif status == 'client':
        if connection == 'active':
            # Viewing: display frames received from the remote host.
            eel.updateScreen(vnc.receive())
    # Yield to eel's green threads; caps updates at ~100/sec.
    eel.sleep(.01)
73a4916ad6fc1c09f7fe263a72305aa6985bbf62 | 529 | py | Python | Multidimensional-Arrays/snake_moves.py | dechevh/Python-Advanced | 9daf33771b9096db77bcbf05ae2a4591b876c723 | [
"MIT"
] | 2 | 2020-09-15T19:12:26.000Z | 2020-09-15T19:12:30.000Z | Multidimensional-Arrays/snake_moves.py | dechevh/Python-Advanced | 9daf33771b9096db77bcbf05ae2a4591b876c723 | [
"MIT"
] | 1 | 2021-07-06T09:20:49.000Z | 2021-07-06T09:20:49.000Z | Multidimensional-Arrays/snake_moves.py | dechevh/Python-Advanced | 9daf33771b9096db77bcbf05ae2a4591b876c723 | [
"MIT"
] | null | null | null | from _collections import deque
rows, cols = [int(x) for x in input().split()]
text = deque(input())
matrix = []
for row in range(rows):
matrix.append([])
for col in range(cols):
matrix[row].append("")
for row in range(rows):
for col in range(cols):
current_col = col
current_char = text.popleft()
if row % 2 != 0:
current_col = cols - 1 - col
matrix[row][current_col] = current_char
text.append(current_char)
for row in matrix:
print(''.join(row))
| 22.041667 | 47 | 0.591682 |
30a11838a820849a36fa3adc851b522523727c8d | 26,321 | py | Python | Lib/site-packages/qwt/scale_widget.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | null | null | null | Lib/site-packages/qwt/scale_widget.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | Lib/site-packages/qwt/scale_widget.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Licensed under the terms of the Qwt License
# Copyright (c) 2002 Uwe Rathmann, for the original C++ code
# Copyright (c) 2015 Pierre Raybaut, for the Python translation/optimization
# (see LICENSE file for more details)
"""
QwtScaleWidget
--------------
.. autoclass:: QwtScaleWidget
:members:
"""
from qwt.scale_draw import QwtScaleDraw
from qwt.scale_engine import QwtLinearScaleEngine
from qwt.color_map import QwtLinearColorMap
from qwt.text import QwtText
from qwt.painter import QwtPainter
from qwt.interval import QwtInterval
from qwt.color_map import QwtColorMap
from qtpy.QtGui import QPainter, QPalette
from qtpy.QtWidgets import QWidget, QSizePolicy, QStyleOption, QStyle, QApplication
from qtpy.QtCore import Qt, QRectF, QSize, Signal, QEvent
import numpy as np
class ColorBar(object):
    # Plain value holder for the optional color bar painted beside the scale.
    def __init__(self):
        self.isEnabled = None  # whether the color bar is painted
        self.width = None  # bar thickness in pixels
        self.interval = QwtInterval()  # value range mapped onto the bar
        self.colorMap = QwtColorMap()  # value -> color mapping
class QwtScaleWidget_PrivateData(object):
    # Internal state bag for QwtScaleWidget, mirroring the C++ Qwt
    # "PrivateData" idiom.  All fields are filled in by initScale().
    def __init__(self):
        self.scaleDraw = None  # QwtScaleDraw doing the actual painting
        self.borderDist = [None] * 2  # [start, end] dist. from widget borders
        self.minBorderDist = [None] * 2  # lower bounds for borderDist
        self.scaleLength = None
        self.margin = None  # gap to the color bar / base line
        self.titleOffset = None  # computed offset of the title text
        self.spacing = None  # gap between color bar, scale and title
        self.title = QwtText()
        self.layoutFlags = None  # bitmask of QwtScaleWidget layout flags
        self.colorBar = ColorBar()
class QwtScaleWidget(QWidget):
    """
    A Widget which contains a scale

    This Widget can be used to decorate composite widgets with
    a scale.

    Layout flags:

      * `QwtScaleWidget.TitleInverted`: The title of vertical scales
        is painted from top to bottom. Otherwise it is painted from
        bottom to top.

    .. py:class:: QwtScaleWidget([parent=None])

        Alignment default is `QwtScaleDraw.LeftScale`.

        :param parent: Parent widget
        :type parent: QWidget or None

    .. py:class:: QwtScaleWidget(align, parent)
        :noindex:

        :param int align: Alignment
        :param QWidget parent: Parent widget
    """

    # Emitted whenever setScaleDiv() installs a different scale division.
    scaleDivChanged = Signal()

    # enum LayoutFlag
    TitleInverted = 1

    def __init__(self, *args):
        self.__data = None
        align = QwtScaleDraw.LeftScale
        if len(args) == 0:
            parent = None
        elif len(args) == 1:
            (parent,) = args
        elif len(args) == 2:
            align, parent = args
        else:
            raise TypeError(
                "%s() takes 0, 1 or 2 argument(s) (%s given)"
                % (self.__class__.__name__, len(args))
            )
        super(QwtScaleWidget, self).__init__(parent)
        self.initScale(align)

    def initScale(self, align):
        """
        Initialize the scale

        :param int align: Alignment
        """
        self.__data = QwtScaleWidget_PrivateData()
        self.__data.layoutFlags = 0
        # Right-hand scales get their vertical title painted top-to-bottom.
        if align == QwtScaleDraw.RightScale:
            self.__data.layoutFlags |= self.TitleInverted
        self.__data.borderDist = [0, 0]
        self.__data.minBorderDist = [0, 0]
        self.__data.margin = 4
        self.__data.titleOffset = 0
        self.__data.spacing = 2
        self.__data.scaleDraw = QwtScaleDraw()
        self.__data.scaleDraw.setAlignment(align)
        self.__data.scaleDraw.setLength(10)
        # Arbitrary default scale; callers are expected to setScaleDiv().
        self.__data.scaleDraw.setScaleDiv(
            QwtLinearScaleEngine().divideScale(0.0, 100.0, 10, 5)
        )
        self.__data.colorBar.colorMap = QwtLinearColorMap()
        self.__data.colorBar.isEnabled = False
        self.__data.colorBar.width = 10
        flags = Qt.AlignmentFlag(Qt.AlignHCenter | Qt.TextExpandTabs | Qt.TextWordWrap)
        self.__data.title.setRenderFlags(flags)
        self.__data.title.setFont(self.font())
        policy = QSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Fixed)
        if self.__data.scaleDraw.orientation() == Qt.Vertical:
            policy.transpose()
        self.setSizePolicy(policy)
        self.setAttribute(Qt.WA_WState_OwnSizePolicy, False)

    def setLayoutFlag(self, flag, on=True):
        """
        Toggle an layout flag

        :param int flag: Layout flag
        :param bool on: True/False

        .. seealso::

            :py:meth:`testLayoutFlag()`
        """
        if (self.__data.layoutFlags & flag != 0) != on:
            if on:
                self.__data.layoutFlags |= flag
            else:
                self.__data.layoutFlags &= ~flag
            self.update()

    def testLayoutFlag(self, flag):
        """
        Test a layout flag

        :param int flag: Layout flag
        :return: True/False

        .. seealso::

            :py:meth:`setLayoutFlag()`
        """
        return self.__data.layoutFlags & flag

    def setTitle(self, title):
        """
        Give title new text contents

        :param title: New title
        :type title: qwt.text.QwtText or str

        .. seealso::

            :py:meth:`title()`
        """
        if isinstance(title, QwtText):
            # Vertical alignment is managed by drawTitle(); strip it here.
            flags = title.renderFlags() & (~int(Qt.AlignTop | Qt.AlignBottom))
            title.setRenderFlags(flags)
            if title != self.__data.title:
                self.__data.title = title
                self.layoutScale()
        else:
            if self.__data.title.text() != title:
                self.__data.title.setText(title)
                self.layoutScale()

    def setAlignment(self, alignment):
        """
        Change the alignment

        :param int alignment: New alignment

        Valid alignment values: see :py:class:`qwt.scale_draw.QwtScaleDraw`

        .. seealso::

            :py:meth:`alignment()`
        """
        if self.__data.scaleDraw:
            self.__data.scaleDraw.setAlignment(alignment)
        # Only adjust the size policy if the user has not installed his own.
        if not self.testAttribute(Qt.WA_WState_OwnSizePolicy):
            policy = QSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Fixed)
            if self.__data.scaleDraw.orientation() == Qt.Vertical:
                policy.transpose()
            self.setSizePolicy(policy)
            self.setAttribute(Qt.WA_WState_OwnSizePolicy, False)
        self.layoutScale()

    def alignment(self):
        """
        :return: position

        .. seealso::

            :py:meth:`setAlignment()`
        """
        if not self.scaleDraw():
            return QwtScaleDraw.LeftScale
        return self.scaleDraw().alignment()

    def setBorderDist(self, dist1, dist2):
        """
        Specify distances of the scale's endpoints from the
        widget's borders. The actual borders will never be less
        than minimum border distance.

        :param int dist1: Left or top Distance
        :param int dist2: Right or bottom distance

        .. seealso::

            :py:meth:`borderDist()`
        """
        if dist1 != self.__data.borderDist[0] or dist2 != self.__data.borderDist[1]:
            self.__data.borderDist = [dist1, dist2]
            self.layoutScale()

    def setMargin(self, margin):
        """
        Specify the margin to the colorBar/base line.

        :param int margin: Margin

        .. seealso::

            :py:meth:`margin()`
        """
        margin = max([0, margin])
        if margin != self.__data.margin:
            self.__data.margin = margin
            self.layoutScale()

    def setSpacing(self, spacing):
        """
        Specify the distance between color bar, scale and title

        :param int spacing: Spacing

        .. seealso::

            :py:meth:`spacing()`
        """
        spacing = max([0, spacing])
        if spacing != self.__data.spacing:
            self.__data.spacing = spacing
            self.layoutScale()

    def setLabelAlignment(self, alignment):
        """
        Change the alignment for the labels.

        :param int alignment: Alignment

        .. seealso::

            :py:meth:`qwt.scale_draw.QwtScaleDraw.setLabelAlignment()`,
            :py:meth:`setLabelRotation()`
        """
        self.__data.scaleDraw.setLabelAlignment(alignment)
        self.layoutScale()

    def setLabelRotation(self, rotation):
        """
        Change the rotation for the labels.

        :param float rotation: Rotation

        .. seealso::

            :py:meth:`qwt.scale_draw.QwtScaleDraw.setLabelRotation()`
        """
        self.__data.scaleDraw.setLabelRotation(rotation)
        self.layoutScale()

    def setLabelAutoSize(self, state):
        """
        Set the automatic size option for labels (default: on).

        :param bool state: On/off

        .. seealso::

            :py:meth:`qwt.scale_draw.QwtScaleDraw.setLabelAutoSize()`
        """
        self.__data.scaleDraw.setLabelAutoSize(state)
        self.layoutScale()

    def setScaleDraw(self, scaleDraw):
        """
        Set a scale draw

        scaleDraw has to be created with new and will be deleted in
        class destructor or the next call of `setScaleDraw()`.
        scaleDraw will be initialized with the attributes of
        the previous scaleDraw object.

        :param qwt.scale_draw.QwtScaleDraw scaleDraw: ScaleDraw object

        .. seealso::

            :py:meth:`scaleDraw()`
        """
        if scaleDraw is None or scaleDraw == self.__data.scaleDraw:
            return
        sd = self.__data.scaleDraw
        if sd is not None:
            # Carry alignment, division and transformation over to the
            # replacement so the visible scale does not change.
            scaleDraw.setAlignment(sd.alignment())
            scaleDraw.setScaleDiv(sd.scaleDiv())
            transform = None
            if sd.scaleMap().transformation():
                transform = sd.scaleMap().transformation().copy()
            scaleDraw.setTransformation(transform)
        self.__data.scaleDraw = scaleDraw
        self.layoutScale()

    def scaleDraw(self):
        """
        :return: scaleDraw of this scale

        .. seealso::

            :py:meth:`setScaleDraw()`
        """
        return self.__data.scaleDraw

    def title(self):
        """
        :return: title

        .. seealso::

            :py:meth:`setTitle`
        """
        return self.__data.title

    def startBorderDist(self):
        """
        :return: start border distance

        .. seealso::

            :py:meth:`setBorderDist`
        """
        return self.__data.borderDist[0]

    def endBorderDist(self):
        """
        :return: end border distance

        .. seealso::

            :py:meth:`setBorderDist`
        """
        return self.__data.borderDist[1]

    def margin(self):
        """
        :return: margin

        .. seealso::

            :py:meth:`setMargin`
        """
        return self.__data.margin

    def spacing(self):
        """
        :return: distance between scale and title

        .. seealso::

            :py:meth:`setSpacing`
        """
        return self.__data.spacing

    def paintEvent(self, event):
        # Paint the styled widget background first, then the scale itself.
        painter = QPainter(self)
        painter.setClipRegion(event.region())
        opt = QStyleOption()
        opt.initFrom(self)
        self.style().drawPrimitive(QStyle.PE_Widget, opt, painter, self)
        self.draw(painter)

    def draw(self, painter):
        """
        Draw the scale

        :param QPainter painter: Painter
        """
        self.__data.scaleDraw.draw(painter, self.palette())
        if (
            self.__data.colorBar.isEnabled
            and self.__data.colorBar.width > 0
            and self.__data.colorBar.interval.isValid()
        ):
            self.drawColorBar(painter, self.colorBarRect(self.contentsRect()))
        r = self.contentsRect()
        # Shrink the rectangle by the border distances before painting the
        # title so it aligns with the scale backbone.
        if self.__data.scaleDraw.orientation() == Qt.Horizontal:
            r.setLeft(r.left() + self.__data.borderDist[0])
            r.setWidth(r.width() - self.__data.borderDist[1])
        else:
            r.setTop(r.top() + self.__data.borderDist[0])
            r.setHeight(r.height() - self.__data.borderDist[1])
        if not self.__data.title.isEmpty():
            self.drawTitle(painter, self.__data.scaleDraw.alignment(), r)

    def colorBarRect(self, rect):
        """
        Calculate the the rectangle for the color bar

        :param QRectF rect: Bounding rectangle for all components of the scale
        :return: Rectangle for the color bar
        """
        cr = QRectF(rect)
        if self.__data.scaleDraw.orientation() == Qt.Horizontal:
            cr.setLeft(cr.left() + self.__data.borderDist[0])
            cr.setWidth(cr.width() - self.__data.borderDist[1] + 1)
        else:
            cr.setTop(cr.top() + self.__data.borderDist[0])
            cr.setHeight(cr.height() - self.__data.borderDist[1] + 1)
        sda = self.__data.scaleDraw.alignment()
        if sda == QwtScaleDraw.LeftScale:
            cr.setLeft(cr.right() - self.__data.margin - self.__data.colorBar.width)
            cr.setWidth(self.__data.colorBar.width)
        elif sda == QwtScaleDraw.RightScale:
            cr.setLeft(cr.left() + self.__data.margin)
            cr.setWidth(self.__data.colorBar.width)
        elif sda == QwtScaleDraw.BottomScale:
            cr.setTop(cr.top() + self.__data.margin)
            cr.setHeight(self.__data.colorBar.width)
        elif sda == QwtScaleDraw.TopScale:
            cr.setTop(cr.bottom() - self.__data.margin - self.__data.colorBar.width)
            cr.setHeight(self.__data.colorBar.width)
        return cr

    def resizeEvent(self, event):
        # The geometry is already changing; avoid a recursive updateGeometry().
        self.layoutScale(False)

    def layoutScale(self, update_geometry=True):
        """
        Recalculate the scale's geometry and layout based on
        the current geometry and fonts.

        :param bool update_geometry: Notify the layout system and call update to redraw the scale
        """
        bd0, bd1 = self.getBorderDistHint()
        if self.__data.borderDist[0] > bd0:
            bd0 = self.__data.borderDist[0]
        if self.__data.borderDist[1] > bd1:
            bd1 = self.__data.borderDist[1]
        colorBarWidth = 0
        if self.__data.colorBar.isEnabled and self.__data.colorBar.interval.isValid():
            colorBarWidth = self.__data.colorBar.width + self.__data.spacing
        r = self.contentsRect()
        if self.__data.scaleDraw.orientation() == Qt.Vertical:
            y = r.top() + bd0
            length = r.height() - (bd0 + bd1)
            if self.__data.scaleDraw.alignment() == QwtScaleDraw.LeftScale:
                x = r.right() - 1.0 - self.__data.margin - colorBarWidth
            else:
                x = r.left() + self.__data.margin + colorBarWidth
        else:
            x = r.left() + bd0
            length = r.width() - (bd0 + bd1)
            if self.__data.scaleDraw.alignment() == QwtScaleDraw.BottomScale:
                y = r.top() + self.__data.margin + colorBarWidth
            else:
                y = r.bottom() - 1.0 - self.__data.margin - colorBarWidth
        self.__data.scaleDraw.move(x, y)
        self.__data.scaleDraw.setLength(length)
        extent = np.ceil(self.__data.scaleDraw.extent(self.font()))
        self.__data.titleOffset = (
            self.__data.margin + self.__data.spacing + colorBarWidth + extent
        )
        if update_geometry:
            self.updateGeometry()
            # for some reason updateGeometry does not send a LayoutRequest
            # event when the parent is not visible and has no layout
            widget = self.parentWidget()
            if widget and not widget.isVisible() and widget.layout() is None:
                if widget.testAttribute(Qt.WA_WState_Polished):
                    QApplication.postEvent(
                        self.parentWidget(), QEvent(QEvent.LayoutRequest)
                    )
            self.update()

    def drawColorBar(self, painter, rect):
        """
        Draw the color bar of the scale widget

        :param QPainter painter: Painter
        :param QRectF rect: Bounding rectangle for the color bar

        .. seealso::

            :py:meth:`setColorBarEnabled()`
        """
        if not self.__data.colorBar.interval.isValid():
            return
        sd = self.__data.scaleDraw
        QwtPainter.drawColorBar(
            painter,
            self.__data.colorBar.colorMap,
            self.__data.colorBar.interval.normalized(),
            sd.scaleMap(),
            sd.orientation(),
            rect,
        )

    def drawTitle(self, painter, align, rect):
        """
        Rotate and paint a title according to its position into a given rectangle.

        :param QPainter painter: Painter
        :param int align: Alignment
        :param QRectF rect: Bounding rectangle
        """
        r = rect
        flags = self.__data.title.renderFlags() & (
            ~int(Qt.AlignTop | Qt.AlignBottom | Qt.AlignVCenter)
        )
        if align == QwtScaleDraw.LeftScale:
            angle = -90.0
            flags |= Qt.AlignTop
            r.setRect(
                r.left(), r.bottom(), r.height(), r.width() - self.__data.titleOffset
            )
        elif align == QwtScaleDraw.RightScale:
            angle = -90.0
            flags |= Qt.AlignTop
            r.setRect(
                r.left() + self.__data.titleOffset,
                r.bottom(),
                r.height(),
                r.width() - self.__data.titleOffset,
            )
        elif align == QwtScaleDraw.BottomScale:
            angle = 0.0
            flags |= Qt.AlignBottom
            r.setTop(r.top() + self.__data.titleOffset)
        else:
            angle = 0.0
            flags |= Qt.AlignTop
            r.setBottom(r.bottom() - self.__data.titleOffset)
        if self.__data.layoutFlags & self.TitleInverted:
            # Paint vertical titles top-to-bottom instead of bottom-to-top.
            if align in (QwtScaleDraw.LeftScale, QwtScaleDraw.RightScale):
                angle = -angle
                r.setRect(r.x() + r.height(), r.y() - r.width(), r.width(), r.height())
        painter.save()
        painter.setFont(self.font())
        painter.setPen(self.palette().color(QPalette.Text))
        painter.translate(r.x(), r.y())
        if angle != 0.0:
            painter.rotate(angle)
        title = self.__data.title
        title.setRenderFlags(flags)
        title.draw(painter, QRectF(0.0, 0.0, r.width(), r.height()))
        painter.restore()

    def scaleChange(self):
        """
        Notify a change of the scale

        This method can be overloaded by derived classes. The default
        implementation updates the geometry and repaints the widget.
        """
        self.layoutScale()

    def sizeHint(self):
        return self.minimumSizeHint()

    def minimumSizeHint(self):
        o = self.__data.scaleDraw.orientation()
        length = 0
        mbd1, mbd2 = self.getBorderDistHint()
        length += max([0, self.__data.borderDist[0] - mbd1])
        length += max([0, self.__data.borderDist[1] - mbd2])
        length += self.__data.scaleDraw.minLength(self.font())
        # Iterate once: the title height depends on the width, which in
        # turn depends on the computed dimension.
        dim = self.dimForLength(length, self.font())
        if length < dim:
            length = dim
            dim = self.dimForLength(length, self.font())
        size = QSize(length + 2, dim)
        if o == Qt.Vertical:
            size.transpose()
        left, right, top, bottom = self.getContentsMargins()
        return size + QSize(left + right, top + bottom)

    def titleHeightForWidth(self, width):
        """
        Find the height of the title for a given width.

        :param int width: Width
        :return: Height
        """
        return np.ceil(self.__data.title.heightForWidth(width, self.font()))

    def dimForLength(self, length, scaleFont):
        """
        Find the minimum dimension for a given length.
        dim is the height, length the width seen in direction of the title.

        :param int length: width for horizontal, height for vertical scales
        :param QFont scaleFont: Font of the scale
        :return: height for horizontal, width for vertical scales
        """
        extent = np.ceil(self.__data.scaleDraw.extent(scaleFont))
        dim = self.__data.margin + extent + 1
        if not self.__data.title.isEmpty():
            dim += self.titleHeightForWidth(length) + self.__data.spacing
        if self.__data.colorBar.isEnabled and self.__data.colorBar.interval.isValid():
            dim += self.__data.colorBar.width + self.__data.spacing
        return dim

    def getBorderDistHint(self):
        """
        Calculate a hint for the border distances.

        This member function calculates the distance
        of the scale's endpoints from the widget borders which
        is required for the mark labels to fit into the widget.
        The maximum of this distance an the minimum border distance
        is returned.

        :return: tuple `(start, end)` of border widths

        .. warning::

            The minimum border distance depends on the font.

        .. seealso::

            :py:meth:`setMinBorderDist()`, :py:meth:`getMinBorderDist()`,
            :py:meth:`setBorderDist()`
        """
        start, end = self.__data.scaleDraw.getBorderDistHint(self.font())
        if start < self.__data.minBorderDist[0]:
            start = self.__data.minBorderDist[0]
        if end < self.__data.minBorderDist[1]:
            end = self.__data.minBorderDist[1]
        return start, end

    def setMinBorderDist(self, start, end):
        """
        Set a minimum value for the distances of the scale's endpoints from
        the widget borders. This is useful to avoid that the scales
        are "jumping", when the tick labels or their positions change
        often.

        :param int start: Minimum for the start border
        :param int end: Minimum for the end border

        .. seealso::

            :py:meth:`getMinBorderDist()`, :py:meth:`getBorderDistHint()`
        """
        self.__data.minBorderDist = [start, end]

    def getMinBorderDist(self):
        """
        Get the minimum value for the distances of the scale's endpoints from
        the widget borders.

        :return: `[start, end]` minimum border widths

        .. seealso::

            :py:meth:`setMinBorderDist()`, :py:meth:`getBorderDistHint()`
        """
        return self.__data.minBorderDist

    def setScaleDiv(self, scaleDiv):
        """
        Assign a scale division

        The scale division determines where to set the tick marks.

        :param qwt.scale_div.QwtScaleDiv scaleDiv: Scale Division

        .. seealso::

            For more information about scale divisions,
            see :py:class:`qwt.scale_div.QwtScaleDiv`.
        """
        sd = self.__data.scaleDraw
        if sd.scaleDiv() != scaleDiv:
            sd.setScaleDiv(scaleDiv)
            self.layoutScale()
            self.scaleDivChanged.emit()

    def setTransformation(self, transformation):
        """
        Set the transformation

        :param qwt.transform.Transform transformation: Transformation

        .. seealso::

            :py:meth:`qwt.scale_draw.QwtAbstractScaleDraw.scaleDraw()`,
            :py:class:`qwt.scale_map.QwtScaleMap`
        """
        self.__data.scaleDraw.setTransformation(transformation)
        self.layoutScale()

    def setColorBarEnabled(self, on):
        """
        En/disable a color bar associated to the scale

        :param bool on: On/Off

        .. seealso::

            :py:meth:`isColorBarEnabled()`, :py:meth:`setColorBarWidth()`
        """
        if on != self.__data.colorBar.isEnabled:
            self.__data.colorBar.isEnabled = on
            self.layoutScale()

    def isColorBarEnabled(self):
        """
        :return: True, when the color bar is enabled

        .. seealso::

            :py:meth:`setColorBarEnabled()`, :py:meth:`setColorBarWidth()`
        """
        return self.__data.colorBar.isEnabled

    def setColorBarWidth(self, width):
        """
        Set the width of the color bar

        :param int width: Width

        .. seealso::

            :py:meth:`colorBarWidth()`, :py:meth:`setColorBarEnabled()`
        """
        if width != self.__data.colorBar.width:
            self.__data.colorBar.width = width
            if self.isColorBarEnabled():
                self.layoutScale()

    def colorBarWidth(self):
        """
        :return: Width of the color bar

        .. seealso::

            :py:meth:`setColorBarWidth()`, :py:meth:`setColorBarEnabled()`
        """
        return self.__data.colorBar.width

    def colorBarInterval(self):
        """
        :return: Value interval for the color bar

        .. seealso::

            :py:meth:`setColorMap()`, :py:meth:`colorMap()`
        """
        return self.__data.colorBar.interval

    def setColorMap(self, interval, colorMap):
        """
        Set the color map and value interval, that are used for displaying
        the color bar.

        :param qwt.interval.QwtInterval interval: Value interval
        :param qwt.color_map.QwtColorMap colorMap: Color map

        .. seealso::

            :py:meth:`colorMap()`, :py:meth:`colorBarInterval()`
        """
        self.__data.colorBar.interval = interval
        if colorMap != self.__data.colorBar.colorMap:
            self.__data.colorBar.colorMap = colorMap
            if self.isColorBarEnabled():
                self.layoutScale()

    def colorMap(self):
        """
        :return: Color map

        .. seealso::

            :py:meth:`setColorMap()`, :py:meth:`colorBarInterval()`
        """
        return self.__data.colorBar.colorMap
| 31.446834 | 143 | 0.569963 |
02534aa4926dd5d9cefd03fbc0f5ada6664e1e15 | 33,530 | py | Python | ryu/topology/switches.py | uiuc-srg/ryu | 2a597f812270ea9690269a20bf659f334c323eb6 | [
"Apache-2.0"
] | null | null | null | ryu/topology/switches.py | uiuc-srg/ryu | 2a597f812270ea9690269a20bf659f334c323eb6 | [
"Apache-2.0"
] | null | null | null | ryu/topology/switches.py | uiuc-srg/ryu | 2a597f812270ea9690269a20bf659f334c323eb6 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import struct
import time
import json
from ryu import cfg
from ryu.topology import event
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import set_ev_cls
from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.exception import RyuException
from ryu.lib import addrconv, hub
from ryu.lib.mac import DONTCARE_STR
from ryu.lib.dpid import dpid_to_str, str_to_dpid
from ryu.lib.port_no import port_no_to_str
from ryu.lib.packet import packet, ethernet
from ryu.lib.packet import lldp, ether_types
from ryu.lib.packet import arp, ipv4, ipv6
from ryu.ofproto.ether import ETH_TYPE_LLDP
from ryu.ofproto import nx_match
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_v1_4
# Module logger and CLI options controlling topology discovery behaviour.
LOG = logging.getLogger(__name__)

CONF = cfg.CONF

CONF.register_cli_opts([
    cfg.BoolOpt('observe-links', default=False,
                help='observe link discovery events.'),
    cfg.BoolOpt('install-lldp-flow', default=True,
                help='link discovery: explicitly install flow entry '
                     'to send lldp packet to controller'),
    cfg.BoolOpt('explicit-drop', default=True,
                help='link discovery: explicitly drop lldp packet in')
])
class Port(object):
    """Data holder describing a single switch port; carried by EventPortXXX.

    Identity (equality and hash) is defined by ``(dpid, port_no)`` only,
    which is what Switch.del_port() relies on to remove ports by value.
    """

    def __init__(self, dpid, ofproto, ofpport):
        super(Port, self).__init__()
        self.dpid = dpid
        self._ofproto = ofproto
        self._config = ofpport.config
        self._state = ofpport.state
        self.port_no = ofpport.port_no
        self.hw_addr = ofpport.hw_addr
        self.name = ofpport.name

    def is_reserved(self):
        # Port numbers above OFPP_MAX are reserved (LOCAL, CONTROLLER, ...).
        return self.port_no > self._ofproto.OFPP_MAX

    def is_down(self):
        link_down = (self._state & self._ofproto.OFPPS_LINK_DOWN) > 0
        admin_down = (self._config & self._ofproto.OFPPC_PORT_DOWN) > 0
        return link_down or admin_down

    def is_live(self):
        # NOTE: OF1.2 has OFPPS_LIVE state
        # return (self._state & self._ofproto.OFPPS_LIVE) > 0
        return not self.is_down()

    def to_dict(self):
        return {'dpid': dpid_to_str(self.dpid),
                'port_no': port_no_to_str(self.port_no),
                'hw_addr': self.hw_addr,
                'name': self.name.rstrip('\0')}

    def __eq__(self, other):
        return (self.dpid, self.port_no) == (other.dpid, other.port_no)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash((self.dpid, self.port_no))

    def __str__(self):
        liveness = 'LIVE' if self.is_live() else 'DOWN'
        return 'Port<dpid=%s, port_no=%s, %s>' % (
            self.dpid, self.port_no, liveness)
class Switch(object):
    """Data holder describing one datapath and its non-reserved ports;
    carried by EventSwitchXXX."""

    def __init__(self, dp):
        super(Switch, self).__init__()
        self.dp = dp
        self.ports = []

    def add_port(self, ofpport):
        """Track ``ofpport`` unless it is a reserved port (LOCAL, etc.)."""
        port = Port(self.dp.id, self.dp.ofproto, ofpport)
        if not port.is_reserved():
            self.ports.append(port)

    def del_port(self, ofpport):
        """Stop tracking the port described by ``ofpport``.

        Bug fix: ``Port.__init__`` requires ``(dpid, ofproto, ofpport)``;
        the previous single-argument call ``Port(ofpport)`` raised
        TypeError whenever a port was deleted.  Removal matches by
        ``Port.__eq__`` (dpid + port_no).
        """
        self.ports.remove(Port(self.dp.id, self.dp.ofproto, ofpport))

    def to_dict(self):
        return {'dpid': dpid_to_str(self.dp.id),
                'ports': [port.to_dict() for port in self.ports]}

    def __str__(self):
        msg = 'Switch<dpid=%s, ' % self.dp.id
        for port in self.ports:
            msg += str(port) + ' '
        msg += '>'
        return msg
class Link(object):
    """Directed link ``src -> dst`` between two ports; carried by
    EventLinkXXX.  Value semantics are defined because Link instances are
    used as dict keys in LinkState."""

    def __init__(self, src, dst):
        super(Link, self).__init__()
        self.src = src
        self.dst = dst

    def to_dict(self):
        return {'src': self.src.to_dict(),
                'dst': self.dst.to_dict()}

    def __eq__(self, other):
        return (self.src, self.dst) == (other.src, other.dst)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash((self.src, self.dst))

    def __str__(self):
        return 'Link: %s to %s' % (self.src, self.dst)
class Host(object):
    """A discovered end-host (MAC address plus attachment port); carried
    by EventHostXXX.  IP address lists start empty and are filled in by
    HostState.update_ip()."""

    def __init__(self, mac, port):
        super(Host, self).__init__()
        self.port = port
        self.mac = mac
        self.ipv4 = []
        self.ipv6 = []

    def to_dict(self):
        return {'mac': self.mac,
                'ipv4': self.ipv4,
                'ipv6': self.ipv6,
                'port': self.port.to_dict()}

    def __eq__(self, host):
        return self.mac == host.mac and self.port == host.port

    def __str__(self):
        pieces = ['Host<mac=%s, port=%s,' % (self.mac, str(self.port)),
                  ','.join(self.ipv4),
                  ','.join(self.ipv6),
                  '>']
        return ''.join(pieces)
class HostState(dict):
    """Dict of mac address -> Host."""

    def __init__(self):
        super(HostState, self).__init__()

    def add(self, host):
        # First sighting wins: an already-known MAC is left untouched.
        self.setdefault(host.mac, host)

    def update_ip(self, host, ip_v4=None, ip_v6=None):
        """Record newly seen IPv4/IPv6 addresses for a known host.

        Hosts whose MAC was never add()-ed are silently ignored, and
        duplicate addresses are not recorded twice.  (Cleanup: replaced
        ``!= None`` comparisons and the double dict lookup of the
        original with ``is not None`` / ``dict.get``.)
        """
        known = self.get(host.mac)
        if known is None:
            return
        if ip_v4 is not None and ip_v4 not in known.ipv4:
            known.ipv4.append(ip_v4)
        if ip_v6 is not None and ip_v6 not in known.ipv6:
            known.ipv6.append(ip_v6)

    def get_by_dpid(self, dpid):
        """Return all hosts attached to the switch with datapath id ``dpid``."""
        return [host for host in self.values() if host.port.dpid == dpid]
class PortState(dict):
    """Dict of int port_no -> OFPPort.

    OFPPort is defined in ryu.ofproto.ofproto_v1_X_parser.
    """

    def __init__(self):
        super(PortState, self).__init__()

    def add(self, port_no, port):
        self[port_no] = port

    def remove(self, port_no):
        # Raises KeyError for unknown ports, matching the original `del`.
        self.pop(port_no)

    def modify(self, port_no, port):
        # Same as add(): unconditional overwrite.
        self.add(port_no, port)
class PortData(object):
    """Per-port LLDP bookkeeping: liveness flag, the pre-built LLDP frame,
    the time the last probe was sent, and how many probes are unanswered."""

    def __init__(self, is_down, lldp_data):
        super(PortData, self).__init__()
        self.is_down = is_down
        self.lldp_data = lldp_data
        self.timestamp = None
        self.sent = 0

    def lldp_sent(self):
        # Remember when the probe went out and count it as unanswered.
        self.timestamp = time.time()
        self.sent += 1

    def lldp_received(self):
        self.sent = 0

    def lldp_dropped(self):
        # Number of consecutive probes without a reply.
        return self.sent

    def clear_timestamp(self):
        self.timestamp = None

    def set_down(self, is_down):
        self.is_down = is_down

    def __str__(self):
        return 'PortData<live=%s, timestamp=%s, sent=%d>' % (
            not self.is_down, self.timestamp, self.sent)
class PortDataState(dict):
    # dict: Port class -> PortData class
    # slimed down version of OrderedDict as python 2.6 doesn't support it.
    # The insertion/move order doubles as the LLDP transmit schedule:
    # ports at the front are probed next, lldp_sent() rotates a port to
    # the back of the list.
    _PREV = 0
    _NEXT = 1
    _KEY = 2

    def __init__(self):
        super(PortDataState, self).__init__()
        self._root = root = []  # sentinel node
        root[:] = [root, root, None]  # [_PREV, _NEXT, _KEY]
        # doubly linked list
        self._map = {}  # key -> its [prev, next, key] list node

    def _remove_key(self, key):
        # Unlink the node for `key` from the doubly linked list.
        link_prev, link_next, key = self._map.pop(key)
        link_prev[self._NEXT] = link_next
        link_next[self._PREV] = link_prev

    def _append_key(self, key):
        # Insert `key` just before the sentinel (i.e. at the tail).
        root = self._root
        last = root[self._PREV]
        last[self._NEXT] = root[self._PREV] = self._map[key] = [last, root,
                                                                key]

    def _prepend_key(self, key):
        # Insert `key` just after the sentinel (i.e. at the head).
        root = self._root
        first = root[self._NEXT]
        first[self._PREV] = root[self._NEXT] = self._map[key] = [root, first,
                                                                 key]

    def _move_last_key(self, key):
        self._remove_key(key)
        self._append_key(key)

    def _move_front_key(self, key):
        self._remove_key(key)
        self._prepend_key(key)

    def add_port(self, port, lldp_data):
        # New ports go to the front so they are probed soon; re-added
        # ports only refresh their liveness flag.
        if port not in self:
            self._prepend_key(port)
            self[port] = PortData(port.is_down(), lldp_data)
        else:
            self[port].is_down = port.is_down()

    def lldp_sent(self, port):
        # Record the probe and rotate the port to the back of the queue.
        port_data = self[port]
        port_data.lldp_sent()
        self._move_last_key(port)
        return port_data

    def lldp_received(self, port):
        self[port].lldp_received()

    def move_front(self, port):
        # Schedule `port` for immediate re-probing.
        port_data = self.get(port, None)
        if port_data is not None:
            port_data.clear_timestamp()
            self._move_front_key(port)

    def set_down(self, port):
        # Update liveness from the OFPPort state; a port coming back up is
        # moved to the front so it gets probed right away.
        is_down = port.is_down()
        port_data = self[port]
        port_data.set_down(is_down)
        port_data.clear_timestamp()
        if not is_down:
            self._move_front_key(port)
        return is_down

    def get_port(self, port):
        return self[port]

    def del_port(self, port):
        del self[port]
        self._remove_key(port)

    def __iter__(self):
        # Iterate keys in linked-list (scheduling) order, not dict order.
        root = self._root
        curr = root[self._NEXT]
        while curr is not root:
            yield curr[self._KEY]
            curr = curr[self._NEXT]

    def clear(self):
        # Break the node cycles explicitly to help the GC, then reset.
        for node in self._map.values():
            del node[:]
        root = self._root
        root[:] = [root, root, None]
        self._map.clear()
        dict.clear(self)

    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]

    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) pairs in od'
        for k in self:
            yield (k, self[k])
class LinkState(dict):
    """Mapping of Link -> timestamp of the last LLDP confirmation.

    A companion src-port -> dst-port map (``_map``) lets the peer of a
    port be looked up without scanning every link.
    """
    # dict: Link class -> timestamp
    def __init__(self):
        super(LinkState, self).__init__()
        self._map = {}
    def get_peer(self, src):
        """Return the port currently linked to *src*, or None."""
        return self._map.get(src, None)
    def update_link(self, src, dst):
        """Refresh the src->dst link; return True iff dst->src is also up."""
        link = Link(src, dst)
        self[link] = time.time()
        self._map[src] = dst
        # return if the reverse link is also up or not
        rev_link = Link(dst, src)
        return rev_link in self
    def link_down(self, link):
        """Forget *link* (KeyError if it was never recorded)."""
        del self[link]
        del self._map[link.src]
    def rev_link_set_timestamp(self, rev_link, timestamp):
        """Overwrite *rev_link*'s timestamp (e.g. to expire it early)."""
        # rev_link may or may not in LinkSet
        if rev_link in self:
            self[rev_link] = timestamp
    def port_deleted(self, src):
        """Remove both directions of the link attached to *src*.

        Returns ``(dst, rev_link_dst)``; *rev_link_dst* is None when
        the reverse direction was never observed.  Raises KeyError if
        *src* has no known peer.
        """
        dst = self.get_peer(src)
        if dst is None:
            raise KeyError()
        link = Link(src, dst)
        rev_link = Link(dst, src)
        del self[link]
        del self._map[src]
        # reverse link might not exist
        self.pop(rev_link, None)
        rev_link_dst = self._map.pop(dst, None)
        return dst, rev_link_dst
class LLDPPacket(object):
    """Build and parse the LLDP frames used for link discovery.

    The chassis ID TLV carries the sending datapath id encoded as
    ``'dpid:<hex>'`` and the port ID TLV carries the sending port
    number packed as a big-endian uint32.
    """
    # make a LLDP packet for link discovery.
    CHASSIS_ID_PREFIX = 'dpid:'
    CHASSIS_ID_PREFIX_LEN = len(CHASSIS_ID_PREFIX)
    CHASSIS_ID_FMT = CHASSIS_ID_PREFIX + '%s'
    PORT_ID_STR = '!I'  # uint32_t
    PORT_ID_SIZE = 4

    class LLDPUnknownFormat(RyuException):
        message = '%(msg)s'

    @staticmethod
    def lldp_packet(dpid, port_no, dl_addr, ttl):
        """Serialize an LLDP frame advertising (dpid, port_no).

        :param dpid: datapath id of the sending switch
        :param port_no: port number the frame is emitted on
        :param dl_addr: source MAC address for the Ethernet header
        :param ttl: TTL value for the LLDP TTL TLV
        :returns: the serialized frame as bytes
        """
        pkt = packet.Packet()
        dst = lldp.LLDP_MAC_NEAREST_BRIDGE
        src = dl_addr
        ethertype = ETH_TYPE_LLDP
        eth_pkt = ethernet.ethernet(dst, src, ethertype)
        pkt.add_protocol(eth_pkt)
        tlv_chassis_id = lldp.ChassisID(
            subtype=lldp.ChassisID.SUB_LOCALLY_ASSIGNED,
            chassis_id=(LLDPPacket.CHASSIS_ID_FMT %
                        dpid_to_str(dpid)).encode('ascii'))
        tlv_port_id = lldp.PortID(subtype=lldp.PortID.SUB_PORT_COMPONENT,
                                  port_id=struct.pack(
                                      LLDPPacket.PORT_ID_STR,
                                      port_no))
        tlv_ttl = lldp.TTL(ttl=ttl)
        tlv_end = lldp.End()
        tlvs = (tlv_chassis_id, tlv_port_id, tlv_ttl, tlv_end)
        lldp_pkt = lldp.lldp(tlvs)
        pkt.add_protocol(lldp_pkt)
        pkt.serialize()
        return pkt.data

    @staticmethod
    def lldp_parse(data):
        """Parse *data* and return ``(src_dpid, src_port_no)``.

        :raises LLDPPacket.LLDPUnknownFormat: when *data* is not one of
            our LLDP frames.
        """
        pkt = packet.Packet(data)
        i = iter(pkt)
        # Use the builtin next() instead of the Python-2-only
        # iterator.next() method so this also runs on Python 3.
        eth_pkt = next(i)
        assert isinstance(eth_pkt, ethernet.ethernet)
        lldp_pkt = next(i)
        if not isinstance(lldp_pkt, lldp.lldp):
            raise LLDPPacket.LLDPUnknownFormat()
        tlv_chassis_id = lldp_pkt.tlvs[0]
        if tlv_chassis_id.subtype != lldp.ChassisID.SUB_LOCALLY_ASSIGNED:
            raise LLDPPacket.LLDPUnknownFormat(
                msg='unknown chassis id subtype %d' % tlv_chassis_id.subtype)
        chassis_id = tlv_chassis_id.chassis_id
        if not chassis_id.startswith(LLDPPacket.CHASSIS_ID_PREFIX):
            raise LLDPPacket.LLDPUnknownFormat(
                msg='unknown chassis id format %s' % chassis_id)
        src_dpid = str_to_dpid(chassis_id[LLDPPacket.CHASSIS_ID_PREFIX_LEN:])
        tlv_port_id = lldp_pkt.tlvs[1]
        if tlv_port_id.subtype != lldp.PortID.SUB_PORT_COMPONENT:
            raise LLDPPacket.LLDPUnknownFormat(
                msg='unknown port id subtype %d' % tlv_port_id.subtype)
        port_id = tlv_port_id.port_id
        if len(port_id) != LLDPPacket.PORT_ID_SIZE:
            # %s, not %d: port_id is a (bytes) string here, so '%d'
            # raised a TypeError instead of the intended exception.
            raise LLDPPacket.LLDPUnknownFormat(
                msg='unknown port id %s' % port_id)
        (src_port_no, ) = struct.unpack(LLDPPacket.PORT_ID_STR, port_id)
        return src_dpid, src_port_no
class Switches(app_manager.RyuApp):
    """Topology discovery application.

    Tracks datapaths, their ports, inter-switch links (discovered by
    periodically flooding LLDP frames and observing where they arrive)
    and hosts (learned from ordinary packet-ins on edge ports), and
    answers EventSwitch/Link/HostRequest queries from other Ryu apps.
    """
    OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION, ofproto_v1_2.OFP_VERSION,
                    ofproto_v1_3.OFP_VERSION, ofproto_v1_4.OFP_VERSION]
    _EVENTS = [event.EventSwitchEnter, event.EventSwitchLeave,
               event.EventPortAdd, event.EventPortDelete,
               event.EventPortModify,
               event.EventLinkAdd, event.EventLinkDelete,
               event.EventHostAdd]
    DEFAULT_TTL = 120  # unused. ignored.
    LLDP_PACKET_LEN = len(LLDPPacket.lldp_packet(0, 0, DONTCARE_STR, 0))
    LLDP_SEND_GUARD = .05
    LLDP_SEND_PERIOD_PER_PORT = .9
    TIMEOUT_CHECK_PERIOD = 5.
    LINK_TIMEOUT = TIMEOUT_CHECK_PERIOD * 2
    LINK_LLDP_DROP = 5
    def __init__(self, *args, **kwargs):
        super(Switches, self).__init__(*args, **kwargs)
        self.name = 'switches'
        self.dps = {}  # datapath_id => Datapath class
        self.port_state = {}  # datapath_id => ports
        self.ports = PortDataState()  # Port class -> PortData class
        self.links = LinkState()  # Link class -> timestamp
        self.hosts = HostState()  # mac address -> Host class list
        self.is_active = True
        self.link_discovery = self.CONF.observe_links
        if self.link_discovery:
            self.install_flow = self.CONF.install_lldp_flow
            self.explicit_drop = self.CONF.explicit_drop
            self.lldp_event = hub.Event()
            self.link_event = hub.Event()
            # Background greenlets: one sends LLDP, one ages out links.
            self.threads.append(hub.spawn(self.lldp_loop))
            self.threads.append(hub.spawn(self.link_loop))
    def close(self):
        """Stop the background loops and wait for them to finish."""
        self.is_active = False
        if self.link_discovery:
            self.lldp_event.set()
            self.link_event.set()
            hub.joinall(self.threads)
    def _register(self, dp):
        """Remember *dp* and snapshot its current port set."""
        assert dp.id is not None
        self.dps[dp.id] = dp
        if dp.id not in self.port_state:
            self.port_state[dp.id] = PortState()
            for port in dp.ports.values():
                self.port_state[dp.id].add(port.port_no, port)
    def _unregister(self, dp):
        """Forget *dp* and its port state (no-op if unknown)."""
        if dp.id in self.dps:
            del self.dps[dp.id]
            del self.port_state[dp.id]
    def _get_switch(self, dpid):
        """Build a Switch view for *dpid*, or None if unknown."""
        if dpid in self.dps:
            switch = Switch(self.dps[dpid])
            for ofpport in self.port_state[dpid].values():
                switch.add_port(ofpport)
            return switch
    def _get_port(self, dpid, port_no):
        """Return the Port (dpid, port_no), or None if unknown."""
        switch = self._get_switch(dpid)
        if switch:
            for p in switch.ports:
                if p.port_no == port_no:
                    return p
    def _port_added(self, port):
        """Start LLDP polling on *port* with a pre-built frame."""
        lldp_data = LLDPPacket.lldp_packet(
            port.dpid, port.port_no, port.hw_addr, self.DEFAULT_TTL)
        self.ports.add_port(port, lldp_data)
        # LOG.debug('_port_added dpid=%s, port_no=%s, live=%s',
        #           port.dpid, port.port_no, port.is_live())
    def _link_down(self, port):
        """Tear down the link(s) attached to *port* and notify observers."""
        try:
            dst, rev_link_dst = self.links.port_deleted(port)
        except KeyError:
            # LOG.debug('key error. src=%s, dst=%s',
            #           port, self.links.get_peer(port))
            return
        link = Link(port, dst)
        self.send_event_to_observers(event.EventLinkDelete(link))
        if rev_link_dst:
            rev_link = Link(dst, rev_link_dst)
            self.send_event_to_observers(event.EventLinkDelete(rev_link))
            self.ports.move_front(dst)
    def _is_edge_port(self, port):
        """True if *port* is not an endpoint of any known link."""
        for link in self.links:
            if port == link.src or port == link.dst:
                return False
        return True
    @set_ev_cls(ofp_event.EventOFPStateChange,
                [MAIN_DISPATCHER, DEAD_DISPATCHER])
    def state_change_handler(self, ev):
        """Handle datapath connect/disconnect: (un)register, install the
        LLDP punt flow, and start/stop LLDP polling of its ports."""
        dp = ev.datapath
        assert dp is not None
        LOG.debug(dp)
        if ev.state == MAIN_DISPATCHER:
            dp_multiple_conns = False
            if dp.id in self.dps:
                LOG.warning('multiple connections from %s', dpid_to_str(dp.id))
                dp_multiple_conns = True
            self._register(dp)
            switch = self._get_switch(dp.id)
            LOG.debug('register %s', switch)
            # Do not send event while dp has multiple connections.
            if not dp_multiple_conns:
                self.send_event_to_observers(event.EventSwitchEnter(switch))
            if not self.link_discovery:
                return
            if self.install_flow:
                ofproto = dp.ofproto
                ofproto_parser = dp.ofproto_parser
                # TODO:XXX need other versions
                if ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
                    rule = nx_match.ClsRule()
                    rule.set_dl_dst(addrconv.mac.text_to_bin(
                        lldp.LLDP_MAC_NEAREST_BRIDGE))
                    rule.set_dl_type(ETH_TYPE_LLDP)
                    actions = [ofproto_parser.OFPActionOutput(
                        ofproto.OFPP_CONTROLLER, self.LLDP_PACKET_LEN)]
                    dp.send_flow_mod(
                        rule=rule, cookie=0, command=ofproto.OFPFC_ADD,
                        idle_timeout=0, hard_timeout=0, actions=actions,
                        priority=0xFFFF)
                elif ofproto.OFP_VERSION >= ofproto_v1_2.OFP_VERSION:
                    match = ofproto_parser.OFPMatch(
                        eth_type=ETH_TYPE_LLDP,
                        eth_dst=lldp.LLDP_MAC_NEAREST_BRIDGE)
                    # OFPCML_NO_BUFFER is set so that the LLDP is not
                    # buffered on switch
                    parser = ofproto_parser
                    actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                                      ofproto.OFPCML_NO_BUFFER
                                                      )]
                    inst = [parser.OFPInstructionActions(
                        ofproto.OFPIT_APPLY_ACTIONS, actions)]
                    mod = parser.OFPFlowMod(datapath=dp, match=match,
                                            idle_timeout=0, hard_timeout=0,
                                            instructions=inst,
                                            priority=0xFFFF)
                    dp.send_msg(mod)
                else:
                    LOG.error('cannot install flow. unsupported version. %x',
                              dp.ofproto.OFP_VERSION)
            # Do not add ports while dp has multiple connections to controller.
            if not dp_multiple_conns:
                for port in switch.ports:
                    if not port.is_reserved():
                        self._port_added(port)
            self.lldp_event.set()
        elif ev.state == DEAD_DISPATCHER:
            # dp.id is None when datapath dies before handshake
            if dp.id is None:
                return
            switch = self._get_switch(dp.id)
            self._unregister(dp)
            LOG.debug('unregister %s', switch)
            self.send_event_to_observers(event.EventSwitchLeave(switch))
            if not self.link_discovery:
                return
            for port in switch.ports:
                if not port.is_reserved():
                    self.ports.del_port(port)
                    self._link_down(port)
            self.lldp_event.set()
    @set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
    def port_status_handler(self, ev):
        """Mirror switch port add/delete/modify into our state and
        (re)start or stop LLDP polling accordingly."""
        msg = ev.msg
        reason = msg.reason
        dp = msg.datapath
        ofpport = msg.desc
        if reason == dp.ofproto.OFPPR_ADD:
            # LOG.debug('A port was added.' +
            #           '(datapath id = %s, port number = %s)',
            #           dp.id, ofpport.port_no)
            self.port_state[dp.id].add(ofpport.port_no, ofpport)
            self.send_event_to_observers(
                event.EventPortAdd(Port(dp.id, dp.ofproto, ofpport)))
            if not self.link_discovery:
                return
            port = self._get_port(dp.id, ofpport.port_no)
            if port and not port.is_reserved():
                self._port_added(port)
                self.lldp_event.set()
        elif reason == dp.ofproto.OFPPR_DELETE:
            # LOG.debug('A port was deleted.' +
            #           '(datapath id = %s, port number = %s)',
            #           dp.id, ofpport.port_no)
            self.port_state[dp.id].remove(ofpport.port_no)
            self.send_event_to_observers(
                event.EventPortDelete(Port(dp.id, dp.ofproto, ofpport)))
            if not self.link_discovery:
                return
            port = self._get_port(dp.id, ofpport.port_no)
            if port and not port.is_reserved():
                self.ports.del_port(port)
                self._link_down(port)
                self.lldp_event.set()
        else:
            assert reason == dp.ofproto.OFPPR_MODIFY
            # LOG.debug('A port was modified.' +
            #           '(datapath id = %s, port number = %s)',
            #           dp.id, ofpport.port_no)
            self.port_state[dp.id].modify(ofpport.port_no, ofpport)
            self.send_event_to_observers(
                event.EventPortModify(Port(dp.id, dp.ofproto, ofpport)))
            if not self.link_discovery:
                return
            port = self._get_port(dp.id, ofpport.port_no)
            if port and not port.is_reserved():
                if self.ports.set_down(port):
                    self._link_down(port)
                self.lldp_event.set()
    @staticmethod
    def _drop_packet(msg):
        """Tell the switch to discard the buffered packet behind *msg*."""
        buffer_id = msg.buffer_id
        if buffer_id == msg.datapath.ofproto.OFP_NO_BUFFER:
            return
        dp = msg.datapath
        # TODO:XXX
        if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
            dp.send_packet_out(buffer_id, msg.in_port, [])
        elif dp.ofproto.OFP_VERSION >= ofproto_v1_2.OFP_VERSION:
            dp.send_packet_out(buffer_id, msg.match['in_port'], [])
        else:
            dp.send_packet_out(buffer_id, msg.match['in_port'], [])
    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def lldp_packet_in_handler(self, ev):
        """Learn a (src port -> dst port) link from a received LLDP
        frame; non-LLDP packet-ins are ignored silently."""
        if not self.link_discovery:
            return
        msg = ev.msg
        try:
            src_dpid, src_port_no = LLDPPacket.lldp_parse(msg.data)
        except LLDPPacket.LLDPUnknownFormat as e:
            # This handler can receive all the packets which can be
            # not-LLDP packet. Ignore it silently
            return
        dst_dpid = msg.datapath.id
        if msg.datapath.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
            dst_port_no = msg.in_port
        elif msg.datapath.ofproto.OFP_VERSION >= ofproto_v1_2.OFP_VERSION:
            dst_port_no = msg.match['in_port']
        else:
            LOG.error('cannot accept LLDP. unsupported version. %x',
                      msg.datapath.ofproto.OFP_VERSION)
        src = self._get_port(src_dpid, src_port_no)
        if not src or src.dpid == dst_dpid:
            return
        try:
            self.ports.lldp_received(src)
        except KeyError:
            # There are races between EventOFPPacketIn and
            # EventDPPortAdd. So packet-in event can happend before
            # port add event. In that case key error can happend.
            # LOG.debug('lldp_received: KeyError %s', e)
            pass
        dst = self._get_port(dst_dpid, dst_port_no)
        if not dst:
            return
        old_peer = self.links.get_peer(src)
        # LOG.debug("Packet-In")
        # LOG.debug("  src=%s", src)
        # LOG.debug("  dst=%s", dst)
        # LOG.debug("  old_peer=%s", old_peer)
        if old_peer and old_peer != dst:
            # The port was re-cabled to a different peer.
            old_link = Link(src, old_peer)
            self.send_event_to_observers(event.EventLinkDelete(old_link))
        link = Link(src, dst)
        if link not in self.links:
            self.send_event_to_observers(event.EventLinkAdd(link))
            # remove hosts from edge port
            for host in self.hosts.values():
                if self._is_edge_port(host.port):
                    del self.hosts[host.mac]
        if not self.links.update_link(src, dst):
            # reverse link is not detected yet.
            # So schedule the check early because it's very likely it's up
            self.ports.move_front(dst)
            self.lldp_event.set()
        if self.explicit_drop:
            self._drop_packet(msg)
    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def host_discovery_packet_in_handler(self, ev):
        """Learn hosts (MAC + IP) from non-LLDP traffic on edge ports."""
        msg = ev.msg
        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]
        # ignore lldp packet
        if eth.ethertype == ETH_TYPE_LLDP:
            return
        datapath = msg.datapath
        dpid = datapath.id
        port_no = -1
        if msg.datapath.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
            port_no = msg.in_port
        else:
            port_no = msg.match['in_port']
        port = self._get_port(dpid, port_no)
        # can't find this port(ex: logic port)
        if not port:
            return
        # ignore switch-to-switch port
        if not self._is_edge_port(port):
            return
        host_mac = eth.src
        host = Host(host_mac, port)
        if host_mac not in self.hosts:
            self.hosts.add(host)
            ev = event.EventHostAdd(host)
            self.send_event_to_observers(ev)
        # arp packet, update ip address
        if eth.ethertype == ether_types.ETH_TYPE_ARP:
            arp_pkt = pkt.get_protocols(arp.arp)[0]
            self.hosts.update_ip(host, ip_v4=arp_pkt.src_ip)
        # ipv4 packet, update ipv4 address
        elif eth.ethertype == ether_types.ETH_TYPE_IP:
            ipv4_pkt = pkt.get_protocols(ipv4.ipv4)[0]
            self.hosts.update_ip(host, ip_v4=ipv4_pkt.src)
        # ipv6 packet, update ipv6 address
        elif eth.ethertype == ether_types.ETH_TYPE_IPV6:
            # TODO: need to handle NDP
            ipv6_pkt = pkt.get_protocols(ipv6.ipv6)[0]
            self.hosts.update_ip(host, ip_v6=ipv6_pkt.src)
    def send_lldp_packet(self, port):
        """Emit the pre-built LLDP frame of *port* via its datapath."""
        try:
            port_data = self.ports.lldp_sent(port)
        except KeyError as e:
            # ports can be modified during our sleep in self.lldp_loop()
            # LOG.debug('send_lldp: KeyError %s', e)
            return
        if port_data.is_down:
            return
        dp = self.dps.get(port.dpid, None)
        if dp is None:
            # datapath was already deleted
            return
        # LOG.debug('lldp sent dpid=%s, port_no=%d', dp.id, port.port_no)
        # TODO:XXX
        if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
            actions = [dp.ofproto_parser.OFPActionOutput(port.port_no)]
            dp.send_packet_out(actions=actions, data=port_data.lldp_data)
        elif dp.ofproto.OFP_VERSION >= ofproto_v1_2.OFP_VERSION:
            actions = [dp.ofproto_parser.OFPActionOutput(port.port_no)]
            out = dp.ofproto_parser.OFPPacketOut(
                datapath=dp, in_port=dp.ofproto.OFPP_CONTROLLER,
                buffer_id=dp.ofproto.OFP_NO_BUFFER, actions=actions,
                data=port_data.lldp_data)
            dp.send_msg(out)
        else:
            LOG.error('cannot send lldp packet. unsupported version. %x',
                      dp.ofproto.OFP_VERSION)
    def lldp_loop(self):
        """Background loop: send LLDP on due ports, then sleep until the
        next port's period expires or lldp_event is set."""
        while self.is_active:
            self.lldp_event.clear()
            now = time.time()
            timeout = None
            ports_now = []
            ports = []
            for (key, data) in self.ports.items():
                if data.timestamp is None:
                    ports_now.append(key)
                    continue
                expire = data.timestamp + self.LLDP_SEND_PERIOD_PER_PORT
                if expire <= now:
                    ports.append(key)
                    continue
                timeout = expire - now
                break
            for port in ports_now:
                self.send_lldp_packet(port)
            for port in ports:
                self.send_lldp_packet(port)
                hub.sleep(self.LLDP_SEND_GUARD)  # don't burst
            if timeout is not None and ports:
                timeout = 0  # We have already slept
            # LOG.debug('lldp sleep %s', timeout)
            self.lldp_event.wait(timeout=timeout)
    def link_loop(self):
        """Background loop: expire links whose LLDP confirmations have
        stopped arriving and notify observers."""
        while self.is_active:
            self.link_event.clear()
            now = time.time()
            deleted = []
            for (link, timestamp) in self.links.items():
                # LOG.debug('%s timestamp %d (now %d)', link, timestamp, now)
                if timestamp + self.LINK_TIMEOUT < now:
                    src = link.src
                    if src in self.ports:
                        port_data = self.ports.get_port(src)
                        # LOG.debug('port_data %s', port_data)
                        if port_data.lldp_dropped() > self.LINK_LLDP_DROP:
                            deleted.append(link)
            for link in deleted:
                self.links.link_down(link)
                # LOG.debug('delete %s', link)
                self.send_event_to_observers(event.EventLinkDelete(link))
                dst = link.dst
                rev_link = Link(dst, link.src)
                if rev_link not in deleted:
                    # It is very likely that the reverse link is also
                    # disconnected. Check it early.
                    expire = now - self.LINK_TIMEOUT
                    self.links.rev_link_set_timestamp(rev_link, expire)
                    if dst in self.ports:
                        self.ports.move_front(dst)
                        self.lldp_event.set()
            self.link_event.wait(timeout=self.TIMEOUT_CHECK_PERIOD)
    @set_ev_cls(event.EventSwitchRequest)
    def switch_request_handler(self, req):
        """Reply with the requested switch, or all switches when
        req.dpid is None."""
        # LOG.debug(req)
        dpid = req.dpid
        switches = []
        if dpid is None:
            # reply all list
            for dp in self.dps.values():
                switches.append(self._get_switch(dp.id))
        elif dpid in self.dps:
            switches.append(self._get_switch(dpid))
        rep = event.EventSwitchReply(req.src, switches)
        self.reply_to_request(req, rep)
    @set_ev_cls(event.EventLinkRequest)
    def link_request_handler(self, req):
        """Reply with known links, optionally filtered by source dpid."""
        # LOG.debug(req)
        dpid = req.dpid
        if dpid is None:
            links = self.links
        else:
            links = [link for link in self.links if link.src.dpid == dpid]
        rep = event.EventLinkReply(req.src, dpid, links)
        self.reply_to_request(req, rep)
    @set_ev_cls(event.EventHostRequest)
    def host_request_handler(self, req):
        """Reply with known hosts, optionally filtered by dpid."""
        dpid = req.dpid
        hosts = []
        if dpid is None:
            for mac in self.hosts:
                hosts.append(self.hosts[mac])
        else:
            hosts = self.hosts.get_by_dpid(dpid)
        rep = event.EventHostReply(req.src, dpid, hosts)
        self.reply_to_request(req, rep)
| 33.296922 | 79 | 0.574083 |
e32bf12196115431798b952b595a7ed78e19dcef | 9,130 | py | Python | tests/models/test_patient.py | ZviBaratz/django-dicom | fc5d5443ebcab9af9705a2e81c58662789a34c62 | [
"Apache-2.0"
] | 8 | 2018-12-25T11:00:31.000Z | 2022-02-03T12:05:56.000Z | tests/models/test_patient.py | ZviBaratz/django-dicom | fc5d5443ebcab9af9705a2e81c58662789a34c62 | [
"Apache-2.0"
] | 49 | 2019-09-04T11:36:00.000Z | 2022-03-20T12:33:04.000Z | tests/models/test_patient.py | ZviBaratz/django-dicom | fc5d5443ebcab9af9705a2e81c58662789a34c62 | [
"Apache-2.0"
] | 4 | 2019-06-23T18:09:07.000Z | 2019-08-30T15:43:18.000Z | from django.test import TestCase
from django_dicom.models import Image, Patient, Series, Study
from tests.fixtures import (TEST_IMAGE_FIELDS, TEST_PATIENT_FIELDS,
TEST_SERIES_FIELDS, TEST_STUDY_FIELDS)
class PatientTestCase(TestCase):
    """
    Tests for the :class:`~django_dicom.models.patient.Patient` model.
    """
    @classmethod
    def setUpTestData(cls):
        """
        Creates instances to be used in the tests.
        For more information see Django's :class:`~django.test.TestCase`
        documentation_.
        .. _documentation:
            https://docs.djangoproject.com/en/2.2/topics/testing/tools/#testcase
        """
        # NOTE(review): this mutates the module-level TEST_*_FIELDS
        # fixture dicts in place to wire up the FK relations.
        TEST_SERIES_FIELDS["patient"] = Patient.objects.create(
            **TEST_PATIENT_FIELDS
        )
        TEST_SERIES_FIELDS["study"] = Study.objects.create(**TEST_STUDY_FIELDS)
        TEST_IMAGE_FIELDS["series"] = Series.objects.create(
            **TEST_SERIES_FIELDS
        )
        Image.objects.create(**TEST_IMAGE_FIELDS)
    def setUp(self):
        """
        Adds the created instances to the tests' contexts.
        For more information see unittest's :meth:`~unittest.TestCase.setUp` method.
        """
        self.image = Image.objects.get(uid=TEST_IMAGE_FIELDS["uid"])
        self.series = Series.objects.get(uid=TEST_SERIES_FIELDS["uid"])
        self.study = self.series.study
        self.patient = self.series.patient
    ##########
    # Fields #
    ##########
    # uid
    def test_uid_max_length(self):
        """
        DICOM's `Patient ID`_ attribute may only be as long as 64 characters (
        see the Long String (LO) `value-representation specification`).
        .. _Patient ID: https://dicom.innolitics.com/ciods/mr-image/patient/00100020
        .. _value-representation specification: http://dicom.nema.org/medical/dicom/current/output/chtml/part05/sect_6.2.html
        """
        field = self.patient._meta.get_field("uid")
        self.assertEqual(field.max_length, 64)
    def test_uid_is_unique(self):
        """
        Validates that the *UID* field is unique.
        """
        field = self.patient._meta.get_field("uid")
        self.assertTrue(field.unique)
    def test_uid_verbose_name(self):
        """
        Test the *UID* field vebose name.
        """
        field = self.patient._meta.get_field("uid")
        self.assertEqual(field.verbose_name, "Patient UID")
    def test_uid_blank_and_null(self):
        """
        Every :class:`~django_dicom.models.patient.Patient` instance must have a UID.
        """
        field = self.patient._meta.get_field("uid")
        self.assertFalse(field.blank)
        self.assertFalse(field.null)
    # date_of_birth
    def test_date_of_birth_blank_and_null(self):
        """
        The `Patient's Birth Date`_ attribute may be empty (`type 2 data element`_).
        .. _Patient's Birth Date: https://dicom.innolitics.com/ciods/mr-image/patient/00100030
        .. _type 2 data element: http://dicom.nema.org/dicom/2013/output/chtml/part05/sect_7.4.html#sect_7.4.3
        """
        field = self.patient._meta.get_field("date_of_birth")
        self.assertTrue(field.blank)
        self.assertTrue(field.null)
    # sex
    def test_sex_max_length(self):
        """
        Tests that the sex field has the expected max_length.
        """
        field = self.patient._meta.get_field("sex")
        self.assertEqual(field.max_length, 1)
    def test_sex_blank_and_null(self):
        """
        The `Patient's Sex`_ attribute may be empty (`type 2 data element`_).
        .. _Patient's Sex: https://dicom.innolitics.com/ciods/mr-image/patient/00100040
        .. _type 2 data element: http://dicom.nema.org/dicom/2013/output/chtml/part05/sect_7.4.html#sect_7.4.3
        """
        field = self.patient._meta.get_field("sex")
        self.assertTrue(field.blank)
        self.assertTrue(field.null)
    # name
    def test_name_blank_and_null(self):
        """
        Tests that the name fields are blankable and nullable according to the
        `Patient's Name`_ DICOM attribute definition (`type 2 data element`_).
        .. _Patient's Name: https://dicom.innolitics.com/ciods/mr-image/patient/00100010
        .. _type 2 data element: http://dicom.nema.org/dicom/2013/output/chtml/part05/sect_7.4.html#sect_7.4.3
        """
        for field_name in Patient._NAME_PARTS:
            field = self.patient._meta.get_field(field_name)
            self.assertTrue(field.blank)
            self.assertTrue(field.null)
    def test_name_max_length(self):
        """
        Tests that the name fields has the expected max_length (see the Person
        Name (PN) `value-representation specification`_).
        .. _value-representation specification: http://dicom.nema.org/medical/dicom/current/output/chtml/part05/sect_6.2.html
        """
        for field_name in Patient._NAME_PARTS:
            field = self.patient._meta.get_field(field_name)
            self.assertEqual(field.max_length, 64)
    ###########
    # Methods #
    ###########
    def test_string(self):
        """
        Tests that an :meth:`~django_dicom.models.patient.Patient.__str__` method returns
        its UID.
        For more information see `Django's str method documentation`_.
        .. _Django's str method documentation: https://docs.djangoproject.com/en/2.2/ref/models/instances/#str
        """
        self.assertEqual(str(self.patient), self.patient.uid)
    def test_get_absolute_url(self):
        """
        Tests the :meth:`~django_dicom.models.patient.Patient.get_absolute_url` method
        returns the expeted url.
        `More information`_
        .. _More information: https://docs.djangoproject.com/en/2.2/ref/models/instances/#get-absolute-url
        """
        url = self.patient.get_absolute_url()
        expected = f"/dicom/patient/{self.patient.id}/"
        self.assertEqual(url, expected)
    def test_get_full_name(self):
        """
        Tests that the :meth:`~django_dicom.models.patient.Patient.get_full_name`
        method returns "{given_name} {family_name}".
        """
        result = self.patient.get_full_name()
        expected = "Zvi Baratz"
        self.assertEqual(result, expected)
    def test_update_patient_name(self):
        """
        Tests patient name update according to the DICOM header `Patient's
        Name (PN)`_ data element fields.
        .. _Patient's Name (PN):
           http://dicom.nema.org/medical/dicom/current/output/chtml/part05/sect_6.2.html
        """
        self.patient.given_name = None
        self.patient.family_name = None
        self.patient.update_patient_name(self.image.header)
        self.assertEqual(self.patient.family_name, "Baratz")
        self.assertEqual(self.patient.given_name, "Zvi")
        self.assertEqual(self.patient.middle_name, "")
        self.assertEqual(self.patient.name_prefix, "")
        self.assertEqual(self.patient.name_suffix, "")
    def test_update_fields_from_header(self):
        """
        Tests that
        :meth:`~django_dicom.models.dicom_entity.DicomEntity.update_fields_from_header`
        method returns the expected values. This test relies on the created
        instance's fields containing the expected values beforehand.
        """
        header_fields = self.patient.get_header_fields()
        expected_values = {
            field.name: getattr(self.patient, field.name)
            for field in header_fields
        }
        result = self.patient.update_fields_from_header(self.image.header)
        self.assertIsNone(result)
        values = {
            field.name: getattr(self.patient, field.name)
            for field in header_fields
        }
        for key, value in values.items():
            try:
                self.assertEqual(value, expected_values[key])
            except AssertionError:
                # An expected None may legitimately come back as "".
                if expected_values[key] is None:
                    self.assertEqual(value, "")
                else:
                    self.fail(
                        f"expected {expected_values[key]} but got {value}"
                    )
    def test_get_admin_link(self):
        """
        Tests that the
        :meth:`~django_dicom.models.dicom_entity.DicomEntity.get_admin_link`
        method returns the expected value.
        """
        namespace = "/admin/django_dicom/patient"
        url = f"{namespace}/{self.patient.id}/change/"
        expected = f'<a href="{url}">{self.patient.id}</a>'
        result = self.patient.get_admin_link()
        self.assertEqual(result, expected)
    ##############
    # Properties #
    ##############
    def test_admin_link(self):
        """
        Tests that the
        :attr:`~django_dicom.models.dicom_entity.DicomEntity.admin_link`
        property returns the expected value.
        """
        namespace = "/admin/django_dicom/patient"
        url = f"{namespace}/{self.patient.id}/change/"
        expected = f'<a href="{url}">{self.patient.id}</a>'
        result = self.patient.admin_link
        self.assertEqual(result, expected)
| 33.2 | 125 | 0.623987 |
1268dc6d7f7a9cd64c547da26253ff1e7b2736f6 | 7,165 | py | Python | test/integration/test_good_templates.py | thbishop-intuit/cfn-python-lint | bee2082654e5e7816911f9208d7099dbaddd7a6b | [
"MIT-0"
] | 1 | 2020-11-18T07:06:47.000Z | 2020-11-18T07:06:47.000Z | test/integration/test_good_templates.py | thbishop-intuit/cfn-python-lint | bee2082654e5e7816911f9208d7099dbaddd7a6b | [
"MIT-0"
] | null | null | null | test/integration/test_good_templates.py | thbishop-intuit/cfn-python-lint | bee2082654e5e7816911f9208d7099dbaddd7a6b | [
"MIT-0"
] | null | null | null | """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
from test.integration import BaseCliTestCase
import cfnlint.core
class TestQuickStartTemplates(BaseCliTestCase):
    """Test QuickStart Templates Parsing """
    # Each scenario names a template file, the exact list of lint
    # matches expected for it, and the expected CLI exit code
    # (0 = clean, 2 = errors found).
    scenarios = [
        {
            'filename': 'test/fixtures/templates/good/generic.yaml',
            'results': [],
            'exit_code': 0,
        },
        {
            'filename': 'test/fixtures/templates/good/minimal.yaml',
            'results': [],
            'exit_code': 0,
        },
        {
            'filename': 'test/fixtures/templates/good/transform.yaml',
            'results': [],
            'exit_code': 0,
        },
        {
            'filename': 'test/fixtures/templates/bad/transform_serverless_template.yaml',
            'results': [
                {
                    'Filename': 'test/fixtures/templates/bad/transform_serverless_template.yaml',
                    'Location': {
                        'Start': {
                            'ColumnNumber': 1,
                            'LineNumber': 1
                        },
                        'End': {
                            'ColumnNumber': 1,
                            'LineNumber': 1
                        },
                        'Path': None
                    },
                    'Rule': {
                        'Id': 'E0001',
                        'Description': 'Errors found when performing transformation on the template',
                        'Source': 'https://github.com/aws-cloudformation/cfn-python-lint',
                        'ShortDescription': 'Error found when transforming the template'
                    },
                    'Level': 'Error',
                    'Message': 'Error transforming template: Resource with id [AppName] is invalid. Resource is missing the required [Location] property.'
                },
                {
                    'Filename': 'test/fixtures/templates/bad/transform_serverless_template.yaml',
                    'Location': {
                        'Start': {
                            'ColumnNumber': 1,
                            'LineNumber': 1
                        },
                        'End': {
                            'ColumnNumber': 1,
                            'LineNumber': 1
                        },
                        'Path': None
                    },
                    'Rule': {
                        'Id': 'E0001',
                        'Description': 'Errors found when performing transformation on the template',
                        'Source': 'https://github.com/aws-cloudformation/cfn-python-lint',
                        'ShortDescription': 'Error found when transforming the template'
                    },
                    'Level': 'Error',
                    'Message': "Error transforming template: Resource with id [ExampleLayer] is invalid. Missing required property 'ContentUri'."
                },
                {
                    'Filename': 'test/fixtures/templates/bad/transform_serverless_template.yaml',
                    'Location': {
                        'Start': {
                            'ColumnNumber': 1,
                            'LineNumber': 1
                        },
                        'End': {
                            'ColumnNumber': 1,
                            'LineNumber': 1
                        },
                        'Path': None
                    },
                    'Rule': {
                        'Id': 'E0001',
                        'Description': 'Errors found when performing transformation on the template',
                        'Source': 'https://github.com/aws-cloudformation/cfn-python-lint',
                        'ShortDescription': 'Error found when transforming the template'
                    },
                    'Level': 'Error',
                    'Message': "Error transforming template: Resource with id [myFunctionMyTimer] is invalid. Missing required property 'Schedule'."
                }
            ],
            'exit_code': 2,
        },
        {
            'filename': 'test/fixtures/templates/good/conditions.yaml',
            'results': [],
            'exit_code': 0,
        },
        {
            'filename': 'test/fixtures/templates/good/resources_codepipeline.yaml',
            'results': [],
            'exit_code': 0,
        },
        {
            'filename': 'test/fixtures/templates/good/transform_serverless_api.yaml',
            'results': [],
            'exit_code': 0,
        },
        {
            'filename': 'test/fixtures/templates/good/transform_serverless_function.yaml',
            'results': [],
            'exit_code': 0,
        },
        {
            'filename': 'test/fixtures/templates/good/transform_serverless_globals.yaml',
            'results': [
                {
                    "Filename": "test/fixtures/templates/good/transform_serverless_globals.yaml",
                    "Level": "Error",
                    "Location": {
                        "End": {
                            "ColumnNumber": 13,
                            "LineNumber": 10
                        },
                        "Path": [
                            "Resources",
                            "myFunction",
                            "Properties",
                            "Runtime"
                        ],
                        "Start": {
                            "ColumnNumber": 3,
                            "LineNumber": 10
                        }
                    },
                    "Message": "Deprecated runtime (nodejs6.10) specified. Updating disabled since 2019-08-12. Please consider updating to nodejs10.x",
                    "Rule": {
                        "Description": "Check if an EOL Lambda Runtime is specified and give an error if used. ",
                        "Id": "E2531",
                        "ShortDescription": "Check if EOL Lambda Function Runtimes are used",
                        "Source": "https://docs.aws.amazon.com/lambda/latest/dg/runtime-support-policy.html"
                    }
                }
            ],
            'exit_code': 2,
        },
        {
            'filename': 'test/fixtures/templates/good/transform/list_transform.yaml',
            'results': [],
            'exit_code': 0,
        },
        {
            'filename': 'test/fixtures/templates/good/transform/list_transform_many.yaml',
            'results': [],
            'exit_code': 0,
        },
        {
            'filename': 'test/fixtures/templates/good/transform/list_transform_not_sam.yaml',
            'results': [],
            'exit_code': 0,
        }
    ]
    def test_templates(self):
        """Lint each scenario's template via the CLI runner and compare
        matches and exit code against the expectations above."""
        self.run_scenarios()
    def test_module_integration(self):
        """ Test same templates using integration approach"""
        rules = cfnlint.core.get_rules(
            [], [], ['E', 'W'], {}, False)
        self.run_module_integration_scenarios(rules)
| 40.027933 | 154 | 0.430286 |
8a5b2d7a7e256eeb6aa5ac52e1734950519124f2 | 2,869 | py | Python | dhalsim/network_events/network_delay.py | afmurillo/WadiTwin | 80e2e260a99c02f93aa0a45c9037eef07a70a2f2 | [
"MIT"
] | null | null | null | dhalsim/network_events/network_delay.py | afmurillo/WadiTwin | 80e2e260a99c02f93aa0a45c9037eef07a70a2f2 | [
"MIT"
] | null | null | null | dhalsim/network_events/network_delay.py | afmurillo/WadiTwin | 80e2e260a99c02f93aa0a45c9037eef07a70a2f2 | [
"MIT"
] | null | null | null | import os
import subprocess
import sys
from dhalsim.network_events.synced_event import SyncedEvent
import argparse
from pathlib import Path
class NetworkDelay(SyncedEvent):
    """
    This is a delay network event. This event will use Linux-tc at a switch link that causes the indicated
    delay in ms in all communications using that network link.

    :param intermediate_yaml_path: The path to the intermediate YAML file
    :param yaml_index: The index of the event in the intermediate YAML
    :param interface_name: The name of the interface that has the event
    """

    def __init__(self, intermediate_yaml_path: Path, yaml_index: int, interface_name: str):
        super().__init__(intermediate_yaml_path, yaml_index)
        self.interface_name = interface_name
        # Delay to inject, in milliseconds, taken from the event config.
        self.delay_value = float(self.intermediate_event['value'])

    def _run_tc(self, *args) -> int:
        """Run a tc(8) command given as an argument list and return its
        exit status.

        Using an argument vector with ``subprocess.run`` (shell=False)
        instead of concatenating a shell string for ``os.system`` avoids
        quoting/injection problems if the interface name ever contains
        shell metacharacters, and makes the exit status explicit.
        """
        cmd = ['tc'] + [str(arg) for arg in args]
        self.logger.debug('trying command: ' + ' '.join(cmd))
        return subprocess.run(cmd).returncode

    def setup(self):
        """Install a netem qdisc delaying all traffic on the interface."""
        self.logger.debug("Starting network delay queue at interface " + str(self.interface_name)
                          + " with value " + str(self.delay_value))
        # Drop any pre-existing root qdisc first; this fails harmlessly
        # when none is installed yet.
        self._run_tc('qdisc', 'del', 'dev', self.interface_name, 'root')
        self._run_tc('qdisc', 'add', 'dev', self.interface_name, 'root',
                     'netem', 'delay', str(self.delay_value) + 'ms')

    def teardown(self):
        """Remove the netem qdisc installed by :meth:`setup`."""
        self._run_tc('qdisc', 'del', 'dev', self.interface_name, 'root')
        self.logger.info("Tear down network event")

    def interrupt(self):
        """
        This function will be called when we want to stop the event. It calls the teardown
        function if the event is in state 1 (running)
        """
        if self.state == 1:
            self.teardown()

    def event_step(self):
        """This function just passes, as there is no required action in an event step."""
        pass
def is_valid_file(parser_instance, arg):
    """Return *arg* if it names an existing path, else abort via the parser.

    ``argparse``'s ``error`` prints a usage message and exits the program,
    so this function only returns on success.
    """
    if os.path.exists(arg):
        return arg
    parser_instance.error(arg + " does not exist")
if __name__ == "__main__":
    # Parse the three positional arguments and run the delay event loop.
    arg_parser = argparse.ArgumentParser(description='Start everything for an event')
    arg_parser.add_argument(dest="intermediate_yaml",
                            help="intermediate yaml file", metavar="FILE",
                            type=lambda x: is_valid_file(arg_parser, x))
    arg_parser.add_argument(dest="index", help="Index of the network event in intermediate yaml",
                            type=int,
                            metavar="N")
    arg_parser.add_argument(dest="interface_name", help="Interface name of the network event")
    parsed = arg_parser.parse_args()
    delay_event = NetworkDelay(intermediate_yaml_path=Path(parsed.intermediate_yaml),
                               yaml_index=parsed.index,
                               interface_name=parsed.interface_name)
    delay_event.main_loop()
| 36.782051 | 114 | 0.656675 |
51febf22b0facd552d35748a11b39de2fc7adf8c | 265 | py | Python | poezio/ui/consts.py | hrnciar/poezio | 12b8af11df35dda535412b0c02ba792890095a7d | [
"Zlib"
] | 50 | 2015-02-11T12:00:25.000Z | 2022-01-18T05:26:40.000Z | poezio/ui/consts.py | hrnciar/poezio | 12b8af11df35dda535412b0c02ba792890095a7d | [
"Zlib"
] | 3 | 2017-11-27T20:55:42.000Z | 2020-03-20T18:05:53.000Z | poezio/ui/consts.py | hrnciar/poezio | 12b8af11df35dda535412b0c02ba792890095a7d | [
"Zlib"
] | 15 | 2015-04-22T14:33:36.000Z | 2021-09-29T21:33:50.000Z | from datetime import datetime
FORMAT_CHAR = '\x19'
# These are non-printable chars, so they should never appear in the input,
# I guess. But maybe we can find better chars that are even less risky.
FORMAT_CHARS = '\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x1A'
| 37.857143 | 74 | 0.754717 |
83d02ac35274083af6c9d0596bc9c7821b62536e | 3,592 | py | Python | 3_3_2_speaker_file.py | hajin-kim/Python-EV3 | 4d590772ee82697f43dea5a878275a917f13acc5 | [
"MIT"
] | null | null | null | 3_3_2_speaker_file.py | hajin-kim/Python-EV3 | 4d590772ee82697f43dea5a878275a917f13acc5 | [
"MIT"
] | null | null | null | 3_3_2_speaker_file.py | hajin-kim/Python-EV3 | 4d590772ee82697f43dea5a878275a917f13acc5 | [
"MIT"
] | null | null | null | #!/usr/bin/env pybricks-micropython
from pybricks.hubs import EV3Brick
from pybricks.ev3devices import (Motor, TouchSensor, ColorSensor,
InfraredSensor, UltrasonicSensor, GyroSensor)
from pybricks.parameters import Port, Stop, Direction, Button, Color
from pybricks.tools import wait, StopWatch, DataLog
from pybricks.robotics import DriveBase
from pybricks.media.ev3dev import SoundFile, ImageFile
# This program requires LEGO EV3 MicroPython v2.0 or higher.
# Click "Open user guide" on the EV3 extension tab for more information.
# Create your objects here.
ev3 = EV3Brick()  # handle to the brick; all speaker calls below go through it
# Write your program here.
ev3.speaker.beep()  # short beep to signal that the program has started
# classmethod speaker.play_file(file_name)
# Plays a sound file.
# Parameters
# • file_name(str) – path of the sound file (including the extension).
# you can choose one of below.
# • volume(percentage: %) – sound volume (Default: 100).
# SoundFile.BLACK
# SoundFile.BLUE
# SoundFile.BROWN
# SoundFile.DOWN
# SoundFile.GREEN
# SoundFile.LEFT
# SoundFile.RED
# SoundFile.RIGHT
# SoundFile.UP
# SoundFile.WHITE
# SoundFile.YELLOW
# SoundFile.EV3
# SoundFile._BASE_PATH
# SoundFile.CAT_PURR
# SoundFile.DOG_BARK_1
# SoundFile.DOG_BARK_2
# SoundFile.DOG_GROWL
# SoundFile.DOG_SNIFF
# SoundFile.DOG_WHINE
# SoundFile.ELEPHANT_CALL
# SoundFile.INSECT_BUZZ_1
# SoundFile.INSECT_BUZZ_2
# SoundFile.INSECT_CHIRP
# SoundFile.SNAKE_HISS
# SoundFile.SNAKE_RATTLE
# SoundFile.T_REX_ROAR
# SoundFile.BRAVO
# SoundFile.FANTASTIC
# SoundFile.GAME_OVER
# SoundFile.GO
# SoundFile.GOOD
# SoundFile.GOOD_JOB
# SoundFile.GOODBYE
# SoundFile.HELLO
# SoundFile.HI
# SoundFile.LEGO
# SoundFile.MINDSTORMS
# SoundFile.MORNING
# SoundFile.NO
# SoundFile.OKAY
# SoundFile.OKEY_DOKEY
# SoundFile.SORRY
# SoundFile.THANK_YOU
# SoundFile.YES
# SoundFile.BOING
# SoundFile.BOO
# SoundFile.CHEERING
# SoundFile.CRUNCHING
# SoundFile.CRYING
# SoundFile.FANFARE
# SoundFile.KUNG_FU
# SoundFile.LAUGHING_1
# SoundFile.LAUGHING_2
# SoundFile.MAGIC_WAND
# SoundFile.OUCH
# SoundFile.SHOUTING
# SoundFile.SMACK
# SoundFile.SNEEZING
# SoundFile.SNORING
# SoundFile.UH_OH
# SoundFile.ACTIVATE
# SoundFile.ANALYZE
# SoundFile.BACKWARDS
# SoundFile.COLOR
# SoundFile.DETECTED
# SoundFile.ERROR
# SoundFile.ERROR_ALARM
# SoundFile.FLASHING
# SoundFile.FORWARD
# SoundFile.OBJECT
# SoundFile.SEARCHING
# SoundFile.START
# SoundFile.STOP
# SoundFile.TOUCH
# SoundFile.TURN
# SoundFile.AIR_RELEASE
# SoundFile.AIRBRAKE
# SoundFile.BACKING_ALERT
# SoundFile.HORN_1
# SoundFile.HORN_2
# SoundFile.LASER
# SoundFile.MOTOR_IDLE
# SoundFile.MOTOR_START
# SoundFile.MOTOR_STOP
# SoundFile.RATCHET
# SoundFile.SONAR
# SoundFile.TICK_TACK
# SoundFile.SPEED_DOWN
# SoundFile.SPEED_IDLE
# SoundFile.SPEED_UP
# SoundFile.ZERO
# SoundFile.ONE
# SoundFile.TWO
# SoundFile.THREE
# SoundFile.FOUR
# SoundFile.FIVE
# SoundFile.SIX
# SoundFile.SEVEN
# SoundFile.EIGHT
# SoundFile.NINE
# SoundFile.TEN
# SoundFile.CLICK
# SoundFile.CONFIRM
# SoundFile.GENERAL_ALERT
# SoundFile.OVERPOWER
# SoundFile.READY
# Play a sound file preloaded on the brick, at reduced volume.
ev3.speaker.set_volume(25)  # volume in percent
ev3.speaker.play_file(SoundFile.READY)
# Play a user-supplied WAV file — presumably uploaded next to this script;
# verify the path on the brick.
# ev3.speaker.set_volume(25)
ev3.speaker.play_file("Half_of_Time.wav") # [a-z_].wav
| 25.295775 | 78 | 0.688753 |
4787bc58fd71a71f0b53c7542713d43c5056a820 | 913 | py | Python | Label initialization.py | hianingan/Prediction-of-igneous-reservoir-parameters | 92b781762472eef25b5ec27cc9f9e94d864244f2 | [
"Apache-2.0"
] | 3 | 2020-05-27T13:08:33.000Z | 2022-03-01T06:59:06.000Z | Label initialization.py | hianingan/Prediction-of-igneous-reservoir-parameters | 92b781762472eef25b5ec27cc9f9e94d864244f2 | [
"Apache-2.0"
] | null | null | null | Label initialization.py | hianingan/Prediction-of-igneous-reservoir-parameters | 92b781762472eef25b5ec27cc9f9e94d864244f2 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import csv
# NOTE(review): these three assignments had no right-hand side in the
# original source (a SyntaxError as written). Filled with empty-string
# placeholders so the module parses; set real paths before running.
xls_name = ""   # path to the Excel workbook holding the labels
root = ""       # directory the output CSV is written into
file_name = ""  # name of the output CSV file
def make_tensor(xls_name, sheet_name, n_rows=539):
    """Read one label column from an Excel sheet into a 1-D float array.

    Opens the workbook, takes the value at column index 2 of each of the
    first ``n_rows`` rows, rounds it to 3 decimal places, and returns the
    values as a numpy array.

    :param xls_name: path of the Excel workbook to read
    :param sheet_name: name of the sheet inside the workbook
    :param n_rows: number of leading rows to read (default 539, the
        sample count that was previously hard-coded)
    :return: numpy array of shape ``(n_rows,)`` with dtype float
    """
    df = pd.read_excel(xls_name, sheet_name=sheet_name)
    # Round each raw label to 3 decimals, as the original loop did.
    values = [round(df.iloc[i, 2], 3) for i in range(n_rows)]
    return np.array(values, dtype=float)
def load_csv(root, filename, tensor):
    """Write *tensor* to ``root/filename`` as a one-column CSV.

    The file is only created when it does not already exist; an existing
    file is left untouched. Each element of *tensor* becomes its own row.

    :param root: directory the CSV is written into
    :param filename: name of the CSV file
    :param tensor: sequence of label values to dump, one per row
    """
    target = os.path.join(root, filename)
    if not os.path.exists(target):
        with open(target, mode='w', newline='') as f:
            csv_write = csv.writer(f)
            # Iterate over the tensor's actual length instead of the
            # hard-coded 539, which raised IndexError for shorter inputs.
            for i, value in enumerate(tensor):
                print(i)  # progress output, kept from the original
                csv_write.writerow([value])
    print(filename)
def main():
    """Build the label tensor from sheet '一' and dump it to the CSV file."""
    labels = make_tensor(xls_name, '一')
    print(labels)
    load_csv(root, file_name, labels)


if __name__ == '__main__':
    main()
| 20.75 | 76 | 0.564074 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.