content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
#!/usr/bin/env python
import numpy as np
import datetime as dt
import sys, os, pickle, time
from keras.models import Model, save_model, load_model
from keras.regularizers import l2
from keras.optimizers import SGD, Adam
import keras.backend as K
import tensorflow as tf
import pandas as pd
import innvestigate
import innvestigate.utils as iutils
from ml_functions import read_csv_files, normalize_multivariate_data, log, get_features
def brier_score_keras(obs, preds):
    """Brier score: mean squared difference between forecasts and observations."""
    squared_error = (preds - obs) ** 2
    return K.mean(squared_error)
def brier_skill_score_keras(obs, preds):
    """Brier skill score relative to climatology: BSS = 1 - BS / BS_climo.

    BUG FIX: the previous version computed ``ratio = bs / climo`` but then
    returned ``climo`` (the climatological variance), so the metric reported
    during training/evaluation was never a skill score. Note this changes the
    value logged for this metric; the function signature is unchanged, so
    ``load_model(..., custom_objects=...)`` continues to work.
    """
    climo = K.mean((obs - K.mean(obs)) ** 2)
    bs = brier_score_keras(obs, preds)
    return 1.0 - bs / climo
def auc(obs, preds):
    """Streaming AUC metric for TF1-style Keras training."""
    # tf.metrics.auc returns (value_op, update_op); index [1] takes the
    # update op, which also yields the running AUC value when executed.
    auc = tf.metrics.auc(obs, preds)[1]
    # The streaming metric allocates local variables that must be initialized
    # in the active session before the op can run.
    K.get_session().run(tf.local_variables_initializer())
    return auc
def log(msg):
    """Print *msg* prefixed with the current wall-clock timestamp.

    NOTE(review): this redefinition shadows the ``log`` imported from
    ``ml_functions`` at the top of the file -- confirm that is intended.
    """
    stamp = time.ctime(time.time())
    print(stamp, msg)
### NEURAL NETWORK PARAMETERS ###
# Report-window sizes come from the command line: argv[1] = space, argv[2] = time.
nn_params = { 'num_layers': 1, 'num_neurons': [ 1024 ], 'dropout': 0.1, 'lr': 0.001, 'num_epochs': 30, \
              'report_window_space':[ int(sys.argv[1]) ], 'report_window_time':[ int(sys.argv[2]) ] }
dataset = 'RT2020'
# Scaling statistics were computed on a different (training) dataset than the
# forecasts being analyzed here.
scaling_dataset = 'NSC3km-12sec'
scaling_file = '/glade/work/sobash/NSC_objects/scaling_values_all_%s.pk'%scaling_dataset
trained_models_dir = '/glade/work/sobash/NSC_objects/trained_models_paper'
sdate = dt.datetime(2020,5,1,0,0,0)
edate = dt.datetime(2020,5,10,0,0,0)
dateinc = dt.timedelta(days=1)
features = get_features('basic')
log('Reading Data')
# read data and reassign data types to float32 to save memory
type_dict = {}
for f in features: type_dict[f]='float32'
df, numfcsts = read_csv_files(sdate, edate, dataset)
print(numfcsts)
# Normalize with the precomputed scaling values so inputs match what the
# trained network saw during training.
scaling_values = pickle.load(open(scaling_file, 'rb'))
norm_in_data, scaling_values = normalize_multivariate_data(df[features].values.astype(np.float32), features, scaling_values=scaling_values)
dense_model = None
model_fname = '%s/neural_network_2016_120km_2hr_nn%d_drop%.1f_basic.h5'%(trained_models_dir,nn_params['num_neurons'][0],nn_params['dropout'])
# Custom metrics must be supplied so Keras can deserialize the model.
dense_model = load_model(model_fname, custom_objects={'brier_score_keras': brier_score_keras, 'brier_skill_score_keras':brier_skill_score_keras, 'auc':auc })
print(norm_in_data.shape)
# Layer-wise relevance propagation (LRP) attribution for output neuron 0.
analyzer = innvestigate.create_analyzer('lrp.alpha_2_beta_1', dense_model, neuron_selection_mode='index')
a = analyzer.analyze(norm_in_data, 0)
# Scale relevances into [-1, 1], then average over the spatial dimension for
# one time slice (assumes 36 forecast hours x 1298 grid points -- TODO confirm).
a /= np.max(np.abs(a))
a = a.reshape((36,1298,-1))
a = np.mean(a[24,:,:], axis=0)
print(a.shape)
for i,f in enumerate(features):
    print(f, a[i])
log('Finished')
| neural_network_lrp.py | 2,544 | !/usr/bin/env python NEURAL NETWORK PARAMETERS read data and reassign data types to float32 to save memory | 107 | en | 0.495952 |
#!/usr/bin/env python3
import torch
import torch.optim as optim
import os, sys
import warnings
import numpy as np
current_path = os.path.dirname(os.path.realpath(__file__))
PROJECT_HOME = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir, os.pardir, os.pardir))
if PROJECT_HOME not in sys.path:
sys.path.append(PROJECT_HOME)
from common.fast_rl.common.utils import EarlyStopping
from common.environments import get_data
from codes.f_utils import common_utils
from common.environments import TimeUnit, TradeEnvironmentType, Action
from common.environments import UpbitEnvironment
from common.environments import EpsilonGreedyTradeDQNActionSelector, \
ArgmaxTradeActionSelector, RandomTradeDQNActionSelector
from common.fast_rl import rl_agent, value_based_model, actions, experience_single, replay_buffer
from common.fast_rl.common import utils
from common.fast_rl.common import statistics
from rl_main.trade_main import visualizer
from common.slack import PushSlack
# Slack client used to push the final evaluation summaries from main().
pusher = PushSlack()
##### NOTE #####
from codes.a_config.parameters import PARAMETERS as params
##### NOTE #####
# Silence noisy deprecation/user warnings from dependencies.
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)
# Work around "duplicate OpenMP runtime" aborts (common with PyTorch builds).
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Model checkpoints are written under <project>/out/model_save_files.
MODEL_SAVE_DIR = os.path.join(PROJECT_HOME, "out", "model_save_files")
if not os.path.exists(MODEL_SAVE_DIR):
    os.makedirs(MODEL_SAVE_DIR)
def evaluate(env, agent, verbose=True):
    """Roll out one full episode of ``agent`` on ``env``.

    Prints a per-step trace (OHLCV row, chosen action, reward breakdown) when
    ``verbose`` is True.

    :param env: trade environment exposing ``data``, ``transaction_state_idx``,
        ``get_action_meanings`` and a gym-style ``step``/``reset`` API
    :param agent: fast_rl agent mapping (states, agent_states) -> actions
    :param verbose: print the per-step trace and the episode summary
    :return: (profit reported by the env's final info dict, number of steps)
    """
    # The experience source is only used here for its state preprocessing.
    experience_source = experience_single.ExperienceSourceSingleEnvFirstLast(env, agent, gamma=params.GAMMA, n_step=params.N_STEP)
    done = False
    state = env.reset()
    agent_state = agent.initial_agent_state()
    episode_reward = 0.0
    num_buys = 0
    info = None
    step_idx = 0
    while not done:
        step_idx += 1
        # The agent API is batched; wrap the single state/agent-state in lists.
        states_input = []
        processed_state = experience_source.get_processed_state(state)
        states_input.append(processed_state)
        agent_states_input = []
        agent_states_input.append(agent_state)
        new_actions, new_agent_states = agent(states_input, agent_states_input)
        agent_state = new_agent_states[0]
        action = new_actions[0]
        if action == Action.MARKET_BUY.value:
            num_buys += 1
            # Display caps at BUY(10); presumably the env ignores buys past
            # the tenth -- TODO confirm against UpbitEnvironment.
            if num_buys > 10:
                action_str = "BUY({0})".format(10)
            else:
                action_str = "BUY({0})".format(num_buys)
        else:
            action_str = env.get_action_meanings()[action]
        msg = "[{0:2}|{1}] OHLCV: {2}, {3}, {4}, {5}, {6:<10.1f}, Action: {7:7} --> ".format(
            step_idx,
            env.data.iloc[env.transaction_state_idx]['datetime_krw'],
            env.data.iloc[env.transaction_state_idx]['open'],
            env.data.iloc[env.transaction_state_idx]['high'],
            env.data.iloc[env.transaction_state_idx]['low'],
            env.data.iloc[env.transaction_state_idx]['final'],
            env.data.iloc[env.transaction_state_idx]['volume'],
            action_str
        )
        next_state, reward, done, info = env.step(action)
        # Append an action-specific reward breakdown to the trace line.
        if action in [Action.HOLD.value]:
            msg += "Reward: {0:.3f}, hold coin: {1:.1f}".format(
                reward, info["hold_coin"]
            )
        elif action == Action.MARKET_BUY.value:
            if num_buys <= 10:
                coin_krw_str = "{0:.1f}".format(info['coin_krw'])
                commission_fee_str = "{0:.1f}".format(info['commission_fee'])
            else:
                coin_krw_str = "-"
                commission_fee_str = "-"
            msg += "Reward: {0:.3f}, slippage: {1:.1f}, coin_unit_price: {2:.1f}, " \
                   "coin_krw: {3}, commission: {4}, hold coin: {5:.1f}".format(
                reward, info["slippage"], info["coin_unit_price"],
                coin_krw_str, commission_fee_str, info["hold_coin"]
            )
        elif action == Action.MARKET_SELL.value:
            msg += "Reward: {0:.3f}, slippage: {1:.1f}, coin_unit_price: {2:.1f}, " \
                   "coin_krw: {3:.1f}, commission: {4:.1f}, sold coin: {5:.1f}, profit: {6:.1f}".format(
                reward, info["slippage"], info["coin_unit_price"],
                info['coin_krw'], info['commission_fee'], info["sold_coin"], info["profit"]
            )
        else:
            raise ValueError()
        if verbose:
            print(msg)
        episode_reward += reward
        state = next_state
    if verbose:
        print("SAMPLED TRANSACTION DONE! - START DATETIME: {0}, EPISODE REWARD: {1:>8.3f}, "
              "PROFIT: {2:>10.1f}, STEPS: {3}".format(
            env.transaction_start_datetime, episode_reward, info["profit"], step_idx
        ))
    return info["profit"], step_idx
def train(coin_name, time_unit, train_env, evaluate_env):
    """Train a dueling double-DQN trading agent on ``train_env``.

    Periodically evaluates the greedy policy against a random baseline on
    ``evaluate_env`` and feeds the DQN's total profit to early stopping.

    :param coin_name: ticker used in the checkpoint file prefix
    :param time_unit: candle TimeUnit used in the checkpoint file prefix
    :param train_env: environment generating training experience
    :param evaluate_env: environment used for periodic evaluation
    :return: the trained online network
    """
    common_utils.print_fast_rl_params(params)
    # Scale the SGD batch with the number of env steps collected per train step.
    params.BATCH_SIZE *= params.TRAIN_STEP_FREQ
    net = value_based_model.DuelingDQNSmallCNN(
        observation_shape=train_env.observation_space.shape,
        n_actions=train_env.action_space.n
    ).to(device)
    print(net)
    print("ACTION MEANING: {0}".format(train_env.get_action_meanings()))
    # Target network for the double-DQN update; synced from ``net`` below.
    tgt_net = value_based_model.DuelingDQNSmallCNN(
        observation_shape=train_env.observation_space.shape,
        n_actions=train_env.action_space.n
    ).to(device)
    action_selector = EpsilonGreedyTradeDQNActionSelector(epsilon=params.EPSILON_INIT, env=train_env)
    agent = rl_agent.DQNAgent(dqn_model=net, action_selector=action_selector, device=device)
    # Greedy agent (shares the online net) and random baseline for evaluation.
    argmax_action_selector = ArgmaxTradeActionSelector(env=evaluate_env)
    evaluate_agent = rl_agent.DQNAgent(dqn_model=net, action_selector=argmax_action_selector, device=device)
    random_action_selector = RandomTradeDQNActionSelector(env=evaluate_env)
    random_agent = rl_agent.DQNAgent(dqn_model=None, action_selector=random_action_selector, device=device)
    experience_source = experience_single.ExperienceSourceSingleEnvFirstLast(
        train_env, agent, gamma=params.GAMMA, n_step=params.N_STEP
    )
    buffer = replay_buffer.ExperienceReplayBuffer(experience_source, buffer_size=params.REPLAY_BUFFER_SIZE)
    optimizer = optim.Adam(net.parameters(), lr=params.LEARNING_RATE)
    step_idx = 0
    last_loss = 0.0
    evaluate_steps = []
    evaluate_dqn_total_profits = []
    evaluate_random_total_profits = []
    # BUG FIX: ``epsilon_tracker`` was referenced inside the training loop but
    # never defined, raising NameError on the first iteration. No epsilon
    # annealing schedule is wired up in this function, so default it to None
    # (epsilon stays at EPSILON_INIT); assign a tracker here to enable decay.
    epsilon_tracker = None
    early_stopping = EarlyStopping(
        patience=params.STOP_PATIENCE_COUNT,
        evaluation_min_threshold=params.TRAIN_STOP_EPISODE_REWARD,
        verbose=True,
        delta=0.0,
        model_save_dir=MODEL_SAVE_DIR,
        model_save_file_prefix=params.ENVIRONMENT_ID.value + "_" + coin_name + "_" + time_unit.value,
        agent=agent
    )
    with utils.SpeedTracker(params=params, frame=False, early_stopping=None) as reward_tracker:
        while step_idx < params.MAX_GLOBAL_STEP:
            step_idx += params.TRAIN_STEP_FREQ
            last_entry = buffer.populate(params.TRAIN_STEP_FREQ)
            if epsilon_tracker:
                epsilon_tracker.update(step_idx)  # was misspelled "udpate"
            episode_rewards = experience_source.pop_episode_reward_lst()
            solved = False
            if episode_rewards:
                for episode_reward in episode_rewards:
                    reward_tracker.set_episode_reward(
                        episode_reward, step_idx, action_selector.epsilon, last_info=last_entry.info,
                        last_loss=last_loss, model=net
                    )
                    # Periodically compare the greedy DQN policy against the
                    # random baseline and feed DQN profit to early stopping.
                    if reward_tracker.done_episodes % params.TEST_PERIOD_EPISODE == 0:
                        print("#" * 200)
                        print("[TEST START]")
                        evaluate(evaluate_env, evaluate_agent)
                        evaluate_steps.append(step_idx)
                        dqn_total_profit, _ = evaluate_random(
                            "DQN", evaluate_env, evaluate_agent, num_episodes=100
                        )
                        evaluate_dqn_total_profits.append(dqn_total_profit)
                        random_total_profit, _ = evaluate_random(
                            "RANDOM", evaluate_env, random_agent, num_episodes=100
                        )
                        evaluate_random_total_profits.append(random_total_profit)
                        solved = early_stopping(dqn_total_profit, step_idx=step_idx)
                        visualizer.draw_performance(
                            evaluate_steps,
                            evaluate_dqn_total_profits,
                            evaluate_random_total_profits
                        )
                        print("[TEST END]")
                        print("#" * 200)
                    if solved:
                        break
            if solved:
                break
            # Double-DQN update on a sampled minibatch.
            optimizer.zero_grad()
            batch = buffer.sample(params.BATCH_SIZE)
            loss_v = value_based_model.calc_loss_double_dqn(batch, net, tgt_net, gamma=params.GAMMA, device=device)
            loss_v.backward()
            optimizer.step()
            # (dropped the unused ``draw_loss`` local)
            last_loss = loss_v.detach().item()
            # Sync the target net once every TARGET_NET_SYNC_STEP_PERIOD steps.
            if step_idx % params.TARGET_NET_SYNC_STEP_PERIOD < params.TRAIN_STEP_FREQ:
                tgt_net.sync(net)
    return net
def evaluate_random(agent_type, env, agent, num_episodes, verbose=True):
    """Run ``num_episodes`` evaluation episodes and aggregate the results.

    :return: (total profit across episodes, average steps per episode)
    """
    wins = 0
    losses = 0
    profit_sum = 0.0
    step_sum = 0
    for _ in range(num_episodes):
        profit, steps = evaluate(env, agent, verbose=False)
        if profit > 0:
            wins += 1
        else:
            losses += 1
        profit_sum += profit
        step_sum += steps
    mean_steps = step_sum / num_episodes
    if verbose:
        print("###[{0:6}] POSITIVE: {1}/{3}, NEGATIVE: {2}/{3}, TOTAL PROFIT: {4:.1f}, AVG. STEP FOR EPISODE: {5:.1f}".format(
            agent_type, wins, losses, num_episodes, profit_sum, mean_steps
        ))
    return profit_sum, mean_steps
def evaluate_sequential_all(agent_type, env, agent, data_size, verbose=True):
    """Sweep the evaluation data from the start, episode by episode, until the
    environment's transaction index reaches the end of the data.

    :return: (num positive-profit episodes, num non-positive episodes,
              episode count, total profit, average steps per episode)
    """
    wins = 0
    losses = 0
    profit_sum = 0.0
    step_sum = 0
    episode_count = 0
    # Restart the sequential sweep from the beginning of the data.
    env.transaction_state_idx = 0
    while True:
        episode_count += 1
        profit, steps = evaluate(env, agent, verbose=False)
        if profit > 0:
            wins += 1
        else:
            losses += 1
        profit_sum += profit
        step_sum += steps
        if env.transaction_state_idx >= data_size - 1:
            break
    avg_steps = step_sum / episode_count
    if verbose:
        print("###[{0:6}] POSITIVE: {1}/{3}, NEGATIVE: {2}/{3}, TOTAL PROFIT: {4:.1f}, AVG. STEP FOR EPISODE: {5:.1f}".format(
            agent_type, wins, losses, episode_count, profit_sum, avg_steps
        ))
    return wins, losses, episode_count, profit_sum, avg_steps
def main():
    """Train a DQN on hourly OMG candles, then evaluate it (and a random
    baseline) sequentially over the hold-out data and push results to Slack."""
    coin_name = "OMG"
    time_unit = TimeUnit.ONE_HOUR
    train_data_info, evaluate_data_info = get_data(coin_name=coin_name, time_unit=time_unit)
    print(train_data_info["first_datetime_krw"], train_data_info["last_datetime_krw"])
    print(evaluate_data_info["first_datetime_krw"], evaluate_data_info["last_datetime_krw"])
    train_env = UpbitEnvironment(
        coin_name=coin_name,
        time_unit=time_unit,
        data_info=train_data_info,
        environment_type=TradeEnvironmentType.TRAIN
    )
    # Random-start evaluation environment used during training.
    evaluate_random_env = UpbitEnvironment(
        coin_name=coin_name,
        time_unit=time_unit,
        data_info=evaluate_data_info,
        environment_type=TradeEnvironmentType.TEST_RANDOM,
    )
    net = train(coin_name, time_unit, train_env, evaluate_random_env)
    print("#### TEST SEQUENTIALLY")
    # Sequential environment sweeps the hold-out data from start to finish.
    evaluate_sequential_env = UpbitEnvironment(
        coin_name=coin_name,
        time_unit=time_unit,
        data_info=evaluate_data_info,
        environment_type=TradeEnvironmentType.TEST_SEQUENTIAL,
    )
    argmax_action_selector = ArgmaxTradeActionSelector(env=evaluate_sequential_env)
    evaluate_agent = rl_agent.DQNAgent(dqn_model=net, action_selector=argmax_action_selector, device=device)
    # Repeat the full sequential sweep 10 times to get mean/std statistics.
    sequential_dqn_num_positives = []
    sequential_dqn_num_negatives = []
    sequential_dqn_num_episodes = []
    sequential_dqn_num_steps_per_episode = []
    sequential_dqn_total_profits = []
    for _ in range(10):
        num_positive, num_negative, num_episodes, total_profit, avg_num_steps_per_episode = evaluate_sequential_all(
            "DQN", evaluate_sequential_env, evaluate_agent, data_size=len(evaluate_data_info["data"]), verbose=False
        )
        sequential_dqn_num_positives.append(num_positive)
        sequential_dqn_num_negatives.append(num_negative)
        sequential_dqn_num_episodes.append(num_episodes)
        sequential_dqn_total_profits.append(total_profit)
        sequential_dqn_num_steps_per_episode.append(avg_num_steps_per_episode)
    dqn_msg = f"SEQUENTIAL: DQN - {np.mean(sequential_dqn_num_episodes):.1f} EPISODES - " \
              f"POSITIVE: {np.mean(sequential_dqn_num_positives):.1f}, " \
              f"NEGATIVE: {np.mean(sequential_dqn_num_negatives):.1f}, " \
              f"AVERAGE PROFIT {np.mean(sequential_dqn_total_profits):.1f}/STD {np.std(sequential_dqn_total_profits):.1f}, " \
              f"AVERAGE STEP {np.mean(sequential_dqn_num_steps_per_episode):.1f}"
    print(dqn_msg)
    # Same 10-sweep protocol for the random baseline agent.
    random_action_selector = RandomTradeDQNActionSelector(env=evaluate_sequential_env)
    random_agent = rl_agent.DQNAgent(dqn_model=None, action_selector=random_action_selector, device=device)
    sequential_random_num_positives = []
    sequential_random_num_negatives = []
    sequential_random_num_episodes = []
    sequential_random_num_steps_per_episode = []
    sequential_random_total_profits = []
    for _ in range(10):
        num_positive, num_negative, num_episodes, total_profit, avg_num_steps_per_episode = evaluate_sequential_all(
            "RANDOM", evaluate_sequential_env, random_agent, data_size=len(evaluate_data_info["data"]), verbose=False
        )
        sequential_random_num_positives.append(num_positive)
        sequential_random_num_negatives.append(num_negative)
        sequential_random_num_episodes.append(num_episodes)
        sequential_random_total_profits.append(total_profit)
        sequential_random_num_steps_per_episode.append(avg_num_steps_per_episode)
    random_msg = f"SEQUENTIAL: RANDOM - {np.mean(sequential_random_num_episodes):.1f} EPISODES - " \
                 f"POSITIVE: {np.mean(sequential_random_num_positives):.1f}, " \
                 f"NEGATIVE: {np.mean(sequential_random_num_negatives):.1f}, " \
                 f"AVERAGE PROFIT {np.mean(sequential_random_total_profits):.1f}/STD {np.std(sequential_random_total_profits):.1f}, " \
                 f"AVERAGE STEP {np.mean(sequential_random_num_steps_per_episode):.1f}"
    print(random_msg)
    # Push both summaries to Slack.
    pusher.send_message(
        "me", dqn_msg
    )
    pusher.send_message(
        "me", random_msg
    )
if __name__ == "__main__":
    # FIX: dataset-dump residue ("| codes/f_main/... | 15,267 | ...") was fused
    # onto this line, breaking the file's syntax; restored the bare call.
    main()
import os
import warnings
# Suppress all warnings for cleaner console output during batch runs.
warnings.filterwarnings("ignore")
# Command-line template shared by every experiment run below; per-run flags are
# appended before calling os.system. NOTE: the --CT/--PT/--TT values here are
# repeated (and thus overridden) by the flags appended at call time.
shared_params = ('python CPT_STMeta_Simplify_Obj.py '
                 '--Dataset ChargeStation '
                 '--CT 6 '
                 '--PT 7 '
                 '--TT 4 '
                 '--GLL 1 '
                 '--LSTMUnits 64 '
                 '--GALUnits 64 '
                 '--GALHeads 2 '
                 '--DenseUnits 32 '
                 '--DataRange All '
                 '--TrainDays All '
                 '--TC 0.1 '
                 '--TD 1000 '
                 '--TI 500 '
                 '--Epoch 10000 '
                 '--Train False '
                 '--lr 2e-5 '
                 '--Normalize True '
                 '--patience 0.1 '
                 '--ESlength 200 '
                 '--BatchSize 128 '
                 '--Device 0 ')
if __name__ == "__main__":
    # Run first on the DiDi-Xian, DiDi-Chengdu, Metro-Shanghai and
    # ChargeStation-Beijing datasets, since those take relatively little time.
    # (Translated from the original Chinese comment.)
    # stability test
    # FIX: dataset-dump residue ("| Experiments/... | 1,321 | ...") was fused
    # onto the os.system line, breaking the file's syntax; restored the call.
    test_times = 10
    for i in range(test_times):
        os.system(shared_params + '--CT 6 --PT 7 --TT 4 --City Beijing --Group Beijing'
                                  ' --K 1 --L 1 --Graph Distance-Correlation --CodeVersion ST_Sim1_%s' % i)
# pylint: disable=redefined-outer-name
import asyncio
import time
import pytest
DEFAULT_MAX_LATENCY = 10 * 1000
@pytest.mark.asyncio
async def test_slow_server(host):
    """With a slow handler, a small burst must echo correctly within the time
    budget; a large burst may be partially load-shed with 429 but must finish."""
    if not pytest.enable_microbatch:
        pytest.skip()
    a_val, b_val = 0.2, 1
    payload = '{"a": %s, "b": %s}' % (a_val, b_val)
    started = time.time()
    # Small burst: every request must succeed and echo the payload back.
    small_burst = [
        pytest.assert_request(
            "POST",
            f"http://{host}/echo_with_delay",
            headers=(("Content-Type", "application/json"),),
            data=payload,
            timeout=30,
            assert_status=200,
            assert_data=payload.encode(),
        )
        for _ in range(10)
    ]
    await asyncio.gather(*small_burst)
    assert time.time() - started < 12
    # Large burst: the server may shed load with 429, but nothing else.
    large_burst = [
        pytest.assert_request(
            "POST",
            f"http://{host}/echo_with_delay",
            headers=(("Content-Type", "application/json"),),
            data=payload,
            assert_status=lambda code: code in (200, 429),
        )
        for _ in range(100)
    ]
    await asyncio.gather(*large_burst)
@pytest.mark.asyncio
async def test_fast_server(host):
    """With a fast handler, a large burst of requests must all succeed and
    complete well within the time budget."""
    if not pytest.enable_microbatch:
        pytest.skip()
    a_val, b_val = 0.0002, 0.01
    payload = '{"a": %s, "b": %s}' % (a_val, b_val)
    started = time.time()
    burst = [
        pytest.assert_request(
            "POST",
            f"http://{host}/echo_with_delay",
            headers=(("Content-Type", "application/json"),),
            data=payload,
            timeout=30,
            assert_status=200,
            assert_data=payload.encode(),
        )
        for _ in range(500)
    ]
    await asyncio.gather(*burst)
    assert time.time() - started < 5
| tests/integration/api_server/test_microbatch.py | 1,764 | pylint: disable=redefined-outer-name | 36 | en | 0.375342 |
import os
import numpy as np
from scipy.stats import multivariate_normal
import inspect
from sklearn.metrics.pairwise import pairwise_distances
def sample(transition_matrix, means, covs, start_state, n_samples,
           random_state):
    """Draw a state sequence and Gaussian emissions from an HMM.

    Columns of ``transition_matrix`` give P(next state | previous state).

    :param transition_matrix: (n_states, n_states) column-stochastic matrix
    :param means: (n_states, n_features) emission means
    :param covs: (n_states, n_features, n_features) emission covariances
    :param start_state: state conditioning the first transition
    :param n_samples: sequence length
    :param random_state: np.random.RandomState driving all draws
    :return: (emissions of shape (n_samples, n_features), states of shape (n_samples,))
    """
    n_states, n_features, _ = covs.shape
    states = np.zeros(n_samples, dtype='int')
    emissions = np.zeros((n_samples, n_features))
    prev_state = start_state
    for t in range(n_samples):
        # One transition draw, then one emission draw, per step.
        state = random_state.choice(n_states,
                                    p=transition_matrix[:, prev_state])
        emissions[t] = random_state.multivariate_normal(
            means[state], covs[state])
        states[t] = state
        prev_state = state
    return emissions, states
def make_data(T=20):
    """
    Sample a length-``T`` sequence from a fixed 3-state, 2-feature HMM and
    build the corresponding CRF potentials.

    Returns (states, emissions, theta) where
    theta[t, i, j] = log p(x_t | state i) + log P(state i | state j).
    """
    random_state = np.random.RandomState(0)
    d = 0.2
    e = 0.1
    transition_matrix = np.array([
        [1 - 2 * d, d, d],
        [1 - e, e, 0],
        [1 - e, 0, e],
    ])
    means = np.array([[0, 0], [10, 0], [5, -5]])
    covs = np.array([[[1, 0], [0, 1]], [[.2, 0], [0, .3]], [[2, 0], [0, 1]]])
    emissions, states = sample(transition_matrix, means, covs, 0,
                               n_samples=T, random_state=random_state)
    # Per-state log-likelihood of every emission, stacked as (T, n_states).
    per_state = [
        multivariate_normal(mean, cov).logpdf(emissions)[:, np.newaxis]
        for mean, cov in zip(means, covs)
    ]
    emission_log_likelihood = np.concatenate(per_state, axis=1)
    log_transition_matrix = np.log(transition_matrix)
    # CRF potential from HMM model
    theta = emission_log_likelihood[:, :, np.newaxis] \
        + log_transition_matrix[np.newaxis, :, :]
    return states, emissions, theta
def make_alignment_data():
    """Return a small (2, 2) pairwise-distance matrix between two random
    3-dimensional point sets, scaled down by a factor of 10."""
    rng = np.random.RandomState(0)
    n_rows, n_cols = 2, 2
    X = rng.randn(n_rows, 3)
    Y = rng.randn(n_cols, 3)
    return pairwise_distances(X, Y) / 10
def get_data_path(fn, subfolder='data'):
    """Return the absolute path to ``fn`` inside the caller's ``subfolder``.

    The path is resolved relative to the module that *calls* this function, so
    each test module can locate its own data files. The returned path is not
    checked for existence.

    Parameters
    ----------
    fn : str
        File name.
    subfolder : str, defaults to ``data``
        Name of the subfolder that contains the data.

    Returns
    -------
    str
        Absolute path to ``subfolder/fn`` next to the calling module.

    Notes
    -----
    Adapted from scikit-bio:
    https://github.com/biocore/scikit-bio/blob/master/skbio/util/_testing.py#L50
    """
    # Frame 0 is this function; frame 1 is the caller, whose file location
    # anchors the data directory. Element [1] of the frame record is the
    # file name.
    caller_filename = inspect.getouterframes(inspect.currentframe())[1][1]
    caller_dir = os.path.dirname(os.path.abspath(caller_filename))
    return os.path.join(caller_dir, subfolder, fn)
| deepblast/utils.py | 3,322 | Return path to filename ``fn`` in the data folder.
During testing it is often necessary to load data files. This
function returns the full path to files in the ``data`` subfolder
by default.
Parameters
----------
fn : str
File name.
subfolder : str, defaults to ``data``
Name of the subfolder that contains the data.
Returns
-------
str
Inferred absolute path to the test data for the module where
``get_data_path(fn)`` is called.
Notes
-----
The requested path may not point to an existing file, as its
existence is not checked.
This is from skbio's code base
https://github.com/biocore/scikit-bio/blob/master/skbio/util/_testing.py#L50
Sample data from a HMM model and compute associated CRF potentials.
CRF potential from HMM model getouterframes returns a list of tuples: the second tuple contains info about the caller, and the second element is its filename | 881 | en | 0.838895 |
from transformers import RobertaConfig
from modeling.hf_head.modeling_roberta_parsing import RobertaForGraphPrediction
from modeling.sequence_labeling import SequenceLabeling
if __name__ == '__main__':
    # Smoke-test scaffolding: build a graph-prediction head on top of RoBERTa.
    config = RobertaConfig(graph_head_hidden_size_mlp_arc=100, graph_head_hidden_size_mlp_rel=100, dropout_classifier=0.1)
    #config.graph_head_hidden_size_mlp_arc = 100
    model = RobertaForGraphPrediction(config)
    # NOTE(review): constructed with no arguments and the result is discarded
    # -- presumably work-in-progress; confirm intent.
    SequenceLabeling(
    )
    # 1. GIVE IT TO PYTORCH LIGHTNING
    # 2. DEFINE DATA MODULE FOR PARSING --> INPUT + LOSS: TRY TO FIT
    # 3. Prediction (recover full graph after bpes-
    # NOTE(review): leftover interactive debugging hook -- remove before
    # running unattended.
    breakpoint()
| parser.py | 630 | config.graph_head_hidden_size_mlp_arc = 100 1. GIVE IT TO PYTORCH LIGHTNING 2. DEFINE DATA MODULE FOR PARSING --> INPUT + LOSS: TRY TO FIT 3. Prediction (recover full graph after bpes- | 184 | en | 0.66232 |
"""
Defines useful extended internal coordinate frames
"""
import numpy as np
import McUtils.Numputils as nput
from McUtils.Coordinerds import (
ZMatrixCoordinateSystem, CartesianCoordinateSystem, CoordinateSystemConverter,
ZMatrixCoordinates, CartesianCoordinates3D, CoordinateSet, CoordinateSystemConverters
)
from .MoleculeInterface import AbstractMolecule
__all__ = [
"MolecularZMatrixCoordinateSystem",
"MolecularCartesianCoordinateSystem"
]
__reload_hook__ = [".MoleculeInterface"]
def _get_best_axes(first_pos, axes):
"""
Determine the best pair of inertial axes so that we don't get large-scale breakdowns from the choice of embedding
:param first_pos:
:type first_pos:
:param axes:
:type axes:
:return:
:rtype:
"""
if axes.ndim > 2:
axes = axes[..., (0, 1), :]
ax_choice = (0, 1)
ax_names = ["A", "B"]
else:
fp_norm = np.linalg.norm(first_pos)
if fp_norm > 1.0e-10: # not chilling at the origin...
first_pos = first_pos / fp_norm
# check if it lies along an axis or is perpendicular to an axis
a_proj = np.dot(first_pos, axes[0])
b_proj = np.dot(first_pos, axes[1])
c_proj = np.dot(first_pos, axes[2])
if np.abs(b_proj) < .05: # lies in the A/C plane
if np.abs(a_proj) > .95:
ax_choice = (1, 2)
ax_names = ["B", "C"]
else:
ax_choice = (0, 1)
ax_names = ["A", "B"]
elif np.abs(c_proj) < .05: # lies in the A/B plane
if np.abs(a_proj) > .95:
ax_choice = (1, 2)
ax_names = ["B", "C"]
else:
ax_choice = (0, 2)
ax_names = ["A", "C"]
elif np.abs(a_proj) < .05: # lies in the B/C plane
if np.abs(b_proj) > .95:
ax_choice = (0, 2)
ax_names = ["A", "C"]
else:
ax_choice = (0, 1)
ax_names = ["A", "B"]
else: # not in any of the planes so no issues
ax_choice = (0, 1)
ax_names = ["A", "B"]
else:
ax_choice = (0, 1)
ax_names = ["A", "B"]
axes = axes[ax_choice,]
return axes, ax_names, ax_choice
class MolecularZMatrixCoordinateSystem(ZMatrixCoordinateSystem):
    """
    Mirrors the standard ZMatrix coordinate system in _almost_ all regards, but forces an embedding
    """
    name = "MolecularZMatrix"
    def __init__(self, molecule, converter_options=None, **opts):
        """
        :param molecule: the molecule whose geometry defines the embedding
        :type molecule: AbstractMolecule
        :param converter_options: options forwarded to coordinate converters
        :type converter_options: dict | None
        :param opts: treated as converter options when none are given explicitly
        :type opts: dict
        """
        self.molecule = molecule
        if converter_options is None:
            # With no explicit converter options, the kwargs are the options.
            converter_options = opts
            opts = {}
        nats = len(molecule.atoms)
        super().__init__(converter_options=converter_options, dimension=(nats, 3), coordinate_shape=(nats, 3), opts=opts)
        self.set_embedding()
    @property
    def origins(self):
        # Embedding origin (center of mass) stashed by set_embedding.
        return self.converter_options['origins']
    @property
    def axes(self):
        # Embedding axes (selected inertial axes) stashed by set_embedding.
        return self.converter_options['axes']
    def pre_convert(self, system):
        # Refresh the embedding before any conversion so origins/axes track
        # the molecule's current geometry.
        self.set_embedding()
    def set_embedding(self):
        """Compute the molecular embedding (center of mass, best inertial axes,
        dummy-referenced ordering) and stash it in ``converter_options``."""
        molecule = self.molecule
        com = molecule.center_of_mass
        axes = molecule.inertial_axes
        converter_options = self.converter_options
        if 'ordering' in converter_options:
            # Point the first rows of the Z-matrix spec at the embedding
            # reference points (negative indices denote origin/axis dummies).
            ordering = np.array(converter_options['ordering'], dtype=int)
            ordering[0, 1] = -3; ordering[0, 2] = -1; ordering[0, 3] = -2
            ordering[1, 2] = -1; ordering[1, 3] = -2
            ordering[2, 3] = -2
            converter_options['ordering'] = ordering
            first = ordering[0, 0]
        else:
            first = 0
        first_pos = molecule.coords[first]
        # Choose the two inertial axes least degenerate w.r.t. the first atom.
        axes, ax_names, ax_choice = _get_best_axes(first_pos, axes)
        converter_options['origins'] = com
        converter_options['axes'] = axes
        converter_options['axes_labels'] = ax_names
        converter_options['axes_choice'] = ax_choice
        converter_options['molecule'] = molecule
    def jacobian(self,
                 *args,
                 reembed=None,
                 strip_dummies=None,
                 converter_options=None,
                 **kwargs
                 ):
        """Compute coordinate Jacobians with the molecular embedding applied.

        :param reembed: whether to re-embed structures during conversion;
            falls back to the stored converter option, else defaults to True
        :param strip_dummies: whether to drop dummy-atom entries from results;
            falls back to the stored converter option, else False
        :return: list of Jacobian arrays with flattened atom axes expanded
            to (n_atoms, 3) pairs
        """
        if converter_options is None:
            converter_options = {}
        merged_convert_options = dict(self.converter_options, **converter_options)
        try:
            remb = merged_convert_options['reembed'] if reembed is None else reembed
        except KeyError:
            remb = None
        try:
            strip_dummies = merged_convert_options['strip_dummies'] if strip_dummies is None else strip_dummies
        except KeyError:
            strip_dummies = False
        if strip_dummies:
            dummies = self.molecule.dummy_positions
        else:
            dummies = None
        if dummies is not None:
            # Indices of the real (non-dummy) atoms to keep.
            main_excludes = np.setdiff1d(
                np.arange(self.molecule.num_atoms),
                dummies
            )
        try:
            # Temporarily force reembedding for the underlying conversion.
            self.converter_options['reembed'] = True if remb is None else remb
            jacs = super().jacobian(*args, converter_options=converter_options, **kwargs)
            raw_jacs = []
            for j in jacs:
                # Split each flattened (n_atoms*3) leading axis into (n_atoms, 3).
                ext_dim = j.ndim - 2
                shp = sum(
                    ((j.shape[i] // 3, 3) for i in range(ext_dim)),
                    ()
                ) + j.shape[-2:]
                j = j.reshape(shp)
                if dummies is not None:
                    # Drop dummy-atom entries along every expanded atom axis.
                    for i in range(ext_dim):
                        j = np.take(j, main_excludes, axis=2*i)
                # j.shape[:i]
                # + (j.shape[i] // 3, 3)
                # + j.shape[i+1:]
                # )
                raw_jacs.append(j)
            jacs = raw_jacs
            return jacs
        finally:
            # Restore the caller's reembed setting even if conversion failed.
            if remb is not None:
                self.converter_options['reembed'] = remb
class MolecularCartesianCoordinateSystem(CartesianCoordinateSystem):
    """
    Mirrors the standard Cartesian coordinate system in _almost_ all regards, but forces an embedding
    """
    name= "MolecularCartesians"
    def __init__(self, molecule, converter_options=None, **opts):
        """
        :param molecule: the molecule whose geometry defines the embedding
        :type molecule: AbstractMolecule
        :param converter_options: options forwarded to coordinate converters
        :type converter_options: dict | None
        :param opts: treated as converter options when none are given explicitly
        :type opts: dict
        """
        self.molecule = molecule #type: AbstractMolecule
        nats = len(self.molecule.atoms)
        if converter_options is None:
            # With no explicit converter options, the kwargs are the options.
            converter_options = opts
            opts = {}
        super().__init__(converter_options=converter_options, dimension=(nats, 3), opts=opts)
    def pre_convert(self, system):
        # Refresh the embedding before any conversion so origins/axes track
        # the molecule's current geometry.
        self.set_embedding()
    def set_embedding(self):
        """
        Sets up the embedding options...
        :return:
        :rtype:
        """
        molecule = self.molecule
        com = molecule.center_of_mass
        axes = molecule.inertial_axes
        converter_options = self.converter_options
        if 'ordering' in converter_options:
            # Point the first rows of the Z-matrix spec at the embedding
            # reference points (negative indices denote origin/axis dummies).
            # NOTE(review): the assignments ([0,2]=-2, [0,3]=-1) differ from the
            # Z-matrix variant ([0,2]=-1, [0,3]=-2) -- confirm the asymmetry
            # is intentional.
            ordering = np.array(converter_options['ordering'], dtype=int)
            ordering[0, 1] = -3; ordering[0, 2] = -2; ordering[0, 3] = -1
            ordering[1, 2] = -1; ordering[1, 3] = -2
            ordering[2, 3] = -2
            converter_options['ordering'] = ordering
            first = ordering[0, 0]
        else:
            first = 0
        first_pos = molecule.coords[first]
        # Choose the two inertial axes least degenerate w.r.t. the first atom.
        axes, ax_names, ax_choice = _get_best_axes(first_pos, axes)
        converter_options['origins'] = com
        converter_options['axes'] = axes
        converter_options['axes_labels'] = ax_names
        converter_options['axes_choice'] = ax_choice
        converter_options['molecule'] = molecule
    def jacobian(self,
                 coords,
                 system,
                 strip_dummies=None,
                 converter_options=None,
                 analytic_deriv_order=None,
                 **kwargs
                 ):
        """Compute coordinate Jacobians with the molecular embedding applied.

        :param coords: Cartesian coordinates to differentiate at
        :param system: target coordinate system
        :param strip_dummies: whether to drop dummy-atom entries from results;
            falls back to the stored converter option, else False
        :param analytic_deriv_order: number of derivative orders computed
            analytically; falls back to the stored converter option, else 0
        :return: list of Jacobian arrays with flattened atom axes expanded
            to (n_atoms, 3) pairs
        """
        if converter_options is None:
            converter_options = {}
        merged_convert_options = dict(self.converter_options, **converter_options)
        try:
            strip_dummies = merged_convert_options['strip_dummies'] if strip_dummies is None else strip_dummies
        except KeyError:
            strip_dummies = False
        try:
            analytic_deriv_order = merged_convert_options['analytic_deriv_order'] if analytic_deriv_order is None else analytic_deriv_order
        except KeyError:
            analytic_deriv_order = 0
        if strip_dummies:
            dummies = self.molecule.dummy_positions
            if len(dummies) == 0:
                dummies = None
        else:
            dummies = None
        if dummies is not None:
            # Indices of the real (non-dummy) atoms to keep.
            main_excludes = np.setdiff1d(
                np.arange(self.molecule.num_atoms),
                dummies
            )
        else:
            main_excludes = None
        jacs = super().jacobian(coords, system, analytic_deriv_order=analytic_deriv_order, converter_options=converter_options, **kwargs)
        raw_jacs = []
        for n,j in enumerate(jacs): # this expects a full filling of the jacobians which maybe I need to not expect...
            # Leading axes beyond the baseline are flattened (n_atoms*3) axes
            # that get expanded into (n_atoms, 3) pairs.
            baseline = 2*analytic_deriv_order + len(coords.shape)
            ext_dim = j.ndim - baseline
            shp = sum(
                ((j.shape[i] // 3, 3) for i in range(ext_dim)),
                ()
            ) + j.shape[-baseline:]
            j = j.reshape(shp)
            if dummies is not None:
                # Drop dummy-atom entries on expanded leading axes and on the
                # analytic-derivative trailing axes.
                for i in range(ext_dim):
                    j = np.take(j, main_excludes, axis=2*i)
                for i in range(analytic_deriv_order):
                    j = np.take(j, main_excludes, axis=-2*(i+2))
            if len(coords.shape) > 2:
                # Move the structure batch axis to the front.
                j = np.moveaxis(j, -3, 0)
            raw_jacs.append(j)
        jacs = raw_jacs
        return jacs
class MolecularCartesianToZMatrixConverter(CoordinateSystemConverter):
"""
...
"""
types = (MolecularCartesianCoordinateSystem, MolecularZMatrixCoordinateSystem)
    def convert(self, coords, molecule=None, origins=None, axes=None, ordering=None, **kwargs):
        """
        Converts from Cartesian to ZMatrix coords, preserving the embedding

        :param coords: a single structure's Cartesian coordinates
        :type coords: CoordinateSet
        :param molecule: molecule providing atoms/embedding info
        :type molecule:
        :param origins: embedding origin
        :type origins:
        :param axes: embedding axes
        :type axes:
        :param ordering: Z-matrix ordering spec
        :type ordering:
        :param kwargs: forwarded to convert_many
        :type kwargs:
        :return: (Z-matrix coordinates, conversion options)
        :rtype:
        """
        # Delegate to the batched implementation on a singleton batch, then
        # strip the leading batch axis from the result.
        zmcs, opts = self.convert_many(np.array([coords]),
                                       molecule=molecule, origins=origins, axes=axes, ordering=ordering, **kwargs)
        zmcs = zmcs[0]
        if 'derivs' in opts:
            # Strip the batch axis from each derivative array as well.
            derivs = opts['derivs']
            reshaped_derivs = [None] * len(derivs)
            for i, v in enumerate(derivs):
                reshaped_derivs[i] = v[0]
            opts['derivs'] = reshaped_derivs
        return zmcs, opts
def convert_many(self, coords,
molecule=None,
origins=None, axes=None,
ordering=None,
strip_embedding=True,
strip_dummies=False,
**kwargs):
"""
Converts from Cartesian to ZMatrix coords, preserving the embedding
:param coords: coordinates in Cartesians to convert
:type coords: np.ndarray
:param molecule:
:type molecule: AbstractMolecule
:param origins: the origin for each individual structure
:type origins: np.ndarray
:param axes: the axes for each structure
:type axes: np.ndarray
:param ordering: the Z-matrix ordering spec
:type ordering:
:param strip_embedding: whether to strip the embedding coordinates
:type strip_embedding:
:param strip_dummies: whether to strip all dummy coordinates
:type strip_dummies:
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
n_sys = coords.shape[0]
n_coords = coords.shape[1]
n_atoms = len(molecule.atoms)
# we add three dummy atoms at the origins and along the axes before doing the conversion
if origins.ndim == 1:
origins = np.broadcast_to(origins[np.newaxis, np.newaxis], (n_sys, 1, 3))
elif origins.ndim == 2:
origins = origins[:, np.newaxis, :]
if axes.ndim == 2:
axes = np.broadcast_to(axes[np.newaxis], (n_sys, 2, 3))
if origins.shape[0] != n_sys:
if n_sys % origins.shape[0] != 0:
raise ValueError("inconsistent shapes; origins shape {} but coords shape {}".format(
origins.shape,
coords.shape
))
num_coords = n_sys // origins.shape[0]
origins = np.broadcast_to(origins[:, np.newaxis, :, :], (origins.shape[0], num_coords) + origins.shape[1:])
origins = origins.reshape((n_sys,) + origins.shape[2:])
if axes.shape[0] != n_sys:
if n_sys % axes.shape[0] != 0:
raise ValueError("inconsistent shapes; axes shape {} but coords shape {}".format(
axes.shape,
coords.shape
))
num_coords = n_sys // axes.shape[0]
axes = np.broadcast_to(axes[:, np.newaxis, :, :], (axes.shape[0], num_coords) + axes.shape[1:])
axes = axes.reshape((n_sys,) + axes.shape[2:])
coords = np.concatenate([origins, origins+axes, coords], axis=1)
if ordering is not None:
ordering = np.array(ordering, dtype=int)
ordering[0, 1] = -3; ordering[0, 2] = -2; ordering[0, 3] = -1
ordering[1, 2] = -2; ordering[1, 3] = -1
ordering[2, 3] = -1
ordering = ordering + 3
ordering = np.concatenate([ [[0, -1, -1, -1], [1, 0, -1, -1], [2, 0, 1, -1]], ordering])
# print("...?", ordering)
res = CoordinateSet(coords, CartesianCoordinates3D).convert(ZMatrixCoordinates,
ordering=ordering,
origins=origins,
axes=axes,
**kwargs
)
if isinstance(res, tuple):
zmcs, opts = res
else:
zmcs = res
opts=res.converter_options
opts['ordering'] = opts['ordering'][3:] - 3
# zmcs = zmcs[:, 2:]
if strip_dummies:
dummies = [0, 1, 2] + [x+3 for x in molecule.dummy_positions] # add on axes
elif strip_embedding:
dummies = [0, 1, 2]
else:
dummies = None
if dummies is not None:
main_excludes = np.setdiff1d(
np.arange(len(molecule.atoms) + 3),
dummies
)
sub_excludes = main_excludes - 1 # drop one fewer terms to drop I think...
if 'derivs' in opts:
derivs = opts['derivs']
reshaped_derivs = [None] * len(derivs)
deriv_excludes = np.arange(3, len(molecule.atoms) + 3)
for i, v in enumerate(derivs):
# drop all terms relating to the embedding of the embedding
start_dim = v.ndim - 2*(i+2)
for j in range(start_dim, v.ndim-2, 2):
v = np.take(v, deriv_excludes, axis=j)
v = np.take(v, sub_excludes, axis=-2)
reshaped_derivs[i] = v
opts['derivs'] = reshaped_derivs
zmcs = zmcs[..., sub_excludes, :]
# raise Exception(derivs.shape)
return zmcs, opts
# The class is replaced by a registered singleton instance: the converter
# framework dispatches to instances, so shadowing the class name is intentional.
MolecularCartesianToZMatrixConverter = MolecularCartesianToZMatrixConverter()
MolecularCartesianToZMatrixConverter.register(CoordinateSystemConverters)
class MolecularCartesianToRegularCartesianConverter(CoordinateSystemConverter):
    """
    Pass-through converter: molecule-attached Cartesian coordinates are already
    plain Cartesians, so conversion leaves the values untouched.
    """
    types = (MolecularCartesianCoordinateSystem, CartesianCoordinateSystem)

    def convert(self, coords, **kw):
        """Return a single structure unchanged, along with the conversion options."""
        return coords, kw

    def convert_many(self, coords, **kwargs):
        """Return a batch of structures unchanged, along with the conversion options."""
        return coords, kwargs
# Singleton-instance registration (class name intentionally shadowed).
# NOTE(review): register() is called without an explicit registry here, unlike
# the Cartesian->ZMatrix converter -- confirm the default registry is intended.
MolecularCartesianToRegularCartesianConverter = MolecularCartesianToRegularCartesianConverter()
MolecularCartesianToRegularCartesianConverter.register()
class MolecularZMatrixToCartesianConverter(CoordinateSystemConverter):
    """
    Converts molecule-attached Z-matrix coordinates back to molecule-attached
    Cartesians, optionally re-embedding the resulting structures in the
    molecule's reference frame.
    """
    types = (MolecularZMatrixCoordinateSystem, MolecularCartesianCoordinateSystem)

    def convert(self, coords, **kw):
        """Convert a single structure by delegating to convert_many."""
        total_points, opts = self.convert_many(coords[np.newaxis], **kw)
        return total_points[0], opts

    def convert_many(self, coords, molecule=None, origins=None, axes=None, ordering=None,
                     reembed=False, axes_choice=None, return_derivs=None,
                     strip_dummies=False,
                     strip_embedding=True,
                     planar_ref_tolerance=None,
                     **kwargs):
        """
        Converts from Cartesian to ZMatrix coords, attempting to preserve the embedding
        """
        from .Molecule import Molecule
        n_sys = coords.shape[0]
        n_coords = coords.shape[1]
        n_atoms = len(molecule.atoms)
        if n_coords != n_atoms + 2:
            # means we already added the embedding
            if n_coords != n_atoms:
                # NOTE(review): the message says '<' but the check is '!=';
                # the intent appears to be "any mismatch" -- confirm.
                raise ValueError('Embedding unclear when num_coords ({}) < num_atoms ({})'.format(
                    n_coords,
                    n_atoms
                ))
            # Rebuild the two embedding rows (axis lengths + inter-axis angle)
            # so the underlying converter sees the fully embedded system.
            x_ax = axes[..., 0, :]
            y_ax = axes[..., 1, :]
            extra_norms0 = nput.vec_norms(x_ax)
            extra_norms1 = nput.vec_norms(y_ax)
            extra_angles, _ = nput.vec_angles(x_ax, y_ax)
            extra_coords = np.zeros((n_sys, 2, 3))
            extra_coords[..., 0, 0] = extra_norms0
            extra_coords[..., 1, 0] = extra_norms1
            extra_coords[..., 1, 1] = extra_angles
            coords = np.concatenate([extra_coords, coords], axis=-2)
            if ordering is not None:
                # Shift user ordering past the three embedding points.
                ordering = np.array(ordering, dtype=int)
                ordering = ordering + 3
                ordering = np.concatenate([ [[0, -1, -1, -1], [1, 0, -1, -1], [2, 0, 1, -1]], ordering])

        # Derivatives are refused when re-embedding a batch of structures.
        refuse_derivs = reembed and coords.squeeze().ndim != 2
        res = CoordinateSet(coords, ZMatrixCoordinates).convert(CartesianCoordinates3D,
                                                                ordering=ordering,
                                                                origins=origins,
                                                                axes=axes,
                                                                return_derivs=(return_derivs and not refuse_derivs),
                                                                **kwargs)

        if isinstance(res, tuple):
            carts, opts = res
        else:
            carts = res
            opts = res.converter_options

        if reembed:
            if molecule is None:
                raise ValueError("can't reembed without a reference structure")
            embed_carts = carts[..., 3:, :]
            # Only actually re-embed if the result differs from the reference.
            reembed = not (
                carts.squeeze().ndim == 2 and
                np.allclose(molecule.coords, embed_carts, atol=1.0e-5)
            ) # agree to like a ten thousandth of an angstrom
            if reembed:
                if not return_derivs:
                    embed_carts = molecule.embed_coords(embed_carts, planar_ref_tolerance=planar_ref_tolerance)
                    carts = np.concatenate([
                        carts[..., :3, :],
                        embed_carts
                        ],
                        axis=-2
                    )
                else:
                    # Re-run the conversion in the principal-axis frame so the
                    # derivatives come out in the embedded frame directly.
                    inert_coords, coord_coms, coord_axes = Molecule(molecule.atoms, embed_carts).principle_axis_data
                    if axes_choice is None:
                        axes_choice = (0, 1)
                    guh = self.convert_many(coords,
                                            origins=coord_coms,
                                            axes=coord_axes[:, axes_choice],
                                            molecule=molecule,
                                            reembed=False,
                                            ordering=ordering,
                                            return_derivs=return_derivs,
                                            axes_choice=axes_choice,
                                            **kwargs
                                            )
                    return guh

        opts['origins'] = origins
        opts['axes'] = axes
        if ordering is not None:
            opts['ordering'] = ordering[3:] - 3
        if strip_dummies:
            # raise Exception("wwwwaaaaaaaaat")
            dummies = [0, 1, 2] + [x + 3 for x in molecule.dummy_positions] # add on axes
        elif strip_embedding:
            dummies = [0, 1, 2]
        else:
            dummies = None

        if dummies is not None:
            main_excludes = np.setdiff1d(
                np.arange(len(molecule.atoms) + 3),
                dummies
            )
            sub_excludes = main_excludes - 1 # drop one fewer terms to drop I think...
            if 'derivs' in opts:
                derivs = opts['derivs']
                reshaped_derivs = [None] * len(derivs)
                deriv_excludes = np.arange(3, len(molecule.atoms) + 3)
                for i, v in enumerate(derivs):
                    # drop all terms relating to the embedding of the embedding
                    start_dim = v.ndim - i
                    for j in range(start_dim, v.ndim, 2):
                        v = np.take(v, deriv_excludes, axis=j)
                    v = np.take(v, sub_excludes, axis=-2)
                    reshaped_derivs[i] = v
                opts['derivs'] = reshaped_derivs

            carts = carts[..., main_excludes, :]
        return carts, opts
# Singleton-instance registration (class name intentionally shadowed).
MolecularZMatrixToCartesianConverter = MolecularZMatrixToCartesianConverter()
MolecularZMatrixToCartesianConverter.register()
class MolecularZMatrixToRegularZMatrixConverter(CoordinateSystemConverter):
    """
    Pass-through converter: molecule-attached Z-matrix values already are plain
    Z-matrix coordinates, so conversion leaves them untouched.
    """
    types = (MolecularZMatrixCoordinateSystem, ZMatrixCoordinateSystem)

    def convert(self, coords, **kw):
        """Return a single structure unchanged, along with the conversion options."""
        return coords, kw

    def convert_many(self, coords, **kwargs):
        """Return a batch of structures unchanged, along with the conversion options."""
        return coords, kwargs
# Singleton-instance registration (class name intentionally shadowed).
MolecularZMatrixToRegularZMatrixConverter = MolecularZMatrixToRegularZMatrixConverter()
MolecularZMatrixToRegularZMatrixConverter.register()
| Psience/Molecools/CoordinateSystems.py | 23,570 | Mirrors the standard Cartesian coordinate system in _almost_ all regards, but forces an embedding
...
...
Mirrors the standard ZMatrix coordinate system in _almost_ all regards, but forces an embedding
...
...
:param molecule:
:type molecule: AbstractMolecule
:param converter_options:
:type converter_options:
:param opts:
:type opts:
:param molecule:
:type molecule: AbstractMolecule
:param converter_options:
:type converter_options:
:param opts:
:type opts:
Determine the best pair of inertial axes so that we don't get large-scale breakdowns from the choice of embedding
:param first_pos:
:type first_pos:
:param axes:
:type axes:
:return:
:rtype:
Converts from Cartesian to ZMatrix coords, preserving the embedding
:param coords:
:type coords: CoordinateSet
:param molecule:
:type molecule:
:param origins:
:type origins:
:param axes:
:type axes:
:param ordering:
:type ordering:
:param kwargs:
:type kwargs:
:return:
:rtype:
Converts from Cartesian to ZMatrix coords, preserving the embedding
:param coords: coordinates in Cartesians to convert
:type coords: np.ndarray
:param molecule:
:type molecule: AbstractMolecule
:param origins: the origin for each individual structure
:type origins: np.ndarray
:param axes: the axes for each structure
:type axes: np.ndarray
:param ordering: the Z-matrix ordering spec
:type ordering:
:param strip_embedding: whether to strip the embedding coordinates
:type strip_embedding:
:param strip_dummies: whether to strip all dummy coordinates
:type strip_dummies:
:param kwargs:
:type kwargs:
:return:
:rtype:
Converts from Cartesian to ZMatrix coords, preserving the embedding
Converts from Cartesian to ZMatrix coords, attempting to preserve the embedding
Sets up the embedding options...
:return:
:rtype:
Defines useful extended internal coordinate frames
not chilling at the origin... check if it lies along an axis or is perpendicular to an axis lies in the A/C plane lies in the A/B plane lies in the B/C plane not in any of the planes so no issues j.shape[:i] + (j.shape[i] // 3, 3) + j.shape[i+1:] )type: AbstractMolecule this expects a full filling of the jacobians which maybe I need to not expect... we add three dummy atoms at the origins and along the axes before doing the conversion print("...?", ordering) zmcs = zmcs[:, 2:] add on axes drop one fewer terms to drop I think... drop all terms relating to the embedding of the embedding raise Exception(derivs.shape) means we already added the embedding agree to like a ten thousandth of an angstrom raise Exception("wwwwaaaaaaaaat") add on axes drop one fewer terms to drop I think... drop all terms relating to the embedding of the embedding | 2,665 | en | 0.6465 |
# mypy: allow-untyped-defs
import os.path
from unittest.mock import patch
from tools.manifest.manifest import Manifest
from tools.wpt import testfiles
def test_getrevish_kwarg():
    # An explicit revish is passed straight through, modulo whitespace strip.
    assert testfiles.get_revish(revish="abcdef") == "abcdef"
    assert testfiles.get_revish(revish="123456\n") == "123456"
def test_getrevish_implicit():
    # Without an explicit revish, the branch point anchors the default range.
    with patch("tools.wpt.testfiles.branch_point", return_value="base"):
        assert testfiles.get_revish() == "base..HEAD"
def test_affected_testfiles():
    # Minimal manifest containing a single crashtest.
    manifest_dict = {
        "items": {
            "crashtest": {
                "a": {
                    "b": {
                        "c": {
                            "foo-crash.html": [
                                "acdefgh123456",
                                ["null", {}],
                            ]
                        }
                    }
                }
            }
        },
        "url_base": "/",
        "version": 8,
    }
    manifest = Manifest.from_json("/", manifest_dict)
    with patch("tools.wpt.testfiles.load_manifest", return_value=manifest):
        # Dependent affected tests are determined by walking the filesystem,
        # which doesn't work in our test setup. We would need to refactor
        # testfiles.affected_testfiles or have a more complex test setup to
        # support testing those.
        test_path = os.path.join(
            testfiles.wpt_root, "a", "b", "c", "foo-crash.html")
        changed_tests, _ = testfiles.affected_testfiles([test_path])
        assert changed_tests == {test_path}
def test_exclude_ignored():
    ignored_rel = [
        "resources/testharness.js",
        "resources/testharnessreport.js",
        "resources/testdriver.js",
        "resources/testdriver-vendor.js",
    ]
    changed_rel = [
        "foo/bar.html"
    ]
    ignored_abs = sorted(os.path.join(testfiles.wpt_root, p) for p in ignored_rel)
    changed_abs = sorted(os.path.join(testfiles.wpt_root, p) for p in changed_rel)
    files = ignored_rel + changed_rel

    # Passing None applies the default ignore list, splitting the two groups.
    changed, ignored = testfiles.exclude_ignored(files, None)
    assert sorted(changed) == changed_abs
    assert sorted(ignored) == ignored_abs

    # An explicit empty ignore list excludes nothing.
    changed, ignored = testfiles.exclude_ignored(files, [])
    assert sorted(changed) == sorted(changed_abs + ignored_abs)
    assert sorted(ignored) == []
| tools/wpt/tests/test_testfiles.py | 2,424 | mypy: allow-untyped-defs Dependent affected tests are determined by walking the filesystem, which doesn't work in our test setup. We would need to refactor testfiles.affected_testfiles or have a more complex test setup to support testing those. | 244 | en | 0.943593 |
#!/usr/bin/env python
"""
_Exists_
Oracle implementation of JobGroup.Exists
"""
__all__ = []
from WMCore.WMBS.MySQL.JobGroup.Exists import Exists as ExistsJobGroupMySQL
class Exists(ExistsJobGroupMySQL):
    """Oracle implementation of JobGroup.Exists; reuses the MySQL
    implementation unchanged (its SQL is presumed Oracle-compatible)."""
    pass
| src/python/WMCore/WMBS/Oracle/JobGroup/Exists.py | 219 | _Exists_
Oracle implementation of JobGroup.Exists
!/usr/bin/env python | 72 | en | 0.373402 |
from unittest import TestCase
from src.adders import HalfAdder, FullAdder, FourBitFullAdder
from tests.utils import decimal_to_boolean_list
class HalfAdderTests(TestCase):
    """Exhaustive truth-table check of the half adder."""

    # Each row: ((A, B), (S, Cout))
    TRUTH_TABLE = (
        ((False, False), (False, False)),
        ((False, True), (True, False)),
        ((True, False), (True, False)),
        ((True, True), (False, True)),
    )

    def setUp(self):
        self.half_adder = HalfAdder()

    def test_truth_table(self):
        for inputs, expected in self.TRUTH_TABLE:
            assert self.half_adder.set_inputs(*inputs) == expected
class FullAdderTests(TestCase):
    """Exhaustive truth-table check of the single-bit full adder."""

    # Each row: ((A, B, Cin), (S, Cout))
    TRUTH_TABLE = (
        ((False, False, False), (False, False)),
        ((False, False, True), (True, False)),
        ((False, True, False), (True, False)),
        ((False, True, True), (False, True)),
        ((True, False, False), (True, False)),
        ((True, False, True), (False, True)),
        ((True, True, False), (False, True)),
        ((True, True, True), (True, True)),
    )

    def setUp(self):
        self.full_adder = FullAdder()

    def test_truth_table(self):
        for inputs, expected in self.TRUTH_TABLE:
            assert self.full_adder.set_inputs(*inputs) == expected
class FourBitFullAdderTests(TestCase):
    """Exhaustive check of the 4-bit adder over all 256 addend pairs."""

    def setUp(self):
        self.full_adder = FourBitFullAdder()
        # The 4-bit truth table is huge, so generate it: every pair of
        # addends 0..15 mapped to its 5-bit boolean sum, e.g. entries for
        # 4 + 3 = 7 and 6 + 7 = 13.
        self.TRUTH_TABLE = [
            (
                (decimal_to_boolean_list(a, padding=4), decimal_to_boolean_list(b, padding=4)),
                decimal_to_boolean_list(a + b, padding=5),
            )
            for a in range(16)
            for b in range(16)
        ]

    def test_truth_table(self):
        for (value_1, value_2), expected in self.TRUTH_TABLE:
            # Carry-in is always 0 (False) for the generated cases.
            assert self.full_adder.set_inputs(value_1, value_2, False) == expected
        # 15 + 15 with carry-in set overflows to 31: all five output bits high.
        assert (
            self.full_adder.set_inputs(
                value_1=(True, True, True, True),
                value_2=(True, True, True, True),
                carry_in=True,
            )
            == (True, True, True, True, True)
        )
| tests/test_adders.py | 2,880 | A B S Cout A B Cin S Cout Generate the truth table, since it is HUGE for a 4 bit adder Note: it will generate items like: (((False, True, False, False), (False, False, True, True)), (False, False, True, True, True)) and (((False, True, True, False), (False, True, True, True)), (False, True, True, False, True)) for 4 + 3 = 7 and 6 + 7 = 13, respectively Note, generate the inputs arguments by setting both addends and the carry in (which is always 0 *false*) Test adding 15+15 with a carry in, which will result in 31 | 564 | en | 0.548788 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from scrapy.spiders import Spider
from scrapy.spiders import Request
import json
from hexun.items import HexunItem
from utils.urlUtils import UrlUtils
from utils.dateTimeUtils import DateTimeUtils
class PPSpider(Spider):
    """Spider that scrapes minute-level quote data for DCE polypropylene (pp)
    futures contracts from hexun.com, yielding one HexunItem per minute bar."""
    name = 'pp'
    # {0}: contract code suffix, {1}: start timestamp of the requested range
    urlTemplate = 'http://webftcn.hermes.hexun.com/shf/minute?code=DCEpp{0}&start={1}&number=225&t=1513835351321'
    # start_requests() builds all URLs, so no static start URLs are needed.
    start_urls = [
    ]
    # Bug fix: allowed_domains entries must be bare domain suffixes; scrapy's
    # offsite filtering does no '*' globbing, so '*.hexun.com' matched no host
    # and requests to *.hexun.com subdomains were filtered as off-site.
    allowed_domains = ['hexun.com']

    def start_requests(self):
        """Generate one minute-data request per active contract."""
        contractList = DateTimeUtils.getContractList()
        for contract in contractList:
            url = self.urlTemplate.format(contract, DateTimeUtils.getStartTime())
            yield Request(url=url, callback=self.parseItem)

    def parseItem(self, response):
        """Parse the JSONP-style payload and yield one item per data row."""
        # The endpoint wraps its JSON in '(...);' padding -- strip it first.
        jsonData = json.loads(response.body_as_unicode().strip(';').strip('(').strip(')'))
        datas = jsonData['Data'][0]
        contractName = self.getContractName(response)
        for dataItem in datas:
            lldpeItem = HexunItem()
            lldpeItem['product'] = contractName
            lldpeItem['dateTime'] = dataItem[0]
            lldpeItem['price'] = dataItem[1]
            lldpeItem['amount'] = dataItem[2]
            lldpeItem['volumn'] = dataItem[3]
            lldpeItem['avePrice'] = dataItem[4]
            lldpeItem['openInterest'] = dataItem[5]
            yield lldpeItem

    def getContractName(self, response):
        """Return spider name + the 4-digit contract code parsed from the URL."""
        code = UrlUtils.getQueryValue(response.url, 'code')[-4:]
        return self.name + code
| hexun/hexun/spiders/ppSpider.py | 1,509 | !/usr/bin/python -*- coding: UTF-8 -*- | 38 | en | 0.437977 |
# Standard Library
import copy
import json
import re
from .log_helper import default_logger as logger
def format_cfg(cfg):
    """Format experiment config for friendly display"""

    def stringify_lists(node):
        # Render numeric (or empty) lists as single-line strings so json.dumps
        # does not spread them over many lines; recurse into dicts and lists
        # of dicts.
        for key, value in node.items():
            if isinstance(value, dict):
                node[key] = stringify_lists(value)
            elif isinstance(value, list):
                if len(value) == 0 or isinstance(value[0], (int, float)):
                    node[key] = str(value)
                else:
                    for idx, element in enumerate(value):
                        if isinstance(element, dict):
                            value[idx] = stringify_lists(element)
                    node[key] = value
        return node

    # Work on a deep copy so the caller's config is never mutated.
    pretty = stringify_lists(copy.deepcopy(cfg))
    lines = json.dumps(pretty, indent=2, ensure_ascii=False).split("\n")
    # Strip JSON quoting and trailing artifacts for a cleaner, YAML-ish look.
    lines = [re.sub(r"(\"|(!\],$)|\s$)", "", line) for line in lines]
    return "\n".join(line.rstrip() for line in lines if line.strip())
def is_number(num):
    """Return True iff *num* (a string) parses as a decimal number.

    Bug fix: the previous pattern '^A|B$' anchored only the first alternative
    at the start and the second at the end, so partial matches such as
    '1.2.3' or '--5.0' were accepted and later crashed float() in
    try_decode().  The pattern is now fully anchored; an optional exponent is
    allowed so values like '1.2e5' (accepted before via prefix match) still
    pass.
    """
    pattern = re.compile(r'^[-+]?(\d+\.?\d*|\.\d+)([eE][-+]?\d+)?$')
    return pattern.match(num) is not None
def try_decode(val):
    """Decode a command-line override string to bool, int, float, or str.

    'TRUE'/'FALSE' (any case) become booleans, pure digit strings become
    ints, number-like strings become floats, anything else stays a string.
    """
    if val.upper() == 'FALSE':
        return False
    elif val.upper() == 'TRUE':
        return True
    if val.isdigit():
        return int(val)
    if is_number(val):
        # Robustness fix: is_number's regex may accept near-numbers that
        # float() rejects (e.g. '1.2.3'); fall back to the raw string
        # instead of raising ValueError.
        try:
            return float(val)
        except ValueError:
            return val
    return val
def merge_opts_into_cfg(opts, cfg):
    """Merge command-line override pairs into a deep copy of *cfg*.

    ``opts`` is a flat list [key1, val1, key2, val2, ...].  Plain dotted keys
    walk (and create) nested dicts; keys of the form
    'section.Type-path.to.field' address the entry of the list
    ``cfg[section]`` whose ``type`` equals Type, creating it if absent.

    :return: the merged config (the input cfg is never mutated)
    """
    cfg = copy.deepcopy(cfg)
    if opts is None or len(opts) == 0:
        return cfg

    assert len(opts) % 2 == 0, 'opts must be a flat list of key/value pairs'
    keys, values = opts[0::2], opts[1::2]
    for key, val in zip(keys, values):
        logger.info(f'replacing {key}')
        val = try_decode(val)
        cur_cfg = cfg
        # for hooks
        if '-' in key:
            key_p, key_s = key.split('-')
            k_module, k_type = key_p.split('.')
            cur_cfg = cur_cfg[k_module]
            # Bug fix: split the sub-path once up front -- the original split
            # key_s inside the loop, which crashed with AttributeError when
            # two entries shared the same type.
            sub_path = key_s.split('.')
            flag_exist = False
            for idx in range(len(cur_cfg)):
                if cur_cfg[idx]['type'] != k_type:
                    continue
                flag_exist = True
                cur_cfg_temp = cur_cfg[idx]
                for k in sub_path[:-1]:
                    cur_cfg_temp = cur_cfg_temp.setdefault(k, {})
                cur_cfg_temp[sub_path[-1]] = val
            if not flag_exist:
                # Bug fix: record the entry's type so later overrides (and
                # the hook framework) can find the newly created entry; the
                # original appended a typeless dict, which made subsequent
                # lookups of cur_cfg[idx]['type'] raise KeyError.
                _cur_cfg = {'type': k_type}
                cur_cfg_temp = _cur_cfg
                for k in sub_path[:-1]:
                    cur_cfg_temp = cur_cfg_temp.setdefault(k, {})
                cur_cfg_temp[sub_path[-1]] = val
                cur_cfg.append(_cur_cfg)
        else:
            # Plain dotted path: create intermediate dicts as needed.
            key = key.split('.')
            for k in key[:-1]:
                cur_cfg = cur_cfg.setdefault(k, {})
            cur_cfg[key[-1]] = val
    return cfg
def upgrade_cfg(cfg):
    """Upgrade a config dict to the current schema (currently a no-op hook)."""
    # cfg = upgrade_fp16(cfg)
    return cfg
| up/utils/general/cfg_helper.py | 3,152 | Format experiment config for friendly display
bool, int, float, or str
Standard Library json_str = json.dumps(cfg, indent=2, ensure_ascii=False) return json_str json_str = [re.sub(r"(\"|,$|\{|\}|\[$|\s$)", "", line) for line in json_str if line.strip() not in "{}[]"] for hooks cfg = upgrade_fp16(cfg) | 303 | en | 0.249755 |
# Copyright 2020 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import logging
from time import sleep
from streamsets.testframework.markers import sdc_min_version
from streamsets.sdk.sdc_models import Metrics
logger = logging.getLogger(__name__)
@pytest.fixture(scope='module')
def sdc_common_hook():
    """Module-scoped data-collector hook installing the Groovy 2.4 stage lib,
    which the Groovy Evaluator stages used in this module require."""
    def configure(data_collector):
        data_collector.add_stage_lib('streamsets-datacollector-groovy_2_4-lib')

    return configure
# SDC-11777: provide way to easily see where a pipeline is when it is stuck in STARTING
@sdc_min_version('3.15.0')
def test_runner_metrics_for_init_and_destroy(sdc_builder, sdc_executor):
    """Ensure that we properly update metrics when the runner is in starting phase."""
    builder = sdc_builder.get_pipeline_builder()

    # 5-second sleeps in init/process/destroy keep the runner in 'Starting'
    # long enough for the polling loop below to observe that state.
    SLEEP_SCRIPT = "sleep(5*1000)"

    # Super simple cluster pipeline
    source = builder.add_stage('Dev Data Generator')

    groovy = builder.add_stage('Groovy Evaluator', type='processor')
    groovy.init_script = SLEEP_SCRIPT
    groovy.destroy_script = SLEEP_SCRIPT
    groovy.script = SLEEP_SCRIPT

    trash = builder.add_stage('Trash')

    source >> groovy >> trash
    pipeline = builder.build()
    sdc_executor.add_pipeline(pipeline)

    # Start the pipeline, it should take at least 5 seconds (since the sleep) and we check that at least once
    # we have seen the metrics we're looking for.
    sdc_executor.start_pipeline(pipeline, wait=False)

    count = 0
    while True:
        # TLKT-468: SDC object doesn't expose get_pipeline_metrics method
        metrics_json = sdc_executor.api_client.get_pipeline_metrics(pipeline.id)
        if metrics_json:
            metrics = Metrics(metrics_json)
            logger.info(f"Detected runtime gauge state {metrics.gauge('runner.0.gauge').value['state']}")
            if metrics.gauge('runner.0.gauge').value['state'] == 'Starting':
                count += 1

        status = sdc_executor.get_pipeline_status(pipeline).response.json()
        sleep(0.5)
        # NOTE(review): polls until the pipeline reaches RUNNING; relies on the
        # surrounding test-framework timeout if it never gets there.
        if status.get('status') == 'RUNNING':
            break

    # The 'Starting' runner state must have been observed at least once.
    assert count > 0
    sdc_executor.stop_pipeline(pipeline)
| pipeline/test_metrics.py | 2,643 | Ensure that we properly update metrics when the runner is in starting phase.
Copyright 2020 StreamSets Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. SDC-11777: provide way to easily see where a pipeline is when it is stuck in STARTING Super simple cluster pipeline Start the pipeline, it should take at least 5 seconds (since the sleep) and we check that at least once we have seen the metrics we're looking for. TLKT-468: SDC object doesn't expose get_pipeline_metrics method | 959 | en | 0.882916 |
#!/usr/bin/env python
#
# This program shows how to use MPI_Alltoall. Each processor
# send/rec a different random number to/from other processors.
#
# numpy is required
import numpy
from numpy import *
# mpi4py module
from mpi4py import MPI
import sys
def myquit(mes):
    """Finalize MPI, print a diagnostic message, and exit the script.

    NOTE: not invoked in the normal flow of this example; provided as an
    error-exit helper.
    """
    MPI.Finalize()
    print(mes)
    sys.exit()
# Initialize MPI and print out hello
comm=MPI.COMM_WORLD
myid=comm.Get_rank()        # this process's rank
numprocs=comm.Get_size()    # total number of ranks in the job
print("hello from ",myid," of ",numprocs)

# We are going to send/recv a single value to/from
# each processor. Here we allocate arrays ("i" is the C-int dtype code)
s_vals=zeros(numprocs,"i")
r_vals=zeros(numprocs,"i")

# Fill the send arrays with random numbers; seeding with the rank makes
# each rank's values distinct and the run reproducible
random.seed(myid)
for i in range(0, numprocs):
    s_vals[i]=random.randint(1,10)

print("myid=",myid,"s_vals=",s_vals)

# Send/recv to/from all: across ranks, r_vals is the transpose of s_vals
comm.Alltoall(s_vals, r_vals)

print("myid=",myid,"r_vals=",r_vals)

MPI.Finalize()
# Note, the sent values and the recv values are
# like a transpose of each other
#
# mpiexec -n 4 ./P_ex07.py | grep s_v | sort
# myid= 0 s_vals= [6 1 4 4]
# myid= 1 s_vals= [6 9 6 1]
# myid= 2 s_vals= [9 9 7 3]
# myid= 3 s_vals= [9 4 9 9]
# mpiexec -n 4 ./P_ex07.py | grep r_v | sort
# myid= 0 r_vals= [6 6 9 9]
# myid= 1 r_vals= [1 9 9 4]
# myid= 2 r_vals= [4 6 7 9]
# myid= 3 r_vals= [4 1 3 9]
| array/bot/others/P_ex07.py | 1,309 | !/usr/bin/env python This program shows how to use MPI_Alltoall. Each processor send/rec a different random number to/from other processors. numpy is required mpi4py module Initialize MPI and print out hello We are going to send/recv a single value to/from each processor. Here we allocate arrays Fill the send arrays with random numbers Send/recv to/from all Note, the sent values and the recv values are like a transpose of each other mpiexec -n 4 ./P_ex07.py | grep s_v | sort myid= 0 s_vals= [6 1 4 4] myid= 1 s_vals= [6 9 6 1] myid= 2 s_vals= [9 9 7 3] myid= 3 s_vals= [9 4 9 9] mpiexec -n 4 ./P_ex07.py | grep r_v | sort myid= 0 r_vals= [6 6 9 9] myid= 1 r_vals= [1 9 9 4] myid= 2 r_vals= [4 6 7 9] myid= 3 r_vals= [4 1 3 9] | 733 | en | 0.520106 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''topology_context.py'''
import os
from collections import namedtuple
from heronpy.api.task_hook import (ITaskHook, EmitInfo, SpoutAckInfo,
SpoutFailInfo, BoltExecuteInfo,
BoltAckInfo, BoltFailInfo)
from heronpy.api.topology_context import TopologyContext
import heronpy.api.api_constants as api_constants
from heron.instance.src.python.utils.metrics import MetricsCollector
import heron.instance.src.python.utils.system_constants as system_constants
import heron.common.src.python.pex_loader as pex_loader
class TopologyContextImpl(TopologyContext):
"""Implemention of TopologyContext
This is created by Heron Instance and passed on to the topology spouts/bolts
as the topology context
"""
# pylint: disable=too-many-instance-attributes
    def __init__(self, config, topology, task_to_component, my_task_id, metrics_collector,
                 topo_pex_path):
        """Build the context from the instance's physical-plan information.

        :param config: auto-typed config map (str -> object)
        :param topology: topology definition with ``spouts`` and ``bolts``
        :param task_to_component: map of task id -> component id
        :param my_task_id: task id of the spout/bolt owning this context
        :param metrics_collector: MetricsCollector used by register_metric()
        :param topo_pex_path: path to the topology pex (made absolute here)
        """
        self.config = config
        self.topology = topology
        self.task_to_component_map = task_to_component
        self.task_id = my_task_id
        self.metrics_collector = metrics_collector
        self.topology_pex_path = os.path.abspath(topo_pex_path)

        # Pre-compute per-component stream wiring from the topology definition.
        inputs, outputs, out_fields = self._get_inputs_and_outputs_and_outfields(topology)
        self.inputs = inputs
        self.outputs = outputs
        self.component_to_out_fields = out_fields

        # init task hooks (auto-registered ones come from the cluster config)
        self.task_hooks = []
        self._init_task_hooks()
##### Implementation of interface methods #####
    def get_task_id(self):
        """Return the task id of this component instance."""
        return self.task_id
    def get_component_id(self):
        """Return the component id of this task (None if the task id is unmapped)."""
        return self.task_to_component_map.get(self.get_task_id())
    def get_cluster_config(self):
        """Returns the cluster config for this component

        Note that the returned config is auto-typed map: <str -> any Python object>.
        """
        return self.config
    def get_topology_name(self):
        """Return the name of the topology as a plain string."""
        return str(self.topology.name)
    def register_metric(self, name, metric, time_bucket_in_sec):
        """Register a new metric with this context's metrics collector.

        :raises RuntimeError: if no MetricsCollector is registered in this context
        """
        collector = self.get_metrics_collector()
        collector.register_metric(name, metric, time_bucket_in_sec)
def get_sources(self, component_id):
"""Returns the declared inputs to specified component
:return: map <streamId namedtuple (same structure as protobuf msg) -> gtype>, or
None if not found
"""
# this is necessary because protobuf message is not hashable
StreamId = namedtuple('StreamId', 'id, component_name')
if component_id in self.inputs:
ret = {}
for istream in self.inputs.get(component_id):
key = StreamId(id=istream.stream.id, component_name=istream.stream.component_name)
ret[key] = istream.gtype
return ret
else:
return None
    def get_this_sources(self):
        """Return the declared inputs of this task's own component."""
        return self.get_sources(self.get_component_id())
def get_component_tasks(self, component_id):
"""Returns the task ids allocated for the given component id"""
ret = []
for task_id, comp_id in self.task_to_component_map.items():
if comp_id == component_id:
ret.append(task_id)
return ret
    def add_task_hook(self, task_hook):
        """Registers a specified task hook to this context

        :type task_hook: heron.instance.src.python.utils.topology.ITaskHook
        :param task_hook: Implementation of ITaskHook
        :raises TypeError: if ``task_hook`` is not an ITaskHook instance
        """
        if not isinstance(task_hook, ITaskHook):
            raise TypeError("In add_task_hook(): attempt to add non ITaskHook instance, given: %s"
                            % str(type(task_hook)))
        self.task_hooks.append(task_hook)
##### Other exposed implementation specific methods #####
    def get_topology_pex_path(self):
        """Return the absolute path of the topology's pex file."""
        return self.topology_pex_path
    def get_metrics_collector(self):
        """Return this context's metrics collector.

        :raises RuntimeError: if no valid MetricsCollector was supplied
        """
        if self.metrics_collector is None or not isinstance(self.metrics_collector, MetricsCollector):
            raise RuntimeError("Metrics collector is not registered in this context")
        return self.metrics_collector
########################################
    @classmethod
    def _get_inputs_and_outputs_and_outfields(cls, topology):
        """Walk the topology's spouts and bolts and collect, per component,
        its input streams, output streams, and output field names.

        :return: tuple (inputs, outputs, out_fields) of dicts keyed by component name
        """
        inputs = {}
        outputs = {}
        out_fields = {}
        for spout in topology.spouts:
            inputs[spout.comp.name] = []  # spout doesn't have any inputs
            outputs[spout.comp.name] = spout.outputs
            out_fields.update(cls._get_output_to_comp_fields(spout.outputs))
        for bolt in topology.bolts:
            inputs[bolt.comp.name] = bolt.inputs
            outputs[bolt.comp.name] = bolt.outputs
            out_fields.update(cls._get_output_to_comp_fields(bolt.outputs))
        return inputs, outputs, out_fields
@staticmethod
def _get_output_to_comp_fields(outputs):
out_fields = {}
for out_stream in outputs:
comp_name = out_stream.stream.component_name
stream_id = out_stream.stream.id
if comp_name not in out_fields:
out_fields[comp_name] = dict()
# get the fields of a particular output stream
ret = []
for kt in out_stream.schema.keys:
ret.append(kt.key)
out_fields[comp_name][stream_id] = tuple(ret)
return out_fields
######### Task hook related ##########
    def _init_task_hooks(self):
        """Auto-register the task hooks listed under TOPOLOGY_AUTO_TASK_HOOKS.

        Hook classes are loaded from the topology pex; a class that is not an
        ITaskHook, or that fails to load, aborts with RuntimeError.
        """
        task_hooks_cls_list = self.get_cluster_config().get(api_constants.TOPOLOGY_AUTO_TASK_HOOKS,
                                                            None)
        if task_hooks_cls_list is None:
            return

        # load pex first
        topo_pex_path = self.get_topology_pex_path()
        pex_loader.load_pex(topo_pex_path)
        for class_name in task_hooks_cls_list:
            try:
                task_hook_cls = pex_loader.import_and_get_class(topo_pex_path, class_name)
                task_hook_instance = task_hook_cls()
                assert isinstance(task_hook_instance, ITaskHook)
                self.task_hooks.append(task_hook_instance)
            except AssertionError:
                raise RuntimeError("Auto-registered task hook not instance of ITaskHook")
            except Exception as e:
                raise RuntimeError("Error with loading task hook class: %s, with error message: %s"
                                   % (class_name, str(e)))
def invoke_hook_prepare(self):
    """Invoke task hooks right after the spout/bolt's initialize() method."""
    for hook in self.task_hooks:
        hook.prepare(self.get_cluster_config(), self)
def invoke_hook_cleanup(self):
    """Invoke task hooks just before the spout/bolt's cleanup method."""
    for hook in self.task_hooks:
        hook.clean_up()
def invoke_hook_emit(self, values, stream_id, out_tasks):
    """Invoke task hooks every time a tuple is emitted in spout/bolt.

    :type values: list
    :param values: values emitted
    :type stream_id: str
    :param stream_id: stream id into which tuple is emitted
    :type out_tasks: list
    :param out_tasks: list of custom grouping target task id
    """
    if not self.task_hooks:
        return
    # Build the info object once and hand it to every registered hook.
    info = EmitInfo(values=values, stream_id=stream_id,
                    task_id=self.get_task_id(), out_tasks=out_tasks)
    for hook in self.task_hooks:
        hook.emit(info)
def invoke_hook_spout_ack(self, message_id, complete_latency_ns):
    """Invoke task hooks every time the spout acks a tuple.

    :type message_id: str
    :param message_id: message id to which an acked tuple was anchored
    :type complete_latency_ns: float
    :param complete_latency_ns: complete latency in nano seconds
    """
    if not self.task_hooks:
        return
    # Latency is reported to hooks in milliseconds.
    ack_info = SpoutAckInfo(message_id=message_id,
                            spout_task_id=self.get_task_id(),
                            complete_latency_ms=complete_latency_ns *
                            system_constants.NS_TO_MS)
    for hook in self.task_hooks:
        hook.spout_ack(ack_info)
def invoke_hook_spout_fail(self, message_id, fail_latency_ns):
    """Invoke task hooks every time the spout fails a tuple.

    :type message_id: str
    :param message_id: message id to which a failed tuple was anchored
    :type fail_latency_ns: float
    :param fail_latency_ns: fail latency in nano seconds
    """
    if not self.task_hooks:
        return
    # Latency is reported to hooks in milliseconds.
    fail_info = SpoutFailInfo(message_id=message_id,
                              spout_task_id=self.get_task_id(),
                              fail_latency_ms=fail_latency_ns * system_constants.NS_TO_MS)
    for hook in self.task_hooks:
        hook.spout_fail(fail_info)
def invoke_hook_bolt_execute(self, heron_tuple, execute_latency_ns):
    """Invoke task hooks every time the bolt processes a tuple.

    :type heron_tuple: HeronTuple
    :param heron_tuple: tuple that is executed
    :type execute_latency_ns: float
    :param execute_latency_ns: execute latency in nano seconds
    """
    if not self.task_hooks:
        return
    # Latency is reported to hooks in milliseconds.
    info = BoltExecuteInfo(
        heron_tuple=heron_tuple,
        executing_task_id=self.get_task_id(),
        execute_latency_ms=execute_latency_ns * system_constants.NS_TO_MS)
    for hook in self.task_hooks:
        hook.bolt_execute(info)
def invoke_hook_bolt_ack(self, heron_tuple, process_latency_ns):
    """Invoke task hooks every time the bolt acks a tuple.

    :type heron_tuple: HeronTuple
    :param heron_tuple: tuple that is acked
    :type process_latency_ns: float
    :param process_latency_ns: process latency in nano seconds
    """
    if not self.task_hooks:
        return
    # Latency is reported to hooks in milliseconds.
    info = BoltAckInfo(heron_tuple=heron_tuple,
                       acking_task_id=self.get_task_id(),
                       process_latency_ms=process_latency_ns * system_constants.NS_TO_MS)
    for hook in self.task_hooks:
        hook.bolt_ack(info)
def invoke_hook_bolt_fail(self, heron_tuple, fail_latency_ns):
    """Invoke task hooks every time the bolt fails a tuple.

    :type heron_tuple: HeronTuple
    :param heron_tuple: tuple that is failed
    :type fail_latency_ns: float
    :param fail_latency_ns: fail latency in nano seconds
    """
    if not self.task_hooks:
        return
    # Latency is reported to hooks in milliseconds.
    info = BoltFailInfo(heron_tuple=heron_tuple,
                        failing_task_id=self.get_task_id(),
                        fail_latency_ms=fail_latency_ns * system_constants.NS_TO_MS)
    for hook in self.task_hooks:
        hook.bolt_fail(info)
| heron/instance/src/python/utils/topology/topology_context_impl.py | 11,290 | Implementation of TopologyContext
This is created by Heron Instance and passed on to the topology spouts/bolts
as the topology context
Registers a specified task hook to this context
:type task_hook: heron.instance.src.python.utils.topology.ITaskHook
:param task_hook: Implementation of ITaskHook
Returns the cluster config for this component
Note that the returned config is auto-typed map: <str -> any Python object>.
Property to get the component id of this component
Returns the task ids allocated for the given component id
Returns this context's metrics collector
Returns the declared inputs to specified component
:return: map <streamId namedtuple (same structure as protobuf msg) -> gtype>, or
None if not found
Property to get the task id of this component
Returns the name of the topology
Returns the topology's pex file path
invoke task hooks for every time bolt acks a tuple
:type heron_tuple: HeronTuple
:param heron_tuple: tuple that is acked
:type process_latency_ns: float
:param process_latency_ns: process latency in nano seconds
invoke task hooks for every time bolt processes a tuple
:type heron_tuple: HeronTuple
:param heron_tuple: tuple that is executed
:type execute_latency_ns: float
:param execute_latency_ns: execute latency in nano seconds
invoke task hooks for every time bolt fails a tuple
:type heron_tuple: HeronTuple
:param heron_tuple: tuple that is failed
:type fail_latency_ns: float
:param fail_latency_ns: fail latency in nano seconds
invoke task hooks for just before the spout/bolt's cleanup method
invoke task hooks for every time a tuple is emitted in spout/bolt
:type values: list
:param values: values emitted
:type stream_id: str
:param stream_id: stream id into which tuple is emitted
:type out_tasks: list
:param out_tasks: list of custom grouping target task id
invoke task hooks for after the spout/bolt's initialize() method
invoke task hooks for every time spout acks a tuple
:type message_id: str
:param message_id: message id to which an acked tuple was anchored
:type complete_latency_ns: float
:param complete_latency_ns: complete latency in nano seconds
invoke task hooks for every time spout fails a tuple
:type message_id: str
:param message_id: message id to which a failed tuple was anchored
:type fail_latency_ns: float
:param fail_latency_ns: fail latency in nano seconds
Registers a new metric to this context
topology_context.py
!/usr/bin/env python -*- encoding: utf-8 -*- Copyright 2016 Twitter. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pylint: disable=too-many-instance-attributes init task hooks Implementation of interface methods this is necessary because protobuf message is not hashable Other exposed implementation specific methods spout doesn't have any inputs get the fields of a particular output stream Task hook related load pex first | 3,339 | en | 0.738891 |
'''
Created on Sep 18, 2017
@author: jschm
'''
from cs115 import map
def powerset(lst):
    """Return the power set of the list - the list of all subsets of the list.

    Subsets that omit lst[0] come first ("lose it"), then the same subsets
    with lst[0] prepended ("use it").
    """
    if not lst:
        # The only subset of the empty list is the empty list itself.
        return [[]]
    # "Lose it": every subset of the tail.
    lose_it = powerset(lst[1:])
    # "Use it": prepend the head to each tail subset.  A list comprehension
    # replaces the course-local cs115.map helper: with Python 3's builtin
    # map the original `lose_it + use_it` would fail (list + map object).
    use_it = [[lst[0]] + subset for subset in lose_it]
    return lose_it + use_it
print(powerset(['a', 'b', 'c']))
def subset(target, lst):
    """Decide whether some subset of the list sums to target.

    Values in the list can be positive, negative, or zero.  Relies on
    short-circuit ``or``: the lose-it branch is only explored when the
    use-it branch fails.
    """
    if target == 0:
        return True
    if lst == []:
        return False
    # Either spend the first value on the target, or skip it entirely.
    use_it = subset(target - lst[0], lst[1:])
    return use_it or subset(target, lst[1:])
print(subset(5,[1,3,2,4,5]))
def subset_with_values(target, lst):
    """Like subset(), but also report which values make up the sum.

    Returns a 2-tuple: a boolean that is True when some subset of lst adds
    up to target, and the list of chosen values (empty when impossible).
    Values in the list can be positive, negative, or zero.
    """
    if target == 0:
        return(True, [])
    if lst == []:
        return(False, [])
    # Try spending the first value; only fall back to skipping it on failure.
    found, chosen = subset_with_values(target - lst[0], lst[1:])
    if found:
        return(True, [lst[0]] + chosen)
    return subset_with_values(target, lst[1:])
print(subset_with_values(8, [7,2,2,2,2]))
print(subset_with_values(12, [1,2,4,9]))
"""
def LCSWithValues2(S1,S2):
if S1 == "" or S2 == "":
return (0, "")
if S1[0] == S2[0]:
result = result + S1[0]
return (1 + LCSWithValues2(S1[1:], S2[1:]), result)
useS1 = LCSWithValues2(S1, S2[1:])
useS2 = LCSWithValues2(S1[1:], S2)
if useS1[0] > useS2[0]:
return useS1
return useS2
print(LCSWithValues2("sam", "spam"))
"""
def LCSWithValues(S1,S2):
    """Return (length, string) of the longest common subsequence of S1 and S2."""
    if S1 == "" or S2 == "":
        return (0, "")
    if S1[0] == S2[0]:
        # Matching heads: the LCS starts with this character.
        tail_len, tail_str = LCSWithValues(S1[1:], S2[1:])
        return (1 + tail_len, S1[0] + tail_str)
    # Otherwise drop a character from one string or the other and keep the best.
    skip_in_s2 = LCSWithValues(S1, S2[1:])
    skip_in_s1 = LCSWithValues(S1[1:], S2)
    return skip_in_s2 if skip_in_s2[0] > skip_in_s1[0] else skip_in_s1
print(LCSWithValues("sam", "spam"))
# Unlike the commented-out LCSWithValues2 above, this version threads the
# accumulated string through the recursion instead of an undefined `result`.
def coin_row(lst):
    """Max sum obtainable from lst without picking two adjacent entries."""
    if lst == []:
        return 0
    # Either take the first coin (skipping its neighbour) or skip it.
    return max(lst[0] + coin_row(lst[2:]), coin_row(lst[1:]))
"""
if(lst == []):
return 0
return max(lst[0] + coin_row(lst[2:]), coin_row(lst[1:]))
"""
"""
if(lst == []):
return 0
use_it = lst[0] + coin_row(lst[2:])
lose_it = coin_row(lst[1:])
return max(use_it, lose_it)
This is how you set up each function^^^
and then you can make it nicer
"""
"""
if(coin_row(lst[1:])>lst[0]):
amount = coin_row(lst[1:])
return max(coin_row(lst[2:]), coin_row(lst[2:]))
"""
def coin_row_with_values(lst):
    """Like coin_row(), but also return the list of coins that were taken."""
    if lst == []:
        return [0, []]
    # "Use it": take the first coin plus the best from beyond its neighbour.
    use_total, use_coins = coin_row_with_values(lst[2:])
    use_sum = lst[0] + use_total
    # "Lose it": best result when the first coin is skipped.
    lose_it = coin_row_with_values(lst[1:])
    # Strict > keeps the original tie-breaking (prefer the lose-it answer).
    if use_sum > lose_it[0]:
        return [use_sum, [lst[0]] + use_coins]
    return lose_it
# Demo output: best non-adjacent coin sums for two sample rows.
print(coin_row([10, 5, 5, 5, 10, 10, 1, 1]))
print(coin_row_with_values([10, 5, 5, 5, 10, 50, 1, 10, 1, 1, 25]))
# Edit distance (Levenshtein); can be used as the core of a spell-checker.
def distance(first, second):
    """Return the minimum number of single-character edits turning first into second."""
    # Ran out of one word: insert everything remaining in the other.
    if first == '':
        return len(second)
    if second == '':
        return len(first)
    if first[0] == second[0]:
        # Matching heads cost nothing.
        return distance(first[1:], second[1:])
    # One edit plus the cheapest way to fix the rest.
    return 1 + min(distance(first[1:], second[1:]),   # substitution
                   distance(first[1:], second),       # deletion
                   distance(first, second[1:]))       # insertion
| use_it_or_lose_it.py | 4,429 | returns the longest common string
returns the power set of the list - the set of all subsets of the list
determines whether or not it is possible to create target sum using the
values in the list. Values in the list can be positive, negative, or zero.
Determines whether or not it is possible to create the target sum using
values in the list. Values in the list can be positive, negative, or zero.
The function returns a tuple of exactly two items. The first is a boolean,
that indicates true if the sum is possible and false if it is not. The second
element in the tuple is a list of all values that add up to make the target sum.
Created on Sep 18, 2017
@author: jschm
power set is a list of lists; this way is more efficient for getting the combinations of the characters in a list. what if target is 0? use_it = subset(target - lst[0], lst[1:]) lose_it = subset(target, lst[1:]) return use_it or lose_it ^^^the LCSWithValues2 does not work because the result variable needs to be defined, and if it is redefined it stays empty always. one line: that's the result^ only returns this once I think nevermind! print('hello') can use below as spell-checker
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for working with tf.train.SequenceExamples."""
import math
import tensorflow.compat.v1 as tf
# Capacity of the input queues built by the helpers below.
QUEUE_CAPACITY = 500
# Minimum number of elements kept in the shuffle queue (further bounded by
# the actual record count in get_padded_batch).
SHUFFLE_MIN_AFTER_DEQUEUE = QUEUE_CAPACITY // 5
def _shuffle_inputs(input_tensors, capacity, min_after_dequeue, num_threads):
    """Shuffles tensors in `input_tensors`, maintaining grouping."""
    shuffle_queue = tf.RandomShuffleQueue(
        capacity, min_after_dequeue, dtypes=[t.dtype for t in input_tensors])
    enqueue_op = shuffle_queue.enqueue(input_tensors)
    tf.train.add_queue_runner(
        tf.train.QueueRunner(shuffle_queue, [enqueue_op] * num_threads))

    dequeued = shuffle_queue.dequeue()
    # Dequeuing loses static shape information; restore it from the inputs.
    for out_tensor, in_tensor in zip(dequeued, input_tensors):
        out_tensor.set_shape(in_tensor.shape)
    return dequeued
def get_padded_batch(file_list, batch_size, input_size, label_shape=None,
                     num_enqueuing_threads=4, shuffle=False):
    """Reads batches of SequenceExamples from TFRecords and pads them.

    Can deal with variable length SequenceExamples by padding each batch to the
    length of the longest sequence with zeros.

    Args:
      file_list: A list of paths to TFRecord files containing SequenceExamples.
      batch_size: The number of SequenceExamples to include in each batch.
      input_size: The size of each input vector. The returned batch of inputs
          will have a shape [batch_size, num_steps, input_size].
      label_shape: Shape for labels. If not specified, will use [].
      num_enqueuing_threads: The number of threads to use for enqueuing
          SequenceExamples.
      shuffle: Whether to shuffle the batches.

    Returns:
      inputs: A tensor of shape [batch_size, num_steps, input_size] of floats32s.
      labels: A tensor of shape [batch_size, num_steps] of int64s.
      lengths: A tensor of shape [batch_size] of int32s. The lengths of each
          SequenceExample before padding.

    Raises:
      ValueError: If `shuffle` is True and `num_enqueuing_threads` is less than 2.
    """
    file_queue = tf.train.string_input_producer(file_list)
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(file_queue)

    sequence_features = {
        'inputs': tf.FixedLenSequenceFeature(shape=[input_size],
                                             dtype=tf.float32),
        'labels': tf.FixedLenSequenceFeature(shape=label_shape or [],
                                             dtype=tf.int64)}

    _, sequence = tf.parse_single_sequence_example(
        serialized_example, sequence_features=sequence_features)

    length = tf.shape(sequence['inputs'])[0]
    input_tensors = [sequence['inputs'], sequence['labels'], length]

    if shuffle:
        if num_enqueuing_threads < 2:
            raise ValueError(
                '`num_enqueuing_threads` must be at least 2 when shuffling.')
        # BUGFIX: take the ceiling of the *division*.  The previous code was
        # int(math.ceil(num_enqueuing_threads) / 2.) -- ceil of an int is a
        # no-op, so the truncating int() floored instead of ceiling for odd
        # thread counts (e.g. 5 threads gave 2 shuffle threads, not 3).
        shuffle_threads = int(math.ceil(num_enqueuing_threads / 2.))

        # Since there may be fewer records than SHUFFLE_MIN_AFTER_DEQUEUE, take the
        # minimum of that number and the number of records.
        min_after_dequeue = count_records(
            file_list, stop_at=SHUFFLE_MIN_AFTER_DEQUEUE)
        input_tensors = _shuffle_inputs(
            input_tensors, capacity=QUEUE_CAPACITY,
            min_after_dequeue=min_after_dequeue,
            num_threads=shuffle_threads)

        # Remaining threads feed the padded batch queue below.
        num_enqueuing_threads -= shuffle_threads

    tf.logging.info(input_tensors)
    return tf.train.batch(
        input_tensors,
        batch_size=batch_size,
        capacity=QUEUE_CAPACITY,
        num_threads=num_enqueuing_threads,
        dynamic_pad=True,
        allow_smaller_final_batch=False)
def count_records(file_list, stop_at=None):
    """Counts number of records in files from `file_list` up to `stop_at`.

    Args:
      file_list: List of TFRecord files to count records in.
      stop_at: Optional number of records to stop counting at.

    Returns:
      Integer number of records in files from `file_list` up to `stop_at`.
    """
    total = 0
    for tfrecord_file in file_list:
        tf.logging.info('Counting records in %s.', tfrecord_file)
        for _ in tf.python_io.tf_record_iterator(tfrecord_file):
            total += 1
            # Early exit once we know there are at least `stop_at` records.
            if stop_at and total >= stop_at:
                tf.logging.info('Number of records is at least %d.', total)
                return total
    tf.logging.info('Total records: %d', total)
    return total
def flatten_maybe_padded_sequences(maybe_padded_sequences, lengths=None):
    """Flattens the batch of sequences, removing padding (if applicable).

    Args:
      maybe_padded_sequences: A tensor of possibly padded sequences to flatten,
          sized `[N, M, ...]` where M = max(lengths).
      lengths: Optional length of each sequence, sized `[N]`. If None, assumes no
          padding.

    Returns:
      flatten_maybe_padded_sequences: The flattened sequence tensor, sized
          `[sum(lengths), ...]`.
    """
    def _flatten_unpadded():
        # Equal-length sequences: just collapse the first two dimensions.
        flat_shape = [-1] + maybe_padded_sequences.shape.as_list()[2:]
        return tf.reshape(maybe_padded_sequences, flat_shape)

    if lengths is None:
        return _flatten_unpadded()

    def _flatten_padded():
        # Keep only the positions covered by each sequence's length.
        valid_indices = tf.where(tf.sequence_mask(lengths))
        return tf.gather_nd(maybe_padded_sequences, valid_indices)

    # When every sequence spans the full time dimension there is no padding,
    # and the cheaper reshape path can be taken.
    return tf.cond(
        tf.equal(tf.reduce_min(lengths), tf.shape(maybe_padded_sequences)[1]),
        _flatten_unpadded,
        _flatten_padded)
| magenta/common/sequence_example_lib.py | 5,993 | Shuffles tensors in `input_tensors`, maintaining grouping.
Counts number of records in files from `file_list` up to `stop_at`.
Args:
file_list: List of TFRecord files to count records in.
stop_at: Optional number of records to stop counting at.
Returns:
Integer number of records in files from `file_list` up to `stop_at`.
Flattens the batch of sequences, removing padding (if applicable).
Args:
maybe_padded_sequences: A tensor of possibly padded sequences to flatten,
sized `[N, M, ...]` where M = max(lengths).
lengths: Optional length of each sequence, sized `[N]`. If None, assumes no
padding.
Returns:
flatten_maybe_padded_sequences: The flattened sequence tensor, sized
`[sum(lengths), ...]`.
Reads batches of SequenceExamples from TFRecords and pads them.
Can deal with variable length SequenceExamples by padding each batch to the
length of the longest sequence with zeros.
Args:
file_list: A list of paths to TFRecord files containing SequenceExamples.
batch_size: The number of SequenceExamples to include in each batch.
input_size: The size of each input vector. The returned batch of inputs
will have a shape [batch_size, num_steps, input_size].
label_shape: Shape for labels. If not specified, will use [].
num_enqueuing_threads: The number of threads to use for enqueuing
SequenceExamples.
shuffle: Whether to shuffle the batches.
Returns:
inputs: A tensor of shape [batch_size, num_steps, input_size] of floats32s.
labels: A tensor of shape [batch_size, num_steps] of int64s.
lengths: A tensor of shape [batch_size] of int32s. The lengths of each
SequenceExample before padding.
Raises:
ValueError: If `shuffle` is True and `num_enqueuing_threads` is less than 2.
Utility functions for working with tf.train.SequenceExamples.
Copyright 2022 The Magenta Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Since there may be fewer records than SHUFFLE_MIN_AFTER_DEQUEUE, take the minimum of that number and the number of records. The sequences are equal length, so we should just flatten over the first two dimensions. | 2,591 | en | 0.805205 |
"""
gaeenv
~~~~~~~
Google App Engine Virtual Environment builder.
"""
import os
from setuptools import setup, find_packages
from gaeenv.main import gaeenv_version
def read_file(file_name):
    """Return the contents of *file_name*, resolved relative to this file.

    Uses a context manager so the file handle is closed promptly (the
    original leaked an open handle) and reads as UTF-8 regardless of the
    host locale.
    """
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), file_name)
    with open(path, encoding="utf-8") as fh:
        return fh.read()
# Long description shown on PyPI: README followed by the changelog.
ldesc = read_file('README')
ldesc += "\n\n" + read_file('CHANGES')

setup(
    name='gaeenv',
    version=gaeenv_version,  # single-sourced from gaeenv.main
    url='https://github.com/llinder/gaeenv',
    license='Apache 2.0',
    author='Lance Linder',
    author_email='llinder@gmail.com',
    # BUGFIX: "Goole" -> "Google" in the user-facing package description.
    description="Google App Engine Virtualenv tools",
    long_description=ldesc,
    packages = find_packages(exclude="test"),
    install_requires = ['requests>=2.2.0'],
    entry_points={
        'console_scripts': ['gaeenv = gaeenv.main:main']
    },
    zip_safe=False,
    platforms='any',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
| setup.py | 1,214 | gaeenv
~~~~~~~
Google App Engine Virtual Environment builder. | 62 | en | 0.649132 |
# based on: https://github.com/ShiqiYu/libfacedetection.train/blob/74f3aa77c63234dd954d21286e9a60703b8d0868/tasks/task1/yufacedetectnet.py # noqa
import math
from enum import Enum
from typing import Callable, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from kornia.geometry.bbox import nms as nms_kornia
# Public API of this module.
__all__ = [
    "FaceDetector",
    "FaceDetectorResult",
    "FaceKeypoint",
]

# Released YuNet weights; downloaded on demand by YuFaceDetectNet(pretrained=True).
url: str = "https://github.com/ShiqiYu/libfacedetection.train/raw/master/tasks/task1/weights/yunet_final.pth"
class FaceKeypoint(Enum):
    r"""Define the keypoints detected in a face.

    The left/right convention is based on the screen viewer.
    Each value is the keypoint's index within the detector's 5-point
    landmark set (consumed by FaceDetectorResult.get_keypoint).
    """
    EYE_LEFT = 0
    EYE_RIGHT = 1
    NOSE = 2
    MOUTH_LEFT = 3
    MOUTH_RIGHT = 4
class FaceDetectorResult:
    r"""Encapsulate the results obtained by the :py:class:`kornia.contrib.FaceDetector`.

    Args:
        data: the encoded results coming from the feature detector with shape
            :math:`(15,)`: 4 bounding box coordinates, 5 keypoint (x, y) pairs
            and the detection score.
    """

    def __init__(self, data: torch.Tensor) -> None:
        # 14 geometry entries (indices 0-13) plus the score (index 14).
        # BUGFIX: the message previously said size(14) although the check
        # requires at least 15 entries.
        if len(data) < 15:
            raise ValueError(f"Result must come as vector of size(15). Got: {data.shape}.")
        self._data = data

    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> "FaceDetectorResult":
        """Like :func:`torch.nn.Module.to()` method."""
        self._data = self._data.to(device=device, dtype=dtype)
        return self

    @property
    def xmin(self) -> torch.Tensor:
        """The bounding box top-left x-coordinate."""
        return self._data[..., 0]

    @property
    def ymin(self) -> torch.Tensor:
        """The bounding box top-left y-coordinate."""
        return self._data[..., 1]

    @property
    def xmax(self) -> torch.Tensor:
        """The bounding box bottom-right x-coordinate."""
        return self._data[..., 2]

    @property
    def ymax(self) -> torch.Tensor:
        """The bounding box bottom-right y-coordinate."""
        return self._data[..., 3]

    def get_keypoint(self, keypoint: FaceKeypoint) -> torch.Tensor:
        """The [x y] position of a given facial keypoint.

        Args:
            keypoint: the keypoint type to return the position.
        """
        # Keypoints are stored as consecutive (x, y) pairs after the box.
        if keypoint == FaceKeypoint.EYE_LEFT:
            out = self._data[..., (4, 5)]
        elif keypoint == FaceKeypoint.EYE_RIGHT:
            out = self._data[..., (6, 7)]
        elif keypoint == FaceKeypoint.NOSE:
            out = self._data[..., (8, 9)]
        elif keypoint == FaceKeypoint.MOUTH_LEFT:
            out = self._data[..., (10, 11)]
        elif keypoint == FaceKeypoint.MOUTH_RIGHT:
            out = self._data[..., (12, 13)]
        else:
            raise ValueError(f"Not valid keypoint type. Got: {keypoint}.")
        return out

    @property
    def score(self) -> torch.Tensor:
        """The detection score."""
        return self._data[..., 14]

    @property
    def width(self) -> torch.Tensor:
        """The bounding box width."""
        return self.xmax - self.xmin

    @property
    def height(self) -> torch.Tensor:
        """The bounding box height."""
        return self.ymax - self.ymin

    @property
    def top_left(self) -> torch.Tensor:
        """The [x y] position of the top-left coordinate of the bounding box."""
        return self._data[..., (0, 1)]

    @property
    def top_right(self) -> torch.Tensor:
        """The [x y] position of the top-right coordinate of the bounding box."""
        # BUGFIX(doc): previously documented as "top-left".
        # Advanced indexing in `top_left` returns a copy, so the in-place add
        # below does not modify `_data`.
        out = self.top_left
        out[..., 0] += self.width
        return out

    @property
    def bottom_right(self) -> torch.Tensor:
        """The [x y] position of the bottom-right coordinate of the bounding box."""
        return self._data[..., (2, 3)]

    @property
    def bottom_left(self) -> torch.Tensor:
        """The [x y] position of the bottom-left coordinate of the bounding box."""
        # BUGFIX(doc): previously documented as "top-left".
        out = self.top_left
        out[..., 1] += self.height
        return out
class FaceDetector(nn.Module):
    r"""Detect faces in a given image using a CNN.

    By default, it uses the method described in :cite:`facedetect-yu`.

    Args:
        top_k: the maximum number of detections to return before the nms.
        confidence_threshold: the threshold used to discard detections.
        nms_threshold: the threshold used by the nms for iou.
        keep_top_k: the maximum number of detections to return after the nms.

    Return:
        A tensor of shape :math:`(N,15)` to be used with :py:class:`kornia.contrib.FaceDetectorResult`.

    Example:
        >>> img = torch.rand(1, 3, 320, 320)
        >>> detect = FaceDetector()
        >>> res = detect(img)
    """

    def __init__(self,
                 top_k: int = 5000,
                 confidence_threshold: float = 0.3,
                 nms_threshold: float = 0.3,
                 keep_top_k: int = 750) -> None:
        super().__init__()
        self.top_k = top_k
        self.confidence_threshold = confidence_threshold
        self.nms_threshold = nms_threshold
        self.keep_top_k = keep_top_k
        # NOTE(review): this dict duplicates the attributes below and is not
        # read anywhere in this class; kept as-is for backward compatibility.
        self.config = {
            'name': 'YuFaceDetectNet',
            'min_sizes': [[10, 16, 24], [32, 48], [64, 96], [128, 192, 256]],
            'steps': [8, 16, 32, 64],
            'variance': [0.1, 0.2],
            'clip': False,
        }
        # SSD-style anchor configuration consumed by postprocess().
        self.min_sizes: List[List[int]] = [[10, 16, 24], [32, 48], [64, 96], [128, 192, 256]]
        self.steps: List[int] = [8, 16, 32, 64]
        self.variance: List[float] = [0.1, 0.2]
        self.clip: bool = False
        # Backbone + heads; downloads pretrained weights on construction.
        self.model = YuFaceDetectNet('test', pretrained=True)
        self.nms: Callable = nms_kornia

    def preprocess(self, image: torch.Tensor) -> torch.Tensor:
        # Identity hook; override to add normalization/resizing if needed.
        return image

    def postprocess(self, data: Dict[str, torch.Tensor], height: int, width: int) -> torch.Tensor:
        """Convert raw network outputs into an (N, 15) tensor of detections."""
        loc, conf, iou = data['loc'], data['conf'], data['iou']

        # Scale factors mapping normalized coordinates back to pixel space:
        # 4 box values + 5 landmark (x, y) pairs = 14 entries.
        scale = torch.tensor([
            width, height, width, height,
            width, height, width, height,
            width, height, width, height,
            width, height,
        ], device=loc.device, dtype=loc.dtype)  # 14

        priors = _PriorBox(self.min_sizes, self.steps, self.clip, image_size=(height, width))
        priors = priors.to(loc.device, loc.dtype)
        boxes = _decode(loc, priors(), self.variance)  # Nx14
        boxes = boxes * scale

        # clamp here for the compatibility for ONNX
        cls_scores, iou_scores = conf[:, 1], iou[:, 0]
        # Combined score: geometric mean of class confidence and predicted iou.
        scores = (cls_scores * iou_scores.clamp(0., 1.)).sqrt()

        # ignore low scores
        inds = (scores > self.confidence_threshold)
        boxes, scores = boxes[inds], scores[inds]

        # keep top-K before NMS
        order = scores.sort(descending=True)[1][:self.top_k]
        boxes, scores = boxes[order], scores[order]

        # perform NMS
        # NOTE: nms needs to be revised since it does not export well to onnx
        dets = torch.cat((boxes, scores[:, None]), dim=-1)  # Nx15
        keep = self.nms(boxes[:, :4], scores, self.nms_threshold)
        if len(keep) > 0:
            dets = dets[keep, :]

        # keep top-K after NMS
        return dets[:self.keep_top_k]

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        img = self.preprocess(image)
        out = self.model(img)
        # Postprocess in the (possibly preprocessed) image's pixel space.
        return self.postprocess(out, img.shape[-2], img.shape[-1])
# utils for the network
class ConvDPUnit(nn.Sequential):
    """1x1 pointwise convolution followed by a 3x3 depthwise convolution.

    The submodule names ("conv1", "conv2", "bn", "relu") appear in the
    pretrained checkpoint's state-dict keys, so they must not be renamed.
    """

    def __init__(self, in_channels, out_channels, withBNRelu=True):
        super().__init__()
        # Pointwise (1x1) conv mixes channels.
        self.add_module("conv1", nn.Conv2d(in_channels, out_channels, 1, 1, 0, bias=True, groups=1))
        # Depthwise (groups=out_channels) 3x3 conv filters each channel spatially.
        self.add_module("conv2", nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=True, groups=out_channels))
        if withBNRelu:
            self.add_module("bn", nn.BatchNorm2d(out_channels))
            self.add_module("relu", nn.ReLU(inplace=True))
class Conv_head(nn.Sequential):
    """Network stem: strided 3x3 conv (downsamples by 2) + BN/ReLU + ConvDPUnit."""

    def __init__(self, in_channels: int, mid_channels: int, out_channels: int) -> None:
        super().__init__()
        # Stride-2 convolution halves the spatial resolution.
        self.add_module("conv1", nn.Conv2d(in_channels, mid_channels, 3, 2, 1, bias=True, groups=1))
        self.add_module("bn1", nn.BatchNorm2d(mid_channels))
        self.add_module("relu", nn.ReLU(inplace=True))
        self.add_module("conv2", ConvDPUnit(mid_channels, out_channels))
class Conv4layerBlock(nn.Sequential):
    """Two stacked ConvDPUnits; the second optionally without BN+ReLU.

    With ``withBNRelu=False`` the block emits raw (unactivated) outputs,
    which is how it is used as a prediction head in YuFaceDetectNet.
    """

    def __init__(self, in_channels: int, out_channels: int, withBNRelu: bool = True) -> None:
        super().__init__()
        self.add_module("conv1", ConvDPUnit(in_channels, in_channels, True))
        self.add_module("conv2", ConvDPUnit(in_channels, out_channels, withBNRelu))
class YuFaceDetectNet(nn.Module):
    """YuNet backbone with multi-scale detection heads.

    Args:
        phase: 'train' applies random weight initialization; 'test' reshapes
            outputs for inference and applies softmax to the class logits.
        pretrained: if True, download and load the released weights from `url`.
    """

    def __init__(self, phase, pretrained: bool):
        super().__init__()
        self.phase = phase
        self.num_classes = 2  # face vs. background
        # Backbone: stride-2 stem followed by depthwise-separable blocks.
        self.model0 = Conv_head(3, 16, 16)
        self.model1 = Conv4layerBlock(16, 64)
        self.model2 = Conv4layerBlock(64, 64)
        self.model3 = Conv4layerBlock(64, 64)
        self.model4 = Conv4layerBlock(64, 64)
        self.model5 = Conv4layerBlock(64, 64)
        self.model6 = Conv4layerBlock(64, 64)
        # One head per detection scale.  Each anchor predicts 14 box/landmark
        # offsets + 2 class logits + 1 iou value (hence 14 + 2 + 1), with
        # 3/2/2/3 anchors per cell at the four scales.
        self.head = nn.Sequential(
            Conv4layerBlock(64, 3 * (14 + 2 + 1), False),
            Conv4layerBlock(64, 2 * (14 + 2 + 1), False),
            Conv4layerBlock(64, 2 * (14 + 2 + 1), False),
            Conv4layerBlock(64, 3 * (14 + 2 + 1), False),
        )

        if self.phase == 'train':
            # Random initialization when training from scratch.
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    if m.bias is not None:
                        nn.init.xavier_normal_(m.weight.data)
                        m.bias.data.fill_(0.02)
                    else:
                        m.weight.data.normal_(0, 0.01)
                elif isinstance(m, nn.BatchNorm2d):
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()

        # use torch.hub to load pretrained model
        if pretrained:
            pretrained_dict = torch.hub.load_state_dict_from_url(
                url, map_location=lambda storage, loc: storage
            )
            self.load_state_dict(pretrained_dict, strict=True)
        self.eval()

    def forward(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
        """Run backbone and heads; return 'loc'/'conf'/'iou' predictions."""
        detection_sources, head_list = [], []

        # Backbone with max-pool downsampling; four intermediate feature maps
        # are kept as detection sources (one per head).
        x = self.model0(x)
        x = F.max_pool2d(x, 2)
        x = self.model1(x)
        x = self.model2(x)
        x = F.max_pool2d(x, 2)
        x = self.model3(x)
        detection_sources.append(x)

        x = F.max_pool2d(x, 2)
        x = self.model4(x)
        detection_sources.append(x)

        x = F.max_pool2d(x, 2)
        x = self.model5(x)
        detection_sources.append(x)

        x = F.max_pool2d(x, 2)
        x = self.model6(x)
        detection_sources.append(x)

        # Apply each head to its scale; move channels last before flattening.
        for i, h in enumerate(self.head):
            x_tmp = h(detection_sources[i])
            head_list.append(x_tmp.permute(0, 2, 3, 1).contiguous())

        head_data = torch.cat([o.view(o.size(0), -1) for o in head_list], 1)
        # 17 = 14 box/landmark offsets + 2 class logits + 1 iou per anchor.
        head_data = head_data.view(head_data.size(0), -1, 17)

        loc_data, conf_data, iou_data = head_data.split((14, 2, 1), dim=-1)

        if self.phase == "test":
            # Flatten anchors across the batch and softmax the class logits.
            loc_data = loc_data.view(-1, 14)
            conf_data = torch.softmax(conf_data.view(-1, self.num_classes), dim=-1)
            iou_data = iou_data.view(-1, 1)
        else:
            # Keep the batch dimension and raw logits for the training loss.
            loc_data = loc_data.view(loc_data.size(0), -1, 14)
            conf_data = conf_data.view(conf_data.size(0), -1, self.num_classes)
            iou_data = iou_data.view(iou_data.size(0), -1, 1)

        return {"loc": loc_data, "conf": conf_data, "iou": iou_data}
# utils for post-processing
# Adapted from https://github.com/Hakuyume/chainer-ssd
def _decode(loc: torch.Tensor, priors: torch.Tensor, variances: List[float]) -> torch.Tensor:
    """Decode locations from predictions using priors to undo the encoding we did for offset
    regression at train time.

    Args:
        loc: location predictions for loc layers. Shape: [num_priors, 14].
        priors: Prior boxes in center-offset form. Shape: [num_priors, 4].
        variances: (list[float]) Variances of priorboxes.

    Return:
        Tensor containing decoded bounding box and landmark predictions.
    """
    # Box center plus five landmark (x, y) pairs, all offset from the anchor
    # center and scaled by the anchor size and the first variance.
    xy_pairs = [priors[:, 0:2] + loc[:, a:b] * variances[0] * priors[:, 2:4]
                for a, b in ((0, 2), (4, 6), (6, 8), (8, 10), (10, 12), (12, 14))]
    # Width/height are predicted in log-space relative to the anchor size.
    wh = priors[:, 2:4] * torch.exp(loc[:, 2:4] * variances[1])
    boxes = torch.cat((xy_pairs[0], wh, *xy_pairs[1:]), 1)

    # Convert (cx, cy, w, h) into corner form (xmin, ymin, xmax, ymax).
    tmp = boxes[:, 0:2] - boxes[:, 2:4] / 2
    return torch.cat((tmp, boxes[:, 2:4] + tmp, boxes[:, 4:]), dim=-1)
class _PriorBox:
    """Generate SSD-style prior (anchor) boxes for the four detection scales."""

    def __init__(self, min_sizes: List[List[int]], steps: List[int], clip: bool, image_size: Tuple[int, int]) -> None:
        self.min_sizes = min_sizes
        self.steps = steps
        self.clip = clip
        self.image_size = image_size

        # Anchors are materialised lazily in __call__ on this device/dtype.
        self.device: torch.device = torch.device('cpu')
        self.dtype: torch.dtype = torch.float32

        # Only strides 8/16/32/64 (powers of two from 2^3) are supported.
        for idx in range(4):
            if self.steps[idx] != math.pow(2, (idx + 3)):
                raise ValueError("steps must be [8,16,32,64]")

        def halved(fmap: List[int]) -> List[int]:
            # Integer-halve both spatial dimensions of a feature map.
            return [int(fmap[0] / 2), int(fmap[1] / 2)]

        # Spatial sizes after repeated integer halving of the input resolution.
        self.feature_map_2th = [int(int((self.image_size[0] + 1) / 2) / 2),
                                int(int((self.image_size[1] + 1) / 2) / 2)]
        self.feature_map_3th = halved(self.feature_map_2th)
        self.feature_map_4th = halved(self.feature_map_3th)
        self.feature_map_5th = halved(self.feature_map_4th)
        self.feature_map_6th = halved(self.feature_map_5th)

        # Anchors are generated for the 3rd..6th maps only.
        self.feature_maps = [self.feature_map_3th, self.feature_map_4th,
                             self.feature_map_5th, self.feature_map_6th]

    def to(self, device: torch.device, dtype: torch.dtype) -> '_PriorBox':
        """Like :func:`torch.nn.Module.to()` — record device/dtype, return self."""
        self.device = device
        self.dtype = dtype
        return self

    def __call__(self) -> torch.Tensor:
        """Build the full (num_anchors, 4) tensor of [cx, cy, w, h] priors."""
        anchor_values: List[float] = []
        img_h, img_w = self.image_size[0], self.image_size[1]
        for scale_idx, fmap in enumerate(self.feature_maps):
            step = self.steps[scale_idx]
            # NOTE: plain nested loops (no itertools) to keep torchscript happy.
            for row in range(fmap[0]):
                cy = (row + 0.5) * step / img_h
                for col in range(fmap[1]):
                    cx = (col + 0.5) * step / img_w
                    for min_size in self.min_sizes[scale_idx]:
                        anchor_values += [cx, cy, min_size / img_w, min_size / img_h]
        # back to torch land: materialise on the configured device/dtype.
        output = torch.tensor(anchor_values, device=self.device, dtype=self.dtype).view(-1, 4)
        if self.clip:
            output = output.clamp(max=1, min=0)
        return output
| kornia/contrib/face_detection.py | 15,353 | Detect faces in a given image using a CNN.
By default, it uses the method described in :cite:`facedetect-yu`.
Args:
top_k: the maximum number of detections to return before the nms.
confidence_threshold: the threshold used to discard detections.
nms_threshold: the threshold used by the nms for iou.
keep_top_k: the maximum number of detections to return after the nms.
Return:
A tensor of shape :math:`(N,15)` to be used with :py:class:`kornia.contrib.FaceDetectorResult`.
Example:
>>> img = torch.rand(1, 3, 320, 320)
>>> detect = FaceDetector()
>>> res = detect(img)
Encapsulate the results obtained by the :py:class:`kornia.contrib.FaceDetector`.
Args:
data: the encoded results coming from the feature detector with shape :math:`(14,)`.
Define the keypoints detected in a face.
The left/right convention is based on the screen viewer.
Decode locations from predictions using priors to undo the encoding we did for offset regression at train
time.
Args:
loc:location predictions for loc layers. Shape: [num_priors,4].
priors: Prior boxes in center-offset form. Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes.
Return:
Tensor containing decoded bounding box predictions.
The [x y] position of the top-left coordinate of the bounding box.
The [x y] position of the bottom-right coordinate of the bounding box.
The [x y] position of a given facial keypoint.
Args:
keypoint: the keypoint type to return the position.
The bounding box height.
The detection score.
Like :func:`torch.nn.Module.to()` method.
The [x y] position of the top-left coordinate of the bounding box.
The [x y] position of the top-left coordinate of the bounding box.
The bounding box width.
The bounding box bottom-right x-coordinate.
The bounding box top-left x-coordinate.
The bounding box bottom-right y-coordinate.
The bounding box top-left y-coordinate.
based on: https://github.com/ShiqiYu/libfacedetection.train/blob/74f3aa77c63234dd954d21286e9a60703b8d0868/tasks/task1/yufacedetectnet.py noqa 14 Nx14 clamp here for the compatibility for ONNX ignore low scores keep top-K before NMS performd NMS NOTE: nms need to be revise since does not export well to onnx Nx15 keep top-K faster NMS utils for the network use torch.hub to load pretrained model utils for post-processing Adapted from https://github.com/Hakuyume/chainer-ssd prepare final output NOTE: the nested loop it's to make torchscript happy back to torch land | 2,491 | en | 0.741201 |
"""
Module Doc String
"""
# The 32 emotion labels recognised by this project.
# NOTE(review): this looks like the EmpatheticDialogues label set — confirm
# before relying on any particular ordering of the entries.
EMOTIONS = [
    "sentimental",
    "afraid",
    "proud",
    "faithful",
    "terrified",
    "joyful",
    "angry",
    "sad",
    "jealous",
    "grateful",
    "prepared",
    "embarrassed",
    "excited",
    "annoyed",
    "lonely",
    "ashamed",
    "guilty",
    "surprised",
    "nostalgic",
    "confident",
    "furious",
    "disappointed",
    "caring",
    "trusting",
    "disgusted",
    "anticipating",
    "anxious",
    "hopeful",
    "content",
    "impressed",
    "apprehensive",
    "devastated",
]
def main():
    """Script entry point; intentionally a no-op for now."""
if __name__ == "__main__":
main()
| common.py | 626 | Driver
Module Doc String | 25 | en | 0.092294 |
from django.utils import translation
from django.utils.translation.trans_real import (
to_language as django_to_language,
parse_accept_lang_header as django_parse_accept_lang_header
)
from django.test import RequestFactory, TestCase
from django.urls import reverse
from .. import language_code_to_iso_3166, parse_accept_lang_header, to_language
from ..utils import queue_ga_event
class UtilsTestCase(TestCase):
    """Unit tests for the package's language helpers and the GA event queue."""

    def test_get_language_code_to_iso_3166(self):
        """Language codes are normalised to ISO 3166 casing."""
        expectations = {
            'en-gb': 'en-GB',
            'en-us': 'en-US',
            'fr': 'fr',
        }
        for raw, expected in expectations.items():
            self.assertEqual(language_code_to_iso_3166(raw), expected)

    def test_to_language(self):
        """Locale names are converted to hyphenated language tags."""
        self.assertEqual(to_language('en_US'), 'en-US')

    def test_parse_accept_lang_header_returns_iso_3166_language(self):
        """Accept-Language parsing keeps ISO 3166 region casing and q-values."""
        parsed = parse_accept_lang_header('en-GB,en;q=0.5')
        self.assertEqual(parsed, (('en-GB', 1.0), ('en', 0.5)))

    def test_queue_ga_event_new(self):
        """Queueing into an empty session creates the ga_events list."""
        request = RequestFactory().get('/')
        request.session = self.client.session
        queue_ga_event(request, ['send', 'event', 'foo'])
        self.assertEqual(request.session['ga_events'], [['send', 'event', 'foo']])

    def test_queue_ga_event_append(self):
        """Queueing appends to an existing list and marks the session modified."""
        request = RequestFactory().get('/')
        request.session = self.client.session
        request.session['ga_events'] = [['send', 'event', 'foo']]
        queue_ga_event(request, ['send', 'event', 'bar'])
        self.assertEqual(
            request.session['ga_events'],
            [['send', 'event', 'foo'], ['send', 'event', 'bar']],
        )
        self.assertTrue(request.session.modified)
class UtilsIntegrationTestCase(TestCase):
    """Check that our overrides of Django's translation functions take effect."""

    def test_to_language(self):
        """The patched django_to_language keeps the ISO 3166 region casing."""
        self.assertEqual(django_to_language('en_US'), 'en-US')

    def test_parse_accept_lang_header_returns_iso_3166_language(self):
        """The patched header parser returns ISO 3166 language tags."""
        parsed = django_parse_accept_lang_header('en-GB,en;q=0.5')
        self.assertEqual(parsed, (('en-GB', 1.0), ('en', 0.5)))

    def test_reverse_produces_correct_url_prefix(self):
        """URL reversal under an active locale uses the ISO 3166 prefix."""
        translation.activate('en-GB')
        completed_url = reverse('payments:completed')
        self.assertTrue(completed_url.startswith('/en-GB/'))
        translation.deactivate()
| donate/core/tests/test_utils.py | 2,319 | Test that our overrides to Django translation functions work. | 61 | en | 0.880661 |
# global
import ivy
import abc
import importlib
from typing import List
# local
from ivy_builder.specs.spec import Spec
from ivy_builder.specs import DatasetSpec
from ivy_builder.specs.spec import locals_to_kwargs
# ToDo: fix cyclic imports, so this method can be imported from the builder module
def load_class_from_str(full_str):
    """Resolve a fully-qualified dotted path and return the final attribute.

    Everything before the last dot is imported as a module; the final
    component is looked up on that module (typically a class).
    """
    module_path, _, attr_name = full_str.rpartition('.')
    return getattr(importlib.import_module(module_path), attr_name)
class NetworkSpec(Spec, abc.ABC):

    def __init__(self, dataset_spec: DatasetSpec = None, dev_strs: List[str] = None,
                 v_keychains=None, keep_v_keychains=False, build_mode='explicit', **kwargs) -> None:
        """
        base class for storing general specifications of the neural network
        """
        # Capture the constructor arguments before anything else is bound.
        captured_kwargs = locals_to_kwargs(locals())
        super().__init__(dataset_spec=dataset_spec,
                         dev_strs=dev_strs,
                         v_keychains=v_keychains,
                         keep_v_keychains=keep_v_keychains,
                         build_mode=build_mode,
                         **kwargs)
        if 'subnets' in self:
            # Normalise every sub-network entry into a fully-built spec object.
            for name, subnet_spec in self.subnets.items():
                if 'network_spec_class' in subnet_spec:
                    raw_spec_class = subnet_spec.network_spec_class
                    # The spec class may be given as a dotted string or a class.
                    spec_class = (load_class_from_str(raw_spec_class)
                                  if isinstance(raw_spec_class, str) else raw_spec_class)
                    raw_subnet = kwargs['subnets'][name]
                    if isinstance(raw_subnet, spec_class):
                        subnet_spec = raw_subnet
                    else:
                        # Build the spec from its kwargs, sharing this spec's
                        # dataset spec and device strings.
                        subnet_spec = spec_class(**{**raw_subnet,
                                                    **dict(dataset_spec=dataset_spec, dev_strs=dev_strs)})
                    self.subnets[name] = subnet_spec
                # The network class may likewise be a dotted string or a class.
                if isinstance(subnet_spec.network_class, str):
                    self.subnets[name].network_class = load_class_from_str(subnet_spec.network_class)
                else:
                    self.subnets[name].network_class = subnet_spec.network_class
                # Fill defaults inherited from this parent spec.
                self.subnets[name].store_vars = ivy.default(self.subnets[name].if_exists('store_vars'), True)
                self.subnets[name].build_mode = ivy.default(self.subnets[name].if_exists('build_mode'), self.build_mode)
                self.subnets[name].dataset_spec = dataset_spec
                self.subnets[name].dev_strs = dev_strs
        self._kwargs = captured_kwargs
| ivy_builder/specs/network_spec.py | 2,531 | base class for storing general specifications of the neural network
global local ToDo: fix cyclic imports, so this method can be imported from the builder module | 163 | en | 0.671608 |
# coding: utf-8
import pprint
import re
import six
class SetBackupPolicyRequestBody:
    """Request body model carrying a single ``backup_policy`` field.

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """

    sensitive_list = []

    openapi_types = {
        'backup_policy': 'BackupPolicy'
    }

    attribute_map = {
        'backup_policy': 'backup_policy'
    }

    def __init__(self, backup_policy=None):
        """SetBackupPolicyRequestBody - a model defined in huaweicloud sdk"""
        self._backup_policy = None
        self.discriminator = None
        self.backup_policy = backup_policy

    @property
    def backup_policy(self):
        """The backup policy of this SetBackupPolicyRequestBody.

        :rtype: BackupPolicy
        """
        return self._backup_policy

    @backup_policy.setter
    def backup_policy(self, backup_policy):
        """Set the backup policy of this SetBackupPolicyRequestBody.

        :type: BackupPolicy
        """
        self._backup_policy = backup_policy

    def to_dict(self):
        """Serialise the model's properties into a plain dict."""
        result = {}
        # six.iteritems keeps this generated code Python-2 compatible.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    entry.to_dict() if hasattr(entry, "to_dict") else entry
                    for entry in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            elif attr in self.sensitive_list:
                # Mask sensitive attributes in serialised output.
                result[attr] = "****"
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Pretty-printed string form of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Two bodies are equal when both are this model with equal state."""
        if isinstance(other, SetBackupPolicyRequestBody):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        return not self == other
| huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/set_backup_policy_request_body.py | 2,860 | Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
Returns true if both objects are equal
SetBackupPolicyRequestBody - a model defined in huaweicloud sdk
Returns true if both objects are not equal
For `print` and `pprint`
Gets the backup_policy of this SetBackupPolicyRequestBody.
:return: The backup_policy of this SetBackupPolicyRequestBody.
:rtype: BackupPolicy
Sets the backup_policy of this SetBackupPolicyRequestBody.
:param backup_policy: The backup_policy of this SetBackupPolicyRequestBody.
:type: BackupPolicy
Returns the model properties as a dict
Returns the string representation of the model
coding: utf-8 | 808 | en | 0.817823 |
# -*- coding: utf-8 -*-
"""
Rewrite ot.bregman.sinkhorn in Python Optimal Transport (https://pythonot.github.io/_modules/ot/bregman.html#sinkhorn)
using pytorch operations.
Bregman projections for regularized OT (Sinkhorn distance).
"""
import torch
M_EPS = 1e-16  # tiny additive epsilon used throughout to avoid division by zero and log(0)
def sinkhorn(a, b, C, reg=1e-1, method='sinkhorn', maxIter=1000, tau=1e3,
             stopThr=1e-9, verbose=True, log=True, warm_start=None, eval_freq=10, print_freq=200, **kwargs):
    """
    Solve the entropic regularization optimal transport
    The input should be PyTorch tensors
    The function solves the following optimization problem:

    .. math::
        \gamma = arg\min_\gamma <\gamma,C>_F + reg\cdot\Omega(\gamma)
        s.t. \gamma 1 = a
             \gamma^T 1= b
             \gamma\geq 0
    where :
    - C is the (ns,nt) metric cost matrix
    - :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
    - a and b are target and source measures (sum to 1)
    The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [1].

    Parameters
    ----------
    a : torch.tensor (na,)
        samples measure in the target domain
    b : torch.tensor (nb,)
        samples in the source domain
    C : torch.tensor (na,nb)
        loss matrix
    reg : float
        Regularization term > 0
    method : str
        method used for the solver either 'sinkhorn', 'sinkhorn_stabilized' or
        'sinkhorn_epsilon_scaling', see those functions for specific parameters
    maxIter : int, optional
        Max number of iterations
    tau : float, optional
        threshold for max value in u or v for log scaling (stabilized variants)
    stopThr : float, optional
        Stop threshold on error ( > 0 )
    verbose : bool, optional
        Print information along iterations
    log : bool, optional
        record log if True
    warm_start : dict, optional
        scalings from a previous run, forwarded to the chosen solver
    eval_freq : int, optional
        evaluate the marginal error every `eval_freq` iterations
    print_freq : int, optional
        print progress every `print_freq` iterations (when verbose)

    Returns
    -------
    gamma : (na x nb) torch.tensor
        Optimal transportation matrix for the given parameters
    log : dict
        log dictionary return only if log==True in parameters

    References
    ----------
    [1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013

    See Also
    --------
    """
    # Case-insensitive dispatch; compute once instead of per-branch.
    method_name = method.lower()
    if method_name == 'sinkhorn':
        return sinkhorn_knopp(a, b, C, reg, maxIter=maxIter,
                              stopThr=stopThr, verbose=verbose, log=log,
                              warm_start=warm_start, eval_freq=eval_freq, print_freq=print_freq,
                              **kwargs)
    elif method_name == 'sinkhorn_stabilized':
        return sinkhorn_stabilized(a, b, C, reg, maxIter=maxIter, tau=tau,
                                   stopThr=stopThr, verbose=verbose, log=log,
                                   warm_start=warm_start, eval_freq=eval_freq, print_freq=print_freq,
                                   **kwargs)
    elif method_name == 'sinkhorn_epsilon_scaling':
        # BUG FIX: 'verbose' was previously hard-coded to False in this branch,
        # silently discarding the caller's request for progress output.
        return sinkhorn_epsilon_scaling(a, b, C, reg,
                                        maxIter=maxIter, maxInnerIter=100, tau=tau,
                                        scaling_base=0.75, scaling_coef=None, stopThr=stopThr,
                                        verbose=verbose, log=log, warm_start=warm_start, eval_freq=eval_freq,
                                        print_freq=print_freq, **kwargs)
    else:
        raise ValueError("Unknown method '%s'." % method)
def sinkhorn_knopp(a, b, C, reg=1e-1, maxIter=1000, stopThr=1e-9,
                   verbose=True, log=True, warm_start=None, eval_freq=10, print_freq=200, **kwargs):
    """
    Solve the entropic regularization optimal transport
    The input should be PyTorch tensors
    The function solves the following optimization problem:

    .. math::
        \gamma = arg\min_\gamma <\gamma,C>_F + reg\cdot\Omega(\gamma)
        s.t. \gamma 1 = a
             \gamma^T 1= b
             \gamma\geq 0
    where :
    - C is the (ns,nt) metric cost matrix
    - :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
    - a and b are target and source measures (sum to 1)
    The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [1].

    Parameters
    ----------
    a : torch.tensor (na,)
        samples measure in the target domain
    b : torch.tensor (nb,)
        samples in the source domain
    C : torch.tensor (na,nb)
        loss matrix
    reg : float
        Regularization term > 0
    maxIter : int, optional
        Max number of iterations
    stopThr : float, optional
        Stop threshold on error ( > 0 )
    verbose : bool, optional
        Print information along iterations
    log : bool, optional
        record log if True
    warm_start : dict, optional
        dict with entries 'u' and 'v': scalings from a previous run to resume from
    eval_freq : int, optional
        evaluate the marginal error only every `eval_freq` iterations
    print_freq : int, optional
        print progress every `print_freq` iterations (when verbose)

    Returns
    -------
    gamma : (na x nb) torch.tensor
        Optimal transportation matrix for the given parameters
    log : dict
        log dictionary return only if log==True in parameters

    References
    ----------
    [1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013

    See Also
    --------
    """
    device = a.device
    na, nb = C.shape

    assert na >= 1 and nb >= 1, 'C needs to be 2d'
    assert na == a.shape[0] and nb == b.shape[0], "Shape of a or b does't match that of C"
    assert reg > 0, 'reg should be greater than 0'
    assert a.min() >= 0. and b.min() >= 0., 'Elements in a or b less than 0'
    # unnecessary check for our special case

    if log:
        log = {'err': []}

    # Resume from previous scalings if provided, else start uniform.
    if warm_start is not None:
        u = warm_start['u']
        v = warm_start['v']
    else:
        u = torch.ones(na, dtype=a.dtype).to(device) / na
        v = torch.ones(nb, dtype=b.dtype).to(device) / nb

    # Gibbs kernel K = exp(-C / reg), built in place to avoid extra allocations.
    K = torch.empty(C.shape, dtype=C.dtype).to(device)
    torch.div(C, -reg, out=K)
    torch.exp(K, out=K)

    b_hat = torch.empty(b.shape, dtype=C.dtype).to(device)

    it = 1
    err = 1

    # allocate memory beforehand
    KTu = torch.empty(v.shape, dtype=v.dtype).to(device)
    Kv = torch.empty(u.shape, dtype=u.dtype).to(device)

    while (err > stopThr and it <= maxIter):
        upre, vpre = u, v
        # Sinkhorn updates: v <- b / (K^T u), then u <- a / (K v).
        # (torch.matmul(u, K) with 1-D u computes K^T u.)  M_EPS avoids /0.
        torch.matmul(u, K, out=KTu)
        v = torch.div(b, KTu + M_EPS)
        torch.matmul(K, v, out=Kv)
        u = torch.div(a, Kv + M_EPS)

        # Roll back and stop on numerical blow-up (NaN/Inf in the scalings).
        if torch.any(torch.isnan(u)) or torch.any(torch.isnan(v)) or \
                torch.any(torch.isinf(u)) or torch.any(torch.isinf(v)):
            print('Warning: numerical errors at iteration', it)
            u, v = upre, vpre
            break

        if log and it % eval_freq == 0:
            # we can speed up the process by checking for the error only all
            # the eval_freq iterations
            # below is equivalent to:
            # b_hat = torch.sum(u.reshape(-1, 1) * K * v.reshape(1, -1), 0)
            # but with more memory efficient
            b_hat = torch.matmul(u, K) * v
            err = (b - b_hat).pow(2).sum().item()
            # err = (b - b_hat).abs().sum().item()
            log['err'].append(err)

        if verbose and it % print_freq == 0:
            print('iteration {:5d}, constraint error {:5e}'.format(it, err))

        it += 1

    if log:
        log['u'] = u
        log['v'] = v
        # Dual potentials recovered from the scalings.
        log['alpha'] = reg * torch.log(u + M_EPS)
        log['beta'] = reg * torch.log(v + M_EPS)

    # transport plan
    P = u.reshape(-1, 1) * K * v.reshape(1, -1)

    if log:
        return P, log
    else:
        return P
def sinkhorn_stabilized(a, b, C, reg=1e-1, maxIter=1000, tau=1e3, stopThr=1e-9,
                        verbose=False, log=False, warm_start=None, eval_freq=10, print_freq=200, **kwargs):
    """
    Solve the entropic regularization OT problem with log stabilization
    The function solves the following optimization problem:

    .. math::
        \gamma = arg\min_\gamma <\gamma,C>_F + reg\cdot\Omega(\gamma)
        s.t. \gamma 1 = a
             \gamma^T 1= b
             \gamma\geq 0
    where :
    - C is the (ns,nt) metric cost matrix
    - :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
    - a and b are target and source measures (sum to 1)
    The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [1]
    but with the log stabilization proposed in [3] as defined in [2] (Algo 3.1)

    Parameters
    ----------
    a : torch.tensor (na,)
        samples measure in the target domain
    b : torch.tensor (nb,)
        samples in the source domain
    C : torch.tensor (na,nb)
        loss matrix
    reg : float
        Regularization term > 0
    tau : float
        threshold for max value in u or v for log scaling
    maxIter : int, optional
        Max number of iterations
    stopThr : float, optional
        Stop threshold on error ( > 0 )
    verbose : bool, optional
        Print information along iterations
    log : bool, optional
        record log if True
    warm_start : dict, optional
        dict with entries 'alpha' and 'beta': dual potentials from a previous run
    eval_freq : int, optional
        evaluate the marginal error only every `eval_freq` iterations
    print_freq : int, optional
        print progress every `print_freq` iterations (when verbose)

    Returns
    -------
    gamma : (na x nb) torch.tensor
        Optimal transportation matrix for the given parameters
    log : dict
        log dictionary return only if log==True in parameters

    References
    ----------
    [1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013
    [2] Bernhard Schmitzer. Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. SIAM Journal on Scientific Computing, 2019
    [3] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816.

    See Also
    --------
    """
    device = a.device
    na, nb = C.shape

    assert na >= 1 and nb >= 1, 'C needs to be 2d'
    assert na == a.shape[0] and nb == b.shape[0], "Shape of a or b does't match that of C"
    assert reg > 0, 'reg should be greater than 0'
    assert a.min() >= 0. and b.min() >= 0., 'Elements in a or b less than 0'

    if log:
        log = {'err': []}

    # Dual potentials: resume from a warm start if provided, else zeros.
    if warm_start is not None:
        alpha = warm_start['alpha']
        beta = warm_start['beta']
    else:
        alpha = torch.zeros(na, dtype=a.dtype).to(device)
        beta = torch.zeros(nb, dtype=b.dtype).to(device)

    u = torch.ones(na, dtype=a.dtype).to(device) / na
    v = torch.ones(nb, dtype=b.dtype).to(device) / nb

    def update_K(alpha, beta):
        """Rebuild the stabilized kernel K = exp((alpha + beta - C) / reg) in place (log-space, memory efficient)."""
        torch.add(alpha.reshape(-1, 1), beta.reshape(1, -1), out=K)
        torch.add(K, -C, out=K)
        torch.div(K, reg, out=K)
        torch.exp(K, out=K)

    def update_P(alpha, beta, u, v, ab_updated=False):
        """Rebuild the transport plan P (gamma) in place from potentials and scalings (log-space)."""
        torch.add(alpha.reshape(-1, 1), beta.reshape(1, -1), out=P)
        torch.add(P, -C, out=P)
        torch.div(P, reg, out=P)
        # NOTE(review): when ab_updated is True, u and v have just been reset to
        # the uniform 1/na, 1/nb and their log factors are skipped here —
        # confirm this matches the intended marginal-error estimate.
        if not ab_updated:
            torch.add(P, torch.log(u + M_EPS).reshape(-1, 1), out=P)
            torch.add(P, torch.log(v + M_EPS).reshape(1, -1), out=P)
        torch.exp(P, out=P)

    K = torch.empty(C.shape, dtype=C.dtype).to(device)
    update_K(alpha, beta)

    b_hat = torch.empty(b.shape, dtype=C.dtype).to(device)

    it = 1
    err = 1
    ab_updated = False

    # allocate memory beforehand
    KTu = torch.empty(v.shape, dtype=v.dtype).to(device)
    Kv = torch.empty(u.shape, dtype=u.dtype).to(device)
    P = torch.empty(C.shape, dtype=C.dtype).to(device)

    while (err > stopThr and it <= maxIter):
        upre, vpre = u, v
        # Standard Sinkhorn updates on the stabilized kernel; M_EPS avoids /0.
        torch.matmul(u, K, out=KTu)
        v = torch.div(b, KTu + M_EPS)
        torch.matmul(K, v, out=Kv)
        u = torch.div(a, Kv + M_EPS)

        ab_updated = False
        # remove numerical problems and store them in K
        # (absorb large scalings into the dual potentials, reset u and v)
        if u.abs().sum() > tau or v.abs().sum() > tau:
            alpha += reg * torch.log(u + M_EPS)
            beta += reg * torch.log(v + M_EPS)
            u.fill_(1. / na)
            v.fill_(1. / nb)
            update_K(alpha, beta)
            ab_updated = True

        if log and it % eval_freq == 0:
            # we can speed up the process by checking for the error only all
            # the eval_freq iterations
            update_P(alpha, beta, u, v, ab_updated)
            b_hat = torch.sum(P, 0)
            err = (b - b_hat).pow(2).sum().item()
            log['err'].append(err)

        if verbose and it % print_freq == 0:
            print('iteration {:5d}, constraint error {:5e}'.format(it, err))

        it += 1

    if log:
        log['u'] = u
        log['v'] = v
        # Final dual potentials with the residual scalings folded in.
        log['alpha'] = alpha + reg * torch.log(u + M_EPS)
        log['beta'] = beta + reg * torch.log(v + M_EPS)

    # transport plan
    update_P(alpha, beta, u, v, False)

    if log:
        return P, log
    else:
        return P
def sinkhorn_epsilon_scaling(a, b, C, reg=1e-1, maxIter=100, maxInnerIter=100, tau=1e3, scaling_base=0.75,
                             scaling_coef=None, stopThr=1e-9, verbose=False, log=False, warm_start=None, eval_freq=10,
                             print_freq=200, **kwargs):
    """
    Solve the entropic regularization OT problem with log stabilization
    The function solves the following optimization problem:

    .. math::
        \gamma = arg\min_\gamma <\gamma,C>_F + reg\cdot\Omega(\gamma)
        s.t. \gamma 1 = a
             \gamma^T 1= b
             \gamma\geq 0
    where :
    - C is the (ns,nt) metric cost matrix
    - :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
    - a and b are target and source measures (sum to 1)
    The algorithm used for solving the problem is the Sinkhorn-Knopp matrix
    scaling algorithm as proposed in [1] but with the log stabilization
    proposed in [3] and the log scaling proposed in [2] algorithm 3.2

    Parameters
    ----------
    a : torch.tensor (na,)
        samples measure in the target domain
    b : torch.tensor (nb,)
        samples in the source domain
    C : torch.tensor (na,nb)
        loss matrix
    reg : float
        Regularization term > 0
    maxIter : int, optional
        Max number of (outer, epsilon-scaling) iterations
    maxInnerIter : int, optional
        Max number of inner (stabilized Sinkhorn) iterations
    tau : float
        threshold for max value in u or v for log scaling
    scaling_base : float, optional
        geometric decay factor of the running regularization towards `reg`
    scaling_coef : float, optional
        initial regularization; defaults to C.max() + reg
    stopThr : float, optional
        Stop threshold on error ( > 0 )
    verbose : bool, optional
        Print information along iterations
    log : bool, optional
        record log if True

    Returns
    -------
    gamma : (na x nb) torch.tensor
        Optimal transportation matrix for the given parameters
    log : dict
        log dictionary return only if log==True in parameters

    References
    ----------
    [1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013
    [2] Bernhard Schmitzer. Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. SIAM Journal on Scientific Computing, 2019
    [3] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816.

    See Also
    --------
    """
    na, nb = C.shape

    assert na >= 1 and nb >= 1, 'C needs to be 2d'
    assert na == a.shape[0] and nb == b.shape[0], "Shape of a or b does't match that of C"
    assert reg > 0, 'reg should be greater than 0'
    assert a.min() >= 0. and b.min() >= 0., 'Elements in a or b less than 0'

    def get_reg(it, reg, pre_reg):
        """Geometrically decay the running regularization towards the target `reg`."""
        if it == 1:
            return scaling_coef
        if (pre_reg - reg) * scaling_base < M_EPS:
            return reg
        return (pre_reg - reg) * scaling_base + reg

    if scaling_coef is None:
        scaling_coef = C.max() + reg

    it = 1
    err = 1
    running_reg = scaling_coef

    if log:
        log = {'err': []}

    # NOTE(review): the caller-supplied warm_start is discarded here (as in the
    # original) — the first inner solve always starts cold; confirm intent.
    warm_start = None
    while (err > stopThr and it <= maxIter):
        running_reg = get_reg(it, reg, running_reg)
        # Inner solve at the current (larger) regularization, warm-started
        # from the previous outer iteration's dual potentials.
        P, _log = sinkhorn_stabilized(a, b, C, running_reg, maxIter=maxInnerIter, tau=tau,
                                      stopThr=stopThr, verbose=False, log=True,
                                      warm_start=warm_start, eval_freq=eval_freq, print_freq=print_freq,
                                      **kwargs)

        warm_start = {}
        warm_start['alpha'] = _log['alpha']
        warm_start['beta'] = _log['beta']

        # Duality gap at the target regularization serves as the stopping error.
        primal_val = (C * P).sum() + reg * (P * torch.log(P)).sum() - reg * P.sum()
        dual_val = (_log['alpha'] * a).sum() + (_log['beta'] * b).sum() - reg * P.sum()
        # .item() keeps err a plain float (also makes the '{:5e}' print safe).
        err = (primal_val - dual_val).item()
        # BUG FIX: the append was previously unconditional, so the default
        # log=False crashed with "bool is not subscriptable" on iteration 1.
        if log:
            log['err'].append(err)

        if verbose and it % print_freq == 0:
            print('iteration {:5d}, constraint error {:5e}'.format(it, err))

        it += 1

    if log:
        log['alpha'] = _log['alpha']
        log['beta'] = _log['beta']
        return P, log
    else:
        return P
| losses/bregman_pytorch.py | 17,062 | Solve the entropic regularization optimal transport
The input should be PyTorch tensors
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,C>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
where :
- C is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are target and source measures (sum to 1)
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [1].
Parameters
----------
a : torch.tensor (na,)
samples measure in the target domain
b : torch.tensor (nb,)
samples in the source domain
C : torch.tensor (na,nb)
loss matrix
reg : float
Regularization term > 0
method : str
method used for the solver either 'sinkhorn', 'greenkhorn', 'sinkhorn_stabilized' or
'sinkhorn_epsilon_scaling', see those function for specific parameters
maxIter : int, optional
Max number of iterations
stopThr : float, optional
Stop threshol on error ( > 0 )
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
gamma : (na x nb) torch.tensor
Optimal transportation matrix for the given parameters
log : dict
log dictionary return only if log==True in parameters
References
----------
[1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013
See Also
--------
Solve the entropic regularization OT problem with log stabilization
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,C>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
where :
- C is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are target and source measures (sum to 1)
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix
scaling algorithm as proposed in [1] but with the log stabilization
proposed in [3] and the log scaling proposed in [2] algorithm 3.2
Parameters
----------
a : torch.tensor (na,)
samples measure in the target domain
b : torch.tensor (nb,)
samples in the source domain
C : torch.tensor (na,nb)
loss matrix
reg : float
Regularization term > 0
tau : float
thershold for max value in u or v for log scaling
maxIter : int, optional
Max number of iterations
stopThr : float, optional
Stop threshol on error ( > 0 )
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
gamma : (na x nb) torch.tensor
Optimal transportation matrix for the given parameters
log : dict
log dictionary return only if log==True in parameters
References
----------
[1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013
[2] Bernhard Schmitzer. Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. SIAM Journal on Scientific Computing, 2019
[3] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816.
See Also
--------
Solve the entropic regularization optimal transport
The input should be PyTorch tensors
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,C>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
where :
- C is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are target and source measures (sum to 1)
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [1].
Parameters
----------
a : torch.tensor (na,)
samples measure in the target domain
b : torch.tensor (nb,)
samples in the source domain
C : torch.tensor (na,nb)
loss matrix
reg : float
Regularization term > 0
maxIter : int, optional
Max number of iterations
stopThr : float, optional
Stop threshol on error ( > 0 )
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
gamma : (na x nb) torch.tensor
Optimal transportation matrix for the given parameters
log : dict
log dictionary return only if log==True in parameters
References
----------
[1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013
See Also
--------
Solve the entropic regularization OT problem with log stabilization
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,C>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
where :
- C is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are target and source measures (sum to 1)
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [1]
but with the log stabilization proposed in [3] an defined in [2] (Algo 3.1)
Parameters
----------
a : torch.tensor (na,)
samples measure in the target domain
b : torch.tensor (nb,)
samples in the source domain
C : torch.tensor (na,nb)
loss matrix
reg : float
Regularization term > 0
tau : float
thershold for max value in u or v for log scaling
maxIter : int, optional
Max number of iterations
stopThr : float, optional
Stop threshol on error ( > 0 )
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
gamma : (na x nb) torch.tensor
Optimal transportation matrix for the given parameters
log : dict
log dictionary return only if log==True in parameters
References
----------
[1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013
[2] Bernhard Schmitzer. Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. SIAM Journal on Scientific Computing, 2019
[3] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816.
See Also
--------
log space computation
log space P (gamma) computation
Rewrite ot.bregman.sinkhorn in Python Optimal Transport (https://pythonot.github.io/_modules/ot/bregman.html#sinkhorn)
using pytorch operations.
Bregman projections for regularized OT (Sinkhorn distance).
-*- coding: utf-8 -*- unnecessary check for our special case allocate memory beforehand we can speed up the process by checking for the error only all the eval_freq iterations below is equivalent to: b_hat = torch.sum(u.reshape(-1, 1) * K * v.reshape(1, -1), 0) but with more memory efficient err = (b - b_hat).abs().sum().item() transport plan allocate memory beforehand remove numerical problems and store them in K we can speed up the process by checking for the error only all the eval_freq iterations transport plan | 7,525 | en | 0.613419 |
# -*- coding: utf-8 -*-
# Copyright 2021 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the
# cli_rm_builder.
#
# Manually editing this file is not advised.
#
# To update the argspec make the desired changes
# in the module docstring and re-run
# cli_rm_builder.
#
#############################################
"""
The arg spec for the vyos_route_maps module
"""
class Route_mapsArgs(object):  # pylint: disable=R0903
    """The arg spec for the vyos_route_maps module.

    Auto-generated by cli_rm_builder from the module docstring; holds the
    ``argument_spec`` consumed by AnsibleModule. Do not edit by hand — see
    the header warning in this file.
    """

    def __init__(self, **kwargs):
        # Generated holder class; no instance state is needed.
        pass

    # Schema passed to AnsibleModule(argument_spec=...). One "config" item
    # per route-map; each route-map holds a list of "entries" (rules).
    argument_spec = {
        "config": {
            "type": "list",
            "elements": "dict",
            "options": {
                "route_map": {"type": "str"},
                "entries": {
                    "aliases": ["rules"],
                    "type": "list",
                    "elements": "dict",
                    "options": {
                        "sequence": {"type": "int"},
                        "call": {"type": "str"},
                        "description": {"type": "str"},
                        "action": {
                            "type": "str",
                            "choices": ["deny", "permit"],
                        },
                        "continue_sequence": {"type": "int"},
                        # Attributes applied when the rule matches.
                        "set": {
                            "type": "dict",
                            "options": {
                                "aggregator": {
                                    "type": "dict",
                                    "options": {
                                        "ip": {"type": "str"},
                                        "as": {"type": "str"},
                                    },
                                },
                                "as_path_exclude": {"type": "str"},
                                "as_path_prepend": {"type": "str"},
                                "atomic_aggregate": {"type": "bool"},
                                "bgp_extcommunity_rt": {"type": "str"},
                                "comm_list": {
                                    "type": "dict",
                                    "options": {
                                        "comm_list": {"type": "str"},
                                        "delete": {"type": "bool"},
                                    },
                                },
                                "community": {
                                    "type": "dict",
                                    "options": {"value": {"type": "str"}},
                                },
                                "extcommunity_rt": {"type": "str"},
                                "extcommunity_soo": {"type": "str"},
                                "ip_next_hop": {"type": "str"},
                                "ipv6_next_hop": {
                                    "type": "dict",
                                    "options": {
                                        "ip_type": {
                                            "type": "str",
                                            "choices": ["global", "local"],
                                        },
                                        "value": {"type": "str"},
                                    },
                                },
                                "large_community": {"type": "str"},
                                "local_preference": {"type": "str"},
                                "metric": {"type": "str"},
                                "metric_type": {
                                    "type": "str",
                                    "choices": ["type-1", "type-2"],
                                },
                                "origin": {
                                    "type": "str",
                                    "choices": ["egp", "igp", "incomplete"],
                                },
                                "originator_id": {"type": "str"},
                                "src": {"type": "str"},
                                "tag": {"type": "str"},
                                "weight": {"type": "str"},
                            },
                        },
                        # Conditions a route must satisfy for the rule to apply.
                        "match": {
                            "type": "dict",
                            "options": {
                                "as_path": {"type": "str"},
                                "community": {
                                    "type": "dict",
                                    "options": {
                                        "community_list": {"type": "str"},
                                        "exact_match": {"type": "bool"},
                                    },
                                },
                                "extcommunity": {"type": "str"},
                                "interface": {"type": "str"},
                                "ip": {
                                    "type": "dict",
                                    "options": {
                                        "address": {
                                            "type": "dict",
                                            "options": {
                                                "list_type": {
                                                    "type": "str",
                                                    "choices": [
                                                        "access-list",
                                                        "prefix-list",
                                                    ],
                                                },
                                                "value": {"type": "str"},
                                            },
                                        },
                                        "next_hop": {
                                            "type": "dict",
                                            "options": {
                                                "list_type": {
                                                    "type": "str",
                                                    "choices": [
                                                        "access-list",
                                                        "prefix-list",
                                                    ],
                                                },
                                                "value": {"type": "str"},
                                            },
                                        },
                                        "route_source": {
                                            "type": "dict",
                                            "options": {
                                                "list_type": {
                                                    "type": "str",
                                                    "choices": [
                                                        "access-list",
                                                        "prefix-list",
                                                    ],
                                                },
                                                "value": {"type": "str"},
                                            },
                                        },
                                    },
                                },
                                "ipv6": {
                                    "type": "dict",
                                    "options": {
                                        "address": {
                                            "type": "dict",
                                            "options": {
                                                "list_type": {
                                                    "type": "str",
                                                    "choices": [
                                                        "access-list",
                                                        "prefix-list",
                                                    ],
                                                },
                                                "value": {"type": "str"},
                                            },
                                        },
                                        "next_hop": {"type": "str"},
                                    },
                                },
                                "large_community_large_community_list": {
                                    "type": "str"
                                },
                                "metric": {"type": "int"},
                                "origin": {
                                    "type": "str",
                                    "choices": ["ebgp", "ibgp", "incomplete"],
                                },
                                "peer": {"type": "str"},
                                "rpki": {
                                    "type": "str",
                                    "choices": [
                                        "notfound",
                                        "invalid",
                                        "valid",
                                    ],
                                },
                            },
                        },
                        "on_match": {
                            "type": "dict",
                            "options": {
                                "next": {"type": "bool"},
                                "goto": {"type": "int"},
                            },
                        },
                    },
                },
            },
        },
        # Device running-config to parse when state is "parsed".
        "running_config": {"type": "str"},
        # Resource-module state; "merged" is the default behavior.
        "state": {
            "type": "str",
            "choices": [
                "deleted",
                "merged",
                "overridden",
                "replaced",
                "gathered",
                "rendered",
                "parsed",
            ],
            "default": "merged",
        },
    }  # pylint: disable=C0301
| venv/lib/python3.6/site-packages/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/route_maps/route_maps.py | 10,483 | The arg spec for the vyos_route_maps module
-*- coding: utf-8 -*- Copyright 2021 Red Hat GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) WARNING This file is auto generated by the cli_rm_builder. Manually editing this file is not advised. To update the argspec make the desired changes in the module docstring and re-run cli_rm_builder. pylint: disable=R0903 pylint: disable=C0301 | 460 | en | 0.691623 |
# Database Lib
"""
Oracle
PostGresSQL
SQLite
SQLServer
Hive
Spark
"""
import os, datetime, pandas, time, re
from collections import namedtuple, OrderedDict
import jmespath
import sqlalchemy
from multiprocessing import Queue, Process
from xutil.helpers import (
log,
elog,
slog,
get_exception_message,
struct,
now,
get_databases,
get_dir_path,
get_profile,
get_variables,
file_exists,
str_rmv_indent,
ptable,
make_rec,
get_error_str,
)
from xutil.diskio import read_yaml, write_csvs
conns = {}
_fwklike = lambda k, v: "lower({}) like lower('{}')".format(k, v)
_fwkeq = lambda k, v: "{} = '{}'".format(k, v)
_fw = lambda sep, _fwkop, **kws: sep.join([_fwkop(k, v) for k, v in kws.items()]) # Format WHERE
fwa = lambda _fwkop=_fwkeq, **kws: _fw(' and ', _fwkop, **kws) # Format WHERE AND
fwo = lambda _fwkop=_fwkeq, **kws: _fw(' or ', _fwkop, **kws) # Format WHERE OR
rows_to_dicts = lambda rows: [row._asdict() for row in rows]
class DBConn(object):
  """Base class for database connections; dialect subclasses (Oracle,
  PostgreSQL, SQLite, SQLServer, Hive, Spark) build on this."""

  # Identity hooks that dialect subclasses may override:
  # _fix_f_name normalizes/quotes a field name; _to_text wraps an
  # expression for text comparison.
  _fix_f_name = lambda self, f: f
  _to_text = lambda self, t: t
def __init__(self, conn_dict, profile=None, echo=False):
  """Initiate connection.

  conn_dict: credentials/settings dict (name, username, type, kwargs, ...).
  profile: optional profile dict carried along for subclasses.
  echo: log connection events.
  """
  self._cred = struct(conn_dict)
  self._cred.kwargs = conn_dict.get('kwargs', {})
  self.name = self._cred.get('name', None)
  self.username = self._cred.get('username', None)
  self.type = self._cred.type
  self.engine = None
  self._cursor_description = None
  self.profile = profile
  self.batch_size = 10000  # rows per insert/update batch
  self.fetch_size = 20000  # rows fetched per round-trip in stream()
  self.echo = echo
  self.connect()
  self.last_connect = now()

  # Base Template: SQL/metadata templates shared by all dialects.
  template_base_path = '{}/database/templates/base.yaml'.format(
    get_dir_path())
  self.template_dict = read_yaml(template_base_path)

  # Specific Type Template: the dialect's YAML overlays the base.
  template_path = '{}/database/templates/{}.yaml'.format(
    get_dir_path(), self.type)
  temp_dict = read_yaml(template_path)

  for key1 in temp_dict:
    # Level 1
    if isinstance(temp_dict[key1], dict):
      if key1 not in self.template_dict:
        self.template_dict[key1] = temp_dict[key1]

      # Level 2: dialect values always win over base values.
      for key2 in temp_dict[key1]:
        # Always Overwrite
        self.template_dict[key1][key2] = temp_dict[key1][key2]
    else:
      # Level 1 Non-Dict Overwrite
      self.template_dict[key1] = temp_dict[key1]

  self.variables = self._template('variables')

  # Environment profile variables override template variables.
  if os.getenv('PROFILE_YAML'):
    other_vars = get_variables()
    for key in other_vars:
      self.variables[key] = other_vars[key]

  self.tmp_folder = self.variables['tmp_folder']
  self.set_variables()  # dialect-specific setup (abstract here)

  if echo:
    # NOTE(review): reads self._cred.user, but only 'username' is read
    # above — confirm the credentials dict actually carries a 'user' key.
    log("Connected to {} as {}".format(self._cred.name, self._cred.user))

def connect(self):
  """Connect to Database."""
  # Engine creation is delegated to the dialect subclass via get_engine().
  self.engine = self.get_engine()
  self.connection = self.engine.connect()
def close(self):
  """Close the underlying SQLAlchemy connection.

  BUG FIX: previously called ``self.conn.connection.close()`` — ``self.conn``
  is never set on this class (``connect`` stores ``self.connection``), so
  ``close`` always raised AttributeError.
  """
  self.connection.close()
def reconnect(self, min_tresh=0):
  """Re-connect to the database when more than `min_tresh` minutes have
  passed since the last successful connect."""
  elapsed_seconds = (now() - self.last_connect).total_seconds()
  if elapsed_seconds <= min_tresh * 60:
    return
  log('Reconnecting to {}...'.format(self.name))
  self.connect()
  self.last_connect = now()
def set_variables(self):
  """Set custom variables (abstract — dialect subclasses must override)."""
  raise Exception("Method 'set_variables' is not implemented!")

def get_dialect(self, echo=False):
  """SQLAlchemy dialect (abstract — dialect subclasses must override)."""
  raise Exception("Method 'get_dialect' is not implemented!")

def get_engine(self, echo=False):
  # Lazily create the SQLAlchemy engine and refresh the inspector.
  # NOTE(review): the `echo` parameter is ignored — create_engine receives
  # self.echo; confirm whether that is intentional.
  import sqlalchemy
  if not self.engine:
    # create_engine is provided by the dialect-specific subclass.
    self.create_engine(echo=self.echo)

  self.engine_inspect = sqlalchemy.inspect(self.engine)
  return self.engine
def check_pk(self, table, fields):
  "Check Primary key to ensure there are not duplicates"
  # `fields` may embed a trailing "where ..." filter clause.
  if 'where' in fields.lower():
    fields, where_clause = fields.lower().split('where')
    where_clause = 'where ' + where_clause
  else:
    where_clause = ''

  sql = '''
  select
    '{table}' as table,
    case when count(1) = count({fields}) then 'PASS' else 'FAIL' end as pk_result
  from {table}
  {where_clause}
  '''.format(
    table=table,
    fields=fields,
    where_clause=where_clause,
  )

  rows = self.query(sql, echo=False)
  print(ptable(self._fields, rows))
  if rows[0].pk_result == 'FAIL':
    raise Exception('PK Text failed for table "{}" with fields "{}"'.format(
      table, fields))
def _do_execute(self, sql):
  """Execute `sql` on the open connection and cache cursor metadata.

  Populates self.result, self._cursor_description and self._fields.
  """
  try:
    self._cursor_description = None
    # NOTE(review): everywhere else the attribute is `_fields`; confirm
    # this bare `fields` reset isn't a typo.
    self.fields = None
    self.result = self.connection.execute(sql)
    self._cursor_description = self.result._cursor_description()
    self._fields = self._get_cursor_fields()
  except Exception as E:
    if 'not open' in get_error_str(E):
      pass  # error when Oracle doesn't have a cursor open
    else:
      log(Exception('Error for SQL:\n' + sql))
      raise E
def execute_multi(self,
                  sql,
                  dtype='namedtuple',
                  limit=None,
                  echo=True,
                  query_name='Record',
                  log=log):
  """
  Execute multiple SQL statements separated by ';'. Returns a generator
  yielding (fields, rows) per statement.
  Example:
    for fields, rows in conn.execute_multi(sql):
      print(fields)
      print(len(rows))
  """
  self.reconnect(min_tresh=10)

  fields = None
  rows = []
  message_mapping = {
    'drop ': 'Dropping {}.',
    'truncate ': 'Truncating {}.',
    'select ': 'Selecting {}.',
    'create ': 'Creating {}.',
    'insert ': 'Inserting {}.',
    'alter ': 'Altering {}.',
    'update ': 'Updating {}.',
    'delete ': 'Deleting {}.',
    'exec ': 'Calling Procedure {}.',
    'grant ': 'Granting {}.',
  }

  sqls = sql.split(';')

  for sql in sqls:
    if not sql.strip(): continue

    sql_ = sql.strip().lower()

    # Announce the statement kind (first two identifiers after the verb).
    for word, message in message_mapping.items():
      if sql_.startswith(word):
        if echo:
          log(
            message.format(' '.join(
              sql_.splitlines()[0].split()[1:3]).upper()))
        break

    # Call procedure with callproc
    if sql_.startswith('exec '):
      procedure = sql_[5:].split('(')[0]
      args = sql_[5:].split('(')[1][:-1].replace("'", '').split(',')
      args = [a.strip() for a in args]

      # BUG FIX: `cursor` was referenced without ever being created, so any
      # 'exec ...' statement raised NameError. Obtain a raw DBAPI cursor the
      # same way `execute` does.
      connection = self.engine.raw_connection()
      try:
        cursor = connection.cursor()
        cursor.callproc(procedure, args)
        cursor.close()
        connection.commit()
      finally:
        connection.close()
      continue

    try:
      self._fields = []
      rows = self.query(
        sql,
        rec_name=query_name,
        dtype=dtype,
        limit=limit,
        echo=echo,
        log=log)
      fields = self._fields

      # Optional inline primary-key check:  -- pk_test: <fields>
      if '-- pk_test:' in sql.lower() and sql_.startswith('create'):
        sql_lines = sql_.splitlines()
        regexp = r'create\s+table\s+(\S*)[\sa-zA-Z\d]+ as'
        table = re.findall(regexp, sql_lines[0])[0]
        line = [
          l for l in sql_lines if l.strip().lower().startswith('-- pk_test:')
        ][0]
        fields = line.split(':')[-1]
        self.check_pk(table, fields)

    except Exception as E:
      message = get_exception_message().lower()
      # NOTE(review): self.error_msg is expected from dialect subclasses.
      if sql_.startswith(
          'drop ') and self.error_msg['table_not_exist'] in message:
        log("WARNING: Table already dropped.")
      else:
        raise E

    if not fields: fields = []
    yield fields, rows
def execute(self,
            sql,
            dtype='tuple',
            limit=None,
            echo=True,
            query_name='Record',
            log=log):
  """Execute SQL, return last result as (fields, rows)."""
  self.reconnect(min_tresh=10)

  fields = None
  rows = []
  message_mapping = {
    'drop ': 'Dropping {}.',
    'truncate ': 'Truncating {}.',
    'select ': 'Selecting {}.',
    'create ': 'Creating {}.',
    'insert ': 'Inserting {}.',
    'alter ': 'Altering {}.',
    'update ': 'Updating {}.',
    'delete ': 'Deleting {}.',
    'exec ': 'Calling Procedure {}.',
    'grant ': 'Granting {}.',
  }

  sql_ = sql.strip().lower()

  # Announce the statement kind (first two identifiers after the verb).
  for word, message in message_mapping.items():
    if sql_.startswith(word):
      if echo:
        log(
          message.format(' '.join(
            sql_.splitlines()[0].split()[1:3]).upper()))
      break

  # Call procedure with callproc on a raw DBAPI cursor.
  if sql_.startswith('exec '):
    procedure = sql_[5:].split('(')[0]
    args = sql_[5:].split('(')[1][:-1].replace("'", '').split(',')
    args = [a.strip() for a in args]

    connection = self.engine.raw_connection()
    try:
      cursor = connection.cursor()
      cursor.callproc(procedure, args)
      self._fields = self._get_cursor_fields(cursor_desc=cursor.description)
      rows = list(cursor.fetchall())
      cursor.close()
      connection.commit()
      # BUG FIX: previously returned the local `fields`, which was still
      # None at this point; return the fields read from the cursor instead.
      return self._fields, rows
    finally:
      connection.close()

  try:
    self._fields = []
    rows = self.query(
      sql,
      rec_name=query_name,
      dtype=dtype,
      limit=limit,
      echo=echo,
      log=log)
    fields = self._fields

    # Optional inline primary-key check:  -- pk_test: <fields>
    if '-- pk_test:' in sql.lower() and sql_.startswith('create'):
      sql_lines = sql_.splitlines()
      regexp = r'create\s+table\s+(\S*)[\sa-zA-Z\d]+ as'
      table = re.findall(regexp, sql_lines[0])[0]
      line = [
        l for l in sql_lines if l.strip().lower().startswith('-- pk_test:')
      ][0]
      fields = line.split(':')[-1]
      self.check_pk(table, fields)

  except Exception as E:
    message = get_exception_message().lower()
    # NOTE(review): self.error_msg is expected from dialect subclasses.
    if sql_.startswith(
        'drop ') and self.error_msg['table_not_exist'] in message:
      log("WARNING: Table already dropped.")
    else:
      raise E

  if not fields: fields = []
  return fields, rows
def insert(self, table, data, echo=False):
  """Insert records of namedtuple or dicts (abstract — dialect subclasses
  implement)."""
  raise Exception('insert not implemented')

def drop_table(self, table, log=log):
  "Drop table"
  try:
    sql = self._template('core.drop_table').format(table)
    self._do_execute(sql)
  except Exception as E:
    # Swallow only the dialect-specific "table does not exist" error,
    # re-raise everything else.
    message = get_exception_message().lower()
    if self._template('error_filter.table_not_exist') in message:
      if self.echo:
        log('Table "{}" already dropped.'.format(table))
    else:
      raise E
def create_table(self, table, field_types, drop=False, log=log):
  """Create a table.

  field_types: mapping of field name -> (generic_type, max_len, dec_len);
  generic types are mapped to dialect types via the 'general_type_map'
  template. When drop=True, any existing table is dropped first.
  """
  if drop:
    self.drop_table(table, log=log)

  new_ftypes = OrderedDict()
  for f in field_types:
    ftype, max_len, dec_len = field_types[f]
    # Build the '(len)' / '(len,dec)' suffix for sized types.
    if dec_len:
      suff = '({},{})'.format(max_len, dec_len)
    elif max_len:
      suff = '({})'.format(max_len)
    else:
      suff = ''

    new_ftypes[f] = self._template('general_type_map')[ftype].replace(
      '()', suff)

  field_types_str = ', \n'.join([
    self._fix_f_name(field) + ' ' + new_ftypes[field] for field in new_ftypes
  ])

  sql = self._template('core.create_table').format(
    table=table,
    col_types=field_types_str,
  )

  # Removed a no-op `try/except Exception as e: raise e` wrapper that only
  # re-raised and obscured the call.
  self._do_execute(sql)

  log('Created table "{}"'.format(table))
def _get_cursor_fields(self, as_dict=False, native_type=True, cursor_desc=None):
  """Get fields of active Select cursor.

  Returns lower-cased field names as a list, or (as_dict=True) an
  OrderedDict mapping name -> type. cursor_desc defaults to the last
  cached DBAPI cursor description.
  """
  fields = OrderedDict()
  cursor_desc = cursor_desc if cursor_desc else self._cursor_description
  if cursor_desc is None:  # fixed: identity comparison, not `== None`
    return []

  for f in cursor_desc:
    f_name = f[0].lower()
    if as_dict:
      if native_type:
        f_type = f[1]
      else:
        f_type = self.reverse_data_map[f[1]]

      # assign float/double as needed — cx_Oracle reports NUMBER for both;
      # the DBAPI precision (f[4]) and scale (f[5]) disambiguate.
      # NOTE(review): original indentation was ambiguous here; confirm this
      # check should apply regardless of native_type.
      if 'cx_Oracle.NUMBER' in str(f[1]):
        if f[4] and f[4] > 11: f_type = 'long'
        if f[5] and f[5] > 0: f_type = 'double'

      fields[f_name] = f_type
    else:
      fields[f_name] = None

  if as_dict:
    return fields

  return list(fields.keys())
def stream(self,
           sql,
           rec_name='Record',
           dtype='namedtuple',
           yield_chuncks=False,
           chunk_size=None,
           limit=None,
           echo=True):
  """Stream Select from SQL, yield records as they come in.

  dtype: 'namedtuple' (default), 'tuple' or 'dataframe'.
  yield_chuncks (sic — name kept for API compatibility): when True, yield
  batches (lists or DataFrames) of up to fetch_size rows instead of
  individual records. chunk_size/limit override the batch size.
  """
  self.reconnect(min_tresh=10)

  if echo: log("Streaming SQL for '{}'.".format(rec_name))

  # limit takes precedence, then chunk_size, then the connection default.
  fetch_size = limit if limit else self.fetch_size
  fetch_size = chunk_size if chunk_size else fetch_size

  try:
    self._do_execute(sql)
  except Exception as e:
    raise e

  if dtype == 'tuple':
    make_rec = lambda row: row
    make_batch = lambda rows: rows
  elif dtype == 'dataframe':
    yield_chuncks=True  # DataFrames only make sense as batches
    make_batch = lambda rows: pandas.DataFrame(rows, columns=self._fields)
  else:
    Record = namedtuple(
      rec_name.replace(' ', '_').replace('.', '_'), self._fields)
    make_rec = lambda row: Record(*row)
    make_batch = lambda rows: [make_rec(r) for r in rows]

  self._stream_counter = 0

  while True:
    if not self._fields:
      break
    rows = self.result.fetchmany(fetch_size)
    if rows:
      if yield_chuncks:
        batch = make_batch(rows)
        self._stream_counter += len(batch)
        if len(batch):
          yield batch
      else:
        for row in rows:
          self._stream_counter += 1
          yield make_rec(row)
    else:
      break

    if limit:
      # With a limit, exactly one batch of `limit` rows was fetched; stop.
      break

  # log('Stream finished at {} records.'.format(self._stream_counter))
def query(self,
          sql,
          rec_name='Record',
          dtype='namedtuple',
          limit=None,
          echo=True,
          retrying=False,
          log=log):
  """Select from SQL; return a list of namedtuples/tuples or a DataFrame.

  `retrying` is accepted for interface compatibility but unused here.
  """
  self.reconnect(min_tresh=10)

  s_t = datetime.datetime.now()
  _data = list(self.stream(sql, dtype=dtype, echo=False, limit=limit))

  if not self.result.closed:
    self.result.close()

  fields = self._fields
  if not fields: return []

  # The previous implementation duplicated each dtype branch for the
  # `limit` case with byte-identical bodies; the duplication was removed.
  if dtype == 'namedtuple':
    Record = namedtuple(rec_name.replace(' ', '_').replace('.', '_'), fields)
    data = [Record(*row) for row in _data]
  elif dtype == 'tuple':
    data = [tuple(row) for row in _data]
  elif dtype == 'dataframe':
    data = pandas.DataFrame(_data, columns=fields)
  else:
    raise (Exception('{} is not recongnized.'.format(dtype)))

  secs = (datetime.datetime.now() - s_t).total_seconds()
  rate = round(len(data) / secs, 1)
  if echo:
    log(" >>> Got {} rows in {} secs [{} r/s].".format(
      len(data), secs, rate))
  return data
def _split_schema_table(self, table_name):
  """Split 'schema.table'; default to the login user when no schema given."""
  if '.' in table_name:
    schema, table = table_name.split('.')
  else:
    schema, table = self.username, table_name
  return schema, table

def _concat_fields(self, fields, as_text=False):
  """ANSI SQL concatenation of field expressions (dialects may override)."""
  return ' || '.join(fields)

def _template(self, template_key_str):
  """Look up a jmespath key path in the loaded YAML template dict;
  string values are de-indented."""
  value = jmespath.search(template_key_str, self.template_dict)
  if isinstance(value, str):
    value = str_rmv_indent(value)
  return value
def get_schemas(self, echo=True):
  """Get list of schemas as Schemas(schema) namedtuples."""
  Rec = namedtuple('Schemas', 'schema')
  self._fields = Rec._fields

  # Prefer the dialect's metadata SQL; fall back to SQLAlchemy reflection.
  sql_tmpl = self._template('metadata.schemas')
  if sql_tmpl:
    schemas = [r[0] for r in self.query(sql_tmpl)]
  else:
    # http://docs.sqlalchemy.org/en/rel_0_9/core/reflection.html#sqlalchemy.engine.reflection.Inspector.get_schemas
    self.get_engine(echo=echo)
    schemas = self.engine_inspect.get_schema_names()

  rows = [Rec(s) for s in schemas]
  return rows
def get_objects(self, schema, object_type='all', echo=True):
  """Get metadata for objects. object_type in 'all', 'table', 'view'.

  Returns Table(schema, object_name, object_type) namedtuples.
  BUG FIX: the 'view' branch previously did `rows += ...` with `rows`
  unbound, raising UnboundLocalError whenever object_type='view'.
  """
  Rec = namedtuple('Table', 'schema object_name object_type')
  self._fields = Rec._fields

  def get_rec(object_name, object_type):
    r_dict = dict(
      schema=schema, object_name=object_name, object_type=object_type)
    return Rec(**r_dict)

  if object_type not in ('all', 'table', 'view'):
    raise Exception('Object type "{}" not supported!'.format(object_type))

  rows = []
  if object_type in ('all', 'table'):
    # NOTE(review): get_tables returns plain tuples; `r.table` attribute
    # access assumes namedtuples — confirm which shape is current.
    table_rows = self.get_tables(schema)
    rows += [get_rec(r.table, 'table') for r in sorted(table_rows)]
  if object_type in ('all', 'view'):
    view_rows = self.get_views(schema)
    rows += [get_rec(r.view, 'view') for r in sorted(view_rows)]

  return rows
def get_tables(self, schema, echo=True):
  """Get metadata for tables as (schema, table) tuples.

  `schema` may be a single name or a list of names.
  """
  schemas = schema if isinstance(schema, list) else [schema]

  def get_tables_for(schema):
    def get_rec(table):
      # Plain tuples are returned (instead of a namedtuple) because a
      # dynamically created namedtuple class can't be pickled across
      # process boundaries. (Removed the unreachable namedtuple code that
      # followed this return; it also referenced an undefined variable.)
      self._fields = ['schema', 'table']
      return tuple([schema, table])

    sql_tmpl = self._template('metadata.tables')
    if sql_tmpl:
      tables = self.query(sql_tmpl.format(schema=schema))
      if hasattr(self, '_std_get_tables'):
        tables = self._std_get_tables(schema, tables)
    else:
      self.get_engine(echo=echo)
      tables = self.engine_inspect.get_table_names(schema)

    return [get_rec(v) for v in sorted(tables)]

  rows = []
  for schema in schemas:
    for row in get_tables_for(schema):
      rows.append(row)

  return rows
def get_views(self, schema, echo=True):
  """Get metadata for views as (schema, view) tuples.

  `schema` may be a single name or a list of names.
  """
  schemas = schema if isinstance(schema, list) else [schema]

  def get_views_for(schema):
    def get_rec(view):
      # Plain tuples are returned (instead of a namedtuple) because a
      # dynamically created namedtuple class can't be pickled across
      # process boundaries. (Removed the unreachable namedtuple code that
      # followed this return.)
      self._fields = ['schema', 'view']
      return tuple([schema, view])

    sql_tmpl = self._template('metadata.views')
    if sql_tmpl:
      views = [r[0] for r in self.query(sql_tmpl.format(schema=schema))]
    else:
      self.get_engine(echo=echo)
      views = self.engine_inspect.get_view_names(schema)

    return [get_rec(v) for v in sorted(views)]

  rows = []
  for schema in schemas:
    for row in get_views_for(schema):
      rows.append(row)

  return rows
def get_columns(self,
                table_name,
                object_type=None,
                echo=False,
                include_schema_table=True,
                native_type=True):
  """Get column metadata for table.

  table_name: one 'schema.table' string or a list of them.
  native_type: when False, map dialect types to the generic names from the
  'native_type_map' template.
  """
  if include_schema_table:
    headers = 'schema table id column_name type nullable default autoincrement'
  else:
    headers = 'id column_name type nullable default autoincrement'

  Rec = namedtuple('Columns', headers)
  self._fields = Rec._fields
  all_rows = []

  table_names = table_name if isinstance(table_name, list) else [table_name]

  for table_name in table_names:
    schema, table = self._split_schema_table(table_name)

    def get_rec(r_dict, column_order):
      # Normalize one inspector/query column dict into a Rec.
      if include_schema_table:
        r_dict['schema'] = schema
        r_dict['table'] = table

      r_dict['column_name'] = r_dict['name']
      r_dict['type'] = str(r_dict['type'])

      if not native_type:
        r_dict['type']= r_dict['type'].lower()
        # Strip any '(len)' suffix before the generic lookup.
        r_dict['type'] = r_dict['type'].split('(')[0] if '(' in r_dict[
          'type'] else r_dict['type']
        native_type_map = self._template('native_type_map')
        if not r_dict['type'] in native_type_map:
          raise Exception('Field type "{}" not in native_type_map for {}'.format(r_dict['type'], self.type))
        r_dict['type'] = native_type_map[r_dict['type']]

      r_dict['id'] = column_order

      # Drop any keys the Rec doesn't declare (inspector dicts carry extras).
      for k in list(r_dict):
        if k not in headers.split():
          del r_dict[k]

      if '(' in r_dict['type']:
        r_dict['type'] = r_dict['type'].split('(')[0]

      return Rec(**r_dict)

    # Prefer the dialect's metadata SQL; fall back to SQLAlchemy reflection.
    sql_tmpl = self._template('metadata.columns')
    if sql_tmpl:
      rows = self.query(sql_tmpl.format(table=table, schema=schema))
      if hasattr(self, '_std_get_columns'):
        rows = self._std_get_columns(schema, table, rows)
    else:
      self.get_engine(echo=echo)
      rows = self.engine_inspect.get_columns(table, schema=schema)

    all_rows += [get_rec(r_dict, i + 1) for i, r_dict in enumerate(rows)]

  self._fields = Rec._fields
  return all_rows
def get_primary_keys(self, table_name, echo=False):
  """Get PK metadata for table as PKs(...) namedtuples."""
  Rec = namedtuple('PKs', 'schema table pk_name column_name column_order')
  self._fields = Rec._fields
  schema, table = self._split_schema_table(table_name)

  def get_rec(col, pk_name, column_order):
    r_dict = {}
    r_dict['schema'] = schema
    r_dict['table'] = table
    r_dict['pk_name'] = pk_name
    r_dict['column_name'] = col
    r_dict['column_order'] = column_order
    return Rec(**r_dict)

  sql_tmpl = self._template('metadata.primary_keys')
  if sql_tmpl:
    # NOTE(review): this branch returns raw query rows, not Recs — it relies
    # on the template's SELECT list matching Rec's fields; confirm.
    rows = self.query(sql_tmpl.format(table=table, schema=schema))
  else:
    self.get_engine(echo=echo)
    r_dict = self.engine_inspect.get_pk_constraint(table, schema=schema)
    rows = [
      get_rec(col, r_dict['name'], i + 1)
      for i, col in enumerate(r_dict['constrained_columns'])
    ]

  return rows
def get_indexes(self, table_name, echo=False):
  """Get indexes metadata for table, one record per indexed column.

  BUG FIX: the per-index expansion helper is a generator, and the old code
  built ``[get_rec(d) for d in rows]`` — a list of *generator objects*, not
  records; iterating them would then have raised TypeError because
  ``Rec(**r_dict)`` received the extra 'column_names' key. Records are now
  built with explicit kwargs and the generators are flattened.
  """
  Rec = namedtuple(
    'Indexes', 'schema table index_name column_name column_order unique')
  self._fields = Rec._fields
  schema, table = self._split_schema_table(table_name)

  def get_recs(r_dict):
    # Expand one inspector index dict into one Rec per indexed column.
    base = dict(
      schema=schema,
      table=table,
      index_name=r_dict['name'],
      unique=str(r_dict['unique']),
    )
    for i, col in enumerate(r_dict['column_names']):
      yield Rec(column_name=col, column_order=i + 1, **base)

  sql_tmpl = self._template('metadata.indexes')
  if sql_tmpl:
    rows = self.query(sql_tmpl.format(table=table, schema=schema))
  else:
    self.get_engine(echo=echo)
    rows = [
      rec for r_dict in self.engine_inspect.get_indexes(table, schema=schema)
      for rec in get_recs(r_dict)
    ]

  return rows
def get_ddl(self, table_name, object_type=None, echo=True):
  """Get DDL text for a table/view as DDL(ddl) namedtuples."""
  Rec = namedtuple('DDL', 'ddl')
  self._fields = Rec._fields
  schema, table = self._split_schema_table(table_name)

  sql_tmpl = self._template('metadata.ddl')
  if sql_tmpl:
    rows = self.query(
      sql_tmpl.format(
        schema=schema,
        table=table,
        obj_type=object_type,
      ))
  else:
    # Fall back to SQLAlchemy reflection (view definitions only).
    self.get_engine(echo=echo)
    ddl = self.engine_inspect.get_view_definition(table, schema=schema)
    rows = [Rec(ddl)] if ddl else []

  self._fields = Rec._fields
  return rows
def get_all_columns(self):
  """Get all columns for all tables / views (template-backed only)."""
  sql_tmpl = self._template('metadata.all_columns')
  if not sql_tmpl:
    raise Exception('get_all_columns not implemented for {}'.format(
      self.type))
  return self.query(sql_tmpl)

def get_all_tables(self, filter, as_sql=False):
  """Get all tables / views matching `filter` (template-backed only)."""
  # `filter` shadows the builtin but is kept for caller compatibility.
  sql_tmpl = self._template('metadata.all_tables')
  if not sql_tmpl:
    raise Exception('get_all_tables not implemented for {}'.format(self.type))
  sql = sql_tmpl.format(filter=filter)
  if as_sql:
    return sql
  return self.query(sql, echo=False)
def analyze_fields(self,
                   analysis,
                   table_name,
                   fields=[],
                   as_sql=False,
                   union=True,
                   expr_func_map={},
                   **kwargs):
  """Base function for field level analysis.

  analysis: key under the 'analysis' template section.
  expr_func_map: contains mapping for expression to SQL function to all fields.
  NOTE(review): mutable default arguments (fields, expr_func_map) are kept
  for API compatibility; callers must not rely on them persisting.
  """
  if '.' not in table_name:
    raise Exception("table_name must have schema and name in it with a '.'")
  if analysis not in self.template_dict['analysis']:
    raise Exception("'{}' not found in template for '{}'.".format(
      analysis, self.type))

  schema, table = self._split_schema_table(table_name)

  # get field type
  field_rows = self.get_columns(table_name)
  field_type = {r.column_name.lower(): r.type for r in field_rows}

  if not fields:
    fields = [r.column_name for r in field_rows]

  # Expand each expression into one rendered SQL function per column.
  for expr in list(expr_func_map):
    tmpl_path = 'function.' + expr_func_map[expr]
    expr_func_map[expr] = ',\n'.join([
      self._template(tmpl_path).format(field=field)
      for field in [r.column_name for r in field_rows]
    ])

  # One analysis statement per field, unioned (or ';'-separated).
  sep = ' \nunion all\n' if union else ' \n ;\n'
  sql = sep.join([
    self._template('analysis.' + analysis).format(
      schema=schema,
      field=field,
      table=table,
      type=field_type[field.lower()] if field else '',
      **expr_func_map,
      **kwargs) for field in fields
  ])
  return sql if as_sql else self.query(sql, analysis, echo=False)

def analyze_tables(self, analysis, tables=[], as_sql=False, **kwargs):
  """Base function for table level analysis."""
  if analysis not in self.template_dict['analysis']:
    raise Exception("'{}' not found in template for '{}'.".format(
      analysis, self.type))

  if not tables and 'schema' in kwargs:
    # get all tables
    # NOTE(review): get_schemas is called with the schema name as its first
    # positional arg, which is get_schemas' `echo` parameter, and rows are
    # read as .object_name — confirm this shouldn't be get_objects(...).
    rows = self.get_schemas(kwargs['schema'])
    crt_obj = lambda r: struct(dict(schema=r.schema, table=r.object_name))
    objs = [crt_obj(r) for r in rows]
  else:
    crt_obj = lambda schema, table: struct(dict(schema=schema, table=table))
    objs = [crt_obj(*self._split_schema_table(t)) for t in tables]

  sql = ' \nunion all\n'.join([
    self._template('analysis.' + analysis).format(
      schema=obj.schema, table=obj.table, **kwargs) for obj in objs
  ])

  return sql if as_sql else self.query(sql, analysis, echo=False)

def analyze_join_match(self,
                       t1,
                       t2,
                       t1_field,
                       t2_field,
                       t1_filter='1=1',
                       t2_filter='1=1',
                       as_sql=False,
                       as_text=True,
                       lowercase=True):
  """Render/run the 'table_join_match' analysis comparing join keys of two
  tables. t1_field/t2_field are comma-separated column lists."""
  def get_kwargs(t1, t2, t1_field, t2_field, t1_filter, t2_filter):
    # Build the template kwargs: concatenated key expressions, join
    # conditions and per-table filters.
    t1_field_arr = ['t1.' + f for f in t1_field.split(',')]
    t2_field_arr = ['t2.' + f for f in t2_field.split(',')]
    t1_field_concat = self._concat_fields(t1_field_arr, as_text=as_text)
    t2_field_concat = self._concat_fields(t2_field_arr, as_text=as_text)
    to_text = self._to_text

    if lowercase:
      conds = ' and '.join([
        'lower({}) = lower({})'.format(to_text(f), to_text(t2_field_arr[i]))
        for i, f in enumerate(t1_field_arr)
      ])
    else:
      conds = ' and '.join([
        '{} = {}'.format(to_text(f), to_text(t2_field_arr[i]))
        for i, f in enumerate(t1_field_arr)
      ])

    t1_fields1 = t1_field
    t2_fields1 = t2_field
    t1_field = ', '.join(['t1.' + f for f in t1_field_arr])
    t2_field = ', '.join(['t2.' + f for f in t2_field_arr])

    return dict(
      t1=t1,
      t1_field=t1_field_concat,
      t1_fields1=t1_fields1,
      t1_filter=t1_filter,
      t2=t2,
      t2_field=t2_field_concat,
      t2_fields1=t2_fields1,
      t2_filter=t2_filter,
      conds=conds,
    )

  kwargs = get_kwargs(
    t1=t1,
    t2=t2,
    t1_field=t1_field,
    t2_field=t2_field,
    t1_filter=t1_filter,
    t2_filter=t2_filter,
  )
  sql = self.analyze_fields(
    'table_join_match', t1, [''], as_sql=True, **kwargs)
  return sql if as_sql else self.query(sql, 'table_join_match', echo=False)
def get_conn(db,
             dbs=None,
             echo=True,
             reconnect=False,
             use_jdbc=False,
             conn_expire_min=10,
             spark_hive=False) -> DBConn:
  """Return a (possibly cached) DBConn for profile entry `db`.

  Connections are cached in the module-level `conns` dict and reused until
  `conn_expire_min` minutes after their last connect, unless reconnect=True.
  """
  global conns

  dbs = dbs if dbs else get_databases()
  profile = get_profile()
  db_dict = struct(dbs[db])

  # Optionally route Hive connections through Spark.
  if db_dict.type.lower() == 'hive' and spark_hive:
    db_dict.type = 'spark'

  use_jdbc = True if (use_jdbc or ('use_jdbc' in db_dict
                                   and db_dict['use_jdbc'])) else use_jdbc

  # Serve from cache while the connection is fresh enough.
  if db in conns and not reconnect:
    if (now() - conns[db].last_connect).total_seconds() / 60 < conn_expire_min:
      return conns[db]

  # Dialect dispatch; imports are local so optional drivers load lazily.
  if use_jdbc:
    log('*USING JDBC for ' + db)
    from .jdbc import JdbcConn
    conn = JdbcConn(db_dict, profile=profile)
  elif db_dict.type.lower() == 'oracle':
    from .oracle import OracleConn
    conn = OracleConn(db_dict, echo=echo)
  elif db_dict.type.lower() == 'spark':
    from .spark import SparkConn
    conn = SparkConn(db_dict, echo=echo)
  elif db_dict.type.lower() == 'hive':
    from .hive import HiveConn, Beeline
    if 'use_beeline' in db_dict and db_dict.use_beeline:
      conn = Beeline(db_dict, echo=echo)
    else:
      conn = HiveConn(db_dict, echo=echo)
  elif db_dict.type.lower() in ('postgresql', 'redshift'):
    from .postgresql import PostgreSQLConn
    conn = PostgreSQLConn(db_dict, echo=echo)
  elif db_dict.type.lower() == 'sqlserver':
    from .sqlserver import SQLServerConn
    conn = SQLServerConn(db_dict, echo=echo)
  elif db_dict.type.lower() == 'sqlite':
    from .sqlite import SQLiteConn
    conn = SQLiteConn(db_dict, echo=echo)
  else:
    raise Exception(f'Type {db_dict.type} not handled!')

  conns[db] = conn
  return conn
class SqlX:
"""
SQL Express functions. Supports CRUD transactional operations.
Suppose there is a table named 'cache', sqlx allows:
sqlx.x('cache').insert(rows)
sqlx.x('cache').insert_one(row)
sqlx.x('cache').add(**kws)
sqlx.x('cache').delete(where)
sqlx.x('cache').update(rows, pk_fields)
sqlx.x('cache').update_one(row, pk_cols)
sqlx.x('cache').replace(rows, pk_fields)
sqlx.x('cache').query(where)
sqlx.x('cache').select_one(where)
"""
def __init__(self, conn: DBConn, table, schema, ntRec: namedtuple):
self.conn = conn
self.table = table
self.schema = schema
self.ntRec = ntRec
self.pk_fields = None
self.table_obj = schema + '.' + table if schema else table
self.insert_one = lambda row: self.insert([row])
self.add = lambda **kws: self.insert([self.ntRec(**kws)])
self.update_one = lambda row, pk_cols=None: self.update([row], pk_cols)
self.update_rec=lambda pk_cols=None, **kws: self.update([make_rec(**kws)], pk_cols)
self.replace_one = lambda row, pk_cols=None: self.replace([row], pk_cols)
self.replace_rec=lambda pk_cols=None, **kws: self.replace([make_rec(**kws)], pk_cols)
# self.select_one = lambda where: self.select_one(where, one=True)
def _get_pk(self):
if not self.pk_fields:
pk_rows = self.conn.get_primary_keys(self.table_obj)
self.pk_fields = [r.column_name for r in pk_rows]
return self.pk_fields
def insert(self, data):
return self.conn.insert(self.table_obj, data)
def update(self, data, pk_fields=None):
if not pk_fields:
pk_fields = self._get_pk()
if not pk_fields:
raise Exception("Need Keys to perform UPDATE!")
t_fields = [x.lower() for x in data[0]._fields]
for f in pk_fields:
if not f.lower() in t_fields:
# if keys not provided, need to make sure PK values are provided in data records
raise Exception(
"Value of PK field '{}' must be provided to perform UPDATE!".
format(f))
self.conn.update(self.table_obj, data, pk_fields, echo=False)
def update_one(self, row, pk_cols=None):
self.update([row], pk_cols)
def update_rec(self, pk_cols=None, **kws):
self.update([make_rec(**kws)], pk_cols)
def replace(self, data, pk_fields=None):
    """Upsert records, resolving key columns from the table when omitted."""
    keys = pk_fields or self._get_pk()
    self.conn.replace(self.table_obj, data, keys, echo=False)
# def replace_rec(self, pk_cols=None, **kws):
# # add default None?
# for field in self.ntRec._fields:
# kws[field] = kws.get(field, None)
# self.replace([self.ntRec(**kws)], pk_cols)
def query(self, where='1=1', one=False, limit=None, as_dict=False):
    """SELECT * from the table with an optional WHERE clause.

    Returns a list of rows; with ``one`` set, the first row or None.
    NOTE(review): ``where`` is interpolated into the SQL text — callers must
    not pass untrusted input here.
    """
    sql = "select * from {} where {}".format(self.table_obj, where)
    result = self.conn.query(sql, echo=False, limit=limit)
    if as_dict:
        result = rows_to_dicts(result)
    if not one:
        return result
    return result[0] if result else None
def select_one(self, where, field=None, as_dict=False):
    """Return the first row matching ``where``, or one field of that row.

    field   -- when given, return only this column of the matched row.
    as_dict -- treat rows as dicts (subscript access) instead of namedtuples.
    Returns None when no row matches.
    """
    row = self.query(where, one=True, as_dict=as_dict)
    if field and row:
        # getattr() is the idiomatic spelling of row.__getattribute__(field).
        return row[field] if as_dict else getattr(row, field)
    return row
def delete(self, where):
    """DELETE rows matching ``where`` (a caller-supplied SQL fragment)."""
    stmt = "delete from {} where {}".format(self.table_obj, where)
    self.conn.execute(stmt)
def make_sqlx(conn, schema, tables):
    "Make sqlx lookup function for given tables"
    registry = {}
    for name in tables:
        # One SqlX facade per table, with a record type matching its columns.
        rec_type = namedtuple(name, tables[name].columns.keys())
        registry[name] = SqlX(conn, name, schema, rec_type)

    def sqlx(expr) -> SqlX:
        """Look up a table facade by jmespath expression."""
        found = jmespath.search(expr, registry)
        if not found:
            raise Exception('sqlx: Cannot find "{}"'.format(expr))
        return found

    return sqlx
def get_sql_sources(sql_text, echo=False):
    """Obtain the source tables of a query.

    Returns a dict keyed by the CREATE'd table name (or by the statement's
    index when nothing is created); each value is the sorted list of source
    table names referenced by that statement. CTE aliases are tracked in
    ``cte_aliases`` so they are not reported as real source tables.
    """
    import sqlparse

    # replace "as(" with "as (" — the bare form trips up the sql parser in CTEs
    sql_text = re.sub(r"as\(", "as (", sql_text, 0, re.MULTILINE | re.IGNORECASE)

    statements = sqlparse.parse(sql_text)
    cte_aliases = set()  # lowercase names of WITH aliases seen so far
    sql_sources = {}     # result: created-table-or-index -> sorted source names

    def get_sources(statement):
        # Scan one token group, recursing into parentheses (subqueries/CTEs).
        sources_dict = {}
        last_kw_from = False   # previous keyword was FROM
        last_kw_join = False   # previous keyword contained JOIN
        cte_mode = False       # inside a WITH ... clause
        last_tok = None
        # NOTE(review): last_tok is None for the very first token; that is
        # only safe because last_kw_from/last_kw_join start False.
        done = False
        # The while loop runs exactly once (done is set after the for loop);
        # kept as-is to preserve the original control flow.
        while not done:
            for tok in statement.tokens:
                if tok.is_group:
                    if cte_mode and isinstance(tok, sqlparse.sql.IdentifierList):
                        # Multiple CTEs: "with a as (...), b as (...)".
                        for tok2 in tok.tokens:
                            if isinstance(tok2, sqlparse.sql.Identifier):
                                for tok3 in tok2.tokens:
                                    if isinstance(tok3, sqlparse.sql.Parenthesis):
                                        cte_aliases.add(tok3.parent.normalized.lower())
                                        sources_dict2 = get_sources(tok3)
                                        sources_dict = {**sources_dict, **sources_dict2}
                    elif isinstance(tok, sqlparse.sql.Parenthesis):
                        # Bare subquery: recurse into it.
                        sources_dict2 = get_sources(tok)
                        sources_dict = {**sources_dict, **sources_dict2}
                    else:
                        # Single CTE or aliased subquery inside a group.
                        for tok2 in tok.tokens:
                            if isinstance(tok2, sqlparse.sql.Parenthesis):
                                cte_aliases.add(tok2.parent.normalized.lower())
                                sources_dict2 = get_sources(tok2)
                                sources_dict = {**sources_dict, **sources_dict2}
                # A table reference follows FROM/JOIN + whitespace.
                if (last_kw_from or last_kw_join) and last_tok.is_whitespace:
                    if isinstance(tok, sqlparse.sql.IdentifierList):
                        # Comma-separated FROM list.
                        for tok2 in tok.tokens:
                            if isinstance(tok2, sqlparse.sql.Identifier) and '(' in tok2.value:
                                # Inline subquery in the FROM list.
                                sources_dict2 = get_sources(tok2)
                                sources_dict = {**sources_dict, **sources_dict2}
                            elif isinstance(tok2, sqlparse.sql.Identifier) and tok2.normalized.lower() not in cte_aliases:
                                if echo: log('+Table = ' + tok2.normalized.lower())
                                sources_dict[tok2.normalized.lower()] = tok.parent
                    elif isinstance(tok, sqlparse.sql.Identifier) and tok.normalized.lower() not in cte_aliases:
                        if echo: log('+Table = ' + tok.normalized.lower())
                        sources_dict[tok.normalized.lower()] = tok.parent
                    last_kw_join = False
                # Keyword state machine driving the FROM/JOIN detection above.
                if tok.is_keyword and tok.normalized == 'WITH':
                    cte_mode = True
                    last_kw_from = False
                elif tok.is_keyword and tok.normalized == 'GROUP':
                    last_kw_join = False
                    last_kw_from = False
                elif tok.is_keyword and tok.normalized == 'WHERE':
                    last_kw_join = False
                    last_kw_from = False
                elif tok.is_keyword and tok.normalized == 'ORDER':
                    last_kw_join = False
                    last_kw_from = False
                elif tok.is_keyword and tok.normalized == 'CREATE':
                    cte_mode = True
                    last_kw_from = False
                elif tok.is_keyword and tok.normalized == 'SELECT':
                    cte_mode = False
                    last_kw_from = False
                elif tok.is_keyword and tok.normalized == 'FROM':
                    last_kw_from = True
                elif tok.is_keyword and 'JOIN' in tok.normalized:
                    last_kw_join = True
                last_tok = tok
            done = True
        return sources_dict

    for s, statement in enumerate(statements):
        # First pass over the statement: detect CREATE TABLE <name> and
        # whether there is any FROM clause worth analysing.
        has_from = False
        last_kw_create = False
        last_kw_create_table = False
        create_table = None
        for tok in statement.tokens:
            if isinstance(tok, sqlparse.sql.Identifier) and last_kw_create_table:
                create_table = tok.normalized
                last_kw_create_table = False
                last_kw_create = False
                if echo: log('-CREATE TABLE ' + create_table)
            if tok.is_keyword and tok.normalized == 'TABLE' and last_kw_create:
                last_kw_create_table = True
            if tok.is_keyword and tok.normalized == 'CREATE':
                last_kw_create = True
            if tok.is_keyword and tok.normalized == 'FROM':
                has_from = True
            last_tok = tok
        if has_from:
            sources_dict = get_sources(statement)
            if create_table:
                # Key the result by the created table name when available.
                sql_sources[create_table] = sorted(sources_dict)
            else:
                sql_sources[s] = sorted(sources_dict)

    return sql_sources
| xutil/database/base.py | 38,466 | Base class for database connections
SQL Express functions. Supports CRUD transactional operations.
Suppose there is a table named 'cache', sqlx allows:
sqlx.x('cache').insert(rows)
sqlx.x('cache').insert_one(row)
sqlx.x('cache').add(**kws)
sqlx.x('cache').delete(where)
sqlx.x('cache').update(rows, pk_fields)
sqlx.x('cache').update_one(row, pk_cols)
sqlx.x('cache').replace(rows, pk_fields)
sqlx.x('cache').query(where)
sqlx.x('cache').select_one(where)
Initiate connection
Get fields of active Select cursor
Base function for field level analysis
expr_func_map: contains mapping for expression to SQL function to all fields
Base function for table level analysis
Check Primary key to ensure there are not duplicates
Close database connection
Connect to Database
Create table
Drop table
Execute SQL, return last result
Execute multiple SQL statements separated by ';'. Returns a generator.
Example:
for fields, rows in conn.execute(sql):
print(fields)
print(len(rows))
Get all columns for all tables / views
Get all tables / views
Get column metadata for table
Get ddl for table
SQLAlchemy dialect
Get indexes metadata for table
Get metadata for objects. object_type in 'all', 'table', 'view'
Get PK metadata for table
Get list of schemas.
Obtain the source tables of a query
Get metadata for tables.
Get metadata for views.
Insert records of namedtuple or dicts
Make sqlx lookup function for given tables
Select from SQL, return list of namedtuples
Re-Connect to Database if minute threshold reached
Set custom variables
Stream Select from SQL, yield records as they come in
Oracle
PostgreSQL
SQLite
SQLServer
Hive
Spark
Database Lib Format WHERE Format WHERE AND Format WHERE OR Base Template Specific Type Template Level 1 Level 2 Always Overwrite Level 1 Non-Dict Overwrite error when Oracle doesn't have a cursor open Call procedure with callproc Call procedure with callproc log('Creating table: \n' + sql)) assign floa/double as needed log('Stream finished at {} records.'.format(self._stream_counter)) if echo: log("Running SQL for '{}'.".format(rec_name)) http://docs.sqlalchemy.org/en/rel_0_9/core/reflection.htmlsqlalchemy.engine.reflection.Inspector.get_schemas Getting pickle.PicklingError: Can't pickle <class 'xutil.database.base.Table'> pickle.PicklingError: Can't pickle <class 'xutil.database.base.View'> get field type get all tables self.select_one = lambda where: self.select_one(where, one=True) if keys not provided, need to make sure PK values are provided in data records def replace_rec(self, pk_cols=None, **kws): add default None? for field in self.ntRec._fields: kws[field] = kws.get(field, None) self.replace([self.ntRec(**kws)], pk_cols) return table_func_map replace "as(" to "as (" this trips up the sql parser in CTEs | 2,784 | en | 0.484071 |
# flake8: noqa
# Disable Flake8 because of all the sphinx imports
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Airflow documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 9 20:50:01 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
"""Configuration of Airflow Docs"""
import os
import sys
from glob import glob
from typing import List
import airflow
from airflow.configuration import default_config_yaml
# Probe for the dedicated Airflow Sphinx theme; fall back gracefully when the
# package is not installed (html_theme selection below depends on this flag).
try:
    import sphinx_airflow_theme  # pylint: disable=unused-import

    airflow_theme_is_available = True
except ImportError:
    airflow_theme_is_available = False

# Hack to allow a piece of the code to behave differently while
# the docs are being built. The main objective was to alter the
# behavior of utils.apply_default, which was hiding function headers.
os.environ['BUILDING_AIRFLOW_DOCS'] = 'TRUE'
# == Sphinx configuration ======================================================

# -- Project information -------------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information

# General information about the project.
project = 'Airflow'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '1.0.0'
version = airflow.__version__
# The full version, including alpha/beta/rc tags.
# release = '1.0.0'
release = airflow.__version__

# -- General configuration -----------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones. The unqualified names (provider_init_hack, exampleinclude, docroles,
# ...) are local extensions resolved via the sys.path entry added below.
extensions = [
    'provider_init_hack',
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode',
    'sphinx.ext.graphviz',
    'sphinxarg.ext',
    'sphinxcontrib.httpdomain',
    'sphinxcontrib.jinja',
    'sphinx.ext.intersphinx',
    'autoapi.extension',
    'exampleinclude',
    'docroles',
    'removemarktransform',
    'sphinx_copybutton',
    'redirects',
    'providers_packages_ref',
    # First, generate redoc
    'sphinxcontrib.redoc',
    # Second, update redoc script
    "sphinx_script_update",
    "sphinxcontrib.spelling",
]

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.join(os.path.dirname(__file__), 'exts'))

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns: List[str] = [
    # We only link to selected subpackages.
    '_api/airflow/index.rst',
    # We have custom page - operators-and-hooks-ref.rst
    '_api/airflow/providers/index.rst',
    # Packages with subpackages
    "_api/airflow/providers/microsoft/index.rst",
    "_api/airflow/providers/apache/index.rst",
    "_api/airflow/providers/cncf/index.rst",
    # Templates or partials
    'autoapi_templates',
    'howto/operator/google/_partials',
    'howto/operator/microsoft/_partials',
]
# Repository root (one level above the docs/ directory).
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))


def _get_rst_filepath_from_path(filepath: str):
    """Map a source path to its generated AutoAPI ``index.rst`` document path."""
    if os.path.isdir(filepath):
        base = filepath
    elif os.path.isfile(filepath) and filepath.endswith('/__init__.py'):
        # A package __init__ is documented at the package directory level.
        base = filepath.rpartition("/")[0]
    else:
        # A plain module: drop the file extension.
        base = filepath.rpartition(".")[0]
    return f"_api/{os.path.relpath(base + '/index.rst', ROOT_DIR)}"
# Exclude top-level packages
# do not exclude these top-level modules from the doc build:
_allowed_top_level = ("exceptions.py",)
# Subpackages kept browsable in the generated API docs.
# Hoisted out of the loop below: the list is loop-invariant and was being
# rebuilt on every iteration.
browsable_packages = ["operators", "hooks", "sensors", "providers", "executors", "models", "secrets"]
for path in glob(f"{ROOT_DIR}/airflow/*"):
    name = os.path.basename(path)
    # Hide every top-level module except the explicitly allowed ones.
    if os.path.isfile(path) and not path.endswith(_allowed_top_level):
        exclude_patterns.append(f"_api/airflow/{name.rpartition('.')[0]}")
    # Hide every top-level package that is not meant to be browsable.
    if os.path.isdir(path) and name not in browsable_packages:
        exclude_patterns.append(f"_api/airflow/{name}")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = True

# -- Options for HTML output ---------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes. Prefer the dedicated Airflow theme when available.
html_theme = 'sphinx_rtd_theme'
if airflow_theme_is_available:
    html_theme = 'sphinx_airflow_theme'

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
html_title = "Airflow Documentation"

# A shorter title for the navigation bar.  Default is the same as html_title.
html_short_title = ""

# If given, this must be the name of an image file (path relative to the
# configuration directory) that is the favicon of the docs. Modern browsers
# use this as the icon for tabs, windows and bookmarks. It should be a
# Windows-style icon file (.ico), which is 16x16 or 32x32 pixels large.
html_favicon = "../airflow/www/static/pin_32.png"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']

# A list of JavaScript filename. The entry must be a filename string or a
# tuple containing the filename string and the attributes dictionary. The
# filename must be relative to the html_static_path, or a full URI with
# scheme like http://example.org/script.js.
html_js_files = ['jira-links.js']

# Custom sidebar templates, maps document names to template names.
if airflow_theme_is_available:
    html_sidebars = {
        '**': [
            'version-selector.html',
            'searchbox.html',
            'globaltoc.html',
        ]
    }

# If false, no index is generated.
html_use_index = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False

# A dictionary of values to pass into the template engine’s context for all pages.
html_context = {
    # Google Analytics ID.
    # For more information look at:
    # https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/layout.html#L222-L232
    'theme_analytics_id': 'UA-140539454-1',
}
# NOTE: this assignment replaces (does not merge with) the html_context above,
# so the analytics id is not set when the Airflow theme is active.
if airflow_theme_is_available:
    html_context = {
        # Variables used to build a button for editing the source code
        #
        # The path is created according to the following template:
        #
        # https://{{ github_host|default("github.com") }}/{{ github_user }}/{{ github_repo }}/
        # {{ theme_vcs_pageview_mode|default("blob") }}/{{ github_version }}{{ conf_py_path }}
        # {{ pagename }}{{ suffix }}
        #
        # More information:
        # https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/templates/doc_builder/conf.py.tmpl#L100-L103
        # https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/breadcrumbs.html#L45
        # https://github.com/apache/airflow-site/blob/91f760c/sphinx_airflow_theme/sphinx_airflow_theme/suggest_change_button.html#L36-L40
        #
        'theme_vcs_pageview_mode': 'edit',
        'conf_py_path': '/docs/',
        'github_user': 'apache',
        'github_repo': 'airflow',
        'github_version': 'master',
        'display_github': 'master',
        'suffix': '.rst',
    }
# == Extensions configuration ==================================================

# -- Options for sphinxcontrib.jinja -------------------------------------------
# See: https://github.com/tardyp/sphinx-jinja

# Jinja context: exposes the default configuration description to
# ``.. jinja:: config_ctx`` blocks in the docs.
jinja_contexts = {'config_ctx': {"configs": default_config_yaml()}}
# -- Options for sphinx.ext.autodoc --------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html

# This value contains a list of modules to be mocked up. This is useful when some external dependencies
# are not met at build time and break the building process.
# (Duplicate 'pyhive' entry removed; the list is treated as a set of module
# names by autodoc, so each name needs to appear only once.)
autodoc_mock_imports = [
    'MySQLdb',
    'adal',
    'analytics',
    'azure',
    'azure.cosmos',
    'azure.datalake',
    'azure.kusto',
    'azure.mgmt',
    'boto3',
    'botocore',
    'bson',
    'cassandra',
    'celery',
    'cloudant',
    'cryptography',
    'cx_Oracle',
    'datadog',
    'distributed',
    'docker',
    'google',
    'google_auth_httplib2',
    'googleapiclient',
    'grpc',
    'hdfs',
    'httplib2',
    'jaydebeapi',
    'jenkins',
    'jira',
    'kubernetes',
    'msrestazure',
    'pandas',
    'pandas_gbq',
    'paramiko',
    'pinotdb',
    'psycopg2',
    'pydruid',
    'pyhive',
    'pymongo',
    'pymssql',
    'pysftp',
    'qds_sdk',
    'redis',
    'simple_salesforce',
    'slackclient',
    'smbclient',
    'snowflake',
    'sshtunnel',
    'tenacity',
    'vertica_python',
    'winrm',
    'zdesk',
]

# The default options for autodoc directives. They are applied to all autodoc directives automatically.
autodoc_default_options = {'show-inheritance': True, 'members': True}
# -- Options for sphinx.ext.intersphinx ----------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html

# This config value contains the locations and names of other projects that should
# be linked to in this documentation. The second tuple element (None) means the
# inventory file is fetched from the default location under the given URL.
intersphinx_mapping = {
    'boto3': ('https://boto3.amazonaws.com/v1/documentation/api/latest/', None),
    'celery': ('https://docs.celeryproject.org/en/stable/', None),
    'hdfs': ('https://hdfscli.readthedocs.io/en/latest/', None),
    'jinja2': ('https://jinja.palletsprojects.com/en/master/', None),
    'mongodb': ('https://api.mongodb.com/python/current/', None),
    'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
    'python': ('https://docs.python.org/3/', None),
    'requests': ('https://requests.readthedocs.io/en/master/', None),
    'sqlalchemy': ('https://docs.sqlalchemy.org/en/latest/', None),
    # google-api
    'google-api-core': ('https://googleapis.dev/python/google-api-core/latest', None),
    'google-cloud-automl': ('https://googleapis.dev/python/automl/latest', None),
    'google-cloud-bigquery': ('https://googleapis.dev/python/bigquery/latest', None),
    'google-cloud-bigquery-datatransfer': ('https://googleapis.dev/python/bigquerydatatransfer/latest', None),
    'google-cloud-bigquery-storage': ('https://googleapis.dev/python/bigquerystorage/latest', None),
    'google-cloud-bigtable': ('https://googleapis.dev/python/bigtable/latest', None),
    'google-cloud-container': ('https://googleapis.dev/python/container/latest', None),
    'google-cloud-core': ('https://googleapis.dev/python/google-cloud-core/latest', None),
    'google-cloud-datacatalog': ('https://googleapis.dev/python/datacatalog/latest', None),
    'google-cloud-datastore': ('https://googleapis.dev/python/datastore/latest', None),
    'google-cloud-dlp': ('https://googleapis.dev/python/dlp/latest', None),
    'google-cloud-kms': ('https://googleapis.dev/python/cloudkms/latest', None),
    'google-cloud-language': ('https://googleapis.dev/python/language/latest', None),
    'google-cloud-monitoring': ('https://googleapis.dev/python/monitoring/latest', None),
    'google-cloud-pubsub': ('https://googleapis.dev/python/pubsub/latest', None),
    'google-cloud-redis': ('https://googleapis.dev/python/redis/latest', None),
    'google-cloud-spanner': ('https://googleapis.dev/python/spanner/latest', None),
    'google-cloud-speech': ('https://googleapis.dev/python/speech/latest', None),
    'google-cloud-storage': ('https://googleapis.dev/python/storage/latest', None),
    'google-cloud-tasks': ('https://googleapis.dev/python/cloudtasks/latest', None),
    'google-cloud-texttospeech': ('https://googleapis.dev/python/texttospeech/latest', None),
    'google-cloud-translate': ('https://googleapis.dev/python/translation/latest', None),
    'google-cloud-videointelligence': ('https://googleapis.dev/python/videointelligence/latest', None),
    'google-cloud-vision': ('https://googleapis.dev/python/vision/latest', None),
}
# -- Options for sphinx.ext.viewcode -------------------------------------------
# See: https://www.sphinx-doc.org/es/master/usage/extensions/viewcode.html

# If this is True, viewcode extension will emit viewcode-follow-imported event to resolve the name of
# the module by other extensions. The default is True.
viewcode_follow_imported_members = True

# -- Options for sphinx-autoapi ------------------------------------------------
# See: https://sphinx-autoapi.readthedocs.io/en/latest/config.html

# Paths (relative or absolute) to the source code that you wish to generate
# your API documentation from.
autoapi_dirs = [
    os.path.abspath('../airflow'),
]

# A directory that has user-defined templates to override our default templates.
autoapi_template_dir = 'autoapi_templates'

# A list of patterns to ignore when finding files
autoapi_ignore = [
    '*/airflow/kubernetes/kubernetes_request_factory/*',
    '*/_internal*',
    '*/airflow/**/providers/**/utils/*',
    '*/node_modules/*',
    '*/example_dags/*',
    '*/migrations/*',
]

# Keep the AutoAPI generated files on the filesystem after the run.
# Useful for debugging.
autoapi_keep_files = True

# Relative path to output the AutoAPI files into. This can also be used to place the generated documentation
# anywhere in your documentation hierarchy.
autoapi_root = '_api'

# -- Options for ext.exampleinclude --------------------------------------------
# Root directory from which example source files are resolved.
exampleinclude_sourceroot = os.path.abspath('..')

# -- Options for ext.redirects -------------------------------------------------
# File consumed by the local 'redirects' extension listed in `extensions`.
redirects_file = 'redirects.txt'

# -- Options for sphinxcontrib.redoc -------------------------------------------
# See: https://sphinxcontrib-redoc.readthedocs.io/en/stable/
OPENAPI_FILE = os.path.join(os.path.dirname(__file__), "..", "airflow", "api_connexion", "openapi", "v1.yaml")
redoc = [
    {
        'name': 'Airflow REST API',
        'page': 'stable-rest-api-ref',
        'spec': OPENAPI_FILE,
        'opts': {
            'hide-hostname': True,
            'no-auto-auth': True,
        },
    },
]

# Options for script updater
redoc_script_url = "https://cdn.jsdelivr.net/npm/redoc@2.0.0-rc.30/bundles/redoc.standalone.js"
| docs/conf.py | 16,080 | Configuration of Airflow Docs
flake8: noqa Disable Flake8 because of all the sphinx imports Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Airflow documentation build configuration file, created by sphinx-quickstart on Thu Oct 9 20:50:01 2014. This file is execfile()d with the current directory set to its containing dir. Note that not all possible configuration values are present in this autogenerated file. All configuration values have a default; values that are commented out serve to show the default. pylint: disable=unused-import Hack to allow changing for piece of the code to behave differently while the docs are being built. The main objective was to alter the behavior of the utils.apply_default that was hiding function headers == Sphinx configuration ====================================================== -- Project information ------------------------------------------------------- See: https://www.sphinx-doc.org/en/master/usage/configuration.htmlproject-information General information about the project. The version info for the project you're documenting, acts as replacement for |version| and |release|, also used in various other places throughout the built documents. The short X.Y version. version = '1.0.0' The full version, including alpha/beta/rc tags. 
release = '1.0.0' -- General configuration ----------------------------------------------------- See: https://www.sphinx-doc.org/en/master/usage/configuration.html Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. First, generate redoc Second, update redoc script If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. We only link to selected subpackages. We have custom page - operators-and-hooks-ref.rst Packages with subpackages Templates or partials Exclude top-level packages do not exclude these top-level modules from the doc build: Add any paths that contain templates here, relative to this directory. The suffix of source filenames. The master toctree document. The name of the Pygments (syntax highlighting) style to use. If true, keep warnings as "system message" paragraphs in the built documents. -- Options for HTML output --------------------------------------------------- See: https://www.sphinx-doc.org/en/master/usage/configuration.htmloptions-for-html-output The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. The name for this set of Sphinx documents. If None, it defaults to "<project> v<release> documentation". A shorter title for the navigation bar. Default is the same as html_title. given, this must be the name of an image file (path relative to the configuration directory) that is the favicon of the docs. Modern browsers use this as the icon for tabs, windows and bookmarks. It should be a Windows-style icon file (.ico), which is 16x16 or 32x32 pixels large. 
Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". A list of JavaScript filename. The entry must be a filename string or a tuple containing the filename string and the attributes dictionary. The filename must be relative to the html_static_path, or a full URI with scheme like http://example.org/script.js. Custom sidebar templates, maps document names to template names. If false, no index is generated. If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. A dictionary of values to pass into the template engine’s context for all pages. Google Analytics ID. For more information look at: https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/layout.htmlL222-L232 Variables used to build a button for editing the source code The path is created according to the following template: https://{{ github_host|default("github.com") }}/{{ github_user }}/{{ github_repo }}/ {{ theme_vcs_pageview_mode|default("blob") }}/{{ github_version }}{{ conf_py_path }} {{ pagename }}{{ suffix }} More information: https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/templates/doc_builder/conf.py.tmplL100-L103 https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/breadcrumbs.htmlL45 https://github.com/apache/airflow-site/blob/91f760c/sphinx_airflow_theme/sphinx_airflow_theme/suggest_change_button.htmlL36-L40 == Extensions configuration ================================================== -- Options for sphinxcontrib.jinjac ------------------------------------------ See: https://github.com/tardyp/sphinx-jinja Jinja context -- Options for sphinx.ext.autodoc -------------------------------------------- See: https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html This value contains a list of modules to be mocked up. 
This is useful when some external dependencies are not met at build time and break the building process. The default options for autodoc directives. They are applied to all autodoc directives automatically. -- Options for sphinx.ext.intersphinx ---------------------------------------- See: https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html This config value contains the locations and names of other projects that should be linked to in this documentation. google-api -- Options for sphinx.ext.viewcode ------------------------------------------- See: https://www.sphinx-doc.org/es/master/usage/extensions/viewcode.html If this is True, viewcode extension will emit viewcode-follow-imported event to resolve the name of the module by other extensions. The default is True. -- Options for sphinx-autoapi ------------------------------------------------ See: https://sphinx-autoapi.readthedocs.io/en/latest/config.html Paths (relative or absolute) to the source code that you wish to generate your API documentation from. A directory that has user-defined templates to override our default templates. A list of patterns to ignore when finding files Keep the AutoAPI generated files on the filesystem after the run. Useful for debugging. Relative path to output the AutoAPI files into. This can also be used to place the generated documentation anywhere in your documentation hierarchy. -- Options for ext.exampleinclude -------------------------------------------- -- Options for ext.redirects ------------------------------------------------- -- Options for sphinxcontrib.redoc ------------------------------------------- See: https://sphinxcontrib-redoc.readthedocs.io/en/stable/ Options for script updater | 7,649 | en | 0.705281 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
# Typing-only imports and aliases; evaluated by type checkers, not at runtime.
if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union

    T = TypeVar('T')
    # Signature of the optional response-transform callback (the 'cls' kwarg
    # accepted by the operation methods).
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RoutesOperations(object):
    """RoutesOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_08_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # NOTE: this module is generated by AutoRest; manual edits are lost on regeneration.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def _delete_initial(
        self,
        resource_group_name,  # type: str
        route_table_name,  # type: str
        route_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Issue the initial DELETE request for ``begin_delete`` (single request, no polling)."""
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'routeName': self._serialize.url("route_name", route_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200, 202 and 204 are all treated as success for a DELETE.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'}  # type: ignore

    def begin_delete(
        self,
        resource_group_name,  # type: str
        route_table_name,  # type: str
        route_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified route from a route table.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_table_name: The name of the route table.
        :type route_table_name: str
        :param route_name: The name of the route.
        :type route_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # First call: fire the initial request; cls is overridden so we keep
            # the raw pipeline response for the poller.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                route_table_name=route_table_name,
                route_name=route_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # DELETE has no response body; only honor a custom ``cls`` if given.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'routeName': self._serialize.url("route_name", route_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new operation.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'}  # type: ignore

    def get(
        self,
        resource_group_name,  # type: str
        route_table_name,  # type: str
        route_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.Route"
        """Gets the specified route from a route table.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_table_name: The name of the route table.
        :type route_table_name: str
        :param route_name: The name of the route.
        :type route_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Route, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_08_01.models.Route
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Route"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'routeName': self._serialize.url("route_name", route_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('Route', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'}  # type: ignore

    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        route_table_name,  # type: str
        route_name,  # type: str
        route_parameters,  # type: "_models.Route"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.Route"
        """Issue the initial PUT request for ``begin_create_or_update`` (single request, no polling)."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Route"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'routeName': self._serialize.url("route_name", route_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(route_parameters, 'Route')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Both 200 (updated) and 201 (created) carry a Route body.
        if response.status_code == 200:
            deserialized = self._deserialize('Route', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('Route', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'}  # type: ignore

    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        route_table_name,  # type: str
        route_name,  # type: str
        route_parameters,  # type: "_models.Route"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.Route"]
        """Creates or updates a route in the specified route table.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_table_name: The name of the route table.
        :type route_table_name: str
        :param route_name: The name of the route.
        :type route_name: str
        :param route_parameters: Parameters supplied to the create or update route operation.
        :type route_parameters: ~azure.mgmt.network.v2020_08_01.models.Route
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either Route or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_08_01.models.Route]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Route"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # First call: fire the initial request; cls is overridden so we keep
            # the raw pipeline response for the poller.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                route_table_name=route_table_name,
                route_name=route_name,
                route_parameters=route_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final Route payload once the LRO completes.
            deserialized = self._deserialize('Route', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'routeName': self._serialize.url("route_name", route_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new operation.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'}  # type: ignore

    def list(
        self,
        resource_group_name,  # type: str
        route_table_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.RouteListResult"]
        """Gets all routes in a route table.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_table_name: The name of the route table.
        :type route_table_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either RouteListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_08_01.models.RouteListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build either the first-page request or a follow-up request from the
            # service-provided next_link (which already embeds its query string).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Turn one page of results into (next_link, iterator of elements).
            deserialized = self._deserialize('RouteListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page, raising mapped errors on non-200 responses.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes'}  # type: ignore
| sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/operations/_routes_operations.py | 21,296 | RoutesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
Creates or updates a route in the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:param route_parameters: Parameters supplied to the create or update route operation.
:type route_parameters: ~azure.mgmt.network.v2020_08_01.models.Route
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either Route or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_08_01.models.Route]
:raises ~azure.core.exceptions.HttpResponseError:
Deletes the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
Gets the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Route, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_08_01.models.Route
:raises: ~azure.core.exceptions.HttpResponseError
Gets all routes in a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_08_01.models.RouteListResult]
:raises: ~azure.core.exceptions.HttpResponseError
coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- pylint: disable=unused-import,ungrouped-imports type: str type: str type: str type: Any type: (...) -> None type: ClsType[None] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: ignore type: str type: str type: str type: Any type: (...) -> LROPoller[None] type: Union[bool, PollingMethod] type: ClsType[None] type: Optional[str] type: ignore type: str type: str type: str type: Any type: (...) -> "_models.Route" type: ClsType["_models.Route"] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: ignore type: str type: str type: str type: "_models.Route" type: Any type: (...) -> "_models.Route" type: ClsType["_models.Route"] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: Dict[str, Any] type: ignore type: str type: str type: str type: "_models.Route" type: Any type: (...) -> LROPoller["_models.Route"] type: Union[bool, PollingMethod] type: ClsType["_models.Route"] type: Optional[str] type: ignore type: str type: str type: Any type: (...) -> Iterable["_models.RouteListResult"] type: ClsType["_models.RouteListResult"] Construct headers type: Dict[str, Any] Construct URL type: ignore Construct parameters type: Dict[str, Any] type: Dict[str, Any] type: ignore | 5,475 | en | 0.524519 |
"""
Makes python 2 behave more like python 3.
Ideally we import this globally so all our python 2 interpreters will assist in spotting errors early.
"""
# future imports are harmless if they implement behaviour that already exists in the current interpreter version
from __future__ import absolute_import, division, print_function
import sys
from collections import OrderedDict
if sys.version_info.major == 2:
# Override dict and make items() behave like iteritems() to retain performance
class dict(dict):
def items(self):
return super(dict, self).iteritems()
def keys(self):
return super(dict, self).iterkeys()
def values(self):
return super(dict, self).itervalues()
class OrderedDict(OrderedDict):
def items(self):
return super(OrderedDict, self).iteritems()
def keys(self):
return super(OrderedDict, self).iterkeys()
def values(self):
return super(OrderedDict, self).itervalues()
# Override range with xrange to mimic python3's range
range = xrange
import cStringIO as io
else:
unicode = str
long = int
import io
try:
from typing import *
T = TypeVar('T')
except:
pass
| errorCheckTool/py23.py | 1,249 | Makes python 2 behave more like python 3.
Ideally we import this globally so all our python 2 interpreters will assist in spotting errors early.
future imports are harmless if they implement behaviour that already exists in the current interpreter version Override dict and make items() behave like iteritems() to retain performance Override range with xrange to mimic python3's range | 386 | en | 0.779591 |
import rebound
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import display, clear_output
from sherlockpipe.nbodies.PlanetInput import PlanetInput
class StabilityCalculator:
    """Estimate the dynamical stability of a planetary system by integrating it
    with rebound and returning the MEGNO chaos indicator (clipped to 10)."""

    def __init__(self, star_mass):
        # Stellar mass (in solar masses) used to scale the planet/star mass ratio in run().
        self.star_mass = star_mass

    def mass_from_radius(self, radius):
        # Broken power-law mass-radius relation with a break at 12.1 (radius units
        # presumably Earth radii -- TODO confirm the units and the exponents'
        # provenance; 1/0.01 = 100 makes the large-radius branch extremely steep).
        return radius ** (1 / 0.55) if radius <= 12.1 else radius ** (1 / 0.01)

    def run(self, planet_params):
        """Integrate the system and return its MEGNO value (10 on escape/strong chaos)."""
        sim = rebound.Simulation()
        sim.integrator = "whfast"
        sim.ri_whfast.safe_mode = 0  # faster WHFast mode; symplectic correctors off
        sim.dt = 1e-2
        sim.add(m=1.0)  # central star (simulation mass unit)
        for planet_param in planet_params:
            # 0.000003003 converts Earth masses to solar masses; dividing by the
            # star mass expresses the planet mass relative to the m=1.0 primary.
            sim.add(m=self.mass_from_radius(planet_param.r) * 0.000003003 / self.star_mass, P=planet_param.P, e=planet_param.e, omega=planet_param.omega)
        #sim.status()
        sim.move_to_com()
        sim.init_megno()  # enable MEGNO chaos-indicator tracking
        sim.exit_max_distance = 20.  # raise rebound.Escape if any particle strays this far
        try:
            sim.integrate(5e2 * 2. * np.pi, exact_finish_time=0)  # integrate for 500 years, integrating to the nearest
            # for i in range(500):
            #     sim.integrate(sim.t + i * 2 * np.pi)
            #     fig, ax = rebound.OrbitPlot(sim, color=True, unitlabel="[AU]", xlim=[-0.1, 0.1], ylim=[-0.1, 0.1])
            #     plt.show()
            #     plt.close(fig)
            #clear_output(wait=True)
            #timestep for each output to keep the timestep constant and preserve WHFast's symplectic nature
            megno = sim.calculate_megno()
            # Clip to 10: values at/above this already indicate strong chaos.
            megno = megno if megno < 10 else 10
            return megno
        except rebound.Escape:
            return 10.  # At least one particle got ejected, returning large MEGNO
# Kept for compatibility with earlier grid-scan experiments (unused below).
planet_params = []

from rebound.interruptible_pool import InterruptiblePool

# One (period [d], radius, eccentricity) triple per candidate planet.
_candidates = [
    (5.43440, 1.68792, 0),
    (1.74542, 1.12207, 0),
    (4.02382, 1.34990, 0),
    (2.8611, 1.17643, 0),
    (1.58834, 1.07459, 0),
]
parameters = [PlanetInput(period, radius, ecc) for period, radius, ecc in _candidates]

# Single MEGNO evaluation for a 0.211299 solar-mass star.
# (A parallel grid scan over eccentricities via InterruptiblePool, plus the
# matplotlib heat-map rendering of the resulting MEGNO grid, used to live here.)
calculator = StabilityCalculator(0.211299)
result = calculator.run(parameters)
print("MEGNO: " + str(result))
| experimental/megno.py | 2,958 | sim.status() integrate for 500 years, integrating to the nearest for i in range(500): sim.integrate(sim.t + i * 2 * np.pi) fig, ax = rebound.OrbitPlot(sim, color=True, unitlabel="[AU]", xlim=[-0.1, 0.1], ylim=[-0.1, 0.1]) plt.show() plt.close(fig)clear_output(wait=True)timestep for each output to keep the timestep constant and preserve WHFast's symplectic nature At least one particle got ejected, returning large MEGNO grid = 20 par_e = np.linspace(0.0, 0.7, grid) par_e1 = np.linspace(0.0, 0.7, grid) for i in par_e: for j in par_e1: parameters.append((PlanetInput(1.74542, 0.01606, 1.12207, 0), PlanetInput(0.03088, 2.97, j))) pool = InterruptiblePool() results = pool.map(StabilityCalculator(0.211299).run, parameters) results2d = np.array(results).reshape(grid, grid) fig = plt.figure(figsize=(7, 5)) ax = plt.subplot(111) extent = [min(par_e), max(par_e), min(par_e1), max(par_e1)] ax.set_xlim(extent[0], extent[1]) ax.set_xlabel("ecc1 $e$") ax.set_ylim(extent[2], extent[3]) ax.set_ylabel("ecc2 $e1$") im = ax.imshow(results2d, interpolation="none", vmin=1.9, vmax=10, cmap="RdYlGn_r", origin="lower", aspect='auto', extent=extent) cb = plt.colorbar(im, ax=ax) cb.set_label("MEGNO $\\langle Y \\rangle$") plt.show() | 1,252 | en | 0.287847 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from acq4.devices.Device import *
from acq4.util import Qt
import acq4.util.Mutex as Mutex
from collections import OrderedDict
class LightSource(Device):
    """Device tracking the state and properties of multiple illumination sources.
    """
    # emitted when the on/off status of a light changes; arguments: (self, light_name)
    sigLightChanged = Qt.Signal(object, object)

    def __init__(self, dm, config, name):
        Device.__init__(self, dm, config, name)
        # [name: {'active': bool, 'wavelength': float, 'power': float, ...}, ...]
        self._sources = OrderedDict()
        self._lock = Mutex.Mutex()

    def addSource(self, name, conf):
        """Register light source *name* described by config dict *conf*.

        If *conf* contains an 'xkey' entry of (devname, row, col), pressing that
        key on the named key device toggles this source (see _hotkeyPressed).
        """
        self._sources[name] = conf
        if 'xkey' in conf:
            devname, row, col = self._sources[name]['xkey']
            dev = self.dm.getDevice(devname)
            dev.addKeyCallback((row, col), self._hotkeyPressed, (name,))

    def describe(self, onlyActive=True):
        """Return a description of the current state of all active light sources.

        If onlyActive is False, then information for all sources will be returned, whether or not they are active.
        """
        if onlyActive:
            return OrderedDict([(n, s) for n, s in self._sources.items() if s['active']])
        else:
            return self._sources.copy()

    def activeSources(self):
        """Return the names of all active light sources.
        """
        # BUG FIX: iterating ``self._sources`` (a dict) yields the key strings,
        # so the previous ``s['name']`` indexed into each *name* string and
        # raised TypeError. Iterate items() and return the keys instead.
        return [name for name, source in self._sources.items() if source['active']]

    def sourceActive(self, name):
        """Return True if the named light source is currently active.
        """
        return self._sources[name]['active']

    def setSourceActive(self, name, active):
        """Activate / deactivate a light source. Subclasses must implement this.
        """
        raise NotImplementedError()

    def _updateXkeyLight(self, name):
        # Mirror the source's on/off state onto its hotkey backlight, if one is configured.
        if 'xkey' in self._sources[name]:
            devname, row, col = self._sources[name]['xkey']
            dev = self.dm.getDevice(devname)
            bl = dev.getBacklights()
            bl[row, col] = int(self._sources[name]['active'])
            dev.setBacklights(bl)

    def _hotkeyPressed(self, dev, changes, name):
        # Toggle the associated source whenever its assigned hotkey is pressed.
        self.setSourceActive(name, not self.sourceActive(name))
| acq4/devices/LightSource/LightSource.py | 2,272 | Device tracking the state and properties of multiple illumination sources.
Return the names of all active light sources.
Return a description of the current state of all active light sources.
If onlyActive is False, then information for all sources will be returned, whether or not they are active.
Activate / deactivate a light source.
Return True if the named light source is currently active.
-*- coding: utf-8 -*- emitted when the on/off status of a light changes self, light_name [name: {'active': bool, 'wavelength': float, 'power': float, ...}, ...] | 591 | en | 0.775694 |
import asyncio
import dataclasses
import logging
import multiprocessing
from concurrent.futures.process import ProcessPoolExecutor
from enum import Enum
from typing import Dict, List, Optional, Set, Tuple, Union
from clvm.casts import int_from_bytes
from kujenga.consensus.block_body_validation import validate_block_body
from kujenga.consensus.block_header_validation import validate_finished_header_block, validate_unfinished_header_block
from kujenga.consensus.block_record import BlockRecord
from kujenga.consensus.blockchain_interface import BlockchainInterface
from kujenga.consensus.constants import ConsensusConstants
from kujenga.consensus.cost_calculator import NPCResult
from kujenga.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from kujenga.consensus.find_fork_point import find_fork_point_in_chain
from kujenga.consensus.full_block_to_block_record import block_to_block_record
from kujenga.consensus.multiprocess_validation import PreValidationResult, pre_validate_blocks_multiprocessing
from kujenga.full_node.block_store import BlockStore
from kujenga.full_node.coin_store import CoinStore
from kujenga.full_node.hint_store import HintStore
from kujenga.full_node.mempool_check_conditions import get_name_puzzle_conditions
from kujenga.types.blockchain_format.coin import Coin
from kujenga.types.blockchain_format.sized_bytes import bytes32
from kujenga.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from kujenga.types.blockchain_format.vdf import VDFInfo
from kujenga.types.coin_record import CoinRecord
from kujenga.types.condition_opcodes import ConditionOpcode
from kujenga.types.end_of_slot_bundle import EndOfSubSlotBundle
from kujenga.types.full_block import FullBlock
from kujenga.types.generator_types import BlockGenerator, GeneratorArg
from kujenga.types.header_block import HeaderBlock
from kujenga.types.unfinished_block import UnfinishedBlock
from kujenga.types.unfinished_header_block import UnfinishedHeaderBlock
from kujenga.types.weight_proof import SubEpochChallengeSegment
from kujenga.util.errors import Err
from kujenga.util.generator_tools import get_block_header, tx_removals_and_additions
from kujenga.util.ints import uint16, uint32, uint64, uint128
from kujenga.util.streamable import recurse_jsonify
log = logging.getLogger(__name__)
class ReceiveBlockResult(Enum):
    """
    When Blockchain.receive_block(b) is called, one of these results is returned,
    showing whether the block was added to the chain (extending the peak),
    and if not, why it was not added.
    """

    NEW_PEAK = 1  # Added to the peak of the blockchain
    ADDED_AS_ORPHAN = 2  # Added as an orphan/stale block (not a new peak of the chain)
    INVALID_BLOCK = 3  # Block was not added because it was invalid
    ALREADY_HAVE_BLOCK = 4  # Block is already present in this blockchain
    DISCONNECTED_BLOCK = 5  # Block's parent (previous pointer) is not in this blockchain
class Blockchain(BlockchainInterface):
    """
    In-memory view of the chain plus its persistent stores.

    Holds a cache of recent BlockRecords, the canonical height->hash mapping
    for the heaviest chain, and coordinates validation (partly in a process
    pool) and peak/reorg updates against the block and coin stores.

    NOTE(review): ``validate_block_body`` and ``int_from_bytes`` are referenced
    below but imported earlier in the file (outside this chunk) — confirm.
    """

    constants: ConsensusConstants
    constants_json: Dict
    # peak of the blockchain
    _peak_height: Optional[uint32]
    # All blocks in peak path are guaranteed to be included, can include orphan blocks
    __block_records: Dict[bytes32, BlockRecord]
    # all hashes of blocks in block_record by height, used for garbage collection
    __heights_in_cache: Dict[uint32, Set[bytes32]]
    # Defines the path from genesis to the peak, no orphan blocks
    __height_to_hash: Dict[uint32, bytes32]
    # All sub-epoch summaries that have been included in the blockchain from the beginning until and including the peak
    # (height_included, SubEpochSummary). Note: ONLY for the blocks in the path to the peak
    __sub_epoch_summaries: Dict[uint32, SubEpochSummary] = {}
    # Unspent Store
    coin_store: CoinStore
    # Store
    block_store: BlockStore
    # Used to verify blocks in parallel
    pool: ProcessPoolExecutor
    # Set holding seen compact proofs, in order to avoid duplicates.
    _seen_compact_proofs: Set[Tuple[VDFInfo, uint32]]
    # Whether blockchain is shut down or not
    _shut_down: bool
    # Lock to prevent simultaneous reads and writes
    lock: asyncio.Lock
    compact_proof_lock: asyncio.Lock
    hint_store: HintStore

    @staticmethod
    async def create(
        coin_store: CoinStore, block_store: BlockStore, consensus_constants: ConsensusConstants, hint_store: HintStore
    ):
        """
        Initializes a blockchain with the BlockRecords from disk, assuming they have all been
        validated. Uses the genesis block given in override_constants, or as a fallback,
        in the consensus constants config.
        """
        self = Blockchain()
        self.lock = asyncio.Lock()  # External lock handled by full node
        self.compact_proof_lock = asyncio.Lock()
        cpu_count = multiprocessing.cpu_count()
        if cpu_count > 61:
            cpu_count = 61  # Windows Server 2016 has an issue https://bugs.python.org/issue26903
        # Leave headroom for the main process; always keep at least one worker.
        num_workers = max(cpu_count - 2, 1)
        self.pool = ProcessPoolExecutor(max_workers=num_workers)
        log.info(f"Started {num_workers} processes for block validation")
        self.constants = consensus_constants
        self.coin_store = coin_store
        self.block_store = block_store
        # JSON-able copy of the constants, passed to worker processes for validation.
        self.constants_json = recurse_jsonify(dataclasses.asdict(self.constants))
        self._shut_down = False
        await self._load_chain_from_store()
        self._seen_compact_proofs = set()
        self.hint_store = hint_store
        return self

    def shut_down(self) -> None:
        # Stop accepting work and wait for in-flight validation workers to finish.
        self._shut_down = True
        self.pool.shutdown(wait=True)

    async def _load_chain_from_store(self) -> None:
        """
        Initializes the state of the Blockchain class from the database.
        """
        height_to_hash, sub_epoch_summaries = await self.block_store.get_peak_height_dicts()
        self.__height_to_hash = height_to_hash
        self.__sub_epoch_summaries = sub_epoch_summaries
        self.__block_records = {}
        self.__heights_in_cache = {}
        # Warm the in-memory cache with the most recent blocks around the peak.
        block_records, peak = await self.block_store.get_block_records_close_to_peak(self.constants.BLOCKS_CACHE_SIZE)
        for block in block_records.values():
            self.add_block_record(block)
        if len(block_records) == 0:
            # Empty database: no peak yet.
            assert peak is None
            self._peak_height = None
            return None
        assert peak is not None
        self._peak_height = self.block_record(peak).height
        # Sanity check: the canonical chain must be contiguous from height 0 to the peak.
        assert len(self.__height_to_hash) == self._peak_height + 1

    def get_peak(self) -> Optional[BlockRecord]:
        """
        Return the peak of the blockchain
        """
        if self._peak_height is None:
            return None
        return self.height_to_block_record(self._peak_height)

    async def get_full_peak(self) -> Optional[FullBlock]:
        # Returns the FullBlock at the current peak, or None if chain is empty.
        if self._peak_height is None:
            return None
        """ Return list of FullBlocks that are peaks"""
        block = await self.block_store.get_full_block(self.height_to_hash(self._peak_height))
        assert block is not None  # Since we must have the peak block
        return block

    async def get_full_block(self, header_hash: bytes32) -> Optional[FullBlock]:
        # Thin pass-through to the block store.
        return await self.block_store.get_full_block(header_hash)

    async def receive_block(
        self,
        block: FullBlock,
        pre_validation_result: Optional[PreValidationResult] = None,
        fork_point_with_peak: Optional[uint32] = None,
    ) -> Tuple[
        ReceiveBlockResult,
        Optional[Err],
        Optional[uint32],
        Tuple[List[CoinRecord], Dict[bytes, Dict[bytes32, CoinRecord]]],
    ]:
        """
        This method must be called under the blockchain lock
        Adds a new block into the blockchain, if it's valid and connected to the current
        blockchain, regardless of whether it is the child of a head, or another block.
        Returns a header if block is added to head. Returns an error if the block is
        invalid. Also returns the fork height, in the case of a new peak.
        """
        genesis: bool = block.height == 0
        if self.contains_block(block.header_hash):
            return ReceiveBlockResult.ALREADY_HAVE_BLOCK, None, None, ([], {})
        if not self.contains_block(block.prev_header_hash) and not genesis:
            return (ReceiveBlockResult.DISCONNECTED_BLOCK, Err.INVALID_PREV_BLOCK_HASH, None, ([], {}))
        if not genesis and (self.block_record(block.prev_header_hash).height + 1) != block.height:
            return ReceiveBlockResult.INVALID_BLOCK, Err.INVALID_HEIGHT, None, ([], {})
        npc_result: Optional[NPCResult] = None
        if pre_validation_result is None:
            # No pre-validation was done: validate the header (and run the
            # transaction generator) inline, right here.
            if block.height == 0:
                prev_b: Optional[BlockRecord] = None
            else:
                prev_b = self.block_record(block.prev_header_hash)
            sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
                self.constants, len(block.finished_sub_slots) > 0, prev_b, self
            )
            if block.is_transaction_block():
                if block.transactions_generator is not None:
                    try:
                        block_generator: Optional[BlockGenerator] = await self.get_block_generator(block)
                    except ValueError:
                        return ReceiveBlockResult.INVALID_BLOCK, Err.GENERATOR_REF_HAS_NO_GENERATOR, None, ([], {})
                    assert block_generator is not None and block.transactions_info is not None
                    npc_result = get_name_puzzle_conditions(
                        block_generator,
                        min(self.constants.MAX_BLOCK_COST_CLVM, block.transactions_info.cost),
                        cost_per_byte=self.constants.COST_PER_BYTE,
                        safe_mode=False,
                    )
                    removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
                else:
                    removals, tx_additions = [], []
                header_block = get_block_header(block, tx_additions, removals)
            else:
                npc_result = None
                header_block = get_block_header(block, [], [])
            required_iters, error = validate_finished_header_block(
                self.constants,
                self,
                header_block,
                False,
                difficulty,
                sub_slot_iters,
            )
            if error is not None:
                return ReceiveBlockResult.INVALID_BLOCK, error.code, None, ([], {})
        else:
            # Reuse results from pre_validate_blocks_multiprocessing.
            npc_result = pre_validation_result.npc_result
            required_iters = pre_validation_result.required_iters
            assert pre_validation_result.error is None
        assert required_iters is not None
        # NOTE(review): validate_block_body is imported above this chunk.
        error_code, _ = await validate_block_body(
            self.constants,
            self,
            self.block_store,
            self.coin_store,
            self.get_peak(),
            block,
            block.height,
            npc_result,
            fork_point_with_peak,
            self.get_block_generator,
        )
        if error_code is not None:
            return ReceiveBlockResult.INVALID_BLOCK, error_code, None, ([], {})
        block_record = block_to_block_record(
            self.constants,
            self,
            required_iters,
            block,
            None,
        )
        # Always add the block to the database
        async with self.block_store.db_wrapper.lock:
            try:
                header_hash: bytes32 = block.header_hash
                # Perform the DB operations to update the state, and rollback if something goes wrong
                await self.block_store.db_wrapper.begin_transaction()
                await self.block_store.add_full_block(header_hash, block, block_record)
                fork_height, peak_height, records, (coin_record_change, hint_changes) = await self._reconsider_peak(
                    block_record, genesis, fork_point_with_peak, npc_result
                )
                await self.block_store.db_wrapper.commit_transaction()
                # Then update the memory cache. It is important that this task is not cancelled and does not throw
                self.add_block_record(block_record)
                for fetched_block_record in records:
                    self.__height_to_hash[fetched_block_record.height] = fetched_block_record.header_hash
                    if fetched_block_record.sub_epoch_summary_included is not None:
                        self.__sub_epoch_summaries[
                            fetched_block_record.height
                        ] = fetched_block_record.sub_epoch_summary_included
                if peak_height is not None:
                    self._peak_height = peak_height
            except BaseException:
                # Roll back both the block-store cache and the DB transaction on any failure.
                self.block_store.rollback_cache_block(header_hash)
                await self.block_store.db_wrapper.rollback_transaction()
                raise
        if fork_height is not None:
            # new coin records added
            assert coin_record_change is not None
            return ReceiveBlockResult.NEW_PEAK, None, fork_height, (coin_record_change, hint_changes)
        else:
            return ReceiveBlockResult.ADDED_AS_ORPHAN, None, None, ([], {})

    def get_hint_list(self, npc_result: NPCResult) -> List[Tuple[bytes32, bytes]]:
        """
        Extract (coin_id, hint) pairs from CREATE_COIN conditions in an NPC result.
        A hint is the optional third var of a CREATE_COIN condition (non-empty bytes).
        """
        h_list = []
        for npc in npc_result.npc_list:
            for opcode, conditions in npc.conditions:
                if opcode == ConditionOpcode.CREATE_COIN:
                    for condition in conditions:
                        if len(condition.vars) > 2 and condition.vars[2] != b"":
                            puzzle_hash, amount_bin = condition.vars[0], condition.vars[1]
                            # NOTE(review): int_from_bytes is imported above this chunk.
                            amount = int_from_bytes(amount_bin)
                            coin_id = Coin(npc.coin_name, puzzle_hash, amount).name()
                            h_list.append((coin_id, condition.vars[2]))
        return h_list

    async def _reconsider_peak(
        self,
        block_record: BlockRecord,
        genesis: bool,
        fork_point_with_peak: Optional[uint32],
        npc_result: Optional[NPCResult],
    ) -> Tuple[
        Optional[uint32],
        Optional[uint32],
        List[BlockRecord],
        Tuple[List[CoinRecord], Dict[bytes, Dict[bytes32, CoinRecord]]],
    ]:
        """
        When a new block is added, this is called, to check if the new block is the new peak of the chain.
        This also handles reorgs by reverting blocks which are not in the heaviest chain.
        It returns the height of the fork between the previous chain and the new chain, or returns
        None if there was no update to the heaviest chain.
        """
        peak = self.get_peak()
        # (sic) "lastest" — typo for "latest", kept for token fidelity in this doc pass.
        lastest_coin_state: Dict[bytes32, CoinRecord] = {}
        hint_coin_state: Dict[bytes32, Dict[bytes32, CoinRecord]] = {}
        if genesis:
            if peak is None:
                block: Optional[FullBlock] = await self.block_store.get_full_block(block_record.header_hash)
                assert block is not None
                if npc_result is not None:
                    tx_removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
                else:
                    tx_removals, tx_additions = [], []
                if block.is_transaction_block():
                    assert block.foliage_transaction_block is not None
                    added = await self.coin_store.new_block(
                        block.height,
                        block.foliage_transaction_block.timestamp,
                        block.get_included_reward_coins(),
                        tx_additions,
                        tx_removals,
                    )
                else:
                    added, _ = [], []
                await self.block_store.set_peak(block_record.header_hash)
                return uint32(0), uint32(0), [block_record], (added, {})
            # Genesis block but we already have a peak: nothing changes.
            return None, None, [], ([], {})
        assert peak is not None
        if block_record.weight > peak.weight:
            # Find the fork. if the block is just being appended, it will return the peak
            # If no blocks in common, returns -1, and reverts all blocks
            if block_record.prev_hash == peak.header_hash:
                fork_height: int = peak.height
            elif fork_point_with_peak is not None:
                fork_height = fork_point_with_peak
            else:
                fork_height = find_fork_point_in_chain(self, block_record, peak)
            if block_record.prev_hash != peak.header_hash:
                # Real reorg: revert coin-store state back to the fork point.
                roll_changes: List[CoinRecord] = await self.coin_store.rollback_to_block(fork_height)
                for coin_record in roll_changes:
                    lastest_coin_state[coin_record.name] = coin_record
            # Rollback sub_epoch_summaries
            heights_to_delete = []
            for ses_included_height in self.__sub_epoch_summaries.keys():
                if ses_included_height > fork_height:
                    heights_to_delete.append(ses_included_height)
            for height in heights_to_delete:
                log.info(f"delete ses at height {height}")
                del self.__sub_epoch_summaries[height]
            # Collect all blocks from fork point to new peak
            blocks_to_add: List[Tuple[FullBlock, BlockRecord]] = []
            curr = block_record.header_hash
            while fork_height < 0 or curr != self.height_to_hash(uint32(fork_height)):
                fetched_full_block: Optional[FullBlock] = await self.block_store.get_full_block(curr)
                fetched_block_record: Optional[BlockRecord] = await self.block_store.get_block_record(curr)
                assert fetched_full_block is not None
                assert fetched_block_record is not None
                blocks_to_add.append((fetched_full_block, fetched_block_record))
                if fetched_full_block.height == 0:
                    # Doing a full reorg, starting at height 0
                    break
                curr = fetched_block_record.prev_hash
            records_to_add = []
            # Apply the new chain in ascending height order.
            for fetched_full_block, fetched_block_record in reversed(blocks_to_add):
                records_to_add.append(fetched_block_record)
                if fetched_full_block.is_transaction_block():
                    if fetched_block_record.header_hash == block_record.header_hash:
                        # The just-received block: reuse its NPC result if we have it.
                        tx_removals, tx_additions, npc_res = await self.get_tx_removals_and_additions(
                            fetched_full_block, npc_result
                        )
                    else:
                        tx_removals, tx_additions, npc_res = await self.get_tx_removals_and_additions(
                            fetched_full_block, None
                        )
                    assert fetched_full_block.foliage_transaction_block is not None
                    added_rec = await self.coin_store.new_block(
                        fetched_full_block.height,
                        fetched_full_block.foliage_transaction_block.timestamp,
                        fetched_full_block.get_included_reward_coins(),
                        tx_additions,
                        tx_removals,
                    )
                    removed_rec: List[Optional[CoinRecord]] = [
                        await self.coin_store.get_coin_record(name) for name in tx_removals
                    ]
                    # Set additions first, then removals in order to handle ephemeral coin state
                    # Add in height order is also required
                    record: Optional[CoinRecord]
                    for record in added_rec:
                        assert record
                        lastest_coin_state[record.name] = record
                    for record in removed_rec:
                        assert record
                        lastest_coin_state[record.name] = record
                    if npc_res is not None:
                        hint_list: List[Tuple[bytes32, bytes]] = self.get_hint_list(npc_res)
                        await self.hint_store.add_hints(hint_list)
                        # There can be multiple coins for the same hint
                        for coin_id, hint in hint_list:
                            key = hint
                            if key not in hint_coin_state:
                                hint_coin_state[key] = {}
                            hint_coin_state[key][coin_id] = lastest_coin_state[coin_id]
            # Changes the peak to be the new peak
            await self.block_store.set_peak(block_record.header_hash)
            return (
                uint32(max(fork_height, 0)),
                block_record.height,
                records_to_add,
                (list(lastest_coin_state.values()), hint_coin_state),
            )
        # This is not a heavier block than the heaviest we have seen, so we don't change the coin set
        return None, None, [], ([], {})

    async def get_tx_removals_and_additions(
        self, block: FullBlock, npc_result: Optional[NPCResult] = None
    ) -> Tuple[List[bytes32], List[Coin], Optional[NPCResult]]:
        """
        Return (removals, additions, npc_result) for a block, running the
        transactions generator if no NPC result was supplied. Non-transaction
        blocks (and generator-less blocks) yield empty lists.
        """
        if block.is_transaction_block():
            if block.transactions_generator is not None:
                if npc_result is None:
                    block_generator: Optional[BlockGenerator] = await self.get_block_generator(block)
                    assert block_generator is not None
                    npc_result = get_name_puzzle_conditions(
                        block_generator,
                        self.constants.MAX_BLOCK_COST_CLVM,
                        cost_per_byte=self.constants.COST_PER_BYTE,
                        safe_mode=False,
                    )
                tx_removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
                return tx_removals, tx_additions, npc_result
            else:
                return [], [], None
        else:
            return [], [], None

    def get_next_difficulty(self, header_hash: bytes32, new_slot: bool) -> uint64:
        # Difficulty for the block following header_hash; fixed starting value near genesis.
        assert self.contains_block(header_hash)
        curr = self.block_record(header_hash)
        if curr.height <= 2:
            return self.constants.DIFFICULTY_STARTING
        return get_next_sub_slot_iters_and_difficulty(self.constants, new_slot, curr, self)[1]

    def get_next_slot_iters(self, header_hash: bytes32, new_slot: bool) -> uint64:
        # Sub-slot iterations for the block following header_hash; fixed starting value near genesis.
        assert self.contains_block(header_hash)
        curr = self.block_record(header_hash)
        if curr.height <= 2:
            return self.constants.SUB_SLOT_ITERS_STARTING
        return get_next_sub_slot_iters_and_difficulty(self.constants, new_slot, curr, self)[0]

    async def get_sp_and_ip_sub_slots(
        self, header_hash: bytes32
    ) -> Optional[Tuple[Optional[EndOfSubSlotBundle], Optional[EndOfSubSlotBundle]]]:
        """
        Return the (signage-point sub-slot, infusion-point sub-slot) pair for the
        given block, walking back to the first block in each sub-slot.
        Returns None if the block is unknown; elements may be None near genesis
        or when both points fall in the same sub-slot.
        """
        block: Optional[FullBlock] = await self.block_store.get_full_block(header_hash)
        if block is None:
            return None
        curr_br: BlockRecord = self.block_record(block.header_hash)
        is_overflow = curr_br.overflow
        curr: Optional[FullBlock] = block
        assert curr is not None
        # Walk back to the first block in this sub-slot (or genesis).
        while True:
            if curr_br.first_in_sub_slot:
                curr = await self.block_store.get_full_block(curr_br.header_hash)
                assert curr is not None
                break
            if curr_br.height == 0:
                break
            curr_br = self.block_record(curr_br.prev_hash)
        if len(curr.finished_sub_slots) == 0:
            # This means we got to genesis and still no sub-slots
            return None, None
        ip_sub_slot = curr.finished_sub_slots[-1]
        if not is_overflow:
            # Pos sub-slot is the same as infusion sub slot
            return None, ip_sub_slot
        if len(curr.finished_sub_slots) > 1:
            # Have both sub-slots
            return curr.finished_sub_slots[-2], ip_sub_slot
        # Overflow block: the signage-point sub-slot is in an earlier block.
        prev_curr: Optional[FullBlock] = await self.block_store.get_full_block(curr.prev_header_hash)
        if prev_curr is None:
            assert curr.height == 0
            prev_curr = curr
            prev_curr_br = self.block_record(curr.header_hash)
        else:
            prev_curr_br = self.block_record(curr.prev_header_hash)
        assert prev_curr_br is not None
        while prev_curr_br.height > 0:
            if prev_curr_br.first_in_sub_slot:
                prev_curr = await self.block_store.get_full_block(prev_curr_br.header_hash)
                assert prev_curr is not None
                break
            prev_curr_br = self.block_record(prev_curr_br.prev_hash)
        if len(prev_curr.finished_sub_slots) == 0:
            return None, ip_sub_slot
        return prev_curr.finished_sub_slots[-1], ip_sub_slot

    def get_recent_reward_challenges(self) -> List[Tuple[bytes32, uint128]]:
        """
        Return recent (reward challenge, total_iters) pairs walking back from
        the peak, oldest first. Used to validate incoming signage points.
        """
        peak = self.get_peak()
        if peak is None:
            return []
        recent_rc: List[Tuple[bytes32, uint128]] = []
        curr: Optional[BlockRecord] = peak
        while curr is not None and len(recent_rc) < 2 * self.constants.MAX_SUB_SLOT_BLOCKS:
            if curr != peak:
                recent_rc.append((curr.reward_infusion_new_challenge, curr.total_iters))
            if curr.first_in_sub_slot:
                assert curr.finished_reward_slot_hashes is not None
                sub_slot_total_iters = curr.ip_sub_slot_total_iters(self.constants)
                # Start from the most recent
                for rc in reversed(curr.finished_reward_slot_hashes):
                    if sub_slot_total_iters < curr.sub_slot_iters:
                        break
                    recent_rc.append((rc, sub_slot_total_iters))
                    sub_slot_total_iters = uint128(sub_slot_total_iters - curr.sub_slot_iters)
            curr = self.try_block_record(curr.prev_hash)
        return list(reversed(recent_rc))

    async def validate_unfinished_block(
        self, block: UnfinishedBlock, skip_overflow_ss_validation=True
    ) -> PreValidationResult:
        """
        Validate an unfinished block's header and body, returning a
        PreValidationResult (error code set on failure).
        """
        if (
            not self.contains_block(block.prev_header_hash)
            and not block.prev_header_hash == self.constants.GENESIS_CHALLENGE
        ):
            return PreValidationResult(uint16(Err.INVALID_PREV_BLOCK_HASH.value), None, None)
        unfinished_header_block = UnfinishedHeaderBlock(
            block.finished_sub_slots,
            block.reward_chain_block,
            block.challenge_chain_sp_proof,
            block.reward_chain_sp_proof,
            block.foliage,
            block.foliage_transaction_block,
            b"",
        )
        prev_b = self.try_block_record(unfinished_header_block.prev_header_hash)
        sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
            self.constants, len(unfinished_header_block.finished_sub_slots) > 0, prev_b, self
        )
        required_iters, error = validate_unfinished_header_block(
            self.constants,
            self,
            unfinished_header_block,
            False,
            difficulty,
            sub_slot_iters,
            skip_overflow_ss_validation,
        )
        if error is not None:
            return PreValidationResult(uint16(error.code.value), None, None)
        # -1 when extending genesis, so prev_height + 1 == 0 below.
        prev_height = (
            -1
            if block.prev_header_hash == self.constants.GENESIS_CHALLENGE
            else self.block_record(block.prev_header_hash).height
        )
        npc_result = None
        if block.transactions_generator is not None:
            assert block.transactions_info is not None
            try:
                block_generator: Optional[BlockGenerator] = await self.get_block_generator(block)
            except ValueError:
                return PreValidationResult(uint16(Err.GENERATOR_REF_HAS_NO_GENERATOR.value), None, None)
            if block_generator is None:
                return PreValidationResult(uint16(Err.GENERATOR_REF_HAS_NO_GENERATOR.value), None, None)
            npc_result = get_name_puzzle_conditions(
                block_generator,
                min(self.constants.MAX_BLOCK_COST_CLVM, block.transactions_info.cost),
                cost_per_byte=self.constants.COST_PER_BYTE,
                safe_mode=False,
            )
        error_code, cost_result = await validate_block_body(
            self.constants,
            self,
            self.block_store,
            self.coin_store,
            self.get_peak(),
            block,
            uint32(prev_height + 1),
            npc_result,
            None,
            self.get_block_generator,
        )
        if error_code is not None:
            return PreValidationResult(uint16(error_code.value), None, None)
        return PreValidationResult(None, required_iters, cost_result)

    async def pre_validate_blocks_multiprocessing(
        self,
        blocks: List[FullBlock],
        npc_results: Dict[uint32, NPCResult],
        batch_size: int = 4,
        wp_summaries: Optional[List[SubEpochSummary]] = None,
    ) -> Optional[List[PreValidationResult]]:
        # Delegates to the module-level implementation, supplying this chain's
        # constants, process pool, and generator lookup.
        return await pre_validate_blocks_multiprocessing(
            self.constants,
            self.constants_json,
            self,
            blocks,
            self.pool,
            True,
            npc_results,
            self.get_block_generator,
            batch_size,
            wp_summaries,
        )

    def contains_block(self, header_hash: bytes32) -> bool:
        """
        True if we have already added this block to the chain. This may return false for orphan blocks
        that we have added but no longer keep in memory.
        """
        return header_hash in self.__block_records

    def block_record(self, header_hash: bytes32) -> BlockRecord:
        # Raises KeyError if the block is not in the in-memory cache.
        return self.__block_records[header_hash]

    def height_to_block_record(self, height: uint32) -> BlockRecord:
        # Canonical-chain lookup: height -> hash -> cached BlockRecord.
        header_hash = self.height_to_hash(height)
        return self.block_record(header_hash)

    def get_ses_heights(self) -> List[uint32]:
        # Heights at which sub-epoch summaries were included, ascending.
        return sorted(self.__sub_epoch_summaries.keys())

    def get_ses(self, height: uint32) -> SubEpochSummary:
        return self.__sub_epoch_summaries[height]

    def height_to_hash(self, height: uint32) -> Optional[bytes32]:
        # NOTE(review): raises KeyError (rather than returning None) for unknown
        # heights, despite the Optional annotation — confirm callers rely on this.
        return self.__height_to_hash[height]

    def contains_height(self, height: uint32) -> bool:
        return height in self.__height_to_hash

    def get_peak_height(self) -> Optional[uint32]:
        return self._peak_height

    async def warmup(self, fork_point: uint32):
        """
        Loads blocks into the cache. The blocks loaded include all blocks from
        fork point - BLOCKS_CACHE_SIZE up to and including the fork_point.
        Args:
            fork_point: the last block height to load in the cache
        """
        if self._peak_height is None:
            return None
        block_records = await self.block_store.get_block_records_in_range(
            max(fork_point - self.constants.BLOCKS_CACHE_SIZE, uint32(0)), fork_point
        )
        for block_record in block_records.values():
            self.add_block_record(block_record)

    def clean_block_record(self, height: int):
        """
        Clears all block records in the cache which have block_record < height.
        Args:
            height: Minimum height that we need to keep in the cache
        """
        if height < 0:
            return None
        blocks_to_remove = self.__heights_in_cache.get(uint32(height), None)
        # Walk downward from `height`, dropping every cached record at each height.
        while blocks_to_remove is not None and height >= 0:
            for header_hash in blocks_to_remove:
                del self.__block_records[header_hash]  # remove from blocks
            del self.__heights_in_cache[uint32(height)]  # remove height from heights in cache
            if height == 0:
                break
            height = height - 1
            blocks_to_remove = self.__heights_in_cache.get(uint32(height), None)

    def clean_block_records(self):
        """
        Cleans the cache so that we only maintain relevant blocks. This removes
        block records that have height < peak - BLOCKS_CACHE_SIZE.
        These blocks are necessary for calculating future difficulty adjustments.
        """
        if len(self.__block_records) < self.constants.BLOCKS_CACHE_SIZE:
            return None
        peak = self.get_peak()
        assert peak is not None
        if peak.height - self.constants.BLOCKS_CACHE_SIZE < 0:
            return None
        self.clean_block_record(peak.height - self.constants.BLOCKS_CACHE_SIZE)

    async def get_block_records_in_range(self, start: int, stop: int) -> Dict[bytes32, BlockRecord]:
        return await self.block_store.get_block_records_in_range(start, stop)

    async def get_header_blocks_in_range(
        self, start: int, stop: int, tx_filter: bool = True
    ) -> Dict[bytes32, HeaderBlock]:
        """
        Return header blocks for canonical-chain heights [start, stop], keyed by
        header hash. With tx_filter, additions/removals are attached from the
        coin store. Raises ValueError if a fetched block left the main chain.
        """
        hashes = []
        for height in range(start, stop + 1):
            if self.contains_height(uint32(height)):
                header_hash: bytes32 = self.height_to_hash(uint32(height))
                hashes.append(header_hash)
        blocks: List[FullBlock] = []
        # Serve what we can from the in-memory block cache, fetch the rest from disk.
        for hash in hashes.copy():
            block = self.block_store.block_cache.get(hash)
            if block is not None:
                blocks.append(block)
                hashes.remove(hash)
        blocks_on_disk: List[FullBlock] = await self.block_store.get_blocks_by_hash(hashes)
        blocks.extend(blocks_on_disk)
        header_blocks: Dict[bytes32, HeaderBlock] = {}
        for block in blocks:
            if self.height_to_hash(block.height) != block.header_hash:
                raise ValueError(f"Block at {block.header_hash} is no longer in the blockchain (it's in a fork)")
            if tx_filter is False:
                header = get_block_header(block, [], [])
            else:
                tx_additions: List[CoinRecord] = [
                    c for c in (await self.coin_store.get_coins_added_at_height(block.height)) if not c.coinbase
                ]
                removed: List[CoinRecord] = await self.coin_store.get_coins_removed_at_height(block.height)
                header = get_block_header(
                    block, [record.coin for record in tx_additions], [record.coin.name() for record in removed]
                )
            header_blocks[header.header_hash] = header
        return header_blocks

    async def get_header_block_by_height(
        self, height: int, header_hash: bytes32, tx_filter: bool = True
    ) -> Optional[HeaderBlock]:
        # Single-height convenience wrapper around get_header_blocks_in_range;
        # returns None unless the block at `height` matches `header_hash`.
        header_dict: Dict[bytes32, HeaderBlock] = await self.get_header_blocks_in_range(height, height, tx_filter)
        if len(header_dict) == 0:
            return None
        if header_hash not in header_dict:
            return None
        return header_dict[header_hash]

    async def get_block_records_at(self, heights: List[uint32], batch_size=900) -> List[BlockRecord]:
        """
        gets block records by height (only blocks that are part of the chain)
        """
        records: List[BlockRecord] = []
        hashes = []
        assert batch_size < 999  # sqlite in python 3.7 has a limit on 999 variables in queries
        for height in heights:
            hashes.append(self.height_to_hash(height))
            if len(hashes) > batch_size:
                res = await self.block_store.get_block_records_by_hash(hashes)
                records.extend(res)
                hashes = []
        if len(hashes) > 0:
            res = await self.block_store.get_block_records_by_hash(hashes)
            records.extend(res)
        return records

    async def get_block_record_from_db(self, header_hash: bytes32) -> Optional[BlockRecord]:
        # Cache-first lookup, falling back to the block store.
        if header_hash in self.__block_records:
            return self.__block_records[header_hash]
        return await self.block_store.get_block_record(header_hash)

    def remove_block_record(self, header_hash: bytes32):
        # Drop a record from both cache indexes (by hash and by height).
        sbr = self.block_record(header_hash)
        del self.__block_records[header_hash]
        self.__heights_in_cache[sbr.height].remove(header_hash)

    def add_block_record(self, block_record: BlockRecord):
        """
        Adds a block record to the cache.
        """
        self.__block_records[block_record.header_hash] = block_record
        if block_record.height not in self.__heights_in_cache.keys():
            self.__heights_in_cache[block_record.height] = set()
        self.__heights_in_cache[block_record.height].add(block_record.header_hash)

    async def persist_sub_epoch_challenge_segments(
        self, ses_block_hash: bytes32, segments: List[SubEpochChallengeSegment]
    ):
        return await self.block_store.persist_sub_epoch_challenge_segments(ses_block_hash, segments)

    async def get_sub_epoch_challenge_segments(
        self,
        ses_block_hash: bytes32,
    ) -> Optional[List[SubEpochChallengeSegment]]:
        segments: Optional[List[SubEpochChallengeSegment]] = await self.block_store.get_sub_epoch_challenge_segments(
            ses_block_hash
        )
        if segments is None:
            return None
        return segments

    # Returns 'True' if the info is already in the set, otherwise returns 'False' and stores it.
    def seen_compact_proofs(self, vdf_info: VDFInfo, height: uint32) -> bool:
        pot_tuple = (vdf_info, height)
        if pot_tuple in self._seen_compact_proofs:
            return True
        # Periodically cleanup to keep size small. TODO: make this smarter, like FIFO.
        if len(self._seen_compact_proofs) > 10000:
            self._seen_compact_proofs.clear()
        self._seen_compact_proofs.add(pot_tuple)
        return False

    async def get_block_generator(
        self, block: Union[FullBlock, UnfinishedBlock], additional_blocks=None
    ) -> Optional[BlockGenerator]:
        """
        Build the BlockGenerator for a block, resolving its generator ref list
        to the referenced blocks' generators. Handles the reorg case by looking
        up referenced heights on the block's own (possibly forked) chain.
        Raises ValueError if a referenced block has no generator.
        """
        if additional_blocks is None:
            additional_blocks = {}
        ref_list = block.transactions_generator_ref_list
        if block.transactions_generator is None:
            assert len(ref_list) == 0
            return None
        if len(ref_list) == 0:
            return BlockGenerator(block.transactions_generator, [])
        result: List[GeneratorArg] = []
        previous_block_hash = block.prev_header_hash
        if (
            self.try_block_record(previous_block_hash)
            and self.height_to_hash(self.block_record(previous_block_hash).height) == previous_block_hash
        ):
            # We are not in a reorg, no need to look up alternate header hashes (we can get them from height_to_hash)
            for ref_height in block.transactions_generator_ref_list:
                header_hash = self.height_to_hash(ref_height)
                ref_block = await self.get_full_block(header_hash)
                assert ref_block is not None
                if ref_block.transactions_generator is None:
                    raise ValueError(Err.GENERATOR_REF_HAS_NO_GENERATOR)
                result.append(GeneratorArg(ref_block.height, ref_block.transactions_generator))
        else:
            # First tries to find the blocks in additional_blocks
            reorg_chain: Dict[uint32, FullBlock] = {}
            curr: Union[FullBlock, UnfinishedBlock] = block
            additional_height_dict = {}
            while curr.prev_header_hash in additional_blocks:
                prev: FullBlock = additional_blocks[curr.prev_header_hash]
                additional_height_dict[prev.height] = prev
                if isinstance(curr, FullBlock):
                    assert curr.height == prev.height + 1
                reorg_chain[prev.height] = prev
                curr = prev
            peak: Optional[BlockRecord] = self.get_peak()
            if self.contains_block(curr.prev_header_hash) and peak is not None:
                # Then we look up blocks up to fork point one at a time, backtracking
                previous_block_hash = curr.prev_header_hash
                prev_block_record = await self.block_store.get_block_record(previous_block_hash)
                prev_block = await self.block_store.get_full_block(previous_block_hash)
                assert prev_block is not None
                assert prev_block_record is not None
                fork = find_fork_point_in_chain(self, peak, prev_block_record)
                curr_2: Optional[FullBlock] = prev_block
                assert curr_2 is not None and isinstance(curr_2, FullBlock)
                reorg_chain[curr_2.height] = curr_2
                while curr_2.height > fork and curr_2.height > 0:
                    curr_2 = await self.block_store.get_full_block(curr_2.prev_header_hash)
                    assert curr_2 is not None
                    reorg_chain[curr_2.height] = curr_2
            for ref_height in block.transactions_generator_ref_list:
                if ref_height in reorg_chain:
                    ref_block = reorg_chain[ref_height]
                    assert ref_block is not None
                    if ref_block.transactions_generator is None:
                        raise ValueError(Err.GENERATOR_REF_HAS_NO_GENERATOR)
                    result.append(GeneratorArg(ref_block.height, ref_block.transactions_generator))
                else:
                    if ref_height in additional_height_dict:
                        ref_block = additional_height_dict[ref_height]
                    else:
                        header_hash = self.height_to_hash(ref_height)
                        ref_block = await self.get_full_block(header_hash)
                    assert ref_block is not None
                    if ref_block.transactions_generator is None:
                        raise ValueError(Err.GENERATOR_REF_HAS_NO_GENERATOR)
                    result.append(GeneratorArg(ref_block.height, ref_block.transactions_generator))
        assert len(result) == len(ref_list)
        return BlockGenerator(block.transactions_generator, result)
| kujenga/consensus/blockchain.py | 42,131 | When Blockchain.receive_block(b) is called, one of these results is returned,
showing whether the block was added to the chain (extending the peak),
and if not, why it was not added.
Adds a block record to the cache.
Clears all block records in the cache which have block_record < height.
Args:
height: Minimum height that we need to keep in the cache
Cleans the cache so that we only maintain relevant blocks. This removes
block records that have height < peak - BLOCKS_CACHE_SIZE.
These blocks are necessary for calculating future difficulty adjustments.
True if we have already added this block to the chain. This may return false for orphan blocks
that we have added but no longer keep in memory.
Return the peak of the blockchain
Added to the peak of the blockchain Added as an orphan/stale block (not a new peak of the chain) Block was not added because it was invalid Block is already present in this blockchain Block's parent (previous pointer) is not in this blockchain peak of the blockchain All blocks in peak path are guaranteed to be included, can include orphan blocks all hashes of blocks in block_record by height, used for garbage collection Defines the path from genesis to the peak, no orphan blocks All sub-epoch summaries that have been included in the blockchain from the beginning until and including the peak (height_included, SubEpochSummary). Note: ONLY for the blocks in the path to the peak Unspent Store Store Used to verify blocks in parallel Set holding seen compact proofs, in order to avoid duplicates. Whether blockchain is shut down or not Lock to prevent simultaneous reads and writes External lock handled by full node Windows Server 2016 has an issue https://bugs.python.org/issue26903 Always add the block to the database Perform the DB operations to update the state, and rollback if something goes wrong Then update the memory cache. It is important that this task is not cancelled and does not throw new coin records added Find the fork. 
if the block is just being appended, it will return the peak If no blocks in common, returns -1, and reverts all blocks Rollback sub_epoch_summaries Collect all blocks from fork point to new peak Doing a full reorg, starting at height 0 Set additions first, then removals in order to handle ephemeral coin state Add in height order is also required There can be multiple coins for the same hint Changes the peak to be the new peak This is not a heavier block than the heaviest we have seen, so we don't change the coin set This means we got to genesis and still no sub-slots Pos sub-slot is the same as infusion sub slot Have both sub-slots Start from the most recent remove from blocks remove height from heights in cache sqlite in python 3.7 has a limit on 999 variables in queries Returns 'True' if the info is already in the set, otherwise returns 'False' and stores it. Periodically cleanup to keep size small. TODO: make this smarter, like FIFO. We are not in a reorg, no need to look up alternate header hashes (we can get them from height_to_hash) First tries to find the blocks in additional_blocks Then we look up blocks up to fork point one at a time, backtracking | 3,161 | en | 0.930704 |
class ListData():
def __init__(instance):
### INTERNAL PARAMETERS #############
instance.missing_data_character = " "
#####################################
instance.dataset = []
def headers(instance):
"""
Returns the first row of the instance.dataset
Returns:
List
"""
return instance.dataset[0]
def data_rows(instance):
"""
Returns the rows of the instance.dataset except the first rows.
Returns:
List
"""
return instance.dataset[1:len(instance.dataset)]
def import_csv_file(instance, input_file_path,
column_delimiter_pattern_in_input_file,
line_head_pattern_to_remove='',
line_tail_pattern_to_remove='',
cell_head_and_tail_characters_to_remove=''):
"""
Returns:
nothing
Examples:
>>> # Import a CSV file (yasgui.org formatting)
>>> my_list_data = ListData()
>>> my_list_data.import_csv_file('test_data//yasgui_output_100.csv',
... column_delimiter_pattern_in_input_file=' , ',
... line_tail_pattern_to_remove=' ,',
... cell_head_and_tail_characters_to_remove='"')
Cleaning parameters are set. Output resulting from a demo parsing operation is as following:
----------------------------------LINE 0----------------------------------
<BLANKLINE>
----------------------------------LINE 1----------------------------------
['publication_type', 'journal_article', 'title', 'publication_year', 'author_name', 'journal_name', 'journal_issue_number', 'journal_volume_number', 'startEndPages', 'publisher_name', 'doi']
----------------------------------LINE 2----------------------------------
['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893']
<BLANKLINE>
CSV file "test_data//yasgui_output_100.csv" is imported as ListData object.
>>> # Parse a one-column CSV file
>>> my_list_data = ListData()
>>> my_list_data.import_csv_file('test_data//one_column_data.csv',
... column_delimiter_pattern_in_input_file=',')
Cleaning parameters are set. Output resulting from a demo parsing operation is as following:
----------------------------------LINE 0----------------------------------
<BLANKLINE>
----------------------------------LINE 1----------------------------------
['doi', '']
----------------------------------LINE 2----------------------------------
['10.1163/187607508X384689', '']
<BLANKLINE>
CSV file "test_data//one_column_data.csv" is imported as ListData object.
>>> my_list_data.get_column_at_index(0)
['doi', '10.1163/187607508X384689', '10.1017/S0954579416000572', '10.1007/s11562-016-0353-7', '10.1016/j.adolescence.2016.09.008', '10.1186/s13561-016-0122-6', '10.1007/s00799-016-0182-6', '10.5194/gmd-2016-266', '10.1007/s00737-015-0531-2', '10.1103/RevModPhys.88.021003', 'https://doi.org/10.1101/167171', 'https://doi.org/10.1016/j.chb.2017.04.047', '10.1016/j.trb.2016.09.005', '10.1016/j.ancene.2016.01.001', '10.1111/adb.12322', '10.1017/njg.2016.45', '10.1080/1359432X.2016.1209489', '10.1117/1.JBO.21.6.066008', '10.5194/gmd-10-3329-2017', '10.1016/j.rser.2017.01.103', '10.1177/2050157916664559', '10.1007/978-3-319-45931-8_17', '10.1007/s11136-015-1171-8', '10.1145/2991079.2991121', '10.1093/cz/zow089', '10.1126/science.aac8167', '10.1007/s00586-016-4606-1', '10.1186/s12937-017-0229-6', '10.1007/s11357-016-9894-1', '10.1080/00130095.2015.1094371', '10.1016/j.epsl.2016.02.028', '10.1371/journal.pone.0168636', '10.1016/j.atmosres.2016.03.016', '10.1111/deci.12206', '10.1126/science.aad9634', '10.1103/PhysRevA.94.012506', '10.4103/0019-5545.196846', '10.1016/j.cedpsych.2017.01.006', '10.3324/haematol.2015.133470', '10.1057/978-1-137-50956-7', '10.1016/j.scico.2016.04.001', 'https://doi.org/10.1016/j.scico.2016.04.001', '10.1080/03081087.2015.1053425', '10.3758/s13423-017-1270-3', '10.1681/ASN.2015030287', '10.1016/j.avb.2016.05.006', '10.1177/0971333616689191', '10.1002/sej.1243', '10.1016/j.foreco.2017.06.023', '10.1103/PhysRevLett.118.071801', 'https://doi.org/10.1093/geront/gnv127', '10.1007/978-3-319-42324-1_16', '10.1109/JBHI.2015.2412656', '10.1016/j.jeem.2016.04.002', '10.1080/00207543.2015.1058982', '10.1038/mp.2016.100', '10.1080/03003930.2016.1194267', '10.1016/j.envint.2017.01.018', '10.1038/pr.2015.179', '10.1177/1753193416669263', '10.1016/j.tre.2016.11.003', '10.1021/acs.jpcc.5b12016', '10.1002/anie.201603510', '10.1073/pnas.1607005113', '(DOI) - 10.1111/cch.12521', '10.1017/S0016756815000886', '10.1080/1350293X.2015.1073507', '10.1152/jn.00701.2015', 
'10.1371/journal.pone.0170791', '10.1016/j.seares.2016.07.005', '10.1016/j.reseneeco.2016.03.003', '10.1007/s00531-017-1499-0', '10.1007/s41669-017-0014-7', '10.1093/acrefore/9780190228613.013.439', '10.14814/phy2.13201', '10.1016/j.jtrangeo.2016.10.013', '10.1523/JNEUROSCI.3658-16.2017', '10.1192/bjpo.bp.115.000166', '10.1136/bmjgh-2016-000109', '10.7554/eLife.20320.001', '10.1037/pas0000332', '10.1177/1474704916673841', '10.1057/978-1-137-58179-2', '10.1002/ejp.963', '10.1017/thg.2016.78', '10.1038/tpj.2016.32', '10.1016/j.jesp.2017.03.008', '10.1287/trsc.2015.0647', '10.1186/s13015-016-0087-3', '10.1016/j.neuroimage.2016.10.030', '10.1371/journal.pone.0169109', '10.1007/s11367-017-1358-z', '10.1080/1369183X.2015.1061425', '10.2196/mental.4614', '10.1002/arp.1564', '10.1021/acs.orglett.6b01023', '10.3847/1538-4357/aa6c47', 'http://www.socialevraagstukken.nl/veiligheid-creeer-je-met-geborgenheid/', '10.1186/s12888-016-0790-0', '10.1371/journal.pone.0155755']
#>>> Enter parsing paramaters that do not match the contents of the CSV file
#>>> Error is not invoked anymore as another from CSV_File takes over. Kept for possible future use
#>>> my_list_data = ListData()
#>>> try:
#... my_list_data.import_csv_file('test_data//one_column_data.txt',
#... column_delimiter_pattern_in_input_file='\\n',
#... line_head_pattern_to_remove='',
#... line_tail_pattern_to_remove='')
#... except Exception as error_message:
#... print('Exception caught: ' + str(error_message))
Cleaning parameters are set. Output resulting from a demo parsing operation is as following:
----------------------------------LINE 0----------------------------------
<BLANKLINE>
Exception caught: No data imported from CSV file "test_data//one_column_data.csv". Parsing parameters provided does not seem to match formatting of the inputted CSV file.
"""
from preprocessor.csv_tools import CSV_File
csv_file = CSV_File(input_file_path,
column_delimiter_pattern_in_input_file=column_delimiter_pattern_in_input_file)
csv_file.set_parsing_and_cleaning_parameters(line_head_pattern_to_remove=line_head_pattern_to_remove,
line_tail_pattern_to_remove=line_tail_pattern_to_remove,
cell_head_and_tail_characters_to_remove=cell_head_and_tail_characters_to_remove)
with open(csv_file.input_file_path, encoding='utf8') as input_file:
for i, each_line in enumerate(input_file):
csv_line = csv_file.get_line_at_position_from_file(i + 1)
csv_row = csv_file.clean_and_parse_line_to_CSV_Row_using_cleaning_parameters(csv_line)
instance.append_row(csv_row)
if instance.dataset: # if not empty
print('\nCSV file "%s" is imported as ListData object.' % csv_file.input_file_path)
else:
raise ValueError('No data imported from CSV file "%s". Parsing parameters provided does not seem to match '
'formatting of the inputted CSV file.' % csv_file.input_file_path)
def import_json_object(instance, json_object):
"""
Converts a JSON formatted object to a ListData object.
Args:
json_dictionary(dict): a dictionary that is formatted as JSON
Returns:
Examples:
>>> my_json_object = {
... 1: {'label': 'Example', 'value': 3},
... 2: {'label': 'Test', 'value': 1},
... 3: {'label': 'Tryout'}
... }
>>> print(my_json_object)
{1: {'label': 'Example', 'value': 3}, 2: {'label': 'Test', 'value': 1}, 3: {'label': 'Tryout'}}
>>> my_list_data = ListData()
>>> my_list_data.import_json_object(my_json_object)
>>> print(my_list_data.dataset)
[['label', 'value'], ['Example', 3], ['Test', 1], ['Tryout', ' ']]
"""
from preprocessor.legacy_functions.get_header_index import get_header_index
# iterate through all entries and their ids in the input Bibliography
# (this first iteration is only for indexing all headers in the instance.headers_row. all headers must be
# indexed first before starting to add data rows, because adding header names on-the-go would create data rows
# of differing lengths)
# instance.headers should be empty (and thus, should give an error if attempted to be indexed)
try:
# if instance.headers is not empty (and thus, does not give an index error) raise exception
if instance.headers():
raise Exception('Instance.headers not empty prior to append operation. This method is not compatible '
'with adding new headers/columns.')
# if there an index error, this indicates that the instance.headers() is indeed empty (and therefore cannot
# be indexed).
except IndexError:
headers_list = []
for each_entry_id, each_entry_data in json_object.items():
# add each field name in the input Bibliography to instance.headers_row
for each_field_name in each_entry_data.keys():
if each_field_name not in headers_list:
# add to headers row
headers_list.append(each_field_name)
# add the now fully populated instance.headers_row as the first row of the full dataset (instance.dataset)
instance.dataset.append(headers_list)
# iterate (once again) through all entries and their ids in the input Bibliography
# (this second iteration is for adding data rows)
for each_entry_id, each_entry_data in json_object.items():
# add a blank list to represent a new row per each entry in inputted Bibliography object.
instance.dataset.append([])
# select the last added row
current_row = instance.dataset[-1]
# make this last added row (i.e., each row) as long as the header row
while len(current_row) < len(instance.headers()):
current_row.append(instance.missing_data_character)
# for each field_name-field_value pair in the input Bibliography
for each_field_name, each_field_value in each_entry_data.items():
# extract the index number of the field name's representation in the headers row
current_field_name_header_index = get_header_index(each_field_name, instance.dataset)
current_row[current_field_name_header_index] = each_field_value
def import_bibliography_object(instance, bibliography_object):
"""
Converts a Bibliography class object to a ListData object.
Returns:
ListData class object
Examples:
>>> from triplicator.bibTools import Bibliography
>>> my_bibliography = Bibliography()
>>> my_bibliography.setEntry('01', 'author', 'John Doe')
>>> my_bibliography.setEntry('02', 'author', 'Jane Doe')
>>> #my_bibliography.import_data('..//triplicator//example_data//test.bib')
>>> print(my_bibliography.entries)
{'01': {'author': 'John Doe'}, '02': {'author': 'Jane Doe'}}
>>> my_list_data = ListData()
>>> my_list_data.import_bibliography_object(my_bibliography)
>>> print(my_list_data.dataset)
[['author'], ['John Doe'], ['Jane Doe']]
"""
instance.import_json_object(bibliography_object.entries)
def get_column_at_index(instance, index):
'''
Allows columns to be selected (i.e., returned) by entering their index position.
:return: A list vector that contains values from the queried column
:example:
>>> my_listdata = ListData()
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['jane', 2054]]
>>> my_listdata.get_column_at_index(1)
['birth_date', 2084, 2054]
'''
#############################################################################################################
# assign the column matching the current_index to a variable
column = [each_row[index] for each_row in instance.dataset]
return column
def get_row_length(instance):
"""
Gets the length of a sample row from the dataset.
Returns:
Integer
Examples:
>>> my_listdata = ListData()
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['jane', 2054]]
>>> my_listdata.get_row_length()
2
"""
probe_index = 0
row_length = 0
try:
row_length = len(instance.dataset[probe_index])
except IndexError:
raise ('Not possible to probe row at index %s. Nothing found at this index position.' % probe_index)
return row_length
def transpose_dataset(instance):
"""
>>> my_listdata = ListData()
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['jane', 2054]]
>>> my_listdata.transpose_dataset().dataset
[['name', 'john', 'jane'], ['birth_date', 2084, 2054]]
>>> my_listdata.transpose_dataset().dataset
[['name', 'birth_date'], ['john', 2084], ['jane', 2054]]
>>> my_listdata.transpose_dataset().dataset == my_listdata.transpose_dataset().transpose_dataset().dataset
True
"""
row_length = instance.get_row_length()
columns = [instance.get_column_at_index(i) for i in range(0, row_length)]
instance.dataset = columns
return instance
def merge_all_rows_to_one(instance, value_separator_pattern=' | '):
"""
>>> my_listdata = ListData().append_row(['john', 2054]).append_row(['john', 3254])
>>> my_listdata.merge_all_rows_to_one().dataset
['john', '2054 | 3254']
>>> my_listdata = ListData()
>>> my_listdata.dataset = [['john', 2054], ['john', 3254], ['john', 2672]]
>>> my_listdata.merge_all_rows_to_one().dataset
['john', '2054 | 3254 | 2672']
# method does not deal with headers
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['john', 2054]]
>>> my_listdata.merge_all_rows_to_one().dataset
['name | john', 'birth_date | 2084 | 2054']
# but headers can be easily managed
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['john', 2054]]
>>> my_listdata.dataset = my_listdata.dataset[1:]
>>> my_listdata.merge_all_rows_to_one().dataset
['john', '2084 | 2054']
# different separator pattern (and a transpose-like operation)
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['john', 2054], ['jane', 2054]]
>>> my_listdata.merge_all_rows_to_one('; ').dataset
['name; john; jane', 'birth_date; 2084; 2054']
>>> type(my_listdata.dataset)
<class 'list'>
>>> from preprocessor.csv_tools import CSV_Line, CSV_Row, Row_Merge_Buffer
>>> line_1 = CSV_Line(' "Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "Steer - Robert A." , "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "10.1037//0022-006x.56.6.893" ,')
>>> line_2 = CSV_Line(' "Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "John - Doe B." , "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "https://doi.org/10.1037//0022-006x.56.6.893" ,')
>>> line_1.clean_head_and_tail_from_patterns(' ,', location='tail').clean_head_and_tail_from_patterns(' ', location='head')
'"Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "Steer - Robert A." , "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "10.1037//0022-006x.56.6.893"'
>>> line_2.clean_head_and_tail_from_patterns(' ,', location='tail').clean_head_and_tail_from_patterns(' ', location='head')
'"Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "John - Doe B." , "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "https://doi.org/10.1037//0022-006x.56.6.893"'
>>> row_1 = line_1.parse_line_and_CONVERT_to_CSV_Row(' , ').clean_cell_heads_and_tails_from_characters('"')
>>> row_2 = line_2.parse_line_and_CONVERT_to_CSV_Row(' , ').clean_cell_heads_and_tails_from_characters('"')
>>> buffer = Row_Merge_Buffer(1)
>>> buffer.append_as_first_row_and_reset_buffer(row_1)
"https://w3id.org/oc/corpus/br/45174: [['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893']]"
>>> buffer.append_row_if_ids_match(row_2)
"https://w3id.org/oc/corpus/br/45174: [['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893'], ['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', 'https://doi.org/10.1037//0022-006x.56.6.893']]"
>>> buffer.merge_all_rows_to_one(' | ')
"https://w3id.org/oc/corpus/br/45174: ['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A. | John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893 | https://doi.org/10.1037//0022-006x.56.6.893']"
# List conversion with actual rows
>>> a = ListData()
>>> a.dataset = [['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893'], ['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', 'https://doi.org/10.1037//0022-006x.56.6.893']]
>>> a.merge_all_rows_to_one(' | ').dataset
['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A. | John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893 | https://doi.org/10.1037//0022-006x.56.6.893']
# Row_Merge_Buffer class conversion with actual rows
>>> a = Row_Merge_Buffer(1)
>>> a.dataset = [['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893'], ['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', 'https://doi.org/10.1037//0022-006x.56.6.893']]
>>> a.merge_all_rows_to_one(' | ').dataset
['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A. | John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893 | https://doi.org/10.1037//0022-006x.56.6.893']
# Error from empty dataset
>>> a = ListData()
>>>
>>> try:
... a.merge_all_rows_to_one(' | ') # no item to index in empty dataset
... except Exception as error_message:
... print('Exception: ' + str(error_message))
Exception: Dataset to be merged is either empty or not indexable (no item at index [0]).
The input dataset is:
[]
"""
try:
instance.dataset[0]
except IndexError:
raise IndexError('Dataset to be merged is either empty or not indexable (no item at index [0]).\nThe input dataset is:\n%s' % str(instance.dataset))
dataset = instance.dataset
# initiate merged_row with the first row of the dataset
merged_row = dataset[0]
for each_row in dataset:
current_row = each_row
current_cell_position = 0
for each_current_cell, each_merged_cell in zip(current_row, merged_row):
if str(each_current_cell) not in str(each_merged_cell): # str conversion needed for 'in' comparison
merged_cell = str(each_merged_cell) + value_separator_pattern + str(each_current_cell)
merged_row[current_cell_position] = merged_cell
current_cell_position += 1
# no need to specify an else scenario, as if compared cells are the same, merged row can stay as is
instance.dataset = merged_row
return instance
def append_row(instance, new_row):
"""
Appends a row the ListData object's dataset variable.
Returns:
ListData object (instance)
Examples:
>>> my_listdata = ListData()
>>> my_listdata.append_row([1,2,3]).dataset
[[1, 2, 3]]
>>> my_listdata.dataset
[[1, 2, 3]]
>>> my_listdata.append_row(['a','b','c']).dataset
[[1, 2, 3], ['a', 'b', 'c']]
>>> my_listdata.dataset
[[1, 2, 3], ['a', 'b', 'c']]
>>> my_listdata.append_row(['x', 'y']).append_row(['z', 't']).append_row(['m', 'n']).dataset
[[1, 2, 3], ['a', 'b', 'c'], ['x', 'y'], ['z', 't'], ['m', 'n']]
"""
instance.dataset.append(new_row)
return instance
def clear_all(instance):
"""
Resets ListData object's dataset variable to its empty state.
Returns:
ListData object
Examples:
>>> my_listdata = ListData()
>>> my_listdata.append_row([1,2,3]).dataset
[[1, 2, 3]]
>>> my_listdata.dataset
[[1, 2, 3]]
>>> my_listdata.clear_all().dataset
[]
>>> my_listdata.dataset
[]
"""
instance.dataset = []
return instance
def append_column(instance, new_column_values, new_column_name):
"""
:param new_column_values:
:param new_column_name:
:param dataset:
:return: Changes the inputted dataset when ran (no need for assigning the output to a variable).
:usage: append_column(NEW_COLUMN_VARIABLES_LIST, NEW_COLUMN_NAME_STRING, DATASET)
:example:
>>> my_list_data = ListData()
>>> my_list_data.dataset = [['day', 'month'], [1, 'June'], [3, 'May'], [4, 'Jun']]
>>> years_column = [2149, 2150, 2151]
>>> my_list_data.append_column(years_column, "year")
>>> print(my_list_data.dataset) # changes the original data set without a need to assign the output to a new variable, etc.
[['day', 'month', 'year'], [1, 'June', 2149], [3, 'May', 2150], [4, 'Jun', 2151]]
"""
#############################################################################################################
# Check for duplicate header names
if new_column_name in instance.headers(): # if this duplicate check is not included, things go wrong (the duplicate header gets added to column values—a strange behavior, but it is prevented with not allowing duplicate headers).
print(
"ERROR: Header name already in dataset. Re-run all code up to this point or change header name.\nError "
"occured while processing new_column_name: " + str(
new_column_name))
raise ValueError(
"Header name already in dataset. Please choose a different name. If name is correct, try re-running "
"all code up to this point. (See console output for last header name processed.)")
if len(new_column_values) != len(instance.data_rows()):
raise Exception("Inputted column length must be equal to instance.dataset column length.\n" +
'new_column_values length: ' + str(len(new_column_values)) + '\n' +
'instance.data_rows() length: ' + str(len(instance.data_rows()))
)
# Append the inputted column to specified dataset
# pass argument to variable
new_column = new_column_values
# new column = merging of column name and column values
new_column.insert(0, new_column_name)
# for each row in the dataset, append the new column at the end
for i, row in enumerate(instance.dataset):
instance.dataset[i].append(new_column[i])
def remove_column(instance, target_column_header):
"""
Removes a column from dataset.
Args:
target_column_header(str): Name of the column to be removed.
Returns:
Nothing; modifies dataset.
Examples:
>>> example_data = [['day', 'month', 'hour'], ['1', 'June', '12.00'], ['3', 'May', '11.00'],
... ['4', 'Jun', '15.00']]
>>> my_list_data = ListData()
>>> my_list_data.dataset = example_data
>>> print(my_list_data.dataset)
[['day', 'month', 'hour'], ['1', 'June', '12.00'], ['3', 'May', '11.00'], ['4', 'Jun', '15.00']]
>>> my_list_data.remove_column('hour')
>>> print(my_list_data.dataset)
[['day', 'month'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]
"""
#############################################################################################################
# the column header also needs to be included in removal process
from preprocessor.legacy_functions.get_header_index import get_header_index
target_index = get_header_index(target_column_header, instance.dataset)
for i, row in enumerate(instance.dataset):
del (instance.dataset[i][target_index])
def remove_columns(instance, target_column_headers_list):
"""
Removes multiple columns from dataset. Is a variation of .remove_column() method to support efficient removal
of multiple columns.
Args:
target_column_headers_list(list): A list of strings whose items are the header names of columns to
be removed
Returns:
Nothing; modifies dataset.
"""
if type(target_column_headers_list) == list:
pass
else:
raise Exception('The argument "target_column_headers_list" must be of "list" type.')
for each_column_header in target_column_headers_list:
instance.remove_column(each_column_header)
def replace_headers(instance, header_replacements_list):
"""
Replaces headers of a dataset.
Args:
header_replacements_list(list): A list of strings to replace headers
Returns:
Nothing; modifies the provided dataset.
Examples:
>>> example_data = [['day', 'month'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]
>>> my_list_data = ListData()
>>> my_list_data.dataset = example_data
>>> print(my_list_data.dataset)
[['day', 'month'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]
>>> my_list_data.replace_headers(['d', 'm'])
>>> print(my_list_data.dataset)
[['d', 'm'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]
"""
# number of headers inputted should match the number of headers in the dataset
if len(header_replacements_list) == len(instance.headers()):
pass
else:
raise Exception('header_replacements_list should be the same length with instance.headers()' + '\n' +
'header_replacements_list length: ' + str(len(header_replacements_list)) + '\n' +
'instance.headers() length: ' + str(len(instance.headers()))
)
for i, each_header in enumerate(header_replacements_list):
instance.dataset[0][i] = each_header
class ListBuffer(ListData):
def __init__(self):
ListData.__init__(self)
# states
self.is_empty = True
def append_row(self, new_row):
"""
Overrides the ListData method of the same name to change buffer state to 'not empty' after adding something to
the buffer
Args:
new_row(list, bool, str, int): The object to be added as a new row to buffer
Returns:
ListData object (self)
Examples:
# initiate
>>> my_buffer = ListBuffer()
# empty?
>>> my_buffer.is_empty
True
# simple add
>>> a = my_buffer.append_row(['item 1', 'item 2', 'item 3']) # variable assignment is to suppress output
# fluent interface
>>> my_buffer.append_row(['item 4', 'item 5', 'item 6']). \
append_row(['item 7', 'item 8', 'item 9']).dataset
[['item 1', 'item 2', 'item 3'], ['item 4', 'item 5', 'item 6'], ['item 7', 'item 8', 'item 9']]
# empty now?
>>> my_buffer.is_empty
False
"""
ListData.append_row(self, new_row)
self.is_empty = False
return self
def is_each_row_balanced(self, exclude_special_rows_of_syntax=None):
"""
Checks whether each row in buffer is balanced (i.e., does not have unmatched parantheses, brackets, etc). Can
exclude special row types (e.g., comment) from evaluation.
Args:
exclude_special_rows_of_syntax(str): specifies what type of rows to exclude from evaluation
(e.g., comment rows). Uses predefined syntax settings per specified syntax (e.g., 'bibtex').
Keyword Args:
- bibtex (exclude_special_rows_of_syntax): sets evaluation exclusion criteria for bibtex syntax
Returns:
boolean
Examples:
>>> # an unbalanced row is present
>>> my_buffer = ListBuffer()
>>> my_buffer.append_row(['a', 'b', 'c']).append_row(['d', 'e', 'f']).dataset
[['a', 'b', 'c'], ['d', 'e', 'f']]
>>> my_buffer.append_row(['g', 'h' , '>'])\
.is_each_row_balanced()
False
>>> # single row from a bib file
>>> my_buffer = ListBuffer()
>>> my_buffer.append_row(' year = "2017",')\
.is_each_row_balanced()
True
>>> # bibtex entry start (no exception vs. exception)
>>> my_buffer.append_row('@article{96d9add3e2f44e8abbf030170689bc30,')\
.is_each_row_balanced()
False
>>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax='bibtex')
True
>>> # bibtex comment (no exception vs. exception)
>>> my_buffer = ListBuffer()
>>> my_buffer.append_row('% This is a comment with an unbalanced characters }]>')\
.is_each_row_balanced()
False
>>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax='bibtex')
True
>>> # a full bibtex entry with an unbalanced curly bracket at title field
>>> my_buffer = ListBuffer()
>>> my_buffer.dataset = ['@book{a82caf00e1a143759c7f5543b6c84ea5,', 'title = "{Knowledge Representation for Health Care (AIME 2015 International Joint Workshop, KR4HC/ProHealth 2015)",', 'author = "D Riano and R. Lenz and S Miksch and M Peleg and M. Reichert and {ten Teije}, A.C.M.",', 'year = "2015",', 'doi = "10.1007/978-3-319-26585-8",', 'isbn = "9783319265841",', 'series = "LNAI",', 'publisher = "Springer",', 'number = "9485",', '}', '']
>>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax='bibtex') # error
False
>>> # the same entry with unbalanced curly bracket removed
>>> my_buffer.dataset = ['@book{a82caf00e1a143759c7f5543b6c84ea5,', 'title = "Knowledge Representation for Health Care (AIME 2015 International Joint Workshop, KR4HC/ProHealth 2015)",', 'author = "D Riano and R. Lenz and S Miksch and M Peleg and M. Reichert and {ten Teije}, A.C.M.",', 'year = "2015",', 'doi = "10.1007/978-3-319-26585-8",', 'isbn = "9783319265841",', 'series = "LNAI",', 'publisher = "Springer",', 'number = "9485",', '}', '']
>>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax='bibtex')
True
"""
from preprocessor.string_tools import String
buffer = self.dataset
is_balanced_log = []
for each_row in buffer:
each_row = String(str(each_row))
if not each_row.is_balanced():
# print('row is not balanced: ', each_row)
### EXCLUSIONS FOR BIBTEX ###########################################
if exclude_special_rows_of_syntax == 'bibtex':
# print('special syntax = bibtex recognized')
# forgive these row types
if each_row.is_line_type('bibtex', 'start of entry') \
or each_row.is_line_type('bibtex', 'end of entry') \
or each_row.is_line_type('bibtex', 'comment'):
is_balanced_log.append(True)
# print("01: appended True to log, because the row is unbalanced but it passed exclusion rules", "the current row (each_row) is: ", "(", type(each_row) ,")", each_row)
else:
is_balanced_log.append(False)
######################################################################
else:
is_balanced_log.append(False)
# print("02: appended False to log because row is unbalanced (no exclusion keyword specified) ", "the current row (each_row) is: ", "(", type(each_row) ,")", each_row)
else:
is_balanced_log.append(True)
# print("03: appended True to log because row is balanced ", "the current row (each_row) is: ", "(", type(each_row) ,")", each_row)
if False in is_balanced_log:
return False
else:
return True
def is_parsable(self, syntax_to_parse_by='bibtex'):
"""
Args:
syntax_to_parse_by:
Returns:
boolean
Examples:
# bibtex entry with no issues
>>> my_buffer = ListBuffer()
>>> my_buffer.dataset = ['@article{5f3ed8a5037f4837be0c7e8e5a1f0948,',
... 'title = "New Horizons biedt eindelijk goede blik op Pluto",',
... 'author = "B. Andeweg",',
... 'year = "2015",',
... 'month = "7",',
... 'journal = "Volkskrant",',
... '}']
>>> my_buffer.is_parsable()
True
# unmatched " in author field
>>> my_buffer = ListBuffer()
>>> my_buffer.dataset = ['@article{5f3ed8a5037f4837be0c7e8e5a1f0948,',
... 'title = "New Horizons biedt eindelijk goede blik op Pluto",',
... 'author = "B. "Andeweg",',
... 'year = "2015",',
... 'month = "7",',
... 'journal = "Volkskrant",',
... '}']
>>> my_buffer.is_parsable()
False
"""
if syntax_to_parse_by == 'bibtex':
# import and shorten bibtex parser function
from pybtex.database.input import bibtex
parser = bibtex.Parser()
# TODO: test can be made in memory instead of via temporary file (could use 'parser.parse_bytes')
with open('temp_buffer_dump.bib', 'w', encoding='utf8') as temp_buffer_dump_file:
for each_buffer_row in self.dataset:
print(each_buffer_row, file=temp_buffer_dump_file)
with open('temp_buffer_dump.bib', encoding='utf8') as temp_buffer_dump_file:
try:
parsed_file = parser.parse_file(temp_buffer_dump_file)
return True
except:
return False
| preprocessor/ListData.py | 40,084 | :param new_column_values:
:param new_column_name:
:param dataset:
:return: Changes the inputted dataset when ran (no need for assigning the output to a variable).
:usage: append_column(NEW_COLUMN_VARIABLES_LIST, NEW_COLUMN_NAME_STRING, DATASET)
:example:
>>> my_list_data = ListData()
>>> my_list_data.dataset = [['day', 'month'], [1, 'June'], [3, 'May'], [4, 'Jun']]
>>> years_column = [2149, 2150, 2151]
>>> my_list_data.append_column(years_column, "year")
>>> print(my_list_data.dataset) # changes the original data set without a need to assign the output to a new variable, etc.
[['day', 'month', 'year'], [1, 'June', 2149], [3, 'May', 2150], [4, 'Jun', 2151]]
Appends a row to the ListData object's dataset variable.
Returns:
ListData object (instance)
Examples:
>>> my_listdata = ListData()
>>> my_listdata.append_row([1,2,3]).dataset
[[1, 2, 3]]
>>> my_listdata.dataset
[[1, 2, 3]]
>>> my_listdata.append_row(['a','b','c']).dataset
[[1, 2, 3], ['a', 'b', 'c']]
>>> my_listdata.dataset
[[1, 2, 3], ['a', 'b', 'c']]
>>> my_listdata.append_row(['x', 'y']).append_row(['z', 't']).append_row(['m', 'n']).dataset
[[1, 2, 3], ['a', 'b', 'c'], ['x', 'y'], ['z', 't'], ['m', 'n']]
Overrides the ListData method of the same name to change buffer state to 'not empty' after adding something to
the buffer
Args:
new_row(list, bool, str, int): The object to be added as a new row to buffer
Returns:
ListData object (self)
Examples:
# initiate
>>> my_buffer = ListBuffer()
# empty?
>>> my_buffer.is_empty
True
# simple add
>>> a = my_buffer.append_row(['item 1', 'item 2', 'item 3']) # variable assignment is to suppress output
# fluent interface
>>> my_buffer.append_row(['item 4', 'item 5', 'item 6']). append_row(['item 7', 'item 8', 'item 9']).dataset
[['item 1', 'item 2', 'item 3'], ['item 4', 'item 5', 'item 6'], ['item 7', 'item 8', 'item 9']]
# empty now?
>>> my_buffer.is_empty
False
Resets ListData object's dataset variable to its empty state.
Returns:
ListData object
Examples:
>>> my_listdata = ListData()
>>> my_listdata.append_row([1,2,3]).dataset
[[1, 2, 3]]
>>> my_listdata.dataset
[[1, 2, 3]]
>>> my_listdata.clear_all().dataset
[]
>>> my_listdata.dataset
[]
Returns the rows of the instance.dataset except the first row.
Returns:
List
Allows columns to be selected (i.e., returned) by entering their index position.
:return: A list vector that contains values from the queried column
:example:
>>> my_listdata = ListData()
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['jane', 2054]]
>>> my_listdata.get_column_at_index(1)
['birth_date', 2084, 2054]
Gets the length of a sample row from the dataset.
Returns:
Integer
Examples:
>>> my_listdata = ListData()
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['jane', 2054]]
>>> my_listdata.get_row_length()
2
Returns the first row of the instance.dataset
Returns:
List
Converts a Bibliography class object to a ListData object.
Returns:
ListData class object
Examples:
>>> from triplicator.bibTools import Bibliography
>>> my_bibliography = Bibliography()
>>> my_bibliography.setEntry('01', 'author', 'John Doe')
>>> my_bibliography.setEntry('02', 'author', 'Jane Doe')
>>> #my_bibliography.import_data('..//triplicator//example_data//test.bib')
>>> print(my_bibliography.entries)
{'01': {'author': 'John Doe'}, '02': {'author': 'Jane Doe'}}
>>> my_list_data = ListData()
>>> my_list_data.import_bibliography_object(my_bibliography)
>>> print(my_list_data.dataset)
[['author'], ['John Doe'], ['Jane Doe']]
Returns:
nothing
Examples:
>>> # Import a CSV file (yasgui.org formatting)
>>> my_list_data = ListData()
>>> my_list_data.import_csv_file('test_data//yasgui_output_100.csv',
... column_delimiter_pattern_in_input_file=' , ',
... line_tail_pattern_to_remove=' ,',
... cell_head_and_tail_characters_to_remove='"')
Cleaning parameters are set. Output resulting from a demo parsing operation is as following:
----------------------------------LINE 0----------------------------------
<BLANKLINE>
----------------------------------LINE 1----------------------------------
['publication_type', 'journal_article', 'title', 'publication_year', 'author_name', 'journal_name', 'journal_issue_number', 'journal_volume_number', 'startEndPages', 'publisher_name', 'doi']
----------------------------------LINE 2----------------------------------
['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893']
<BLANKLINE>
CSV file "test_data//yasgui_output_100.csv" is imported as ListData object.
>>> # Parse a one-column CSV file
>>> my_list_data = ListData()
>>> my_list_data.import_csv_file('test_data//one_column_data.csv',
... column_delimiter_pattern_in_input_file=',')
Cleaning parameters are set. Output resulting from a demo parsing operation is as following:
----------------------------------LINE 0----------------------------------
<BLANKLINE>
----------------------------------LINE 1----------------------------------
['doi', '']
----------------------------------LINE 2----------------------------------
['10.1163/187607508X384689', '']
<BLANKLINE>
CSV file "test_data//one_column_data.csv" is imported as ListData object.
>>> my_list_data.get_column_at_index(0)
['doi', '10.1163/187607508X384689', '10.1017/S0954579416000572', '10.1007/s11562-016-0353-7', '10.1016/j.adolescence.2016.09.008', '10.1186/s13561-016-0122-6', '10.1007/s00799-016-0182-6', '10.5194/gmd-2016-266', '10.1007/s00737-015-0531-2', '10.1103/RevModPhys.88.021003', 'https://doi.org/10.1101/167171', 'https://doi.org/10.1016/j.chb.2017.04.047', '10.1016/j.trb.2016.09.005', '10.1016/j.ancene.2016.01.001', '10.1111/adb.12322', '10.1017/njg.2016.45', '10.1080/1359432X.2016.1209489', '10.1117/1.JBO.21.6.066008', '10.5194/gmd-10-3329-2017', '10.1016/j.rser.2017.01.103', '10.1177/2050157916664559', '10.1007/978-3-319-45931-8_17', '10.1007/s11136-015-1171-8', '10.1145/2991079.2991121', '10.1093/cz/zow089', '10.1126/science.aac8167', '10.1007/s00586-016-4606-1', '10.1186/s12937-017-0229-6', '10.1007/s11357-016-9894-1', '10.1080/00130095.2015.1094371', '10.1016/j.epsl.2016.02.028', '10.1371/journal.pone.0168636', '10.1016/j.atmosres.2016.03.016', '10.1111/deci.12206', '10.1126/science.aad9634', '10.1103/PhysRevA.94.012506', '10.4103/0019-5545.196846', '10.1016/j.cedpsych.2017.01.006', '10.3324/haematol.2015.133470', '10.1057/978-1-137-50956-7', '10.1016/j.scico.2016.04.001', 'https://doi.org/10.1016/j.scico.2016.04.001', '10.1080/03081087.2015.1053425', '10.3758/s13423-017-1270-3', '10.1681/ASN.2015030287', '10.1016/j.avb.2016.05.006', '10.1177/0971333616689191', '10.1002/sej.1243', '10.1016/j.foreco.2017.06.023', '10.1103/PhysRevLett.118.071801', 'https://doi.org/10.1093/geront/gnv127', '10.1007/978-3-319-42324-1_16', '10.1109/JBHI.2015.2412656', '10.1016/j.jeem.2016.04.002', '10.1080/00207543.2015.1058982', '10.1038/mp.2016.100', '10.1080/03003930.2016.1194267', '10.1016/j.envint.2017.01.018', '10.1038/pr.2015.179', '10.1177/1753193416669263', '10.1016/j.tre.2016.11.003', '10.1021/acs.jpcc.5b12016', '10.1002/anie.201603510', '10.1073/pnas.1607005113', '(DOI) - 10.1111/cch.12521', '10.1017/S0016756815000886', '10.1080/1350293X.2015.1073507', '10.1152/jn.00701.2015', 
'10.1371/journal.pone.0170791', '10.1016/j.seares.2016.07.005', '10.1016/j.reseneeco.2016.03.003', '10.1007/s00531-017-1499-0', '10.1007/s41669-017-0014-7', '10.1093/acrefore/9780190228613.013.439', '10.14814/phy2.13201', '10.1016/j.jtrangeo.2016.10.013', '10.1523/JNEUROSCI.3658-16.2017', '10.1192/bjpo.bp.115.000166', '10.1136/bmjgh-2016-000109', '10.7554/eLife.20320.001', '10.1037/pas0000332', '10.1177/1474704916673841', '10.1057/978-1-137-58179-2', '10.1002/ejp.963', '10.1017/thg.2016.78', '10.1038/tpj.2016.32', '10.1016/j.jesp.2017.03.008', '10.1287/trsc.2015.0647', '10.1186/s13015-016-0087-3', '10.1016/j.neuroimage.2016.10.030', '10.1371/journal.pone.0169109', '10.1007/s11367-017-1358-z', '10.1080/1369183X.2015.1061425', '10.2196/mental.4614', '10.1002/arp.1564', '10.1021/acs.orglett.6b01023', '10.3847/1538-4357/aa6c47', 'http://www.socialevraagstukken.nl/veiligheid-creeer-je-met-geborgenheid/', '10.1186/s12888-016-0790-0', '10.1371/journal.pone.0155755']
#>>> Enter parsing parameters that do not match the contents of the CSV file
#>>> Error is not invoked anymore as another from CSV_File takes over. Kept for possible future use
#>>> my_list_data = ListData()
#>>> try:
#... my_list_data.import_csv_file('test_data//one_column_data.txt',
#... column_delimiter_pattern_in_input_file='\n',
#... line_head_pattern_to_remove='',
#... line_tail_pattern_to_remove='')
#... except Exception as error_message:
#... print('Exception caught: ' + str(error_message))
Cleaning parameters are set. Output resulting from a demo parsing operation is as following:
----------------------------------LINE 0----------------------------------
<BLANKLINE>
Exception caught: No data imported from CSV file "test_data//one_column_data.csv". Parsing parameters provided does not seem to match formatting of the inputted CSV file.
Converts a JSON formatted object to a ListData object.
Args:
json_dictionary(dict): a dictionary that is formatted as JSON
Returns:
Examples:
>>> my_json_object = {
... 1: {'label': 'Example', 'value': 3},
... 2: {'label': 'Test', 'value': 1},
... 3: {'label': 'Tryout'}
... }
>>> print(my_json_object)
{1: {'label': 'Example', 'value': 3}, 2: {'label': 'Test', 'value': 1}, 3: {'label': 'Tryout'}}
>>> my_list_data = ListData()
>>> my_list_data.import_json_object(my_json_object)
>>> print(my_list_data.dataset)
[['label', 'value'], ['Example', 3], ['Test', 1], ['Tryout', ' ']]
Checks whether each row in buffer is balanced (i.e., does not have unmatched parantheses, brackets, etc). Can
exclude special row types (e.g., comment) from evaluation.
Args:
exclude_special_rows_of_syntax(str): specifies what type of rows to exclude from evaluation
(e.g., comment rows). Uses predefined syntax settings per specified syntax (e.g., 'bibtex').
Keyword Args:
- bibtex (exclude_special_rows_of_syntax): sets evaluation exclusion criteria for bibtex syntax
Returns:
boolean
Examples:
>>> # an unbalanced row is present
>>> my_buffer = ListBuffer()
>>> my_buffer.append_row(['a', 'b', 'c']).append_row(['d', 'e', 'f']).dataset
[['a', 'b', 'c'], ['d', 'e', 'f']]
>>> my_buffer.append_row(['g', 'h' , '>']) .is_each_row_balanced()
False
>>> # single row from a bib file
>>> my_buffer = ListBuffer()
>>> my_buffer.append_row(' year = "2017",') .is_each_row_balanced()
True
>>> # bibtex entry start (no exception vs. exception)
>>> my_buffer.append_row('@article{96d9add3e2f44e8abbf030170689bc30,') .is_each_row_balanced()
False
>>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax='bibtex')
True
>>> # bibtex comment (no exception vs. exception)
>>> my_buffer = ListBuffer()
>>> my_buffer.append_row('% This is a comment with an unbalanced characters }]>') .is_each_row_balanced()
False
>>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax='bibtex')
True
>>> # a full bibtex entry with an unbalanced curly bracket at title field
>>> my_buffer = ListBuffer()
>>> my_buffer.dataset = ['@book{a82caf00e1a143759c7f5543b6c84ea5,', 'title = "{Knowledge Representation for Health Care (AIME 2015 International Joint Workshop, KR4HC/ProHealth 2015)",', 'author = "D Riano and R. Lenz and S Miksch and M Peleg and M. Reichert and {ten Teije}, A.C.M.",', 'year = "2015",', 'doi = "10.1007/978-3-319-26585-8",', 'isbn = "9783319265841",', 'series = "LNAI",', 'publisher = "Springer",', 'number = "9485",', '}', '']
>>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax='bibtex') # error
False
>>> # the same entry with unbalanced curly bracket removed
>>> my_buffer.dataset = ['@book{a82caf00e1a143759c7f5543b6c84ea5,', 'title = "Knowledge Representation for Health Care (AIME 2015 International Joint Workshop, KR4HC/ProHealth 2015)",', 'author = "D Riano and R. Lenz and S Miksch and M Peleg and M. Reichert and {ten Teije}, A.C.M.",', 'year = "2015",', 'doi = "10.1007/978-3-319-26585-8",', 'isbn = "9783319265841",', 'series = "LNAI",', 'publisher = "Springer",', 'number = "9485",', '}', '']
>>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax='bibtex')
True
Args:
syntax_to_parse_by:
Returns:
boolean
Examples:
# bibtex entry with no issues
>>> my_buffer = ListBuffer()
>>> my_buffer.dataset = ['@article{5f3ed8a5037f4837be0c7e8e5a1f0948,',
... 'title = "New Horizons biedt eindelijk goede blik op Pluto",',
... 'author = "B. Andeweg",',
... 'year = "2015",',
... 'month = "7",',
... 'journal = "Volkskrant",',
... '}']
>>> my_buffer.is_parsable()
True
# unmatched " in author field
>>> my_buffer = ListBuffer()
>>> my_buffer.dataset = ['@article{5f3ed8a5037f4837be0c7e8e5a1f0948,',
... 'title = "New Horizons biedt eindelijk goede blik op Pluto",',
... 'author = "B. "Andeweg",',
... 'year = "2015",',
... 'month = "7",',
... 'journal = "Volkskrant",',
... '}']
>>> my_buffer.is_parsable()
False
>>> my_listdata = ListData().append_row(['john', 2054]).append_row(['john', 3254])
>>> my_listdata.merge_all_rows_to_one().dataset
['john', '2054 | 3254']
>>> my_listdata = ListData()
>>> my_listdata.dataset = [['john', 2054], ['john', 3254], ['john', 2672]]
>>> my_listdata.merge_all_rows_to_one().dataset
['john', '2054 | 3254 | 2672']
# method does not deal with headers
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['john', 2054]]
>>> my_listdata.merge_all_rows_to_one().dataset
['name | john', 'birth_date | 2084 | 2054']
# but headers can be easily managed
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['john', 2054]]
>>> my_listdata.dataset = my_listdata.dataset[1:]
>>> my_listdata.merge_all_rows_to_one().dataset
['john', '2084 | 2054']
# different separator pattern (and a transpose-like operation)
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['john', 2054], ['jane', 2054]]
>>> my_listdata.merge_all_rows_to_one('; ').dataset
['name; john; jane', 'birth_date; 2084; 2054']
>>> type(my_listdata.dataset)
<class 'list'>
>>> from preprocessor.csv_tools import CSV_Line, CSV_Row, Row_Merge_Buffer
>>> line_1 = CSV_Line(' "Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "Steer - Robert A." , "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "10.1037//0022-006x.56.6.893" ,')
>>> line_2 = CSV_Line(' "Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "John - Doe B." , "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "https://doi.org/10.1037//0022-006x.56.6.893" ,')
>>> line_1.clean_head_and_tail_from_patterns(' ,', location='tail').clean_head_and_tail_from_patterns(' ', location='head')
'"Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "Steer - Robert A." , "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "10.1037//0022-006x.56.6.893"'
>>> line_2.clean_head_and_tail_from_patterns(' ,', location='tail').clean_head_and_tail_from_patterns(' ', location='head')
'"Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "John - Doe B." , "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "https://doi.org/10.1037//0022-006x.56.6.893"'
>>> row_1 = line_1.parse_line_and_CONVERT_to_CSV_Row(' , ').clean_cell_heads_and_tails_from_characters('"')
>>> row_2 = line_2.parse_line_and_CONVERT_to_CSV_Row(' , ').clean_cell_heads_and_tails_from_characters('"')
>>> buffer = Row_Merge_Buffer(1)
>>> buffer.append_as_first_row_and_reset_buffer(row_1)
"https://w3id.org/oc/corpus/br/45174: [['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893']]"
>>> buffer.append_row_if_ids_match(row_2)
"https://w3id.org/oc/corpus/br/45174: [['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893'], ['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', 'https://doi.org/10.1037//0022-006x.56.6.893']]"
>>> buffer.merge_all_rows_to_one(' | ')
"https://w3id.org/oc/corpus/br/45174: ['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A. | John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893 | https://doi.org/10.1037//0022-006x.56.6.893']"
# List conversion with actual rows
>>> a = ListData()
>>> a.dataset = [['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893'], ['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', 'https://doi.org/10.1037//0022-006x.56.6.893']]
>>> a.merge_all_rows_to_one(' | ').dataset
['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A. | John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893 | https://doi.org/10.1037//0022-006x.56.6.893']
# Row_Merge_Buffer class conversion with actual rows
>>> a = Row_Merge_Buffer(1)
>>> a.dataset = [['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893'], ['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', 'https://doi.org/10.1037//0022-006x.56.6.893']]
>>> a.merge_all_rows_to_one(' | ').dataset
['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A. | John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893 | https://doi.org/10.1037//0022-006x.56.6.893']
# Error from empty dataset
>>> a = ListData()
>>>
>>> try:
... a.merge_all_rows_to_one(' | ') # no item to index in empty dataset
... except Exception as error_message:
... print('Exception: ' + str(error_message))
Exception: Dataset to be merged is either empty or not indexable (no item at index [0]).
The input dataset is:
[]
Removes a column from dataset.
Args:
target_column_header(str): Name of the column to be removed.
Returns:
Nothing; modifies dataset.
Examples:
>>> example_data = [['day', 'month', 'hour'], ['1', 'June', '12.00'], ['3', 'May', '11.00'],
... ['4', 'Jun', '15.00']]
>>> my_list_data = ListData()
>>> my_list_data.dataset = example_data
>>> print(my_list_data.dataset)
[['day', 'month', 'hour'], ['1', 'June', '12.00'], ['3', 'May', '11.00'], ['4', 'Jun', '15.00']]
>>> my_list_data.remove_column('hour')
>>> print(my_list_data.dataset)
[['day', 'month'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]
Removes multiple columns from dataset. Is a variation of .remove_column() method to support efficient removal
of multiple columns.
Args:
target_column_headers_list(list): A list of strings whose items are the header names of columns to
be removed
Returns:
Nothing; modifies dataset.
Replaces headers of a dataset.
Args:
header_replacements_list(list): A list of strings to replace headers
Returns:
Nothing; modifies the provided dataset.
Examples:
>>> example_data = [['day', 'month'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]
>>> my_list_data = ListData()
>>> my_list_data.dataset = example_data
>>> print(my_list_data.dataset)
[['day', 'month'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]
>>> my_list_data.replace_headers(['d', 'm'])
>>> print(my_list_data.dataset)
[['d', 'm'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]
>>> my_listdata = ListData()
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['jane', 2054]]
>>> my_listdata.transpose_dataset().dataset
[['name', 'john', 'jane'], ['birth_date', 2084, 2054]]
>>> my_listdata.transpose_dataset().dataset
[['name', 'birth_date'], ['john', 2084], ['jane', 2054]]
>>> my_listdata.transpose_dataset().dataset == my_listdata.transpose_dataset().transpose_dataset().dataset
True
INTERNAL PARAMETERS if not empty iterate through all entries and their ids in the input Bibliography (this first iteration is only for indexing all headers in the instance.headers_row. all headers must be indexed first before starting to add data rows, because adding header names on-the-go would create data rows of differing lengths) instance.headers should be empty (and thus, should give an error if attempted to be indexed) if instance.headers is not empty (and thus, does not give an index error) raise exception if there an index error, this indicates that the instance.headers() is indeed empty (and therefore cannot be indexed). add each field name in the input Bibliography to instance.headers_row add to headers row add the now fully populated instance.headers_row as the first row of the full dataset (instance.dataset) iterate (once again) through all entries and their ids in the input Bibliography (this second iteration is for adding data rows) add a blank list to represent a new row per each entry in inputted Bibliography object. select the last added row make this last added row (i.e., each row) as long as the header row for each field_name-field_value pair in the input Bibliography extract the index number of the field name's representation in the headers row assign the column matching the current_index to a variable initiate merged_row with the first row of the dataset str conversion needed for 'in' comparison no need to specify an else scenario, as if compared cells are the same, merged row can stay as is Check for duplicate header names if this duplicate check is not included, things go wrong (the duplicate header gets added to column values—a strange behavior, but it is prevented with not allowing duplicate headers). 
Append the inputted column to specified dataset pass argument to variable new column = merging of column name and column values for each row in the dataset, append the new column at the end the column header also needs to be included in removal process number of headers inputted should match the number of headers in the dataset states print('row is not balanced: ', each_row) EXCLUSIONS FOR BIBTEX print('special syntax = bibtex recognized') forgive these row types print("01: appended True to log, because the row is unbalanced but it passed exclusion rules", "the current row (each_row) is: ", "(", type(each_row) ,")", each_row) print("02: appended False to log because row is unbalanced (no exclusion keyword specified) ", "the current row (each_row) is: ", "(", type(each_row) ,")", each_row) print("03: appended True to log because row is balanced ", "the current row (each_row) is: ", "(", type(each_row) ,")", each_row) import and shorten bibtex parser function TODO: test can be made in memory instead of via temporary file (could use 'parser.parse_bytes') | 26,350 | en | 0.375172 |
"""[HTTPX](https://www.python-httpx.org/) 驱动适配
```bash
nb driver install httpx
# 或者
pip install nonebot2[httpx]
```
:::tip 提示
本驱动仅支持客户端 HTTP 连接
:::
FrontMatter:
sidebar_position: 3
description: nonebot.drivers.httpx 模块
"""
from typing import Type, AsyncGenerator
from contextlib import asynccontextmanager
from nonebot.typing import overrides
from nonebot.drivers._block_driver import BlockDriver
from nonebot.drivers import (
Request,
Response,
WebSocket,
HTTPVersion,
ForwardMixin,
ForwardDriver,
combine_driver,
)
try:
    import httpx
except ImportError:
    # Re-raise with installation instructions; `from None` suppresses the
    # original traceback so the actionable message is front and center.
    raise ImportError(
        "Please install httpx by using `pip install nonebot2[httpx]`"
    ) from None
class Mixin(ForwardMixin):
    """Forward mixin implemented on top of the httpx client library."""

    @property
    @overrides(ForwardMixin)
    def type(self) -> str:
        # Identifier string for this driver adapter.
        return "httpx"

    @overrides(ForwardMixin)
    async def request(self, setup: Request) -> Response:
        """Perform one HTTP request using a short-lived AsyncClient."""
        use_http2 = setup.version == HTTPVersion.H2
        async with httpx.AsyncClient(
            http2=use_http2,
            proxies=setup.proxy,
            follow_redirects=True,
        ) as session:
            result = await session.request(
                setup.method,
                str(setup.url),
                content=setup.content,
                data=setup.data,
                json=setup.json,
                files=setup.files,
                headers=tuple(setup.headers.items()),
                timeout=setup.timeout,
            )
            return Response(
                result.status_code,
                headers=result.headers,
                content=result.content,
                request=setup,
            )

    @overrides(ForwardMixin)
    @asynccontextmanager
    async def websocket(self, setup: Request) -> AsyncGenerator[WebSocket, None]:
        # Delegate websocket handling to the next class in the MRO.
        async with super().websocket(setup) as ws:
            yield ws
# Compose the blocking lifecycle driver with the httpx forward mixin into the
# driver class exported by this module.
Driver: Type[ForwardDriver] = combine_driver(BlockDriver, Mixin)  # type: ignore
"""HTTPX Driver"""
| nonebot/drivers/httpx.py | 2,033 | HTTPX Mixin
[HTTPX](https://www.python-httpx.org/) 驱动适配
```bash
nb driver install httpx
# 或者
pip install nonebot2[httpx]
```
:::tip 提示
本驱动仅支持客户端 HTTP 连接
:::
FrontMatter:
sidebar_position: 3
description: nonebot.drivers.httpx 模块
type: ignore | 253 | en | 0.330597 |
from flask import render_template, request, redirect, send_from_directory, jsonify, Blueprint
from direct_answers import choose_direct_answer
from direct_answers import search_result_features
import indieweb_utils
import search_helpers, config, search_page_feeds
import requests
import json
import math
import spacy
import mf2py
# Blueprint for the search UI routes; static assets are served from ./static
# at the application root.
main = Blueprint("main", __name__, static_folder="static", static_url_path="")

# Load the small English spaCy pipeline once at import time (the model must be
# pre-downloaded). NOTE(review): `nlp` is not used in the visible routes —
# presumably consumed elsewhere in this module; confirm before removing.
nlp = spacy.load('en_core_web_sm')
@main.route("/")
def home():
q = request.args.get("q")
return render_template("search/submit.html", title="IndieWeb Search", query=q)
@main.route("/autocomplete")
def search_autocomplete():
query = request.args.get("q")
suggest = requests.get("https://es-indieweb-search.jamesg.blog/suggest?q={}&pw={}".format(query, config.ELASTICSEARCH_PASSWORD))
return jsonify(suggest.json()), 200
@main.route("/results", methods=["GET", "POST"])
def results_page():
    """Execute a search and render or serialize the results.

    Handles: the special site:jamesg.blog redirect, advanced-search
    operators, the "xray <url>" and "random" power commands, pagination
    (?page=), ordering (?order=), featured snippets, and the alternative
    output formats json_feed, jf2, rss, direct_serp_json and
    results_page_json. Falls through to the HTML results template.
    """
    page = request.args.get("page")
    site = request.args.get("site")

    if site and site == "jamesg.blog":
        # used for special jamesg.blog search redirect, not for open use
        site = "".join([x for x in site if x.isalpha() or x == "."])
        return redirect('/results?query=site:"{}"%20{}'.format(site, request.args.get("query")))

    special_result = False

    if not request.args.get("query"):
        return redirect("/")

    # NOTE(review): .replace(" ", " ") is a no-op as written; it was likely
    # meant to collapse doubled spaces -- confirm against history.
    query_with_handled_spaces = request.args.get("query").replace("--", "").replace(" ", " ").strip()

    allowed_chars = [" ", '"', ":", "-", "/", ".", "=", ","]
    cleaned_value_for_query = ''.join(e for e in query_with_handled_spaces if e.isalnum() or e in allowed_chars).strip()

    query_values_in_list, query_with_handled_spaces = search_helpers.handle_advanced_search(query_with_handled_spaces)

    if cleaned_value_for_query.startswith("xray https://") or cleaned_value_for_query.startswith("xray http://"):
        # power command: hand the URL straight to the XRay parser
        return redirect("https://xray.p3k.io/parse?url={}".format(cleaned_value_for_query.replace("xray ", "")))

    session = requests.Session()

    if cleaned_value_for_query == "random":
        # power command: jump to a random indexed domain
        random_site = session.get("https://es-indieweb-search.jamesg.blog/random?pw={}".format(config.ELASTICSEARCH_PASSWORD)).json()["domain"]
        return redirect("https://{}/".format(random_site))

    # (a second "if not query: redirect" check used to sit here; it was
    # redundant -- the same guard already ran above and args are immutable)
    full_query_with_full_stops = ''.join(e for e in query_with_handled_spaces if e.isalnum() or e == " " or e == ".")

    if len(cleaned_value_for_query) == 0:
        return redirect("/")

    do_i_use = ""
    pagination = "0"

    if page:
        # If page cannot be converted into an integer, redirect to homepage
        try:
            if int(page) > 1:
                pagination = (int(page) - 1) * 10
        except ValueError:
            # narrowed from a bare except: only a malformed ?page= should
            # send the user home
            return redirect("/")
    else:
        page = 1

    order = "score"
    minimal = "false"

    if request.args.get("order") == "date_asc":
        order = "date_asc"
    elif request.args.get("order") == "date_desc":
        order = "date_desc"

    cleaned_value_for_query = cleaned_value_for_query.replace("what is", "")

    if request.args.get("format") and (request.args.get("format") == "json_feed" or request.args.get("format") == "jf2"):
        # feed formats only need the minimal fields from the backend
        minimal = "true"

    query_params = ""

    if query_values_in_list.get("site"):
        query_params += "&site={}".format(query_values_in_list.get("site").replace("%", ""))

    if request.args.get("query").startswith("discover"):
        query_params += "&discover=true"

    if "js:none" in request.args.get("query"):
        query_params += "&js=false"

    if query_values_in_list.get("category"):
        query_params += "&category={}".format(query_values_in_list.get("category"))

    if query_values_in_list.get("mf2prop"):
        query_params += "&mf2_property={}".format(query_values_in_list.get("mf2prop"))

    rows = session.get("https://es-indieweb-search.jamesg.blog/?pw={}&q={}&sort={}&from={}&minimal={}{}".format(
        config.ELASTICSEARCH_PASSWORD,
        cleaned_value_for_query.replace("who is", "").replace("code", "").replace("discover ", "").strip(),
        order, str(pagination),
        minimal,
        query_params)
    ).json()

    num_of_results = rows["hits"]["total"]["value"]
    rows = rows["hits"]["hits"]

    for r in rows:
        # h_card arrives as a JSON string; decode it for the templates
        if r["_source"].get("h_card"):
            r["_source"]["h_card"] = json.loads(r["_source"]["h_card"])
        else:
            r["_source"]["h_card"] = None

    cleaned_value = cleaned_value_for_query.lower()

    if page == 1:
        # NOTE(review): page is the *string* from the query args when it was
        # supplied, so this branch only fires when ?page= was absent --
        # confirm whether "?page=1" should also get a featured snippet.
        do_i_use, special_result = choose_direct_answer.choose_featured_snippet(
            cleaned_value,
            cleaned_value_for_query,
            rows,
            special_result,
            full_query_with_full_stops,
            session,
            nlp
        )

    # BUG FIX: suggestion was previously assigned only in the non-empty
    # branch below, so rendering an empty result page raised
    # UnboundLocalError at the render_template call.
    suggestion = False

    if len(rows) == 0:
        out_of_bounds_page = True
        final_query = cleaned_value_for_query
        # this code doesn't work right now
        # identify_mistakes = spell.unknown(cleaned_value.split('"')[-1].split(" "))
        # final_query = ""
        # suggestion = False
        # cleaned_items = cleaned_value.split('"')[-1].split(" ")
        # for w in range(0, len(cleaned_items)):
        #     if cleaned_items[w] in identify_mistakes and cleaned_items[w] != "":
        #         final_query += spell.correction(cleaned_items[w]) + " "
        #         suggestion = True
        #         final_query = " " + final_query
        #     else:
        #         final_query += cleaned_items[w] + " "
        # final_query = "".join(cleaned_value.split('"')[:-1]) + '" ' + final_query
    else:
        out_of_bounds_page = False
        final_query = ""

    if "random aeropress" in cleaned_value or "generate aeropress" in cleaned_value and request.args.get("type") != "image":
        # NOTE(review): 'and' binds tighter than 'or', so the type != image
        # guard only applies to the "generate aeropress" arm; parenthesize
        # if the guard was meant to cover both phrases.
        special_result = search_result_features.aeropress_recipe()

    format = request.args.get("format")

    if format == "json_feed":
        json_feed = search_page_feeds.process_json_feed(rows, cleaned_value, page, format)
        return json_feed
    elif format == "jf2":
        jf2_feed = search_page_feeds.process_jf2_feed(rows)
        return jf2_feed
    elif format == "rss":
        rss_feed = search_page_feeds.process_rss_feed(rows, cleaned_value, page, format)
        return rss_feed
    elif format == "direct_serp_json":
        if special_result:
            return jsonify({"text": do_i_use, "featured_serp": special_result})
        else:
            return jsonify({"message": "no custom serp available on this search"})
    elif format == "results_page_json":
        return jsonify({"results": [r["_source"] for r in rows]})

    # show one result if a featured snippet is available, even if there are no other results to show
    if not special_result and not do_i_use and int(num_of_results) == 0:
        num_of_results = 0
        out_of_bounds_page = True
    else:
        out_of_bounds_page = False

    return render_template("search/results.html",
        results=rows,
        number_of_results=int(num_of_results),
        page=int(page),
        page_count=int(math.ceil(num_of_results / 10)),
        query=cleaned_value,
        results_type=request.args.get("type"),
        out_of_bounds_page=out_of_bounds_page,
        ordered_by=request.args.get("order"),
        base_results_query="/results?query=" + cleaned_value_for_query,
        corrected_text=final_query,
        suggestion_made=suggestion,
        special_result=special_result,
        do_i_use=do_i_use,
        title="Search results for '{}' query".format(cleaned_value)
    )
@main.route("/robots.txt")
def robots():
    """Serve robots.txt from the blueprint's static folder."""
    return send_from_directory(main.static_folder, "robots.txt")
@main.route('/assets/<path:path>')
def send_static_images(path):
    """Serve an asset file from the static/ directory."""
    return send_from_directory("static/", path)
@main.route("/changelog")
def changelog():
    """Render the changelog page."""
    return render_template("changelog.html", title="IndieWeb Search Changelog")
@main.route("/advanced")
def advanced_search():
    """Render the advanced-search options page."""
    page_title = "IndieWeb Search Advanced Search Options"
    return render_template("search/advanced_search.html", title=page_title)
@main.route("/api/post-type")
def get_original_post_type():
    """Parse microformats from ?url= and report the discovered post type."""
    page_to_check = request.args.get("url")
    mf2_parsed = mf2py.parse(page_to_check)
    # empty parse result or no top-level items: nothing to classify
    if not mf2_parsed:
        return jsonify({"status": "failed", "result": ""})
    if not mf2_parsed["items"]:
        return jsonify({"status": "failed", "result": ""})
    # get h_entry
    h_entry = [i for i in mf2_parsed["items"] if i["type"] == ["h-entry"]]
    # NOTE(review): h_entry is a *list* of h-entry items here; confirm that
    # indieweb_utils.get_post_type expects a list rather than a single item.
    result = indieweb_utils.get_post_type(h_entry)
    return jsonify({"status": "success", "result": result})
@main.route("/api/authorship")
def get_post_author():
    """Resolve the author of the page at ?url= via the authorship algorithm.

    Returns JSON with ``status`` plus either ``result`` (discovered author)
    or ``message``/``author`` when microformats or an h-entry are missing.
    """
    page_to_check = request.args.get("url")
    mf2_parsed = mf2py.parse(page_to_check)

    if not mf2_parsed:
        return jsonify({"status": "failed", "message": "No microformats could be found on this page", "author": []})

    if not mf2_parsed["items"]:
        return jsonify({"status": "failed", "message": "No microformats could be found on this page", "author": []})

    # collect top-level h-entry and h-card items
    h_entry = [i for i in mf2_parsed["items"] if i["type"] == ["h-entry"]]
    h_card = [i for i in mf2_parsed["items"] if i["type"] == ["h-card"]]

    if not h_entry and h_card == []:
        return jsonify({"status": "failed", "message": "No h-entry could be found on this page", "author": []})

    if h_card == []:
        # BUG FIX: h_entry is a plain list of parsed items, not a parse
        # result dict, so the original ``for i in h_entry["items"]`` raised
        # TypeError whenever this branch ran. Iterate the list directly.
        for i in h_entry:
            if i['type'] == ['h-entry']:
                if i['properties'].get('author'):
                    # the author may be an embedded h-card dict ...
                    if type(i['properties']['author'][0]) == dict and i['properties']['author'][0].get('type') == ['h-card']:
                        h_card = i['properties']['author'][0]
                    # ... or a plain list of values (name or URL)
                    elif type(i['properties']['author']) == list:
                        h_card = i['properties']['author'][0]

    result = indieweb_utils.discover_author(h_card, h_entry, page_to_check, [])
    return jsonify({"status": "success", "result": result})
@main.route("/stats")
def stats():
    """Render index statistics gathered from the Elasticsearch service."""
    auth_headers = {"Authorization": config.ELASTICSEARCH_API_TOKEN}

    counts = requests.get("https://es-indieweb-search.jamesg.blog/count").json()
    feed_breakdown_request = requests.get(
        "https://es-indieweb-search.jamesg.blog/feed_breakdown", headers=auth_headers
    ).json()
    special_stats = requests.get(
        "https://es-indieweb-search.jamesg.blog/special_stats", headers=auth_headers
    ).json()

    return render_template(
        "search/stats.html",
        count=counts["es_count"]["count"],
        domains=counts["domains"],
        title="IndieWeb Search Index Stats",
        feed_breakdown=feed_breakdown_request,
        top_linked_assets=special_stats["top_ten_links"],
        link_types=special_stats["link_microformat_instances"]
    )
@main.route("/about")
def about():
    """Render the about page."""
    return render_template("search/about.html", title="About IndieWeb Search")
# 03_xkcd_multithread_download.py
# Exercise: speed up downloading the comics by using several
# threads for the downloads.
import os, threading, requests, bs4
# Run relative to this script's directory so target_dir resolves predictably.
os.chdir(os.path.dirname(__file__))
target_dir='.\\comics'
source_url='https://xkcd.com'
# Check whether the site is reachable before doing anything else.
url_content=requests.get(source_url)
try:
    url_content.raise_for_status()
except:
    print('URL xkcd.com kann nicht aufgerufen werden. Script wird beendet.')
    exit()
# Downloade die Comics als Thread
# Download a single comic image; runs as a worker thread.
def download_comic(comic_url):
    """Fetch *comic_url* and store it in ``target_dir``.

    Errors are reported to stdout but never raised, so a failing
    download cannot take down the whole program.
    """
    file_name = comic_url.split('/')[-1]
    get_comic = requests.get(comic_url)
    try:
        get_comic.raise_for_status()
        # BUG FIX: the original opened the file before the try block and
        # never closed it on the error path; 'with' guarantees the handle
        # is released even when raise_for_status() or a write fails.
        with open(target_dir + '\\' + file_name, 'wb') as new_file:
            for chunk in get_comic.iter_content(10**6):
                new_file.write(chunk)
    except Exception:
        # BUG FIX: the original wrote print('...%s...') % (comic_url),
        # which applies '%' to print()'s None return and raises TypeError.
        print('Bild-URL %s ist fehlerhaft' % comic_url)
# Collect the links to the comics and to the pages further back in the archive.
link_counter=0
threads=[]
def scrape_comic_links(url_name):
    # Walks backwards through xkcd via the rel="prev" link, spawning one
    # download thread per comic, until comic_target_amount comics are queued.
    # NOTE(review): relies on the module globals comic_target_amount,
    # source_url, link_counter and threads being set up by the caller.
    global link_counter, threads
    while link_counter != int(comic_target_amount):
        url_content=requests.get(url_name)
        try:
            url_content.raise_for_status()
            bs4_object=bs4.BeautifulSoup(url_content.text, features='html.parser')
            bs4_next_result=bs4_object.select('a[rel="prev"]')
            next_url=bs4_next_result[0].get('href')
            bs4_comic_result=bs4_object.select('div #comic img')
            comic_url=bs4_comic_result[0].get('src')
            comic_url='https://'+comic_url.lstrip('/')
            url_name=source_url+next_url
            link_counter+=1
            # Start the download thread
            thread_object=threading.Thread(name='Download_Comic', target=download_comic, args=[comic_url])
            thread_object.start()
            # Record the thread so the caller can later join() all of them.
            threads.append(thread_object)
        except:
            print('URL nicht gefunden.')
            return
    else:
        # while/else: runs when the loop condition goes false (target count
        # reached), not when the early return above fires.
        link_counter=0
        return
# Prompt until a decimal count is entered, then scrape and download.
while True:
    print('Wieviele Comics sollen heruntergeladen werden?')
    comic_target_amount=input()
    if comic_target_amount.isdecimal():
        scrape_comic_links(source_url)
        # Wait until every download thread has finished.
        for thread in threads:
            thread.join()
        print('Downloads abgeschlossen')
        break
# -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""List the versions within a key."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.cloudkms import base as cloudkms_base
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.kms import flags
class List(base.ListCommand):
  r"""List the versions within a key.

  Lists all of the versions within the given key.

  ## EXAMPLES

  The following command lists all versions within the
  key `frodo`, keyring `fellowship`, and location `global`:

    $ {command} --location global \
        --keyring fellowship \
        --key frodo
  """

  @staticmethod
  def Args(parser):
    # Register the key resource flags and a two-column output table.
    flags.AddKeyResourceFlags(parser)
    parser.display_info.AddFormat('table(name, state)')

  def Run(self, args):
    """Yields the CryptoKeyVersions of the key identified by args."""
    # pylint: disable=line-too-long
    client = cloudkms_base.GetClientInstance()
    messages = cloudkms_base.GetMessagesModule()
    crypto_key_ref = flags.ParseCryptoKeyName(args)

    request = messages.CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListRequest(
        parent=crypto_key_ref.RelativeName())
    # YieldFromList pages through results lazily instead of fetching all.
    return list_pager.YieldFromList(
        client.projects_locations_keyRings_cryptoKeys_cryptoKeyVersions,
        request,
        field='cryptoKeyVersions',
        limit=args.limit,
        batch_size_attribute='pageSize')
| lib/surface/kms/keys/versions/list.py | 2,026 | List the versions within a key.
Lists all of the versions within the given key.
## EXAMPLES
The following command lists all versions within the
key `frodo`, keyring `fellowship`, and location `global`:
$ {command} --location global \
--keyring fellowship \
--key frodo
List the versions within a key.
-*- coding: utf-8 -*- Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pylint: disable=line-too-long | 942 | en | 0.834972 |
# Create your views here.
from django.contrib.auth import get_user_model
from django.db import transaction
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from django.utils.translation import gettext as _
from openbook_common.responses import ApiMessageResponse
from openbook_common.serializers import CommonFollowRequestSerializer
from openbook_moderation.permissions import IsNotSuspended
from openbook_common.utils.helpers import normalise_request_data
from openbook_follows.serializers import FollowUserRequestSerializer, FollowSerializer, \
DeleteFollowSerializer, UpdateFollowSerializer, FollowUserSerializer, RequestToFollowUserSerializer, \
ApproveUserFollowRequestSerializer, RejectUserFollowRequestSerializer, ReceivedFollowRequestsRequestSerializer
class ReceivedFollowRequests(APIView):
    """List the follow requests the authenticated user has received."""
    permission_classes = (IsAuthenticated, IsNotSuspended)

    def get(self, request):
        params = ReceivedFollowRequestsRequestSerializer(data=request.query_params.dict())
        params.is_valid(raise_exception=True)
        validated = params.validated_data

        # Newest first, paged with max_id / count (default page size 10).
        follow_requests = request.user.get_received_follow_requests(
            max_id=validated.get('max_id')
        ).order_by('-id')[:validated.get('count', 10)]

        serialized = CommonFollowRequestSerializer(
            follow_requests, many=True, context={'request': request}
        )
        return Response(serialized.data, status=status.HTTP_200_OK)
class RequestToFollowUser(APIView):
    """Create a follow request addressed to another user."""
    permission_classes = (IsAuthenticated, IsNotSuspended)

    def put(self, request):
        body = RequestToFollowUserSerializer(data=request.data, context={"request": request})
        body.is_valid(raise_exception=True)
        target_username = body.validated_data.get('username')

        with transaction.atomic():
            follow_request = request.user.create_follow_request_for_user_with_username(
                target_username)

        serialized = CommonFollowRequestSerializer(follow_request, context={"request": request})
        return Response(serialized.data, status=status.HTTP_201_CREATED)
class CancelRequestToFollowUser(APIView):
    """Withdraw a follow request the user previously created."""
    permission_classes = (IsAuthenticated, IsNotSuspended)

    def post(self, request):
        body = RequestToFollowUserSerializer(data=request.data, context={"request": request})
        body.is_valid(raise_exception=True)
        target_username = body.validated_data.get('username')

        with transaction.atomic():
            request.user.delete_follow_request_for_user_with_username(target_username)

        return ApiMessageResponse(_('Follow request cancelled.'), status=status.HTTP_200_OK)
class ApproveUserFollowRequest(APIView):
    """Approve a follow request received from another user."""
    permission_classes = (IsAuthenticated, IsNotSuspended)

    def post(self, request):
        body = ApproveUserFollowRequestSerializer(data=request.data, context={"request": request})
        body.is_valid(raise_exception=True)
        requester_username = body.validated_data.get('username')

        with transaction.atomic():
            request.user.approve_follow_request_from_user_with_username(
                user_username=requester_username)

        return ApiMessageResponse(_('Follow request approved.'), status=status.HTTP_200_OK)
class RejectUserFollowRequest(APIView):
    """Reject a follow request received from another user."""
    permission_classes = (IsAuthenticated, IsNotSuspended)

    def post(self, request):
        body = RejectUserFollowRequestSerializer(data=request.data, context={"request": request})
        body.is_valid(raise_exception=True)
        requester_username = body.validated_data.get('username')

        with transaction.atomic():
            request.user.reject_follow_request_from_user_with_username(
                user_username=requester_username)

        return ApiMessageResponse(_('Follow request rejected.'), status=status.HTTP_200_OK)
class FollowUser(APIView):
    """Follow a user, optionally placing them on one or more lists."""
    permission_classes = (IsAuthenticated, IsNotSuspended)

    def post(self, request):
        normalised = _prepare_request_data_for_validation(request.data)
        body = FollowUserRequestSerializer(data=normalised, context={"request": request})
        body.is_valid(raise_exception=True)
        validated = body.validated_data

        target_user = get_user_model().objects.get(username=validated.get('username'))

        with transaction.atomic():
            follow = request.user.follow_user_with_id(
                target_user.pk, lists_ids=validated.get('lists_ids'))

        serialized = FollowSerializer(follow, context={"request": request})
        return Response(serialized.data, status=status.HTTP_201_CREATED)
class UnfollowUser(APIView):
    """Stop following a user."""
    permission_classes = (IsAuthenticated, IsNotSuspended)

    def post(self, request):
        body = DeleteFollowSerializer(data=request.data)
        body.is_valid(raise_exception=True)
        target_username = body.validated_data.get('username')

        target_user = get_user_model().objects.get(username=target_username)

        with transaction.atomic():
            request.user.unfollow_user_with_id(target_user.pk)

        serialized = FollowUserSerializer(target_user, context={"request": request})
        return Response(serialized.data, status=status.HTTP_200_OK)
class UpdateFollowUser(APIView):
    """Update the lists an existing follow is associated with."""
    permission_classes = (IsAuthenticated, IsNotSuspended)

    def post(self, request):
        normalised = _prepare_request_data_for_validation(request.data)
        body = UpdateFollowSerializer(data=normalised)
        body.is_valid(raise_exception=True)
        validated = body.validated_data

        followed_user = get_user_model().objects.get(username=validated.get('username'))

        with transaction.atomic():
            follow = request.user.update_follow_for_user_with_id(
                followed_user.pk, lists_ids=validated.get('lists_ids'))

        serialized = FollowSerializer(follow, context={"request": request})
        return Response(serialized.data, status=status.HTTP_200_OK)
def _prepare_request_data_for_validation(request_data):
    """Return a mutable copy of *request_data* with ``lists_ids`` normalised.

    Clients may send ``lists_ids`` as a comma-separated string; the
    serializers expect a list, so split it here before validation. The key
    is always (re)assigned, defaulting to None when absent.
    """
    normalised = normalise_request_data(request_data)
    lists_ids = normalised.get('lists_ids', None)
    normalised['lists_ids'] = lists_ids.split(',') if isinstance(lists_ids, str) else lists_ids
    return normalised
| openbook_follows/views.py | 7,396 | Create your views here. | 23 | en | 0.928092 |
"""
本文件用以练习 manim 的各种常用对象
SVGMobject
ImageMobject
TextMobject
TexMobeject
Text
参考资料: https://www.bilibili.com/video/BV1CC4y1H7kp
XiaoCY 2020-11-27
"""
#%% 初始化
from manimlib.imports import *
"""
素材文件夹介绍
在 manim 中使用各种素材时可以使用绝对路径声明素材。
为了简单,可以创建 assets 文件夹并放置在 manim 路径下。
如此做,使用素材时可以不加路径。
assets/raster_images/ 放置 png 等格式的位图
assets/svg_images/ 放置 svg 格式的矢量图
assets/sounds/ 一般不用,也可以不创建
"""
#%% SVGMobject
"""
在 manim 中使用 SVG 图片可以直接使用 SVGMobject,
传入的第一个参数是指向 SVG 文件的字符串,
关键字参数包括 VMobject 的共有属性,有
填充样式
填充颜色 fill_color 或 color
不透明度 fill_opacity
线条样式
线条颜色 stroke_color 或 color
线条宽度 stroke_width
线条不透明度 stroke_opacity
背景线条样式
背景线条颜色 background_stroke_color 或 color
背景线条宽度 background_stroke_width
背景线条不透明度 background_stroke_opacity
光泽样式
光泽尺度 sheen_factor
光泽方向 sheen_direction
"""
class try_SVGMob(Scene): # a scene is declared as a class; the name is arbitrary
    def construct(self): # Scene subclasses build their content in construct()
        # Construct an SVGMobject --- load an SVG image from the assets folder
        mob = SVGMobject(
            "coin.svg",
            color = BLUE, # manim ships named colors, see https://manim.ml/constants.html#id7
            stroke_width = 1.00
        )
        # SVGMobject supports every VMobject animation.
        # Animations are not the topic of this exercise; this is just a demo.
        self.play(FadeInFromLarge(mob))
        self.wait(2)
#%% ImageMobject
"""
与 SVGMobject 相像,插入位图时可使用 ImageMobject,
传入的第一个参数是字符串表示的位图路径,
关键字参数仅有以下部分
图片高度 height (默认为2)
是否反色 invert (默认 False)
"""
class try_ImageMob(Scene):
    def construct(self):
        # Load a raster (bitmap) image
        mob = ImageMobject(
            'smirk.png',
            height = 3
        )
        # ImageMobject is not a VMobject subclass, so many animations are
        # unavailable; animations are not discussed further here.
        self.play(FadeInFromLarge(mob))
        self.wait(2)
#%% TextMobject
"""
TextMobject 会将字符串作为 LaTeX 的普通字符进行编译
传入的第一个参数为需要添加的字符串,其可以使用 LaTeX 表达式
由于 LaTeX 表达式中常含有反斜线,构造字符串时需要采用双反斜线
或在字符串前添加 r 以消除歧义
TextMobject 是 VMobject,其他属性同 SVGMobject
一个 TextMobject 中也可以传入多个字符串,会单独编译但连在一起显示
这时可以利用索引来访问各个字符串
其他可选参数
arg_separator 传入多个字符串时,设置字符串之间的字符,默认为空格
tex_to_color_map 为一个字典,根据键值自动拆分字符串进行上色
"""
class try_TextMob(Scene):
    def construct(self):
        # Build a TextMobject from several strings and access them by index
        text = TextMobject(
            "早安, \\TeX!",
            r"你好,\LaTeX!",
            tex_to_color_map = {"\\LaTeX": RED_B}
        )
        self.play(Write(text[0]))
        self.wait(0.5)
        # Note that text[2] and text[3] are used below.
        # They did not exist originally; tex_to_color_map split the strings
        # automatically in order to color them:
        # text[0] = r"早安, \TeX!"
        # text[1] = "你好,"
        # text[2] = r"\LaTeX"
        # text[3] = "!"
        self.play(Transform(text[0],text[1:4]))
        self.wait(1)
#%% TexMobject
"""
TexMobject 实际上提供了 align* 的数学环境,用于辨析 LaTeX 数学公式
其使用方法和 TextMobject 一样
关于数学公式的 LaTeX 代码可以使用妈叔的在线编辑器
(友情推荐) https://www.latexlive.com/
"""
class try_TexMob(Scene):
    def construct(self):
        text = TextMobject('欧拉公式') # plain text label ("Euler's formula")
        tex = TexMobject( # the formula itself
            r'\mathrm{e}^{\mathrm{i}\pi} + 1 = 0',
            color = GREEN
        )
        self.play(FadeIn(text))
        self.wait(0.5)
        self.play(ReplacementTransform(text,tex))
        self.wait(1)
#%% Text
"""
TextMobject 的文字是经过 LaTeX 编译的,
若仅使用文字(不用 \LaTeX 之类的特殊符号),可以使用 Text
传入的第一个参数为文字字符串,可选参数包括
颜色 color
字体 font
颜色 t2c (字典)
"""
class try_Text(Scene):
    def construct(self):
        # Plain Text (no LaTeX compilation); t2c maps characters to colors
        text = Text(
            "Hello World!",
            font = "Adobe Heiti Std",
            t2c = {
                "H": BLUE,
                "W": RED
            }
        )
        self.play(Write(text))
        self.wait(1)
SVGMobject
ImageMobject
TextMobject
TexMobeject
Text
参考资料: https://www.bilibili.com/video/BV1CC4y1H7kp
XiaoCY 2020-11-27
%% 初始化%% SVGMobject 使用 class 创建一个场景,名字可自定义 这里 class 和 def 暂且当成是固定的套路吧 构造 SVGMobject --- 添加 SVG 图片 manim 内置部分颜色,参见 https://manim.ml/constants.htmlid7 SVGMobject 可以使用 VMobject 的所有动画 动画暂时不在本练习中具体讲解,这里仅作示意%% ImageMobject 添加位图 由于 ImageMobject 不是 VMobject 的子类,很多动画无法使用 但是这里依然不对动画做深入讨论%% TextMobject 定义一个字符串(数组),并通过下标进行访问 注意这里用到 text[2] 和 text[3] 原本不存在,但 tex_to_color_map 为了上色将其自动拆分了 text[0] = r"早安, \TeX!" text[1] = "你好," text[2] = r"\LaTeX" text[3] = "!"%% TexMobject 这是文字 这是公式%% Text | 645 | zh | 0.896572 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.box_predictor."""
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from tf_api.builders import hyperparams_builder
from tf_api.core import box_predictor
from tf_api.protos import hyperparams_pb2
class MaskRCNNBoxPredictorTest(tf.test.TestCase):
  """Tests for box_predictor.MaskRCNNBoxPredictor."""

  def _build_arg_scope_with_hyperparams(self,
                                        op_type=hyperparams_pb2.Hyperparams.FC):
    # Builds a minimal hyperparams proto (l2 regularizer + truncated-normal
    # initializer, no activation) and converts it to an arg scope.
    hyperparams = hyperparams_pb2.Hyperparams()
    hyperparams_text_proto = """
      activation: NONE
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    text_format.Merge(hyperparams_text_proto, hyperparams)
    hyperparams.op = op_type
    return hyperparams_builder.build(hyperparams, is_training=True)

  def test_get_boxes_with_five_classes(self):
    # Box encodings should be [batch, 1, num_classes, box_code_size] and
    # class predictions [batch, 1, num_classes + 1] (background included).
    image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32)
    mask_box_predictor = box_predictor.MaskRCNNBoxPredictor(
        is_training=False,
        num_classes=5,
        fc_hyperparams=self._build_arg_scope_with_hyperparams(),
        use_dropout=False,
        dropout_keep_prob=0.5,
        box_code_size=4,
    )
    box_predictions = mask_box_predictor.predict(
        image_features, num_predictions_per_location=1, scope='BoxPredictor')
    box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
    class_predictions_with_background = box_predictions[
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape,
       class_predictions_with_background_shape) = sess.run(
           [tf.shape(box_encodings),
            tf.shape(class_predictions_with_background)])
      self.assertAllEqual(box_encodings_shape, [2, 1, 5, 4])
      self.assertAllEqual(class_predictions_with_background_shape, [2, 1, 6])

  def test_value_error_on_predict_instance_masks_with_no_conv_hyperparms(self):
    # Requesting instance masks without conv hyperparams must fail fast.
    with self.assertRaises(ValueError):
      box_predictor.MaskRCNNBoxPredictor(
          is_training=False,
          num_classes=5,
          fc_hyperparams=self._build_arg_scope_with_hyperparams(),
          use_dropout=False,
          dropout_keep_prob=0.5,
          box_code_size=4,
          predict_instance_masks=True)

  def test_get_instance_masks(self):
    # Mask predictions should be [batch, 1, num_classes, mask_h, mask_w].
    image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32)
    mask_box_predictor = box_predictor.MaskRCNNBoxPredictor(
        is_training=False,
        num_classes=5,
        fc_hyperparams=self._build_arg_scope_with_hyperparams(),
        use_dropout=False,
        dropout_keep_prob=0.5,
        box_code_size=4,
        conv_hyperparams=self._build_arg_scope_with_hyperparams(
            op_type=hyperparams_pb2.Hyperparams.CONV),
        predict_instance_masks=True)
    box_predictions = mask_box_predictor.predict(
        image_features, num_predictions_per_location=1, scope='BoxPredictor')
    mask_predictions = box_predictions[box_predictor.MASK_PREDICTIONS]
    self.assertListEqual([2, 1, 5, 14, 14],
                         mask_predictions.get_shape().as_list())

  def test_do_not_return_instance_masks_and_keypoints_without_request(self):
    # Only boxes and class scores are returned unless masks/keypoints are
    # explicitly requested at construction time.
    image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32)
    mask_box_predictor = box_predictor.MaskRCNNBoxPredictor(
        is_training=False,
        num_classes=5,
        fc_hyperparams=self._build_arg_scope_with_hyperparams(),
        use_dropout=False,
        dropout_keep_prob=0.5,
        box_code_size=4)
    box_predictions = mask_box_predictor.predict(
        image_features, num_predictions_per_location=1, scope='BoxPredictor')
    self.assertEqual(len(box_predictions), 2)
    self.assertTrue(box_predictor.BOX_ENCODINGS in box_predictions)
    self.assertTrue(box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND
                    in box_predictions)

  def test_value_error_on_predict_keypoints(self):
    # Keypoint prediction is unsupported and must raise at construction.
    with self.assertRaises(ValueError):
      box_predictor.MaskRCNNBoxPredictor(
          is_training=False,
          num_classes=5,
          fc_hyperparams=self._build_arg_scope_with_hyperparams(),
          use_dropout=False,
          dropout_keep_prob=0.5,
          box_code_size=4,
          predict_keypoints=True)
class RfcnBoxPredictorTest(tf.test.TestCase):
  """Tests for box_predictor.RfcnBoxPredictor."""

  def _build_arg_scope_with_conv_hyperparams(self):
    # Minimal conv hyperparams: l2 regularizer + truncated-normal init.
    conv_hyperparams = hyperparams_pb2.Hyperparams()
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
    return hyperparams_builder.build(conv_hyperparams, is_training=True)

  def test_get_correct_box_encoding_and_class_prediction_shapes(self):
    # With 4 images x 2 proposals the leading dim of the outputs is 8:
    # boxes [8, 1, num_classes, 4], classes [8, 1, num_classes + 1].
    image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)
    proposal_boxes = tf.random_normal([4, 2, 4], dtype=tf.float32)
    rfcn_box_predictor = box_predictor.RfcnBoxPredictor(
        is_training=False,
        num_classes=2,
        conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
        num_spatial_bins=[3, 3],
        depth=4,
        crop_size=[12, 12],
        box_code_size=4
    )
    box_predictions = rfcn_box_predictor.predict(
        image_features, num_predictions_per_location=1, scope='BoxPredictor',
        proposal_boxes=proposal_boxes)
    box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
    class_predictions_with_background = box_predictions[
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape,
       class_predictions_shape) = sess.run(
           [tf.shape(box_encodings),
            tf.shape(class_predictions_with_background)])
      self.assertAllEqual(box_encodings_shape, [8, 1, 2, 4])
      self.assertAllEqual(class_predictions_shape, [8, 1, 3])
class ConvolutionalBoxPredictorTest(tf.test.TestCase):
  """Tests for ConvolutionalBoxPredictor.

  Each test builds a predictor over random image features and checks the
  runtime shapes of the returned box encodings and class/objectness
  predictions.  NOTE(review): uses TF1-style graph + session APIs
  (tf.global_variables_initializer, self.test_session).
  """

  def _build_arg_scope_with_conv_hyperparams(self):
    # Minimal conv hyperparams: ReLU6 activation, l2 regularizer,
    # truncated-normal initializer (all proto defaults otherwise).
    conv_hyperparams = hyperparams_pb2.Hyperparams()
    conv_hyperparams_text_proto = """
      activation: RELU_6
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
    return hyperparams_builder.build(conv_hyperparams, is_training=True)

  def test_get_boxes_for_five_aspect_ratios_per_location(self):
    # num_classes=0 -> class-agnostic objectness predictions.
    image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)
    conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
        is_training=False,
        num_classes=0,
        conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
        min_depth=0,
        max_depth=32,
        num_layers_before_predictor=1,
        use_dropout=True,
        dropout_keep_prob=0.8,
        kernel_size=1,
        box_code_size=4
    )
    box_predictions = conv_box_predictor.predict(
        image_features, num_predictions_per_location=5, scope='BoxPredictor')
    box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
    objectness_predictions = box_predictions[
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape,
       objectness_predictions_shape) = sess.run(
           [tf.shape(box_encodings), tf.shape(objectness_predictions)])
      # 8x8 feature map * 5 predictions per location = 320 anchors.
      self.assertAllEqual(box_encodings_shape, [4, 320, 1, 4])
      self.assertAllEqual(objectness_predictions_shape, [4, 320, 1])

  def test_get_boxes_for_one_aspect_ratio_per_location(self):
    image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)
    conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
        is_training=False,
        num_classes=0,
        conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
        min_depth=0,
        max_depth=32,
        num_layers_before_predictor=1,
        use_dropout=True,
        dropout_keep_prob=0.8,
        kernel_size=1,
        box_code_size=4
    )
    box_predictions = conv_box_predictor.predict(
        image_features, num_predictions_per_location=1, scope='BoxPredictor')
    box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
    objectness_predictions = box_predictions[
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape,
       objectness_predictions_shape) = sess.run(
           [tf.shape(box_encodings), tf.shape(objectness_predictions)])
      # 8x8 feature map * 1 prediction per location = 64 anchors.
      self.assertAllEqual(box_encodings_shape, [4, 64, 1, 4])
      self.assertAllEqual(objectness_predictions_shape, [4, 64, 1])

  def test_get_multi_class_predictions_for_five_aspect_ratios_per_location(
      self):
    num_classes_without_background = 6
    image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)
    conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
        is_training=False,
        num_classes=num_classes_without_background,
        conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
        min_depth=0,
        max_depth=32,
        num_layers_before_predictor=1,
        use_dropout=True,
        dropout_keep_prob=0.8,
        kernel_size=1,
        box_code_size=4
    )
    box_predictions = conv_box_predictor.predict(
        image_features,
        num_predictions_per_location=5,
        scope='BoxPredictor')
    box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
    class_predictions_with_background = box_predictions[
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape, class_predictions_with_background_shape
      ) = sess.run([
          tf.shape(box_encodings), tf.shape(class_predictions_with_background)])
      self.assertAllEqual(box_encodings_shape, [4, 320, 1, 4])
      # Class dimension includes the implicit background class (+1).
      self.assertAllEqual(class_predictions_with_background_shape,
                          [4, 320, num_classes_without_background+1])

  def test_get_boxes_for_five_aspect_ratios_per_location_fully_convolutional(
      self):
    # Spatial dims unknown at graph-build time; resolved in feed_dict below.
    image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64])
    conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
        is_training=False,
        num_classes=0,
        conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
        min_depth=0,
        max_depth=32,
        num_layers_before_predictor=1,
        use_dropout=True,
        dropout_keep_prob=0.8,
        kernel_size=1,
        box_code_size=4
    )
    box_predictions = conv_box_predictor.predict(
        image_features, num_predictions_per_location=5, scope='BoxPredictor')
    box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
    objectness_predictions = box_predictions[
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
    init_op = tf.global_variables_initializer()
    resolution = 32
    expected_num_anchors = resolution*resolution*5
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape,
       objectness_predictions_shape) = sess.run(
           [tf.shape(box_encodings), tf.shape(objectness_predictions)],
           feed_dict={image_features:
                      np.random.rand(4, resolution, resolution, 64)})
      self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4])
      self.assertAllEqual(objectness_predictions_shape,
                          [4, expected_num_anchors, 1])
# Run the test suite when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| src/animal_detection/tf_api/core/box_predictor_test.py | 12,636 | Tests for object_detection.core.box_predictor.
Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== | 709 | en | 0.823591 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import collections
import copy
import itertools
import os
import pprint
import sys
import types
import warnings
from six import string_types
import archspec.cpu
try:
import clingo
# There may be a better way to detect this
clingo_cffi = hasattr(clingo.Symbol, '_rep')
except ImportError:
clingo = None # type: ignore
clingo_cffi = False
import llnl.util.lang
import llnl.util.tty as tty
import spack
import spack.architecture
import spack.bootstrap
import spack.cmd
import spack.compilers
import spack.config
import spack.dependency
import spack.directives
import spack.environment as ev
import spack.error
import spack.package
import spack.package_prefs
import spack.repo
import spack.spec
import spack.util.timer
import spack.variant
import spack.version
if sys.version_info >= (3, 3):
from collections.abc import Sequence # novm
else:
from collections import Sequence
#: Enumeration like object to mark version provenance
#: (lower value = more important origin when sorting declared versions)
version_provenance = collections.namedtuple(  # type: ignore
    'VersionProvenance', ['external', 'packages_yaml', 'package_py', 'spec']
)(spec=0, external=1, packages_yaml=2, package_py=3)

#: String representation of version origins, to emit legible
# facts for the ASP solver
version_origin_str = {
    0: 'spec',
    1: 'external',
    2: 'packages_yaml',
    3: 'package_py'
}

#: Named tuple to contain information on declared versions
#: (version: the Version, idx: preference index within its origin,
#:  origin: a version_provenance value)
DeclaredVersion = collections.namedtuple(
    'DeclaredVersion', ['version', 'idx', 'origin']
)
def issequence(obj):
    """Return True if obj is a non-string sequence or a generator."""
    # Strings are technically sequences, but callers never want them
    # expanded element-by-element, so exclude them explicitly.
    is_string = isinstance(obj, string_types)
    return (not is_string) and isinstance(
        obj, (Sequence, types.GeneratorType))
def listify(args):
    """Normalize *args-style input: unwrap a single sequence argument.

    ``listify((seq,))`` returns ``list(seq)``; anything else is just
    converted with ``list()``.
    """
    if len(args) == 1 and issequence(args[0]):
        (only_arg,) = args
        return list(only_arg)
    return list(args)
def packagize(pkg):
    """Resolve a package name to its package class; pass classes through."""
    if not isinstance(pkg, string_types):
        # already a package (class); nothing to do
        return pkg
    return spack.repo.path.get_pkg_class(pkg)
def specify(spec):
    """Coerce the argument into a Spec (strings are parsed, Specs pass)."""
    is_spec = isinstance(spec, spack.spec.Spec)
    return spec if is_spec else spack.spec.Spec(spec)
# Marker base class: instances render themselves as raw ASP text, so
# _id() below passes them through unquoted.
class AspObject(object):
    """Object representing a piece of ASP code."""
def _id(thing):
    """Quote string if needed for it to be a valid identifier."""
    if isinstance(thing, AspObject):
        # already valid ASP text -- emit verbatim
        return thing
    if isinstance(thing, int) and not isinstance(thing, bool):
        # plain integers are valid ASP terms; note bool is an int
        # subclass and must be quoted like a string instead
        return str(thing)
    return '"%s"' % str(thing)
@llnl.util.lang.key_ordering
class AspFunction(AspObject):
    """A named ASP function term, e.g. ``version("pkg", "1.0")``."""

    def __init__(self, name, args=None):
        self.name = name
        self.args = () if args is None else args

    def _cmp_key(self):
        # equality/ordering are derived from this key by key_ordering
        return (self.name, self.args)

    def __call__(self, *args):
        # ``fn.foo(a, b)`` builds a new term with the arguments bound
        return AspFunction(self.name, args)

    def symbol(self, positive=True):
        """Convert this term into a clingo Symbol."""
        def to_symbol(value):
            # bools must become strings: clingo has no boolean terms,
            # and bool is an int subclass so it is excluded explicitly
            if isinstance(value, int) and not isinstance(value, bool):
                return clingo.Number(value)
            return clingo.String(str(value))
        return clingo.Function(
            self.name, [to_symbol(a) for a in self.args], positive=positive)

    def __str__(self):
        arg_text = ', '.join(str(_id(a)) for a in self.args)
        return "%s(%s)" % (self.name, arg_text)

    def __repr__(self):
        return str(self)
class AspFunctionBuilder(object):
    """Attribute-based factory for AspFunction heads.

    ``fn.some_name`` returns ``AspFunction("some_name")`` so solver code
    can write facts as ``fn.node("mpich")``.
    """
    def __getattr__(self, name):
        return AspFunction(name)
#: Module-level singleton used throughout to build ASP facts.
fn = AspFunctionBuilder()
def all_compilers_in_config():
    """Return every compiler known to the current Spack configuration."""
    return spack.compilers.all_compilers()
def extend_flag_list(flag_list, new_flags):
    """Extend a list of flags, preserving order and precedence.

    Add new_flags at the end of flag_list.  If any flags in new_flags are
    already in flag_list, they are moved to the end so that they take
    higher precedence on the compile line.
    """
    for new_flag in new_flags:
        try:
            # already present: drop the first occurrence so the flag
            # moves to the end (highest precedence)
            flag_list.remove(new_flag)
        except ValueError:
            pass
        flag_list.append(new_flag)
def check_same_flags(flag_dict_1, flag_dict_2):
    """Assert that two flag dicts contain the same flags regardless of order.

    Raises AssertionError if any flag type has a differing set of flags.
    """
    # renamed from ``types`` to avoid shadowing the stdlib module import
    all_flag_types = set(flag_dict_1) | set(flag_dict_2)
    for flag_type in all_flag_types:
        left = set(flag_dict_1.get(flag_type, []))
        right = set(flag_dict_2.get(flag_type, []))
        assert left == right
def check_packages_exist(specs):
    """Ensure all packages mentioned in specs exist."""
    repo = spack.repo.path
    for spec in specs:
        for s in spec.traverse():
            try:
                # a node is acceptable if it is a real or a virtual package
                check_passed = repo.exists(s.name) or repo.is_virtual(s.name)
            except Exception as e:
                # repo lookup itself failed -- treat as missing package
                msg = 'Cannot find package: {0}'.format(str(e))
                check_passed = False
                tty.debug(msg)

            if not check_passed:
                raise spack.repo.UnknownPackageError(str(s.fullname))
class Result(object):
    """Result of an ASP solve."""
    def __init__(self, specs, asp=None):
        # asp: optional text of the ASP program, kept for debugging
        self.asp = asp
        self.satisfiable = None
        self.optimal = None
        self.warnings = None
        self.nmodels = 0

        # specs ordered by optimization level
        self.answers = []
        self.cores = []

        # names of optimization criteria
        self.criteria = []

        # Abstract user requests
        self.abstract_specs = specs

        # Concrete specs
        self._concrete_specs = None

    def print_cores(self):
        # Each core is a set of constraints that cannot all hold at once.
        for core in self.cores:
            tty.msg(
                "The following constraints are unsatisfiable:",
                *sorted(str(symbol) for symbol in core))

    @property
    def specs(self):
        """List of concretized specs satisfying the initial
        abstract request.
        """
        # The specs were already computed, return them
        if self._concrete_specs:
            return self._concrete_specs

        # Assert prerequisite
        msg = 'cannot compute specs ["satisfiable" is not True ]'
        assert self.satisfiable, msg

        self._concrete_specs = []
        # answers are (opt, index, answer) tuples; min() picks best opt
        best = min(self.answers)
        opt, _, answer = best
        for input_spec in self.abstract_specs:
            key = input_spec.name
            if input_spec.virtual:
                # map a virtual request to the provider chosen in the answer
                providers = [spec.name for spec in answer.values()
                             if spec.package.provides(key)]
                key = providers[0]
            self._concrete_specs.append(answer[key])

        return self._concrete_specs
def _normalize_packages_yaml(packages_yaml):
normalized_yaml = copy.copy(packages_yaml)
for pkg_name in packages_yaml:
is_virtual = spack.repo.path.is_virtual(pkg_name)
if pkg_name == 'all' or not is_virtual:
continue
# Remove the virtual entry from the normalized configuration
data = normalized_yaml.pop(pkg_name)
is_buildable = data.get('buildable', True)
if not is_buildable:
for provider in spack.repo.path.providers_for(pkg_name):
entry = normalized_yaml.setdefault(provider.name, {})
entry['buildable'] = False
externals = data.get('externals', [])
keyfn = lambda x: spack.spec.Spec(x['spec']).name
for provider, specs in itertools.groupby(externals, key=keyfn):
entry = normalized_yaml.setdefault(provider, {})
entry.setdefault('externals', []).extend(specs)
return normalized_yaml
class PyclingoDriver(object):
    """Low-level driver that feeds facts/rules to clingo and runs the solve."""

    def __init__(self, cores=True, asp=None):
        """Driver for the Python clingo interface.

        Arguments:
            cores (bool): whether to generate unsatisfiable cores for better
                error reporting.
            asp (file-like): optional stream to write a text-based ASP program
                for debugging or verification.
        """
        global clingo
        if not clingo:
            # clingo failed to import at module load: bootstrap it now
            with spack.bootstrap.ensure_bootstrap_configuration():
                spack.bootstrap.ensure_clingo_importable_or_raise()
                import clingo
        self.out = asp or llnl.util.lang.Devnull()
        self.cores = cores

    def title(self, name, char):
        # Write a banner of `char` around `name` into the ASP dump.
        self.out.write('\n')
        self.out.write("%" + (char * 76))
        self.out.write('\n')
        self.out.write("%% %s\n" % name)
        self.out.write("%" + (char * 76))
        self.out.write('\n')

    def h1(self, name):
        self.title(name, "=")

    def h2(self, name):
        self.title(name, "-")

    def newline(self):
        self.out.write('\n')

    def fact(self, head):
        """ASP fact (a rule without a body)."""
        symbol = head.symbol() if hasattr(head, 'symbol') else head

        self.out.write("%s.\n" % str(symbol))

        atom = self.backend.add_atom(symbol)
        # facts become choices when core extraction is on, so they can be
        # "assumed" and reported in unsatisfiable cores
        self.backend.add_rule([atom], [], choice=self.cores)
        if self.cores:
            self.assumptions.append(atom)

    def solve(
            self, solver_setup, specs, dump=None, nmodels=0,
            timers=False, stats=False, tests=False
    ):
        # Orchestrates the full pipeline: setup -> load -> ground -> solve,
        # then converts the best model (or the cores) into a Result.
        timer = spack.util.timer.Timer()

        # Initialize the control object for the solver
        self.control = clingo.Control()
        self.control.configuration.solve.models = nmodels
        self.control.configuration.asp.trans_ext = 'all'
        self.control.configuration.asp.eq = '5'
        self.control.configuration.configuration = 'tweety'
        self.control.configuration.solve.parallel_mode = '2'
        self.control.configuration.solver.opt_strategy = "usc,one"

        # set up the problem -- this generates facts and rules
        self.assumptions = []
        with self.control.backend() as backend:
            self.backend = backend
            solver_setup.setup(self, specs, tests=tests)
        timer.phase("setup")

        # read in the main ASP program and display logic -- these are
        # handwritten, not generated, so we load them as resources
        parent_dir = os.path.dirname(__file__)
        self.control.load(os.path.join(parent_dir, 'concretize.lp'))
        self.control.load(os.path.join(parent_dir, "display.lp"))
        timer.phase("load")

        # Grounding is the first step in the solve -- it turns our facts
        # and first-order logic rules into propositional logic.
        self.control.ground([("base", [])])
        timer.phase("ground")

        # With a grounded program, we can run the solve.
        result = Result(specs)
        models = []  # stable models if things go well
        cores = []  # unsatisfiable cores if they do not

        def on_model(model):
            models.append((model.cost, model.symbols(shown=True, terms=True)))

        solve_kwargs = {"assumptions": self.assumptions,
                        "on_model": on_model,
                        "on_core": cores.append}
        if clingo_cffi:
            solve_kwargs["on_unsat"] = cores.append
        solve_result = self.control.solve(**solve_kwargs)
        timer.phase("solve")

        # once done, construct the solve result
        result.satisfiable = solve_result.satisfiable

        def stringify(x):
            if clingo_cffi:
                # Clingo w/ CFFI will throw an exception on failure
                try:
                    return x.string
                except RuntimeError:
                    return str(x)
            else:
                return x.string or str(x)

        if result.satisfiable:
            # build spec from the best model
            builder = SpecBuilder(specs)
            min_cost, best_model = min(models)
            tuples = [
                (sym.name, [stringify(a) for a in sym.arguments])
                for sym in best_model
            ]
            answers = builder.build_specs(tuples)

            # add best spec to the results
            result.answers.append((list(min_cost), 0, answers))

            # pull optimization criteria names out of the solution
            criteria = [
                (int(args[0]), args[1]) for name, args in tuples
                if name == "opt_criterion"
            ]
            result.criteria = [t[1] for t in sorted(criteria, reverse=True)]

            # record the number of models the solver considered
            result.nmodels = len(models)

        elif cores:
            # translate raw literals back into readable symbols
            symbols = dict(
                (a.literal, a.symbol)
                for a in self.control.symbolic_atoms
            )
            for core in cores:
                core_symbols = []
                for atom in core:
                    sym = symbols[atom]
                    if sym.name == "rule":
                        sym = sym.arguments[0].string
                    core_symbols.append(sym)
                result.cores.append(core_symbols)

        if timers:
            timer.write_tty()
            print()
        if stats:
            print("Statistics:")
            pprint.pprint(self.control.statistics)

        return result
class SpackSolverSetup(object):
"""Class to set up and run a Spack concretization solve."""
    def __init__(self):
        self.gen = None  # set by setup()

        # version info gathered per package from package.py, packages.yaml,
        # externals and CLI specs
        self.declared_versions = {}
        self.possible_versions = {}
        self.deprecated_versions = {}

        self.possible_virtuals = None
        self.possible_compilers = []
        self.variant_values_from_specs = set()

        # constraints collected during clause generation; supporting facts
        # for them are emitted later in the setup pass
        self.version_constraints = set()
        self.target_constraints = set()
        self.compiler_version_constraints = set()
        self.post_facts = []

        # id for dummy variables
        self._condition_id_counter = itertools.count()

        # Caches to optimize the setup phase of the solver
        self.target_specs_cache = None
    def pkg_version_rules(self, pkg):
        """Output declared versions of a package.

        This uses self.possible_versions so that we include any versions
        that arise from a spec.
        """
        def key_fn(version):
            # Origins are sorted by order of importance:
            # 1. Spec from command line
            # 2. Externals
            # 3. Package preferences
            # 4. Directives in package.py
            return version.origin, version.idx

        pkg = packagize(pkg)
        declared_versions = self.declared_versions[pkg.name]
        most_to_least_preferred = sorted(declared_versions, key=key_fn)

        # enumerate() index doubles as the solver weight:
        # lower weight == more preferred version
        for weight, declared_version in enumerate(most_to_least_preferred):
            self.gen.fact(fn.version_declared(
                pkg.name, declared_version.version, weight,
                version_origin_str[declared_version.origin]
            ))

        # Declare deprecated versions for this package, if any
        deprecated = self.deprecated_versions[pkg.name]
        for v in sorted(deprecated):
            self.gen.fact(fn.deprecated_version(pkg.name, v))
    def spec_versions(self, spec):
        """Return list of clauses expressing spec's version constraints."""
        spec = specify(spec)
        assert spec.name

        if spec.concrete:
            # a concrete spec pins exactly one version
            return [fn.version(spec.name, spec.version)]

        if spec.versions == spack.version.ver(":"):
            # an unconstrained range adds no clause at all
            return []

        # record all version constraints for later
        self.version_constraints.add((spec.name, spec.versions))
        return [fn.version_satisfies(spec.name, spec.versions)]
    def target_ranges(self, spec, single_target_fn):
        # Emit either a single concrete target clause or a range-satisfies
        # clause (recorded for later supporting facts).
        target = spec.architecture.target

        # Check if the target is a concrete target
        if str(target) in archspec.cpu.TARGETS:
            return [single_target_fn(spec.name, target)]

        self.target_constraints.add((spec.name, target))
        return [fn.node_target_satisfies(spec.name, target)]
    def conflict_rules(self, pkg):
        # Each conflict is a pair of conditions: the "trigger" spec and the
        # "constraint" spec; if both hold for a node, the solve is infeasible.
        for trigger, constraints in pkg.conflicts.items():
            trigger_id = self.condition(spack.spec.Spec(trigger), name=pkg.name)
            self.gen.fact(fn.conflict_trigger(trigger_id))

            for constraint, _ in constraints:
                constraint_id = self.condition(constraint, name=pkg.name)
                self.gen.fact(fn.conflict(pkg.name, trigger_id, constraint_id))
                self.gen.newline()
    def available_compilers(self):
        """Facts about available compilers."""

        self.gen.h2("Available compilers")
        compilers = self.possible_compilers

        # group available versions by compiler name
        compiler_versions = collections.defaultdict(lambda: set())
        for compiler in compilers:
            compiler_versions[compiler.name].add(compiler.version)

        for compiler in sorted(compiler_versions):
            for v in sorted(compiler_versions[compiler]):
                self.gen.fact(fn.compiler_version(compiler, v))

            self.gen.newline()
    def compiler_defaults(self):
        """Set compiler defaults, given a list of possible compilers."""
        self.gen.h2("Default compiler preferences")

        compiler_list = self.possible_compilers.copy()
        compiler_list = sorted(
            compiler_list, key=lambda x: (x.name, x.version), reverse=True)
        # order by the 'all:compiler' preference from packages.yaml
        ppk = spack.package_prefs.PackagePrefs("all", 'compiler', all=False)
        matches = sorted(compiler_list, key=ppk)

        for i, cspec in enumerate(matches):
            # i is the preference weight: lower == more preferred
            f = fn.default_compiler_preference(cspec.name, cspec.version, i)
            self.gen.fact(f)

        # Enumerate target families. This may be redundant, but compilers with
        # custom versions will be able to concretize properly.
        for entry in spack.compilers.all_compilers_config():
            compiler_entry = entry['compiler']
            cspec = spack.spec.CompilerSpec(compiler_entry['spec'])
            if not compiler_entry.get('target', None):
                continue

            self.gen.fact(fn.compiler_supports_target(
                cspec.name, cspec.version, compiler_entry['target']
            ))
def compiler_supports_os(self):
compilers_yaml = spack.compilers.all_compilers_config()
for entry in compilers_yaml:
c = spack.spec.CompilerSpec(entry['compiler']['spec'])
operating_system = entry['compiler']['operating_system']
self.gen.fact(fn.compiler_supports_os(
c.name, c.version, operating_system
))
    def package_compiler_defaults(self, pkg):
        """Facts about packages' compiler prefs."""

        packages = spack.config.get("packages")
        pkg_prefs = packages.get(pkg.name)
        if not pkg_prefs or "compiler" not in pkg_prefs:
            # no per-package compiler preference configured
            return

        compiler_list = self.possible_compilers.copy()
        compiler_list = sorted(
            compiler_list, key=lambda x: (x.name, x.version), reverse=True)
        ppk = spack.package_prefs.PackagePrefs(pkg.name, 'compiler', all=False)
        matches = sorted(compiler_list, key=ppk)

        # reversed + negative weights: most preferred gets the smallest
        # (most negative is least preferred) -i * 100 weight
        for i, cspec in enumerate(reversed(matches)):
            self.gen.fact(fn.node_compiler_preference(
                pkg.name, cspec.name, cspec.version, -i * 100
            ))
    def pkg_rules(self, pkg, tests):
        # Emit all per-package facts: versions, variants, conflicts,
        # compiler prefs, provided virtuals, dependencies, provider prefs.
        pkg = packagize(pkg)

        # versions
        self.pkg_version_rules(pkg)
        self.gen.newline()

        # variants
        for name, variant in sorted(pkg.variants.items()):
            self.gen.fact(fn.variant(pkg.name, name))

            single_value = not variant.multi
            if single_value:
                self.gen.fact(fn.variant_single_value(pkg.name, name))
                self.gen.fact(
                    fn.variant_default_value_from_package_py(
                        pkg.name, name, variant.default)
                )
            else:
                # multi-valued variant: one default fact per default value
                spec_variant = variant.make_default()
                defaults = spec_variant.value
                for val in sorted(defaults):
                    self.gen.fact(
                        fn.variant_default_value_from_package_py(
                            pkg.name, name, val)
                    )

            values = variant.values
            if values is None:
                values = []
            elif isinstance(values, spack.variant.DisjointSetsOfValues):
                union = set()
                # Encode the disjoint sets in the logic program
                for sid, s in enumerate(values.sets):
                    for value in s:
                        self.gen.fact(fn.variant_value_from_disjoint_sets(
                            pkg.name, name, value, sid
                        ))
                    union.update(s)
                values = union

            # make sure that every variant has at least one possible value
            if not values:
                values = [variant.default]

            for value in sorted(values):
                self.gen.fact(fn.variant_possible_value(pkg.name, name, value))

            self.gen.newline()

        # conflicts
        self.conflict_rules(pkg)

        # default compilers for this package
        self.package_compiler_defaults(pkg)

        # virtuals
        self.package_provider_rules(pkg)

        # dependencies
        self.package_dependencies_rules(pkg, tests)

        # virtual preferences
        self.virtual_preferences(
            pkg.name,
            lambda v, p, i: self.gen.fact(
                fn.pkg_provider_preference(pkg.name, v, p, i)
            )
        )
    def condition(self, required_spec, imposed_spec=None, name=None):
        """Generate facts for a dependency or virtual provider condition.

        Arguments:
            required_spec (spack.spec.Spec): the spec that triggers this condition
            imposed_spec (spack.spec.Spec or None): the spec with constraints that
                are imposed when this condition is triggered
            name (str or None): name for `required_spec` (required if
                required_spec is anonymous, ignored if not)

        Returns:
            int: id of the condition created by this function
        """
        named_cond = required_spec.copy()
        named_cond.name = named_cond.name or name
        assert named_cond.name, "must provide name for anonymous condtions!"

        condition_id = next(self._condition_id_counter)
        self.gen.fact(fn.condition(condition_id))

        # requirements trigger the condition
        requirements = self.checked_spec_clauses(
            named_cond, body=True, required_from=name)
        for pred in requirements:
            self.gen.fact(
                fn.condition_requirement(condition_id, pred.name, *pred.args)
            )

        if imposed_spec:
            imposed_constraints = self.checked_spec_clauses(
                imposed_spec, body=False, required_from=name)
            for pred in imposed_constraints:
                # imposed "node"-like conditions are no-ops
                if pred.name in ("node", "virtual_node"):
                    continue
                self.gen.fact(
                    fn.imposed_constraint(condition_id, pred.name, *pred.args)
                )

        return condition_id
    def package_provider_rules(self, pkg):
        # Which virtuals can this package provide, and under what conditions.
        for provider_name in sorted(set(s.name for s in pkg.provided.keys())):
            self.gen.fact(fn.possible_provider(pkg.name, provider_name))

        for provided, whens in pkg.provided.items():
            for when in whens:
                # the 'when' spec triggers; 'provided' is what gets provided
                condition_id = self.condition(when, provided, pkg.name)
                self.gen.fact(fn.provider_condition(
                    condition_id, when.name, provided.name
                ))
            self.gen.newline()
    def package_dependencies_rules(self, pkg, tests):
        """Translate 'depends_on' directives into ASP logic."""
        for _, conditions in sorted(pkg.dependencies.items()):
            for cond, dep in sorted(conditions.items()):
                deptypes = dep.type.copy()
                # Skip test dependencies if they're not requested
                if not tests:
                    deptypes.discard("test")

                # ... or if they are requested only for certain packages
                if not isinstance(tests, bool) and pkg.name not in tests:
                    deptypes.discard("test")

                # if there are no dependency types to be considered
                # anymore, don't generate the dependency
                if not deptypes:
                    continue

                condition_id = self.condition(cond, dep.spec, pkg.name)
                self.gen.fact(fn.dependency_condition(
                    condition_id, pkg.name, dep.spec.name
                ))

                for t in sorted(deptypes):
                    # there is a declared dependency of type t
                    self.gen.fact(fn.dependency_type(condition_id, t))

                self.gen.newline()
def virtual_preferences(self, pkg_name, func):
"""Call func(vspec, provider, i) for each of pkg's provider prefs."""
config = spack.config.get("packages")
pkg_prefs = config.get(pkg_name, {}).get("providers", {})
for vspec, providers in pkg_prefs.items():
if vspec not in self.possible_virtuals:
continue
for i, provider in enumerate(providers):
provider_name = spack.spec.Spec(provider).name
func(vspec, provider_name, i)
    def provider_defaults(self):
        # Global ('all') provider preferences from packages.yaml.
        self.gen.h2("Default virtual providers")
        assert self.possible_virtuals is not None
        self.virtual_preferences(
            "all",
            lambda v, p, i: self.gen.fact(
                fn.default_provider_preference(v, p, i))
        )
    def external_packages(self):
        """Facts on external packages, as read from packages.yaml"""
        # Read packages.yaml and normalize it, so that it
        # will not contain entries referring to virtual
        # packages.
        packages_yaml = spack.config.get("packages")
        packages_yaml = _normalize_packages_yaml(packages_yaml)

        self.gen.h1('External packages')
        for pkg_name, data in packages_yaml.items():
            if pkg_name == 'all':
                continue

            # This package does not appear in any repository
            if pkg_name not in spack.repo.path:
                continue

            self.gen.h2('External package: {0}'.format(pkg_name))
            # Check if the external package is buildable. If it is
            # not then "external(<pkg>)" is a fact.
            external_buildable = data.get('buildable', True)
            if not external_buildable:
                self.gen.fact(fn.external_only(pkg_name))

            # Read a list of all the specs for this package
            externals = data.get('externals', [])
            external_specs = [spack.spec.Spec(x['spec']) for x in externals]

            # Order the external versions to prefer more recent versions
            # even if specs in packages.yaml are not ordered that way
            external_versions = [
                (x.version, external_id)
                for external_id, x in enumerate(external_specs)
            ]
            external_versions = [
                (v, idx, external_id)
                for idx, (v, external_id) in
                enumerate(sorted(external_versions, reverse=True))
            ]
            for version, idx, external_id in external_versions:
                self.declared_versions[pkg_name].append(DeclaredVersion(
                    version=version,
                    idx=idx,
                    origin=version_provenance.external
                ))

            # Declare external conditions with a local index into packages.yaml
            for local_idx, spec in enumerate(external_specs):
                condition_id = self.condition(spec)
                self.gen.fact(
                    fn.possible_external(condition_id, pkg_name, local_idx)
                )
                self.possible_versions[spec.name].add(spec.version)
                self.gen.newline()
    def preferred_variants(self, pkg_name):
        """Facts on concretization preferences, as read from packages.yaml"""
        preferences = spack.package_prefs.PackagePrefs
        preferred_variants = preferences.preferred_variants(pkg_name)
        if not preferred_variants:
            return

        for variant_name in sorted(preferred_variants):
            variant = preferred_variants[variant_name]
            values = variant.value

            if not isinstance(values, tuple):
                values = (values,)

            # perform validation of the variant and values
            spec = spack.spec.Spec(pkg_name)
            spec.update_variant_validate(variant_name, values)

            for value in values:
                # register the value as possible, then emit the preference
                self.variant_values_from_specs.add(
                    (pkg_name, variant.name, value)
                )
                self.gen.fact(fn.variant_default_value_from_packages_yaml(
                    pkg_name, variant.name, value
                ))
    def preferred_targets(self, pkg_name):
        # Emit a weight fact for this package's single most-preferred target
        # (per packages.yaml 'target' preferences), if any.
        key_fn = spack.package_prefs.PackagePrefs(pkg_name, 'target')

        if not self.target_specs_cache:
            # building Specs for every known target is expensive; do it once
            self.target_specs_cache = [
                spack.spec.Spec('target={0}'.format(target_name))
                for target_name in archspec.cpu.TARGETS
            ]

        target_specs = self.target_specs_cache
        # negative key == explicitly preferred by configuration
        preferred_targets = [x for x in target_specs if key_fn(x) < 0]
        if not preferred_targets:
            return

        preferred = preferred_targets[0]
        self.gen.fact(fn.package_target_weight(
            str(preferred.architecture.target), pkg_name, -30
        ))
    def flag_defaults(self):
        self.gen.h2("Compiler flag defaults")

        # types of flags that can be on specs
        for flag in spack.spec.FlagMap.valid_compiler_flags():
            self.gen.fact(fn.flag_type(flag))
        self.gen.newline()

        # flags from compilers.yaml
        compilers = all_compilers_in_config()
        for compiler in compilers:
            for name, flags in compiler.flags.items():
                for flag in flags:
                    self.gen.fact(fn.compiler_version_flag(
                        compiler.name, compiler.version, name, flag))
def checked_spec_clauses(self, *args, **kwargs):
"""Wrap a call to spec clauses into a try/except block that raise
a comprehensible error message in case of failure.
"""
requestor = kwargs.pop('required_from', None)
try:
clauses = self.spec_clauses(*args, **kwargs)
except RuntimeError as exc:
msg = str(exc)
if requestor:
msg += ' [required from package "{0}"]'.format(requestor)
raise RuntimeError(msg)
return clauses
    def spec_clauses(self, spec, body=False, transitive=True):
        """Return a list of clauses for a spec mandates are true.

        Arguments:
            spec (spack.spec.Spec): the spec to analyze
            body (bool): if True, generate clauses to be used in rule bodies
                (final values) instead of rule heads (setters).
            transitive (bool): if False, don't generate clauses from
                dependencies (default True)
        """
        clauses = []

        # TODO: do this with consistent suffixes.
        class Head(object):
            node = fn.node
            virtual_node = fn.virtual_node
            node_platform = fn.node_platform_set
            node_os = fn.node_os_set
            node_target = fn.node_target_set
            variant_value = fn.variant_set
            node_compiler = fn.node_compiler_set
            node_compiler_version = fn.node_compiler_version_set
            node_flag = fn.node_flag_set

        class Body(object):
            node = fn.node
            virtual_node = fn.virtual_node
            node_platform = fn.node_platform
            node_os = fn.node_os
            node_target = fn.node_target
            variant_value = fn.variant_value
            node_compiler = fn.node_compiler
            node_compiler_version = fn.node_compiler_version
            node_flag = fn.node_flag

        f = Body if body else Head

        if spec.name:
            clauses.append(
                f.node(spec.name) if not spec.virtual
                else f.virtual_node(spec.name))

        clauses.extend(self.spec_versions(spec))

        # seed architecture at the root (we'll propagate later)
        # TODO: use better semantics.
        arch = spec.architecture
        if arch:
            if arch.platform:
                clauses.append(f.node_platform(spec.name, arch.platform))
            if arch.os:
                clauses.append(f.node_os(spec.name, arch.os))
            if arch.target:
                clauses.extend(self.target_ranges(spec, f.node_target))

        # variants
        for vname, variant in sorted(spec.variants.items()):
            values = variant.value
            if not isinstance(values, (list, tuple)):
                values = [values]

            for value in values:
                # * is meaningless for concretization -- just for matching
                if value == '*':
                    continue

                # validate variant value only if spec not concrete
                if not spec.concrete:
                    reserved_names = spack.directives.reserved_names
                    if not spec.virtual and vname not in reserved_names:
                        try:
                            variant_def = spec.package.variants[vname]
                        except KeyError:
                            msg = 'variant "{0}" not found in package "{1}"'
                            raise RuntimeError(msg.format(vname, spec.name))
                        else:
                            variant_def.validate_or_raise(variant, spec.package)

                clauses.append(f.variant_value(spec.name, vname, value))

                # Tell the concretizer that this is a possible value for the
                # variant, to account for things like int/str values where we
                # can't enumerate the valid values
                self.variant_values_from_specs.add((spec.name, vname, value))

        # compiler and compiler version
        if spec.compiler:
            clauses.append(f.node_compiler(spec.name, spec.compiler.name))

            if spec.compiler.concrete:
                clauses.append(f.node_compiler_version(
                    spec.name, spec.compiler.name, spec.compiler.version))

            elif spec.compiler.versions:
                # a version range: emit satisfies and record the constraint
                clauses.append(
                    fn.node_compiler_version_satisfies(
                        spec.name, spec.compiler.name, spec.compiler.versions))
                self.compiler_version_constraints.add(
                    (spec.name, spec.compiler))

        # compiler flags
        for flag_type, flags in spec.compiler_flags.items():
            for flag in flags:
                clauses.append(f.node_flag(spec.name, flag_type, flag))

        # dependencies
        if spec.concrete:
            clauses.append(fn.concrete(spec.name))
            # TODO: add concrete depends_on() facts for concrete dependencies

        # add all clauses from dependencies
        if transitive:
            for dep in spec.traverse(root=False):
                clauses.extend(self.spec_clauses(dep, body, transitive=False))

        return clauses
    def build_version_dict(self, possible_pkgs, specs):
        """Declare any versions in specs not declared in packages."""
        self.declared_versions = collections.defaultdict(list)
        self.possible_versions = collections.defaultdict(set)
        self.deprecated_versions = collections.defaultdict(set)

        packages_yaml = spack.config.get("packages")
        packages_yaml = _normalize_packages_yaml(packages_yaml)
        for pkg_name in possible_pkgs:
            pkg = spack.repo.get(pkg_name)

            # All the versions from the corresponding package.py file. Since concepts
            # like being a "develop" version or being preferred exist only at a
            # package.py level, sort them in this partial list here
            def key_fn(item):
                version, info = item
                # When COMPARING VERSIONS, the '@develop' version is always
                # larger than other versions. BUT when CONCRETIZING, the largest
                # NON-develop version is selected by default.
                return info.get('preferred', False), not version.isdevelop(), version

            for idx, item in enumerate(sorted(
                    pkg.versions.items(), key=key_fn, reverse=True
            )):
                v, version_info = item
                self.possible_versions[pkg_name].add(v)
                self.declared_versions[pkg_name].append(DeclaredVersion(
                    version=v, idx=idx, origin=version_provenance.package_py
                ))
                deprecated = version_info.get('deprecated', False)
                if deprecated:
                    self.deprecated_versions[pkg_name].add(v)

            # All the preferred version from packages.yaml, versions in external
            # specs will be computed later
            version_preferences = packages_yaml.get(pkg_name, {}).get("version", [])
            for idx, v in enumerate(version_preferences):
                self.declared_versions[pkg_name].append(DeclaredVersion(
                    version=v, idx=idx, origin=version_provenance.packages_yaml
                ))

        for spec in specs:
            for dep in spec.traverse():
                if dep.versions.concrete:
                    # Concrete versions used in abstract specs from cli. They
                    # all have idx equal to 0, which is the best possible. In
                    # any case they will be used due to being set from the cli.
                    self.declared_versions[dep.name].append(DeclaredVersion(
                        version=dep.version,
                        idx=0,
                        origin=version_provenance.spec
                    ))
                    self.possible_versions[dep.name].add(dep.version)
def _supported_targets(self, compiler_name, compiler_version, targets):
    """Get a list of which targets are supported by the compiler.

    Results are ordered most to least recent.
    """
    usable = []
    for candidate in targets:
        try:
            # Probing for optimization flags raises if the compiler
            # cannot generate code for this microarchitecture.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                candidate.optimization_flags(compiler_name, compiler_version)
        except (archspec.cpu.UnsupportedMicroarchitecture, ValueError):
            continue
        usable.append(candidate)
    return sorted(usable, reverse=True)
def platform_defaults(self):
    """Emit the current platform as the default node platform fact."""
    self.gen.h2('Default platform')
    platform = spack.architecture.platform()
    self.gen.fact(fn.node_platform_default(platform))
def os_defaults(self, specs):
    """Emit facts for every operating system the solve may choose from."""
    self.gen.h2('Possible operating systems')
    plat = spack.architecture.platform()

    # Start from the platform's own front/back/default OS's, then add
    # any OS explicitly requested on an input spec.
    candidates = {plat.front_os, plat.back_os, plat.default_os}
    candidates.update(
        s.architecture.os for s in specs
        if s.architecture and s.architecture.os
    )

    # one directive per possible OS, in stable (sorted) order
    for os_name in sorted(candidates):
        self.gen.fact(fn.os(os_name))

    # mark the platform default
    self.gen.fact(fn.node_os_default(plat.default_os))
def target_defaults(self, specs):
    """Add facts about targets and target compatibility.

    Emits target/family/parent facts for every target compatible with
    the platform default, weighting the targets supported by the
    available compilers so the solver prefers them.
    """
    self.gen.h2('Default target')
    platform = spack.architecture.platform()
    uarch = archspec.cpu.TARGETS.get(platform.default)
    self.gen.h2('Target compatibility')

    # The default target plus all of its ancestors are compatible;
    # same-family targets are appended afterwards, most specific first.
    compatible_targets = [uarch] + uarch.ancestors
    additional_targets_in_family = sorted([
        t for t in archspec.cpu.TARGETS.values()
        if (t.family.name == uarch.family.name and
            t not in compatible_targets)
    ], key=lambda x: len(x.ancestors), reverse=True)
    compatible_targets += additional_targets_in_family
    compilers = self.possible_compilers

    # this loop can be used to limit the number of targets
    # considered. Right now we consider them all, but it seems that
    # many targets can make things slow.
    # TODO: investigate this.
    best_targets = set([uarch.family.name])
    for compiler in sorted(compilers):
        supported = self._supported_targets(
            compiler.name, compiler.version, compatible_targets
        )

        # If we can't find supported targets it may be due to custom
        # versions in the spec, e.g. gcc@foo. Try to match the
        # real_version from the compiler object to get more accurate
        # results.
        if not supported:
            compiler_obj = spack.compilers.compilers_for_spec(compiler)
            compiler_obj = compiler_obj[0]
            supported = self._supported_targets(
                compiler.name,
                compiler_obj.real_version,
                compatible_targets
            )

        if not supported:
            continue

        for target in supported:
            best_targets.add(target.name)
            self.gen.fact(fn.compiler_supports_target(
                compiler.name, compiler.version, target.name))
        # every compiler supports its own target family generically
        self.gen.fact(fn.compiler_supports_target(
            compiler.name, compiler.version, uarch.family.name))

    # add any targets explicitly mentioned in specs
    for spec in specs:
        if not spec.architecture or not spec.architecture.target:
            continue

        target = archspec.cpu.TARGETS.get(spec.target.name)
        if not target:
            # not a concrete target name; treat it as a range constraint
            self.target_ranges(spec, None)
            continue

        if target not in compatible_targets:
            compatible_targets.append(target)

    # i is the running weight for preferred targets; non-preferred
    # targets all get the poor fixed weight 100 below.
    i = 0
    for target in compatible_targets:
        self.gen.fact(fn.target(target.name))
        self.gen.fact(fn.target_family(target.name, target.family.name))
        for parent in sorted(target.parents):
            self.gen.fact(fn.target_parent(target.name, parent.name))

        # prefer best possible targets; weight others poorly so
        # they're not used unless set explicitly
        if target.name in best_targets:
            self.gen.fact(fn.default_target_weight(target.name, i))
            i += 1
        else:
            self.gen.fact(fn.default_target_weight(target.name, 100))

        self.gen.newline()
def virtual_providers(self):
    """Emit one fact per possible virtual package."""
    self.gen.h2("Virtual providers")
    # setup() must have computed the virtual set before this runs
    assert self.possible_virtuals is not None

    emit = self.gen.fact
    for virtual_name in sorted(self.possible_virtuals):
        emit(fn.virtual(virtual_name))
    self.gen.newline()
def generate_possible_compilers(self, specs):
    """Return the set of compiler specs the solver may choose from.

    Compilers mentioned on the command line are added to the configured
    ones, unless strict checking forbids unknown compilers — in that
    case an UnavailableCompilerVersionError is raised.
    """
    configured = all_compilers_in_config()
    cspecs = set(c.spec for c in configured)

    # add compiler specs from the input line to possibilities if we
    # don't require compilers to exist.
    strict = spack.concretize.Concretizer().check_for_compiler_existence
    for root in specs:
        for node in root.traverse():
            compiler = node.compiler
            if not compiler or not compiler.concrete:
                continue

            if strict and compiler not in cspecs:
                raise spack.concretize.UnavailableCompilerVersionError(
                    compiler
                )
            cspecs.add(compiler)
            self.gen.fact(fn.allow_compiler(
                compiler.name, compiler.version
            ))

    return cspecs
def define_version_constraints(self):
    """Define what version_satisfies(...) means in ASP logic."""
    for pkg_name, constraint in sorted(self.version_constraints):
        # version must be *one* of the ones the spec allows.
        candidates = sorted(self.possible_versions[pkg_name])
        allowed = [v for v in candidates if v.satisfies(constraint)]

        # Prefer an exact match when one exists; this accounts for a
        # variable number of version components (e.g. 1.0 vs 1.0.2).
        exact = [v for v in allowed if v == constraint]
        if exact:
            allowed = exact

        # one fact per (package, constraint, satisfying version)
        for version in allowed:
            self.gen.fact(
                fn.version_satisfies(pkg_name, constraint, version)
            )
        self.gen.newline()
def define_virtual_constraints(self):
    """Define versions for constraints on virtuals.

    Must be called before define_version_constraints().
    """
    # aggregate constraints into per-virtual sets
    constraint_map = collections.defaultdict(lambda: set())
    for pkg_name, versions in self.version_constraints:
        if not spack.repo.path.is_virtual(pkg_name):
            continue
        constraint_map[pkg_name].add(versions)

    # extract all the real versions mentioned in version ranges
    def versions_for(v):
        """Flatten a Version/VersionRange/VersionList into its endpoint versions."""
        if isinstance(v, spack.version.Version):
            return [v]
        elif isinstance(v, spack.version.VersionRange):
            # open-ended ranges contribute only the bound(s) they have
            result = [v.start] if v.start else []
            result += [v.end] if v.end else []
            return result
        elif isinstance(v, spack.version.VersionList):
            return sum((versions_for(e) for e in v), [])
        else:
            raise TypeError("expected version type, found: %s" % type(v))

    # define a set of synthetic possible versions for virtuals, so
    # that `version_satisfies(Package, Constraint, Version)` has the
    # same semantics for virtuals as for regular packages.
    for pkg_name, versions in sorted(constraint_map.items()):
        possible_versions = set(
            sum([versions_for(v) for v in versions], [])
        )
        for version in sorted(possible_versions):
            self.possible_versions[pkg_name].add(version)
def define_compiler_version_constraints(self):
    """Emit facts pairing recorded compiler constraints with the
    configured compiler versions that satisfy them."""
    known_compilers = sorted(set(spack.compilers.all_compiler_specs()))
    for pkg_name, cspec in self.compiler_version_constraints:
        # every configured compiler matching this constraint
        matches = (c for c in known_compilers if c.satisfies(cspec))
        for match in matches:
            self.gen.fact(
                fn.node_compiler_version_satisfies(
                    pkg_name,
                    cspec.name,
                    cspec.versions,
                    match.version
                )
            )
    self.gen.newline()
def define_target_constraints(self):
    """Emit facts mapping each target constraint to the targets it allows."""

    def _targets_satisfying(constraint):
        # A plain target name is its own (single) allowed target.
        if ':' not in constraint:
            return [constraint]

        lo, _, hi = constraint.partition(':')
        matching = []
        for candidate in archspec.cpu.TARGETS.values():
            if lo and not lo <= candidate:
                continue  # below the lower bound
            if hi and not hi >= candidate:
                continue  # above the upper bound
            matching.append(candidate)
        return matching

    memo = {}
    for spec_name, target_constraint in sorted(self.target_constraints):
        # Union the allowed targets over the comma-separated parts of
        # the constraint, memoizing each part since constraints repeat.
        allowed_targets = []
        for part in str(target_constraint).split(','):
            if part not in memo:
                memo[part] = _targets_satisfying(part)
            allowed_targets.extend(memo[part])

        for target in allowed_targets:
            self.gen.fact(
                fn.node_target_satisfies(
                    spec_name, target_constraint, target
                )
            )
        self.gen.newline()
def define_variant_values(self):
    """Validate variant values from the command line.

    Also add valid variant values from the command line to the
    possible values for a variant.
    """
    # Entries are (pkg, variant, value) tuples collected by
    # spec_clauses(); each becomes a possible-value fact.
    emit = self.gen.fact
    for entry in sorted(self.variant_values_from_specs):
        emit(fn.variant_possible_value(*entry))
def setup(self, driver, specs, tests=False):
    """Generate an ASP program with relevant constraints for specs.

    This calls methods on the solve driver to set up the problem with
    facts and rules from all possible dependencies of the input
    specs, as well as constraints from the specs themselves.

    Arguments:
        driver: solver driver used to emit facts and rules
        specs (list): list of Specs to solve
        tests (bool): if True, include test dependencies in the solve
    """
    self._condition_id_counter = itertools.count()

    # preliminary checks
    check_packages_exist(specs)

    # get list of all possible dependencies
    self.possible_virtuals = set(
        x.name for x in specs if x.virtual
    )
    possible = spack.package.possible_dependencies(
        *specs,
        virtuals=self.possible_virtuals,
        deptype=spack.dependency.all_deptypes
    )
    pkgs = set(possible)

    # driver is used by all the functions below to add facts and
    # rules to generate an ASP program.
    self.gen = driver

    # get possible compilers
    self.possible_compilers = self.generate_possible_compilers(specs)

    # traverse all specs and packages to build dict of possible versions
    self.build_version_dict(possible, specs)

    self.gen.h1('General Constraints')
    self.available_compilers()
    self.compiler_defaults()
    self.compiler_supports_os()

    # architecture defaults
    self.platform_defaults()
    self.os_defaults(specs)
    self.target_defaults(specs)

    self.virtual_providers()
    self.provider_defaults()
    self.external_packages()
    self.flag_defaults()

    self.gen.h1('Package Constraints')
    for pkg in sorted(pkgs):
        self.gen.h2('Package rules: %s' % pkg)
        self.pkg_rules(pkg, tests=tests)
        self.gen.h2('Package preferences: %s' % pkg)
        self.preferred_variants(pkg)
        self.preferred_targets(pkg)

    # Inject dev_path from environment
    env = ev.active_environment()
    if env:
        for spec in sorted(specs):
            for dep in spec.traverse():
                _develop_specs_from_env(dep, env)

    self.gen.h1('Spec Constraints')
    for spec in sorted(specs):
        self.gen.h2('Spec: %s' % str(spec))
        # roots are declared as virtual or real depending on the spec
        self.gen.fact(
            fn.virtual_root(spec.name) if spec.virtual
            else fn.root(spec.name)
        )
        for clause in self.spec_clauses(spec):
            self.gen.fact(clause)
            # CLI-set variants also become default values for the solve
            if clause.name == 'variant_set':
                self.gen.fact(fn.variant_default_value_from_cli(
                    *clause.args
                ))

    # NOTE: ordering matters below — virtual constraints must be
    # defined before version constraints (see define_virtual_constraints).
    self.gen.h1("Variant Values defined in specs")
    self.define_variant_values()

    self.gen.h1("Virtual Constraints")
    self.define_virtual_constraints()

    self.gen.h1("Version Constraints")
    self.define_version_constraints()

    self.gen.h1("Compiler Version Constraints")
    self.define_compiler_version_constraints()

    self.gen.h1("Target Constraints")
    self.define_target_constraints()
class SpecBuilder(object):
    """Class with actions to rebuild a spec from ASP results.

    Each method named after an ASP predicate is invoked (via
    ``build_specs``) with that predicate's arguments to incrementally
    reconstruct concrete Specs from the solver's output.
    """
    def __init__(self, specs):
        self._result = None
        # original abstract specs from the command line
        self._command_line_specs = specs
        # pkg name -> set of spec names that contributed flags to it
        self._flag_sources = collections.defaultdict(lambda: set())
        # pkgs whose flags should be filled in from the compiler config
        self._flag_compiler_defaults = set()

    def node(self, pkg):
        """Create an empty Spec for *pkg* if not seen yet."""
        if pkg not in self._specs:
            self._specs[pkg] = spack.spec.Spec(pkg)

    def _arch(self, pkg):
        """Return *pkg*'s ArchSpec, creating it on first access."""
        arch = self._specs[pkg].architecture
        if not arch:
            arch = spack.spec.ArchSpec()
            self._specs[pkg].architecture = arch
        return arch

    def node_platform(self, pkg, platform):
        self._arch(pkg).platform = platform

    def node_os(self, pkg, os):
        self._arch(pkg).os = os

    def node_target(self, pkg, target):
        self._arch(pkg).target = target

    def variant_value(self, pkg, name, value):
        """Set a variant value on *pkg*'s spec."""
        # FIXME: is there a way not to special case 'dev_path' everywhere?
        if name == 'dev_path':
            self._specs[pkg].variants.setdefault(
                name,
                spack.variant.SingleValuedVariant(name, value)
            )
            return

        if name == 'patches':
            self._specs[pkg].variants.setdefault(
                name,
                spack.variant.MultiValuedVariant(name, value)
            )
            return

        self._specs[pkg].update_variant_validate(name, value)

    def version(self, pkg, version):
        self._specs[pkg].versions = spack.version.ver([version])

    def node_compiler(self, pkg, compiler):
        self._specs[pkg].compiler = spack.spec.CompilerSpec(compiler)

    def node_compiler_version(self, pkg, compiler, version):
        # requires node_compiler() to have run first (see build_specs sort)
        self._specs[pkg].compiler.versions = spack.version.VersionList(
            [version])

    def node_flag_compiler_default(self, pkg):
        self._flag_compiler_defaults.add(pkg)

    def node_flag(self, pkg, flag_type, flag):
        self._specs[pkg].compiler_flags.setdefault(flag_type, []).append(flag)

    def node_flag_source(self, pkg, source):
        self._flag_sources[pkg].add(source)

    def no_flags(self, pkg, flag_type):
        self._specs[pkg].compiler_flags[flag_type] = []

    def external_spec_selected(self, pkg, idx):
        """This means that the external spec and index idx
        has been selected for this package.
        """
        packages_yaml = spack.config.get('packages')
        packages_yaml = _normalize_packages_yaml(packages_yaml)
        spec_info = packages_yaml[pkg]['externals'][int(idx)]
        self._specs[pkg].external_path = spec_info.get('prefix', None)
        self._specs[pkg].external_modules = (
            spack.spec.Spec._format_module_list(spec_info.get('modules', None))
        )
        self._specs[pkg].extra_attributes = spec_info.get(
            'extra_attributes', {}
        )

    def depends_on(self, pkg, dep, type):
        """Add (or extend) a dependency edge from *pkg* to *dep*."""
        dependency = self._specs[pkg]._dependencies.get(dep)
        if not dependency:
            self._specs[pkg]._add_dependency(
                self._specs[dep], (type,))
        else:
            dependency.add_type(type)

    def reorder_flags(self):
        """Order compiler flags on specs in predefined order.

        We order flags so that any node's flags will take priority over
        those of its dependents.  That is, the deepest node in the DAG's
        flags will appear last on the compile line, in the order they
        were specified.

        The solver determines which flags are on nodes; this routine
        imposes order afterwards.
        """
        # nodes with no flags get flag order from compiler
        compilers = dict((c.spec, c) for c in all_compilers_in_config())
        for pkg in self._flag_compiler_defaults:
            spec = self._specs[pkg]
            compiler_flags = compilers[spec.compiler].flags
            check_same_flags(spec.compiler_flags, compiler_flags)
            spec.compiler_flags.update(compiler_flags)

        # index of all specs (and deps) from the command line by name
        cmd_specs = dict(
            (s.name, s)
            for spec in self._command_line_specs
            for s in spec.traverse())

        # iterate through specs with specified flags
        for pkg, sources in self._flag_sources.items():
            spec = self._specs[pkg]

            # order is determined by the DAG.  A spec's flags come after
            # any from its ancestors on the compile line.
            order = [
                s.name
                for s in spec.traverse(order='post', direction='parents')]

            # sort the sources in our DAG order
            sorted_sources = sorted(
                sources, key=lambda s: order.index(s))

            # add flags from each source, lowest to highest precedence
            flags = collections.defaultdict(lambda: [])
            for source_name in sorted_sources:
                source = cmd_specs[source_name]
                for name, flag_list in source.compiler_flags.items():
                    extend_flag_list(flags[name], flag_list)

            check_same_flags(spec.compiler_flags, flags)
            spec.compiler_flags.update(flags)

    def deprecated(self, pkg, version):
        """Warn (but don't fail) when the solver picked a deprecated version."""
        msg = 'using "{0}@{1}" which is a deprecated version'
        tty.warn(msg.format(pkg, version))

    def build_specs(self, function_tuples):
        """Rebuild all Specs from the solver's (predicate, args) tuples."""
        # Functions don't seem to be in particular order in output.  Sort
        # them here so that directives that build objects (like node and
        # node_compiler) are called in the right order.
        function_tuples.sort(key=lambda f: {
            "node": -2,
            "node_compiler": -1,
        }.get(f[0], 0))

        self._specs = {}
        for name, args in function_tuples:
            action = getattr(self, name, None)

            # print out unknown actions so we can display them for debugging
            if not action:
                msg = "%s(%s)" % (name, ", ".join(str(a) for a in args))
                tty.debug(msg)
                continue

            assert action and callable(action)

            # ignore predicates on virtual packages, as they're used for
            # solving but don't construct anything
            pkg = args[0]
            if spack.repo.path.is_virtual(pkg):
                continue

            action(*args)

        # namespace assignment is done after the fact, as it is not
        # currently part of the solve
        for spec in self._specs.values():
            repo = spack.repo.path.repo_for_pkg(spec)
            spec.namespace = repo.namespace

        # fix flags after all specs are constructed
        self.reorder_flags()

        # inject patches -- note that we can't use set() to unique the
        # roots here, because the specs aren't complete, and the hash
        # function will loop forever.
        roots = [spec.root for spec in self._specs.values()]
        roots = dict((id(r), r) for r in roots)
        for root in roots.values():
            spack.spec.Spec.inject_patches_variant(root)

        # Add external paths to specs with just external modules
        for s in self._specs.values():
            spack.spec.Spec.ensure_external_path_if_external(s)

        for s in self._specs.values():
            _develop_specs_from_env(s, ev.active_environment())

        for s in self._specs.values():
            s._mark_concrete()

        for s in self._specs.values():
            spack.spec.Spec.ensure_no_deprecated(s)

        return self._specs
def _develop_specs_from_env(spec, env):
    """Mark *spec* with dev_path info when the active env develops it."""
    dev_info = env.dev_specs.get(spec.name, {}) if env else {}
    if not dev_info:
        return  # spec is not under development in this environment

    dev_path = os.path.normpath(os.path.join(env.path, dev_info['path']))

    if 'dev_path' not in spec.variants:
        spec.variants.setdefault(
            'dev_path',
            spack.variant.SingleValuedVariant('dev_path', dev_path)
        )
    else:
        # a dev_path set earlier must agree with the environment's path
        assert spec.variants['dev_path'].value == dev_path

    spec.constrain(dev_info['spec'])
#
# These are handwritten parts for the Spack ASP model.
#
def solve(specs, dump=(), models=0, timers=False, stats=False, tests=False):
    """Solve for a stable model of specs.

    Arguments:
        specs (list): list of Specs to solve.
        dump (tuple): what to dump
        models (int): number of models to search (default: 0)
    """
    driver = PyclingoDriver()
    if "asp" in dump:
        driver.out = sys.stdout

    # Check upfront that the variants are admissible; virtual specs
    # carry no variants of their own and are skipped.
    for root in specs:
        for node in root.traverse():
            if not node.virtual:
                spack.spec.Spec.ensure_valid_variants(node)

    problem = SpackSolverSetup()
    return driver.solve(problem, specs, dump, models, timers, stats, tests)
| lib/spack/spack/solver/asp.py | 61,361 | Object representing a piece of ASP code.
Result of an ASP solve.
Class to set up and run a Spack concretization solve.
Class with actions to rebuild a spec from ASP results.
Driver for the Python clingo interface.
Arguments:
cores (bool): whether to generate unsatisfiable cores for better
error reporting.
asp (file-like): optional stream to write a text-based ASP program
for debugging or verification.
Quote string if needed for it to be a valid identifier.
Get a list of which targets are supported by the compiler.
Results are ordered most to least recent.
Facts about available compilers.
Declare any versions in specs not declared in packages.
Ensure all packages mentioned in specs exist.
Return True if flag dicts contain the same flags regardless of order.
Wrap a call to spec clauses into a try/except block that raise
a comprehensible error message in case of failure.
Set compiler defaults, given a list of possible compilers.
Generate facts for a dependency or virtual provider condition.
Arguments:
required_spec (spack.spec.Spec): the spec that triggers this condition
imposed_spec (spack.spec.Spec or None): the sepc with constraints that
are imposed when this condition is triggered
name (str or None): name for `required_spec` (required if
required_spec is anonymous, ignored if not)
Returns:
int: id of the condition created by this function
Validate variant values from the command line.
Also add valid variant values from the command line to the
possible values for a variant.
Define what version_satisfies(...) means in ASP logic.
Define versions for constraints on virtuals.
Must be called before define_version_constraints().
Extend a list of flags, preserving order and precedence.
Add new_flags at the end of flag_list. If any flags in new_flags are
already in flag_list, they are moved to the end so that they take
higher precedence on the compile line.
Facts on external packages, as read from packages.yaml
This means that the external spec and index idx
has been selected for this package.
ASP fact (a rule without a body).
Facts about packages' compiler prefs.
Translate 'depends_on' directives into ASP logic.
Output declared versions of a package.
This uses self.possible_versions so that we include any versions
that arise from a spec.
Facts on concretization preferences, as read from packages.yaml
Order compiler flags on specs in predefined order.
We order flags so that any node's flags will take priority over
those of its dependents. That is, the deepest node in the DAG's
flags will appear last on the compile line, in the order they
were specified.
The solver determines wihch flags are on nodes; this routine
imposes order afterwards.
Generate an ASP program with relevant constraints for specs.
This calls methods on the solve driver to set up the problem with
facts and rules from all possible dependencies of the input
specs, as well as constraints from the specs themselves.
Arguments:
specs (list): list of Specs to solve
Solve for a stable model of specs.
Arguments:
specs (list): list of Specs to solve.
dump (tuple): what to dump
models (int): number of models to search (default: 0)
Return a list of clauses for a spec mandates are true.
Arguments:
spec (spack.spec.Spec): the spec to analyze
body (bool): if True, generate clauses to be used in rule bodies
(final values) instead of rule heads (setters).
transitive (bool): if False, don't generate clauses from
dependencies (default True)
Return list of clauses expressing spec's version constraints.
List of concretized specs satisfying the initial
abstract request.
Add facts about targets and target compatibility.
Call func(vspec, provider, i) for each of pkg's provider prefs.
Copyright 2013-2021 Lawrence Livermore National Security, LLC and other Spack Project Developers. See the top-level COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) There may be a better way to detect this type: ignore novm: Enumeration like object to mark version provenance type: ignore: String representation of version origins, to emit legible facts for the ASP solver: Named tuple to contain information on declared versions specs ordered by optimization level names of optimization criteria Abstract user requests Concrete specs The specs were already computed, return them Assert prerequisite Remove the virtual entry from the normalized configuration Initialize the control object for the solver set up the problem -- this generates facts and rules read in the main ASP program and display logic -- these are handwritten, not generated, so we load them as resources Grounding is the first step in the solve -- it turns our facts and first-order logic rules into propositional logic. With a grounded program, we can run the solve. stable models if things go well unsatisfiable cores if they do not once done, construct the solve result Clingo w/ CFFI will throw an exception on failure build spec from the best model add best spec to the results pull optimization criteria names out of the solution record the number of models the solver considered set by setup() id for dummy variables Caches to optimize the setup phase of the solver Origins are sorted by order of importance: 1. Spec from command line 2. Externals 3. Package preferences 4. Directives in package.py Declare deprecated versions for this package, if any record all version constraints for later Check if the target is a concrete target Enumerate target families. This may be redundant, but compilers with custom versions will be able to concretize properly. 
versions variants Encode the disjoint sets in the logic program make sure that every variant has at least one possible value conflicts default compilers for this package virtuals dependencies virtual preferences requirements trigger the condition imposed "node"-like conditions are no-ops Skip test dependencies if they're not requested ... or if they are requested only for certain packages if there are no dependency types to be considered anymore, don't generate the dependency there is a declared dependency of type t Read packages.yaml and normalize it, so that it will not contain entries referring to virtual packages. This package does not appear in any repository Check if the external package is buildable. If it is not then "external(<pkg>)" is a fact. Read a list of all the specs for this package Order the external versions to prefer more recent versions even if specs in packages.yaml are not ordered that way Declare external conditions with a local index into packages.yaml perform validation of the variant and values types of flags that can be on specs flags from compilers.yaml TODO: do this with consistent suffixes. seed architecture at the root (we'll propagate later) TODO: use better semantics. variants * is meaningless for concretization -- just for matching validate variant value only if spec not concrete Tell the concretizer that this is a possible value for the variant, to account for things like int/str values where we can't enumerate the valid values compiler and compiler version compiler flags dependencies TODO: add concrete depends_on() facts for concrete dependencies add all clauses from dependencies All the versions from the corresponding package.py file. Since concepts like being a "develop" version or being preferred exist only at a package.py level, sort them in this partial list here When COMPARING VERSIONS, the '@develop' version is always larger than other versions. BUT when CONCRETIZING, the largest NON-develop version is selected by default. 
All the preferred version from packages.yaml, versions in external specs will be computed later Concrete versions used in abstract specs from cli. They all have idx equal to 0, which is the best possible. In any case they will be used due to being set from the cli. create set of OS's to consider make directives for possible OS's mark this one as default this loop can be used to limit the number of targets considered. Right now we consider them all, but it seems that many targets can make things slow. TODO: investigate this. If we can't find supported targets it may be due to custom versions in the spec, e.g. gcc@foo. Try to match the real_version from the compiler object to get more accurate results. add any targets explicitly mentioned in specs prefer best possible targets; weight others poorly so they're not used unless set explicitly what provides what add compiler specs from the input line to possibilities if we don't require compilers to exist. version must be *one* of the ones the spec allows. This is needed to account for a variable number of numbers e.g. if both 1.0 and 1.0.2 are possible versions generate facts for each package constraint and the version that satisfies it aggregate constraints into per-virtual sets extract all the real versions mentioned in version ranges define a set of synthetic possible versions for virtuals, so that `version_satisfies(Package, Constraint, Version)` has the same semantics for virtuals as for regular packages. Check lower bound Check upper bound Construct the list of allowed targets for this constraint Tell the concretizer about possible values from specs we saw in spec_clauses() preliminary checks get list of all possible dependencies driver is used by all the functions below to add facts and rules to generate an ASP program. 
get possible compilers traverse all specs and packages to build dict of possible versions architecture defaults Inject dev_path from environment FIXME: is there a way not to special case 'dev_path' everywhere? nodes with no flags get flag order from compiler index of all specs (and deps) from the command line by name iterate through specs with specified flags order is determined by the DAG. A spec's flags come after any from its ancestors on the compile line. sort the sources in our DAG order add flags from each source, lowest to highest precedence Functions don't seem to be in particular order in output. Sort them here so that directives that build objects (like node and node_compiler) are called in the right order. print out unknown actions so we can display them for debugging ignore predicates on virtual packages, as they're used for solving but don't construct anything namespace assignment is done after the fact, as it is not currently part of the solve fix flags after all specs are constructed inject patches -- note that we' can't use set() to unique the roots here, because the specs aren't complete, and the hash function will loop forever. Add external paths to specs with just external modules These are handwritten parts for the Spack ASP model. Check upfront that the variants are admissible | 10,785 | en | 0.86332 |
import logging
import sys
from notion.block import PageBlock
from notion.client import NotionClient
from requests import HTTPError, codes
from enex2notion.utils_exceptions import BadTokenException
logger = logging.getLogger(__name__)
def get_root(token, name):
    """Return the Notion page titled *name*, or None in dry-run mode.

    Exits the process with status 1 when the token is rejected.
    """
    if not token:
        logger.warning(
            "No token provided, dry run mode. Nothing will be uploaded to Notion!"
        )
        return None

    try:
        client = get_notion_client(token)
    except BadTokenException:
        logger.error("Invalid token provided!")
        sys.exit(1)
    else:
        return get_import_root(client, name)
def get_notion_client(token):
    """Create a NotionClient; raise BadTokenException on a 401 response."""
    try:
        return NotionClient(token_v2=token)
    except HTTPError as err:  # pragma: no cover
        unauthorized = err.response.status_code == codes["unauthorized"]
        if not unauthorized:
            raise
        raise BadTokenException
def get_import_root(client, title):
    """Find or create the top-level Notion page used as the import root."""
    try:
        existing = client.get_top_level_pages()
    except KeyError:  # pragma: no cover
        # Need empty account to test
        existing = []

    # first top-level page whose title matches, if any
    match = next(
        (p for p in existing
         if isinstance(p, PageBlock) and p.title == title),
        None,
    )
    if match is not None:
        logger.info(f"'{title}' page found")
        return match

    logger.info(f"Creating '{title}' page...")
    return client.current_space.add_page(title)
| enex2notion/cli_notion.py | 1,313 | pragma: no cover pragma: no cover Need empty account to test | 60 | en | 0.710923 |
from flask_testing import TestCase
from flask import url_for
from core import app, db
import unittest
from core.models import FeatureRequest, Client, ProductArea
import datetime
class BaseTest(TestCase):
    """Shared scaffolding: in-memory SQLite app plus per-test schema."""

    SQLALCHEMY_DATABASE_URI = "sqlite://"
    TESTING = True

    def create_app(self):
        # configure the core app for testing against in-memory SQLite
        for key, value in (
            ("TESTING", True),
            ("SQLALCHEMY_DATABASE_URI", "sqlite://"),
        ):
            app.config[key] = value
        return app

    def setUp(self):
        # fresh schema before every test
        db.create_all()

    def tearDown(self):
        # drop everything so tests stay independent
        db.session.remove()
        db.drop_all()
class HomepageTest(BaseTest):
    def test_homepage(self):
        """The homepage renders both of its main sections."""
        page = self.client.get(url_for("home_view"))
        for snippet in (b"Add a feature request:", b"List feature requests:"):
            assert snippet in page.data
class ListpageTest(BaseTest):
    """Tests for the feature-request list view."""

    def test_empty_listpage(self):
        """The list page reports when no feature requests exist."""
        self.client.get(url_for("home_view"))
        listing = self.client.get(url_for("feature_requests_view"))
        assert b"No feature requests found." in listing.data

    def test_non_empty_listpage(self):
        """The list page shows a row (with actions) per feature request."""
        # seed two identical feature requests
        for _ in range(2):
            db.session.add(FeatureRequest(
                title="Title",
                description="Desc",
                client=None,
                client_priority=1,
                target_date=datetime.date(2018, 1, 1),
                product_area=None,
            ))
        db.session.commit()

        listing = self.client.get(url_for("feature_requests_view"))
        assert listing.data.count(b"Update") == 2
        assert listing.data.count(b"Delete") == 2
        # both action links for the first entry are present
        for endpoint in ("feature_requests_update", "feature_requests_delete"):
            assert (
                url_for(endpoint, feature_request_id=1).encode()
                in listing.data
            )
class AddOtherObjectsMixin:
    """A reusable mixin that adds a client and a product area to the db."""

    def add_other_objects(self):
        # Persist one Client and one ProductArea, keeping references on self
        # so tests can use their ids/names.
        self.cl = Client("C1")
        self.pa = ProductArea("PA1")
        db.session.add(self.cl)
        db.session.add(self.pa)
        db.session.commit()
class CreatepageTest(AddOtherObjectsMixin, BaseTest):
    """Tests for the feature-request create page."""

    def test_createpage(self):
        "Make sure that the create page works"
        response = self.client.get(url_for("feature_requests_create"))
        assert b"Add Feature Request" in response.data
        assert b"<form method='POST'>" in response.data
        assert b"form-group has-error" not in response.data

    def test_createpage_error(self):
        "The create page should return with error when post data is missing"
        response = self.client.post(
            url_for("feature_requests_create"),
            data=dict(
                title="Title",
                description="Desc",
                client=None,
                client_priority=1,
                target_date=datetime.date(2018, 1, 1),
                product_area=None,
            ),
        )
        assert b"form-group has-error" in response.data
        assert b"<form method='POST'>" in response.data
        assert response.status == "200 OK"

    def test_createpage_success(self):
        "The create page should return a 302 FOUND redirect when an entry is submitted"
        # Use the shared mixin instead of duplicating client/product-area setup.
        self.add_other_objects()
        response = self.client.post(
            url_for("feature_requests_create"),
            data=dict(
                title="Title",
                description="Desc",
                client=self.cl.id,
                client_priority=1,
                target_date=datetime.date(2018, 1, 1),
                product_area=self.pa.id,
            ),
        )
        assert response.status == "302 FOUND"

    def test_createpage_success_flash(self):
        """The create page should display the proper flash message when an object is
        created"""
        self.add_other_objects()
        response = self.client.post(
            url_for("feature_requests_create"),
            data=dict(
                title="Title",
                description="Desc",
                client=self.cl.id,
                client_priority=1,
                target_date=datetime.date(2018, 1, 1),
                product_area=self.pa.id,
            ),
            follow_redirects=True,
        )
        assert response.status == "200 OK"
        assert b"Feature request created!" in response.data
        assert response.data.count(b"Update") == 1
        assert response.data.count(b"Delete") == 1
        assert self.cl.name.encode() in response.data
        assert self.pa.name.encode() in response.data

    def test_createpage_change_priorities(self):
        """The create page should change the priorities of the other objects when a
        new one has the same priority and client"""
        self.add_other_objects()
        fr = FeatureRequest(
            title="Title",
            description="Desc",
            client=self.cl,
            client_priority=1,
            target_date=datetime.date(2018, 1, 1),
            product_area=self.pa,
        )
        db.session.add(fr)
        db.session.commit()
        assert FeatureRequest.query.filter_by(id=fr.id).first().client_priority == 1
        response = self.client.post(
            url_for("feature_requests_create"),
            data=dict(
                title="Title",
                description="Desc",
                client=self.cl.id,
                client_priority=1,
                target_date=datetime.date(2018, 1, 1),
                product_area=self.pa.id,
            ),
            follow_redirects=True,
        )
        assert response.status == "200 OK"
        assert FeatureRequest.query.filter_by(id=fr.id).first().client_priority == 2
class UpdatepageTest(AddOtherObjectsMixin, BaseTest):
    """Tests for the feature-request update page."""

    def add_feature_request(self):
        "A reusable method for this class"
        self.fr = FeatureRequest(
            title="Title",
            description="Desc",
            client=None,
            client_priority=1,
            target_date=datetime.date(2018, 1, 1),
            product_area=None,
        )
        db.session.add(self.fr)
        db.session.commit()

    def test_updatepage_not_found(self):
        "Make sure that the update page returns 404 when the obj is not found"
        response = self.client.get(
            url_for("feature_requests_update", feature_request_id=1232)
        )
        assert response.status == "404 NOT FOUND"

    def test_updatepage_ok(self):
        "Make sure that the update page is displayed properly along with the object"
        self.add_feature_request()
        response = self.client.get(
            url_for("feature_requests_update", feature_request_id=self.fr.id)
        )
        assert "Edit Feature Request: {0}".format(self.fr.id).encode() in response.data
        assert b"<form method='POST'>" in response.data
        assert b"form-group has-error" not in response.data
        assert self.fr.title.encode() in response.data
        assert self.fr.description.encode() in response.data

    def test_updatepage_error(self):
        "The update page should return an error when data is missing"
        self.add_feature_request()
        response = self.client.post(
            url_for("feature_requests_update", feature_request_id=self.fr.id),
            data=dict(
                title="Title",
                description="Desc",
                client=None,
                client_priority=1,
                target_date=datetime.date(2018, 1, 1),
                product_area=None,
            ),
        )
        assert b"form-group has-error" in response.data
        assert b"<form method='POST'>" in response.data
        assert response.status == "200 OK"

    # Renamed from the misleading `test_createpage_success`: it tests updates.
    def test_updatepage_success(self):
        "The update page should properly update the object"
        self.add_feature_request()
        self.add_other_objects()
        newtitle = "The new title"
        response = self.client.post(
            url_for("feature_requests_update", feature_request_id=self.fr.id),
            data=dict(
                title=newtitle,
                description="Desc",
                client=self.cl.id,
                client_priority=1,
                target_date=datetime.date(2018, 1, 1),
                product_area=self.pa.id,
            ),
        )
        assert response.status == "302 FOUND"
        assert FeatureRequest.query.filter_by(id=self.fr.id).first().title == newtitle

    def test_updatepage_success_flash(self):
        """Make sure that the flash message is displayed correctly and we are
        redirected to the list view"""
        self.add_feature_request()
        self.add_other_objects()
        response = self.client.post(
            url_for("feature_requests_update", feature_request_id=self.fr.id),
            data=dict(
                title="Title",
                description="Desc",
                client=self.cl.id,
                client_priority=1,
                target_date=datetime.date(2018, 1, 1),
                product_area=self.pa.id,
            ),
            follow_redirects=True,
        )
        assert response.status == "200 OK"
        assert b"Feature request updated!" in response.data
        assert response.data.count(b"Update") == 1
        assert response.data.count(b"Delete") == 1
        assert self.cl.name.encode() in response.data
        assert self.pa.name.encode() in response.data

    def test_updatepage_change_priorities(self):
        "The updatepage should also update the client priorities"
        self.add_other_objects()
        fr = FeatureRequest(
            title="Title",
            description="Desc",
            client=self.cl,
            client_priority=1,
            target_date=datetime.date(2018, 1, 1),
            product_area=self.pa,
        )
        db.session.add(fr)
        fr2 = FeatureRequest(
            title="Title",
            description="Desc",
            client=self.cl,
            client_priority=2,
            target_date=datetime.date(2018, 1, 1),
            product_area=self.pa,
        )
        db.session.add(fr2)
        db.session.commit()
        assert FeatureRequest.query.filter_by(id=fr.id).first().client_priority == 1
        assert FeatureRequest.query.filter_by(id=fr2.id).first().client_priority == 2
        response = self.client.post(
            # Use fr2.id instead of the brittle hard-coded literal 2.
            url_for("feature_requests_update", feature_request_id=fr2.id),
            data=dict(
                title="Title",
                description="Desc",
                client=self.cl.id,
                client_priority=1,
                target_date=datetime.date(2018, 1, 1),
                product_area=self.pa.id,
            ),
            follow_redirects=True,
        )
        assert response.status == "200 OK"
        assert FeatureRequest.query.filter_by(id=fr.id).first().client_priority == 2
        assert FeatureRequest.query.filter_by(id=fr2.id).first().client_priority == 1
class DeletepageTest(BaseTest):
    """Tests for the feature-request delete endpoint."""

    def add_feature_request(self):
        "A reusable method for this class"
        self.fr = FeatureRequest(
            title="Title",
            description="Desc",
            client=None,
            client_priority=1,
            target_date=datetime.date(2018, 1, 1),
            product_area=None,
        )
        db.session.add(self.fr)
        db.session.commit()

    # Renamed from typo'd `test_deletepdatepage_only_post`.
    def test_deletepage_only_post(self):
        "Make sure that the delete page returns 405 when requested with get"
        response = self.client.get(
            url_for("feature_requests_delete", feature_request_id=1232)
        )
        assert response.status == "405 METHOD NOT ALLOWED"

    # Renamed from typo'd `test_deletepdatepage_not_found`.
    def test_deletepage_not_found(self):
        "Make sure that the delete page returns 404 when the obj is not found"
        response = self.client.post(
            url_for("feature_requests_delete", feature_request_id=1232)
        )
        assert response.status == "404 NOT FOUND"

    def test_deletepage_ok(self):
        "Make sure that the delete page deletes the obj"
        self.add_feature_request()
        assert db.session.query(FeatureRequest.query.filter().exists()).scalar() is True
        response = self.client.post(
            url_for("feature_requests_delete", feature_request_id=self.fr.id)
        )
        assert (
            db.session.query(FeatureRequest.query.filter().exists()).scalar() is False
        )
        assert response.status == "302 FOUND"

    def test_deletepage_flash_message(self):
        "Make sure that the delete page shows the proper flash message"
        self.add_feature_request()
        response = self.client.post(
            url_for("feature_requests_delete", feature_request_id=self.fr.id),
            follow_redirects=True,
        )
        assert response.status == "200 OK"
        assert b"Feature request deleted!" in response.data
        assert response.data.count(b"Update") == 0
        assert response.data.count(b"Delete") == 0
# Allow running the suite directly with `python test_core.py`.
if __name__ == "__main__":
    unittest.main()
| test_core.py | 13,555 | A reusable mixin that adds a client and a product area to the db
A reusable method for this class
A reusable method for this class
Make sure that the create page works
The create page should change the priorities of the other objects when a
new one has the same priority and client
The create page should return with error when post data is missing
The create page should return a 302 FOUND redirect when an entry is submitted
The createpage should properly update the object
The create page should display the proper flash message when an object is
created
Make sure that the delete page shows the proper flash message
Make sure that the delete page deletes the obj
Make sure that the delete page returs 404 when the obj is not found
Make sure that the delete page returns 405 when requested with get
Make sure that empty list page works fine
Make sure that homepage works fine
Also that it can display multiple entries
The updatepage should also update the client priorities
The createpage should return an error when data is missing
Make sure that the update page returs 404 when the obj is not found
Make sure that the update page is displayed properly along with the object
Make sure that the flash message is displayed correctly and we are
redirected to the list view | 1,273 | en | 0.886708 |
# Test-environment settings: start from the base settings and override.
from .settings import *
# Enable the optional Respa modules so their code paths are exercised in tests.
RESPA_CATERINGS_ENABLED = True
RESPA_COMMENTS_ENABLED = True
RESPA_PAYMENTS_ENABLED = True
# Bambora Payform provider settings
# Dummy credentials only; no real API calls are expected during tests.
RESPA_PAYMENTS_PROVIDER_CLASS = 'payments.providers.BamboraPayformProvider'
RESPA_PAYMENTS_BAMBORA_API_URL = 'https://real-bambora-api-url/api'
RESPA_PAYMENTS_BAMBORA_API_KEY = 'dummy-key'
RESPA_PAYMENTS_BAMBORA_API_SECRET = 'dummy-secret'
RESPA_PAYMENTS_BAMBORA_PAYMENT_METHODS = ['dummy-bank']
# Post-logout redirect targets for the Django admin and the Respa admin UI.
DJANGO_ADMIN_LOGOUT_REDIRECT_URL='https://hel.fi'
RESPA_ADMIN_LOGOUT_REDIRECT_URL='https://hel.fi'
# API token auth endpoint
MACHINE_TO_MACHINE_AUTH_ENABLED=1
| respa/test_settings.py | 608 | Bambora Payform provider settings API token auth endpoint | 57 | en | 0.344469 |
# Theory: Indexes
# There are several types of collections to store data in Python.
# Positionally ordered collections of elements are usually called
# sequences, and both lists and strings belong to them. Each
# element in a list, as well as each character in a string, has an
# index that corresponds to its position. Indexes are used to
# access elements within a sequence. Indexing is zero-based, so if
# you see a person who counts from zero, you must have met a
# programmer.
| Computer science/Programming languages/Python/Working with data/Collections/Lists/Indexes/topic.py | 484 | Theory: Indexes There are several types of collections to store data in Python. Positionally ordered collections of elements are usually called sequences, and both lists and strings belong to them. EAch element in a list, as well as each character in a string, has an index that corresponds to its position. Indexes are used to access elements within a sequence. Indexing is zero-based, so if you see a person who counts from zero, you must have met a programmer. | 463 | en | 0.970234 |
from __future__ import absolute_import
import requests
import json
import logging
from .base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
def ProviderParser(subparser):
    """Attach the Zeit-specific description and options to *subparser*."""
    description = '''
    Zeit Provider requires a token to access its API.
    You can generate one for your account on the following URL:
    https://zeit.co/account/tokens'''
    subparser.description = description
    subparser.add_argument('--auth-token', help='specify your API token')
# Implements the DNS Zeit provider.
# The API is quite simple: you can list all records, add one record or delete one record.
# - list is pretty straightforward: we get all records then filter for given parameters,
# - add uses directly the API to add a new record without any added complexity,
# - delete uses list + delete: we get the list of all records, filter on the given parameters and delete record by id,
# - update uses list + delete + add: we get the list of all records, find record for given identifier, then insert a new record and delete the old record.
class Provider(BaseProvider):
    """Lexicon provider for the Zeit DNS API.

    Fixes over the previous revision: deprecated ``LOGGER.warn`` replaced by
    ``LOGGER.warning``; typos in exception messages ("occured", "identifer")
    corrected.
    """

    def __init__(self, options, engine_overrides=None):
        super(Provider, self).__init__(options, engine_overrides)
        self.domain_id = None
        self.api_endpoint = 'https://api.zeit.co/v2/domains'

    def authenticate(self):
        """Look up the configured domain and remember its uid."""
        result = self._get('/{0}'.format(self.options['domain']))
        if not result['uid']:
            raise Exception('Error, domain {0} not found'.format(self.options['domain']))
        self.domain_id = result['uid']

    def list_records(self, type=None, name=None, content=None):
        """Fetch all records for the domain, filtered by type/name/content.

        NOTE: ``type`` shadows the builtin but is part of the BaseProvider
        interface, so the name is kept.
        """
        result = self._get('/{0}/records'.format(self.options['domain']))
        raw_records = result['records']
        if type:
            raw_records = [raw_record for raw_record in raw_records if raw_record['type'] == type]
        if name:
            raw_records = [raw_record for raw_record in raw_records if raw_record['name'] == self._relative_name(name)]
        if content:
            raw_records = [raw_record for raw_record in raw_records if raw_record['value'] == content]
        records = []
        for raw_record in raw_records:
            records.append({
                'id': raw_record['id'],
                'type': raw_record['type'],
                'name': self._full_name(raw_record['name']),
                'content': raw_record['value']
            })
        LOGGER.debug('list_records: %s', records)
        return records

    def create_record(self, type, name, content):
        """Add one record; a type/name/content duplicate is silently skipped."""
        # We ignore creation if a record already exists for given type/name/content
        records = self.list_records(type, name, content)
        if records:
            LOGGER.debug('create_record (ignored, duplicate): %s', records[0]['id'])
            return True
        data = {
            'type': type,
            'name': self._relative_name(name),
            'value': content
        }
        result = self._post('/{0}/records'.format(self.options['domain']), data)
        if not result['uid']:
            raise Exception('Error occurred when inserting the new record.')
        LOGGER.debug('create_record: %s', result['uid'])
        return True

    def update_record(self, identifier, type=None, name=None, content=None):
        """Replace a record: insert the merged new record, delete the old one.

        Zeit does not allow updating a record, only add or remove, so we get
        the matching record, merge in the new fields, insert the result as a
        new record and remove the old record.
        """
        records = []
        if identifier:
            records = self.list_records()
            records = [record for record in records if record['id'] == identifier]
        else:
            records = self.list_records(type, name)
        if not records:
            raise Exception('No record found for identifier: {0}'.format(identifier))
        if len(records) > 1:
            # logging.warn() is deprecated; use warning().
            LOGGER.warning('Multiple records have been found for given parameters. Only first one will be updated (id: {0})'.format(records[0]['id']))
        data = {
            'type': type,
            'name': self._relative_name(name),
            'value': content
        }
        # Fall back to the old record's fields for anything not provided.
        if not type:
            data['type'] = records[0]['type']
        if not name:
            data['name'] = self._relative_name(records[0]['name'])
        if not content:
            data['value'] = records[0]['content']
        result = self._post('/{0}/records'.format(self.options['domain']), data)
        self._delete('/{0}/records/{1}'.format(self.options['domain'], records[0]['id']))
        LOGGER.debug('update_record: %s => %s', records[0]['id'], result['uid'])
        return True

    def delete_record(self, identifier=None, type=None, name=None, content=None):
        """Delete by id, or delete every record matching type/name/content."""
        delete_record_ids = []
        if not identifier:
            records = self.list_records(type, name, content)
            delete_record_ids = [record['id'] for record in records]
        else:
            delete_record_ids.append(identifier)
        LOGGER.debug('delete_records: %s', delete_record_ids)
        for delete_record_id in delete_record_ids:
            self._delete('/{0}/records/{1}'.format(self.options['domain'], delete_record_id))
        LOGGER.debug('delete_record: %s', True)
        return True

    def _request(self, action='GET', url='/', data=None, query_params=None):
        """Issue one authenticated API call and return the decoded JSON body."""
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        request = requests.request(action, self.api_endpoint + url,
                                   params=query_params,
                                   data=json.dumps(data),
                                   headers={'Authorization': 'Bearer {0}'.format(self.options.get('auth_token'))})
        # Raise on any HTTP error status before decoding.
        request.raise_for_status()
        return request.json()
# Generated by Django 3.2.9 on 2022-01-01 10:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration; applies after 0003_auto_20220101_1040.
    dependencies = [
        ('notes', '0003_auto_20220101_1040'),
    ]
    operations = [
        # Rename Notes.category -> Notes.categories.
        migrations.RenameField(
            model_name='notes',
            old_name='category',
            new_name='categories',
        ),
        # Drop the NotesCategory.count field.
        migrations.RemoveField(
            model_name='notescategory',
            name='count',
        ),
        # Add an optional free-text comment to NotesRating.
        migrations.AddField(
            model_name='notesrating',
            name='comment',
            field=models.TextField(null=True),
        ),
    ]
| notes/migrations/0004_auto_20220101_1047.py | 636 | Generated by Django 3.2.9 on 2022-01-01 10:47 | 45 | en | 0.763604 |
import tkinter as tk
from tkinter import ttk
import json
from dashboard.entities.InputField import InputField
from dashboard.entities.StatusField import StatusField
class Devices(ttk.Frame):
    """
    Devices Frame for Settings
    """
    def __init__(self, parent, settings):
        """
        Constructs a Devices frame listing the configured devices.
        :param parent: Parent Frame
        :param settings: settings class
        """
        self.settings = settings
        ttk.Frame.__init__(self, parent, relief="raised", borderwidth=2)
        self.content = ttk.Frame(self, borderwidth=2)
        self.content.pack(expand=True, fill=tk.X, side='top', anchor='n')
        self.devices = []
        label1 = tk.Label(self.content, text="Apparaten", font=("Verdana", 14), relief="groove")
        label1.pack(expand=True, fill=tk.X, side='top')
        self.render_devices()
    def render_devices(self):
        """Rebuild the device buttons from the settings file."""
        # Destroy (not merely hide) the stale buttons and reset the list;
        # the previous version only pack_forget() them and kept appending,
        # leaking one widget set per re-render.
        for button in self.devices:
            button.destroy()
        self.devices = []
        # Add one button per device described in the settings json
        self.settings.load_devices()
        for serial_number, data in self.settings.devices.items():
            self.build_device(serial_number, data)
    def build_device(self, serial_number, data):
        """Create a sidebar button that opens the view for one device."""
        # serial_number is a parameter of this method, so each lambda captures
        # its own value (no late-binding pitfall).
        button = ttk.Button(self.content, text=data["Name"], width=15,
                            command=lambda: self.settings.show_view(serial_number, self))
        button.pack(fill=tk.X, pady=2)
        self.devices.append(button)
Constructs a WarningPopUp
:param parent: Parent Frame
:param settings: settings class
Removed current sidebar buttons Add sidebar buttons based on json | 183 | en | 0.677385 |
#!/usr/bin/env python3
# Copyright 2021 Xiaomi Corporation (Author: Liyong Guo, Fangjun Kuang)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import k2
import sentencepiece as spm
import torch
import torch.nn as nn
from asr_datamodule import LibriSpeechAsrDataModule
from conformer import Conformer
from icefall.bpe_graph_compiler import BpeCtcTrainingGraphCompiler
from icefall.checkpoint import average_checkpoints, load_checkpoint
from icefall.decode import (
get_lattice,
nbest_decoding,
nbest_oracle,
one_best_decoding,
rescore_with_attention_decoder,
rescore_with_n_best_list,
rescore_with_whole_lattice,
)
from icefall.lexicon import Lexicon
from icefall.utils import (
AttributeDict,
get_texts,
setup_logger,
store_transcripts,
str2bool,
write_error_stats,
)
def get_parser():
    """Build and return the ArgumentParser for the decoding script."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # Checkpoint selection: the epoch to decode with, and how many
    # preceding checkpoints to average into it.
    parser.add_argument(
        "--epoch",
        type=int,
        default=34,
        help="It specifies the checkpoint to use for decoding."
        "Note: Epoch counts from 0.",
    )
    parser.add_argument(
        "--avg",
        type=int,
        default=20,
        help="Number of checkpoints to average. Automatically select "
        "consecutive checkpoints before the checkpoint specified by "
        "'--epoch'. ",
    )
    parser.add_argument(
        "--method",
        type=str,
        default="attention-decoder",
        help="""Decoding method.
        Supported values are:
            - (0) ctc-decoding. Use CTC decoding. It uses a sentence piece
              model, i.e., lang_dir/bpe.model, to convert word pieces to words.
              It needs neither a lexicon nor an n-gram LM.
            - (1) 1best. Extract the best path from the decoding lattice as the
              decoding result.
            - (2) nbest. Extract n paths from the decoding lattice; the path
              with the highest score is the decoding result.
            - (3) nbest-rescoring. Extract n paths from the decoding lattice,
              rescore them with an n-gram LM (e.g., a 4-gram LM), the path with
              the highest score is the decoding result.
            - (4) whole-lattice-rescoring. Rescore the decoding lattice with an
              n-gram LM (e.g., a 4-gram LM), the best path of rescored lattice
              is the decoding result.
            - (5) attention-decoder. Extract n paths from the LM rescored
              lattice, the path with the highest score is the decoding result.
            - (6) nbest-oracle. Its WER is the lower bound of any n-best
              rescoring method can achieve. Useful for debugging n-best
              rescoring method.
        """,
    )
    # Options for the n-best based methods only.
    parser.add_argument(
        "--num-paths",
        type=int,
        default=100,
        help="""Number of paths for n-best based decoding method.
        Used only when "method" is one of the following values:
        nbest, nbest-rescoring, attention-decoder, and nbest-oracle
        """,
    )
    parser.add_argument(
        "--nbest-scale",
        type=float,
        default=0.5,
        help="""The scale to be applied to `lattice.scores`.
        It's needed if you use any kinds of n-best based rescoring.
        Used only when "method" is one of the following values:
        nbest, nbest-rescoring, attention-decoder, and nbest-oracle
        A smaller value results in more unique paths.
        """,
    )
    parser.add_argument(
        "--export",
        type=str2bool,
        default=False,
        help="""When enabled, the averaged model is saved to
        conformer_ctc/exp/pretrained.pt. Note: only model.state_dict() is saved.
        pretrained.pt contains a dict {"model": model.state_dict()},
        which can be loaded by `icefall.checkpoint.load_checkpoint()`.
        """,
    )
    # Paths and model-architecture options.
    parser.add_argument(
        "--exp-dir",
        type=str,
        default="conformer_mmi/exp_500",
        help="The experiment dir",
    )
    parser.add_argument(
        "--lang-dir",
        type=str,
        default="data/lang_bpe_500",
        help="The lang dir",
    )
    parser.add_argument(
        "--num-decoder-layers",
        type=int,
        default=6,
        help="Number of attention decoder layers",
    )
    return parser
def get_params() -> AttributeDict:
    """Return the fixed (non-CLI) decoding configuration."""
    config = {
        "lm_dir": Path("data/lm"),
        # parameters for conformer
        "subsampling_factor": 4,
        "vgg_frontend": False,
        "use_feat_batchnorm": True,
        "feature_dim": 80,
        "nhead": 8,
        "attention_dim": 512,
        # parameters for decoding
        "search_beam": 20,
        "output_beam": 8,
        "min_active_states": 30,
        "max_active_states": 10000,
        "use_double_scores": True,
    }
    return AttributeDict(config)
def decode_one_batch(
    params: AttributeDict,
    model: nn.Module,
    HLG: Optional[k2.Fsa],
    H: Optional[k2.Fsa],
    bpe_model: Optional[spm.SentencePieceProcessor],
    batch: dict,
    word_table: k2.SymbolTable,
    sos_id: int,
    eos_id: int,
    G: Optional[k2.Fsa] = None,
) -> Dict[str, List[List[str]]]:
    """Decode one batch and return the result in a dict. The dict has the
    following format:

        - key: It indicates the setting used for decoding. For example,
               if no rescoring is used, the key is the string `no_rescore`.
               If LM rescoring is used, the key is the string `lm_scale_xxx`,
               where `xxx` is the value of `lm_scale`. An example key is
               `lm_scale_0.7`
        - value: It contains the decoding result. `len(value)` equals to
                 batch size. `value[i]` is the decoding result for the i-th
                 utterance in the given batch.
    Args:
      params:
        It's the return value of :func:`get_params`.

        - params.method is "1best", it uses 1best decoding without LM rescoring.
        - params.method is "nbest", it uses nbest decoding without LM rescoring.
        - params.method is "nbest-rescoring", it uses nbest LM rescoring.
        - params.method is "whole-lattice-rescoring", it uses whole lattice LM
          rescoring.

      model:
        The neural model.
      HLG:
        The decoding graph. Used only when params.method is NOT ctc-decoding.
      H:
        The ctc topo. Used only when params.method is ctc-decoding.
      bpe_model:
        The BPE model. Used only when params.method is ctc-decoding.
      batch:
        It is the return value from iterating
        `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation
        for the format of the `batch`.
      word_table:
        The word symbol table.
      sos_id:
        The token ID of the SOS.
      eos_id:
        The token ID of the EOS.
      G:
        An LM. It is not None when params.method is "nbest-rescoring"
        or "whole-lattice-rescoring". In general, the G in HLG
        is a 3-gram LM, while this G is a 4-gram LM.
    Returns:
      Return the decoding result. See above description for the format of
      the returned dict.
    """
    if HLG is not None:
        device = HLG.device
    else:
        device = H.device
    feature = batch["inputs"]
    assert feature.ndim == 3
    feature = feature.to(device)
    # at entry, feature is (N, T, C)
    supervisions = batch["supervisions"]
    nnet_output, memory, memory_key_padding_mask = model(feature, supervisions)
    # nnet_output is (N, T, C)
    supervision_segments = torch.stack(
        (
            supervisions["sequence_idx"],
            supervisions["start_frame"] // params.subsampling_factor,
            supervisions["num_frames"] // params.subsampling_factor,
        ),
        1,
    ).to(torch.int32)
    if H is None:
        assert HLG is not None
        decoding_graph = HLG
    else:
        assert HLG is None
        assert bpe_model is not None
        decoding_graph = H
    lattice = get_lattice(
        nnet_output=nnet_output,
        decoding_graph=decoding_graph,
        supervision_segments=supervision_segments,
        search_beam=params.search_beam,
        output_beam=params.output_beam,
        min_active_states=params.min_active_states,
        max_active_states=params.max_active_states,
        subsampling_factor=params.subsampling_factor,
    )
    if params.method == "ctc-decoding":
        best_path = one_best_decoding(
            lattice=lattice, use_double_scores=params.use_double_scores
        )
        # Note: `best_path.aux_labels` contains token IDs, not word IDs
        # since we are using H, not HLG here.
        #
        # token_ids is a list-of-list of IDs
        token_ids = get_texts(best_path)
        # hyps is a list of str, e.g., ['xxx yyy zzz', ...]
        hyps = bpe_model.decode(token_ids)
        # hyps is a list of list of str, e.g., [['xxx', 'yyy', 'zzz'], ... ]
        hyps = [s.split() for s in hyps]
        key = "ctc-decoding"
        return {key: hyps}
    if params.method == "nbest-oracle":
        # Note: You can also pass rescored lattices to it.
        # We choose the HLG decoded lattice for speed reasons
        # as HLG decoding is faster and the oracle WER
        # is only slightly worse than that of rescored lattices.
        best_path = nbest_oracle(
            lattice=lattice,
            num_paths=params.num_paths,
            ref_texts=supervisions["text"],
            word_table=word_table,
            nbest_scale=params.nbest_scale,
            oov="<UNK>",
        )
        hyps = get_texts(best_path)
        hyps = [[word_table[i] for i in ids] for ids in hyps]
        key = f"oracle_{params.num_paths}_nbest_scale_{params.nbest_scale}"  # noqa
        return {key: hyps}
    if params.method in ["1best", "nbest"]:
        if params.method == "1best":
            best_path = one_best_decoding(
                lattice=lattice, use_double_scores=params.use_double_scores
            )
            key = "no_rescore"
        else:
            best_path = nbest_decoding(
                lattice=lattice,
                num_paths=params.num_paths,
                use_double_scores=params.use_double_scores,
                nbest_scale=params.nbest_scale,
            )
            key = f"no_rescore-nbest-scale-{params.nbest_scale}-{params.num_paths}"  # noqa
        hyps = get_texts(best_path)
        hyps = [[word_table[i] for i in ids] for ids in hyps]
        return {key: hyps}
    assert params.method in [
        "nbest-rescoring",
        "whole-lattice-rescoring",
        "attention-decoder",
    ]
    lm_scale_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
    lm_scale_list += [0.8, 0.9, 1.0, 1.1, 1.2, 1.3]
    lm_scale_list += [1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]
    if params.method == "nbest-rescoring":
        best_path_dict = rescore_with_n_best_list(
            lattice=lattice,
            G=G,
            num_paths=params.num_paths,
            lm_scale_list=lm_scale_list,
            nbest_scale=params.nbest_scale,
        )
    elif params.method == "whole-lattice-rescoring":
        best_path_dict = rescore_with_whole_lattice(
            lattice=lattice,
            G_with_epsilon_loops=G,
            lm_scale_list=lm_scale_list,
        )
    elif params.method == "attention-decoder":
        # lattice uses a 3-gram Lm. We rescore it with a 4-gram LM.
        rescored_lattice = rescore_with_whole_lattice(
            lattice=lattice,
            G_with_epsilon_loops=G,
            lm_scale_list=None,
        )
        # TODO: pass `lattice` instead of `rescored_lattice` to
        # `rescore_with_attention_decoder`
        best_path_dict = rescore_with_attention_decoder(
            lattice=rescored_lattice,
            num_paths=params.num_paths,
            model=model,
            memory=memory,
            memory_key_padding_mask=memory_key_padding_mask,
            sos_id=sos_id,
            eos_id=eos_id,
            nbest_scale=params.nbest_scale,
        )
    else:
        assert False, f"Unsupported decoding method: {params.method}"
    ans = dict()
    if best_path_dict is not None:
        for lm_scale_str, best_path in best_path_dict.items():
            hyps = get_texts(best_path)
            hyps = [[word_table[i] for i in ids] for ids in hyps]
            ans[lm_scale_str] = hyps
    else:
        # Rescoring produced no usable paths.  Emit one empty hypothesis per
        # utterance so that decode_dataset's `len(hyps) == len(texts)` check
        # still holds.  (The previous code wrote `[[] * n]`, which is always
        # `[[]]` — a single empty hypothesis regardless of batch size — and it
        # pointlessly rewrote the same "empty" key once per lm scale.)
        ans["empty"] = [[] for _ in range(lattice.shape[0])]
    return ans
def decode_dataset(
    dl: torch.utils.data.DataLoader,
    params: AttributeDict,
    model: nn.Module,
    HLG: Optional[k2.Fsa],
    H: Optional[k2.Fsa],
    bpe_model: Optional[spm.SentencePieceProcessor],
    word_table: k2.SymbolTable,
    sos_id: int,
    eos_id: int,
    G: Optional[k2.Fsa] = None,
) -> Dict[str, List[Tuple[List[str], List[str]]]]:
    """Decode dataset.

    Args:
      dl:
        PyTorch's dataloader containing the dataset to decode.
      params:
        It is returned by :func:`get_params`.
      model:
        The neural model.
      HLG:
        The decoding graph. Used only when params.method is NOT ctc-decoding.
      H:
        The ctc topo. Used only when params.method is ctc-decoding.
      bpe_model:
        The BPE model. Used only when params.method is ctc-decoding.
      word_table:
        It is the word symbol table.
      sos_id:
        The token ID for SOS.
      eos_id:
        The token ID for EOS.
      G:
        An LM. It is not None when params.method is "nbest-rescoring"
        or "whole-lattice-rescoring". In general, the G in HLG
        is a 3-gram LM, while this G is a 4-gram LM.
    Returns:
      Return a dict, whose key may be "no-rescore" if no LM rescoring
      is used, or it may be "lm_scale_0.7" if LM rescoring is used.
      Its value is a list of tuples. Each tuple contains two elements:
      The first is the reference transcript, and the second is the
      predicted result.
    """
    # Accumulates (ref, hyp) pairs per decoding key.  (The previous version
    # first bound `results` to a throwaway list that was immediately shadowed
    # by this defaultdict.)
    results = defaultdict(list)
    num_cuts = 0
    try:
        num_batches = len(dl)
    except TypeError:
        # Some dataloaders have no defined length; only used for logging.
        num_batches = "?"
    for batch_idx, batch in enumerate(dl):
        texts = batch["supervisions"]["text"]
        hyps_dict = decode_one_batch(
            params=params,
            model=model,
            HLG=HLG,
            H=H,
            bpe_model=bpe_model,
            batch=batch,
            word_table=word_table,
            G=G,
            sos_id=sos_id,
            eos_id=eos_id,
        )
        for lm_scale, hyps in hyps_dict.items():
            this_batch = []
            assert len(hyps) == len(texts)
            for hyp_words, ref_text in zip(hyps, texts):
                ref_words = ref_text.split()
                this_batch.append((ref_words, hyp_words))
            results[lm_scale].extend(this_batch)
        num_cuts += len(batch["supervisions"]["text"])
        if batch_idx % 100 == 0:
            batch_str = f"{batch_idx}/{num_batches}"
            logging.info(
                f"batch {batch_str}, cuts processed until now is {num_cuts}"
            )
    return results
def save_results(
    params: AttributeDict,
    test_set_name: str,
    results_dict: Dict[str, List[Tuple[List[int], List[int]]]],
):
    """Write recognition results and WER summaries for one test set.

    Args:
      params:
        It is returned by :func:`get_params`. ``params.exp_dir`` is where
        the output files go and ``params.method`` is the decoding method.
      test_set_name:
        Name of the test set; embedded in the output file names.
      results_dict:
        Maps a decoding setting (e.g. an LM scale key) to a list of
        (reference, hypothesis) pairs.
    """
    # The attention-decoder method produces too many logs, so per-key
    # logging is silenced for it.
    enable_log = params.method != "attention-decoder"

    test_set_wers = dict()
    for key, results in results_dict.items():
        recog_path = params.exp_dir / f"recogs-{test_set_name}-{key}.txt"
        store_transcripts(filename=recog_path, texts=results)
        if enable_log:
            logging.info(f"The transcripts are stored in {recog_path}")

        # Dump WERs, per-word error statistics and aligned ref/hyp pairs.
        errs_filename = params.exp_dir / f"errs-{test_set_name}-{key}.txt"
        with open(errs_filename, "w") as f:
            test_set_wers[key] = write_error_stats(
                f, f"{test_set_name}-{key}", results, enable_log=enable_log
            )
        if enable_log:
            logging.info(f"Wrote detailed error stats to {errs_filename}")

    # Rank settings from best (lowest) WER to worst for the summary file.
    ranked_wers = sorted(test_set_wers.items(), key=lambda kv: kv[1])
    errs_info = params.exp_dir / f"wer-summary-{test_set_name}.txt"
    with open(errs_info, "w") as f:
        print("settings\tWER", file=f)
        for key, val in ranked_wers:
            print(f"{key}\t{val}", file=f)

    # Log a human-readable summary; only the first (best) row gets the note.
    summary = f"\nFor {test_set_name}, WER of different settings are:\n"
    note = f"\tbest for {test_set_name}"
    for key, val in ranked_wers:
        summary += f"{key}\t{val}{note}\n"
        note = ""
    logging.info(summary)
@torch.no_grad()
def main():
    """Entry point: parse CLI args, build the decoding graph(s), load the
    (optionally averaged) Conformer checkpoint, decode the LibriSpeech test
    sets and save transcripts/WER summaries via :func:`save_results`.
    """
    parser = get_parser()
    LibriSpeechAsrDataModule.add_arguments(parser)
    args = parser.parse_args()
    args.exp_dir = Path(args.exp_dir)
    args.lang_dir = Path(args.lang_dir)
    params = get_params()
    # CLI arguments override/extend the defaults from get_params().
    params.update(vars(args))
    setup_logger(f"{params.exp_dir}/log-{params.method}/log-decode")
    logging.info("Decoding started")
    logging.info(params)
    lexicon = Lexicon(params.lang_dir)
    max_token_id = max(lexicon.tokens)
    num_classes = max_token_id + 1 # +1 for the blank
    device = torch.device("cpu")
    if torch.cuda.is_available():
        device = torch.device("cuda", 0)
    logging.info(f"device: {device}")
    graph_compiler = BpeCtcTrainingGraphCompiler(
        params.lang_dir,
        device=device,
        sos_token="<sos/eos>",
        eos_token="<sos/eos>",
    )
    sos_id = graph_compiler.sos_id
    eos_id = graph_compiler.eos_id
    # ctc-decoding uses the CTC topology H only; all other methods use HLG.
    if params.method == "ctc-decoding":
        HLG = None
        H = k2.ctc_topo(
            max_token=max_token_id,
            modified=False,
            device=device,
        )
        bpe_model = spm.SentencePieceProcessor()
        bpe_model.load(str(params.lang_dir / "bpe.model"))
    else:
        H = None
        bpe_model = None
        HLG = k2.Fsa.from_dict(
            torch.load(f"{params.lang_dir}/HLG.pt", map_location="cpu")
        )
        HLG = HLG.to(device)
        assert HLG.requires_grad is False
        if not hasattr(HLG, "lm_scores"):
            HLG.lm_scores = HLG.scores.clone()
    # Rescoring methods additionally need the 4-gram LM G, compiled once and
    # cached as G_4_gram.pt.
    if params.method in (
        "nbest-rescoring",
        "whole-lattice-rescoring",
        "attention-decoder",
    ):
        if not (params.lm_dir / "G_4_gram.pt").is_file():
            logging.info("Loading G_4_gram.fst.txt")
            logging.warning("It may take 8 minutes.")
            with open(params.lm_dir / "G_4_gram.fst.txt") as f:
                first_word_disambig_id = lexicon.word_table["#0"]
                G = k2.Fsa.from_openfst(f.read(), acceptor=False)
                # G.aux_labels is not needed in later computations, so
                # remove it here.
                del G.aux_labels
                # CAUTION: The following line is crucial.
                # Arcs entering the back-off state have label equal to #0.
                # We have to change it to 0 here.
                G.labels[G.labels >= first_word_disambig_id] = 0
                G = k2.Fsa.from_fsas([G]).to(device)
                G = k2.arc_sort(G)
                torch.save(G.as_dict(), params.lm_dir / "G_4_gram.pt")
        else:
            logging.info("Loading pre-compiled G_4_gram.pt")
            d = torch.load(params.lm_dir / "G_4_gram.pt", map_location="cpu")
            G = k2.Fsa.from_dict(d).to(device)
        if params.method in ["whole-lattice-rescoring", "attention-decoder"]:
            # Add epsilon self-loops to G as we will compose
            # it with the whole lattice later
            G = k2.add_epsilon_self_loops(G)
            G = k2.arc_sort(G)
            G = G.to(device)
        # G.lm_scores is used to replace HLG.lm_scores during
        # LM rescoring.
        G.lm_scores = G.scores.clone()
    else:
        G = None
    model = Conformer(
        num_features=params.feature_dim,
        nhead=params.nhead,
        d_model=params.attention_dim,
        num_classes=num_classes,
        subsampling_factor=params.subsampling_factor,
        num_decoder_layers=params.num_decoder_layers,
        vgg_frontend=params.vgg_frontend,
        use_feat_batchnorm=params.use_feat_batchnorm,
    )
    if params.avg == 1:
        load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
    else:
        # Average the last `avg` epoch checkpoints ending at `epoch`.
        start = params.epoch - params.avg + 1
        filenames = []
        for i in range(start, params.epoch + 1):
            # NOTE(review): this guard tests `start`, which is loop-invariant;
            # when start < 0 NO checkpoints are collected at all. Confirm it
            # was not meant to be `if i >= 0`.
            if start >= 0:
                filenames.append(f"{params.exp_dir}/epoch-{i}.pt")
        logging.info(f"averaging {filenames}")
        model.load_state_dict(average_checkpoints(filenames))
    if params.export:
        # Export-only mode: save the averaged weights and exit without decoding.
        logging.info(f"Export averaged model to {params.exp_dir}/pretrained.pt")
        torch.save(
            {"model": model.state_dict()}, f"{params.exp_dir}/pretrained.pt"
        )
        return
    model.to(device)
    model.eval()
    num_param = sum([p.numel() for p in model.parameters()])
    logging.info(f"Number of model parameters: {num_param}")
    librispeech = LibriSpeechAsrDataModule(args)
    # CAUTION: `test_sets` is for displaying only.
    # If you want to skip test-clean, you have to skip
    # it inside the for loop. That is, use
    #
    # if test_set == 'test-clean': continue
    #
    test_sets = ["test-clean", "test-other"]
    for test_set, test_dl in zip(test_sets, librispeech.test_dataloaders()):
        results_dict = decode_dataset(
            dl=test_dl,
            params=params,
            model=model,
            HLG=HLG,
            H=H,
            bpe_model=bpe_model,
            word_table=lexicon.word_table,
            G=G,
            sos_id=sos_id,
            eos_id=eos_id,
        )
        save_results(
            params=params, test_set_name=test_set, results_dict=results_dict
        )
    logging.info("Done!")
# Restrict PyTorch to a single compute thread and a single interop thread.
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
if __name__ == "__main__":
    main()
| egs/librispeech/ASR/conformer_mmi/decode.py | 22,952 | Decode dataset.
Args:
dl:
PyTorch's dataloader containing the dataset to decode.
params:
It is returned by :func:`get_params`.
model:
The neural model.
HLG:
The decoding graph. Used only when params.method is NOT ctc-decoding.
H:
The ctc topo. Used only when params.method is ctc-decoding.
bpe_model:
The BPE model. Used only when params.method is ctc-decoding.
word_table:
It is the word symbol table.
sos_id:
The token ID for SOS.
eos_id:
The token ID for EOS.
G:
An LM. It is not None when params.method is "nbest-rescoring"
or "whole-lattice-rescoring". In general, the G in HLG
is a 3-gram LM, while this G is a 4-gram LM.
Returns:
Return a dict, whose key may be "no-rescore" if no LM rescoring
is used, or it may be "lm_scale_0.7" if LM rescoring is used.
Its value is a list of tuples. Each tuple contains two elements:
The first is the reference transcript, and the second is the
predicted result.
Decode one batch and return the result in a dict. The dict has the
following format:
- key: It indicates the setting used for decoding. For example,
if no rescoring is used, the key is the string `no_rescore`.
If LM rescoring is used, the key is the string `lm_scale_xxx`,
where `xxx` is the value of `lm_scale`. An example key is
`lm_scale_0.7`
- value: It contains the decoding result. `len(value)` equals to
batch size. `value[i]` is the decoding result for the i-th
utterance in the given batch.
Args:
params:
It's the return value of :func:`get_params`.
- params.method is "1best", it uses 1best decoding without LM rescoring.
- params.method is "nbest", it uses nbest decoding without LM rescoring.
- params.method is "nbest-rescoring", it uses nbest LM rescoring.
- params.method is "whole-lattice-rescoring", it uses whole lattice LM
rescoring.
model:
The neural model.
HLG:
The decoding graph. Used only when params.method is NOT ctc-decoding.
H:
The ctc topo. Used only when params.method is ctc-decoding.
bpe_model:
The BPE model. Used only when params.method is ctc-decoding.
batch:
It is the return value from iterating
`lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation
for the format of the `batch`.
word_table:
The word symbol table.
sos_id:
The token ID of the SOS.
eos_id:
The token ID of the EOS.
G:
An LM. It is not None when params.method is "nbest-rescoring"
or "whole-lattice-rescoring". In general, the G in HLG
is a 3-gram LM, while this G is a 4-gram LM.
Returns:
Return the decoding result. See above description for the format of
the returned dict.
!/usr/bin/env python3 Copyright 2021 Xiaomi Corporation (Author: Liyong Guo, Fangjun Kuang) See ../../../../LICENSE for clarification regarding multiple authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. parameters for conformer parameters for decoding at entry, feature is (N, T, C) nnet_output is (N, T, C) Note: `best_path.aux_labels` contains token IDs, not word IDs since we are using H, not HLG here. token_ids is a lit-of-list of IDs hyps is a list of str, e.g., ['xxx yyy zzz', ...] hyps is a list of list of str, e.g., [['xxx', 'yyy', 'zzz'], ... ] Note: You can also pass rescored lattices to it. We choose the HLG decoded lattice for speed reasons as HLG decoding is faster and the oracle WER is only slightly worse than that of rescored lattices. noqa noqa lattice uses a 3-gram Lm. We rescore it with a 4-gram LM. TODO: pass `lattice` instead of `rescored_lattice` to `rescore_with_attention_decoder` Set it to False since there are too many logs. The following prints out WERs, per-word error statistics and aligned ref/hyp pairs. +1 for the blank G.aux_labels is not needed in later computations, so remove it here. CAUTION: The following line is crucial. Arcs entering the back-off state have label equal to 0. We have to change it to 0 here. Add epsilon self-loops to G as we will compose it with the whole lattice later G.lm_scores is used to replace HLG.lm_scores during LM rescoring. CAUTION: `test_sets` is for displaying only. If you want to skip test-clean, you have to skip it inside the for loop. 
That is, use if test_set == 'test-clean': continue | 4,816 | en | 0.812743 |
#!/usr/bin/env python
# -*- Mode: Python; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*-
# vi: set ts=4 sw=4 expandtab:
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is [Open Source Virtual Machine.].
#
# The Initial Developer of the Original Code is
# Adobe System Incorporated.
# Portions created by the Initial Developer are Copyright (C) 2004-2006
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Adobe AS3 Team
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import os
import subprocess
import sys
def compile_abc(target, files, deps=None, configs=None):
    """Compile ActionScript sources into <target> using asc.jar.

    The compiler jar is taken from the ASC environment variable, falling
    back to ../../../utils/asc.jar.

    Args:
        target: Output name passed to -out; also used as the working
            directory for the java process (so it must be a directory
            relative to the current one).
        files: List of .as source files to compile.
        deps: Optional list of previously built units; each dep adds
            ../<dep>/<dep>.abc to the command line.
        configs: Optional list of extra compiler arguments (e.g. -config
            flags). May be None or empty.
    """
    asc_jar = os.environ.get('ASC', os.path.realpath('../../../utils/asc.jar'))
    javacmd = ['java', '-ea', '-DAS3', '-DAVMPLUS', '-classpath', asc_jar,
               'macromedia.asc.embedding.ScriptCompiler', '-builtin']
    if deps:
        javacmd.extend("../%s/%s.abc" % (dep, dep) for dep in deps)
    javacmd.extend(['-out', target])
    javacmd.extend(files)
    # BUG FIX: configs defaults to None; the unconditional extend(None)
    # raised TypeError whenever configs was omitted.
    if configs:
        javacmd.extend(configs)
    p = subprocess.Popen(javacmd, cwd=target)
    p.wait()
def main():
    """Build the builtin, shell and avmplus .abc units.

    Any command-line arguments are forwarded verbatim to the compiler;
    with none given, float support is disabled by default.
    """
    configs = sys.argv[1:] or ['-config', 'CONFIG::VMCFG_FLOAT=false']

    builtin_sources = [
        "builtin.as", "Vector.as", "DescribeType.as", "JSON.as", "Math.as",
        "Error.as", "Date.as", "RegExp.as", "IDataInput.as", "IDataOutput.as",
        "ByteArray.as", "Proxy.as", "XML.as", "Dictionary.as",
    ]
    compile_abc("builtin", builtin_sources, configs=configs)
    compile_abc("shell", ["Capabilities.as", "Domain.as", "System.as"],
                deps=["builtin"], configs=configs)
    compile_abc("avmplus", ["avmplus.as"], deps=["builtin"], configs=configs)


if __name__ == "__main__":
    main()
| src/avm2/generated/generate.py | 2,996 | !/usr/bin/env python -*- Mode: Python; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- vi: set ts=4 sw=4 expandtab: ***** BEGIN LICENSE BLOCK ***** Version: MPL 1.1/GPL 2.0/LGPL 2.1 The contents of this file are subject to the Mozilla Public License Version 1.1 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.mozilla.org/MPL/ Software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. The Original Code is [Open Source Virtual Machine.]. The Initial Developer of the Original Code is Adobe System Incorporated. Portions created by the Initial Developer are Copyright (C) 2004-2006 the Initial Developer. All Rights Reserved. Contributor(s): Adobe AS3 Team Alternatively, the contents of this file may be used under the terms of either the GNU General Public License Version 2 or later (the "GPL"), or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), in which case the provisions of the GPL or the LGPL are applicable instead of those above. If you wish to allow use of your version of this file only under the terms of either the GPL or the LGPL, and not to allow others to use your version of this file under the terms of the MPL, indicate your decision by deleting the provisions above and replace them with the notice and other provisions required by the GPL or the LGPL. If you do not delete the provisions above, a recipient may use your version of this file under the terms of any one of the MPL, the GPL or the LGPL. ***** END LICENSE BLOCK ***** Build without float suppot by default | 1,781 | en | 0.842464 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class TrafficWeight:
    """Holds request/response traffic counters, both starting at zero."""

    def __init__(self):
        # Counters start at zero and are updated by external code.
        self.request, self.response = 0, 0
class PacketInterval:
    """Holds first/last packet markers for an interval, initialized to 0."""

    def __init__(self):
        # Both markers start at 0; ints are immutable so sharing the
        # initial object via chained assignment is safe.
        self.firstPacket = self.lastPacket = 0
# Dana jest posortowana tablica A[1, ..., n] oraz liczba x. Proszę napisać program, który stwierdza
# czy istnieją indeksy i oraz j takie, że A[i] + A[j] = x.
def sum_search(T, x):
    """Return True iff the sorted list T has indices i, j with T[i] + T[j] == x.

    Two-pointer scan: since T is sorted, a pair sum that is too large can
    only shrink by moving the right pointer left, and one that is too small
    can only grow by moving the left pointer right. Note that the pointers
    may coincide, so i == j is allowed (the loop condition is <=).
    """
    lo, hi = 0, len(T) - 1
    while lo <= hi:
        pair_sum = T[lo] + T[hi]
        if pair_sum == x:
            return True
        if pair_sum > x:
            hi -= 1
        else:
            lo += 1
    return False
# Demo: 12 + 25 == 37 exists in T, so this prints True.
T = [2, 5, 8, 12, 16, 19, 20, 25, 34, 55, 81]
x = 37
print(sum_search(T, x))
| Exercises/Exercises_01/07_exercise.py | 466 | Dana jest posortowana tablica A[1, ..., n] oraz liczba x. Proszę napisać program, który stwierdza czy istnieją indeksy i oraz j takie, że A[i] + A[j] = x. | 154 | pl | 0.994844 |
# Generated by Django 2.2.12 on 2020-07-05 18:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the boolean ``metadata_locked`` field (default False) to Track."""

    dependencies = [
        ('vote', '0002_request_track'),
    ]
    operations = [
        migrations.AddField(
            model_name='track',
            name='metadata_locked',
            field=models.BooleanField(default=False),
        ),
    ]
| nkdsu/apps/vote/migrations/0003_track_metadata_locked.py | 388 | Generated by Django 2.2.12 on 2020-07-05 18:03 | 46 | en | 0.535931 |
# IMPORTATION STANDARD
# IMPORTATION THIRDPARTY
import pytest
# IMPORTATION INTERNAL
from openbb_terminal.cryptocurrency.defi import terraengineer_model
@pytest.mark.vcr
@pytest.mark.parametrize(
    "asset,address",
    [("ust", "terra1tmnqgvg567ypvsvk6rwsga3srp7e3lg6u0elp8")],
)
def test_get_history_asset_from_terra_address(asset, address, recorder):
    """Replay the recorded HTTP cassette (pytest.mark.vcr) and snapshot the
    model's output via the ``recorder`` fixture."""
    df = terraengineer_model.get_history_asset_from_terra_address(
        asset=asset,
        address=address,
    )
    recorder.capture(df)
| tests/openbb_terminal/cryptocurrency/defi/test_terraengineer_model.py | 503 | IMPORTATION STANDARD IMPORTATION THIRDPARTY IMPORTATION INTERNAL | 64 | en | 0.66435 |
from __future__ import print_function
# Part of the JBEI Quantitative Metabolic Modeling Library (JQMM)
# Copyright (c) 2016, The Regents of the University of California.
# For licensing details see "license.txt" and "legal.txt".
from builtins import str
import re
import core
import NamedRangedNumber
class Gene(NamedRangedNumber.NamedRangedNumber):
    """
    A single gene and the value typically associated with it.

    May be instantiated with one name, or with a list of alternative names
    (conflicting naming standards mean a gene can carry several); the first
    entry in the list is treated as the canonical name when rendering the
    gene as a string. The value can be an integer, a float, a ranged number,
    or a string representation of any of these, and is kept internally and
    exported as a ranged number by the superclass.
    """
    def __init__(self, names, value=None):
        # Normalize to a list so single-name and multi-name callers share
        # one validation path.
        candidates = names if isinstance(names, list) else [names]
        for candidate in candidates:
            assert ' ' not in candidate.strip(), f"Gene names cannot contain spaces: '{candidate}'"
        super(Gene, self).__init__(names, value)

    def addName(self, name):
        # Enforce the no-spaces rule before delegating to the superclass.
        assert ' ' not in name.strip(), f"Gene names cannot contain spaces: '{name}'"
        super(Gene, self).addName(name)
class GeneSet(NamedRangedNumber.NamedRangedNumberSet):
    """
    Class for a set of Gene objects, derived from NamedRangedNumberSet.
    """
    def __init__(self, contents=None):
        super(GeneSet, self).__init__(contents)
    def recastSet(self, victimSet, preferExistingObjects=True, preferExistingValues=False):
        # Re-express victimSet's items in terms of this set's contents by
        # delegating to NamedRangedNumberSet.recastItems, and wrap the
        # result in a new GeneSet.
        itemsToRecast = victimSet.contentsList
        recastItems = self.recastItems(itemsToRecast, preferExistingObjects, preferExistingValues)
        return GeneSet(recastItems)
    @staticmethod
    def createSetsFromStrings(structureString="", valueString=None):
        """
        Construct a list of GeneSet objects based on the given strings.
        The first string, structureString, determines the number of GeneSets to create.
        An empty string will return an empty list.
        A string containing anything else will return one or more subsets.
        The number of subsets is determined by the number of times the separator string " or " occurs in the
        structureString. For example, "(setA) or (setB) or (setC) or geneD or geneE or (setF)" will create
        six subsets. "(setA)", "(setC)", "geneD", etc are substrings that declare the contents of each set.
        There are two accepted ways to format the substrings:
        Method #1:
        substrings example: "aName=[aLow:aBest:aHigh] and bName=[bLow:bBest:bHigh] and cName=value and dName"
        valueString=None
        In this example, four NamedRangedNumber objects will be created in total:
        aName and bName specify rangedNumber values for NamedRangedNumber, cName specifies just one floating point
        number that is converted to a rangedNumber, and dName crates a NamedRangedNumber with the value set to None.
        valueString can be left out entirely.
        Method #2:
        substrings example: "aName and dName and cName and qName"
        valueString example: "aName=[aLow:aBest:aHigh] bName=[bLow:bBest:bHigh] cName=value dName=value fName=value"
        In this example, four NamedRangedNumber objects will be created, but only two of them will be assigned values
        (the other two will have values of None). This happens because the structureString declares what items are in
        the set, while the valueString only assigns values. If a value is given in the second string for a name that is
        not listed in the first, that value is ignored. No item is created for it.
        While it is possible to supply a mixture of methods 1 and 2, it is not recommended practice. Values assigned via
        method 2 take precedence over values assigned via method 1, even if the value assigned is "=None".
        Note that Gene objects are re-used from one set to the next. That is, if the same name is mentioned in two
        different substrings, only one Gene object will be created but it will be placed in two subsets.
        """
        # Parse valueString into a name -> value-string lookup table.
        givenValues = {}
        if valueString is not None:
            pairs = valueString.split() # Split on whitespace, no need to strip
            for pair in pairs:
                parts = pair.split('=')
                name = parts[0]
                # Only record pairs that actually contain an '=value' part.
                if parts[1:2]:
                    givenValues[name] = parts[1]
        subSets = []
        structureString = structureString.strip() # Stripping initial surrounding whitespace in order to check for a blank entry
        if structureString != "":
            # Each " or "-separated section becomes one GeneSet.
            collectionStrings = re.split("\s+or\s+", structureString)
            for collectionStr in collectionStrings:
                items = []
                # Sections of the string are sometimes enclosed in parenthesis.
                # Plus, sometime garbage from badly-done embeds comes up, like so:
                # <html:p> GENE_ASSOCIATION :( b1901 and b1900 and ( b1898 and b1899 ) ) </html:p>
                collectionStr = collectionStr.replace('(',' ').replace(')',' ').strip()
                itemStrings = re.split("\s+and\s+", collectionStr)
                for itemString in itemStrings:
                    item = Gene.fromString(itemString)
                    # Values from valueString take precedence over any value
                    # embedded in the structure string itself.
                    if item.canonicalName in givenValues:
                        item.set(givenValues[item.canonicalName])
                    items.append(item)
                if items:
                    subSets.append(GeneSet(items))
        # subSets is a list of GeneSet objects.
        return subSets
def test():
    """Smoke test exercising Gene parsing, arithmetic and GeneSet assembly.

    Prints progress to stdout and raises AssertionError on any failure.
    """
    # Gene.fromString must reject comma-separated ranges.
    try:
        vError = False
        print("Instantiating from illegal string \"test=[2,3,4]\", expecting failure ...")
        a = Gene.fromString("test=[2,3,4]")
    except ValueError:
        vError = True
        print("\tGot ValueError as expected.")
    assert vError, "NamedRangedNumber accepted wrong input."
    # Colon-separated ranges are the accepted format.
    print("\nInstantiating from string value \"test=[2:3:4]\" ...")
    a = Gene.fromString("test=[2:3:4]")
    assert a.canonicalName == 'test', "Name wrong"
    b = Gene.fromString("dorks=[0.5:1:1.5]")
    c = a + 3
    print("\t" + str(a) + ' + 3 = ' + str(c))
    d = a + b
    print("\t" + str(a) + ' + ' + str(b) + ' = ' + str(d))
    assert d.value.best == 4.0, "Addition failure, d.value.best should be 4.0."
    # Structure strings with bad separators must be rejected.
    print("\nInstantiating a GeneSet from an invalid string, expecting failure:")
    strA = "(bob fred frank) or (jed and Bill123) and (fred & billyBob) or captainAmerica"
    print("\t" + strA)
    try:
        aError = False
        geneSets = GeneSet.createSetsFromStrings(strA)
    except AssertionError:
        aError = True
        print("\tGot AssertionError as expected.")
    assert aError, "GeneSet.createSetsFromStrings accepted wrong input."
    # Valid structure + value strings build subsets sharing Gene objects.
    print("\nInstantiating a GeneSet from strings:")
    strA = "(bob and fred and frank) or (jed and Bill123) or (fred and billyBob) or captainAmerica"
    strB = "bob=12 fred=45 frank=[1:2:3] jed=10.1 Bill123=1"
    print("\t" + strA)
    print("\t" + strB)
    subSets = GeneSet.createSetsFromStrings(strA, strB)
    masterSet = GeneSet()
    newSubSets = []
    print("Master set:")
    for subSet in subSets:
        newSubSets.append(masterSet.recastSet(subSet))
    print("\t" + str(masterSet))
    print("Subsets consolidated, for embedding:")
    print("\t" + GeneSet.createStringFromSets(newSubSets))
    print("Significance test result for master set:" + str(masterSet.testSignificance(12)))


# BUG FIX: this guard used to appear BEFORE `def test()`, so running the
# module as a script raised NameError (test was not yet defined at the time
# the guard executed). The definition must precede the call.
if __name__ == "__main__":
    test()
| code/core/Genes.py | 7,887 | Class for single genes, and values typically associated with them.
Typically it is instantiated with a string representing a name, and a value.
Since genes can potentially have multiple names due to conflicting standards, the superclass also supports
receiving a list of names during instantiation, instead of a string.
The first name in the list will be considered the canonical name when rendering the gene as a string.
The given value can be an integer, a float, a ranged number, or a string representation of any of these,
but is kept internally and exported as a ranged number.
Class for a set of GeneSet objects, derived from NamedRangedNumberSet.
Construct a list of GeneSet objects based on the given strings.
The first string, structureString, determines the number of GeneSets to create.
An empty string will return an empty list.
A string containing anything else will return one or more subsets.
The number of subsets is determined by the number of times the separator string " or " occurs in the
structureString. For example, "(setA) or (setB) or (setC) or geneD or geneE or (setF)" will create
six subsets. "(setA)", "(setC)", "geneD", etc are substrings that declare the contents of each set.
There are two accepted ways to format the substrings:
Method #1:
substrings example: "aName=[aLow:aBest:aHigh] and bName=[bLow:bBest:bHigh] and cName=value and dName"
valueString=None
In this example, four NamedRangedNumber objects will be created in total:
aName and bName specify rangedNumber values for NamedRangedNumber, cName specifies just one floating point
number that is converted to a rangedNumber, and dName crates a NamedRangedNumber with the value set to None.
valueString can be left out entirely.
Method #2:
substrings example: "aName and dName and cName and qName"
valueString example: "aName=[aLow:aBest:aHigh] bName=[bLow:bBest:bHigh] cName=value dName=value fName=value"
In this example, four NamedRangedNumber objects will be created, but only two of them will be assigned values
(the other two will have values of None). This happens because the structureString declares what items are in
the set, while the valueString only assigns values. If a value is given in the second string for a name that is
not listed in the first, that value is ignored. No item is created for it.
While it is possible to supply a mixture of methods 1 and 2, it is not recommended practice. Values assigned via
method 2 take precedence over values assigned via method 1, even if the value assigned is "=None".
Note that Gene objects are re-used from one set to the next. That is, if the same name is mentioned in two
different substrings, only one Gene object will be created but it will be placed in two subsets.
Part of the JBEI Quantitative Metabolic Modeling Library (JQMM) Copyright (c) 2016, The Regents of the University of California. For licensing details see "license.txt" and "legal.txt". Split on whitespace, no need to strip Stripping initial surrounding whitespace in order to check for a blank entry Sections of the string are sometimes enclosed in parenthesis. Plus, sometime garbage from badly-done embeds comes up, like so: <html:p> GENE_ASSOCIATION :( b1901 and b1900 and ( b1898 and b1899 ) ) </html:p> is a list of GeneSets | 3,307 | en | 0.848236 |
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple executor that operates synchronously in eager TensorFlow mode."""
from typing import Any, MutableMapping, Optional
import cachetools
import tensorflow as tf
from tensorflow_federated.proto.v0 import computation_pb2 as pb
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import serialization_utils
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.common_libs import tracing
from tensorflow_federated.python.core.api import computation_base
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.api import typed_object
from tensorflow_federated.python.core.impl import computation_impl
from tensorflow_federated.python.core.impl import type_utils
from tensorflow_federated.python.core.impl.compiler import building_blocks
from tensorflow_federated.python.core.impl.executors import executor_base
from tensorflow_federated.python.core.impl.executors import executor_value_base
from tensorflow_federated.python.core.impl.types import type_analysis
from tensorflow_federated.python.core.impl.types import type_conversions
from tensorflow_federated.python.core.impl.types import type_serialization
from tensorflow_federated.python.core.impl.utils import tensorflow_utils
from tensorflow_federated.python.tensorflow_libs import graph_merge
# Cache size here is simply heuristic, no formal analysis.
# NOTE(review): presumably bounds a cachetools cache of compiled TF
# functions; the consuming code is not visible in this chunk — confirm.
_TF_FUNCTION_CACHE_SIZE = 100
def _get_wrapped_function_from_comp(comp, must_pin_function_to_cpu, param_type,
                                    device):
  """Extracts the TensorFlow function from serialized computation.

  Args:
    comp: An instance of `pb.Computation`.
    must_pin_function_to_cpu: A boolean flag to indicate if the computation is
      forced to be on CPUs.
    param_type: A `tff.Type` instance or None.
    device: A `tf.config.LogicalDevice` or None.
  Returns:
    A TensorFlow ConcreteFunction.
  """
  def function_to_wrap():
    """No-arg function to import graph def.
    We pass a no-arg function to `tf.compat.v1.wrap_function` to avoid
    the leftover placeholders that can result from binding arguments to the
    imported graphdef via `input_map`. The correct signature will be added to
    this function later, via the `prune` call below.
    Returns:
      Result of importing graphdef backing `comp`.
    """
    graph_def = serialization_utils.unpack_graph_def(comp.tensorflow.graph_def)
    init_op = comp.tensorflow.initialize_op
    if init_op:
      # Ensure the init op runs before any other op from the graph def.
      graph_def = tensorflow_utils.add_control_deps_for_init_op(
          graph_def, init_op)
    def _import_fn():
      # uniquify_shared_names avoids collisions between shared_name
      # attributes when multiple graphs are imported into one context.
      return tf.import_graph_def(
          graph_merge.uniquify_shared_names(graph_def), name='')
    # Device placement: explicit CPU pin wins, then the requested logical
    # device, otherwise let TF choose.
    if must_pin_function_to_cpu:
      with tf.device('cpu'):
        return _import_fn()
    elif device is not None:
      with tf.device(device.name):
        return _import_fn()
    else:
      return _import_fn()
  # Wrap with an empty signature; `prune` below attaches the real one.
  wrapped_noarg_fn = tf.compat.v1.wrap_function(function_to_wrap, signature=[])
  if param_type is not None:
    input_tensor_names = tensorflow_utils.extract_tensor_names_from_binding(
        comp.tensorflow.parameter)
  else:
    input_tensor_names = []
  output_tensor_names = tensorflow_utils.extract_tensor_names_from_binding(
      comp.tensorflow.result)
  import_graph = wrapped_noarg_fn.graph
  # Prune to a ConcreteFunction mapping the parameter tensors (feeds) to
  # the result tensors (fetches) of the imported graph.
  try:
    wrapped_fn = wrapped_noarg_fn.prune(
        feeds=tf.nest.map_structure(import_graph.as_graph_element,
                                    input_tensor_names),
        fetches=tf.nest.map_structure(import_graph.as_graph_element,
                                      output_tensor_names),
    )
  except KeyError as e:
    # A KeyError here means a binding referenced a tensor name absent from
    # the imported graph; surface it as a TypeError with context.
    raise TypeError(
        'Caught exception trying to prune graph `{g}` with '
        'feeds {feeds} and fetches {fetches}. This indicates that these '
        'names may not refer to tensors in the graph. .\nException: {e}'.format(
            g=import_graph,
            feeds=input_tensor_names,
            fetches=output_tensor_names,
            e=e))
  return wrapped_fn
def embed_tensorflow_computation(comp, type_spec=None, device=None):
"""Embeds a TensorFlow computation for use in the eager context.
Args:
comp: An instance of `pb.Computation`.
type_spec: An optional `tff.Type` instance or something convertible to it.
device: An optional `tf.config.LogicalDevice`.
Returns:
Either a one-argument or a zero-argument callable that executes the
computation in eager mode.
Raises:
TypeError: If arguments are of the wrong types, e.g., in `comp` is not a
TensorFlow computation.
"""
# TODO(b/134543154): Decide whether this belongs in `tensorflow_utils.py`
# since it deals exclusively with eager mode. Incubate here, and potentially
# move there, once stable.
py_typecheck.check_type(comp, pb.Computation)
comp_type = type_serialization.deserialize_type(comp.type)
type_spec = computation_types.to_type(type_spec)
if type_spec is not None:
if not type_spec.is_equivalent_to(comp_type):
raise TypeError('Expected a computation of type {}, got {}.'.format(
type_spec, comp_type))
else:
type_spec = comp_type
# TODO(b/155198591): Currently, TF will raise on any function returning a
# `tf.data.Dataset` not pinned to CPU. We should follow up here and remove
# this gating when we can.
must_pin_function_to_cpu = type_analysis.contains(type_spec.result,
lambda t: t.is_sequence())
which_computation = comp.WhichOneof('computation')
if which_computation != 'tensorflow':
unexpected_building_block = building_blocks.ComputationBuildingBlock.from_proto(
comp)
raise TypeError('Expected a TensorFlow computation, found {}.'.format(
unexpected_building_block))
if type_spec.is_function():
param_type = type_spec.parameter
result_type = type_spec.result
else:
param_type = None
result_type = type_spec
wrapped_fn = _get_wrapped_function_from_comp(comp, must_pin_function_to_cpu,
param_type, device)
param_fns = []
if param_type is not None:
for spec in structure.flatten(type_spec.parameter):
if spec.is_tensor():
param_fns.append(lambda x: x)
else:
py_typecheck.check_type(spec, computation_types.SequenceType)
param_fns.append(tf.data.experimental.to_variant)
result_fns = []
for spec in structure.flatten(result_type):
if spec.is_tensor():
result_fns.append(lambda x: x)
else:
py_typecheck.check_type(spec, computation_types.SequenceType)
tf_structure = type_conversions.type_to_tf_structure(spec.element)
def fn(x, tf_structure=tf_structure):
return tf.data.experimental.from_variant(x, tf_structure)
result_fns.append(fn)
def _fn_to_return(arg, param_fns, wrapped_fn): # pylint:disable=missing-docstring
param_elements = []
if arg is not None:
arg_parts = structure.flatten(arg)
if len(arg_parts) != len(param_fns):
raise RuntimeError('Expected {} arguments, found {}.'.format(
len(param_fns), len(arg_parts)))
for arg_part, param_fn in zip(arg_parts, param_fns):
param_elements.append(param_fn(arg_part))
result_parts = wrapped_fn(*param_elements)
# There is a tf.wrap_function(...) issue b/144127474 that variables created
# from tf.import_graph_def(...) inside tf.wrap_function(...) is not
# destroyed. So get all the variables from `wrapped_fn` and destroy
# manually.
# TODO(b/144127474): Remove this manual cleanup once tf.wrap_function(...)
# is fixed.
resources = []
for op in wrapped_fn.graph.get_operations():
if op.type == 'VarHandleOp':
resources += op.outputs
if resources:
for resource in wrapped_fn.prune(feeds={}, fetches=resources)():
tf.raw_ops.DestroyResourceOp(resource=resource)
result_elements = []
for result_part, result_fn in zip(result_parts, result_fns):
result_elements.append(result_fn(result_part))
return structure.pack_sequence_as(result_type, result_elements)
fn_to_return = lambda arg, p=param_fns, w=wrapped_fn: _fn_to_return(arg, p, w)
# pylint: disable=function-redefined
if must_pin_function_to_cpu:
old_fn_to_return = fn_to_return
def fn_to_return(x):
with tf.device('cpu'):
return old_fn_to_return(x)
elif device is not None:
old_fn_to_return = fn_to_return
def fn_to_return(x):
with tf.device(device.name):
return old_fn_to_return(x)
# pylint: enable=function-redefined
if param_type is not None:
return lambda arg: fn_to_return(arg) # pylint: disable=unnecessary-lambda
else:
return lambda: fn_to_return(None)
def to_representation_for_type(
value: Any,
tf_function_cache: MutableMapping[str, Any],
type_spec: Optional[computation_types.Type] = None,
device: Optional[tf.config.LogicalDevice] = None) -> Any:
"""Verifies or converts the `value` to an eager object matching `type_spec`.
WARNING: This function is only partially implemented. It does not support
data sets at this point.
The output of this function is always an eager tensor, eager dataset, a
representation of a TensorFlow computation, or a nested structure of those
that matches `type_spec`, and when `device` has been specified, everything
is placed on that device on a best-effort basis.
TensorFlow computations are represented here as zero- or one-argument Python
callables that accept their entire argument bundle as a single Python object.
Args:
value: The raw representation of a value to compare against `type_spec` and
potentially to be converted.
tf_function_cache: A cache obeying `dict` semantics that can be used to look
up previously embedded TensorFlow functions.
type_spec: An instance of `tff.Type`, can be `None` for values that derive
from `typed_object.TypedObject`.
device: An optional `tf.config.LogicalDevice` to place the value on (for
tensor-level values).
Returns:
Either `value` itself, or a modified version of it.
Raises:
TypeError: If the `value` is not compatible with `type_spec`.
"""
type_spec = type_utils.reconcile_value_with_type_spec(value, type_spec)
if isinstance(value, computation_base.Computation):
return to_representation_for_type(
computation_impl.ComputationImpl.get_proto(value), tf_function_cache,
type_spec, device)
elif isinstance(value, pb.Computation):
key = (value.SerializeToString(), str(type_spec),
device.name if device else None)
cached_fn = tf_function_cache.get(key)
if cached_fn is not None:
return cached_fn
embedded_fn = embed_tensorflow_computation(value, type_spec, device)
tf_function_cache[key] = embedded_fn
return embedded_fn
elif type_spec.is_struct():
type_elem = structure.to_elements(type_spec)
value_elem = (structure.to_elements(structure.from_container(value)))
result_elem = []
if len(type_elem) != len(value_elem):
raise TypeError('Expected a {}-element tuple, found {} elements.'.format(
len(type_elem), len(value_elem)))
for (t_name, el_type), (v_name, el_val) in zip(type_elem, value_elem):
if t_name != v_name:
raise TypeError(
'Mismatching element names in type vs. value: {} vs. {}.'.format(
t_name, v_name))
el_repr = to_representation_for_type(el_val, tf_function_cache, el_type,
device)
result_elem.append((t_name, el_repr))
return structure.Struct(result_elem)
elif device is not None:
py_typecheck.check_type(device, tf.config.LogicalDevice)
with tf.device(device.name):
return to_representation_for_type(
value, tf_function_cache, type_spec=type_spec, device=None)
elif isinstance(value, EagerValue):
return value.internal_representation
elif isinstance(value, executor_value_base.ExecutorValue):
raise TypeError(
'Cannot accept a value embedded within a non-eager executor.')
elif type_spec.is_tensor():
if not tf.is_tensor(value):
value = tf.convert_to_tensor(value, dtype=type_spec.dtype)
elif hasattr(value, 'read_value'):
# a tf.Variable-like result, get a proper tensor.
value = value.read_value()
value_type = (
computation_types.TensorType(value.dtype.base_dtype, value.shape))
if not type_spec.is_assignable_from(value_type):
raise TypeError(
'The apparent type {} of a tensor {} does not match the expected '
'type {}.'.format(value_type, value, type_spec))
return value
elif type_spec.is_sequence():
if isinstance(value, list):
value = tensorflow_utils.make_data_set_from_elements(
None, value, type_spec.element)
py_typecheck.check_type(value,
type_conversions.TF_DATASET_REPRESENTATION_TYPES)
element_type = computation_types.to_type(value.element_spec)
value_type = computation_types.SequenceType(element_type)
type_spec.check_assignable_from(value_type)
return value
else:
raise TypeError('Unexpected type {}.'.format(type_spec))
class EagerValue(executor_value_base.ExecutorValue):
"""A representation of an eager value managed by the eager executor."""
def __init__(self, value, tf_function_cache, type_spec=None, device=None):
"""Creates an instance of a value in this executor.
Args:
value: Depending on `type_spec`, either a `tf.Tensor`, `tf.data.Dataset`,
or a nested structure of these stored in an `Struct`.
tf_function_cache: A cache obeying `dict` semantics that can be used to
look up previously embedded TensorFlow functions.
type_spec: An instance of `tff.Type` that represents a tensor, a dataset,
or a nested structure of these.
device: An optional `tf.config.LogicalDevice` on which to place the value.
"""
if type_spec is None:
py_typecheck.check_type(value, typed_object.TypedObject)
type_spec = value.type_signature
else:
type_spec = computation_types.to_type(type_spec)
py_typecheck.check_type(type_spec, computation_types.Type)
self._type_signature = type_spec
self._value = to_representation_for_type(value, tf_function_cache,
type_spec, device)
@property
def internal_representation(self):
"""Returns a representation of the eager value embedded in the executor.
This property is only intended for use by the eager executor and tests. Not
for consumption by consumers of the executor interface.
"""
return self._value
@property
def type_signature(self):
return self._type_signature
@tracing.trace
async def compute(self):
return self._value
class EagerTFExecutor(executor_base.Executor):
"""The eager executor only runs TensorFlow, synchronously, in eager mode.
TODO(b/134764569): Add support for data as a building block.
This executor understands the following TFF types: tensors, sequences, named
tuples, and functions. It does not understand placements, federated, or
abstract types.
This executor understands the following kinds of TFF computation building
blocks: tensorflow computations, and external data. It does not understand
lambda calculus or any compositional constructs. Tuples and selections can
only be created using `create_struct()` and `create_selection()` in the API.
The arguments to be ingested can be Python constants of simple types, nested
structures of those, as well as eager tensors and eager datasets.
The external data references must identify files available in the executor's
filesystem. The exact format is yet to be documented.
The executor will be able to place work on specific devices (e.g., on GPUs).
In contrast to the reference executor, it handles data sets in a pipelined
fashion, and does not place limits on the data set sizes. It also avoids
marshaling TensorFlow values in and out between calls.
It does not deal with multithreading, checkpointing, federated computations,
and other concerns to be covered by separate executor components. It runs the
operations it supports in a synchronous fashion. Asynchrony and other aspects
not supported here should be handled by composing this executor with other
executors into a complex executor stack, rather than mixing in all the logic.
"""
def __init__(self, device=None):
"""Creates a new instance of an eager executor.
Args:
device: An optional `tf.config.LogicalDevice` that this executor will
schedule all of its operations to run on. For example, the list of
logical devices can be obtained using
`tf.config.list_logical_devices()`.
Raises:
RuntimeError: If not executing eagerly.
TypeError: If the device is not a `tf.config.LogicalDevice`.
ValueError: If there is no device `device`.
"""
if not tf.executing_eagerly():
raise RuntimeError('The eager executor may only be used in eager mode.')
if device is not None:
py_typecheck.check_type(device, tf.config.LogicalDevice)
self._device = device
else:
self._device = None
self._tf_function_cache = cachetools.LRUCache(_TF_FUNCTION_CACHE_SIZE)
@tracing.trace(span=True)
async def create_value(self, value, type_spec=None):
"""Embeds `value` of type `type_spec` within this executor.
Args:
value: An object that represents the value to embed within the executor.
type_spec: The `tff.Type` of the value represented by this object, or
something convertible to it. Can optionally be `None` if `value` is an
instance of `typed_object.TypedObject`.
Returns:
An instance of `EagerValue`.
Raises:
RuntimeError: If not executing eagerly.
TypeError: If the arguments are of the wrong types.
ValueError: If the type was not specified and cannot be determined from
the value.
"""
if not tf.executing_eagerly():
raise RuntimeError('The eager executor may only be used in eager mode.')
return EagerValue(value, self._tf_function_cache, type_spec, self._device)
@tracing.trace
async def create_call(self, comp, arg=None):
"""Creates a call to `comp` with optional `arg`.
Args:
comp: As documented in `executor_base.Executor`.
arg: As documented in `executor_base.Executor`.
Returns:
An instance of `EagerValue` representing the result of the call.
Raises:
RuntimeError: If not executing eagerly.
TypeError: If the arguments are of the wrong types.
"""
py_typecheck.check_type(comp, EagerValue)
if arg is not None:
py_typecheck.check_type(arg, EagerValue)
if not comp.type_signature.is_function():
raise TypeError('Expected a functional type, found {}'.format(
comp.type_signature))
if comp.type_signature.parameter is not None:
return EagerValue(
comp.internal_representation(arg.internal_representation), # pytype: disable=attribute-error
self._tf_function_cache,
comp.type_signature.result,
self._device)
elif arg is None:
return EagerValue(comp.internal_representation(), self._tf_function_cache,
comp.type_signature.result, self._device)
else:
raise TypeError('Cannot pass an argument to a no-argument function.')
@tracing.trace
async def create_struct(self, elements):
"""Creates a tuple of `elements`.
Args:
elements: As documented in `executor_base.Executor`.
Returns:
An instance of `EagerValue` that represents the constructed tuple.
"""
elements = structure.to_elements(structure.from_container(elements))
val_elements = []
type_elements = []
for k, v in elements:
py_typecheck.check_type(v, EagerValue)
val_elements.append((k, v.internal_representation))
type_elements.append((k, v.type_signature))
return EagerValue(
structure.Struct(val_elements), self._tf_function_cache,
computation_types.StructType([
(k, v) if k is not None else v for k, v in type_elements
]))
@tracing.trace
async def create_selection(self, source, index=None, name=None):
"""Creates a selection from `source`.
Args:
source: As documented in `executor_base.Executor`.
index: As documented in `executor_base.Executor`.
name: As documented in `executor_base.Executor`.
Returns:
An instance of `EagerValue` that represents the constructed selection.
Raises:
TypeError: If arguments are of the wrong types.
ValueError: If either both, or neither of `name` and `index` are present.
"""
py_typecheck.check_type(source, EagerValue)
py_typecheck.check_type(source.type_signature, computation_types.StructType)
py_typecheck.check_type(source.internal_representation, structure.Struct)
if index is not None:
py_typecheck.check_type(index, int)
if name is not None:
raise ValueError(
'Cannot simultaneously specify name {} and index {}.'.format(
name, index))
else:
return EagerValue(source.internal_representation[index],
self._tf_function_cache, source.type_signature[index])
elif name is not None:
py_typecheck.check_type(name, str)
return EagerValue(
getattr(source.internal_representation, str(name)),
self._tf_function_cache, getattr(source.type_signature, str(name)))
else:
raise ValueError('Must specify either name or index.')
def close(self):
pass
| tensorflow_federated/python/core/impl/executors/eager_tf_executor.py | 22,484 | The eager executor only runs TensorFlow, synchronously, in eager mode.
TODO(b/134764569): Add support for data as a building block.
This executor understands the following TFF types: tensors, sequences, named
tuples, and functions. It does not understand placements, federated, or
abstract types.
This executor understands the following kinds of TFF computation building
blocks: tensorflow computations, and external data. It does not understand
lambda calculus or any compositional constructs. Tuples and selections can
only be created using `create_struct()` and `create_selection()` in the API.
The arguments to be ingested can be Python constants of simple types, nested
structures of those, as well as eager tensors and eager datasets.
The external data references must identify files available in the executor's
filesystem. The exact format is yet to be documented.
The executor will be able to place work on specific devices (e.g., on GPUs).
In contrast to the reference executor, it handles data sets in a pipelined
fashion, and does not place limits on the data set sizes. It also avoids
marshaling TensorFlow values in and out between calls.
It does not deal with multithreading, checkpointing, federated computations,
and other concerns to be covered by separate executor components. It runs the
operations it supports in a synchronous fashion. Asynchrony and other aspects
not supported here should be handled by composing this executor with other
executors into a complex executor stack, rather than mixing in all the logic.
A representation of an eager value managed by the eager executor.
Creates an instance of a value in this executor.
Args:
value: Depending on `type_spec`, either a `tf.Tensor`, `tf.data.Dataset`,
or a nested structure of these stored in an `Struct`.
tf_function_cache: A cache obeying `dict` semantics that can be used to
look up previously embedded TensorFlow functions.
type_spec: An instance of `tff.Type` that represents a tensor, a dataset,
or a nested structure of these.
device: An optional `tf.config.LogicalDevice` on which to place the value.
Creates a new instance of an eager executor.
Args:
device: An optional `tf.config.LogicalDevice` that this executor will
schedule all of its operations to run on. For example, the list of
logical devices can be obtained using
`tf.config.list_logical_devices()`.
Raises:
RuntimeError: If not executing eagerly.
TypeError: If the device is not a `tf.config.LogicalDevice`.
ValueError: If there is no device `device`.
Extracts the TensorFlow function from serialized computation.
Args:
comp: An instance of `pb.Computation`.
must_pin_function_to_cpu: A boolean flag to indicate if the computation is
forced to be on CPUs.
param_type: A `tff.Type` instance or None.
device: A `tf.config.LogicalDevice` or None.
Returns:
A TensorFlow ConcreteFunction.
Embeds a TensorFlow computation for use in the eager context.
Args:
comp: An instance of `pb.Computation`.
type_spec: An optional `tff.Type` instance or something convertible to it.
device: An optional `tf.config.LogicalDevice`.
Returns:
Either a one-argument or a zero-argument callable that executes the
computation in eager mode.
Raises:
TypeError: If arguments are of the wrong types, e.g., in `comp` is not a
TensorFlow computation.
No-arg function to import graph def.
We pass a no-arg function to `tf.compat.v1.wrap_function` to avoid
the leftover placeholders that can result from binding arguments to the
imported graphdef via `input_map`. The correct signature will be added to
this function later, via the `prune` call below.
Returns:
Result of importing graphdef backing `comp`.
Returns a representation of the eager value embedded in the executor.
This property is only intended for use by the eager executor and tests. Not
for consumption by consumers of the executor interface.
Verifies or converts the `value` to an eager object matching `type_spec`.
WARNING: This function is only partially implemented. It does not support
data sets at this point.
The output of this function is always an eager tensor, eager dataset, a
representation of a TensorFlow computation, or a nested structure of those
that matches `type_spec`, and when `device` has been specified, everything
is placed on that device on a best-effort basis.
TensorFlow computations are represented here as zero- or one-argument Python
callables that accept their entire argument bundle as a single Python object.
Args:
value: The raw representation of a value to compare against `type_spec` and
potentially to be converted.
tf_function_cache: A cache obeying `dict` semantics that can be used to look
up previously embedded TensorFlow functions.
type_spec: An instance of `tff.Type`, can be `None` for values that derive
from `typed_object.TypedObject`.
device: An optional `tf.config.LogicalDevice` to place the value on (for
tensor-level values).
Returns:
Either `value` itself, or a modified version of it.
Raises:
TypeError: If the `value` is not compatible with `type_spec`.
A simple executor that operates synchronously in eager TensorFlow mode.
Copyright 2019, The TensorFlow Federated Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Cache size here is simply heuristic, no formal analysis. TODO(b/134543154): Decide whether this belongs in `tensorflow_utils.py` since it deals exclusively with eager mode. Incubate here, and potentially move there, once stable. TODO(b/155198591): Currently, TF will raise on any function returning a `tf.data.Dataset` not pinned to CPU. We should follow up here and remove this gating when we can. pylint:disable=missing-docstring There is a tf.wrap_function(...) issue b/144127474 that variables created from tf.import_graph_def(...) inside tf.wrap_function(...) is not destroyed. So get all the variables from `wrapped_fn` and destroy manually. TODO(b/144127474): Remove this manual cleanup once tf.wrap_function(...) is fixed. pylint: disable=function-redefined pylint: enable=function-redefined pylint: disable=unnecessary-lambda a tf.Variable-like result, get a proper tensor. pytype: disable=attribute-error | 6,682 | en | 0.794006 |
import torch
import argparse
from bindsnet.network import Network
from bindsnet.learning import Hebbian
from bindsnet.pipeline import EnvironmentPipeline
from bindsnet.encoding import bernoulli
from bindsnet.network.monitors import Monitor
from bindsnet.environment import GymEnvironment
from bindsnet.network.topology import Connection
from bindsnet.network.nodes import Input, LIFNodes
from bindsnet.pipeline.action import select_multinomial
parser = argparse.ArgumentParser()
parser.add_argument("-n", type=int, default=1000000)
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--n_neurons", type=int, default=100)
parser.add_argument("--dt", type=float, default=1.0)
parser.add_argument("--plot_interval", type=int, default=10)
parser.add_argument("--render_interval", type=int, default=10)
parser.add_argument("--print_interval", type=int, default=100)
parser.add_argument("--gpu", dest="gpu", action="store_true")
parser.set_defaults(plot=False, render=False, gpu=False)
args = parser.parse_args()
n = args.n
seed = args.seed
n_neurons = args.n_neurons
dt = args.dt
plot_interval = args.plot_interval
render_interval = args.render_interval
print_interval = args.print_interval
gpu = args.gpu
if gpu:
torch.set_default_tensor_type("torch.cuda.FloatTensor")
torch.cuda.manual_seed_all(seed)
else:
torch.manual_seed(seed)
# Build network.
network = Network(dt=dt)
# Layers of neurons.
inpt = Input(shape=(1, 1, 1, 80, 80), traces=True) # Input layer
exc = LIFNodes(n=n_neurons, refrac=0, traces=True) # Excitatory layer
readout = LIFNodes(n=4, refrac=0, traces=True) # Readout layer
layers = {"X": inpt, "E": exc, "R": readout}
# Connections between layers.
# Input -> excitatory.
w = 0.01 * torch.rand(layers["X"].n, layers["E"].n)
input_exc_conn = Connection(
source=layers["X"],
target=layers["E"],
w=0.01 * torch.rand(layers["X"].n, layers["E"].n),
wmax=0.02,
norm=0.01 * layers["X"].n,
)
# Excitatory -> readout.
exc_readout_conn = Connection(
source=layers["E"],
target=layers["R"],
w=0.01 * torch.rand(layers["E"].n, layers["R"].n),
update_rule=Hebbian,
nu=[1e-2, 1e-2],
norm=0.5 * layers["E"].n,
)
# Spike recordings for all layers.
spikes = {}
for layer in layers:
spikes[layer] = Monitor(layers[layer], ["s"], time=plot_interval)
# Voltage recordings for excitatory and readout layers.
voltages = {}
for layer in set(layers.keys()) - {"X"}:
voltages[layer] = Monitor(layers[layer], ["v"], time=plot_interval)
# Add all layers and connections to the network.
for layer in layers:
network.add_layer(layers[layer], name=layer)
network.add_connection(input_exc_conn, source="X", target="E")
network.add_connection(exc_readout_conn, source="E", target="R")
# Add all monitors to the network.
for layer in layers:
network.add_monitor(spikes[layer], name="%s_spikes" % layer)
if layer in voltages:
network.add_monitor(voltages[layer], name="%s_voltages" % layer)
# Load the Breakout environment.
environment = GymEnvironment("BreakoutDeterministic-v4")
environment.reset()
pipeline = EnvironmentPipeline(
network,
environment,
encoding=bernoulli,
time=1,
history=5,
delta=10,
plot_interval=plot_interval,
print_interval=print_interval,
render_interval=render_interval,
action_function=select_multinomial,
output="R",
)
total = 0
rewards = []
avg_rewards = []
lengths = []
avg_lengths = []
i = 0
try:
while i < n:
result = pipeline.env_step()
pipeline.step(result)
is_done = result[2]
if is_done:
pipeline.reset_state_variables()
i += 1
except KeyboardInterrupt:
environment.close()
| bindsnet_master/examples/breakout/random_network_baseline.py | 3,724 | Build network. Layers of neurons. Input layer Excitatory layer Readout layer Connections between layers. Input -> excitatory. Excitatory -> readout. Spike recordings for all layers. Voltage recordings for excitatory and readout layers. Add all layers and connections to the network. Add all monitors to the network. Load the Breakout environment. | 346 | en | 0.789972 |
"""
Ensemble the predictions from different model outputs.
"""
import argparse
import json
import pickle
import numpy as np
from collections import Counter
from data.loader import DataLoader
from utils import scorer, constant
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('pred_files', nargs='+', help='A list of prediction files written by eval.py.')
parser.add_argument('--data_dir', default='dataset/tacred')
parser.add_argument('--dataset', default='test', help='Evaluate on dev or test set.')
parser.add_argument('--weights', default='')
args = parser.parse_args()
return args
def main():
args = parse_args()
print("Loading data file...")
filename = args.data_dir + '/{}.json'.format(args.dataset)
with open(filename, 'r') as infile:
data = json.load(infile, encoding='utf8')
labels = [d['relation'] for d in data]
# read predictions
print("Loading {} prediction files...".format(len(args.pred_files)))
scores_list = []
for path in args.pred_files:
with open(path, 'rb') as infile:
scores = pickle.load(infile)
scores_list += [scores]
print("Calculating ensembled predictions...")
predictions = []
scores_by_examples = list(zip(*scores_list))
assert len(scores_by_examples) == len(data)
for scores in scores_by_examples:
if len(args.weights) == 0:
pred = ensemble(scores)
else:
pred = weight_sum(scores, args.weights)
predictions += [pred]
id2label = dict([(v,k) for k,v in constant.LABEL_TO_ID.items()])
predictions = [id2label[p] for p in predictions]
scorer.score(labels, predictions, verbose=True)
def ensemble(scores):
"""
Ensemble by majority vote.
"""
c = Counter()
for probs in zip(scores):
idx = int(np.argmax(np.array(probs)))
c.update([idx])
best = c.most_common(1)[0][0]
return best
def weight_sum(scores, weights):
weights = list(map(lambda x: float(x), weights.split(' ')))
aggregate_scores = np.zeros(len(scores[0]))
for model_scores, weight in zip(scores, weights):
scores_weights = np.array(model_scores) * weight
aggregate_scores += scores_weights
best = int(np.argmax(aggregate_scores))
return best
if __name__ == '__main__':
main()
| ensemble.py | 2,365 | Ensemble by majority vote.
Ensemble the predictions from different model outputs.
read predictions | 100 | en | 0.867509 |
import os
import sys
import shutil
import subprocess
from config import rfam_local as conf
from config import gen_config as gc
from utils import genome_search_utils as gsu
# ------------------------------------------------------------------------
def split_genome_to_chunks(updir, upid):
"""
updir:
upid:
return:
"""
# get updir location
upid_fasta = os.path.join(updir, upid + '.fa')
seq_chunks_dir = os.path.join(updir, "search_chunks")
if not os.path.exists(seq_chunks_dir):
os.mkdir(seq_chunks_dir)
os.chmod(seq_chunks_dir, 0777)
# check if we need to split the seq_file
if gsu.count_nucleotides_in_fasta(upid_fasta) >= gc.SPLIT_SIZE:
# split sequence file into smalled chunks
gsu.split_seq_file(upid_fasta, gc.SPLIT_SIZE, dest_dir=seq_chunks_dir)
# now index the fasta files
seq_files = os.listdir(seq_chunks_dir)
for seq_file in seq_files:
seq_file_loc = os.path.join(seq_chunks_dir, seq_file)
cmd = "%s --index %s" % (conf.ESL_SFETCH, seq_file_loc)
subprocess.call(cmd, shell=True)
# for input consistency if the sequence file is small, copy it in the
# search_chunks directory
else:
# copy file
shutil.copyfile(upid_fasta, os.path.join(seq_chunks_dir,
upid + '.fa'))
# index file
cmd = "%s --index %s" % (conf.ESL_SFETCH, os.path.join(seq_chunks_dir,
upid + '.fa'))
subprocess.call(cmd, shell=True)
# ------------------------------------------------------------------------
if __name__ == '__main__':
project_dir = sys.argv[1]
# this can be a file of upids or a upid string UPXXXXXXXX
upid_input = sys.argv[2]
if os.path.isfile(upid_input):
fp = open(upid_input, 'r')
upids = [x.strip() for x in fp]
fp.close()
for upid in upids:
suffix = upid[-3:]
subdir_loc = os.path.join(project_dir, suffix)
updir_loc = os.path.join(subdir_loc, upid)
split_genome_to_chunks(updir_loc, upid)
else:
# get updir location and subdir
suffix = upid_input[-3:]
subdir_loc = os.path.join(project_dir, suffix)
updir_loc = os.path.join(subdir_loc, upid_input)
split_genome_to_chunks(updir_loc, upid_input)
| scripts/support/split_genomes.py | 2,532 | ------------------------------------------------------------------------ get updir location check if we need to split the seq_file split sequence file into smalled chunks now index the fasta files for input consistency if the sequence file is small, copy it in the search_chunks directory copy file index file ------------------------------------------------------------------------ this can be a file of upids or a upid string UPXXXXXXXX get updir location and subdir | 468 | en | 0.619726 |
#!/usr/bin/env python
"""Pathspecs are methods of specifying the path on the client.
The GRR client has a number of drivers to virtualize access to different objects
to create a Virtual File System (VFS) abstraction. These are called 'VFS
Handlers' and they provide typical file-like operations (e.g. read, seek, tell
and stat). It is possible to recursively apply different drivers in the correct
order to arrive at a certain file like object. In order to specify how drivers
should be applied we use 'Path Specifications' or pathspec.
Each VFS handler is constructed from a previous handler and a pathspec. The
pathspec is just a collection of arguments which make sense to the specific VFS
handler. The type of the handler is carried by the pathtype parameter.
On the server the PathSpec is represented as a PathSpec object, and stored
as an attribute of the AFF4 object. This module defines this abstraction.
"""
import itertools
import posixpath
import re
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.rdfvalues import standard as rdf_standard
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import jobs_pb2
from grr.server import artifact_utils
# Matches client-attribute interpolations of the form %%attribute.name%%.
INTERPOLATED_REGEX = re.compile(r"%%([^%]+?)%%")
# Grouping pattern: e.g. {test.exe,foo.doc,bar.txt}
GROUPING_PATTERN = re.compile("{([^}]+,[^}]+)}")
class PathSpec(rdf_structs.RDFProtoStruct):
  """A path specification.

  The pathspec protobuf is a recursive protobuf which contains components. This
  class makes it easier to manipulate these structures by providing useful
  helpers.

  Each component is itself a PathSpec; the chain is linked through the
  `nested_path` field, and iteration walks that chain from the current node.
  """
  protobuf = jobs_pb2.PathSpec
  rdf_deps = [
      rdfvalue.ByteSize,
      "PathSpec",  # TODO(user): recursive definition.
  ]

  def CopyConstructor(self, other):
    # Deep-copy the raw protobuf data so this object and `other` do not share
    # mutable state.
    # pylint: disable=protected-access
    self.SetRawData(other._CopyRawData())
    # pylint: enable=protected-access
    self.age = other.age

  def __len__(self):
    """Return the total number of path components."""
    # `i` stays -1 when the pathspec is empty, so an empty chain has length 0.
    i = -1
    for i, _ in enumerate(self):
      pass
    return i + 1

  def __getitem__(self, item):
    # Linear scan of the nested chain; pathspecs are short so O(n) indexing
    # is acceptable. Only non-negative integer indices are supported.
    for i, element in enumerate(self):
      if i == item:
        return element
    raise IndexError("Pathspec index (%s) out of range" % item)

  def __iter__(self):
    """Only iterate over all components from the current pointer."""
    element = self
    while element.HasField("pathtype"):
      yield element
      if element.HasField("nested_path"):
        element = element.nested_path
      else:
        break

  def Insert(self, index, rdfpathspec=None, **kwarg):
    """Insert a single component at index."""
    if rdfpathspec is None:
      rdfpathspec = self.__class__(**kwarg)
    if index == 0:
      # Copy ourselves to a temp copy.
      nested_proto = self.__class__()
      nested_proto.SetRawData(self.GetRawData())
      # Replace ourselves with the new object.
      self.SetRawData(rdfpathspec.GetRawData())
      # Append the temp copy to the end.
      self.last.nested_path = nested_proto
    else:
      previous = self[index - 1]
      # Splice the new pathspec between `previous` and its old successor.
      rdfpathspec.last.nested_path = previous.nested_path
      previous.nested_path = rdfpathspec

  def Append(self, component=None, **kwarg):
    """Append a new pathspec component to this pathspec."""
    if component is None:
      component = self.__class__(**kwarg)
    if self.HasField("pathtype"):
      self.last.nested_path = component
    else:
      # We are empty: become the component ourselves instead of nesting it.
      for k, v in kwarg.items():
        setattr(self, k, v)
      self.SetRawData(component.GetRawData())
    return self

  def CollapsePath(self):
    # Join the `path` fields of every component into one path string.
    return utils.JoinPath(*[x.path for x in self])

  def Pop(self, index=0):
    """Removes and returns the pathspec at the specified index."""
    if index < 0:
      index += len(self)
    if index == 0:
      # Popping the head: hand back a copy of ourselves and become our own
      # successor.
      result = self.__class__()
      result.SetRawData(self.GetRawData())
      self.SetRawData(self.nested_path.GetRawData())
    else:
      # Get the raw protobufs for the previous member.
      previous = self[index - 1]
      result = previous.nested_path
      # Manipulate the previous members protobuf to patch the next component in.
      previous.nested_path = result.nested_path
    # Detach the popped component from the rest of the chain.
    result.nested_path = None
    return result

  @property
  def first(self):
    return self

  @property
  def last(self):
    # An unset/empty pathspec is its own last element.
    if self.HasField("pathtype") and self.pathtype != self.PathType.UNSET:
      return list(self)[-1]
    return self

  def Dirname(self):
    """Get a new copied object with only the directory path."""
    result = self.Copy()
    while 1:
      last_directory = posixpath.dirname(result.last.path)
      if last_directory != "/" or len(result) <= 1:
        result.last.path = last_directory
        # Make sure to clear the inode information.
        result.last.inode = None
        break
      # The deepest component collapsed to "/": drop it and keep reducing.
      result.Pop(-1)
    return result

  def Basename(self):
    # Walk components from deepest to shallowest; return the first non-empty
    # basename found.
    for component in reversed(self):
      basename = posixpath.basename(component.path)
      if basename:
        return basename
    return ""

  def Validate(self):
    if not self.HasField("pathtype") or self.pathtype == self.PathType.UNSET:
      raise ValueError("No path type set in PathSpec.")

  # Maps PathType enum values to the AFF4 namespace prefix used in AFF4Path().
  AFF4_PREFIXES = {
      0: "/fs/os",  # PathSpec.PathType.OS
      1: "/fs/tsk",  # PathSpec.PathType.TSK
      2: "/registry",  # PathSpec.PathType.REGISTRY
      3: "/devices/memory",  # PathSpec.PathType.MEMORY
      4: "/temp",  # PathSpec.PathType.TMPFILE
  }

  def AFF4Path(self, client_urn):
    """Returns the AFF4 URN this pathspec will be stored under.

    Args:
      client_urn: A ClientURN.

    Returns:
      A urn that corresponds to this pathspec.

    Raises:
      ValueError: If pathspec is not of the correct type.
    """
    # If the first level is OS and the second level is TSK its probably a mount
    # point resolution. We map it into the tsk branch. For example if we get:
    # path: \\\\.\\Volume{1234}\\
    # pathtype: OS
    # mount_point: /c:/
    # nested_path {
    #    path: /windows/
    #    pathtype: TSK
    # }
    # We map this to aff4://client_id/fs/tsk/\\\\.\\Volume{1234}\\/windows/
    if not self.HasField("pathtype"):
      raise ValueError("Can't determine AFF4 path without a valid pathtype.")
    first_component = self[0]
    dev = first_component.path
    if first_component.HasField("offset"):
      # We divide here just to get prettier numbers in the GUI
      dev += ":" + str(first_component.offset / 512)
    if (len(self) > 1 and first_component.pathtype == PathSpec.PathType.OS and
        self[1].pathtype == PathSpec.PathType.TSK):
      result = [self.AFF4_PREFIXES[PathSpec.PathType.TSK], dev]
      # Skip the top level pathspec.
      start = 1
    else:
      # For now just map the top level prefix based on the first pathtype
      result = [self.AFF4_PREFIXES[first_component.pathtype]]
      start = 0
    # Iterating a single component yields it and everything nested beneath it
    # (see __iter__), so this walks components `start` onwards.
    for p in self[start]:
      component = p.path
      # The following encode different pathspec properties into the AFF4 path in
      # such a way that unique files on the client are mapped to unique URNs in
      # the AFF4 space. Note that this transformation does not need to be
      # reversible since we always use the PathSpec when accessing files on the
      # client.
      if p.HasField("offset"):
        component += ":" + str(p.offset / 512)
      # Support ADS names.
      if p.HasField("stream_name"):
        component += ":" + p.stream_name
      result.append(component)
    return client_urn.Add("/".join(result))
class GlobExpression(rdfvalue.RDFString):
  """A glob expression for a client path.

  A glob expression represents a set of regular expressions which match files on
  the client. The Glob expression supports the following expansions:

  1) Client attribute expansions are surrounded with %% characters. They will be
     expanded from the client AFF4 object.
  2) Groupings are collections of alternates. e.g. {foo.exe,bar.sys}
  3) Wild cards like * and ?
  """
  context_help_url = "investigating-with-grr/flows/specifying-file-paths.html"

  RECURSION_REGEX = re.compile(r"\*\*(\d*)")

  def Validate(self):
    """GlobExpression is valid."""
    recursion_count = len(self.RECURSION_REGEX.findall(self._value))
    if recursion_count > 1:
      raise ValueError("Only one ** is permitted per path: %s." % self._value)

  def Interpolate(self, client=None):
    """Yield concrete patterns with kb attributes and groupings expanded."""
    kb = client.Get(client.Schema.KNOWLEDGE_BASE)
    for raw_pattern in artifact_utils.InterpolateKbAttributes(self._value, kb):
      # Normalize the component path (this allows us to resolve ../
      # sequences).
      normalized = utils.NormalizePath(raw_pattern.replace("\\", "/"))
      for expanded in self.InterpolateGrouping(normalized):
        yield expanded

  def InterpolateGrouping(self, pattern):
    """Interpolate inline globbing groups."""
    segments = []
    cursor = 0
    for group_match in GROUPING_PATTERN.finditer(pattern):
      # Literal text between the previous group (or the start) and this one.
      segments.append([pattern[cursor:group_match.start()]])
      # Expand the group body into its set of alternatives.
      segments.append(set(group_match.group(1).split(",")))
      cursor = group_match.end()
    # Trailing literal text after the last group.
    segments.append([pattern[cursor:]])
    # The cartesian product over all segments enumerates every combination.
    for combination in itertools.product(*segments):
      yield u"".join(combination)

  def _ReplaceRegExGrouping(self, grouping):
    # {a,b,c} -> (a|b|c), with each alternative escaped literally.
    escaped = [re.escape(alt) for alt in grouping.group(1).split(",")]
    return "(" + "|".join(escaped) + ")"

  def _ReplaceRegExPart(self, part):
    # Translate a single split token of the glob into regex syntax.
    if part == "**/":
      return "(?:.*\\/)?"
    if part == "*":
      return "[^\\/]*"
    if part == "?":
      return "[^\\/]"
    if GROUPING_PATTERN.match(part):
      return GROUPING_PATTERN.sub(self._ReplaceRegExGrouping, part)
    return re.escape(part)

  # Splits a glob into special tokens ({a,b} groups, ?, **/ and *) and the
  # literal text between them; equivalent to
  # "(" + "|".join(["{[^}]+,[^}]+}", "\\?", "\\*\\*\\/?", "\\*"]) + ")".
  REGEX_SPLIT_PATTERN = re.compile(r"({[^}]+,[^}]+}|\?|\*\*\/?|\*)")

  def AsRegEx(self):
    """Return the current glob as a simple regex.

    Note: No interpolation is performed.

    Returns:
      A RegularExpression() object.
    """
    pieces = self.__class__.REGEX_SPLIT_PATTERN.split(self._value)
    regex_body = "".join(map(self._ReplaceRegExPart, pieces))
    return rdf_standard.RegularExpression("(?i)\\A%s\\Z" % regex_body)
| grr/lib/rdfvalues/paths.py | 10,292 | A glob expression for a client path.
A glob expression represents a set of regular expressions which match files on
the client. The Glob expression supports the following expansions:
1) Client attribute expansions are surrounded with %% characters. They will be
expanded from the client AFF4 object.
2) Groupings are collections of alternates. e.g. {foo.exe,bar.sys}
3) Wild cards like * and ?
A path specification.
The pathspec protobuf is a recursive protobuf which contains components. This
class makes it easier to manipulate these structures by providing useful
helpers.
Returns the AFF4 URN this pathspec will be stored under.
Args:
client_urn: A ClientURN.
Returns:
A urn that corresponds to this pathspec.
Raises:
ValueError: If pathspec is not of the correct type.
Append a new pathspec component to this pathspec.
Return the current glob as a simple regex.
Note: No interpolation is performed.
Returns:
A RegularExpression() object.
Get a new copied object with only the directory path.
Insert a single component at index.
Interpolate inline globbing groups.
Removes and returns the pathspec at the specified index.
GlobExpression is valid.
Only iterate over all components from the current pointer.
Return the total number of path components.
Pathspecs are methods of specifying the path on the client.
The GRR client has a number of drivers to virtualize access to different objects
to create a Virtual File System (VFS) abstraction. These are called 'VFS
Handlers' and they provide typical file-like operations (e.g. read, seek, tell
and stat). It is possible to recursively apply different drivers in the correct
order to arrive at a certain file like object. In order to specify how drivers
should be applied we use 'Path Specifications' or pathspec.
Each VFS handler is constructed from a previous handler and a pathspec. The
pathspec is just a collection of arguments which make sense to the specific VFS
handler. The type of the handler is carried by the pathtype parameter.
On the server the PathSpec is represented as a PathSpec object, and stored
as an attribute of the AFF4 object. This module defines this abstraction.
!/usr/bin/env python Grouping pattern: e.g. {test.exe,foo.doc,bar.txt} TODO(user): recursive definition. pylint: disable=protected-access pylint: enable=protected-access Copy ourselves to a temp copy. Replace ourselves with the new object. Append the temp copy to the end. Get the raw protobufs for the previous member. Manipulate the previous members protobuf to patch the next component in. Make sure to clear the inode information. PathSpec.PathType.OS PathSpec.PathType.TSK PathSpec.PathType.REGISTRY PathSpec.PathType.MEMORY PathSpec.PathType.TMPFILE If the first level is OS and the second level is TSK its probably a mount point resolution. We map it into the tsk branch. For example if we get: path: \\\\.\\Volume{1234}\\ pathtype: OS mount_point: /c:/ nested_path { path: /windows/ pathtype: TSK } We map this to aff4://client_id/fs/tsk/\\\\.\\Volume{1234}\\/windows/ We divide here just to get prettier numbers in the GUI Skip the top level pathspec. For now just map the top level prefix based on the first pathtype The following encode different pathspec properties into the AFF4 path in such a way that unique files on the client are mapped to unique URNs in the AFF4 space. Note that this transformation does not need to be reversible since we always use the PathSpec when accessing files on the client. Support ADS names. Normalize the component path (this allows us to resolve ../ sequences). Expand the attribute into the set of possibilities: Now calculate the cartesian products of all these sets to form all strings. | 3,709 | en | 0.827563 |
#!/usr/bin/env python3
# Copyright (c) 2020 The Garliccoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''Test generateblock rpc.
'''
from test_framework.test_framework import GarliccoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class GenerateBlockTest(GarliccoinTestFramework):
    """Functional test for the generateblock RPC.

    Covers block generation to an address, to descriptors (addr/combo),
    inclusion of mempool txids and raw transactions, and the error paths.
    """

    def set_test_params(self):
        self.num_nodes = 1

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        node = self.nodes[0]

        # NOTE: the local was renamed from `hash` to `block_hash` so it no
        # longer shadows the builtin hash().
        self.log.info('Generate an empty block to address')
        address = node.getnewaddress()
        block_hash = node.generateblock(output=address, transactions=[])['hash']
        block = node.getblock(blockhash=block_hash, verbose=2)
        assert_equal(len(block['tx']), 1)
        assert_equal(block['tx'][0]['vout'][0]['scriptPubKey']['address'], address)

        self.log.info('Generate an empty block to a descriptor')
        block_hash = node.generateblock('addr(' + address + ')', [])['hash']
        # `verbosity` here vs `verbose` above — presumably exercising both
        # named-argument aliases of getblock; confirm before unifying.
        block = node.getblock(blockhash=block_hash, verbosity=2)
        assert_equal(len(block['tx']), 1)
        assert_equal(block['tx'][0]['vout'][0]['scriptPubKey']['address'], address)

        self.log.info('Generate an empty block to a combo descriptor with compressed pubkey')
        combo_key = '0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798'
        combo_address = 'bcrt1qw508d6qejxtdg4y5r3zarvary0c5xw7kygt080'
        block_hash = node.generateblock('combo(' + combo_key + ')', [])['hash']
        block = node.getblock(block_hash, 2)
        assert_equal(len(block['tx']), 1)
        assert_equal(block['tx'][0]['vout'][0]['scriptPubKey']['address'], combo_address)

        self.log.info('Generate an empty block to a combo descriptor with uncompressed pubkey')
        combo_key = '0408ef68c46d20596cc3f6ddf7c8794f71913add807f1dc55949fa805d764d191c0b7ce6894c126fce0babc6663042f3dde9b0cf76467ea315514e5a6731149c67'
        combo_address = 'mkc9STceoCcjoXEXe6cm66iJbmjM6zR9B2'
        block_hash = node.generateblock('combo(' + combo_key + ')', [])['hash']
        block = node.getblock(block_hash, 2)
        assert_equal(len(block['tx']), 1)
        assert_equal(block['tx'][0]['vout'][0]['scriptPubKey']['address'], combo_address)

        # Generate 110 blocks to spend
        node.generatetoaddress(110, address)

        # Generate some extra mempool transactions to verify they don't get mined
        for _ in range(10):
            node.sendtoaddress(address, 0.001)

        self.log.info('Generate block with txid')
        txid = node.sendtoaddress(address, 1)
        block_hash = node.generateblock(address, [txid])['hash']
        block = node.getblock(block_hash, 1)
        assert_equal(len(block['tx']), 2)
        assert_equal(block['tx'][1], txid)

        self.log.info('Generate block with raw tx')
        utxos = node.listunspent(addresses=[address])
        raw = node.createrawtransaction([{'txid':utxos[0]['txid'], 'vout':utxos[0]['vout']}],[{address:1}])
        signed_raw = node.signrawtransactionwithwallet(raw)['hex']
        block_hash = node.generateblock(address, [signed_raw])['hash']
        block = node.getblock(block_hash, 1)
        assert_equal(len(block['tx']), 2)
        txid = block['tx'][1]
        assert_equal(node.gettransaction(txid)['hex'], signed_raw)

        self.log.info('Fail to generate block with out of order txs')
        raw1 = node.createrawtransaction([{'txid':txid, 'vout':0}],[{address:0.9999}])
        signed_raw1 = node.signrawtransactionwithwallet(raw1)['hex']
        txid1 = node.sendrawtransaction(signed_raw1)
        raw2 = node.createrawtransaction([{'txid':txid1, 'vout':0}],[{address:0.999}])
        signed_raw2 = node.signrawtransactionwithwallet(raw2)['hex']
        assert_raises_rpc_error(-25, 'TestBlockValidity failed: bad-txns-inputs-missingorspent', node.generateblock, address, [signed_raw2, txid1])

        self.log.info('Fail to generate block with txid not in mempool')
        missing_txid = '0000000000000000000000000000000000000000000000000000000000000000'
        assert_raises_rpc_error(-5, 'Transaction ' + missing_txid + ' not in mempool.', node.generateblock, address, [missing_txid])

        self.log.info('Fail to generate block with invalid raw tx')
        invalid_raw_tx = '0000'
        assert_raises_rpc_error(-22, 'Transaction decode failed for ' + invalid_raw_tx, node.generateblock, address, [invalid_raw_tx])

        self.log.info('Fail to generate block with invalid address/descriptor')
        assert_raises_rpc_error(-5, 'Invalid address or descriptor', node.generateblock, '1234', [])

        self.log.info('Fail to generate block with a ranged descriptor')
        ranged_descriptor = 'pkh(tpubD6NzVbkrYhZ4XgiXtGrdW5XDAPFCL9h7we1vwNCpn8tGbBcgfVYjXyhWo4E1xkh56hjod1RhGjxbaTLV3X4FyWuejifB9jusQ46QzG87VKp/0/*)'
        assert_raises_rpc_error(-8, 'Ranged descriptor not accepted. Maybe pass through deriveaddresses first?', node.generateblock, ranged_descriptor, [])

        self.log.info('Fail to generate block with a descriptor missing a private key')
        child_descriptor = 'pkh(tpubD6NzVbkrYhZ4XgiXtGrdW5XDAPFCL9h7we1vwNCpn8tGbBcgfVYjXyhWo4E1xkh56hjod1RhGjxbaTLV3X4FyWuejifB9jusQ46QzG87VKp/0\'/0)'
        assert_raises_rpc_error(-5, 'Cannot derive script without private keys', node.generateblock, child_descriptor, [])
# Entry point: run the functional test when this file is executed directly.
if __name__ == '__main__':
    GenerateBlockTest().main()
| test/functional/rpc_generateblock.py | 5,521 | Test generateblock rpc.
!/usr/bin/env python3 Copyright (c) 2020 The Garliccoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. Generate 110 blocks to spend Generate some extra mempool transactions to verify they don't get mined | 330 | en | 0.635099 |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class cluster_id(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-common-def - based on the path /routing-system/router/router-bgp/router-bgp-attributes/cluster-id. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  NOTE: auto-generated code — do not hand-edit; regenerate from the YANG
  model instead. The container models a BGP route-reflector cluster-id as a
  YANG choice between a 32-bit value (cluster-id-value) and an IPv4 address
  (cluster-id-ipv4-address); setting one alternative clears the other.
  """
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__cluster_id_value','__cluster_id_ipv4_address',)

  _yang_name = 'cluster-id'
  _rest_name = 'cluster-id'

  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    # Resolve the XPath helper: an explicit kwarg wins, then the parent's
    # helper; otherwise path registration is disabled.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False

    # Extension methods are resolved with the same scheme as the path helper.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False

    # Leaf instantiation; the restriction dicts encode the YANG type
    # constraints (IPv4 dotted-quad pattern, 1..65535 numeric range).
    self.__cluster_id_ipv4_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="cluster-id-ipv4-address", rest_name="ipv4-address", parent=self, choice=(u'ch-cluster-id', u'ca-cluster-id-ipv4-address'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-Reflector Cluster-ID as IP address', u'alt-name': u'ipv4-address'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='inet:ipv4-address', is_config=True)
    self.__cluster_id_value = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), is_leaf=True, yang_name="cluster-id-value", rest_name="id", parent=self, choice=(u'ch-cluster-id', u'ca-cluster-id'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-Reflector Cluster-ID as 32 bit quantity', u'cli-drop-node-name': None, u'alt-name': u'id'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='decimal-number', is_config=True)

    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      # Copy-construct from an object exposing the same pyangbind elements,
      # skipping elements the source never changed.
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    # YANG data-model path of this container, built from the parent chain.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'routing-system', u'router', u'router-bgp', u'router-bgp-attributes', u'cluster-id']

  def _rest_path(self):
    # REST API path; differs from the YANG path because some nodes have no
    # REST name of their own.
    if hasattr(self, "_parent"):
      if self._rest_name:
        return self._parent._rest_path()+[self._rest_name]
      else:
        return self._parent._rest_path()
    else:
      return [u'router', u'bgp', u'cluster-id']

  def _get_cluster_id_value(self):
    """
    Getter method for cluster_id_value, mapped from YANG variable /routing_system/router/router_bgp/router_bgp_attributes/cluster_id/cluster_id_value (decimal-number)
    """
    return self.__cluster_id_value

  def _set_cluster_id_value(self, v, load=False):
    """
    Setter method for cluster_id_value, mapped from YANG variable /routing_system/router/router_bgp/router_bgp_attributes/cluster_id/cluster_id_value (decimal-number)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_cluster_id_value is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_cluster_id_value() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Coerce/validate the supplied value against the leaf's YANG type.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), is_leaf=True, yang_name="cluster-id-value", rest_name="id", parent=self, choice=(u'ch-cluster-id', u'ca-cluster-id'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-Reflector Cluster-ID as 32 bit quantity', u'cli-drop-node-name': None, u'alt-name': u'id'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='decimal-number', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """cluster_id_value must be of a type compatible with decimal-number""",
          'defined-type': "brocade-bgp:decimal-number",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), is_leaf=True, yang_name="cluster-id-value", rest_name="id", parent=self, choice=(u'ch-cluster-id', u'ca-cluster-id'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-Reflector Cluster-ID as 32 bit quantity', u'cli-drop-node-name': None, u'alt-name': u'id'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='decimal-number', is_config=True)""",
        })
    self.__cluster_id_value = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_cluster_id_value(self):
    # Restore the leaf to a fresh, unchanged default instance.
    self.__cluster_id_value = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), is_leaf=True, yang_name="cluster-id-value", rest_name="id", parent=self, choice=(u'ch-cluster-id', u'ca-cluster-id'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-Reflector Cluster-ID as 32 bit quantity', u'cli-drop-node-name': None, u'alt-name': u'id'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='decimal-number', is_config=True)

  def _get_cluster_id_ipv4_address(self):
    """
    Getter method for cluster_id_ipv4_address, mapped from YANG variable /routing_system/router/router_bgp/router_bgp_attributes/cluster_id/cluster_id_ipv4_address (inet:ipv4-address)
    """
    return self.__cluster_id_ipv4_address

  def _set_cluster_id_ipv4_address(self, v, load=False):
    """
    Setter method for cluster_id_ipv4_address, mapped from YANG variable /routing_system/router/router_bgp/router_bgp_attributes/cluster_id/cluster_id_ipv4_address (inet:ipv4-address)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_cluster_id_ipv4_address is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_cluster_id_ipv4_address() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Coerce/validate the supplied value against the leaf's YANG type.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="cluster-id-ipv4-address", rest_name="ipv4-address", parent=self, choice=(u'ch-cluster-id', u'ca-cluster-id-ipv4-address'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-Reflector Cluster-ID as IP address', u'alt-name': u'ipv4-address'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='inet:ipv4-address', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """cluster_id_ipv4_address must be of a type compatible with inet:ipv4-address""",
          'defined-type': "inet:ipv4-address",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="cluster-id-ipv4-address", rest_name="ipv4-address", parent=self, choice=(u'ch-cluster-id', u'ca-cluster-id-ipv4-address'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-Reflector Cluster-ID as IP address', u'alt-name': u'ipv4-address'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='inet:ipv4-address', is_config=True)""",
        })
    self.__cluster_id_ipv4_address = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_cluster_id_ipv4_address(self):
    # Restore the leaf to a fresh, unchanged default instance.
    self.__cluster_id_ipv4_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="cluster-id-ipv4-address", rest_name="ipv4-address", parent=self, choice=(u'ch-cluster-id', u'ca-cluster-id-ipv4-address'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-Reflector Cluster-ID as IP address', u'alt-name': u'ipv4-address'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='inet:ipv4-address', is_config=True)

  # Public properties wrapping the private getters/setters (old-style
  # __builtin__.property because this is Python 2 generated code).
  cluster_id_value = __builtin__.property(_get_cluster_id_value, _set_cluster_id_value)
  cluster_id_ipv4_address = __builtin__.property(_get_cluster_id_ipv4_address, _set_cluster_id_ipv4_address)

  # YANG choice metadata: the two leaves are mutually exclusive cases of the
  # 'ch-cluster-id' choice.
  __choices__ = {u'ch-cluster-id': {u'ca-cluster-id': [u'cluster_id_value'], u'ca-cluster-id-ipv4-address': [u'cluster_id_ipv4_address']}}
  _pyangbind_elements = {'cluster_id_value': cluster_id_value, 'cluster_id_ipv4_address': cluster_id_ipv4_address, }
| pybind/slxos/v17r_1_01a/routing_system/router/router_bgp/router_bgp_attributes/cluster_id/__init__.py | 11,443 | This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-common-def - based on the path /routing-system/router/router-bgp/router-bgp-attributes/cluster-id. Each member element of
the container is represented as a class variable - with a specific
YANG type.
Getter method for cluster_id_ipv4_address, mapped from YANG variable /routing_system/router/router_bgp/router_bgp_attributes/cluster_id/cluster_id_ipv4_address (inet:ipv4-address)
Getter method for cluster_id_value, mapped from YANG variable /routing_system/router/router_bgp/router_bgp_attributes/cluster_id/cluster_id_value (decimal-number)
Setter method for cluster_id_ipv4_address, mapped from YANG variable /routing_system/router/router_bgp/router_bgp_attributes/cluster_id/cluster_id_ipv4_address (inet:ipv4-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_cluster_id_ipv4_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cluster_id_ipv4_address() directly.
Setter method for cluster_id_value, mapped from YANG variable /routing_system/router/router_bgp/router_bgp_attributes/cluster_id/cluster_id_value (decimal-number)
If this variable is read-only (config: false) in the
source YANG file, then _set_cluster_id_value is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cluster_id_value() directly. | 1,476 | en | 0.646565 |
from django.contrib import messages, auth
from django.contrib.auth.decorators import login_required
from payments.forms import MakePaymentForm
from django.shortcuts import render, get_object_or_404, redirect
from django.core.urlresolvers import reverse
from django.template.context_processors import csrf
from django.conf import settings
from services.models import Service
import stripe
stripe.api_key = settings.STRIPE_SECRET
@login_required(login_url="/accounts/login?next=payments/buy_now")
def buy_now(request, id):
    """Take a Stripe card payment for the Service identified by `id`.

    GET renders the payment form; POST validates the form and attempts to
    charge the card. On success the user is redirected to the services list;
    on any failure the form is re-rendered with an error message.

    Fixes over the previous revision: `total` was undefined (NameError on
    every charge attempt), and `customer` was referenced after a caught
    CardError (UnboundLocalError), with the declined path falling through
    to the success check.
    """
    # Fetch the service once; it is needed both to price the charge and to
    # render the template (kept under the original 'services' context key).
    service = get_object_or_404(Service, pk=id)
    if request.method == 'POST':
        form = MakePaymentForm(request.POST)
        if form.is_valid():
            # NOTE(review): assumes Service exposes a `price` field in major
            # currency units — confirm against the Service model.
            total = service.price
            customer = None
            try:
                customer = stripe.Charge.create(
                    # Stripe expects the amount in the smallest currency unit.
                    amount=int(total * 100),
                    currency="EUR",
                    description=request.user.email,
                    card=form.cleaned_data['stripe_id'],
                )
            except stripe.error.CardError:
                messages.error(request, "Your card was declined!")
            if customer is not None:
                if customer.paid:
                    messages.success(request, "You have successfully paid")
                    return redirect(reverse('all_services'))
                messages.error(request, "Unable to take payment")
        else:
            messages.error(request, "We were unable to take a payment with that card!")
    else:
        form = MakePaymentForm()
    args = {'form': form, 'publishable': settings.STRIPE_PUBLISHABLE, 'services': service}
    args.update(csrf(request))
    return render(request, 'pay.html', args)
#!/usr/bin/env python2.7
# William Lam
# wwww.virtuallyghetto.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pyVim.connect as connect
import getpass
import requests
import ssl
# Disable TLS certificate verification globally (lab/self-signed vCenter).
ssl._create_default_https_context = ssl._create_unverified_context
# Snippet borrowed from Michael Rice
# https://gist.github.com/michaelrice/a6794a017e349fc65d01
# Silence urllib3's InsecureRequestWarning spam for unverified HTTPS.
requests.packages.urllib3.disable_warnings()
try:
    # Probe whether this Python build exposes the unverified-context hook
    # (added in 2.7.9); older interpreters raise AttributeError.
    _create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
    # Legacy Python that doesn't verify HTTPS certificates by default
    pass
else:
    # Handle target environment that doesn't support HTTPS verification
    # NOTE(review): redundant with the unconditional assignment above,
    # which already assumes the attribute exists.
    ssl._create_default_https_context = _create_unverified_https_context
# Demonstrates configuring the Message of the Day (MOTD) on vCenter Server
# Example output:
# > logged in to vcsa
# > Setting vCenter Server MOTD to "Hello from virtuallyGhetto"
# > logout

parser = argparse.ArgumentParser()
parser.add_argument('-s', '--host',
                    required=True,
                    action='store',
                    help='Remote host to connect to')
parser.add_argument('-u', '--user',
                    required=True,
                    action='store',
                    help='User name to use when connecting to host')
parser.add_argument('-p', '--password',
                    required=False,
                    action='store',
                    help='Password to use when connecting to host')
parser.add_argument('-o', '--port',
                    required=False,
                    action='store',
                    help="port to use, default 443", default=443)
parser.add_argument('-m', '--message',
                    required=True,
                    action='store',
                    help='Message to be used for VC MOTD')
args = parser.parse_args()

# Fall back to an interactive prompt when -p/--password was not supplied.
if args.password:
    password = args.password
else:
    password = getpass.getpass(
        prompt='Enter password for host %s and user %s: ' %
               (args.host, args.user))

si = connect.SmartConnect(host=args.host,
                          user=args.user,
                          pwd=password,
                          port=int(args.port))

# Portability fix: the original used Python 2-only `print ...` statements.
# Single-argument print() calls produce identical output on Python 2 and 3.
print("logged in to %s" % args.host)
print("Setting vCenter Server MOTD to \"%s\"" % args.message)
si.content.sessionManager.UpdateServiceMessage(message=args.message)
print("logout")
si.content.sessionManager.Logout()
| samples/set_vcenter_motd.py | 2,935 | !/usr/bin/env python2.7 William Lam wwww.virtuallyghetto.com Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Snippet borrowed from Michael Rice https://gist.github.com/michaelrice/a6794a017e349fc65d01 Legacy Python that doesn't verify HTTPS certificates by default Handle target environment that doesn't support HTTPS verification Demonstrates configuring the Message of the Day (MOTD) on vCenter Server Example output: > logged in to vcsa > Setting vCenter Server MOTD to "Hello from virtuallyGhetto" > logout | 984 | en | 0.848623 |
# --------------
# Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Path of the file is stored in the variable path
data = pd.read_csv(path)

# Data Loading: normalize the overall-medal column name.
data.rename(columns={'Total': 'Total_Medals'}, inplace=True)

# Summer or Winter: which season yielded more medals per country.
data['Better_Event'] = np.where(data['Total_Summer'] > data['Total_Winter'], 'Summer', 'Winter')
data['Better_Event'] = np.where(data['Total_Summer'] == data['Total_Winter'], 'Both', data['Better_Event'])
better_event = data['Better_Event'].value_counts().idxmax()
data.head()

# Top 10
data.head(10)

# Top Performing Countries (drop the trailing "Totals" row with [:-1]).
top_countries = data[['Country_Name', 'Total_Summer', 'Total_Winter', 'Total_Medals']]
top_countries = top_countries[:-1]
top_countries


def top_ten(df, col):
    """Return the names of the ten countries with the largest `col` in `df`.

    BUG FIX: the original ignored its `df` argument and always read the
    global `top_countries`; all call sites pass `top_countries`, so using
    `df` keeps results identical while making the helper honest.
    """
    return list(df.nlargest(10, col)['Country_Name'])


top_10_summer = top_ten(top_countries, 'Total_Summer')
top_10_winter = top_ten(top_countries, 'Total_Winter')
top_10 = top_ten(top_countries, 'Total_Medals')

# Countries appearing in all three top-ten lists.
a = set(top_10_summer).intersection(set(top_10_winter))
b = a.intersection(set(top_10))
common = list(b)

# .copy() prevents pandas SettingWithCopyWarning on the Golden_Ratio
# assignments below; the values are unchanged.
summer_df = data[data['Country_Name'].isin(top_10_summer)].copy()
summer_df.head()
winter_df = data[data['Country_Name'].isin(top_10_winter)].copy()
winter_df.head()
top_df = data[data['Country_Name'].isin(top_10)].copy()
top_df.head()

# Plotting top 10
plt.figure(figsize=(10, 10))
plt.bar(summer_df['Country_Name'], summer_df['Total_Summer'])
plt.xticks(rotation=30)
plt.show()

# Best in the world: gold-to-total medal ratios.
summer_df['Golden_Ratio'] = summer_df['Gold_Summer'] / summer_df['Total_Summer']
summer_max_ratio = max(summer_df['Golden_Ratio'])
summer_max_ratio
# NOTE(review): this selects the country with the most summer golds, not the
# one with the highest Golden_Ratio that `summer_max_ratio` suggests;
# kept as-is (same for winter below) to preserve the original result.
summer_country_gold = summer_df.loc[summer_df['Gold_Summer'].idxmax(), 'Country_Name']
summer_country_gold

# BUG FIX: the original computed the winter ratio from summer_df's columns
# (summer_df['Gold_Winter']/summer_df['Total_Winter']), producing misaligned
# values for the winter top-ten; the ratio must come from winter_df itself.
winter_df['Golden_Ratio'] = winter_df['Gold_Winter'] / winter_df['Total_Winter']
winter_max_ratio = max(winter_df['Golden_Ratio'])
winter_country_gold = winter_df.loc[winter_df['Gold_Winter'].idxmax(), 'Country_Name']
winter_country_gold

top_df['Golden_Ratio'] = top_df['Gold_Total'] / top_df['Total_Medals']
top_max_ratio = max(top_df['Golden_Ratio'])
top_country_gold = top_df.loc[top_df['Golden_Ratio'].idxmax(), 'Country_Name']
top_country_gold

# Weighted points: gold=3, silver=2, bronze=1 (drop the "Totals" row).
data_1 = data[:-1].copy()
data_1['Total_Points'] = data_1['Gold_Total'] * 3 + data_1['Silver_Total'] * 2 + data_1['Bronze_Total'] * 1
most_points = max(data_1['Total_Points'])
most_points
best_country = data_1.loc[data_1['Total_Points'].idxmax(), 'Country_Name']
best_country

# Plotting the best
best = data[data['Country_Name'] == best_country]
best
best = best[['Gold_Total', 'Silver_Total', 'Bronze_Total']]
best
best.plot.bar()
plt.xlabel("United States")
plt.ylabel("Medals")
plt.xticks(rotation=45)
| code.py | 2,668 | --------------Importing header filesPath of the file is stored in the variable pathCode starts here Data Loading Summer or Winter Top 10 Plotting top 10 Top Performing Countries Best in the world Plotting the best | 215 | en | 0.65079 |
from functools import partial as curry
from django import forms
from django.utils import timezone
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from pinax.images.models import ImageSet
from mdeditor.fields import MDTextFormField
from .conf import settings
from .models import Post, Revision, Section
from .signals import post_published
from .utils import load_path_attr
# Model field names exposed by AdminPostForm (used as its Meta.fields).
FIELDS = [
    "section",
    "author",
    "markup",
    "title",
    "slug",
    "teaser",
    "content",
    "description",
    "state"
]
class PostFormMixin:
    """Shared behavior for post forms: seeds initial teaser/content from the
    latest Revision, and on save renders the markup, snapshots a Revision,
    and fires post_published when the post transitions to published."""

    @property
    def markup_choice(self):
        # Key into PINAX_BLOG_MARKUP_CHOICE_MAP selecting the parser.
        return self.cleaned_data["markup"]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        post = self.instance
        latest_revision = post.latest()
        if latest_revision:
            # set initial data from the latest revision
            self.fields["teaser"].initial = latest_revision.teaser
            self.fields["content"].initial = latest_revision.content

    def save_post(self, post):
        """Render markup into *_html fields, save `post` and a matching
        Revision, and emit post_published on first publish.

        Returns the saved post.
        """
        published = False
        # Only stamp `published` the first time: when the post is new or
        # still has published=None, and the chosen state is the last choice.
        if post.pk is None or Post.objects.filter(pk=post.pk, published=None).count():
            if self.cleaned_data["state"] == Post.STATE_CHOICES[-1][0]:
                post.published = timezone.now()
                published = True
        # NOTE(review): curry() (functools.partial) is applied with no bound
        # arguments here, so it just wraps the parser unchanged.
        render_func = curry(
            load_path_attr(
                settings.PINAX_BLOG_MARKUP_CHOICE_MAP[self.markup_choice]["parser"]
            )
        )
        post.teaser_html = render_func(self.cleaned_data["teaser"])
        post.content_html = render_func(self.cleaned_data["content"])
        post.updated = timezone.now()
        post.save()
        # Snapshot this save as a Revision mirroring the post's fields.
        r = Revision()
        r.post = post
        r.title = post.title
        r.teaser = self.cleaned_data["teaser"]
        r.content = self.cleaned_data["content"]
        r.author = post.author
        r.updated = post.updated
        r.published = post.published
        r.save()
        if published:
            post_published.send(sender=Post, post=post)
        return post
class AdminPostForm(PostFormMixin, forms.ModelForm):
    """Admin-facing post form: exposes all FIELDS including author, slug
    and markup choice, with inline width styling for the admin page."""

    title = forms.CharField(
        label=_("Title"),
        max_length=90,
        widget=forms.TextInput(attrs={"style": "width: 50%;"}),
    )
    slug = forms.CharField(
        label=_("Slug"),
        widget=forms.TextInput(attrs={"style": "width: 50%;"})
    )
    teaser = forms.CharField(
        label=_("Teaser"),
        widget=forms.Textarea(attrs={"style": "width: 80%;"}),
    )
    content = MDTextFormField()
    description = forms.CharField(
        label=_("Description"),
        widget=forms.Textarea(attrs={"style": "width: 80%;"}),
        required=False
    )

    class Meta:
        model = Post
        fields = FIELDS

    class Media:
        # Extra admin-page JavaScript, configured via settings.
        js = settings.PINAX_BLOG_ADMIN_JS

    def save(self, blog=None):
        """Save via PostFormMixin.save_post, optionally attaching `blog`."""
        post = super().save(commit=False)
        if blog:
            post.blog = blog
        return self.save_post(post)
class PostForm(PostFormMixin, forms.ModelForm):
    """Public post form: markdown-only, auto-slugs from the title, and
    hides the section selector when fewer than two sections exist."""

    # Fixed parser key (no markup field on this form, unlike AdminPostForm).
    markup_choice = "markdown"

    teaser = forms.CharField(widget=forms.Textarea())
    content = MDTextFormField()

    class Meta:
        model = Post
        fields = [
            "section",
            "title",
            "teaser",
            "content",
            "description",
            "state"
        ]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if Section.objects.count() < 2:
            # Only one (or no) section: pick it automatically and drop
            # the selector from the rendered form.
            self.section = Section.objects.first()
            del self.fields["section"]
        else:
            self.section = None

    def save(self, blog=None, author=None):
        """Save via PostFormMixin.save_post, wiring blog/author/section and
        deriving the slug from the title."""
        post = super().save(commit=False)
        if blog:
            post.blog = blog
        if author:
            post.author = author
            # Each authored post gets its own ImageSet container.
            post.image_set = ImageSet.objects.create(created_by=author)
        if self.section:
            post.section = self.section
        post.slug = slugify(post.title)
        post.markup = self.markup_choice
        return self.save_post(post)
| pinax/blog/forms.py | 4,090 | set initial data from the latest revision | 41 | en | 0.564377 |
from mininet.topo import Topo
class Project1_Topo_0866007(Topo):
    """Two edge switches (s1, s2) with two hosts each, dual-homed to two
    core switches (s3, s4)."""

    def __init__(self):
        Topo.__init__(self)

        # Four hosts h1..h4 on 192.168.0.0/24.
        hosts = [self.addHost('h%d' % n, ip='192.168.0.%d/24' % n)
                 for n in range(1, 5)]

        # Four switches s1..s4.
        s1, s2, s3, s4 = [self.addSwitch('s%d' % n) for n in range(1, 5)]

        # h1,h2 attach to s1; h3,h4 attach to s2.
        for host, edge in zip(hosts, (s1, s1, s2, s2)):
            self.addLink(host, edge)

        # Each edge switch uplinks to both core switches.
        for edge in (s1, s2):
            for core in (s3, s4):
                self.addLink(edge, core)
topos = {'topo_0866007': Project1_Topo_0866007}
| project1_0866007/bonus_0866007.py | 815 | Add hosts Add switches Add links | 32 | en | 0.637601 |
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import osx_trace
import optparse
import os
import subprocess
import sys
import StringIO
from exceptions import *
class MockTrace(object):
    """Test double for the trace command wrapper: records every call()
    instead of executing anything, and returns a configurable status."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Forget all recorded calls and restore the default return (0)."""
        self._return_value = 0
        self._recorded = []

    def call(self, args, sudo=False):
        """Record (args, sudo) and report the configured status code."""
        self._recorded.append((args, sudo))
        return self._return_value

    @property
    def calls(self):
        """All (args, sudo) tuples recorded so far, oldest first."""
        return self._recorded

    def set_call_return_value(self, value):
        """Make subsequent call() invocations return `value`."""
        self._return_value = value

    @property
    def codes_file(self):
        """Fixed codes-file name handed to the trace command."""
        return "trace.codes"
def osx_trace_main(*args):
    """Invoke osx_trace.main() as if run from the command line with *args.

    Temporarily replaces sys.argv (keeping the real program name) and
    redirects both stdout and stderr into one StringIO buffer, restoring
    everything afterwards. Returns main()'s exit status.
    """
    old_sys_argv = sys.argv
    old_sys_stdout = sys.stdout
    old_sys_stderr = sys.stderr
    try:
        sys.argv = [old_sys_argv[0]]
        sys.argv.extend(args)
        parser = optparse.OptionParser(usage=osx_trace.main_usage())
        # stderr shares the same buffer so output ordering is preserved.
        sys.stdout = StringIO.StringIO()
        sys.stderr = sys.stdout
        return osx_trace.main(parser)
    finally:
        sys.argv = old_sys_argv
        sys.stdout = old_sys_stdout
        sys.stderr = old_sys_stderr
class OSXTraceTest(unittest.TestCase):
    """Tests for the osx-trace entry point, swapping in MockTrace so no
    real trace commands are executed."""

    def setUp(self):
        # Preserve the factory so tests can monkey-patch it freely.
        self._real_create_trace_cmd = osx_trace.create_trace_cmd

    def tearDown(self):
        osx_trace.create_trace_cmd = self._real_create_trace_cmd

    # Sanity check of the full script etc.
    def test_toplevel_script_smoketest(self):
        script = os.path.join(os.path.dirname(__file__), "../osx-trace")
        assert os.path.exists(script)
        p = subprocess.Popen([script, "help"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        (stdout, stderr) = p.communicate()
        self.assertTrue(stdout.startswith("Usage: osx-trace <command> [options]"))

    def test_trace_but_cant_compile(self):
        # A factory that yields CompilerNeededException should fail `record`.
        osx_trace.create_trace_cmd = lambda x: CompilerNeededException()
        ret = osx_trace_main("record")
        assert ret != 0

    def test_record_empty(self):
        # `record` without an output filename must fail.
        osx_trace.create_trace_cmd = lambda x: MockTrace()
        ret = osx_trace_main("record")
        assert ret != 0

    def test_record(self):
        # A successful `record test.trace` issues three trace invocations.
        trace = MockTrace()
        osx_trace.create_trace_cmd = lambda options: trace
        ret = osx_trace_main("record", "test.trace")
        assert ret == 0
        calls = trace.calls
        self.assertEquals(3, len(calls))
        self.assertTrue(calls[0][1])  # sudo
        self.assertEquals(["-r"], calls[0][0])
        self.assertTrue(calls[1][1])  # sudo
        self.assertEquals("-L", calls[1][0][0])
        self.assertEquals(2, len(calls[1][0]))
        self.assertFalse(calls[2][1])  # not sudo
        self.assertEquals(6, len(calls[2][0]))
        self.assertEquals("-t", calls[2][0][0])
        self.assertEquals("-R", calls[2][0][1])
        self.assertEquals("-o", calls[2][0][3])
        self.assertEquals("test.trace", calls[2][0][4])
        self.assertEquals("trace.codes", calls[2][0][5])
| src/osx_trace_test.py | 3,284 | Copyright 2011 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Sanity check of the full script etc. sudo sudo not sudo | 605 | en | 0.843199 |
import datetime
from django.contrib.syndication import feeds, views
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils import tzinfo
from django.utils.feedgenerator import rfc2822_date, rfc3339_date
from models import Entry
from xml.dom import minidom
# Python 2.3 compatibility: `set` became a builtin in 2.4; fall back to
# the deprecated `sets` module on older interpreters.
try:
    set
except NameError:
    from sets import Set as set
class FeedTestCase(TestCase):
    """Base class for the feed tests: loads the shared fixture and adds
    DOM-level assertion helpers."""

    fixtures = ['feeddata.json']

    def assertChildNodes(self, elem, expected):
        # The set of direct child node names must match exactly.
        actual = set([n.nodeName for n in elem.childNodes])
        expected = set(expected)
        self.assertEqual(actual, expected)

    def assertChildNodeContent(self, elem, expected):
        # Each named child's text must equal the expected string.
        for k, v in expected.items():
            self.assertEqual(
                elem.getElementsByTagName(k)[0].firstChild.wholeText, v)

    def assertCategories(self, elem, expected):
        # The direct <category> children's texts must match as a set.
        self.assertEqual(set(i.firstChild.wholeText for i in elem.childNodes if i.nodeName == 'category'), set(expected));
######################################
# Feed view
######################################
class SyndicationFeedTest(FeedTestCase):
    """
    Tests for the high-level syndication feed framework.
    """
    def test_rss2_feed(self):
        """
        Test the structure and content of feeds generated by Rss201rev2Feed.
        """
        response = self.client.get('/syndication/rss2/')
        doc = minidom.parseString(response.content)

        # Making sure there's only 1 `rss` element and that the correct
        # RSS version was specified.
        feed_elem = doc.getElementsByTagName('rss')
        self.assertEqual(len(feed_elem), 1)
        feed = feed_elem[0]
        self.assertEqual(feed.getAttribute('version'), '2.0')

        # Making sure there's only one `channel` element w/in the
        # `rss` element.
        chan_elem = feed.getElementsByTagName('channel')
        self.assertEqual(len(chan_elem), 1)
        chan = chan_elem[0]

        # Find the last build date
        d = Entry.objects.latest('date').date
        ltz = tzinfo.LocalTimezone(d)
        last_build_date = rfc2822_date(d.replace(tzinfo=ltz))

        self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
        self.assertChildNodeContent(chan, {
            'title': 'My blog',
            'description': 'A more thorough description of my blog.',
            'link': 'http://example.com/blog/',
            'language': 'en',
            'lastBuildDate': last_build_date,
            #'atom:link': '',
            'ttl': '600',
            'copyright': 'Copyright (c) 2007, Sally Smith',
        })
        self.assertCategories(chan, ['python', 'django']);

        # Ensure the content of the channel is correct
        self.assertChildNodeContent(chan, {
            'title': 'My blog',
            'link': 'http://example.com/blog/',
        })

        # Check feed_url is passed
        self.assertEqual(
            chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
            'http://example.com/syndication/rss2/'
        )

        # Find the pubdate of the first feed item
        d = Entry.objects.get(pk=1).date
        ltz = tzinfo.LocalTimezone(d)
        pub_date = rfc2822_date(d.replace(tzinfo=ltz))

        items = chan.getElementsByTagName('item')
        self.assertEqual(len(items), Entry.objects.count())
        self.assertChildNodeContent(items[0], {
            'title': 'My first entry',
            'description': 'Overridden description: My first entry',
            'link': 'http://example.com/blog/1/',
            'guid': 'http://example.com/blog/1/',
            'pubDate': pub_date,
            'author': 'test@example.com (Sally Smith)',
        })
        self.assertCategories(items[0], ['python', 'testing']);
        for item in items:
            self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'category', 'pubDate', 'author'])

    def test_rss091_feed(self):
        """
        Test the structure and content of feeds generated by RssUserland091Feed.
        """
        response = self.client.get('/syndication/rss091/')
        doc = minidom.parseString(response.content)

        # Making sure there's only 1 `rss` element and that the correct
        # RSS version was specified.
        feed_elem = doc.getElementsByTagName('rss')
        self.assertEqual(len(feed_elem), 1)
        feed = feed_elem[0]
        self.assertEqual(feed.getAttribute('version'), '0.91')

        # Making sure there's only one `channel` element w/in the
        # `rss` element.
        chan_elem = feed.getElementsByTagName('channel')
        self.assertEqual(len(chan_elem), 1)
        chan = chan_elem[0]
        self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])

        # Ensure the content of the channel is correct
        self.assertChildNodeContent(chan, {
            'title': 'My blog',
            'link': 'http://example.com/blog/',
        })
        self.assertCategories(chan, ['python', 'django'])

        # Check feed_url is passed
        self.assertEqual(
            chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
            'http://example.com/syndication/rss091/'
        )

        items = chan.getElementsByTagName('item')
        self.assertEqual(len(items), Entry.objects.count())
        self.assertChildNodeContent(items[0], {
            'title': 'My first entry',
            'description': 'Overridden description: My first entry',
            'link': 'http://example.com/blog/1/',
        })
        for item in items:
            # RSS 0.91 items carry no guid/category/pubDate/author.
            self.assertChildNodes(item, ['title', 'link', 'description'])
            self.assertCategories(item, [])

    def test_atom_feed(self):
        """
        Test the structure and content of feeds generated by Atom1Feed.
        """
        response = self.client.get('/syndication/atom/')
        feed = minidom.parseString(response.content).firstChild

        self.assertEqual(feed.nodeName, 'feed')
        self.assertEqual(feed.getAttribute('xmlns'), 'http://www.w3.org/2005/Atom')
        self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'rights', 'category', 'author'])
        for link in feed.getElementsByTagName('link'):
            if link.getAttribute('rel') == 'self':
                self.assertEqual(link.getAttribute('href'), 'http://example.com/syndication/atom/')

        entries = feed.getElementsByTagName('entry')
        self.assertEqual(len(entries), Entry.objects.count())
        for entry in entries:
            self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'category', 'updated', 'rights', 'author'])
            summary = entry.getElementsByTagName('summary')[0]
            self.assertEqual(summary.getAttribute('type'), 'html')

    def test_custom_feed_generator(self):
        # The custom generator adds extra attributes/elements at both the
        # feed and entry level; verify they come through.
        response = self.client.get('/syndication/custom/')
        feed = minidom.parseString(response.content).firstChild

        self.assertEqual(feed.nodeName, 'feed')
        self.assertEqual(feed.getAttribute('django'), 'rocks')
        self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'spam', 'rights', 'category', 'author'])

        entries = feed.getElementsByTagName('entry')
        self.assertEqual(len(entries), Entry.objects.count())
        for entry in entries:
            self.assertEqual(entry.getAttribute('bacon'), 'yum')
            self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'ministry', 'rights', 'author', 'updated', 'category'])
            summary = entry.getElementsByTagName('summary')[0]
            self.assertEqual(summary.getAttribute('type'), 'html')

    def test_title_escaping(self):
        """
        Tests that titles are escaped correctly in RSS feeds.
        """
        response = self.client.get('/syndication/rss2/')
        doc = minidom.parseString(response.content)
        for item in doc.getElementsByTagName('item'):
            link = item.getElementsByTagName('link')[0]
            if link.firstChild.wholeText == 'http://example.com/blog/4/':
                title = item.getElementsByTagName('title')[0]
                self.assertEquals(title.firstChild.wholeText, u'A & B < C > D')

    def test_naive_datetime_conversion(self):
        """
        Test that datetimes are correctly converted to the local time zone.
        """
        # Naive date times passed in get converted to the local time zone, so
        # check the received zone offset against the local offset.
        response = self.client.get('/syndication/naive-dates/')
        doc = minidom.parseString(response.content)
        updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText
        d = Entry.objects.latest('date').date
        ltz = tzinfo.LocalTimezone(d)
        latest = rfc3339_date(d.replace(tzinfo=ltz))
        self.assertEqual(updated, latest)

    def test_aware_datetime_conversion(self):
        """
        Test that datetimes with timezones don't get trodden on.
        """
        response = self.client.get('/syndication/aware-dates/')
        doc = minidom.parseString(response.content)
        updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText
        # The fixture's +00:42 offset must survive into the output.
        self.assertEqual(updated[-6:], '+00:42')

    def test_feed_url(self):
        """
        Test that the feed_url can be overridden.
        """
        response = self.client.get('/syndication/feedurl/')
        doc = minidom.parseString(response.content)
        for link in doc.getElementsByTagName('link'):
            if link.getAttribute('rel') == 'self':
                self.assertEqual(link.getAttribute('href'), 'http://example.com/customfeedurl/')

    def test_item_link_error(self):
        """
        Test that a ImproperlyConfigured is raised if no link could be found
        for the item(s).
        """
        self.assertRaises(ImproperlyConfigured,
                          self.client.get,
                          '/syndication/articles/')

    def test_template_feed(self):
        """
        Test that the item title and description can be overridden with
        templates.
        """
        response = self.client.get('/syndication/template/')
        doc = minidom.parseString(response.content)
        feed = doc.getElementsByTagName('rss')[0]
        chan = feed.getElementsByTagName('channel')[0]
        items = chan.getElementsByTagName('item')

        self.assertChildNodeContent(items[0], {
            'title': 'Title in your templates: My first entry',
            'description': 'Description in your templates: My first entry',
            'link': 'http://example.com/blog/1/',
        })

    def test_add_domain(self):
        """
        Test add_domain() prefixes domains onto the correct URLs.
        """
        self.assertEqual(
            views.add_domain('example.com', '/foo/?arg=value'),
            'http://example.com/foo/?arg=value'
        )
        self.assertEqual(
            views.add_domain('example.com', 'http://djangoproject.com/doc/'),
            'http://djangoproject.com/doc/'
        )
        self.assertEqual(
            views.add_domain('example.com', 'https://djangoproject.com/doc/'),
            'https://djangoproject.com/doc/'
        )
        self.assertEqual(
            views.add_domain('example.com', 'mailto:uhoh@djangoproject.com'),
            'mailto:uhoh@djangoproject.com'
        )
######################################
# Deprecated feeds
######################################
class DeprecatedSyndicationFeedTest(FeedTestCase):
    """
    Tests for the deprecated API (feed() view and the feed_dict etc).
    """
    def test_empty_feed_dict(self):
        """
        Test that an empty feed_dict raises a 404.
        """
        response = self.client.get('/syndication/depr-feeds-empty/aware-dates/')
        self.assertEquals(response.status_code, 404)

    def test_nonexistent_slug(self):
        """
        Test that a non-existent slug raises a 404.
        """
        response = self.client.get('/syndication/depr-feeds/foobar/')
        self.assertEquals(response.status_code, 404)

    def test_rss_feed(self):
        """
        A simple test for Rss201rev2Feed feeds generated by the deprecated
        system.
        """
        response = self.client.get('/syndication/depr-feeds/rss/')
        doc = minidom.parseString(response.content)
        feed = doc.getElementsByTagName('rss')[0]
        self.assertEqual(feed.getAttribute('version'), '2.0')

        chan = feed.getElementsByTagName('channel')[0]
        self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link'])

        items = chan.getElementsByTagName('item')
        self.assertEqual(len(items), Entry.objects.count())
def test_complex_base_url(self):
"""
Tests that the base url for a complex feed doesn't raise a 500
exception.
"""
response = self.client.get('/syndication/depr-feeds/complex/')
self.assertEquals(response.status_code, 404)
| tests/regressiontests/syndication/tests.py | 13,237 | Tests for the deprecated API (feed() view and the feed_dict etc).
Tests for the high-level syndication feed framework.
Test add_domain() prefixes domains onto the correct URLs.
Test the structure and content of feeds generated by Atom1Feed.
Test that datetimes with timezones don't get trodden on.
Tests that the base url for a complex feed doesn't raise a 500
exception.
Test that an empty feed_dict raises a 404.
Test that the feed_url can be overridden.
Test that a ImproperlyConfigured is raised if no link could be found
for the item(s).
Test that datetimes are correctly converted to the local time zone.
Test that a non-existent slug raises a 404.
Test the structure and content of feeds generated by RssUserland091Feed.
Test the structure and content of feeds generated by Rss201rev2Feed.
A simple test for Rss201rev2Feed feeds generated by the deprecated
system.
Test that the item title and description can be overridden with
templates.
Tests that titles are escaped correctly in RSS feeds.
Feed view Making sure there's only 1 `rss` element and that the correct RSS version was specified. Making sure there's only one `channel` element w/in the `rss` element. Find the last build date'atom:link': '', Ensure the content of the channel is correct Check feed_url is passed Find the pubdate of the first feed item Making sure there's only 1 `rss` element and that the correct RSS version was specified. Making sure there's only one `channel` element w/in the `rss` element. Ensure the content of the channel is correct Check feed_url is passed Naive date times passed in get converted to the local time zone, so check the recived zone offset against the local offset. Deprecated feeds | 1,694 | en | 0.875619 |
"""
Provides linkedin api-related code
"""
import random
import logging
from time import sleep
import json
from linkedin_api.utils.helpers import get_id_from_urn
from linkedin_api.client import Client
logger = logging.getLogger(__name__)
class Linkedin(object):
    """
    Class for accessing Linkedin API.
    """

    # Page-size and safety limits used by the paginated endpoints below.
    _MAX_UPDATE_COUNT = 100  # max seems to be 100
    _MAX_SEARCH_COUNT = 49  # max seems to be 49
    _MAX_REPEATED_REQUESTS = (
        200
    )  # VERY conservative max requests count to avoid rate-limit
def __init__(self, username, password):
    """Create an authenticated client session for `username`/`password`."""
    self.client = Client(debug=True)
    self.client.authenticate(username, password)
    # Module-level logger shared by all instances.
    self.logger = logger
def search(self, params, max_results=None, results=None):
    """
    Do a search against /search/cluster, following pagination recursively.

    [params]      - extra query parameters merged over the defaults
    [max_results] - stop once this many hits have been collected
    [results]     - accumulator threaded through the recursive calls

    Returns a list of raw search-hit elements.
    """
    # BUG FIX: the original declared `results=[]`, a mutable default
    # evaluated once at definition time, so hits accumulated across
    # *separate* search() calls and every later search returned stale
    # results from earlier ones.
    if results is None:
        results = []
    sleep(
        random.randint(0, 1)
    )  # sleep a random duration to try and evade suspention
    count = (
        max_results
        if max_results and max_results <= Linkedin._MAX_SEARCH_COUNT
        else Linkedin._MAX_SEARCH_COUNT
    )
    default_params = {
        "count": count,
        "guides": "List()",
        "origin": "GLOBAL_SEARCH_HEADER",
        "q": "guided",
        "start": len(results),  # resume where the previous page ended
    }
    default_params.update(params)

    res = self.client.session.get(
        f"{self.client.API_BASE_URL}/search/cluster", params=default_params
    )
    data = res.json()

    total_found = data.get("paging", {}).get("total")

    # recursive base case: no more hits, enough collected, or the
    # conservative repeated-request cap reached
    if (
        len(data["elements"]) == 0
        or (max_results is not None and len(results) >= max_results)
        or total_found is None
        or len(results) >= total_found
        or (max_results is not None and len(results) / max_results >= Linkedin._MAX_REPEATED_REQUESTS)
    ):
        return results

    results.extend(data["elements"][0]["elements"])
    self.logger.debug(f"results grew: {len(results)}")

    return self.search(params, results=results, max_results=max_results)
def search_people(
    self,
    keywords=None,
    connection_of=None,
    network_depth=None,
    regions=None,
    industries=None,
):
    """
    Do a people search.

    [keywords]      - free-text search terms
    [connection_of] - restrict to connections of this profile urn id
    [network_depth] - e.g. "F" for first-degree connections
    [regions]       - list of geo-region facet ids
    [industries]    - list of industry facet ids

    Returns a list of dicts with urn_id, distance and public_id.
    """
    # Build the "guides" facet list understood by the guided search API.
    guides = ["v->PEOPLE"]
    if connection_of:
        guides.append(f"facetConnectionOf->{connection_of}")
    if network_depth:
        guides.append(f"facetNetwork->{network_depth}")
    if regions:
        guides.append(f'facetGeoRegion->{"|".join(regions)}')
    if industries:
        guides.append(f'facetIndustry->{"|".join(industries)}')

    params = {"guides": "List({})".format(",".join(guides))}

    if keywords:
        params["keywords"] = keywords

    data = self.search(params)

    # Flatten the raw hits into a small, stable dict shape.
    results = []
    for item in data:
        search_profile = item["hitInfo"][
            "com.linkedin.voyager.search.SearchProfile"
        ]
        profile_id = search_profile["id"]
        distance = search_profile["distance"]["value"]
        results.append(
            {
                "urn_id": profile_id,
                "distance": distance,
                "public_id": search_profile["miniProfile"]["publicIdentifier"],
            }
        )
    return results
def search_companies(self, max_results=None, results=None):
    """
    Do a company search via the blended-search endpoint, following
    pagination recursively.

    Note: try swap from blended search to cluster

    [max_results] - stop once this many hits have been collected
    [results]     - accumulator threaded through the recursive calls
    """
    # BUG FIX: the original declared `results=[]`, a mutable default shared
    # across calls, so separate company searches returned accumulated
    # results from earlier ones.
    if results is None:
        results = []
    sleep(
        random.randint(2, 5)
    )  # sleep a random duration to try and evade suspention

    #Search params from main search, here for reference
    '''
    default_params = {
        "count": count,
        "guides": "List()",
        "origin": "GLOBAL_SEARCH_HEADER",
        "q": "guided",
        "start": len(results),
    }
    '''
    # NOTE(review): default_params is built but never sent -- the request
    # below hard-codes its query string. Kept as-is to preserve behavior.
    default_params = {
        "origin": "GLOBAL_SEARCH_HEADER",
        "guides": "List(resultType->companies)",
        "count": "10",
        "q": "guided",
        "filters": "List(resultType->companies)",
        "start": len(results)
    }

    res = self.client.session.get(
        f"{self.client.API_BASE_URL}/search/blended?keywords=s&origin=GLOBAL_SEARCH_HEADER&count=10&guides=List(resultType-%3Ecompanies)&q=all&filters=List(resultType-%3Ecompanies)&start={len(results)}"
    )
    data = res.json()

    total_found = data.get("paging", {}).get("total")

    # base case: no more hits, enough collected, or request cap reached
    if (
        len(data["elements"]) == 0 or
        len(data["elements"][0]["elements"]) == 0
        or total_found is None
        or (max_results is not None and len(results) >= max_results)
        or (max_results is not None and len(results) / max_results >= Linkedin._MAX_REPEATED_REQUESTS)
    ):
        return results

    results.extend(data["elements"][0]["elements"])
    self.logger.debug(f"results grew: {len(results)}")

    return self.search_companies(max_results=max_results, results=results)
def get_profile_contact_info(self, public_id=None, urn_id=None):
    """
    Return contact info (email, websites, phone numbers) for a profile.

    [public_id] - public identifier i.e. tom-quirk-1928345
    [urn_id] - id provided by the related URN
    """
    res = self.client.session.get(
        f"{self.client.API_BASE_URL}/identity/profiles/{public_id or urn_id}/profileContactInfo"
    )
    data = res.json()

    contact_info = {
        "email_address": data.get("emailAddress"),
        "websites": [],
        "phone_numbers": data.get("phoneNumbers", []),
    }

    websites = data.get("websites", [])
    for item in websites:
        # Each website's "type" is keyed by its payload class name; lift the
        # human-readable label out before discarding the wrapper.
        if "com.linkedin.voyager.identity.profile.StandardWebsite" in item["type"]:
            item["label"] = item["type"][
                "com.linkedin.voyager.identity.profile.StandardWebsite"
            ]["category"]
        # BUG FIX: the original tested `"" in item["type"]`, which is never a
        # key of this payload, so custom websites never received a label.
        elif "com.linkedin.voyager.identity.profile.CustomWebsite" in item["type"]:
            item["label"] = item["type"][
                "com.linkedin.voyager.identity.profile.CustomWebsite"
            ]["label"]

        del item["type"]

    contact_info["websites"] = websites

    return contact_info
def get_profile(self, public_id=None, urn_id=None):
    """
    Return data for a single profile.

    [public_id] - public identifier i.e. tom-quirk-1928345
    [urn_id] - id provided by the related URN

    Returns {} when the API reports a non-200 status in its payload.
    """
    sleep(
        random.randint(2, 5)
    )  # sleep a random duration to try and evade suspention
    res = self.client.session.get(
        f"{self.client.API_BASE_URL}/identity/profiles/{public_id or urn_id}/profileView"
    )
    data = res.json()
    if data and "status" in data and data["status"] != 200:
        self.logger.info("request failed: {}".format(data["message"]))
        return {}

    # massage [profile] data: flatten the picture URL, derive profile_id,
    # and strip locale/version noise from the payload.
    profile = data["profile"]
    if "miniProfile" in profile:
        if "picture" in profile["miniProfile"]:
            profile["displayPictureUrl"] = profile["miniProfile"]["picture"][
                "com.linkedin.common.VectorImage"
            ]["rootUrl"]
        profile["profile_id"] = get_id_from_urn(profile["miniProfile"]["entityUrn"])
        del profile["miniProfile"]
    del profile["defaultLocale"]
    del profile["supportedLocales"]
    del profile["versionTag"]
    del profile["showEducationOnProfileTopCard"]

    # massage [experience] data: lift company logo URLs, drop miniCompany.
    experience = data["positionView"]["elements"]
    for item in experience:
        if "company" in item and "miniCompany" in item["company"]:
            if "logo" in item["company"]["miniCompany"]:
                logo = item["company"]["miniCompany"]["logo"].get(
                    "com.linkedin.common.VectorImage"
                )
                if logo:
                    item["companyLogoUrl"] = logo["rootUrl"]
            del item["company"]["miniCompany"]
    profile["experience"] = experience

    # massage [skills] data: keep names only.
    skills = [item["name"] for item in data["skillView"]["elements"]]
    profile["skills"] = skills

    # massage [education] data: lift school logo URLs.
    education = data["educationView"]["elements"]
    for item in education:
        if "school" in item:
            if "logo" in item["school"]:
                item["school"]["logoUrl"] = item["school"]["logo"][
                    "com.linkedin.common.VectorImage"
                ]["rootUrl"]
                del item["school"]["logo"]
    profile["education"] = education

    return profile
def get_profile_connections(self, urn_id):
"""
Return a list of profile ids connected to profile of given [urn_id]
"""
return self.search_people(connection_of=urn_id, network_depth="F")
def get_profile_networkinfo(self, urn_id):
"""
Return the nework info connected to the profile of the given [urn_id]
"""
sleep(
random.randint(2, 5)
) # sleep a random duration to try and evade suspention
res = self.client.session.get(
f"{self.client.API_BASE_URL}/identity/profiles/{urn_id}/networkinfo"
)
return res.json()
def get_company_updates(self, public_id=None, urn_id=None, max_results=None, results=[]):
""""
Return a list of company posts
[public_id] - public identifier ie - microsoft
[urn_id] - id provided by the related URN
"""
sleep(
random.randint(2, 5)
) # sleep a random duration to try and evade suspention
params = {
"companyUniversalName": {public_id or urn_id},
"q": "companyFeedByUniversalName",
"moduleKey": "member-share",
"count": Linkedin._MAX_UPDATE_COUNT,
"start": len(results),
}
res = self.client.session.get(
f"{self.client.API_BASE_URL}/feed/updates", params=params
)
data = res.json()
if (
len(data["elements"]) == 0
or (max_results is not None and len(results) >= max_results)
or (max_results is not None and len(results) / max_results >= Linkedin._MAX_REPEATED_REQUESTS)
):
return results
results.extend(data["elements"])
self.logger.debug(f"results grew: {len(results)}")
return self.get_company_updates(public_id=public_id, urn_id=urn_id, results=results, max_results=max_results)
def get_profile_updates(self, public_id=None, urn_id=None, max_results=None, results=[]):
""""
Return a list of profile posts
[public_id] - public identifier i.e. tom-quirk-1928345
[urn_id] - id provided by the related URN
"""
sleep(
random.randint(2, 5)
) # sleep a random duration to try and evade suspention
params = {
"profileId": {public_id or urn_id},
"q": "memberShareFeed",
"moduleKey": "member-share",
"count": Linkedin._MAX_UPDATE_COUNT,
"start": len(results),
}
res = self.client.session.get(
f"{self.client.API_BASE_URL}/feed/updates", params=params
)
data = res.json()
if (
len(data["elements"]) == 0
or (max_results is not None and len(results) >= max_results)
or (max_results is not None and len(results) / max_results >= Linkedin._MAX_REPEATED_REQUESTS)
):
return results
results.extend(data["elements"])
self.logger.debug(f"results grew: {len(results)}")
return self.get_profile_updates(public_id=public_id, urn_id=urn_id, results=results, max_results=max_results)
def get_current_profile_views(self):
"""
Get profile view statistics, including chart data.
"""
res = self.client.session.get(
f"{self.client.API_BASE_URL}/identity/panels"
)
data = res.json()
return data['elements'][0]['value']['com.linkedin.voyager.identity.me.ProfileViewsByTimePanel']
    def get_school(self, public_id):
        """
        Return data for a single school.

        [public_id] - public identifier i.e. uq
        Returns {} when the API reports a non-200 status.
        """
        sleep(
            random.randint(2, 5)
        )  # sleep a random duration to try and evade suspension
        params = {
            # Raw "decoration" literal selecting the fields the API returns;
            # whitespace is part of the request payload — do not reformat.
            "decoration": (
                """
                (
                autoGenerated,backgroundCoverImage,
                companyEmployeesSearchPageUrl,companyPageUrl,confirmedLocations*,coverPhoto,dataVersion,description,
                entityUrn,followingInfo,foundedOn,headquarter,jobSearchPageUrl,lcpTreatment,logo,name,type,overviewPhoto,
                paidCompany,partnerCompanyUrl,partnerLogo,partnerLogoImage,rankForTopCompanies,salesNavigatorCompanyUrl,
                school,showcase,staffCount,staffCountRange,staffingCompany,topCompaniesListName,universalName,url,
                companyIndustries*,industries,specialities,
                acquirerCompany~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),
                affiliatedCompanies*~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),
                groups*~(entityUrn,largeLogo,groupName,memberCount,websiteUrl,url),
                showcasePages*~(entityUrn,logo,name,industries,followingInfo,url,description,universalName)
                )
                """
            ),
            "q": "universalName",
            "universalName": public_id,
        }
        res = self.client.session.get(
            f"{self.client.API_BASE_URL}/organization/companies", params=params
        )
        data = res.json()
        if data and "status" in data and data["status"] != 200:
            self.logger.info("request failed: {}".format(data["message"]))
            return {}
        school = data["elements"][0]
        return school
def get_similar_companies(self, public_id):
"""
Return similar companies for a single company.
[public_id] - public identifier i.e. univeristy-of-queensland
"""
sleep(
random.randint(2, 5)
) # sleep a random duration to try and evade suspention
res = self.client.session.get(
f"{self.client.API_BASE_URL}/organization/companies?count={Linkedin._MAX_SEARCH_COUNT}&companyUniversalName={public_id}&q=similarCompanies&start=0&decorationId=com.linkedin.voyager.deco.organization.web.WebSimilarCompanyCardWithRelevanceReason-3"
)
data = res.json()
return data
    def get_company(self, public_id):
        """
        Return data for a single company.

        [public_id] - public identifier i.e. university-of-queensland
        Returns {} when the API reports a non-200 status.
        """
        sleep(
            random.randint(2, 5)
        )  # sleep a random duration to try and evade suspension
        params = {
            # Raw "decoration" literal selecting the fields the API returns;
            # whitespace is part of the request payload — do not reformat.
            "decoration": (
                """
                (
                affiliatedCompaniesWithEmployeesRollup,affiliatedCompaniesWithJobsRollup,articlePermalinkForTopCompanies,
                autoGenerated,backgroundCoverImage,companyEmployeesSearchPageUrl,
                companyPageUrl,confirmedLocations*,coverPhoto,dataVersion,description,entityUrn,followingInfo,
                foundedOn,headquarter,jobSearchPageUrl,lcpTreatment,logo,name,type,overviewPhoto,paidCompany,
                partnerCompanyUrl,partnerLogo,partnerLogoImage,permissions,rankForTopCompanies,
                salesNavigatorCompanyUrl,school,showcase,staffCount,staffCountRange,staffingCompany,
                topCompaniesListName,universalName,url,companyIndustries*,industries,specialities,
                acquirerCompany~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),
                affiliatedCompanies*~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),
                groups*~(entityUrn,largeLogo,groupName,memberCount,websiteUrl,url),
                showcasePages*~(entityUrn,logo,name,industries,followingInfo,url,description,universalName)
                )
                """
            ),
            "q": "universalName",
            "universalName": public_id,
        }
        res = self.client.session.get(
            f"{self.client.API_BASE_URL}/organization/companies", params=params
        )
        data = res.json()
        if data and "status" in data and data["status"] != 200:
            self.logger.info("request failed: {}".format(data["message"]))
            return {}
        company = data["elements"][0]
        return company
    def get_conversation_details(self, profile_urn_id):
        """
        Return the conversation (or "message thread") details for a given [public_profile_id]
        """
        # passing `params` doesn't work properly, think it's to do with List().
        # Might be a bug in `requests`?
        # NOTE(review): the backslash continuation keeps the next line's
        # leading spaces inside the URL string — presumably tolerated by the
        # endpoint; confirm before reformatting.
        res = self.client.session.get(
            f"{self.client.API_BASE_URL}/messaging/conversations?\
            keyVersion=LEGACY_INBOX&q=participants&recipients=List({profile_urn_id})"
        )
        data = res.json()
        item = data["elements"][0]
        # Expose the conversation id extracted from the entity URN.
        item["id"] = get_id_from_urn(item["entityUrn"])
        return item
def get_conversations(self):
"""
Return list of conversations the user is in.
"""
params = {"keyVersion": "LEGACY_INBOX"}
res = self.client.session.get(
f"{self.client.API_BASE_URL}/messaging/conversations", params=params
)
return res.json()
def get_conversation(self, conversation_urn_id):
"""
Return the full conversation at a given [conversation_urn_id]
"""
res = self.client.session.get(
f"{self.client.API_BASE_URL}/messaging/conversations/{conversation_urn_id}/events"
)
return res.json()
def send_message(self, conversation_urn_id, message_body):
"""
Return the full conversation at a given [conversation_urn_id]
"""
params = {"action": "create"}
payload = json.dumps(
{
"eventCreate": {
"value": {
"com.linkedin.voyager.messaging.create.MessageCreate": {
"body": message_body,
"attachments": [],
"attributedBody": {"text": message_body, "attributes": []},
"mediaAttachments": [],
}
}
}
}
)
res = self.client.session.post(
f"{self.client.API_BASE_URL}/messaging/conversations/{conversation_urn_id}/events",
params=params,
data=payload,
)
return res.status_code == 201
| linkedin_api/linkedin.py | 19,287 | Class for accessing Linkedin API.
Return data for a single company.
 Return data for a single company.
 [public_id] - public identifier i.e. university-of-queensland
"
Return a list of company posts
[public_id] - public identifier ie - microsoft
[urn_id] - id provided by the related URN
Return the full conversation at a given [conversation_urn_id]
Return the conversation (or "message thread") details for a given [public_profile_id]
Return list of conversations the user is in.
Get profile view statistics, including chart data.
Return data for a single profile.
[public_id] - public identifier i.e. tom-quirk-1928345
[urn_id] - id provided by the related URN
Return a list of profile ids connected to profile of given [urn_id]
Return data for a single profile.
[public_id] - public identifier i.e. tom-quirk-1928345
[urn_id] - id provided by the related URN
 Return the network info connected to the profile of the given [urn_id]
"
Return a list of profile posts
[public_id] - public identifier i.e. tom-quirk-1928345
[urn_id] - id provided by the related URN
Return data for a single school.
[public_id] - public identifier i.e. uq
Return similar companies for a single company.
 [public_id] - public identifier i.e. university-of-queensland
Do a search.
Do a company search
Note: try swap from blended search to cluster
Do a people search.
Return the full conversation at a given [conversation_urn_id]
Provides linkedin api-related code
max seems to be 100 max seems to be 49 VERY conservative max requests count to avoid rate-limit sleep a random duration to try and evade suspention recursive base case sleep a random duration to try and evade suspentionSearch params from main search, here for reference sleep a random duration to try and evade suspention massage [profile] data massage [experience] data massage [skills] data massage [education] data sleep a random duration to try and evade suspention sleep a random duration to try and evade suspention sleep a random duration to try and evade suspention sleep a random duration to try and evade suspention sleep a random duration to try and evade suspention sleep a random duration to try and evade suspention passing `params` doesn't work properly, think it's to do with List(). Might be a bug in `requests`? | 2,242 | en | 0.842029 |
import os
import sys
sys.path.append(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
)
from src.DbHelper import DbHelper
# Hand-picked person names for the synthetic fraud graph.
persons = [
    'Lucy', 'Franz', 'Susanne', 'Jonathan', 'Max', 'Stephan', 'Julian',
    'Frederike', 'Amy', 'Miriam', 'Jonas', 'Anna', 'Sebastian',
]

# Synthetic attribute values; the counts are deliberately uneven so some
# attributes end up shared between persons.
addresses = [f'Musterstraße {i}' for i in range(1, 11)]
accounts = [f'Bank Account {i}' for i in range(1, 14)]
phones = [f'Phone Number {i}' for i in range(1, 12)]  # defined but not registered in [nodes]
creditcards = [f'Credit Card Number {i}' for i in range(1, 14)]
socialsecuritynumbers = [f'SSN {i}' for i in range(1, 10)]

# Node label -> (property key, list of property values) used to create nodes.
nodes = {
    'Person': ('name', persons),
    'Address': ('address', addresses),
    'BankAccount': ('account', accounts),
    'CreditCard': ('number', creditcards),
    'SSN': ('ssn', socialsecuritynumbers),
}
if __name__ == "__main__":
    # See https://neo4j.com/developer/aura-connect-driver/ for Aura specific connection URL.
    scheme = "neo4j" # Connecting to Aura, use the "neo4j+s" URI scheme
    host_name = "localhost"
    port = 7687 # Bolt Port https://neo4j.com/docs/operations-manual/current/configuration/ports/ | .NET | Java | JavaScript | Go | Python
    url = f"{scheme}://{host_name}:{port}"
    user = 'neo4j'
    password = 'neo4j'
    db_helper = DbHelper(url, user, password)
    # Create one node per (label, property value) pair declared in [nodes].
    for Label, values in nodes.items():
        PropertyKey = values[0]
        for PropertyValue in values[1]:
            # NOTE(review): the Cypher statement is built by string
            # concatenation; safe only because the values are the local
            # constants above — parameterize if values ever become external.
            db_helper.run_query(
                'CREATE (node:' + Label + ' {' + PropertyKey + ': "' + PropertyValue + '" }) RETURN node.' + PropertyKey
            )
    db_helper.close()
| src/Simple_Fraud_Detection/solution/01_fill_fraud_db_with_nodes.py | 1,660 | See https://neo4j.com/developer/aura-connect-driver/ for Aura specific connection URL. Connecting to Aura, use the "neo4j+s" URI scheme Bolt Port https://neo4j.com/docs/operations-manual/current/configuration/ports/ | .NET | Java | JavaScript | Go | Python | 256 | en | 0.743682 |
# Keypirinha launcher (keypirinha.com)
import keypirinha as kp
import keypirinha_util as kpu
import keypirinha_net as kpnet
import json
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), "lib"))
from faker import Faker
class FakerData(kp.Plugin):
    """Keypirinha plugin generating fake data through the Faker library."""
    # Item categories registered by this plugin: provider categories and
    # the generated result values.
    ITEMCAT = kp.ItemCategory.USER_BASE + 1
    ITEMRESULT = kp.ItemCategory.USER_BASE + 2
    # The default amount of suggestions to show after the user selected the faker category
    DEFAULT_MAX_RESULTS = 5
    # The default language used to instantiate Faker
    DEFAULT_LANGUAGE = 'en_US'
    def __init__(self):
        super().__init__()
        # Cached suggestion items for the currently selected category.
        self.current_output = []
def on_start(self):
self.read_config()
pass
def on_events(self, flags):
if flags & kp.Events.PACKCONFIG:
self.read_config()
def on_catalog(self):
self.set_catalog([
self.create_item(
category=kp.ItemCategory.KEYWORD,
label="Faker",
short_desc="Generate fake data",
target="Faker",
args_hint=kp.ItemArgsHint.REQUIRED,
hit_hint=kp.ItemHitHint.KEEPALL
)
])
    def on_suggest(self, user_input, items_chain):
        """Build the suggestion list: provider categories at the first level,
        generated fake values once a category has been selected."""
        if not items_chain or items_chain[0].category() != kp.ItemCategory.KEYWORD:
            return
        suggestions = []
        # Generate outputs
        if len(items_chain) == 2:
            items = []
            # We don't want to generate the output each time the user enter a new query
            # Let's keep the output, so this way Keypirinha itself can filter it
            if not self.current_output:
                for x in range(0, self.max_results):
                    try:
                        # items_chain[1].target() is the Faker provider method name.
                        items.append(str(getattr(self.fakeGenerator, items_chain[1].target())()))
                    except Exception as error:
                        # Surface provider errors as suggestion text.
                        items.append(str(error))
                        continue
                if len(items) > 0:
                    # Remove duplicated
                    items = list(set(items))
                    # Append suggestions
                    for x in items:
                        self.current_output.append(
                            self.create_item(
                                category=self.ITEMRESULT,
                                label=x,
                                short_desc='Press Enter to copy',
                                target=x,
                                args_hint=kp.ItemArgsHint.FORBIDDEN,
                                hit_hint=kp.ItemHitHint.IGNORE,
                                loop_on_suggest=False
                            )
                        )
            suggestions = self.current_output
        # Generate suggestions categories
        else:
            self.current_output = []
            # providers.json maps provider names to Faker method names.
            lines = self.load_text_resource('providers.json')
            data = json.loads(lines)
            for item in data:
                try:
                    suggestions.append(
                        self.create_item(
                            category=self.ITEMCAT,
                            label=item['name'],
                            short_desc=item['description'],
                            target=item['function'],
                            args_hint=kp.ItemArgsHint.FORBIDDEN,
                            hit_hint=kp.ItemHitHint.IGNORE,
                            icon_handle=self.load_icon("res://{}/icons/{}.png".format(self.package_full_name(), item['name'][0].upper())),
                            loop_on_suggest=True
                        )
                    )
                except Exception as error:
                    self.err("Could not generate suggestion for fake data category: {}".format(item['name']), error)
        self.set_suggestions(suggestions, kp.Match.FUZZY, kp.Sort.DEFAULT)
def on_execute(self, item, action):
if (item.category() == self.ITEMCAT):
to_clipboard = getattr(self.fakeGenerator, item.target())()
elif (item.category() == self.ITEMRESULT):
to_clipboard = item.label()
kpu.set_clipboard(to_clipboard)
    def read_config(self):
        """Load plugin settings, falling back to the class defaults."""
        settings = self.load_settings()
        self.max_results = int(settings.get("max_results", section="main", fallback=self.DEFAULT_MAX_RESULTS))
        self.language = settings.get("language", section="main", fallback=self.DEFAULT_LANGUAGE)
        # Recreate the generator so a language change takes effect immediately.
        self.fakeGenerator = Faker(self.language) | src/fakerdata.py | 3,462 | Keypirinha launcher (keypirinha.com) The default ammount of suggestions to show after the user selected the faker category The default language used to instantiate Faker Generate outputs We don't want to generate the output each time the user enter a new query Let's keep the output, so this way Keypirinha itself can filter it Remove duplicated Append suggestions Generate suggestions categories | 396 | en | 0.775254 |
from queue import Queue, Empty, Full
from ..core import DriverBase, format_msg
import pika
class Driver(DriverBase):
    """Output driver publishing formatted messages to an AMQP exchange,
    optionally buffering undeliverable messages in memory and flushing them
    on the next successful connection."""

    def __init__(self, exchange, queue, routing_key=None, buffer_maxsize=None,
                 *args, **kwargs):
        super().__init__()
        # Remaining args are forwarded to pika.ConnectionParameters.
        self._args = args
        self._kwargs = kwargs
        self._exchange = exchange
        self._queue = queue
        # Default the routing key to the queue name.
        self._routing_key = routing_key or queue
        # Optional bounded in-memory buffer; None disables buffering.
        self._buffer = Queue(buffer_maxsize) \
            if buffer_maxsize is not None else None
        self._declared = False

    def run(self, driver_id, ts, fields, tags):
        """Publish one message; on AMQP failure keep it in the buffer (if
        enabled) so a later run() can flush it."""
        if not fields:
            return
        msg = format_msg(ts, driver_id, tags, fields)
        try:
            with pika.BlockingConnection(
                pika.ConnectionParameters(*self._args, **self._kwargs)) as c:
                channel = c.channel()
                self._publish(channel, msg)
                # Flush buffer
                if self._buffer is not None:
                    try:
                        while True:
                            msg = self._buffer.get_nowait()
                            self._publish(channel, msg)
                    except Empty:
                        pass
        except pika.exceptions.AMQPError:
            # Add to buffer
            # NOTE(review): best-effort delivery — messages popped during a
            # failed flush are dropped, and a full buffer drops the current
            # message silently.
            if self._buffer is not None:
                try:
                    self._buffer.put_nowait(msg)
                except Full:
                    pass

    def _declare(self, channel):
        # Declare exchange/queue/binding once per driver lifetime; durable
        # declarations survive broker restarts.
        if not self._declared:
            channel.exchange_declare(exchange=self._exchange, durable=True)
            channel.queue_declare(queue=self._queue, durable=True)
            channel.queue_bind(
                exchange=self._exchange,
                queue=self._queue,
                routing_key=self._routing_key
            )
            self._declared = True

    def _publish(self, channel, msg):
        # Ensure broker objects exist, then publish persistently.
        self._declare(channel)
        channel.basic_publish(
            exchange=self._exchange,
            routing_key=self._routing_key,
            body=msg,
            # delivery_mode=2 marks the message persistent.
            properties=pika.BasicProperties(delivery_mode=2)
        )
| piot/outputs/amqp.py | 2,154 | Flush buffer Add to buffer | 26 | en | 0.571851 |
# -*- coding: utf-8 -*-
import subprocess
def test_too_many_arguments_in_fixture(absolute_path):
    """
    End-to-End test to check arguments count.

    It is required due to how 'function_type' parameter
    works inside 'flake8'.
    Otherwise it is not set, unit tests can not cover `is_method` correctly.
    """
    filename = absolute_path('fixtures', 'config', 'wrong_arguments.py')
    # subprocess.run waits for completion and captures output in one call,
    # replacing the previous raw Popen + communicate pair.
    completed = subprocess.run(
        ['flake8', '--select', 'Z', filename],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    assert completed.stdout.count(b'Z211') == 4
| tests/test_checkers/test_high_complexity.py | 623 | End-to-End test to check arguments count.
It is required due to how 'function_type' parameter
works inside 'flake8'.
Otherwise it is not set, unit tests can not cover `is_method` correctly.
-*- coding: utf-8 -*- | 215 | en | 0.638676 |
import itertools
try:
import theano
import theano.tensor as T
from theano.gradient import disconnected_grad
except ImportError:
theano = None
T = None
from ._backend import Backend
from .. import make_graph_backend_decorator
class _TheanoBackend(Backend):
    """Autodiff backend compiling Theano graphs into callables for function
    evaluation, gradients and Hessian-vector products."""

    def __init__(self):
        super().__init__("Theano")

    @staticmethod
    def is_available():
        # The theano import is attempted at module load; None means missing.
        return theano is not None

    @Backend._assert_backend_available
    def is_compatible(self, function, arguments):
        # Both the cost expression and every argument must be Theano tensors.
        if not isinstance(function, T.TensorVariable):
            return False
        return all([isinstance(argument, T.TensorVariable)
                    for argument in arguments])

    def _compile_function_without_warnings(self, *args, **kwargs):
        # Silence errors about inputs the compiled graph never touches.
        return theano.function(*args, **kwargs, on_unused_input="ignore")

    @Backend._assert_backend_available
    def compile_function(self, function, arguments):
        """Compiles a Theano graph into a callable."""
        return self._compile_function_without_warnings(arguments, function)

    @Backend._assert_backend_available
    def compute_gradient(self, function, arguments):
        """Returns a compiled function computing the gradient of ``function``
        with respect to ``arguments``.
        """
        if len(arguments) == 1:
            # Single argument: differentiate w.r.t. the variable itself.
            (argument,) = arguments
            gradient = T.grad(function, argument)
            return self._compile_function_without_warnings(arguments, gradient)
        gradient = T.grad(function, arguments)
        return self._compile_function_without_warnings(arguments, gradient)

    def _compute_unary_hessian_vector_product(self, gradient, argument):
        """Returns a function accepting two arguments to compute a
        Hessian-vector product of a scalar-valued unary function.
        """
        argument_type = argument.type()
        try:
            # R-operator: directional derivative of the gradient.
            Rop = T.Rop(gradient, argument, argument_type)
        except NotImplementedError:
            # Fallback when Rop is unimplemented for an op in the graph:
            # differentiate <gradient, v> with v held constant.
            proj = T.sum(gradient * disconnected_grad(argument_type))
            Rop = T.grad(proj, argument)
        return self._compile_function_without_warnings(
            [argument, argument_type], Rop)

    def _compute_nary_hessian_vector_product(self, gradients, arguments):
        """Returns a function accepting `2 * len(arguments)` arguments to
        compute a Hessian-vector product of a multivariate function.

        Notes
        -----
        The implementation is based on TensorFlow's '_hessian_vector_product'
        function in 'tensorflow.python.ops.gradients_impl'.
        """
        argument_types = [argument.type() for argument in arguments]
        try:
            Rop = T.Rop(gradients, arguments, argument_types)
        except NotImplementedError:
            # Same Lop-style fallback as the unary case, per argument, then
            # sum the disconnected-filtered gradient stacks.
            proj = [T.sum(gradient * disconnected_grad(argument_type))
                    for gradient, argument_type in zip(gradients,
                                                       argument_types)]
            proj_grad = [T.grad(proj_elem, arguments,
                                disconnected_inputs="ignore",
                                return_disconnected="None")
                         for proj_elem in proj]
            proj_grad_transpose = map(list, zip(*proj_grad))
            proj_grad_stack = [
                T.stacklists([c for c in row if c is not None])
                for row in proj_grad_transpose]
            Rop = [T.sum(stack, axis=0) for stack in proj_grad_stack]
        return self._compile_function_without_warnings(
            list(itertools.chain(arguments, argument_types)), Rop)

    @Backend._assert_backend_available
    def compute_hessian_vector_product(self, function, arguments):
        """Computes the directional derivative of the gradient, which is
        equivalent to computing a Hessian-vector product with the direction
        vector.
        """
        if len(arguments) == 1:
            (argument,) = arguments
            gradient = T.grad(function, argument)
            return self._compute_unary_hessian_vector_product(
                gradient, argument)
        gradients = T.grad(function, arguments)
        return self._compute_nary_hessian_vector_product(gradients, arguments)
# Public decorator marking a cost function's graph as Theano-based.
Theano = make_graph_backend_decorator(_TheanoBackend)
| pymanopt/autodiff/backends/_theano.py | 4,274 | Returns a function accepting `2 * len(arguments)` arguments to
compute a Hessian-vector product of a multivariate function.
Notes
-----
The implementation is based on TensorFlow's '_hessian_vector_product'
function in 'tensorflow.python.ops.gradients_impl'.
Returns a function accepting two arguments to compute a
Hessian-vector product of a scalar-valued unary function.
Compiles a Theano graph into a callable.
Returns a compiled function computing the gradient of ``function``
with respect to ``arguments``.
Computes the directional derivative of the gradient, which is
equivalent to computing a Hessian-vector product with the direction
vector. | 649 | en | 0.735338 |
#!-*-coding:utf-8-*-
import sys
# import PyQt4 QtCore and QtGui modules
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5 import QtCore
from pylinac import VMAT
from dmlc import Ui_MainWindow
class DirectoryPath(object):
    """Holds the file-system paths selected in the GUI.

    NOTE(review): MainWindow assigns DirectoryPath.fieldOpenPathfile and
    DirectoryPath.dmlcopenfilenamepath at CLASS level, which replaces the
    property descriptors below; the properties only govern instance access.
    """

    def __init__(self, pathDir, getCountImages):
        self._pathDir = pathDir
        self._fieldOpenPathfile = ""
        self._dmlcopenfilenamepath = ""
        self._getCountImages = getCountImages

    # Fix: replaced the getattr(self, '_x') indirection with direct
    # attribute access — identical behavior, idiomatic Python.
    @property
    def pathDir(self):
        return self._pathDir

    @pathDir.setter
    def pathDir(self, pathDir):
        self._pathDir = pathDir

    @property
    def fieldOpenPathfile(self):
        # Path of the open-field DICOM image.
        return self._fieldOpenPathfile

    @fieldOpenPathfile.setter
    def fieldOpenPathfile(self, fieldOpenPathfile):
        self._fieldOpenPathfile = fieldOpenPathfile

    @property
    def dmlcopenfilenamepath(self):
        # Path of the DMLC-delivery DICOM image.
        return self._dmlcopenfilenamepath

    @dmlcopenfilenamepath.setter
    def dmlcopenfilenamepath(self, dmlcopenfilenamepath):
        self._dmlcopenfilenamepath = dmlcopenfilenamepath
class MainWindow(QMainWindow, Ui_MainWindow):
    """MainWindow inherits QMainWindow"""

    def __init__(self, parent: object = None) -> None:
        super(MainWindow, self).__init__(parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)

    def openFileNameDialog(self):
        # Generic file picker; the selected path is only printed.
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        fileName, _ = QFileDialog.getOpenFileName(self, "QFileDialog.getOpenFileName()", "",
                                                  "All Files (*);;Python Files (*.py)", options=options)
        if fileName:
            print(fileName)

    def OpenDialog(self):
        # Pick the open-field DICOM image; stored on the DirectoryPath CLASS
        # (replaces the property descriptor of the same name).
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        fileName, _ = QFileDialog.getOpenFileName(self, "QFileDialog.getOpenFileName()", "",
                                                  "All Files (*);;DICOM Files (*.dcm)", options=options)
        if fileName:
            DirectoryPath.fieldOpenPathfile = fileName

    def OpenDmlcFiles(self):
        # Pick the DMLC-delivery DICOM image (class-level storage, as above).
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        fileName, _ = QFileDialog.getOpenFileName(self, "QFileDialog.getOpenFileName()", "",
                                                  "All Files (*);;DICOM Files (*.dcm)", options=options)
        if fileName:
            DirectoryPath.dmlcopenfilenamepath = fileName

    def DmlcCalculations(self, cal1, cal2, textparam):
        """Run pylinac VMAT analyses on the selected open/DMLC image pair.

        cal1 -> DRMLC test, cal2 -> DRGS test; textparam is the PDF author.
        Plots, result text and a PDF report are produced next to the DMLC file.
        """
        if cal1:
            leeds = VMAT(images=[DirectoryPath.fieldOpenPathfile, DirectoryPath.dmlcopenfilenamepath],
                         delivery_types=['open', 'dmlc'])
            leeds.analyze(test='drmlc', tolerance=1.3, x_offset=0)
            leeds.plot_analyzed_subimage('profile')
            leeds.save_analyzed_subimage('myprofile.png', subimage='profile')
            print(leeds.return_results())
            leeds.plot_analyzed_image()
            leeds.publish_pdf(DirectoryPath.dmlcopenfilenamepath + '.pdf')
        if cal2:
            drgs = VMAT(images=[DirectoryPath.fieldOpenPathfile, DirectoryPath.dmlcopenfilenamepath],
                        delivery_types=['open', 'drgs'])
            drgs.analyze(test='drgs', tolerance=1.3, x_offset=10)
            drgs.save_analyzed_subimage('myprofiledrgs.png', subimage='profile')
            print(drgs.return_results())
            drgs.plot_analyzed_image()
            drgs.publish_pdf(DirectoryPath.dmlcopenfilenamepath + 'drgs' + '.pdf', author=textparam, unit="TrueBeamSTX")

    def __del__(self):
        # Drop the generated UI object on teardown.
        self.ui = None
# -----------------------------------------------------#
if __name__ == '__main__':
    # create application
    app = QApplication(sys.argv)
    app.setApplicationName('Dmlc')
    # Module-level path holder; MainWindow stores selections on the class.
    d = DirectoryPath(pathDir="", getCountImages=0)
    # create widget
    w = MainWindow()
    w.setWindowTitle('Dmlc')
    w.show()
    # connection
    # QObject.connect( app, SIGNAL( 'lastWindowClosed()' ), app, SLOT( 'quit()' ) )
    # execute application
    sys.exit(app.exec_())
| VMAT/Dmlc/main.py | 4,151 | MainWindow inherits QMainWindow
!-*-coding:utf-8-*- import PyQt4 QtCore and QtGui modules ----------------------------------------------------- create application create widget connection QObject.connect( app, SIGNAL( 'lastWindowClosed()' ), app, SLOT( 'quit()' ) ) execute application | 286 | en | 0.353619 |
import numpy as np
import pickle
import math
try:
from utilities import dot_loss, next_batch
except ImportError:
from utilities.utilities import dot_loss, next_batch
class DontCacheRef(Exception):
    """Raised internally to make get_refs() bypass its cached outputs."""
    pass
class BasicConverter(object):
    """Distills a trained 'base' model into a smaller 'drone' model by
    training the drone to reproduce the base model's outputs, growing the
    drone's architecture whenever learning stalls."""

    def __init__(self, learning_rate = 0.05, batch_size = 1, num_epochs = 300, threshold = 0.02, add_layer_dynamic = False, layer_to_expand = 0):
        # training control
        self._learning_rate = learning_rate
        self._batchSize = batch_size
        self._num_epochs = num_epochs
        self._threshold = threshold
        self._add_layer_dynamic = add_layer_dynamic
        self._layer_to_expand = int(layer_to_expand)
        # training history
        self._updatedLoss = 1000.0  # sentinel: "never updated yet"
        self._diffs = []
        self._losses = []
        self._updates = []
        self._epoch = 0

    def losses(self):
        # Per-epoch average losses recorded so far (live list, not a copy).
        return self._losses

    def diffs(self):
        # Per-epoch relative loss improvements.
        return self._diffs

    def updates(self):
        # 0/1 flags marking epochs where the drone architecture was grown.
        return self._updates

    def save_history(self, fname):
        """Pickle [losses, diffs, updates] to the file at fname."""
        # NOTE(review): file opened without a context manager; the handle
        # leaks if pickle.dump raises — consider 'with open(...)'.
        f_train = open(fname, 'wb')
        training_data = [self._losses, self._diffs, self._updates]
        pickle.dump(training_data, f_train)
        f_train.close()

    def get_refs(self, base_model, datapoints, scaler = None, conv_1d = False, conv_2d = False, cache_data = True):
        """Return (flattened datapoints, base-model outputs) for datapoints.

        Cached on the instance when cache_data is True; the cache ignores
        later changes to datapoints or base_model.
        """
        try:
            if not cache_data:
                raise DontCacheRef()
            # Return the cached list of reference outputs for the base model
            return (self.__datapoints, self.__refs)
        except (DontCacheRef, AttributeError) as e:
            # Create the list of reference outputs for the base model
            if conv_1d and conv_2d:
                print('ERROR: conv_1d and conv_2d are mutually exclusive')
                return None
            refs = []
            flattened = []
            for point in datapoints:
                spoint = point
                # NOTE(review): the scaler is skipped for conv_2d inputs —
                # presumably 2D images are pre-scaled; confirm.
                if scaler and not conv_2d:
                    spoint = scaler.transform([point])
                prob = 0.0
                if conv_1d:
                    # Add channel and batch axes for 1D-convolutional models.
                    prob = base_model.predict_proba(np.expand_dims(np.expand_dims(spoint, axis = 2), axis = 0))[0][0]
                elif conv_2d:
                    # this will match if original model was trained with correct dimensionality
                    prob = base_model.predict_proba(np.expand_dims(spoint, axis = 0))
                else:
                    prob = base_model.predict_proba(spoint.reshape(1, -1))[0][0]
                refs.append(prob)
                flattened.append(spoint.flatten().tolist())
            self.__datapoints = np.asarray(flattened)
            self.__refs = np.asarray(refs)
            return (self.__datapoints, self.__refs)

    def convert_model(self, drone_model, base_model, datapoints, scaler = None, conv_1d = False, conv_2d = False, cache_data = True, epoch_reset = False):
        """Train drone_model to mimic base_model on datapoints and return it,
        expanding (or adding) a layer whenever loss improvement stalls."""
        # Get the list of reference outputs for the base model
        datapoints_for_drone, refs = self.get_refs(base_model, datapoints, scaler, conv_1d, conv_2d, cache_data)
        inflate = 0 # to inflate the learning without change iterations
        if epoch_reset:
            self._epoch = 0
        avloss = 0
        # convert until min epochs are passed and leave only if loss at minima
        while (self._epoch < self._num_epochs) or (self._updatedLoss < avloss):
            # initialize the total loss for the epoch
            epochloss = []
            # loop over our data in batches
            for (batchX, batchY) in next_batch(datapoints_for_drone, refs, self._batchSize):
                batchY = np.array(batchY)
                if batchX.shape[0] != self._batchSize:
                    # Trailing partial batches are skipped entirely.
                    print('Batch size insufficient (%s), continuing...' % batchY.shape[0])
                    continue
                # Find current output and calculate loss for our graph
                preds = drone_model.evaluate_total(batchX, debug = False)
                loss, error = dot_loss(preds, batchY)
                epochloss.append(loss)
                # Update the model
                drone_model.update(batchX, batchY, self._learning_rate)
            avloss = np.average(epochloss)
            diff = 0.0
            if self._epoch > 0:
                # is the relative improvement of the loss too small, smaller than threshold
                diff = math.fabs(avloss - self._losses[-1]) / avloss
            self._diffs.append(diff)
            self._losses.append(avloss)
            update = 0
            modify = True if (diff < self._threshold) else False
            if modify:
                # If it is less than the threshold, is it below
                # where we last updated, has the drone learned enough
                #
                # - skip checks if we have never updated before
                # - do at least 6 learning iterations before attempting new update
                # - use asymptotic exponential to push model to learn
                # until its loss is far enough away from previous update,
                inflate += 1 # iterate inflating
                modify = True if self._updatedLoss == 1000.0 else (avloss < (self._updatedLoss - (50.0 * (1.0 - np.exp(-0.04 * inflate)) * diff * avloss))) and (inflate > 5)
            if modify:
                update = 1
                inflate = 0
                print('Model conversion not sufficient, updating...')
                print('Last updated loss: %s' % self._updatedLoss)
                self._updatedLoss = avloss
                # Grow the drone: either append a whole layer or widen one.
                if self._add_layer_dynamic:
                    drone_model.add_layer_dynamic()
                else:
                    drone_model.expand_layer_dynamic(self._layer_to_expand)
                print('Model structure is now:')
                drone_model.print_layers()
            self._updates.append(update)
            print('Epoch: %s, loss %s, diff %.5f, last updated loss %.5f' % (self._epoch, avloss, diff, self._updatedLoss))
            # update our loss history list by taking the average loss
            # across all batches
            if self._epoch == 0: # be consistent at the first epoch
                self._losses.append(avloss)
                self._diffs.append(math.fabs(avloss - self._updatedLoss) / avloss)
                self._updates.append(0)
            self._epoch += 1
        return drone_model
class AdvancedConverter(object):
def __init__(self, learning_rate = 0.05, batch_size = 1, num_epochs = 300, threshold = 0.02, add_layer_dynamic = False, layer_to_expand = None):
# training control
self._learning_rate = learning_rate
self._batchSize = batch_size
self._num_epochs = num_epochs
self._threshold = threshold
self._add_layer_dynamic = add_layer_dynamic
self.__round_robin = False
if layer_to_expand is None:
self.__round_robin = True
self._layer_to_expand = int(layer_to_expand) if layer_to_expand is not None else None
# training history
self._updatedLoss = 1000.0
self._diffs = []
self._losses = []
self._updates = []
self._epoch = 0
self.__rr_begin = 0
self.__rr_last = 0
    def losses(self):
        # Per-epoch average losses recorded so far (live list, not a copy).
        return self._losses
    def diffs(self):
        # Per-epoch relative loss improvements.
        return self._diffs
    def updates(self):
        # 0/1 flags marking epochs where the drone architecture was grown.
        return self._updates
def round_robin(self, num_layers):
self.__rr_last = self.__rr_begin
self.__rr_begin = np.random.ranint(0, num_layers - 1) # careful, expanding last layer will change output number
return self.__rr_last
def save_history(self, fname):
f_train = open(fname, 'wb')
training_data = [self._losses, self._diffs, self._updates]
pickle.dump(training_data, f_train)
f_train.close()
def get_refs(self, base_model, datapoints, scaler = None, cache_data = True):
try:
if not cache_data:
raise DontCacheRef()
# Return the cached list of reference outputs for the base model
return (self.__datapoints, self.__refs)
except(DontCacheRef, AttributeError) as e:
# Create the list of reference outputs for the base model
refs = []
datapoints_for_drone = datapoints
if scaler:
datapoints_for_drone = scaler.transform(datapoints)
for point in datapoints_for_drone:
prob = base_model.predict_proba(point)
refs.append(prob)
self.__datapoints = datapoints_for_drone
self.__refs = refs
return (self.__datapoints, self.__refs)
def convert_model(self, drone_model, base_model, datapoints, scaler = None, cache_data = True, epoch_reset = False):
# Get the list of reference outputs for the base model
datapoints_for_drone, refs = self.get_refs(base_model, datapoints, scaler, cache_data)
inflate = 0 # to inflate the learning without change iterations
if epoch_reset:
self._epoch = 0
avloss = 0
# convert until min epochs are passed and leave only if loss at minima
while (self._epoch < self._num_epochs) or (self._updatedLoss < avloss):
# initialize the total loss for the epoch
epochloss = []
# loop over our data in batches
for (batchX, batchY) in next_batch(datapoints_for_drone, refs, self._batchSize):
batchY = np.array(batchY)
if batchX.shape[0] != self._batchSize:
print('Batch size insufficient ({}), continuing...'.format(batchY.shape[0]))
continue
# Find current output and calculate loss for our graph
preds = drone_model.evaluate_total(batchX, debug = False)
loss, error = dot_loss(preds, batchY)
epochloss.append(loss)
# Update the model
drone_model.update(batchX, batchY, self._learning_rate)
avloss = np.average(epochloss)
diff = 0.0
if self._epoch > 0:
# is the relative improvement of the loss too small, smaller than threshold
diff = math.fabs(avloss - self._losses[-1]) / avloss
self._diffs.append(diff)
self._losses.append(avloss)
update = 0
modify = True if (diff < self._threshold) else False
if modify:
# If it is less than the threshold, is it below
# where we last updated, has the drone learned enough
#
# - skip checks if we have never updated before
# - do at least 6 learning iterations before attempting new update
# - use asymptotic exponential to push model to learn
# until its loss is far enough away from previous update,
inflate += 1 # iterate inflating
modify = True if self._updatedLoss == 1000.0 else (avloss < (self._updatedLoss - (50.0 * (1.0 - np.exp(-0.04 * inflate)) * diff * avloss))) and (inflate > 5)
if modify:
update = 1
inflate = 0
print('Model conversion not sufficient, updating...')
print('Last updated loss: %s' % self._updatedLoss)
self._updatedLoss = avloss
if self._add_layer_dynamic:
drone_model.add_layer_dynamic()
elif self._layer_to_expand is not None:
drone_model.expand_layer_dynamic(self._layer_to_expand)
else:
drone_model.expand_layer_dynamic(self.round_robin(drone_model.num_layers()))
print('Model structure is now:')
drone_model.print_layers()
self._updates.append(update)
print('Epoch: %s, loss %s, diff %.5f, last updated loss %.5f' % (self._epoch, avloss, diff, self._updatedLoss))
# update our loss history list by taking the average loss
# across all batches
if self._epoch == 0: # be consistent at the first epoch
self._losses.append(avloss)
self._diffs.append(math.fabs(avloss - self._updatedLoss) / avloss)
self._updates.append(0)
self._epoch += 1
return drone_model
| nndrone/converters.py | 12,628 | training control training history Return the cached list of reference outputs for the base model Create the list of reference outputs for the base model this will match if original model was trained with correct dimensionality Get the list of reference outputs for the base model to inflate the learning without change iterations convert until min epochs are passed and leave only if loss at minima initialize the total loss for the epoch loop over our data in batches Find current output and calculate loss for our graph Update the model is the relative improvement of the loss too small, smaller than threshold If it is less than the threshold, is it below where we last updated, has the drone learned enough - skip checks if we have never updated before - do at least 6 learning iterations before attempting new update - use asymptotic exponential to push model to learn until its loss is far enough away from previous update, iterate inflating update our loss history list by taking the average loss across all batches be consistent at the first epoch training control training history careful, expanding last layer will change output number Return the cached list of reference outputs for the base model Create the list of reference outputs for the base model Get the list of reference outputs for the base model to inflate the learning without change iterations convert until min epochs are passed and leave only if loss at minima initialize the total loss for the epoch loop over our data in batches Find current output and calculate loss for our graph Update the model is the relative improvement of the loss too small, smaller than threshold If it is less than the threshold, is it below where we last updated, has the drone learned enough - skip checks if we have never updated before - do at least 6 learning iterations before attempting new update - use asymptotic exponential to push model to learn until its loss is far enough away from previous 
update, iterate inflating update our loss history list by taking the average loss across all batches be consistent at the first epoch | 2,097 | en | 0.887515 |
import emoji

# Render an emoji alias as its Unicode character.
# Bug fix: the alias must not be escaped; '\:sunglasses:?' was emitted
# verbatim, ':sunglasses:' is the form emojize() substitutes.
print(emoji.emojize(':sunglasses:'))

# --- Slicing (Curso em Video Python, lesson 9 review, 2020-07-13) ---
# Bug fix: `String['Curso em Videos Python']` was a NameError and `frase`
# was sliced before ever being assigned.
frase = 'Curso em Video Python'
print(frase[9:13])     # characters at indices 9..12
print(frase[9:21:2])   # every 2nd character from index 9 up to 20
print(frase[:5])       # from the start up to index 4
print(frase[15:])      # from index 15 to the end
print(frase[9::3])     # every 3rd character from index 9

# --- Transformation methods (Object.method() syntax) ---
print(frase.find('deo'))                   # index of the substring
print(frase.find('Android'))               # -1: substring not found
print(frase.replace('Python', 'Android'))  # substitute a substring
print(frase.lower())                       # all lower case
print(frase.capitalize())                  # bug fix: was misspelled `capitelize`
print(frase.title())                       # capitalize each word

# --- Stripping whitespace ---
# Bug fix: `frase = ['Aprenda Python']` created a *list*, which has no
# rstrip/lstrip; use a padded string so the strips have work to do.
frase = '  Aprenda Python  '
print(frase.rstrip())  # "r" keyword: remove whitespace on the right only
print(frase.lstrip())  # remove whitespace on the left only

# --- Splitting and joining ---
partes = frase.split()     # list of words (splits on whitespace)
print('-'.join(partes))    # join every element with '-' as the separator
| natural-languages-python.py | 876 | Transformação ComentárioAula Curso Em Video Python 9 => revisão 2 [13/07/2020 14h00m]Funcionalidades de TrasnformaçãoComentárioAcha, buscaAcha, buscaSubistuitudo em minusculotudo maiusculo o lado direito "r" é uma keyword de direita strip vai remover os, somente os espaços da esquerda keyword "r"Funcionalidade Divisão de Strings [Cadeia de Caracteres]vc vai juntar todos os elementos de frase e vai usar esse separador aqui '-' | 429 | pt | 0.988508 |
# Flipped to True below once the native "_C" ops library loads successfully.
_HAS_OPS = False
def _register_extensions():
    """Locate the compiled ``_C`` extension next to this file and register
    its custom ops with torch.

    Raises ImportError if the extension is not present (caught by the
    module-level guard below, which then leaves ``_HAS_OPS`` False).
    """
    import os
    import importlib.machinery
    import torch

    # importlib.machinery replaces the deprecated `imp` module, which was
    # removed in Python 3.12; behavior is the same: find the platform
    # extension file named "_C" in this package's directory.
    lib_dir = os.path.dirname(__file__)
    loader_details = (
        importlib.machinery.ExtensionFileLoader,
        importlib.machinery.EXTENSION_SUFFIXES,
    )
    ext_finder = importlib.machinery.FileFinder(lib_dir, loader_details)
    ext_spec = ext_finder.find_spec("_C")
    if ext_spec is None:
        raise ImportError("extension module '_C' not found in " + lib_dir)
    torch.ops.load_library(ext_spec.origin)
try:
    _register_extensions()
    _HAS_OPS = True
except (ImportError, OSError):
    # Torchvision was built without the C extension, or it failed to load;
    # fall back to pure-Python operation with _HAS_OPS left False.
    pass
def _check_cuda_version():
    """
    Make sure that CUDA versions match between the pytorch install and torchvision install
    """
    if not _HAS_OPS:
        return -1
    import torch

    _version = torch.ops.torchvision._cuda_version()
    if _version != -1 and torch.version.cuda is not None:
        # torchvision encodes its CUDA version as a digit string,
        # e.g. "9020" -> 9.2 and "10010" -> 10.1; pick the major/minor
        # digits out by position.
        tv_str = str(_version)
        if int(tv_str) < 10000:
            tv_major, tv_minor = int(tv_str[0]), int(tv_str[2])
        else:
            tv_major, tv_minor = int(tv_str[0:2]), int(tv_str[3])
        # torch reports its CUDA version as a dotted string, e.g. "10.1".
        t_parts = torch.version.cuda.split('.')
        t_major, t_minor = int(t_parts[0]), int(t_parts[1])
        if (t_major, t_minor) != (tv_major, tv_minor):
            raise RuntimeError("Detected that PyTorch and torchvision were compiled with different CUDA versions. "
                               "PyTorch has CUDA Version={}.{} and torchvision has CUDA Version={}.{}. "
                               "Please reinstall the torchvision that matches your PyTorch install."
                               .format(t_major, t_minor, tv_major, tv_minor))
    return _version
# Fail fast at import time if torch and torchvision were built against
# different CUDA versions.
_check_cuda_version()
| torchvision/extension.py | 1,581 | Make sure that CUDA versions match between the pytorch install and torchvision install
load the custom_op_library and register the custom ops | 143 | en | 0.823146 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SequenceFile Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow import dtypes
from tensorflow.compat.v1 import data
from tensorflow_io.core.python.ops import _load_library
hadoop_ops = _load_library('_hadoop_ops.so')
class SequenceFileDataset(data.Dataset):
  """A dataset yielding the (key, value) records of a Hadoop sequence file."""

  def __init__(self, filenames):
    """Create a `SequenceFileDataset`.

    A Hadoop sequence file stores (key, value) pairs sequentially; this
    dataset emits them as pairs of string tensors. At the moment only the
    `org.apache.hadoop.io.Text` serialization type is supported, and
    compressed files are not handled.

    For example:

    ```python
    dataset = SequenceFileDataset("/foo/bar.seq")
    iterator = dataset.make_one_shot_iterator()
    next_element = iterator.get_next()
    # Prints the (key, value) pairs inside a hadoop sequence file.
    while True:
      try:
        print(sess.run(next_element))
      except tf.errors.OutOfRangeError:
        break
    ```

    Args:
      filenames: A `tf.string` tensor containing one or more filenames.
    """
    self._filenames = tf.convert_to_tensor(
        filenames, dtype=dtypes.string, name="filenames")
    super(SequenceFileDataset, self).__init__()

  def _inputs(self):
    # Reads from files directly, so there are no upstream datasets.
    return []

  def _as_variant_tensor(self):
    return hadoop_ops.sequence_file_dataset(
        self._filenames, (dtypes.string, dtypes.string))

  @property
  def output_types(self):
    return dtypes.string, dtypes.string

  @property
  def output_shapes(self):
    # Keys and values are scalar strings.
    return (tf.TensorShape([]), tf.TensorShape([]))

  @property
  def output_classes(self):
    return tf.Tensor, tf.Tensor
| tensorflow_io/hadoop/python/ops/hadoop_dataset_ops.py | 2,501 | A Sequence File Dataset that reads the sequence file.
Create a `SequenceFileDataset`.
`SequenceFileDataset` allows a user to read data from a hadoop sequence
file. A sequence file consists of (key value) pairs sequentially. At
the moment, `org.apache.hadoop.io.Text` is the only serialization type
being supported, and there is no compression support.
For example:
```python
dataset = SequenceFileDataset("/foo/bar.seq")
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
# Prints the (key, value) pairs inside a hadoop sequence file.
while True:
try:
print(sess.run(next_element))
except tf.errors.OutOfRangeError:
break
```
Args:
filenames: A `tf.string` tensor containing one or more filenames.
SequenceFile Dataset.
Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== | 1,428 | en | 0.740516 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle.fluid.layers as layers
from paddle.fluid.executor import Executor
import paddle.fluid.core as core
from paddle.fluid.backward import append_backward
import numpy
class TestWhileOp(unittest.TestCase):
    """Exercises fluid.layers.While: accumulate three input vectors inside
    a while loop and compare the fetched sum against numpy's."""

    def test_simple_forward(self):
        # Three 10-element float32 feed inputs (no implicit batch dim).
        d0 = layers.data(
            "d0", shape=[10], append_batch_size=False, dtype='float32')
        d1 = layers.data(
            "d1", shape=[10], append_batch_size=False, dtype='float32')
        d2 = layers.data(
            "d2", shape=[10], append_batch_size=False, dtype='float32')
        # Write the inputs into a tensor array at slots 0..2; mem_array
        # holds the running sum, seeded with zeros at slot 0.
        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = True
        init = layers.zeros(shape=[10], dtype='float32')
        mem_array = layers.array_write(x=init, i=i)
        data_array = layers.array_write(x=d0, i=i)
        i = layers.increment(i)
        layers.array_write(d1, i, array=data_array)
        i = layers.increment(i)
        layers.array_write(d2, i, array=data_array)
        # Reset the loop counter and fix the trip count at 3; neither
        # should receive gradients.
        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = True
        array_len = layers.fill_constant(shape=[1], dtype='int64', value=3)
        array_len.stop_gradient = True
        cond = layers.less_than(x=i, y=array_len)
        while_op = layers.While(cond=cond)
        with while_op.block():
            # Loop body: mem[i+1] = mem[i] + data[i]. The in-place
            # increment of `i` drives both the write slot and the
            # refreshed loop condition.
            d = layers.array_read(array=data_array, i=i)
            prev = layers.array_read(array=mem_array, i=i)
            result = layers.sums(input=[d, prev])
            i = layers.increment(x=i, in_place=True)
            layers.array_write(result, i=i, array=mem_array)
            layers.less_than(x=i, y=array_len, cond=cond)
        # After the loop, the last slot of mem_array holds d0 + d1 + d2.
        sum_result = layers.array_read(array=mem_array, i=i)
        loss = layers.mean(sum_result)
        append_backward(loss)
        cpu = core.CPUPlace()
        exe = Executor(cpu)
        d = []
        for i in range(3):
            d.append(numpy.random.random(size=[10]).astype('float32'))
        outs = exe.run(feed={'d0': d[0],
                             'd1': d[1],
                             'd2': d[2]},
                       fetch_list=[sum_result])
        # The fetched accumulator must equal the elementwise sum of the
        # three feeds (compared via their grand totals).
        self.assertAlmostEqual(numpy.sum(d), numpy.sum(outs[0]), delta=0.01)
if __name__ == '__main__':
unittest.main()
| python/paddle/fluid/tests/unittests/test_while_op.py | 2,853 | Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 583 | en | 0.863545 |
from models.joint_fpn import JointFpn
from trainers.segmentation_trainer import SegmentationTrainer
from data_generators.joint_data_generator import JointDataGenerator
from data_generators.scenenet_rgbd_data_generator import ScenenetRGBDDataGenerator
from utils.config import process_config
from utils.dirs import create_dirs
from utils.utils import get_args
import tensorflow as tf
from utils import factory
from tensorflow.keras.mixed_precision import experimental as mixed_precision
def main():
    """Entry point: load the config, build data generators, model and
    trainer, then run training."""
    # Capture the config path from the run arguments,
    # then process the json configuration file.
    try:
        args = get_args()
        config = process_config(args.config)
    except Exception as exc:
        # Fixed: was a bare `except:` that also swallowed KeyboardInterrupt
        # and SystemExit, and exited with status 0 on failure. Report the
        # cause and fail with a non-zero code so callers see the error.
        print("missing or invalid arguments: %s" % exc)
        exit(1)

    # Use mixed precision for training when requested in the config.
    if config.exp.mixed_precision:
        print('Use mixed precision training')
        policy = mixed_precision.Policy('mixed_float16')
        mixed_precision.set_policy(policy)

    if config.exp.jpa_optimization:
        tf.config.optimizer.set_jit(True)

    # Create the experiments dirs for logs and checkpoints.
    create_dirs([config.callbacks.tensorboard_log_dir,
                 config.callbacks.checkpoint_dir])

    print('Create the training data generator.')
    if config.generator.is_scenenet:
        train_data = ScenenetRGBDDataGenerator(config)
    else:
        train_data = JointDataGenerator(config)

    # A validation generator is only built when a validation image
    # directory is configured.
    validation_data = None
    if isinstance(config.validation.img_dir, str):
        print('Create the validation data generator.')
        validation_data = JointDataGenerator(
            config, is_training_set=False)

    print('Create the model.')
    model = factory.create(config.model.class_name)(config, train_data)

    print('Create the trainer')
    trainer = SegmentationTrainer(
        model, train_data, config, validation_generator=validation_data)

    print('Start training the model.')
    trainer.train()
if __name__ == '__main__':
main()
| train_joint.py | 1,975 | capture the config path from the run arguments then process the json configuration file use mixed precision for training create the experiments dirs | 148 | en | 0.548635 |
# coding: utf-8
import warnings
import numpy as np
import pandas as pd
from packaging import version
from sklearn.metrics import pairwise_distances_chunked
from sklearn.utils import check_X_y,check_random_state
from sklearn.preprocessing import LabelEncoder
import functools
from pyclustering.cluster.clarans import clarans
from pyclustering.utils import timedcall
from pyclustering.utils import (draw_clusters,
average_inter_cluster_distance,
average_intra_cluster_distance,
average_neighbor_distance)
import sklearn
from sklearn.metrics import (davies_bouldin_score,
silhouette_score,
pairwise_distances,
calinski_harabasz_score
)
# They changed the name of calinski_harabaz_score in later version of sklearn:
# https://github.com/scikit-learn/scikit-learn/blob/c4733f4895c1becdf587b38970f6f7066656e3f9/doc/whats_new/v0.20.rst#id2012
sklearn_version = version.parse(sklearn.__version__)
nm_chg_ver = version.parse("0.23")
if sklearn_version >= nm_chg_ver:
from sklearn.metrics import calinski_harabasz_score as _cal_score
else:
from sklearn.metrics import calinski_harabaz_score as _cal_score
def _get_clust_pairs(clusters):
return [(i, j) for i in clusters for j in clusters if i > j]
def _dunn(data=None, dist=None, labels=None):
    """Dunn index: smallest between-cluster distance divided by the
    largest within-cluster diameter (`data` is unused)."""
    clusters = set(labels)
    min_between = min(
        dist[np.ix_(labels == a, labels == b)].min()
        for a, b in _get_clust_pairs(clusters)
    )
    max_within = max(
        dist[np.ix_(labels == c, labels == c)].max()
        for c in clusters
    )
    return min_between / max_within
def dunn(dist, labels):
    """Public Dunn-index entry point for a precomputed distance matrix."""
    return _dunn(dist=dist, labels=labels)
def cop(data, dist, labels):
    """COP index: size-weighted intra-cluster cohesion over the tightest
    inter-cluster separation, averaged over all samples."""
    clusters = set(labels)
    cpairs = _get_clust_pairs(clusters)
    # Maximum pairwise distance between each pair of clusters.
    max_prox = [
        dist[np.ix_(labels == a, labels == b)].max()
        for a, b in cpairs
    ]
    contributions = []
    for lbl in clusters:
        members = data[labels == lbl]
        centroid = members.mean(axis=0, keepdims=True)
        # Mean distance of the cluster's members to its centroid.
        cohesion = pairwise_distances(members, centroid).mean()
        # Tightest separation from any other cluster.
        separation = min(
            prox for pair, prox in zip(cpairs, max_prox) if lbl in pair
        )
        contributions.append(len(members) * cohesion / separation)
    return sum(contributions) / len(labels)
def _silhouette_score2(data=None, dist=None, labels=None):
    # Silhouette score computed on the precomputed distance matrix
    # (`data` is accepted only for a uniform CVI-function signature).
    return silhouette_score(dist, labels, metric='precomputed')
def _davies_bouldin_score2(data=None, dist=None, labels=None):
    """Davies-Bouldin index on the raw data (`dist` is ignored), with
    divide-by-zero warnings silenced for the duration of the call."""
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', message='divide by zero')
        return davies_bouldin_score(data, labels)
def _calinski_harabaz_score2(data=None, dist=None, labels=None):
    """Calinski-Harabasz score on the raw data (`dist` is ignored)."""
    score = _cal_score(data, labels)
    return score
def check_number_of_labels(n_labels, n_samples):
    """Raise ValueError unless 2 <= n_labels <= n_samples - 1."""
    if 1 < n_labels < n_samples:
        return
    raise ValueError("Number of labels is %d. Valid values are 2 "
                     "to n_samples - 1 (inclusive)" % n_labels)
def cluster_dist_reduce(D_chunk, start, labels, label_freqs):
    """Reduce one chunk of a distance matrix to per-row cluster distances.

    Returns (intra, inter): for each row, the *summed* distance to its own
    cluster (the caller averages over cluster size), and the mean distance
    to the nearest other cluster.
    """
    n_rows = len(D_chunk)
    n_clusters = len(label_freqs)
    # Per-row sums of distances to every cluster.
    clust_dists = np.zeros((n_rows, n_clusters), dtype=D_chunk.dtype)
    for row, row_dists in enumerate(D_chunk):
        clust_dists[row] += np.bincount(labels, weights=row_dists,
                                        minlength=n_clusters)
    # Select each row's own-cluster entry (advanced indexing -> a copy).
    own_idx = (np.arange(n_rows), labels[start:start + n_rows])
    intra_clust_dists = clust_dists[own_idx]
    # Mask the own-cluster column, average the rest per cluster size, and
    # keep the nearest other cluster.
    clust_dists[own_idx] = np.inf
    clust_dists /= label_freqs
    inter_clust_dists = clust_dists.min(axis=1)
    return intra_clust_dists, inter_clust_dists
def inter_cluster_dist(data=None, dist=None, labels=None):
    """Mean nearest-other-cluster distance (intra component discarded)."""
    return cluster_distances(dist, labels, metric='precomputed')[1]
def intra_cluster_dist(data=None, dist=None, labels=None):
    """Mean own-cluster distance (inter component discarded)."""
    return cluster_distances(dist, labels, metric='precomputed')[0]
def cluster_distances(X, labels, *, metric='precomputed', random_state=None, **kwds):
    # Thin wrapper around intra_inter_distances; `random_state` is accepted
    # for signature compatibility but unused.
    return intra_inter_distances(X, labels, metric=metric, **kwds)
def intra_inter_distances(X, labels, metric='precomputed'):
    """Return (mean intra-cluster, mean nearest-other-cluster) distances.

    Adapted from scikit-learn's silhouette internals: streams the
    precomputed distance matrix chunk-by-chunk and reduces each chunk
    with cluster_dist_reduce.

    X must be a square precomputed distance matrix with a zero diagonal;
    labels must contain between 2 and n_samples - 1 distinct clusters.
    `metric` is accepted but unused (the matrix is always treated as
    precomputed).

    NOTE(review): the intra values coming out of cluster_dist_reduce are
    per-row *sums*; they are averaged over samples here but never divided
    by cluster size — confirm that matches the intended definition.
    """
    # Check for non-zero diagonal entries in precomputed distance matrix
    atol = np.finfo(X.dtype).eps * 100
    if np.any(np.abs(np.diagonal(X)) > atol):
        raise ValueError(
            'The precomputed distance matrix contains non-zero '
            'elements on the diagonal. Use np.fill_diagonal(X, 0).'
        )
    # Re-encode labels as 0..k-1 so bincount-based reduction works.
    le = LabelEncoder()
    labels = le.fit_transform(labels)
    n_samples = len(labels)
    label_freqs = np.bincount(labels)
    check_number_of_labels(len(le.classes_), n_samples)
    # Stream the matrix in chunks; zip(*) transposes the per-chunk
    # (intra, inter) pairs into two tuples of arrays.
    reduce_func = functools.partial(cluster_dist_reduce,
                                    labels=labels, label_freqs=label_freqs)
    results = zip(*pairwise_distances_chunked(X, reduce_func=reduce_func))
    intra_clust_dists, inter_clust_dists = results
    intra_clust_dists = np.concatenate(intra_clust_dists)
    inter_clust_dists = np.concatenate(inter_clust_dists)
    return np.mean(intra_clust_dists),np.mean(inter_clust_dists)
def clarans_labels(clarans_object):
    """Flatten pyclustering CLARANS clusters into a per-point label array.

    `clarans_object.get_clusters()` returns one list of point indices per
    cluster; the result maps every point index back to its cluster id,
    ordered by point index.
    """
    clusters = clarans_object.get_clusters()
    melted = (pd.DataFrame(clusters).T
                .melt(var_name='clusters')
                .dropna())
    melted['value'] = melted.value.astype(int)
    labels = (melted.sort_values(['value'])
                    .set_index('value')
                    .values
                    .flatten())
    return labels
def calculate_clarans_cvi(data, initial_cluster, dist=None, max_cluster=10):
    """Compute cluster-validity indices for CLARANS clusterings of `data`.

    Runs CLARANS for k in [initial_cluster, max_cluster) and records one
    row per k with: average inter-cluster distance, silhouette,
    Calinski-Harabasz, average intra-cluster distance, Davies-Bouldin and
    Dunn indices.

    Parameters
    ----------
    data : pandas.DataFrame
        Observations (rows) to cluster.
    initial_cluster : int
        Smallest number of clusters to evaluate.
    dist : ndarray, optional
        Precomputed pairwise distance matrix; computed from `data` when
        None (previously the argument was accepted but always ignored).
    max_cluster : int, optional
        Exclusive upper bound on the cluster count; default 10 matches
        the previously hard-coded limit.

    Returns
    -------
    pandas.DataFrame indexed by k, one column per validity index.
    """
    columns = ['avg_inter_dist', 'silhouette', 'calinski',
               'avg_intra_dist', 'davies', 'dunn']
    cvi_df = pd.DataFrame(columns=columns)
    df_list = data.values.tolist()
    # Honour a caller-supplied distance matrix instead of recomputing.
    if dist is None:
        dist = pairwise_distances(data)
    np.fill_diagonal(dist, 0)
    for k in range(initial_cluster, max_cluster):
        print(k)
        clarans_model = clarans(df_list, k, 3, 5)
        (_, result) = timedcall(clarans_model.process)
        labels = clarans_labels(result)
        # Bug fixes: the two average-distance computations were commented
        # out but still referenced (NameError), and sklearn's
        # silhouette_score takes (X, labels, metric=...), not dist=/labels=.
        avg_intra_dist, avg_inter_dist = intra_inter_distances(dist, labels)
        sihlouette = silhouette_score(dist, labels, metric='precomputed')
        davies = davies_bouldin_score(data, labels)
        calinski = calinski_harabasz_score(data, labels)
        dunn_ = dunn(dist, labels)
        # Bug fix: the row values are now in the declared column order.
        cvi_df.loc[k] = [avg_inter_dist, sihlouette, calinski,
                         avg_intra_dist, davies, dunn_]
        print(cvi_df)
        del clarans_model
    return cvi_df
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
from common.onnx_layer_test_class import Caffe2OnnxLayerTest
class TestImageScaler(Caffe2OnnxLayerTest):
def create_net(self, shape, scale, ir_version):
"""
ONNX net IR net
Input->ImageScaler->Output => Input->ScaleShift(Power)
"""
#
# Create ONNX model
#
import onnx
from onnx import helper
from onnx import TensorProto
input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, shape)
bias = np.random.randint(-10, 10, shape[1]).astype(np.float)
node_def = onnx.helper.make_node(
'ImageScaler',
inputs=['input'],
outputs=['output'],
bias=bias,
scale=scale
)
# Create the graph (GraphProto)
graph_def = helper.make_graph(
[node_def],
'test_model',
[input],
[output],
)
# Create the model (ModelProto)
onnx_net = helper.make_model(graph_def, producer_name='test_model')
#
# Create reference IR net
#
ref_net = None
return onnx_net, ref_net
def create_net_const(self, shape, scale, precision, ir_version):
"""
ONNX net IR net
Input->Concat(+scaled const)->Output => Input->Concat(+const)
"""
#
# Create ONNX model
#
import onnx
from onnx import helper
from onnx import TensorProto
concat_axis = 0
output_shape = shape.copy()
output_shape[concat_axis] *= 2
input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
constant = np.random.randint(-127, 127, shape).astype(np.float)
bias = np.random.randint(-10, 10, shape[1]).astype(np.float)
node_const_def = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['const1'],
value=helper.make_tensor(
name='const_tensor',
data_type=TensorProto.FLOAT,
dims=constant.shape,
vals=constant.flatten(),
),
)
node_def = onnx.helper.make_node(
'ImageScaler',
inputs=['const1'],
outputs=['scale'],
bias=bias,
scale=scale
)
node_concat_def = onnx.helper.make_node(
'Concat',
inputs=['input', 'scale'],
outputs=['output'],
axis=concat_axis
)
# Create the graph (GraphProto)
graph_def = helper.make_graph(
[node_const_def, node_def, node_concat_def],
'test_model',
[input],
[output],
)
# Create the model (ModelProto)
onnx_net = helper.make_model(graph_def, producer_name='test_model')
#
# Create reference IR net
#
ir_const = constant * scale + np.expand_dims(np.expand_dims([bias], 2), 3)
if precision == 'FP16':
ir_const = ir_const.astype(np.float16)
ref_net = None
return onnx_net, ref_net
test_data_precommit = [dict(shape=[2, 4, 6, 8], scale=4.5),
dict(shape=[1, 1, 10, 12], scale=0.5)]
test_data = [dict(shape=[1, 1, 10, 12], scale=0.5),
dict(shape=[1, 3, 10, 12], scale=1.5),
dict(shape=[6, 8, 10, 12], scale=4.5)]
@pytest.mark.parametrize("params", test_data_precommit)
def test_image_scaler_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_image_scaler(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
@pytest.mark.parametrize("params", test_data_precommit)
def test_image_scaler_const_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_image_scaler_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
| tests/layer_tests/onnx_tests/test_image_scaler.py | 5,106 | ONNX net IR net
Input->ImageScaler->Output => Input->ScaleShift(Power)
ONNX net IR net
Input->Concat(+scaled const)->Output => Input->Concat(+const)
Copyright (C) 2018-2022 Intel Corporation SPDX-License-Identifier: Apache-2.0 Create ONNX model Create the graph (GraphProto) Create the model (ModelProto) Create reference IR net Create ONNX model Create the graph (GraphProto) Create the model (ModelProto) Create reference IR net | 514 | en | 0.206604 |
lista = ['item1', 'item2', 'item3', 123, 12.43, 898.34, 00.989]
print(lista)
del lista[0] # pode remover tudo ou apenas um item de um indice permanentemente
popped = lista.pop(0) #pode remover um item pelo indice de uma lista, porem o item tirado pode ser posto em uma variavel
lista.remove('item3') # pode remover um item pelo valor / remove o primeiro valor da lista
print(lista)
print(popped)
valores = list(range(0, 11)) # list cria uma lista
print(valores)
valores_1 = [7, 3, 6, 9, 7, 8, 2, 1, 78, 90, 23, 45, 56, 21, 3]
print(valores_1)
valores_1.sort() # coloca os itens de forma ordenada em uma lista permanentemente
print(valores_1)
valores_1.sort(reverse=True) # faz o mesmo de cima, porem inverte a ordem
print(valores_1)
print(len(valores_1)) # len conta a quantidade de elementos de uma lista ou varavel
valores_1[0] = 'new' #substitui um valor de indice por outro no mesmo indice
valores_1.append('alex') # adiciona um objeto no final da lista
valores_1.insert(4, 'camila') # insere um item em um indice especifico
print(valores_1)
print('\n')
a = [12, 43, 76, 35, 24] # lista
b = a # cria uma ligação com a lista
print(f'{a}\n{b}')
b.remove(43)
print(f'{a}\n{b}')
c = a[:] # [:] usado para criar uma copia de uma lista que pode ser modificado como quiser
c.append('jack')
print(f'{a}\n{b}\n{c}')
| python/cursoemvideo-python/03-mundo-3/listas/lista 1/listas.py | 1,318 | pode remover tudo ou apenas um item de um indice permanentementepode remover um item pelo indice de uma lista, porem o item tirado pode ser posto em uma variavel pode remover um item pelo valor / remove o primeiro valor da lista list cria uma lista coloca os itens de forma ordenada em uma lista permanentemente faz o mesmo de cima, porem inverte a ordem len conta a quantidade de elementos de uma lista ou varavelsubstitui um valor de indice por outro no mesmo indice adiciona um objeto no final da lista insere um item em um indice especifico lista cria uma ligação com a lista [:] usado para criar uma copia de uma lista que pode ser modificado como quiser | 659 | pt | 0.997352 |
import random
### Advantage Logic ###
def advantage(rollfunc):
roll1 = rollfunc
roll2 = rollfunc
if roll1 > roll2:
return roll1
else:
return roll2
### Disadvantage Logic ###
def disadvantage(rollfunc):
roll1 = rollfunc
roll2 = rollfunc
if roll1 < roll2:
return roll1
else:
return roll2
### Die Rolls ###
def rolld4(sides:int=4):
return random.randint(1, sides)
def rolld6(sides:int=6):
return random.randint(1, sides)
def rolld8(sides:int=8):
return random.randint(1, sides)
def rolld10(sides:int=10):
return random.randint(1, sides)
def rolld12(sides:int=12):
return random.randint(1, sides)
def rolld20(sides:int=20):
return random.randint(1, sides) | DieRolls.py | 746 | Advantage Logic Disadvantage Logic Die Rolls | 46 | en | 0.589802 |
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from legate.pandas.common import types as ty
class SeriesAccessor(object):
def __init__(self, series):
self._series = series
self._column = series._frame._columns[0]
def _construct_result(self, column):
return self._series.__ctor__(
frame=self._series._frame.replace_columns([column])
)
class CategoricalAccessor(SeriesAccessor):
    """Accessor for categorical series (``series.cat``)."""
    def __init__(self, series):
        super(CategoricalAccessor, self).__init__(series)
        # Only valid on a column with a categorical dtype.
        assert ty.is_categorical_dtype(self._column.dtype)
    @property
    def codes(self):
        # Integer category codes of the backing column, wrapped as a series.
        return self._construct_result(self._column.get_codes())
class DatetimeProperties(SeriesAccessor):
    """Accessor for datetime series (``series.dt``).

    Each property extracts one component from the backing
    nanosecond-resolution timestamp column.
    """
    def __init__(self, series):
        super(DatetimeProperties, self).__init__(series)
        # Only nanosecond-resolution timestamps are supported.
        assert self._column.dtype == ty.ts_ns
    @property
    def year(self):
        return self._get_dt_field("year")
    @property
    def month(self):
        return self._get_dt_field("month")
    @property
    def day(self):
        return self._get_dt_field("day")
    @property
    def hour(self):
        return self._get_dt_field("hour")
    @property
    def minute(self):
        return self._get_dt_field("minute")
    @property
    def second(self):
        return self._get_dt_field("second")
    @property
    def weekday(self):
        return self._get_dt_field("weekday")
    def _get_dt_field(self, field):
        # The result dtype depends on the requested field.
        dtype = ty.get_dt_field_type(self._column.dtype, field)
        return self._construct_result(self._column.get_dt_field(field, dtype))
class StringMethods(SeriesAccessor):
    """Vectorized string methods for string series (``series.str``).

    Mirrors a subset of the pandas ``Series.str`` API; each method applies
    the operation on the backing column and wraps the result in a new
    series via ``_construct_result``.
    """
    def __init__(self, series):
        super(StringMethods, self).__init__(series)
        assert ty.is_string_dtype(self._column.dtype)
    # NOTE: default changed from np.NaN to np.nan (same value); the np.NaN
    # alias was removed in NumPy 2.0.
    def contains(self, pat, case=True, flags=0, na=np.nan, regex=True):
        """Test whether regex *pat* matches within each string.

        ``case``, ``flags``, and ``na`` exist only for pandas signature
        compatibility; just the default regex behavior is implemented.
        """
        if pat is None and not case:
            # Mirror pandas' error for this corner case.
            raise AttributeError("'NoneType' object has no attribute 'upper'")
        assert pat is not None and regex
        return self._construct_result(self._column.contains(pat))
    def pad(self, width, side="left", fillchar=" "):
        """Pad each string up to *width* with *fillchar* on the given side."""
        if len(fillchar) != 1:
            raise TypeError("fillchar must be a character, not str")
        return self._construct_result(
            self._column.pad(width, side=side, fillchar=fillchar)
        )
    def strip(self, to_strip=None):
        """Strip leading/trailing characters (whitespace when *to_strip* is None)."""
        return self._construct_result(self._column.strip(to_strip=to_strip))
    def zfill(self, width):
        """Left-fill each string with '0' up to *width*."""
        return self._construct_result(self._column.zfill(width))
    def lower(self):
        """Lowercase each string."""
        return self._construct_result(self._column.lower())
    def upper(self):
        """Uppercase each string."""
        return self._construct_result(self._column.upper())
    def swapcase(self):
        """Swap the case of each string."""
        return self._construct_result(self._column.swapcase())
    def to_datetime(self, format):
        """Parse each string to a datetime using *format* (required)."""
        if format is None:
            raise ValueError("Format must be provided")
        return self._construct_result(self._column.to_datetime(format))
| legate/pandas/frontend/accessors.py | 3,547 | Copyright 2021 NVIDIA Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 555 | en | 0.858923 |
"""
This is the UGaLi analysis sub-package.
Classes related to higher-level data analysis live here.
Modules
objects :
mask :
"""
| ugali/analysis/__init__.py | 148 | This is the UGaLi analysis sub-package.
Classes related to higher-level data analysis live here.
Modules
objects :
mask : | 138 | en | 0.716722 |
from keras.models import Sequential, load_model
from keras.layers.core import Dense, Dropout, Activation,Flatten
from keras.layers.recurrent import LSTM, GRU, SimpleRNN
from keras.layers.convolutional import Convolution2D, Convolution1D, MaxPooling2D, MaxPooling1D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import ELU, PReLU, LeakyReLU
from keras.layers.wrappers import TimeDistributed
from keras.optimizers import SGD, Adagrad, RMSprop
from keras.callbacks import Callback, ModelCheckpoint, EarlyStopping
from keras.utils.io_utils import HDF5Matrix
from scipy import signal
import scipy.io
import scipy.io.wavfile as wav
import numpy as np
import h5py
import librosa
import sys
import os
def make_spectrum_phase(y, FRAMESIZE, OVERLAP, FFTSIZE):
    """Compute a per-bin normalized log-power spectrogram and its phase.

    Returns ``(Sxx, phase, mean, std)`` where ``Sxx`` is the z-scored
    log10 power spectrogram, ``phase`` the unit-magnitude complex phase,
    and ``mean``/``std`` the per-frequency-bin statistics needed to undo
    the normalization later.

    Fixes: leftover debug ``print`` calls removed; the bin count is now
    derived from FRAMESIZE instead of the hard-coded 257 (which was only
    correct for FRAMESIZE == 512).
    """
    D = librosa.stft(y, n_fft=FRAMESIZE, hop_length=OVERLAP,
                     win_length=FFTSIZE, window=scipy.signal.hamming)
    Sxx = np.log10(abs(D) ** 2)
    phase = np.exp(1j * np.angle(D))
    # An STFT with n_fft=FRAMESIZE produces FRAMESIZE//2 + 1 frequency bins.
    n_bins = FRAMESIZE // 2 + 1
    mean = np.mean(Sxx, axis=1).reshape((n_bins, 1))
    std = np.std(Sxx, axis=1).reshape((n_bins, 1)) + 1e-12
    Sxx = (Sxx - mean) / std
    return Sxx, phase, mean, std
def recons_spec_phase(Sxx_r, phase, hop_length=256, win_length=512):
    """Invert a log-power spectrogram back to a waveform.

    *Sxx_r* is an (un-normalized) log10 power spectrogram and *phase* the
    matching complex phase from ``make_spectrum_phase``. The previously
    hard-coded STFT hop/window lengths are now overridable keyword
    parameters (defaults unchanged), so callers using other frame sizes
    can reuse this function.
    """
    # Undo the log10 power scaling back to linear magnitudes.
    Sxx_r = np.sqrt(10 ** Sxx_r)
    # Re-attach the original phase and invert the STFT.
    R = np.multiply(Sxx_r, phase)
    result = librosa.istft(R,
                           hop_length=hop_length,
                           win_length=win_length,
                           window=scipy.signal.hamming)
    return result
def predict(modelpath, noisylistpath):
    """Enhance every noisy wav listed in *noisylistpath* with a trained DNN.

    Each listed file is converted to a normalized log-power spectrogram,
    denoised frame-by-frame by the model (using a 5-frame context window),
    inverted back to audio, and written to static/wav/enhanced.wav — note
    each file overwrites the previous output.  Returns the output path.
    """
    model=load_model(modelpath) #"weights/DNN_spec_20160425v2.hdf5"
    FRAMESIZE = 512
    OVERLAP = 256
    FFTSIZE = 512
    RATE = 16000
    FRAMEWIDTH = 2
    FBIN = FRAMESIZE//2+1
    # noisylistpath = sys.argv[2]
    noisylistpath = noisylistpath
    with open(noisylistpath, 'r') as f:
        for line in f:
            print(line)
            filename = line.split('/')[-1][:]
            print(filename)
            y,sr=librosa.load(line[:],sr=RATE)
            # Pre-allocated context buffer; assumes at most 10000 frames per
            # file -- TODO confirm inputs are always short enough.
            training_data = np.empty((10000, FBIN, FRAMEWIDTH*2+1)) # For Noisy data
            Sxx, phase, mean, std = make_spectrum_phase(y, FRAMESIZE, OVERLAP, FFTSIZE)
            idx = 0
            for i in range(FRAMEWIDTH, Sxx.shape[1]-FRAMEWIDTH): # 5-frame context window
                training_data[idx,:,:] = Sxx[:,i-FRAMEWIDTH:i+FRAMEWIDTH+1] # For Noisy data
                idx = idx + 1
            X_train = training_data[:idx]
            X_train = np.reshape(X_train,(idx,-1))
            predict = model.predict(X_train)
            count=0
            for i in range(FRAMEWIDTH, Sxx.shape[1]-FRAMEWIDTH):
                Sxx[:,i] = predict[count]
                count+=1
            # The first/last FRAMEWIDTH columns were never enhanced, so undo
            # the normalization there before inverting the spectrogram.
            Sxx[:, :FRAMEWIDTH] = (Sxx[:, :FRAMEWIDTH] * std) + mean
            Sxx[:, -FRAMEWIDTH:] = (Sxx[:, -FRAMEWIDTH:] * std) + mean
            recons_y = recons_spec_phase(Sxx, phase)
            # Trim/pad the reconstruction to the original signal length.
            output = librosa.util.fix_length(recons_y, y.shape[0])
            wav.write("static/wav/enhanced.wav",RATE,np.int16(output*32767))
    return os.path.join("static","wav","enhanced.wav")
| test_gen_spec.py | 3,080 | "weights/DNN_spec_20160425v2.hdf5" noisylistpath = sys.argv[2] For Noisy data 5 Frmae For Noisy data The un-enhanced part of spec should be un-normalized | 154 | en | 0.721465 |
from myelin.utils import CallbackList, Experience
class RLInteraction:
    """An episodic interaction between an agent and an environment.

    Runs episodes until any termination condition (a callable receiving
    the ``info`` dict) returns True, invoking the registered callbacks at
    interaction/episode/step boundaries.
    """

    def __init__(self, env, agent, callbacks=None, termination_conditions=None):
        self.env = env
        self.agent = agent
        self.callbacks = CallbackList(callbacks)
        if termination_conditions is None:
            # Default: never terminate based on interaction info.
            self.termination_conditions = [lambda a: False]
        else:
            self.termination_conditions = termination_conditions
        self.episode = 0
        self.step = 0

    @property
    def info(self):
        """Current interaction counters, as passed to termination conditions."""
        return {
            'episode': self.episode,
            'step': self.step
        }

    def should_continue(self):
        """Return False as soon as any termination condition fires."""
        # Fix: removed a leftover debug print of the triggering condition.
        for termination_condition in self.termination_conditions:
            if termination_condition(self.info):
                return False
        return True

    def start(self):
        """Starts agent-environment interaction."""
        self.callbacks.on_interaction_begin()
        while self.should_continue():
            self.callbacks.on_episode_begin(self.episode)
            self.env.reset()
            self.step = 0
            while not self.env.is_terminal():
                self.callbacks.on_step_begin(self.step)
                state = self.env.get_state()
                action = self.agent.get_action(state)
                next_state, reward, done, info = self.env.step(action)
                experience = Experience(state, action, reward, next_state, done)
                self.agent.update(experience)
                self.callbacks.on_step_end(self.step)
                self.step += 1
            self.callbacks.on_episode_end(self.episode, self.step)
            self.episode += 1
        self.callbacks.on_interaction_end(self.episode)
| myelin/core/interactions.py | 1,832 | An episodic interaction between an agent and an environment.
Starts agent-environment interaction. | 98 | en | 0.844718 |
from __future__ import division
import numpy as np
from scipy import ndimage as ndi
from ..morphology import dilation, erosion, square
from ..util import img_as_float, view_as_windows
from ..color import gray2rgb
def _find_boundaries_subpixel(label_img):
    """See ``find_boundaries(..., mode='subpixel')``.
    Notes
    -----
    This function puts in an empty row and column between each *actual*
    row and column of the image, for a corresponding shape of $2s - 1$
    for every image dimension of size $s$. These "interstitial" rows
    and columns are filled as ``True`` if they separate two labels in
    `label_img`, ``False`` otherwise.
    I used ``view_as_windows`` to get the neighborhood of each pixel.
    Then I check whether there are two labels or more in that
    neighborhood.
    """
    ndim = label_img.ndim
    max_label = np.iinfo(label_img.dtype).max
    # Expand: one interstitial row/column between every pair of original
    # pixels, so each dimension of size s becomes 2*s - 1.
    label_img_expanded = np.zeros([(2 * s - 1) for s in label_img.shape],
                                  label_img.dtype)
    pixels = (slice(None, None, 2), ) * ndim
    label_img_expanded[pixels] = label_img
    # `edges` marks the interstitial positions; fill them with max_label so
    # they never collide with a real label in the uniqueness test below.
    edges = np.ones(label_img_expanded.shape, dtype=bool)
    edges[pixels] = False
    label_img_expanded[edges] = max_label
    # 3**ndim neighborhood around each expanded pixel (zero-padded border).
    windows = view_as_windows(np.pad(label_img_expanded, 1,
                                     mode='constant', constant_values=0),
                              (3,) * ndim)
    boundaries = np.zeros_like(edges)
    for index in np.ndindex(label_img_expanded.shape):
        if edges[index]:
            values = np.unique(windows[index].ravel())
            if len(values) > 2:  # single value and max_label
                boundaries[index] = True
    return boundaries
def find_boundaries(label_img, connectivity=1, mode='thick', background=0):
    """Return bool array where boundaries between labeled regions are True.
    Parameters
    ----------
    label_img : array of int or bool
        An array in which different regions are labeled with either different
        integers or boolean values.
    connectivity: int in {1, ..., `label_img.ndim`}, optional
        A pixel is considered a boundary pixel if any of its neighbors
        has a different label. `connectivity` controls which pixels are
        considered neighbors. A connectivity of 1 (default) means
        pixels sharing an edge (in 2D) or a face (in 3D) will be
        considered neighbors. A connectivity of `label_img.ndim` means
        pixels sharing a corner will be considered neighbors.
    mode: string in {'thick', 'inner', 'outer', 'subpixel'}
        How to mark the boundaries:
        - thick: any pixel not completely surrounded by pixels of the
          same label (defined by `connectivity`) is marked as a boundary.
          This results in boundaries that are 2 pixels thick.
        - inner: outline the pixels *just inside* of objects, leaving
          background pixels untouched.
        - outer: outline pixels in the background around object
          boundaries. When two objects touch, their boundary is also
          marked.
        - subpixel: return a doubled image, with pixels *between* the
          original pixels marked as boundary where appropriate.
    background: int, optional
        For modes 'inner' and 'outer', a definition of a background
        label is required. See `mode` for descriptions of these two.
    Returns
    -------
    boundaries : array of bool, same shape as `label_img`
        A bool image where ``True`` represents a boundary pixel. For
        `mode` equal to 'subpixel', ``boundaries.shape[i]`` is equal
        to ``2 * label_img.shape[i] - 1`` for all ``i`` (a pixel is
        inserted in between all other pairs of pixels).
    Examples
    --------
    >>> labels = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    ...                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    ...                    [0, 0, 0, 0, 0, 5, 5, 5, 0, 0],
    ...                    [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],
    ...                    [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],
    ...                    [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],
    ...                    [0, 0, 0, 0, 0, 5, 5, 5, 0, 0],
    ...                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    ...                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8)
    >>> find_boundaries(labels, mode='thick').astype(np.uint8)
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
           [0, 1, 1, 1, 1, 1, 0, 1, 1, 0],
           [0, 1, 1, 0, 1, 1, 0, 1, 1, 0],
           [0, 1, 1, 1, 1, 1, 0, 1, 1, 0],
           [0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
           [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
    >>> find_boundaries(labels, mode='inner').astype(np.uint8)
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 0, 1, 0, 0],
           [0, 0, 1, 0, 1, 1, 0, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 0, 1, 0, 0],
           [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
    >>> find_boundaries(labels, mode='outer').astype(np.uint8)
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 0, 0, 1, 0],
           [0, 1, 0, 0, 1, 1, 0, 0, 1, 0],
           [0, 1, 0, 0, 1, 1, 0, 0, 1, 0],
           [0, 1, 0, 0, 1, 1, 0, 0, 1, 0],
           [0, 0, 1, 1, 1, 1, 0, 0, 1, 0],
           [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
    >>> labels_small = labels[::2, ::3]
    >>> labels_small
    array([[0, 0, 0, 0],
           [0, 0, 5, 0],
           [0, 1, 5, 0],
           [0, 0, 5, 0],
           [0, 0, 0, 0]], dtype=uint8)
    >>> find_boundaries(labels_small, mode='subpixel').astype(np.uint8)
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 0],
           [0, 0, 0, 1, 0, 1, 0],
           [0, 1, 1, 1, 0, 1, 0],
           [0, 1, 0, 1, 0, 1, 0],
           [0, 1, 1, 1, 0, 1, 0],
           [0, 0, 0, 1, 0, 1, 0],
           [0, 0, 0, 1, 1, 1, 0],
           [0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
    >>> bool_image = np.array([[False, False, False, False, False],
    ...                        [False, False, False, False, False],
    ...                        [False, False, True, True, True],
    ...                        [False, False, True, True, True],
    ...                        [False, False, True, True, True]], dtype=bool)
    >>> find_boundaries(bool_image)
    array([[False, False, False, False, False],
           [False, False, True, True, True],
           [False, True, True, True, True],
           [False, True, True, False, False],
           [False, True, True, False, False]], dtype=bool)
    """
    if label_img.dtype == 'bool':
        label_img = label_img.astype(np.uint8)
    ndim = label_img.ndim
    selem = ndi.generate_binary_structure(ndim, connectivity)
    if mode != 'subpixel':
        # A pixel is on a (thick) boundary iff dilation and erosion of the
        # label image disagree there, i.e. its neighborhood is mixed.
        boundaries = dilation(label_img, selem) != erosion(label_img, selem)
        if mode == 'inner':
            # Keep only boundary pixels that lie on labeled objects.
            foreground_image = (label_img != background)
            boundaries &= foreground_image
        elif mode == 'outer':
            max_label = np.iinfo(label_img.dtype).max
            background_image = (label_img == background)
            # Full connectivity here, so touching objects are detected.
            selem = ndi.generate_binary_structure(ndim, ndim)
            inverted_background = np.array(label_img, copy=True)
            inverted_background[background_image] = max_label
            # Pixels where two different non-background labels touch.
            adjacent_objects = ((dilation(label_img, selem) !=
                                 erosion(inverted_background, selem)) &
                                ~background_image)
            boundaries &= (background_image | adjacent_objects)
        return boundaries
    else:
        boundaries = _find_boundaries_subpixel(label_img)
        return boundaries
def mark_boundaries(image, label_img, color=(1, 1, 0),
                    outline_color=None, mode='outer', background_label=0):
    """Return image with boundaries between labeled regions highlighted.
    Parameters
    ----------
    image : (M, N[, 3]) array
        Grayscale or RGB image.
    label_img : (M, N) array of int
        Label array where regions are marked by different integer values.
    color : length-3 sequence, optional
        RGB color of boundaries in the output image.
    outline_color : length-3 sequence, optional
        RGB color surrounding boundaries in the output image. If None, no
        outline is drawn.
    mode : string in {'thick', 'inner', 'outer', 'subpixel'}, optional
        The mode for finding boundaries.
    background_label : int, optional
        Which label to consider background (this is only useful for
        modes ``inner`` and ``outer``).
    Returns
    -------
    marked : (M, N, 3) array of float
        An image in which the boundaries between labels are
        superimposed on the original image.
    See Also
    --------
    find_boundaries
    """
    # Work on a float RGB copy so color assignment below is well-defined.
    marked = img_as_float(image, force_copy=True)
    if marked.ndim == 2:
        marked = gray2rgb(marked)
    if mode == 'subpixel':
        # Here, we want to interpose an extra line of pixels between
        # each original line - except for the last axis which holds
        # the RGB information. ``ndi.zoom`` then performs the (cubic)
        # interpolation, filling in the values of the interposed pixels
        marked = ndi.zoom(marked, [2 - 1/s for s in marked.shape[:-1]] + [1],
                          mode='reflect')
    boundaries = find_boundaries(label_img, mode=mode,
                                 background=background_label)
    if outline_color is not None:
        # Draw the outline first so boundary pixels overwrite its interior.
        outlines = dilation(boundaries, square(3))
        marked[outlines] = outline_color
    marked[boundaries] = color
    return marked
| venv/lib/python3.8/site-packages/skimage/segmentation/boundaries.py | 9,983 | See ``find_boundaries(..., mode='subpixel')``.
Notes
-----
This function puts in an empty row and column between each *actual*
row and column of the image, for a corresponding shape of $2s - 1$
for every image dimension of size $s$. These "interstitial" rows
and columns are filled as ``True`` if they separate two labels in
`label_img`, ``False`` otherwise.
I used ``view_as_windows`` to get the neighborhood of each pixel.
Then I check whether there are two labels or more in that
neighborhood.
Return bool array where boundaries between labeled regions are True.
Parameters
----------
label_img : array of int or bool
An array in which different regions are labeled with either different
integers or boolean values.
connectivity: int in {1, ..., `label_img.ndim`}, optional
A pixel is considered a boundary pixel if any of its neighbors
has a different label. `connectivity` controls which pixels are
considered neighbors. A connectivity of 1 (default) means
pixels sharing an edge (in 2D) or a face (in 3D) will be
considered neighbors. A connectivity of `label_img.ndim` means
pixels sharing a corner will be considered neighbors.
mode: string in {'thick', 'inner', 'outer', 'subpixel'}
How to mark the boundaries:
- thick: any pixel not completely surrounded by pixels of the
same label (defined by `connectivity`) is marked as a boundary.
This results in boundaries that are 2 pixels thick.
- inner: outline the pixels *just inside* of objects, leaving
background pixels untouched.
- outer: outline pixels in the background around object
boundaries. When two objects touch, their boundary is also
marked.
- subpixel: return a doubled image, with pixels *between* the
original pixels marked as boundary where appropriate.
background: int, optional
For modes 'inner' and 'outer', a definition of a background
label is required. See `mode` for descriptions of these two.
Returns
-------
boundaries : array of bool, same shape as `label_img`
A bool image where ``True`` represents a boundary pixel. For
`mode` equal to 'subpixel', ``boundaries.shape[i]`` is equal
to ``2 * label_img.shape[i] - 1`` for all ``i`` (a pixel is
inserted in between all other pairs of pixels).
Examples
--------
>>> labels = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 5, 5, 5, 0, 0],
... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],
... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],
... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],
... [0, 0, 0, 0, 0, 5, 5, 5, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8)
>>> find_boundaries(labels, mode='thick').astype(np.uint8)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 0, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> find_boundaries(labels, mode='inner').astype(np.uint8)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 1, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> find_boundaries(labels, mode='outer').astype(np.uint8)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> labels_small = labels[::2, ::3]
>>> labels_small
array([[0, 0, 0, 0],
[0, 0, 5, 0],
[0, 1, 5, 0],
[0, 0, 5, 0],
[0, 0, 0, 0]], dtype=uint8)
>>> find_boundaries(labels_small, mode='subpixel').astype(np.uint8)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 0, 1, 0],
[0, 0, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> bool_image = np.array([[False, False, False, False, False],
... [False, False, False, False, False],
... [False, False, True, True, True],
... [False, False, True, True, True],
... [False, False, True, True, True]], dtype=np.bool)
>>> find_boundaries(bool_image)
array([[False, False, False, False, False],
[False, False, True, True, True],
[False, True, True, True, True],
[False, True, True, False, False],
[False, True, True, False, False]], dtype=bool)
Return image with boundaries between labeled regions highlighted.
Parameters
----------
image : (M, N[, 3]) array
Grayscale or RGB image.
label_img : (M, N) array of int
Label array where regions are marked by different integer values.
color : length-3 sequence, optional
RGB color of boundaries in the output image.
outline_color : length-3 sequence, optional
RGB color surrounding boundaries in the output image. If None, no
outline is drawn.
mode : string in {'thick', 'inner', 'outer', 'subpixel'}, optional
The mode for finding boundaries.
background_label : int, optional
Which label to consider background (this is only useful for
modes ``inner`` and ``outer``).
Returns
-------
marked : (M, N, 3) array of float
An image in which the boundaries between labels are
superimposed on the original image.
See Also
--------
find_boundaries
single value and max_label Here, we want to interpose an extra line of pixels between each original line - except for the last axis which holds the RGB information. ``ndi.zoom`` then performs the (cubic) interpolation, filling in the values of the interposed pixels | 6,372 | en | 0.583085 |
from __future__ import print_function
import argparse
import gym
from itertools import count
import numpy as np
import mxnet as mx
import mxnet.ndarray as F
from mxnet import gluon
from mxnet.gluon import nn
from mxnet import autograd
# Command-line configuration for the actor-critic CartPole example.
parser = argparse.ArgumentParser(description='MXNet actor-critic example')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
                    help='discount factor (default: 0.99)')
# Fix: the help text previously claimed "(default: 1)" while the actual
# default is 543.
parser.add_argument('--seed', type=int, default=543, metavar='N',
                    help='random seed (default: 543)')
parser.add_argument('--render', action='store_true',
                    help='render the environment')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                    help='interval between training status logs (default: 10)')
args = parser.parse_args()

env = gym.make('CartPole-v0')
env.seed(args.seed)
class Policy(gluon.Block):
    """Two-headed actor-critic network.

    A shared 16-unit ReLU hidden layer (4 inputs) feeds an actor head
    (2 action logits, softmaxed in ``forward``) and a critic head
    (a scalar state-value estimate).
    """
    def __init__(self, **kwargs):
        super(Policy, self).__init__(**kwargs)
        with self.name_scope():
            self.dense = nn.Dense(16, in_units=4, activation='relu')
            self.action_pred = nn.Dense(2, in_units=16)
            self.value_pred = nn.Dense(1, in_units=16)
    def forward(self, x):
        # Returns (action probabilities, state value) for a batch of states.
        x = self.dense(x)
        probs = self.action_pred(x)
        values = self.value_pred(x)
        return F.softmax(probs), values
net = Policy()
net.collect_params().initialize(mx.init.Uniform(0.02))
trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': 3e-2})
# Critic loss: L1 distance between predicted value and discounted return.
loss = gluon.loss.L1Loss()
running_reward = 10
for epoch in count(1):
    state = env.reset()
    rewards = []
    values = []
    heads = []
    actions = []
    with autograd.record():
        # Sample a sequence of actions until the episode ends (or 10000 steps)
        for t in range(10000):
            state = mx.nd.array(np.expand_dims(state, 0))
            prob, value = net(state)
            action, logp = mx.nd.sample_multinomial(prob, get_prob=True)
            state, reward, done, _ = env.step(action.asnumpy()[0])
            if args.render:
                env.render()
            rewards.append(reward)
            values.append(value)
            actions.append(action.asnumpy()[0])
            heads.append(logp)
            if done:
                break
        # reverse-accumulate discounted returns, then normalize them
        running_reward = running_reward * 0.99 + t * 0.01
        R = 0
        for i in range(len(rewards)-1, -1, -1):
            R = rewards[i] + args.gamma * R
            rewards[i] = R
        rewards = np.array(rewards)
        rewards -= rewards.mean()
        rewards /= rewards.std() + np.finfo(rewards.dtype).eps
        # compute loss and gradient
        L = sum([loss(value, mx.nd.array([r])) for r, value in zip(rewards, values)])
        final_nodes = [L]
        for logp, r, v in zip(heads, rewards, values):
            # Advantage: return minus the critic's value estimate.
            reward = r - v.asnumpy()[0,0]
            # Here we differentiate the stochastic graph; this corresponds to
            # the first term of equation (6) in
            # https://arxiv.org/pdf/1506.05254.pdf
            # The optimizer minimizes the loss, but we want to maximize the
            # reward, so we use -reward here.
            final_nodes.append(logp*(-reward))
        autograd.backward(final_nodes)
    trainer.step(t)
    if epoch % args.log_interval == 0:
        print('Episode {}\tLast length: {:5d}\tAverage length: {:.2f}'.format(
            epoch, t, running_reward))
    if running_reward > 200:
        print("Solved! Running reward is now {} and "
              "the last episode runs to {} time steps!".format(running_reward, t))
        break
| example/gluon/actor_critic.py | 3,624 | Sample a sequence of actions reverse accumulate and normalize rewards compute loss and gradient Here we differentiate the stochastic graph, corresponds to the first term of equation (6) in https://arxiv.org/pdf/1506.05254.pdf Optimizer minimizes the loss but we want to maximizing the reward, so use we use -reward here. | 320 | en | 0.843892 |
import click
from typing import Sequence, Tuple
from click.formatting import measure_table, iter_rows
class OrderedCommand(click.Command):
    """click.Command that lists required options first (then alphabetically)
    and renders the options table with custom column formatting."""
    def get_params(self, ctx):
        rv = super().get_params(ctx)
        # Sort key: required options first; ties broken by option name.
        rv.sort(key=lambda o: (not o.required, o.name))
        return rv
    def format_options(self, ctx, formatter) -> None:
        """Writes all the options into the formatter if they exist."""
        opts = []
        for param in self.get_params(ctx):
            rv = param.get_help_record(ctx)
            if rv is not None:
                opts.append(rv)
        if opts:
            with formatter.section("Options"):
                self.write_dl(formatter, opts)
    @staticmethod
    def write_dl(formatter, rows: Sequence[Tuple[str, str]], col_max: int = 30, col_spacing: int = 2) -> None:
        """Write a two-column definition list into *formatter*.

        Variant of click's ``HelpFormatter.write_dl``: when the help text
        contains a '[' (click's extra-info suffix such as '[required]'),
        the bracketed part is moved to the front of the help line —
        NOTE(review): presumably to surface required/env markers first;
        confirm this is the intended rendering.
        """
        rows = list(rows)
        widths = measure_table(rows)
        if len(widths) != 2:
            raise TypeError("Expected two columns for definition list")
        # Width of the first (option name) column, capped at col_max.
        first_col = min(widths[0], col_max) + col_spacing
        for first, second in iter_rows(rows, len(widths)):
            formatter.write(f"{'':>{formatter.current_indent}}{first}")
            if not second:
                formatter.write("\n")
                continue
            if len(first) <= first_col - col_spacing:
                formatter.write(" " * (first_col - len(first)))
            else:
                # Name too long: put the help text on its own indented line.
                formatter.write("\n")
                formatter.write(" " * (first_col + formatter.current_indent))
            if "[" in second:
                text, meta = second.split("[")
                formatter.write(f"[{meta} {text}\n")
            else:
                formatter.write(f"{second}\n")
def add_options(options):
    """Decorator factory that applies every option decorator in *options*.

    Decorators are applied in reverse list order so that, once stacked,
    they take effect in the order they were listed.
    """
    def decorate(func):
        for option in reversed(options):
            func = option(func)
        return func
    return decorate
class Defaults:
    """Default values for the shared CLI options (overridable via flags
    or environment variables)."""
    # Worker image executed on provisioned compute.
    DOCKER_IMAGE = "docker.io/yellowdogco/virtual-screening-worker-public:3.3.0"
    # Base URL of the platform API.
    PORTAL_API_URL = "https://portal.yellowdog.co/api"
    # Namespace under which work and compute are created.
    NAMESPACE = "virtual-screening"
    # Retry count for failed tasks.
    RETRIES = 10
# Click options shared by every CLI command (apply via ``add_options``).
# Credentials are best supplied through environment variables, not flags.
shared_options = [
    click.option("--api_key_id", envvar="API_KEY_ID", required=True,
                 help="The application's API key ID for authenticating with the platform API. It is recommended to "
                      "supply this via the environment variable API_KEY_ID"),
    click.option("--api_key_secret", envvar="API_KEY_SECRET", required=True,
                 help="The application's API key secret for authenticating with the platform API. It is recommended to "
                      "supply this via the environment variable API_KEY_SECRET"),
    click.option("--template_id", envvar="TEMPLATE_ID", required=True,
                 help="The compute requirement template ID to use for provisioning compute"),
    click.option("--platform_api_url", envvar="PLATFORM_API_URL", default=Defaults.PORTAL_API_URL,
                 help="The URL of the platform API"),
    click.option("--namespace", envvar="NAMESPACE", default=Defaults.NAMESPACE,
                 help="The namespace within which all work and compute will be created"),
    click.option("--docker_image", envvar="DOCKER_IMAGE", default=Defaults.DOCKER_IMAGE,
                 help="The docker image that will be executed by the workers"),
    click.option("--retries", envvar="RETRIES", type=int, default=Defaults.RETRIES,
                 help="The number of times each failed task should be retried"),
]
| src/cli.py | 3,482 | Writes all the options into the formatter if they exist. | 56 | en | 0.816037 |
import argparse
import glob
import os
import time
import vlc
import cv2
import numpy as np
from enum import Enum
from tqdm import tqdm
from PIL import Image, ImageDraw, ImageFont
from align.align_trans import get_reference_facial_points
from align.detector import load_detect_faces_models, process_faces
from align.visualization_utils import draw_fps, show_results
from util.extract_feature_v2 import extract_feature_for_img, load_face_id_model
MIN_FACE_PROB = 0.9
STREAM_DIR = '/home/ec2-user/projects/facelab-data/stream-data'
RESULT_DIR = '/home/ec2-user/projects/facelab-data/results'
ID_FEATURES_DIR = '/home/ec2-user/projects/facelab-data/test_Aligned/'
FACE_ID_MODEL_ROOT = '/home/ec2-user/projects/facelab-data/models/backbone_ir50_ms1m_epoch120.pth'
FONT_PATH = '/usr/share/fonts/dejavu/DejaVuSans.ttf'
class Mode(Enum):
    """Run mode: live webcam demo or batch processing of files."""
    DEMO = 1
    FILE = 2
    def __str__(self):
        return self.name
    @staticmethod
    def from_string(s):
        """Parse a mode name; raises ValueError (as argparse expects) on bad input."""
        try:
            return Mode[s]
        except KeyError:
            # Include the offending value instead of a bare ValueError so
            # failures outside argparse are also diagnosable.
            raise ValueError('Invalid mode: {}'.format(s))
def load_id_files(id_features_dir):
    """Load every ``.npy`` file in *id_features_dir*.

    Returns a dict mapping the file's base name (without extension) to the
    loaded numpy array of identity features.
    """
    return {
        os.path.splitext(os.path.basename(npy_path))[0]: np.load(npy_path)
        for npy_path in glob.glob('{}/*.npy'.format(id_features_dir))
    }
def check_identity(id_npy, query_features, max_min_dist=1.0):
    """Match *query_features* against the enrolled identities.

    For each identity, the score is the mean Euclidean distance between
    the query and that identity's feature rows. Returns
    ``(best_name, best_distance)``; the name is ``'Unknown'`` when the
    best distance exceeds *max_min_dist*.
    """
    best_name = ''
    best_dist = np.finfo(float).max
    for name, feature_rows in id_npy.items():
        per_row = [np.linalg.norm(row - query_features) for row in feature_rows]
        avg_dist = np.mean(per_row)
        if avg_dist < best_dist:
            best_dist = avg_dist
            best_name = name
    if best_dist > max_min_dist:
        best_name = 'Unknown'
    return best_name, best_dist
def process_and_viz_img(pil_img,
                        det_models,
                        face_id_model,
                        reference,
                        crop_size,
                        id_npy,
                        font):
    """Detect, identify, and annotate all faces in *pil_img*.

    Detections below MIN_FACE_PROB are dropped. Returns the annotated PIL
    image and the matched identity names (a tuple, or [] when no faces).
    """
    # Detect bboxes and landmarks for all faces in the image and warp the
    # faces.
    face_results = process_faces(
        img=pil_img,
        det_models=det_models,
        reference=reference,
        crop_size=crop_size)
    # Filter results by detection probability.
    filtered_face_results = []
    for face_result in face_results:
        # Element 4 of the bounding box is the detector's confidence.
        face_prob = face_result.bounding_box[4]
        if face_prob < MIN_FACE_PROB:
            print('Skipping detection with low face probability: {:.2f}'.format(face_prob))
            continue
        filtered_face_results.append(face_result)
    face_results = filtered_face_results
    identity_list = []
    for face_result in face_results:
        features = extract_feature_for_img(
            img=face_result.warped_face,
            backbone=face_id_model)
        # features is tensor, so converting to numpy arr below
        identity, min_dist = check_identity(
            id_npy=id_npy,
            query_features=features.numpy())
        identity_list.append((identity, '({:.2f})'.format(min_dist)))
    # Visualize the results
    viz_img = show_results(
        img=pil_img,
        bounding_boxes=[
            fr.bounding_box
            for fr in face_results
        ],
        facial_landmarks=[
            fr.landmark
            for fr in face_results
        ],
        names=identity_list,
        font=font)
    if identity_list:
        names = list(zip(*identity_list))[0]
    else:
        names = []
    return viz_img, names
def play_sound_for_name(name):
    """Play the greeting clip configured for *name* (no-op if none)."""
    name_to_sound_file = {
        'neelam': '/Users/bkovacs/Documents/neelam-how-is-it-going.m4a',
        'kovi': '/Users/bkovacs/Documents/balazs-how-is-it-going.m4a',
    }
    sound_path = name_to_sound_file.get(name.lower())
    if sound_path is None:
        return
    player = vlc.MediaPlayer(sound_path)
    player.play()
def play_sound_if_needed(names,
                         name_to_last_time_seen,
                         cur_time,
                         min_elapsed_to_play=3):
    """Greet each name unless it was already greeted within the window.

    Updates *name_to_last_time_seen* in place with *cur_time* for every
    name that is (re-)greeted.
    """
    for name in names:
        last_seen = name_to_last_time_seen.get(name)
        if last_seen is None or last_seen + min_elapsed_to_play < cur_time:
            play_sound_for_name(name)
            name_to_last_time_seen[name] = cur_time
def demo(det_models,
         face_id_model,
         reference,
         crop_size,
         id_npy,
         max_size,
         font):
    """Run the live webcam loop: detect, identify, greet, and display.

    Reads frames from camera 0 until 'q' is pressed; always releases the
    capture and closes windows on exit.
    """
    cap = cv2.VideoCapture(0)
    # Tracks when each identity was last greeted, for rate limiting.
    name_to_last_time_seen = {}
    try:
        while cap.isOpened():
            start_time = time.time()
            ret, image_np = cap.read()
            if ret and cap.isOpened():
                # Process frame
                # BGR -> RGB
                pil_img = Image.fromarray(image_np[..., ::-1])
                # Downscale in place to bound detection cost.
                pil_img.thumbnail((max_size, max_size))
                viz_img, names = process_and_viz_img(
                    pil_img=pil_img,
                    det_models=det_models,
                    face_id_model=face_id_model,
                    reference=reference,
                    crop_size=crop_size,
                    id_npy=id_npy,
                    font=font,
                )
                cur_time = time.time()
                play_sound_if_needed(
                    names=names,
                    name_to_last_time_seen=name_to_last_time_seen,
                    cur_time=cur_time)
                fps = 1.0 / (cur_time - start_time)
                draw_fps(
                    img=viz_img,
                    font=font,
                    fps=fps,
                )
                # Display the resulting frame (RGB -> BGR for OpenCV).
                viz_img_bgr = np.array(viz_img)[..., ::-1]
                cv2.imshow('Face Detection Demo', viz_img_bgr)
                # Quit if we press 'q'.
                if (cv2.waitKey(1) & 0xFF) == ord('q'):
                    break
    finally:
        # When everything is done, release the capture.
        cap.release()
        cv2.destroyAllWindows()
def process_files(input_dir,
                  output_dir,
                  det_models,
                  face_id_model,
                  reference,
                  crop_size,
                  id_npy,
                  max_size,
                  font):
    """Run face detection/identification over every image in *input_dir*.

    Each annotated result is written to *output_dir* as
    ``<index>-stream.jpg``, where the index follows the sorted order of the
    input file names so repeated runs produce identical numbering.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Sort for a deterministic index -> image mapping; os.listdir order is
    # filesystem-dependent.
    image_names = sorted(os.listdir(input_dir))
    for img_idx, image_name in enumerate(tqdm(image_names)):
        pil_img = Image.open(os.path.join(input_dir, image_name))
        # Downscale in place so detection stays fast on large images.
        pil_img.thumbnail((max_size, max_size))
        viz_img, _ = process_and_viz_img(
            pil_img=pil_img,
            det_models=det_models,
            face_id_model=face_id_model,
            reference=reference,
            crop_size=crop_size,
            id_npy=id_npy,
            font=font,
        )
        viz_img.save(os.path.join(output_dir, '{}-stream.jpg'.format(img_idx)))
def main(mode, face_id_model_root, id_features_dir, font_path):
    """Load all models and resources, then dispatch to the requested mode."""
    print('Loading models...')
    det_models = load_detect_faces_models()
    face_id_model = load_face_id_model(model_root=face_id_model_root)
    id_npy = load_id_files(id_features_dir)
    crop_size = 112
    max_size = 1024
    reference = get_reference_facial_points(default_square=True)
    font = ImageFont.FreeTypeFont(font=font_path, size=24)
    print('Starting image processing...')
    # Arguments shared by both processing modes.
    shared_kwargs = dict(
        det_models=det_models,
        face_id_model=face_id_model,
        reference=reference,
        crop_size=crop_size,
        id_npy=id_npy,
        max_size=max_size,
        font=font,
    )
    if mode == Mode.DEMO:
        demo(**shared_kwargs)
    elif mode == Mode.FILE:
        process_files(
            input_dir=STREAM_DIR,
            output_dir=RESULT_DIR,
            **shared_kwargs)
    else:
        raise ValueError('Invalid mode: {}'.format(mode))
def parse_args():
    """Build and parse the command-line arguments for this script."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--mode', type=Mode.from_string, default=Mode.DEMO, choices=list(Mode))
    parser.add_argument(
        '--face_id_model_root', type=str, default=FACE_ID_MODEL_ROOT)
    parser.add_argument(
        '--id_features_dir', type=str, default=ID_FEATURES_DIR)
    parser.add_argument(
        '--font_path', type=str, default=FONT_PATH)
    return parser.parse_args()
if __name__ == '__main__':
    # Entry point: parse CLI flags and run the selected mode.
    cli_args = parse_args()
    main(cli_args.mode,
         cli_args.face_id_model_root,
         cli_args.id_features_dir,
         cli_args.font_path)
| test_video_stream.py | 8,945 | Detect bboxes and landmarks for all faces in the image and warp the faces. Filter results by detection probability. features is tensor, so converting to numpy arr below Visualize the results Process frame BGR -> RGB Display the resulting frame Quit if we press 'q'. When everything is done, release the capture. | 311 | en | 0.849147 |
from userinput import *
from types import SimpleNamespace
import sys
from PyQt5.QtCore import pyqtSignal as pys
class numberInput(QtWidgets.QMainWindow, Ui_MainWindow):
    """Frameless on-screen numeric keypad.

    Emits ``input_num`` (a str) with the accumulated digits when the OK
    button is pressed, then closes itself.
    """

    # Signal carrying the full digit string entered by the user.
    input_num = pys(str)

    def __init__(self, opacity=1, loc=(200, 200), parent=None):
        """Create the keypad at *loc* with the given window *opacity*."""
        super(numberInput, self).__init__(parent)
        self.setupUi(self)
        # Last pressed digit button (object name + label), updated by
        # getNumbers.
        self.btn = SimpleNamespace(name="", text="")
        self.setTransparency(opacity)
        self.input_text = ""
        self.close_btn.clicked.connect(self.close)
        self.move(loc[0], loc[1])
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
        # Wire all ten digit buttons (b0..b9) to the same handler instead of
        # ten copy-pasted connect calls.
        for digit in range(10):
            getattr(self, 'b{}'.format(digit)).clicked.connect(self.getNumbers)
        self.ok.clicked.connect(self.submit_inputs)
        self.clr.clicked.connect(self.clear_text)
        self.del_btn.clicked.connect(self.delete_text)

    def getNumbers(self):
        """Append the pressed button's digit to the input and refresh display."""
        self.btn.name = self.sender().objectName()
        self.btn.text = self.sender().text()
        self.input_text += self.btn.text
        self.display.setText(self.input_text)

    def delete_text(self):
        """Remove the last entered digit (backspace)."""
        self.input_text = self.input_text[:-1]
        self.display.setText(self.input_text)

    def clear_text(self):
        """Clear the display and the accumulated input."""
        self.display.clear()
        self.input_text = ""

    def submit_inputs(self):
        """Emit the accumulated digits via ``input_num`` and close."""
        self.input_num.emit(self.input_text)
        self.close()

    def setTransparency(self, pos):
        """Set window opacity to *pos* (0.0 transparent .. 1.0 opaque)."""
        self.setWindowOpacity(pos)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
numInput=numberInput()
numInput.setWindowFlags(QtCore.Qt.FramelessWindowHint)
numInput.show()
sys.exit(app.exec_())
| udvent_reworked_v2_2_friday/input_Number.py | 2,226 | self.setWindowFlags(QtCore.Qt.FramelessWindowHint) | 50 | en | 0.184616 |
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=protected-access
import asyncio
from unittest import TestCase, main, mock
from magma.common.service_registry import ServiceRegistry
from magma.magmad.sync_rpc_client import SyncRPCClient
from orc8r.protos.sync_rpc_service_pb2 import (
GatewayRequest,
GatewayResponse,
SyncRPCRequest,
SyncRPCResponse,
)
class SyncRPCClientTests(TestCase):
    """
    Tests for the SyncRPCClient
    """
    def setUp(self):
        # Fresh event loop per test; the client under test gets a short
        # response timeout and one connection (request id 12345) tracked
        # as still open.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        self._loop = loop
        self._sync_rpc_client = SyncRPCClient(loop=loop, response_timeout=3)
        self._sync_rpc_client._conn_closed_table = {
            12345: False
        }
        # Point the service registry at a dummy service and proxy config so
        # no real network endpoints are needed.
        ServiceRegistry.add_service('test', '0.0.0.0', 0)
        ServiceRegistry._PROXY_CONFIG = {'local_port': 2345,
                                         'cloud_address': 'test',
                                         'proxy_cloud_connections': True}
        # Canned gateway request/response fixtures shared by the tests.
        self._req_body = GatewayRequest(gwId="test id", authority='mobility',
                                        path='/magma.MobilityService'
                                             '/ListAddedIPv4Blocks',
                                        headers={'te': 'trailers',
                                                 'content-type':
                                                 'application/grpc',
                                                 'user-agent':
                                                 'grpc-python/1.4.0',
                                                 'grpc-accept-encoding':
                                                 'identity'},
                                        payload=bytes.fromhex('0000000000'))
        self._expected_resp = GatewayResponse(status="400",
                                              headers={"test_key": "test_val"},
                                              payload=b'\x00'
                                              b'\x00\x00\x00\n\n\x08')
        self._expected_err_msg = "test error"
    def test_forward_request_conn_closed(self):
        # A request flagged connClosed must mark that request id's
        # connection as closed in the client's table.
        self._sync_rpc_client.forward_request(
            SyncRPCRequest(reqId=12345, connClosed=True))
        self.assertEqual(self._sync_rpc_client._conn_closed_table[12345], True)
    def test_send_sync_rpc_response(self):
        # Queued responses are yielded first; once the queue drains the
        # generator falls back to emitting heartbeat responses.
        expected = SyncRPCResponse(reqId=123, respBody=self._expected_resp)
        self._sync_rpc_client._response_queue.put(expected)
        res = self._sync_rpc_client.send_sync_rpc_response()
        actual = next(res)
        self.assertEqual(expected, actual)
        expected = SyncRPCResponse(heartBeat=True)
        actual = next(res)
        self.assertEqual(expected, actual)
    def test_retry_connect_sleep(self):
        # The backoff delay doubles on each retry (1, 2, 4, 8, ...) and is
        # capped at RETRY_MAX_DELAY_SECS on the final iteration checked.
        self._sync_rpc_client._current_delay = 0
        for i in range(5):
            self._sync_rpc_client._retry_connect_sleep()
            if i == 4:
                self.assertEqual(self._sync_rpc_client.RETRY_MAX_DELAY_SECS,
                                 self._sync_rpc_client._current_delay)
            else:
                self.assertEqual(2 ** i, self._sync_rpc_client._current_delay)
    def test_disconnect_sync_rpc_event(self):
        # Reconnect cleanup must emit the "disconnected sync rpc stream"
        # event exactly once.
        disconnect_sync_rpc_event_mock = mock.patch(
            'magma.magmad.events.disconnected_sync_rpc_stream')
        with disconnect_sync_rpc_event_mock as disconnect_sync_rpc_streams:
            self._sync_rpc_client._cleanup_and_reconnect()
            disconnect_sync_rpc_streams.assert_called_once_with()
if __name__ == "__main__":
main()
| orc8r/gateway/python/magma/magmad/tests/sync_rpc_client_tests.py | 4,065 | Tests for the SyncRPCClient
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
pylint: disable=protected-access | 530 | en | 0.860432 |
from typing import Dict, List
from sortedcontainers import SortedDict
from shamrock.types.blockchain_format.coin import Coin
from shamrock.types.blockchain_format.sized_bytes import bytes32
from shamrock.types.mempool_item import MempoolItem
class Mempool:
    """In-memory pool of pending spends, capped by total cost.

    Spends are indexed three ways: by spend name, by the coins they add or
    remove, and in a SortedDict keyed on fee-per-cost so the cheapest
    spends can be found (and evicted) first when the pool is full.
    """

    def __init__(self, max_size_in_cost: int):
        self.spends: Dict[bytes32, MempoolItem] = {}
        # fee_per_cost -> {spend name -> item}, kept in ascending order.
        self.sorted_spends: SortedDict = SortedDict()
        self.additions: Dict[bytes32, MempoolItem] = {}
        self.removals: Dict[bytes32, MempoolItem] = {}
        self.max_size_in_cost: int = max_size_in_cost
        self.total_mempool_cost: int = 0

    def get_min_fee_rate(self, cost: int) -> float:
        """
        Gets the minimum fpc rate that a transaction with specified cost will need in order to get included.
        """
        if not self.at_full_capacity(cost):
            return 0
        remaining = self.total_mempool_cost
        # Virtually evict spends from the cheapest fee rate upwards until
        # a transaction of this cost would fit.
        for rate, bucket in self.sorted_spends.items():
            for entry in bucket.values():
                remaining -= entry.cost
                if remaining + cost <= self.max_size_in_cost:
                    return rate
        raise ValueError(
            f"Transaction with cost {cost} does not fit in mempool of max cost {self.max_size_in_cost}"
        )

    def remove_from_pool(self, item: MempoolItem):
        """
        Removes an item from the mempool.
        """
        for coin in item.removals:
            del self.removals[coin.name()]
        for coin in item.additions:
            del self.additions[coin.name()]
        del self.spends[item.name]
        bucket = self.sorted_spends[item.fee_per_cost]
        del bucket[item.name]
        # Drop the fee bucket entirely once it holds no spends.
        if not bucket:
            del self.sorted_spends[item.fee_per_cost]
        self.total_mempool_cost -= item.cost
        assert self.total_mempool_cost >= 0

    def add_to_pool(
        self,
        item: MempoolItem,
    ):
        """
        Adds an item to the mempool by kicking out transactions (if it doesn't fit), in order of increasing fee per cost
        """
        while self.at_full_capacity(item.cost):
            # Evict one spend from the lowest fee-per-cost bucket.
            _, cheapest_bucket = self.sorted_spends.peekitem(index=0)
            victim = next(iter(cheapest_bucket.values()))
            self.remove_from_pool(victim)
        self.spends[item.name] = item
        # sorted_spends is Dict[float, Dict[bytes32, MempoolItem]]
        self.sorted_spends.setdefault(item.fee_per_cost, {})[item.name] = item
        for coin in item.additions:
            self.additions[coin.name()] = item
        for coin in item.removals:
            self.removals[coin.name()] = item
        self.total_mempool_cost += item.cost

    def at_full_capacity(self, cost: int) -> bool:
        """
        Checks whether the mempool is at full capacity and cannot accept a transaction with size cost.
        """
        return self.total_mempool_cost + cost > self.max_size_in_cost
| shamrock/full_node/mempool.py | 3,433 | Adds an item to the mempool by kicking out transactions (if it doesn't fit), in order of increasing fee per cost
Checks whether the mempool is at full capacity and cannot accept a transaction with size cost.
Gets the minimum fpc rate that a transaction with specified cost will need in order to get included.
Removes an item from the mempool.
Iterates through all spends in increasing fee per cost Removing one at a time, until our transaction of size cost fits Val is Dict[hash, MempoolItem] sorted_spends is Dict[float, Dict[bytes32, MempoolItem]] | 551 | en | 0.85197 |
import time
import copy
import pickle
import warnings
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score, average_precision_score, precision_recall_curve, auc
def sparse_to_tuple(sparse_mx):
    """Convert a scipy sparse matrix to a (coords, values, shape) tuple.

    ``coords`` is an (nnz, 2) array of (row, col) indices, ``values`` the
    corresponding nonzero entries, and ``shape`` the dense matrix shape.
    """
    coo = sparse_mx if sp.isspmatrix_coo(sparse_mx) else sparse_mx.tocoo()
    coords = np.vstack((coo.row, coo.col)).transpose()
    return coords, coo.data, coo.shape
def get_scores(edges_pos, edges_neg, A_pred, adj_label):
    """Evaluate link-prediction quality of a predicted adjacency matrix.

    Parameters
    ----------
    edges_pos, edges_neg : positive / negative edge index arrays; assumed
        to be (n, 2) node-index pairs so ``.T`` gives advanced indices
        into ``A_pred`` -- TODO confirm against caller
    A_pred : dense tensor of predicted edge probabilities (post-sigmoid)
    adj_label : sparse ground-truth adjacency used for reconstruction
        accuracy

    Returns
    -------
    dict with ROC AUC ('roc'), PR AUC ('pr'), average precision ('ap'),
    precision/recall/F1 at the best-F1 threshold ('pre'/'rec'/'f1'),
    reconstruction accuracy ('acc'), and the thresholded adjacency
    ('adj_recon').
    """
    # get logists and labels
    preds = A_pred[edges_pos.T]
    preds_neg = A_pred[edges_neg.T]
    logists = np.hstack([preds, preds_neg])
    # Positive edges labeled 1, sampled negatives labeled 0.
    labels = np.hstack([np.ones(preds.size(0)), np.zeros(preds_neg.size(0))])
    # logists = A_pred.view(-1)
    # labels = adj_label.to_dense().view(-1)
    # calc scores
    roc_auc = roc_auc_score(labels, logists)
    ap_score = average_precision_score(labels, logists)
    precisions, recalls, thresholds = precision_recall_curve(labels, logists)
    pr_auc = auc(recalls, precisions)
    # Silence 0/0 RuntimeWarnings from thresholds where precision+recall
    # is zero; nan_to_num maps the resulting NaN F1s to 0.  NOTE(review):
    # simplefilter mutates the process-wide warning filters as a side
    # effect -- confirm that is acceptable to callers.
    warnings.simplefilter('ignore', RuntimeWarning)
    f1s = np.nan_to_num(2*precisions*recalls/(precisions+recalls))
    # Pick the threshold that maximizes F1.
    best_comb = np.argmax(f1s)
    f1 = f1s[best_comb]
    pre = precisions[best_comb]
    rec = recalls[best_comb]
    thresh = thresholds[best_comb]
    # calc reconstracted adj_mat and accuracy with the threshold for best f1
    adj_rec = copy.deepcopy(A_pred)
    adj_rec[adj_rec < thresh] = 0
    adj_rec[adj_rec >= thresh] = 1
    labels_all = adj_label.to_dense().view(-1).long()
    preds_all = adj_rec.view(-1).long()
    recon_acc = (preds_all == labels_all).sum().float() / labels_all.size(0)
    results = {'roc': roc_auc,
               'pr': pr_auc,
               'ap': ap_score,
               'pre': pre,
               'rec': rec,
               'f1': f1,
               'acc': recon_acc,
               'adj_recon': adj_rec}
    return results
def train_model(args, dl, vgae):
    """Train a (V)GAE link-prediction model and return the best checkpoint.

    Trains for ``args.epochs`` epochs, evaluating on the validation edge
    split each epoch, tracking the metric named by ``args.criterion`` and
    restoring the best-scoring weights before returning.

    Parameters
    ----------
    args : namespace providing lr, epochs, gae (bool), criterion, device
    dl : data object exposing adj_train, adj_label, features and the
        validation edge splits
    vgae : the (variational) graph auto-encoder model to train
    """
    optimizer = torch.optim.Adam(vgae.parameters(), lr=args.lr)
    # weights for log_lik loss
    # norm_w / pos_weight rebalance BCE for the sparse adjacency (far more
    # non-edges than edges).
    adj_t = dl.adj_train
    norm_w = adj_t.shape[0]**2 / float((adj_t.shape[0]**2 - adj_t.sum()) * 2)
    pos_weight = torch.FloatTensor([float(adj_t.shape[0]**2 - adj_t.sum()) / adj_t.sum()]).to(args.device)
    # move input data and label to gpu if needed
    features = dl.features.to(args.device)
    adj_label = dl.adj_label.to_dense().to(args.device)
    best_vali_criterion = 0.0
    best_state_dict = None
    vgae.train()
    for epoch in range(args.epochs):
        t = time.time()
        A_pred = vgae(features)
        optimizer.zero_grad()
        loss = log_lik = norm_w*F.binary_cross_entropy_with_logits(A_pred, adj_label, pos_weight=pos_weight)
        if not args.gae:
            # VGAE only: subtract the KL regularizer of the latent posterior.
            kl_divergence = 0.5/A_pred.size(0) * (1 + 2*vgae.logstd - vgae.mean**2 - torch.exp(2*vgae.logstd)).sum(1).mean()
            loss -= kl_divergence
        A_pred = torch.sigmoid(A_pred).detach().cpu()
        r = get_scores(dl.val_edges, dl.val_edges_false, A_pred, dl.adj_label)
        print('Epoch{:3}: train_loss: {:.4f} recon_acc: {:.4f} val_roc: {:.4f} val_ap: {:.4f} f1: {:.4f} time: {:.4f}'.format(
            epoch+1, loss.item(), r['acc'], r['roc'], r['ap'], r['f1'], time.time()-t))
        if r[args.criterion] > best_vali_criterion:
            best_vali_criterion = r[args.criterion]
            best_state_dict = copy.deepcopy(vgae.state_dict())
            # r_test = get_scores(dl.test_edges, dl.test_edges_false, A_pred, dl.adj_label)
            r_test = r
            print(" test_roc: {:.4f} test_ap: {:.4f} test_f1: {:.4f} test_recon_acc: {:.4f}".format(
                r_test['roc'], r_test['ap'], r_test['f1'], r_test['acc']))
        loss.backward()
        optimizer.step()
    # NOTE(review): if args.epochs == 0 (or the criterion never improves on
    # 0.0), r_test and best_state_dict are never set and the lines below
    # would fail -- confirm callers always train at least one epoch.
    print("Done! final results: test_roc: {:.4f} test_ap: {:.4f} test_f1: {:.4f} test_recon_acc: {:.4f}".format(
        r_test['roc'], r_test['ap'], r_test['f1'], r_test['acc']))
    vgae.load_state_dict(best_state_dict)
    return vgae
def gen_graphs(args, dl, vgae):
    """Sample reconstructed graphs from a trained (V)GAE and pickle them.

    Writes the original adjacency, the original features, and
    ``args.gen_graphs`` predicted-edge-probability matrices under
    ``graphs/``.  File names carry a ``_gae`` suffix when the plain GAE
    variant was used.
    """
    adj_orig = dl.adj_orig
    # The model predicts off-diagonal links only; self-loops must be absent.
    assert adj_orig.diagonal().sum() == 0
    # sp.csr_matrix
    if args.gae:
        pickle.dump(adj_orig, open(f'graphs/{args.dataset}_graph_0_gae.pkl', 'wb'))
    else:
        pickle.dump(adj_orig, open(f'graphs/{args.dataset}_graph_0.pkl', 'wb'))
    # sp.lil_matrix
    pickle.dump(dl.features_orig, open(f'graphs/{args.dataset}_features.pkl', 'wb'))
    features = dl.features.to(args.device)
    for i in range(args.gen_graphs):
        with torch.no_grad():
            A_pred = vgae(features)
        A_pred = torch.sigmoid(A_pred).detach().cpu()
        # NOTE(review): r is computed but never used -- presumably kept for
        # debugging/inspection; confirm before removing.
        r = get_scores(dl.val_edges, dl.val_edges_false, A_pred, dl.adj_label)
        adj_recon = A_pred.numpy()
        # Zero the diagonal so sampled graphs contain no self-loops.
        np.fill_diagonal(adj_recon, 0)
        # np.ndarray
        if args.gae:
            filename = f'graphs/{args.dataset}_graph_{i+1}_logits_gae.pkl'
        else:
            filename = f'graphs/{args.dataset}_graph_{i+1}_logits.pkl'
        pickle.dump(adj_recon, open(filename, 'wb'))
| vgae/utils.py | 5,096 | get logists and labels logists = A_pred.view(-1) labels = adj_label.to_dense().view(-1) calc scores calc reconstracted adj_mat and accuracy with the threshold for best f1 weights for log_lik loss move input data and label to gpu if needed r_test = get_scores(dl.test_edges, dl.test_edges_false, A_pred, dl.adj_label) sp.csr_matrix sp.lil_matrix np.ndarray | 355 | en | 0.571588 |
# -*- coding: utf-8 -*-
# @Author: Yanqi Gu
# @Date: 2019-04-20 16:30:52
# @Last Modified by: Yanqi Gu
# @Last Modified time: 2019-04-20 16:57:49
| DPDecisionTree/__init__.py | 150 | -*- coding: utf-8 -*- @Author: Yanqi Gu @Date: 2019-04-20 16:30:52 @Last Modified by: Yanqi Gu @Last Modified time: 2019-04-20 16:57:49 | 139 | en | 0.443622 |
# Copyright (c) 2016,2017,2018,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Contains a collection of generally useful calculation tools."""
import functools
from operator import itemgetter
import numpy as np
from numpy.core.numeric import normalize_axis_index
import numpy.ma as ma
from scipy.spatial import cKDTree
import xarray as xr
from ..cbook import broadcast_indices, result_type
from ..interpolate import interpolate_1d, log_interpolate_1d
from ..package_tools import Exporter
from ..units import atleast_1d, check_units, concatenate, diff, units
from ..xarray import check_axis, preprocess_xarray
exporter = Exporter(globals())
# Sentinel label/angle for an undetermined wind direction.
UND = 'UND'
UND_ANGLE = -999.
# 16-point compass rose plus the undetermined sentinel; the index of each
# label maps to a multiple of BASE_DEGREE_MULTIPLIER below.
DIR_STRS = (
    'N', 'NNE', 'NE', 'ENE',
    'E', 'ESE', 'SE', 'SSE',
    'S', 'SSW', 'SW', 'WSW',
    'W', 'WNW', 'NW', 'NNW',
    UND
) # note the order matters!
MAX_DEGREE_ANGLE = 360 * units.degree
BASE_DEGREE_MULTIPLIER = 22.5 * units.degree
# Map each compass label to its central angle; UND maps to NaN.
DIR_DICT = {dir_str: i * BASE_DEGREE_MULTIPLIER for i, dir_str in enumerate(DIR_STRS)}
DIR_DICT[UND] = np.nan
@exporter.export
@preprocess_xarray
def resample_nn_1d(a, centers):
    """Return one-dimensional nearest-neighbor indexes based on user-specified centers.

    Parameters
    ----------
    a : array-like
        1-dimensional array of numeric values from which to
        extract indexes of nearest-neighbors
    centers : array-like
        1-dimensional array of numeric values representing a subset of values to approximate

    Returns
    -------
    A list of unique indexes (in order of first occurrence) of the values
    closest to the given center values

    """
    ix = []
    seen = set()  # O(1) membership test instead of scanning the list each time
    for center in centers:
        index = (np.abs(a - center)).argmin()
        if index not in seen:
            seen.add(index)
            ix.append(index)
    return ix
@exporter.export
@preprocess_xarray
def nearest_intersection_idx(a, b):
    """Determine the index of the point just before two lines with common x values.

    Parameters
    ----------
    a : array-like
        1-dimensional array of y-values for line 1
    b : array-like
        1-dimensional array of y-values for line 2

    Returns
    -------
    An array of indexes representing the index of the values
    just before the intersection(s) of the two lines.

    """
    # A sign flip of (a - b) between consecutive points marks a crossing;
    # nonzero() yields the index just before each flip.
    crossings, = np.nonzero(np.diff(np.sign(a - b)))
    return crossings
@exporter.export
@preprocess_xarray
@units.wraps(('=A', '=B'), ('=A', '=B', '=B'))
def find_intersections(x, a, b, direction='all', log_x=False):
    """Calculate the best estimate of intersection.

    Calculates the best estimates of the intersection of two y-value
    data sets that share a common x-value set.

    Parameters
    ----------
    x : array-like
        1-dimensional array of numeric x-values
    a : array-like
        1-dimensional array of y-values for line 1
    b : array-like
        1-dimensional array of y-values for line 2
    direction : string, optional
        specifies direction of crossing. 'all', 'increasing' (a becoming greater than b),
        or 'decreasing' (b becoming greater than a). Defaults to 'all'.
    log_x : bool, optional
        Use logarithmic interpolation along the `x` axis (i.e. for finding intersections
        in pressure coordinates). Default is False.

    Returns
    -------
    A tuple (x, y) of array-like with the x and y coordinates of the
    intersections of the lines.

    Raises
    ------
    ValueError
        If ``direction`` is not 'all', 'increasing', or 'decreasing'.

    """
    # Change x to logarithmic if log_x=True
    if log_x is True:
        x = np.log(x)
    # Find the index of the points just before the intersection(s)
    nearest_idx = nearest_intersection_idx(a, b)
    next_idx = nearest_idx + 1
    # Determine the sign of the change (positive: a crosses above b)
    sign_change = np.sign(a[next_idx] - b[next_idx])
    # x-values around each intersection
    _, x0 = _next_non_masked_element(x, nearest_idx)
    _, x1 = _next_non_masked_element(x, next_idx)
    # y-values around each intersection for the first line
    _, a0 = _next_non_masked_element(a, nearest_idx)
    _, a1 = _next_non_masked_element(a, next_idx)
    # y-values around each intersection for the second line
    _, b0 = _next_non_masked_element(b, nearest_idx)
    _, b1 = _next_non_masked_element(b, next_idx)
    # Calculate the x-intersection. This comes from finding the equations of the two lines,
    # one through (x0, a0) and (x1, a1) and the other through (x0, b0) and (x1, b1),
    # finding their intersection, and reducing with a bunch of algebra.
    delta_y0 = a0 - b0
    delta_y1 = a1 - b1
    intersect_x = (delta_y1 * x0 - delta_y0 * x1) / (delta_y1 - delta_y0)
    # Calculate the y-intersection of the lines. Just plug the x above into the equation
    # for the line through the a points. One could solve for y like x above, but this
    # causes weirder unit behavior and seems a little less good numerically.
    intersect_y = ((intersect_x - x0) / (x1 - x0)) * (a1 - a0) + a0
    # If there's no intersections, return
    if len(intersect_x) == 0:
        return intersect_x, intersect_y
    # Return x to linear if log_x is True
    if log_x is True:
        intersect_x = np.exp(intersect_x)
    # Check for duplicates (consecutive identical x's collapse to one point)
    duplicate_mask = (np.ediff1d(intersect_x, to_end=1) != 0)
    # Make a mask based on the direction of sign change desired
    if direction == 'increasing':
        mask = sign_change > 0
    elif direction == 'decreasing':
        mask = sign_change < 0
    elif direction == 'all':
        return intersect_x[duplicate_mask], intersect_y[duplicate_mask]
    else:
        raise ValueError('Unknown option for direction: {0}'.format(str(direction)))
    return intersect_x[mask & duplicate_mask], intersect_y[mask & duplicate_mask]
def _next_non_masked_element(a, idx):
"""Return the next non masked element of a masked array.
If an array is masked, return the next non-masked element (if the given index is masked).
If no other unmasked points are after the given masked point, returns none.
Parameters
----------
a : array-like
1-dimensional array of numeric values
idx : integer
index of requested element
Returns
-------
Index of next non-masked element and next non-masked element
"""
try:
next_idx = idx + a[idx:].mask.argmin()
if ma.is_masked(a[next_idx]):
return None, None
else:
return next_idx, a[next_idx]
except (AttributeError, TypeError, IndexError):
return idx, a[idx]
def _delete_masked_points(*arrs):
"""Delete masked points from arrays.
Takes arrays and removes masked points to help with calculations and plotting.
Parameters
----------
arrs : one or more array-like
source arrays
Returns
-------
arrs : one or more array-like
arrays with masked elements removed
"""
if any(hasattr(a, 'mask') for a in arrs):
keep = ~functools.reduce(np.logical_or, (np.ma.getmaskarray(a) for a in arrs))
return tuple(ma.asarray(a[keep]) for a in arrs)
else:
return arrs
@exporter.export
@preprocess_xarray
def reduce_point_density(points, radius, priority=None):
    r"""Return a mask to reduce the density of points in irregularly-spaced data.

    This function is used to down-sample a collection of scattered points (e.g. surface
    data), returning a mask that can be used to select the points from one or more arrays
    (e.g. arrays of temperature and dew point). The points selected can be controlled by
    providing an array of ``priority`` values (e.g. rainfall totals to ensure that
    stations with higher precipitation remain in the mask). The points and radius can be
    specified with units. If none are provided, meters are assumed.

    Parameters
    ----------
    points : (N, K) array-like
        N locations of the points in K dimensional space
    radius : `pint.Quantity` or float
        Minimum radius allowed between points. If units are not provided, meters is assumed.
    priority : (N, K) array-like, optional
        If given, this should have the same shape as ``points``; these values will
        be used to control selection priority for points.

    Returns
    -------
    (N,) array-like of boolean values indicating whether points should be kept. This
    can be used directly to index numpy arrays to return only the desired points.

    Examples
    --------
    >>> metpy.calc.reduce_point_density(np.array([1, 2, 3]), 1.)
    array([ True, False, True])
    >>> metpy.calc.reduce_point_density(np.array([1, 2, 3]), 1.,
    ... priority=np.array([0.1, 0.9, 0.3]))
    array([False, True, False])

    """
    # Handle input with units. Assume meters if units are not specified
    if hasattr(radius, 'units'):
        radius = radius.to('m').m
    if hasattr(points, 'units'):
        points = points.to('m').m
    # Handle 1D input
    if points.ndim < 2:
        points = points.reshape(-1, 1)
    # Make a kd-tree to speed searching of data.
    tree = cKDTree(points)
    # Need to use sorted indices rather than sorting the position
    # so that the keep mask matches *original* order.
    if priority is not None:
        # Need to sort the locations in decreasing priority.
        sorted_indices = np.argsort(priority)[::-1]
    else:
        # Take advantage of iterator nature of range here to avoid making big lists
        sorted_indices = range(len(points))
    # Keep all points initially. Use the builtin `bool` dtype: the `np.bool`
    # alias was deprecated in NumPy 1.20 and removed in NumPy 1.24.
    keep = np.ones(len(points), dtype=bool)
    # Loop over all the potential points
    for ind in sorted_indices:
        # Only proceed if we haven't already excluded this point
        if keep[ind]:
            # Find the neighbors and eliminate them
            neighbors = tree.query_ball_point(points[ind], radius)
            keep[neighbors] = False
            # We just removed ourselves, so undo that
            keep[ind] = True
    return keep
def _get_bound_pressure_height(pressure, bound, heights=None, interpolate=True):
    """Calculate the bounding pressure and height in a layer.

    Given pressure, optional heights, and a bound, return either the closest pressure/height
    or interpolated pressure/height. If no heights are provided, a standard atmosphere
    ([NOAA1976]_) is assumed.

    Parameters
    ----------
    pressure : `pint.Quantity`
        Atmospheric pressures
    bound : `pint.Quantity`
        Bound to retrieve (in pressure or height)
    heights : `pint.Quantity`, optional
        Atmospheric heights associated with the pressure levels. Defaults to using
        heights calculated from ``pressure`` assuming a standard atmosphere.
    interpolate : boolean, optional
        Interpolate the bound or return the nearest. Defaults to True.

    Returns
    -------
    `pint.Quantity`
        The bound pressure and height.

    Raises
    ------
    ValueError
        If the bound's units are neither pressure nor length, or the bound
        falls outside the range of the supplied data.

    """
    # avoid circular import if basic.py ever imports something from tools.py
    from .basic import height_to_pressure_std, pressure_to_height_std
    # Make sure pressure is monotonically decreasing
    sort_inds = np.argsort(pressure)[::-1]
    pressure = pressure[sort_inds]
    if heights is not None:
        heights = heights[sort_inds]
    # Bound is given in pressure (dimensionality of mass/(length*time^2))
    if bound.dimensionality == {'[length]': -1.0, '[mass]': 1.0, '[time]': -2.0}:
        # If the bound is in the pressure data, we know the pressure bound exactly
        if bound in pressure:
            bound_pressure = bound
            # If we have heights, we know the exact height value, otherwise return standard
            # atmosphere height for the pressure
            if heights is not None:
                bound_height = heights[pressure == bound_pressure]
            else:
                bound_height = pressure_to_height_std(bound_pressure)
        # If bound is not in the data, return the nearest or interpolated values
        else:
            if interpolate:
                bound_pressure = bound  # Use the user specified bound
                if heights is not None:  # Interpolate heights from the height data
                    bound_height = log_interpolate_1d(bound_pressure, pressure, heights)
                else:  # If not heights given, use the standard atmosphere
                    bound_height = pressure_to_height_std(bound_pressure)
            else:  # No interpolation, find the closest values
                idx = (np.abs(pressure - bound)).argmin()
                bound_pressure = pressure[idx]
                if heights is not None:
                    bound_height = heights[idx]
                else:
                    bound_height = pressure_to_height_std(bound_pressure)
    # Bound is given in height
    elif bound.dimensionality == {'[length]': 1.0}:
        # If there is height data, see if we have the bound or need to interpolate/find nearest
        if heights is not None:
            if bound in heights:  # Bound is in the height data
                bound_height = bound
                bound_pressure = pressure[heights == bound]
            else:  # Bound is not in the data
                if interpolate:
                    bound_height = bound
                    # Need to cast back to the input type since interp (up to at least numpy
                    # 1.13 always returns float64. This can cause upstream users problems,
                    # resulting in something like np.append() to upcast.
                    bound_pressure = (np.interp(np.atleast_1d(bound.m), heights.m,
                                                pressure.m).astype(result_type(bound))
                                      * pressure.units)
                else:
                    idx = (np.abs(heights - bound)).argmin()
                    bound_pressure = pressure[idx]
                    bound_height = heights[idx]
        else:  # Don't have heights, so assume a standard atmosphere
            bound_height = bound
            bound_pressure = height_to_pressure_std(bound)
            # If interpolation is on, this is all we need, if not, we need to go back and
            # find the pressure closest to this and refigure the bounds
            if not interpolate:
                idx = (np.abs(pressure - bound_pressure)).argmin()
                bound_pressure = pressure[idx]
                bound_height = pressure_to_height_std(bound_pressure)
    # Bound has invalid units
    else:
        raise ValueError('Bound must be specified in units of length or pressure.')
    # If the bound is out of the range of the data, we shouldn't extrapolate
    if not (_greater_or_close(bound_pressure, np.nanmin(pressure.m) * pressure.units)
            and _less_or_close(bound_pressure, np.nanmax(pressure.m) * pressure.units)):
        raise ValueError('Specified bound is outside pressure range.')
    if heights is not None and not (_less_or_close(bound_height,
                                                   np.nanmax(heights.m) * heights.units)
                                    and _greater_or_close(bound_height,
                                                          np.nanmin(heights.m)
                                                          * heights.units)):
        raise ValueError('Specified bound is outside height range.')
    return bound_pressure, bound_height
@exporter.export
@preprocess_xarray
@check_units('[length]')
def get_layer_heights(heights, depth, *args, bottom=None, interpolate=True, with_agl=False):
    """Return an atmospheric layer from upper air data with the requested bottom and depth.

    This function will subset an upper air dataset to contain only the specified layer using
    the heights only.

    Parameters
    ----------
    heights : array-like
        Atmospheric heights
    depth : `pint.Quantity`
        The thickness of the layer
    args : array-like
        Atmospheric variable(s) measured at the given pressures
    bottom : `pint.Quantity`, optional
        The bottom of the layer
    interpolate : bool, optional
        Interpolate the top and bottom points if they are not in the given data. Defaults
        to True.
    with_agl : bool, optional
        Returns the heights as above ground level by subtracting the minimum height in the
        provided heights. Defaults to False.

    Returns
    -------
    `pint.Quantity, pint.Quantity`
        The height and data variables of the layer

    Raises
    ------
    ValueError
        If any data variable's length differs from that of ``heights``.

    """
    # Make sure pressure and datavars are the same length
    for datavar in args:
        if len(heights) != len(datavar):
            raise ValueError('Height and data variables must have the same length.')
    # If we want things in AGL, subtract the minimum height from all height values
    if with_agl:
        sfc_height = np.min(heights)
        heights = heights - sfc_height
    # If the bottom is not specified, make it the surface
    if bottom is None:
        bottom = heights[0]
    # Make heights and arguments base units
    heights = heights.to_base_units()
    bottom = bottom.to_base_units()
    # Calculate the top of the layer
    top = bottom + depth
    ret = []  # returned data variables in layer
    # Ensure heights are sorted in ascending order
    sort_inds = np.argsort(heights)
    heights = heights[sort_inds]
    # Mask based on top and bottom
    inds = _greater_or_close(heights, bottom) & _less_or_close(heights, top)
    heights_interp = heights[inds]
    # Interpolate heights at bounds if necessary and sort
    if interpolate:
        # If we don't have the bottom or top requested, append them
        if top not in heights_interp:
            heights_interp = np.sort(np.append(heights_interp.m, top.m)) * heights.units
        if bottom not in heights_interp:
            heights_interp = np.sort(np.append(heights_interp.m, bottom.m)) * heights.units
    ret.append(heights_interp)
    for datavar in args:
        # Ensure that things are sorted in ascending order
        datavar = datavar[sort_inds]
        if interpolate:
            # Interpolate for the possibly missing bottom/top values
            datavar_interp = interpolate_1d(heights_interp, heights, datavar)
            datavar = datavar_interp
        else:
            datavar = datavar[inds]
        ret.append(datavar)
    return ret
@exporter.export
@preprocess_xarray
@check_units('[pressure]')
def get_layer(pressure, *args, heights=None, bottom=None, depth=100 * units.hPa,
              interpolate=True):
    r"""Return an atmospheric layer from upper air data with the requested bottom and depth.

    This function will subset an upper air dataset to contain only the specified layer. The
    bottom of the layer can be specified with a pressure or height above the surface
    pressure. The bottom defaults to the surface pressure. The depth of the layer can be
    specified in terms of pressure or height above the bottom of the layer. If the top and
    bottom of the layer are not in the data, they are interpolated by default.

    Parameters
    ----------
    pressure : array-like
        Atmospheric pressure profile
    args : array-like
        Atmospheric variable(s) measured at the given pressures
    heights: array-like, optional
        Atmospheric heights corresponding to the given pressures. Defaults to using
        heights calculated from ``p`` assuming a standard atmosphere [NOAA1976]_.
    bottom : `pint.Quantity`, optional
        The bottom of the layer as a pressure or height above the surface pressure. Defaults
        to the highest pressure or lowest height given.
    depth : `pint.Quantity`, optional
        The thickness of the layer as a pressure or height above the bottom of the layer.
        Defaults to 100 hPa.
    interpolate : bool, optional
        Interpolate the top and bottom points if they are not in the given data. Defaults
        to True.

    Returns
    -------
    `pint.Quantity, pint.Quantity`
        The pressure and data variables of the layer

    """
    # If we get the depth kwarg, but it's None, set it to the default as well
    if depth is None:
        depth = 100 * units.hPa

    # Make sure pressure and datavars are the same length
    for datavar in args:
        if len(pressure) != len(datavar):
            raise ValueError('Pressure and data variables must have the same length.')

    # If the bottom is not specified, make it the surface pressure
    # (the maximum pressure in the profile; nanmax tolerates missing levels)
    if bottom is None:
        bottom = np.nanmax(pressure.m) * pressure.units

    bottom_pressure, bottom_height = _get_bound_pressure_height(pressure, bottom,
                                                                heights=heights,
                                                                interpolate=interpolate)

    # Calculate the top in whatever units depth is in (pressure decreases upward,
    # height increases upward)
    if depth.dimensionality == {'[length]': -1.0, '[mass]': 1.0, '[time]': -2.0}:
        top = bottom_pressure - depth
    elif depth.dimensionality == {'[length]': 1}:
        top = bottom_height + depth
    else:
        raise ValueError('Depth must be specified in units of length or pressure')

    top_pressure, _ = _get_bound_pressure_height(pressure, top, heights=heights,
                                                 interpolate=interpolate)

    ret = []  # returned data variables in layer

    # Ensure pressures are sorted in ascending order
    sort_inds = np.argsort(pressure)
    pressure = pressure[sort_inds]

    # Mask based on top and bottom pressure
    inds = (_less_or_close(pressure, bottom_pressure)
            & _greater_or_close(pressure, top_pressure))
    p_interp = pressure[inds]

    # Interpolate pressures at bounds if necessary and sort
    if interpolate:
        # If we don't have the bottom or top requested, append them
        if not np.any(np.isclose(top_pressure, p_interp)):
            p_interp = np.sort(np.append(p_interp.m, top_pressure.m)) * pressure.units
        if not np.any(np.isclose(bottom_pressure, p_interp)):
            p_interp = np.sort(np.append(p_interp.m, bottom_pressure.m)) * pressure.units

    # Reverse so results are ordered from the bottom (high pressure) upward
    ret.append(p_interp[::-1])

    for datavar in args:
        # Ensure that things are sorted in ascending order
        datavar = datavar[sort_inds]

        if interpolate:
            # Interpolate for the possibly missing bottom/top values
            # (log-pressure interpolation, consistent with how profiles vary with p)
            datavar_interp = log_interpolate_1d(p_interp, pressure, datavar)
            datavar = datavar_interp
        else:
            datavar = datavar[inds]

        ret.append(datavar[::-1])
    return ret
@exporter.export
@preprocess_xarray
def find_bounding_indices(arr, values, axis, from_below=True):
    """Find the indices surrounding the values within arr along axis.

    Returns a set of above, below, good. Above and below are lists of arrays of indices.
    These lists are formulated such that they can be used directly to index into a numpy
    array and get the expected results (no extra slices or ellipsis necessary). `good` is
    a boolean array indicating the "columns" that actually had values to bound the desired
    value(s).

    Parameters
    ----------
    arr : array-like
        Array to search for values
    values: array-like
        One or more values to search for in `arr`
    axis : int
        The dimension of `arr` along which to search.
    from_below : bool, optional
        Whether to search from "below" (i.e. low indices to high indices). If `False`,
        the search will instead proceed from high indices to low indices. Defaults to `True`.

    Returns
    -------
    above : list of arrays
        List of broadcasted indices to the location above the desired value
    below : list of arrays
        List of broadcasted indices to the location below the desired value
    good : array
        Boolean array indicating where the search found proper bounds for the desired value

    """
    # The shape of generated indices is the same as the input, but with the axis of interest
    # replaced by the number of values to search for.
    indices_shape = list(arr.shape)
    indices_shape[axis] = len(values)

    # Storage for the found indices and the mask for good locations.
    # Use builtin int/bool: the np.int/np.bool aliases were deprecated in NumPy 1.20
    # and removed in NumPy 1.24.
    indices = np.empty(indices_shape, dtype=int)
    good = np.empty(indices_shape, dtype=bool)

    # Used to put the output in the proper location
    store_slice = [slice(None)] * arr.ndim

    # Loop over all of the values and for each, see where the value would be found from a
    # linear search
    for level_index, value in enumerate(values):
        # Look for changes in the value of the test for <= value in consecutive points
        # Taking abs() because we only care if there is a flip, not which direction.
        switches = np.abs(np.diff((arr <= value).astype(int), axis=axis))

        # Good points are those where it's not just 0's along the whole axis
        good_search = np.any(switches, axis=axis)

        if from_below:
            # Look for the first switch; need to add 1 to the index since argmax is giving the
            # index within the difference array, which is one smaller.
            index = switches.argmax(axis=axis) + 1
        else:
            # Generate a list of slices to reverse the axis of interest so that searching from
            # 0 to N is starting at the "top" of the axis.
            arr_slice = [slice(None)] * arr.ndim
            arr_slice[axis] = slice(None, None, -1)

            # Same as above, but we use the slice to come from the end; then adjust those
            # indices to measure from the front.
            index = arr.shape[axis] - 1 - switches[tuple(arr_slice)].argmax(axis=axis)

        # Set all indices where the results are not good to 0
        index[~good_search] = 0

        # Put the results in the proper slice
        store_slice[axis] = level_index
        indices[tuple(store_slice)] = index
        good[tuple(store_slice)] = good_search

    # Create index values for broadcasting arrays
    above = broadcast_indices(arr, indices, arr.ndim, axis)
    below = broadcast_indices(arr, indices - 1, arr.ndim, axis)

    return above, below, good
def _greater_or_close(a, value, **kwargs):
r"""Compare values for greater or close to boolean masks.
Returns a boolean mask for values greater than or equal to a target within a specified
absolute or relative tolerance (as in :func:`numpy.isclose`).
Parameters
----------
a : array-like
Array of values to be compared
value : float
Comparison value
Returns
-------
array-like
Boolean array where values are greater than or nearly equal to value.
"""
return (a > value) | np.isclose(a, value, **kwargs)
def _less_or_close(a, value, **kwargs):
r"""Compare values for less or close to boolean masks.
Returns a boolean mask for values less than or equal to a target within a specified
absolute or relative tolerance (as in :func:`numpy.isclose`).
Parameters
----------
a : array-like
Array of values to be compared
value : float
Comparison value
Returns
-------
array-like
Boolean array where values are less than or nearly equal to value.
"""
return (a < value) | np.isclose(a, value, **kwargs)
@exporter.export
@preprocess_xarray
def lat_lon_grid_deltas(longitude, latitude, **kwargs):
    r"""Calculate the delta between grid points that are in a latitude/longitude format.

    Calculate the signed delta distance between grid points when the grid spacing is defined
    by delta lat/lon rather than delta x/y.

    Parameters
    ----------
    longitude : array_like
        array of longitudes defining the grid
    latitude : array_like
        array of latitudes defining the grid
    kwargs
        Other keyword arguments to pass to :class:`~pyproj.Geod`

    Returns
    -------
    dx, dy:
        at least two dimensional arrays of signed deltas between grid points in the x and y
        direction

    Notes
    -----
    Accepts 1D, 2D, or higher arrays for latitude and longitude
    Assumes [..., Y, X] for >=2 dimensional arrays

    """
    from pyproj import Geod

    # Inputs must be the same number of dimensions
    if latitude.ndim != longitude.ndim:
        raise ValueError('Latitude and longitude must have the same number of dimensions.')

    # If we were given 1D arrays, make a mesh grid
    if latitude.ndim < 2:
        longitude, latitude = np.meshgrid(longitude, latitude)

    # Any user-supplied kwargs replace the default spherical ellipsoid entirely
    geod = Geod(**(kwargs if kwargs else {'ellps': 'sphere'}))

    # Geodesic distances between consecutive rows; flip the sign where the forward
    # azimuth points away from geographic north (i.e. the grid runs north-to-south)
    forward_az, _, dy = geod.inv(longitude[..., :-1, :], latitude[..., :-1, :],
                                 longitude[..., 1:, :], latitude[..., 1:, :])
    dy[(forward_az < -90.) | (forward_az > 90.)] *= -1

    # Geodesic distances between consecutive columns; flip the sign where the forward
    # azimuth points away from geographic east
    forward_az, _, dx = geod.inv(longitude[..., :, :-1], latitude[..., :, :-1],
                                 longitude[..., :, 1:], latitude[..., :, 1:])
    dx[(forward_az < 0.) | (forward_az > 180.)] *= -1

    return dx * units.meter, dy * units.meter
@exporter.export
def grid_deltas_from_dataarray(f):
    """Calculate the horizontal deltas between grid points of a DataArray.

    Calculate the signed delta distance between grid points of a DataArray in the horizontal
    directions, whether the grid is lat/lon or x/y.

    Parameters
    ----------
    f : `xarray.DataArray`
        Parsed DataArray on a latitude/longitude grid, in (..., lat, lon) or (..., y, x)
        dimension order

    Returns
    -------
    dx, dy:
        arrays of signed deltas between grid points in the x and y directions with dimensions
        matching those of `f`.

    See Also
    --------
    lat_lon_grid_deltas

    """
    if f.metpy.crs['grid_mapping_name'] == 'latitude_longitude':
        # Lat/lon grid: compute geodesic deltas using the DataArray's projection
        dx, dy = lat_lon_grid_deltas(f.metpy.x, f.metpy.y,
                                     initstring=f.metpy.cartopy_crs.proj4_init)
        # Deltas here are already 2D, so only prepend new axes for leading dims
        slc_x = slc_y = tuple([np.newaxis] * (f.ndim - 2) + [slice(None)] * 2)
    else:
        # Projected x/y grid: simple coordinate differences converted to meters
        dx = np.diff(f.metpy.x.metpy.unit_array.to('m').magnitude) * units('m')
        dy = np.diff(f.metpy.y.metpy.unit_array.to('m').magnitude) * units('m')
        # Deltas are 1D here, so expand each along the other horizontal axis too
        slc = [np.newaxis] * (f.ndim - 2)
        slc_x = tuple(slc + [np.newaxis, slice(None)])
        slc_y = tuple(slc + [slice(None), np.newaxis])
    return dx[slc_x], dy[slc_y]
def xarray_derivative_wrap(func):
    """Decorate the derivative functions to make them work nicely with DataArrays.

    This will automatically determine if the coordinates can be pulled directly from the
    DataArray, or if a call to lat_lon_grid_deltas is needed.
    """
    @functools.wraps(func)
    def wrapper(f, **kwargs):
        if 'x' in kwargs or 'delta' in kwargs:
            # Caller supplied positions explicitly; just use the usual DataArray to
            # pint.Quantity preprocessing wrapper
            return preprocess_xarray(func)(f, **kwargs)
        elif isinstance(f, xr.DataArray):
            # Get axis argument, defaulting to first dimension
            axis = f.metpy.find_axis_name(kwargs.get('axis', 0))

            # Initialize new kwargs with the axis number
            new_kwargs = {'axis': f.get_axis_num(axis)}

            if check_axis(f[axis], 'time'):
                # Time coordinate, need to get time deltas
                new_kwargs['delta'] = f[axis].metpy.time_deltas
            elif check_axis(f[axis], 'longitude'):
                # Longitude coordinate, need to get grid deltas
                new_kwargs['delta'], _ = grid_deltas_from_dataarray(f)
            elif check_axis(f[axis], 'latitude'):
                # Latitude coordinate, need to get grid deltas
                _, new_kwargs['delta'] = grid_deltas_from_dataarray(f)
            else:
                # General coordinate, use as is
                new_kwargs['x'] = f[axis].metpy.unit_array

            # Calculate and return result as a DataArray; units are stripped from the
            # Quantity result and stashed in the attrs
            result = func(f.metpy.unit_array, **new_kwargs)
            return xr.DataArray(result.magnitude,
                                coords=f.coords,
                                dims=f.dims,
                                attrs={'units': str(result.units)})
        else:
            # No positions given and not a DataArray, so there is no way to infer them
            raise ValueError('Must specify either "x" or "delta" for value positions when "f" '
                             'is not a DataArray.')
    return wrapper
@exporter.export
@xarray_derivative_wrap
def first_derivative(f, **kwargs):
    """Calculate the first derivative of a grid of values.

    Works for both regularly-spaced data and grids with varying spacing.

    Either `x` or `delta` must be specified, or `f` must be given as an `xarray.DataArray`
    with attached coordinate and projection information. If `f` is an `xarray.DataArray`,
    and `x` or `delta` are given, `f` will be converted to a `pint.Quantity` and the
    derivative returned as a `pint.Quantity`, otherwise, if neither `x` nor `delta` are
    given, the attached coordinate information belonging to `axis` will be used and the
    derivative will be returned as an `xarray.DataArray`.

    This uses 3 points to calculate the derivative, using forward or backward at the edges
    of the grid as appropriate, and centered elsewhere. The irregular spacing is handled
    explicitly, using the formulation as specified by [Bowen2005]_.

    Parameters
    ----------
    f : array-like
        Array of values of which to calculate the derivative
    axis : int or str, optional
        The array axis along which to take the derivative. If `f` is ndarray-like, must be an
        integer. If `f` is a `DataArray`, can be a string (referring to either the coordinate
        dimension name or the axis type) or integer (referring to axis number), unless using
        implicit conversion to `pint.Quantity`, in which case it must be an integer. Defaults
        to 0.
    x : array-like, optional
        The coordinate values corresponding to the grid points in `f`.
    delta : array-like, optional
        Spacing between the grid points in `f`. Should be one item less than the size
        of `f` along `axis`.

    Returns
    -------
    array-like
        The first derivative calculated along the selected axis.

    See Also
    --------
    second_derivative

    """
    n, axis, delta = _process_deriv_args(f, kwargs)

    # create slice objects --- initially all are [:, :, ..., :]
    slice0 = [slice(None)] * n
    slice1 = [slice(None)] * n
    slice2 = [slice(None)] * n
    delta_slice0 = [slice(None)] * n
    delta_slice1 = [slice(None)] * n

    # First handle centered case: three-point stencil around each interior point,
    # with weights that account for unequal spacing on either side
    slice0[axis] = slice(None, -2)
    slice1[axis] = slice(1, -1)
    slice2[axis] = slice(2, None)
    delta_slice0[axis] = slice(None, -1)
    delta_slice1[axis] = slice(1, None)

    combined_delta = delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)]
    delta_diff = delta[tuple(delta_slice1)] - delta[tuple(delta_slice0)]
    center = (- delta[tuple(delta_slice1)] / (combined_delta * delta[tuple(delta_slice0)])
              * f[tuple(slice0)]
              + delta_diff / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)])
              * f[tuple(slice1)]
              + delta[tuple(delta_slice0)] / (combined_delta * delta[tuple(delta_slice1)])
              * f[tuple(slice2)])

    # Fill in "left" edge with forward difference (one-sided, uses first three points)
    slice0[axis] = slice(None, 1)
    slice1[axis] = slice(1, 2)
    slice2[axis] = slice(2, 3)
    delta_slice0[axis] = slice(None, 1)
    delta_slice1[axis] = slice(1, 2)

    combined_delta = delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)]
    big_delta = combined_delta + delta[tuple(delta_slice0)]
    left = (- big_delta / (combined_delta * delta[tuple(delta_slice0)])
            * f[tuple(slice0)]
            + combined_delta / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)])
            * f[tuple(slice1)]
            - delta[tuple(delta_slice0)] / (combined_delta * delta[tuple(delta_slice1)])
            * f[tuple(slice2)])

    # Now the "right" edge with backward difference (one-sided, uses last three points)
    slice0[axis] = slice(-3, -2)
    slice1[axis] = slice(-2, -1)
    slice2[axis] = slice(-1, None)
    delta_slice0[axis] = slice(-2, -1)
    delta_slice1[axis] = slice(-1, None)

    combined_delta = delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)]
    big_delta = combined_delta + delta[tuple(delta_slice1)]
    right = (delta[tuple(delta_slice1)] / (combined_delta * delta[tuple(delta_slice0)])
             * f[tuple(slice0)]
             - combined_delta / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)])
             * f[tuple(slice1)]
             + big_delta / (combined_delta * delta[tuple(delta_slice1)])
             * f[tuple(slice2)])

    return concatenate((left, center, right), axis=axis)
@exporter.export
@xarray_derivative_wrap
def second_derivative(f, **kwargs):
    """Calculate the second derivative of a grid of values.

    Works for both regularly-spaced data and grids with varying spacing.

    Either `x` or `delta` must be specified, or `f` must be given as an `xarray.DataArray`
    with attached coordinate and projection information. If `f` is an `xarray.DataArray`,
    and `x` or `delta` are given, `f` will be converted to a `pint.Quantity` and the
    derivative returned as a `pint.Quantity`, otherwise, if neither `x` nor `delta` are
    given, the attached coordinate information belonging to `axis` will be used and the
    derivative will be returned as an `xarray.DataArray`.

    This uses 3 points to calculate the derivative, using forward or backward at the edges
    of the grid as appropriate, and centered elsewhere. The irregular spacing is handled
    explicitly, using the formulation as specified by [Bowen2005]_.

    Parameters
    ----------
    f : array-like
        Array of values of which to calculate the derivative
    axis : int or str, optional
        The array axis along which to take the derivative. If `f` is ndarray-like, must be an
        integer. If `f` is a `DataArray`, can be a string (referring to either the coordinate
        dimension name or the axis type) or integer (referring to axis number), unless using
        implicit conversion to `pint.Quantity`, in which case it must be an integer. Defaults
        to 0.
    x : array-like, optional
        The coordinate values corresponding to the grid points in `f`.
    delta : array-like, optional
        Spacing between the grid points in `f`. There should be one item less than the size
        of `f` along `axis`.

    Returns
    -------
    array-like
        The second derivative calculated along the selected axis.

    See Also
    --------
    first_derivative

    """
    n, axis, delta = _process_deriv_args(f, kwargs)

    # create slice objects --- initially all are [:, :, ..., :]
    slice0 = [slice(None)] * n
    slice1 = [slice(None)] * n
    slice2 = [slice(None)] * n
    delta_slice0 = [slice(None)] * n
    delta_slice1 = [slice(None)] * n

    # First handle centered case: three-point stencil with weights that account
    # for unequal spacing on either side of each interior point
    slice0[axis] = slice(None, -2)
    slice1[axis] = slice(1, -1)
    slice2[axis] = slice(2, None)
    delta_slice0[axis] = slice(None, -1)
    delta_slice1[axis] = slice(1, None)

    combined_delta = delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)]
    center = 2 * (f[tuple(slice0)] / (combined_delta * delta[tuple(delta_slice0)])
                  - f[tuple(slice1)] / (delta[tuple(delta_slice0)]
                                        * delta[tuple(delta_slice1)])
                  + f[tuple(slice2)] / (combined_delta * delta[tuple(delta_slice1)]))

    # Fill in "left" edge (one-sided, uses first three points)
    slice0[axis] = slice(None, 1)
    slice1[axis] = slice(1, 2)
    slice2[axis] = slice(2, 3)
    delta_slice0[axis] = slice(None, 1)
    delta_slice1[axis] = slice(1, 2)

    combined_delta = delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)]
    left = 2 * (f[tuple(slice0)] / (combined_delta * delta[tuple(delta_slice0)])
                - f[tuple(slice1)] / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)])
                + f[tuple(slice2)] / (combined_delta * delta[tuple(delta_slice1)]))

    # Now the "right" edge (one-sided, uses last three points)
    slice0[axis] = slice(-3, -2)
    slice1[axis] = slice(-2, -1)
    slice2[axis] = slice(-1, None)
    delta_slice0[axis] = slice(-2, -1)
    delta_slice1[axis] = slice(-1, None)

    combined_delta = delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)]
    right = 2 * (f[tuple(slice0)] / (combined_delta * delta[tuple(delta_slice0)])
                 - f[tuple(slice1)] / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)])
                 + f[tuple(slice2)] / (combined_delta * delta[tuple(delta_slice1)]))

    return concatenate((left, center, right), axis=axis)
@exporter.export
def gradient(f, **kwargs):
    """Calculate the gradient of a grid of values.

    Works for both regularly-spaced data, and grids with varying spacing.

    Either `coordinates` or `deltas` must be specified, or `f` must be given as an
    `xarray.DataArray` with attached coordinate and projection information. If `f` is an
    `xarray.DataArray`, and `coordinates` or `deltas` are given, `f` will be converted to a
    `pint.Quantity` and the gradient returned as a tuple of `pint.Quantity`, otherwise, if
    neither `coordinates` nor `deltas` are given, the attached coordinate information
    belonging to `axis` will be used and the gradient will be returned as a tuple of
    `xarray.DataArray`.

    Parameters
    ----------
    f : array-like
        Array of values of which to calculate the derivative
    coordinates : array-like, optional
        Sequence of arrays containing the coordinate values corresponding to the
        grid points in `f` in axis order.
    deltas : array-like, optional
        Sequence of arrays or scalars that specify the spacing between the grid points in `f`
        in axis order. There should be one item less than the size of `f` along the
        applicable axis.
    axes : sequence, optional
        Sequence of strings (if `f` is a `xarray.DataArray` and implicit conversion to
        `pint.Quantity` is not used) or integers that specify the array axes along which to
        take the derivatives. Defaults to all axes of `f`. If given, and used with
        `coordinates` or `deltas`, its length must be less than or equal to that of the
        `coordinates` or `deltas` given.

    Returns
    -------
    tuple of array-like
        The first derivative calculated along each specified axis of the original array

    See Also
    --------
    laplacian, first_derivative

    Notes
    -----
    If this function is used without the `axes` parameter, the length of `coordinates` or
    `deltas` (as applicable) should match the number of dimensions of `f`.

    """
    pos_kwarg, positions, axes = _process_gradient_args(f, kwargs)
    # Take the first derivative along each requested axis, pairing it with the
    # matching entry of the positions ('x' coordinates or 'delta' spacings)
    derivatives = []
    for ind, axis in enumerate(axes):
        derivatives.append(first_derivative(f, axis=axis, **{pos_kwarg: positions[ind]}))
    return tuple(derivatives)
@exporter.export
def laplacian(f, **kwargs):
    """Calculate the laplacian of a grid of values.

    Works for both regularly-spaced data, and grids with varying spacing.

    Either `coordinates` or `deltas` must be specified, or `f` must be given as an
    `xarray.DataArray` with attached coordinate and projection information. If `f` is an
    `xarray.DataArray`, and `coordinates` or `deltas` are given, `f` will be converted to a
    `pint.Quantity` and the gradient returned as a tuple of `pint.Quantity`, otherwise, if
    neither `coordinates` nor `deltas` are given, the attached coordinate information
    belonging to `axis` will be used and the gradient will be returned as a tuple of
    `xarray.DataArray`.

    Parameters
    ----------
    f : array-like
        Array of values of which to calculate the derivative
    coordinates : array-like, optional
        The coordinate values corresponding to the grid points in `f`
    deltas : array-like, optional
        Spacing between the grid points in `f`. There should be one item less than the size
        of `f` along the applicable axis.
    axes : sequence, optional
        Sequence of strings (if `f` is a `xarray.DataArray` and implicit conversion to
        `pint.Quantity` is not used) or integers that specify the array axes along which to
        take the derivatives. Defaults to all axes of `f`. If given, and used with
        `coordinates` or `deltas`, its length must be less than or equal to that of the
        `coordinates` or `deltas` given.

    Returns
    -------
    array-like
        The laplacian

    See Also
    --------
    gradient, second_derivative

    Notes
    -----
    If this function is used without the `axes` parameter, the length of `coordinates` or
    `deltas` (as applicable) should match the number of dimensions of `f`.

    """
    pos_kwarg, positions, axes = _process_gradient_args(f, kwargs)
    # Sum the second derivatives taken along each requested axis
    second_derivs = [second_derivative(f, axis=axis, **{pos_kwarg: positions[ind]})
                     for ind, axis in enumerate(axes)]
    result = sum(second_derivs)
    if isinstance(second_derivs[0], xr.DataArray):
        # Summing DataArrays drops the units attribute; restore it from a component
        result.attrs['units'] = second_derivs[0].attrs['units']
    return result
def _broadcast_to_axis(arr, axis, ndim):
"""Handle reshaping coordinate array to have proper dimensionality.
This puts the values along the specified axis.
"""
if arr.ndim == 1 and arr.ndim < ndim:
new_shape = [1] * ndim
new_shape[axis] = arr.size
arr = arr.reshape(*new_shape)
return arr
def _process_gradient_args(f, kwargs):
    """Handle common processing of arguments for gradient and gradient-like functions."""
    axes_given = 'axes' in kwargs
    axes = kwargs['axes'] if axes_given else range(f.ndim)

    def _check_length(positions):
        # With explicit axes, positions may be longer but never shorter; without,
        # they must match f's dimensionality exactly
        if axes_given and len(positions) < len(axes):
            raise ValueError('Length of "coordinates" or "deltas" cannot be less than that '
                             'of "axes".')
        if not axes_given and len(positions) != len(axes):
            raise ValueError('Length of "coordinates" or "deltas" must match the number of '
                             'dimensions of "f" when "axes" is not given.')

    if 'deltas' in kwargs:
        if 'coordinates' in kwargs or 'x' in kwargs:
            raise ValueError('Cannot specify both "coordinates" and "deltas".')
        _check_length(kwargs['deltas'])
        return 'delta', kwargs['deltas'], axes
    if 'coordinates' in kwargs:
        _check_length(kwargs['coordinates'])
        return 'x', kwargs['coordinates'], axes
    if isinstance(f, xr.DataArray):
        return 'pass', axes, axes  # only the axis argument matters
    raise ValueError('Must specify either "coordinates" or "deltas" for value positions '
                     'when "f" is not a DataArray.')
def _process_deriv_args(f, kwargs):
    """Handle common processing of arguments for derivative functions.

    Validates the axis and grid-position arguments and returns ``(n, axis, delta)``,
    where ``n`` is ``f.ndim``, ``axis`` is the normalized axis number, and ``delta``
    is the grid spacing broadcast to be compatible with ``f``.
    """
    n = f.ndim
    axis = normalize_axis_index(kwargs.get('axis', 0), n)

    # Three-point stencils need at least 3 samples along the axis
    if f.shape[axis] < 3:
        raise ValueError('f must have at least 3 points along the desired axis.')

    if 'delta' in kwargs:
        if 'x' in kwargs:
            raise ValueError('Cannot specify both "x" and "delta".')

        delta = atleast_1d(kwargs['delta'])
        if delta.size == 1:
            # Uniform spacing given as a scalar: tile it to one-less-than-f along axis.
            # broadcast_to drops pint units, so stash and re-apply them.
            diff_size = list(f.shape)
            diff_size[axis] -= 1
            delta_units = getattr(delta, 'units', None)
            delta = np.broadcast_to(delta, diff_size, subok=True)
            if not hasattr(delta, 'units') and delta_units is not None:
                delta = delta * delta_units
        else:
            delta = _broadcast_to_axis(delta, axis, n)
    elif 'x' in kwargs:
        # Coordinate values given: spacing is their successive differences
        x = _broadcast_to_axis(kwargs['x'], axis, n)
        delta = diff(x, axis=axis)
    else:
        raise ValueError('Must specify either "x" or "delta" for value positions.')

    return n, axis, delta
@exporter.export
@preprocess_xarray
def parse_angle(input_dir):
    """Calculate the meteorological angle from directional text.

    Works for abbreviations or whole words (E -> 90 | South -> 180)
    and also is able to parse 22.5 degree angles such as ESE/East South East.

    Parameters
    ----------
    input_dir : string or array-like
        Directional text such as west, [south-west, ne], etc

    Returns
    -------
    `pint.Quantity`
        The angle in degrees

    """
    if isinstance(input_dir, str):
        # Single direction string: abbreviate it, then validate against known directions
        cleaned = _clean_direction([_abbrieviate_direction(input_dir)])
    elif not hasattr(input_dir, '__len__'):
        # Unrecognizable scalar (not a string and not sized, e.g. a number or None)
        return np.nan
    else:
        # np.array, pd.Series, list, and other array-likes: join into one string so a
        # single abbreviation pass handles every element, then split and validate
        joined = ','.join(_clean_direction(input_dir, preprocess=True))
        cleaned = _clean_direction(_abbrieviate_direction(joined).split(','))
    return itemgetter(*cleaned)(DIR_DICT)
def _clean_direction(dir_list, preprocess=False):
    """Handle None if preprocess, else handles anything not in DIR_STRS."""
    if preprocess:
        # primarily to replace non-strings (e.g. None) so ','.join works
        return [entry if isinstance(entry, str) else UND for entry in dir_list]
    # replace anything that is not a recognized abbreviated direction
    return [entry if entry in DIR_STRS else UND for entry in dir_list]
def _abbrieviate_direction(ext_dir_str):
"""Convert extended (non-abbrievated) directions to abbrieviation."""
return (ext_dir_str
.upper()
.replace('_', '')
.replace('-', '')
.replace(' ', '')
.replace('NORTH', 'N')
.replace('EAST', 'E')
.replace('SOUTH', 'S')
.replace('WEST', 'W')
)
@exporter.export
@preprocess_xarray
def angle_to_direction(input_angle, full=False, level=3):
    """Convert the meteorological angle to directional text.

    Works for angles greater than or equal to 360 (360 -> N | 405 -> NE)
    and rounds to the nearest angle (355 -> N | 404 -> NNE)

    Parameters
    ----------
    input_angle : numeric or array-like numeric
        Angles such as 0, 25, 45, 360, 410, etc
    full : boolean
        True returns full text (South), False returns abbrieviated text (S)
    level : int
        Level of detail (3 = N/NNE/NE/ENE/E... 2 = N/NE/E/SE... 1 = N/E/S/W)

    Returns
    -------
    direction
        The directional text

    """
    try:  # strip units temporarily
        origin_units = input_angle.units
        input_angle = input_angle.m
    except AttributeError:  # no units associated
        origin_units = units.degree

    # Scalar (or single string) input gets wrapped in a list so the array path works;
    # remember so we can unwrap the result at the end
    if not hasattr(input_angle, '__len__') or isinstance(input_angle, str):
        input_angle = [input_angle]
        scalar = True
    else:
        scalar = False

    # clean any numeric strings, negatives, and None
    # does not handle strings with alphabet
    input_angle = np.array(input_angle).astype(float)
    with np.errstate(invalid='ignore'):  # warns about the np.nan
        input_angle[np.where(input_angle < 0)] = np.nan

    input_angle = input_angle * origin_units

    # normalizer used for angles > 360 degree to normalize between 0 - 360
    normalizer = np.array(input_angle.m / MAX_DEGREE_ANGLE.m, dtype=int)
    norm_angles = abs(input_angle - MAX_DEGREE_ANGLE * normalizer)

    # nskip selects every nth entry of DIR_STRS to match the requested detail level
    if level == 3:
        nskip = 1
    elif level == 2:
        nskip = 2
    elif level == 1:
        nskip = 4
    else:
        err_msg = 'Level of complexity cannot be less than 1 or greater than 3!'
        raise ValueError(err_msg)

    # Map each kept angle (in degrees) to its direction string
    angle_dict = {i * BASE_DEGREE_MULTIPLIER.m * nskip: dir_str
                  for i, dir_str in enumerate(DIR_STRS[::nskip])}
    angle_dict[MAX_DEGREE_ANGLE.m] = 'N'  # handle edge case of 360.
    angle_dict[UND_ANGLE] = UND

    # round to the nearest angles for dict lookup
    # 0.001 is subtracted so there's an equal number of dir_str from
    # np.arange(0, 360, 22.5), or else some dir_str will be preferred

    # without the 0.001, level=2 would yield:
    # ['N', 'N', 'NE', 'E', 'E', 'E', 'SE', 'S', 'S',
    #  'S', 'SW', 'W', 'W', 'W', 'NW', 'N']

    # with the -0.001, level=2 would yield:
    # ['N', 'N', 'NE', 'NE', 'E', 'E', 'SE', 'SE',
    #  'S', 'S', 'SW', 'SW', 'W', 'W', 'NW', 'NW']

    multiplier = np.round(
        (norm_angles / BASE_DEGREE_MULTIPLIER / nskip) - 0.001).m
    round_angles = (multiplier * BASE_DEGREE_MULTIPLIER.m * nskip)
    # NaN angles (invalid input) map to the "undefined" sentinel key
    round_angles[np.where(np.isnan(round_angles))] = UND_ANGLE

    dir_str_arr = itemgetter(*round_angles)(angle_dict)  # for array
    if full:
        dir_str_arr = ','.join(dir_str_arr)
        dir_str_arr = _unabbrieviate_direction(dir_str_arr)
        if not scalar:
            dir_str = dir_str_arr.split(',')
        else:
            dir_str = dir_str_arr.replace(',', ' ')
    else:
        dir_str = dir_str_arr

    return dir_str
def _unabbrieviate_direction(abb_dir_str):
    """Convert abbreviated directions to their full (non-abbreviated) words."""
    # Replacement order matters: the undefined sentinel must be expanded before
    # its letters could be mistaken for cardinal abbreviations
    expanded = abb_dir_str.upper()
    for letter, word in ((UND, 'Undefined '),
                         ('N', 'North '),
                         ('E', 'East '),
                         ('S', 'South '),
                         ('W', 'West ')):
        expanded = expanded.replace(letter, word)
    # Collapse the space each expansion leaves before a comma, and trim the tail
    return expanded.replace(' ,', ',').strip()
def _remove_nans(*variables):
"""Remove NaNs from arrays that cause issues with calculations.
Takes a variable number of arguments
Returns masked arrays in the same order as provided
"""
mask = None
for v in variables:
if mask is None:
mask = np.isnan(v)
else:
mask |= np.isnan(v)
# Mask everyone with that joint mask
ret = []
for v in variables:
ret.append(v[~mask])
return ret
| src/metpy/calc/tools.py | 53,973 | Convert extended (non-abbrievated) directions to abbrieviation.
Handle reshaping coordinate array to have proper dimensionality.
This puts the values along the specified axis.
Handle None if preprocess, else handles anything not in DIR_STRS.
Delete masked points from arrays.
Takes arrays and removes masked points to help with calculations and plotting.
Parameters
----------
arrs : one or more array-like
source arrays
Returns
-------
arrs : one or more array-like
arrays with masked elements removed
Calculate the bounding pressure and height in a layer.
Given pressure, optional heights, and a bound, return either the closest pressure/height
or interpolated pressure/height. If no heights are provided, a standard atmosphere
([NOAA1976]_) is assumed.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressures
bound : `pint.Quantity`
Bound to retrieve (in pressure or height)
heights : `pint.Quantity`, optional
Atmospheric heights associated with the pressure levels. Defaults to using
heights calculated from ``pressure`` assuming a standard atmosphere.
interpolate : boolean, optional
Interpolate the bound or return the nearest. Defaults to True.
Returns
-------
`pint.Quantity`
The bound pressure and height.
Compare values for greater or close to boolean masks.
Returns a boolean mask for values greater than or equal to a target within a specified
absolute or relative tolerance (as in :func:`numpy.isclose`).
Parameters
----------
a : array-like
Array of values to be compared
value : float
Comparison value
Returns
-------
array-like
Boolean array where values are greater than or nearly equal to value.
Compare values for less or close to boolean masks.
Returns a boolean mask for values less than or equal to a target within a specified
absolute or relative tolerance (as in :func:`numpy.isclose`).
Parameters
----------
a : array-like
Array of values to be compared
value : float
Comparison value
Returns
-------
array-like
Boolean array where values are less than or nearly equal to value.
Return the next non masked element of a masked array.
If an array is masked, return the next non-masked element (if the given index is masked).
If no other unmasked points are after the given masked point, returns none.
Parameters
----------
a : array-like
1-dimensional array of numeric values
idx : integer
index of requested element
Returns
-------
Index of next non-masked element and next non-masked element
Handle common processing of arguments for derivative functions.
Handle common processing of arguments for gradient and gradient-like functions.
Remove NaNs from arrays that cause issues with calculations.
Takes a variable number of arguments
Returns masked arrays in the same order as provided
Convert abbreviated directions to non-abbreviated direction.
Convert the meteorological angle to directional text.
Works for angles greater than or equal to 360 (360 -> N | 405 -> NE)
and rounds to the nearest angle (355 -> N | 404 -> NNE)
Parameters
----------
input_angle : numeric or array-like numeric
Angles such as 0, 25, 45, 360, 410, etc
full : boolean
    True returns full text (South), False returns abbreviated text (S)
level : int
Level of detail (3 = N/NNE/NE/ENE/E... 2 = N/NE/E/SE... 1 = N/E/S/W)
Returns
-------
direction
The directional text
Find the indices surrounding the values within arr along axis.
Returns a set of above, below, good. Above and below are lists of arrays of indices.
These lists are formulated such that they can be used directly to index into a numpy
array and get the expected results (no extra slices or ellipsis necessary). `good` is
a boolean array indicating the "columns" that actually had values to bound the desired
value(s).
Parameters
----------
arr : array-like
Array to search for values
values: array-like
One or more values to search for in `arr`
axis : int
The dimension of `arr` along which to search.
from_below : bool, optional
Whether to search from "below" (i.e. low indices to high indices). If `False`,
the search will instead proceed from high indices to low indices. Defaults to `True`.
Returns
-------
above : list of arrays
List of broadcasted indices to the location above the desired value
below : list of arrays
List of broadcasted indices to the location below the desired value
good : array
Boolean array indicating where the search found proper bounds for the desired value
Calculate the best estimate of intersection.
Calculates the best estimates of the intersection of two y-value
data sets that share a common x-value set.
Parameters
----------
x : array-like
1-dimensional array of numeric x-values
a : array-like
1-dimensional array of y-values for line 1
b : array-like
1-dimensional array of y-values for line 2
direction : string, optional
specifies direction of crossing. 'all', 'increasing' (a becoming greater than b),
or 'decreasing' (b becoming greater than a). Defaults to 'all'.
log_x : bool, optional
Use logarithmic interpolation along the `x` axis (i.e. for finding intersections
in pressure coordinates). Default is False.
Returns
-------
A tuple (x, y) of array-like with the x and y coordinates of the
intersections of the lines.
Calculate the first derivative of a grid of values.
Works for both regularly-spaced data and grids with varying spacing.
Either `x` or `delta` must be specified, or `f` must be given as an `xarray.DataArray` with
attached coordinate and projection information. If `f` is an `xarray.DataArray`, and `x` or
`delta` are given, `f` will be converted to a `pint.Quantity` and the derivative returned
as a `pint.Quantity`, otherwise, if neither `x` nor `delta` are given, the attached
coordinate information belonging to `axis` will be used and the derivative will be returned
as an `xarray.DataArray`.
This uses 3 points to calculate the derivative, using forward or backward at the edges of
the grid as appropriate, and centered elsewhere. The irregular spacing is handled
explicitly, using the formulation as specified by [Bowen2005]_.
Parameters
----------
f : array-like
Array of values of which to calculate the derivative
axis : int or str, optional
The array axis along which to take the derivative. If `f` is ndarray-like, must be an
integer. If `f` is a `DataArray`, can be a string (referring to either the coordinate
dimension name or the axis type) or integer (referring to axis number), unless using
implicit conversion to `pint.Quantity`, in which case it must be an integer. Defaults
to 0.
x : array-like, optional
The coordinate values corresponding to the grid points in `f`.
delta : array-like, optional
Spacing between the grid points in `f`. Should be one item less than the size
of `f` along `axis`.
Returns
-------
array-like
The first derivative calculated along the selected axis.
See Also
--------
second_derivative
Return an atmospheric layer from upper air data with the requested bottom and depth.
This function will subset an upper air dataset to contain only the specified layer. The
bottom of the layer can be specified with a pressure or height above the surface
pressure. The bottom defaults to the surface pressure. The depth of the layer can be
specified in terms of pressure or height above the bottom of the layer. If the top and
bottom of the layer are not in the data, they are interpolated by default.
Parameters
----------
pressure : array-like
Atmospheric pressure profile
args : array-like
Atmospheric variable(s) measured at the given pressures
heights: array-like, optional
Atmospheric heights corresponding to the given pressures. Defaults to using
heights calculated from ``p`` assuming a standard atmosphere [NOAA1976]_.
bottom : `pint.Quantity`, optional
The bottom of the layer as a pressure or height above the surface pressure. Defaults
to the highest pressure or lowest height given.
depth : `pint.Quantity`, optional
The thickness of the layer as a pressure or height above the bottom of the layer.
Defaults to 100 hPa.
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data. Defaults
to True.
Returns
-------
`pint.Quantity, pint.Quantity`
The pressure and data variables of the layer
Return an atmospheric layer from upper air data with the requested bottom and depth.
This function will subset an upper air dataset to contain only the specified layer using
the heights only.
Parameters
----------
heights : array-like
Atmospheric heights
depth : `pint.Quantity`
The thickness of the layer
args : array-like
Atmospheric variable(s) measured at the given pressures
bottom : `pint.Quantity`, optional
The bottom of the layer
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data. Defaults
to True.
with_agl : bool, optional
Returns the heights as above ground level by subtracting the minimum height in the
provided heights. Defaults to False.
Returns
-------
`pint.Quantity, pint.Quantity`
The height and data variables of the layer
Calculate the gradient of a grid of values.
Works for both regularly-spaced data, and grids with varying spacing.
Either `coordinates` or `deltas` must be specified, or `f` must be given as an
`xarray.DataArray` with attached coordinate and projection information. If `f` is an
`xarray.DataArray`, and `coordinates` or `deltas` are given, `f` will be converted to a
`pint.Quantity` and the gradient returned as a tuple of `pint.Quantity`, otherwise, if
neither `coordinates` nor `deltas` are given, the attached coordinate information belonging
to `axis` will be used and the gradient will be returned as a tuple of `xarray.DataArray`.
Parameters
----------
f : array-like
Array of values of which to calculate the derivative
coordinates : array-like, optional
Sequence of arrays containing the coordinate values corresponding to the
grid points in `f` in axis order.
deltas : array-like, optional
Sequence of arrays or scalars that specify the spacing between the grid points in `f`
in axis order. There should be one item less than the size of `f` along the applicable
axis.
axes : sequence, optional
Sequence of strings (if `f` is a `xarray.DataArray` and implicit conversion to
`pint.Quantity` is not used) or integers that specify the array axes along which to
take the derivatives. Defaults to all axes of `f`. If given, and used with
`coordinates` or `deltas`, its length must be less than or equal to that of the
`coordinates` or `deltas` given.
Returns
-------
tuple of array-like
The first derivative calculated along each specified axis of the original array
See Also
--------
laplacian, first_derivative
Notes
-----
If this function is used without the `axes` parameter, the length of `coordinates` or
`deltas` (as applicable) should match the number of dimensions of `f`.
Calculate the horizontal deltas between grid points of a DataArray.
Calculate the signed delta distance between grid points of a DataArray in the horizontal
directions, whether the grid is lat/lon or x/y.
Parameters
----------
f : `xarray.DataArray`
Parsed DataArray on a latitude/longitude grid, in (..., lat, lon) or (..., y, x)
dimension order
Returns
-------
dx, dy:
arrays of signed deltas between grid points in the x and y directions with dimensions
matching those of `f`.
See Also
--------
lat_lon_grid_deltas
Calculate the laplacian of a grid of values.
Works for both regularly-spaced data, and grids with varying spacing.
Either `coordinates` or `deltas` must be specified, or `f` must be given as an
`xarray.DataArray` with attached coordinate and projection information. If `f` is an
`xarray.DataArray`, and `coordinates` or `deltas` are given, `f` will be converted to a
`pint.Quantity` and the gradient returned as a tuple of `pint.Quantity`, otherwise, if
neither `coordinates` nor `deltas` are given, the attached coordinate information belonging
to `axis` will be used and the gradient will be returned as a tuple of `xarray.DataArray`.
Parameters
----------
f : array-like
Array of values of which to calculate the derivative
coordinates : array-like, optional
The coordinate values corresponding to the grid points in `f`
deltas : array-like, optional
Spacing between the grid points in `f`. There should be one item less than the size
of `f` along the applicable axis.
axes : sequence, optional
Sequence of strings (if `f` is a `xarray.DataArray` and implicit conversion to
`pint.Quantity` is not used) or integers that specify the array axes along which to
take the derivatives. Defaults to all axes of `f`. If given, and used with
`coordinates` or `deltas`, its length must be less than or equal to that of the
`coordinates` or `deltas` given.
Returns
-------
array-like
The laplacian
See Also
--------
gradient, second_derivative
Notes
-----
If this function is used without the `axes` parameter, the length of `coordinates` or
`deltas` (as applicable) should match the number of dimensions of `f`.
Calculate the delta between grid points that are in a latitude/longitude format.
Calculate the signed delta distance between grid points when the grid spacing is defined by
delta lat/lon rather than delta x/y
Parameters
----------
longitude : array_like
array of longitudes defining the grid
latitude : array_like
array of latitudes defining the grid
kwargs
Other keyword arguments to pass to :class:`~pyproj.Geod`
Returns
-------
dx, dy:
at least two dimensional arrays of signed deltas between grid points in the x and y
direction
Notes
-----
Accepts 1D, 2D, or higher arrays for latitude and longitude
Assumes [..., Y, X] for >=2 dimensional arrays
Determine the index of the point just before two lines with common x values.
Parameters
----------
a : array-like
1-dimensional array of y-values for line 1
b : array-like
1-dimensional array of y-values for line 2
Returns
-------
An array of indexes representing the index of the values
just before the intersection(s) of the two lines.
Calculate the meteorological angle from directional text.
Works for abbrieviations or whole words (E -> 90 | South -> 180)
and also is able to parse 22.5 degree angles such as ESE/East South East
Parameters
----------
input_dir : string or array-like
Directional text such as west, [south-west, ne], etc
Returns
-------
`pint.Quantity`
The angle in degrees
Return a mask to reduce the density of points in irregularly-spaced data.
This function is used to down-sample a collection of scattered points (e.g. surface
data), returning a mask that can be used to select the points from one or more arrays
(e.g. arrays of temperature and dew point). The points selected can be controlled by
providing an array of ``priority`` values (e.g. rainfall totals to ensure that
stations with higher precipitation remain in the mask). The points and radius can be
specified with units. If none are provided, meters are assumed.
Parameters
----------
points : (N, K) array-like
N locations of the points in K dimensional space
radius : `pint.Quantity` or float
Minimum radius allowed between points. If units are not provided, meters is assumed.
priority : (N, K) array-like, optional
If given, this should have the same shape as ``points``; these values will
be used to control selection priority for points.
Returns
-------
(N,) array-like of boolean values indicating whether points should be kept. This
can be used directly to index numpy arrays to return only the desired points.
Examples
--------
>>> metpy.calc.reduce_point_density(np.array([1, 2, 3]), 1.)
array([ True, False, True])
>>> metpy.calc.reduce_point_density(np.array([1, 2, 3]), 1.,
... priority=np.array([0.1, 0.9, 0.3]))
array([False, True, False])
Return one-dimensional nearest-neighbor indexes based on user-specified centers.
Parameters
----------
a : array-like
1-dimensional array of numeric values from which to
extract indexes of nearest-neighbors
centers : array-like
1-dimensional array of numeric values representing a subset of values to approximate
Returns
-------
An array of indexes representing values closest to given array values
Calculate the second derivative of a grid of values.
Works for both regularly-spaced data and grids with varying spacing.
Either `x` or `delta` must be specified, or `f` must be given as an `xarray.DataArray` with
attached coordinate and projection information. If `f` is an `xarray.DataArray`, and `x` or
`delta` are given, `f` will be converted to a `pint.Quantity` and the derivative returned
as a `pint.Quantity`, otherwise, if neither `x` nor `delta` are given, the attached
coordinate information belonging to `axis` will be used and the derivative will be returned
as an `xarray.DataArray`.
This uses 3 points to calculate the derivative, using forward or backward at the edges of
the grid as appropriate, and centered elsewhere. The irregular spacing is handled
explicitly, using the formulation as specified by [Bowen2005]_.
Parameters
----------
f : array-like
Array of values of which to calculate the derivative
axis : int or str, optional
The array axis along which to take the derivative. If `f` is ndarray-like, must be an
integer. If `f` is a `DataArray`, can be a string (referring to either the coordinate
dimension name or the axis type) or integer (referring to axis number), unless using
implicit conversion to `pint.Quantity`, in which case it must be an integer. Defaults
to 0.
x : array-like, optional
The coordinate values corresponding to the grid points in `f`.
delta : array-like, optional
Spacing between the grid points in `f`. There should be one item less than the size
of `f` along `axis`.
Returns
-------
array-like
The second derivative calculated along the selected axis.
See Also
--------
first_derivative
Decorate the derivative functions to make them work nicely with DataArrays.
This will automatically determine if the coordinates can be pulled directly from the
DataArray, or if a call to lat_lon_grid_deltas is needed.
Contains a collection of generally useful calculation tools.
Copyright (c) 2016,2017,2018,2019 MetPy Developers. Distributed under the terms of the BSD 3-Clause License. SPDX-License-Identifier: BSD-3-Clause note the order matters! Difference in the two y-value sets Determine the point just before the intersection of the lines Will return multiple points for multiple intersections Change x to logarithmic if log_x=True Find the index of the points just before the intersection(s) Determine the sign of the change x-values around each intersection y-values around each intersection for the first line y-values around each intersection for the second line Calculate the x-intersection. This comes from finding the equations of the two lines, one through (x0, a0) and (x1, a1) and the other through (x0, b0) and (x1, b1), finding their intersection, and reducing with a bunch of algebra. Calculate the y-intersection of the lines. Just plug the x above into the equation for the line through the a points. One could solve for y like x above, but this causes weirder unit behavior and seems a little less good numerically. If there's no intersections, return Return x to linear if log_x is True Check for duplicates Make a mask based on the direction of sign change desired Handle input with units. Assume meters if units are not specified Handle 1D input Make a kd-tree to speed searching of data. Need to use sorted indices rather than sorting the position so that the keep mask matches *original* order. Need to sort the locations in decreasing priority. 
Take advantage of iterator nature of range here to avoid making big lists Keep all points initially Loop over all the potential points Only proceed if we haven't already excluded this point Find the neighbors and eliminate them We just removed ourselves, so undo that avoid circular import if basic.py ever imports something from tools.py Make sure pressure is monotonically decreasing Bound is given in pressure If the bound is in the pressure data, we know the pressure bound exactly If we have heights, we know the exact height value, otherwise return standard atmosphere height for the pressure If bound is not in the data, return the nearest or interpolated values Use the user specified bound Interpolate heights from the height data If not heights given, use the standard atmosphere No interpolation, find the closest values Bound is given in height If there is height data, see if we have the bound or need to interpolate/find nearest Bound is in the height data Bound is not in the data Need to cast back to the input type since interp (up to at least numpy 1.13 always returns float64. This can cause upstream users problems, resulting in something like np.append() to upcast. 
Don't have heights, so assume a standard atmosphere If interpolation is on, this is all we need, if not, we need to go back and find the pressure closest to this and refigure the bounds Bound has invalid units If the bound is out of the range of the data, we shouldn't extrapolate Make sure pressure and datavars are the same length If we want things in AGL, subtract the minimum height from all height values If the bottom is not specified, make it the surface Make heights and arguments base units Calculate the top of the layer returned data variables in layer Ensure heights are sorted in ascending order Mask based on top and bottom Interpolate heights at bounds if necessary and sort If we don't have the bottom or top requested, append them Ensure that things are sorted in ascending order Interpolate for the possibly missing bottom/top values If we get the depth kwarg, but it's None, set it to the default as well Make sure pressure and datavars are the same length If the bottom is not specified, make it the surface pressure Calculate the top if whatever units depth is in returned data variables in layer Ensure pressures are sorted in ascending order Mask based on top and bottom pressure Interpolate pressures at bounds if necessary and sort If we don't have the bottom or top requested, append them Ensure that things are sorted in ascending order Interpolate for the possibly missing bottom/top values The shape of generated indices is the same as the input, but with the axis of interest replaced by the number of values to search for. Storage for the found indices and the mask for good locations Used to put the output in the proper location Loop over all of the values and for each, see where the value would be found from a linear search Look for changes in the value of the test for <= value in consecutive points Taking abs() because we only care if there is a flip, not which direction. 
Good points are those where it's not just 0's along the whole axis Look for the first switch; need to add 1 to the index since argmax is giving the index within the difference array, which is one smaller. Generate a list of slices to reverse the axis of interest so that searching from 0 to N is starting at the "top" of the axis. Same as above, but we use the slice to come from the end; then adjust those indices to measure from the front. Set all indices where the results are not good to 0 Put the results in the proper slice Create index values for broadcasting arrays Inputs must be the same number of dimensions If we were given 1D arrays, make a mesh grid Use the usual DataArray to pint.Quantity preprocessing wrapper Get axis argument, defaulting to first dimension Initialize new kwargs with the axis number Time coordinate, need to get time deltas Longitude coordinate, need to get grid deltas Latitude coordinate, need to get grid deltas General coordinate, use as is Calculate and return result as a DataArray Error create slice objects --- initially all are [:, :, ..., :] First handle centered case Fill in "left" edge with forward difference Now the "right" edge with backward difference create slice objects --- initially all are [:, :, ..., :] First handle centered case Fill in "left" edge Now the "right" edge Patch in the units that are dropped only the axis argument matters abb_dirs = abbrieviated directions handle np.array, pd.Series, list, and array-like handle unrecognizable scalar primarily to remove None from list so ','.join works remove extraneous abbrieviated directions strip units temporarily no units associated clean any numeric strings, negatives, and None does not handle strings with alphabet warns about the np.nan normalizer used for angles > 360 degree to normalize between 0 - 360 handle edge case of 360. 
round to the nearest angles for dict lookup 0.001 is subtracted so there's an equal number of dir_str from np.arange(0, 360, 22.5), or else some dir_str will be preferred without the 0.001, level=2 would yield: ['N', 'N', 'NE', 'E', 'E', 'E', 'SE', 'S', 'S', 'S', 'SW', 'W', 'W', 'W', 'NW', 'N'] with the -0.001, level=2 would yield: ['N', 'N', 'NE', 'NE', 'E', 'E', 'SE', 'SE', 'S', 'S', 'SW', 'SW', 'W', 'W', 'NW', 'NW'] for array Mask everyone with that joint mask | 25,356 | en | 0.723945 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.