hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7353987a2cc31674fe6a25a13e88a388043f92b | 361 | py | Python | eng_zap_challenge_python/__init__.py | glairtonsantos/eng-zap-challenge-python | abb6c8399f46360759d0256e4b02e35b010e28cb | [
"MIT"
] | null | null | null | eng_zap_challenge_python/__init__.py | glairtonsantos/eng-zap-challenge-python | abb6c8399f46360759d0256e4b02e35b010e28cb | [
"MIT"
] | null | null | null | eng_zap_challenge_python/__init__.py | glairtonsantos/eng-zap-challenge-python | abb6c8399f46360759d0256e4b02e35b010e28cb | [
"MIT"
] | null | null | null | from flask import Blueprint
from flask_api import FlaskAPI
URL_SOURCE = "http://grupozap-code-challenge.s3-website-us-east-1.amazonaws.com/sources/source-2.json"
blueprint = Blueprint("eng_zap_challenge_python", __name__)
def create_app(config="dev"):
    """Application factory: build a FlaskAPI app with the module blueprint.

    The ``config`` argument is currently unused by the visible code and is
    kept only for interface compatibility.
    """
    application = FlaskAPI(__name__)
    application.register_blueprint(blueprint)
    return application
from . import views | 21.235294 | 102 | 0.767313 | from flask import Blueprint
from flask_api import FlaskAPI
URL_SOURCE = "http://grupozap-code-challenge.s3-website-us-east-1.amazonaws.com/sources/source-2.json"
blueprint = Blueprint("eng_zap_challenge_python", __name__)
def create_app(config="dev"):
app = FlaskAPI(__name__)
app.register_blueprint(blueprint)
return app
from . import views | true | true |
f7353a11b7f361e2146209f56e09a13f1d5f5fd1 | 11,293 | py | Python | signature/train_and_sample.py | joelnmdyer/SignatuRE | 085a9d727e504bd25bbebdebaa58867211a52c8d | [
"MIT"
] | null | null | null | signature/train_and_sample.py | joelnmdyer/SignatuRE | 085a9d727e504bd25bbebdebaa58867211a52c8d | [
"MIT"
] | null | null | null | signature/train_and_sample.py | joelnmdyer/SignatuRE | 085a9d727e504bd25bbebdebaa58867211a52c8d | [
"MIT"
] | null | null | null | import argparse
import logging
import numpy as np
import os
import sbi.utils as utils
from sbi.inference.base import infer
from sbi import analysis as analysis
from sbi.inference import SMCABC, SNRE_A, simulate_for_sbi, prepare_for_sbi
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
import time
import torch
# Custom scripts/modules/packages
from signature.inference import kernel_methods
from signature.utils import networks
from signature.utils import io, sampling
def train_clf(task, method, L, K=2, n_components_raw=100, seed=0):
    """
    Trains a binary classifier with method <method> to distinguish between
    samples (x, theta) from the joint distribution p(x, theta) and from the
    product of the marginals p(x)p(theta) associated with <task>.

    Input:
    - task: str, name of model to run inference on, must be recognised
            by io.get_task ("OU", "MA2" or "GSE" are handled below).
    - method: str, one of:
        "signature"/"k2"   -> kernel-based classifier,
        "gru-resnet[...]"  -> neural ratio estimator (SNRE-A) with a GRU
                              embedding; an optional suffix gives the
                              embedding output dimension,
        "hc"               -> hand-crafted summary statistics + resnet,
        "smcabc"           -> SMC-ABC (no classifier is trained).
    - L: int, number of training examples (simulations) to generate
    - K: int, number of contrasting examples. Only used when
         method is a kernel classifier ("signature"/"k2").
    - n_components_raw: int, Nystrom components (kernel methods only).
    - seed: int, seed for random number generator.
      NOTE(review): `seed` is not used anywhere in this function body —
      confirm seeding happens in the caller or in io.get_task.

    Returns:
        (clf, x0, prior, inn_prods, theta_kern) where `clf` is the trained
        classifier/posterior (or the SMCABC inference object), `x0` the
        (possibly preprocessed) observation, and `inn_prods`/`theta_kern`
        kernel evaluation callables (None for non-kernel methods).
    """
    prior, sbi_prior, obs, simulator = io.get_task(task)
    if method in ["signature", "k2"]:
        clf, x0, _, inn_prods, theta_kern = kernel_methods.train_kernel_classifier(prior,
                                                                                   simulator,
                                                                                   obs,
                                                                                   L,
                                                                                   K,
                                                                                   n_components_raw,
                                                                                   task,
                                                                                   method)
    elif method[:10] == "gru-resnet":
        IDIM = 1
        def sbi_simulator(x):
            return simulator(x)
        if task == "GSE":
            obs = obs[:, :-1]
            IDIM = 2
            # Remove time indices from GSE output
            def sbi_simulator(x):
                return simulator(x)[:,:-1]
        ODIM = 3
        if method != "gru-resnet":
            # Suffix after "gru-resnet" encodes the embedding output dim.
            # NOTE(review): eval() on a CLI-derived string — safe only for
            # trusted inputs; int(method[10:]) would be stricter.
            ODIM = eval(method[10:])
        simulator_wrapper, _prior = prepare_for_sbi(sbi_simulator, sbi_prior)
        # Instantiate the neural density ratio estimator
        embedding_net = networks.GRU(input_dim=IDIM, hidden_dim=32, num_layers=2,
                                     output_dim=ODIM)
        n_pars_embedding = sum(p.numel() for p in embedding_net.parameters() if p.requires_grad)
        logging.info("Embedding net has {0} parameters".format(n_pars_embedding))
        classifier = utils.get_nn_models.classifier_nn('resnet',
                                                       embedding_net_x=embedding_net)
        # Setup the inference procedure with the SNRE-A procedure
        inference = SNRE_A(prior=_prior, classifier=classifier)
        # Run the inference procedure on one round and L simulated data points
        theta, x = simulate_for_sbi(simulator_wrapper, _prior, num_simulations=L)
        if task not in ["GSE"]:
            # 1-D series: add a trailing feature dimension for the GRU.
            x = x.unsqueeze(-1)
        elif task == "GSE":
            # Print this out to see that it gives you everything in the right place
            x = x.reshape(L, -1, 2)
        density_estimator = inference.append_simulations(theta, x).train()
        posterior = inference.build_posterior(density_estimator)
        posterior.set_default_x(obs.reshape(1,-1,IDIM))
        clf = posterior
        inn_prods = None
        theta_kern = None
        x0 = obs
        prior = _prior
    elif method in ["hc", "smcabc"]:
        def slope_intercept(data):
            # OLS fit of data[t+1] on data[t]; used by the OU summaries.
            reg = LinearRegression().fit(data[:-1].reshape(-1,1), data[1:].reshape(-1,1))
            slope = reg.coef_
            intercept = reg.intercept_
            return slope, intercept
        # Task-specific hand-crafted summary statistics.
        # NOTE(review): `summarise` is undefined for any other task name.
        if task == "OU":
            def summarise(data):
                slope, intercept = slope_intercept(data)
                summary = np.array([np.mean(data), slope[0,0], intercept[0]])
                return summary
        elif task == "MA2":
            def summarise(data):
                var = np.var(data)
                rhos = sm.tsa.acf(data, nlags=2)[1:]
                return np.array([var, rhos[0], rhos[1]])
        elif task == "GSE":
            def summarise(data):
                # Drop the trailing time-index column, then compute means,
                # log-variances, lag-1/2 autocorrelations and the cross
                # correlation of the two standardised series.
                data = data[:, :-1]
                N = data.shape[0]
                x, y = data[:,0], data[:,1]
                xmean = np.mean(x)
                ymean = np.mean(y)
                xvar = np.var(x, ddof=1)
                yvar = np.var(y, ddof=1)
                # Guard against zero variance before standardising.
                if xvar == 0.:
                    xvar = 1e-30
                if yvar == 0.:
                    yvar = 1e-30
                x, y = (x - xmean)/np.sqrt(xvar), (y - ymean)/np.sqrt(yvar)
                acx, acy = [], []
                for lag in [1,2]:
                    acx.append(np.dot(x[:-lag], x[lag:]) / (N - 1))
                    acy.append(np.dot(y[:-lag], y[lag:]) / (N - 1))
                ccxy = np.dot(x, y)/(N-1)
                summary = np.array([xmean, ymean, np.log(xvar + 1), np.log(yvar+1), ccxy] + acx + acy)
                return summary
        def sbi_simulator(x):
            data = simulator(x)
            return summarise(data)
        if method == "hc":
            x0 = summarise(obs)
            simulator_wrapper, _prior = prepare_for_sbi(sbi_simulator, sbi_prior)
            # Instantiate the neural density ratio estimator
            classifier = utils.get_nn_models.classifier_nn('resnet')
            # Setup the inference procedure with the SNRE-A procedure
            inference = SNRE_A(prior=_prior, classifier=classifier)
            # Run the inference procedure on one round and L simulated data points
            theta, x = simulate_for_sbi(simulator_wrapper, _prior, num_simulations=L)
            density_estimator = inference.append_simulations(theta, x).train()
            posterior = inference.build_posterior(density_estimator)
            posterior.set_default_x(x0)
            clf = posterior
        elif method == "smcabc":
            # SMC-ABC works on the raw (flattened) series, not summaries.
            def _simulator(theta):
                return simulator(theta)[:, :-1].reshape(-1)
            print(_simulator(prior.sample()))
            simulator_wrapper, _prior = prepare_for_sbi(_simulator, sbi_prior)
            inference = SMCABC(simulator_wrapper, _prior, num_workers=20)
            clf = inference
            x0 = obs[:, :-1].reshape(-1)
            print(x0)
        inn_prods = None
        theta_kern = None
        prior = _prior
    return clf, x0, prior, inn_prods, theta_kern
def sample(method, clf, x0, start, sampling_method, n_samples=[50_000, 100_000], prior=None,
           inn_prods=None, theta_kern=None):
    """
    Uses a density ratio estimator clf to sample from the posterior for x0
    and prior.

    Inputs:
    - method: str, "signature"/"k2" (kernel classifier), "gru-resnet..."/"hc"
              (sbi posterior object) or "smcabc" (SMCABC inference object,
              which samples directly and returns early below).
    - clf: the density ratio estimator (or SMCABC inference object)
    - x0: the preprocessed observation
    - start: np.array consisting of the start point for MCMC. Recommend
             using true parameter value that generated x0 for this
    - sampling_method: "mh" (Metropolis-Hastings) or "sir" (sampling
             importance resampling).
    - n_samples: list of length 2 consisting of ints > 0. Trial run of MCMC
             uses n_samples[0] steps to estimate covariance matrix of
             Gaussian proposal density; proper run uses n_samples[1].
             NOTE(review): mutable default argument — harmless here because
             it is never mutated, but consider `n_samples=None` + fallback.
    - prior: prior distribution, required if method is a kernel classifier
             (and used by SIR); otherwise ignored for MH. Default None
    - inn_prods, theta_kern: kernel evaluation callables, only used for
             kernel classifiers.
    """
    if method in ["signature", "k2"]:
        if prior is None:
            raise ValueError("Must provide prior for kernel classifier")
        def create_log_ratio_estimator(clf, x):
            "Create a ratio estimator from the signature-based classifier"
            X_test = inn_prods(x)
            clf.set_xkern(X_test.reshape(-1,1))
            lr = clf.lr
            coefficients = lr.coef_.T
            intercept = lr.intercept_
            # Precompute the x-dependent part once; only the theta kernel
            # needs evaluating per proposal inside MCMC.
            vector = (clf._mapping).dot(coefficients)
            def log_ratio_estimator(theta):
                T_test = theta_kern(theta)
                return T_test.dot(vector) + intercept
            return log_ratio_estimator
        custom_log_ratio_estimator = create_log_ratio_estimator(clf, x0)
        custom_ratio_estimator = lambda theta: np.exp(custom_log_ratio_estimator(theta))
        def kernel_posterior(theta):
            """
            Function to evaluate estimation of posterior density for
            kernel-based classifier.
            """
            prior_logpdf = prior.log_prob(theta)
            # Short-circuit outside the prior support.
            if prior_logpdf == -float("inf"):
                return prior_logpdf
            else:
                log_weight = custom_log_ratio_estimator(theta)
                return log_weight + prior_logpdf
        log_post_prob = kernel_posterior
    elif (method[:10] == "gru-resnet") or (method == "hc"):
        def log_post_prob(th):
            # Convert th to torch.tensor
            th = torch.as_tensor(th).float()
            return clf.log_prob(th)
        # For sampling importance resampling
        custom_ratio_estimator = lambda th: float(torch.exp(clf.log_prob(th) - prior.log_prob(th)))
    elif method == "smcabc":
        # SMC-ABC samples directly; no MCMC/SIR step follows.
        samples = clf(x0, 1_000, 1_000, int(1e7), 0.8)
        return samples
    if sampling_method == "mh":
        # Pilot run to estimate covariance matrix of Gaussian proposal density
        samples = sampling.mh(log_post_prob, len(start), start, method,
                              n_samples=n_samples[0])
        cov = np.cov(samples.T)
        # Proper run
        samples = sampling.mh(log_post_prob, len(start), start, method,
                              n_samples=n_samples[1], cov=cov)
        # Thin the chain to reduce autocorrelation.
        samples = samples[::100]
    elif sampling_method == "sir":
        # SIR
        samples = sampling.sir(prior, custom_ratio_estimator, 50_000,
                               1_000)
    return samples
def train_inference(task, method, start, L, fname, K=2, sampling_method="mh",
                    n_samples=[50_000, 100_000], seed=0, n_components_raw=100, start_time=0):
    """Train a ratio estimator for `task`, draw posterior samples, and save
    them to `fname` as plain text.

    Thin orchestration around train_clf() and sample(); `start_time` is a
    time.process_time() reference used only for logging the training cost.
    """
    print("Training classifier...")
    trained = train_clf(
        task, method, L, K=K, n_components_raw=n_components_raw, seed=seed
    )
    clf, x0, prior, s_kernel, t_kernel = trained
    logging.info("Training CPU time = {0}".format(time.process_time() - start_time))
    print("Sampling from posterior...")
    posterior_samples = sample(
        method,
        clf,
        x0,
        start,
        sampling_method,
        n_samples=n_samples,
        prior=prior,
        inn_prods=s_kernel,
        theta_kern=t_kernel,
    )
    print("Saving samples...")
    np.savetxt(fname, posterior_samples)
    print("Done.")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Ratio estimation')
parser.add_argument('--task', type=str,
help='Name of task (simulator) to experiment with.')
parser.add_argument('--method', type=str,
help='Name of classification pipelines to use.')
parser.add_argument('--L', type=int, nargs='+',
help='Number of training simulations to use.')
parser.add_argument('--K', type=int, default=1,
help='Number of contrasting examples per simulation.')
parser.add_argument('--s', type=str, default='mh',
help="Sampling method in ['mh', 'sir'].")
parser.add_argument('--n', type=int, default=100,
help="Number of components retained in Nystrom DIVIDED BY (K+1).")
parser.add_argument('--seed', type=int, nargs='+', help='Seeds for RNG.')
args = parser.parse_args()
if args.method == "sre":
method = "signature"
else:
method = args.method
if args.task == "OU":
start = np.array([0.5, 1.])
elif args.task == "MA2":
start = np.array([0.6, 0.2])
elif args.task == "GSE":
start = np.array([1e-2, 1e-1])
for L in args.L:
for seed in args.seed:
# Setup for saving output
directory = "./{0}/{1}/".format(args.task, seed)
if not os.path.exists(directory):
os.makedirs(directory)
if method in ["signature", "k2"]:
fname = os.path.join(directory, "{0}_{1}_{2}_{3}_samples.txt".format(method, L, args.K, args.n))
logging.basicConfig(filename=os.path.join(directory,
"{0}_{1}_{2}.log".format(method, L, args.K)),
filemode="w", format="%(name)s - %(levelname)s - %(message)s",
level=logging.INFO)
else:
fname = os.path.join(directory, "{0}_{1}_samples.txt".format(method, L))
logging.basicConfig(filename=os.path.join(directory,
"{0}_{1}.log".format(method, L)),
filemode="w", format="%(name)s - %(levelname)s - %(message)s",
level=logging.INFO)
logging.info(args)
logging.info("Seed = {0}".format(seed))
# Run script
start_time = time.process_time()
train_inference(args.task, method, start, L, fname, sampling_method=args.s,
K=args.K, seed=seed, n_components_raw=args.n, start_time=start_time)
logging.info("Total CPU time = {0}".format(time.process_time() - start_time))
| 32.828488 | 100 | 0.678031 | import argparse
import logging
import numpy as np
import os
import sbi.utils as utils
from sbi.inference.base import infer
from sbi import analysis as analysis
from sbi.inference import SMCABC, SNRE_A, simulate_for_sbi, prepare_for_sbi
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
import time
import torch
from signature.inference import kernel_methods
from signature.utils import networks
from signature.utils import io, sampling
def train_clf(task, method, L, K=2, n_components_raw=100, seed=0):
prior, sbi_prior, obs, simulator = io.get_task(task)
if method in ["signature", "k2"]:
clf, x0, _, inn_prods, theta_kern = kernel_methods.train_kernel_classifier(prior,
simulator,
obs,
L,
K,
n_components_raw,
task,
method)
elif method[:10] == "gru-resnet":
IDIM = 1
def sbi_simulator(x):
return simulator(x)
if task == "GSE":
obs = obs[:, :-1]
IDIM = 2
def sbi_simulator(x):
return simulator(x)[:,:-1]
ODIM = 3
if method != "gru-resnet":
ODIM = eval(method[10:])
simulator_wrapper, _prior = prepare_for_sbi(sbi_simulator, sbi_prior)
embedding_net = networks.GRU(input_dim=IDIM, hidden_dim=32, num_layers=2,
output_dim=ODIM)
n_pars_embedding = sum(p.numel() for p in embedding_net.parameters() if p.requires_grad)
logging.info("Embedding net has {0} parameters".format(n_pars_embedding))
classifier = utils.get_nn_models.classifier_nn('resnet',
embedding_net_x=embedding_net)
inference = SNRE_A(prior=_prior, classifier=classifier)
theta, x = simulate_for_sbi(simulator_wrapper, _prior, num_simulations=L)
if task not in ["GSE"]:
x = x.unsqueeze(-1)
elif task == "GSE":
x = x.reshape(L, -1, 2)
density_estimator = inference.append_simulations(theta, x).train()
posterior = inference.build_posterior(density_estimator)
posterior.set_default_x(obs.reshape(1,-1,IDIM))
clf = posterior
inn_prods = None
theta_kern = None
x0 = obs
prior = _prior
elif method in ["hc", "smcabc"]:
def slope_intercept(data):
reg = LinearRegression().fit(data[:-1].reshape(-1,1), data[1:].reshape(-1,1))
slope = reg.coef_
intercept = reg.intercept_
return slope, intercept
if task == "OU":
def summarise(data):
slope, intercept = slope_intercept(data)
summary = np.array([np.mean(data), slope[0,0], intercept[0]])
return summary
elif task == "MA2":
def summarise(data):
var = np.var(data)
rhos = sm.tsa.acf(data, nlags=2)[1:]
return np.array([var, rhos[0], rhos[1]])
elif task == "GSE":
def summarise(data):
data = data[:, :-1]
N = data.shape[0]
x, y = data[:,0], data[:,1]
xmean = np.mean(x)
ymean = np.mean(y)
xvar = np.var(x, ddof=1)
yvar = np.var(y, ddof=1)
if xvar == 0.:
xvar = 1e-30
if yvar == 0.:
yvar = 1e-30
x, y = (x - xmean)/np.sqrt(xvar), (y - ymean)/np.sqrt(yvar)
acx, acy = [], []
for lag in [1,2]:
acx.append(np.dot(x[:-lag], x[lag:]) / (N - 1))
acy.append(np.dot(y[:-lag], y[lag:]) / (N - 1))
ccxy = np.dot(x, y)/(N-1)
summary = np.array([xmean, ymean, np.log(xvar + 1), np.log(yvar+1), ccxy] + acx + acy)
return summary
def sbi_simulator(x):
data = simulator(x)
return summarise(data)
if method == "hc":
x0 = summarise(obs)
simulator_wrapper, _prior = prepare_for_sbi(sbi_simulator, sbi_prior)
classifier = utils.get_nn_models.classifier_nn('resnet')
inference = SNRE_A(prior=_prior, classifier=classifier)
theta, x = simulate_for_sbi(simulator_wrapper, _prior, num_simulations=L)
density_estimator = inference.append_simulations(theta, x).train()
posterior = inference.build_posterior(density_estimator)
posterior.set_default_x(x0)
clf = posterior
elif method == "smcabc":
def _simulator(theta):
return simulator(theta)[:, :-1].reshape(-1)
print(_simulator(prior.sample()))
simulator_wrapper, _prior = prepare_for_sbi(_simulator, sbi_prior)
inference = SMCABC(simulator_wrapper, _prior, num_workers=20)
clf = inference
x0 = obs[:, :-1].reshape(-1)
print(x0)
inn_prods = None
theta_kern = None
prior = _prior
return clf, x0, prior, inn_prods, theta_kern
def sample(method, clf, x0, start, sampling_method, n_samples=[50_000, 100_000], prior=None,
inn_prods=None, theta_kern=None):
if method in ["signature", "k2"]:
if prior is None:
raise ValueError("Must provide prior for kernel classifier")
def create_log_ratio_estimator(clf, x):
X_test = inn_prods(x)
clf.set_xkern(X_test.reshape(-1,1))
lr = clf.lr
coefficients = lr.coef_.T
intercept = lr.intercept_
vector = (clf._mapping).dot(coefficients)
def log_ratio_estimator(theta):
T_test = theta_kern(theta)
return T_test.dot(vector) + intercept
return log_ratio_estimator
custom_log_ratio_estimator = create_log_ratio_estimator(clf, x0)
custom_ratio_estimator = lambda theta: np.exp(custom_log_ratio_estimator(theta))
def kernel_posterior(theta):
prior_logpdf = prior.log_prob(theta)
if prior_logpdf == -float("inf"):
return prior_logpdf
else:
log_weight = custom_log_ratio_estimator(theta)
return log_weight + prior_logpdf
log_post_prob = kernel_posterior
elif (method[:10] == "gru-resnet") or (method == "hc"):
def log_post_prob(th):
th = torch.as_tensor(th).float()
return clf.log_prob(th)
custom_ratio_estimator = lambda th: float(torch.exp(clf.log_prob(th) - prior.log_prob(th)))
elif method == "smcabc":
samples = clf(x0, 1_000, 1_000, int(1e7), 0.8)
return samples
if sampling_method == "mh":
samples = sampling.mh(log_post_prob, len(start), start, method,
n_samples=n_samples[0])
cov = np.cov(samples.T)
samples = sampling.mh(log_post_prob, len(start), start, method,
n_samples=n_samples[1], cov=cov)
samples = samples[::100]
elif sampling_method == "sir":
samples = sampling.sir(prior, custom_ratio_estimator, 50_000,
1_000)
return samples
def train_inference(task, method, start, L, fname, K=2, sampling_method="mh",
n_samples=[50_000, 100_000], seed=0, n_components_raw=100, start_time=0):
print("Training classifier...")
clf, x0, prior, s_kernel, t_kernel = train_clf(task, method, L, K=K,
n_components_raw=n_components_raw, seed=seed)
logging.info("Training CPU time = {0}".format(time.process_time() - start_time))
print("Sampling from posterior...")
samples = sample(method, clf, x0, start, sampling_method, n_samples=n_samples, prior=prior,
inn_prods=s_kernel, theta_kern=t_kernel)
print("Saving samples...")
np.savetxt(fname, samples)
print("Done.")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Ratio estimation')
parser.add_argument('--task', type=str,
help='Name of task (simulator) to experiment with.')
parser.add_argument('--method', type=str,
help='Name of classification pipelines to use.')
parser.add_argument('--L', type=int, nargs='+',
help='Number of training simulations to use.')
parser.add_argument('--K', type=int, default=1,
help='Number of contrasting examples per simulation.')
parser.add_argument('--s', type=str, default='mh',
help="Sampling method in ['mh', 'sir'].")
parser.add_argument('--n', type=int, default=100,
help="Number of components retained in Nystrom DIVIDED BY (K+1).")
parser.add_argument('--seed', type=int, nargs='+', help='Seeds for RNG.')
args = parser.parse_args()
if args.method == "sre":
method = "signature"
else:
method = args.method
if args.task == "OU":
start = np.array([0.5, 1.])
elif args.task == "MA2":
start = np.array([0.6, 0.2])
elif args.task == "GSE":
start = np.array([1e-2, 1e-1])
for L in args.L:
for seed in args.seed:
directory = "./{0}/{1}/".format(args.task, seed)
if not os.path.exists(directory):
os.makedirs(directory)
if method in ["signature", "k2"]:
fname = os.path.join(directory, "{0}_{1}_{2}_{3}_samples.txt".format(method, L, args.K, args.n))
logging.basicConfig(filename=os.path.join(directory,
"{0}_{1}_{2}.log".format(method, L, args.K)),
filemode="w", format="%(name)s - %(levelname)s - %(message)s",
level=logging.INFO)
else:
fname = os.path.join(directory, "{0}_{1}_samples.txt".format(method, L))
logging.basicConfig(filename=os.path.join(directory,
"{0}_{1}.log".format(method, L)),
filemode="w", format="%(name)s - %(levelname)s - %(message)s",
level=logging.INFO)
logging.info(args)
logging.info("Seed = {0}".format(seed))
start_time = time.process_time()
train_inference(args.task, method, start, L, fname, sampling_method=args.s,
K=args.K, seed=seed, n_components_raw=args.n, start_time=start_time)
logging.info("Total CPU time = {0}".format(time.process_time() - start_time))
| true | true |
f7353a4eac24e4d700095336f302749cabe5524d | 38,530 | py | Python | raiden/api/python.py | offerm/raiden | 7a79026af70830bbbed44d73035f9a274e41c6a3 | [
"MIT"
] | null | null | null | raiden/api/python.py | offerm/raiden | 7a79026af70830bbbed44d73035f9a274e41c6a3 | [
"MIT"
] | 1 | 2018-06-18T13:06:00.000Z | 2018-06-18T13:06:00.000Z | raiden/api/python.py | offerm/raiden | 7a79026af70830bbbed44d73035f9a274e41c6a3 | [
"MIT"
] | 1 | 2017-06-09T19:27:11.000Z | 2017-06-09T19:27:11.000Z | import gevent
import structlog
from eth_utils import is_binary_address, is_hex, to_bytes, to_checksum_address
from gevent import Greenlet
import raiden.blockchain.events as blockchain_events
from raiden import waiting
from raiden.constants import (
GENESIS_BLOCK_NUMBER,
RED_EYES_PER_TOKEN_NETWORK_LIMIT,
SECRET_HEXSTRING_LENGTH,
SECRETHASH_HEXSTRING_LENGTH,
UINT256_MAX,
Environment,
)
from raiden.exceptions import (
AlreadyRegisteredTokenAddress,
ChannelNotFound,
DepositMismatch,
DepositOverLimit,
DuplicatedChannelError,
InsufficientFunds,
InsufficientGasReserve,
InvalidAddress,
InvalidAmount,
InvalidSecretOrSecretHash,
InvalidSettleTimeout,
RaidenRecoverableError,
TokenNotRegistered,
UnknownTokenAddress,
)
from raiden.messages import RequestMonitoring
from raiden.settings import DEFAULT_RETRY_TIMEOUT, DEVELOPMENT_CONTRACT_VERSION
from raiden.transfer import architecture, views
from raiden.transfer.events import (
EventPaymentReceivedSuccess,
EventPaymentSentFailed,
EventPaymentSentSuccess,
)
from raiden.transfer.state import (
BalanceProofSignedState,
InitiatorTask,
MediatorTask,
NettingChannelState,
TargetTask,
TransferTask,
)
from raiden.transfer.state_change import ActionChannelClose
from raiden.utils import pex, sha3
from raiden.utils.gas_reserve import has_enough_gas_reserve
from raiden.utils.typing import (
Address,
Any,
BlockSpecification,
BlockTimeout,
ChannelID,
Dict,
List,
LockedTransferType,
NetworkTimeout,
Optional,
PaymentID,
PaymentNetworkID,
Secret,
SecretHash,
Set,
TokenAddress,
TokenAmount,
TokenNetworkAddress,
TokenNetworkID,
Tuple,
)
log = structlog.get_logger(__name__) # pylint: disable=invalid-name
EVENTS_PAYMENT_HISTORY_RELATED = (
EventPaymentSentSuccess,
EventPaymentSentFailed,
EventPaymentReceivedSuccess,
)
def event_filter_for_payments(
    event: architecture.Event,
    token_network_identifier: TokenNetworkID = None,
    partner_address: Address = None,
) -> bool:
    """Filters out non payment history related events

    - If no other args are given, all payment related events match
    - If a token network identifier is given then only payment events for that match
    - If a partner is also given then if the event is a payment sent event and the
      target matches it's returned. If it's a payment received and the initiator matches
      then it's returned.
    """
    # Guard clauses: discard anything that is not a payment-history event
    # or that belongs to a different token network.
    if not isinstance(event, EVENTS_PAYMENT_HISTORY_RELATED):
        return False
    if (
        token_network_identifier is not None
        and token_network_identifier != event.token_network_identifier
    ):
        return False
    # No partner filter: every remaining payment event matches.
    if partner_address is None:
        return True
    # Sent events are matched on their target, received events on their initiator.
    if isinstance(event, (EventPaymentSentFailed, EventPaymentSentSuccess)):
        return event.target == partner_address
    return event.initiator == partner_address
def flatten_transfer(transfer: LockedTransferType, role: str) -> Dict[str, Any]:
    """Serialize a locked transfer into a flat, JSON-friendly dict of strings."""
    balance_proof = transfer.balance_proof
    flattened = {
        "payment_identifier": str(transfer.payment_identifier),
        "token_address": to_checksum_address(transfer.token),
        "token_network_identifier": to_checksum_address(
            balance_proof.token_network_identifier
        ),
        "channel_identifier": str(balance_proof.channel_identifier),
        "initiator": to_checksum_address(transfer.initiator),
        "target": to_checksum_address(transfer.target),
        "transferred_amount": str(balance_proof.transferred_amount),
        "locked_amount": str(balance_proof.locked_amount),
        "role": role,
    }
    return flattened
def get_transfer_from_task(
    secrethash: SecretHash, transfer_task: TransferTask
) -> Tuple[Optional[LockedTransferType], str]:
    """Extract the locked transfer and this node's role from a transfer task.

    Returns (transfer, role); `transfer` is None when a MediatorTask has
    neither transfer pairs nor a waiting transfer (callers such as
    transfer_tasks_view already check for None).

    Raises:
        ValueError: if `transfer_task` is not one of the known task types.
    """
    role = views.role_from_transfer_task(transfer_task)
    # Initialize to None: previously, a MediatorTask with no pairs and no
    # waiting_transfer left `transfer` unbound, raising UnboundLocalError
    # at the return statement below.
    transfer: Optional[LockedTransferType] = None
    if isinstance(transfer_task, InitiatorTask):
        transfer = transfer_task.manager_state.initiator_transfers[secrethash].transfer
    elif isinstance(transfer_task, MediatorTask):
        pairs = transfer_task.mediator_state.transfers_pair
        if pairs:
            # The last pair carries the most recent payer transfer.
            transfer = pairs[-1].payer_transfer
        elif transfer_task.mediator_state.waiting_transfer:
            transfer = transfer_task.mediator_state.waiting_transfer.transfer
    elif isinstance(transfer_task, TargetTask):
        transfer = transfer_task.target_state.transfer
    else:
        raise ValueError("get_transfer_from_task for a non TransferTask argument")
    return transfer, role
def transfer_tasks_view(
    transfer_tasks: Dict[SecretHash, TransferTask],
    token_address: TokenAddress = None,
    channel_id: ChannelID = None,
) -> List[Dict[str, Any]]:
    """Flatten transfer tasks into serializable dicts, optionally filtered.

    The token filter takes precedence: `channel_id` is only consulted when
    `token_address` is None (matching the original elif behavior).
    """
    flattened = []
    for secrethash, task in transfer_tasks.items():
        transfer, role = get_transfer_from_task(secrethash, task)
        if transfer is None:
            continue
        if token_address is not None:
            if transfer.token != token_address:
                continue
        elif channel_id is not None and transfer.balance_proof.channel_identifier != channel_id:
            continue
        flattened.append(flatten_transfer(transfer, role))
    return flattened
class RaidenAPI:
# pylint: disable=too-many-public-methods
    def __init__(self, raiden):
        # `raiden` is the service object this API facade wraps; all methods
        # below delegate to it.
        self.raiden = raiden
    @property
    def address(self):
        """Address of the wrapped raiden service (this node's address)."""
        return self.raiden.address
def get_channel(
self,
registry_address: PaymentNetworkID,
token_address: TokenAddress,
partner_address: Address,
) -> NettingChannelState:
if not is_binary_address(token_address):
raise InvalidAddress("Expected binary address format for token in get_channel")
if not is_binary_address(partner_address):
raise InvalidAddress("Expected binary address format for partner in get_channel")
channel_list = self.get_channel_list(registry_address, token_address, partner_address)
assert len(channel_list) <= 1
if not channel_list:
raise ChannelNotFound(
"Channel with partner '{}' for token '{}' could not be found.".format(
to_checksum_address(partner_address), to_checksum_address(token_address)
)
)
return channel_list[0]
    def token_network_register(
        self,
        registry_address: PaymentNetworkID,
        token_address: TokenAddress,
        channel_participant_deposit_limit: TokenAmount,
        token_network_deposit_limit: TokenAmount,
        retry_timeout: NetworkTimeout = DEFAULT_RETRY_TIMEOUT,
    ) -> TokenNetworkAddress:
        """Register the `token_address` in the blockchain. If the address is already
        registered but the event has not been processed this function will block
        until the next block to make sure the event is processed.

        The deposit-limit arguments are only forwarded to the contract when
        running against development contracts; production contracts are
        registered without limits.

        Raises:
            InvalidAddress: If the registry_address or token_address is not a valid address.
            AlreadyRegisteredTokenAddress: If the token is already registered.
            TransactionThrew: If the register transaction failed, this may
                happen because the account has not enough balance to pay for the
                gas or this register call raced with another transaction and lost.
        """
        if not is_binary_address(registry_address):
            raise InvalidAddress("registry_address must be a valid address in binary")
        if not is_binary_address(token_address):
            raise InvalidAddress("token_address must be a valid address in binary")
        if token_address in self.get_tokens_list(registry_address):
            raise AlreadyRegisteredTokenAddress("Token already registered")
        contracts_version = self.raiden.contract_manager.contracts_version
        registry = self.raiden.chain.token_network_registry(registry_address)
        try:
            if contracts_version == DEVELOPMENT_CONTRACT_VERSION:
                return registry.add_token_with_limits(
                    token_address=token_address,
                    channel_participant_deposit_limit=channel_participant_deposit_limit,
                    token_network_deposit_limit=token_network_deposit_limit,
                )
            else:
                return registry.add_token_without_limits(token_address=token_address)
        except RaidenRecoverableError as e:
            # Translate the contract-side duplicate-registration error into
            # the same exception the pre-check above raises.
            if "Token already registered" in str(e):
                raise AlreadyRegisteredTokenAddress("Token already registered")
            # else
            raise
        finally:
            # Assume the transaction failed because the token is already
            # registered with the smart contract and this node has not yet
            # polled for the event (otherwise the check above would have
            # failed).
            #
            # To provide a consistent view to the user, wait one block, this
            # will guarantee that the events have been processed.
            next_block = self.raiden.get_block_number() + 1
            waiting.wait_for_block(self.raiden, next_block, retry_timeout)
    def token_network_connect(
        self,
        registry_address: PaymentNetworkID,
        token_address: TokenAddress,
        funds: TokenAmount,
        initial_channel_target: int = 3,
        joinable_funds_target: float = 0.4,
    ) -> None:
        """ Automatically maintain channels open for the given token network.

        Args:
            registry_address: the token network registry the token belongs to.
            token_address: the ERC20 token network to connect to.
            funds: the amount of funds that can be used by the ConnectionMananger.
            initial_channel_target: number of channels to open proactively.
            joinable_funds_target: fraction of the funds that will be used to join
                channels opened by other participants.

        Raises:
            InvalidAddress: if either address is not in binary form.
            InsufficientGasReserve: if the account balance cannot cover the
                estimated gas for the requested channels' lifecycles.
        """
        if not is_binary_address(registry_address):
            raise InvalidAddress("registry_address must be a valid address in binary")
        if not is_binary_address(token_address):
            raise InvalidAddress("token_address must be a valid address in binary")
        token_network_identifier = views.get_token_network_identifier_by_token_address(
            chain_state=views.state_from_raiden(self.raiden),
            payment_network_id=registry_address,
            token_address=token_address,
        )
        connection_manager = self.raiden.connection_manager_for_token_network(
            token_network_identifier
        )
        # Refuse to open channels the account could not afford to settle.
        has_enough_reserve, estimated_required_reserve = has_enough_gas_reserve(
            raiden=self.raiden, channels_to_open=initial_channel_target
        )
        if not has_enough_reserve:
            raise InsufficientGasReserve(
                (
                    "The account balance is below the estimated amount necessary to "
                    "finish the lifecycles of all active channels. A balance of at "
                    f"least {estimated_required_reserve} wei is required."
                )
            )
        connection_manager.connect(
            funds=funds,
            initial_channel_target=initial_channel_target,
            joinable_funds_target=joinable_funds_target,
        )
def token_network_leave(
self, registry_address: PaymentNetworkID, token_address: TokenAddress
) -> List[NettingChannelState]:
""" Close all channels and wait for settlement. """
if not is_binary_address(registry_address):
raise InvalidAddress("registry_address must be a valid address in binary")
if not is_binary_address(token_address):
raise InvalidAddress("token_address must be a valid address in binary")
if token_address not in self.get_tokens_list(registry_address):
raise UnknownTokenAddress("token_address unknown")
token_network_identifier = views.get_token_network_identifier_by_token_address(
chain_state=views.state_from_raiden(self.raiden),
payment_network_id=registry_address,
token_address=token_address,
)
connection_manager = self.raiden.connection_manager_for_token_network(
token_network_identifier
)
return connection_manager.leave(registry_address)
def channel_open(
self,
registry_address: PaymentNetworkID,
token_address: TokenAddress,
partner_address: Address,
settle_timeout: BlockTimeout = None,
retry_timeout: NetworkTimeout = DEFAULT_RETRY_TIMEOUT,
) -> ChannelID:
""" Open a channel with the peer at `partner_address`
with the given `token_address`.
"""
if settle_timeout is None:
settle_timeout = self.raiden.config["settle_timeout"]
if settle_timeout < self.raiden.config["reveal_timeout"] * 2:
raise InvalidSettleTimeout(
"settle_timeout can not be smaller than double the reveal_timeout"
)
if not is_binary_address(registry_address):
raise InvalidAddress("Expected binary address format for registry in channel open")
if not is_binary_address(token_address):
raise InvalidAddress("Expected binary address format for token in channel open")
if not is_binary_address(partner_address):
raise InvalidAddress("Expected binary address format for partner in channel open")
chain_state = views.state_from_raiden(self.raiden)
channel_state = views.get_channelstate_for(
chain_state=chain_state,
payment_network_id=registry_address,
token_address=token_address,
partner_address=partner_address,
)
if channel_state:
raise DuplicatedChannelError("Channel with given partner address already exists")
registry = self.raiden.chain.token_network_registry(registry_address)
token_network_address = registry.get_token_network(token_address)
if token_network_address is None:
raise TokenNotRegistered(
"Token network for token %s does not exist" % to_checksum_address(token_address)
)
token_network = self.raiden.chain.token_network(registry.get_token_network(token_address))
with self.raiden.gas_reserve_lock:
has_enough_reserve, estimated_required_reserve = has_enough_gas_reserve(
self.raiden, channels_to_open=1
)
if not has_enough_reserve:
raise InsufficientGasReserve(
(
"The account balance is below the estimated amount necessary to "
"finish the lifecycles of all active channels. A balance of at "
f"least {estimated_required_reserve} wei is required."
)
)
try:
token_network.new_netting_channel(
partner=partner_address,
settle_timeout=settle_timeout,
given_block_identifier=views.state_from_raiden(self.raiden).block_hash,
)
except DuplicatedChannelError:
log.info("partner opened channel first")
waiting.wait_for_newchannel(
raiden=self.raiden,
payment_network_id=registry_address,
token_address=token_address,
partner_address=partner_address,
retry_timeout=retry_timeout,
)
chain_state = views.state_from_raiden(self.raiden)
channel_state = views.get_channelstate_for(
chain_state=chain_state,
payment_network_id=registry_address,
token_address=token_address,
partner_address=partner_address,
)
assert channel_state, f"channel {channel_state} is gone"
return channel_state.identifier
    def set_total_channel_deposit(
        self,
        registry_address: PaymentNetworkID,
        token_address: TokenAddress,
        partner_address: Address,
        total_deposit: TokenAmount,
        retry_timeout: NetworkTimeout = DEFAULT_RETRY_TIMEOUT,
    ):
        """ Set the `total_deposit` in the channel with the peer at `partner_address` and the
        given `token_address` in order to be able to do transfers.

        Raises:
            InvalidAddress: If either token_address or partner_address is not
                20 bytes long.
            TransactionThrew: May happen for multiple reasons:
                - If the token approval fails, e.g. the token may validate if
                  account has enough balance for the allowance.
                - The deposit failed, e.g. the allowance did not set the token
                  aside for use and the user spent it before deposit was called.
                - The channel was closed/settled between the allowance call and
                  the deposit call.
            AddressWithoutCode: The channel was settled during the deposit
                execution.
            DepositOverLimit: The total deposit amount is higher than the limit.
        """
        chain_state = views.state_from_raiden(self.raiden)

        token_addresses = views.get_token_identifiers(chain_state, registry_address)
        channel_state = views.get_channelstate_for(
            chain_state=chain_state,
            payment_network_id=registry_address,
            token_address=token_address,
            partner_address=partner_address,
        )

        if not is_binary_address(token_address):
            raise InvalidAddress("Expected binary address format for token in channel deposit")
        if not is_binary_address(partner_address):
            raise InvalidAddress("Expected binary address format for partner in channel deposit")
        if token_address not in token_addresses:
            raise UnknownTokenAddress("Unknown token address")
        if channel_state is None:
            raise InvalidAddress("No channel with partner_address for the given token")

        # Production deployments enforce the "Red Eyes" per-token-network
        # deposit cap; other environments are effectively uncapped.
        if self.raiden.config["environment_type"] == Environment.PRODUCTION:
            per_token_network_deposit_limit = RED_EYES_PER_TOKEN_NETWORK_LIMIT
        else:
            per_token_network_deposit_limit = UINT256_MAX

        token = self.raiden.chain.token(token_address)
        token_network_registry = self.raiden.chain.token_network_registry(registry_address)
        token_network_address = token_network_registry.get_token_network(token_address)
        token_network_proxy = self.raiden.chain.token_network(token_network_address)
        channel_proxy = self.raiden.chain.payment_channel(
            canonical_identifier=channel_state.canonical_identifier
        )

        if total_deposit == 0:
            raise DepositMismatch("Attempted to deposit with total deposit being 0")

        # `total_deposit` is cumulative; `addendum` is the extra amount this
        # call will actually move.
        addendum = total_deposit - channel_state.our_state.contract_balance

        # NOTE(review): the cap check queries the token balance of
        # `registry_address` -- confirm this is the intended account to
        # measure the token network's total holdings against the limit.
        total_network_balance = token.balance_of(registry_address)

        if total_network_balance + addendum > per_token_network_deposit_limit:
            raise DepositOverLimit(
                f"The deposit of {addendum} will exceed the "
                f"token network limit of {per_token_network_deposit_limit}"
            )

        balance = token.balance_of(self.raiden.address)

        # Per-participant deposit cap is read directly from the contract.
        functions = token_network_proxy.proxy.contract.functions
        deposit_limit = functions.channel_participant_deposit_limit().call()

        if total_deposit > deposit_limit:
            raise DepositOverLimit(
                f"The additional deposit of {addendum} will exceed the "
                f"channel participant limit of {deposit_limit}"
            )

        # If this check succeeds it does not imply that the `deposit` will
        # succeed, since the `deposit` transaction may race with another
        # transaction.
        if not balance >= addendum:
            msg = "Not enough balance to deposit. {} Available={} Needed={}".format(
                pex(token_address), balance, addendum
            )
            raise InsufficientFunds(msg)

        # set_total_deposit calls approve
        # token.approve(netcontract_address, addendum)
        channel_proxy.set_total_deposit(
            total_deposit=total_deposit,
            block_identifier=views.state_from_raiden(self.raiden).block_hash,
        )

        target_address = self.raiden.address
        # Block until the new balance is visible in the local channel state.
        waiting.wait_for_participant_newbalance(
            raiden=self.raiden,
            payment_network_id=registry_address,
            token_address=token_address,
            partner_address=partner_address,
            target_address=target_address,
            target_balance=total_deposit,
            retry_timeout=retry_timeout,
        )
    def channel_close(
        self,
        registry_address: PaymentNetworkID,
        token_address: TokenAddress,
        partner_address: Address,
        retry_timeout: NetworkTimeout = DEFAULT_RETRY_TIMEOUT,
    ):
        """Close a channel opened with `partner_address` for the given
        `token_address`.

        Race condition, this can fail if channel was closed externally.
        """
        # Single-channel close is the batch operation with one partner.
        self.channel_batch_close(
            registry_address=registry_address,
            token_address=token_address,
            partner_addresses=[partner_address],
            retry_timeout=retry_timeout,
        )
    def channel_batch_close(
        self,
        registry_address: PaymentNetworkID,
        token_address: TokenAddress,
        partner_addresses: List[Address],
        retry_timeout: NetworkTimeout = DEFAULT_RETRY_TIMEOUT,
    ):
        """Close the channels opened with each of `partner_addresses` for the
        given `token_address`, and wait until the closes are confirmed locally.

        Race condition, this can fail if a channel was closed externally.
        """
        if not is_binary_address(token_address):
            raise InvalidAddress("Expected binary address format for token in channel close")
        if not all(map(is_binary_address, partner_addresses)):
            raise InvalidAddress("Expected binary address format for partner in channel close")

        valid_tokens = views.get_token_identifiers(
            chain_state=views.state_from_raiden(self.raiden), payment_network_id=registry_address
        )
        if token_address not in valid_tokens:
            raise UnknownTokenAddress("Token address is not known.")

        chain_state = views.state_from_raiden(self.raiden)
        channels_to_close = views.filter_channels_by_partneraddress(
            chain_state=chain_state,
            payment_network_id=registry_address,
            token_address=token_address,
            partner_addresses=partner_addresses,
        )

        # Dispatch one close state-change per channel, collecting the
        # greenlets spawned while handling them, then join them all so any
        # failure propagates (raise_error=True).
        greenlets: Set[Greenlet] = set()
        for channel_state in channels_to_close:
            channel_close = ActionChannelClose(
                canonical_identifier=channel_state.canonical_identifier
            )
            greenlets.update(self.raiden.handle_state_change(channel_close))
        gevent.joinall(greenlets, raise_error=True)

        channel_ids = [channel_state.identifier for channel_state in channels_to_close]

        # Block until every close is reflected in the local state.
        waiting.wait_for_close(
            raiden=self.raiden,
            payment_network_id=registry_address,
            token_address=token_address,
            channel_ids=channel_ids,
            retry_timeout=retry_timeout,
        )
def get_channel_list(
self,
registry_address: PaymentNetworkID,
token_address: TokenAddress = None,
partner_address: Address = None,
) -> List[NettingChannelState]:
"""Returns a list of channels associated with the optionally given
`token_address` and/or `partner_address`.
Args:
token_address: an optionally provided token address
partner_address: an optionally provided partner address
Return:
A list containing all channels the node participates. Optionally
filtered by a token address and/or partner address.
Raises:
KeyError: An error occurred when the token address is unknown to the node.
"""
if registry_address and not is_binary_address(registry_address):
raise InvalidAddress("Expected binary address format for registry in get_channel_list")
if token_address and not is_binary_address(token_address):
raise InvalidAddress("Expected binary address format for token in get_channel_list")
if partner_address:
if not is_binary_address(partner_address):
raise InvalidAddress(
"Expected binary address format for partner in get_channel_list"
)
if not token_address:
raise UnknownTokenAddress("Provided a partner address but no token address")
if token_address and partner_address:
channel_state = views.get_channelstate_for(
chain_state=views.state_from_raiden(self.raiden),
payment_network_id=registry_address,
token_address=token_address,
partner_address=partner_address,
)
if channel_state:
result = [channel_state]
else:
result = []
elif token_address:
result = views.list_channelstate_for_tokennetwork(
chain_state=views.state_from_raiden(self.raiden),
payment_network_id=registry_address,
token_address=token_address,
)
else:
result = views.list_all_channelstate(chain_state=views.state_from_raiden(self.raiden))
return result
def get_node_network_state(self, node_address: Address):
""" Returns the currently network status of `node_address`. """
return views.get_node_network_status(
chain_state=views.state_from_raiden(self.raiden), node_address=node_address
)
    def start_health_check_for(self, node_address: Address):
        """ Start health-checking `node_address`.

        Note: the original docstring ("Returns the currently network status")
        was copy-pasted from `get_node_network_state`; this method only kicks
        off monitoring and returns nothing.
        """
        self.raiden.start_health_check_for(node_address)
def get_tokens_list(self, registry_address: PaymentNetworkID):
"""Returns a list of tokens the node knows about"""
tokens_list = views.get_token_identifiers(
chain_state=views.state_from_raiden(self.raiden), payment_network_id=registry_address
)
return tokens_list
def get_token_network_address_for_token_address(
self, registry_address: PaymentNetworkID, token_address: TokenAddress
) -> Optional[TokenNetworkID]:
return views.get_token_network_identifier_by_token_address(
chain_state=views.state_from_raiden(self.raiden),
payment_network_id=registry_address,
token_address=token_address,
)
def transfer_and_wait(
self,
registry_address: PaymentNetworkID,
token_address: TokenAddress,
amount: TokenAmount,
target: Address,
identifier: PaymentID = None,
transfer_timeout: int = None,
secret: Secret = None,
secrethash: SecretHash = None,
):
""" Do a transfer with `target` with the given `amount` of `token_address`. """
# pylint: disable=too-many-arguments
payment_status = self.transfer_async(
registry_address=registry_address,
token_address=token_address,
amount=amount,
target=target,
identifier=identifier,
secret=secret,
secrethash=secrethash,
)
payment_status.payment_done.wait(timeout=transfer_timeout)
return payment_status
def transfer_async(
self,
registry_address: PaymentNetworkID,
token_address: TokenAddress,
amount: TokenAmount,
target: Address,
identifier: PaymentID = None,
secret: Secret = None,
secrethash: SecretHash = None,
):
if not isinstance(amount, int):
raise InvalidAmount("Amount not a number")
if amount <= 0:
raise InvalidAmount("Amount negative")
if not is_binary_address(token_address):
raise InvalidAddress("token address is not valid.")
if not is_binary_address(target):
raise InvalidAddress("target address is not valid.")
if secret is not None:
if len(secret) != SECRET_HEXSTRING_LENGTH:
raise InvalidSecretOrSecretHash(
"secret length should be " + str(SECRET_HEXSTRING_LENGTH) + "."
)
if not is_hex(secret):
raise InvalidSecretOrSecretHash("provided secret is not an hexadecimal string.")
secret = to_bytes(hexstr=secret)
if secrethash is not None:
if len(secrethash) != SECRETHASH_HEXSTRING_LENGTH:
raise InvalidSecretOrSecretHash(
"secret_hash length should be " + str(SECRETHASH_HEXSTRING_LENGTH) + "."
)
if not is_hex(secrethash):
raise InvalidSecretOrSecretHash("secret_hash is not an hexadecimal string.")
secrethash = to_bytes(hexstr=secrethash)
# if both secret and secrethash were provided we check that sha3(secret)
# matches the secerthash. Note that it is valid to provide a secert_hash
# without providing a secret
if secret is not None and secrethash is not None and secrethash != sha3(secret):
raise InvalidSecretOrSecretHash("provided secret and secret_hash do not match.")
valid_tokens = views.get_token_identifiers(
views.state_from_raiden(self.raiden), registry_address
)
if token_address not in valid_tokens:
raise UnknownTokenAddress("Token address is not known.")
log.debug(
"Initiating transfer",
initiator=pex(self.raiden.address),
target=pex(target),
token=pex(token_address),
amount=amount,
identifier=identifier,
)
payment_network_identifier = self.raiden.default_registry.address
token_network_identifier = views.get_token_network_identifier_by_token_address(
chain_state=views.state_from_raiden(self.raiden),
payment_network_id=payment_network_identifier,
token_address=token_address,
)
payment_status = self.raiden.mediated_transfer_async(
token_network_identifier=token_network_identifier,
amount=amount,
target=target,
identifier=identifier,
secret=secret,
secrethash=secrethash,
)
return payment_status
def get_raiden_events_payment_history_with_timestamps(
self,
token_address: TokenAddress = None,
target_address: Address = None,
limit: int = None,
offset: int = None,
):
if token_address and not is_binary_address(token_address):
raise InvalidAddress(
"Expected binary address format for token in get_raiden_events_payment_history"
)
if target_address and not is_binary_address(target_address):
raise InvalidAddress(
"Expected binary address format for "
"target_address in get_raiden_events_payment_history"
)
token_network_identifier = None
if token_address:
token_network_identifier = views.get_token_network_identifier_by_token_address(
chain_state=views.state_from_raiden(self.raiden),
payment_network_id=self.raiden.default_registry.address,
token_address=token_address,
)
events = [
event
for event in self.raiden.wal.storage.get_events_with_timestamps(
limit=limit, offset=offset
)
if event_filter_for_payments(
event=event.wrapped_event,
token_network_identifier=token_network_identifier,
partner_address=target_address,
)
]
return events
def get_raiden_events_payment_history(
self,
token_address: TokenAddress = None,
target_address: Address = None,
limit: int = None,
offset: int = None,
):
timestamped_events = self.get_raiden_events_payment_history_with_timestamps(
token_address=token_address, target_address=target_address, limit=limit, offset=offset
)
return [event.wrapped_event for event in timestamped_events]
def get_raiden_internal_events_with_timestamps(self, limit: int = None, offset: int = None):
return self.raiden.wal.storage.get_events_with_timestamps(limit=limit, offset=offset)
transfer = transfer_and_wait
def get_blockchain_events_network(
self,
registry_address: PaymentNetworkID,
from_block: BlockSpecification = GENESIS_BLOCK_NUMBER,
to_block: BlockSpecification = "latest",
):
events = blockchain_events.get_token_network_registry_events(
chain=self.raiden.chain,
token_network_registry_address=registry_address,
contract_manager=self.raiden.contract_manager,
events=blockchain_events.ALL_EVENTS,
from_block=from_block,
to_block=to_block,
)
return sorted(events, key=lambda evt: evt.get("block_number"), reverse=True)
def get_blockchain_events_token_network(
self,
token_address: TokenAddress,
from_block: BlockSpecification = GENESIS_BLOCK_NUMBER,
to_block: BlockSpecification = "latest",
):
"""Returns a list of blockchain events coresponding to the token_address."""
if not is_binary_address(token_address):
raise InvalidAddress(
"Expected binary address format for token in get_blockchain_events_token_network"
)
token_network_address = self.raiden.default_registry.get_token_network(token_address)
if token_network_address is None:
raise UnknownTokenAddress("Token address is not known.")
returned_events = blockchain_events.get_token_network_events(
chain=self.raiden.chain,
token_network_address=token_network_address,
contract_manager=self.raiden.contract_manager,
events=blockchain_events.ALL_EVENTS,
from_block=from_block,
to_block=to_block,
)
for event in returned_events:
if event.get("args"):
event["args"] = dict(event["args"])
returned_events.sort(key=lambda evt: evt.get("block_number"), reverse=True)
return returned_events
def get_blockchain_events_channel(
self,
token_address: TokenAddress,
partner_address: Address = None,
from_block: BlockSpecification = GENESIS_BLOCK_NUMBER,
to_block: BlockSpecification = "latest",
):
if not is_binary_address(token_address):
raise InvalidAddress(
"Expected binary address format for token in get_blockchain_events_channel"
)
token_network_address = self.raiden.default_registry.get_token_network(token_address)
if token_network_address is None:
raise UnknownTokenAddress("Token address is not known.")
channel_list = self.get_channel_list(
registry_address=self.raiden.default_registry.address,
token_address=token_address,
partner_address=partner_address,
)
returned_events = []
for channel in channel_list:
returned_events.extend(
blockchain_events.get_all_netting_channel_events(
chain=self.raiden.chain,
token_network_address=token_network_address,
netting_channel_identifier=channel.identifier,
contract_manager=self.raiden.contract_manager,
from_block=from_block,
to_block=to_block,
)
)
returned_events.sort(key=lambda evt: evt.get("block_number"), reverse=True)
return returned_events
def create_monitoring_request(
self, balance_proof: BalanceProofSignedState, reward_amount: TokenAmount
) -> Optional[RequestMonitoring]:
""" This method can be used to create a `RequestMonitoring` message.
It will contain all data necessary for an external monitoring service to
- send an updateNonClosingBalanceProof transaction to the TokenNetwork contract,
for the `balance_proof` that we received from a channel partner.
- claim the `reward_amount` from the UDC.
"""
# create RequestMonitoring message from the above + `reward_amount`
monitor_request = RequestMonitoring.from_balance_proof_signed_state(
balance_proof=balance_proof, reward_amount=reward_amount
)
# sign RequestMonitoring and return
monitor_request.sign(self.raiden.signer)
return monitor_request
    def get_pending_transfers(
        self, token_address: TokenAddress = None, partner_address: Address = None
    ) -> List[Dict[str, Any]]:
        """Return the node's pending transfer tasks as flat dicts, optionally
        filtered by token and, when a partner is also given, by channel.

        Raises:
            UnknownTokenAddress: if `token_address` has no token network.
            ChannelNotFound: via `get_channel`, if `partner_address` is given
                but no channel with that partner exists.
        """
        chain_state = views.state_from_raiden(self.raiden)
        transfer_tasks = views.get_all_transfer_tasks(chain_state)
        channel_id = None

        if token_address is not None:
            if self.raiden.default_registry.get_token_network(token_address) is None:
                raise UnknownTokenAddress(f"Token {token_address} not found.")
            if partner_address is not None:
                # Narrow down to the single channel with this partner.
                partner_channel = self.get_channel(
                    registry_address=self.raiden.default_registry.address,
                    token_address=token_address,
                    partner_address=partner_address,
                )
                channel_id = partner_channel.identifier

        return transfer_tasks_view(transfer_tasks, token_address, channel_id)
import gevent
import structlog
from eth_utils import is_binary_address, is_hex, to_bytes, to_checksum_address
from gevent import Greenlet
import raiden.blockchain.events as blockchain_events
from raiden import waiting
from raiden.constants import (
GENESIS_BLOCK_NUMBER,
RED_EYES_PER_TOKEN_NETWORK_LIMIT,
SECRET_HEXSTRING_LENGTH,
SECRETHASH_HEXSTRING_LENGTH,
UINT256_MAX,
Environment,
)
from raiden.exceptions import (
AlreadyRegisteredTokenAddress,
ChannelNotFound,
DepositMismatch,
DepositOverLimit,
DuplicatedChannelError,
InsufficientFunds,
InsufficientGasReserve,
InvalidAddress,
InvalidAmount,
InvalidSecretOrSecretHash,
InvalidSettleTimeout,
RaidenRecoverableError,
TokenNotRegistered,
UnknownTokenAddress,
)
from raiden.messages import RequestMonitoring
from raiden.settings import DEFAULT_RETRY_TIMEOUT, DEVELOPMENT_CONTRACT_VERSION
from raiden.transfer import architecture, views
from raiden.transfer.events import (
EventPaymentReceivedSuccess,
EventPaymentSentFailed,
EventPaymentSentSuccess,
)
from raiden.transfer.state import (
BalanceProofSignedState,
InitiatorTask,
MediatorTask,
NettingChannelState,
TargetTask,
TransferTask,
)
from raiden.transfer.state_change import ActionChannelClose
from raiden.utils import pex, sha3
from raiden.utils.gas_reserve import has_enough_gas_reserve
from raiden.utils.typing import (
Address,
Any,
BlockSpecification,
BlockTimeout,
ChannelID,
Dict,
List,
LockedTransferType,
NetworkTimeout,
Optional,
PaymentID,
PaymentNetworkID,
Secret,
SecretHash,
Set,
TokenAddress,
TokenAmount,
TokenNetworkAddress,
TokenNetworkID,
Tuple,
)
# Module-level structlog logger bound to this module's name.
log = structlog.get_logger(__name__)
# Event types that make up a node's payment history.
EVENTS_PAYMENT_HISTORY_RELATED = (
    EventPaymentSentSuccess,
    EventPaymentSentFailed,
    EventPaymentReceivedSuccess,
)
def event_filter_for_payments(
    event: architecture.Event,
    token_network_identifier: TokenNetworkID = None,
    partner_address: Address = None,
) -> bool:
    """Return True when `event` belongs to the payment history, optionally
    restricted to one token network and one counterparty.

    For sent payments the counterparty is the event's `target`; for received
    payments it is the `initiator`.
    """
    if not isinstance(event, EVENTS_PAYMENT_HISTORY_RELATED):
        return False
    if (
        token_network_identifier is not None
        and token_network_identifier != event.token_network_identifier
    ):
        return False
    if partner_address is None:
        return True
    if isinstance(event, (EventPaymentSentFailed, EventPaymentSentSuccess)):
        return event.target == partner_address
    return event.initiator == partner_address
def flatten_transfer(transfer: LockedTransferType, role: str) -> Dict[str, Any]:
    """Serialize a locked transfer into a flat dict of checksummed addresses
    and stringified numbers."""
    balance_proof = transfer.balance_proof
    return {
        "payment_identifier": str(transfer.payment_identifier),
        "token_address": to_checksum_address(transfer.token),
        "token_network_identifier": to_checksum_address(balance_proof.token_network_identifier),
        "channel_identifier": str(balance_proof.channel_identifier),
        "initiator": to_checksum_address(transfer.initiator),
        "target": to_checksum_address(transfer.target),
        "transferred_amount": str(balance_proof.transferred_amount),
        "locked_amount": str(balance_proof.locked_amount),
        "role": role,
    }
def get_transfer_from_task(
    secrethash: SecretHash, transfer_task: TransferTask
) -> Tuple[LockedTransferType, str]:
    """Extract the transfer and this node's role from a transfer task.

    Returns:
        ``(transfer, role)``. ``transfer`` may be ``None`` for a mediator
        task that has neither transfer pairs nor a waiting transfer; callers
        (e.g. ``transfer_tasks_view``) already check for ``None``.

    Raises:
        ValueError: if `transfer_task` is not a known TransferTask subtype.
    """
    role = views.role_from_transfer_task(transfer_task)

    # Fix: default to None. The original left `transfer` unbound when a
    # MediatorTask had no pairs and no waiting transfer, raising
    # UnboundLocalError at the return statement even though the caller
    # explicitly handles a None transfer.
    transfer = None
    if isinstance(transfer_task, InitiatorTask):
        transfer = transfer_task.manager_state.initiator_transfers[secrethash].transfer
    elif isinstance(transfer_task, MediatorTask):
        pairs = transfer_task.mediator_state.transfers_pair
        if pairs:
            transfer = pairs[-1].payer_transfer
        elif transfer_task.mediator_state.waiting_transfer:
            transfer = transfer_task.mediator_state.waiting_transfer.transfer
    elif isinstance(transfer_task, TargetTask):
        transfer = transfer_task.target_state.transfer
    else:
        # Error-message typo ("tranfer") fixed as well.
        raise ValueError("get_transfer_from_task for a non TransferTask argument")

    return transfer, role
def transfer_tasks_view(
    transfer_tasks: Dict[SecretHash, TransferTask],
    token_address: TokenAddress = None,
    channel_id: ChannelID = None,
) -> List[Dict[str, Any]]:
    """Flatten pending transfer tasks into serializable dicts, filtered by
    token address and/or channel identifier.

    Note: callers (see ``RaidenAPI.get_pending_transfers``) only pass
    `channel_id` together with `token_address`.
    """
    view = list()

    for secrethash, transfer_task in transfer_tasks.items():
        transfer, role = get_transfer_from_task(secrethash, transfer_task)
        if transfer is None:
            continue
        if token_address is not None and transfer.token != token_address:
            continue
        # Fix: the original guarded this with `elif channel_id is not None`
        # attached to the token check, so the channel filter was skipped
        # whenever `token_address` was provided -- which is the only case the
        # callers pass `channel_id` at all.
        if channel_id is not None and transfer.balance_proof.channel_identifier != channel_id:
            continue
        view.append(flatten_transfer(transfer, role))

    return view
class RaidenAPI:
    """Programmatic interface to a running Raiden node."""

    def __init__(self, raiden):
        # `raiden`: the node service instance every method here delegates to.
        self.raiden = raiden

    @property
    def address(self):
        # The node's own address, as held by the underlying service.
        return self.raiden.address
    def get_channel(
        self,
        registry_address: PaymentNetworkID,
        token_address: TokenAddress,
        partner_address: Address,
    ) -> NettingChannelState:
        """Return the channel with `partner_address` for `token_address`.

        Raises:
            InvalidAddress: if either address is not in binary form.
            ChannelNotFound: if no such channel exists.
        """
        if not is_binary_address(token_address):
            raise InvalidAddress("Expected binary address format for token in get_channel")
        if not is_binary_address(partner_address):
            raise InvalidAddress("Expected binary address format for partner in get_channel")

        channel_list = self.get_channel_list(registry_address, token_address, partner_address)
        # At most one channel can match a (token, partner) pair.
        assert len(channel_list) <= 1

        if not channel_list:
            raise ChannelNotFound(
                "Channel with partner '{}' for token '{}' could not be found.".format(
                    to_checksum_address(partner_address), to_checksum_address(token_address)
                )
            )

        return channel_list[0]
    def token_network_register(
        self,
        registry_address: PaymentNetworkID,
        token_address: TokenAddress,
        channel_participant_deposit_limit: TokenAmount,
        token_network_deposit_limit: TokenAmount,
        retry_timeout: NetworkTimeout = DEFAULT_RETRY_TIMEOUT,
    ) -> TokenNetworkAddress:
        """Register `token_address` with the registry at `registry_address`
        and return the resulting token network address.

        Raises:
            InvalidAddress: if either address is not in binary form.
            AlreadyRegisteredTokenAddress: if the token is already registered
                (locally, or detected via the contract error).
        """
        if not is_binary_address(registry_address):
            raise InvalidAddress("registry_address must be a valid address in binary")
        if not is_binary_address(token_address):
            raise InvalidAddress("token_address must be a valid address in binary")
        if token_address in self.get_tokens_list(registry_address):
            raise AlreadyRegisteredTokenAddress("Token already registered")

        contracts_version = self.raiden.contract_manager.contracts_version
        registry = self.raiden.chain.token_network_registry(registry_address)

        try:
            # Development contracts accept explicit deposit limits; other
            # versions use the parameterless registration entry point.
            if contracts_version == DEVELOPMENT_CONTRACT_VERSION:
                return registry.add_token_with_limits(
                    token_address=token_address,
                    channel_participant_deposit_limit=channel_participant_deposit_limit,
                    token_network_deposit_limit=token_network_deposit_limit,
                )
            else:
                return registry.add_token_without_limits(token_address=token_address)
        except RaidenRecoverableError as e:
            if "Token already registered" in str(e):
                raise AlreadyRegisteredTokenAddress("Token already registered")
            # Burn the token registration gas cost, but verify iff it fails.
            raise
        finally:
            # The transaction may have failed because the token is already
            # registered with the smart contract and this node has not yet
            # polled for the event. To provide a consistent view to the user,
            # wait one block -- this guarantees the events were processed.
            next_block = self.raiden.get_block_number() + 1
            waiting.wait_for_block(self.raiden, next_block, retry_timeout)
    def token_network_connect(
        self,
        registry_address: PaymentNetworkID,
        token_address: TokenAddress,
        funds: TokenAmount,
        initial_channel_target: int = 3,
        joinable_funds_target: float = 0.4,
    ) -> None:
        """ Automatically maintain channels open for the given token network.

        Args:
            token_address: the ERC20 token network to connect to.
            funds: the amount of funds that can be used by the ConnectionManager.
            initial_channel_target: number of channels to open proactively.
            joinable_funds_target: fraction of the funds that will be used to join
                channels opened by other participants.
        """
        if not is_binary_address(registry_address):
            raise InvalidAddress("registry_address must be a valid address in binary")
        if not is_binary_address(token_address):
            raise InvalidAddress("token_address must be a valid address in binary")

        token_network_identifier = views.get_token_network_identifier_by_token_address(
            chain_state=views.state_from_raiden(self.raiden),
            payment_network_id=registry_address,
            token_address=token_address,
        )
        connection_manager = self.raiden.connection_manager_for_token_network(
            token_network_identifier
        )

        # Refuse to proceed if the account could not afford to settle the
        # channels this call would open.
        has_enough_reserve, estimated_required_reserve = has_enough_gas_reserve(
            raiden=self.raiden, channels_to_open=initial_channel_target
        )
        if not has_enough_reserve:
            raise InsufficientGasReserve(
                (
                    "The account balance is below the estimated amount necessary to "
                    "finish the lifecycles of all active channels. A balance of at "
                    f"least {estimated_required_reserve} wei is required."
                )
            )

        connection_manager.connect(
            funds=funds,
            initial_channel_target=initial_channel_target,
            joinable_funds_target=joinable_funds_target,
        )
    def token_network_leave(
        self, registry_address: PaymentNetworkID, token_address: TokenAddress
    ) -> List[NettingChannelState]:
        """ Close all channels and wait for settlement. """
        if not is_binary_address(registry_address):
            raise InvalidAddress("registry_address must be a valid address in binary")
        if not is_binary_address(token_address):
            raise InvalidAddress("token_address must be a valid address in binary")
        if token_address not in self.get_tokens_list(registry_address):
            raise UnknownTokenAddress("token_address unknown")

        token_network_identifier = views.get_token_network_identifier_by_token_address(
            chain_state=views.state_from_raiden(self.raiden),
            payment_network_id=registry_address,
            token_address=token_address,
        )
        connection_manager = self.raiden.connection_manager_for_token_network(
            token_network_identifier
        )
        return connection_manager.leave(registry_address)
    def channel_open(
        self,
        registry_address: PaymentNetworkID,
        token_address: TokenAddress,
        partner_address: Address,
        settle_timeout: BlockTimeout = None,
        retry_timeout: NetworkTimeout = DEFAULT_RETRY_TIMEOUT,
    ) -> ChannelID:
        """ Open a channel with the peer at `partner_address`
        with the given `token_address` and return the channel identifier.
        """
        if settle_timeout is None:
            settle_timeout = self.raiden.config["settle_timeout"]

        # A settle window below two reveal timeouts is rejected outright.
        if settle_timeout < self.raiden.config["reveal_timeout"] * 2:
            raise InvalidSettleTimeout(
                "settle_timeout can not be smaller than double the reveal_timeout"
            )

        if not is_binary_address(registry_address):
            raise InvalidAddress("Expected binary address format for registry in channel open")
        if not is_binary_address(token_address):
            raise InvalidAddress("Expected binary address format for token in channel open")
        if not is_binary_address(partner_address):
            raise InvalidAddress("Expected binary address format for partner in channel open")

        chain_state = views.state_from_raiden(self.raiden)
        channel_state = views.get_channelstate_for(
            chain_state=chain_state,
            payment_network_id=registry_address,
            token_address=token_address,
            partner_address=partner_address,
        )
        if channel_state:
            raise DuplicatedChannelError("Channel with given partner address already exists")

        registry = self.raiden.chain.token_network_registry(registry_address)
        token_network_address = registry.get_token_network(token_address)
        if token_network_address is None:
            raise TokenNotRegistered(
                "Token network for token %s does not exist" % to_checksum_address(token_address)
            )

        # NOTE(review): `registry.get_token_network(token_address)` is queried
        # a second time here although `token_network_address` already holds
        # the same value -- a redundant on-chain call.
        token_network = self.raiden.chain.token_network(registry.get_token_network(token_address))

        # Serialize the gas-reserve check with other channel-opening paths.
        with self.raiden.gas_reserve_lock:
            has_enough_reserve, estimated_required_reserve = has_enough_gas_reserve(
                self.raiden, channels_to_open=1
            )

            if not has_enough_reserve:
                raise InsufficientGasReserve(
                    (
                        "The account balance is below the estimated amount necessary to "
                        "finish the lifecycles of all active channels. A balance of at "
                        f"least {estimated_required_reserve} wei is required."
                    )
                )

            try:
                token_network.new_netting_channel(
                    partner=partner_address,
                    settle_timeout=settle_timeout,
                    given_block_identifier=views.state_from_raiden(self.raiden).block_hash,
                )
            except DuplicatedChannelError:
                # Not an error: the partner opened the channel first.
                log.info("partner opened channel first")

        # Block until the on-chain open event is reflected in local state.
        waiting.wait_for_newchannel(
            raiden=self.raiden,
            payment_network_id=registry_address,
            token_address=token_address,
            partner_address=partner_address,
            retry_timeout=retry_timeout,
        )

        chain_state = views.state_from_raiden(self.raiden)
        channel_state = views.get_channelstate_for(
            chain_state=chain_state,
            payment_network_id=registry_address,
            token_address=token_address,
            partner_address=partner_address,
        )
        assert channel_state, f"channel {channel_state} is gone"

        return channel_state.identifier
    def set_total_channel_deposit(
        self,
        registry_address: PaymentNetworkID,
        token_address: TokenAddress,
        partner_address: Address,
        total_deposit: TokenAmount,
        retry_timeout: NetworkTimeout = DEFAULT_RETRY_TIMEOUT,
    ):
        """Set the ``total_deposit`` of our side of the channel with
        ``partner_address`` for the token at ``token_address`` in the payment
        network at ``registry_address``, then block until the new balance is
        reflected in the local channel state.

        :raises InvalidAddress: if an address is not in binary format, or no
            channel with ``partner_address`` exists for the token
        :raises UnknownTokenAddress: if the token is not registered in the
            payment network
        :raises DepositMismatch: if ``total_deposit`` is 0
        :raises DepositOverLimit: if the deposit would exceed the per-token
            network limit or the per-participant channel limit
        :raises InsufficientFunds: if our on-chain token balance cannot cover
            the additional deposit
        """
        chain_state = views.state_from_raiden(self.raiden)
        token_addresses = views.get_token_identifiers(chain_state, registry_address)
        channel_state = views.get_channelstate_for(
            chain_state=chain_state,
            payment_network_id=registry_address,
            token_address=token_address,
            partner_address=partner_address,
        )
        if not is_binary_address(token_address):
            raise InvalidAddress("Expected binary address format for token in channel deposit")
        if not is_binary_address(partner_address):
            raise InvalidAddress("Expected binary address format for partner in channel deposit")
        if token_address not in token_addresses:
            raise UnknownTokenAddress("Unknown token address")
        if channel_state is None:
            raise InvalidAddress("No channel with partner_address for the given token")
        # deposits are capped on PRODUCTION (the RED_EYES limit constant);
        # everywhere else the limit is effectively unbounded
        if self.raiden.config["environment_type"] == Environment.PRODUCTION:
            per_token_network_deposit_limit = RED_EYES_PER_TOKEN_NETWORK_LIMIT
        else:
            per_token_network_deposit_limit = UINT256_MAX
        token = self.raiden.chain.token(token_address)
        token_network_registry = self.raiden.chain.token_network_registry(registry_address)
        token_network_address = token_network_registry.get_token_network(token_address)
        token_network_proxy = self.raiden.chain.token_network(token_network_address)
        channel_proxy = self.raiden.chain.payment_channel(
            canonical_identifier=channel_state.canonical_identifier
        )
        if total_deposit == 0:
            raise DepositMismatch("Attempted to deposit with total deposit being 0")
        # only the difference over the already-deposited contract balance has
        # to be transferred; the limit checks below reason about this delta
        addendum = total_deposit - channel_state.our_state.contract_balance
        # NOTE(review): this queries the token balance held by the *registry*
        # address; presumably that contract accumulates the whole network's
        # deposits -- confirm (a token network address would also be plausible)
        total_network_balance = token.balance_of(registry_address)
        if total_network_balance + addendum > per_token_network_deposit_limit:
            raise DepositOverLimit(
                f"The deposit of {addendum} will exceed the "
                f"token network limit of {per_token_network_deposit_limit}"
            )
        balance = token.balance_of(self.raiden.address)
        # the participant limit is read straight from the deployed contract
        functions = token_network_proxy.proxy.contract.functions
        deposit_limit = functions.channel_participant_deposit_limit().call()
        if total_deposit > deposit_limit:
            raise DepositOverLimit(
                f"The additional deposit of {addendum} will exceed the "
                f"channel participant limit of {deposit_limit}"
            )
        if not balance >= addendum:
            msg = "Not enough balance to deposit. {} Available={} Needed={}".format(
                pex(token_address), balance, addendum
            )
            raise InsufficientFunds(msg)
        channel_proxy.set_total_deposit(
            total_deposit=total_deposit,
            block_identifier=views.state_from_raiden(self.raiden).block_hash,
        )
        target_address = self.raiden.address
        # block until our new balance is visible in the local channel state
        waiting.wait_for_participant_newbalance(
            raiden=self.raiden,
            payment_network_id=registry_address,
            token_address=token_address,
            partner_address=partner_address,
            target_address=target_address,
            target_balance=total_deposit,
            retry_timeout=retry_timeout,
        )
def channel_close(
self,
registry_address: PaymentNetworkID,
token_address: TokenAddress,
partner_address: Address,
retry_timeout: NetworkTimeout = DEFAULT_RETRY_TIMEOUT,
):
self.channel_batch_close(
registry_address=registry_address,
token_address=token_address,
partner_addresses=[partner_address],
retry_timeout=retry_timeout,
)
    def channel_batch_close(
        self,
        registry_address: PaymentNetworkID,
        token_address: TokenAddress,
        partner_addresses: List[Address],
        retry_timeout: NetworkTimeout = DEFAULT_RETRY_TIMEOUT,
    ):
        """Close the channels with each partner in ``partner_addresses`` for
        the token at ``token_address`` and block until all closes are
        confirmed.

        :raises InvalidAddress: if the token or any partner address is not in
            binary format
        :raises UnknownTokenAddress: if the token is not registered in the
            payment network at ``registry_address``
        """
        if not is_binary_address(token_address):
            raise InvalidAddress("Expected binary address format for token in channel close")
        if not all(map(is_binary_address, partner_addresses)):
            raise InvalidAddress("Expected binary address format for partner in channel close")
        valid_tokens = views.get_token_identifiers(
            chain_state=views.state_from_raiden(self.raiden), payment_network_id=registry_address
        )
        if token_address not in valid_tokens:
            raise UnknownTokenAddress("Token address is not known.")
        chain_state = views.state_from_raiden(self.raiden)
        channels_to_close = views.filter_channels_by_partneraddress(
            chain_state=chain_state,
            payment_network_id=registry_address,
            token_address=token_address,
            partner_addresses=partner_addresses,
        )
        greenlets: Set[Greenlet] = set()
        for channel_state in channels_to_close:
            # dispatch one close state change per matching channel and
            # collect the greenlets spawned to process it
            channel_close = ActionChannelClose(
                canonical_identifier=channel_state.canonical_identifier
            )
            greenlets.update(self.raiden.handle_state_change(channel_close))
        # wait for all spawned work, re-raising any failure
        gevent.joinall(greenlets, raise_error=True)
        channel_ids = [channel_state.identifier for channel_state in channels_to_close]
        # block until every channel is reported as closed
        waiting.wait_for_close(
            raiden=self.raiden,
            payment_network_id=registry_address,
            token_address=token_address,
            channel_ids=channel_ids,
            retry_timeout=retry_timeout,
        )
def get_channel_list(
self,
registry_address: PaymentNetworkID,
token_address: TokenAddress = None,
partner_address: Address = None,
) -> List[NettingChannelState]:
if registry_address and not is_binary_address(registry_address):
raise InvalidAddress("Expected binary address format for registry in get_channel_list")
if token_address and not is_binary_address(token_address):
raise InvalidAddress("Expected binary address format for token in get_channel_list")
if partner_address:
if not is_binary_address(partner_address):
raise InvalidAddress(
"Expected binary address format for partner in get_channel_list"
)
if not token_address:
raise UnknownTokenAddress("Provided a partner address but no token address")
if token_address and partner_address:
channel_state = views.get_channelstate_for(
chain_state=views.state_from_raiden(self.raiden),
payment_network_id=registry_address,
token_address=token_address,
partner_address=partner_address,
)
if channel_state:
result = [channel_state]
else:
result = []
elif token_address:
result = views.list_channelstate_for_tokennetwork(
chain_state=views.state_from_raiden(self.raiden),
payment_network_id=registry_address,
token_address=token_address,
)
else:
result = views.list_all_channelstate(chain_state=views.state_from_raiden(self.raiden))
return result
def get_node_network_state(self, node_address: Address):
return views.get_node_network_status(
chain_state=views.state_from_raiden(self.raiden), node_address=node_address
)
    def start_health_check_for(self, node_address: Address):
        """Ask the raiden service to start health checks for ``node_address``."""
        self.raiden.start_health_check_for(node_address)
def get_tokens_list(self, registry_address: PaymentNetworkID):
tokens_list = views.get_token_identifiers(
chain_state=views.state_from_raiden(self.raiden), payment_network_id=registry_address
)
return tokens_list
def get_token_network_address_for_token_address(
self, registry_address: PaymentNetworkID, token_address: TokenAddress
) -> Optional[TokenNetworkID]:
return views.get_token_network_identifier_by_token_address(
chain_state=views.state_from_raiden(self.raiden),
payment_network_id=registry_address,
token_address=token_address,
)
def transfer_and_wait(
self,
registry_address: PaymentNetworkID,
token_address: TokenAddress,
amount: TokenAmount,
target: Address,
identifier: PaymentID = None,
transfer_timeout: int = None,
secret: Secret = None,
secrethash: SecretHash = None,
):
payment_status = self.transfer_async(
registry_address=registry_address,
token_address=token_address,
amount=amount,
target=target,
identifier=identifier,
secret=secret,
secrethash=secrethash,
)
payment_status.payment_done.wait(timeout=transfer_timeout)
return payment_status
def transfer_async(
self,
registry_address: PaymentNetworkID,
token_address: TokenAddress,
amount: TokenAmount,
target: Address,
identifier: PaymentID = None,
secret: Secret = None,
secrethash: SecretHash = None,
):
if not isinstance(amount, int):
raise InvalidAmount("Amount not a number")
if amount <= 0:
raise InvalidAmount("Amount negative")
if not is_binary_address(token_address):
raise InvalidAddress("token address is not valid.")
if not is_binary_address(target):
raise InvalidAddress("target address is not valid.")
if secret is not None:
if len(secret) != SECRET_HEXSTRING_LENGTH:
raise InvalidSecretOrSecretHash(
"secret length should be " + str(SECRET_HEXSTRING_LENGTH) + "."
)
if not is_hex(secret):
raise InvalidSecretOrSecretHash("provided secret is not an hexadecimal string.")
secret = to_bytes(hexstr=secret)
if secrethash is not None:
if len(secrethash) != SECRETHASH_HEXSTRING_LENGTH:
raise InvalidSecretOrSecretHash(
"secret_hash length should be " + str(SECRETHASH_HEXSTRING_LENGTH) + "."
)
if not is_hex(secrethash):
raise InvalidSecretOrSecretHash("secret_hash is not an hexadecimal string.")
secrethash = to_bytes(hexstr=secrethash)
if secret is not None and secrethash is not None and secrethash != sha3(secret):
raise InvalidSecretOrSecretHash("provided secret and secret_hash do not match.")
valid_tokens = views.get_token_identifiers(
views.state_from_raiden(self.raiden), registry_address
)
if token_address not in valid_tokens:
raise UnknownTokenAddress("Token address is not known.")
log.debug(
"Initiating transfer",
initiator=pex(self.raiden.address),
target=pex(target),
token=pex(token_address),
amount=amount,
identifier=identifier,
)
payment_network_identifier = self.raiden.default_registry.address
token_network_identifier = views.get_token_network_identifier_by_token_address(
chain_state=views.state_from_raiden(self.raiden),
payment_network_id=payment_network_identifier,
token_address=token_address,
)
payment_status = self.raiden.mediated_transfer_async(
token_network_identifier=token_network_identifier,
amount=amount,
target=target,
identifier=identifier,
secret=secret,
secrethash=secrethash,
)
return payment_status
def get_raiden_events_payment_history_with_timestamps(
self,
token_address: TokenAddress = None,
target_address: Address = None,
limit: int = None,
offset: int = None,
):
if token_address and not is_binary_address(token_address):
raise InvalidAddress(
"Expected binary address format for token in get_raiden_events_payment_history"
)
if target_address and not is_binary_address(target_address):
raise InvalidAddress(
"Expected binary address format for "
"target_address in get_raiden_events_payment_history"
)
token_network_identifier = None
if token_address:
token_network_identifier = views.get_token_network_identifier_by_token_address(
chain_state=views.state_from_raiden(self.raiden),
payment_network_id=self.raiden.default_registry.address,
token_address=token_address,
)
events = [
event
for event in self.raiden.wal.storage.get_events_with_timestamps(
limit=limit, offset=offset
)
if event_filter_for_payments(
event=event.wrapped_event,
token_network_identifier=token_network_identifier,
partner_address=target_address,
)
]
return events
def get_raiden_events_payment_history(
self,
token_address: TokenAddress = None,
target_address: Address = None,
limit: int = None,
offset: int = None,
):
timestamped_events = self.get_raiden_events_payment_history_with_timestamps(
token_address=token_address, target_address=target_address, limit=limit, offset=offset
)
return [event.wrapped_event for event in timestamped_events]
def get_raiden_internal_events_with_timestamps(self, limit: int = None, offset: int = None):
return self.raiden.wal.storage.get_events_with_timestamps(limit=limit, offset=offset)
transfer = transfer_and_wait
def get_blockchain_events_network(
self,
registry_address: PaymentNetworkID,
from_block: BlockSpecification = GENESIS_BLOCK_NUMBER,
to_block: BlockSpecification = "latest",
):
events = blockchain_events.get_token_network_registry_events(
chain=self.raiden.chain,
token_network_registry_address=registry_address,
contract_manager=self.raiden.contract_manager,
events=blockchain_events.ALL_EVENTS,
from_block=from_block,
to_block=to_block,
)
return sorted(events, key=lambda evt: evt.get("block_number"), reverse=True)
def get_blockchain_events_token_network(
self,
token_address: TokenAddress,
from_block: BlockSpecification = GENESIS_BLOCK_NUMBER,
to_block: BlockSpecification = "latest",
):
if not is_binary_address(token_address):
raise InvalidAddress(
"Expected binary address format for token in get_blockchain_events_token_network"
)
token_network_address = self.raiden.default_registry.get_token_network(token_address)
if token_network_address is None:
raise UnknownTokenAddress("Token address is not known.")
returned_events = blockchain_events.get_token_network_events(
chain=self.raiden.chain,
token_network_address=token_network_address,
contract_manager=self.raiden.contract_manager,
events=blockchain_events.ALL_EVENTS,
from_block=from_block,
to_block=to_block,
)
for event in returned_events:
if event.get("args"):
event["args"] = dict(event["args"])
returned_events.sort(key=lambda evt: evt.get("block_number"), reverse=True)
return returned_events
def get_blockchain_events_channel(
self,
token_address: TokenAddress,
partner_address: Address = None,
from_block: BlockSpecification = GENESIS_BLOCK_NUMBER,
to_block: BlockSpecification = "latest",
):
if not is_binary_address(token_address):
raise InvalidAddress(
"Expected binary address format for token in get_blockchain_events_channel"
)
token_network_address = self.raiden.default_registry.get_token_network(token_address)
if token_network_address is None:
raise UnknownTokenAddress("Token address is not known.")
channel_list = self.get_channel_list(
registry_address=self.raiden.default_registry.address,
token_address=token_address,
partner_address=partner_address,
)
returned_events = []
for channel in channel_list:
returned_events.extend(
blockchain_events.get_all_netting_channel_events(
chain=self.raiden.chain,
token_network_address=token_network_address,
netting_channel_identifier=channel.identifier,
contract_manager=self.raiden.contract_manager,
from_block=from_block,
to_block=to_block,
)
)
returned_events.sort(key=lambda evt: evt.get("block_number"), reverse=True)
return returned_events
def create_monitoring_request(
self, balance_proof: BalanceProofSignedState, reward_amount: TokenAmount
) -> Optional[RequestMonitoring]:
monitor_request = RequestMonitoring.from_balance_proof_signed_state(
balance_proof=balance_proof, reward_amount=reward_amount
)
monitor_request.sign(self.raiden.signer)
return monitor_request
def get_pending_transfers(
self, token_address: TokenAddress = None, partner_address: Address = None
) -> List[Dict[str, Any]]:
chain_state = views.state_from_raiden(self.raiden)
transfer_tasks = views.get_all_transfer_tasks(chain_state)
channel_id = None
if token_address is not None:
if self.raiden.default_registry.get_token_network(token_address) is None:
raise UnknownTokenAddress(f"Token {token_address} not found.")
if partner_address is not None:
partner_channel = self.get_channel(
registry_address=self.raiden.default_registry.address,
token_address=token_address,
partner_address=partner_address,
)
channel_id = partner_channel.identifier
return transfer_tasks_view(transfer_tasks, token_address, channel_id)
| true | true |
f7353cbc9ecc587e0e536617fab6c6017f0846af | 284,994 | py | Python | src/sage/combinat/partition.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | null | null | null | src/sage/combinat/partition.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | null | null | null | src/sage/combinat/partition.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
r"""
Integer partitions
A partition `p` of a nonnegative integer `n` is a
non-increasing list of positive integers (the *parts* of the
partition) with total sum `n`.
A partition can be depicted by a diagram made of rows of cells,
where the number of cells in the `i^{th}` row starting from
the top is the `i^{th}` part of the partition.
The coordinate system related to a partition applies from the top
to the bottom and from left to right. So, the corners of the
partition `[5, 3, 1]` are `[[0,4], [1,2], [2,0]]`.
For display options, see :obj:`Partitions.options`.
.. NOTE::
- Boxes is a synonym for cells. All methods will use 'cell' and 'cells'
instead of 'box' and 'boxes'.
- Partitions are 0 based with coordinates in the form of (row-index,
column-index).
- If given coordinates of the form ``(r, c)``, then use Python's
\*-operator.
- Throughout this documentation, for a partition `\lambda` we will denote
its conjugate partition by `\lambda^{\prime}`. For more on conjugate
partitions, see :meth:`Partition.conjugate()`.
- The comparisons on partitions use lexicographic order.
.. NOTE::
We use the convention that lexicographic ordering is read from
left-to-right. That is to say `[1, 3, 7]` is smaller than `[2, 3, 4]`.
AUTHORS:
- Mike Hansen (2007): initial version
- Dan Drake (2009-03-28): deprecate RestrictedPartitions and implement
Partitions_parts_in
- Travis Scrimshaw (2012-01-12): Implemented latex function to Partition_class
- Travis Scrimshaw (2012-05-09): Fixed Partitions(-1).list() infinite recursion
loop by saying Partitions_n is the empty set.
- Travis Scrimshaw (2012-05-11): Fixed bug in inner where if the length was
longer than the length of the inner partition, it would include 0's.
- Andrew Mathas (2012-06-01): Removed deprecated functions and added
compatibility with the PartitionTuple classes. See :trac:`13072`
- Travis Scrimshaw (2012-10-12): Added options. Made
``Partition_class`` to the element ``Partition``. ``Partitions*`` are now
all in the category framework except ``PartitionsRestricted`` (which will
eventually be removed). Cleaned up documentation.
- Matthew Lancellotti (2018-09-14): Added a bunch of "k" methods to Partition.
EXAMPLES:
There are `5` partitions of the integer `4`::
sage: Partitions(4).cardinality()
5
sage: Partitions(4).list()
[[4], [3, 1], [2, 2], [2, 1, 1], [1, 1, 1, 1]]
We can use the method ``.first()`` to get the 'first' partition of a
number::
sage: Partitions(4).first()
[4]
Using the method ``.next(p)``, we can calculate the 'next' partition
after `p`. When we are at the last partition, ``None`` will be returned::
sage: Partitions(4).next([4])
[3, 1]
sage: Partitions(4).next([1,1,1,1]) is None
True
We can use ``iter`` to get an object which iterates over the partitions
one by one to save memory. Note that when we do something like
``for part in Partitions(4)`` this iterator is used in the background::
sage: g = iter(Partitions(4))
sage: next(g)
[4]
sage: next(g)
[3, 1]
sage: next(g)
[2, 2]
sage: for p in Partitions(4): print(p)
[4]
[3, 1]
[2, 2]
[2, 1, 1]
[1, 1, 1, 1]
We can add constraints to the type of partitions we want. For
example, to get all of the partitions of `4` of length `2`, we'd do the
following::
sage: Partitions(4, length=2).list()
[[3, 1], [2, 2]]
Here is the list of partitions of length at least `2` and the list of
ones with length at most `2`::
sage: Partitions(4, min_length=2).list()
[[3, 1], [2, 2], [2, 1, 1], [1, 1, 1, 1]]
sage: Partitions(4, max_length=2).list()
[[4], [3, 1], [2, 2]]
The options ``min_part`` and ``max_part`` can be used to set constraints
on the sizes of all parts. Using ``max_part``, we can select
partitions having only 'small' entries. The following is the list
of the partitions of `4` with parts at most `2`::
sage: Partitions(4, max_part=2).list()
[[2, 2], [2, 1, 1], [1, 1, 1, 1]]
The ``min_part`` options is complementary to ``max_part`` and selects
partitions having only 'large' parts. Here is the list of all
partitions of `4` with each part at least `2`::
sage: Partitions(4, min_part=2).list()
[[4], [2, 2]]
The options ``inner`` and ``outer`` can be used to set part-by-part
constraints. This is the list of partitions of `4` with ``[3, 1, 1]`` as
an outer bound (that is, partitions of `4` contained in the partition
``[3, 1, 1]``)::
sage: Partitions(4, outer=[3,1,1]).list()
[[3, 1], [2, 1, 1]]
``outer`` sets ``max_length`` to the length of its argument. Moreover, the
parts of ``outer`` may be infinite to clear constraints on specific
parts. Here is the list of the partitions of `4` of length at most `3`
such that the second and third part are `1` when they exist::
sage: Partitions(4, outer=[oo,1,1]).list()
[[4], [3, 1], [2, 1, 1]]
Finally, here are the partitions of `4` with ``[1,1,1]`` as an inner
bound (i.e., the partitions of `4` containing the partition ``[1,1,1]``).
Note that ``inner`` sets ``min_length`` to the length of its argument::
sage: Partitions(4, inner=[1,1,1]).list()
[[2, 1, 1], [1, 1, 1, 1]]
The options ``min_slope`` and ``max_slope`` can be used to set
constraints on the slope, that is on the difference ``p[i+1]-p[i]`` of
two consecutive parts. Here is the list of the strictly decreasing
partitions of `4`::
sage: Partitions(4, max_slope=-1).list()
[[4], [3, 1]]
The constraints can be combined together in all reasonable ways.
Here are all the partitions of `11` of length between `2` and `4` such
that the difference between two consecutive parts is between `-3` and
`-1`::
sage: Partitions(11,min_slope=-3,max_slope=-1,min_length=2,max_length=4).list()
[[7, 4], [6, 5], [6, 4, 1], [6, 3, 2], [5, 4, 2], [5, 3, 2, 1]]
Partition objects can also be created individually with :class:`Partition`::
sage: Partition([2,1])
[2, 1]
Once we have a partition object, then there are a variety of
methods that we can use. For example, we can get the conjugate of a
partition. Geometrically, the conjugate of a partition is the
reflection of that partition through its main diagonal. Of course,
this operation is an involution::
sage: Partition([4,1]).conjugate()
[2, 1, 1, 1]
sage: Partition([4,1]).conjugate().conjugate()
[4, 1]
If we create a partition with extra zeros at the end, they will be dropped::
sage: Partition([4,1,0,0])
[4, 1]
sage: Partition([0])
[]
sage: Partition([0,0])
[]
The idea of a partition being followed by infinitely many parts of size
`0` is consistent with the ``get_part`` method::
sage: p = Partition([5, 2])
sage: p.get_part(0)
5
sage: p.get_part(10)
0
We can go back and forth between the standard and the exponential
notations of a partition. The exponential notation can be padded with
extra zeros::
sage: Partition([6,4,4,2,1]).to_exp()
[1, 1, 0, 2, 0, 1]
sage: Partition(exp=[1,1,0,2,0,1])
[6, 4, 4, 2, 1]
sage: Partition([6,4,4,2,1]).to_exp(5)
[1, 1, 0, 2, 0, 1]
sage: Partition([6,4,4,2,1]).to_exp(7)
[1, 1, 0, 2, 0, 1, 0]
sage: Partition([6,4,4,2,1]).to_exp(10)
[1, 1, 0, 2, 0, 1, 0, 0, 0, 0]
We can get the (zero-based!) coordinates of the corners of a
partition::
sage: Partition([4,3,1]).corners()
[(0, 3), (1, 2), (2, 0)]
We can compute the core and quotient of a partition and build
the partition back up from them::
sage: Partition([6,3,2,2]).core(3)
[2, 1, 1]
sage: Partition([7,7,5,3,3,3,1]).quotient(3)
([2], [1], [2, 2, 2])
sage: p = Partition([11,5,5,3,2,2,2])
sage: p.core(3)
[]
sage: p.quotient(3)
([2, 1], [4], [1, 1, 1])
sage: Partition(core=[],quotient=([2, 1], [4], [1, 1, 1]))
[11, 5, 5, 3, 2, 2, 2]
We can compute the `0-1` sequence and go back and forth::
sage: Partitions().from_zero_one([1, 1, 1, 1, 0, 1, 0])
[5, 4]
sage: all(Partitions().from_zero_one(mu.zero_one_sequence())
....: == mu for n in range(5) for mu in Partitions(n))
True
We can compute the Frobenius coordinates and go back and forth::
sage: Partition([7,3,1]).frobenius_coordinates()
([6, 1], [2, 0])
sage: Partition(frobenius_coordinates=([6,1],[2,0]))
[7, 3, 1]
sage: all(mu == Partition(frobenius_coordinates=mu.frobenius_coordinates())
....: for n in range(12) for mu in Partitions(n))
True
We use the lexicographic ordering::
sage: pl = Partition([4,1,1])
sage: ql = Partitions()([3,3])
sage: pl > ql
True
sage: PL = Partitions()
sage: pl = PL([4,1,1])
sage: ql = PL([3,3])
sage: pl > ql
True
"""
# ****************************************************************************
# Copyright (C) 2007 Mike Hansen <mhansen@gmail.com>,
#
# Distributed under the terms of the GNU General Public License (GPL)
# https://www.gnu.org/licenses/
# ****************************************************************************
from copy import copy
from itertools import accumulate
from sage.libs.pari.all import pari
from sage.libs.flint.arith import number_of_partitions as flint_number_of_partitions
from sage.arith.misc import multinomial
from sage.structure.global_options import GlobalOptions
from sage.structure.parent import Parent
from sage.structure.unique_representation import UniqueRepresentation
from sage.symbolic.ring import var
from sage.misc.lazy_import import lazy_import
lazy_import('sage.combinat.skew_partition', 'SkewPartition')
lazy_import('sage.combinat.partition_tuple', 'PartitionTuple')
from sage.misc.misc_c import prod
from sage.misc.prandom import randrange
from sage.misc.cachefunc import cached_method, cached_function
from sage.categories.infinite_enumerated_sets import InfiniteEnumeratedSets
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
from sage.sets.non_negative_integers import NonNegativeIntegers
from sage.rings.finite_rings.integer_mod_ring import IntegerModRing
from sage.rings.integer_ring import ZZ
from sage.rings.rational_field import QQ
from sage.rings.semirings.all import NN
from sage.arith.all import factorial, gcd
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.rings.integer import Integer
from sage.rings.infinity import infinity
from .combinat import CombinatorialElement
from . import tableau
from . import permutation
from . import composition
from sage.combinat.partitions import ZS1_iterator, ZS1_iterator_nk
from sage.combinat.integer_vector import IntegerVectors
from sage.combinat.integer_lists import IntegerListsLex
from sage.combinat.integer_vector_weighted import iterator_fast as weighted_iterator_fast
from sage.combinat.combinat_cython import conjugate
from sage.combinat.root_system.weyl_group import WeylGroup
from sage.combinat.combinatorial_map import combinatorial_map
from sage.groups.perm_gps.permgroup import PermutationGroup
from sage.graphs.dot2tex_utils import have_dot2tex
from sage.arith.all import binomial
class Partition(CombinatorialElement):
r"""
A partition `p` of a nonnegative integer `n` is a
non-increasing list of positive integers (the *parts* of the
partition) with total sum `n`.
A partition is often represented as a diagram consisting of **cells**,
or **boxes**, placed in rows on top of each other such that the number of
cells in the `i^{th}` row, reading from top to bottom, is the `i^{th}`
part of the partition. The rows are left-justified (and become shorter
and shorter the farther down one goes). This diagram is called the
**Young diagram** of the partition, or more precisely its Young diagram
in English notation. (French and Russian notations are variations on this
representation.)
The coordinate system related to a partition applies from the top
to the bottom and from left to right. So, the corners of the
partition ``[5, 3, 1]`` are ``[[0,4], [1,2], [2,0]]``.
For display options, see :meth:`Partitions.options`.
.. NOTE::
Partitions are 0 based with coordinates in the form of (row-index,
column-index). For example consider the partition
``mu=Partition([4,3,2,2])``, the first part is ``mu[0]`` (which is 4),
the second is ``mu[1]``, and so on, and the upper-left cell in English
convention is ``(0, 0)``.
A partition can be specified in one of the following ways:
- a list (the default)
- using exponential notation
- by Frobenius coordinates
- specifying its `0-1` sequence
- specifying the core and the quotient
See the examples below.
EXAMPLES:
Creating partitions though parents::
sage: mu = Partitions(8)([3,2,1,1,1]); mu
[3, 2, 1, 1, 1]
sage: nu = Partition([3,2,1,1,1]); nu
[3, 2, 1, 1, 1]
sage: mu == nu
True
sage: mu is nu
False
sage: mu in Partitions()
True
sage: mu.parent()
Partitions of the integer 8
sage: mu.size()
8
sage: mu.category()
Category of elements of Partitions of the integer 8
sage: nu.parent()
Partitions
sage: nu.category()
Category of elements of Partitions
sage: mu[0]
3
sage: mu[1]
2
sage: mu[2]
1
sage: mu.pp()
***
**
*
*
*
sage: mu.removable_cells()
[(0, 2), (1, 1), (4, 0)]
sage: mu.down_list()
[[2, 2, 1, 1, 1], [3, 1, 1, 1, 1], [3, 2, 1, 1]]
sage: mu.addable_cells()
[(0, 3), (1, 2), (2, 1), (5, 0)]
sage: mu.up_list()
[[4, 2, 1, 1, 1], [3, 3, 1, 1, 1], [3, 2, 2, 1, 1], [3, 2, 1, 1, 1, 1]]
sage: mu.conjugate()
[5, 2, 1]
sage: mu.dominates(nu)
True
sage: nu.dominates(mu)
True
Creating partitions using ``Partition``::
sage: Partition([3,2,1])
[3, 2, 1]
sage: Partition(exp=[2,1,1])
[3, 2, 1, 1]
sage: Partition(core=[2,1], quotient=[[2,1],[3],[1,1,1]])
[11, 5, 5, 3, 2, 2, 2]
sage: Partition(frobenius_coordinates=([3,2],[4,0]))
[4, 4, 1, 1, 1]
sage: Partitions().from_zero_one([1, 1, 1, 1, 0, 1, 0])
[5, 4]
sage: [2,1] in Partitions()
True
sage: [2,1,0] in Partitions()
True
sage: Partition([1,2,3])
Traceback (most recent call last):
...
ValueError: [1, 2, 3] is not an element of Partitions
Sage ignores trailing zeros at the end of partitions::
sage: Partition([3,2,1,0])
[3, 2, 1]
sage: Partitions()([3,2,1,0])
[3, 2, 1]
sage: Partitions(6)([3,2,1,0])
[3, 2, 1]
TESTS:
Check that only trailing zeros are stripped::
sage: TestSuite( Partition([]) ).run()
sage: TestSuite( Partition([4,3,2,2,2,1]) ).run()
sage: Partition([3,2,2,2,1,0,0,0])
[3, 2, 2, 2, 1]
sage: Partition([3,0,2,2,2,1,0])
Traceback (most recent call last):
...
ValueError: [3, 0, 2, 2, 2, 1, 0] is not an element of Partitions
sage: Partition([0,7,3])
Traceback (most recent call last):
...
ValueError: [0, 7, 3] is not an element of Partitions
"""
@staticmethod
def __classcall_private__(cls, mu=None, **keyword):
"""
This constructs a list from optional arguments and delegates the
construction of a :class:`Partition` to the ``element_class()`` call
of the appropriate parent.
EXAMPLES::
sage: Partition([3,2,1])
[3, 2, 1]
sage: Partition(exp=[2,1,1])
[3, 2, 1, 1]
sage: Partition(core=[2,1], quotient=[[2,1],[3],[1,1,1]])
[11, 5, 5, 3, 2, 2, 2]
"""
l = len(keyword)
if l == 0:
if mu is not None:
if isinstance(mu, Partition):
return mu
return _Partitions(list(mu))
if l == 1:
if 'beta_numbers' in keyword:
return _Partitions.from_beta_numbers(keyword['beta_numbers'])
elif 'exp' in keyword:
return _Partitions.from_exp(keyword['exp'])
elif 'frobenius_coordinates' in keyword:
return _Partitions.from_frobenius_coordinates(keyword['frobenius_coordinates'])
elif 'zero_one' in keyword:
return _Partitions.from_zero_one(keyword['zero_one'])
if l == 2 and 'core' in keyword and 'quotient' in keyword:
return _Partitions.from_core_and_quotient(keyword['core'], keyword['quotient'])
raise ValueError('incorrect syntax for Partition()')
    def __setstate__(self, state):
        r"""
        In order to maintain backwards compatibility and be able to unpickle a
        old pickle from ``Partition_class`` we have to override the default
        ``__setstate__``.

        EXAMPLES::

            sage: loads(b'x\x9ck`J.NLO\xd5K\xce\xcfM\xca\xccK,\xd1+H,*\xc9,\xc9\xcc\xcf\xe3\n\x80\xb1\xe2\x93s\x12\x8b\x8b\xb9\n\x195\x1b\x0b\x99j\x0b\x995BY\xe33\x12\x8b3\nY\xfc\x80\xac\x9c\xcc\xe2\x92B\xd6\xd8B6\r\x88IE\x99y\xe9\xc5z\x99y%\xa9\xe9\xa9E\\\xb9\x89\xd9\xa9\xf10N!{(\xa3qkP!G\x06\x90a\x04dp\x82\x18\x86@\x06Wji\x92\x1e\x00x0.\xb5')
            [3, 2, 1]
            sage: loads(dumps( Partition([3,2,1]) )) # indirect doctest
            [3, 2, 1]
        """
        if isinstance(state, dict): # for old pickles from Partition_class
            self._set_parent(_Partitions)
            self.__dict__ = state
        else:
            # newer pickles store a (parent, instance dict) pair
            self._set_parent(state[0])
            self.__dict__ = state[1]
    def __init__(self, parent, mu):
        """
        Initialize ``self``.

        We assume that ``mu`` is a weakly decreasing list of
        non-negative elements in ``ZZ``.

        EXAMPLES::

            sage: p = Partition([3,1])
            sage: TestSuite(p).run()

        TESTS:

        Fix that tuples raise the correct error::

            sage: Partition((3,1,7))
            Traceback (most recent call last):
            ...
            ValueError: [3, 1, 7] is not an element of Partitions
        """
        if isinstance(mu, Partition):
            # since we are (supposed to be) immutable, we can share the underlying data
            CombinatorialElement.__init__(self, parent, mu._list)
        else:
            if mu and not mu[-1]:
                # direct callers might assume that mu is not modified, so the
                # first trailing zero is stripped by slicing (which copies);
                # any remaining trailing zeros are then popped off the copy
                mu = mu[:-1]
            while mu and not mu[-1]:
                mu.pop()
            CombinatorialElement.__init__(self, parent, mu)
@cached_method
def __hash__(self):
r"""
Return the hash of ``self``.
TESTS::
sage: P = Partition([4,2,2,1])
sage: hash(P) == hash(P)
True
"""
return hash(tuple(self._list))
def _repr_(self):
r"""
Return a string representation of ``self`` depending on
:meth:`Partitions.options`.
EXAMPLES::
sage: mu=Partition([7,7,7,3,3,2,1,1,1,1,1,1,1]); mu # indirect doctest
[7, 7, 7, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1]
sage: Partitions.options.display="diagram"; mu
*******
*******
*******
***
***
**
*
*
*
*
*
*
*
sage: Partitions.options.display="list"; mu
[7, 7, 7, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1]
sage: Partitions.options.display="compact_low"; mu
1^7,2,3^2,7^3
sage: Partitions.options.display="compact_high"; mu
7^3,3^2,2,1^7
sage: Partitions.options.display="exp_low"; mu
1^7, 2, 3^2, 7^3
sage: Partitions.options.display="exp_high"; mu
7^3, 3^2, 2, 1^7
sage: Partitions.options.convention="French"
sage: mu = Partition([7,7,7,3,3,2,1,1,1,1,1,1,1]); mu # indirect doctest
7^3, 3^2, 2, 1^7
sage: Partitions.options.display="diagram"; mu
*
*
*
*
*
*
*
**
***
***
*******
*******
*******
sage: Partitions.options.display="list"; mu
[7, 7, 7, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1]
sage: Partitions.options.display="compact_low"; mu
1^7,2,3^2,7^3
sage: Partitions.options.display="compact_high"; mu
7^3,3^2,2,1^7
sage: Partitions.options.display="exp_low"; mu
1^7, 2, 3^2, 7^3
sage: Partitions.options.display="exp_high"; mu
7^3, 3^2, 2, 1^7
sage: Partitions.options._reset()
"""
return self.parent().options._dispatch(self, '_repr_', 'display')
def _ascii_art_(self):
"""
TESTS::
sage: ascii_art(Partitions(5).list())
[ * ]
[ ** * ]
[ *** ** * * ]
[ **** *** * ** * * ]
[ *****, * , ** , * , * , * , * ]
"""
from sage.typeset.ascii_art import AsciiArt
return AsciiArt(self._repr_diagram().splitlines(), baseline=0)
def _unicode_art_(self):
"""
TESTS::
sage: unicode_art(Partitions(5).list())
⎡ ┌┐ ⎤
⎢ ┌┬┐ ├┤ ⎥
⎢ ┌┬┬┐ ┌┬┐ ├┼┘ ├┤ ⎥
⎢ ┌┬┬┬┐ ┌┬┬┐ ├┼┴┘ ├┼┤ ├┤ ├┤ ⎥
⎢ ┌┬┬┬┬┐ ├┼┴┴┘ ├┼┼┘ ├┤ ├┼┘ ├┤ ├┤ ⎥
⎣ └┴┴┴┴┘, └┘ , └┴┘ , └┘ , └┘ , └┘ , └┘ ⎦
sage: Partitions.options.convention = "French"
sage: unicode_art(Partitions(5).list())
⎡ ┌┐ ⎤
⎢ ┌┐ ├┤ ⎥
⎢ ┌┐ ┌┐ ├┤ ├┤ ⎥
⎢ ┌┐ ┌┬┐ ├┤ ├┼┐ ├┤ ├┤ ⎥
⎢ ┌┬┬┬┬┐ ├┼┬┬┐ ├┼┼┐ ├┼┬┐ ├┼┤ ├┼┐ ├┤ ⎥
⎣ └┴┴┴┴┘, └┴┴┴┘, └┴┴┘, └┴┴┘, └┴┘, └┴┘, └┘ ⎦
sage: Partitions.options._reset()
"""
from sage.typeset.unicode_art import UnicodeArt
if not self._list:
return UnicodeArt(u'∅', baseline=0)
if self.parent().options.convention == "English":
data = list(self)
else:
data = list(reversed(self))
txt = [u'┌' + u'┬' * (data[0] - 1) + u'┐']
for i in range(len(data) - 1):
p = data[i]
q = data[i + 1]
if p < q:
txt += [u'├' + u'┼' * p + u'┬' * (q - p - 1) + u'┐']
elif p == q:
txt += [u'├' + u'┼' * (p - 1) + u'┤']
else:
txt += [u'├' + u'┼' * q + u'┴' * (p - q - 1) + u'┘']
txt += [u'└' + u'┴' * (data[-1] - 1) + u'┘']
return UnicodeArt(txt, baseline=0)
def _repr_list(self):
"""
Return a string representation of ``self`` as a list.
EXAMPLES::
sage: print(Partition([7,7,7,3,3,2,1,1,1,1,1,1,1])._repr_list())
[7, 7, 7, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1]
"""
return '[%s]' % ', '.join('%s' % m for m in self)
def _repr_exp_low(self):
"""
Return a string representation of ``self`` in exponential form (lowest
first).
EXAMPLES::
sage: print(Partition([7,7,7,3,3,2,1,1,1,1,1,1,1])._repr_exp_low())
1^7, 2, 3^2, 7^3
sage: print(Partition([])._repr_exp_low())
-
"""
if not self._list:
return '-'
exp = self.to_exp()
return '%s' % ', '.join('%s%s' % (m+1, '' if e==1 else '^%s'%e)
for (m,e) in enumerate(exp) if e > 0)
def _repr_exp_high(self):
"""
Return a string representation of ``self`` in exponential form (highest
first).
EXAMPLES::
sage: print(Partition([7,7,7,3,3,2,1,1,1,1,1,1,1])._repr_exp_high())
7^3, 3^2, 2, 1^7
sage: print(Partition([])._repr_exp_high())
-
"""
if not self._list:
return '-'
exp = self.to_exp()[::-1] # reversed list of exponents
M=max(self)
return '%s' % ', '.join('%s%s' % (M-m, '' if e==1 else '^%s'%e)
for (m,e) in enumerate(exp) if e>0)
def _repr_compact_low(self):
"""
Return a string representation of ``self`` in compact form (exponential
form with lowest first).
EXAMPLES::
sage: print(Partition([7,7,7,3,3,2,1,1,1,1,1,1,1])._repr_compact_low())
1^7,2,3^2,7^3
sage: print(Partition([])._repr_compact_low())
-
"""
if not self._list:
return '-'
exp = self.to_exp()
return '%s' % ','.join('%s%s' % (m+1, '' if e==1 else '^%s'%e)
for (m,e) in enumerate(exp) if e > 0)
def _repr_compact_high(self):
"""
Return a string representation of ``self`` in compact form (exponential
form with highest first).
EXAMPLES::
sage: print(Partition([7,7,7,3,3,2,1,1,1,1,1,1,1])._repr_compact_high())
7^3,3^2,2,1^7
sage: print(Partition([])._repr_compact_low())
-
"""
if not self._list:
return '-'
exp = self.to_exp()[::-1] # reversed list of exponents
M=max(self)
return '%s' % ','.join('%s%s' % (M-m, '' if e==1 else '^%s'%e)
for (m,e) in enumerate(exp) if e>0)
def _repr_diagram(self):
r"""
Return a representation of ``self`` as a Ferrers diagram.
EXAMPLES::
sage: print(Partition([7,7,7,3,3,2,1,1,1,1,1,1,1])._repr_diagram())
*******
*******
*******
***
***
**
*
*
*
*
*
*
*
"""
return self.ferrers_diagram()
def level(self):
"""
Return the level of ``self``, which is always 1.
This method exists only for compatibility with
:class:`PartitionTuples`.
EXAMPLES::
sage: Partition([4,3,2]).level()
1
"""
return 1
def components(self):
"""
Return a list containing the shape of ``self``.
This method exists only for compatibility with
:class:`PartitionTuples`.
EXAMPLES::
sage: Partition([3,2]).components()
[[3, 2]]
"""
return [ self ]
def _latex_(self):
r"""
Return a LaTeX version of ``self``.
For more on the latex options, see :meth:`Partitions.options`.
EXAMPLES::
sage: mu = Partition([2, 1])
sage: Partitions.options.latex='diagram'; latex(mu) # indirect doctest
{\def\lr#1{\multicolumn{1}{@{\hspace{.6ex}}c@{\hspace{.6ex}}}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{2}c}\\
\lr{\ast}&\lr{\ast}\\
\lr{\ast}\\
\end{array}$}
}
sage: Partitions.options.latex='exp_high'; latex(mu) # indirect doctest
2,1
sage: Partitions.options.latex='exp_low'; latex(mu) # indirect doctest
1,2
sage: Partitions.options.latex='list'; latex(mu) # indirect doctest
[2, 1]
sage: Partitions.options.latex='young_diagram'; latex(mu) # indirect doctest
{\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{2}c}\cline{1-2}
\lr{\phantom{x}}&\lr{\phantom{x}}\\\cline{1-2}
\lr{\phantom{x}}\\\cline{1-1}
\end{array}$}
}
sage: Partitions.options(latex="young_diagram", convention="french")
sage: Partitions.options.latex='exp_high'; latex(mu) # indirect doctest
2,1
sage: Partitions.options.latex='exp_low'; latex(mu) # indirect doctest
1,2
sage: Partitions.options.latex='list'; latex(mu) # indirect doctest
[2, 1]
sage: Partitions.options.latex='young_diagram'; latex(mu) # indirect doctest
{\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[t]{*{2}c}\cline{1-1}
\lr{\phantom{x}}\\\cline{1-2}
\lr{\phantom{x}}&\lr{\phantom{x}}\\\cline{1-2}
\end{array}$}
}
sage: Partitions.options._reset()
"""
return self.parent().options._dispatch(self, '_latex_', 'latex')
def _latex_young_diagram(self):
r"""
LaTeX output as a Young diagram.
EXAMPLES::
sage: print(Partition([2, 1])._latex_young_diagram())
{\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{2}c}\cline{1-2}
\lr{\phantom{x}}&\lr{\phantom{x}}\\\cline{1-2}
\lr{\phantom{x}}\\\cline{1-1}
\end{array}$}
}
sage: print(Partition([])._latex_young_diagram())
{\emptyset}
"""
if not self._list:
return "{\\emptyset}"
from sage.combinat.output import tex_from_array
return tex_from_array([ ["\\phantom{x}"]*row_size for row_size in self._list ])
def _latex_diagram(self):
r"""
LaTeX output as a Ferrers' diagram.
EXAMPLES::
sage: print(Partition([2, 1])._latex_diagram())
{\def\lr#1{\multicolumn{1}{@{\hspace{.6ex}}c@{\hspace{.6ex}}}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{2}c}\\
\lr{\ast}&\lr{\ast}\\
\lr{\ast}\\
\end{array}$}
}
sage: print(Partition([])._latex_diagram())
{\emptyset}
"""
if not self._list:
return "{\\emptyset}"
entry = self.parent().options("latex_diagram_str")
from sage.combinat.output import tex_from_array
return tex_from_array([ [entry]*row_size for row_size in self._list ], False)
def _latex_list(self):
r"""
LaTeX output as a list.
EXAMPLES::
sage: print(Partition([2, 1])._latex_list())
[2, 1]
sage: print(Partition([])._latex_list())
[]
"""
return repr(self._list)
def _latex_exp_low(self):
r"""
LaTeX output in exponential notation (lowest first).
EXAMPLES::
sage: print(Partition([2,2,1])._latex_exp_low())
1,2^{2}
sage: print(Partition([])._latex_exp_low())
{\emptyset}
"""
if not self._list:
return "{\\emptyset}"
exp = self.to_exp()
return '%s' % ','.join('%s%s' % (m+1, '' if e==1 else '^{%s}'%e)
for (m,e) in enumerate(exp) if e > 0)
def _latex_exp_high(self):
r"""
LaTeX output in exponential notation (highest first).
EXAMPLES::
sage: print(Partition([2,2,1])._latex_exp_high())
2^{2},1
sage: print(Partition([])._latex_exp_high())
{\emptyset}
"""
if not self._list:
return "{\\emptyset}"
exp = self.to_exp()[::-1] # reversed list of exponents
M = max(self)
return '%s' % ','.join('%s%s' % (M-m, '' if e==1 else '^{%s}'%e)
for (m,e) in enumerate(exp) if e>0)
def ferrers_diagram(self):
r"""
Return the Ferrers diagram of ``self``.
EXAMPLES::
sage: mu = Partition([5,5,2,1])
sage: Partitions.options(diagram_str='*', convention="english")
sage: print(mu.ferrers_diagram())
*****
*****
**
*
sage: Partitions.options(diagram_str='#')
sage: print(mu.ferrers_diagram())
#####
#####
##
#
sage: Partitions.options.convention="french"
sage: print(mu.ferrers_diagram())
#
##
#####
#####
sage: print(Partition([]).ferrers_diagram())
-
sage: Partitions.options(diagram_str='-')
sage: print(Partition([]).ferrers_diagram())
(/)
sage: Partitions.options._reset()
"""
diag_str = self.parent().options.diagram_str
if not self._list:
return '-' if diag_str != '-' else "(/)"
if self.parent().options.convention == "English":
return '\n'.join(diag_str * p for p in self)
else:
return '\n'.join(diag_str * p for p in reversed(self))
def pp(self):
r"""
Print the Ferrers diagram.
See :meth:`ferrers_diagram` for more on the Ferrers diagram.
EXAMPLES::
sage: Partition([5,5,2,1]).pp()
*****
*****
**
*
sage: Partitions.options.convention='French'
sage: Partition([5,5,2,1]).pp()
*
**
*****
*****
sage: Partitions.options._reset()
"""
print(self.ferrers_diagram())
def __truediv__(self, p):
"""
Return the skew partition ``self / p``.
EXAMPLES::
sage: p = Partition([3,2,1])
sage: p/[1,1]
[3, 2, 1] / [1, 1]
sage: p/[3,2,1]
[3, 2, 1] / [3, 2, 1]
sage: p/Partition([1,1])
[3, 2, 1] / [1, 1]
sage: p/[2,2,2]
Traceback (most recent call last):
...
ValueError: To form a skew partition p/q, q must be contained in p.
"""
if not self.contains(p):
raise ValueError("To form a skew partition p/q, q must be contained in p.")
return SkewPartition([self[:], p])
def power(self, k):
r"""
Return the cycle type of the `k`-th power of any permutation
with cycle type ``self`` (thus describes the powermap of
symmetric groups).
Equivalent to GAP's ``PowerPartition``.
EXAMPLES::
sage: p = Partition([5,3])
sage: p.power(1)
[5, 3]
sage: p.power(2)
[5, 3]
sage: p.power(3)
[5, 1, 1, 1]
sage: p.power(4)
[5, 3]
Now let us compare this to the power map on `S_8`::
sage: G = SymmetricGroup(8)
sage: g = G([(1,2,3,4,5),(6,7,8)])
sage: g
(1,2,3,4,5)(6,7,8)
sage: g^2
(1,3,5,2,4)(6,8,7)
sage: g^3
(1,4,2,5,3)
sage: g^4
(1,5,4,3,2)(6,7,8)
::
sage: Partition([3,2,1]).power(3)
[2, 1, 1, 1, 1]
"""
res = []
for i in self:
g = gcd(i, k)
res.extend( [ZZ(i//g)]*int(g) )
res.sort(reverse=True)
return Partition(res)
    def __next__(self):
        """
        Return the partition that lexicographically follows ``self``, of the
        same size. If ``self`` is the last partition, then return ``False``.

        This is an in-place variant of the ZS1 partition-generation
        algorithm: it walks the current partition in anti-lexicographic
        order, so the successor is obtained by decrementing the last
        non-one entry and redistributing the remainder.

        EXAMPLES::
            sage: next(Partition([4]))
            [3, 1]
            sage: next(Partition([1,1,1,1]))
            False
        """
        p = self
        # n = size of the partition, m = current number of parts.
        n = 0
        m = 0
        for i in p:
            n += i
            m += 1
        # Working copy padded with 1's so it has room for the successor,
        # which can have up to n parts.
        next_p = p[:] + [1]*(n - len(p))
        #Check to see if we are at the last (all ones) partition
        if p == [1]*n:
            return False
        #
        #If we are not, then run the ZS1 algorithm.
        #
        #Let h be the number of non-one entries in the
        #partition
        h = 0
        for i in next_p:
            if i != 1:
                h += 1
        if next_p[h-1] == 2:
            # The last non-one entry is a 2: turn it into two 1's.
            m += 1
            next_p[h-1] = 1
            h -= 1
        else:
            # Decrement the last non-one entry to r and refill the tail
            # (t units in total) with as many copies of r as fit ...
            r = next_p[h-1] - 1
            t = m - h + 1
            next_p[h-1] = r
            while t >= r:
                h += 1
                next_p[h-1] = r
                t -= r
            # ... then place any remainder t as a final part (a remainder
            # of 1 is already covered by the implicit padding of 1's).
            if t == 0:
                m = h
            else:
                m = h + 1
                if t > 1:
                    h += 1
                    next_p[h-1] = t
        return self.parent()(next_p[:m])
    next = __next__
def size(self):
"""
Return the size of ``self``.
EXAMPLES::
sage: Partition([2,2]).size()
4
sage: Partition([3,2,1]).size()
6
"""
return sum(self)
def sign(self):
r"""
Return the sign of any permutation with cycle type ``self``.
This function corresponds to a homomorphism from the symmetric
group `S_n` into the cyclic group of order 2, whose kernel
is exactly the alternating group `A_n`. Partitions of sign
`1` are called even partitions while partitions of sign
`-1` are called odd.
EXAMPLES::
sage: Partition([5,3]).sign()
1
sage: Partition([5,2]).sign()
-1
Zolotarev's lemma states that the Legendre symbol
`\left(\frac{a}{p}\right)` for an integer
`a \pmod p` (`p` a prime number), can be computed
as sign(p_a), where sign denotes the sign of a permutation and
p_a the permutation of the residue classes `\pmod p`
induced by modular multiplication by `a`, provided
`p` does not divide `a`.
We verify this in some examples.
::
sage: F = GF(11)
sage: a = F.multiplicative_generator();a
2
sage: plist = [int(a*F(x)) for x in range(1,11)]; plist
[2, 4, 6, 8, 10, 1, 3, 5, 7, 9]
This corresponds to the permutation (1, 2, 4, 8, 5, 10, 9, 7, 3, 6)
(acting the set `\{1,2,...,10\}`) and to the partition
[10].
::
sage: p = PermutationGroupElement('(1, 2, 4, 8, 5, 10, 9, 7, 3, 6)')
sage: p.sign()
-1
sage: Partition([10]).sign()
-1
sage: kronecker_symbol(11,2)
-1
Now replace `2` by `3`::
sage: plist = [int(F(3*x)) for x in range(1,11)]; plist
[3, 6, 9, 1, 4, 7, 10, 2, 5, 8]
sage: list(range(1, 11))
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
sage: p = PermutationGroupElement('(3,4,8,7,9)')
sage: p.sign()
1
sage: kronecker_symbol(3,11)
1
sage: Partition([5,1,1,1,1,1]).sign()
1
In both cases, Zolotarev holds.
REFERENCES:
- :wikipedia:`Zolotarev%27s_lemma`
"""
return (-1)**(self.size()-self.length())
def k_size(self, k):
r"""
Given a partition ``self`` and a ``k``, return the size of the
`k`-boundary.
This is the same as the length method
:meth:`sage.combinat.core.Core.length` of the
:class:`sage.combinat.core.Core` object, with the exception that here we
don't require ``self`` to be a `k+1`-core.
EXAMPLES::
sage: Partition([2, 1, 1]).k_size(1)
2
sage: Partition([2, 1, 1]).k_size(2)
3
sage: Partition([2, 1, 1]).k_size(3)
3
sage: Partition([2, 1, 1]).k_size(4)
4
.. SEEALSO::
:meth:`k_boundary`, :meth:`SkewPartition.size`
"""
return self.k_boundary(k).size()
def boundary(self):
r"""
Return the integer coordinates of points on the boundary of ``self``.
For the following description, picture the Ferrer's diagram of ``self``
using the French convention. Recall that the French convention puts
the longest row on the bottom and the shortest row on the top. In
addition, interpret the Ferrer's diagram as 1 x 1 cells in the Euclidean
plane. So if ``self`` was the partition [3, 1], the lower-left vertices
of the 1 x 1 cells in the Ferrer's diagram would be (0, 0), (1, 0),
(2, 0), and (0, 1).
The boundary of a partition is the set `\{ \text{NE}(d) \mid \forall
d\:\text{diagonal} \}`. That is, for every diagonal line `y = x + b`
where `b \in \mathbb{Z}`, we find the northeasternmost (NE) point on
that diagonal which is also in the Ferrer's diagram.
The boundary will go from bottom-right to top-left.
EXAMPLES:
Consider the partition (1) depicted as a square on a cartesian plane
with vertices (0, 0), (1, 0), (1, 1), and (0, 1). Three of those
vertices in the appropriate order form the boundary::
sage: Partition([1]).boundary()
[(1, 0), (1, 1), (0, 1)]
The partition (3, 1) can be visualized as three squares on a cartesian
plane. The coordinates of the appropriate vertices form the boundary::
sage: Partition([3, 1]).boundary()
[(3, 0), (3, 1), (2, 1), (1, 1), (1, 2), (0, 2)]
TESTS::
sage: Partition([1]).boundary()
[(1, 0), (1, 1), (0, 1)]
sage: Partition([2, 1]).boundary()
[(2, 0), (2, 1), (1, 1), (1, 2), (0, 2)]
sage: Partition([3, 1]).boundary()
[(3, 0), (3, 1), (2, 1), (1, 1), (1, 2), (0, 2)]
sage: Partition([2, 1, 1]).boundary()
[(2, 0), (2, 1), (1, 1), (1, 2), (1, 3), (0, 3)]
.. SEEALSO::
:meth:`k_rim`. You might have been looking for :meth:`k_boundary`
instead.
"""
def horizontal_piece(xy, bdy):
(start_x, start_y) = xy
if not bdy:
h_piece = [(start_x, start_y)]
else:
stop_x = bdy[-1][0]
y = start_y # y never changes
h_piece = [(x, y) for x in range(start_x, stop_x)]
h_piece = list(reversed(h_piece))
return h_piece
bdy = []
for i, part in enumerate(self):
(cell_x, cell_y) = (part - 1, i)
(x, y) = (cell_x + 1, cell_y + 1)
bdy += horizontal_piece((x, y - 1), bdy)
bdy.append((x, y))
# add final "top-left" horizontal piece
(top_left_x, top_left_y) = (0, len(self))
bdy += horizontal_piece((top_left_x, top_left_y), bdy)
return bdy
def k_rim(self, k):
r"""
Return the ``k``-rim of ``self`` as a list of integer coordinates.
The `k`-rim of a partition is the "line between" (or "intersection of")
the `k`-boundary and the `k`-interior. (Section 2.3 of [HM2011]_)
It will be output as an ordered list of integer coordinates, where the
origin is `(0, 0)`. It will start at the top-left of the `k`-rim (using
French convention) and end at the bottom-right.
EXAMPLES:
Consider the partition (3, 1) split up into its 1-interior and
1-boundary:
.. image:: ../../media/k-rim.JPG
:height: 180px
:align: center
The line shown in bold is the 1-rim, and that information is equivalent
to the integer coordinates of the points that occur along that line::
sage: Partition([3, 1]).k_rim(1)
[(3, 0), (2, 0), (2, 1), (1, 1), (0, 1), (0, 2)]
TESTS::
sage: Partition([1]).k_rim(0)
[(1, 0), (1, 1), (0, 1)]
sage: Partition([3, 1]).k_rim(0)
[(3, 0), (3, 1), (2, 1), (1, 1), (1, 2), (0, 2)]
sage: Partition([3, 1]).k_rim(1)
[(3, 0), (2, 0), (2, 1), (1, 1), (0, 1), (0, 2)]
sage: Partition([3, 1]).k_rim(2)
[(3, 0), (2, 0), (1, 0), (1, 1), (0, 1), (0, 2)]
sage: Partition([3, 1]).k_rim(3)
[(3, 0), (2, 0), (1, 0), (1, 1), (0, 1), (0, 2)]
.. SEEALSO::
:meth:`k_interior`, :meth:`k_boundary`, :meth:`boundary`
"""
interior_rim = self.k_interior(k).boundary()
# get leftmost vertical line
interior_top_left_y = interior_rim[-1][1]
v_piece = [(0, y) for y in range(interior_top_left_y+1, len(self)+1)]
# get bottommost horizontal line
interior_bottom_right_x = interior_rim[0][0]
if self:
ptn_bottom_right_x = self[0]
else:
ptn_bottom_right_x = 0
h_piece = [(x, 0) for x in
range(ptn_bottom_right_x, interior_bottom_right_x, -1)]
# glue together with boundary
rim = h_piece + interior_rim + v_piece
return rim
def k_row_lengths(self, k):
r"""
Return the ``k``-row-shape of the partition ``self``.
This is equivalent to taking the `k`-boundary of the partition and then
returning the row-shape of that. We do *not* discard rows of length 0.
(Section 2.2 of [LLMS2013]_)
EXAMPLES::
sage: Partition([6, 1]).k_row_lengths(2)
[2, 1]
sage: Partition([4, 4, 4, 3, 2]).k_row_lengths(2)
[0, 1, 1, 1, 2]
.. SEEALSO::
:meth:`k_column_lengths`, :meth:`k_boundary`,
:meth:`SkewPartition.row_lengths`,
:meth:`SkewPartition.column_lengths`
"""
return self.k_boundary(k).row_lengths()
def k_column_lengths(self, k):
r"""
Return the ``k``-column-shape of the partition ``self``.
This is the 'column' analog of :meth:`k_row_lengths`.
EXAMPLES::
sage: Partition([6, 1]).k_column_lengths(2)
[1, 0, 0, 0, 1, 1]
sage: Partition([4, 4, 4, 3, 2]).k_column_lengths(2)
[1, 1, 1, 2]
.. SEEALSO::
:meth:`k_row_lengths`, :meth:`k_boundary`,
:meth:`SkewPartition.row_lengths`,
:meth:`SkewPartition.column_lengths`
"""
return self.k_boundary(k).column_lengths()
def has_rectangle(self, h, w):
r"""
Return ``True`` if the Ferrer's diagram of ``self`` has ``h``
(*or more*) rows of length ``w`` (*exactly*).
INPUT:
- ``h`` -- An integer `h \geq 1`. The (*minimum*) height of the
rectangle.
- ``w`` -- An integer `w \geq 1`. The width of the rectangle.
EXAMPLES::
sage: Partition([3, 3, 3, 3]).has_rectangle(2, 3)
True
sage: Partition([3, 3]).has_rectangle(2, 3)
True
sage: Partition([4, 3]).has_rectangle(2, 3)
False
sage: Partition([3]).has_rectangle(2, 3)
False
TESTS::
sage: Partition([1, 1, 1]).has_rectangle(4, 1)
False
sage: Partition([1, 1, 1]).has_rectangle(3, 1)
True
sage: Partition([1, 1, 1]).has_rectangle(2, 1)
True
sage: Partition([1, 1, 1]).has_rectangle(1, 2)
False
sage: Partition([3]).has_rectangle(1, 3)
True
sage: Partition([3]).has_rectangle(1, 2)
False
sage: Partition([3]).has_rectangle(2, 3)
False
.. SEEALSO::
:meth:`has_k_rectangle`
"""
assert h >= 1
assert w >= 1
num_rows_of_len_w = self.to_exp(w)[w - 1]
return num_rows_of_len_w >= h
def has_k_rectangle(self, k):
r"""
Return ``True`` if the Ferrer's diagram of ``self`` contains `k-i+1`
rows (*or more*) of length `i` (*exactly*) for any `i` in `[1, k]`.
This is mainly a helper function for :meth:`is_k_reducible` and
:meth:`is_k_irreducible`, the only difference between this function and
:meth:`is_k_reducible` being that this function allows any partition as
input while :meth:`is_k_reducible` requires the input to be `k`-bounded.
EXAMPLES:
The partition [1, 1, 1] has at least 2 rows of length 1::
sage: Partition([1, 1, 1]).has_k_rectangle(2)
True
The partition [1, 1, 1] does *not* have 4 rows of length 1, 3 rows of
length 2, 2 rows of length 3, nor 1 row of length 4::
sage: Partition([1, 1, 1]).has_k_rectangle(4)
False
TESTS::
sage: Partition([1]).has_k_rectangle(1)
True
sage: Partition([1]).has_k_rectangle(2)
False
sage: Partition([1, 1, 1]).has_k_rectangle(3)
True
sage: Partition([1, 1, 1]).has_k_rectangle(2)
True
sage: Partition([1, 1, 1]).has_k_rectangle(4)
False
sage: Partition([3]).has_k_rectangle(3)
True
sage: Partition([3]).has_k_rectangle(2)
False
sage: Partition([3]).has_k_rectangle(4)
False
.. SEEALSO::
:meth:`is_k_irreducible`, :meth:`is_k_reducible`,
:meth:`has_rectangle`
"""
return any(self.has_rectangle(a, b) for (a, b) in
[(k-i+1, i) for i in range(1, k+1)])
def is_k_bounded(self, k):
r"""
Return ``True`` if the partition ``self`` is bounded by ``k``.
EXAMPLES::
sage: Partition([4, 3, 1]).is_k_bounded(4)
True
sage: Partition([4, 3, 1]).is_k_bounded(7)
True
sage: Partition([4, 3, 1]).is_k_bounded(3)
False
"""
assert k >= 0
if self.is_empty():
return True
else:
return self[0] <= k
def is_k_reducible(self, k):
r"""
Return ``True`` if the partition ``self`` is ``k``-reducible.
A `k`-bounded partition is `k`-*reducible* if its Ferrer's diagram
contains `k-i+1` rows (or more) of length `i` (exactly) for some
`i \in [1, k]`.
(Also, a `k`-bounded partition is `k`-reducible if and only if it is not `k`-irreducible.)
EXAMPLES:
The partition [1, 1, 1] has at least 2 rows of length 1::
sage: Partition([1, 1, 1]).is_k_reducible(2)
True
The partition [1, 1, 1] does *not* have 4 rows of length 1, 3 rows of
length 2, 2 rows of length 3, nor 1 row of length 4::
sage: Partition([1, 1, 1]).is_k_reducible(4)
False
.. SEEALSO::
:meth:`is_k_irreducible`, :meth:`has_k_rectangle`
"""
if not self.is_k_bounded(k):
raise ValueError('we only talk about k-reducible / k-irreducible for k-bounded partitions')
return self.has_k_rectangle(k)
def is_k_irreducible(self, k):
r"""
Return ``True`` if the partition ``self`` is ``k``-irreducible.
A `k`-bounded partition is `k`-*irreducible* if its Ferrer's diagram
does *not* contain `k-i+1` rows (or more) of length `i` (exactly) for
every `i \in [1, k]`.
(Also, a `k`-bounded partition is `k`-irreducible if and only if it is
not `k`-reducible.)
EXAMPLES:
The partition [1, 1, 1] has at least 2 rows of length 1::
sage: Partition([1, 1, 1]).is_k_irreducible(2)
False
The partition [1, 1, 1] does *not* have 4 rows of length 1, 3 rows of
length 2, 2 rows of length 3, nor 1 row of length 4::
sage: Partition([1, 1, 1]).is_k_irreducible(4)
True
.. SEEALSO::
:meth:`is_k_reducible`, :meth:`has_k_rectangle`
"""
return not self.is_k_reducible(k)
def is_symmetric(self):
r"""
Return ``True`` if the partition ``self`` equals its own transpose.
EXAMPLES::
sage: Partition([2, 1]).is_symmetric()
True
sage: Partition([3, 1]).is_symmetric()
False
"""
return self == self.conjugate()
    def next_within_bounds(self, min=[], max=None, partition_type=None):
        r"""
        Get the next partition lexicographically that contains ``min`` and is
        contained in ``max``.

        INPUT:
        - ``min`` -- (default ``[]``, the empty partition) The
          'minimum partition' that ``next_within_bounds(self)`` must contain.
        - ``max`` -- (default ``None``) The 'maximum partition' that
          ``next_within_bounds(self)`` must be contained in. If set to ``None``,
          then there is no restriction.
        - ``partition_type`` -- (default ``None``) The type of partitions
          allowed. For example, 'strict' for strictly decreasing partitions, or
          ``None`` to allow any valid partition.

        EXAMPLES::
            sage: m = [1, 1]
            sage: M = [3, 2, 1]
            sage: Partition([1, 1]).next_within_bounds(min=m, max=M)
            [1, 1, 1]
            sage: Partition([1, 1, 1]).next_within_bounds(min=m, max=M)
            [2, 1]
            sage: Partition([2, 1]).next_within_bounds(min=m, max=M)
            [2, 1, 1]
            sage: Partition([2, 1, 1]).next_within_bounds(min=m, max=M)
            [2, 2]
            sage: Partition([2, 2]).next_within_bounds(min=m, max=M)
            [2, 2, 1]
            sage: Partition([2, 2, 1]).next_within_bounds(min=m, max=M)
            [3, 1]
            sage: Partition([3, 1]).next_within_bounds(min=m, max=M)
            [3, 1, 1]
            sage: Partition([3, 1, 1]).next_within_bounds(min=m, max=M)
            [3, 2]
            sage: Partition([3, 2]).next_within_bounds(min=m, max=M)
            [3, 2, 1]
            sage: Partition([3, 2, 1]).next_within_bounds(min=m, max=M) == None
            True
        .. SEEALSO::
            :meth:`next`
        """
        # make sure min <= self <= max
        if max is not None:
            assert _Partitions(max).contains(_Partitions(self))
        assert _Partitions(self).contains(_Partitions(min))
        # check for empty max
        if max is not None and _Partitions(max).is_empty():
            return None
        # convert partitions to lists to make them mutable
        p = list(self)
        min = list(min)
        # if there is no max, the next partition just tacks a '1' on to the end!
        if max is None:
            return _Partitions(p + [1])
        # extend p and min to include 0's at the end
        p = p + [0] * (len(max) - len(p))
        min = min + [0] * (len(max) - len(min))
        # finally, run the algo to find next_p
        next_p = copy(p)
        def condition(a, b):
            # Whether part value ``a`` may be incremented while staying
            # below/compatible with the preceding part ``b``.
            if partition_type in ('strict', 'strictly decreasing'):
                return a < b - 1
            elif partition_type in (None, 'weak', 'weakly decreasing'):
                return a < b
            else:
                raise ValueError('unrecognized partition type')
        # Scan from the last row upward, looking for the first row that can
        # be incremented; rows below the increment point are reset to min.
        for r in range(len(p) - 1, -1, -1):
            if r == 0:
                # Topmost row: only the max bound constrains it.
                if (max is None or p[r] < max[r]):
                    next_p[r] += 1
                    break
                else:
                    return None
            else:
                if (max is None or p[r] < max[r]) and condition(p[r], p[r-1]):
                    next_p[r] += 1
                    break
                else:
                    next_p[r] = min[r]
                    continue
        return _Partitions(next_p)
def row_standard_tableaux(self):
"""
Return the :class:`row standard tableaux
<sage.combinat.tableau.RowStandardTableaux>` of shape ``self``.
EXAMPLES::
sage: Partition([3,2,2,1]).row_standard_tableaux()
Row standard tableaux of shape [3, 2, 2, 1]
"""
return tableau.RowStandardTableaux(self)
def standard_tableaux(self):
"""
Return the :class:`standard tableaux<StandardTableaux>`
of shape ``self``.
EXAMPLES::
sage: Partition([3,2,2,1]).standard_tableaux()
Standard tableaux of shape [3, 2, 2, 1]
"""
return tableau.StandardTableaux(self)
def up(self):
r"""
Return a generator for partitions that can be obtained from ``self``
by adding a cell.
EXAMPLES::
sage: list(Partition([2,1,1]).up())
[[3, 1, 1], [2, 2, 1], [2, 1, 1, 1]]
sage: list(Partition([3,2]).up())
[[4, 2], [3, 3], [3, 2, 1]]
sage: [p for p in Partition([]).up()]
[[1]]
"""
p = self
previous = p.get_part(0) + 1
for i, current in enumerate(p):
if current < previous:
yield Partition(p[:i] + [current + 1] + p[i + 1:])
previous = current
yield Partition(p + [1])
def up_list(self):
"""
Return a list of the partitions that can be formed from ``self`` by
adding a cell.
EXAMPLES::
sage: Partition([2,1,1]).up_list()
[[3, 1, 1], [2, 2, 1], [2, 1, 1, 1]]
sage: Partition([3,2]).up_list()
[[4, 2], [3, 3], [3, 2, 1]]
sage: Partition([]).up_list()
[[1]]
"""
return list(self.up())
def down(self):
r"""
Return a generator for partitions that can be obtained from ``self``
by removing a cell.
EXAMPLES::
sage: [p for p in Partition([2,1,1]).down()]
[[1, 1, 1], [2, 1]]
sage: [p for p in Partition([3,2]).down()]
[[2, 2], [3, 1]]
sage: [p for p in Partition([3,2,1]).down()]
[[2, 2, 1], [3, 1, 1], [3, 2]]
TESTS:
We check that :trac:`11435` is fixed::
sage: Partition([]).down_list() #indirect doctest
[]
"""
p = self
l = len(p)
for i in range(l-1):
if p[i] > p[i+1]:
yield Partition(p[:i] + [ p[i]-1 ] + p[i+1:])
if l >= 1:
last = p[-1]
if last == 1:
yield Partition(p[:-1])
else:
yield Partition(p[:-1] + [ p[-1] - 1 ])
def down_list(self):
"""
Return a list of the partitions that can be obtained from ``self``
by removing a cell.
EXAMPLES::
sage: Partition([2,1,1]).down_list()
[[1, 1, 1], [2, 1]]
sage: Partition([3,2]).down_list()
[[2, 2], [3, 1]]
sage: Partition([3,2,1]).down_list()
[[2, 2, 1], [3, 1, 1], [3, 2]]
sage: Partition([]).down_list() #checks :trac:`11435`
[]
"""
return [p for p in self.down()]
@combinatorial_map(name="cell poset")
def cell_poset(self, orientation="SE"):
"""
Return the Young diagram of ``self`` as a poset. The optional
keyword variable ``orientation`` determines the order relation
of the poset.
The poset always uses the set of cells of the Young diagram
of ``self`` as its ground set. The order relation of the poset
depends on the ``orientation`` variable (which defaults to
``"SE"``). Concretely, ``orientation`` has to be specified to
one of the strings ``"NW"``, ``"NE"``, ``"SW"``, and ``"SE"``,
standing for "northwest", "northeast", "southwest" and
"southeast", respectively. If ``orientation`` is ``"SE"``, then
the order relation of the poset is such that a cell `u` is
greater or equal to a cell `v` in the poset if and only if `u`
lies weakly southeast of `v` (this means that `u` can be
reached from `v` by a sequence of south and east steps; the
sequence is allowed to consist of south steps only, or of east
steps only, or even be empty). Similarly the order relation is
defined for the other three orientations. The Young diagram is
supposed to be drawn in English notation.
The elements of the poset are the cells of the Young diagram
of ``self``, written as tuples of zero-based coordinates (so
that `(3, 7)` stands for the `8`-th cell of the `4`-th row,
etc.).
EXAMPLES::
sage: p = Partition([3,3,1])
sage: Q = p.cell_poset(); Q
Finite poset containing 7 elements
sage: sorted(Q)
[(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0)]
sage: sorted(Q.maximal_elements())
[(1, 2), (2, 0)]
sage: Q.minimal_elements()
[(0, 0)]
sage: sorted(Q.upper_covers((1, 0)))
[(1, 1), (2, 0)]
sage: Q.upper_covers((1, 1))
[(1, 2)]
sage: P = p.cell_poset(orientation="NW"); P
Finite poset containing 7 elements
sage: sorted(P)
[(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0)]
sage: sorted(P.minimal_elements())
[(1, 2), (2, 0)]
sage: P.maximal_elements()
[(0, 0)]
sage: P.upper_covers((2, 0))
[(1, 0)]
sage: sorted(P.upper_covers((1, 2)))
[(0, 2), (1, 1)]
sage: sorted(P.upper_covers((1, 1)))
[(0, 1), (1, 0)]
sage: sorted([len(P.upper_covers(v)) for v in P])
[0, 1, 1, 1, 1, 2, 2]
sage: R = p.cell_poset(orientation="NE"); R
Finite poset containing 7 elements
sage: sorted(R)
[(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0)]
sage: R.maximal_elements()
[(0, 2)]
sage: R.minimal_elements()
[(2, 0)]
sage: sorted([len(R.upper_covers(v)) for v in R])
[0, 1, 1, 1, 1, 2, 2]
sage: R.is_isomorphic(P)
False
sage: R.is_isomorphic(P.dual())
False
Linear extensions of ``p.cell_poset()`` are in 1-to-1 correspondence
with standard Young tableaux of shape `p`::
sage: all( len(p.cell_poset().linear_extensions())
....: == len(p.standard_tableaux())
....: for n in range(8) for p in Partitions(n) )
True
This is not the case for northeast orientation::
sage: q = Partition([3, 1])
sage: q.cell_poset(orientation="NE").is_chain()
True
TESTS:
We check that the posets are really what they should be for size
up to `7`::
sage: def check_NW(n):
....: for p in Partitions(n):
....: P = p.cell_poset(orientation="NW")
....: for c in p.cells():
....: for d in p.cells():
....: if P.le(c, d) != (c[0] >= d[0]
....: and c[1] >= d[1]):
....: return False
....: return True
sage: all( check_NW(n) for n in range(8) )
True
sage: def check_NE(n):
....: for p in Partitions(n):
....: P = p.cell_poset(orientation="NE")
....: for c in p.cells():
....: for d in p.cells():
....: if P.le(c, d) != (c[0] >= d[0]
....: and c[1] <= d[1]):
....: return False
....: return True
sage: all( check_NE(n) for n in range(8) )
True
sage: def test_duality(n, ori1, ori2):
....: for p in Partitions(n):
....: P = p.cell_poset(orientation=ori1)
....: Q = p.cell_poset(orientation=ori2)
....: for c in p.cells():
....: for d in p.cells():
....: if P.lt(c, d) != Q.lt(d, c):
....: return False
....: return True
sage: all( test_duality(n, "NW", "SE") for n in range(8) )
True
sage: all( test_duality(n, "NE", "SW") for n in range(8) )
True
sage: all( test_duality(n, "NE", "SE") for n in range(4) )
False
"""
from sage.combinat.posets.posets import Poset
covers = {}
if orientation == "NW":
for i, row in enumerate(self):
if i == 0:
covers[(0, 0)] = []
for j in range(1, row):
covers[(0, j)] = [(0, j - 1)]
else:
covers[(i, 0)] = [(i - 1, 0)]
for j in range(1, row):
covers[(i, j)] = [(i - 1, j), (i, j - 1)]
elif orientation == "NE":
for i, row in enumerate(self):
if i == 0:
covers[(0, row - 1)] = []
for j in range(row - 1):
covers[(0, j)] = [(0, j + 1)]
else:
covers[(i, row - 1)] = [(i - 1, row - 1)]
for j in range(row - 1):
covers[(i, j)] = [(i - 1, j), (i, j + 1)]
elif orientation == "SE":
l = len(self) - 1
for i, row in enumerate(self):
if i == l:
covers[(i, row - 1)] = []
for j in range(row - 1):
covers[(i, j)] = [(i, j + 1)]
else:
next_row = self[i + 1]
if row == next_row:
covers[(i, row - 1)] = [(i + 1, row - 1)]
for j in range(row - 1):
covers[(i, j)] = [(i + 1, j), (i, j + 1)]
else:
covers[(i, row - 1)] = []
for j in range(next_row):
covers[(i, j)] = [(i + 1, j), (i, j + 1)]
for j in range(next_row, row - 1):
covers[(i, j)] = [(i, j + 1)]
elif orientation == "SW":
l = len(self) - 1
for i, row in enumerate(self):
if i == l:
covers[(i, 0)] = []
for j in range(1, row):
covers[(i, j)] = [(i, j - 1)]
else:
covers[(i, 0)] = [(i + 1, 0)]
next_row = self[i + 1]
for j in range(1, next_row):
covers[(i, j)] = [(i + 1, j), (i, j - 1)]
for j in range(next_row, row):
covers[(i, j)] = [(i, j - 1)]
return Poset(covers)
def frobenius_coordinates(self):
"""
Return a pair of sequences of Frobenius coordinates aka beta numbers
of the partition.
These are two strictly decreasing sequences of nonnegative integers
of the same length.
EXAMPLES::
sage: Partition([]).frobenius_coordinates()
([], [])
sage: Partition([1]).frobenius_coordinates()
([0], [0])
sage: Partition([3,3,3]).frobenius_coordinates()
([2, 1, 0], [2, 1, 0])
sage: Partition([9,1,1,1,1,1,1]).frobenius_coordinates()
([8], [6])
"""
mu = self
muconj = mu.conjugate() # Naive implementation
if len(mu) <= len(muconj):
a = [x for x in (val-i-1 for i, val in enumerate(mu)) if x>=0]
b = [x for x in (muconj[i]-i-1 for i in range(len(a))) if x>=0]
else:
b = [x for x in (val-i-1 for i, val in enumerate(muconj)) if x>=0]
a = [x for x in (mu[i]-i-1 for i in range(len(b))) if x>=0]
return (a,b)
def frobenius_rank(self):
r"""
Return the Frobenius rank of the partition ``self``.
The Frobenius rank of a partition
`\lambda = (\lambda_1, \lambda_2, \lambda_3, \cdots)` is
defined to be the largest `i` such that `\lambda_i \geq i`.
In other words, it is the number of cells on the main diagonal
of `\lambda`. In yet other words, it is the size of the largest
square fitting into the Young diagram of `\lambda`.
EXAMPLES::
sage: Partition([]).frobenius_rank()
0
sage: Partition([1]).frobenius_rank()
1
sage: Partition([3,3,3]).frobenius_rank()
3
sage: Partition([9,1,1,1,1,1]).frobenius_rank()
1
sage: Partition([2,1,1,1,1,1]).frobenius_rank()
1
sage: Partition([2,2,1,1,1,1]).frobenius_rank()
2
sage: Partition([3,2]).frobenius_rank()
2
sage: Partition([3,2,2]).frobenius_rank()
2
sage: Partition([8,4,4,4,4]).frobenius_rank()
4
sage: Partition([8,4,1]).frobenius_rank()
2
sage: Partition([3,3,1]).frobenius_rank()
2
"""
for i, x in enumerate(self):
if x <= i:
return i
return len(self)
def beta_numbers(self, length=None):
"""
Return the set of beta numbers corresponding to ``self``.
The optional argument ``length`` specifies the length of the beta set
(which must be at least the length of ``self``).
For more on beta numbers, see :meth:`frobenius_coordinates`.
EXAMPLES::
sage: Partition([4,3,2]).beta_numbers()
[6, 4, 2]
sage: Partition([4,3,2]).beta_numbers(5)
[8, 6, 4, 1, 0]
sage: Partition([]).beta_numbers()
[]
sage: Partition([]).beta_numbers(3)
[2, 1, 0]
sage: Partition([6,4,1,1]).beta_numbers()
[9, 6, 2, 1]
sage: Partition([6,4,1,1]).beta_numbers(6)
[11, 8, 4, 3, 1, 0]
sage: Partition([1,1,1]).beta_numbers()
[3, 2, 1]
sage: Partition([1,1,1]).beta_numbers(4)
[4, 3, 2, 0]
"""
true_length = len(self)
if length is None:
length = true_length
elif length < true_length:
raise ValueError("length must be at least the length of the partition")
beta = [l + length - i - 1 for (i, l) in enumerate(self)]
if length > true_length:
beta.extend(list(range(length-true_length-1,-1,-1)))
return beta
def crank(self):
r"""
Return the Dyson crank of ``self``.
The Dyson crank of a partition `\lambda` is defined as follows:
If `\lambda` contains at least one `1`, then the crank is
`\mu(\lambda) - \omega(\lambda)`, where `\omega(\lambda)` is the
number of `1`s in `\lambda`, and `\mu(\lambda)` is the number of
parts of `\lambda` larger than `\omega(\lambda)`. If `\lambda`
contains no `1`, then the crank is simply the largest part of
`\lambda`.
REFERENCES:
- [AG1988]_
EXAMPLES::
sage: Partition([]).crank()
0
sage: Partition([3,2,2]).crank()
3
sage: Partition([5,4,2,1,1]).crank()
0
sage: Partition([1,1,1]).crank()
-3
sage: Partition([6,4,4,3]).crank()
6
sage: Partition([6,3,3,1,1]).crank()
1
sage: Partition([6]).crank()
6
sage: Partition([5,1]).crank()
0
sage: Partition([4,2]).crank()
4
sage: Partition([4,1,1]).crank()
-1
sage: Partition([3,3]).crank()
3
sage: Partition([3,2,1]).crank()
1
sage: Partition([3,1,1,1]).crank()
-3
sage: Partition([2,2,2]).crank()
2
sage: Partition([2,2,1,1]).crank()
-2
sage: Partition([2,1,1,1,1]).crank()
-4
sage: Partition([1,1,1,1,1,1]).crank()
-6
"""
l = len(self)
if l == 0:
return 0
if self[-1] > 1:
return self[0]
ind_1 = self.index(1)
w = l - ind_1 # w is omega(self).
m = len([x for x in self if x > w])
return m - w
def t_completion(self, t):
r"""
Return the ``t``-completion of the partition ``self``.
If `\lambda = (\lambda_1, \lambda_2, \lambda_3, \ldots)` is a
partition and `t` is an integer greater or equal to
`\left\lvert \lambda \right\rvert + \lambda_1`, then the
`t`-*completion of* `\lambda` is defined as the partition
`(t - \left\lvert \lambda \right\rvert, \lambda_1, \lambda_2,
\lambda_3, \ldots)` of `t`. This partition is denoted by `\lambda[t]`
in [BOR2009]_, by `\lambda_{[t]}` in [BdVO2012]_, and by `\lambda(t)`
in [CO2010]_.
EXAMPLES::
sage: Partition([]).t_completion(0)
[]
sage: Partition([]).t_completion(1)
[1]
sage: Partition([]).t_completion(2)
[2]
sage: Partition([]).t_completion(3)
[3]
sage: Partition([2, 1]).t_completion(5)
[2, 2, 1]
sage: Partition([2, 1]).t_completion(6)
[3, 2, 1]
sage: Partition([4, 2, 2, 1]).t_completion(13)
[4, 4, 2, 2, 1]
sage: Partition([4, 2, 2, 1]).t_completion(19)
[10, 4, 2, 2, 1]
sage: Partition([4, 2, 2, 1]).t_completion(10)
Traceback (most recent call last):
...
ValueError: 10-completion is not defined
sage: Partition([4, 2, 2, 1]).t_completion(5)
Traceback (most recent call last):
...
ValueError: 5-completion is not defined
"""
if self._list and t < self.size() + self._list[0]:
raise ValueError("{}-completion is not defined".format(t))
return Partition([t - self.size()] + self._list)
    def larger_lex(self, rhs):
        """
        Return ``True`` if ``self`` is strictly larger than ``rhs`` in
        lexicographic order, and ``False`` otherwise.

        The comparison is plain sequence lexicographic order on the part
        lists (delegated to ``CombinatorialElement.__gt__``), not
        dominance order.

        EXAMPLES::

            sage: p = Partition([3,2])
            sage: p.larger_lex([3,1])
            True
            sage: p.larger_lex([1,4])
            True
            sage: p.larger_lex([3,2,1])
            False
            sage: p.larger_lex([5])
            False
            sage: p.larger_lex([3,1,1,1,1,1,1,1])
            True
        """
        return CombinatorialElement.__gt__(self, rhs)
def dominates(self, p2):
r"""
Return ``True`` if ``self`` dominates the partition ``p2``. Otherwise
it returns ``False``.
EXAMPLES::
sage: p = Partition([3,2])
sage: p.dominates([3,1])
True
sage: p.dominates([2,2])
True
sage: p.dominates([2,1,1])
True
sage: p.dominates([3,3])
False
sage: p.dominates([4])
False
sage: Partition([4]).dominates(p)
False
sage: Partition([]).dominates([1])
False
sage: Partition([]).dominates([])
True
sage: Partition([1]).dominates([])
True
"""
p1 = self
sum1 = 0
sum2 = 0
min_length = min(len(p1), len(p2))
if min_length == 0:
return not p2 # equivalent to len(p1) >= len(p2) = 0
for i in range(min_length):
sum1 += p1[i]
sum2 += p2[i]
if sum2 > sum1:
return False
return sum(p1) >= sum(p2)
def cells(self):
"""
Return the coordinates of the cells of ``self``.
EXAMPLES::
sage: Partition([2,2]).cells()
[(0, 0), (0, 1), (1, 0), (1, 1)]
sage: Partition([3,2]).cells()
[(0, 0), (0, 1), (0, 2), (1, 0), (1, 1)]
"""
res = []
for i in range(len(self)):
for j in range(self[i]):
res.append( (i,j) )
return res
def generalized_pochhammer_symbol(self, a, alpha):
r"""
Return the generalized Pochhammer symbol
`(a)_{self}^{(\alpha)}`. This is the product over all
cells `(i,j)` in ``self`` of `a - (i-1) / \alpha + j - 1`.
EXAMPLES::
sage: Partition([2,2]).generalized_pochhammer_symbol(2,1)
12
"""
res = 1
for (i,j) in self.cells():
res *= (a - (i-1)/alpha + j-1)
return res
def get_part(self, i, default=Integer(0)):
r"""
Return the `i^{th}` part of ``self``, or ``default`` if it does
not exist.
EXAMPLES::
sage: p = Partition([2,1])
sage: p.get_part(0), p.get_part(1), p.get_part(2)
(2, 1, 0)
sage: p.get_part(10,-1)
-1
sage: Partition([]).get_part(0)
0
"""
if i < len(self._list):
return self._list[i]
else:
return default
    @combinatorial_map(name="partition to minimal Dyck word")
    def to_dyck_word(self, n=None):
        r"""
        Return the ``n``-Dyck word whose corresponding partition is
        ``self`` (or, if ``n`` is not specified, the `n`-Dyck word with
        smallest such `n`).

        An `n`-Dyck word (`n` open and `n` close symbols) draws a lattice
        path in the northeastern half of an `n \times n` square; the
        region northeast of the path is a partition (see
        :meth:`~sage.combinat.dyck_word.DyckWord.to_partition`).  For a
        given `\lambda` and `n`, at most one such word exists; it exists
        iff `\lambda_i + i \leq n` for all `i`.  When `n` is omitted, the
        minimal `n` is used, which makes the path touch the diagonal.

        EXAMPLES::

            sage: Partition([2,2]).to_dyck_word()
            [1, 1, 0, 0, 1, 1, 0, 0]
            sage: Partition([2,2]).to_dyck_word(5)
            [1, 1, 1, 0, 0, 1, 1, 0, 0, 0]
            sage: Partition([6,3,1]).to_dyck_word()
            [1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0]
            sage: Partition([]).to_dyck_word(3)
            [1, 1, 1, 0, 0, 0]

        The partition corresponding to ``self.dyck_word()`` is ``self``
        indeed::

            sage: all( p.to_dyck_word().to_partition() == p
            ....:      for p in Partitions(5) )
            True
        """
        from sage.combinat.dyck_word import DyckWord
        # Empty partition: the word is n opens followed by n closes.
        if not self._list:
            if n is None:
                return DyckWord([])
            return DyckWord([1]*n + [0]*n)
        list_of_word = []
        if n is None:
            # Smallest n for which an n-Dyck word exists: max(lambda_i + i) + 1
            # over 0-based row indices i.
            n = max(i + l + 1 for (i, l) in enumerate(self))
            # This n is also max(i+j for (i,j) in self.cells()) + 2.
        # Opening steps above the first (deepest) corner of the path.
        list_of_word.extend([1]*(n-self.length()))
        # Walk the boundary of the diagram from the last row upwards,
        # emitting the horizontal run (0s) of each corner followed by a
        # vertical step (1); rows are renormalized after each corner.
        copy_part = list(self)
        while copy_part:
            c = copy_part.pop()
            list_of_word.extend([0]*c)
            for i in range(len(copy_part)):
                copy_part[i] -= c
            list_of_word.append(1)
        # Closing steps to the right of the first row.
        list_of_word.extend([0]*(n-self[0]))
        return DyckWord(list_of_word)
@combinatorial_map(order=2, name="conjugate partition")
def conjugate(self):
"""
Return the conjugate partition of the partition ``self``. This
is also called the associated partition or the transpose in the
literature.
EXAMPLES::
sage: Partition([2,2]).conjugate()
[2, 2]
sage: Partition([6,3,1]).conjugate()
[3, 2, 2, 1, 1, 1]
The conjugate partition is obtained by transposing the Ferrers
diagram of the partition (see :meth:`.ferrers_diagram`)::
sage: print(Partition([6,3,1]).ferrers_diagram())
******
***
*
sage: print(Partition([6,3,1]).conjugate().ferrers_diagram())
***
**
**
*
*
*
"""
if not self:
par = Partitions_n(0)
return par.element_class(par, [])
par = Partitions_n(sum(self))
return par.element_class(par, conjugate(self))
    def suter_diagonal_slide(self, n, exp=1):
        r"""
        Return the image of ``self`` in `Y_n` under Suter's diagonal
        slide `\sigma_n`, in the notation of [Sut2002]_.

        `Y_n` is the set of partitions whose `(0, 0)`-cell hook length is
        less than `n` (including the empty partition).  The map
        `\sigma_n` sends `(\lambda_1, \ldots, \lambda_m) \in Y_n` to

        .. MATH::

            (\lambda_2 + 1, \lambda_3 + 1, \ldots, \lambda_m + 1,
             \underbrace{1, 1, \ldots, 1}_{n - m - \lambda_1\text{ ones}}).

        By Theorem 2.1 of [Sut2002]_, the dihedral group `D_n` acts on
        `Y_n` with `\sigma_n` as the primitive rotation and conjugation
        (:meth:`conjugate()`) as the reflection.

        INPUT:

        - ``n`` -- nonnegative integer

        - ``exp`` -- (default: 1) how many times `\sigma_n` is applied;
          may be negative, since the slide has finite order

        OUTPUT:

        The partition `\sigma_n^{exp}(\lambda)`, assuming
        ``self`` `= \lambda \in Y_n`.

        EXAMPLES::

            sage: Partition([5,4,1]).suter_diagonal_slide(8)
            [5, 2]
            sage: Partition([5,4,1]).suter_diagonal_slide(9)
            [5, 2, 1]
            sage: Partition([]).suter_diagonal_slide(7)
            [1, 1, 1, 1, 1, 1]
            sage: Partition([]).suter_diagonal_slide(7, exp=-1)
            [6]
            sage: P7 = Partitions(7)
            sage: all( p == p.suter_diagonal_slide(9, exp=-1).suter_diagonal_slide(9)
            ....:      for p in P7 )
            True
            sage: all( p.suter_diagonal_slide(8).conjugate()
            ....:      == p.conjugate().suter_diagonal_slide(8, exp=-1)
            ....:      for p in P7 )
            True

        TESTS:

        Check for ``exp = 0``::

            sage: P = Partitions(4)
            sage: all(p == p.suter_diagonal_slide(7, 0) for p in P)
            True

        Check for invalid input::

            sage: p = Partition([2,1])
            sage: p.hook_length(0, 0)
            3
            sage: p.suter_diagonal_slide(2)
            Traceback (most recent call last):
            ...
            ValueError: the hook length must be less than n
        """
        # Check for valid input: hook length of (0,0) is len + first part - 1.
        if len(self) > 0 and len(self) + self._list[0] > n:   # >, not >=, since we double count the (0,0) cell
            raise ValueError("the hook length must be less than n")
        ret = self
        # Reduce exp modulo the order of the slide (at most n), and pick
        # the representative of smallest absolute value so we iterate the
        # cheaper direction.
        exp = exp % n # It is at most order n
        if exp > n / 2:
            exp -= n
        while exp != 0:
            leng = len(ret)
            if exp > 0:
                # Suter's map \sigma_n
                if leng == 0:   # Taking extra care about the empty partition.
                    ret = Partition([1] * (n - 1))
                    exp -= 1
                    continue
                # Drop the first part, add 1 to the rest, pad with ones.
                res = [i + 1 for i in ret._list[1:]]
                res += [1] * (n - leng - ret._list[0])
                ret = Partition(res)
                exp -= 1
            else: # exp < 0 since if exp == 0, we would exit the while loop
                # inverse map \sigma_n^{-1}
                if leng == 0:   # Taking extra care about the empty partition.
                    ret = Partition([n - 1])
                    exp += 1
                    continue
                # Prepend the new first part, subtract 1 from the old parts,
                # discarding the trailing ones.
                res = [n - leng - 1]
                res.extend([i - 1 for i in ret._list if i > 1])
                ret = Partition(res)
                exp += 1
        return ret
    @combinatorial_map(name="reading tableau")
    def reading_tableau(self):
        r"""
        Return the RSK recording tableau of the reading word of the
        (standard) tableau `T` labeled down (in English convention)
        each column to the shape of ``self``.

        For an example of the tableau `T`, consider the partition
        `\lambda = (3,2,1)`, then we have::

            1 4 6
            2 5
            3

        For more, see :func:`~sage.combinat.rsk.RSK()`.

        EXAMPLES::

            sage: Partition([3,2,1]).reading_tableau()
            [[1, 3, 6], [2, 5], [4]]
        """
        # The first standard tableau of this shape is exactly the
        # column-reading tableau T described above.
        st = tableau.StandardTableaux(self).first()
        return st.reading_word_permutation().right_tableau()
@combinatorial_map(name="initial tableau")
def initial_tableau(self):
r"""
Return the :class:`standard tableau<StandardTableau>` which has the
numbers `1, 2, \ldots, n` where `n` is the :meth:`size` of ``self``
entered in order from left to right along the rows of each component,
where the components are ordered from left to right.
EXAMPLES::
sage: Partition([3,2,2]).initial_tableau()
[[1, 2, 3], [4, 5], [6, 7]]
"""
sigma = list(accumulate([1] + self._list))
tab = [list(range(sigma[i], sigma[i + 1]))
for i in range(len(sigma) - 1)]
return tableau.StandardTableau(tab)
    def initial_column_tableau(self):
        r"""
        Return the initial column tableau of shape ``self``.

        This is the standard tableau with the numbers `1` to `n` (where
        `n` is the :meth:`size` of ``self``) entered in order from top to
        bottom, and then left to right, down the columns of ``self``.

        EXAMPLES::

            sage: Partition([3,2]).initial_column_tableau()
            [[1, 3, 5], [2, 4]]
        """
        # Filling columns of self is the same as filling rows of the
        # conjugate shape and transposing back.
        return self.conjugate().initial_tableau().conjugate()
    def garnir_tableau(self, *cell):
        r"""
        Return the Garnir tableau of shape ``self`` corresponding to the
        cell ``cell``.  If ``cell`` `= (a,c)` then `(a+1,c)` must belong
        to the diagram of ``self``.

        The Garnir tableaux determine the "straightening" rules for the
        Specht modules over an arbitrary ring; they are the "first"
        non-standard tableaux which arise when acting by simple
        transpositions.  If `(a,c)` is a cell not at the bottom of its
        column, the corresponding Garnir tableau has `1, 2, \ldots, n`
        entered in order from left to right along the rows up to
        `(a,c-1)`, then along `(a+1,1)` to `(a+1,c)`, then `(a,c)` to the
        end of row `a`, and then continuing as usual.

        .. NOTE::

            The function also sets ``g._garnir_cell``, where ``g`` is the
            resulting Garnir tableau, equal to ``cell``, which is used by
            some other functions.

        EXAMPLES::

            sage: g = Partition([5,3,3,2]).garnir_tableau((0,2)); g.pp()
              1  2  6  7  8
              3  4  5
              9 10 11
             12 13
            sage: g.is_row_strict(); g.is_column_strict()
            True
            False
            sage: Partition([5,3,3,2]).garnir_tableau(2,2).pp()
            Traceback (most recent call last):
            ...
            ValueError: (row+1, col) must be inside the diagram

        .. SEEALSO::

            - :meth:`top_garnir_tableau`
        """
        # Accept either garnir_tableau(row, col) or garnir_tableau((row, col)).
        try:
            (row, col) = cell
        except ValueError:
            (row, col) = cell[0]
        if row + 1 >= len(self) or col >= self[row+1]:
            raise ValueError('(row+1, col) must be inside the diagram')
        # Start from the row-filled initial tableau and rearrange the
        # entries of the Garnir belt in rows `row` and `row+1`.
        g=self.initial_tableau().to_list()
        a=g[row][col]
        g[row][col:] = list(range(a+col+1,g[row+1][col]+1))
        g[row+1][:col+1] = list(range(a,a+col+1))
        g=tableau.Tableau(g)
        g._garnir_cell = (row, col)
        return g
    def top_garnir_tableau(self,e,cell):
        r"""
        Return the most dominant *standard* tableau which dominates the
        corresponding Garnir tableau and has the same ``e``-residue.

        The *top Garnir tableaux* arise in the graded representation
        theory of the symmetric groups and higher level Hecke algebras;
        they were introduced in [KMR2012]_.

        If the Garnir node is ``cell=(r,c)``, and `m` and `M` are the
        entries in the cells ``(r,c)`` and ``(r+1,c)`` of the initial
        tableau, then the top ``e``-Garnir tableau is obtained by
        inserting `m, m+1, \ldots, M` in order from left to right, first
        in the cells of row ``r+1`` which are not in the ``e``-Garnir
        belt, then in the belt cells of rows ``r`` and ``r+1``, and
        finally in the remaining cells of row ``r``.  All other entries
        are unchanged.

        If ``e = 0``, or if there are no ``e``-bricks in either row, the
        top Garnir tableau is the Garnir tableau itself.

        EXAMPLES::

            sage: Partition([5,4,3,2]).top_garnir_tableau(2,(0,2)).pp()
               1  2  4  5  8
               3  6  7  9
              10 11 12
              13 14
            sage: Partition([5,4,3,2]).top_garnir_tableau(0,(0,2)).pp()
               1  2  6  7  8
               3  4  5  9
              10 11 12
              13 14

        TESTS::

            sage: Partition([5,4,3,2]).top_garnir_tableau(0,(3,2)).pp()
            Traceback (most recent call last):
            ...
            ValueError: (4,2)=(row+1,col) must be inside the diagram

        REFERENCES:

        - [KMR2012]_
        """
        (row,col)=cell
        if row+1>=len(self) or col>=self[row+1]:
            raise ValueError('(%s,%s)=(row+1,col) must be inside the diagram' %(row+1,col))
        g=self.garnir_tableau(cell)   # start with the Garnir tableau and modify
        if e==0:
            return g  # no more dominant tableau of the same residue
        a=e*int((self[row]-col)/e)    # number of cells in the e-bricks in row `row`
        b=e*int((col+1)/e)            # number of cells in the e-bricks in row `row+1`
        if a==0 or b==0:
            # no e-bricks in one of the rows: the Garnir tableau is already top
            return g
        t=g.to_list()
        m=g[row+1][0]                 # smallest number in 0-Garnir belt
        # now we will put the number m,m+1,...,t[row+1][col] in order into t
        t[row][col:a+col]=[m+col-b+1+i for i in range(a)]
        t[row+1][col-b+1:col+1]=[m+a+col-b+1+i for i in range(b)]
        return tableau.StandardTableau(t)
@cached_method
def young_subgroup(self):
r"""
Return the corresponding Young, or parabolic, subgroup of the symmetric
group.
The Young subgroup of a partition
`\lambda = (\lambda_1, \lambda_2, \ldots, \lambda_{\ell})` of `n` is
the group:
.. MATH::
S_{\lambda_1} \times S_{\lambda_2} \times \cdots \times
S_{\lambda_{\ell}}
embedded into `S_n` in the standard way (i.e.,
the `S_{\lambda_i}` factor acts on the numbers from
`\lambda_1 + \lambda_2 + \cdots + \lambda_{i-1} + 1` to
`\lambda_1 + \lambda_2 + \cdots + \lambda_i`).
EXAMPLES::
sage: Partition([4,2]).young_subgroup()
Permutation Group with generators [(), (5,6), (3,4), (2,3), (1,2)]
"""
gens=[]
m=0
for row in self:
gens.extend([ (c,c+1) for c in range(m+1,m+row)])
m+=row
gens.append(list(range(1,self.size() + 1))) # to ensure we get a subgroup of Sym_n
return PermutationGroup( gens )
def young_subgroup_generators(self):
r"""
Return an indexing set for the generators of the corresponding Young
subgroup. Here the generators correspond to the simple adjacent
transpositions `s_i = (i \; i+1)`.
EXAMPLES::
sage: Partition([4,2]).young_subgroup_generators()
[1, 2, 3, 5]
sage: Partition([1,1,1]).young_subgroup_generators()
[]
sage: Partition([2,2]).young_subgroup_generators()
[1, 3]
.. SEEALSO::
:meth:`young_subgroup`
"""
gens = []
m = 0
for row in self:
gens.extend(list(range(m + 1, m + row)))
m += row
return gens
@cached_method
def _initial_degree(self, e, multicharge=(0,)):
r"""
Return the Brundan-Kleshchev-Wang degree of the initial row tableau
of shape ``self``.
This degree depends only the shape of the tableau and it is
used as the base case for computing the degrees of all tableau
of shape ``self``, which is why this method is cached. See
:meth:`sage.combinat.tableau.Tableau.degree` for more information.
EXAMPLES::
sage: Partition([5,3,2])._initial_degree(0)
0
sage: Partition([5,3,2])._initial_degree(2)
4
sage: Partition([5,3,2])._initial_degree(3)
2
sage: Partition([5,3,2])._initial_degree(4)
1
"""
if e == 0:
return ZZ.zero()
else:
return sum(m // e for m in self)
    def degree(self, e):
        r"""
        Return the ``e``-th degree of ``self``.

        The `e`-th degree of a partition `\lambda` is the sum of the
        `e`-th degrees of the standard tableaux of shape `\lambda`.  It
        is the exponent of `\Phi_e(q)` in the Gram determinant of the
        Specht module for a semisimple Iwahori-Hecke algebra of type `A`
        with parameter `q`.

        INPUT:

        - ``e`` -- an integer `e > 1`

        OUTPUT:

        A non-negative integer.

        EXAMPLES::

            sage: Partition([4,3]).degree(2)
            28
            sage: Partition([4,3]).degree(3)
            15
            sage: Partition([4,3]).degree(4)
            8
            sage: Partition([4,3]).degree(5)
            13
            sage: Partition([4,3]).degree(6)
            0

        Therefore, the Gram determinant of `S(5,3)` when the Hecke
        parameter `q` is "generic" is

        .. MATH::

            q^N \Phi_2(q)^{28} \Phi_3(q)^{15} \Phi_4(q)^8 \Phi_5(q)^{13}

        for some integer `N`.  Compare with :meth:`prime_degree`.
        """
        return sum(t.degree(e) for t in self.standard_tableaux())
def prime_degree(self, p):
r"""
Return the prime degree for the prime integer``p`` for ``self``.
INPUT:
- ``p`` -- a prime integer
OUTPUT:
A non-negative integer
The degree of a partition `\lambda` is the sum of the
`e`-:meth:`degree` of the standard tableaux of shape `\lambda`, for
`e` a power of the prime `p`. The prime degree gives the exponent of
`p` in the Gram determinant of the integral Specht module of the
symmetric group.
EXAMPLES::
sage: Partition([4,3]).prime_degree(2)
36
sage: Partition([4,3]).prime_degree(3)
15
sage: Partition([4,3]).prime_degree(5)
13
sage: Partition([4,3]).prime_degree(7)
0
Therefore, the Gram determinant of `S(5,3)` when `q = 1` is
`2^{36} 3^{15} 5^{13}`. Compare with :meth:`degree`.
"""
ps = [p]
while ps[-1] * p < self.size():
ps.append(ps[-1] * p)
return sum(t.degree(pk) for pk in ps for t in self.standard_tableaux())
def arm_length(self, i, j):
r"""
Return the length of the arm of cell `(i,j)` in ``self``.
The arm of cell `(i,j)` is the cells that appear to the right of
cell `(i,j)`.
The cell coordinates are zero-based, i. e., the northwesternmost
cell is `(0,0)`.
INPUT:
- ``i, j`` -- two integers
OUTPUT:
An integer or a ``ValueError``
EXAMPLES::
sage: p = Partition([2,2,1])
sage: p.arm_length(0, 0)
1
sage: p.arm_length(0, 1)
0
sage: p.arm_length(2, 0)
0
sage: Partition([3,3]).arm_length(0, 0)
2
sage: Partition([3,3]).arm_length(*[0,0])
2
"""
p = self
if i < len(p) and j < p[i]:
return p[i]-(j+1)
else:
raise ValueError("The cell is not in the diagram")
def arm_lengths(self, flat=False):
"""
Return a tableau of shape ``self`` where each cell is filled with
its arm length. The optional boolean parameter ``flat`` provides
the option of returning a flat list.
EXAMPLES::
sage: Partition([2,2,1]).arm_lengths()
[[1, 0], [1, 0], [0]]
sage: Partition([2,2,1]).arm_lengths(flat=True)
[1, 0, 1, 0, 0]
sage: Partition([3,3]).arm_lengths()
[[2, 1, 0], [2, 1, 0]]
sage: Partition([3,3]).arm_lengths(flat=True)
[2, 1, 0, 2, 1, 0]
"""
p = self
if not flat:
return [[pi - (j + 1) for j in range(pi)] for pi in p]
return [pi - (j + 1) for pi in p for j in range(pi)]
def arm_cells(self, i, j):
r"""
Return the list of the cells of the arm of cell `(i,j)` in ``self``.
The arm of cell `c = (i,j)` is the boxes that appear to the right of
`c`.
The cell coordinates are zero-based, i. e., the northwesternmost
cell is `(0,0)`.
INPUT:
- ``i, j`` -- two integers
OUTPUT:
A list of pairs of integers
EXAMPLES::
sage: Partition([4,4,3,1]).arm_cells(1,1)
[(1, 2), (1, 3)]
sage: Partition([]).arm_cells(0,0)
Traceback (most recent call last):
...
ValueError: The cell is not in the diagram
"""
p = self
if i < len(p) and j < p[i]:
return [ (i, x) for x in range(j+1, p[i]) ]
else:
raise ValueError("The cell is not in the diagram")
def leg_length(self, i, j):
"""
Return the length of the leg of cell `(i,j)` in ``self``.
The leg of cell `c = (i,j)` is defined to be the cells below `c`
(in English convention).
The cell coordinates are zero-based, i. e., the northwesternmost
cell is `(0,0)`.
INPUT:
- ``i, j`` -- two integers
OUTPUT:
An integer or a ``ValueError``
EXAMPLES::
sage: p = Partition([2,2,1])
sage: p.leg_length(0, 0)
2
sage: p.leg_length(0,1)
1
sage: p.leg_length(2,0)
0
sage: Partition([3,3]).leg_length(0, 0)
1
sage: cell = [0,0]; Partition([3,3]).leg_length(*cell)
1
"""
conj = self.conjugate()
if j < len(conj) and i < conj[j]:
return conj[j]-(i+1)
else:
raise ValueError("The cell is not in the diagram")
def leg_lengths(self, flat=False):
"""
Return a tableau of shape ``self`` with each cell filled in with
its leg length. The optional boolean parameter ``flat`` provides
the option of returning a flat list.
EXAMPLES::
sage: Partition([2,2,1]).leg_lengths()
[[2, 1], [1, 0], [0]]
sage: Partition([2,2,1]).leg_lengths(flat=True)
[2, 1, 1, 0, 0]
sage: Partition([3,3]).leg_lengths()
[[1, 1, 1], [0, 0, 0]]
sage: Partition([3,3]).leg_lengths(flat=True)
[1, 1, 1, 0, 0, 0]
"""
p = self
conj = p.conjugate()
if not flat:
return [[conj[j] - (i + 1) for j in range(pi)]
for i, pi in enumerate(p)]
return [conj[j] - (i + 1) for i, pi in enumerate(p)
for j in range(pi)]
def leg_cells(self, i, j):
r"""
Return the list of the cells of the leg of cell `(i,j)` in ``self``.
The leg of cell `c = (i,j)` is defined to be the cells below `c` (in
English convention).
The cell coordinates are zero-based, i. e., the northwesternmost
cell is `(0,0)`.
INPUT:
- ``i, j`` -- two integers
OUTPUT:
A list of pairs of integers
EXAMPLES::
sage: Partition([4,4,3,1]).leg_cells(1,1)
[(2, 1)]
sage: Partition([4,4,3,1]).leg_cells(0,1)
[(1, 1), (2, 1)]
sage: Partition([]).leg_cells(0,0)
Traceback (most recent call last):
...
ValueError: The cell is not in the diagram
"""
l = self.leg_length(i, j)
return [(x, j) for x in range(i+1, i+l+1)]
def attacking_pairs(self):
"""
Return a list of the attacking pairs of the Young diagram of
``self``.
A pair of cells `(c, d)` of a Young diagram (in English notation) is
said to be attacking if one of the following conditions holds:
1. `c` and `d` lie in the same row with `c` strictly to the west
of `d`.
2. `c` is in the row immediately to the south of `d`, and `c`
lies strictly east of `d`.
This particular method returns each pair `(c, d)` as a tuple,
where each of `c` and `d` is given as a tuple `(i, j)` with
`i` and `j` zero-based (so `i = 0` means that the cell lies
in the topmost row).
EXAMPLES::
sage: p = Partition([3, 2])
sage: p.attacking_pairs()
[((0, 0), (0, 1)),
((0, 0), (0, 2)),
((0, 1), (0, 2)),
((1, 0), (1, 1)),
((1, 1), (0, 0))]
sage: Partition([]).attacking_pairs()
[]
"""
attacking_pairs = []
for i, r in enumerate(self):
for j in range(r):
#c is in position (i,j)
#Find the d that satisfy condition 1
for k in range(j+1, r):
attacking_pairs.append( ((i,j),(i,k)) )
#Find the d that satisfy condition 2
if i == 0:
continue
for k in range(j):
attacking_pairs.append( ((i,j),(i-1,k)) )
return attacking_pairs
def dominated_partitions(self, rows=None):
"""
Return a list of the partitions dominated by `n`. If ``rows`` is
specified, then it only returns the ones whose number of rows
is at most ``rows``.
EXAMPLES::
sage: Partition([3,2,1]).dominated_partitions()
[[3, 2, 1], [3, 1, 1, 1], [2, 2, 2], [2, 2, 1, 1], [2, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1]]
sage: Partition([3,2,1]).dominated_partitions(rows=3)
[[3, 2, 1], [2, 2, 2]]
"""
#Naive implementation because iteration is so fast
n = sum(self)
P = Partitions_n(n)
if rows:
return [P(x) for x in ZS1_iterator_nk(n, rows) if self.dominates(x)]
else:
return [P(x) for x in ZS1_iterator(n) if self.dominates(x)]
def contains(self, x):
"""
Return ``True`` if ``x`` is a partition whose Ferrers diagram is
contained in the Ferrers diagram of ``self``.
EXAMPLES::
sage: p = Partition([3,2,1])
sage: p.contains([2,1])
True
sage: all(p.contains(mu) for mu in Partitions(3))
True
sage: all(p.contains(mu) for mu in Partitions(4))
False
"""
return len(self) >= len(x) and all(self[i] >= x[i] for i in range(len(x)))
def hook_product(self, a):
"""
Return the Jack hook-product.
EXAMPLES::
sage: Partition([3,2,1]).hook_product(x)
(2*x + 3)*(x + 2)^2
sage: Partition([2,2]).hook_product(x)
2*(x + 2)*(x + 1)
"""
nu = self.conjugate()
res = 1
for i in range(len(self)):
for j in range(self[i]):
res *= a*(self[i]-j-1)+nu[j]-i
return res
def hook_polynomial(self, q, t):
"""
Return the two-variable hook polynomial.
EXAMPLES::
sage: R.<q,t> = PolynomialRing(QQ)
sage: a = Partition([2,2]).hook_polynomial(q,t)
sage: a == (1 - t)*(1 - q*t)*(1 - t^2)*(1 - q*t^2)
True
sage: a = Partition([3,2,1]).hook_polynomial(q,t)
sage: a == (1 - t)^3*(1 - q*t^2)^2*(1 - q^2*t^3)
True
"""
nu = self.conjugate()
res = 1
for i in range(len(self)):
for j in range(self[i]):
res *= 1-q**(self[i]-j-1)*t**(nu[j]-i)
return res
def hook_length(self, i, j):
r"""
Return the length of the hook of cell `(i,j)` in ``self``.
The (length of the) hook of cell `(i,j)` of a partition `\lambda`
is
.. MATH::
\lambda_i + \lambda^{\prime}_j - i - j + 1
where `\lambda^{\prime}` is the conjugate partition. In English
convention, the hook length is the number of cells horizontally
to the right and vertically below the cell `(i,j)` (including
that cell).
EXAMPLES::
sage: p = Partition([2,2,1])
sage: p.hook_length(0, 0)
4
sage: p.hook_length(0, 1)
2
sage: p.hook_length(2, 0)
1
sage: Partition([3,3]).hook_length(0, 0)
4
sage: cell = [0,0]; Partition([3,3]).hook_length(*cell)
4
"""
return self.leg_length(i,j)+self.arm_length(i,j)+1
def hooks(self):
"""
Return a sorted list of the hook lengths in ``self``.
EXAMPLES::
sage: Partition([3,2,1]).hooks()
[5, 3, 3, 1, 1, 1]
"""
res = []
for row in self.hook_lengths():
res += row
res.sort(reverse=True)
return res
def hook_lengths(self):
r"""
Return a tableau of shape ``self`` with the cells filled in with the
hook lengths.
In each cell, put the sum of one plus the number of cells
horizontally to the right and vertically below the cell (the
hook length).
For example, consider the partition ``[3,2,1]`` of 6 with Ferrers
diagram::
# # #
# #
#
When we fill in the cells with the hook lengths, we obtain::
5 3 1
3 1
1
EXAMPLES::
sage: Partition([2,2,1]).hook_lengths()
[[4, 2], [3, 1], [1]]
sage: Partition([3,3]).hook_lengths()
[[4, 3, 2], [3, 2, 1]]
sage: Partition([3,2,1]).hook_lengths()
[[5, 3, 1], [3, 1], [1]]
sage: Partition([2,2]).hook_lengths()
[[3, 2], [2, 1]]
sage: Partition([5]).hook_lengths()
[[5, 4, 3, 2, 1]]
REFERENCES:
- http://mathworld.wolfram.com/HookLengthFormula.html
"""
p = self
conj = p.conjugate()
return [[p[i]-(i+1)+conj[j]-(j+1)+1 for j in range(p[i])] for i in range(len(p))]
def upper_hook(self, i, j, alpha):
r"""
Return the upper hook length of the cell `(i,j)` in ``self``.
When ``alpha = 1``, this is just the normal hook length.
The upper hook length of a cell `(i,j)` in a partition
`\kappa` is defined by
.. MATH::
h^*_\kappa(i,j) = \kappa^\prime_j - i + \alpha(\kappa_i - j + 1).
EXAMPLES::
sage: p = Partition([2,1])
sage: p.upper_hook(0,0,1)
3
sage: p.hook_length(0,0)
3
sage: [ p.upper_hook(i,j,x) for i,j in p.cells() ]
[2*x + 1, x, x]
"""
p = self
conj = self.conjugate()
return conj[j] - (i+1) + alpha*(p[i]-j)
def upper_hook_lengths(self, alpha):
r"""
Return a tableau of shape ``self`` with the cells filled in with the
upper hook lengths. When ``alpha = 1``, these are just the normal hook
lengths.
The upper hook length of a cell `(i,j)` in a partition
`\kappa` is defined by
.. MATH::
h^*_\kappa(i,j) = \kappa^\prime_j - i + \alpha(\kappa_i - j + 1).
EXAMPLES::
sage: Partition([3,2,1]).upper_hook_lengths(x)
[[3*x + 2, 2*x + 1, x], [2*x + 1, x], [x]]
sage: Partition([3,2,1]).upper_hook_lengths(1)
[[5, 3, 1], [3, 1], [1]]
sage: Partition([3,2,1]).hook_lengths()
[[5, 3, 1], [3, 1], [1]]
"""
p = self
conj = p.conjugate()
return [[conj[j] - (i+1) + alpha*(p[i]-j) for j in range(p[i])] for i in range(len(p))]
def lower_hook(self, i, j, alpha):
r"""
Return the lower hook length of the cell `(i,j)` in ``self``.
When ``alpha = 1``, this is just the normal hook length.
The lower hook length of a cell `(i,j)` in a partition
`\kappa` is defined by
.. MATH::
h_*^\kappa(i,j) = \kappa^\prime_j - i + 1 + \alpha(\kappa_i - j).
EXAMPLES::
sage: p = Partition([2,1])
sage: p.lower_hook(0,0,1)
3
sage: p.hook_length(0,0)
3
sage: [ p.lower_hook(i,j,x) for i,j in p.cells() ]
[x + 2, 1, 1]
"""
p = self
conj = self.conjugate()
return conj[j] - i + alpha*(p[i] - (j+1))
def lower_hook_lengths(self, alpha):
r"""
Return a tableau of shape ``self`` with the cells filled in with the
lower hook lengths. When ``alpha = 1``, these are just the normal hook
lengths.
The lower hook length of a cell `(i,j)` in a partition
`\kappa` is defined by
.. MATH::
h_*^\kappa(i,j) = \kappa^\prime_j - i + 1 + \alpha(\kappa_i - j).
EXAMPLES::
sage: Partition([3,2,1]).lower_hook_lengths(x)
[[2*x + 3, x + 2, 1], [x + 2, 1], [1]]
sage: Partition([3,2,1]).lower_hook_lengths(1)
[[5, 3, 1], [3, 1], [1]]
sage: Partition([3,2,1]).hook_lengths()
[[5, 3, 1], [3, 1], [1]]
"""
p = self
conj = p.conjugate()
return [[conj[j] - i + alpha*(p[i]-(j+1)) for j in range(p[i])] for i in range(len(p))]
def weighted_size(self):
r"""
Return the weighted size of ``self``.
The weighted size of a partition `\lambda` is
.. MATH::
\sum_i i \cdot \lambda_i,
where `\lambda = (\lambda_0, \lambda_1, \lambda_2, \cdots )`.
This also the sum of the leg length of every cell in `\lambda`, or
.. MATH::
\sum_i \binom{\lambda^{\prime}_i}{2}
where `\lambda^{\prime}` is the conjugate partition of `\lambda`.
EXAMPLES::
sage: Partition([2,2]).weighted_size()
2
sage: Partition([3,3,3]).weighted_size()
9
sage: Partition([5,2]).weighted_size()
2
sage: Partition([]).weighted_size()
0
"""
p = self
return sum([i*p[i] for i in range(len(p))])
def is_empty(self):
"""
Return ``True`` if ``self`` is the empty partition.
EXAMPLES::
sage: Partition([]).is_empty()
True
sage: Partition([2,1,1]).is_empty()
False
"""
return len(self) == 0
    def length(self):
        """
        Return the number of (nonzero) parts in ``self``.

        EXAMPLES::

            sage: Partition([3,2]).length()
            2
            sage: Partition([2,2,1]).length()
            3
            sage: Partition([]).length()
            0
        """
        # A partition stores only its nonzero parts, so this is just len().
        return len(self)
def to_exp(self, k=0):
"""
Return a list of the multiplicities of the parts of a partition.
Use the optional parameter ``k`` to get a return list of length at
least ``k``.
EXAMPLES::
sage: Partition([3,2,2,1]).to_exp()
[1, 2, 1]
sage: Partition([3,2,2,1]).to_exp(5)
[1, 2, 1, 0, 0]
TESTS::
sage: [parent(x) for x in Partition([3,2,2,1]).to_exp(5)]
[Integer Ring, Integer Ring, Integer Ring, Integer Ring, Integer Ring]
"""
p = self
if len(p) > 0:
k = max(k, p[0])
a = [ZZ.zero()] * k
for i in p:
a[i-1] += 1
return a
    def evaluation(self):
        r"""
        Return the evaluation of ``self``.

        The **commutative evaluation**, often shortened to **evaluation**, of
        a word (we think of a partition as a word in `\{1, 2, 3, \ldots\}`)
        is its image in the free commutative monoid. In other words,
        this counts how many occurrences there are of each letter.

        This is also is known as **Parikh vector** and **abelianization** and
        has the same output as :meth:`to_exp()`.

        EXAMPLES::

            sage: Partition([4,3,1,1]).evaluation()
            [2, 0, 1, 1]
        """
        # Identical to the multiplicity list; kept as a separate name for
        # compatibility with the word/abelianization terminology.
        return self.to_exp()
def to_exp_dict(self):
"""
Return a dictionary containing the multiplicities of the parts of
``self``.
EXAMPLES::
sage: p = Partition([4,2,2,1])
sage: d = p.to_exp_dict()
sage: d[4]
1
sage: d[2]
2
sage: d[1]
1
sage: 5 in d
False
"""
d = {}
for part in self:
d[part] = d.get(part, 0) + 1
return d
def centralizer_size(self, t=0, q=0):
r"""
Return the size of the centralizer of any permutation of cycle type
``self``.
If `m_i` is the multiplicity of `i` as a part of `p`, this is given by
.. MATH::
\prod_i m_i! i^{m_i}.
Including the optional parameters `t` and `q` gives the `q,t` analog,
which is the former product times
.. MATH::
\prod_{i=1}^{\mathrm{length}(p)} \frac{1 - q^{p_i}}{1 - t^{p_i}}.
See Section 1.3, p. 24, in [Ke1991]_.
EXAMPLES::
sage: Partition([2,2,1]).centralizer_size()
8
sage: Partition([2,2,2]).centralizer_size()
48
sage: Partition([2,2,1]).centralizer_size(q=2, t=3)
9/16
sage: Partition([]).centralizer_size()
1
sage: Partition([]).centralizer_size(q=2, t=4)
1
TESTS::
sage: Partition([2,2,2]).aut()
48
"""
size = prod(i**mi * factorial(mi)
for i, mi in self.to_exp_dict().items())
if t or q:
size *= prod((ZZ.one() - q ** j) / (ZZ.one() - t ** j)
for j in self)
return size
aut = centralizer_size
def content(self, r, c, multicharge=(0,)):
r"""
Return the content of the cell at row `r` and column `c`.
The content of a cell is `c - r`.
For consistency with partition tuples there is also an optional
``multicharge`` argument which is an offset to the usual content. By
setting the ``multicharge`` equal to the 0-element of the ring
`\ZZ/e\ZZ`, the corresponding `e`-residue will be returned. This is
the content modulo `e`.
The content (and residue) do not strictly depend on the partition,
however, this method is included because it is often useful in the
context of partitions.
EXAMPLES::
sage: Partition([2,1]).content(1,0)
-1
sage: p = Partition([3,2])
sage: sum([p.content(*c) for c in p.cells()])
2
and now we return the 3-residue of a cell::
sage: Partition([2,1]).content(1,0, multicharge=[IntegerModRing(3)(0)])
2
"""
return c - r + multicharge[0]
    def residue(self, r, c, l):
        r"""
        Return the ``l``-residue of the cell at row ``r`` and column ``c``.

        The `\ell`-residue of a cell is `c - r` modulo `\ell`.

        This does not strictly depend upon the partition, however, this method
        is included because it is often useful in the context of partitions.

        EXAMPLES::

            sage: Partition([2,1]).residue(1, 0, 3)
            2
        """
        # Content of the cell reduced modulo l.
        return (c - r) % l
@cached_method
def block(self, e, multicharge=(0,)):
r"""
Return a dictionary `\beta` that determines the block associated to
the partition ``self`` and the
:meth:`~sage.combinat.tableau_residues.ResidueSequence.quantum_characteristic` ``e``.
INPUT:
- ``e`` -- the quantum characteristic
- ``multicharge`` -- the multicharge (default `(0,)`)
OUTPUT:
- A dictionary giving the multiplicities of the residues in the
partition tuple ``self``
In more detail, the value ``beta[i]`` is equal to the
number of nodes of residue ``i``. This corresponds to
the positive root
.. MATH::
\sum_{i\in I} \beta_i \alpha_i \in Q^+,
a element of the positive root lattice of the corresponding
Kac-Moody algebra. See [DJM1998]_ and [BK2009]_ for more details.
This is a useful statistics because two Specht modules for a
Hecke algebra of type `A` belong to the same block if and only if they
correspond to same element `\beta` of the root lattice, given above.
We return a dictionary because when the quantum characteristic is `0`,
the Cartan type is `A_{\infty}`, in which case the simple roots are
indexed by the integers.
EXAMPLES::
sage: Partition([4,3,2]).block(0)
{-2: 1, -1: 2, 0: 2, 1: 2, 2: 1, 3: 1}
sage: Partition([4,3,2]).block(2)
{0: 4, 1: 5}
sage: Partition([4,3,2]).block(2, multicharge=(1,))
{0: 5, 1: 4}
sage: Partition([4,3,2]).block(3)
{0: 3, 1: 3, 2: 3}
sage: Partition([4,3,2]).block(4)
{0: 2, 1: 2, 2: 2, 3: 3}
"""
block = {}
Ie = IntegerModRing(e)
for (r,c) in self.cells():
i = Ie(multicharge[0] + c - r)
block[i] = block.get(i, 0) + 1
return block
def defect(self, e, multicharge=(0,)):
r"""
Return the ``e``-defect or the ``e``-weight of ``self``.
The `e`-defect is the number of (connected) `e`-rim hooks that
can be removed from the partition.
The defect of a partition is given by
.. MATH::
\text{defect}(\beta) = (\Lambda, \beta) - \tfrac12(\beta, \beta),
where `\Lambda = \sum_r \Lambda_{\kappa_r}` for the multicharge
`(\kappa_1, \ldots, \kappa_{\ell})` and
`\beta = \sum_{(r,c)} \alpha_{(c-r) \pmod e}`, with the sum
being over the cells in the partition.
INPUT:
- ``e`` -- the quantum characteristic
- ``multicharge`` -- the multicharge (default `(0,)`)
OUTPUT:
- a non-negative integer, which is the defect of the block
containing the partition ``self``
EXAMPLES::
sage: Partition([4,3,2]).defect(2)
3
sage: Partition([0]).defect(2)
0
sage: Partition([3]).defect(2)
1
sage: Partition([6]).defect(2)
3
sage: Partition([9]).defect(2)
4
sage: Partition([12]).defect(2)
6
sage: Partition([4,3,2]).defect(3)
3
sage: Partition([0]).defect(3)
0
sage: Partition([3]).defect(3)
1
sage: Partition([6]).defect(3)
2
sage: Partition([9]).defect(3)
3
sage: Partition([12]).defect(3)
4
TESTS::
sage: all(mu.core(e).size() + e * mu.defect(e) == 9
....: for mu in Partitions(9) for e in [2,3,4])
True
"""
beta = self.block(e, multicharge)
Ie = IntegerModRing(e)
return beta.get(multicharge[0], 0) - sum(beta[r]**2 - beta[r] * beta.get(Ie(r+1), 0)
for r in beta)
def contents_tableau(self, multicharge=(0,)):
"""
Return the tableau which has ``(k,r,c)``-th cell equal to the
content ``multicharge[k] - r + c`` of the cell.
EXAMPLES::
sage: Partition([2,1]).contents_tableau()
[[0, 1], [-1]]
sage: Partition([3,2,1,1]).contents_tableau().pp()
0 1 2
-1 0
-2
-3
sage: Partition([3,2,1,1]).contents_tableau([ IntegerModRing(3)(0)] ).pp()
0 1 2
2 0
1
0
"""
return tableau.Tableau([[multicharge[0]-r+c for c in range(self[r])]
for r in range(len(self))])
def is_restricted(self, e, multicharge=(0,)):
"""
Return ``True`` is this is an ``e``-restricted partition.
An `e`-restricted partition is a partition such that the
difference of consecutive parts is always strictly less
than `e`, where partitions are considered to have an infinite
number of `0` parts. I.e., the last part must be strictly
less than `e`.
EXAMPLES::
sage: Partition([4,3,3,2]).is_restricted(2)
False
sage: Partition([4,3,3,2]).is_restricted(3)
True
sage: Partition([4,3,3,2]).is_restricted(4)
True
sage: Partition([4]).is_restricted(4)
False
"""
return (not self
or ( self[-1] < e and all(self[r]-self[r+1] < e for r in range(len(self)-1)) ))
def is_regular(self, e, multicharge=(0,)):
"""
Return ``True`` is this is an ``e``-regular partition.
A partition is `e`-regular if it does not have `e` equal
non-zero parts.
EXAMPLES::
sage: Partition([4,3,3,3]).is_regular(2)
False
sage: Partition([4,3,3,3]).is_regular(3)
False
sage: Partition([4,3,3,3]).is_regular(4)
True
"""
return all(self[r] > self[r+e-1] for r in range(len(self)-e+1))
    def conjugacy_class_size(self):
        """
        Return the size of the conjugacy class of the symmetric group
        indexed by ``self``.

        EXAMPLES::

            sage: Partition([2,2,2]).conjugacy_class_size()
            15
            sage: Partition([2,2,1]).conjugacy_class_size()
            15
            sage: Partition([2,1,1]).conjugacy_class_size()
            6
        """
        # |conjugacy class| = n! / |centralizer| by orbit-stabilizer.
        return factorial(sum(self))/self.centralizer_size()
def corners(self):
r"""
Return a list of the corners of the partition ``self``.
A corner of a partition `\lambda` is a cell of the Young diagram
of `\lambda` which can be removed from the Young diagram while
still leaving a straight shape behind.
The entries of the list returned are pairs of the form `(i,j)`,
where `i` and `j` are the coordinates of the respective corner.
The coordinates are counted from `0`.
EXAMPLES::
sage: Partition([3,2,1]).corners()
[(0, 2), (1, 1), (2, 0)]
sage: Partition([3,3,1]).corners()
[(1, 2), (2, 0)]
sage: Partition([]).corners()
[]
"""
p = self
if p.is_empty():
return []
lcors = [[0,p[0]-1]]
nn = len(p)
if nn == 1:
return [tuple(c) for c in lcors]
lcors_index = 0
for i in range(1, nn):
if p[i] == p[i-1]:
lcors[lcors_index][0] += 1
else:
lcors.append([i,p[i]-1])
lcors_index += 1
return [tuple(c) for c in lcors]
inside_corners = corners
removable_cells = corners # for compatibility with partition tuples
    def corners_residue(self, i, l):
        r"""
        Return a list of the corners of the partition ``self`` having
        ``l``-residue ``i``.

        A corner of a partition `\lambda` is a cell of the Young diagram
        of `\lambda` which can be removed from the Young diagram while
        still leaving a straight shape behind. See :meth:`residue` for
        the definition of the ``l``-residue.

        The entries of the list returned are pairs of the form `(i,j)`,
        where `i` and `j` are the coordinates of the respective corner.
        The coordinates are counted from `0`.

        EXAMPLES::

            sage: Partition([3,2,1]).corners_residue(0, 3)
            [(1, 1)]
            sage: Partition([3,2,1]).corners_residue(1, 3)
            [(2, 0)]
            sage: Partition([3,2,1]).corners_residue(2, 3)
            [(0, 2)]
        """
        # Filter the corners by their residue modulo l.
        return [x for x in self.corners() if self.residue(*x, l=l) == i]
    inside_corners_residue = corners_residue
    removable_cells_residue = corners_residue
def outside_corners(self):
r"""
Return a list of the outside corners of the partition ``self``.
An outside corner (also called a cocorner) of a partition
`\lambda` is a cell on `\ZZ^2` which does not belong to
the Young diagram of `\lambda` but can be added to this Young
diagram to still form a straight-shape Young diagram.
The entries of the list returned are pairs of the form `(i,j)`,
where `i` and `j` are the coordinates of the respective corner.
The coordinates are counted from `0`.
EXAMPLES::
sage: Partition([2,2,1]).outside_corners()
[(0, 2), (2, 1), (3, 0)]
sage: Partition([2,2]).outside_corners()
[(0, 2), (2, 0)]
sage: Partition([6,3,3,1,1,1]).outside_corners()
[(0, 6), (1, 3), (3, 1), (6, 0)]
sage: Partition([]).outside_corners()
[(0, 0)]
"""
p = self
if p.is_empty():
return [(0,0)]
res = [ (0, p[0]) ]
for i in range(1, len(p)):
if p[i-1] != p[i]:
res.append((i,p[i]))
res.append((len(p), 0))
return res
addable_cells = outside_corners # for compatibility with partition tuples
    def outside_corners_residue(self, i, l):
        r"""
        Return a list of the outside corners of the partition ``self``
        having ``l``-residue ``i``.

        An outside corner (also called a cocorner) of a partition
        `\lambda` is a cell on `\ZZ^2` which does not belong to
        the Young diagram of `\lambda` but can be added to this Young
        diagram to still form a straight-shape Young diagram. See
        :meth:`residue` for the definition of the ``l``-residue.

        The entries of the list returned are pairs of the form `(i,j)`,
        where `i` and `j` are the coordinates of the respective corner.
        The coordinates are counted from `0`.

        EXAMPLES::

            sage: Partition([3,2,1]).outside_corners_residue(0, 3)
            [(0, 3), (3, 0)]
            sage: Partition([3,2,1]).outside_corners_residue(1, 3)
            [(1, 2)]
            sage: Partition([3,2,1]).outside_corners_residue(2, 3)
            [(2, 1)]
        """
        # Filter the outside corners by their residue modulo l.
        return [x for x in self.outside_corners() if self.residue(*x, l=l) == i]
    addable_cells_residue = outside_corners_residue
def rim(self):
r"""
Return the rim of ``self``.
The rim of a partition `\lambda` is defined as the cells which belong
to `\lambda` and which are adjacent to cells not in `\lambda`.
EXAMPLES:
The rim of the partition `[5,5,2,1]` consists of the cells marked with
``#`` below::
****#
*####
##
#
sage: Partition([5,5,2,1]).rim()
[(3, 0), (2, 0), (2, 1), (1, 1), (1, 2), (1, 3), (1, 4), (0, 4)]
sage: Partition([2,2,1]).rim()
[(2, 0), (1, 0), (1, 1), (0, 1)]
sage: Partition([2,2]).rim()
[(1, 0), (1, 1), (0, 1)]
sage: Partition([6,3,3,1,1]).rim()
[(4, 0), (3, 0), (2, 0), (2, 1), (2, 2), (1, 2), (0, 2), (0, 3), (0, 4), (0, 5)]
sage: Partition([]).rim()
[]
"""
p = self
res = []
prevLen = 1
for i in range(len(p)-1, -1, -1):
for c in range(prevLen-1, p[i]):
res.append((i,c))
prevLen = p[i]
return res
def outer_rim(self):
r"""
Return the outer rim of ``self``.
The outer rim of a partition `\lambda` is defined as the cells which do
not belong to `\lambda` and which are adjacent to cells in `\lambda`.
EXAMPLES:
The outer rim of the partition `[4,1]` consists of the cells marked
with ``#`` below::
****#
*####
##
::
sage: Partition([4,1]).outer_rim()
[(2, 0), (2, 1), (1, 1), (1, 2), (1, 3), (1, 4), (0, 4)]
sage: Partition([2,2,1]).outer_rim()
[(3, 0), (3, 1), (2, 1), (2, 2), (1, 2), (0, 2)]
sage: Partition([2,2]).outer_rim()
[(2, 0), (2, 1), (2, 2), (1, 2), (0, 2)]
sage: Partition([6,3,3,1,1]).outer_rim()
[(5, 0), (5, 1), (4, 1), (3, 1), (3, 2), (3, 3), (2, 3), (1, 3), (1, 4), (1, 5), (1, 6), (0, 6)]
sage: Partition([]).outer_rim()
[(0, 0)]
"""
p = self
res = []
prevLen = 0
for i in range(len(p)-1, -1, -1):
for c in range(prevLen, p[i]+1):
res.append((i+1,c))
prevLen = p[i]
res.append((0, prevLen))
return res
def zero_one_sequence(self):
r"""
Compute the finite `0-1` sequence of the partition.
The full `0-1` sequence is the sequence (infinite in both
directions) indicating the steps taken when following the
outer rim of the diagram of the partition. We use the convention
that in English convention, a 1 corresponds to an East step, and
a 0 corresponds to a North step.
Note that every full `0-1` sequence starts with infinitely many 0's and
ends with infinitely many 1's.
One place where these arise is in the affine symmetric group where
one takes an affine permutation `w` and every `i` such that
`w(i) \leq 0` corresponds to a 1 and `w(i) > 0` corresponds to a 0.
See pages 24-25 of [LLMSSZ2013]_ for connections to affine Grassmannian
elements (note there they use the French convention for their
partitions).
These are also known as **path sequences**, **Maya diagrams**,
**plus-minus diagrams**, **Comet code** [Sta-EC2]_, among others.
OUTPUT:
The finite `0-1` sequence is obtained from the full `0-1`
sequence by omitting all heading 0's and trailing 1's. The
output sequence is finite, starts with a 1 and ends with a
0 (unless it is empty, for the empty partition). Its length
is the sum of the first part of the partition with the
length of the partition.
EXAMPLES::
sage: Partition([5,4]).zero_one_sequence()
[1, 1, 1, 1, 0, 1, 0]
sage: Partition([]).zero_one_sequence()
[]
sage: Partition([2]).zero_one_sequence()
[1, 1, 0]
TESTS::
sage: all(Partitions().from_zero_one(mu.zero_one_sequence()) == mu for n in range(10) for mu in Partitions(n))
True
"""
tmp = [self[i]-i for i in range(len(self))]
return ([Integer(not (i in tmp)) for i in range(-len(self)+1,self.get_part(0)+1)])
    def core(self, length):
        r"""
        Return the ``length``-core of the partition -- in the literature
        the core is commonly referred to as the `k`-core, `p`-core,
        `r`-core, ... .

        The `r`-core of a partition `\lambda` can be obtained by
        repeatedly removing rim hooks of size `r` from (the Young diagram
        of) `\lambda` until this is no longer possible. The remaining
        partition is the core.

        EXAMPLES::

            sage: Partition([6,3,2,2]).core(3)
            [2, 1, 1]
            sage: Partition([]).core(3)
            []
            sage: Partition([8,7,7,4,1,1,1,1,1]).core(3)
            [2, 1, 1]

        TESTS::

            sage: Partition([3,3,3,2,1]).core(3)
            []
            sage: Partition([10,8,7,7]).core(4)
            []
            sage: Partition([21,15,15,9,6,6,6,3,3]).core(3)
            []
        """
        p = self
        # Pad with zero parts so the beta-number bookkeeping below works.
        remainder = len(p) % length
        part = p[:] + [0]*remainder
        # Convert to beta numbers (first-column hook lengths): add the
        # staircase len(part)-i to each part.
        part = [part[i-1] + len(part)-i for i in range(1, len(part)+1)]
        # Removing a rim hook of size `length` subtracts `length` from one
        # beta number; so for each residue class slide its beta numbers
        # down to the minimal values e, e+length, e+2*length, ...
        for e in range(length):
            k = e
            for i in reversed(range(1,len(part)+1)):
                if part[i-1] % length == e:
                    part[i-1] = k
                    k += length
        part.sort()
        part.reverse()
        # Convert the beta numbers back to a partition.
        part = [part[i-1]-len(part)+i for i in range(1, len(part)+1)]
        # Discard the zero parts to obtain the r-core.
        return Partition([x for x in part if x != 0])
    def quotient(self, length):
        r"""
        Return the quotient of the partition -- in the literature the
        quotient is commonly referred to as the `k`-quotient, `p`-quotient,
        `r`-quotient, ... .

        The `r`-quotient of a partition `\lambda` is a list of `r`
        partitions (labelled from `0` to `r-1`), constructed in the following
        way. Label each cell in the Young diagram of `\lambda` with its
        content modulo `r`. Let `R_i` be the set of rows ending in a cell
        labelled `i`, and `C_i` be the set of columns ending in a cell
        labelled `i`. Then the `j`-th component of the quotient of
        `\lambda` is the partition defined by intersecting `R_j` with
        `C_{j+1}`. (See Theorem 2.7.37 in [JK1981]_.)

        EXAMPLES::

            sage: Partition([7,7,5,3,3,3,1]).quotient(3)
            ([2], [1], [2, 2, 2])

        TESTS::

            sage: Partition([8,7,7,4,1,1,1,1,1]).quotient(3)
            ([2, 1], [2, 2], [2])
            sage: Partition([10,8,7,7]).quotient(4)
            ([2], [3], [2], [1])
            sage: Partition([6,3,3]).quotient(3)
            ([1], [1], [2])
            sage: Partition([3,3,3,2,1]).quotient(3)
            ([1], [1, 1], [1])
            sage: Partition([6,6,6,3,3,3]).quotient(3)
            ([2, 1], [2, 1], [2, 1])
            sage: Partition([21,15,15,9,6,6,6,3,3]).quotient(3)
            ([5, 2, 1], [5, 2, 1], [7, 3, 2])
            sage: Partition([21,15,15,9,6,6,3,3]).quotient(3)
            ([5, 2], [5, 2, 1], [7, 3, 1])
            sage: Partition([14,12,11,10,10,10,10,9,6,4,3,3,2,1]).quotient(5)
            ([3, 3], [2, 2, 1], [], [3, 3, 3], [1])
            sage: all(p == Partition(core=p.core(k), quotient=p.quotient(k))
            ....:     for i in range(10) for p in Partitions(i)
            ....:     for k in range(1,6))
            True
        """
        p = self
        # Pad with zero parts so the number of parts is divisible by length.
        remainder = len(p) % length
        part = p[:] + [0]*(length-remainder)
        # Convert to beta numbers by adding the staircase len(part)-i.
        part = [part[i-1] + len(part)-i for i in range(1, len(part)+1)]
        result = [None]*length
        # For each residue class e, the beta numbers congruent to e modulo
        # length determine (after rescaling by length) one component of
        # the quotient.
        for e in range(length):
            k = e
            tmp = []
            for i in reversed(range(len(part))):
                if part[i] % length == e:
                    tmp.append(ZZ((part[i]-k)//length))
                    k += length
            a = [i for i in tmp if i != 0]
            a.reverse()
            result[e] = a
        from .partition_tuple import PartitionTuple
        return PartitionTuple(result)  # tuple(map(Partition, result))
    def is_core(self, k):
        r"""
        Return ``True`` if the Partition ``self`` is a ``k``-core.

        A partition is said to be a *`k`-core* if it has no hooks of length
        `k`. Equivalently, a partition is said to be a `k`-core if it is its
        own `k`-core (where the latter is defined as in :meth:`core`).

        Visually, this can be checked by trying to remove border strips of size
        `k` from ``self``. If this is not possible, then ``self`` is a
        `k`-core.

        EXAMPLES:

        In the partition (2, 1), a hook length of 2 does not occur, but a hook
        length of 3 does::

            sage: p = Partition([2, 1])
            sage: p.is_core(2)
            True
            sage: p.is_core(3)
            False
            sage: q = Partition([12, 8, 5, 5, 2, 2, 1])
            sage: q.is_core(4)
            False
            sage: q.is_core(5)
            True
            sage: q.is_core(0)
            True

        .. SEEALSO::

            :meth:`core`, :class:`Core`
        """
        # A k-rim-hook can be removed iff some hook length equals k.
        return k not in self.hooks()
def k_interior(self, k):
r"""
Return the partition consisting of the cells of ``self`` whose hook
lengths are greater than ``k``.
EXAMPLES::
sage: p = Partition([3,2,1])
sage: p.hook_lengths()
[[5, 3, 1], [3, 1], [1]]
sage: p.k_interior(2)
[2, 1]
sage: p.k_interior(3)
[1]
sage: p = Partition([])
sage: p.k_interior(3)
[]
"""
return Partition([len([i for i in row if i > k])
for row in self.hook_lengths()])
    def k_boundary(self, k):
        r"""
        Return the skew partition formed by removing the cells of the
        ``k``-interior, see :meth:`k_interior`.

        EXAMPLES::

            sage: p = Partition([3,2,1])
            sage: p.k_boundary(2)
            [3, 2, 1] / [2, 1]
            sage: p.k_boundary(3)
            [3, 2, 1] / [1]
            sage: p = Partition([12,8,5,5,2,2,1])
            sage: p.k_boundary(4)
            [12, 8, 5, 5, 2, 2, 1] / [8, 5, 2, 2]
        """
        # The boundary consists of the cells of hook length at most k.
        return SkewPartition([self, self.k_interior(k)])
def add_cell(self, i, j = None):
r"""
Return a partition corresponding to ``self`` with a cell added in
row ``i``. (This does not change ``self``.)
EXAMPLES::
sage: Partition([3, 2, 1, 1]).add_cell(0)
[4, 2, 1, 1]
sage: cell = [4, 0]; Partition([3, 2, 1, 1]).add_cell(*cell)
[3, 2, 1, 1, 1]
"""
if j is None:
if i >= len(self):
j = 0
else:
j = self[i]
if (i,j) in self.outside_corners():
pl = self.to_list()
if i == len(pl):
pl.append(1)
else:
pl[i] += 1
return Partition(pl)
raise ValueError("[%s, %s] is not an addable cell"%(i,j))
def remove_cell(self, i, j = None):
"""
Return the partition obtained by removing a cell at the end of row
``i`` of ``self``.
EXAMPLES::
sage: Partition([2,2]).remove_cell(1)
[2, 1]
sage: Partition([2,2,1]).remove_cell(2)
[2, 2]
sage: #Partition([2,2]).remove_cell(0)
::
sage: Partition([2,2]).remove_cell(1,1)
[2, 1]
sage: #Partition([2,2]).remove_cell(1,0)
"""
if i >= len(self):
raise ValueError("i must be less than the length of the partition")
if j is None:
j = self[i] - 1
if (i,j) not in self.corners():
raise ValueError("[%d,%d] is not a corner of the partition" % (i,j))
if self[i] == 1:
return Partition(self[:-1])
else:
return Partition(self[:i] + [ self[i:i+1][0] - 1 ] + self[i+1:])
def k_irreducible(self, k):
r"""
Return the partition with all `r \times (k+1-r)` rectangles removed.
If ``self`` is a `k`-bounded partition, then this method will return the partition
where all rectangles of dimension `r \times (k+1-r)` for `1 \leq r \leq k`
have been deleted.
If ``self`` is not a `k`-bounded partition then the method will raise an error.
INPUT:
- ``k`` -- a non-negative integer
OUTPUT:
- a partition
EXAMPLES::
sage: Partition([3,2,2,1,1,1]).k_irreducible(4)
[3, 2, 2, 1, 1, 1]
sage: Partition([3,2,2,1,1,1]).k_irreducible(3)
[]
sage: Partition([3,3,3,2,2,2,2,2,1,1,1,1]).k_irreducible(3)
[2, 1]
"""
pexp = self.to_exp()
return Partition(sum(([r+1] for r in range(len(pexp)-1,-1,-1) for m in range(pexp[r] % (k-r))),[]))
    def k_skew(self, k):
        r"""
        Return the `k`-skew partition.

        The `k`-skew diagram of a `k`-bounded partition is the skew diagram
        denoted `\lambda/^k` satisfying the conditions:

        1. row `i` of `\lambda/^k` has length `\lambda_i`,

        2. no cell in `\lambda/^k` has hook-length exceeding `k`,

        3. every square above the diagram of `\lambda/^k` has hook
           length exceeding `k`.

        REFERENCES:

        - [LM2004]_

        EXAMPLES::

            sage: p = Partition([4,3,2,2,1,1])
            sage: p.k_skew(4)
            [9, 5, 3, 2, 1, 1] / [5, 2, 1]
        """
        if len(self) == 0:
            return SkewPartition([[],[]])
        if self[0] > k:
            raise ValueError("the partition must be %d-bounded" % k)
        # Recurse: build the k-skew of the partition without its first row,
        # then place the new top row on top of it.
        s = Partition(self[1:]).k_skew(k)
        s_inner = list(s.inner())
        s_outer = list(s.outer())
        s_conj_rl = s.conjugate().row_lengths()
        # The new row of length self[0] must end in a cell of hook length
        # at most k, i.e. start at the leftmost column holding at most
        # kdiff cells of the recursive skew.
        kdiff = k - self[0]
        if s_outer == []:
            spot = 0
        else:
            spot = s_outer[0]
        for i in range(len(s_conj_rl)):
            if s_conj_rl[i] <= kdiff:
                spot = i
                break
        outer = [ self[0] + spot ] + s_outer[:]
        if spot > 0:
            inner = [ spot ] + s_inner[:]
        else:
            inner = s_inner[:]
        return SkewPartition([outer, inner])
    def to_core(self, k):
        r"""
        Maps the `k`-bounded partition ``self`` to its corresponding `k+1`-core.

        See also :meth:`k_skew`.

        EXAMPLES::

            sage: p = Partition([4,3,2,2,1,1])
            sage: c = p.to_core(4); c
            [9, 5, 3, 2, 1, 1]
            sage: type(c)
            <class 'sage.combinat.core.Cores_length_with_category.element_class'>
            sage: c.to_bounded_partition() == p
            True
        """
        from sage.combinat.core import Core
        # The outer shape of the k-skew diagram is exactly the (k+1)-core.
        return Core(self.k_skew(k)[0],k+1)
def from_kbounded_to_reduced_word(self, k):
r"""
Maps a `k`-bounded partition to a reduced word for an element in
the affine permutation group.
This uses the fact that there is a bijection between `k`-bounded
partitions and `(k+1)`-cores and an action of the affine nilCoxeter
algebra of type `A_k^{(1)}` on `(k+1)`-cores as described in [LM2006b]_.
EXAMPLES::
sage: p=Partition([2,1,1])
sage: p.from_kbounded_to_reduced_word(2)
[2, 1, 2, 0]
sage: p=Partition([3,1])
sage: p.from_kbounded_to_reduced_word(3)
[3, 2, 1, 0]
sage: p.from_kbounded_to_reduced_word(2)
Traceback (most recent call last):
...
ValueError: the partition must be 2-bounded
sage: p=Partition([])
sage: p.from_kbounded_to_reduced_word(2)
[]
"""
p=self.k_skew(k)[0]
result = []
while not p.is_empty():
corners = p.corners()
c = p.content(corners[0][0],corners[0][1])%(k+1)
result.append(Integer(c))
list = [x for x in corners if p.content(x[0],x[1])%(k+1) ==c]
for x in list:
p = p.remove_cell(x[0])
return result
    def from_kbounded_to_grassmannian(self, k):
        r"""
        Maps a `k`-bounded partition to a Grassmannian element in
        the affine Weyl group of type `A_k^{(1)}`.

        For details, see the documentation of the method
        :meth:`from_kbounded_to_reduced_word` .

        EXAMPLES::

            sage: p=Partition([2,1,1])
            sage: p.from_kbounded_to_grassmannian(2)
            [-1  1  1]
            [-2  2  1]
            [-2  1  2]
            sage: p=Partition([])
            sage: p.from_kbounded_to_grassmannian(2)
            [1 0 0]
            [0 1 0]
            [0 0 1]
        """
        # Build the Weyl group element from the reduced word of self.
        return WeylGroup(['A',k,1]).from_reduced_word(self.from_kbounded_to_reduced_word(k))
    def to_list(self):
        r"""
        Return ``self`` as a list.

        The returned list is a copy, so mutating it does not affect
        ``self``.

        EXAMPLES::

            sage: p = Partition([2,1]).to_list(); p
            [2, 1]
            sage: type(p)
            <class 'list'>

        TESTS::

            sage: p = Partition([2,1])
            sage: pl = p.to_list()
            sage: pl[0] = 0; p
            [2, 1]
        """
        # Slice copy so callers cannot mutate the partition's internal list.
        return self._list[:]
def add_vertical_border_strip(self, k):
"""
Return a list of all the partitions that can be obtained by adding
a vertical border strip of length ``k`` to ``self``.
EXAMPLES::
sage: Partition([]).add_vertical_border_strip(0)
[[]]
sage: Partition([]).add_vertical_border_strip(2)
[[1, 1]]
sage: Partition([2,2]).add_vertical_border_strip(2)
[[3, 3], [3, 2, 1], [2, 2, 1, 1]]
sage: Partition([3,2,2]).add_vertical_border_strip(2)
[[4, 3, 2], [4, 2, 2, 1], [3, 3, 3], [3, 3, 2, 1], [3, 2, 2, 1, 1]]
"""
return [p.conjugate() for p in self.conjugate().add_horizontal_border_strip(k)]
    def add_horizontal_border_strip(self, k):
        """
        Return a list of all the partitions that can be obtained by adding
        a horizontal border strip of length ``k`` to ``self``.

        EXAMPLES::

            sage: Partition([]).add_horizontal_border_strip(0)
            [[]]
            sage: Partition([]).add_horizontal_border_strip(2)
            [[2]]
            sage: Partition([2,2]).add_horizontal_border_strip(2)
            [[2, 2, 2], [3, 2, 1], [4, 2]]
            sage: Partition([3,2,2]).add_horizontal_border_strip(2)
            [[3, 2, 2, 2], [3, 3, 2, 1], [4, 2, 2, 1], [4, 3, 2], [5, 2, 2]]

        .. TODO::

            Reimplement like ``remove_horizontal_border_strip`` using
            :class:`IntegerListsLex`
        """
        # Work on the conjugate shape: adding a horizontal strip to self
        # amounts to adding at most one cell to each column, i.e. at most
        # one unit to each part of the conjugate.
        conj = self.conjugate().to_list()
        # ``shelf`` collects, for each run of equal parts in ``conj``, how
        # many cells may be appended to that run while keeping a partition.
        shelf = []
        res = []
        i = 0
        while i < len(conj):
            # ``tmp`` counts the length of the current run of equal parts.
            tmp = 1
            while i+1 < len(conj) and conj[i] == conj[i+1]:
                tmp += 1
                i += 1
            # NOTE(review): when the last part differs from its predecessor
            # the run has length 1 and ``tmp`` is already 1, so this reset
            # looks redundant — kept as in the original; confirm before
            # simplifying.
            if i == len(conj)-1 and i > 0 and conj[i] != conj[i-1]:
                tmp = 1
            shelf.append(tmp)
            i += 1
        #added the last shelf on the right side of
        #the first line
        shelf.append(k)
        #list all of the positions for cells
        #filling each self from the left to the right
        # Each integer vector distributes the k new cells among the
        # shelves (bounded componentwise by ``shelf``).
        for iv in IntegerVectors(k, len(shelf), outer=shelf):
            iv = list(iv) # Make a mutable list
            # Pad with zeros so new rows created at the bottom of the
            # conjugate shape have somewhere to go.
            tmp = conj + [0]*k
            j = 0
            for t in range(len(iv)):
                # Add iv[t] cells at the top of shelf t, one per part.
                while iv[t] > 0:
                    tmp[j] += 1
                    iv[t] -= 1
                    j += 1
                # Jump to the first part of the next shelf.
                j = sum(shelf[:t+1])
            # Strip trailing zeros and conjugate back to the original side.
            res.append(Partition([u for u in tmp if u != 0]).conjugate())
        return res
def remove_horizontal_border_strip(self, k):
"""
Return the partitions obtained from ``self`` by removing an
horizontal border strip of length ``k``.
EXAMPLES::
sage: Partition([5,3,1]).remove_horizontal_border_strip(0).list()
[[5, 3, 1]]
sage: Partition([5,3,1]).remove_horizontal_border_strip(1).list()
[[5, 3], [5, 2, 1], [4, 3, 1]]
sage: Partition([5,3,1]).remove_horizontal_border_strip(2).list()
[[5, 2], [5, 1, 1], [4, 3], [4, 2, 1], [3, 3, 1]]
sage: Partition([5,3,1]).remove_horizontal_border_strip(3).list()
[[5, 1], [4, 2], [4, 1, 1], [3, 3], [3, 2, 1]]
sage: Partition([5,3,1]).remove_horizontal_border_strip(4).list()
[[4, 1], [3, 2], [3, 1, 1]]
sage: Partition([5,3,1]).remove_horizontal_border_strip(5).list()
[[3, 1]]
sage: Partition([5,3,1]).remove_horizontal_border_strip(6).list()
[]
The result is returned as an instance of
:class:`Partitions_with_constraints`::
sage: Partition([5,3,1]).remove_horizontal_border_strip(5)
The subpartitions of [5, 3, 1] obtained by removing an horizontal border strip of length 5
TESTS::
sage: Partition([3,2,2]).remove_horizontal_border_strip(2).list()
[[3, 2], [2, 2, 1]]
sage: Partition([3,2,2]).remove_horizontal_border_strip(2).first().parent()
The subpartitions of [3, 2, 2] obtained by removing an horizontal border strip of length 2
sage: Partition([]).remove_horizontal_border_strip(0).list()
[[]]
sage: Partition([]).remove_horizontal_border_strip(6).list()
[]
"""
return Partitions_with_constraints(n = self.size()-k,
min_length = len(self)-1,
max_length = len(self),
floor = self[1:]+[0],
ceiling = self[:],
max_slope = 0,
name = "The subpartitions of {} obtained by removing an horizontal border strip of length {}".format(self,k))
def k_conjugate(self, k):
r"""
Return the ``k``-conjugate of ``self``.
The `k`-conjugate is the partition that is given by the columns of
the `k`-skew diagram of the partition.
We can also define the `k`-conjugate in the following way. Let `P`
denote the bijection from `(k+1)`-cores to `k`-bounded partitions. The
`k`-conjugate of a `(k+1)`-core `\lambda` is
.. MATH::
\lambda^{(k)} = P^{-1}\left( (P(\lambda))^{\prime} \right).
EXAMPLES::
sage: p = Partition([4,3,2,2,1,1])
sage: p.k_conjugate(4)
[3, 2, 2, 1, 1, 1, 1, 1, 1]
"""
return Partition(self.k_skew(k).conjugate().row_lengths())
def arms_legs_coeff(self, i, j):
r"""
This is a statistic on a cell `c = (i,j)` in the diagram of partition
`p` given by
.. MATH::
\frac{ 1 - q^a \cdot t^{\ell + 1} }{ 1 - q^{a + 1} \cdot t^{\ell} }
where `a` is the arm length of `c` and `\ell` is the leg length of `c`.
The coordinates ``i`` and ``j`` of the cell are understood to be
`0`-based, so that ``(0, 0)`` is the northwesternmost cell (in
English notation).
EXAMPLES::
sage: Partition([3,2,1]).arms_legs_coeff(1,1)
(-t + 1)/(-q + 1)
sage: Partition([3,2,1]).arms_legs_coeff(0,0)
(-q^2*t^3 + 1)/(-q^3*t^2 + 1)
sage: Partition([3,2,1]).arms_legs_coeff(*[0,0])
(-q^2*t^3 + 1)/(-q^3*t^2 + 1)
"""
QQqt = PolynomialRing(QQ, ['q', 't'])
(q, t) = QQqt.gens()
if i < len(self) and j < self[i]:
res = (1-q**self.arm_length(i,j) * t**(self.leg_length(i,j)+1))
res /= (1-q**(self.arm_length(i,j)+1) * t**self.leg_length(i,j))
return res
return ZZ.one()
def atom(self):
"""
Return a list of the standard tableaux of size ``self.size()`` whose
atom is equal to ``self``.
EXAMPLES::
sage: Partition([2,1]).atom()
[[[1, 2], [3]]]
sage: Partition([3,2,1]).atom()
[[[1, 2, 3, 6], [4, 5]], [[1, 2, 3], [4, 5], [6]]]
"""
res = []
for tab in tableau.StandardTableaux_size(self.size()):
if tab.atom() == self:
res.append(tab)
return res
def k_atom(self, k):
"""
Return a list of the standard tableaux of size ``self.size()`` whose
``k``-atom is equal to ``self``.
EXAMPLES::
sage: p = Partition([3,2,1])
sage: p.k_atom(1)
[]
sage: p.k_atom(3)
[[[1, 1, 1], [2, 2], [3]],
[[1, 1, 1, 2], [2], [3]],
[[1, 1, 1, 3], [2, 2]],
[[1, 1, 1, 2, 3], [2]]]
sage: Partition([3,2,1]).k_atom(4)
[[[1, 1, 1], [2, 2], [3]], [[1, 1, 1, 3], [2, 2]]]
TESTS::
sage: Partition([1]).k_atom(1)
[[[1]]]
sage: Partition([1]).k_atom(2)
[[[1]]]
sage: Partition([]).k_atom(1)
[[]]
"""
res = [tableau.Tableau([])]
for i in range(len(self)):
res = (x.promotion_operator(self[-i - 1]) for x in res)
res = sum(res, [])
res = (y.catabolism_projector(Partition(self[-i - 1:]).k_split(k))
for y in res)
res = [i for i in res if i]
return res
def k_split(self, k):
"""
Return the ``k``-split of ``self``.
EXAMPLES::
sage: Partition([4,3,2,1]).k_split(3)
[]
sage: Partition([4,3,2,1]).k_split(4)
[[4], [3, 2], [1]]
sage: Partition([4,3,2,1]).k_split(5)
[[4, 3], [2, 1]]
sage: Partition([4,3,2,1]).k_split(6)
[[4, 3, 2], [1]]
sage: Partition([4,3,2,1]).k_split(7)
[[4, 3, 2, 1]]
sage: Partition([4,3,2,1]).k_split(8)
[[4, 3, 2, 1]]
"""
if self == []:
return []
elif k < self[0]:
return []
else:
res = []
part = list(self)
while part and part[0] + len(part) - 1 >= k:
p = k - part[0]
res.append(part[:p + 1])
part = part[p + 1:]
if part:
res.append(part)
return res
def jacobi_trudi(self):
"""
Return the Jacobi-Trudi matrix of ``self`` thought of as a skew
partition. See :meth:`SkewPartition.jacobi_trudi()
<sage.combinat.skew_partition.SkewPartition.jacobi_trudi>`.
EXAMPLES::
sage: part = Partition([3,2,1])
sage: jt = part.jacobi_trudi(); jt
[h[3] h[1] 0]
[h[4] h[2] h[]]
[h[5] h[3] h[1]]
sage: s = SymmetricFunctions(QQ).schur()
sage: h = SymmetricFunctions(QQ).homogeneous()
sage: h( s(part) )
h[3, 2, 1] - h[3, 3] - h[4, 1, 1] + h[5, 1]
sage: jt.det()
h[3, 2, 1] - h[3, 3] - h[4, 1, 1] + h[5, 1]
"""
return SkewPartition([ self, [] ]).jacobi_trudi()
def character_polynomial(self):
r"""
Return the character polynomial associated to the partition
``self``.
The character polynomial `q_\mu` associated to a partition `\mu`
is defined by
.. MATH::
q_\mu(x_1, x_2, \ldots, x_k) = \downarrow \sum_{\alpha \vdash k}
\frac{ \chi^\mu_\alpha }{1^{a_1}2^{a_2}\cdots k^{a_k}a_1!a_2!\cdots
a_k!} \prod_{i=1}^{k} (ix_i-1)^{a_i}
where `k` is the size of `\mu`, and `a_i` is the multiplicity of
`i` in `\alpha`.
It is computed in the following manner:
1. Expand the Schur function `s_\mu` in the power-sum basis,
2. Replace each `p_i` with `ix_i-1`,
3. Apply the umbral operator `\downarrow` to the resulting polynomial.
EXAMPLES::
sage: Partition([1]).character_polynomial()
x - 1
sage: Partition([1,1]).character_polynomial()
1/2*x0^2 - 3/2*x0 - x1 + 1
sage: Partition([2,1]).character_polynomial()
1/3*x0^3 - 2*x0^2 + 8/3*x0 - x2
"""
#Create the polynomial ring we will use
k = self.size()
P = PolynomialRing(QQ, k, 'x')
x = P.gens()
#Expand s_mu in the power sum basis
from sage.combinat.sf.sf import SymmetricFunctions
Sym = SymmetricFunctions(QQ)
s = Sym.schur()
p = Sym.power()
ps_mu = p(s(self))
#Replace each p_i by i*x_i-1
items = ps_mu.monomial_coefficients().items() #items contains a list of (partition, coeff) pairs
partition_to_monomial = lambda part: prod([ (i*x[i-1]-1) for i in part ])
res = [ [partition_to_monomial(mc[0]), mc[1]] for mc in items ]
#Write things in the monomial basis
res = [ prod(pair) for pair in res ]
res = sum( res )
#Apply the umbral operator and return the result
from sage.combinat.misc import umbral_operation
return umbral_operation(res)
    def dimension(self, smaller=None, k=1):
        r"""
        Return the number of paths from the ``smaller`` partition to
        the partition ``self``, where each step consists of adding a
        `k`-ribbon while keeping a partition.

        Note that a 1-ribbon is just a single cell, so this counts paths
        in the Young graph when `k = 1`.

        Note also that the default case (`k = 1` and ``smaller = []``)
        gives the dimension of the irreducible representation of the
        symmetric group corresponding to ``self``.

        INPUT:

        - ``smaller`` -- a partition (default: an empty list ``[]``)

        - `k` -- a positive integer (default: 1)

        OUTPUT:

        The number of such paths

        EXAMPLES:

        Looks at the number of ways of getting from ``[5,4]`` to the empty
        partition, removing one cell at a time::

            sage: mu = Partition([5,4])
            sage: mu.dimension()
            42

        Same, but removing one 3-ribbon at a time. Note that the 3-core of
        ``mu`` is empty::

            sage: mu.dimension(k=3)
            3

        The 2-core of ``mu`` is not the empty partition::

            sage: mu.dimension(k=2)
            0

        Indeed, the 2-core of ``mu`` is ``[1]``::

            sage: mu.dimension(Partition([1]),k=2)
            2

        TESTS:

        Checks that the sum of squares of dimensions of characters of the
        symmetric group is the order of the group::

            sage: all(sum(mu.dimension()^2 for mu in Partitions(i))==factorial(i) for i in range(10))
            True

        A check coming from the theory of `k`-differentiable posets::

            sage: k=2; core = Partition([2,1])
            sage: all(sum(mu.dimension(core,k=2)^2
            ....:         for mu in Partitions(3+i*2) if mu.core(2) == core)
            ....:     == 2^i*factorial(i) for i in range(10))
            True

        Checks that the dimension satisfies the obvious recursion relation::

            sage: test = lambda larger, smaller: larger.dimension(smaller) == sum(mu.dimension(smaller) for mu in larger.down())
            sage: all(test(larger,smaller) for l in range(1,10) for s in range(10)
            ....:     for larger in Partitions(l) for smaller in Partitions(s) if smaller != larger)
            True

        ALGORITHM:

        Depending on the parameters given, different simplifications
        occur. When `k=1` and ``smaller`` is empty, this function uses
        the hook formula. When `k=1` and ``smaller`` is not empty, it
        uses a formula from [ORV]_.

        When `k \neq 1`, we first check that both ``self`` and
        ``smaller`` have the same `k`-core, then use the `k`-quotients
        and the same algorithm on each of the `k`-quotients.

        AUTHORS:

        - Paul-Olivier Dehaye (2011-06-07)
        """
        larger = self
        if smaller is None:
            smaller = Partition([])
        if k == 1:
            if smaller == Partition([]): # In this case, use the hook dimension formula
                return factorial(larger.size())/prod(larger.hooks())
            else:
                if not larger.contains(smaller): # easy case
                    return 0
                else:
                    # relative dimension
                    # Uses a formula of Olshanski, Regev, Vershik (see reference)
                    def inv_factorial(i):
                        # 1/i! extended by 0 for negative i, which makes the
                        # determinant formula below well-defined.
                        if i < 0:
                            return 0
                        else:
                            return 1/factorial(i)
                    len_range = list(range(larger.length()))
                    from sage.matrix.constructor import matrix
                    M = matrix(QQ,[[inv_factorial(larger.get_part(i)-smaller.get_part(j)-i+j) for i in len_range] for j in len_range])
                    return factorial(larger.size()-smaller.size())*M.determinant()
        else:
            # k != 1: reduce to the k-quotients, which works only when the
            # k-cores agree.
            larger_core = larger.core(k)
            smaller_core = smaller.core(k)
            if smaller_core != larger_core: # easy case
                return 0
            larger_quotients = larger.quotient(k)
            smaller_quotients = smaller.quotient(k)

            def multinomial_with_partitions(sizes,path_counts):
                # count the number of ways of performing the k paths in parallel,
                # if we know the total length allotted for each of the paths (sizes), and the number
                # of paths for each component. A multinomial picks the ordering of the components where
                # each step is taken.
                return prod(path_counts) * multinomial(sizes)
            # Recurse on each of the k quotient components (with k=1 there).
            sizes = [larger_quotients[i].size()-smaller_quotients[i].size() for i in range(k)]
            path_counts = [larger_quotients[i].dimension(smaller_quotients[i]) for i in range(k)]
            return multinomial_with_partitions(sizes,path_counts)
def plancherel_measure(self):
r"""
Return the probability of ``self`` under the Plancherel probability
measure on partitions of the same size.
This probability distribution comes from the uniform distribution
on permutations via the Robinson-Schensted correspondence.
See :wikipedia:`Plancherel\_measure`
and :meth:`Partitions_n.random_element_plancherel`.
EXAMPLES::
sage: Partition([]).plancherel_measure()
1
sage: Partition([1]).plancherel_measure()
1
sage: Partition([2]).plancherel_measure()
1/2
sage: [mu.plancherel_measure() for mu in Partitions(3)]
[1/6, 2/3, 1/6]
sage: Partition([5,4]).plancherel_measure()
7/1440
TESTS::
sage: all(sum(mu.plancherel_measure() for mu in Partitions(n))==1 for n in range(10))
True
"""
return self.dimension()**2/factorial(self.size())
def outline(self, variable=None):
r"""
Return the outline of the partition ``self``.
This is a piecewise linear function, normalized so that the area
under the partition ``[1]`` is 2.
INPUT:
- variable -- a variable (default: ``'x'`` in the symbolic ring)
EXAMPLES::
sage: [Partition([5,4]).outline()(x=i) for i in range(-10,11)]
[10, 9, 8, 7, 6, 5, 6, 5, 6, 5, 4, 3, 2, 3, 4, 5, 6, 7, 8, 9, 10]
sage: Partition([]).outline()
abs(x)
sage: Partition([1]).outline()
abs(x + 1) + abs(x - 1) - abs(x)
sage: y=sage.symbolic.ring.var("y")
sage: Partition([6,5,1]).outline(variable=y)
abs(y + 6) - abs(y + 5) + abs(y + 4) - abs(y + 3) + abs(y - 1) - abs(y - 2) + abs(y - 3)
TESTS::
sage: integrate(Partition([1]).outline()-abs(x),(x,-10,10))
2
"""
if variable is None:
variable = var('x')
outside_contents = [self.content(*c) for c in self.outside_corners()]
inside_contents = [self.content(*c) for c in self.corners()]
return sum(abs(variable+c) for c in outside_contents)\
-sum(abs(variable+c) for c in inside_contents)
    def dual_equivalence_graph(self, directed=False, coloring=None):
        r"""
        Return the dual equivalence graph of ``self``.

        Two permutations `p` and `q` in the symmetric group `S_n`
        differ by an `i`-*elementary dual equivalence (or dual Knuth)
        relation* (where `i` is an integer with `1 < i < n`) when the
        following two conditions are satisfied:

        - In the one-line notation of the permutation `p`, the letter
          `i` does not appear in between `i-1` and `i+1`.

        - The permutation `q` is obtained from `p` by switching two
          of the three letters `i-1, i, i+1` (in its one-line
          notation) -- namely, the leftmost and the rightmost one
          in order of their appearance in `p`.

        Notice that this is equivalent to the statement that the
        permutations `p^{-1}` and `q^{-1}` differ by an elementary
        Knuth equivalence at positions `i-1, i, i+1`.

        Two standard Young tableaux of shape `\lambda` differ by an
        `i`-elementary dual equivalence relation (of color `i`), if
        their reading words differ by an `i`-elementary dual
        equivalence relation.

        The *dual equivalence graph* of the partition `\lambda` is the
        edge-colored graph whose vertices are the standard Young
        tableaux of shape `\lambda`, and whose edges colored by `i` are
        given by the `i`-elementary dual equivalences.

        INPUT:

        - ``directed`` -- (default: ``False``) whether to have the dual
          equivalence graph be directed (where we have a directed edge
          `S \to T` if `i` appears to the left of `i+1` in the
          reading word of `T`; otherwise we have the directed edge
          `T \to S`)

        - ``coloring`` -- (optional) a function which sends each
          integer `i > 1` to a color (as a string, e.g., ``'red'`` or
          ``'black'``) to be used when visually representing the
          resulting graph using dot2tex; the default choice is
          ``2 -> 'red', 3 -> 'blue', 4 -> 'green', 5 -> 'purple',
          6 -> 'brown', 7 -> 'orange', 8 -> 'yellow', anything greater
          than 8 -> 'black'``.

        REFERENCES:

        - [As2008b]_

        EXAMPLES::

            sage: P = Partition([3,1,1])
            sage: G = P.dual_equivalence_graph()
            sage: sorted(G.edges())
            [([[1, 2, 3], [4], [5]], [[1, 2, 4], [3], [5]], 3),
             ([[1, 2, 4], [3], [5]], [[1, 2, 5], [3], [4]], 4),
             ([[1, 2, 4], [3], [5]], [[1, 3, 4], [2], [5]], 2),
             ([[1, 2, 5], [3], [4]], [[1, 3, 5], [2], [4]], 2),
             ([[1, 3, 4], [2], [5]], [[1, 3, 5], [2], [4]], 4),
             ([[1, 3, 5], [2], [4]], [[1, 4, 5], [2], [3]], 3)]
            sage: G = P.dual_equivalence_graph(directed=True)
            sage: sorted(G.edges())
            [([[1, 2, 4], [3], [5]], [[1, 2, 3], [4], [5]], 3),
             ([[1, 2, 5], [3], [4]], [[1, 2, 4], [3], [5]], 4),
             ([[1, 3, 4], [2], [5]], [[1, 2, 4], [3], [5]], 2),
             ([[1, 3, 5], [2], [4]], [[1, 2, 5], [3], [4]], 2),
             ([[1, 3, 5], [2], [4]], [[1, 3, 4], [2], [5]], 4),
             ([[1, 4, 5], [2], [3]], [[1, 3, 5], [2], [4]], 3)]

        TESTS::

            sage: G = Partition([1]).dual_equivalence_graph()
            sage: G.vertices()
            [[[1]]]
            sage: G = Partition([]).dual_equivalence_graph()
            sage: G.vertices()
            [[]]

            sage: P = Partition([3,1,1])
            sage: G = P.dual_equivalence_graph(coloring=lambda x: 'red')
            sage: G2 = P.dual_equivalence_graph(coloring={2: 'black', 3: 'blue', 4: 'cyan', 5: 'grey'})
            sage: G is G2
            False
            sage: G == G2
            True
        """
        # We do some custom caching to not recreate the graph, but to make
        # copies with the desired coloring (i.e., act like a factory).
        try:
            # If the graph was built on a previous call, it is cached on
            # the instance; an AttributeError falls through to the
            # construction code below.
            if directed:
                G = self._DDEG.copy(immutable=False)
            else:
                G = self._DEG.copy(immutable=False)
            if have_dot2tex():
                if coloring is None:
                    # Default palette; colors beyond 8 fall back to black.
                    d = {2: 'red', 3: 'blue', 4: 'green', 5: 'purple',
                         6: 'brown', 7: 'orange', 8: 'yellow'}
                    def coloring(i):
                        if i in d:
                            return d[i]
                        return 'black'
                elif isinstance(coloring, dict):
                    # Wrap a dict coloring as a callable for dot2tex.
                    d = coloring
                    coloring = lambda x: d[x]
                G.set_latex_options(format="dot2tex",
                                    edge_labels=True,
                                    color_by_label=coloring)
            return G
        except AttributeError:
            pass
        # Build the graph: vertices are the standard tableaux of this shape.
        T = list(tableau.StandardTableaux(self))
        n = sum(self)
        edges = []
        to_perms = {t: t.reading_word_permutation() for t in T}
        to_tab = {to_perms[k]: k for k in to_perms}
        Perm = permutation.Permutations()
        for t in T:
            pt = list(to_perms[t])
            for i in range(2, n):
                # Positions (0-based) of i, i+1 and i-1 in the reading word.
                ii = pt.index(i)
                iip = pt.index(i+1)
                iim = pt.index(i-1)
                l = sorted([iim, ii, iip])
                # Only generate the edge from the endpoint where i sits at
                # the leftmost of the three positions, so each edge is
                # produced exactly once.
                if l[0] != ii:
                    continue
                # Swap the leftmost and rightmost of the three letters.
                x = pt[:]
                x[l[0]], x[l[2]] = x[l[2]], x[l[0]]
                # NOTE(review): since l[0] == ii here, ii < iip always
                # holds, so the else branch below appears unreachable --
                # kept as in the original; confirm before simplifying.
                if ii < iip:
                    e = [t, to_tab[Perm(x)], i]
                    edges.append(e)
                else:
                    e = [to_tab[Perm(x)], t, i]
                    edges.append(e)
        # Cache the immutable graph on the instance, then recurse once so
        # the try-branch above applies the requested coloring.
        if directed:
            from sage.graphs.digraph import DiGraph
            self._DDEG = DiGraph([T, edges], format="vertices_and_edges",
                                 immutable=True, multiedges=True)
        else:
            from sage.graphs.graph import Graph
            self._DEG = Graph([T, edges], format="vertices_and_edges",
                              immutable=True, multiedges=True)
        return self.dual_equivalence_graph(directed, coloring)
##############
# Partitions #
##############
class Partitions(UniqueRepresentation, Parent):
r"""
``Partitions(n, **kwargs)`` returns the combinatorial class of
integer partitions of `n` subject to the constraints given by the
keywords.
Valid keywords are: ``starting``, ``ending``, ``min_part``,
``max_part``, ``max_length``, ``min_length``, ``length``,
``max_slope``, ``min_slope``, ``inner``, ``outer``, ``parts_in``,
``regular``, and ``restricted``. They have the following meanings:
- ``starting=p`` specifies that the partitions should all be less
than or equal to `p` in lex order. This argument cannot be combined
with any other (see :trac:`15467`).
- ``ending=p`` specifies that the partitions should all be greater than
or equal to `p` in lex order. This argument cannot be combined with any
other (see :trac:`15467`).
- ``length=k`` specifies that the partitions have
exactly `k` parts.
- ``min_length=k`` specifies that the partitions have
at least `k` parts.
- ``min_part=k`` specifies that all parts of the
partitions are at least `k`.
- ``inner=p`` specifies that the partitions must contain the
partition `p`.
- ``outer=p`` specifies that the partitions
be contained inside the partition `p`.
- ``min_slope=k`` specifies that the partitions have slope at least
`k`; the slope at position `i` is the difference between the
`(i+1)`-th part and the `i`-th part.
- ``parts_in=S`` specifies that the partitions have parts in the set
`S`, which can be any sequence of pairwise distinct positive
integers. This argument cannot be combined with any other
(see :trac:`15467`).
- ``regular=ell`` specifies that the partitions are `\ell`-regular,
and can only be combined with the ``max_length`` or ``max_part``, but
not both, keywords if `n` is not specified
- ``restricted=ell`` specifies that the partitions are `\ell`-restricted,
and cannot be combined with any other keywords
The ``max_*`` versions, along with ``inner`` and ``ending``, work
analogously.
Right now, the ``parts_in``, ``starting``, ``ending``, ``regular``, and
``restricted`` keyword arguments are mutually exclusive, both of each
other and of other keyword arguments. If you specify, say, ``parts_in``,
all other keyword arguments will be ignored; ``starting``, ``ending``,
``regular``, and ``restricted`` work the same way.
EXAMPLES:
If no arguments are passed, then the combinatorial class
of all integer partitions is returned::
sage: Partitions()
Partitions
sage: [2,1] in Partitions()
True
If an integer `n` is passed, then the combinatorial class of integer
partitions of `n` is returned::
sage: Partitions(3)
Partitions of the integer 3
sage: Partitions(3).list()
[[3], [2, 1], [1, 1, 1]]
If ``starting=p`` is passed, then the combinatorial class of partitions
greater than or equal to `p` in lexicographic order is returned::
sage: Partitions(3, starting=[2,1])
Partitions of the integer 3 starting with [2, 1]
sage: Partitions(3, starting=[2,1]).list()
[[2, 1], [1, 1, 1]]
If ``ending=p`` is passed, then the combinatorial class of
partitions at most `p` in lexicographic order is returned::
sage: Partitions(3, ending=[2,1])
Partitions of the integer 3 ending with [2, 1]
sage: Partitions(3, ending=[2,1]).list()
[[3], [2, 1]]
Using ``max_slope=-1`` yields partitions into distinct parts -- each
part differs from the next by at least 1. Use a different
``max_slope`` to get parts that differ by, say, 2::
sage: Partitions(7, max_slope=-1).list()
[[7], [6, 1], [5, 2], [4, 3], [4, 2, 1]]
sage: Partitions(15, max_slope=-1).cardinality()
27
The number of partitions of `n` into odd parts equals the number of
partitions into distinct parts. Let's test that for `n` from 10 to 20::
sage: test = lambda n: Partitions(n, max_slope=-1).cardinality() == Partitions(n, parts_in=[1,3..n]).cardinality()
sage: all(test(n) for n in [10..20])
True
The number of partitions of `n` into distinct parts that differ by
at least 2 equals the number of partitions into parts that equal 1
or 4 modulo 5; this is one of the Rogers-Ramanujan identities::
sage: test = lambda n: Partitions(n, max_slope=-2).cardinality() == Partitions(n, parts_in=([1,6..n] + [4,9..n])).cardinality()
sage: all(test(n) for n in [10..20])
True
Here are some more examples illustrating ``min_part``, ``max_part``,
and ``length``::
sage: Partitions(5,min_part=2)
Partitions of the integer 5 satisfying constraints min_part=2
sage: Partitions(5,min_part=2).list()
[[5], [3, 2]]
::
sage: Partitions(3,max_length=2).list()
[[3], [2, 1]]
::
sage: Partitions(10, min_part=2, length=3).list()
[[6, 2, 2], [5, 3, 2], [4, 4, 2], [4, 3, 3]]
Some examples using the ``regular`` keyword::
sage: Partitions(regular=4)
4-Regular Partitions
sage: Partitions(regular=4, max_length=3)
4-Regular Partitions with max length 3
sage: Partitions(regular=4, max_part=3)
4-Regular 3-Bounded Partitions
sage: Partitions(3, regular=4)
4-Regular Partitions of the integer 3
Some examples using the ``restricted`` keyword::
sage: Partitions(restricted=4)
4-Restricted Partitions
sage: Partitions(3, restricted=4)
4-Restricted Partitions of the integer 3
Here are some further examples using various constraints::
sage: [x for x in Partitions(4)]
[[4], [3, 1], [2, 2], [2, 1, 1], [1, 1, 1, 1]]
sage: [x for x in Partitions(4, length=2)]
[[3, 1], [2, 2]]
sage: [x for x in Partitions(4, min_length=2)]
[[3, 1], [2, 2], [2, 1, 1], [1, 1, 1, 1]]
sage: [x for x in Partitions(4, max_length=2)]
[[4], [3, 1], [2, 2]]
sage: [x for x in Partitions(4, min_length=2, max_length=2)]
[[3, 1], [2, 2]]
sage: [x for x in Partitions(4, max_part=2)]
[[2, 2], [2, 1, 1], [1, 1, 1, 1]]
sage: [x for x in Partitions(4, min_part=2)]
[[4], [2, 2]]
sage: [x for x in Partitions(4, outer=[3,1,1])]
[[3, 1], [2, 1, 1]]
sage: [x for x in Partitions(4, outer=[infinity, 1, 1])]
[[4], [3, 1], [2, 1, 1]]
sage: [x for x in Partitions(4, inner=[1,1,1])]
[[2, 1, 1], [1, 1, 1, 1]]
sage: [x for x in Partitions(4, max_slope=-1)]
[[4], [3, 1]]
sage: [x for x in Partitions(4, min_slope=-1)]
[[4], [2, 2], [2, 1, 1], [1, 1, 1, 1]]
sage: [x for x in Partitions(11, max_slope=-1, min_slope=-3, min_length=2, max_length=4)]
[[7, 4], [6, 5], [6, 4, 1], [6, 3, 2], [5, 4, 2], [5, 3, 2, 1]]
sage: [x for x in Partitions(11, max_slope=-1, min_slope=-3, min_length=2, max_length=4, outer=[6,5,2])]
[[6, 5], [6, 4, 1], [6, 3, 2], [5, 4, 2]]
Note that if you specify ``min_part=0``, then it will treat the minimum
part as being 1 (see :trac:`13605`)::
sage: [x for x in Partitions(4, length=3, min_part=0)]
[[2, 1, 1]]
sage: [x for x in Partitions(4, min_length=3, min_part=0)]
[[2, 1, 1], [1, 1, 1, 1]]
Except for very special cases, counting is done by brute force iteration
through all the partitions. However the iteration itself has a reasonable
complexity (see :class:`IntegerListsLex`), which allows for
manipulating large partitions::
sage: Partitions(1000, max_length=1).list()
[[1000]]
In particular, getting the first element is also constant time::
sage: Partitions(30, max_part=29).first()
[29, 1]
TESTS::
sage: TestSuite(Partitions(0)).run()
sage: TestSuite(Partitions(5)).run()
sage: TestSuite(Partitions(5, min_part=2)).run()
sage: repr( Partitions(5, min_part=2) )
'Partitions of the integer 5 satisfying constraints min_part=2'
sage: P = Partitions(5, min_part=2)
sage: P.first().parent()
Partitions...
sage: [2,1] in P
False
sage: [2,2,1] in P
False
sage: [3,2] in P
True
sage: Partitions(5, inner=[2,1], min_length=3).list()
[[3, 1, 1], [2, 2, 1], [2, 1, 1, 1]]
sage: Partitions(5, inner=Partition([2,2]), min_length=3).list()
[[2, 2, 1]]
sage: Partitions(7, inner=(2, 2), min_length=3).list()
[[4, 2, 1], [3, 3, 1], [3, 2, 2], [3, 2, 1, 1], [2, 2, 2, 1], [2, 2, 1, 1, 1]]
sage: Partitions(5, inner=[2,0,0,0,0,0]).list()
[[5], [4, 1], [3, 2], [3, 1, 1], [2, 2, 1], [2, 1, 1, 1]]
sage: Partitions(6, length=2, max_slope=-1).list()
[[5, 1], [4, 2]]
sage: Partitions(length=2, max_slope=-1).list()
Traceback (most recent call last):
...
ValueError: the size must be specified with any keyword argument
sage: Partitions(max_part = 3)
3-Bounded Partitions
Check that :trac:`14145` has been fixed::
sage: 1 in Partitions()
False
Check :trac:`15467`::
sage: Partitions(5,parts_in=[1,2,3,4], length=4)
Traceback (most recent call last):
...
ValueError: The parameters 'parts_in', 'starting' and 'ending' cannot be combined with anything else.
sage: Partitions(5,starting=[3,2], length=2)
Traceback (most recent call last):
...
ValueError: The parameters 'parts_in', 'starting' and 'ending' cannot be combined with anything else.
sage: Partitions(5,ending=[3,2], length=2)
Traceback (most recent call last):
...
ValueError: The parameters 'parts_in', 'starting' and 'ending' cannot be combined with anything else.
sage: Partitions(NN, length=2)
Traceback (most recent call last):
...
ValueError: the size must be specified with any keyword argument
sage: Partitions(('la','la','laaaa'), max_part=8)
Traceback (most recent call last):
...
ValueError: n must be an integer or be equal to one of None, NN, NonNegativeIntegers()
Check that calling ``Partitions`` with ``outer=a`` no longer
mutates ``a`` (:trac:`16234`)::
sage: a = [4,3,2,1,1,1,1]
sage: for p in Partitions(8, outer=a, min_slope=-1):
....: print(p)
[3, 3, 2]
[3, 2, 2, 1]
[3, 2, 1, 1, 1]
[2, 2, 2, 1, 1]
[2, 2, 1, 1, 1, 1]
[2, 1, 1, 1, 1, 1, 1]
sage: a
[4, 3, 2, 1, 1, 1, 1]
Check that ``inner`` and ``outer`` indeed accept a partition as
argument (:trac:`18423`)::
sage: P = Partitions(5, inner=Partition([2,1]), outer=Partition([3,2])); P
Partitions of the integer 5 satisfying constraints inner=[2, 1], outer=[3, 2]
sage: P.list()
[[3, 2]]
"""
@staticmethod
def __classcall_private__(cls, n=None, **kwargs):
"""
Return the correct parent based upon the input.
TESTS::
sage: P = Partitions()
sage: P2 = Partitions(NN)
sage: P is P2
True
sage: P2 = Partitions(NonNegativeIntegers())
sage: P is P2
True
sage: P = Partitions(4)
sage: P2 = Partitions(int(4))
sage: P is P2
True
Check that :trac:`17898` is fixed::
sage: P = Partitions(5, min_slope=0)
sage: list(P)
[[5], [1, 1, 1, 1, 1]]
"""
if n == infinity:
raise ValueError("n cannot be infinite")
if n is None or n is NN or n is NonNegativeIntegers():
if len(kwargs) > 0:
if len(kwargs) == 1:
if 'max_part' in kwargs:
return Partitions_all_bounded(kwargs['max_part'])
if 'regular' in kwargs:
return RegularPartitions_all(kwargs['regular'])
if 'restricted' in kwargs:
return RestrictedPartitions_all(kwargs['restricted'])
elif len(kwargs) == 2:
if 'regular' in kwargs:
if kwargs['regular'] < 1 or kwargs['regular'] not in ZZ:
raise ValueError("the regularity must be a positive integer")
if 'max_part' in kwargs:
return RegularPartitions_bounded(kwargs['regular'], kwargs['max_part'])
if 'max_length' in kwargs:
return RegularPartitions_truncated(kwargs['regular'], kwargs['max_length'])
raise ValueError("the size must be specified with any keyword argument")
return Partitions_all()
elif isinstance(n, (int,Integer)):
if len(kwargs) == 0:
return Partitions_n(n)
if len(kwargs) == 1:
if 'max_part' in kwargs:
return PartitionsGreatestLE(n, kwargs['max_part'])
if 'length' in kwargs:
return Partitions_nk(n, kwargs['length'])
if (len(kwargs) > 1 and
('parts_in' in kwargs or
'starting' in kwargs or
'ending' in kwargs)):
raise ValueError("The parameters 'parts_in', 'starting' and "+
"'ending' cannot be combined with anything else.")
if 'parts_in' in kwargs:
return Partitions_parts_in(n, kwargs['parts_in'])
elif 'starting' in kwargs:
return Partitions_starting(n, kwargs['starting'])
elif 'ending' in kwargs:
return Partitions_ending(n, kwargs['ending'])
elif 'regular' in kwargs:
return RegularPartitions_n(n, kwargs['regular'])
elif 'restricted' in kwargs:
return RestrictedPartitions_n(n, kwargs['restricted'])
# FIXME: should inherit from IntegerListLex, and implement repr, or _name as a lazy attribute
kwargs['name'] = "Partitions of the integer %s satisfying constraints %s"%(n, ", ".join( ["%s=%s"%(key, kwargs[key]) for key in sorted(kwargs)] ))
# min_part is at least 1, and it is 1 by default
kwargs['min_part'] = max(1,kwargs.get('min_part',1))
# max_slope is at most 0, and it is 0 by default
kwargs['max_slope'] = min(0,kwargs.get('max_slope',0))
if kwargs.get('min_slope', -float('inf')) > 0:
raise ValueError("the minimum slope must be non-negative")
if 'outer' in kwargs:
kwargs['max_length'] = min(len(kwargs['outer']),
kwargs.get('max_length', infinity))
kwargs['ceiling'] = tuple(kwargs['outer'])
del kwargs['outer']
if 'inner' in kwargs:
inner = [x for x in kwargs['inner'] if x > 0]
kwargs['floor'] = inner
kwargs['min_length'] = max(len(inner),
kwargs.get('min_length',0))
del kwargs['inner']
return Partitions_with_constraints(n, **kwargs)
raise ValueError("n must be an integer or be equal to one of "
"None, NN, NonNegativeIntegers()")
def __init__(self, is_infinite=False):
"""
Initialize ``self``.
INPUT:
- ``is_infinite`` -- (Default: ``False``) If ``True``, then the number
of partitions in this set is infinite.
EXAMPLES::
sage: Partitions()
Partitions
sage: Partitions(2)
Partitions of the integer 2
"""
if is_infinite:
Parent.__init__(self, category=InfiniteEnumeratedSets())
else:
Parent.__init__(self, category=FiniteEnumeratedSets())
    # Parent framework hook: elements of any Partitions parent are Partition objects.
    Element = Partition

    # add options to class
    class options(GlobalOptions):
        r"""
        Sets and displays the global options for elements of the partition,
        skew partition, and partition tuple classes.  If no parameters are
        set, then the function returns a copy of the options dictionary.

        The ``options`` to partitions can be accessed as the method
        :obj:`Partitions.options` of :class:`Partitions` and
        related parent classes.

        @OPTIONS@

        EXAMPLES::

            sage: P = Partition([4,2,2,1])
            sage: P
            [4, 2, 2, 1]
            sage: Partitions.options.display="exp"
            sage: P
            1, 2^2, 4
            sage: Partitions.options.display="exp_high"
            sage: P
            4, 2^2, 1

        It is also possible to use user defined functions for the ``display`` and
        ``latex`` options::

            sage: Partitions.options(display=lambda mu: '<%s>' % ','.join('%s'%m for m in mu._list)); P
            <4,2,2,1>
            sage: Partitions.options(latex=lambda mu: '\\Diagram{%s}' % ','.join('%s'%m for m in mu._list)); latex(P)
            \Diagram{4,2,2,1}
            sage: Partitions.options(display="diagram", diagram_str="#")
            sage: P
            ####
            ##
            ##
            #
            sage: Partitions.options(diagram_str="*", convention="french")
            sage: print(P.ferrers_diagram())
            *
            **
            **
            ****

        Changing the ``convention`` for partitions also changes the ``convention``
        option for tableaux and vice versa::

            sage: T = Tableau([[1,2,3],[4,5]])
            sage: T.pp()
              4  5
              1  2  3
            sage: Tableaux.options.convention="english"
            sage: print(P.ferrers_diagram())
            ****
            **
            **
            *
            sage: T.pp()
              1  2  3
              4  5
            sage: Partitions.options._reset()
        """
        NAME = 'Partitions'
        module = 'sage.combinat.partition'
        # Each attribute below declares one global option; GlobalOptions
        # turns these dicts into validated, documented settings.
        display = dict(default="list",
                       description='Specifies how partitions should be printed',
                       values=dict(list='displayed as a list',
                                   exp_low='in exponential form (lowest first)',
                                   exp_high='in exponential form (highest first)',
                                   diagram='as a Ferrers diagram',
                                   compact_low='compact form of ``exp_low``',
                                   compact_high='compact form of ``exp_high``'),
                       alias=dict(exp="exp_low", compact="compact_low", array="diagram",
                                  ferrers_diagram="diagram", young_diagram="diagram"),
                       case_sensitive=False)
        latex = dict(default="young_diagram",
                     description='Specifies how partitions should be latexed',
                     values=dict(diagram='latex as a Ferrers diagram',
                                 young_diagram='latex as a Young diagram',
                                 list='latex as a list',
                                 exp_high='latex as a list in exponential notation (highest first)',
                                 exp_low='as a list latex in exponential notation (lowest first)'),
                     alias=dict(exp="exp_low", array="diagram", ferrers_diagram="diagram"),
                     case_sensitive=False)
        diagram_str = dict(default="*",
                           description='The character used for the cells when printing Ferrers diagrams',
                           checker=lambda char: isinstance(char,str))
        latex_diagram_str = dict(default="\\ast",
                                 description='The character used for the cells when latexing Ferrers diagrams',
                                 checker=lambda char: isinstance(char,str))
        # ``convention`` is shared with the tableaux options so English/French
        # display stays consistent across both families of objects.
        convention = dict(link_to=(tableau.Tableaux.options,'convention'))
        notation = dict(alt_name='convention')
def __reversed__(self):
"""
A reversed iterator.
EXAMPLES::
sage: [x for x in reversed(Partitions(4))]
[[1, 1, 1, 1], [2, 1, 1], [2, 2], [3, 1], [4]]
"""
if not self.is_finite():
raise NotImplementedError("The set is infinite. This needs a custom reverse iterator")
for i in reversed(range(self.cardinality())):
yield self[i]
    def _element_constructor_(self, lst):
        """
        Construct an element with ``self`` as parent.

        EXAMPLES::

            sage: P = Partitions()
            sage: p = P([3,3,1]); p
            [3, 3, 1]
            sage: P(p) is p
            True
            sage: P([3, 2, 1, 0])
            [3, 2, 1]
            sage: PT = PartitionTuples()
            sage: elt = PT([[4,4,2,2,1]]); elt
            ([4, 4, 2, 2, 1])
            sage: P(elt)
            [4, 4, 2, 2, 1]

        TESTS::

            sage: Partition([3/2])
            Traceback (most recent call last):
            ...
            ValueError: all parts of [3/2] should be nonnegative integers
        """
        # A level-1 PartitionTuple wraps a single partition: unwrap it, and
        # reuse the wrapped object directly when it already lives in ``self``.
        if isinstance(lst, PartitionTuple):
            if lst.level() != 1:
                raise ValueError('%s is not an element of %s' % (lst, self))
            lst = lst[0]
            if lst.parent() is self:
                return lst
        # Coerce every entry to a Sage Integer; non-integral entries (e.g. 3/2)
        # raise TypeError, which we convert into the documented ValueError.
        try:
            lst = list(map(ZZ, lst))
        except TypeError:
            raise ValueError('all parts of %s should be nonnegative integers' % repr(lst))
        if lst in self:
            # trailing zeros are removed in Partition.__init__
            return self.element_class(self, lst)
        raise ValueError('%s is not an element of %s' % (lst, self))
def __contains__(self, x):
"""
Check if ``x`` is contained in ``self``.
TESTS::
sage: P = Partitions()
sage: Partition([2,1]) in P
True
sage: [2,1] in P
True
sage: [3,2,1] in P
True
sage: [1,2] in P
False
sage: [] in P
True
sage: [0] in P
True
Check that types that represent integers are not excluded::
sage: P = Partitions()
sage: [3/1, 2/2] in P
True
sage: Partition([3/1, 2]) in P
True
Check that non-integers and non-lists are excluded::
sage: P = Partitions()
sage: [2,1.5] in P
False
sage: 0 in P
False
"""
if isinstance(x, Partition):
return True
if isinstance(x, (list, tuple)):
return not x or (all((a in ZZ) and (a >= b) for a, b in zip(x, x[1:]))
and (x[-1] in ZZ) and (x[-1] >= 0))
return False
def subset(self, *args, **kwargs):
r"""
Return ``self`` if no arguments are given, otherwise raises a
``ValueError``.
EXAMPLES::
sage: P = Partitions(5, starting=[3,1]); P
Partitions of the integer 5 starting with [3, 1]
sage: P.subset()
Partitions of the integer 5 starting with [3, 1]
sage: P.subset(ending=[3,1])
Traceback (most recent call last):
...
ValueError: Invalid combination of arguments
"""
if len(args) != 0 or len(kwargs) != 0:
raise ValueError("Invalid combination of arguments")
return self
class Partitions_all(Partitions):
"""
Class of all partitions.
TESTS::
sage: TestSuite( sage.combinat.partition.Partitions_all() ).run()
"""
    def __init__(self):
        """
        Initialize ``self``.

        TESTS::

            sage: P = Partitions()
            sage: P.category()
            Category of infinite enumerated sets
            sage: Partitions().cardinality()
            +Infinity
            sage: TestSuite(P).run()
        """
        # The set of all partitions (of every size) is infinite.
        Partitions.__init__(self, is_infinite=True)
def subset(self, size=None, **kwargs):
"""
Return the subset of partitions of a given size and additional
keyword arguments.
EXAMPLES::
sage: P = Partitions()
sage: P.subset(4)
Partitions of the integer 4
"""
if size is None:
return self
return Partitions(size, **kwargs)
def _repr_(self):
"""
Return a string representation of ``self``.
TESTS::
sage: Partitions() # indirect doctest
Partitions
"""
return "Partitions"
def __iter__(self):
"""
An iterator for all partitions.
EXAMPLES::
sage: p = Partitions()
sage: it = p.__iter__()
sage: [next(it) for i in range(10)]
[[], [1], [2], [1, 1], [3], [2, 1], [1, 1, 1], [4], [3, 1], [2, 2]]
"""
n = 0
while True:
for p in ZS1_iterator(n):
yield self.element_class(self, p)
n += 1
    def __reversed__(self):
        """
        A reversed iterator for all partitions.

        This reverse iterates through partitions of fixed `n` and incrementing
        `n` after reaching the end.

        EXAMPLES::

            sage: p = Partitions()
            sage: revit = p.__reversed__()
            sage: [next(revit) for i in range(10)]
            [[], [1], [1, 1], [2], [1, 1, 1], [2, 1], [3], [1, 1, 1, 1], [2, 1, 1], [2, 2]]
        """
        n = 0
        while True:
            # Materialize the partitions of n so they can be emitted backwards.
            for p in reversed(list(ZS1_iterator(n))):
                yield self.element_class(self, p)
            n += 1
    def from_frobenius_coordinates(self, frobenius_coordinates):
        """
        Return a partition from a pair of sequences of Frobenius coordinates.

        EXAMPLES::

            sage: Partitions().from_frobenius_coordinates(([],[]))
            []
            sage: Partitions().from_frobenius_coordinates(([0],[0]))
            [1]
            sage: Partitions().from_frobenius_coordinates(([1],[1]))
            [2, 1]
            sage: Partitions().from_frobenius_coordinates(([6,3,2],[4,1,0]))
            [7, 5, 5, 1, 1]
        """
        if len(frobenius_coordinates) != 2:
            raise ValueError('%s is not a valid partition, two sequences of coordinates are needed'%str(frobenius_coordinates))
        else:
            # a = arm lengths, b = leg lengths along the main diagonal.
            a = frobenius_coordinates[0]
            b = frobenius_coordinates[1]
            if len(a) != len(b):
                raise ValueError('%s is not a valid partition, the sequences of coordinates need to be the same length'%str(frobenius_coordinates))
                # should add tests to see if a and b are sorted down, nonnegative and strictly decreasing
        r = len(a)
        if r == 0:
            return self.element_class(self, [])
        # Rows 1..r come straight from the arm lengths.
        tmp = [a[i]+i+1 for i in range(r)]
        # should check that a is strictly decreasing
        if a[-1] < 0:
            raise ValueError('%s is not a partition, no coordinate can be negative'%str(frobenius_coordinates))
        # The leg lengths determine the rows below the diagonal; validation is
        # interleaved with construction so errors surface as early as possible.
        if b[-1] >= 0:
            tmp.extend([r]*b[r-1])
        else:
            raise ValueError('%s is not a partition, no coordinate can be negative'%str(frobenius_coordinates))
        for i in range(r-1,0,-1):
            if b[i-1]-b[i] > 0:
                tmp.extend([i]*(b[i-1]-b[i]-1))
            else:
                raise ValueError('%s is not a partition, the coordinates need to be strictly decreasing'%str(frobenius_coordinates))
        return self.element_class(self, tmp)
def from_beta_numbers(self, beta):
r"""
Return a partition corresponding to a sequence of beta numbers.
A sequence of beta numbers is a strictly increasing sequence
`0 \leq b_1 < \cdots < b_k` of non-negative integers. The
corresponding partition `\mu = (\mu_k, \ldots, \mu_1)` is
given by `\mu_i = [1,i) \setminus \{ b_1, \ldots, b_i \}`. This gives
a bijection from the set of partitions with at most `k` non-zero parts
to the set of strictly increasing sequences of non-negative integers
of length `k`.
EXAMPLES::
sage: Partitions().from_beta_numbers([0,1,2,4,5,8])
[3, 1, 1]
sage: Partitions().from_beta_numbers([0,2,3,6])
[3, 1, 1]
"""
beta.sort() # put them into increasing order just in case
offset = 0
while offset < len(beta)-1 and beta[offset] == offset:
offset+=1
beta = beta[offset:]
mu = [beta[i]-offset-i for i in range(len(beta))]
return self.element_class(self, list(reversed(mu)))
def from_exp(self, exp):
"""
Return a partition from its list of multiplicities.
EXAMPLES::
sage: Partitions().from_exp([2,2,1])
[3, 2, 2, 1, 1]
"""
p = []
for i in reversed(range(len(exp))):
p += [i+1]*exp[i]
return self.element_class(self, p)
def from_zero_one(self, seq):
r"""
Return a partition from its `0-1` sequence.
The full `0-1` sequence is the sequence (infinite in both
directions) indicating the steps taken when following the
outer rim of the diagram of the partition. We use the convention
that in English convention, a 1 corresponds to an East step, and
a 0 corresponds to a North step.
Note that every full `0-1` sequence starts with infinitely many 0's and
ends with infinitely many 1's.
.. SEEALSO::
:meth:`Partition.zero_one_sequence()`
INPUT:
The input should be a finite sequence of 0's and 1's. The
heading 0's and trailing 1's will be discarded.
EXAMPLES::
sage: Partitions().from_zero_one([])
[]
sage: Partitions().from_zero_one([1,0])
[1]
sage: Partitions().from_zero_one([1, 1, 1, 1, 0, 1, 0])
[5, 4]
Heading 0's and trailing 1's are correctly handled::
sage: Partitions().from_zero_one([0,0,1,1,1,1,0,1,0,1,1,1])
[5, 4]
TESTS::
sage: all(Partitions().from_zero_one(mu.zero_one_sequence()) == mu for n in range(10) for mu in Partitions(n))
True
"""
tmp = [i for i in range(len(seq)) if seq[i] == 0]
return self.element_class(self,[tmp[i]-i for i in range(len(tmp)-1,-1,-1)])
    def from_core_and_quotient(self, core, quotient):
        """
        Return a partition from its core and quotient.

        Algorithm from mupad-combinat.

        EXAMPLES::

            sage: Partitions().from_core_and_quotient([2,1], [[2,1],[3],[1,1,1]])
            [11, 5, 5, 3, 2, 2, 2]

        TESTS::

            sage: Partitions().from_core_and_quotient([2,1], [[2,1],[2,3,1],[1,1,1]])
            Traceback (most recent call last):
            ...
            ValueError: the quotient [[2, 1], [2, 3, 1], [1, 1, 1]] must be a tuple of partitions

        We check that :trac:`11412` is actually fixed::

            sage: test = lambda x, k: x == Partition(core=x.core(k),
            ....:                                    quotient=x.quotient(k))
            sage: all(test(mu,k) for k in range(1,5)
            ....:     for n in range(10) for mu in Partitions(n))
            True
            sage: test2 = lambda core, mus: (
            ....:     Partition(core=core, quotient=mus).core(mus.level()) == core
            ....:     and
            ....:     Partition(core=core, quotient=mus).quotient(mus.level()) == mus)
            sage: all(test2(core,mus)  # long time (5s on sage.math, 2011)
            ....:     for k in range(1,10)
            ....:     for n_core in range(10-k)
            ....:     for core in Partitions(n_core)
            ....:     if core.core(k) == core
            ....:     for n_mus in range(10-k)
            ....:     for mus in PartitionTuples(k,n_mus))
            True
        """
        from .partition_tuple import PartitionTuple, PartitionTuples
        if quotient not in PartitionTuples():
            raise ValueError('the quotient %s must be a tuple of partitions'%quotient)
        components = PartitionTuple(quotient).components()
        length = len(components)
        k = length*max(len(q) for q in components) + len(core)
        # k needs to be large enough. this seems to me like the smallest it can be
        # v: beta numbers of the core, padded down to length k.
        v = [core[i]-i for i in range(len(core))] + [ -i for i in range(len(core),k) ]
        # w: beta numbers of the core, sorted by residue class mod ``length``.
        w = [ [x for x in v if (x-i) % length == 0] for i in range(1, length+1) ]
        new_w = []
        for i in range(length):
            lw = len(w[i])
            lq = len(components[i])
            # k needs to be chosen so lw >= lq
            # Shift the beta numbers in residue class i by the i-th quotient
            # component; untouched beta numbers are carried over as-is.
            new_w += [ w[i][j] + length*components[i][j] for j in range(lq)]
            new_w += [ w[i][j] for j in range(lq,lw)]
        new_w.sort(reverse=True)
        # Convert the combined beta numbers back into a partition.
        return self.element_class(self, [new_w[i]+i for i in range(len(new_w))])
class Partitions_all_bounded(Partitions):
    def __init__(self, k):
        """
        Initialize ``self``.

        INPUT:

        - ``k`` -- a positive integer; the maximum allowed part size

        TESTS::

            sage: TestSuite( sage.combinat.partition.Partitions_all_bounded(3) ).run() # long time
        """
        # bound on the largest part of any partition in this set
        self.k = k
        # Partitions with bounded parts but unbounded size form an infinite set.
        Partitions.__init__(self, is_infinite=True)
def __contains__(self, x):
"""
TESTS::
sage: P = Partitions(max_part=3)
sage: Partition([2,1]) in P
True
sage: [2,1] in P
True
sage: [3,2,1] in P
True
sage: [1,2] in P
False
sage: [5,1] in P
False
sage: [0] in P
True
sage: [] in P
True
"""
return not x or (x[0] <= self.k and x in _Partitions)
def _repr_(self):
"""
TESTS::
sage: from sage.combinat.partition import Partitions_all_bounded
sage: Partitions_all_bounded(3)
3-Bounded Partitions
"""
return "%d-Bounded Partitions"%self.k
    def __iter__(self):
        """
        An iterator for all `k`-bounded partitions.

        EXAMPLES::

            sage: p = Partitions(max_part=3)
            sage: it = p.__iter__()
            sage: [next(it) for i in range(10)]
            [[], [1], [2], [1, 1], [3], [2, 1], [1, 1, 1], [3, 1], [2, 2], [2, 1, 1]]
        """
        n = 0
        while True:
            # Delegate to the fixed-size parent with the max_part restriction.
            for p in Partitions(n, max_part=self.k):
                yield self.element_class(self, p)
            n += 1
class Partitions_n(Partitions):
"""
Partitions of the integer `n`.
TESTS::
sage: TestSuite( sage.combinat.partition.Partitions_n(0) ).run()
sage: TestSuite( sage.combinat.partition.Partitions_n(0) ).run()
"""
    def __init__(self, n):
        """
        Initialize ``self``.

        INPUT:

        - ``n`` -- the (fixed) size of the partitions in this set

        TESTS::

            sage: TestSuite( Partitions(5) ).run()
        """
        Partitions.__init__(self)
        # the integer being partitioned
        self.n = n
def __contains__(self, x):
"""
Check if ``x`` is contained in ``self``.
TESTS::
sage: p = Partitions(5)
sage: [2,1] in p
False
sage: [2,2,1] in p
True
sage: [3,2] in p
True
sage: [2,3] in p
False
"""
return x in _Partitions and sum(x) == self.n
def _repr_(self):
"""
Return a string representation of ``self``.
TESTS::
sage: Partitions(5) # indirect doctest
Partitions of the integer 5
"""
return "Partitions of the integer %s"%self.n
def _an_element_(self):
"""
Return a partition in ``self``.
EXAMPLES::
sage: Partitions(4).an_element() # indirect doctest
[3, 1]
sage: Partitions(0).an_element()
[]
sage: Partitions(1).an_element()
[1]
"""
if self.n == 0:
lst = []
elif self.n == 1:
lst = [1]
else:
lst = [self.n-1, 1]
return self.element_class(self, lst)
    def cardinality(self, algorithm='flint'):
        r"""
        Return the number of partitions of the specified size.

        INPUT:

        - ``algorithm``  - (default: ``'flint'``)

          - ``'flint'`` -- use FLINT (currently the fastest)
          - ``'gap'`` -- use GAP (VERY *slow*)
          - ``'pari'`` -- use PARI. Speed seems the same as GAP until
            `n` is in the thousands, in which case PARI is faster.

        It is possible to associate with every partition of the integer `n` a
        conjugacy class of permutations in the symmetric group on `n` points
        and vice versa. Therefore the number of partitions `p_n` is the number
        of conjugacy classes of the symmetric group on `n` points.

        EXAMPLES::

            sage: v = Partitions(5).list(); v
            [[5], [4, 1], [3, 2], [3, 1, 1], [2, 2, 1], [2, 1, 1, 1], [1, 1, 1, 1, 1]]
            sage: len(v)
            7
            sage: Partitions(5).cardinality(algorithm='gap')
            7
            sage: Partitions(5).cardinality(algorithm='pari')
            7
            sage: number_of_partitions(5, algorithm='flint')
            7

        ::

            sage: Partitions(10).cardinality()
            42
            sage: Partitions(3).cardinality()
            3
            sage: Partitions(10).cardinality()
            42
            sage: Partitions(3).cardinality(algorithm='pari')
            3
            sage: Partitions(10).cardinality(algorithm='pari')
            42
            sage: Partitions(40).cardinality()
            37338
            sage: Partitions(100).cardinality()
            190569292

        A generating function for `p_n` is given by the reciprocal of
        Euler's function:

        .. MATH::

            \sum_{n=0}^{\infty} p_n x^n = \prod_{k=1}^{\infty} \frac{1}{1-x^k}.

        We use Sage to verify that the first several coefficients do
        indeed agree::

            sage: q = PowerSeriesRing(QQ, 'q', default_prec=9).gen()
            sage: prod([(1-q^k)^(-1) for k in range(1,9)])  ## partial product of
            1 + q + 2*q^2 + 3*q^3 + 5*q^4 + 7*q^5 + 11*q^6 + 15*q^7 + 22*q^8 + O(q^9)
            sage: [Partitions(k).cardinality() for k in range(2,10)]
            [2, 3, 5, 7, 11, 15, 22, 30]

        Another consistency test for ``n`` up to 500::

            sage: len([n for n in [1..500] if Partitions(n).cardinality() != Partitions(n).cardinality(algorithm='pari')])
            0

        For negative inputs, the result is zero (the algorithm is ignored)::

            sage: Partitions(-5).cardinality()
            0

        REFERENCES:

        - :wikipedia:`Partition\_(number\_theory)`
        """
        # There are no partitions of a negative integer, whatever the backend.
        if self.n < 0:
            return ZZ.zero()
        # Dispatch on the requested backend; FLINT is the cached default.
        if algorithm == 'flint':
            return cached_number_of_partitions(self.n)
        elif algorithm == 'gap':
            from sage.libs.gap.libgap import libgap
            return ZZ(libgap.NrPartitions(ZZ(self.n)))
        elif algorithm == 'pari':
            return ZZ(pari(ZZ(self.n)).numbpart())
        raise ValueError("unknown algorithm '%s'" % algorithm)
def random_element(self, measure = 'uniform'):
"""
Return a random partitions of `n` for the specified measure.
INPUT:
- ``measure`` -- ``'uniform'`` or ``'Plancherel'``
(default: ``'uniform'``)
.. SEEALSO::
- :meth:`random_element_uniform`
- :meth:`random_element_plancherel`
EXAMPLES::
sage: Partitions(5).random_element() # random
[2, 1, 1, 1]
sage: Partitions(5).random_element(measure='Plancherel') # random
[2, 1, 1, 1]
"""
if measure == 'uniform':
return self.random_element_uniform()
elif measure == 'Plancherel':
return self.random_element_plancherel()
else:
raise ValueError("Unknown measure: %s" % measure)
    def random_element_uniform(self):
        """
        Return a random partition of `n` with uniform probability.

        EXAMPLES::

            sage: Partitions(5).random_element_uniform()  # random
            [2, 1, 1, 1]
            sage: Partitions(20).random_element_uniform() # random
            [9, 3, 3, 2, 2, 1]

        TESTS::

            sage: all(Part.random_element_uniform() in Part
            ....:     for Part in map(Partitions, range(10)))
            True

        Check that :trac:`18752` is fixed::

            sage: P = Partitions(5)
            sage: la = P.random_element_uniform()
            sage: la.parent() is P
            True

        ALGORITHM:

        - It is a python Implementation of RANDPAR, see [NW1978]_.  The
          complexity is unknown, there may be better algorithms.

           .. TODO::

               Check in Knuth AOCP4.

        - There is also certainly a lot of room for optimizations, see
          comments in the code.

        AUTHOR:

        - Florent Hivert (2009-11-23)
        """
        n = self.n
        res = [] # A dictionary of multiplicities could be faster.
        while n > 0:
            # Choose a pair d,j = 1,2..., with d*j <= n with probability
            #  d*numpart(n-d*j) / n / numpart(n)
            # and add d^j to the result partition. The resulting partition
            # is chosen uniformly at random.
            # The following could be made faster by a clever use of floats
            rand = randrange(0, n*cached_number_of_partitions(n))  # cached number_of_partition
            # It is better to start by the j = 1 pairs because they are the
            # most probable. Maybe there is an even more clever order.
            for j in range(1, n+1):
                d = 1
                r = n-j # n - d*j
                while r >= 0:
                    rand -= d * cached_number_of_partitions(r)
                    if rand < 0:
                        # Found the chosen (d, j) pair; escape both loops
                        # via the for/else below.
                        break
                    d +=1
                    r -= j
                else:
                    # Inner while exhausted without choosing: try the next j.
                    continue
                break
            res.extend([d]*j)
            n = r
        res.sort(reverse=True)
        return self.element_class(self, res)
    def random_element_plancherel(self):
        r"""
        Return a random partition of `n` (for the Plancherel measure).

        This probability distribution comes from the uniform distribution
        on permutations via the Robinson-Schensted correspondence.

        See :wikipedia:`Plancherel\_measure`
        and :meth:`Partition.plancherel_measure`.

        EXAMPLES::

            sage: Partitions(5).random_element_plancherel()   # random
            [2, 1, 1, 1]
            sage: Partitions(20).random_element_plancherel()  # random
            [9, 3, 3, 2, 2, 1]

        TESTS::

            sage: all(Part.random_element_plancherel() in Part
            ....:     for Part in map(Partitions, range(10)))
            True

        Check that :trac:`18752` is fixed::

            sage: P = Partitions(5)
            sage: la = P.random_element_plancherel()
            sage: la.parent() is P
            True

        ALGORITHM:

        - insert by Robinson-Schensted a uniform random permutations of n and
          returns the shape of the resulting tableau. The complexity is
          `O(n\ln(n))` which is likely optimal. However, the implementation
          could be optimized.

        AUTHOR:

        - Florent Hivert (2009-11-23)
        """
        # RSK-insert a uniform permutation; the shape of the insertion
        # tableau is Plancherel-distributed.
        T = permutation.Permutations(self.n).random_element().left_tableau()
        return self.element_class(self, [len(row) for row in T])
def first(self):
"""
Return the lexicographically first partition of a positive integer
`n`. This is the partition ``[n]``.
EXAMPLES::
sage: Partitions(4).first()
[4]
"""
return self.element_class(self, [self.n])
def next(self, p):
"""
Return the lexicographically next partition after the partition ``p``.
EXAMPLES::
sage: Partitions(4).next([4])
[3, 1]
sage: Partitions(4).next([1,1,1,1]) is None
True
"""
found = False
for i in self:
if found:
return i
if i == p:
found = True
return None
def last(self):
"""
Return the lexicographically last partition of the positive
integer `n`. This is the all-ones partition.
EXAMPLES::
sage: Partitions(4).last()
[1, 1, 1, 1]
"""
return self.element_class(self, [1]*self.n)
    def __iter__(self):
        """
        An iterator for the partitions of `n`.

        EXAMPLES::

            sage: [x for x in Partitions(4)]
            [[4], [3, 1], [2, 2], [2, 1, 1], [1, 1, 1, 1]]

        TESTS::

            sage: all(isinstance(i, Integer) for p in Partitions(4) for i in p)
            True
        """
        # ZS1_iterator yields plain Python ints; convert parts to Sage Integers.
        for p in ZS1_iterator(self.n):
            yield self.element_class(self, [Integer(i) for i in p])
    def subset(self, **kwargs):
        r"""
        Return a subset of ``self`` with the additional optional arguments.

        EXAMPLES::

            sage: P = Partitions(5); P
            Partitions of the integer 5
            sage: P.subset(starting=[3,1])
            Partitions of the integer 5 starting with [3, 1]
        """
        # Re-dispatch through the Partitions factory with the same size.
        return Partitions(self.n, **kwargs)
class Partitions_nk(Partitions):
"""
Partitions of the integer `n` of length equal to `k`.
TESTS::
sage: TestSuite( sage.combinat.partition.Partitions_nk(0,0) ).run()
sage: TestSuite( sage.combinat.partition.Partitions_nk(0,0) ).run()
"""
    def __init__(self, n, k):
        """
        Initialize ``self``.

        INPUT:

        - ``n`` -- the size of the partitions
        - ``k`` -- the required number of parts

        TESTS::

            sage: TestSuite( Partitions(5, length=2) ).run()
        """
        Partitions.__init__(self)
        # size of the partitions
        self.n = n
        # required length (number of parts)
        self.k = k
def __contains__(self, x):
"""
Check if ``x`` is contained in ``self``.
TESTS::
sage: p = Partitions(5, length=2)
sage: [2,1] in p
False
sage: [2,2,1] in p
False
sage: [3,2] in p
True
sage: [2,3] in p
False
sage: [4,1] in p
True
sage: [1,1,1,1,1] in p
False
sage: [5] in p
False
"""
return x in _Partitions and sum(x) == self.n and len(x) == self.k
def _repr_(self):
"""
Return a string representation of ``self``.
TESTS::
sage: Partitions(5, length=2) # indirect doctest
Partitions of the integer 5 of length 2
"""
return "Partitions of the integer {} of length {}".format(self.n, self.k)
def _an_element_(self):
"""
Return a partition in ``self``.
EXAMPLES::
sage: Partitions(4, length=1).an_element() # indirect doctest
[4]
sage: Partitions(4, length=2).an_element()
[3, 1]
sage: Partitions(4, length=3).an_element()
[2, 1, 1]
sage: Partitions(4, length=4).an_element()
[1, 1, 1, 1]
sage: Partitions(1, length=1).an_element()
[1]
sage: Partitions(0, length=0).an_element()
[]
"""
if self.n == 0:
if self.k == 0:
lst = []
else:
from sage.categories.sets_cat import EmptySetError
raise EmptySetError
elif self.n >= self.k > 0:
lst = [self.n - self.k + 1] + [1] * (self.k-1)
else:
from sage.categories.sets_cat import EmptySetError
raise EmptySetError
return self.element_class(self, lst)
    def __iter__(self):
        """
        An iterator for all partitions of `n` of length `k`.

        EXAMPLES::

            sage: p = Partitions(9, length=3)
            sage: it = p.__iter__()
            sage: list(it)
            [[7, 1, 1], [6, 2, 1], [5, 3, 1], [5, 2, 2], [4, 4, 1], [4, 3, 2], [3, 3, 3]]

            sage: p = Partitions(9, length=10)
            sage: list(p.__iter__())
            []

            sage: p = Partitions(0, length=0)
            sage: list(p.__iter__())
            [[]]

            sage: from sage.combinat.partition import number_of_partitions_length
            sage: all( len(Partitions(n, length=k).list())
            ....:      == number_of_partitions_length(n, k)
            ....:      for n in range(9) for k in range(n+2) )
            True

        TESTS::

            sage: partitions = Partitions(9, length=3)
            sage: all(isinstance(i, Integer) for p in partitions for i in p)
            True
        """
        # Bijection: partitions of n with exactly k parts correspond to
        # partitions of n - k with at most k parts (add 1 to every row).
        for p in ZS1_iterator_nk(self.n - self.k, self.k):
            v = [Integer(i + 1) for i in p]
            adds = [Integer(1)] * (self.k - len(v))
            yield self.element_class(self, v + adds)
    def cardinality(self, algorithm='hybrid'):
        r"""
        Return the number of partitions of the specified size with the
        specified length.

        INPUT:

        - ``algorithm`` -- (default: ``'hybrid'``) the algorithm to compute
          the cardinality and can be one of the following:

          * ``'hybrid'`` - use a hybrid algorithm which uses heuristics to
            reduce the complexity
          * ``'gap'`` - use GAP

        EXAMPLES::

            sage: v = Partitions(5, length=2).list(); v
            [[4, 1], [3, 2]]
            sage: len(v)
            2
            sage: Partitions(5, length=2).cardinality()
            2

        More generally, the number of partitions of `n` of length `2`
        is `\left\lfloor \frac{n}{2} \right\rfloor`::

            sage: all( Partitions(n, length=2).cardinality()
            ....:      == n // 2 for n in range(10) )
            True

        The number of partitions of `n` of length `1` is `1` for `n`
        positive::

            sage: all( Partitions(n, length=1).cardinality() == 1
            ....:      for n in range(1, 10) )
            True

        Further examples::

            sage: Partitions(5, length=3).cardinality()
            2
            sage: Partitions(6, length=3).cardinality()
            3
            sage: Partitions(8, length=4).cardinality()
            5
            sage: Partitions(8, length=5).cardinality()
            3
            sage: Partitions(15, length=6).cardinality()
            26
            sage: Partitions(0, length=0).cardinality()
            1
            sage: Partitions(0, length=1).cardinality()
            0
            sage: Partitions(1, length=0).cardinality()
            0
            sage: Partitions(1, length=4).cardinality()
            0

        TESTS:

        We check the hybrid approach gives the same results as GAP::

            sage: N = [0, 1, 2, 3, 5, 10, 20, 500, 850]
            sage: K = [0, 1, 2, 3, 5, 10, 11, 20, 21, 250, 499, 500]
            sage: all(Partitions(n,length=k).cardinality() == Partitions(n,length=k).cardinality('gap')
            ....:     for n in N for k in K)
            True
            sage: P = Partitions(4562, length=2800)
            sage: P.cardinality() == P.cardinality('gap')
            True
        """
        # All counting logic lives in the module-level helper.
        return number_of_partitions_length(self.n, self.k, algorithm)
    def subset(self, **kwargs):
        r"""
        Return a subset of ``self`` with the additional optional arguments.

        EXAMPLES::

            sage: P = Partitions(5, length=2); P
            Partitions of the integer 5 of length 2
            sage: P.subset(max_part=3)
            Partitions of the integer 5 satisfying constraints length=2, max_part=3
        """
        # Keep the current size/length and layer the extra constraints on top.
        return Partitions(self.n, length=self.k, **kwargs)
class Partitions_parts_in(Partitions):
"""
Partitions of `n` with parts in a given set `S`.
This is invoked indirectly when calling
``Partitions(n, parts_in=parts)``, where ``parts`` is a list of
pairwise distinct integers.
TESTS::
sage: TestSuite( sage.combinat.partition.Partitions_parts_in(6, parts=[2,1]) ).run()
"""
    @staticmethod
    def __classcall_private__(cls, n, parts):
        """
        Normalize the input to ensure a unique representation.

        TESTS::

            sage: P = Partitions(4, parts_in=[2,1])
            sage: P2 = Partitions(4, parts_in=(1,2))
            sage: P is P2
            True
        """
        # Sorting into a tuple gives a canonical, hashable key so that
        # equivalent inputs produce the same (cached) parent.
        parts = tuple(sorted(parts))
        return super(Partitions_parts_in, cls).__classcall__(cls, Integer(n), parts)
    def __init__(self, n, parts):
        """
        Initialize ``self``.

        INPUT:

        - ``n`` -- the size of the partitions
        - ``parts`` -- tuple of allowed parts (normalized by ``__classcall_private__``)

        TESTS::

            sage: TestSuite(Partitions(5, parts_in=[1,2,3])).run()
        """
        Partitions.__init__(self)
        self.n = n
        # stored as a (sorted) list; several methods rely on the ordering
        self.parts = list(parts)
def __contains__(self, x):
"""
TESTS::
sage: p = Partitions(5, parts_in=[1,2])
sage: [2,1,1,1] in p
True
sage: [4,1] in p
False
"""
return (x in _Partitions and sum(x) == self.n and
all(p in self.parts for p in x))
def _repr_(self):
"""
TESTS::
sage: Partitions(5, parts_in=[1,2,3]) # indirect doctest
Partitions of the integer 5 with parts in [1, 2, 3]
"""
return "Partitions of the integer %s with parts in %s" % (self.n, self.parts)
    def cardinality(self):
        r"""
        Return the number of partitions with parts in ``self``. Wraps GAP's
        ``NrRestrictedPartitions``.

        EXAMPLES::

            sage: Partitions(15, parts_in=[2,3,7]).cardinality()
            5

        If you can use all parts 1 through `n`, we'd better get `p(n)`::

            sage: Partitions(20, parts_in=[1..20]).cardinality() == Partitions(20).cardinality()
            True

        TESTS:

        Let's check the consistency of GAP's function and our own
        algorithm that actually generates the partitions::

            sage: ps = Partitions(15, parts_in=[1,2,3])
            sage: ps.cardinality() == len(ps.list())
            True
            sage: ps = Partitions(15, parts_in=[])
            sage: ps.cardinality() == len(ps.list())
            True
            sage: ps = Partitions(3000, parts_in=[50,100,500,1000])
            sage: ps.cardinality() == len(ps.list())
            True
            sage: ps = Partitions(10, parts_in=[3,6,9])
            sage: ps.cardinality() == len(ps.list())
            True
            sage: ps = Partitions(0, parts_in=[1,2])
            sage: ps.cardinality() == len(ps.list())
            True
        """
        # GAP complains if you give it an empty list
        if self.parts:
            from sage.libs.gap.libgap import libgap
            return ZZ(libgap.NrRestrictedPartitions(ZZ(self.n), self.parts))
        # With no allowed parts, only n == 0 has a (single, empty) partition.
        return Integer(self.n == 0)
    def first(self):
        """
        Return the lexicographically first partition of a positive
        integer `n` with the specified parts, or ``None`` if no such
        partition exists.

        EXAMPLES::

            sage: Partitions(9, parts_in=[3,4]).first()
            [3, 3, 3]
            sage: Partitions(6, parts_in=[1..6]).first()
            [6]
            sage: Partitions(30, parts_in=[4,7,8,10,11]).first()
            [11, 11, 8]
        """
        try:
            # _findfirst returns None when no partition exists; feeding None
            # to element_class raises TypeError, which we map to None.
            return self.element_class(self, self._findfirst(self.n, self.parts[:]))
        except TypeError:
            return None
    def _findfirst(self, n, parts):
        """
        Return the lexicographically first partition of ``n`` with parts in
        ``parts`` (which must be sorted increasingly and is consumed), or
        ``None`` implicitly if none exists.

        TESTS::

            sage: p = Partitions(9, parts_in=[3,4])
            sage: p._findfirst(p.n, p.parts[:])
            [3, 3, 3]
            sage: p._findfirst(0, p.parts[:])
            []
            sage: p._findfirst(p.n, [10])
        """
        if n == 0:
            return []
        else:
            while parts:
                # Greedily try the largest remaining part with as many copies
                # as possible; recursion failure surfaces as TypeError (from
                # concatenating with the recursive None), which we swallow
                # and backtrack.
                p = parts.pop()
                for k in range(n.quo_rem(p)[0], 0, -1):
                    try:
                        return k * [p] + self._findfirst(n - k * p, parts[:])
                    except TypeError:
                        pass
    def last(self):
        """
        Return the lexicographically last partition of the positive
        integer `n` with the specified parts, or ``None`` if no such
        partition exists.

        EXAMPLES::

            sage: Partitions(15, parts_in=[2,3]).last()
            [3, 2, 2, 2, 2, 2, 2]
            sage: Partitions(30, parts_in=[4,7,8,10,11]).last()
            [7, 7, 4, 4, 4, 4]
            sage: Partitions(10, parts_in=[3,6]).last() is None
            True
            sage: Partitions(50, parts_in=[11,12,13]).last()
            [13, 13, 12, 12]
            sage: Partitions(30, parts_in=[4,7,8,10,11]).last()
            [7, 7, 4, 4, 4, 4]

        TESTS::

            sage: Partitions(6, parts_in=[1..6]).last()
            [1, 1, 1, 1, 1, 1]
            sage: Partitions(0, parts_in=[]).last()
            []
            sage: Partitions(50, parts_in=[11,12]).last() is None
            True
        """
        try:
            # As in first(): a None from _findlast raises TypeError inside
            # element_class, which signals "no such partition".
            return self.element_class(self, self._findlast(self.n, self.parts))
        except TypeError:
            return None
    def _findlast(self, n, parts):
        """
        Return the lexicographically largest partition of `n` using the
        given parts, or ``None`` if no such partition exists. This function
        is not intended to be called directly.

        INPUT:

        - ``n`` -- nonnegative integer

        - ``parts`` -- a sorted list of positive integers.

        OUTPUT:

        A list of integers in weakly decreasing order, or ``None``. The
        output is just a list, not a partition object.

        EXAMPLES::

            sage: ps = Partitions(1, parts_in=[1])
            sage: ps._findlast(15, [2,3])
            [3, 2, 2, 2, 2, 2, 2]
            sage: ps._findlast(9, [2,4]) is None
            True
            sage: ps._findlast(0, [])
            []
            sage: ps._findlast(100, [9,17,31])
            [31, 17, 17, 17, 9, 9]
        """
        if n < 0:
            return None
        elif n == 0:
            return []
        elif parts:
            # Lexicographically last means maximizing copies of the smallest part.
            p = parts[0]
            q, r = n.quo_rem(p)
            if r == 0:
                return [p] * q
            # If the smallest part doesn't divide n, try using the next
            # largest part
            else:
                for i, p in enumerate(parts[1:]):
                    rest = self._findlast(n - p, parts[:i + 2])
                    if rest is not None:
                        return [p] + rest
        # If we get to here, nothing ever worked, so there's no such
        # partitions, and we return None.
        return None
def __iter__(self):
"""
An iterator through the partitions of `n` with all parts belonging
to a particular set.
EXAMPLES::
sage: [x for x in Partitions(5, parts_in=[1,2,3])]
[[3, 2], [3, 1, 1], [2, 2, 1], [2, 1, 1, 1], [1, 1, 1, 1, 1]]
"""
for p in self._other_iterator(self.n, self.parts):
yield self.element_class(self, p)
    def _fast_iterator(self, n, parts):
        """
        A fast iterator for the partitions of ``n`` which returns lists and
        not partition types. This function is not intended to be called
        directly.

        INPUT:

        - ``n`` -- nonnegative integer.

        - ``parts`` -- a list of parts to use. This list will be
          destroyed, so pass things here with ``foo[:]`` (or something
          equivalent) if you want to preserve your list. In particular,
          the ``__iter__`` method needs to use ``self.parts[:]``, or else we
          forget which parts we're using!

        OUTPUT:

        A generator object for partitions of `n` with parts in
        ``parts``.

        If the parts in ``parts`` are sorted in increasing order, this
        function returns weakly decreasing lists. If ``parts`` is not
        sorted, your lists won't be, either.

        EXAMPLES::

            sage: P = Partitions(4, parts_in=[2,4])
            sage: it = P._fast_iterator(4, [2,4])
            sage: next(it)
            [4]
            sage: type(_)
            <class 'list'>
        """
        if n == 0:
            # Only the empty partition sums to zero.
            yield []
        else:
            while parts:
                # Consume the largest remaining part (assuming ``parts``
                # is sorted increasingly) so that output is produced in
                # decreasing lexicographic order.
                p = parts.pop()
                # Use k copies of p, from as many as fit down to one; the
                # remainder is partitioned with strictly smaller parts
                # (note ``parts`` no longer contains p here).
                for k in range(n.quo_rem(p)[0], 0, -1):
                    for q in self._fast_iterator(n - k * p, parts[:]):
                        yield k * [p] + q
def _other_iterator(self, n, parts):
"""
A fast iterator for the partitions of ``n`` which returns lists and
not partition types. This function is not intended to be called
directly.
INPUT:
- ``n`` -- nonnegative integer.
- ``parts`` -- a list of parts to use.
OUTPUT:
A generator object for partitions of `n` with parts in
``parts``.
EXAMPLES::
sage: P = Partitions(4, parts_in=[2,4])
sage: it = P._other_iterator(4, [2,4])
sage: next(it)
[4]
sage: type(_)
<class 'list'>
"""
sorted_parts = sorted(parts, reverse=True)
for vec in weighted_iterator_fast(n, sorted_parts):
yield sum(([pi] * multi
for pi, multi in zip(sorted_parts, vec)), [])
class Partitions_starting(Partitions):
    """
    The set of partitions of `n` that are lexicographically no larger
    than a fixed starting partition.
    """
    @staticmethod
    def __classcall_private__(cls, n, starting_partition):
        """
        Normalize the input to ensure a unique representation.

        TESTS::

            sage: P = Partitions(4, starting=[2,1])
            sage: P2 = Partitions(4, starting=[2,1])
            sage: P is P2
            True
        """
        start = Partition(starting_partition)
        return super(Partitions_starting, cls).__classcall__(cls, Integer(n),
                                                             start)

    def __init__(self, n, starting_partition):
        """
        Initialize ``self``.

        EXAMPLES::

            sage: Partitions(3, starting=[2,1])
            Partitions of the integer 3 starting with [2, 1]
            sage: Partitions(3, starting=[2,1]).list()
            [[2, 1], [1, 1, 1]]

        TESTS::

            sage: p = Partitions(3, starting=[2,1])
            sage: TestSuite(p).run()
        """
        Partitions.__init__(self)
        self.n = n
        self._starting = starting_partition

    def _repr_(self):
        """
        Return a string representation of ``self``.

        EXAMPLES::

            sage: Partitions(3, starting=[2,1]) # indirect doctest
            Partitions of the integer 3 starting with [2, 1]
        """
        return "Partitions of the integer {} starting with {}".format(
            self.n, self._starting)

    def __contains__(self, x):
        """
        Check if ``x`` is contained in ``self``: a partition of `n` which
        is lexicographically at most the starting partition.

        EXAMPLES::

            sage: p = Partitions(3, starting=[2,1])
            sage: [1,1] in p
            False
            sage: [2,1] in p
            True
            sage: [1,1,1] in p
            True
            sage: [3] in p
            False
        """
        if x not in Partitions_n(self.n):
            return False
        return x <= self._starting

    def first(self):
        """
        Return the first partition in ``self`` (the starting partition).

        EXAMPLES::

            sage: Partitions(3, starting=[2,1]).first()
            [2, 1]
        """
        return self._starting

    def next(self, part):
        """
        Return the partition following ``part`` in ``self``.

        EXAMPLES::

            sage: Partitions(3, starting=[2,1]).next(Partition([2,1]))
            [1, 1, 1]
        """
        return next(part)
class Partitions_ending(Partitions):
    """
    The set of partitions of `n` that are lexicographically no smaller
    than a fixed ending partition.
    """
    @staticmethod
    def __classcall_private__(cls, n, ending_partition):
        """
        Normalize the input to ensure a unique representation.

        TESTS::

            sage: P = Partitions(4)
            sage: P2 = Partitions(4)
            sage: P is P2
            True
        """
        end = Partition(ending_partition)
        return super(Partitions_ending, cls).__classcall__(cls, Integer(n),
                                                           end)

    def __init__(self, n, ending_partition):
        """
        Initialize ``self``.

        EXAMPLES::

            sage: Partitions(4, ending=[1,1,1,1]).list()
            [[4], [3, 1], [2, 2], [2, 1, 1], [1, 1, 1, 1]]
            sage: Partitions(4, ending=[2,2]).list()
            [[4], [3, 1], [2, 2]]
            sage: Partitions(4, ending=[4]).list()
            [[4]]

        TESTS::

            sage: p = Partitions(4, ending=[1,1,1,1])
            sage: TestSuite(p).run()
        """
        Partitions.__init__(self)
        self.n = n
        self._ending = ending_partition

    def _repr_(self):
        """
        Return a string representation of ``self``.

        EXAMPLES::

            sage: Partitions(4, ending=[1,1,1,1]) # indirect doctest
            Partitions of the integer 4 ending with [1, 1, 1, 1]
        """
        return "Partitions of the integer {} ending with {}".format(
            self.n, self._ending)

    def __contains__(self, x):
        """
        Check if ``x`` is contained in ``self``: a partition of `n` which
        is lexicographically at least the ending partition.

        EXAMPLES::

            sage: p = Partitions(4, ending=[2,2])
            sage: [4] in p
            True
            sage: [2,1,1] in p
            False
            sage: [2,1] in p
            False
        """
        if x not in Partitions_n(self.n):
            return False
        return x >= self._ending

    def first(self):
        """
        Return the first partition in ``self``, which is always `[n]`.

        EXAMPLES::

            sage: Partitions(4, ending=[1,1,1,1]).first()
            [4]
        """
        return self.element_class(self, [self.n])

    def next(self, part):
        """
        Return the partition following ``part`` in ``self``, or ``None``
        if ``part`` is the ending partition.

        EXAMPLES::

            sage: Partitions(4, ending=[1,1,1,1]).next(Partition([4]))
            [3, 1]
            sage: Partitions(4, ending=[1,1,1,1]).next(Partition([1,1,1,1])) is None
            True
        """
        if part == self._ending:
            return None
        return next(part)
class PartitionsInBox(Partitions):
    r"""
    All partitions whose Ferrers diagram fits inside an `h \times w` box,
    i.e. partitions with at most `h` parts, each of size at most `w`.

    EXAMPLES::

        sage: PartitionsInBox(2,2)
        Integer partitions which fit in a 2 x 2 box
        sage: PartitionsInBox(2,2).list()
        [[], [1], [1, 1], [2], [2, 1], [2, 2]]
    """
    def __init__(self, h, w):
        """
        Initialize ``self``.

        TESTS::

            sage: p = PartitionsInBox(2,2)
            sage: TestSuite(p).run()
        """
        Partitions.__init__(self)
        self.h = h
        self.w = w

    def _repr_(self):
        """
        Return a string representation of ``self``.

        EXAMPLES::

            sage: PartitionsInBox(2,2) # indirect doctest
            Integer partitions which fit in a 2 x 2 box
        """
        return "Integer partitions which fit in a %s x %s box" % (self.h, self.w)

    def __contains__(self, x):
        """
        Check if ``x`` is a partition with at most `h` parts, the largest
        of which is at most `w`.

        EXAMPLES::

            sage: [] in PartitionsInBox(2,2)
            True
            sage: [2,1] in PartitionsInBox(2,2)
            True
            sage: [3,1] in PartitionsInBox(2,2)
            False
            sage: [2,1,1] in PartitionsInBox(2,2)
            False
            sage: [3,1] in PartitionsInBox(3, 2)
            False
            sage: [3,1] in PartitionsInBox(2, 3)
            True
        """
        if x not in _Partitions:
            return False
        if len(x) > self.h:
            return False
        # The empty partition fits in any box; otherwise the first
        # (largest) part bounds the width.
        return len(x) == 0 or x[0] <= self.w

    def list(self):
        """
        Return a list of all the partitions inside a box of height `h` and
        width `w`.

        EXAMPLES::

            sage: PartitionsInBox(2,2).list()
            [[], [1], [1, 1], [2], [2, 1], [2, 2]]
            sage: PartitionsInBox(2,3).list()
            [[], [1], [1, 1], [2], [2, 1], [2, 2], [3], [3, 1], [3, 2], [3, 3]]

        TESTS:

        Check :trac:`10890`::

            sage: type(PartitionsInBox(0,0)[0])
            <class 'sage.combinat.partition.PartitionsInBox_with_category.element_class'>
        """
        if self.h == 0:
            return [self.element_class(self, [])]
        # Build every weakly decreasing sequence of length h with entries
        # in {0, ..., w}, one additional entry per round, then strip the
        # padding zeros to obtain honest partitions.
        rows = [[i] for i in range(self.w + 1)]
        for _ in range(self.h - 1):
            rows = [seq + [j] for seq in rows for j in range(seq[-1] + 1)]
        return [self.element_class(self, [part for part in seq if part != 0])
                for seq in rows]

    def cardinality(self):
        r"""
        Return the cardinality of ``self``, which is `\binom{h+w}{w}`.

        EXAMPLES::

            sage: PartitionsInBox(2, 3).cardinality()
            10

        TESTS:

        Check the corner case::

            sage: PartitionsInBox(0, 0).cardinality()
            1
            sage: PartitionsInBox(0, 1).cardinality()
            1

            sage: all(PartitionsInBox(a, b).cardinality() ==
            ....:     len(PartitionsInBox(a, b).list())
            ....:     for a in range(6) for b in range(6))
            True
        """
        return binomial(self.h + self.w, self.w)
class Partitions_constraints(IntegerListsLex):
    """
    For unpickling old constrained ``Partitions_constraints`` objects created
    with sage <= 3.4.1. See :class:`Partitions`.
    """
    def __setstate__(self, data):
        r"""
        Restore an old pickle by converting it, in place, to the modern
        :class:`Partitions_with_constraints` class.

        TESTS::

            sage: dmp = b'x\x9ck`J.NLO\xd5K\xce\xcfM\xca\xccK,\xd1+H,*\xc9,\xc9\xcc\xcf\xe3\n\x80\xb1\x8a\xe3\x93\x81DIQbf^I1W!\xa3fc!Sm!\xb3F(7\x92x!Km!k(GnbE<\xc8\x88B6\x88\xb9E\x99y\xe9\xc5z@\x05\xa9\xe9\xa9E\\\xb9\x89\xd9\xa9\xf10N!{(\xa3QkP!Gq(c^\x06\x90c\x0c\xe4p\x96&\xe9\x01\x00\xc2\xe53\xfd'
            sage: sp = loads(dmp); sp
            Integer lists of sum 3 satisfying certain constraints
            sage: sp.list()
            [[2, 1], [1, 1, 1]]
        """
        n = data['n']
        # Morph this instance into the replacement class and re-run its
        # constructor with the unpickled constraints, defaulting to the
        # partition conditions (weakly decreasing, positive parts).
        self.__class__ = Partitions_with_constraints
        constraints = {'max_slope': 0,
                       'min_part': 1}
        constraints.update(data['constraints'])
        self.__init__(n, **constraints)
class Partitions_with_constraints(IntegerListsLex):
    """
    Partitions which satisfy a set of constraints.

    All constraint handling (and ``__init__``) is inherited from
    :class:`IntegerListsLex`; this class only fixes the element class
    and the display options.

    EXAMPLES::

        sage: P = Partitions(6, inner=[1,1], max_slope=-1)
        sage: list(P)
        [[5, 1], [4, 2], [3, 2, 1]]

    TESTS::

        sage: P = Partitions(6, min_part=2, max_slope=-1)
        sage: TestSuite(P).run()

    Test that :trac:`15525` is fixed::

        sage: loads(dumps(P)) == P
        True
    """
#    def __init__(self, n, **kwargs):
#        """
#        Initialize ``self``.
#        """
#        IntegerListsLex.__init__(self, n, **kwargs)

    Element = Partition
    options = Partitions.options
######################
# Regular Partitions #
######################
class RegularPartitions(Partitions):
    r"""
    Base class for `\ell`-regular partitions.

    Let `\ell` be a positive integer. A partition `\lambda` is
    `\ell`-*regular* if `m_i < \ell` for all `i`, where `m_i` is the
    multiplicity of `i` in `\lambda`.

    .. NOTE::

        This is conjugate to the notion of `\ell`-*restricted* partitions,
        where the difference between any two consecutive
        parts is `< \ell`.

    INPUT:

    - ``ell`` -- the positive integer `\ell`
    - ``is_infinite`` -- boolean; if the subset of `\ell`-regular
      partitions is infinite
    """
    def __init__(self, ell, is_infinite=False):
        """
        Initialize ``self``.

        EXAMPLES::

            sage: P = Partitions(regular=2)
            sage: TestSuite(P).run()
        """
        self._ell = ell
        Partitions.__init__(self, is_infinite)

    def ell(self):
        r"""
        Return the value `\ell`.

        EXAMPLES::

            sage: P = Partitions(regular=2)
            sage: P.ell()
            2
        """
        return self._ell

    def __contains__(self, x):
        """
        Check that ``x`` is a partition in which no part occurs `\\ell`
        or more times.

        TESTS::

            sage: P = Partitions(regular=3)
            sage: [5] in P
            True
            sage: [] in P
            True
            sage: [3, 3, 2, 2] in P
            True
            sage: [3, 3, 3, 1] in P
            False
            sage: [4, 0, 0, 0, 0, 0] in P
            True
            sage: Partition([4,2,2,1]) in P
            True
            sage: Partition([4,2,2,2]) in P
            False
            sage: Partition([10,1]) in P
            True
        """
        if not Partitions.__contains__(self, x):
            return False
        if isinstance(x, Partition):
            # to_exp() lists the multiplicity of each part; pad with 0 so
            # the empty partition has a well-defined maximum.
            return max(x.to_exp() + [0]) < self._ell
        # Raw lists may carry trailing zeros, which are not real parts.
        return all(x.count(part) < self._ell for part in set(x) if part > 0)

    def _fast_iterator(self, n, max_part):
        """
        A fast (recursive) iterator which returns a list.

        EXAMPLES::

            sage: P = Partitions(regular=3)
            sage: list(P._fast_iterator(5, 5))
            [[5], [4, 1], [3, 2], [3, 1, 1], [2, 2, 1]]
            sage: list(P._fast_iterator(5, 3))
            [[3, 2], [3, 1, 1], [2, 2, 1]]
            sage: list(P._fast_iterator(5, 6))
            [[5], [4, 1], [3, 2], [3, 1, 1], [2, 2, 1]]
        """
        if n == 0:
            yield []
            return
        max_part = min(n, max_part)
        limit = self._ell - 1
        for part in range(max_part, 0, -1):
            for tail in self._fast_iterator(n - part, part):
                # Prepending ``part`` must not push its multiplicity to ell.
                if tail.count(part) < limit:
                    yield [part] + tail
class RegularPartitions_all(RegularPartitions):
    r"""
    The class of all `\ell`-regular partitions.

    INPUT:

    - ``ell`` -- the positive integer `\ell`

    .. SEEALSO::

        :class:`~sage.combinat.partition.RegularPartitions`
    """
    def __init__(self, ell):
        """
        Initialize ``self``.

        EXAMPLES::

            sage: P = Partitions(regular=4)
            sage: TestSuite(P).run()

        1-regular partitions::

            sage: P = Partitions(regular=1)
            sage: P in FiniteEnumeratedSets()
            True
            sage: TestSuite(P).run()
        """
        # Only for ell > 1 is this set infinite (for ell == 1 it contains
        # just the empty partition).
        RegularPartitions.__init__(self, ell, bool(ell > 1))

    def _repr_(self):
        """
        TESTS::

            sage: from sage.combinat.partition import RegularPartitions_all
            sage: RegularPartitions_all(3)
            3-Regular Partitions
        """
        return "{}-Regular Partitions".format(self._ell)

    def __iter__(self):
        """
        Iterate over ``self``.

        EXAMPLES::

            sage: P = Partitions(regular=3)
            sage: it = P.__iter__()
            sage: [next(it) for x in range(10)]
            [[], [1], [2], [1, 1], [3], [2, 1], [4], [3, 1], [2, 2], [2, 1, 1]]

        Check that 1-regular partitions works (:trac:`20584`)::

            sage: P = Partitions(regular=1)
            sage: list(P)
            [[]]
        """
        if self._ell == 1:
            # The empty partition is the only 1-regular partition.
            yield self.element_class(self, [])
            return
        n = 0
        while True:
            for mu in self._fast_iterator(n, n):
                yield self.element_class(self, mu)
            n += 1
class RegularPartitions_truncated(RegularPartitions):
    r"""
    The class of `\ell`-regular partitions with max length `k`.

    INPUT:

    - ``ell`` -- the integer `\ell`
    - ``max_len`` -- integer; the maximum length

    .. SEEALSO::

        :class:`~sage.combinat.partition.RegularPartitions`
    """
    def __init__(self, ell, max_len):
        """
        Initialize ``self``.

        EXAMPLES::

            sage: P = Partitions(regular=4, max_length=3)
            sage: TestSuite(P).run()
        """
        self._max_len = max_len
        RegularPartitions.__init__(self, ell, bool(ell > 1))

    def max_length(self):
        """
        Return the maximum length of the partitions of ``self``.

        EXAMPLES::

            sage: P = Partitions(regular=4, max_length=3)
            sage: P.max_length()
            3
        """
        return self._max_len

    def __contains__(self, x):
        """
        TESTS::

            sage: P = Partitions(regular=4, max_length=3)
            sage: [3, 3, 3] in P
            True
            sage: [] in P
            True
            sage: [4, 2, 1, 1] in P
            False
        """
        if len(x) > self._max_len:
            return False
        return RegularPartitions.__contains__(self, x)

    def _repr_(self):
        """
        TESTS::

            sage: from sage.combinat.partition import RegularPartitions_truncated
            sage: RegularPartitions_truncated(4, 3)
            4-Regular Partitions with max length 3
        """
        return "{}-Regular Partitions with max length {}".format(self._ell, self._max_len)

    def __iter__(self):
        """
        Iterate over ``self``.

        EXAMPLES::

            sage: P = Partitions(regular=3, max_length=2)
            sage: it = P.__iter__()
            sage: [next(it) for x in range(10)]
            [[], [1], [2], [1, 1], [3], [2, 1], [4], [3, 1], [2, 2], [5]]

        Check that 1-regular partitions works (:trac:`20584`)::

            sage: P = Partitions(regular=1, max_length=2)
            sage: list(P)
            [[]]
        """
        if self._ell == 1:
            # The empty partition is the only 1-regular partition.
            yield self.element_class(self, [])
            return
        n = 0
        while True:
            for mu in self._fast_iterator(n, n):
                yield self.element_class(self, mu)
            n += 1

    def _fast_iterator(self, n, max_part, depth=0):
        """
        A fast (recursive) iterator which returns a list.

        EXAMPLES::

            sage: P = Partitions(regular=2, max_length=2)
            sage: list(P._fast_iterator(5, 5))
            [[5], [4, 1], [3, 2]]
            sage: list(P._fast_iterator(5, 3))
            [[3, 2]]
            sage: list(P._fast_iterator(5, 6))
            [[5], [4, 1], [3, 2]]
        """
        if n == 0 or depth >= self._max_len:
            yield []
            return
        # Special case: exactly one row may still be added, so it has to
        # absorb all of n by itself.
        if depth + 1 == self._max_len:
            if max_part >= n:
                yield [n]
            return
        max_part = min(n, max_part)
        limit = self._ell - 1
        for part in range(max_part, 0, -1):
            for tail in self._fast_iterator(n - part, part, depth + 1):
                if tail.count(part) < limit:
                    yield [part] + tail
class RegularPartitions_bounded(RegularPartitions):
    r"""
    The class of `\ell`-regular `k`-bounded partitions.

    INPUT:

    - ``ell`` -- the integer `\ell`
    - ``k`` -- integer; the value `k`

    .. SEEALSO::

        :class:`~sage.combinat.partition.RegularPartitions`
    """
    def __init__(self, ell, k):
        """
        Initialize ``self``.

        EXAMPLES::

            sage: P = Partitions(regular=4, max_part=3)
            sage: TestSuite(P).run()

        1-regular partitions::

            sage: P = Partitions(regular=1, max_part=3)
            sage: P in FiniteEnumeratedSets()
            True
            sage: TestSuite(P).run()
        """
        self.k = k
        # This set is always finite: parts are bounded by k and each may
        # occur at most ell - 1 times.
        RegularPartitions.__init__(self, ell, False)

    def __contains__(self, x):
        """
        TESTS::

            sage: P = Partitions(regular=4, max_part=3)
            sage: [3, 3, 3] in P
            True
            sage: [] in P
            True
            sage: [4, 2, 1] in P
            False
        """
        return len(x) == 0 or (x[0] <= self.k and RegularPartitions.__contains__(self, x))

    def _repr_(self):
        """
        TESTS::

            sage: from sage.combinat.partition import RegularPartitions_bounded
            sage: RegularPartitions_bounded(4, 3)
            4-Regular 3-Bounded Partitions
        """
        return "{}-Regular {}-Bounded Partitions".format(self._ell, self.k)

    def __iter__(self):
        """
        Iterate over ``self``, from the largest partitions down.

        EXAMPLES::

            sage: P = Partitions(regular=2, max_part=3)
            sage: list(P)
            [[3, 2, 1], [3, 2], [3, 1], [3], [2, 1], [2], [1], []]

        Check that 1-regular partitions works (:trac:`20584`)::

            sage: P = Partitions(regular=1, max_part=3)
            sage: list(P)
            [[]]
        """
        k = self.k
        # Each part i <= k appears at most ell - 1 times, so every element
        # has size < ell * k(k+1)/2.  Use exact integer division (//):
        # k*(k+1) is always even, so the value is unchanged for Sage
        # integers, while plain-int ``k`` no longer produces a float that
        # would make ``range()`` raise a TypeError.
        for n in reversed(range(k * (k + 1) // 2 * self._ell)):
            for p in self._fast_iterator(n, k):
                yield self.element_class(self, p)
class RegularPartitions_n(RegularPartitions, Partitions_n):
    r"""
    The class of `\ell`-regular partitions of `n`.

    INPUT:

    - ``n`` -- the integer `n` to partition
    - ``ell`` -- the integer `\ell`

    .. SEEALSO::

        :class:`~sage.combinat.partition.RegularPartitions`
    """
    def __init__(self, n, ell):
        """
        Initialize ``self``.

        EXAMPLES::

            sage: P = Partitions(5, regular=3)
            sage: TestSuite(P).run()

        1-regular partitions::

            sage: P = Partitions(5, regular=1)
            sage: TestSuite(P).run()
        """
        RegularPartitions.__init__(self, ell)
        Partitions_n.__init__(self, n)

    def _repr_(self):
        """
        TESTS::

            sage: from sage.combinat.partition import RegularPartitions_n
            sage: RegularPartitions_n(3, 5)
            5-Regular Partitions of the integer 3
        """
        return "{}-Regular Partitions of the integer {}".format(self._ell, self.n)

    def __contains__(self, x):
        """
        TESTS::

            sage: P = Partitions(5, regular=3)
            sage: [3, 1, 1] in P
            True
            sage: [3, 2, 1] in P
            False
        """
        if not RegularPartitions.__contains__(self, x):
            return False
        return sum(x) == self.n

    def __iter__(self):
        """
        Iterate over ``self``.

        EXAMPLES::

            sage: P = Partitions(5, regular=3)
            sage: list(P)
            [[5], [4, 1], [3, 2], [3, 1, 1], [2, 2, 1]]
        """
        for mu in self._fast_iterator(self.n, self.n):
            yield self.element_class(self, mu)

    def cardinality(self):
        """
        Return the cardinality of ``self``.

        EXAMPLES::

            sage: P = Partitions(5, regular=3)
            sage: P.cardinality()
            5
            sage: P = Partitions(5, regular=6)
            sage: P.cardinality()
            7
            sage: P.cardinality() == Partitions(5).cardinality()
            True

        TESTS:

        Check the corner case::

            sage: P = Partitions(0, regular=3)
            sage: P.cardinality()
            1

        Check for 1-regular partitions::

            sage: P = Partitions(0, regular=1)
            sage: P.cardinality()
            1
            sage: P = Partitions(5, regular=1)
            sage: P.cardinality()
            0
        """
        if self._ell > self.n:
            # No partition of n can repeat a part ell times, so the
            # regularity condition is vacuous here.
            return Partitions_n.cardinality(self)
        return ZZ.sum(1 for x in self)

    def _an_element_(self):
        """
        Return a partition in ``self``.

        EXAMPLES::

            sage: P = Partitions(5, regular=2)
            sage: P._an_element_()
            [4, 1]

            sage: P = Partitions(0, regular=1)
            sage: P._an_element_()
            []

            sage: P = Partitions(5, regular=1)
            sage: P._an_element_()
            Traceback (most recent call last):
            ...
            EmptySetError
        """
        if self._ell == 1 and self.n > 0:
            # Only the empty partition is 1-regular, so this set is empty.
            from sage.categories.sets_cat import EmptySetError
            raise EmptySetError
        return Partitions_n._an_element_(self)
######################
# Ordered Partitions #
######################
class OrderedPartitions(Partitions):
    """
    The class of ordered partitions of `n`. If `k` is specified, then this
    contains only the ordered partitions of length `k`.

    An *ordered partition* of a nonnegative integer `n` means a list of
    positive integers whose sum is `n`. This is the same as a composition
    of `n`.

    .. NOTE::

        It is recommended that you use :meth:`Compositions` instead as
        :meth:`OrderedPartitions` wraps GAP.

    EXAMPLES::

        sage: OrderedPartitions(3)
        Ordered partitions of 3
        sage: OrderedPartitions(3).list()
        [[3], [2, 1], [1, 2], [1, 1, 1]]
        sage: OrderedPartitions(3,2)
        Ordered partitions of 3 of length 2
        sage: OrderedPartitions(3,2).list()
        [[2, 1], [1, 2]]

        sage: OrderedPartitions(10,k=2).list()
        [[9, 1], [8, 2], [7, 3], [6, 4], [5, 5], [4, 6], [3, 7], [2, 8], [1, 9]]
        sage: OrderedPartitions(4).list()
        [[4], [3, 1], [2, 2], [2, 1, 1], [1, 3], [1, 2, 1], [1, 1, 2], [1, 1, 1, 1]]
    """
    @staticmethod
    def __classcall_private__(cls, n, k=None):
        """
        Normalize the input to ensure a unique representation.

        TESTS::

            sage: P = OrderedPartitions(3,2)
            sage: P2 = OrderedPartitions(3,2)
            sage: P is P2
            True
        """
        if k is not None:
            k = Integer(k)
        return super(OrderedPartitions, cls).__classcall__(cls, Integer(n), k)

    def __init__(self, n, k):
        """
        Initialize ``self``.

        EXAMPLES::

            sage: o = OrderedPartitions(4,2)

        TESTS::

            sage: TestSuite( OrderedPartitions(5,3) ).run()
        """
        Partitions.__init__(self)
        self.n = n
        self.k = k

    def __contains__(self, x):
        """
        Check to see if ``x`` is an element of ``self``.

        EXAMPLES::

            sage: o = OrderedPartitions(4,2)
            sage: [2,1] in o
            False
            sage: [2,2] in o
            True
            sage: [1,2,1] in o
            False
        """
        C = composition.Compositions(self.n, length=self.k)
        # ``Compositions`` is a unique parent, so test membership against
        # ``C`` itself instead of constructing the identical parent twice.
        return C(x) in C

    def _repr_(self):
        """
        Return a string representation of ``self``.

        EXAMPLES::

            sage: OrderedPartitions(3) # indirect doctest
            Ordered partitions of 3
            sage: OrderedPartitions(3,2) # indirect doctest
            Ordered partitions of 3 of length 2
        """
        string = "Ordered partitions of %s" % self.n
        if self.k is not None:
            string += " of length %s" % self.k
        return string

    def list(self):
        """
        Return a list of partitions in ``self``, delegating the
        enumeration to GAP.

        EXAMPLES::

            sage: OrderedPartitions(3).list()
            [[3], [2, 1], [1, 2], [1, 1, 1]]
            sage: OrderedPartitions(3,2).list()
            [[2, 1], [1, 2]]
        """
        from sage.libs.gap.libgap import libgap
        n = self.n
        k = self.k
        if k is None:
            ans = libgap.OrderedPartitions(ZZ(n))
        else:
            ans = libgap.OrderedPartitions(ZZ(n), ZZ(k))
        result = ans.sage()
        # GAP enumerates in the reverse of our conventional order.
        result.reverse()
        return result

    def cardinality(self):
        """
        Return the cardinality of ``self``, computed by GAP.

        EXAMPLES::

            sage: OrderedPartitions(3).cardinality()
            4
            sage: OrderedPartitions(3,2).cardinality()
            2
            sage: OrderedPartitions(10,2).cardinality()
            9
            sage: OrderedPartitions(15).cardinality()
            16384
        """
        from sage.libs.gap.libgap import libgap
        n = self.n
        k = self.k
        if k is None:
            ans = libgap.NrOrderedPartitions(n)
        else:
            ans = libgap.NrOrderedPartitions(n, k)
        return ZZ(ans)
##########################
# Partitions Greatest LE #
##########################
class PartitionsGreatestLE(UniqueRepresentation, IntegerListsLex):
    """
    The class of all (unordered) "restricted" partitions of the integer `n`
    having parts less than or equal to the integer `k`.

    EXAMPLES::

        sage: PartitionsGreatestLE(10, 2)
        Partitions of 10 having parts less than or equal to 2
        sage: PartitionsGreatestLE(10, 2).list()
        [[2, 2, 2, 2, 2],
         [2, 2, 2, 2, 1, 1],
         [2, 2, 2, 1, 1, 1, 1],
         [2, 2, 1, 1, 1, 1, 1, 1],
         [2, 1, 1, 1, 1, 1, 1, 1, 1],
         [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]

        sage: [4,3,2,1] in PartitionsGreatestLE(10, 2)
        False
        sage: [2,2,2,2,2] in PartitionsGreatestLE(10, 2)
        True
        sage: PartitionsGreatestLE(10, 2).first().parent()
        Partitions...
    """
    def __init__(self, n, k):
        """
        Initialize ``self``.

        TESTS::

            sage: p = PartitionsGreatestLE(10, 2)
            sage: p.n, p.k
            (10, 2)
            sage: TestSuite(p).run()
        """
        # A partition with parts <= k is exactly a weakly decreasing list
        # of positive integers bounded by k.
        IntegerListsLex.__init__(self, n, max_slope=0, min_part=1, max_part=k)
        self.n = n
        self.k = k

    def _repr_(self):
        """
        Return a string representation of ``self``.

        TESTS::

            sage: PartitionsGreatestLE(10, 2) # indirect doctest
            Partitions of 10 having parts less than or equal to 2
        """
        return "Partitions of %s having parts less than or equal to %s" % (self.n, self.k)

    def cardinality(self):
        """
        Return the cardinality of ``self``.

        EXAMPLES::

            sage: PartitionsGreatestLE(9, 5).cardinality()
            23

        TESTS::

            sage: all(PartitionsGreatestLE(n, a).cardinality() ==
            ....:     len(PartitionsGreatestLE(n, a).list())
            ....:     for n in range(20) for a in range(6))
            True
        """
        # By conjugation, partitions with parts <= k are in bijection with
        # partitions of length <= k, so sum the counts over every length.
        total = 0
        for length in range(self.k + 1):
            total += number_of_partitions_length(self.n, length)
        return total

    Element = Partition
    options = Partitions.options
##########################
# Partitions Greatest EQ #
##########################
class PartitionsGreatestEQ(UniqueRepresentation, IntegerListsLex):
    """
    The class of all (unordered) "restricted" partitions of the integer `n`
    having all its greatest parts equal to the integer `k`.

    EXAMPLES::

        sage: PartitionsGreatestEQ(10, 2)
        Partitions of 10 having greatest part equal to 2
        sage: PartitionsGreatestEQ(10, 2).list()
        [[2, 2, 2, 2, 2],
         [2, 2, 2, 2, 1, 1],
         [2, 2, 2, 1, 1, 1, 1],
         [2, 2, 1, 1, 1, 1, 1, 1],
         [2, 1, 1, 1, 1, 1, 1, 1, 1]]

        sage: [4,3,2,1] in PartitionsGreatestEQ(10, 2)
        False
        sage: [2,2,2,2,2] in PartitionsGreatestEQ(10, 2)
        True

    The empty partition has no maximal part, but it is contained in
    the set of partitions with any specified maximal part::

        sage: PartitionsGreatestEQ(0, 2).list()
        [[]]

    TESTS::

        sage: [1]*10 in PartitionsGreatestEQ(10, 2)
        False

        sage: PartitionsGreatestEQ(10, 2).first().parent()
        Partitions...
    """
    def __init__(self, n, k):
        """
        Initialize ``self``.

        TESTS::

            sage: p = PartitionsGreatestEQ(10, 2)
            sage: p.n, p.k
            (10, 2)
            sage: TestSuite(p).run()
        """
        # ``floor=[k]`` forces the first (largest) part to be at least k,
        # which together with ``max_part=k`` pins it to exactly k.
        IntegerListsLex.__init__(self, n, max_slope=0, max_part=k, floor=[k])
        self.n = n
        self.k = k

    def _repr_(self):
        """
        Return a string representation of ``self``.

        TESTS::

            sage: PartitionsGreatestEQ(10, 2) # indirect doctest
            Partitions of 10 having greatest part equal to 2
        """
        return "Partitions of %s having greatest part equal to %s" % (self.n, self.k)

    def cardinality(self):
        """
        Return the cardinality of ``self``.

        EXAMPLES::

            sage: PartitionsGreatestEQ(10, 2).cardinality()
            5

        TESTS::

            sage: all(PartitionsGreatestEQ(n, a).cardinality() ==
            ....:     len(PartitionsGreatestEQ(n, a).list())
            ....:     for n in range(20) for a in range(6))
            True
        """
        if not self.n:
            # The empty partition is the single element when n == 0.
            return 1
        # By conjugation, partitions of n with largest part exactly k are
        # counted by partitions of n with exactly k parts.
        return number_of_partitions_length(self.n, self.k)

    Element = Partition
    options = Partitions.options
#########################
# Restricted Partitions #
#########################
class RestrictedPartitions_generic(Partitions):
    r"""
    Base class for `\ell`-restricted partitions.

    Let `\ell` be a positive integer. A partition `\lambda` is
    `\ell`-*restricted* if `\lambda_i - \lambda_{i+1} < \ell` for all `i`,
    including rows of length 0.

    .. NOTE::

        This is conjugate to the notion of `\ell`-*regular* partitions,
        where the multiplicity of any part is less than `\ell`.

    INPUT:

    - ``ell`` -- the positive integer `\ell`
    - ``is_infinite`` -- boolean; if the subset of `\ell`-restricted
      partitions is infinite
    """
    def __init__(self, ell, is_infinite=False):
        """
        Initialize ``self``.

        EXAMPLES::

            sage: P = Partitions(restricted=2)
            sage: TestSuite(P).run()
        """
        # _ell bounds the gap between consecutive parts (and the size of
        # the last part, since the "next" part after it is 0).
        self._ell = ell
        Partitions.__init__(self, is_infinite)

    def ell(self):
        r"""
        Return the value `\ell`.

        EXAMPLES::

            sage: P = Partitions(restricted=2)
            sage: P.ell()
            2
        """
        return self._ell

    def __contains__(self, x):
        """
        Check that ``x`` is a partition in which all consecutive
        differences (and the last part) are strictly less than `\\ell`.

        TESTS::

            sage: P = Partitions(restricted=3)
            sage: [5] in P
            False
            sage: [2] in P
            True
            sage: [] in P
            True
            sage: [3, 3, 3, 3, 2, 2] in P
            True
            sage: [3, 3, 3, 1] in P
            True
            sage: [8, 3, 3, 1] in P
            False
            sage: [2, 0, 0, 0, 0, 0] in P
            True
            sage: Partition([4,2,2,1]) in P
            True
            sage: Partition([4,2,2,2]) in P
            True
            sage: Partition([6,6,6,6,4,3,2]) in P
            True
            sage: Partition([7,6,6,2]) in P
            False
            sage: Partition([6,5]) in P
            False
            sage: Partition([10,1]) in P
            False
            sage: Partition([3,3] + [1]*10) in P
            True
        """
        if not Partitions.__contains__(self, x):
            return False
        if x == []:
            return True
        # Check every consecutive gap; the final comparison treats the
        # part after the last one as 0.
        return (all(x[i] - x[i+1] < self._ell for i in range(len(x)-1))
                and x[-1] < self._ell)

    def _fast_iterator(self, n, max_part):
        """
        A fast (recursive) iterator which returns a list.

        EXAMPLES::

            sage: P = Partitions(restricted=3)
            sage: list(P._fast_iterator(5, 5))
            [[3, 2], [3, 1, 1], [2, 2, 1], [2, 1, 1, 1], [1, 1, 1, 1, 1]]
            sage: list(P._fast_iterator(5, 2))
            [[2, 2, 1], [2, 1, 1, 1], [1, 1, 1, 1, 1]]

        TESTS::

            sage: for n in range(10):
            ....:     for ell in range(2, n):
            ....:         Pres = Partitions(n, restricted=ell)
            ....:         Preg = Partitions(n, regular=ell)
            ....:         assert set(Pres) == set(p.conjugate() for p in Preg)
        """
        if n == 0:
            yield []
            return
        if n < max_part:
            max_part = n
        for i in range(max_part, 0, -1):
            for p in self._fast_iterator(n-i, i):
                # The recursion yields tails in decreasing lexicographic
                # order, so their leading parts only shrink. Once the gap
                # between i and the tail's first part (or i itself for the
                # empty tail) reaches ell, every later tail for this i
                # also violates the restriction, so move on to the next i.
                if (p and i - p[0] >= self._ell) or (not p and i >= self._ell):
                    break
                yield [i] + p
class RestrictedPartitions_all(RestrictedPartitions_generic):
    r"""
    The class of all `\ell`-restricted partitions.

    INPUT:

    - ``ell`` -- the positive integer `\ell`

    .. SEEALSO::

        :class:`~sage.combinat.partition.RestrictedPartitions_generic`
    """
    def __init__(self, ell):
        """
        Initialize ``self``.

        EXAMPLES::

            sage: P = Partitions(restricted=4)
            sage: TestSuite(P).run()
        """
        # This set is always infinite.
        RestrictedPartitions_generic.__init__(self, ell, True)

    def _repr_(self):
        """
        TESTS::

            sage: from sage.combinat.partition import RestrictedPartitions_all
            sage: RestrictedPartitions_all(3)
            3-Restricted Partitions
        """
        return "{}-Restricted Partitions".format(self._ell)

    def __iter__(self):
        """
        Iterate over ``self``, one size `n` at a time.

        EXAMPLES::

            sage: P = Partitions(restricted=3)
            sage: it = P.__iter__()
            sage: [next(it) for x in range(10)]
            [[], [1], [2], [1, 1], [2, 1], [1, 1, 1],
             [3, 1], [2, 2], [2, 1, 1], [1, 1, 1, 1]]
        """
        n = 0
        while True:
            for mu in self._fast_iterator(n, n):
                yield self.element_class(self, mu)
            n += 1
class RestrictedPartitions_n(RestrictedPartitions_generic, Partitions_n):
    r"""
    The class of `\ell`-restricted partitions of `n`.

    INPUT:

    - ``n`` -- the integer `n` to partition
    - ``ell`` -- the integer `\ell`

    .. SEEALSO::

        :class:`~sage.combinat.partition.RestrictedPartitions_generic`
    """
    def __init__(self, n, ell):
        """
        Initialize ``self``.

        EXAMPLES::

            sage: P = Partitions(5, restricted=3)
            sage: TestSuite(P).run()
        """
        RestrictedPartitions_generic.__init__(self, ell)
        Partitions_n.__init__(self, n)

    def _repr_(self):
        """
        TESTS::

            sage: from sage.combinat.partition import RestrictedPartitions_n
            sage: RestrictedPartitions_n(3, 5)
            5-Restricted Partitions of the integer 3
        """
        return "{}-Restricted Partitions of the integer {}".format(self._ell, self.n)

    def __contains__(self, x):
        """
        TESTS::

            sage: P = Partitions(5, restricted=3)
            sage: [3, 1, 1] in P
            True
            sage: [3, 2, 1] in P
            False
        """
        # NOTE: the doctest previously constructed ``Partitions(5, regular=3)``,
        # which exercised RegularPartitions_n instead of this method; it now
        # uses ``restricted=3`` (the expected outputs are unchanged).
        return RestrictedPartitions_generic.__contains__(self, x) and sum(x) == self.n

    def __iter__(self):
        """
        Iterate over ``self``.

        EXAMPLES::

            sage: P = Partitions(5, restricted=3)
            sage: list(P)
            [[3, 2], [3, 1, 1], [2, 2, 1], [2, 1, 1, 1], [1, 1, 1, 1, 1]]
        """
        for p in self._fast_iterator(self.n, self.n):
            yield self.element_class(self, p)

    def cardinality(self):
        """
        Return the cardinality of ``self``.

        EXAMPLES::

            sage: P = Partitions(5, restricted=3)
            sage: P.cardinality()
            5
            sage: P = Partitions(5, restricted=6)
            sage: P.cardinality()
            7
            sage: P.cardinality() == Partitions(5).cardinality()
            True
        """
        if self._ell > self.n:
            # The restriction is vacuous: every partition of n qualifies.
            return Partitions_n.cardinality(self)
        return ZZ.sum(ZZ.one() for x in self)

    def _an_element_(self):
        """
        Return an element of ``self``.

        EXAMPLES::

            sage: P = Partitions(5, restricted=3)
            sage: P.an_element()
            [2, 1, 1, 1]

            sage: Partitions(0, restricted=3).an_element()
            []
            sage: Partitions(1, restricted=3).an_element()
            [1]
        """
        # The conjugate of a generic element of Partitions_n is
        # ell-restricted whenever the original is ell-regular.
        return self.element_class(self, Partitions_n._an_element_(self).conjugate())
#########################################################################
#### partitions
def number_of_partitions(n, algorithm='default'):
    r"""
    Return the number of partitions of `n`.

    The ``algorithm`` keyword is retained for backwards compatibility
    (see :trac:`13072`); every accepted value now dispatches to FLINT.
    (The docstring previously described an optional ``k`` argument for
    bounding the number of parts; that parameter no longer exists -- use
    :meth:`Partitions_n.cardinality` with constraints instead.)

    INPUT:

    - ``n`` -- an integer

    - ``algorithm`` -- (default: ``'default'``)

      - ``'default'`` -- use FLINT

      - ``'flint'`` -- use FLINT

    EXAMPLES::

        sage: v = Partitions(5).list(); v
        [[5], [4, 1], [3, 2], [3, 1, 1], [2, 2, 1], [2, 1, 1, 1], [1, 1, 1, 1, 1]]
        sage: len(v)
        7

    The input must be a nonnegative integer or a ``ValueError`` is raised.

    ::

        sage: number_of_partitions(-5)
        Traceback (most recent call last):
        ...
        ValueError: n (=-5) must be a nonnegative integer

    ::

        sage: number_of_partitions(10)
        42
        sage: number_of_partitions(3)
        3
        sage: number_of_partitions(40)
        37338
        sage: number_of_partitions(100)
        190569292

    A generating function for the number of partitions `p_n` is given by the
    reciprocal of Euler's function:

    .. MATH::

        \sum_{n=0}^{\infty} p_n x^n = \prod_{k=1}^{\infty} \left(
        \frac{1}{1-x^k} \right).

    We use Sage to verify that the first several coefficients do
    instead agree::

        sage: q = PowerSeriesRing(QQ, 'q', default_prec=9).gen()
        sage: prod([(1-q^k)^(-1) for k in range(1,9)])  ## partial product of
        1 + q + 2*q^2 + 3*q^3 + 5*q^4 + 7*q^5 + 11*q^6 + 15*q^7 + 22*q^8 + O(q^9)
        sage: [number_of_partitions(k) for k in range(2,10)]
        [2, 3, 5, 7, 11, 15, 22, 30]

    REFERENCES:

    - :wikipedia:`Partition\_(number\_theory)`

    TESTS:

    The Ramanujan congruence `p(5m + 4) \equiv 0 \pmod 5` (together with
    the congruences modulo 7 and 11) implies divisibility by 385 at
    suitable arguments::

        sage: n = 500 + randint(0,500)
        sage: number_of_partitions( n - (n % 385) + 369) % 385 == 0
        True
        sage: n = 1500 + randint(0,1500)
        sage: number_of_partitions( n - (n % 385) + 369) % 385 == 0
        True
        sage: n = 1000000 + randint(0,1000000)
        sage: number_of_partitions( n - (n % 385) + 369) % 385 == 0
        True
        sage: n = 100000000 + randint(0,100000000)
        sage: number_of_partitions( n - (n % 385) + 369) % 385 == 0  # long time (4s on sage.math, 2011)
        True
    """
    n = ZZ(n)
    if n < 0:
        raise ValueError("n (=%s) must be a nonnegative integer" % n)
    elif n == 0:
        return ZZ.one()

    if algorithm == 'default':
        algorithm = 'flint'

    if algorithm == 'flint':
        return cached_number_of_partitions(n)

    raise ValueError("unknown algorithm '%s'" % algorithm)
def number_of_partitions_length(n, k, algorithm='hybrid'):
    r"""
    Return the number of partitions of `n` with length `k`.

    This is a wrapper for GAP's ``NrPartitions`` function.

    INPUT:

    - ``n``, ``k`` -- integers
    - ``algorithm`` -- (default: ``'hybrid'``) with ``'hybrid'``, easy
      special cases are answered directly and GAP is called only for the
      remaining ones; any other value sends the computation straight to GAP

    EXAMPLES::

        sage: from sage.combinat.partition import number_of_partitions_length
        sage: number_of_partitions_length(5, 2)
        2
        sage: number_of_partitions_length(10, 2)
        5
        sage: number_of_partitions_length(10, 4)
        9
        sage: number_of_partitions_length(10, 0)
        0
        sage: number_of_partitions_length(10, 1)
        1
        sage: number_of_partitions_length(0, 0)
        1
        sage: number_of_partitions_length(0, 1)
        0
    """
    if algorithm == 'hybrid':
        # Do the hybrid algorithm
        # Special relations between n and k
        if n < k:
            # a partition of n has at most n parts
            return ZZ.zero()
        if n == k and n >= 0:
            # the only partition of n with n parts is [1]*n
            return ZZ.one()
        # Special case of n
        if n <= 0:
            # Note: we've already checked the case when n == k == 0
            return ZZ.zero()
        # Small values of k
        if k <= 0:
            return ZZ.zero()
        if k == 1:
            # only the one-row partition [n]
            return ZZ.one()
        if k == 2:
            # [n-1,1], [n-2,2], ..., down to [ceil(n/2), floor(n/2)]
            return n // 2
        # We have one column of length `k` and all (inner) partitions of
        # size `n-k` can't have length more than `k`
        if n <= k*2:
            return number_of_partitions(n - k)
    # Fall back to GAP (also the path taken for any non-'hybrid' algorithm)
    from sage.libs.gap.libgap import libgap
    return ZZ(libgap.NrPartitions(ZZ(n), ZZ(k)))
##########
# trac 14225: Partitions() is frequently used, but only weakly cached. Hence,
# establish a strong reference to it.
_Partitions = Partitions()
# Rather than caching an under-used function I have cached the default
# number_of_partitions functions which is currently using FLINT.
# AM trac #13072
cached_number_of_partitions = cached_function( flint_number_of_partitions )
# October 2012: fixing outdated pickles which use classes being deprecated
from sage.misc.persist import register_unpickle_override
from sage.combinat.partition_tuple import PartitionTuples_level_size
register_unpickle_override('sage.combinat.partition', 'PartitionTuples_nk', PartitionTuples_level_size)
register_unpickle_override('sage.combinat.partition', 'Partition_class', Partition)
register_unpickle_override('sage.combinat.partition', 'OrderedPartitions_nk', OrderedPartitions)
register_unpickle_override('sage.combinat.partition', 'PartitionsInBox_hw', PartitionsInBox)
register_unpickle_override('sage.combinat.partition', 'PartitionsGreatestLE_nk', PartitionsGreatestLE)
register_unpickle_override('sage.combinat.partition', 'PartitionsGreatestEQ_nk', PartitionsGreatestEQ)
| 32.166366 | 355 | 0.506474 |
from copy import copy
from itertools import accumulate
from sage.libs.pari.all import pari
from sage.libs.flint.arith import number_of_partitions as flint_number_of_partitions
from sage.arith.misc import multinomial
from sage.structure.global_options import GlobalOptions
from sage.structure.parent import Parent
from sage.structure.unique_representation import UniqueRepresentation
from sage.symbolic.ring import var
from sage.misc.lazy_import import lazy_import
lazy_import('sage.combinat.skew_partition', 'SkewPartition')
lazy_import('sage.combinat.partition_tuple', 'PartitionTuple')
from sage.misc.misc_c import prod
from sage.misc.prandom import randrange
from sage.misc.cachefunc import cached_method, cached_function
from sage.categories.infinite_enumerated_sets import InfiniteEnumeratedSets
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
from sage.sets.non_negative_integers import NonNegativeIntegers
from sage.rings.finite_rings.integer_mod_ring import IntegerModRing
from sage.rings.integer_ring import ZZ
from sage.rings.rational_field import QQ
from sage.rings.semirings.all import NN
from sage.arith.all import factorial, gcd
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.rings.integer import Integer
from sage.rings.infinity import infinity
from .combinat import CombinatorialElement
from . import tableau
from . import permutation
from . import composition
from sage.combinat.partitions import ZS1_iterator, ZS1_iterator_nk
from sage.combinat.integer_vector import IntegerVectors
from sage.combinat.integer_lists import IntegerListsLex
from sage.combinat.integer_vector_weighted import iterator_fast as weighted_iterator_fast
from sage.combinat.combinat_cython import conjugate
from sage.combinat.root_system.weyl_group import WeylGroup
from sage.combinat.combinatorial_map import combinatorial_map
from sage.groups.perm_gps.permgroup import PermutationGroup
from sage.graphs.dot2tex_utils import have_dot2tex
from sage.arith.all import binomial
class Partition(CombinatorialElement):
@staticmethod
def __classcall_private__(cls, mu=None, **keyword):
l = len(keyword)
if l == 0:
if mu is not None:
if isinstance(mu, Partition):
return mu
return _Partitions(list(mu))
if l == 1:
if 'beta_numbers' in keyword:
return _Partitions.from_beta_numbers(keyword['beta_numbers'])
elif 'exp' in keyword:
return _Partitions.from_exp(keyword['exp'])
elif 'frobenius_coordinates' in keyword:
return _Partitions.from_frobenius_coordinates(keyword['frobenius_coordinates'])
elif 'zero_one' in keyword:
return _Partitions.from_zero_one(keyword['zero_one'])
if l == 2 and 'core' in keyword and 'quotient' in keyword:
return _Partitions.from_core_and_quotient(keyword['core'], keyword['quotient'])
raise ValueError('incorrect syntax for Partition()')
def __setstate__(self, state):
if isinstance(state, dict):
self._set_parent(_Partitions)
self.__dict__ = state
else:
self._set_parent(state[0])
self.__dict__ = state[1]
def __init__(self, parent, mu):
if isinstance(mu, Partition):
CombinatorialElement.__init__(self, parent, mu._list)
else:
if mu and not mu[-1]:
mu = mu[:-1]
while mu and not mu[-1]:
mu.pop()
CombinatorialElement.__init__(self, parent, mu)
@cached_method
def __hash__(self):
return hash(tuple(self._list))
def _repr_(self):
return self.parent().options._dispatch(self, '_repr_', 'display')
def _ascii_art_(self):
from sage.typeset.ascii_art import AsciiArt
return AsciiArt(self._repr_diagram().splitlines(), baseline=0)
def _unicode_art_(self):
from sage.typeset.unicode_art import UnicodeArt
if not self._list:
return UnicodeArt(u'∅', baseline=0)
if self.parent().options.convention == "English":
data = list(self)
else:
data = list(reversed(self))
txt = [u'┌' + u'┬' * (data[0] - 1) + u'┐']
for i in range(len(data) - 1):
p = data[i]
q = data[i + 1]
if p < q:
txt += [u'├' + u'┼' * p + u'┬' * (q - p - 1) + u'┐']
elif p == q:
txt += [u'├' + u'┼' * (p - 1) + u'┤']
else:
txt += [u'├' + u'┼' * q + u'┴' * (p - q - 1) + u'┘']
txt += [u'└' + u'┴' * (data[-1] - 1) + u'┘']
return UnicodeArt(txt, baseline=0)
def _repr_list(self):
return '[%s]' % ', '.join('%s' % m for m in self)
def _repr_exp_low(self):
if not self._list:
return '-'
exp = self.to_exp()
return '%s' % ', '.join('%s%s' % (m+1, '' if e==1 else '^%s'%e)
for (m,e) in enumerate(exp) if e > 0)
def _repr_exp_high(self):
if not self._list:
return '-'
exp = self.to_exp()[::-1]
M=max(self)
return '%s' % ', '.join('%s%s' % (M-m, '' if e==1 else '^%s'%e)
for (m,e) in enumerate(exp) if e>0)
def _repr_compact_low(self):
if not self._list:
return '-'
exp = self.to_exp()
return '%s' % ','.join('%s%s' % (m+1, '' if e==1 else '^%s'%e)
for (m,e) in enumerate(exp) if e > 0)
def _repr_compact_high(self):
if not self._list:
return '-'
exp = self.to_exp()[::-1]
M=max(self)
return '%s' % ','.join('%s%s' % (M-m, '' if e==1 else '^%s'%e)
for (m,e) in enumerate(exp) if e>0)
def _repr_diagram(self):
return self.ferrers_diagram()
def level(self):
return 1
def components(self):
return [ self ]
def _latex_(self):
return self.parent().options._dispatch(self, '_latex_', 'latex')
def _latex_young_diagram(self):
if not self._list:
return "{\\emptyset}"
from sage.combinat.output import tex_from_array
return tex_from_array([ ["\\phantom{x}"]*row_size for row_size in self._list ])
def _latex_diagram(self):
if not self._list:
return "{\\emptyset}"
entry = self.parent().options("latex_diagram_str")
from sage.combinat.output import tex_from_array
return tex_from_array([ [entry]*row_size for row_size in self._list ], False)
def _latex_list(self):
return repr(self._list)
def _latex_exp_low(self):
if not self._list:
return "{\\emptyset}"
exp = self.to_exp()
return '%s' % ','.join('%s%s' % (m+1, '' if e==1 else '^{%s}'%e)
for (m,e) in enumerate(exp) if e > 0)
def _latex_exp_high(self):
if not self._list:
return "{\\emptyset}"
exp = self.to_exp()[::-1]
M = max(self)
return '%s' % ','.join('%s%s' % (M-m, '' if e==1 else '^{%s}'%e)
for (m,e) in enumerate(exp) if e>0)
def ferrers_diagram(self):
diag_str = self.parent().options.diagram_str
if not self._list:
return '-' if diag_str != '-' else "(/)"
if self.parent().options.convention == "English":
return '\n'.join(diag_str * p for p in self)
else:
return '\n'.join(diag_str * p for p in reversed(self))
def pp(self):
print(self.ferrers_diagram())
def __truediv__(self, p):
if not self.contains(p):
raise ValueError("To form a skew partition p/q, q must be contained in p.")
return SkewPartition([self[:], p])
def power(self, k):
res = []
for i in self:
g = gcd(i, k)
res.extend( [ZZ(i//g)]*int(g) )
res.sort(reverse=True)
return Partition(res)
    def __next__(self):
        """
        Return the partition that follows ``self`` among the partitions of
        ``self.size()``, or ``False`` if ``self`` is the last one,
        i.e. ``[1]*n``.

        The successor is produced by rewriting the part list in place:
        the last part larger than 1 is decreased and the freed cells are
        redistributed into as many equal parts as possible.
        """
        p = self
        n = 0
        m = 0
        # single pass: n = size of the partition, m = number of parts
        for i in p:
            n += i
            m += 1
        # scratch list, padded with 1's so it can hold the longest successor
        next_p = p[:] + [1]*(n - len(p))
        if p == [1]*n:
            # all parts equal 1: this is the final partition of n
            return False
        # h = number of parts of next_p that are larger than 1
        h = 0
        for i in next_p:
            if i != 1:
                h += 1
        if next_p[h-1] == 2:
            # the last non-1 part is a 2: it splits into two 1's
            m += 1
            next_p[h-1] = 1
            h -= 1
        else:
            # decrease the last non-1 part to r, then refill the t freed
            # cells greedily with parts equal to r, plus a final remainder
            r = next_p[h-1] - 1
            t = m - h + 1
            next_p[h-1] = r
            while t >= r:
                h += 1
                next_p[h-1] = r
                t -= r
            if t == 0:
                m = h
            else:
                m = h + 1
                if t > 1:
                    h += 1
                    next_p[h-1] = t
        return self.parent()(next_p[:m])
    next = __next__
def size(self):
return sum(self)
def sign(self):
return (-1)**(self.size()-self.length())
def k_size(self, k):
return self.k_boundary(k).size()
def boundary(self):
def horizontal_piece(xy, bdy):
(start_x, start_y) = xy
if not bdy:
h_piece = [(start_x, start_y)]
else:
stop_x = bdy[-1][0]
y = start_y
h_piece = [(x, y) for x in range(start_x, stop_x)]
h_piece = list(reversed(h_piece))
return h_piece
bdy = []
for i, part in enumerate(self):
(cell_x, cell_y) = (part - 1, i)
(x, y) = (cell_x + 1, cell_y + 1)
bdy += horizontal_piece((x, y - 1), bdy)
bdy.append((x, y))
(top_left_x, top_left_y) = (0, len(self))
bdy += horizontal_piece((top_left_x, top_left_y), bdy)
return bdy
def k_rim(self, k):
interior_rim = self.k_interior(k).boundary()
interior_top_left_y = interior_rim[-1][1]
v_piece = [(0, y) for y in range(interior_top_left_y+1, len(self)+1)]
interior_bottom_right_x = interior_rim[0][0]
if self:
ptn_bottom_right_x = self[0]
else:
ptn_bottom_right_x = 0
h_piece = [(x, 0) for x in
range(ptn_bottom_right_x, interior_bottom_right_x, -1)]
rim = h_piece + interior_rim + v_piece
return rim
def k_row_lengths(self, k):
return self.k_boundary(k).row_lengths()
def k_column_lengths(self, k):
return self.k_boundary(k).column_lengths()
def has_rectangle(self, h, w):
assert h >= 1
assert w >= 1
num_rows_of_len_w = self.to_exp(w)[w - 1]
return num_rows_of_len_w >= h
def has_k_rectangle(self, k):
return any(self.has_rectangle(a, b) for (a, b) in
[(k-i+1, i) for i in range(1, k+1)])
def is_k_bounded(self, k):
assert k >= 0
if self.is_empty():
return True
else:
return self[0] <= k
def is_k_reducible(self, k):
if not self.is_k_bounded(k):
raise ValueError('we only talk about k-reducible / k-irreducible for k-bounded partitions')
return self.has_k_rectangle(k)
def is_k_irreducible(self, k):
return not self.is_k_reducible(k)
def is_symmetric(self):
return self == self.conjugate()
def next_within_bounds(self, min=[], max=None, partition_type=None):
if max is not None:
assert _Partitions(max).contains(_Partitions(self))
assert _Partitions(self).contains(_Partitions(min))
if max is not None and _Partitions(max).is_empty():
return None
p = list(self)
min = list(min)
if max is None:
return _Partitions(p + [1])
p = p + [0] * (len(max) - len(p))
min = min + [0] * (len(max) - len(min))
# finally, run the algo to find next_p
next_p = copy(p)
def condition(a, b):
if partition_type in ('strict', 'strictly decreasing'):
return a < b - 1
elif partition_type in (None, 'weak', 'weakly decreasing'):
return a < b
else:
raise ValueError('unrecognized partition type')
for r in range(len(p) - 1, -1, -1):
if r == 0:
if (max is None or p[r] < max[r]):
next_p[r] += 1
break
else:
return None
else:
if (max is None or p[r] < max[r]) and condition(p[r], p[r-1]):
next_p[r] += 1
break
else:
next_p[r] = min[r]
continue
return _Partitions(next_p)
def row_standard_tableaux(self):
return tableau.RowStandardTableaux(self)
def standard_tableaux(self):
return tableau.StandardTableaux(self)
def up(self):
p = self
previous = p.get_part(0) + 1
for i, current in enumerate(p):
if current < previous:
yield Partition(p[:i] + [current + 1] + p[i + 1:])
previous = current
yield Partition(p + [1])
def up_list(self):
return list(self.up())
def down(self):
p = self
l = len(p)
for i in range(l-1):
if p[i] > p[i+1]:
yield Partition(p[:i] + [ p[i]-1 ] + p[i+1:])
if l >= 1:
last = p[-1]
if last == 1:
yield Partition(p[:-1])
else:
yield Partition(p[:-1] + [ p[-1] - 1 ])
def down_list(self):
return [p for p in self.down()]
@combinatorial_map(name="cell poset")
def cell_poset(self, orientation="SE"):
from sage.combinat.posets.posets import Poset
covers = {}
if orientation == "NW":
for i, row in enumerate(self):
if i == 0:
covers[(0, 0)] = []
for j in range(1, row):
covers[(0, j)] = [(0, j - 1)]
else:
covers[(i, 0)] = [(i - 1, 0)]
for j in range(1, row):
covers[(i, j)] = [(i - 1, j), (i, j - 1)]
elif orientation == "NE":
for i, row in enumerate(self):
if i == 0:
covers[(0, row - 1)] = []
for j in range(row - 1):
covers[(0, j)] = [(0, j + 1)]
else:
covers[(i, row - 1)] = [(i - 1, row - 1)]
for j in range(row - 1):
covers[(i, j)] = [(i - 1, j), (i, j + 1)]
elif orientation == "SE":
l = len(self) - 1
for i, row in enumerate(self):
if i == l:
covers[(i, row - 1)] = []
for j in range(row - 1):
covers[(i, j)] = [(i, j + 1)]
else:
next_row = self[i + 1]
if row == next_row:
covers[(i, row - 1)] = [(i + 1, row - 1)]
for j in range(row - 1):
covers[(i, j)] = [(i + 1, j), (i, j + 1)]
else:
covers[(i, row - 1)] = []
for j in range(next_row):
covers[(i, j)] = [(i + 1, j), (i, j + 1)]
for j in range(next_row, row - 1):
covers[(i, j)] = [(i, j + 1)]
elif orientation == "SW":
l = len(self) - 1
for i, row in enumerate(self):
if i == l:
covers[(i, 0)] = []
for j in range(1, row):
covers[(i, j)] = [(i, j - 1)]
else:
covers[(i, 0)] = [(i + 1, 0)]
next_row = self[i + 1]
for j in range(1, next_row):
covers[(i, j)] = [(i + 1, j), (i, j - 1)]
for j in range(next_row, row):
covers[(i, j)] = [(i, j - 1)]
return Poset(covers)
def frobenius_coordinates(self):
mu = self
muconj = mu.conjugate() # Naive implementation
if len(mu) <= len(muconj):
a = [x for x in (val-i-1 for i, val in enumerate(mu)) if x>=0]
b = [x for x in (muconj[i]-i-1 for i in range(len(a))) if x>=0]
else:
b = [x for x in (val-i-1 for i, val in enumerate(muconj)) if x>=0]
a = [x for x in (mu[i]-i-1 for i in range(len(b))) if x>=0]
return (a,b)
def frobenius_rank(self):
for i, x in enumerate(self):
if x <= i:
return i
return len(self)
def beta_numbers(self, length=None):
true_length = len(self)
if length is None:
length = true_length
elif length < true_length:
raise ValueError("length must be at least the length of the partition")
beta = [l + length - i - 1 for (i, l) in enumerate(self)]
if length > true_length:
beta.extend(list(range(length-true_length-1,-1,-1)))
return beta
def crank(self):
l = len(self)
if l == 0:
return 0
if self[-1] > 1:
return self[0]
ind_1 = self.index(1)
w = l - ind_1 # w is omega(self).
m = len([x for x in self if x > w])
return m - w
def t_completion(self, t):
if self._list and t < self.size() + self._list[0]:
raise ValueError("{}-completion is not defined".format(t))
return Partition([t - self.size()] + self._list)
def larger_lex(self, rhs):
return CombinatorialElement.__gt__(self, rhs)
def dominates(self, p2):
p1 = self
sum1 = 0
sum2 = 0
min_length = min(len(p1), len(p2))
if min_length == 0:
return not p2 # equivalent to len(p1) >= len(p2) = 0
for i in range(min_length):
sum1 += p1[i]
sum2 += p2[i]
if sum2 > sum1:
return False
return sum(p1) >= sum(p2)
def cells(self):
res = []
for i in range(len(self)):
for j in range(self[i]):
res.append( (i,j) )
return res
def generalized_pochhammer_symbol(self, a, alpha):
res = 1
for (i,j) in self.cells():
res *= (a - (i-1)/alpha + j-1)
return res
def get_part(self, i, default=Integer(0)):
if i < len(self._list):
return self._list[i]
else:
return default
@combinatorial_map(name="partition to minimal Dyck word")
def to_dyck_word(self, n=None):
from sage.combinat.dyck_word import DyckWord
if not self._list:
if n is None:
return DyckWord([])
return DyckWord([1]*n + [0]*n)
list_of_word = []
if n is None:
n = max(i + l + 1 for (i, l) in enumerate(self))
# This n is also max(i+j for (i,j) in self.cells()) + 2.
list_of_word.extend([1]*(n-self.length()))
copy_part = list(self)
while copy_part:
c = copy_part.pop()
list_of_word.extend([0]*c)
for i in range(len(copy_part)):
copy_part[i] -= c
list_of_word.append(1)
list_of_word.extend([0]*(n-self[0]))
return DyckWord(list_of_word)
@combinatorial_map(order=2, name="conjugate partition")
def conjugate(self):
if not self:
par = Partitions_n(0)
return par.element_class(par, [])
par = Partitions_n(sum(self))
return par.element_class(par, conjugate(self))
def suter_diagonal_slide(self, n, exp=1):
# Check for valid input
if len(self) > 0 and len(self) + self._list[0] > n: # >, not >=, since we double count the (0,0) cell
raise ValueError("the hook length must be less than n")
ret = self
# Arbitrary exp
exp = exp % n # It is at most order n
if exp > n / 2:
exp -= n
while exp != 0:
leng = len(ret)
if exp > 0:
# Suter's map \sigma_n
if leng == 0:
ret = Partition([1] * (n - 1))
exp -= 1
continue
res = [i + 1 for i in ret._list[1:]]
res += [1] * (n - leng - ret._list[0])
ret = Partition(res)
exp -= 1
else:
if leng == 0:
ret = Partition([n - 1])
exp += 1
continue
res = [n - leng - 1]
res.extend([i - 1 for i in ret._list if i > 1])
ret = Partition(res)
exp += 1
return ret
@combinatorial_map(name="reading tableau")
def reading_tableau(self):
st = tableau.StandardTableaux(self).first()
return st.reading_word_permutation().right_tableau()
@combinatorial_map(name="initial tableau")
def initial_tableau(self):
sigma = list(accumulate([1] + self._list))
tab = [list(range(sigma[i], sigma[i + 1]))
for i in range(len(sigma) - 1)]
return tableau.StandardTableau(tab)
def initial_column_tableau(self):
return self.conjugate().initial_tableau().conjugate()
def garnir_tableau(self, *cell):
try:
(row, col) = cell
except ValueError:
(row, col) = cell[0]
if row + 1 >= len(self) or col >= self[row+1]:
raise ValueError('(row+1, col) must be inside the diagram')
g=self.initial_tableau().to_list()
a=g[row][col]
g[row][col:] = list(range(a+col+1,g[row+1][col]+1))
g[row+1][:col+1] = list(range(a,a+col+1))
g=tableau.Tableau(g)
g._garnir_cell = (row, col)
return g
def top_garnir_tableau(self,e,cell):
(row,col)=cell
if row+1>=len(self) or col>=self[row+1]:
raise ValueError('(%s,%s)=(row+1,col) must be inside the diagram' %(row+1,col))
g=self.garnir_tableau(cell)
if e==0:
return g
a=e*int((self[row]-col)/e)
b=e*int((col+1)/e)
if a==0 or b==0:
return g
t=g.to_list()
m=g[row+1][0]
t[row][col:a+col]=[m+col-b+1+i for i in range(a)]
t[row+1][col-b+1:col+1]=[m+a+col-b+1+i for i in range(b)]
return tableau.StandardTableau(t)
@cached_method
def young_subgroup(self):
gens=[]
m=0
for row in self:
gens.extend([ (c,c+1) for c in range(m+1,m+row)])
m+=row
gens.append(list(range(1,self.size() + 1)))
return PermutationGroup( gens )
def young_subgroup_generators(self):
gens = []
m = 0
for row in self:
gens.extend(list(range(m + 1, m + row)))
m += row
return gens
@cached_method
def _initial_degree(self, e, multicharge=(0,)):
if e == 0:
return ZZ.zero()
else:
return sum(m // e for m in self)
def degree(self, e):
return sum(t.degree(e) for t in self.standard_tableaux())
def prime_degree(self, p):
ps = [p]
while ps[-1] * p < self.size():
ps.append(ps[-1] * p)
return sum(t.degree(pk) for pk in ps for t in self.standard_tableaux())
def arm_length(self, i, j):
p = self
if i < len(p) and j < p[i]:
return p[i]-(j+1)
else:
raise ValueError("The cell is not in the diagram")
def arm_lengths(self, flat=False):
p = self
if not flat:
return [[pi - (j + 1) for j in range(pi)] for pi in p]
return [pi - (j + 1) for pi in p for j in range(pi)]
def arm_cells(self, i, j):
p = self
if i < len(p) and j < p[i]:
return [ (i, x) for x in range(j+1, p[i]) ]
else:
raise ValueError("The cell is not in the diagram")
def leg_length(self, i, j):
conj = self.conjugate()
if j < len(conj) and i < conj[j]:
return conj[j]-(i+1)
else:
raise ValueError("The cell is not in the diagram")
def leg_lengths(self, flat=False):
p = self
conj = p.conjugate()
if not flat:
return [[conj[j] - (i + 1) for j in range(pi)]
for i, pi in enumerate(p)]
return [conj[j] - (i + 1) for i, pi in enumerate(p)
for j in range(pi)]
def leg_cells(self, i, j):
l = self.leg_length(i, j)
return [(x, j) for x in range(i+1, i+l+1)]
def attacking_pairs(self):
attacking_pairs = []
for i, r in enumerate(self):
for j in range(r):
for k in range(j+1, r):
attacking_pairs.append( ((i,j),(i,k)) )
if i == 0:
continue
for k in range(j):
attacking_pairs.append( ((i,j),(i-1,k)) )
return attacking_pairs
def dominated_partitions(self, rows=None):
n = sum(self)
P = Partitions_n(n)
if rows:
return [P(x) for x in ZS1_iterator_nk(n, rows) if self.dominates(x)]
else:
return [P(x) for x in ZS1_iterator(n) if self.dominates(x)]
def contains(self, x):
return len(self) >= len(x) and all(self[i] >= x[i] for i in range(len(x)))
def hook_product(self, a):
nu = self.conjugate()
res = 1
for i in range(len(self)):
for j in range(self[i]):
res *= a*(self[i]-j-1)+nu[j]-i
return res
def hook_polynomial(self, q, t):
nu = self.conjugate()
res = 1
for i in range(len(self)):
for j in range(self[i]):
res *= 1-q**(self[i]-j-1)*t**(nu[j]-i)
return res
def hook_length(self, i, j):
return self.leg_length(i,j)+self.arm_length(i,j)+1
def hooks(self):
res = []
for row in self.hook_lengths():
res += row
res.sort(reverse=True)
return res
def hook_lengths(self):
p = self
conj = p.conjugate()
return [[p[i]-(i+1)+conj[j]-(j+1)+1 for j in range(p[i])] for i in range(len(p))]
def upper_hook(self, i, j, alpha):
p = self
conj = self.conjugate()
return conj[j] - (i+1) + alpha*(p[i]-j)
def upper_hook_lengths(self, alpha):
p = self
conj = p.conjugate()
return [[conj[j] - (i+1) + alpha*(p[i]-j) for j in range(p[i])] for i in range(len(p))]
def lower_hook(self, i, j, alpha):
p = self
conj = self.conjugate()
return conj[j] - i + alpha*(p[i] - (j+1))
def lower_hook_lengths(self, alpha):
p = self
conj = p.conjugate()
return [[conj[j] - i + alpha*(p[i]-(j+1)) for j in range(p[i])] for i in range(len(p))]
def weighted_size(self):
p = self
return sum([i*p[i] for i in range(len(p))])
def is_empty(self):
return len(self) == 0
def length(self):
return len(self)
def to_exp(self, k=0):
p = self
if len(p) > 0:
k = max(k, p[0])
a = [ZZ.zero()] * k
for i in p:
a[i-1] += 1
return a
def evaluation(self):
return self.to_exp()
def to_exp_dict(self):
d = {}
for part in self:
d[part] = d.get(part, 0) + 1
return d
def centralizer_size(self, t=0, q=0):
size = prod(i**mi * factorial(mi)
for i, mi in self.to_exp_dict().items())
if t or q:
size *= prod((ZZ.one() - q ** j) / (ZZ.one() - t ** j)
for j in self)
return size
aut = centralizer_size
def content(self, r, c, multicharge=(0,)):
return c - r + multicharge[0]
def residue(self, r, c, l):
return (c - r) % l
@cached_method
def block(self, e, multicharge=(0,)):
block = {}
Ie = IntegerModRing(e)
for (r,c) in self.cells():
i = Ie(multicharge[0] + c - r)
block[i] = block.get(i, 0) + 1
return block
def defect(self, e, multicharge=(0,)):
beta = self.block(e, multicharge)
Ie = IntegerModRing(e)
return beta.get(multicharge[0], 0) - sum(beta[r]**2 - beta[r] * beta.get(Ie(r+1), 0)
for r in beta)
def contents_tableau(self, multicharge=(0,)):
return tableau.Tableau([[multicharge[0]-r+c for c in range(self[r])]
for r in range(len(self))])
def is_restricted(self, e, multicharge=(0,)):
return (not self
or ( self[-1] < e and all(self[r]-self[r+1] < e for r in range(len(self)-1)) ))
def is_regular(self, e, multicharge=(0,)):
return all(self[r] > self[r+e-1] for r in range(len(self)-e+1))
def conjugacy_class_size(self):
return factorial(sum(self))/self.centralizer_size()
def corners(self):
p = self
if p.is_empty():
return []
lcors = [[0,p[0]-1]]
nn = len(p)
if nn == 1:
return [tuple(c) for c in lcors]
lcors_index = 0
for i in range(1, nn):
if p[i] == p[i-1]:
lcors[lcors_index][0] += 1
else:
lcors.append([i,p[i]-1])
lcors_index += 1
return [tuple(c) for c in lcors]
inside_corners = corners
removable_cells = corners
def corners_residue(self, i, l):
return [x for x in self.corners() if self.residue(*x, l=l) == i]
inside_corners_residue = corners_residue
removable_cells_residue = corners_residue
def outside_corners(self):
p = self
if p.is_empty():
return [(0,0)]
res = [ (0, p[0]) ]
for i in range(1, len(p)):
if p[i-1] != p[i]:
res.append((i,p[i]))
res.append((len(p), 0))
return res
addable_cells = outside_corners
def outside_corners_residue(self, i, l):
return [x for x in self.outside_corners() if self.residue(*x, l=l) == i]
addable_cells_residue = outside_corners_residue
def rim(self):
p = self
res = []
prevLen = 1
for i in range(len(p)-1, -1, -1):
for c in range(prevLen-1, p[i]):
res.append((i,c))
prevLen = p[i]
return res
def outer_rim(self):
p = self
res = []
prevLen = 0
for i in range(len(p)-1, -1, -1):
for c in range(prevLen, p[i]+1):
res.append((i+1,c))
prevLen = p[i]
res.append((0, prevLen))
return res
def zero_one_sequence(self):
tmp = [self[i]-i for i in range(len(self))]
return ([Integer(not (i in tmp)) for i in range(-len(self)+1,self.get_part(0)+1)])
    def core(self, length):
        r"""
        Return the ``length``-core of ``self``: the partition left over
        after repeatedly removing rim hooks of size ``length``.

        The computation works on the first-column hook lengths (beta
        numbers): removing a rim hook of size ``length`` decreases one
        beta number by ``length``, so each residue class mod ``length``
        is pushed down to its smallest possible values.
        """
        p = self
        remainder = len(p) % length
        part = p[:] + [0]*remainder
        # beta numbers: part + number of rows strictly below
        part = [part[i-1] + len(part)-i for i in range(1, len(part)+1)]
        # within each residue class e mod length, replace the beta numbers
        # (scanned bottom-up) by the minimal values e, e+length, e+2*length, ...
        for e in range(length):
            k = e
            for i in reversed(range(1,len(part)+1)):
                if part[i-1] % length == e:
                    part[i-1] = k
                    k += length
        part.sort()
        part.reverse()
        # convert beta numbers back to parts and drop the zero parts
        part = [part[i-1]-len(part)+i for i in range(1, len(part)+1)]
        return Partition([x for x in part if x != 0])
    def quotient(self, length):
        r"""
        Return the ``length``-quotient of ``self``: a
        :class:`PartitionTuple` of ``length`` components obtained by
        splitting the first-column hook lengths (beta numbers) into
        residue classes modulo ``length``.
        """
        p = self
        remainder = len(p) % length
        part = p[:] + [0]*(length-remainder)
        # beta numbers of the padded partition
        part = [part[i-1] + len(part)-i for i in range(1, len(part)+1)]
        result = [None]*length
        # for each residue class e, the beta numbers congruent to e,
        # read bottom-up, encode the e-th component of the quotient
        for e in range(length):
            k = e
            tmp = []
            for i in reversed(range(len(part))):
                if part[i] % length == e:
                    # (beta - expected minimal value) / length is a part
                    tmp.append(ZZ((part[i]-k)//length))
                    k += length
            a = [i for i in tmp if i != 0]
            a.reverse()
            result[e] = a
        from .partition_tuple import PartitionTuple
        return PartitionTuple(result)
def is_core(self, k):
return k not in self.hooks()
def k_interior(self, k):
return Partition([len([i for i in row if i > k])
for row in self.hook_lengths()])
def k_boundary(self, k):
return SkewPartition([self, self.k_interior(k)])
def add_cell(self, i, j = None):
if j is None:
if i >= len(self):
j = 0
else:
j = self[i]
if (i,j) in self.outside_corners():
pl = self.to_list()
if i == len(pl):
pl.append(1)
else:
pl[i] += 1
return Partition(pl)
raise ValueError("[%s, %s] is not an addable cell"%(i,j))
def remove_cell(self, i, j = None):
if i >= len(self):
raise ValueError("i must be less than the length of the partition")
if j is None:
j = self[i] - 1
if (i,j) not in self.corners():
raise ValueError("[%d,%d] is not a corner of the partition" % (i,j))
if self[i] == 1:
return Partition(self[:-1])
else:
return Partition(self[:i] + [ self[i:i+1][0] - 1 ] + self[i+1:])
def k_irreducible(self, k):
pexp = self.to_exp()
return Partition(sum(([r+1] for r in range(len(pexp)-1,-1,-1) for m in range(pexp[r] % (k-r))),[]))
    def k_skew(self, k):
        r"""
        Return the `k`-skew diagram of the `k`-bounded partition ``self``
        as a :class:`SkewPartition`.

        The diagram is built recursively, one row at a time: the first
        row of ``self`` is shifted right until the columns below it are
        short enough (presumably so that no hook length exceeds `k` --
        see the `k`-skew definition; the code only checks the column
        heights against ``k - self[0]``).
        """
        if len(self) == 0:
            return SkewPartition([[],[]])
        if self[0] > k:
            raise ValueError("the partition must be %d-bounded" % k)
        # recursively build the k-skew of the partition minus its first row
        s = Partition(self[1:]).k_skew(k)
        s_inner = list(s.inner())
        s_outer = list(s.outer())
        s_conj_rl = s.conjugate().row_lengths()
        # maximal column height allowed below the new top row
        kdiff = k - self[0]
        if s_outer == []:
            spot = 0
        else:
            spot = s_outer[0]
        # find the leftmost offset where every column is short enough
        for i in range(len(s_conj_rl)):
            if s_conj_rl[i] <= kdiff:
                spot = i
                break
        outer = [ self[0] + spot ] + s_outer[:]
        if spot > 0:
            inner = [ spot ] + s_inner[:]
        else:
            inner = s_inner[:]
        return SkewPartition([outer, inner])
def to_core(self, k):
from sage.combinat.core import Core
return Core(self.k_skew(k)[0],k+1)
def from_kbounded_to_reduced_word(self, k):
p=self.k_skew(k)[0]
result = []
while not p.is_empty():
corners = p.corners()
c = p.content(corners[0][0],corners[0][1])%(k+1)
result.append(Integer(c))
list = [x for x in corners if p.content(x[0],x[1])%(k+1) ==c]
for x in list:
p = p.remove_cell(x[0])
return result
def from_kbounded_to_grassmannian(self, k):
return WeylGroup(['A',k,1]).from_reduced_word(self.from_kbounded_to_reduced_word(k))
def to_list(self):
return self._list[:]
def add_vertical_border_strip(self, k):
return [p.conjugate() for p in self.conjugate().add_horizontal_border_strip(k)]
def add_horizontal_border_strip(self, k):
conj = self.conjugate().to_list()
shelf = []
res = []
i = 0
while i < len(conj):
tmp = 1
while i+1 < len(conj) and conj[i] == conj[i+1]:
tmp += 1
i += 1
if i == len(conj)-1 and i > 0 and conj[i] != conj[i-1]:
tmp = 1
shelf.append(tmp)
i += 1
shelf.append(k)
for iv in IntegerVectors(k, len(shelf), outer=shelf):
iv = list(iv)
tmp = conj + [0]*k
j = 0
for t in range(len(iv)):
while iv[t] > 0:
tmp[j] += 1
iv[t] -= 1
j += 1
j = sum(shelf[:t+1])
res.append(Partition([u for u in tmp if u != 0]).conjugate())
return res
def remove_horizontal_border_strip(self, k):
return Partitions_with_constraints(n = self.size()-k,
min_length = len(self)-1,
max_length = len(self),
floor = self[1:]+[0],
ceiling = self[:],
max_slope = 0,
name = "The subpartitions of {} obtained by removing an horizontal border strip of length {}".format(self,k))
def k_conjugate(self, k):
return Partition(self.k_skew(k).conjugate().row_lengths())
def arms_legs_coeff(self, i, j):
QQqt = PolynomialRing(QQ, ['q', 't'])
(q, t) = QQqt.gens()
if i < len(self) and j < self[i]:
res = (1-q**self.arm_length(i,j) * t**(self.leg_length(i,j)+1))
res /= (1-q**(self.arm_length(i,j)+1) * t**self.leg_length(i,j))
return res
return ZZ.one()
def atom(self):
res = []
for tab in tableau.StandardTableaux_size(self.size()):
if tab.atom() == self:
res.append(tab)
return res
def k_atom(self, k):
res = [tableau.Tableau([])]
for i in range(len(self)):
res = (x.promotion_operator(self[-i - 1]) for x in res)
res = sum(res, [])
res = (y.catabolism_projector(Partition(self[-i - 1:]).k_split(k))
for y in res)
res = [i for i in res if i]
return res
def k_split(self, k):
if self == []:
return []
elif k < self[0]:
return []
else:
res = []
part = list(self)
while part and part[0] + len(part) - 1 >= k:
p = k - part[0]
res.append(part[:p + 1])
part = part[p + 1:]
if part:
res.append(part)
return res
def jacobi_trudi(self):
return SkewPartition([ self, [] ]).jacobi_trudi()
def character_polynomial(self):
k = self.size()
P = PolynomialRing(QQ, k, 'x')
x = P.gens()
from sage.combinat.sf.sf import SymmetricFunctions
Sym = SymmetricFunctions(QQ)
s = Sym.schur()
p = Sym.power()
ps_mu = p(s(self))
items = ps_mu.monomial_coefficients().items()
partition_to_monomial = lambda part: prod([ (i*x[i-1]-1) for i in part ])
res = [ [partition_to_monomial(mc[0]), mc[1]] for mc in items ]
res = [ prod(pair) for pair in res ]
res = sum( res )
from sage.combinat.misc import umbral_operation
return umbral_operation(res)
    def dimension(self, smaller=None, k=1):
        """
        Return the number of paths from ``smaller`` to ``self`` in the
        ``k``-analogue of Young's lattice.

        For ``k == 1`` and empty ``smaller`` this is the hook length
        formula; for a genuine skew pair it is a determinant formula.
        For ``k > 1`` the count factors through the ``k``-core and
        ``k``-quotient.
        """
        larger = self
        if smaller is None:
            smaller = Partition([])
        if k == 1:
            if smaller == Partition([]):
                # Straight shape: hook length formula.
                return factorial(larger.size())/prod(larger.hooks())
            else:
                if not larger.contains(smaller):
                    # No paths when smaller does not fit inside larger.
                    return 0
                else:
                    # Skew shape: determinant of inverse factorials of
                    # part differences (entries vanish below the diagonal
                    # band since 1/i! is taken as 0 for i < 0).
                    def inv_factorial(i):
                        if i < 0:
                            return 0
                        else:
                            return 1/factorial(i)
                    len_range = list(range(larger.length()))
                    from sage.matrix.constructor import matrix
                    M = matrix(QQ,[[inv_factorial(larger.get_part(i)-smaller.get_part(j)-i+j) for i in len_range] for j in len_range])
                    return factorial(larger.size()-smaller.size())*M.determinant()
        else:
            # k > 1: both partitions must share the same k-core, and the
            # count factors over the components of the k-quotient.
            larger_core = larger.core(k)
            smaller_core = smaller.core(k)
            if smaller_core != larger_core:
                return 0
            larger_quotients = larger.quotient(k)
            smaller_quotients = smaller.quotient(k)
            def multinomial_with_partitions(sizes,path_counts):
                # Multiply path counts within each component and choose
                # how the added cells are distributed among components.
                return prod(path_counts) * multinomial(sizes)
            sizes = [larger_quotients[i].size()-smaller_quotients[i].size() for i in range(k)]
            path_counts = [larger_quotients[i].dimension(smaller_quotients[i]) for i in range(k)]
            return multinomial_with_partitions(sizes,path_counts)
    def plancherel_measure(self):
        """
        Return the probability of ``self`` under the Plancherel measure
        on partitions of ``self.size()``, i.e. ``dim(self)**2 / n!``.
        """
        return self.dimension()**2/factorial(self.size())
    def outline(self, variable=None):
        """
        Return the outline function of ``self`` as a piecewise-linear
        expression in ``variable``: the sum of ``|variable + c|`` over
        the contents ``c`` of the outside corners minus the same sum over
        the contents of the (inside) corners.
        """
        if variable is None:
            variable = var('x')
        outside_contents = [self.content(*c) for c in self.outside_corners()]
        inside_contents = [self.content(*c) for c in self.corners()]
        return sum(abs(variable+c) for c in outside_contents)\
               -sum(abs(variable+c) for c in inside_contents)
    def dual_equivalence_graph(self, directed=False, coloring=None):
        """
        Return the (dual equivalence) graph on the standard tableaux of
        shape ``self``, with edges labelled by the index ``i`` of the
        relation connecting two tableaux.

        The graph is built once and cached on ``self`` (``_DEG`` resp.
        ``_DDEG``); later calls return a mutable copy of the cache.

        INPUT:

        - ``directed`` -- boolean (default: ``False``); whether to build
          a directed graph
        - ``coloring`` -- optional function or dict assigning a color to
          each edge label (used only for dot2tex latex output)
        """
        try:
            # Fast path: a cached immutable graph already exists.
            if directed:
                G = self._DDEG.copy(immutable=False)
            else:
                G = self._DEG.copy(immutable=False)
            if have_dot2tex():
                if coloring is None:
                    # Default palette; labels outside the dict are black.
                    d = {2: 'red', 3: 'blue', 4: 'green', 5: 'purple',
                         6: 'brown', 7: 'orange', 8: 'yellow'}
                    def coloring(i):
                        if i in d:
                            return d[i]
                        return 'black'
                elif isinstance(coloring, dict):
                    d = coloring
                    coloring = lambda x: d[x]
                G.set_latex_options(format="dot2tex",
                                    edge_labels=True,
                                    color_by_label=coloring)
            return G
        except AttributeError:
            # Cache miss: fall through and build the graph below.
            pass
        T = list(tableau.StandardTableaux(self))
        n = sum(self)
        edges = []
        # Map each tableau to its reading word permutation and back.
        to_perms = {t: t.reading_word_permutation() for t in T}
        to_tab = {to_perms[k]: k for k in to_perms}
        Perm = permutation.Permutations()
        for t in T:
            pt = list(to_perms[t])
            for i in range(2, n):
                # Positions of i, i+1 and i-1 in the reading word.
                ii = pt.index(i)
                iip = pt.index(i+1)
                iim = pt.index(i-1)
                l = sorted([iim, ii, iip])
                # Only act when the position of i is leftmost of the three.
                if l[0] != ii:
                    continue
                # Swap the entries at the outer two positions.
                x = pt[:]
                x[l[0]], x[l[2]] = x[l[2]], x[l[0]]
                if ii < iip:
                    e = [t, to_tab[Perm(x)], i]
                    edges.append(e)
                else:
                    e = [to_tab[Perm(x)], t, i]
                    edges.append(e)
        if directed:
            from sage.graphs.digraph import DiGraph
            self._DDEG = DiGraph([T, edges], format="vertices_and_edges",
                                 immutable=True, multiedges=True)
        else:
            from sage.graphs.graph import Graph
            self._DEG = Graph([T, edges], format="vertices_and_edges",
                              immutable=True, multiedges=True)
        # Recurse once: the cache is now populated, so this returns a copy.
        return self.dual_equivalence_graph(directed, coloring)
annot be infinite")
if n is None or n is NN or n is NonNegativeIntegers():
if len(kwargs) > 0:
if len(kwargs) == 1:
if 'max_part' in kwargs:
return Partitions_all_bounded(kwargs['max_part'])
if 'regular' in kwargs:
return RegularPartitions_all(kwargs['regular'])
if 'restricted' in kwargs:
return RestrictedPartitions_all(kwargs['restricted'])
elif len(kwargs) == 2:
if 'regular' in kwargs:
if kwargs['regular'] < 1 or kwargs['regular'] not in ZZ:
raise ValueError("the regularity must be a positive integer")
if 'max_part' in kwargs:
return RegularPartitions_bounded(kwargs['regular'], kwargs['max_part'])
if 'max_length' in kwargs:
return RegularPartitions_truncated(kwargs['regular'], kwargs['max_length'])
raise ValueError("the size must be specified with any keyword argument")
return Partitions_all()
elif isinstance(n, (int,Integer)):
if len(kwargs) == 0:
return Partitions_n(n)
if len(kwargs) == 1:
if 'max_part' in kwargs:
return PartitionsGreatestLE(n, kwargs['max_part'])
if 'length' in kwargs:
return Partitions_nk(n, kwargs['length'])
if (len(kwargs) > 1 and
('parts_in' in kwargs or
'starting' in kwargs or
'ending' in kwargs)):
raise ValueError("The parameters 'parts_in', 'starting' and "+
"'ending' cannot be combined with anything else.")
if 'parts_in' in kwargs:
return Partitions_parts_in(n, kwargs['parts_in'])
elif 'starting' in kwargs:
return Partitions_starting(n, kwargs['starting'])
elif 'ending' in kwargs:
return Partitions_ending(n, kwargs['ending'])
elif 'regular' in kwargs:
return RegularPartitions_n(n, kwargs['regular'])
elif 'restricted' in kwargs:
return RestrictedPartitions_n(n, kwargs['restricted'])
kwargs['name'] = "Partitions of the integer %s satisfying constraints %s"%(n, ", ".join( ["%s=%s"%(key, kwargs[key]) for key in sorted(kwargs)] ))
kwargs['min_part'] = max(1,kwargs.get('min_part',1))
kwargs['max_slope'] = min(0,kwargs.get('max_slope',0))
if kwargs.get('min_slope', -float('inf')) > 0:
raise ValueError("the minimum slope must be non-negative")
if 'outer' in kwargs:
kwargs['max_length'] = min(len(kwargs['outer']),
kwargs.get('max_length', infinity))
kwargs['ceiling'] = tuple(kwargs['outer'])
del kwargs['outer']
if 'inner' in kwargs:
inner = [x for x in kwargs['inner'] if x > 0]
kwargs['floor'] = inner
kwargs['min_length'] = max(len(inner),
kwargs.get('min_length',0))
del kwargs['inner']
return Partitions_with_constraints(n, **kwargs)
raise ValueError("n must be an integer or be equal to one of "
"None, NN, NonNegativeIntegers()")
def __init__(self, is_infinite=False):
if is_infinite:
Parent.__init__(self, category=InfiniteEnumeratedSets())
else:
Parent.__init__(self, category=FiniteEnumeratedSets())
Element = Partition
    class options(GlobalOptions):
        """
        Global display/latex options shared by the partition classes.
        Accessed as ``Partitions.options``; the ``convention`` option is
        linked to the tableaux convention.
        """
        NAME = 'Partitions'
        module = 'sage.combinat.partition'
        # How partitions are printed.
        display = dict(default="list",
                       description='Specifies how partitions should be printed',
                       values=dict(list='displayed as a list',
                                   exp_low='in exponential form (lowest first)',
                                   exp_high='in exponential form (highest first)',
                                   diagram='as a Ferrers diagram',
                                   compact_low='compact form of ``exp_low``',
                                   compact_high='compact form of ``exp_high``'),
                       alias=dict(exp="exp_low", compact="compact_low", array="diagram",
                                  ferrers_diagram="diagram", young_diagram="diagram"),
                       case_sensitive=False)
        # How partitions are typeset.
        latex = dict(default="young_diagram",
                     description='Specifies how partitions should be latexed',
                     values=dict(diagram='latex as a Ferrers diagram',
                                 young_diagram='latex as a Young diagram',
                                 list='latex as a list',
                                 exp_high='latex as a list in exponential notation (highest first)',
                                 exp_low='as a list latex in exponential notation (lowest first)'),
                     alias=dict(exp="exp_low", array="diagram", ferrers_diagram="diagram"),
                     case_sensitive=False)
        diagram_str = dict(default="*",
                           description='The character used for the cells when printing Ferrers diagrams',
                           checker=lambda char: isinstance(char,str))
        latex_diagram_str = dict(default="\\ast",
                                 description='The character used for the cells when latexing Ferrers diagrams',
                                 checker=lambda char: isinstance(char,str))
        convention = dict(link_to=(tableau.Tableaux.options,'convention'))
        notation = dict(alt_name='convention')
def __reversed__(self):
if not self.is_finite():
raise NotImplementedError("The set is infinite. This needs a custom reverse iterator")
for i in reversed(range(self.cardinality())):
yield self[i]
def _element_constructor_(self, lst):
if isinstance(lst, PartitionTuple):
if lst.level() != 1:
raise ValueError('%s is not an element of %s' % (lst, self))
lst = lst[0]
if lst.parent() is self:
return lst
try:
lst = list(map(ZZ, lst))
except TypeError:
raise ValueError('all parts of %s should be nonnegative integers' % repr(lst))
if lst in self:
return self.element_class(self, lst)
raise ValueError('%s is not an element of %s' % (lst, self))
def __contains__(self, x):
if isinstance(x, Partition):
return True
if isinstance(x, (list, tuple)):
return not x or (all((a in ZZ) and (a >= b) for a, b in zip(x, x[1:]))
and (x[-1] in ZZ) and (x[-1] >= 0))
return False
def subset(self, *args, **kwargs):
if len(args) != 0 or len(kwargs) != 0:
raise ValueError("Invalid combination of arguments")
return self
class Partitions_all(Partitions):
    """
    The class of all integer partitions (of every nonnegative integer).
    """
    def __init__(self):
        """
        Initialize ``self`` as an infinite enumerated set.
        """
        Partitions.__init__(self, is_infinite=True)
    def subset(self, size=None, **kwargs):
        """
        Return the partitions of ``size`` subject to ``kwargs``, or
        ``self`` when no size is given.
        """
        if size is None:
            return self
        return Partitions(size, **kwargs)
    def _repr_(self):
        """
        Return a string representation of ``self``.
        """
        return "Partitions"
    def __iter__(self):
        """
        Iterate over all partitions, by increasing size; within each size
        the order is that of ``ZS1_iterator``.
        """
        n = 0
        while True:
            for p in ZS1_iterator(n):
                yield self.element_class(self, p)
            n += 1
    def __reversed__(self):
        """
        Iterate over all partitions by increasing size, reversing the
        order of the partitions within each size.
        """
        n = 0
        while True:
            for p in reversed(list(ZS1_iterator(n))):
                yield self.element_class(self, p)
            n += 1
    def from_frobenius_coordinates(self, frobenius_coordinates):
        """
        Return the partition with the given Frobenius coordinates: a
        pair of equal-length, strictly decreasing sequences of
        nonnegative integers (arm and leg lengths along the diagonal).
        """
        if len(frobenius_coordinates) != 2:
            raise ValueError('%s is not a valid partition, two sequences of coordinates are needed'%str(frobenius_coordinates))
        else:
            a = frobenius_coordinates[0]
            b = frobenius_coordinates[1]
            if len(a) != len(b):
                raise ValueError('%s is not a valid partition, the sequences of coordinates need to be the same length'%str(frobenius_coordinates))
        r = len(a)
        if r == 0:
            return self.element_class(self, [])
        # Rows meeting the diagonal come from the arm lengths.
        tmp = [a[i]+i+1 for i in range(r)]
        if a[-1] < 0:
            raise ValueError('%s is not a partition, no coordinate can be negative'%str(frobenius_coordinates))
        if b[-1] >= 0:
            tmp.extend([r]*b[r-1])
        else:
            raise ValueError('%s is not a partition, no coordinate can be negative'%str(frobenius_coordinates))
        # Remaining rows come from the gaps between consecutive legs.
        for i in range(r-1,0,-1):
            if b[i-1]-b[i] > 0:
                tmp.extend([i]*(b[i-1]-b[i]-1))
            else:
                raise ValueError('%s is not a partition, the coordinates need to be strictly decreasing'%str(frobenius_coordinates))
        return self.element_class(self, tmp)
    def from_beta_numbers(self, beta):
        """
        Return the partition corresponding to the list ``beta`` of beta
        numbers.  Note that ``beta`` is sorted in place.
        """
        beta.sort()
        # Drop a leading run 0, 1, 2, ... which encodes empty rows.
        offset = 0
        while offset < len(beta)-1 and beta[offset] == offset:
            offset+=1
        beta = beta[offset:]
        mu = [beta[i]-offset-i for i in range(len(beta))]
        return self.element_class(self, list(reversed(mu)))
    def from_exp(self, exp):
        """
        Return the partition with exponential notation ``exp``, where
        ``exp[i]`` is the multiplicity of the part ``i + 1``.
        """
        p = []
        for i in reversed(range(len(exp))):
            p += [i+1]*exp[i]
        return self.element_class(self, p)
    def from_zero_one(self, seq):
        """
        Return the partition whose boundary 0-1 sequence is ``seq``
        (``0`` entries mark the rows).
        """
        tmp = [i for i in range(len(seq)) if seq[i] == 0]
        return self.element_class(self,[tmp[i]-i for i in range(len(tmp)-1,-1,-1)])
    def from_core_and_quotient(self, core, quotient):
        """
        Return the partition with the given ``core`` and ``quotient``;
        the length of ``quotient`` is the modulus.
        """
        from .partition_tuple import PartitionTuple, PartitionTuples
        if quotient not in PartitionTuples():
            raise ValueError('the quotient %s must be a tuple of partitions'%quotient)
        components = PartitionTuple(quotient).components()
        length = len(components)
        k = length*max(len(q) for q in components) + len(core)
        # Beta numbers of the core, padded to k values.
        v = [core[i]-i for i in range(len(core))] + [ -i for i in range(len(core),k) ]
        # Split the beta numbers into residue classes modulo the length.
        w = [ [x for x in v if (x-i) % length == 0] for i in range(1, length+1) ]
        new_w = []
        for i in range(length):
            lw = len(w[i])
            lq = len(components[i])
            # Shift by the quotient components within each residue class.
            new_w += [ w[i][j] + length*components[i][j] for j in range(lq)]
            new_w += [ w[i][j] for j in range(lq,lw)]
        new_w.sort(reverse=True)
        return self.element_class(self, [new_w[i]+i for i in range(len(new_w))])
class Partitions_all_bounded(Partitions):
    """
    The infinite family of partitions whose largest part is at most
    ``k``.
    """
    def __init__(self, k):
        """
        Initialize ``self`` with the bound ``k``.
        """
        self.k = k
        Partitions.__init__(self, is_infinite=True)
    def __contains__(self, x):
        """
        Check that ``x`` is a partition whose largest part does not
        exceed the bound.
        """
        if not x:
            return True
        return x[0] <= self.k and x in _Partitions
    def _repr_(self):
        """
        Return a string representation of ``self``.
        """
        return "%d-Bounded Partitions" % self.k
    def __iter__(self):
        """
        Iterate over the bounded partitions by increasing size.
        """
        size = 0
        while True:
            for mu in Partitions(size, max_part=self.k):
                yield self.element_class(self, mu)
            size += 1
class Partitions_n(Partitions):
    """
    Partitions of a fixed integer ``n``.
    """
    def __init__(self, n):
        """
        Initialize ``self`` with the integer ``n``.
        """
        Partitions.__init__(self)
        self.n = n
    def __contains__(self, x):
        """
        Check that ``x`` is a partition whose parts sum to ``n``.
        """
        return x in _Partitions and sum(x) == self.n
    def _repr_(self):
        """
        Return a string representation of ``self``.
        """
        return "Partitions of the integer %s"%self.n
    def _an_element_(self):
        """
        Return a sample partition of ``n`` (``[n-1, 1]`` for ``n >= 2``).
        """
        if self.n == 0:
            lst = []
        elif self.n == 1:
            lst = [1]
        else:
            lst = [self.n-1, 1]
        return self.element_class(self, lst)
    def cardinality(self, algorithm='flint'):
        """
        Return the number of partitions of ``n``.

        INPUT:

        - ``algorithm`` -- ``'flint'`` (default, cached), ``'gap'`` or
          ``'pari'``
        """
        if self.n < 0:
            return ZZ.zero()
        if algorithm == 'flint':
            return cached_number_of_partitions(self.n)
        elif algorithm == 'gap':
            from sage.libs.gap.libgap import libgap
            return ZZ(libgap.NrPartitions(ZZ(self.n)))
        elif algorithm == 'pari':
            return ZZ(pari(ZZ(self.n)).numbpart())
        raise ValueError("unknown algorithm '%s'" % algorithm)
    def random_element(self, measure = 'uniform'):
        """
        Return a random partition of ``n``, drawn uniformly
        (``measure='uniform'``) or from the Plancherel measure
        (``measure='Plancherel'``).
        """
        if measure == 'uniform':
            return self.random_element_uniform()
        elif measure == 'Plancherel':
            return self.random_element_plancherel()
        else:
            raise ValueError("Unknown measure: %s" % measure)
    def random_element_uniform(self):
        """
        Return a uniformly random partition of ``n`` by the recursive
        method: repeatedly choose a part ``j`` and multiplicity ``d``
        with probability proportional to the number of completions, then
        recurse on the remainder.
        """
        n = self.n
        res = []
        while n > 0:
            # Pick a threshold uniformly in [0, n * p(n)), then scan
            # (j, d) pairs subtracting d * p(n - d*j) until it goes
            # negative; the for/else exits the scan once a pair is found.
            rand = randrange(0, n*cached_number_of_partitions(n))
            for j in range(1, n+1):
                d = 1
                r = n-j
                while r >= 0:
                    rand -= d * cached_number_of_partitions(r)
                    if rand < 0:
                        break
                    d +=1
                    r -= j
                else:
                    continue
                break
            # Add j copies of the selected value d and recurse on r.
            res.extend([d]*j)
            n = r
        res.sort(reverse=True)
        return self.element_class(self, res)
    def random_element_plancherel(self):
        """
        Return a random partition of ``n`` under the Plancherel measure:
        the shape of the left (insertion) tableau of a uniformly random
        permutation.
        """
        T = permutation.Permutations(self.n).random_element().left_tableau()
        return self.element_class(self, [len(row) for row in T])
    def first(self):
        """
        Return the first partition in iteration order, ``[n]``.
        """
        return self.element_class(self, [self.n])
    def next(self, p):
        """
        Return the partition following ``p`` in iteration order, or
        ``None`` if ``p`` is the last one.  Linear scan over ``self``.
        """
        found = False
        for i in self:
            if found:
                return i
            if i == p:
                found = True
        return None
    def last(self):
        """
        Return the last partition in iteration order, ``[1]*n``.
        """
        return self.element_class(self, [1]*self.n)
    def __iter__(self):
        """
        Iterate over the partitions of ``n`` in the order produced by
        ``ZS1_iterator``.
        """
        for p in ZS1_iterator(self.n):
            yield self.element_class(self, [Integer(i) for i in p])
    def subset(self, **kwargs):
        """
        Return the subset of ``self`` cut out by the given constraints.
        """
        return Partitions(self.n, **kwargs)
class Partitions_nk(Partitions):
    """
    Partitions of a fixed integer ``n`` with exactly ``k`` parts.
    """
    def __init__(self, n, k):
        """
        Initialize ``self`` with size ``n`` and length ``k``.
        """
        Partitions.__init__(self)
        self.n = n
        self.k = k
    def __contains__(self, x):
        """
        Check that ``x`` is a partition of ``n`` with exactly ``k``
        parts.
        """
        return x in _Partitions and sum(x) == self.n and len(x) == self.k
    def _repr_(self):
        """
        Return a string representation of ``self``.
        """
        return "Partitions of the integer {} of length {}".format(self.n, self.k)
    def _an_element_(self):
        """
        Return a sample partition of ``n`` with ``k`` parts, or raise
        ``EmptySetError`` when the set is empty.
        """
        if self.n == 0:
            if self.k == 0:
                lst = []
            else:
                from sage.categories.sets_cat import EmptySetError
                raise EmptySetError
        elif self.n >= self.k > 0:
            # Largest-possible first part followed by k-1 ones.
            lst = [self.n - self.k + 1] + [1] * (self.k-1)
        else:
            from sage.categories.sets_cat import EmptySetError
            raise EmptySetError
        return self.element_class(self, lst)
    def __iter__(self):
        """
        Iterate over the partitions of ``n`` with ``k`` parts: each
        partition of ``n - k`` with at most ``k`` parts, incremented by
        one per part and padded with trailing ones to length ``k``.
        """
        for p in ZS1_iterator_nk(self.n - self.k, self.k):
            v = [Integer(i + 1) for i in p]
            adds = [Integer(1)] * (self.k - len(v))
            yield self.element_class(self, v + adds)
    def cardinality(self, algorithm='hybrid'):
        """
        Return the number of partitions of ``n`` with ``k`` parts,
        delegating to ``number_of_partitions_length``.
        """
        return number_of_partitions_length(self.n, self.k, algorithm)
    def subset(self, **kwargs):
        """
        Return the subset of ``self`` cut out by the given constraints.
        """
        return Partitions(self.n, length=self.k, **kwargs)
class Partitions_parts_in(Partitions):
    """
    Partitions of ``n`` with all parts belonging to a fixed set.
    """
    @staticmethod
    def __classcall_private__(cls, n, parts):
        """
        Normalize the input (``parts`` sorted into a tuple) so that equal
        parents are identical.
        """
        parts = tuple(sorted(parts))
        return super(Partitions_parts_in, cls).__classcall__(cls, Integer(n), parts)
    def __init__(self, n, parts):
        """
        Initialize ``self`` with size ``n`` and the allowed ``parts``.
        """
        Partitions.__init__(self)
        self.n = n
        self.parts = list(parts)
    def __contains__(self, x):
        """
        Check that ``x`` is a partition of ``n`` whose parts all lie in
        the allowed set.
        """
        return (x in _Partitions and sum(x) == self.n and
                all(p in self.parts for p in x))
    def _repr_(self):
        """
        Return a string representation of ``self``.
        """
        return "Partitions of the integer %s with parts in %s" % (self.n, self.parts)
    def cardinality(self):
        """
        Return the number of such partitions, via GAP's
        ``NrRestrictedPartitions``.
        """
        if self.parts:
            from sage.libs.gap.libgap import libgap
            return ZZ(libgap.NrRestrictedPartitions(ZZ(self.n), self.parts))
        return Integer(self.n == 0)
    def first(self):
        """
        Return the lexicographically first partition, or ``None`` when
        the set is empty.
        """
        try:
            return self.element_class(self, self._findfirst(self.n, self.parts[:]))
        except TypeError:
            # _findfirst returned None: no such partition exists.
            return None
    def _findfirst(self, n, parts):
        """
        Recursively build the lexicographically first partition of ``n``
        using the largest allowed parts first; implicitly returns
        ``None`` on failure (caught by the caller as a ``TypeError``).
        """
        if n == 0:
            return []
        else:
            while parts:
                p = parts.pop()
                for k in range(n.quo_rem(p)[0], 0, -1):
                    try:
                        return k * [p] + self._findfirst(n - k * p, parts[:])
                    except TypeError:
                        # Recursive call failed; try fewer copies of p.
                        pass
    def last(self):
        """
        Return the lexicographically last partition, or ``None`` when the
        set is empty.
        """
        try:
            return self.element_class(self, self._findlast(self.n, self.parts))
        except TypeError:
            return None
    def _findlast(self, n, parts):
        """
        Return the lexicographically smallest partition of ``n`` with
        parts in ``parts``, or ``None`` if none exists.
        """
        if n < 0:
            return None
        elif n == 0:
            return []
        elif parts:
            p = parts[0]
            q, r = n.quo_rem(p)
            if r == 0:
                return [p] * q
            # The smallest part does not divide n: use one copy of a
            # larger part and recurse on the remainder.
            else:
                for i, p in enumerate(parts[1:]):
                    rest = self._findlast(n - p, parts[:i + 2])
                    if rest is not None:
                        return [p] + rest
        # If we get to here, nothing ever worked, so there's no such
        # partition and None is returned.
        return None
    def __iter__(self):
        """
        Iterate over the partitions of ``n`` with parts in
        ``self.parts``.
        """
        for p in self._other_iterator(self.n, self.parts):
            yield self.element_class(self, p)
    def _fast_iterator(self, n, parts):
        """
        Recursive iterator over the partitions of ``n`` with parts in
        ``parts``, largest part first.  The input list is consumed via
        ``pop``, so callers must pass a copy.
        """
        if n == 0:
            yield []
        else:
            while parts:
                p = parts.pop()
                for k in range(n.quo_rem(p)[0], 0, -1):
                    for q in self._fast_iterator(n - k * p, parts[:]):
                        yield k * [p] + q
    def _other_iterator(self, n, parts):
        """
        Iterator over the partitions of ``n`` with parts in ``parts``,
        driven by ``weighted_iterator_fast`` over multiplicity vectors.
        """
        sorted_parts = sorted(parts, reverse=True)
        for vec in weighted_iterator_fast(n, sorted_parts):
            yield sum(([pi] * multi
                       for pi, multi in zip(sorted_parts, vec)), [])
class Partitions_starting(Partitions):
    """
    Partitions of ``n`` that are at most a given starting partition in
    the partition ordering.
    """
    @staticmethod
    def __classcall_private__(cls, n, starting_partition):
        """
        Normalize the input so that equal parents are identical.
        """
        mu = Partition(starting_partition)
        return super(Partitions_starting, cls).__classcall__(cls, Integer(n), mu)
    def __init__(self, n, starting_partition):
        """
        Initialize ``self`` with size ``n`` and the starting partition.
        """
        Partitions.__init__(self)
        self.n = n
        self._starting = starting_partition
    def _repr_(self):
        """
        Return a string representation of ``self``.
        """
        return "Partitions of the integer %s starting with %s" % (self.n, self._starting)
    def __contains__(self, x):
        """
        Check that ``x`` is a partition of ``n`` not exceeding the
        starting partition.
        """
        if x not in Partitions_n(self.n):
            return False
        return x <= self._starting
    def first(self):
        """
        Return the first element: the starting partition itself.
        """
        return self._starting
    def next(self, part):
        """
        Return the partition after ``part``, by advancing ``part`` with
        the builtin ``next``.
        """
        return next(part)
class Partitions_ending(Partitions):
    """
    Partitions of ``n`` that are at least a given ending partition in
    the partition ordering.
    """
    @staticmethod
    def __classcall_private__(cls, n, ending_partition):
        """
        Normalize the input so that equal parents are identical.
        """
        mu = Partition(ending_partition)
        return super(Partitions_ending, cls).__classcall__(cls, Integer(n), mu)
    def __init__(self, n, ending_partition):
        """
        Initialize ``self`` with size ``n`` and the ending partition.
        """
        Partitions.__init__(self)
        self.n = n
        self._ending = ending_partition
    def _repr_(self):
        """
        Return a string representation of ``self``.
        """
        return "Partitions of the integer %s ending with %s" % (self.n, self._ending)
    def __contains__(self, x):
        """
        Check that ``x`` is a partition of ``n`` at least the ending
        partition.
        """
        if x not in Partitions_n(self.n):
            return False
        return x >= self._ending
    def first(self):
        """
        Return the first element, the one-part partition ``[n]``.
        """
        return self.element_class(self, [self.n])
    def next(self, part):
        """
        Return the partition after ``part``, or ``None`` once the ending
        partition has been reached.
        """
        if part == self._ending:
            return None
        return next(part)
class PartitionsInBox(Partitions):
    """
    Partitions that fit inside an ``h`` by ``w`` box: at most ``h``
    parts, each of size at most ``w``.
    """
    def __init__(self, h, w):
        """
        Initialize ``self`` with box height ``h`` and width ``w``.
        """
        Partitions.__init__(self)
        self.h = h
        self.w = w
    def _repr_(self):
        """
        Return a string representation of ``self``.
        """
        return "Integer partitions which fit in a %s x %s box" % (self.h, self.w)
    def __contains__(self, x):
        """
        Check that ``x`` is a partition with at most ``h`` parts, each of
        size at most ``w``.
        """
        if x not in _Partitions:
            return False
        return len(x) <= self.h and (len(x) == 0 or x[0] <= self.w)
    def list(self):
        """
        Return the list of partitions in the box, built row by row: each
        new row length ranges from 0 up to the previous row's length.
        """
        if self.h == 0:
            return [self.element_class(self, [])]
        rows = [[first] for first in range(self.w + 1)]
        for _ in range(self.h - 1):
            rows = [prefix + [nxt]
                    for prefix in rows
                    for nxt in range(prefix[-1] + 1)]
        # Trailing zero rows are dropped when building the partitions.
        return [self.element_class(self, [part for part in shape if part != 0])
                for shape in rows]
    def cardinality(self):
        """
        Return the number of partitions in the box, the binomial
        coefficient ``binomial(h + w, w)``.
        """
        return binomial(self.h + self.w, self.w)
class Partitions_constraints(IntegerListsLex):
    """
    Legacy class retained so that old pickles still unpickle:
    ``__setstate__`` rewrites the instance into
    :class:`Partitions_with_constraints`.
    """
    def __setstate__(self, data):
        """
        Migrate a pickled instance to ``Partitions_with_constraints``,
        re-adding the default partition constraints.
        """
        n = data['n']
        self.__class__ = Partitions_with_constraints
        constraints = {'max_slope': 0,
                       'min_part': 1}
        constraints.update(data['constraints'])
        self.__init__(n, **constraints)
class Partitions_with_constraints(IntegerListsLex):
    """
    Partitions satisfying extra constraints (length, slope, floor and
    ceiling bounds, ...), realized on top of ``IntegerListsLex``.
    """
    Element = Partition
    options = Partitions.options
> 0)
    def _fast_iterator(self, n, max_part):
        """
        Iterate over the ``ell``-regular partitions of ``n`` with largest
        part at most ``max_part``, largest part first.  Regularity (no
        part repeated ``self._ell`` or more times) is enforced by the
        multiplicity check on the recursively built tail.
        """
        if n == 0:
            yield []
            return
        if n < max_part:
            max_part = n
        # Each part may occur at most self._ell - 1 times.
        bdry = self._ell - 1
        for i in reversed(range(1, max_part+1)):
            for p in self._fast_iterator(n-i, i):
                if p.count(i) < bdry:
                    yield [i] + p
class RegularPartitions_all(RegularPartitions):
    """
    The family of all ``ell``-regular partitions (of every size).
    """
    def __init__(self, ell):
        """
        Initialize ``self``; the family is infinite exactly when
        ``ell > 1``.
        """
        RegularPartitions.__init__(self, ell, bool(ell > 1))
    def _repr_(self):
        """
        Return a string representation of ``self``.
        """
        return "{}-Regular Partitions".format(self._ell)
    def __iter__(self):
        """
        Iterate over the ``ell``-regular partitions by increasing size;
        for ``ell == 1`` only the empty partition is regular.
        """
        if self._ell == 1:
            yield self.element_class(self, [])
            return
        size = 0
        while True:
            for mu in self._fast_iterator(size, size):
                yield self.element_class(self, mu)
            size += 1
class RegularPartitions_truncated(RegularPartitions):
    """
    The ``ell``-regular partitions (of any size) with at most
    ``max_len`` parts.
    """
    def __init__(self, ell, max_len):
        """
        Initialize ``self`` with regularity ``ell`` and maximum length
        ``max_len``.
        """
        self._max_len = max_len
        RegularPartitions.__init__(self, ell, bool(ell > 1))
    def max_length(self):
        """
        Return the maximum length of the partitions of ``self``.
        """
        return self._max_len
    def __contains__(self, x):
        """
        Check that ``x`` is ``ell``-regular and not longer than
        ``max_len``.
        """
        return len(x) <= self._max_len and RegularPartitions.__contains__(self, x)
    def _repr_(self):
        """
        Return a string representation of ``self``.
        """
        return "{}-Regular Partitions with max length {}".format(self._ell, self._max_len)
    def __iter__(self):
        """
        Iterate by increasing size; for ``ell == 1`` only the empty
        partition is regular.
        """
        if self._ell == 1:
            yield self.element_class(self, [])
            return
        n = 0
        while True:
            for p in self._fast_iterator(n, n):
                yield self.element_class(self, p)
            n += 1
    def _fast_iterator(self, n, max_part, depth=0):
        """
        Iterate over the ``ell``-regular partitions of ``n`` with largest
        part at most ``max_part``; ``depth`` counts the parts already
        placed and enforces the length bound.
        """
        if n == 0 or depth >= self._max_len:
            yield []
            return
        # Only one slot left: it must absorb all of n at once.
        if depth + 1 == self._max_len:
            if max_part >= n:
                yield [n]
            return
        if n < max_part:
            max_part = n
        # Each part may occur at most self._ell - 1 times.
        bdry = self._ell - 1
        for i in reversed(range(1, max_part+1)):
            for p in self._fast_iterator(n-i, i, depth+1):
                if p.count(i) < bdry:
                    yield [i] + p
class RegularPartitions_bounded(RegularPartitions):
    """
    The ``ell``-regular partitions with all parts at most ``k``.

    This family is finite: each part ``i <= k`` may occur at most
    ``ell - 1`` times, so the sizes are bounded.
    """
    def __init__(self, ell, k):
        """
        Initialize ``self`` with regularity ``ell`` and part bound ``k``.
        """
        self.k = k
        RegularPartitions.__init__(self, ell, False)
    def __contains__(self, x):
        """
        Check that ``x`` is ``ell``-regular with largest part at most
        ``k``.
        """
        return len(x) == 0 or (x[0] <= self.k and RegularPartitions.__contains__(self, x))
    def _repr_(self):
        """
        Return a string representation of ``self``.
        """
        return "{}-Regular {}-Bounded Partitions".format(self._ell, self.k)
    def __iter__(self):
        """
        Iterate over the bounded regular partitions by decreasing size.
        """
        k = self.k
        # Use floor division: k*(k+1) is always even, so // is exact,
        # whereas true division (/) yields a float for plain Python ints
        # and range() would raise a TypeError.
        for n in reversed(range(k * (k + 1) // 2 * self._ell)):
            for p in self._fast_iterator(n, k):
                yield self.element_class(self, p)
class RegularPartitions_n(RegularPartitions, Partitions_n):
    """
    The ``ell``-regular partitions of a fixed integer ``n``.
    """
    def __init__(self, n, ell):
        """
        Initialize ``self`` with size ``n`` and regularity ``ell``.
        """
        RegularPartitions.__init__(self, ell)
        Partitions_n.__init__(self, n)
    def _repr_(self):
        """
        Return a string representation of ``self``.
        """
        return "{}-Regular Partitions of the integer {}".format(self._ell, self.n)
    def __contains__(self, x):
        """
        Check that ``x`` is an ``ell``-regular partition of ``n``.
        """
        return RegularPartitions.__contains__(self, x) and sum(x) == self.n
    def __iter__(self):
        """
        Iterate over the ``ell``-regular partitions of ``n``.
        """
        for p in self._fast_iterator(self.n, self.n):
            yield self.element_class(self, p)
    def cardinality(self):
        """
        Return the number of ``ell``-regular partitions of ``n``.  When
        ``ell > n`` no part can repeat ``ell`` times, so the plain
        partition count is returned; otherwise the elements are counted
        by enumeration.
        """
        if self._ell > self.n:
            return Partitions_n.cardinality(self)
        return ZZ.sum(1 for x in self)
    def _an_element_(self):
        """
        Return a sample element, raising ``EmptySetError`` in the empty
        case ``ell == 1`` and ``n > 0``.

        NOTE(review): the generic sample ``[n-1, 1]`` may fail
        regularity for small parameters (e.g. ``n=2, ell=2`` gives
        ``[1, 1]``) -- verify against upstream.
        """
        if self._ell == 1 and self.n > 0:
            from sage.categories.sets_cat import EmptySetError
            raise EmptySetError
        return Partitions_n._an_element_(self)
ition.Compositions(self.n, length=self.k)
    def _repr_(self):
        """
        Return a string representation of ``self``, including the length
        when one was specified.
        """
        string = "Ordered partitions of %s" % self.n
        if self.k is not None:
            string += " of length %s" % self.k
        return string
    def list(self):
        """
        Return the list of ordered partitions (compositions) of ``n``,
        of length ``k`` when given, computed via GAP's
        ``OrderedPartitions`` and reversed into this parent's ordering.
        """
        from sage.libs.gap.libgap import libgap
        n = self.n
        k = self.k
        if k is None:
            ans = libgap.OrderedPartitions(ZZ(n))
        else:
            ans = libgap.OrderedPartitions(ZZ(n), ZZ(k))
        result = ans.sage()
        result.reverse()
        return result
    def cardinality(self):
        """
        Return the number of ordered partitions of ``n`` (of length
        ``k`` when given), via GAP's ``NrOrderedPartitions``.
        """
        from sage.libs.gap.libgap import libgap
        n = self.n
        k = self.k
        if k is None:
            ans = libgap.NrOrderedPartitions(n)
        else:
            ans = libgap.NrOrderedPartitions(n, k)
        return ZZ(ans)
ions_all(RestrictedPartitions_generic):
    def __init__(self, ell):
        """
        Initialize ``self`` as the infinite family of ``ell``-restricted
        partitions.
        """
        RestrictedPartitions_generic.__init__(self, ell, True)
    def _repr_(self):
        """
        Return a string representation of ``self``.
        """
        return "{}-Restricted Partitions".format(self._ell)
    def __iter__(self):
        """
        Iterate over all ``ell``-restricted partitions by increasing
        size.
        """
        n = 0
        while True:
            for p in self._fast_iterator(n, n):
                yield self.element_class(self, p)
            n += 1
class RestrictedPartitions_n(RestrictedPartitions_generic, Partitions_n):
    """
    The ``ell``-restricted partitions of a fixed integer ``n``.
    """
    def __init__(self, n, ell):
        """
        Initialize ``self`` with size ``n`` and restriction ``ell``.
        """
        RestrictedPartitions_generic.__init__(self, ell)
        Partitions_n.__init__(self, n)
    def _repr_(self):
        """
        Return a string representation of ``self``.
        """
        return "{}-Restricted Partitions of the integer {}".format(self._ell, self.n)
    def __contains__(self, x):
        """
        Check that ``x`` is an ``ell``-restricted partition of ``n``.
        """
        return RestrictedPartitions_generic.__contains__(self, x) and sum(x) == self.n
    def __iter__(self):
        """
        Iterate over the ``ell``-restricted partitions of ``n``.
        """
        for p in self._fast_iterator(self.n, self.n):
            yield self.element_class(self, p)
    def cardinality(self):
        """
        Return the number of ``ell``-restricted partitions of ``n``;
        when ``ell > n`` this equals the plain partition count,
        otherwise the elements are counted by enumeration.
        """
        if self._ell > self.n:
            return Partitions_n.cardinality(self)
        return ZZ.sum(ZZ.one() for x in self)
    def _an_element_(self):
        """
        Return a sample element: the conjugate of the generic sample for
        partitions of ``n``.
        """
        return self.element_class(self, Partitions_n._an_element_(self).conjugate())
| true | true |
f7353d57ed779b909a1a402da5c8468d9d06e232 | 10,621 | py | Python | pySDC/playgrounds/fft/AllenCahn_contracting_circle_FFT.py | tlunet/pySDC | 6ab2390d017aad7e503df5c978bc3d217ac8b375 | [
"BSD-2-Clause"
] | null | null | null | pySDC/playgrounds/fft/AllenCahn_contracting_circle_FFT.py | tlunet/pySDC | 6ab2390d017aad7e503df5c978bc3d217ac8b375 | [
"BSD-2-Clause"
] | 1 | 2021-08-12T08:34:19.000Z | 2021-08-12T08:34:19.000Z | pySDC/playgrounds/fft/AllenCahn_contracting_circle_FFT.py | MichaelFlec/pySDC | 209e0015a46f861e3658691b7f8724cb1b36c97e | [
"BSD-2-Clause"
] | null | null | null | import os
import dill
import matplotlib.ticker as ticker
import numpy as np
import pySDC.helpers.plot_helper as plt_helper
from pySDC.helpers.stats_helper import filter_stats, sort_stats
from pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.AllenCahn_2D_FFT import allencahn2d_imex, allencahn2d_imex_stab
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.transfer_classes.TransferMesh_FFT2D import mesh_to_mesh_fft2d
from pySDC.projects.TOMS.AllenCahn_monitor import monitor
# http://www.personal.psu.edu/qud2/Res/Pre/dz09sisc.pdf
def setup_parameters():
    """
    Helper routine to fill in all relevant parameters

    Note that this file will be used for all versions of SDC, containing more than necessary for each individual run

    Returns:
        description (dict)
        controller_params (dict)
    """
    # level parameters: tolerance, step size, sweeps per level
    level_params = {
        'restol': 1E-08,
        'dt': 1E-03,
        'nsweeps': [3, 1],
    }
    # sweeper parameters: collocation nodes and preconditioners
    sweeper_params = {
        'collocation_class': CollGaussRadau_Right,
        'num_nodes': [3],
        'QI': ['LU'],
        'QE': ['EE'],
        'initial_guess': 'zero',
    }
    # parameters handed to the problem class
    problem_params = {
        'nu': 2,
        'L': 1.0,
        'nvars': [(256, 256), (64, 64)],
        'eps': [0.04, 0.16],
        'radius': 0.25,
    }
    # step parameters: iteration limit
    step_params = {'maxiter': 50}
    # controller parameters: logging and hooks
    controller_params = {
        'logger_level': 20,
        'hook_class': monitor,
    }
    # description dictionary for easy step instantiation; the problem and
    # sweeper classes are filled in by the caller
    description = {
        'problem_class': None,
        'problem_params': problem_params,
        'sweeper_class': None,
        'sweeper_params': sweeper_params,
        'level_params': level_params,
        'step_params': step_params,
        'space_transfer_class': mesh_to_mesh_fft2d,
    }
    return description, controller_params
def run_SDC_variant(variant=None):
    """
    Routine to run particular SDC variant

    Args:
        variant (str): string describing the variant

    Returns:
        stats: statistics gathered by the controller during the run
    """
    # load (incomplete) default parameters
    description, controller_params = setup_parameters()

    # add stuff based on variant
    if variant == 'semi-implicit':
        description['problem_class'] = allencahn2d_imex
        description['sweeper_class'] = imex_1st_order
    elif variant == 'semi-implicit-stab':
        description['problem_class'] = allencahn2d_imex_stab
        description['sweeper_class'] = imex_1st_order
    else:
        # NotImplemented is a constant, not an exception class; raising it
        # would itself fail with a TypeError, so use NotImplementedError.
        raise NotImplementedError('Wrong variant specified, got %s' % variant)

    # setup parameters "in time"
    t0 = 0
    Tend = 0.032

    # instantiate controller
    controller = controller_nonMPI(num_procs=8, controller_params=controller_params, description=description)

    # get initial values on finest level
    P = controller.MS[0].levels[0].prob
    uinit = P.u_exact(t0)
    # plt_helper.plt.imshow(uinit)
    # plt_helper.plt.show()

    # call main function to get things done...
    uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
    # plt_helper.plt.imshow(uend)
    # plt_helper.plt.show()

    # filter statistics by variant (number of iterations)
    filtered_stats = filter_stats(stats, type='niter')

    # convert filtered statistics to list of iterations count, sorted by process
    iter_counts = sort_stats(filtered_stats, sortby='time')

    # compute and print statistics
    niters = np.array([item[1] for item in iter_counts])
    out = '   Mean number of iterations: %4.2f' % np.mean(niters)
    print(out)
    out = '   Range of values for number of iterations: %2i ' % np.ptp(niters)
    print(out)
    out = '   Position of max/min number of iterations: %2i -- %2i' % \
          (int(np.argmax(niters)), int(np.argmin(niters)))
    print(out)
    out = '   Std and var for number of iterations: %4.2f -- %4.2f' % (float(np.std(niters)), float(np.var(niters)))
    print(out)

    timing = sort_stats(filter_stats(stats, type='timing_run'), sortby='time')
    print('Time to solution: %6.4f sec.' % timing[0][1])
    print()

    return stats
def show_results(fname, cwd=''):
    """
    Plotting routine: produces bar charts of timings/iterations and
    curves for the computed radii and interface widths.

    Args:
        fname (str): file name to read in and name plots
        cwd (str): current working directory
    """
    # load the pickled statistics dictionary
    file = open(cwd + fname + '.pkl', 'rb')
    results = dill.load(file)
    file.close()
    # plt_helper.mpl.style.use('classic')
    plt_helper.setup_mpl()
    # set up plot for timings
    fig, ax1 = plt_helper.newfig(textwidth=238.96, scale=1.5, ratio=0.4)
    timings = {}
    niters = {}
    for key, item in results.items():
        timings[key] = sort_stats(filter_stats(item, type='timing_run'), sortby='time')[0][1]
        iter_counts = sort_stats(filter_stats(item, type='niter'), sortby='time')
        niters[key] = np.mean(np.array([item[1] for item in iter_counts]))
    # bars sorted by decreasing runtime; iterations follow the same order
    xcoords = [i for i in range(len(timings))]
    sorted_timings = sorted([(key, timings[key]) for key in timings], reverse=True, key=lambda tup: tup[1])
    sorted_niters = [(k, niters[k]) for k in [key[0] for key in sorted_timings]]
    heights_timings = [item[1] for item in sorted_timings]
    heights_niters = [item[1] for item in sorted_niters]
    keys = [(item[0][1] + ' ' + item[0][0]).replace('-', '\n').replace('_v2', ' mod.') for item in sorted_timings]
    ax1.bar(xcoords, heights_timings, align='edge', width=-0.3, label='timings (left axis)')
    ax1.set_ylabel('time (sec)')
    # second y-axis for the iteration counts
    ax2 = ax1.twinx()
    ax2.bar(xcoords, heights_niters, color='r', align='edge', width=0.3, label='iterations (right axis)')
    ax2.set_ylabel('mean number of iterations')
    ax1.set_xticks(xcoords)
    ax1.set_xticklabels(keys, rotation=90, ha='center')
    # ask matplotlib for the plotted objects and their labels
    lines, labels = ax1.get_legend_handles_labels()
    lines2, labels2 = ax2.get_legend_handles_labels()
    ax2.legend(lines + lines2, labels + labels2, loc=0)
    # save plot, beautify
    f = fname + '_timings'
    plt_helper.savefig(f)
    assert os.path.isfile(f + '.pdf'), 'ERROR: plotting did not create PDF file'
    assert os.path.isfile(f + '.pgf'), 'ERROR: plotting did not create PGF file'
    assert os.path.isfile(f + '.png'), 'ERROR: plotting did not create PNG file'
    # set up plot for radii
    fig, ax = plt_helper.newfig(textwidth=238.96, scale=1.0)
    exact_radii = []
    for key, item in results.items():
        computed_radii = sort_stats(filter_stats(item, type='computed_radius'), sortby='time')
        xcoords = [item0[0] for item0 in computed_radii]
        radii = [item0[1] for item0 in computed_radii]
        if key[0] + ' ' + key[1] == 'semi-implicit-stab exact':
            ax.plot(xcoords, radii, label=(key[0] + ' ' + key[1]).replace('_v2', ' mod.'))
        exact_radii = sort_stats(filter_stats(item, type='exact_radius'), sortby='time')
        # diff = np.array([abs(item0[1] - item1[1]) for item0, item1 in zip(exact_radii, computed_radii)])
        # max_pos = int(np.argmax(diff))
        # assert max(diff) < 0.07, 'ERROR: computed radius is too far away from exact radius, got %s' % max(diff)
        # assert 0.028 < computed_radii[max_pos][0] < 0.03, \
        #     'ERROR: largest difference is at wrong time, got %s' % computed_radii[max_pos][0]
    # exact radius curve (from the last processed variant)
    xcoords = [item[0] for item in exact_radii]
    radii = [item[1] for item in exact_radii]
    ax.plot(xcoords, radii, color='k', linestyle='--', linewidth=1, label='exact')
    ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%1.2f'))
    ax.set_ylabel('radius')
    ax.set_xlabel('time')
    ax.grid()
    ax.legend(loc=3)
    # save plot, beautify
    f = fname + '_radii'
    plt_helper.savefig(f)
    assert os.path.isfile(f + '.pdf'), 'ERROR: plotting did not create PDF file'
    assert os.path.isfile(f + '.pgf'), 'ERROR: plotting did not create PGF file'
    assert os.path.isfile(f + '.png'), 'ERROR: plotting did not create PNG file'
    # set up plot for interface width
    fig, ax = plt_helper.newfig(textwidth=238.96, scale=1.0)
    interface_width = []
    for key, item in results.items():
        interface_width = sort_stats(filter_stats(item, type='interface_width'), sortby='time')
        xcoords = [item[0] for item in interface_width]
        width = [item[1] for item in interface_width]
        if key[0] + ' ' + key[1] == 'fully-implicit exact':
            ax.plot(xcoords, width, label=key[0] + ' ' + key[1])
    # constant line at the initial interface width for comparison
    xcoords = [item[0] for item in interface_width]
    init_width = [interface_width[0][1]] * len(xcoords)
    ax.plot(xcoords, init_width, color='k', linestyle='--', linewidth=1, label='exact')
    ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%1.2f'))
    ax.set_ylabel(r'interface width ($\epsilon$)')
    ax.set_xlabel('time')
    ax.grid()
    ax.legend(loc=3)
    # save plot, beautify
    f = fname + '_interface'
    plt_helper.savefig(f)
    assert os.path.isfile(f + '.pdf'), 'ERROR: plotting did not create PDF file'
    assert os.path.isfile(f + '.pgf'), 'ERROR: plotting did not create PGF file'
    assert os.path.isfile(f + '.png'), 'ERROR: plotting did not create PNG file'
    return None
def main(cwd=''):
    """
    Main driver: run the chosen SDC variants, pickle the statistics and
    produce the plots.

    Args:
        cwd (str): current working directory (need this for testing)
    """
    # run every variant (exact solves only here)
    variants = ['semi-implicit-stab']
    results = {(variant, 'exact'): run_SDC_variant(variant=variant)
               for variant in variants}
    # persist the raw statistics
    fname = 'data/results_SDC_variants_AllenCahn_1E-03'
    with open(cwd + fname + '.pkl', 'wb') as out_file:
        dill.dump(results, out_file)
    assert os.path.isfile(cwd + fname + '.pkl'), 'ERROR: dill did not create file'
    # visualize
    show_results(fname, cwd=cwd)
if __name__ == "__main__":
main()
| 35.521739 | 116 | 0.668487 | import os
import dill
import matplotlib.ticker as ticker
import numpy as np
import pySDC.helpers.plot_helper as plt_helper
from pySDC.helpers.stats_helper import filter_stats, sort_stats
from pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.AllenCahn_2D_FFT import allencahn2d_imex, allencahn2d_imex_stab
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.transfer_classes.TransferMesh_FFT2D import mesh_to_mesh_fft2d
from pySDC.projects.TOMS.AllenCahn_monitor import monitor
def setup_parameters():
    """
    Helper routine to fill in all relevant pySDC parameter dictionaries.

    Returns:
        tuple: (description, controller_params); `description['problem_class']`
        and `description['sweeper_class']` are left as None and must be set
        by the caller (see run_SDC_variant).
    """
    # level parameters (restol/dt/nsweeps per level)
    level_params = dict()
    level_params['restol'] = 1E-08
    level_params['dt'] = 1E-03
    level_params['nsweeps'] = [3, 1]
    # sweeper parameters (collocation setup and preconditioners)
    sweeper_params = dict()
    sweeper_params['collocation_class'] = CollGaussRadau_Right
    sweeper_params['num_nodes'] = [3]
    sweeper_params['QI'] = ['LU']
    sweeper_params['QE'] = ['EE']
    sweeper_params['initial_guess'] = 'zero'
    # problem parameters for the 2D Allen-Cahn setup (per-level lists)
    problem_params = dict()
    problem_params['nu'] = 2
    problem_params['L'] = 1.0
    problem_params['nvars'] = [(256, 256), (64, 64)]
    problem_params['eps'] = [0.04, 0.16]
    problem_params['radius'] = 0.25
    # step parameters
    step_params = dict()
    step_params['maxiter'] = 50
    # controller parameters (logger level 20 == INFO; monitor hook records radii)
    controller_params = dict()
    controller_params['logger_level'] = 20
    controller_params['hook_class'] = monitor
    # assemble the description handed to the controller
    description = dict()
    description['problem_class'] = None
    description['problem_params'] = problem_params
    description['sweeper_class'] = None
    description['sweeper_params'] = sweeper_params
    description['level_params'] = level_params
    description['step_params'] = step_params
    description['space_transfer_class'] = mesh_to_mesh_fft2d
    return description, controller_params
def run_SDC_variant(variant=None):
    """
    Run a single SDC variant of the 2D Allen-Cahn problem and print iteration
    and timing statistics.

    Args:
        variant (str): 'semi-implicit' or 'semi-implicit-stab'

    Returns:
        stats object produced by the pySDC controller run

    Raises:
        NotImplementedError: if an unknown variant is requested
    """
    # load (incomplete) default parameters and add the variant-specific classes
    description, controller_params = setup_parameters()
    if variant == 'semi-implicit':
        description['problem_class'] = allencahn2d_imex
        description['sweeper_class'] = imex_1st_order
    elif variant == 'semi-implicit-stab':
        description['problem_class'] = allencahn2d_imex_stab
        description['sweeper_class'] = imex_1st_order
    else:
        # BUGFIX: was `raise NotImplemented(...)` -- NotImplemented is not an
        # exception class, so raising it would itself fail with a TypeError
        raise NotImplementedError('Wrong variant specified, got %s' % variant)
    # set time interval
    t0 = 0
    Tend = 0.032
    # instantiate controller and get initial values on the finest level
    controller = controller_nonMPI(num_procs=8, controller_params=controller_params, description=description)
    P = controller.MS[0].levels[0].prob
    uinit = P.u_exact(t0)
    # run the simulation
    uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
    # gather iteration counts per time step and print summary statistics
    filtered_stats = filter_stats(stats, type='niter')
    iter_counts = sort_stats(filtered_stats, sortby='time')
    niters = np.array([item[1] for item in iter_counts])
    out = '   Mean number of iterations: %4.2f' % np.mean(niters)
    print(out)
    out = '   Range of values for number of iterations: %2i ' % np.ptp(niters)
    print(out)
    out = '   Position of max/min number of iterations: %2i -- %2i' % \
          (int(np.argmax(niters)), int(np.argmin(niters)))
    print(out)
    out = '   Std and var for number of iterations: %4.2f -- %4.2f' % (float(np.std(niters)), float(np.var(niters)))
    print(out)
    # report wall-clock time of the whole run
    timing = sort_stats(filter_stats(stats, type='timing_run'), sortby='time')
    print('Time to solution: %6.4f sec.' % timing[0][1])
    print()
    return stats
def show_results(fname, cwd=''):
    """
    Load the pickled statistics from `cwd + fname + '.pkl'` and create three
    figures (timings/iterations bar chart, radius over time, interface width
    over time), asserting that .pdf/.pgf/.png files were written for each.

    Args:
        fname (str): base file name (without extension) of the pickled results
        cwd (str): current working directory prefix (need this for testing)
    """
    # load the results dict written by main() ((variant, label) -> stats)
    file = open(cwd + fname + '.pkl', 'rb')
    results = dill.load(file)
    file.close()
    # plotting setup
    plt_helper.setup_mpl()
    # set up plot for timings vs. mean iteration counts (two y-axes)
    fig, ax1 = plt_helper.newfig(textwidth=238.96, scale=1.5, ratio=0.4)
    timings = {}
    niters = {}
    for key, item in results.items():
        timings[key] = sort_stats(filter_stats(item, type='timing_run'), sortby='time')[0][1]
        iter_counts = sort_stats(filter_stats(item, type='niter'), sortby='time')
        niters[key] = np.mean(np.array([item[1] for item in iter_counts]))
    # sort bars by descending run time and keep iteration bars aligned
    xcoords = [i for i in range(len(timings))]
    sorted_timings = sorted([(key, timings[key]) for key in timings], reverse=True, key=lambda tup: tup[1])
    sorted_niters = [(k, niters[k]) for k in [key[0] for key in sorted_timings]]
    heights_timings = [item[1] for item in sorted_timings]
    heights_niters = [item[1] for item in sorted_niters]
    keys = [(item[0][1] + ' ' + item[0][0]).replace('-', '\n').replace('_v2', ' mod.') for item in sorted_timings]
    ax1.bar(xcoords, heights_timings, align='edge', width=-0.3, label='timings (left axis)')
    ax1.set_ylabel('time (sec)')
    ax2 = ax1.twinx()
    ax2.bar(xcoords, heights_niters, color='r', align='edge', width=0.3, label='iterations (right axis)')
    ax2.set_ylabel('mean number of iterations')
    ax1.set_xticks(xcoords)
    ax1.set_xticklabels(keys, rotation=90, ha='center')
    # combined legend for both axes
    lines, labels = ax1.get_legend_handles_labels()
    lines2, labels2 = ax2.get_legend_handles_labels()
    ax2.legend(lines + lines2, labels + labels2, loc=0)
    # save plot, beautify
    f = fname + '_timings'
    plt_helper.savefig(f)
    assert os.path.isfile(f + '.pdf'), 'ERROR: plotting did not create PDF file'
    assert os.path.isfile(f + '.pgf'), 'ERROR: plotting did not create PGF file'
    assert os.path.isfile(f + '.png'), 'ERROR: plotting did not create PNG file'
    # set up plot for the computed radius over time
    fig, ax = plt_helper.newfig(textwidth=238.96, scale=1.0)
    exact_radii = []
    for key, item in results.items():
        computed_radii = sort_stats(filter_stats(item, type='computed_radius'), sortby='time')
        xcoords = [item0[0] for item0 in computed_radii]
        radii = [item0[1] for item0 in computed_radii]
        if key[0] + ' ' + key[1] == 'semi-implicit-stab exact':
            ax.plot(xcoords, radii, label=(key[0] + ' ' + key[1]).replace('_v2', ' mod.'))
    # NOTE: `item` is the last value from the loop above; the exact radius is
    # taken from that run's stats and plotted as dashed reference line
    exact_radii = sort_stats(filter_stats(item, type='exact_radius'), sortby='time')
    xcoords = [item[0] for item in exact_radii]
    radii = [item[1] for item in exact_radii]
    ax.plot(xcoords, radii, color='k', linestyle='--', linewidth=1, label='exact')
    ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%1.2f'))
    ax.set_ylabel('radius')
    ax.set_xlabel('time')
    ax.grid()
    ax.legend(loc=3)
    # save plot, beautify
    f = fname + '_radii'
    plt_helper.savefig(f)
    assert os.path.isfile(f + '.pdf'), 'ERROR: plotting did not create PDF file'
    assert os.path.isfile(f + '.pgf'), 'ERROR: plotting did not create PGF file'
    assert os.path.isfile(f + '.png'), 'ERROR: plotting did not create PNG file'
    # set up plot for interface width
    fig, ax = plt_helper.newfig(textwidth=238.96, scale=1.0)
    interface_width = []
    for key, item in results.items():
        interface_width = sort_stats(filter_stats(item, type='interface_width'), sortby='time')
        xcoords = [item[0] for item in interface_width]
        width = [item[1] for item in interface_width]
        if key[0] + ' ' + key[1] == 'fully-implicit exact':
            ax.plot(xcoords, width, label=key[0] + ' ' + key[1])
    # constant line at the initial interface width as reference
    xcoords = [item[0] for item in interface_width]
    init_width = [interface_width[0][1]] * len(xcoords)
    ax.plot(xcoords, init_width, color='k', linestyle='--', linewidth=1, label='exact')
    ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%1.2f'))
    ax.set_ylabel(r'interface width ($\epsilon$)')
    ax.set_xlabel('time')
    ax.grid()
    ax.legend(loc=3)
    # save plot, beautify
    f = fname + '_interface'
    plt_helper.savefig(f)
    assert os.path.isfile(f + '.pdf'), 'ERROR: plotting did not create PDF file'
    assert os.path.isfile(f + '.pgf'), 'ERROR: plotting did not create PGF file'
    assert os.path.isfile(f + '.png'), 'ERROR: plotting did not create PNG file'
    return None
def main(cwd=''):
    """
    Main driver: run the stabilized semi-implicit SDC variant, pickle the
    statistics dictionary and render the result plots.

    Args:
        cwd (str): current working directory (need this for testing)
    """
    results = {}
    for variant in ['semi-implicit-stab']:
        results[(variant, 'exact')] = run_SDC_variant(variant=variant)
    fname = 'data/results_SDC_variants_AllenCahn_1E-03'
    # the context manager guarantees the handle is closed even if
    # dill.dump raises (the original open/close pair leaked it then)
    with open(cwd + fname + '.pkl', 'wb') as file:
        dill.dump(results, file)
    assert os.path.isfile(cwd + fname + '.pkl'), 'ERROR: dill did not create file'
    show_results(fname, cwd=cwd)
# Entry point when executed as a script (cwd defaults to '')
if __name__ == "__main__":
    main()
| true | true |
f7353ea7e493eeeb862ebf58a5aff1da7c7bd374 | 221 | py | Python | tests/test_count_customer_states.py | swimmio/sqlalchemy_swimm | d24accb7792743cf586bd7062531d108e7063eba | [
"MIT"
] | null | null | null | tests/test_count_customer_states.py | swimmio/sqlalchemy_swimm | d24accb7792743cf586bd7062531d108e7063eba | [
"MIT"
] | null | null | null | tests/test_count_customer_states.py | swimmio/sqlalchemy_swimm | d24accb7792743cf586bd7062531d108e7063eba | [
"MIT"
] | null | null | null | from src import count_customer_states
# Expected value of count_usa_customer_states() against the test fixture data.
EXPECTED_RESULT = 11


def test_count_customer_countries() -> None:
    """The counted number of US customer states must match the fixture value."""
    assert count_customer_states.count_usa_customer_states() == EXPECTED_RESULT
| 24.555556 | 69 | 0.81448 | from src import count_customer_states
# Expected value of count_usa_customer_states() against the test fixture data.
EXPECTED_RESULT = 11
def test_count_customer_countries() -> None:
    # Count distinct US customer states and compare with the known value.
    tested_result = count_customer_states.count_usa_customer_states()
    assert tested_result == EXPECTED_RESULT
| true | true |
f7353f1950961d00565d9273f846e64aea7c3397 | 2,123 | py | Python | mean_teacher/losses.py | Shuai-Xie/LP-DeepSSL | 9389c6cb0b83c7ca509ce284c4d86b600ca44a9b | [
"MIT"
] | null | null | null | mean_teacher/losses.py | Shuai-Xie/LP-DeepSSL | 9389c6cb0b83c7ca509ce284c4d86b600ca44a9b | [
"MIT"
] | null | null | null | mean_teacher/losses.py | Shuai-Xie/LP-DeepSSL | 9389c6cb0b83c7ca509ce284c4d86b600ca44a9b | [
"MIT"
] | null | null | null | # Copyright (c) 2018, Curious AI Ltd. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Custom loss functions"""
import torch
from torch.nn import functional as F
from torch.autograd import Variable
import pdb
import numpy as np
def softmax_mse_loss(input_logits, target_logits):
    """Takes softmax on both sides and returns MSE loss

    Note:
    - Returns the sum over all examples. Divide by num_classes.
      Divide by the batch size afterwards if you want the mean.
    - Intended usage sends gradients to the inputs, with detached
      (non-grad) targets.
    """
    assert input_logits.size() == target_logits.size()
    input_softmax = F.softmax(input_logits, dim=1)
    target_softmax = F.softmax(target_logits, dim=1)
    num_classes = input_logits.size()[1]
    # reduction='sum' replaces the long-deprecated size_average=False
    # (identical semantics: sum over all elements)
    return F.mse_loss(input_softmax, target_softmax, reduction='sum') / num_classes
def softmax_kl_loss(input_logits, target_logits):
    """Takes softmax on both sides and returns KL divergence

    Note:
    - Returns the sum over all examples. Divide by the batch size afterwards
      if you want the mean.
    - Intended usage sends gradients to the inputs, with detached
      (non-grad) targets.
    """
    assert input_logits.size() == target_logits.size()
    input_log_softmax = F.log_softmax(input_logits, dim=1)  # log(q)
    target_softmax = F.softmax(target_logits, dim=1)  # p
    # reduction='sum' replaces the long-deprecated size_average=False
    # (identical semantics: sum over all elements)
    return F.kl_div(input_log_softmax, target_softmax, reduction='sum')
def symmetric_mse_loss(input1, input2):
    """Like F.mse_loss but sends gradients to both directions.

    Both arguments are treated symmetrically: since neither is detached,
    gradients flow into input1 and input2 alike (F.mse_loss's target usually
    carries no gradient).

    Note:
    - Returns the sum over all examples divided by the number of classes.
      Divide by the batch size afterwards if you want the mean.
    """
    assert input1.size() == input2.size()
    num_classes = input1.size(1)
    squared_diff = (input1 - input2) ** 2
    return squared_diff.sum() / num_classes
import torch
from torch.nn import functional as F
from torch.autograd import Variable
import pdb
import numpy as np
def softmax_mse_loss(input_logits, target_logits):
    """Takes softmax on both sides and returns the summed MSE loss divided
    by the number of classes. Divide by the batch size for a mean."""
    assert input_logits.size() == target_logits.size()
    input_softmax = F.softmax(input_logits, dim=1)
    target_softmax = F.softmax(target_logits, dim=1)
    num_classes = input_logits.size()[1]
    # reduction='sum' replaces the long-deprecated size_average=False
    return F.mse_loss(input_softmax, target_softmax, reduction='sum') / num_classes
def softmax_kl_loss(input_logits, target_logits):
    """Takes softmax on both sides and returns the summed KL divergence.
    Divide by the batch size for a mean."""
    assert input_logits.size() == target_logits.size()
    input_log_softmax = F.log_softmax(input_logits, dim=1)  # log(q)
    target_softmax = F.softmax(target_logits, dim=1)  # p
    # reduction='sum' replaces the long-deprecated size_average=False
    return F.kl_div(input_log_softmax, target_softmax, reduction='sum')
def symmetric_mse_loss(input1, input2):
    """Like F.mse_loss but sends gradients to both input1 and input2
    (neither side is detached). Returns the sum over all examples divided
    by the number of classes; divide by the batch size for a mean."""
    assert input1.size() == input2.size()
    num_classes = input1.size()[1]
    return torch.sum((input1 - input2)**2) / num_classes
f73540fbb20d57168fcef3aa827ceeb19cea2e10 | 274 | py | Python | tests/artificial/transf_Quantization/trend_ConstantTrend/cycle_0/ar_12/test_artificial_1024_Quantization_ConstantTrend_0_12_100.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/artificial/transf_Quantization/trend_ConstantTrend/cycle_0/ar_12/test_artificial_1024_Quantization_ConstantTrend_0_12_100.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/artificial/transf_Quantization/trend_ConstantTrend/cycle_0/ar_12/test_artificial_1024_Quantization_ConstantTrend_0_12_100.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 0, transform = "Quantization", sigma = 0.0, exog_count = 100, ar_order = 12); | 39.142857 | 174 | 0.740876 | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# Generated benchmark case: 1024 daily samples, constant trend, no cycle,
# Quantization transform, sigma=0, 100 exogenous series, AR order 12.
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 0, transform = "Quantization", sigma = 0.0, exog_count = 100, ar_order = 12);
f7354106e2d6bdd10705f670903f39a818fd47a7 | 21,065 | py | Python | dcov/violationsreporters/violations_reporter.py | xiak/dcov | 27d31e90602dc7fff9ddd32b8f42837505467d4b | [
"Apache-2.0"
] | null | null | null | dcov/violationsreporters/violations_reporter.py | xiak/dcov | 27d31e90602dc7fff9ddd32b8f42837505467d4b | [
"Apache-2.0"
] | null | null | null | dcov/violationsreporters/violations_reporter.py | xiak/dcov | 27d31e90602dc7fff9ddd32b8f42837505467d4b | [
"Apache-2.0"
] | null | null | null | """
Classes for querying the information in a test coverage report.
"""
from email import message
import itertools
import os
import os.path
import re
from collections import defaultdict
from dcov import util
from dcov.command_runner import run_command_for_code
from dcov.git_path import GitPathTool
from dcov.violationsreporters.base import (
BaseViolationReporter,
QualityDriver,
RegexBasedDriver,
Violation,
)
class XmlCoverageReporter(BaseViolationReporter):
    """
    Query information from a Cobertura|Clover|JaCoCo XML coverage report.
    """

    def __init__(self, xml_roots, diff_reporter, src_roots=None):
        """
        :param xml_roots: list of parsed ElementTree roots, one per XML
            coverage report to consult.
        :param diff_reporter: git diff reporter, used to compare with the
            content of the xml report.
        :param src_roots: optional list of source roots used to resolve
            measured file paths (defaults to the current directory).
        """
        super().__init__("XML")
        self._xml_roots = xml_roots
        self._diff_reporter = diff_reporter
        # Create a dict to cache violations dict results
        # Keys are source file paths, values are output of `violations()`
        self._info_cache = defaultdict(list)
        self._src_roots = src_roots or [""]

    def _get_classes(self, xml_document, src_path):
        """
        Given a path and parsed xml_document provides class nodes
        with the relevant lines

        First, we look to see if xml_document contains a source
        node providing paths to search for

        If we don't have that we check each nodes filename attribute
        matches an absolute path

        Finally, if we found no nodes, we check the filename attribute
        for the relative path
        """
        # Remove git_root from src_path for searching the correct filename
        # If cwd is `/home/user/work/diff-cover/diff_cover`
        # and src_path is `diff_cover/violations_reporter.py`
        # search for `violations_reporter.py`
        src_rel_path = util.to_unix_path(GitPathTool.relative_path(src_path))
        # If cwd is `/home/user/work/diff-cover/diff_cover`
        # and src_path is `other_package/some_file.py`
        # search for `/home/user/work/diff-cover/other_package/some_file.py`
        src_abs_path = util.to_unix_path(GitPathTool.absolute_path(src_path))
        # cobertura sometimes provides the sources for the measurements
        # within it. If we have that we outta use it
        sources = xml_document.findall("sources/source")
        sources = [source.text for source in sources if source.text]
        classes = xml_document.findall(".//class") or []
        # Try three matching strategies in order: source-root + filename,
        # absolute filename, then relative filename.
        return (
            [
                clazz
                for clazz in classes
                if src_abs_path
                in [
                    util.to_unix_path(
                        os.path.join(source.strip(), clazz.get("filename"))
                    )
                    for source in sources
                ]
            ]
            or [
                clazz
                for clazz in classes
                if util.to_unix_path(clazz.get("filename")) == src_abs_path
            ]
            or [
                clazz
                for clazz in classes
                if util.to_unix_path(clazz.get("filename")) == src_rel_path
            ]
        )

    def get_src_path_line_nodes_cobertura(self, xml_document, src_path):
        """
        Return a list of line nodes for `src_path` in a Cobertura report,
        or None if the file is not present in `xml_document`.
        """
        classes = self._get_classes(xml_document, src_path)
        if not classes:
            return None
        lines = [clazz.findall("./lines/line") for clazz in classes]
        return list(itertools.chain(*lines))

    @staticmethod
    def get_src_path_line_nodes_clover(xml_document, src_path):
        """
        Return a list of nodes containing line information for `src_path`
        in `xml_document`.

        If file is not present in `xml_document`, return None
        """
        files = [
            file_tree
            for file_tree in xml_document.findall(".//file")
            if GitPathTool.relative_path(file_tree.get("name")) == src_path
        ]
        if not files:
            return None
        lines = []
        for file_tree in files:
            # only statement and conditional lines carry coverage counts
            lines.append(file_tree.findall('./line[@type="stmt"]'))
            lines.append(file_tree.findall('./line[@type="cond"]'))
        return list(itertools.chain(*lines))

    def _measured_source_path_matches(self, package_name, file_name, src_path):
        """
        Return True if `src_path` resolves to `package_name/file_name`
        under any of the configured source roots.
        """
        # find src_path in any of the source roots
        if not src_path.endswith(file_name):
            return False
        norm_src_path = os.path.normcase(src_path)
        for root in self._src_roots:
            if (
                os.path.normcase(
                    GitPathTool.relative_path(
                        os.path.join(root, package_name, file_name)
                    )
                )
                == norm_src_path
            ):
                return True
        return False

    def get_src_path_line_nodes_jacoco(self, xml_document, src_path):
        """
        Return a list of nodes containing line information for `src_path`
        in `xml_document`.

        If file is not present in `xml_document`, return None
        """
        files = []
        packages = list(xml_document.findall(".//package"))
        for pkg in packages:
            _files = [
                _file
                for _file in pkg.findall("sourcefile")
                if self._measured_source_path_matches(
                    pkg.get("name"), _file.get("name"), src_path
                )
            ]
            files.extend(_files)
        if not files:
            return None
        lines = [file_tree.findall("./line") for file_tree in files]
        return list(itertools.chain(*lines))

    def _cache_file(self, src_path):
        """
        Load the data from `self._xml_roots`
        for `src_path`, if it hasn't been already.
        """
        # If we have not yet loaded this source file
        if src_path not in self._info_cache:
            # We only want to keep violations that show up in each xml source.
            # Thus, each time, we take the intersection. However, to do this
            # we must treat the first time as a special case and just add all
            # the violations from the first xml report.
            violations = None
            # A line is measured if it is measured in any of the reports, so
            # we take set union each time and can just start with the empty set
            measured = set()
            # NOTE(review): `changed_lines` is currently unused (a large
            # commented-out diff-aware variant that consumed it was removed);
            # the call is kept in case `lines_changed` warms the diff
            # reporter's cache -- confirm before deleting.
            changed_lines = self._diff_reporter.lines_changed(src_path)
            # Loop through the files that contain the xml roots
            for xml_document in self._xml_roots:
                if xml_document.findall(".[@clover]") or xml_document.findall(".[@generated]"):
                    # see etc/schema/clover.xsd at https://bitbucket.org/atlassian/clover/src
                    line_nodes = self.get_src_path_line_nodes_clover(
                        xml_document, src_path
                    )
                    _number = "num"
                    _hits = "count"
                elif xml_document.findall(".[@name]"):
                    # https://github.com/jacoco/jacoco/blob/master/org.jacoco.report/src/org/jacoco/report/xml/report.dtd
                    line_nodes = self.get_src_path_line_nodes_jacoco(
                        xml_document, src_path
                    )
                    _number = "nr"
                    _hits = "ci"
                else:
                    # https://github.com/cobertura/web/blob/master/htdocs/xml/coverage-04.dtd
                    line_nodes = self.get_src_path_line_nodes_cobertura(
                        xml_document, src_path
                    )
                    _number = "number"
                    _hits = "hits"
                if line_nodes is None:
                    continue
                # First case, need to define violations initially
                if violations is None:
                    violations = {
                        Violation(int(line.get(_number)), None)
                        for line in line_nodes
                        if int(line.get(_hits, 0)) == 0
                    }
                # If we already have a violations set,
                # take the intersection of the new
                # violations set and its old self
                else:
                    violations = violations & {
                        Violation(int(line.get(_number)), None)
                        for line in line_nodes
                        if int(line.get(_hits, 0)) == 0
                    }
                # Measured is the union of itself and the new measured
                measured = measured | {int(line.get(_number)) for line in line_nodes}
            # If we don't have any information about the source file,
            # don't report any violations
            if violations is None:
                violations = set()
            self._info_cache[src_path] = (violations, measured)

    def violations(self, src_path):
        """
        See base class comments.
        """
        self._cache_file(src_path)
        # Yield all lines not covered
        return self._info_cache[src_path][0]

    def measured_lines(self, src_path):
        """
        See base class docstring.
        """
        self._cache_file(src_path)
        return self._info_cache[src_path][1]
# Report pycodestyle violations (lines like `file.py:12: E231 whitespace`).
pycodestyle_driver = RegexBasedDriver(
    name="pycodestyle",
    supported_extensions=["py"],
    command=["pycodestyle"],
    expression=r"^([^:]+):(\d+).*([EW]\d{3}.*)$",
    command_to_check_install=["pycodestyle", "--version"],
    # pycodestyle exit code is 1 if there are violations
    # http://pycodestyle.pycqa.org/en/latest/intro.html
    exit_codes=[0, 1],
)
# Report pyflakes violations.
pyflakes_driver = RegexBasedDriver(
    name="pyflakes",
    supported_extensions=["py"],
    command=["pyflakes"],
    # Match lines of the form:
    # path/to/file.py:328: undefined name '_thing'
    # path/to/file.py:418: 'random' imported but unused
    expression=r"^([^:]+):(\d+):\d* (.*)$",
    command_to_check_install=["pyflakes", "--version"],
    # pyflakes exit code is 1 if there are violations
    # https://github.com/PyCQA/pyflakes/blob/master/pyflakes/api.py#L211
    exit_codes=[0, 1],
)
"""
Report Flake8 violations.
"""
flake8_driver = RegexBasedDriver(
    name="flake8",
    supported_extensions=["py"],
    command=["flake8"],
    # Match lines of the form:
    # new_file.py:1:17: E231 whitespace
    expression=r"^([^:]+):(\d+):(?:\d+): ([a-zA-Z]+\d+.*)$",
    command_to_check_install=["flake8", "--version"],
    # flake8 exit code is 1 if there are violations
    # http://flake8.pycqa.org/en/latest/user/invocation.html
    exit_codes=[0, 1],
)
# Report jshint violations; the regex matches `<file>: line <n>, col <c>, <msg>`.
jshint_driver = RegexBasedDriver(
    name="jshint",
    supported_extensions=["js"],
    command=["jshint"],
    expression=r"^([^:]+): line (\d+), col \d+, (.*)$",
    command_to_check_install=["jshint", "-v"],
)
class EslintDriver(RegexBasedDriver):
    """
    Driver for eslint in compact output format; the regex matches
    `<file>: line <n>, col <c>, <message>`.
    """
    def __init__(self):
        super().__init__(
            name="eslint",
            supported_extensions=["js"],
            command=["eslint", "--format=compact"],
            expression=r"^([^:]+): line (\d+), col \d+, (.*)$",
            command_to_check_install=["eslint", "-v"],
        )
        # Optional root to which reported paths are re-based (see parse_reports)
        self.report_root_path = None
    def add_driver_args(self, **kwargs):
        # Consume the eslint-specific argument, pass anything else to the base
        self.report_root_path = kwargs.pop("report_root_path", None)
        if kwargs:
            super().add_driver_args(**kwargs)
    def parse_reports(self, reports):
        # Rewrite each reported path relative to report_root_path, if set
        violations_dict = super().parse_reports(reports)
        if self.report_root_path:
            keys = list(violations_dict.keys())
            for key in keys:
                new_key = os.path.relpath(key, self.report_root_path)
                violations_dict[new_key] = violations_dict.pop(key)
        return violations_dict
"""
Report pydocstyle violations.
Warning/error codes:
D1**: Missing Docstrings
D2**: Whitespace Issues
D3**: Quotes Issues
D4**: Docstring Content Issues
http://www.pydocstyle.org/en/latest/error_codes.html
"""
pydocstyle_driver = RegexBasedDriver(
name="pydocstyle",
supported_extensions=["py"],
command=["pydocstyle"],
expression=r"^(.+?):(\d+).*?$.+?^ (.*?)$",
command_to_check_install=["pydocstyle", "--version"],
flags=re.MULTILINE | re.DOTALL,
# pydocstyle exit code is 1 if there are violations
# http://www.pydocstyle.org/en/2.1.1/usage.html#return-code
exit_codes=[0, 1],
)
class PylintDriver(QualityDriver):
    """
    Quality driver that runs pylint with a compact message template and
    parses its output into Violation objects.
    """

    def __init__(self):
        """
        args:
            expression: regex used to parse report
        See super for other args
        """
        super().__init__(
            "pylint",
            ["py"],
            [
                "pylint",
                '--msg-template="{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}"',
            ],
            # Pylint returns bit-encoded exit codes as documented here:
            # https://pylint.readthedocs.io/en/latest/user_guide/run.html
            # 1 = fatal error, occurs if an error prevents pylint from doing further processing
            # 2,4,8,16 = error/warning/refactor/convention message issued
            # 32 = usage error
            [
                0,
                2,
                4,
                2 | 4,
                8,
                2 | 8,
                4 | 8,
                2 | 4 | 8,
                16,
                2 | 16,
                4 | 16,
                2 | 4 | 16,
                8 | 16,
                2 | 8 | 16,
                4 | 8 | 16,
                2 | 4 | 8 | 16,
            ],
        )
        # Match lines of the form:
        # path/to/file.py:123: [C0111] Missing docstring
        # path/to/file.py:456: [C0111, Foo.bar] Missing docstring
        self.pylint_expression = re.compile(
            r"^([^:]+):(\d+): \[(\w+),? ?([^\]]*)] (.*)$"
        )
        # R0801 (duplicate code) is a multi-line message, handled specially
        self.dupe_code_violation = "R0801"
        self.command_to_check_install = ["pylint", "--version"]
        self.multi_line_violation_regex = re.compile(r"==(\w|.+):(.*)")
        self.dupe_code_violation_regex = re.compile(r"Similar lines in (\d+) files")

    def _process_dupe_code_violation(self, lines, current_line, message):
        """
        The duplicate code violation is a multi line error. This pulls out
        all the relevant files
        """
        src_paths = []
        message_match = self.dupe_code_violation_regex.match(message)
        if message_match:
            # One `==module:line` continuation line per file involved
            for _ in range(int(message_match.group(1))):
                current_line += 1
                match = self.multi_line_violation_regex.match(lines[current_line])
                src_path, l_number = match.groups()
                src_paths.append((f"{src_path}.py", l_number))
        return src_paths

    def parse_reports(self, reports):
        """
        Args:
            reports: list[str] - output from the report
        Return:
            A dict[Str:Violation]
            Violation is a simple named tuple Defined above
        """
        violations_dict = defaultdict(list)
        for report in reports:
            output_lines = report.split("\n")
            for output_line_number, line in enumerate(output_lines):
                match = self.pylint_expression.match(line)
                # Ignore any line that isn't matched
                # (for example, snippets from the source code)
                if match is None:
                    continue
                (
                    pylint_src_path,
                    line_number,
                    pylint_code,
                    function_name,
                    message,
                ) = match.groups()
                if pylint_code == self.dupe_code_violation:
                    files_involved = self._process_dupe_code_violation(
                        output_lines, output_line_number, message
                    )
                else:
                    files_involved = [(pylint_src_path, line_number)]
                for src_path, src_line_number in files_involved:
                    # pylint might use windows paths
                    src_path = util.to_unix_path(src_path)
                    if function_name:
                        error_str = f"{pylint_code}: {function_name}: {message}"
                    else:
                        error_str = f"{pylint_code}: {message}"
                    violations_dict[src_path].append(
                        Violation(int(src_line_number), error_str)
                    )
        return violations_dict

    def installed(self):
        """
        Method checks if the provided tool is installed.
        Returns: boolean True if installed
        """
        return run_command_for_code(self.command_to_check_install) == 0
class CppcheckDriver(QualityDriver):
    """
    Driver for cppcheck c/c++ static analyzer.
    """

    def __init__(self):
        """
        args:
            expression: regex used to parse report
        See super for other args
        """
        super().__init__(
            "cppcheck",
            ["c", "cpp", "h", "hpp"],
            ["cppcheck", "--quiet"],
            output_stderr=True,
        )
        # Errors look like:
        # [src/foo.c:123]: (error) Array 'yolo[4]' accessed at index 4, which is out of bounds.
        # Match for everything, including ":" in the file name (first capturing
        # group), in case there are pathological path names with ":"
        self.cppcheck_expression = re.compile(r"^\[(.*?):(\d+)\]: (.*$)")
        self.command_to_check_install = ["cppcheck", "--version"]

    def parse_reports(self, reports):
        """
        Args:
            reports: list[str] - output from the report
        Return:
            A dict[Str:Violation]
            Violation is a simple named tuple Defined above
        """
        violations_dict = defaultdict(list)
        for report in reports:
            for raw_line in report.splitlines():
                match = self.cppcheck_expression.match(raw_line)
                # Skip anything that is not a cppcheck error line
                # (for example, snippets from the source code)
                if match is None:
                    continue
                src_path, line_number, message = match.groups()
                violations_dict[src_path].append(
                    Violation(int(line_number), message)
                )
        return violations_dict

    def installed(self):
        """
        Method checks if the provided tool is installed.
        Returns: boolean True if installed
        """
        return run_command_for_code(self.command_to_check_install) == 0
| 36.008547 | 121 | 0.545122 |
from email import message
import itertools
import os
import os.path
import re
from collections import defaultdict
from dcov import util
from dcov.command_runner import run_command_for_code
from dcov.git_path import GitPathTool
from dcov.violationsreporters.base import (
BaseViolationReporter,
QualityDriver,
RegexBasedDriver,
Violation,
)
class XmlCoverageReporter(BaseViolationReporter):
def __init__(self, xml_roots, diff_reporter, src_roots=None):
super().__init__("XML")
self._xml_roots = xml_roots
self._diff_reporter = diff_reporter
self._info_cache = defaultdict(list)
self._src_roots = src_roots or [""]
def _get_classes(self, xml_document, src_path):
src_rel_path = util.to_unix_path(GitPathTool.relative_path(src_path))
src_abs_path = util.to_unix_path(GitPathTool.absolute_path(src_path))
sources = xml_document.findall("sources/source")
sources = [source.text for source in sources if source.text]
classes = xml_document.findall(".//class") or []
return (
[
clazz
for clazz in classes
if src_abs_path
in [
util.to_unix_path(
os.path.join(source.strip(), clazz.get("filename"))
)
for source in sources
]
]
or [
clazz
for clazz in classes
if util.to_unix_path(clazz.get("filename")) == src_abs_path
]
or [
clazz
for clazz in classes
if util.to_unix_path(clazz.get("filename")) == src_rel_path
]
)
def get_src_path_line_nodes_cobertura(self, xml_document, src_path):
classes = self._get_classes(xml_document, src_path)
if not classes:
return None
lines = [clazz.findall("./lines/line") for clazz in classes]
return list(itertools.chain(*lines))
@staticmethod
def get_src_path_line_nodes_clover(xml_document, src_path):
files = [
file_tree
for file_tree in xml_document.findall(".//file")
if GitPathTool.relative_path(file_tree.get("name")) == src_path
]
if not files:
return None
lines = []
for file_tree in files:
lines.append(file_tree.findall('./line[@type="stmt"]'))
lines.append(file_tree.findall('./line[@type="cond"]'))
return list(itertools.chain(*lines))
def _measured_source_path_matches(self, package_name, file_name, src_path):
if not src_path.endswith(file_name):
return False
norm_src_path = os.path.normcase(src_path)
for root in self._src_roots:
if (
os.path.normcase(
GitPathTool.relative_path(
os.path.join(root, package_name, file_name)
)
)
== norm_src_path
):
return True
return False
def get_src_path_line_nodes_jacoco(self, xml_document, src_path):
files = []
packages = list(xml_document.findall(".//package"))
for pkg in packages:
_files = [
_file
for _file in pkg.findall("sourcefile")
if self._measured_source_path_matches(
pkg.get("name"), _file.get("name"), src_path
)
]
files.extend(_files)
if not files:
return None
lines = [file_tree.findall("./line") for file_tree in files]
return list(itertools.chain(*lines))
    def _cache_file(self, src_path):
        """Compute and cache ``(violations, measured)`` line sets for *src_path*.

        Each coverage XML root is classified by its root attributes
        (Clover / JaCoCo / Cobertura), its line nodes for *src_path* are
        extracted, and uncovered lines are intersected across reports so a
        line only counts as a violation if *no* report covers it.  Measured
        lines are unioned across reports.
        """
        if src_path not in self._info_cache:
            violations = None
            measured = set()
            # NOTE(review): changed_lines is never used in this method —
            # confirm before removing.
            changed_lines = self._diff_reporter.lines_changed(src_path)
            for xml_document in self._xml_roots:
                # Format dispatch: Clover roots carry a ``clover`` or
                # ``generated`` attribute, JaCoCo roots a ``name`` attribute;
                # everything else is treated as Cobertura.  Each format uses
                # different attribute names for line number and hit count.
                if xml_document.findall(".[@clover]") or xml_document.findall(".[@generated]"):
                    line_nodes = self.get_src_path_line_nodes_clover(
                        xml_document, src_path
                    )
                    _number = "num"
                    _hits = "count"
                elif xml_document.findall(".[@name]"):
                    line_nodes = self.get_src_path_line_nodes_jacoco(
                        xml_document, src_path
                    )
                    _number = "nr"
                    _hits = "ci"
                else:
                    line_nodes = self.get_src_path_line_nodes_cobertura(
                        xml_document, src_path
                    )
                    _number = "number"
                    _hits = "hits"
                # Report does not mention this file at all: skip it.
                if line_nodes is None:
                    continue
                # First matching report seeds the violation set; later
                # reports intersect with it, so a line is a violation only
                # when it is uncovered in every report that measured it.
                if violations is None:
                    violations = {
                        Violation(int(line.get(_number)), None)
                        for line in line_nodes
                        if int(line.get(_hits, 0)) == 0
                    }
                else:
                    violations = violations & {
                        Violation(int(line.get(_number)), None)
                        for line in line_nodes
                        if int(line.get(_hits, 0)) == 0
                    }
                # Union of all line numbers any report measured.
                measured = measured | {int(line.get(_number)) for line in line_nodes}
            # No report measured this file: don't report any violations
            if violations is None:
                violations = set()
            self._info_cache[src_path] = (violations, measured)
def violations(self, src_path):
self._cache_file(src_path)
return self._info_cache[src_path][0]
def measured_lines(self, src_path):
self._cache_file(src_path)
return self._info_cache[src_path][1]
# Regex-based quality drivers.  Each ``expression`` captures, in order:
# (1) source path, (2) line number, (3) violation message.
# ``exit_codes`` lists the statuses treated as a successful run — 1
# conventionally means "violations were found", not a crash.
pycodestyle_driver = RegexBasedDriver(
    name="pycodestyle",
    supported_extensions=["py"],
    command=["pycodestyle"],
    expression=r"^([^:]+):(\d+).*([EW]\d{3}.*)$",
    command_to_check_install=["pycodestyle", "--version"],
    exit_codes=[0, 1],
)
# pyflakes prints "path:line:col message".
pyflakes_driver = RegexBasedDriver(
    name="pyflakes",
    supported_extensions=["py"],
    command=["pyflakes"],
    expression=r"^([^:]+):(\d+):\d* (.*)$",
    command_to_check_install=["pyflakes", "--version"],
    exit_codes=[0, 1],
)
# flake8 messages start with an alphabetic code followed by digits (E501, C901, ...).
flake8_driver = RegexBasedDriver(
    name="flake8",
    supported_extensions=["py"],
    command=["flake8"],
    expression=r"^([^:]+):(\d+):(?:\d+): ([a-zA-Z]+\d+.*)$",
    command_to_check_install=["flake8", "--version"],
    exit_codes=[0, 1],
)
# jshint prints "path: line N, col M, message"; default exit codes are used.
jshint_driver = RegexBasedDriver(
    name="jshint",
    supported_extensions=["js"],
    command=["jshint"],
    expression=r"^([^:]+): line (\d+), col \d+, (.*)$",
    command_to_check_install=["jshint", "-v"],
)
class EslintDriver(RegexBasedDriver):
    """Driver for eslint's ``--format=compact`` output, with optional
    remapping of report paths relative to a report root.
    """
    def __init__(self):
        super().__init__(
            name="eslint",
            supported_extensions=["js"],
            command=["eslint", "--format=compact"],
            expression=r"^([^:]+): line (\d+), col \d+, (.*)$",
            command_to_check_install=["eslint", "-v"],
        )
        self.report_root_path = None
    def add_driver_args(self, **kwargs):
        """Consume ``report_root_path``; forward any remaining args."""
        self.report_root_path = kwargs.pop("report_root_path", None)
        if kwargs:
            super().add_driver_args(**kwargs)
    def parse_reports(self, reports):
        """Parse reports, rewriting each path relative to the report root."""
        violations_dict = super().parse_reports(reports)
        if self.report_root_path:
            for original_key in list(violations_dict):
                relative_key = os.path.relpath(original_key, self.report_root_path)
                violations_dict[relative_key] = violations_dict.pop(original_key)
        return violations_dict
# pydocstyle output is multi-line: a "path:line ..." header followed by an
# indented message line, hence MULTILINE | DOTALL.
pydocstyle_driver = RegexBasedDriver(
    name="pydocstyle",
    supported_extensions=["py"],
    command=["pydocstyle"],
    expression=r"^(.+?):(\d+).*?$.+?^ (.*?)$",
    command_to_check_install=["pydocstyle", "--version"],
    flags=re.MULTILINE | re.DOTALL,
    # Fix: this keyword was mangled to ``des``; pydocstyle exits 0 (clean)
    # or 1 (violations found), matching the other drivers' ``exit_codes``.
    exit_codes=[0, 1],
)
class PylintDriver(QualityDriver):
    """Quality driver that runs pylint and parses its message output."""
    def __init__(self):
        super().__init__(
            "pylint",
            ["py"],
            [
                "pylint",
                '--msg-template="{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}"',
            ],
            # Accepted exit codes: pylint bit-encodes its status, so every
            # bitwise combination of 2, 4, 8 and 16 (plus 0) means the run
            # itself succeeded and only messages were emitted (see the
            # pylint exit-code documentation).
            [
                0,
                2,
                4,
                2 | 4,
                8,
                2 | 8,
                4 | 8,
                2 | 4 | 8,
                16,
                2 | 16,
                4 | 16,
                2 | 4 | 16,
                8 | 16,
                2 | 8 | 16,
                4 | 8 | 16,
                2 | 4 | 8 | 16,
            ],
        )
        # Matches lines produced by the --msg-template above:
        # "path:line: [code(symbol), obj] message".
        self.pylint_expression = re.compile(
            r"^([^:]+):(\d+): \[(\w+),? ?([^\]]*)] (.*)$"
        )
        # R0801 = "Similar lines in N files" (duplicate code), which needs
        # special handling because the affected files follow on later lines.
        self.dupe_code_violation = "R0801"
        self.command_to_check_install = ["pylint", "--version"]
        # Matches the "==module:lines" continuation lines under an R0801 message.
        self.multi_line_violation_regex = re.compile(r"==(\w|.+):(.*)")
        self.dupe_code_violation_regex = re.compile(r"Similar lines in (\d+) files")
    def _process_dupe_code_violation(self, lines, current_line, message):
        """Resolve an R0801 message into the (path, line) pairs it refers to,
        by reading the N "==module:lines" lines that follow it.
        """
        src_paths = []
        message_match = self.dupe_code_violation_regex.match(message)
        if message_match:
            for _ in range(int(message_match.group(1))):
                current_line += 1
                match = self.multi_line_violation_regex.match(lines[current_line])
                src_path, l_number = match.groups()
                src_paths.append(("%s.py" % src_path, l_number))
        return src_paths
    def parse_reports(self, reports):
        """Parse pylint output into ``{src_path: [Violation, ...]}``.

        Args:
            reports: list of strings, each the full stdout of one pylint run.
        """
        violations_dict = defaultdict(list)
        for report in reports:
            output_lines = report.split("\n")
            for output_line_number, line in enumerate(output_lines):
                match = self.pylint_expression.match(line)
                # Unmatched lines are ignored
                # (for example, snippets from the source code)
                if match is not None:
                    (
                        pylint_src_path,
                        line_number,
                        pylint_code,
                        function_name,
                        message,
                    ) = match.groups()
                    if pylint_code == self.dupe_code_violation:
                        files_involved = self._process_dupe_code_violation(
                            output_lines, output_line_number, message
                        )
                    else:
                        files_involved = [(pylint_src_path, line_number)]
                    for violation in files_involved:
                        pylint_src_path, line_number = violation
                        # pylint might uses windows paths
                        pylint_src_path = util.to_unix_path(pylint_src_path)
                        # Include the function/object name when pylint gives one.
                        if function_name:
                            error_str = "{}: {}: {}".format(
                                pylint_code, function_name, message
                            )
                        else:
                            error_str = f"{pylint_code}: {message}"
                        violation = Violation(int(line_number), error_str)
                        violations_dict[pylint_src_path].append(violation)
        return violations_dict
    def installed(self):
        """Return True when ``pylint --version`` exits with status 0."""
        return run_command_for_code(self.command_to_check_install) == 0
class CppcheckDriver(QualityDriver):
    """Quality driver for cppcheck; messages are read from stderr."""
    def __init__(self):
        super().__init__(
            "cppcheck",
            ["c", "cpp", "h", "hpp"],
            ["cppcheck", "--quiet"],
            output_stderr=True,
        )
        # Matches lines such as: [src/foo.c:12]: (error) message
        self.cppcheck_expression = re.compile(r"^\[(.*?):(\d+)\]: (.*$)")
        self.command_to_check_install = ["cppcheck", "--version"]
    def parse_reports(self, reports):
        """Group cppcheck messages into ``{src_path: [Violation, ...]}``."""
        violations_dict = defaultdict(list)
        for report in reports:
            for line in report.splitlines():
                match = self.cppcheck_expression.match(line)
                # Skip anything that is not a recognizable message line.
                if match is None:
                    continue
                cppcheck_src_path, line_number, message = match.groups()
                violations_dict[cppcheck_src_path].append(
                    Violation(int(line_number), message)
                )
        return violations_dict
    def installed(self):
        """Return True when ``cppcheck --version`` exits with status 0."""
        return run_command_for_code(self.command_to_check_install) == 0
| true | true |
f73541ae61d95278a0ad1bb2699e87d355c14c54 | 6,499 | py | Python | ros/src/tl_detector/tl_detector.py | balrajmarimuthu/CarND-Capstone | bc3e52c5e940e3da51efad219ab89fb3580fb717 | [
"MIT"
] | null | null | null | ros/src/tl_detector/tl_detector.py | balrajmarimuthu/CarND-Capstone | bc3e52c5e940e3da51efad219ab89fb3580fb717 | [
"MIT"
] | null | null | null | ros/src/tl_detector/tl_detector.py | balrajmarimuthu/CarND-Capstone | bc3e52c5e940e3da51efad219ab89fb3580fb717 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from light_classification.tl_classifier import TLClassifier
import tf
import cv2
import yaml
from scipy.spatial import KDTree
STATE_COUNT_THRESHOLD = 3
class TLDetector(object):
    """ROS node that detects the closest upcoming traffic light and publishes
    the waypoint index of its stop line on /traffic_waypoint (-1 when no red
    light is ahead).
    """
    def __init__(self):
        rospy.init_node('tl_detector')
        # State filled in by the subscriber callbacks below.
        self.pose = None
        self.waypoints = None
        self.waypoints_2d = None
        self.waypoint_tree = None
        self.camera_image = None
        self.lights = []
        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)
        config_string = rospy.get_param("/traffic_light_config")
        # NOTE(review): yaml.load() without an explicit Loader parses
        # arbitrary YAML; if /traffic_light_config is not fully trusted this
        # should be yaml.safe_load.
        self.config = yaml.load(config_string)
        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)
        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()
        self.listener = tf.TransformListener()
        # Debounce state for the red-light decision (see image_cb).
        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0
        rospy.spin()
    def pose_cb(self, msg):
        """Cache the latest vehicle pose."""
        self.pose = msg
    def waypoints_cb(self, waypoints):
        """Cache base waypoints and build a KD-tree for nearest-waypoint lookup."""
        self.waypoints = waypoints
        if not self.waypoints_2d:
            self.waypoints_2d = [[w.pose.pose.position.x, w.pose.pose.position.y] for w in waypoints.waypoints]
            self.waypoint_tree = KDTree(self.waypoints_2d)
    def traffic_cb(self, msg):
        """Cache the (simulator-provided) traffic light list."""
        self.lights = msg.lights
    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
        of the waypoint closest to the red light's stop line to /traffic_waypoint
        Args:
            msg (Image): image from car-mounted camera
        """
        self.has_image = True
        self.camera_image = msg
        light_wp, state = self.process_traffic_lights()
        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1
    def get_closest_waypoint(self, x, y):
        """Identifies the closest path waypoint to the given (x, y) position.
        Args:
            x, y (float): query position in map coordinates
        Returns:
            int: index of the closest waypoint in self.waypoints
        """
        closest_idx = self.waypoint_tree.query([x, y], 1)[1]
        return closest_idx
    def get_light_state(self, light):
        """Determines the current color of the traffic light
        Args:
            light (TrafficLight): light to classify
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        # Classifier path kept for reference; the simulator ground-truth
        # state is returned instead.
        # if(not self.has_image):
        #     self.prev_light_loc = None
        #     return False
        # cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
        # return self.light_classifier.get_classification(cv_image)
        return light.state
    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
        location and color
        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        closest_light = None
        line_wp_idx = None
        # List of positions that correspond to the line to stop in front of
        # for a given intersection.
        stop_line_positions = self.config['stop_line_positions']
        if self.pose:
            car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)
            # Keep the nearest stop line that is ahead of the car.
            diff = len(self.waypoints.waypoints)
            for i, light in enumerate(self.lights):
                line = stop_line_positions[i]
                temp_wp_idx = self.get_closest_waypoint(line[0], line[1])
                d = temp_wp_idx - car_wp_idx
                if d >= 0 and d < diff:
                    diff = d
                    closest_light = light
                    line_wp_idx = temp_wp_idx
        if closest_light:
            state = self.get_light_state(closest_light)
            return line_wp_idx, state
        # Fix: removed an unreachable duplicate block that followed this
        # return in the original (it referenced stale names ``light`` and
        # ``light_wp`` and cleared self.waypoints).
        return -1, TrafficLight.UNKNOWN
if __name__ == '__main__':
    # Start the node; a ROS interrupt (e.g. Ctrl-C / shutdown) is expected,
    # so it is logged instead of escaping as a traceback.
    try:
        TLDetector()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start traffic node.')
| 35.513661 | 111 | 0.637329 |
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from light_classification.tl_classifier import TLClassifier
import tf
import cv2
import yaml
from scipy.spatial import KDTree
STATE_COUNT_THRESHOLD = 3
class TLDetector(object):
def __init__(self):
rospy.init_node('tl_detector')
self.pose = None
self.waypoints = None
self.waypoints_2d = None
self.waypoint_tree = None
self.camera_image = None
self.lights = []
sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)
config_string = rospy.get_param("/traffic_light_config")
self.config = yaml.load(config_string)
self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)
self.bridge = CvBridge()
self.light_classifier = TLClassifier()
self.listener = tf.TransformListener()
self.state = TrafficLight.UNKNOWN
self.last_state = TrafficLight.UNKNOWN
self.last_wp = -1
self.state_count = 0
rospy.spin()
def pose_cb(self, msg):
self.pose = msg
def waypoints_cb(self, waypoints):
self.waypoints = waypoints
if not self.waypoints_2d:
self.waypoints_2d = [[w.pose.pose.position.x, w.pose.pose.position.y] for w in waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
def traffic_cb(self, msg):
self.lights = msg.lights
def image_cb(self, msg):
self.has_image = True
self.camera_image = msg
light_wp, state = self.process_traffic_lights()
if self.state != state:
self.state_count = 0
self.state = state
elif self.state_count >= STATE_COUNT_THRESHOLD:
self.last_state = self.state
light_wp = light_wp if state == TrafficLight.RED else -1
self.last_wp = light_wp
self.upcoming_red_light_pub.publish(Int32(light_wp))
else:
self.upcoming_red_light_pub.publish(Int32(self.last_wp))
self.state_count += 1
def get_closest_waypoint(self, x , y):
closest_idx = self.waypoint_tree.query([x,y], 1)[1]
return closest_idx
def get_light_state(self, light):
ght.state
def process_traffic_lights(self):
closest_light = None
line_wp_idx = None
stop_line_positions = self.config['stop_line_positions']
if(self.pose):
car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)
diff = len(self.waypoints.waypoints)
for i, light in enumerate(self.lights):
line = stop_line_positions[i]
temp_wp_idx = self.get_closest_waypoint(line[0], line[1])
d = temp_wp_idx - car_wp_idx
if d>=0 and d<diff:
diff = d
closest_light = light
line_wp_idx = temp_wp_idx
if closest_light:
state = self.get_light_state(closest_light)
return line_wp_idx, state
return -1, TrafficLight.UNKNOWN
if light:
state = self.get_light_state(light)
return light_wp, state
self.waypoints = None
return -1, TrafficLight.UNKNOWN
if __name__ == '__main__':
try:
TLDetector()
except rospy.ROSInterruptException:
rospy.logerr('Could not start traffic node.')
| true | true |
f73542726d21cf0df047613ca0890b579620009b | 2,325 | py | Python | compute_distance_and_align/align_strings.py | alyonavyshnevska/dynamic_programming_levenshtein_distance | e8ecd72ebbee7b3e59977a1a684b5e3ecd9bb930 | [
"MIT"
] | null | null | null | compute_distance_and_align/align_strings.py | alyonavyshnevska/dynamic_programming_levenshtein_distance | e8ecd72ebbee7b3e59977a1a684b5e3ecd9bb930 | [
"MIT"
] | null | null | null | compute_distance_and_align/align_strings.py | alyonavyshnevska/dynamic_programming_levenshtein_distance | e8ecd72ebbee7b3e59977a1a684b5e3ecd9bb930 | [
"MIT"
] | null | null | null | import compute_distance_and_align.compute_levenshtein_distance as compute_dist
def align_strings(seq1, seq2):
    """Compute the Levenshtein distance between *seq1* and *seq2* and
    reconstruct an alignment by backtracking through the DP table.

    :param str seq1: from this string
    :param str seq2: into this string
    :returns: ``(distance, (seq1, changes))`` where *changes* encodes the
        edits needed to turn *seq1* into *seq2*:

        * ``-`` -- a deletion
        * a lowercase letter -- no editing needed
        * an uppercase letter -- substitution or insertion of this letter
    """
    # Trivial cases that need no DP table.
    if len(seq1) == 0 and len(seq2) == 0:
        return 0, ("", "")
    if len(seq1) == 0:
        return len(seq2), (seq1, seq2.upper())
    if len(seq2) == 0:
        return len(seq1), (seq1, "-" * len(seq1))
    if seq1 == seq2:
        return 0, (seq1, seq1)
    distance, table, row, column = compute_dist.compute_levenshtein_distance(seq1, seq2)
    # Fix: the original re-read ``table[row][column]`` after every backtrack
    # move, so once the walk reached the table origin it always returned
    # ``table[0][0] == 0`` instead of the true edit distance.  The distance
    # reported by compute_levenshtein_distance is used directly instead.
    alignment = ""
    while not (row == 0 and column == 0):
        if row != 0 and column != 0 and seq2[row - 1] == seq1[column - 1]:
            # Characters match: keep the letter unchanged (lowercase).
            alignment += seq2[row - 1]
            row -= 1
            column -= 1
        elif table[row][column] == (table[row - 1][column - 1] + 1):
            # Substitution: uppercase marks the edited letter.
            alignment += seq2[row - 1].upper()
            row -= 1
            column -= 1
        elif table[row][column] == (table[row - 1][column] + 1):
            # Insertion into seq1.
            alignment += seq2[row - 1].upper()
            row -= 1
        elif table[row][column] == (table[row][column - 1] + 1):
            # Deletion from seq1.
            alignment += '-'
            column -= 1
        # NOTE(review): when row or column is 0 the diagonal branches index
        # ``table[-1]`` via negative indexing — presumably benign given the
        # DP table shape, but worth confirming.
    # The alignment was built back-to-front; reverse it.
    return distance, (seq1, alignment[::-1])
if __name__ == "__main__":
    seq1 = 'abcdef'
    seq2 = 'azced'
    # Fix: use the variables instead of re-typing the literals, so editing
    # seq1/seq2 above actually changes what gets aligned.
    distance, alignment = align_strings(seq1, seq2)
    print("\nFrom string: ", seq1, "\nto string:", seq2,
          "\nMinimum edit distance:", distance,
          "\nChanges:", alignment)
def align_strings(seq1, seq2):
distance = 0
alignment = ""
if len(seq1) == 0 and len(seq2) == 0:
return distance, (alignment, alignment)
elif len(seq1) == 0:
distance = len(seq2)
alignment = seq2.upper()
elif len(seq2) == 0:
distance = len(seq1)
for letter in seq1:
alignment += '-'
elif seq1 == seq2:
distance = 0
alignment = seq1
else:
shortest_dist, table, row, column = compute_dist.compute_levenshtein_distance(seq1, seq2)
while True:
if (row == 0 and column == 0):
break
if row != 0 and column != 0 and seq2[row - 1] == seq1[column - 1]:
alignment += seq2[row - 1]
row = row - 1
column = column - 1
elif table[row][column] == (table[row - 1][column - 1] + 1):
alignment += seq2[row - 1].upper()
row = row - 1
column = column - 1
elif table[row][column] == (table[row - 1][column] + 1):
alignment += seq2[row - 1].upper()
row = row - 1
elif table[row][column] == (table[row][column - 1] + 1):
alignment += '-'
column = column - 1
distance = table[row][column]
alignment = alignment[::-1]
return distance, (seq1, alignment)
if __name__ == "__main__":
seq1 = 'abcdef'
seq2 = 'azced'
distance, alignment = align_strings('abcdef', 'azced')
print("\nFrom string: ", seq1, "\nto string:", seq2,
"\nMinimum edit distance:", distance,
"\nChanges:", alignment) | true | true |
f7354626844df5082680fdf972d139d4511fe5ec | 2,046 | py | Python | graphgallery/gallery/nodeclas/tensorflow/densegcn.py | TobiasSchmidtDE/GraphGallery | e627e4f454e0ce3813171305a524f5190a6e6f45 | [
"MIT"
] | null | null | null | graphgallery/gallery/nodeclas/tensorflow/densegcn.py | TobiasSchmidtDE/GraphGallery | e627e4f454e0ce3813171305a524f5190a6e6f45 | [
"MIT"
] | null | null | null | graphgallery/gallery/nodeclas/tensorflow/densegcn.py | TobiasSchmidtDE/GraphGallery | e627e4f454e0ce3813171305a524f5190a6e6f45 | [
"MIT"
] | null | null | null | from graphgallery.sequence import FullBatchSequence
from graphgallery import functional as gf
from graphgallery.gallery.nodeclas import TensorFlow
from graphgallery.gallery import Trainer
from graphgallery.nn.models import get_model
@TensorFlow.register()
class DenseGCN(Trainer):
    """
    Implementation of Dense version of Graph Convolutional Networks (GCN).
    `[`Semi-Supervised Classification with Graph Convolutional Networks <https://arxiv.org/abs/1609.02907>`
    Tensorflow 1.x `Sparse version` implementation: <https://github.com/tkipf/gcn>
    Pytorch `Sparse version` implementation: <https://github.com/tkipf/pygcn>
    """
    def data_step(self,
                  adj_transform="normalize_adj",
                  attr_transform=None):
        # Normalize the adjacency matrix (densified via toarray) and
        # optionally transform node attributes, then move both to tensors
        # on the configured data device.
        graph = self.graph
        adj_matrix = gf.get(adj_transform)(graph.adj_matrix).toarray()
        node_attr = gf.get(attr_transform)(graph.node_attr)
        X, A = gf.astensors(node_attr, adj_matrix, device=self.data_device)
        # ``A`` and ``X`` are cached for later use
        self.register_cache(X=X, A=A)
    def model_step(self,
                   hids=[16],
                   acts=['relu'],
                   dropout=0.5,
                   weight_decay=5e-4,
                   lr=0.01,
                   bias=False):
        # Build the backend-specific DenseGCN model sized to this graph's
        # attribute and class dimensions.
        model = get_model("DenseGCN", self.backend)
        model = model(self.graph.num_node_attrs,
                      self.graph.num_node_classes,
                      hids=hids,
                      acts=acts,
                      dropout=dropout,
                      weight_decay=weight_decay,
                      lr=lr,
                      bias=bias)
        return model
    def train_loader(self, index):
        # Full-batch training: the whole (X, A) pair is fed each step and
        # the loss is restricted to the nodes selected by ``index``.
        labels = self.graph.node_label[index]
        sequence = FullBatchSequence([self.cache.X, self.cache.A],
                                     labels,
                                     out_index=index,
                                     device=self.data_device)
        return sequence
| 34.677966 | 111 | 0.567449 | from graphgallery.sequence import FullBatchSequence
from graphgallery import functional as gf
from graphgallery.gallery.nodeclas import TensorFlow
from graphgallery.gallery import Trainer
from graphgallery.nn.models import get_model
@TensorFlow.register()
class DenseGCN(Trainer):
def data_step(self,
adj_transform="normalize_adj",
attr_transform=None):
graph = self.graph
adj_matrix = gf.get(adj_transform)(graph.adj_matrix).toarray()
node_attr = gf.get(attr_transform)(graph.node_attr)
X, A = gf.astensors(node_attr, adj_matrix, device=self.data_device)
self.register_cache(X=X, A=A)
def model_step(self,
hids=[16],
acts=['relu'],
dropout=0.5,
weight_decay=5e-4,
lr=0.01,
bias=False):
model = get_model("DenseGCN", self.backend)
model = model(self.graph.num_node_attrs,
self.graph.num_node_classes,
hids=hids,
acts=acts,
dropout=dropout,
weight_decay=weight_decay,
lr=lr,
bias=bias)
return model
def train_loader(self, index):
labels = self.graph.node_label[index]
sequence = FullBatchSequence([self.cache.X, self.cache.A],
labels,
out_index=index,
device=self.data_device)
return sequence
| true | true |
f7354732fe2c9f7afd7eb1aed50431d427735d45 | 10,818 | py | Python | gimmemotifs/prediction.py | JGASmits/gimmemotifs | 16d82004d62c22d67c2a2e01493b07ad4a62ef1e | [
"MIT"
] | null | null | null | gimmemotifs/prediction.py | JGASmits/gimmemotifs | 16d82004d62c22d67c2a2e01493b07ad4a62ef1e | [
"MIT"
] | null | null | null | gimmemotifs/prediction.py | JGASmits/gimmemotifs | 16d82004d62c22d67c2a2e01493b07ad4a62ef1e | [
"MIT"
] | null | null | null | # Copyright (c) 2009-2019 Simon van Heeringen <simon.vanheeringen@gmail.com>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
"""Parallel prediction of sequence motifs """
# Python imports
import sys
import logging
try:
import _thread as thread
except ImportError:
import thread
from time import sleep
import inspect
from multiprocessing import Pool
# GimmeMotifs imports
from gimmemotifs import tools as tool_classes
from gimmemotifs.config import MotifConfig, parse_denovo_params
from gimmemotifs.fasta import Fasta
from gimmemotifs import mytmpdir
from gimmemotifs.stats import calc_stats
logger = logging.getLogger("gimme.prediction")
# Python 2 compatibility shim: register a pickler for bound/unbound methods
# so they can cross multiprocessing boundaries.  On Python 3 the import of
# ``copy_reg`` fails and the whole block is a deliberate no-op.
try:
    import copy_reg
    import types
    def _pickle_method(m):
        # Unbound methods pickle via their class; bound methods via instance.
        if m.im_self is None:
            return getattr, (m.im_class, m.im_func.func_name)
        else:
            return getattr, (m.im_self, m.im_func.func_name)
    copy_reg.pickle(types.MethodType, _pickle_method)
except Exception:
    pass
def mp_calc_stats(motifs, fg_fa, bg_fa, zscore, gc, genome, bg_name=None):
    """Parallel calculation of motif statistics.

    This function is the unit of work submitted to a multiprocessing pool,
    so calc_stats() itself runs single-threaded (ncpus=1).

    Returns a ``(bg_name, stats)`` tuple; *bg_name* defaults to "default".
    """
    try:
        stats = calc_stats(
            motifs=motifs,
            fg_file=fg_fa,
            bg_file=bg_fa,
            ncpus=1,
            zscore=zscore,
            gc=gc,
            genome=genome,
        )
    except Exception as e:
        # Report the failure on stderr, then re-raise so the caller's .get()
        # surfaces the error.  (The original also assigned ``stats = {}``
        # here, which was dead code because of the re-raise.)
        sys.stderr.write("ERROR: {}\n".format(str(e)))
        raise
    if not bg_name:
        bg_name = "default"
    return bg_name, stats
def _run_tool(job_name, t, fastafile, params):
"""Parallel motif prediction."""
try:
result = t.run(fastafile, params, mytmpdir())
except Exception as e:
result = ([], "", "{} failed to run: {}".format(job_name, e))
return job_name, result
class PredictionResult(object):
    """Collect predicted motifs from parallel tool runs and (optionally)
    schedule statistics jobs for them.

    Attributes:
        motifs: all motifs reported so far.
        stats: ``{motif_id: {bg_name: stats}}`` filled in by add_stats().
        finished: names of tool jobs that have completed.
    """
    def __init__(
        self,
        outfile,
        genome=None,
        fg_file=None,
        background=None,
        gc=False,
        do_counter=True,
        job_server=None,
    ):
        self.lock = thread.allocate_lock()
        self.motifs = []
        self.finished = []
        self.stats = {}
        self.stat_jobs = []
        self.outfile = outfile
        self.genome = genome
        if job_server:
            self.job_server = job_server
        else:
            self.job_server = Pool(2)
        self.counter = 0
        self.do_counter = do_counter
        # Truncate the output file; motifs are appended as they arrive.
        open(outfile, "w").close()
        # Statistics are only computed when both a foreground FASTA and
        # background FASTAs are supplied.
        if fg_file and background:
            self.fg_fa = Fasta(fg_file)
            self.background = dict(
                [(bg, Fasta(fname)) for bg, fname in background.items()]
            )
            self.do_stats = True
            self.gc = gc
            self.zscore = self.gc
            if self.gc:
                if genome is None:
                    raise ValueError(
                        "Need a genome when calculating GC% zscores for motif statistics"
                    )
                else:
                    self.genome = genome
        else:
            self.do_stats = False
    def add_motifs(self, args):
        """Callback for finished motif-tool jobs: record the motifs, append
        them to the output file and schedule statistics jobs.
        """
        self.lock.acquire()
        # Fix: the original returned early on malformed args while still
        # holding the lock, deadlocking every subsequent callback.  The body
        # is now wrapped in try/finally so the lock is always released.
        try:
            # Malformed callback payload: record the failure and bail out.
            if args is None or len(args) != 2 or len(args[1]) != 3:
                try:
                    job = args[0]
                    logger.warn("job %s failed", job)
                    self.finished.append(job)
                except Exception:
                    logger.warn("job failed")
                return
            job, (motifs, stdout, stderr) = args
            logger.info("%s finished, found %s motifs", job, len(motifs))
            for motif in motifs:
                if self.do_counter:
                    self.counter += 1
                    motif.id = "gimme_{}_".format(self.counter) + motif.id
                f = open(self.outfile, "a")
                f.write("%s\n" % motif.to_pfm())
                f.close()
                self.motifs.append(motif)
            if self.do_stats and len(motifs) > 0:
                logger.debug("Starting stats job of %s motifs", len(motifs))
                for bg_name, bg_fa in self.background.items():
                    # Fix: the original assigned the AsyncResult to ``job``,
                    # clobbering the job *name* used in the debug logs and in
                    # self.finished below.
                    stat_job = self.job_server.apply_async(
                        mp_calc_stats,
                        (
                            motifs,
                            self.fg_fa,
                            bg_fa,
                            self.zscore,
                            self.gc,
                            self.genome,
                            bg_name,
                        ),
                        callback=self.add_stats,
                    )
                    self.stat_jobs.append(stat_job)
            logger.debug("stdout %s: %s", job, stdout)
            logger.debug("stdout %s: %s", job, stderr)
            self.finished.append(job)
        finally:
            self.lock.release()
    def wait_for_stats(self):
        """Block until all scheduled statistics jobs have finished."""
        logger.debug("waiting for statistics to finish")
        for job in self.stat_jobs:
            job.get()
        sleep(2)
    def add_stats(self, args):
        """Callback to merge one ``(bg_name, stats)`` result into self.stats."""
        bg_name, stats = args
        logger.debug("Stats: %s %s", bg_name, stats)
        for motif_id in stats.keys():
            if motif_id not in self.stats:
                self.stats[motif_id] = {}
            self.stats[motif_id][bg_name] = stats[motif_id]
def pp_predict_motifs(
    fastafile,
    outfile,
    analysis="small",
    organism="hg19",
    single=False,
    background="",
    tools=None,
    job_server=None,
    ncpus=8,
    max_time=-1,
    stats_fg=None,
    stats_bg=None,
    gc=True,
):
    """Parallel prediction of motifs.
    Utility function for gimmemotifs.denovo.gimme_motifs. Probably better to
    use that, instead of this function directly.

    Submits one job per tool (and per motif width for width-aware tools) to
    a multiprocessing pool and collects results in a PredictionResult.
    """
    if tools is None:
        tools = {}
    config = MotifConfig()
    # Fall back to the configured default tool set when none was requested.
    if not tools:
        tools = dict([(x, 1) for x in config.get_default_params()["tools"].split(",")])
    # Motif width range scanned by width-aware tools; larger analyses scan
    # wider motifs with a coarser step.
    wmin = 5
    step = 1
    if analysis in ["large", "xl"]:
        step = 2
        wmin = 6
    analysis_max = {"xs": 5, "small": 8, "medium": 10, "large": 14, "xl": 20}
    wmax = analysis_max[analysis]
    if analysis == "xs":
        sys.stderr.write("Setting analysis xs to small")
        analysis = "small"
    if not job_server:
        n_cpus = int(config.get_default_params()["ncpus"])
        job_server = Pool(processes=n_cpus, maxtasksperchild=1000)
    jobs = {}
    result = PredictionResult(
        outfile,
        organism,
        fg_file=stats_fg,
        background=stats_bg,
        gc=gc,
        job_server=job_server,
    )
    # Dynamically instantiate every MotifProgram subclass (skipping the
    # abstract base itself).
    toolio = [
        x[1]()
        for x in inspect.getmembers(
            tool_classes,
            lambda x: inspect.isclass(x)
            and issubclass(x, tool_classes.motifprogram.MotifProgram),
        )
        if x[0] != "MotifProgram"
    ]
    # TODO:
    # Add warnings for running time: Weeder, GADEM
    params = {
        "analysis": analysis,
        "background": background,
        "single": single,
        "organism": organism,
    }
    # Tools that don't use a specified width usually take longer
    # ie. GADEM, XXmotif, MEME
    # Start these first.
    for t in [tool for tool in toolio if not tool.use_width]:
        if t.name in tools and tools[t.name]:
            logger.debug("Starting %s job", t.name)
            job_name = t.name
            jobs[job_name] = job_server.apply_async(
                _run_tool, (job_name, t, fastafile, params), callback=result.add_motifs
            )
        else:
            logger.debug("Skipping %s", t.name)
    # Width-aware tools get one job per width in [wmin, wmax].
    for t in [tool for tool in toolio if tool.use_width]:
        if t.name in tools and tools[t.name]:
            for i in range(wmin, wmax + 1, step):
                logger.debug("Starting %s job, width %s", t.name, i)
                job_name = "%s_width_%s" % (t.name, i)
                my_params = params.copy()
                my_params["width"] = i
                jobs[job_name] = job_server.apply_async(
                    _run_tool,
                    (job_name, t, fastafile, my_params),
                    callback=result.add_motifs,
                )
        else:
            logger.debug("Skipping %s", t.name)
    logger.info("all jobs submitted")
    # Wait for all tool jobs, then for the statistics jobs they spawned.
    for job in jobs.values():
        job.get()
    result.wait_for_stats()
    return result
def predict_motifs(infile, bgfile, outfile, params=None, stats_fg=None, stats_bg=None):
    """ Predict motifs, input is a FASTA-file

    Validates/loads de novo parameters, builds the enabled-tool mapping and
    delegates to pp_predict_motifs(); returns its PredictionResult.
    """
    # Parse parameters; if any required key is missing, reload the defaults.
    required_params = [
        "tools",
        "available_tools",
        "analysis",
        "genome",
        "use_strand",
        "max_time",
    ]
    if params is None:
        params = parse_denovo_params()
    else:
        for p in required_params:
            if p not in params:
                params = parse_denovo_params()
                break
    # NOTE(review): this only logs an error and continues without a genome —
    # confirm whether it should raise instead.
    if "genome" not in params:
        logger.error("Need a genome for de novo motif prediction")
    # Map every available tool to whether it is enabled in params["tools"].
    tools = dict(
        [
            (x.strip(), x in [y.strip() for y in params["tools"].split(",")])
            for x in params["available_tools"].split(",")
        ]
    )
    # Predict the motifs
    analysis = params["analysis"]
    logger.info("starting motif prediction (%s)", analysis)
    logger.info("tools: %s", ", ".join([x for x in tools.keys() if tools[x]]))
    result = pp_predict_motifs(
        infile,
        outfile,
        analysis,
        params.get("genome", None),
        params["use_strand"],
        bgfile,
        tools,
        None,
        max_time=params["max_time"],
        stats_fg=stats_fg,
        stats_bg=stats_bg,
    )
    motifs = result.motifs
    logger.info("predicted %s motifs", len(motifs))
    logger.debug("written to %s", outfile)
    if len(motifs) == 0:
        logger.info("no motifs found")
        result.motifs = []
    return result
| 28.171875 | 89 | 0.547513 |
import sys
import logging
try:
import _thread as thread
except ImportError:
import thread
from time import sleep
import inspect
from multiprocessing import Pool
from gimmemotifs import tools as tool_classes
from gimmemotifs.config import MotifConfig, parse_denovo_params
from gimmemotifs.fasta import Fasta
from gimmemotifs import mytmpdir
from gimmemotifs.stats import calc_stats
logger = logging.getLogger("gimme.prediction")
try:
import copy_reg
import types
def _pickle_method(m):
if m.im_self is None:
return getattr, (m.im_class, m.im_func.func_name)
else:
return getattr, (m.im_self, m.im_func.func_name)
copy_reg.pickle(types.MethodType, _pickle_method)
except Exception:
pass
def mp_calc_stats(motifs, fg_fa, bg_fa, zscore, gc, genome, bg_name=None):
try:
stats = calc_stats(
motifs=motifs,
fg_file=fg_fa,
bg_file=bg_fa,
ncpus=1,
zscore=zscore,
gc=gc,
genome=genome,
)
except Exception as e:
sys.stderr.write("ERROR: {}\n".format(str(e)))
stats = {}
raise
if not bg_name:
bg_name = "default"
return bg_name, stats
def _run_tool(job_name, t, fastafile, params):
try:
result = t.run(fastafile, params, mytmpdir())
except Exception as e:
result = ([], "", "{} failed to run: {}".format(job_name, e))
return job_name, result
class PredictionResult(object):
    """Collects the output of asynchronous motif-prediction jobs.

    Tool jobs report back through :meth:`add_motifs` (used as a
    ``multiprocessing`` callback).  Discovered motifs are appended to
    ``outfile`` immediately; when foreground/background files are supplied,
    a statistics job is spawned per background for every batch of motifs.
    """

    def __init__(
        self,
        outfile,
        genome=None,
        fg_file=None,
        background=None,
        gc=False,
        do_counter=True,
        job_server=None,
    ):
        # Lock serializes add_motifs() callbacks, which run on the pool's
        # result-handler thread.
        self.lock = thread.allocate_lock()
        self.motifs = []
        self.finished = []
        self.stats = {}
        self.stat_jobs = []
        self.outfile = outfile
        self.genome = genome
        if job_server:
            self.job_server = job_server
        else:
            self.job_server = Pool(2)
        self.counter = 0
        self.do_counter = do_counter
        # Truncate the output file so motifs from a previous run don't linger.
        open(outfile, "w").close()
        if fg_file and background:
            self.fg_fa = Fasta(fg_file)
            self.background = dict(
                [(bg, Fasta(fname)) for bg, fname in background.items()]
            )
            self.do_stats = True
            self.gc = gc
            # z-scoring is coupled to GC normalization here.
            self.zscore = self.gc
            if self.gc:
                if genome is None:
                    raise ValueError(
                        "Need a genome when calculating GC% zscores for motif statistics"
                    )
                else:
                    self.genome = genome
        else:
            self.do_stats = False

    def add_motifs(self, args):
        """Callback for a finished tool job.

        ``args`` is expected to be ``(job_name, (motifs, stdout, stderr))``;
        anything else is treated as a failed job.
        """
        # BUGFIX: the lock used to be acquired manually and the "job failed"
        # path returned without releasing it, deadlocking every subsequent
        # callback.  try/finally guarantees release on all paths.
        self.lock.acquire()
        try:
            if args is None or len(args) != 2 or len(args[1]) != 3:
                try:
                    job = args[0]
                    logger.warning("job %s failed", job)
                    self.finished.append(job)
                except Exception:
                    logger.warning("job failed")
                return
            job, (motifs, stdout, stderr) = args
            logger.info("%s finished, found %s motifs", job, len(motifs))
            for motif in motifs:
                if self.do_counter:
                    self.counter += 1
                    motif.id = "gimme_{}_".format(self.counter) + motif.id
                f = open(self.outfile, "a")
                f.write("%s\n" % motif.to_pfm())
                f.close()
                self.motifs.append(motif)
            if self.do_stats and len(motifs) > 0:
                logger.debug("Starting stats job of %s motifs", len(motifs))
                for bg_name, bg_fa in self.background.items():
                    # BUGFIX: use a dedicated name for the AsyncResult; it
                    # previously shadowed the job-name string, so the
                    # AsyncResult object ended up in self.finished and in
                    # the log lines below.
                    stats_job = self.job_server.apply_async(
                        mp_calc_stats,
                        (
                            motifs,
                            self.fg_fa,
                            bg_fa,
                            self.zscore,
                            self.gc,
                            self.genome,
                            bg_name,
                        ),
                        callback=self.add_stats,
                    )
                    self.stat_jobs.append(stats_job)
            logger.debug("stdout %s: %s", job, stdout)
            # BUGFIX: this line used to label the stderr output as "stdout".
            logger.debug("stderr %s: %s", job, stderr)
            self.finished.append(job)
        finally:
            self.lock.release()

    def wait_for_stats(self):
        """Block until every outstanding statistics job has completed."""
        logger.debug("waiting for statistics to finish")
        for job in self.stat_jobs:
            job.get()
        # Grace period so late add_stats callbacks can fire.
        sleep(2)

    def add_stats(self, args):
        """Callback for a finished statistics job; merge per-motif stats."""
        bg_name, stats = args
        logger.debug("Stats: %s %s", bg_name, stats)
        for motif_id in stats.keys():
            if motif_id not in self.stats:
                self.stats[motif_id] = {}
            self.stats[motif_id][bg_name] = stats[motif_id]
    # NOTE(review): the `def pp_predict_motifs(...)` header line (and at least
    # the fastafile/outfile parameters referenced below) is missing from this
    # extract -- presumably lost when comments were stripped.  Confirm the
    # full signature against the original module before relying on it.
    tfile,
    analysis="small",
    organism="hg19",
    single=False,
    background="",
    tools=None,
    job_server=None,
    ncpus=8,
    max_time=-1,
    stats_fg=None,
    stats_bg=None,
    gc=True,
):
    if tools is None:
        tools = {}
    config = MotifConfig()
    # No explicit tool selection: enable every configured default tool.
    if not tools:
        tools = dict([(x, 1) for x in config.get_default_params()["tools"].split(",")])
    # Motif width range and step scale with the requested analysis size.
    wmin = 5
    step = 1
    if analysis in ["large", "xl"]:
        step = 2
        wmin = 6
    analysis_max = {"xs": 5, "small": 8, "medium": 10, "large": 14, "xl": 20}
    wmax = analysis_max[analysis]
    if analysis == "xs":
        sys.stderr.write("Setting analysis xs to small")
        analysis = "small"
    if not job_server:
        n_cpus = int(config.get_default_params()["ncpus"])
        job_server = Pool(processes=n_cpus, maxtasksperchild=1000)
    jobs = {}
    result = PredictionResult(
        outfile,
        organism,
        fg_file=stats_fg,
        background=stats_bg,
        gc=gc,
        job_server=job_server,
    )
    # Instantiate every MotifProgram subclass exposed by the tools module.
    toolio = [
        x[1]()
        for x in inspect.getmembers(
            tool_classes,
            lambda x: inspect.isclass(x)
            and issubclass(x, tool_classes.motifprogram.MotifProgram),
        )
        if x[0] != "MotifProgram"
    ]
    params = {
        "analysis": analysis,
        "background": background,
        "single": single,
        "organism": organism,
    }
    # Tools that choose their own motif width, ie. GADEM, XXmotif, MEME.
    # Start these first (they tend to be the long-running ones).
    for t in [tool for tool in toolio if not tool.use_width]:
        if t.name in tools and tools[t.name]:
            logger.debug("Starting %s job", t.name)
            job_name = t.name
            jobs[job_name] = job_server.apply_async(
                _run_tool, (job_name, t, fastafile, params), callback=result.add_motifs
            )
        else:
            logger.debug("Skipping %s", t.name)
    # Width-dependent tools get one job per width in [wmin, wmax].
    for t in [tool for tool in toolio if tool.use_width]:
        if t.name in tools and tools[t.name]:
            for i in range(wmin, wmax + 1, step):
                logger.debug("Starting %s job, width %s", t.name, i)
                job_name = "%s_width_%s" % (t.name, i)
                my_params = params.copy()
                my_params["width"] = i
                jobs[job_name] = job_server.apply_async(
                    _run_tool,
                    (job_name, t, fastafile, my_params),
                    callback=result.add_motifs,
                )
        else:
            logger.debug("Skipping %s", t.name)
    logger.info("all jobs submitted")
    # Wait for all tool jobs, then for the statistics jobs they spawned.
    for job in jobs.values():
        job.get()
    result.wait_for_stats()
    return result
def predict_motifs(infile, bgfile, outfile, params=None, stats_fg=None, stats_bg=None):
    """De novo motif prediction on ``infile``, writing motifs to ``outfile``.

    Falls back to the default de novo parameters when ``params`` is missing
    or incomplete, then delegates the actual work to ``pp_predict_motifs``.
    """
    required_params = [
        "tools",
        "available_tools",
        "analysis",
        "genome",
        "use_strand",
        "max_time",
    ]
    # Use the defaults when no parameters were given, or when any required
    # key is absent from the supplied ones.
    if params is None or any(p not in params for p in required_params):
        params = parse_denovo_params()
    if "genome" not in params:
        logger.error("Need a genome for de novo motif prediction")
    # Map every available tool to whether it was selected for this run.
    # NOTE(review): the membership test compares the *unstripped* available
    # name against stripped selected names; this only works if
    # available_tools entries never carry surrounding whitespace -- confirm.
    selected = [y.strip() for y in params["tools"].split(",")]
    tools = {
        x.strip(): x in selected for x in params["available_tools"].split(",")
    }
    analysis = params["analysis"]
    logger.info("starting motif prediction (%s)", analysis)
    logger.info("tools: %s", ", ".join(name for name, use in tools.items() if use))
    result = pp_predict_motifs(
        infile,
        outfile,
        analysis,
        params.get("genome", None),
        params["use_strand"],
        bgfile,
        tools,
        None,
        max_time=params["max_time"],
        stats_fg=stats_fg,
        stats_bg=stats_bg,
    )
    found = result.motifs
    logger.info("predicted %s motifs", len(found))
    logger.debug("written to %s", outfile)
    if len(found) == 0:
        logger.info("no motifs found")
        result.motifs = []
    return result
| true | true |
f735474b231ea9c8e5dc166f22a06f796ec61d52 | 15,525 | py | Python | xayagametest/testcase.py | spaceexpanse/libxgame | f604e81d152d8c697846a3a01278e5305a4d6d3e | [
"MIT"
] | null | null | null | xayagametest/testcase.py | spaceexpanse/libxgame | f604e81d152d8c697846a3a01278e5305a4d6d3e | [
"MIT"
] | null | null | null | xayagametest/testcase.py | spaceexpanse/libxgame | f604e81d152d8c697846a3a01278e5305a4d6d3e | [
"MIT"
] | null | null | null | # Copyright (C) 2018-2021 The Xaya developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Basic framework for integration tests of Xaya games.
"""
from . import game
from . import premine
from . import xaya
import argparse
from contextlib import contextmanager
import json
import logging
import os.path
import random
import re
import shlex
import shutil
import sys
import time
from jsonrpclib import ProtocolError
XAYAD_BINARY_DEFAULT = "/usr/local/bin/xayad"
XCORE_BINARY_DEFAULT = "/usr/local/bin/xayax-core"
XETH_BINARY_DEFAULT = "/usr/local/bin/xayax-eth"
DEFAULT_DIR = "/tmp"
DIR_PREFIX = "xayagametest_"
def portGenerator (start):
  """
  Yields an endless, strictly increasing sequence of port numbers,
  beginning with the given one.
  """

  offset = 0
  while True:
    yield start + offset
    offset += 1
class XayaGameTest (object):
  """
  Base class for integration test cases of Xaya games. This manages the
  Xaya Core daemon, the game daemon, basic logging and the data directory.
  The actual test should override the "run" method with its test logic. It
  can control the Xaya Core daemon through rpc.xaya and the game daemon through
  rpc.game.
  """

  ##############################################################################
  # Main functionality, handling the setup of daemons and all that.

  def __init__ (self, gameId, gameBinaryDefault):
    """
    Configures the test for the given game ID and parses the command-line
    arguments (binaries to use, base directory, cleanup behaviour).
    """
    self.gameId = gameId

    desc = "Runs an integration test for the Xaya game g/%s." % gameId
    parser = argparse.ArgumentParser (description=desc)
    parser.add_argument ("--xayad_binary", default=XAYAD_BINARY_DEFAULT,
                         help="xayad binary to use in the test")
    parser.add_argument ("--xcore_binary", default=XCORE_BINARY_DEFAULT,
                         help="xayax-core binary to use")
    parser.add_argument ("--xeth_binary", default=XETH_BINARY_DEFAULT,
                         help="xayax-eth binary to use")
    parser.add_argument ("--game_daemon", default=gameBinaryDefault,
                         help="game daemon binary to use in the test")
    parser.add_argument ("--run_game_with", default="",
                         help="run game daemon with this helper binary"
                              " (e.g. valgrind)")
    parser.add_argument ("--dir", default=DEFAULT_DIR,
                         help="base directory for test runs")
    parser.add_argument ("--nocleanup", default=False, action="store_true",
                         help="do not clean up logs after success")
    self.addArguments (parser)
    self.args = parser.parse_args ()

    # This can be set to "none", "one socket" (default) and "two sockets".
    # That way, tests can control how the ZMQ notifications for pending moves
    # should be sent, if they need to.
    self.zmqPending = "one socket"

  def addArguments (self, parser):
    """
    This function is called to add additional arguments (test specific)
    for the argument parser. By default, none are added, but subclasses
    can override it as needed.
    """
    pass

  def main (self):
    """
    Executes the testcase, including setup and cleanup.
    """

    randomSuffix = "%08x" % random.getrandbits (32)
    self.basedir = os.path.join (self.args.dir, DIR_PREFIX + randomSuffix)
    shutil.rmtree (self.basedir, ignore_errors=True)
    os.mkdir (self.basedir)

    logfile = os.path.join (self.basedir, "xayagametest.log")
    logHandler = logging.FileHandler (logfile)
    logFmt = "%(asctime)s %(name)s (%(levelname)s): %(message)s"
    logHandler.setFormatter (logging.Formatter (logFmt))

    rootLogger = logging.getLogger ()
    rootLogger.setLevel (logging.INFO)
    rootLogger.addHandler (logHandler)
    self.log = logging.getLogger ("xayagametest.testcase")

    mainHandler = logging.StreamHandler (sys.stderr)
    mainHandler.setFormatter (logging.Formatter ("%(message)s"))
    self.mainLogger = logging.getLogger ("main")
    self.mainLogger.addHandler (logHandler)
    self.mainLogger.addHandler (mainHandler)
    self.mainLogger.info ("Base directory for integration test: %s"
                            % self.basedir)

    # Potentially split multiple parts of the "run_game_with" argument
    # into individual arguments. If run_game_with with "", then this
    # produces an empty array.
    self.runGameWith = shlex.split (self.args.run_game_with)

    startPort = random.randint (1024, 30000)
    self.log.info ("Using port range starting at %d, hopefully it is free"
                    % (startPort))
    self.ports = portGenerator (startPort)

    class RpcHandles:
      game = None
      # Other fields may be set based on the base-chain environment
      # (see runBaseChainEnvironment).
    self.rpc = RpcHandles ()

    cleanup = False
    success = False
    with self.runBaseChainEnvironment () as env:
      self.env = env
      self.gamenode = self.createGameNode ()
      self.startGameDaemon ()
      try:
        self.setup ()
        self.run ()
        self.mainLogger.info ("Test succeeded")
        success = True
        if self.args.nocleanup:
          self.mainLogger.info ("Not cleaning up logs as requested")
        else:
          cleanup = True
      # Deliberately broad: any failure (including assertion errors) is
      # logged before shutdown, and "success" simply remains False.
      except:
        self.mainLogger.exception ("Test failed")
        self.log.info ("Not cleaning up base directory %s" % self.basedir)
      finally:
        self.shutdown ()
        self.stopGameDaemon ()

    if cleanup:
      self.log.info ("Cleaning up base directory in %s" % self.basedir)
      shutil.rmtree (self.basedir, ignore_errors=True)

    logging.shutdown ()

    if not success:
      sys.exit ("Test failed")

  def setup (self):
    """
    This method does nothing, but it can be overridden by subclasses to
    provide custom setup functionality. That is run after setting up the
    base environment (e.g. Xaya Core RPC) but before the actual test
    logic in each test's run() method.
    """
    pass

  def shutdown (self):
    """
    This method does nothing, but it can be overridden by subclasses.
    It gets called when the test run is done and can be used to clean
    up resources.
    """
    pass

  def run (self):
    self.mainLogger.warning (
        "Test 'run' method not overridden, this tests nothing")

  def startGameDaemon (self, extraArgs=[], wait=True):
    """
    Starts the game daemon (again). This can be used to test situations where
    the game daemon is restarted and needs to catch up.
    """
    rpcurl, args = self.env.getGspArguments ()
    # BUGFIX: previously this called extraArgs.extend (args), which mutated
    # the shared mutable default list.  Every restart without explicit
    # extraArgs then accumulated another copy of the GSP arguments.  Build
    # a fresh list instead, also leaving a caller-supplied list untouched.
    fullArgs = list (extraArgs) + args
    self.gamenode.start (rpcurl, fullArgs, wait=wait)
    self.rpc.game = self.gamenode.rpc

  def stopGameDaemon (self):
    """
    Stops the game daemon. This can be used for testing situations where
    the game daemon is temporarily not running.
    """
    self.rpc.game = None
    self.gamenode.stop ()

  def recreateGameDaemon (self, gameBinary=None, extraArgs=[]):
    """
    Recreates and resyncs from scratch the game daemon. This can optionally
    set a different binary and extra args for it as well.
    """
    self.stopGameDaemon ()
    self.gamenode = self.createGameNode (gameBinary)
    self.startGameDaemon (extraArgs=extraArgs)
    self.log.info ("Restarted fresh game daemon with binary %s"
                      % self.gamenode.realBinary)

  def createGameNode (self, gameBinary=None):
    """
    Creates a Game instance with the configuration of this test case
    (optionally overriding the GSP binary with the given one).
    This is used internally and should not be called from tests themselves.
    """
    if gameBinary is None:
      gameBinary = self.args.game_daemon
    gameCmd = list (self.runGameWith)
    gameCmd.append (gameBinary)
    return game.Node (self.basedir, next (self.ports), gameCmd)

  ##############################################################################
  # Methods for setting up the base-chain environment.

  def runBaseChainEnvironment (self):
    """
    Constructs a context manager that runs the environment instance
    that should be used for the base chain that the GSP is then linked to.
    By default, this is just a xaya.Environment instance, but tests
    can override this method to e.g. link to a custom chain using Xaya X.
    """
    return self.runDirectCoreEnvironment ()

  @contextmanager
  def runDirectCoreEnvironment (self):
    """
    Runs a base-chain environment that just consists of a Xaya Core
    instance that is used directly by the GSP. This is the default
    implementation.
    """
    zmqPorts = {
      "gameblocks": next (self.ports),
    }
    if self.zmqPending == "none":
      self.log.info ("Disabling ZMQ for pending moves in Xaya Core")
    elif self.zmqPending == "one socket":
      self.log.info ("Pending moves are sent on the same socket as blocks")
      zmqPorts["gamepending"] = zmqPorts["gameblocks"]
    elif self.zmqPending == "two sockets":
      self.log.info ("Pending moves are sent on a different socket as blocks")
      zmqPorts["gamepending"] = next (self.ports)
      assert zmqPorts["gamepending"] != zmqPorts["gameblocks"]
    else:
      raise AssertionError ("Invalid zmqPending: %s" % self.zmqPending)
    env = xaya.Environment (self.basedir, next (self.ports), zmqPorts,
                            self.args.xayad_binary)
    with env.run ():
      self.xayanode = env.node
      self.rpc.xaya = env.node.rpc
      yield env

  @contextmanager
  def runXayaXCoreEnvironment (self):
    """
    Runs a base-chain environment that uses Xaya X to link back to
    a real Xaya Core instance.
    """
    if self.zmqPending != "one socket":
      raise AssertionError ("Xaya-X-Core only supports one-socket pending")
    from xayax import core
    env = core.Environment (self.basedir, self.ports,
                            self.args.xayad_binary, self.args.xcore_binary)
    with env.run ():
      self.xayanode = env.xayanode
      self.rpc.xaya = env.xayanode.rpc
      yield env

  @contextmanager
  def runXayaXEthEnvironment (self):
    """
    Runs a base-chain environment that uses Xaya X to link to an
    Ethereum-like test chain (based on Ganache).
    """
    if self.zmqPending != "one socket":
      raise AssertionError ("Xaya-X-Eth only supports one-socket pending")
    from xayax import eth
    env = eth.Environment (self.basedir, self.ports, self.args.xeth_binary)
    env.enablePending ()
    with env.run ():
      self.ethnode = env.ganache
      self.contracts = env.contracts
      self.rpc.eth = env.createGanacheRpc ()
      self.w3 = env.ganache.w3
      yield env

  ##############################################################################
  # Utility methods for testing.

  def registerNames (self, names):
    """
    Utility method to register names without any data yet. This can be used
    by tests to set up some initial player accounts for use in the test.
    """
    for nm in names:
      self.env.register ("p", nm)

  def registerOrUpdateName (self, name, value, *args, **kwargs):
    """
    Tries to update or register the name with the given value, depending
    on whether or not it already exists.
    Extra arguments are forwarded directly to the environment's
    move function.
    """
    pos = name.find ("/")
    assert pos > -1, "'%s' contains no namespace" % name
    ns = name[:pos]
    base = name[pos + 1:]
    if not self.env.nameExists (ns, base):
      self.env.register (ns, base)
    return self.env.move (ns, base, value, *args, **kwargs)

  def sendMove (self, name, move, *args, **kwargs):
    """
    Sends a given move for the name. This calls name_register or name_update,
    depending on whether the name exists already. It also builds up the
    full name value from self.gameId and move.
    """
    value = json.dumps ({"g": {self.gameId: move}})
    return self.registerOrUpdateName ("p/" + name, value, *args, **kwargs)

  def adminCommand (self, cmd, *args, **kwargs):
    """
    Sends an admin command with the given value. This calls name_register or
    name_update, depending on whether or not the g/ name for the game being
    tested already exists or not.
    """
    value = json.dumps ({"cmd": cmd})
    return self.registerOrUpdateName ("g/" + self.gameId, value,
                                      *args, **kwargs)

  def getCustomState (self, field, method, *args, **kwargs):
    """
    Calls an RPC method on the game daemon that returns game state. Makes
    sure to wait until the game state is synced.
    """
    fcn = getattr (self.rpc.game, method)
    bestblk, bestheight = self.env.getChainTip ()
    # Poll until the GSP reports being up-to-date at the current chain tip.
    while True:
      state = fcn (*args, **kwargs)
      self.assertEqual (state["gameid"], self.gameId)
      if state["state"] == "up-to-date" and state["blockhash"] == bestblk:
        self.assertEqual (state["height"], bestheight)
        if field is not None:
          return state[field]
        return
      self.log.warning (("Game state (%s, %s) does not match"
                          +" the best block (%s), waiting")
                          % (state["state"], state["blockhash"], bestblk))
      time.sleep (0.1)

  def syncGame (self):
    """
    Waits for the game daemon to sync up to the current Xaya Core
    blockchain state.
    """
    self.getCustomState (None, "getnullstate")

  def getGameState (self):
    """
    Returns the current game state. Makes sure to wait for the game daemon
    to sync up with Xaya's best block first.
    """
    return self.getCustomState ("gamestate", "getcurrentstate")

  def expectGameState (self, expected):
    """
    Expects that the current game state matches the given value.
    """
    actual = self.getGameState ()
    self.assertEqual (actual, expected)

  def getPendingState (self):
    """
    Returns the current state of pending moves. Callers must make sure to
    wait some time before calling here themselves, as there is no way to
    ensure this has synced with sent moves.
    """
    return self.getCustomState ("pending", "getpendingstate")

  def assertEqual (self, a, b):
    """
    Asserts that two values are equal, logging them if not.
    """
    if a == b:
      return
    self.log.error ("The value of:\n%s\n\nis not equal to:\n%s" % (a, b))
    raise AssertionError ("%s != %s" % (a, b))

  def generate (self, n):
    """
    Generates n new blocks on the Xaya network.
    """
    return self.env.generate (n)

  def expectError (self, code, msgRegExp, method, *args, **kwargs):
    """
    Calls the method object with the given arguments, and expects that
    an RPC error is raised matching the code and message.
    """
    try:
      method (*args, **kwargs)
      self.log.error ("Expected RPC error with code=%d and message %s"
                        % (code, msgRegExp))
      raise AssertionError ("expected RPC error was not raised")
    except ProtocolError as exc:
      self.log.info ("Caught expected RPC error: %s" % exc)
      (c, m, *_) = exc.args[0]
      self.assertEqual (c, code)
      msgPattern = re.compile (msgRegExp)
      assert msgPattern.match (m)

  def collectPremine (self, addr=None):
    """
    Collects the premine coins (whose keys are publicly known on regtest)
    and sends them to the given address or a new address from the wallet if
    no address is given. This can be used in tests to obtain a large balance
    and use it for testing purposes.
    """
    premine.collect (self.rpc.xaya, addr, logger=self.log)
| 31.813525 | 80 | 0.645926 |
from . import game
from . import premine
from . import xaya
import argparse
from contextlib import contextmanager
import json
import logging
import os.path
import random
import re
import shlex
import shutil
import sys
import time
from jsonrpclib import ProtocolError
XAYAD_BINARY_DEFAULT = "/usr/local/bin/xayad"
XCORE_BINARY_DEFAULT = "/usr/local/bin/xayax-core"
XETH_BINARY_DEFAULT = "/usr/local/bin/xayax-eth"
DEFAULT_DIR = "/tmp"
DIR_PREFIX = "xayagametest_"
def portGenerator (start):
p = start
while True:
yield p
p += 1
class XayaGameTest (object):
cess = True
if self.args.nocleanup:
self.mainLogger.info ("Not cleaning up logs as requested")
else:
cleanup = True
except:
self.mainLogger.exception ("Test failed")
self.log.info ("Not cleaning up base directory %s" % self.basedir)
finally:
self.shutdown ()
self.stopGameDaemon ()
if cleanup:
self.log.info ("Cleaning up base directory in %s" % self.basedir)
shutil.rmtree (self.basedir, ignore_errors=True)
logging.shutdown ()
if not success:
sys.exit ("Test failed")
def setup (self):
pass
def shutdown (self):
pass
def run (self):
self.mainLogger.warning (
"Test 'run' method not overridden, this tests nothing")
def startGameDaemon (self, extraArgs=[], wait=True):
rpcurl, args = self.env.getGspArguments ()
extraArgs.extend (args)
self.gamenode.start (rpcurl, extraArgs, wait=wait)
self.rpc.game = self.gamenode.rpc
def stopGameDaemon (self):
self.rpc.game = None
self.gamenode.stop ()
def recreateGameDaemon (self, gameBinary=None, extraArgs=[]):
self.stopGameDaemon ()
self.gamenode = self.createGameNode (gameBinary)
self.startGameDaemon (extraArgs=extraArgs)
self.log.info ("Restarted fresh game daemon with binary %s"
% self.gamenode.realBinary)
def createGameNode (self, gameBinary=None):
if gameBinary is None:
gameBinary = self.args.game_daemon
gameCmd = list (self.runGameWith)
gameCmd.append (gameBinary)
return game.Node (self.basedir, next (self.ports), gameCmd)
| true | true |
f735494d76e89eea633a6cb6df06dbff18c83820 | 410 | py | Python | trinity/nodes/events.py | C4Coin/py-fhm-evm | 0892d5f40b6a346eb4e60b2dd2f712f9226e6888 | [
"MIT"
] | 2 | 2020-01-30T21:51:00.000Z | 2020-07-22T14:51:05.000Z | trinity/nodes/events.py | C4Coin/py-fhm-evm | 0892d5f40b6a346eb4e60b2dd2f712f9226e6888 | [
"MIT"
] | null | null | null | trinity/nodes/events.py | C4Coin/py-fhm-evm | 0892d5f40b6a346eb4e60b2dd2f712f9226e6888 | [
"MIT"
] | 2 | 2019-09-05T01:31:56.000Z | 2019-09-17T09:09:16.000Z | from typing import (
Type,
)
from lahja import (
BaseEvent,
BaseRequestResponseEvent,
)
class NetworkIdResponse(BaseEvent):
    """Event carrying the node's network id in reply to a ``NetworkIdRequest``."""

    def __init__(self, network_id: int) -> None:
        self.network_id = network_id
class NetworkIdRequest(BaseRequestResponseEvent[NetworkIdResponse]):
    """Request event asking a node component for its network id."""

    @staticmethod
    def expected_response_type() -> Type[NetworkIdResponse]:
        # Tells the lahja event bus which event type answers this request.
        return NetworkIdResponse
| 18.636364 | 68 | 0.726829 | from typing import (
Type,
)
from lahja import (
BaseEvent,
BaseRequestResponseEvent,
)
class NetworkIdResponse(BaseEvent):
def __init__(self, network_id: int) -> None:
self.network_id = network_id
class NetworkIdRequest(BaseRequestResponseEvent[NetworkIdResponse]):
@staticmethod
def expected_response_type() -> Type[NetworkIdResponse]:
return NetworkIdResponse
| true | true |
f735499a53c6056424be38df56808e21b45eaacb | 558 | py | Python | project/delibere/migrations/0028_auto_20170615_2245.py | guglielmo/mosic2-db-delibere | a7c92adf42ad53023af47776c32baa2ee525f5e9 | [
"BSD-3-Clause"
] | null | null | null | project/delibere/migrations/0028_auto_20170615_2245.py | guglielmo/mosic2-db-delibere | a7c92adf42ad53023af47776c32baa2ee525f5e9 | [
"BSD-3-Clause"
] | null | null | null | project/delibere/migrations/0028_auto_20170615_2245.py | guglielmo/mosic2-db-delibere | a7c92adf42ad53023af47776c32baa2ee525f5e9 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-15 22:45
from __future__ import unicode_literals
from django.db import migrations
from delibere.models import Settore
# this migration rebuilds the tree object
# into the mptt representation, after migrating
# from a flat table
def rebuild_objects(apps, schema_editor):
    """Recompute the MPTT tree representation for all Settore rows.

    Data migration step: after moving from a flat table, rebuild() makes the
    stored tree fields consistent with the parent relations.
    """
    Settore.objects.rebuild()
class Migration(migrations.Migration):

    # Must run after migration 0027 of the delibere app.
    dependencies = [
        ('delibere', '0027_auto_20170615_2238'),
    ]

    operations = [
        # Forward-only data migration; no reverse function is provided.
        migrations.RunPython(rebuild_objects),
    ]
| 22.32 | 48 | 0.722222 |
from __future__ import unicode_literals
from django.db import migrations
from delibere.models import Settore
def rebuild_objects(apps, schema_editor):
Settore.objects.rebuild()
class Migration(migrations.Migration):
dependencies = [
('delibere', '0027_auto_20170615_2238'),
]
operations = [
migrations.RunPython(rebuild_objects),
]
| true | true |
f73549cbedd9934db449c55fc52dc5f5d985dc0d | 5,388 | py | Python | coffee_machine/main.py | PratyushPriyam/Python_Projects | d97785464921ce24d2665360ec023b51dd26318b | [
"MIT"
] | null | null | null | coffee_machine/main.py | PratyushPriyam/Python_Projects | d97785464921ce24d2665360ec023b51dd26318b | [
"MIT"
] | null | null | null | coffee_machine/main.py | PratyushPriyam/Python_Projects | d97785464921ce24d2665360ec023b51dd26318b | [
"MIT"
] | null | null | null | from Utilities import MENU, resources
from art import logo
print(logo)
shop_open_and_ingredients_available = True
pay = 0
Water = resources["water"]
Milk = resources["milk"]
Coffee = resources["coffee"]
espresso_water = MENU["espresso"]["ingredients"]["water"]
espresso_coffee = MENU["espresso"]["ingredients"]["coffee"]
latte_water = MENU["latte"]["ingredients"]["water"]
latte_coffee = MENU["latte"]["ingredients"]["coffee"]
latte_milk = MENU["latte"]["ingredients"]["milk"]
cappuccino_water = MENU["cappuccino"]["ingredients"]["water"]
cappuccino_coffee = MENU["cappuccino"]["ingredients"]["coffee"]
cappuccino_milk = MENU["cappuccino"]["ingredients"]["milk"]
def report():
    """Print the current inventory levels and the total money collected."""
    inventory = (("Water", Water), ("Milk", Milk), ("Coffee", Coffee))
    for label, amount in inventory:
        print(f"{label} left : {amount}")
    print(f"Total money collected: {pay}")
# Serves one customer; also understands the "report" and "off"
# maintenance commands (the latter shuts the machine down).
def make_coffee():
    """Handle one customer interaction.

    Takes the order, verifies the machine has the required ingredients,
    collects payment, then dispenses the drink and updates the global
    inventory and the money counter.

    Bug fixes versus the previous version:
      * the latte/cappuccino branches used ``if`` instead of ``elif`` after
        the insufficient-money check, so a drink was dispensed (and the
        inventory deducted) even after printing a refund message;
      * the resource checks contained duplicated branches and always-true
        operands (e.g. ``and espresso_coffee``), which made several
        shortage messages unreachable or wrong.
    The resources are now checked *before* money is taken, so a refund is
    never needed for a drink that cannot be made.
    """
    global Water, Coffee, Milk, shop_open_and_ingredients_available, pay
    choice = input("What would you like to have? (espresso Rs.25/latte Rs.35/cappuccino Rs.50): ")
    if "report" in choice:
        report()
        return
    if "off" in choice:
        shop_open_and_ingredients_available = False
        print("SYSTEM IS CLOSED FOR REPAIR.")
        return
    # Identify the ordered drink; unrecognized input is silently ignored,
    # matching the previous behaviour.
    drink = next((name for name in MENU if name in choice), None)
    if drink is None:
        return
    # Check the inventory first, before taking any money.
    needed = MENU[drink]["ingredients"]
    available = {"water": Water, "milk": Milk, "coffee": Coffee}
    missing = [item for item in needed if available[item] < needed[item]]
    if missing:
        for item in missing:
            print(f"Sorry, {item.capitalize()} is over")
        return
    cost = MENU[drink]["cost"]
    money = int(input("Enter the money for the drink of your choice"))
    if money < cost:
        print(f"Money insufficient. Here's your refund of RS.{money}")
        return
    # Dispense: deduct the ingredients, record the payment, return change.
    Water -= needed.get("water", 0)
    Milk -= needed.get("milk", 0)
    Coffee -= needed.get("coffee", 0)
    pay += cost
    print(f"Here is your {drink.capitalize()}. Thank You!")
    print(f"Here's your change of RS.{money - cost}")
# Keep serving customers until the machine is switched off via the
# "off" command handled inside make_coffee().
while shop_open_and_ingredients_available:
    make_coffee()
| 43.804878 | 99 | 0.617483 | from Utilities import MENU, resources
from art import logo
print(logo)
shop_open_and_ingredients_available = True
pay = 0
Water = resources["water"]
Milk = resources["milk"]
Coffee = resources["coffee"]
espresso_water = MENU["espresso"]["ingredients"]["water"]
espresso_coffee = MENU["espresso"]["ingredients"]["coffee"]
latte_water = MENU["latte"]["ingredients"]["water"]
latte_coffee = MENU["latte"]["ingredients"]["coffee"]
latte_milk = MENU["latte"]["ingredients"]["milk"]
cappuccino_water = MENU["cappuccino"]["ingredients"]["water"]
cappuccino_coffee = MENU["cappuccino"]["ingredients"]["coffee"]
cappuccino_milk = MENU["cappuccino"]["ingredients"]["milk"]
def report():
print(f"Water left : {Water}")
print(f"Milk left : {Milk}")
print(f"Coffee left : {Coffee}")
print(f"Total money collected: {pay}")
def make_coffee():
global Water, Coffee, Milk, shop_open_and_ingredients_available, pay
choice = input("What would you like to have? (espresso Rs.25/latte Rs.35/cappuccino Rs.50): ")
if "report" in choice:
report()
elif "off" in choice:
shop_open_and_ingredients_available = False
print("SYSTEM IS CLOSED FOR REPAIR.")
elif "espresso" in choice:
money = int(input("Enter the money for the drink of your choice"))
if money < MENU['espresso']['cost']:
print(f"Money insufficient. Here's your refund of RS.{money}")
elif Water >= espresso_water and Coffee >= espresso_coffee:
print("Here is your Espresso. Thank You!")
print(f"Here's your change of RS.{money - MENU['espresso']['cost']}")
Water -= espresso_water
Coffee -= espresso_coffee
pay += 25
elif Water < espresso_water and espresso_coffee:
print("Sorry, Water is over")
elif Water > espresso_water and espresso_coffee:
print("Sorry, Coffee is over")
elif Water < espresso_water and espresso_coffee:
print("Water and Coffee are over")
elif Water < espresso_water:
print("Sorry, Water Shortage")
elif Coffee < espresso_coffee:
print("Sorry, Coffee Shortage")
else:
print("Sorry, We are currently facing some technical issues")
elif "latte" in choice:
money = int(input("Enter the money for the drink of your choice"))
if money < MENU['latte']['cost']:
print(f"Money insufficient. Here's your refund of RS.{money}")
if Water >= latte_water and Coffee >= latte_coffee and Milk >= latte_milk:
print("Here is your Latte. Thank You!")
print(f"Here's your change of RS.{money - MENU['latte']['cost']}")
Water -= latte_water
Coffee -= latte_coffee
Milk -= latte_milk
pay += 35
elif Water < latte_water and Coffee > latte_coffee and Milk > latte_milk:
print("Sorry, Water is over")
elif Water > latte_water and Coffee < latte_coffee and Milk > latte_milk:
print("Sorry, Coffee is over")
elif Water > latte_water and Coffee > latte_coffee and Milk < latte_milk:
print("Sorry, Milk is over")
elif Water < latte_water and Coffee < latte_coffee and Milk < latte_milk:
print("Water, Coffee and Milk are over")
elif Water < latte_water:
print("Sorry, Water Shortage")
elif Coffee < latte_coffee:
print("Sorry, Coffee Shortage")
elif Milk < latte_milk:
print("Sorry, Milk shortage")
else:
print("Sorry, We are currently facing some technical issues")
elif "cappuccino" in choice:
money = int(input("Enter the money for the drink of your choice"))
if money < MENU['cappuccino']['cost']:
print(f"Money insufficient. Here's your refund of RS.{money}")
if Water >= cappuccino_water and Coffee >= cappuccino_coffee and Milk >= cappuccino_milk:
print("Here is your cappuccino. Thank You!")
print(f"Here's your change of RS.{money - MENU['cappuccino']['cost']}")
Water -= cappuccino_water
Coffee -= cappuccino_coffee
Milk -= cappuccino_milk
pay += 50
elif Water < cappuccino_water and Coffee > cappuccino_coffee and Milk > cappuccino_milk:
print("Sorry, Water is over")
elif Water > cappuccino_water and Coffee < cappuccino_coffee and Milk > cappuccino_milk:
print("Sorry, Coffee is over")
elif Water > cappuccino_water and Coffee > cappuccino_coffee and Milk < cappuccino_milk:
print("Sorry, Milk is over")
elif Water < cappuccino_water and Coffee < cappuccino_coffee and Milk < cappuccino_milk:
print("Water, Coffee and Milk are over")
elif Water < cappuccino_water:
print("Sorry, Water Shortage")
elif Coffee < cappuccino_coffee:
print("Sorry, Coffee Shortage")
elif Milk < cappuccino_milk:
print("Sorry, Milk shortage")
else:
print("Sorry, We are currently facing some technical issues")
while shop_open_and_ingredients_available:
make_coffee()
| true | true |
f73549dbc37500831c326b4af09cfc79c275f5f1 | 6,494 | py | Python | src/transformers/adapters/models/distilbert.py | uunal/adapter-transformers | 73a95a75f803e8fd243fc3d55ff3a9d557891377 | [
"Apache-2.0"
] | 723 | 2020-07-16T13:02:25.000Z | 2022-03-31T21:03:55.000Z | src/transformers/adapters/models/distilbert.py | uunal/adapter-transformers | 73a95a75f803e8fd243fc3d55ff3a9d557891377 | [
"Apache-2.0"
] | 170 | 2020-07-16T14:39:11.000Z | 2022-03-31T13:02:11.000Z | src/transformers/adapters/models/distilbert.py | uunal/adapter-transformers | 73a95a75f803e8fd243fc3d55ff3a9d557891377 | [
"Apache-2.0"
] | 131 | 2020-07-16T14:38:16.000Z | 2022-03-29T19:43:18.000Z | from typing import Union
import torch
from torch import nn
from ..composition import AdapterCompositionBlock, parse_composition
from ..model_mixin import InvertibleAdaptersMixin, ModelAdaptersMixin
from .bert import BertEncoderAdaptersMixin, BertModelHeadsMixin, BertOutputAdaptersMixin, BertSelfOutputAdaptersMixin
class DistilBertSelfAttentionAdaptersModule(BertSelfOutputAdaptersMixin, nn.Module):
    """Holds the attention-side adapters of one DistilBert Transformer block."""

    def __init__(self, parent):
        super().__init__()
        self.config = parent.config
        # Bypass nn.Module.__setattr__ so the parent block is stored as a plain
        # attribute and is NOT registered as a submodule (avoids an ownership cycle).
        object.__setattr__(self, "parent", parent)

    @property
    def transformer_layer_norm(self):
        # The LayerNorm applied after self-attention lives on the parent block.
        return self.parent.sa_layer_norm
class DistilBertOutputAdaptersModule(BertOutputAdaptersMixin, nn.Module):
    """Holds the output-side (feed-forward) adapters of one DistilBert Transformer block."""

    def __init__(self, parent):
        super().__init__()
        self.config = parent.config
        # Bypass nn.Module.__setattr__ so the parent block is stored as a plain
        # attribute and is NOT registered as a submodule (avoids an ownership cycle).
        object.__setattr__(self, "parent", parent)

    @property
    def transformer_layer_norm(self):
        # The LayerNorm applied after the feed-forward output, owned by the parent.
        return self.parent.output_layer_norm
class DistilBertTransfomerBlockAdaptersMixin:
    """Mixes adapter support into DistilBert's TransformerBlock.

    Every operation is fanned out, in a fixed order, to the two per-block
    adapter containers: the attention-side one first, then the output-side one.
    """

    def _init_adapter_modules(self):
        self.attention_adapters = DistilBertSelfAttentionAdaptersModule(self)
        self.output_adapters = DistilBertOutputAdaptersModule(self)
        self.attention_adapters._init_adapter_modules()
        self.output_adapters._init_adapter_modules()
        # Re-attach the "parent" back-references before every forward call.
        self.register_forward_pre_hook(self._adapter_block_pre_hook)

    def _both_adapter_modules(self):
        """The two adapter containers, in attention-then-output order."""
        return (self.attention_adapters, self.output_adapters)

    def add_fusion_layer(self, adapter_names):
        for location in self._both_adapter_modules():
            location.add_fusion_layer(adapter_names)

    def add_adapter(self, adapter_name: str, layer_idx: int):
        for location in self._both_adapter_modules():
            location.add_adapter(adapter_name, layer_idx)

    def delete_adapter(self, adapter_name):
        for location in self._both_adapter_modules():
            location.delete_adapter(adapter_name)

    def delete_fusion_layer(self, adapter_names):
        for location in self._both_adapter_modules():
            location.delete_fusion_layer(adapter_names)

    def enable_adapters(self, adapter_names: list, unfreeze_adapters: bool, unfreeze_attention: bool):
        for location in self._both_adapter_modules():
            location.enable_adapters(adapter_names, unfreeze_adapters, unfreeze_attention)

    # Makes sure the "parent" reference always points to the correct module.
    # This is especially relevant when using torch data parallelism.
    @staticmethod
    def _adapter_block_pre_hook(module, input_tensors):
        object.__setattr__(module.attention_adapters, "parent", module)
        object.__setattr__(module.output_adapters, "parent", module)
class DistilBertTransformerAdaptersMixin(BertEncoderAdaptersMixin):
    """Adds adapters to the Transformer module of DistilBert."""

    # All behavior is inherited from the BERT encoder mixin; no
    # DistilBert-specific overrides are needed here.
    pass
class DistilBertModelAdaptersMixin(InvertibleAdaptersMixin, ModelAdaptersMixin):
    """Adds adapters to the DistilBert module."""

    def __init__(self, *args, **kwargs):
        # No extra state of its own; defers entirely to the mixin/base initializers.
        super().__init__(*args, **kwargs)

    def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock]):
        """Sets the model into mode for training the given adapters."""
        self.train()
        # Freeze the base model so only adapter parameters receive gradients.
        self.freeze_model(True)
        adapter_setup = parse_composition(adapter_setup)
        # enable_adapters(setup, unfreeze_adapters=True, unfreeze_attention=False)
        self.transformer.enable_adapters(adapter_setup, True, False)
        self.enable_invertible_adapters(adapter_setup.flatten())
        # use the adapters to be trained by default in every forward pass
        self.set_active_adapters(adapter_setup)

    def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):
        """Sets the model into mode for training of adapter fusion determined by a list of adapter names."""
        self.train()
        self.freeze_model(True)
        adapter_setup = parse_composition(adapter_setup)
        # Fusion (attention) layers are always unfrozen; the adapters themselves
        # only when the caller requests it.
        self.transformer.enable_adapters(adapter_setup, unfreeze_adapters, True)
        # use the adapters to be trained by default in every forward pass
        self.set_active_adapters(adapter_setup)

    def _add_adapter(self, adapter_name):
        # Register both the per-layer transformer adapters and the invertible
        # (embedding-level) adapter under the same name.
        self.transformer.add_adapter(adapter_name)
        self.add_invertible_adapter(adapter_name)

    def _add_fusion_layer(self, adapter_names):
        self.transformer.add_fusion_layer(adapter_names)

    def _delete_adapter(self, adapter_name: str):
        # Mirror of _add_adapter: remove both registrations.
        self.transformer.delete_adapter(adapter_name)
        self.delete_invertible_adapter(adapter_name)

    def _delete_fusion_layer(self, adapter_names):
        self.transformer.delete_fusion_layer(adapter_names)

    def get_fusion_regularization_loss(self):
        """Squared-error penalty (weight 0.01) pulling every fusion "value" weight matrix toward the identity."""
        reg_loss = 0.0
        # Identity matrix of shape (hidden_size, hidden_size) on the model device.
        target = torch.zeros((self.config.hidden_size, self.config.hidden_size)).fill_diagonal_(1.0).to(self.device)
        for _, v in self.transformer.layer._modules.items():

            for _, layer_fusion in v.output_adapters.adapter_fusion_layer.items():
                if hasattr(layer_fusion, "value"):
                    reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()

            for _, layer_fusion in v.attention_adapters.adapter_fusion_layer.items():
                if hasattr(layer_fusion, "value"):
                    reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()

        return reg_loss

    def get_adapter(self, name):
        """Collect the adapter modules registered under ``name``.

        Returns a dict keyed by layer index; each value maps "attention" /
        "output" to the matching adapter module of that layer.
        """
        return_adapters = {}
        for idx, layer in enumerate(self.transformer.layer):
            adapters = {
                "attention": layer.attention_adapters.adapters,
                "output": layer.output_adapters.adapters,
            }
            for key, adapt in adapters.items():
                if hasattr(adapt, name):
                    if idx not in return_adapters:
                        return_adapters[idx] = {}
                    return_adapters[idx][key] = getattr(adapt, name)

        return return_adapters
class DistilBertModelHeadsMixin(BertModelHeadsMixin):
    """Adds heads to a DistilBert model."""

    # Head management is fully inherited from the BERT heads mixin.
    pass
| 41.363057 | 117 | 0.721743 | from typing import Union
import torch
from torch import nn
from ..composition import AdapterCompositionBlock, parse_composition
from ..model_mixin import InvertibleAdaptersMixin, ModelAdaptersMixin
from .bert import BertEncoderAdaptersMixin, BertModelHeadsMixin, BertOutputAdaptersMixin, BertSelfOutputAdaptersMixin
class DistilBertSelfAttentionAdaptersModule(BertSelfOutputAdaptersMixin, nn.Module):
def __init__(self, parent):
super().__init__()
object.__setattr__(self, "parent", parent)
self.config = parent.config
@property
def transformer_layer_norm(self):
return self.parent.sa_layer_norm
class DistilBertOutputAdaptersModule(BertOutputAdaptersMixin, nn.Module):
def __init__(self, parent):
super().__init__()
object.__setattr__(self, "parent", parent)
self.config = parent.config
@property
def transformer_layer_norm(self):
return self.parent.output_layer_norm
class DistilBertTransfomerBlockAdaptersMixin:
def _init_adapter_modules(self):
self.attention_adapters = DistilBertSelfAttentionAdaptersModule(self)
self.output_adapters = DistilBertOutputAdaptersModule(self)
self.attention_adapters._init_adapter_modules()
self.output_adapters._init_adapter_modules()
self.register_forward_pre_hook(self._adapter_block_pre_hook)
def add_fusion_layer(self, adapter_names):
self.attention_adapters.add_fusion_layer(adapter_names)
self.output_adapters.add_fusion_layer(adapter_names)
def add_adapter(self, adapter_name: str, layer_idx: int):
self.attention_adapters.add_adapter(adapter_name, layer_idx)
self.output_adapters.add_adapter(adapter_name, layer_idx)
def delete_adapter(self, adapter_name):
self.attention_adapters.delete_adapter(adapter_name)
self.output_adapters.delete_adapter(adapter_name)
def delete_fusion_layer(self, adapter_names):
self.attention_adapters.delete_fusion_layer(adapter_names)
self.output_adapters.delete_fusion_layer(adapter_names)
def enable_adapters(self, adapter_names: list, unfreeze_adapters: bool, unfreeze_attention: bool):
self.attention_adapters.enable_adapters(adapter_names, unfreeze_adapters, unfreeze_attention)
self.output_adapters.enable_adapters(adapter_names, unfreeze_adapters, unfreeze_attention)
@staticmethod
def _adapter_block_pre_hook(module, input_tensors):
object.__setattr__(module.attention_adapters, "parent", module)
object.__setattr__(module.output_adapters, "parent", module)
class DistilBertTransformerAdaptersMixin(BertEncoderAdaptersMixin):
pass
class DistilBertModelAdaptersMixin(InvertibleAdaptersMixin, ModelAdaptersMixin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock]):
self.train()
self.freeze_model(True)
adapter_setup = parse_composition(adapter_setup)
self.transformer.enable_adapters(adapter_setup, True, False)
self.enable_invertible_adapters(adapter_setup.flatten())
self.set_active_adapters(adapter_setup)
def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):
self.train()
self.freeze_model(True)
adapter_setup = parse_composition(adapter_setup)
self.transformer.enable_adapters(adapter_setup, unfreeze_adapters, True)
self.set_active_adapters(adapter_setup)
def _add_adapter(self, adapter_name):
self.transformer.add_adapter(adapter_name)
self.add_invertible_adapter(adapter_name)
def _add_fusion_layer(self, adapter_names):
self.transformer.add_fusion_layer(adapter_names)
def _delete_adapter(self, adapter_name: str):
self.transformer.delete_adapter(adapter_name)
self.delete_invertible_adapter(adapter_name)
def _delete_fusion_layer(self, adapter_names):
self.transformer.delete_fusion_layer(adapter_names)
def get_fusion_regularization_loss(self):
reg_loss = 0.0
target = torch.zeros((self.config.hidden_size, self.config.hidden_size)).fill_diagonal_(1.0).to(self.device)
for _, v in self.transformer.layer._modules.items():
for _, layer_fusion in v.output_adapters.adapter_fusion_layer.items():
if hasattr(layer_fusion, "value"):
reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()
for _, layer_fusion in v.attention_adapters.adapter_fusion_layer.items():
if hasattr(layer_fusion, "value"):
reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()
return reg_loss
def get_adapter(self, name):
return_adapters = {}
for idx, layer in enumerate(self.transformer.layer):
adapters = {
"attention": layer.attention_adapters.adapters,
"output": layer.output_adapters.adapters,
}
for key, adapt in adapters.items():
if hasattr(adapt, name):
if idx not in return_adapters:
return_adapters[idx] = {}
return_adapters[idx][key] = getattr(adapt, name)
return return_adapters
class DistilBertModelHeadsMixin(BertModelHeadsMixin):
pass
| true | true |
f7354a089df3bea548928bfa626d5ec386918849 | 12,938 | py | Python | pandas/tests/arrays/boolean/test_construction.py | gsyqax/pandas | cb35d8a938c9222d903482d2f66c62fece5a7aae | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 4 | 2015-06-09T07:27:52.000Z | 2021-08-06T13:50:05.000Z | pandas/tests/arrays/boolean/test_construction.py | gsyqax/pandas | cb35d8a938c9222d903482d2f66c62fece5a7aae | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 7 | 2015-08-30T23:51:00.000Z | 2018-12-29T19:52:35.000Z | pandas/tests/arrays/boolean/test_construction.py | gsyqax/pandas | cb35d8a938c9222d903482d2f66c62fece5a7aae | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 5 | 2017-10-04T22:24:49.000Z | 2021-08-06T13:50:13.000Z | import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.arrays import BooleanArray
from pandas.core.arrays.boolean import coerce_to_array
@pytest.fixture
def data():
    """Boolean extension array: alternating True/False runs with two NA slots."""
    values = [True, False] * 4 + [np.nan]
    values += [True, False] * 44 + [np.nan]
    values += [True, False]
    return pd.array(values, dtype="boolean")
def test_boolean_array_constructor():
    """BooleanArray requires 1D boolean ndarrays for both values and mask."""
    values = np.array([True, False, True, False], dtype="bool")
    mask = np.array([False, False, False, True], dtype="bool")

    expected = pd.array([True, False, True, None], dtype="boolean")
    tm.assert_extension_array_equal(BooleanArray(values, mask), expected)

    # Each malformed (values, mask) pair must be rejected with a clear message.
    bad_inputs = [
        (values.tolist(), mask, TypeError, "values should be boolean numpy array"),
        (values, mask.tolist(), TypeError, "mask should be boolean numpy array"),
        (values.astype(int), mask, TypeError, "values should be boolean numpy array"),
        (values, None, TypeError, "mask should be boolean numpy array"),
        (values.reshape(1, -1), mask, ValueError, "values must be a 1D array"),
        (values, mask.reshape(1, -1), ValueError, "mask must be a 1D array"),
    ]
    for bad_values, bad_mask, exc, msg in bad_inputs:
        with pytest.raises(exc, match=msg):
            BooleanArray(bad_values, bad_mask)
def test_boolean_array_constructor_copy():
    """By default the inputs are aliased; copy=True duplicates them."""
    values = np.array([True, False, True, False], dtype="bool")
    mask = np.array([False, False, False, True], dtype="bool")

    aliased = BooleanArray(values, mask)
    assert aliased._data is values and aliased._mask is mask

    copied = BooleanArray(values, mask, copy=True)
    assert copied._data is not values and copied._mask is not mask
def test_to_boolean_array():
    """pd.array coerces lists and ndarrays of bools (and None) to BooleanArray."""
    expected = BooleanArray(
        np.array([True, False, True]), np.array([False, False, False])
    )
    for data in (
        [True, False, True],
        np.array([True, False, True]),
        np.array([True, False, True], dtype=object),
    ):
        tm.assert_extension_array_equal(pd.array(data, dtype="boolean"), expected)

    # None entries end up in the mask.
    expected = BooleanArray(
        np.array([True, False, True]), np.array([False, False, True])
    )
    for data in ([True, False, None], np.array([True, False, None], dtype=object)):
        tm.assert_extension_array_equal(pd.array(data, dtype="boolean"), expected)
def test_to_boolean_array_all_none():
    """An all-None input yields a fully masked BooleanArray."""
    expected = BooleanArray(np.ones(3, dtype=bool), np.ones(3, dtype=bool))
    for data in ([None, None, None], np.array([None, None, None], dtype=object)):
        tm.assert_extension_array_equal(pd.array(data, dtype="boolean"), expected)
@pytest.mark.parametrize(
    "a, b",
    [
        ([True, False, None, np.nan, pd.NA], [True, False, None, None, None]),
        ([True, np.nan], [True, None]),
        ([True, pd.NA], [True, None]),
        ([np.nan, np.nan], [None, None]),
        (np.array([np.nan, np.nan], dtype=float), [None, None]),
    ],
)
def test_to_boolean_array_missing_indicators(a, b):
    """None, np.nan and pd.NA are all treated as missing on construction."""
    tm.assert_extension_array_equal(
        pd.array(a, dtype="boolean"), pd.array(b, dtype="boolean")
    )
@pytest.mark.parametrize(
    "values",
    [
        ["foo", "bar"],
        ["1", "2"],
        [1, 2],
        [1.0, 2.0],
        pd.date_range("20130101", periods=2),
        np.array(["foo"]),
        np.array([1, 2]),
        np.array([1.0, 2.0]),
        [np.nan, {"a": 1}],
    ],
)
def test_to_boolean_array_error(values):
    """Inputs that are not bool-like are rejected when coercing to BooleanArray."""
    with pytest.raises(TypeError, match="Need to pass bool-like value"):
        pd.array(values, dtype="boolean")
def test_to_boolean_array_from_integer_array():
    """Integer ndarrays of 0/1 coerce to booleans; None stays missing."""
    result = pd.array(np.array([1, 0, 1, 0]), dtype="boolean")
    tm.assert_extension_array_equal(
        result, pd.array([True, False, True, False], dtype="boolean")
    )

    result = pd.array(np.array([1, 0, 1, None]), dtype="boolean")
    tm.assert_extension_array_equal(
        result, pd.array([True, False, True, None], dtype="boolean")
    )
def test_to_boolean_array_from_float_array():
    """Floats 0.0/1.0 coerce to booleans; NaN becomes a masked entry."""
    result = pd.array(np.array([1.0, 0.0, 1.0, 0.0]), dtype="boolean")
    tm.assert_extension_array_equal(
        result, pd.array([True, False, True, False], dtype="boolean")
    )

    result = pd.array(np.array([1.0, 0.0, 1.0, np.nan]), dtype="boolean")
    tm.assert_extension_array_equal(
        result, pd.array([True, False, True, None], dtype="boolean")
    )
def test_to_boolean_array_integer_like():
    """Plain Python ints 0/1 behave like booleans on construction."""
    result = pd.array([1, 0, 1, 0], dtype="boolean")
    tm.assert_extension_array_equal(
        result, pd.array([True, False, True, False], dtype="boolean")
    )

    # None stays missing.
    result = pd.array([1, 0, 1, None], dtype="boolean")
    tm.assert_extension_array_equal(
        result, pd.array([True, False, True, None], dtype="boolean")
    )
def test_coerce_to_array():
    # TODO this is currently not public API
    values = np.array([True, False, True, False], dtype="bool")
    mask = np.array([False, False, False, True], dtype="bool")
    expected = BooleanArray(values, mask)

    # Without copy=True the inputs are aliased...
    result = BooleanArray(*coerce_to_array(values, mask=mask))
    tm.assert_extension_array_equal(result, expected)
    assert result._data is values and result._mask is mask

    # ...and with copy=True they are duplicated.
    result = BooleanArray(*coerce_to_array(values, mask=mask, copy=True))
    tm.assert_extension_array_equal(result, expected)
    assert result._data is not values and result._mask is not mask

    # Missing values may come from the values *and* from the mask.
    mixed = [True, False, None, False]
    expected = BooleanArray(
        np.array([True, False, True, True]), np.array([False, False, True, True])
    )
    for data, mask_arg in (
        (mixed, mask),
        (np.array(mixed, dtype=object), mask),
        (mixed, mask.tolist()),
    ):
        result = BooleanArray(*coerce_to_array(data, mask=mask_arg))
        tm.assert_extension_array_equal(result, expected)

    # 2D inputs are rejected for either argument.
    with pytest.raises(ValueError, match="values must be a 1D list-like"):
        coerce_to_array(values.reshape(1, -1))
    with pytest.raises(ValueError, match="mask must be a 1D list-like"):
        coerce_to_array(values, mask=mask.reshape(1, -1))
def test_coerce_to_array_from_boolean_array():
    """coerce_to_array round-trips an existing BooleanArray."""
    values = np.array([True, False, True, False], dtype="bool")
    mask = np.array([False, False, False, True], dtype="bool")
    arr = BooleanArray(values, mask)

    roundtrip = BooleanArray(*coerce_to_array(arr))
    tm.assert_extension_array_equal(roundtrip, arr)
    # zero-copy: the underlying buffers are shared
    assert roundtrip._data is arr._data
    assert roundtrip._mask is arr._mask

    copied = BooleanArray(*coerce_to_array(arr), copy=True)
    tm.assert_extension_array_equal(copied, arr)
    assert copied._data is not arr._data
    assert copied._mask is not arr._mask

    # An explicit mask alongside a BooleanArray input is ambiguous and rejected.
    with pytest.raises(ValueError, match="cannot pass mask for BooleanArray input"):
        coerce_to_array(arr, mask=mask)
def test_coerce_to_numpy_array():
    """np.array() on a BooleanArray defaults to object dtype so pd.NA survives."""
    with_na = pd.array([True, False, None], dtype="boolean")
    tm.assert_numpy_array_equal(
        np.array(with_na), np.array([True, False, pd.NA], dtype="object")
    )

    no_na = pd.array([True, False, True], dtype="boolean")
    tm.assert_numpy_array_equal(
        np.array(no_na), np.array([True, False, True], dtype="object")
    )
    # An explicit bool dtype works when no values are missing...
    tm.assert_numpy_array_equal(
        np.array(no_na, dtype="bool"), np.array([True, False, True], dtype="bool")
    )

    # ...but raises when any value is masked.
    msg = (
        "cannot convert to 'bool'-dtype NumPy array with missing values. "
        "Specify an appropriate 'na_value' for this dtype."
    )
    with pytest.raises(ValueError, match=msg):
        np.array(with_na, dtype="bool")
def test_to_boolean_array_from_strings():
    """'True'/'False' strings parse to booleans; NaN becomes a masked slot."""
    strings = np.array(["True", "False", np.nan], dtype=object)
    expected = BooleanArray(
        np.array([True, False, False]), np.array([False, False, True])
    )
    tm.assert_extension_array_equal(
        BooleanArray._from_sequence_of_strings(strings), expected
    )
def test_to_boolean_array_from_strings_invalid_string():
    """Unrecognized strings raise rather than silently coercing."""
    msg = "cannot be cast"
    with pytest.raises(ValueError, match=msg):
        BooleanArray._from_sequence_of_strings(["donkey"])
@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])
def test_to_numpy(box):
    """to_numpy defaults to object dtype; other dtypes may require na_value."""
    con = pd.Series if box else pd.array

    # Default conversion is object dtype, with or without missing values.
    arr = con([True, False, True], dtype="boolean")
    tm.assert_numpy_array_equal(
        arr.to_numpy(), np.array([True, False, True], dtype="object")
    )
    arr = con([True, False, None], dtype="boolean")
    tm.assert_numpy_array_equal(
        arr.to_numpy(), np.array([True, False, pd.NA], dtype="object")
    )
    tm.assert_numpy_array_equal(
        arr.to_numpy(dtype="str"), np.array([True, False, pd.NA], dtype="<U5")
    )

    # bool dtype only works when nothing is missing.
    arr = con([True, False, True], dtype="boolean")
    tm.assert_numpy_array_equal(
        arr.to_numpy(dtype="bool"), np.array([True, False, True], dtype="bool")
    )
    arr = con([True, False, None], dtype="boolean")
    with pytest.raises(ValueError, match="cannot convert to 'bool'-dtype"):
        arr.to_numpy(dtype="bool")

    # An explicit na_value allows conversion to any target dtype.
    cases = [
        (object, None, np.array([True, False, None], dtype="object")),
        (bool, False, np.array([True, False, False], dtype="bool")),
        ("int64", -99, np.array([1, 0, -99], dtype="int64")),
        ("float64", np.nan, np.array([1, 0, np.nan], dtype="float64")),
    ]
    for dtype, na_value, expected in cases:
        tm.assert_numpy_array_equal(
            arr.to_numpy(dtype=dtype, na_value=na_value), expected
        )

    # Without na_value, int/float conversion of missing data raises.
    with pytest.raises(ValueError, match="cannot convert to 'int64'-dtype"):
        arr.to_numpy(dtype="int64")
    with pytest.raises(ValueError, match="cannot convert to 'float64'-dtype"):
        arr.to_numpy(dtype="float64")
def test_to_numpy_copy():
    """to_numpy(dtype=bool) is zero-copy unless copy=True is requested."""
    arr = pd.array([True, False, True], dtype="boolean")
    view = arr.to_numpy(dtype=bool)
    view[0] = False  # mutates the backing store of ``arr``
    tm.assert_extension_array_equal(
        arr, pd.array([False, False, True], dtype="boolean")
    )

    arr = pd.array([True, False, True], dtype="boolean")
    detached = arr.to_numpy(dtype=bool, copy=True)
    detached[0] = False  # independent buffer: ``arr`` is untouched
    tm.assert_extension_array_equal(arr, pd.array([True, False, True], dtype="boolean"))
# FIXME: don't leave commented out
# TODO when BooleanArray coerces to object dtype numpy array, need to do conversion
# manually in the indexing code
# def test_indexing_boolean_mask():
# arr = pd.array([1, 2, 3, 4], dtype="Int64")
# mask = pd.array([True, False, True, False], dtype="boolean")
# result = arr[mask]
# expected = pd.array([1, 3], dtype="Int64")
# tm.assert_extension_array_equal(result, expected)
# # missing values -> error
# mask = pd.array([True, False, True, None], dtype="boolean")
# with pytest.raises(IndexError):
# result = arr[mask]
| 37.285303 | 88 | 0.675375 | import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.arrays import BooleanArray
from pandas.core.arrays.boolean import coerce_to_array
@pytest.fixture
def data():
return pd.array(
[True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False],
dtype="boolean",
)
def test_boolean_array_constructor():
values = np.array([True, False, True, False], dtype="bool")
mask = np.array([False, False, False, True], dtype="bool")
result = BooleanArray(values, mask)
expected = pd.array([True, False, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
with pytest.raises(TypeError, match="values should be boolean numpy array"):
BooleanArray(values.tolist(), mask)
with pytest.raises(TypeError, match="mask should be boolean numpy array"):
BooleanArray(values, mask.tolist())
with pytest.raises(TypeError, match="values should be boolean numpy array"):
BooleanArray(values.astype(int), mask)
with pytest.raises(TypeError, match="mask should be boolean numpy array"):
BooleanArray(values, None)
with pytest.raises(ValueError, match="values must be a 1D array"):
BooleanArray(values.reshape(1, -1), mask)
with pytest.raises(ValueError, match="mask must be a 1D array"):
BooleanArray(values, mask.reshape(1, -1))
def test_boolean_array_constructor_copy():
values = np.array([True, False, True, False], dtype="bool")
mask = np.array([False, False, False, True], dtype="bool")
result = BooleanArray(values, mask)
assert result._data is values
assert result._mask is mask
result = BooleanArray(values, mask, copy=True)
assert result._data is not values
assert result._mask is not mask
def test_to_boolean_array():
expected = BooleanArray(
np.array([True, False, True]), np.array([False, False, False])
)
result = pd.array([True, False, True], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
result = pd.array(np.array([True, False, True]), dtype="boolean")
tm.assert_extension_array_equal(result, expected)
result = pd.array(np.array([True, False, True], dtype=object), dtype="boolean")
tm.assert_extension_array_equal(result, expected)
expected = BooleanArray(
np.array([True, False, True]), np.array([False, False, True])
)
result = pd.array([True, False, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
result = pd.array(np.array([True, False, None], dtype=object), dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_to_boolean_array_all_none():
expected = BooleanArray(np.array([True, True, True]), np.array([True, True, True]))
result = pd.array([None, None, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
result = pd.array(np.array([None, None, None], dtype=object), dtype="boolean")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
"a, b",
[
([True, False, None, np.nan, pd.NA], [True, False, None, None, None]),
([True, np.nan], [True, None]),
([True, pd.NA], [True, None]),
([np.nan, np.nan], [None, None]),
(np.array([np.nan, np.nan], dtype=float), [None, None]),
],
)
def test_to_boolean_array_missing_indicators(a, b):
result = pd.array(a, dtype="boolean")
expected = pd.array(b, dtype="boolean")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
"values",
[
["foo", "bar"],
["1", "2"],
[1, 2],
[1.0, 2.0],
pd.date_range("20130101", periods=2),
np.array(["foo"]),
np.array([1, 2]),
np.array([1.0, 2.0]),
[np.nan, {"a": 1}],
],
)
def test_to_boolean_array_error(values):
msg = "Need to pass bool-like value"
with pytest.raises(TypeError, match=msg):
pd.array(values, dtype="boolean")
def test_to_boolean_array_from_integer_array():
result = pd.array(np.array([1, 0, 1, 0]), dtype="boolean")
expected = pd.array([True, False, True, False], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
result = pd.array(np.array([1, 0, 1, None]), dtype="boolean")
expected = pd.array([True, False, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_to_boolean_array_from_float_array():
result = pd.array(np.array([1.0, 0.0, 1.0, 0.0]), dtype="boolean")
expected = pd.array([True, False, True, False], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
result = pd.array(np.array([1.0, 0.0, 1.0, np.nan]), dtype="boolean")
expected = pd.array([True, False, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_to_boolean_array_integer_like():
result = pd.array([1, 0, 1, 0], dtype="boolean")
expected = pd.array([True, False, True, False], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
result = pd.array([1, 0, 1, None], dtype="boolean")
expected = pd.array([True, False, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_coerce_to_array():
values = np.array([True, False, True, False], dtype="bool")
mask = np.array([False, False, False, True], dtype="bool")
result = BooleanArray(*coerce_to_array(values, mask=mask))
expected = BooleanArray(values, mask)
tm.assert_extension_array_equal(result, expected)
assert result._data is values
assert result._mask is mask
result = BooleanArray(*coerce_to_array(values, mask=mask, copy=True))
expected = BooleanArray(values, mask)
tm.assert_extension_array_equal(result, expected)
assert result._data is not values
assert result._mask is not mask
values = [True, False, None, False]
mask = np.array([False, False, False, True], dtype="bool")
result = BooleanArray(*coerce_to_array(values, mask=mask))
expected = BooleanArray(
np.array([True, False, True, True]), np.array([False, False, True, True])
)
tm.assert_extension_array_equal(result, expected)
result = BooleanArray(*coerce_to_array(np.array(values, dtype=object), mask=mask))
tm.assert_extension_array_equal(result, expected)
result = BooleanArray(*coerce_to_array(values, mask=mask.tolist()))
tm.assert_extension_array_equal(result, expected)
values = np.array([True, False, True, False], dtype="bool")
mask = np.array([False, False, False, True], dtype="bool")
with pytest.raises(ValueError, match="values must be a 1D list-like"):
coerce_to_array(values.reshape(1, -1))
with pytest.raises(ValueError, match="mask must be a 1D list-like"):
coerce_to_array(values, mask=mask.reshape(1, -1))
def test_coerce_to_array_from_boolean_array():
values = np.array([True, False, True, False], dtype="bool")
mask = np.array([False, False, False, True], dtype="bool")
arr = BooleanArray(values, mask)
result = BooleanArray(*coerce_to_array(arr))
tm.assert_extension_array_equal(result, arr)
assert result._data is arr._data
assert result._mask is arr._mask
result = BooleanArray(*coerce_to_array(arr), copy=True)
tm.assert_extension_array_equal(result, arr)
assert result._data is not arr._data
assert result._mask is not arr._mask
with pytest.raises(ValueError, match="cannot pass mask for BooleanArray input"):
coerce_to_array(arr, mask=mask)
def test_coerce_to_numpy_array():
arr = pd.array([True, False, None], dtype="boolean")
result = np.array(arr)
expected = np.array([True, False, pd.NA], dtype="object")
tm.assert_numpy_array_equal(result, expected)
arr = pd.array([True, False, True], dtype="boolean")
result = np.array(arr)
expected = np.array([True, False, True], dtype="object")
tm.assert_numpy_array_equal(result, expected)
result = np.array(arr, dtype="bool")
expected = np.array([True, False, True], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
arr = pd.array([True, False, None], dtype="boolean")
msg = (
"cannot convert to 'bool'-dtype NumPy array with missing values. "
"Specify an appropriate 'na_value' for this dtype."
)
with pytest.raises(ValueError, match=msg):
np.array(arr, dtype="bool")
def test_to_boolean_array_from_strings():
result = BooleanArray._from_sequence_of_strings(
np.array(["True", "False", np.nan], dtype=object)
)
expected = BooleanArray(
np.array([True, False, False]), np.array([False, False, True])
)
tm.assert_extension_array_equal(result, expected)
def test_to_boolean_array_from_strings_invalid_string():
with pytest.raises(ValueError, match="cannot be cast"):
BooleanArray._from_sequence_of_strings(["donkey"])
@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])
def test_to_numpy(box):
    """to_numpy dtype/na_value handling, for both Series and array containers."""
    con = pd.Series if box else pd.array
    # default (no missing values) -> object dtype
    arr = con([True, False, True], dtype="boolean")
    result = arr.to_numpy()
    expected = np.array([True, False, True], dtype="object")
    tm.assert_numpy_array_equal(result, expected)
    # default with missing values -> object dtype holding pd.NA
    arr = con([True, False, None], dtype="boolean")
    result = arr.to_numpy()
    expected = np.array([True, False, pd.NA], dtype="object")
    tm.assert_numpy_array_equal(result, expected)
    # str dtype: pd.NA is stringified ('<NA>' fits in <U5)
    arr = con([True, False, None], dtype="boolean")
    result = arr.to_numpy(dtype="str")
    expected = np.array([True, False, pd.NA], dtype="<U5")
    tm.assert_numpy_array_equal(result, expected)
    # no missing values -> can convert to bool, otherwise raises
    arr = con([True, False, True], dtype="boolean")
    result = arr.to_numpy(dtype="bool")
    expected = np.array([True, False, True], dtype="bool")
    tm.assert_numpy_array_equal(result, expected)
    arr = con([True, False, None], dtype="boolean")
    with pytest.raises(ValueError, match="cannot convert to 'bool'-dtype"):
        result = arr.to_numpy(dtype="bool")
    # specify na_value to fill missing slots for any target dtype
    arr = con([True, False, None], dtype="boolean")
    result = arr.to_numpy(dtype=object, na_value=None)
    expected = np.array([True, False, None], dtype="object")
    tm.assert_numpy_array_equal(result, expected)
    result = arr.to_numpy(dtype=bool, na_value=False)
    expected = np.array([True, False, False], dtype="bool")
    tm.assert_numpy_array_equal(result, expected)
    result = arr.to_numpy(dtype="int64", na_value=-99)
    expected = np.array([1, 0, -99], dtype="int64")
    tm.assert_numpy_array_equal(result, expected)
    result = arr.to_numpy(dtype="float64", na_value=np.nan)
    expected = np.array([1, 0, np.nan], dtype="float64")
    tm.assert_numpy_array_equal(result, expected)
    # converting to int or float without an explicit na_value raises
    with pytest.raises(ValueError, match="cannot convert to 'int64'-dtype"):
        arr.to_numpy(dtype="int64")
    with pytest.raises(ValueError, match="cannot convert to 'float64'-dtype"):
        arr.to_numpy(dtype="float64")
def test_to_numpy_copy():
    """to_numpy(dtype=bool) shares memory by default; copy=True detaches it."""
    # to_numpy can be zero-copy -> mutating the result mutates the array
    arr = pd.array([True, False, True], dtype="boolean")
    result = arr.to_numpy(dtype=bool)
    result[0] = False
    tm.assert_extension_array_equal(
        arr, pd.array([False, False, True], dtype="boolean")
    )
    # with copy=True the original array is unaffected
    arr = pd.array([True, False, True], dtype="boolean")
    result = arr.to_numpy(dtype=bool, copy=True)
    result[0] = False
    tm.assert_extension_array_equal(arr, pd.array([True, False, True], dtype="boolean"))
# TODO when BooleanArray coerces to object dtype numpy array, need to do conversion
# manually in the indexing code
# def test_indexing_boolean_mask():
# arr = pd.array([1, 2, 3, 4], dtype="Int64")
# mask = pd.array([True, False, True, False], dtype="boolean")
# result = arr[mask]
# expected = pd.array([1, 3], dtype="Int64")
# tm.assert_extension_array_equal(result, expected)
# # missing values -> error
# mask = pd.array([True, False, True, None], dtype="boolean")
# with pytest.raises(IndexError):
# result = arr[mask]
| true | true |
f7354b7e2b2d0bf1ae24dc3ba868c4e621da0c74 | 26,742 | py | Python | second/pytorch/train.py | VitoRazor/Lidar_RGB_detector | 5308ba24a90d6e8d73940be4b40d31eccb4df94b | [
"MIT"
] | 15 | 2019-08-13T15:28:41.000Z | 2020-03-11T12:36:35.000Z | second/pytorch/train.py | VitoRazor/Lidar_RGB_detector | 5308ba24a90d6e8d73940be4b40d31eccb4df94b | [
"MIT"
] | null | null | null | second/pytorch/train.py | VitoRazor/Lidar_RGB_detector | 5308ba24a90d6e8d73940be4b40d31eccb4df94b | [
"MIT"
] | 1 | 2019-11-15T10:30:01.000Z | 2019-11-15T10:30:01.000Z | import copy
import json
import os
from pathlib import Path
import pickle
import shutil
import time
import re
import fire
import numpy as np
import torch
from google.protobuf import text_format
import second.data.kitti_common as kitti
import torchplus
from second.builder import target_assigner_builder, voxel_builder
from second.core import box_np_ops
from second.data.preprocess import merge_second_batch, merge_second_batch_multigpu
from second.protos import pipeline_pb2
from second.pytorch.builder import (box_coder_builder, input_reader_builder,
lr_scheduler_builder, optimizer_builder,
second_builder)
from second.utils.log_tool import SimpleModelLog
from second.utils.progress_bar import ProgressBar
import psutil
def example_convert_to_torch(example, dtype=torch.float32,
                             device=None) -> dict:
    """Convert a batched example of numpy arrays to torch tensors.

    Float entries are created as float32 first and then cast to *dtype*
    (building fp16 tensors directly from fp32 numpy data is slow).
    Integer entries become int32, masks become uint8, and the nested
    "calib" dict is converted entry by entry. Keys that do not belong to
    any known category are passed through unchanged.

    Args:
        example: dict of numpy arrays / python objects from the dataloader.
        dtype: target floating dtype (e.g. torch.float16 for mixed precision).
        device: target device; defaults to cuda:0 when omitted.

    Returns:
        dict with the same keys and tensorized values.
    """
    if device is None:
        device = torch.device("cuda:0")
    float_keys = (
        "voxels", "anchors", "reg_targets", "reg_weights", "bev_map", "importance"
    )
    int_keys = ("coordinates", "labels", "num_points")
    converted = {}
    for key, value in example.items():
        if key in float_keys:
            # fp32 first, then cast: direct fp16 creation from fp32 data is slow
            converted[key] = torch.tensor(
                value, dtype=torch.float32, device=device).to(dtype)
        elif key in int_keys:
            converted[key] = torch.tensor(
                value, dtype=torch.int32, device=device)
        elif key == "anchors_mask":
            converted[key] = torch.tensor(
                value, dtype=torch.uint8, device=device)
        elif key == "calib":
            converted[key] = {
                name: torch.tensor(mat, dtype=dtype, device=device).to(dtype)
                for name, mat in value.items()
            }
        elif key == "num_voxels":
            converted[key] = torch.tensor(value)
        else:
            converted[key] = value
    return converted
def build_network(model_cfg, measure_time=False, KL=False):
    """Build a SECOND detection network from its model config.

    Constructs the voxel generator, box coder and target assigner from
    the config, then delegates assembly to second_builder.

    Args:
        model_cfg: the config.model.second protobuf message.
        measure_time: if True, the network records per-module timings.
        KL: build the KL-loss RPN variant (forwarded to second_builder).

    Returns:
        the assembled network module.
    """
    voxel_generator = voxel_builder.build(model_cfg.voxel_generator)
    # bird's-eye-view range [xmin, ymin, xmax, ymax] from the 3D range
    bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
    box_coder = box_coder_builder.build(model_cfg.box_coder)
    target_assigner_cfg = model_cfg.target_assigner
    target_assigner = target_assigner_builder.build(target_assigner_cfg,
                                                    bv_range, box_coder)
    box_coder.custom_ndim = target_assigner._anchor_generators[0].custom_ndim
    print(KL)  # NOTE(review): debug print left in; consider removing
    net = second_builder.build(
        model_cfg, voxel_generator, target_assigner, measure_time=measure_time, KL = KL )
    return net
def _worker_init_fn(worker_id):
time_seed = np.array(time.time(), dtype=np.int32)
np.random.seed(time_seed + worker_id)
print(f"WORKER {worker_id} seed:", np.random.get_state()[1][0])
def freeze_params(params: dict, include: str=None, exclude: str=None):
    """Select the parameters that remain trainable.

    A parameter is dropped when its name matches *include* (freeze these),
    or — when *exclude* is given — when its name does NOT match *exclude*
    (freeze everything except these). Patterns are regexes matched at the
    start of the name.

    Args:
        params: mapping of parameter name -> parameter.
        include: regex naming parameters to drop.
        exclude: regex naming the only parameters to keep.

    Returns:
        list of the parameters that were kept.
    """
    assert isinstance(params, dict)
    inc = re.compile(include) if include is not None else None
    exc = re.compile(exclude) if exclude is not None else None
    kept = []
    for name, param in params.items():
        if inc is not None and inc.match(name) is not None:
            continue
        if exc is not None and exc.match(name) is None:
            continue
        kept.append(param)
    return kept
def freeze_params_v2(params: dict, include: str=None, exclude: str=None):
    """Disable gradients in place for a subset of parameters.

    Freezes every parameter whose name matches *include*, and — when
    *exclude* is given — every parameter whose name does NOT match it.
    Parameters are never un-frozen by this function.

    Args:
        params: mapping of parameter name -> parameter (mutated in place).
        include: regex naming parameters to freeze.
        exclude: regex naming the only parameters left trainable.
    """
    assert isinstance(params, dict)
    inc = re.compile(include) if include is not None else None
    exc = re.compile(exclude) if exclude is not None else None
    for name, param in params.items():
        freeze = False
        if inc is not None and inc.match(name) is not None:
            freeze = True
        if exc is not None and exc.match(name) is None:
            freeze = True
        if freeze:
            param.requires_grad = False
def filter_param_dict(state_dict: dict, include: str=None, exclude: str=None):
    """Return the subset of *state_dict* selected by regex filters.

    Keeps an entry when its key matches *include* (if given) and does not
    match *exclude* (if given). Used to choose which pretrained weights
    are loaded.

    Args:
        state_dict: mapping of parameter name -> tensor.
        include: regex a key must match to be kept.
        exclude: regex a key must not match to be kept.

    Returns:
        a new dict containing only the selected entries.
    """
    assert isinstance(state_dict, dict)
    inc = re.compile(include) if include is not None else None
    exc = re.compile(exclude) if exclude is not None else None

    def _selected(key):
        if inc is not None and inc.match(key) is None:
            return False
        if exc is not None and exc.match(key) is not None:
            return False
        return True

    return {k: v for k, v in state_dict.items() if _selected(k)}
def train(config_path,
          model_dir,
          KL = False,
          result_path=None,
          create_folder=False,
          display_step=50,
          summary_step=5,
          pretrained_path=None,
          pretrained_include=None,
          pretrained_exclude=None,
          freeze_include=None,
          freeze_exclude=None,
          multi_gpu=False,
          measure_time=False,
          resume=False):
    """train a VoxelNet model specified by a config file.

    Args:
        config_path: path to a pipeline config file, or a config object.
        model_dir: directory for checkpoints, logs and eval results.
        KL: NOTE — overwritten below from the config (RPN_KL -> True);
            the value passed by the caller is ignored.
        result_path: eval output dir; defaults to model_dir/'results'.
        create_folder: create a fresh numbered dir if model_dir exists.
        display_step: log training metrics every this many steps.
        summary_step: unused; kept for interface compatibility.
        pretrained_path: optional checkpoint to warm-start from.
        pretrained_include / pretrained_exclude: regexes filtering which
            pretrained weights are loaded (see filter_param_dict).
        freeze_include / freeze_exclude: regexes choosing parameters to
            freeze after loading (see freeze_params_v2).
        multi_gpu: wrap the net in DataParallel, scale batch/workers.
        measure_time: record per-module forward timings.
        resume: allow training to continue in an existing model_dir.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_dir = str(Path(model_dir).resolve())
    if create_folder:
        if Path(model_dir).exists():
            model_dir = torchplus.train.create_folder(model_dir)
    model_dir = Path(model_dir)
    if not resume and model_dir.exists():
        raise ValueError("model dir exists and you don't specify resume.")
    model_dir.mkdir(parents=True, exist_ok=True)
    if result_path is None:
        result_path = model_dir / 'results'
    config_file_bkp = "pipeline.config"
    if isinstance(config_path, str):
        # directly provide a config object. this usually used
        # when you want to train with several different parameters in
        # one script.
        config = pipeline_pb2.TrainEvalPipelineConfig()
        with open(config_path, "r") as f:
            proto_str = f.read()
            text_format.Merge(proto_str, config)
    else:
        config = config_path
        proto_str = text_format.MessageToString(config, indent=2)
    # back up the effective config next to the checkpoints
    with (model_dir / config_file_bkp).open("w") as f:
        f.write(proto_str)
    input_cfg = config.train_input_reader
    eval_input_cfg = config.eval_input_reader
    model_cfg = config.model.second
    train_cfg = config.train_config
    # the RPN class name in the config decides the KL-loss variant
    if model_cfg.rpn.module_class_name == "RPN_KL":
        KL = True
    else:
        KL = False
    print(KL)
    net = build_network(model_cfg, measure_time,KL).to(device)
    # if train_cfg.enable_mixed_precision:
    #     net.half()
    #     net.metrics_to_float()
    #     net.convert_norm_to_float(net)
    target_assigner = net.target_assigner
    voxel_generator = net.voxel_generator
    print("num parameters:", len(list(net.parameters())))
    torchplus.train.try_restore_latest_checkpoints(model_dir, [net])
    if pretrained_path is not None:
        # warm start: load only regex-selected, shape-compatible weights,
        # then optionally freeze part of the network
        model_dict = net.state_dict()
        pretrained_dict = torch.load(pretrained_path)
        pretrained_dict = filter_param_dict(pretrained_dict, pretrained_include, pretrained_exclude)
        new_pretrained_dict = {}
        for k, v in pretrained_dict.items():
            if k in model_dict and v.shape == model_dict[k].shape:
                new_pretrained_dict[k] = v
        print("Load pretrained parameters:")
        for k, v in new_pretrained_dict.items():
            print(k, v.shape)
        model_dict.update(new_pretrained_dict)
        net.load_state_dict(model_dict)
        freeze_params_v2(dict(net.named_parameters()), freeze_include, freeze_exclude)
        net.clear_global_step()
        net.clear_metrics()
    if multi_gpu:
        net_parallel = torch.nn.DataParallel(net)
    else:
        net_parallel = net
    optimizer_cfg = train_cfg.optimizer
    loss_scale = train_cfg.loss_scale_factor
    fastai_optimizer = optimizer_builder.build(
        optimizer_cfg,
        net,
        mixed=False,
        loss_scale=loss_scale)
    if loss_scale < 0:
        loss_scale = "dynamic"
    if train_cfg.enable_mixed_precision:
        max_num_voxels = input_cfg.preprocess.max_number_of_voxels * input_cfg.batch_size
        assert max_num_voxels < 65535, "spconv fp16 training only support this"
        from apex import amp
        net, amp_optimizer = amp.initialize(net, fastai_optimizer,
                                            opt_level="O2",
                                            keep_batchnorm_fp32=True,
                                            loss_scale=loss_scale
                                            )
        net.metrics_to_float()
    else:
        amp_optimizer = fastai_optimizer
    torchplus.train.try_restore_latest_checkpoints(model_dir,
                                                   [fastai_optimizer])
    lr_scheduler = lr_scheduler_builder.build(optimizer_cfg, amp_optimizer,
                                              train_cfg.steps)
    if train_cfg.enable_mixed_precision:
        float_dtype = torch.float16
    else:
        float_dtype = torch.float32
    if multi_gpu:
        num_gpu = torch.cuda.device_count()
        print(f"MULTI-GPU: use {num_gpu} gpu")
        collate_fn = merge_second_batch_multigpu
    else:
        collate_fn = merge_second_batch
        num_gpu = 1
    ######################
    # PREPARE INPUT
    ######################
    dataset = input_reader_builder.build(
        input_cfg,
        model_cfg,
        training=True,
        voxel_generator=voxel_generator,
        target_assigner=target_assigner,
        multi_gpu=multi_gpu)
    eval_dataset = input_reader_builder.build(
        eval_input_cfg,
        model_cfg,
        training=False,
        voxel_generator=voxel_generator,
        target_assigner=target_assigner)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=input_cfg.batch_size * num_gpu,
        shuffle=True,
        num_workers=input_cfg.preprocess.num_workers * num_gpu,
        pin_memory=False,
        collate_fn=collate_fn,
        worker_init_fn=_worker_init_fn,
        drop_last=not multi_gpu)
    eval_dataloader = torch.utils.data.DataLoader(
        eval_dataset,
        batch_size=eval_input_cfg.batch_size, # only support multi-gpu train
        shuffle=False,
        num_workers=eval_input_cfg.preprocess.num_workers,
        pin_memory=False,
        collate_fn=merge_second_batch)
    ######################
    # TRAINING
    ######################
    model_logging = SimpleModelLog(model_dir)
    model_logging.open()
    model_logging.log_text(proto_str + "\n", 0, tag="config")
    start_step = net.get_global_step()
    total_step = train_cfg.steps
    t = time.time()
    steps_per_eval = train_cfg.steps_per_eval
    clear_metrics_every_epoch = train_cfg.clear_metrics_every_epoch
    amp_optimizer.zero_grad()
    step_times = []
    step = start_step
    try:
        while True:
            if clear_metrics_every_epoch:
                net.clear_metrics()
            for example in dataloader:
                lr_scheduler.step(net.get_global_step())
                time_metrics = example["metrics"]
                example.pop("metrics")
                example_torch = example_convert_to_torch(example, float_dtype)
                batch_size = example["anchors"].shape[0]
                # print("num_points:",max(example_torch['num_points']))
                # print("num_voxels:",example_torch['num_voxels'].shape)
                # print("anchors:",example_torch['anchors'].shape)
                # print("voxels:",example_torch['voxels'].shape)
                # print(example_torch['voxels'][0:3])
                # print("coordinates:",example_torch['coordinates'].shape)
                # exit()
                ret_dict = net_parallel(example_torch)
                cls_preds = ret_dict["cls_preds"]
                # .mean() reduces the per-GPU losses under DataParallel
                loss = ret_dict["loss"].mean()
                cls_loss_reduced = ret_dict["cls_loss_reduced"].mean()
                loc_loss_reduced = ret_dict["loc_loss_reduced"].mean()
                cls_pos_loss = ret_dict["cls_pos_loss"].mean()
                cls_neg_loss = ret_dict["cls_neg_loss"].mean()
                loc_loss = ret_dict["loc_loss"]
                cls_loss = ret_dict["cls_loss"]
                cared = ret_dict["cared"]
                labels = example_torch["labels"]
                if train_cfg.enable_mixed_precision:
                    # apex amp handles loss scaling for fp16 training
                    with amp.scale_loss(loss, amp_optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()
                torch.nn.utils.clip_grad_norm_(net.parameters(), 10.0)
                amp_optimizer.step()
                amp_optimizer.zero_grad()
                net.update_global_step()
                net_metrics = net.update_metrics(cls_loss_reduced,
                                                 loc_loss_reduced, cls_preds,
                                                 labels, cared)
                step_time = (time.time() - t)
                step_times.append(step_time)
                t = time.time()
                metrics = {}
                # pos/neg anchor counts of the first sample, for logging only
                num_pos = int((labels > 0)[0].float().sum().cpu().numpy())
                num_neg = int((labels == 0)[0].float().sum().cpu().numpy())
                if 'anchors_mask' not in example_torch:
                    num_anchors = example_torch['anchors'].shape[1]
                else:
                    num_anchors = int(example_torch['anchors_mask'][0].sum())
                global_step = net.get_global_step()
                if global_step % display_step == 0:
                    if measure_time:
                        for name, val in net.get_avg_time_dict().items():
                            print(f"avg {name} time = {val * 1000:.3f} ms")
                    # per-dimension localization loss (x, y, z, w, l, h, ...)
                    loc_loss_elem = [
                        float(loc_loss[:, :, i].sum().detach().cpu().numpy() /
                              batch_size) for i in range(loc_loss.shape[-1])
                    ]
                    metrics["runtime"] = {
                        "step": global_step,
                        "steptime": np.mean(step_times),
                    }
                    metrics["runtime"].update(time_metrics[0])
                    step_times = []
                    metrics.update(net_metrics)
                    metrics["loss"]["loc_elem"] = loc_loss_elem
                    metrics["loss"]["cls_pos_rt"] = float(
                        cls_pos_loss.detach().cpu().numpy())
                    metrics["loss"]["cls_neg_rt"] = float(
                        cls_neg_loss.detach().cpu().numpy())
                    if model_cfg.use_direction_classifier:
                        dir_loss_reduced = ret_dict["dir_loss_reduced"].mean()
                        metrics["loss"]["dir_rt"] = float(
                            dir_loss_reduced.detach().cpu().numpy())
                    metrics["misc"] = {
                        "num_vox": int(example_torch["voxels"].shape[0]),
                        "num_pos": int(num_pos),
                        "num_neg": int(num_neg),
                        "num_anchors": int(num_anchors),
                        "lr": float(amp_optimizer.lr),
                        "mem_usage": psutil.virtual_memory().percent,
                    }
                    model_logging.log_metrics(metrics, global_step)
                if global_step % steps_per_eval == 0:
                    # periodic checkpoint + full evaluation pass
                    torchplus.train.save_models(model_dir, [net, amp_optimizer],
                                                net.get_global_step())
                    net.eval()
                    result_path_step = result_path / f"step_{net.get_global_step()}"
                    result_path_step.mkdir(parents=True, exist_ok=True)
                    model_logging.log_text("#################################",
                                           global_step)
                    model_logging.log_text("# EVAL", global_step)
                    model_logging.log_text("#################################",
                                           global_step)
                    model_logging.log_text("Generate output labels...", global_step)
                    t = time.time()
                    detections = []
                    prog_bar = ProgressBar()
                    net.clear_timer()
                    prog_bar.start((len(eval_dataset) + eval_input_cfg.batch_size - 1)
                                   // eval_input_cfg.batch_size)
                    for example in iter(eval_dataloader):
                        example = example_convert_to_torch(example, float_dtype)
                        detections += net(example)
                        prog_bar.print_bar()
                    sec_per_ex = len(eval_dataset) / (time.time() - t)
                    model_logging.log_text(
                        f'generate label finished({sec_per_ex:.2f}/s). start eval:',
                        global_step)
                    result_dict = eval_dataset.dataset.evaluation(
                        detections, str(result_path_step))
                    for k, v in result_dict["results"].items():
                        model_logging.log_text("Evaluation {}".format(k), global_step)
                        model_logging.log_text(v, global_step)
                    model_logging.log_metrics(result_dict["detail"], global_step)
                    with open(result_path_step / "result.pkl", 'wb') as f:
                        pickle.dump(detections, f)
                    net.train()
                step += 1
                if step >= total_step:
                    break
            if step >= total_step:
                break
    except Exception as e:
        # dump the offending batch's metadata, save a checkpoint, re-raise
        print(json.dumps(example["metadata"], indent=2))
        model_logging.log_text(str(e), step)
        model_logging.log_text(json.dumps(example["metadata"], indent=2), step)
        torchplus.train.save_models(model_dir, [net, amp_optimizer],
                                    step)
        raise e
    finally:
        model_logging.close()
    torchplus.train.save_models(model_dir, [net, amp_optimizer],
                                net.get_global_step())
def evaluate(config_path,
             model_dir=None,
             result_path=None,
             ckpt_path=None,
             measure_time=False,
             batch_size=None,
             **kwargs):
    """Don't support pickle_result anymore. if you want to generate kitti label file,
    please use kitti_anno_to_label_file and convert_detection_to_kitti_annos
    in second.data.kitti_dataset.

    Runs inference over the eval split, pickles the detections to
    result_path/step_<N>/result.pkl and prints the dataset's evaluation
    metrics.

    Args:
        config_path: path to a pipeline config file, or a config object.
        model_dir: checkpoint directory (used when ckpt_path is None).
        result_path: output dir; defaults to model_dir/'eval_results'.
        ckpt_path: explicit checkpoint to restore instead of the latest.
        measure_time: print per-stage timing statistics.
        batch_size: override the config's eval batch size.
    """
    assert len(kwargs) == 0
    model_dir = str(Path(model_dir).resolve())
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    result_name = 'eval_results'
    if result_path is None:
        model_dir = Path(model_dir)
        result_path = model_dir / result_name
    else:
        result_path = Path(result_path)
    if isinstance(config_path, str):
        # directly provide a config object. this usually used
        # when you want to eval with several different parameters in
        # one script.
        config = pipeline_pb2.TrainEvalPipelineConfig()
        with open(config_path, "r") as f:
            proto_str = f.read()
            text_format.Merge(proto_str, config)
    else:
        config = config_path

    input_cfg = config.eval_input_reader
    model_cfg = config.model.second
    train_cfg = config.train_config

    net = build_network(model_cfg, measure_time=measure_time).to(device)
    if train_cfg.enable_mixed_precision:
        net.half()
        print("half inference!")
        net.metrics_to_float()
        net.convert_norm_to_float(net)
    target_assigner = net.target_assigner
    voxel_generator = net.voxel_generator

    if ckpt_path is None:
        assert model_dir is not None
        torchplus.train.try_restore_latest_checkpoints(model_dir, [net])
    else:
        torchplus.train.restore(ckpt_path, net)
    batch_size = batch_size or input_cfg.batch_size
    eval_dataset = input_reader_builder.build(
        input_cfg,
        model_cfg,
        training=False,
        voxel_generator=voxel_generator,
        target_assigner=target_assigner)
    eval_dataloader = torch.utils.data.DataLoader(
        eval_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=input_cfg.preprocess.num_workers,
        pin_memory=False,
        collate_fn=merge_second_batch)

    if train_cfg.enable_mixed_precision:
        float_dtype = torch.float16
    else:
        float_dtype = torch.float32

    net.eval()
    result_path_step = result_path / f"step_{net.get_global_step()}"
    result_path_step.mkdir(parents=True, exist_ok=True)
    t = time.time()
    detections = []
    print("Generate output labels...")
    bar = ProgressBar()
    bar.start((len(eval_dataset) + batch_size - 1) // batch_size)
    prep_example_times = []
    prep_times = []
    t2 = time.time()
    for example in iter(eval_dataloader):
        if measure_time:
            prep_times.append(time.time() - t2)
            # synchronize so the timing excludes queued GPU work
            torch.cuda.synchronize()
            t1 = time.time()
        example = example_convert_to_torch(example, float_dtype)
        if measure_time:
            torch.cuda.synchronize()
            prep_example_times.append(time.time() - t1)

        with torch.no_grad():
            detections += net(example)
        bar.print_bar()
        if measure_time:
            t2 = time.time()

    sec_per_example = len(eval_dataset) / (time.time() - t)
    print(f'generate label finished({sec_per_example:.2f}/s). start eval:')
    if measure_time:
        print(
            f"avg example to torch time: {np.mean(prep_example_times) * 1000:.3f} ms"
        )
        print(f"avg prep time: {np.mean(prep_times) * 1000:.3f} ms")
        for name, val in net.get_avg_time_dict().items():
            print(f"avg {name} time = {val * 1000:.3f} ms")
    with open(result_path_step / "result.pkl", 'wb') as f:
        pickle.dump(detections, f)
    result_dict = eval_dataset.dataset.evaluation(detections,
                                                  str(result_path_step))
    if result_dict is not None:
        for k, v in result_dict["results"].items():
            print("Evaluation {}".format(k))
            print(v)
def helper_tune_target_assigner(config_path, target_rate=None, update_freq=200, update_delta=0.01, num_tune_epoch=5):
    """get information of target assign to tune thresholds in anchor generator.

    Iterates the training data counting, per class, how many ground-truth
    boxes were seen and how many anchors were assigned to them. When
    *target_rate* is given ({class_name: desired anchors-per-gt ratio}),
    the corresponding anchor generators' match/unmatch thresholds are
    nudged by *update_delta* every *update_freq* gt samples of that class,
    for *num_tune_epoch* epochs, before a final counting pass.

    Args:
        config_path: path to a pipeline config file, or a config object.
        target_rate: optional dict mapping class name -> target ratio.
        update_freq: gt samples per class between threshold adjustments.
        update_delta: threshold adjustment step size.
        num_tune_epoch: number of tuning passes (0 when target_rate is None).
    """
    if isinstance(config_path, str):
        # directly provide a config object. this usually used
        # when you want to train with several different parameters in
        # one script.
        config = pipeline_pb2.TrainEvalPipelineConfig()
        with open(config_path, "r") as f:
            proto_str = f.read()
            text_format.Merge(proto_str, config)
    else:
        config = config_path
    input_cfg = config.train_input_reader
    model_cfg = config.model.second
    # BUG FIX: the original called build_network(model_cfg, False, KL) where
    # `KL` is undefined in this scope (NameError at runtime); use the
    # builder's default instead.
    net = build_network(model_cfg, False, False)
    target_assigner = net.target_assigner
    voxel_generator = net.voxel_generator
    dataset = input_reader_builder.build(
        input_cfg,
        model_cfg,
        training=True,
        voxel_generator=voxel_generator,
        target_assigner=target_assigner,
        multi_gpu=False)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=1,
        shuffle=False,
        num_workers=0,
        pin_memory=False,
        collate_fn=merge_second_batch,
        worker_init_fn=_worker_init_fn,
        drop_last=False)
    # totals for the final report / running totals used while tuning
    class_count = {}
    anchor_count = {}
    class_count_tune = {}
    anchor_count_tune = {}
    for c in target_assigner.classes:
        class_count[c] = 0
        anchor_count[c] = 0
        class_count_tune[c] = 0
        anchor_count_tune[c] = 0

    step = 0
    classes = target_assigner.classes
    if target_rate is None:
        num_tune_epoch = 0
    for epoch in range(num_tune_epoch):
        for example in dataloader:
            gt_names = example["gt_names"]
            for name in gt_names:
                class_count_tune[name] += 1
            # labels: 0 = background, i = 1-based index into `classes`
            labels = example['labels']
            for i in range(1, len(classes) + 1):
                anchor_count_tune[classes[i - 1]] += int(np.sum(labels == i))
            if target_rate is not None:
                for name, rate in target_rate.items():
                    if class_count_tune[name] > update_freq:
                        # current anchors-per-gt rate for this class
                        current_rate = anchor_count_tune[name] / class_count_tune[name]
                        # too many anchors assigned -> raise thresholds, else lower
                        if current_rate > rate:
                            target_assigner._anchor_generators[classes.index(name)].match_threshold += update_delta
                            target_assigner._anchor_generators[classes.index(name)].unmatch_threshold += update_delta
                        else:
                            target_assigner._anchor_generators[classes.index(name)].match_threshold -= update_delta
                            target_assigner._anchor_generators[classes.index(name)].unmatch_threshold -= update_delta
                        anchor_count_tune[name] = 0
                        class_count_tune[name] = 0
            step += 1
    # final pass: count with the (possibly tuned) thresholds
    for c in target_assigner.classes:
        class_count[c] = 0
        anchor_count[c] = 0
    total_voxel_gene_time = 0
    count = 0
    for example in dataloader:
        gt_names = example["gt_names"]
        total_voxel_gene_time += example["metrics"][0]["voxel_gene_time"]
        count += 1
        for name in gt_names:
            class_count[name] += 1
        labels = example['labels']
        for i in range(1, len(classes) + 1):
            anchor_count[classes[i - 1]] += int(np.sum(labels == i))
    print("avg voxel gene time", total_voxel_gene_time / count)
    print(json.dumps(class_count, indent=2))
    print(json.dumps(anchor_count, indent=2))
    if target_rate is not None:
        for ag in target_assigner._anchor_generators:
            if ag.class_name in target_rate:
                print(ag.class_name, ag.match_threshold, ag.unmatch_threshold)
def mcnms_parameters_search(config_path,
                            model_dir,
                            preds_path):
    """Placeholder for multi-class NMS parameter search; not implemented."""
    pass
if __name__ == '__main__':
    # expose train / evaluate / helper_tune_target_assigner as CLI commands
    fire.Fire()
| 39.617778 | 117 | 0.588139 | import copy
import json
import os
from pathlib import Path
import pickle
import shutil
import time
import re
import fire
import numpy as np
import torch
from google.protobuf import text_format
import second.data.kitti_common as kitti
import torchplus
from second.builder import target_assigner_builder, voxel_builder
from second.core import box_np_ops
from second.data.preprocess import merge_second_batch, merge_second_batch_multigpu
from second.protos import pipeline_pb2
from second.pytorch.builder import (box_coder_builder, input_reader_builder,
lr_scheduler_builder, optimizer_builder,
second_builder)
from second.utils.log_tool import SimpleModelLog
from second.utils.progress_bar import ProgressBar
import psutil
def example_convert_to_torch(example, dtype=torch.float32,
                             device=None) -> dict:
    """Convert a batched example of numpy arrays to torch tensors.

    Float entries are created as float32 first and then cast to *dtype*
    (building fp16 tensors directly from fp32 numpy data is slow).
    Integer entries become int32, masks become uint8, and the nested
    "calib" dict is converted entry by entry. Keys that do not belong to
    any known category are passed through unchanged.

    Args:
        example: dict of numpy arrays / python objects from the dataloader.
        dtype: target floating dtype (e.g. torch.float16 for mixed precision).
        device: target device; defaults to cuda:0 when omitted.

    Returns:
        dict with the same keys and tensorized values.
    """
    if device is None:
        device = torch.device("cuda:0")
    float_keys = (
        "voxels", "anchors", "reg_targets", "reg_weights", "bev_map", "importance"
    )
    int_keys = ("coordinates", "labels", "num_points")
    converted = {}
    for key, value in example.items():
        if key in float_keys:
            # fp32 first, then cast: direct fp16 creation from fp32 data is slow
            converted[key] = torch.tensor(
                value, dtype=torch.float32, device=device).to(dtype)
        elif key in int_keys:
            converted[key] = torch.tensor(
                value, dtype=torch.int32, device=device)
        elif key == "anchors_mask":
            converted[key] = torch.tensor(
                value, dtype=torch.uint8, device=device)
        elif key == "calib":
            converted[key] = {
                name: torch.tensor(mat, dtype=dtype, device=device).to(dtype)
                for name, mat in value.items()
            }
        elif key == "num_voxels":
            converted[key] = torch.tensor(value)
        else:
            converted[key] = value
    return converted
def build_network(model_cfg, measure_time=False, KL=False):
    """Build a SECOND detection network from its model config.

    Constructs the voxel generator, box coder and target assigner from
    the config, then delegates assembly to second_builder.

    Args:
        model_cfg: the config.model.second protobuf message.
        measure_time: if True, the network records per-module timings.
        KL: build the KL-loss RPN variant (forwarded to second_builder).

    Returns:
        the assembled network module.
    """
    voxel_generator = voxel_builder.build(model_cfg.voxel_generator)
    # bird's-eye-view range [xmin, ymin, xmax, ymax] from the 3D range
    bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
    box_coder = box_coder_builder.build(model_cfg.box_coder)
    target_assigner_cfg = model_cfg.target_assigner
    target_assigner = target_assigner_builder.build(target_assigner_cfg,
                                                    bv_range, box_coder)
    box_coder.custom_ndim = target_assigner._anchor_generators[0].custom_ndim
    print(KL)  # NOTE(review): debug print left in; consider removing
    net = second_builder.build(
        model_cfg, voxel_generator, target_assigner, measure_time=measure_time, KL = KL )
    return net
def _worker_init_fn(worker_id):
time_seed = np.array(time.time(), dtype=np.int32)
np.random.seed(time_seed + worker_id)
print(f"WORKER {worker_id} seed:", np.random.get_state()[1][0])
def freeze_params(params: dict, include: str=None, exclude: str=None):
    """Select the parameters that remain trainable.

    A parameter is dropped when its name matches *include* (freeze these),
    or — when *exclude* is given — when its name does NOT match *exclude*
    (freeze everything except these). Patterns are regexes matched at the
    start of the name.

    Args:
        params: mapping of parameter name -> parameter.
        include: regex naming parameters to drop.
        exclude: regex naming the only parameters to keep.

    Returns:
        list of the parameters that were kept.
    """
    assert isinstance(params, dict)
    inc = re.compile(include) if include is not None else None
    exc = re.compile(exclude) if exclude is not None else None
    kept = []
    for name, param in params.items():
        if inc is not None and inc.match(name) is not None:
            continue
        if exc is not None and exc.match(name) is None:
            continue
        kept.append(param)
    return kept
def freeze_params_v2(params: dict, include: str=None, exclude: str=None):
    """Disable gradients in place for a subset of parameters.

    Freezes every parameter whose name matches *include*, and — when
    *exclude* is given — every parameter whose name does NOT match it.
    Parameters are never un-frozen by this function.

    Args:
        params: mapping of parameter name -> parameter (mutated in place).
        include: regex naming parameters to freeze.
        exclude: regex naming the only parameters left trainable.
    """
    assert isinstance(params, dict)
    inc = re.compile(include) if include is not None else None
    exc = re.compile(exclude) if exclude is not None else None
    for name, param in params.items():
        freeze = False
        if inc is not None and inc.match(name) is not None:
            freeze = True
        if exc is not None and exc.match(name) is None:
            freeze = True
        if freeze:
            param.requires_grad = False
def filter_param_dict(state_dict: dict, include: str=None, exclude: str=None):
    """Return the subset of *state_dict* selected by regex filters.

    Keeps an entry when its key matches *include* (if given) and does not
    match *exclude* (if given). Used to choose which pretrained weights
    are loaded.

    Args:
        state_dict: mapping of parameter name -> tensor.
        include: regex a key must match to be kept.
        exclude: regex a key must not match to be kept.

    Returns:
        a new dict containing only the selected entries.
    """
    assert isinstance(state_dict, dict)
    inc = re.compile(include) if include is not None else None
    exc = re.compile(exclude) if exclude is not None else None

    def _selected(key):
        if inc is not None and inc.match(key) is None:
            return False
        if exc is not None and exc.match(key) is not None:
            return False
        return True

    return {k: v for k, v in state_dict.items() if _selected(k)}
def train(config_path,
model_dir,
KL = False,
result_path=None,
create_folder=False,
display_step=50,
summary_step=5,
pretrained_path=None,
pretrained_include=None,
pretrained_exclude=None,
freeze_include=None,
freeze_exclude=None,
multi_gpu=False,
measure_time=False,
resume=False):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_dir = str(Path(model_dir).resolve())
if create_folder:
if Path(model_dir).exists():
model_dir = torchplus.train.create_folder(model_dir)
model_dir = Path(model_dir)
if not resume and model_dir.exists():
raise ValueError("model dir exists and you don't specify resume.")
model_dir.mkdir(parents=True, exist_ok=True)
if result_path is None:
result_path = model_dir / 'results'
config_file_bkp = "pipeline.config"
if isinstance(config_path, str):
# directly provide a config object. this usually used
# when you want to train with several different parameters in
# one script.
config = pipeline_pb2.TrainEvalPipelineConfig()
with open(config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, config)
else:
config = config_path
proto_str = text_format.MessageToString(config, indent=2)
with (model_dir / config_file_bkp).open("w") as f:
f.write(proto_str)
input_cfg = config.train_input_reader
eval_input_cfg = config.eval_input_reader
model_cfg = config.model.second
train_cfg = config.train_config
if model_cfg.rpn.module_class_name == "RPN_KL":
KL = True
else:
KL = False
print(KL)
net = build_network(model_cfg, measure_time,KL).to(device)
# if train_cfg.enable_mixed_precision:
# net.half()
# net.metrics_to_float()
# net.convert_norm_to_float(net)
target_assigner = net.target_assigner
voxel_generator = net.voxel_generator
print("num parameters:", len(list(net.parameters())))
torchplus.train.try_restore_latest_checkpoints(model_dir, [net])
if pretrained_path is not None:
model_dict = net.state_dict()
pretrained_dict = torch.load(pretrained_path)
pretrained_dict = filter_param_dict(pretrained_dict, pretrained_include, pretrained_exclude)
new_pretrained_dict = {}
for k, v in pretrained_dict.items():
if k in model_dict and v.shape == model_dict[k].shape:
new_pretrained_dict[k] = v
print("Load pretrained parameters:")
for k, v in new_pretrained_dict.items():
print(k, v.shape)
model_dict.update(new_pretrained_dict)
net.load_state_dict(model_dict)
freeze_params_v2(dict(net.named_parameters()), freeze_include, freeze_exclude)
net.clear_global_step()
net.clear_metrics()
if multi_gpu:
net_parallel = torch.nn.DataParallel(net)
else:
net_parallel = net
optimizer_cfg = train_cfg.optimizer
loss_scale = train_cfg.loss_scale_factor
fastai_optimizer = optimizer_builder.build(
optimizer_cfg,
net,
mixed=False,
loss_scale=loss_scale)
if loss_scale < 0:
loss_scale = "dynamic"
if train_cfg.enable_mixed_precision:
max_num_voxels = input_cfg.preprocess.max_number_of_voxels * input_cfg.batch_size
assert max_num_voxels < 65535, "spconv fp16 training only support this"
from apex import amp
net, amp_optimizer = amp.initialize(net, fastai_optimizer,
opt_level="O2",
keep_batchnorm_fp32=True,
loss_scale=loss_scale
)
net.metrics_to_float()
else:
amp_optimizer = fastai_optimizer
torchplus.train.try_restore_latest_checkpoints(model_dir,
[fastai_optimizer])
lr_scheduler = lr_scheduler_builder.build(optimizer_cfg, amp_optimizer,
train_cfg.steps)
if train_cfg.enable_mixed_precision:
float_dtype = torch.float16
else:
float_dtype = torch.float32
if multi_gpu:
num_gpu = torch.cuda.device_count()
print(f"MULTI-GPU: use {num_gpu} gpu")
collate_fn = merge_second_batch_multigpu
else:
collate_fn = merge_second_batch
num_gpu = 1
######################
# PREPARE INPUT
######################
dataset = input_reader_builder.build(
input_cfg,
model_cfg,
training=True,
voxel_generator=voxel_generator,
target_assigner=target_assigner,
multi_gpu=multi_gpu)
eval_dataset = input_reader_builder.build(
eval_input_cfg,
model_cfg,
training=False,
voxel_generator=voxel_generator,
target_assigner=target_assigner)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=input_cfg.batch_size * num_gpu,
shuffle=True,
num_workers=input_cfg.preprocess.num_workers * num_gpu,
pin_memory=False,
collate_fn=collate_fn,
worker_init_fn=_worker_init_fn,
drop_last=not multi_gpu)
eval_dataloader = torch.utils.data.DataLoader(
eval_dataset,
batch_size=eval_input_cfg.batch_size, # only support multi-gpu train
shuffle=False,
num_workers=eval_input_cfg.preprocess.num_workers,
pin_memory=False,
collate_fn=merge_second_batch)
######################
# TRAINING
######################
model_logging = SimpleModelLog(model_dir)
model_logging.open()
model_logging.log_text(proto_str + "\n", 0, tag="config")
start_step = net.get_global_step()
total_step = train_cfg.steps
t = time.time()
steps_per_eval = train_cfg.steps_per_eval
clear_metrics_every_epoch = train_cfg.clear_metrics_every_epoch
amp_optimizer.zero_grad()
step_times = []
step = start_step
try:
while True:
if clear_metrics_every_epoch:
net.clear_metrics()
for example in dataloader:
lr_scheduler.step(net.get_global_step())
time_metrics = example["metrics"]
example.pop("metrics")
example_torch = example_convert_to_torch(example, float_dtype)
batch_size = example["anchors"].shape[0]
# print("num_points:",max(example_torch['num_points']))
# print("num_voxels:",example_torch['num_voxels'].shape)
# print("anchors:",example_torch['anchors'].shape)
# print("voxels:",example_torch['voxels'].shape)
# print(example_torch['voxels'][0:3])
# print("coordinates:",example_torch['coordinates'].shape)
# exit()
ret_dict = net_parallel(example_torch)
cls_preds = ret_dict["cls_preds"]
loss = ret_dict["loss"].mean()
cls_loss_reduced = ret_dict["cls_loss_reduced"].mean()
loc_loss_reduced = ret_dict["loc_loss_reduced"].mean()
cls_pos_loss = ret_dict["cls_pos_loss"].mean()
cls_neg_loss = ret_dict["cls_neg_loss"].mean()
loc_loss = ret_dict["loc_loss"]
cls_loss = ret_dict["cls_loss"]
cared = ret_dict["cared"]
labels = example_torch["labels"]
if train_cfg.enable_mixed_precision:
with amp.scale_loss(loss, amp_optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(net.parameters(), 10.0)
amp_optimizer.step()
amp_optimizer.zero_grad()
net.update_global_step()
net_metrics = net.update_metrics(cls_loss_reduced,
loc_loss_reduced, cls_preds,
labels, cared)
step_time = (time.time() - t)
step_times.append(step_time)
t = time.time()
metrics = {}
num_pos = int((labels > 0)[0].float().sum().cpu().numpy())
num_neg = int((labels == 0)[0].float().sum().cpu().numpy())
if 'anchors_mask' not in example_torch:
num_anchors = example_torch['anchors'].shape[1]
else:
num_anchors = int(example_torch['anchors_mask'][0].sum())
global_step = net.get_global_step()
if global_step % display_step == 0:
if measure_time:
for name, val in net.get_avg_time_dict().items():
print(f"avg {name} time = {val * 1000:.3f} ms")
loc_loss_elem = [
float(loc_loss[:, :, i].sum().detach().cpu().numpy() /
batch_size) for i in range(loc_loss.shape[-1])
]
metrics["runtime"] = {
"step": global_step,
"steptime": np.mean(step_times),
}
metrics["runtime"].update(time_metrics[0])
step_times = []
metrics.update(net_metrics)
metrics["loss"]["loc_elem"] = loc_loss_elem
metrics["loss"]["cls_pos_rt"] = float(
cls_pos_loss.detach().cpu().numpy())
metrics["loss"]["cls_neg_rt"] = float(
cls_neg_loss.detach().cpu().numpy())
if model_cfg.use_direction_classifier:
dir_loss_reduced = ret_dict["dir_loss_reduced"].mean()
metrics["loss"]["dir_rt"] = float(
dir_loss_reduced.detach().cpu().numpy())
metrics["misc"] = {
"num_vox": int(example_torch["voxels"].shape[0]),
"num_pos": int(num_pos),
"num_neg": int(num_neg),
"num_anchors": int(num_anchors),
"lr": float(amp_optimizer.lr),
"mem_usage": psutil.virtual_memory().percent,
}
model_logging.log_metrics(metrics, global_step)
if global_step % steps_per_eval == 0:
torchplus.train.save_models(model_dir, [net, amp_optimizer],
net.get_global_step())
net.eval()
result_path_step = result_path / f"step_{net.get_global_step()}"
result_path_step.mkdir(parents=True, exist_ok=True)
model_logging.log_text("#################################",
global_step)
model_logging.log_text("# EVAL", global_step)
model_logging.log_text("#################################",
global_step)
model_logging.log_text("Generate output labels...", global_step)
t = time.time()
detections = []
prog_bar = ProgressBar()
net.clear_timer()
prog_bar.start((len(eval_dataset) + eval_input_cfg.batch_size - 1)
// eval_input_cfg.batch_size)
for example in iter(eval_dataloader):
example = example_convert_to_torch(example, float_dtype)
detections += net(example)
prog_bar.print_bar()
sec_per_ex = len(eval_dataset) / (time.time() - t)
model_logging.log_text(
f'generate label finished({sec_per_ex:.2f}/s). start eval:',
global_step)
result_dict = eval_dataset.dataset.evaluation(
detections, str(result_path_step))
for k, v in result_dict["results"].items():
model_logging.log_text("Evaluation {}".format(k), global_step)
model_logging.log_text(v, global_step)
model_logging.log_metrics(result_dict["detail"], global_step)
with open(result_path_step / "result.pkl", 'wb') as f:
pickle.dump(detections, f)
net.train()
step += 1
if step >= total_step:
break
if step >= total_step:
break
except Exception as e:
print(json.dumps(example["metadata"], indent=2))
model_logging.log_text(str(e), step)
model_logging.log_text(json.dumps(example["metadata"], indent=2), step)
torchplus.train.save_models(model_dir, [net, amp_optimizer],
step)
raise e
finally:
model_logging.close()
torchplus.train.save_models(model_dir, [net, amp_optimizer],
net.get_global_step())
def evaluate(config_path,
             model_dir=None,
             result_path=None,
             ckpt_path=None,
             measure_time=False,
             batch_size=None,
             **kwargs):
    """Run inference on the eval split and print detection metrics.

    Args:
        config_path: path to a TrainEvalPipelineConfig text proto, or an
            already-parsed config object.
        model_dir: directory holding checkpoints. May be None when both
            ckpt_path and result_path are provided.
        result_path: directory for detection outputs; defaults to
            model_dir / 'eval_results'.
        ckpt_path: explicit checkpoint to restore; when None, the latest
            checkpoint in model_dir is restored.
        measure_time: if True, print per-stage timing statistics.
        batch_size: overrides the batch size from the config when set.
    """
    assert len(kwargs) == 0
    # Bug fix: Path(None) raises TypeError, so only resolve model_dir when it
    # is actually provided (the signature and the assert below both allow
    # model_dir=None when an explicit ckpt_path is given).
    if model_dir is not None:
        model_dir = str(Path(model_dir).resolve())
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    result_name = 'eval_results'
    if result_path is None:
        # Without an explicit result_path we must derive it from model_dir.
        assert model_dir is not None, "model_dir is required to derive result_path"
        model_dir = Path(model_dir)
        result_path = model_dir / result_name
    else:
        result_path = Path(result_path)
    if isinstance(config_path, str):
        # Read and parse the text-format pipeline config from disk.
        config = pipeline_pb2.TrainEvalPipelineConfig()
        with open(config_path, "r") as f:
            proto_str = f.read()
            text_format.Merge(proto_str, config)
    else:
        # Directly provided config object. This is usually used when you want
        # to eval with several different parameters in one script.
        config = config_path
    input_cfg = config.eval_input_reader
    model_cfg = config.model.second
    train_cfg = config.train_config
    net = build_network(model_cfg, measure_time=measure_time).to(device)
    if train_cfg.enable_mixed_precision:
        # fp16 inference: convert weights/metrics, keep norm layers in fp32.
        net.half()
        print("half inference!")
        net.metrics_to_float()
        net.convert_norm_to_float(net)
    target_assigner = net.target_assigner
    voxel_generator = net.voxel_generator
    if ckpt_path is None:
        assert model_dir is not None
        torchplus.train.try_restore_latest_checkpoints(model_dir, [net])
    else:
        torchplus.train.restore(ckpt_path, net)
    batch_size = batch_size or input_cfg.batch_size
    eval_dataset = input_reader_builder.build(
        input_cfg,
        model_cfg,
        training=False,
        voxel_generator=voxel_generator,
        target_assigner=target_assigner)
    eval_dataloader = torch.utils.data.DataLoader(
        eval_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=input_cfg.preprocess.num_workers,
        pin_memory=False,
        collate_fn=merge_second_batch)
    if train_cfg.enable_mixed_precision:
        float_dtype = torch.float16
    else:
        float_dtype = torch.float32
    net.eval()
    result_path_step = result_path / f"step_{net.get_global_step()}"
    result_path_step.mkdir(parents=True, exist_ok=True)
    t = time.time()
    detections = []
    print("Generate output labels...")
    bar = ProgressBar()
    bar.start((len(eval_dataset) + batch_size - 1) // batch_size)
    prep_example_times = []
    prep_times = []
    t2 = time.time()
    for example in iter(eval_dataloader):
        if measure_time:
            prep_times.append(time.time() - t2)
            # Synchronize so CPU timers measure completed GPU work.
            torch.cuda.synchronize()
            t1 = time.time()
        example = example_convert_to_torch(example, float_dtype)
        if measure_time:
            torch.cuda.synchronize()
            prep_example_times.append(time.time() - t1)
        with torch.no_grad():
            detections += net(example)
        bar.print_bar()
        if measure_time:
            t2 = time.time()
    sec_per_example = len(eval_dataset) / (time.time() - t)
    print(f'generate label finished({sec_per_example:.2f}/s). start eval:')
    if measure_time:
        print(
            f"avg example to torch time: {np.mean(prep_example_times) * 1000:.3f} ms"
        )
        print(f"avg prep time: {np.mean(prep_times) * 1000:.3f} ms")
        for name, val in net.get_avg_time_dict().items():
            print(f"avg {name} time = {val * 1000:.3f} ms")
    # Persist raw detections, then run the dataset's own evaluation protocol.
    with open(result_path_step / "result.pkl", 'wb') as f:
        pickle.dump(detections, f)
    result_dict = eval_dataset.dataset.evaluation(detections,
                                                  str(result_path_step))
    if result_dict is not None:
        for k, v in result_dict["results"].items():
            print("Evaluation {}".format(k))
            print(v)
def helper_tune_target_assigner(config_path, target_rate=None, update_freq=200, update_delta=0.01, num_tune_epoch=5):
    """Tune per-class anchor match/unmatch thresholds toward a target
    anchors-per-groundtruth rate, then print class/anchor statistics.

    Args:
        config_path: text-proto path or already-parsed config object.
        target_rate: dict class_name -> desired anchors-per-gt rate; when
            None, the tuning passes are skipped and only stats are printed.
        update_freq: number of gt boxes of a class to observe before the
            thresholds are adjusted.
        update_delta: threshold adjustment step per update.
        num_tune_epoch: how many passes over the dataset to tune for.
    """
    if isinstance(config_path, str):
        # Read and parse the text-format pipeline config from disk.
        config = pipeline_pb2.TrainEvalPipelineConfig()
        with open(config_path, "r") as f:
            proto_str = f.read()
            text_format.Merge(proto_str, config)
    else:
        # Directly provided config object. This is usually used when you want
        # to run with several different parameters in one script.
        config = config_path
        proto_str = text_format.MessageToString(config, indent=2)
    input_cfg = config.train_input_reader
    eval_input_cfg = config.eval_input_reader
    model_cfg = config.model.second
    train_cfg = config.train_config
    # Bug fix: `KL` was referenced here without being defined in this scope
    # (NameError at runtime; it is a local of train(), not a module global).
    # Derive it from the config exactly the way train() does.
    kl = model_cfg.rpn.module_class_name == "RPN_KL"
    net = build_network(model_cfg, False, kl)
    target_assigner = net.target_assigner
    voxel_generator = net.voxel_generator
    dataset = input_reader_builder.build(
        input_cfg,
        model_cfg,
        training=True,
        voxel_generator=voxel_generator,
        target_assigner=target_assigner,
        multi_gpu=False)
    # Single-process, batch-size-1 loader: we only need deterministic counts.
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=1,
        shuffle=False,
        num_workers=0,
        pin_memory=False,
        collate_fn=merge_second_batch,
        worker_init_fn=_worker_init_fn,
        drop_last=False)
    class_count = {}
    anchor_count = {}
    class_count_tune = {}
    anchor_count_tune = {}
    for c in target_assigner.classes:
        class_count[c] = 0
        anchor_count[c] = 0
        class_count_tune[c] = 0
        anchor_count_tune[c] = 0
    step = 0
    classes = target_assigner.classes
    if target_rate is None:
        # Nothing to tune toward; skip the tuning passes entirely.
        num_tune_epoch = 0
    for epoch in range(num_tune_epoch):
        for example in dataloader:
            gt_names = example["gt_names"]
            for name in gt_names:
                class_count_tune[name] += 1
            labels = example['labels']
            # labels use 1-based class ids (0 is background/unassigned).
            for i in range(1, len(classes) + 1):
                anchor_count_tune[classes[i - 1]] += int(np.sum(labels == i))
            if target_rate is not None:
                for name, rate in target_rate.items():
                    if class_count_tune[name] > update_freq:
                        # calc rate: anchors assigned per ground-truth box
                        current_rate = anchor_count_tune[name] / class_count_tune[name]
                        # Raise thresholds when too many anchors match,
                        # lower them when too few.
                        if current_rate > rate:
                            target_assigner._anchor_generators[classes.index(name)].match_threshold += update_delta
                            target_assigner._anchor_generators[classes.index(name)].unmatch_threshold += update_delta
                        else:
                            target_assigner._anchor_generators[classes.index(name)].match_threshold -= update_delta
                            target_assigner._anchor_generators[classes.index(name)].unmatch_threshold -= update_delta
                        anchor_count_tune[name] = 0
                        class_count_tune[name] = 0
            step += 1
    # Final statistics pass with the (possibly) tuned thresholds.
    for c in target_assigner.classes:
        class_count[c] = 0
        anchor_count[c] = 0
    total_voxel_gene_time = 0
    count = 0
    for example in dataloader:
        gt_names = example["gt_names"]
        total_voxel_gene_time += example["metrics"][0]["voxel_gene_time"]
        count += 1
        for name in gt_names:
            class_count[name] += 1
        labels = example['labels']
        for i in range(1, len(classes) + 1):
            anchor_count[classes[i - 1]] += int(np.sum(labels == i))
    print("avg voxel gene time", total_voxel_gene_time / count)
    print(json.dumps(class_count, indent=2))
    print(json.dumps(anchor_count, indent=2))
    if target_rate is not None:
        for ag in target_assigner._anchor_generators:
            if ag.class_name in target_rate:
                print(ag.class_name, ag.match_threshold, ag.unmatch_threshold)
def mcnms_parameters_search(config_path,
                            model_dir,
                            preds_path):
    """Placeholder for a multi-class NMS parameter search; not implemented."""
    pass
if __name__ == '__main__':
    # Expose this module's functions (evaluate, helper_tune_target_assigner,
    # ...) as a command-line interface via python-fire.
    fire.Fire()
| true | true |
f7354be59a3c929871a00a722d94d52ec6fe73a8 | 4,652 | py | Python | python-Riemann-online.py | KyunghoWon-GIST/PyRiemann-with-OpenViBE | 2a070fdadb040ce6edad81aef497d054ddd70130 | [
"MIT"
] | null | null | null | python-Riemann-online.py | KyunghoWon-GIST/PyRiemann-with-OpenViBE | 2a070fdadb040ce6edad81aef497d054ddd70130 | [
"MIT"
] | null | null | null | python-Riemann-online.py | KyunghoWon-GIST/PyRiemann-with-OpenViBE | 2a070fdadb040ce6edad81aef497d054ddd70130 | [
"MIT"
] | 1 | 2022-03-24T23:32:52.000Z | 2022-03-24T23:32:52.000Z | import pickle
import numpy as np
import pyriemann
import sklearn
import scipy
import matplotlib as mpl
mpl.use('Qt5Agg') # for using pyplot (pip install pyqt5)
import matplotlib.pyplot as plt
from scipy import signal
from scipy.signal import butter, filtfilt, sosfiltfilt
# Pyriemann with OV Python scripting plugin --------------------------------------------------- written by Kyungho Won
#
# Step
# 1. Loads covariance matrices estimated using calibration EEG at the beginning and fits MDM (__init__)
# 2. During test scenario, python scripting module receives the segmented EEG from OpenViBE every epoch (input: signal)
# 3. In Python scripting plugin, the segmented EEG is band-pass filtered and transformed to a covariance matrix
# 4. The Fitted MDM predicts the current label with the covariance matrix
# 5. Python scripting plugin sends stimulution (predicted labels) as an output (output: stimulation)
# 6. Ohter external modules could be added
def butter_bandpass_filter(data, lowcut, highcut, fs, order):
    """Zero-phase Butterworth band-pass filter applied along axis 1.

    Args:
        data: 2-D array shaped [channels x samples].
        lowcut, highcut: pass-band edges in Hz.
        fs: sampling rate in Hz.
        order: Butterworth filter order.

    Returns:
        Filtered array, same shape as ``data``. Each channel is demeaned
        before filtering; the mean is not added back.
    """
    nyquist = 0.5 * fs
    band = [lowcut / nyquist, highcut / nyquist]
    sos = butter(order, band, btype='band', output='sos')
    # Remove the per-channel DC offset before filtering.
    centered = data - np.mean(data, axis=1)[:, np.newaxis]
    # sosfiltfilt runs the filter forward and backward -> zero phase shift.
    return sosfiltfilt(sos, centered)
def draw_feedback(nth, nClass):
    """Draw an arrow cue on a black figure for the predicted MI class.

    Args:
        nth: 1-based index of the classified trial (shown in the title).
        nClass: predicted class, int-convertible; 1=left, 2=right, 3=up,
            4=down.
    """
    labels_arr = ['LEFT','RIGHT','UP','DOWN']
    mpl.rcParams['toolbar'] = 'None'  # hide the matplotlib toolbar
    plt.clf()
    plt.plot(0,0)
    axes = plt.gca()
    axes.set_facecolor('black')
    plt.xlim([-10, 10])
    plt.ylim([-10, 10])
    plt.axis('off')
    cls = int(nClass)
    plt.title('%02d Predicted: %s' %(nth, labels_arr[cls-1]))
    # Arrow direction (dx, dy) per class id.
    directions = {1: (-4, 0), 2: (4, 0), 3: (0, 4), 4: (0, -4)}
    if cls in directions:
        dx, dy = directions[cls]
        plt.arrow(0, 0, dx, dy, width=1)
class MyOVBox(OVBox):
    """OpenViBE Python scripting box: online Riemannian (MDM) MI classifier.

    Loads a pre-trained pyriemann MDM model at startup, band-pass filters
    each incoming EEG epoch, classifies its covariance matrix, and emits the
    predicted class as an OpenViBE stimulation (optionally drawing feedback).
    """
    def __init__(self):
        OVBox.__init__(self)
        self.signalHeader = None  # OVSignalHeader cached from the first chunk
        self.nth_trial = 0        # number of epochs classified so far
    def initialize(self):
        """Called once by OpenViBE: load the model and read box settings."""
        # Append to the box output a stimulation header.
        self.output[0].append(OVStimulationHeader(0., 0.))
        # Load covariance matrices estimated from the calibrated EEG and fit
        # the MDM classifier on them.
        # NOTE(review): file handle is never closed — consider a `with` block.
        load_file = open(self.setting['Trained model path'], 'rb')
        trained = pickle.load(load_file)
        self.mdm = pyriemann.classification.MDM()
        self.mdm.metric = 'Riemann'
        self.mdm.fit(trained['COV'], trained['Labels'])
        print('Training accuracy is', np.sum(self.mdm.predict(trained['COV'])==trained['Labels'])/len(trained['Labels']))
        print('== Trained COV:', trained['COV'].shape)
        print('==', self.mdm)
        print('\n\n')
        # User defined parameters (box settings configured in the scenario).
        self.lowbp = int(self.setting['low bp'])              # band-pass low cut (Hz)
        self.highbp = int(self.setting['high bp'])            # band-pass high cut (Hz)
        self.filterorder = int(self.setting['filter order'])
        self.sampling = int(self.setting['sampling rate'])
        self.isfeedback = self.setting['Feedback']            # 'True' enables arrow feedback
        self.ans_mi = [769, 770, 780, 774] # left right up down
        plt.ion()  # interactive mode so the feedback figure redraws non-blocking
    def process(self):
        """Called repeatedly by OpenViBE: classify each buffered EEG epoch."""
        for chunkIdx in range( len(self.input[0]) ):
            # borrowed from python-signal-average.py
            if(type(self.input[0][chunkIdx]) == OVSignalHeader): # called only once
                self.signalHeader = self.input[0].pop()
            elif(type(self.input[0][chunkIdx]) == OVSignalBuffer): # called every epoch
                chunk = self.input[0].pop()
                numpyBuffer = np.array(chunk, dtype=np.float64).reshape(tuple(self.signalHeader.dimensionSizes))
                # numpyBuffer has [ch x time]
                numpyBuffer = butter_bandpass_filter(numpyBuffer, self.lowbp, self.highbp, self.sampling, self.filterorder)
                # Pyriemann only accepts 3D inputs with [nMatrices, nCh, nTime]
                cur_input = np.expand_dims(numpyBuffer, axis=0) # now (1, nCh, nTime)
                COV_cur = pyriemann.estimation.Covariances().fit_transform(cur_input)
                predict_class = self.mdm.predict(COV_cur) # among [1, 2, 3, 4]
                print(predict_class)
                # send stimulation (classified results)
                stimSet = OVStimulationSet(self.getCurrentTime(), self.getCurrentTime()+1./self.getClock())
                stimSet.append(OVStimulation(self.ans_mi[int(predict_class)-1], self.getCurrentTime(), 0.))
                self.output[0].append(stimSet)
                self.nth_trial = self.nth_trial + 1
                if self.isfeedback == 'True':
                    draw_feedback(self.nth_trial, predict_class)
    def uninitialize(self):
        """Called once on shutdown: close the stimulation stream and figure."""
        end = self.getCurrentTime()
        self.output[0].append(OVStimulationEnd(end,end))
        print('uninitialize')
        plt.ioff()
        plt.close()
# NOTE(review): the OpenViBE Python scripting plugin appears to look up this
# module-level `box` instance to drive the callbacks — confirm the required
# variable name against the plugin documentation.
box = MyOVBox()
| 37.516129 | 120 | 0.681212 | import pickle
import numpy as np
import pyriemann
import sklearn
import scipy
import matplotlib as mpl
mpl.use('Qt5Agg')
import matplotlib.pyplot as plt
from scipy import signal
from scipy.signal import butter, filtfilt, sosfiltfilt
def butter_bandpass_filter(data, lowcut, highcut, fs, order):
nyq = fs/2
low = lowcut/nyq
high = highcut/nyq
sos = butter(order, [low, high], btype='band', output='sos')
meandat = np.mean(data, axis=1)
data = data - meandat[:, np.newaxis]
y = sosfiltfilt(sos, data) draw_feedback(nth, nClass):
labels_arr = ['LEFT','RIGHT','UP','DOWN']
mpl.rcParams['toolbar'] = 'None'
plt.clf()
plt.plot(0,0)
ax = plt.gca()
ax.set_facecolor('black')
plt.xlim([-10, 10])
plt.ylim([-10, 10])
plt.axis('off')
plt.title('%02d Predicted: %s' %(nth, labels_arr[int(nClass)-1]))
if nClass == 1:
plt.arrow(0,0, -4, 0, width=1)
elif nClass == 2:
plt.arrow(0,0, 4, 0, width=1)
elif nClass == 3:
plt.arrow(0,0, 0, 4, width=1)
elif nClass == 4:
plt.arrow(0,0, 0, -4, width=1)
class MyOVBox(OVBox):
def __init__(self):
OVBox.__init__(self)
self.signalHeader = None
self.nth_trial = 0
def initialize(self):
self.output[0].append(OVStimulationHeader(0., 0.))
load_file = open(self.setting['Trained model path'], 'rb')
trained = pickle.load(load_file)
self.mdm = pyriemann.classification.MDM()
self.mdm.metric = 'Riemann'
self.mdm.fit(trained['COV'], trained['Labels'])
print('Training accuracy is', np.sum(self.mdm.predict(trained['COV'])==trained['Labels'])/len(trained['Labels']))
print('== Trained COV:', trained['COV'].shape)
print('==', self.mdm)
print('\n\n')
self.lowbp = int(self.setting['low bp'])
self.highbp = int(self.setting['high bp'])
self.filterorder = int(self.setting['filter order'])
self.sampling = int(self.setting['sampling rate'])
self.isfeedback = self.setting['Feedback']
self.ans_mi = [769, 770, 780, 774]
plt.ion()
def process(self):
for chunkIdx in range( len(self.input[0]) ):
if(type(self.input[0][chunkIdx]) == OVSignalHeader):
self.signalHeader = self.input[0].pop()
elif(type(self.input[0][chunkIdx]) == OVSignalBuffer):
chunk = self.input[0].pop()
numpyBuffer = np.array(chunk, dtype=np.float64).reshape(tuple(self.signalHeader.dimensionSizes))
numpyBuffer = butter_bandpass_filter(numpyBuffer, self.lowbp, self.highbp, self.sampling, self.filterorder)
cur_input = np.expand_dims(numpyBuffer, axis=0)
COV_cur = pyriemann.estimation.Covariances().fit_transform(cur_input)
predict_class = self.mdm.predict(COV_cur)
print(predict_class)
stimSet = OVStimulationSet(self.getCurrentTime(), self.getCurrentTime()+1./self.getClock())
stimSet.append(OVStimulation(self.ans_mi[int(predict_class)-1], self.getCurrentTime(), 0.))
self.output[0].append(stimSet)
self.nth_trial = self.nth_trial + 1
if self.isfeedback == 'True':
draw_feedback(self.nth_trial, predict_class)
def uninitialize(self):
end = self.getCurrentTime()
self.output[0].append(OVStimulationEnd(end,end))
print('uninitialize')
plt.ioff()
plt.close()
box = MyOVBox()
| true | true |
f7354bf17177900c99c3c4dd85040eab66621832 | 9,815 | py | Python | pysim/old/samplerate_retry.py | pavpanchekha/bitrate-lab | f9f804ad08bb544a90d5191d3db3f78398e1f51a | [
"MIT"
] | 5 | 2015-03-22T20:45:05.000Z | 2022-02-13T15:41:41.000Z | pysim/old/samplerate_retry.py | pavpanchekha/bitrate-lab | f9f804ad08bb544a90d5191d3db3f78398e1f51a | [
"MIT"
] | null | null | null | pysim/old/samplerate_retry.py | pavpanchekha/bitrate-lab | f9f804ad08bb544a90d5191d3db3f78398e1f51a | [
"MIT"
] | 4 | 2017-10-16T21:45:47.000Z | 2019-11-19T12:41:33.000Z | # Colleen Josephson, 2013
# This file attempts to implement the SampleRate bit rate selection algorithm
# as outlined in the JBicket MS Thesis.
from __future__ import division
from random import choice
from rates import ieee80211_to_idx
import rates
# Module-level SampleRate state shared by apply_rate()/process_feedback().
npkts = 0 #number of packets sent over link
nsuccess = 0 #number of packets sent successfully
NBYTES = 1500 #assumed packet payload size (bytes) used for airtime estimates
currRate = 54 #current best bitRate, in Mbps
NRETRIES = 1 #retry count requested per multi-rate-retry entry
# The average back-off period, in microseconds, for up to 8 attempts of a 802.11b unicast packet.
# Attempts beyond 6 are clamped to the maximum back-off value.
# TODO: find g data
backoff = {0:0, 1:155, 2:315, 3:635, 4:1275, 5:2555, 6:5115, 7:5115, 8:5115, 9:5115,
           10:5115, 11:5115, 12:5115, 13:5115, 14:5115, 15:5115, 16:5115, 17:5115,
           18:5115, 19:5115, 20:5115}
def bitrate_type(bitrate):
    """Return the PHY family string (e.g. 'ds', 'dsss', 'ofdm') for a bit-rate in Mbps."""
    rate_idx = ieee80211_to_idx(bitrate)
    return rates.RATES[rate_idx].phy
#"To calculate the transmission time of a n-byte unicast packet given the bit-rate b and
# number of retries r, SampleRate uses the following equation based on the 802.11 unicast
# retransmission mechanism detailed in Section 2.2"
#
# tx_time(b, r, n) = difs + backoff[r] + (r + 1)*(sifs + ack + header + (n * 8/b))
def tx_time(bitrate, retries, nbytes):
    """Transmission time, in microseconds, of an ``nbytes`` unicast packet.

    Implements tx_time(b, r, n) = difs + backoff[r]
        + (r + 1) * (sifs + ack + header + n * 8 / b)
    where ``bitrate`` is in Mbps (1 Mbps = 1 bit/us, so n*8/b is already us).

    Raises:
        ValueError: if the bit-rate's PHY family is unrecognized.
    """
    # Fix: dropped the spurious `global currRate, npkts, nsuccess, NBYTES`
    # declaration — none of those names is assigned here, so the statement
    # had no effect and only obscured the function's (pure) behavior.
    brtype = bitrate_type(bitrate)
    # 802.11 timing constants in microseconds, per PHY family.
    if bitrate == 1:
        # 1 Mbps DSSS: long PLCP preamble/header.
        difs = 50
        sifs = 10
        ack = 304
        header = 192
    elif brtype == "ds" or brtype == "dsss":
        difs = 50
        sifs = 10
        ack = 304
        header = 96
    elif brtype == "ofdm":
        difs = 28
        sifs = 9
        ack = 304 # Somehow 6mb acks aren't used
        header = 20
    else:
        raise ValueError("Unknown bitrate type", brtype, bitrate)
    return difs + backoff[retries] + (retries+1)*(sifs + ack + header + (nbytes * 8/(bitrate)))
class Packet:
    """One transmission-attempt result kept in a rate's 10-second window."""

    def __init__(self, time_sent, success, txTime, rate):
        self.time_sent = time_sent  # timestamp the packet was sent
        self.success = success      # True if the packet was acknowledged
        self.txTime = txTime        # computed airtime, in microseconds
        self.rate = rate            # bit-rate in Mbps
    def __repr__(self):
        return (f"Pkt sent at time {self.time_sent!r}, "
                f"rate {self.rate!r} was successful: {self.success!r}\n")
class Rate:
    """Per-bit-rate bookkeeping used by SampleRate's decision logic."""
    def __init__(self, rate):
        self.rate = rate #in mbps
        self.success = 0 # packets ACKed at this rate within the window
        self.tries = 0
        self.pktAcked = 0
        self.succFails = 0 # longest run of successive failures in the window
        self.totalTX = 0 # sum of airtimes (us) of windowed transmissions
        self.avgTX = float("inf") # totalTX / success; inf until first success
        #pktsize/channelrate. pktsize = 1500 bytes
        self.losslessTX = tx_time(rate, 0, 1500) #microseconds
        self.window = [] #packets rcvd in last 10s
    def __repr__(self):
        return ("Bitrate %r mbps: \n"
                "  tries: %r \n"
                "  pktsAcked: %r \n"
                "  succFails: %r \n"
                "  totalTX: %r microseconds \n"
                "  avgTx: %r microseconds \n"
                "  losslessTX: %r microseconds"
                % (self.rate, self.tries, self.pktAcked, self.succFails,
                   self.totalTX, self.avgTX, self.losslessTX))
# The modulation scheme used in 802.11g is orthogonal frequency-division multiplexing (OFDM)
# copied from 802.11a with data rates of 6, 9, 12, 18, 24, 36, 48, and 54 Mbit/s, and reverts
# to CCK (like the 802.11b standard) for 5.5 and 11 Mbit/s and DBPSK/DQPSK+DSSS for 1 and 2 Mbit/s.
# Even though 802.11g operates in the same frequency band as 802.11b, it can achieve higher
# data rates because of its heritage to 802.11a.
# Map of bit-rate (Mbps) -> mutable per-rate statistics (Rate instances).
RATES = dict((r, Rate(r)) for r in [1, 2, 5.5, 6, 9, 11, 12, 18, 24, 36, 48, 54])
#multi-rate retry returns an array of (rate, ntries) for the next n packets
def apply_rate(cur_time):
    """Pick the bit-rate(s) for the next packet (SampleRate decision).

    Returns a multi-rate-retry list of (rate_index, ntries) pairs; mutates
    the module globals npkts and currRate.
    """
    global currRate, npkts, nsuccess, NBYTES, NRETRIES
    remove_stale_results(cur_time)
    #"Increment the number of packets sent over the link"
    npkts += 1
    #"If no packets have been successfully acknowledged, return the
    # highest bit-rate that has not had 4 successive failures."
    # NOTE(review): this loop appends EVERY eligible rate (descending) as a
    # retry chain and leaves currRate at the LOWEST eligible rate, not the
    # highest — verify against the SampleRate thesis.
    if nsuccess == 0:
        rrates = [r[1] for r in sorted(RATES.items())]
        rrates.reverse()
        retry = []
        for r in rrates:
            if r.succFails < 4:
                currRate = r.rate
                retry.append((ieee80211_to_idx(currRate), NRETRIES))
        return retry
    # Every 10 packets, select a random non-failing bit rate w/ better avg tx
    #"If the number of packets sent over the link is a multiple of ten,"
    if (nsuccess != 0) and (npkts%10 == 0):
        #"select a random bit-rate from the bit-rates"
        cavgTX = RATES[currRate].avgTX
        #" that have not failed four successive times and that
        #have a minimum packet transmission time lower than the
        #current bit-rate's average transmission time."
        eligible = [r for i, r in RATES.items()
                    if r.losslessTX < cavgTX and r.succFails < 4]
        if len(eligible) > 0:
            sampleRate = choice(eligible).rate #select random rate from eligible
            return [(ieee80211_to_idx(sampleRate), NRETRIES)]
    #"Otherwise, send packet at the bit-rate that has the lowest avg transmission time"
    # Trusts that currRate is properly maintained (by calculateMin) to be the
    # rate with the lowest average transmission time.
    return [(ieee80211_to_idx(currRate), NRETRIES)]
#"When process f eedback() runs, it updates information that tracks
# the number of samples and recalculates the average transmission
# time for the bit-rate and destination. process_feedback() performs
# the following operations:"
def process_feedback(status, timestamp, delay, tries):
    """Update per-rate statistics after a transmission attempt.

    Args:
        status: True if the packet was received (ACKed) successfully.
        timestamp: time the packet was sent.
        delay: RTT for the entire process (including multiple tries), in
            nanoseconds. Currently unused here.
        tries: array of (bitrate_index, nretries) pairs; only the first
            entry is consumed.
    """
    global currRate, npkts, nsuccess, NBYTES
    (bitrate, nretries) = tries[0]
    nretries -= 1
    # Convert the rate-table index back to Mbps for the RATES lookup.
    bitrate = rates.RATES[bitrate].mbps
    #"Calculate the transmission time for the packet based on the
    # bit-rate and number of retries using Equation 5.1 below."
    tx = tx_time(bitrate, nretries, NBYTES)
    #"Look up the destination and add the transmission time to the
    # total transmission times for the bit-rate."
    br = RATES[bitrate]
    if not status:
        #"If the packet failed, increment the number of successive
        # failures for the bit-rate."
        br.succFails += 1
    else:
        #"Otherwise reset it."
        br.succFails = 0
        #"If the packet succeeded, increment the number of successful
        # packets sent at that bit-rate."
        br.success += 1
        nsuccess += 1
    #"Re-calculate the average transmission time for the bit-rate
    # based on the sum of transmission times and the number of
    # successful packets sent at that bit-rate."
    # Failed attempts' airtime is also charged to totalTX, which is how the
    # algorithm penalizes lossy rates.
    br.totalTX += tx
    if br.success == 0:
        br.avgTX = float("inf")
    else:
        br.avgTX = br.totalTX/br.success
    #"Set the current-bit rate for the destination to the one with the
    # minimum average transmission time."
    calculateMin()
    #"Append the current time, packet status, transmission time, and
    # bit-rate to the list of transmission results."
    p = Packet(timestamp, status, tx, bitrate)
    br.window.append(p)
#"SampleRate's remove stale results() function removes results from
# the transmission results queue that were obtained longer than ten
# seconds ago."
def remove_stale_results(cur_time):
    """Drop transmission results older than 10 seconds and refresh stats.

    For every rate: removes stale packets from the window, subtracts their
    airtime (and success count) from the running totals, recomputes the
    average transmission time, recomputes the longest run of successive
    failures in the remaining window, and finally re-selects currRate.

    Args:
        cur_time: current time in nanoseconds (windows are 10 s = 1e10 ns).
    """
    window_cutoff = cur_time - 1e10  # window size of 10 s, in nanoseconds
    for r in RATES.values():
        # Bug fix: the original called r.window.remove(p) while iterating
        # `for p in r.window`, which shifts the remaining elements past the
        # iterator and silently skips every packet after a removed one.
        # Partition into stale/fresh instead, then mutate.
        stale = [p for p in r.window if p.time_sent < window_cutoff]
        r.window = [p for p in r.window if p.time_sent >= window_cutoff]
        for p in stale:
            # Remove the transmission time from the totals for this rate.
            r.totalTX -= p.txTime
            # If the packet succeeded, decrement the success count too.
            if p.success:
                r.success -= 1
        # Recalculate the average transmission time from what remains.
        if r.success == 0:
            r.avgTX = float("inf")
        else:
            r.avgTX = r.totalTX/r.success
    for r in RATES.values():
        # Longest run of successive failures within the surviving window.
        succFails = 0
        maxSuccFails = 0
        for p in r.window:
            if p.success:
                if succFails > maxSuccFails:
                    maxSuccFails = succFails
                succFails = 0
            else:
                succFails += 1
        if succFails > maxSuccFails:
            maxSuccFails = succFails
        r.succFails = maxSuccFails
    # Reset the current bit-rate to the one with the smallest average
    # transmission time.
    calculateMin()
def calculateMin():
    """Set the global currRate to the rate with minimum average TX time.

    Scans rates from highest to lowest; a never-tried lower rate whose
    best-case (lossless) airtime beats the current average is probed
    immediately (the loop breaks on it).
    """
    global currRate, npkts, nsuccess, NBYTES
    #set current rate to the one w/ min avg tx time
    c = RATES[currRate]
    if c.succFails > 4:
        # The current rate is failing badly; disqualify it so that any
        # other candidate below can win the comparison.
        c.avgTX = float("inf")
    for i, r in sorted(RATES.items(), reverse=True):
        # We've never tried this (lower) rate thoroughly before: pick it as
        # a probe if its lossless airtime beats the current average.
        if r.rate < c.rate and r.avgTX == float("inf") \
           and r.succFails == 0 and r.losslessTX < c.avgTX:
            c = r
            break
        # Otherwise prefer any non-failing rate with a lower average.
        if c.avgTX > r.avgTX and r.succFails < 4:
            c = r
    currRate = c.rate
| 35.433213 | 99 | 0.613449 |
from __future__ import division
from random import choice
from rates import ieee80211_to_idx
import rates
npkts = 0
nsuccess = 0
NBYTES = 1500
currRate = 54
NRETRIES = 1
backoff = {0:0, 1:155, 2:315, 3:635, 4:1275, 5:2555, 6:5115, 7:5115, 8:5115, 9:5115,
10:5115, 11:5115, 12:5115, 13:5115, 14:5115, 15:5115, 16:5115, 17:5115,
18:5115, 19:5115, 20:5115}
def bitrate_type(bitrate):
return rates.RATES[ieee80211_to_idx(bitrate)].phy
# number of retries r, SampleRate uses the following equation based on the 802.11 unicast
# retransmission mechanism detailed in Section 2.2"
def tx_time(bitrate, retries, nbytes):
global currRate, npkts, nsuccess, NBYTES
brtype = bitrate_type(bitrate)
if bitrate == 1:
difs = 50
sifs = 10
ack = 304
header = 192
elif brtype == "ds" or brtype == "dsss":
difs = 50
sifs = 10
ack = 304
header = 96
elif brtype == "ofdm":
difs = 28
sifs = 9
ack = 304
header = 20
else:
raise ValueError("Unknown bitrate type", brtype, bitrate)
return difs + backoff[retries] + (retries+1)*(sifs + ack + header + (nbytes * 8/(bitrate)))
class Packet:
def __init__(self, time_sent, success, txTime, rate):
self.time_sent = time_sent
self.success = success
self.txTime = txTime
self.rate = rate
def __repr__(self):
return ("Pkt sent at time %r, rate %r was successful: %r\n"
% (self.time_sent, self.rate, self.success))
class Rate:
def __init__(self, rate):
self.rate = rate #in mbps
self.success = 0
self.tries = 0
self.pktAcked = 0
self.succFails = 0
self.totalTX = 0
self.avgTX = float("inf")
#pktsize/channelrate. pktsize = 1500 bytes
self.losslessTX = tx_time(rate, 0, 1500) #microseconds
self.window = [] #packets rcvd in last 10s
def __repr__(self):
return ("Bitrate %r mbps: \n"
" tries: %r \n"
" pktsAcked: %r \n"
" succFails: %r \n"
" totalTX: %r microseconds \n"
" avgTx: %r microseconds \n"
" losslessTX: %r microseconds"
% (self.rate, self.tries, self.pktAcked, self.succFails,
self.totalTX, self.avgTX, self.losslessTX))
# The modulation scheme used in 802.11g is orthogonal frequency-division multiplexing (OFDM)
# copied from 802.11a with data rates of 6, 9, 12, 18, 24, 36, 48, and 54 Mbit/s, and reverts
# to CCK (like the 802.11b standard) for 5.5 and 11 Mbit/s and DBPSK/DQPSK+DSSS for 1 and 2 Mbit/s.
# Even though 802.11g operates in the same frequency band as 802.11b, it can achieve higher
# data rates because of its heritage to 802.11a.
RATES = dict((r, Rate(r)) for r in [1, 2, 5.5, 6, 9, 11, 12, 18, 24, 36, 48, 54])
#multi-rate retry returns an array of (rate, ntries) for the next n packets
def apply_rate(cur_time):
    """Choose the bitrate(s) for the next transmission.

    @param cur_time: current timestamp, used to expire stale samples
    @returns: multi-rate-retry list of (rate_index, ntries) tuples
    """
    global currRate, npkts, nsuccess, NBYTES, NRETRIES
    remove_stale_results(cur_time)
    # "Increment the number of packets sent over the link"
    npkts += 1
    # "If no packets have been successfully acknowledged, return the
    #  highest bit-rate that has not had 4 successive failures."
    if nsuccess == 0:
        rrates = [r[1] for r in sorted(RATES.items())]
        rrates.reverse()
        retry = []
        for r in rrates:
            if r.succFails < 4:
                currRate = r.rate
                retry.append((ieee80211_to_idx(currRate), NRETRIES))
                # Returns on the first eligible rate, so the list always
                # holds exactly one entry here.
                return retry
    # Every 10 packets, select a random non-failing bit rate w/ better avg tx
    # "If the number of packets sent over the link is a multiple of ten,"
    if (nsuccess != 0) and (npkts % 10 == 0):
        # "select a random bit-rate from the bit-rates that have not failed
        #  four successive times and that have a minimum packet transmission
        #  time lower than the current bit-rate's average transmission time."
        cavgTX = RATES[currRate].avgTX
        eligible = [r for i, r in RATES.items()
                    if r.losslessTX < cavgTX and r.succFails < 4]
        if len(eligible) > 0:
            # NOTE(review): `choice` is presumably random.choice imported
            # earlier in the file -- confirm.
            sampleRate = choice(eligible).rate
            return [(ieee80211_to_idx(sampleRate), NRETRIES)]
    # Otherwise keep using the current best rate.
    return [(ieee80211_to_idx(currRate), NRETRIES)]
# the number of samples and recalculates the average transmission
# time for the bit-rate and destination. process_feedback() performs
# the following operations:"
def process_feedback(status, timestamp, delay, tries):
    """Record the outcome of one transmission and update rate statistics.

    @param status: truthy if the packet was ACKed
    @param timestamp: time the packet was sent
    @param delay: unused here; kept for interface compatibility
    @param tries: list of (rate_index, ntries); only the first entry is used
    """
    global currRate, npkts, nsuccess, NBYTES
    (bitrate, nretries) = tries[0]
    # Convert the tries count into a retry count.
    nretries -= 1
    # Map the rate index back to Mbps.
    # NOTE(review): `rates` is a module-level table not visible in this
    # chunk and distinct from the local RATES dict -- confirm.
    bitrate = rates.RATES[bitrate].mbps
    # "Calculate the transmission time for the packet based on the
    #  bit-rate and number of retries using Equation 5.1 below."
    tx = tx_time(bitrate, nretries, NBYTES)
    # Add the transmission time to the total transmission times for the
    # bit-rate (done below, after the success/failure counters).
    br = RATES[bitrate]
    if not status:
        # Packet failed: extend the run of successive failures.
        br.succFails += 1
    else:
        # "Otherwise reset it."
        br.succFails = 0
        # "If the packet succeeded, increment the number of successful
        #  packets sent at that bit-rate."
        br.success += 1
        nsuccess += 1
    # Recalculate the average transmission time for the bit-rate
    # based on the sum of transmission times and the number of
    # successful packets sent at that bit-rate.
    br.totalTX += tx
    if br.success == 0:
        br.avgTX = float("inf")
    else:
        br.avgTX = br.totalTX / br.success
    # Re-select the current bit-rate: the one with the
    # minimum average transmission time.
    calculateMin()
    # Append this result (time, status, tx time, bit-rate)
    # to the list of transmission results.
    p = Packet(timestamp, status, tx, bitrate)
    br.window.append(p)
# the transmission results queue that were obtained longer than ten
# seconds ago."
def remove_stale_results(cur_time):
    """Expire transmission results older than the sampling window and
    refresh per-rate statistics.

    Fix: the original removed entries from ``r.window`` while iterating
    the very same list, which makes the iterator skip the element that
    follows every removal; iterate over a snapshot instead.

    @param cur_time: current timestamp in the same units as
        Packet.time_sent
    """
    # NOTE(review): 1e10 as "10s" implies nanosecond timestamps while tx
    # times elsewhere are in microseconds -- confirm units.
    window_cutoff = cur_time - 1e10  # window size of 10s
    for r in RATES.values():
        # Iterate a copy so removals don't skip the following element.
        for p in list(r.window):
            # "For each stale transmission result, it does the following"
            if p.time_sent < window_cutoff:
                # "Remove the transmission time from the total
                #  transmission times at that bit-rate to that destination."
                r.window.remove(p)
                r.totalTX -= p.txTime
                # "If the packet succeeded, decrement the number of
                #  successful packets at that bit-rate to that destination."
                if p.success:
                    r.success -= 1
        # "After remove_stale_results() performs these operations for each
        #  stale sample, it recalculates the minimum average transmission
        #  times for each bit-rate and destination."
        if r.success == 0:
            r.avgTX = float("inf")
        else:
            r.avgTX = r.totalTX / r.success
    # Recompute each rate's longest run of successive failures from the
    # packets still inside the window.
    for r in RATES.values():
        succFails = 0
        maxSuccFails = 0
        for p in r.window:
            if p.success:
                if succFails > maxSuccFails:
                    maxSuccFails = succFails
                succFails = 0
            else:
                succFails += 1
        if succFails > maxSuccFails:
            maxSuccFails = succFails
        r.succFails = maxSuccFails
    # "remove_stale_results() then sets the current bit-rate for each
    #  destination to the one with the smallest average transmission time."
    calculateMin()
def calculateMin():
    """Re-select the global current bitrate: the one with the smallest
    average transmission time among non-failing rates."""
    global currRate, npkts, nsuccess, NBYTES
    # set current rate to the one w/ min avg tx time
    c = RATES[currRate]
    # NOTE(review): the failure threshold here is `> 4`, while the rest of
    # the module treats eligibility as `succFails < 4` -- confirm whether
    # this off-by-one is intentional.
    if c.succFails > 4:
        c.avgTX = float("inf")
    for i, r in sorted(RATES.items(), reverse=True):
        # Prefer a lower, never-thoroughly-tried rate whose best-case
        # (lossless) time already beats the current average, then stop.
        if r.rate < c.rate and r.avgTX == float("inf") \
                and r.succFails == 0 and r.losslessTX < c.avgTX:
            c = r
            break
        # Otherwise take any non-failing rate with a better average.
        if c.avgTX > r.avgTX and r.succFails < 4:
            c = r
    currRate = c.rate
| true | true |
f7354c67c9dc8c68e73f2c80fed876c54bc5a839 | 38,009 | py | Python | yolk/cli.py | tastuteche/yolk-py3 | 8944b4f0c78ef91451c796e93f482c9dbd23e316 | [
"BSD-3-Clause"
] | 77 | 2015-01-02T19:59:08.000Z | 2022-02-14T06:59:59.000Z | yolk/cli.py | tastuteche/yolk-py3 | 8944b4f0c78ef91451c796e93f482c9dbd23e316 | [
"BSD-3-Clause"
] | 19 | 2015-01-02T20:01:44.000Z | 2020-07-06T22:54:04.000Z | yolk/cli.py | tastuteche/yolk-py3 | 8944b4f0c78ef91451c796e93f482c9dbd23e316 | [
"BSD-3-Clause"
] | 19 | 2015-01-11T11:08:11.000Z | 2022-01-01T13:13:45.000Z | # pylint: disable-msg=W0613,W0612,W0212,W0511,R0912,C0322,W0704
# W0511 = XXX (my own todo's)
"""
cli.py
======
Desc: Command-line tool for listing Python packages installed by setuptools,
package metadata, package dependencies, and querying The Cheese Shop
(PyPI) for Python package release information such as which installed
packages have updates available.
Author: Rob Cakebread <gentoodev a t gmail.com>
License : BSD (See COPYING)
"""
__docformat__ = 'restructuredtext'
import inspect
import re
import pprint
import os
import sys
import optparse
import pkg_resources
import webbrowser
import logging
import platform
if platform.python_version().startswith('2'):
from xmlrpclib import Fault as XMLRPCFault
from urllib import urlretrieve
from urlparse import urlparse
else:
from xmlrpc.client import Fault as XMLRPCFault
from urllib.request import urlretrieve
from urllib.parse import urlparse
from distutils.sysconfig import get_python_lib
from yolk.metadata import get_metadata
from yolk.yolklib import get_highest_version, Distributions
from yolk.pypi import CheeseShop
from yolk.setuptools_support import get_download_uri, get_pkglist
from yolk.plugins import load_plugins
from yolk.utils import run_command, command_successful
from yolk.__init__ import __version__ as VERSION
class StdOut:
    """
    Filter stdout or stderr from specific modules

    Wraps a stream and drops writes originating from any module whose
    ``__name__`` is in `modulenames`; every other attribute access is
    delegated to the wrapped stream.  So far this is just used for
    pkg_resources/distutils chatter.
    """

    def __init__(self, stream, modulenames):
        self.stdout = stream
        # Modules to squelch
        self.modulenames = modulenames

    def __getattr__(self, attribute):
        """Delegate unknown attributes to the wrapped stream.

        Fix: ``dict.has_key()`` was removed in Python 3 (this file
        explicitly supports Python 3); use the ``in`` operator instead.
        """
        if attribute not in self.__dict__ or attribute == '__doc__':
            return getattr(self.stdout, attribute)
        return self.__dict__[attribute]

    def flush(self):
        """Bug workaround for Python 3.2+:
        Exception AttributeError: 'flush' in <yolk.cli.StdOut object...
        """
        pass

    def write(self, inline):
        """
        Write a line to stdout if it isn't in a blacklist

        Try to get the name of the calling module to see if we want
        to filter it. If there is no calling module, use current
        frame in case there's a traceback before there is any calling module
        """
        frame = inspect.currentframe().f_back
        if frame:
            mod = frame.f_globals.get('__name__')
        else:
            mod = sys._getframe(0).f_globals.get('__name__')
        if not mod in self.modulenames:
            self.stdout.write(inline)

    def writelines(self, inline):
        """Write multiple lines"""
        for line in inline:
            self.write(line)
class Yolk(object):
"""
Main class for yolk
"""
    def __init__(self):
        # PyPI project name with proper case
        self.project_name = ""
        # PyPI project version
        self.version = ""
        # List of all versions not hidden on PyPI
        self.all_versions = []
        self.pkg_spec = []
        self.options = None
        self.logger = logging.getLogger("yolk")
        # Squelch output from setuptools.
        # NOTE: process-wide side effect -- sys.stdout/stderr are replaced
        # with filtering StdOut wrappers for the life of the process.
        # Add future offenders to this list.
        shut_up = ['distutils.log']
        sys.stdout = StdOut(sys.stdout, shut_up)
        sys.stderr = StdOut(sys.stderr, shut_up)
        # XML-RPC client; created lazily in run() when PyPI is needed.
        self.pypi = None
    def get_plugin(self, method):
        """
        Return plugin object if CLI option is activated and method exists

        @param method: name of plugin's method we're calling
        @type method: string

        @returns: list of enabled plugins that implement `method`
        """
        all_plugins = []
        for entry_point in pkg_resources.iter_entry_points('yolk.plugins'):
            plugin_obj = entry_point.load()
            plugin = plugin_obj()
            plugin.configure(self.options, None)
            if plugin.enabled:
                if not hasattr(plugin, method):
                    # NOTE(review): logger.warn is a deprecated alias of
                    # logger.warning on Python 3.
                    self.logger.warn("Error: plugin has no method: %s" % method)
                    plugin = None
                else:
                    all_plugins.append(plugin)
        return all_plugins
def set_log_level(self):
"""
Set log level according to command-line options
@returns: logger object
"""
if self.options.debug:
self.logger.setLevel(logging.DEBUG)
elif self.options.quiet:
self.logger.setLevel(logging.ERROR)
else:
self.logger.setLevel(logging.INFO)
self.logger.addHandler(logging.StreamHandler())
return self.logger
    def run(self):
        """
        Perform actions based on CLI options

        @returns: status code
        """
        opt_parser = setup_opt_parser()
        (self.options, remaining_args) = opt_parser.parse_args()
        logger = self.set_log_level()
        pkg_spec = validate_pypi_opts(opt_parser)
        if not pkg_spec:
            pkg_spec = remaining_args
        self.pkg_spec = pkg_spec
        # No options at all, or too many positional arguments: show usage.
        if not self.options.pypi_search and (len(sys.argv) == 1 or
                len(remaining_args) > 2):
            opt_parser.print_help()
            return 2
        # Options that depend on querying installed packages, not PyPI.
        # We find the proper case for package names if they are installed,
        # otherwise PyPI returns the correct case.
        if self.options.show_deps or self.options.show_all or \
                self.options.show_active or self.options.show_non_active or \
                (self.options.show_updates and pkg_spec):
            want_installed = True
        else:
            want_installed = False
        # show_updates may or may not have a pkg_spec
        if not want_installed or self.options.show_updates:
            self.pypi = CheeseShop(self.options.debug)
            # XXX: We should return 2 here if we couldn't create xmlrpc server
        if pkg_spec:
            (self.project_name, self.version, self.all_versions) = \
                self.parse_pkg_ver(want_installed)
            if want_installed and not self.project_name:
                logger.error("%s is not installed." % pkg_spec[0])
                return 1
        # I could prefix all these with 'cmd_' and the methods also
        # and then iterate over the `options` dictionary keys...
        commands = ['show_deps', 'query_metadata_pypi', 'fetch',
                    'versions_available', 'show_updates', 'browse_website',
                    'show_download_links', 'pypi_search',
                    'show_pypi_changelog', 'show_pypi_releases',
                    'yolk_version', 'show_all', 'show_active',
                    'show_non_active', 'show_entry_map', 'show_entry_points']
        # Run first command it finds, and only the first command, then return
        # XXX: Check if more than one command was set in options and give error?
        for action in commands:
            if getattr(self.options, action):
                return getattr(self, action)()
        opt_parser.print_help()
    def show_active(self):
        """
        Show installed active packages

        @returns: status code from show_distributions
        """
        return self.show_distributions("active")

    def show_non_active(self):
        """
        Show installed non-active packages

        @returns: status code from show_distributions
        """
        return self.show_distributions("nonactive")

    def show_all(self):
        """
        Show all installed packages

        @returns: status code from show_distributions
        """
        return self.show_distributions("all")
    def show_updates(self):
        """
        Check installed packages for available updates on PyPI

        Uses `self.project_name` when one was given on the command line;
        otherwise checks every installed package.

        @returns: 0
        """
        dists = Distributions()
        if self.project_name:
            # Check for a single package
            pkg_list = [self.project_name]
        else:
            # Check for every installed package
            pkg_list = get_pkglist()
        found = None
        for pkg in pkg_list:
            for (dist, active) in dists.get_distributions("all", pkg,
                    dists.get_highest_installed(pkg)):
                (project_name, versions) = \
                    self.pypi.query_versions_pypi(dist.project_name)
                if versions:
                    # PyPI returns them in chronological order,
                    # but who knows if its guaranteed in the API?
                    # Make sure we grab the highest version:
                    newest = get_highest_version(versions)
                    if newest != dist.version:
                        # We may have newer than what PyPI knows about
                        if pkg_resources.parse_version(dist.version) < \
                                pkg_resources.parse_version(newest):
                            found = True
                            print(" %s %s (%s)" % (project_name, dist.version,
                                                   newest))
        if not found and self.project_name:
            self.logger.info("You have the latest version installed.")
        elif not found:
            self.logger.info("No newer packages found at The Cheese Shop")
        return 0
    def show_distributions(self, show):
        """
        Show list of installed activated OR non-activated packages

        @param show: type of pkgs to show (all, active or nonactive)
        @type show: string

        @returns: None on success, or 2 if no matching packages were found
        """
        # NOTE(review): show_metadata is assigned but unused here (metadata
        # display is handled inside print_metadata) -- confirm.
        show_metadata = self.options.metadata
        # Search for any plugins with active CLI options with add_column()
        # method.
        plugins = self.get_plugin("add_column")
        # Some locations show false positive for 'development' packages:
        ignores = ["/UNIONFS", "/KNOPPIX.IMG"]
        # Check if we're in a workingenv
        # See http://cheeseshop.python.org/pypi/workingenv.py
        workingenv = os.environ.get('WORKING_ENV')
        if workingenv:
            ignores.append(workingenv)
        dists = Distributions()
        results = None
        for (dist, active) in dists.get_distributions(show, self.project_name,
                self.version):
            metadata = get_metadata(dist)
            for prefix in ignores:
                if dist.location.startswith(prefix):
                    dist.location = dist.location.replace(prefix, "")
            # Case-insensitve search because of Windows
            if dist.location.lower().startswith(get_python_lib().lower()):
                develop = ""
            else:
                develop = dist.location
            if metadata:
                add_column_text = ""
                for my_plugin in plugins:
                    # See if package is 'owned' by a package manager such as
                    # portage, apt, rpm etc.
                    add_column_text += my_plugin.add_column(dist) + " "
                self.print_metadata(metadata, develop, active, add_column_text)
            else:
                print(str(dist) + " has no metadata")
            results = True
        if not results and self.project_name:
            if self.version:
                pkg_spec = "%s==%s" % (self.project_name, self.version)
            else:
                pkg_spec = "%s" % self.project_name
            if show == "all":
                self.logger.error("There are no versions of %s installed."
                                  % pkg_spec)
            else:
                self.logger.error("There are no %s versions of %s installed."
                                  % (show, pkg_spec))
            return 2
        elif show == "all" and results and self.options.fields:
            print("Versions with '*' are non-active.")
            print("Versions with '!' are deployed in development mode.")
def print_metadata(self, metadata, develop, active, installed_by):
"""
Print out formatted metadata
@param metadata: package's metadata
@type metadata: pkg_resources Distribution obj
@param develop: path to pkg if its deployed in development mode
@type develop: string
@param active: show if package is activated or not
@type active: boolean
@param installed_by: Shows if pkg was installed by a package manager other
than setuptools
@type installed_by: string
@returns: None
"""
show_metadata = self.options.metadata
if self.options.fields:
fields = self.options.fields.split(',')
fields = map(str.strip, fields)
else:
fields = []
version = metadata['Version']
#When showing all packages, note which are not active:
if active:
if fields:
active_status = ""
else:
active_status = "active"
else:
if fields:
active_status = "*"
else:
active_status = "non-active"
if develop:
if fields:
development_status = "! (%s)" % develop
else:
development_status = "development (%s)" % develop
else:
development_status = installed_by
status = "%s %s" % (active_status, development_status)
if fields:
print('%s (%s)%s %s' % (metadata['Name'], version, active_status,
development_status))
else:
# Need intelligent justification
print(metadata['Name'].ljust(15) + " - " + version.ljust(12) + \
" - " + status)
if fields:
#Only show specific fields, using case-insensitive search
fields = map(str.lower, fields)
for field in metadata.keys():
if field.lower() in fields:
print(' %s: %s' % (field, metadata[field]))
print()
elif show_metadata:
#Print all available metadata fields
for field in metadata.keys():
if field != 'Name' and field != 'Summary':
print(' %s: %s' % (field, metadata[field]))
def show_deps(self):
"""
Show dependencies for package(s)
@returns: 0 - sucess 1 - No dependency info supplied
"""
pkgs = pkg_resources.Environment()
for pkg in pkgs[self.project_name]:
if not self.version:
print(pkg.project_name, pkg.version)
i = len(pkg._dep_map.values()[0])
if i:
while i:
if not self.version or self.version and \
pkg.version == self.version:
if self.version and i == len(pkg._dep_map.values()[0]):
print(pkg.project_name, pkg.version)
print(" " + str(pkg._dep_map.values()[0][i - 1]))
i -= 1
else:
self.logger.info(\
"No dependency information was supplied with the package.")
return 1
return 0
    def show_pypi_changelog(self):
        """
        Show detailed PyPI ChangeLog for the last `hours`

        The number of hours comes from `self.options.show_pypi_changelog`.

        @returns: 0 = success or 1 if failed to retrieve from XML-RPC server
        """
        hours = self.options.show_pypi_changelog
        if not hours.isdigit():
            self.logger.error("Error: You must supply an integer.")
            return 1
        try:
            changelog = self.pypi.changelog(int(hours))
        except XMLRPCFault as err_msg:
            self.logger.error(err_msg)
            self.logger.error("ERROR: Couldn't retrieve changelog.")
            return 1
        last_pkg = ''
        for entry in changelog:
            pkg = entry[0]
            # Print the "<name> <version>" header only once per package;
            # subsequent entries for the same package show just the action.
            if pkg != last_pkg:
                print("%s %s\n\t%s" % (entry[0], entry[1], entry[3]))
                last_pkg = pkg
            else:
                print("\t%s" % entry[3])
        return 0
    def show_pypi_releases(self):
        """
        Show PyPI releases for the last number of `hours`

        The number of hours comes from `self.options.show_pypi_releases`.

        @returns: 0 = success or 1 if failed to retrieve from XML-RPC server
        """
        try:
            hours = int(self.options.show_pypi_releases)
        except ValueError:
            self.logger.error("ERROR: You must supply an integer.")
            return 1
        try:
            latest_releases = self.pypi.updated_releases(hours)
        except XMLRPCFault as err_msg:
            self.logger.error(err_msg)
            self.logger.error("ERROR: Couldn't retrieve latest releases.")
            return 1
        # Each release is a (name, version, ...) sequence.
        for release in latest_releases:
            print("%s %s" % (release[0], release[1]))
        return 0
    def show_download_links(self):
        """
        Query PyPI for pkg download URI for a package

        @returns: 0
        """
        # In case they specify version as 'dev' instead of using -T svn,
        # don't show three svn URI's
        if self.options.file_type == "all" and self.version == "dev":
            self.options.file_type = "svn"
        if self.options.file_type == "svn":
            version = "dev"
        else:
            if self.version:
                version = self.version
            else:
                # Default to the newest release on PyPI.
                version = self.all_versions[0]
        if self.options.file_type == "all":
            # Search for source, egg, and svn
            self.print_download_uri(version, True)
            self.print_download_uri(version, False)
            self.print_download_uri("dev", True)
        else:
            if self.options.file_type == "source":
                source = True
            else:
                source = False
            self.print_download_uri(version, source)
        return 0
    def print_download_uri(self, version, source):
        """
        Print the download URL for one (version, type) combination.

        @param version: version number or 'dev' for svn
        @type version: string

        @param source: download source tarball rather than an egg
        @type source: boolean

        @returns: None
        """
        if version == "dev":
            pkg_type = "subversion"
            source = True
        elif source:
            pkg_type = "source"
        else:
            pkg_type = "egg"
        # Use setuptools monkey-patch to grab url
        url = get_download_uri(self.project_name, version, source,
                               self.options.pypi_index)
        if url:
            print("%s" % url)
        else:
            self.logger.info("No download URL found for %s" % pkg_type)
    def fetch(self):
        """
        Download a package

        The file type (source/egg/svn) comes from `self.options.file_type`.

        @returns: 0 = success or 1 if failed download
        """
        # Default type to download
        source = True
        directory = "."
        if self.options.file_type == "svn":
            # NOTE(review): this local `version` is assigned but never used
            # in the svn branch.
            version = "dev"
            svn_uri = get_download_uri(self.project_name,
                                       "dev", True)
            if svn_uri:
                directory = self.project_name + "_svn"
                return self.fetch_svn(svn_uri, directory)
            else:
                self.logger.error(
                    "ERROR: No subversion repository found for %s" %
                    self.project_name)
                return 1
        elif self.options.file_type == "source":
            source = True
        elif self.options.file_type == "egg":
            source = False
        uri = get_download_uri(self.project_name, self.version, source)
        if uri:
            return self.fetch_uri(directory, uri)
        else:
            self.logger.error("No %s URI found for package: %s " %
                              (self.options.file_type, self.project_name))
            return 1
def fetch_uri(self, directory, uri):
"""
Use ``urllib.urlretrieve`` to download package to file in sandbox dir.
@param directory: directory to download to
@type directory: string
@param uri: uri to download
@type uri: string
@returns: 0 = success or 1 for failed download
"""
filename = os.path.basename(urlparse(uri)[2])
if os.path.exists(filename):
self.logger.error("ERROR: File exists: " + filename)
return 1
try:
downloaded_filename, headers = urlretrieve(uri, filename)
self.logger.info("Downloaded ./" + filename)
except IOError as err_msg:
self.logger.error("Error downloading package %s from URL %s" \
% (filename, uri))
self.logger.error(str(err_msg))
return 1
if headers.gettype() in ["text/html"]:
dfile = open(downloaded_filename)
if re.search("404 Not Found", "".join(dfile.readlines())):
dfile.close()
self.logger.error("'404 Not Found' error")
return 1
dfile.close()
return 0
    def fetch_svn(self, svn_uri, directory):
        """
        Fetch subversion repository

        @param svn_uri: subversion repository uri to check out
        @type svn_uri: string

        @param directory: directory to download to
        @type directory: string

        @returns: 0 = success or 1 for failed download
        """
        if not command_successful("svn --version"):
            self.logger.error("ERROR: Do you have subversion installed?")
            return 1
        if os.path.exists(directory):
            self.logger.error("ERROR: Checkout directory exists - %s"
                              % directory)
            return 1
        try:
            os.mkdir(directory)
        except OSError as err_msg:
            self.logger.error("ERROR: " + str(err_msg))
            return 1
        cwd = os.path.realpath(os.curdir)
        os.chdir(directory)
        self.logger.info("Doing subversion checkout for %s" % svn_uri)
        # NOTE(review): hard-coded /usr/bin/svn path, while the check above
        # uses plain `svn` from PATH -- confirm intended.
        status, output = run_command("/usr/bin/svn co %s" % svn_uri)
        self.logger.info(output)
        os.chdir(cwd)
        self.logger.info("subversion checkout is in directory './%s'"
                         % directory)
        return 0
def browse_website(self, browser=None):
"""
Launch web browser at project's homepage
@param browser: name of web browser to use
@type browser: string
@returns: 0 if homepage found, 1 if no homepage found
"""
if len(self.all_versions):
metadata = self.pypi.release_data(self.project_name, \
self.all_versions[0])
self.logger.debug("DEBUG: browser: %s" % browser)
if metadata.has_key("home_page"):
self.logger.info("Launching browser: %s" \
% metadata["home_page"])
if browser == 'konqueror':
browser = webbrowser.Konqueror()
else:
browser = webbrowser.get()
browser.open(metadata["home_page"], 2)
return 0
self.logger.error("No homepage URL found.")
return 1
    def query_metadata_pypi(self):
        """
        Show pkg metadata queried from PyPI

        @returns: 0
        """
        if self.version and self.version in self.all_versions:
            metadata = self.pypi.release_data(self.project_name, self.version)
        else:
            # Give highest version
            metadata = self.pypi.release_data(self.project_name,
                                              self.all_versions[0])
        if metadata:
            for key in metadata.keys():
                # With -f, only the exactly-matching field name is shown.
                if not self.options.fields or (self.options.fields and
                        self.options.fields == key):
                    print("%s: %s" % (key, metadata[key]))
        return 0
def versions_available(self):
"""
Query PyPI for a particular version or all versions of a package
@returns: 0 if version(s) found or 1 if none found
"""
if self.version:
spec = "%s==%s" % (self.project_name, self.version)
else:
spec = self.project_name
if self.all_versions and self.version in self.all_versions:
print_pkg_versions(self.project_name, [self.version])
elif not self.version and self.all_versions:
print_pkg_versions(self.project_name, self.all_versions)
else:
if self.version:
self.logger.error("No pacakge found for version %s" \
% self.version)
else:
self.logger.error("No pacakge found for %s" % self.project_name)
return 1
return 0
def parse_search_spec(self, spec):
"""
Parse search args and return spec dict for PyPI
* Owwww, my eyes!. Re-write this.
@param spec: Cheese Shop package search spec
e.g.
name=Cheetah
license=ZPL
license=ZPL AND name=Cheetah
@type spec: string
@returns: tuple with spec and operator
"""
usage = \
"""You can search PyPI by the following:
name
version
author
author_email
maintainer
maintainer_email
home_page
license
summary
description
keywords
platform
download_url
e.g. yolk -S name=Cheetah
yolk -S name=yolk AND license=PSF
"""
if not spec:
self.logger.error(usage)
return (None, None)
try:
spec = (" ").join(spec)
operator = 'AND'
first = second = ""
if " AND " in spec:
(first, second) = spec.split('AND')
elif " OR " in spec:
(first, second) = spec.split('OR')
operator = 'OR'
else:
first = spec
(key1, term1) = first.split('=')
key1 = key1.strip()
if second:
(key2, term2) = second.split('=')
key2 = key2.strip()
spec = {}
spec[key1] = term1
if second:
spec[key2] = term2
except:
self.logger.error(usage)
spec = operator = None
return (spec, operator)
def pypi_search(self):
"""
Search PyPI by metadata keyword
e.g. yolk -S name=yolk AND license=GPL
@param spec: Cheese Shop search spec
@type spec: list of strings
spec examples:
["name=yolk"]
["license=GPL"]
["name=yolk", "AND", "license=GPL"]
@returns: 0 on success or 1 if mal-formed search spec
"""
spec = self.pkg_spec
#Add remainging cli arguments to options.pypi_search
search_arg = self.options.pypi_search
spec.insert(0, search_arg.strip())
(spec, operator) = self.parse_search_spec(spec)
if not spec:
return 1
for pkg in self.pypi.search(spec, operator):
if pkg['summary']:
summary = pkg['summary'].encode('utf-8')
else:
summary = ""
print("""%s (%s):
%s
""" % (pkg['name'].encode('utf-8'), pkg["version"],
summary))
return 0
    def show_entry_map(self):
        """
        Show entry map for a package

        The package name comes from `self.options.show_entry_map`.

        @returns: 0 for success or 1 if error
        """
        pprinter = pprint.PrettyPrinter()
        try:
            entry_map = pkg_resources.get_entry_map(
                self.options.show_entry_map)
            if entry_map:
                pprinter.pprint(entry_map)
        except pkg_resources.DistributionNotFound:
            self.logger.error("Distribution not found: %s"
                              % self.options.show_entry_map)
            return 1
        return 0
def show_entry_points(self):
"""
Show entry points for a module
@returns: 0 for success or 1 if error
"""
found = False
for entry_point in \
pkg_resources.iter_entry_points(self.options.show_entry_points):
found = True
try:
plugin = entry_point.load()
print(plugin.__module__)
print(" %s" % entry_point)
if plugin.__doc__:
print(plugin.__doc__)
print
except ImportError:
pass
if not found:
self.logger.error("No entry points found for %s" \
% self.options.show_entry_points)
return 1
return 0
def yolk_version(self):
"""
Show yolk's version
@returns: 0
"""
self.logger.info("yolk version %s" % VERSION)
return 0
    def parse_pkg_ver(self, want_installed):
        """
        Return tuple with project_name and version from CLI args

        If the user gave the wrong case for the project name, this
        corrects it.  Exits the process with status 2 when the package is
        unknown to PyPI.

        @param want_installed: whether package we want is installed or not
        @type want_installed: boolean

        @returns: tuple(project_name, version, all_versions)
        """
        all_versions = []
        arg_str = ("").join(self.pkg_spec)
        if "==" not in arg_str:
            # No version specified
            project_name = arg_str
            version = None
        else:
            (project_name, version) = arg_str.split("==")
            project_name = project_name.strip()
            version = version.strip()
        # Find proper case for package name
        if want_installed:
            dists = Distributions()
            project_name = dists.case_sensitive_name(project_name)
        else:
            (project_name, all_versions) = \
                self.pypi.query_versions_pypi(project_name)
            if not len(all_versions):
                msg = "I'm afraid we have no '%s' at " % project_name
                msg += "The Cheese Shop. A little Red Leicester, perhaps?"
                self.logger.error(msg)
                sys.exit(2)
        return (project_name, version, all_versions)
def setup_opt_parser():
    """
    Setup the optparser

    Builds the global options plus two option groups: one for querying
    the local installation and one for querying PyPI, then lets any
    installed plugins add their own options.

    @returns: opt_parser.OptionParser
    """
    # pylint: disable-msg=C0301
    # line too long
    usage = "usage: %prog [options]"
    opt_parser = optparse.OptionParser(usage=usage)
    opt_parser.add_option("--version", action='store_true', dest=
                          "yolk_version", default=False, help=
                          "Show yolk version and exit.")
    opt_parser.add_option("--debug", action='store_true', dest=
                          "debug", default=False, help=
                          "Show debugging information.")
    opt_parser.add_option("-q", "--quiet", action='store_true', dest=
                          "quiet", default=False, help=
                          "Show less output.")
    # Options that inspect the local installation.
    group_local = optparse.OptionGroup(opt_parser,
        "Query installed Python packages",
        "The following options show information about installed Python packages. Activated packages are normal packages on sys.path that can be imported. Non-activated packages need 'pkg_resources.require()' before they can be imported, such as packages installed with 'easy_install --multi-version'. PKG_SPEC can be either a package name or package name and version e.g. Paste==0.9")
    group_local.add_option("-l", "--list", action='store_true', dest=
                           "show_all", default=False, help=
                           "List all Python packages installed by distutils or setuptools. Use PKG_SPEC to narrow results.")
    group_local.add_option("-a", "--activated", action='store_true',
                           dest="show_active", default=False, help=
                           'List activated packages installed by distutils or ' +
                           'setuptools. Use PKG_SPEC to narrow results.')
    group_local.add_option("-n", "--non-activated", action='store_true',
                           dest="show_non_active", default=False, help=
                           'List non-activated packages installed by distutils or ' +
                           'setuptools. Use PKG_SPEC to narrow results.')
    group_local.add_option("-m", "--metadata", action='store_true', dest=
                           "metadata", default=False, help=
                           'Show all metadata for packages installed by ' +
                           'setuptools (use with -l -a or -n)')
    group_local.add_option("-f", "--fields", action="store", dest=
                           "fields", default=False, help=
                           'Show specific metadata fields. ' +
                           '(use with -m or -M)')
    group_local.add_option("-d", "--depends", action='store', dest=
                           "show_deps", metavar='PKG_SPEC',
                           help="Show dependencies for a package installed by " +
                           "setuptools if they are available.")
    group_local.add_option("--entry-points", action='store',
                           dest="show_entry_points", default=False, help=
                           'List entry points for a module. e.g. --entry-points nose.plugins',
                           metavar="MODULE")
    group_local.add_option("--entry-map", action='store',
                           dest="show_entry_map", default=False, help=
                           'List entry map for a package. e.g. --entry-map yolk',
                           metavar="PACKAGE_NAME")
    # Options that query the Python Package Index.
    group_pypi = optparse.OptionGroup(opt_parser,
        "PyPI (Cheese Shop) options",
        "The following options query the Python Package Index:")
    group_pypi.add_option("-C", "--changelog", action='store',
                          dest="show_pypi_changelog", metavar='HOURS',
                          default=False, help=
                          "Show detailed ChangeLog for PyPI for last n hours. ")
    group_pypi.add_option("-D", "--download-links", action='store',
                          metavar="PKG_SPEC", dest="show_download_links",
                          default=False, help=
                          "Show download URL's for package listed on PyPI. Use with -T to specify egg, source etc.")
    group_pypi.add_option("-F", "--fetch-package", action='store',
                          metavar="PKG_SPEC", dest="fetch",
                          default=False, help=
                          "Download package source or egg. You can specify a file type with -T")
    group_pypi.add_option("-H", "--browse-homepage", action='store',
                          metavar="PKG_SPEC", dest="browse_website",
                          default=False, help=
                          "Launch web browser at home page for package.")
    group_pypi.add_option("-I", "--pypi-index", action='store',
                          dest="pypi_index",
                          default=False, help=
                          "Specify PyPI mirror for package index.")
    group_pypi.add_option("-L", "--latest-releases", action='store',
                          dest="show_pypi_releases", metavar="HOURS",
                          default=False, help=
                          "Show PyPI releases for last n hours. ")
    group_pypi.add_option("-M", "--query-metadata", action='store',
                          dest="query_metadata_pypi", default=False,
                          metavar="PKG_SPEC", help=
                          "Show metadata for a package listed on PyPI. Use -f to show particular fields.")
    group_pypi.add_option("-S", "", action="store", dest="pypi_search",
                          default=False, help=
                          "Search PyPI by spec and optional AND/OR operator.",
                          metavar='SEARCH_SPEC <AND/OR SEARCH_SPEC>')
    group_pypi.add_option("-T", "--file-type", action="store", dest=
                          "file_type", default="all", help=
                          "You may specify 'source', 'egg', 'svn' or 'all' when using -D.")
    group_pypi.add_option("-U", "--show-updates", action='store_true',
                          dest="show_updates", metavar='<PKG_NAME>',
                          default=False, help=
                          "Check PyPI for updates on package(s).")
    group_pypi.add_option("-V", "--versions-available", action=
                          'store', dest="versions_available",
                          default=False, metavar='PKG_SPEC',
                          help="Show available versions for given package " +
                          "listed on PyPI.")
    opt_parser.add_option_group(group_local)
    opt_parser.add_option_group(group_pypi)
    # add opts from plugins
    # NOTE(review): all_plugins is never populated or used here -- confirm
    # it can be removed.
    all_plugins = []
    for plugcls in load_plugins(others=True):
        plug = plugcls()
        try:
            plug.add_options(opt_parser)
        except AttributeError:
            # Plugin exposes no extra CLI options.
            pass
    return opt_parser
def print_pkg_versions(project_name, versions):
    """Print one ``<name> <version>`` line per available version.

    @returns: None
    """
    for version in versions:
        print("{0} {1}".format(project_name, version))
def validate_pypi_opts(opt_parser):
"""
Check parse options that require pkg_spec
@returns: pkg_spec
"""
(options, remaining_args) = opt_parser.parse_args()
options_pkg_specs = [ options.versions_available,
options.query_metadata_pypi,
options.show_download_links,
options.browse_website,
options.fetch,
options.show_deps,
]
for pkg_spec in options_pkg_specs:
if pkg_spec:
return pkg_spec
def main():
"""
Let's do it.
"""
my_yolk = Yolk()
my_yolk.run()
if __name__ == "__main__":
sys.exit(main())
| 34.397285 | 388 | 0.551001 |
__docformat__ = 'restructuredtext'
import inspect
import re
import pprint
import os
import sys
import optparse
import pkg_resources
import webbrowser
import logging
import platform
if platform.python_version().startswith('2'):
from xmlrpclib import Fault as XMLRPCFault
from urllib import urlretrieve
from urlparse import urlparse
else:
from xmlrpc.client import Fault as XMLRPCFault
from urllib.request import urlretrieve
from urllib.parse import urlparse
from distutils.sysconfig import get_python_lib
from yolk.metadata import get_metadata
from yolk.yolklib import get_highest_version, Distributions
from yolk.pypi import CheeseShop
from yolk.setuptools_support import get_download_uri, get_pkglist
from yolk.plugins import load_plugins
from yolk.utils import run_command, command_successful
from yolk.__init__ import __version__ as VERSION
class StdOut:
def __init__(self, stream, modulenames):
self.stdout = stream
#Modules to squelch
self.modulenames = modulenames
def __getattr__(self, attribute):
if not self.__dict__.has_key(attribute) or attribute == '__doc__':
return getattr(self.stdout, attribute)
return self.__dict__[attribute]
def flush(self):
pass
def write(self, inline):
frame = inspect.currentframe().f_back
if frame:
mod = frame.f_globals.get('__name__')
else:
mod = sys._getframe(0).f_globals.get('__name__')
if not mod in self.modulenames:
self.stdout.write(inline)
def writelines(self, inline):
for line in inline:
self.write(line)
class Yolk(object):
def __init__(self):
#PyPI project name with proper case
self.project_name = ""
#PyPI project version
self.version = ""
#List of all versions not hidden on PyPI
self.all_versions = []
self.pkg_spec = []
self.options = None
self.logger = logging.getLogger("yolk")
#Squelch output from setuptools
#Add future offenders to this list.
shut_up = ['distutils.log']
sys.stdout = StdOut(sys.stdout, shut_up)
sys.stderr = StdOut(sys.stderr, shut_up)
self.pypi = None
def get_plugin(self, method):
all_plugins = []
for entry_point in pkg_resources.iter_entry_points('yolk.plugins'):
plugin_obj = entry_point.load()
plugin = plugin_obj()
plugin.configure(self.options, None)
if plugin.enabled:
if not hasattr(plugin, method):
self.logger.warn("Error: plugin has no method: %s" % method)
plugin = None
else:
all_plugins.append(plugin)
return all_plugins
def set_log_level(self):
if self.options.debug:
self.logger.setLevel(logging.DEBUG)
elif self.options.quiet:
self.logger.setLevel(logging.ERROR)
else:
self.logger.setLevel(logging.INFO)
self.logger.addHandler(logging.StreamHandler())
return self.logger
def run(self):
opt_parser = setup_opt_parser()
(self.options, remaining_args) = opt_parser.parse_args()
logger = self.set_log_level()
pkg_spec = validate_pypi_opts(opt_parser)
if not pkg_spec:
pkg_spec = remaining_args
self.pkg_spec = pkg_spec
if not self.options.pypi_search and (len(sys.argv) == 1 or\
len(remaining_args) > 2):
opt_parser.print_help()
return 2
#Options that depend on querying installed packages, not PyPI.
#We find the proper case for package names if they are installed,
#otherwise PyPI returns the correct case.
if self.options.show_deps or self.options.show_all or \
self.options.show_active or self.options.show_non_active or \
(self.options.show_updates and pkg_spec):
want_installed = True
else:
want_installed = False
#show_updates may or may not have a pkg_spec
if not want_installed or self.options.show_updates:
self.pypi = CheeseShop(self.options.debug)
#XXX: We should return 2 here if we couldn't create xmlrpc server
if pkg_spec:
(self.project_name, self.version, self.all_versions) = \
self.parse_pkg_ver(want_installed)
if want_installed and not self.project_name:
logger.error("%s is not installed." % pkg_spec[0])
return 1
commands = ['show_deps', 'query_metadata_pypi', 'fetch',
'versions_available', 'show_updates', 'browse_website',
'show_download_links', 'pypi_search', 'show_pypi_changelog',
'show_pypi_releases', 'yolk_version', 'show_all',
'show_active', 'show_non_active', 'show_entry_map',
'show_entry_points']
for action in commands:
if getattr(self.options, action):
return getattr(self, action)()
opt_parser.print_help()
def show_active(self):
return self.show_distributions("active")
def show_non_active(self):
return self.show_distributions("nonactive")
def show_all(self):
return self.show_distributions("all")
def show_updates(self):
dists = Distributions()
if self.project_name:
pkg_list = [self.project_name]
else:
pkg_list = get_pkglist()
found = None
for pkg in pkg_list:
for (dist, active) in dists.get_distributions("all", pkg,
dists.get_highest_installed(pkg)):
(project_name, versions) = \
self.pypi.query_versions_pypi(dist.project_name)
if versions:
newest = get_highest_version(versions)
if newest != dist.version:
if pkg_resources.parse_version(dist.version) < \
pkg_resources.parse_version(newest):
found = True
print(" %s %s (%s)" % (project_name, dist.version,
newest))
if not found and self.project_name:
self.logger.info("You have the latest version installed.")
elif not found:
self.logger.info("No newer packages found at The Cheese Shop")
return 0
def show_distributions(self, show):
show_metadata = self.options.metadata
plugins = self.get_plugin("add_column")
ignores = ["/UNIONFS", "/KNOPPIX.IMG"]
#See http://cheeseshop.python.org/pypi/workingenv.py
workingenv = os.environ.get('WORKING_ENV')
if workingenv:
ignores.append(workingenv)
dists = Distributions()
results = None
for (dist, active) in dists.get_distributions(show, self.project_name,
self.version):
metadata = get_metadata(dist)
for prefix in ignores:
if dist.location.startswith(prefix):
dist.location = dist.location.replace(prefix, "")
#Case-insensitve search because of Windows
if dist.location.lower().startswith(get_python_lib().lower()):
develop = ""
else:
develop = dist.location
if metadata:
add_column_text = ""
for my_plugin in plugins:
#See if package is 'owned' by a package manager such as
#portage, apt, rpm etc.
#add_column_text += my_plugin.add_column(filename) + " "
add_column_text += my_plugin.add_column(dist) + " "
self.print_metadata(metadata, develop, active, add_column_text)
else:
print(str(dist) + " has no metadata")
results = True
if not results and self.project_name:
if self.version:
pkg_spec = "%s==%s" % (self.project_name, self.version)
else:
pkg_spec = "%s" % self.project_name
if show == "all":
self.logger.error("There are no versions of %s installed." \
% pkg_spec)
else:
self.logger.error("There are no %s versions of %s installed." \
% \
(show, pkg_spec))
return 2
elif show == "all" and results and self.options.fields:
print("Versions with '*' are non-active.")
print("Versions with '!' are deployed in development mode.")
def print_metadata(self, metadata, develop, active, installed_by):
show_metadata = self.options.metadata
if self.options.fields:
fields = self.options.fields.split(',')
fields = map(str.strip, fields)
else:
fields = []
version = metadata['Version']
#When showing all packages, note which are not active:
if active:
if fields:
active_status = ""
else:
active_status = "active"
else:
if fields:
active_status = "*"
else:
active_status = "non-active"
if develop:
if fields:
development_status = "! (%s)" % develop
else:
development_status = "development (%s)" % develop
else:
development_status = installed_by
status = "%s %s" % (active_status, development_status)
if fields:
print('%s (%s)%s %s' % (metadata['Name'], version, active_status,
development_status))
else:
# Need intelligent justification
print(metadata['Name'].ljust(15) + " - " + version.ljust(12) + \
" - " + status)
if fields:
#Only show specific fields, using case-insensitive search
fields = map(str.lower, fields)
for field in metadata.keys():
if field.lower() in fields:
print(' %s: %s' % (field, metadata[field]))
print()
elif show_metadata:
#Print all available metadata fields
for field in metadata.keys():
if field != 'Name' and field != 'Summary':
print(' %s: %s' % (field, metadata[field]))
def show_deps(self):
pkgs = pkg_resources.Environment()
for pkg in pkgs[self.project_name]:
if not self.version:
print(pkg.project_name, pkg.version)
i = len(pkg._dep_map.values()[0])
if i:
while i:
if not self.version or self.version and \
pkg.version == self.version:
if self.version and i == len(pkg._dep_map.values()[0]):
print(pkg.project_name, pkg.version)
print(" " + str(pkg._dep_map.values()[0][i - 1]))
i -= 1
else:
self.logger.info(\
"No dependency information was supplied with the package.")
return 1
return 0
def show_pypi_changelog(self):
hours = self.options.show_pypi_changelog
if not hours.isdigit():
self.logger.error("Error: You must supply an integer.")
return 1
try:
changelog = self.pypi.changelog(int(hours))
except XMLRPCFault as err_msg:
self.logger.error(err_msg)
self.logger.error("ERROR: Couldn't retrieve changelog.")
return 1
last_pkg = ''
for entry in changelog:
pkg = entry[0]
if pkg != last_pkg:
print("%s %s\n\t%s" % (entry[0], entry[1], entry[3]))
last_pkg = pkg
else:
print("\t%s" % entry[3])
return 0
def show_pypi_releases(self):
try:
hours = int(self.options.show_pypi_releases)
except ValueError:
self.logger.error("ERROR: You must supply an integer.")
return 1
try:
latest_releases = self.pypi.updated_releases(hours)
except XMLRPCFault as err_msg:
self.logger.error(err_msg)
self.logger.error("ERROR: Couldn't retrieve latest releases.")
return 1
for release in latest_releases:
print("%s %s" % (release[0], release[1]))
return 0
def show_download_links(self):
#In case they specify version as 'dev' instead of using -T svn,
#don't show three svn URI's
if self.options.file_type == "all" and self.version == "dev":
self.options.file_type = "svn"
if self.options.file_type == "svn":
version = "dev"
else:
if self.version:
version = self.version
else:
version = self.all_versions[0]
if self.options.file_type == "all":
#Search for source, egg, and svn
self.print_download_uri(version, True)
self.print_download_uri(version, False)
self.print_download_uri("dev", True)
else:
if self.options.file_type == "source":
source = True
else:
source = False
self.print_download_uri(version, source)
return 0
def print_download_uri(self, version, source):
if version == "dev":
pkg_type = "subversion"
source = True
elif source:
pkg_type = "source"
else:
pkg_type = "egg"
#Use setuptools monkey-patch to grab url
url = get_download_uri(self.project_name, version, source,
self.options.pypi_index)
if url:
print("%s" % url)
else:
self.logger.info("No download URL found for %s" % pkg_type)
def fetch(self):
#Default type to download
source = True
directory = "."
if self.options.file_type == "svn":
version = "dev"
svn_uri = get_download_uri(self.project_name, \
"dev", True)
if svn_uri:
directory = self.project_name + "_svn"
return self.fetch_svn(svn_uri, directory)
else:
self.logger.error(\
"ERROR: No subversion repository found for %s" % \
self.project_name)
return 1
elif self.options.file_type == "source":
source = True
elif self.options.file_type == "egg":
source = False
uri = get_download_uri(self.project_name, self.version, source)
if uri:
return self.fetch_uri(directory, uri)
else:
self.logger.error("No %s URI found for package: %s " % \
(self.options.file_type, self.project_name))
return 1
def fetch_uri(self, directory, uri):
filename = os.path.basename(urlparse(uri)[2])
if os.path.exists(filename):
self.logger.error("ERROR: File exists: " + filename)
return 1
try:
downloaded_filename, headers = urlretrieve(uri, filename)
self.logger.info("Downloaded ./" + filename)
except IOError as err_msg:
self.logger.error("Error downloading package %s from URL %s" \
% (filename, uri))
self.logger.error(str(err_msg))
return 1
if headers.gettype() in ["text/html"]:
dfile = open(downloaded_filename)
if re.search("404 Not Found", "".join(dfile.readlines())):
dfile.close()
self.logger.error("'404 Not Found' error")
return 1
dfile.close()
return 0
def fetch_svn(self, svn_uri, directory):
if not command_successful("svn --version"):
self.logger.error("ERROR: Do you have subversion installed?")
return 1
if os.path.exists(directory):
self.logger.error("ERROR: Checkout directory exists - %s" \
% directory)
return 1
try:
os.mkdir(directory)
except OSError as err_msg:
self.logger.error("ERROR: " + str(err_msg))
return 1
cwd = os.path.realpath(os.curdir)
os.chdir(directory)
self.logger.info("Doing subversion checkout for %s" % svn_uri)
status, output = run_command("/usr/bin/svn co %s" % svn_uri)
self.logger.info(output)
os.chdir(cwd)
self.logger.info("subversion checkout is in directory './%s'" \
% directory)
return 0
def browse_website(self, browser=None):
if len(self.all_versions):
metadata = self.pypi.release_data(self.project_name, \
self.all_versions[0])
self.logger.debug("DEBUG: browser: %s" % browser)
if metadata.has_key("home_page"):
self.logger.info("Launching browser: %s" \
% metadata["home_page"])
if browser == 'konqueror':
browser = webbrowser.Konqueror()
else:
browser = webbrowser.get()
browser.open(metadata["home_page"], 2)
return 0
self.logger.error("No homepage URL found.")
return 1
def query_metadata_pypi(self):
if self.version and self.version in self.all_versions:
metadata = self.pypi.release_data(self.project_name, self.version)
else:
#Give highest version
metadata = self.pypi.release_data(self.project_name, \
self.all_versions[0])
if metadata:
for key in metadata.keys():
if not self.options.fields or (self.options.fields and \
self.options.fields==key):
print("%s: %s" % (key, metadata[key]))
return 0
def versions_available(self):
if self.version:
spec = "%s==%s" % (self.project_name, self.version)
else:
spec = self.project_name
if self.all_versions and self.version in self.all_versions:
print_pkg_versions(self.project_name, [self.version])
elif not self.version and self.all_versions:
print_pkg_versions(self.project_name, self.all_versions)
else:
if self.version:
self.logger.error("No pacakge found for version %s" \
% self.version)
else:
self.logger.error("No pacakge found for %s" % self.project_name)
return 1
return 0
def parse_search_spec(self, spec):
usage = \
"""You can search PyPI by the following:
name
version
author
author_email
maintainer
maintainer_email
home_page
license
summary
description
keywords
platform
download_url
e.g. yolk -S name=Cheetah
yolk -S name=yolk AND license=PSF
"""
if not spec:
self.logger.error(usage)
return (None, None)
try:
spec = (" ").join(spec)
operator = 'AND'
first = second = ""
if " AND " in spec:
(first, second) = spec.split('AND')
elif " OR " in spec:
(first, second) = spec.split('OR')
operator = 'OR'
else:
first = spec
(key1, term1) = first.split('=')
key1 = key1.strip()
if second:
(key2, term2) = second.split('=')
key2 = key2.strip()
spec = {}
spec[key1] = term1
if second:
spec[key2] = term2
except:
self.logger.error(usage)
spec = operator = None
return (spec, operator)
def pypi_search(self):
spec = self.pkg_spec
#Add remainging cli arguments to options.pypi_search
search_arg = self.options.pypi_search
spec.insert(0, search_arg.strip())
(spec, operator) = self.parse_search_spec(spec)
if not spec:
return 1
for pkg in self.pypi.search(spec, operator):
if pkg['summary']:
summary = pkg['summary'].encode('utf-8')
else:
summary = ""
print("""%s (%s):
%s
""" % (pkg['name'].encode('utf-8'), pkg["version"],
summary))
return 0
def show_entry_map(self):
pprinter = pprint.PrettyPrinter()
try:
entry_map = pkg_resources.get_entry_map(self.options.show_entry_map)
if entry_map:
pprinter.pprint(entry_map)
except pkg_resources.DistributionNotFound:
self.logger.error("Distribution not found: %s" \
% self.options.show_entry_map)
return 1
return 0
def show_entry_points(self):
found = False
for entry_point in \
pkg_resources.iter_entry_points(self.options.show_entry_points):
found = True
try:
plugin = entry_point.load()
print(plugin.__module__)
print(" %s" % entry_point)
if plugin.__doc__:
print(plugin.__doc__)
print
except ImportError:
pass
if not found:
self.logger.error("No entry points found for %s" \
% self.options.show_entry_points)
return 1
return 0
def yolk_version(self):
self.logger.info("yolk version %s" % VERSION)
return 0
def parse_pkg_ver(self, want_installed):
all_versions = []
arg_str = ("").join(self.pkg_spec)
if "==" not in arg_str:
#No version specified
project_name = arg_str
version = None
else:
(project_name, version) = arg_str.split("==")
project_name = project_name.strip()
version = version.strip()
#Find proper case for package name
if want_installed:
dists = Distributions()
project_name = dists.case_sensitive_name(project_name)
else:
(project_name, all_versions) = \
self.pypi.query_versions_pypi(project_name)
if not len(all_versions):
msg = "I'm afraid we have no '%s' at " % project_name
msg += "The Cheese Shop. A little Red Leicester, perhaps?"
self.logger.error(msg)
sys.exit(2)
return (project_name, version, all_versions)
def setup_opt_parser():
usage = "usage: %prog [options]"
opt_parser = optparse.OptionParser(usage=usage)
opt_parser.add_option("--version", action='store_true', dest=
"yolk_version", default=False, help=
"Show yolk version and exit.")
opt_parser.add_option("--debug", action='store_true', dest=
"debug", default=False, help=
"Show debugging information.")
opt_parser.add_option("-q", "--quiet", action='store_true', dest=
"quiet", default=False, help=
"Show less output.")
group_local = optparse.OptionGroup(opt_parser,
"Query installed Python packages",
"The following options show information about installed Python packages. Activated packages are normal packages on sys.path that can be imported. Non-activated packages need 'pkg_resources.require()' before they can be imported, such as packages installed with 'easy_install --multi-version'. PKG_SPEC can be either a package name or package name and version e.g. Paste==0.9")
group_local.add_option("-l", "--list", action='store_true', dest=
"show_all", default=False, help=
"List all Python packages installed by distutils or setuptools. Use PKG_SPEC to narrow results.")
group_local.add_option("-a", "--activated", action='store_true',
dest="show_active", default=False, help=
'List activated packages installed by distutils or ' +
'setuptools. Use PKG_SPEC to narrow results.')
group_local.add_option("-n", "--non-activated", action='store_true',
dest="show_non_active", default=False, help=
'List non-activated packages installed by distutils or ' +
'setuptools. Use PKG_SPEC to narrow results.')
group_local.add_option("-m", "--metadata", action='store_true', dest=
"metadata", default=False, help=
'Show all metadata for packages installed by ' +
'setuptools (use with -l -a or -n)')
group_local.add_option("-f", "--fields", action="store", dest=
"fields", default=False, help=
'Show specific metadata fields. ' +
'(use with -m or -M)')
group_local.add_option("-d", "--depends", action='store', dest=
"show_deps", metavar='PKG_SPEC',
help= "Show dependencies for a package installed by " +
"setuptools if they are available.")
group_local.add_option("--entry-points", action='store',
dest="show_entry_points", default=False, help=
'List entry points for a module. e.g. --entry-points nose.plugins',
metavar="MODULE")
group_local.add_option("--entry-map", action='store',
dest="show_entry_map", default=False, help=
'List entry map for a package. e.g. --entry-map yolk',
metavar="PACKAGE_NAME")
group_pypi = optparse.OptionGroup(opt_parser,
"PyPI (Cheese Shop) options",
"The following options query the Python Package Index:")
group_pypi.add_option("-C", "--changelog", action='store',
dest="show_pypi_changelog", metavar='HOURS',
default=False, help=
"Show detailed ChangeLog for PyPI for last n hours. ")
group_pypi.add_option("-D", "--download-links", action='store',
metavar="PKG_SPEC", dest="show_download_links",
default=False, help=
"Show download URL's for package listed on PyPI. Use with -T to specify egg, source etc.")
group_pypi.add_option("-F", "--fetch-package", action='store',
metavar="PKG_SPEC", dest="fetch",
default=False, help=
"Download package source or egg. You can specify a file type with -T")
group_pypi.add_option("-H", "--browse-homepage", action='store',
metavar="PKG_SPEC", dest="browse_website",
default=False, help=
"Launch web browser at home page for package.")
group_pypi.add_option("-I", "--pypi-index", action='store',
dest="pypi_index",
default=False, help=
"Specify PyPI mirror for package index.")
group_pypi.add_option("-L", "--latest-releases", action='store',
dest="show_pypi_releases", metavar="HOURS",
default=False, help=
"Show PyPI releases for last n hours. ")
group_pypi.add_option("-M", "--query-metadata", action='store',
dest="query_metadata_pypi", default=False,
metavar="PKG_SPEC", help=
"Show metadata for a package listed on PyPI. Use -f to show particular fields.")
group_pypi.add_option("-S", "", action="store", dest="pypi_search",
default=False, help=
"Search PyPI by spec and optional AND/OR operator.",
metavar='SEARCH_SPEC <AND/OR SEARCH_SPEC>')
group_pypi.add_option("-T", "--file-type", action="store", dest=
"file_type", default="all", help=
"You may specify 'source', 'egg', 'svn' or 'all' when using -D.")
group_pypi.add_option("-U", "--show-updates", action='store_true',
dest="show_updates", metavar='<PKG_NAME>',
default=False, help=
"Check PyPI for updates on package(s).")
group_pypi.add_option("-V", "--versions-available", action=
'store', dest="versions_available",
default=False, metavar='PKG_SPEC',
help="Show available versions for given package " +
"listed on PyPI.")
opt_parser.add_option_group(group_local)
opt_parser.add_option_group(group_pypi)
# add opts from plugins
all_plugins = []
for plugcls in load_plugins(others=True):
plug = plugcls()
try:
plug.add_options(opt_parser)
except AttributeError:
pass
return opt_parser
def print_pkg_versions(project_name, versions):
for ver in versions:
print("%s %s" % (project_name, ver))
def validate_pypi_opts(opt_parser):
(options, remaining_args) = opt_parser.parse_args()
options_pkg_specs = [ options.versions_available,
options.query_metadata_pypi,
options.show_download_links,
options.browse_website,
options.fetch,
options.show_deps,
]
for pkg_spec in options_pkg_specs:
if pkg_spec:
return pkg_spec
def main():
my_yolk = Yolk()
my_yolk.run()
if __name__ == "__main__":
sys.exit(main())
| true | true |
f7354ccee8fb926395d5c25e4ca5bc5ed92f36ed | 3,258 | py | Python | sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2019_09_01_preview/aio/_configuration.py | aiven/azure-sdk-for-python | 8764dc07423beca46ed0b51212d81289d9e52c60 | [
"MIT"
] | null | null | null | sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2019_09_01_preview/aio/_configuration.py | aiven/azure-sdk-for-python | 8764dc07423beca46ed0b51212d81289d9e52c60 | [
"MIT"
] | 1 | 2021-02-23T23:11:26.000Z | 2021-02-23T23:11:26.000Z | sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2019_09_01_preview/aio/_configuration.py | aiven/azure-sdk-for-python | 8764dc07423beca46ed0b51212d81289d9e52c60 | [
"MIT"
] | 1 | 2021-05-19T02:55:10.000Z | 2021-05-19T02:55:10.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
VERSION = "unknown"
class ApplicationInsightsManagementClientConfiguration(Configuration):
"""Configuration for ApplicationInsightsManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(ApplicationInsightsManagementClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2019-09-01-preview"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-applicationinsights/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| 48.626866 | 134 | 0.702271 |
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
if TYPE_CHECKING:
from azure.core.credentials_async import AsyncTokenCredential
VERSION = "unknown"
class ApplicationInsightsManagementClientConfiguration(Configuration):
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(ApplicationInsightsManagementClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2019-09-01-preview"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-applicationinsights/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| true | true |
f7354d371638fad528d5e25ee8ab26f860c103b1 | 57 | py | Python | mini-scripts/Python_-_Output_Variables_1.txt.py | Web-Dev-Collaborative/PYTHON_PRAC | 856f902fb43dcccae168d34ee6aacc02427a7ac6 | [
"MIT"
] | 5 | 2021-06-02T23:44:25.000Z | 2021-12-27T16:21:57.000Z | mini-scripts/Python_-_Output_Variables_1.txt.py | Web-Dev-Collaborative/PYTHON_PRAC | 856f902fb43dcccae168d34ee6aacc02427a7ac6 | [
"MIT"
] | 22 | 2021-05-31T01:33:25.000Z | 2021-10-18T18:32:39.000Z | mini-scripts/Python_-_Output_Variables_1.txt.py | Web-Dev-Collaborative/PYTHON_PRAC | 856f902fb43dcccae168d34ee6aacc02427a7ac6 | [
"MIT"
] | 3 | 2021-06-19T03:37:47.000Z | 2021-08-31T00:49:51.000Z | x = "awesome"
print("Python is " + x)
# Author: Bryan G
| 14.25 | 23 | 0.596491 | x = "awesome"
print("Python is " + x)
| true | true |
f7354da7414334f4bb43360cb78f171a1a3a8177 | 6,266 | py | Python | oscar/apps/search/views.py | makielab/django-oscar | 0a325cd0f04a4278201872b2e163868b72b6fabe | [
"BSD-3-Clause"
] | 1 | 2015-11-07T12:37:50.000Z | 2015-11-07T12:37:50.000Z | oscar/apps/search/views.py | makielab/django-oscar | 0a325cd0f04a4278201872b2e163868b72b6fabe | [
"BSD-3-Clause"
] | null | null | null | oscar/apps/search/views.py | makielab/django-oscar | 0a325cd0f04a4278201872b2e163868b72b6fabe | [
"BSD-3-Clause"
] | null | null | null | import json
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic.base import View
from django.conf import settings
from django.db.models import get_model
from haystack.query import SearchQuerySet
from haystack import views
from purl import URL
Product = get_model('catalogue', 'Product')
class SuggestionsView(View):
"""
Auto suggest view
Returns the suggestions in JSON format (especially suited for consumption
by jQuery autocomplete) """
suggest_limit = settings.OSCAR_SEARCH_SUGGEST_LIMIT
def get(self, request):
context = self.get_context_data()
return self.render_to_response(context)
def get_context_data(self):
'''
Creates a list of suggestions
'''
query_term = self.request.GET['query_term']
query_set = SearchQuerySet().filter(text__contains=query_term)[
:self.suggest_limit]
context = []
for item in query_set:
context.append({
'label': item.object.title,
'url': item.object.get_absolute_url(),
})
return context
def render_to_response(self, context):
"Returns a JSON response containing 'context' as payload"
return self.get_json_response(self.convert_context_to_json(context))
def get_json_response(self, content, **httpresponse_kwargs):
"Construct an `HttpResponse` object."
return HttpResponse(content,
content_type='application/json',
**httpresponse_kwargs)
def convert_context_to_json(self, context):
"Convert the context into a JSON object"
return json.dumps(context)
class FacetedSearchView(views.FacetedSearchView):
    """Haystack faceted search view that augments the template context with
    a `facet_data` structure: for each facet field/query, a list of dicts
    carrying the facet name, result count, whether it is currently selected,
    and a ready-made select/deselect URL."""

    def extra_context(self):
        extra = super(FacetedSearchView, self).extra_context()
        if 'fields' not in extra['facets']:
            # Looks like Solr is not responding correctly
            return extra

        # Convert facet data into a more useful datastructure

        # Field facets
        facet_data = {}
        base_url = URL(self.request.get_full_path())
        # Map of currently applied facets: {'<field>_exact': '<value>', ...},
        # parsed from the form's 'field:value' strings.
        selected = dict(
            map(lambda x: x.split(':'), self.form.selected_facets))
        for field, facets in extra['facets']['fields'].items():
            facet_data[field] = []
            for name, count in facets:
                # Ignore zero-count facets for field
                if count == 0:
                    continue
                field_filter = '%s_exact' % field
                datum = {
                    'name': name,
                    'count': count}
                if selected.get(field_filter, None) == name:
                    # This filter is selected - build the 'deselect' URL
                    datum['selected'] = True
                    url = base_url.remove_query_param(
                        'selected_facets', '%s:%s' % (
                            field_filter, name))
                    datum['deselect_url'] = url.as_string()
                else:
                    # This filter is not selected - build the 'select' URL
                    datum['selected'] = False
                    url = base_url.append_query_param(
                        'selected_facets', '%s:%s' % (
                            field_filter, name))
                    datum['select_url'] = url.as_string()
                facet_data[field].append(datum)

        # Query facets (configured in settings rather than coming from the
        # search backend; missing queries are reported with a zero count).
        for key, facet in settings.OSCAR_SEARCH_FACETS['queries'].items():
            facet_data[key] = []
            for name, query in facet['queries']:
                field_filter = '%s_exact' % facet['field']
                match = '%s_exact:%s' % (facet['field'], query)
                if not match in extra['facets']['queries']:
                    datum = {
                        'name': name,
                        'count': 0,
                    }
                else:
                    datum = {
                        'name': name,
                        'count': extra['facets']['queries'][match],
                    }
                if selected.get(field_filter, None) == query:
                    # Selected
                    datum['selected'] = True
                    url = base_url.remove_query_param(
                        'selected_facets', match)
                    datum['deselect_url'] = url.as_string()
                else:
                    datum['selected'] = False
                    url = base_url.append_query_param(
                        'selected_facets', match)
                    datum['select_url'] = url.as_string()
                facet_data[key].append(datum)

        extra['facet_data'] = facet_data
        return extra
class MultiFacetedSearchView(FacetedSearchView):
    """
    Search view for multifaceted searches
    """
    template = 'search/results.html'

    def __call__(self, request, *args, **kwargs):
        """
        Generates the actual response to the search.

        Relies on internal, overridable methods to construct the response.
        If the raw query exactly matches a product UPC, redirect straight to
        that product instead of rendering search results.
        """
        # Look for UPC match
        query = request.GET.get('q', '').strip()
        try:
            item = Product._default_manager.get(upc=query)
            return HttpResponseRedirect(item.get_absolute_url())
        except Product.DoesNotExist:
            pass
        return super(MultiFacetedSearchView, self).__call__(request, *args, **kwargs)

    @property
    def __name__(self):
        # NOTE(review): presumably needed because this callable instance is
        # used where a named view function is expected - confirm with caller.
        return "MultiFacetedSearchView"

    def extra_context(self):
        """
        Adds details about the facets applied
        """
        extra = super(MultiFacetedSearchView, self).extra_context()
        if hasattr(self.form, 'cleaned_data') and 'selected_facets' in self.form.cleaned_data:
            extra['facets_applied'] = []
            for f in self.form.cleaned_data['selected_facets'].split("|"):
                facet = f.split(":")
                extra['facets_applied'].append({
                    'facet': facet[0][:-6],  # removing the _exact suffix that haystack uses for some reason
                    'value' : facet[1].strip('"')
                })
        return extra
| 36.219653 | 107 | 0.543568 | import json
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic.base import View
from django.conf import settings
from django.db.models import get_model
from haystack.query import SearchQuerySet
from haystack import views
from purl import URL
Product = get_model('catalogue', 'Product')
class SuggestionsView(View):
suggest_limit = settings.OSCAR_SEARCH_SUGGEST_LIMIT
def get(self, request):
context = self.get_context_data()
return self.render_to_response(context)
def get_context_data(self):
query_term = self.request.GET['query_term']
query_set = SearchQuerySet().filter(text__contains=query_term)[
:self.suggest_limit]
context = []
for item in query_set:
context.append({
'label': item.object.title,
'url': item.object.get_absolute_url(),
})
return context
def render_to_response(self, context):
return self.get_json_response(self.convert_context_to_json(context))
def get_json_response(self, content, **httpresponse_kwargs):
return HttpResponse(content,
content_type='application/json',
**httpresponse_kwargs)
def convert_context_to_json(self, context):
return json.dumps(context)
class FacetedSearchView(views.FacetedSearchView):
def extra_context(self):
extra = super(FacetedSearchView, self).extra_context()
if 'fields' not in extra['facets']:
return extra
facet_data = {}
base_url = URL(self.request.get_full_path())
selected = dict(
map(lambda x: x.split(':'), self.form.selected_facets))
for field, facets in extra['facets']['fields'].items():
facet_data[field] = []
for name, count in facets:
if count == 0:
continue
field_filter = '%s_exact' % field
datum = {
'name': name,
'count': count}
if selected.get(field_filter, None) == name:
datum['selected'] = True
url = base_url.remove_query_param(
'selected_facets', '%s:%s' % (
field_filter, name))
datum['deselect_url'] = url.as_string()
else:
datum['selected'] = False
url = base_url.append_query_param(
'selected_facets', '%s:%s' % (
field_filter, name))
datum['select_url'] = url.as_string()
facet_data[field].append(datum)
for key, facet in settings.OSCAR_SEARCH_FACETS['queries'].items():
facet_data[key] = []
for name, query in facet['queries']:
field_filter = '%s_exact' % facet['field']
match = '%s_exact:%s' % (facet['field'], query)
if not match in extra['facets']['queries']:
datum = {
'name': name,
'count': 0,
}
else:
datum = {
'name': name,
'count': extra['facets']['queries'][match],
}
if selected.get(field_filter, None) == query:
datum['selected'] = True
url = base_url.remove_query_param(
'selected_facets', match)
datum['deselect_url'] = url.as_string()
else:
datum['selected'] = False
url = base_url.append_query_param(
'selected_facets', match)
datum['select_url'] = url.as_string()
facet_data[key].append(datum)
extra['facet_data'] = facet_data
return extra
class MultiFacetedSearchView(FacetedSearchView):
template = 'search/results.html'
def __call__(self, request, *args, **kwargs):
query = request.GET.get('q', '').strip()
try:
item = Product._default_manager.get(upc=query)
return HttpResponseRedirect(item.get_absolute_url())
except Product.DoesNotExist:
pass
return super(MultiFacetedSearchView, self).__call__(request, *args, **kwargs)
@property
def __name__(self):
return "MultiFacetedSearchView"
def extra_context(self):
extra = super(MultiFacetedSearchView, self).extra_context()
if hasattr(self.form, 'cleaned_data') and 'selected_facets' in self.form.cleaned_data:
extra['facets_applied'] = []
for f in self.form.cleaned_data['selected_facets'].split("|"):
facet = f.split(":")
extra['facets_applied'].append({
'facet': facet[0][:-6],
'value' : facet[1].strip('"')
})
return extra
| true | true |
f7354e64a3afa8d1f4a6c949a6e4f8dca77e083b | 3,182 | py | Python | tensorflow/python/data/experimental/service/server_lib_test.py | yage99/tensorflow | c7fa71b32a3635eb25596ae80d007b41007769c4 | [
"Apache-2.0"
] | 4 | 2020-06-28T08:25:36.000Z | 2021-08-12T12:41:34.000Z | tensorflow/python/data/experimental/service/server_lib_test.py | yage99/tensorflow | c7fa71b32a3635eb25596ae80d007b41007769c4 | [
"Apache-2.0"
] | 10 | 2021-08-03T08:42:38.000Z | 2022-01-03T03:29:12.000Z | tensorflow/python/data/experimental/service/server_lib_test.py | yage99/tensorflow | c7fa71b32a3635eb25596ae80d007b41007769c4 | [
"Apache-2.0"
] | 28 | 2020-02-10T07:03:06.000Z | 2022-01-12T11:19:20.000Z | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.data service server lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.service import server_lib
from tensorflow.python.platform import test
class ServerLibTest(test.TestCase):
    """Lifecycle tests for tf.data service DispatchServer/WorkerServer:
    start, repeated start, stop, stop-then-start, join, and worker
    registration counting."""

    def testStartDispatcher(self):
        dispatcher = server_lib.DispatchServer(0, start=False)
        dispatcher.start()

    def testMultipleStartDispatcher(self):
        # Calling start() on an already-started server must not raise.
        dispatcher = server_lib.DispatchServer(0, start=True)
        dispatcher.start()

    def testStartWorker(self):
        dispatcher = server_lib.DispatchServer(0)
        worker = server_lib.WorkerServer(0, dispatcher._address, start=False)
        worker.start()

    def testMultipleStartWorker(self):
        # Calling start() on an already-started worker must not raise.
        dispatcher = server_lib.DispatchServer(0)
        worker = server_lib.WorkerServer(0, dispatcher._address, start=True)
        worker.start()

    def testStopDispatcher(self):
        # Stopping twice must be idempotent.
        dispatcher = server_lib.DispatchServer(0)
        dispatcher._stop()
        dispatcher._stop()

    def testStopWorker(self):
        # Stopping twice must be idempotent.
        dispatcher = server_lib.DispatchServer(0)
        worker = server_lib.WorkerServer(0, dispatcher._address)
        worker._stop()
        worker._stop()

    def testStopStartDispatcher(self):
        # A stopped server cannot be restarted.
        dispatcher = server_lib.DispatchServer(0)
        dispatcher._stop()
        with self.assertRaisesRegex(
            RuntimeError, "Server cannot be started after it has been stopped"):
            dispatcher.start()

    def testStopStartWorker(self):
        # A stopped worker cannot be restarted.
        dispatcher = server_lib.DispatchServer(0)
        worker = server_lib.WorkerServer(0, dispatcher._address)
        worker._stop()
        with self.assertRaisesRegex(
            RuntimeError, "Server cannot be started after it has been stopped"):
            worker.start()

    def testJoinDispatcher(self):
        # join() on a stopped server returns immediately.
        dispatcher = server_lib.DispatchServer(0)
        dispatcher._stop()
        dispatcher.join()

    def testJoinWorker(self):
        # join() on a stopped worker returns immediately.
        dispatcher = server_lib.DispatchServer(0)
        worker = server_lib.WorkerServer(0, dispatcher._address)
        worker._stop()
        worker.join()

    def testDispatcherNumWorkers(self):
        # The dispatcher's worker count tracks each worker that registers.
        dispatcher = server_lib.DispatchServer(0)
        self.assertEqual(0, dispatcher._num_workers())
        worker1 = server_lib.WorkerServer(0, dispatcher._address)  # pylint: disable=unused-variable
        self.assertEqual(1, dispatcher._num_workers())
        worker2 = server_lib.WorkerServer(0, dispatcher._address)  # pylint: disable=unused-variable
        self.assertEqual(2, dispatcher._num_workers())


if __name__ == "__main__":
    test.main()
| 33.851064 | 96 | 0.730044 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.service import server_lib
from tensorflow.python.platform import test
class ServerLibTest(test.TestCase):
def testStartDispatcher(self):
dispatcher = server_lib.DispatchServer(0, start=False)
dispatcher.start()
def testMultipleStartDispatcher(self):
dispatcher = server_lib.DispatchServer(0, start=True)
dispatcher.start()
def testStartWorker(self):
dispatcher = server_lib.DispatchServer(0)
worker = server_lib.WorkerServer(0, dispatcher._address, start=False)
worker.start()
def testMultipleStartWorker(self):
dispatcher = server_lib.DispatchServer(0)
worker = server_lib.WorkerServer(0, dispatcher._address, start=True)
worker.start()
def testStopDispatcher(self):
dispatcher = server_lib.DispatchServer(0)
dispatcher._stop()
dispatcher._stop()
def testStopWorker(self):
dispatcher = server_lib.DispatchServer(0)
worker = server_lib.WorkerServer(0, dispatcher._address)
worker._stop()
worker._stop()
def testStopStartDispatcher(self):
dispatcher = server_lib.DispatchServer(0)
dispatcher._stop()
with self.assertRaisesRegex(
RuntimeError, "Server cannot be started after it has been stopped"):
dispatcher.start()
def testStopStartWorker(self):
dispatcher = server_lib.DispatchServer(0)
worker = server_lib.WorkerServer(0, dispatcher._address)
worker._stop()
with self.assertRaisesRegex(
RuntimeError, "Server cannot be started after it has been stopped"):
worker.start()
def testJoinDispatcher(self):
dispatcher = server_lib.DispatchServer(0)
dispatcher._stop()
dispatcher.join()
def testJoinWorker(self):
dispatcher = server_lib.DispatchServer(0)
worker = server_lib.WorkerServer(0, dispatcher._address)
worker._stop()
worker.join()
def testDispatcherNumWorkers(self):
dispatcher = server_lib.DispatchServer(0)
self.assertEqual(0, dispatcher._num_workers())
worker1 = server_lib.WorkerServer(0, dispatcher._address)
self.assertEqual(1, dispatcher._num_workers())
worker2 = server_lib.WorkerServer(0, dispatcher._address)
self.assertEqual(2, dispatcher._num_workers())
if __name__ == "__main__":
test.main()
| true | true |
f7354fb589d6141cc7a419690010c3a08bd3d9c7 | 2,344 | py | Python | routes/repos.py | apuayush/gdginfo | ec5fa3d39f44ce5bb94a3e71d1893093624da3dc | [
"MIT"
] | 4 | 2017-03-30T17:31:08.000Z | 2020-01-15T18:53:04.000Z | routes/repos.py | apuayush/gdginfo | ec5fa3d39f44ce5bb94a3e71d1893093624da3dc | [
"MIT"
] | 16 | 2017-03-28T22:46:38.000Z | 2020-01-26T19:39:57.000Z | routes/repos.py | apuayush/gdginfo | ec5fa3d39f44ce5bb94a3e71d1893093624da3dc | [
"MIT"
] | 2 | 2017-04-10T12:38:10.000Z | 2019-05-10T02:52:25.000Z | import simplejson as json
from tornado.gen import coroutine
from tornado.web import RequestHandler
from tornado_cors import CorsMixin
from utility import utility
"""
@api {get} /repos data related to repos
@apiName data related to repos
@apiGroup all
@apiParam org organization name
@apiPermission logged-in
@apiParamExample {json} response-example
{
status: 200,
message: "OK",
payload:
{
"name": "CodeCombat",
"ref": {
"target": {
"history": {
"edges": [{
"node": {
"author": {
"name": "Angad Sharma",
"date": "2019-06-06T08:38:08+05:30"
},
"additions": 3,
"deletions": 27,
"pushedDate": "2019-06-06T03:08:09Z",
"message": "Merge pull request #8 from CodeChefVIT/dependabot/npm_and_yarn/mongoose-5.5.13\n\nBump mongoose from 5.5.12 to 5.5.13",
"messageBody": "\u2026se-5.5.13\n\nBump mongoose from 5.5.12 to 5.5.13",
"url": "https://github.com/CodeChefVIT/CodeCombat/commit/60e45681c9baf8b02c2996ffc14442741f0c6fea"
}
}]
}
}
}
}
"""
class Repos(CorsMixin, RequestHandler):
    """Tornado handler serving repository data for an organization.

    GET /repos?org=<name> with an ``Authorization`` header carrying the
    caller's token. Responds with ``{status, message, payload}`` JSON, where
    payload is whatever ``utility.repos`` returns (cached via redis).
    """
    CORS_ORIGIN = "*"
    CORS_HEADERS = 'Content-Type, Authorization'
    CORS_METHODS = 'GET'
    CORS_MAX_AGE = 21600

    def set_default_headers(self):
        self.set_header("Access-Control-Allow-Origin", "*")
        self.set_header('Access-Control-Allow-Methods', 'GET, OPTIONS')
        self.set_header('Access-Control-Allow-Headers', 'authorization')

    def initialize(self, redis):
        # Redis connection injected through the application's handler config.
        self.redis = redis

    @coroutine
    def get(self):
        token = self.request.headers.get("Authorization")
        if not token:
            # Bug fix: previously execution fell through after writing the
            # error and still called utility.repos() with token=None. Stop
            # handling the request here instead.
            self.write("You are not logged in")
            return
        org = self.get_query_argument("org")
        response = utility.repos(token, org, self.redis)
        jsonData = {
            'status': 200,
            'message': 'OK',
            'payload': response
        }
        self.write(json.dumps(jsonData))

    def write_error(self, status_code, **kwargs):
        # Generic error payload mirroring the success envelope shape.
        jsonData = {
            'status': int(status_code),
            'message': "Internal server error",
            'answer': 'NULL'
        }
        self.write(json.dumps(jsonData))

    def options(self):
        # CORS preflight: succeed with no content.
        self.set_status(204)
        self.finish()
| 26.636364 | 138 | 0.595137 | import simplejson as json
from tornado.gen import coroutine
from tornado.web import RequestHandler
from tornado_cors import CorsMixin
from utility import utility
class Repos(CorsMixin, RequestHandler):
CORS_ORIGIN = "*"
CORS_HEADERS = 'Content-Type, Authorization'
CORS_METHODS = 'GET'
CORS_MAX_AGE = 21600
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header('Access-Control-Allow-Methods', 'GET, OPTIONS')
self.set_header('Access-Control-Allow-Headers', 'authorization')
def initialize(self, redis):
self.redis = redis
@coroutine
def get(self):
token=self.request.headers.get("Authorization")
if token is None or not token:
self.write("You are not logged in")
org=self.get_query_argument("org")
response = utility.repos(token, org, self.redis)
jsonData = {
'status': 200,
'message': 'OK',
'payload': response
}
self.write(json.dumps(jsonData))
def write_error(self, status_code, **kwargs):
jsonData = {
'status': int(status_code),
'message': "Internal server error",
'answer': 'NULL'
}
self.write(json.dumps(jsonData))
def options(self):
self.set_status(204)
self.finish()
| true | true |
f7354fcf962cbde596ca7cc862b065cdde1659ef | 47,255 | py | Python | src/olympia/addons/serializers.py | mozilla/addons-server | 42abcfa61032555c6d9ad76a0298134bc7f3b5c1 | [
"BSD-3-Clause"
] | 843 | 2016-02-09T13:00:37.000Z | 2022-03-20T19:17:06.000Z | src/olympia/addons/serializers.py | mozilla/addons-server | 42abcfa61032555c6d9ad76a0298134bc7f3b5c1 | [
"BSD-3-Clause"
] | 10,187 | 2016-02-05T23:51:05.000Z | 2022-03-31T15:24:44.000Z | src/olympia/addons/serializers.py | mozilla/addons-server | 42abcfa61032555c6d9ad76a0298134bc7f3b5c1 | [
"BSD-3-Clause"
] | 551 | 2016-02-08T20:32:16.000Z | 2022-03-15T16:49:24.000Z | import re
from urllib.parse import urlsplit, urlunsplit
from django.http.request import QueryDict
from django.urls import reverse
from rest_framework import exceptions, serializers
import olympia.core.logger
from olympia import activity, amo
from olympia.accounts.serializers import (
BaseUserSerializer,
UserProfileBasketSyncSerializer,
)
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.utils import sorted_groupby
from olympia.api.fields import (
ESTranslationSerializerField,
GetTextTranslationSerializerField,
OutgoingTranslationField,
OutgoingURLField,
ReverseChoiceField,
SplitField,
TranslationSerializerField,
)
from olympia.api.serializers import BaseESSerializer
from olympia.api.utils import is_gate_active
from olympia.applications.models import AppVersion
from olympia.bandwagon.models import Collection
from olympia.blocklist.models import Block
from olympia.constants.applications import APPS, APPS_ALL, APP_IDS
from olympia.constants.base import ADDON_TYPE_CHOICES_API
from olympia.constants.categories import CATEGORIES, CATEGORIES_BY_ID
from olympia.constants.promoted import PROMOTED_GROUPS, RECOMMENDED
from olympia.files.models import File, FileUpload
from olympia.files.utils import parse_addon
from olympia.promoted.models import PromotedAddon
from olympia.search.filters import AddonAppVersionQueryParam
from olympia.ratings.utils import get_grouped_ratings
from olympia.users.models import UserProfile
from olympia.versions.models import (
ApplicationsVersions,
License,
Version,
VersionPreview,
)
from .models import Addon, Preview, ReplacementAddon, attach_tags
class FileSerializer(serializers.ModelSerializer):
    """Serializer for File objects attached to a Version.

    A few legacy fields (`platform`, `is_restart_required`,
    `is_webextension`) no longer exist on the model; they are faked here and
    only exposed when the corresponding API gate is active, to keep older
    API versions backwards-compatible.
    """
    url = serializers.SerializerMethodField()
    platform = serializers.SerializerMethodField()
    status = ReverseChoiceField(choices=list(amo.STATUS_CHOICES_API.items()))
    permissions = serializers.ListField(child=serializers.CharField())
    optional_permissions = serializers.ListField(child=serializers.CharField())
    is_restart_required = serializers.SerializerMethodField()
    is_webextension = serializers.SerializerMethodField()

    class Meta:
        model = File
        fields = (
            'id',
            'created',
            'hash',
            'is_restart_required',
            'is_webextension',
            'is_mozilla_signed_extension',
            'platform',
            'size',
            'status',
            'url',
            'permissions',
            'optional_permissions',
        )

    def get_url(self, obj):
        """Absolute download URL for this file."""
        return obj.get_absolute_url()

    def to_representation(self, obj):
        data = super().to_representation(obj)
        request = self.context.get('request', None)
        # Drop each shim field unless the matching gate says this API
        # version still wants it.
        legacy_shims = (
            ('platform-shim', 'platform'),
            ('is-restart-required-shim', 'is_restart_required'),
            ('is-webextension-shim', 'is_webextension'),
        )
        for gate, field_name in legacy_shims:
            if request and not is_gate_active(request, gate):
                data.pop(field_name, None)
        return data

    def get_platform(self, obj):
        # The platform concept is gone; always report 'all' for backwards
        # compatibility.
        return 'all'

    def get_is_restart_required(self, obj):
        # All add-ons are restartless these days, so fake False for older
        # API clients.
        return False

    def get_is_webextension(self, obj):
        # All add-ons are WebExtensions these days, so fake True for older
        # API clients.
        return True
class PreviewSerializer(serializers.ModelSerializer):
    """Serializer for add-on preview images (screenshot + thumbnail).

    Also usable for VersionPreview instances, which expose the same
    attributes (see Meta note below).
    """
    caption = TranslationSerializerField()
    image_url = serializers.SerializerMethodField()
    thumbnail_url = serializers.SerializerMethodField()
    # *_dimensions come from the model; presumably [width, height] pairs -
    # confirm against the Preview model definition.
    image_size = serializers.ReadOnlyField(source='image_dimensions')
    thumbnail_size = serializers.ReadOnlyField(source='thumbnail_dimensions')

    class Meta:
        # Note: this serializer can also be used for VersionPreview.
        model = Preview
        fields = (
            'id',
            'caption',
            'image_size',
            'image_url',
            'thumbnail_size',
            'thumbnail_url',
        )

    def get_image_url(self, obj):
        """Absolute URL of the full-size preview image."""
        return absolutify(obj.image_url)

    def get_thumbnail_url(self, obj):
        """Absolute URL of the preview thumbnail."""
        return absolutify(obj.thumbnail_url)
class ESPreviewSerializer(BaseESSerializer, PreviewSerializer):
    # Because we have translated fields and dates coming from ES, we can't use
    # a regular PreviewSerializer to handle previews for ESAddonSerializer.
    # Unfortunately we also need to get the class right (it can be either
    # Preview or VersionPreview) so fake_object() implementation in this class
    # does nothing, the instance has already been created by a parent
    # serializer.
    datetime_fields = ('modified',)
    translated_fields = ('caption',)

    def fake_object(self, data):
        """Return `data` unchanged: the Preview/VersionPreview instance was
        already faked by the parent serializer (see class comment)."""
        return data
class LicenseNameSerializerField(serializers.Field):
    """Field serializing a license name.

    Builtin licenses keep their name in this repository and translate it
    through gettext, while custom licenses store translated names in the
    database. Rendering is therefore delegated to either
    GetTextTranslationSerializerField (builtin) or
    TranslationSerializerField (custom), depending on the instance.
    """

    builtin_translation_field_class = GetTextTranslationSerializerField
    custom_translation_field_class = TranslationSerializerField

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.builtin_translation_field = self.builtin_translation_field_class()
        self.custom_translation_field = self.custom_translation_field_class()

    def bind(self, field_name, parent):
        super().bind(field_name, parent)
        # Bind both delegates so whichever one get_attribute() picks behaves
        # like a normally-declared serializer field.
        for delegate in (
            self.builtin_translation_field,
            self.custom_translation_field,
        ):
            delegate.bind(field_name, parent)

    def get_attribute(self, obj):
        constant = obj._constant
        if constant:
            return self.builtin_translation_field.get_attribute(constant)
        return self.custom_translation_field.get_attribute(obj)

    def to_representation(self, obj):
        # As with TranslationSerializerField, all the work happens in
        # get_attribute(); the data is already final here.
        return obj
class ESLicenseNameSerializerField(LicenseNameSerializerField):
    """Like LicenseNameSerializerField, but uses the data from ES to avoid
    a database query for custom licenses.

    BaseESSerializer automatically changes
    TranslationSerializerField to ESTranslationSerializerField for all base
    fields on the serializer, but License name has its own special field to
    handle builtin licences so it's done separately."""
    custom_translation_field_class = ESTranslationSerializerField

    def attach_translations(self, obj, data, field_name):
        # Delegate to the ES translation field, which copies translations
        # from the ES document onto the faked object.
        return self.custom_translation_field.attach_translations(obj, data, field_name)
class LicenseSerializer(serializers.ModelSerializer):
    """Full License serializer, including the (potentially long) text."""

    is_custom = serializers.SerializerMethodField()
    name = LicenseNameSerializerField()
    text = TranslationSerializerField()
    url = serializers.SerializerMethodField()

    class Meta:
        model = License
        fields = ('id', 'is_custom', 'name', 'text', 'url')

    def get_is_custom(self, obj):
        """True when the license is custom (i.e. not one of the builtins)."""
        return not bool(obj.builtin)

    def get_url(self, obj):
        """The license's own URL if set, else the version license page."""
        if obj.url:
            return obj.url
        return self.get_version_license_url(obj)

    def get_version_license_url(self, obj):
        # license_url() lives on Version, and a single License can belong to
        # many versions, so normally we couldn't pick one here. We cheat:
        # SimpleVersionSerializer.to_representation() stashes the version
        # being serialized on `version_instance` while serializing.
        # Builtin licenses never get a version license url.
        if obj.builtin or not hasattr(obj, 'version_instance'):
            return None
        return absolutify(obj.version_instance.license_url())

    def to_representation(self, instance):
        data = super().to_representation(instance)
        request = self.context.get('request', None)
        # Older API versions did not expose `is_custom`.
        if request and is_gate_active(request, 'del-version-license-is-custom'):
            data.pop('is_custom', None)
        return data
class CompactLicenseSerializer(LicenseSerializer):
    """LicenseSerializer variant that omits the full license text, for
    listings where fetching/serializing the text would be wasteful."""

    class Meta:
        model = License
        fields = ('id', 'is_custom', 'name', 'url')
class MinimalVersionSerializer(serializers.ModelSerializer):
    """Smallest useful read-only representation of a Version."""

    file = FileSerializer(read_only=True)

    class Meta:
        model = Version
        fields = ('id', 'file', 'reviewed', 'version')
        read_only_fields = fields

    def to_representation(self, instance):
        data = super().to_representation(instance)
        request = self.context.get('request', None)
        # v3/v4 clients expect a `files` list; versions only carry a single
        # file nowadays, so shim it by wrapping the lone file in a list.
        wants_files_list = request and is_gate_active(request, 'version-files')
        if wants_files_list and 'file' in data:
            data['files'] = [data.pop('file')]
        return data
class VersionCompatabilityField(serializers.Field):
    """Field handling the `compatibility` mapping of a version: per-app
    min/max AppVersion ranges, serialized as
    {'<app>': {'min': '...', 'max': '...'}}."""

    def to_internal_value(self, data):
        """Note: this returns unsaved and incomplete ApplicationsVersions objects that
        need to have version set, and may have missing min or max AppVersion instances
        for new Version instances. (As intended - we want to be able to partially
        specify min or max and have the manifest or defaults be instead used).

        Accepts either a list of app names (meaning "default range for each
        app") or a dict mapping app name to an optional {'min', 'max'} dict.
        Raises ValidationError for unknown apps, unknown app versions, or any
        other input shape.
        """
        try:
            if isinstance(data, list):
                # if it's a list of apps, normalize into a dict first
                data = {key: {} for key in data}
            if isinstance(data, dict):
                version = self.parent.instance
                # Reuse the existing ApplicationsVersions rows when updating
                # an existing version so we only overwrite what was sent.
                existing = version.compatible_apps if version else {}
                qs = AppVersion.objects
                internal = {}
                for app_name, min_max in data.items():
                    app = amo.APPS[app_name]  # KeyError -> invalid app below.
                    apps_versions = existing.get(
                        app, ApplicationsVersions(application=app.id)
                    )

                    app_qs = qs.filter(application=app.id)
                    if 'max' in min_max:
                        apps_versions.max = app_qs.get(version=min_max['max'])
                    elif version:
                        # Updating an existing version without an explicit
                        # max: fall back to the webextension default.
                        apps_versions.max = app_qs.get(
                            version=amo.DEFAULT_WEBEXT_MAX_VERSION
                        )

                    # The '*' wildcard must never be selected as a min
                    # version, so exclude it before the min lookup.
                    app_qs = app_qs.exclude(version='*')
                    if 'min' in min_max:
                        apps_versions.min = app_qs.get(version=min_max['min'])
                    elif version:
                        # Same fallback logic as max, per-app default min.
                        apps_versions.min = app_qs.get(
                            version=amo.DEFAULT_WEBEXT_MIN_VERSIONS[app]
                        )

                    internal[app] = apps_versions
                return internal
            else:
                # if it's neither it's not a valid input
                raise exceptions.ValidationError('Invalid value')
        except KeyError:
            raise exceptions.ValidationError('Invalid app specified')
        except AppVersion.DoesNotExist:
            raise exceptions.ValidationError('Unknown app version specified')

    def to_representation(self, value):
        """Render {app: ApplicationsVersions|None} into the public dict
        shape; apps without compat info get fallback min/max values."""
        return {
            app.short: (
                {
                    'min': compat.min.version,
                    'max': compat.max.version,
                }
                if compat
                else {
                    'min': amo.D2C_MIN_VERSIONS.get(app.id, '1.0'),
                    'max': amo.FAKE_MAX_VERSION,
                }
            )
            for app, compat in value.items()
        }
class SimpleVersionSerializer(MinimalVersionSerializer):
    """Read-only Version serializer with compatibility, license (compact)
    and release notes on top of the minimal fields."""

    compatibility = VersionCompatabilityField(
        # default to just Desktop Firefox; most of the times developers don't develop
        # their WebExtensions for Android. See https://bit.ly/2QaMicU
        source='compatible_apps',
        default=serializers.CreateOnlyDefault(
            {
                amo.APPS['firefox']: ApplicationsVersions(
                    application=amo.APPS['firefox'].id
                )
            }
        ),
    )
    edit_url = serializers.SerializerMethodField()
    is_strict_compatibility_enabled = serializers.BooleanField(
        source='file.strict_compatibility', read_only=True
    )
    license = CompactLicenseSerializer()
    release_notes = TranslationSerializerField(required=False)

    class Meta:
        model = Version
        fields = (
            'id',
            'compatibility',
            'edit_url',
            'file',
            'is_strict_compatibility_enabled',
            'license',
            'release_notes',
            'reviewed',
            'version',
        )
        read_only_fields = fields

    def to_representation(self, instance):
        # Help the LicenseSerializer find the version we're currently
        # serializing (it needs it for get_version_license_url()).
        if 'license' in self.fields and instance.license:
            instance.license.version_instance = instance
        return super().to_representation(instance)

    def get_edit_url(self, obj):
        """Absolute URL to the developer hub edit page for this version."""
        return absolutify(
            obj.addon.get_dev_url('versions.edit', args=[obj.pk], prefix_only=True)
        )
class VersionSerializer(SimpleVersionSerializer):
    """Full Version serializer used for the write API: supports creating a
    version from a FileUpload (write-only `upload` field) and updating
    compatibility, license and release notes afterwards."""

    channel = ReverseChoiceField(
        choices=list(amo.CHANNEL_CHOICES_API.items()), read_only=True
    )
    # License is writeable as a builtin license pk, but rendered as the full
    # serialized license.
    license = SplitField(
        serializers.PrimaryKeyRelatedField(queryset=License.objects.builtins()),
        LicenseSerializer(),
    )
    upload = serializers.SlugRelatedField(
        slug_field='uuid', queryset=FileUpload.objects.all(), write_only=True
    )

    class Meta:
        model = Version
        fields = (
            'id',
            'channel',
            'compatibility',
            'edit_url',
            'file',
            'is_strict_compatibility_enabled',
            'license',
            'release_notes',
            'reviewed',
            'upload',
            'version',
        )
        writeable_fields = (
            'compatibility',
            'license',
            'release_notes',
            'upload',
        )
        read_only_fields = tuple(set(fields) - set(writeable_fields))

    def __init__(self, instance=None, data=serializers.empty, **kwargs):
        # `addon` is an optional extra kwarg: the add-on a new version is
        # being created for (used during parsing/validation).
        self.addon = kwargs.pop('addon', None)
        if instance and isinstance(data, dict):
            data.pop('upload', None)  # we only support upload field for create
        super().__init__(instance=instance, data=data, **kwargs)

    def validate_upload(self, value):
        """Reject uploads that don't belong to the request user, failed
        validation, or timed out while validating."""
        own_upload = (request := self.context.get('request')) and (
            request.user == value.user
        )
        if not own_upload or not value.valid or value.validation_timeout:
            raise exceptions.ValidationError('Upload is not valid.')
        return value

    def _check_blocklist(self, guid, version_string):
        """Raise ValidationError when guid/version matches a Block entry."""
        # check the guid/version isn't in the addon blocklist
        block_qs = Block.objects.filter(guid=guid) if guid else ()
        if block_qs and block_qs.first().is_version_blocked(version_string):
            msg = (
                'Version {version} matches {block_link} for this add-on. '
                'You can contact {amo_admins} for additional information.'
            )
            raise exceptions.ValidationError(
                msg.format(
                    version=version_string,
                    block_link=absolutify(reverse('blocklist.block', args=[guid])),
                    amo_admins='amo-admins@mozilla.com',
                ),
            )

    def validate(self, data):
        if not self.instance:
            # Parse the file to get and validate package data with the addon.
            # The result is kept on self.parsed_data for create() to use.
            self.parsed_data = parse_addon(
                data.get('upload'), addon=self.addon, user=self.context['request'].user
            )
            guid = self.addon.guid if self.addon else self.parsed_data.get('guid')
            self._check_blocklist(guid, self.parsed_data.get('version'))
        else:
            data.pop('upload', None)  # upload can only be set during create
        return data

    def create(self, validated_data):
        upload = validated_data.get('upload')
        # Merge manifest-derived data (from validate()) with what the client
        # sent; client-supplied values win on conflicts.
        parsed_and_validated_data = {
            **self.parsed_data,
            **validated_data,
            'license_id': validated_data['license'].id,
        }
        version = Version.from_upload(
            upload=upload,
            addon=self.addon or validated_data.get('addon'),
            channel=upload.channel,
            compatibility=validated_data.get('compatible_apps'),
            parsed_data=parsed_and_validated_data,
        )
        upload.update(addon=version.addon)
        return version

    def update(self, instance, validated_data):
        """Standard update, plus persisting compatibility changes."""
        instance = super().update(instance, validated_data)
        if 'compatible_apps' in validated_data:
            instance.set_compatible_apps(validated_data['compatible_apps'])
        return instance
class VersionListSerializer(VersionSerializer):
    """VersionSerializer variant used for list endpoints."""

    # When we're listing versions, we don't want to include the full license
    # text every time: we only do this for the version detail endpoint.
    license = CompactLicenseSerializer()
class CurrentVersionSerializer(SimpleVersionSerializer):
    """Serializer for an add-on's current version.

    For language packs hit through the detail endpoint with an `appversion`
    query parameter, the serialized version may be swapped for the latest
    public version compatible with that application version.
    """

    def to_representation(self, obj):
        addon = obj.addon
        request = self.context.get('request')
        view = self.context.get('view')
        # Because of the perf impact, the compatible-version lookup is only
        # done for langpacks on the detail API when `appversion` is passed.
        should_swap = (
            request
            and request.GET.get('appversion')
            and getattr(view, 'action', None) == 'retrieve'
            and addon.type == amo.ADDON_LPAPP
        )
        if should_swap:
            obj = self.get_current_compatible_version(addon)
        return super().to_representation(obj)

    def get_current_compatible_version(self, addon):
        """Return the latest public version compatible with the app and
        appversion passed through the request, falling back to
        addon.current_version when none is found.

        Only use on langpacks if the appversion parameter is present.
        """
        request = self.context.get('request')
        try:
            # get_values() yields (app_id, min, max); repackage min/max into
            # the {'min': ..., 'max': ...} dict shape we need.
            app_id, *min_max = AddonAppVersionQueryParam(request.GET).get_values()
            appversions = dict(zip(('min', 'max'), min_max))
        except ValueError as exc:
            raise exceptions.ParseError(str(exc))

        qs = Version.objects.latest_public_compatible_with(
            app_id, appversions
        ).filter(addon=addon)
        return qs.first() or addon.current_version
class ESCompactLicenseSerializer(BaseESSerializer, CompactLicenseSerializer):
    """Compact license serializer backed by Elasticsearch data."""

    name = ESLicenseNameSerializerField()

    translated_fields = ('name',)

    def fake_object(self, data):
        # We just pass the data as the fake object will have been created
        # before by ESAddonSerializer.fake_version_object()
        return data
class ESCurrentVersionSerializer(BaseESSerializer, CurrentVersionSerializer):
    """current_version serializer backed by Elasticsearch data."""

    license = ESCompactLicenseSerializer()

    datetime_fields = ('reviewed',)
    translated_fields = ('release_notes',)

    def fake_object(self, data):
        # We just pass the data as the fake object will have been created
        # before by ESAddonSerializer.fake_version_object()
        return data
class AddonEulaPolicySerializer(serializers.ModelSerializer):
    """Serializes only the EULA and privacy policy texts of an add-on."""

    eula = TranslationSerializerField()
    privacy_policy = TranslationSerializerField()

    class Meta:
        model = Addon
        fields = (
            'eula',
            'privacy_policy',
        )
class AddonDeveloperSerializer(BaseUserSerializer):
    """User serializer for add-on authors, adding the picture URL."""

    picture_url = serializers.SerializerMethodField()

    class Meta(BaseUserSerializer.Meta):
        fields = BaseUserSerializer.Meta.fields + ('picture_url',)
        read_only_fields = fields
class PromotedAddonSerializer(serializers.ModelSerializer):
    """Serializes the promoted group and approved apps of a PromotedAddon."""

    GROUP_CHOICES = [(group.id, group.api_name) for group in PROMOTED_GROUPS]
    apps = serializers.SerializerMethodField()
    category = ReverseChoiceField(choices=GROUP_CHOICES, source='group_id')

    class Meta:
        model = PromotedAddon
        fields = (
            'apps',
            'category',
        )

    def get_apps(self, obj):
        # Only expose applications the promotion was actually approved for.
        return [app.short for app in obj.approved_applications]
class CategoriesSerializerField(serializers.Field):
    """Maps between {app_name: [category_slug, ...]} and Category objects."""

    def to_internal_value(self, data):
        try:
            categories = []
            for app_name, category_names in data.items():
                app_cats = CATEGORIES[APPS[app_name].id]
                # We don't know the addon_type at this point, so try them all and we'll
                # drop anything that's wrong later in AddonSerializer.validate
                all_cat_slugs = set()
                for type_cats in app_cats.values():
                    categories.extend(
                        type_cats[name] for name in category_names if name in type_cats
                    )
                    all_cat_slugs.update(type_cats.keys())
                # Now double-check all the category names were found
                if not all_cat_slugs.issuperset(category_names):
                    raise exceptions.ValidationError('Invalid category name.')
            return categories
        except KeyError:
            # Raised by APPS[app_name] when the app name is unknown.
            raise exceptions.ValidationError('Invalid app name.')

    def to_representation(self, value):
        # Group categories by app short name; categories with an unknown
        # application end up grouped under the empty string.
        grouped = sorted_groupby(
            sorted(value),
            key=lambda x: getattr(amo.APP_IDS.get(x.application), 'short', ''),
        )
        return {
            app_name: [cat.slug for cat in categories]
            for app_name, categories in grouped
        }
class ContributionSerializerField(OutgoingURLField):
    """Outgoing URL field that appends AMO's UTM params to contribution URLs."""

    def to_representation(self, value):
        if not value:
            # don't add anything when it's not set.
            return value
        # Rebuild the URL with the UTM params merged into the query string,
        # preserving any pre-existing query parameters.
        parts = urlsplit(value)
        query = QueryDict(parts.query, mutable=True)
        query.update(amo.CONTRIBUTE_UTM_PARAMS)
        return super().to_representation(
            urlunsplit(
                (
                    parts.scheme,
                    parts.netloc,
                    parts.path,
                    query.urlencode(),
                    parts.fragment,
                )
            )
        )
class AddonSerializer(serializers.ModelSerializer):
    """Main serializer for Addon objects in the public API.

    Handles both read (detail/list representations) and write (add-on
    submission via the nested write-only `version` field, and updates of the
    fields listed in Meta.writeable_fields).
    """

    authors = AddonDeveloperSerializer(
        many=True, source='listed_authors', read_only=True
    )
    categories = CategoriesSerializerField(source='all_categories')
    contributions_url = ContributionSerializerField(
        source='contributions', read_only=True
    )
    current_version = CurrentVersionSerializer(read_only=True)
    description = TranslationSerializerField(required=False)
    developer_comments = TranslationSerializerField(required=False)
    edit_url = serializers.SerializerMethodField()
    has_eula = serializers.SerializerMethodField()
    has_privacy_policy = serializers.SerializerMethodField()
    homepage = OutgoingTranslationField(required=False)
    icon_url = serializers.SerializerMethodField()
    icons = serializers.SerializerMethodField()
    is_source_public = serializers.SerializerMethodField()
    is_featured = serializers.SerializerMethodField()
    name = TranslationSerializerField(required=False)
    previews = PreviewSerializer(many=True, source='current_previews', read_only=True)
    promoted = PromotedAddonSerializer(read_only=True)
    ratings = serializers.SerializerMethodField()
    ratings_url = serializers.SerializerMethodField()
    review_url = serializers.SerializerMethodField()
    status = ReverseChoiceField(
        choices=list(amo.STATUS_CHOICES_API.items()), read_only=True
    )
    summary = TranslationSerializerField(required=False)
    support_email = TranslationSerializerField(required=False)
    support_url = OutgoingTranslationField(required=False)
    tags = serializers.SerializerMethodField()
    type = ReverseChoiceField(
        choices=list(amo.ADDON_TYPE_CHOICES_API.items()), read_only=True
    )
    url = serializers.SerializerMethodField()
    version = VersionSerializer(write_only=True)
    versions_url = serializers.SerializerMethodField()

    class Meta:
        model = Addon
        fields = (
            'id',
            'authors',
            'average_daily_users',
            'categories',
            'contributions_url',
            'created',
            'current_version',
            'default_locale',
            'description',
            'developer_comments',
            'edit_url',
            'guid',
            'has_eula',
            'has_privacy_policy',
            'homepage',
            'icon_url',
            'icons',
            'is_disabled',
            'is_experimental',
            'is_featured',
            'is_source_public',
            'last_updated',
            'name',
            'previews',
            'promoted',
            'ratings',
            'ratings_url',
            'requires_payment',
            'review_url',
            'slug',
            'status',
            'summary',
            'support_email',
            'support_url',
            'tags',
            'type',
            'url',
            'version',
            'versions_url',
            'weekly_downloads',
        )
        # Fields that may be submitted on write; everything else is read-only.
        writeable_fields = (
            'categories',
            'description',
            'developer_comments',
            'homepage',
            'name',
            'slug',
            'summary',
            'support_email',
            'support_url',
            'version',
        )
        read_only_fields = tuple(set(fields) - set(writeable_fields))

    def __init__(self, instance=None, data=serializers.empty, **kwargs):
        if instance and isinstance(data, dict):
            data.pop('version', None)  # we only support version field for create
        super().__init__(instance=instance, data=data, **kwargs)

    def to_representation(self, obj):
        # Drop/keep backwards-compatibility shim fields depending on which
        # API gates are active for the request.
        data = super().to_representation(obj)
        request = self.context.get('request', None)
        if request and is_gate_active(request, 'del-addons-created-field'):
            data.pop('created', None)
        if request and not is_gate_active(request, 'is-source-public-shim'):
            data.pop('is_source_public', None)
        if request and not is_gate_active(request, 'is-featured-addon-shim'):
            data.pop('is_featured', None)
        return data

    def get_has_eula(self, obj):
        # Prefer the annotated/cached attribute if present, fall back to the
        # actual eula field.
        return bool(getattr(obj, 'has_eula', obj.eula))

    def get_is_featured(self, obj):
        # featured is gone, but we need to keep the API backwards compatible so
        # fake it with promoted status instead.
        return bool(obj.promoted and obj.promoted.group == RECOMMENDED)

    def get_has_privacy_policy(self, obj):
        return bool(getattr(obj, 'has_privacy_policy', obj.privacy_policy))

    def get_tags(self, obj):
        if not hasattr(obj, 'tag_list'):
            attach_tags([obj])
        # attach_tags() might not have attached anything to the addon, if it
        # had no tags.
        return getattr(obj, 'tag_list', [])

    def get_url(self, obj):
        # Use absolutify(get_detail_url()), get_absolute_url() calls
        # get_url_path() which does an extra check on current_version that is
        # annoying in subclasses which don't want to load that version.
        return absolutify(obj.get_detail_url())

    def get_edit_url(self, obj):
        return absolutify(obj.get_dev_url())

    def get_ratings_url(self, obj):
        return absolutify(obj.ratings_url)

    def get_versions_url(self, obj):
        return absolutify(obj.versions_url)

    def get_review_url(self, obj):
        return absolutify(reverse('reviewers.review', args=[obj.pk]))

    def get_icon_url(self, obj):
        return absolutify(obj.get_icon_url(64))

    def get_icons(self, obj):
        # One entry per supported icon size, keyed by the size as a string.
        get_icon = obj.get_icon_url
        return {str(size): absolutify(get_icon(size)) for size in amo.ADDON_ICON_SIZES}

    def get_ratings(self, obj):
        ratings = {
            'average': obj.average_rating,
            'bayesian_average': obj.bayesian_rating,
            'count': obj.total_ratings,
            'text_count': obj.text_ratings_count,
        }
        # Per-score counts are only included when requested (and available).
        if (request := self.context.get('request', None)) and (
            grouped := get_grouped_ratings(request, obj)
        ):
            ratings['grouped_counts'] = grouped
        return ratings

    def get_is_source_public(self, obj):
        # Shim: always False, kept for API backwards-compatibility.
        return False

    def validate(self, data):
        if not self.instance:
            # On create the type comes from the parsed upload (set by the
            # nested version serializer's validate()).
            addon_type = self.fields['version'].parsed_data['type']
        else:
            addon_type = self.instance.type
        if 'all_categories' in data:
            # filter out categories for the wrong type.
            # There might be dupes, e.g. "other" is a category for 2 types
            slugs = {cat.slug for cat in data['all_categories']}
            data['all_categories'] = [
                cat for cat in data['all_categories'] if cat.type == addon_type
            ]
            # double check we didn't lose any
            if slugs != {cat.slug for cat in data['all_categories']}:
                raise exceptions.ValidationError(
                    {'categories': 'Invalid category name.'}
                )
        return data

    def create(self, validated_data):
        """Create the Addon plus its first Version from the upload."""
        upload = validated_data.get('version').get('upload')
        addon = Addon.initialize_addon_from_upload(
            data={**self.fields['version'].parsed_data, **validated_data},
            upload=upload,
            channel=upload.channel,
            user=self.context['request'].user,
        )
        # Add categories
        addon.set_categories(validated_data.get('all_categories', []))
        self.fields['version'].create(
            {**validated_data.get('version', {}), 'addon': addon}
        )
        activity.log_create(amo.LOG.CREATE_ADDON, addon)
        olympia.core.logger.getLogger('z.addons').info(
            f'New addon {addon!r} from {upload!r}'
        )
        # Listed submissions with complete metadata go straight to the
        # nomination queue.
        if (
            addon.status == amo.STATUS_NULL
            and addon.has_complete_metadata()
            and upload.channel == amo.RELEASE_CHANNEL_LISTED
        ):
            addon.update(status=amo.STATUS_NOMINATED)
        return addon

    def update(self, instance, validated_data):
        instance = super().update(instance, validated_data)
        if 'all_categories' in validated_data:
            del instance.all_categories  # super.update will have set it.
            instance.set_categories(validated_data['all_categories'])
        return instance
class AddonSerializerWithUnlistedData(AddonSerializer):
    """AddonSerializer variant that also exposes the latest unlisted version."""

    latest_unlisted_version = SimpleVersionSerializer(read_only=True)

    class Meta:
        model = Addon
        fields = AddonSerializer.Meta.fields + ('latest_unlisted_version',)
        read_only_fields = tuple(
            set(fields) - set(AddonSerializer.Meta.writeable_fields)
        )
class SimpleAddonSerializer(AddonSerializer):
    """Minimal add-on representation: id, slug, name and icon only."""

    class Meta:
        model = Addon
        fields = ('id', 'slug', 'name', 'icon_url')
class ESAddonSerializer(BaseESSerializer, AddonSerializer):
    """AddonSerializer backed by Elasticsearch documents.

    Rebuilds unsaved model instances ("fake objects") from the indexed data
    so the regular serializer machinery can run without hitting the database.
    """

    # Override various fields for related objects which we don't want to
    # serialize the same way as the regular serializer does (usually because
    # some of the data is not indexed in ES).
    authors = BaseUserSerializer(many=True, source='listed_authors')
    current_version = ESCurrentVersionSerializer()
    previews = ESPreviewSerializer(many=True, source='current_previews')
    _score = serializers.SerializerMethodField()

    datetime_fields = ('created', 'last_updated', 'modified')
    translated_fields = (
        'name',
        'description',
        'developer_comments',
        'homepage',
        'summary',
        'support_email',
        'support_url',
    )

    class Meta:
        model = Addon
        fields = AddonSerializer.Meta.fields + ('_score',)

    def fake_preview_object(self, obj, data, model_class=Preview):
        # This is what ESPreviewSerializer.fake_object() would do, but we do
        # it here and make that fake_object() method a no-op in order to have
        # access to the right model_class to use - VersionPreview for static
        # themes, Preview for the rest.
        preview = model_class(id=data['id'], sizes=data.get('sizes', {}))
        preview.addon = obj
        preview.version = obj.current_version
        preview_serializer = self.fields['previews'].child
        # Attach base attributes that have the same name/format in ES and in
        # the model.
        preview_serializer._attach_fields(preview, data, ('modified',))
        # Attach translations.
        preview_serializer._attach_translations(
            preview, data, preview_serializer.translated_fields
        )
        return preview

    def fake_file_object(self, obj, data):
        # Build an unsaved File instance from the indexed file data.
        file_ = File(
            id=data['id'],
            created=self.handle_date(data['created']),
            hash=data['hash'],
            filename=data['filename'],
            is_mozilla_signed_extension=data.get('is_mozilla_signed_extension'),
            size=data['size'],
            status=data['status'],
            strict_compatibility=data.get('strict_compatibility', False),
            version=obj,
        )
        # `webext_permissions_list` is the legacy key for permissions in ES.
        file_.permissions = data.get(
            'permissions', data.get('webext_permissions_list', [])
        )
        file_.optional_permissions = data.get('optional_permissions', [])
        return file_

    def fake_version_object(self, obj, data, channel):
        # Build an unsaved Version (with file, compat info, translations and
        # license) from the indexed version data; returns None if no data.
        if data:
            version = Version(
                addon=obj,
                id=data['id'],
                reviewed=self.handle_date(data['reviewed']),
                version=data['version'],
                channel=channel,
            )
            version.file = self.fake_file_object(version, data['files'][0])

            # In ES we store integers for the appversion info, we need to
            # convert it back to strings.
            compatible_apps = {}
            for app_id, compat_dict in data.get('compatible_apps', {}).items():
                app_name = APPS_ALL[int(app_id)]
                compatible_apps[app_name] = ApplicationsVersions(
                    min=AppVersion(version=compat_dict.get('min_human', '')),
                    max=AppVersion(version=compat_dict.get('max_human', '')),
                )
            version.compatible_apps = compatible_apps
            version_serializer = self.fields.get('current_version') or None
            if version_serializer:
                version_serializer._attach_translations(
                    version, data, version_serializer.translated_fields
                )
            if 'license' in data and version_serializer:
                license_serializer = version_serializer.fields['license']
                version.license = License(id=data['license']['id'])
                license_serializer._attach_fields(
                    version.license, data['license'], ('builtin', 'url')
                )
                license_serializer._attach_translations(
                    version.license, data['license'], ('name',)
                )
            else:
                version.license = None
        else:
            version = None
        return version

    def fake_object(self, data):
        """Create a fake instance of Addon and related models from ES data."""
        obj = Addon(id=data['id'], slug=data['slug'])

        # Attach base attributes that have the same name/format in ES and in
        # the model.
        self._attach_fields(
            obj,
            data,
            (
                'average_daily_users',
                'bayesian_rating',
                'contributions',
                'created',
                'default_locale',
                'guid',
                'has_eula',
                'has_privacy_policy',
                'hotness',
                'icon_hash',
                'icon_type',
                'is_experimental',
                'last_updated',
                'modified',
                'requires_payment',
                'slug',
                'status',
                'type',
                'weekly_downloads',
            ),
        )

        # Attach attributes that do not have the same name/format in ES.
        obj.tag_list = data.get('tags', [])
        obj.all_categories = [
            CATEGORIES_BY_ID[cat_id] for cat_id in data.get('category', [])
        ]

        # Not entirely accurate, but enough in the context of the search API.
        obj.disabled_by_user = data.get('is_disabled', False)

        # Attach translations (they require special treatment).
        self._attach_translations(obj, data, self.translated_fields)

        # Attach related models (also faking them). `current_version` is a
        # property we can't write to, so we use the underlying field which
        # begins with an underscore.
        data_version = data.get('current_version') or {}
        obj._current_version = self.fake_version_object(
            obj, data_version, amo.RELEASE_CHANNEL_LISTED
        )
        obj._current_version_id = data_version.get('id')

        data_authors = data.get('listed_authors', [])
        obj.listed_authors = [
            UserProfile(
                id=data_author['id'],
                display_name=data_author['name'],
                username=data_author['username'],
                is_public=data_author.get('is_public', False),
            )
            for data_author in data_authors
        ]

        is_static_theme = data.get('type') == amo.ADDON_STATICTHEME
        preview_model_class = VersionPreview if is_static_theme else Preview
        obj.current_previews = [
            self.fake_preview_object(obj, preview_data, model_class=preview_model_class)
            for preview_data in data.get('previews', [])
        ]

        promoted = data.get('promoted', None)
        if promoted:
            # set .approved_for_groups cached_property because it's used in
            # .approved_applications.
            # NOTE(review): approved_for_apps may be None if the indexed doc
            # lacks 'approved_for_apps'; the generator below would then fail
            # when consumed — confirm ES always stores a list here.
            approved_for_apps = promoted.get('approved_for_apps')
            obj.promoted = PromotedAddon(
                addon=obj,
                approved_application_ids=approved_for_apps,
                group_id=promoted['group_id'],
            )
            # we can safely regenerate these tuples because
            # .approved_applications only cares about the current group
            obj._current_version.approved_for_groups = (
                (obj.promoted.group, APP_IDS.get(app_id))
                for app_id in approved_for_apps
            )
        else:
            obj.promoted = None

        ratings = data.get('ratings', {})
        obj.average_rating = ratings.get('average')
        obj.total_ratings = ratings.get('count')
        obj.text_ratings_count = ratings.get('text_count')

        return obj

    def get__score(self, obj):
        # es_meta is added by BaseESSerializer.to_representation() before DRF's
        # to_representation() is called, so it's present on all objects.
        return obj._es_meta['score']

    def get_ratings(self, obj):
        # Unlike the DB-backed serializer, no grouped counts are available.
        return {
            'average': obj.average_rating,
            'bayesian_average': obj.bayesian_rating,
            'count': obj.total_ratings,
            'text_count': obj.text_ratings_count,
        }

    def to_representation(self, obj):
        data = super().to_representation(obj)
        request = self.context.get('request')
        # _score is only exposed when the corresponding gate is active.
        if (
            request
            and '_score' in data
            and not is_gate_active(request, 'addons-search-_score-field')
        ):
            data.pop('_score')
        return data
class ESAddonAutoCompleteSerializer(ESAddonSerializer):
    """Tiny ES-backed serializer used for autocomplete suggestions."""

    class Meta(ESAddonSerializer.Meta):
        fields = ('id', 'icon_url', 'name', 'promoted', 'type', 'url')
        model = Addon

    def get_url(self, obj):
        # Addon.get_absolute_url() calls get_url_path(), which wants
        # _current_version_id to exist, but that's just a safeguard. We don't
        # care and don't want to fetch the current version field to improve
        # perf, so give it a fake one.
        obj._current_version_id = 1
        return obj.get_absolute_url()
class StaticCategorySerializer(serializers.Serializer):
    """Serializes a `StaticCategory` as found in constants.categories"""

    id = serializers.IntegerField()
    name = serializers.CharField()
    slug = serializers.CharField()
    application = serializers.SerializerMethodField()
    misc = serializers.BooleanField()
    type = serializers.SerializerMethodField()
    weight = serializers.IntegerField()
    description = serializers.CharField()

    def get_application(self, obj):
        # Expose the app's short name rather than its numeric id.
        return APPS_ALL[obj.application].short

    def get_type(self, obj):
        # Expose the add-on type's API name rather than its numeric id.
        return ADDON_TYPE_CHOICES_API[obj.type]
class LanguageToolsSerializer(AddonSerializer):
    """Trimmed-down add-on serializer for the language tools API."""

    target_locale = serializers.CharField()
    current_compatible_version = serializers.SerializerMethodField()

    class Meta:
        model = Addon
        fields = (
            'id',
            'current_compatible_version',
            'default_locale',
            'guid',
            'name',
            'slug',
            'target_locale',
            'type',
            'url',
        )

    def get_current_compatible_version(self, obj):
        # `compatible_versions` is expected to be set by the view's queryset
        # when the appversion filter was used; otherwise it's absent.
        compatible_versions = getattr(obj, 'compatible_versions', None)
        if compatible_versions is not None:
            data = MinimalVersionSerializer(
                compatible_versions, context=self.context, many=True
            ).data
            try:
                # 99% of the cases there will only be one result, since most
                # language packs are automatically uploaded for a given app
                # version. If there are more, pick the most recent one.
                return data[0]
            except IndexError:
                # This should not happen, because the queryset in the view is
                # supposed to filter results to only return add-ons that do
                # have at least one compatible version, but let's not fail
                # too loudly if the unthinkable happens...
                pass
        return None

    def to_representation(self, obj):
        data = super().to_representation(obj)
        request = self.context['request']
        # current_compatible_version only makes sense when an appversion
        # query parameter was provided.
        if (
            AddonAppVersionQueryParam.query_param not in request.GET
            and 'current_compatible_version' in data
        ):
            data.pop('current_compatible_version')
        if request and is_gate_active(request, 'addons-locale_disambiguation-shim'):
            # Shim: field removed, kept as null for API backwards-compat.
            data['locale_disambiguation'] = None
        return data
class VersionBasketSerializer(SimpleVersionSerializer):
    """Version serializer for basket sync payloads."""

    class Meta:
        model = Version
        fields = ('id', 'compatibility', 'is_strict_compatibility_enabled', 'version')
class AddonBasketSyncSerializer(AddonSerializerWithUnlistedData):
    """Serializes add-ons for synchronization with the basket service."""

    # We want to send all authors to basket, not just listed ones, and have
    # the full basket-specific serialization.
    authors = UserProfileBasketSyncSerializer(many=True)
    current_version = VersionBasketSerializer()
    is_recommended = serializers.SerializerMethodField()
    latest_unlisted_version = VersionBasketSerializer()
    name = serializers.SerializerMethodField()

    class Meta:
        model = Addon
        fields = (
            'authors',
            'average_daily_users',
            'categories',
            'current_version',
            'default_locale',
            'guid',
            'id',
            'is_disabled',
            'is_recommended',
            'last_updated',
            'latest_unlisted_version',
            'name',
            'ratings',
            'slug',
            'status',
            'type',
        )
        read_only_fields = fields

    def get_name(self, obj):
        # Basket doesn't want translations, we run the serialization task under
        # the add-on default locale so we can just return the name as string.
        return str(obj.name)

    def get_is_recommended(self, obj):
        # Borrow the logic from is_featured so we don't have to define it twice
        return self.get_is_featured(obj)
class ReplacementAddonSerializer(serializers.ModelSerializer):
    """Serializes a ReplacementAddon as its guid plus replacement guid(s).

    The replacement path may point at a single add-on or at a collection;
    either is resolved to a list of add-on guids. External URLs resolve to
    an empty list.
    """

    replacement = serializers.SerializerMethodField()
    ADDON_PATH_REGEX = r"""/addon/(?P<addon_id>[^/<>"']+)/$"""
    COLLECTION_PATH_REGEX = (
        r"""/collections/(?P<user_id>[^/<>"']+)/(?P<coll_slug>[^/]+)/$"""
    )

    class Meta:
        model = ReplacementAddon
        fields = ('guid', 'replacement')

    def _get_addon_guid(self, addon_id):
        # Resolve a public add-on by pk or slug; unknown ids yield no guids.
        queryset = Addon.objects.public().id_or_slug(addon_id)
        try:
            return [queryset.get().guid]
        except Addon.DoesNotExist:
            return []

    def _get_collection_guids(self, user_id, collection_slug):
        # A purely numeric user_id is treated as a pk, anything else as a
        # username.
        lookup = {'slug': collection_slug, 'listed': True}
        if isinstance(user_id, str) and not user_id.isdigit():
            lookup['author__username'] = user_id
        else:
            lookup['author'] = user_id
        try:
            collection = Collection.objects.get(**lookup)
        except Collection.DoesNotExist:
            return []
        # Only expose guids of approved add-ons from the collection.
        valid_q = Addon.objects.get_queryset().valid_q([amo.STATUS_APPROVED])
        return list(collection.addons.filter(valid_q).values_list('guid', flat=True))

    def get_replacement(self, obj):
        if obj.has_external_url():
            # It's an external url so no guids.
            return []
        if match := re.search(self.ADDON_PATH_REGEX, obj.path):
            return self._get_addon_guid(match.group('addon_id'))
        if match := re.search(self.COLLECTION_PATH_REGEX, obj.path):
            return self._get_collection_guids(
                match.group('user_id'), match.group('coll_slug')
            )
        return []
import re
from urllib.parse import urlsplit, urlunsplit
from django.http.request import QueryDict
from django.urls import reverse
from rest_framework import exceptions, serializers
import olympia.core.logger
from olympia import activity, amo
from olympia.accounts.serializers import (
BaseUserSerializer,
UserProfileBasketSyncSerializer,
)
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.utils import sorted_groupby
from olympia.api.fields import (
ESTranslationSerializerField,
GetTextTranslationSerializerField,
OutgoingTranslationField,
OutgoingURLField,
ReverseChoiceField,
SplitField,
TranslationSerializerField,
)
from olympia.api.serializers import BaseESSerializer
from olympia.api.utils import is_gate_active
from olympia.applications.models import AppVersion
from olympia.bandwagon.models import Collection
from olympia.blocklist.models import Block
from olympia.constants.applications import APPS, APPS_ALL, APP_IDS
from olympia.constants.base import ADDON_TYPE_CHOICES_API
from olympia.constants.categories import CATEGORIES, CATEGORIES_BY_ID
from olympia.constants.promoted import PROMOTED_GROUPS, RECOMMENDED
from olympia.files.models import File, FileUpload
from olympia.files.utils import parse_addon
from olympia.promoted.models import PromotedAddon
from olympia.search.filters import AddonAppVersionQueryParam
from olympia.ratings.utils import get_grouped_ratings
from olympia.users.models import UserProfile
from olympia.versions.models import (
ApplicationsVersions,
License,
Version,
VersionPreview,
)
from .models import Addon, Preview, ReplacementAddon, attach_tags
class FileSerializer(serializers.ModelSerializer):
    """Serializes a File.

    `platform`, `is_restart_required` and `is_webextension` are
    backwards-compatibility shims returning fixed values; they are only
    included when the matching API gate is active.
    """

    url = serializers.SerializerMethodField()
    platform = serializers.SerializerMethodField()
    status = ReverseChoiceField(choices=list(amo.STATUS_CHOICES_API.items()))
    permissions = serializers.ListField(child=serializers.CharField())
    optional_permissions = serializers.ListField(child=serializers.CharField())
    is_restart_required = serializers.SerializerMethodField()
    is_webextension = serializers.SerializerMethodField()

    class Meta:
        model = File
        fields = (
            'id',
            'created',
            'hash',
            'is_restart_required',
            'is_webextension',
            'is_mozilla_signed_extension',
            'platform',
            'size',
            'status',
            'url',
            'permissions',
            'optional_permissions',
        )

    def get_url(self, obj):
        return obj.get_absolute_url()

    def to_representation(self, obj):
        # Drop shim fields unless the corresponding API gate is active.
        data = super().to_representation(obj)
        request = self.context.get('request', None)
        if request and not is_gate_active(request, 'platform-shim'):
            data.pop('platform', None)
        if request and not is_gate_active(request, 'is-restart-required-shim'):
            data.pop('is_restart_required', None)
        if request and not is_gate_active(request, 'is-webextension-shim'):
            data.pop('is_webextension', None)
        return data

    def get_platform(self, obj):
        # Shim: files are platform-agnostic.
        return 'all'

    def get_is_restart_required(self, obj):
        # Shim: webextensions never require a restart.
        return False

    def get_is_webextension(self, obj):
        # Shim: all files are webextensions.
        return True
class PreviewSerializer(serializers.ModelSerializer):
    """Serializes a preview image with absolute URLs and pixel sizes."""

    caption = TranslationSerializerField()
    image_url = serializers.SerializerMethodField()
    thumbnail_url = serializers.SerializerMethodField()
    image_size = serializers.ReadOnlyField(source='image_dimensions')
    thumbnail_size = serializers.ReadOnlyField(source='thumbnail_dimensions')

    class Meta:
        # Model is Preview here, but the serializer is compatible with
        # VersionPreview as well (see ESPreviewSerializer below).
        model = Preview
        fields = (
            'id',
            'caption',
            'image_size',
            'image_url',
            'thumbnail_size',
            'thumbnail_url',
        )

    def get_image_url(self, obj):
        return absolutify(obj.image_url)

    def get_thumbnail_url(self, obj):
        return absolutify(obj.thumbnail_url)
class ESPreviewSerializer(BaseESSerializer, PreviewSerializer):
    # a regular PreviewSerializer to handle previews for ESAddonSerializer.
    # Unfortunately we also need to get the class right (it can be either
    # Preview or VersionPreview) so fake_object() implementation in this class
    # does nothing, the instance has already been created by a parent
    # serializer (see ESAddonSerializer.fake_preview_object()).
    datetime_fields = ('modified',)
    translated_fields = ('caption',)

    def fake_object(self, data):
        # No-op: the fake instance was built by the parent serializer.
        return data
class LicenseNameSerializerField(serializers.Field):
    """Field for license names.

    Builtin licenses (obj._constant set) use gettext-based translations,
    custom licenses use database translations, so this delegates to one of
    two child fields depending on the license.
    """

    builtin_translation_field_class = GetTextTranslationSerializerField
    custom_translation_field_class = TranslationSerializerField

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.builtin_translation_field = self.builtin_translation_field_class()
        self.custom_translation_field = self.custom_translation_field_class()

    def bind(self, field_name, parent):
        # Bind the child fields alongside this one so they can resolve data.
        super().bind(field_name, parent)
        self.builtin_translation_field.bind(field_name, parent)
        self.custom_translation_field.bind(field_name, parent)

    def get_attribute(self, obj):
        if obj._constant:
            # Builtin license: translate the constant via gettext.
            return self.builtin_translation_field.get_attribute(obj._constant)
        else:
            # Custom license: use the stored database translation.
            return self.custom_translation_field.get_attribute(obj)

    def to_representation(self, obj):
        # Like TranslationSerializerField, the bulk of the logic is in
        # get_attribute(), we just have to return the data at this point.
        return obj
class ESLicenseNameSerializerField(LicenseNameSerializerField):
    """LicenseNameSerializerField whose custom translations come from ES."""

    custom_translation_field_class = ESTranslationSerializerField

    def attach_translations(self, obj, data, field_name):
        # Delegate to the ES translation field to attach indexed translations.
        return self.custom_translation_field.attach_translations(obj, data, field_name)
class LicenseSerializer(serializers.ModelSerializer):
    """Serializes a License, with name/text translations and a URL."""

    is_custom = serializers.SerializerMethodField()
    name = LicenseNameSerializerField()
    text = TranslationSerializerField()
    url = serializers.SerializerMethodField()

    class Meta:
        model = License
        fields = ('id', 'is_custom', 'name', 'text', 'url')

    def get_is_custom(self, obj):
        # Builtin licenses have a non-zero `builtin` value.
        return not bool(obj.builtin)

    def get_url(self, obj):
        return obj.url or self.get_version_license_url(obj)

    def get_version_license_url(self, obj):
        # We need the version associated with the license, because that's
        # where the license URL lives. Rather than passing the version to the
        # serializer, we cheat and use `instance.version_instance` which is
        # set by SimpleVersionSerializer.to_representation() while serializing.
        # Only get the version license url for non-builtin licenses.
        if not obj.builtin and hasattr(obj, 'version_instance'):
            return absolutify(obj.version_instance.license_url())
        return None

    def to_representation(self, instance):
        data = super().to_representation(instance)
        request = self.context.get('request', None)
        # Shim: old API versions did not expose is_custom.
        if request and is_gate_active(request, 'del-version-license-is-custom'):
            data.pop('is_custom', None)
        return data
class CompactLicenseSerializer(LicenseSerializer):
    """LicenseSerializer without the (potentially huge) license text."""

    class Meta:
        model = License
        fields = ('id', 'is_custom', 'name', 'url')
class MinimalVersionSerializer(serializers.ModelSerializer):
    """Smallest useful version representation: id, file, reviewed, version."""

    file = FileSerializer(read_only=True)

    class Meta:
        model = Version
        fields = ('id', 'file', 'reviewed', 'version')
        read_only_fields = fields

    def to_representation(self, instance):
        repr = super().to_representation(instance)
        request = self.context.get('request', None)
        if 'file' in repr and request and is_gate_active(request, 'version-files'):
            # In v3/v4 files is expected to be a list but now we only have one file.
            repr['files'] = [repr.pop('file')]
        return repr
class VersionCompatabilityField(serializers.Field):
    """Compatibility field: {app short name: {'min': ..., 'max': ...}}.

    Accepts either a list of app names (defaults applied for min/max) or a
    dict mapping app names to min/max version strings.
    """

    def to_internal_value(self, data):
        """Convert client input into {application: ApplicationsVersions}."""
        try:
            if isinstance(data, list):
                # if it's a list of apps, normalize into a dict first
                data = {key: {} for key in data}
            if isinstance(data, dict):
                version = self.parent.instance
                # On update, start from the existing compat objects so bounds
                # that are omitted keep their current values.
                existing = version.compatible_apps if version else {}
                qs = AppVersion.objects
                internal = {}
                for app_name, min_max in data.items():
                    app = amo.APPS[app_name]
                    apps_versions = existing.get(
                        app, ApplicationsVersions(application=app.id)
                    )
                    app_qs = qs.filter(application=app.id)
                    if 'max' in min_max:
                        apps_versions.max = app_qs.get(version=min_max['max'])
                    elif version:
                        apps_versions.max = app_qs.get(
                            version=amo.DEFAULT_WEBEXT_MAX_VERSION
                        )
                    # '*' is not a valid minimum version.
                    app_qs = app_qs.exclude(version='*')
                    if 'min' in min_max:
                        apps_versions.min = app_qs.get(version=min_max['min'])
                    elif version:
                        apps_versions.min = app_qs.get(
                            version=amo.DEFAULT_WEBEXT_MIN_VERSIONS[app]
                        )

                    internal[app] = apps_versions
                return internal
            else:
                raise exceptions.ValidationError('Invalid value')
        except KeyError:
            # Unknown app name in amo.APPS or DEFAULT_WEBEXT_MIN_VERSIONS.
            raise exceptions.ValidationError('Invalid app specified')
        except AppVersion.DoesNotExist:
            raise exceptions.ValidationError('Unknown app version specified')

    def to_representation(self, value):
        # Apps without compat info fall back to permissive defaults.
        return {
            app.short: (
                {
                    'min': compat.min.version,
                    'max': compat.max.version,
                }
                if compat
                else {
                    'min': amo.D2C_MIN_VERSIONS.get(app.id, '1.0'),
                    'max': amo.FAKE_MAX_VERSION,
                }
            )
            for app, compat in value.items()
        }
class SimpleVersionSerializer(MinimalVersionSerializer):
    """Read-only version serializer with compat, license and release notes."""

    compatibility = VersionCompatabilityField(
        # On create, default to Firefox (desktop) compatibility only —
        # presumably because developers rarely target Android with
        # their WebExtensions for Android. See https://bit.ly/2QaMicU
        # (NOTE(review): original comment was truncated; confirm rationale.)
        source='compatible_apps',
        default=serializers.CreateOnlyDefault(
            {
                amo.APPS['firefox']: ApplicationsVersions(
                    application=amo.APPS['firefox'].id
                )
            }
        ),
    )
    edit_url = serializers.SerializerMethodField()
    is_strict_compatibility_enabled = serializers.BooleanField(
        source='file.strict_compatibility', read_only=True
    )
    license = CompactLicenseSerializer()
    release_notes = TranslationSerializerField(required=False)

    class Meta:
        model = Version
        fields = (
            'id',
            'compatibility',
            'edit_url',
            'file',
            'is_strict_compatibility_enabled',
            'license',
            'release_notes',
            'reviewed',
            'version',
        )
        read_only_fields = fields

    def to_representation(self, instance):
        # Help the LicenseSerializer find the version we're currently serializing.
        if 'license' in self.fields and instance.license:
            instance.license.version_instance = instance
        return super().to_representation(instance)

    def get_edit_url(self, obj):
        return absolutify(
            obj.addon.get_dev_url('versions.edit', args=[obj.pk], prefix_only=True)
        )
class VersionSerializer(SimpleVersionSerializer):
channel = ReverseChoiceField(
choices=list(amo.CHANNEL_CHOICES_API.items()), read_only=True
)
license = SplitField(
serializers.PrimaryKeyRelatedField(queryset=License.objects.builtins()),
LicenseSerializer(),
)
upload = serializers.SlugRelatedField(
slug_field='uuid', queryset=FileUpload.objects.all(), write_only=True
)
class Meta:
model = Version
fields = (
'id',
'channel',
'compatibility',
'edit_url',
'file',
'is_strict_compatibility_enabled',
'license',
'release_notes',
'reviewed',
'upload',
'version',
)
writeable_fields = (
'compatibility',
'license',
'release_notes',
'upload',
)
read_only_fields = tuple(set(fields) - set(writeable_fields))
def __init__(self, instance=None, data=serializers.empty, **kwargs):
self.addon = kwargs.pop('addon', None)
if instance and isinstance(data, dict):
data.pop('upload', None)
super().__init__(instance=instance, data=data, **kwargs)
def validate_upload(self, value):
own_upload = (request := self.context.get('request')) and (
request.user == value.user
)
if not own_upload or not value.valid or value.validation_timeout:
raise exceptions.ValidationError('Upload is not valid.')
return value
def _check_blocklist(self, guid, version_string):
block_qs = Block.objects.filter(guid=guid) if guid else ()
if block_qs and block_qs.first().is_version_blocked(version_string):
msg = (
'Version {version} matches {block_link} for this add-on. '
'You can contact {amo_admins} for additional information.'
)
raise exceptions.ValidationError(
msg.format(
version=version_string,
block_link=absolutify(reverse('blocklist.block', args=[guid])),
amo_admins='amo-admins@mozilla.com',
),
)
def validate(self, data):
if not self.instance:
# Parse the file to get and validate package data with the addon.
self.parsed_data = parse_addon(
data.get('upload'), addon=self.addon, user=self.context['request'].user
)
guid = self.addon.guid if self.addon else self.parsed_data.get('guid')
self._check_blocklist(guid, self.parsed_data.get('version'))
else:
data.pop('upload', None) # upload can only be set during create
return data
def create(self, validated_data):
upload = validated_data.get('upload')
parsed_and_validated_data = {
**self.parsed_data,
**validated_data,
'license_id': validated_data['license'].id,
}
version = Version.from_upload(
upload=upload,
addon=self.addon or validated_data.get('addon'),
channel=upload.channel,
compatibility=validated_data.get('compatible_apps'),
parsed_data=parsed_and_validated_data,
)
upload.update(addon=version.addon)
return version
def update(self, instance, validated_data):
instance = super().update(instance, validated_data)
if 'compatible_apps' in validated_data:
instance.set_compatible_apps(validated_data['compatible_apps'])
return instance
class VersionListSerializer(VersionSerializer):
# When we're listing versions, we don't want to include the full license
# text every time: we only do this for the version detail endpoint.
license = CompactLicenseSerializer()
class CurrentVersionSerializer(SimpleVersionSerializer):
def to_representation(self, obj):
# If the add-on is a langpack, and `appversion` is passed, try to
# determine the latest public compatible version and replace the obj
# with the result. Because of the perf impact, only done for langpacks
# in the detail API.
request = self.context.get('request')
view = self.context.get('view')
addon = obj.addon
if (
request
and request.GET.get('appversion')
and getattr(view, 'action', None) == 'retrieve'
and addon.type == amo.ADDON_LPAPP
):
obj = self.get_current_compatible_version(addon)
return super().to_representation(obj)
def get_current_compatible_version(self, addon):
request = self.context.get('request')
try:
# AddonAppVersionQueryParam.get_values() returns (app_id, min, max)
# but we want {'min': min, 'max': max}.
value = AddonAppVersionQueryParam(request.GET).get_values()
application = value[0]
appversions = dict(zip(('min', 'max'), value[1:]))
except ValueError as exc:
raise exceptions.ParseError(str(exc))
version_qs = Version.objects.latest_public_compatible_with(
application, appversions
).filter(addon=addon)
return version_qs.first() or addon.current_version
class ESCompactLicenseSerializer(BaseESSerializer, CompactLicenseSerializer):
name = ESLicenseNameSerializerField()
translated_fields = ('name',)
def fake_object(self, data):
# We just pass the data as the fake object will have been created
# before by ESAddonSerializer.fake_version_object()
return data
class ESCurrentVersionSerializer(BaseESSerializer, CurrentVersionSerializer):
license = ESCompactLicenseSerializer()
datetime_fields = ('reviewed',)
translated_fields = ('release_notes',)
def fake_object(self, data):
# We just pass the data as the fake object will have been created
# before by ESAddonSerializer.fake_version_object()
return data
class AddonEulaPolicySerializer(serializers.ModelSerializer):
eula = TranslationSerializerField()
privacy_policy = TranslationSerializerField()
class Meta:
model = Addon
fields = (
'eula',
'privacy_policy',
)
class AddonDeveloperSerializer(BaseUserSerializer):
picture_url = serializers.SerializerMethodField()
class Meta(BaseUserSerializer.Meta):
fields = BaseUserSerializer.Meta.fields + ('picture_url',)
read_only_fields = fields
class PromotedAddonSerializer(serializers.ModelSerializer):
GROUP_CHOICES = [(group.id, group.api_name) for group in PROMOTED_GROUPS]
apps = serializers.SerializerMethodField()
category = ReverseChoiceField(choices=GROUP_CHOICES, source='group_id')
class Meta:
model = PromotedAddon
fields = (
'apps',
'category',
)
def get_apps(self, obj):
return [app.short for app in obj.approved_applications]
class CategoriesSerializerField(serializers.Field):
def to_internal_value(self, data):
try:
categories = []
for app_name, category_names in data.items():
app_cats = CATEGORIES[APPS[app_name].id]
# We don't know the addon_type at this point, so try them all and we'll
# drop anything that's wrong later in AddonSerializer.validate
all_cat_slugs = set()
for type_cats in app_cats.values():
categories.extend(
type_cats[name] for name in category_names if name in type_cats
)
all_cat_slugs.update(type_cats.keys())
if not all_cat_slugs.issuperset(category_names):
raise exceptions.ValidationError('Invalid category name.')
return categories
except KeyError:
raise exceptions.ValidationError('Invalid app name.')
def to_representation(self, value):
grouped = sorted_groupby(
sorted(value),
key=lambda x: getattr(amo.APP_IDS.get(x.application), 'short', ''),
)
return {
app_name: [cat.slug for cat in categories]
for app_name, categories in grouped
}
class ContributionSerializerField(OutgoingURLField):
def to_representation(self, value):
if not value:
return value
parts = urlsplit(value)
query = QueryDict(parts.query, mutable=True)
query.update(amo.CONTRIBUTE_UTM_PARAMS)
return super().to_representation(
urlunsplit(
(
parts.scheme,
parts.netloc,
parts.path,
query.urlencode(),
parts.fragment,
)
)
)
class AddonSerializer(serializers.ModelSerializer):
authors = AddonDeveloperSerializer(
many=True, source='listed_authors', read_only=True
)
categories = CategoriesSerializerField(source='all_categories')
contributions_url = ContributionSerializerField(
source='contributions', read_only=True
)
current_version = CurrentVersionSerializer(read_only=True)
description = TranslationSerializerField(required=False)
developer_comments = TranslationSerializerField(required=False)
edit_url = serializers.SerializerMethodField()
has_eula = serializers.SerializerMethodField()
has_privacy_policy = serializers.SerializerMethodField()
homepage = OutgoingTranslationField(required=False)
icon_url = serializers.SerializerMethodField()
icons = serializers.SerializerMethodField()
is_source_public = serializers.SerializerMethodField()
is_featured = serializers.SerializerMethodField()
name = TranslationSerializerField(required=False)
previews = PreviewSerializer(many=True, source='current_previews', read_only=True)
promoted = PromotedAddonSerializer(read_only=True)
ratings = serializers.SerializerMethodField()
ratings_url = serializers.SerializerMethodField()
review_url = serializers.SerializerMethodField()
status = ReverseChoiceField(
choices=list(amo.STATUS_CHOICES_API.items()), read_only=True
)
summary = TranslationSerializerField(required=False)
support_email = TranslationSerializerField(required=False)
support_url = OutgoingTranslationField(required=False)
tags = serializers.SerializerMethodField()
type = ReverseChoiceField(
choices=list(amo.ADDON_TYPE_CHOICES_API.items()), read_only=True
)
url = serializers.SerializerMethodField()
version = VersionSerializer(write_only=True)
versions_url = serializers.SerializerMethodField()
class Meta:
model = Addon
fields = (
'id',
'authors',
'average_daily_users',
'categories',
'contributions_url',
'created',
'current_version',
'default_locale',
'description',
'developer_comments',
'edit_url',
'guid',
'has_eula',
'has_privacy_policy',
'homepage',
'icon_url',
'icons',
'is_disabled',
'is_experimental',
'is_featured',
'is_source_public',
'last_updated',
'name',
'previews',
'promoted',
'ratings',
'ratings_url',
'requires_payment',
'review_url',
'slug',
'status',
'summary',
'support_email',
'support_url',
'tags',
'type',
'url',
'version',
'versions_url',
'weekly_downloads',
)
writeable_fields = (
'categories',
'description',
'developer_comments',
'homepage',
'name',
'slug',
'summary',
'support_email',
'support_url',
'version',
)
read_only_fields = tuple(set(fields) - set(writeable_fields))
def __init__(self, instance=None, data=serializers.empty, **kwargs):
if instance and isinstance(data, dict):
data.pop('version', None)
super().__init__(instance=instance, data=data, **kwargs)
def to_representation(self, obj):
data = super().to_representation(obj)
request = self.context.get('request', None)
if request and is_gate_active(request, 'del-addons-created-field'):
data.pop('created', None)
if request and not is_gate_active(request, 'is-source-public-shim'):
data.pop('is_source_public', None)
if request and not is_gate_active(request, 'is-featured-addon-shim'):
data.pop('is_featured', None)
return data
def get_has_eula(self, obj):
return bool(getattr(obj, 'has_eula', obj.eula))
def get_is_featured(self, obj):
return bool(obj.promoted and obj.promoted.group == RECOMMENDED)
def get_has_privacy_policy(self, obj):
return bool(getattr(obj, 'has_privacy_policy', obj.privacy_policy))
def get_tags(self, obj):
if not hasattr(obj, 'tag_list'):
attach_tags([obj])
return getattr(obj, 'tag_list', [])
def get_url(self, obj):
return absolutify(obj.get_detail_url())
def get_edit_url(self, obj):
return absolutify(obj.get_dev_url())
def get_ratings_url(self, obj):
return absolutify(obj.ratings_url)
def get_versions_url(self, obj):
return absolutify(obj.versions_url)
def get_review_url(self, obj):
return absolutify(reverse('reviewers.review', args=[obj.pk]))
def get_icon_url(self, obj):
return absolutify(obj.get_icon_url(64))
def get_icons(self, obj):
get_icon = obj.get_icon_url
return {str(size): absolutify(get_icon(size)) for size in amo.ADDON_ICON_SIZES}
def get_ratings(self, obj):
ratings = {
'average': obj.average_rating,
'bayesian_average': obj.bayesian_rating,
'count': obj.total_ratings,
'text_count': obj.text_ratings_count,
}
if (request := self.context.get('request', None)) and (
grouped := get_grouped_ratings(request, obj)
):
ratings['grouped_counts'] = grouped
return ratings
def get_is_source_public(self, obj):
return False
def validate(self, data):
if not self.instance:
addon_type = self.fields['version'].parsed_data['type']
else:
addon_type = self.instance.type
if 'all_categories' in data:
# filter out categories for the wrong type.
# There might be dupes, e.g. "other" is a category for 2 types
slugs = {cat.slug for cat in data['all_categories']}
data['all_categories'] = [
cat for cat in data['all_categories'] if cat.type == addon_type
]
# double check we didn't lose any
if slugs != {cat.slug for cat in data['all_categories']}:
raise exceptions.ValidationError(
{'categories': 'Invalid category name.'}
)
return data
def create(self, validated_data):
upload = validated_data.get('version').get('upload')
addon = Addon.initialize_addon_from_upload(
data={**self.fields['version'].parsed_data, **validated_data},
upload=upload,
channel=upload.channel,
user=self.context['request'].user,
)
addon.set_categories(validated_data.get('all_categories', []))
self.fields['version'].create(
{**validated_data.get('version', {}), 'addon': addon}
)
activity.log_create(amo.LOG.CREATE_ADDON, addon)
olympia.core.logger.getLogger('z.addons').info(
f'New addon {addon!r} from {upload!r}'
)
if (
addon.status == amo.STATUS_NULL
and addon.has_complete_metadata()
and upload.channel == amo.RELEASE_CHANNEL_LISTED
):
addon.update(status=amo.STATUS_NOMINATED)
return addon
def update(self, instance, validated_data):
instance = super().update(instance, validated_data)
if 'all_categories' in validated_data:
del instance.all_categories
instance.set_categories(validated_data['all_categories'])
return instance
class AddonSerializerWithUnlistedData(AddonSerializer):
latest_unlisted_version = SimpleVersionSerializer(read_only=True)
class Meta:
model = Addon
fields = AddonSerializer.Meta.fields + ('latest_unlisted_version',)
read_only_fields = tuple(
set(fields) - set(AddonSerializer.Meta.writeable_fields)
)
class SimpleAddonSerializer(AddonSerializer):
class Meta:
model = Addon
fields = ('id', 'slug', 'name', 'icon_url')
class ESAddonSerializer(BaseESSerializer, AddonSerializer):
# data the same way than the regular serializer does (usually because we
# some of the data is not indexed in ES).
authors = BaseUserSerializer(many=True, source='listed_authors')
current_version = ESCurrentVersionSerializer()
previews = ESPreviewSerializer(many=True, source='current_previews')
_score = serializers.SerializerMethodField()
datetime_fields = ('created', 'last_updated', 'modified')
translated_fields = (
'name',
'description',
'developer_comments',
'homepage',
'summary',
'support_email',
'support_url',
)
class Meta:
model = Addon
fields = AddonSerializer.Meta.fields + ('_score',)
def fake_preview_object(self, obj, data, model_class=Preview):
# This is what ESPreviewSerializer.fake_object() would do, but we do
# it here and make that fake_object() method a no-op in order to have
# access to the right model_class to use - VersionPreview for static
# themes, Preview for the rest.
preview = model_class(id=data['id'], sizes=data.get('sizes', {}))
preview.addon = obj
preview.version = obj.current_version
preview_serializer = self.fields['previews'].child
# Attach base attributes that have the same name/format in ES and in
# the model.
preview_serializer._attach_fields(preview, data, ('modified',))
# Attach translations.
preview_serializer._attach_translations(
preview, data, preview_serializer.translated_fields
)
return preview
def fake_file_object(self, obj, data):
file_ = File(
id=data['id'],
created=self.handle_date(data['created']),
hash=data['hash'],
filename=data['filename'],
is_mozilla_signed_extension=data.get('is_mozilla_signed_extension'),
size=data['size'],
status=data['status'],
strict_compatibility=data.get('strict_compatibility', False),
version=obj,
)
file_.permissions = data.get(
'permissions', data.get('webext_permissions_list', [])
)
file_.optional_permissions = data.get('optional_permissions', [])
return file_
def fake_version_object(self, obj, data, channel):
if data:
version = Version(
addon=obj,
id=data['id'],
reviewed=self.handle_date(data['reviewed']),
version=data['version'],
channel=channel,
)
version.file = self.fake_file_object(version, data['files'][0])
# In ES we store integers for the appversion info, we need to
# convert it back to strings.
compatible_apps = {}
for app_id, compat_dict in data.get('compatible_apps', {}).items():
app_name = APPS_ALL[int(app_id)]
compatible_apps[app_name] = ApplicationsVersions(
min=AppVersion(version=compat_dict.get('min_human', '')),
max=AppVersion(version=compat_dict.get('max_human', '')),
)
version.compatible_apps = compatible_apps
version_serializer = self.fields.get('current_version') or None
if version_serializer:
version_serializer._attach_translations(
version, data, version_serializer.translated_fields
)
if 'license' in data and version_serializer:
license_serializer = version_serializer.fields['license']
version.license = License(id=data['license']['id'])
license_serializer._attach_fields(
version.license, data['license'], ('builtin', 'url')
)
license_serializer._attach_translations(
version.license, data['license'], ('name',)
)
else:
version.license = None
else:
version = None
return version
def fake_object(self, data):
obj = Addon(id=data['id'], slug=data['slug'])
# Attach base attributes that have the same name/format in ES and in
# the model.
self._attach_fields(
obj,
data,
(
'average_daily_users',
'bayesian_rating',
'contributions',
'created',
'default_locale',
'guid',
'has_eula',
'has_privacy_policy',
'hotness',
'icon_hash',
'icon_type',
'is_experimental',
'last_updated',
'modified',
'requires_payment',
'slug',
'status',
'type',
'weekly_downloads',
),
)
# Attach attributes that do not have the same name/format in ES.
obj.tag_list = data.get('tags', [])
obj.all_categories = [
CATEGORIES_BY_ID[cat_id] for cat_id in data.get('category', [])
]
# Not entirely accurate, but enough in the context of the search API.
obj.disabled_by_user = data.get('is_disabled', False)
# Attach translations (they require special treatment).
self._attach_translations(obj, data, self.translated_fields)
# Attach related models (also faking them). `current_version` is a
# property we can't write to, so we use the underlying field which
data_version = data.get('current_version') or {}
obj._current_version = self.fake_version_object(
obj, data_version, amo.RELEASE_CHANNEL_LISTED
)
obj._current_version_id = data_version.get('id')
data_authors = data.get('listed_authors', [])
obj.listed_authors = [
UserProfile(
id=data_author['id'],
display_name=data_author['name'],
username=data_author['username'],
is_public=data_author.get('is_public', False),
)
for data_author in data_authors
]
is_static_theme = data.get('type') == amo.ADDON_STATICTHEME
preview_model_class = VersionPreview if is_static_theme else Preview
obj.current_previews = [
self.fake_preview_object(obj, preview_data, model_class=preview_model_class)
for preview_data in data.get('previews', [])
]
promoted = data.get('promoted', None)
if promoted:
# .approved_applications.
approved_for_apps = promoted.get('approved_for_apps')
obj.promoted = PromotedAddon(
addon=obj,
approved_application_ids=approved_for_apps,
group_id=promoted['group_id'],
)
# we can safely regenerate these tuples because
# .appproved_applications only cares about the current group
obj._current_version.approved_for_groups = (
(obj.promoted.group, APP_IDS.get(app_id))
for app_id in approved_for_apps
)
else:
obj.promoted = None
ratings = data.get('ratings', {})
obj.average_rating = ratings.get('average')
obj.total_ratings = ratings.get('count')
obj.text_ratings_count = ratings.get('text_count')
return obj
def get__score(self, obj):
# es_meta is added by BaseESSerializer.to_representation() before DRF's
return obj._es_meta['score']
def get_ratings(self, obj):
return {
'average': obj.average_rating,
'bayesian_average': obj.bayesian_rating,
'count': obj.total_ratings,
'text_count': obj.text_ratings_count,
}
def to_representation(self, obj):
data = super().to_representation(obj)
request = self.context.get('request')
if (
request
and '_score' in data
and not is_gate_active(request, 'addons-search-_score-field')
):
data.pop('_score')
return data
class ESAddonAutoCompleteSerializer(ESAddonSerializer):
class Meta(ESAddonSerializer.Meta):
fields = ('id', 'icon_url', 'name', 'promoted', 'type', 'url')
model = Addon
def get_url(self, obj):
# Addon.get_absolute_url() calls get_url_path(), which wants
# _current_version_id to exist, but that's just a safeguard. We don't
# care and don't want to fetch the current version field to improve
obj._current_version_id = 1
return obj.get_absolute_url()
class StaticCategorySerializer(serializers.Serializer):
id = serializers.IntegerField()
name = serializers.CharField()
slug = serializers.CharField()
application = serializers.SerializerMethodField()
misc = serializers.BooleanField()
type = serializers.SerializerMethodField()
weight = serializers.IntegerField()
description = serializers.CharField()
def get_application(self, obj):
return APPS_ALL[obj.application].short
def get_type(self, obj):
return ADDON_TYPE_CHOICES_API[obj.type]
class LanguageToolsSerializer(AddonSerializer):
target_locale = serializers.CharField()
current_compatible_version = serializers.SerializerMethodField()
class Meta:
model = Addon
fields = (
'id',
'current_compatible_version',
'default_locale',
'guid',
'name',
'slug',
'target_locale',
'type',
'url',
)
def get_current_compatible_version(self, obj):
compatible_versions = getattr(obj, 'compatible_versions', None)
if compatible_versions is not None:
data = MinimalVersionSerializer(
compatible_versions, context=self.context, many=True
).data
try:
return data[0]
except IndexError:
# too loudly if the unthinkable happens...
pass
return None
def to_representation(self, obj):
data = super().to_representation(obj)
request = self.context['request']
if (
AddonAppVersionQueryParam.query_param not in request.GET
and 'current_compatible_version' in data
):
data.pop('current_compatible_version')
if request and is_gate_active(request, 'addons-locale_disambiguation-shim'):
data['locale_disambiguation'] = None
return data
class VersionBasketSerializer(SimpleVersionSerializer):
class Meta:
model = Version
fields = ('id', 'compatibility', 'is_strict_compatibility_enabled', 'version')
class AddonBasketSyncSerializer(AddonSerializerWithUnlistedData):
# We want to send all authors to basket, not just listed ones, and have
# the full basket-specific serialization.
authors = UserProfileBasketSyncSerializer(many=True)
current_version = VersionBasketSerializer()
is_recommended = serializers.SerializerMethodField()
latest_unlisted_version = VersionBasketSerializer()
name = serializers.SerializerMethodField()
class Meta:
model = Addon
fields = (
'authors',
'average_daily_users',
'categories',
'current_version',
'default_locale',
'guid',
'id',
'is_disabled',
'is_recommended',
'last_updated',
'latest_unlisted_version',
'name',
'ratings',
'slug',
'status',
'type',
)
read_only_fields = fields
def get_name(self, obj):
# Basket doesn't want translations, we run the serialization task under
return str(obj.name)
def get_is_recommended(self, obj):
return self.get_is_featured(obj)
class ReplacementAddonSerializer(serializers.ModelSerializer):
replacement = serializers.SerializerMethodField()
ADDON_PATH_REGEX = r"""/addon/(?P<addon_id>[^/<>"']+)/$"""
COLLECTION_PATH_REGEX = (
r"""/collections/(?P<user_id>[^/<>"']+)/(?P<coll_slug>[^/]+)/$"""
)
class Meta:
model = ReplacementAddon
fields = ('guid', 'replacement')
def _get_addon_guid(self, addon_id):
try:
addon = Addon.objects.public().id_or_slug(addon_id).get()
except Addon.DoesNotExist:
return []
return [addon.guid]
def _get_collection_guids(self, user_id, collection_slug):
try:
get_args = {'slug': collection_slug, 'listed': True}
if isinstance(user_id, str) and not user_id.isdigit():
get_args.update(**{'author__username': user_id})
else:
get_args.update(**{'author': user_id})
collection = Collection.objects.get(**get_args)
except Collection.DoesNotExist:
return []
valid_q = Addon.objects.get_queryset().valid_q([amo.STATUS_APPROVED])
return list(collection.addons.filter(valid_q).values_list('guid', flat=True))
def get_replacement(self, obj):
if obj.has_external_url():
# It's an external url so no guids.
return []
addon_match = re.search(self.ADDON_PATH_REGEX, obj.path)
if addon_match:
return self._get_addon_guid(addon_match.group('addon_id'))
coll_match = re.search(self.COLLECTION_PATH_REGEX, obj.path)
if coll_match:
return self._get_collection_guids(
coll_match.group('user_id'), coll_match.group('coll_slug')
)
return []
| true | true |
f7355003748af6a4791de9bf10e65f9d69998bf6 | 4,944 | py | Python | drive.py | evanloshin/CarND-Behavioral-Cloning-P3 | 22ec89cdea5257a10512f07b07fc4c074bc7c649 | [
"MIT"
] | null | null | null | drive.py | evanloshin/CarND-Behavioral-Cloning-P3 | 22ec89cdea5257a10512f07b07fc4c074bc7c649 | [
"MIT"
] | null | null | null | drive.py | evanloshin/CarND-Behavioral-Cloning-P3 | 22ec89cdea5257a10512f07b07fc4c074bc7c649 | [
"MIT"
] | null | null | null | import argparse
import base64
from datetime import datetime
import os
import shutil
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
from PIL import Image
from flask import Flask
from io import BytesIO
from keras.models import load_model
import h5py
from keras import __version__ as keras_version
from keras import Model
sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None
class SimplePIController:
def __init__(self, Kp, Ki):
self.Kp = Kp
self.Ki = Ki
self.set_point = 0.
self.error = 0.
self.integral = 0.
def set_desired(self, desired):
self.set_point = desired
def update(self, measurement):
# proportional error
self.error = self.set_point - measurement
# integral error
self.integral += self.error
return self.Kp * self.error + self.Ki * self.integral
controller = SimplePIController(0.1, 0.002)
set_speed = 9
controller.set_desired(set_speed)
@sio.on('telemetry')
def telemetry(sid, data):
if data:
# The current steering angle of the car
steering_angle = data["steering_angle"]
# The current throttle of the car
throttle = data["throttle"]
# The current speed of the car
speed = data["speed"]
# The current image from the center camera of the car
imgString = data["image"]
image = Image.open(BytesIO(base64.b64decode(imgString)))
image_array = np.asarray(image)
steering_angle = float(model.predict(image_array[None, :, :, :], batch_size=1))
# # Extract intermediate layer output
# layer_name = 'first_convolution'
# intermediate_layer_model = Model(inputs=model.input,
# outputs=model.get_layer(layer_name).output)
# intermediate_output = intermediate_layer_model.predict(image_array[None, :, :, :], batch_size=1)
# intermediate_output = np.squeeze(intermediate_output)
# intermediate_output = (255.0 / intermediate_output.max() * (intermediate_output - intermediate_output.min())).astype(np.uint8)
# intermediate_output_img = Image.fromarray(intermediate_output[12])
#
# # save intermediate output layer
# timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
# image_filename = os.path.join('/Users/evanloshin/Documents/Udacity/SDC/behavioral-cloning-data/Intermediate-Layer/', timestamp)
# intermediate_output_img.save('{}.jpg'.format(image_filename))
throttle = controller.update(float(speed))
print("Predicted Steering Angle: {} Throttle: {}".format(round(steering_angle, 5), round(throttle, 5)))
send_control(steering_angle, throttle)
# save frame
if args.image_folder != '':
timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
image_filename = os.path.join(args.image_folder, timestamp)
image.save('{}.jpg'.format(image_filename))
else:
# NOTE: DON'T EDIT THIS.
sio.emit('manual', data={}, skip_sid=True)
@sio.on('connect')
def connect(sid, environ):
print("connect ", sid)
send_control(0, 0)
def send_control(steering_angle, throttle):
sio.emit(
"steer",
data={
'steering_angle': steering_angle.__str__(),
'throttle': throttle.__str__()
},
skip_sid=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Remote Driving')
parser.add_argument(
'model',
type=str,
help='Path to model h5 file. Model should be on the same path.'
)
parser.add_argument(
'image_folder',
type=str,
nargs='?',
default='',
help='Path to image folder. This is where the images from the run will be saved.'
)
args = parser.parse_args()
# check that model Keras version is same as local Keras version
f = h5py.File(args.model, mode='r')
model_version = f.attrs.get('keras_version')
keras_version = str(keras_version).encode('utf8')
if model_version != keras_version:
print('You are using Keras version ', keras_version,
', but the model was built using ', model_version)
model = load_model(args.model)
if args.image_folder != '':
print("Creating image folder at {}".format(args.image_folder))
if not os.path.exists(args.image_folder):
os.makedirs(args.image_folder)
else:
shutil.rmtree(args.image_folder)
os.makedirs(args.image_folder)
print("RECORDING THIS RUN ...")
else:
print("NOT RECORDING THIS RUN ...")
# wrap Flask application with engineio's middleware
app = socketio.Middleware(sio, app)
# deploy as an eventlet WSGI server
eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
| 31.896774 | 137 | 0.64462 | import argparse
import base64
from datetime import datetime
import os
import shutil
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
from PIL import Image
from flask import Flask
from io import BytesIO
from keras.models import load_model
import h5py
from keras import __version__ as keras_version
from keras import Model
sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None
class SimplePIController:
def __init__(self, Kp, Ki):
self.Kp = Kp
self.Ki = Ki
self.set_point = 0.
self.error = 0.
self.integral = 0.
def set_desired(self, desired):
self.set_point = desired
def update(self, measurement):
self.error = self.set_point - measurement
self.integral += self.error
return self.Kp * self.error + self.Ki * self.integral
controller = SimplePIController(0.1, 0.002)
set_speed = 9
controller.set_desired(set_speed)
@sio.on('telemetry')
def telemetry(sid, data):
if data:
steering_angle = data["steering_angle"]
throttle = data["throttle"]
speed = data["speed"]
imgString = data["image"]
image = Image.open(BytesIO(base64.b64decode(imgString)))
image_array = np.asarray(image)
steering_angle = float(model.predict(image_array[None, :, :, :], batch_size=1))
throttle = controller.update(float(speed))
print("Predicted Steering Angle: {} Throttle: {}".format(round(steering_angle, 5), round(throttle, 5)))
send_control(steering_angle, throttle)
if args.image_folder != '':
timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
image_filename = os.path.join(args.image_folder, timestamp)
image.save('{}.jpg'.format(image_filename))
else:
sio.emit('manual', data={}, skip_sid=True)
@sio.on('connect')
def connect(sid, environ):
print("connect ", sid)
send_control(0, 0)
def send_control(steering_angle, throttle):
sio.emit(
"steer",
data={
'steering_angle': steering_angle.__str__(),
'throttle': throttle.__str__()
},
skip_sid=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Remote Driving')
parser.add_argument(
'model',
type=str,
help='Path to model h5 file. Model should be on the same path.'
)
parser.add_argument(
'image_folder',
type=str,
nargs='?',
default='',
help='Path to image folder. This is where the images from the run will be saved.'
)
args = parser.parse_args()
# check that model Keras version is same as local Keras version
f = h5py.File(args.model, mode='r')
model_version = f.attrs.get('keras_version')
keras_version = str(keras_version).encode('utf8')
if model_version != keras_version:
print('You are using Keras version ', keras_version,
', but the model was built using ', model_version)
model = load_model(args.model)
if args.image_folder != '':
print("Creating image folder at {}".format(args.image_folder))
if not os.path.exists(args.image_folder):
os.makedirs(args.image_folder)
else:
shutil.rmtree(args.image_folder)
os.makedirs(args.image_folder)
print("RECORDING THIS RUN ...")
else:
print("NOT RECORDING THIS RUN ...")
# wrap Flask application with engineio's middleware
app = socketio.Middleware(sio, app)
eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
| true | true |
f73550951879199e9ad244e693a953a5cf72bf17 | 24,024 | py | Python | test/test_treestore.py | helix-collective/s3ts | f60e71f576562badc41828952562e5936f85bfcf | [
"BSD-3-Clause"
] | 2 | 2019-05-21T00:08:13.000Z | 2020-01-29T10:42:48.000Z | test/test_treestore.py | helix-collective/s3ts | f60e71f576562badc41828952562e5936f85bfcf | [
"BSD-3-Clause"
] | 1 | 2016-11-23T23:06:59.000Z | 2016-11-23T23:30:51.000Z | test/test_treestore.py | helix-collective/s3ts | f60e71f576562badc41828952562e5936f85bfcf | [
"BSD-3-Clause"
] | 4 | 2016-10-24T01:14:59.000Z | 2020-07-23T02:37:48.000Z | import os, tempfile, unittest, shutil, subprocess, datetime, time
from s3ts.filestore import LocalFileStore
from s3ts.s3filestore import S3FileStore
from s3ts.config import TreeStoreConfig, readInstallProperties, S3TS_PROPERTIES
from s3ts.treestore import TreeStore
from s3ts.utils import datetimeFromIso
from s3ts.package import PackageJS, S3TS_PACKAGEFILE
from s3ts.metapackage import MetaPackage, SubPackage
import boto
import logging
# boto.set_stream_logger('boto')
class CaptureDownloadProgress:
    """Progress callback that records the combined byte count of each event."""
    def __init__(self):
        # One entry per callback invocation.
        self.recorded = []
    def __call__(self, bytesDownloaded, bytesFromCache):
        total = bytesDownloaded + bytesFromCache
        self.recorded.append(total)
# Upload progress events carry the same (sent, cached) pair, so reuse the recorder.
CaptureUploadProgress = CaptureDownloadProgress
class CaptureInstallProgress:
    """Progress callback that records each reported byte count during install."""
    def __init__(self):
        # One entry per callback invocation.
        self.recorded = []
    def __call__(self, nBytes):
        self.recorded += [nBytes]
class EmptyS3Bucket:
    """Context manager asserting a bucket starts empty and wiping it on exit."""
    def __init__(self, bucket):
        self.bucket = bucket
    def __enter__(self):
        # The tests need exclusive use of an initially empty bucket.
        contents = list(self.bucket.list())
        assert not contents, "S3 bucket is not empty"
    def __exit__(self, type, value, traceback):
        # Safe to wipe: entry verified everything present was written by us.
        keys = self.bucket.list()
        self.bucket.delete_keys(keys)
class TestTreeStore(unittest.TestCase):
    """End-to-end tests for TreeStore over local-filesystem and S3 file stores.

    The test_s3_* cases require AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and
    S3TS_BUCKET in the environment, and expect the bucket to start empty
    (enforced by EmptyS3Bucket).
    """
    def setUp(self):
        """Create a scratch workdir populated with several small source trees."""
        self.workdir = tempfile.mkdtemp()
        if os.path.exists( self.workdir ):
            shutil.rmtree( self.workdir )
        os.makedirs( self.workdir )
        self.FILE1 = b'#!/bin/env python\n def main(): print "hello"\n'
        self.FILE2 = b'#!/bin/env python\n def main(): print "goodbye"\n'
        self.FILE2_A = b'#!/bin/env python\n def main(): print "goodbye foreever"\n'
        self.FILE3 = b'#!/bin/env python\n def main(): print "goodbye foreever"\n'
        self.FILE4 = b'#!/bin/env python\n def main(): print "what now"\n'
        self.FILE5 = b'Just text'
        self.CAR01 = (
            b'Some big and complicated data structure goes here, hopefully big enough that it requires chunking and compression.\n'
            b'sydney london paris port moresby okinawa st petersburg salt lake city new york whitehorse mawson woy woy st louis\n'
        )
        # Create some test input data
        self.srcTree = makeEmptyDir( os.path.join( self.workdir, 'src-1' ) )
        fs = LocalFileStore( self.srcTree )
        fs.put( 'code/file1.py', self.FILE1)
        fs.put( 'code/file2.py', self.FILE2)
        fs.put( 'assets/car-01.db', self.CAR01)
        self.srcTree2 = makeEmptyDir( os.path.join( self.workdir, 'src-2' ) )
        fs = LocalFileStore( self.srcTree2 )
        fs.put( 'code/file1.py', self.FILE1 )
        fs.put( 'code/file3.py', self.FILE3 )
        fs.put( 'code/file4.py', self.FILE4)
        fs.put( 'assets/car-01.db', self.CAR01 )
        self.srcTree3 = makeEmptyDir( os.path.join( self.workdir, 'src-3' ) )
        fs = LocalFileStore( self.srcTree3 )
        fs.put( 'code/file1.py', self.FILE1 )
        fs.put( 'code/file2.py', self.FILE2_A )
        fs.put( 'code/file4.py', self.FILE4 )
        fs.put( 'text/text', self.FILE5 )
        self.srcTree4 = makeEmptyDir( os.path.join( self.workdir, 'src-4' ) )
        fs = LocalFileStore( self.srcTree4 )
        fs.put( 'file1.py', self.FILE1 )
        fs.put( 'code/file2.py', self.FILE2_A )
        fs.put( 'code/file4.py', self.FILE4 )
        fs.put( 'text', self.FILE5 )
        self.srcVariant = makeEmptyDir( os.path.join( self.workdir, 'src1-kiosk' ) )
        fs = LocalFileStore( self.srcVariant )
        fs.put( 'kiosk-01/key', b'this is the key src1:kiosk-01' )
        fs.put( 'kiosk-02/key', b'this is the key src1:kiosk-02' )
    def tearDown(self):
        """Remove the scratch workdir."""
        shutil.rmtree( self.workdir )
    def test_fs_treestore(self):
        """Full lifecycle against a filesystem-backed store: upload, verify,
        download, install, rename, remove, and store/cache flushing."""
        # Create a file system backed treestore
        fileStore = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'fs' ) ) )
        localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
        treestore = TreeStore.create( fileStore, localCache, TreeStoreConfig( 100, True ) )
        # Upload 2 trees
        creationTime = datetimeFromIso( '2015-01-01T00:00:00.0' )
        treestore.upload( 'v1.0', '', creationTime, self.srcTree, CaptureUploadProgress() )
        pkg = treestore.findPackage( 'v1.0' )
        # Confirm it's in the index
        self.assertEqual( treestore.listPackages(), ['v1.0'] )
        # Verify it
        treestore.verify( pkg )
        # Test the cache priming function
        treestore.prime( self.srcTree2, CaptureUploadProgress() )
        # Test whether the verifyCache works
        corruptedFiles = treestore.validateLocalCache()
        self.assertEqual( len(corruptedFiles), 0)
        # Download it, checking we get expected progress callbacks
        # The order of the callbacks will depend on the order of the
        # chunks in the package definition, which will depend on the
        # iteration order of the file system when the package was created.
        # So check independently of ordering.
        cb = CaptureDownloadProgress()
        treestore.download( pkg, cb )
        self.assertEqual( sorted(cb.recorded), [30, 45, 47, 100, 100] )
        # Verify it locally
        treestore.verifyLocal( pkg )
        # Install it
        destTree = os.path.join( self.workdir, 'dest-1' )
        treestore.install( pkg, destTree, CaptureInstallProgress() )
        # Check that the installed tree is the same as the source tree
        self.assertEqual( subprocess.call( 'diff -r -x {0} {1} {2}'.format(S3TS_PROPERTIES,self.srcTree,destTree), shell=True ), 0 )
        # Rename the tree, and check that installing that is the same
        treestore.rename( 'v1.0', 'v1.0x' )
        pkg = treestore.findPackage( 'v1.0x' )
        treestore.download( pkg, CaptureDownloadProgress() )
        destTree = os.path.join( self.workdir, 'dest-2' )
        treestore.install( pkg, destTree, CaptureInstallProgress() )
        self.assertEqual( subprocess.call( 'diff -r -x {0} {1} {2}'.format(S3TS_PROPERTIES,self.srcTree,destTree), shell=True ), 0 )
        # Test the flushStore function has nothing to remove)
        treestore.upload( 'extra', '', creationTime, self.srcTree2, CaptureUploadProgress() )
        removed = treestore.flushStore()
        self.assertEqual(len(removed), 0)
        # Remove a tree
        treestore.remove( 'v1.0x' )
        # Test the store now has dangling chunks when can be removed
        removed = treestore.flushStore()
        self.assertTrue(len(removed) > 0)
        treestore.upload( 'v1.0', '', creationTime, self.srcTree, CaptureUploadProgress() )
        # Initially the local cache should contain chunks for v1.0 and extra. Empty
        # the local cache by successive flush operations
        removed = treestore.flushLocalCache(['extra'])
        self.assertTrue(len(removed) > 0)
        removed = treestore.flushLocalCache(['v1.0'])
        self.assertTrue(len(removed) > 0)
        # Confirm that removing everything from the local cache is refused
        with self.assertRaises(RuntimeError):
            treestore.flushLocalCache([])
    def test_sync(self):
        """sync() should transition an installed directory between package
        versions, removing extras and replacing directories with files."""
        # Create a file system backed treestore
        fileStore = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'fs' ) ) )
        localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
        treestore = TreeStore.create( fileStore, localCache, TreeStoreConfig( 10, True ) )
        creationTime = datetimeFromIso( '2015-01-01T00:00:00.0' )
        treestore.upload( 'v1.0', '', creationTime, self.srcTree, CaptureUploadProgress() )
        treestore.upload( 'v1.3', '', creationTime, self.srcTree3, CaptureUploadProgress() )
        treestore.upload( 'v1.4', '', creationTime, self.srcTree4, CaptureUploadProgress() )
        testdir = makeEmptyDir( os.path.join( self.workdir, 'test' ) )
        # Local helpers asserting on the state of testdir after each sync.
        def assertExists( path ):
            self.assertTrue( os.path.exists( os.path.join(testdir, path) ) )
        def assertContains( path, data ):
            with open( os.path.join(testdir, path), 'rb' ) as f:
                self.assertEqual( f.read(), data )
        def assertDoesntExist( path ):
            self.assertFalse( os.path.exists( os.path.join(testdir, path) ) )
        def assertInstalled(pkg, testdir):
            result = treestore.compareInstall(pkg, testdir)
            self.assertEqual( result.missing, set() )
            self.assertEqual( result.extra, set() )
            self.assertEqual( result.diffs, set() )
        # sync a package to an empty directory
        pkg = treestore.findPackage('v1.0')
        treestore.download( pkg, CaptureDownloadProgress() )
        treestore.sync( pkg, testdir, CaptureInstallProgress() )
        assertContains( "code/file1.py", self.FILE1 )
        assertContains( "code/file2.py", self.FILE2 )
        assertContains( "assets/car-01.db", self.CAR01 )
        assertExists( S3TS_PACKAGEFILE )
        assertInstalled( pkg, testdir )
        # Re-sync the same package
        pkg = treestore.findPackage('v1.0')
        treestore.download( pkg, CaptureDownloadProgress() )
        treestore.sync( pkg, testdir, CaptureInstallProgress() )
        assertContains( "code/file1.py", self.FILE1 )
        assertContains( "code/file2.py", self.FILE2 )
        assertContains( "assets/car-01.db", self.CAR01 )
        assertExists( S3TS_PACKAGEFILE )
        assertInstalled( pkg, testdir )
        # Sync to a different package
        pkg = treestore.findPackage('v1.3')
        treestore.download( pkg, CaptureDownloadProgress() )
        treestore.sync( pkg, testdir, CaptureInstallProgress() )
        assertContains( "code/file1.py", self.FILE1 )
        assertContains( "code/file2.py", self.FILE2_A )
        assertDoesntExist( "assets/car-01.db" )
        assertContains( "code/file4.py", self.FILE4 )
        assertContains( "text/text", self.FILE5 )
        assertExists( S3TS_PACKAGEFILE )
        assertInstalled( pkg, testdir )
        # Sync back to the first package
        pkg = treestore.findPackage('v1.0')
        treestore.download( pkg, CaptureDownloadProgress() )
        treestore.sync( pkg, testdir, CaptureInstallProgress() )
        assertContains( "code/file1.py", self.FILE1 )
        assertContains( "code/file2.py", self.FILE2 )
        assertContains( "assets/car-01.db", self.CAR01 )
        assertDoesntExist( "code/file4.py" )
        assertExists( S3TS_PACKAGEFILE )
        assertInstalled( pkg, testdir )
        # Remove the package file, and sync the second package again
        os.unlink( os.path.join( testdir, S3TS_PACKAGEFILE ) )
        pkg = treestore.findPackage('v1.3')
        treestore.download( pkg, CaptureDownloadProgress() )
        treestore.sync( pkg, testdir, CaptureInstallProgress() )
        assertContains( "code/file1.py", self.FILE1 )
        assertContains( "code/file2.py", self.FILE2_A )
        assertDoesntExist( "assets/car-01.db" )
        assertContains( "code/file4.py", self.FILE4 )
        assertExists( S3TS_PACKAGEFILE )
        assertInstalled( pkg, testdir )
        # Add an extra file not in the package, and ensure
        # that syncing deletes it
        with open( os.path.join(testdir, "debug.log"), 'w') as f:
            f.write( "something" )
        pkg = treestore.findPackage('v1.3')
        treestore.sync( pkg, testdir, CaptureInstallProgress() )
        assertInstalled( pkg, testdir )
        # Sync to test replacing a directory with a file
        pkg = treestore.findPackage('v1.4')
        treestore.download( pkg, CaptureDownloadProgress() )
        treestore.sync( pkg, testdir, CaptureInstallProgress() )
        assertContains( "text", self.FILE5 )
        assertInstalled( pkg, testdir )
    def test_metapackages(self):
        """A meta package should install each component package under its
        configured sub-directory."""
        # Create a file system backed treestore
        fileStore = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'fs' ) ) )
        localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
        treestore = TreeStore.create( fileStore, localCache, TreeStoreConfig( 10, True ) )
        creationTime = datetimeFromIso( '2015-01-01T00:00:00.0' )
        treestore.upload( 'v1.0', '', creationTime, self.srcTree, CaptureUploadProgress() )
        treestore.upload( 'v1.3', '', creationTime, self.srcTree3, CaptureUploadProgress() )
        treestore.upload( 'v1.4', '', creationTime, self.srcTree4, CaptureUploadProgress() )
        meta1 = MetaPackage(
            name = 'meta1',
            description = '',
            creationTime = creationTime,
            components = [
                SubPackage( 'dir-1', 'v1.0' ),
                SubPackage( 'dir-2', 'v1.3' ),
            ]
        )
        meta1.verify(treestore,{})
        treestore.uploadMetaPackage(meta1)
        meta1p = treestore.find( 'meta1', {})
        treestore.download(meta1p, CaptureDownloadProgress() )
        # Install it
        destTree = os.path.join( self.workdir, 'dest-1' )
        treestore.install(meta1p, destTree, CaptureInstallProgress() )
        def assertContains( path, text ):
            with open( os.path.join(destTree, path), 'rb' ) as f:
                self.assertEqual( f.read(), text )
        assertContains("dir-1/code/file1.py", self.FILE1)
        assertContains("dir-2/text/text", self.FILE5)
    def test_s3_treestore(self):
        """Full lifecycle against an S3-backed store, including compareInstall
        drift detection and pre-signed-URL (HTTP-only) download."""
        # Create an s3 backed treestore
        # Requires these environment variables set
        #
        # AWS_ACCESS_KEY_ID
        # AWS_SECRET_ACCESS_KEY
        # S3TS_BUCKET
        #
        # NB: **this will only work if the bucket is empty
        s3c = boto.connect_s3()
        bucket = s3c.get_bucket( os.environ['S3TS_BUCKET'] )
        with EmptyS3Bucket(bucket):
            fileStore = S3FileStore( bucket )
            localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
            treestore = TreeStore.create( fileStore, localCache, TreeStoreConfig( 100, True ) )
            # Upload it as a tree
            creationTime = datetimeFromIso( '2015-01-01T00:00:00.0' )
            treestore.upload( 'v1.0', '', creationTime, self.srcTree, CaptureUploadProgress() )
            pkg = treestore.findPackage( 'v1.0' )
            # Confirm it's in the index
            self.assertEqual( treestore.listPackages(), ['v1.0'] )
            # Verify it
            treestore.verify( pkg )
            # Download it, checking we get expected progress callbacks
            cb = CaptureDownloadProgress()
            treestore.download( pkg, cb )
            self.assertEqual( sorted(cb.recorded), [30, 45, 47, 100, 100] )
            # Verify it locally
            treestore.verifyLocal( pkg )
            # Install it
            destTree = os.path.join( self.workdir, 'dest-1' )
            treestore.install( pkg, destTree, CaptureInstallProgress() )
            # Check that the installed tree is the same as the source tree
            self.assertEqual( subprocess.call( 'diff -r -x {0} {1} {2}'.format(S3TS_PROPERTIES,self.srcTree,destTree), shell=True ), 0 )
            self.assertEqual( readInstallProperties(destTree).treeName, 'v1.0' )
            # Use the compareInstall function to confirm the installed package is ok, and
            # then check that modifying the files show up in the comparison
            result = treestore.compareInstall( pkg, destTree )
            self.assertEqual( len(result.missing), 0 )
            self.assertEqual( len(result.extra), 0 )
            self.assertEqual( len(result.diffs), 0 )
            with open( os.path.join(destTree,"code/file1.py"), "w" ) as f:
                f.write("x")
            with open( os.path.join(destTree,"code/file3.py"), "w" ) as f:
                f.write("y")
            os.unlink(os.path.join(destTree,'assets/car-01.db'))
            result = treestore.compareInstall( pkg, destTree )
            self.assertEqual( result.missing, set(['assets/car-01.db']) )
            self.assertEqual( result.extra, set(['code/file3.py']) )
            self.assertEqual( result.diffs, set(['code/file1.py']) )
            # Reinstall to fix directory content
            shutil.rmtree( destTree )
            treestore.install( pkg, destTree, CaptureInstallProgress() )
            result = treestore.compareInstall( pkg, destTree )
            self.assertEqual( len(result.missing), 0 )
            self.assertEqual( len(result.extra), 0 )
            self.assertEqual( len(result.diffs), 0 )
            # Now create a pre-signed version of the package
            pkg = treestore.findPackage( 'v1.0' )
            treestore.addUrls( pkg, 3600 )
            # NOTE(review): `result` below is stale — it still holds the comparison
            # computed before addUrls, so these three re-assertions add no coverage.
            self.assertEqual( len(result.missing), 0 )
            self.assertEqual( len(result.extra), 0 )
            self.assertEqual( len(result.diffs), 0 )
            # And download it directly via http. Create a new local cache
            # to ensure that we actually redownload each chunk
            localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
            treestore2 = TreeStore.forHttpOnly( localCache )
            cb = CaptureDownloadProgress()
            treestore2.downloadHttp( pkg, cb )
            self.assertEqual( sorted(cb.recorded), [30, 45, 47, 100, 100] )
            # Install it
            destTree2 = os.path.join( self.workdir, 'dest-2' )
            treestore2.install( pkg, destTree2, CaptureInstallProgress() )
            # Check that the new installed tree is the same as the source tree
            self.assertEqual( subprocess.call( 'diff -r -x {0} {1} {2}'.format(S3TS_PROPERTIES,self.srcTree,destTree2), shell=True ), 0 )
            # Rename the tree, and check that installing that is the same
            treestore.rename( 'v1.0', 'v1.0x' )
            pkg = treestore.findPackage( 'v1.0x' )
            treestore.download( pkg, CaptureDownloadProgress() )
            destTree = os.path.join( self.workdir, 'dest-3' )
            treestore.install( pkg, destTree, CaptureInstallProgress() )
            self.assertEqual( subprocess.call( 'diff -r -x {0} {1} {2}'.format(S3TS_PROPERTIES,self.srcTree,destTree), shell=True ), 0 )
            # Remove the tree
            treestore.remove( 'v1.0x' )
    def test_s3_prefixes(self):
        """Two stores with different key prefixes in the same bucket must not
        interfere with each other."""
        # Requires these environment variables set
        #
        # AWS_ACCESS_KEY_ID
        # AWS_SECRET_ACCESS_KEY
        # S3TS_BUCKET
        #
        # NB: **this will only work if the bucket is empty
        s3c = boto.connect_s3()
        bucket = s3c.get_bucket( os.environ['S3TS_BUCKET'] )
        with EmptyS3Bucket(bucket):
            localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
            treestore1 = TreeStore.create( S3FileStore( bucket, "prefix1" ), localCache, TreeStoreConfig( 100, True ) )
            treestore2 = TreeStore.create( S3FileStore( bucket, "prefix2" ), localCache, TreeStoreConfig( 100, True ) )
            # Confirm we can write the different values to the same path in both treestores,
            # and the different prefix keeps them separate independent
            creationTime = datetimeFromIso( '2015-01-01T00:00:00.0' )
            treestore1.upload( 'release', '', creationTime, self.srcTree, CaptureUploadProgress() )
            treestore2.upload( 'release', '', creationTime, self.srcTree2, CaptureUploadProgress() )
            pkg1 = treestore1.findPackage( 'release' )
            pkg2 = treestore2.findPackage( 'release' )
            self.assertEqual(len(pkg1.files),3)
            self.assertEqual(len(pkg2.files),4)
    def test_s3_merged_package(self):
        """A merged package maps several uploaded trees onto sub-directories of
        a single installed tree."""
        # Test the creation and subsequent installation of merged packages
        # Requires these environment variables set
        #
        # AWS_ACCESS_KEY_ID
        # AWS_SECRET_ACCESS_KEY
        # S3TS_BUCKET
        #
        # NB: **this will only work if the bucket is empty
        s3c = boto.connect_s3()
        bucket = s3c.get_bucket( os.environ['S3TS_BUCKET'] )
        with EmptyS3Bucket(bucket):
            localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
            treestore = TreeStore.create( S3FileStore( bucket), localCache, TreeStoreConfig( 100, True ) )
            creationTime = datetimeFromIso( '2015-01-01T00:00:00.0' )
            treestore.upload( 'src1', '', creationTime, self.srcTree, CaptureUploadProgress() )
            treestore.upload( 'src2', '', creationTime, self.srcTree2, CaptureUploadProgress() )
            treestore.upload( 'src3', '', creationTime, self.srcTree3, CaptureUploadProgress() )
            treestore.createMerged( 'merged', creationTime, { '.' : 'src1', 'subdir-a' : 'src2', 'subdir-b' : 'src3'})
            pkg = treestore.findPackage( 'merged' )
            treestore.download( pkg, CaptureDownloadProgress() )
            destTree = os.path.join( self.workdir, 'merged' )
            treestore.install( pkg, destTree, CaptureInstallProgress() )
            def assertSameContent( path1, path2 ):
                with open(path1) as f1:
                    with open(path2) as f2:
                        self.assertEqual( f1.read(), f2.read() )
            assertSameContent(os.path.join(destTree, "code/file1.py"), os.path.join(self.srcTree, "code/file1.py"))
            assertSameContent(os.path.join(destTree, "subdir-a/code/file4.py"), os.path.join(self.srcTree2, "code/file4.py"))
            assertSameContent(os.path.join(destTree, "subdir-b/text/text"), os.path.join(self.srcTree3, "text/text"))
    def test_s3_many_treestore(self):
        """uploadMany should publish one package per variant sub-tree
        (e.g. v1.0:kiosk-01, v1.0:kiosk-02) over a common base tree."""
        # Create an s3 backed treestore
        # Requires these environment variables set
        #
        # AWS_ACCESS_KEY_ID
        # AWS_SECRET_ACCESS_KEY
        # S3TS_BUCKET
        #
        # NB: **this will only work if the bucket is empty
        s3c = boto.connect_s3()
        bucket = s3c.get_bucket( os.environ['S3TS_BUCKET'] )
        with EmptyS3Bucket(bucket):
            fileStore = S3FileStore( bucket )
            localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
            treestore = TreeStore.create( fileStore, localCache, TreeStoreConfig( 100, True ) )
            # Upload it as a tree
            creationTime = datetimeFromIso( '2015-01-01T00:00:00.0' )
            treestore.uploadMany( 'v1.0', '', creationTime, self.srcTree, self.srcVariant, CaptureUploadProgress() )
            pkg = treestore.findPackage( 'v1.0:kiosk-01' )
            # Confirm it's in the index
            self.assertEqual( treestore.listPackages(), ['v1.0:kiosk-01', 'v1.0:kiosk-02'] )
            # Verify it
            treestore.verify( pkg )
            # Download it, checking we get expected progress callbacks
            cb = CaptureDownloadProgress()
            treestore.download( pkg, cb )
            self.assertEqual( sorted(cb.recorded), [29, 30, 45, 47, 100, 100] )
            # Verify it locally
            treestore.verifyLocal( pkg )
            # Install it
            destTree = os.path.join( self.workdir, 'dest-1' )
            treestore.install( pkg, destTree, CaptureInstallProgress() )
            # Check that the installed tree is the same as the source tree
            self.assertEqual( subprocess.call( 'diff -r -x {0} {1} {2}'.format(S3TS_PROPERTIES,self.srcTree + '/assets',destTree + '/assets'), shell=True ), 0 )
            self.assertEqual( subprocess.call( 'diff -r -x {0} {1} {2}'.format(S3TS_PROPERTIES,self.srcTree + '/code',destTree + '/code'), shell=True ), 0 )
            self.assertEqual( readInstallProperties(destTree).treeName, 'v1.0:kiosk-01' )
def makeEmptyDir( path ):
    """Ensure *path* exists as an empty directory, wiping any previous contents."""
    already_there = os.path.exists( path )
    if already_there:
        # Clear residue from a previous run before recreating.
        shutil.rmtree( path )
    os.makedirs( path )
    return path
# Allow running this test module directly: python test_treestore.py
if __name__ == '__main__':
    unittest.main()
| 44.654275 | 160 | 0.620421 | import os, tempfile, unittest, shutil, subprocess, datetime, time
from s3ts.filestore import LocalFileStore
from s3ts.s3filestore import S3FileStore
from s3ts.config import TreeStoreConfig, readInstallProperties, S3TS_PROPERTIES
from s3ts.treestore import TreeStore
from s3ts.utils import datetimeFromIso
from s3ts.package import PackageJS, S3TS_PACKAGEFILE
from s3ts.metapackage import MetaPackage, SubPackage
import boto
import logging
class CaptureDownloadProgress:
    """Progress callback recording the combined byte count of every event."""
    def __init__(self):
        self.recorded = []
    def __call__(self, bytesDownloaded, bytesFromCache):
        combined = bytesDownloaded + bytesFromCache
        self.recorded.append(combined)
# Upload progress shares the (sent, cached) callback signature; reuse the recorder.
CaptureUploadProgress = CaptureDownloadProgress
class CaptureInstallProgress:
    """Progress callback recording each reported byte count during install."""
    def __init__(self):
        self.recorded = []
    def __call__(self, nBytes):
        self.recorded += [nBytes]
class EmptyS3Bucket:
    """Context manager that requires an empty bucket on entry and wipes it on exit."""
    def __init__(self, bucket):
        self.bucket = bucket
    def __enter__(self):
        # Refuse to run against a bucket that already holds data.
        existing = list(self.bucket.list())
        assert not existing, "S3 bucket is not empty"
    def __exit__(self, type, value, traceback):
        # Everything present was written during the managed block; delete it all.
        self.bucket.delete_keys(self.bucket.list())
class TestTreeStore(unittest.TestCase):
def setUp(self):
self.workdir = tempfile.mkdtemp()
if os.path.exists( self.workdir ):
shutil.rmtree( self.workdir )
os.makedirs( self.workdir )
self.FILE1 = b'#!/bin/env python\n def main(): print "hello"\n'
self.FILE2 = b'#!/bin/env python\n def main(): print "goodbye"\n'
self.FILE2_A = b'#!/bin/env python\n def main(): print "goodbye foreever"\n'
self.FILE3 = b'#!/bin/env python\n def main(): print "goodbye foreever"\n'
self.FILE4 = b'#!/bin/env python\n def main(): print "what now"\n'
self.FILE5 = b'Just text'
self.CAR01 = (
b'Some big and complicated data structure goes here, hopefully big enough that it requires chunking and compression.\n'
b'sydney london paris port moresby okinawa st petersburg salt lake city new york whitehorse mawson woy woy st louis\n'
)
self.srcTree = makeEmptyDir( os.path.join( self.workdir, 'src-1' ) )
fs = LocalFileStore( self.srcTree )
fs.put( 'code/file1.py', self.FILE1)
fs.put( 'code/file2.py', self.FILE2)
fs.put( 'assets/car-01.db', self.CAR01)
self.srcTree2 = makeEmptyDir( os.path.join( self.workdir, 'src-2' ) )
fs = LocalFileStore( self.srcTree2 )
fs.put( 'code/file1.py', self.FILE1 )
fs.put( 'code/file3.py', self.FILE3 )
fs.put( 'code/file4.py', self.FILE4)
fs.put( 'assets/car-01.db', self.CAR01 )
self.srcTree3 = makeEmptyDir( os.path.join( self.workdir, 'src-3' ) )
fs = LocalFileStore( self.srcTree3 )
fs.put( 'code/file1.py', self.FILE1 )
fs.put( 'code/file2.py', self.FILE2_A )
fs.put( 'code/file4.py', self.FILE4 )
fs.put( 'text/text', self.FILE5 )
self.srcTree4 = makeEmptyDir( os.path.join( self.workdir, 'src-4' ) )
fs = LocalFileStore( self.srcTree4 )
fs.put( 'file1.py', self.FILE1 )
fs.put( 'code/file2.py', self.FILE2_A )
fs.put( 'code/file4.py', self.FILE4 )
fs.put( 'text', self.FILE5 )
self.srcVariant = makeEmptyDir( os.path.join( self.workdir, 'src1-kiosk' ) )
fs = LocalFileStore( self.srcVariant )
fs.put( 'kiosk-01/key', b'this is the key src1:kiosk-01' )
fs.put( 'kiosk-02/key', b'this is the key src1:kiosk-02' )
def tearDown(self):
shutil.rmtree( self.workdir )
def test_fs_treestore(self):
fileStore = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'fs' ) ) )
localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
treestore = TreeStore.create( fileStore, localCache, TreeStoreConfig( 100, True ) )
creationTime = datetimeFromIso( '2015-01-01T00:00:00.0' )
treestore.upload( 'v1.0', '', creationTime, self.srcTree, CaptureUploadProgress() )
pkg = treestore.findPackage( 'v1.0' )
self.assertEqual( treestore.listPackages(), ['v1.0'] )
# Verify it
treestore.verify( pkg )
# Test the cache priming function
treestore.prime( self.srcTree2, CaptureUploadProgress() )
# Test whether the verifyCache works
corruptedFiles = treestore.validateLocalCache()
self.assertEqual( len(corruptedFiles), 0)
# Download it, checking we get expected progress callbacks
# The order of the callbacks will depend on the order of the
# chunks in the package definition, which will depend on the
# iteration order of the file system when the package was created.
# So check independently of ordering.
cb = CaptureDownloadProgress()
treestore.download( pkg, cb )
self.assertEqual( sorted(cb.recorded), [30, 45, 47, 100, 100] )
# Verify it locally
treestore.verifyLocal( pkg )
# Install it
destTree = os.path.join( self.workdir, 'dest-1' )
treestore.install( pkg, destTree, CaptureInstallProgress() )
# Check that the installed tree is the same as the source tree
self.assertEqual( subprocess.call( 'diff -r -x {0} {1} {2}'.format(S3TS_PROPERTIES,self.srcTree,destTree), shell=True ), 0 )
# Rename the tree, and check that installing that is the same
treestore.rename( 'v1.0', 'v1.0x' )
pkg = treestore.findPackage( 'v1.0x' )
treestore.download( pkg, CaptureDownloadProgress() )
destTree = os.path.join( self.workdir, 'dest-2' )
treestore.install( pkg, destTree, CaptureInstallProgress() )
self.assertEqual( subprocess.call( 'diff -r -x {0} {1} {2}'.format(S3TS_PROPERTIES,self.srcTree,destTree), shell=True ), 0 )
# Test the flushStore function has nothing to remove)
treestore.upload( 'extra', '', creationTime, self.srcTree2, CaptureUploadProgress() )
removed = treestore.flushStore()
self.assertEqual(len(removed), 0)
# Remove a tree
treestore.remove( 'v1.0x' )
# Test the store now has dangling chunks when can be removed
removed = treestore.flushStore()
self.assertTrue(len(removed) > 0)
treestore.upload( 'v1.0', '', creationTime, self.srcTree, CaptureUploadProgress() )
# Initially the local cache should contain chunks for v1.0 and extra. Empty
# the local cache by successive flush operations
removed = treestore.flushLocalCache(['extra'])
self.assertTrue(len(removed) > 0)
removed = treestore.flushLocalCache(['v1.0'])
self.assertTrue(len(removed) > 0)
# Confirm that removing everything from the local cache is refused
with self.assertRaises(RuntimeError):
treestore.flushLocalCache([])
def test_sync(self):
# Create a file system backed treestore
fileStore = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'fs' ) ) )
localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
treestore = TreeStore.create( fileStore, localCache, TreeStoreConfig( 10, True ) )
creationTime = datetimeFromIso( '2015-01-01T00:00:00.0' )
treestore.upload( 'v1.0', '', creationTime, self.srcTree, CaptureUploadProgress() )
treestore.upload( 'v1.3', '', creationTime, self.srcTree3, CaptureUploadProgress() )
treestore.upload( 'v1.4', '', creationTime, self.srcTree4, CaptureUploadProgress() )
testdir = makeEmptyDir( os.path.join( self.workdir, 'test' ) )
def assertExists( path ):
self.assertTrue( os.path.exists( os.path.join(testdir, path) ) )
def assertContains( path, data ):
with open( os.path.join(testdir, path), 'rb' ) as f:
self.assertEqual( f.read(), data )
def assertDoesntExist( path ):
self.assertFalse( os.path.exists( os.path.join(testdir, path) ) )
def assertInstalled(pkg, testdir):
result = treestore.compareInstall(pkg, testdir)
self.assertEqual( result.missing, set() )
self.assertEqual( result.extra, set() )
self.assertEqual( result.diffs, set() )
# sync a package to an empty directory
pkg = treestore.findPackage('v1.0')
treestore.download( pkg, CaptureDownloadProgress() )
treestore.sync( pkg, testdir, CaptureInstallProgress() )
assertContains( "code/file1.py", self.FILE1 )
assertContains( "code/file2.py", self.FILE2 )
assertContains( "assets/car-01.db", self.CAR01 )
assertExists( S3TS_PACKAGEFILE )
assertInstalled( pkg, testdir )
# Re-sync the same package
pkg = treestore.findPackage('v1.0')
treestore.download( pkg, CaptureDownloadProgress() )
treestore.sync( pkg, testdir, CaptureInstallProgress() )
assertContains( "code/file1.py", self.FILE1 )
assertContains( "code/file2.py", self.FILE2 )
assertContains( "assets/car-01.db", self.CAR01 )
assertExists( S3TS_PACKAGEFILE )
assertInstalled( pkg, testdir )
# Sync to a different package
pkg = treestore.findPackage('v1.3')
treestore.download( pkg, CaptureDownloadProgress() )
treestore.sync( pkg, testdir, CaptureInstallProgress() )
assertContains( "code/file1.py", self.FILE1 )
assertContains( "code/file2.py", self.FILE2_A )
assertDoesntExist( "assets/car-01.db" )
assertContains( "code/file4.py", self.FILE4 )
assertContains( "text/text", self.FILE5 )
assertExists( S3TS_PACKAGEFILE )
assertInstalled( pkg, testdir )
# Sync back to the first package
pkg = treestore.findPackage('v1.0')
treestore.download( pkg, CaptureDownloadProgress() )
treestore.sync( pkg, testdir, CaptureInstallProgress() )
assertContains( "code/file1.py", self.FILE1 )
assertContains( "code/file2.py", self.FILE2 )
assertContains( "assets/car-01.db", self.CAR01 )
assertDoesntExist( "code/file4.py" )
assertExists( S3TS_PACKAGEFILE )
assertInstalled( pkg, testdir )
# Remove the package file, and sync the second package again
os.unlink( os.path.join( testdir, S3TS_PACKAGEFILE ) )
pkg = treestore.findPackage('v1.3')
treestore.download( pkg, CaptureDownloadProgress() )
treestore.sync( pkg, testdir, CaptureInstallProgress() )
assertContains( "code/file1.py", self.FILE1 )
assertContains( "code/file2.py", self.FILE2_A )
assertDoesntExist( "assets/car-01.db" )
assertContains( "code/file4.py", self.FILE4 )
assertExists( S3TS_PACKAGEFILE )
assertInstalled( pkg, testdir )
# Add an extra file not in the package, and ensure
# that syncing deletes it
with open( os.path.join(testdir, "debug.log"), 'w') as f:
f.write( "something" )
pkg = treestore.findPackage('v1.3')
treestore.sync( pkg, testdir, CaptureInstallProgress() )
assertInstalled( pkg, testdir )
# Sync to test replacing a directory with a file
pkg = treestore.findPackage('v1.4')
treestore.download( pkg, CaptureDownloadProgress() )
treestore.sync( pkg, testdir, CaptureInstallProgress() )
assertContains( "text", self.FILE5 )
assertInstalled( pkg, testdir )
def test_metapackages(self):
# Create a file system backed treestore
fileStore = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'fs' ) ) )
localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
treestore = TreeStore.create( fileStore, localCache, TreeStoreConfig( 10, True ) )
creationTime = datetimeFromIso( '2015-01-01T00:00:00.0' )
treestore.upload( 'v1.0', '', creationTime, self.srcTree, CaptureUploadProgress() )
treestore.upload( 'v1.3', '', creationTime, self.srcTree3, CaptureUploadProgress() )
treestore.upload( 'v1.4', '', creationTime, self.srcTree4, CaptureUploadProgress() )
meta1 = MetaPackage(
name = 'meta1',
description = '',
creationTime = creationTime,
components = [
SubPackage( 'dir-1', 'v1.0' ),
SubPackage( 'dir-2', 'v1.3' ),
]
)
meta1.verify(treestore,{})
treestore.uploadMetaPackage(meta1)
meta1p = treestore.find( 'meta1', {})
treestore.download(meta1p, CaptureDownloadProgress() )
# Install it
destTree = os.path.join( self.workdir, 'dest-1' )
treestore.install(meta1p, destTree, CaptureInstallProgress() )
def assertContains( path, text ):
with open( os.path.join(destTree, path), 'rb' ) as f:
self.assertEqual( f.read(), text )
assertContains("dir-1/code/file1.py", self.FILE1)
assertContains("dir-2/text/text", self.FILE5)
def test_s3_treestore(self):
    """End-to-end exercise of an S3 backed treestore: upload, verify,
    download, install, compare, rename and remove a package."""
    # Create an s3 backed treestore
    # Requires these environment variables set
    #
    #   AWS_ACCESS_KEY_ID
    #   AWS_SECRET_ACCESS_KEY
    #   S3TS_BUCKET
    #
    # NB: **this will only work if the bucket is empty
    s3c = boto.connect_s3()
    bucket = s3c.get_bucket( os.environ['S3TS_BUCKET'] )
    with EmptyS3Bucket(bucket):
        fileStore = S3FileStore( bucket )
        localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
        treestore = TreeStore.create( fileStore, localCache, TreeStoreConfig( 100, True ) )
        # Upload it as a tree
        creationTime = datetimeFromIso( '2015-01-01T00:00:00.0' )
        treestore.upload( 'v1.0', '', creationTime, self.srcTree, CaptureUploadProgress() )
        pkg = treestore.findPackage( 'v1.0' )
        # Confirm it's in the index
        self.assertEqual( treestore.listPackages(), ['v1.0'] )
        treestore.verify( pkg )
        cb = CaptureDownloadProgress()
        treestore.download( pkg, cb )
        self.assertEqual( sorted(cb.recorded), [30, 45, 47, 100, 100] )
        treestore.verifyLocal( pkg )
        destTree = os.path.join( self.workdir, 'dest-1' )
        treestore.install( pkg, destTree, CaptureInstallProgress() )
        self.assertEqual( subprocess.call( 'diff -r -x {0} {1} {2}'.format(S3TS_PROPERTIES,self.srcTree,destTree), shell=True ), 0 )
        self.assertEqual( readInstallProperties(destTree).treeName, 'v1.0' )
        # A fresh install matches the package exactly
        result = treestore.compareInstall( pkg, destTree )
        self.assertEqual( len(result.missing), 0 )
        self.assertEqual( len(result.extra), 0 )
        self.assertEqual( len(result.diffs), 0 )
        # Perturb the installed tree and confirm the differences are reported
        with open( os.path.join(destTree,"code/file1.py"), "w" ) as f:
            f.write("x")
        with open( os.path.join(destTree,"code/file3.py"), "w" ) as f:
            f.write("y")
        os.unlink(os.path.join(destTree,'assets/car-01.db'))
        result = treestore.compareInstall( pkg, destTree )
        self.assertEqual( result.missing, set(['assets/car-01.db']) )
        self.assertEqual( result.extra, set(['code/file3.py']) )
        self.assertEqual( result.diffs, set(['code/file1.py']) )
        # Re-installing from scratch resolves the differences
        shutil.rmtree( destTree )
        treestore.install( pkg, destTree, CaptureInstallProgress() )
        result = treestore.compareInstall( pkg, destTree )
        self.assertEqual( len(result.missing), 0 )
        self.assertEqual( len(result.extra), 0 )
        self.assertEqual( len(result.diffs), 0 )
        pkg = treestore.findPackage( 'v1.0' )
        treestore.addUrls( pkg, 3600 )
        self.assertEqual( len(result.missing), 0 )
        self.assertEqual( len(result.extra), 0 )
        self.assertEqual( len(result.diffs), 0 )
        # An http-only treestore can download via the pre-signed urls
        localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
        treestore2 = TreeStore.forHttpOnly( localCache )
        cb = CaptureDownloadProgress()
        treestore2.downloadHttp( pkg, cb )
        self.assertEqual( sorted(cb.recorded), [30, 45, 47, 100, 100] )
        destTree2 = os.path.join( self.workdir, 'dest-2' )
        treestore2.install( pkg, destTree2, CaptureInstallProgress() )
        self.assertEqual( subprocess.call( 'diff -r -x {0} {1} {2}'.format(S3TS_PROPERTIES,self.srcTree,destTree2), shell=True ), 0 )
        # A renamed package remains downloadable and installable
        treestore.rename( 'v1.0', 'v1.0x' )
        pkg = treestore.findPackage( 'v1.0x' )
        treestore.download( pkg, CaptureDownloadProgress() )
        destTree = os.path.join( self.workdir, 'dest-3' )
        treestore.install( pkg, destTree, CaptureInstallProgress() )
        self.assertEqual( subprocess.call( 'diff -r -x {0} {1} {2}'.format(S3TS_PROPERTIES,self.srcTree,destTree), shell=True ), 0 )
        treestore.remove( 'v1.0x' )
def test_s3_prefixes(self):
    """Two treestores sharing one bucket under different key prefixes
    must not interfere with each other."""
    s3c = boto.connect_s3()
    bucket = s3c.get_bucket( os.environ['S3TS_BUCKET'] )
    with EmptyS3Bucket(bucket):
        localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
        treestore1 = TreeStore.create( S3FileStore( bucket, "prefix1" ), localCache, TreeStoreConfig( 100, True ) )
        treestore2 = TreeStore.create( S3FileStore( bucket, "prefix2" ), localCache, TreeStoreConfig( 100, True ) )
        creationTime = datetimeFromIso( '2015-01-01T00:00:00.0' )
        treestore1.upload( 'release', '', creationTime, self.srcTree, CaptureUploadProgress() )
        treestore2.upload( 'release', '', creationTime, self.srcTree2, CaptureUploadProgress() )
        pkg1 = treestore1.findPackage( 'release' )
        pkg2 = treestore2.findPackage( 'release' )
        # Same package name, but each prefix sees its own distinct content
        self.assertEqual(len(pkg1.files),3)
        self.assertEqual(len(pkg2.files),4)
def test_s3_merged_package(self):
    """createMerged() combines previously uploaded packages, mapping each
    one to a sub-directory of the merged install tree."""
    s3c = boto.connect_s3()
    bucket = s3c.get_bucket( os.environ['S3TS_BUCKET'] )
    with EmptyS3Bucket(bucket):
        localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
        treestore = TreeStore.create( S3FileStore( bucket), localCache, TreeStoreConfig( 100, True ) )
        creationTime = datetimeFromIso( '2015-01-01T00:00:00.0' )
        treestore.upload( 'src1', '', creationTime, self.srcTree, CaptureUploadProgress() )
        treestore.upload( 'src2', '', creationTime, self.srcTree2, CaptureUploadProgress() )
        treestore.upload( 'src3', '', creationTime, self.srcTree3, CaptureUploadProgress() )
        # '.' installs at the root; other keys install into sub-directories
        treestore.createMerged( 'merged', creationTime, { '.' : 'src1', 'subdir-a' : 'src2', 'subdir-b' : 'src3'})
        pkg = treestore.findPackage( 'merged' )
        treestore.download( pkg, CaptureDownloadProgress() )
        destTree = os.path.join( self.workdir, 'merged' )
        treestore.install( pkg, destTree, CaptureInstallProgress() )
        def assertSameContent( path1, path2 ):
            # Helper: the two files must have identical content
            with open(path1) as f1:
                with open(path2) as f2:
                    self.assertEqual( f1.read(), f2.read() )
        assertSameContent(os.path.join(destTree, "code/file1.py"), os.path.join(self.srcTree, "code/file1.py"))
        assertSameContent(os.path.join(destTree, "subdir-a/code/file4.py"), os.path.join(self.srcTree2, "code/file4.py"))
        assertSameContent(os.path.join(destTree, "subdir-b/text/text"), os.path.join(self.srcTree3, "text/text"))
def test_s3_many_treestore(self):
    """uploadMany() publishes one package per variant ('v1.0:kiosk-NN')
    from a single source tree plus variant overlays."""
    s3c = boto.connect_s3()
    bucket = s3c.get_bucket( os.environ['S3TS_BUCKET'] )
    with EmptyS3Bucket(bucket):
        fileStore = S3FileStore( bucket )
        localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
        treestore = TreeStore.create( fileStore, localCache, TreeStoreConfig( 100, True ) )
        creationTime = datetimeFromIso( '2015-01-01T00:00:00.0' )
        treestore.uploadMany( 'v1.0', '', creationTime, self.srcTree, self.srcVariant, CaptureUploadProgress() )
        pkg = treestore.findPackage( 'v1.0:kiosk-01' )
        self.assertEqual( treestore.listPackages(), ['v1.0:kiosk-01', 'v1.0:kiosk-02'] )
        # Verify it
        treestore.verify( pkg )
        # Download it, checking we get expected progress callbacks
        cb = CaptureDownloadProgress()
        treestore.download( pkg, cb )
        self.assertEqual( sorted(cb.recorded), [29, 30, 45, 47, 100, 100] )
        # Verify it locally
        treestore.verifyLocal( pkg )
        # Install it
        destTree = os.path.join( self.workdir, 'dest-1' )
        treestore.install( pkg, destTree, CaptureInstallProgress() )
        # Check that the installed tree is the same as the source tree
        self.assertEqual( subprocess.call( 'diff -r -x {0} {1} {2}'.format(S3TS_PROPERTIES,self.srcTree + '/assets',destTree + '/assets'), shell=True ), 0 )
        self.assertEqual( subprocess.call( 'diff -r -x {0} {1} {2}'.format(S3TS_PROPERTIES,self.srcTree + '/code',destTree + '/code'), shell=True ), 0 )
        self.assertEqual( readInstallProperties(destTree).treeName, 'v1.0:kiosk-01' )
def makeEmptyDir(path):
    """Ensure *path* exists as a freshly created, empty directory.

    Any existing directory tree at *path* is removed first.
    Returns *path* for call-chaining convenience.
    """
    try:
        shutil.rmtree(path)
    except FileNotFoundError:
        # Nothing there yet - nothing to remove.
        pass
    os.makedirs(path)
    return path
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| true | true |
f735517ae4b0c43d2d979272bde82614ddb99068 | 7,319 | py | Python | python-sockets-tutorial/libserver.py | mohammad26845/materials | 12f8baa413d8ab51d38f7756ed9b9d98076783b9 | [
"MIT"
] | 3,682 | 2018-05-07T19:45:24.000Z | 2022-03-31T15:19:10.000Z | python-sockets-tutorial/libserver.py | sribarrow/materials | c17c4a4d6f8487e59eac1df8c88ca92b73d6d2a5 | [
"MIT"
] | 148 | 2018-05-15T21:18:49.000Z | 2022-03-21T11:25:39.000Z | python-sockets-tutorial/libserver.py | sribarrow/materials | c17c4a4d6f8487e59eac1df8c88ca92b73d6d2a5 | [
"MIT"
] | 5,535 | 2018-05-25T23:36:08.000Z | 2022-03-31T16:55:52.000Z | import sys
import selectors
import json
import io
import struct
# Canned answers served for "search" requests; keys are the query strings
# sent by clients (emoji keys exercise UTF-8 handling end to end).
request_search = {
    "morpheus": "Follow the white rabbit. \U0001f430",
    "ring": "In the caves beneath the Misty Mountains. \U0001f48d",
    "\U0001f436": "\U0001f43e Playing ball! \U0001f3d0",
}
class Message:
    """Per-connection protocol state machine for the selector-based server.

    Wire format (shared with the client): a 2-byte big-endian proto-header
    giving the JSON header length, then a UTF-8 JSON header, then the
    content bytes. Each instance serves exactly one request/response
    exchange and closes the connection once the response is fully sent.
    """

    def __init__(self, selector, sock, addr):
        self.selector = selector
        self.sock = sock
        self.addr = addr
        self._recv_buffer = b""
        self._send_buffer = b""
        self._jsonheader_len = None  # length of JSON header, from proto-header
        self.jsonheader = None       # decoded JSON header dict
        self.request = None          # decoded request content
        self.response_created = False

    def _set_selector_events_mask(self, mode):
        """Set selector to listen for events: mode is 'r', 'w', or 'rw'."""
        if mode == "r":
            events = selectors.EVENT_READ
        elif mode == "w":
            events = selectors.EVENT_WRITE
        elif mode == "rw":
            events = selectors.EVENT_READ | selectors.EVENT_WRITE
        else:
            raise ValueError(f"Invalid events mask mode {repr(mode)}.")
        self.selector.modify(self.sock, events, data=self)

    def _read(self):
        """Read available bytes from the socket into the receive buffer."""
        try:
            # Should be ready to read
            data = self.sock.recv(4096)
        except BlockingIOError:
            # Resource temporarily unavailable (errno EWOULDBLOCK)
            pass
        else:
            if data:
                self._recv_buffer += data
            else:
                # Empty recv() means the peer closed its end of the socket.
                raise RuntimeError("Peer closed.")

    def _write(self):
        """Send as much of the send buffer as the socket will accept."""
        if self._send_buffer:
            print("sending", repr(self._send_buffer), "to", self.addr)
            try:
                # Should be ready to write
                sent = self.sock.send(self._send_buffer)
            except BlockingIOError:
                # Resource temporarily unavailable (errno EWOULDBLOCK)
                pass
            else:
                self._send_buffer = self._send_buffer[sent:]
                # Close when the buffer is drained. The response has been sent.
                if sent and not self._send_buffer:
                    self.close()

    def _json_encode(self, obj, encoding):
        """Serialize *obj* to JSON bytes in the given encoding."""
        return json.dumps(obj, ensure_ascii=False).encode(encoding)

    def _json_decode(self, json_bytes, encoding):
        """Decode JSON bytes in the given encoding to a Python object."""
        tiow = io.TextIOWrapper(
            io.BytesIO(json_bytes), encoding=encoding, newline=""
        )
        obj = json.load(tiow)
        tiow.close()
        return obj

    def _create_message(
        self, *, content_bytes, content_type, content_encoding
    ):
        """Assemble proto-header + JSON header + content into one message."""
        jsonheader = {
            "byteorder": sys.byteorder,
            "content-type": content_type,
            "content-encoding": content_encoding,
            "content-length": len(content_bytes),
        }
        jsonheader_bytes = self._json_encode(jsonheader, "utf-8")
        message_hdr = struct.pack(">H", len(jsonheader_bytes))
        message = message_hdr + jsonheader_bytes + content_bytes
        return message

    def _create_response_json_content(self):
        """Build the response dict for a JSON ("search"-action) request."""
        action = self.request.get("action")
        if action == "search":
            query = self.request.get("value")
            answer = request_search.get(query) or f'No match for "{query}".'
            content = {"result": answer}
        else:
            content = {"result": f'Error: invalid action "{action}".'}
        content_encoding = "utf-8"
        response = {
            "content_bytes": self._json_encode(content, content_encoding),
            "content_type": "text/json",
            "content_encoding": content_encoding,
        }
        return response

    def _create_response_binary_content(self):
        """Build the response dict echoing the start of a binary request."""
        response = {
            "content_bytes": b"First 10 bytes of request: "
            + self.request[:10],
            "content_type": "binary/custom-server-binary-type",
            "content_encoding": "binary",
        }
        return response

    def process_events(self, mask):
        """Dispatch a selector event mask to read() and/or write()."""
        if mask & selectors.EVENT_READ:
            self.read()
        if mask & selectors.EVENT_WRITE:
            self.write()

    def read(self):
        """Read from the socket and advance the protocol state machine.

        Each stage (proto-header, JSON header, content) is parsed as soon
        as enough bytes have been buffered.
        """
        self._read()
        if self._jsonheader_len is None:
            self.process_protoheader()
        if self._jsonheader_len is not None:
            if self.jsonheader is None:
                self.process_jsonheader()
        if self.jsonheader:
            if self.request is None:
                self.process_request()

    def write(self):
        """Create the response once a request is decoded, then send it."""
        if self.request:
            if not self.response_created:
                self.create_response()
        self._write()

    def close(self):
        """Unregister from the selector and close the socket."""
        print("closing connection to", self.addr)
        try:
            self.selector.unregister(self.sock)
        except Exception as e:
            print(
                "error: selector.unregister() exception for",
                f"{self.addr}: {repr(e)}",
            )
        try:
            self.sock.close()
        except OSError as e:
            print(
                "error: socket.close() exception for",
                f"{self.addr}: {repr(e)}",
            )
        finally:
            # Delete reference to socket object for garbage collection
            self.sock = None

    def process_protoheader(self):
        """Parse the fixed 2-byte big-endian length prefix once arrived."""
        hdrlen = 2
        if len(self._recv_buffer) >= hdrlen:
            self._jsonheader_len = struct.unpack(
                ">H", self._recv_buffer[:hdrlen]
            )[0]
            self._recv_buffer = self._recv_buffer[hdrlen:]

    def process_jsonheader(self):
        """Parse the variable-length JSON header once fully buffered."""
        hdrlen = self._jsonheader_len
        if len(self._recv_buffer) >= hdrlen:
            self.jsonheader = self._json_decode(
                self._recv_buffer[:hdrlen], "utf-8"
            )
            self._recv_buffer = self._recv_buffer[hdrlen:]
            for reqhdr in (
                "byteorder",
                "content-length",
                "content-type",
                "content-encoding",
            ):
                if reqhdr not in self.jsonheader:
                    raise ValueError(f'Missing required header "{reqhdr}".')

    def process_request(self):
        """Decode the request content once it is fully buffered."""
        content_len = self.jsonheader["content-length"]
        if not len(self._recv_buffer) >= content_len:
            return
        data = self._recv_buffer[:content_len]
        self._recv_buffer = self._recv_buffer[content_len:]
        if self.jsonheader["content-type"] == "text/json":
            encoding = self.jsonheader["content-encoding"]
            self.request = self._json_decode(data, encoding)
            print("received request", repr(self.request), "from", self.addr)
        else:
            # Binary or unknown content-type
            self.request = data
            print(
                f'received {self.jsonheader["content-type"]} request from',
                self.addr,
            )
        # Set selector to listen for write events, we're done reading.
        self._set_selector_events_mask("w")

    def create_response(self):
        """Build the full response message and queue it for sending."""
        if self.jsonheader["content-type"] == "text/json":
            response = self._create_response_json_content()
        else:
            # Binary or unknown content-type
            response = self._create_response_binary_content()
        message = self._create_message(**response)
        self.response_created = True
        self._send_buffer += message
| 33.728111 | 79 | 0.564558 | import sys
import selectors
import json
import io
import struct
# Canned answers served for "search" requests; keys are the query strings
# sent by clients (emoji keys exercise UTF-8 handling end to end).
request_search = {
    "morpheus": "Follow the white rabbit. \U0001f430",
    "ring": "In the caves beneath the Misty Mountains. \U0001f48d",
    "\U0001f436": "\U0001f43e Playing ball! \U0001f3d0",
}
class Message:
    """Per-connection protocol state machine for the selector-based server.

    Wire format (shared with the client): a 2-byte big-endian proto-header
    giving the JSON header length, then a UTF-8 JSON header, then the
    content bytes. Each instance serves exactly one request/response
    exchange and closes the connection once the response is fully sent.
    """

    def __init__(self, selector, sock, addr):
        self.selector = selector
        self.sock = sock
        self.addr = addr
        self._recv_buffer = b""
        self._send_buffer = b""
        self._jsonheader_len = None  # length of JSON header, from proto-header
        self.jsonheader = None       # decoded JSON header dict
        self.request = None          # decoded request content
        self.response_created = False

    def _set_selector_events_mask(self, mode):
        """Set selector to listen for events: mode is 'r', 'w', or 'rw'."""
        if mode == "r":
            events = selectors.EVENT_READ
        elif mode == "w":
            events = selectors.EVENT_WRITE
        elif mode == "rw":
            events = selectors.EVENT_READ | selectors.EVENT_WRITE
        else:
            raise ValueError(f"Invalid events mask mode {repr(mode)}.")
        self.selector.modify(self.sock, events, data=self)

    def _read(self):
        """Read available bytes from the socket into the receive buffer."""
        try:
            # Socket should be ready; BlockingIOError means try again later.
            data = self.sock.recv(4096)
        except BlockingIOError:
            pass
        else:
            if data:
                self._recv_buffer += data
            else:
                # Empty recv() means the peer closed its end of the socket.
                raise RuntimeError("Peer closed.")

    def _write(self):
        """Send as much of the send buffer as the socket will accept."""
        if self._send_buffer:
            print("sending", repr(self._send_buffer), "to", self.addr)
            try:
                sent = self.sock.send(self._send_buffer)
            except BlockingIOError:
                # Resource temporarily unavailable; retry on next event.
                pass
            else:
                self._send_buffer = self._send_buffer[sent:]
                # Close once the buffer is drained: response fully sent.
                if sent and not self._send_buffer:
                    self.close()

    def _json_encode(self, obj, encoding):
        """Serialize *obj* to JSON bytes in the given encoding."""
        return json.dumps(obj, ensure_ascii=False).encode(encoding)

    def _json_decode(self, json_bytes, encoding):
        """Decode JSON bytes in the given encoding to a Python object."""
        tiow = io.TextIOWrapper(
            io.BytesIO(json_bytes), encoding=encoding, newline=""
        )
        obj = json.load(tiow)
        tiow.close()
        return obj

    def _create_message(
        self, *, content_bytes, content_type, content_encoding
    ):
        """Assemble proto-header + JSON header + content into one message."""
        jsonheader = {
            "byteorder": sys.byteorder,
            "content-type": content_type,
            "content-encoding": content_encoding,
            "content-length": len(content_bytes),
        }
        jsonheader_bytes = self._json_encode(jsonheader, "utf-8")
        message_hdr = struct.pack(">H", len(jsonheader_bytes))
        message = message_hdr + jsonheader_bytes + content_bytes
        return message

    def _create_response_json_content(self):
        """Build the response dict for a JSON ("search"-action) request."""
        action = self.request.get("action")
        if action == "search":
            query = self.request.get("value")
            answer = request_search.get(query) or f'No match for "{query}".'
            content = {"result": answer}
        else:
            content = {"result": f'Error: invalid action "{action}".'}
        content_encoding = "utf-8"
        response = {
            "content_bytes": self._json_encode(content, content_encoding),
            "content_type": "text/json",
            "content_encoding": content_encoding,
        }
        return response

    def _create_response_binary_content(self):
        """Build the response dict echoing the start of a binary request."""
        response = {
            "content_bytes": b"First 10 bytes of request: "
            + self.request[:10],
            "content_type": "binary/custom-server-binary-type",
            "content_encoding": "binary",
        }
        return response

    def process_events(self, mask):
        """Dispatch a selector event mask to read() and/or write()."""
        if mask & selectors.EVENT_READ:
            self.read()
        if mask & selectors.EVENT_WRITE:
            self.write()

    def read(self):
        """Read from the socket and advance the protocol state machine.

        Each stage (proto-header, JSON header, content) is parsed as soon
        as enough bytes have been buffered.
        """
        self._read()
        if self._jsonheader_len is None:
            self.process_protoheader()
        if self._jsonheader_len is not None:
            if self.jsonheader is None:
                self.process_jsonheader()
        if self.jsonheader:
            if self.request is None:
                self.process_request()

    def write(self):
        """Create the response once a request is decoded, then send it."""
        if self.request:
            if not self.response_created:
                self.create_response()
        self._write()

    def close(self):
        """Unregister from the selector and close the socket."""
        print("closing connection to", self.addr)
        try:
            self.selector.unregister(self.sock)
        except Exception as e:
            print(
                "error: selector.unregister() exception for",
                f"{self.addr}: {repr(e)}",
            )
        try:
            self.sock.close()
        except OSError as e:
            print(
                "error: socket.close() exception for",
                f"{self.addr}: {repr(e)}",
            )
        finally:
            # Drop the socket reference so it can be garbage collected.
            self.sock = None

    def process_protoheader(self):
        """Parse the fixed 2-byte big-endian length prefix once arrived."""
        hdrlen = 2
        if len(self._recv_buffer) >= hdrlen:
            self._jsonheader_len = struct.unpack(
                ">H", self._recv_buffer[:hdrlen]
            )[0]
            self._recv_buffer = self._recv_buffer[hdrlen:]

    def process_jsonheader(self):
        """Parse the variable-length JSON header once fully buffered."""
        hdrlen = self._jsonheader_len
        if len(self._recv_buffer) >= hdrlen:
            self.jsonheader = self._json_decode(
                self._recv_buffer[:hdrlen], "utf-8"
            )
            self._recv_buffer = self._recv_buffer[hdrlen:]
            for reqhdr in (
                "byteorder",
                "content-length",
                "content-type",
                "content-encoding",
            ):
                if reqhdr not in self.jsonheader:
                    raise ValueError(f'Missing required header "{reqhdr}".')

    def process_request(self):
        """Decode the request content once it is fully buffered."""
        content_len = self.jsonheader["content-length"]
        if not len(self._recv_buffer) >= content_len:
            return
        data = self._recv_buffer[:content_len]
        self._recv_buffer = self._recv_buffer[content_len:]
        if self.jsonheader["content-type"] == "text/json":
            encoding = self.jsonheader["content-encoding"]
            self.request = self._json_decode(data, encoding)
            print("received request", repr(self.request), "from", self.addr)
        else:
            # Binary or unknown content-type: keep the raw bytes.
            self.request = data
            print(
                f'received {self.jsonheader["content-type"]} request from',
                self.addr,
            )
        # Done reading; switch the selector to write events.
        self._set_selector_events_mask("w")

    def create_response(self):
        """Build the full response message and queue it for sending."""
        if self.jsonheader["content-type"] == "text/json":
            response = self._create_response_json_content()
        else:
            # Binary or unknown content-type
            response = self._create_response_binary_content()
        message = self._create_message(**response)
        self.response_created = True
        self._send_buffer += message
f73551e5a07ae0b82068651eb3d0b261c227f5a0 | 9,545 | py | Python | rltime/eval.py | frederikschubert/rltime | d1722ffd4cf7b4599655b8d9c64abc243919afc9 | [
"Apache-2.0"
] | null | null | null | rltime/eval.py | frederikschubert/rltime | d1722ffd4cf7b4599655b8d9c64abc243919afc9 | [
"Apache-2.0"
] | null | null | null | rltime/eval.py | frederikschubert/rltime | d1722ffd4cf7b4599655b8d9c64abc243919afc9 | [
"Apache-2.0"
] | null | null | null | """ Entry point for evaluating/rendering a trained policy. """
import argparse
import json
import os
import numpy as np
import time
import datetime
from rltime.general.config import load_config
from rltime.general.utils import deep_dictionary_update
from rltime.general.type_registry import get_registered_type
from rltime.env_wrappers.common import make_env_creator, EpisodeRecorder
from rltime.env_wrappers.vec_env.sub_proc import make_sub_proc_vec_env
from rltime.general.loggers import DirectoryLogger
def create_policy_from_config(config, action_space, observation_space):
    """Creates a policy from the given config and spaces

    This does not load the weights just creates the policy. <config> may be
    a dict or anything accepted by load_config().
    """
    cfg = config if isinstance(config, dict) else load_config(config)
    # The trainer class named in the config is responsible for building
    # the policy it knows how to train.
    trainer_type = cfg['training'].get("type", None)
    train_cls = get_registered_type("trainers", trainer_type)
    assert hasattr(train_cls, "create_policy"), \
        f"Config training class {type(train_cls)} does not have a " \
        "'create_policy' method"
    return train_cls.create_policy(
        model_config=cfg.get("model"),
        action_space=action_space,
        observation_space=observation_space,
        **cfg.get("policy_args", {}))
def eval_policy(path, num_envs, episode_count, record=False, record_fps=60,
                render=False, render_fps=None, eps=0.001, conf_update=None):
    """Evaluates training result at 'path', loading the last checkpoint

    The result is logged to a new line in file 'eval.json' in <path>

    Args:
        path: The path containing the training result output to evaluate
        num_envs: Amount of vectorized (sub-process) ENVs to evaluate in
            parallel
        episode_count: The amount of episodes to evaluate total
        record: Whether to record episodes to MP4 (under 'recordings'
            sub-directory in <path>)
        record_fps: If <record>, the FPS to record at (These are raw ENV
            frames before any frame-skipping, so atari would usually be 60)
        render: Whether to render the ENVs in a window in real-time (Tiled
            if num_envs>1)
        render_fps: Frames-Per-Second to sync the rendering to (Valid only
            for render=True), the default (None) renders at max policy
            speed. These are acting steps, so after frame-skipping if active
        eps: Epsilon to use for random action selection
        conf_update: Optional dict deep-merged into the stored config
            before evaluation

    Note: We count the first 'episode_count' episodes that *started* and
        not ended, as 'ended' is unfair to longer episodes in case of
        vectorized evaluation: short/failing episodes restart immediately
        and would dominate the first N completions, while episodes counted
        by start order give the same expectation as a serial evaluation.
    """
    print("Evaluating:", path)
    assert(num_envs <= episode_count), \
        "num_envs can't be higher than the requested episode_count"
    logger = DirectoryLogger(path, use_logging=False, tensorboard=False)
    # Load the config from the result path
    config = logger.get_config()
    if conf_update:
        config = dict(config)  # Avoid changing the passed config
        deep_dictionary_update(config, conf_update)
    # Make the env-creaton function based on the config settings
    env_args = config.get("env_args", {})
    if record:
        # If requested, add also an episode-recorder to the ENV stack
        recorder = {
            "type": EpisodeRecorder,
            "args": {
                "path": os.path.join(path, "recordings"),
                "fps": record_fps
            }
        }
        env_args['wrappers'] = [recorder] + env_args.get('wrappers', [])
    env_creator = make_env_creator(config.get("env"), **env_args)
    # Create a vectorized ENV
    env = make_sub_proc_vec_env(env_creator, num_envs)
    # Create the policy based on the config
    policy = create_policy_from_config(
        config, env.action_space, env.observation_space)
    # Load the last checkpoint
    training_step, cp_data = logger.get_checkpoint()
    # Load the weights from the checkpoint to the policy
    policy.load_state(cp_data['policy_state'])
    print("Loaded checkpoint from step:", training_step)
    # The initial policy input state
    state = policy.make_input_state(env.reset(), np.array([True] * num_envs))
    episodes_started = num_envs
    rewards = []
    lengths = []
    # This signifies the ENV started the episode in time and should be counted
    masks = [True] * num_envs
    # TODO(frederik): Mention mode and difficulty
    print(f"Running '{config.get('env')}' for {episode_count} episodes"
          f" on {num_envs} ENVs")
    while len(rewards) < episode_count:
        step_start = time.time()
        # Select the next action for each env
        preds = policy.actor_predict(state, timesteps=1)
        actions = preds['actions']
        if eps:
            # Remap to random actions with eps probability
            for i in range(num_envs):
                if np.random.rand() < eps:
                    actions[i] = env.action_space.sample()
        # Send the action and get the transition data
        obs, _, dones, info = env.step(actions)
        # Check any env if finished
        for i, env_info in enumerate(info):
            # We use the 'real' done/reward from the EpisodeTracker wrapper
            if env_info['episode_info']['done']:
                if masks[i]:
                    # Only count the first 'episode_count' that started
                    reward = env_info['episode_info']['reward']
                    length = env_info['episode_info']['length']
                    rewards.append(reward)
                    lengths.append(length)
                    print(f"Episode {len(rewards)}/{episode_count} "
                          f"finished with reward: {reward}")
                episodes_started += 1
                if episodes_started > episode_count:
                    masks[i] = False
        # Render to screen if requested
        if render:
            if render_fps:
                # Sleep whatever remains of this frame's time budget
                diff = 1./render_fps - (time.time() - step_start)
                if diff > 0:
                    time.sleep(diff)
            env.render()
        # Generate the next policy input state
        state = policy.make_input_state(obs, dones)
    env.close()
    # Log the result (summary statistics of rewards and episode lengths)
    result = {
        "step": training_step,
        "date": datetime.datetime.now(),
        "episodes": episode_count,
        "envs": num_envs,
        **{
            key: {
                "mean": np.mean(vals),
                "min": np.min(vals),
                "max": np.max(vals),
                "median": np.median(vals),
                "std": np.std(vals),
            } for key, vals in [("reward", rewards), ("length", lengths)]
        }
    }
    print("Result:")
    logger.log_result("eval", result, None)
def parse_args():
    """Parse the command-line arguments for policy evaluation."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # (flags, kwargs) specs, registered in order so --help output and
    # parsing behavior match the original hand-written sequence.
    arg_specs = [
        (('path',), dict(
            type=str,
            help="The path to the training directory result to evaluate")),
        (('--num-envs',), dict(
            type=int, default=1,
            help="Amount of ENVs to run in parallel")),
        (('--episodes',), dict(
            type=int, default=5,
            help="Amount of episodes to run")),
        (('--record',), dict(
            action='store_true',
            help="Whether to record episode to MP4 (To a sub-directory in "
                 "the result path). Warning: If used with --num-envs>1 the "
                 "last videos will be truncated")),
        (('--record-fps',), dict(
            type=int, default=60,
            help="FPS to record at if --record (Typically 60FPS for atari)")),
        (('--render',), dict(
            action='store_true',
            help="Whether to render the episodes in real-time")),
        (('--render-fps',), dict(
            type=int, default=0,
            help="FPS to sync to if using --render (Set to 0 for full "
                 "speed), note this is after ENV frame-skipping so if you "
                 "want 60FPS with frame-skip of 4 use 15 here")),
        (('--eps',), dict(
            type=float, default=0.001,
            help="Epsilon value to use for random action selection during "
                 "evaluation")),
        (('--conf-update',), dict(
            type=str,
            help="Optional JSON dictionary string to deep-update the "
                 "config with")),
    ]
    for flags, kwargs in arg_specs:
        parser.add_argument(*flags, **kwargs)
    return parser.parse_args()
def main():
    """Entry point: evaluate the policy described by the CLI arguments."""
    args = parse_args()
    # --conf-update arrives as a JSON string; decode it if present.
    conf_update = json.loads(args.conf_update) if args.conf_update else None
    eval_policy(
        args.path, num_envs=args.num_envs, episode_count=args.episodes,
        record=args.record, record_fps=args.record_fps,
        render=args.render, render_fps=args.render_fps, eps=args.eps,
        conf_update=conf_update)
# Script entry point
if __name__ == '__main__':
    main()
| 39.937238 | 94 | 0.638135 |
import argparse
import json
import os
import numpy as np
import time
import datetime
from rltime.general.config import load_config
from rltime.general.utils import deep_dictionary_update
from rltime.general.type_registry import get_registered_type
from rltime.env_wrappers.common import make_env_creator, EpisodeRecorder
from rltime.env_wrappers.vec_env.sub_proc import make_sub_proc_vec_env
from rltime.general.loggers import DirectoryLogger
def create_policy_from_config(config, action_space, observation_space):
    """Creates a policy from the given config and spaces.

    This only constructs the policy; no checkpoint weights are loaded.
    <config> may be a dict or anything accepted by load_config().
    """
    if not isinstance(config, dict):
        config = load_config(config)
    # The trainer class named in the config builds the policy it trains
    train_cls = get_registered_type(
        "trainers", config['training'].get("type", None))
    assert(hasattr(train_cls, "create_policy")), \
        f"Config training class {type(train_cls)} does not have a " \
        "'create_policy' method"
    model_config = config.get("model")
    return train_cls.create_policy(
        model_config=model_config, action_space=action_space,
        observation_space=observation_space, **config.get("policy_args", {}))
def eval_policy(path, num_envs, episode_count, record=False, record_fps=60,
                render=False, render_fps=None, eps=0.001, conf_update=None):
    """Evaluate the training result at <path> using its last checkpoint.

    Runs <episode_count> episodes across <num_envs> parallel sub-process
    ENVs with eps-greedy action selection, optionally recording
    (record/record_fps) or rendering (render/render_fps) them, and logs
    summary reward/length statistics to 'eval.json' in <path>.
    Episodes are counted by start order rather than completion order so
    that vectorized evaluation is not biased against long episodes.
    """
    print("Evaluating:", path)
    assert(num_envs <= episode_count), \
        "num_envs can't be higher than the requested episode_count"
    logger = DirectoryLogger(path, use_logging=False, tensorboard=False)
    # Load the config from the result path
    config = logger.get_config()
    if conf_update:
        config = dict(config)  # Avoid changing the passed config
        deep_dictionary_update(config, conf_update)
    # Make the env-creaton function based on the config settings
    env_args = config.get("env_args", {})
    if record:
        # If requested, add also an episode-recorder to the ENV stack
        recorder = {
            "type": EpisodeRecorder,
            "args": {
                "path": os.path.join(path, "recordings"),
                "fps": record_fps
            }
        }
        env_args['wrappers'] = [recorder] + env_args.get('wrappers', [])
    env_creator = make_env_creator(config.get("env"), **env_args)
    # Create a vectorized ENV
    env = make_sub_proc_vec_env(env_creator, num_envs)
    # Create the policy based on the config
    policy = create_policy_from_config(
        config, env.action_space, env.observation_space)
    # Load the last checkpoint
    training_step, cp_data = logger.get_checkpoint()
    # Load the weights from the checkpoint to the policy
    policy.load_state(cp_data['policy_state'])
    print("Loaded checkpoint from step:", training_step)
    # The initial policy input state
    state = policy.make_input_state(env.reset(), np.array([True] * num_envs))
    episodes_started = num_envs
    rewards = []
    lengths = []
    # This signifies the ENV started the episode in time and should be counted
    masks = [True] * num_envs
    # TODO(frederik): Mention mode and difficulty
    print(f"Running '{config.get('env')}' for {episode_count} episodes"
          f" on {num_envs} ENVs")
    while len(rewards) < episode_count:
        step_start = time.time()
        # Select the next action for each env
        preds = policy.actor_predict(state, timesteps=1)
        actions = preds['actions']
        if eps:
            # Remap to random actions with eps probability
            for i in range(num_envs):
                if np.random.rand() < eps:
                    actions[i] = env.action_space.sample()
        # Send the action and get the transition data
        obs, _, dones, info = env.step(actions)
        # Check any env if finished
        for i, env_info in enumerate(info):
            # We use the 'real' done/reward from the EpisodeTracker wrapper
            if env_info['episode_info']['done']:
                if masks[i]:
                    # Only count the first 'episode_count' that started
                    reward = env_info['episode_info']['reward']
                    length = env_info['episode_info']['length']
                    rewards.append(reward)
                    lengths.append(length)
                    print(f"Episode {len(rewards)}/{episode_count} "
                          f"finished with reward: {reward}")
                episodes_started += 1
                if episodes_started > episode_count:
                    masks[i] = False
        # Render to screen if requested
        if render:
            if render_fps:
                # Sleep whatever remains of this frame's time budget
                diff = 1./render_fps - (time.time() - step_start)
                if diff > 0:
                    time.sleep(diff)
            env.render()
        # Generate the next policy input state
        state = policy.make_input_state(obs, dones)
    env.close()
    # Log the result (summary statistics of rewards and episode lengths)
    result = {
        "step": training_step,
        "date": datetime.datetime.now(),
        "episodes": episode_count,
        "envs": num_envs,
        **{
            key: {
                "mean": np.mean(vals),
                "min": np.min(vals),
                "max": np.max(vals),
                "median": np.median(vals),
                "std": np.std(vals),
            } for key, vals in [("reward", rewards), ("length", lengths)]
        }
    }
    print("Result:")
    logger.log_result("eval", result, None)
def parse_args():
    """Parse the command-line arguments for policy evaluation."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        'path', type=str,
        help="The path to the training directory result to evaluate")
    parser.add_argument(
        '--num-envs', type=int, default=1,
        help="Amount of ENVs to run in parallel")
    parser.add_argument(
        '--episodes', type=int, default=5,
        help="Amount of episodes to run")
    parser.add_argument(
        '--record', action='store_true',
        help="Whether to record episode to MP4 (To a sub-directory in the "
             "result path). Warning: If used with --num-envs>1 the last "
             "videos will be truncated")
    parser.add_argument(
        '--record-fps', type=int, default=60,
        help="FPS to record at if --record (Typically 60FPS for atari)")
    parser.add_argument(
        '--render', action='store_true',
        help="Whether to render the episodes in real-time")
    parser.add_argument(
        '--render-fps', type=int, default=0,
        help="FPS to sync to if using --render (Set to 0 for full speed), "
             "note this is after ENV frame-skipping so if you want 60FPS with "
             "frame-skip of 4 use 15 here")
    parser.add_argument(
        '--eps', type=float, default=0.001,
        help="Epsilon value to use for random action selection during "
             "evaluation")
    parser.add_argument(
        '--conf-update', type=str,
        help="Optional JSON dictionary string to deep-update the config with")
    return parser.parse_args()
def main():
    """Entry point: parse CLI options and run the policy evaluation."""
    args = parse_args()
    # Only decode --conf-update when a JSON string was actually supplied.
    if args.conf_update:
        conf_update = json.loads(args.conf_update)
    else:
        conf_update = None
    eval_policy(
        args.path,
        num_envs=args.num_envs,
        episode_count=args.episodes,
        record=args.record,
        record_fps=args.record_fps,
        render=args.render,
        render_fps=args.render_fps,
        eps=args.eps,
        conf_update=conf_update,
    )
if __name__ == '__main__':
main()
| true | true |
f735520963a9477834365eb74c51fd51ac149bce | 1,477 | py | Python | main/input_file_creator/write_cell_card/cell_card_main.py | KanruXie/Virtual_Linac | 7d321fcc744a6bec639bab495f5a2a71af4cbe4d | [
"Apache-2.0"
] | null | null | null | main/input_file_creator/write_cell_card/cell_card_main.py | KanruXie/Virtual_Linac | 7d321fcc744a6bec639bab495f5a2a71af4cbe4d | [
"Apache-2.0"
] | null | null | null | main/input_file_creator/write_cell_card/cell_card_main.py | KanruXie/Virtual_Linac | 7d321fcc744a6bec639bab495f5a2a71af4cbe4d | [
"Apache-2.0"
] | 1 | 2021-12-19T15:59:50.000Z | 2021-12-19T15:59:50.000Z | # -*- coding: utf-8 -*-
"""
Cell Card main module
"""
__author__ = 'Kanru Xie'
import globalvar as glv
from input_file_creator.write_cell_card import (objective_cell, air_cell, baseplate_cell, jaws_cell, mlc_cell)
def cell_card():
    """Assemble the MCNP cell-card text for the currently configured MLC state.

    Returns an empty string for any MLC state other than 'no mlc' or
    'standard mlc' (matching the previous fall-through behaviour).
    """
    mlc_state = glv.get_value('mlc state')
    if mlc_state not in ('no mlc', 'standard mlc'):
        return ''
    # Leading section shared by both configurations: phantom + objective cells.
    parts = [
        'c Cell card' + '\n',
        'c Water tank phantom' + '\n',
        ' 1 1 -1.0 -1 2 imp:p,e 1 $water tank' + '\n',
        objective_cell.obj() + '\n',
    ]
    # The air region differs depending on whether an MLC is present.
    if mlc_state == 'no mlc':
        parts.append(air_cell.air_card_1())
    else:
        parts.append(air_cell.air_card_2())
    parts.extend([
        'c Jaws' + '\n',
        jaws_cell.jaws() + '\n',
        baseplate_cell.baseplate() + '\n',
    ])
    # Only the standard-MLC configuration includes the MLC cells.
    if mlc_state == 'standard mlc':
        parts.append(mlc_cell.mlc_card())
    parts.extend([
        'c Void' + '\n',
        ' 999 0 999 imp:p,e 0' + '\n' + '\n',
    ])
    return ''.join(parts)
| 36.02439 | 110 | 0.404198 |
__author__ = 'Kanru Xie'
import globalvar as glv
from input_file_creator.write_cell_card import (objective_cell, air_cell, baseplate_cell, jaws_cell, mlc_cell)
def cell_card():
c_card = ''
mlc_state = glv.get_value('mlc state')
if mlc_state == 'no mlc':
c_card = str('c Cell card' + '\n' +
'c Water tank phantom' + '\n' +
' 1 1 -1.0 -1 2 imp:p,e 1 $water tank' + '\n' +
objective_cell.obj() + '\n' +
air_cell.air_card_1() +
'c Jaws' + '\n' +
jaws_cell.jaws() + '\n' +
baseplate_cell.baseplate() + '\n' +
'c Void' + '\n' +
' 999 0 999 imp:p,e 0' + '\n' + '\n'
)
elif mlc_state == 'standard mlc':
c_card = str('c Cell card' + '\n' +
'c Water tank phantom' + '\n' +
' 1 1 -1.0 -1 2 imp:p,e 1 $water tank' + '\n' +
objective_cell.obj() + '\n' +
air_cell.air_card_2() +
'c Jaws' + '\n' +
jaws_cell.jaws() + '\n' +
baseplate_cell.baseplate() + '\n' +
mlc_cell.mlc_card() +
'c Void' + '\n' +
' 999 0 999 imp:p,e 0' + '\n' + '\n'
)
return c_card
| true | true |
f73552eba68c512312aa18e4f1d14dbaea90b0e9 | 208 | py | Python | phf/__init__.py | framaz/AsyncSiteParser | 51ed0f2611f2f309734ccfbfc496cb13759c9ea1 | [
"MIT"
] | 2 | 2020-07-14T13:07:01.000Z | 2020-07-14T14:51:14.000Z | phf/__init__.py | framaz/phf | 51ed0f2611f2f309734ccfbfc496cb13759c9ea1 | [
"MIT"
] | null | null | null | phf/__init__.py | framaz/phf | 51ed0f2611f2f309734ccfbfc496cb13759c9ea1 | [
"MIT"
] | null | null | null | from . import abstracthook
from . import commandinput
from . import provider
from . import utils
from .phfsystem import PHFSystem
__all__ = ["abstracthook", "commandinput", "provider", "utils", "PHFSystem"]
| 26 | 76 | 0.759615 | from . import abstracthook
from . import commandinput
from . import provider
from . import utils
from .phfsystem import PHFSystem
__all__ = ["abstracthook", "commandinput", "provider", "utils", "PHFSystem"]
| true | true |
f7355393f2d9138047ff104c57ca1997780b65b4 | 25 | py | Python | examples/bytearray/ex3.py | mcorne/python-by-example | 15339c0909c84b51075587a6a66391100971c033 | [
"MIT"
] | null | null | null | examples/bytearray/ex3.py | mcorne/python-by-example | 15339c0909c84b51075587a6a66391100971c033 | [
"MIT"
] | null | null | null | examples/bytearray/ex3.py | mcorne/python-by-example | 15339c0909c84b51075587a6a66391100971c033 | [
"MIT"
] | null | null | null | print(bytearray(b'Hi!'))
| 12.5 | 24 | 0.68 | print(bytearray(b'Hi!'))
| true | true |
f73553f7d28d117cc9c9a46d69f9483b75ac3ce2 | 18,153 | py | Python | rlkit/core/base_algorithm.py | apexrl/EBIL-torch | 8d257d5efa36f7c608085e34a7cdd3e996962d3f | [
"MIT"
] | 5 | 2021-05-28T02:38:33.000Z | 2022-03-03T01:17:09.000Z | rlkit/core/base_algorithm.py | apexrl/EBIL-torch | 8d257d5efa36f7c608085e34a7cdd3e996962d3f | [
"MIT"
] | 1 | 2021-11-05T14:25:17.000Z | 2021-11-10T11:59:24.000Z | rlkit/core/base_algorithm.py | apexrl/EBIL-torch | 8d257d5efa36f7c608085e34a7cdd3e996962d3f | [
"MIT"
] | 1 | 2021-11-15T07:41:33.000Z | 2021-11-15T07:41:33.000Z | import abc
import pickle
import time
from collections import OrderedDict
from copy import deepcopy
import gtimer as gt
import numpy as np
from rlkit.core import logger, eval_util
from rlkit.data_management.env_replay_buffer import EnvReplayBuffer
from rlkit.data_management.path_builder import PathBuilder
from rlkit.policies.base import ExplorationPolicy
from rlkit.torch.sac.policies import MakeDeterministic
from rlkit.samplers import PathSampler
from rlkit.envs.wrapped_absorbing_env import WrappedAbsorbingEnv
from gym.spaces import Dict
class BaseAlgorithm(metaclass=abc.ABCMeta):
"""
base algorithm for single task setting
can be used for RL or Learning from Demonstrations
"""
    def __init__(
            self,
            env,
            exploration_policy: ExplorationPolicy,
            training_env=None,
            eval_policy=None,
            eval_sampler=None,
            num_epochs=100,
            num_steps_per_epoch=10000,
            num_steps_between_train_calls=1000,
            num_steps_per_eval=1000,
            max_path_length=1000,
            min_steps_before_training=0,
            replay_buffer=None,
            replay_buffer_size=10000,
            freq_saving=1,
            save_replay_buffer=False,
            save_environment=False,
            save_algorithm=False,
            save_best=False,
            save_best_starting_from_epoch=0,
            best_key='AverageReturn', # metric tracked for "best" checkpoints; higher is better
            no_terminal=False,
            wrap_absorbing=False,
            render=False,
            render_kwargs={},
            freq_log_visuals=1,
            eval_deterministic=False
    ):
        """
        :param env: environment used for evaluation rollouts
        :param exploration_policy: policy used to collect training data
        :param training_env: separate env instance for data collection;
            defaults to a pickle round-trip copy of ``env``
        :param eval_policy: policy evaluated periodically; defaults to a
            deterministic wrapper around ``exploration_policy``
        :param replay_buffer: optional pre-built buffer; one is created
            when None
        NOTE(review): ``eval_deterministic`` is currently unused here —
        the eval policy is always wrapped in MakeDeterministic.
        """
        self.env = env
        # Deep-copy via pickle so train/eval envs never share state.
        self.training_env = training_env or pickle.loads(pickle.dumps(env))
        self.exploration_policy = exploration_policy
        self.num_epochs = num_epochs
        self.num_env_steps_per_epoch = num_steps_per_epoch
        self.num_steps_between_train_calls = num_steps_between_train_calls
        self.num_steps_per_eval = num_steps_per_eval
        self.max_path_length = max_path_length
        self.min_steps_before_training = min_steps_before_training
        self.render = render
        self.save_replay_buffer = save_replay_buffer
        self.save_algorithm = save_algorithm
        self.save_environment = save_environment
        self.save_best = save_best
        self.save_best_starting_from_epoch = save_best_starting_from_epoch
        self.best_key = best_key
        self.best_statistic_so_far = float('-Inf')
        if eval_sampler is None:
            if eval_policy is None:
                eval_policy = exploration_policy
            eval_policy = MakeDeterministic(eval_policy)
            eval_sampler = PathSampler(
                env,
                eval_policy,
                num_steps_per_eval,
                max_path_length,
                no_terminal=no_terminal,
                render=render,
                render_kwargs=render_kwargs
            )
        self.eval_policy = eval_policy
        self.eval_sampler = eval_sampler
        self.action_space = env.action_space
        self.obs_space = env.observation_space
        self.replay_buffer_size = replay_buffer_size
        if replay_buffer is None:
            # A path must fit in the buffer in one piece.
            assert max_path_length < replay_buffer_size
            replay_buffer = EnvReplayBuffer(
                self.replay_buffer_size,
                self.env,
                random_seed=np.random.randint(10000)
            )
        else:
            assert max_path_length < replay_buffer._max_replay_buffer_size
        self.replay_buffer = replay_buffer
        # Bookkeeping counters for steps / rollouts / timing.
        self._n_env_steps_total = 0
        self._n_train_steps_total = 0
        self._n_rollouts_total = 0
        self._do_train_time = 0
        self._epoch_start_time = None
        self._algo_start_time = None
        self._old_table_keys = None
        self._current_path_builder = PathBuilder()
        self._exploration_paths = []
        if wrap_absorbing:
            # needs to be properly handled both here and in replay buffer
            raise NotImplementedError()
        self.wrap_absorbing = wrap_absorbing
        self.freq_saving = freq_saving
        self.no_terminal = no_terminal
        self.eval_statistics = None
        self.freq_log_visuals = freq_log_visuals
def train(self, start_epoch=0):
self.pretrain()
if start_epoch == 0:
params = self.get_epoch_snapshot(-1)
logger.save_itr_params(-1, params)
self.training_mode(False)
self._n_env_steps_total = start_epoch * self.num_env_steps_per_epoch
gt.reset()
gt.set_def_unique(False)
self.start_training(start_epoch=start_epoch)
def pretrain(self):
"""
Do anything before the main training phase.
"""
pass
    def start_training(self, start_epoch=0):
        """
        Main data-collection / training loop: step the training env,
        record transitions, and periodically train and evaluate.
        :param start_epoch: epoch index to resume from
        """
        self._current_path_builder = PathBuilder()
        observation = self._start_new_rollout()
        for epoch in gt.timed_for(
            range(start_epoch, self.num_epochs),
            save_itrs=True,
        ):
            self._start_epoch(epoch)
            for steps_this_epoch in range(self.num_env_steps_per_epoch):
                action, agent_info = self._get_action_and_info(observation)
                if self.render: self.training_env.render()
                next_ob, raw_reward, terminal, env_info = (
                    self.training_env.step(action)
                )
                # Optionally ignore env termination signals entirely.
                if self.no_terminal: terminal = False
                self._n_env_steps_total += 1
                reward = np.array([raw_reward])
                terminal = np.array([terminal])
                self._handle_step(
                    observation,
                    action,
                    reward,
                    next_ob,
                    np.array([False]) if self.no_terminal else terminal,
                    absorbing=np.array([0., 0.]),
                    agent_info=agent_info,
                    env_info=env_info,
                )
                if terminal[0]:
                    if self.wrap_absorbing:
                        # Absorbing-state wrapping is not supported yet;
                        # the code below is kept as dead reference code.
                        raise NotImplementedError()
                        '''
                        If we wrap absorbing states, two additional
                        transitions must be added: (s_T, s_abs) and
                        (s_abs, s_abs). In Disc Actor Critic paper
                        they make s_abs be a vector of 0s with last
                        dim set to 1. Here we are going to add the following:
                        ([next_ob,0], random_action, [next_ob, 1]) and
                        ([next_ob,1], random_action, [next_ob, 1])
                        This way we can handle varying types of terminal states.
                        '''
                        # next_ob is the absorbing state
                        # for now just taking the previous action
                        self._handle_step(
                            next_ob,
                            action,
                            # env.action_space.sample(),
                            # the reward doesn't matter
                            reward,
                            next_ob,
                            np.array([False]),
                            absorbing=np.array([0.0, 1.0]),
                            agent_info=agent_info,
                            env_info=env_info
                        )
                        self._handle_step(
                            next_ob,
                            action,
                            # env.action_space.sample(),
                            # the reward doesn't matter
                            reward,
                            next_ob,
                            np.array([False]),
                            absorbing=np.array([1.0, 1.0]),
                            agent_info=agent_info,
                            env_info=env_info
                        )
                    self._handle_rollout_ending()
                    observation = self._start_new_rollout()
                elif len(self._current_path_builder) >= self.max_path_length:
                    # Truncate rollouts that exceed the max path length.
                    self._handle_rollout_ending()
                    observation = self._start_new_rollout()
                else:
                    observation = next_ob
                # Train every num_steps_between_train_calls env steps.
                if self._n_env_steps_total % self.num_steps_between_train_calls == 0:
                    gt.stamp('sample')
                    self._try_to_train(epoch)
                    gt.stamp('train')
            gt.stamp('sample')
            self._try_to_eval(epoch)
            gt.stamp('eval')
            self._end_epoch()
def _try_to_train(self, epoch):
if self._can_train():
self.training_mode(True)
self._do_training(epoch)
self._n_train_steps_total += 1
self.training_mode(False)
    def _try_to_eval(self, epoch):
        """
        Run evaluation (and optional checkpointing) for ``epoch`` if enough
        data exists; otherwise just log that evaluation was skipped.
        """
        if self._can_evaluate():
            # save if it's time to save
            if (epoch % self.freq_saving == 0) or (epoch + 1 >= self.num_epochs):
                # if epoch + 1 >= self.num_epochs:
                #     epoch = 'final'
                logger.save_extra_data(self.get_extra_data_to_save(epoch))
                params = self.get_epoch_snapshot(epoch)
                logger.save_itr_params(epoch, params)
            self.evaluate(epoch)
            logger.record_tabular(
                "Number of train calls total",
                self._n_train_steps_total,
            )
            logger.record_tabular(
                "Number of env steps total",
                self._n_env_steps_total,
            )
            logger.record_tabular(
                "Number of rollouts total",
                self._n_rollouts_total,
            )
            # Pull the per-iteration timing stamps recorded in start_training.
            times_itrs = gt.get_times().stamps.itrs
            train_time = times_itrs['train'][-1]
            sample_time = times_itrs['sample'][-1]
            # No 'eval' stamp exists yet on the very first epoch.
            eval_time = times_itrs['eval'][-1] if epoch > 0 else 0
            epoch_time = train_time + sample_time + eval_time
            total_time = gt.get_times().total
            logger.record_tabular('Train Time (s)', train_time)
            logger.record_tabular('(Previous) Eval Time (s)', eval_time)
            logger.record_tabular('Sample Time (s)', sample_time)
            logger.record_tabular('Epoch Time (s)', epoch_time)
            logger.record_tabular('Total Train Time (s)', total_time)
            logger.record_tabular("Epoch", epoch)
            logger.dump_tabular(with_prefix=False, with_timestamp=False)
        else:
            logger.log("Skipping eval for now.")
def _can_evaluate(self):
"""
One annoying thing about the logger table is that the keys at each
iteration need to be the exact same. So unless you can compute
everything, skip evaluation.
A common example for why you might want to skip evaluation is that at
the beginning of training, you may not have enough data for a
validation and training set.
:return:
"""
return (
len(self._exploration_paths) > 0
and self.replay_buffer.num_steps_can_sample() >= self.min_steps_before_training
)
def _can_train(self):
return self.replay_buffer.num_steps_can_sample() >= self.min_steps_before_training
def _get_action_and_info(self, observation):
"""
Get an action to take in the environment.
:param observation:
:return:
"""
self.exploration_policy.set_num_steps_total(self._n_env_steps_total)
return self.exploration_policy.get_action(
observation,
)
    def _start_epoch(self, epoch):
        """Reset per-epoch bookkeeping and tag log lines with the epoch number."""
        self._epoch_start_time = time.time()
        self._exploration_paths = []
        self._do_train_time = 0
        logger.push_prefix('Iteration #%d | ' % epoch)
def _end_epoch(self):
self.eval_statistics = None
logger.log("Epoch Duration: {0}".format(
time.time() - self._epoch_start_time
))
logger.log("Started Training: {0}".format(self._can_train()))
logger.pop_prefix()
    def _start_new_rollout(self):
        """Reset the policy's internal state and the training env.

        :return: the first observation of the fresh rollout
        """
        self.exploration_policy.reset()
        return self.training_env.reset()
def _handle_path(self, path):
"""
Naive implementation: just loop through each transition.
:param path:
:return:
"""
for (
ob,
action,
reward,
next_ob,
terminal,
agent_info,
env_info
) in zip(
path["observations"],
path["actions"],
path["rewards"],
path["next_observations"],
path["terminals"],
path["agent_infos"],
path["env_infos"],
):
self._handle_step(
ob,
action,
reward,
next_ob,
terminal,
agent_info=agent_info,
env_info=env_info,
)
self._handle_rollout_ending()
    def _handle_step(
        self,
        observation,
        action,
        reward,
        next_observation,
        terminal,
        absorbing,
        agent_info,
        env_info,
    ):
        """
        Record one environment transition in both the current path builder
        and the replay buffer.
        :param absorbing: 2-element indicator used by absorbing-state
            wrapping; ``[0., 0.]`` marks an ordinary (non-absorbing) step
        """
        self._current_path_builder.add_all(
            observations=observation,
            actions=action,
            rewards=reward,
            next_observations=next_observation,
            terminals=terminal,
            absorbing=absorbing,
            agent_infos=agent_info,
            env_infos=env_info,
        )
        self.replay_buffer.add_sample(
            observation=observation,
            action=action,
            reward=reward,
            terminal=terminal,
            next_observation=next_observation,
            absorbing=absorbing,
            agent_info=agent_info,
            env_info=env_info,
        )
def _handle_rollout_ending(self):
"""
Implement anything that needs to happen after every rollout.
"""
self.replay_buffer.terminate_episode()
self._n_rollouts_total += 1
if len(self._current_path_builder) > 0:
self._exploration_paths.append(
self._current_path_builder
)
self._current_path_builder = PathBuilder()
def get_epoch_snapshot(self, epoch):
"""
Probably will be overridden by each algorithm
"""
data_to_save = dict(
epoch=epoch,
exploration_policy=self.exploration_policy,
)
if self.save_environment:
data_to_save['env'] = self.training_env
return data_to_save
# @abc.abstractmethod
# def load_snapshot(self, snapshot):
# """
# Should be implemented on a per algorithm basis
# taking into consideration the particular
# get_epoch_snapshot implementation for the algorithm
# """
# pass
    def get_extra_data_to_save(self, epoch):
        """
        Save things that shouldn't be saved every snapshot but rather
        overwritten every time.
        :param epoch: current epoch index
        :return: dict of large/optional objects to persist
        """
        if self.render:
            # Close the render window before pickling anything env-related.
            self.training_env.render(close=True)
        data_to_save = dict(
            epoch=epoch,
        )
        if self.save_environment:
            data_to_save['env'] = self.training_env
        if self.save_replay_buffer:
            data_to_save['replay_buffer'] = self.replay_buffer
        if self.save_algorithm:
            data_to_save['algorithm'] = self
        return data_to_save
    @abc.abstractmethod
    def training_mode(self, mode):
        """
        Set training mode to `mode` (e.g. toggle dropout/batch-norm behavior
        on the subclass's networks).
        :param mode: If True, training will happen (e.g. set the dropout
        probabilities to not all ones).
        """
        pass
@abc.abstractmethod
def _do_training(self):
"""
Perform some update, e.g. perform one gradient step.
:return:
"""
pass
def evaluate(self, epoch):
"""
Evaluate the policy, e.g. save/print progress.
:param epoch:
:return:
"""
statistics = OrderedDict()
try:
statistics.update(self.eval_statistics)
self.eval_statistics = None
except:
print('No Stats to Eval')
logger.log("Collecting samples for evaluation")
test_paths = self.eval_sampler.obtain_samples()
statistics.update(eval_util.get_generic_path_information(
test_paths, stat_prefix="Test",
))
statistics.update(eval_util.get_generic_path_information(
self._exploration_paths, stat_prefix="Exploration",
))
if hasattr(self.env, "log_diagnostics"):
self.env.log_diagnostics(test_paths)
if hasattr(self.env, "log_statistics"):
statistics.update(self.env.log_statistics(test_paths))
if epoch % self.freq_log_visuals == 0:
if hasattr(self.env, "log_visuals"):
self.env.log_visuals(test_paths, epoch, logger.get_snapshot_dir())
average_returns = eval_util.get_average_returns(test_paths)
statistics['AverageReturn'] = average_returns
for key, value in statistics.items():
logger.record_tabular(key, value)
best_statistic = statistics[self.best_key]
if best_statistic > self.best_statistic_so_far:
self.best_statistic_so_far = best_statistic
if self.save_best and epoch >= self.save_best_starting_from_epoch:
data_to_save = {
'epoch': epoch,
'statistics': statistics
}
data_to_save.update(self.get_epoch_snapshot(epoch))
logger.save_extra_data(data_to_save, 'best.pkl')
print('\n\nSAVED BEST\n\n')
| 34.250943 | 91 | 0.562056 | import abc
import pickle
import time
from collections import OrderedDict
from copy import deepcopy
import gtimer as gt
import numpy as np
from rlkit.core import logger, eval_util
from rlkit.data_management.env_replay_buffer import EnvReplayBuffer
from rlkit.data_management.path_builder import PathBuilder
from rlkit.policies.base import ExplorationPolicy
from rlkit.torch.sac.policies import MakeDeterministic
from rlkit.samplers import PathSampler
from rlkit.envs.wrapped_absorbing_env import WrappedAbsorbingEnv
from gym.spaces import Dict
class BaseAlgorithm(metaclass=abc.ABCMeta):
def __init__(
self,
env,
exploration_policy: ExplorationPolicy,
training_env=None,
eval_policy=None,
eval_sampler=None,
num_epochs=100,
num_steps_per_epoch=10000,
num_steps_between_train_calls=1000,
num_steps_per_eval=1000,
max_path_length=1000,
min_steps_before_training=0,
replay_buffer=None,
replay_buffer_size=10000,
freq_saving=1,
save_replay_buffer=False,
save_environment=False,
save_algorithm=False,
save_best=False,
save_best_starting_from_epoch=0,
best_key='AverageReturn',
no_terminal=False,
wrap_absorbing=False,
render=False,
render_kwargs={},
freq_log_visuals=1,
eval_deterministic=False
):
self.env = env
self.training_env = training_env or pickle.loads(pickle.dumps(env))
self.exploration_policy = exploration_policy
self.num_epochs = num_epochs
self.num_env_steps_per_epoch = num_steps_per_epoch
self.num_steps_between_train_calls = num_steps_between_train_calls
self.num_steps_per_eval = num_steps_per_eval
self.max_path_length = max_path_length
self.min_steps_before_training = min_steps_before_training
self.render = render
self.save_replay_buffer = save_replay_buffer
self.save_algorithm = save_algorithm
self.save_environment = save_environment
self.save_best = save_best
self.save_best_starting_from_epoch = save_best_starting_from_epoch
self.best_key = best_key
self.best_statistic_so_far = float('-Inf')
if eval_sampler is None:
if eval_policy is None:
eval_policy = exploration_policy
eval_policy = MakeDeterministic(eval_policy)
eval_sampler = PathSampler(
env,
eval_policy,
num_steps_per_eval,
max_path_length,
no_terminal=no_terminal,
render=render,
render_kwargs=render_kwargs
)
self.eval_policy = eval_policy
self.eval_sampler = eval_sampler
self.action_space = env.action_space
self.obs_space = env.observation_space
self.replay_buffer_size = replay_buffer_size
if replay_buffer is None:
assert max_path_length < replay_buffer_size
replay_buffer = EnvReplayBuffer(
self.replay_buffer_size,
self.env,
random_seed=np.random.randint(10000)
)
else:
assert max_path_length < replay_buffer._max_replay_buffer_size
self.replay_buffer = replay_buffer
self._n_env_steps_total = 0
self._n_train_steps_total = 0
self._n_rollouts_total = 0
self._do_train_time = 0
self._epoch_start_time = None
self._algo_start_time = None
self._old_table_keys = None
self._current_path_builder = PathBuilder()
self._exploration_paths = []
if wrap_absorbing:
raise NotImplementedError()
self.wrap_absorbing = wrap_absorbing
self.freq_saving = freq_saving
self.no_terminal = no_terminal
self.eval_statistics = None
self.freq_log_visuals = freq_log_visuals
def train(self, start_epoch=0):
self.pretrain()
if start_epoch == 0:
params = self.get_epoch_snapshot(-1)
logger.save_itr_params(-1, params)
self.training_mode(False)
self._n_env_steps_total = start_epoch * self.num_env_steps_per_epoch
gt.reset()
gt.set_def_unique(False)
self.start_training(start_epoch=start_epoch)
def pretrain(self):
pass
def start_training(self, start_epoch=0):
self._current_path_builder = PathBuilder()
observation = self._start_new_rollout()
for epoch in gt.timed_for(
range(start_epoch, self.num_epochs),
save_itrs=True,
):
self._start_epoch(epoch)
for steps_this_epoch in range(self.num_env_steps_per_epoch):
action, agent_info = self._get_action_and_info(observation)
if self.render: self.training_env.render()
next_ob, raw_reward, terminal, env_info = (
self.training_env.step(action)
)
if self.no_terminal: terminal = False
self._n_env_steps_total += 1
reward = np.array([raw_reward])
terminal = np.array([terminal])
self._handle_step(
observation,
action,
reward,
next_ob,
np.array([False]) if self.no_terminal else terminal,
absorbing=np.array([0., 0.]),
agent_info=agent_info,
env_info=env_info,
)
if terminal[0]:
if self.wrap_absorbing:
raise NotImplementedError()
self._handle_step(
next_ob,
action,
reward,
next_ob,
np.array([False]),
absorbing=np.array([0.0, 1.0]),
agent_info=agent_info,
env_info=env_info
)
self._handle_step(
next_ob,
action,
# env.action_space.sample(),
# the reward doesn't matter
reward,
next_ob,
np.array([False]),
absorbing=np.array([1.0, 1.0]),
agent_info=agent_info,
env_info=env_info
)
self._handle_rollout_ending()
observation = self._start_new_rollout()
elif len(self._current_path_builder) >= self.max_path_length:
self._handle_rollout_ending()
observation = self._start_new_rollout()
else:
observation = next_ob
if self._n_env_steps_total % self.num_steps_between_train_calls == 0:
gt.stamp('sample')
self._try_to_train(epoch)
gt.stamp('train')
gt.stamp('sample')
self._try_to_eval(epoch)
gt.stamp('eval')
self._end_epoch()
def _try_to_train(self, epoch):
if self._can_train():
self.training_mode(True)
self._do_training(epoch)
self._n_train_steps_total += 1
self.training_mode(False)
def _try_to_eval(self, epoch):
if self._can_evaluate():
if (epoch % self.freq_saving == 0) or (epoch + 1 >= self.num_epochs):
# if epoch + 1 >= self.num_epochs:
# epoch = 'final'
logger.save_extra_data(self.get_extra_data_to_save(epoch))
params = self.get_epoch_snapshot(epoch)
logger.save_itr_params(epoch, params)
self.evaluate(epoch)
logger.record_tabular(
"Number of train calls total",
self._n_train_steps_total,
)
logger.record_tabular(
"Number of env steps total",
self._n_env_steps_total,
)
logger.record_tabular(
"Number of rollouts total",
self._n_rollouts_total,
)
times_itrs = gt.get_times().stamps.itrs
train_time = times_itrs['train'][-1]
sample_time = times_itrs['sample'][-1]
eval_time = times_itrs['eval'][-1] if epoch > 0 else 0
epoch_time = train_time + sample_time + eval_time
total_time = gt.get_times().total
logger.record_tabular('Train Time (s)', train_time)
logger.record_tabular('(Previous) Eval Time (s)', eval_time)
logger.record_tabular('Sample Time (s)', sample_time)
logger.record_tabular('Epoch Time (s)', epoch_time)
logger.record_tabular('Total Train Time (s)', total_time)
logger.record_tabular("Epoch", epoch)
logger.dump_tabular(with_prefix=False, with_timestamp=False)
else:
logger.log("Skipping eval for now.")
def _can_evaluate(self):
return (
len(self._exploration_paths) > 0
and self.replay_buffer.num_steps_can_sample() >= self.min_steps_before_training
)
def _can_train(self):
return self.replay_buffer.num_steps_can_sample() >= self.min_steps_before_training
def _get_action_and_info(self, observation):
self.exploration_policy.set_num_steps_total(self._n_env_steps_total)
return self.exploration_policy.get_action(
observation,
)
def _start_epoch(self, epoch):
self._epoch_start_time = time.time()
self._exploration_paths = []
self._do_train_time = 0
logger.push_prefix('Iteration
def _end_epoch(self):
self.eval_statistics = None
logger.log("Epoch Duration: {0}".format(
time.time() - self._epoch_start_time
))
logger.log("Started Training: {0}".format(self._can_train()))
logger.pop_prefix()
def _start_new_rollout(self):
self.exploration_policy.reset()
return self.training_env.reset()
def _handle_path(self, path):
for (
ob,
action,
reward,
next_ob,
terminal,
agent_info,
env_info
) in zip(
path["observations"],
path["actions"],
path["rewards"],
path["next_observations"],
path["terminals"],
path["agent_infos"],
path["env_infos"],
):
self._handle_step(
ob,
action,
reward,
next_ob,
terminal,
agent_info=agent_info,
env_info=env_info,
)
self._handle_rollout_ending()
def _handle_step(
self,
observation,
action,
reward,
next_observation,
terminal,
absorbing,
agent_info,
env_info,
):
self._current_path_builder.add_all(
observations=observation,
actions=action,
rewards=reward,
next_observations=next_observation,
terminals=terminal,
absorbing=absorbing,
agent_infos=agent_info,
env_infos=env_info,
)
self.replay_buffer.add_sample(
observation=observation,
action=action,
reward=reward,
terminal=terminal,
next_observation=next_observation,
absorbing=absorbing,
agent_info=agent_info,
env_info=env_info,
)
def _handle_rollout_ending(self):
self.replay_buffer.terminate_episode()
self._n_rollouts_total += 1
if len(self._current_path_builder) > 0:
self._exploration_paths.append(
self._current_path_builder
)
self._current_path_builder = PathBuilder()
def get_epoch_snapshot(self, epoch):
data_to_save = dict(
epoch=epoch,
exploration_policy=self.exploration_policy,
)
if self.save_environment:
data_to_save['env'] = self.training_env
return data_to_save
# @abc.abstractmethod
# def load_snapshot(self, snapshot):
# """
# Should be implemented on a per algorithm basis
# taking into consideration the particular
# get_epoch_snapshot implementation for the algorithm
# """
# pass
def get_extra_data_to_save(self, epoch):
if self.render:
self.training_env.render(close=True)
data_to_save = dict(
epoch=epoch,
)
if self.save_environment:
data_to_save['env'] = self.training_env
if self.save_replay_buffer:
data_to_save['replay_buffer'] = self.replay_buffer
if self.save_algorithm:
data_to_save['algorithm'] = self
return data_to_save
@abc.abstractmethod
def training_mode(self, mode):
pass
@abc.abstractmethod
def _do_training(self):
pass
def evaluate(self, epoch):
statistics = OrderedDict()
try:
statistics.update(self.eval_statistics)
self.eval_statistics = None
except:
print('No Stats to Eval')
logger.log("Collecting samples for evaluation")
test_paths = self.eval_sampler.obtain_samples()
statistics.update(eval_util.get_generic_path_information(
test_paths, stat_prefix="Test",
))
statistics.update(eval_util.get_generic_path_information(
self._exploration_paths, stat_prefix="Exploration",
))
if hasattr(self.env, "log_diagnostics"):
self.env.log_diagnostics(test_paths)
if hasattr(self.env, "log_statistics"):
statistics.update(self.env.log_statistics(test_paths))
if epoch % self.freq_log_visuals == 0:
if hasattr(self.env, "log_visuals"):
self.env.log_visuals(test_paths, epoch, logger.get_snapshot_dir())
average_returns = eval_util.get_average_returns(test_paths)
statistics['AverageReturn'] = average_returns
for key, value in statistics.items():
logger.record_tabular(key, value)
best_statistic = statistics[self.best_key]
if best_statistic > self.best_statistic_so_far:
self.best_statistic_so_far = best_statistic
if self.save_best and epoch >= self.save_best_starting_from_epoch:
data_to_save = {
'epoch': epoch,
'statistics': statistics
}
data_to_save.update(self.get_epoch_snapshot(epoch))
logger.save_extra_data(data_to_save, 'best.pkl')
print('\n\nSAVED BEST\n\n')
| true | true |
f73554f08dc3a48f057339dda2f1974e69be8475 | 350 | py | Python | neurokernel/__init__.py | yiyin/neurodriver | 34e6874a1cf35633cda1191920cbaeac5d25dc9b | [
"BSD-3-Clause"
] | 235 | 2015-01-27T01:12:54.000Z | 2022-03-17T23:09:35.000Z | neurokernel/__init__.py | mreitm/neurokernel | 8195a500ba1127f719e963465af9f43d6019b884 | [
"BSD-3-Clause"
] | 29 | 2015-01-12T18:00:45.000Z | 2020-08-04T22:33:15.000Z | neuroarch_component/__init__.py | fruitflybrain/neuroarch_component | bc2198fee1d58d52c3399a6bef9e1fbbd3e33932 | [
"BSD-3-Clause"
] | 67 | 2015-01-18T22:20:49.000Z | 2021-12-13T03:33:49.000Z | try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
# Ignore all exceptions so that this doesn't cause package installation
# to fail if pkg_resources can't find neurokernel:
try:
from version import __version__
except:
pass
| 26.923077 | 71 | 0.774286 | try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
# to fail if pkg_resources can't find neurokernel:
try:
from version import __version__
except:
pass
| true | true |
f73554ff04f2ab45b89c0e233030e12064f0038d | 55 | py | Python | unsupervised/__init__.py | Riroaki/LemonML- | 3bb344c135e4dd7dab63a4fd2184ac0aaacc367d | [
"MIT"
] | 34 | 2019-06-23T03:45:41.000Z | 2021-11-30T12:28:41.000Z | unsupervised/__init__.py | Riroaki/LemonML- | 3bb344c135e4dd7dab63a4fd2184ac0aaacc367d | [
"MIT"
] | null | null | null | unsupervised/__init__.py | Riroaki/LemonML- | 3bb344c135e4dd7dab63a4fd2184ac0aaacc367d | [
"MIT"
] | 5 | 2019-06-14T09:41:24.000Z | 2019-10-23T11:21:22.000Z | from .clustering import *
from .decomposition import *
| 18.333333 | 28 | 0.781818 | from .clustering import *
from .decomposition import *
| true | true |
f73555f6b77e720c512282513fa4bcda7ce39a7e | 418 | py | Python | mytoyota/utils/token.py | laurentd75/mytoyota--DurgNomis-drol | 269cf13807c31b741e4d6a2fa0d7e09306c98f55 | [
"MIT"
] | 34 | 2021-03-20T16:38:08.000Z | 2022-02-17T12:47:07.000Z | mytoyota/utils/token.py | DurgNomis-drol/MyT | ad6b93461f42754045eb605b07b7c16cac93803d | [
"MIT"
] | 64 | 2021-03-14T12:12:39.000Z | 2022-03-29T18:44:32.000Z | mytoyota/utils/token.py | joro75/mytoyota | 405f7d84b3737846124aac6e7692aa6da52838a1 | [
"MIT"
] | 7 | 2021-03-15T07:27:38.000Z | 2022-01-21T17:23:08.000Z | """Token validation utilities"""
from mytoyota.const import TOKEN_LENGTH
from mytoyota.exceptions import ToyotaInvalidToken
def is_valid_token(token: str) -> bool:
    """Return True for a well-formed token, raise ToyotaInvalidToken otherwise.

    A valid token is non-empty, exactly ``TOKEN_LENGTH`` characters long,
    and ends with the literal suffix ``..*``.
    """
    token_ok = (
        bool(token)
        and len(token) == TOKEN_LENGTH
        and token.endswith("..*")
    )
    if not token_ok:
        raise ToyotaInvalidToken(
            f"Token must end with '..*' and be {TOKEN_LENGTH} characters long."
        )
    return True
| 29.857143 | 75 | 0.698565 | from mytoyota.const import TOKEN_LENGTH
from mytoyota.exceptions import ToyotaInvalidToken
def is_valid_token(token: str) -> bool:
if token and len(token) == TOKEN_LENGTH and token.endswith("..*"):
return True
raise ToyotaInvalidToken(
f"Token must end with '..*' and be {TOKEN_LENGTH} characters long."
)
| true | true |
f735566079a6315e52b1d2a3c0a9ec263dbb5b35 | 551 | py | Python | docs/compliant_logging/hello-world.py | Anbang-Hu/shrike | 78189984c85696a9a9feaadb72aa471cf2409796 | [
"MIT"
] | 27 | 2021-05-27T00:01:24.000Z | 2022-01-30T19:55:24.000Z | docs/compliant_logging/hello-world.py | Anbang-Hu/shrike | 78189984c85696a9a9feaadb72aa471cf2409796 | [
"MIT"
] | 284 | 2021-05-12T22:26:41.000Z | 2022-02-23T21:18:34.000Z | docs/compliant_logging/hello-world.py | Anbang-Hu/shrike | 78189984c85696a9a9feaadb72aa471cf2409796 | [
"MIT"
] | 5 | 2021-06-02T04:51:47.000Z | 2021-12-20T17:07:41.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Simplest example of how to use the prefix_stack_trace decorator.
"""
from shrike.compliant_logging.exceptions import prefix_stack_trace
# Output will be:
# SystemLog: Traceback (most recent call last):
# SystemLog: File ".\hello-world.py", line 11, in main
# SystemLog: print(1 / 0)
# SystemLog: ZeroDivisionError: **Exception message scrubbed**
@prefix_stack_trace()
def main():
    """Print a greeting, then divide by zero to demo scrubbed stack traces."""
    print("Hello, world!")
    # Raises ZeroDivisionError; the decorator prints the "SystemLog:"-prefixed
    # traceback shown in the comment block above, with the message scrubbed.
    print(1 / 0)
if __name__ == "__main__":
    main()
| 22.958333 | 66 | 0.711434 |
from shrike.compliant_logging.exceptions import prefix_stack_trace
@prefix_stack_trace()
def main():
print("Hello, world!")
print(1 / 0)
if __name__ == "__main__":
main()
| true | true |
f735566a03cd3ffb92dd1352843541464707906d | 6,966 | py | Python | gputools/convolve/minmax_filter.py | VolkerH/gputools | b8732c3cf82b96c6960497e6d82ce6b2bac463aa | [
"BSD-3-Clause"
] | null | null | null | gputools/convolve/minmax_filter.py | VolkerH/gputools | b8732c3cf82b96c6960497e6d82ce6b2bac463aa | [
"BSD-3-Clause"
] | null | null | null | gputools/convolve/minmax_filter.py | VolkerH/gputools | b8732c3cf82b96c6960497e6d82ce6b2bac463aa | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function, unicode_literals, absolute_import, division
import logging
logger = logging.getLogger(__name__)
import os
import numpy as np
from gputools import OCLArray, OCLProgram, get_device
from gputools.core.ocltypes import assert_bufs_type
from gputools.utils.tile_iterator import tile_iterator
from ._abspath import abspath
def _filter_max_2_gpu(data_g, size=10, res_g=None):
    """Separable 2D maximum filter on the GPU.

    Runs the 1D max kernel along x into a temporary buffer, then along y
    into *res_g* (allocated here when not supplied) and returns it.

    NOTE(review): despite the scalar default, *size* is indexed with
    size[-1]/size[-2], so callers must pass a length-2 sequence.
    """
    assert_bufs_type(np.float32, data_g)
    prog = OCLProgram(abspath("kernels/minmax_filter.cl"))
    tmp_g = OCLArray.empty_like(data_g)
    if res_g is None:
        res_g = OCLArray.empty_like(data_g)
    # Kernel launches use the reversed numpy shape as the OpenCL work size.
    prog.run_kernel("max_2_x", data_g.shape[::-1], None, data_g.data, tmp_g.data, np.int32(size[-1]))
    prog.run_kernel("max_2_y", data_g.shape[::-1], None, tmp_g.data, res_g.data, np.int32(size[-2]))
    return res_g
def _filter_max_3_gpu(data_g, size=10, res_g=None):
    """Separable 3D maximum filter on the GPU.

    Three 1D passes (x, y, z); the buffers ping-pong between *res_g* and a
    temporary so the final z pass lands in *res_g*, which is returned.

    NOTE(review): *size* must be a length-3 sequence (indexed with
    size[-1]/size[-2]/size[-3]) despite the scalar default.
    """
    assert_bufs_type(np.float32, data_g)
    prog = OCLProgram(abspath("kernels/minmax_filter.cl"))
    tmp_g = OCLArray.empty_like(data_g)
    if res_g is None:
        res_g = OCLArray.empty_like(data_g)
    # Kernel launches use the reversed numpy shape as the OpenCL work size.
    prog.run_kernel("max_3_x", data_g.shape[::-1], None, data_g.data, res_g.data, np.int32(size[-1]))
    prog.run_kernel("max_3_y", data_g.shape[::-1], None, res_g.data, tmp_g.data, np.int32(size[-2]))
    prog.run_kernel("max_3_z", data_g.shape[::-1], None, tmp_g.data, res_g.data, np.int32(size[-3]))
    return res_g
def _max_filter_gpu(data_g, size=5, res_g=None):
    """Dispatch to the 2D or 3D separable GPU maximum filter.

    *size* must have one entry per axis of *data_g*; unsupported ranks
    raise NotImplementedError.
    """
    assert_bufs_type(np.float32, data_g)
    ndim = len(data_g.shape)
    assert ndim == len(size)
    if ndim == 2:
        return _filter_max_2_gpu(data_g, size=size, res_g=res_g)
    if ndim == 3:
        return _filter_max_3_gpu(data_g, size=size, res_g=res_g)
    raise NotImplementedError("only 2 or 3d arrays are supported for now")
def _max_filter_numpy(data, size=5):
    """Run the GPU maximum filter on a host array and return a host array."""
    device_buf = OCLArray.from_array(data.astype(np.float32))
    filtered = _max_filter_gpu(device_buf, size=size)
    return filtered.get()
def max_filter(data, size=10, res_g=None, sub_blocks=(1, 1, 1)):
    """
    maximum filter of given size

    Parameters
    ----------
    data: 2 or 3 dimensional ndarray or OCLArray of type float32
        input data
    size: scalar, tuple
        the size of the patch to consider
    res_g: OCLArray
        store result in buffer if given
    sub_blocks:
        perform over subblock tiling (only if data is ndarray);
        None is treated like no tiling
    Returns
    -------
        filtered image or None (if OCLArray)
    """
    if np.isscalar(size):
        size = (size,)*len(data.shape)
    if isinstance(data, np.ndarray):
        data = np.ascontiguousarray(data)
        # Test "is None" first: the original evaluated set(sub_blocks)
        # before the None check, so sub_blocks=None raised TypeError.
        if sub_blocks is None or set(sub_blocks) == {1}:
            return _max_filter_numpy(data, size)
        else:
            # cut the image into tiles and operate on every one of them
            N_sub = [int(np.ceil(1. * n / s)) for n, s in zip(data.shape, sub_blocks)]
            Npads = tuple(map(lambda x: x//2, size))
            res = np.empty(data.shape, np.float32)
            for i, (data_tile, data_s_src, data_s_dest) \
                    in enumerate(tile_iterator(data, blocksize=N_sub,
                                               padsize=Npads,
                                               mode="constant")):
                res_tile = _max_filter_numpy(data_tile.copy(),
                                             size)
                res[data_s_src] = res_tile[data_s_dest]
            return res
    elif isinstance(data, OCLArray):
        return _max_filter_gpu(data, size=size, res_g=res_g)
    else:
        raise TypeError("array argument (1) has bad type: %s" % type(data))
def _filter_min_2_gpu(data_g, size=(10,10), res_g=None):
    """Separable 2D minimum filter on the GPU (mirror of _filter_max_2_gpu).

    Filters along x into a temporary buffer, then along y into *res_g*
    (allocated here when not supplied) and returns it.
    """
    assert_bufs_type(np.float32, data_g)
    prog = OCLProgram(abspath("kernels/minmax_filter.cl"))
    tmp_g = OCLArray.empty_like(data_g)
    if res_g is None:
        res_g = OCLArray.empty_like(data_g)
    # Kernel launches use the reversed numpy shape as the OpenCL work size.
    prog.run_kernel("min_2_x", data_g.shape[::-1], None, data_g.data, tmp_g.data, np.int32(size[-1]))
    prog.run_kernel("min_2_y", data_g.shape[::-1], None, tmp_g.data, res_g.data, np.int32(size[-2]))
    return res_g
def _filter_min_3_gpu(data_g, size=(10,10,10), res_g=None):
    """Separable 3D minimum filter on the GPU (mirror of _filter_max_3_gpu).

    Three 1D passes (x, y, z); the buffers ping-pong between *res_g* and a
    temporary so the final z pass lands in *res_g*, which is returned.
    """
    assert_bufs_type(np.float32, data_g)
    prog = OCLProgram(abspath("kernels/minmax_filter.cl"))
    tmp_g = OCLArray.empty_like(data_g)
    if res_g is None:
        res_g = OCLArray.empty_like(data_g)
    # Kernel launches use the reversed numpy shape as the OpenCL work size.
    prog.run_kernel("min_3_x", data_g.shape[::-1], None, data_g.data, res_g.data, np.int32(size[-1]))
    prog.run_kernel("min_3_y", data_g.shape[::-1], None, res_g.data, tmp_g.data, np.int32(size[-2]))
    prog.run_kernel("min_3_z", data_g.shape[::-1], None, tmp_g.data, res_g.data, np.int32(size[-3]))
    return res_g
def _min_filter_gpu(data_g, size=(10,10), res_g=None):
    """Dispatch to the 2D or 3D separable GPU minimum filter.

    *size* must have one entry per axis of *data_g*; unsupported ranks
    raise NotImplementedError.
    """
    assert_bufs_type(np.float32, data_g)
    ndim = len(data_g.shape)
    assert ndim == len(size)
    if ndim == 2:
        return _filter_min_2_gpu(data_g, size=size, res_g=res_g)
    if ndim == 3:
        return _filter_min_3_gpu(data_g, size=size, res_g=res_g)
    raise NotImplementedError("only 2 or 3d arrays are supported for now")
def _min_filter_numpy(data, size=(10,10)):
    """Run the GPU minimum filter on a host array and return a host array."""
    device_buf = OCLArray.from_array(data.astype(np.float32))
    filtered = _min_filter_gpu(device_buf, size=size)
    return filtered.get()
def min_filter(data, size=10, res_g=None, sub_blocks=(1, 1, 1)):
    """
    minimum filter of given size

    Parameters
    ----------
    data: 2 or 3 dimensional ndarray or OCLArray of type float32
        input data
    size: scalar, tuple
        the size of the patch to consider
    res_g: OCLArray
        store result in buffer if given
    sub_blocks:
        perform over subblock tiling (only if data is ndarray);
        None is treated like no tiling
    Returns
    -------
        filtered image or None (if OCLArray)
    """
    if np.isscalar(size):
        size = (size,)*len(data.shape)
    if isinstance(data, np.ndarray):
        # Make the input C-contiguous before upload, as max_filter does
        # (presumably the device transfer assumes a contiguous buffer).
        data = np.ascontiguousarray(data)
        # Test "is None" first: the original evaluated set(sub_blocks)
        # before the None check, so sub_blocks=None raised TypeError.
        if sub_blocks is None or set(sub_blocks) == {1}:
            return _min_filter_numpy(data, size)
        else:
            # cut the image into tiles and operate on every one of them
            N_sub = [int(np.ceil(1. * n / s)) for n, s in zip(data.shape, sub_blocks)]
            Npads = tuple(map(lambda x: x//2, size))
            res = np.empty(data.shape, np.float32)
            for i, (data_tile, data_s_src, data_s_dest) \
                    in enumerate(tile_iterator(data, blocksize=N_sub,
                                               padsize=Npads,
                                               mode="constant")):
                res_tile = _min_filter_numpy(data_tile.copy(),
                                             size)
                res[data_s_src] = res_tile[data_s_dest]
            return res
    elif isinstance(data, OCLArray):
        return _min_filter_gpu(data, size=size, res_g=res_g)
    else:
        raise TypeError("array argument (1) has bad type: %s" % type(data))
| 32.4 | 101 | 0.626759 | from __future__ import print_function, unicode_literals, absolute_import, division
import logging
logger = logging.getLogger(__name__)
import os
import numpy as np
from gputools import OCLArray, OCLProgram, get_device
from gputools.core.ocltypes import assert_bufs_type
from gputools.utils.tile_iterator import tile_iterator
from ._abspath import abspath
def _filter_max_2_gpu(data_g, size=10, res_g=None):
assert_bufs_type(np.float32, data_g)
prog = OCLProgram(abspath("kernels/minmax_filter.cl"))
tmp_g = OCLArray.empty_like(data_g)
if res_g is None:
res_g = OCLArray.empty_like(data_g)
prog.run_kernel("max_2_x", data_g.shape[::-1], None, data_g.data, tmp_g.data, np.int32(size[-1]))
prog.run_kernel("max_2_y", data_g.shape[::-1], None, tmp_g.data, res_g.data, np.int32(size[-2]))
return res_g
def _filter_max_3_gpu(data_g, size=10, res_g=None):
assert_bufs_type(np.float32, data_g)
prog = OCLProgram(abspath("kernels/minmax_filter.cl"))
tmp_g = OCLArray.empty_like(data_g)
if res_g is None:
res_g = OCLArray.empty_like(data_g)
prog.run_kernel("max_3_x", data_g.shape[::-1], None, data_g.data, res_g.data, np.int32(size[-1]))
prog.run_kernel("max_3_y", data_g.shape[::-1], None, res_g.data, tmp_g.data, np.int32(size[-2]))
prog.run_kernel("max_3_z", data_g.shape[::-1], None, tmp_g.data, res_g.data, np.int32(size[-3]))
return res_g
def _max_filter_gpu(data_g, size=5, res_g=None):
assert_bufs_type(np.float32, data_g)
assert (len(data_g.shape) == len(size))
if len(data_g.shape) == 2:
return _filter_max_2_gpu(data_g, size=size, res_g=res_g)
elif len(data_g.shape) == 3:
return _filter_max_3_gpu(data_g, size=size, res_g=res_g)
else:
raise NotImplementedError("only 2 or 3d arrays are supported for now")
def _max_filter_numpy(data, size=5):
data_g = OCLArray.from_array(data.astype(np.float32))
return _max_filter_gpu(data_g, size=size).get()
def max_filter(data, size=10, res_g=None, sub_blocks=(1, 1, 1)):
if np.isscalar(size):
size = (size,)*len(data.shape)
if isinstance(data, np.ndarray):
data = np.ascontiguousarray(data)
if set(sub_blocks) == {1} or sub_blocks is None:
return _max_filter_numpy(data, size)
else:
N_sub = [int(np.ceil(1. * n / s)) for n, s in zip(data.shape, sub_blocks)]
Npads = tuple(map(lambda x: x//2, size))
res = np.empty(data.shape, np.float32)
for i, (data_tile, data_s_src, data_s_dest) \
in enumerate(tile_iterator(data, blocksize=N_sub,
padsize=Npads,
mode="constant")):
res_tile = _max_filter_numpy(data_tile.copy(),
size)
res[data_s_src] = res_tile[data_s_dest]
return res
elif isinstance(data, OCLArray):
return _max_filter_gpu(data, size=size, res_g=res_g)
else:
raise TypeError("array argument (1) has bad type: %s" % type(data))
def _filter_min_2_gpu(data_g, size=(10,10), res_g=None):
assert_bufs_type(np.float32, data_g)
prog = OCLProgram(abspath("kernels/minmax_filter.cl"))
tmp_g = OCLArray.empty_like(data_g)
if res_g is None:
res_g = OCLArray.empty_like(data_g)
prog.run_kernel("min_2_x", data_g.shape[::-1], None, data_g.data, tmp_g.data, np.int32(size[-1]))
prog.run_kernel("min_2_y", data_g.shape[::-1], None, tmp_g.data, res_g.data, np.int32(size[-2]))
return res_g
def _filter_min_3_gpu(data_g, size=(10,10,10), res_g=None):
assert_bufs_type(np.float32, data_g)
prog = OCLProgram(abspath("kernels/minmax_filter.cl"))
tmp_g = OCLArray.empty_like(data_g)
if res_g is None:
res_g = OCLArray.empty_like(data_g)
prog.run_kernel("min_3_x", data_g.shape[::-1], None, data_g.data, res_g.data, np.int32(size[-1]))
prog.run_kernel("min_3_y", data_g.shape[::-1], None, res_g.data, tmp_g.data, np.int32(size[-2]))
prog.run_kernel("min_3_z", data_g.shape[::-1], None, tmp_g.data, res_g.data, np.int32(size[-3]))
return res_g
def _min_filter_gpu(data_g, size=(10,10), res_g=None):
assert_bufs_type(np.float32, data_g)
assert (len(data_g.shape)==len(size))
if len(data_g.shape) == 2:
return _filter_min_2_gpu(data_g, size=size, res_g=res_g)
elif len(data_g.shape) == 3:
return _filter_min_3_gpu(data_g, size=size, res_g=res_g)
else:
raise NotImplementedError("only 2 or 3d arrays are supported for now")
def _min_filter_numpy(data, size=(10,10)):
data_g = OCLArray.from_array(data.astype(np.float32))
return _min_filter_gpu(data_g, size=size).get()
def min_filter(data, size=10, res_g=None, sub_blocks=(1, 1, 1)):
    """Minimum filter of given size over an ndarray or OCLArray (float32).

    sub_blocks tiles ndarray input to bound GPU memory; None or all-ones
    means no tiling.
    """
    if np.isscalar(size):
        size = (size,)*len(data.shape)
    if isinstance(data, np.ndarray):
        # Make the input C-contiguous before upload, as max_filter does
        # (presumably the device transfer assumes a contiguous buffer).
        data = np.ascontiguousarray(data)
        # Test "is None" first: the original evaluated set(sub_blocks)
        # before the None check, so sub_blocks=None raised TypeError.
        if sub_blocks is None or set(sub_blocks) == {1}:
            return _min_filter_numpy(data, size)
        else:
            # cut the image into tiles and operate on every one of them
            N_sub = [int(np.ceil(1. * n / s)) for n, s in zip(data.shape, sub_blocks)]
            Npads = tuple(map(lambda x: x//2, size))
            res = np.empty(data.shape, np.float32)
            for i, (data_tile, data_s_src, data_s_dest) \
                    in enumerate(tile_iterator(data, blocksize=N_sub,
                                               padsize=Npads,
                                               mode="constant")):
                res_tile = _min_filter_numpy(data_tile.copy(),
                                             size)
                res[data_s_src] = res_tile[data_s_dest]
            return res
    elif isinstance(data, OCLArray):
        return _min_filter_gpu(data, size=size, res_g=res_g)
    else:
        raise TypeError("array argument (1) has bad type: %s" % type(data))
| true | true |
f7355702a040e2141c8ec1af9495e40c0492ec04 | 2,215 | py | Python | setup.py | LRydin/NeuroKit | 3e2ee72900c3fc85a0b338ad9a3adea0f4f4c169 | [
"MIT"
] | null | null | null | setup.py | LRydin/NeuroKit | 3e2ee72900c3fc85a0b338ad9a3adea0f4f4c169 | [
"MIT"
] | null | null | null | setup.py | LRydin/NeuroKit | 3e2ee72900c3fc85a0b338ad9a3adea0f4f4c169 | [
"MIT"
] | 2 | 2021-12-25T15:39:49.000Z | 2021-12-25T15:44:16.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
import re
from setuptools import find_packages, setup
# Utilities
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("NEWS.rst") as history_file:
history = history_file.read()
history = history.replace("\n-------------------", "\n^^^^^^^^^^^^^^^^^^^").replace("\n=====", "\n-----")
def find_version():
    """Extract the __version__ string from neurokit2/__init__.py.

    Uses a context manager so the file handle is closed promptly (the
    original `open(...).read()` left closing to the garbage collector).
    """
    with open("neurokit2/__init__.py") as init_file:
        content = init_file.read()
    result = re.search(r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format("__version__"), content)
    return result.group(1)
# Dependencies
requirements = ["numpy", "pandas", "scipy", "scikit-learn", "matplotlib"]
# Optional Dependencies (only needed / downloaded for testing purposes, for instance to test against some other packages)
setup_requirements = ["pytest-runner", "numpy"]
test_requirements = requirements + [
"pytest",
"coverage",
"bioread",
"mne",
"pyentrp",
"nolds",
"biosppy==0.6.1",
"cvxopt",
"PyWavelets",
"EMD-signal",
"astropy"
]
# Setup
setup(
# Info
name="neurokit2",
keywords="NeuroKit2, physiology, bodily signals, Python, ECG, EDA, EMG, PPG",
url="https://github.com/neuropsychology/NeuroKit",
version=find_version(),
description="The Python Toolbox for Neurophysiological Signal Processing.",
long_description=readme + "\n\n" + history,
long_description_content_type="text/x-rst",
license="MIT license",
# The name and contact of a maintainer
author="Dominique Makowski",
author_email="dom.makowski@gmail.com",
# Dependencies
install_requires=requirements,
setup_requires=setup_requirements,
extras_require={"test": test_requirements},
test_suite="pytest",
tests_require=test_requirements,
# Misc
packages=find_packages(),
include_package_data=True,
zip_safe=False,
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
]
)
| 27.345679 | 121 | 0.63702 |
import re
from setuptools import find_packages, setup
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("NEWS.rst") as history_file:
history = history_file.read()
history = history.replace("\n-------------------", "\n^^^^^^^^^^^^^^^^^^^").replace("\n=====", "\n-----")
def find_version():
result = re.search(r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format("__version__"), open("neurokit2/__init__.py").read())
return result.group(1)
# Dependencies
requirements = ["numpy", "pandas", "scipy", "scikit-learn", "matplotlib"]
# Optional Dependencies (only needed / downloaded for testing purposes, for instance to test against some other packages)
setup_requirements = ["pytest-runner", "numpy"]
test_requirements = requirements + [
"pytest",
"coverage",
"bioread",
"mne",
"pyentrp",
"nolds",
"biosppy==0.6.1",
"cvxopt",
"PyWavelets",
"EMD-signal",
"astropy"
]
# Setup
setup(
# Info
name="neurokit2",
keywords="NeuroKit2, physiology, bodily signals, Python, ECG, EDA, EMG, PPG",
url="https://github.com/neuropsychology/NeuroKit",
version=find_version(),
description="The Python Toolbox for Neurophysiological Signal Processing.",
long_description=readme + "\n\n" + history,
long_description_content_type="text/x-rst",
license="MIT license",
# The name and contact of a maintainer
author="Dominique Makowski",
author_email="dom.makowski@gmail.com",
# Dependencies
install_requires=requirements,
setup_requires=setup_requirements,
extras_require={"test": test_requirements},
test_suite="pytest",
tests_require=test_requirements,
# Misc
packages=find_packages(),
include_package_data=True,
zip_safe=False,
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
]
)
| true | true |
f7355ca167a264463a1c888c0f4bc0a609cdf2c1 | 1,641 | py | Python | setup.py | idekerlab/cdoslom | d44fb852f44ba11d8316d88da8654148aab3fca2 | [
"BSD-3-Clause"
] | null | null | null | setup.py | idekerlab/cdoslom | d44fb852f44ba11d8316d88da8654148aab3fca2 | [
"BSD-3-Clause"
] | null | null | null | setup.py | idekerlab/cdoslom | d44fb852f44ba11d8316d88da8654148aab3fca2 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
from setuptools import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
# Extract the version string from cdoslom/__init__.py.
with open(os.path.join('cdoslom', '__init__.py')) as ver_file:
    for line in ver_file:
        if line.startswith('__version__'):
            # strip() removes the trailing newline that the original kept,
            # which otherwise ended up inside the version string passed to
            # setup().
            version = re.sub("'", "", line[line.index("'"):]).strip()
requirements = [
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='cdoslom',
version=version,
description="Runs Oslom community detection algorithm in a container",
long_description=readme + '\n\n' + history,
author="Song Cao",
author_email='soc038@ucsd.edu',
url='https://github.com/idekerlab/cdoslom',
packages=[
'cdoslom',
],
package_dir={'cdoslom':
'cdoslom'},
include_package_data=True,
install_requires=requirements,
license="BSD license",
zip_safe=False,
keywords='cdoslom',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
scripts=['cdoslom/cdoslom.py'],
test_suite='tests',
tests_require=test_requirements
)
| 26.467742 | 74 | 0.620963 |
import os
import re
from setuptools import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
with open(os.path.join('cdoslom', '__init__.py')) as ver_file:
for line in ver_file:
if line.startswith('__version__'):
version=re.sub("'", "", line[line.index("'"):])
requirements = [
]
test_requirements = [
]
setup(
name='cdoslom',
version=version,
description="Runs Oslom community detection algorithm in a container",
long_description=readme + '\n\n' + history,
author="Song Cao",
author_email='soc038@ucsd.edu',
url='https://github.com/idekerlab/cdoslom',
packages=[
'cdoslom',
],
package_dir={'cdoslom':
'cdoslom'},
include_package_data=True,
install_requires=requirements,
license="BSD license",
zip_safe=False,
keywords='cdoslom',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
scripts=['cdoslom/cdoslom.py'],
test_suite='tests',
tests_require=test_requirements
)
| true | true |
f7355cb863fb2a7c8542214af3c2f9443699e337 | 7,953 | py | Python | api.py | gurgeh/silly-chess | 6c2ff766e10184a9b475681bb9945d9bc8f9aa89 | [
"Apache-2.0"
] | 1 | 2016-12-23T01:48:07.000Z | 2016-12-23T01:48:07.000Z | api.py | gurgeh/silly-chess | 6c2ff766e10184a9b475681bb9945d9bc8f9aa89 | [
"Apache-2.0"
] | null | null | null | api.py | gurgeh/silly-chess | 6c2ff766e10184a9b475681bb9945d9bc8f9aa89 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import json
import datetime
import hashlib
import webapp2
from google.appengine.api import users, memcache
from google.appengine.ext import ndb
import sil_model
import split_pgn
CHUNK_SIZE = 10
"""
Make sample opening PGN
parse_stm does not seem to handle comments (with non-ASCII characters like åäö?)
--- LATER ---
Memorize games:
find worthy games
maybe import from chessgames.com?
choose source type
if source is "game", choose moves
"forgive me"-knapp som säger att draget inte ska räknas som fel
Instant assessment:
Autogenerate such FENs + score from DB
Way to input and score assessment
Semi-blind tactics:
find games with tactic (crawl or calculate, preferably crawl)
Show position X moves before
Promote to other than queen
Make design nicer with semantic UI instead of jquery ui
More fact management:
keep fact times for same ID
inaktivera fact
list facts for source (so can reactivate)
delete facts when source deleted
Remove the immediate stat refresh after create
Create-spinner
Custom CSS for mobile
Fix open new window, board bug
"""
def add_success(fact):
    """Bump the per-user answer counter in memcache and return its new value."""
    client = memcache.Client()
    return client.incr('nranswer_%s' % fact.userid, initial_value=0)
def add_fail(fact):
    """Record a failed answer for *fact* and bump the user's answer counter.

    The failure is queued in memcache (newest last, capped at CHUNK_SIZE
    entries) together with the answer count at failure time, so
    get_fail() can re-serve it after CHUNK_SIZE further answers.
    """
    memdb = memcache.Client()
    i = memdb.incr('nranswer_%s' % fact.userid, initial_value=0)
    fails = memdb.get(fact.userid)
    if not fails:
        fails = []
    # The queue holds (fact, count) tuples, so compare against the stored
    # fact, not the tuple: the original `f != fact` compared a tuple with
    # an entity and was always True, letting the same fact queue twice.
    fails = [f for f in fails if f[0] != fact]
    fails.append((fact, i - 1))
    fails = fails[-CHUNK_SIZE:]
    memdb.set(fact.userid, fails)
def get_fail(user_id):
    """Pop and return the oldest recorded failure once it is "due".

    A failed fact becomes due after CHUNK_SIZE further answers; returns
    None when there is no recorded failure or the oldest one is not yet
    due.
    """
    memdb = memcache.Client()
    fails = memdb.get(user_id)
    if not fails:
        return None
    i = memdb.get('nranswer_%s' % user_id)
    if i is None:
        i = 0
    # fails[0][1] is the answer count at the time the oldest failure was
    # recorded (see add_fail).
    if fails[0][1] + CHUNK_SIZE > i:
        return None
    fact = fails.pop(0)[0]
    memdb.set(user_id, fails)
    return fact
def get_fact(source_id, fact_id):
    """Fetch the Factlet entity with *fact_id* under the *source_id* parent."""
    parent_key = ndb.Key(sil_model.Source, long(source_id))
    fact_key = ndb.Key(sil_model.Factlet, long(fact_id), parent=parent_key)
    return fact_key.get()
class RestHandler(webapp2.RequestHandler):
    """Base request handler providing a JSON response helper."""
    def jsonify(self, d):
        # Serialize *d* and write it as an application/json response body.
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(d))
class MainPage(webapp2.RequestHandler):
    """Minimal plain-text handler for '/'."""
    def get(self):
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.write('Hello, World!')
class CreateSource(RestHandler):
    """Collection endpoint for Source entities (/source)."""
    def get(self):
        # List all sources belonging to the signed-in user.
        user = users.get_current_user()
        query = sil_model.Source.query(
            sil_model.Source.userid == user.user_id())
        self.jsonify(
            {'sources': [source.to_jdict() for source in query.iter()]})
    def post(self):
        # Create a new source from the 'name' and 'fact_type' form fields.
        user = users.get_current_user()
        source = sil_model.Source(
            userid=user.user_id(),
            name=self.request.get('name'),
            fact_type=self.request.get('fact_type'))
        source.put()
        self.jsonify({'key': source.key.id()})
class SingleSource(RestHandler):
    """Single-source endpoint (/source/<id>)."""
    def get(self, source_id):
        # Return the source together with all of its facts.
        source = ndb.Key(sil_model.Source, long(source_id)).get()
        d = source.to_jdict()
        d['facts'] = [f.to_jdict()
                      for f in sil_model.Factlet.query_source(source_id)]
        self.jsonify(d)
    def delete(self, source_id):
        # NOTE(review): child Factlets are not deleted here; the module
        # docstring lists "delete facts when source deleted" as a TODO.
        ndb.Key(sil_model.Source, long(source_id)).delete()
class CreateFact(RestHandler):
    """Collection endpoint for Factlet entities (/fact)."""
    def get(self):
        # List the ids of all facts belonging to the signed-in user.
        user = users.get_current_user()
        query = sil_model.Factlet.query(
            sil_model.Factlet.userid == user.user_id())
        self.jsonify(
            {'keys': [key.id() for key in query.iter(keys_only=True)]})
    def post(self):
        # Create a fact under the source given by the 'source_id' field.
        user = users.get_current_user()
        fact = self.request.get('fact').encode('utf8')
        fact_obj = sil_model.Factlet(
            parent=ndb.Key(sil_model.Source,
                           long(self.request.get('source_id'))),
            userid=user.user_id(),
            fact=fact, )
        fact_obj.put()
        self.jsonify({'key': fact_obj.key.id()})
class SingleFact(RestHandler):
    """Single-fact endpoint (/source/<source_id>/<fact_id>)."""
    def get(self, source_id, fact_id):
        fact = get_fact(source_id, fact_id)
        self.jsonify(fact.to_jdict())
    def delete(self, source_id, fact_id):
        parent = ndb.Key(sil_model.Source, long(source_id))
        ndb.Key(sil_model.Factlet, long(fact_id), parent=parent).delete()
class SourceLearner(RestHandler):
    """GET /source/<id>/next -- serve the next fact to study.

    Previously failed facts that are due again take priority; otherwise
    the regular schedule (Factlet.get_next) decides.
    """
    def get(self, source_id):
        user = users.get_current_user()
        fact = get_fail(user.user_id())
        # Only reuse the failed fact if it belongs to the requested source.
        if not fact or int(fact.key.parent().get().key.id()) != int(source_id):
            fact = sil_model.Factlet.get_next(user.user_id(), source_id)
        self.jsonify(fact.to_jdict())
class Answer(SourceLearner):
    """POST /source/<sid>/<fid>/(success|fail) -- grade an answer.

    Updates the fact's schedule, records the outcome, then responds with
    the next fact to study (inherited SourceLearner.get).
    """
    def post(self, source_id, fact_id, result):
        fact = get_fact(source_id, fact_id)
        if result == 'success':
            fact.success()
            add_success(fact)
        else:
            fact.fail()
            add_fail(fact)
        fact.put()
        self.get(source_id)
class SourceStat(RestHandler):
    """GET /source/<id>/stat -- progress statistics for a source."""
    def get(self, source_id):
        user = users.get_current_user()
        tot = sil_model.Factlet.count(user.user_id(), source_id)
        left = sil_model.Factlet.count_left(user.user_id(), source_id)
        nextfact = sil_model.Factlet.get_next(user.user_id(), source_id)
        if nextfact:
            # Seconds since the Unix epoch for the next scheduled review.
            # Renamed from `next`, which shadowed the builtin.
            next_due = (nextfact.next_scheduled - datetime.datetime(1970, 1, 1)
                        ).total_seconds()
        else:
            next_due = 0
        self.jsonify({'total': tot,
                      'left': left,
                      'key': source_id,
                      'next': next_due})
class AddOpening(RestHandler):
    """POST /source/<id>/opening -- import chess opening lines from PGN."""
    def post(self, source_id):
        user = users.get_current_user()
        source = ndb.Key(sil_model.Source, long(source_id))
        color = self.request.get('color')
        def make_fact(pgn, headers):
            # Deterministic id from user + move sequence, so re-importing
            # the same line overwrites instead of duplicating.
            hid = hashlib.md5(user.user_id() + ''.join(x['move'] for x in
                                                       pgn)).hexdigest()
            # 14 hex digits = 56 bits, presumably to stay within the
            # datastore's integer id range -- TODO confirm.
            hid = int(hid[:14], 16)
            fd = {'moves': pgn, 'orientation': color}
            if 'FEN' in headers:
                # Custom start position: keep the FEN and derive the board
                # orientation from it.
                # NOTE(review): orientation is 'b' when the FEN says white
                # to move -- confirm this inversion is intended.
                fd['fen'] = headers['FEN']
                fd['orientation'] = 'b' if ' w ' in fd['fen'] else 'w'
            fact = sil_model.Factlet(
                parent=source,
                id=hid,
                userid=user.user_id(),
                # use 'fen' for start positions
                fact=json.dumps(fd), )
            return fact
        pgns = split_pgn.split_pgns(self.request.get('pgn'), color == 'w')
        keys = ndb.put_multi(
            [make_fact(pgn, headers) for pgn, headers in pgns])
        self.jsonify({'keys': [key.id() for key in keys]})
class StageData(RestHandler):
    """GET /stagedata -- create a demo source populated from data/black.pgn."""
    def get(self):
        user = users.get_current_user()
        source = sil_model.Source(
            userid=user.user_id(), name='stage', fact_type='opening')
        source.put()
        color = 'b'
        def make_fact(pgn):
            # One Factlet per opening line ('fen' could be used for
            # non-initial start positions).
            fact = sil_model.Factlet(
                parent=source.key,
                userid=user.user_id(),
                fact=json.dumps({'moves': pgn,
                                 'orientation': color}), )
            return fact
        # Read the PGN with a context manager so the file handle is closed
        # (the original open(...).read() left it to the garbage collector).
        with open('data/black.pgn') as pgn_file:
            pgn_text = pgn_file.read()
        pgns = split_pgn.split_pgn(pgn_text, color == 'w')
        keys = ndb.put_multi([make_fact(pgn) for pgn in pgns])
        self.jsonify(source.key.id())
# URL routing table: REST endpoints for sources, facts and the learner.
app = webapp2.WSGIApplication(
    [
        ('/', MainPage), ('/fact', CreateFact), ('/source', CreateSource),
        ('/source/(\d+)', SingleSource), ('/source/(\d+)/(\d+)', SingleFact),
        ('/source/(\d+)/(\d+)/(success|fail)',
         Answer), ('/source/(\d+)/next', SourceLearner),
        ('/source/(\d+)/stat', SourceStat),
        ('/source/(\d+)/opening', AddOpening), ('/stagedata', StageData)
    ],
    debug=True)
import json
import datetime
import hashlib
import webapp2
from google.appengine.api import users, memcache
from google.appengine.ext import ndb
import sil_model
import split_pgn
CHUNK_SIZE = 10
def add_success(fact):
memdb = memcache.Client()
i = memdb.incr('nranswer_%s' % fact.userid, initial_value=0)
return i
def add_fail(fact):
memdb = memcache.Client()
i = memdb.incr('nranswer_%s' % fact.userid, initial_value=0)
fails = memdb.get(fact.userid)
if not fails:
fails = []
fails = [f for f in fails if f != fact]
fails.append((fact, i - 1))
fails = fails[-CHUNK_SIZE:]
memdb.set(fact.userid, fails)
def get_fail(user_id):
memdb = memcache.Client()
fails = memdb.get(user_id)
if not fails:
return None
i = memdb.get('nranswer_%s' % user_id)
if i is None:
i = 0
if fails[0][1] + CHUNK_SIZE > i:
return None
fact = fails.pop(0)[0]
memdb.set(user_id, fails)
return fact
def get_fact(source_id, fact_id):
fact = ndb.Key(sil_model.Factlet,
long(fact_id),
parent=ndb.Key(sil_model.Source, long(source_id))).get()
return fact
class RestHandler(webapp2.RequestHandler):
def jsonify(self, d):
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps(d))
class MainPage(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/plain'
self.response.write('Hello, World!')
class CreateSource(RestHandler):
def get(self):
user = users.get_current_user()
query = sil_model.Source.query(
sil_model.Source.userid == user.user_id())
self.jsonify(
{'sources': [source.to_jdict() for source in query.iter()]})
def post(self):
user = users.get_current_user()
source = sil_model.Source(
userid=user.user_id(),
name=self.request.get('name'),
fact_type=self.request.get('fact_type'))
source.put()
self.jsonify({'key': source.key.id()})
class SingleSource(RestHandler):
def get(self, source_id):
source = ndb.Key(sil_model.Source, long(source_id)).get()
d = source.to_jdict()
d['facts'] = [f.to_jdict()
for f in sil_model.Factlet.query_source(source_id)]
self.jsonify(d)
def delete(self, source_id):
ndb.Key(sil_model.Source, long(source_id)).delete()
class CreateFact(RestHandler):
def get(self):
user = users.get_current_user()
query = sil_model.Factlet.query(
sil_model.Factlet.userid == user.user_id())
self.jsonify(
{'keys': [key.id() for key in query.iter(keys_only=True)]})
def post(self):
user = users.get_current_user()
fact = self.request.get('fact').encode('utf8')
fact_obj = sil_model.Factlet(
parent=ndb.Key(sil_model.Source,
long(self.request.get('source_id'))),
userid=user.user_id(),
fact=fact, )
fact_obj.put()
self.jsonify({'key': fact_obj.key.id()})
class SingleFact(RestHandler):
def get(self, source_id, fact_id):
fact = get_fact(source_id, fact_id)
self.jsonify(fact.to_jdict())
def delete(self, source_id, fact_id):
parent = ndb.Key(sil_model.Source, long(source_id))
ndb.Key(sil_model.Factlet, long(fact_id), parent=parent).delete()
class SourceLearner(RestHandler):
def get(self, source_id):
user = users.get_current_user()
fact = get_fail(user.user_id())
if not fact or int(fact.key.parent().get().key.id()) != int(source_id):
fact = sil_model.Factlet.get_next(user.user_id(), source_id)
self.jsonify(fact.to_jdict())
class Answer(SourceLearner):
def post(self, source_id, fact_id, result):
fact = get_fact(source_id, fact_id)
if result == 'success':
fact.success()
add_success(fact)
else:
fact.fail()
add_fail(fact)
fact.put()
self.get(source_id)
class SourceStat(RestHandler):
def get(self, source_id):
user = users.get_current_user()
tot = sil_model.Factlet.count(user.user_id(), source_id)
left = sil_model.Factlet.count_left(user.user_id(), source_id)
nextfact = sil_model.Factlet.get_next(user.user_id(), source_id)
if nextfact:
next = (nextfact.next_scheduled - datetime.datetime(1970, 1, 1)
).total_seconds()
else:
next = 0
self.jsonify({'total': tot,
'left': left,
'key': source_id,
'next': next})
class AddOpening(RestHandler):
def post(self, source_id):
user = users.get_current_user()
source = ndb.Key(sil_model.Source, long(source_id))
color = self.request.get('color')
def make_fact(pgn, headers):
hid = hashlib.md5(user.user_id() + ''.join(x['move'] for x in
pgn)).hexdigest()
hid = int(hid[:14], 16)
fd = {'moves': pgn, 'orientation': color}
if 'FEN' in headers:
fd['fen'] = headers['FEN']
fd['orientation'] = 'b' if ' w ' in fd['fen'] else 'w'
fact = sil_model.Factlet(
parent=source,
id=hid,
userid=user.user_id(),
fact=json.dumps(fd), )
return fact
pgns = split_pgn.split_pgns(self.request.get('pgn'), color == 'w')
keys = ndb.put_multi(
[make_fact(pgn, headers) for pgn, headers in pgns])
self.jsonify({'keys': [key.id() for key in keys]})
class StageData(RestHandler):
def get(self):
user = users.get_current_user()
source = sil_model.Source(
userid=user.user_id(), name='stage', fact_type='opening')
source.put()
color = 'b'
def make_fact(pgn):
fact = sil_model.Factlet(
parent=source.key,
userid=user.user_id(),
fact=json.dumps({'moves': pgn,
'orientation': color}), )
return fact
pgns = split_pgn.split_pgn(open('data/black.pgn').read(), color == 'w')
keys = ndb.put_multi([make_fact(pgn) for pgn in pgns])
self.jsonify(source.key.id())
app = webapp2.WSGIApplication(
[
('/', MainPage), ('/fact', CreateFact), ('/source', CreateSource),
('/source/(\d+)', SingleSource), ('/source/(\d+)/(\d+)', SingleFact),
('/source/(\d+)/(\d+)/(success|fail)',
Answer), ('/source/(\d+)/next', SourceLearner),
('/source/(\d+)/stat', SourceStat),
('/source/(\d+)/opening', AddOpening), ('/stagedata', StageData)
],
debug=True)
| true | true |
f7355d4aa91a1f92c0e39afc1bb9f214e6876718 | 1,813 | py | Python | reporover/get_convention.py | CommittedTeam/RepoRover | 41326c8f2a886288efda48db435b4c2537ae3678 | [
"MIT"
] | 1 | 2022-03-18T02:34:13.000Z | 2022-03-18T02:34:13.000Z | reporover/get_convention.py | CommittedTeam/RepoRover | 41326c8f2a886288efda48db435b4c2537ae3678 | [
"MIT"
] | null | null | null | reporover/get_convention.py | CommittedTeam/RepoRover | 41326c8f2a886288efda48db435b4c2537ae3678 | [
"MIT"
] | null | null | null | """ Detect the conventional style.
guidelines:
angular: https://github.com/angular/angular/blob/master/CONTRIBUTING.md#commit
atom: https://github.com/atom/atom/blob/master/CONTRIBUTING.md#git-commit-messages
ember: https://github.com/emberjs/ember.js/blob/master/CONTRIBUTING.md#pull-requests
eslint: https://eslint.org/docs/developer-guide/contributing/pull-requests
jshint: https://github.com/jshint/jshint/blob/master/CONTRIBUTING.md#commit-message-guidelines
"""
import re
def match(commit_msg):
    """Determine which commit-message convention ``commit_msg`` follows.

    Only the subject line (the first line of a possibly multi-line message)
    is inspected.  Returns one of ``"angular"``, ``"atom"``, ``"ember"``,
    ``"eslint"`` or ``"jshint"``, or ``"undefined"`` when no known
    convention matches.
    """
    # The regular expressions are adapted from
    # https://github.com/conventional-changelog/conventional-commits-detector
    # Angular specifies the acceptable types here:
    # https://github.com/angular/angular/blob/master/CONTRIBUTING.md
    commit_types = {
        "angular": "build|ci|docs|feat|fix|perf|refactor|test|chore|style",
        "eslint": "Fix|Update|New|Breaking|Docs|Build|Upgrade|Chore",
    }
    conventions = {
        "angular": r'^({})(?:\((.*)\))?: (.*)$'.format(commit_types["angular"]),
        "atom": r'^(:.*?:) (.*)$',
        "ember": r'^\[(.*) (.*)] (.*)$',
        "eslint": r'^({}): (.*?)(?:\((.*)\))?$'.format(commit_types["eslint"]),
        "jshint": r'^\[\[(.*)]] (.*)$',
    }
    # Take the subject line from multi-line commit messages once, up front.
    subject = commit_msg.split('\n')[0]
    # Dict insertion order matters: angular must be tried before eslint
    # because their type keywords differ only in letter case.
    for name, pattern in conventions.items():
        if re.match(pattern, subject):
            return name
    # The message does not match any known convention.
    return "undefined"
| 36.26 | 202 | 0.638169 |
import re
def match(commit_msg):
    """Return the name of the commit-message convention that ``commit_msg``
    follows, or ``"undefined"`` if its subject line matches none of them."""
    angular_types = "build|ci|docs|feat|fix|perf|refactor|test|chore|style"
    eslint_types = "Fix|Update|New|Breaking|Docs|Build|Upgrade|Chore"
    # (name, pattern) pairs; checked in order, first match wins.
    patterns = (
        ("angular", r'^({})(?:\((.*)\))?: (.*)$'.format(angular_types)),
        ("atom", r'^(:.*?:) (.*)$'),
        ("ember", r'^\[(.*) (.*)] (.*)$'),
        ("eslint", r'^({}): (.*?)(?:\((.*)\))?$'.format(eslint_types)),
        ("jshint", r'^\[\[(.*)]] (.*)$'),
    )
    detected = "undefined"
    for name, regex in patterns:
        if re.match(regex, commit_msg.split('\n')[0]):
            detected = name
            break
    return detected
| true | true |
f7355ee820770c79f616be1ed4b4f8d8723d4752 | 6,588 | py | Python | RecoBTag/PerformanceDB/python/measure/Pool_btagMuJetsWp0612.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | RecoBTag/PerformanceDB/python/measure/Pool_btagMuJetsWp0612.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | RecoBTag/PerformanceDB/python/measure/Pool_btagMuJetsWp0612.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
from CondCore.DBCommon.CondDBCommon_cfi import *
# All tagger working points for which MuJets v8 offline payloads exist.
# Order is preserved from the original hand-written list.
_BTAG_WPS = [
    "CSVL", "CSVM", "CSVT",
    "JPL", "JPM", "JPT",
    "JBPL", "JBPM", "JBPT",
    "SSVHEM", "SSVHPT",
    "TCHEL", "TCHEM", "TCHPM", "TCHPT",
]


def _btag_psets(wp):
    """Return the (payload table, working point) PSet pair for one tagger WP.

    Every tagger follows the identical naming scheme
    ``BTagMUJETSWPBTAG<WP>{table,wp}_v8_offline``, so the 30 PSets are
    generated instead of hand-written.
    """
    table = "BTagMUJETSWPBTAG{}table_v8_offline".format(wp)
    wpname = "BTagMUJETSWPBTAG{}wp_v8_offline".format(wp)
    return [
        cms.PSet(
            record=cms.string("PerformancePayloadRecord"),
            tag=cms.string(table),
            label=cms.untracked.string(table),
        ),
        cms.PSet(
            record=cms.string("PerformanceWPRecord"),
            tag=cms.string(wpname),
            label=cms.untracked.string(wpname),
        ),
    ]


# ESSource providing the b-tag performance working points/payloads.
PoolDBESSourcebtagMuJetsWp0612 = cms.ESSource(
    "PoolDBESSource",
    CondDBCommon,
    toGet=cms.VPSet(*[pset for wp in _BTAG_WPS for pset in _btag_psets(wp)]),
)
PoolDBESSourcebtagMuJetsWp0612.connect = 'frontier://FrontierProd/CMS_COND_PAT_000'
| 39.449102 | 83 | 0.711597 | import FWCore.ParameterSet.Config as cms
from CondCore.DBCommon.CondDBCommon_cfi import *
PoolDBESSourcebtagMuJetsWp0612 = cms.ESSource("PoolDBESSource",
CondDBCommon,
toGet = cms.VPSet(
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMUJETSWPBTAGCSVLtable_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGCSVLtable_v8_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMUJETSWPBTAGCSVLwp_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGCSVLwp_v8_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMUJETSWPBTAGCSVMtable_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGCSVMtable_v8_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMUJETSWPBTAGCSVMwp_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGCSVMwp_v8_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMUJETSWPBTAGCSVTtable_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGCSVTtable_v8_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMUJETSWPBTAGCSVTwp_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGCSVTwp_v8_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMUJETSWPBTAGJPLtable_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGJPLtable_v8_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMUJETSWPBTAGJPLwp_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGJPLwp_v8_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMUJETSWPBTAGJPMtable_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGJPMtable_v8_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMUJETSWPBTAGJPMwp_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGJPMwp_v8_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMUJETSWPBTAGJPTtable_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGJPTtable_v8_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMUJETSWPBTAGJPTwp_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGJPTwp_v8_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMUJETSWPBTAGJBPLtable_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGJBPLtable_v8_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMUJETSWPBTAGJBPLwp_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGJBPLwp_v8_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMUJETSWPBTAGJBPMtable_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGJBPMtable_v8_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMUJETSWPBTAGJBPMwp_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGJBPMwp_v8_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMUJETSWPBTAGJBPTtable_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGJBPTtable_v8_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMUJETSWPBTAGJBPTwp_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGJBPTwp_v8_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMUJETSWPBTAGSSVHEMtable_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGSSVHEMtable_v8_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMUJETSWPBTAGSSVHEMwp_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGSSVHEMwp_v8_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMUJETSWPBTAGSSVHPTtable_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGSSVHPTtable_v8_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMUJETSWPBTAGSSVHPTwp_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGSSVHPTwp_v8_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMUJETSWPBTAGTCHELtable_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGTCHELtable_v8_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMUJETSWPBTAGTCHELwp_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGTCHELwp_v8_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMUJETSWPBTAGTCHEMtable_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGTCHEMtable_v8_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMUJETSWPBTAGTCHEMwp_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGTCHEMwp_v8_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMUJETSWPBTAGTCHPMtable_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGTCHPMtable_v8_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMUJETSWPBTAGTCHPMwp_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGTCHPMwp_v8_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMUJETSWPBTAGTCHPTtable_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGTCHPTtable_v8_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMUJETSWPBTAGTCHPTwp_v8_offline'),
label = cms.untracked.string('BTagMUJETSWPBTAGTCHPTwp_v8_offline')
),
))
PoolDBESSourcebtagMuJetsWp0612.connect = 'frontier://FrontierProd/CMS_COND_PAT_000'
| true | true |
f7355f7e109de934e2f174b301f857c4a6bc1865 | 32,743 | py | Python | tests/support/case.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 19 | 2016-01-29T14:37:52.000Z | 2022-03-30T18:08:01.000Z | tests/support/case.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 223 | 2016-03-02T16:39:41.000Z | 2022-03-03T12:26:35.000Z | tests/support/case.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 64 | 2016-02-04T19:45:26.000Z | 2021-12-15T02:02:31.000Z | """
:codeauthor: Pedro Algarvio (pedro@algarvio.me)
====================================
Custom Salt TestCase Implementations
====================================
Custom reusable :class:`TestCase<python2:unittest.TestCase>`
implementations.
"""
import errno
import io
import json
import logging
import os
import re
import subprocess
import sys
import tempfile
import textwrap
import time
from datetime import datetime, timedelta
import pytest
import salt.utils.files
from saltfactories.utils.processes import terminate_process
from tests.support.cli_scripts import ScriptPathMixin
from tests.support.helpers import SKIP_IF_NOT_RUNNING_PYTEST, RedirectStdStreams
from tests.support.mixins import ( # pylint: disable=unused-import
AdaptedConfigurationTestCaseMixin,
SaltClientTestCaseMixin,
SaltMultimasterClientTestCaseMixin,
)
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase
# Matches the "function is running" message emitted by the state system so
# tests can extract the PID and JID of a conflicting, still-running job.
STATE_FUNCTION_RUNNING_RE = re.compile(
    r"""The function (?:"|')(?P<state_func>.*)(?:"|') is running as PID """
    r"(?P<pid>[\d]+) and was started at (?P<date>.*) with jid (?P<jid>[\d]+)"
)

# Module-level logger for the test-support helpers defined in this file.
log = logging.getLogger(__name__)
class ShellCase(TestCase, AdaptedConfigurationTestCaseMixin, ScriptPathMixin):
"""
Execute a test for a shell command
"""
RUN_TIMEOUT = 30
def run_salt(
self,
arg_str,
with_retcode=False,
catch_stderr=False,
timeout=None,
popen_kwargs=None,
config_dir=None,
):
r'''
Run the ``salt`` CLI tool with the provided arguments
.. code-block:: python
class MatchTest(ShellCase):
def test_list(self):
"""
test salt -L matcher
"""
data = self.run_salt('-L minion test.ping')
data = '\n'.join(data)
self.assertIn('minion', data)
'''
if timeout is None:
timeout = self.RUN_TIMEOUT
arg_str = "-t {} {}".format(timeout, arg_str)
return self.run_script(
"salt",
arg_str,
with_retcode=with_retcode,
catch_stderr=catch_stderr,
timeout=timeout,
config_dir=config_dir,
)
    def run_ssh(
        self,
        arg_str,
        with_retcode=False,
        catch_stderr=False,
        timeout=None,
        wipe=False,
        raw=False,
        roster_file=None,
        ssh_opts="",
        log_level="error",
        config_dir=None,
        **kwargs
    ):
        """
        Execute salt-ssh against the local test SSH daemon and return its
        JSON output (parsed by run_script).
        """
        # Fall back to the class-wide default timeout.
        if timeout is None:
            timeout = self.RUN_TIMEOUT
        # Default roster lives in the temporary test configuration directory.
        if not roster_file:
            roster_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "roster")
        # Build the full salt-ssh command line: pre-generated client key,
        # host-key checking disabled, JSON output for machine parsing.
        arg_str = (
            "{wipe} {raw} -l {log_level} --ignore-host-keys --priv {client_key} --roster-file "
            "{roster_file} {ssh_opts} localhost {arg_str} --out=json"
        ).format(
            wipe=" -W" if wipe else "",
            raw=" -r" if raw else "",
            log_level=log_level,
            client_key=os.path.join(RUNTIME_VARS.TMP_SSH_CONF_DIR, "client_key"),
            roster_file=roster_file,
            ssh_opts=ssh_opts,
            arg_str=arg_str,
        )
        ret = self.run_script(
            "salt-ssh",
            arg_str,
            with_retcode=with_retcode,
            catch_stderr=catch_stderr,
            raw=True,
            timeout=timeout,
            config_dir=config_dir,
            **kwargs
        )
        log.debug("Result of run_ssh for command '%s %s': %s", arg_str, kwargs, ret)
        return ret
    def run_run(
        self,
        arg_str,
        with_retcode=False,
        catch_stderr=False,
        asynchronous=False,
        timeout=None,
        config_dir=None,
        **kwargs
    ):
        """
        Execute salt-run
        """
        if timeout is None:
            timeout = self.RUN_TIMEOUT
        # 'async' is a reserved word, so callers may pass it via **kwargs;
        # it overrides the 'asynchronous' keyword when present.
        asynchronous = kwargs.get("async", asynchronous)
        arg_str = "{async_flag} -t {timeout} {}".format(
            arg_str, timeout=timeout, async_flag=" --async" if asynchronous else "",
        )
        ret = self.run_script(
            "salt-run",
            arg_str,
            with_retcode=with_retcode,
            catch_stderr=catch_stderr,
            timeout=timeout,
            config_dir=config_dir,
        )
        log.debug("Result of run_run for command '%s': %s", arg_str, ret)
        return ret
    def run_run_plus(self, fun, *arg, **kwargs):
        """
        Execute the runner function and return the return data and output in a dict

        The returned dict has keys ``fun``, ``return``, ``jid`` and ``out``
        (the rendered outputter text, split into lines unless ``_output``
        selects another format such as ``json``).
        """
        output = kwargs.pop("_output", None)
        opts_overrides = kwargs.pop("opts_overrides", None)
        ret = {"fun": fun}

        # Late import
        import salt.config
        import salt.output
        import salt.runner

        opts = salt.config.client_config(self.get_config_file_path("master"))
        if opts_overrides:
            opts.update(opts_overrides)

        opts_arg = list(arg)
        if kwargs:
            # Keyword arguments ride along as a trailing __kwarg__ dict,
            # mirroring the LocalClient wire format.
            opts_arg.append({"__kwarg__": True})
            opts_arg[-1].update(kwargs)

        opts.update({"doc": False, "fun": fun, "arg": opts_arg})
        with RedirectStdStreams():
            runner = salt.runner.Runner(opts)
            ret["return"] = runner.run()
            try:
                ret["jid"] = runner.jid
            except AttributeError:
                ret["jid"] = None

        # Compile output
        # TODO: Support outputters other than nested
        opts["color"] = False
        opts["output_file"] = io.StringIO()
        try:
            salt.output.display_output(ret["return"], opts=opts, out=output)
            out = opts["output_file"].getvalue()
            if output is None:
                out = out.splitlines()
            elif output == "json":
                out = json.loads(out)
            ret["out"] = out
        finally:
            opts["output_file"].close()
        log.debug(
            "Result of run_run_plus for fun '%s' with arg '%s': %s", fun, opts_arg, ret
        )
        return ret
def run_key(self, arg_str, catch_stderr=False, with_retcode=False, config_dir=None):
"""
Execute salt-key
"""
return self.run_script(
"salt-key",
arg_str,
catch_stderr=catch_stderr,
with_retcode=with_retcode,
config_dir=config_dir,
)
def run_cp(
self,
arg_str,
with_retcode=False,
catch_stderr=False,
timeout=None,
config_dir=None,
):
"""
Execute salt-cp
"""
if timeout is None:
timeout = self.RUN_TIMEOUT
# Note: not logging result of run_cp because it will log a bunch of
# bytes which will not be very helpful.
return self.run_script(
"salt-cp",
arg_str,
with_retcode=with_retcode,
catch_stderr=catch_stderr,
timeout=timeout,
config_dir=config_dir,
)
def run_call(
self,
arg_str,
with_retcode=False,
catch_stderr=False,
local=False,
timeout=None,
config_dir=None,
):
if timeout is None:
timeout = self.RUN_TIMEOUT
if not config_dir:
config_dir = RUNTIME_VARS.TMP_MINION_CONF_DIR
arg_str = "{} {}".format("--local" if local else "", arg_str)
ret = self.run_script(
"salt-call",
arg_str,
with_retcode=with_retcode,
catch_stderr=catch_stderr,
timeout=timeout,
config_dir=config_dir,
)
log.debug("Result of run_call for command '%s': %s", arg_str, ret)
return ret
def run_function(
self,
function,
arg=(),
with_retcode=False,
catch_stderr=False,
local=False,
timeout=RUN_TIMEOUT,
**kwargs
):
"""
Execute function with salt-call.
This function is added for compatibility with ModuleCase. This makes it possible to use
decorators like @with_system_user.
"""
arg_str = "{} {} {}".format(
function,
" ".join(str(arg_) for arg_ in arg),
" ".join("{}={}".format(*item) for item in kwargs.items()),
)
return self.run_call(arg_str, with_retcode, catch_stderr, local, timeout)
def run_cloud(self, arg_str, catch_stderr=False, timeout=None, config_dir=None):
"""
Execute salt-cloud
"""
if timeout is None:
timeout = self.RUN_TIMEOUT
ret = self.run_script(
"salt-cloud", arg_str, catch_stderr, timeout=timeout, config_dir=config_dir
)
log.debug("Result of run_cloud for command '%s': %s", arg_str, ret)
return ret
def run_spm(
self,
arg_str,
with_retcode=False,
catch_stderr=False,
timeout=None,
config_dir=None,
):
"""
Execute spm
"""
if timeout is None:
timeout = self.RUN_TIMEOUT
ret = self.run_script(
"spm",
arg_str,
with_retcode=with_retcode,
catch_stderr=catch_stderr,
timeout=timeout,
config_dir=config_dir,
)
log.debug("Result of run_spm for command '%s': %s", arg_str, ret)
return ret
    def run_script(
        self,
        script,
        arg_str,
        catch_stderr=False,
        with_retcode=False,
        catch_timeout=False,
        # FIXME A timeout of zero or disabling timeouts may not return results!
        timeout=15,
        raw=False,
        popen_kwargs=None,
        log_output=None,
        config_dir=None,
        **kwargs
    ):
        """
        Execute a script with the given argument string

        The ``log_output`` argument is ternary, it can be True, False, or None.
        If the value is boolean, then it forces the results to either be logged
        or not logged. If it is None, then the return code of the subprocess
        determines whether or not to log results.
        """
        import salt.utils.platform

        script_path = self.get_script_path(script)
        if not os.path.isfile(script_path):
            return False
        popen_kwargs = popen_kwargs or {}

        # Build the shell command line. On Windows the script is launched
        # through "python" explicitly; elsewhere PYTHONPATH is injected so the
        # subprocess imports the same code tree as the running test suite.
        if salt.utils.platform.is_windows():
            cmd = "python "
            if "cwd" not in popen_kwargs:
                popen_kwargs["cwd"] = os.getcwd()
            if "env" not in popen_kwargs:
                popen_kwargs["env"] = os.environ.copy()
                popen_kwargs["env"]["PYTHONPATH"] = RUNTIME_VARS.CODE_DIR
        else:
            cmd = "PYTHONPATH="
            python_path = os.environ.get("PYTHONPATH", None)
            if python_path is not None:
                cmd += "{}:".format(python_path)

            if sys.version_info[0] < 3:
                cmd += "{} ".format(":".join(sys.path[1:]))
            else:
                cmd += "{} ".format(":".join(sys.path[0:]))
            cmd += "python{}.{} ".format(*sys.version_info)
        cmd += "{} --config-dir={} {} ".format(
            script_path, config_dir or RUNTIME_VARS.TMP_CONF_DIR, arg_str
        )
        if kwargs:
            # late import
            import salt.utils.json

            # Extra keyword arguments are serialized to JSON key=value pairs.
            for key, value in kwargs.items():
                cmd += "'{}={} '".format(key, salt.utils.json.dumps(value))

        # stdout is captured into a spooled temp file rather than a pipe to
        # avoid blocking on large output.
        tmp_file = tempfile.SpooledTemporaryFile()

        popen_kwargs = dict(
            {"shell": True, "stdout": tmp_file, "universal_newlines": True},
            **popen_kwargs
        )

        if catch_stderr is True:
            popen_kwargs["stderr"] = subprocess.PIPE

        if not sys.platform.lower().startswith("win"):
            popen_kwargs["close_fds"] = True

            def detach_from_parent_group():
                # detach from parent group (no more inherited signals!)
                os.setpgrp()

            popen_kwargs["preexec_fn"] = detach_from_parent_group

        def format_return(retcode, stdout, stderr=None, timed_out=False):
            """
            DRY helper to log script result if it failed, and then return the
            desired output based on whether or not stderr was desired, and
            wither or not a retcode was desired.
            """
            log_func = log.debug
            if timed_out:
                log.error(
                    "run_script timed out after %d seconds (process killed)", timeout
                )
                log_func = log.error

            if log_output is True or timed_out or (log_output is None and retcode != 0):
                log_func(
                    "run_script results for: %s %s\n"
                    "return code: %s\n"
                    "stdout:\n"
                    "%s\n\n"
                    "stderr:\n"
                    "%s",
                    script,
                    arg_str,
                    retcode,
                    stdout,
                    stderr,
                )

            stdout = stdout or ""
            stderr = stderr or ""

            if not raw:
                stdout = stdout.splitlines()
                stderr = stderr.splitlines()

            # Return shape depends on the catch_* / with_retcode flags:
            # stdout alone, or a tuple with stderr/retcode/timed_out appended.
            ret = [stdout]
            if catch_stderr:
                ret.append(stderr)
            if with_retcode:
                ret.append(retcode)
            if catch_timeout:
                ret.append(timed_out)

            return ret[0] if len(ret) == 1 else tuple(ret)

        log.debug("Running Popen(%r, %r)", cmd, popen_kwargs)
        process = subprocess.Popen(cmd, **popen_kwargs)

        if timeout is not None:
            stop_at = datetime.now() + timedelta(seconds=timeout)
            term_sent = False  # NOTE(review): unused; kept for history
            while True:
                process.poll()
                time.sleep(0.1)
                if datetime.now() <= stop_at:
                    # We haven't reached the timeout yet
                    if process.returncode is not None:
                        break
                else:
                    # Timed out: kill the whole process tree and report.
                    terminate_process(process.pid, kill_children=True)
                    return format_return(
                        process.returncode, *process.communicate(), timed_out=True
                    )

        tmp_file.seek(0)

        try:
            out = tmp_file.read().decode(__salt_system_encoding__)
        except (NameError, UnicodeDecodeError):
            # Let's cross our fingers and hope for the best
            out = tmp_file.read().decode("utf-8")

        if catch_stderr:
            if sys.version_info < (2, 7):
                # On python 2.6, the subprocess'es communicate() method uses
                # select which, is limited by the OS to 1024 file descriptors
                # We need more available descriptors to run the tests which
                # need the stderr output.
                # So instead of .communicate() we wait for the process to
                # finish, but, as the python docs state "This will deadlock
                # when using stdout=PIPE and/or stderr=PIPE and the child
                # process generates enough output to a pipe such that it
                # blocks waiting for the OS pipe buffer to accept more data.
                # Use communicate() to avoid that." <- a catch, catch situation
                #
                # Use this work around were it's needed only, python 2.6
                process.wait()
                err = process.stderr.read()
            else:
                _, err = process.communicate()
            # Force closing stderr/stdout to release file descriptors
            if process.stdout is not None:
                process.stdout.close()
            if process.stderr is not None:
                process.stderr.close()
            # pylint: disable=maybe-no-member
            try:
                return format_return(process.returncode, out, err or "")
            finally:
                try:
                    if os.path.exists(tmp_file.name):
                        if isinstance(tmp_file.name, str):
                            # tmp_file.name is an int when using SpooledTemporaryFiles
                            # int types cannot be used with os.remove() in Python 3
                            os.remove(tmp_file.name)
                    else:
                        # Clean up file handles
                        tmp_file.close()
                    process.terminate()
                except OSError as err:
                    # process already terminated
                    pass
            # pylint: enable=maybe-no-member

        # TODO Remove this?
        process.communicate()
        if process.stdout is not None:
            process.stdout.close()

        try:
            return format_return(process.returncode, out)
        finally:
            try:
                if os.path.exists(tmp_file.name):
                    if isinstance(tmp_file.name, str):
                        # tmp_file.name is an int when using SpooledTemporaryFiles
                        # int types cannot be used with os.remove() in Python 3
                        os.remove(tmp_file.name)
                else:
                    # Clean up file handles
                    tmp_file.close()
                process.terminate()
            except OSError as err:
                # process already terminated
                pass
class MultiMasterTestShellCase(ShellCase):
    """
    Execute a test for a shell command when running multi-master tests
    """

    @property
    def config_dir(self):
        # Point all inherited CLI helpers at the multi-master temp config tree.
        return RUNTIME_VARS.TMP_MM_CONF_DIR
class SPMTestUserInterface:
    """
    Test double for the SPMClient user interface.

    Each callback records its argument in a list so tests can inspect what
    the client reported.
    """

    def __init__(self):
        self._status, self._confirm, self._error = [], [], []

    def status(self, msg):
        """Record a status message."""
        self._status.append(msg)

    def confirm(self, action):
        """Record a confirmation request."""
        self._confirm.append(action)

    def error(self, msg):
        """Record an error message."""
        self._error.append(msg)
class SPMCase(TestCase, AdaptedConfigurationTestCaseMixin):
    """
    Class for handling spm commands
    """

    def _spm_build_files(self, config):
        # Lay out a minimal "apache" formula (one sls plus FORMULA metadata)
        # under the configured file roots so it can be built into a package.
        self.formula_dir = os.path.join(
            " ".join(config["file_roots"]["base"]), "formulas"
        )
        self.formula_sls_dir = os.path.join(self.formula_dir, "apache")
        self.formula_sls = os.path.join(self.formula_sls_dir, "apache.sls")
        self.formula_file = os.path.join(self.formula_dir, "FORMULA")

        dirs = [self.formula_dir, self.formula_sls_dir]
        for f_dir in dirs:
            os.makedirs(f_dir)

        with salt.utils.files.fopen(self.formula_sls, "w") as fp:
            fp.write(
                textwrap.dedent(
                    """\
                    install-apache:
                      pkg.installed:
                        - name: apache2
                    """
                )
            )

        with salt.utils.files.fopen(self.formula_file, "w") as fp:
            fp.write(
                textwrap.dedent(
                    """\
                    name: apache
                    os: RedHat, Debian, Ubuntu, Suse, FreeBSD
                    os_family: RedHat, Debian, Suse, FreeBSD
                    version: 201506
                    release: 2
                    summary: Formula for installing Apache
                    description: Formula for installing Apache
                    """
                )
            )

    def _spm_config(self, assume_yes=True):
        # Build an isolated minion config with every SPM path rooted in a
        # fresh temporary directory, then persist it as an 'spm' config file.
        self._tmp_spm = tempfile.mkdtemp()
        config = self.get_temp_config(
            "minion",
            **{
                "spm_logfile": os.path.join(self._tmp_spm, "log"),
                "spm_repos_config": os.path.join(self._tmp_spm, "etc", "spm.repos"),
                "spm_cache_dir": os.path.join(self._tmp_spm, "cache"),
                "spm_build_dir": os.path.join(self._tmp_spm, "build"),
                "spm_build_exclude": ["apache/.git"],
                "spm_db_provider": "sqlite3",
                "spm_files_provider": "local",
                "spm_db": os.path.join(self._tmp_spm, "packages.db"),
                "extension_modules": os.path.join(self._tmp_spm, "modules"),
                "file_roots": {"base": [self._tmp_spm]},
                "formula_path": os.path.join(self._tmp_spm, "salt"),
                "pillar_path": os.path.join(self._tmp_spm, "pillar"),
                "reactor_path": os.path.join(self._tmp_spm, "reactor"),
                "assume_yes": True if assume_yes else False,
                "force": False,
                "verbose": False,
                "cache": "localfs",
                "cachedir": os.path.join(self._tmp_spm, "cache"),
                "spm_repo_dups": "ignore",
                "spm_share_dir": os.path.join(self._tmp_spm, "share"),
            }
        )

        import salt.utils.yaml

        if not os.path.isdir(config["formula_path"]):
            os.makedirs(config["formula_path"])

        with salt.utils.files.fopen(os.path.join(self._tmp_spm, "spm"), "w") as fp:
            salt.utils.yaml.safe_dump(config, fp)

        return config

    def _spm_create_update_repo(self, config):
        # Build the formula package, publish it into a local file:// repo,
        # then refresh the repo metadata.
        # NOTE(review): uses self.config rather than the `config` argument --
        # callers must have assigned self.config beforehand; confirm intended.
        build_spm = self.run_spm("build", self.config, self.formula_dir)

        c_repo = self.run_spm("create_repo", self.config, self.config["spm_build_dir"])

        repo_conf_dir = self.config["spm_repos_config"] + ".d"
        os.makedirs(repo_conf_dir)

        with salt.utils.files.fopen(os.path.join(repo_conf_dir, "spm.repo"), "w") as fp:
            fp.write(
                textwrap.dedent(
                    """\
                    local_repo:
                      url: file://{}
                    """.format(
                        self.config["spm_build_dir"]
                    )
                )
            )

        u_repo = self.run_spm("update_repo", self.config)

    def _spm_client(self, config):
        # Late import: salt.spm pulls in heavy dependencies.
        import salt.spm

        self.ui = SPMTestUserInterface()
        client = salt.spm.SPMClient(self.ui, config)
        return client

    def run_spm(self, cmd, config, arg=None):
        """Run one SPM command in-process and return the recorded status lines."""
        client = self._spm_client(config)
        client.run([cmd, arg])
        client._close()
        return self.ui._status
class ModuleCase(TestCase, SaltClientTestCaseMixin):
"""
Execute a module function
"""
def wait_for_all_jobs(self, minions=("minion", "sub_minion"), sleep=0.3):
"""
Wait for all jobs currently running on the list of minions to finish
"""
for minion in minions:
while True:
ret = self.run_function(
"saltutil.running", minion_tgt=minion, timeout=300
)
if ret:
log.debug("Waiting for minion's jobs: %s", minion)
time.sleep(sleep)
else:
break
def minion_run(self, _function, *args, **kw):
"""
Run a single salt function on the 'minion' target and condition
the return down to match the behavior of the raw function call
"""
return self.run_function(_function, args, **kw)
def run_function(
self,
function,
arg=(),
minion_tgt="minion",
timeout=300,
master_tgt=None,
**kwargs
):
"""
Run a single salt function and condition the return down to match the
behavior of the raw function call
"""
known_to_return_none = (
"data.get",
"file.chown",
"file.chgrp",
"pkg.refresh_db",
"ssh.recv_known_host_entries",
"time.sleep",
"grains.delkey",
"grains.delval",
)
if "f_arg" in kwargs:
kwargs["arg"] = kwargs.pop("f_arg")
if "f_timeout" in kwargs:
kwargs["timeout"] = kwargs.pop("f_timeout")
client = self.client if master_tgt is None else self.clients[master_tgt]
log.debug(
"Running client.cmd(minion_tgt=%r, function=%r, arg=%r, timeout=%r, kwarg=%r)",
minion_tgt,
function,
arg,
timeout,
kwargs,
)
orig = client.cmd(minion_tgt, function, arg, timeout=timeout, kwarg=kwargs)
if RUNTIME_VARS.PYTEST_SESSION:
fail_or_skip_func = self.fail
else:
fail_or_skip_func = self.skipTest
if minion_tgt not in orig:
fail_or_skip_func(
"WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply "
"from the minion '{}'. Command output: {}".format(minion_tgt, orig)
)
elif orig[minion_tgt] is None and function not in known_to_return_none:
fail_or_skip_func(
"WARNING(SHOULD NOT HAPPEN #1935): Failed to get '{}' from "
"the minion '{}'. Command output: {}".format(function, minion_tgt, orig)
)
# Try to match stalled state functions
orig[minion_tgt] = self._check_state_return(orig[minion_tgt])
return orig[minion_tgt]
def run_state(self, function, **kwargs):
"""
Run the state.single command and return the state return structure
"""
ret = self.run_function("state.single", [function], **kwargs)
return self._check_state_return(ret)
    def _check_state_return(self, ret):
        """
        Detect replies of the form "The function ... is running as PID ..."
        (a stalled state lock), kill the offending jobs, and annotate ``ret``.
        Dict returns (the normal state format) pass through untouched.
        """
        if isinstance(ret, dict):
            # This is the supposed return format for state calls
            return ret
        if isinstance(ret, list):
            jids = []
            # These are usually errors
            for item in ret[:]:  # iterate a copy -- ret is appended to below
                if not isinstance(item, str):
                    # We don't know how to handle this
                    continue
                match = STATE_FUNCTION_RUNNING_RE.match(item)
                if not match:
                    # We don't know how to handle this
                    continue
                jid = match.group("jid")
                if jid in jids:
                    # already killed this job id
                    continue
                jids.append(jid)
                job_data = self.run_function("saltutil.find_job", [jid])
                job_kill = self.run_function("saltutil.kill_job", [jid])
                msg = (
                    "A running state.single was found causing a state lock. "
                    "Job details: '{}' Killing Job Returned: '{}'".format(
                        job_data, job_kill
                    )
                )
                ret.append(
                    "[TEST SUITE ENFORCED]{}" "[/TEST SUITE ENFORCED]".format(msg)
                )
        return ret
class MultimasterModuleCase(ModuleCase, SaltMultimasterClientTestCaseMixin):
    """
    Execute a module function
    """
    def run_function(
        self,
        function,
        arg=(),
        minion_tgt="mm-minion",
        timeout=300,
        master_tgt="mm-master",
        **kwargs
    ):
        """
        Run a single salt function and condition the return down to match the
        behavior of the raw function call

        ``master_tgt`` may be ``None`` (use 'mm-master'), an int index into
        ``self.clients``, or a client key.
        """
        # Functions that legitimately return None (see ModuleCase.run_function)
        known_to_return_none = (
            "data.get",
            "file.chown",
            "file.chgrp",
            "pkg.refresh_db",
            "ssh.recv_known_host_entries",
            "time.sleep",
        )
        if minion_tgt == "mm-sub-minion":
            known_to_return_none += ("mine.update",)
        # 'f_arg'/'f_timeout' escape hatches, same as ModuleCase.run_function
        if "f_arg" in kwargs:
            kwargs["arg"] = kwargs.pop("f_arg")
        if "f_timeout" in kwargs:
            kwargs["timeout"] = kwargs.pop("f_timeout")
        if master_tgt is None:
            client = self.clients["mm-master"]
        elif isinstance(master_tgt, int):
            client = self.clients[list(self.clients)[master_tgt]]
        else:
            client = self.clients[master_tgt]
        orig = client.cmd(minion_tgt, function, arg, timeout=timeout, kwarg=kwargs)
        # Under pytest a missing reply fails the test; otherwise it skips.
        if RUNTIME_VARS.PYTEST_SESSION:
            fail_or_skip_func = self.fail
        else:
            fail_or_skip_func = self.skipTest
        if minion_tgt not in orig:
            fail_or_skip_func(
                "WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply "
                "from the minion '{}'. Command output: {}".format(minion_tgt, orig)
            )
        elif orig[minion_tgt] is None and function not in known_to_return_none:
            fail_or_skip_func(
                "WARNING(SHOULD NOT HAPPEN #1935): Failed to get '{}' from "
                "the minion '{}'. Command output: {}".format(function, minion_tgt, orig)
            )
        # Try to match stalled state functions
        orig[minion_tgt] = self._check_state_return(orig[minion_tgt])
        return orig[minion_tgt]
    def run_function_all_masters(
        self, function, arg=(), minion_tgt="mm-minion", timeout=300, **kwargs
    ):
        """
        Run a single salt function from all the masters in multimaster environment
        and condition the return down to match the behavior of the raw function call
        """
        ret = []
        for master_id in self.clients:
            ret.append(
                self.run_function(
                    function,
                    arg=arg,
                    minion_tgt=minion_tgt,
                    timeout=timeout,
                    master_tgt=master_id,
                    **kwargs
                )
            )
        return ret
class SyndicCase(TestCase, SaltClientTestCaseMixin):
    """
    Execute a syndic based execution test
    """
    _salt_client_config_file_name_ = "syndic_master"
    def run_function(self, function, arg=(), timeout=90):
        """
        Invoke *function* on the 'minion' target through the syndic client and
        unwrap the reply down to the raw function-call result.
        """
        reply = self.client.cmd("minion", function, arg, timeout=timeout)
        fail_or_skip_func = (
            self.fail if RUNTIME_VARS.PYTEST_SESSION else self.skipTest
        )
        if "minion" not in reply:
            fail_or_skip_func(
                "WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply "
                "from the minion. Command output: {}".format(reply)
            )
        return reply["minion"]
@SKIP_IF_NOT_RUNNING_PYTEST
@pytest.mark.usefixtures("salt_ssh_cli")
@pytest.mark.requires_sshd_server
class SSHCase(ShellCase):
    """
    Execute a command via salt-ssh
    """

    def _arg_str(self, function, arg):
        """
        Join *function* and its arguments into one CLI string.

        Each argument is passed through ``str()`` so callers may supply
        non-string values (ints, bools), matching ShellCase.run_function.
        """
        return "{} {}".format(function, " ".join(str(arg_) for arg_ in arg))

    # pylint: disable=arguments-differ
    def run_function(
        self, function, arg=(), timeout=180, wipe=True, raw=False, **kwargs
    ):
        """
        We use a 180s timeout here, which some slower systems do end up needing
        """
        ret = self.run_ssh(
            self._arg_str(function, arg), timeout=timeout, wipe=wipe, raw=raw, **kwargs
        )
        log.debug(
            "SSHCase run_function executed %s with arg %s and kwargs %s",
            function,
            arg,
            kwargs,
        )
        log.debug("SSHCase JSON return: %s", ret)
        # Late import
        import salt.utils.json

        try:
            # salt-ssh emits JSON keyed by host; unwrap the localhost entry
            return salt.utils.json.loads(ret)["localhost"]
        except Exception:  # pylint: disable=broad-except
            # not JSON (raw output or an error) -- hand the output back as-is
            return ret

    # pylint: enable=arguments-differ
    def custom_roster(self, new_roster, data):
        """
        helper method to create a custom roster to use for a ssh test
        """
        roster = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "roster")
        with salt.utils.files.fopen(roster, "r") as fp_:
            conf = salt.utils.yaml.safe_load(fp_)
        conf["localhost"].update(data)
        with salt.utils.files.fopen(new_roster, "w") as fp_:
            salt.utils.yaml.safe_dump(conf, fp_)
class ClientCase(AdaptedConfigurationTestCaseMixin, TestCase):
    """
    A base class containing relevant options for starting the various Salt
    Python API entrypoints
    """

    def get_opts(self):
        """Return the client configuration loaded from the master config file."""
        # Late import
        import salt.config

        return salt.config.client_config(self.get_config_file_path("master"))

    def mkdir_p(self, path):
        """
        Create *path* including missing parents.

        Matches the previous EEXIST-and-isdir check: an existing directory is
        a no-op, while an existing non-directory (or any other OS error) still
        raises.
        """
        os.makedirs(path, exist_ok=True)
| 32.354743 | 95 | 0.534557 |
import errno
import io
import json
import logging
import os
import re
import subprocess
import sys
import tempfile
import textwrap
import time
from datetime import datetime, timedelta
import pytest
import salt.utils.files
from saltfactories.utils.processes import terminate_process
from tests.support.cli_scripts import ScriptPathMixin
from tests.support.helpers import SKIP_IF_NOT_RUNNING_PYTEST, RedirectStdStreams
from tests.support.mixins import (
AdaptedConfigurationTestCaseMixin,
SaltClientTestCaseMixin,
SaltMultimasterClientTestCaseMixin,
)
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase
# Matches the "function ... is running as PID ... with jid ..." error emitted
# when a state run hits an existing state lock; used to find and kill the
# stalled job (see ModuleCase._check_state_return).
STATE_FUNCTION_RUNNING_RE = re.compile(
    r"""The function (?:"|')(?P<state_func>.*)(?:"|') is running as PID """
    r"(?P<pid>[\d]+) and was started at (?P<date>.*) with jid (?P<jid>[\d]+)"
)
log = logging.getLogger(__name__)
class ShellCase(TestCase, AdaptedConfigurationTestCaseMixin, ScriptPathMixin):
    """
    Execute the salt CLI scripts (salt, salt-run, salt-call, ...) as
    subprocesses and condition their output for assertions.
    """
    # Default number of seconds a CLI invocation may run before being killed.
    RUN_TIMEOUT = 30
    def run_salt(
        self,
        arg_str,
        with_retcode=False,
        catch_stderr=False,
        timeout=None,
        popen_kwargs=None,
        config_dir=None,
    ):
        """
        Execute the ``salt`` CLI; ``-t <timeout>`` is prepended to *arg_str*.
        """
        # NOTE(review): popen_kwargs is accepted but never forwarded to
        # run_script -- confirm whether that is intended.
        if timeout is None:
            timeout = self.RUN_TIMEOUT
        arg_str = "-t {} {}".format(timeout, arg_str)
        return self.run_script(
            "salt",
            arg_str,
            with_retcode=with_retcode,
            catch_stderr=catch_stderr,
            timeout=timeout,
            config_dir=config_dir,
        )
    def run_ssh(
        self,
        arg_str,
        with_retcode=False,
        catch_stderr=False,
        timeout=None,
        wipe=False,
        raw=False,
        roster_file=None,
        ssh_opts="",
        log_level="error",
        config_dir=None,
        **kwargs
    ):
        """
        Execute ``salt-ssh`` against localhost with JSON output enabled.
        """
        if timeout is None:
            timeout = self.RUN_TIMEOUT
        if not roster_file:
            roster_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "roster")
        arg_str = (
            "{wipe} {raw} -l {log_level} --ignore-host-keys --priv {client_key} --roster-file "
            "{roster_file} {ssh_opts} localhost {arg_str} --out=json"
        ).format(
            wipe=" -W" if wipe else "",
            raw=" -r" if raw else "",
            log_level=log_level,
            client_key=os.path.join(RUNTIME_VARS.TMP_SSH_CONF_DIR, "client_key"),
            roster_file=roster_file,
            ssh_opts=ssh_opts,
            arg_str=arg_str,
        )
        ret = self.run_script(
            "salt-ssh",
            arg_str,
            with_retcode=with_retcode,
            catch_stderr=catch_stderr,
            raw=True,
            timeout=timeout,
            config_dir=config_dir,
            **kwargs
        )
        log.debug("Result of run_ssh for command '%s %s': %s", arg_str, kwargs, ret)
        return ret
    def run_run(
        self,
        arg_str,
        with_retcode=False,
        catch_stderr=False,
        asynchronous=False,
        timeout=None,
        config_dir=None,
        **kwargs
    ):
        """
        Execute the ``salt-run`` CLI; honors an ``async`` kwarg as an alias
        for *asynchronous* (``async`` is a reserved word).
        """
        if timeout is None:
            timeout = self.RUN_TIMEOUT
        asynchronous = kwargs.get("async", asynchronous)
        arg_str = "{async_flag} -t {timeout} {}".format(
            arg_str, timeout=timeout, async_flag=" --async" if asynchronous else "",
        )
        ret = self.run_script(
            "salt-run",
            arg_str,
            with_retcode=with_retcode,
            catch_stderr=catch_stderr,
            timeout=timeout,
            config_dir=config_dir,
        )
        log.debug("Result of run_run for command '%s': %s", arg_str, ret)
        return ret
    def run_run_plus(self, fun, *arg, **kwargs):
        """
        Execute the runner function *fun* in-process and return a dict with
        the raw return, jid and the rendered output ('fun', 'return', 'jid',
        'out' keys).
        """
        output = kwargs.pop("_output", None)
        opts_overrides = kwargs.pop("opts_overrides", None)
        ret = {"fun": fun}
        # Late import
        import salt.config
        import salt.output
        import salt.runner
        opts = salt.config.client_config(self.get_config_file_path("master"))
        if opts_overrides:
            opts.update(opts_overrides)
        opts_arg = list(arg)
        if kwargs:
            # runner kwargs travel as a trailing dict tagged with __kwarg__
            opts_arg.append({"__kwarg__": True})
            opts_arg[-1].update(kwargs)
        opts.update({"doc": False, "fun": fun, "arg": opts_arg})
        with RedirectStdStreams():
            runner = salt.runner.Runner(opts)
            ret["return"] = runner.run()
            try:
                ret["jid"] = runner.jid
            except AttributeError:
                ret["jid"] = None
        opts["color"] = False
        opts["output_file"] = io.StringIO()
        try:
            salt.output.display_output(ret["return"], opts=opts, out=output)
            out = opts["output_file"].getvalue()
            if output is None:
                out = out.splitlines()
            elif output == "json":
                out = json.loads(out)
            ret["out"] = out
        finally:
            opts["output_file"].close()
        log.debug(
            "Result of run_run_plus for fun '%s' with arg '%s': %s", fun, opts_arg, ret
        )
        return ret
    def run_key(self, arg_str, catch_stderr=False, with_retcode=False, config_dir=None):
        """
        Execute the ``salt-key`` CLI.
        """
        return self.run_script(
            "salt-key",
            arg_str,
            catch_stderr=catch_stderr,
            with_retcode=with_retcode,
            config_dir=config_dir,
        )
    def run_cp(
        self,
        arg_str,
        with_retcode=False,
        catch_stderr=False,
        timeout=None,
        config_dir=None,
    ):
        """
        Execute the ``salt-cp`` CLI.
        """
        if timeout is None:
            timeout = self.RUN_TIMEOUT
        return self.run_script(
            "salt-cp",
            arg_str,
            with_retcode=with_retcode,
            catch_stderr=catch_stderr,
            timeout=timeout,
            config_dir=config_dir,
        )
    def run_call(
        self,
        arg_str,
        with_retcode=False,
        catch_stderr=False,
        local=False,
        timeout=None,
        config_dir=None,
    ):
        """
        Execute the ``salt-call`` CLI; ``--local`` is prepended when *local*.
        """
        if timeout is None:
            timeout = self.RUN_TIMEOUT
        if not config_dir:
            config_dir = RUNTIME_VARS.TMP_MINION_CONF_DIR
        arg_str = "{} {}".format("--local" if local else "", arg_str)
        ret = self.run_script(
            "salt-call",
            arg_str,
            with_retcode=with_retcode,
            catch_stderr=catch_stderr,
            timeout=timeout,
            config_dir=config_dir,
        )
        log.debug("Result of run_call for command '%s': %s", arg_str, ret)
        return ret
    def run_function(
        self,
        function,
        arg=(),
        with_retcode=False,
        catch_stderr=False,
        local=False,
        timeout=RUN_TIMEOUT,
        **kwargs
    ):
        """
        Execute *function* through ``salt-call``, stringifying args/kwargs
        into the CLI argument string.
        """
        arg_str = "{} {} {}".format(
            function,
            " ".join(str(arg_) for arg_ in arg),
            " ".join("{}={}".format(*item) for item in kwargs.items()),
        )
        return self.run_call(arg_str, with_retcode, catch_stderr, local, timeout)
    def run_cloud(self, arg_str, catch_stderr=False, timeout=None, config_dir=None):
        """
        Execute the ``salt-cloud`` CLI.
        """
        if timeout is None:
            timeout = self.RUN_TIMEOUT
        ret = self.run_script(
            "salt-cloud", arg_str, catch_stderr, timeout=timeout, config_dir=config_dir
        )
        log.debug("Result of run_cloud for command '%s': %s", arg_str, ret)
        return ret
    def run_spm(
        self,
        arg_str,
        with_retcode=False,
        catch_stderr=False,
        timeout=None,
        config_dir=None,
    ):
        """
        Execute the ``spm`` CLI.
        """
        if timeout is None:
            timeout = self.RUN_TIMEOUT
        ret = self.run_script(
            "spm",
            arg_str,
            with_retcode=with_retcode,
            catch_stderr=catch_stderr,
            timeout=timeout,
            config_dir=config_dir,
        )
        log.debug("Result of run_spm for command '%s': %s", arg_str, ret)
        return ret
    def run_script(
        self,
        script,
        arg_str,
        catch_stderr=False,
        with_retcode=False,
        catch_timeout=False,
        timeout=15,
        raw=False,
        popen_kwargs=None,
        log_output=None,
        config_dir=None,
        **kwargs
    ):
        """
        Execute a CLI *script* as a subprocess with *arg_str*, enforcing
        *timeout* by polling and killing the process group on expiry.
        Returns stdout (optionally with stderr/retcode/timed-out appended as a
        tuple), or False when the script does not exist.
        """
        import salt.utils.platform
        script_path = self.get_script_path(script)
        if not os.path.isfile(script_path):
            return False
        popen_kwargs = popen_kwargs or {}
        if salt.utils.platform.is_windows():
            cmd = "python "
            if "cwd" not in popen_kwargs:
                popen_kwargs["cwd"] = os.getcwd()
            if "env" not in popen_kwargs:
                popen_kwargs["env"] = os.environ.copy()
                popen_kwargs["env"]["PYTHONPATH"] = RUNTIME_VARS.CODE_DIR
        else:
            # Build a PYTHONPATH=... python3.x prefix mirroring this process
            cmd = "PYTHONPATH="
            python_path = os.environ.get("PYTHONPATH", None)
            if python_path is not None:
                cmd += "{}:".format(python_path)
            if sys.version_info[0] < 3:
                cmd += "{} ".format(":".join(sys.path[1:]))
            else:
                cmd += "{} ".format(":".join(sys.path[0:]))
            cmd += "python{}.{} ".format(*sys.version_info)
        cmd += "{} --config-dir={} {} ".format(
            script_path, config_dir or RUNTIME_VARS.TMP_CONF_DIR, arg_str
        )
        if kwargs:
            # Late import
            import salt.utils.json
            for key, value in kwargs.items():
                cmd += "'{}={} '".format(key, salt.utils.json.dumps(value))
        tmp_file = tempfile.SpooledTemporaryFile()
        popen_kwargs = dict(
            {"shell": True, "stdout": tmp_file, "universal_newlines": True},
            **popen_kwargs
        )
        if catch_stderr is True:
            popen_kwargs["stderr"] = subprocess.PIPE
        if not sys.platform.lower().startswith("win"):
            popen_kwargs["close_fds"] = True
            def detach_from_parent_group():
                # detach so killing the test runner doesn't kill the child group
                os.setpgrp()
            popen_kwargs["preexec_fn"] = detach_from_parent_group
        def format_return(retcode, stdout, stderr=None, timed_out=False):
            """
            Shape the subprocess output per the catch_*/with_* flags.
            """
            log_func = log.debug
            if timed_out:
                log.error(
                    "run_script timed out after %d seconds (process killed)", timeout
                )
                log_func = log.error
            if log_output is True or timed_out or (log_output is None and retcode != 0):
                log_func(
                    "run_script results for: %s %s\n"
                    "return code: %s\n"
                    "stdout:\n"
                    "%s\n\n"
                    "stderr:\n"
                    "%s",
                    script,
                    arg_str,
                    retcode,
                    stdout,
                    stderr,
                )
            stdout = stdout or ""
            stderr = stderr or ""
            if not raw:
                stdout = stdout.splitlines()
                stderr = stderr.splitlines()
            ret = [stdout]
            if catch_stderr:
                ret.append(stderr)
            if with_retcode:
                ret.append(retcode)
            if catch_timeout:
                ret.append(timed_out)
            return ret[0] if len(ret) == 1 else tuple(ret)
        log.debug("Running Popen(%r, %r)", cmd, popen_kwargs)
        process = subprocess.Popen(cmd, **popen_kwargs)
        if timeout is not None:
            stop_at = datetime.now() + timedelta(seconds=timeout)
            term_sent = False
            while True:
                process.poll()
                time.sleep(0.1)
                if datetime.now() <= stop_at:
                    # poll again unless the child already exited
                    if process.returncode is not None:
                        break
                else:
                    terminate_process(process.pid, kill_children=True)
                    return format_return(
                        process.returncode, *process.communicate(), timed_out=True
                    )
        tmp_file.seek(0)
        try:
            out = tmp_file.read().decode(__salt_system_encoding__)
        except (NameError, UnicodeDecodeError):
            # Let's cross our fingers and hope for the best
            out = tmp_file.read().decode("utf-8")
        if catch_stderr:
            if sys.version_info < (2, 7):
                # select which, is limited by the OS to 1024 file descriptors
                # We need more available descriptors to run the tests which
                # need the stderr output.
                # So instead of .communicate() we wait for the process to
                # finish, but, as the python docs state "This will deadlock
                # when using stdout=PIPE and/or stderr=PIPE and the child
                # process generates enough output to a pipe such that it
                # blocks waiting for the OS pipe buffer to accept more data.
                # Use communicate() to avoid that." <- a catch, catch situation
                #
                # Use this work around were it's needed only, python 2.6
                process.wait()
                err = process.stderr.read()
            else:
                _, err = process.communicate()
            if process.stdout is not None:
                process.stdout.close()
            if process.stderr is not None:
                process.stderr.close()
            try:
                return format_return(process.returncode, out, err or "")
            finally:
                try:
                    if os.path.exists(tmp_file.name):
                        if isinstance(tmp_file.name, str):
                            os.remove(tmp_file.name)
                        else:
                            tmp_file.close()
                    process.terminate()
                except OSError as err:
                    pass
        process.communicate()
        if process.stdout is not None:
            process.stdout.close()
        try:
            return format_return(process.returncode, out)
        finally:
            try:
                if os.path.exists(tmp_file.name):
                    if isinstance(tmp_file.name, str):
                        os.remove(tmp_file.name)
                    else:
                        tmp_file.close()
                process.terminate()
            except OSError as err:
                pass
class MultiMasterTestShellCase(ShellCase):
    """
    Shell-command tests that run against the multimaster configuration.
    """
    @property
    def config_dir(self):
        # Point CLI invocations at the multimaster temp config directory.
        return RUNTIME_VARS.TMP_MM_CONF_DIR
class SPMTestUserInterface:
    """
    Test user interface for SPMClient that records every message instead of
    printing it, so tests can assert on what SPM reported.
    """

    def __init__(self):
        self._status, self._confirm, self._error = [], [], []

    def status(self, msg):
        """Record a status message."""
        self._status.append(msg)

    def confirm(self, action):
        """Record a confirmation prompt (implicitly accepted in tests)."""
        self._confirm.append(action)

    def error(self, msg):
        """Record an error message."""
        self._error.append(msg)
class SPMCase(TestCase, AdaptedConfigurationTestCaseMixin):
    """
    Helpers for driving SPM (Salt Package Manager) in tests: builds a sample
    'apache' formula, writes an spm config, and runs SPMClient commands.
    """
    def _spm_build_files(self, config):
        """
        Write a minimal 'apache' formula (FORMULA metadata plus one sls)
        under the configured file roots so it can be built into a package.
        """
        # NOTE(review): joining the roots list with spaces only makes sense
        # for a single-element file_roots list -- confirm.
        self.formula_dir = os.path.join(
            " ".join(config["file_roots"]["base"]), "formulas"
        )
        self.formula_sls_dir = os.path.join(self.formula_dir, "apache")
        self.formula_sls = os.path.join(self.formula_sls_dir, "apache.sls")
        self.formula_file = os.path.join(self.formula_dir, "FORMULA")
        dirs = [self.formula_dir, self.formula_sls_dir]
        for f_dir in dirs:
            os.makedirs(f_dir)
        with salt.utils.files.fopen(self.formula_sls, "w") as fp:
            fp.write(
                textwrap.dedent(
                    """\
                 install-apache:
                   pkg.installed:
                     - name: apache2
                 """
                )
            )
        with salt.utils.files.fopen(self.formula_file, "w") as fp:
            fp.write(
                textwrap.dedent(
                    """\
                 name: apache
                 os: RedHat, Debian, Ubuntu, Suse, FreeBSD
                 os_family: RedHat, Debian, Suse, FreeBSD
                 version: 201506
                 release: 2
                 summary: Formula for installing Apache
                 description: Formula for installing Apache
                 """
                )
            )
    def _spm_config(self, assume_yes=True):
        """
        Build (and persist to ``<tmp>/spm``) a minion config pointing every
        spm path at a fresh temporary directory; returns the config dict.
        """
        self._tmp_spm = tempfile.mkdtemp()
        config = self.get_temp_config(
            "minion",
            **{
                "spm_logfile": os.path.join(self._tmp_spm, "log"),
                "spm_repos_config": os.path.join(self._tmp_spm, "etc", "spm.repos"),
                "spm_cache_dir": os.path.join(self._tmp_spm, "cache"),
                "spm_build_dir": os.path.join(self._tmp_spm, "build"),
                "spm_build_exclude": ["apache/.git"],
                "spm_db_provider": "sqlite3",
                "spm_files_provider": "local",
                "spm_db": os.path.join(self._tmp_spm, "packages.db"),
                "extension_modules": os.path.join(self._tmp_spm, "modules"),
                "file_roots": {"base": [self._tmp_spm]},
                "formula_path": os.path.join(self._tmp_spm, "salt"),
                "pillar_path": os.path.join(self._tmp_spm, "pillar"),
                "reactor_path": os.path.join(self._tmp_spm, "reactor"),
                "assume_yes": True if assume_yes else False,
                "force": False,
                "verbose": False,
                "cache": "localfs",
                "cachedir": os.path.join(self._tmp_spm, "cache"),
                "spm_repo_dups": "ignore",
                "spm_share_dir": os.path.join(self._tmp_spm, "share"),
            }
        )
        # Late import
        import salt.utils.yaml
        if not os.path.isdir(config["formula_path"]):
            os.makedirs(config["formula_path"])
        with salt.utils.files.fopen(os.path.join(self._tmp_spm, "spm"), "w") as fp:
            salt.utils.yaml.safe_dump(config, fp)
        return config
    def _spm_create_update_repo(self, config):
        """
        Build the formula package, create a local repo from it, write the
        repo config, and update the repo index.
        """
        # NOTE(review): this method reads self.config rather than the passed
        # *config* -- callers must have assigned self.config first; confirm.
        build_spm = self.run_spm("build", self.config, self.formula_dir)
        c_repo = self.run_spm("create_repo", self.config, self.config["spm_build_dir"])
        repo_conf_dir = self.config["spm_repos_config"] + ".d"
        os.makedirs(repo_conf_dir)
        with salt.utils.files.fopen(os.path.join(repo_conf_dir, "spm.repo"), "w") as fp:
            fp.write(
                textwrap.dedent(
                    """\
                 local_repo:
                   url: file://{}
                 """.format(
                        self.config["spm_build_dir"]
                    )
                )
            )
        u_repo = self.run_spm("update_repo", self.config)
    def _spm_client(self, config):
        """
        Build an SPMClient wired to a recording SPMTestUserInterface.
        """
        # Late import
        import salt.spm
        self.ui = SPMTestUserInterface()
        client = salt.spm.SPMClient(self.ui, config)
        return client
    def run_spm(self, cmd, config, arg=None):
        """
        Run one spm command and return the recorded status messages.
        """
        client = self._spm_client(config)
        client.run([cmd, arg])
        client._close()
        return self.ui._status
class ModuleCase(TestCase, SaltClientTestCaseMixin):
    """
    Execute a module function
    """
    def wait_for_all_jobs(self, minions=("minion", "sub_minion"), sleep=0.3):
        """
        Wait for all jobs currently running on the list of minions to finish
        """
        for minion in minions:
            while True:
                ret = self.run_function(
                    "saltutil.running", minion_tgt=minion, timeout=300
                )
                if ret:
                    log.debug("Waiting for minion's jobs: %s", minion)
                    time.sleep(sleep)
                else:
                    break
    def minion_run(self, _function, *args, **kw):
        """
        Run a single salt function on the 'minion' target and condition
        the return down to match the behavior of the raw function call
        """
        return self.run_function(_function, args, **kw)
    def run_function(
        self,
        function,
        arg=(),
        minion_tgt="minion",
        timeout=300,
        master_tgt=None,
        **kwargs
    ):
        """
        Run a single salt function and condition the return down to match the
        behavior of the raw function call
        """
        # Functions that legitimately return None; a None reply from anything
        # else is treated as a missing/failed reply below.
        known_to_return_none = (
            "data.get",
            "file.chown",
            "file.chgrp",
            "pkg.refresh_db",
            "ssh.recv_known_host_entries",
            "time.sleep",
            "grains.delkey",
            "grains.delval",
        )
        # 'f_arg'/'f_timeout' let callers pass literal 'arg'/'timeout' kwargs
        # to the remote side without clashing with this method's parameters.
        if "f_arg" in kwargs:
            kwargs["arg"] = kwargs.pop("f_arg")
        if "f_timeout" in kwargs:
            kwargs["timeout"] = kwargs.pop("f_timeout")
        client = self.client if master_tgt is None else self.clients[master_tgt]
        log.debug(
            "Running client.cmd(minion_tgt=%r, function=%r, arg=%r, timeout=%r, kwarg=%r)",
            minion_tgt,
            function,
            arg,
            timeout,
            kwargs,
        )
        orig = client.cmd(minion_tgt, function, arg, timeout=timeout, kwarg=kwargs)
        # Under pytest a missing reply fails the test; otherwise it skips.
        if RUNTIME_VARS.PYTEST_SESSION:
            fail_or_skip_func = self.fail
        else:
            fail_or_skip_func = self.skipTest
        if minion_tgt not in orig:
            fail_or_skip_func(
                "WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply "
                "from the minion '{}'. Command output: {}".format(minion_tgt, orig)
            )
        elif orig[minion_tgt] is None and function not in known_to_return_none:
            fail_or_skip_func(
                "WARNING(SHOULD NOT HAPPEN #1935): Failed to get '{}' from "
                "the minion '{}'. Command output: {}".format(function, minion_tgt, orig)
            )
        # Try to match stalled state functions
        orig[minion_tgt] = self._check_state_return(orig[minion_tgt])
        return orig[minion_tgt]
    def run_state(self, function, **kwargs):
        """
        Run the state.single command and return the state return structure
        """
        ret = self.run_function("state.single", [function], **kwargs)
        return self._check_state_return(ret)
    def _check_state_return(self, ret):
        """
        Detect "function ... is running as PID" state-lock replies, kill the
        stalled jobs and annotate ``ret``; dict returns pass through.
        """
        if isinstance(ret, dict):
            # This is the supposed return format for state calls
            return ret
        if isinstance(ret, list):
            jids = []
            # These are usually errors
            for item in ret[:]:  # iterate a copy -- ret is appended to below
                if not isinstance(item, str):
                    # We don't know how to handle this
                    continue
                match = STATE_FUNCTION_RUNNING_RE.match(item)
                if not match:
                    # We don't know how to handle this
                    continue
                jid = match.group("jid")
                if jid in jids:
                    continue
                jids.append(jid)
                job_data = self.run_function("saltutil.find_job", [jid])
                job_kill = self.run_function("saltutil.kill_job", [jid])
                msg = (
                    "A running state.single was found causing a state lock. "
                    "Job details: '{}' Killing Job Returned: '{}'".format(
                        job_data, job_kill
                    )
                )
                ret.append(
                    "[TEST SUITE ENFORCED]{}" "[/TEST SUITE ENFORCED]".format(msg)
                )
        return ret
class MultimasterModuleCase(ModuleCase, SaltMultimasterClientTestCaseMixin):
    """
    Execute a module function
    """
    def run_function(
        self,
        function,
        arg=(),
        minion_tgt="mm-minion",
        timeout=300,
        master_tgt="mm-master",
        **kwargs
    ):
        """
        Run a single salt function and condition the return down to match the
        behavior of the raw function call
        """
        # Functions that legitimately return None (see ModuleCase.run_function)
        known_to_return_none = (
            "data.get",
            "file.chown",
            "file.chgrp",
            "pkg.refresh_db",
            "ssh.recv_known_host_entries",
            "time.sleep",
        )
        if minion_tgt == "mm-sub-minion":
            known_to_return_none += ("mine.update",)
        # 'f_arg'/'f_timeout' escape hatches, same as ModuleCase.run_function
        if "f_arg" in kwargs:
            kwargs["arg"] = kwargs.pop("f_arg")
        if "f_timeout" in kwargs:
            kwargs["timeout"] = kwargs.pop("f_timeout")
        # master_tgt may be None ('mm-master'), an int index, or a client key
        if master_tgt is None:
            client = self.clients["mm-master"]
        elif isinstance(master_tgt, int):
            client = self.clients[list(self.clients)[master_tgt]]
        else:
            client = self.clients[master_tgt]
        orig = client.cmd(minion_tgt, function, arg, timeout=timeout, kwarg=kwargs)
        if RUNTIME_VARS.PYTEST_SESSION:
            fail_or_skip_func = self.fail
        else:
            fail_or_skip_func = self.skipTest
        if minion_tgt not in orig:
            fail_or_skip_func(
                "WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply "
                "from the minion '{}'. Command output: {}".format(minion_tgt, orig)
            )
        elif orig[minion_tgt] is None and function not in known_to_return_none:
            fail_or_skip_func(
                "WARNING(SHOULD NOT HAPPEN #1935): Failed to get '{}' from "
                "the minion '{}'. Command output: {}".format(function, minion_tgt, orig)
            )
        # Try to match stalled state functions
        orig[minion_tgt] = self._check_state_return(orig[minion_tgt])
        return orig[minion_tgt]
    def run_function_all_masters(
        self, function, arg=(), minion_tgt="mm-minion", timeout=300, **kwargs
    ):
        """
        Run a single salt function from all the masters in multimaster environment
        and condition the return down to match the behavior of the raw function call
        """
        ret = []
        for master_id in self.clients:
            ret.append(
                self.run_function(
                    function,
                    arg=arg,
                    minion_tgt=minion_tgt,
                    timeout=timeout,
                    master_tgt=master_id,
                    **kwargs
                )
            )
        return ret
class SyndicCase(TestCase, SaltClientTestCaseMixin):
    """
    Execute a syndic based execution test
    """
    _salt_client_config_file_name_ = "syndic_master"
    def run_function(self, function, arg=(), timeout=90):
        """
        Run a single salt function and condition the return down to match the
        behavior of the raw function call
        """
        orig = self.client.cmd("minion", function, arg, timeout=timeout)
        # Under pytest a missing reply fails the test; otherwise it skips.
        if RUNTIME_VARS.PYTEST_SESSION:
            fail_or_skip_func = self.fail
        else:
            fail_or_skip_func = self.skipTest
        if "minion" not in orig:
            fail_or_skip_func(
                "WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply "
                "from the minion. Command output: {}".format(orig)
            )
        return orig["minion"]
@SKIP_IF_NOT_RUNNING_PYTEST
@pytest.mark.usefixtures("salt_ssh_cli")
@pytest.mark.requires_sshd_server
class SSHCase(ShellCase):
    """
    Execute a command via salt-ssh
    """

    def _arg_str(self, function, arg):
        """
        Join *function* and its arguments into one CLI string.

        Each argument is passed through ``str()`` so callers may supply
        non-string values (ints, bools), matching ShellCase.run_function.
        """
        return "{} {}".format(function, " ".join(str(arg_) for arg_ in arg))

    # pylint: disable=arguments-differ
    def run_function(
        self, function, arg=(), timeout=180, wipe=True, raw=False, **kwargs
    ):
        """
        We use a 180s timeout here, which some slower systems do end up needing
        """
        ret = self.run_ssh(
            self._arg_str(function, arg), timeout=timeout, wipe=wipe, raw=raw, **kwargs
        )
        log.debug(
            "SSHCase run_function executed %s with arg %s and kwargs %s",
            function,
            arg,
            kwargs,
        )
        log.debug("SSHCase JSON return: %s", ret)
        # Late import
        import salt.utils.json

        try:
            # salt-ssh emits JSON keyed by host; unwrap the localhost entry
            return salt.utils.json.loads(ret)["localhost"]
        except Exception:  # pylint: disable=broad-except
            # not JSON (raw output or an error) -- hand the output back as-is
            return ret

    # pylint: enable=arguments-differ
    def custom_roster(self, new_roster, data):
        """
        helper method to create a custom roster to use for a ssh test
        """
        roster = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "roster")
        with salt.utils.files.fopen(roster, "r") as fp_:
            conf = salt.utils.yaml.safe_load(fp_)
        conf["localhost"].update(data)
        with salt.utils.files.fopen(new_roster, "w") as fp_:
            salt.utils.yaml.safe_dump(conf, fp_)
class ClientCase(AdaptedConfigurationTestCaseMixin, TestCase):
    """
    A base class containing relevant options for starting the various Salt
    Python API entrypoints
    """

    def get_opts(self):
        """Return the client configuration loaded from the master config file."""
        # Late import
        import salt.config

        return salt.config.client_config(self.get_config_file_path("master"))

    def mkdir_p(self, path):
        """
        Create *path* including missing parents.

        Matches the previous EEXIST-and-isdir check: an existing directory is
        a no-op, while an existing non-directory (or any other OS error) still
        raises.
        """
        os.makedirs(path, exist_ok=True)
f73560bca59a5c29d4d70597653059aebe24d0d6 | 53,504 | py | Python | lbry/wallet/database.py | DispatchCommit/lbry-sdk | 761fcfe0caede6f2a003e47b9dd5809ac59e0adb | [
"MIT"
] | null | null | null | lbry/wallet/database.py | DispatchCommit/lbry-sdk | 761fcfe0caede6f2a003e47b9dd5809ac59e0adb | [
"MIT"
] | null | null | null | lbry/wallet/database.py | DispatchCommit/lbry-sdk | 761fcfe0caede6f2a003e47b9dd5809ac59e0adb | [
"MIT"
] | null | null | null | import os
import logging
import asyncio
import sqlite3
import platform
from binascii import hexlify
from collections import defaultdict
from dataclasses import dataclass
from contextvars import ContextVar
from typing import Tuple, List, Union, Callable, Any, Awaitable, Iterable, Dict, Optional
from datetime import date
from prometheus_client import Gauge, Counter, Histogram
from lbry.utils import LockWithMetrics
from .bip32 import PubKey
from .transaction import Transaction, Output, OutputScript, TXRefImmutable, Input
from .constants import TXO_TYPES, CLAIM_TYPES
from .util import date_to_julian_day
from concurrent.futures.thread import ThreadPoolExecutor # pylint: disable=wrong-import-order
# Choose the executor type for read-only DB queries: threads on Windows and
# Android/Kivy builds (which cannot use the fork-based process pool),
# processes everywhere else.
#
# BUG FIX: the previous condition
#   platform.system() == 'Windows' or 'ANDROID_ARGUMENT' or 'KIVY_BUILD' in os.environ
# was always true because the bare string literal 'ANDROID_ARGUMENT' is
# truthy, making the ProcessPoolExecutor branch unreachable.
if (
    platform.system() == 'Windows'
    or 'ANDROID_ARGUMENT' in os.environ
    or 'KIVY_BUILD' in os.environ
):
    from concurrent.futures.thread import ThreadPoolExecutor as ReaderExecutorClass  # pylint: disable=reimported
else:
    from concurrent.futures.process import ProcessPoolExecutor as ReaderExecutorClass
log = logging.getLogger(__name__)
# Surface exceptions raised inside sqlite3 callbacks (e.g. row factories)
# instead of swallowing them silently.
sqlite3.enable_callback_tracebacks(True)
# Latency buckets, in seconds, shared by the prometheus Histogram metrics below.
HISTOGRAM_BUCKETS = (
    .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 60.0, float('inf')
)
@dataclass
class ReaderProcessState:
    # Per-worker cursor used by the read-only query helpers below.
    cursor: sqlite3.Cursor
# Holds each reader worker's ReaderProcessState; populated by initializer().
reader_context: Optional[ContextVar[ReaderProcessState]] = ContextVar('reader_context')
def initializer(path):
    """
    Per-worker setup for the reader pool: open *path* in WAL mode with a
    dict row factory and stash a cursor in this worker's context variable.
    """
    connection = sqlite3.connect(path)
    connection.row_factory = dict_row_factory
    connection.executescript("pragma journal_mode=WAL;")
    reader_context.set(ReaderProcessState(connection.cursor()))
def run_read_only_fetchall(sql, params):
    """
    Execute *sql* on this worker's read-only cursor and return all rows.

    Runs inside a reader-pool worker; the cursor comes from the worker's
    ReaderProcessState set up by initializer(). Errors are logged and
    re-raised so the caller sees them.
    """
    cursor = reader_context.get().cursor
    try:
        return cursor.execute(sql, params).fetchall()
    except Exception as e:
        # OSError is a subclass of Exception, so the former
        # `except (Exception, OSError)` tuple was redundant.
        log.exception('Error running transaction:', exc_info=e)
        raise
def run_read_only_fetchone(sql, params):
    """
    Execute *sql* on this worker's read-only cursor and return the first row
    (or ``None`` when the query produces no rows).

    Runs inside a reader-pool worker; errors are logged and re-raised.
    """
    cursor = reader_context.get().cursor
    try:
        return cursor.execute(sql, params).fetchone()
    except Exception as e:
        # OSError is a subclass of Exception, so the former
        # `except (Exception, OSError)` tuple was redundant.
        log.exception('Error running transaction:', exc_info=e)
        raise
class AIOSQLite:
    """asyncio wrapper around sqlite3: one writer thread plus a reader pool."""

    # Executor class for the read-only pool (thread pool on Windows/Android,
    # process pool elsewhere -- see the module-level selection above).
    reader_executor: ReaderExecutorClass

    waiting_writes_metric = Gauge(
        "waiting_writes_count", "Number of waiting db writes", namespace="daemon_database"
    )
    waiting_reads_metric = Gauge(
        # Fixed copy/paste in the help text: this gauge counts reads.
        "waiting_reads_count", "Number of waiting db reads", namespace="daemon_database"
    )
    write_count_metric = Counter(
        "write_count", "Number of database writes", namespace="daemon_database"
    )
    read_count_metric = Counter(
        "read_count", "Number of database reads", namespace="daemon_database"
    )
    # The f-string prefixes on these metric names were pointless (no
    # placeholders) and have been dropped.
    acquire_write_lock_metric = Histogram(
        'write_lock_acquired', 'Time to acquire the write lock', namespace="daemon_database", buckets=HISTOGRAM_BUCKETS
    )
    held_write_lock_metric = Histogram(
        'write_lock_held', 'Length of time the write lock is held for', namespace="daemon_database",
        buckets=HISTOGRAM_BUCKETS
    )
    def __init__(self):
        # has to be single threaded as there is no mapping of thread:connection
        self.writer_executor = ThreadPoolExecutor(max_workers=1)
        self.writer_connection: Optional[sqlite3.Connection] = None
        # set once close() has started; further work raises CancelledError
        self._closing = False
        # count of write transactions executed
        self.query_count = 0
        self.write_lock = LockWithMetrics(self.acquire_write_lock_metric, self.held_write_lock_metric)
        # number of queued or running write calls; readers wait while non-zero
        self.writers = 0
        # set when no writes are pending, allowing reads to proceed
        self.read_ready = asyncio.Event()
        # cleared by a starved reader to throttle further writes (_execute_fetch)
        self.urgent_read_done = asyncio.Event()
@classmethod
async def connect(cls, path: Union[bytes, str], *args, **kwargs):
sqlite3.enable_callback_tracebacks(True)
db = cls()
def _connect_writer():
db.writer_connection = sqlite3.connect(path, *args, **kwargs)
readers = max(os.cpu_count() - 2, 2)
db.reader_executor = ReaderExecutorClass(
max_workers=readers, initializer=initializer, initargs=(path, )
)
await asyncio.get_event_loop().run_in_executor(db.writer_executor, _connect_writer)
db.read_ready.set()
db.urgent_read_done.set()
return db
    async def close(self):
        """
        Flush the WAL into the main database file, close the writer
        connection and shut down both executors. Safe to call repeatedly;
        subsequent calls return immediately.
        """
        if self._closing:
            return
        self._closing = True
        def __checkpoint_and_close(conn: sqlite3.Connection):
            # runs on the writer thread so it owns the connection
            conn.execute("PRAGMA WAL_CHECKPOINT(FULL);")
            log.info("DB checkpoint finished.")
            conn.close()
        await asyncio.get_event_loop().run_in_executor(
            self.writer_executor, __checkpoint_and_close, self.writer_connection)
        self.writer_executor.shutdown(wait=True)
        self.reader_executor.shutdown(wait=True)
        self.read_ready.clear()
        self.writer_connection = None
def executemany(self, sql: str, params: Iterable):
params = params if params is not None else []
# this fetchall is needed to prevent SQLITE_MISUSE
return self.run(lambda conn: conn.executemany(sql, params).fetchall())
def executescript(self, script: str) -> Awaitable:
return self.run(lambda conn: conn.executescript(script))
    async def _execute_fetch(self, sql: str, parameters: Optional[Iterable] = None,
                             read_only=False, fetch_all: bool = False) -> Union[List[dict], dict, None]:
        """
        Run *sql* and fetch results, using the read-only pool when allowed.

        With ``read_only`` the query is shipped to a reader worker after the
        pending writes drain; a reader that keeps getting starved clears
        ``urgent_read_done`` to throttle further writes until it has run.
        Otherwise the query runs on the writer connection via ``run``.
        ``fetch_all`` selects fetchall() vs fetchone() semantics.
        """
        read_only_fn = run_read_only_fetchall if fetch_all else run_read_only_fetchone
        parameters = parameters if parameters is not None else []
        still_waiting = False
        urgent_read = False
        if read_only:
            self.waiting_reads_metric.inc()
            self.read_count_metric.inc()
            try:
                while self.writers and not self._closing:  # more writes can come in while we are waiting for the first
                    if not urgent_read and still_waiting and self.urgent_read_done.is_set():
                        # throttle the writes if they pile up
                        self.urgent_read_done.clear()
                        urgent_read = True
                    # wait until the running writes have finished
                    await self.read_ready.wait()
                    still_waiting = True
                if self._closing:
                    raise asyncio.CancelledError()
                return await asyncio.get_event_loop().run_in_executor(
                    self.reader_executor, read_only_fn, sql, parameters
                )
            finally:
                if urgent_read:
                    # unthrottle the writers if they had to be throttled
                    self.urgent_read_done.set()
                self.waiting_reads_metric.dec()
        if fetch_all:
            return await self.run(lambda conn: conn.execute(sql, parameters).fetchall())
        return await self.run(lambda conn: conn.execute(sql, parameters).fetchone())
async def execute_fetchall(self, sql: str, parameters: Iterable = None,
read_only=False) -> List[dict]:
return await self._execute_fetch(sql, parameters, read_only, fetch_all=True)
    async def execute_fetchone(self, sql: str, parameters: Optional[Iterable] = None,
                               read_only=False) -> Optional[dict]:
        """
        Fetch a single row for *sql* (``None`` when no row matches).

        The return annotation was corrected from ``List[dict]``: the
        fetchone path yields one row dict or ``None``, never a list.
        """
        return await self._execute_fetch(sql, parameters, read_only, fetch_all=False)
def execute(self, sql: str, parameters: Iterable = None) -> Awaitable[sqlite3.Cursor]:
parameters = parameters if parameters is not None else []
return self.run(lambda conn: conn.execute(sql, parameters))
    async def run(self, fun, *args, **kwargs):
        """Queue *fun* to run inside a write transaction on the writer thread.

        Writers are serialized behind ``write_lock``; while any writer is
        queued or running, readers are blocked via ``read_ready``. A pending
        urgent read may pause new writers through ``urgent_read_done``.
        """
        self.write_count_metric.inc()
        self.waiting_writes_metric.inc()
        # it's possible many writes are coming in one after the other, these can
        # block reader calls for a long time
        # if the reader waits for the writers to finish and then has to wait for
        # yet more, it will clear the urgent_read_done event to block more writers
        # piling on
        try:
            await self.urgent_read_done.wait()
        except Exception as e:
            self.waiting_writes_metric.dec()
            raise e
        self.writers += 1
        # block readers
        self.read_ready.clear()
        try:
            async with self.write_lock:
                if self._closing:
                    raise asyncio.CancelledError()
                return await asyncio.get_event_loop().run_in_executor(
                    self.writer_executor, lambda: self.__run_transaction(fun, *args, **kwargs)
                )
        finally:
            self.writers -= 1
            self.waiting_writes_metric.dec()
            if not self.writers:
                # unblock the readers once the last enqueued writer finishes
                self.read_ready.set()
def __run_transaction(self, fun: Callable[[sqlite3.Connection, Any, Any], Any], *args, **kwargs):
self.writer_connection.execute('begin')
try:
self.query_count += 1
result = fun(self.writer_connection, *args, **kwargs) # type: ignore
self.writer_connection.commit()
return result
except (Exception, OSError) as e:
log.exception('Error running transaction:', exc_info=e)
self.writer_connection.rollback()
log.warning("rolled back")
raise
    async def run_with_foreign_keys_disabled(self, fun, *args, **kwargs):
        """Like :meth:`run`, but executes *fun* with foreign key checks off.

        Mirrors run()'s writer throttling and serialization; the pragma
        toggling happens in the worker thread around the transaction.
        """
        self.write_count_metric.inc()
        self.waiting_writes_metric.inc()
        try:
            await self.urgent_read_done.wait()
        except Exception as e:
            self.waiting_writes_metric.dec()
            raise e
        self.writers += 1
        # block readers while writes are pending (see run())
        self.read_ready.clear()
        try:
            async with self.write_lock:
                if self._closing:
                    raise asyncio.CancelledError()
                return await asyncio.get_event_loop().run_in_executor(
                    self.writer_executor, self.__run_transaction_with_foreign_keys_disabled, fun, args, kwargs
                )
        finally:
            self.writers -= 1
            self.waiting_writes_metric.dec()
            if not self.writers:
                self.read_ready.set()
    def __run_transaction_with_foreign_keys_disabled(self,
                                                     fun: Callable[[sqlite3.Connection, Any, Any], Any],
                                                     args, kwargs):
        """Run a transaction with `pragma foreign_keys` temporarily off.

        Foreign keys must currently be enabled; if they are already off this
        call is a mistake and a plain :meth:`AIOSQLite.run` should be used.
        """
        foreign_keys_enabled, = self.writer_connection.execute("pragma foreign_keys").fetchone()
        if not foreign_keys_enabled:
            raise sqlite3.IntegrityError("foreign keys are disabled, use `AIOSQLite.run` instead")
        try:
            self.writer_connection.execute('pragma foreign_keys=off').fetchone()
            return self.__run_transaction(fun, *args, **kwargs)
        finally:
            # always restore enforcement, even if the transaction failed
            self.writer_connection.execute('pragma foreign_keys=on').fetchone()
def constraints_to_sql(constraints, joiner=' AND ', prepend_key=''):
    """Translate a constraints dict into ``(where_sql, values)``.

    Key suffixes select the operator (``__not``, ``__lt``, ``__in``, ...);
    ``__any``/``__or`` and ``__and`` recurse into sub-dicts; a ``#tag``
    suffix disambiguates repeated columns; ``$key`` injects a bare named
    value and ``$$key`` marks a raw column expression. Dots in keys are
    rewritten to underscores for the named-parameter placeholders.
    """
    sql, values = [], {}
    for key, constraint in constraints.items():
        tag = '0'
        if '#' in key:
            # explicit disambiguation tag: "column#tag"
            key, tag = key[:key.index('#')], key[key.index('#')+1:]
        col, op, key = key, '=', key.replace('.', '_')
        if not key:
            # empty key: the constraint itself is a raw SQL fragment
            sql.append(constraint)
            continue
        if key.startswith('$$'):
            col, key = col[2:], key[1:]
        elif key.startswith('$'):
            # named value only, no SQL emitted for it here
            values[key] = constraint
            continue
        if key.endswith('__not'):
            # NOTE: only `col` is truncated; `key` keeps the "__not" suffix,
            # so the placeholder name below still contains it
            col, op = col[:-len('__not')], '!='
        elif key.endswith('__is_null'):
            col = col[:-len('__is_null')]
            sql.append(f'{col} IS NULL')
            continue
        if key.endswith('__is_not_null'):
            col = col[:-len('__is_not_null')]
            sql.append(f'{col} IS NOT NULL')
            continue
        if key.endswith('__lt'):
            col, op = col[:-len('__lt')], '<'
        elif key.endswith('__lte'):
            col, op = col[:-len('__lte')], '<='
        elif key.endswith('__gt'):
            col, op = col[:-len('__gt')], '>'
        elif key.endswith('__gte'):
            col, op = col[:-len('__gte')], '>='
        elif key.endswith('__like'):
            col, op = col[:-len('__like')], 'LIKE'
        elif key.endswith('__not_like'):
            col, op = col[:-len('__not_like')], 'NOT LIKE'
        elif key.endswith('__in') or key.endswith('__not_in'):
            if key.endswith('__in'):
                col, op, one_val_op = col[:-len('__in')], 'IN', '='
            else:
                col, op, one_val_op = col[:-len('__not_in')], 'NOT IN', '!='
            if constraint:
                if isinstance(constraint, (list, set, tuple)):
                    if len(constraint) == 1:
                        # single element collapses to plain (in)equality
                        values[f'{key}{tag}'] = next(iter(constraint))
                        sql.append(f'{col} {one_val_op} :{key}{tag}')
                    else:
                        keys = []
                        for i, val in enumerate(constraint):
                            keys.append(f':{key}{tag}_{i}')
                            values[f'{key}{tag}_{i}'] = val
                        sql.append(f'{col} {op} ({", ".join(keys)})')
                elif isinstance(constraint, str):
                    # string constraint is treated as a raw subquery
                    sql.append(f'{col} {op} ({constraint})')
                else:
                    raise ValueError(f"{col} requires a list, set or string as constraint value.")
            continue
        elif key.endswith('__any') or key.endswith('__or'):
            # nested group joined with OR; tag keeps sub-keys unique
            where, subvalues = constraints_to_sql(constraint, ' OR ', key+tag+'_')
            sql.append(f'({where})')
            values.update(subvalues)
            continue
        if key.endswith('__and'):
            where, subvalues = constraints_to_sql(constraint, ' AND ', key+tag+'_')
            sql.append(f'({where})')
            values.update(subvalues)
            continue
        sql.append(f'{col} {op} :{prepend_key}{key}{tag}')
        values[prepend_key+key+tag] = constraint
    return joiner.join(sql) if sql else '', values
def query(select, **constraints) -> Tuple[str, Dict[str, Any]]:
    """Assemble a complete SQL statement from *select* plus keyword constraints.

    The special keys ``limit``, ``offset``, ``order_by``, ``group_by`` and
    ``accounts`` are handled here; everything else becomes the WHERE clause
    via constraints_to_sql(). Returns ``(sql, values)``.
    """
    pieces = [select]
    limit = constraints.pop('limit', None)
    offset = constraints.pop('offset', None)
    order_by = constraints.pop('order_by', None)
    group_by = constraints.pop('group_by', None)
    accounts = constraints.pop('accounts', [])
    if accounts:
        constraints['account__in'] = [a.public_key.address for a in accounts]
    where, values = constraints_to_sql(constraints)
    if where:
        pieces.extend(('WHERE', where))
    if group_by is not None:
        pieces.append(f'GROUP BY {group_by}')
    if order_by:
        pieces.append('ORDER BY')
        if isinstance(order_by, str):
            pieces.append(order_by)
        elif isinstance(order_by, list):
            pieces.append(', '.join(order_by))
        else:
            raise ValueError("order_by must be string or list")
    if limit is not None:
        pieces.append(f'LIMIT {limit}')
    if offset is not None:
        pieces.append(f'OFFSET {offset}')
    return ' '.join(pieces), values
def interpolate(sql, values):
    """Return *sql* with named ``:key`` placeholders replaced by literals.

    Debug/logging helper only -- string values are not escaped. Keys are
    substituted in reverse-sorted order so a key that extends another
    (``:ab`` vs ``:a``) is replaced first. Bytes render as ``X'..'`` blobs;
    None and bools previously leaked Python tokens (``None``, ``True``)
    into the output -- they now render as ``NULL`` and ``1``/``0`` so the
    result stays readable as SQL.
    """
    for k in sorted(values.keys(), reverse=True):
        value = values[k]
        if value is None:
            value = "NULL"
        elif isinstance(value, bool):  # bool before bytes/str/int fallbacks
            value = "1" if value else "0"
        elif isinstance(value, bytes):
            value = f"X'{hexlify(value).decode()}'"
        elif isinstance(value, str):
            value = f"'{value}'"
        else:
            value = str(value)
        sql = sql.replace(f":{k}", value)
    return sql
def constrain_single_or_list(constraints, column, value, convert=lambda x: x, negate=False):
    """Add an equality/membership constraint on *column* for a scalar or list.

    One-element lists collapse to plain equality. With ``negate`` the
    constraint is inverted and also matches NULL columns. ``None`` values
    and empty lists leave the dict untouched. Returns *constraints* for
    chaining.
    """
    if value is None:
        return constraints
    if not isinstance(value, list):
        if negate:
            constraints[f"{column}__or"] = {
                f"{column}__is_null": True,
                f"{column}__not": convert(value)
            }
        else:
            constraints[column] = convert(value)
        return constraints
    converted = [convert(v) for v in value]
    if len(converted) == 1:
        if negate:
            constraints[f"{column}__or"] = {
                f"{column}__is_null": True,
                f"{column}__not": converted[0]
            }
        else:
            constraints[column] = converted[0]
    elif len(converted) > 1:
        if negate:
            constraints[f"{column}__or"] = {
                f"{column}__is_null": True,
                f"{column}__not_in": converted
            }
        else:
            constraints[f"{column}__in"] = converted
    return constraints
class SQLiteMixin:
    """Shared plumbing for SQLite-backed stores: connection management,
    schema versioning, and INSERT/UPDATE SQL builders."""

    SCHEMA_VERSION: Optional[str] = None
    CREATE_TABLES_QUERY: str
    # keep bound parameters per statement under SQLite's default 999 limit
    MAX_QUERY_VARIABLES = 900

    CREATE_VERSION_TABLE = """
        create table if not exists version (
            version text
        );
    """

    def __init__(self, path):
        self._db_path = path
        self.db: AIOSQLite = None
        self.ledger = None

    async def open(self):
        """Connect and make sure the schema matches ``SCHEMA_VERSION``.

        If tables exist but the stored version is missing or stale, every
        table is dropped (plus WAL checkpoint and VACUUM) and the schema is
        recreated from ``CREATE_TABLES_QUERY``.
        """
        log.info("connecting to database: %s", self._db_path)
        self.db = await AIOSQLite.connect(self._db_path, isolation_level=None)
        if self.SCHEMA_VERSION:
            tables = [t[0] for t in await self.db.execute_fetchall(
                "SELECT name FROM sqlite_master WHERE type='table';"
            )]
            if tables:
                if 'version' in tables:
                    version = await self.db.execute_fetchone("SELECT version FROM version LIMIT 1;")
                    if version == (self.SCHEMA_VERSION,):
                        # schema is current, nothing to rebuild
                        return
                # version table missing or stale: drop everything, rebuild below
                await self.db.executescript('\n'.join(
                    f"DROP TABLE {table};" for table in tables
                ) + '\n' + 'PRAGMA WAL_CHECKPOINT(FULL);' + '\n' + 'VACUUM;')
            await self.db.execute(self.CREATE_VERSION_TABLE)
            await self.db.execute("INSERT INTO version VALUES (?)", (self.SCHEMA_VERSION,))
        await self.db.executescript(self.CREATE_TABLES_QUERY)

    async def close(self):
        """Close the underlying AIOSQLite connection."""
        await self.db.close()

    @staticmethod
    def _insert_sql(table: str, data: dict, ignore_duplicate: bool = False,
                    replace: bool = False) -> Tuple[str, List]:
        """Build an INSERT statement for *data*; ``replace`` wins over
        ``ignore_duplicate`` when both are set."""
        columns, values = [], []
        for column, value in data.items():
            columns.append(column)
            values.append(value)
        policy = ""
        if ignore_duplicate:
            policy = " OR IGNORE"
        if replace:
            policy = " OR REPLACE"
        sql = "INSERT{} INTO {} ({}) VALUES ({})".format(
            policy, table, ', '.join(columns), ', '.join(['?'] * len(values))
        )
        return sql, values

    @staticmethod
    def _update_sql(table: str, data: dict, where: str,
                    constraints: Union[list, tuple]) -> Tuple[str, list]:
        """Build an UPDATE statement; *constraints* supplies the values
        bound by the ``?`` placeholders in *where*."""
        columns, values = [], []
        for column, value in data.items():
            columns.append(f"{column} = ?")
            values.append(value)
        values.extend(constraints)
        sql = "UPDATE {} SET {} WHERE {}".format(
            table, ', '.join(columns), where
        )
        return sql, values
def dict_row_factory(cursor, row):
    """sqlite3 row factory that maps each column name to its value."""
    return {column[0]: value for column, value in zip(cursor.description, row)}
# largest value storable in an SQLite INTEGER column (2**63 - 1)
SQLITE_MAX_INTEGER = 9223372036854775807
def _get_spendable_utxos(transaction: sqlite3.Connection, accounts: List, decoded_transactions: Dict[str, Transaction],
                         result: Dict[Tuple[bytes, int, bool], List[int]], reserved: List[Transaction],
                         amount_to_reserve: int, reserved_amount: int, floor: int, ceiling: int,
                         fee_per_byte: int) -> int:
    """Accumulate spendable txos with ``floor <= amount < ceiling``.

    Runs synchronously on the writer thread. Mutates its arguments:
    ``decoded_transactions`` caches decoded txs, ``result`` maps
    (raw, height, verified) -> output positions, and ``reserved`` collects
    txoids. Confirmed txos are preferred; unconfirmed ones are used only if
    still needed. Returns the updated reserved amount, net of the estimated
    per-input spend fee.
    """
    accounts_fmt = ",".join(["?"] * len(accounts))
    txo_query = f"""
        SELECT tx.txid, txo.txoid, tx.raw, tx.height, txo.position as nout, tx.is_verified, txo.amount FROM txo
        INNER JOIN account_address USING (address)
        LEFT JOIN txi USING (txoid)
        INNER JOIN tx USING (txid)
        WHERE txo.txo_type=0 AND txi.txoid IS NULL AND tx.txid IS NOT NULL AND NOT txo.is_reserved
        AND txo.amount >= ? AND txo.amount < ?
        """
    if accounts:
        # len(accounts_fmt) == 1 exactly when there is a single "?" placeholder
        txo_query += f"""
        AND account_address.account {'= ?' if len(accounts_fmt) == 1 else 'IN (' + accounts_fmt + ')'}
        """
    txo_query += """
        ORDER BY txo.amount ASC, tx.height DESC
        """
    # prefer confirmed, but save unconfirmed utxos from this selection in case they are needed
    unconfirmed = []
    for row in transaction.execute(txo_query, (floor, ceiling, *accounts)):
        (txid, txoid, raw, height, nout, verified, amount) = row.values()
        # multiple txos can come from the same tx, only decode it once and cache
        if txid not in decoded_transactions:
            # cache the decoded transaction
            decoded_transactions[txid] = Transaction(raw)
        decoded_tx = decoded_transactions[txid]
        if verified:
            # add the txo to the reservation, minus the fee for including it
            reserved_amount += amount
            reserved_amount -= Input.spend(decoded_tx.outputs[nout]).size * fee_per_byte
        # mark it as reserved
            result[(raw, height, verified)].append(nout)
            reserved.append(txoid)
            # if we've reserved enough, return
            if reserved_amount >= amount_to_reserve:
                return reserved_amount
        else:
            # save the unconfirmed txo for possible use later, if still needed
            unconfirmed.append((txid, txoid, raw, height, nout, verified, amount))
    # we're popping the items, so to get them in the order they were seen they are reversed
    unconfirmed.reverse()
    # add available unconfirmed txos if any were previously found
    while unconfirmed and reserved_amount < amount_to_reserve:
        (txid, txoid, raw, height, nout, verified, amount) = unconfirmed.pop()
        # it's already decoded
        decoded_tx = decoded_transactions[txid]
        # add to the reserved amount
        reserved_amount += amount
        reserved_amount -= Input.spend(decoded_tx.outputs[nout]).size * fee_per_byte
        result[(raw, height, verified)].append(nout)
        reserved.append(txoid)
    return reserved_amount
def get_and_reserve_spendable_utxos(transaction: sqlite3.Connection, accounts: List, amount_to_reserve: int, floor: int,
                                    fee_per_byte: int, set_reserved: bool, return_insufficient_funds: bool,
                                    base_multiplier: int = 100):
    """Collect (and optionally mark reserved) utxos covering *amount_to_reserve*.

    Scans amount bands ``[floor, floor*multiplier)`` that widen exponentially;
    the search stops after 5 consecutive empty bands or when the ceiling
    would exceed SQLite's integer range. Returns a dict of
    (raw, height, verified) -> [output positions]; an empty dict on
    insufficient funds unless ``return_insufficient_funds`` is set.
    """
    txs = defaultdict(list)
    decoded_transactions = {}
    reserved = []

    reserved_dewies = 0
    multiplier = base_multiplier
    gap_count = 0

    while reserved_dewies < amount_to_reserve and gap_count < 5 and floor * multiplier < SQLITE_MAX_INTEGER:
        previous_reserved_dewies = reserved_dewies
        reserved_dewies = _get_spendable_utxos(
            transaction, accounts, decoded_transactions, txs, reserved, amount_to_reserve, reserved_dewies,
            floor, floor * multiplier, fee_per_byte
        )
        floor *= multiplier
        if previous_reserved_dewies == reserved_dewies:
            # empty band: widen faster and count toward the give-up threshold
            gap_count += 1
            multiplier **= 2
        else:
            gap_count = 0
            multiplier = base_multiplier

    # reserve the accumulated txos if enough were found
    if reserved_dewies >= amount_to_reserve:
        if set_reserved:
            transaction.executemany("UPDATE txo SET is_reserved = ? WHERE txoid = ?",
                                    [(True, txoid) for txoid in reserved]).fetchall()
        return txs

    # return_insufficient_funds and set_reserved are used for testing
    return txs if return_insufficient_funds else {}
class Database(SQLiteMixin):
    """SQLite wallet storage: accounts, addresses, transactions and txios."""

    # bump to force SQLiteMixin.open() to drop and rebuild all tables
    SCHEMA_VERSION = "1.5"

    # WAL journaling lets the reader pool run alongside the single writer
    PRAGMAS = """
        pragma journal_mode=WAL;
    """

    CREATE_ACCOUNT_TABLE = """
        create table if not exists account_address (
            account text not null,
            address text not null,
            chain integer not null,
            pubkey blob not null,
            chain_code blob not null,
            n integer not null,
            depth integer not null,
            primary key (account, address)
        );
        create index if not exists address_account_idx on account_address (address, account);
    """

    CREATE_PUBKEY_ADDRESS_TABLE = """
        create table if not exists pubkey_address (
            address text primary key,
            history text,
            used_times integer not null default 0
        );
    """

    CREATE_TX_TABLE = """
        create table if not exists tx (
            txid text primary key,
            raw blob not null,
            height integer not null,
            position integer not null,
            is_verified boolean not null default 0,
            purchased_claim_id text,
            day integer
        );
        create index if not exists tx_purchased_claim_id_idx on tx (purchased_claim_id);
    """

    CREATE_TXO_TABLE = """
        create table if not exists txo (
            txid text references tx,
            txoid text primary key,
            address text references pubkey_address,
            position integer not null,
            amount integer not null,
            script blob not null,
            is_reserved boolean not null default 0,
            txo_type integer not null default 0,
            claim_id text,
            claim_name text,
            channel_id text,
            reposted_claim_id text
        );
        create index if not exists txo_txid_idx on txo (txid);
        create index if not exists txo_address_idx on txo (address);
        create index if not exists txo_claim_id_idx on txo (claim_id, txo_type);
        create index if not exists txo_claim_name_idx on txo (claim_name);
        create index if not exists txo_txo_type_idx on txo (txo_type);
        create index if not exists txo_channel_id_idx on txo (channel_id);
        create index if not exists txo_reposted_claim_idx on txo (reposted_claim_id);
    """

    CREATE_TXI_TABLE = """
        create table if not exists txi (
            txid text references tx,
            txoid text references txo primary key,
            address text references pubkey_address,
            position integer not null
        );
        create index if not exists txi_address_idx on txi (address);
        create index if not exists first_input_idx on txi (txid, address) where position=0;
    """

    CREATE_TABLES_QUERY = (
        PRAGMAS +
        CREATE_ACCOUNT_TABLE +
        CREATE_PUBKEY_ADDRESS_TABLE +
        CREATE_TX_TABLE +
        CREATE_TXO_TABLE +
        CREATE_TXI_TABLE
    )
    async def open(self):
        # connect and migrate via SQLiteMixin.open(), then make the writer
        # return rows as dicts so fetch results are addressable by name
        await super().open()
        self.db.writer_connection.row_factory = dict_row_factory
    def txo_to_row(self, tx, txo):
        """Build the `txo` table row for *txo*, including claim metadata."""
        row = {
            'txid': tx.id,
            'txoid': txo.id,
            'address': txo.get_address(self.ledger),
            'position': txo.position,
            'amount': txo.amount,
            'script': sqlite3.Binary(txo.script.source)
        }
        if txo.is_claim:
            if txo.can_decode_claim:
                claim = txo.claim
                row['txo_type'] = TXO_TYPES.get(claim.claim_type, TXO_TYPES['stream'])
                if claim.is_repost:
                    row['reposted_claim_id'] = claim.repost.reference.claim_id
                if claim.is_signed:
                    row['channel_id'] = claim.signing_channel_id
            else:
                # undecodable claims fall back to the stream type
                row['txo_type'] = TXO_TYPES['stream']
        elif txo.is_support:
            row['txo_type'] = TXO_TYPES['support']
        elif txo.purchase is not None:
            row['txo_type'] = TXO_TYPES['purchase']
            row['claim_id'] = txo.purchased_claim_id
        if txo.script.is_claim_involved:
            # claim scripts also record the claim id/name directly
            row['claim_id'] = txo.claim_id
            row['claim_name'] = txo.claim_name
        return row
def tx_to_row(self, tx):
row = {
'txid': tx.id,
'raw': sqlite3.Binary(tx.raw),
'height': tx.height,
'position': tx.position,
'is_verified': tx.is_verified,
'day': tx.get_julian_day(self.ledger),
}
txos = tx.outputs
if len(txos) >= 2 and txos[1].can_decode_purchase_data:
txos[0].purchase = txos[1]
row['purchased_claim_id'] = txos[1].purchase_data.claim_id
return row
async def insert_transaction(self, tx):
await self.db.execute_fetchall(*self._insert_sql('tx', self.tx_to_row(tx)))
async def update_transaction(self, tx):
await self.db.execute_fetchall(*self._update_sql("tx", {
'height': tx.height, 'position': tx.position, 'is_verified': tx.is_verified
}, 'txid = ?', (tx.id,)))
    def _transaction_io(self, conn: sqlite3.Connection, tx: Transaction, address, txhash):
        """Upsert *tx* and its inputs/outputs relevant to *address*.

        Runs synchronously inside the writer transaction. Inputs spending
        our own outputs are recorded, and an output is stored when it pays
        to *txhash* or when the tx spends from us.
        """
        conn.execute(*self._insert_sql('tx', self.tx_to_row(tx), replace=True)).fetchall()

        is_my_input = False

        for txi in tx.inputs:
            if txi.txo_ref.txo is not None:
                txo = txi.txo_ref.txo
                if txo.has_address and txo.get_address(self.ledger) == address:
                    # this tx spends one of our own outputs
                    is_my_input = True
                    conn.execute(*self._insert_sql("txi", {
                        'txid': tx.id,
                        'txoid': txo.id,
                        'address': address,
                        'position': txi.position
                    }, ignore_duplicate=True)).fetchall()

        for txo in tx.outputs:
            if txo.script.is_pay_pubkey_hash and (txo.pubkey_hash == txhash or is_my_input):
                conn.execute(*self._insert_sql(
                    "txo", self.txo_to_row(tx, txo), ignore_duplicate=True
                )).fetchall()
            elif txo.script.is_pay_script_hash:
                # TODO: implement script hash payments
                log.warning('Database.save_transaction_io: pay script hash is not implemented!')
def save_transaction_io(self, tx: Transaction, address, txhash, history):
return self.save_transaction_io_batch([tx], address, txhash, history)
def save_transaction_io_batch(self, txs: Iterable[Transaction], address, txhash, history):
history_count = history.count(':') // 2
def __many(conn):
for tx in txs:
self._transaction_io(conn, tx, address, txhash)
conn.execute(
"UPDATE pubkey_address SET history = ?, used_times = ? WHERE address = ?",
(history, history_count, address)
).fetchall()
return self.db.run(__many)
async def reserve_outputs(self, txos, is_reserved=True):
txoids = [(is_reserved, txo.id) for txo in txos]
await self.db.executemany("UPDATE txo SET is_reserved = ? WHERE txoid = ?", txoids)
async def release_outputs(self, txos):
await self.reserve_outputs(txos, is_reserved=False)
    async def rewind_blockchain(self, above_height):  # pylint: disable=no-self-use
        """Placeholder for reorg handling; currently a no-op returning True."""
        # TODO:
        # 1. delete transactions above_height
        # 2. update address histories removing deleted TXs
        return True
async def get_spendable_utxos(self, ledger, reserve_amount, accounts: Optional[Iterable], min_amount: int = 1,
fee_per_byte: int = 50, set_reserved: bool = True,
return_insufficient_funds: bool = False) -> List:
to_spend = await self.db.run(
get_and_reserve_spendable_utxos, tuple(account.id for account in accounts), reserve_amount, min_amount,
fee_per_byte, set_reserved, return_insufficient_funds
)
txos = []
for (raw, height, verified), positions in to_spend.items():
tx = Transaction(raw, height=height, is_verified=verified)
for nout in positions:
txos.append(tx.outputs[nout].get_estimator(ledger))
return txos
    async def select_transactions(self, cols, accounts=None, read_only=False, **constraints):
        """Query the tx table; scoped to *accounts* unless a txid filter is given."""
        if not {'txid', 'txid__in'}.intersection(constraints):
            assert accounts, "'accounts' argument required when no 'txid' constraint is present"
            where, values = constraints_to_sql({
                '$$account_address.account__in': [a.public_key.address for a in accounts]
            })
            # a tx belongs to the accounts if they received or spent in it
            constraints['txid__in'] = f"""
                SELECT txo.txid FROM txo JOIN account_address USING (address) WHERE {where}
                UNION
                SELECT txi.txid FROM txi JOIN account_address USING (address) WHERE {where}
            """
            constraints.update(values)
        return await self.db.execute_fetchall(
            *query(f"SELECT {cols} FROM tx", **constraints), read_only=read_only
        )
TXO_NOT_MINE = Output(None, None, is_my_output=False)
    async def get_transactions(self, wallet=None, **constraints):
        """Fetch annotated Transaction objects matching *constraints*.

        Each tx's outputs are annotated against the wallet's own txos and
        its inputs resolved to the txos they spend, with lookups batched to
        stay under ``MAX_QUERY_VARIABLES``; purchase data is also attached.
        """
        include_is_spent = constraints.pop('include_is_spent', False)
        include_is_my_input = constraints.pop('include_is_my_input', False)
        include_is_my_output = constraints.pop('include_is_my_output', False)

        tx_rows = await self.select_transactions(
            'txid, raw, height, position, is_verified',
            order_by=constraints.pop('order_by', ["height=0 DESC", "height DESC", "position DESC"]),
            **constraints
        )

        if not tx_rows:
            return []

        txids, txs, txi_txoids = [], [], []
        for row in tx_rows:
            txids.append(row['txid'])
            txs.append(Transaction(
                raw=row['raw'], height=row['height'], position=row['position'],
                is_verified=bool(row['is_verified'])
            ))
            for txi in txs[-1].inputs:
                txi_txoids.append(txi.txo_ref.id)

        step = self.MAX_QUERY_VARIABLES
        # our own outputs appearing in these txs, batched
        annotated_txos = {}
        for offset in range(0, len(txids), step):
            annotated_txos.update({
                txo.id: txo for txo in
                (await self.get_txos(
                    wallet=wallet,
                    txid__in=txids[offset:offset+step], order_by='txo.txid',
                    include_is_spent=include_is_spent,
                    include_is_my_input=include_is_my_input,
                    include_is_my_output=include_is_my_output,
                ))
            })

        # txos spent by these txs' inputs, batched
        referenced_txos = {}
        for offset in range(0, len(txi_txoids), step):
            referenced_txos.update({
                txo.id: txo for txo in
                (await self.get_txos(
                    wallet=wallet,
                    txoid__in=txi_txoids[offset:offset+step], order_by='txo.txoid',
                    include_is_my_output=include_is_my_output,
                ))
            })

        for tx in txs:
            for txi in tx.inputs:
                txo = referenced_txos.get(txi.txo_ref.id)
                if txo:
                    txi.txo_ref = txo.ref
            for txo in tx.outputs:
                _txo = annotated_txos.get(txo.id)
                if _txo:
                    txo.update_annotations(_txo)
                else:
                    txo.update_annotations(self.TXO_NOT_MINE)

        for tx in txs:
            txos = tx.outputs
            if len(txos) >= 2 and txos[1].can_decode_purchase_data:
                txos[0].purchase = txos[1]

        return txs
async def get_transaction_count(self, **constraints):
constraints.pop('wallet', None)
constraints.pop('offset', None)
constraints.pop('limit', None)
constraints.pop('order_by', None)
count = await self.select_transactions('COUNT(*) as total', **constraints)
return count[0]['total'] or 0
async def get_transaction(self, **constraints):
txs = await self.get_transactions(limit=1, **constraints)
if txs:
return txs[0]
    async def select_txos(
            self, cols, accounts=None, is_my_input=None, is_my_output=True,
            is_my_input_or_output=None, exclude_internal_transfers=False,
            include_is_spent=False, include_is_my_input=False,
            is_spent=None, read_only=False, **constraints):
        """Build and run the txo/tx join with ownership and spent-ness filters."""
        # qualify txid/txoid constraints so they resolve against the txo table
        for rename_col in ('txid', 'txoid'):
            for rename_constraint in (rename_col, rename_col+'__in', rename_col+'__not_in'):
                if rename_constraint in constraints:
                    constraints['txo.'+rename_constraint] = constraints.pop(rename_constraint)
        if accounts:
            account_in_sql, values = constraints_to_sql({
                '$$account__in': [a.public_key.address for a in accounts]
            })
            my_addresses = f"SELECT address FROM account_address WHERE {account_in_sql}"
            constraints.update(values)
            if is_my_input_or_output:
                include_is_my_input = True
                constraints['received_or_sent__or'] = {
                    'txo.address__in': my_addresses,
                    'sent__and': {
                        'txi.address__is_not_null': True,
                        'txi.address__in': my_addresses
                    }
                }
            else:
                if is_my_output:
                    constraints['txo.address__in'] = my_addresses
                elif is_my_output is False:
                    constraints['txo.address__not_in'] = my_addresses
                if is_my_input:
                    include_is_my_input = True
                    constraints['txi.address__is_not_null'] = True
                    constraints['txi.address__in'] = my_addresses
                elif is_my_input is False:
                    include_is_my_input = True
                    constraints['is_my_input_false__or'] = {
                        'txi.address__is_null': True,
                        'txi.address__not_in': my_addresses
                    }
            if exclude_internal_transfers:
                include_is_my_input = True
                constraints['exclude_internal_payments__or'] = {
                    'txo.txo_type__not': TXO_TYPES['other'],
                    'txo.address__not_in': my_addresses,
                    'txi.address__is_null': True,
                    'txi.address__not_in': my_addresses,
                }
        sql = [f"SELECT {cols} FROM txo JOIN tx ON (tx.txid=txo.txid)"]
        if is_spent:
            constraints['spent.txoid__is_not_null'] = True
        elif is_spent is False:
            constraints['is_reserved'] = False
            constraints['spent.txoid__is_null'] = True
        if include_is_spent or is_spent is not None:
            # join the input (if any) that spends each txo
            sql.append("LEFT JOIN txi AS spent ON (spent.txoid=txo.txoid)")
        if include_is_my_input:
            # join the first input (position 0) of the txo's own transaction
            sql.append("LEFT JOIN txi ON (txi.position=0 AND txi.txid=txo.txid)")
        return await self.db.execute_fetchall(*query(' '.join(sql), **constraints), read_only=read_only)
    async def get_txos(self, wallet=None, no_tx=False, read_only=False, **constraints):
        """Fetch Output objects with optional ownership/spent/tip annotations.

        With ``no_tx`` each Output is built directly from the row; otherwise
        whole Transactions are decoded (cached per txid) and their outputs
        returned. Channel signing keys and channel references are resolved
        when a wallet is given.
        """
        include_is_spent = constraints.get('include_is_spent', False)
        include_is_my_input = constraints.get('include_is_my_input', False)
        include_is_my_output = constraints.pop('include_is_my_output', False)
        include_received_tips = constraints.pop('include_received_tips', False)

        select_columns = [
            "tx.txid, raw, tx.height, tx.position as tx_position, tx.is_verified, "
            "txo_type, txo.position as txo_position, amount, script"
        ]

        my_accounts = {a.public_key.address for a in wallet.accounts} if wallet else set()
        my_accounts_sql = ""
        if include_is_my_output or include_is_my_input:
            my_accounts_sql, values = constraints_to_sql({'$$account__in#_wallet': my_accounts})
            constraints.update(values)

        if include_is_my_output and my_accounts:
            if constraints.get('is_my_output', None) in (True, False):
                # caller already constrained ownership: emit it as a constant
                select_columns.append(f"{1 if constraints['is_my_output'] else 0} AS is_my_output")
            else:
                select_columns.append(f"""(
                    txo.address IN (SELECT address FROM account_address WHERE {my_accounts_sql})
                ) AS is_my_output""")

        if include_is_my_input and my_accounts:
            if constraints.get('is_my_input', None) in (True, False):
                select_columns.append(f"{1 if constraints['is_my_input'] else 0} AS is_my_input")
            else:
                select_columns.append(f"""(
                    txi.address IS NOT NULL AND
                    txi.address IN (SELECT address FROM account_address WHERE {my_accounts_sql})
                ) AS is_my_input""")

        if include_is_spent:
            select_columns.append("spent.txoid IS NOT NULL AS is_spent")

        if include_received_tips:
            select_columns.append(f"""(
            SELECT COALESCE(SUM(support.amount), 0) FROM txo AS support WHERE
                support.claim_id = txo.claim_id AND
                support.txo_type = {TXO_TYPES['support']} AND
                support.address IN (SELECT address FROM account_address WHERE {my_accounts_sql}) AND
                support.txoid NOT IN (SELECT txoid FROM txi)
            ) AS received_tips""")

        if 'order_by' not in constraints or constraints['order_by'] == 'height':
            constraints['order_by'] = [
                "tx.height=0 DESC", "tx.height DESC", "tx.position DESC", "txo.position"
            ]
        elif constraints.get('order_by', None) == 'none':
            del constraints['order_by']

        rows = await self.select_txos(', '.join(select_columns), read_only=read_only, **constraints)

        txos = []
        txs = {}
        for row in rows:
            if no_tx:
                txo = Output(
                    amount=row['amount'],
                    script=OutputScript(row['script']),
                    tx_ref=TXRefImmutable.from_id(row['txid'], row['height']),
                    position=row['txo_position']
                )
            else:
                if row['txid'] not in txs:
                    # decode each transaction only once
                    txs[row['txid']] = Transaction(
                        row['raw'], height=row['height'], position=row['tx_position'],
                        is_verified=bool(row['is_verified'])
                    )
                txo = txs[row['txid']].outputs[row['txo_position']]
            if include_is_spent:
                txo.is_spent = bool(row['is_spent'])
            if include_is_my_input:
                txo.is_my_input = bool(row['is_my_input'])
            if include_is_my_output:
                txo.is_my_output = bool(row['is_my_output'])
            if include_is_my_input and include_is_my_output:
                if txo.is_my_input and txo.is_my_output and row['txo_type'] == TXO_TYPES['other']:
                    txo.is_internal_transfer = True
                else:
                    txo.is_internal_transfer = False
            if include_received_tips:
                txo.received_tips = row['received_tips']
            txos.append(txo)

        channel_ids = set()
        for txo in txos:
            if txo.is_claim and txo.can_decode_claim:
                if txo.claim.is_signed:
                    channel_ids.add(txo.claim.signing_channel_id)
                if txo.claim.is_channel and wallet:
                    # attach the channel's private key if any account holds it
                    for account in wallet.accounts:
                        private_key = await account.get_channel_private_key(
                            txo.claim.channel.public_key_bytes
                        )
                        if private_key:
                            txo.private_key = private_key
                            break

        if channel_ids:
            channels = {
                txo.claim_id: txo for txo in
                (await self.get_channels(
                    wallet=wallet,
                    claim_id__in=channel_ids,
                    read_only=read_only
                ))
            }
            for txo in txos:
                if txo.is_claim and txo.can_decode_claim:
                    txo.channel = channels.get(txo.claim.signing_channel_id, None)

        return txos
@staticmethod
def _clean_txo_constraints_for_aggregation(constraints):
constraints.pop('include_is_spent', None)
constraints.pop('include_is_my_input', None)
constraints.pop('include_is_my_output', None)
constraints.pop('include_received_tips', None)
constraints.pop('wallet', None)
constraints.pop('resolve', None)
constraints.pop('offset', None)
constraints.pop('limit', None)
constraints.pop('order_by', None)
async def get_txo_count(self, **constraints):
self._clean_txo_constraints_for_aggregation(constraints)
count = await self.select_txos('COUNT(*) AS total', **constraints)
return count[0]['total'] or 0
async def get_txo_sum(self, **constraints):
self._clean_txo_constraints_for_aggregation(constraints)
result = await self.select_txos('SUM(amount) AS total', **constraints)
return result[0]['total'] or 0
    async def get_txo_plot(self, start_day=None, days_back=0, end_day=None, days_after=None, **constraints):
        """Aggregate txo amounts per (julian) day for plotting.

        Without *start_day*, the window reaches back *days_back* days from
        the chain tip's estimated day; otherwise it runs from *start_day*
        to *end_day* or to *start_day* + *days_after*.
        """
        self._clean_txo_constraints_for_aggregation(constraints)
        if start_day is None:
            # estimate the julian day at the current chain height
            constraints['day__gte'] = self.ledger.headers.estimated_julian_day(
                self.ledger.headers.height
            ) - days_back
        else:
            constraints['day__gte'] = date_to_julian_day(
                date.fromisoformat(start_day)
            )
        if end_day is not None:
            constraints['day__lte'] = date_to_julian_day(
                date.fromisoformat(end_day)
            )
        elif days_after is not None:
            constraints['day__lte'] = constraints['day__gte'] + days_after
        return await self.select_txos(
            "DATE(day) AS day, SUM(amount) AS total",
            group_by='day', order_by='day', **constraints
        )
def get_utxos(self, read_only=False, **constraints):
return self.get_txos(is_spent=False, read_only=read_only, **constraints)
def get_utxo_count(self, **constraints):
return self.get_txo_count(is_spent=False, **constraints)
async def get_balance(self, wallet=None, accounts=None, read_only=False, **constraints):
assert wallet or accounts, \
"'wallet' or 'accounts' constraints required to calculate balance"
constraints['accounts'] = accounts or wallet.accounts
balance = await self.select_txos(
'SUM(amount) as total', is_spent=False, read_only=read_only, **constraints
)
return balance[0]['total'] or 0
async def select_addresses(self, cols, read_only=False, **constraints):
return await self.db.execute_fetchall(*query(
f"SELECT {cols} FROM pubkey_address JOIN account_address USING (address)",
**constraints
), read_only=read_only)
    async def get_addresses(self, cols=None, read_only=False, **constraints):
        """Fetch address rows, inflating 'pubkey' into a PubKey object.

        NOTE(review): custom *cols* including 'pubkey' must also include
        'chain_code', 'n' and 'depth' -- the inflation below pops all four
        keys out of each row.
        """
        cols = cols or (
            'address', 'account', 'chain', 'history', 'used_times',
            'pubkey', 'chain_code', 'n', 'depth'
        )
        addresses = await self.select_addresses(', '.join(cols), read_only=read_only, **constraints)
        if 'pubkey' in cols:
            for address in addresses:
                address['pubkey'] = PubKey(
                    self.ledger, address.pop('pubkey'), address.pop('chain_code'),
                    address.pop('n'), address.pop('depth')
                )
        return addresses
async def get_address_count(self, cols=None, read_only=False, **constraints):
count = await self.select_addresses('COUNT(*) as total', read_only=read_only, **constraints)
return count[0]['total'] or 0
async def get_address(self, read_only=False, **constraints):
addresses = await self.get_addresses(read_only=read_only, limit=1, **constraints)
if addresses:
return addresses[0]
    async def add_keys(self, account, chain, pubkeys):
        """Insert derived pubkeys for *account*/*chain* (duplicates ignored)
        and ensure each address exists in pubkey_address."""
        await self.db.executemany(
            "insert or ignore into account_address "
            "(account, address, chain, pubkey, chain_code, n, depth) values "
            "(?, ?, ?, ?, ?, ?, ?)", ((
                account.id, k.address, chain,
                sqlite3.Binary(k.pubkey_bytes),
                sqlite3.Binary(k.chain_code),
                k.n, k.depth
            ) for k in pubkeys)
        )
        await self.db.executemany(
            "insert or ignore into pubkey_address (address) values (?)",
            ((pubkey.address,) for pubkey in pubkeys)
        )
async def _set_address_history(self, address, history):
await self.db.execute_fetchall(
"UPDATE pubkey_address SET history = ?, used_times = ? WHERE address = ?",
(history, history.count(':')//2, address)
)
async def set_address_history(self, address, history):
await self._set_address_history(address, history)
    @staticmethod
    def constrain_purchases(constraints):
        """Limit *constraints* (in place) to purchase txs made by 'accounts'.

        Without an explicit purchased_claim_id filter, any purchase
        qualifies; txids are restricted to transactions with an input from
        the given accounts.
        """
        accounts = constraints.pop('accounts', None)
        assert accounts, "'accounts' argument required to find purchases"
        if not {'purchased_claim_id', 'purchased_claim_id__in'}.intersection(constraints):
            constraints['purchased_claim_id__is_not_null'] = True
        constraints.update({
            f'$account{i}': a.public_key.address for i, a in enumerate(accounts)
        })
        account_values = ', '.join([f':$account{i}' for i in range(len(accounts))])
        constraints['txid__in'] = f"""
            SELECT txid FROM txi JOIN account_address USING (address)
            WHERE account_address.account IN ({account_values})
        """
    async def get_purchases(self, **constraints):
        """Return the first output (the purchased txo) of each matching purchase tx."""
        self.constrain_purchases(constraints)
        return [tx.outputs[0] for tx in await self.get_transactions(**constraints)]
    def get_purchase_count(self, **constraints):
        """Count purchase transactions matching *constraints*."""
        self.constrain_purchases(constraints)
        return self.get_transaction_count(**constraints)
@staticmethod
def constrain_claims(constraints):
if {'txo_type', 'txo_type__in'}.intersection(constraints):
return
claim_types = constraints.pop('claim_type', None)
if claim_types:
constrain_single_or_list(
constraints, 'txo_type', claim_types, lambda x: TXO_TYPES[x]
)
else:
constraints['txo_type__in'] = CLAIM_TYPES
    async def get_claims(self, read_only=False, **constraints) -> List[Output]:
        """Return unspent claim outputs matching *constraints*."""
        self.constrain_claims(constraints)
        return await self.get_utxos(read_only=read_only, **constraints)
    def get_claim_count(self, **constraints):
        """Count unspent claim outputs matching *constraints*."""
        self.constrain_claims(constraints)
        return self.get_utxo_count(**constraints)
    @staticmethod
    def constrain_streams(constraints):
        """Force the txo_type constraint to stream claims."""
        constraints['txo_type'] = TXO_TYPES['stream']
    def get_streams(self, read_only=False, **constraints):
        """Return stream claims matching *constraints*."""
        self.constrain_streams(constraints)
        return self.get_claims(read_only=read_only, **constraints)
    def get_stream_count(self, **constraints):
        """Count stream claims matching *constraints*."""
        self.constrain_streams(constraints)
        return self.get_claim_count(**constraints)
    @staticmethod
    def constrain_channels(constraints):
        """Force the txo_type constraint to channel claims."""
        constraints['txo_type'] = TXO_TYPES['channel']
def get_channels(self, **constraints):
self.constrain_channels(constraints)
return self.get_claims(**constraints)
    def get_channel_count(self, **constraints):
        """Count channel claims matching *constraints*."""
        self.constrain_channels(constraints)
        return self.get_claim_count(**constraints)
    @staticmethod
    def constrain_supports(constraints):
        """Force the txo_type constraint to support outputs."""
        constraints['txo_type'] = TXO_TYPES['support']
    def get_supports(self, **constraints):
        """Return unspent support outputs matching *constraints*."""
        self.constrain_supports(constraints)
        return self.get_utxos(**constraints)
    def get_support_count(self, **constraints):
        """Count unspent support outputs matching *constraints*."""
        self.constrain_supports(constraints)
        return self.get_utxo_count(**constraints)
    @staticmethod
    def constrain_collections(constraints):
        """Force the txo_type constraint to collection claims."""
        constraints['txo_type'] = TXO_TYPES['collection']
    def get_collections(self, **constraints):
        """Return unspent collection outputs matching *constraints*."""
        self.constrain_collections(constraints)
        return self.get_utxos(**constraints)
    def get_collection_count(self, **constraints):
        """Count unspent collection outputs matching *constraints*."""
        self.constrain_collections(constraints)
        return self.get_utxo_count(**constraints)
    async def release_all_outputs(self, account=None):
        """Clear the is_reserved flag on every reserved txo, optionally limited
        to outputs owned by *account*."""
        if account is None:
            await self.db.execute_fetchall("UPDATE txo SET is_reserved = 0 WHERE is_reserved = 1")
        else:
            await self.db.execute_fetchall(
                "UPDATE txo SET is_reserved = 0 WHERE"
                " is_reserved = 1 AND txo.address IN ("
                "    SELECT address from account_address WHERE account = ?"
                "  )", (account.public_key.address, )
            )
    def get_supports_summary(self, read_only=False, **constraints):
        """Return the wallet's own unspent support txos (without attaching the
        full transaction objects), annotated with is_my_input."""
        return self.get_txos(
            txo_type=TXO_TYPES['support'],
            is_spent=False, is_my_output=True,
            include_is_my_input=True,
            no_tx=True, read_only=read_only,
            **constraints
        )
import os
import logging
import asyncio
import sqlite3
import platform
from binascii import hexlify
from collections import defaultdict
from dataclasses import dataclass
from contextvars import ContextVar
from typing import Tuple, List, Union, Callable, Any, Awaitable, Iterable, Dict, Optional
from datetime import date
from prometheus_client import Gauge, Counter, Histogram
from lbry.utils import LockWithMetrics
from .bip32 import PubKey
from .transaction import Transaction, Output, OutputScript, TXRefImmutable, Input
from .constants import TXO_TYPES, CLAIM_TYPES
from .util import date_to_julian_day
from concurrent.futures.thread import ThreadPoolExecutor
# Choose the executor class for read-only queries: process pools give real
# parallelism for readers, but are not used on Windows or on mobile
# (Kivy/Android) builds, where a thread pool is used instead.
#
# BUG FIX: the condition previously read `... or 'ANDROID_ARGUMENT' or ...`,
# and a non-empty string literal is always truthy, so the thread pool was
# selected unconditionally. Both platform markers must be membership tests.
if platform.system() == 'Windows' or 'ANDROID_ARGUMENT' in os.environ or 'KIVY_BUILD' in os.environ:
    from concurrent.futures.thread import ThreadPoolExecutor as ReaderExecutorClass
else:
    from concurrent.futures.process import ProcessPoolExecutor as ReaderExecutorClass
log = logging.getLogger(__name__)
sqlite3.enable_callback_tracebacks(True)
# Latency buckets (seconds) shared by the prometheus histograms below.
HISTOGRAM_BUCKETS = (
    .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 60.0, float('inf')
)
@dataclass
class ReaderProcessState:
    """Per-reader-worker state: the open cursor used for read-only queries."""
    cursor: sqlite3.Cursor
# Context variable giving each reader worker its own ReaderProcessState,
# populated by ``initializer`` when the worker starts.
reader_context: Optional[ContextVar[ReaderProcessState]] = ContextVar('reader_context')
def initializer(path):
    """Reader-pool worker initializer: open *path* in WAL mode with dict rows
    and stash a cursor in this worker's context variable."""
    connection = sqlite3.connect(path)
    connection.row_factory = dict_row_factory
    connection.executescript("pragma journal_mode=WAL;")
    state = ReaderProcessState(connection.cursor())
    reader_context.set(state)
def run_read_only_fetchall(sql, params):
    """Execute *sql* on this worker's read-only cursor and return all rows.

    Failures are logged with traceback before being re-raised to the caller.
    """
    cursor = reader_context.get().cursor
    try:
        return cursor.execute(sql, params).fetchall()
    except (Exception, OSError) as error:
        log.exception('Error running transaction:', exc_info=error)
        raise
def run_read_only_fetchone(sql, params):
    """Execute *sql* on this worker's read-only cursor and return one row.

    Failures are logged with traceback before being re-raised to the caller.
    """
    cursor = reader_context.get().cursor
    try:
        return cursor.execute(sql, params).fetchone()
    except (Exception, OSError) as error:
        log.exception('Error running transaction:', exc_info=error)
        raise
class AIOSQLite:
    """Async wrapper around a single sqlite3 database.

    All writes are serialized onto one dedicated writer thread and wrapped in
    explicit transactions; read-only statements may run concurrently on a pool
    of reader workers (threads or processes, see ReaderExecutorClass). Two
    asyncio events coordinate the two sides: ``read_ready`` blocks readers
    while writers are queued, and ``urgent_read_done`` lets a long-waiting
    reader briefly throttle new writers. Prometheus metrics track queue depth
    and write-lock timings.
    """
    # reader pool; created in connect()
    reader_executor: ReaderExecutorClass
    waiting_writes_metric = Gauge(
        "waiting_writes_count", "Number of waiting db writes", namespace="daemon_database"
    )
    # NOTE(review): the description string says "writes" but this gauge counts
    # waiting reads — confirm before changing the exported metric text.
    waiting_reads_metric = Gauge(
        "waiting_reads_count", "Number of waiting db writes", namespace="daemon_database"
    )
    write_count_metric = Counter(
        "write_count", "Number of database writes", namespace="daemon_database"
    )
    read_count_metric = Counter(
        "read_count", "Number of database reads", namespace="daemon_database"
    )
    acquire_write_lock_metric = Histogram(
        f'write_lock_acquired', 'Time to acquire the write lock', namespace="daemon_database", buckets=HISTOGRAM_BUCKETS
    )
    held_write_lock_metric = Histogram(
        f'write_lock_held', 'Length of time the write lock is held for', namespace="daemon_database",
        buckets=HISTOGRAM_BUCKETS
    )
    def __init__(self):
        # single worker thread => all writes are serialized
        self.writer_executor = ThreadPoolExecutor(max_workers=1)
        self.writer_connection: Optional[sqlite3.Connection] = None
        self._closing = False
        self.query_count = 0
        self.write_lock = LockWithMetrics(self.acquire_write_lock_metric, self.held_write_lock_metric)
        # number of currently queued/running writers
        self.writers = 0
        # set when no writers are pending; readers wait on it
        self.read_ready = asyncio.Event()
        # cleared by a long-waiting reader to hold off additional writers
        self.urgent_read_done = asyncio.Event()
    @classmethod
    async def connect(cls, path: Union[bytes, str], *args, **kwargs):
        """Open *path* on the writer thread and start the reader pool."""
        sqlite3.enable_callback_tracebacks(True)
        db = cls()
        def _connect_writer():
            db.writer_connection = sqlite3.connect(path, *args, **kwargs)
        # leave a couple of cores for the writer and event loop, minimum 2 readers
        readers = max(os.cpu_count() - 2, 2)
        db.reader_executor = ReaderExecutorClass(
            max_workers=readers, initializer=initializer, initargs=(path, )
        )
        await asyncio.get_event_loop().run_in_executor(db.writer_executor, _connect_writer)
        db.read_ready.set()
        db.urgent_read_done.set()
        return db
    async def close(self):
        """Checkpoint the WAL, close the writer connection and stop both executors."""
        if self._closing:
            return
        self._closing = True
        def __checkpoint_and_close(conn: sqlite3.Connection):
            conn.execute("PRAGMA WAL_CHECKPOINT(FULL);")
            log.info("DB checkpoint finished.")
            conn.close()
        await asyncio.get_event_loop().run_in_executor(
            self.writer_executor, __checkpoint_and_close, self.writer_connection)
        self.writer_executor.shutdown(wait=True)
        self.reader_executor.shutdown(wait=True)
        self.read_ready.clear()
        self.writer_connection = None
    def executemany(self, sql: str, params: Iterable):
        """Run ``executemany`` on the writer thread inside a transaction."""
        params = params if params is not None else []
        # fetchall() drains the cursor before the connection is reused
        return self.run(lambda conn: conn.executemany(sql, params).fetchall())
    def executescript(self, script: str) -> Awaitable:
        """Run a multi-statement SQL script on the writer thread."""
        return self.run(lambda conn: conn.executescript(script))
    async def _execute_fetch(self, sql: str, parameters: Iterable = None,
                             read_only=False, fetch_all: bool = False) -> List[dict]:
        """Shared implementation of execute_fetchall/execute_fetchone.

        With ``read_only=True`` the statement is routed to the reader pool once
        no writers are pending; otherwise it runs on the writer thread.
        """
        read_only_fn = run_read_only_fetchall if fetch_all else run_read_only_fetchone
        parameters = parameters if parameters is not None else []
        still_waiting = False
        urgent_read = False
        if read_only:
            self.waiting_reads_metric.inc()
            self.read_count_metric.inc()
            try:
                while self.writers and not self._closing:
                    # more writers piled up while we were already waiting once:
                    # clear urgent_read_done so new writers hold off until we run
                    if not urgent_read and still_waiting and self.urgent_read_done.is_set():
                        self.urgent_read_done.clear()
                        urgent_read = True
                    await self.read_ready.wait()
                    still_waiting = True
                if self._closing:
                    raise asyncio.CancelledError()
                return await asyncio.get_event_loop().run_in_executor(
                    self.reader_executor, read_only_fn, sql, parameters
                )
            finally:
                if urgent_read:
                    # unthrottle the writers if they had been throttled
                    self.urgent_read_done.set()
                self.waiting_reads_metric.dec()
        if fetch_all:
            return await self.run(lambda conn: conn.execute(sql, parameters).fetchall())
        return await self.run(lambda conn: conn.execute(sql, parameters).fetchone())
    async def execute_fetchall(self, sql: str, parameters: Iterable = None,
                               read_only=False) -> List[dict]:
        """Execute *sql* and return all result rows."""
        return await self._execute_fetch(sql, parameters, read_only, fetch_all=True)
    async def execute_fetchone(self, sql: str, parameters: Iterable = None,
                               read_only=False) -> List[dict]:
        """Execute *sql* and return the first result row."""
        return await self._execute_fetch(sql, parameters, read_only, fetch_all=False)
    def execute(self, sql: str, parameters: Iterable = None) -> Awaitable[sqlite3.Cursor]:
        """Execute a single write statement on the writer thread."""
        parameters = parameters if parameters is not None else []
        return self.run(lambda conn: conn.execute(sql, parameters))
    async def run(self, fun, *args, **kwargs):
        """Run ``fun(conn, *args, **kwargs)`` inside a transaction on the writer thread."""
        self.write_count_metric.inc()
        self.waiting_writes_metric.inc()
        # a long-waiting reader may have cleared urgent_read_done to throttle
        # writers; wait for it before enqueueing this write so readers are not
        # starved by a continuous stream of writes
        try:
            await self.urgent_read_done.wait()
        except Exception as e:
            self.waiting_writes_metric.dec()
            raise e
        self.writers += 1
        # block readers
        self.read_ready.clear()
        try:
            async with self.write_lock:
                if self._closing:
                    raise asyncio.CancelledError()
                return await asyncio.get_event_loop().run_in_executor(
                    self.writer_executor, lambda: self.__run_transaction(fun, *args, **kwargs)
                )
        finally:
            self.writers -= 1
            self.waiting_writes_metric.dec()
            if not self.writers:
                # unblock the readers once the last enqueued writer finishes
                self.read_ready.set()
    def __run_transaction(self, fun: Callable[[sqlite3.Connection, Any, Any], Any], *args, **kwargs):
        # runs on the writer thread: BEGIN, call fun, COMMIT (ROLLBACK on error)
        self.writer_connection.execute('begin')
        try:
            self.query_count += 1
            result = fun(self.writer_connection, *args, **kwargs)  # type: ignore
            self.writer_connection.commit()
            return result
        except (Exception, OSError) as e:
            log.exception('Error running transaction:', exc_info=e)
            self.writer_connection.rollback()
            log.warning("rolled back")
            raise
    async def run_with_foreign_keys_disabled(self, fun, *args, **kwargs):
        """Like :meth:`run` but with ``pragma foreign_keys`` turned off for the
        duration of the transaction."""
        self.write_count_metric.inc()
        self.waiting_writes_metric.inc()
        try:
            await self.urgent_read_done.wait()
        except Exception as e:
            self.waiting_writes_metric.dec()
            raise e
        self.writers += 1
        self.read_ready.clear()
        try:
            async with self.write_lock:
                if self._closing:
                    raise asyncio.CancelledError()
                return await asyncio.get_event_loop().run_in_executor(
                    self.writer_executor, self.__run_transaction_with_foreign_keys_disabled, fun, args, kwargs
                )
        finally:
            self.writers -= 1
            self.waiting_writes_metric.dec()
            if not self.writers:
                self.read_ready.set()
    def __run_transaction_with_foreign_keys_disabled(self,
                                                     fun: Callable[[sqlite3.Connection, Any, Any], Any],
                                                     args, kwargs):
        # if foreign keys are already off, someone else is in the middle of the
        # same trick — refuse rather than silently nest
        foreign_keys_enabled, = self.writer_connection.execute("pragma foreign_keys").fetchone()
        if not foreign_keys_enabled:
            raise sqlite3.IntegrityError("foreign keys are disabled, use `AIOSQLite.run` instead")
        try:
            self.writer_connection.execute('pragma foreign_keys=off').fetchone()
            return self.__run_transaction(fun, *args, **kwargs)
        finally:
            # always restore enforcement, even when the transaction failed
            self.writer_connection.execute('pragma foreign_keys=on').fetchone()
def constraints_to_sql(constraints, joiner=' AND ', prepend_key=''):
    """Compile a constraints dict into a SQL WHERE fragment plus bind values.

    Key conventions:
      - 'col' -> "col = :col0"; a '#tag' suffix replaces the default '0' tag
        so the same column can be constrained twice without parameter clashes.
      - '' (empty key) -> the constraint is appended as raw SQL.
      - '$name' -> no SQL emitted, just binds the value as :$name.
      - '$$name' -> keep the '$' in the parameter name but strip it from the
        column reference (used for account address subqueries).
      - operator suffixes: __not, __is_null, __is_not_null, __lt, __lte,
        __gt, __gte, __like, __not_like, __in / __not_in (list, set, tuple or
        raw-SQL string), __any / __or (nested OR group), __and (nested AND
        group).

    Returns (sql_fragment, values) where sql_fragment is '' when no
    constraints produced SQL.

    FIX: the '#'-tag parsing had been corrupted (the '#' string literals were
    stripped as if they were comments, merging two statements into a syntax
    error); it is restored here, matching the '$$account__in#_wallet' style
    keys used by callers in this file.
    """
    sql, values = [], {}
    for key, constraint in constraints.items():
        tag = '0'
        if '#' in key:
            key, tag = key[:key.index('#')], key[key.index('#') + 1:]
        col, op, key = key, '=', key.replace('.', '_')
        if not key:
            sql.append(constraint)
            continue
        if key.startswith('$$'):
            col, key = col[2:], key[1:]
        elif key.startswith('$'):
            values[key] = constraint
            continue
        if key.endswith('__not'):
            col, op = col[:-len('__not')], '!='
        elif key.endswith('__is_null'):
            col = col[:-len('__is_null')]
            sql.append(f'{col} IS NULL')
            continue
        if key.endswith('__is_not_null'):
            col = col[:-len('__is_not_null')]
            sql.append(f'{col} IS NOT NULL')
            continue
        if key.endswith('__lt'):
            col, op = col[:-len('__lt')], '<'
        elif key.endswith('__lte'):
            col, op = col[:-len('__lte')], '<='
        elif key.endswith('__gt'):
            col, op = col[:-len('__gt')], '>'
        elif key.endswith('__gte'):
            col, op = col[:-len('__gte')], '>='
        elif key.endswith('__like'):
            col, op = col[:-len('__like')], 'LIKE'
        elif key.endswith('__not_like'):
            col, op = col[:-len('__not_like')], 'NOT LIKE'
        elif key.endswith('__in') or key.endswith('__not_in'):
            if key.endswith('__in'):
                col, op, one_val_op = col[:-len('__in')], 'IN', '='
            else:
                col, op, one_val_op = col[:-len('__not_in')], 'NOT IN', '!='
            if constraint:
                if isinstance(constraint, (list, set, tuple)):
                    if len(constraint) == 1:
                        # single element: plain comparison avoids IN overhead
                        values[f'{key}{tag}'] = next(iter(constraint))
                        sql.append(f'{col} {one_val_op} :{key}{tag}')
                    else:
                        keys = []
                        for i, val in enumerate(constraint):
                            keys.append(f':{key}{tag}_{i}')
                            values[f'{key}{tag}_{i}'] = val
                        sql.append(f'{col} {op} ({", ".join(keys)})')
                elif isinstance(constraint, str):
                    # raw-SQL subquery
                    sql.append(f'{col} {op} ({constraint})')
                else:
                    raise ValueError(f"{col} requires a list, set or string as constraint value.")
            continue
        elif key.endswith('__any') or key.endswith('__or'):
            where, subvalues = constraints_to_sql(constraint, ' OR ', key+tag+'_')
            sql.append(f'({where})')
            values.update(subvalues)
            continue
        if key.endswith('__and'):
            where, subvalues = constraints_to_sql(constraint, ' AND ', key+tag+'_')
            sql.append(f'({where})')
            values.update(subvalues)
            continue
        sql.append(f'{col} {op} :{prepend_key}{key}{tag}')
        values[prepend_key+key+tag] = constraint
    return joiner.join(sql) if sql else '', values
def query(select, **constraints) -> Tuple[str, Dict[str, Any]]:
    """Build a full SQL statement from a SELECT prefix and constraint kwargs.

    Meta-keys handled here: ``limit``, ``offset``, ``order_by`` (string or
    list), ``group_by`` and ``accounts`` (expanded into an ``account__in``
    constraint). All remaining kwargs are compiled by ``constraints_to_sql``
    into the WHERE clause. Returns (sql, bind_values).
    """
    pieces = [select]
    limit = constraints.pop('limit', None)
    offset = constraints.pop('offset', None)
    order_by = constraints.pop('order_by', None)
    group_by = constraints.pop('group_by', None)
    accounts = constraints.pop('accounts', [])
    if accounts:
        constraints['account__in'] = [a.public_key.address for a in accounts]
    where, values = constraints_to_sql(constraints)
    if where:
        pieces.extend(['WHERE', where])
    if group_by is not None:
        pieces.append(f'GROUP BY {group_by}')
    if order_by:
        pieces.append('ORDER BY')
        if isinstance(order_by, str):
            pieces.append(order_by)
        elif isinstance(order_by, list):
            pieces.append(', '.join(order_by))
        else:
            raise ValueError("order_by must be string or list")
    if limit is not None:
        pieces.append(f'LIMIT {limit}')
    if offset is not None:
        pieces.append(f'OFFSET {offset}')
    return ' '.join(pieces), values
def interpolate(sql, values):
    """Render a parameterized SQL string with its bind values inlined.

    For logging/debugging only — the output is not safely escaped for
    execution. Names are substituted in reverse-sorted order so that a name
    that is a prefix of another (':a' vs ':ab') cannot clobber it.
    """
    for name in sorted(values, reverse=True):
        raw = values[name]
        if isinstance(raw, bytes):
            rendered = f"X'{hexlify(raw).decode()}'"
        elif isinstance(raw, str):
            rendered = f"'{raw}'"
        else:
            rendered = str(raw)
        sql = sql.replace(f":{name}", rendered)
    return sql
def constrain_single_or_list(constraints, column, value, convert=lambda x: x, negate=False):
    """Add an equality/membership constraint for *column* to *constraints*.

    ``value`` may be a scalar or a list; each element is passed through
    ``convert``. With ``negate=True`` the constraint is inverted and also
    matches NULL (an __or group of __is_null with __not/__not_in). ``None``
    and an empty list leave *constraints* untouched. Returns *constraints*
    for chaining.
    """
    if value is None:
        return constraints
    if isinstance(value, list):
        converted = [convert(v) for v in value]
        if len(converted) == 1:
            if negate:
                constraints[f"{column}__or"] = {
                    f"{column}__is_null": True,
                    f"{column}__not": converted[0]
                }
            else:
                constraints[column] = converted[0]
        elif converted:
            if negate:
                constraints[f"{column}__or"] = {
                    f"{column}__is_null": True,
                    f"{column}__not_in": converted
                }
            else:
                constraints[f"{column}__in"] = converted
    elif negate:
        constraints[f"{column}__or"] = {
            f"{column}__is_null": True,
            f"{column}__not": convert(value)
        }
    else:
        constraints[column] = convert(value)
    return constraints
class SQLiteMixin:
    """Base class managing an AIOSQLite connection plus schema lifecycle.

    Subclasses define SCHEMA_VERSION and CREATE_TABLES_QUERY; on open(), a
    version mismatch causes all existing tables to be dropped and recreated.
    """
    SCHEMA_VERSION: Optional[str] = None
    CREATE_TABLES_QUERY: str
    # sqlite has a limit on bind variables per statement; batch queries below it
    MAX_QUERY_VARIABLES = 900
    CREATE_VERSION_TABLE = """
        create table if not exists version (
            version text
        );
    """
    def __init__(self, path):
        # path to the database file
        self._db_path = path
        # set in open()
        self.db: AIOSQLite = None
        self.ledger = None
    async def open(self):
        """Connect to the database, migrating (drop + recreate) on version mismatch."""
        log.info("connecting to database: %s", self._db_path)
        self.db = await AIOSQLite.connect(self._db_path, isolation_level=None)
        if self.SCHEMA_VERSION:
            tables = [t[0] for t in await self.db.execute_fetchall(
                "SELECT name FROM sqlite_master WHERE type='table';"
            )]
            if tables:
                if 'version' in tables:
                    version = await self.db.execute_fetchone("SELECT version FROM version LIMIT 1;")
                    if version == (self.SCHEMA_VERSION,):
                        # schema is current, nothing to do
                        return
                # wrong or missing version: drop everything and start fresh
                await self.db.executescript('\n'.join(
                    f"DROP TABLE {table};" for table in tables
                ) + '\n' + 'PRAGMA WAL_CHECKPOINT(FULL);' + '\n' + 'VACUUM;')
            await self.db.execute(self.CREATE_VERSION_TABLE)
            await self.db.execute("INSERT INTO version VALUES (?)", (self.SCHEMA_VERSION,))
        await self.db.executescript(self.CREATE_TABLES_QUERY)
    async def close(self):
        """Close the underlying AIOSQLite connection."""
        await self.db.close()
    @staticmethod
    def _insert_sql(table: str, data: dict, ignore_duplicate: bool = False,
                    replace: bool = False) -> Tuple[str, List]:
        """Build an INSERT statement for *data*; ``replace`` wins over
        ``ignore_duplicate`` when both are set. Returns (sql, values)."""
        columns, values = [], []
        for column, value in data.items():
            columns.append(column)
            values.append(value)
        policy = ""
        if ignore_duplicate:
            policy = " OR IGNORE"
        if replace:
            policy = " OR REPLACE"
        sql = "INSERT{} INTO {} ({}) VALUES ({})".format(
            policy, table, ', '.join(columns), ', '.join(['?'] * len(values))
        )
        return sql, values
    @staticmethod
    def _update_sql(table: str, data: dict, where: str,
                    constraints: Union[list, tuple]) -> Tuple[str, list]:
        """Build an UPDATE statement setting *data* where *where* matches.
        Returns (sql, values) with WHERE bind values appended last."""
        columns, values = [], []
        for column, value in data.items():
            columns.append(f"{column} = ?")
            values.append(value)
        values.extend(constraints)
        sql = "UPDATE {} SET {} WHERE {}".format(
            table, ', '.join(columns), where
        )
        return sql, values
def dict_row_factory(cursor, row):
    """sqlite3 row factory returning each row as a {column_name: value} dict."""
    return {
        description[0]: value
        for description, value in zip(cursor.description, row)
    }
SQLITE_MAX_INTEGER = 9223372036854775807  # 2**63 - 1, largest value a sqlite INTEGER column holds
def _get_spendable_utxos(transaction: sqlite3.Connection, accounts: List, decoded_transactions: Dict[str, Transaction],
                         result: Dict[Tuple[bytes, int, bool], List[int]], reserved: List[Transaction],
                         amount_to_reserve: int, reserved_amount: int, floor: int, ceiling: int,
                         fee_per_byte: int) -> int:
    """Scan unspent, unreserved plain txos with floor <= amount < ceiling and
    accumulate them toward *amount_to_reserve*.

    Runs on the writer thread inside a transaction. Confirmed txos are taken
    first (smallest amount first); unconfirmed ones are kept aside and only
    used if the confirmed set falls short. Selected txos are appended to
    *result* (keyed by (raw, height, verified)) and their ids to *reserved*;
    each added txo is credited net of the fee its future input will cost.
    Returns the updated reserved amount (in dewies).
    """
    accounts_fmt = ",".join(["?"] * len(accounts))
    txo_query = f"""
        SELECT tx.txid, txo.txoid, tx.raw, tx.height, txo.position as nout, tx.is_verified, txo.amount FROM txo
        INNER JOIN account_address USING (address)
        LEFT JOIN txi USING (txoid)
        INNER JOIN tx USING (txid)
        WHERE txo.txo_type=0 AND txi.txoid IS NULL AND tx.txid IS NOT NULL AND NOT txo.is_reserved
        AND txo.amount >= ? AND txo.amount < ?
        """
    if accounts:
        txo_query += f"""
            AND account_address.account {'= ?' if len(accounts_fmt) == 1 else 'IN (' + accounts_fmt + ')'}
        """
    txo_query += """
        ORDER BY txo.amount ASC, tx.height DESC
    """
    # prefer confirmed, but save unconfirmed utxos from this selection in case they are needed
    unconfirmed = []
    for row in transaction.execute(txo_query, (floor, ceiling, *accounts)):
        (txid, txoid, raw, height, nout, verified, amount) = row.values()
        # multiple txos can come from the same tx, only decode it once and cache
        if txid not in decoded_transactions:
            # cache the decoded transaction
            decoded_transactions[txid] = Transaction(raw)
        decoded_tx = decoded_transactions[txid]
        if verified:
            # add the txo to the reservation, minus the fee for including it
            reserved_amount += amount
            reserved_amount -= Input.spend(decoded_tx.outputs[nout]).size * fee_per_byte
            # mark it as reserved
            result[(raw, height, verified)].append(nout)
            reserved.append(txoid)
            # if we've reserved enough, return
            if reserved_amount >= amount_to_reserve:
                return reserved_amount
        else:
            # save the unconfirmed txo for possible use later, if still needed
            unconfirmed.append((txid, txoid, raw, height, nout, verified, amount))
    # reverse + pop() consumes the unconfirmed list smallest-amount first in O(1) per item
    unconfirmed.reverse()
    # add available unconfirmed txos if any were previously found
    while unconfirmed and reserved_amount < amount_to_reserve:
        (txid, txoid, raw, height, nout, verified, amount) = unconfirmed.pop()
        # it's already decoded
        decoded_tx = decoded_transactions[txid]
        reserved_amount += amount
        reserved_amount -= Input.spend(decoded_tx.outputs[nout]).size * fee_per_byte
        result[(raw, height, verified)].append(nout)
        reserved.append(txoid)
    return reserved_amount
def get_and_reserve_spendable_utxos(transaction: sqlite3.Connection, accounts: List, amount_to_reserve: int, floor: int,
                                    fee_per_byte: int, set_reserved: bool, return_insufficient_funds: bool,
                                    base_multiplier: int = 100):
    """Collect spendable utxos worth at least *amount_to_reserve* dewies.

    Repeatedly widens an exponentially growing amount window
    [floor, floor*multiplier) via _get_spendable_utxos; after 5 consecutive
    empty windows (the multiplier squares each miss) or overflow of sqlite's
    integer range, the search stops. When enough was found and *set_reserved*
    is true, the selected txos are flagged is_reserved in the same write
    transaction. Returns {(raw, height, verified): [nout, ...]}; on shortfall
    returns the partial result only if *return_insufficient_funds* is set,
    otherwise {}.
    """
    txs = defaultdict(list)
    decoded_transactions = {}
    reserved = []
    reserved_dewies = 0
    multiplier = base_multiplier
    gap_count = 0
    while reserved_dewies < amount_to_reserve and gap_count < 5 and floor * multiplier < SQLITE_MAX_INTEGER:
        previous_reserved_dewies = reserved_dewies
        reserved_dewies = _get_spendable_utxos(
            transaction, accounts, decoded_transactions, txs, reserved, amount_to_reserve, reserved_dewies,
            floor, floor * multiplier, fee_per_byte
        )
        floor *= multiplier
        if previous_reserved_dewies == reserved_dewies:
            # nothing found in this window: widen faster and count the miss
            gap_count += 1
            multiplier **= 2
        else:
            gap_count = 0
            multiplier = base_multiplier
    if reserved_dewies >= amount_to_reserve:
        if set_reserved:
            transaction.executemany("UPDATE txo SET is_reserved = ? WHERE txoid = ?",
                                    [(True, txoid) for txoid in reserved]).fetchall()
        return txs
    return txs if return_insufficient_funds else {}
class Database(SQLiteMixin):
    """Wallet database: accounts, addresses, transactions and their txis/txos."""
    SCHEMA_VERSION = "1.5"
    PRAGMAS = """
        pragma journal_mode=WAL;
    """
    # derived keys per account; (account, address) is the natural key
    CREATE_ACCOUNT_TABLE = """
        create table if not exists account_address (
            account text not null,
            address text not null,
            chain integer not null,
            pubkey blob not null,
            chain_code blob not null,
            n integer not null,
            depth integer not null,
            primary key (account, address)
        );
        create index if not exists address_account_idx on account_address (address, account);
    """
    # per-address server hash-history and usage counter
    CREATE_PUBKEY_ADDRESS_TABLE = """
        create table if not exists pubkey_address (
            address text primary key,
            history text,
            used_times integer not null default 0
        );
    """
    CREATE_TX_TABLE = """
        create table if not exists tx (
            txid text primary key,
            raw blob not null,
            height integer not null,
            position integer not null,
            is_verified boolean not null default 0,
            purchased_claim_id text,
            day integer
        );
        create index if not exists tx_purchased_claim_id_idx on tx (purchased_claim_id);
    """
    # transaction outputs, including claim/support/purchase annotations
    CREATE_TXO_TABLE = """
        create table if not exists txo (
            txid text references tx,
            txoid text primary key,
            address text references pubkey_address,
            position integer not null,
            amount integer not null,
            script blob not null,
            is_reserved boolean not null default 0,
            txo_type integer not null default 0,
            claim_id text,
            claim_name text,
            channel_id text,
            reposted_claim_id text
        );
        create index if not exists txo_txid_idx on txo (txid);
        create index if not exists txo_address_idx on txo (address);
        create index if not exists txo_claim_id_idx on txo (claim_id, txo_type);
        create index if not exists txo_claim_name_idx on txo (claim_name);
        create index if not exists txo_txo_type_idx on txo (txo_type);
        create index if not exists txo_channel_id_idx on txo (channel_id);
        create index if not exists txo_reposted_claim_idx on txo (reposted_claim_id);
    """
    # transaction inputs; txoid links back to the output being spent
    CREATE_TXI_TABLE = """
        create table if not exists txi (
            txid text references tx,
            txoid text references txo primary key,
            address text references pubkey_address,
            position integer not null
        );
        create index if not exists txi_address_idx on txi (address);
        create index if not exists first_input_idx on txi (txid, address) where position=0;
    """
    CREATE_TABLES_QUERY = (
        PRAGMAS +
        CREATE_ACCOUNT_TABLE +
        CREATE_PUBKEY_ADDRESS_TABLE +
        CREATE_TX_TABLE +
        CREATE_TXO_TABLE +
        CREATE_TXI_TABLE
    )
    async def open(self):
        """Open the database and switch the writer connection to dict rows."""
        await super().open()
        self.db.writer_connection.row_factory = dict_row_factory
    def txo_to_row(self, tx, txo):
        """Flatten a txo into a dict of column values for the txo table."""
        row = {
            'txid': tx.id,
            'txoid': txo.id,
            'address': txo.get_address(self.ledger),
            'position': txo.position,
            'amount': txo.amount,
            'script': sqlite3.Binary(txo.script.source)
        }
        if txo.is_claim:
            if txo.can_decode_claim:
                claim = txo.claim
                row['txo_type'] = TXO_TYPES.get(claim.claim_type, TXO_TYPES['stream'])
                if claim.is_repost:
                    row['reposted_claim_id'] = claim.repost.reference.claim_id
                if claim.is_signed:
                    row['channel_id'] = claim.signing_channel_id
            else:
                # undecodable claims are recorded as generic streams
                row['txo_type'] = TXO_TYPES['stream']
        elif txo.is_support:
            row['txo_type'] = TXO_TYPES['support']
        elif txo.purchase is not None:
            row['txo_type'] = TXO_TYPES['purchase']
            row['claim_id'] = txo.purchased_claim_id
        if txo.script.is_claim_involved:
            row['claim_id'] = txo.claim_id
            row['claim_name'] = txo.claim_name
        return row
    def tx_to_row(self, tx):
        """Flatten a transaction into a dict of column values for the tx table."""
        row = {
            'txid': tx.id,
            'raw': sqlite3.Binary(tx.raw),
            'height': tx.height,
            'position': tx.position,
            'is_verified': tx.is_verified,
            'day': tx.get_julian_day(self.ledger),
        }
        txos = tx.outputs
        if len(txos) >= 2 and txos[1].can_decode_purchase_data:
            # a second output carrying purchase data marks this tx as a purchase
            txos[0].purchase = txos[1]
            row['purchased_claim_id'] = txos[1].purchase_data.claim_id
        return row
    async def insert_transaction(self, tx):
        """Insert *tx* as a new row in the tx table."""
        await self.db.execute_fetchall(*self._insert_sql('tx', self.tx_to_row(tx)))
    async def update_transaction(self, tx):
        """Update height/position/verification of an existing tx row."""
        await self.db.execute_fetchall(*self._update_sql("tx", {
            'height': tx.height, 'position': tx.position, 'is_verified': tx.is_verified
        }, 'txid = ?', (tx.id,)))
    def _transaction_io(self, conn: sqlite3.Connection, tx: Transaction, address, txhash):
        """Write one transaction's row plus its relevant txis/txos (runs on the
        writer thread inside an open transaction).

        Inputs spending our own outputs are recorded; outputs are recorded when
        paid to *txhash* or when any input was ours (change detection).
        """
        conn.execute(*self._insert_sql('tx', self.tx_to_row(tx), replace=True)).fetchall()
        is_my_input = False
        for txi in tx.inputs:
            if txi.txo_ref.txo is not None:
                txo = txi.txo_ref.txo
                if txo.has_address and txo.get_address(self.ledger) == address:
                    is_my_input = True
                    conn.execute(*self._insert_sql("txi", {
                        'txid': tx.id,
                        'txoid': txo.id,
                        'address': address,
                        'position': txi.position
                    }, ignore_duplicate=True)).fetchall()
        for txo in tx.outputs:
            if txo.script.is_pay_pubkey_hash and (txo.pubkey_hash == txhash or is_my_input):
                conn.execute(*self._insert_sql(
                    "txo", self.txo_to_row(tx, txo), ignore_duplicate=True
                )).fetchall()
            elif txo.script.is_pay_script_hash:
                # TODO: p2sh outputs are not tracked yet
                log.warning('Database.save_transaction_io: pay script hash is not implemented!')
    def save_transaction_io(self, tx: Transaction, address, txhash, history):
        """Save one transaction's io plus the address history (batch of one)."""
        return self.save_transaction_io_batch([tx], address, txhash, history)
    def save_transaction_io_batch(self, txs: Iterable[Transaction], address, txhash, history):
        """Save many transactions and update the address's history/used_times,
        all inside a single write transaction."""
        # history is a run of 'txid:height:' pairs, so uses = ':' count / 2
        history_count = history.count(':') // 2
        def __many(conn):
            for tx in txs:
                self._transaction_io(conn, tx, address, txhash)
            conn.execute(
                "UPDATE pubkey_address SET history = ?, used_times = ? WHERE address = ?",
                (history, history_count, address)
            ).fetchall()
        return self.db.run(__many)
async def reserve_outputs(self, txos, is_reserved=True):
txoids = [(is_reserved, txo.id) for txo in txos]
await self.db.executemany("UPDATE txo SET is_reserved = ? WHERE txoid = ?", txoids)
    async def release_outputs(self, txos):
        """Un-reserve previously reserved outputs."""
        await self.reserve_outputs(txos, is_reserved=False)
    async def rewind_blockchain(self, above_height):
        """Placeholder for reorg handling; currently a no-op that reports success."""
        return True
    async def get_spendable_utxos(self, ledger, reserve_amount, accounts: Optional[Iterable], min_amount: int = 1,
                                  fee_per_byte: int = 50, set_reserved: bool = True,
                                  return_insufficient_funds: bool = False) -> List:
        """Select (and by default reserve) utxos covering *reserve_amount*,
        returning them as input estimators for transaction construction."""
        to_spend = await self.db.run(
            get_and_reserve_spendable_utxos, tuple(account.id for account in accounts), reserve_amount, min_amount,
            fee_per_byte, set_reserved, return_insufficient_funds
        )
        txos = []
        for (raw, height, verified), positions in to_spend.items():
            tx = Transaction(raw, height=height, is_verified=verified)
            for nout in positions:
                txos.append(tx.outputs[nout].get_estimator(ledger))
        return txos
    async def select_transactions(self, cols, accounts=None, read_only=False, **constraints):
        """Fetch rows from the tx table; without an explicit txid constraint
        the query is limited to transactions touching the given accounts'
        addresses (as sender or receiver)."""
        if not {'txid', 'txid__in'}.intersection(constraints):
            assert accounts, "'accounts' argument required when no 'txid' constraint is present"
            where, values = constraints_to_sql({
                '$$account_address.account__in': [a.public_key.address for a in accounts]
            })
            constraints['txid__in'] = f"""
                SELECT txo.txid FROM txo JOIN account_address USING (address) WHERE {where}
                UNION
                SELECT txi.txid FROM txi JOIN account_address USING (address) WHERE {where}
            """
            constraints.update(values)
        return await self.db.execute_fetchall(
            *query(f"SELECT {cols} FROM tx", **constraints), read_only=read_only
        )
TXO_NOT_MINE = Output(None, None, is_my_output=False)
    async def get_transactions(self, wallet=None, **constraints):
        """Return fully-hydrated Transaction objects matching *constraints*.

        Fetches the raw tx rows, then annotates outputs (and the outputs
        referenced by inputs) via get_txos in MAX_QUERY_VARIABLES-sized batches
        to stay under sqlite's bind-variable limit; finally links purchase
        outputs to their purchase-data companions.
        """
        include_is_spent = constraints.pop('include_is_spent', False)
        include_is_my_input = constraints.pop('include_is_my_input', False)
        include_is_my_output = constraints.pop('include_is_my_output', False)
        tx_rows = await self.select_transactions(
            'txid, raw, height, position, is_verified',
            order_by=constraints.pop('order_by', ["height=0 DESC", "height DESC", "position DESC"]),
            **constraints
        )
        if not tx_rows:
            return []
        txids, txs, txi_txoids = [], [], []
        for row in tx_rows:
            txids.append(row['txid'])
            txs.append(Transaction(
                raw=row['raw'], height=row['height'], position=row['position'],
                is_verified=bool(row['is_verified'])
            ))
            for txi in txs[-1].inputs:
                txi_txoids.append(txi.txo_ref.id)
        step = self.MAX_QUERY_VARIABLES
        # annotations for this wallet's own outputs in the fetched txs
        annotated_txos = {}
        for offset in range(0, len(txids), step):
            annotated_txos.update({
                txo.id: txo for txo in
                (await self.get_txos(
                    wallet=wallet,
                    txid__in=txids[offset:offset+step], order_by='txo.txid',
                    include_is_spent=include_is_spent,
                    include_is_my_input=include_is_my_input,
                    include_is_my_output=include_is_my_output,
                ))
            })
        # outputs being spent by the fetched txs' inputs
        referenced_txos = {}
        for offset in range(0, len(txi_txoids), step):
            referenced_txos.update({
                txo.id: txo for txo in
                (await self.get_txos(
                    wallet=wallet,
                    txoid__in=txi_txoids[offset:offset+step], order_by='txo.txoid',
                    include_is_my_output=include_is_my_output,
                ))
            })
        for tx in txs:
            for txi in tx.inputs:
                txo = referenced_txos.get(txi.txo_ref.id)
                if txo:
                    txi.txo_ref = txo.ref
            for txo in tx.outputs:
                _txo = annotated_txos.get(txo.id)
                if _txo:
                    txo.update_annotations(_txo)
                else:
                    txo.update_annotations(self.TXO_NOT_MINE)
        for tx in txs:
            txos = tx.outputs
            if len(txos) >= 2 and txos[1].can_decode_purchase_data:
                # second output carries the purchase data for the first
                txos[0].purchase = txos[1]
        return txs
async def get_transaction_count(self, **constraints):
constraints.pop('wallet', None)
constraints.pop('offset', None)
constraints.pop('limit', None)
constraints.pop('order_by', None)
count = await self.select_transactions('COUNT(*) as total', **constraints)
return count[0]['total'] or 0
async def get_transaction(self, **constraints):
txs = await self.get_transactions(limit=1, **constraints)
if txs:
return txs[0]
    async def select_txos(
            self, cols, accounts=None, is_my_input=None, is_my_output=True,
            is_my_input_or_output=None, exclude_internal_transfers=False,
            include_is_spent=False, include_is_my_input=False,
            is_spent=None, read_only=False, **constraints):
        """Core txo query builder.

        Translates ownership flags (mine as sender/receiver/either), spent
        state and internal-transfer exclusion into SQL constraints against a
        txo/tx join, adding LEFT JOINs for spent-state ('spent' alias) and
        first-input (sender) detection only when needed.
        """
        # qualify ambiguous column constraints with the txo table
        for rename_col in ('txid', 'txoid'):
            for rename_constraint in (rename_col, rename_col+'__in', rename_col+'__not_in'):
                if rename_constraint in constraints:
                    constraints['txo.'+rename_constraint] = constraints.pop(rename_constraint)
        if accounts:
            account_in_sql, values = constraints_to_sql({
                '$$account__in': [a.public_key.address for a in accounts]
            })
            # subquery yielding all addresses owned by the given accounts
            my_addresses = f"SELECT address FROM account_address WHERE {account_in_sql}"
            constraints.update(values)
            if is_my_input_or_output:
                include_is_my_input = True
                constraints['received_or_sent__or'] = {
                    'txo.address__in': my_addresses,
                    'sent__and': {
                        'txi.address__is_not_null': True,
                        'txi.address__in': my_addresses
                    }
                }
            else:
                if is_my_output:
                    constraints['txo.address__in'] = my_addresses
                elif is_my_output is False:
                    constraints['txo.address__not_in'] = my_addresses
                if is_my_input:
                    include_is_my_input = True
                    constraints['txi.address__is_not_null'] = True
                    constraints['txi.address__in'] = my_addresses
                elif is_my_input is False:
                    include_is_my_input = True
                    constraints['is_my_input_false__or'] = {
                        'txi.address__is_null': True,
                        'txi.address__not_in': my_addresses
                    }
            if exclude_internal_transfers:
                # drop plain payments both sent and received by this wallet
                include_is_my_input = True
                constraints['exclude_internal_payments__or'] = {
                    'txo.txo_type__not': TXO_TYPES['other'],
                    'txo.address__not_in': my_addresses,
                    'txi.address__is_null': True,
                    'txi.address__not_in': my_addresses,
                }
        sql = [f"SELECT {cols} FROM txo JOIN tx ON (tx.txid=txo.txid)"]
        if is_spent:
            constraints['spent.txoid__is_not_null'] = True
        elif is_spent is False:
            constraints['is_reserved'] = False
            constraints['spent.txoid__is_null'] = True
        if include_is_spent or is_spent is not None:
            sql.append("LEFT JOIN txi AS spent ON (spent.txoid=txo.txoid)")
        if include_is_my_input:
            # first input (position=0) identifies the sender of the tx
            sql.append("LEFT JOIN txi ON (txi.position=0 AND txi.txid=txo.txid)")
        return await self.db.execute_fetchall(*query(' '.join(sql), **constraints), read_only=read_only)
    async def get_txos(self, wallet=None, no_tx=False, read_only=False, **constraints):
        """Fetch TXOs matching ``constraints`` and return them as Output objects.

        Args:
            wallet: used to resolve "my" account addresses and, for channel
                claims, the channel private key.
            no_tx: if True, build bare Outputs without deserializing the full
                transaction (cheaper when the tx itself is not needed).
            read_only: route the query through a read-only db connection.
            **constraints: SQL constraints plus the ``include_*`` flags
                consumed below.

        Returns:
            list of Output objects, annotated according to the ``include_*``
            flags, with ``channel``/``private_key`` resolved where possible.
        """
        # The spent/input flags are only *read* here (select_txos also
        # consumes them as keyword parameters), while the output/tips flags
        # are popped because select_txos does not understand them.
        include_is_spent = constraints.get('include_is_spent', False)
        include_is_my_input = constraints.get('include_is_my_input', False)
        include_is_my_output = constraints.pop('include_is_my_output', False)
        include_received_tips = constraints.pop('include_received_tips', False)
        select_columns = [
            "tx.txid, raw, tx.height, tx.position as tx_position, tx.is_verified, "
            "txo_type, txo.position as txo_position, amount, script"
        ]
        my_accounts = {a.public_key.address for a in wallet.accounts} if wallet else set()
        my_accounts_sql = ""
        if include_is_my_output or include_is_my_input:
            my_accounts_sql, values = constraints_to_sql({'$$account__in#_wallet': my_accounts})
            constraints.update(values)
        if include_is_my_output and my_accounts:
            if constraints.get('is_my_output', None) in (True, False):
                # Caller already constrained is_my_output, so its value is
                # known for every returned row; emit it as a constant column.
                select_columns.append(f"{1 if constraints['is_my_output'] else 0} AS is_my_output")
            else:
                select_columns.append(f"""(
                    txo.address IN (SELECT address FROM account_address WHERE {my_accounts_sql})
                ) AS is_my_output""")
        if include_is_my_input and my_accounts:
            if constraints.get('is_my_input', None) in (True, False):
                select_columns.append(f"{1 if constraints['is_my_input'] else 0} AS is_my_input")
            else:
                select_columns.append(f"""(
                    txi.address IS NOT NULL AND
                    txi.address IN (SELECT address FROM account_address WHERE {my_accounts_sql})
                ) AS is_my_input""")
        if include_is_spent:
            select_columns.append("spent.txoid IS NOT NULL AS is_spent")
        if include_received_tips:
            # Sum of all unspent supports sent to this claim from my accounts.
            select_columns.append(f"""(
            SELECT COALESCE(SUM(support.amount), 0) FROM txo AS support WHERE
                support.claim_id = txo.claim_id AND
                support.txo_type = {TXO_TYPES['support']} AND
                support.address IN (SELECT address FROM account_address WHERE {my_accounts_sql}) AND
                support.txoid NOT IN (SELECT txoid FROM txi)
            ) AS received_tips""")
        if 'order_by' not in constraints or constraints['order_by'] == 'height':
            # Default ordering: unconfirmed (height=0) first, then newest.
            constraints['order_by'] = [
                "tx.height=0 DESC", "tx.height DESC", "tx.position DESC", "txo.position"
            ]
        elif constraints.get('order_by', None) == 'none':
            del constraints['order_by']
        rows = await self.select_txos(', '.join(select_columns), read_only=read_only, **constraints)
        txos = []
        txs = {}  # txid -> Transaction cache so each raw tx is parsed once
        for row in rows:
            if no_tx:
                txo = Output(
                    amount=row['amount'],
                    script=OutputScript(row['script']),
                    tx_ref=TXRefImmutable.from_id(row['txid'], row['height']),
                    position=row['txo_position']
                )
            else:
                if row['txid'] not in txs:
                    txs[row['txid']] = Transaction(
                        row['raw'], height=row['height'], position=row['tx_position'],
                        is_verified=bool(row['is_verified'])
                    )
                txo = txs[row['txid']].outputs[row['txo_position']]
            if include_is_spent:
                txo.is_spent = bool(row['is_spent'])
            if include_is_my_input:
                txo.is_my_input = bool(row['is_my_input'])
            if include_is_my_output:
                txo.is_my_output = bool(row['is_my_output'])
            if include_is_my_input and include_is_my_output:
                # An "internal transfer" is a plain (non-claim) output sent
                # from my account back to my account.
                if txo.is_my_input and txo.is_my_output and row['txo_type'] == TXO_TYPES['other']:
                    txo.is_internal_transfer = True
                else:
                    txo.is_internal_transfer = False
            if include_received_tips:
                txo.received_tips = row['received_tips']
            txos.append(txo)
        # Resolve signing channels and channel private keys.
        channel_ids = set()
        for txo in txos:
            if txo.is_claim and txo.can_decode_claim:
                if txo.claim.is_signed:
                    channel_ids.add(txo.claim.signing_channel_id)
                if txo.claim.is_channel and wallet:
                    for account in wallet.accounts:
                        private_key = await account.get_channel_private_key(
                            txo.claim.channel.public_key_bytes
                        )
                        if private_key:
                            txo.private_key = private_key
                            break
        if channel_ids:
            channels = {
                txo.claim_id: txo for txo in
                (await self.get_channels(
                    wallet=wallet,
                    claim_id__in=channel_ids,
                    read_only=read_only
                ))
            }
            for txo in txos:
                if txo.is_claim and txo.can_decode_claim:
                    txo.channel = channels.get(txo.claim.signing_channel_id, None)
        return txos
@staticmethod
def _clean_txo_constraints_for_aggregation(constraints):
constraints.pop('include_is_spent', None)
constraints.pop('include_is_my_input', None)
constraints.pop('include_is_my_output', None)
constraints.pop('include_received_tips', None)
constraints.pop('wallet', None)
constraints.pop('resolve', None)
constraints.pop('offset', None)
constraints.pop('limit', None)
constraints.pop('order_by', None)
async def get_txo_count(self, **constraints):
self._clean_txo_constraints_for_aggregation(constraints)
count = await self.select_txos('COUNT(*) AS total', **constraints)
return count[0]['total'] or 0
async def get_txo_sum(self, **constraints):
self._clean_txo_constraints_for_aggregation(constraints)
result = await self.select_txos('SUM(amount) AS total', **constraints)
return result[0]['total'] or 0
    async def get_txo_plot(self, start_day=None, days_back=0, end_day=None, days_after=None, **constraints):
        """Aggregate TXO amounts per (julian) day, suitable for plotting.

        The day window is either relative to the current chain tip
        (``days_back``) or an explicit ISO date range given by
        ``start_day`` plus ``end_day``/``days_after``.
        """
        self._clean_txo_constraints_for_aggregation(constraints)
        if start_day is None:
            # No explicit start: look back `days_back` days from chain tip.
            constraints['day__gte'] = self.ledger.headers.estimated_julian_day(
                self.ledger.headers.height
            ) - days_back
        else:
            constraints['day__gte'] = date_to_julian_day(
                date.fromisoformat(start_day)
            )
            if end_day is not None:
                constraints['day__lte'] = date_to_julian_day(
                    date.fromisoformat(end_day)
                )
            elif days_after is not None:
                constraints['day__lte'] = constraints['day__gte'] + days_after
        return await self.select_txos(
            "DATE(day) AS day, SUM(amount) AS total",
            group_by='day', order_by='day', **constraints
        )
    def get_utxos(self, read_only=False, **constraints):
        """UTXOs are simply TXOs that have not been spent."""
        return self.get_txos(is_spent=False, read_only=read_only, **constraints)
    def get_utxo_count(self, **constraints):
        """Count TXOs that have not been spent."""
        return self.get_txo_count(is_spent=False, **constraints)
async def get_balance(self, wallet=None, accounts=None, read_only=False, **constraints):
assert wallet or accounts, \
"'wallet' or 'accounts' constraints required to calculate balance"
constraints['accounts'] = accounts or wallet.accounts
balance = await self.select_txos(
'SUM(amount) as total', is_spent=False, read_only=read_only, **constraints
)
return balance[0]['total'] or 0
    async def select_addresses(self, cols, read_only=False, **constraints):
        """Low-level SELECT over pubkey_address joined with account_address."""
        return await self.db.execute_fetchall(*query(
            f"SELECT {cols} FROM pubkey_address JOIN account_address USING (address)",
            **constraints
        ), read_only=read_only)
async def get_addresses(self, cols=None, read_only=False, **constraints):
cols = cols or (
'address', 'account', 'chain', 'history', 'used_times',
'pubkey', 'chain_code', 'n', 'depth'
)
addresses = await self.select_addresses(', '.join(cols), read_only=read_only, **constraints)
if 'pubkey' in cols:
for address in addresses:
address['pubkey'] = PubKey(
self.ledger, address.pop('pubkey'), address.pop('chain_code'),
address.pop('n'), address.pop('depth')
)
return addresses
    async def get_address_count(self, cols=None, read_only=False, **constraints):
        """Count addresses matching ``constraints``.

        NOTE(review): ``cols`` is accepted for signature parity with
        get_addresses() but is intentionally ignored here.
        """
        count = await self.select_addresses('COUNT(*) as total', read_only=read_only, **constraints)
        return count[0]['total'] or 0
async def get_address(self, read_only=False, **constraints):
addresses = await self.get_addresses(read_only=read_only, limit=1, **constraints)
if addresses:
return addresses[0]
    async def add_keys(self, account, chain, pubkeys):
        """Persist derived public keys for ``account``/``chain``.

        Inserts the key rows into account_address and ensures a matching
        pubkey_address row exists; both statements use INSERT OR IGNORE so
        re-adding already-known keys is harmless.
        """
        await self.db.executemany(
            "insert or ignore into account_address "
            "(account, address, chain, pubkey, chain_code, n, depth) values "
            "(?, ?, ?, ?, ?, ?, ?)", ((
                account.id, k.address, chain,
                sqlite3.Binary(k.pubkey_bytes),
                sqlite3.Binary(k.chain_code),
                k.n, k.depth
            ) for k in pubkeys)
        )
        await self.db.executemany(
            "insert or ignore into pubkey_address (address) values (?)",
            ((pubkey.address,) for pubkey in pubkeys)
        )
    async def _set_address_history(self, address, history):
        # `history` is a flat "txid:height:" string; each tx contributes two
        # ':' separators, so count(':')//2 equals the number of transactions.
        await self.db.execute_fetchall(
            "UPDATE pubkey_address SET history = ?, used_times = ? WHERE address = ?",
            (history, history.count(':')//2, address)
        )
    async def set_address_history(self, address, history):
        """Public wrapper around _set_address_history()."""
        await self._set_address_history(address, history)
@staticmethod
def constrain_purchases(constraints):
accounts = constraints.pop('accounts', None)
assert accounts, "'accounts' argument required to find purchases"
if not {'purchased_claim_id', 'purchased_claim_id__in'}.intersection(constraints):
constraints['purchased_claim_id__is_not_null'] = True
constraints.update({
f'$account{i}': a.public_key.address for i, a in enumerate(accounts)
})
account_values = ', '.join([f':$account{i}' for i in range(len(accounts))])
constraints['txid__in'] = f"""
SELECT txid FROM txi JOIN account_address USING (address)
WHERE account_address.account IN ({account_values})
"""
    async def get_purchases(self, **constraints):
        """Return the purchase output (outputs[0]) of each purchase tx."""
        self.constrain_purchases(constraints)
        return [tx.outputs[0] for tx in await self.get_transactions(**constraints)]
    def get_purchase_count(self, **constraints):
        """Count purchase transactions made by the given accounts."""
        self.constrain_purchases(constraints)
        return self.get_transaction_count(**constraints)
@staticmethod
def constrain_claims(constraints):
if {'txo_type', 'txo_type__in'}.intersection(constraints):
return
claim_types = constraints.pop('claim_type', None)
if claim_types:
constrain_single_or_list(
constraints, 'txo_type', claim_types, lambda x: TXO_TYPES[x]
)
else:
constraints['txo_type__in'] = CLAIM_TYPES
    async def get_claims(self, read_only=False, **constraints) -> List[Output]:
        """Unspent claim outputs matching ``constraints``."""
        self.constrain_claims(constraints)
        return await self.get_utxos(read_only=read_only, **constraints)
    def get_claim_count(self, **constraints):
        """Count unspent claim outputs matching ``constraints``."""
        self.constrain_claims(constraints)
        return self.get_utxo_count(**constraints)
    @staticmethod
    def constrain_streams(constraints):
        # Streams are identified by their txo_type.
        constraints['txo_type'] = TXO_TYPES['stream']
    def get_streams(self, read_only=False, **constraints):
        """Stream claims (a subset of all claims)."""
        self.constrain_streams(constraints)
        return self.get_claims(read_only=read_only, **constraints)
    def get_stream_count(self, **constraints):
        """Count stream claims."""
        self.constrain_streams(constraints)
        return self.get_claim_count(**constraints)
    @staticmethod
    def constrain_channels(constraints):
        # Channels are identified by their txo_type.
        constraints['txo_type'] = TXO_TYPES['channel']
    def get_channels(self, **constraints):
        """Channel claims (a subset of all claims)."""
        self.constrain_channels(constraints)
        return self.get_claims(**constraints)
    def get_channel_count(self, **constraints):
        """Count channel claims."""
        self.constrain_channels(constraints)
        return self.get_claim_count(**constraints)
    @staticmethod
    def constrain_supports(constraints):
        # Supports are identified by their txo_type.
        constraints['txo_type'] = TXO_TYPES['support']
    def get_supports(self, **constraints):
        """Unspent support outputs (note: UTXOs, not claims)."""
        self.constrain_supports(constraints)
        return self.get_utxos(**constraints)
    def get_support_count(self, **constraints):
        """Count unspent support outputs."""
        self.constrain_supports(constraints)
        return self.get_utxo_count(**constraints)
    @staticmethod
    def constrain_collections(constraints):
        # Collections are identified by their txo_type.
        constraints['txo_type'] = TXO_TYPES['collection']
    def get_collections(self, **constraints):
        """Unspent collection outputs."""
        self.constrain_collections(constraints)
        return self.get_utxos(**constraints)
    def get_collection_count(self, **constraints):
        """Count unspent collection outputs."""
        self.constrain_collections(constraints)
        return self.get_utxo_count(**constraints)
    async def release_all_outputs(self, account=None):
        """Clear the is_reserved flag on TXOs — everywhere, or only for the
        outputs belonging to ``account`` when one is given."""
        if account is None:
            await self.db.execute_fetchall("UPDATE txo SET is_reserved = 0 WHERE is_reserved = 1")
        else:
            await self.db.execute_fetchall(
                "UPDATE txo SET is_reserved = 0 WHERE"
                "  is_reserved = 1 AND txo.address IN ("
                "    SELECT address from account_address WHERE account = ?"
                "  )", (account.public_key.address, )
            )
    def get_supports_summary(self, read_only=False, **constraints):
        """Unspent supports created by this wallet, as bare Outputs (no_tx)."""
        return self.get_txos(
            txo_type=TXO_TYPES['support'],
            is_spent=False, is_my_output=True,
            include_is_my_input=True,
            no_tx=True, read_only=read_only,
            **constraints
        )
| true | true |
f73560d7b5130038cedff239dd82760a17b30eb9 | 5,843 | py | Python | scripts/mgear/shifter_classic_components/foot_bk_01/guide.py | stormstudios/shifter_classic_components | 738411b7a2600884b8e32db752997ab07176fad1 | [
"MIT"
] | 1 | 2019-04-30T08:13:18.000Z | 2019-04-30T08:13:18.000Z | scripts/mgear/shifter_classic_components/foot_bk_01/guide.py | stormstudios/shifter_classic_components | 738411b7a2600884b8e32db752997ab07176fad1 | [
"MIT"
] | 80 | 2018-06-15T03:42:37.000Z | 2021-05-05T23:54:52.000Z | scripts/mgear/shifter_classic_components/foot_bk_01/guide.py | stormstudios/shifter_classic_components | 738411b7a2600884b8e32db752997ab07176fad1 | [
"MIT"
] | 12 | 2019-04-08T16:37:31.000Z | 2021-09-06T17:48:24.000Z | # MGEAR is under the terms of the MIT License
# Copyright (c) 2016 Jeremie Passerin, Miquel Campos
"""Guide Foot banking 01 module"""
from functools import partial
import pymel.core as pm
from mgear.shifter.component import guide
from mgear.core import transform, pyqt
from mgear.vendor.Qt import QtWidgets, QtCore
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
from maya.app.general.mayaMixin import MayaQDockWidget
import settingsUI as sui
# guide info
AUTHOR = "Jeremie Passerin, Miquel Campos"
URL = "www.jeremiepasserin.com, www.miquel-campos.com"
EMAIL = "geerem@hotmail.com, hello@miquel-campos.com"
VERSION = [1, 0, 0]
TYPE = "foot_bk_01"
NAME = "foot"
DESCRIPTION = "Foot with reversed controllers to control foot roll."
##########################################################
# CLASS
##########################################################
class Guide(guide.ComponentGuide):
    """Guide for the reverse-foot (foot banking) component.

    Builds the guide root, a chain of roll locators and the three pivot
    locators (heel, outer, inner) used for foot roll/banking.
    """

    compType = TYPE
    compName = NAME
    description = DESCRIPTION
    author = AUTHOR
    url = URL
    email = EMAIL
    version = VERSION
    connectors = ["leg_2jnt_01", "leg_ms_2jnt_01", "leg_3jnt_01"]

    def postInit(self):
        """Declare the saved transforms and the roll-locator count range."""
        self.save_transform = ["root", "#_loc", "heel", "outpivot", "inpivot"]
        self.addMinMax("#_loc", 1, -1)

    def addObjects(self):
        """Create the guide root, the roll locators and the pivot locators."""
        self.root = self.addRoot()
        self.locs = self.addLocMulti("#_loc", self.root)
        chain_centers = [self.root]
        chain_centers.extend(self.locs)
        self.dispcrv = self.addDispCurve("crv", chain_centers)
        # Heel and the two banking pivots, offset from the root.
        offset = transform.getOffsetPosition(self.root, [0, -1, -1])
        self.heel = self.addLoc("heel", self.root, offset)
        offset = transform.getOffsetPosition(self.root, [1, -1, -1])
        self.outpivot = self.addLoc("outpivot", self.root, offset)
        offset = transform.getOffsetPosition(self.root, [-1, -1, -1])
        self.inpivot = self.addLoc("inpivot", self.root, offset)
        pivot_centers = [self.root, self.heel, self.outpivot,
                         self.heel, self.inpivot]
        self.dispcrv = self.addDispCurve("1", pivot_centers)

    def addParameters(self):
        """Create the component settings parameters."""
        self.pRoll = self.addParam("useRollCtl", "bool", True)
        self.pUseIndex = self.addParam("useIndex", "bool", False)
        self.pParentJointIndex = self.addParam(
            "parentJointIndex", "long", -1, None, None)
##########################################################
# Setting Page
##########################################################
class settingsTab(QtWidgets.QDialog, sui.Ui_Form):
    """The Component settings UI"""
    def __init__(self, parent=None):
        super(settingsTab, self).__init__(parent)
        # Build the widgets generated from the Qt Designer .ui file.
        self.setupUi(self)
class componentSettings(MayaQWidgetDockableMixin, guide.componentMainSettings):
    """Dockable settings window for the foot banking component."""

    def __init__(self, parent=None):
        self.toolName = TYPE
        # Delete old instances of the component settings window.
        pyqt.deleteInstances(self, MayaQDockWidget)
        # Use the explicit class (not self.__class__) with super() so a
        # subclass calling this __init__ cannot recurse infinitely.
        super(componentSettings, self).__init__(parent=parent)
        self.settingsTab = settingsTab()
        self.setup_componentSettingWindow()
        self.create_componentControls()
        self.populate_componentControls()
        self.create_componentLayout()
        self.create_componentConnections()

    def setup_componentSettingWindow(self):
        """Configure the window: object name, flags, title and size."""
        self.mayaMainWindow = pyqt.maya_main_window()
        self.setObjectName(self.toolName)
        self.setWindowFlags(QtCore.Qt.Window)
        self.setWindowTitle(TYPE)
        self.resize(280, 350)

    def create_componentControls(self):
        # No extra runtime-created controls for this component.
        return

    def populate_componentControls(self):
        """Populate Controls

        Populate the controls values from the custom attributes of the
        component.
        """
        # populate tab
        self.tabs.insertTab(1, self.settingsTab, "Component Settings")
        # populate component settings
        self.populateCheck(self.settingsTab.useRollCtl_checkBox, "useRollCtl")
        # populate connections in main settings
        for cnx in Guide.connectors:
            self.mainSettingsTab.connector_comboBox.addItem(cnx)
        cBox = self.mainSettingsTab.connector_comboBox
        self.connector_items = [cBox.itemText(i) for i in range(cBox.count())]
        currentConnector = self.root.attr("connector").get()
        if currentConnector not in self.connector_items:
            self.mainSettingsTab.connector_comboBox.addItem(currentConnector)
            self.connector_items.append(currentConnector)
            # BUGFIX: the %s placeholder was never interpolated, so the
            # warning printed a literal "%s" instead of the connector name.
            pm.displayWarning("The current connector: %s, is not a valid "
                              "connector for this component. "
                              "Build will Fail!!" % currentConnector)
        comboIndex = self.connector_items.index(currentConnector)
        self.mainSettingsTab.connector_comboBox.setCurrentIndex(comboIndex)

    def create_componentLayout(self):
        """Lay out the settings tabs and the close button."""
        self.settings_layout = QtWidgets.QVBoxLayout()
        self.settings_layout.addWidget(self.tabs)
        self.settings_layout.addWidget(self.close_button)
        self.setLayout(self.settings_layout)

    def create_componentConnections(self):
        """Wire the UI widgets to the guide's custom attributes."""
        self.settingsTab.useRollCtl_checkBox.stateChanged.connect(
            partial(self.updateCheck,
                    self.settingsTab.useRollCtl_checkBox,
                    "useRollCtl"))
        self.mainSettingsTab.connector_comboBox.currentIndexChanged.connect(
            partial(self.updateConnector,
                    self.mainSettingsTab.connector_comboBox,
                    self.connector_items))

    def dockCloseEventTriggered(self):
        # Clean up this instance when the dock is closed.
        pyqt.deleteInstances(self, MayaQDockWidget)
| 34.370588 | 79 | 0.645559 |
from functools import partial
import pymel.core as pm
from mgear.shifter.component import guide
from mgear.core import transform, pyqt
from mgear.vendor.Qt import QtWidgets, QtCore
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
from maya.app.general.mayaMixin import MayaQDockWidget
import settingsUI as sui
AUTHOR = "Jeremie Passerin, Miquel Campos"
URL = "www.jeremiepasserin.com, www.miquel-campos.com"
EMAIL = "geerem@hotmail.com, hello@miquel-campos.com"
VERSION = [1, 0, 0]
TYPE = "foot_bk_01"
NAME = "foot"
DESCRIPTION = "Foot with reversed controllers to control foot roll."
| true | true |
f735615c2319bdff0ca979793672f7234acf31cf | 23,431 | py | Python | robolearn/torch/policies/tanh_gaussian_promp_multi_policy.py | domingoesteban/robolearn | 0d20125425c352b80ef2eeed1c0b11ab6497b11a | [
"BSD-3-Clause"
] | 1 | 2020-01-13T09:44:22.000Z | 2020-01-13T09:44:22.000Z | robolearn/torch/policies/tanh_gaussian_promp_multi_policy.py | domingoesteban/robolearn | 0d20125425c352b80ef2eeed1c0b11ab6497b11a | [
"BSD-3-Clause"
] | null | null | null | robolearn/torch/policies/tanh_gaussian_promp_multi_policy.py | domingoesteban/robolearn | 0d20125425c352b80ef2eeed1c0b11ab6497b11a | [
"BSD-3-Clause"
] | 1 | 2021-12-22T00:41:20.000Z | 2021-12-22T00:41:20.000Z | import math
import torch
from torch import nn as nn
from torch.distributions import Normal
from robolearn.torch.core import PyTorchModule
from robolearn.torch.utils.pytorch_util import np_ify
from torch.nn.modules.normalization import LayerNorm
import robolearn.torch.utils.pytorch_util as ptu
from robolearn.models.policies import ExplorationPolicy
from collections import OrderedDict
from itertools import chain
# LOG_SIG_MAX = 2
# LOG_SIG_MIN = -3.0
LOG_SIG_MAX = 2
LOG_SIG_MIN = -20
# SIG_MAX = 7.38905609893065
# SIG_MIN = 0.049787068367863944
# LOG_MIX_COEFF_MIN = -10
# LOG_MIX_COEFF_MAX = -1e-6 #-4.5e-5
# LOG_MIX_COEFF_MIN = -1
# LOG_MIX_COEFF_MAX = 1 #-4.5e-5
# EPS = 1e-12
EPS = 1e-8
class TanhGaussianPrompMultiPolicy(PyTorchModule, ExplorationPolicy):
"""
Usage:
```
policy = TanhGaussianPrompMultiPolicy(...)
action, policy_dict = policy(obs)
```
Here, mean and log_std are the mean and log_std of the Gaussian that is
sampled from.
If deterministic is True, action = tanh(mean).
If return_log_prob is False (default), log_prob = None
This is done because computing the log_prob can be a bit expensive.
"""
def __init__(
self,
obs_dim,
action_dim,
n_policies,
shared_hidden_sizes=None,
unshared_hidden_sizes=None,
unshared_mix_hidden_sizes=None,
stds=None,
hidden_activation='relu',
hidden_w_init='xavier_normal',
hidden_b_init_val=0,
output_w_init='xavier_normal',
output_b_init_val=0,
pol_output_activation='linear',
mix_output_activation='linear',
input_norm=False,
shared_layer_norm=False,
policies_layer_norm=False,
mixture_layer_norm=False,
softmax_weights=False,
**kwargs
):
self.save_init_params(locals())
PyTorchModule.__init__(self)
ExplorationPolicy.__init__(self, action_dim)
self._input_size = obs_dim
self._output_sizes = action_dim
self._n_subpolicies = n_policies
# Activation Fcns
self._hidden_activation = ptu.get_activation(hidden_activation)
self._pol_output_activation = ptu.get_activation(pol_output_activation)
self._mix_output_activation = ptu.get_activation(mix_output_activation)
# Normalization Layer Flags
self._shared_layer_norm = shared_layer_norm
self._policies_layer_norm = policies_layer_norm
self._mixture_layer_norm = mixture_layer_norm
# Layers Lists
self._sfcs = [] # Shared Layers
self._sfc_norms = [] # Norm. Shared Layers
self._pfcs = [list() for _ in range(self._n_subpolicies)] # Policies Layers
self._pfc_norms = [list() for _ in range(self._n_subpolicies)] # N. Pol. L.
self._pfc_lasts = [] # Last Policies Layers
self._mfcs = [] # Mixing Layers
self._norm_mfcs = [] # Norm. Mixing Layers
# self.mfc_last = None # Below is instantiated
self._softmax_weights = softmax_weights
# Initial size = Obs size
in_size = self._input_size
# Ordered Dictionaries for specific modules/parameters
self._shared_modules = OrderedDict()
self._shared_parameters = OrderedDict()
self._policies_modules = [OrderedDict() for _ in range(n_policies)]
self._policies_parameters = [OrderedDict() for _ in range(n_policies)]
self._mixing_modules = OrderedDict()
self._mixing_parameters = OrderedDict()
# ############# #
# Shared Layers #
# ############# #
if input_norm:
ln = nn.BatchNorm1d(in_size)
self.sfc_input = ln
self.add_shared_module("sfc_input", ln)
else:
self.sfc_input = None
if shared_hidden_sizes is not None:
for ii, next_size in enumerate(shared_hidden_sizes):
sfc = nn.Linear(in_size, next_size)
ptu.layer_init(
layer=sfc,
option=hidden_w_init,
activation=hidden_activation,
b=hidden_b_init_val,
)
self.__setattr__("sfc{}".format(ii), sfc)
self._sfcs.append(sfc)
self.add_shared_module("sfc{}".format(ii), sfc)
if self._shared_layer_norm:
ln = LayerNorm(next_size)
# ln = nn.BatchNorm1d(next_size)
self.__setattr__("sfc{}_norm".format(ii), ln)
self._sfc_norms.append(ln)
self.add_shared_module("sfc{}_norm".format(ii), ln)
in_size = next_size
# Get the output_size of the shared layers (assume same for all)
multipol_in_size = in_size
mixture_in_size = in_size
# ############### #
# Unshared Layers #
# ############### #
# Unshared Multi-Policy Hidden Layers
if unshared_hidden_sizes is not None:
for ii, next_size in enumerate(unshared_hidden_sizes):
for pol_idx in range(self._n_subpolicies):
pfc = nn.Linear(multipol_in_size, next_size)
ptu.layer_init(
layer=pfc,
option=hidden_w_init,
activation=hidden_activation,
b=hidden_b_init_val,
)
self.__setattr__("pfc{}_{}".format(pol_idx, ii), pfc)
self._pfcs[pol_idx].append(pfc)
self.add_policies_module("pfc{}_{}".format(pol_idx, ii),
pfc, idx=pol_idx)
if self._policies_layer_norm:
ln = LayerNorm(next_size)
# ln = nn.BatchNorm1d(next_size)
self.__setattr__("pfc{}_{}_norm".format(pol_idx, ii),
ln)
self._pfc_norms[pol_idx].append(ln)
self.add_policies_module("pfc{}_{}_norm".format(pol_idx,
ii),
ln, idx=pol_idx)
multipol_in_size = next_size
# Multi-Policy Last Layers
for pol_idx in range(self._n_subpolicies):
last_pfc = nn.Linear(multipol_in_size, action_dim)
ptu.layer_init(
layer=last_pfc,
option=output_w_init,
activation=pol_output_activation,
b=output_b_init_val,
)
self.__setattr__("pfc{}_last".format(pol_idx), last_pfc)
self._pfc_lasts.append(last_pfc)
self.add_policies_module("pfc{}_last".format(pol_idx), last_pfc,
idx=pol_idx)
# Multi-Policy Log-Stds Last Layers
self.stds = stds
self.log_std = list()
if stds is None:
self._pfc_log_std_lasts = list()
for pol_idx in range(self._n_subpolicies):
last_pfc_log_std = nn.Linear(multipol_in_size, action_dim)
ptu.layer_init(
layer=last_pfc_log_std,
option=output_w_init,
activation=pol_output_activation,
b=output_b_init_val,
)
self.__setattr__("pfc{}_log_std_last".format(pol_idx),
last_pfc_log_std)
self._pfc_log_std_lasts.append(last_pfc_log_std)
self.add_policies_module("pfc{}_log_std_last".format(pol_idx),
last_pfc_log_std, idx=pol_idx)
else:
for std in stds:
self.log_std.append(torch.log(stds))
assert LOG_SIG_MIN <= self.log_std[-1] <= LOG_SIG_MAX
# ############# #
# Mixing Layers #
# ############# #
# Unshared Mixing-Weights Hidden Layers
if unshared_mix_hidden_sizes is not None:
for ii, next_size in enumerate(unshared_mix_hidden_sizes):
mfc = nn.Linear(mixture_in_size, next_size)
ptu.layer_init(
layer=mfc,
option=hidden_w_init,
activation=hidden_activation,
b=hidden_b_init_val,
)
self.__setattr__("mfc{}".format(ii), mfc)
self._mfcs.append(mfc)
# Add it to specific dictionaries
self.add_mixing_module("mfc{}".format(ii), mfc)
if self._mixture_layer_norm:
ln = LayerNorm(next_size)
# ln = nn.BatchNorm1d(next_size)
self.__setattr__("mfc{}_norm".format(ii), ln)
self._norm_mfcs.append(ln)
self.add_mixing_module("mfc{}_norm".format(ii), ln)
mixture_in_size = next_size
# Unshared Mixing-Weights Last Layers
mfc_last = nn.Linear(mixture_in_size, self._n_subpolicies * action_dim)
ptu.layer_init(
layer=mfc_last,
option=output_w_init,
activation=mix_output_activation,
b=output_b_init_val,
)
self.__setattr__("mfc_last", mfc_last)
self.mfc_last = mfc_last
# Add it to specific dictionaries
self.add_mixing_module("mfc_last", mfc_last)
self.mfc_sigmoid = nn.Sigmoid()
self._normal_dist = Normal(loc=ptu.zeros(action_dim),
scale=ptu.ones(action_dim))
self._pols_idxs = ptu.arange(self._n_subpolicies)
def get_action(self, obs_np, **kwargs):
"""
"""
actions, info_dict = self.get_actions(obs_np[None], **kwargs)
for key, val in info_dict.items():
info_dict[key] = val[0, :]
# Get [0, :] vals (Because it has dimension 1xdA)
return actions[0, :], info_dict
def get_actions(self, obs_np, **kwargs):
"""
"""
actions, torch_info_dict = self.eval_np(obs_np, **kwargs)
info_dict = dict()
for key, vals in torch_info_dict.items():
if key in ['mixing_coeff']:
info_dict[key] = np_ify(torch_info_dict[key])
return actions, info_dict
    def forward(
            self,
            obs,
            deterministic=False,
            return_log_prob=False,
            pol_idx=None,
            optimize_policies=True,
    ):
        """
        Args:
            obs (Tensor): Observation(s), shape (batch, obs_dim).
            deterministic (bool): True for using mean. False, sample from dist.
            return_log_prob (bool): also compute log-probs of the sampled
                actions (main policy and every sub-policy).
            pol_idx (int): when given, act with that single sub-policy
                instead of the precision-weighted mixture.
            optimize_policies (bool): when False, detach sub-policy means and
                variances so gradients only flow into the mixing network.
        Returns:
            action (Tensor): tanh-squashed action, shape (batch, action_dim).
            pol_info (dict): means/stds/log-probs for main and sub-policies
                plus the mixing coefficients.
        """
        h = obs
        nbatch = obs.shape[0]
        # ############# #
        # Shared Layers #
        # ############# #
        if self.sfc_input is not None:
            # h = self.sfc_input(h)
            if nbatch > 1:
                h = self.sfc_input(h)
            else:
                # BatchNorm1d cannot normalize a single sample in training
                # mode, so call the functional batch_norm directly.
                h = torch.batch_norm(
                    h,
                    self.sfc_input.weight,
                    self.sfc_input.bias,
                    self.sfc_input.running_mean,
                    self.sfc_input.running_var,
                    True,  # TODO: True or False??
                    self.sfc_input.momentum,
                    self.sfc_input.eps,
                    torch.backends.cudnn.enabled
                )
        for ss, fc in enumerate(self._sfcs):
            h = fc(h)
            if self._shared_layer_norm:
                h = self._sfc_norms[ss](h)
            h = self._hidden_activation(h)
        # ############## #
        # Multi Policies #
        # ############## #
        # Each sub-policy head gets its own copy of the shared features.
        hs = [h.clone() for _ in range(self._n_subpolicies)]
        # Hidden Layers
        if len(self._pfcs) > 0:
            for pp in range(self._n_subpolicies):
                for ii, fc in enumerate(self._pfcs[pp]):
                    hs[pp] = fc(hs[pp])
                    if self._policies_layer_norm:
                        hs[pp] = self._pfc_norms[pp][ii](hs[pp])
                    hs[pp] = self._hidden_activation(hs[pp])
        # Last Mean Layers
        means = torch.cat(
            [(
                self._pol_output_activation(self._pfc_lasts[pp](hs[pp]))
            ).unsqueeze(dim=1)
            for pp in range(self._n_subpolicies)
            ],
            dim=1
        )  # Batch x Npols x dA
        # Last Log-Std Layers
        if self.stds is None:
            log_stds = torch.cat(
                [(
                    self._pol_output_activation(
                        self._pfc_log_std_lasts[pp](hs[pp])
                    )
                ).unsqueeze(dim=1)
                for pp in range(self._n_subpolicies)
                ],
                dim=1
            )  # Batch x Npols x dA
            # # log_std option 1:
            # log_stds = torch.clamp(log_stds, min=LOG_SIG_MIN, max=LOG_SIG_MAX)
            # log_std option 2: smooth squashing of log-std into
            # [LOG_SIG_MIN, LOG_SIG_MAX] instead of a hard clamp.
            log_stds = torch.tanh(log_stds)
            log_stds = \
                LOG_SIG_MIN + 0.5 * (LOG_SIG_MAX - LOG_SIG_MIN)*(log_stds + 1)
            stds = torch.exp(log_stds)
            variances = stds**2
        else:
            log_stds = self.log_std
            stds = self.stds
            variances = stds**2
        # ############## #
        # Mixing Weigths #
        # ############## #
        mh = h.clone()
        if len(self._mfcs) > 0:
            for mm, mfc in enumerate(self._mfcs):
                mh = mfc(mh)
                if self._mixture_layer_norm:
                    mh = self._norm_mfcs[mm](mh)
                mh = self._hidden_activation(mh)
        # NO nonlinear transformation
        mixture_coeff = \
            self.mfc_last(mh).reshape(-1, self._n_subpolicies, self.action_dim)
        # Per-(policy, action-dim) weights in (0, 1).
        mixture_coeff = self.mfc_sigmoid(mixture_coeff)
        # if torch.isnan(mixture_coeff).any():
        #     raise ValueError('Some mixture coeff(s) is(are) NAN: %s' %
        #                      mixture_coeff)
        #
        # if torch.isnan(means).any():
        #     raise ValueError('Some means are NAN: %s' %
        #                      means)
        #
        # if torch.isnan(stds).any():
        #     raise ValueError('Some stds are NAN: %s' %
        #                      stds)
        if pol_idx is None:
            # Calculate weighted means and stds (and log_stds)
            # Product-of-Gaussians composition: the mixture coefficients
            # scale each sub-policy's precision (1/variance).
            if optimize_policies:
                sig_invs = mixture_coeff/variances
            else:
                sig_invs = mixture_coeff/variances.detach()
            variance = 1./torch.sum(sig_invs, dim=1, keepdim=False)
            if optimize_policies:
                mean = variance*torch.sum(
                    means*sig_invs,
                    dim=1,
                    keepdim=False
                )
            else:
                mean = variance*torch.sum(
                    means.detach()*sig_invs,
                    dim=1,
                    keepdim=False
                )
            # log_std option 1:
            std = torch.sqrt(variance)
            std = torch.clamp(std,
                              min=math.exp(LOG_SIG_MIN),
                              max=math.exp(LOG_SIG_MAX))
            log_std = torch.log(std)
            # # log_std option 2:
            # variance = torch.tanh(variance)
            # variance = (
            #     math.exp(LOG_SIG_MIN)**2 +
            #     0.5*(math.exp(LOG_SIG_MAX)**2 - math.exp(LOG_SIG_MIN)**2) *
            #     (variance + 1)
            # )
            # std = torch.sqrt(variance)
            # log_std = torch.log(std)
            # TODO: Remove the following?
            # log_std = torch.logsumexp(
            #     log_stds + log_mixture_coeff.reshape(-1,
            #                                          self.action_dim,
            #                                          self._n_subpolicies),
            #     dim=-1,
            #     keepdim=False
            # ) - torch.logsumexp(log_mixture_coeff, dim=-1, keepdim=True)
            # log_std = torch.log(std)
        else:
            # Act with the requested single sub-policy.
            index = self._pols_idxs[pol_idx]
            mean = \
                torch.index_select(means, dim=1, index=index).squeeze(1)
            std = \
                torch.index_select(stds, dim=1, index=index).squeeze(1)
            log_std = \
                torch.index_select(log_stds, dim=1, index=index).squeeze(1)
            variance = \
                torch.index_select(variances, dim=1, index=index).squeeze(1)
        pre_tanh_value = None
        log_prob = None
        pre_tanh_values = None
        log_probs = None
        if deterministic:
            action = torch.tanh(mean)
            actions = torch.tanh(means)
        else:
            # # Using this distribution instead of TanhMultivariateNormal
            # # because it has Diagonal Covariance.
            # # Then, a collection of n independent Gaussian r.v.
            # tanh_normal = TanhNormal(mean, std)
            #
            # # # It is the Lower-triangular factor of covariance because it is
            # # # Diagonal Covariance
            # # scale_trils = torch.stack([torch.diag(m) for m in std])
            # # tanh_normal = TanhMultivariateNormal(mean, scale_tril=scale_trils)
            #
            # if return_log_prob:
            #     log_prob = tanh_normal.log_prob(
            #         action,
            #         pre_tanh_value=pre_tanh_value
            #     )
            #     log_prob = log_prob.sum(dim=-1, keepdim=True)
            # Reparameterization trick: the SAME noise sample is used for
            # the composed policy and for every sub-policy.
            noise = self._normal_dist.sample((nbatch,))
            pre_tanh_value = std*noise + mean
            pre_tanh_values = stds*noise.unsqueeze(1) + means
            action = torch.tanh(pre_tanh_value)
            actions = torch.tanh(pre_tanh_values)
            if return_log_prob:
                # Log probability: Main Policy
                # Gaussian log-density plus the tanh-squash correction.
                log_prob = -((pre_tanh_value - mean) ** 2) / (2*variance) \
                    - log_std - math.log(math.sqrt(2*math.pi))
                log_prob -= torch.log(
                    # torch.clamp(1. - action**2, 0, 1)
                    clip_but_pass_gradient(1. - action**2, 0, 1)
                    + 1.e-6
                )
                log_prob = log_prob.sum(dim=-1, keepdim=True)
                # Log probability: Sub-Policies
                log_probs = -((pre_tanh_values - means) ** 2) / (2*variances)\
                    - log_stds - math.log(math.sqrt(2*math.pi))
                log_probs -= torch.log(
                    # torch.clamp(1. - actions**2, 0, 1)
                    clip_but_pass_gradient(1. - actions**2, 0, 1)
                    + 1.e-6
                )
                log_probs = log_probs.sum(dim=-1, keepdim=True)
        # if torch.isnan(action).any():
        #     raise ValueError('ACTION NAN')
        #
        # if torch.isnan(actions).any():
        #     raise ValueError('ACTION NAN')
        info_dict = dict(
            mean=mean,
            std=std,
            log_std=log_std,
            log_prob=log_prob,
            pre_tanh_value=pre_tanh_value,
            # log_mixture_coeff=log_mixture_coeff,
            mixing_coeff=mixture_coeff,
            pol_actions=actions,
            pol_means=means,
            pol_stds=stds,
            pol_log_stds=log_stds,
            pol_log_probs=log_probs,
            pol_pre_tanh_values=pre_tanh_values,
        )
        return action, info_dict
    def log_action(self, actions, obs, pol_idx=None):
        # Evaluating log-probability of externally supplied actions is not
        # supported by this policy.
        raise NotImplementedError
    @property
    def n_heads(self):
        """Number of policy heads (alias of :attr:`n_subpolicies`)."""
        return self._n_subpolicies
    @property
    def n_subpolicies(self):
        """Number of sub-policies composed by this mixture policy."""
        return self._n_subpolicies
# ################# #
# Shared parameters #
# ################# #
def shared_parameters(self):
"""Returns an iterator over the shared parameters.
"""
for name, param in self.named_shared_parameters():
yield param
def named_shared_parameters(self, **kwargs):
"""Returns an iterator over shared module parameters, yielding both the
name of the parameter as well as the parameter itself
"""
return ptu.named_parameters(self._shared_modules,
self._shared_parameters,
**kwargs)
    def add_shared_module(self, name, module):
        """Register ``module`` under ``name`` in the shared-modules registry."""
        ptu.add_module(self._shared_modules, name, module)
# ####################### #
# Sub-Policies parameters #
# ####################### #
def policies_parameters(self, idx=None):
"""Returns an iterator over the policies parameters.
"""
if idx is None:
idx_list = list(range(self._n_subpolicies))
elif isinstance(idx, list) or isinstance(idx, tuple):
idx_list = idx
else:
idx_list = [idx]
for name, param in self.named_policies_parameters(idx_list):
yield param
def named_policies_parameters(self, idx=None, **kwargs):
"""Returns an iterator over policies module parameters, yielding both the
name of the parameter as well as the parameter itself
"""
if idx is None:
idx_list = list(range(self._n_subpolicies))
elif isinstance(idx, list) or isinstance(idx, tuple):
idx_list = idx
else:
idx_list = [idx]
return chain(*[ptu.named_parameters(self._policies_modules[idx],
self._policies_parameters[idx],
**kwargs)
for idx in idx_list])
def add_policies_module(self, name, module, idx=None):
if idx is None:
idx_list = list(range(self._n_subpolicies))
elif isinstance(idx, list) or isinstance(idx, tuple):
idx_list = idx
else:
idx_list = [idx]
for idx in idx_list:
ptu.add_module(self._policies_modules[idx], name, module)
# ################# #
# Mixing parameters #
# ################# #
def mixing_parameters(self):
"""Returns an iterator over the mixing parameters.
"""
for name, param in self.named_mixing_parameters():
yield param
def named_mixing_parameters(self, **kwargs):
"""Returns an iterator over mixing module parameters, yielding both the
name of the parameter as well as the parameter itself
"""
return ptu.named_parameters(self._mixing_modules,
self._mixing_parameters,
**kwargs)
    def add_mixing_module(self, name, module):
        """Register ``module`` under ``name`` in the mixing-modules registry."""
        ptu.add_module(self._mixing_modules, name, module)
def clip_but_pass_gradient(x, l=-1., u=1.):
    """Clamp ``x`` to ``[l, u]`` in the forward pass while letting the
    gradient flow through unchanged (straight-through estimator).

    The clipping correction is detached, so backpropagation sees the
    identity function even where the value was clamped.
    """
    above = (x > u).to(ptu.device, dtype=torch.float32)
    below = (x < l).to(ptu.device, dtype=torch.float32)
    correction = (u - x) * above + (l - x) * below
    return x + correction.detach()
| 35.555387 | 84 | 0.52358 | import math
import torch
from torch import nn as nn
from torch.distributions import Normal
from robolearn.torch.core import PyTorchModule
from robolearn.torch.utils.pytorch_util import np_ify
from torch.nn.modules.normalization import LayerNorm
import robolearn.torch.utils.pytorch_util as ptu
from robolearn.models.policies import ExplorationPolicy
from collections import OrderedDict
from itertools import chain
LOG_SIG_MAX = 2
LOG_SIG_MIN = -20
class TanhGaussianPrompMultiPolicy(PyTorchModule, ExplorationPolicy):
def __init__(
self,
obs_dim,
action_dim,
n_policies,
shared_hidden_sizes=None,
unshared_hidden_sizes=None,
unshared_mix_hidden_sizes=None,
stds=None,
hidden_activation='relu',
hidden_w_init='xavier_normal',
hidden_b_init_val=0,
output_w_init='xavier_normal',
output_b_init_val=0,
pol_output_activation='linear',
mix_output_activation='linear',
input_norm=False,
shared_layer_norm=False,
policies_layer_norm=False,
mixture_layer_norm=False,
softmax_weights=False,
**kwargs
):
self.save_init_params(locals())
PyTorchModule.__init__(self)
ExplorationPolicy.__init__(self, action_dim)
self._input_size = obs_dim
self._output_sizes = action_dim
self._n_subpolicies = n_policies
self._hidden_activation = ptu.get_activation(hidden_activation)
self._pol_output_activation = ptu.get_activation(pol_output_activation)
self._mix_output_activation = ptu.get_activation(mix_output_activation)
self._shared_layer_norm = shared_layer_norm
self._policies_layer_norm = policies_layer_norm
self._mixture_layer_norm = mixture_layer_norm
self._sfcs = []
self._sfc_norms = []
self._pfcs = [list() for _ in range(self._n_subpolicies)]
self._pfc_norms = [list() for _ in range(self._n_subpolicies)]
self._pfc_lasts = []
self._mfcs = []
self._norm_mfcs = []
_weights = softmax_weights
in_size = self._input_size
self._shared_modules = OrderedDict()
self._shared_parameters = OrderedDict()
self._policies_modules = [OrderedDict() for _ in range(n_policies)]
self._policies_parameters = [OrderedDict() for _ in range(n_policies)]
self._mixing_modules = OrderedDict()
self._mixing_parameters = OrderedDict()
den_sizes is not None:
for ii, next_size in enumerate(shared_hidden_sizes):
sfc = nn.Linear(in_size, next_size)
ptu.layer_init(
layer=sfc,
option=hidden_w_init,
activation=hidden_activation,
b=hidden_b_init_val,
)
self.__setattr__("sfc{}".format(ii), sfc)
self._sfcs.append(sfc)
self.add_shared_module("sfc{}".format(ii), sfc)
if self._shared_layer_norm:
ln = LayerNorm(next_size)
self.__setattr__("sfc{}_norm".format(ii), ln)
self._sfc_norms.append(ln)
self.add_shared_module("sfc{}_norm".format(ii), ln)
in_size = next_size
multipol_in_size = in_size
mixture_in_size = in_size
layer=pfc,
option=hidden_w_init,
activation=hidden_activation,
b=hidden_b_init_val,
)
self.__setattr__("pfc{}_{}".format(pol_idx, ii), pfc)
self._pfcs[pol_idx].append(pfc)
self.add_policies_module("pfc{}_{}".format(pol_idx, ii),
pfc, idx=pol_idx)
if self._policies_layer_norm:
ln = LayerNorm(next_size)
self.__setattr__("pfc{}_{}_norm".format(pol_idx, ii),
ln)
self._pfc_norms[pol_idx].append(ln)
self.add_policies_module("pfc{}_{}_norm".format(pol_idx,
ii),
ln, idx=pol_idx)
multipol_in_size = next_size
for pol_idx in range(self._n_subpolicies):
last_pfc = nn.Linear(multipol_in_size, action_dim)
ptu.layer_init(
layer=last_pfc,
option=output_w_init,
activation=pol_output_activation,
b=output_b_init_val,
)
self.__setattr__("pfc{}_last".format(pol_idx), last_pfc)
self._pfc_lasts.append(last_pfc)
self.add_policies_module("pfc{}_last".format(pol_idx), last_pfc,
idx=pol_idx)
self.stds = stds
self.log_std = list()
if stds is None:
self._pfc_log_std_lasts = list()
for pol_idx in range(self._n_subpolicies):
last_pfc_log_std = nn.Linear(multipol_in_size, action_dim)
ptu.layer_init(
layer=last_pfc_log_std,
option=output_w_init,
activation=pol_output_activation,
b=output_b_init_val,
)
self.__setattr__("pfc{}_log_std_last".format(pol_idx),
last_pfc_log_std)
self._pfc_log_std_lasts.append(last_pfc_log_std)
self.add_policies_module("pfc{}_log_std_last".format(pol_idx),
last_pfc_log_std, idx=pol_idx)
else:
for std in stds:
self.log_std.append(torch.log(stds))
assert LOG_SIG_MIN <= self.log_std[-1] <= LOG_SIG_MAX
it(
layer=mfc,
option=hidden_w_init,
activation=hidden_activation,
b=hidden_b_init_val,
)
self.__setattr__("mfc{}".format(ii), mfc)
self._mfcs.append(mfc)
self.add_mixing_module("mfc{}".format(ii), mfc)
if self._mixture_layer_norm:
ln = LayerNorm(next_size)
self.__setattr__("mfc{}_norm".format(ii), ln)
self._norm_mfcs.append(ln)
self.add_mixing_module("mfc{}_norm".format(ii), ln)
mixture_in_size = next_size
mfc_last = nn.Linear(mixture_in_size, self._n_subpolicies * action_dim)
ptu.layer_init(
layer=mfc_last,
option=output_w_init,
activation=mix_output_activation,
b=output_b_init_val,
)
self.__setattr__("mfc_last", mfc_last)
self.mfc_last = mfc_last
self.add_mixing_module("mfc_last", mfc_last)
self.mfc_sigmoid = nn.Sigmoid()
self._normal_dist = Normal(loc=ptu.zeros(action_dim),
scale=ptu.ones(action_dim))
self._pols_idxs = ptu.arange(self._n_subpolicies)
def get_action(self, obs_np, **kwargs):
actions, info_dict = self.get_actions(obs_np[None], **kwargs)
for key, val in info_dict.items():
info_dict[key] = val[0, :]
return actions[0, :], info_dict
def get_actions(self, obs_np, **kwargs):
actions, torch_info_dict = self.eval_np(obs_np, **kwargs)
info_dict = dict()
for key, vals in torch_info_dict.items():
if key in ['mixing_coeff']:
info_dict[key] = np_ify(torch_info_dict[key])
return actions, info_dict
def forward(
self,
obs,
deterministic=False,
return_log_prob=False,
pol_idx=None,
optimize_policies=True,
):
h = obs
nbatch = obs.shape[0]
lf.sfc_input.weight,
self.sfc_input.bias,
self.sfc_input.running_mean,
self.sfc_input.running_var,
True,
self.sfc_input.momentum,
self.sfc_input.eps,
torch.backends.cudnn.enabled
)
for ss, fc in enumerate(self._sfcs):
h = fc(h)
if self._shared_layer_norm:
h = self._sfc_norms[ss](h)
h = self._hidden_activation(h)
if self._policies_layer_norm:
hs[pp] = self._pfc_norms[pp][ii](hs[pp])
hs[pp] = self._hidden_activation(hs[pp])
means = torch.cat(
[(
self._pol_output_activation(self._pfc_lasts[pp](hs[pp]))
).unsqueeze(dim=1)
for pp in range(self._n_subpolicies)
],
dim=1
)
if self.stds is None:
log_stds = torch.cat(
[(
self._pol_output_activation(
self._pfc_log_std_lasts[pp](hs[pp])
)
).unsqueeze(dim=1)
for pp in range(self._n_subpolicies)
],
dim=1
)
log_stds = torch.tanh(log_stds)
log_stds = \
LOG_SIG_MIN + 0.5 * (LOG_SIG_MAX - LOG_SIG_MIN)*(log_stds + 1)
stds = torch.exp(log_stds)
variances = stds**2
else:
log_stds = self.log_std
stds = self.stds
variances = stds**2
= self._hidden_activation(mh)
mixture_coeff = \
self.mfc_last(mh).reshape(-1, self._n_subpolicies, self.action_dim)
mixture_coeff = self.mfc_sigmoid(mixture_coeff)
if pol_idx is None:
if optimize_policies:
sig_invs = mixture_coeff/variances
else:
sig_invs = mixture_coeff/variances.detach()
variance = 1./torch.sum(sig_invs, dim=1, keepdim=False)
if optimize_policies:
mean = variance*torch.sum(
means*sig_invs,
dim=1,
keepdim=False
)
else:
mean = variance*torch.sum(
means.detach()*sig_invs,
dim=1,
keepdim=False
)
std = torch.sqrt(variance)
std = torch.clamp(std,
min=math.exp(LOG_SIG_MIN),
max=math.exp(LOG_SIG_MAX))
log_std = torch.log(std)
else:
index = self._pols_idxs[pol_idx]
mean = \
torch.index_select(means, dim=1, index=index).squeeze(1)
std = \
torch.index_select(stds, dim=1, index=index).squeeze(1)
log_std = \
torch.index_select(log_stds, dim=1, index=index).squeeze(1)
variance = \
torch.index_select(variances, dim=1, index=index).squeeze(1)
pre_tanh_value = None
log_prob = None
pre_tanh_values = None
log_probs = None
if deterministic:
action = torch.tanh(mean)
actions = torch.tanh(means)
else:
pre_tanh_values)
if return_log_prob:
log_prob = -((pre_tanh_value - mean) ** 2) / (2*variance) \
- log_std - math.log(math.sqrt(2*math.pi))
log_prob -= torch.log(
clip_but_pass_gradient(1. - action**2, 0, 1)
+ 1.e-6
)
log_prob = log_prob.sum(dim=-1, keepdim=True)
log_probs = -((pre_tanh_values - means) ** 2) / (2*variances)\
- log_stds - math.log(math.sqrt(2*math.pi))
log_probs -= torch.log(
clip_but_pass_gradient(1. - actions**2, 0, 1)
+ 1.e-6
)
log_probs = log_probs.sum(dim=-1, keepdim=True)
info_dict = dict(
mean=mean,
std=std,
log_std=log_std,
log_prob=log_prob,
pre_tanh_value=pre_tanh_value,
mixing_coeff=mixture_coeff,
pol_actions=actions,
pol_means=means,
pol_stds=stds,
pol_log_stds=log_stds,
pol_log_probs=log_probs,
pol_pre_tanh_values=pre_tanh_values,
)
return action, info_dict
def log_action(self, actions, obs, pol_idx=None):
raise NotImplementedError
@property
def n_heads(self):
return self._n_subpolicies
@property
def n_subpolicies(self):
return self._n_subpolicies
ame, module):
ptu.add_module(self._shared_modules, name, module)
eturn chain(*[ptu.named_parameters(self._policies_modules[idx],
self._policies_parameters[idx],
**kwargs)
for idx in idx_list])
def add_policies_module(self, name, module, idx=None):
if idx is None:
idx_list = list(range(self._n_subpolicies))
elif isinstance(idx, list) or isinstance(idx, tuple):
idx_list = idx
else:
idx_list = [idx]
for idx in idx_list:
ptu.add_module(self._policies_modules[idx], name, module)
ame, module):
ptu.add_module(self._mixing_modules, name, module)
def clip_but_pass_gradient(x, l=-1., u=1.):
clip_up = (x > u).to(ptu.device, dtype=torch.float32)
clip_low = (x < l).to(ptu.device, dtype=torch.float32)
return x + ((u - x)*clip_up + (l - x)*clip_low).detach()
| true | true |
f7356189410830fee91f4d3a459c6fac261b2145 | 115,902 | py | Python | src/azure-cli/azure/cli/command_modules/storage/_params.py | chunyu3/azure-cli | 481df7ec3f42067bdf078692cb32e9a27baa6821 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/storage/_params.py | chunyu3/azure-cli | 481df7ec3f42067bdf078692cb32e9a27baa6821 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/storage/_params.py | chunyu3/azure-cli | 481df7ec3f42067bdf078692cb32e9a27baa6821 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.validators import get_default_location_from_resource_group
from azure.cli.core.commands.parameters import (tags_type, file_type, get_location_type, get_enum_type,
get_three_state_flag)
from azure.cli.core.local_context import LocalContextAttribute, LocalContextAction, ALL
from ._validators import (get_datetime_type, validate_metadata, get_permission_validator, get_permission_help_string,
resource_type_type, services_type, validate_entity, validate_select, validate_blob_type,
validate_included_datasets_validator, validate_custom_domain,
validate_container_public_access,
validate_table_payload_format, add_progress_callback, process_resource_group,
storage_account_key_options, process_file_download_namespace, process_metric_update_namespace,
get_char_options_validator, validate_bypass, validate_encryption_source, validate_marker,
validate_storage_data_plane_list, validate_azcopy_upload_destination_url,
validate_azcopy_remove_arguments, as_user_validator, parse_storage_account,
validate_delete_retention_days, validate_container_delete_retention_days,
validate_file_delete_retention_days, validator_change_feed_retention_days,
validate_fs_public_access, validate_logging_version, validate_or_policy, validate_policy,
get_api_version_type, blob_download_file_path_validator, blob_tier_validator)
def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statements, too-many-lines
from argcomplete.completers import FilesCompleter
from six import u as unicode_string
from knack.arguments import ignore_type, CLIArgumentType
from azure.cli.core.commands.parameters import get_resource_name_completion_list
from .sdkutil import get_table_data_type
from .completers import get_storage_name_completion_list
t_base_blob_service = self.get_sdk('blob.baseblobservice#BaseBlobService')
t_file_service = self.get_sdk('file#FileService')
t_queue_service = self.get_sdk('queue#QueueService')
t_table_service = get_table_data_type(self.cli_ctx, 'table', 'TableService')
storage_account_type = CLIArgumentType(options_list='--storage-account',
help='The name or ID of the storage account.',
validator=parse_storage_account, id_part='name')
acct_name_type = CLIArgumentType(options_list=['--account-name', '-n'], help='The storage account name.',
id_part='name',
completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'),
local_context_attribute=LocalContextAttribute(
name='storage_account_name', actions=[LocalContextAction.GET]))
blob_name_type = CLIArgumentType(options_list=['--blob-name', '-b'], help='The blob name.',
completer=get_storage_name_completion_list(t_base_blob_service, 'list_blobs',
parent='container_name'))
container_name_type = CLIArgumentType(options_list=['--container-name', '-c'], help='The container name.',
completer=get_storage_name_completion_list(t_base_blob_service,
'list_containers'))
directory_type = CLIArgumentType(options_list=['--directory-name', '-d'], help='The directory name.',
completer=get_storage_name_completion_list(t_file_service,
'list_directories_and_files',
parent='share_name'))
file_name_type = CLIArgumentType(options_list=['--file-name', '-f'],
completer=get_storage_name_completion_list(t_file_service,
'list_directories_and_files',
parent='share_name'))
share_name_type = CLIArgumentType(options_list=['--share-name', '-s'], help='The file share name.',
completer=get_storage_name_completion_list(t_file_service, 'list_shares'))
table_name_type = CLIArgumentType(options_list=['--table-name', '-t'],
completer=get_storage_name_completion_list(t_table_service, 'list_tables'))
queue_name_type = CLIArgumentType(options_list=['--queue-name', '-q'], help='The queue name.',
completer=get_storage_name_completion_list(t_queue_service, 'list_queues'))
progress_type = CLIArgumentType(help='Include this flag to disable progress reporting for the command.',
action='store_true', validator=add_progress_callback)
socket_timeout_type = CLIArgumentType(help='The socket timeout(secs), used by the service to regulate data flow.',
type=int)
large_file_share_type = CLIArgumentType(
action='store_true', min_api='2019-04-01',
help='Enable the capability to support large file shares with more than 5 TiB capacity for storage account.'
'Once the property is enabled, the feature cannot be disabled. Currently only supported for LRS and '
'ZRS replication types, hence account conversions to geo-redundant accounts would not be possible. '
'For more information, please refer to https://go.microsoft.com/fwlink/?linkid=2086047.')
adds_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2019-04-01',
help='Enable Azure Files Active Directory Domain Service Authentication for '
'storage account. When --enable-files-adds is set to true, Azure Active '
'Directory Properties arguments must be provided.')
aadds_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2018-11-01',
help='Enable Azure Active Directory Domain Services authentication for Azure Files')
domain_name_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the primary domain that the AD DNS server is authoritative for. "
"Required when --enable-files-adds is set to True")
net_bios_domain_name_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the NetBIOS domain name. "
"Required when --enable-files-adds is set to True")
forest_name_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the Active Directory forest to get. "
"Required when --enable-files-adds is set to True")
domain_guid_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the domain GUID. Required when --enable-files-adds is set to True")
domain_sid_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the security identifier (SID). Required when --enable-files-adds "
"is set to True")
azure_storage_sid_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the security identifier (SID) for Azure Storage. "
"Required when --enable-files-adds is set to True")
exclude_pattern_type = CLIArgumentType(arg_group='Additional Flags', help='Exclude these files where the name '
'matches the pattern list. For example: *.jpg;*.pdf;exactName. This '
'option supports wildcard characters (*)')
include_pattern_type = CLIArgumentType(arg_group='Additional Flags', help='Include only these files where the name '
'matches the pattern list. For example: *.jpg;*.pdf;exactName. This '
'option supports wildcard characters (*)')
exclude_path_type = CLIArgumentType(arg_group='Additional Flags', help='Exclude these paths. This option does not '
'support wildcard characters (*). Checks relative path prefix. For example: '
'myFolder;myFolder/subDirName/file.pdf.')
include_path_type = CLIArgumentType(arg_group='Additional Flags', help='Include only these paths. This option does '
'not support wildcard characters (*). Checks relative path prefix. For example:'
'myFolder;myFolder/subDirName/file.pdf')
recursive_type = CLIArgumentType(options_list=['--recursive', '-r'], action='store_true',
help='Look into sub-directories recursively.')
sas_help = 'The permissions the SAS grants. Allowed values: {}. Do not use if a stored access policy is ' \
'referenced with --id that specifies this value. Can be combined.'
t_routing_choice = self.get_models('RoutingChoice', resource_type=ResourceType.MGMT_STORAGE)
routing_choice_type = CLIArgumentType(
arg_group='Routing Preference', arg_type=get_enum_type(t_routing_choice),
help='Routing Choice defines the kind of network routing opted by the user.',
min_api='2019-06-01')
publish_microsoft_endpoints_type = CLIArgumentType(
arg_group='Routing Preference', arg_type=get_three_state_flag(), min_api='2019-06-01',
help='A boolean flag which indicates whether microsoft routing storage endpoints are to be published.')
publish_internet_endpoints_type = CLIArgumentType(
arg_group='Routing Preference', arg_type=get_three_state_flag(), min_api='2019-06-01',
help='A boolean flag which indicates whether internet routing storage endpoints are to be published.')
umask_type = CLIArgumentType(
help='When creating a file or directory and the parent folder does not have a default ACL, the umask restricts '
'the permissions of the file or directory to be created. The resulting permission is given by p & ^u, '
'where p is the permission and u is the umask. For more information, please refer to '
'https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-access-control#umask.')
permissions_type = CLIArgumentType(
help='POSIX access permissions for the file owner, the file owning group, and others. Each class may be '
'granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) '
'and 4-digit octal notation (e.g. 0766) are supported. For more information, please refer to https://'
'docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-access-control#levels-of-permission.')
timeout_type = CLIArgumentType(
help='Request timeout in seconds. Applies to each call to the service.', type=int
)
marker_type = CLIArgumentType(
help='A string value that identifies the portion of the list of containers to be '
'returned with the next listing operation. The operation returns the NextMarker value within '
'the response body if the listing operation did not return all containers remaining to be listed '
'with the current page. If specified, this generator will begin returning results from the point '
'where the previous generator stopped.')
num_results_type = CLIArgumentType(
default=5000, validator=validate_storage_data_plane_list,
help='Specify the maximum number to return. If the request does not specify '
'num_results, or specifies a value greater than 5000, the server will return up to 5000 items. Note that '
'if the listing operation crosses a partition boundary, then the service will return a continuation token '
'for retrieving the remaining of the results. Provide "*" to return all.'
)
if_modified_since_type = CLIArgumentType(
help='Commence only if modified since supplied UTC datetime (Y-m-d\'T\'H:M\'Z\')',
type=get_datetime_type(False))
if_unmodified_since_type = CLIArgumentType(
help='Commence only if unmodified since supplied UTC datetime (Y-m-d\'T\'H:M\'Z\')',
type=get_datetime_type(False))
allow_shared_key_access_type = CLIArgumentType(
arg_type=get_three_state_flag(), options_list=['--allow-shared-key-access', '-k'], min_api='2019-04-01',
help='Indicate whether the storage account permits requests to be authorized with the account access key via '
'Shared Key. If false, then all requests, including shared access signatures, must be authorized with '
'Azure Active Directory (Azure AD). The default value is null, which is equivalent to true.')
t_blob_tier = self.get_sdk('_generated.models._azure_blob_storage_enums#AccessTierOptional',
resource_type=ResourceType.DATA_STORAGE_BLOB)
with self.argument_context('storage') as c:
c.argument('container_name', container_name_type)
c.argument('directory_name', directory_type)
c.argument('share_name', share_name_type)
c.argument('table_name', table_name_type)
c.argument('retry_wait', options_list=('--retry-interval',))
c.ignore('progress_callback')
c.argument('metadata', nargs='+',
help='Metadata in space-separated key=value pairs. This overwrites any existing metadata.',
validator=validate_metadata)
c.argument('timeout', help='Request timeout in seconds. Applies to each call to the service.', type=int)
with self.argument_context('storage', arg_group='Precondition') as c:
c.argument('if_modified_since', if_modified_since_type)
c.argument('if_unmodified_since', if_unmodified_since_type)
c.argument('if_match')
c.argument('if_none_match')
for item in ['delete', 'show', 'update', 'show-connection-string', 'keys', 'network-rule', 'revoke-delegation-keys', 'failover']: # pylint: disable=line-too-long
with self.argument_context('storage account {}'.format(item)) as c:
c.argument('account_name', acct_name_type, options_list=['--name', '-n'])
c.argument('resource_group_name', required=False, validator=process_resource_group)
with self.argument_context('storage account check-name') as c:
c.argument('name', options_list=['--name', '-n'],
help='The name of the storage account within the specified resource group')
with self.argument_context('storage account delete') as c:
c.argument('account_name', acct_name_type, options_list=['--name', '-n'], local_context_attribute=None)
with self.argument_context('storage account create', resource_type=ResourceType.MGMT_STORAGE) as c:
t_account_type, t_sku_name, t_kind, t_tls_version = \
self.get_models('AccountType', 'SkuName', 'Kind', 'MinimumTlsVersion',
resource_type=ResourceType.MGMT_STORAGE)
c.register_common_storage_account_options()
c.argument('location', get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
c.argument('account_type', help='The storage account type', arg_type=get_enum_type(t_account_type))
c.argument('account_name', acct_name_type, options_list=['--name', '-n'], completer=None,
local_context_attribute=LocalContextAttribute(
name='storage_account_name', actions=[LocalContextAction.SET], scopes=[ALL]))
c.argument('kind', help='Indicate the type of storage account.',
arg_type=get_enum_type(t_kind),
default='StorageV2' if self.cli_ctx.cloud.profile == 'latest' else 'Storage')
c.argument('https_only', arg_type=get_three_state_flag(), min_api='2019-04-01',
help='Allow https traffic only to storage service if set to true. The default value is true.')
c.argument('https_only', arg_type=get_three_state_flag(), max_api='2018-11-01',
help='Allow https traffic only to storage service if set to true. The default value is false.')
c.argument('tags', tags_type)
c.argument('custom_domain', help='User domain assigned to the storage account. Name is the CNAME source.')
c.argument('sku', help='The storage account SKU.', arg_type=get_enum_type(t_sku_name, default='standard_ragrs'))
c.argument('enable_files_aadds', aadds_type)
c.argument('enable_files_adds', adds_type)
c.argument('enable_large_file_share', arg_type=large_file_share_type)
c.argument('domain_name', domain_name_type)
c.argument('net_bios_domain_name', net_bios_domain_name_type)
c.argument('forest_name', forest_name_type)
c.argument('domain_guid', domain_guid_type)
c.argument('domain_sid', domain_sid_type)
c.argument('azure_storage_sid', azure_storage_sid_type)
c.argument('enable_hierarchical_namespace', arg_type=get_three_state_flag(),
options_list=['--enable-hierarchical-namespace', '--hns',
c.deprecate(target='--hierarchical-namespace', redirect='--hns', hide=True)],
help=" Allow the blob service to exhibit filesystem semantics. This property can be enabled only "
"when storage account kind is StorageV2.",
min_api='2018-02-01')
c.argument('encryption_key_type_for_table', arg_type=get_enum_type(['Account', 'Service']),
help='Set the encryption key type for Table service. "Account": Table will be encrypted '
'with account-scoped encryption key. "Service": Table will always be encrypted with '
'service-scoped keys. Currently the default encryption key type is "Service".',
min_api='2019-06-01', options_list=['--encryption-key-type-for-table', '-t'])
c.argument('encryption_key_type_for_queue', arg_type=get_enum_type(['Account', 'Service']),
help='Set the encryption key type for Queue service. "Account": Queue will be encrypted '
'with account-scoped encryption key. "Service": Queue will always be encrypted with '
'service-scoped keys. Currently the default encryption key type is "Service".',
min_api='2019-06-01', options_list=['--encryption-key-type-for-queue', '-q'])
c.argument('routing_choice', routing_choice_type)
c.argument('publish_microsoft_endpoints', publish_microsoft_endpoints_type)
c.argument('publish_internet_endpoints', publish_internet_endpoints_type)
c.argument('require_infrastructure_encryption', options_list=['--require-infrastructure-encryption', '-i'],
arg_type=get_three_state_flag(),
help='A boolean indicating whether or not the service applies a secondary layer of encryption with '
'platform managed keys for data at rest.')
c.argument('allow_blob_public_access', arg_type=get_three_state_flag(), min_api='2019-04-01',
help='Allow or disallow public access to all blobs or containers in the storage account. '
'The default value for this property is null, which is equivalent to true. When true, containers '
'in the account may be configured for public access. Note that setting this property to true does '
'not enable anonymous access to any data in the account. The additional step of configuring the '
'public access setting for a container is required to enable anonymous access.')
c.argument('min_tls_version', arg_type=get_enum_type(t_tls_version),
help='The minimum TLS version to be permitted on requests to storage. '
'The default interpretation is TLS 1.0 for this property')
c.argument('allow_shared_key_access', allow_shared_key_access_type)
with self.argument_context('storage account private-endpoint-connection',
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('private_endpoint_connection_name', options_list=['--name', '-n'],
help='The name of the private endpoint connection associated with the Storage Account.')
for item in ['approve', 'reject', 'show', 'delete']:
with self.argument_context('storage account private-endpoint-connection {}'.format(item),
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('private_endpoint_connection_name', options_list=['--name', '-n'], required=False,
help='The name of the private endpoint connection associated with the Storage Account.')
c.extra('connection_id', options_list=['--id'],
help='The ID of the private endpoint connection associated with the Storage Account. You can get '
'it using `az storage account show`.')
c.argument('account_name', help='The storage account name.', required=False)
c.argument('resource_group_name', help='The resource group name of specified storage account.',
required=False)
c.argument('description', help='Comments for {} operation.'.format(item))
with self.argument_context('storage account update', resource_type=ResourceType.MGMT_STORAGE) as c:
t_tls_version = self.get_models('MinimumTlsVersion', resource_type=ResourceType.MGMT_STORAGE)
c.register_common_storage_account_options()
c.argument('sku', arg_type=get_enum_type(t_sku_name),
help='Note that the SKU name cannot be updated to Standard_ZRS, Premium_LRS or Premium_ZRS, '
'nor can accounts of those SKU names be updated to any other value')
c.argument('custom_domain',
help='User domain assigned to the storage account. Name is the CNAME source. Use "" to clear '
'existing value.',
validator=validate_custom_domain)
c.argument('use_subdomain', help='Specify whether to use indirect CNAME validation.',
arg_type=get_enum_type(['true', 'false']))
c.argument('tags', tags_type, default=None)
c.argument('enable_files_aadds', aadds_type)
c.argument('enable_files_adds', adds_type)
c.argument('enable_large_file_share', arg_type=large_file_share_type)
c.argument('domain_name', domain_name_type)
c.argument('net_bios_domain_name', net_bios_domain_name_type)
c.argument('forest_name', forest_name_type)
c.argument('domain_guid', domain_guid_type)
c.argument('domain_sid', domain_sid_type)
c.argument('azure_storage_sid', azure_storage_sid_type)
c.argument('routing_choice', routing_choice_type)
c.argument('publish_microsoft_endpoints', publish_microsoft_endpoints_type)
c.argument('publish_internet_endpoints', publish_internet_endpoints_type)
c.argument('allow_blob_public_access', arg_type=get_three_state_flag(), min_api='2019-04-01',
help='Allow or disallow public access to all blobs or containers in the storage account. '
'The default value for this property is null, which is equivalent to true. When true, containers '
'in the account may be configured for public access. Note that setting this property to true does '
'not enable anonymous access to any data in the account. The additional step of configuring the '
'public access setting for a container is required to enable anonymous access.')
c.argument('min_tls_version', arg_type=get_enum_type(t_tls_version),
help='The minimum TLS version to be permitted on requests to storage. '
'The default interpretation is TLS 1.0 for this property')
c.argument('allow_shared_key_access', allow_shared_key_access_type)
with self.argument_context('storage account update', arg_group='Customer managed key', min_api='2017-06-01') as c:
t_key_source = self.get_models('KeySource', resource_type=ResourceType.MGMT_STORAGE)
c.argument('encryption_key_name', help='The name of the KeyVault key.', )
c.argument('encryption_key_vault', help='The Uri of the KeyVault.')
c.argument('encryption_key_version',
help='The version of the KeyVault key to use, which will opt out of implicit key rotation. '
'Please use "" to opt in key auto-rotation again.')
c.argument('encryption_key_source',
arg_type=get_enum_type(t_key_source),
help='The default encryption key source',
validator=validate_encryption_source)
    # Network Rule arguments shared by `storage account create` and `storage account update`.
    for scope in ['storage account create', 'storage account update']:
        with self.argument_context(scope, resource_type=ResourceType.MGMT_STORAGE, min_api='2017-06-01',
                                   arg_group='Network Rule') as c:
            t_bypass, t_default_action = self.get_models('Bypass', 'DefaultAction',
                                                         resource_type=ResourceType.MGMT_STORAGE)
            c.argument('bypass', nargs='+', validator=validate_bypass, arg_type=get_enum_type(t_bypass),
                       help='Bypass traffic for space-separated uses.')
            c.argument('default_action', arg_type=get_enum_type(t_default_action),
                       help='Default action to apply when no rule matches.')
    # `storage account show-connection-string` arguments.
    with self.argument_context('storage account show-connection-string') as c:
        c.argument('protocol', help='The default endpoint protocol.', arg_type=get_enum_type(['http', 'https']))
        c.argument('sas_token', help='The SAS token to be used in the connection-string.')
        c.argument('key_name', options_list=['--key'], help='The key to use.',
                   arg_type=get_enum_type(list(storage_account_key_options.keys())))
        # One optional custom-endpoint argument per service (--blob-endpoint, --file-endpoint, ...).
        for item in ['blob', 'file', 'queue', 'table']:
            c.argument('{}_endpoint'.format(item), help='Custom endpoint for {}s.'.format(item))
    # Arguments common to every `storage account encryption-scope` subcommand.
    with self.argument_context('storage account encryption-scope') as c:
        c.argument('account_name', help='The storage account name.')
        c.argument('resource_group_name', validator=process_resource_group, required=False)
        c.argument('encryption_scope_name', options_list=['--name', '-n'],
                   help='The name of the encryption scope within the specified storage account.')
    # Arguments shared by encryption-scope create and update.
    for scope in ['storage account encryption-scope create', 'storage account encryption-scope update']:
        with self.argument_context(scope, resource_type=ResourceType.MGMT_STORAGE) as c:
            from ._validators import validate_encryption_key
            t_encryption_key_source = self.get_models('EncryptionScopeSource', resource_type=ResourceType.MGMT_STORAGE)
            c.argument('key_source', options_list=['-s', '--key-source'],
                       arg_type=get_enum_type(t_encryption_key_source, default="Microsoft.Storage"),
                       help='The provider for the encryption scope.', validator=validate_encryption_key)
            c.argument('key_uri', options_list=['-u', '--key-uri'],
                       help='The object identifier for a key vault key object. When applied, the encryption scope will '
                            'use the key referenced by the identifier to enable customer-managed key support on this '
                            'encryption scope.')
            c.argument('require_infrastructure_encryption', options_list=['--require-infrastructure-encryption', '-i'],
                       arg_type=get_three_state_flag(), min_api='2021-01-01',
                       help='A boolean indicating whether or not the service applies a secondary layer of encryption '
                            'with platform managed keys for data at rest.')
    # Update-only overrides. NOTE: re-registers key_source WITHOUT a default, reusing
    # t_encryption_key_source / validate_encryption_key bound in the loop above.
    with self.argument_context('storage account encryption-scope update') as c:
        t_state = self.get_models("EncryptionScopeState", resource_type=ResourceType.MGMT_STORAGE)
        c.argument('key_source', options_list=['-s', '--key-source'],
                   arg_type=get_enum_type(t_encryption_key_source),
                   help='The provider for the encryption scope.', validator=validate_encryption_key)
        c.argument('state', arg_type=get_enum_type(t_state),
                   help='Change the state the encryption scope. When disabled, '
                        'all blob read/write operations using this encryption scope will fail.')
    # `storage account keys list`: optional expansion of key types (e.g. kerberos keys).
    with self.argument_context('storage account keys list', resource_type=ResourceType.MGMT_STORAGE) as c:
        t_expand_key_type = self.get_models('ListKeyExpand', resource_type=ResourceType.MGMT_STORAGE)
        c.argument("expand", options_list=['--expand-key-type'], help='Specify the expanded key types to be listed.',
                   arg_type=get_enum_type(t_expand_key_type), min_api='2019-04-01', is_preview=True)
    # `storage account keys renew` arguments.
    with self.argument_context('storage account keys renew', resource_type=ResourceType.MGMT_STORAGE) as c:
        c.argument('key_name', options_list=['--key'], help='The key options to regenerate.',
                   arg_type=get_enum_type(list(storage_account_key_options.keys())))
        c.extra('key_type', help='The key type to regenerate. If --key-type is not specified, one of access keys will '
                                 'be regenerated by default.', arg_type=get_enum_type(['kerb']), min_api='2019-04-01')
        c.argument('account_name', acct_name_type, id_part=None)
with self.argument_context('storage account management-policy create') as c:
c.argument('policy', type=file_type, completer=FilesCompleter(),
help='The Storage Account ManagementPolicies Rules, in JSON format. See more details in: '
'https://docs.microsoft.com/azure/storage/common/storage-lifecycle-managment-concepts.')
    # Shared --account-name help for every management-policy subcommand.
    for item in ['create', 'update', 'show', 'delete']:
        with self.argument_context('storage account management-policy {}'.format(item)) as c:
            c.argument('account_name', help='The name of the storage account within the specified resource group.')
    # `storage account keys list` account-name override (no id_part so --ids is not parsed).
    with self.argument_context('storage account keys list') as c:
        c.argument('account_name', acct_name_type, id_part=None)
    # `storage account network-rule` group arguments.
    with self.argument_context('storage account network-rule', resource_type=ResourceType.MGMT_STORAGE) as c:
        from ._validators import validate_subnet
        c.argument('account_name', acct_name_type, id_part=None)
        c.argument('ip_address', help='IPv4 address or CIDR range.')
        c.argument('subnet', help='Name or ID of subnet. If name is supplied, `--vnet-name` must be supplied.')
        c.argument('vnet_name', help='Name of a virtual network.', validator=validate_subnet)
        c.argument('action', help='The action of virtual network rule.')
        c.argument('resource_id', help='The resource id to add in network rule.', arg_group='Resource Access Rule',
                   min_api='2020-08-01-preview')
        c.argument('tenant_id', help='The tenant id to add in network rule.', arg_group='Resource Access Rule',
                   min_api='2020-08-01-preview')
    # `storage account blob-service-properties show` arguments.
    with self.argument_context('storage account blob-service-properties show',
                               resource_type=ResourceType.MGMT_STORAGE) as c:
        c.argument('account_name', acct_name_type, id_part=None)
        c.argument('resource_group_name', required=False, validator=process_resource_group)
with self.argument_context('storage account blob-service-properties update',
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('account_name', acct_name_type, id_part=None)
c.argument('resource_group_name', required=False, validator=process_resource_group)
c.argument('enable_change_feed', arg_type=get_three_state_flag(), min_api='2019-04-01',
arg_group='Change Feed Policy')
c.argument('change_feed_retention_days', is_preview=True,
options_list=['--change-feed-retention-days', '--change-feed-days'],
type=int, min_api='2019-06-01', arg_group='Change Feed Policy',
validator=validator_change_feed_retention_days,
help='Indicate the duration of changeFeed retention in days. '
'Minimum value is 1 day and maximum value is 146000 days (400 years). '
'A null value indicates an infinite retention of the change feed.'
'(Use `--enable-change-feed` without `--change-feed-days` to indicate null)')
c.argument('enable_container_delete_retention',
arg_type=get_three_state_flag(),
options_list=['--enable-container-delete-retention', '--container-retention'],
arg_group='Container Delete Retention Policy', min_api='2019-06-01',
help='Enable container delete retention policy for container soft delete when set to true. '
'Disable container delete retention policy when set to false.')
c.argument('container_delete_retention_days',
options_list=['--container-delete-retention-days', '--container-days'],
type=int, arg_group='Container Delete Retention Policy',
min_api='2019-06-01', validator=validate_container_delete_retention_days,
help='Indicate the number of days that the deleted container should be retained. The minimum '
'specified value can be 1 and the maximum value can be 365.')
c.argument('enable_delete_retention', arg_type=get_three_state_flag(), arg_group='Delete Retention Policy',
min_api='2018-07-01')
c.argument('delete_retention_days', type=int, arg_group='Delete Retention Policy',
validator=validate_delete_retention_days, min_api='2018-07-01')
c.argument('enable_restore_policy', arg_type=get_three_state_flag(), arg_group='Restore Policy',
min_api='2019-06-01', help="Enable blob restore policy when it set to true.")
c.argument('restore_days', type=int, arg_group='Restore Policy',
min_api='2019-06-01', help="The number of days for the blob can be restored. It should be greater "
"than zero and less than Delete Retention Days.")
c.argument('enable_versioning', arg_type=get_three_state_flag(), help='Versioning is enabled if set to true.',
min_api='2019-06-01')
c.argument('default_service_version', options_list=['--default-service-version', '-d'],
type=get_api_version_type(), min_api='2018-07-01',
help="Indicate the default version to use for requests to the Blob service if an incoming request's "
"version is not specified.")
    # `storage account file-service-properties show` arguments.
    with self.argument_context('storage account file-service-properties show',
                               resource_type=ResourceType.MGMT_STORAGE) as c:
        c.argument('account_name', acct_name_type, id_part=None)
        c.argument('resource_group_name', required=False, validator=process_resource_group)
    # `storage account file-service-properties update`: share soft delete and SMB Multichannel.
    with self.argument_context('storage account file-service-properties update',
                               resource_type=ResourceType.MGMT_STORAGE) as c:
        c.argument('account_name', acct_name_type, id_part=None)
        c.argument('resource_group_name', required=False, validator=process_resource_group)
        c.argument('enable_delete_retention', arg_type=get_three_state_flag(), arg_group='Delete Retention Policy',
                   min_api='2019-06-01', help='Enable file service properties for share soft delete.')
        c.argument('delete_retention_days', type=int, arg_group='Delete Retention Policy',
                   validator=validate_file_delete_retention_days, min_api='2019-06-01',
                   help='Indicate the number of days that the deleted item should be retained. The minimum specified '
                        'value can be 1 and the maximum value can be 365.')
        c.argument('enable_smb_multichannel', options_list=['--enable-smb-multichannel', '--mc'],
                   arg_type=get_three_state_flag(), min_api='2020-08-01-preview',
                   help='Set SMB Multichannel setting for file service. Applies to Premium FileStorage only.')
    # `storage account generate-sas`: account-level SAS; the token itself is computed, not supplied.
    with self.argument_context('storage account generate-sas') as c:
        from ._validators import get_not_none_validator
        t_account_permissions = self.get_sdk('common.models#AccountPermissions')
        c.register_sas_arguments()
        c.argument('services', type=services_type(self))
        c.argument('resource_types', type=resource_type_type(self))
        c.argument('expiry', type=get_datetime_type(True))
        c.argument('start', type=get_datetime_type(True))
        c.argument('account_name', acct_name_type, options_list=['--account-name'],
                   validator=get_not_none_validator('account_name'))
        c.argument('permission', options_list=('--permissions',),
                   help='The permissions the SAS grants. Allowed values: {}. Can be combined.'.format(
                       get_permission_help_string(t_account_permissions)),
                   validator=get_permission_validator(t_account_permissions))
        # sas_token is an output, never a user-supplied argument.
        c.ignore('sas_token')
or_policy_type = CLIArgumentType(
options_list=['--policy', '-p'],
help='The object replication policy definition between two storage accounts, in JSON format. '
'Multiple rules can be defined in one policy.'
)
policy_id_type = CLIArgumentType(
options_list=['--policy-id'],
help='The ID of object replication policy or "default" if the policy ID is unknown. Policy Id will be '
'auto-generated when setting on destination account. Required when setting on source account.'
)
rule_id_type = CLIArgumentType(
options_list=['--rule-id', '-r'],
help='Rule Id is auto-generated for each new rule on destination account. It is required '
'for put policy on source account.'
)
prefix_math_type = CLIArgumentType(
nargs='+', arg_group='Filters', options_list=['--prefix-match', '--prefix'],
help='Optional. Filter the results to replicate only blobs whose names begin with the specified '
'prefix.'
)
min_creation_time_type = CLIArgumentType(
options_list=['--min-creation-time', '-t'], arg_group='Filters', type=get_datetime_type(True),
help="Blobs created after the time will be replicated to the destination. It must be in datetime format "
"'yyyy-MM-ddTHH:mm:ssZ'. Example: 2020-02-19T16:05:00Z")
with self.argument_context('storage account or-policy') as c:
c.argument('account_name', acct_name_type, id_part=None)
c.argument('resource_group_name', required=False, validator=process_resource_group)
c.argument('object_replication_policy_id', policy_id_type)
c.argument('policy_id', policy_id_type)
c.argument('source_account', options_list=['--source-account', '-s'],
help='The source storage account name. Required when no --policy provided.')
c.argument('destination_account', options_list=['--destination-account', '-d'],
help='The destination storage account name. Apply --account-name value as destination account '
'when there is no destination account provided in --policy and --destination-account.')
c.argument('properties', or_policy_type)
c.argument('prefix_match', prefix_math_type)
c.argument('min_creation_time', min_creation_time_type)
    # Rule-level arguments shared by or-policy create/update.
    for item in ['create', 'update']:
        with self.argument_context('storage account or-policy {}'.format(item),
                                   arg_group="Object Replication Policy Rule") as c:
            c.argument('rule_id', help='Rule Id is auto-generated for each new rule on destination account. It is '
                                       'required for put policy on source account.')
            c.argument('source_container', options_list=['--source-container', '--scont'],
                       help='The source storage container name. Required when no --policy provided.')
            c.argument('destination_container', options_list=['--destination-container', '--dcont'],
                       help='The destination storage container name. Required when no --policy provided.')
    # create additionally validates the supplied policy document.
    with self.argument_context('storage account or-policy create') as c:
        c.argument('properties', or_policy_type, validator=validate_or_policy)
    # `storage account or-policy rule`: short -s/-d aliases override the --scont/--dcont ones above.
    with self.argument_context('storage account or-policy rule') as c:
        c.argument('policy_id', policy_id_type)
        c.argument('source_container', options_list=['--source-container', '-s'],
                   help='The source storage container name.')
        c.argument('destination_container', options_list=['--destination-container', '-d'],
                   help='The destination storage container name.')
        c.argument('rule_id', rule_id_type)
    # `storage logging` commands: services string is a combination of b(lob), q(ueue), t(able).
    for item in ['show', 'off']:
        with self.argument_context('storage logging {}'.format(item)) as c:
            c.extra('services', validator=get_char_options_validator('bqt', 'services'), default='bqt')
    with self.argument_context('storage logging update') as c:
        c.extra('services', validator=get_char_options_validator('bqt', 'services'), options_list='--services',
                required=True)
        c.argument('log', validator=get_char_options_validator('rwd', 'log'))
        c.argument('retention', type=int)
        c.argument('version', type=float, validator=validate_logging_version)
    # `storage metrics` commands: services also includes f(ile).
    with self.argument_context('storage metrics show') as c:
        c.extra('services', validator=get_char_options_validator('bfqt', 'services'), default='bfqt')
        c.argument('interval', arg_type=get_enum_type(['hour', 'minute', 'both']))
    with self.argument_context('storage metrics update') as c:
        c.extra('services', validator=get_char_options_validator('bfqt', 'services'), options_list='--services',
                required=True)
        c.argument('hour', validator=process_metric_update_namespace, arg_type=get_enum_type(['true', 'false']))
        c.argument('minute', arg_type=get_enum_type(['true', 'false']))
        c.argument('api', arg_type=get_enum_type(['true', 'false']))
        c.argument('retention', type=int)
    # Arguments common to the whole `storage blob` group.
    with self.argument_context('storage blob') as c:
        c.argument('blob_name', options_list=('--name', '-n'), arg_type=blob_name_type)
        c.argument('destination_path', help='The destination path that will be appended to the blob name.')
    # `storage blob list` arguments (track-2 SDK enums for --include).
    with self.argument_context('storage blob list') as c:
        from ._validators import get_include_help_string
        t_blob_include = self.get_sdk('_generated.models._azure_blob_storage_enums#ListBlobsIncludeItem',
                                      resource_type=ResourceType.DATA_STORAGE_BLOB)
        c.register_container_arguments()
        c.argument('delimiter',
                   help='When the request includes this parameter, the operation returns a BlobPrefix element in the '
                        'result list that acts as a placeholder for all blobs whose names begin with the same substring '
                        'up to the appearance of the delimiter character. The delimiter may be a single character or a '
                        'string.')
        c.argument('include', help="Specify one or more additional datasets to include in the response. "
                                   "Options include: {}. Can be combined.".format(get_include_help_string(t_blob_include)),
                   validator=validate_included_datasets_validator(include_class=t_blob_include))
        c.argument('marker', arg_type=marker_type)
        c.argument('num_results', arg_type=num_results_type)
        c.argument('prefix',
                   help='Filter the results to return only blobs whose name begins with the specified prefix.')
        c.argument('show_next_marker', action='store_true',
                   help='Show nextMarker in result when specified.')
with self.argument_context('storage blob generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
t_blob_permissions = self.get_sdk('blob.models#BlobPermissions')
c.register_sas_arguments()
c.argument('cache_control', help='Response header value for Cache-Control when resource is accessed'
'using this shared access signature.')
c.argument('content_disposition', help='Response header value for Content-Disposition when resource is accessed'
'using this shared access signature.')
c.argument('content_encoding', help='Response header value for Content-Encoding when resource is accessed'
'using this shared access signature.')
c.argument('content_language', help='Response header value for Content-Language when resource is accessed'
'using this shared access signature.')
c.argument('content_type', help='Response header value for Content-Type when resource is accessed'
'using this shared access signature.')
c.argument('full_uri', action='store_true',
help='Indicates that this command return the full blob URI and the shared access signature token.')
c.argument('as_user', min_api='2018-11-09', action='store_true',
validator=as_user_validator,
help="Indicates that this command return the SAS signed with the user delegation key. "
"The expiry parameter and '--auth-mode login' are required if this argument is specified. ")
c.argument('id', options_list='--policy-name', validator=validate_policy,
help='The name of a stored access policy within the container\'s ACL.',
completer=get_storage_acl_name_completion_list(t_base_blob_service, 'container_name',
'get_container_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format(get_permission_help_string(t_blob_permissions)),
validator=get_permission_validator(t_blob_permissions))
c.ignore('sas_token')
with self.argument_context('storage blob restore', resource_type=ResourceType.MGMT_STORAGE) as c:
from ._validators import BlobRangeAddAction
c.argument('blob_ranges', options_list=['--blob-range', '-r'], action=BlobRangeAddAction, nargs='+',
help='Blob ranges to restore. You need to two values to specify start_range and end_range for each '
'blob range, e.g. -r blob1 blob2. Note: Empty means account start as start range value, and '
'means account end for end range.')
c.argument('account_name', acct_name_type, id_part=None)
c.argument('resource_group_name', required=False, validator=process_resource_group)
c.argument('time_to_restore', type=get_datetime_type(True), options_list=['--time-to-restore', '-t'],
help='Restore blob to the specified time, which should be UTC datetime in (Y-m-d\'T\'H:M:S\'Z\').')
    # `storage blob rewrite`: server-side rewrite from a source URL (track-2, 2020-04-08+).
    with self.argument_context('storage blob rewrite', resource_type=ResourceType.DATA_STORAGE_BLOB,
                               min_api='2020-04-08') as c:
        c.register_blob_arguments()
        c.register_precondition_options()
        c.argument('source_url', options_list=['--source-uri', '-u'],
                   help='A URL of up to 2 KB in length that specifies a file or blob. The value should be URL-encoded '
                        'as it would appear in a request URI. If the source is in another account, the source must either '
                        'be public or must be authenticated via a shared access signature. If the source is public, no '
                        'authentication is required.')
        c.extra('lease', options_list='--lease-id',
                help='Required if the blob has an active lease. Value can be a BlobLeaseClient object '
                     'or the lease ID as a string.')
        c.extra('standard_blob_tier', arg_type=get_enum_type(t_blob_tier), options_list='--tier',
                help='A standard blob tier value to set the blob to. For this version of the library, '
                     'this is only applicable to block blobs on standard storage accounts.')
        c.extra('encryption_scope',
                help='A predefined encryption scope used to encrypt the data on the service. An encryption scope '
                     'can be created using the Management API and referenced here by name. If a default encryption scope '
                     'has been defined at the container, this value will override it if the container-level scope is '
                     'configured to allow overrides. Otherwise an error will be raised.')
    # `storage blob update`: content-settings headers in update mode (unset values preserved).
    with self.argument_context('storage blob update') as c:
        t_blob_content_settings = self.get_sdk('blob.models#ContentSettings')
        c.register_content_settings_argument(t_blob_content_settings, update=True)
    # `storage blob exists`: blob name is mandatory here.
    with self.argument_context('storage blob exists') as c:
        c.argument('blob_name', required=True)
with self.argument_context('storage blob url') as c:
c.argument('protocol', arg_type=get_enum_type(['http', 'https'], 'https'), help='Protocol to use.')
c.argument('snapshot', help='An string value that uniquely identifies the snapshot. The value of'
'this query parameter indicates the snapshot version.')
    # `storage blob set-tier`: change access tier; rehydrate options for archived blobs.
    with self.argument_context('storage blob set-tier') as c:
        from azure.cli.command_modules.storage._validators import (blob_rehydrate_priority_validator)
        c.register_blob_arguments()
        c.argument('blob_type', options_list=('--type', '-t'), arg_type=get_enum_type(('block', 'page')))
        c.argument('tier', validator=blob_tier_validator)
        c.argument('rehydrate_priority', options_list=('--rehydrate-priority', '-r'),
                   arg_type=get_enum_type(('High', 'Standard')), validator=blob_rehydrate_priority_validator,
                   is_preview=True, help="Indicate the priority with which to rehydrate an archived blob. "
                                         "The priority can be set on a blob only once, default value is Standard.")
    # Blob soft-delete policy (legacy delete-policy command).
    with self.argument_context('storage blob service-properties delete-policy update') as c:
        c.argument('enable', arg_type=get_enum_type(['true', 'false']), help='Enables/disables soft-delete.')
        c.argument('days_retained', type=int,
                   help='Number of days that soft-deleted blob will be retained. Must be in range [1,365].')
    # `storage blob service-properties update`: soft delete + static website settings.
    with self.argument_context('storage blob service-properties update', min_api='2018-03-28') as c:
        c.argument('delete_retention', arg_type=get_three_state_flag(), arg_group='Soft Delete',
                   help='Enables soft-delete.')
        c.argument('delete_retention_period', type=int, arg_group='Soft Delete',
                   help='Number of days that soft-deleted blob will be retained. Must be in range [1,365].')
        c.argument('static_website', arg_group='Static Website', arg_type=get_three_state_flag(),
                   help='Enables static-website.')
        c.argument('index_document', help='Represents the name of the index document. This is commonly "index.html".',
                   arg_group='Static Website')
        c.argument('error_document_404_path', options_list=['--404-document'], arg_group='Static Website',
                   help='Represents the path to the error document that should be shown when an error 404 is issued,'
                        ' in other words, when a browser requests a page that does not exist.')
    # `storage blob show` arguments.
    with self.argument_context('storage blob show') as c:
        c.register_blob_arguments()
        c.register_precondition_options()
        c.extra('snapshot', help='The snapshot parameter is an opaque DateTime value that, when present, '
                                 'specifies the blob snapshot to retrieve.')
        c.argument('lease_id', help='Required if the blob has an active lease.')
    # `storage blob upload` arguments.
    with self.argument_context('storage blob upload') as c:
        from ._validators import page_blob_tier_validator, validate_encryption_scope_client_params
        from .sdkutil import get_blob_types, get_blob_tier_names
        t_blob_content_settings = self.get_sdk('blob.models#ContentSettings')
        c.register_content_settings_argument(t_blob_content_settings, update=False)
        c.register_blob_arguments()
        c.argument('file_path', options_list=('--file', '-f'), type=file_type, completer=FilesCompleter())
        c.argument('max_connections', type=int)
        c.argument('blob_type', options_list=('--type', '-t'), validator=validate_blob_type,
                   arg_type=get_enum_type(get_blob_types()))
        c.argument('validate_content', action='store_true', min_api='2016-05-31')
        c.extra('no_progress', progress_type)
        c.extra('socket_timeout', socket_timeout_type)
        # TODO: Remove once #807 is complete. Smart Create Generation requires this parameter.
        # register_extra_cli_argument('storage blob upload', '_subscription_id', options_list=('--subscription',),
        #                             help=argparse.SUPPRESS)
        c.argument('tier', validator=page_blob_tier_validator,
                   arg_type=get_enum_type(get_blob_tier_names(self.cli_ctx, 'PremiumPageBlobTier')),
                   min_api='2017-04-17')
        c.argument('encryption_scope', validator=validate_encryption_scope_client_params,
                   help='A predefined encryption scope used to encrypt the data on the service.')
    # `storage blob upload-batch` arguments.
    with self.argument_context('storage blob upload-batch') as c:
        from .sdkutil import get_blob_types
        t_blob_content_settings = self.get_sdk('blob.models#ContentSettings')
        c.register_content_settings_argument(t_blob_content_settings, update=False, arg_group='Content Control')
        c.ignore('source_files', 'destination_container_name')
        c.argument('source', options_list=('--source', '-s'))
        c.argument('destination', options_list=('--destination', '-d'))
        c.argument('max_connections', type=int,
                   help='Maximum number of parallel connections to use when the blob size exceeds 64MB.')
        c.argument('maxsize_condition', arg_group='Content Control')
        c.argument('validate_content', action='store_true', min_api='2016-05-31', arg_group='Content Control')
        c.argument('blob_type', options_list=('--type', '-t'), arg_type=get_enum_type(get_blob_types()))
        c.extra('no_progress', progress_type)
        c.extra('socket_timeout', socket_timeout_type)
    # `storage blob download` arguments.
    with self.argument_context('storage blob download') as c:
        c.argument('file_path', options_list=('--file', '-f'), type=file_type,
                   completer=FilesCompleter(), validator=blob_download_file_path_validator)
        c.argument('max_connections', type=int)
        c.argument('start_range', type=int)
        c.argument('end_range', type=int)
        c.argument('validate_content', action='store_true', min_api='2016-05-31')
        c.extra('no_progress', progress_type)
        c.extra('socket_timeout', socket_timeout_type)
    # `storage blob download-batch` arguments.
    with self.argument_context('storage blob download-batch') as c:
        c.ignore('source_container_name')
        c.argument('destination', options_list=('--destination', '-d'))
        c.argument('source', options_list=('--source', '-s'))
        c.extra('no_progress', progress_type)
        c.extra('socket_timeout', socket_timeout_type)
        c.argument('max_connections', type=int,
                   help='Maximum number of parallel connections to use when the blob size exceeds 64MB.')
    # `storage blob delete` / `delete-batch` arguments.
    with self.argument_context('storage blob delete') as c:
        from .sdkutil import get_delete_blob_snapshot_type_names
        c.argument('delete_snapshots', arg_type=get_enum_type(get_delete_blob_snapshot_type_names()))
    with self.argument_context('storage blob delete-batch') as c:
        c.ignore('source_container_name')
        c.argument('source', options_list=('--source', '-s'))
        c.argument('delete_snapshots', arg_type=get_enum_type(get_delete_blob_snapshot_type_names()),
                   help='Required if the blob has associated snapshots.')
        c.argument('lease_id', help='The active lease id for the blob.')
    # `storage blob lease` group and subcommands.
    with self.argument_context('storage blob lease') as c:
        c.argument('blob_name', arg_type=blob_name_type)
    with self.argument_context('storage blob lease acquire') as c:
        c.register_precondition_options()
        c.register_blob_arguments()
        c.extra('lease_id', options_list='--proposed-lease-id', help='Proposed lease ID, in a GUID string format. '
                'The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format.')
        c.argument('lease_duration', help='Specify the duration of the lease, in seconds, or negative one (-1) for '
                                          'a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease '
                                          'duration cannot be changed using renew or change. Default is -1 (infinite lease)', type=int)
    with self.argument_context('storage blob lease break') as c:
        c.register_precondition_options()
        c.register_blob_arguments()
        c.argument('lease_break_period', type=int,
                   help="This is the proposed duration of seconds that the lease should continue before it is broken, "
                        "between 0 and 60 seconds. This break period is only used if it is shorter than the time remaining "
                        "on the lease. If longer, the time remaining on the lease is used. A new lease will not be "
                        "available before the break period has expired, but the lease may be held for longer than the break "
                        "period. If this header does not appear with a break operation, a fixed-duration lease breaks after "
                        "the remaining lease period elapses, and an infinite lease breaks immediately.")
    with self.argument_context('storage blob lease change') as c:
        c.register_precondition_options()
        c.register_blob_arguments()
        c.extra('proposed_lease_id', help='Proposed lease ID, in a GUID string format. The Blob service returns 400 '
                '(Invalid request) if the proposed lease ID is not in the correct format.', required=True)
        c.extra('lease_id', help='Required if the blob has an active lease.', required=True)
    for item in ['release', 'renew']:
        with self.argument_context('storage blob lease {}'.format(item)) as c:
            c.register_precondition_options()
            c.register_blob_arguments()
            c.extra('lease_id', help='Required if the blob has an active lease.', required=True)
with self.argument_context('storage copy') as c:
c.argument('destination',
options_list=['--destination', '-d',
c.deprecate(target='--destination-local-path', redirect='--destination')],
help="The path/url of copy destination. "
"It can be a local path, an url to azure storage server. If you provide destination parameter "
"here, you do not need to provide arguments in copy destination arguments group and copy "
"destination arguments will be deprecated in future.", required=False)
c.argument('source',
options_list=['--source', '-s',
c.deprecate(target='--source-local-path', redirect='--source')],
help="The path/url of copy source. It can be a local"
" path, an url to azure storage server or AWS S3 buckets. If you provide source parameter here,"
" you do not need to provide arguments in copy source arguments group and copy source arguments"
" will be deprecated in future.", required=False)
for item in ['destination', 'source']:
c.extra('{}_container'.format(item), arg_group='Copy {}'.format(item),
help='Container name of copy {} storage account'.format(item))
c.extra('{}_blob'.format(item), arg_group='Copy {}'.format(item),
help='Blob name in blob container of copy {} storage account'.format(item))
c.extra('{}_share'.format(item), arg_group='Copy {}'.format(item),
help='File share name of copy {} storage account'.format(item))
c.extra('{}_file_path'.format(item), arg_group='Copy {}'.format(item),
help='File path in file share of copy {} storage account'.format(item))
c.argument('account_name', acct_name_type, arg_group='Storage Account', id_part=None,
options_list=['--account-name',
c.deprecate(target='--destination-account-name', redirect='--account-name')],
help='Storage account name of copy destination')
c.extra('source_account_name', arg_group='Copy source',
help='Account name of copy source storage account.')
c.extra('source_account_key', arg_group='Copy source',
help='Account key of copy source storage account. Must be used in conjunction with source storage '
'account name.')
c.extra('source_connection_string', arg_group='Copy source',
options_list=['--source-connection-string', '--src-conn'],
help='Connection string of source storage account.')
c.extra('source_sas', arg_group='Copy source',
help='Shared Access Signature (SAS) token of copy source. Must be used in conjunction with source '
'storage account name.')
c.argument('put_md5', arg_group='Additional Flags', action='store_true',
help='Create an MD5 hash of each file, and save the hash as the Content-MD5 property of the '
'destination blob/file.Only available when uploading.')
c.argument('blob_type', arg_group='Additional Flags',
arg_type=get_enum_type(["BlockBlob", "PageBlob", "AppendBlob"]),
help='The type of blob at the destination.')
c.argument('preserve_s2s_access_tier', arg_group='Additional Flags', arg_type=get_three_state_flag(),
help='Preserve access tier during service to service copy. '
'Please refer to https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers '
'to ensure destination storage account support setting access tier. In the cases that setting '
'access tier is not supported, please use `--preserve-s2s-access-tier false` to bypass copying '
'access tier. (Default true)')
c.argument('exclude_pattern', exclude_pattern_type)
c.argument('include_pattern', include_pattern_type)
c.argument('exclude_path', exclude_path_type)
c.argument('include_path', include_path_type)
c.argument('recursive', recursive_type)
c.argument('content_type', arg_group='Additional Flags', help="Specify content type of the file. ")
c.argument('follow_symlinks', arg_group='Additional Flags', action='store_true',
help='Follow symbolic links when uploading from local file system.')
with self.argument_context('storage blob copy') as c:
for item in ['destination', 'source']:
c.argument('{}_if_modified_since'.format(item), arg_group='Pre-condition', arg_type=if_modified_since_type)
c.argument('{}_if_unmodified_since'.format(item), arg_group='Pre-condition',
arg_type=if_unmodified_since_type)
c.argument('{}_if_match'.format(item), arg_group='Pre-condition')
c.argument('{}_if_none_match'.format(item), arg_group='Pre-condition')
c.argument('container_name', container_name_type, options_list=('--destination-container', '-c'))
c.argument('blob_name', blob_name_type, options_list=('--destination-blob', '-b'),
help='Name of the destination blob. If the exists, it will be overwritten.')
c.argument('source_lease_id', arg_group='Copy Source')
with self.argument_context('storage blob copy start') as c:
from azure.cli.command_modules.storage._validators import validate_source_uri
c.register_source_uri_arguments(validator=validate_source_uri)
c.argument('requires_sync', arg_type=get_three_state_flag(),
help='Enforce that the service will not return a response until the copy is complete.'
'Not support for standard page blob.')
    with self.argument_context('storage blob copy start-batch', arg_group='Copy Source') as c:
        from azure.cli.command_modules.storage._validators import get_source_file_or_blob_service_client
        # 'source_client' is hidden from the CLI (ignore_type); presumably the
        # validator constructs it from the source account/SAS/URI values
        # registered below — confirm against the validator implementation.
        c.argument('source_client', ignore_type, validator=get_source_file_or_blob_service_client)
        c.extra('source_account_name')
        c.extra('source_account_key')
        c.extra('source_uri')
        c.argument('source_sas')
        c.argument('source_container')
        c.argument('source_share')
with self.argument_context('storage blob incremental-copy start') as c:
from azure.cli.command_modules.storage._validators import process_blob_source_uri
c.register_source_uri_arguments(validator=process_blob_source_uri, blob_only=True)
c.argument('destination_if_modified_since', arg_group='Pre-condition', arg_type=if_modified_since_type)
c.argument('destination_if_unmodified_since', arg_group='Pre-condition', arg_type=if_unmodified_since_type)
c.argument('destination_if_match', arg_group='Pre-condition')
c.argument('destination_if_none_match', arg_group='Pre-condition')
c.argument('container_name', container_name_type, options_list=('--destination-container', '-c'))
c.argument('blob_name', blob_name_type, options_list=('--destination-blob', '-b'),
help='Name of the destination blob. If the exists, it will be overwritten.')
c.argument('source_lease_id', arg_group='Copy Source')
with self.argument_context('storage blob query') as c:
from ._validators import validate_text_configuration
c.register_blob_arguments()
c.register_precondition_options()
line_separator = CLIArgumentType(help="The string used to separate records.", default='\n')
column_separator = CLIArgumentType(help="The string used to separate columns.", default=',')
quote_char = CLIArgumentType(help="The string used to quote a specific field.", default='"')
record_separator = CLIArgumentType(help="The string used to separate records.", default='\n')
escape_char = CLIArgumentType(help="The string used as an escape character. Default to empty.", default="")
has_header = CLIArgumentType(
arg_type=get_three_state_flag(),
help="Whether the blob data includes headers in the first line. "
"The default value is False, meaning that the data will be returned inclusive of the first line. "
"If set to True, the data will be returned exclusive of the first line.", default=False)
c.extra('lease', options_list='--lease-id',
help='Required if the blob has an active lease.')
c.argument('query_expression', help='The query expression in SQL. The maximum size of the query expression '
'is 256KiB. For more information about the expression syntax, please see '
'https://docs.microsoft.com/azure/storage/blobs/query-acceleration-sql-reference')
c.extra('input_format', arg_type=get_enum_type(['csv', 'json']), validator=validate_text_configuration,
help='Serialization type of the data currently stored in the blob. '
'The default is to treat the blob data as CSV data formatted in the default dialect.'
'The blob data will be reformatted according to that profile when blob format is specified. '
'If you choose `json`, please specify `Output Json Text Configuration Arguments` accordingly; '
'If you choose `csv`, please specify `Output Delimited Text Configuration Arguments`.')
c.extra('output_format', arg_type=get_enum_type(['csv', 'json']),
help='Output serialization type for the data stream. '
'By default the data will be returned as it is represented in the blob. '
'By providing an output format, the blob data will be reformatted according to that profile. '
'If you choose `json`, please specify `Output Json Text Configuration Arguments` accordingly; '
'If you choose `csv`, please specify `Output Delimited Text Configuration Arguments`.')
c.extra('in_line_separator',
arg_group='Input Json Text Configuration',
arg_type=line_separator)
c.extra('in_column_separator', arg_group='Input Delimited Text Configuration',
arg_type=column_separator)
c.extra('in_quote_char', arg_group='Input Delimited Text Configuration',
arg_type=quote_char)
c.extra('in_record_separator', arg_group='Input Delimited Text Configuration',
arg_type=record_separator)
c.extra('in_escape_char', arg_group='Input Delimited Text Configuration',
arg_type=escape_char)
c.extra('in_has_header', arg_group='Input Delimited Text Configuration',
arg_type=has_header)
c.extra('out_line_separator',
arg_group='Output Json Text Configuration',
arg_type=line_separator)
c.extra('out_column_separator', arg_group='Output Delimited Text Configuration',
arg_type=column_separator)
c.extra('out_quote_char', arg_group='Output Delimited Text Configuration',
arg_type=quote_char)
c.extra('out_record_separator', arg_group='Output Delimited Text Configuration',
arg_type=record_separator)
c.extra('out_escape_char', arg_group='Output Delimited Text Configuration',
arg_type=escape_char)
c.extra('out_has_header', arg_group='Output Delimited Text Configuration',
arg_type=has_header)
c.extra('result_file', help='Specify the file path to save result.')
c.ignore('input_config')
c.ignore('output_config')
with self.argument_context('storage blob sync') as c:
c.extra('destination_container', options_list=['--container', '-c'], required=True,
help='The sync destination container.')
c.extra('destination_path', options_list=['--destination', '-d'],
validator=validate_azcopy_upload_destination_url,
help='The sync destination path.')
c.argument('source', options_list=['--source', '-s'],
help='The source file path to sync from.')
c.ignore('destination')
c.argument('exclude_pattern', exclude_pattern_type)
c.argument('include_pattern', include_pattern_type)
c.argument('exclude_path', exclude_path_type)
with self.argument_context('storage container') as c:
from .sdkutil import get_container_access_type_names
c.argument('container_name', container_name_type, options_list=('--name', '-n'))
c.argument('public_access', validator=validate_container_public_access,
arg_type=get_enum_type(get_container_access_type_names()),
help='Specifies whether data in the container may be accessed publicly.')
with self.argument_context('storage container create') as c:
c.argument('container_name', container_name_type, options_list=('--name', '-n'), completer=None)
c.argument('fail_on_exist', help='Throw an exception if the container already exists.')
c.argument('account_name', help='Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT.')
c.argument('default_encryption_scope', options_list=['--default-encryption-scope', '-d'],
arg_group='Encryption Policy', is_preview=True,
help='Default the container to use specified encryption scope for all writes.')
c.argument('prevent_encryption_scope_override', options_list=['--prevent-encryption-scope-override', '-p'],
arg_type=get_three_state_flag(), arg_group='Encryption Policy', is_preview=True,
help='Block override of encryption scope from the container default.')
with self.argument_context('storage container delete') as c:
c.argument('fail_not_exist', help='Throw an exception if the container does not exist.')
c.argument('bypass_immutability_policy', action='store_true', help='Bypasses upcoming service behavior that '
'will block a container from being deleted if it has a immutability-policy. Specifying this will '
'ignore arguments aside from those used to identify the container ("--name", "--account-name").')
c.argument('lease_id', help="If specified, delete_container only succeeds if the container's lease is active "
"and matches this ID. Required if the container has an active lease.")
c.ignore('processed_resource_group')
c.ignore('processed_account_name')
c.ignore('mgmt_client')
with self.argument_context('storage container exists') as c:
c.ignore('blob_name', 'snapshot')
for item in ['create', 'extend']:
with self.argument_context('storage container immutability-policy {}'.format(item)) as c:
c.argument('account_name',
help='Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT.')
c.argument('if_match', help="An ETag value, or the wildcard character (*). Specify this header to perform "
"the operation only if the resource's ETag matches the value specified.")
c.extra('allow_protected_append_writes', options_list=['--allow-protected-append-writes', '-w'],
arg_type=get_three_state_flag(), help='This property can only be changed for unlocked time-based '
'retention policies. When enabled, new blocks can be '
'written to an append blob while maintaining immutability '
'protection and compliance. Only new blocks can be added '
'and any existing blocks cannot be modified or deleted. '
'This property cannot be changed with '
'ExtendImmutabilityPolicy API.')
c.extra('period', type=int, help='The immutability period for the blobs in the container since the policy '
'creation, in days.')
c.ignore('parameters')
with self.argument_context('storage container list') as c:
c.argument('num_results', arg_type=num_results_type)
with self.argument_context('storage container set-permission') as c:
c.ignore('signed_identifiers')
with self.argument_context('storage container lease') as c:
c.argument('container_name', container_name_type)
with self.argument_context('storage container') as c:
c.argument('account_name', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))
c.argument('resource_group_name', required=False, validator=process_resource_group)
with self.argument_context('storage container immutability-policy') as c:
c.argument('immutability_period_since_creation_in_days', options_list='--period')
c.argument('container_name', container_name_type)
with self.argument_context('storage container legal-hold') as c:
c.argument('container_name', container_name_type)
c.argument('account_name',
help='Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT.')
c.argument('tags', nargs='+',
help='Space-separated tags. Each tag should be 3 to 23 alphanumeric characters and is normalized '
'to lower case')
with self.argument_context('storage container policy') as c:
from .completers import get_storage_acl_name_completion_list
t_container_permissions = self.get_sdk('blob.models#ContainerPermissions')
c.argument('container_name', container_name_type)
c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
completer=get_storage_acl_name_completion_list(t_base_blob_service, 'container_name',
'get_container_acl'))
help_str = 'Allowed values: {}. Can be combined'.format(get_permission_help_string(t_container_permissions))
c.argument('permission', options_list='--permissions', help=help_str,
validator=get_permission_validator(t_container_permissions))
c.argument('start', type=get_datetime_type(True),
help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
for item in ['create', 'delete', 'list', 'show', 'update']:
with self.argument_context('storage container policy {}'.format(item)) as c:
c.extra('lease_id', options_list='--lease-id', help='The container lease ID.')
with self.argument_context('storage container generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
t_container_permissions = self.get_sdk('blob.models#ContainerPermissions')
c.register_sas_arguments()
c.argument('id', options_list='--policy-name', validator=validate_policy,
help='The name of a stored access policy within the container\'s ACL.',
completer=get_storage_acl_name_completion_list(t_container_permissions, 'container_name',
'get_container_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format(get_permission_help_string(t_container_permissions)),
validator=get_permission_validator(t_container_permissions))
c.argument('cache_control', help='Response header value for Cache-Control when resource is accessed'
'using this shared access signature.')
c.argument('content_disposition', help='Response header value for Content-Disposition when resource is accessed'
'using this shared access signature.')
c.argument('content_encoding', help='Response header value for Content-Encoding when resource is accessed'
'using this shared access signature.')
c.argument('content_language', help='Response header value for Content-Language when resource is accessed'
'using this shared access signature.')
c.argument('content_type', help='Response header value for Content-Type when resource is accessed'
'using this shared access signature.')
c.argument('as_user', min_api='2018-11-09', action='store_true',
validator=as_user_validator,
help="Indicates that this command return the SAS signed with the user delegation key. "
"The expiry parameter and '--auth-mode login' are required if this argument is specified. ")
c.ignore('sas_token')
with self.argument_context('storage container lease') as c:
c.argument('lease_duration', type=int)
c.argument('lease_break_period', type=int)
with self.argument_context('storage container-rm', resource_type=ResourceType.MGMT_STORAGE) as c:
from .sdkutil import get_container_access_type_names
c.argument('container_name', container_name_type, options_list=('--name', '-n'), id_part='child_name_2')
c.argument('account_name', storage_account_type)
c.argument('resource_group_name', required=False)
c.argument('public_access', validator=validate_container_public_access,
arg_type=get_enum_type(get_container_access_type_names()),
help='Specify whether data in the container may be accessed publicly.')
c.ignore('filter', 'maxpagesize')
with self.argument_context('storage container-rm create', resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('fail_on_exist', help='Throw an exception if the container already exists.')
for item in ['create', 'update']:
with self.argument_context('storage container-rm {}'.format(item),
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('default_encryption_scope', options_list=['--default-encryption-scope', '-d'],
arg_group='Encryption Policy', min_api='2019-06-01',
help='Default the container to use specified encryption scope for all writes.')
c.argument('deny_encryption_scope_override',
options_list=['--deny-encryption-scope-override', '--deny-override'],
arg_type=get_three_state_flag(), arg_group='Encryption Policy', min_api='2019-06-01',
help='Block override of encryption scope from the container default.')
with self.argument_context('storage container-rm list', resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('account_name', storage_account_type, id_part=None)
c.argument('include_deleted', action='store_true',
help='Include soft deleted containers when specified.')
with self.argument_context('storage share') as c:
c.argument('share_name', share_name_type, options_list=('--name', '-n'))
with self.argument_context('storage share-rm', resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('resource_group_name', required=False)
c.argument('account_name', storage_account_type)
c.argument('share_name', share_name_type, options_list=('--name', '-n'), id_part='child_name_2')
c.argument('expand', default=None)
c.ignore('filter', 'maxpagesize')
for item in ['create', 'update']:
with self.argument_context('storage share-rm {}'.format(item), resource_type=ResourceType.MGMT_STORAGE) as c:
t_enabled_protocols, t_root_squash, t_access_tier = \
self.get_models('EnabledProtocols', 'RootSquashType', 'ShareAccessTier',
resource_type=ResourceType.MGMT_STORAGE)
c.argument('share_quota', type=int, options_list=['--quota', '-q'],
help='The maximum size of the share in gigabytes. Must be greater than 0, and less than or '
'equal to 5TB (5120). For Large File Shares, the maximum size is 102400.')
c.argument('metadata', nargs='+',
help='Metadata in space-separated key=value pairs that is associated with the share. '
'This overwrites any existing metadata',
validator=validate_metadata)
c.argument('enabled_protocols', arg_type=get_enum_type(t_enabled_protocols), is_preview=True,
min_api='2019-06-01', help='Immutable property for file shares protocol. NFS protocol will be '
'only available for premium file shares (file shares in the FileStorage account type).')
c.argument('root_squash', arg_type=get_enum_type(t_root_squash), is_preview=True,
min_api='2019-06-01', help='Reduction of the access rights for the remote superuser.')
c.argument('access_tier', arg_type=get_enum_type(t_access_tier), min_api='2019-06-01',
help='Access tier for specific share. GpV2 account can choose between TransactionOptimized '
'(default), Hot, and Cool. FileStorage account can choose Premium.')
with self.argument_context('storage share-rm list', resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('account_name', storage_account_type, id_part=None)
c.argument('include_deleted', action='store_true',
help='Include soft deleted file shares when specified.')
with self.argument_context('storage share-rm restore', resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('deleted_version',
help='Identify the version of the deleted share that will be restored.')
c.argument('share_name',
help='The file share name. Identify the name of the deleted share that will be restored.')
c.argument('restored_name',
help='A new file share name to be restored. If not specified, deleted share name will be used.')
with self.argument_context('storage share url') as c:
c.argument('unc', action='store_true', help='Output UNC network path.')
c.argument('protocol', arg_type=get_enum_type(['http', 'https'], 'https'), help='Protocol to use.')
with self.argument_context('storage share list') as c:
c.argument('num_results', arg_type=num_results_type)
with self.argument_context('storage share exists') as c:
c.ignore('directory_name', 'file_name')
with self.argument_context('storage share policy') as c:
from .completers import get_storage_acl_name_completion_list
t_file_svc = self.get_sdk('file#FileService')
t_share_permissions = self.get_sdk('file.models#SharePermissions')
c.argument('container_name', share_name_type)
c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
completer=get_storage_acl_name_completion_list(t_file_svc, 'container_name', 'get_share_acl'))
help_str = 'Allowed values: {}. Can be combined'.format(get_permission_help_string(t_share_permissions))
c.argument('permission', options_list='--permissions', help=help_str,
validator=get_permission_validator(t_share_permissions))
c.argument('start', type=get_datetime_type(True),
help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
with self.argument_context('storage share delete') as c:
from .sdkutil import get_delete_file_snapshot_type_names
c.argument('delete_snapshots', arg_type=get_enum_type(get_delete_file_snapshot_type_names()),
help='Specify the deletion strategy when the share has snapshots.')
with self.argument_context('storage share generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
t_share_permissions = self.get_sdk('file.models#SharePermissions')
c.register_sas_arguments()
c.argument('id', options_list='--policy-name',
help='The name of a stored access policy within the share\'s ACL.',
completer=get_storage_acl_name_completion_list(t_share_permissions, 'share_name', 'get_share_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format(get_permission_help_string(t_share_permissions)),
validator=get_permission_validator(t_share_permissions))
c.ignore('sas_token')
with self.argument_context('storage directory') as c:
c.argument('directory_name', directory_type, options_list=('--name', '-n'))
with self.argument_context('storage directory exists') as c:
c.ignore('file_name')
c.argument('directory_name', required=True)
with self.argument_context('storage file') as c:
c.argument('file_name', file_name_type, options_list=('--name', '-n'))
c.argument('directory_name', directory_type, required=False)
with self.argument_context('storage file copy') as c:
c.argument('share_name', share_name_type, options_list=('--destination-share', '-s'),
help='Name of the destination share. The share must exist.')
with self.argument_context('storage file copy cancel') as c:
c.register_path_argument(options_list=('--destination-path', '-p'))
with self.argument_context('storage file delete') as c:
c.register_path_argument()
with self.argument_context('storage file download') as c:
c.register_path_argument()
c.argument('file_path', options_list=('--dest',), type=file_type, required=False,
help='Path of the file to write to. The source filename will be used if not specified.',
validator=process_file_download_namespace, completer=FilesCompleter())
c.argument('path', validator=None) # validator called manually from process_file_download_namespace
c.extra('no_progress', progress_type)
c.argument('max_connections', type=int)
c.argument('start_range', type=int)
c.argument('end_range', type=int)
with self.argument_context('storage file exists') as c:
c.register_path_argument()
with self.argument_context('storage file generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
c.register_path_argument()
c.register_sas_arguments()
t_file_svc = self.get_sdk('file.fileservice#FileService')
t_file_permissions = self.get_sdk('file.models#FilePermissions')
c.argument('id', options_list='--policy-name',
help='The name of a stored access policy within the container\'s ACL.',
completer=get_storage_acl_name_completion_list(t_file_svc, 'container_name', 'get_container_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format(get_permission_help_string(t_file_permissions)),
validator=get_permission_validator(t_file_permissions))
c.ignore('sas_token')
with self.argument_context('storage file list') as c:
from .completers import dir_path_completer
c.argument('directory_name', options_list=('--path', '-p'), help='The directory path within the file share.',
completer=dir_path_completer)
c.argument('num_results', arg_type=num_results_type)
with self.argument_context('storage file metadata show') as c:
c.register_path_argument()
with self.argument_context('storage file metadata update') as c:
c.register_path_argument()
with self.argument_context('storage file resize') as c:
c.register_path_argument()
c.argument('content_length', options_list='--size')
with self.argument_context('storage file show') as c:
c.register_path_argument()
with self.argument_context('storage file update') as c:
t_file_content_settings = self.get_sdk('file.models#ContentSettings')
c.register_path_argument()
c.register_content_settings_argument(t_file_content_settings, update=True)
with self.argument_context('storage file upload') as c:
t_file_content_settings = self.get_sdk('file.models#ContentSettings')
c.register_path_argument(default_file_param='local_file_path')
c.register_content_settings_argument(t_file_content_settings, update=False, guess_from_file='local_file_path')
c.argument('local_file_path', options_list='--source', type=file_type, completer=FilesCompleter())
c.extra('no_progress', progress_type)
c.argument('max_connections', type=int)
with self.argument_context('storage file url') as c:
c.register_path_argument()
c.argument('protocol', arg_type=get_enum_type(['http', 'https'], 'https'), help='Protocol to use.')
with self.argument_context('storage file upload-batch') as c:
from ._validators import process_file_upload_batch_parameters
c.argument('source', options_list=('--source', '-s'), validator=process_file_upload_batch_parameters)
c.argument('destination', options_list=('--destination', '-d'))
c.argument('max_connections', arg_group='Download Control', type=int)
c.argument('validate_content', action='store_true', min_api='2016-05-31')
c.register_content_settings_argument(t_file_content_settings, update=False, arg_group='Content Settings')
c.extra('no_progress', progress_type)
with self.argument_context('storage file download-batch') as c:
from ._validators import process_file_download_batch_parameters
c.argument('source', options_list=('--source', '-s'), validator=process_file_download_batch_parameters)
c.argument('destination', options_list=('--destination', '-d'))
c.argument('max_connections', arg_group='Download Control', type=int)
c.argument('validate_content', action='store_true', min_api='2016-05-31')
c.extra('no_progress', progress_type)
with self.argument_context('storage file delete-batch') as c:
from ._validators import process_file_batch_source_parameters
c.argument('source', options_list=('--source', '-s'), validator=process_file_batch_source_parameters)
with self.argument_context('storage file copy start') as c:
from azure.cli.command_modules.storage._validators import validate_source_uri
c.register_path_argument(options_list=('--destination-path', '-p'))
c.register_source_uri_arguments(validator=validate_source_uri)
c.extra('file_snapshot', default=None, arg_group='Copy Source',
help='The file snapshot for the source storage account.')
    with self.argument_context('storage file copy start-batch', arg_group='Copy Source') as c:
        from ._validators import get_source_file_or_blob_service_client
        # 'source_client' is hidden from the CLI (ignore_type); presumably the
        # validator constructs it from the source account/SAS/URI values
        # registered below — confirm against the validator implementation.
        c.argument('source_client', ignore_type, validator=get_source_file_or_blob_service_client)
        c.extra('source_account_name')
        c.extra('source_account_key')
        c.extra('source_uri')
        c.argument('source_sas')
        c.argument('source_container')
        c.argument('source_share')
with self.argument_context('storage cors list') as c:
c.extra('services', validator=get_char_options_validator('bfqt', 'services'), default='bqft',
options_list='--services', required=False)
with self.argument_context('storage cors add') as c:
c.extra('services', validator=get_char_options_validator('bfqt', 'services'), required=True,
options_list='--services')
c.argument('max_age')
c.argument('origins', nargs='+')
c.argument('methods', nargs='+',
arg_type=get_enum_type(['DELETE', 'GET', 'HEAD', 'MERGE', 'POST', 'OPTIONS', 'PUT']))
c.argument('allowed_headers', nargs='+')
c.argument('exposed_headers', nargs='+')
with self.argument_context('storage cors clear') as c:
c.extra('services', validator=get_char_options_validator('bfqt', 'services'), required=True,
options_list='--services')
with self.argument_context('storage queue generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
t_queue_permissions = self.get_sdk('queue.models#QueuePermissions')
c.register_sas_arguments()
c.argument('id', options_list='--policy-name',
help='The name of a stored access policy within the share\'s ACL.',
completer=get_storage_acl_name_completion_list(t_queue_permissions, 'queue_name', 'get_queue_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format(get_permission_help_string(t_queue_permissions)),
validator=get_permission_validator(t_queue_permissions))
c.ignore('sas_token')
c.ignore('auth_mode')
with self.argument_context('storage queue') as c:
c.argument('queue_name', queue_name_type, options_list=('--name', '-n'))
with self.argument_context('storage queue list') as c:
c.argument('include_metadata', help='Specify that queue metadata be returned in the response.')
c.argument('marker', arg_type=marker_type)
c.argument('num_results', arg_type=num_results_type)
c.argument('prefix', help='Filter the results to return only queues whose names '
'begin with the specified prefix.')
c.argument('show_next_marker', action='store_true',
help='Show nextMarker in result when specified.')
c.extra('timeout', help='Request timeout in seconds. Apply to each call to the service.', type=int)
with self.argument_context('storage queue create') as c:
c.argument('queue_name', queue_name_type, options_list=('--name', '-n'), completer=None)
with self.argument_context('storage queue policy') as c:
from .completers import get_storage_acl_name_completion_list
t_queue_permissions = self.get_sdk('queue.models#QueuePermissions')
c.argument('container_name', queue_name_type)
c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
completer=get_storage_acl_name_completion_list(t_queue_service, 'container_name', 'get_queue_acl'))
help_str = 'Allowed values: {}. Can be combined'.format(get_permission_help_string(t_queue_permissions))
c.argument('permission', options_list='--permissions', help=help_str,
validator=get_permission_validator(t_queue_permissions))
c.argument('start', type=get_datetime_type(True),
help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
c.ignore('auth_mode')
with self.argument_context('storage message') as c:
c.argument('queue_name', queue_name_type)
c.argument('message_id', options_list='--id')
c.argument('content', type=unicode_string, help='Message content, up to 64KB in size.')
with self.argument_context('storage remove') as c:
from .completers import file_path_completer
c.extra('container_name', container_name_type, validator=validate_azcopy_remove_arguments)
c.extra('blob_name', options_list=('--name', '-n'), arg_type=blob_name_type)
c.extra('share_name', share_name_type, help='The file share name.')
c.extra('path', options_list=('--path', '-p'),
help='The path to the file within the file share.',
completer=file_path_completer)
c.argument('exclude_pattern', exclude_pattern_type)
c.argument('include_pattern', include_pattern_type)
c.argument('exclude_path', exclude_path_type)
c.argument('include_path', include_path_type)
c.argument('recursive', recursive_type)
c.ignore('destination')
c.ignore('service')
c.ignore('target')
with self.argument_context('storage table') as c:
c.argument('table_name', table_name_type, options_list=('--name', '-n'))
with self.argument_context('storage table create') as c:
c.argument('table_name', table_name_type, options_list=('--name', '-n'), completer=None)
c.argument('fail_on_exist', help='Throw an exception if the table already exists.')
with self.argument_context('storage table policy') as c:
from ._validators import table_permission_validator
from .completers import get_storage_acl_name_completion_list
c.argument('container_name', table_name_type)
c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
completer=get_storage_acl_name_completion_list(t_table_service, 'table_name', 'get_table_acl'))
help_str = 'Allowed values: (r)ead/query (a)dd (u)pdate (d)elete. Can be combined.'
c.argument('permission', options_list='--permissions', help=help_str, validator=table_permission_validator)
c.argument('start', type=get_datetime_type(True),
help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
with self.argument_context('storage table generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
c.register_sas_arguments()
c.argument('id', options_list='--policy-name',
help='The name of a stored access policy within the table\'s ACL.',
completer=get_storage_acl_name_completion_list(t_table_service, 'table_name', 'get_table_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format('(r)ead/query (a)dd (u)pdate (d)elete'),
validator=table_permission_validator)
c.ignore('sas_token')
with self.argument_context('storage entity') as c:
c.ignore('property_resolver')
c.argument('entity', options_list=('--entity', '-e'), validator=validate_entity, nargs='+')
c.argument('select', nargs='+', validator=validate_select,
help='Space-separated list of properties to return for each entity.')
with self.argument_context('storage entity insert') as c:
c.argument('if_exists', arg_type=get_enum_type(['fail', 'merge', 'replace']))
with self.argument_context('storage entity query') as c:
c.argument('accept', default='minimal', validator=validate_table_payload_format,
arg_type=get_enum_type(['none', 'minimal', 'full']),
help='Specifies how much metadata to include in the response payload.')
c.argument('marker', validator=validate_marker, nargs='+')
# `storage fs <create|show|delete|exists|metadata *>`: every file-system-level command
# takes a required --name/-n file system name plus an optional per-call timeout.
for item in ['create', 'show', 'delete', 'exists', 'metadata update', 'metadata show']:
    with self.argument_context('storage fs {}'.format(item)) as c:
        c.extra('file_system_name', options_list=['--name', '-n'],
                help="File system name.", required=True)
        c.extra('timeout', timeout_type)
# `storage fs create`: optional public-access level for the new file system.
with self.argument_context('storage fs create') as c:
    from .sdkutil import get_fs_access_type_names
    c.argument('public_access', arg_type=get_enum_type(get_fs_access_type_names()),
               validator=validate_fs_public_access,
               help="Specify whether data in the file system may be accessed publicly and the level of access.")
# `storage fs list`: metadata inclusion and name-prefix filtering for listing file systems.
with self.argument_context('storage fs list') as c:
    c.argument('include_metadata', arg_type=get_three_state_flag(),
               help='Specify that file system metadata be returned in the response. The default value is "False".')
    c.argument('name_starts_with', options_list=['--prefix'],
               help='Filter the results to return only file systems whose names begin with the specified prefix.')
# `storage fs directory <subcommand>`: directory-level commands share required
# -f/--file-system and --name/-n (directory path) extras plus a per-call timeout.
for item in ['create', 'show', 'delete', 'exists', 'move', 'metadata update', 'metadata show']:
    with self.argument_context('storage fs directory {}'.format(item)) as c:
        c.extra('file_system_name', options_list=['-f', '--file-system'], help="File system name.", required=True)
        c.extra('directory_path', options_list=['--name', '-n'],
                help="The name of directory.", required=True)
        c.extra('timeout', timeout_type)
# `storage fs directory create`: POSIX permissions and umask for the new directory.
with self.argument_context('storage fs directory create') as c:
    c.extra('permissions', permissions_type)
    c.extra('umask', umask_type)
# `storage fs directory list`: recursion toggle, path filter and result cap.
with self.argument_context('storage fs directory list') as c:
    c.extra('file_system_name', options_list=['-f', '--file-system'], help="File system name.", required=True)
    c.argument('recursive', arg_type=get_three_state_flag(), default=True,
               help='Look into sub-directories recursively when set to true.')
    c.argument('path', help="Filter the results to return only paths under the specified path.")
    c.argument('num_results', type=int, help='Specify the maximum number of results to return.')
# `storage fs directory move`: destination expressed as "{filesystem}/{directory}/...".
with self.argument_context('storage fs directory move') as c:
    c.argument('new_name', options_list=['--new-directory', '-d'],
               help='The new directory name the users want to move to. The value must have the following format: '
                    '"{filesystem}/{directory}/{subdirectory}".')
# `storage fs file list`: list paths within a file system, with recursion, a
# files-only switch, path-prefix filter, result cap and continuation marker.
with self.argument_context('storage fs file list') as c:
    c.extra('file_system_name', options_list=['-f', '--file-system'], help="File system name.", required=True)
    c.argument('recursive', arg_type=get_three_state_flag(), default=True,
               help='Look into sub-directories recursively when set to true.')
    c.argument('exclude_dir', action='store_true',
               help='List only files in the given file system.')
    c.argument('path', help='Filter the results to return only paths under the specified path.')
    c.argument('num_results', type=int, default=5000,
               help='Specify the maximum number of results to return. If the request does not specify num_results '
                    'or specifies a value greater than 5,000, the server will return up to 5,000 items.')
    c.argument('marker',
               help='An opaque continuation token. This value can be retrieved from the next_marker field of a '
                    'previous generator object. If specified, this generator will begin returning results from this '
                    'point.')
# `storage fs file <subcommand>`: common required extras (file system name, file path,
# timeout) for the data-lake file commands.
# Fix: the original list contained 'show' twice, which re-entered the same argument
# context a second time to no effect; the duplicate is removed.
for item in ['create', 'show', 'delete', 'exists', 'upload', 'append', 'download',
             'metadata update', 'metadata show']:
    with self.argument_context('storage fs file {}'.format(item)) as c:
        c.extra('file_system_name', options_list=['-f', '--file-system'],
                help='File system name.', required=True)
        c.extra('path', options_list=['-p', '--path'], help="The file path in a file system.",
                required=True)
        c.extra('timeout', timeout_type)
        # Only meaningful for `append`, but registered for every subcommand in the loop.
        c.argument('content', help='Content to be appended to file.')
# `storage fs file create`: content settings plus POSIX permissions/umask extras.
with self.argument_context('storage fs file create') as c:
    t_file_content_settings = self.get_sdk('_models#ContentSettings',
                                           resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE)
    c.register_content_settings_argument(t_file_content_settings, update=False)
    c.extra('permissions', permissions_type)
    c.extra('umask', umask_type)
    c.extra('timeout', timeout_type)
# `storage fs file download`: local destination path and overwrite flag.
with self.argument_context('storage fs file download') as c:
    c.argument('destination_path', options_list=['--destination', '-d'], type=file_type,
               help='The local file where the file or folder will be downloaded to. The source filename will be '
                    'used if not specified.')
    c.argument('overwrite', arg_type=get_three_state_flag(),
               help="Overwrite an existing file when specified. Default value is false.")
# `storage fs file move`: source path extras plus the new "--new-path" destination,
# expressed as "{filesystem}/{directory}/{subdirectory}/{file}".
with self.argument_context('storage fs file move') as c:
    t_file_content_settings = self.get_sdk('_models#ContentSettings',
                                           resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE)
    c.register_content_settings_argument(t_file_content_settings, update=False)
    c.extra('file_system_name', options_list=['-f', '--file-system'],
            help='File system name.', required=True)
    c.extra('path', options_list=['-p', '--path'], required=True,
            help="The original file path users want to move in a file system.")
    c.argument('new_name', options_list=['--new-path'],
               help='The new path the users want to move to. The value must have the following format: '
                    '"{filesystem}/{directory}/{subdirectory}/{file}".')
# `storage fs file upload`: upload a local file into a data-lake file system, with
# content settings, an overwrite flag, ETag/date preconditions and POSIX permissions.
with self.argument_context('storage fs file upload') as c:
    t_file_content_settings = self.get_sdk('_models#ContentSettings',
                                           resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE)
    c.register_content_settings_argument(t_file_content_settings, update=False)
    c.argument('local_path', options_list=['--source', '-s'],
               help='Path of the local file to upload as the file content.')
    c.argument('overwrite', arg_type=get_three_state_flag(), help="Overwrite an existing file when specified.")
    c.argument('if_match', arg_group='Precondition',
               help="An ETag value, or the wildcard character (*). Specify this header to perform the operation "
                    "only if the resource's ETag matches the value specified.")
    c.argument('if_none_match', arg_group='Precondition',
               help="An ETag value, or the wildcard character (*). Specify this header to perform the operation "
                    "only if the resource's ETag does not match the value specified.")
    # Fix: help text read "A Commence only if ..."; reworded to "Commence only if ..."
    # to match if_modified_since_type/if_unmodified_since_type defined earlier in this file.
    c.argument('if_modified_since', arg_group='Precondition',
               help="Commence only if modified since supplied UTC datetime (Y-m-d'T'H:M'Z').")
    c.argument('if_unmodified_since', arg_group='Precondition',
               help="Commence only if unmodified since supplied UTC datetime (Y-m-d'T'H:M'Z').")
    c.argument('permissions', permissions_type)
    c.argument('umask', umask_type)
# `storage fs access set|show`: get/set the access control of a single file or
# directory; `upn` is handled internally and not exposed as a CLI option.
for item in ['set', 'show']:
    with self.argument_context('storage fs access {}'.format(item)) as c:
        from ._validators import validate_access_control
        c.extra('file_system_name', options_list=['-f', '--file-system'],
                help='File system name.', required=True)
        c.extra('directory_path', options_list=['-p', '--path'],
                help='The path to a file or directory in the specified file system.', required=True)
        c.argument('permissions', validator=validate_access_control)
        c.ignore('upn')
# `storage fs access set-recursive|update-recursive|remove-recursive`: recursive ACL
# changes over a directory tree, with batching and continuation-token support.
for item in ['set-recursive', 'update-recursive', 'remove-recursive']:
    with self.argument_context('storage fs access {}'.format(item)) as c:
        c.register_fs_directory_arguments()
        c.argument('acl', help='The value is a comma-separated list of access control entries. Each access control '
                   'entry (ACE) consists of a scope, a type, a user or group identifier, and permissions in the '
                   'format "[scope:][type]:[id]:[permissions]". For more information, please refer to '
                   'https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-access-control.')
        c.extra('continuation',
                help='Optional continuation token that can be used to resume previously stopped operation.')
        c.extra('batch_size', type=int, help='Optional. If data set size exceeds batch size then operation will '
                'be split into multiple requests so that progress can be tracked. Batch size should be between 1 '
                'and 2000. The default when unspecified is 2000.')
        # Fix: help-text typo "number of batches in unbound" corrected to "is unbound".
        c.extra('max_batches', type=int, help='Optional. Define maximum number of batches that single change '
                'Access Control operation can execute. If maximum is reached before all sub-paths are processed, '
                'then continuation token can be used to resume operation. Empty value indicates that maximum '
                'number of batches is unbound and operation continues till end.')
        c.extra('continue_on_failure', arg_type=get_three_state_flag(),
                help='If set to False, the operation will terminate quickly on encountering user errors (4XX). '
                     'If True, the operation will ignore user errors and proceed with the operation on other '
                     'sub-entities of the directory. Continuation token will only be returned when '
                     '--continue-on-failure is True in case of user errors. If not set the default value is False '
                     'for this.')
| 69.402395 | 166 | 0.655606 |
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.validators import get_default_location_from_resource_group
from azure.cli.core.commands.parameters import (tags_type, file_type, get_location_type, get_enum_type,
get_three_state_flag)
from azure.cli.core.local_context import LocalContextAttribute, LocalContextAction, ALL
from ._validators import (get_datetime_type, validate_metadata, get_permission_validator, get_permission_help_string,
resource_type_type, services_type, validate_entity, validate_select, validate_blob_type,
validate_included_datasets_validator, validate_custom_domain,
validate_container_public_access,
validate_table_payload_format, add_progress_callback, process_resource_group,
storage_account_key_options, process_file_download_namespace, process_metric_update_namespace,
get_char_options_validator, validate_bypass, validate_encryption_source, validate_marker,
validate_storage_data_plane_list, validate_azcopy_upload_destination_url,
validate_azcopy_remove_arguments, as_user_validator, parse_storage_account,
validate_delete_retention_days, validate_container_delete_retention_days,
validate_file_delete_retention_days, validator_change_feed_retention_days,
validate_fs_public_access, validate_logging_version, validate_or_policy, validate_policy,
get_api_version_type, blob_download_file_path_validator, blob_tier_validator)
def load_arguments(self, _):
from argcomplete.completers import FilesCompleter
from six import u as unicode_string
from knack.arguments import ignore_type, CLIArgumentType
from azure.cli.core.commands.parameters import get_resource_name_completion_list
from .sdkutil import get_table_data_type
from .completers import get_storage_name_completion_list
t_base_blob_service = self.get_sdk('blob.baseblobservice#BaseBlobService')
t_file_service = self.get_sdk('file#FileService')
t_queue_service = self.get_sdk('queue#QueueService')
t_table_service = get_table_data_type(self.cli_ctx, 'table', 'TableService')
storage_account_type = CLIArgumentType(options_list='--storage-account',
help='The name or ID of the storage account.',
validator=parse_storage_account, id_part='name')
acct_name_type = CLIArgumentType(options_list=['--account-name', '-n'], help='The storage account name.',
id_part='name',
completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'),
local_context_attribute=LocalContextAttribute(
name='storage_account_name', actions=[LocalContextAction.GET]))
blob_name_type = CLIArgumentType(options_list=['--blob-name', '-b'], help='The blob name.',
completer=get_storage_name_completion_list(t_base_blob_service, 'list_blobs',
parent='container_name'))
container_name_type = CLIArgumentType(options_list=['--container-name', '-c'], help='The container name.',
completer=get_storage_name_completion_list(t_base_blob_service,
'list_containers'))
directory_type = CLIArgumentType(options_list=['--directory-name', '-d'], help='The directory name.',
completer=get_storage_name_completion_list(t_file_service,
'list_directories_and_files',
parent='share_name'))
file_name_type = CLIArgumentType(options_list=['--file-name', '-f'],
completer=get_storage_name_completion_list(t_file_service,
'list_directories_and_files',
parent='share_name'))
share_name_type = CLIArgumentType(options_list=['--share-name', '-s'], help='The file share name.',
completer=get_storage_name_completion_list(t_file_service, 'list_shares'))
table_name_type = CLIArgumentType(options_list=['--table-name', '-t'],
completer=get_storage_name_completion_list(t_table_service, 'list_tables'))
queue_name_type = CLIArgumentType(options_list=['--queue-name', '-q'], help='The queue name.',
completer=get_storage_name_completion_list(t_queue_service, 'list_queues'))
progress_type = CLIArgumentType(help='Include this flag to disable progress reporting for the command.',
action='store_true', validator=add_progress_callback)
socket_timeout_type = CLIArgumentType(help='The socket timeout(secs), used by the service to regulate data flow.',
type=int)
large_file_share_type = CLIArgumentType(
action='store_true', min_api='2019-04-01',
help='Enable the capability to support large file shares with more than 5 TiB capacity for storage account.'
'Once the property is enabled, the feature cannot be disabled. Currently only supported for LRS and '
'ZRS replication types, hence account conversions to geo-redundant accounts would not be possible. '
'For more information, please refer to https://go.microsoft.com/fwlink/?linkid=2086047.')
adds_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2019-04-01',
help='Enable Azure Files Active Directory Domain Service Authentication for '
'storage account. When --enable-files-adds is set to true, Azure Active '
'Directory Properties arguments must be provided.')
aadds_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2018-11-01',
help='Enable Azure Active Directory Domain Services authentication for Azure Files')
domain_name_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the primary domain that the AD DNS server is authoritative for. "
"Required when --enable-files-adds is set to True")
net_bios_domain_name_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the NetBIOS domain name. "
"Required when --enable-files-adds is set to True")
forest_name_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the Active Directory forest to get. "
"Required when --enable-files-adds is set to True")
domain_guid_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the domain GUID. Required when --enable-files-adds is set to True")
domain_sid_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the security identifier (SID). Required when --enable-files-adds "
"is set to True")
azure_storage_sid_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the security identifier (SID) for Azure Storage. "
"Required when --enable-files-adds is set to True")
exclude_pattern_type = CLIArgumentType(arg_group='Additional Flags', help='Exclude these files where the name '
'matches the pattern list. For example: *.jpg;*.pdf;exactName. This '
'option supports wildcard characters (*)')
include_pattern_type = CLIArgumentType(arg_group='Additional Flags', help='Include only these files where the name '
'matches the pattern list. For example: *.jpg;*.pdf;exactName. This '
'option supports wildcard characters (*)')
exclude_path_type = CLIArgumentType(arg_group='Additional Flags', help='Exclude these paths. This option does not '
'support wildcard characters (*). Checks relative path prefix. For example: '
'myFolder;myFolder/subDirName/file.pdf.')
include_path_type = CLIArgumentType(arg_group='Additional Flags', help='Include only these paths. This option does '
'not support wildcard characters (*). Checks relative path prefix. For example:'
'myFolder;myFolder/subDirName/file.pdf')
recursive_type = CLIArgumentType(options_list=['--recursive', '-r'], action='store_true',
help='Look into sub-directories recursively.')
sas_help = 'The permissions the SAS grants. Allowed values: {}. Do not use if a stored access policy is ' \
'referenced with --id that specifies this value. Can be combined.'
t_routing_choice = self.get_models('RoutingChoice', resource_type=ResourceType.MGMT_STORAGE)
routing_choice_type = CLIArgumentType(
arg_group='Routing Preference', arg_type=get_enum_type(t_routing_choice),
help='Routing Choice defines the kind of network routing opted by the user.',
min_api='2019-06-01')
publish_microsoft_endpoints_type = CLIArgumentType(
arg_group='Routing Preference', arg_type=get_three_state_flag(), min_api='2019-06-01',
help='A boolean flag which indicates whether microsoft routing storage endpoints are to be published.')
publish_internet_endpoints_type = CLIArgumentType(
arg_group='Routing Preference', arg_type=get_three_state_flag(), min_api='2019-06-01',
help='A boolean flag which indicates whether internet routing storage endpoints are to be published.')
umask_type = CLIArgumentType(
help='When creating a file or directory and the parent folder does not have a default ACL, the umask restricts '
'the permissions of the file or directory to be created. The resulting permission is given by p & ^u, '
'where p is the permission and u is the umask. For more information, please refer to '
'https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-access-control#umask.')
permissions_type = CLIArgumentType(
help='POSIX access permissions for the file owner, the file owning group, and others. Each class may be '
'granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) '
'and 4-digit octal notation (e.g. 0766) are supported. For more information, please refer to https://'
'docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-access-control#levels-of-permission.')
timeout_type = CLIArgumentType(
help='Request timeout in seconds. Applies to each call to the service.', type=int
)
marker_type = CLIArgumentType(
help='A string value that identifies the portion of the list of containers to be '
'returned with the next listing operation. The operation returns the NextMarker value within '
'the response body if the listing operation did not return all containers remaining to be listed '
'with the current page. If specified, this generator will begin returning results from the point '
'where the previous generator stopped.')
num_results_type = CLIArgumentType(
default=5000, validator=validate_storage_data_plane_list,
help='Specify the maximum number to return. If the request does not specify '
'num_results, or specifies a value greater than 5000, the server will return up to 5000 items. Note that '
'if the listing operation crosses a partition boundary, then the service will return a continuation token '
'for retrieving the remaining of the results. Provide "*" to return all.'
)
if_modified_since_type = CLIArgumentType(
help='Commence only if modified since supplied UTC datetime (Y-m-d\'T\'H:M\'Z\')',
type=get_datetime_type(False))
if_unmodified_since_type = CLIArgumentType(
help='Commence only if unmodified since supplied UTC datetime (Y-m-d\'T\'H:M\'Z\')',
type=get_datetime_type(False))
allow_shared_key_access_type = CLIArgumentType(
arg_type=get_three_state_flag(), options_list=['--allow-shared-key-access', '-k'], min_api='2019-04-01',
help='Indicate whether the storage account permits requests to be authorized with the account access key via '
'Shared Key. If false, then all requests, including shared access signatures, must be authorized with '
'Azure Active Directory (Azure AD). The default value is null, which is equivalent to true.')
t_blob_tier = self.get_sdk('_generated.models._azure_blob_storage_enums#AccessTierOptional',
resource_type=ResourceType.DATA_STORAGE_BLOB)
with self.argument_context('storage') as c:
c.argument('container_name', container_name_type)
c.argument('directory_name', directory_type)
c.argument('share_name', share_name_type)
c.argument('table_name', table_name_type)
c.argument('retry_wait', options_list=('--retry-interval',))
c.ignore('progress_callback')
c.argument('metadata', nargs='+',
help='Metadata in space-separated key=value pairs. This overwrites any existing metadata.',
validator=validate_metadata)
c.argument('timeout', help='Request timeout in seconds. Applies to each call to the service.', type=int)
with self.argument_context('storage', arg_group='Precondition') as c:
c.argument('if_modified_since', if_modified_since_type)
c.argument('if_unmodified_since', if_unmodified_since_type)
c.argument('if_match')
c.argument('if_none_match')
for item in ['delete', 'show', 'update', 'show-connection-string', 'keys', 'network-rule', 'revoke-delegation-keys', 'failover']:
with self.argument_context('storage account {}'.format(item)) as c:
c.argument('account_name', acct_name_type, options_list=['--name', '-n'])
c.argument('resource_group_name', required=False, validator=process_resource_group)
with self.argument_context('storage account check-name') as c:
c.argument('name', options_list=['--name', '-n'],
help='The name of the storage account within the specified resource group')
with self.argument_context('storage account delete') as c:
c.argument('account_name', acct_name_type, options_list=['--name', '-n'], local_context_attribute=None)
with self.argument_context('storage account create', resource_type=ResourceType.MGMT_STORAGE) as c:
t_account_type, t_sku_name, t_kind, t_tls_version = \
self.get_models('AccountType', 'SkuName', 'Kind', 'MinimumTlsVersion',
resource_type=ResourceType.MGMT_STORAGE)
c.register_common_storage_account_options()
c.argument('location', get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
c.argument('account_type', help='The storage account type', arg_type=get_enum_type(t_account_type))
c.argument('account_name', acct_name_type, options_list=['--name', '-n'], completer=None,
local_context_attribute=LocalContextAttribute(
name='storage_account_name', actions=[LocalContextAction.SET], scopes=[ALL]))
c.argument('kind', help='Indicate the type of storage account.',
arg_type=get_enum_type(t_kind),
default='StorageV2' if self.cli_ctx.cloud.profile == 'latest' else 'Storage')
c.argument('https_only', arg_type=get_three_state_flag(), min_api='2019-04-01',
help='Allow https traffic only to storage service if set to true. The default value is true.')
c.argument('https_only', arg_type=get_three_state_flag(), max_api='2018-11-01',
help='Allow https traffic only to storage service if set to true. The default value is false.')
c.argument('tags', tags_type)
c.argument('custom_domain', help='User domain assigned to the storage account. Name is the CNAME source.')
c.argument('sku', help='The storage account SKU.', arg_type=get_enum_type(t_sku_name, default='standard_ragrs'))
c.argument('enable_files_aadds', aadds_type)
c.argument('enable_files_adds', adds_type)
c.argument('enable_large_file_share', arg_type=large_file_share_type)
c.argument('domain_name', domain_name_type)
c.argument('net_bios_domain_name', net_bios_domain_name_type)
c.argument('forest_name', forest_name_type)
c.argument('domain_guid', domain_guid_type)
c.argument('domain_sid', domain_sid_type)
c.argument('azure_storage_sid', azure_storage_sid_type)
c.argument('enable_hierarchical_namespace', arg_type=get_three_state_flag(),
options_list=['--enable-hierarchical-namespace', '--hns',
c.deprecate(target='--hierarchical-namespace', redirect='--hns', hide=True)],
help=" Allow the blob service to exhibit filesystem semantics. This property can be enabled only "
"when storage account kind is StorageV2.",
min_api='2018-02-01')
c.argument('encryption_key_type_for_table', arg_type=get_enum_type(['Account', 'Service']),
help='Set the encryption key type for Table service. "Account": Table will be encrypted '
'with account-scoped encryption key. "Service": Table will always be encrypted with '
'service-scoped keys. Currently the default encryption key type is "Service".',
min_api='2019-06-01', options_list=['--encryption-key-type-for-table', '-t'])
c.argument('encryption_key_type_for_queue', arg_type=get_enum_type(['Account', 'Service']),
help='Set the encryption key type for Queue service. "Account": Queue will be encrypted '
'with account-scoped encryption key. "Service": Queue will always be encrypted with '
'service-scoped keys. Currently the default encryption key type is "Service".',
min_api='2019-06-01', options_list=['--encryption-key-type-for-queue', '-q'])
c.argument('routing_choice', routing_choice_type)
c.argument('publish_microsoft_endpoints', publish_microsoft_endpoints_type)
c.argument('publish_internet_endpoints', publish_internet_endpoints_type)
c.argument('require_infrastructure_encryption', options_list=['--require-infrastructure-encryption', '-i'],
arg_type=get_three_state_flag(),
help='A boolean indicating whether or not the service applies a secondary layer of encryption with '
'platform managed keys for data at rest.')
c.argument('allow_blob_public_access', arg_type=get_three_state_flag(), min_api='2019-04-01',
help='Allow or disallow public access to all blobs or containers in the storage account. '
'The default value for this property is null, which is equivalent to true. When true, containers '
'in the account may be configured for public access. Note that setting this property to true does '
'not enable anonymous access to any data in the account. The additional step of configuring the '
'public access setting for a container is required to enable anonymous access.')
c.argument('min_tls_version', arg_type=get_enum_type(t_tls_version),
help='The minimum TLS version to be permitted on requests to storage. '
'The default interpretation is TLS 1.0 for this property')
c.argument('allow_shared_key_access', allow_shared_key_access_type)
with self.argument_context('storage account private-endpoint-connection',
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('private_endpoint_connection_name', options_list=['--name', '-n'],
help='The name of the private endpoint connection associated with the Storage Account.')
for item in ['approve', 'reject', 'show', 'delete']:
with self.argument_context('storage account private-endpoint-connection {}'.format(item),
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('private_endpoint_connection_name', options_list=['--name', '-n'], required=False,
help='The name of the private endpoint connection associated with the Storage Account.')
c.extra('connection_id', options_list=['--id'],
help='The ID of the private endpoint connection associated with the Storage Account. You can get '
'it using `az storage account show`.')
c.argument('account_name', help='The storage account name.', required=False)
c.argument('resource_group_name', help='The resource group name of specified storage account.',
required=False)
c.argument('description', help='Comments for {} operation.'.format(item))
with self.argument_context('storage account update', resource_type=ResourceType.MGMT_STORAGE) as c:
t_tls_version = self.get_models('MinimumTlsVersion', resource_type=ResourceType.MGMT_STORAGE)
c.register_common_storage_account_options()
c.argument('sku', arg_type=get_enum_type(t_sku_name),
help='Note that the SKU name cannot be updated to Standard_ZRS, Premium_LRS or Premium_ZRS, '
'nor can accounts of those SKU names be updated to any other value')
c.argument('custom_domain',
help='User domain assigned to the storage account. Name is the CNAME source. Use "" to clear '
'existing value.',
validator=validate_custom_domain)
c.argument('use_subdomain', help='Specify whether to use indirect CNAME validation.',
arg_type=get_enum_type(['true', 'false']))
c.argument('tags', tags_type, default=None)
c.argument('enable_files_aadds', aadds_type)
c.argument('enable_files_adds', adds_type)
c.argument('enable_large_file_share', arg_type=large_file_share_type)
c.argument('domain_name', domain_name_type)
c.argument('net_bios_domain_name', net_bios_domain_name_type)
c.argument('forest_name', forest_name_type)
c.argument('domain_guid', domain_guid_type)
c.argument('domain_sid', domain_sid_type)
c.argument('azure_storage_sid', azure_storage_sid_type)
c.argument('routing_choice', routing_choice_type)
c.argument('publish_microsoft_endpoints', publish_microsoft_endpoints_type)
c.argument('publish_internet_endpoints', publish_internet_endpoints_type)
c.argument('allow_blob_public_access', arg_type=get_three_state_flag(), min_api='2019-04-01',
help='Allow or disallow public access to all blobs or containers in the storage account. '
'The default value for this property is null, which is equivalent to true. When true, containers '
'in the account may be configured for public access. Note that setting this property to true does '
'not enable anonymous access to any data in the account. The additional step of configuring the '
'public access setting for a container is required to enable anonymous access.')
c.argument('min_tls_version', arg_type=get_enum_type(t_tls_version),
help='The minimum TLS version to be permitted on requests to storage. '
'The default interpretation is TLS 1.0 for this property')
c.argument('allow_shared_key_access', allow_shared_key_access_type)
with self.argument_context('storage account update', arg_group='Customer managed key', min_api='2017-06-01') as c:
t_key_source = self.get_models('KeySource', resource_type=ResourceType.MGMT_STORAGE)
c.argument('encryption_key_name', help='The name of the KeyVault key.', )
c.argument('encryption_key_vault', help='The Uri of the KeyVault.')
c.argument('encryption_key_version',
help='The version of the KeyVault key to use, which will opt out of implicit key rotation. '
'Please use "" to opt in key auto-rotation again.')
c.argument('encryption_key_source',
arg_type=get_enum_type(t_key_source),
help='The default encryption key source',
validator=validate_encryption_source)
for scope in ['storage account create', 'storage account update']:
with self.argument_context(scope, resource_type=ResourceType.MGMT_STORAGE, min_api='2017-06-01',
arg_group='Network Rule') as c:
t_bypass, t_default_action = self.get_models('Bypass', 'DefaultAction',
resource_type=ResourceType.MGMT_STORAGE)
c.argument('bypass', nargs='+', validator=validate_bypass, arg_type=get_enum_type(t_bypass),
help='Bypass traffic for space-separated uses.')
c.argument('default_action', arg_type=get_enum_type(t_default_action),
help='Default action to apply when no rule matches.')
with self.argument_context('storage account show-connection-string') as c:
c.argument('protocol', help='The default endpoint protocol.', arg_type=get_enum_type(['http', 'https']))
c.argument('sas_token', help='The SAS token to be used in the connection-string.')
c.argument('key_name', options_list=['--key'], help='The key to use.',
arg_type=get_enum_type(list(storage_account_key_options.keys())))
for item in ['blob', 'file', 'queue', 'table']:
c.argument('{}_endpoint'.format(item), help='Custom endpoint for {}s.'.format(item))
# Common arguments for the `storage account encryption-scope` command group.
with self.argument_context('storage account encryption-scope') as c:
    c.argument('account_name', help='The storage account name.')
    c.argument('resource_group_name', validator=process_resource_group, required=False)
    c.argument('encryption_scope_name', options_list=['--name', '-n'],
               help='The name of the encryption scope within the specified storage account.')
# create/update share the key-source / key-uri / infrastructure-encryption options.
for scope in ['storage account encryption-scope create', 'storage account encryption-scope update']:
    with self.argument_context(scope, resource_type=ResourceType.MGMT_STORAGE) as c:
        from ._validators import validate_encryption_key
        t_encryption_key_source = self.get_models('EncryptionScopeSource', resource_type=ResourceType.MGMT_STORAGE)
        c.argument('key_source', options_list=['-s', '--key-source'],
                   arg_type=get_enum_type(t_encryption_key_source, default="Microsoft.Storage"),
                   help='The provider for the encryption scope.', validator=validate_encryption_key)
        c.argument('key_uri', options_list=['-u', '--key-uri'],
                   help='The object identifier for a key vault key object. When applied, the encryption scope will '
                        'use the key referenced by the identifier to enable customer-managed key support on this '
                        'encryption scope.')
        c.argument('require_infrastructure_encryption', options_list=['--require-infrastructure-encryption', '-i'],
                   arg_type=get_three_state_flag(), min_api='2021-01-01',
                   help='A boolean indicating whether or not the service applies a secondary layer of encryption '
                        'with platform managed keys for data at rest.')
# NOTE(review): this block re-uses t_encryption_key_source leaked out of the
# for-loop scope above — works in Python but is fragile if the loop is removed.
with self.argument_context('storage account encryption-scope update') as c:
    t_state = self.get_models("EncryptionScopeState", resource_type=ResourceType.MGMT_STORAGE)
    c.argument('key_source', options_list=['-s', '--key-source'],
               arg_type=get_enum_type(t_encryption_key_source),
               help='The provider for the encryption scope.', validator=validate_encryption_key)
    c.argument('state', arg_type=get_enum_type(t_state),
               help='Change the state the encryption scope. When disabled, '
                    'all blob read/write operations using this encryption scope will fail.')
with self.argument_context('storage account keys list', resource_type=ResourceType.MGMT_STORAGE) as c:
    t_expand_key_type = self.get_models('ListKeyExpand', resource_type=ResourceType.MGMT_STORAGE)
    c.argument("expand", options_list=['--expand-key-type'], help='Specify the expanded key types to be listed.',
               arg_type=get_enum_type(t_expand_key_type), min_api='2019-04-01', is_preview=True)
with self.argument_context('storage account keys renew', resource_type=ResourceType.MGMT_STORAGE) as c:
    c.argument('key_name', options_list=['--key'], help='The key options to regenerate.',
               arg_type=get_enum_type(list(storage_account_key_options.keys())))
    c.extra('key_type', help='The key type to regenerate. If --key-type is not specified, one of access keys will '
                             'be regenerated by default.', arg_type=get_enum_type(['kerb']), min_api='2019-04-01')
    c.argument('account_name', acct_name_type, id_part=None)
# Lifecycle-management policy document is supplied as a JSON file.
with self.argument_context('storage account management-policy create') as c:
    c.argument('policy', type=file_type, completer=FilesCompleter(),
               help='The Storage Account ManagementPolicies Rules, in JSON format. See more details in: '
                    'https://docs.microsoft.com/azure/storage/common/storage-lifecycle-managment-concepts.')
for item in ['create', 'update', 'show', 'delete']:
    with self.argument_context('storage account management-policy {}'.format(item)) as c:
        c.argument('account_name', help='The name of the storage account within the specified resource group.')
with self.argument_context('storage account keys list') as c:
    c.argument('account_name', acct_name_type, id_part=None)
# VNet / IP / resource-access rule management for an account's firewall.
with self.argument_context('storage account network-rule', resource_type=ResourceType.MGMT_STORAGE) as c:
    from ._validators import validate_subnet
    c.argument('account_name', acct_name_type, id_part=None)
    c.argument('ip_address', help='IPv4 address or CIDR range.')
    c.argument('subnet', help='Name or ID of subnet. If name is supplied, `--vnet-name` must be supplied.')
    c.argument('vnet_name', help='Name of a virtual network.', validator=validate_subnet)
    c.argument('action', help='The action of virtual network rule.')
    c.argument('resource_id', help='The resource id to add in network rule.', arg_group='Resource Access Rule',
               min_api='2020-08-01-preview')
    c.argument('tenant_id', help='The tenant id to add in network rule.', arg_group='Resource Access Rule',
               min_api='2020-08-01-preview')
# Blob-service properties: change feed, soft delete, restore policy, versioning.
with self.argument_context('storage account blob-service-properties show',
                           resource_type=ResourceType.MGMT_STORAGE) as c:
    c.argument('account_name', acct_name_type, id_part=None)
    c.argument('resource_group_name', required=False, validator=process_resource_group)
with self.argument_context('storage account blob-service-properties update',
                           resource_type=ResourceType.MGMT_STORAGE) as c:
    c.argument('account_name', acct_name_type, id_part=None)
    c.argument('resource_group_name', required=False, validator=process_resource_group)
    c.argument('enable_change_feed', arg_type=get_three_state_flag(), min_api='2019-04-01',
               arg_group='Change Feed Policy')
    c.argument('change_feed_retention_days', is_preview=True,
               options_list=['--change-feed-retention-days', '--change-feed-days'],
               type=int, min_api='2019-06-01', arg_group='Change Feed Policy',
               validator=validator_change_feed_retention_days,
               help='Indicate the duration of changeFeed retention in days. '
                    'Minimum value is 1 day and maximum value is 146000 days (400 years). '
                    'A null value indicates an infinite retention of the change feed.'
                    '(Use `--enable-change-feed` without `--change-feed-days` to indicate null)')
    c.argument('enable_container_delete_retention',
               arg_type=get_three_state_flag(),
               options_list=['--enable-container-delete-retention', '--container-retention'],
               arg_group='Container Delete Retention Policy', min_api='2019-06-01',
               help='Enable container delete retention policy for container soft delete when set to true. '
                    'Disable container delete retention policy when set to false.')
    c.argument('container_delete_retention_days',
               options_list=['--container-delete-retention-days', '--container-days'],
               type=int, arg_group='Container Delete Retention Policy',
               min_api='2019-06-01', validator=validate_container_delete_retention_days,
               help='Indicate the number of days that the deleted container should be retained. The minimum '
                    'specified value can be 1 and the maximum value can be 365.')
    c.argument('enable_delete_retention', arg_type=get_three_state_flag(), arg_group='Delete Retention Policy',
               min_api='2018-07-01')
    c.argument('delete_retention_days', type=int, arg_group='Delete Retention Policy',
               validator=validate_delete_retention_days, min_api='2018-07-01')
    c.argument('enable_restore_policy', arg_type=get_three_state_flag(), arg_group='Restore Policy',
               min_api='2019-06-01', help="Enable blob restore policy when it set to true.")
    c.argument('restore_days', type=int, arg_group='Restore Policy',
               min_api='2019-06-01', help="The number of days for the blob can be restored. It should be greater "
                                          "than zero and less than Delete Retention Days.")
    c.argument('enable_versioning', arg_type=get_three_state_flag(), help='Versioning is enabled if set to true.',
               min_api='2019-06-01')
    c.argument('default_service_version', options_list=['--default-service-version', '-d'],
               type=get_api_version_type(), min_api='2018-07-01',
               help="Indicate the default version to use for requests to the Blob service if an incoming request's "
                    "version is not specified.")
# File-service properties: share soft delete and SMB Multichannel.
with self.argument_context('storage account file-service-properties show',
                           resource_type=ResourceType.MGMT_STORAGE) as c:
    c.argument('account_name', acct_name_type, id_part=None)
    c.argument('resource_group_name', required=False, validator=process_resource_group)
with self.argument_context('storage account file-service-properties update',
                           resource_type=ResourceType.MGMT_STORAGE) as c:
    c.argument('account_name', acct_name_type, id_part=None)
    c.argument('resource_group_name', required=False, validator=process_resource_group)
    c.argument('enable_delete_retention', arg_type=get_three_state_flag(), arg_group='Delete Retention Policy',
               min_api='2019-06-01', help='Enable file service properties for share soft delete.')
    c.argument('delete_retention_days', type=int, arg_group='Delete Retention Policy',
               validator=validate_file_delete_retention_days, min_api='2019-06-01',
               help='Indicate the number of days that the deleted item should be retained. The minimum specified '
                    'value can be 1 and the maximum value can be 365.')
    c.argument('enable_smb_multichannel', options_list=['--enable-smb-multichannel', '--mc'],
               arg_type=get_three_state_flag(), min_api='2020-08-01-preview',
               help='Set SMB Multichannel setting for file service. Applies to Premium FileStorage only.')
# `storage account generate-sas`: shared SAS arguments plus the account-level
# services / resource-types / permissions trio.
with self.argument_context('storage account generate-sas') as c:
    from ._validators import get_not_none_validator
    # FIX: the SDK path string was truncated (unterminated literal, syntax
    # error). AccountPermissions supplies the --permissions help/validation.
    t_account_permissions = self.get_sdk('common.models#AccountPermissions')
    c.register_sas_arguments()
    c.argument('services', type=services_type(self))
    c.argument('resource_types', type=resource_type_type(self))
    c.argument('expiry', type=get_datetime_type(True))
    c.argument('start', type=get_datetime_type(True))
    c.argument('account_name', acct_name_type, options_list=['--account-name'],
               validator=get_not_none_validator('account_name'))
    c.argument('permission', options_list=('--permissions',),
               help='The permissions the SAS grants. Allowed values: {}. Can be combined.'.format(
                   get_permission_help_string(t_account_permissions)),
               validator=get_permission_validator(t_account_permissions))
    # The raw sas_token parameter is produced internally, never user-supplied.
    c.ignore('sas_token')
# Reusable CLIArgumentType definitions shared by the or-policy commands below.
or_policy_type = CLIArgumentType(
    options_list=['--policy', '-p'],
    help='The object replication policy definition between two storage accounts, in JSON format. '
         'Multiple rules can be defined in one policy.'
)
policy_id_type = CLIArgumentType(
    options_list=['--policy-id'],
    help='The ID of object replication policy or "default" if the policy ID is unknown. Policy Id will be '
         'auto-generated when setting on destination account. Required when setting on source account.'
)
rule_id_type = CLIArgumentType(
    options_list=['--rule-id', '-r'],
    help='Rule Id is auto-generated for each new rule on destination account. It is required '
         'for put policy on source account.'
)
# NOTE(review): name looks like a typo for "prefix_match_type"; kept as-is
# because it is referenced further below — confirm before renaming.
prefix_math_type = CLIArgumentType(
    nargs='+', arg_group='Filters', options_list=['--prefix-match', '--prefix'],
    help='Optional. Filter the results to replicate only blobs whose names begin with the specified '
         'prefix.'
)
min_creation_time_type = CLIArgumentType(
    options_list=['--min-creation-time', '-t'], arg_group='Filters', type=get_datetime_type(True),
    help="Blobs created after the time will be replicated to the destination. It must be in datetime format "
         "'yyyy-MM-ddTHH:mm:ssZ'. Example: 2020-02-19T16:05:00Z")
# Object-replication policy commands share account / policy-id / filter args.
with self.argument_context('storage account or-policy') as c:
    c.argument('account_name', acct_name_type, id_part=None)
    c.argument('resource_group_name', required=False, validator=process_resource_group)
    c.argument('object_replication_policy_id', policy_id_type)
    c.argument('policy_id', policy_id_type)
    c.argument('source_account', options_list=['--source-account', '-s'],
               help='The source storage account name. Required when no --policy provided.')
    c.argument('destination_account', options_list=['--destination-account', '-d'],
               help='The destination storage account name. Apply --account-name value as destination account '
                    'when there is no destination account provided in --policy and --destination-account.')
    c.argument('properties', or_policy_type)
    c.argument('prefix_match', prefix_math_type)
    c.argument('min_creation_time', min_creation_time_type)
# Rule-level options for create/update (containers plus rule id).
for item in ['create', 'update']:
    with self.argument_context('storage account or-policy {}'.format(item),
                               arg_group="Object Replication Policy Rule") as c:
        c.argument('rule_id', help='Rule Id is auto-generated for each new rule on destination account. It is '
                                   'required for put policy on source account.')
        c.argument('source_container', options_list=['--source-container', '--scont'],
                   help='The source storage container name. Required when no --policy provided.')
        c.argument('destination_container', options_list=['--destination-container', '--dcont'],
                   help='The destination storage container name. Required when no --policy provided.')
with self.argument_context('storage account or-policy create') as c:
    c.argument('properties', or_policy_type, validator=validate_or_policy)
with self.argument_context('storage account or-policy rule') as c:
    c.argument('policy_id', policy_id_type)
    c.argument('source_container', options_list=['--source-container', '-s'],
               help='The source storage container name.')
    c.argument('destination_container', options_list=['--destination-container', '-d'],
               help='The destination storage container name.')
    c.argument('rule_id', rule_id_type)
# Classic analytics logging/metrics: services are selected by character flags
# (b=blob, q=queue, t=table, f=file).
for item in ['show', 'off']:
    with self.argument_context('storage logging {}'.format(item)) as c:
        c.extra('services', validator=get_char_options_validator('bqt', 'services'), default='bqt')
with self.argument_context('storage logging update') as c:
    c.extra('services', validator=get_char_options_validator('bqt', 'services'), options_list='--services',
            required=True)
    c.argument('log', validator=get_char_options_validator('rwd', 'log'))
    c.argument('retention', type=int)
    c.argument('version', type=float, validator=validate_logging_version)
with self.argument_context('storage metrics show') as c:
    c.extra('services', validator=get_char_options_validator('bfqt', 'services'), default='bfqt')
    c.argument('interval', arg_type=get_enum_type(['hour', 'minute', 'both']))
with self.argument_context('storage metrics update') as c:
    c.extra('services', validator=get_char_options_validator('bfqt', 'services'), options_list='--services',
            required=True)
    c.argument('hour', validator=process_metric_update_namespace, arg_type=get_enum_type(['true', 'false']))
    c.argument('minute', arg_type=get_enum_type(['true', 'false']))
    c.argument('api', arg_type=get_enum_type(['true', 'false']))
    c.argument('retention', type=int)
# Base arguments inherited by every `storage blob` subcommand.
with self.argument_context('storage blob') as c:
    c.argument('blob_name', options_list=('--name', '-n'), arg_type=blob_name_type)
    c.argument('destination_path', help='The destination path that will be appended to the blob name.')
# `storage blob list`: listing options (delimiter, include datasets, paging).
with self.argument_context('storage blob list') as c:
    from ._validators import get_include_help_string
    # FIX: the SDK path string was truncated (unterminated literal, syntax
    # error). ListBlobsIncludeItem enumerates the datasets valid for --include.
    t_blob_include = self.get_sdk('_generated.models._azure_blob_storage_enums#ListBlobsIncludeItem',
                                  resource_type=ResourceType.DATA_STORAGE_BLOB)
    c.register_container_arguments()
    c.argument('delimiter',
               help='When the request includes this parameter, the operation returns a BlobPrefix element in the '
                    'result list that acts as a placeholder for all blobs whose names begin with the same substring '
                    'up to the appearance of the delimiter character. The delimiter may be a single character or a '
                    'string.')
    c.argument('include', help="Specify one or more additional datasets to include in the response. "
                               "Options include: {}. Can be combined.".format(get_include_help_string(t_blob_include)),
               validator=validate_included_datasets_validator(include_class=t_blob_include))
    c.argument('marker', arg_type=marker_type)
    c.argument('num_results', arg_type=num_results_type)
    c.argument('prefix',
               help='Filter the results to return only blobs whose name begins with the specified prefix.')
    c.argument('show_next_marker', action='store_true',
               help='Show nextMarker in result when specified.')
# `storage blob generate-sas`: blob-level SAS with response-header overrides.
with self.argument_context('storage blob generate-sas') as c:
    from .completers import get_storage_acl_name_completion_list
    # FIX: the SDK path string was truncated (unterminated literal, syntax
    # error). BlobPermissions drives the --permissions help and validation.
    t_blob_permissions = self.get_sdk('blob.models#BlobPermissions')
    c.register_sas_arguments()
    c.argument('cache_control', help='Response header value for Cache-Control when resource is accessed'
                                     'using this shared access signature.')
    c.argument('content_disposition', help='Response header value for Content-Disposition when resource is accessed'
                                           'using this shared access signature.')
    c.argument('content_encoding', help='Response header value for Content-Encoding when resource is accessed'
                                        'using this shared access signature.')
    c.argument('content_language', help='Response header value for Content-Language when resource is accessed'
                                        'using this shared access signature.')
    c.argument('content_type', help='Response header value for Content-Type when resource is accessed'
                                    'using this shared access signature.')
    c.argument('full_uri', action='store_true',
               help='Indicates that this command return the full blob URI and the shared access signature token.')
    c.argument('as_user', min_api='2018-11-09', action='store_true',
               validator=as_user_validator,
               help="Indicates that this command return the SAS signed with the user delegation key. "
                    "The expiry parameter and '--auth-mode login' are required if this argument is specified. ")
    c.argument('id', options_list='--policy-name', validator=validate_policy,
               help='The name of a stored access policy within the container\'s ACL.',
               completer=get_storage_acl_name_completion_list(t_base_blob_service, 'container_name',
                                                              'get_container_acl'))
    c.argument('permission', options_list='--permissions',
               help=sas_help.format(get_permission_help_string(t_blob_permissions)),
               validator=get_permission_validator(t_blob_permissions))
    # The raw sas_token parameter is produced internally, never user-supplied.
    c.ignore('sas_token')
# Point-in-time blob restore over one or more blob ranges.
with self.argument_context('storage blob restore', resource_type=ResourceType.MGMT_STORAGE) as c:
    from ._validators import BlobRangeAddAction
    c.argument('blob_ranges', options_list=['--blob-range', '-r'], action=BlobRangeAddAction, nargs='+',
               help='Blob ranges to restore. You need to two values to specify start_range and end_range for each '
                    'blob range, e.g. -r blob1 blob2. Note: Empty means account start as start range value, and '
                    'means account end for end range.')
    c.argument('account_name', acct_name_type, id_part=None)
    c.argument('resource_group_name', required=False, validator=process_resource_group)
    c.argument('time_to_restore', type=get_datetime_type(True), options_list=['--time-to-restore', '-t'],
               help='Restore blob to the specified time, which should be UTC datetime in (Y-m-d\'T\'H:M:S\'Z\').')
with self.argument_context('storage blob rewrite', resource_type=ResourceType.DATA_STORAGE_BLOB,
                           min_api='2020-04-08') as c:
    c.register_blob_arguments()
    c.register_precondition_options()
    c.argument('source_url', options_list=['--source-uri', '-u'],
               help='A URL of up to 2 KB in length that specifies a file or blob. The value should be URL-encoded '
                    'as it would appear in a request URI. If the source is in another account, the source must either '
                    'be public or must be authenticated via a shared access signature. If the source is public, no '
                    'authentication is required.')
    c.extra('lease', options_list='--lease-id',
            help='Required if the blob has an active lease. Value can be a BlobLeaseClient object '
                 'or the lease ID as a string.')
    c.extra('standard_blob_tier', arg_type=get_enum_type(t_blob_tier), options_list='--tier',
            help='A standard blob tier value to set the blob to. For this version of the library, '
                 'this is only applicable to block blobs on standard storage accounts.')
    c.extra('encryption_scope',
            help='A predefined encryption scope used to encrypt the data on the service. An encryption scope '
                 'can be created using the Management API and referenced here by name. If a default encryption scope '
                 'has been defined at the container, this value will override it if the container-level scope is '
                 'configured to allow overrides. Otherwise an error will be raised.')
with self.argument_context('storage blob update') as c:
    t_blob_content_settings = self.get_sdk('blob.models#ContentSettings')
    c.register_content_settings_argument(t_blob_content_settings, update=True)
with self.argument_context('storage blob exists') as c:
    c.argument('blob_name', required=True)
with self.argument_context('storage blob url') as c:
    c.argument('protocol', arg_type=get_enum_type(['http', 'https'], 'https'), help='Protocol to use.')
    c.argument('snapshot', help='An string value that uniquely identifies the snapshot. The value of'
                                'this query parameter indicates the snapshot version.')
# Access-tier management, including rehydration priority for archived blobs.
with self.argument_context('storage blob set-tier') as c:
    from azure.cli.command_modules.storage._validators import (blob_rehydrate_priority_validator)
    c.register_blob_arguments()
    c.argument('blob_type', options_list=('--type', '-t'), arg_type=get_enum_type(('block', 'page')))
    c.argument('tier', validator=blob_tier_validator)
    c.argument('rehydrate_priority', options_list=('--rehydrate-priority', '-r'),
               arg_type=get_enum_type(('High', 'Standard')), validator=blob_rehydrate_priority_validator,
               is_preview=True, help="Indicate the priority with which to rehydrate an archived blob. "
                                     "The priority can be set on a blob only once, default value is Standard.")
with self.argument_context('storage blob service-properties delete-policy update') as c:
    c.argument('enable', arg_type=get_enum_type(['true', 'false']), help='Enables/disables soft-delete.')
    c.argument('days_retained', type=int,
               help='Number of days that soft-deleted blob will be retained. Must be in range [1,365].')
with self.argument_context('storage blob service-properties update', min_api='2018-03-28') as c:
    c.argument('delete_retention', arg_type=get_three_state_flag(), arg_group='Soft Delete',
               help='Enables soft-delete.')
    c.argument('delete_retention_period', type=int, arg_group='Soft Delete',
               help='Number of days that soft-deleted blob will be retained. Must be in range [1,365].')
    c.argument('static_website', arg_group='Static Website', arg_type=get_three_state_flag(),
               help='Enables static-website.')
    c.argument('index_document', help='Represents the name of the index document. This is commonly "index.html".',
               arg_group='Static Website')
    c.argument('error_document_404_path', options_list=['--404-document'], arg_group='Static Website',
               help='Represents the path to the error document that should be shown when an error 404 is issued,'
                    ' in other words, when a browser requests a page that does not exist.')
with self.argument_context('storage blob show') as c:
    c.register_blob_arguments()
    c.register_precondition_options()
    c.extra('snapshot', help='The snapshot parameter is an opaque DateTime value that, when present, '
                             'specifies the blob snapshot to retrieve.')
    c.argument('lease_id', help='Required if the blob has an active lease.')
# `storage blob upload`: source file, blob type, transfer tuning and tier.
with self.argument_context('storage blob upload') as c:
    from ._validators import page_blob_tier_validator, validate_encryption_scope_client_params
    from .sdkutil import get_blob_types, get_blob_tier_names
    t_blob_content_settings = self.get_sdk('blob.models#ContentSettings')
    c.register_content_settings_argument(t_blob_content_settings, update=False)
    c.register_blob_arguments()
    c.argument('file_path', options_list=('--file', '-f'), type=file_type, completer=FilesCompleter())
    c.argument('max_connections', type=int)
    c.argument('blob_type', options_list=('--type', '-t'), validator=validate_blob_type,
               arg_type=get_enum_type(get_blob_types()))
    c.argument('validate_content', action='store_true', min_api='2016-05-31')
    c.extra('no_progress', progress_type)
    c.extra('socket_timeout', socket_timeout_type)
    # FIX: this statement's head was mangled — a bare `tier_validator,` is a
    # syntax error. Restored to the tier registration that uses the
    # page_blob_tier_validator imported above.
    c.argument('tier', validator=page_blob_tier_validator,
               arg_type=get_enum_type(get_blob_tier_names(self.cli_ctx, 'PremiumPageBlobTier')),
               min_api='2017-04-17')
    c.argument('encryption_scope', validator=validate_encryption_scope_client_params,
               help='A predefined encryption scope used to encrypt the data on the service.')
# Batch transfer commands share source/destination plus tuning flags.
with self.argument_context('storage blob upload-batch') as c:
    from .sdkutil import get_blob_types
    t_blob_content_settings = self.get_sdk('blob.models#ContentSettings')
    c.register_content_settings_argument(t_blob_content_settings, update=False, arg_group='Content Control')
    c.ignore('source_files', 'destination_container_name')
    c.argument('source', options_list=('--source', '-s'))
    c.argument('destination', options_list=('--destination', '-d'))
    c.argument('max_connections', type=int,
               help='Maximum number of parallel connections to use when the blob size exceeds 64MB.')
    c.argument('maxsize_condition', arg_group='Content Control')
    c.argument('validate_content', action='store_true', min_api='2016-05-31', arg_group='Content Control')
    c.argument('blob_type', options_list=('--type', '-t'), arg_type=get_enum_type(get_blob_types()))
    c.extra('no_progress', progress_type)
    c.extra('socket_timeout', socket_timeout_type)
with self.argument_context('storage blob download') as c:
    c.argument('file_path', options_list=('--file', '-f'), type=file_type,
               completer=FilesCompleter(), validator=blob_download_file_path_validator)
    c.argument('max_connections', type=int)
    c.argument('start_range', type=int)
    c.argument('end_range', type=int)
    c.argument('validate_content', action='store_true', min_api='2016-05-31')
    c.extra('no_progress', progress_type)
    c.extra('socket_timeout', socket_timeout_type)
with self.argument_context('storage blob download-batch') as c:
    c.ignore('source_container_name')
    c.argument('destination', options_list=('--destination', '-d'))
    c.argument('source', options_list=('--source', '-s'))
    c.extra('no_progress', progress_type)
    c.extra('socket_timeout', socket_timeout_type)
    c.argument('max_connections', type=int,
               help='Maximum number of parallel connections to use when the blob size exceeds 64MB.')
with self.argument_context('storage blob delete') as c:
    from .sdkutil import get_delete_blob_snapshot_type_names
    c.argument('delete_snapshots', arg_type=get_enum_type(get_delete_blob_snapshot_type_names()))
with self.argument_context('storage blob delete-batch') as c:
    c.ignore('source_container_name')
    c.argument('source', options_list=('--source', '-s'))
    c.argument('delete_snapshots', arg_type=get_enum_type(get_delete_blob_snapshot_type_names()),
               help='Required if the blob has associated snapshots.')
    c.argument('lease_id', help='The active lease id for the blob.')
# Lease subcommands: acquire/break/change/release/renew.
with self.argument_context('storage blob lease') as c:
    c.argument('blob_name', arg_type=blob_name_type)
with self.argument_context('storage blob lease acquire') as c:
    c.register_precondition_options()
    c.register_blob_arguments()
    c.extra('lease_id', options_list='--proposed-lease-id', help='Proposed lease ID, in a GUID string format. '
            'The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format.')
    c.argument('lease_duration', help='Specify the duration of the lease, in seconds, or negative one (-1) for '
               'a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease '
               'duration cannot be changed using renew or change. Default is -1 (infinite lease)', type=int)
with self.argument_context('storage blob lease break') as c:
    c.register_precondition_options()
    c.register_blob_arguments()
    c.argument('lease_break_period', type=int,
               help="This is the proposed duration of seconds that the lease should continue before it is broken, "
                    "between 0 and 60 seconds. This break period is only used if it is shorter than the time remaining "
                    "on the lease. If longer, the time remaining on the lease is used. A new lease will not be "
                    "available before the break period has expired, but the lease may be held for longer than the break "
                    "period. If this header does not appear with a break operation, a fixed-duration lease breaks after "
                    "the remaining lease period elapses, and an infinite lease breaks immediately.")
with self.argument_context('storage blob lease change') as c:
    c.register_precondition_options()
    c.register_blob_arguments()
    c.extra('proposed_lease_id', help='Proposed lease ID, in a GUID string format. The Blob service returns 400 '
            '(Invalid request) if the proposed lease ID is not in the correct format.', required=True)
    c.extra('lease_id', help='Required if the blob has an active lease.', required=True)
for item in ['release', 'renew']:
    with self.argument_context('storage blob lease {}'.format(item)) as c:
        c.register_precondition_options()
        c.register_blob_arguments()
        c.extra('lease_id', help='Required if the blob has an active lease.', required=True)
# `storage copy` (azcopy-backed): flat --source/--destination URLs plus the
# older per-component argument groups they deprecate.
with self.argument_context('storage copy') as c:
    c.argument('destination',
               options_list=['--destination', '-d',
                             c.deprecate(target='--destination-local-path', redirect='--destination')],
               help="The path/url of copy destination. "
                    "It can be a local path, an url to azure storage server. If you provide destination parameter "
                    "here, you do not need to provide arguments in copy destination arguments group and copy "
                    "destination arguments will be deprecated in future.", required=False)
    c.argument('source',
               options_list=['--source', '-s',
                             c.deprecate(target='--source-local-path', redirect='--source')],
               help="The path/url of copy source. It can be a local"
                    " path, an url to azure storage server or AWS S3 buckets. If you provide source parameter here,"
                    " you do not need to provide arguments in copy source arguments group and copy source arguments"
                    " will be deprecated in future.", required=False)
    # Mirror container/blob/share/file-path options for both endpoints.
    for item in ['destination', 'source']:
        c.extra('{}_container'.format(item), arg_group='Copy {}'.format(item),
                help='Container name of copy {} storage account'.format(item))
        c.extra('{}_blob'.format(item), arg_group='Copy {}'.format(item),
                help='Blob name in blob container of copy {} storage account'.format(item))
        c.extra('{}_share'.format(item), arg_group='Copy {}'.format(item),
                help='File share name of copy {} storage account'.format(item))
        c.extra('{}_file_path'.format(item), arg_group='Copy {}'.format(item),
                help='File path in file share of copy {} storage account'.format(item))
    c.argument('account_name', acct_name_type, arg_group='Storage Account', id_part=None,
               options_list=['--account-name',
                             c.deprecate(target='--destination-account-name', redirect='--account-name')],
               help='Storage account name of copy destination')
    c.extra('source_account_name', arg_group='Copy source',
            help='Account name of copy source storage account.')
    c.extra('source_account_key', arg_group='Copy source',
            help='Account key of copy source storage account. Must be used in conjunction with source storage '
                 'account name.')
    c.extra('source_connection_string', arg_group='Copy source',
            options_list=['--source-connection-string', '--src-conn'],
            help='Connection string of source storage account.')
    c.extra('source_sas', arg_group='Copy source',
            help='Shared Access Signature (SAS) token of copy source. Must be used in conjunction with source '
                 'storage account name.')
    c.argument('put_md5', arg_group='Additional Flags', action='store_true',
               help='Create an MD5 hash of each file, and save the hash as the Content-MD5 property of the '
                    'destination blob/file.Only available when uploading.')
    c.argument('blob_type', arg_group='Additional Flags',
               arg_type=get_enum_type(["BlockBlob", "PageBlob", "AppendBlob"]),
               help='The type of blob at the destination.')
    c.argument('preserve_s2s_access_tier', arg_group='Additional Flags', arg_type=get_three_state_flag(),
               help='Preserve access tier during service to service copy. '
                    'Please refer to https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers '
                    'to ensure destination storage account support setting access tier. In the cases that setting '
                    'access tier is not supported, please use `--preserve-s2s-access-tier false` to bypass copying '
                    'access tier. (Default true)')
    c.argument('exclude_pattern', exclude_pattern_type)
    c.argument('include_pattern', include_pattern_type)
    c.argument('exclude_path', exclude_path_type)
    c.argument('include_path', include_path_type)
    c.argument('recursive', recursive_type)
    c.argument('content_type', arg_group='Additional Flags', help="Specify content type of the file. ")
    c.argument('follow_symlinks', arg_group='Additional Flags', action='store_true',
               help='Follow symbolic links when uploading from local file system.')
with self.argument_context('storage blob copy') as c:
for item in ['destination', 'source']:
c.argument('{}_if_modified_since'.format(item), arg_group='Pre-condition', arg_type=if_modified_since_type)
c.argument('{}_if_unmodified_since'.format(item), arg_group='Pre-condition',
arg_type=if_unmodified_since_type)
c.argument('{}_if_match'.format(item), arg_group='Pre-condition')
c.argument('{}_if_none_match'.format(item), arg_group='Pre-condition')
c.argument('container_name', container_name_type, options_list=('--destination-container', '-c'))
c.argument('blob_name', blob_name_type, options_list=('--destination-blob', '-b'),
help='Name of the destination blob. If the exists, it will be overwritten.')
c.argument('source_lease_id', arg_group='Copy Source')
with self.argument_context('storage blob copy start') as c:
from azure.cli.command_modules.storage._validators import validate_source_uri
c.register_source_uri_arguments(validator=validate_source_uri)
c.argument('requires_sync', arg_type=get_three_state_flag(),
help='Enforce that the service will not return a response until the copy is complete.'
'Not support for standard page blob.')
# `storage blob copy start-batch`: extra parameters describing the copy
# source; `source_client` is resolved by the validator and hidden from the
# CLI surface via ignore_type.
with self.argument_context('storage blob copy start-batch', arg_group='Copy Source') as c:
    from azure.cli.command_modules.storage._validators import get_source_file_or_blob_service_client
    c.argument('source_client', ignore_type, validator=get_source_file_or_blob_service_client)
    c.extra('source_account_name')
    c.extra('source_account_key')
    c.extra('source_uri')
    c.argument('source_sas')
    c.argument('source_container')
    c.argument('source_share')
# `storage blob incremental-copy start`: blob-only source URI plus
# destination pre-conditions.
with self.argument_context('storage blob incremental-copy start') as c:
    from azure.cli.command_modules.storage._validators import process_blob_source_uri
    c.register_source_uri_arguments(validator=process_blob_source_uri, blob_only=True)
    c.argument('destination_if_modified_since', arg_group='Pre-condition', arg_type=if_modified_since_type)
    c.argument('destination_if_unmodified_since', arg_group='Pre-condition', arg_type=if_unmodified_since_type)
    c.argument('destination_if_match', arg_group='Pre-condition')
    c.argument('destination_if_none_match', arg_group='Pre-condition')
    c.argument('container_name', container_name_type, options_list=('--destination-container', '-c'))
    # Fixed help-text typo: "If the exists" -> "If it exists".
    c.argument('blob_name', blob_name_type, options_list=('--destination-blob', '-b'),
               help='Name of the destination blob. If it exists, it will be overwritten.')
    c.argument('source_lease_id', arg_group='Copy Source')
# `storage blob query`: query acceleration over blob contents. The shared
# CLIArgumentType instances below are reused by both the input (in_*) and
# output (out_*) text-configuration argument groups.
with self.argument_context('storage blob query') as c:
    from ._validators import validate_text_configuration
    c.register_blob_arguments()
    c.register_precondition_options()
    # NOTE(review): line_separator and record_separator carry identical help
    # text and defaults — presumably one is intended for the JSON groups and
    # the other for the delimited groups; confirm against the SDK's
    # text-configuration models.
    line_separator = CLIArgumentType(help="The string used to separate records.", default='\n')
    column_separator = CLIArgumentType(help="The string used to separate columns.", default=',')
    quote_char = CLIArgumentType(help="The string used to quote a specific field.", default='"')
    record_separator = CLIArgumentType(help="The string used to separate records.", default='\n')
    escape_char = CLIArgumentType(help="The string used as an escape character. Default to empty.", default="")
    has_header = CLIArgumentType(
        arg_type=get_three_state_flag(),
        help="Whether the blob data includes headers in the first line. "
             "The default value is False, meaning that the data will be returned inclusive of the first line. "
             "If set to True, the data will be returned exclusive of the first line.", default=False)
    c.extra('lease', options_list='--lease-id',
            help='Required if the blob has an active lease.')
    c.argument('query_expression', help='The query expression in SQL. The maximum size of the query expression '
                                        'is 256KiB. For more information about the expression syntax, please see '
                                        'https://docs.microsoft.com/azure/storage/blobs/query-acceleration-sql-reference')
    c.extra('input_format', arg_type=get_enum_type(['csv', 'json']), validator=validate_text_configuration,
            help='Serialization type of the data currently stored in the blob. '
                 'The default is to treat the blob data as CSV data formatted in the default dialect.'
                 'The blob data will be reformatted according to that profile when blob format is specified. '
                 'If you choose `json`, please specify `Output Json Text Configuration Arguments` accordingly; '
                 'If you choose `csv`, please specify `Output Delimited Text Configuration Arguments`.')
    c.extra('output_format', arg_type=get_enum_type(['csv', 'json']),
            help='Output serialization type for the data stream. '
                 'By default the data will be returned as it is represented in the blob. '
                 'By providing an output format, the blob data will be reformatted according to that profile. '
                 'If you choose `json`, please specify `Output Json Text Configuration Arguments` accordingly; '
                 'If you choose `csv`, please specify `Output Delimited Text Configuration Arguments`.')
    # Input-side (in_*) and output-side (out_*) extras mirror each other; the
    # hidden input_config/output_config parameters are not user-facing (see
    # c.ignore below) — presumably assembled from these extras by the
    # validator; confirm in validate_text_configuration.
    c.extra('in_line_separator',
            arg_group='Input Json Text Configuration',
            arg_type=line_separator)
    c.extra('in_column_separator', arg_group='Input Delimited Text Configuration',
            arg_type=column_separator)
    c.extra('in_quote_char', arg_group='Input Delimited Text Configuration',
            arg_type=quote_char)
    c.extra('in_record_separator', arg_group='Input Delimited Text Configuration',
            arg_type=record_separator)
    c.extra('in_escape_char', arg_group='Input Delimited Text Configuration',
            arg_type=escape_char)
    c.extra('in_has_header', arg_group='Input Delimited Text Configuration',
            arg_type=has_header)
    c.extra('out_line_separator',
            arg_group='Output Json Text Configuration',
            arg_type=line_separator)
    c.extra('out_column_separator', arg_group='Output Delimited Text Configuration',
            arg_type=column_separator)
    c.extra('out_quote_char', arg_group='Output Delimited Text Configuration',
            arg_type=quote_char)
    c.extra('out_record_separator', arg_group='Output Delimited Text Configuration',
            arg_type=record_separator)
    c.extra('out_escape_char', arg_group='Output Delimited Text Configuration',
            arg_type=escape_char)
    c.extra('out_has_header', arg_group='Output Delimited Text Configuration',
            arg_type=has_header)
    c.extra('result_file', help='Specify the file path to save result.')
    c.ignore('input_config')
    c.ignore('output_config')
# `storage blob sync`: azcopy-backed sync from a local source to a container.
with self.argument_context('storage blob sync') as c:
    c.extra('destination_container', options_list=['--container', '-c'], required=True,
            help='The sync destination container.')
    c.extra('destination_path', options_list=['--destination', '-d'],
            validator=validate_azcopy_upload_destination_url,
            help='The sync destination path.')
    c.argument('source', options_list=['--source', '-s'],
               help='The source file path to sync from.')
    # 'destination' is not user-facing — presumably populated by
    # validate_azcopy_upload_destination_url; confirm in the validator.
    c.ignore('destination')
    c.argument('exclude_pattern', exclude_pattern_type)
    c.argument('include_pattern', include_pattern_type)
    c.argument('exclude_path', exclude_path_type)
# `storage container` and subcommands: data-plane container management.
with self.argument_context('storage container') as c:
    from .sdkutil import get_container_access_type_names
    c.argument('container_name', container_name_type, options_list=('--name', '-n'))
    c.argument('public_access', validator=validate_container_public_access,
               arg_type=get_enum_type(get_container_access_type_names()),
               help='Specifies whether data in the container may be accessed publicly.')
with self.argument_context('storage container create') as c:
    c.argument('container_name', container_name_type, options_list=('--name', '-n'), completer=None)
    c.argument('fail_on_exist', help='Throw an exception if the container already exists.')
    c.argument('account_name', help='Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT.')
    c.argument('default_encryption_scope', options_list=['--default-encryption-scope', '-d'],
               arg_group='Encryption Policy', is_preview=True,
               help='Default the container to use specified encryption scope for all writes.')
    c.argument('prevent_encryption_scope_override', options_list=['--prevent-encryption-scope-override', '-p'],
               arg_type=get_three_state_flag(), arg_group='Encryption Policy', is_preview=True,
               help='Block override of encryption scope from the container default.')
with self.argument_context('storage container delete') as c:
    c.argument('fail_not_exist', help='Throw an exception if the container does not exist.')
    c.argument('bypass_immutability_policy', action='store_true', help='Bypasses upcoming service behavior that '
               'will block a container from being deleted if it has a immutability-policy. Specifying this will '
               'ignore arguments aside from those used to identify the container ("--name", "--account-name").')
    c.argument('lease_id', help="If specified, delete_container only succeeds if the container's lease is active "
                                "and matches this ID. Required if the container has an active lease.")
    # Not user-facing — presumably resolved internally when bypassing the
    # immutability policy; confirm in the command's validators.
    c.ignore('processed_resource_group')
    c.ignore('processed_account_name')
    c.ignore('mgmt_client')
with self.argument_context('storage container exists') as c:
    c.ignore('blob_name', 'snapshot')
# `storage container immutability-policy create/extend` share the same extras.
for item in ['create', 'extend']:
    with self.argument_context('storage container immutability-policy {}'.format(item)) as c:
        c.argument('account_name',
                   help='Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT.')
        c.argument('if_match', help="An ETag value, or the wildcard character (*). Specify this header to perform "
                                    "the operation only if the resource's ETag matches the value specified.")
        c.extra('allow_protected_append_writes', options_list=['--allow-protected-append-writes', '-w'],
                arg_type=get_three_state_flag(), help='This property can only be changed for unlocked time-based '
                                                      'retention policies. When enabled, new blocks can be '
                                                      'written to an append blob while maintaining immutability '
                                                      'protection and compliance. Only new blocks can be added '
                                                      'and any existing blocks cannot be modified or deleted. '
                                                      'This property cannot be changed with '
                                                      'ExtendImmutabilityPolicy API.')
        c.extra('period', type=int, help='The immutability period for the blobs in the container since the policy '
                                         'creation, in days.')
        c.ignore('parameters')
with self.argument_context('storage container list') as c:
    c.argument('num_results', arg_type=num_results_type)
with self.argument_context('storage container set-permission') as c:
    c.ignore('signed_identifiers')
with self.argument_context('storage container lease') as c:
    c.argument('container_name', container_name_type)
with self.argument_context('storage container') as c:
    c.argument('account_name', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))
    c.argument('resource_group_name', required=False, validator=process_resource_group)
with self.argument_context('storage container immutability-policy') as c:
    c.argument('immutability_period_since_creation_in_days', options_list='--period')
    c.argument('container_name', container_name_type)
with self.argument_context('storage container legal-hold') as c:
    c.argument('container_name', container_name_type)
    c.argument('account_name',
               help='Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT.')
    c.argument('tags', nargs='+',
               help='Space-separated tags. Each tag should be 3 to 23 alphanumeric characters and is normalized '
                    'to lower case')
# `storage container policy`: stored access policy CRUD on a container ACL.
with self.argument_context('storage container policy') as c:
    from .completers import get_storage_acl_name_completion_list
    t_container_permissions = self.get_sdk('blob.models#ContainerPermissions')
    c.argument('container_name', container_name_type)
    c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
               completer=get_storage_acl_name_completion_list(t_base_blob_service, 'container_name',
                                                              'get_container_acl'))
    help_str = 'Allowed values: {}. Can be combined'.format(get_permission_help_string(t_container_permissions))
    c.argument('permission', options_list='--permissions', help=help_str,
               validator=get_permission_validator(t_container_permissions))
    c.argument('start', type=get_datetime_type(True),
               help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
    c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
# The lease id applies to every policy subcommand.
for item in ['create', 'delete', 'list', 'show', 'update']:
    with self.argument_context('storage container policy {}'.format(item)) as c:
        c.extra('lease_id', options_list='--lease-id', help='The container lease ID.')
# `storage container generate-sas`: SAS-token generation for a container,
# including the five response-header override arguments.
with self.argument_context('storage container generate-sas') as c:
    from .completers import get_storage_acl_name_completion_list
    t_container_permissions = self.get_sdk('blob.models#ContainerPermissions')
    c.register_sas_arguments()
    c.argument('id', options_list='--policy-name', validator=validate_policy,
               help='The name of a stored access policy within the container\'s ACL.',
               completer=get_storage_acl_name_completion_list(t_container_permissions, 'container_name',
                                                              'get_container_acl'))
    c.argument('permission', options_list='--permissions',
               help=sas_help.format(get_permission_help_string(t_container_permissions)),
               validator=get_permission_validator(t_container_permissions))
    # Fixed help text for the five response-header overrides: the two
    # concatenated fragments previously rendered as "...accessedusing..."
    # (missing space at the join).
    c.argument('cache_control', help='Response header value for Cache-Control when resource is accessed '
                                     'using this shared access signature.')
    c.argument('content_disposition', help='Response header value for Content-Disposition when resource is accessed '
                                           'using this shared access signature.')
    c.argument('content_encoding', help='Response header value for Content-Encoding when resource is accessed '
                                        'using this shared access signature.')
    c.argument('content_language', help='Response header value for Content-Language when resource is accessed '
                                        'using this shared access signature.')
    c.argument('content_type', help='Response header value for Content-Type when resource is accessed '
                                    'using this shared access signature.')
    c.argument('as_user', min_api='2018-11-09', action='store_true',
               validator=as_user_validator,
               help="Indicates that this command return the SAS signed with the user delegation key. "
                    "The expiry parameter and '--auth-mode login' are required if this argument is specified. ")
    c.ignore('sas_token')
# `storage container lease`: numeric lease durations.
with self.argument_context('storage container lease') as c:
    c.argument('lease_duration', type=int)
    c.argument('lease_break_period', type=int)
# `storage container-rm`: management-plane (ARM) container commands.
with self.argument_context('storage container-rm', resource_type=ResourceType.MGMT_STORAGE) as c:
    from .sdkutil import get_container_access_type_names
    c.argument('container_name', container_name_type, options_list=('--name', '-n'), id_part='child_name_2')
    c.argument('account_name', storage_account_type)
    c.argument('resource_group_name', required=False)
    c.argument('public_access', validator=validate_container_public_access,
               arg_type=get_enum_type(get_container_access_type_names()),
               help='Specify whether data in the container may be accessed publicly.')
    c.ignore('filter', 'maxpagesize')
with self.argument_context('storage container-rm create', resource_type=ResourceType.MGMT_STORAGE) as c:
    c.argument('fail_on_exist', help='Throw an exception if the container already exists.')
# Encryption-scope options apply to both create and update.
for item in ['create', 'update']:
    with self.argument_context('storage container-rm {}'.format(item),
                               resource_type=ResourceType.MGMT_STORAGE) as c:
        c.argument('default_encryption_scope', options_list=['--default-encryption-scope', '-d'],
                   arg_group='Encryption Policy', min_api='2019-06-01',
                   help='Default the container to use specified encryption scope for all writes.')
        c.argument('deny_encryption_scope_override',
                   options_list=['--deny-encryption-scope-override', '--deny-override'],
                   arg_type=get_three_state_flag(), arg_group='Encryption Policy', min_api='2019-06-01',
                   help='Block override of encryption scope from the container default.')
with self.argument_context('storage container-rm list', resource_type=ResourceType.MGMT_STORAGE) as c:
    c.argument('account_name', storage_account_type, id_part=None)
    c.argument('include_deleted', action='store_true',
               help='Include soft deleted containers when specified.')
# `storage share` (data plane) and `storage share-rm` (management plane).
with self.argument_context('storage share') as c:
    c.argument('share_name', share_name_type, options_list=('--name', '-n'))
with self.argument_context('storage share-rm', resource_type=ResourceType.MGMT_STORAGE) as c:
    c.argument('resource_group_name', required=False)
    c.argument('account_name', storage_account_type)
    c.argument('share_name', share_name_type, options_list=('--name', '-n'), id_part='child_name_2')
    c.argument('expand', default=None)
    c.ignore('filter', 'maxpagesize')
# Quota/metadata/protocol options apply to both create and update.
for item in ['create', 'update']:
    with self.argument_context('storage share-rm {}'.format(item), resource_type=ResourceType.MGMT_STORAGE) as c:
        t_enabled_protocols, t_root_squash, t_access_tier = \
            self.get_models('EnabledProtocols', 'RootSquashType', 'ShareAccessTier',
                            resource_type=ResourceType.MGMT_STORAGE)
        c.argument('share_quota', type=int, options_list=['--quota', '-q'],
                   help='The maximum size of the share in gigabytes. Must be greater than 0, and less than or '
                        'equal to 5TB (5120). For Large File Shares, the maximum size is 102400.')
        c.argument('metadata', nargs='+',
                   help='Metadata in space-separated key=value pairs that is associated with the share. '
                        'This overwrites any existing metadata',
                   validator=validate_metadata)
        c.argument('enabled_protocols', arg_type=get_enum_type(t_enabled_protocols), is_preview=True,
                   min_api='2019-06-01', help='Immutable property for file shares protocol. NFS protocol will be '
                                              'only available for premium file shares (file shares in the FileStorage account type).')
        c.argument('root_squash', arg_type=get_enum_type(t_root_squash), is_preview=True,
                   min_api='2019-06-01', help='Reduction of the access rights for the remote superuser.')
        c.argument('access_tier', arg_type=get_enum_type(t_access_tier), min_api='2019-06-01',
                   help='Access tier for specific share. GpV2 account can choose between TransactionOptimized '
                        '(default), Hot, and Cool. FileStorage account can choose Premium.')
with self.argument_context('storage share-rm list', resource_type=ResourceType.MGMT_STORAGE) as c:
    c.argument('account_name', storage_account_type, id_part=None)
    c.argument('include_deleted', action='store_true',
               help='Include soft deleted file shares when specified.')
with self.argument_context('storage share-rm restore', resource_type=ResourceType.MGMT_STORAGE) as c:
    c.argument('deleted_version',
               help='Identify the version of the deleted share that will be restored.')
    c.argument('share_name',
               help='The file share name. Identify the name of the deleted share that will be restored.')
    c.argument('restored_name',
               help='A new file share name to be restored. If not specified, deleted share name will be used.')
with self.argument_context('storage share url') as c:
    c.argument('unc', action='store_true', help='Output UNC network path.')
    c.argument('protocol', arg_type=get_enum_type(['http', 'https'], 'https'), help='Protocol to use.')
with self.argument_context('storage share list') as c:
    c.argument('num_results', arg_type=num_results_type)
with self.argument_context('storage share exists') as c:
    c.ignore('directory_name', 'file_name')
with self.argument_context('storage share policy') as c:
    from .completers import get_storage_acl_name_completion_list
    t_file_svc = self.get_sdk('file#FileService')
    t_share_permissions = self.get_sdk('file.models#SharePermissions')
    # Stored access policies reuse the generic 'container_name' parameter
    # for the share name.
    c.argument('container_name', share_name_type)
    c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
               completer=get_storage_acl_name_completion_list(t_file_svc, 'container_name', 'get_share_acl'))
    help_str = 'Allowed values: {}. Can be combined'.format(get_permission_help_string(t_share_permissions))
    c.argument('permission', options_list='--permissions', help=help_str,
               validator=get_permission_validator(t_share_permissions))
    c.argument('start', type=get_datetime_type(True),
               help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
    c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
with self.argument_context('storage share delete') as c:
    from .sdkutil import get_delete_file_snapshot_type_names
    c.argument('delete_snapshots', arg_type=get_enum_type(get_delete_file_snapshot_type_names()),
               help='Specify the deletion strategy when the share has snapshots.')
with self.argument_context('storage share generate-sas') as c:
    from .completers import get_storage_acl_name_completion_list
    t_share_permissions = self.get_sdk('file.models#SharePermissions')
    c.register_sas_arguments()
    c.argument('id', options_list='--policy-name',
               help='The name of a stored access policy within the share\'s ACL.',
               completer=get_storage_acl_name_completion_list(t_share_permissions, 'share_name', 'get_share_acl'))
    c.argument('permission', options_list='--permissions',
               help=sas_help.format(get_permission_help_string(t_share_permissions)),
               validator=get_permission_validator(t_share_permissions))
    c.ignore('sas_token')
# `storage directory` and single-object `storage file` commands.
with self.argument_context('storage directory') as c:
    c.argument('directory_name', directory_type, options_list=('--name', '-n'))
with self.argument_context('storage directory exists') as c:
    c.ignore('file_name')
    c.argument('directory_name', required=True)
with self.argument_context('storage file') as c:
    c.argument('file_name', file_name_type, options_list=('--name', '-n'))
    c.argument('directory_name', directory_type, required=False)
with self.argument_context('storage file copy') as c:
    c.argument('share_name', share_name_type, options_list=('--destination-share', '-s'),
               help='Name of the destination share. The share must exist.')
with self.argument_context('storage file copy cancel') as c:
    c.register_path_argument(options_list=('--destination-path', '-p'))
with self.argument_context('storage file delete') as c:
    c.register_path_argument()
with self.argument_context('storage file download') as c:
    c.register_path_argument()
    c.argument('file_path', options_list=('--dest',), type=file_type, required=False,
               help='Path of the file to write to. The source filename will be used if not specified.',
               validator=process_file_download_namespace, completer=FilesCompleter())
    c.argument('path', validator=None)  # validator called manually from process_file_download_namespace
    c.extra('no_progress', progress_type)
    c.argument('max_connections', type=int)
    c.argument('start_range', type=int)
    c.argument('end_range', type=int)
with self.argument_context('storage file exists') as c:
    c.register_path_argument()
with self.argument_context('storage file generate-sas') as c:
    from .completers import get_storage_acl_name_completion_list
    c.register_path_argument()
    c.register_sas_arguments()
    t_file_svc = self.get_sdk('file.fileservice#FileService')
    t_file_permissions = self.get_sdk('file.models#FilePermissions')
    c.argument('id', options_list='--policy-name',
               help='The name of a stored access policy within the container\'s ACL.',
               completer=get_storage_acl_name_completion_list(t_file_svc, 'container_name', 'get_container_acl'))
    c.argument('permission', options_list='--permissions',
               help=sas_help.format(get_permission_help_string(t_file_permissions)),
               validator=get_permission_validator(t_file_permissions))
    c.ignore('sas_token')
with self.argument_context('storage file list') as c:
    from .completers import dir_path_completer
    c.argument('directory_name', options_list=('--path', '-p'), help='The directory path within the file share.',
               completer=dir_path_completer)
    c.argument('num_results', arg_type=num_results_type)
with self.argument_context('storage file metadata show') as c:
    c.register_path_argument()
with self.argument_context('storage file metadata update') as c:
    c.register_path_argument()
with self.argument_context('storage file resize') as c:
    c.register_path_argument()
    c.argument('content_length', options_list='--size')
with self.argument_context('storage file show') as c:
    c.register_path_argument()
with self.argument_context('storage file update') as c:
    t_file_content_settings = self.get_sdk('file.models#ContentSettings')
    c.register_path_argument()
    c.register_content_settings_argument(t_file_content_settings, update=True)
with self.argument_context('storage file upload') as c:
    t_file_content_settings = self.get_sdk('file.models#ContentSettings')
    c.register_path_argument(default_file_param='local_file_path')
    c.register_content_settings_argument(t_file_content_settings, update=False, guess_from_file='local_file_path')
    c.argument('local_file_path', options_list='--source', type=file_type, completer=FilesCompleter())
    c.extra('no_progress', progress_type)
    c.argument('max_connections', type=int)
with self.argument_context('storage file url') as c:
    c.register_path_argument()
    c.argument('protocol', arg_type=get_enum_type(['http', 'https'], 'https'), help='Protocol to use.')
# Batch file-transfer commands and cross-service copy sources.
with self.argument_context('storage file upload-batch') as c:
    from ._validators import process_file_upload_batch_parameters
    # Fetch ContentSettings locally instead of relying on the variable left
    # behind by the earlier 'storage file upload' registration block, so this
    # block no longer depends on registration order.
    t_file_content_settings = self.get_sdk('file.models#ContentSettings')
    c.argument('source', options_list=('--source', '-s'), validator=process_file_upload_batch_parameters)
    c.argument('destination', options_list=('--destination', '-d'))
    c.argument('max_connections', arg_group='Download Control', type=int)
    c.argument('validate_content', action='store_true', min_api='2016-05-31')
    c.register_content_settings_argument(t_file_content_settings, update=False, arg_group='Content Settings')
    c.extra('no_progress', progress_type)
with self.argument_context('storage file download-batch') as c:
    from ._validators import process_file_download_batch_parameters
    c.argument('source', options_list=('--source', '-s'), validator=process_file_download_batch_parameters)
    c.argument('destination', options_list=('--destination', '-d'))
    c.argument('max_connections', arg_group='Download Control', type=int)
    c.argument('validate_content', action='store_true', min_api='2016-05-31')
    c.extra('no_progress', progress_type)
with self.argument_context('storage file delete-batch') as c:
    from ._validators import process_file_batch_source_parameters
    c.argument('source', options_list=('--source', '-s'), validator=process_file_batch_source_parameters)
with self.argument_context('storage file copy start') as c:
    from azure.cli.command_modules.storage._validators import validate_source_uri
    c.register_path_argument(options_list=('--destination-path', '-p'))
    c.register_source_uri_arguments(validator=validate_source_uri)
    c.extra('file_snapshot', default=None, arg_group='Copy Source',
            help='The file snapshot for the source storage account.')
with self.argument_context('storage file copy start-batch', arg_group='Copy Source') as c:
    from ._validators import get_source_file_or_blob_service_client
    c.argument('source_client', ignore_type, validator=get_source_file_or_blob_service_client)
    c.extra('source_account_name')
    c.extra('source_account_key')
    c.extra('source_uri')
    c.argument('source_sas')
    c.argument('source_container')
    c.argument('source_share')
# `storage cors`: CORS rules across the account's services; the 'services'
# value is a combination of the characters (b)lob, (f)ile, (q)ueue, (t)able.
with self.argument_context('storage cors list') as c:
    c.extra('services', validator=get_char_options_validator('bfqt', 'services'), default='bqft',
            options_list='--services', required=False)
with self.argument_context('storage cors add') as c:
    c.extra('services', validator=get_char_options_validator('bfqt', 'services'), required=True,
            options_list='--services')
    c.argument('max_age')
    c.argument('origins', nargs='+')
    c.argument('methods', nargs='+',
               arg_type=get_enum_type(['DELETE', 'GET', 'HEAD', 'MERGE', 'POST', 'OPTIONS', 'PUT']))
    c.argument('allowed_headers', nargs='+')
    c.argument('exposed_headers', nargs='+')
with self.argument_context('storage cors clear') as c:
    c.extra('services', validator=get_char_options_validator('bfqt', 'services'), required=True,
            options_list='--services')
# `storage queue` and `storage message` commands.
with self.argument_context('storage queue generate-sas') as c:
    from .completers import get_storage_acl_name_completion_list
    t_queue_permissions = self.get_sdk('queue.models#QueuePermissions')
    c.register_sas_arguments()
    c.argument('id', options_list='--policy-name',
               help='The name of a stored access policy within the share\'s ACL.',
               completer=get_storage_acl_name_completion_list(t_queue_permissions, 'queue_name', 'get_queue_acl'))
    c.argument('permission', options_list='--permissions',
               help=sas_help.format(get_permission_help_string(t_queue_permissions)),
               validator=get_permission_validator(t_queue_permissions))
    c.ignore('sas_token')
    c.ignore('auth_mode')
with self.argument_context('storage queue') as c:
    c.argument('queue_name', queue_name_type, options_list=('--name', '-n'))
with self.argument_context('storage queue list') as c:
    c.argument('include_metadata', help='Specify that queue metadata be returned in the response.')
    c.argument('marker', arg_type=marker_type)
    c.argument('num_results', arg_type=num_results_type)
    c.argument('prefix', help='Filter the results to return only queues whose names '
                              'begin with the specified prefix.')
    c.argument('show_next_marker', action='store_true',
               help='Show nextMarker in result when specified.')
    c.extra('timeout', help='Request timeout in seconds. Apply to each call to the service.', type=int)
with self.argument_context('storage queue create') as c:
    c.argument('queue_name', queue_name_type, options_list=('--name', '-n'), completer=None)
with self.argument_context('storage queue policy') as c:
    from .completers import get_storage_acl_name_completion_list
    t_queue_permissions = self.get_sdk('queue.models#QueuePermissions')
    # Stored access policies reuse the generic 'container_name' parameter for
    # the queue name. NOTE(review): the completer references t_queue_service,
    # which must be defined earlier in this function — confirm.
    c.argument('container_name', queue_name_type)
    c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
               completer=get_storage_acl_name_completion_list(t_queue_service, 'container_name', 'get_queue_acl'))
    help_str = 'Allowed values: {}. Can be combined'.format(get_permission_help_string(t_queue_permissions))
    c.argument('permission', options_list='--permissions', help=help_str,
               validator=get_permission_validator(t_queue_permissions))
    c.argument('start', type=get_datetime_type(True),
               help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
    c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
    c.ignore('auth_mode')
with self.argument_context('storage message') as c:
    c.argument('queue_name', queue_name_type)
    c.argument('message_id', options_list='--id')
    c.argument('content', type=unicode_string, help='Message content, up to 64KB in size.')
# `storage remove`: azcopy-backed deletion of blobs or files.
with self.argument_context('storage remove') as c:
    from .completers import file_path_completer
    c.extra('container_name', container_name_type, validator=validate_azcopy_remove_arguments)
    c.extra('blob_name', options_list=('--name', '-n'), arg_type=blob_name_type)
    c.extra('share_name', share_name_type, help='The file share name.')
    c.extra('path', options_list=('--path', '-p'),
            help='The path to the file within the file share.',
            completer=file_path_completer)
    c.argument('exclude_pattern', exclude_pattern_type)
    c.argument('include_pattern', include_pattern_type)
    c.argument('exclude_path', exclude_path_type)
    c.argument('include_path', include_path_type)
    c.argument('recursive', recursive_type)
    # Not user-facing — presumably populated by
    # validate_azcopy_remove_arguments; confirm in the validator.
    c.ignore('destination')
    c.ignore('service')
    c.ignore('target')
# `storage table` and `storage entity` commands.
with self.argument_context('storage table') as c:
    c.argument('table_name', table_name_type, options_list=('--name', '-n'))
with self.argument_context('storage table create') as c:
    c.argument('table_name', table_name_type, options_list=('--name', '-n'), completer=None)
    c.argument('fail_on_exist', help='Throw an exception if the table already exists.')
with self.argument_context('storage table policy') as c:
    from ._validators import table_permission_validator
    from .completers import get_storage_acl_name_completion_list
    # Stored access policies reuse the generic 'container_name' parameter for
    # the table name. NOTE(review): the completer references t_table_service,
    # which must be defined earlier in this function — confirm.
    c.argument('container_name', table_name_type)
    c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
               completer=get_storage_acl_name_completion_list(t_table_service, 'table_name', 'get_table_acl'))
    help_str = 'Allowed values: (r)ead/query (a)dd (u)pdate (d)elete. Can be combined.'
    c.argument('permission', options_list='--permissions', help=help_str, validator=table_permission_validator)
    c.argument('start', type=get_datetime_type(True),
               help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
    c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
with self.argument_context('storage table generate-sas') as c:
    from .completers import get_storage_acl_name_completion_list
    c.register_sas_arguments()
    c.argument('id', options_list='--policy-name',
               help='The name of a stored access policy within the table\'s ACL.',
               completer=get_storage_acl_name_completion_list(t_table_service, 'table_name', 'get_table_acl'))
    c.argument('permission', options_list='--permissions',
               help=sas_help.format('(r)ead/query (a)dd (u)pdate (d)elete'),
               validator=table_permission_validator)
    c.ignore('sas_token')
with self.argument_context('storage entity') as c:
    c.ignore('property_resolver')
    c.argument('entity', options_list=('--entity', '-e'), validator=validate_entity, nargs='+')
    c.argument('select', nargs='+', validator=validate_select,
               help='Space-separated list of properties to return for each entity.')
with self.argument_context('storage entity insert') as c:
    c.argument('if_exists', arg_type=get_enum_type(['fail', 'merge', 'replace']))
with self.argument_context('storage entity query') as c:
    c.argument('accept', default='minimal', validator=validate_table_payload_format,
               arg_type=get_enum_type(['none', 'minimal', 'full']),
               help='Specifies how much metadata to include in the response payload.')
    c.argument('marker', validator=validate_marker, nargs='+')
# `storage fs` (ADLS Gen2 file system) commands: the listed subcommands all
# take the file-system name and a timeout.
for item in ['create', 'show', 'delete', 'exists', 'metadata update', 'metadata show']:
    with self.argument_context('storage fs {}'.format(item)) as c:
        c.extra('file_system_name', options_list=['--name', '-n'],
                help="File system name.", required=True)
        c.extra('timeout', timeout_type)
with self.argument_context('storage fs create') as c:
    from .sdkutil import get_fs_access_type_names
    c.argument('public_access', arg_type=get_enum_type(get_fs_access_type_names()),
               validator=validate_fs_public_access,
               help="Specify whether data in the file system may be accessed publicly and the level of access.")
with self.argument_context('storage fs list') as c:
    c.argument('include_metadata', arg_type=get_three_state_flag(),
               help='Specify that file system metadata be returned in the response. The default value is "False".')
    c.argument('name_starts_with', options_list=['--prefix'],
               help='Filter the results to return only file systems whose names begin with the specified prefix.')
# `storage fs directory` subcommands share the file-system and directory-path
# extras.
for item in ['create', 'show', 'delete', 'exists', 'move', 'metadata update', 'metadata show']:
    with self.argument_context('storage fs directory {}'.format(item)) as c:
        c.extra('file_system_name', options_list=['-f', '--file-system'], help="File system name.", required=True)
        c.extra('directory_path', options_list=['--name', '-n'],
                help="The name of directory.", required=True)
        c.extra('timeout', timeout_type)
with self.argument_context('storage fs directory create') as c:
    c.extra('permissions', permissions_type)
    c.extra('umask', umask_type)
with self.argument_context('storage fs directory list') as c:
    c.extra('file_system_name', options_list=['-f', '--file-system'], help="File system name.", required=True)
    c.argument('recursive', arg_type=get_three_state_flag(), default=True,
               help='Look into sub-directories recursively when set to true.')
    c.argument('path', help="Filter the results to return only paths under the specified path.")
    c.argument('num_results', type=int, help='Specify the maximum number of results to return.')
with self.argument_context('storage fs directory move') as c:
    c.argument('new_name', options_list=['--new-directory', '-d'],
               help='The new directory name the users want to move to. The value must have the following format: '
                    '"{filesystem}/{directory}/{subdirectory}".')
with self.argument_context('storage fs file list') as c:
c.extra('file_system_name', options_list=['-f', '--file-system'], help="File system name.", required=True)
c.argument('recursive', arg_type=get_three_state_flag(), default=True,
help='Look into sub-directories recursively when set to true.')
c.argument('exclude_dir', action='store_true',
help='List only files in the given file system.')
c.argument('path', help='Filter the results to return only paths under the specified path.')
c.argument('num_results', type=int, default=5000,
help='Specify the maximum number of results to return. If the request does not specify num_results '
'or specifies a value greater than 5,000, the server will return up to 5,000 items.')
c.argument('marker',
help='An opaque continuation token. This value can be retrieved from the next_marker field of a '
'previous generator object. If specified, this generator will begin returning results from this '
'point.')
for item in ['create', 'show', 'delete', 'exists', 'upload', 'append', 'download', 'show', 'metadata update',
'metadata show']:
with self.argument_context('storage fs file {}'.format(item)) as c:
c.extra('file_system_name', options_list=['-f', '--file-system'],
help='File system name.', required=True)
c.extra('path', options_list=['-p', '--path'], help="The file path in a file system.",
required=True)
c.extra('timeout', timeout_type)
c.argument('content', help='Content to be appended to file.')
with self.argument_context('storage fs file create') as c:
t_file_content_settings = self.get_sdk('_models#ContentSettings',
resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE)
c.register_content_settings_argument(t_file_content_settings, update=False)
c.extra('permissions', permissions_type)
c.extra('umask', umask_type)
c.extra('timeout', timeout_type)
with self.argument_context('storage fs file download') as c:
c.argument('destination_path', options_list=['--destination', '-d'], type=file_type,
help='The local file where the file or folder will be downloaded to. The source filename will be '
'used if not specified.')
c.argument('overwrite', arg_type=get_three_state_flag(),
help="Overwrite an existing file when specified. Default value is false.")
with self.argument_context('storage fs file move') as c:
t_file_content_settings = self.get_sdk('_models#ContentSettings',
resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE)
c.register_content_settings_argument(t_file_content_settings, update=False)
c.extra('file_system_name', options_list=['-f', '--file-system'],
help='File system name.', required=True)
c.extra('path', options_list=['-p', '--path'], required=True,
help="The original file path users want to move in a file system.")
c.argument('new_name', options_list=['--new-path'],
help='The new path the users want to move to. The value must have the following format: '
'"{filesystem}/{directory}/{subdirectory}/{file}".')
with self.argument_context('storage fs file upload') as c:
t_file_content_settings = self.get_sdk('_models#ContentSettings',
resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE)
c.register_content_settings_argument(t_file_content_settings, update=False)
c.argument('local_path', options_list=['--source', '-s'],
help='Path of the local file to upload as the file content.')
c.argument('overwrite', arg_type=get_three_state_flag(), help="Overwrite an existing file when specified.")
c.argument('if_match', arg_group='Precondition',
help="An ETag value, or the wildcard character (*). Specify this header to perform the operation "
"only if the resource's ETag matches the value specified.")
c.argument('if_none_match', arg_group='Precondition',
help="An ETag value, or the wildcard character (*). Specify this header to perform the operation "
"only if the resource's ETag does not match the value specified.")
c.argument('if_modified_since', arg_group='Precondition',
help="A Commence only if modified since supplied UTC datetime (Y-m-d'T'H:M'Z').")
c.argument('if_unmodified_since', arg_group='Precondition',
help="A Commence only if unmodified since supplied UTC datetime (Y-m-d'T'H:M'Z').")
c.argument('permissions', permissions_type)
c.argument('umask', umask_type)
for item in ['set', 'show']:
with self.argument_context('storage fs access {}'.format(item)) as c:
from ._validators import validate_access_control
c.extra('file_system_name', options_list=['-f', '--file-system'],
help='File system name.', required=True)
c.extra('directory_path', options_list=['-p', '--path'],
help='The path to a file or directory in the specified file system.', required=True)
c.argument('permissions', validator=validate_access_control)
c.ignore('upn')
for item in ['set-recursive', 'update-recursive', 'remove-recursive']:
with self.argument_context('storage fs access {}'.format(item)) as c:
c.register_fs_directory_arguments()
c.argument('acl', help='The value is a comma-separated list of access control entries. Each access control '
'entry (ACE) consists of a scope, a type, a user or group identifier, and permissions in the '
'format "[scope:][type]:[id]:[permissions]". For more information, please refer to '
'https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-access-control.')
c.extra('continuation',
help='Optional continuation token that can be used to resume previously stopped operation.')
c.extra('batch_size', type=int, help='Optional. If data set size exceeds batch size then operation will '
'be split into multiple requests so that progress can be tracked. Batch size should be between 1 '
'and 2000. The default when unspecified is 2000.')
c.extra('max_batches', type=int, help='Optional. Define maximum number of batches that single change '
'Access Control operation can execute. If maximum is reached before all sub-paths are processed, '
'then continuation token can be used to resume operation. Empty value indicates that maximum '
'number of batches in unbound and operation continues till end.')
c.extra('continue_on_failure', arg_type=get_three_state_flag(),
help='If set to False, the operation will terminate quickly on encountering user errors (4XX). '
'If True, the operation will ignore user errors and proceed with the operation on other '
'sub-entities of the directory. Continuation token will only be returned when '
'--continue-on-failure is True in case of user errors. If not set the default value is False '
'for this.')
| true | true |
f735623bbcbe181b6aabbdb2e8750fd1eb7dab3c | 1,433 | py | Python | main.py | wmcgee3/imguber | 58e21267e9cddbbcd4df5a8055fc9d4895c2823a | [
"MIT"
] | null | null | null | main.py | wmcgee3/imguber | 58e21267e9cddbbcd4df5a8055fc9d4895c2823a | [
"MIT"
] | null | null | null | main.py | wmcgee3/imguber | 58e21267e9cddbbcd4df5a8055fc9d4895c2823a | [
"MIT"
] | null | null | null | from fastapi import FastAPI, Path
from typing import Optional
from pydantic import BaseModel
app = FastAPI()
class Item(BaseModel):
    """Schema for a full inventory item; only ``brand`` is optional."""
    name: str  # display name of the item
    price: float  # unit price
    brand: Optional[str] = None  # manufacturer/brand, if known
class UpdateItem(BaseModel):
    """Partial-update schema: every field optional; None means 'leave unchanged'."""
    name: Optional[str] = None  # new display name, or None to keep current
    price: Optional[float] = None  # new unit price, or None to keep current
    brand: Optional[str] = None  # new brand, or None to keep current
@app.get('/')
def home():
    """Root endpoint; returns a static confirmation string (health check)."""
    return "did it!"
# In-memory item store mapping item_id (int) -> Item. Not persisted;
# cleared on every process restart.
inventory = {
}
@app.get("/get-item/{item_id}")
def get_item(item_id: int = Path(None, description="The ID of the item you would like to view.")):
return inventory[item_id]
@app.get("/get-by-name/")
def get_item(*, name: Optional[str] = None, test: Optional[int]):
for item_id in inventory:
if inventory[item_id].name == name:
return inventory[item_id]
return{"Data": "Not found"}
@app.post("/create-item/{item_id}")
def create_item(item_id: int, item: Item):
if item_id in inventory:
return {"Error": "Item ID already exists."}
inventory[item_id] = item
return inventory[item_id]
@app.put("/update-item/{item_id}")
def update_item(item_id: int, item: UpdateItem):
if item_id not in inventory:
return {"Error": "Item ID does not exists."}
if item.name != None:
inventory[item_id].name = item.name
if item.price != None:
inventory[item_id].price = item.price
if item.brand != None:
inventory[item_id].brand = item.brand
return inventory[item_id]
| 23.883333 | 98 | 0.655269 | from fastapi import FastAPI, Path
from typing import Optional
from pydantic import BaseModel
app = FastAPI()
class Item(BaseModel):
name: str
price: float
brand: Optional[str] = None
class UpdateItem(BaseModel):
name: Optional[str] = None
price: Optional[float] = None
brand: Optional[str] = None
@app.get('/')
def home():
return "did it!"
inventory = {
}
@app.get("/get-item/{item_id}")
def get_item(item_id: int = Path(None, description="The ID of the item you would like to view.")):
return inventory[item_id]
@app.get("/get-by-name/")
def get_item(*, name: Optional[str] = None, test: Optional[int]):
for item_id in inventory:
if inventory[item_id].name == name:
return inventory[item_id]
return{"Data": "Not found"}
@app.post("/create-item/{item_id}")
def create_item(item_id: int, item: Item):
if item_id in inventory:
return {"Error": "Item ID already exists."}
inventory[item_id] = item
return inventory[item_id]
@app.put("/update-item/{item_id}")
def update_item(item_id: int, item: UpdateItem):
if item_id not in inventory:
return {"Error": "Item ID does not exists."}
if item.name != None:
inventory[item_id].name = item.name
if item.price != None:
inventory[item_id].price = item.price
if item.brand != None:
inventory[item_id].brand = item.brand
return inventory[item_id]
| true | true |
f735625bd5e10ebc5dcbfb0804a6ef4459c6c0a4 | 2,508 | py | Python | test/functional/mempool_limit.py | pniwre/titcoin | 4f37e544a7320f945900c5f9ae2b0835b017a6d4 | [
"MIT"
] | 7 | 2019-01-16T23:53:26.000Z | 2020-11-07T15:06:42.000Z | test/functional/mempool_limit.py | pniwre/titcoin | 4f37e544a7320f945900c5f9ae2b0835b017a6d4 | [
"MIT"
] | 1 | 2021-04-14T18:44:41.000Z | 2021-04-28T03:25:41.000Z | test/functional/mempool_limit.py | pniwre/titcoin | 4f37e544a7320f945900c5f9ae2b0835b017a6d4 | [
"MIT"
] | 9 | 2018-11-24T00:33:36.000Z | 2021-09-05T13:06:35.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool limiting together/eviction with the wallet."""
from test_framework.test_framework import TitcoinTestFramework
from test_framework.util import *
class MempoolLimitTest(TitcoinTestFramework):
    """Fill a deliberately tiny mempool and verify that (a) a low-fee wallet
    transaction is evicted and (b) the node's dynamic mempoolminfee rises
    above the static minrelaytxfee once the pool comes under pressure."""
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        # 5 MB mempool so eviction triggers quickly; -spendzeroconfchange=0
        # forces the wallet to fund every tx from confirmed UTXOs only.
        self.extra_args = [["-maxmempool=5", "-spendzeroconfchange=0"]]
    def run_test(self):
        txouts = gen_return_txouts()
        relayfee = self.nodes[0].getnetworkinfo()['relayfee']
        self.log.info('Check that mempoolminfee is minrelytxfee')
        # While the pool is empty, both fee floors sit at the default relay fee.
        assert_equal(self.nodes[0].getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
        assert_equal(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
        txids = []
        # 91 confirmed UTXOs: one funds the eviction victim, 90 feed the
        # three filler batches of 30 below.
        utxos = create_confirmed_utxos(relayfee, self.nodes[0], 91)
        self.log.info('Create a mempool tx that will be evicted')
        us0 = utxos.pop()
        inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
        outputs = {self.nodes[0].getnewaddress() : 0.0001}
        tx = self.nodes[0].createrawtransaction(inputs, outputs)
        self.nodes[0].settxfee(relayfee) # specifically fund this tx with low fee
        txF = self.nodes[0].fundrawtransaction(tx)
        self.nodes[0].settxfee(0) # return to automatic fee selection
        txFS = self.nodes[0].signrawtransaction(txF['hex'])
        txid = self.nodes[0].sendrawtransaction(txFS['hex'])
        relayfee = self.nodes[0].getnetworkinfo()['relayfee']
        base_fee = relayfee*100
        # Flood the pool with three batches of 30 large transactions at rising
        # fee rates, so the earlier low-fee tx is the cheapest and gets evicted.
        for i in range (3):
            txids.append([])
            txids[i] = create_lots_of_big_transactions(self.nodes[0], txouts, utxos[30*i:30*i+30], 30, (i+1)*base_fee)
        self.log.info('The tx should be evicted by now')
        assert(txid not in self.nodes[0].getrawmempool())
        txdata = self.nodes[0].gettransaction(txid)
        assert(txdata['confirmations'] == 0) #confirmation should still be 0
        self.log.info('Check that mempoolminfee is larger than minrelytxfee')
        # Eviction bumps the dynamic floor above the static relay minimum.
        assert_equal(self.nodes[0].getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
        assert_greater_than(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
if __name__ == '__main__':
    MempoolLimitTest().main()
| 45.6 | 118 | 0.672249 |
from test_framework.test_framework import TitcoinTestFramework
from test_framework.util import *
class MempoolLimitTest(TitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-maxmempool=5", "-spendzeroconfchange=0"]]
def run_test(self):
txouts = gen_return_txouts()
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
self.log.info('Check that mempoolminfee is minrelytxfee')
assert_equal(self.nodes[0].getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
assert_equal(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
txids = []
utxos = create_confirmed_utxos(relayfee, self.nodes[0], 91)
self.log.info('Create a mempool tx that will be evicted')
us0 = utxos.pop()
inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
outputs = {self.nodes[0].getnewaddress() : 0.0001}
tx = self.nodes[0].createrawtransaction(inputs, outputs)
self.nodes[0].settxfee(relayfee)
txF = self.nodes[0].fundrawtransaction(tx)
self.nodes[0].settxfee(0)
txFS = self.nodes[0].signrawtransaction(txF['hex'])
txid = self.nodes[0].sendrawtransaction(txFS['hex'])
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
base_fee = relayfee*100
for i in range (3):
txids.append([])
txids[i] = create_lots_of_big_transactions(self.nodes[0], txouts, utxos[30*i:30*i+30], 30, (i+1)*base_fee)
self.log.info('The tx should be evicted by now')
assert(txid not in self.nodes[0].getrawmempool())
txdata = self.nodes[0].gettransaction(txid)
assert(txdata['confirmations'] == 0)
self.log.info('Check that mempoolminfee is larger than minrelytxfee')
assert_equal(self.nodes[0].getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
assert_greater_than(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
if __name__ == '__main__':
MempoolLimitTest().main()
| true | true |
f735626cfc6177de873e236c69c8bfc77e514feb | 1,709 | py | Python | kitsune/users/tests/test_utils.py | 10allday-Software/kitsune | 3e032fcee8af6adeb468b5331feeb1f16c5a6584 | [
"BSD-3-Clause"
] | 929 | 2015-01-04T08:08:51.000Z | 2022-03-31T06:20:44.000Z | kitsune/users/tests/test_utils.py | hafixo/kitsune | d7756872e16590eea1c6adaeb5bc78f83414d753 | [
"BSD-3-Clause"
] | 1,751 | 2015-01-02T00:04:37.000Z | 2022-03-31T10:24:30.000Z | kitsune/users/tests/test_utils.py | Whoerr/kitsune | 2428573b4920a824c3e712b8a4870f8c1ada8f64 | [
"BSD-3-Clause"
] | 605 | 2015-01-01T14:08:36.000Z | 2022-03-28T15:39:45.000Z | from django.contrib.auth.models import User
from nose.tools import eq_
from kitsune.sumo.tests import TestCase
from kitsune.users.utils import suggest_username
class UtilsTestCase(TestCase):
    """Tests for suggest_username (kitsune.users.utils)."""
    def test_suggest_username(self):
        """Suggestions come from the email local part, with the smallest
        free numeric suffix appended when the bare name is taken."""
        # Free name: local part of the email is used as-is.
        eq_("someuser", suggest_username("someuser@test.com"))
        User.objects.create(username="someuser")
        suggested = suggest_username("someuser@test.com")
        eq_("someuser1", suggested)
        # A non-contiguous taken suffix ("someuser4") does not block "someuser1".
        User.objects.create(username="someuser4")
        suggested = suggest_username("someuser@test.com")
        eq_("someuser1", suggested)
        # Collision checks appear case-insensitive ("ricky"/"Ricky1" both count),
        # but the suggestion keeps the email's original casing -> "rIcky2".
        User.objects.create(username="ricky")
        User.objects.create(username="Ricky1")
        User.objects.create(username="ricky33")
        suggested = suggest_username("rIcky@test.com")
        eq_("rIcky2", suggested)
        # Suffix increments past every taken plain-numbered variant.
        User.objects.create(username="user")
        User.objects.create(username="user01")
        User.objects.create(username="user1")
        User.objects.create(username="user2")
        suggested = suggest_username("user@test.com")
        eq_("user3", suggested)
        # '+' in the local part is preserved; the suffix is appended after it.
        User.objects.create(username="testuser+1")
        User.objects.create(username="testuser+11")
        suggested = suggest_username("testuser+1@example.com")
        eq_("testuser+12", suggested)
    def test_suggest_username_invalid_characters(self):
        """Test some invalid to Django usernames."""
        # Characters Django disallows in usernames (space, '/') are stripped
        # before the uniqueness suffix is applied.
        eq_("foobar", suggest_username("foo bar"))
        User.objects.create(username="foobar")
        eq_("foobar1", suggest_username("foo bar"))
        eq_("foobar1", suggest_username("foobar /1"))
        User.objects.create(username="foobar1")
        eq_("foobar11", suggest_username("foobar /1"))
| 34.877551 | 62 | 0.672323 | from django.contrib.auth.models import User
from nose.tools import eq_
from kitsune.sumo.tests import TestCase
from kitsune.users.utils import suggest_username
class UtilsTestCase(TestCase):
def test_suggest_username(self):
eq_("someuser", suggest_username("someuser@test.com"))
User.objects.create(username="someuser")
suggested = suggest_username("someuser@test.com")
eq_("someuser1", suggested)
User.objects.create(username="someuser4")
suggested = suggest_username("someuser@test.com")
eq_("someuser1", suggested)
User.objects.create(username="ricky")
User.objects.create(username="Ricky1")
User.objects.create(username="ricky33")
suggested = suggest_username("rIcky@test.com")
eq_("rIcky2", suggested)
User.objects.create(username="user")
User.objects.create(username="user01")
User.objects.create(username="user1")
User.objects.create(username="user2")
suggested = suggest_username("user@test.com")
eq_("user3", suggested)
User.objects.create(username="testuser+1")
User.objects.create(username="testuser+11")
suggested = suggest_username("testuser+1@example.com")
eq_("testuser+12", suggested)
def test_suggest_username_invalid_characters(self):
eq_("foobar", suggest_username("foo bar"))
User.objects.create(username="foobar")
eq_("foobar1", suggest_username("foo bar"))
eq_("foobar1", suggest_username("foobar /1"))
User.objects.create(username="foobar1")
eq_("foobar11", suggest_username("foobar /1"))
| true | true |
f735639ff9c38d11b781e79a8c7ff13ce1b000dd | 1,984 | py | Python | coded_distributed_computing.py | kcexn/singular-value-decomposition | 63e2a23f9f0db9aa361e338b8065d59b80f7649e | [
"BSD-Source-Code"
] | null | null | null | coded_distributed_computing.py | kcexn/singular-value-decomposition | 63e2a23f9f0db9aa361e338b8065d59b80f7649e | [
"BSD-Source-Code"
] | null | null | null | coded_distributed_computing.py | kcexn/singular-value-decomposition | 63e2a23f9f0db9aa361e338b8065d59b80f7649e | [
"BSD-Source-Code"
] | 1 | 2021-03-22T12:15:53.000Z | 2021-03-22T12:15:53.000Z | ''' coded_distributed_computing
This module contains functions related to a study of the coded distributed computing model.
'''
import numpy as np
def encode_matrix(A: np.matrix, G: np.matrix) -> np.matrix:
    '''Encode the input matrix ``A`` with the generator matrix ``G``.

    Parameters
    ----------
    A : np.matrix
        Input matrix to code. Its columns can be viewed as a batch of
        messages accumulated over time, so encoding the matrix is the
        same as encoding each message vector with the linear block code.
    G : np.matrix
        Generator matrix of the linear block code (van Lint,
        "Introduction to Coding Theory").

    Returns
    -------
    np.matrix
        The matrix product ``A @ G``, i.e. the encoded matrix. Encoding
        this way preserves the matrix-vector product structure, which is
        what makes linear block codes usable for coded distributed
        computing of ``A x``.
    '''
    return np.matmul(A, G)
| 38.901961 | 91 | 0.717742 | import numpy as np
def encode_matrix(A: np.matrix, G: np.matrix) -> np.matrix:
return np.matmul(A,G)
| true | true |
f735644a6b5de633eca03c7d4bbc6e3805aae0f8 | 4,380 | py | Python | contrib/seeds/generate-seeds.py | cicxcoin/cicoin | b48b11574ae38ae063670a755b9d50ef6960e1e8 | [
"MIT"
] | 3 | 2020-06-19T11:21:43.000Z | 2021-02-16T16:29:13.000Z | contrib/seeds/generate-seeds.py | Madurajaya/cicoin | b48b11574ae38ae063670a755b9d50ef6960e1e8 | [
"MIT"
] | 1 | 2020-04-29T20:15:13.000Z | 2020-04-29T20:15:13.000Z | contrib/seeds/generate-seeds.py | Madurajaya/cicoin | b48b11574ae38ae063670a755b9d50ef6960e1e8 | [
"MIT"
] | 4 | 2020-01-25T06:31:23.000Z | 2022-02-28T05:36:12.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys
import os
import re
# ipv4 in ipv6 prefix (::ffff:a.b.c.d mapping)
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix (OnionCat address range)
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])

def name_to_ipv6(addr):
    """Convert an address string to its 16-byte IPv6 representation.

    Accepted forms: ``<name>.onion`` (base32 payload mapped into the OnionCat
    range), dotted IPv4 (mapped to ::ffff:a.b.c.d), plain IPv6, and the old
    ``0xDDBBCCAA`` little-endian IPv4 pnSeed format.

    Raises ValueError for unparseable or malformed input.  Validation uses
    explicit raises rather than ``assert`` so it still fires under
    ``python -O`` (asserts are stripped with optimization enabled).
    """
    if len(addr)>6 and addr.endswith('.onion'):
        vchAddr = b32decode(addr[0:-6], True)  # casefold=True: accept lowercase base32
        if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % vchAddr)
        return pchOnionCat + vchAddr
    elif '.' in addr: # IPv4
        return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
    elif ':' in addr: # IPv6
        sub = [[], []] # prefix bytes, suffix bytes (around '::')
        x = 0
        addr = addr.split(':')
        for i,comp in enumerate(addr):
            if comp == '':
                if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
                    continue
                x += 1 # '::' switches accumulation to the suffix
                if x >= 2:
                    raise ValueError('At most one "::" permitted in %s' % ':'.join(addr))
            else: # two bytes per component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        # With no '::' the address must fill all 16 bytes; with one '::' the
        # gap must be at least one zero byte.
        if not ((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0)):
            raise ValueError('Invalid IPv6 address %s' % ':'.join(addr))
        return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
    elif addr.startswith('0x'): # IPv4-in-little-endian
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    else:
        raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    """Split a host[:port] spec into (16-byte address, port).

    Handles bracketed IPv6 with optional port, bare IPv6 (no port), and
    host:port / bare-host forms; *defaultport* fills in a missing port.
    """
    bracketed = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if bracketed:  # [ipv6] or [ipv6]:port
        host = bracketed.group(1)
        port = bracketed.group(2)
    elif s.count(':') > 1:  # bare ipv6, cannot carry a port
        host = s
        port = ''
    else:
        host, _, port = s.partition(':')
    port = int(port) if port else defaultport
    return (name_to_ipv6(host), port)
def process_nodes(g, f, structname, defaultport):
    """Write one C array of SeedSpec6 entries, built from the lines of *f*, to *g*.

    Blank lines and '#' comments in the input are ignored; each remaining
    line is parsed as a host[:port] spec.
    """
    g.write('static SeedSpec6 %s[] = {\n' % structname)
    entries = []
    for line in f:
        hash_pos = line.find('#')
        if hash_pos != -1:
            line = line[0:hash_pos]  # strip trailing comment
        line = line.strip()
        if not line:
            continue
        host, port = parse_spec(line, defaultport)
        hoststr = ','.join(('0x%02x' % b) for b in host)
        entries.append('    {{%s}, %i}' % (hoststr, port))
    g.write(',\n'.join(entries))
    g.write('\n};\n')
def main():
    """Read nodes_main.txt and nodes_test.txt from the directory given as
    argv[1] and print the chainparamsseeds.h content to stdout."""
    if len(sys.argv)<2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        sys.exit(1)
    g = sys.stdout
    indir = sys.argv[1]
    g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
    g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
    g.write('/**\n')
    g.write(' * List of fixed seed nodes for the cicoin network\n')
    g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
    g.write(' *\n')
    g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
    g.write(' * IPv4 as well as onion addresses are wrapped inside an IPv6 address accordingly.\n')
    g.write(' */\n')
    # Mainnet seeds default to port 8333, testnet to 18333.
    with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding="utf8") as f:
        process_nodes(g, f, 'pnSeed6_main', 8333)
    g.write('\n')
    with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding="utf8") as f:
        process_nodes(g, f, 'pnSeed6_test', 18333)
    g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
    main()
from base64 import b32decode
from binascii import a2b_hex
import sys
import os
import re
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % vchAddr)
return pchOnionCat + vchAddr
elif '.' in addr:
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr:
sub = [[], []]
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1):
continue
x += 1
assert(x < 2)
else:
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'):
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match:
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1:
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
sys.exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the cicoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside an IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_main', 8333)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_test', 18333)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| true | true |
f73564c89e4f82e73a70d4c133d1f686f85559c4 | 3,011 | py | Python | app/app.py | TBxy/bokeh_start_app | 755494f6bc60e92ce17022bbd7f707a39132cbd0 | [
"MIT"
] | 1 | 2017-04-27T09:15:48.000Z | 2017-04-27T09:15:48.000Z | app/app.py | TBxy/bokeh_start_app | 755494f6bc60e92ce17022bbd7f707a39132cbd0 | [
"MIT"
] | null | null | null | app/app.py | TBxy/bokeh_start_app | 755494f6bc60e92ce17022bbd7f707a39132cbd0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""The app module, containing the app factory function."""
from flask import Flask, render_template
import commands, public, user, plots, admin
from .assets import assets
from .extensions import bcrypt, cache, csrf_protect, db, debug_toolbar, login_manager, migrate, api, flask_admin
from .settings import ProdConfig
#from flask_admin.contrib.sqla import ModelView
from flask_admin.menu import MenuLink
from api.v1 import *
def create_app(config_object=ProdConfig):
    """An application factory, as explained here: http://flask.pocoo.org/docs/patterns/appfactories/.

    :param config_object: The configuration object to use.
    :return: a fully configured Flask application.
    """
    # __name__ may be dotted (e.g. "app.app"); use the package root for Flask.
    app = Flask(__name__.split('.')[0])
    app.config.from_object(config_object)
    # Registration order: extensions first, so blueprints/handlers can use them.
    register_extensions(app)
    register_blueprints(app)
    register_errorhandlers(app)
    register_shellcontext(app)
    register_commands(app)
    return app
def register_extensions(app):
    """Register Flask extensions."""
    assets.init_app(app)
    bcrypt.init_app(app)
    cache.init_app(app)
    db.init_app(app)
    csrf_protect.init_app(app)
    login_manager.init_app(app)
    # Custom anonymous-user class so permission checks work before login.
    login_manager.anonymous_user = user.models.AnonymousUser
    debug_toolbar.init_app(app)
    migrate.init_app(app, db)
    api.init_app(app)
    # Flask-Admin: the custom index view controls access to /admin.
    flask_admin.init_app(app, index_view=admin.views.MyAdminIndexView())
    # Explicit endpoint avoids clashing with the user blueprint's routes.
    flask_admin.add_view(admin.views.UsersAdmin(user.models.User, db.session, endpoint='admin_users'))
    flask_admin.add_view(admin.views.RolesAdmin(user.models.Roles, db.session))
    #flask_admin.add_view(ModelView(user.models.UserRoles, db.session))
    flask_admin.add_view(admin.views.PermissionsAdmin(user.models.Permissions, db.session))
    flask_admin.add_link(MenuLink(name='Back Home', url='/'))
    #assets.url_expire = True
    return None
def register_blueprints(app):
    """Attach all view blueprints (public, plots, user) to *app*."""
    for blueprint in (public.views.blueprint,
                      plots.views.blueprint,
                      user.views.blueprint):
        app.register_blueprint(blueprint)
    return None
def register_errorhandlers(app):
    """Install a template-rendering handler for common HTTP error codes."""
    def render_error(error):
        """Render the <status>.html template matching the error's code."""
        # HTTPExceptions carry a `code`; anything else falls back to 500.
        status = getattr(error, 'code', 500)
        return render_template('{0}.html'.format(status)), status
    for status_code in (401, 403, 404, 500):
        app.errorhandler(status_code)(render_error)
    return None
def register_shellcontext(app):
    """Expose convenient objects inside the `flask shell` session."""
    def shell_context():
        """Objects injected into the interactive shell namespace."""
        return {'db': db, 'User': user.models.User}
    app.shell_context_processor(shell_context)
def register_commands(app):
"""Register Click commands."""
app.cli.add_command(commands.test)
app.cli.add_command(commands.lint)
app.cli.add_command(commands.clean)
app.cli.add_command(commands.urls)
app.cli.add_command(commands.insertdb)
| 32.376344 | 112 | 0.721023 |
from flask import Flask, render_template
import commands, public, user, plots, admin
from .assets import assets
from .extensions import bcrypt, cache, csrf_protect, db, debug_toolbar, login_manager, migrate, api, flask_admin
from .settings import ProdConfig
from flask_admin.menu import MenuLink
from api.v1 import *
def create_app(config_object=ProdConfig):
app = Flask(__name__.split('.')[0])
app.config.from_object(config_object)
register_extensions(app)
register_blueprints(app)
register_errorhandlers(app)
register_shellcontext(app)
register_commands(app)
return app
def register_extensions(app):
assets.init_app(app)
bcrypt.init_app(app)
cache.init_app(app)
db.init_app(app)
csrf_protect.init_app(app)
login_manager.init_app(app)
login_manager.anonymous_user = user.models.AnonymousUser
debug_toolbar.init_app(app)
migrate.init_app(app, db)
api.init_app(app)
flask_admin.init_app(app, index_view=admin.views.MyAdminIndexView())
flask_admin.add_view(admin.views.UsersAdmin(user.models.User, db.session, endpoint='admin_users'))
flask_admin.add_view(admin.views.RolesAdmin(user.models.Roles, db.session))
flask_admin.add_view(admin.views.PermissionsAdmin(user.models.Permissions, db.session))
flask_admin.add_link(MenuLink(name='Back Home', url='/'))
return None
def register_blueprints(app):
app.register_blueprint(public.views.blueprint)
app.register_blueprint(plots.views.blueprint)
app.register_blueprint(user.views.blueprint)
return None
def register_errorhandlers(app):
def render_error(error):
error_code = getattr(error, 'code', 500)
return render_template('{0}.html'.format(error_code)), error_code
for errcode in [401, 403, 404, 500]:
app.errorhandler(errcode)(render_error)
return None
def register_shellcontext(app):
def shell_context():
return {
'db': db,
'User': user.models.User}
app.shell_context_processor(shell_context)
def register_commands(app):
app.cli.add_command(commands.test)
app.cli.add_command(commands.lint)
app.cli.add_command(commands.clean)
app.cli.add_command(commands.urls)
app.cli.add_command(commands.insertdb)
| true | true |
f73566ab8b420130bb2088c29d7a73aa381c8024 | 342 | py | Python | setup.py | juasiepo/able | ce6dc9db0237a66d0063cac95ed233d491d86684 | [
"MIT"
] | null | null | null | setup.py | juasiepo/able | ce6dc9db0237a66d0063cac95ed233d491d86684 | [
"MIT"
] | null | null | null | setup.py | juasiepo/able | ce6dc9db0237a66d0063cac95ed233d491d86684 | [
"MIT"
] | null | null | null | from setuptools import setup, convert_path
main_ns = {}
ver_path = convert_path('able/version.py')
with open(ver_path) as ver_file:
exec(ver_file.read(), main_ns)
setup(
name='able',
version=main_ns['__version__'],
packages=['able', 'able.android'],
description='Bluetooth Low Energy for Android',
license='MIT',
)
| 21.375 | 51 | 0.690058 | from setuptools import setup, convert_path
main_ns = {}
ver_path = convert_path('able/version.py')
with open(ver_path) as ver_file:
exec(ver_file.read(), main_ns)
setup(
name='able',
version=main_ns['__version__'],
packages=['able', 'able.android'],
description='Bluetooth Low Energy for Android',
license='MIT',
)
| true | true |
f73567b822ba92a6781d158f87b83b557c2b6a7b | 5,187 | py | Python | djangosige/apps/financeiro/models/lancamento.py | 3ysoftwarehouse/vcd-novo | 5181e93aa57e926af84332d76b7b0628637544e1 | [
"MIT"
] | null | null | null | djangosige/apps/financeiro/models/lancamento.py | 3ysoftwarehouse/vcd-novo | 5181e93aa57e926af84332d76b7b0628637544e1 | [
"MIT"
] | null | null | null | djangosige/apps/financeiro/models/lancamento.py | 3ysoftwarehouse/vcd-novo | 5181e93aa57e926af84332d76b7b0628637544e1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.db import models
from django.core.validators import MinValueValidator
from decimal import Decimal
from django.core.urlresolvers import reverse_lazy
from django.template.defaultfilters import date
import locale
locale.setlocale(locale.LC_ALL, '')
STATUS_CONTA_SAIDA_ESCOLHAS = (
(u'0', u'Paga'),
(u'1', u'A pagar'),
(u'2', u'Atrasada'),
)
STATUS_CONTA_ENTRADA_ESCOLHAS = (
(u'0', u'Recebida'),
(u'1', u'A receber'),
(u'2', u'Atrasada'),
)
class Lancamento(models.Model):
data_vencimento = models.DateField(null=True, blank=True)
data_pagamento = models.DateField(null=True, blank=True)
descricao = models.CharField(max_length=255)
conta_corrente = models.ForeignKey(
'cadastro.Banco', related_name="conta_corrente_conta", on_delete=models.SET_NULL, null=True, blank=True)
valor_total = models.DecimalField(max_digits=13, decimal_places=2, validators=[
MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
abatimento = models.DecimalField(max_digits=13, decimal_places=2, validators=[
MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
juros = models.DecimalField(max_digits=13, decimal_places=2, validators=[
MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
valor_liquido = models.DecimalField(max_digits=13, decimal_places=2, validators=[
MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
movimentar_caixa = models.BooleanField(default=True)
movimento_caixa = models.ForeignKey(
'financeiro.MovimentoCaixa', related_name="movimento_caixa_lancamento", on_delete=models.SET_NULL, null=True, blank=True)
moeda = models.ForeignKey('financeiro.Moeda', related_name="moeda", on_delete=models.SET_NULL, null=True,
blank=True)
class Meta:
verbose_name = "Lançamento"
permissions = (
("view_lancamento", "Can view lancamento"),
)
def format_valor_liquido(self):
return locale.format(u'%.2f', self.valor_liquido, 1)
@property
def format_data_vencimento(self):
return '%s' % date(self.data_vencimento, "d/m/Y")
@property
def format_data_pagamento(self):
return '%s' % date(self.data_pagamento, "d/m/Y")
class Entrada(Lancamento):
cliente = models.ForeignKey('cadastro.Cliente', related_name="conta_cliente",
on_delete=models.SET_NULL, null=True, blank=True)
status = models.CharField(
max_length=1, choices=STATUS_CONTA_ENTRADA_ESCOLHAS, default='1')
grupo_plano = models.ForeignKey(
'financeiro.PlanoContasGrupo', related_name="grupo_plano_recebimento", on_delete=models.SET_NULL, null=True, blank=True)
def get_edit_url(self):
if self.status == '0':
return reverse_lazy('financeiro:editarrecebimentoview', kwargs={'pk': self.id})
else:
return reverse_lazy('financeiro:editarcontareceberview', kwargs={'pk': self.id})
def get_tipo(self):
return 'Entrada'
class Saida(Lancamento):
fornecedor = models.ForeignKey(
'cadastro.Fornecedor', related_name="conta_fornecedor", on_delete=models.SET_NULL, null=True, blank=True)
status = models.CharField(
max_length=1, choices=STATUS_CONTA_SAIDA_ESCOLHAS, default='1')
grupo_plano = models.ForeignKey(
'financeiro.PlanoContasGrupo', related_name="grupo_plano_pagamento", on_delete=models.SET_NULL, null=True, blank=True)
def get_edit_url(self):
if self.status == '0':
return reverse_lazy('financeiro:editarpagamentoview', kwargs={'pk': self.id})
else:
return reverse_lazy('financeiro:editarcontapagarview', kwargs={'pk': self.id})
def get_tipo(self):
return 'Saida'
class MovimentoCaixa(models.Model):
data_movimento = models.DateField(null=True, blank=True)
saldo_inicial = models.DecimalField(
max_digits=13, decimal_places=2, default=Decimal('0.00'))
saldo_final = models.DecimalField(
max_digits=13, decimal_places=2, default=Decimal('0.00'))
entradas = models.DecimalField(max_digits=13, decimal_places=2, validators=[
MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
saidas = models.DecimalField(max_digits=13, decimal_places=2, validators=[
MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
class Meta:
verbose_name = "Movimento de Caixa"
permissions = (
("acesso_fluxodecaixa", "Pode acessar o Fluxo de Caixa"),
)
@property
def format_data_movimento(self):
return '%s' % date(self.data_movimento, "d/m/Y")
@property
def valor_lucro_prejuizo(self):
return self.saldo_final - self.saldo_inicial
def __unicode__(self):
s = u'Movimento dia %s' % (self.data_movimento)
return s
def __str__(self):
s = u'Movimento dia %s' % (self.data_movimento)
return s
| 39.59542 | 129 | 0.658955 |
from django.db import models
from django.core.validators import MinValueValidator
from decimal import Decimal
from django.core.urlresolvers import reverse_lazy
from django.template.defaultfilters import date
import locale
locale.setlocale(locale.LC_ALL, '')
STATUS_CONTA_SAIDA_ESCOLHAS = (
(u'0', u'Paga'),
(u'1', u'A pagar'),
(u'2', u'Atrasada'),
)
STATUS_CONTA_ENTRADA_ESCOLHAS = (
(u'0', u'Recebida'),
(u'1', u'A receber'),
(u'2', u'Atrasada'),
)
class Lancamento(models.Model):
data_vencimento = models.DateField(null=True, blank=True)
data_pagamento = models.DateField(null=True, blank=True)
descricao = models.CharField(max_length=255)
conta_corrente = models.ForeignKey(
'cadastro.Banco', related_name="conta_corrente_conta", on_delete=models.SET_NULL, null=True, blank=True)
valor_total = models.DecimalField(max_digits=13, decimal_places=2, validators=[
MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
abatimento = models.DecimalField(max_digits=13, decimal_places=2, validators=[
MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
juros = models.DecimalField(max_digits=13, decimal_places=2, validators=[
MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
valor_liquido = models.DecimalField(max_digits=13, decimal_places=2, validators=[
MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
movimentar_caixa = models.BooleanField(default=True)
movimento_caixa = models.ForeignKey(
'financeiro.MovimentoCaixa', related_name="movimento_caixa_lancamento", on_delete=models.SET_NULL, null=True, blank=True)
moeda = models.ForeignKey('financeiro.Moeda', related_name="moeda", on_delete=models.SET_NULL, null=True,
blank=True)
class Meta:
verbose_name = "Lançamento"
permissions = (
("view_lancamento", "Can view lancamento"),
)
def format_valor_liquido(self):
return locale.format(u'%.2f', self.valor_liquido, 1)
@property
def format_data_vencimento(self):
return '%s' % date(self.data_vencimento, "d/m/Y")
@property
def format_data_pagamento(self):
return '%s' % date(self.data_pagamento, "d/m/Y")
class Entrada(Lancamento):
cliente = models.ForeignKey('cadastro.Cliente', related_name="conta_cliente",
on_delete=models.SET_NULL, null=True, blank=True)
status = models.CharField(
max_length=1, choices=STATUS_CONTA_ENTRADA_ESCOLHAS, default='1')
grupo_plano = models.ForeignKey(
'financeiro.PlanoContasGrupo', related_name="grupo_plano_recebimento", on_delete=models.SET_NULL, null=True, blank=True)
def get_edit_url(self):
if self.status == '0':
return reverse_lazy('financeiro:editarrecebimentoview', kwargs={'pk': self.id})
else:
return reverse_lazy('financeiro:editarcontareceberview', kwargs={'pk': self.id})
def get_tipo(self):
return 'Entrada'
class Saida(Lancamento):
fornecedor = models.ForeignKey(
'cadastro.Fornecedor', related_name="conta_fornecedor", on_delete=models.SET_NULL, null=True, blank=True)
status = models.CharField(
max_length=1, choices=STATUS_CONTA_SAIDA_ESCOLHAS, default='1')
grupo_plano = models.ForeignKey(
'financeiro.PlanoContasGrupo', related_name="grupo_plano_pagamento", on_delete=models.SET_NULL, null=True, blank=True)
def get_edit_url(self):
if self.status == '0':
return reverse_lazy('financeiro:editarpagamentoview', kwargs={'pk': self.id})
else:
return reverse_lazy('financeiro:editarcontapagarview', kwargs={'pk': self.id})
def get_tipo(self):
return 'Saida'
class MovimentoCaixa(models.Model):
data_movimento = models.DateField(null=True, blank=True)
saldo_inicial = models.DecimalField(
max_digits=13, decimal_places=2, default=Decimal('0.00'))
saldo_final = models.DecimalField(
max_digits=13, decimal_places=2, default=Decimal('0.00'))
entradas = models.DecimalField(max_digits=13, decimal_places=2, validators=[
MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
saidas = models.DecimalField(max_digits=13, decimal_places=2, validators=[
MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
class Meta:
verbose_name = "Movimento de Caixa"
permissions = (
("acesso_fluxodecaixa", "Pode acessar o Fluxo de Caixa"),
)
@property
def format_data_movimento(self):
return '%s' % date(self.data_movimento, "d/m/Y")
@property
def valor_lucro_prejuizo(self):
return self.saldo_final - self.saldo_inicial
def __unicode__(self):
s = u'Movimento dia %s' % (self.data_movimento)
return s
def __str__(self):
s = u'Movimento dia %s' % (self.data_movimento)
return s
| true | true |
f73567edde1853a3919e05ddccb738e82af0155f | 3,861 | py | Python | tests/pytests/functional/states/file/test_rename.py | tomdoherty/salt | f87d5d7abbf9777773c4d91fdafecb8b1a728e76 | [
"Apache-2.0"
] | 1 | 2022-03-12T00:03:19.000Z | 2022-03-12T00:03:19.000Z | tests/pytests/functional/states/file/test_rename.py | tomdoherty/salt | f87d5d7abbf9777773c4d91fdafecb8b1a728e76 | [
"Apache-2.0"
] | 2 | 2022-03-02T16:11:35.000Z | 2022-03-03T08:04:30.000Z | tests/pytests/functional/states/file/test_rename.py | tomdoherty/salt | f87d5d7abbf9777773c4d91fdafecb8b1a728e76 | [
"Apache-2.0"
] | null | null | null | """
Tests for file.rename state function
"""
# nox -e pytest-zeromq-3.8(coverage=False) -- -vvv --run-slow --run-destructive tests\pytests\functional\states\file\test_rename.py
import pytest
import salt.utils.path
pytestmark = [
pytest.mark.windows_whitelisted,
]
@pytest.fixture(scope="module")
def file(states):
return states.file
@pytest.fixture(scope="function")
def source():
with pytest.helpers.temp_file(
name="old_name.txt", contents="Source content"
) as source:
yield source
def test_defaults(file, source):
"""
Test file.rename with defaults
"""
new_name = source.parent / "new_name.txt"
try:
file.rename(name=str(new_name), source=str(source))
assert new_name.exists()
assert not source.exists()
finally:
new_name.unlink()
def test_relative_name(file):
"""
Test file.rename when name is a relative path
"""
result = file.rename(name="..\\rel\\path\\test", source=str(source))
assert "is not an absolute path" in result.filtered["comment"]
assert result.filtered["result"] is False
def test_missing_source(file, source):
"""
Test file.rename with the source file is missing
"""
new_name = source.parent / "new_name.txt"
missing_name = source.parent / "missing.txt"
result = file.rename(name=str(new_name), source=str(missing_name))
assert "has already been moved out of place" in result.filtered["comment"]
assert result.filtered["result"] is True
def test_target_exists(file, source):
"""
Test file.rename when there is an existing file with the new name
"""
new_name = source.parent / "new_name.txt"
new_name.write_text("existing file")
try:
result = file.rename(name=str(new_name), source=str(source))
assert "exists and will not be overwritten" in result.filtered["comment"]
assert result.filtered["result"] is True
finally:
new_name.unlink()
def test_target_exists_force(file, source):
"""
Test file.rename when there is an existing file with the new name and
force=True
"""
new_name = source.parent / "new_name.txt"
new_name.write_text("existing file")
try:
file.rename(name=str(new_name), source=str(source), force=True)
assert new_name.exists()
assert not source.exists()
assert new_name.read_text() == "Source content"
finally:
new_name.unlink()
def test_test_is_true(file, source):
new_name = source.parent / "new_name.txt"
result = file.rename(name=str(new_name), source=str(source), test=True)
assert "is set to be moved to" in result.filtered["comment"]
assert result.filtered["result"] is None
def test_missing_dirs(file, source):
new_name = source.parent / "missing_subdir" / "new_name.txt"
result = file.rename(name=str(new_name), source=str(source))
assert "is not present" in result.filtered["comment"]
assert result.filtered["result"] is False
def test_missing_dirs_makedirs(file, source):
new_name = source.parent / "missing_subdir" / "new_name.txt"
try:
file.rename(name=str(new_name), source=str(source), makedirs=True)
assert new_name.exists()
assert not source.exists()
finally:
new_name.unlink()
new_name.parent.rmdir()
def test_source_is_link(file, source):
link_source = source.parent / "link_source.lnk"
link_source.symlink_to(source)
new_name = source.parent / "new_name.lnk"
try:
file.rename(name=str(new_name), source=str(link_source))
assert new_name.exists()
assert new_name.is_symlink()
assert salt.utils.path.readlink(str(new_name)) == str(source)
assert new_name.read_text() == "Source content"
assert not link_source.exists()
finally:
new_name.unlink()
| 30.164063 | 131 | 0.673401 |
import pytest
import salt.utils.path
pytestmark = [
pytest.mark.windows_whitelisted,
]
@pytest.fixture(scope="module")
def file(states):
return states.file
@pytest.fixture(scope="function")
def source():
with pytest.helpers.temp_file(
name="old_name.txt", contents="Source content"
) as source:
yield source
def test_defaults(file, source):
new_name = source.parent / "new_name.txt"
try:
file.rename(name=str(new_name), source=str(source))
assert new_name.exists()
assert not source.exists()
finally:
new_name.unlink()
def test_relative_name(file):
result = file.rename(name="..\\rel\\path\\test", source=str(source))
assert "is not an absolute path" in result.filtered["comment"]
assert result.filtered["result"] is False
def test_missing_source(file, source):
new_name = source.parent / "new_name.txt"
missing_name = source.parent / "missing.txt"
result = file.rename(name=str(new_name), source=str(missing_name))
assert "has already been moved out of place" in result.filtered["comment"]
assert result.filtered["result"] is True
def test_target_exists(file, source):
new_name = source.parent / "new_name.txt"
new_name.write_text("existing file")
try:
result = file.rename(name=str(new_name), source=str(source))
assert "exists and will not be overwritten" in result.filtered["comment"]
assert result.filtered["result"] is True
finally:
new_name.unlink()
def test_target_exists_force(file, source):
new_name = source.parent / "new_name.txt"
new_name.write_text("existing file")
try:
file.rename(name=str(new_name), source=str(source), force=True)
assert new_name.exists()
assert not source.exists()
assert new_name.read_text() == "Source content"
finally:
new_name.unlink()
def test_test_is_true(file, source):
new_name = source.parent / "new_name.txt"
result = file.rename(name=str(new_name), source=str(source), test=True)
assert "is set to be moved to" in result.filtered["comment"]
assert result.filtered["result"] is None
def test_missing_dirs(file, source):
new_name = source.parent / "missing_subdir" / "new_name.txt"
result = file.rename(name=str(new_name), source=str(source))
assert "is not present" in result.filtered["comment"]
assert result.filtered["result"] is False
def test_missing_dirs_makedirs(file, source):
new_name = source.parent / "missing_subdir" / "new_name.txt"
try:
file.rename(name=str(new_name), source=str(source), makedirs=True)
assert new_name.exists()
assert not source.exists()
finally:
new_name.unlink()
new_name.parent.rmdir()
def test_source_is_link(file, source):
link_source = source.parent / "link_source.lnk"
link_source.symlink_to(source)
new_name = source.parent / "new_name.lnk"
try:
file.rename(name=str(new_name), source=str(link_source))
assert new_name.exists()
assert new_name.is_symlink()
assert salt.utils.path.readlink(str(new_name)) == str(source)
assert new_name.read_text() == "Source content"
assert not link_source.exists()
finally:
new_name.unlink()
| true | true |
f735682a4455b157cc8cd531342908296c302379 | 108 | py | Python | django/models/customising_admin/apps.py | djangojeng-e/TIL | bdbe1dfb6ebc48b89067fddda195227cca64b8dc | [
"MIT"
] | null | null | null | django/models/customising_admin/apps.py | djangojeng-e/TIL | bdbe1dfb6ebc48b89067fddda195227cca64b8dc | [
"MIT"
] | null | null | null | django/models/customising_admin/apps.py | djangojeng-e/TIL | bdbe1dfb6ebc48b89067fddda195227cca64b8dc | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class CustomisingAdminConfig(AppConfig):
name = 'customising_admin'
| 18 | 40 | 0.796296 | from django.apps import AppConfig
class CustomisingAdminConfig(AppConfig):
name = 'customising_admin'
| true | true |
f73568592e3f9b63d56ec35c8be5d16ba89d76d3 | 1,904 | py | Python | Container-Root/src/python/lib/util/encode_decode_json.py | aws-samples/aws-do-pm | 17c8803d18e479e7e0d0b6e35ff2abe77079a61e | [
"MIT-0"
] | 4 | 2022-03-03T03:52:33.000Z | 2022-03-31T23:28:14.000Z | Container-Root/src/python/lib/util/encode_decode_json.py | aws-samples/aws-do-pm | 17c8803d18e479e7e0d0b6e35ff2abe77079a61e | [
"MIT-0"
] | null | null | null | Container-Root/src/python/lib/util/encode_decode_json.py | aws-samples/aws-do-pm | 17c8803d18e479e7e0d0b6e35ff2abe77079a61e | [
"MIT-0"
] | null | null | null | ######################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# SPDX-License-Identifier: MIT-0 #
######################################################################
import json
import base64
def utf_encode_json(json_dict):
json_dict_str = json.dumps(json_dict)
json_dict_bytes = json_dict_str.encode('utf-8')
return json_dict_bytes
def utf_decode_json(json_dict_bytes):
json_dict_str = json_dict_bytes.decode('utf-8')
json_dict = json.loads(json_dict_str)
return json_dict
def base64_encode_json_fname(json_fname):
encoded_json_str = None
with open(json_fname, 'r') as fp:
json_dict = json.load(fp)
json_dict_str = json.dumps(json_dict, indent=2)
json_dict_bytes = json_dict_str.encode('utf-8')
encoded_json_bytestr = base64.b64encode(json_dict_bytes)
encoded_json_str = encoded_json_bytestr.decode('utf-8')
return encoded_json_str
def base64_decode_json(encoded_json_str):
json_decode_str = base64.b64decode(encoded_json_str)
json_dict = json.loads(json_decode_str)
return json_dict
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--json_fname', help='JSON File Name')
args = parser.parse_args()
# json_fname = 'config.json'
encoded_json_str = base64_encode_json_fname(args.json_fname)
env_file = 'local_env.sh'
with open(env_file, 'w') as fp:
fp.write('echo "Creating environment variables ENCODED_CONFIG_JSON"\n')
fp.write('export ENCODED_CONFIG_JSON=%s\n'%(encoded_json_str))
fp.close()
print('Exported: local_env.sh...')
# print('export ENCODED_CONFIG_JSON=%s'%(encoded_json_str))
# Simply test the inversion process
# json_dict = decode_json(encoded_json_str)
| 34 | 79 | 0.655462 | true | true | |
f73568ba58dac4a9dd111036f4e127aea811cdfa | 45,214 | py | Python | nssrc/com/citrix/netscaler/nitro/resource/config/appflow/appflowparam.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/config/appflow/appflowparam.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/config/appflow/appflowparam.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2021 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class appflowparam(base_resource) :
""" Configuration for AppFlow parameter resource. """
def __init__(self) :
self._templaterefresh = None
self._appnamerefresh = None
self._flowrecordinterval = None
self._securityinsightrecordinterval = None
self._udppmtu = None
self._httpurl = None
self._aaausername = None
self._httpcookie = None
self._httpreferer = None
self._httpmethod = None
self._httphost = None
self._httpuseragent = None
self._clienttrafficonly = None
self._httpcontenttype = None
self._httpauthorization = None
self._httpvia = None
self._httpxforwardedfor = None
self._httplocation = None
self._httpsetcookie = None
self._httpsetcookie2 = None
self._connectionchaining = None
self._httpdomain = None
self._skipcacheredirectionhttptransaction = None
self._identifiername = None
self._identifiersessionname = None
self._observationdomainid = None
self._observationdomainname = None
self._subscriberawareness = None
self._subscriberidobfuscation = None
self._subscriberidobfuscationalgo = None
self._gxsessionreporting = None
self._securityinsighttraffic = None
self._cacheinsight = None
self._videoinsight = None
self._httpquerywithurl = None
self._urlcategory = None
self._lsnlogging = None
self._cqareporting = None
self._emailaddress = None
self._usagerecordinterval = None
self._websaasappusagereporting = None
self._metrics = None
self._events = None
self._auditlogs = None
self._observationpointid = None
self._distributedtracing = None
self._disttracingsamplingrate = None
self._tcpattackcounterinterval = None
self._logstreamovernsip = None
self._analyticsauthtoken = None
self._timeseriesovernsip = None
self._builtin = None
self._feature = None
self._tcpburstreporting = None
self._tcpburstreportingthreshold = None
@property
def templaterefresh(self) :
r"""Refresh interval, in seconds, at which to export the template data. Because data transmission is in UDP, the templates must be resent at regular intervals.<br/>Default value: 600<br/>Minimum length = 60<br/>Maximum length = 3600.
"""
try :
return self._templaterefresh
except Exception as e:
raise e
@templaterefresh.setter
def templaterefresh(self, templaterefresh) :
r"""Refresh interval, in seconds, at which to export the template data. Because data transmission is in UDP, the templates must be resent at regular intervals.<br/>Default value: 600<br/>Minimum length = 60<br/>Maximum length = 3600
"""
try :
self._templaterefresh = templaterefresh
except Exception as e:
raise e
@property
def appnamerefresh(self) :
r"""Interval, in seconds, at which to send Appnames to the configured collectors. Appname refers to the name of an entity (virtual server, service, or service group) in the Citrix ADC.<br/>Default value: 600<br/>Minimum length = 60<br/>Maximum length = 3600.
"""
try :
return self._appnamerefresh
except Exception as e:
raise e
@appnamerefresh.setter
def appnamerefresh(self, appnamerefresh) :
r"""Interval, in seconds, at which to send Appnames to the configured collectors. Appname refers to the name of an entity (virtual server, service, or service group) in the Citrix ADC.<br/>Default value: 600<br/>Minimum length = 60<br/>Maximum length = 3600
"""
try :
self._appnamerefresh = appnamerefresh
except Exception as e:
raise e
@property
def flowrecordinterval(self) :
r"""Interval, in seconds, at which to send flow records to the configured collectors.<br/>Default value: 60<br/>Minimum length = 60<br/>Maximum length = 3600.
"""
try :
return self._flowrecordinterval
except Exception as e:
raise e
@flowrecordinterval.setter
def flowrecordinterval(self, flowrecordinterval) :
r"""Interval, in seconds, at which to send flow records to the configured collectors.<br/>Default value: 60<br/>Minimum length = 60<br/>Maximum length = 3600
"""
try :
self._flowrecordinterval = flowrecordinterval
except Exception as e:
raise e
@property
def securityinsightrecordinterval(self) :
r"""Interval, in seconds, at which to send security insight flow records to the configured collectors.<br/>Default value: 600<br/>Minimum length = 60<br/>Maximum length = 3600.
"""
try :
return self._securityinsightrecordinterval
except Exception as e:
raise e
@securityinsightrecordinterval.setter
def securityinsightrecordinterval(self, securityinsightrecordinterval) :
r"""Interval, in seconds, at which to send security insight flow records to the configured collectors.<br/>Default value: 600<br/>Minimum length = 60<br/>Maximum length = 3600
"""
try :
self._securityinsightrecordinterval = securityinsightrecordinterval
except Exception as e:
raise e
@property
def udppmtu(self) :
r"""MTU, in bytes, for IPFIX UDP packets.<br/>Default value: 1472<br/>Minimum length = 128<br/>Maximum length = 1472.
"""
try :
return self._udppmtu
except Exception as e:
raise e
@udppmtu.setter
def udppmtu(self, udppmtu) :
r"""MTU, in bytes, for IPFIX UDP packets.<br/>Default value: 1472<br/>Minimum length = 128<br/>Maximum length = 1472
"""
try :
self._udppmtu = udppmtu
except Exception as e:
raise e
@property
def httpurl(self) :
r"""Include the http URL that the Citrix ADC received from the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._httpurl
except Exception as e:
raise e
@httpurl.setter
def httpurl(self, httpurl) :
r"""Include the http URL that the Citrix ADC received from the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._httpurl = httpurl
except Exception as e:
raise e
@property
def aaausername(self) :
r"""Enable AppFlow AAA Username logging.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._aaausername
except Exception as e:
raise e
@aaausername.setter
def aaausername(self, aaausername) :
r"""Enable AppFlow AAA Username logging.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._aaausername = aaausername
except Exception as e:
raise e
@property
def httpcookie(self) :
r"""Include the cookie that was in the HTTP request the appliance received from the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._httpcookie
except Exception as e:
raise e
@httpcookie.setter
def httpcookie(self, httpcookie) :
r"""Include the cookie that was in the HTTP request the appliance received from the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._httpcookie = httpcookie
except Exception as e:
raise e
	# --- Generated NITRO option properties ------------------------------------
	# Each property below is a plain accessor pair over a private `_<name>`
	# backing attribute.  The try/except blocks are part of the generated SDK
	# pattern and only re-raise.  Legal values are listed in each docstring
	# (typically ENABLED/DISABLED); see the like-named nested constant classes
	# further down in this class.
	@property
	def httpreferer(self) :
		r"""Include the web page that was last visited by the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._httpreferer
		except Exception as e:
			raise e
	@httpreferer.setter
	def httpreferer(self, httpreferer) :
		r"""Include the web page that was last visited by the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._httpreferer = httpreferer
		except Exception as e:
			raise e
	@property
	def httpmethod(self) :
		r"""Include the method that was specified in the HTTP request that the appliance received from the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._httpmethod
		except Exception as e:
			raise e
	@httpmethod.setter
	def httpmethod(self, httpmethod) :
		r"""Include the method that was specified in the HTTP request that the appliance received from the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._httpmethod = httpmethod
		except Exception as e:
			raise e
	@property
	def httphost(self) :
		r"""Include the host identified in the HTTP request that the appliance received from the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._httphost
		except Exception as e:
			raise e
	@httphost.setter
	def httphost(self, httphost) :
		r"""Include the host identified in the HTTP request that the appliance received from the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._httphost = httphost
		except Exception as e:
			raise e
	@property
	def httpuseragent(self) :
		r"""Include the client application through which the HTTP request was received by the Citrix ADC.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._httpuseragent
		except Exception as e:
			raise e
	@httpuseragent.setter
	def httpuseragent(self, httpuseragent) :
		r"""Include the client application through which the HTTP request was received by the Citrix ADC.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._httpuseragent = httpuseragent
		except Exception as e:
			raise e
	# NOTE: unlike its siblings, this option is YES/NO rather than
	# ENABLED/DISABLED (see nested class Clienttrafficonly).
	@property
	def clienttrafficonly(self) :
		r"""Generate AppFlow records for only the traffic from the client.<br/>Default value: NO<br/>Possible values = YES, NO.
		"""
		try :
			return self._clienttrafficonly
		except Exception as e:
			raise e
	@clienttrafficonly.setter
	def clienttrafficonly(self, clienttrafficonly) :
		r"""Generate AppFlow records for only the traffic from the client.<br/>Default value: NO<br/>Possible values = YES, NO
		"""
		try :
			self._clienttrafficonly = clienttrafficonly
		except Exception as e:
			raise e
	@property
	def httpcontenttype(self) :
		r"""Include the HTTP Content-Type header sent from the server to the client to determine the type of the content sent.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._httpcontenttype
		except Exception as e:
			raise e
	@httpcontenttype.setter
	def httpcontenttype(self, httpcontenttype) :
		r"""Include the HTTP Content-Type header sent from the server to the client to determine the type of the content sent.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._httpcontenttype = httpcontenttype
		except Exception as e:
			raise e
	@property
	def httpauthorization(self) :
		r"""Include the HTTP Authorization header information.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._httpauthorization
		except Exception as e:
			raise e
	@httpauthorization.setter
	def httpauthorization(self, httpauthorization) :
		r"""Include the HTTP Authorization header information.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._httpauthorization = httpauthorization
		except Exception as e:
			raise e
	@property
	def httpvia(self) :
		r"""Include the httpVia header which contains the IP address of proxy server through which the client accessed the server.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._httpvia
		except Exception as e:
			raise e
	@httpvia.setter
	def httpvia(self, httpvia) :
		r"""Include the httpVia header which contains the IP address of proxy server through which the client accessed the server.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._httpvia = httpvia
		except Exception as e:
			raise e
	@property
	def httpxforwardedfor(self) :
		r"""Include the httpXForwardedFor header, which contains the original IP Address of the client using a proxy server to access the server.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._httpxforwardedfor
		except Exception as e:
			raise e
	@httpxforwardedfor.setter
	def httpxforwardedfor(self, httpxforwardedfor) :
		r"""Include the httpXForwardedFor header, which contains the original IP Address of the client using a proxy server to access the server.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._httpxforwardedfor = httpxforwardedfor
		except Exception as e:
			raise e
	@property
	def httplocation(self) :
		r"""Include the HTTP location headers returned from the HTTP responses.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._httplocation
		except Exception as e:
			raise e
	@httplocation.setter
	def httplocation(self, httplocation) :
		r"""Include the HTTP location headers returned from the HTTP responses.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._httplocation = httplocation
		except Exception as e:
			raise e
	@property
	def httpsetcookie(self) :
		r"""Include the Set-cookie header sent from the server to the client in response to a HTTP request.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._httpsetcookie
		except Exception as e:
			raise e
	@httpsetcookie.setter
	def httpsetcookie(self, httpsetcookie) :
		r"""Include the Set-cookie header sent from the server to the client in response to a HTTP request.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._httpsetcookie = httpsetcookie
		except Exception as e:
			raise e
	# NOTE(review): httpsetcookie2 carries the same generated description as
	# httpsetcookie; presumably it maps to the legacy Set-Cookie2 header —
	# confirm against the NITRO appflowparam schema.
	@property
	def httpsetcookie2(self) :
		r"""Include the Set-cookie header sent from the server to the client in response to a HTTP request.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._httpsetcookie2
		except Exception as e:
			raise e
	@httpsetcookie2.setter
	def httpsetcookie2(self, httpsetcookie2) :
		r"""Include the Set-cookie header sent from the server to the client in response to a HTTP request.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._httpsetcookie2 = httpsetcookie2
		except Exception as e:
			raise e
	@property
	def connectionchaining(self) :
		r"""Enable connection chaining so that the client server flows of a connection are linked. Also the connection chain ID is propagated across Citrix ADCs, so that in a multi-hop environment the flows belonging to the same logical connection are linked. This id is also logged as part of appflow record.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._connectionchaining
		except Exception as e:
			raise e
	@connectionchaining.setter
	def connectionchaining(self, connectionchaining) :
		r"""Enable connection chaining so that the client server flows of a connection are linked. Also the connection chain ID is propagated across Citrix ADCs, so that in a multi-hop environment the flows belonging to the same logical connection are linked. This id is also logged as part of appflow record.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._connectionchaining = connectionchaining
		except Exception as e:
			raise e
	@property
	def httpdomain(self) :
		r"""Include the http domain request to be exported.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._httpdomain
		except Exception as e:
			raise e
	@httpdomain.setter
	def httpdomain(self, httpdomain) :
		r"""Include the http domain request to be exported.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._httpdomain = httpdomain
		except Exception as e:
			raise e
	@property
	def skipcacheredirectionhttptransaction(self) :
		r"""Skip Cache http transaction. This HTTP transaction is specific to Cache Redirection module. In Case of Cache Miss there will be another HTTP transaction initiated by the cache server.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._skipcacheredirectionhttptransaction
		except Exception as e:
			raise e
	@skipcacheredirectionhttptransaction.setter
	def skipcacheredirectionhttptransaction(self, skipcacheredirectionhttptransaction) :
		r"""Skip Cache http transaction. This HTTP transaction is specific to Cache Redirection module. In Case of Cache Miss there will be another HTTP transaction initiated by the cache server.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._skipcacheredirectionhttptransaction = skipcacheredirectionhttptransaction
		except Exception as e:
			raise e
	@property
	def identifiername(self) :
		r"""Include the stream identifier name to be exported.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._identifiername
		except Exception as e:
			raise e
	@identifiername.setter
	def identifiername(self, identifiername) :
		r"""Include the stream identifier name to be exported.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._identifiername = identifiername
		except Exception as e:
			raise e
	@property
	def identifiersessionname(self) :
		r"""Include the stream identifier session name to be exported.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._identifiersessionname
		except Exception as e:
			raise e
	@identifiersessionname.setter
	def identifiersessionname(self, identifiersessionname) :
		r"""Include the stream identifier session name to be exported.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._identifiersessionname = identifiersessionname
		except Exception as e:
			raise e
	# --- Generated NITRO option properties (continued) ------------------------
	# Plain accessor pairs over private `_<name>` attributes; the try/except
	# wrappers are generated boilerplate that only re-raises.
	# NOTE(review): the generated docstring says "Minimum length = 1000" for a
	# numeric domain ID — presumably this means minimum *value*; confirm
	# against the NITRO appflowparam schema.
	@property
	def observationdomainid(self) :
		r"""An observation domain groups a set of Citrix ADCs based on deployment: cluster, HA etc. A unique Observation Domain ID is required to be assigned to each such group.<br/>Default value: 0<br/>Minimum length = 1000.
		"""
		try :
			return self._observationdomainid
		except Exception as e:
			raise e
	@observationdomainid.setter
	def observationdomainid(self, observationdomainid) :
		r"""An observation domain groups a set of Citrix ADCs based on deployment: cluster, HA etc. A unique Observation Domain ID is required to be assigned to each such group.<br/>Default value: 0<br/>Minimum length = 1000
		"""
		try :
			self._observationdomainid = observationdomainid
		except Exception as e:
			raise e
	@property
	def observationdomainname(self) :
		r"""Name of the Observation Domain defined by the observation domain ID.<br/>Maximum length = 127.
		"""
		try :
			return self._observationdomainname
		except Exception as e:
			raise e
	@observationdomainname.setter
	def observationdomainname(self, observationdomainname) :
		r"""Name of the Observation Domain defined by the observation domain ID.<br/>Maximum length = 127
		"""
		try :
			self._observationdomainname = observationdomainname
		except Exception as e:
			raise e
	@property
	def subscriberawareness(self) :
		r"""Enable this option for logging end user MSISDN in L4/L7 appflow records.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._subscriberawareness
		except Exception as e:
			raise e
	@subscriberawareness.setter
	def subscriberawareness(self, subscriberawareness) :
		r"""Enable this option for logging end user MSISDN in L4/L7 appflow records.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._subscriberawareness = subscriberawareness
		except Exception as e:
			raise e
	@property
	def subscriberidobfuscation(self) :
		r"""Enable this option for obfuscating MSISDN in L4/L7 appflow records.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._subscriberidobfuscation
		except Exception as e:
			raise e
	@subscriberidobfuscation.setter
	def subscriberidobfuscation(self, subscriberidobfuscation) :
		r"""Enable this option for obfuscating MSISDN in L4/L7 appflow records.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._subscriberidobfuscation = subscriberidobfuscation
		except Exception as e:
			raise e
	@property
	def subscriberidobfuscationalgo(self) :
		r"""Algorithm(MD5 or SHA256) to be used for obfuscating MSISDN.<br/>Default value: MD5<br/>Possible values = MD5, SHA256.
		"""
		try :
			return self._subscriberidobfuscationalgo
		except Exception as e:
			raise e
	@subscriberidobfuscationalgo.setter
	def subscriberidobfuscationalgo(self, subscriberidobfuscationalgo) :
		r"""Algorithm(MD5 or SHA256) to be used for obfuscating MSISDN.<br/>Default value: MD5<br/>Possible values = MD5, SHA256
		"""
		try :
			self._subscriberidobfuscationalgo = subscriberidobfuscationalgo
		except Exception as e:
			raise e
	@property
	def gxsessionreporting(self) :
		r"""Enable this option for Gx session reporting.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._gxsessionreporting
		except Exception as e:
			raise e
	@gxsessionreporting.setter
	def gxsessionreporting(self, gxsessionreporting) :
		r"""Enable this option for Gx session reporting.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._gxsessionreporting = gxsessionreporting
		except Exception as e:
			raise e
	@property
	def securityinsighttraffic(self) :
		r"""Enable/disable the feature individually on appflow action.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._securityinsighttraffic
		except Exception as e:
			raise e
	@securityinsighttraffic.setter
	def securityinsighttraffic(self, securityinsighttraffic) :
		r"""Enable/disable the feature individually on appflow action.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._securityinsighttraffic = securityinsighttraffic
		except Exception as e:
			raise e
	@property
	def cacheinsight(self) :
		r"""Flag to determine whether cache records need to be exported or not. If this flag is true and IC is enabled, cache records are exported instead of L7 HTTP records.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._cacheinsight
		except Exception as e:
			raise e
	@cacheinsight.setter
	def cacheinsight(self, cacheinsight) :
		r"""Flag to determine whether cache records need to be exported or not. If this flag is true and IC is enabled, cache records are exported instead of L7 HTTP records.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._cacheinsight = cacheinsight
		except Exception as e:
			raise e
	@property
	def videoinsight(self) :
		r"""Enable/disable the feature individually on appflow action.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._videoinsight
		except Exception as e:
			raise e
	@videoinsight.setter
	def videoinsight(self, videoinsight) :
		r"""Enable/disable the feature individually on appflow action.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._videoinsight = videoinsight
		except Exception as e:
			raise e
	@property
	def httpquerywithurl(self) :
		r"""Include the HTTP query segment along with the URL that the Citrix ADC received from the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._httpquerywithurl
		except Exception as e:
			raise e
	@httpquerywithurl.setter
	def httpquerywithurl(self, httpquerywithurl) :
		r"""Include the HTTP query segment along with the URL that the Citrix ADC received from the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._httpquerywithurl = httpquerywithurl
		except Exception as e:
			raise e
	@property
	def urlcategory(self) :
		r"""Include the URL category record.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._urlcategory
		except Exception as e:
			raise e
	@urlcategory.setter
	def urlcategory(self, urlcategory) :
		r"""Include the URL category record.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._urlcategory = urlcategory
		except Exception as e:
			raise e
	@property
	def lsnlogging(self) :
		r"""On enabling this option, the Citrix ADC will send the Large Scale Nat(LSN) records to the configured collectors.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._lsnlogging
		except Exception as e:
			raise e
	@lsnlogging.setter
	def lsnlogging(self, lsnlogging) :
		r"""On enabling this option, the Citrix ADC will send the Large Scale Nat(LSN) records to the configured collectors.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._lsnlogging = lsnlogging
		except Exception as e:
			raise e
	@property
	def cqareporting(self) :
		r"""TCP CQA reporting enable/disable knob.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._cqareporting
		except Exception as e:
			raise e
	@cqareporting.setter
	def cqareporting(self, cqareporting) :
		r"""TCP CQA reporting enable/disable knob.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._cqareporting = cqareporting
		except Exception as e:
			raise e
	@property
	def emailaddress(self) :
		r"""Enable AppFlow user email-id logging.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._emailaddress
		except Exception as e:
			raise e
	@emailaddress.setter
	def emailaddress(self, emailaddress) :
		r"""Enable AppFlow user email-id logging.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._emailaddress = emailaddress
		except Exception as e:
			raise e
	# NOTE(review): "Maximum length = 7200" for an interval is presumably a
	# maximum value in seconds — confirm against the NITRO schema.
	@property
	def usagerecordinterval(self) :
		r"""On enabling this option, the NGS will send bandwidth usage record to configured collectors.<br/>Default value: 0<br/>Maximum length = 7200.
		"""
		try :
			return self._usagerecordinterval
		except Exception as e:
			raise e
	@usagerecordinterval.setter
	def usagerecordinterval(self, usagerecordinterval) :
		r"""On enabling this option, the NGS will send bandwidth usage record to configured collectors.<br/>Default value: 0<br/>Maximum length = 7200
		"""
		try :
			self._usagerecordinterval = usagerecordinterval
		except Exception as e:
			raise e
	@property
	def websaasappusagereporting(self) :
		r"""On enabling this option, NGS will send data used by Web/saas app at the end of every HTTP transaction to configured collectors.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._websaasappusagereporting
		except Exception as e:
			raise e
	@websaasappusagereporting.setter
	def websaasappusagereporting(self, websaasappusagereporting) :
		r"""On enabling this option, NGS will send data used by Web/saas app at the end of every HTTP transaction to configured collectors.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._websaasappusagereporting = websaasappusagereporting
		except Exception as e:
			raise e
	@property
	def metrics(self) :
		r"""Enable Citrix ADC Stats to be sent to the Telemetry Agent.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._metrics
		except Exception as e:
			raise e
	@metrics.setter
	def metrics(self, metrics) :
		r"""Enable Citrix ADC Stats to be sent to the Telemetry Agent.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._metrics = metrics
		except Exception as e:
			raise e
	# --- Generated NITRO option properties (continued) ------------------------
	# Writable accessor pairs, followed by read-only properties (builtin,
	# feature, tcpburstreporting, tcpburstreportingthreshold) that have no
	# setter because the appliance reports them but does not accept them on
	# update.
	@property
	def events(self) :
		r"""Enable Events to be sent to the Telemetry Agent.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._events
		except Exception as e:
			raise e
	@events.setter
	def events(self, events) :
		r"""Enable Events to be sent to the Telemetry Agent.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._events = events
		except Exception as e:
			raise e
	@property
	def auditlogs(self) :
		r"""Enable Auditlogs to be sent to the Telemetry Agent.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._auditlogs
		except Exception as e:
			raise e
	@auditlogs.setter
	def auditlogs(self, auditlogs) :
		r"""Enable Auditlogs to be sent to the Telemetry Agent.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._auditlogs = auditlogs
		except Exception as e:
			raise e
	@property
	def observationpointid(self) :
		r"""An observation point ID is identifier for the NetScaler from which appflow records are being exported. By default, the NetScaler IP is the observation point ID.<br/>Minimum length = 1.
		"""
		try :
			return self._observationpointid
		except Exception as e:
			raise e
	@observationpointid.setter
	def observationpointid(self, observationpointid) :
		r"""An observation point ID is identifier for the NetScaler from which appflow records are being exported. By default, the NetScaler IP is the observation point ID.<br/>Minimum length = 1
		"""
		try :
			self._observationpointid = observationpointid
		except Exception as e:
			raise e
	@property
	def distributedtracing(self) :
		r"""Enable generation of the distributed tracing templates in the Appflow records.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._distributedtracing
		except Exception as e:
			raise e
	@distributedtracing.setter
	def distributedtracing(self, distributedtracing) :
		r"""Enable generation of the distributed tracing templates in the Appflow records.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._distributedtracing = distributedtracing
		except Exception as e:
			raise e
	# NOTE(review): "Maximum length = 100" for a sampling rate is presumably
	# a maximum value (percent) — confirm against the NITRO schema.
	@property
	def disttracingsamplingrate(self) :
		r"""Sampling rate for Distributed Tracing.<br/>Default value: 0<br/>Maximum length = 100.
		"""
		try :
			return self._disttracingsamplingrate
		except Exception as e:
			raise e
	@disttracingsamplingrate.setter
	def disttracingsamplingrate(self, disttracingsamplingrate) :
		r"""Sampling rate for Distributed Tracing.<br/>Default value: 0<br/>Maximum length = 100
		"""
		try :
			self._disttracingsamplingrate = disttracingsamplingrate
		except Exception as e:
			raise e
	@property
	def tcpattackcounterinterval(self) :
		r"""Interval, in seconds, at which to send tcp attack counters to the configured collectors. If 0 is configured, the record is not sent.<br/>Default value: 0<br/>Maximum length = 3600.
		"""
		try :
			return self._tcpattackcounterinterval
		except Exception as e:
			raise e
	@tcpattackcounterinterval.setter
	def tcpattackcounterinterval(self, tcpattackcounterinterval) :
		r"""Interval, in seconds, at which to send tcp attack counters to the configured collectors. If 0 is configured, the record is not sent.<br/>Default value: 0<br/>Maximum length = 3600
		"""
		try :
			self._tcpattackcounterinterval = tcpattackcounterinterval
		except Exception as e:
			raise e
	@property
	def logstreamovernsip(self) :
		r"""To use the Citrix ADC IP to send Logstream records instead of the SNIP.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._logstreamovernsip
		except Exception as e:
			raise e
	@logstreamovernsip.setter
	def logstreamovernsip(self, logstreamovernsip) :
		r"""To use the Citrix ADC IP to send Logstream records instead of the SNIP.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._logstreamovernsip = logstreamovernsip
		except Exception as e:
			raise e
	@property
	def analyticsauthtoken(self) :
		r"""Authentication token to be set by the agent.<br/>Maximum length = 256.
		"""
		try :
			return self._analyticsauthtoken
		except Exception as e:
			raise e
	@analyticsauthtoken.setter
	def analyticsauthtoken(self, analyticsauthtoken) :
		r"""Authentication token to be set by the agent.<br/>Maximum length = 256
		"""
		try :
			self._analyticsauthtoken = analyticsauthtoken
		except Exception as e:
			raise e
	@property
	def timeseriesovernsip(self) :
		r"""To use the Citrix ADC IP to send Time series data such as metrics and events, instead of the SNIP.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._timeseriesovernsip
		except Exception as e:
			raise e
	@timeseriesovernsip.setter
	def timeseriesovernsip(self, timeseriesovernsip) :
		r"""To use the Citrix ADC IP to send Time series data such as metrics and events, instead of the SNIP.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._timeseriesovernsip = timeseriesovernsip
		except Exception as e:
			raise e
	# Read-only: reported by the appliance, not settable via this class.
	@property
	def builtin(self) :
		r"""Flag to determine if the appflow param is built-in or not.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE, PARTITION_ALL.
		"""
		try :
			return self._builtin
		except Exception as e:
			raise e
	# Read-only: reported by the appliance, not settable via this class.
	@property
	def feature(self) :
		r"""The feature to be checked while applying this config.
		"""
		try :
			return self._feature
		except Exception as e:
			raise e
	# Read-only: reported by the appliance, not settable via this class.
	@property
	def tcpburstreporting(self) :
		r"""TCP burst reporting enable/disable knob.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._tcpburstreporting
		except Exception as e:
			raise e
	# Read-only: reported by the appliance, not settable via this class.
	@property
	def tcpburstreportingthreshold(self) :
		r"""TCP burst reporting threshold.<br/>Default value: 1500<br/>Minimum value = 10<br/>Maximum value = 5000.
		"""
		try :
			return self._tcpburstreportingthreshold
		except Exception as e:
			raise e
	def _get_nitro_response(self, service, response) :
		r""" converts nitro response into object and returns the object array in case of get request.

		`service` is the nitro_service session; `response` is the raw payload
		returned by the appliance.  Raises nitro_exception when the appliance
		reports an error.
		"""
		try :
			# Deserialize the raw payload into an appflowparam_response wrapper.
			result = service.payload_formatter.string_to_resource(appflowparam_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# 444 means the session expired/was invalidated; drop it locally.
				if (result.errorcode == 444) :
					service.clear_session(self)
				# NOTE(review): when severity is set but not "ERROR", no
				# exception is raised (generated pattern) — the non-zero
				# errorcode is effectively swallowed in that case.
				if result.severity :
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.appflowparam
		except Exception as e :
			raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def filter_update_parameters(cls, resource) :
r""" Use this function to create a resource with only update operation specific parameters.
"""
updateresource = appflowparam()
updateresource.templaterefresh = resource.templaterefresh
updateresource.appnamerefresh = resource.appnamerefresh
updateresource.flowrecordinterval = resource.flowrecordinterval
updateresource.securityinsightrecordinterval = resource.securityinsightrecordinterval
updateresource.udppmtu = resource.udppmtu
updateresource.httpurl = resource.httpurl
updateresource.aaausername = resource.aaausername
updateresource.httpcookie = resource.httpcookie
updateresource.httpreferer = resource.httpreferer
updateresource.httpmethod = resource.httpmethod
updateresource.httphost = resource.httphost
updateresource.httpuseragent = resource.httpuseragent
updateresource.clienttrafficonly = resource.clienttrafficonly
updateresource.httpcontenttype = resource.httpcontenttype
updateresource.httpauthorization = resource.httpauthorization
updateresource.httpvia = resource.httpvia
updateresource.httpxforwardedfor = resource.httpxforwardedfor
updateresource.httplocation = resource.httplocation
updateresource.httpsetcookie = resource.httpsetcookie
updateresource.httpsetcookie2 = resource.httpsetcookie2
updateresource.connectionchaining = resource.connectionchaining
updateresource.httpdomain = resource.httpdomain
updateresource.skipcacheredirectionhttptransaction = resource.skipcacheredirectionhttptransaction
updateresource.identifiername = resource.identifiername
updateresource.identifiersessionname = resource.identifiersessionname
updateresource.observationdomainid = resource.observationdomainid
updateresource.observationdomainname = resource.observationdomainname
updateresource.subscriberawareness = resource.subscriberawareness
updateresource.subscriberidobfuscation = resource.subscriberidobfuscation
updateresource.subscriberidobfuscationalgo = resource.subscriberidobfuscationalgo
updateresource.gxsessionreporting = resource.gxsessionreporting
updateresource.securityinsighttraffic = resource.securityinsighttraffic
updateresource.cacheinsight = resource.cacheinsight
updateresource.videoinsight = resource.videoinsight
updateresource.httpquerywithurl = resource.httpquerywithurl
updateresource.urlcategory = resource.urlcategory
updateresource.lsnlogging = resource.lsnlogging
updateresource.cqareporting = resource.cqareporting
updateresource.emailaddress = resource.emailaddress
updateresource.usagerecordinterval = resource.usagerecordinterval
updateresource.websaasappusagereporting = resource.websaasappusagereporting
updateresource.metrics = resource.metrics
updateresource.events = resource.events
updateresource.auditlogs = resource.auditlogs
updateresource.observationpointid = resource.observationpointid
updateresource.distributedtracing = resource.distributedtracing
updateresource.disttracingsamplingrate = resource.disttracingsamplingrate
updateresource.tcpattackcounterinterval = resource.tcpattackcounterinterval
updateresource.logstreamovernsip = resource.logstreamovernsip
updateresource.analyticsauthtoken = resource.analyticsauthtoken
updateresource.timeseriesovernsip = resource.timeseriesovernsip
return updateresource
@classmethod
def update(cls, client, resource) :
r""" Use this API to update appflowparam.
"""
try :
if type(resource) is not list :
updateresource = cls.filter_update_parameters(resource)
return updateresource.update_resource(client)
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
r""" Use this API to unset the properties of appflowparam resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = appflowparam()
return unsetresource.unset_resource(client, args)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
r""" Use this API to fetch all the appflowparam resources that are configured on netscaler.
"""
try :
if not name :
obj = appflowparam()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
	# --- Generated constant holders -------------------------------------------
	# Each nested class below enumerates the legal string values for the
	# like-named appflowparam property, e.g. appflowparam.Httpreferer.ENABLED.
	class Httpreferer:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Logstreamovernsip:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Websaasappusagereporting:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Cqareporting:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	# Legal values for the read-only `feature` property (ADC feature names,
	# several with short and long aliases).
	class Feature:
		WL = "WL"
		WebLogging = "WebLogging"
		SP = "SP"
		SurgeProtection = "SurgeProtection"
		LB = "LB"
		LoadBalancing = "LoadBalancing"
		CS = "CS"
		ContentSwitching = "ContentSwitching"
		CR = "CR"
		CacheRedirection = "CacheRedirection"
		SC = "SC"
		SureConnect = "SureConnect"
		CMP = "CMP"
		CMPcntl = "CMPcntl"
		CompressionControl = "CompressionControl"
		PQ = "PQ"
		PriorityQueuing = "PriorityQueuing"
		HDOSP = "HDOSP"
		HttpDoSProtection = "HttpDoSProtection"
		SSLVPN = "SSLVPN"
		AAA = "AAA"
		GSLB = "GSLB"
		GlobalServerLoadBalancing = "GlobalServerLoadBalancing"
		SSL = "SSL"
		SSLOffload = "SSLOffload"
		SSLOffloading = "SSLOffloading"
		CF = "CF"
		ContentFiltering = "ContentFiltering"
		IC = "IC"
		IntegratedCaching = "IntegratedCaching"
		OSPF = "OSPF"
		OSPFRouting = "OSPFRouting"
		RIP = "RIP"
		RIPRouting = "RIPRouting"
		BGP = "BGP"
		BGPRouting = "BGPRouting"
		REWRITE = "REWRITE"
		IPv6PT = "IPv6PT"
		IPv6protocoltranslation = "IPv6protocoltranslation"
		AppFw = "AppFw"
		ApplicationFirewall = "ApplicationFirewall"
		RESPONDER = "RESPONDER"
		HTMLInjection = "HTMLInjection"
		push = "push"
		NSPush = "NSPush"
		NetScalerPush = "NetScalerPush"
		AppFlow = "AppFlow"
		CloudBridge = "CloudBridge"
		ISIS = "ISIS"
		ISISRouting = "ISISRouting"
		CH = "CH"
		CallHome = "CallHome"
		AppQoE = "AppQoE"
		ContentAccelerator = "ContentAccelerator"
		SYSTEM = "SYSTEM"
		RISE = "RISE"
		FEO = "FEO"
		LSN = "LSN"
		LargeScaleNAT = "LargeScaleNAT"
		RDPProxy = "RDPProxy"
		Rep = "Rep"
		Reputation = "Reputation"
		URLFiltering = "URLFiltering"
		VideoOptimization = "VideoOptimization"
		ForwardProxy = "ForwardProxy"
		SSLInterception = "SSLInterception"
		AdaptiveTCP = "AdaptiveTCP"
		CQA = "CQA"
		CI = "CI"
		ContentInspection = "ContentInspection"
		Bot = "Bot"
		APIGateway = "APIGateway"
	class Httpsetcookie:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Httpvia:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Gxsessionreporting:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Httpdomain:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Videoinsight:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Httpsetcookie2:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Auditlogs:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	# Legal values for the read-only `builtin` property.
	class Builtin:
		MODIFIABLE = "MODIFIABLE"
		DELETABLE = "DELETABLE"
		IMMUTABLE = "IMMUTABLE"
		PARTITION_ALL = "PARTITION_ALL"
	class Httpquerywithurl:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Httpauthorization:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Urlcategory:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Skipcacheredirectionhttptransaction:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Aaausername:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Lsnlogging:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	# The only YES/NO (rather than ENABLED/DISABLED) option in this resource.
	class Clienttrafficonly:
		YES = "YES"
		NO = "NO"
	class Securityinsighttraffic:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Httpcontenttype:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Cacheinsight:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Timeseriesovernsip:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Emailaddress:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Httpmethod:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Subscriberawareness:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Httplocation:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Distributedtracing:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Identifiersessionname:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Tcpburstreporting:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Metrics:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Connectionchaining:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Subscriberidobfuscation:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Httpxforwardedfor:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	# MSISDN obfuscation hash algorithms (not ENABLED/DISABLED).
	class Subscriberidobfuscationalgo:
		MD5 = "MD5"
		SHA256 = "SHA256"
	class Identifiername:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Httpcookie:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Httpurl:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Events:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Httpuseragent:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Httphost:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
class appflowparam_response(base_response) :
	""" Response envelope for appflowparam NITRO API calls.

	Carries the error code/message/severity triple and session id
	reported by the appliance, plus the list of deserialized
	appflowparam resources.
	"""
	def __init__(self, length=1) :
		# NOTE: the original code assigned `self.appflowparam = []` here and
		# immediately overwrote it below; the dead assignment is removed.
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# Pre-allocate `length` empty resource objects; the payload
		# formatter fills them in during deserialization.
		self.appflowparam = [appflowparam() for _ in range(length)]
| 32.135039 | 372 | 0.74384 |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class appflowparam(base_resource) :
	def __init__(self) :
		""" Backing fields for the appflowparam properties. All start
		unset (None) and are populated either by the NITRO payload
		formatter or by user code through the property setters below.
		"""
		# Template / record timing and transport parameters.
		self._templaterefresh = None
		self._appnamerefresh = None
		self._flowrecordinterval = None
		self._securityinsightrecordinterval = None
		self._udppmtu = None
		# Per-field HTTP export toggles.
		self._httpurl = None
		self._aaausername = None
		self._httpcookie = None
		self._httpreferer = None
		self._httpmethod = None
		self._httphost = None
		self._httpuseragent = None
		self._clienttrafficonly = None
		self._httpcontenttype = None
		self._httpauthorization = None
		self._httpvia = None
		self._httpxforwardedfor = None
		self._httplocation = None
		self._httpsetcookie = None
		self._httpsetcookie2 = None
		self._connectionchaining = None
		self._httpdomain = None
		self._skipcacheredirectionhttptransaction = None
		self._identifiername = None
		self._identifiersessionname = None
		# Observation-domain and subscriber-related settings.
		self._observationdomainid = None
		self._observationdomainname = None
		self._subscriberawareness = None
		self._subscriberidobfuscation = None
		self._subscriberidobfuscationalgo = None
		self._gxsessionreporting = None
		# Insight / reporting toggles and analytics settings.
		self._securityinsighttraffic = None
		self._cacheinsight = None
		self._videoinsight = None
		self._httpquerywithurl = None
		self._urlcategory = None
		self._lsnlogging = None
		self._cqareporting = None
		self._emailaddress = None
		self._usagerecordinterval = None
		self._websaasappusagereporting = None
		self._metrics = None
		self._events = None
		self._auditlogs = None
		self._observationpointid = None
		self._distributedtracing = None
		self._disttracingsamplingrate = None
		self._tcpattackcounterinterval = None
		self._logstreamovernsip = None
		self._analyticsauthtoken = None
		self._timeseriesovernsip = None
		# Fields exposed read-only (no property setters are defined for these).
		self._builtin = None
		self._feature = None
		self._tcpburstreporting = None
		self._tcpburstreportingthreshold = None
@property
def templaterefresh(self) :
try :
return self._templaterefresh
except Exception as e:
raise e
@templaterefresh.setter
def templaterefresh(self, templaterefresh) :
try :
self._templaterefresh = templaterefresh
except Exception as e:
raise e
@property
def appnamerefresh(self) :
try :
return self._appnamerefresh
except Exception as e:
raise e
@appnamerefresh.setter
def appnamerefresh(self, appnamerefresh) :
try :
self._appnamerefresh = appnamerefresh
except Exception as e:
raise e
@property
def flowrecordinterval(self) :
try :
return self._flowrecordinterval
except Exception as e:
raise e
@flowrecordinterval.setter
def flowrecordinterval(self, flowrecordinterval) :
try :
self._flowrecordinterval = flowrecordinterval
except Exception as e:
raise e
@property
def securityinsightrecordinterval(self) :
try :
return self._securityinsightrecordinterval
except Exception as e:
raise e
@securityinsightrecordinterval.setter
def securityinsightrecordinterval(self, securityinsightrecordinterval) :
try :
self._securityinsightrecordinterval = securityinsightrecordinterval
except Exception as e:
raise e
@property
def udppmtu(self) :
try :
return self._udppmtu
except Exception as e:
raise e
@udppmtu.setter
def udppmtu(self, udppmtu) :
try :
self._udppmtu = udppmtu
except Exception as e:
raise e
@property
def httpurl(self) :
try :
return self._httpurl
except Exception as e:
raise e
@httpurl.setter
def httpurl(self, httpurl) :
try :
self._httpurl = httpurl
except Exception as e:
raise e
@property
def aaausername(self) :
try :
return self._aaausername
except Exception as e:
raise e
@aaausername.setter
def aaausername(self, aaausername) :
try :
self._aaausername = aaausername
except Exception as e:
raise e
@property
def httpcookie(self) :
try :
return self._httpcookie
except Exception as e:
raise e
@httpcookie.setter
def httpcookie(self, httpcookie) :
try :
self._httpcookie = httpcookie
except Exception as e:
raise e
@property
def httpreferer(self) :
try :
return self._httpreferer
except Exception as e:
raise e
@httpreferer.setter
def httpreferer(self, httpreferer) :
try :
self._httpreferer = httpreferer
except Exception as e:
raise e
@property
def httpmethod(self) :
try :
return self._httpmethod
except Exception as e:
raise e
@httpmethod.setter
def httpmethod(self, httpmethod) :
try :
self._httpmethod = httpmethod
except Exception as e:
raise e
@property
def httphost(self) :
try :
return self._httphost
except Exception as e:
raise e
@httphost.setter
def httphost(self, httphost) :
try :
self._httphost = httphost
except Exception as e:
raise e
@property
def httpuseragent(self) :
try :
return self._httpuseragent
except Exception as e:
raise e
@httpuseragent.setter
def httpuseragent(self, httpuseragent) :
try :
self._httpuseragent = httpuseragent
except Exception as e:
raise e
@property
def clienttrafficonly(self) :
try :
return self._clienttrafficonly
except Exception as e:
raise e
@clienttrafficonly.setter
def clienttrafficonly(self, clienttrafficonly) :
try :
self._clienttrafficonly = clienttrafficonly
except Exception as e:
raise e
@property
def httpcontenttype(self) :
try :
return self._httpcontenttype
except Exception as e:
raise e
@httpcontenttype.setter
def httpcontenttype(self, httpcontenttype) :
try :
self._httpcontenttype = httpcontenttype
except Exception as e:
raise e
@property
def httpauthorization(self) :
try :
return self._httpauthorization
except Exception as e:
raise e
@httpauthorization.setter
def httpauthorization(self, httpauthorization) :
try :
self._httpauthorization = httpauthorization
except Exception as e:
raise e
@property
def httpvia(self) :
try :
return self._httpvia
except Exception as e:
raise e
@httpvia.setter
def httpvia(self, httpvia) :
try :
self._httpvia = httpvia
except Exception as e:
raise e
@property
def httpxforwardedfor(self) :
try :
return self._httpxforwardedfor
except Exception as e:
raise e
@httpxforwardedfor.setter
def httpxforwardedfor(self, httpxforwardedfor) :
try :
self._httpxforwardedfor = httpxforwardedfor
except Exception as e:
raise e
@property
def httplocation(self) :
try :
return self._httplocation
except Exception as e:
raise e
@httplocation.setter
def httplocation(self, httplocation) :
try :
self._httplocation = httplocation
except Exception as e:
raise e
@property
def httpsetcookie(self) :
try :
return self._httpsetcookie
except Exception as e:
raise e
@httpsetcookie.setter
def httpsetcookie(self, httpsetcookie) :
try :
self._httpsetcookie = httpsetcookie
except Exception as e:
raise e
@property
def httpsetcookie2(self) :
try :
return self._httpsetcookie2
except Exception as e:
raise e
@httpsetcookie2.setter
def httpsetcookie2(self, httpsetcookie2) :
try :
self._httpsetcookie2 = httpsetcookie2
except Exception as e:
raise e
@property
def connectionchaining(self) :
try :
return self._connectionchaining
except Exception as e:
raise e
@connectionchaining.setter
def connectionchaining(self, connectionchaining) :
try :
self._connectionchaining = connectionchaining
except Exception as e:
raise e
@property
def httpdomain(self) :
try :
return self._httpdomain
except Exception as e:
raise e
@httpdomain.setter
def httpdomain(self, httpdomain) :
try :
self._httpdomain = httpdomain
except Exception as e:
raise e
@property
def skipcacheredirectionhttptransaction(self) :
try :
return self._skipcacheredirectionhttptransaction
except Exception as e:
raise e
@skipcacheredirectionhttptransaction.setter
def skipcacheredirectionhttptransaction(self, skipcacheredirectionhttptransaction) :
try :
self._skipcacheredirectionhttptransaction = skipcacheredirectionhttptransaction
except Exception as e:
raise e
@property
def identifiername(self) :
try :
return self._identifiername
except Exception as e:
raise e
@identifiername.setter
def identifiername(self, identifiername) :
try :
self._identifiername = identifiername
except Exception as e:
raise e
@property
def identifiersessionname(self) :
try :
return self._identifiersessionname
except Exception as e:
raise e
@identifiersessionname.setter
def identifiersessionname(self, identifiersessionname) :
try :
self._identifiersessionname = identifiersessionname
except Exception as e:
raise e
@property
def observationdomainid(self) :
try :
return self._observationdomainid
except Exception as e:
raise e
@observationdomainid.setter
def observationdomainid(self, observationdomainid) :
try :
self._observationdomainid = observationdomainid
except Exception as e:
raise e
@property
def observationdomainname(self) :
try :
return self._observationdomainname
except Exception as e:
raise e
@observationdomainname.setter
def observationdomainname(self, observationdomainname) :
try :
self._observationdomainname = observationdomainname
except Exception as e:
raise e
@property
def subscriberawareness(self) :
try :
return self._subscriberawareness
except Exception as e:
raise e
@subscriberawareness.setter
def subscriberawareness(self, subscriberawareness) :
try :
self._subscriberawareness = subscriberawareness
except Exception as e:
raise e
@property
def subscriberidobfuscation(self) :
try :
return self._subscriberidobfuscation
except Exception as e:
raise e
@subscriberidobfuscation.setter
def subscriberidobfuscation(self, subscriberidobfuscation) :
try :
self._subscriberidobfuscation = subscriberidobfuscation
except Exception as e:
raise e
@property
def subscriberidobfuscationalgo(self) :
try :
return self._subscriberidobfuscationalgo
except Exception as e:
raise e
@subscriberidobfuscationalgo.setter
def subscriberidobfuscationalgo(self, subscriberidobfuscationalgo) :
try :
self._subscriberidobfuscationalgo = subscriberidobfuscationalgo
except Exception as e:
raise e
@property
def gxsessionreporting(self) :
try :
return self._gxsessionreporting
except Exception as e:
raise e
@gxsessionreporting.setter
def gxsessionreporting(self, gxsessionreporting) :
try :
self._gxsessionreporting = gxsessionreporting
except Exception as e:
raise e
@property
def securityinsighttraffic(self) :
try :
return self._securityinsighttraffic
except Exception as e:
raise e
@securityinsighttraffic.setter
def securityinsighttraffic(self, securityinsighttraffic) :
try :
self._securityinsighttraffic = securityinsighttraffic
except Exception as e:
raise e
@property
def cacheinsight(self) :
try :
return self._cacheinsight
except Exception as e:
raise e
@cacheinsight.setter
def cacheinsight(self, cacheinsight) :
try :
self._cacheinsight = cacheinsight
except Exception as e:
raise e
@property
def videoinsight(self) :
try :
return self._videoinsight
except Exception as e:
raise e
@videoinsight.setter
def videoinsight(self, videoinsight) :
try :
self._videoinsight = videoinsight
except Exception as e:
raise e
@property
def httpquerywithurl(self) :
try :
return self._httpquerywithurl
except Exception as e:
raise e
@httpquerywithurl.setter
def httpquerywithurl(self, httpquerywithurl) :
try :
self._httpquerywithurl = httpquerywithurl
except Exception as e:
raise e
@property
def urlcategory(self) :
try :
return self._urlcategory
except Exception as e:
raise e
@urlcategory.setter
def urlcategory(self, urlcategory) :
try :
self._urlcategory = urlcategory
except Exception as e:
raise e
@property
def lsnlogging(self) :
try :
return self._lsnlogging
except Exception as e:
raise e
@lsnlogging.setter
def lsnlogging(self, lsnlogging) :
try :
self._lsnlogging = lsnlogging
except Exception as e:
raise e
@property
def cqareporting(self) :
try :
return self._cqareporting
except Exception as e:
raise e
@cqareporting.setter
def cqareporting(self, cqareporting) :
try :
self._cqareporting = cqareporting
except Exception as e:
raise e
@property
def emailaddress(self) :
try :
return self._emailaddress
except Exception as e:
raise e
@emailaddress.setter
def emailaddress(self, emailaddress) :
try :
self._emailaddress = emailaddress
except Exception as e:
raise e
@property
def usagerecordinterval(self) :
try :
return self._usagerecordinterval
except Exception as e:
raise e
@usagerecordinterval.setter
def usagerecordinterval(self, usagerecordinterval) :
try :
self._usagerecordinterval = usagerecordinterval
except Exception as e:
raise e
@property
def websaasappusagereporting(self) :
try :
return self._websaasappusagereporting
except Exception as e:
raise e
@websaasappusagereporting.setter
def websaasappusagereporting(self, websaasappusagereporting) :
try :
self._websaasappusagereporting = websaasappusagereporting
except Exception as e:
raise e
@property
def metrics(self) :
try :
return self._metrics
except Exception as e:
raise e
@metrics.setter
def metrics(self, metrics) :
try :
self._metrics = metrics
except Exception as e:
raise e
@property
def events(self) :
try :
return self._events
except Exception as e:
raise e
@events.setter
def events(self, events) :
try :
self._events = events
except Exception as e:
raise e
@property
def auditlogs(self) :
try :
return self._auditlogs
except Exception as e:
raise e
@auditlogs.setter
def auditlogs(self, auditlogs) :
try :
self._auditlogs = auditlogs
except Exception as e:
raise e
@property
def observationpointid(self) :
try :
return self._observationpointid
except Exception as e:
raise e
@observationpointid.setter
def observationpointid(self, observationpointid) :
try :
self._observationpointid = observationpointid
except Exception as e:
raise e
@property
def distributedtracing(self) :
try :
return self._distributedtracing
except Exception as e:
raise e
@distributedtracing.setter
def distributedtracing(self, distributedtracing) :
try :
self._distributedtracing = distributedtracing
except Exception as e:
raise e
@property
def disttracingsamplingrate(self) :
try :
return self._disttracingsamplingrate
except Exception as e:
raise e
@disttracingsamplingrate.setter
def disttracingsamplingrate(self, disttracingsamplingrate) :
try :
self._disttracingsamplingrate = disttracingsamplingrate
except Exception as e:
raise e
@property
def tcpattackcounterinterval(self) :
try :
return self._tcpattackcounterinterval
except Exception as e:
raise e
@tcpattackcounterinterval.setter
def tcpattackcounterinterval(self, tcpattackcounterinterval) :
try :
self._tcpattackcounterinterval = tcpattackcounterinterval
except Exception as e:
raise e
@property
def logstreamovernsip(self) :
try :
return self._logstreamovernsip
except Exception as e:
raise e
@logstreamovernsip.setter
def logstreamovernsip(self, logstreamovernsip) :
try :
self._logstreamovernsip = logstreamovernsip
except Exception as e:
raise e
@property
def analyticsauthtoken(self) :
try :
return self._analyticsauthtoken
except Exception as e:
raise e
@analyticsauthtoken.setter
def analyticsauthtoken(self, analyticsauthtoken) :
try :
self._analyticsauthtoken = analyticsauthtoken
except Exception as e:
raise e
@property
def timeseriesovernsip(self) :
try :
return self._timeseriesovernsip
except Exception as e:
raise e
@timeseriesovernsip.setter
def timeseriesovernsip(self, timeseriesovernsip) :
try :
self._timeseriesovernsip = timeseriesovernsip
except Exception as e:
raise e
@property
def builtin(self) :
try :
return self._builtin
except Exception as e:
raise e
@property
def feature(self) :
try :
return self._feature
except Exception as e:
raise e
@property
def tcpburstreporting(self) :
try :
return self._tcpburstreporting
except Exception as e:
raise e
@property
def tcpburstreportingthreshold(self) :
try :
return self._tcpburstreportingthreshold
except Exception as e:
raise e
	def _get_nitro_response(self, service, response) :
		""" Convert a raw NITRO response into resource objects.

		Deserializes *response* into an appflowparam_response envelope,
		raises nitro_exception on reported errors, and returns the
		contained appflowparam object array.
		"""
		try :
			result = service.payload_formatter.string_to_resource(appflowparam_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# Error code 444 signals an expired session; clear it so the
				# client re-authenticates on the next call.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					# No severity supplied: treat any non-zero code as fatal.
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.appflowparam
		except Exception as e :
			raise e
def _get_object_name(self) :
try :
return 0
except Exception as e :
raise e
@classmethod
def filter_update_parameters(cls, resource) :
updateresource = appflowparam()
updateresource.templaterefresh = resource.templaterefresh
updateresource.appnamerefresh = resource.appnamerefresh
updateresource.flowrecordinterval = resource.flowrecordinterval
updateresource.securityinsightrecordinterval = resource.securityinsightrecordinterval
updateresource.udppmtu = resource.udppmtu
updateresource.httpurl = resource.httpurl
updateresource.aaausername = resource.aaausername
updateresource.httpcookie = resource.httpcookie
updateresource.httpreferer = resource.httpreferer
updateresource.httpmethod = resource.httpmethod
updateresource.httphost = resource.httphost
updateresource.httpuseragent = resource.httpuseragent
updateresource.clienttrafficonly = resource.clienttrafficonly
updateresource.httpcontenttype = resource.httpcontenttype
updateresource.httpauthorization = resource.httpauthorization
updateresource.httpvia = resource.httpvia
updateresource.httpxforwardedfor = resource.httpxforwardedfor
updateresource.httplocation = resource.httplocation
updateresource.httpsetcookie = resource.httpsetcookie
updateresource.httpsetcookie2 = resource.httpsetcookie2
updateresource.connectionchaining = resource.connectionchaining
updateresource.httpdomain = resource.httpdomain
updateresource.skipcacheredirectionhttptransaction = resource.skipcacheredirectionhttptransaction
updateresource.identifiername = resource.identifiername
updateresource.identifiersessionname = resource.identifiersessionname
updateresource.observationdomainid = resource.observationdomainid
updateresource.observationdomainname = resource.observationdomainname
updateresource.subscriberawareness = resource.subscriberawareness
updateresource.subscriberidobfuscation = resource.subscriberidobfuscation
updateresource.subscriberidobfuscationalgo = resource.subscriberidobfuscationalgo
updateresource.gxsessionreporting = resource.gxsessionreporting
updateresource.securityinsighttraffic = resource.securityinsighttraffic
updateresource.cacheinsight = resource.cacheinsight
updateresource.videoinsight = resource.videoinsight
updateresource.httpquerywithurl = resource.httpquerywithurl
updateresource.urlcategory = resource.urlcategory
updateresource.lsnlogging = resource.lsnlogging
updateresource.cqareporting = resource.cqareporting
updateresource.emailaddress = resource.emailaddress
updateresource.usagerecordinterval = resource.usagerecordinterval
updateresource.websaasappusagereporting = resource.websaasappusagereporting
updateresource.metrics = resource.metrics
updateresource.events = resource.events
updateresource.auditlogs = resource.auditlogs
updateresource.observationpointid = resource.observationpointid
updateresource.distributedtracing = resource.distributedtracing
updateresource.disttracingsamplingrate = resource.disttracingsamplingrate
updateresource.tcpattackcounterinterval = resource.tcpattackcounterinterval
updateresource.logstreamovernsip = resource.logstreamovernsip
updateresource.analyticsauthtoken = resource.analyticsauthtoken
updateresource.timeseriesovernsip = resource.timeseriesovernsip
return updateresource
	@classmethod
	def update(cls, client, resource) :
		""" Use this API to update appflowparam.

		NOTE: when *resource* is a list this method falls through and
		implicitly returns None -- only a single resource is handled here.
		"""
		try :
			if type(resource) is not list :
				updateresource = cls.filter_update_parameters(resource)
				return updateresource.update_resource(client)
		except Exception as e :
			raise e
	@classmethod
	def unset(cls, client, resource, args) :
		""" Use this API to unset the properties of appflowparam resource.
		Properties that need to be unset are specified in args array.

		NOTE: when *resource* is a list this method falls through and
		implicitly returns None -- only a single resource is handled here.
		"""
		try :
			if type(resource) is not list :
				unsetresource = appflowparam()
				return unsetresource.unset_resource(client, args)
		except Exception as e :
			raise e
	@classmethod
	def get(cls, client, name="", option_="") :
		""" Use this API to fetch all the appflowparam resources that are
		configured on netscaler.

		NOTE: a non-empty *name* is not supported by this resource and
		makes the method implicitly return None.
		"""
		try :
			if not name :
				obj = appflowparam()
				response = obj.get_resources(client, option_)
				return response
		except Exception as e :
			raise e
class Httpreferer:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Logstreamovernsip:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Websaasappusagereporting:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Cqareporting:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Feature:
WL = "WL"
WebLogging = "WebLogging"
SP = "SP"
SurgeProtection = "SurgeProtection"
LB = "LB"
LoadBalancing = "LoadBalancing"
CS = "CS"
ContentSwitching = "ContentSwitching"
CR = "CR"
CacheRedirection = "CacheRedirection"
SC = "SC"
SureConnect = "SureConnect"
CMP = "CMP"
CMPcntl = "CMPcntl"
CompressionControl = "CompressionControl"
PQ = "PQ"
PriorityQueuing = "PriorityQueuing"
HDOSP = "HDOSP"
HttpDoSProtection = "HttpDoSProtection"
SSLVPN = "SSLVPN"
AAA = "AAA"
GSLB = "GSLB"
GlobalServerLoadBalancing = "GlobalServerLoadBalancing"
SSL = "SSL"
SSLOffload = "SSLOffload"
SSLOffloading = "SSLOffloading"
CF = "CF"
ContentFiltering = "ContentFiltering"
IC = "IC"
IntegratedCaching = "IntegratedCaching"
OSPF = "OSPF"
OSPFRouting = "OSPFRouting"
RIP = "RIP"
RIPRouting = "RIPRouting"
BGP = "BGP"
BGPRouting = "BGPRouting"
REWRITE = "REWRITE"
IPv6PT = "IPv6PT"
IPv6protocoltranslation = "IPv6protocoltranslation"
AppFw = "AppFw"
ApplicationFirewall = "ApplicationFirewall"
RESPONDER = "RESPONDER"
HTMLInjection = "HTMLInjection"
push = "push"
NSPush = "NSPush"
NetScalerPush = "NetScalerPush"
AppFlow = "AppFlow"
CloudBridge = "CloudBridge"
ISIS = "ISIS"
ISISRouting = "ISISRouting"
CH = "CH"
CallHome = "CallHome"
AppQoE = "AppQoE"
ContentAccelerator = "ContentAccelerator"
SYSTEM = "SYSTEM"
RISE = "RISE"
FEO = "FEO"
LSN = "LSN"
LargeScaleNAT = "LargeScaleNAT"
RDPProxy = "RDPProxy"
Rep = "Rep"
Reputation = "Reputation"
URLFiltering = "URLFiltering"
VideoOptimization = "VideoOptimization"
ForwardProxy = "ForwardProxy"
SSLInterception = "SSLInterception"
AdaptiveTCP = "AdaptiveTCP"
CQA = "CQA"
CI = "CI"
ContentInspection = "ContentInspection"
Bot = "Bot"
APIGateway = "APIGateway"
class Httpsetcookie:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httpvia:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Gxsessionreporting:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httpdomain:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Videoinsight:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httpsetcookie2:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Auditlogs:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Builtin:
MODIFIABLE = "MODIFIABLE"
DELETABLE = "DELETABLE"
IMMUTABLE = "IMMUTABLE"
PARTITION_ALL = "PARTITION_ALL"
class Httpquerywithurl:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httpauthorization:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Urlcategory:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Skipcacheredirectionhttptransaction:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Aaausername:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Lsnlogging:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Clienttrafficonly:
YES = "YES"
NO = "NO"
class Securityinsighttraffic:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httpcontenttype:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Cacheinsight:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Timeseriesovernsip:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Emailaddress:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httpmethod:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Subscriberawareness:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httplocation:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Distributedtracing:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Identifiersessionname:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Tcpburstreporting:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Metrics:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Connectionchaining:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Subscriberidobfuscation:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httpxforwardedfor:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Subscriberidobfuscationalgo:
MD5 = "MD5"
SHA256 = "SHA256"
class Identifiername:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httpcookie:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httpurl:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Events:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httpuseragent:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httphost:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class appflowparam_response(base_response) :
	""" Deserialization envelope for appflowparam NITRO API responses. """
	def __init__(self, length=1) :
		# One placeholder resource per expected record; the payload
		# formatter overwrites these during deserialization.
		self.appflowparam = [appflowparam() for _idx in range(length)]
		# Status fields reported by the appliance.
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
| true | true |
f73568bb9aceccf782577788600b4d031e9caf26 | 2,459 | py | Python | kanban/lib/python2.7/site-packages/django/utils/version.py | ecrespo/django_kanban-agile | 2601410d911d8822eb485e761e150dfcefaf0655 | [
"MIT"
] | 39 | 2016-12-05T14:36:37.000Z | 2021-07-29T18:22:34.000Z | kanban/lib/python2.7/site-packages/django/utils/version.py | ecrespo/django_kanban-agile | 2601410d911d8822eb485e761e150dfcefaf0655 | [
"MIT"
] | 68 | 2016-12-12T20:38:47.000Z | 2020-07-26T18:28:49.000Z | kanban/lib/python2.7/site-packages/django/utils/version.py | ecrespo/django_kanban-agile | 2601410d911d8822eb485e761e150dfcefaf0655 | [
"MIT"
] | 120 | 2016-08-18T14:53:03.000Z | 2020-06-16T13:27:20.000Z | from __future__ import unicode_literals
import datetime
import os
import subprocess
from django.utils.lru_cache import lru_cache
def get_version(version=None):
    """Return a PEP 386-compliant version number built from VERSION."""
    version = get_complete_version(version)
    major = get_major_version(version)

    # Suffix rules: pre-alpha releases get ".devN" derived from the git
    # changeset; alpha/beta/rc releases get "{a|b|c}N"; final gets none.
    suffix = ''
    release_level, serial = version[3], version[4]
    if release_level == 'alpha' and serial == 0:
        changeset = get_git_changeset()
        if changeset:
            suffix = '.dev%s' % changeset
    elif release_level != 'final':
        suffix = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}[release_level] + str(serial)
    return str(major + suffix)
def get_major_version(version=None):
    """Return the major version string ("X.Y" or "X.Y.Z") from VERSION."""
    version = get_complete_version(version)
    # Drop a zero micro component: render X.Y rather than X.Y.0.
    if version[2] == 0:
        significant = version[:2]
    else:
        significant = version[:3]
    return '.'.join(str(component) for component in significant)
def get_complete_version(version=None):
    """Return a complete five-part django version tuple.

    A caller-supplied tuple is validated and returned as-is; otherwise
    django's own VERSION is returned.
    """
    if version is not None:
        # Expected shape: (major, minor, micro, release level, serial).
        assert len(version) == 5
        assert version[3] in ('alpha', 'beta', 'rc', 'final')
        return version
    from django import VERSION
    return VERSION
def get_docs_version(version=None):
    """Return the docs version: 'dev' for pre-final releases, else 'X.Y'."""
    version = get_complete_version(version)
    return 'dev' if version[3] != 'final' else '%d.%d' % version[:2]
@lru_cache()
def get_git_changeset():
    """Return a numeric identifier of the latest git changeset.

    The identifier is the changeset's UTC timestamp formatted as
    YYYYMMDDHHMMSS. It is not guaranteed unique, but collisions are very
    unlikely, which is good enough for development version numbers.
    """
    repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    proc = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD',
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            shell=True, cwd=repo_dir, universal_newlines=True)
    raw_timestamp = proc.communicate()[0]
    try:
        changeset_time = datetime.datetime.utcfromtimestamp(int(raw_timestamp))
    except ValueError:
        # Not inside a git checkout (or git unavailable): no changeset id.
        return None
    return changeset_time.strftime('%Y%m%d%H%M%S')
| 30.358025 | 79 | 0.660838 | from __future__ import unicode_literals
import datetime
import os
import subprocess
from django.utils.lru_cache import lru_cache
def get_version(version=None):
    "Returns a PEP 386-compliant version number from VERSION."
    version = get_complete_version(version)
    # major = X.Y[.Z]; sub = ".devN" for pre-alpha, "{a|b|c}N" for alpha/beta/rc.
    major = get_major_version(version)
    sub = ''
    if version[3] == 'alpha' and version[4] == 0:
        # Pre-alpha: derive a .devN suffix from the latest git changeset.
        git_changeset = get_git_changeset()
        if git_changeset:
            sub = '.dev%s' % git_changeset
    elif version[3] != 'final':
        mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
        sub = mapping[version[3]] + str(version[4])
    return str(major + sub)
def get_major_version(version=None):
    "Returns major version from VERSION."
    version = get_complete_version(version)
    # X.Y when the micro component is 0, else X.Y.Z.
    parts = 2 if version[2] == 0 else 3
    major = '.'.join(str(x) for x in version[:parts])
    return major
def get_complete_version(version=None):
    """Returns a tuple of the django version. If version argument is non-empty,
    then checks for correctness of the tuple provided.
    """
    if version is None:
        from django import VERSION as version
    else:
        # Expected shape: (major, minor, micro, release level, serial).
        assert len(version) == 5
        assert version[3] in ('alpha', 'beta', 'rc', 'final')
    return version
def get_docs_version(version=None):
    "Returns the docs version string: 'dev' for pre-final releases, else 'X.Y'."
    version = get_complete_version(version)
    if version[3] != 'final':
        return 'dev'
    else:
        return '%d.%d' % version[:2]
@lru_cache()
def get_git_changeset():
    """Returns a numeric identifier of the latest git changeset.

    The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
    This value isn't guaranteed to be unique, but collisions are very unlikely,
    so it's sufficient for generating the development version numbers.
    """
    repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD',
            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            shell=True, cwd=repo_dir, universal_newlines=True)
    timestamp = git_log.communicate()[0]
    try:
        timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
    except ValueError:
        # Not inside a git checkout (or git unavailable): no changeset id.
        return None
    return timestamp.strftime('%Y%m%d%H%M%S')
| true | true |
f7356913d6ee4f01834b90c06eeafd81bc4cc467 | 589 | py | Python | Aniyom Ebenezer/phase 1/python 2 basis/Day27_Challenge_Solution/Ques 3 Sol.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 6 | 2020-05-23T19:53:25.000Z | 2021-05-08T20:21:30.000Z | Aniyom Ebenezer/phase 1/python 2 basis/Day27_Challenge_Solution/Ques 3 Sol.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 8 | 2020-05-14T18:53:12.000Z | 2020-07-03T00:06:20.000Z | Aniyom Ebenezer/phase 1/python 2 basis/Day27_Challenge_Solution/Ques 3 Sol.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 39 | 2020-05-10T20:55:02.000Z | 2020-09-12T17:40:59.000Z | """
Write a Python program to check whether two given lines are parallel or not.
Note: Parallel lines are two or more lines that never intersect. Parallel Lines are like railroad tracks that never intersect.
The General Form of the equation of a straight line is: ax + by = c
The said straight line is represented in a list as [a, b, c]
Example of two parallel lines:
x + 4y = 10 and x + 4y = 14
Sample Output:
True
False
"""
def parallel_lines(line1, line2):
    """Return True if two straight lines are parallel.

    Each line is given in general form ``ax + by = c`` as a list
    ``[a, b, c]``.

    Uses cross-multiplication (a1*b2 == a2*b1) instead of comparing
    slopes (a/b), so vertical lines (b == 0) do not raise
    ZeroDivisionError and integer inputs are compared exactly.
    """
    return line1[0] * line2[1] == line2[0] * line1[1]
print(parallel_lines([2,3,4], [2,3,8]))
print(parallel_lines([2,3,4], [4,-3,8])) | 36.8125 | 126 | 0.721562 | def parallel_lines(line1, line2):
return line1[0]/line1[1] == line2[0]/line2[1]
print(parallel_lines([2,3,4], [2,3,8]))
print(parallel_lines([2,3,4], [4,-3,8])) | true | true |
f73569d59f547d8ce45674644ef17e5f76dc2e6f | 7,924 | py | Python | readthedocs/search/utils.py | ank-forked/readthedocs.org | e4110e8db5d25b7e6c699dd2df1a580b04ee8d16 | [
"MIT"
] | 1 | 2019-01-05T09:49:52.000Z | 2019-01-05T09:49:52.000Z | readthedocs/search/utils.py | himynamesdave/readthedocs.org | 38e73cd73efb76461d28a5d9737731b7d7349297 | [
"MIT"
] | 4 | 2021-02-08T21:06:49.000Z | 2021-12-13T20:51:17.000Z | readthedocs/search/utils.py | himynamesdave/readthedocs.org | 38e73cd73efb76461d28a5d9737731b7d7349297 | [
"MIT"
] | 3 | 2016-08-04T12:53:13.000Z | 2016-11-02T14:17:55.000Z | # -*- coding: utf-8 -*-
import os
import fnmatch
import re
import codecs
import logging
import json
from pyquery import PyQuery
log = logging.getLogger(__name__)
def process_mkdocs_json(version, build_dir=True):
    """Collect search-index records for every MkDocs JSON page of a version.

    When `build_dir` is True the pages are read from the build output
    directory, otherwise from the production media path. Returns a list of
    dicts with 'content', 'path', 'title', 'headers' and 'sections' keys.
    """
    if build_dir:
        json_root = version.project.full_json_path(version.slug)
    else:
        json_root = version.project.get_production_media_path(
            type_='json', version_slug=version.slug, include_file=False)
    pages = []
    for root, dirs, files in os.walk(json_root):
        for name in fnmatch.filter(files, '*.json'):
            json_file = os.path.join(root, name)
            relative_path = parse_path_from_file(documentation_type='mkdocs', file_path=json_file)
            html = parse_content_from_file(documentation_type='mkdocs', file_path=json_file)
            headers = parse_headers_from_file(documentation_type='mkdocs', file_path=json_file)
            sections = parse_sections_from_file(documentation_type='mkdocs', file_path=json_file)
            # Fall back to the path when no sections were parsed ('' or []).
            if sections:
                title = sections[0]['title']
            else:
                title = relative_path
            pages.append({
                'content': html,
                'path': relative_path,
                'title': title,
                'headers': headers,
                'sections': sections,
            })
    return pages
def recurse_while_none(element):
    """Return the text of `element`, descending into first children until a
    node with non-None text is found."""
    node = element
    while node.text is None:
        node = node.getchildren()[0]
    return node.text
def parse_path_from_file(documentation_type, file_path):
    """Return the normalized document path stored in a JSON page file.

    The page's 'url' value is normalized so that:
        "path/"           -> "path/index"
        "path/index.html" -> "path/index"
        "/path/index"     -> "path/index"

    Returns '' when the file cannot be read.
    """
    try:
        with codecs.open(file_path, encoding='utf-8', mode='r') as f:
            content = f.read()
    except IOError as e:
        log.info('(Search Index) Unable to index file: %s, error :%s' % (file_path, e))
        return ''
    page_json = json.loads(content)
    path = page_json['url']
    # Raw strings: '\.' in a plain string literal is an invalid escape
    # sequence (SyntaxWarning on modern Python).
    path = re.sub(r'/$', '/index', path)
    path = re.sub(r'\.html$', '', path)
    path = re.sub(r'^/', '', path)
    return path
def parse_content_from_file(documentation_type, file_path):
    """Read a JSON page file and return its body text for indexing.

    Returns '' when the file cannot be read.
    """
    try:
        with codecs.open(file_path, encoding='utf-8', mode='r') as handle:
            raw = handle.read()
    except IOError as err:
        log.info('(Search Index) Unable to index file: %s, error :%s' % (file_path, err))
        return ''
    page_html = json.loads(raw)['content']
    text = parse_content(documentation_type, page_html)
    if text:
        log.debug('(Search Index) %s length: %s' % (file_path, len(text)))
    else:
        log.info('(Search Index) Unable to index file: %s, empty file' % (file_path))
    return text
def parse_content(documentation_type, content):
    """Prepare the text of the html file.

    Returns the body text of a document, or '' if the HTML cannot be
    parsed.
    """
    try:
        return PyQuery(content).text()
    except ValueError:
        return ''
def parse_headers_from_file(documentation_type, file_path):
    """Read a JSON page file and return its parsed <h2> header texts.

    Returns '' when the file cannot be read.
    """
    log.debug('(Search Index) Parsing headers for %s' % (file_path))
    try:
        with codecs.open(file_path, encoding='utf-8', mode='r') as handle:
            raw = handle.read()
    except IOError as err:
        log.info('(Search Index) Unable to index file: %s, error :%s' % (file_path, err))
        return ''
    headers = parse_headers(documentation_type, json.loads(raw)['content'])
    if not headers:
        log.error('Unable to index file headers for: %s' % file_path)
    return headers
def parse_headers(documentation_type, content):
    """Return the text of each <h2> element (MkDocs pages only)."""
    if documentation_type != 'mkdocs':
        return []
    return [recurse_while_none(heading) for heading in PyQuery(content)('h2')]
def parse_sections_from_file(documentation_type, file_path):
    """Read a JSON page file and return its parsed section records.

    Returns '' when the file cannot be read.
    """
    log.debug('(Search Index) Parsing sections for %s' % (file_path))
    try:
        with codecs.open(file_path, encoding='utf-8', mode='r') as handle:
            raw = handle.read()
    except IOError as err:
        log.info('(Search Index) Unable to index file: %s, error :%s' % (file_path, err))
        return ''
    sections = parse_sections(documentation_type, json.loads(raw)['content'])
    if not sections:
        log.error('Unable to index file sections for: %s' % file_path)
    return sections
def parse_sections(documentation_type, content):
    """Split an HTML page into indexable sections.

    Returns a list of ``{'id', 'title', 'content'}`` dicts, one per h1/h2
    section found in `content`. For mkdocs pages, unparseable HTML yields
    '' instead of a list (callers treat both as falsy).
    """
    sections = []
    if 'sphinx' in documentation_type:
        body = PyQuery(content)
        h1_section = body('.section > h1')
        if h1_section:
            div = h1_section.parent()
            # Strip the pilcrow permalink character Sphinx appends to headings.
            h1_title = h1_section.text().replace(u'¶', '').strip()
            h1_id = div.attr('id')
            h1_content = ""
            # Collect siblings after the h1 until the first nested section div.
            next_p = body('h1').next()
            while next_p:
                if next_p[0].tag == 'div' and 'class' in next_p[0].attrib:
                    if 'section' in next_p[0].attrib['class']:
                        break
                h1_content += "\n%s\n" % next_p.html()
                next_p = next_p.next()
            if h1_content:
                sections.append({
                    'id': h1_id,
                    'title': h1_title,
                    'content': h1_content,
                })
        # Capture text inside h2's
        section_list = body('.section > h2')
        for num in range(len(section_list)):
            div = section_list.eq(num).parent()
            header = section_list.eq(num)
            title = header.text().replace(u'¶', '').strip()
            section_id = div.attr('id')
            # NOTE(review): this rebinds the `content` parameter; harmless here
            # because the parsed `body` was already built above, but fragile.
            content = div.html()
            sections.append({
                'id': section_id,
                'title': title,
                'content': content,
            })
            log.debug("(Search Index) Section [%s:%s]: %s" % (section_id, title, content))
    if 'mkdocs' in documentation_type:
        try:
            body = PyQuery(content)
        except ValueError:
            # Unparseable HTML: return '' (falsy) rather than a list.
            return ''
        try:
            # H1 content
            h1 = body('h1')
            h1_id = h1.attr('id')
            h1_title = h1.text().strip()
            h1_content = ""
            # Collect siblings after the h1 until the first h2.
            next_p = body('h1').next()
            while next_p:
                if next_p[0].tag == 'h2':
                    break
                h1_html = next_p.html()
                if h1_html:
                    h1_content += "\n%s\n" % h1_html
                next_p = next_p.next()
            if h1_content:
                sections.append({
                    'id': h1_id,
                    'title': h1_title,
                    'content': h1_content,
                })
            # H2 content
            section_list = body('h2')
            for num in range(len(section_list)):
                h2 = section_list.eq(num)
                h2_title = h2.text().strip()
                section_id = h2.attr('id')
                h2_content = ""
                # NOTE(review): body('h2').next() selects siblings of *all* h2
                # elements on every iteration instead of this section's h2
                # (h2.next()), so each section may collect the same content.
                # Looks like a bug -- confirm intended behavior before fixing.
                next_p = body('h2').next()
                while next_p:
                    if next_p[0].tag == 'h2':
                        break
                    h2_html = next_p.html()
                    if h2_html:
                        h2_content += "\n%s\n" % h2_html
                    next_p = next_p.next()
                if h2_content:
                    sections.append({
                        'id': section_id,
                        'title': h2_title,
                        'content': h2_content,
                    })
                    log.debug("(Search Index) Section [%s:%s]: %s" % (section_id, h2_title, h2_content))
        except:
            # NOTE(review): bare except hides real errors (even KeyboardInterrupt);
            # consider narrowing to Exception.
            log.error('Failed indexing', exc_info=True)
    return sections
| 32.743802 | 100 | 0.551111 |
import os
import fnmatch
import re
import codecs
import logging
import json
from pyquery import PyQuery
log = logging.getLogger(__name__)
def process_mkdocs_json(version, build_dir=True):
if build_dir:
full_path = version.project.full_json_path(version.slug)
else:
full_path = version.project.get_production_media_path(
type_='json', version_slug=version.slug, include_file=False)
html_files = []
for root, dirs, files in os.walk(full_path):
for filename in fnmatch.filter(files, '*.json'):
html_files.append(os.path.join(root, filename))
page_list = []
for filename in html_files:
relative_path = parse_path_from_file(documentation_type='mkdocs', file_path=filename)
html = parse_content_from_file(documentation_type='mkdocs', file_path=filename)
headers = parse_headers_from_file(documentation_type='mkdocs', file_path=filename)
sections = parse_sections_from_file(documentation_type='mkdocs', file_path=filename)
try:
title = sections[0]['title']
except IndexError:
title = relative_path
page_list.append({
'content': html,
'path': relative_path,
'title': title,
'headers': headers,
'sections': sections,
})
return page_list
def recurse_while_none(element):
if element.text is None:
return recurse_while_none(element.getchildren()[0])
else:
return element.text
def parse_path_from_file(documentation_type, file_path):
try:
with codecs.open(file_path, encoding='utf-8', mode='r') as f:
content = f.read()
except IOError as e:
log.info('(Search Index) Unable to index file: %s, error :%s' % (file_path, e))
return ''
page_json = json.loads(content)
path = page_json['url']
path = re.sub('/$', '/index', path)
path = re.sub('\.html$', '', path)
path = re.sub('^/', '', path)
return path
def parse_content_from_file(documentation_type, file_path):
try:
with codecs.open(file_path, encoding='utf-8', mode='r') as f:
content = f.read()
except IOError as e:
log.info('(Search Index) Unable to index file: %s, error :%s' % (file_path, e))
return ''
page_json = json.loads(content)
page_content = page_json['content']
content = parse_content(documentation_type, page_content)
if not content:
log.info('(Search Index) Unable to index file: %s, empty file' % (file_path))
else:
log.debug('(Search Index) %s length: %s' % (file_path, len(content)))
return content
def parse_content(documentation_type, content):
try:
to_index = PyQuery(content).text()
except ValueError:
return ''
return to_index
def parse_headers_from_file(documentation_type, file_path):
log.debug('(Search Index) Parsing headers for %s' % (file_path))
try:
with codecs.open(file_path, encoding='utf-8', mode='r') as f:
content = f.read()
except IOError as e:
log.info('(Search Index) Unable to index file: %s, error :%s' % (file_path, e))
return ''
page_json = json.loads(content)
page_content = page_json['content']
headers = parse_headers(documentation_type, page_content)
if not headers:
log.error('Unable to index file headers for: %s' % file_path)
return headers
def parse_headers(documentation_type, content):
headers = []
if documentation_type == 'mkdocs':
for element in PyQuery(content)('h2'):
headers.append(recurse_while_none(element))
return headers
def parse_sections_from_file(documentation_type, file_path):
log.debug('(Search Index) Parsing sections for %s' % (file_path))
try:
with codecs.open(file_path, encoding='utf-8', mode='r') as f:
content = f.read()
except IOError as e:
log.info('(Search Index) Unable to index file: %s, error :%s' % (file_path, e))
return ''
page_json = json.loads(content)
page_content = page_json['content']
sections = parse_sections(documentation_type, page_content)
if not sections:
log.error('Unable to index file sections for: %s' % file_path)
return sections
def parse_sections(documentation_type, content):
sections = []
if 'sphinx' in documentation_type:
body = PyQuery(content)
h1_section = body('.section > h1')
if h1_section:
div = h1_section.parent()
h1_title = h1_section.text().replace(u'¶', '').strip()
h1_id = div.attr('id')
h1_content = ""
next_p = body('h1').next()
while next_p:
if next_p[0].tag == 'div' and 'class' in next_p[0].attrib:
if 'section' in next_p[0].attrib['class']:
break
h1_content += "\n%s\n" % next_p.html()
next_p = next_p.next()
if h1_content:
sections.append({
'id': h1_id,
'title': h1_title,
'content': h1_content,
})
section_list = body('.section > h2')
for num in range(len(section_list)):
div = section_list.eq(num).parent()
header = section_list.eq(num)
title = header.text().replace(u'¶', '').strip()
section_id = div.attr('id')
content = div.html()
sections.append({
'id': section_id,
'title': title,
'content': content,
})
log.debug("(Search Index) Section [%s:%s]: %s" % (section_id, title, content))
if 'mkdocs' in documentation_type:
try:
body = PyQuery(content)
except ValueError:
return ''
try:
# H1 content
h1 = body('h1')
h1_id = h1.attr('id')
h1_title = h1.text().strip()
h1_content = ""
next_p = body('h1').next()
while next_p:
if next_p[0].tag == 'h2':
break
h1_html = next_p.html()
if h1_html:
h1_content += "\n%s\n" % h1_html
next_p = next_p.next()
if h1_content:
sections.append({
'id': h1_id,
'title': h1_title,
'content': h1_content,
})
# H2 content
section_list = body('h2')
for num in range(len(section_list)):
h2 = section_list.eq(num)
h2_title = h2.text().strip()
section_id = h2.attr('id')
h2_content = ""
next_p = body('h2').next()
while next_p:
if next_p[0].tag == 'h2':
break
h2_html = next_p.html()
if h2_html:
h2_content += "\n%s\n" % h2_html
next_p = next_p.next()
if h2_content:
sections.append({
'id': section_id,
'title': h2_title,
'content': h2_content,
})
log.debug("(Search Index) Section [%s:%s]: %s" % (section_id, h2_title, h2_content))
except:
log.error('Failed indexing', exc_info=True)
return sections
| true | true |
f7356b3bba32f2fcd3678ee11ab6863786e3b137 | 4,511 | py | Python | fastestimator/backend/_iwd.py | DwijayDS/fastestimator | 9b288cb2bd870f971ec4cee09d0b3205e1316a94 | [
"Apache-2.0"
] | null | null | null | fastestimator/backend/_iwd.py | DwijayDS/fastestimator | 9b288cb2bd870f971ec4cee09d0b3205e1316a94 | [
"Apache-2.0"
] | null | null | null | fastestimator/backend/_iwd.py | DwijayDS/fastestimator | 9b288cb2bd870f971ec4cee09d0b3205e1316a94 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import math
from typing import Optional, TypeVar
import numpy as np
import tensorflow as tf
import torch
from fastestimator.backend._maximum import maximum
from fastestimator.backend._reduce_sum import reduce_sum
from fastestimator.backend._reshape import reshape
from fastestimator.backend._tensor_pow import tensor_pow
from fastestimator.backend._to_tensor import to_tensor
from fastestimator.util.util import TENSOR_TO_NP_DTYPE
Tensor = TypeVar('Tensor', tf.Tensor, torch.Tensor, np.ndarray)
def iwd(tensor: Tensor,
        power: float = 1.0,
        max_prob: float = 0.95,
        pairwise_distance: float = 1.0,
        eps: Optional[Tensor] = None) -> Tensor:
    """Compute the Inverse Weighted Distance from the given input.
    This can be used as an activation function for the final layer of a neural network instead of softmax. For example,
    instead of: model.add(layers.Dense(classes, activation='softmax')), you could use:
    model.add(layers.Dense(classes, activation=lambda x: iwd(tf.nn.sigmoid(x))))
    This method can be used with Numpy data:
    ```python
    n = np.array([[0.5]*5, [0]+[1]*4])
    b = fe.backend.iwd(n)  # [[0.2, 0.2, 0.2, 0.2, 0.2], [0.95, 0.0125, 0.0125, 0.0125, 0.0125]]
    ```
    This method can be used with TensorFlow tensors:
    ```python
    t = tf.constant([[0.5]*5, [0]+[1]*4])
    b = fe.backend.iwd(n)  # [[0.2, 0.2, 0.2, 0.2, 0.2], [0.95, 0.0125, 0.0125, 0.0125, 0.0125]]
    ```
    This method can be used with PyTorch tensors:
    ```python
    p = torch.tensor([[0.5]*5, [0]+[1]*4])
    b = fe.backend.iwd(n)  # [[0.2, 0.2, 0.2, 0.2, 0.2], [0.95, 0.0125, 0.0125, 0.0125, 0.0125]]
    ```
    Args:
        tensor: The input value. Should be of shape (Batch, C) where every element in C corresponds to a (non-negative)
            distance to a target class.
        power: The power to raise the inverse distances to. 1.0 results in a fairly intuitive probability output. Larger
            powers can widen regions of certainty, whereas values between 0 and 1 can widen regions of uncertainty.
        max_prob: The maximum probability to assign to a class estimate when it is distance zero away from the target.
            For numerical stability this must be less than 1.0. We have found that using smaller values like 0.95 can
            lead to natural adversarial robustness.
        pairwise_distance: The distance to any other class when the distance to a target class is zero. For example, if
            you have a perfect match for class 'a', what distance should be reported to class 'b'. If you have a metric
            where this isn't constant, just use an approximate expected distance. In that case `max_prob` will only give
            you approximate control over the true maximum probability.
        eps: The numeric stability constant to be used when d approaches zero. If None then it will be computed using
            `max_prob` and `pairwise_distance`. If not None, then `max_prob` and `pairwise_distance` will be ignored.
    Returns:
        A probability distribution of shape (Batch, C) where smaller distances from `tensor` correspond to larger
        probabilities.
    """
    if eps is None:
        # Derive eps from max_prob and pairwise_distance (see docstring) so
        # that a zero-distance match, clamped to eps, maps to max_prob.
        eps = np.array(pairwise_distance * math.pow((1.0 - max_prob) / (max_prob * (tensor.shape[-1] - 1)), 1 / power),
                       dtype=TENSOR_TO_NP_DTYPE[tensor.dtype])
        # Convert eps to the same backend (torch / tf / numpy) as the input.
        eps = to_tensor(
            eps, target_type='torch' if isinstance(tensor, torch.Tensor) else 'tf' if tf.is_tensor(tensor) else 'np')
        if isinstance(eps, torch.Tensor):
            # NOTE(review): assumes torch inputs live on the first CUDA device
            # when CUDA is available -- confirm for multi-GPU setups.
            eps = eps.to("cuda:0" if torch.cuda.is_available() else "cpu")
    # Clamp distances away from zero, invert with the given power, then
    # normalize each row to sum to 1.
    tensor = maximum(tensor, eps)
    tensor = tensor_pow(1.0 / tensor, power)
    tensor = tensor / reshape(reduce_sum(tensor, axis=-1), shape=[-1, 1])
    return tensor
| 49.571429 | 120 | 0.673022 |
import math
from typing import Optional, TypeVar
import numpy as np
import tensorflow as tf
import torch
from fastestimator.backend._maximum import maximum
from fastestimator.backend._reduce_sum import reduce_sum
from fastestimator.backend._reshape import reshape
from fastestimator.backend._tensor_pow import tensor_pow
from fastestimator.backend._to_tensor import to_tensor
from fastestimator.util.util import TENSOR_TO_NP_DTYPE
Tensor = TypeVar('Tensor', tf.Tensor, torch.Tensor, np.ndarray)
def iwd(tensor: Tensor,
power: float = 1.0,
max_prob: float = 0.95,
pairwise_distance: float = 1.0,
eps: Optional[Tensor] = None) -> Tensor:
if eps is None:
eps = np.array(pairwise_distance * math.pow((1.0 - max_prob) / (max_prob * (tensor.shape[-1] - 1)), 1 / power),
dtype=TENSOR_TO_NP_DTYPE[tensor.dtype])
eps = to_tensor(
eps, target_type='torch' if isinstance(tensor, torch.Tensor) else 'tf' if tf.is_tensor(tensor) else 'np')
if isinstance(eps, torch.Tensor):
eps = eps.to("cuda:0" if torch.cuda.is_available() else "cpu")
tensor = maximum(tensor, eps)
tensor = tensor_pow(1.0 / tensor, power)
tensor = tensor / reshape(reduce_sum(tensor, axis=-1), shape=[-1, 1])
return tensor
| true | true |
f7356ec145250e7c0c4bc92e49c56d5c1c07160a | 66 | py | Python | tests/integration/test_commands.py | orotalt/kforce | f2a26dc53a8f364c9854bf32fcb59b6fb3aaa3d8 | [
"MIT"
] | 9 | 2018-02-21T00:48:43.000Z | 2020-02-15T06:21:58.000Z | tests/integration/test_commands.py | orotalt/kforce | f2a26dc53a8f364c9854bf32fcb59b6fb3aaa3d8 | [
"MIT"
] | 5 | 2018-03-02T06:19:24.000Z | 2021-03-25T21:58:51.000Z | tests/integration/test_commands.py | DomainGroupOSS/kforce | f2a26dc53a8f364c9854bf32fcb59b6fb3aaa3d8 | [
"MIT"
] | 5 | 2018-02-27T06:58:46.000Z | 2018-12-06T22:18:33.000Z | from unittest import TestCase
class TestNew(TestCase):
pass
| 11 | 29 | 0.757576 | from unittest import TestCase
class TestNew(TestCase):
    """Placeholder integration-test case.

    Contains no assertions yet; it exists so the test module imports and
    the test runner discovers the suite.
    """
    pass
| true | true |
f7356fdd90f419efa0300e27fdfd55d90e10cc07 | 2,897 | py | Python | nanpy/bmp180.py | AFTC-1/Arduino-rpi | c46079f937d7e07cc0a930cc7ae278036f50a47d | [
"MIT"
] | 178 | 2015-01-03T11:56:49.000Z | 2021-12-23T14:47:55.000Z | nanpy/bmp180.py | AFTC-1/Arduino-rpi | c46079f937d7e07cc0a930cc7ae278036f50a47d | [
"MIT"
] | 88 | 2015-01-23T09:06:43.000Z | 2021-12-26T19:58:51.000Z | nanpy/bmp180.py | AFTC-1/Arduino-rpi | c46079f937d7e07cc0a930cc7ae278036f50a47d | [
"MIT"
] | 77 | 2015-02-18T17:26:11.000Z | 2021-09-28T02:47:25.000Z | from __future__ import division
import logging
from nanpy.i2c import I2C_Master
from nanpy.memo import memoized
import time
log = logging.getLogger(__name__)
def to_s16(n):
    """Reinterpret the low 16 bits of `n` as a signed 16-bit integer."""
    low = n & 0xFFFF
    return low - 0x10000 if low & 0x8000 else low
class Bmp180(object):
    """Control of BMP180 Digital pressure sensor (I2C).

    Register access is performed through nanpy's I2C_Master; the pressure
    and temperature compensation arithmetic is the integer algorithm from
    the Bosch BMP180 datasheet.
    """
    def __init__(self, wire, address=0x77, oss=3):
        # address: 7-bit I2C address (0x77 is the BMP180 default).
        # oss: oversampling setting, 0..3 (3 = highest resolution).
        self.i2c = I2C_Master(wire)
        self.address = address
        self.oss = oss
    def read_bytes(self, address, count):
        """Read `count` bytes starting at register `address`."""
        self.i2c.send(self.address, [address])
        x = self.i2c.request(self.address, count)
        return x
    def write_byte(self, address, data):
        """Write a single byte `data` to register `address`."""
        self.i2c.send(self.address, [address, data])
    @property
    @memoized
    def eeprom(self):
        """The 22 factory calibration bytes (read once and memoized)."""
        return self.read_bytes(0xaa, 22)
    def read_temperature_raw(self):
        """Trigger a temperature measurement and return the raw value UT."""
        # 0x2E in control register 0xF4 starts a temperature conversion;
        # datasheet max conversion time is 4.5 ms, so wait 5 ms.
        self.write_byte(0xf4, 0x2e)
        time.sleep(0.005)
        MSB, LSB = self.read_bytes(0xf6, 2)
        UT = (MSB << 8) + LSB
        return UT
    def read_pressure_raw(self):
        """Trigger a pressure measurement and return the raw value UP."""
        # 0x34 + (oss << 6) starts a pressure conversion at the configured
        # oversampling. NOTE(review): 5 ms is only long enough for oss=0;
        # higher oss settings need up to ~26 ms per the datasheet -- confirm.
        self.write_byte(0xf4, 0x34 + (self.oss << 6))
        time.sleep(0.005)
        MSB, LSB, XLSB = self.read_bytes(0xf6, 3)
        UP = ((MSB << 16) + (LSB << 8) + XLSB) >> (8 - self.oss)
        return UP
    @classmethod
    def calculate(cls, pressure_raw, temperature_raw, oss, eeprom):
        '''Convert raw readings into compensated values.

        Pure integer implementation of the Bosch datasheet algorithm.

        return: Pascal, Celsius
        '''
        UT = temperature_raw
        UP = pressure_raw
        # Decode the big-endian calibration words from the eeprom bytes.
        def ushort(i):
            return (eeprom[2 * i] << 8) + eeprom[2 * i + 1]
        def short(i):
            return to_s16(ushort(i))
        AC1 = short(0)
        AC2 = short(1)
        AC3 = short(2)
        AC4 = ushort(3)
        AC5 = ushort(4)
        AC6 = ushort(5)
        B1 = short(6)
        B2 = short(7)
        # MB = short(8)
        MC = short(9)
        MD = short(10)
        # Temperature compensation (result T is in 0.1 degC units).
        X1 = ((UT - AC6) * AC5) >> 15
        X2 = (MC << 11) // (X1 + MD)
        B5 = X1 + X2
        T = (B5 + 8) >> 4
        # Pressure compensation (result p is in Pa).
        B6 = B5 - 4000
        X1 = (B2 * ((B6 * B6) >> 12)) >> 11
        X2 = (AC2 * B6) >> 11
        X3 = X1 + X2
        B3 = (((AC1 * 4 + X3) << oss) + 2) // 4
        X1 = (AC3 * B6) >> 13
        X2 = (B1 * ((B6 * B6) >> 12)) >> 16
        X3 = ((X1 + X2) + 2) // 4
        B4 = (AC4 * (X3 + 32768)) >> 15
        B7 = (UP - B3) * (50000 >> oss)
        p = (B7 * 2) // B4 if B7 < 0x80000000 else (B7 // B4) * 2
        X1 = (p >> 8) * (p >> 8)
        X1 = (X1 * 3038) >> 16
        X2 = (-7357 * p) >> 16
        p += (X1 + X2 + 3791) >> 4
        # T is in 0.1 degC steps, so divide by 10 for degrees Celsius.
        return p, T / 10
    def read(self):
        '''Take one measurement.

        return: Pascal, Celsius
        '''
        temperature_raw = self.read_temperature_raw()
        pressure_raw = self.read_pressure_raw()
        return self.calculate(
            pressure_raw,
            temperature_raw,
            self.oss,
            self.eeprom,
        )
| 24.550847 | 67 | 0.491543 | from __future__ import division
import logging
from nanpy.i2c import I2C_Master
from nanpy.memo import memoized
import time
log = logging.getLogger(__name__)
def to_s16(n):
return (n + 2 ** 15) % 2 ** 16 - 2 ** 15
class Bmp180(object):
def __init__(self, wire, address=0x77, oss=3):
self.i2c = I2C_Master(wire)
self.address = address
self.oss = oss
def read_bytes(self, address, count):
self.i2c.send(self.address, [address])
x = self.i2c.request(self.address, count)
return x
def write_byte(self, address, data):
self.i2c.send(self.address, [address, data])
@property
@memoized
def eeprom(self):
return self.read_bytes(0xaa, 22)
def read_temperature_raw(self):
self.write_byte(0xf4, 0x2e)
time.sleep(0.005)
MSB, LSB = self.read_bytes(0xf6, 2)
UT = (MSB << 8) + LSB
return UT
def read_pressure_raw(self):
self.write_byte(0xf4, 0x34 + (self.oss << 6))
time.sleep(0.005)
MSB, LSB, XLSB = self.read_bytes(0xf6, 3)
UP = ((MSB << 16) + (LSB << 8) + XLSB) >> (8 - self.oss)
return UP
@classmethod
def calculate(cls, pressure_raw, temperature_raw, oss, eeprom):
UT = temperature_raw
UP = pressure_raw
def ushort(i):
return (eeprom[2 * i] << 8) + eeprom[2 * i + 1]
def short(i):
return to_s16(ushort(i))
AC1 = short(0)
AC2 = short(1)
AC3 = short(2)
AC4 = ushort(3)
AC5 = ushort(4)
AC6 = ushort(5)
B1 = short(6)
B2 = short(7)
MC = short(9)
MD = short(10)
X1 = ((UT - AC6) * AC5) >> 15
X2 = (MC << 11) // (X1 + MD)
B5 = X1 + X2
T = (B5 + 8) >> 4
B6 = B5 - 4000
X1 = (B2 * ((B6 * B6) >> 12)) >> 11
X2 = (AC2 * B6) >> 11
X3 = X1 + X2
B3 = (((AC1 * 4 + X3) << oss) + 2) // 4
X1 = (AC3 * B6) >> 13
X2 = (B1 * ((B6 * B6) >> 12)) >> 16
X3 = ((X1 + X2) + 2) // 4
B4 = (AC4 * (X3 + 32768)) >> 15
B7 = (UP - B3) * (50000 >> oss)
p = (B7 * 2) // B4 if B7 < 0x80000000 else (B7 // B4) * 2
X1 = (p >> 8) * (p >> 8)
X1 = (X1 * 3038) >> 16
X2 = (-7357 * p) >> 16
p += (X1 + X2 + 3791) >> 4
return p, T / 10
def read(self):
temperature_raw = self.read_temperature_raw()
pressure_raw = self.read_pressure_raw()
return self.calculate(
pressure_raw,
temperature_raw,
self.oss,
self.eeprom,
)
| true | true |
f7357074d41b80404b727e210328e1a6fe909547 | 2,626 | py | Python | test/sconstool/clang/clang_shared_library.py | ptomulik/scons-tool-clang | c6410f96fde9a19e3964c6dd0d802d5be10a425c | [
"Unlicense"
] | 1 | 2017-02-13T02:06:08.000Z | 2017-02-13T02:06:08.000Z | test/sconstool/clang/clang_shared_library.py | ptomulik/scons-tool-clang | c6410f96fde9a19e3964c6dd0d802d5be10a425c | [
"Unlicense"
] | 5 | 2017-06-21T12:50:14.000Z | 2021-02-02T21:50:38.000Z | test/sconstool/clang/clang_shared_library.py | ptomulik/scons-tool-clang | c6410f96fde9a19e3964c6dd0d802d5be10a425c | [
"Unlicense"
] | 1 | 2018-03-24T15:55:34.000Z | 2018-03-24T15:55:34.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014-2020 by Paweł Tomulik <ptomulik@meil.pw.edu.pl>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import TestSCons
import json
_exe = TestSCons._exe
test = TestSCons.TestSCons()
# This is an end-to-end SCons tool test: it requires a real clang binary.
if not test.where_is('clang'):
    test.skip_test("Could not find 'clang', skipping test.\n")
# Install the tool under test into the sandbox's site_scons directory.
test.file_fixture('../../../__init__.py', 'site_scons/site_tools/clang/__init__.py')
# First pass: a probe SConstruct that records the platform-specific object
# and shared-library file names into info.json.
# NOTE(review): the embedded `test.fail_test()` would raise NameError inside
# SCons (no `test` there) -- it only matters on an unexpected platform.
test.write('SConstruct', """\
import SCons.Environment
import json
env = SCons.Environment.Base()
platform = env['PLATFORM']
if platform == 'posix':
    filename_options = ['foo.os']
    libraryname = 'libfoo.so'
elif platform == 'darwin':
    filename_options = ['foo.os']
    libraryname = 'libfoo.dylib'
elif platform == 'win32':
    filename_options = ['foo.obj','foo.os']
    libraryname = 'foo.dll'
else:
    test.fail_test()
info = {'filename_options': filename_options, 'libraryname': libraryname}
with open('info.json', 'wt') as f:
    f.write(json.dumps(info))
""")
test.run()
test.must_exist(test.workpath('info.json'))
with open(test.workpath('info.json')) as f:
    info = json.loads(f.read())
# Second pass: actually build a shared library with the clang tool.
test.write('SConstruct', """\
DefaultEnvironment(tools=[])
env = Environment(tools=['clang', 'link'])
env.SharedLibrary('foo', 'foo.c')
""")
test.write('foo.c', """\
int bar() {
    return 42;
}
""")
test.run()
# Verify that both the intermediate object and the library were produced.
test.must_exist_one_of([test.workpath(f) for f in info['filename_options']])
test.must_exist(test.workpath(info['libraryname']))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 28.857143 | 84 | 0.718203 |
import TestSCons
import json
_exe = TestSCons._exe
test = TestSCons.TestSCons()
if not test.where_is('clang'):
test.skip_test("Could not find 'clang', skipping test.\n")
test.file_fixture('../../../__init__.py', 'site_scons/site_tools/clang/__init__.py')
test.write('SConstruct', """\
import SCons.Environment
import json
env = SCons.Environment.Base()
platform = env['PLATFORM']
if platform == 'posix':
filename_options = ['foo.os']
libraryname = 'libfoo.so'
elif platform == 'darwin':
filename_options = ['foo.os']
libraryname = 'libfoo.dylib'
elif platform == 'win32':
filename_options = ['foo.obj','foo.os']
libraryname = 'foo.dll'
else:
test.fail_test()
info = {'filename_options': filename_options, 'libraryname': libraryname}
with open('info.json', 'wt') as f:
f.write(json.dumps(info))
""")
test.run()
test.must_exist(test.workpath('info.json'))
with open(test.workpath('info.json')) as f:
info = json.loads(f.read())
test.write('SConstruct', """\
DefaultEnvironment(tools=[])
env = Environment(tools=['clang', 'link'])
env.SharedLibrary('foo', 'foo.c')
""")
test.write('foo.c', """\
int bar() {
return 42;
}
""")
test.run()
test.must_exist_one_of([test.workpath(f) for f in info['filename_options']])
test.must_exist(test.workpath(info['libraryname']))
test.pass_test()
| true | true |
f735713a14eda3b9105cce638f1ac64b5f5e74b9 | 14,962 | py | Python | artifact_sdk/model/container/volume_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | 5 | 2019-07-31T04:11:05.000Z | 2021-01-07T03:23:20.000Z | artifact_sdk/model/container/volume_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | artifact_sdk/model/container/volume_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: volume.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from artifact_sdk.model.container import key_to_path_pb2 as artifact__sdk_dot_model_dot_container_dot_key__to__path__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='volume.proto',
package='container',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'),
serialized_pb=_b('\n\x0cvolume.proto\x12\tcontainer\x1a.artifact_sdk/model/container/key_to_path.proto\"\xfa\x04\n\x06Volume\x12\x0c\n\x04name\x18\x01 \x01(\t\x12,\n\x08hostPath\x18\x02 \x01(\x0b\x32\x1a.container.Volume.HostPath\x12,\n\x08\x65mptyDir\x18\x03 \x01(\x0b\x32\x1a.container.Volume.EmptyDir\x12(\n\x06secret\x18\x04 \x01(\x0b\x32\x18.container.Volume.Secret\x12.\n\tconfigMap\x18\x05 \x01(\x0b\x32\x1b.container.Volume.ConfigMap\x12\x46\n\x15persistentVolumeClaim\x18\x06 \x01(\x0b\x32\'.container.Volume.PersistentVolumeClaim\x1a&\n\x08HostPath\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x1a-\n\x08\x45mptyDir\x12\x0e\n\x06medium\x18\x01 \x01(\t\x12\x11\n\tsizeLimit\x18\x02 \x01(\t\x1ah\n\x06Secret\x12\x12\n\nsecretName\x18\x01 \x01(\t\x12#\n\x05items\x18\x02 \x03(\x0b\x32\x14.container.KeyToPath\x12\x13\n\x0b\x64\x65\x66\x61ultMode\x18\x03 \x01(\x05\x12\x10\n\x08optional\x18\x04 \x01(\x08\x1a\x65\n\tConfigMap\x12\x0c\n\x04name\x18\x01 \x01(\t\x12#\n\x05items\x18\x02 \x03(\x0b\x32\x14.container.KeyToPath\x12\x13\n\x0b\x64\x65\x66\x61ultMode\x18\x03 \x01(\x05\x12\x10\n\x08optional\x18\x04 \x01(\x08\x1a<\n\x15PersistentVolumeClaim\x12\x11\n\tclaimName\x18\x01 \x01(\t\x12\x10\n\x08readOnly\x18\x02 \x01(\x08\x42\x45ZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3')
,
dependencies=[artifact__sdk_dot_model_dot_container_dot_key__to__path__pb2.DESCRIPTOR,])
_VOLUME_HOSTPATH = _descriptor.Descriptor(
name='HostPath',
full_name='container.Volume.HostPath',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='path', full_name='container.Volume.HostPath.path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='container.Volume.HostPath.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=354,
serialized_end=392,
)
_VOLUME_EMPTYDIR = _descriptor.Descriptor(
name='EmptyDir',
full_name='container.Volume.EmptyDir',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='medium', full_name='container.Volume.EmptyDir.medium', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sizeLimit', full_name='container.Volume.EmptyDir.sizeLimit', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=394,
serialized_end=439,
)
_VOLUME_SECRET = _descriptor.Descriptor(
name='Secret',
full_name='container.Volume.Secret',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='secretName', full_name='container.Volume.Secret.secretName', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='items', full_name='container.Volume.Secret.items', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='defaultMode', full_name='container.Volume.Secret.defaultMode', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='optional', full_name='container.Volume.Secret.optional', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=441,
serialized_end=545,
)
_VOLUME_CONFIGMAP = _descriptor.Descriptor(
name='ConfigMap',
full_name='container.Volume.ConfigMap',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='container.Volume.ConfigMap.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='items', full_name='container.Volume.ConfigMap.items', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='defaultMode', full_name='container.Volume.ConfigMap.defaultMode', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='optional', full_name='container.Volume.ConfigMap.optional', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=547,
serialized_end=648,
)
_VOLUME_PERSISTENTVOLUMECLAIM = _descriptor.Descriptor(
name='PersistentVolumeClaim',
full_name='container.Volume.PersistentVolumeClaim',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='claimName', full_name='container.Volume.PersistentVolumeClaim.claimName', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='readOnly', full_name='container.Volume.PersistentVolumeClaim.readOnly', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=650,
serialized_end=710,
)
_VOLUME = _descriptor.Descriptor(
name='Volume',
full_name='container.Volume',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='container.Volume.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hostPath', full_name='container.Volume.hostPath', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='emptyDir', full_name='container.Volume.emptyDir', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='secret', full_name='container.Volume.secret', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='configMap', full_name='container.Volume.configMap', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='persistentVolumeClaim', full_name='container.Volume.persistentVolumeClaim', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_VOLUME_HOSTPATH, _VOLUME_EMPTYDIR, _VOLUME_SECRET, _VOLUME_CONFIGMAP, _VOLUME_PERSISTENTVOLUMECLAIM, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=76,
serialized_end=710,
)
# --- Generated descriptor wiring (protoc output; do not edit by hand) ---
# Link each nested message descriptor back to its containing Volume message,
# resolve cross-file message-typed fields (Secret.items / ConfigMap.items point
# at container.KeyToPath from key_to_path_pb2), and register the top-level
# Volume message with the file descriptor and the default symbol database.
_VOLUME_HOSTPATH.containing_type = _VOLUME
_VOLUME_EMPTYDIR.containing_type = _VOLUME
_VOLUME_SECRET.fields_by_name['items'].message_type = artifact__sdk_dot_model_dot_container_dot_key__to__path__pb2._KEYTOPATH
_VOLUME_SECRET.containing_type = _VOLUME
_VOLUME_CONFIGMAP.fields_by_name['items'].message_type = artifact__sdk_dot_model_dot_container_dot_key__to__path__pb2._KEYTOPATH
_VOLUME_CONFIGMAP.containing_type = _VOLUME
_VOLUME_PERSISTENTVOLUMECLAIM.containing_type = _VOLUME
# Message-typed fields of Volume itself point at the nested descriptors above.
_VOLUME.fields_by_name['hostPath'].message_type = _VOLUME_HOSTPATH
_VOLUME.fields_by_name['emptyDir'].message_type = _VOLUME_EMPTYDIR
_VOLUME.fields_by_name['secret'].message_type = _VOLUME_SECRET
_VOLUME.fields_by_name['configMap'].message_type = _VOLUME_CONFIGMAP
_VOLUME.fields_by_name['persistentVolumeClaim'].message_type = _VOLUME_PERSISTENTVOLUMECLAIM
DESCRIPTOR.message_types_by_name['Volume'] = _VOLUME
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Volume = _reflection.GeneratedProtocolMessageType('Volume', (_message.Message,), {
'HostPath' : _reflection.GeneratedProtocolMessageType('HostPath', (_message.Message,), {
'DESCRIPTOR' : _VOLUME_HOSTPATH,
'__module__' : 'volume_pb2'
# @@protoc_insertion_point(class_scope:container.Volume.HostPath)
})
,
'EmptyDir' : _reflection.GeneratedProtocolMessageType('EmptyDir', (_message.Message,), {
'DESCRIPTOR' : _VOLUME_EMPTYDIR,
'__module__' : 'volume_pb2'
# @@protoc_insertion_point(class_scope:container.Volume.EmptyDir)
})
,
'Secret' : _reflection.GeneratedProtocolMessageType('Secret', (_message.Message,), {
'DESCRIPTOR' : _VOLUME_SECRET,
'__module__' : 'volume_pb2'
# @@protoc_insertion_point(class_scope:container.Volume.Secret)
})
,
'ConfigMap' : _reflection.GeneratedProtocolMessageType('ConfigMap', (_message.Message,), {
'DESCRIPTOR' : _VOLUME_CONFIGMAP,
'__module__' : 'volume_pb2'
# @@protoc_insertion_point(class_scope:container.Volume.ConfigMap)
})
,
'PersistentVolumeClaim' : _reflection.GeneratedProtocolMessageType('PersistentVolumeClaim', (_message.Message,), {
'DESCRIPTOR' : _VOLUME_PERSISTENTVOLUMECLAIM,
'__module__' : 'volume_pb2'
# @@protoc_insertion_point(class_scope:container.Volume.PersistentVolumeClaim)
})
,
'DESCRIPTOR' : _VOLUME,
'__module__' : 'volume_pb2'
# @@protoc_insertion_point(class_scope:container.Volume)
})
# Register the generated Volume class and each of its nested message classes
# with the default symbol database so they can be looked up by full name
# (e.g. via symbol_database.Default().GetSymbol('container.Volume')).
_sym_db.RegisterMessage(Volume)
_sym_db.RegisterMessage(Volume.HostPath)
_sym_db.RegisterMessage(Volume.EmptyDir)
_sym_db.RegisterMessage(Volume.Secret)
_sym_db.RegisterMessage(Volume.ConfigMap)
_sym_db.RegisterMessage(Volume.PersistentVolumeClaim)
# File-level options were consumed when building DESCRIPTOR; clear the cache.
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 40.005348 | 1,346 | 0.744352 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from artifact_sdk.model.container import key_to_path_pb2 as artifact__sdk_dot_model_dot_container_dot_key__to__path__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='volume.proto',
package='container',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'),
serialized_pb=_b('\n\x0cvolume.proto\x12\tcontainer\x1a.artifact_sdk/model/container/key_to_path.proto\"\xfa\x04\n\x06Volume\x12\x0c\n\x04name\x18\x01 \x01(\t\x12,\n\x08hostPath\x18\x02 \x01(\x0b\x32\x1a.container.Volume.HostPath\x12,\n\x08\x65mptyDir\x18\x03 \x01(\x0b\x32\x1a.container.Volume.EmptyDir\x12(\n\x06secret\x18\x04 \x01(\x0b\x32\x18.container.Volume.Secret\x12.\n\tconfigMap\x18\x05 \x01(\x0b\x32\x1b.container.Volume.ConfigMap\x12\x46\n\x15persistentVolumeClaim\x18\x06 \x01(\x0b\x32\'.container.Volume.PersistentVolumeClaim\x1a&\n\x08HostPath\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x1a-\n\x08\x45mptyDir\x12\x0e\n\x06medium\x18\x01 \x01(\t\x12\x11\n\tsizeLimit\x18\x02 \x01(\t\x1ah\n\x06Secret\x12\x12\n\nsecretName\x18\x01 \x01(\t\x12#\n\x05items\x18\x02 \x03(\x0b\x32\x14.container.KeyToPath\x12\x13\n\x0b\x64\x65\x66\x61ultMode\x18\x03 \x01(\x05\x12\x10\n\x08optional\x18\x04 \x01(\x08\x1a\x65\n\tConfigMap\x12\x0c\n\x04name\x18\x01 \x01(\t\x12#\n\x05items\x18\x02 \x03(\x0b\x32\x14.container.KeyToPath\x12\x13\n\x0b\x64\x65\x66\x61ultMode\x18\x03 \x01(\x05\x12\x10\n\x08optional\x18\x04 \x01(\x08\x1a<\n\x15PersistentVolumeClaim\x12\x11\n\tclaimName\x18\x01 \x01(\t\x12\x10\n\x08readOnly\x18\x02 \x01(\x08\x42\x45ZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3')
,
dependencies=[artifact__sdk_dot_model_dot_container_dot_key__to__path__pb2.DESCRIPTOR,])
_VOLUME_HOSTPATH = _descriptor.Descriptor(
name='HostPath',
full_name='container.Volume.HostPath',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='path', full_name='container.Volume.HostPath.path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='container.Volume.HostPath.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=354,
serialized_end=392,
)
_VOLUME_EMPTYDIR = _descriptor.Descriptor(
name='EmptyDir',
full_name='container.Volume.EmptyDir',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='medium', full_name='container.Volume.EmptyDir.medium', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sizeLimit', full_name='container.Volume.EmptyDir.sizeLimit', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=394,
serialized_end=439,
)
_VOLUME_SECRET = _descriptor.Descriptor(
name='Secret',
full_name='container.Volume.Secret',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='secretName', full_name='container.Volume.Secret.secretName', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='items', full_name='container.Volume.Secret.items', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='defaultMode', full_name='container.Volume.Secret.defaultMode', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='optional', full_name='container.Volume.Secret.optional', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=441,
serialized_end=545,
)
_VOLUME_CONFIGMAP = _descriptor.Descriptor(
name='ConfigMap',
full_name='container.Volume.ConfigMap',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='container.Volume.ConfigMap.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='items', full_name='container.Volume.ConfigMap.items', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='defaultMode', full_name='container.Volume.ConfigMap.defaultMode', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='optional', full_name='container.Volume.ConfigMap.optional', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=547,
serialized_end=648,
)
_VOLUME_PERSISTENTVOLUMECLAIM = _descriptor.Descriptor(
name='PersistentVolumeClaim',
full_name='container.Volume.PersistentVolumeClaim',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='claimName', full_name='container.Volume.PersistentVolumeClaim.claimName', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='readOnly', full_name='container.Volume.PersistentVolumeClaim.readOnly', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=650,
serialized_end=710,
)
_VOLUME = _descriptor.Descriptor(
name='Volume',
full_name='container.Volume',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='container.Volume.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hostPath', full_name='container.Volume.hostPath', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='emptyDir', full_name='container.Volume.emptyDir', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='secret', full_name='container.Volume.secret', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='configMap', full_name='container.Volume.configMap', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='persistentVolumeClaim', full_name='container.Volume.persistentVolumeClaim', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_VOLUME_HOSTPATH, _VOLUME_EMPTYDIR, _VOLUME_SECRET, _VOLUME_CONFIGMAP, _VOLUME_PERSISTENTVOLUMECLAIM, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=76,
serialized_end=710,
)
_VOLUME_HOSTPATH.containing_type = _VOLUME
_VOLUME_EMPTYDIR.containing_type = _VOLUME
_VOLUME_SECRET.fields_by_name['items'].message_type = artifact__sdk_dot_model_dot_container_dot_key__to__path__pb2._KEYTOPATH
_VOLUME_SECRET.containing_type = _VOLUME
_VOLUME_CONFIGMAP.fields_by_name['items'].message_type = artifact__sdk_dot_model_dot_container_dot_key__to__path__pb2._KEYTOPATH
_VOLUME_CONFIGMAP.containing_type = _VOLUME
_VOLUME_PERSISTENTVOLUMECLAIM.containing_type = _VOLUME
_VOLUME.fields_by_name['hostPath'].message_type = _VOLUME_HOSTPATH
_VOLUME.fields_by_name['emptyDir'].message_type = _VOLUME_EMPTYDIR
_VOLUME.fields_by_name['secret'].message_type = _VOLUME_SECRET
_VOLUME.fields_by_name['configMap'].message_type = _VOLUME_CONFIGMAP
_VOLUME.fields_by_name['persistentVolumeClaim'].message_type = _VOLUME_PERSISTENTVOLUMECLAIM
DESCRIPTOR.message_types_by_name['Volume'] = _VOLUME
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Volume = _reflection.GeneratedProtocolMessageType('Volume', (_message.Message,), {
'HostPath' : _reflection.GeneratedProtocolMessageType('HostPath', (_message.Message,), {
'DESCRIPTOR' : _VOLUME_HOSTPATH,
'__module__' : 'volume_pb2'
# @@protoc_insertion_point(class_scope:container.Volume.HostPath)
})
,
'EmptyDir' : _reflection.GeneratedProtocolMessageType('EmptyDir', (_message.Message,), {
'DESCRIPTOR' : _VOLUME_EMPTYDIR,
'__module__' : 'volume_pb2'
# @@protoc_insertion_point(class_scope:container.Volume.EmptyDir)
})
,
'Secret' : _reflection.GeneratedProtocolMessageType('Secret', (_message.Message,), {
'DESCRIPTOR' : _VOLUME_SECRET,
'__module__' : 'volume_pb2'
# @@protoc_insertion_point(class_scope:container.Volume.Secret)
})
,
'ConfigMap' : _reflection.GeneratedProtocolMessageType('ConfigMap', (_message.Message,), {
'DESCRIPTOR' : _VOLUME_CONFIGMAP,
'__module__' : 'volume_pb2'
# @@protoc_insertion_point(class_scope:container.Volume.ConfigMap)
})
,
'PersistentVolumeClaim' : _reflection.GeneratedProtocolMessageType('PersistentVolumeClaim', (_message.Message,), {
'DESCRIPTOR' : _VOLUME_PERSISTENTVOLUMECLAIM,
'__module__' : 'volume_pb2'
# @@protoc_insertion_point(class_scope:container.Volume.PersistentVolumeClaim)
})
,
'DESCRIPTOR' : _VOLUME,
'__module__' : 'volume_pb2'
# @@protoc_insertion_point(class_scope:container.Volume)
})
_sym_db.RegisterMessage(Volume)
_sym_db.RegisterMessage(Volume.HostPath)
_sym_db.RegisterMessage(Volume.EmptyDir)
_sym_db.RegisterMessage(Volume.Secret)
_sym_db.RegisterMessage(Volume.ConfigMap)
_sym_db.RegisterMessage(Volume.PersistentVolumeClaim)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| true | true |
f7357140642f55ff5a3b3396580fc2a5411b1546 | 3,732 | py | Python | src/spaceone/identity/info/project_group_info.py | Jeoungseungho/identity | 8fa1c8d21952fb7b313624e632d98e99e5bf0def | [
"Apache-2.0"
] | null | null | null | src/spaceone/identity/info/project_group_info.py | Jeoungseungho/identity | 8fa1c8d21952fb7b313624e632d98e99e5bf0def | [
"Apache-2.0"
] | null | null | null | src/spaceone/identity/info/project_group_info.py | Jeoungseungho/identity | 8fa1c8d21952fb7b313624e632d98e99e5bf0def | [
"Apache-2.0"
] | null | null | null | import functools
from spaceone.api.core.v1 import tag_pb2
from spaceone.api.identity.v1 import project_group_pb2
from spaceone.core.pygrpc.message_type import *
from spaceone.identity.model.project_model import Project
from spaceone.identity.model.project_group_model import ProjectGroup
from spaceone.identity.info.role_info import RoleInfo
__all__ = ['ProjectGroupInfo', 'ProjectGroupsInfo', 'ProjectGroupRoleBindingInfo', 'ProjectGroupRoleBindingsInfo',
'ProjectGroupProjectsInfo']
def ProjectGroupInfo(project_group_vo: ProjectGroup, minimal=False):
    """Serialize a ProjectGroup model into a gRPC ProjectGroupInfo message.

    Args:
        project_group_vo: project group model instance to serialize.
        minimal: when True, emit only the id and name fields.

    Returns:
        project_group_pb2.ProjectGroupInfo
    """
    info = {
        'project_group_id': project_group_vo.project_group_id,
        'name': project_group_vo.name,
    }

    if not minimal:
        parent_vo = project_group_vo.parent_project_group
        if parent_vo:
            # Parent is rendered minimally to avoid recursing up the whole tree.
            info['parent_project_group_info'] = ProjectGroupInfo(parent_vo, minimal=True)

        info['tags'] = [tag_pb2.Tag(key=tag.key, value=tag.value)
                        for tag in project_group_vo.tags]
        info['domain_id'] = project_group_vo.domain_id
        info['created_by'] = project_group_vo.created_by
        info['created_at'] = change_timestamp_type(project_group_vo.created_at)

    # Temporary code for DB migration: backfill the denormalized
    # parent_project_group_id field when it is missing but a parent reference
    # exists. NOTE(review): this mutates the model as a serialization side
    # effect — intentional for the migration window only.
    if not project_group_vo.parent_project_group_id and project_group_vo.parent_project_group:
        project_group_vo.update(
            {'parent_project_group_id': project_group_vo.parent_project_group.project_group_id})

    return project_group_pb2.ProjectGroupInfo(**info)
def ProjectGroupsInfo(project_group_vos, total_count, **kwargs):
    """Serialize many project groups plus a total count into a list message.

    Extra keyword arguments (e.g. ``minimal``) are forwarded to
    ProjectGroupInfo for each item.
    """
    results = [ProjectGroupInfo(vo, **kwargs) for vo in project_group_vos]
    return project_group_pb2.ProjectGroupsInfo(results=results, total_count=total_count)
def ProjectGroupRoleBindingInfo(role_binding_vo):
    """Serialize a role binding model into a ProjectGroupRoleBindingInfo message.

    The referenced role and project group are rendered in minimal form.
    """
    return project_group_pb2.ProjectGroupRoleBindingInfo(
        role_binding_id=role_binding_vo.role_binding_id,
        resource_type=role_binding_vo.resource_type,
        resource_id=role_binding_vo.resource_id,
        role_info=RoleInfo(role_binding_vo.role, minimal=True),
        project_group_info=ProjectGroupInfo(role_binding_vo.project_group, minimal=True),
        labels=role_binding_vo.labels,
        tags=[tag_pb2.Tag(key=tag.key, value=tag.value) for tag in role_binding_vo.tags],
        domain_id=role_binding_vo.domain_id,
        created_at=change_timestamp_type(role_binding_vo.created_at),
    )
def ProjectGroupRoleBindingsInfo(role_binding_vos, total_count, **kwargs):
    """Serialize many role bindings plus a total count into a list message.

    ``**kwargs`` is accepted for signature symmetry with the other list
    serializers in this module but is not used here (the per-item serializer
    takes no options).
    """
    results = [ProjectGroupRoleBindingInfo(vo) for vo in role_binding_vos]
    return project_group_pb2.ProjectGroupRoleBindingsInfo(results=results, total_count=total_count)
def ProjectGroupProjectInfo(project_vo: Project, minimal=False):
    """Serialize a Project model into a ProjectGroupProjectInfo message.

    Args:
        project_vo: project model instance to serialize.
        minimal: when True, emit only the id and name fields.

    Returns:
        project_group_pb2.ProjectGroupProjectInfo
    """
    info = {
        'project_id': project_vo.project_id,
        'name': project_vo.name,
    }

    if not minimal:
        group_vo = project_vo.project_group
        if group_vo:
            # Owning group is rendered minimally to keep the payload small.
            info['project_group_info'] = ProjectGroupInfo(group_vo, minimal=True)

        info['tags'] = [tag_pb2.Tag(key=tag.key, value=tag.value)
                        for tag in project_vo.tags]
        info['domain_id'] = project_vo.domain_id
        info['created_by'] = project_vo.created_by
        info['created_at'] = change_timestamp_type(project_vo.created_at)

    return project_group_pb2.ProjectGroupProjectInfo(**info)
def ProjectGroupProjectsInfo(project_vos, total_count, **kwargs):
    """Serialize many projects plus a total count into a list message.

    Extra keyword arguments (e.g. ``minimal``) are forwarded to
    ProjectGroupProjectInfo for each item.
    """
    results = [ProjectGroupProjectInfo(vo, **kwargs) for vo in project_vos]
    return project_group_pb2.ProjectGroupProjectsInfo(results=results, total_count=total_count)
| 41.010989 | 120 | 0.733923 | import functools
from spaceone.api.core.v1 import tag_pb2
from spaceone.api.identity.v1 import project_group_pb2
from spaceone.core.pygrpc.message_type import *
from spaceone.identity.model.project_model import Project
from spaceone.identity.model.project_group_model import ProjectGroup
from spaceone.identity.info.role_info import RoleInfo
__all__ = ['ProjectGroupInfo', 'ProjectGroupsInfo', 'ProjectGroupRoleBindingInfo', 'ProjectGroupRoleBindingsInfo',
'ProjectGroupProjectsInfo']
def ProjectGroupInfo(project_group_vo: ProjectGroup, minimal=False):
info = {
'project_group_id': project_group_vo.project_group_id,
'name': project_group_vo.name
}
if not minimal:
if project_group_vo.parent_project_group:
info.update({
'parent_project_group_info': ProjectGroupInfo(project_group_vo.parent_project_group, minimal=True)
})
info.update({
'tags': [tag_pb2.Tag(key=tag.key, value=tag.value) for tag in project_group_vo.tags],
'domain_id': project_group_vo.domain_id,
'created_by': project_group_vo.created_by,
'created_at': change_timestamp_type(project_group_vo.created_at)
})
if not project_group_vo.parent_project_group_id and project_group_vo.parent_project_group:
project_group_vo.update({'parent_project_group_id': project_group_vo.parent_project_group.project_group_id})
return project_group_pb2.ProjectGroupInfo(**info)
def ProjectGroupsInfo(project_group_vos, total_count, **kwargs):
    """Build a paginated ProjectGroupsInfo message from project-group VOs."""
    entries = [ProjectGroupInfo(vo, **kwargs) for vo in project_group_vos]
    return project_group_pb2.ProjectGroupsInfo(results=entries, total_count=total_count)
def ProjectGroupRoleBindingInfo(role_binding_vo):
    """Convert a project-group role-binding VO into its gRPC message."""
    vo = role_binding_vo
    tag_messages = [tag_pb2.Tag(key=tag.key, value=tag.value) for tag in vo.tags]
    return project_group_pb2.ProjectGroupRoleBindingInfo(
        role_binding_id=vo.role_binding_id,
        resource_type=vo.resource_type,
        resource_id=vo.resource_id,
        role_info=RoleInfo(vo.role, minimal=True),
        project_group_info=ProjectGroupInfo(vo.project_group, minimal=True),
        labels=vo.labels,
        tags=tag_messages,
        domain_id=vo.domain_id,
        created_at=change_timestamp_type(vo.created_at),
    )
def ProjectGroupRoleBindingsInfo(role_binding_vos, total_count, **kwargs):
    """Build a paginated ProjectGroupRoleBindingsInfo message.

    NOTE: ``**kwargs`` is accepted for signature parity with the sibling
    list helpers but is intentionally not forwarded —
    ``ProjectGroupRoleBindingInfo`` takes no options.
    """
    entries = [ProjectGroupRoleBindingInfo(vo) for vo in role_binding_vos]
    return project_group_pb2.ProjectGroupRoleBindingsInfo(results=entries, total_count=total_count)
def ProjectGroupProjectInfo(project_vo: Project, minimal=False):
    """Convert a Project VO into a gRPC ProjectGroupProjectInfo message.

    With ``minimal=True`` only the id and name are included; otherwise the
    owning project group (minimal form), tags, domain, creator and creation
    timestamp are added as well.
    """
    vo = project_vo
    info = {'project_id': vo.project_id, 'name': vo.name}
    if not minimal:
        if vo.project_group:
            info['project_group_info'] = ProjectGroupInfo(vo.project_group, minimal=True)
        info['tags'] = [tag_pb2.Tag(key=tag.key, value=tag.value) for tag in vo.tags]
        info['domain_id'] = vo.domain_id
        info['created_by'] = vo.created_by
        info['created_at'] = change_timestamp_type(vo.created_at)
    return project_group_pb2.ProjectGroupProjectInfo(**info)
def ProjectGroupProjectsInfo(project_vos, total_count, **kwargs):
    """Build a paginated ProjectGroupProjectsInfo message from project VOs.

    Extra keyword arguments (e.g. ``minimal``) are forwarded to
    ``ProjectGroupProjectInfo`` for every entry.
    """
    entries = [ProjectGroupProjectInfo(vo, **kwargs) for vo in project_vos]
    return project_group_pb2.ProjectGroupProjectsInfo(results=entries, total_count=total_count)
| true | true |
f7357204b9431e5eb3909f94e42769a597561c0a | 7,020 | py | Python | evennia/utils/tests/test_create_functions.py | lootcrawl/evennia | a5f736ca0ff89e4f7da7d3f89a8886f1ea3698aa | [
"BSD-3-Clause"
] | null | null | null | evennia/utils/tests/test_create_functions.py | lootcrawl/evennia | a5f736ca0ff89e4f7da7d3f89a8886f1ea3698aa | [
"BSD-3-Clause"
] | null | null | null | evennia/utils/tests/test_create_functions.py | lootcrawl/evennia | a5f736ca0ff89e4f7da7d3f89a8886f1ea3698aa | [
"BSD-3-Clause"
] | null | null | null | """
Tests of create functions
"""
from django.test import TestCase
from evennia.utils.test_resources import EvenniaTest
from evennia.scripts.scripts import DefaultScript
from evennia.utils import create
class TestCreateScript(EvenniaTest):
    """Tests of `create.create_script` timer behaviour.

    Each test defines a throwaway script typeclass inline and checks how
    different interval/repeats/start_delay combinations are handled.
    """
    def test_create_script(self):
        """A plain repeating script is created with its configured attributes."""
        class TestScriptA(DefaultScript):
            def at_script_creation(self):
                self.key = "test_script"
                self.interval = 10
                self.persistent = False
        script = create.create_script(TestScriptA, key="test_script")
        assert script is not None
        assert script.interval == 10
        assert script.key == "test_script"
        # stop the timer so it does not keep firing after the test
        script.stop()
    def test_create_script_w_repeats_equal_1(self):
        """repeats=1 without start delay: script survives, timer already spent."""
        class TestScriptB(DefaultScript):
            def at_script_creation(self):
                self.key = "test_script"
                self.interval = 10
                self.repeats = 1
                self.persistent = False
        # script should still exist even though repeats=1, start_delay=False
        script = create.create_script(TestScriptB, key="test_script")
        assert script
        # but the timer should be inactive now
        assert not script.is_active
    def test_create_script_w_repeats_equal_1_persisted(self):
        """Same as above but persistent=True; the timer is still stopped."""
        class TestScriptB1(DefaultScript):
            def at_script_creation(self):
                self.key = "test_script"
                self.interval = 10
                self.repeats = 1
                self.persistent = True
        # script is already stopped (repeats=1, start_delay=False)
        script = create.create_script(TestScriptB1, key="test_script")
        assert script
        assert not script.is_active
    def test_create_script_w_repeats_equal_2(self):
        """repeats=2 keeps the script (and its attributes) alive after creation."""
        class TestScriptC(DefaultScript):
            def at_script_creation(self):
                self.key = "test_script"
                self.interval = 10
                self.repeats = 2
                self.persistent = False
        script = create.create_script(TestScriptC, key="test_script")
        assert script is not None
        assert script.interval == 10
        assert script.repeats == 2
        assert script.key == "test_script"
        script.stop()
    def test_create_script_w_repeats_equal_1_and_delayed(self):
        """repeats=1 with start_delay=True: script still exists after creation."""
        class TestScriptD(DefaultScript):
            def at_script_creation(self):
                self.key = "test_script"
                self.interval = 10
                self.start_delay = True
                self.repeats = 1
                self.persistent = False
        script = create.create_script(TestScriptD, key="test_script")
        assert script is not None
        assert script.interval == 10
        assert script.repeats == 1
        assert script.key == "test_script"
        script.stop()
class TestCreateHelpEntry(TestCase):
    """Tests of `create.create_help_entry`."""
    help_entry = """
    Qui laborum voluptas quis commodi ipsum quo temporibus eum. Facilis
    assumenda facilis architecto in corrupti. Est placeat eum amet qui beatae
    reiciendis. Accusamus vel aspernatur ab ex. Quam expedita sed expedita
    consequuntur est dolorum non exercitationem.
    Ipsa vel ut dolorem voluptatem adipisci velit. Sit odit temporibus mollitia
    illum ipsam placeat. Rem et ipsum dolor. Hic eum tempore excepturi qui veniam
    magni.
    Excepturi quam repellendus inventore excepturi fugiat quo quasi molestias.
    Nostrum ut assumenda enim a. Repellat quis omnis est officia accusantium. Fugit
    facere qui aperiam. Perspiciatis commodi dolores ipsam nemo consequatur
    quisquam qui non. Adipisci et molestias voluptatum est sed fugiat facere.
    """
    def test_create_help_entry__simple(self):
        """Entry is created with key, text and category; duplicates refused."""
        entry = create.create_help_entry("testentry", self.help_entry, category="Testing")
        self.assertEqual(entry.key, "testentry")
        self.assertEqual(entry.entrytext, self.help_entry)
        self.assertEqual(entry.help_category, "Testing")
        # creating same-named entry should not work (must edit existing)
        self.assertFalse(create.create_help_entry("testentry", "testtext"))
    def test_create_help_entry__complex(self):
        """Locks, aliases and tags supplied at creation are all stored."""
        locks = "foo:false();bar:true()"
        aliases = ["foo", "bar", "tst"]
        tags = [("tag1", "help"), ("tag2", "help"), ("tag3", "help")]
        entry = create.create_help_entry(
            "testentry",
            self.help_entry,
            category="Testing",
            locks=locks,
            aliases=aliases,
            tags=tags,
        )
        self.assertTrue(all(lock in entry.locks.all() for lock in locks.split(";")))
        # Bugfix: list.sort() returns None, so the old assertion compared
        # None == None and always passed. Compare sorted copies instead.
        self.assertEqual(sorted(entry.aliases.all()), sorted(aliases))
        self.assertEqual(entry.tags.all(return_key_and_category=True), tags)
class TestCreateMessage(EvenniaTest):
    """Tests of `create.create_message` sender/receiver handling."""
    msgtext = """
    Qui laborum voluptas quis commodi ipsum quo temporibus eum. Facilis
    assumenda facilis architecto in corrupti. Est placeat eum amet qui beatae
    reiciendis. Accusamus vel aspernatur ab ex. Quam expedita sed expedita
    consequuntur est dolorum non exercitationem.
    """
    def test_create_msg__simple(self):
        """Message stores text/header; the assertions below show that assigning
        to msg.senders / msg.receivers appends rather than replaces."""
        msg = create.create_message(self.char1, self.msgtext, header="TestHeader")
        msg.senders = "ExternalSender"
        msg.receivers = self.char2
        msg.receivers = "ExternalReceiver"
        self.assertEqual(msg.message, self.msgtext)
        self.assertEqual(msg.header, "TestHeader")
        self.assertEqual(msg.senders, [self.char1, "ExternalSender"])
        self.assertEqual(msg.receivers, [self.char2, "ExternalReceiver"])
    def test_create_msg__custom(self):
        """Receivers, locks and tags supplied at creation are all stored."""
        locks = "foo:false();bar:true()"
        tags = ["tag1", "tag2", "tag3"]
        msg = create.create_message(
            self.char1,
            self.msgtext,
            header="TestHeader",
            receivers=[self.char1, self.char2, "ExternalReceiver"],
            locks=locks,
            tags=tags,
        )
        self.assertEqual(set(msg.receivers), set([self.char1, self.char2, "ExternalReceiver"]))
        self.assertTrue(all(lock in msg.locks.all() for lock in locks.split(";")))
        self.assertEqual(msg.tags.all(), tags)
class TestCreateChannel(TestCase):
    """Tests of `create.create_channel`."""
    def test_create_channel__simple(self):
        """Channel is created with the given key and description."""
        chan = create.create_channel("TestChannel1", desc="Testing channel")
        self.assertEqual(chan.key, "TestChannel1")
        self.assertEqual(chan.db.desc, "Testing channel")
    def test_create_channel__complex(self):
        """Locks, tags and aliases supplied at creation are all stored."""
        locks = "foo:false();bar:true()"
        tags = ["tag1", "tag2", "tag3"]
        aliases = ["foo", "bar", "tst"]
        chan = create.create_channel(
            "TestChannel2", desc="Testing channel", aliases=aliases, locks=locks, tags=tags
        )
        self.assertTrue(all(lock in chan.locks.all() for lock in locks.split(";")))
        self.assertEqual(chan.tags.all(), tags)
        # Bugfix: list.sort() returns None, so the old assertion compared
        # None == None and always passed. Compare sorted copies instead.
        self.assertEqual(sorted(chan.aliases.all()), sorted(aliases))
| 37.945946 | 95 | 0.645442 |
from django.test import TestCase
from evennia.utils.test_resources import EvenniaTest
from evennia.scripts.scripts import DefaultScript
from evennia.utils import create
class TestCreateScript(EvenniaTest):
    """Tests of `create.create_script` timer behaviour.

    Each test defines a throwaway script typeclass inline and checks how
    different interval/repeats/start_delay combinations are handled.
    """
    def test_create_script(self):
        """A plain repeating script is created with its configured attributes."""
        class TestScriptA(DefaultScript):
            def at_script_creation(self):
                self.key = "test_script"
                self.interval = 10
                self.persistent = False
        script = create.create_script(TestScriptA, key="test_script")
        assert script is not None
        assert script.interval == 10
        assert script.key == "test_script"
        # stop the timer so it does not keep firing after the test
        script.stop()
    def test_create_script_w_repeats_equal_1(self):
        """repeats=1 without start delay: script survives, timer already spent."""
        class TestScriptB(DefaultScript):
            def at_script_creation(self):
                self.key = "test_script"
                self.interval = 10
                self.repeats = 1
                self.persistent = False
        # the script object should still exist even though repeats=1
        script = create.create_script(TestScriptB, key="test_script")
        assert script
        # but its timer should be inactive now
        assert not script.is_active
    def test_create_script_w_repeats_equal_1_persisted(self):
        """Same as above but persistent=True; the timer is still stopped."""
        class TestScriptB1(DefaultScript):
            def at_script_creation(self):
                self.key = "test_script"
                self.interval = 10
                self.repeats = 1
                self.persistent = True
        script = create.create_script(TestScriptB1, key="test_script")
        assert script
        assert not script.is_active
    def test_create_script_w_repeats_equal_2(self):
        """repeats=2 keeps the script (and its attributes) alive after creation."""
        class TestScriptC(DefaultScript):
            def at_script_creation(self):
                self.key = "test_script"
                self.interval = 10
                self.repeats = 2
                self.persistent = False
        script = create.create_script(TestScriptC, key="test_script")
        assert script is not None
        assert script.interval == 10
        assert script.repeats == 2
        assert script.key == "test_script"
        script.stop()
    def test_create_script_w_repeats_equal_1_and_delayed(self):
        """repeats=1 with start_delay=True: script still exists after creation."""
        class TestScriptD(DefaultScript):
            def at_script_creation(self):
                self.key = "test_script"
                self.interval = 10
                self.start_delay = True
                self.repeats = 1
                self.persistent = False
        script = create.create_script(TestScriptD, key="test_script")
        assert script is not None
        assert script.interval == 10
        assert script.repeats == 1
        assert script.key == "test_script"
        script.stop()
class TestCreateHelpEntry(TestCase):
    """Tests of `create.create_help_entry`."""
    help_entry = """
    Qui laborum voluptas quis commodi ipsum quo temporibus eum. Facilis
    assumenda facilis architecto in corrupti. Est placeat eum amet qui beatae
    reiciendis. Accusamus vel aspernatur ab ex. Quam expedita sed expedita
    consequuntur est dolorum non exercitationem.
    Ipsa vel ut dolorem voluptatem adipisci velit. Sit odit temporibus mollitia
    illum ipsam placeat. Rem et ipsum dolor. Hic eum tempore excepturi qui veniam
    magni.
    Excepturi quam repellendus inventore excepturi fugiat quo quasi molestias.
    Nostrum ut assumenda enim a. Repellat quis omnis est officia accusantium. Fugit
    facere qui aperiam. Perspiciatis commodi dolores ipsam nemo consequatur
    quisquam qui non. Adipisci et molestias voluptatum est sed fugiat facere.
    """
    def test_create_help_entry__simple(self):
        """Entry is created with key, text and category; duplicates refused."""
        entry = create.create_help_entry("testentry", self.help_entry, category="Testing")
        self.assertEqual(entry.key, "testentry")
        self.assertEqual(entry.entrytext, self.help_entry)
        self.assertEqual(entry.help_category, "Testing")
        self.assertFalse(create.create_help_entry("testentry", "testtext"))
    def test_create_help_entry__complex(self):
        """Locks, aliases and tags supplied at creation are all stored."""
        locks = "foo:false();bar:true()"
        aliases = ["foo", "bar", "tst"]
        tags = [("tag1", "help"), ("tag2", "help"), ("tag3", "help")]
        entry = create.create_help_entry(
            "testentry",
            self.help_entry,
            category="Testing",
            locks=locks,
            aliases=aliases,
            tags=tags,
        )
        self.assertTrue(all(lock in entry.locks.all() for lock in locks.split(";")))
        # Bugfix: list.sort() returns None, so the old assertion compared
        # None == None and always passed. Compare sorted copies instead.
        self.assertEqual(sorted(entry.aliases.all()), sorted(aliases))
        self.assertEqual(entry.tags.all(return_key_and_category=True), tags)
class TestCreateMessage(EvenniaTest):
    """Tests of `create.create_message` sender/receiver handling."""
    msgtext = """
    Qui laborum voluptas quis commodi ipsum quo temporibus eum. Facilis
    assumenda facilis architecto in corrupti. Est placeat eum amet qui beatae
    reiciendis. Accusamus vel aspernatur ab ex. Quam expedita sed expedita
    consequuntur est dolorum non exercitationem.
    """
    def test_create_msg__simple(self):
        """Message stores text/header; the assertions below show that assigning
        to msg.senders / msg.receivers appends rather than replaces."""
        msg = create.create_message(self.char1, self.msgtext, header="TestHeader")
        msg.senders = "ExternalSender"
        msg.receivers = self.char2
        msg.receivers = "ExternalReceiver"
        self.assertEqual(msg.message, self.msgtext)
        self.assertEqual(msg.header, "TestHeader")
        self.assertEqual(msg.senders, [self.char1, "ExternalSender"])
        self.assertEqual(msg.receivers, [self.char2, "ExternalReceiver"])
    def test_create_msg__custom(self):
        """Receivers, locks and tags supplied at creation are all stored."""
        locks = "foo:false();bar:true()"
        tags = ["tag1", "tag2", "tag3"]
        msg = create.create_message(
            self.char1,
            self.msgtext,
            header="TestHeader",
            receivers=[self.char1, self.char2, "ExternalReceiver"],
            locks=locks,
            tags=tags,
        )
        self.assertEqual(set(msg.receivers), set([self.char1, self.char2, "ExternalReceiver"]))
        self.assertTrue(all(lock in msg.locks.all() for lock in locks.split(";")))
        self.assertEqual(msg.tags.all(), tags)
class TestCreateChannel(TestCase):
    """Tests of `create.create_channel`."""
    def test_create_channel__simple(self):
        """Channel is created with the given key and description."""
        chan = create.create_channel("TestChannel1", desc="Testing channel")
        self.assertEqual(chan.key, "TestChannel1")
        self.assertEqual(chan.db.desc, "Testing channel")
    def test_create_channel__complex(self):
        """Locks, tags and aliases supplied at creation are all stored."""
        locks = "foo:false();bar:true()"
        tags = ["tag1", "tag2", "tag3"]
        aliases = ["foo", "bar", "tst"]
        chan = create.create_channel(
            "TestChannel2", desc="Testing channel", aliases=aliases, locks=locks, tags=tags
        )
        self.assertTrue(all(lock in chan.locks.all() for lock in locks.split(";")))
        self.assertEqual(chan.tags.all(), tags)
        # Bugfix: list.sort() returns None, so the old assertion compared
        # None == None and always passed. Compare sorted copies instead.
        self.assertEqual(sorted(chan.aliases.all()), sorted(aliases))
| true | true |
f735721051aeee1950098b1b22a38efb71913410 | 855 | py | Python | setup.py | Tracardi/tracardi-zapier-webhook | becb6b8fc5815d35254f2cf9073417869202dbf0 | [
"MIT"
] | null | null | null | setup.py | Tracardi/tracardi-zapier-webhook | becb6b8fc5815d35254f2cf9073417869202dbf0 | [
"MIT"
] | null | null | null | setup.py | Tracardi/tracardi-zapier-webhook | becb6b8fc5815d35254f2cf9073417869202dbf0 | [
"MIT"
] | null | null | null | from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='tracardi-zapier-webhook',
version='0.6.0.1',
description='This plugin calls zapier webhook.',
long_description=long_description,
long_description_content_type="text/markdown",
author='Risto Kowaczewski',
author_email='risto.kowaczewski@gmail.com',
packages=['tracardi_zapier_webhook'],
install_requires=[
'tracardi-plugin-sdk>=0.6.30',
'tracardi-dot-notation',
'pydantic',
'asyncio',
'aiohttp'
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
],
keywords=['tracardi', 'plugin'],
include_package_data=True,
python_requires=">=3.8",
)
| 27.580645 | 52 | 0.636257 | from setuptools import setup
# Pull the long description for PyPI straight from the README.
with open("README.md", "r") as readme_file:
    readme_contents = readme_file.read()
# Packaging metadata for the tracardi-zapier-webhook plugin.
setup(
    name='tracardi-zapier-webhook',
    version='0.6.0.1',
    description='This plugin calls zapier webhook.',
    long_description=readme_contents,
    long_description_content_type="text/markdown",
    author='Risto Kowaczewski',
    author_email='risto.kowaczewski@gmail.com',
    packages=['tracardi_zapier_webhook'],
    install_requires=[
        'tracardi-plugin-sdk>=0.6.30',
        'tracardi-dot-notation',
        'pydantic',
        'asyncio',
        'aiohttp'
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent"
    ],
    keywords=['tracardi', 'plugin'],
    include_package_data=True,
    python_requires=">=3.8",
)
| true | true |
f735728bd3c2525528a91c73c1dd2e6dfc3a12e2 | 749 | py | Python | app/management/commands/zombie_emails.py | marcusosso/uwhvz | c20303c117e8b2fcd04f5901326054296d3f3caf | [
"MIT"
] | 9 | 2018-09-08T06:59:02.000Z | 2022-03-23T08:12:02.000Z | app/management/commands/zombie_emails.py | marcusosso/uwhvz | c20303c117e8b2fcd04f5901326054296d3f3caf | [
"MIT"
] | 37 | 2020-01-22T02:36:32.000Z | 2020-10-06T15:05:37.000Z | app/management/commands/zombie_emails.py | marcusosso/uwhvz | c20303c117e8b2fcd04f5901326054296d3f3caf | [
"MIT"
] | 6 | 2019-03-07T02:55:27.000Z | 2019-11-10T23:26:44.000Z | from django.core.management.base import BaseCommand
from app.models import Player, PlayerRole, Spectator, Moderator, most_recent_game
class Command(BaseCommand):
    """Management command that prints the emails of everyone on the zombie side."""

    help = 'Prints a list of all zombie emails'

    def handle(self, *args, **options):
        """Collect emails of active zombies, spectators and moderators of the
        most recent game and emit them as one comma-separated line."""
        game = most_recent_game()
        spectators = Spectator.objects.filter(game=game)
        moderators = Moderator.objects.filter(game=game)
        zombies = Player.objects.filter(game=game, active=True, role=PlayerRole.ZOMBIE)
        spectator_emails = [s.user.email for s in spectators]
        moderator_emails = [m.user.email for m in moderators]
        # Order matters for the output: zombies first, then spectators, then moderators.
        zombie_emails = [z.user.email for z in zombies] + spectator_emails + moderator_emails
        # Write via self.stdout (not print) so output is capturable through
        # call_command() and honors redirection — Django convention.
        self.stdout.write(", ".join(zombie_emails))
| 39.421053 | 93 | 0.708945 | from django.core.management.base import BaseCommand
from app.models import Player, PlayerRole, Spectator, Moderator, most_recent_game
class Command(BaseCommand):
    """Prints the email addresses of all zombie-side participants."""

    help = 'Prints a list of all zombie emails'

    def handle(self, *args, **options):
        """Print a comma-separated list of zombie, spectator and moderator emails."""
        game = most_recent_game()
        zombie_qs = Player.objects.filter(game=game, active=True, role=PlayerRole.ZOMBIE)
        spectator_qs = Spectator.objects.filter(game=game)
        moderator_qs = Moderator.objects.filter(game=game)
        # Output order: zombies, then spectators, then moderators.
        recipients = [zombie.user.email for zombie in zombie_qs]
        recipients += [spectator.user.email for spectator in spectator_qs]
        recipients += [moderator.user.email for moderator in moderator_qs]
        print(", ".join(recipients))
| true | true |
f735734fbcfa9b4dcd228604eb9c4689fe4646a8 | 464 | py | Python | recipes/deco/all/test_package/conanfile.py | dpronin/conan-center-index | 5c6e41a618097d04e731c9831118a51dcb39ab3f | [
"MIT"
] | 1 | 2021-11-11T03:07:13.000Z | 2021-11-11T03:07:13.000Z | recipes/deco/all/test_package/conanfile.py | dpronin/conan-center-index | 5c6e41a618097d04e731c9831118a51dcb39ab3f | [
"MIT"
] | 1 | 2021-11-22T13:54:48.000Z | 2021-11-22T14:09:45.000Z | recipes/deco/all/test_package/conanfile.py | dpronin/conan-center-index | 5c6e41a618097d04e731c9831118a51dcb39ab3f | [
"MIT"
] | null | null | null | import os
from conans import ConanFile, CMake, tools
class DecoTestConan(ConanFile):
    """Conan test-package recipe: builds the example project and runs it."""

    settings = "os", "compiler", "build_type", "arch"
    generators = "cmake", "cmake_find_package_multi"

    def build(self):
        """Configure and build the CMake-based example consumer."""
        builder = CMake(self)
        builder.configure()
        builder.build()

    def test(self):
        """Run the built example binary; skipped when cross-building."""
        if tools.cross_building(self):
            return
        executable = os.path.join("bin", "example")
        self.run(executable, run_environment=True)
| 24.421053 | 53 | 0.62931 | import os
from conans import ConanFile, CMake, tools
class DecoTestConan(ConanFile):
    """Conan test-package recipe verifying the deco package is consumable."""
    settings = "os", "compiler", "build_type", "arch"
    generators = "cmake", "cmake_find_package_multi"
    def build(self):
        """Configure and build the CMake-based example consumer."""
        cmake = CMake(self)
        cmake.configure()
        cmake.build()
    def test(self):
        """Run the built example binary (cannot run when cross-building)."""
        if not tools.cross_building(self):
            bin_path = os.path.join("bin", "example")
            self.run(bin_path, run_environment=True)
| true | true |
f7357454568fceaf697bea82684aafa83f283390 | 6,230 | py | Python | backend/Layer/layer/aws/dynamodb/base.py | cvc-Fujii/line-api-use-case-reservation-Restaurant | 248ae2ed52d8325d17d2ddbbd2975068381193fe | [
"Unlicense"
] | 8 | 2021-05-21T03:10:12.000Z | 2022-01-09T10:10:26.000Z | backend/Layer/layer/aws/dynamodb/base.py | cvc-Fujii/line-api-use-case-reservation-Restaurant | 248ae2ed52d8325d17d2ddbbd2975068381193fe | [
"Unlicense"
] | null | null | null | backend/Layer/layer/aws/dynamodb/base.py | cvc-Fujii/line-api-use-case-reservation-Restaurant | 248ae2ed52d8325d17d2ddbbd2975068381193fe | [
"Unlicense"
] | 4 | 2021-05-28T09:57:52.000Z | 2021-09-27T12:25:54.000Z | """
DynamoDB操作用基底モジュール
"""
import boto3
from boto3.dynamodb.conditions import Key
import logging
from datetime import (datetime, timedelta)
# ログ出力の設定
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class DynamoDB:
    """Base helper class wrapping common DynamoDB table operations.

    NOTE(review): the methods below use ``self._table``, which is neither
    assigned in ``__init__`` nor listed in ``__slots__`` — presumably a
    subclass provides it (e.g. ``self._db.Table(self._table_name)``);
    confirm against the concrete table classes.
    """
    __slots__ = ['_db', '_table_name']
    def __init__(self, table_name):
        """Store the table name and create the DynamoDB service resource."""
        self._table_name = table_name
        self._db = boto3.resource('dynamodb')
    def _put_item(self, item):
        """
        Insert (or overwrite) a single item.
        Parameters
        ----------
        item : dict
            Item to store
        Returns
        -------
        response : dict
            Raw DynamoDB response
        """
        try:
            response = self._table.put_item(
                Item=self._replace_data_for_dynamodb(item))
        except Exception as e:
            raise e
        return response
    def _update_item(self, key, expression, expression_value, return_value):
        """
        Update an item.
        Parameters
        ----------
        key : dict
            Primary key of the item to update
        expression : str
            UpdateExpression string
        expression_value : dict
            Values referenced in the expression
        return_value : str
            ReturnValues option selecting what the response includes
        Returns
        -------
        response : dict
            Raw DynamoDB response
        """
        try:
            response = self._table.update_item(Key=key,
                                               UpdateExpression=expression,
                                               ExpressionAttributeValues=self._replace_data_for_dynamodb(  # noqa: E501
                                                   expression_value),
                                               ReturnValues=return_value)
        except Exception as e:
            raise e
        return response
    def _update_item_optional(self, key, update_expression,
                              condition_expression, expression_attribute_names,
                              expression_value, return_value):
        """
        Update an item with an extra update condition.
        Use this variant when the update must be guarded by conditions
        beyond the primary key.
        Parameters
        ----------
        key : dict
            Primary key of the item to update
        update_expression : str
            UpdateExpression string
        condition_expression : str
            ConditionExpression guarding the update
        expression_attribute_names : dict
            Attribute-name placeholders (needed to escape reserved words)
        expression_value : dict
            Values referenced in the expressions
        return_value : str
            ReturnValues option selecting what the response includes
        Returns
        -------
        response : dict
            Raw DynamoDB response
        """
        try:
            response = self._table.update_item(
                Key=key,
                UpdateExpression=update_expression,
                ConditionExpression=condition_expression,
                ExpressionAttributeNames=expression_attribute_names,  # noqa 501
                ExpressionAttributeValues=self._replace_data_for_dynamodb(
                    expression_value),
                ReturnValues=return_value,
            )
        except Exception as e:
            raise e
        return response
    def _delete_item(self, key):
        """
        Delete an item.
        Parameters
        ----------
        key : dict
            Primary key of the item to delete
        Returns
        -------
        response : dict
            Raw DynamoDB response
        """
        try:
            response = self._table.delete_item(Key=key)
        except Exception as e:
            raise e
        return response
    def _get_item(self, key):
        """
        Fetch a single item.
        Parameters
        ----------
        key : dict
            Primary key of the item to fetch
        Returns
        -------
        item : dict
            The stored item, or an empty dict when not found
        """
        try:
            response = self._table.get_item(Key=key)
        except Exception as e:
            raise e
        return response.get('Item', {})
    def _query(self, key, value):
        """
        Fetch items via a key-equality query.
        Parameters
        ----------
        key : str
            Key attribute name
        value : object
            Value the key must equal
        Returns
        -------
        items : list
            Matching items
        """
        try:
            response = self._table.query(
                KeyConditionExpression=Key(key).eq(value)
            )
        except Exception as e:
            raise e
        return response['Items']
    def _query_index(self, index, expression, expression_value):
        """
        Fetch items from a secondary index.
        Parameters
        ----------
        index : str
            Index name
        expression : str
            KeyConditionExpression to evaluate
        expression_value : dict
            Placeholder names and values used inside the expression
        Returns
        -------
        items : list
            Matching items
        """
        try:
            response = self._table.query(
                IndexName=index,
                KeyConditionExpression=expression,
                ExpressionAttributeValues=self._replace_data_for_dynamodb(
                    expression_value),
            )
        except Exception as e:
            raise e
        return response['Items']
    def _scan(self, key, value=None):
        """
        Fetch items with a table scan, optionally filtered by key == value.
        Parameters
        ----------
        key : str
            Attribute name to filter on
        value : object, optional
            Value to filter for; scans the whole table when falsy, by default None
        Returns
        -------
        items : list
            Matching items
        """
        scan_kwargs = {}
        if value:
            scan_kwargs['FilterExpression'] = Key(key).eq(value)
        try:
            response = self._table.scan(**scan_kwargs)
        except Exception as e:
            raise e
        return response['Items']
    def _get_table_size(self):
        """
        Count the items in the table (via a COUNT scan).
        Returns
        -------
        count : int
            Number of items in the table
        """
        try:
            response = self._table.scan(Select='COUNT')
        except Exception as e:
            raise e
        return response.get('Count', 0)
    def _replace_data_for_dynamodb(self, value: dict):
        """Hook for subclasses to coerce values before they are sent to
        DynamoDB; identity by default."""
        return value
| 22.98893 | 120 | 0.4626 | import boto3
from boto3.dynamodb.conditions import Key
import logging
from datetime import (datetime, timedelta)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class DynamoDB:
    """Base helper class wrapping common DynamoDB table operations.

    NOTE(review): the methods below use ``self._table``, which is neither
    assigned in ``__init__`` nor listed in ``__slots__`` — presumably a
    subclass provides it (e.g. ``self._db.Table(self._table_name)``);
    confirm against the concrete table classes.
    """
    __slots__ = ['_db', '_table_name']
    def __init__(self, table_name):
        """Store the table name and create the DynamoDB service resource."""
        self._table_name = table_name
        self._db = boto3.resource('dynamodb')
    def _put_item(self, item):
        """Insert (or overwrite) one item; returns the raw DynamoDB response."""
        try:
            response = self._table.put_item(
                Item=self._replace_data_for_dynamodb(item))
        except Exception as e:
            raise e
        return response
    def _update_item(self, key, expression, expression_value, return_value):
        """Update the item at `key` using an UpdateExpression; returns the raw response."""
        try:
            response = self._table.update_item(Key=key,
                                               UpdateExpression=expression,
                                               ExpressionAttributeValues=self._replace_data_for_dynamodb(
                                                   expression_value),
                                               ReturnValues=return_value)
        except Exception as e:
            raise e
        return response
    def _update_item_optional(self, key, update_expression,
                              condition_expression, expression_attribute_names,
                              expression_value, return_value):
        """Conditional update: like `_update_item` but guarded by a
        ConditionExpression, with attribute-name placeholders so reserved
        words can be used."""
        try:
            response = self._table.update_item(
                Key=key,
                UpdateExpression=update_expression,
                ConditionExpression=condition_expression,
                ExpressionAttributeNames=expression_attribute_names,
                ExpressionAttributeValues=self._replace_data_for_dynamodb(
                    expression_value),
                ReturnValues=return_value,
            )
        except Exception as e:
            raise e
        return response
    def _delete_item(self, key):
        """Delete the item at `key`; returns the raw DynamoDB response."""
        try:
            response = self._table.delete_item(Key=key)
        except Exception as e:
            raise e
        return response
    def _get_item(self, key):
        """Fetch one item by primary key; returns {} when not found."""
        try:
            response = self._table.get_item(Key=key)
        except Exception as e:
            raise e
        return response.get('Item', {})
    def _query(self, key, value):
        """Return items whose key attribute equals `value` (table query)."""
        try:
            response = self._table.query(
                KeyConditionExpression=Key(key).eq(value)
            )
        except Exception as e:
            raise e
        return response['Items']
    def _query_index(self, index, expression, expression_value):
        """Query a secondary index with the given KeyConditionExpression."""
        try:
            response = self._table.query(
                IndexName=index,
                KeyConditionExpression=expression,
                ExpressionAttributeValues=self._replace_data_for_dynamodb(
                    expression_value),
            )
        except Exception as e:
            raise e
        return response['Items']
    def _scan(self, key, value=None):
        """Scan the table, optionally filtering on key == value."""
        scan_kwargs = {}
        if value:
            scan_kwargs['FilterExpression'] = Key(key).eq(value)
        try:
            response = self._table.scan(**scan_kwargs)
        except Exception as e:
            raise e
        return response['Items']
    def _get_table_size(self):
        """Return the number of items in the table via a COUNT scan."""
        try:
            response = self._table.scan(Select='COUNT')
        except Exception as e:
            raise e
        return response.get('Count', 0)
    def _replace_data_for_dynamodb(self, value: dict):
        """Hook for subclasses to coerce values before sending; identity by default."""
        return value
| true | true |
f735745dff459be2423815031d82822f221659e6 | 8,149 | py | Python | maatpy/dataset.py | sanzgiri/MaatPy | 381a0d31f1afdd2c53b9ccbb410eb0df6b4b9965 | [
"MIT"
] | 11 | 2019-05-17T03:50:18.000Z | 2021-08-23T22:18:23.000Z | maatpy/dataset.py | sanzgiri/MaatPy | 381a0d31f1afdd2c53b9ccbb410eb0df6b4b9965 | [
"MIT"
] | 3 | 2021-04-08T14:01:15.000Z | 2021-06-21T15:41:31.000Z | maatpy/dataset.py | sanzgiri/MaatPy | 381a0d31f1afdd2c53b9ccbb410eb0df6b4b9965 | [
"MIT"
] | 7 | 2019-06-09T06:16:59.000Z | 2021-11-12T01:45:52.000Z | import warnings
import numpy as np
import pandas as pd
from collections import Counter
from sklearn.datasets import make_classification
from sklearn.utils import check_X_y
from sklearn.utils import Bunch
from sklearn.preprocessing import LabelEncoder
from imblearn.under_sampling.prototype_selection import RandomUnderSampler
from imblearn.over_sampling import RandomOverSampler
class Dataset(Bunch):
    """Container for a classification dataset (features, labels, names)
    with helpers to load data from CSV and to induce class imbalance."""
    def __init__(self, data=None, target=None, feature_names=None, target_names=None):
        """
        :param data: 2d array-like feature matrix
        :param target: 1d array-like of encoded class labels
        :param feature_names: sequence of column names for `data`
        :param target_names: sequence of class names indexed by label
        """
        self.data = data
        self.target = target
        self.feature_names = feature_names
        self.target_names = target_names
    def make_imbalance(self, ratio=None, random_state=None):
        """
        Resample this dataset in place to the requested class distribution.
        Built on imblearn's random under-/over-samplers.
        :param ratio: dict or list
            - dict: keys are class labels, values the desired sample count per class.
            - list: per-class proportions (floats); the total dataset size is
              kept and samples are shifted between classes.
        :param random_state: int, RandomState instance or None, optional (default=None)
            Seed/generator passed to the resamplers.
        :raises ValueError: if a list ratio has the wrong length, or the
            requested total exceeds the original dataset size.
        :raises TypeError: if ratio is neither dict nor list.
        :return: None; `self.data` and `self.target` are replaced.
        """
        x, y = check_X_y(self.data, self.target)
        original_dataset_size = len(y)
        n_classes = len(self.target_names)
        if isinstance(ratio, dict):
            ratio_ = ratio
        elif isinstance(ratio, list):
            # Convert proportions into absolute per-class counts.
            weights = ratio
            if len(weights) != n_classes:
                raise ValueError("{} classes available but only {} values provided".format(n_classes, len(weights)))
            ratio_ = {}
            for i in range(n_classes):
                ratio_[i] = int(round(weights[i] * original_dataset_size, 0))
        else:
            raise TypeError("Expected dict or list; {} provided".format(type(ratio)))
        if sum(ratio_.values()) < original_dataset_size:
            # Shrinking the dataset: undersampling alone suffices.
            rus = RandomUnderSampler(ratio=ratio_, random_state=random_state)
            self.data, self.target = rus.fit_sample(x, y)
        elif sum(ratio_.values()) == original_dataset_size:
            # Same total size: first undersample the classes that must shrink,
            # then oversample the rest back up to the requested counts.
            original_distribution = Counter(y)
            interim_ratio = {}
            for key in ratio_:
                if ratio_[key] >= original_distribution[key]:
                    interim_ratio[key] = original_distribution[key]
                else:
                    interim_ratio[key] = ratio_[key]
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                rus = RandomUnderSampler(ratio=interim_ratio, random_state=random_state)
                x_int, y_int = rus.fit_sample(x, y)
            with warnings.catch_warnings():
                # Silencing RandomOverSampler UserWarning: After over-sampling, the number of samples in class A will
                # be larger than the number of samples in the majority class
                warnings.simplefilter("ignore")
                ros = RandomOverSampler(ratio=ratio_, random_state=random_state)
                self.data, self.target = ros.fit_sample(x_int, y_int)
        else:
            raise ValueError("The requested dataset cannot be larger than the original dataset")
    def load_from_csv(self, filename, sep=',', output_column=None, ignore=None):
        """
        Populate this dataset from a delimited text file.
        :param filename: path to filename containing the data to load
        :param sep: field separator; default ','
        :param output_column: column containing the outcome (label-encoded into `target`)
        :param ignore: column(s) to drop from the features; str or list
        :raises ValueError: if no output_column is given.
        :return: None; data/target/feature_names/target_names are replaced.
        """
        df = pd.read_csv(filename, sep=sep)
        if output_column:
            # Encode the outcome column to integer labels; keep the original
            # class names for reporting.
            le = LabelEncoder()
            le.fit(list(df[output_column]))
            self.target_names = le.classes_
            self.target = le.transform(list(df[output_column]))
            df.drop(output_column, axis=1, inplace=True)
        else:
            raise ValueError('Please define an output_column; column containing the class defined for each observation '
                             '(row)')
        if ignore is not None:
            df.drop(ignore, axis=1, inplace=True)
        self.feature_names = df.columns
        self.data = df.values
def simulate_dataset(n_samples=100, n_features=2, n_informative=2, n_redundant=0, n_classes=2, n_clusters_per_class=1,
                     weights=None, flip_y=0.01, class_sep=1.0, random_state=None):
    """Generate a synthetic classification problem wrapped in a Dataset.

    Thin wrapper around ``sklearn.datasets.make_classification``; see that
    function for the exact semantics of each parameter.

    :param n_samples: number of samples (default 100)
    :param n_features: total number of features (default 2)
    :param n_informative: number of informative features (default 2)
    :param n_redundant: number of redundant features (default 0)
    :param n_classes: number of classes/labels (default 2)
    :param n_clusters_per_class: clusters per class (default 1)
    :param weights: per-class sample proportions, or None for balanced classes
    :param flip_y: fraction of samples with randomly exchanged class (default 0.01)
    :param class_sep: hypercube size multiplier; larger is easier (default 1.0)
    :param random_state: seed / RandomState / None for the generator
    :return: Dataset object with generated data, target and generated
        feature#i / class#i names
    """
    features, labels = make_classification(
        n_samples=n_samples,
        n_features=n_features,
        n_informative=n_informative,
        n_redundant=n_redundant,
        n_classes=n_classes,
        n_clusters_per_class=n_clusters_per_class,
        weights=weights,
        flip_y=flip_y,
        class_sep=class_sep,
        random_state=random_state,
    )
    feature_names = ['feature#{}'.format(i) for i in range(features.shape[1])]
    target_names = ['class#{}'.format(i) for i in np.unique(labels)]
    return Dataset(features, labels, feature_names, target_names)
| 50.302469 | 120 | 0.647196 | import warnings
import numpy as np
import pandas as pd
from collections import Counter
from sklearn.datasets import make_classification
from sklearn.utils import check_X_y
from sklearn.utils import Bunch
from sklearn.preprocessing import LabelEncoder
from imblearn.under_sampling.prototype_selection import RandomUnderSampler
from imblearn.over_sampling import RandomOverSampler
class Dataset(Bunch):
    """Bundle of data, labels and name metadata for a classification dataset.

    Attributes:
        data: 2-D array of feature values, one row per sample.
        target: 1-D array of integer-encoded class labels.
        feature_names: list of column names matching ``data``.
        target_names: list of class names matching the label encoding.
    """

    def __init__(self, data=None, target=None, feature_names=None, target_names=None):
        self.data = data
        self.target = target
        self.feature_names = feature_names
        self.target_names = target_names

    def make_imbalance(self, ratio=None, random_state=None):
        """Resample ``data``/``target`` in place to a requested class distribution.

        :param ratio: dict mapping class index -> absolute sample count, or a
            list of per-class fractions of the original dataset size (one entry
            per class, in class-index order).
        :param random_state: seed forwarded to the imblearn samplers.
        :raises TypeError: if ``ratio`` is neither a dict nor a list.
        :raises ValueError: if a list ``ratio`` has the wrong length, or if the
            requested totals exceed the original dataset size.
        """
        x, y = check_X_y(self.data, self.target)
        original_dataset_size = len(y)
        n_classes = len(self.target_names)
        # Normalize ratio to a dict of absolute per-class counts.
        if isinstance(ratio, dict):
            ratio_ = ratio
        elif isinstance(ratio, list):
            weights = ratio
            if len(weights) != n_classes:
                raise ValueError("{} classes available but only {} values provided".format(n_classes, len(weights)))
            ratio_ = {}
            for i in range(n_classes):
                ratio_[i] = int(round(weights[i] * original_dataset_size, 0))
        else:
            raise TypeError("Expected dict or list; {} provided".format(type(ratio)))
        # NOTE(review): imblearn's `ratio=` kwarg and `fit_sample` are the
        # legacy (<0.4) API; newer releases use `sampling_strategy=` and
        # `fit_resample`. Left unchanged to match the pinned dependency.
        if sum(ratio_.values()) < original_dataset_size:
            # Requested total is smaller: pure undersampling suffices.
            rus = RandomUnderSampler(ratio=ratio_, random_state=random_state)
            self.data, self.target = rus.fit_sample(x, y)
        elif sum(ratio_.values()) == original_dataset_size:
            # Same total but different per-class mix: first undersample the
            # classes that must shrink, then oversample up to the targets.
            original_distribution = Counter(y)
            interim_ratio = {}
            for key in ratio_:
                if ratio_[key] >= original_distribution[key]:
                    interim_ratio[key] = original_distribution[key]
                else:
                    interim_ratio[key] = ratio_[key]
            # Warnings suppressed around the sampler calls (presumably to hide
            # imblearn deprecation noise — TODO confirm).
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                rus = RandomUnderSampler(ratio=interim_ratio, random_state=random_state)
                x_int, y_int = rus.fit_sample(x, y)
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                ros = RandomOverSampler(ratio=ratio_, random_state=random_state)
                self.data, self.target = ros.fit_sample(x_int, y_int)
        else:
            raise ValueError("The requested dataset cannot be larger than the original dataset")

    def load_from_csv(self, filename, sep=',', output_column=None, ignore=None):
        """Populate this Dataset from a CSV file.

        :param filename: path of the CSV file to read.
        :param sep: field separator passed to ``pandas.read_csv``.
        :param output_column: name of the column holding the class label
            (required); it is label-encoded into ``target``/``target_names``.
        :param ignore: optional column name(s) to drop from the features.
        :raises ValueError: if ``output_column`` is not given.
        """
        df = pd.read_csv(filename, sep=sep)
        if output_column:
            le = LabelEncoder()
            le.fit(list(df[output_column]))
            self.target_names = le.classes_
            self.target = le.transform(list(df[output_column]))
            df.drop(output_column, axis=1, inplace=True)
        else:
            raise ValueError('Please define an output_column; column containing the class defined for each observation '
                             '(row)')
        if ignore is not None:
            df.drop(ignore, axis=1, inplace=True)
        self.feature_names = df.columns
        self.data = df.values
def simulate_dataset(n_samples=100, n_features=2, n_informative=2, n_redundant=0, n_classes=2, n_clusters_per_class=1,
                     weights=None, flip_y=0.01, class_sep=1.0, random_state=None):
    """Generate a random classification problem wrapped in a :class:`Dataset`.

    Thin wrapper around ``sklearn.datasets.make_classification`` that also
    builds synthetic feature names (``feature#i``) and class names
    (``class#i``).

    :param n_samples: number of samples to generate.
    :param n_features: total number of features.
    :param n_informative: number of informative features.
    :param n_redundant: number of redundant features (random linear
        combinations of the informative ones).
    :param n_classes: number of classes (labels).
    :param n_clusters_per_class: number of clusters per class.
    :param weights: per-class sample proportions; ``None`` means balanced.
    :param flip_y: fraction of samples whose class is randomly exchanged
        (label noise).
    :param class_sep: factor multiplying the hypercube size; larger values
        spread the classes apart and make the task easier.
    :param random_state: seed / RandomState forwarded to sklearn.
    :return: a :class:`Dataset` with data, target and generated names.
    """
    data, target = make_classification(n_samples=n_samples, n_features=n_features,
                                       n_informative=n_informative, n_redundant=n_redundant,
                                       n_classes=n_classes, n_clusters_per_class=n_clusters_per_class,
                                       weights=weights, flip_y=flip_y, class_sep=class_sep,
                                       random_state=random_state)
    feature_names = ['feature#{}'.format(i) for i in range(data.shape[1])]
    target_names = ['class#{}'.format(i) for i in np.unique(target)]
    return Dataset(data, target, feature_names, target_names)
| true | true |
f73574d2aecc0b6d1de391aa5eac4ec6d9dc1d37 | 17,721 | py | Python | maskrcnn_benchmark/config/.ipynb_checkpoints/defaults-checkpoint.py | Zhang-Jing-Xuan/MaskRCNN | 8f1719113847655607eb116d2cb632cc0461119e | [
"MIT"
] | 4 | 2021-03-16T02:39:26.000Z | 2021-07-02T08:25:28.000Z | maskrcnn_benchmark/config/defaults.py | Zhang-Jing-Xuan/MaskRCNN | 8f1719113847655607eb116d2cb632cc0461119e | [
"MIT"
] | null | null | null | maskrcnn_benchmark/config/defaults.py | Zhang-Jing-Xuan/MaskRCNN | 8f1719113847655607eb116d2cb632cc0461119e | [
"MIT"
] | 1 | 2021-06-12T17:11:45.000Z | 2021-06-12T17:11:45.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
from yacs.config import CfgNode as CN
# -----------------------------------------------------------------------------
# Convention about Training / Test specific parameters
# -----------------------------------------------------------------------------
# Whenever an argument can be either used for training or for testing, the
# corresponding name will be post-fixed by a _TRAIN for a training parameter,
# or _TEST for a test-specific parameter.
# For example, the maximum image side during training will be
# INPUT.MAX_SIZE_TRAIN, while for testing it will be
# INPUT.MAX_SIZE_TEST
# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C = CN()
_C.MODEL = CN()
_C.MODEL.RPN_ONLY = False
_C.MODEL.MASK_ON = False
_C.MODEL.RETINANET_ON = False
_C.MODEL.KEYPOINT_ON = False
_C.MODEL.DEVICE = "cuda"
_C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN"
_C.MODEL.CLS_AGNOSTIC_BBOX_REG = False
# If the WEIGHT starts with a catalog://, like :R-50, the code will look for
# the path in paths_catalog. Else, it will use it as the specified absolute
# path
_C.MODEL.WEIGHT = ""
# -----------------------------------------------------------------------------
# INPUT
# -----------------------------------------------------------------------------
_C.INPUT = CN()
# Size of the smallest side of the image during training
_C.INPUT.MIN_SIZE_TRAIN = (800,) # (800,)
# Maximum size of the side of the image during training
_C.INPUT.MAX_SIZE_TRAIN = 1333
# Size of the smallest side of the image during testing
_C.INPUT.MIN_SIZE_TEST = 800
# Maximum size of the side of the image during testing
_C.INPUT.MAX_SIZE_TEST = 1333
# Values to be used for image normalization
_C.INPUT.PIXEL_MEAN = [102.9801, 115.9465, 122.7717]
# Values to be used for image normalization
_C.INPUT.PIXEL_STD = [1., 1., 1.]
# Convert image to BGR format (for Caffe2 models), in range 0-255
_C.INPUT.TO_BGR255 = True
# Image ColorJitter
_C.INPUT.BRIGHTNESS = 0.0
_C.INPUT.CONTRAST = 0.0
_C.INPUT.SATURATION = 0.0
_C.INPUT.HUE = 0.0
# Flips
_C.INPUT.HORIZONTAL_FLIP_PROB_TRAIN = 0.5
_C.INPUT.VERTICAL_FLIP_PROB_TRAIN = 0.0
# -----------------------------------------------------------------------------
# Dataset
# -----------------------------------------------------------------------------
_C.DATASETS = CN()
# List of the dataset names for training, as present in paths_catalog.py
_C.DATASETS.TRAIN = ()
# List of the dataset names for testing, as present in paths_catalog.py
_C.DATASETS.TEST = ()
# -----------------------------------------------------------------------------
# DataLoader
# -----------------------------------------------------------------------------
_C.DATALOADER = CN()
# Number of data loading threads
_C.DATALOADER.NUM_WORKERS = 4
# If > 0, this enforces that each collated batch should have a size divisible
# by SIZE_DIVISIBILITY
_C.DATALOADER.SIZE_DIVISIBILITY = 0
# If True, each batch should contain only images for which the aspect ratio
# is compatible. This groups portrait images together, and landscape images
# are not batched with portrait images.
_C.DATALOADER.ASPECT_RATIO_GROUPING = True
# ---------------------------------------------------------------------------- #
# Backbone options
# ---------------------------------------------------------------------------- #
_C.MODEL.BACKBONE = CN()
# The backbone conv body to use
# The string must match a function that is imported in modeling.model_builder
# (e.g., 'FPN.add_fpn_ResNet101_conv5_body' to specify a ResNet-101-FPN
# backbone)
_C.MODEL.BACKBONE.CONV_BODY = "R-50-C4"
# Add StopGrad at a specified stage so the bottom layers are frozen
_C.MODEL.BACKBONE.FREEZE_CONV_BODY_AT = 2
# ---------------------------------------------------------------------------- #
# FPN options
# ---------------------------------------------------------------------------- #
_C.MODEL.FPN = CN()
_C.MODEL.FPN.USE_GN = False
_C.MODEL.FPN.USE_RELU = False
# ---------------------------------------------------------------------------- #
# Group Norm options
# ---------------------------------------------------------------------------- #
_C.MODEL.GROUP_NORM = CN()
# Number of dimensions per group in GroupNorm (-1 if using NUM_GROUPS)
_C.MODEL.GROUP_NORM.DIM_PER_GP = -1
# Number of groups in GroupNorm (-1 if using DIM_PER_GP)
_C.MODEL.GROUP_NORM.NUM_GROUPS = 32
# GroupNorm's small constant in the denominator
_C.MODEL.GROUP_NORM.EPSILON = 1e-5
# ---------------------------------------------------------------------------- #
# RPN options
# ---------------------------------------------------------------------------- #
_C.MODEL.RPN = CN()
_C.MODEL.RPN.USE_FPN = False
# Base RPN anchor sizes given in absolute pixels w.r.t. the scaled network input
_C.MODEL.RPN.ANCHOR_SIZES = (32, 64, 128, 256, 512)
# Stride of the feature map that RPN is attached.
# For FPN, number of strides should match number of scales
_C.MODEL.RPN.ANCHOR_STRIDE = (16,)
# RPN anchor aspect ratios
_C.MODEL.RPN.ASPECT_RATIOS = (0.5, 1.0, 2.0)
# Remove RPN anchors that go outside the image by RPN_STRADDLE_THRESH pixels
# Set to -1 or a large value, e.g. 100000, to disable pruning anchors
_C.MODEL.RPN.STRADDLE_THRESH = 0
# Minimum overlap required between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD
# ==> positive RPN example)
_C.MODEL.RPN.FG_IOU_THRESHOLD = 0.7
# Maximum overlap allowed between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a negative examples (IoU < BG_IOU_THRESHOLD
# ==> negative RPN example)
_C.MODEL.RPN.BG_IOU_THRESHOLD = 0.3
# Total number of RPN examples per image
_C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256
# Target fraction of foreground (positive) examples per RPN minibatch
_C.MODEL.RPN.POSITIVE_FRACTION = 0.5
# Number of top scoring RPN proposals to keep before applying NMS
# When FPN is used, this is *per FPN level* (not total)
_C.MODEL.RPN.PRE_NMS_TOP_N_TRAIN = 12000
_C.MODEL.RPN.PRE_NMS_TOP_N_TEST = 6000
# Number of top scoring RPN proposals to keep after applying NMS
_C.MODEL.RPN.POST_NMS_TOP_N_TRAIN = 2000
_C.MODEL.RPN.POST_NMS_TOP_N_TEST = 1000
# NMS threshold used on RPN proposals
_C.MODEL.RPN.NMS_THRESH = 0.7
# Proposal height and width both need to be greater than RPN_MIN_SIZE
# (at the scale used during training or inference)
_C.MODEL.RPN.MIN_SIZE = 0
# Number of top scoring RPN proposals to keep after combining proposals from
# all FPN levels
_C.MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN = 2000
_C.MODEL.RPN.FPN_POST_NMS_TOP_N_TEST = 2000
# Apply the post NMS per batch (default) or per image during training
# (default is True to be consistent with Detectron, see Issue #672)
_C.MODEL.RPN.FPN_POST_NMS_PER_BATCH = True
# Custom rpn head, empty to use default conv or separable conv
_C.MODEL.RPN.RPN_HEAD = "SingleConvRPNHead"
# ---------------------------------------------------------------------------- #
# ROI HEADS options
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_HEADS = CN()
_C.MODEL.ROI_HEADS.USE_FPN = False
# Overlap threshold for an RoI to be considered foreground (if >= FG_IOU_THRESHOLD)
_C.MODEL.ROI_HEADS.FG_IOU_THRESHOLD = 0.5
# Overlap threshold for an RoI to be considered background
# (class = 0 if overlap in [0, BG_IOU_THRESHOLD))
_C.MODEL.ROI_HEADS.BG_IOU_THRESHOLD = 0.5
# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets
# These are empirically chosen to approximately lead to unit variance targets
_C.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS = (10., 10., 5., 5.)
# RoI minibatch size *per image* (number of regions of interest [ROIs])
# Total number of RoIs per training minibatch =
# TRAIN.BATCH_SIZE_PER_IM * TRAIN.IMS_PER_BATCH
# E.g., a common configuration is: 512 * 2 * 8 = 8192
_C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
# Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0)
_C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25
# Only used on test mode
# Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to
# balance obtaining high recall with not having too many low precision
# detections that will slow down inference post processing steps (like NMS)
_C.MODEL.ROI_HEADS.SCORE_THRESH = 0.05
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
_C.MODEL.ROI_HEADS.NMS = 0.5
# Maximum number of detections to return per image (100 is based on the limit
# established for the COCO dataset)
_C.MODEL.ROI_HEADS.DETECTIONS_PER_IMG = 100
# ---------------------------------------------------------------------------- #
# RoI box head (second-stage classification / box regression) options
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_BOX_HEAD = CN()
# Feature extractor and predictor class names resolved by the model builder
_C.MODEL.ROI_BOX_HEAD.FEATURE_EXTRACTOR = "ResNet50Conv5ROIFeatureExtractor"
_C.MODEL.ROI_BOX_HEAD.PREDICTOR = "FastRCNNPredictor"
# RoI pooler output resolution (pooled features are RESOLUTION x RESOLUTION)
_C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14
# Sampling ratio for RoIAlign (0 = adaptive)
_C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0
# Spatial scale(s) of the feature map(s) fed to the pooler (1 / feature stride)
_C.MODEL.ROI_BOX_HEAD.POOLER_SCALES = (1.0 / 16,)
# NOTE(review): customized from the common COCO default of 81 (80 classes +
# background) down to 2 — presumably 1 foreground class + background; confirm
# against the dataset this config is paired with.
_C.MODEL.ROI_BOX_HEAD.NUM_CLASSES = 2 #####
# Hidden layer dimension when using an MLP for the RoI box head
_C.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM = 1024
# Use GroupNorm in the box head
_C.MODEL.ROI_BOX_HEAD.USE_GN = False
# Dilation of the box-head convolutions
_C.MODEL.ROI_BOX_HEAD.DILATION = 1
# Channel width / depth of the conv box head (when a conv head is selected)
_C.MODEL.ROI_BOX_HEAD.CONV_HEAD_DIM = 256
_C.MODEL.ROI_BOX_HEAD.NUM_STACKED_CONVS = 4

# ---------------------------------------------------------------------------- #
# RoI mask head options
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_MASK_HEAD = CN()
_C.MODEL.ROI_MASK_HEAD.FEATURE_EXTRACTOR = "ResNet50Conv5ROIFeatureExtractor"
_C.MODEL.ROI_MASK_HEAD.PREDICTOR = "MaskRCNNC4Predictor"
# Pooler settings; same meaning as the box-head counterparts above
_C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0
_C.MODEL.ROI_MASK_HEAD.POOLER_SCALES = (1.0 / 16,)
_C.MODEL.ROI_MASK_HEAD.MLP_HEAD_DIM = 1024
# Output channels of the stacked convs in the mask head
_C.MODEL.ROI_MASK_HEAD.CONV_LAYERS = (256, 256, 256, 256)
# Side length of the predicted (square) mask
_C.MODEL.ROI_MASK_HEAD.RESOLUTION = 14
# Reuse the box head's pooled features instead of pooling again for masks
_C.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR = True
# Whether or not resize and translate masks to the input image.
_C.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS = False
# Binarization threshold applied when postprocessing masks
_C.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS_THRESHOLD = 0.5
# Dilation of the mask-head convolutions
_C.MODEL.ROI_MASK_HEAD.DILATION = 1
# Use GroupNorm in the mask head
_C.MODEL.ROI_MASK_HEAD.USE_GN = False

# ---------------------------------------------------------------------------- #
# RoI keypoint head options
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_KEYPOINT_HEAD = CN()
_C.MODEL.ROI_KEYPOINT_HEAD.FEATURE_EXTRACTOR = "KeypointRCNNFeatureExtractor"
_C.MODEL.ROI_KEYPOINT_HEAD.PREDICTOR = "KeypointRCNNPredictor"
_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO = 0
_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SCALES = (1.0 / 16,)
_C.MODEL.ROI_KEYPOINT_HEAD.MLP_HEAD_DIM = 1024
# Eight 512-channel conv layers in the keypoint head
_C.MODEL.ROI_KEYPOINT_HEAD.CONV_LAYERS = tuple(512 for _ in range(8))
_C.MODEL.ROI_KEYPOINT_HEAD.RESOLUTION = 14
# Number of keypoint types (17 matches the COCO person keypoints)
_C.MODEL.ROI_KEYPOINT_HEAD.NUM_CLASSES = 17
_C.MODEL.ROI_KEYPOINT_HEAD.SHARE_BOX_FEATURE_EXTRACTOR = True
# ---------------------------------------------------------------------------- #
# ResNe[X]t options (ResNets = {ResNet, ResNeXt})
# Note that parts of a resnet may be used for both the backbone and the head
# These options apply to both
# ---------------------------------------------------------------------------- #
_C.MODEL.RESNETS = CN()
# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt
_C.MODEL.RESNETS.NUM_GROUPS = 1
# Baseline width of each group
_C.MODEL.RESNETS.WIDTH_PER_GROUP = 64
# Place the stride 2 conv on the 1x1 filter
# Use True only for the original MSRA ResNet; use False for C2 and Torch models
_C.MODEL.RESNETS.STRIDE_IN_1X1 = True
# Residual transformation function
_C.MODEL.RESNETS.TRANS_FUNC = "BottleneckWithFixedBatchNorm"
# ResNet's stem function (conv1 and pool1)
_C.MODEL.RESNETS.STEM_FUNC = "StemWithFixedBatchNorm"
# Apply dilation in stage "res5"
_C.MODEL.RESNETS.RES5_DILATION = 1
_C.MODEL.RESNETS.BACKBONE_OUT_CHANNELS = 256 * 4
_C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256
_C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64
_C.MODEL.RESNETS.STAGE_WITH_DCN = (False, False, False, False)
_C.MODEL.RESNETS.WITH_MODULATED_DCN = False
_C.MODEL.RESNETS.DEFORMABLE_GROUPS = 1
# ---------------------------------------------------------------------------- #
# RetinaNet Options (Follow the Detectron version)
# ---------------------------------------------------------------------------- #
_C.MODEL.RETINANET = CN()
# This is the number of foreground classes and background.
_C.MODEL.RETINANET.NUM_CLASSES = 81
# Anchor aspect ratios to use
_C.MODEL.RETINANET.ANCHOR_SIZES = (32, 64, 128, 256, 512)
_C.MODEL.RETINANET.ASPECT_RATIOS = (0.5, 1.0, 2.0)
_C.MODEL.RETINANET.ANCHOR_STRIDES = (8, 16, 32, 64, 128)
_C.MODEL.RETINANET.STRADDLE_THRESH = 0
# Anchor scales per octave
_C.MODEL.RETINANET.OCTAVE = 2.0
_C.MODEL.RETINANET.SCALES_PER_OCTAVE = 3
# Use C5 or P5 to generate P6
_C.MODEL.RETINANET.USE_C5 = True
# Convolutions to use in the cls and bbox tower
# NOTE: this doesn't include the last conv for logits
_C.MODEL.RETINANET.NUM_CONVS = 4
# Weight for bbox_regression loss
_C.MODEL.RETINANET.BBOX_REG_WEIGHT = 4.0
# Smooth L1 loss beta for bbox regression
_C.MODEL.RETINANET.BBOX_REG_BETA = 0.11
# During inference, #locs to select based on cls score before NMS is performed
# per FPN level
_C.MODEL.RETINANET.PRE_NMS_TOP_N = 1000
# IoU overlap ratio for labeling an anchor as positive
# Anchors with >= iou overlap are labeled positive
_C.MODEL.RETINANET.FG_IOU_THRESHOLD = 0.5
# IoU overlap ratio for labeling an anchor as negative
# Anchors with < iou overlap are labeled negative
_C.MODEL.RETINANET.BG_IOU_THRESHOLD = 0.4
# Focal loss parameter: alpha
_C.MODEL.RETINANET.LOSS_ALPHA = 0.25
# Focal loss parameter: gamma
_C.MODEL.RETINANET.LOSS_GAMMA = 2.0
# Prior prob for the positives at the beginning of training. This is used to set
# the bias init for the logits layer
_C.MODEL.RETINANET.PRIOR_PROB = 0.01
# Inference cls score threshold, anchors with score > INFERENCE_TH are
# considered for inference
_C.MODEL.RETINANET.INFERENCE_TH = 0.05
# NMS threshold used in RetinaNet
_C.MODEL.RETINANET.NMS_TH = 0.4
# ---------------------------------------------------------------------------- #
# FBNet options
# ---------------------------------------------------------------------------- #
_C.MODEL.FBNET = CN()
_C.MODEL.FBNET.ARCH = "default"
# custom arch
_C.MODEL.FBNET.ARCH_DEF = ""
_C.MODEL.FBNET.BN_TYPE = "bn"
_C.MODEL.FBNET.SCALE_FACTOR = 1.0
# the output channels will be divisible by WIDTH_DIVISOR
_C.MODEL.FBNET.WIDTH_DIVISOR = 1
_C.MODEL.FBNET.DW_CONV_SKIP_BN = True
_C.MODEL.FBNET.DW_CONV_SKIP_RELU = True
# > 0 scale, == 0 skip, < 0 same dimension
_C.MODEL.FBNET.DET_HEAD_LAST_SCALE = 1.0
_C.MODEL.FBNET.DET_HEAD_BLOCKS = []
# overwrite the stride for the head, 0 to use original value
_C.MODEL.FBNET.DET_HEAD_STRIDE = 0
# > 0 scale, == 0 skip, < 0 same dimension
_C.MODEL.FBNET.KPTS_HEAD_LAST_SCALE = 0.0
_C.MODEL.FBNET.KPTS_HEAD_BLOCKS = []
# overwrite the stride for the head, 0 to use original value
_C.MODEL.FBNET.KPTS_HEAD_STRIDE = 0
# > 0 scale, == 0 skip, < 0 same dimension
_C.MODEL.FBNET.MASK_HEAD_LAST_SCALE = 0.0
_C.MODEL.FBNET.MASK_HEAD_BLOCKS = []
# overwrite the stride for the head, 0 to use original value
_C.MODEL.FBNET.MASK_HEAD_STRIDE = 0
# 0 to use all blocks defined in arch_def
_C.MODEL.FBNET.RPN_HEAD_BLOCKS = 0
_C.MODEL.FBNET.RPN_BN_TYPE = ""
# ---------------------------------------------------------------------------- #
# Solver
# ---------------------------------------------------------------------------- #
_C.SOLVER = CN()
# Total number of SGD iterations
_C.SOLVER.MAX_ITER = 40000
# Base learning rate (scaled for the bias parameters via BIAS_LR_FACTOR)
_C.SOLVER.BASE_LR = 0.001
_C.SOLVER.BIAS_LR_FACTOR = 2
_C.SOLVER.MOMENTUM = 0.9
# L2 regularization; biases use the separate WEIGHT_DECAY_BIAS value
_C.SOLVER.WEIGHT_DECAY = 0.0005
_C.SOLVER.WEIGHT_DECAY_BIAS = 0
# Step-schedule parameters: LR is multiplied by GAMMA at each iteration in STEPS
_C.SOLVER.GAMMA = 0.1
_C.SOLVER.STEPS = (30000,)
# Linear warmup: start at WARMUP_FACTOR * BASE_LR and ramp up over WARMUP_ITERS
_C.SOLVER.WARMUP_FACTOR = 1.0 / 3
_C.SOLVER.WARMUP_ITERS = 500
_C.SOLVER.WARMUP_METHOD = "linear"
# Save a checkpoint every CHECKPOINT_PERIOD iterations
_C.SOLVER.CHECKPOINT_PERIOD = 2500
# Run evaluation every TEST_PERIOD iterations (0 disables periodic testing)
_C.SOLVER.TEST_PERIOD = 0
# Number of images per batch
# This is global, so if we have 8 GPUs and IMS_PER_BATCH = 16, each GPU will
# see 2 images per batch
_C.SOLVER.IMS_PER_BATCH = 16
# ---------------------------------------------------------------------------- #
# Specific test options
# ---------------------------------------------------------------------------- #
_C.TEST = CN()
_C.TEST.EXPECTED_RESULTS = []
_C.TEST.EXPECTED_RESULTS_SIGMA_TOL = 4
# Number of images per batch
# This is global, so if we have 8 GPUs and IMS_PER_BATCH = 16, each GPU will
# see 2 images per batch
_C.TEST.IMS_PER_BATCH = 8
# Number of detections per image
_C.TEST.DETECTIONS_PER_IMG = 100
# ---------------------------------------------------------------------------- #
# Test-time augmentations for bounding box detection
# See configs/test_time_aug/e2e_mask_rcnn_R-50-FPN_1x.yaml for an example
# ---------------------------------------------------------------------------- #
_C.TEST.BBOX_AUG = CN()
# Enable test-time augmentation for bounding box detection if True
_C.TEST.BBOX_AUG.ENABLED = False
# Horizontal flip at the original scale (id transform)
_C.TEST.BBOX_AUG.H_FLIP = False
# Each scale is the pixel size of an image's shortest side
_C.TEST.BBOX_AUG.SCALES = ()
# Max pixel size of the longer side
_C.TEST.BBOX_AUG.MAX_SIZE = 4000
# Horizontal flip at each scale
_C.TEST.BBOX_AUG.SCALE_H_FLIP = False
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #
_C.OUTPUT_DIR = "."
_C.PATHS_CATALOG = os.path.join(os.path.dirname(__file__), "paths_catalog.py")
# ---------------------------------------------------------------------------- #
# Precision options
# ---------------------------------------------------------------------------- #
# Precision of input, allowable: (float32, float16)
_C.DTYPE = "float32"
# Enable verbosity in apex.amp
_C.AMP_VERBOSE = False
| 37.704255 | 83 | 0.644715 |
import os
from yacs.config import CfgNode as CN
_C = CN()
_C.MODEL = CN()
_C.MODEL.RPN_ONLY = False
_C.MODEL.MASK_ON = False
_C.MODEL.RETINANET_ON = False
_C.MODEL.KEYPOINT_ON = False
_C.MODEL.DEVICE = "cuda"
_C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN"
_C.MODEL.CLS_AGNOSTIC_BBOX_REG = False
_C.MODEL.WEIGHT = ""
_C.INPUT = CN()
_C.INPUT.MIN_SIZE_TRAIN = (800,)
_C.INPUT.MAX_SIZE_TRAIN = 1333
_C.INPUT.MIN_SIZE_TEST = 800
_C.INPUT.MAX_SIZE_TEST = 1333
_C.INPUT.PIXEL_MEAN = [102.9801, 115.9465, 122.7717]
_C.INPUT.PIXEL_STD = [1., 1., 1.]
_C.INPUT.TO_BGR255 = True
_C.INPUT.BRIGHTNESS = 0.0
_C.INPUT.CONTRAST = 0.0
_C.INPUT.SATURATION = 0.0
_C.INPUT.HUE = 0.0
_C.INPUT.HORIZONTAL_FLIP_PROB_TRAIN = 0.5
_C.INPUT.VERTICAL_FLIP_PROB_TRAIN = 0.0
_C.DATASETS = CN()
_C.DATASETS.TRAIN = ()
_C.DATASETS.TEST = ()
_C.DATALOADER = CN()
_C.DATALOADER.NUM_WORKERS = 4
_C.DATALOADER.SIZE_DIVISIBILITY = 0
_C.DATALOADER.ASPECT_RATIO_GROUPING = True
_C.MODEL.BACKBONE = CN()
_C.MODEL.BACKBONE.CONV_BODY = "R-50-C4"
_C.MODEL.BACKBONE.FREEZE_CONV_BODY_AT = 2
_C.MODEL.FPN = CN()
_C.MODEL.FPN.USE_GN = False
_C.MODEL.FPN.USE_RELU = False
_C.MODEL.GROUP_NORM = CN()
_C.MODEL.GROUP_NORM.DIM_PER_GP = -1
_C.MODEL.GROUP_NORM.NUM_GROUPS = 32
_C.MODEL.GROUP_NORM.EPSILON = 1e-5
# ---------------------------------------------------------------------------- #
# RPN options
# ---------------------------------------------------------------------------- #
_C.MODEL.RPN = CN()
_C.MODEL.RPN.USE_FPN = False
# Base RPN anchor sizes given in absolute pixels w.r.t. the scaled network input
_C.MODEL.RPN.ANCHOR_SIZES = (32, 64, 128, 256, 512)
# Stride of the feature map that RPN is attached.
# For FPN, number of strides should match number of scales
_C.MODEL.RPN.ANCHOR_STRIDE = (16,)
# RPN anchor aspect ratios
_C.MODEL.RPN.ASPECT_RATIOS = (0.5, 1.0, 2.0)
# Remove RPN anchors that go outside the image by RPN_STRADDLE_THRESH pixels
# Set to -1 or a large value, e.g. 100000, to disable pruning anchors
_C.MODEL.RPN.STRADDLE_THRESH = 0
# Minimum overlap required between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD
# ==> positive RPN example)
_C.MODEL.RPN.FG_IOU_THRESHOLD = 0.7
# Maximum overlap allowed between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a negative examples (IoU < BG_IOU_THRESHOLD
# ==> negative RPN example)
_C.MODEL.RPN.BG_IOU_THRESHOLD = 0.3
# Total number of RPN examples per image
_C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256
# Target fraction of foreground (positive) examples per RPN minibatch
_C.MODEL.RPN.POSITIVE_FRACTION = 0.5
# Number of top scoring RPN proposals to keep before applying NMS
# When FPN is used, this is *per FPN level* (not total)
_C.MODEL.RPN.PRE_NMS_TOP_N_TRAIN = 12000
_C.MODEL.RPN.PRE_NMS_TOP_N_TEST = 6000
# Number of top scoring RPN proposals to keep after applying NMS
_C.MODEL.RPN.POST_NMS_TOP_N_TRAIN = 2000
_C.MODEL.RPN.POST_NMS_TOP_N_TEST = 1000
# NMS threshold used on RPN proposals
_C.MODEL.RPN.NMS_THRESH = 0.7
# Proposal height and width both need to be greater than RPN_MIN_SIZE
# (a the scale used during training or inference)
_C.MODEL.RPN.MIN_SIZE = 0
# Number of top scoring RPN proposals to keep after combining proposals from
# all FPN levels
_C.MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN = 2000
_C.MODEL.RPN.FPN_POST_NMS_TOP_N_TEST = 2000
# Apply the post NMS per batch (default) or per image during training
# (default is True to be consistent with Detectron, see Issue #672)
_C.MODEL.RPN.FPN_POST_NMS_PER_BATCH = True
# Custom rpn head, empty to use default conv or separable conv
_C.MODEL.RPN.RPN_HEAD = "SingleConvRPNHead"
# ---------------------------------------------------------------------------- #
# ROI HEADS options
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_HEADS = CN()
_C.MODEL.ROI_HEADS.USE_FPN = False
# Overlap threshold for an RoI to be considered foreground (if >= FG_IOU_THRESHOLD)
_C.MODEL.ROI_HEADS.FG_IOU_THRESHOLD = 0.5
# Overlap threshold for an RoI to be considered background
# (class = 0 if overlap in [0, BG_IOU_THRESHOLD))
_C.MODEL.ROI_HEADS.BG_IOU_THRESHOLD = 0.5
# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets
# These are empirically chosen to approximately lead to unit variance targets
_C.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS = (10., 10., 5., 5.)
# RoI minibatch size *per image* (number of regions of interest [ROIs])
# Total number of RoIs per training minibatch =
# TRAIN.BATCH_SIZE_PER_IM * TRAIN.IMS_PER_BATCH
# E.g., a common configuration is: 512 * 2 * 8 = 8192
_C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
# Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0)
_C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25
# Only used on test mode
# Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to
# balance obtaining high recall with not having too many low precision
# detections that will slow down inference post processing steps (like NMS)
_C.MODEL.ROI_HEADS.SCORE_THRESH = 0.05
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
_C.MODEL.ROI_HEADS.NMS = 0.5
# Maximum number of detections to return per image (100 is based on the limit
# established for the COCO dataset)
_C.MODEL.ROI_HEADS.DETECTIONS_PER_IMG = 100
_C.MODEL.ROI_BOX_HEAD = CN()
_C.MODEL.ROI_BOX_HEAD.FEATURE_EXTRACTOR = "ResNet50Conv5ROIFeatureExtractor"
_C.MODEL.ROI_BOX_HEAD.PREDICTOR = "FastRCNNPredictor"
_C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0
_C.MODEL.ROI_BOX_HEAD.POOLER_SCALES = (1.0 / 16,)
_C.MODEL.ROI_BOX_HEAD.NUM_CLASSES = 2 #####
# Hidden layer dimension when using an MLP for the RoI box head
_C.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM = 1024
# GN
_C.MODEL.ROI_BOX_HEAD.USE_GN = False
# Dilation
_C.MODEL.ROI_BOX_HEAD.DILATION = 1
_C.MODEL.ROI_BOX_HEAD.CONV_HEAD_DIM = 256
_C.MODEL.ROI_BOX_HEAD.NUM_STACKED_CONVS = 4
_C.MODEL.ROI_MASK_HEAD = CN()
_C.MODEL.ROI_MASK_HEAD.FEATURE_EXTRACTOR = "ResNet50Conv5ROIFeatureExtractor"
_C.MODEL.ROI_MASK_HEAD.PREDICTOR = "MaskRCNNC4Predictor"
_C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0
_C.MODEL.ROI_MASK_HEAD.POOLER_SCALES = (1.0 / 16,)
_C.MODEL.ROI_MASK_HEAD.MLP_HEAD_DIM = 1024
_C.MODEL.ROI_MASK_HEAD.CONV_LAYERS = (256, 256, 256, 256)
_C.MODEL.ROI_MASK_HEAD.RESOLUTION = 14
_C.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR = True
# Whether or not resize and translate masks to the input image.
_C.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS = False
_C.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS_THRESHOLD = 0.5
# Dilation
_C.MODEL.ROI_MASK_HEAD.DILATION = 1
# GN
_C.MODEL.ROI_MASK_HEAD.USE_GN = False
_C.MODEL.ROI_KEYPOINT_HEAD = CN()
_C.MODEL.ROI_KEYPOINT_HEAD.FEATURE_EXTRACTOR = "KeypointRCNNFeatureExtractor"
_C.MODEL.ROI_KEYPOINT_HEAD.PREDICTOR = "KeypointRCNNPredictor"
_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO = 0
_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SCALES = (1.0 / 16,)
_C.MODEL.ROI_KEYPOINT_HEAD.MLP_HEAD_DIM = 1024
_C.MODEL.ROI_KEYPOINT_HEAD.CONV_LAYERS = tuple(512 for _ in range(8))
_C.MODEL.ROI_KEYPOINT_HEAD.RESOLUTION = 14
_C.MODEL.ROI_KEYPOINT_HEAD.NUM_CLASSES = 17
_C.MODEL.ROI_KEYPOINT_HEAD.SHARE_BOX_FEATURE_EXTRACTOR = True
# ---------------------------------------------------------------------------- #
# ResNe[X]t options (ResNets = {ResNet, ResNeXt}
# Note that parts of a resnet may be used for both the backbone and the head
# These options apply to both
# ---------------------------------------------------------------------------- #
_C.MODEL.RESNETS = CN()
# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt
_C.MODEL.RESNETS.NUM_GROUPS = 1
# Baseline width of each group
_C.MODEL.RESNETS.WIDTH_PER_GROUP = 64
# Place the stride 2 conv on the 1x1 filter
# Use True only for the original MSRA ResNet; use False for C2 and Torch models
_C.MODEL.RESNETS.STRIDE_IN_1X1 = True
# Residual transformation function
_C.MODEL.RESNETS.TRANS_FUNC = "BottleneckWithFixedBatchNorm"
# ResNet's stem function (conv1 and pool1)
_C.MODEL.RESNETS.STEM_FUNC = "StemWithFixedBatchNorm"
_C.MODEL.RESNETS.RES5_DILATION = 1
_C.MODEL.RESNETS.BACKBONE_OUT_CHANNELS = 256 * 4
_C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256
_C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64
_C.MODEL.RESNETS.STAGE_WITH_DCN = (False, False, False, False)
_C.MODEL.RESNETS.WITH_MODULATED_DCN = False
_C.MODEL.RESNETS.DEFORMABLE_GROUPS = 1
_C.MODEL.RETINANET = CN()
_C.MODEL.RETINANET.NUM_CLASSES = 81
_C.MODEL.RETINANET.ANCHOR_SIZES = (32, 64, 128, 256, 512)
_C.MODEL.RETINANET.ASPECT_RATIOS = (0.5, 1.0, 2.0)
_C.MODEL.RETINANET.ANCHOR_STRIDES = (8, 16, 32, 64, 128)
_C.MODEL.RETINANET.STRADDLE_THRESH = 0
_C.MODEL.RETINANET.OCTAVE = 2.0
_C.MODEL.RETINANET.SCALES_PER_OCTAVE = 3
_C.MODEL.RETINANET.USE_C5 = True
_C.MODEL.RETINANET.NUM_CONVS = 4
# Weight for bbox_regression loss
_C.MODEL.RETINANET.BBOX_REG_WEIGHT = 4.0
# Smooth L1 loss beta for bbox regression
_C.MODEL.RETINANET.BBOX_REG_BETA = 0.11
# During inference, #locs to select based on cls score before NMS is performed
# per FPN level
_C.MODEL.RETINANET.PRE_NMS_TOP_N = 1000
# IoU overlap ratio for labeling an anchor as positive
# Anchors with >= iou overlap are labeled positive
_C.MODEL.RETINANET.FG_IOU_THRESHOLD = 0.5
# IoU overlap ratio for labeling an anchor as negative
# Anchors with < iou overlap are labeled negative
_C.MODEL.RETINANET.BG_IOU_THRESHOLD = 0.4
# Focal loss parameter: alpha
_C.MODEL.RETINANET.LOSS_ALPHA = 0.25
# Focal loss parameter: gamma
_C.MODEL.RETINANET.LOSS_GAMMA = 2.0
# Prior prob for the positives at the beginning of training. This is used to set
# the bias init for the logits layer
_C.MODEL.RETINANET.PRIOR_PROB = 0.01
# Inference cls score threshold, anchors with score > INFERENCE_TH are
# considered for inference
_C.MODEL.RETINANET.INFERENCE_TH = 0.05
# NMS threshold used in RetinaNet
_C.MODEL.RETINANET.NMS_TH = 0.4
# ---------------------------------------------------------------------------- #
# FBNet options
# ---------------------------------------------------------------------------- #
_C.MODEL.FBNET = CN()
_C.MODEL.FBNET.ARCH = "default"
# custom arch
_C.MODEL.FBNET.ARCH_DEF = ""
_C.MODEL.FBNET.BN_TYPE = "bn"
_C.MODEL.FBNET.SCALE_FACTOR = 1.0
# the output channels will be divisible by WIDTH_DIVISOR
_C.MODEL.FBNET.WIDTH_DIVISOR = 1
_C.MODEL.FBNET.DW_CONV_SKIP_BN = True
_C.MODEL.FBNET.DW_CONV_SKIP_RELU = True
# > 0 scale, == 0 skip, < 0 same dimension
_C.MODEL.FBNET.DET_HEAD_LAST_SCALE = 1.0
_C.MODEL.FBNET.DET_HEAD_BLOCKS = []
# overwrite the stride for the head, 0 to use original value
_C.MODEL.FBNET.DET_HEAD_STRIDE = 0
# > 0 scale, == 0 skip, < 0 same dimension
_C.MODEL.FBNET.KPTS_HEAD_LAST_SCALE = 0.0
_C.MODEL.FBNET.KPTS_HEAD_BLOCKS = []
# overwrite the stride for the head, 0 to use original value
_C.MODEL.FBNET.KPTS_HEAD_STRIDE = 0
# > 0 scale, == 0 skip, < 0 same dimension
_C.MODEL.FBNET.MASK_HEAD_LAST_SCALE = 0.0
_C.MODEL.FBNET.MASK_HEAD_BLOCKS = []
# overwrite the stride for the head, 0 to use original value
_C.MODEL.FBNET.MASK_HEAD_STRIDE = 0
# 0 to use all blocks defined in arch_def
_C.MODEL.FBNET.RPN_HEAD_BLOCKS = 0
_C.MODEL.FBNET.RPN_BN_TYPE = ""
# ---------------------------------------------------------------------------- #
# Solver
# ---------------------------------------------------------------------------- #
_C.SOLVER = CN()
_C.SOLVER.MAX_ITER = 40000
_C.SOLVER.BASE_LR = 0.001
_C.SOLVER.BIAS_LR_FACTOR = 2
_C.SOLVER.MOMENTUM = 0.9
_C.SOLVER.WEIGHT_DECAY = 0.0005
_C.SOLVER.WEIGHT_DECAY_BIAS = 0
_C.SOLVER.GAMMA = 0.1
_C.SOLVER.STEPS = (30000,)
_C.SOLVER.WARMUP_FACTOR = 1.0 / 3
_C.SOLVER.WARMUP_ITERS = 500
_C.SOLVER.WARMUP_METHOD = "linear"
_C.SOLVER.CHECKPOINT_PERIOD = 2500
_C.SOLVER.TEST_PERIOD = 0
# Number of images per batch
# This is global, so if we have 8 GPUs and IMS_PER_BATCH = 16, each GPU will
# see 2 images per batch
_C.SOLVER.IMS_PER_BATCH = 16
# ---------------------------------------------------------------------------- #
# Specific test options
# ---------------------------------------------------------------------------- #
_C.TEST = CN()
_C.TEST.EXPECTED_RESULTS = []
_C.TEST.EXPECTED_RESULTS_SIGMA_TOL = 4
# Number of images per batch
# This is global, so if we have 8 GPUs and IMS_PER_BATCH = 16, each GPU will
# see 2 images per batch
_C.TEST.IMS_PER_BATCH = 8
# Number of detections per image
_C.TEST.DETECTIONS_PER_IMG = 100
# ---------------------------------------------------------------------------- #
# Test-time augmentations for bounding box detection
# See configs/test_time_aug/e2e_mask_rcnn_R-50-FPN_1x.yaml for an example
# ---------------------------------------------------------------------------- #
_C.TEST.BBOX_AUG = CN()
# Enable test-time augmentation for bounding box detection if True
_C.TEST.BBOX_AUG.ENABLED = False
# Horizontal flip at the original scale (id transform)
_C.TEST.BBOX_AUG.H_FLIP = False
# Each scale is the pixel size of an image's shortest side
_C.TEST.BBOX_AUG.SCALES = ()
_C.TEST.BBOX_AUG.MAX_SIZE = 4000
_C.TEST.BBOX_AUG.SCALE_H_FLIP = False
_C.OUTPUT_DIR = "."
_C.PATHS_CATALOG = os.path.join(os.path.dirname(__file__), "paths_catalog.py")
_C.DTYPE = "float32"
_C.AMP_VERBOSE = False
| true | true |
f73574dea6f487ec48f1f14a66b63368c035037e | 7,807 | py | Python | examples/pwr_run/checkpointing/throughput/final4_new2/job48.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/throughput/final4_new2/job48.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/throughput/final4_new2/job48.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.resnet import ResNet50, ResNet101, ResNet152
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
# Pin this process to the GPU chosen by the scheduler.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 32
args_lr = 0.0014
args_model = 'resnet101'
epoch_begin_time = 0
# Job name is derived from this script's file name (e.g. 'job48').
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_final4/' + job_name + '*'
total_epochs = 134
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
    # Checkpoint files are named '<job>_<epoch>.h5'; recover the epoch number.
    save_file = glob.glob(save_files)[0]
    # epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
    starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
    x_train_mean = np.mean(x_train, axis=0)
    x_train -= x_train_mean
    x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
    # Resuming: reload architecture + weights + optimizer state in one call.
    print('resume from checkpoint')
    message = job_name + ' b_end'
    send_signal.send(args.node, 10002, message)
    model = keras.models.load_model(save_file)
    message = job_name + ' c_end'
    send_signal.send(args.node, 10002, message)
else:
    print('train from start')
    model = models.Sequential()
    if '50' in args_model:
        base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
    elif '101' in args_model:
        base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
    elif '152' in args_model:
        base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
    #base_model.summary()
    #pdb.set_trace()
    #model.add(layers.UpSampling2D((2,2)))
    #model.add(layers.UpSampling2D((2,2)))
    #model.add(layers.UpSampling2D((2,2)))
    model.add(base_model)
    model.add(layers.Flatten())
    #model.add(layers.BatchNormalization())
    #model.add(layers.Dense(128, activation='relu'))
    #model.add(layers.Dropout(0.5))
    #model.add(layers.BatchNormalization())
    #model.add(layers.Dense(64, activation='relu'))
    #model.add(layers.Dropout(0.5))
    #model.add(layers.BatchNormalization())
    model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=args_lr),
                  metrics=['accuracy'])
    #model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
    """SIGTERM handler: report wasted epoch time, checkpoint the model, exit."""
    global epoch_begin_time
    # Time lost in the current (unfinished) epoch; zero if none has started yet.
    wasted = 0 if epoch_begin_time == 0 else int(time.time() - epoch_begin_time)
    message = job_name + ' waste ' + str(wasted)  # e.g. 'job50 waste 100'
    if wasted > 0:
        send_signal.send(args.node, 10002, message)
    print('checkpointing the model triggered by kill -15 signal')
    # Remove any stale checkpoint files before writing the fresh one.
    for stale in glob.glob(save_files):
        os.remove(stale)
    model.save('/scratch/li.baol/checkpoint_final4/' + job_name + '_' + str(current_epoch) + '.h5')
    print('(SIGTERM) terminating the process')
    send_signal.send(args.node, 10002, job_name + ' checkpoint')
    sys.exit()
# Route SIGTERM (kill -15) to the checkpointing handler above.
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
# TensorBoard logs are grouped by testcase, then by job name.
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
# Wall-clock start of the first epoch run by this process; set by the callback.
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
    """Keras callback that reports per-epoch progress to the scheduler node."""
    def on_epoch_begin(self, epoch, logs=None):
        global current_epoch, first_epoch_start, epoch_begin_time
        current_epoch = epoch
        print('current epoch ' + str(current_epoch))
        epoch_begin_time = time.time()
        if epoch == starting_epoch:
            # First epoch executed by this process (fresh start or resume).
            first_epoch_start = time.time()
            if args.resume:
                # 'd_end': restore-from-checkpoint phase has finished.
                send_signal.send(args.node, 10002, job_name + ' d_end')
            # Tell the scheduler that checkpointing this job is now meaningful.
            send_signal.send(args.node, 10002, job_name + ' ckpt_qual')
    def on_epoch_end(self, epoch, logs=None):
        if epoch == starting_epoch:
            elapsed = int(time.time() - first_epoch_start)
            send_signal.send(args.node, 10002, job_name + ' 1st_epoch ' + str(elapsed))
        done = round((epoch+1) / round(total_epochs/2), 2)
        send_signal.send(args.node, 10002, job_name + ' completion ' + str(done))
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
# Only half of total_epochs is run here; overall progress is reported to the
# scheduler via the 'completion' messages sent from the callback.
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=round(total_epochs/2),
          validation_data=(x_test, y_test),
          shuffle=True,
          callbacks=callbacks,
          initial_epoch=starting_epoch,
          verbose=1
          )
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| 32.940928 | 118 | 0.691175 |
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.resnet import ResNet50, ResNet101, ResNet152
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
batch_size = 32
args_lr = 0.0014
args_model = 'resnet101'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_final4/' + job_name + '*'
total_epochs = 134
starting_epoch = 0
pid = os.getpid()
message = job_name + ' pid ' + str(pid)
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
subtract_pixel_mean = True
n = 3
model_type = args.tc
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '50' in args_model:
base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '101' in args_model:
base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '152' in args_model:
base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
print(model_type)
current_epoch = 0
| true | true |
f735756443eef240e21fb562830541755be870bb | 499 | py | Python | web/migrations/0014_auto_20160903_0626.py | acuestap/smarttools_test | caa8760be4dfd502d31f5e396392d1d455ebdca8 | [
"MIT"
] | null | null | null | web/migrations/0014_auto_20160903_0626.py | acuestap/smarttools_test | caa8760be4dfd502d31f5e396392d1d455ebdca8 | [
"MIT"
] | null | null | null | web/migrations/0014_auto_20160903_0626.py | acuestap/smarttools_test | caa8760be4dfd502d31f5e396392d1d455ebdca8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-03 06:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('web', '0013_auto_20160903_0624'),
]
operations = [
migrations.AlterField(
model_name='competition',
name='image',
field=models.ImageField(null=True, upload_to='static/upload_files/competitions/images'),
),
]
| 23.761905 | 100 | 0.639279 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('web', '0013_auto_20160903_0624'),
]
operations = [
migrations.AlterField(
model_name='competition',
name='image',
field=models.ImageField(null=True, upload_to='static/upload_files/competitions/images'),
),
]
| true | true |
f7357920e547b4be28ed27139ff673d4193773ae | 3,065 | py | Python | source/__init__.py | Mozzo1000/InstPakg | 994e608e1ec9ab465d5533f656f12b792191c8ba | [
"MIT"
] | null | null | null | source/__init__.py | Mozzo1000/InstPakg | 994e608e1ec9ab465d5533f656f12b792191c8ba | [
"MIT"
] | 10 | 2015-01-04T17:52:41.000Z | 2016-09-13T10:24:29.000Z | source/__init__.py | Mozzo1000/InstPakg | 994e608e1ec9ab465d5533f656f12b792191c8ba | [
"MIT"
] | null | null | null | import subprocess, os, glob, GlobalUtils, InstallUtil, Platform
from JsonUtil import *
from menu import cmenu
yes = set(["yes", "y"])  # accepted affirmative answers (input is lower-cased first)
home = os.getenv("HOME")
JSON_LOCATION = home + "/.instpakg"  # directory holding the package-list JSON files
DEFAULT_JSON = JSON_LOCATION + "/DEFAULT.json"  # currently selected package list
jsonInstall = ""  # parsed package entries for the detected package manager
markedInstall = []  # program names the user has confirmed for installation
markedRepo = []  # extra repositories required by the marked programs
markedCommand = []  # shell commands required before installing marked programs
def initJson():
    """Load the active JSON file and select entries for the local package manager."""
    global jsonInstall
    load_json(DEFAULT_JSON)
    # apt takes precedence over yum, matching the original if/elif order.
    for manager in ("apt", "yum"):
        if Platform.getPackage(manager):
            jsonInstall = get_json(manager)
            break
def bulkInstall():
    """Install every package from the JSON list without prompting the user."""
    initJson()
    # Add all required repositories first, then refresh the package index once.
    for item in jsonInstall:
        InstallUtil.forceAddRepository(item["repo"])
    InstallUtil.update()
    # BUG FIX: this loop iterated over the undefined name 'root' (NameError at
    # runtime); it must walk the same loaded package list as the loop above.
    for item in jsonInstall:
        if item["command"]:
            # Some packages require a preparatory shell command before install.
            InstallUtil.call(item["command"])
        InstallUtil.forceInstall(item["app"])
    close_json()
def mark(program, repo, command):
    """Mark *program* for install, confirming any required repo or command.

    If the user declines the prerequisite, the program is unmarked again.
    """
    markedInstall.append(program)
    if repo:
        choice = raw_input("Do you want to add ppa " + repo + " (Required to install " + program + ") (y/n)").lower()
        if choice in yes:
            markedRepo.append(repo)
        else:
            print("Cancelled install of " + program)
            markedInstall.remove(program)
    elif command:
        # BUG FIX: the prompt was missing a space after the program name, so it
        # rendered as e.g. "...install vimare you sure?".
        choice = raw_input("The following command is required in order to install " + program + " are you sure? (y/n)\n\033[1m" + command + "\033[0m").lower()
        if choice in yes:
            markedCommand.append(command)
        else:
            print("Cancelled install of " + program)
            markedInstall.remove(program)
def promptInstall():
    """Interactively mark programs, then apply commands, repos and installs."""
    GlobalUtils.clear()
    initJson()
    # Ask about every package in the list and mark the confirmed ones.
    for item in jsonInstall:
        print(item["app"] + "\n-----------------\nINSERT DESCRIPTION!\n")
        choice = raw_input("Do you want to mark\033[1m " + item["app"] + "\033[0m for install? (y/n)").lower()
        if choice in yes:
            mark(item["app"], item["repo"], item["command"])
    # Run any preparatory shell commands the marked packages require.
    if markedCommand:
        choice = raw_input("The following code will now run, are you sure (y/n) \n" + str(markedCommand)).lower()
        if choice in yes:
            for item in markedCommand:
                InstallUtil.call(item)
    # Add required repositories and refresh the package index.
    if markedRepo:
        choice = raw_input("The following repositories will be added, are you sure? (y/n)\n\033[1m" + str(markedRepo) + "\033[0m").lower()
        if choice in yes:
            for item in markedRepo:
                InstallUtil.addRepository(item)
            InstallUtil.update()
    else:
        print("No external repositories are required!")
    # BUG FIX: this final confirmation did not lower-case the answer, so "Y" or
    # "Yes" was rejected even though every other prompt accepts them.
    choice = raw_input("Are you sure you want to install the following programs? -\n " + str(markedInstall)).lower()
    if choice in yes:
        for item in markedInstall:
            InstallUtil.install(item)
    close_json()
def selectJSON():
    """Let the user pick which JSON package list becomes the active one."""
    global DEFAULT_JSON
    GlobalUtils.clear()
    # BUG FIX: the menu was numbered from os.listdir() order while the chosen
    # path was taken from a separately built glob.glob() list, so the printed
    # index could point at a different file (and the glob was rebuilt on every
    # loop iteration).  Build one list and use it for both display and lookup.
    files = [os.path.join(JSON_LOCATION, name)
             for name in os.listdir(JSON_LOCATION) if name.endswith(".json")]
    for num, path in enumerate(files):
        print("[" + str(num) + "] " + os.path.basename(path))
    choice = raw_input("Choose one [0-" + str(len(files) - 1) + "] ")
    print(files[int(choice)])
    DEFAULT_JSON = files[int(choice)]
def main():
    """Show the top-level menu until the user exits."""
    try:
        # Renamed from 'list', which shadowed the builtin of the same name.
        entries = [{"Install software": promptInstall}, {"Bulk Software Install": bulkInstall}, {"Select JSON file": selectJSON}, {"Exit": GlobalUtils.exit}]
        menu = cmenu(entries, "InstPakg Menu")
        menu.display()
    except SystemExit:
        pass
    else:
        # Only clean up after a normal (non-SystemExit) return from the menu.
        menu.cleanup()
| 29.190476 | 151 | 0.685808 | import subprocess, os, glob, GlobalUtils, InstallUtil, Platform
from JsonUtil import *
from menu import cmenu
yes = set(["yes", "y"])
home = os.getenv("HOME")
JSON_LOCATION = home + "/.instpakg"
DEFAULT_JSON = JSON_LOCATION + "/DEFAULT.json"
jsonInstall = ""
markedInstall = []
markedRepo = []
markedCommand = []
def initJson():
global jsonInstall
load_json(DEFAULT_JSON)
if Platform.getPackage("apt"):
jsonInstall = get_json("apt")
elif Platform.getPackage("yum"):
jsonInstall = get_json("yum")
def bulkInstall():
initJson()
for item in jsonInstall:
InstallUtil.forceAddRepository(item["repo"])
InstallUtil.update()
for item in root:
if item["command"]:
InstallUtil.call(item["command"])
InstallUtil.forceInstall(item["app"])
close_json()
def mark(program, repo, command):
markedInstall.append(program)
if repo:
choice = raw_input("Do you want to add ppa " + repo + " (Required to install " + program +") (y/n)").lower()
if choice in yes:
markedRepo.append(repo)
else:
print("Cancelled install of " + program)
markedInstall.remove(program)
elif command:
choice = raw_input("The following command is required in order to install " + program + "are you sure? (y/n)\n\033[1m" + command + "\033[0m").lower()
if choice in yes:
markedCommand.append(command)
else:
print("Cancelled install of " + program)
markedInstall.remove(program)
def promptInstall():
GlobalUtils.clear()
initJson()
for item in jsonInstall:
print(item["app"] + "\n-----------------\nINSERT DESCRIPTION!\n")
choice = raw_input("Do you want to mark\033[1m " + item["app"] + "\033[0m for install? (y/n)").lower()
if choice in yes:
mark(item["app"], item["repo"], item["command"])
if markedCommand:
choice = raw_input("The following code will now run, are you sure (y/n) \n" + str(markedCommand)).lower()
if choice in yes:
for item in markedCommand:
InstallUtil.call(item)
if markedRepo:
choice = raw_input("The following repositories will be added, are you sure? (y/n)\n\033[1m" + str(markedRepo) + "\033[0m").lower()
if choice in yes:
for item in markedRepo:
InstallUtil.addRepository(item)
InstallUtil.update()
else:
print("No external repositories are required!")
choice = raw_input("Are you sure you want to install the following programs? -\n " + str(markedInstall))
if choice in yes:
for item in markedInstall:
InstallUtil.install(item)
close_json()
def selectJSON():
global DEFAULT_JSON
num = -1
GlobalUtils.clear()
for file in os.listdir(JSON_LOCATION):
if file.endswith(".json"):
files = glob.glob(JSON_LOCATION+"/*.json")
num += 1
print("["+str(num) + "] " + file)
choice = raw_input("Choose one [0-"+str(num)+"] ")
print(files[int(choice)])
DEFAULT_JSON = files[int(choice)]
def main():
try:
list = [{ "Install software": promptInstall }, {"Bulk Software Install": bulkInstall}, {"Select JSON file": selectJSON}, {"Exit": GlobalUtils.exit}]
menu = cmenu(list, "InstPakg Menu")
menu.display()
except SystemExit:
pass
else:
menu.cleanup()
| true | true |
f7357a1b2dc90ab618025ec2ebb1d5489605bacc | 6,803 | py | Python | homeassistant/components/rachio/device.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 7 | 2019-02-07T14:14:12.000Z | 2019-07-28T06:56:10.000Z | homeassistant/components/rachio/device.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 47 | 2020-07-23T07:14:33.000Z | 2022-03-31T06:01:46.000Z | homeassistant/components/rachio/device.py | klauern/home-assistant-core | c18ba6aec0627e6afb6442c678edb5ff2bb17db6 | [
"Apache-2.0"
] | 5 | 2020-03-29T00:29:13.000Z | 2021-09-06T20:58:40.000Z | """Adapter to wrap the rachiopy api for home assistant."""
import logging
from typing import Optional
from homeassistant.const import EVENT_HOMEASSISTANT_STOP, HTTP_OK
from .const import (
KEY_DEVICES,
KEY_ENABLED,
KEY_EXTERNAL_ID,
KEY_FLEX_SCHEDULES,
KEY_ID,
KEY_MAC_ADDRESS,
KEY_MODEL,
KEY_NAME,
KEY_SCHEDULES,
KEY_SERIAL_NUMBER,
KEY_STATUS,
KEY_USERNAME,
KEY_ZONES,
)
from .webhooks import LISTEN_EVENT_TYPES, WEBHOOK_CONST_ID
_LOGGER = logging.getLogger(__name__)
class RachioPerson:
    """Represent a Rachio user."""
    def __init__(self, rachio, config_entry):
        """Create an object from the provided API instance."""
        # The API token behind `rachio` is used later to resolve the user ID.
        self.rachio = rachio
        self.config_entry = config_entry
        self.username = None
        self._id = None
        self._controllers = []
    def setup(self, hass):
        """Rachio device setup."""
        info = self.rachio.person.getInfo()
        assert int(info[0][KEY_STATUS]) == HTTP_OK, "API key error"
        self._id = info[1][KEY_ID]
        # With the user ID in hand, fetch the full user record.
        person = self.rachio.person.get(self._id)
        assert int(person[0][KEY_STATUS]) == HTTP_OK, "User ID error"
        self.username = person[1][KEY_USERNAME]
        for controller in person[1][KEY_DEVICES]:
            webhooks = self.rachio.notification.getDeviceWebhook(controller[KEY_ID])[1]
            # The API gives no way to tell a shared controller from an owned
            # one, so the webhooks are fetched before device setup; that lets
            # us skip controllers we cannot manage instead of failing.  On
            # error the webhook call hands back a dict instead of a list.
            if isinstance(webhooks, dict):
                _LOGGER.error(
                    "Failed to add rachio controller '%s' because of an error: %s",
                    controller[KEY_NAME],
                    webhooks.get("error", "Unknown Error"),
                )
                continue
            iro = RachioIro(hass, self.rachio, controller, webhooks)
            iro.setup()
            self._controllers.append(iro)
        _LOGGER.info('Using Rachio API as user "%s"', self.username)
    @property
    def user_id(self) -> str:
        """Get the user ID as defined by the Rachio API."""
        return self._id
    @property
    def controllers(self) -> list:
        """Get a list of controllers managed by this account."""
        return self._controllers
class RachioIro:
    """Represent a Rachio Iro."""
    def __init__(self, hass, rachio, data, webhooks):
        """Initialize a Rachio device."""
        self.hass = hass
        self.rachio = rachio
        self._id = data[KEY_ID]
        self.name = data[KEY_NAME]
        self.serial_number = data[KEY_SERIAL_NUMBER]
        self.mac_address = data[KEY_MAC_ADDRESS]
        self.model = data[KEY_MODEL]
        self._zones = data[KEY_ZONES]
        self._schedules = data[KEY_SCHEDULES]
        self._flex_schedules = data[KEY_FLEX_SCHEDULES]
        # Keep the raw device payload so entities can read it via init_data.
        self._init_data = data
        # Webhook list pre-fetched by RachioPerson; None means "re-fetch".
        self._webhooks = webhooks
        _LOGGER.debug('%s has ID "%s"', str(self), self.controller_id)
    def setup(self):
        """Rachio Iro setup for webhooks."""
        # Listen for all updates
        self._init_webhooks()
    def _init_webhooks(self) -> None:
        """Start getting updates from the Rachio API."""
        # NOTE: _deinit_webhooks closes over this variable; it is still None
        # for the immediate cleanup call below and is reassigned to the new
        # webhook's ID before the shutdown listener is registered, so the
        # shutdown call sees the updated value.
        current_webhook_id = None
        # First delete any old webhooks that may have stuck around
        def _deinit_webhooks(_) -> None:
            """Stop getting updates from the Rachio API."""
            if not self._webhooks:
                # We fetched webhooks when we created the device, however if we call _init_webhooks
                # again we need to fetch again
                self._webhooks = self.rachio.notification.getDeviceWebhook(
                    self.controller_id
                )[1]
            for webhook in self._webhooks:
                # Delete webhooks created by this integration (external-ID
                # prefix) plus the one registered during this run, if any.
                if (
                    webhook[KEY_EXTERNAL_ID].startswith(WEBHOOK_CONST_ID)
                    or webhook[KEY_ID] == current_webhook_id
                ):
                    self.rachio.notification.deleteWebhook(webhook[KEY_ID])
            self._webhooks = None
        _deinit_webhooks(None)
        # Choose which events to listen for and get their IDs
        event_types = []
        for event_type in self.rachio.notification.getWebhookEventType()[1]:
            if event_type[KEY_NAME] in LISTEN_EVENT_TYPES:
                event_types.append({"id": event_type[KEY_ID]})
        # Register to listen to these events from the device
        url = self.rachio.webhook_url
        auth = WEBHOOK_CONST_ID + self.rachio.webhook_auth
        new_webhook = self.rachio.notification.postWebhook(
            self.controller_id, auth, url, event_types
        )
        # Save ID for deletion at shutdown
        current_webhook_id = new_webhook[1][KEY_ID]
        self.hass.bus.listen(EVENT_HOMEASSISTANT_STOP, _deinit_webhooks)
    def __str__(self) -> str:
        """Display the controller as a string."""
        return f'Rachio controller "{self.name}"'
    @property
    def controller_id(self) -> str:
        """Return the Rachio API controller ID."""
        return self._id
    @property
    def current_schedule(self) -> str:
        """Return the schedule that the device is running right now."""
        return self.rachio.device.getCurrentSchedule(self.controller_id)[1]
    @property
    def init_data(self) -> dict:
        """Return the information used to set up the controller."""
        return self._init_data
    def list_zones(self, include_disabled=False) -> list:
        """Return a list of the zone dicts connected to the device."""
        # All zones
        if include_disabled:
            return self._zones
        # Only enabled zones
        return [z for z in self._zones if z[KEY_ENABLED]]
    def get_zone(self, zone_id) -> Optional[dict]:
        """Return the zone with the given ID."""
        for zone in self.list_zones(include_disabled=True):
            if zone[KEY_ID] == zone_id:
                return zone
        return None
    def list_schedules(self) -> list:
        """Return a list of fixed schedules."""
        return self._schedules
    def list_flex_schedules(self) -> list:
        """Return a list of flex schedules."""
        return self._flex_schedules
    def stop_watering(self) -> None:
        """Stop watering all zones connected to this controller."""
        self.rachio.device.stopWater(self.controller_id)
        _LOGGER.info("Stopped watering of all zones on %s", str(self))
| 35.248705 | 99 | 0.619727 |
import logging
from typing import Optional
from homeassistant.const import EVENT_HOMEASSISTANT_STOP, HTTP_OK
from .const import (
KEY_DEVICES,
KEY_ENABLED,
KEY_EXTERNAL_ID,
KEY_FLEX_SCHEDULES,
KEY_ID,
KEY_MAC_ADDRESS,
KEY_MODEL,
KEY_NAME,
KEY_SCHEDULES,
KEY_SERIAL_NUMBER,
KEY_STATUS,
KEY_USERNAME,
KEY_ZONES,
)
from .webhooks import LISTEN_EVENT_TYPES, WEBHOOK_CONST_ID
_LOGGER = logging.getLogger(__name__)
class RachioPerson:
def __init__(self, rachio, config_entry):
self.rachio = rachio
self.config_entry = config_entry
self.username = None
self._id = None
self._controllers = []
def setup(self, hass):
response = self.rachio.person.getInfo()
assert int(response[0][KEY_STATUS]) == HTTP_OK, "API key error"
self._id = response[1][KEY_ID]
data = self.rachio.person.get(self._id)
assert int(data[0][KEY_STATUS]) == HTTP_OK, "User ID error"
self.username = data[1][KEY_USERNAME]
devices = data[1][KEY_DEVICES]
for controller in devices:
webhooks = self.rachio.notification.getDeviceWebhook(controller[KEY_ID])[1]
if isinstance(webhooks, dict):
_LOGGER.error(
"Failed to add rachio controller '%s' because of an error: %s",
controller[KEY_NAME],
webhooks.get("error", "Unknown Error"),
)
continue
rachio_iro = RachioIro(hass, self.rachio, controller, webhooks)
rachio_iro.setup()
self._controllers.append(rachio_iro)
_LOGGER.info('Using Rachio API as user "%s"', self.username)
@property
def user_id(self) -> str:
return self._id
@property
def controllers(self) -> list:
return self._controllers
class RachioIro:
def __init__(self, hass, rachio, data, webhooks):
self.hass = hass
self.rachio = rachio
self._id = data[KEY_ID]
self.name = data[KEY_NAME]
self.serial_number = data[KEY_SERIAL_NUMBER]
self.mac_address = data[KEY_MAC_ADDRESS]
self.model = data[KEY_MODEL]
self._zones = data[KEY_ZONES]
self._schedules = data[KEY_SCHEDULES]
self._flex_schedules = data[KEY_FLEX_SCHEDULES]
self._init_data = data
self._webhooks = webhooks
_LOGGER.debug('%s has ID "%s"', str(self), self.controller_id)
def setup(self):
self._init_webhooks()
def _init_webhooks(self) -> None:
current_webhook_id = None
def _deinit_webhooks(_) -> None:
if not self._webhooks:
self._webhooks = self.rachio.notification.getDeviceWebhook(
self.controller_id
)[1]
for webhook in self._webhooks:
if (
webhook[KEY_EXTERNAL_ID].startswith(WEBHOOK_CONST_ID)
or webhook[KEY_ID] == current_webhook_id
):
self.rachio.notification.deleteWebhook(webhook[KEY_ID])
self._webhooks = None
_deinit_webhooks(None)
event_types = []
for event_type in self.rachio.notification.getWebhookEventType()[1]:
if event_type[KEY_NAME] in LISTEN_EVENT_TYPES:
event_types.append({"id": event_type[KEY_ID]})
url = self.rachio.webhook_url
auth = WEBHOOK_CONST_ID + self.rachio.webhook_auth
new_webhook = self.rachio.notification.postWebhook(
self.controller_id, auth, url, event_types
)
current_webhook_id = new_webhook[1][KEY_ID]
self.hass.bus.listen(EVENT_HOMEASSISTANT_STOP, _deinit_webhooks)
def __str__(self) -> str:
return f'Rachio controller "{self.name}"'
@property
def controller_id(self) -> str:
return self._id
@property
def current_schedule(self) -> str:
return self.rachio.device.getCurrentSchedule(self.controller_id)[1]
@property
def init_data(self) -> dict:
return self._init_data
def list_zones(self, include_disabled=False) -> list:
if include_disabled:
return self._zones
return [z for z in self._zones if z[KEY_ENABLED]]
def get_zone(self, zone_id) -> Optional[dict]:
for zone in self.list_zones(include_disabled=True):
if zone[KEY_ID] == zone_id:
return zone
return None
def list_schedules(self) -> list:
return self._schedules
def list_flex_schedules(self) -> list:
return self._flex_schedules
def stop_watering(self) -> None:
self.rachio.device.stopWater(self.controller_id)
_LOGGER.info("Stopped watering of all zones on %s", str(self))
| true | true |
f7357be79ed5cf787004c67c6e35b3966042133a | 659 | py | Python | ouch_server.py | jahinzee/theouchteam | 870767cae81ad37b4191ded64c3e83eb48be982a | [
"MIT"
] | 3 | 2022-01-09T02:40:31.000Z | 2022-02-01T03:57:40.000Z | ouch_server.py | jahinzee/theouchteam | 870767cae81ad37b4191ded64c3e83eb48be982a | [
"MIT"
] | null | null | null | ouch_server.py | jahinzee/theouchteam | 870767cae81ad37b4191ded64c3e83eb48be982a | [
"MIT"
] | 1 | 2022-01-21T08:05:27.000Z | 2022-01-21T08:05:27.000Z | import sys
from src.Exchange import Exchange
if __name__ == "__main__":
    # Optional single CLI argument selects the exchange's output mode.
    exchange = None
    if len(sys.argv) == 2:
        if sys.argv[1] == "debug":
            # Exchange outputs using debug ("dump") mode.
            exchange = Exchange(debug="dump")
        elif sys.argv[1] == "none":
            # Exchange won't output anything.
            exchange = Exchange(debug="none")
        else:
            # BUG FIX: the accepted arguments are 'debug' and 'none' (checked
            # above), but the old message claimed 'dump' and 'none'.
            raise Exception("Command line argument should be either 'debug' or 'none'")
    else:
        exchange = Exchange()
    exchange.open_exchange()
    input()  # Pressing the enter key will cause the server process to terminate.
    exchange.close_exchange()
exchange.close_exchange() | 32.95 | 86 | 0.608498 | import sys
from src.Exchange import Exchange
if __name__ == "__main__":
exchange = None
if len(sys.argv) == 2:
if sys.argv[1] == "debug":
exchange = Exchange(debug="dump")
elif sys.argv[1] == "none":
exchange = Exchange(debug="none")
else:
raise Exception("Command line argument should be either 'dump' or 'none'")
else:
exchange = Exchange()
exchange.open_exchange()
input() # Pressing the enter key will cause the server process to terminate.
exchange.close_exchange() | true | true |
f7357d01e959dfee06c677343477559f2ba68a03 | 77 | py | Python | form.py | kdvuong/cgi-lab | b1e3a68d60607fd93124dab4e9f1b396525fa361 | [
"Apache-2.0"
] | null | null | null | form.py | kdvuong/cgi-lab | b1e3a68d60607fd93124dab4e9f1b396525fa361 | [
"Apache-2.0"
] | null | null | null | form.py | kdvuong/cgi-lab | b1e3a68d60607fd93124dab4e9f1b396525fa361 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# CGI entry point: render the login page HTML to stdout.
from templates import login_page
print(login_page())
| 15.4 | 32 | 0.779221 |
# CGI script body: render the login page template as the response.
from templates import login_page
print(login_page())
| true | true |
f7357d5f9b4f7560bb8c442eea0df6889de78578 | 1,870 | py | Python | re/noasm/noasm.py | Enigmatrix/hats-ctf-2019 | 0dc1b9a5a4583c81b5f1b7bce0cbb9bd0fd2b192 | [
"MIT"
] | 5 | 2019-10-04T07:20:37.000Z | 2021-06-15T21:34:07.000Z | re/noasm/noasm.py | Enigmatrix/hats-ctf-2019 | 0dc1b9a5a4583c81b5f1b7bce0cbb9bd0fd2b192 | [
"MIT"
] | null | null | null | re/noasm/noasm.py | Enigmatrix/hats-ctf-2019 | 0dc1b9a5a4583c81b5f1b7bce0cbb9bd0fd2b192 | [
"MIT"
] | null | null | null | (lambda __print, __g, __y: [[[[[(lambda __after: (sys.stdout.write('Tell me the flag and I will let you know if you are right: '), [(lambda __after: (__print('WRONG'), (exit(0), __after())[1])[1] if (len(pw) != 19) else __after())(lambda: [(lambda __after: (__print('WRONG1'), (exit(0), __after())[1])[1] if (int(('0x' + p), 0) != 310333690747) else __after())(lambda: [(lambda __after: (__print('WRONG2'), (exit(0), __after())[1])[1] if (b != ''.join(map(chr, [89, 88, 78, 116, 88, 119, 61, 61]))) else __after())(lambda: [(lambda __after: (__print('WRONG3'), (exit(0), __after())[1])[1] if (h != '109dd7decb2e3a3658db75dcad688658') else __after())(lambda: [(lambda __items, __after, __sentinel: __y(lambda __this: lambda: (lambda __i: [[(random.seed(c), (lambda __after: (__print('WRONG4'), (exit(0), __after())[1])[1] if (r != random.randint(0, 100)) else __after())(lambda: __this()))[1] for __g['c'] in [(pw[i])]][0] for (__g['i'], __g['r']) in [(__i)]][0] if __i is not __sentinel else __after())(next(__items, __sentinel)))())(iter(zip(range(13, 19), rs)), lambda: (__print("That's the flag, go submit it."), __after())[1], []) for __g['rs'] in [([87, 16, 33, 1, 56, 73])]][0]) for __g['h'] in [(hashlib.md5(pw[9:13]).hexdigest())]][0]) for __g['b'] in [(base64.b64encode(pw[5:9]))]][0]) for __g['p'] in [(binascii.hexlify(pw[0:5]))]][0]) for __g['pw'] in [(raw_input())]][0])[1] if (__name__ == '__main__') else __after())(lambda: None) for __g['random'] in [(__import__('random', __g, __g))]][0] for __g['hashlib'] in [(__import__('hashlib', __g, __g))]][0] for __g['sys'] in [(__import__('sys', __g, __g))]][0] for __g['base64'] in [(__import__('base64', __g, __g))]][0] for __g['binascii'] in [(__import__('binascii', __g, __g))]][0])(__import__('__builtin__', level=0).__dict__['print'], globals(), (lambda f: (lambda x: x(x))(lambda y: f(lambda: y(y)()))))
| 935 | 1,869 | 0.614439 | (lambda __print, __g, __y: [[[[[(lambda __after: (sys.stdout.write('Tell me the flag and I will let you know if you are right: '), [(lambda __after: (__print('WRONG'), (exit(0), __after())[1])[1] if (len(pw) != 19) else __after())(lambda: [(lambda __after: (__print('WRONG1'), (exit(0), __after())[1])[1] if (int(('0x' + p), 0) != 310333690747) else __after())(lambda: [(lambda __after: (__print('WRONG2'), (exit(0), __after())[1])[1] if (b != ''.join(map(chr, [89, 88, 78, 116, 88, 119, 61, 61]))) else __after())(lambda: [(lambda __after: (__print('WRONG3'), (exit(0), __after())[1])[1] if (h != '109dd7decb2e3a3658db75dcad688658') else __after())(lambda: [(lambda __items, __after, __sentinel: __y(lambda __this: lambda: (lambda __i: [[(random.seed(c), (lambda __after: (__print('WRONG4'), (exit(0), __after())[1])[1] if (r != random.randint(0, 100)) else __after())(lambda: __this()))[1] for __g['c'] in [(pw[i])]][0] for (__g['i'], __g['r']) in [(__i)]][0] if __i is not __sentinel else __after())(next(__items, __sentinel)))())(iter(zip(range(13, 19), rs)), lambda: (__print("That's the flag, go submit it."), __after())[1], []) for __g['rs'] in [([87, 16, 33, 1, 56, 73])]][0]) for __g['h'] in [(hashlib.md5(pw[9:13]).hexdigest())]][0]) for __g['b'] in [(base64.b64encode(pw[5:9]))]][0]) for __g['p'] in [(binascii.hexlify(pw[0:5]))]][0]) for __g['pw'] in [(raw_input())]][0])[1] if (__name__ == '__main__') else __after())(lambda: None) for __g['random'] in [(__import__('random', __g, __g))]][0] for __g['hashlib'] in [(__import__('hashlib', __g, __g))]][0] for __g['sys'] in [(__import__('sys', __g, __g))]][0] for __g['base64'] in [(__import__('base64', __g, __g))]][0] for __g['binascii'] in [(__import__('binascii', __g, __g))]][0])(__import__('__builtin__', level=0).__dict__['print'], globals(), (lambda f: (lambda x: x(x))(lambda y: f(lambda: y(y)()))))
| true | true |
f7357e78f2b00cc46ec38e6ad37d15b3b25e3b9c | 11,240 | py | Python | src/fora/logger.py | oddlama/forge | d09b0f309ce7dcda79dc03765473b48732c71845 | [
"MIT"
] | 14 | 2021-12-17T10:38:27.000Z | 2022-03-02T01:20:01.000Z | src/fora/logger.py | oddlama/forge | d09b0f309ce7dcda79dc03765473b48732c71845 | [
"MIT"
] | 2 | 2022-01-11T13:31:09.000Z | 2022-02-03T15:41:43.000Z | src/fora/logger.py | oddlama/forge | d09b0f309ce7dcda79dc03765473b48732c71845 | [
"MIT"
] | 2 | 2022-02-03T15:20:51.000Z | 2022-02-03T15:45:11.000Z | """
Provides logging utilities.
"""
import argparse
import difflib
import os
from dataclasses import dataclass
import sys
from types import TracebackType
from typing import Any, Optional, Type, cast
import fora
# NOTE: the module-global `state` below is mutated by IndentationContext.
@dataclass
class State:
    """Global state for logging."""
    indentation_level: int = 0
    """The current global indentation level."""
state: State = State()
"""The global logger state."""
def use_color() -> bool:
    """Return whether output should be colored.

    After argument parsing (``fora.args`` is an ``argparse.Namespace``) the
    ``--no-color`` flag decides; before that, fall back to the ``NO_COLOR``
    environment variable convention (color unless the variable is set).
    """
    if isinstance(cast(Any, fora.args), argparse.Namespace):
        return not fora.args.no_color
    return os.getenv("NO_COLOR") is None
def col(color_code: str) -> str:
    """Return *color_code* when color output is enabled, otherwise an empty string."""
    if not use_color():
        return ""
    return color_code
class IndentationContext:
    """A context manager to modify the indentation level."""
    def __enter__(self) -> None:
        # Entering the context deepens the global indentation by one level.
        state.indentation_level += 1
    def __exit__(self, exc_type: Optional[Type[BaseException]], exc: Optional[BaseException], traceback: Optional[TracebackType]) -> None:
        # Exception info is deliberately ignored: indentation is restored
        # unconditionally, and exceptions keep propagating (nothing returned).
        _ = (exc_type, exc, traceback)
        state.indentation_level -= 1
def ellipsis(s: str, width: int) -> str:
    """
    Shrinks the given string to width (including an ellipsis character).

    Strings longer than *width* are cut so that the result, terminated by a
    single '…', is exactly *width* characters long; shorter strings are
    returned unchanged.

    Parameters
    ----------
    s
        The string.
    width
        The maximum width.

    Returns
    -------
    str
        A modified string with at most `width` characters.
    """
    if len(s) <= width:
        return s
    return s[:width - 1] + "…"
def indent() -> IndentationContext:
    """Returns a context manager that increases the indentation level."""
    return IndentationContext()
def indent_prefix() -> str:
    """Returns the indentation prefix for the current indentation level."""
    if not use_color():
        return "  " * state.indentation_level
    ret = ""
    # With color enabled, draw alternating dim-gray guide glyphs per level.
    for i in range(state.indentation_level):
        if i % 2 == 0:
            ret += "[90m│[m "
        else:
            ret += "[90m╵[m "
    return ret
def debug(msg: str) -> None:
    """Prints the given message only in debug mode."""
    if not fora.args.debug:
        return
    # Bold-blue "DEBUG" tag; written to stderr so regular output stays clean.
    print(f"  [1;34mDEBUG[m: {msg}", file=sys.stderr)
def debug_args(msg: str, args: dict[str, Any]) -> None:
    """Prints all given arguments when in debug mode."""
    if not fora.args.debug:
        return
    str_args = ""
    # Drop "self" so bound-method call sites don't print the instance.
    args = {k: v for k,v in args.items() if k != "self"}
    if len(args) > 0:
        str_args = " " + ", ".join(f"{k}={v}" for k,v in args.items())
    print(f"  [1;34mDEBUG[m: {msg}{str_args}", file=sys.stderr)
def print_indented(msg: str, **kwargs: Any) -> None:
    """Same as print(), but prefixes the message with the indentation prefix."""
    # kwargs (e.g. end=, flush=) are forwarded verbatim to print().
    print(f"{indent_prefix()}{msg}", **kwargs)
def connection_init(connector: Any) -> None:
    """Prints connection initialization information."""
    # Format: "host <name> via <url>" — name in bold blue, url in bold yellow.
    print_indented(f"{col('[1;34m')}host{col('[m')} {connector.host.name} via {col('[1;33m')}{connector.host.url}{col('[m')}", flush=True)
def connection_failed(error_msg: str) -> None:
    """Signals that an error has occurred while establishing the connection."""
    print(col("[1;31m") + "ERR" + col("[m"))
    # Red error message attached underneath with a tree-style corner glyph.
    print_indented(f"  {col('[90m')}└{col('[m')} " + f"{col('[31m')}{error_msg}{col('[m')}")
def connection_established() -> None:
    """Signals that the connection has been successfully established."""
    # Intentionally silent: success is implied by the absence of an ERR line.
    #print(col("[1;32m") + "OK" + col("[m"))
def run_script(script: str, name: Optional[str] = None) -> None:
    """Prints the script file and name that is being executed next."""
    if name is not None:
        # Append the optional human-readable name in dim-gray parentheses.
        print_indented(f"{col('[33;1m')}script{col('[m')} {script} {col('[90m')}({name}){col('[m')}")
    else:
        print_indented(f"{col('[33;1m')}script{col('[m')} {script}")
def print_operation_title(op: Any, title_color: str, end: str = "\n") -> None:
    """Prints the operation title and description."""
    name_if_given = (" " + col('[90m') + f"({op.name})" + col('[m')) if op.name is not None else ""
    dry_run_info = f" {col('[90m')}(dry){col('[m')}" if fora.args.dry else ""
    # end= lets callers keep the cursor on this line so it can be overwritten
    # later with the final status (see print_operation).
    print_indented(f"{title_color}{op.op_name}{col('[m')}{dry_run_info} {op.description}{name_if_given}", end=end, flush=True)
def print_operation_early(op: Any) -> None:
    """Prints the operation title and description before the final status is known."""
    # Bold yellow while the operation is still pending.
    title_color = col("[1;33m")
    # Only overwrite status later if debugging is not enabled.
    print_operation_title(op, title_color, end=" (early status)\n" if fora.args.debug else "")
def decode_escape(data: bytes, encoding: str = 'utf-8') -> str:
    """
    Tries to decode the given data with the given encoding, but replaces all non-decodeable
    and non-printable characters with backslash escape sequences.

    The bytes are first decoded with the 'backslashreplace' error handler,
    then any remaining unprintable character (NUL, newline, carriage return,
    tab, or any other codepoint <= 0xff) is rewritten as an escape such as
    ``\\n`` or ``\\x1b``.

    Parameters
    ----------
    data
        The content that should be decoded and escaped.
    encoding
        The encoding that should be tried. To preserve utf-8 symbols, use 'utf-8',
        to replace any non-ascii character with an escape sequence use 'ascii'.

    Returns
    -------
    str
        The decoded and escaped string.
    """
    special = {'\x00': '\\0', '\n': '\\n', '\r': '\\r', '\t': '\\t'}

    def _escape(ch: str) -> str:
        mapped = special.get(ch)
        if mapped is not None:
            return mapped
        if not ch.isprintable() and ord(ch) <= 0xff:
            return f"\\x{ord(ch):02x}"
        return ch

    return ''.join(_escape(ch) for ch in data.decode(encoding, 'backslashreplace'))
def diff(filename: str, old: Optional[bytes], new: Optional[bytes], color: bool = True) -> list[str]:
    """
    Creates a diff between the old and new content of the given filename,
    that can be printed to the console. This function returns the diff
    output as an array of lines. The lines in the output array are not
    terminated by newlines.

    If color is True, the diff is colored using ANSI escape sequences.

    Parameters
    ----------
    filename
        The filename of the file that is being diffed.
    old
        The old content, or None if the file didn't exist before.
    new
        The new content, or None if the file was deleted.
    color
        Whether the output should be colored (with ANSI color sequences).

    Returns
    -------
    list[str]
        The lines of the diff output. The individual lines will not have a terminating newline.
    """
    bdiff = list(difflib.diff_bytes(difflib.unified_diff,
                 a=[] if old is None else old.split(b'\n'),
                 b=[] if new is None else new.split(b'\n'),
                 lineterm=b''))
    # Strip the generated '---'/'+++' file name header and decode the diff
    # to be human readable (escaping undecodable/unprintable bytes).
    difflines = map(decode_escape, bdiff[2:])
    # Create custom file name header.
    # Bug fix: the `filename` parameter was documented but unused; the title
    # hardcoded "(unknown)" instead of naming the diffed file.
    action = 'created' if old is None else 'deleted' if new is None else 'modified'
    title = f"{action}: {filename}"
    N = len(title)
    header = ['─' * N, title, '─' * N]
    # Apply coloring if desired
    if color:
        def apply_color(line: str) -> str:
            # Added lines green, removed red, hunk headers blue, rest dim gray.
            linecolor = {
                '+': '\x1b[32m',
                '-': '\x1b[31m',
                '@': '\x1b[34m',
            }
            return linecolor.get(line[0], '\x1b[90m') + line + '\x1b[m'
        # Apply color to diff
        difflines = map(apply_color, difflines)
        # Apply color to header (yellow)
        header = list(map(lambda line: f"\x1b[33m{line}\x1b[m", header))
    return header + list(difflines)
# TODO: move functions to operation api. cleaner and has type access.
def _operation_state_infos(result: Any) -> list[str]:
    """Renders the initial→final state entries of an operation result as colored strings."""
    def to_str(v: Any) -> str:
        # Show raw bytes as hex so they stay printable.
        return v.hex() if isinstance(v, bytes) else str(v)
    # Print "key: value" pairs with changes
    state_infos: list[str] = []
    for k,final_v in result.final.items():
        if final_v is None:
            continue
        initial_v = result.initial[k]
        str_initial_v = to_str(initial_v)
        str_final_v = to_str(final_v)
        # Add ellipsis on long strings, if we are not in verbose mode
        if fora.args.verbose == 0:
            k = ellipsis(k, 12)
            str_initial_v = ellipsis(to_str(initial_v), 9)
            str_final_v = ellipsis(to_str(final_v), 9+3+9 if initial_v is None else 9)
        if initial_v == final_v:
            # Unchanged entries are shown (dimmed) only in verbose mode.
            if fora.args.verbose >= 1:
                # TODO = instead of : for better readability
                entry_str = f"{col('[90m')}{k}: {str_initial_v}{col('[m')}"
                state_infos.append(entry_str)
        else:
            # Changed entries: new value green; old value (if any) red.
            if initial_v is None:
                entry_str = f"{col('[33m')}{k}: {col('[32m')}{str_final_v}{col('[m')}"
            else:
                entry_str = f"{col('[33m')}{k}: {col('[31m')}{str_initial_v}{col('[33m')} → {col('[32m')}{str_final_v}{col('[m')}"
            state_infos.append(entry_str)
    return state_infos
def print_operation(op: Any, result: Any) -> None:
    """Prints the operation summary after it has finished execution."""
    # Green = changed, dim gray = unchanged, red = failed.
    if result.success:
        title_color = col("[1;32m") if result.changed else col("[1;90m")
    else:
        title_color = col("[1;31m")
    # Print title and name, overwriting the transitive status
    print("\r", end="")
    print_operation_title(op, title_color)
    if not result.success:
        print_indented(f"  {col('[90m')}└{col('[m')} " + f"{col('[31m')}{result.failure_message}{col('[m')}")
        return
    if not fora.args.changes:
        return
    # Cache number of upcoming diffs to determine what box character to print
    n_diffs = len(op.diffs) if fora.args.diff else 0
    box_char = '└' if n_diffs == 0 else '├'
    # Print "key: value" pairs with changes
    state_infos = _operation_state_infos(result)
    if len(state_infos) > 0:
        print_indented(f"{col('[90m')}{box_char}{col('[m')} " + f"{col('[90m')},{col('[m')} ".join(state_infos))
    if fora.args.diff:
        diff_lines = []
        # Generate diffs
        for file, old, new in op.diffs:
            diff_lines.extend(diff(file, old, new))
        # Print diffs with block character line
        if len(diff_lines) > 0:
            for l in diff_lines[:-1]:
                print_indented(f"{col('[90m')}│ {col('[m')}" + l)
            print_indented(f"{col('[90m')}└ {col('[m')}" + diff_lines[-1])
| 35.68254 | 142 | 0.607117 |
import argparse
import difflib
import os
from dataclasses import dataclass
import sys
from types import TracebackType
from typing import Any, Optional, Type, cast
import fora
@dataclass
class State:
indentation_level: int = 0
state: State = State()
def use_color() -> bool:
if not isinstance(cast(Any, fora.args), argparse.Namespace):
return os.getenv("NO_COLOR") is None
return not fora.args.no_color
def col(color_code: str) -> str:
return color_code if use_color() else ""
class IndentationContext:
def __enter__(self) -> None:
state.indentation_level += 1
def __exit__(self, exc_type: Optional[Type[BaseException]], exc: Optional[BaseException], traceback: Optional[TracebackType]) -> None:
_ = (exc_type, exc, traceback)
state.indentation_level -= 1
def ellipsis(s: str, width: int) -> str:
    """Shrink *s* to at most *width* characters, ending with '…' when cut."""
    return s if len(s) <= width else s[:width - 1] + "…"
def indent() -> IndentationContext:
return IndentationContext()
def indent_prefix() -> str:
if not use_color():
return " " * state.indentation_level
ret = ""
for i in range(state.indentation_level):
if i % 2 == 0:
ret += "[90m│[m "
else:
ret += "[90m╵[m "
return ret
def debug(msg: str) -> None:
if not fora.args.debug:
return
print(f" [1;34mDEBUG[m: {msg}", file=sys.stderr)
def debug_args(msg: str, args: dict[str, Any]) -> None:
if not fora.args.debug:
return
str_args = ""
args = {k: v for k,v in args.items() if k != "self"}
if len(args) > 0:
str_args = " " + ", ".join(f"{k}={v}" for k,v in args.items())
print(f" [1;34mDEBUG[m: {msg}{str_args}", file=sys.stderr)
def print_indented(msg: str, **kwargs: Any) -> None:
print(f"{indent_prefix()}{msg}", **kwargs)
def connection_init(connector: Any) -> None:
print_indented(f"{col('[1;34m')}host{col('[m')} {connector.host.name} via {col('[1;33m')}{connector.host.url}{col('[m')}", flush=True)
def connection_failed(error_msg: str) -> None:
print(col("[1;31m") + "ERR" + col("[m"))
print_indented(f" {col('[90m')}└{col('[m')} " + f"{col('[31m')}{error_msg}{col('[m')}")
def connection_established() -> None:
    """Signals that the connection has been successfully established.

    Intentionally a no-op; success is implied by the absence of an ERR line.
    (Bug fix: this def had no body at all — a syntax error — because comment
    stripping removed its docstring-only body.)
    """
def run_script(script: str, name: Optional[str] = None) -> None:
if name is not None:
print_indented(f"{col('[33;1m')}script{col('[m')} {script} {col('[90m')}({name}){col('[m')}")
else:
print_indented(f"{col('[33;1m')}script{col('[m')} {script}")
def print_operation_title(op: Any, title_color: str, end: str = "\n") -> None:
name_if_given = (" " + col('[90m') + f"({op.name})" + col('[m')) if op.name is not None else ""
dry_run_info = f" {col('[90m')}(dry){col('[m')}" if fora.args.dry else ""
print_indented(f"{title_color}{op.op_name}{col('[m')}{dry_run_info} {op.description}{name_if_given}", end=end, flush=True)
def print_operation_early(op: Any) -> None:
title_color = col("[1;33m")
print_operation_title(op, title_color, end=" (early status)\n" if fora.args.debug else "")
def decode_escape(data: bytes, encoding: str = 'utf-8') -> str:
    """Decode *data* with *encoding*, escaping undecodable/unprintable characters.

    Uses the 'backslashreplace' error handler, then rewrites remaining
    unprintable characters with codepoints <= 0xff as backslash escapes
    (``\\0``, ``\\n``, ``\\r``, ``\\t`` or ``\\xNN``).
    """
    special = {'\x00': '\\0', '\n': '\\n', '\r': '\\r', '\t': '\\t'}

    def _escape(ch: str) -> str:
        mapped = special.get(ch)
        if mapped is not None:
            return mapped
        if not ch.isprintable() and ord(ch) <= 0xff:
            return f"\\x{ord(ch):02x}"
        return ch

    return ''.join(_escape(ch) for ch in data.decode(encoding, 'backslashreplace'))
def diff(filename: str, old: Optional[bytes], new: Optional[bytes], color: bool = True) -> list[str]:
    """Create a printable unified diff between *old* and *new* content of *filename*.

    Returns the diff as a list of lines without trailing newlines; when
    *color* is True the lines carry ANSI color sequences. *old* is None when
    the file is newly created, *new* is None when it was deleted.
    """
    bdiff = list(difflib.diff_bytes(difflib.unified_diff,
                 a=[] if old is None else old.split(b'\n'),
                 b=[] if new is None else new.split(b'\n'),
                 lineterm=b''))
    # Drop the generated '---'/'+++' header and make the diff printable.
    difflines = map(decode_escape, bdiff[2:])
    action = 'created' if old is None else 'deleted' if new is None else 'modified'
    # Bug fix: `filename` was documented but unused; the title hardcoded
    # "(unknown)" instead of naming the diffed file.
    title = f"{action}: {filename}"
    N = len(title)
    header = ['─' * N, title, '─' * N]
    if color:
        def apply_color(line: str) -> str:
            # Added lines green, removed red, hunk headers blue, rest dim gray.
            linecolor = {
                '+': '\x1b[32m',
                '-': '\x1b[31m',
                '@': '\x1b[34m',
            }
            return linecolor.get(line[0], '\x1b[90m') + line + '\x1b[m'
        difflines = map(apply_color, difflines)
        header = list(map(lambda line: f"\x1b[33m{line}\x1b[m", header))
    return header + list(difflines)
def _operation_state_infos(result: Any) -> list[str]:
def to_str(v: Any) -> str:
return v.hex() if isinstance(v, bytes) else str(v)
state_infos: list[str] = []
for k,final_v in result.final.items():
if final_v is None:
continue
initial_v = result.initial[k]
str_initial_v = to_str(initial_v)
str_final_v = to_str(final_v)
if fora.args.verbose == 0:
k = ellipsis(k, 12)
str_initial_v = ellipsis(to_str(initial_v), 9)
str_final_v = ellipsis(to_str(final_v), 9+3+9 if initial_v is None else 9)
if initial_v == final_v:
if fora.args.verbose >= 1:
entry_str = f"{col('[90m')}{k}: {str_initial_v}{col('[m')}"
state_infos.append(entry_str)
else:
if initial_v is None:
entry_str = f"{col('[33m')}{k}: {col('[32m')}{str_final_v}{col('[m')}"
else:
entry_str = f"{col('[33m')}{k}: {col('[31m')}{str_initial_v}{col('[33m')} → {col('[32m')}{str_final_v}{col('[m')}"
state_infos.append(entry_str)
return state_infos
def print_operation(op: Any, result: Any) -> None:
if result.success:
title_color = col("[1;32m") if result.changed else col("[1;90m")
else:
title_color = col("[1;31m")
print("\r", end="")
print_operation_title(op, title_color)
if not result.success:
print_indented(f" {col('[90m')}└{col('[m')} " + f"{col('[31m')}{result.failure_message}{col('[m')}")
return
if not fora.args.changes:
return
n_diffs = len(op.diffs) if fora.args.diff else 0
box_char = '└' if n_diffs == 0 else '├'
state_infos = _operation_state_infos(result)
if len(state_infos) > 0:
print_indented(f"{col('[90m')}{box_char}{col('[m')} " + f"{col('[90m')},{col('[m')} ".join(state_infos))
if fora.args.diff:
diff_lines = []
for file, old, new in op.diffs:
diff_lines.extend(diff(file, old, new))
if len(diff_lines) > 0:
for l in diff_lines[:-1]:
print_indented(f"{col('[90m')}│ {col('[m')}" + l)
print_indented(f"{col('[90m')}└ {col('[m')}" + diff_lines[-1])
| true | true |
f7357f096bc6406d31f3e33568491d295f5029c8 | 390 | py | Python | zipline/pipeline/data/__init__.py | colin1alexander/zipline | ba42e6d8b972dcce9271526562ceff0cddd3fa30 | [
"Apache-2.0"
] | null | null | null | zipline/pipeline/data/__init__.py | colin1alexander/zipline | ba42e6d8b972dcce9271526562ceff0cddd3fa30 | [
"Apache-2.0"
] | null | null | null | zipline/pipeline/data/__init__.py | colin1alexander/zipline | ba42e6d8b972dcce9271526562ceff0cddd3fa30 | [
"Apache-2.0"
] | null | null | null | from .buyback_auth import CashBuybackAuthorizations, ShareBuybackAuthorizations
from .earnings import EarningsCalendar
from .equity_pricing import USEquityPricing
from .dataset import DataSet, Column, BoundColumn
# Public API of the ``zipline.pipeline.data`` package.
__all__ = [
    'BoundColumn',
    'CashBuybackAuthorizations',
    'Column',
    'DataSet',
    'EarningsCalendar',
    'ShareBuybackAuthorizations',
    'USEquityPricing',
]
| 26 | 79 | 0.769231 | from .buyback_auth import CashBuybackAuthorizations, ShareBuybackAuthorizations
from .earnings import EarningsCalendar
from .equity_pricing import USEquityPricing
from .dataset import DataSet, Column, BoundColumn
# Public API of the ``zipline.pipeline.data`` package.
__all__ = [
    'BoundColumn',
    'CashBuybackAuthorizations',
    'Column',
    'DataSet',
    'EarningsCalendar',
    'ShareBuybackAuthorizations',
    'USEquityPricing',
]
| true | true |
f7357f33e672630577c0e02a2ed8598fe2a5e84e | 5,045 | py | Python | telepot/aio/api.py | francesconazzaro/telepota | 571b9fec6bedecf487df121644d8c673e6c5fd8f | [
"MIT"
] | 30 | 2020-07-27T07:11:44.000Z | 2022-03-20T19:01:22.000Z | telepot/aio/api.py | francesconazzaro/telepota | 571b9fec6bedecf487df121644d8c673e6c5fd8f | [
"MIT"
] | 5 | 2021-03-23T04:53:01.000Z | 2022-02-19T20:31:57.000Z | telepot/aio/api.py | francesconazzaro/telepota | 571b9fec6bedecf487df121644d8c673e6c5fd8f | [
"MIT"
] | 3 | 2021-04-01T12:42:21.000Z | 2022-03-20T18:33:02.000Z | import asyncio
import aiohttp
import async_timeout
import atexit
import re
import json
from .. import exception
from ..api import _methodurl, _which_pool, _fileurl, _guess_filename
_loop = asyncio.get_event_loop()
# Shared connection pools keyed by name; 'default' serves regular API calls.
_pools = {
    'default': aiohttp.ClientSession(
        connector=aiohttp.TCPConnector(limit=10),
        loop=_loop)
}
# Default HTTP timeout in seconds for requests and downloads.
_timeout = 30
_proxy = None  # (url, (username, password))
def set_proxy(url, basic_auth=None):
    """Configure (or clear) the HTTP proxy used for all Bot API requests.

    A falsy *url* disables the proxy. *basic_auth* is an optional
    (username, password) pair for proxy basic authentication.
    """
    global _proxy
    if not url:
        _proxy = None
    elif basic_auth:
        _proxy = (url, basic_auth)
    else:
        _proxy = (url,)
def _proxy_kwargs():
    """Returns the proxy-related keyword arguments for aiohttp request calls."""
    if _proxy is None or len(_proxy) == 0:
        return {}
    elif len(_proxy) == 1:
        # URL only.
        return {'proxy': _proxy[0]}
    elif len(_proxy) == 2:
        # URL plus a (username, password) basic-auth pair.
        return {'proxy': _proxy[0], 'proxy_auth': aiohttp.BasicAuth(*_proxy[1])}
    else:
        raise RuntimeError("_proxy has invalid length")
async def _close_pools():
    """Closes every pooled aiohttp session (registered to run at exit)."""
    global _pools
    for s in _pools.values():
        await s.close()
atexit.register(lambda: _loop.create_task(_close_pools()))  # have to wrap async function
def _create_onetime_pool():
    """Creates a single-connection session whose sockets are force-closed after use."""
    return aiohttp.ClientSession(
        connector=aiohttp.TCPConnector(limit=1, force_close=True),
        loop=_loop)
def _default_timeout(req, **user_kw):
    """Returns the module-wide default HTTP timeout in seconds."""
    return _timeout
def _compose_timeout(req, **user_kw):
    """Chooses the HTTP timeout for a request; None disables the timeout entirely."""
    token, method, params, files = req
    if method == 'getUpdates' and params and 'timeout' in params:
        # Ensure HTTP timeout is longer than getUpdates timeout
        return params['timeout'] + _default_timeout(req, **user_kw)
    elif files:
        # Disable timeout if uploading files. For some reason, the larger the file,
        # the longer it takes for the server to respond (after upload is finished).
        # It is unclear how long timeout should be.
        return None
    else:
        return _default_timeout(req, **user_kw)
def _compose_data(req, **user_kw):
    """Builds the multipart form body from the request params and file uploads."""
    token, method, params, files = req
    data = aiohttp.FormData()
    if params:
        for key, value in params.items():
            data.add_field(key, str(value))
    if files:
        for key, f in files.items():
            # A file may be given as (filename, fileobj) or a bare file object.
            if isinstance(f, tuple):
                if len(f) == 2:
                    filename, fileobj = f
                else:
                    raise ValueError('Tuple must have exactly 2 elements: filename, fileobj')
            else:
                # Fall back to the file object's own name, else the field key.
                filename, fileobj = _guess_filename(f) or key, f
            data.add_field(key, fileobj, filename=filename)
    return data
def _transform(req, **user_kw):
    """Maps a request tuple to (callable, args, kwargs, timeout, cleanup)."""
    timeout = _compose_timeout(req, **user_kw)
    data = _compose_data(req, **user_kw)
    url = _methodurl(req, **user_kw)
    name = _which_pool(req, **user_kw)
    if name is None:
        session = _create_onetime_pool()
        cleanup = session.close  # one-time session: remember to close
    else:
        session = _pools[name]
        cleanup = None  # reuse: do not close
    kwargs = {'data': data}
    kwargs.update(user_kw)
    return session.post, (url,), kwargs, timeout, cleanup
async def _parse(response):
    """Parses a Bot API HTTP response: returns the result or raises a TelegramError."""
    try:
        data = await response.json()
        if data is None:
            raise ValueError()
    except (ValueError, json.JSONDecodeError, aiohttp.ClientResponseError):
        # The body was not valid JSON at all.
        text = await response.text()
        raise exception.BadHTTPResponse(response.status, text, response)
    if data['ok']:
        return data['result']
    else:
        description, error_code = data['description'], data['error_code']
        # Look for specific error ...
        for e in exception.TelegramError.__subclasses__():
            n = len(e.DESCRIPTION_PATTERNS)
            if any(map(re.search, e.DESCRIPTION_PATTERNS, n * [description], n * [re.IGNORECASE])):
                raise e(description, error_code, data)
        # ... or raise generic error
        raise exception.TelegramError(description, error_code, data)
async def request(req, **user_kw):
    """Performs the HTTP request described by *req* and returns the parsed result.

    Timeouts map to TelegramError(504); connection failures to TelegramError(400).
    """
    fn, args, kwargs, timeout, cleanup = _transform(req, **user_kw)
    kwargs.update(_proxy_kwargs())
    try:
        if timeout is None:
            async with fn(*args, **kwargs) as r:
                return await _parse(r)
        else:
            try:
                with async_timeout.timeout(timeout):
                    async with fn(*args, **kwargs) as r:
                        return await _parse(r)
            except asyncio.TimeoutError:
                raise exception.TelegramError('Response timeout', 504, {})
    except aiohttp.ClientConnectionError:
        raise exception.TelegramError('Connection Error', 400, {})
    finally:
        if cleanup:  # e.g. closing one-time session
            if asyncio.iscoroutinefunction(cleanup):
                await cleanup()
            else:
                cleanup()
def download(req):
    """Returns (session, GET-request) for downloading the file described by *req*."""
    session = _create_onetime_pool()
    kwargs = {}
    kwargs.update(_proxy_kwargs())
    return session, session.get(_fileurl(req), timeout=_timeout, **kwargs)
# Caller should close session after download is complete
| 27.872928 | 99 | 0.622002 | import asyncio
import aiohttp
import async_timeout
import atexit
import re
import json
from .. import exception
from ..api import _methodurl, _which_pool, _fileurl, _guess_filename
_loop = asyncio.get_event_loop()
_pools = {
'default': aiohttp.ClientSession(
connector=aiohttp.TCPConnector(limit=10),
loop=_loop)
}
_timeout = 30
_proxy = None
def set_proxy(url, basic_auth=None):
global _proxy
if not url:
_proxy = None
else:
_proxy = (url, basic_auth) if basic_auth else (url,)
def _proxy_kwargs():
if _proxy is None or len(_proxy) == 0:
return {}
elif len(_proxy) == 1:
return {'proxy': _proxy[0]}
elif len(_proxy) == 2:
return {'proxy': _proxy[0], 'proxy_auth': aiohttp.BasicAuth(*_proxy[1])}
else:
raise RuntimeError("_proxy has invalid length")
async def _close_pools():
global _pools
for s in _pools.values():
await s.close()
atexit.register(lambda: _loop.create_task(_close_pools()))
def _create_onetime_pool():
return aiohttp.ClientSession(
connector=aiohttp.TCPConnector(limit=1, force_close=True),
loop=_loop)
def _default_timeout(req, **user_kw):
return _timeout
def _compose_timeout(req, **user_kw):
token, method, params, files = req
if method == 'getUpdates' and params and 'timeout' in params:
return params['timeout'] + _default_timeout(req, **user_kw)
elif files:
return None
else:
return _default_timeout(req, **user_kw)
def _compose_data(req, **user_kw):
token, method, params, files = req
data = aiohttp.FormData()
if params:
for key, value in params.items():
data.add_field(key, str(value))
if files:
for key, f in files.items():
if isinstance(f, tuple):
if len(f) == 2:
filename, fileobj = f
else:
raise ValueError('Tuple must have exactly 2 elements: filename, fileobj')
else:
filename, fileobj = _guess_filename(f) or key, f
data.add_field(key, fileobj, filename=filename)
return data
def _transform(req, **user_kw):
timeout = _compose_timeout(req, **user_kw)
data = _compose_data(req, **user_kw)
url = _methodurl(req, **user_kw)
name = _which_pool(req, **user_kw)
if name is None:
session = _create_onetime_pool()
cleanup = session.close
else:
session = _pools[name]
cleanup = None
kwargs = {'data': data}
kwargs.update(user_kw)
return session.post, (url,), kwargs, timeout, cleanup
async def _parse(response):
try:
data = await response.json()
if data is None:
raise ValueError()
except (ValueError, json.JSONDecodeError, aiohttp.ClientResponseError):
text = await response.text()
raise exception.BadHTTPResponse(response.status, text, response)
if data['ok']:
return data['result']
else:
description, error_code = data['description'], data['error_code']
for e in exception.TelegramError.__subclasses__():
n = len(e.DESCRIPTION_PATTERNS)
if any(map(re.search, e.DESCRIPTION_PATTERNS, n * [description], n * [re.IGNORECASE])):
raise e(description, error_code, data)
raise exception.TelegramError(description, error_code, data)
async def request(req, **user_kw):
fn, args, kwargs, timeout, cleanup = _transform(req, **user_kw)
kwargs.update(_proxy_kwargs())
try:
if timeout is None:
async with fn(*args, **kwargs) as r:
return await _parse(r)
else:
try:
with async_timeout.timeout(timeout):
async with fn(*args, **kwargs) as r:
return await _parse(r)
except asyncio.TimeoutError:
raise exception.TelegramError('Response timeout', 504, {})
except aiohttp.ClientConnectionError:
raise exception.TelegramError('Connection Error', 400, {})
finally:
if cleanup:
if asyncio.iscoroutinefunction(cleanup):
await cleanup()
else:
cleanup()
def download(req):
session = _create_onetime_pool()
kwargs = {}
kwargs.update(_proxy_kwargs())
return session, session.get(_fileurl(req), timeout=_timeout, **kwargs)
| true | true |
f7357f7d0603789b73ae269c01659bd14c34341d | 279 | py | Python | build/android/pylib/sdk/dexdump.py | TwistedCore/external_v8 | c6725dab9be251fbfc6fd7d53c3513a23e78c36c | [
"BSD-3-Clause"
] | 27 | 2016-04-27T01:02:03.000Z | 2021-12-13T08:53:19.000Z | build/android/pylib/sdk/dexdump.py | TwistedCore/external_v8 | c6725dab9be251fbfc6fd7d53c3513a23e78c36c | [
"BSD-3-Clause"
] | 2 | 2017-03-09T09:00:50.000Z | 2017-09-21T15:48:20.000Z | build/android/pylib/sdk/dexdump.py | TwistedCore/external_v8 | c6725dab9be251fbfc6fd7d53c3513a23e78c36c | [
"BSD-3-Clause"
] | 17 | 2016-04-27T02:06:39.000Z | 2019-12-18T08:07:00.000Z | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=unused-wildcard-import
# pylint: disable=wildcard-import
from devil.android.sdk.dexdump import *
| 31 | 72 | 0.777778 |
from devil.android.sdk.dexdump import *
| true | true |
f7357f866eb662ed69801854fd44d5c0190179f2 | 281 | py | Python | pypro/videos/admin.py | limberger/curso-django | 9b099a9934871c221be2018d2e80331e90bee40f | [
"Apache-2.0"
] | null | null | null | pypro/videos/admin.py | limberger/curso-django | 9b099a9934871c221be2018d2e80331e90bee40f | [
"Apache-2.0"
] | 1,012 | 2020-06-22T21:43:39.000Z | 2022-03-31T22:09:32.000Z | pypro/videos/admin.py | limberger/curso-django | 9b099a9934871c221be2018d2e80331e90bee40f | [
"Apache-2.0"
] | null | null | null | from django.contrib.admin import ModelAdmin, register
from pypro.videos.models import Video
@register(Video)
class VideoAdmin(ModelAdmin):
    """Django admin configuration for the Video model."""
    # Columns shown in the admin changelist.
    list_display = ('titulo', 'slug', 'creation', 'vimeo_id')
    ordering = ('creation',)
    # Auto-fill the slug from the title while typing in the admin form.
    prepopulated_fields = {'slug': ('titulo',)}
| 25.545455 | 61 | 0.708185 | from django.contrib.admin import ModelAdmin, register
from pypro.videos.models import Video
@register(Video)
class VideoAdmin(ModelAdmin):
    """Django admin configuration for the Video model."""
    list_display = ('titulo', 'slug', 'creation', 'vimeo_id')
    ordering = ('creation',)
    # Auto-fill the slug from the title while typing in the admin form.
    prepopulated_fields = {'slug': ('titulo',)}
| true | true |
f7357ff2a287f8a4fe49e760b1035fd94b80cf88 | 1,663 | py | Python | python2.7/site-packages/twisted/words/test/test_xmlstream.py | 84KaliPleXon3/sslstrip-hsts-openwrt | f875ded48078a3ed84bffef1e69dcbeaf2e77ae3 | [
"MIT"
] | 4 | 2020-10-31T19:52:05.000Z | 2021-09-22T11:39:27.000Z | python2.7/site-packages/twisted/words/test/test_xmlstream.py | 84KaliPleXon3/sslstrip-hsts-openwrt | f875ded48078a3ed84bffef1e69dcbeaf2e77ae3 | [
"MIT"
] | null | null | null | python2.7/site-packages/twisted/words/test/test_xmlstream.py | 84KaliPleXon3/sslstrip-hsts-openwrt | f875ded48078a3ed84bffef1e69dcbeaf2e77ae3 | [
"MIT"
] | 2 | 2020-02-27T08:28:35.000Z | 2020-09-13T12:39:26.000Z | # Copyright (c) 2001-2005 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.words.xish import xmlstream
class XmlStreamTest(unittest.TestCase):
    """Tests for ``twisted.words.xish.xmlstream.XmlStream``.

    The test case instance itself acts as the fake transport: ``setUp``
    assigns ``self`` as the stream's transport and redirects ``write`` so
    outgoing bytes accumulate in ``self.outlist``.
    """

    def setUp(self):
        # Event flags flipped by the observer callbacks below.
        self.errorOccurred = False
        self.streamStarted = False
        self.streamEnded = False
        self.outlist = []
        self.xmlstream = xmlstream.XmlStream()
        self.xmlstream.transport = self
        self.xmlstream.transport.write = self.outlist.append

    # Auxiliary methods
    def loseConnection(self):
        """Fake-transport hook: forward connection loss to the stream."""
        self.xmlstream.connectionLost("no reason")

    def streamStartEvent(self, rootelem):
        self.streamStarted = True

    def streamErrorEvent(self, errelem):
        self.errorOccurred = True

    def streamEndEvent(self, _):
        self.streamEnded = True

    def testBasicOp(self):
        """Open a stream, then feed malformed XML and check error/end events."""
        xs = self.xmlstream
        xs.addObserver(xmlstream.STREAM_START_EVENT,
                       self.streamStartEvent)
        xs.addObserver(xmlstream.STREAM_ERROR_EVENT,
                       self.streamErrorEvent)
        xs.addObserver(xmlstream.STREAM_END_EVENT,
                       self.streamEndEvent)

        # Go...
        xs.connectionMade()
        xs.send("<root>")
        # assertEqual replaces the deprecated assertEquals alias (removed in
        # modern unittest; assertEqual has existed since Python 2.7).
        self.assertEqual(self.outlist[0], "<root>")

        xs.dataReceived("<root>")
        self.assertEqual(self.streamStarted, True)
        self.assertEqual(self.errorOccurred, False)
        self.assertEqual(self.streamEnded, False)

        # Mismatched tags trigger a parse error, which should both flag an
        # error and end the stream.
        xs.dataReceived("<child><unclosed></child>")
        self.assertEqual(self.errorOccurred, True)
        self.assertEqual(self.streamEnded, True)
| 31.980769 | 60 | 0.641612 |
from twisted.trial import unittest
from twisted.words.xish import xmlstream
class XmlStreamTest(unittest.TestCase):
def setUp(self):
self.errorOccurred = False
self.streamStarted = False
self.streamEnded = False
self.outlist = []
self.xmlstream = xmlstream.XmlStream()
self.xmlstream.transport = self
self.xmlstream.transport.write = self.outlist.append
def loseConnection(self):
self.xmlstream.connectionLost("no reason")
def streamStartEvent(self, rootelem):
self.streamStarted = True
def streamErrorEvent(self, errelem):
self.errorOccurred = True
def streamEndEvent(self, _):
self.streamEnded = True
def testBasicOp(self):
xs = self.xmlstream
xs.addObserver(xmlstream.STREAM_START_EVENT,
self.streamStartEvent)
xs.addObserver(xmlstream.STREAM_ERROR_EVENT,
self.streamErrorEvent)
xs.addObserver(xmlstream.STREAM_END_EVENT,
self.streamEndEvent)
xs.connectionMade()
xs.send("<root>")
self.assertEquals(self.outlist[0], "<root>")
xs.dataReceived("<root>")
self.assertEquals(self.streamStarted, True)
self.assertEquals(self.errorOccurred, False)
self.assertEquals(self.streamEnded, False)
xs.dataReceived("<child><unclosed></child>")
self.assertEquals(self.errorOccurred, True)
self.assertEquals(self.streamEnded, True)
| true | true |
f73581a462f72f903e4554626bd267decbf52903 | 1,960 | py | Python | gluonfr/__init__.py | PistonY/gluon-face | 22f1e22de38fdee0873b4e58b6608029947176b8 | [
"MIT"
] | null | null | null | gluonfr/__init__.py | PistonY/gluon-face | 22f1e22de38fdee0873b4e58b6608029947176b8 | [
"MIT"
] | null | null | null | gluonfr/__init__.py | PistonY/gluon-face | 22f1e22de38fdee0873b4e58b6608029947176b8 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2018 Haoxintong
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""GluonFR: a deep learning face recognition toolkit powered by Gluon."""
# mxnet version check
mx_version = '1.3.0'
try:
    import mxnet as mx
except ImportError:
    # mxnet is not installed at all.
    raise ImportError(
        "Unable to import dependency mxnet. "
        "A quick tip is to install via `pip install mxnet/mxnet-cu92 --pre`. "
    )

# The version comparison is performed OUTSIDE the try-block. Previously the
# version-mismatch ImportError was raised inside `try`, so the
# `except ImportError` handler caught it and silently replaced the detailed
# "legacy mxnet detected" message with the generic "unable to import" one.
from distutils.version import LooseVersion

if LooseVersion(mx.__version__) < LooseVersion(mx_version):
    msg = (
        "Legacy mxnet=={} detected, some new modules may not work properly. "
        "mxnet>={} is required. You can use pip to upgrade mxnet "
        "`pip install mxnet/mxnet-cu92 --pre --upgrade`").format(mx.__version__, mx_version)
    raise ImportError(msg)
from . import data
from . import model_zoo
from . import nn
from . import utils
from . import loss
| 40.833333 | 96 | 0.728571 |
mx_version = '1.3.0'
try:
import mxnet as mx
from distutils.version import LooseVersion
if LooseVersion(mx.__version__) < LooseVersion(mx_version):
msg = (
"Legacy mxnet=={} detected, some new modules may not work properly. "
"mxnet>={} is required. You can use pip to upgrade mxnet "
"`pip install mxnet/mxnet-cu92 --pre --upgrade`").format(mx.__version__, mx_version)
raise ImportError(msg)
except ImportError:
raise ImportError(
"Unable to import dependency mxnet. "
"A quick tip is to install via `pip install mxnet/mxnet-cu92 --pre`. "
)
__version__ = '0.1.0'
from . import data
from . import model_zoo
from . import nn
from . import utils
from . import loss
| true | true |
f735821d09b9dcf71de0937f5941d9f078d8ceaa | 337 | py | Python | evaluate/__init__.py | beesk135/ReID-Survey | d1467c0ce5d3ca78640196360a05df9ff9f9f42a | [
"MIT"
] | null | null | null | evaluate/__init__.py | beesk135/ReID-Survey | d1467c0ce5d3ca78640196360a05df9ff9f9f42a | [
"MIT"
] | null | null | null | evaluate/__init__.py | beesk135/ReID-Survey | d1467c0ce5d3ca78640196360a05df9ff9f9f42a | [
"MIT"
] | null | null | null | import torch
from .eval_reid import eval_func
def euclidean_dist(x, y):
    """Compute the pairwise Euclidean distance matrix between two batches.

    Args:
        x: tensor of shape (m, d).
        y: tensor of shape (n, d).

    Returns:
        Tensor of shape (m, n) where entry (i, j) is ||x_i - y_j||_2.
    """
    m, n = x.size(0), y.size(0)
    # ||x_i||^2 broadcast across columns, ||y_j||^2 broadcast across rows.
    xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
    yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
    dist = xx + yy
    # dist = dist - 2 * x @ y.T. The keyword form replaces the deprecated
    # positional overload `addmm_(beta, alpha, mat1, mat2)`, which was removed
    # in modern PyTorch releases.
    dist.addmm_(x, y.t(), beta=1, alpha=-2)
    # Clamp before sqrt for numerical stability (avoids sqrt of tiny
    # negative values caused by floating-point cancellation).
    dist = dist.clamp(min=1e-12).sqrt()
    return dist
| 25.923077 | 62 | 0.593472 | import torch
from .eval_reid import eval_func
def euclidean_dist(x, y):
m, n = x.size(0), y.size(0)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
dist = xx + yy
dist.addmm_(1, -2, x, y.t())
dist = dist.clamp(min=1e-12).sqrt()
return dist
| true | true |
f73583333cac8f652fd8a032023c0f6c9eec6ce6 | 4,328 | py | Python | airbyte-integrations/connectors/source-shopify/source_shopify/transform.py | darian-heede/airbyte | 504580d833582f8800b334f24e57a414d94389bf | [
"MIT"
] | 6,215 | 2020-09-21T13:45:56.000Z | 2022-03-31T21:21:45.000Z | airbyte-integrations/connectors/source-shopify/source_shopify/transform.py | darian-heede/airbyte | 504580d833582f8800b334f24e57a414d94389bf | [
"MIT"
] | 8,448 | 2020-09-21T00:43:50.000Z | 2022-03-31T23:56:06.000Z | airbyte-integrations/connectors/source-shopify/source_shopify/transform.py | darian-heede/airbyte | 504580d833582f8800b334f24e57a414d94389bf | [
"MIT"
] | 1,251 | 2020-09-20T05:48:47.000Z | 2022-03-31T10:41:29.000Z | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from decimal import Decimal
from typing import Any, Iterable, List, Mapping, MutableMapping
class DataTypeEnforcer:
    """Coerce record values to the types declared in a JSON schema.

    The Shopify API returns price fields as strings while the configured
    schemas declare them as numbers; such fields may also be nested inside
    objects and arrays. An instance is built from a schema, and ``transform``
    walks a record, comparing each value's JSON type with the declared type
    and converting mismatched "number" fields to ``Decimal`` while recursing
    into objects and arrays.
    """

    def __init__(self, schema: Mapping[str, Any], **kwargs):
        super().__init__(**kwargs)
        self._schema = schema

    @staticmethod
    def _get_json_types(value_type: Any) -> List[str]:
        """Map a Python type to the JSON Schema type names it satisfies."""
        return {
            str: ["string"],
            int: ["integer", "number"],
            float: ["number"],
            dict: ["object"],
            list: ["array"],
            bool: ["boolean"],
            type(None): ["null"],
        }.get(value_type)

    @staticmethod
    def _types_from_schema(properties: Mapping[str, Any]) -> str:
        """Return the schema's declared type(s), normalized to a list."""
        declared = properties.get("type", [])
        return declared if isinstance(declared, list) else [declared]

    @staticmethod
    def _first_non_null_type(schema_types: List[str]) -> str:
        """Return the first declared type after dropping one "null" entry."""
        remaining = schema_types.copy()
        if "null" in remaining:
            remaining.remove("null")
        return remaining[0]

    @staticmethod
    def _transform_number(value: Any):
        """Convert a value (typically a price string) to Decimal."""
        return Decimal(value)

    def _transform_array(self, array: List[Any], item_properties: Mapping[str, Any]):
        """Coerce each element of *array* in place against the item schema."""
        for position, element in enumerate(array):
            array[position] = self.transform(element, item_properties)
        return array

    def _transform_object(self, record: MutableMapping[str, Any], properties: Mapping[str, Any]):
        """Coerce the schema-declared, non-null fields of *record* in place."""
        for field_name, field_value in record.items():
            if field_value is not None and field_name in properties:
                field_schema = properties.get(field_name) or {}
                record[field_name] = self.transform(field_value, field_schema)
        return record

    def transform(self, field: Any, schema: Mapping[str, Any] = None) -> Iterable[MutableMapping]:
        """Return *field* coerced to the (sub)schema's declared type."""
        schema = self._schema if schema is None else schema
        declared = self._types_from_schema(schema)
        # Nothing declared, or a null value: pass through untouched.
        if not declared or field is None:
            return field
        actual = self._get_json_types(type(field))
        expected = self._first_non_null_type(declared)
        # Only convert to a number when the actual JSON type does not already
        # satisfy the declaration; objects and arrays always recurse so that
        # nested price fields get converted too.
        mismatch = not any(json_type in declared for json_type in actual)
        if mismatch and expected == "number":
            return self._transform_number(field)
        if expected == "object":
            return self._transform_object(field, schema.get("properties", {}))
        if expected == "array":
            return self._transform_array(field, schema.get("items", {}))
        return field
| 42.431373 | 108 | 0.644871 |
from decimal import Decimal
from typing import Any, Iterable, List, Mapping, MutableMapping
class DataTypeEnforcer:
def __init__(self, schema: Mapping[str, Any], **kwargs):
super().__init__(**kwargs)
self._schema = schema
@staticmethod
def _get_json_types(value_type: Any) -> List[str]:
json_types = {
str: ["string"],
int: ["integer", "number"],
float: ["number"],
dict: ["object"],
list: ["array"],
bool: ["boolean"],
type(None): [
"null",
],
}
return json_types.get(value_type)
@staticmethod
def _types_from_schema(properties: Mapping[str, Any]) -> str:
schema_types = properties.get("type", [])
if not isinstance(schema_types, list):
schema_types = [
schema_types,
]
return schema_types
@staticmethod
def _first_non_null_type(schema_types: List[str]) -> str:
not_null_types = schema_types.copy()
if "null" in not_null_types:
not_null_types.remove("null")
return not_null_types[0]
@staticmethod
def _transform_number(value: Any):
return Decimal(value)
def _transform_array(self, array: List[Any], item_properties: Mapping[str, Any]):
for index, record in enumerate(array):
array[index] = self.transform(record, item_properties)
return array
def _transform_object(self, record: MutableMapping[str, Any], properties: Mapping[str, Any]):
for object_property, value in record.items():
if value is None:
continue
if object_property in properties:
object_properties = properties.get(object_property) or {}
record[object_property] = self.transform(value, object_properties)
return record
def transform(self, field: Any, schema: Mapping[str, Any] = None) -> Iterable[MutableMapping]:
schema = schema if schema is not None else self._schema
schema_types = self._types_from_schema(schema)
if schema_types and field is not None:
field_json_types = self._get_json_types(type(field))
schema_type = self._first_non_null_type(schema_types)
if not any(field_json_type in schema_types for field_json_type in field_json_types):
if schema_type == "number":
return self._transform_number(field)
if schema_type == "object":
properties = schema.get("properties", {})
return self._transform_object(field, properties)
if schema_type == "array":
properties = schema.get("items", {})
return self._transform_array(field, properties)
return field
| true | true |
f7358382ed2ef8f8d84e4774cf6e92ccc29c2f89 | 153 | py | Python | melenium/__init__.py | a-maliarov/melenium | c71b79e6533c16fd638e7a7b84afaf5d16620cf9 | [
"MIT"
] | 1 | 2020-11-01T01:40:32.000Z | 2020-11-01T01:40:32.000Z | melenium/__init__.py | a-maliarov/melenium | c71b79e6533c16fd638e7a7b84afaf5d16620cf9 | [
"MIT"
] | null | null | null | melenium/__init__.py | a-maliarov/melenium | c71b79e6533c16fd638e7a7b84afaf5d16620cf9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
melenium
~~~~~~~~
Routine automation.
"""
#-----------------------------------------------------------------------------
| 12.75 | 78 | 0.228758 | true | true | |
f7358511e96f3ea4c1daa0a9f0cf003f4b29702b | 1,400 | py | Python | eventstreamexamples/controllers/geventeventstream.py | nh2/eventstreamexamples | f8f7801e65c259c3b0dd625c54c92a48ddbe57ba | [
"MirOS"
] | 3 | 2016-05-24T18:41:55.000Z | 2019-01-24T22:49:19.000Z | eventstreamexamples/controllers/geventeventstream.py | nh2/eventstreamexamples | f8f7801e65c259c3b0dd625c54c92a48ddbe57ba | [
"MirOS"
] | null | null | null | eventstreamexamples/controllers/geventeventstream.py | nh2/eventstreamexamples | f8f7801e65c259c3b0dd625c54c92a48ddbe57ba | [
"MirOS"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Example controller for SSE (server-side events) with gevent.
Builds on the simple SSE controller.
"""
import sys
import time
import gevent.queue
from tg import expose, request, response
from tg import url
from tg.decorators import with_trailing_slash
from eventstream import EventstreamController
class GeventEventstreamController(EventstreamController):
    """SSE (server-sent events) demo controller backed by gevent queues."""

    # set containing a gevent queue for each of the clients (browsers) listening for events
    # NOTE(review): shared class-level mutable state — queues are added in
    # visitstream() but never removed, so entries for disconnected clients
    # accumulate; confirm whether cleanup is handled elsewhere.
    client_queues = set()

    @expose()
    @with_trailing_slash
    def index(self):
        """whenever a new client opens this page, sends an event to all listening clients"""
        # put a gevent event in each client's queue
        for q in GeventEventstreamController.client_queues:
            q.put("visit received from %(REMOTE_ADDR)s with user agent %(HTTP_USER_AGENT)s" % request.environ)
        # return the page for listening
        return self.load_js(url('visitstream'))

    @expose()
    def visitstream(self):
        """sends a SSE whenever somebody visits index"""
        # set charset appropriately
        response.headers['Content-type'] = 'text/event-stream'
        # disable charset (see EventstreamController)
        response.charset = ""
        # create a new queue for this new listening client; it must be
        # registered before the generator starts blocking on q.get()
        q = gevent.queue.Queue()
        GeventEventstreamController.client_queues.add(q)

        def stream():
            # blocks on q.get() until index() pushes an event, then emits it
            # in SSE "data: ..." wire format (double newline ends the event)
            while True:
                yield "data: %s %s\n\n" % (q.get(), time.time())
        return stream()
| 28 | 101 | 0.742857 |
import sys
import time
import gevent.queue
from tg import expose, request, response
from tg import url
from tg.decorators import with_trailing_slash
from eventstream import EventstreamController
class GeventEventstreamController(EventstreamController):
client_queues = set()
@expose()
@with_trailing_slash
def index(self):
for q in GeventEventstreamController.client_queues:
q.put("visit received from %(REMOTE_ADDR)s with user agent %(HTTP_USER_AGENT)s" % request.environ)
# return the page for listening
return self.load_js(url('visitstream'))
@expose()
def visitstream(self):
# set charset appropriately
response.headers['Content-type'] = 'text/event-stream'
# disable charset (see EventstreamController)
response.charset = ""
# create a new queue for this new listening client
q = gevent.queue.Queue()
GeventEventstreamController.client_queues.add(q)
def stream():
while True:
yield "data: %s %s\n\n" % (q.get(), time.time())
return stream()
| true | true |
f735867beaac222b21e732102f425ed101d44aac | 11,985 | py | Python | analysisAPI/scoringErrors.py | xujin1184104394/coco-analyze | fefe16025554dbf831e71d32d6601dd8f00286a8 | [
"MIT"
] | 212 | 2017-07-19T19:06:34.000Z | 2022-03-31T07:19:05.000Z | analysisAPI/scoringErrors.py | xujin1184104394/coco-analyze | fefe16025554dbf831e71d32d6601dd8f00286a8 | [
"MIT"
] | 23 | 2017-09-06T08:43:18.000Z | 2022-02-09T20:12:18.000Z | analysisAPI/scoringErrors.py | xujin1184104394/coco-analyze | fefe16025554dbf831e71d32d6601dd8f00286a8 | [
"MIT"
] | 56 | 2017-07-27T03:53:13.000Z | 2021-12-21T11:03:13.000Z | ## imports
import os, time
import numpy as np
import matplotlib.pyplot as plt
# package imports
from . import utilities
def scoringErrors( coco_analyze, oks, imgs_info, saveDir ):
    """Run the COCO-analyze "scoring errors" study and save plots/logs.

    Args:
        coco_analyze: configured COCOanalyze-like object exposing ``params``,
            ``cocoEval``, ``cocoGt``, ``corrected_dts``, ``analyze`` and
            ``summarize``.
        oks: OKS threshold used for the analysis.
        imgs_info: mapping from image id to image metadata (used for plotting).
        saveDir: output directory root; results go to ``saveDir/scoring_errors``.

    Returns:
        dict mapping plot names to the paths of the saved PDF figures.
    """
    loc_dir = saveDir + '/scoring_errors'
    if not os.path.exists(loc_dir):
        os.makedirs(loc_dir)
    # std_out.txt collects the textual report for this analysis stage
    f = open('%s/std_out.txt'%loc_dir, 'w')
    f.write("Running Analysis: [Scoring Errors]\n\n")
    tic = time.time()
    paths = {}

    # set parameters for the scoring errors analysis
    coco_analyze.params.areaRng = [[32 ** 2, 1e5 ** 2]]
    coco_analyze.params.areaRngLbl = ['all']
    coco_analyze.params.oksThrs = [oks]
    coco_analyze.cocoEval.params.useGtIgnore = 0
    coco_analyze.cocoEval.params.gtIgnoreIds = []
    coco_analyze.analyze(check_kpts=False, check_scores=True, check_bckgd=False)
    coco_analyze.summarize(makeplots=True, savedir=loc_dir, team_name='scoring')
    paths['opt_score_prc'] = \
        '%s/error_prc_[scoring][%d][%s][%d].pdf'%(loc_dir, int(oks*100),
                                                  coco_analyze.params.areaRngLbl[0],
                                                  coco_analyze.params.maxDets[0])
    corrected_dts = coco_analyze.corrected_dts['all']

    # dictionary of all corrected detections grouped by image id
    all_dts = {}
    for d in coco_analyze.corrected_dts['all']:
        if d['image_id'] not in all_dts:
            all_dts[d['image_id']] = {}
            all_dts[d['image_id']]['dts'] = [d]
        else:
            all_dts[d['image_id']]['dts'].append(d)

    subopt_order_images = []
    all_gts = {}; all_dtgt_oks = {}
    for imgId in imgs_info:
        if imgId in all_dts:
            dts = all_dts[imgId]['dts']
            # rank detections by original score and by optimal (rescored) score
            all_dts[imgId]['score'] = np.argsort([-d['score'] for d in dts], kind='mergesort')
            all_dts[imgId]['opt_score'] = np.argsort([-d['opt_score'] for d in dts], kind='mergesort')
            if list(all_dts[imgId]['score']) != list(all_dts[imgId]['opt_score']):
                subopt_order_images.append(imgId)
        else:
            dts = []

        gts = coco_analyze.cocoGt.loadAnns(coco_analyze.cocoGt.getAnnIds(imgIds=imgId))
        not_ignore_gts = []
        for g in gts:
            # gt ignores are discarded
            if g['ignore'] or (g['area']<coco_analyze.params.areaRng[0][0] or g['area']>coco_analyze.params.areaRng[0][1]):
                g['_ignore'] = 1
            else:
                g['_ignore'] = 0
                not_ignore_gts.append(g)

        # compute the oks matrix between the dts and gts of each image
        image_oks_mat = utilities.compute_oks(dts, not_ignore_gts)
        if len(image_oks_mat) == 0:
            all_gts[imgId] = not_ignore_gts
            all_dtgt_oks[imgId] = []
        else:
            # sort the ground truths by their max oks value with any detection
            maxoksvals = [-max(image_oks_mat[:,j]) for j in range(len(not_ignore_gts))]
            gtind = np.argsort(maxoksvals, kind='mergesort')
            all_gts[imgId] = [not_ignore_gts[j] for j in gtind]
            all_dtgt_oks[imgId] = image_oks_mat[:,gtind]

    ## check how many images have optimal score and original score with same order
    # NOTE(review): assumes at least one image has detections, otherwise this
    # divides by zero — confirm upstream guarantees non-empty all_dts.
    perc = 100*len(subopt_order_images)/float(len(all_dts))
    f.write("Num. of imgs with sub-optimal detections order: [%d]/[%d] (%.2f%%).\n\n"%(len(subopt_order_images), len(all_dts), perc))

    ## find scoring errors before and after rescoring
    min_match_oks = .5
    scoring_errors = {'score':[],'opt_score':[]}
    for score_type in scoring_errors.keys():
        for ind, imgId in enumerate(all_dts.keys()):
            dind = all_dts[imgId][score_type]
            sorted_dts = [all_dts[imgId]['dts'][i] for i in dind]
            gtIds = [g['id'] for g in all_gts[imgId]]
            if len(sorted_dts) * len(gtIds) == 0: continue

            used_dts = []
            for gind, gt in enumerate(all_gts[imgId]):
                assert(gt['_ignore']==0)
                # NOTE(review): local `oks` shadows the function's `oks`
                # threshold parameter (only used earlier), harmless but confusing
                oks = all_dtgt_oks[imgId][dind,gind]
                dts_with_oks = np.where(oks >= min_match_oks)[0]
                # remove the matched dts
                dts_available = [(i,sorted_dts[i]['id'],oks[i],sorted_dts[i][score_type]) \
                                 for i in dts_with_oks if sorted_dts[i]['id'] not in used_dts]
                if len(dts_available) == 0: break
                # greedily match the gt to its highest-oks available detection
                max_oks_dt = np.argmax([d[2] for d in dts_available])
                used_dts.append(dts_available[max_oks_dt][1])

                if len( dts_available ) > 1:
                    # check for scoring error
                    max_score_dt = np.argmax([d[3] for d in dts_available])
                    if max_score_dt!=max_oks_dt:
                        # this is a scoring error
                        error = {}
                        error['gt'] = gt
                        error['imgId'] = imgId
                        error['matched_dt'] = sorted_dts[dts_available[max_score_dt][0]]
                        error['top_match_dt'] = sorted_dts[dts_available[max_oks_dt][0]]
                        error['high_oks'] = dts_available[max_oks_dt][2]
                        error['low_oks'] = dts_available[max_score_dt][2]
                        scoring_errors[score_type].append(error)

    f.write("Num. of scoring errors:\n")
    f.write(" - Original Score: %d\n"%len(scoring_errors['score']))
    f.write(" - Optimal Score: %d\n"%len(scoring_errors['opt_score']))
    f.write("\nMost relevant scoring errors:\n")

    ## print the top scoring errors of the algorithm
    # relevance combines score gap and oks gap; sort descending
    ori_scoring_errors = scoring_errors['score']
    ori_scoring_errors.sort(key=lambda k: -np.sqrt((k['matched_dt']['score']-k['top_match_dt']['score'])*(k['high_oks']-k['low_oks'])))
    for ind, err in enumerate(ori_scoring_errors[0:12]):
        relevance = np.sqrt((err['matched_dt']['score']-err['top_match_dt']['score'])*(err['high_oks']-err['low_oks']))
        f.write("================================================\n")
        f.write( "- gt id: [%d]\n"%err['gt']['id'] )
        f.write( "- dt id, high score, low oks: [%d][%.3f][%.3f]\n"%(err['matched_dt']['id'], err['matched_dt']['score'], err['low_oks']) )
        f.write( "- dt id, low score, high oks: [%d][%.3f][%.3f]\n"%(err['top_match_dt']['id'], err['top_match_dt']['score'], err['high_oks']) )
        f.write( "- Relevance: [%.3f]\n\n"%relevance )

        # save a side-by-side pair of figures for each scoring error
        name = 'score_err_%d_high_score'%ind
        paths[name] = '%s/%s.pdf'%(loc_dir,name)
        utilities.show_dets([err['matched_dt']],
                            [err['gt']],
                            imgs_info[err['imgId']],save_path=paths[name])

        name = 'score_err_%d_high_oks'%ind
        paths[name] = '%s/%s.pdf'%(loc_dir,name)
        utilities.show_dets([err['top_match_dt']],
                            [err['gt']],
                            imgs_info[err['imgId']],save_path=paths[name])

    # for all the images with dts and gts compute the following quantities
    # - number of dts with oks > min_match_oks for each gt
    # - histogram of oks for the detection with highest oks
    # - histogram of oks for all the other detections
    # - histogram of original/optimal scores for the detection with highest oks
    # - histogram of original/optimal scores for all the other detections
    num_dts_high_oks = []
    high_oks_dt_oks_hist = []; other_dt_oks_hist = []
    high_oks_dt_ori_score_hist = []; other_dt_ori_score_hist = []
    high_oks_dt_opt_score_hist = []; other_dt_opt_score_hist = []
    for ind, imgId in enumerate(all_dts.keys()):
        dts = [(d['id'],d['score'],d['opt_score']) for d in all_dts[imgId]['dts']]
        gtIds = [g['id'] for g in all_gts[imgId]]
        if len(dts) * len(gtIds) == 0: continue

        for gind, gt in enumerate(all_gts[imgId]):
            assert(gt['_ignore']==0)
            dts_oks = all_dtgt_oks[imgId][:,gind]
            dts_high_oks_i = np.where(dts_oks > .1)[0]
            num_dts_high_oks.append(len(dts_high_oks_i))

            if len(dts_high_oks_i) >= 2:
                # study the case where multiple detections have high oks
                # add the oks of the detections to the histogram of oks
                oks_vals = sorted([(dts_oks[i],dts[i]) for i in dts_high_oks_i], key=lambda k: -k[0])
                high_oks_dt_oks_hist.append(oks_vals[0][0])
                other_dt_oks_hist.extend([k[0] for k in oks_vals[1:]])
                high_oks_dt_ori_score_hist.append(oks_vals[0][1][1])
                other_dt_ori_score_hist.extend([k[1][1] for k in oks_vals[1:]])
                high_oks_dt_opt_score_hist.append(oks_vals[0][1][2])
                other_dt_opt_score_hist.extend([k[1][2] for k in oks_vals[1:]])

    # figure 1: histogram of detection redundancy per ground truth
    fig, ax = plt.subplots(figsize=(10,10))
    ax.set_facecolor('lightgray')
    plt.hist(num_dts_high_oks,bins=[i-.5 for i in range(max(num_dts_high_oks)+1)],color='green')
    plt.grid()
    plt.xticks([i for i in range(max(num_dts_high_oks))])
    plt.title('Histogram of Detection Redundancy',fontsize=20)
    plt.xlabel('Number of Detections with OKS > .1',fontsize=20)
    plt.ylabel('Number of Ground Truth Instances',fontsize=20)
    path = '%s/num_dts_high_oks.pdf'%loc_dir
    paths['num_dts_high_oks'] = path
    plt.savefig(path,bbox_inches='tight')
    plt.close()

    # figure 2: original score histograms, max-oks detection vs the rest,
    # with the red bars marking the overlap of the two distributions
    fig, ax = plt.subplots(figsize=(10,10))
    y1,binEdges=np.histogram(high_oks_dt_ori_score_hist,bins=19)
    bincenters1 = 0.5*(binEdges[1:]+binEdges[:-1])
    ax.plot(bincenters1,y1,'-',linewidth=3,c='b',label='Max OKS Detection')
    min_val1 = min(bincenters1)
    max_val1 = max(bincenters1)
    y2,binEdges=np.histogram(other_dt_ori_score_hist,bins=19)
    bincenters2 = 0.5*(binEdges[1:]+binEdges[:-1])
    ax.plot(bincenters2,y2,'--',linewidth=3,c='b',label='Lower OKS Detection(s)')
    min_val2 = min(bincenters2)
    max_val2 = max(bincenters2)
    min_val = min(min_val1,min_val2)
    max_val = max(max_val1,max_val2)
    overlapbins = [min(x,y) for x,y in zip(y1,y2)]
    width = (max_val-min_val)/20.
    ax.bar(np.linspace(min_val,max_val,19), overlapbins, color='red', alpha=.65, width=width,align='center')
    plt.grid()
    plt.xlim([min_val-(max_val-min_val)/20.,max_val+(max_val-min_val)/20.])
    plt.grid()
    plt.legend(loc='upper center',fontsize=20)
    plt.title('Histogram of Original Detection Scores',fontsize=20)
    plt.xlabel('Original Confidence Score',fontsize=20)
    plt.ylabel('Number of Detections',fontsize=20)
    path = '%s/dts_ori_score_hist.pdf'%loc_dir
    paths['dts_ori_score_hist'] = path
    plt.savefig(path,bbox_inches='tight')
    plt.close()

    # figure 3: same as figure 2 but using the optimal (rescored) scores
    fig, ax = plt.subplots(figsize=(10,10))
    y1,binEdges=np.histogram(high_oks_dt_opt_score_hist,bins=19)
    bincenters1 = 0.5*(binEdges[1:]+binEdges[:-1])
    ax.plot(bincenters1,y1,'-',linewidth=3,c='b',label='Max OKS Detection')
    min_val1 = min(bincenters1)
    max_val1 = max(bincenters1)
    y2,binEdges=np.histogram(other_dt_opt_score_hist,bins=19)
    bincenters2 = 0.5*(binEdges[1:]+binEdges[:-1])
    ax.plot(bincenters2,y2,'--',linewidth=3,c='b',label='Lower OKS Detection(s)')
    min_val2 = min(bincenters2)
    max_val2 = max(bincenters2)
    min_val = min(min_val1,min_val2)
    max_val = max(max_val1,max_val2)
    overlapbins = [min(x,y) for x,y in zip(y1,y2)]
    width = (max_val-min_val)/20.
    ax.bar(np.linspace(min_val,max_val,19), overlapbins, color='red', alpha=.65, width=width,align='center')
    plt.grid()
    plt.xlim([min_val-(max_val-min_val)/20.,max_val+(max_val-min_val)/20.])
    plt.grid()
    plt.legend(loc='upper center',fontsize=20)
    plt.title('Histogram of Optimal Detection Scores',fontsize=20)
    plt.xlabel('Optimal Confidence Score',fontsize=20)
    plt.ylabel('Number of Detections',fontsize=20)
    path = '%s/dts_opt_score_hist.pdf'%loc_dir
    paths['dts_opt_score_hist'] = path
    plt.savefig(path,bbox_inches='tight')
    plt.close()

    f.write("\nDone, (t=%.2fs)."%(time.time()-tic))
    f.close()
    return paths
| 45.226415 | 146 | 0.607593 | s, time
import numpy as np
import matplotlib.pyplot as plt
from . import utilities
def scoringErrors( coco_analyze, oks, imgs_info, saveDir ):
loc_dir = saveDir + '/scoring_errors'
if not os.path.exists(loc_dir):
os.makedirs(loc_dir)
f = open('%s/std_out.txt'%loc_dir, 'w')
f.write("Running Analysis: [Scoring Errors]\n\n")
tic = time.time()
paths = {}
coco_analyze.params.areaRng = [[32 ** 2, 1e5 ** 2]]
coco_analyze.params.areaRngLbl = ['all']
coco_analyze.params.oksThrs = [oks]
coco_analyze.cocoEval.params.useGtIgnore = 0
coco_analyze.cocoEval.params.gtIgnoreIds = []
coco_analyze.analyze(check_kpts=False, check_scores=True, check_bckgd=False)
coco_analyze.summarize(makeplots=True, savedir=loc_dir, team_name='scoring')
paths['opt_score_prc'] = \
'%s/error_prc_[scoring][%d][%s][%d].pdf'%(loc_dir, int(oks*100),
coco_analyze.params.areaRngLbl[0],
coco_analyze.params.maxDets[0])
corrected_dts = coco_analyze.corrected_dts['all']
all_dts = {}
for d in coco_analyze.corrected_dts['all']:
if d['image_id'] not in all_dts:
all_dts[d['image_id']] = {}
all_dts[d['image_id']]['dts'] = [d]
else:
all_dts[d['image_id']]['dts'].append(d)
subopt_order_images = []
all_gts = {}; all_dtgt_oks = {}
for imgId in imgs_info:
if imgId in all_dts:
dts = all_dts[imgId]['dts']
all_dts[imgId]['score'] = np.argsort([-d['score'] for d in dts], kind='mergesort')
all_dts[imgId]['opt_score'] = np.argsort([-d['opt_score'] for d in dts], kind='mergesort')
if list(all_dts[imgId]['score']) != list(all_dts[imgId]['opt_score']):
subopt_order_images.append(imgId)
else:
dts = []
gts = coco_analyze.cocoGt.loadAnns(coco_analyze.cocoGt.getAnnIds(imgIds=imgId))
not_ignore_gts = []
for g in gts:
if g['ignore'] or (g['area']<coco_analyze.params.areaRng[0][0] or g['area']>coco_analyze.params.areaRng[0][1]):
g['_ignore'] = 1
else:
g['_ignore'] = 0
not_ignore_gts.append(g)
image_oks_mat = utilities.compute_oks(dts, not_ignore_gts)
if len(image_oks_mat) == 0:
all_gts[imgId] = not_ignore_gts
all_dtgt_oks[imgId] = []
else:
maxoksvals = [-max(image_oks_mat[:,j]) for j in range(len(not_ignore_gts))]
gtind = np.argsort(maxoksvals, kind='mergesort')
all_gts[imgId] = [not_ignore_gts[j] for j in gtind]
all_dtgt_oks[imgId] = image_oks_mat[:,gtind]
. of imgs with sub-optimal detections order: [%d]/[%d] (%.2f%%).\n\n"%(len(subopt_order_images), len(all_dts), perc))
core':[],'opt_score':[]}
for score_type in scoring_errors.keys():
for ind, imgId in enumerate(all_dts.keys()):
dind = all_dts[imgId][score_type]
sorted_dts = [all_dts[imgId]['dts'][i] for i in dind]
gtIds = [g['id'] for g in all_gts[imgId]]
if len(sorted_dts) * len(gtIds) == 0: continue
used_dts = []
for gind, gt in enumerate(all_gts[imgId]):
assert(gt['_ignore']==0)
oks = all_dtgt_oks[imgId][dind,gind]
dts_with_oks = np.where(oks >= min_match_oks)[0]
dts_available = [(i,sorted_dts[i]['id'],oks[i],sorted_dts[i][score_type]) \
for i in dts_with_oks if sorted_dts[i]['id'] not in used_dts]
if len(dts_available) == 0: break
max_oks_dt = np.argmax([d[2] for d in dts_available])
used_dts.append(dts_available[max_oks_dt][1])
if len( dts_available ) > 1:
max_score_dt = np.argmax([d[3] for d in dts_available])
if max_score_dt!=max_oks_dt:
error = {}
error['gt'] = gt
error['imgId'] = imgId
error['matched_dt'] = sorted_dts[dts_available[max_score_dt][0]]
error['top_match_dt'] = sorted_dts[dts_available[max_oks_dt][0]]
error['high_oks'] = dts_available[max_oks_dt][2]
error['low_oks'] = dts_available[max_score_dt][2]
scoring_errors[score_type].append(error)
f.write("Num. of scoring errors:\n")
f.write(" - Original Score: %d\n"%len(scoring_errors['score']))
f.write(" - Optimal Score: %d\n"%len(scoring_errors['opt_score']))
f.write("\nMost relevant scoring errors:\n")
']
ori_scoring_errors.sort(key=lambda k: -np.sqrt((k['matched_dt']['score']-k['top_match_dt']['score'])*(k['high_oks']-k['low_oks'])))
for ind, err in enumerate(ori_scoring_errors[0:12]):
relevance = np.sqrt((err['matched_dt']['score']-err['top_match_dt']['score'])*(err['high_oks']-err['low_oks']))
f.write("================================================\n")
f.write( "- gt id: [%d]\n"%err['gt']['id'] )
f.write( "- dt id, high score, low oks: [%d][%.3f][%.3f]\n"%(err['matched_dt']['id'], err['matched_dt']['score'], err['low_oks']) )
f.write( "- dt id, low score, high oks: [%d][%.3f][%.3f]\n"%(err['top_match_dt']['id'], err['top_match_dt']['score'], err['high_oks']) )
f.write( "- Relevance: [%.3f]\n\n"%relevance )
name = 'score_err_%d_high_score'%ind
paths[name] = '%s/%s.pdf'%(loc_dir,name)
utilities.show_dets([err['matched_dt']],
[err['gt']],
imgs_info[err['imgId']],save_path=paths[name])
name = 'score_err_%d_high_oks'%ind
paths[name] = '%s/%s.pdf'%(loc_dir,name)
utilities.show_dets([err['top_match_dt']],
[err['gt']],
imgs_info[err['imgId']],save_path=paths[name])
num_dts_high_oks = []
high_oks_dt_oks_hist = []; other_dt_oks_hist = []
high_oks_dt_ori_score_hist = []; other_dt_ori_score_hist = []
high_oks_dt_opt_score_hist = []; other_dt_opt_score_hist = []
for ind, imgId in enumerate(all_dts.keys()):
dts = [(d['id'],d['score'],d['opt_score']) for d in all_dts[imgId]['dts']]
gtIds = [g['id'] for g in all_gts[imgId]]
if len(dts) * len(gtIds) == 0: continue
for gind, gt in enumerate(all_gts[imgId]):
assert(gt['_ignore']==0)
dts_oks = all_dtgt_oks[imgId][:,gind]
dts_high_oks_i = np.where(dts_oks > .1)[0]
num_dts_high_oks.append(len(dts_high_oks_i))
if len(dts_high_oks_i) >= 2:
oks_vals = sorted([(dts_oks[i],dts[i]) for i in dts_high_oks_i], key=lambda k: -k[0])
high_oks_dt_oks_hist.append(oks_vals[0][0])
other_dt_oks_hist.extend([k[0] for k in oks_vals[1:]])
high_oks_dt_ori_score_hist.append(oks_vals[0][1][1])
other_dt_ori_score_hist.extend([k[1][1] for k in oks_vals[1:]])
high_oks_dt_opt_score_hist.append(oks_vals[0][1][2])
other_dt_opt_score_hist.extend([k[1][2] for k in oks_vals[1:]])
fig, ax = plt.subplots(figsize=(10,10))
ax.set_facecolor('lightgray')
plt.hist(num_dts_high_oks,bins=[i-.5 for i in range(max(num_dts_high_oks)+1)],color='green')
plt.grid()
plt.xticks([i for i in range(max(num_dts_high_oks))])
plt.title('Histogram of Detection Redundancy',fontsize=20)
plt.xlabel('Number of Detections with OKS > .1',fontsize=20)
plt.ylabel('Number of Ground Truth Instances',fontsize=20)
path = '%s/num_dts_high_oks.pdf'%loc_dir
paths['num_dts_high_oks'] = path
plt.savefig(path,bbox_inches='tight')
plt.close()
fig, ax = plt.subplots(figsize=(10,10))
y1,binEdges=np.histogram(high_oks_dt_ori_score_hist,bins=19)
bincenters1 = 0.5*(binEdges[1:]+binEdges[:-1])
ax.plot(bincenters1,y1,'-',linewidth=3,c='b',label='Max OKS Detection')
min_val1 = min(bincenters1)
max_val1 = max(bincenters1)
y2,binEdges=np.histogram(other_dt_ori_score_hist,bins=19)
bincenters2 = 0.5*(binEdges[1:]+binEdges[:-1])
ax.plot(bincenters2,y2,'--',linewidth=3,c='b',label='Lower OKS Detection(s)')
min_val2 = min(bincenters2)
max_val2 = max(bincenters2)
min_val = min(min_val1,min_val2)
max_val = max(max_val1,max_val2)
overlapbins = [min(x,y) for x,y in zip(y1,y2)]
width = (max_val-min_val)/20.
ax.bar(np.linspace(min_val,max_val,19), overlapbins, color='red', alpha=.65, width=width,align='center')
plt.grid()
plt.xlim([min_val-(max_val-min_val)/20.,max_val+(max_val-min_val)/20.])
plt.grid()
plt.legend(loc='upper center',fontsize=20)
plt.title('Histogram of Original Detection Scores',fontsize=20)
plt.xlabel('Original Confidence Score',fontsize=20)
plt.ylabel('Number of Detections',fontsize=20)
path = '%s/dts_ori_score_hist.pdf'%loc_dir
paths['dts_ori_score_hist'] = path
plt.savefig(path,bbox_inches='tight')
plt.close()
fig, ax = plt.subplots(figsize=(10,10))
y1,binEdges=np.histogram(high_oks_dt_opt_score_hist,bins=19)
bincenters1 = 0.5*(binEdges[1:]+binEdges[:-1])
ax.plot(bincenters1,y1,'-',linewidth=3,c='b',label='Max OKS Detection')
min_val1 = min(bincenters1)
max_val1 = max(bincenters1)
y2,binEdges=np.histogram(other_dt_opt_score_hist,bins=19)
bincenters2 = 0.5*(binEdges[1:]+binEdges[:-1])
ax.plot(bincenters2,y2,'--',linewidth=3,c='b',label='Lower OKS Detection(s)')
min_val2 = min(bincenters2)
max_val2 = max(bincenters2)
min_val = min(min_val1,min_val2)
max_val = max(max_val1,max_val2)
overlapbins = [min(x,y) for x,y in zip(y1,y2)]
width = (max_val-min_val)/20.
ax.bar(np.linspace(min_val,max_val,19), overlapbins, color='red', alpha=.65, width=width,align='center')
plt.grid()
plt.xlim([min_val-(max_val-min_val)/20.,max_val+(max_val-min_val)/20.])
plt.grid()
plt.legend(loc='upper center',fontsize=20)
plt.title('Histogram of Optimal Detection Scores',fontsize=20)
plt.xlabel('Optimal Confidence Score',fontsize=20)
plt.ylabel('Number of Detections',fontsize=20)
path = '%s/dts_opt_score_hist.pdf'%loc_dir
paths['dts_opt_score_hist'] = path
plt.savefig(path,bbox_inches='tight')
plt.close()
f.write("\nDone, (t=%.2fs)."%(time.time()-tic))
f.close()
return paths
| true | true |
f7358708cd30b3f0af4b546e1d5910f83058771f | 2,827 | py | Python | check_c_compiles.py | jweinst1/pysimd | 62bf99f8b9f4ca3d06bf59add960ff408f5781f7 | [
"MIT"
] | 7 | 2020-09-13T22:18:28.000Z | 2022-02-12T09:03:07.000Z | check_c_compiles.py | jweinst1/pysimd | 62bf99f8b9f4ca3d06bf59add960ff408f5781f7 | [
"MIT"
] | 1 | 2020-09-13T22:20:21.000Z | 2021-11-20T16:47:03.000Z | check_c_compiles.py | jweinst1/pysimd | 62bf99f8b9f4ca3d06bf59add960ff408f5781f7 | [
"MIT"
] | null | null | null | import distutils.ccompiler
import os
import random
import subprocess
"""
These classes allow a test to see if source code with the C compiler actually
compiles.
"""
DEFAULT_COMPILER = distutils.ccompiler.get_default_compiler()
C_EXTENSION = ".c"
def create_file_with_rand_name(source):
    """Write *source* to a uniquely named C file in the current directory.

    Returns the chosen path WITHOUT the ``.c`` extension: callers append
    ``C_EXTENSION`` to locate the source file and use the bare path as the
    name of the linked executable.
    """
    cur_dir = os.getcwd()
    rand_file = os.path.join(cur_dir, "c_" + str(random.getrandbits(72)))
    # Bug fix: the original probe tested only the extension-less path while
    # the file actually written is ``rand_file + C_EXTENSION``.  Check both
    # so neither the .c source nor the eventual executable collides with an
    # existing file.
    while os.path.exists(rand_file) or os.path.exists(rand_file + C_EXTENSION):
        rand_file = os.path.join(cur_dir, "c_" + str(random.getrandbits(72)))
    with open(rand_file + C_EXTENSION, "w") as c_file:
        c_file.write(source)
    return rand_file
class CheckCCompiles(object):
    """Context manager that checks whether a C snippet compiles, links and runs.

    Usage::

        with CheckCCompiles("avx2", source) as chk:
            if chk.works: ...

    ``works`` is True only when the program compiled, linked and exited with
    status 0; ``compiles`` and ``links`` record how far the toolchain got.
    Temporary files are written to the current directory and removed on exit
    (best effort).
    """

    def __init__(self, name="", source_code=""):
        self.name = name
        self.source_code = source_code
        self.compiler = distutils.ccompiler.new_compiler()
        if DEFAULT_COMPILER == 'unix':
            # Ask gcc/clang to emit instructions for the named feature
            # (e.g. -mavx2); running the resulting binary then reveals whether
            # the host CPU supports it (a crash means it does not).
            self.extra_args = ["-m" + self.name]
        elif DEFAULT_COMPILER == 'msvc':
            # MSVC has no per-feature switch comparable to -m<name>; request
            # the AVX arch levels instead (the last /arch flag takes effect).
            self.extra_args = ['/arch:AVX', '/arch:AVX2', '/arch:AVX512']
        else:
            self.extra_args = []
        # Initialize every status attribute up front so __exit__ and callers
        # can read them even when an early stage fails (the original left
        # compiles/links/obj_names/run_result unset until reached).
        self.works = False
        self.compiles = False
        self.links = False
        self.obj_names = []
        self.run_result = None

    def try_run(self):
        """Execute the linked binary; return True iff it exits with status 0."""
        try:
            self.run_result = subprocess.run(self.file_name, check=False)
            self.works = self.run_result.returncode == 0
        except Exception:
            # Covers OSError from exec as well as platforms where a crashing
            # child surfaces as an exception.
            self.works = False
        return self.works

    def __enter__(self):
        # Stage 1: write the source to a uniquely named .c file.
        self.file_name = create_file_with_rand_name(self.source_code)
        self.c_name = self.file_name + C_EXTENSION
        # Stage 2: compile.
        try:
            self.obj_names = self.compiler.compile([self.c_name], extra_preargs=self.extra_args)
        except Exception as exc:
            print("FAILED " + self.name + " compile check: " + str(exc))
            return self
        self.compiles = True
        # Stage 3: link into an executable.
        try:
            self.compiler.link_executable(self.obj_names, self.file_name)
        except Exception as exc:
            print("FAILED " + self.name + " link check: " + str(exc))
            return self
        self.links = True
        # Stage 4: run it.
        if self.try_run():
            print("PASSED " + self.name)
        else:
            print("FAILED " + self.name + " run check: " + str(self.run_result.stderr))
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Best-effort cleanup: remove each artifact independently so a single
        # missing file no longer aborts the whole loop and leaks the rest
        # (the original bailed out of all cleanup on the first error).
        exe_name = self.file_name + ".exe" if os.name == 'nt' else self.file_name
        for path in [self.c_name, exe_name] + list(self.obj_names):
            try:
                os.remove(path)
            except OSError:
                # File may never have been created when an earlier stage failed.
                pass
import os
import random
import subprocess
DEFAULT_COMPILER = distutils.ccompiler.get_default_compiler()
C_EXTENSION = ".c"
def create_file_with_rand_name(source):
    """Persist *source* into a randomly named .c file in the current
    directory and return the chosen path without its extension."""
    base_dir = os.getcwd()
    # Draw 72-bit random suffixes until the candidate path is unused.
    while True:
        candidate = os.path.join(base_dir, "c_" + str(random.getrandbits(72)))
        if not os.path.exists(candidate):
            break
    with open(candidate + C_EXTENSION, "w") as out:
        out.write(source)
    return candidate
return rand_file
class CheckCCompiles(object):
    """Context manager that tests whether a C source snippet compiles, links
    and runs successfully with the platform's default compiler.

    After the ``with`` block, ``works`` is True only when all three stages
    succeeded (the compiled program exited with status 0).  Intermediate
    artifacts are created in the current directory and removed on exit.
    """

    def __init__(self, name = "", source_code = ""):
        # name: feature tag passed to unix-style compilers as -m<name>;
        # source_code: the C program text to test.
        self.name = name
        self.source_code = source_code
        self.compiler = distutils.ccompiler.new_compiler()
        if DEFAULT_COMPILER == 'unix':
            # gcc/clang style: enable the named machine feature directly.
            self.extra_args = ["-m" + self.name]
        elif DEFAULT_COMPILER == 'msvc':
            # MSVC has no per-feature switch; request the AVX arch levels
            # (`name` is ignored on this toolchain).
            self.extra_args = ['/arch:AVX', '/arch:AVX2', '/arch:AVX512']
        else:
            self.extra_args = []
        self.works = False

    def try_run(self):
        # Execute the linked binary; success means a zero exit status.
        # Any exception (e.g. the binary crashing or failing to start)
        # counts as failure.
        try:
            self.run_result = subprocess.run(self.file_name, check=False)
            self.works = self.run_result.returncode == 0
        except Exception:
            self.works = False
        return self.works

    def __enter__(self):
        # Stage 1: write the source to a uniquely named .c file.
        self.file_name = create_file_with_rand_name(self.source_code)
        self.c_name = self.file_name + C_EXTENSION
        # Stage 2: compile; on failure report and return with works=False.
        try:
            self.obj_names = self.compiler.compile([self.c_name], extra_preargs=self.extra_args)
        except Exception as exc:
            print("FAILED " + self.name + " compile check: " + str(exc))
            return self
        self.compiles = True
        # Stage 3: link the object files into an executable.
        try:
            self.compiler.link_executable(self.obj_names, self.file_name)
        except Exception as exc:
            print("FAILED " + self.name + " link check: " + str(exc))
            return self
        self.links = True
        # Stage 4: run the executable and report the outcome.
        if self.try_run():
            print("PASSED " + self.name)
        else:
            print("FAILED " + self.name + " run check: " + str(self.run_result.stderr))
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Best-effort cleanup of the source file, executable and object files.
        try:
            os.remove(self.c_name)
            if os.name == 'nt':
                os.remove(self.file_name + ".exe")
            else:
                os.remove(self.file_name)
            for objfile in self.obj_names:
                os.remove(objfile)
        except Exception as exc:
            # Swallow errors for files that were never created (e.g. when an
            # earlier stage failed).  NOTE(review): this also abandons any
            # remaining files once one removal fails — confirm intended.
            return
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.