| repo (stringlengths 2-99) | file (stringlengths 13-225) | code (stringlengths 0-18.3M) | file_length (int64 0-18.3M) | avg_line_length (float64 0-1.36M) | max_line_length (int64 0-4.26M) | extension_type (stringclasses, 1 value) |
|---|---|---|---|---|---|---|
STDEN | STDEN-main/model/ode_func.py |
import numpy as np
import torch
import torch.nn as nn
from lib import utils
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class LayerParams:
def __init__(self, rnn_network: nn.Module, layer_type: str):
self._rnn_network = rnn_network
self._params_dict = {}
self._biases_dict = {}
self._type = layer_type
def get_weights(self, shape):
if shape not in self._params_dict:
nn_param = nn.Parameter(torch.empty(*shape, device=device))
nn.init.xavier_normal_(nn_param)
self._params_dict[shape] = nn_param
self._rnn_network.register_parameter('{}_weight_{}'.format(self._type, str(shape)),
nn_param)
return self._params_dict[shape]
def get_biases(self, length, bias_start=0.0):
if length not in self._biases_dict:
biases = nn.Parameter(torch.empty(length, device=device))
nn.init.constant_(biases, bias_start)
self._biases_dict[length] = biases
self._rnn_network.register_parameter('{}_biases_{}'.format(self._type, str(length)),
biases)
return self._biases_dict[length]
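# --- Usage sketch (not from the original STDEN code): a minimal, hypothetical example of
# how LayerParams lazily creates, initializes, and registers parameters keyed by shape,
# so repeated requests for the same shape reuse a single nn.Parameter. `_Host` and the
# sizes below are made up for illustration.
def _demo_layer_params():
    class _Host(nn.Module):
        def __init__(self):
            super().__init__()
            self.gconv_params = LayerParams(self, 'gconv')
    host = _Host()
    w1 = host.gconv_params.get_weights((4, 8))    # created, Xavier-initialized, registered
    w2 = host.gconv_params.get_weights((4, 8))    # cached: the same Parameter object
    b = host.gconv_params.get_biases(8, bias_start=1.0)
    assert w1 is w2 and b.shape == (8,)
    # named_parameters() now contains 'gconv_weight_(4, 8)' and 'gconv_biases_8'
    return dict(host.named_parameters())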
class ODEFunc(nn.Module):
def __init__(self, num_units, latent_dim, adj_mx, gcn_step, num_nodes,
gen_layers=1, nonlinearity='tanh', filter_type="default"):
"""
:param num_units: dimensionality of the hidden layers
:param latent_dim: dimensionality used for the ODE (input and output); analog of a continuous latent state
:param adj_mx: adjacency matrix of the underlying graph
:param gcn_step: number of graph convolution (diffusion) steps
:param num_nodes: number of nodes in the graph
:param gen_layers: number of hidden layers in each ODE function
:param nonlinearity: 'tanh' or 'relu'
:param filter_type: 'unkP', 'IncP', or 'default' (latent diffusion process)
"""
super(ODEFunc, self).__init__()
self._activation = torch.tanh if nonlinearity == 'tanh' else torch.relu
self._num_nodes = num_nodes
self._num_units = num_units # hidden dimension
self._latent_dim = latent_dim
self._gen_layers = gen_layers
self.nfe = 0
self._filter_type = filter_type
if(self._filter_type == "unkP"):
ode_func_net = utils.create_net(latent_dim, latent_dim, n_units=num_units)
utils.init_network_weights(ode_func_net)
self.gradient_net = ode_func_net
else:
self._gcn_step = gcn_step
self._gconv_params = LayerParams(self, 'gconv')
self._supports = []
supports = []
supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
supports.append(utils.calculate_random_walk_matrix(adj_mx.T).T)
for support in supports:
self._supports.append(self._build_sparse_matrix(support))
@staticmethod
def _build_sparse_matrix(L):
L = L.tocoo()
indices = np.column_stack((L.row, L.col))
# this is to ensure row-major ordering to equal torch.sparse.sparse_reorder(L)
indices = indices[np.lexsort((indices[:, 0], indices[:, 1]))]
L = torch.sparse_coo_tensor(indices.T, L.data, L.shape, device=device)
return L
def forward(self, t_local, y, backwards = False):
"""
Perform one step of the ODE solve: given the current value y and the current time point t_local, return the gradient dy/dt.
:param t_local: current time point
:param y: value at the current time point, shape (B, num_nodes * latent_dim)
:return: a 2-D tensor with shape (B, num_nodes * latent_dim)
"""
self.nfe += 1
grad = self.get_ode_gradient_nn(t_local, y)
if backwards:
grad = -grad
return grad
def get_ode_gradient_nn(self, t_local, inputs):
if(self._filter_type == "unkP"):
grad = self._fc(inputs)
elif (self._filter_type == "IncP"):
grad = - self.ode_func_net(inputs)
else: # default is diffusion process
# theta shape: (B, num_nodes * latent_dim)
theta = torch.sigmoid(self._gconv(inputs, self._latent_dim, bias_start=1.0))
grad = - theta * self.ode_func_net(inputs)
return grad
def ode_func_net(self, inputs):
c = inputs
for i in range(self._gen_layers):
c = self._gconv(c, self._num_units)
c = self._activation(c)
c = self._gconv(c, self._latent_dim)
c = self._activation(c)
return c
def _fc(self, inputs):
batch_size = inputs.size()[0]
grad = self.gradient_net(inputs.view(batch_size * self._num_nodes, self._latent_dim))
return grad.reshape(batch_size, self._num_nodes * self._latent_dim) # (batch_size, num_nodes * latent_dim)
@staticmethod
def _concat(x, x_):
x_ = x_.unsqueeze(0)
return torch.cat([x, x_], dim=0)
def _gconv(self, inputs, output_size, bias_start=0.0):
# Reshape input and state to (batch_size, num_nodes, input_dim/state_dim)
batch_size = inputs.shape[0]
inputs = torch.reshape(inputs, (batch_size, self._num_nodes, -1))
# state = torch.reshape(state, (batch_size, self._num_nodes, -1))
# inputs_and_state = torch.cat([inputs, state], dim=2)
input_size = inputs.size(2)
x = inputs
x0 = x.permute(1, 2, 0) # (num_nodes, total_arg_size, batch_size)
x0 = torch.reshape(x0, shape=[self._num_nodes, input_size * batch_size])
x = torch.unsqueeze(x0, 0)
if self._gcn_step == 0:
pass
else:
for support in self._supports:
x1 = torch.sparse.mm(support, x0)
x = self._concat(x, x1)
for k in range(2, self._gcn_step + 1):
x2 = 2 * torch.sparse.mm(support, x1) - x0
x = self._concat(x, x2)
x1, x0 = x2, x1
num_matrices = len(self._supports) * self._gcn_step + 1  # +1 for x itself (the order-0 term)
x = torch.reshape(x, shape=[num_matrices, self._num_nodes, input_size, batch_size])
x = x.permute(3, 1, 2, 0) # (batch_size, num_nodes, input_size, order)
x = torch.reshape(x, shape=[batch_size * self._num_nodes, input_size * num_matrices])
weights = self._gconv_params.get_weights((input_size * num_matrices, output_size))
x = torch.matmul(x, weights) # (batch_size * self._num_nodes, output_size)
biases = self._gconv_params.get_biases(output_size, bias_start)
x += biases
# Reshape back to 2D: (batch_size, num_nodes, output_size) -> (batch_size, num_nodes * output_size)
return torch.reshape(x, [batch_size, self._num_nodes * output_size])
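# --- Usage sketch (not from the original file): a toy forward pass of ODEFunc with the
# default diffusion filter on a made-up 3-node graph. The adjacency matrix and all sizes
# here are illustrative assumptions only.
def _demo_ode_func():
    toy_adj = np.array([[0., 1., 0.],
                        [1., 0., 1.],
                        [1., 0., 0.]], dtype=np.float32)
    func = ODEFunc(num_units=8, latent_dim=4, adj_mx=toy_adj, gcn_step=2, num_nodes=3)
    y = torch.randn(5, 3 * 4, device=device)   # (B, num_nodes * latent_dim)
    dy_dt = func(t_local=0.0, y=y)             # gradient of the latent diffusion process
    return dy_dt.shape                         # torch.Size([5, 12])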
| 6,912 | 40.644578 | 135 | py |
STDEN | STDEN-main/model/stden_supervisor.py |
import os
import time
from random import SystemRandom
import numpy as np
import pandas as pd
import torch
from torch.utils.tensorboard import SummaryWriter
from lib import utils
from model.stden_model import STDENModel
from lib.metrics import masked_mae_loss, masked_mape_loss, masked_rmse_loss
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class STDENSupervisor:
def __init__(self, adj_mx, **kwargs):
self._kwargs = kwargs
self._data_kwargs = kwargs.get('data')
self._model_kwargs = kwargs.get('model')
self._train_kwargs = kwargs.get('train')
self.max_grad_norm = self._train_kwargs.get('max_grad_norm', 1.)
# logging.
self._log_dir = utils.get_log_dir(kwargs)
self._writer = SummaryWriter('runs/' + self._log_dir)
log_level = self._kwargs.get('log_level', 'INFO')
self._logger = utils.get_logger(self._log_dir, __name__, 'info.log', level=log_level)
# data set
self._data = utils.load_dataset(**self._data_kwargs)
self.standard_scaler = self._data['scaler']
self._logger.info('Scaler mean: {:.6f}, std {:.6f}.'.format(self.standard_scaler.mean, self.standard_scaler.std))
self.num_edges = (adj_mx > 0.).sum()
self.input_dim = int(self._model_kwargs.get('input_dim', 1))
self.seq_len = int(self._model_kwargs.get('seq_len')) # for the encoder
self.output_dim = int(self._model_kwargs.get('output_dim', 1))
self.use_curriculum_learning = bool(
self._model_kwargs.get('use_curriculum_learning', False))
self.horizon = int(self._model_kwargs.get('horizon', 1)) # for the decoder
# setup model
stden_model = STDENModel(adj_mx, self._logger, **self._model_kwargs)
self.stden_model = stden_model.cuda() if torch.cuda.is_available() else stden_model
self._logger.info("Model created")
self.experimentID = self._train_kwargs.get('load', 0)
if self.experimentID == 0:
# Make a new experiment ID
self.experimentID = int(SystemRandom().random()*100000)
self.ckpt_path = os.path.join("ckpt/", "experiment_" + str(self.experimentID))
self._epoch_num = self._train_kwargs.get('epoch', 0)
if self._epoch_num > 0:
self._logger.info('Loading model...')
self.load_model()
def save_model(self, epoch):
model_dir = self.ckpt_path
if not os.path.exists(model_dir):
os.makedirs(model_dir)
config = dict(self._kwargs)
config['model_state_dict'] = self.stden_model.state_dict()
config['epoch'] = epoch
model_path = os.path.join(model_dir, 'epo{}.tar'.format(epoch))
torch.save(config, model_path)
self._logger.info("Saved model at {}".format(epoch))
return model_path
def load_model(self):
self._setup_graph()
model_path = os.path.join(self.ckpt_path, 'epo{}.tar'.format(self._epoch_num))
assert os.path.exists(model_path), 'Weights at epoch %d not found' % self._epoch_num
checkpoint = torch.load(model_path, map_location='cpu')
self.stden_model.load_state_dict(checkpoint['model_state_dict'])
self._logger.info("Loaded model at {}".format(self._epoch_num))
def _setup_graph(self):
with torch.no_grad():
self.stden_model.eval()
val_iterator = self._data['val_loader'].get_iterator()
for _, (x, y) in enumerate(val_iterator):
x, y = self._prepare_data(x, y)
output = self.stden_model(x)
break
def train(self, **kwargs):
self._logger.info('Model mode: train')
kwargs.update(self._train_kwargs)
return self._train(**kwargs)
def _train(self, base_lr,
steps, patience=50, epochs=100, lr_decay_ratio=0.1, log_every=1, save_model=1,
test_every_n_epochs=10, epsilon=1e-8, **kwargs):
# `steps` lists the epoch milestones at which MultiStepLR decays the learning rate.
min_val_loss = float('inf')
wait = 0
optimizer = torch.optim.Adam(self.stden_model.parameters(), lr=base_lr, eps=epsilon)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=steps,
gamma=lr_decay_ratio)
self._logger.info('Start training ...')
# this will fail if model is loaded with a changed batch_size
num_batches = self._data['train_loader'].num_batch
self._logger.info("num_batches: {}".format(num_batches))
batches_seen = num_batches * self._epoch_num
# used for nfe
c = []
res, keys = [], []
for epoch_num in range(self._epoch_num, epochs):
self.stden_model.train()
train_iterator = self._data['train_loader'].get_iterator()
losses = []
start_time = time.time()
c.clear() #nfe
for i, (x, y) in enumerate(train_iterator):
if(i >= num_batches):
break
optimizer.zero_grad()
x, y = self._prepare_data(x, y)
output, fe = self.stden_model(x, y, batches_seen)
if batches_seen == 0:
# this is a workaround to accommodate dynamically registered parameters
optimizer = torch.optim.Adam(self.stden_model.parameters(), lr=base_lr, eps=epsilon)
loss = self._compute_loss(y, output)
self._logger.debug("FE: number - {}, time - {:.3f} s, err - {:.3f}".format(*fe, loss.item()))
c.append([*fe, loss.item()])
self._logger.debug(loss.item())
losses.append(loss.item())
batches_seen += 1 # global step in tensorboard
loss.backward()
# gradient clipping
torch.nn.utils.clip_grad_norm_(self.stden_model.parameters(), self.max_grad_norm)
optimizer.step()
del x, y, output, loss  # drop references so the tensors can be garbage collected
torch.cuda.empty_cache()  # release cached GPU memory back to the allocator
# used for nfe
res.append(pd.DataFrame(c, columns=['nfe', 'time', 'err']))
keys.append(epoch_num)
self._logger.info("epoch complete")
lr_scheduler.step()
self._logger.info("evaluating now!")
val_loss, _ = self.evaluate(dataset='val', batches_seen=batches_seen)
end_time = time.time()
self._writer.add_scalar('training loss',
np.mean(losses),
batches_seen)
if (epoch_num % log_every) == log_every - 1:
message = 'Epoch [{}/{}] ({}) train_mae: {:.4f}, val_mae: {:.4f}, lr: {:.6f}, ' \
'{:.1f}s'.format(epoch_num, epochs, batches_seen,
np.mean(losses), val_loss, lr_scheduler.get_lr()[0],
(end_time - start_time))
self._logger.info(message)
if (epoch_num % test_every_n_epochs) == test_every_n_epochs - 1:
test_loss, _ = self.evaluate(dataset='test', batches_seen=batches_seen)
message = 'Epoch [{}/{}] ({}) train_mae: {:.4f}, test_mae: {:.4f}, lr: {:.6f}, ' \
'{:.1f}s'.format(epoch_num, epochs, batches_seen,
np.mean(losses), test_loss, lr_scheduler.get_lr()[0],
(end_time - start_time))
self._logger.info(message)
if val_loss < min_val_loss:
wait = 0
if save_model:
model_file_name = self.save_model(epoch_num)
self._logger.info(
'Val loss decrease from {:.4f} to {:.4f}, '
'saving to {}'.format(min_val_loss, val_loss, model_file_name))
min_val_loss = val_loss
elif val_loss >= min_val_loss:
wait += 1
if wait == patience:
self._logger.warning('Early stopping at epoch: %d' % epoch_num)
break
if bool(self._model_kwargs.get('nfe', False)):
res = pd.concat(res, keys=keys)
# self._logger.info("res.shape: ", res.shape)
res.index.names = ['epoch', 'iter']
filter_type = self._model_kwargs.get('filter_type', 'unknown')
atol = float(self._model_kwargs.get('odeint_atol', 1e-5))
rtol = float(self._model_kwargs.get('odeint_rtol', 1e-5))
nfe_file = os.path.join(
self._data_kwargs.get('dataset_dir', 'data'),
'nfe_{}_a{}_r{}.pkl'.format(filter_type, int(atol*1e5), int(rtol*1e5)))
res.to_pickle(nfe_file)
# res.to_csv(nfe_file)
def _prepare_data(self, x, y):
x, y = self._get_x_y(x, y)
x, y = self._get_x_y_in_correct_dims(x, y)
return x.to(device), y.to(device)
def _get_x_y(self, x, y):
"""
:param x: shape (batch_size, seq_len, num_edges, input_dim)
:param y: shape (batch_size, horizon, num_edges, input_dim)
:returns x shape (seq_len, batch_size, num_edges, input_dim)
y shape (horizon, batch_size, num_edges, input_dim)
"""
x = torch.from_numpy(x).float()
y = torch.from_numpy(y).float()
self._logger.debug("X: {}".format(x.size()))
self._logger.debug("y: {}".format(y.size()))
x = x.permute(1, 0, 2, 3)
y = y.permute(1, 0, 2, 3)
return x, y
def _get_x_y_in_correct_dims(self, x, y):
"""
:param x: shape (seq_len, batch_size, num_edges, input_dim)
:param y: shape (horizon, batch_size, num_edges, input_dim)
:return: x: shape (seq_len, batch_size, num_edges * input_dim)
y: shape (horizon, batch_size, num_edges * output_dim)
"""
batch_size = x.size(1)
self._logger.debug("size of x {}".format(x.size()))
x = x.view(self.seq_len, batch_size, self.num_edges * self.input_dim)
y = y[..., :self.output_dim].view(self.horizon, batch_size,
self.num_edges * self.output_dim)
return x, y
def _compute_loss(self, y_true, y_predicted):
y_true = self.standard_scaler.inverse_transform(y_true)
y_predicted = self.standard_scaler.inverse_transform(y_predicted)
return masked_mae_loss(y_predicted, y_true)
def _compute_loss_eval(self, y_true, y_predicted):
y_true = self.standard_scaler.inverse_transform(y_true)
y_predicted = self.standard_scaler.inverse_transform(y_predicted)
return masked_mae_loss(y_predicted, y_true).item(), masked_mape_loss(y_predicted, y_true).item(), masked_rmse_loss(y_predicted, y_true).item()
def evaluate(self, dataset='val', batches_seen=0, save=False):
"""
Computes masked MAE, MAPE, and RMSE on the given dataset split and, if `save` is True, also collects the scaled predictions and ground truth.
:return: mean MAE and a dict with 'prediction' and 'truth' (None unless `save`)
"""
with torch.no_grad():
self.stden_model.eval()
val_iterator = self._data['{}_loader'.format(dataset)].get_iterator()
mae_losses = []
mape_losses = []
rmse_losses = []
y_dict = None
if(save):
y_truths = []
y_preds = []
for _, (x, y) in enumerate(val_iterator):
x, y = self._prepare_data(x, y)
output, fe = self.stden_model(x)
mae, mape, rmse = self._compute_loss_eval(y, output)
mae_losses.append(mae)
mape_losses.append(mape)
rmse_losses.append(rmse)
if(save):
y_truths.append(y.cpu())
y_preds.append(output.cpu())
mean_loss = {
'mae': np.mean(mae_losses),
'mape': np.mean(mape_losses),
'rmse': np.mean(rmse_losses)
}
self._logger.info('Evaluation: - mae - {:.4f} - mape - {:.4f} - rmse - {:.4f}'.format(mean_loss['mae'], mean_loss['mape'], mean_loss['rmse']))
self._writer.add_scalar('{} loss'.format(dataset), mean_loss['mae'], batches_seen)
if(save):
y_preds = np.concatenate(y_preds, axis=1)
y_truths = np.concatenate(y_truths, axis=1) # concatenate on batch dimension
y_truths_scaled = []
y_preds_scaled = []
# self._logger.debug("y_preds shape: {}, y_truth shape {}".format(y_preds.shape, y_truths.shape))
for t in range(y_preds.shape[0]):
y_truth = self.standard_scaler.inverse_transform(y_truths[t])
y_pred = self.standard_scaler.inverse_transform(y_preds[t])
y_truths_scaled.append(y_truth)
y_preds_scaled.append(y_pred)
y_preds_scaled = np.stack(y_preds_scaled)
y_truths_scaled = np.stack(y_truths_scaled)
y_dict = {'prediction': y_preds_scaled, 'truth': y_truths_scaled}
# save_dir = self._data_kwargs.get('dataset_dir', 'data')
# save_path = os.path.join(save_dir, 'pred.npz')
# np.savez(save_path, prediction=y_preds_scaled, truth=y_truths_scaled)
return mean_loss['mae'], y_dict
def eval_more(self, dataset='val', save=False, seq_len=[3, 6, 9, 12], extract_latent=False):
"""
Computes masked MAE, MAPE, and RMSE at the horizons given in `seq_len`, and saves the predictions (and optionally the latent features) if the corresponding flags are set.
"""
self._logger.info('Model mode: Evaluation')
with torch.no_grad():
self.stden_model.eval()
val_iterator = self._data['{}_loader'.format(dataset)].get_iterator()
mae_losses = []
mape_losses = []
rmse_losses = []
if(save):
y_truths = []
y_preds = []
if(extract_latent):
latents = []
# used for nfe
c = []
for _, (x, y) in enumerate(val_iterator):
x, y = self._prepare_data(x, y)
output, fe = self.stden_model(x)
mae, mape, rmse = [], [], []
for seq in seq_len:
_mae, _mape, _rmse = self._compute_loss_eval(y[seq-1], output[seq-1])
mae.append(_mae)
mape.append(_mape)
rmse.append(_rmse)
mae_losses.append(mae)
mape_losses.append(mape)
rmse_losses.append(rmse)
c.append([*fe, np.mean(mae)])
if(save):
y_truths.append(y.cpu())
y_preds.append(output.cpu())
if(extract_latent):
latents.append(self.stden_model.latent_feat.cpu())
mean_loss = {
'mae': np.mean(mae_losses, axis=0),
'mape': np.mean(mape_losses, axis=0),
'rmse': np.mean(rmse_losses, axis=0)
}
for i, seq in enumerate(seq_len):
self._logger.info('Evaluation seq {}: - mae - {:.4f} - mape - {:.4f} - rmse - {:.4f}'.format(
seq, mean_loss['mae'][i], mean_loss['mape'][i], mean_loss['rmse'][i]))
if(save):
# shape (horizon, num_samples, feat_dim)
y_preds = np.concatenate(y_preds, axis=1)
y_truths = np.concatenate(y_truths, axis=1) # concatenate on batch dimension
y_preds_scaled = self.standard_scaler.inverse_transform(y_preds)
y_truths_scaled = self.standard_scaler.inverse_transform(y_truths)
save_dir = self._data_kwargs.get('dataset_dir', 'data')
save_path = os.path.join(save_dir, 'pred_{}_{}.npz'.format(self.experimentID, self._epoch_num))
np.savez_compressed(save_path, prediction=y_preds_scaled, truth=y_truths_scaled)
if(extract_latent):
# concatenate on batch dimension
latents = np.concatenate(latents, axis=1)
# Shape of latents (horizon, num_samples, self.num_edges * self.output_dim)
save_dir = self._data_kwargs.get('dataset_dir', 'data')
filter_type = self._model_kwargs.get('filter_type', 'unknown')
save_path = os.path.join(save_dir, '{}_latent_{}_{}.npz'.format(filter_type, self.experimentID, self._epoch_num))
np.savez_compressed(save_path, latent=latents)
if bool(self._model_kwargs.get('nfe', False)):
res = pd.DataFrame(c, columns=['nfe', 'time', 'err'])
res.index.name = 'iter'
filter_type = self._model_kwargs.get('filter_type', 'unknown')
atol = float(self._model_kwargs.get('odeint_atol', 1e-5))
rtol = float(self._model_kwargs.get('odeint_rtol', 1e-5))
nfe_file = os.path.join(
self._data_kwargs.get('dataset_dir', 'data'),
'nfe_{}_a{}_r{}.pkl'.format(filter_type, int(atol*1e5), int(rtol*1e5)))
res.to_pickle(nfe_file)
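# --- Standalone sketch (not from the original file) of the reshaping performed by
# _get_x_y and _get_x_y_in_correct_dims: (batch, seq, num_edges, dim) arrays become
# (seq, batch, num_edges * dim) tensors. All sizes below are made-up illustrations,
# and reshape() is used instead of view() purely for brevity.
def _demo_prepare_data_shapes():
    batch, seq_len, horizon, num_edges, input_dim, output_dim = 2, 12, 12, 5, 1, 1
    x = np.random.rand(batch, seq_len, num_edges, input_dim).astype(np.float32)
    y = np.random.rand(batch, horizon, num_edges, input_dim).astype(np.float32)
    x_t = torch.from_numpy(x).permute(1, 0, 2, 3)   # (seq_len, batch, num_edges, input_dim)
    y_t = torch.from_numpy(y).permute(1, 0, 2, 3)
    x_t = x_t.reshape(seq_len, batch, num_edges * input_dim)
    y_t = y_t[..., :output_dim].reshape(horizon, batch, num_edges * output_dim)
    return x_t.shape, y_t.shape                     # torch.Size([12, 2, 5]) each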
| 17,713 | 41.684337 | 154 | py |
STDEN | STDEN-main/model/stden_model.py |
import time
import torch
import torch.nn as nn
from torch.nn.modules.rnn import GRU
from model.ode_func import ODEFunc
from model.diffeq_solver import DiffeqSolver
from lib import utils
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
class EncoderAttrs:
def __init__(self, adj_mx, **model_kwargs):
self.adj_mx = adj_mx
self.num_nodes = adj_mx.shape[0]
self.num_edges = (adj_mx > 0.).sum()
self.gcn_step = int(model_kwargs.get('gcn_step', 2))
self.filter_type = model_kwargs.get('filter_type', 'default')
self.num_rnn_layers = int(model_kwargs.get('num_rnn_layers', 1))
self.rnn_units = int(model_kwargs.get('rnn_units'))
self.latent_dim = int(model_kwargs.get('latent_dim', 4))
class STDENModel(nn.Module, EncoderAttrs):
def __init__(self, adj_mx, logger, **model_kwargs):
nn.Module.__init__(self)
EncoderAttrs.__init__(self, adj_mx, **model_kwargs)
self._logger = logger
####################################################
# recognition net
####################################################
self.encoder_z0 = Encoder_z0_RNN(adj_mx, **model_kwargs)
####################################################
# ode solver
####################################################
self.n_traj_samples = int(model_kwargs.get('n_traj_samples', 1))
self.ode_method = model_kwargs.get('ode_method', 'dopri5')
self.atol = float(model_kwargs.get('odeint_atol', 1e-4))
self.rtol = float(model_kwargs.get('odeint_rtol', 1e-3))
self.num_gen_layer = int(model_kwargs.get('gen_layers', 1))
self.ode_gen_dim = int(model_kwargs.get('gen_dim', 64))
ode_set_str = "ODE setting --latent {} --samples {} --method {} \
--atol {:6f} --rtol {:6f} --gen_layer {} --gen_dim {}".format(\
self.latent_dim, self.n_traj_samples, self.ode_method, \
self.atol, self.rtol, self.num_gen_layer, self.ode_gen_dim)
odefunc = ODEFunc(self.ode_gen_dim, # hidden dimension
self.latent_dim,
adj_mx,
self.gcn_step,
self.num_nodes,
filter_type=self.filter_type
).to(device)
self.diffeq_solver = DiffeqSolver(odefunc,
self.ode_method,
self.latent_dim,
odeint_rtol=self.rtol,
odeint_atol=self.atol
)
self._logger.info(ode_set_str)
self.save_latent = bool(model_kwargs.get('save_latent', False))
self.latent_feat = None # used to extract the latent feature
####################################################
# decoder
####################################################
self.horizon = int(model_kwargs.get('horizon', 1))
self.out_feat = int(model_kwargs.get('output_dim', 1))
self.decoder = Decoder(
self.out_feat,
adj_mx,
self.num_nodes,
self.num_edges,
).to(device)
##########################################
def forward(self, inputs, labels=None, batches_seen=None):
"""
seq2seq forward pass
:param inputs: shape (seq_len, batch_size, num_edges * input_dim)
:param labels: shape (horizon, batch_size, num_edges * output_dim)
:param batches_seen: batches seen till now
:return: outputs: (self.horizon, batch_size, self.num_edges * self.output_dim)
"""
perf_time = time.time()
# shape: [1, batch, num_nodes * latent_dim]
first_point_mu, first_point_std = self.encoder_z0(inputs)
self._logger.debug("Recognition complete with {:.1f}s".format(time.time() - perf_time))
# sample 'n_traj_samples' trajectory
perf_time = time.time()
means_z0 = first_point_mu.repeat(self.n_traj_samples, 1, 1)
sigma_z0 = first_point_std.repeat(self.n_traj_samples, 1, 1)
first_point_enc = utils.sample_standard_gaussian(means_z0, sigma_z0)
time_steps_to_predict = torch.arange(start=0, end=self.horizon, step=1).float().to(device)
time_steps_to_predict = time_steps_to_predict / len(time_steps_to_predict)
# Shape of sol_ys (horizon, n_traj_samples, batch_size, self.num_nodes * self.latent_dim)
sol_ys, fe = self.diffeq_solver(first_point_enc, time_steps_to_predict)
self._logger.debug("ODE solver complete with {:.1f}s".format(time.time() - perf_time))
if(self.save_latent):
# Shape of latent_feat (horizon, batch_size, self.num_nodes * self.latent_dim)
self.latent_feat = torch.mean(sol_ys.detach(), axis=1)
perf_time = time.time()
outputs = self.decoder(sol_ys)
self._logger.debug("Decoder complete with {:.1f}s".format(time.time() - perf_time))
if batches_seen == 0:
self._logger.info(
"Total trainable parameters {}".format(count_parameters(self))
)
return outputs, fe
class Encoder_z0_RNN(nn.Module, EncoderAttrs):
def __init__(self, adj_mx, **model_kwargs):
nn.Module.__init__(self)
EncoderAttrs.__init__(self, adj_mx, **model_kwargs)
self.recg_type = model_kwargs.get('recg_type', 'gru') # gru
if(self.recg_type == 'gru'):
# gru settings
self.input_dim = int(model_kwargs.get('input_dim', 1))
self.gru_rnn = GRU(self.input_dim, self.rnn_units).to(device)
else:
raise NotImplementedError("The recognition net only supports 'gru'.")
# hidden to z0 settings
self.inv_grad = utils.graph_grad(adj_mx).transpose(-2, -1)
self.inv_grad[self.inv_grad != 0.] = 0.5
self.hiddens_to_z0 = nn.Sequential(
nn.Linear(self.rnn_units, 50),
nn.Tanh(),
nn.Linear(50, self.latent_dim * 2),)
utils.init_network_weights(self.hiddens_to_z0)
def forward(self, inputs):
"""
encoder forward pass on t time steps
:param inputs: shape (seq_len, batch_size, num_edges * input_dim)
:return: mean, std: # shape (n_samples=1, batch_size, self.latent_dim)
"""
if(self.recg_type == 'gru'):
# GRU outputs have shape (seq_len, batch_size * num_edges, rnn_units)
seq_len, batch_size = inputs.size(0), inputs.size(1)
inputs = inputs.reshape(seq_len, batch_size, self.num_edges, self.input_dim)
inputs = inputs.reshape(seq_len, batch_size * self.num_edges, self.input_dim)
outputs, _ = self.gru_rnn(inputs)
last_output = outputs[-1]
# (batch_size, num_edges, rnn_units)
last_output = torch.reshape(last_output, (batch_size, self.num_edges, -1))
last_output = torch.transpose(last_output, -2, -1)  # (batch_size, rnn_units, num_edges)
# (batch_size, num_nodes, rnn_units)
last_output = torch.matmul(last_output, self.inv_grad).transpose(-2, -1)
else:
raise NotImplementedError("The recognition net only supports 'gru'.")
mean, std = utils.split_last_dim(self.hiddens_to_z0(last_output))
mean = mean.reshape(batch_size, -1) # (batch_size, num_nodes * latent_dim)
std = std.reshape(batch_size, -1) # (batch_size, num_nodes * latent_dim)
std = std.abs()
assert(not torch.isnan(mean).any())
assert(not torch.isnan(std).any())
return mean.unsqueeze(0), std.unsqueeze(0) # for n_sample traj
class Decoder(nn.Module):
def __init__(self, output_dim, adj_mx, num_nodes, num_edges):
super(Decoder, self).__init__()
self.num_nodes = num_nodes
self.num_edges = num_edges
self.graph_grad = utils.graph_grad(adj_mx)
self.output_dim = output_dim
def forward(self, inputs):
"""
:param inputs: (horizon, n_traj_samples, batch_size, num_nodes * latent_dim)
:return outputs: (horizon, batch_size, num_edges * output_dim), average result of n_traj_samples.
"""
assert(len(inputs.size()) == 4)
horizon, n_traj_samples, batch_size = inputs.size()[:3]
inputs = inputs.reshape(horizon, n_traj_samples, batch_size, self.num_nodes, -1).transpose(-2, -1)
latent_dim = inputs.size(-2)
# transform z with shape `(..., num_nodes)` to f with shape `(..., num_edges)`.
outputs = torch.matmul(inputs, self.graph_grad)
outputs = outputs.reshape(horizon, n_traj_samples, batch_size, latent_dim, self.num_edges, self.output_dim)
outputs = torch.mean(
torch.mean(outputs, axis=3),
axis=1
)
outputs = outputs.reshape(horizon, batch_size, -1)
return outputs
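# --- Illustration (not from the original file): how the Decoder maps node-level latent
# states to edge-level flows through the graph gradient operator. The 3-node adjacency
# matrix and all sizes are made-up assumptions for this sketch.
def _demo_decoder():
    import numpy as np  # local import: numpy is not imported at the top of this file
    toy_adj = np.array([[0., 1., 1.],
                        [0., 0., 1.],
                        [0., 0., 0.]], dtype=np.float32)
    num_nodes, num_edges = 3, int((toy_adj > 0.).sum())          # 3 directed edges
    dec = Decoder(output_dim=1, adj_mx=toy_adj, num_nodes=num_nodes, num_edges=num_edges)
    # (horizon, n_traj_samples, batch_size, num_nodes * latent_dim), with latent_dim = 2
    sol_ys = torch.randn(12, 1, 4, num_nodes * 2)
    out = dec(sol_ys)
    return out.shape                                              # torch.Size([12, 4, 3])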
| 8,910 | 42.048309 | 115 | py |
STDEN | STDEN-main/model/__init__.py | | 0 | 0 | 0 | py |
STDEN | STDEN-main/lib/utils.py |
import logging
import numpy as np
import os
import time
import scipy.sparse as sp
import sys
import torch
import torch.nn as nn
class DataLoader(object):
def __init__(self, xs, ys, batch_size, pad_with_last_sample=True, shuffle=False):
"""
:param xs:
:param ys:
:param batch_size:
:param pad_with_last_sample: pad with the last sample so the number of samples is divisible by batch_size.
"""
self.batch_size = batch_size
self.current_ind = 0
if pad_with_last_sample:
num_padding = (batch_size - (len(xs) % batch_size)) % batch_size
x_padding = np.repeat(xs[-1:], num_padding, axis=0)
y_padding = np.repeat(ys[-1:], num_padding, axis=0)
xs = np.concatenate([xs, x_padding], axis=0)
ys = np.concatenate([ys, y_padding], axis=0)
self.size = len(xs)
self.num_batch = int(self.size // self.batch_size)
if shuffle:
permutation = np.random.permutation(self.size)
xs, ys = xs[permutation], ys[permutation]
self.xs = xs
self.ys = ys
def get_iterator(self):
self.current_ind = 0
def _wrapper():
while self.current_ind < self.num_batch:
start_ind = self.batch_size * self.current_ind
end_ind = min(self.size, self.batch_size * (self.current_ind + 1))
x_i = self.xs[start_ind: end_ind, ...]
y_i = self.ys[start_ind: end_ind, ...]
yield (x_i, y_i)
self.current_ind += 1
return _wrapper()
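# --- Usage sketch (not from the original file): padding and batching behaviour of
# DataLoader on made-up arrays. With 10 samples and batch_size=4, two copies of the
# last sample are appended so that exactly 3 full batches are produced.
def _demo_dataloader():
    xs = np.arange(10).reshape(10, 1)
    ys = np.arange(10).reshape(10, 1) * 10
    loader = DataLoader(xs, ys, batch_size=4)
    batch_sizes = [x_i.shape[0] for x_i, _ in loader.get_iterator()]
    return loader.size, loader.num_batch, batch_sizes   # 12, 3, [4, 4, 4]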
class StandardScaler:
"""
Standardize the input.
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def transform(self, data):
return (data - self.mean) / self.std
def inverse_transform(self, data):
return (data * self.std) + self.mean
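# --- Usage sketch (not from the original file): transform and inverse_transform are exact
# inverses, which is how the supervisor recovers real-valued flows before computing the
# masked metrics. The mean/std values below are arbitrary.
def _demo_scaler():
    scaler = StandardScaler(mean=50.0, std=10.0)
    z = scaler.transform(np.array([40.0, 50.0, 70.0]))   # [-1., 0., 2.]
    return scaler.inverse_transform(z)                   # [40., 50., 70.]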
def calculate_random_walk_matrix(adj_mx):
adj_mx = sp.coo_matrix(adj_mx)
d = np.array(adj_mx.sum(1))
d_inv = np.power(d, -1).flatten()
d_inv[np.isinf(d_inv)] = 0.
d_mat_inv = sp.diags(d_inv)
random_walk_mx = d_mat_inv.dot(adj_mx).tocoo()
return random_walk_mx
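# --- Illustration (not from the original file): D^-1 A for a made-up 3-node graph. Each
# row of the result sums to 1, except rows of isolated nodes, whose inverse degree is
# zeroed out above.
def _demo_random_walk_matrix():
    toy_adj = np.array([[0., 1., 1.],
                        [1., 0., 0.],
                        [0., 0., 0.]])
    rw = calculate_random_walk_matrix(toy_adj).todense()
    # rw == [[0. , 0.5, 0.5],
    #        [1. , 0. , 0. ],
    #        [0. , 0. , 0. ]]
    return rw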
def config_logging(log_dir, log_filename='info.log', level=logging.INFO):
# Add file handler and stdout handler
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Create the log directory if necessary.
try:
os.makedirs(log_dir)
except OSError:
pass
file_handler = logging.FileHandler(os.path.join(log_dir, log_filename))
file_handler.setFormatter(formatter)
file_handler.setLevel(level=level)
# Add console handler.
console_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(console_formatter)
console_handler.setLevel(level=level)
logging.basicConfig(handlers=[file_handler, console_handler], level=level)
def get_logger(log_dir, name, log_filename='info.log', level=logging.INFO):
logger = logging.getLogger(name)
logger.setLevel(level)
# Add file handler and stdout handler
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler(os.path.join(log_dir, log_filename))
file_handler.setFormatter(formatter)
# Add console handler.
console_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(console_formatter)
logger.addHandler(file_handler)
logger.addHandler(console_handler)
# Add google cloud log handler
logger.info('Log directory: %s', log_dir)
return logger
def get_log_dir(kwargs):
log_dir = kwargs['train'].get('log_dir')
if log_dir is None:
batch_size = kwargs['data'].get('batch_size')
filter_type = kwargs['model'].get('filter_type')
gcn_step = kwargs['model'].get('gcn_step')
horizon = kwargs['model'].get('horizon')
latent_dim = kwargs['model'].get('latent_dim')
n_traj_samples = kwargs['model'].get('n_traj_samples')
ode_method = kwargs['model'].get('ode_method')
seq_len = kwargs['model'].get('seq_len')
rnn_units = kwargs['model'].get('rnn_units')
recg_type = kwargs['model'].get('recg_type')
if filter_type == 'unkP':
filter_type_abbr = 'UP'
elif filter_type == 'IncP':
filter_type_abbr = 'NV'
else:
filter_type_abbr = 'DF'
run_id = 'STDEN_%s-%d_%s-%d_L-%d_N-%d_M-%s_bs-%d_%d-%d_%s/' % (
recg_type, rnn_units, filter_type_abbr, gcn_step, latent_dim, n_traj_samples, ode_method, batch_size, seq_len, horizon, time.strftime('%m%d%H%M%S'))
base_dir = kwargs.get('log_base_dir')
log_dir = os.path.join(base_dir, run_id)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
return log_dir
def load_dataset(dataset_dir, batch_size, val_batch_size=None, **kwargs):
if('BJ' in dataset_dir):
data = dict(np.load(os.path.join(dataset_dir, 'flow.npz'))) # convert readonly NpzFile to writable dict Object
for category in ['train', 'val', 'test']:
data['x_' + category] = data['x_' + category] #[..., :4] # ignore the time index
else:
data = {}
for category in ['train', 'val', 'test']:
cat_data = np.load(os.path.join(dataset_dir, category + '.npz'))
data['x_' + category] = cat_data['x']
data['y_' + category] = cat_data['y']
scaler = StandardScaler(mean=data['x_train'].mean(), std=data['x_train'].std())
# Data format
for category in ['train', 'val', 'test']:
data['x_' + category] = scaler.transform(data['x_' + category])
data['y_' + category] = scaler.transform(data['y_' + category])
data['train_loader'] = DataLoader(data['x_train'], data['y_train'], batch_size, shuffle=True)
data['val_loader'] = DataLoader(data['x_val'], data['y_val'], val_batch_size, shuffle=False)
data['test_loader'] = DataLoader(data['x_test'], data['y_test'], val_batch_size, shuffle=False)
data['scaler'] = scaler
return data
def load_graph_data(pkl_filename):
adj_mx = np.load(pkl_filename)
return adj_mx
def graph_grad(adj_mx):
"""Fetch the graph gradient operator."""
num_nodes = adj_mx.shape[0]
num_edges = (adj_mx > 0.).sum()
grad = torch.zeros(num_nodes, num_edges)
e = 0
for i in range(num_nodes):
for j in range(num_nodes):
if adj_mx[i, j] == 0:
continue
grad[i, e] = 1.
grad[j, e] = -1.
e += 1
return grad
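# --- Illustration (not from the original file): the node-to-edge gradient (incidence)
# operator for a made-up 2-node graph with one directed edge 0 -> 1. Each edge column
# carries +1 at its source node and -1 at its target node, so multiplying a node signal
# by this matrix yields per-edge differences.
def _demo_graph_grad():
    toy_adj = np.array([[0., 1.],
                        [0., 0.]])
    grad = graph_grad(toy_adj)
    # grad == tensor([[ 1.],
    #                 [-1.]])   shape: (num_nodes, num_edges) = (2, 1)
    return grad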
def init_network_weights(net, std = 0.1):
"""
Initialize weights with N(0, std); only nn.Linear layers are touched.
"""
for m in net.modules():
if isinstance(m, nn.Linear):
nn.init.normal_(m.weight, mean=0, std=std)
nn.init.constant_(m.bias, val=0)
def split_last_dim(data):
last_dim = data.size()[-1]
last_dim = last_dim//2
res = data[..., :last_dim], data[..., last_dim:]
return res
def get_device(tensor):
device = torch.device("cpu")
if tensor.is_cuda:
device = tensor.get_device()
return device
def sample_standard_gaussian(mu, sigma):
device = get_device(mu)
d = torch.distributions.normal.Normal(torch.Tensor([0.]).to(device), torch.Tensor([1.]).to(device))
r = d.sample(mu.size()).squeeze(-1)
return r * sigma.float() + mu.float()
def create_net(n_inputs, n_outputs, n_layers = 0,
n_units = 100, nonlinear = nn.Tanh):
layers = [nn.Linear(n_inputs, n_units)]
for i in range(n_layers):
layers.append(nonlinear())
layers.append(nn.Linear(n_units, n_units))
layers.append(nonlinear())
layers.append(nn.Linear(n_units, n_outputs))
return nn.Sequential(*layers)
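# --- Usage sketch (not from the original file): create_net builds the plain MLP used for
# the 'unkP' ODE function in ode_func.py. With the made-up sizes below, the result is
# Linear(4, 64) -> Tanh -> Linear(64, 4).
def _demo_create_net():
    net = create_net(n_inputs=4, n_outputs=4, n_layers=0, n_units=64)
    return [type(m).__name__ for m in net]   # ['Linear', 'Tanh', 'Linear']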
| 7,962 | 33.925439 | 160 | py |
STDEN | STDEN-main/lib/metrics.py |
import torch
def masked_mae_loss(y_pred, y_true):
y_true[y_true < 1e-4] = 0
mask = (y_true != 0).float()
mask /= mask.mean()  # re-normalize: zero targets get weight 0, nonzero targets are up-weighted so the mean weight stays 1
loss = torch.abs(y_pred - y_true)
loss = loss * mask
# trick for nans: https://discuss.pytorch.org/t/how-to-set-nan-in-tensor-to-0/3918/3
loss[loss != loss] = 0
return loss.mean()
def masked_mape_loss(y_pred, y_true):
y_true[y_true < 1e-4] = 0
mask = (y_true != 0).float()
mask /= mask.mean()
loss = torch.abs((y_pred - y_true) / y_true)
loss = loss * mask
loss[loss != loss] = 0
return loss.mean()
def masked_rmse_loss(y_pred, y_true):
y_true[y_true < 1e-4] = 0
mask = (y_true != 0).float()
mask /= mask.mean()
loss = torch.pow(y_pred - y_true, 2)
loss = loss * mask
loss[loss != loss] = 0
return torch.sqrt(loss.mean())
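# --- Illustration (not from the original file): how the zero-value mask re-weights the
# loss. Entries where y_true is (near) zero contribute nothing, and the remaining entries
# are scaled by 1 / mask.mean(), so the result equals the MAE over nonzero targets. The
# toy tensors below are made up.
def _demo_masked_mae():
    y_true = torch.tensor([0.0, 2.0, 4.0, 6.0])
    y_pred = torch.tensor([1.0, 1.0, 5.0, 5.0])
    # mask = [0, 1, 1, 1]; mask / mask.mean() = [0, 4/3, 4/3, 4/3]
    # loss = mean([0, 4/3, 4/3, 4/3]) = 1.0 = MAE over the three nonzero targets
    return masked_mae_loss(y_pred, y_true)   # tensor(1.)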
| 896 | 28.9 | 88 | py |
STDEN | STDEN-main/lib/__init__.py | | 0 | 0 | 0 | py |
grpc | grpc-master/setup.py |
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A setup module for the GRPC Python package."""
# NOTE(https://github.com/grpc/grpc/issues/24028): allow setuptools to monkey
# patch distutils
import setuptools # isort:skip
# Monkey Patch the unix compiler to accept ASM
# files used by boring SSL.
from distutils.unixccompiler import UnixCCompiler
UnixCCompiler.src_extensions.append(".S")
del UnixCCompiler
from distutils import cygwinccompiler
from distutils import extension as _extension
from distutils import util
import os
import os.path
import pathlib
import platform
import re
import shlex
import shutil
import subprocess
from subprocess import PIPE
import sys
import sysconfig
import _metadata
import pkg_resources
from setuptools.command import egg_info
# Redirect the manifest template from MANIFEST.in to PYTHON-MANIFEST.in.
egg_info.manifest_maker.template = "PYTHON-MANIFEST.in"
PY3 = sys.version_info.major == 3
PYTHON_STEM = os.path.join("src", "python", "grpcio")
CORE_INCLUDE = (
"include",
".",
)
ABSL_INCLUDE = (os.path.join("third_party", "abseil-cpp"),)
ADDRESS_SORTING_INCLUDE = (
os.path.join("third_party", "address_sorting", "include"),
)
CARES_INCLUDE = (
os.path.join("third_party", "cares", "cares", "include"),
os.path.join("third_party", "cares"),
os.path.join("third_party", "cares", "cares"),
)
if "darwin" in sys.platform:
CARES_INCLUDE += (os.path.join("third_party", "cares", "config_darwin"),)
if "freebsd" in sys.platform:
CARES_INCLUDE += (os.path.join("third_party", "cares", "config_freebsd"),)
if "linux" in sys.platform:
CARES_INCLUDE += (os.path.join("third_party", "cares", "config_linux"),)
if "openbsd" in sys.platform:
CARES_INCLUDE += (os.path.join("third_party", "cares", "config_openbsd"),)
RE2_INCLUDE = (os.path.join("third_party", "re2"),)
SSL_INCLUDE = (
os.path.join("third_party", "boringssl-with-bazel", "src", "include"),
)
UPB_INCLUDE = (os.path.join("third_party", "upb"),)
UPB_GRPC_GENERATED_INCLUDE = (
os.path.join("src", "core", "ext", "upb-generated"),
)
UPBDEFS_GRPC_GENERATED_INCLUDE = (
os.path.join("src", "core", "ext", "upbdefs-generated"),
)
UTF8_RANGE_INCLUDE = (os.path.join("third_party", "utf8_range"),)
XXHASH_INCLUDE = (os.path.join("third_party", "xxhash"),)
ZLIB_INCLUDE = (os.path.join("third_party", "zlib"),)
README = os.path.join(PYTHON_STEM, "README.rst")
# Ensure we're in the proper directory whether or not we're being used by pip.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.abspath(PYTHON_STEM))
# Break import-style to ensure we can actually find our in-repo dependencies.
import _parallel_compile_patch
import _spawn_patch
import grpc_core_dependencies
import commands
import grpc_version
_parallel_compile_patch.monkeypatch_compile_maybe()
_spawn_patch.monkeypatch_spawn()
LICENSE = "Apache License 2.0"
CLASSIFIERS = [
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"License :: OSI Approved :: Apache Software License",
]
def _env_bool_value(env_name, default):
"""Parses a bool option from an environment variable"""
return os.environ.get(env_name, default).upper() not in ["FALSE", "0", ""]
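# A small illustrative check of the parsing rule above (not part of the original
# setup.py). Anything other than "FALSE", "0", or the empty string, case-insensitively,
# counts as true; `default` is used when the variable is unset. The variable name
# GRPC_DEMO_FLAG is hypothetical.
def _demo_env_bool_value():
    os.environ["GRPC_DEMO_FLAG"] = "false"
    assert _env_bool_value("GRPC_DEMO_FLAG", "True") is False
    os.environ["GRPC_DEMO_FLAG"] = "1"
    assert _env_bool_value("GRPC_DEMO_FLAG", "True") is True
    del os.environ["GRPC_DEMO_FLAG"]
    assert _env_bool_value("GRPC_DEMO_FLAG", "False") is False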
BUILD_WITH_BORING_SSL_ASM = _env_bool_value(
"GRPC_BUILD_WITH_BORING_SSL_ASM", "True"
)
# Export this environment variable to override the platform variant that will
# be chosen for boringssl assembly optimizations. This option is useful when
# crosscompiling and the host platform as obtained by distutils.util.get_platform()
# doesn't match the platform we are targeting.
# Example value: "linux-aarch64"
BUILD_OVERRIDE_BORING_SSL_ASM_PLATFORM = os.environ.get(
"GRPC_BUILD_OVERRIDE_BORING_SSL_ASM_PLATFORM", ""
)
# Environment variable to determine whether or not the Cython extension should
# *use* Cython or use the generated C files. Note that this requires the C files
# to have been generated by building first *with* Cython support. Even if this
# is set to false, if the script detects that the generated `.c` file isn't
# present, then it will still attempt to use Cython.
BUILD_WITH_CYTHON = _env_bool_value("GRPC_PYTHON_BUILD_WITH_CYTHON", "False")
# Export this variable to use the system installation of openssl. You need to
# have the header files installed (in /usr/include/openssl) and during
# runtime, the shared library must be installed
BUILD_WITH_SYSTEM_OPENSSL = _env_bool_value(
"GRPC_PYTHON_BUILD_SYSTEM_OPENSSL", "False"
)
# Export this variable to use the system installation of zlib. You need to
# have the header files installed (in /usr/include/) and during
# runtime, the shared library must be installed
BUILD_WITH_SYSTEM_ZLIB = _env_bool_value(
"GRPC_PYTHON_BUILD_SYSTEM_ZLIB", "False"
)
# Export this variable to use the system installation of cares. You need to
# have the header files installed (in /usr/include/) and during
# runtime, the shared library must be installed
BUILD_WITH_SYSTEM_CARES = _env_bool_value(
"GRPC_PYTHON_BUILD_SYSTEM_CARES", "False"
)
# Export this variable to use the system installation of re2. You need to
# have the header files installed (in /usr/include/re2) and during
# runtime, the shared library must be installed
BUILD_WITH_SYSTEM_RE2 = _env_bool_value("GRPC_PYTHON_BUILD_SYSTEM_RE2", "False")
# Export this variable to use the system installation of abseil. You need to
# have the header files installed (in /usr/include/absl) and during
# runtime, the shared library must be installed
BUILD_WITH_SYSTEM_ABSL = os.environ.get("GRPC_PYTHON_BUILD_SYSTEM_ABSL", False)
# Export this variable to force building the python extension with a statically linked libstdc++.
# At least on linux, this is normally not needed as we can build manylinux-compatible wheels on linux just fine
# without statically linking libstdc++ (which leads to a slight increase in the wheel size).
# This option is useful when crosscompiling wheels for aarch64 where
# it's difficult to ensure that the crosscompilation toolchain has a high-enough version
# of GCC (we require >=5.1) but still uses old-enough libstdc++ symbols.
# TODO(jtattermusch): remove this workaround once issues with crosscompiler version are resolved.
BUILD_WITH_STATIC_LIBSTDCXX = _env_bool_value(
"GRPC_PYTHON_BUILD_WITH_STATIC_LIBSTDCXX", "False"
)
# For local development use only: This skips building gRPC Core and its
# dependencies, including protobuf and boringssl. This allows "incremental"
# compilation by first building gRPC Core using make, then building only the
# Python/Cython layers here.
#
# Note that this requires libboringssl.a in the libs/{dbg,opt}/ directory, which
# may require configuring make to not use the system openssl implementation:
#
# make HAS_SYSTEM_OPENSSL_ALPN=0
#
# TODO(ericgribkoff) Respect the BUILD_WITH_SYSTEM_* flags alongside this option
USE_PREBUILT_GRPC_CORE = _env_bool_value(
"GRPC_PYTHON_USE_PREBUILT_GRPC_CORE", "False"
)
# If this environment variable is set, GRPC will not try to be compatible with
# libc versions older than the one it was compiled against.
DISABLE_LIBC_COMPATIBILITY = _env_bool_value(
"GRPC_PYTHON_DISABLE_LIBC_COMPATIBILITY", "False"
)
# Environment variable to determine whether or not to enable coverage analysis
# in Cython modules.
ENABLE_CYTHON_TRACING = _env_bool_value(
"GRPC_PYTHON_ENABLE_CYTHON_TRACING", "False"
)
# Environment variable specifying whether or not there's interest in setting up
# documentation building.
ENABLE_DOCUMENTATION_BUILD = _env_bool_value(
"GRPC_PYTHON_ENABLE_DOCUMENTATION_BUILD", "False"
)
def check_linker_need_libatomic():
"""Test if linker on system needs libatomic."""
code_test = (
b"#include <atomic>\n"
+ b"int main() { return std::atomic<int64_t>{}; }"
)
cxx = shlex.split(os.environ.get("CXX", "c++"))
cpp_test = subprocess.Popen(
cxx + ["-x", "c++", "-std=c++14", "-"],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
)
cpp_test.communicate(input=code_test)
if cpp_test.returncode == 0:
return False
# Double-check to see if -latomic actually can solve the problem.
# https://github.com/grpc/grpc/issues/22491
cpp_test = subprocess.Popen(
cxx + ["-x", "c++", "-std=c++14", "-", "-latomic"],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
)
cpp_test.communicate(input=code_test)
return cpp_test.returncode == 0
# There are some situations (like on Windows) where CC, CFLAGS, and LDFLAGS are
# entirely ignored/dropped/forgotten by distutils and its Cygwin/MinGW support.
# We use these environment variables to thus get around that without locking
# ourselves in w.r.t. the multitude of operating systems this ought to build on.
# We can also use these variables as a way to inject environment-specific
# compiler/linker flags. We assume GCC-like compilers and/or MinGW as a
# reasonable default.
EXTRA_ENV_COMPILE_ARGS = os.environ.get("GRPC_PYTHON_CFLAGS", None)
EXTRA_ENV_LINK_ARGS = os.environ.get("GRPC_PYTHON_LDFLAGS", None)
if EXTRA_ENV_COMPILE_ARGS is None:
EXTRA_ENV_COMPILE_ARGS = " -std=c++14"
if "win32" in sys.platform:
if sys.version_info < (3, 5):
EXTRA_ENV_COMPILE_ARGS += " -D_hypot=hypot"
# We use define flags here and don't directly add to DEFINE_MACROS below to
# ensure that the expert user/builder has a way of turning it off (via the
# envvars) without adding yet more GRPC-specific envvars.
# See https://sourceforge.net/p/mingw-w64/bugs/363/
if "32" in platform.architecture()[0]:
EXTRA_ENV_COMPILE_ARGS += (
" -D_ftime=_ftime32 -D_timeb=__timeb32"
" -D_ftime_s=_ftime32_s"
)
else:
EXTRA_ENV_COMPILE_ARGS += (
" -D_ftime=_ftime64 -D_timeb=__timeb64"
)
else:
# We need to statically link the C++ Runtime, only the C runtime is
# available dynamically
EXTRA_ENV_COMPILE_ARGS += " /MT"
elif "linux" in sys.platform:
EXTRA_ENV_COMPILE_ARGS += (
" -fvisibility=hidden -fno-wrapv -fno-exceptions"
)
elif "darwin" in sys.platform:
EXTRA_ENV_COMPILE_ARGS += (
" -stdlib=libc++ -fvisibility=hidden -fno-wrapv -fno-exceptions"
" -DHAVE_UNISTD_H"
)
if EXTRA_ENV_LINK_ARGS is None:
EXTRA_ENV_LINK_ARGS = ""
if "linux" in sys.platform or "darwin" in sys.platform:
EXTRA_ENV_LINK_ARGS += " -lpthread"
if check_linker_need_libatomic():
EXTRA_ENV_LINK_ARGS += " -latomic"
elif "win32" in sys.platform and sys.version_info < (3, 5):
msvcr = cygwinccompiler.get_msvcr()[0]
EXTRA_ENV_LINK_ARGS += (
" -static-libgcc -static-libstdc++ -mcrtdll={msvcr}"
" -static -lshlwapi".format(msvcr=msvcr)
)
if "linux" in sys.platform:
EXTRA_ENV_LINK_ARGS += " -static-libgcc"
EXTRA_COMPILE_ARGS = shlex.split(EXTRA_ENV_COMPILE_ARGS)
EXTRA_LINK_ARGS = shlex.split(EXTRA_ENV_LINK_ARGS)
if BUILD_WITH_STATIC_LIBSTDCXX:
EXTRA_LINK_ARGS.append("-static-libstdc++")
CYTHON_EXTENSION_PACKAGE_NAMES = ()
CYTHON_EXTENSION_MODULE_NAMES = ("grpc._cython.cygrpc",)
CYTHON_HELPER_C_FILES = ()
CORE_C_FILES = tuple(grpc_core_dependencies.CORE_SOURCE_FILES)
if "win32" in sys.platform:
CORE_C_FILES = filter(lambda x: "third_party/cares" not in x, CORE_C_FILES)
if BUILD_WITH_SYSTEM_OPENSSL:
CORE_C_FILES = filter(
lambda x: "third_party/boringssl" not in x, CORE_C_FILES
)
CORE_C_FILES = filter(lambda x: "src/boringssl" not in x, CORE_C_FILES)
SSL_INCLUDE = (os.path.join("/usr", "include", "openssl"),)
if BUILD_WITH_SYSTEM_ZLIB:
CORE_C_FILES = filter(lambda x: "third_party/zlib" not in x, CORE_C_FILES)
ZLIB_INCLUDE = (os.path.join("/usr", "include"),)
if BUILD_WITH_SYSTEM_CARES:
CORE_C_FILES = filter(lambda x: "third_party/cares" not in x, CORE_C_FILES)
CARES_INCLUDE = (os.path.join("/usr", "include"),)
if BUILD_WITH_SYSTEM_RE2:
CORE_C_FILES = filter(lambda x: "third_party/re2" not in x, CORE_C_FILES)
RE2_INCLUDE = (os.path.join("/usr", "include", "re2"),)
if BUILD_WITH_SYSTEM_ABSL:
CORE_C_FILES = filter(
lambda x: "third_party/abseil-cpp" not in x, CORE_C_FILES
)
ABSL_INCLUDE = (os.path.join("/usr", "include"),)
EXTENSION_INCLUDE_DIRECTORIES = (
(PYTHON_STEM,)
+ CORE_INCLUDE
+ ABSL_INCLUDE
+ ADDRESS_SORTING_INCLUDE
+ CARES_INCLUDE
+ RE2_INCLUDE
+ SSL_INCLUDE
+ UPB_INCLUDE
+ UPB_GRPC_GENERATED_INCLUDE
+ UPBDEFS_GRPC_GENERATED_INCLUDE
+ UTF8_RANGE_INCLUDE
+ XXHASH_INCLUDE
+ ZLIB_INCLUDE
)
EXTENSION_LIBRARIES = ()
if "linux" in sys.platform:
EXTENSION_LIBRARIES += ("rt",)
if not "win32" in sys.platform:
EXTENSION_LIBRARIES += ("m",)
if "win32" in sys.platform:
EXTENSION_LIBRARIES += (
"advapi32",
"bcrypt",
"dbghelp",
"ws2_32",
)
if BUILD_WITH_SYSTEM_OPENSSL:
EXTENSION_LIBRARIES += (
"ssl",
"crypto",
)
if BUILD_WITH_SYSTEM_ZLIB:
EXTENSION_LIBRARIES += ("z",)
if BUILD_WITH_SYSTEM_CARES:
EXTENSION_LIBRARIES += ("cares",)
if BUILD_WITH_SYSTEM_RE2:
EXTENSION_LIBRARIES += ("re2",)
if BUILD_WITH_SYSTEM_ABSL:
EXTENSION_LIBRARIES += tuple(
lib.stem[3:] for lib in pathlib.Path("/usr").glob("lib*/libabsl_*.so")
)
DEFINE_MACROS = (("_WIN32_WINNT", 0x600),)
asm_files = []
# Quotes on Windows build macros are evaluated differently from other platforms,
# so we must apply quotes asymmetrically in order to yield the proper result in
# the binary.
def _quote_build_define(argument):
if "win32" in sys.platform:
return '"\\"{}\\""'.format(argument)
return '"{}"'.format(argument)
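# Illustrative check (not part of the original setup.py) of the asymmetric quoting above:
# on win32 an extra escaped quoting layer is emitted so the preprocessor still sees a
# string literal after the build tools strip one layer of quotes.
def _demo_quote_build_define():
    expected = '"\\"Python\\""' if "win32" in sys.platform else '"Python"'
    assert _quote_build_define("Python") == expected
    return expected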
DEFINE_MACROS += (
("GRPC_XDS_USER_AGENT_NAME_SUFFIX", _quote_build_define("Python")),
(
"GRPC_XDS_USER_AGENT_VERSION_SUFFIX",
_quote_build_define(_metadata.__version__),
),
)
asm_key = ""
if BUILD_WITH_BORING_SSL_ASM and not BUILD_WITH_SYSTEM_OPENSSL:
boringssl_asm_platform = (
BUILD_OVERRIDE_BORING_SSL_ASM_PLATFORM
if BUILD_OVERRIDE_BORING_SSL_ASM_PLATFORM
else util.get_platform()
)
LINUX_X86_64 = "linux-x86_64"
LINUX_ARM = "linux-arm"
LINUX_AARCH64 = "linux-aarch64"
if LINUX_X86_64 == boringssl_asm_platform:
asm_key = "crypto_linux_x86_64"
elif LINUX_ARM == boringssl_asm_platform:
asm_key = "crypto_linux_arm"
elif LINUX_AARCH64 == boringssl_asm_platform:
asm_key = "crypto_linux_aarch64"
elif "mac" in boringssl_asm_platform and "x86_64" in boringssl_asm_platform:
asm_key = "crypto_apple_x86_64"
elif "mac" in boringssl_asm_platform and "arm64" in boringssl_asm_platform:
asm_key = "crypto_apple_aarch64"
else:
print(
"ASM Builds for BoringSSL currently not supported on:",
boringssl_asm_platform,
)
if asm_key:
asm_files = grpc_core_dependencies.ASM_SOURCE_FILES[asm_key]
else:
DEFINE_MACROS += (("OPENSSL_NO_ASM", 1),)
if not DISABLE_LIBC_COMPATIBILITY:
DEFINE_MACROS += (("GPR_BACKWARDS_COMPATIBILITY_MODE", 1),)
if "win32" in sys.platform:
# TODO(zyc): Re-enable c-ares on x64 and x86 windows after fixing the
# ares_library_init compilation issue
DEFINE_MACROS += (
("WIN32_LEAN_AND_MEAN", 1),
("CARES_STATICLIB", 1),
("GRPC_ARES", 0),
("NTDDI_VERSION", 0x06000000),
("NOMINMAX", 1),
)
if "64bit" in platform.architecture()[0]:
DEFINE_MACROS += (("MS_WIN64", 1),)
elif sys.version_info >= (3, 5):
# For some reason, this is needed to get access to inet_pton/inet_ntop
# on msvc, but only for 32 bits
DEFINE_MACROS += (("NTDDI_VERSION", 0x06000000),)
else:
DEFINE_MACROS += (
("HAVE_CONFIG_H", 1),
("GRPC_ENABLE_FORK_SUPPORT", 1),
)
# Fix for multiprocessing support on Apple devices.
# TODO(vigneshbabu): Remove this once the poll poller gets fork support.
DEFINE_MACROS += (("GRPC_DO_NOT_INSTANTIATE_POSIX_POLLER", 1),)
# Fix for Cython build issue in aarch64.
# It's required to define this macro before include <inttypes.h>.
# <inttypes.h> was included in core/lib/channel/call_tracer.h.
# This macro should already be defined in grpc/grpc.h through port_platform.h,
# but we're still having issue in aarch64, so we manually define the macro here.
# TODO(xuanwn): Figure out what's going on in the aarch64 build so we can support
# gcc + Bazel.
DEFINE_MACROS += (("__STDC_FORMAT_MACROS", None),)
LDFLAGS = tuple(EXTRA_LINK_ARGS)
CFLAGS = tuple(EXTRA_COMPILE_ARGS)
if "linux" in sys.platform or "darwin" in sys.platform:
pymodinit_type = "PyObject*" if PY3 else "void"
pymodinit = 'extern "C" __attribute__((visibility ("default"))) {}'.format(
pymodinit_type
)
DEFINE_MACROS += (("PyMODINIT_FUNC", pymodinit),)
DEFINE_MACROS += (("GRPC_POSIX_FORK_ALLOW_PTHREAD_ATFORK", 1),)
# By default, Python3 distutils enforces compatibility of
# c plugins (.so files) with the OSX version Python was built with.
# We need OSX 10.10, the oldest which supports C++ thread_local.
# Python 3.9: Mac OS Big Sur sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') returns int (11)
if "darwin" in sys.platform:
mac_target = sysconfig.get_config_var("MACOSX_DEPLOYMENT_TARGET")
if mac_target:
mac_target = pkg_resources.parse_version(str(mac_target))
if mac_target < pkg_resources.parse_version("10.10.0"):
os.environ["MACOSX_DEPLOYMENT_TARGET"] = "10.10"
os.environ["_PYTHON_HOST_PLATFORM"] = re.sub(
r"macosx-[0-9]+\.[0-9]+-(.+)",
r"macosx-10.10-\1",
util.get_platform(),
)
def cython_extensions_and_necessity():
cython_module_files = [
os.path.join(PYTHON_STEM, name.replace(".", "/") + ".pyx")
for name in CYTHON_EXTENSION_MODULE_NAMES
]
config = os.environ.get("CONFIG", "opt")
prefix = "libs/" + config + "/"
if USE_PREBUILT_GRPC_CORE:
extra_objects = [
prefix + "libares.a",
prefix + "libboringssl.a",
prefix + "libgpr.a",
prefix + "libgrpc.a",
]
core_c_files = []
else:
core_c_files = list(CORE_C_FILES)
extra_objects = []
extensions = [
_extension.Extension(
name=module_name,
sources=(
[module_file]
+ list(CYTHON_HELPER_C_FILES)
+ core_c_files
+ asm_files
),
include_dirs=list(EXTENSION_INCLUDE_DIRECTORIES),
libraries=list(EXTENSION_LIBRARIES),
define_macros=list(DEFINE_MACROS),
extra_objects=extra_objects,
extra_compile_args=list(CFLAGS),
extra_link_args=list(LDFLAGS),
)
for (module_name, module_file) in zip(
list(CYTHON_EXTENSION_MODULE_NAMES), cython_module_files
)
]
need_cython = BUILD_WITH_CYTHON
if not BUILD_WITH_CYTHON:
need_cython = (
need_cython
or not commands.check_and_update_cythonization(extensions)
)
# TODO: the strategy for conditional compiling and exposing the aio Cython
# dependencies will be revisited by https://github.com/grpc/grpc/issues/19728
return (
commands.try_cythonize(
extensions,
linetracing=ENABLE_CYTHON_TRACING,
mandatory=BUILD_WITH_CYTHON,
),
need_cython,
)
CYTHON_EXTENSION_MODULES, need_cython = cython_extensions_and_necessity()
PACKAGE_DIRECTORIES = {
"": PYTHON_STEM,
}
INSTALL_REQUIRES = ()
EXTRAS_REQUIRES = {
"protobuf": "grpcio-tools>={version}".format(version=grpc_version.VERSION),
}
SETUP_REQUIRES = (
INSTALL_REQUIRES + ("Sphinx~=1.8.1",) if ENABLE_DOCUMENTATION_BUILD else ()
)
try:
import Cython
except ImportError:
if BUILD_WITH_CYTHON:
sys.stderr.write(
"You requested a Cython build via GRPC_PYTHON_BUILD_WITH_CYTHON, "
"but do not have Cython installed. We won't stop you from using "
"other commands, but the extension files will fail to build.\n"
)
elif need_cython:
sys.stderr.write(
"We could not find Cython. Setup may take 10-20 minutes.\n"
)
SETUP_REQUIRES += ("cython>=0.23",)
COMMAND_CLASS = {
"doc": commands.SphinxDocumentation,
"build_project_metadata": commands.BuildProjectMetadata,
"build_py": commands.BuildPy,
"build_ext": commands.BuildExt,
"gather": commands.Gather,
"clean": commands.Clean,
}
# Ensure that package data is copied over before any commands have been run:
credentials_dir = os.path.join(PYTHON_STEM, "grpc", "_cython", "_credentials")
try:
os.mkdir(credentials_dir)
except OSError:
pass
shutil.copyfile(
os.path.join("etc", "roots.pem"), os.path.join(credentials_dir, "roots.pem")
)
PACKAGE_DATA = {
# Binaries that may or may not be present in the final installation, but are
# mentioned here for completeness.
"grpc._cython": [
"_credentials/roots.pem",
"_windows/grpc_c.32.python",
"_windows/grpc_c.64.python",
],
}
PACKAGES = setuptools.find_packages(PYTHON_STEM)
setuptools.setup(
name="grpcio",
version=grpc_version.VERSION,
description="HTTP/2-based RPC framework",
author="The gRPC Authors",
author_email="grpc-io@googlegroups.com",
url="https://grpc.io",
project_urls={
"Source Code": "https://github.com/grpc/grpc",
"Bug Tracker": "https://github.com/grpc/grpc/issues",
"Documentation": "https://grpc.github.io/grpc/python",
},
license=LICENSE,
classifiers=CLASSIFIERS,
long_description_content_type="text/x-rst",
long_description=open(README).read(),
ext_modules=CYTHON_EXTENSION_MODULES,
packages=list(PACKAGES),
package_dir=PACKAGE_DIRECTORIES,
package_data=PACKAGE_DATA,
python_requires=">=3.7",
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRES,
setup_requires=SETUP_REQUIRES,
cmdclass=COMMAND_CLASS,
)
| 23,243 | 34.925811 | 111 | py |
grpc | grpc-master/_metadata.py |
# Copyright 2021 The gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/_metadata.py.template`!!!
__version__ = """1.57.0.dev0"""
| 685 | 37.111111 | 74 | py |
grpc | grpc-master/tools/distrib/check_include_guards.py |
#!/usr/bin/env python3
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import os.path
import re
import subprocess
import sys
def build_valid_guard(fpath):
guard_components = (
fpath.replace("++", "XX").replace(".", "_").upper().split("/")
)
if fpath.startswith("include/"):
return "_".join(guard_components[1:])
else:
return "GRPC_" + "_".join(guard_components)
def load(fpath):
with open(fpath, "r") as f:
return f.read()
def save(fpath, contents):
with open(fpath, "w") as f:
f.write(contents)
class GuardValidator(object):
def __init__(self):
self.ifndef_re = re.compile(r"#ifndef ([A-Z][A-Z_0-9]*)")
self.define_re = re.compile(r"#define ([A-Z][A-Z_0-9]*)")
self.endif_c_core_re = re.compile(
r"#endif /\* (?: *\\\n *)?([A-Z][A-Z_0-9]*) (?:\\\n *)?\*/$"
)
self.endif_re = re.compile(r"#endif // ([A-Z][A-Z_0-9]*)")
self.comments_then_includes_re = re.compile(
(
r"^((//.*?$|/\*.*?\*/|[ \r\n\t])*)(([ \r\n\t]|#include"
r" .*)*)(#ifndef [^\n]*\n#define [^\n]*\n)"
),
re.DOTALL | re.MULTILINE,
)
self.failed = False
def _is_c_core_header(self, fpath):
return "include" in fpath and not (
"grpc++" in fpath
or "grpcpp" in fpath
or "event_engine" in fpath
or fpath.endswith("/grpc_audit_logging.h")
or fpath.endswith("/json.h")
)
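    # A quick sketch of the distinction (example paths are illustrative):
    # _is_c_core_header("include/grpc/grpc.h") is True, so the C-style
    # "#endif /* GUARD */" form is expected, whereas
    # _is_c_core_header("include/grpcpp/server.h") is False and the
    # C++-style "#endif  // GUARD" form is expected instead.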
def fail(self, fpath, regexp, fcontents, match_txt, correct, fix):
c_core_header = self._is_c_core_header(fpath)
self.failed = True
invalid_guards_msg_template = (
"{0}: Missing preprocessor guards (RE {1}). "
"Please wrap your code around the following guards:\n"
"#ifndef {2}\n"
"#define {2}\n"
"...\n"
"... epic code ...\n"
"...\n"
+ ("#endif /* {2} */" if c_core_header else "#endif // {2}")
)
if not match_txt:
print(
(
invalid_guards_msg_template.format(
fpath, regexp.pattern, build_valid_guard(fpath)
)
)
)
return fcontents
print(
(
(
"{}: Wrong preprocessor guards (RE {}):"
"\n\tFound {}, expected {}"
).format(fpath, regexp.pattern, match_txt, correct)
)
)
if fix:
print("Fixing {}...\n".format(fpath))
fixed_fcontents = re.sub(match_txt, correct, fcontents)
if fixed_fcontents:
self.failed = False
return fixed_fcontents
else:
print()
return fcontents
def check(self, fpath, fix):
c_core_header = self._is_c_core_header(fpath)
valid_guard = build_valid_guard(fpath)
fcontents = load(fpath)
match = self.ifndef_re.search(fcontents)
if not match:
print(("something drastically wrong with: %s" % fpath))
return False # failed
if match.lastindex is None:
# No ifndef. Request manual addition with hints
self.fail(fpath, match.re, match.string, "", "", False)
return False # failed
# Does the guard end with a '_H'?
running_guard = match.group(1)
if not running_guard.endswith("_H"):
fcontents = self.fail(
fpath, match.re, match.string, match.group(1), valid_guard, fix
)
if fix:
save(fpath, fcontents)
# Is it the expected one based on the file path?
if running_guard != valid_guard:
fcontents = self.fail(
fpath, match.re, match.string, match.group(1), valid_guard, fix
)
if fix:
save(fpath, fcontents)
# Is there a #define? Is it the same as the #ifndef one?
match = self.define_re.search(fcontents)
if match.lastindex is None:
# No define. Request manual addition with hints
self.fail(fpath, match.re, match.string, "", "", False)
return False # failed
# Is the #define guard the same as the #ifndef guard?
if match.group(1) != running_guard:
fcontents = self.fail(
fpath, match.re, match.string, match.group(1), valid_guard, fix
)
if fix:
save(fpath, fcontents)
# Is there a properly commented #endif?
flines = fcontents.rstrip().splitlines()
# Use findall and use the last result if there are multiple matches,
# i.e. nested include guards.
match = self.endif_c_core_re.findall("\n".join(flines[-3:]))
if not match and not c_core_header:
match = self.endif_re.findall("\n".join(flines[-3:]))
if not match:
# No endif. Check if we have the last line as just '#endif' and if so
# replace it with a properly commented one.
if flines[-1] == "#endif":
flines[-1] = "#endif" + (
" /* {} */\n".format(valid_guard)
if c_core_header
else " // {}\n".format(valid_guard)
)
if fix:
fcontents = "\n".join(flines)
save(fpath, fcontents)
else:
# something else is wrong, bail out
self.fail(
fpath,
self.endif_c_core_re if c_core_header else self.endif_re,
flines[-1],
"",
"",
False,
)
elif match[-1] != running_guard:
# Is the #endif guard the same as the #ifndef and #define guards?
fcontents = self.fail(
fpath, self.endif_re, fcontents, match[-1], valid_guard, fix
)
if fix:
save(fpath, fcontents)
match = self.comments_then_includes_re.search(fcontents)
assert match
bad_includes = match.group(3)
if bad_includes:
print(
"includes after initial comments but before include guards in",
fpath,
)
if fix:
fcontents = (
fcontents[: match.start(3)]
+ match.group(5)
+ match.group(3)
+ fcontents[match.end(5) :]
)
save(fpath, fcontents)
return not self.failed # Did the check succeed? (ie, not failed)
# find our home
ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../.."))
os.chdir(ROOT)
# parse command line
argp = argparse.ArgumentParser(description="include guard checker")
argp.add_argument("-f", "--fix", default=False, action="store_true")
argp.add_argument("--precommit", default=False, action="store_true")
args = argp.parse_args()
grep_filter = (
r"grep -E '^(include|src/core|src/cpp|test/core|test/cpp|fuzztest/)/.*\.h$'"
)
if args.precommit:
git_command = "git diff --name-only HEAD"
else:
git_command = "git ls-tree -r --name-only -r HEAD"
FILE_LIST_COMMAND = " | ".join((git_command, grep_filter))
# scan files
ok = True
filename_list = []
try:
filename_list = (
subprocess.check_output(FILE_LIST_COMMAND, shell=True)
.decode()
.splitlines()
)
# Filter out non-existent files (ie, file removed or renamed)
filename_list = (f for f in filename_list if os.path.isfile(f))
except subprocess.CalledProcessError:
sys.exit(0)
validator = GuardValidator()
for filename in filename_list:
# Skip check for upb generated code.
if (
filename.endswith(".upb.h")
or filename.endswith(".upb.c")
or filename.endswith(".upbdefs.h")
or filename.endswith(".upbdefs.c")
):
continue
ok = ok and validator.check(filename, args.fix)
sys.exit(0 if ok else 1)
| 8,767
| 32.212121
| 81
|
py
|
grpc
|
grpc-master/tools/distrib/check_redundant_namespace_qualifiers.py
|
#!/usr/bin/env python3
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Eliminate the kind of redundant namespace qualifiers that tend to
# creep in when converting C to C++.
import collections
import os
import re
import sys
def find_closing_mustache(contents, initial_depth):
"""Find the closing mustache for a given number of open mustaches."""
depth = initial_depth
start_len = len(contents)
while contents:
# Skip over strings.
if contents[0] == '"':
contents = contents[1:]
while contents[0] != '"':
if contents.startswith("\\\\"):
contents = contents[2:]
elif contents.startswith('\\"'):
contents = contents[2:]
else:
contents = contents[1:]
contents = contents[1:]
# And characters that might confuse us.
elif (
contents.startswith("'{'")
or contents.startswith("'\"'")
or contents.startswith("'}'")
):
contents = contents[3:]
# Skip over comments.
elif contents.startswith("//"):
contents = contents[contents.find("\n") :]
elif contents.startswith("/*"):
contents = contents[contents.find("*/") + 2 :]
# Count up or down if we see a mustache.
elif contents[0] == "{":
contents = contents[1:]
depth += 1
elif contents[0] == "}":
contents = contents[1:]
depth -= 1
if depth == 0:
return start_len - len(contents)
# Skip over everything else.
else:
contents = contents[1:]
return None
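# Illustrative call (not part of the original script): with one namespace
# already open, find_closing_mustache("int x; } more", 1) returns 8, the
# offset just past the matching "}"; it returns None if the brace is never
# closed.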
def is_a_define_statement(match, body):
"""See if the matching line begins with #define"""
# This does not yet help with multi-line defines
m = re.search(
r"^#define.*{}$".format(match.group(0)),
body[: match.end()],
re.MULTILINE,
)
return m is not None
def update_file(contents, namespaces):
"""Scan the contents of a file, and for top-level namespaces in namespaces remove redundant usages."""
output = ""
while contents:
m = re.search(r"namespace ([a-zA-Z0-9_]*) {", contents)
if not m:
output += contents
break
output += contents[: m.end()]
contents = contents[m.end() :]
end = find_closing_mustache(contents, 1)
if end is None:
print(
"Failed to find closing mustache for namespace {}".format(
m.group(1)
)
)
print("Remaining text:")
print(contents)
sys.exit(1)
body = contents[:end]
namespace = m.group(1)
if namespace in namespaces:
while body:
# Find instances of 'namespace::'
m = re.search(r"\b" + namespace + r"::\b", body)
if not m:
break
# Ignore instances of '::namespace::' -- these are usually meant to be there.
if m.start() >= 2 and body[m.start() - 2 :].startswith("::"):
output += body[: m.end()]
# Ignore #defines, since they may be used anywhere
elif is_a_define_statement(m, body):
output += body[: m.end()]
else:
output += body[: m.start()]
body = body[m.end() :]
output += body
contents = contents[end:]
return output
# self check before doing anything
_TEST = """
namespace bar {
namespace baz {
}
}
namespace foo {}
namespace foo {
foo::a;
::foo::a;
}
"""
_TEST_EXPECTED = """
namespace bar {
namespace baz {
}
}
namespace foo {}
namespace foo {
a;
::foo::a;
}
"""
output = update_file(_TEST, ["foo"])
if output != _TEST_EXPECTED:
import difflib
print("FAILED: self check")
print(
"\n".join(
difflib.ndiff(_TEST_EXPECTED.splitlines(1), output.splitlines(1))
)
)
sys.exit(1)
# Main loop.
Config = collections.namedtuple("Config", ["dirs", "namespaces"])
_CONFIGURATION = (Config(["src/core", "test/core"], ["grpc_core"]),)
changed = []
for config in _CONFIGURATION:
for dir in config.dirs:
for root, dirs, files in os.walk(dir):
for file in files:
if file.endswith(".cc") or file.endswith(".h"):
path = os.path.join(root, file)
try:
with open(path) as f:
contents = f.read()
except IOError:
continue
updated = update_file(contents, config.namespaces)
if updated != contents:
changed.append(path)
with open(os.path.join(root, file), "w") as f:
f.write(updated)
if changed:
print("The following files were changed:")
for path in changed:
print(" " + path)
sys.exit(1)
| 5,669
| 29.648649
| 106
|
py
|
grpc
|
grpc-master/tools/distrib/update_flakes.py
|
#!/usr/bin/env python3
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import subprocess
import sys
from google.cloud import bigquery
import run_buildozer
import update_flakes_query
lookback_hours = 24 * 7 * 4
def include_test(test):
if "@" in test:
return False
if test.startswith("//test/cpp/qps:"):
return False
return True
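# Rough sketch of the filter (the targets shown are hypothetical):
#   include_test("//test/cpp/qps:qps_json_driver")    -> False (QPS targets skipped)
#   include_test("//test/core/end2end:foo@poller")    -> False (contains "@")
#   include_test("//test/core/slice:slice_test")      -> True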
TEST_DIRS = ["test/core", "test/cpp"]
tests = {}
already_flaky = set()
for test_dir in TEST_DIRS:
for line in subprocess.check_output(
["bazel", "query", "tests({}/...)".format(test_dir)]
).splitlines():
test = line.strip().decode("utf-8")
if not include_test(test):
continue
tests[test] = False
for test_dir in TEST_DIRS:
for line in subprocess.check_output(
["bazel", "query", "attr(flaky, 1, tests({}/...))".format(test_dir)]
).splitlines():
test = line.strip().decode("utf-8")
if not include_test(test):
continue
already_flaky.add(test)
flaky_e2e = set()
client = bigquery.Client()
for row in client.query(
update_flakes_query.QUERY.format(lookback_hours=lookback_hours)
).result():
if "/macos/" in row.job_name:
continue # we know mac stuff is flaky
if row.test_binary not in tests:
m = re.match(
r"^//test/core/end2end:([^@]*)@([^@]*)(.*)", row.test_binary
)
if m:
flaky_e2e.add("{}@{}{}".format(m.group(1), m.group(2), m.group(3)))
print("will mark end2end test {} as flaky".format(row.test_binary))
else:
print("skip obsolete test {}".format(row.test_binary))
continue
print("will mark {} as flaky".format(row.test_binary))
tests[row.test_binary] = True
buildozer_commands = []
for test, flaky in sorted(tests.items()):
if flaky:
buildozer_commands.append("set flaky True|{}".format(test))
elif test in already_flaky:
buildozer_commands.append("remove flaky|{}".format(test))
with open("test/core/end2end/flaky.bzl", "w") as f:
with open(sys.argv[0]) as my_source:
for line in my_source:
if line[0] != "#":
break
for line in my_source:
if line[0] == "#":
print(line.strip(), file=f)
break
for line in my_source:
if line[0] != "#":
break
print(line.strip(), file=f)
print(
(
'"""A list of flaky tests, consumed by generate_tests.bzl to set'
' flaky attrs."""'
),
file=f,
)
print("FLAKY_TESTS = [", file=f)
for line in sorted(list(flaky_e2e)):
print(' "{}",'.format(line), file=f)
print("]", file=f)
run_buildozer.run_buildozer(buildozer_commands)
| 3,335
| 29.054054
| 79
|
py
|
grpc
|
grpc-master/tools/distrib/run_buildozer.py
|
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import sys
import tempfile
def run_buildozer(buildozer_commands):
if not buildozer_commands:
return
ok_statuses = (0, 3)
temp = tempfile.NamedTemporaryFile()
open(temp.name, "w").write("\n".join(buildozer_commands))
c = ["tools/distrib/buildozer.sh", "-f", temp.name]
r = subprocess.call(c)
if r not in ok_statuses:
print("{} failed with status {}".format(c, r))
sys.exit(1)
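# Illustrative usage (the command string is a hypothetical example): calling
#   run_buildozer(["set flaky True|//test/core/bad_client:bad_client_test"])
# writes the command to a temporary file and invokes
# tools/distrib/buildozer.sh -f <tempfile>, tolerating buildozer's
# "no changes made" exit status of 3.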
| 1,025
| 32.096774
| 74
|
py
|
grpc
|
grpc-master/tools/distrib/add-iwyu.py
|
#!/usr/bin/env python3
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import os
def to_inc(filename):
"""Given filename, synthesize what should go in an include statement to get that file"""
if filename.startswith("include/"):
return "<%s>" % filename[len("include/") :]
return '"%s"' % filename
def set_pragmas(filename, pragmas):
"""Set the file-level IWYU pragma in filename"""
lines = []
saw_first_define = False
for line in open(filename).read().splitlines():
if line.startswith("// IWYU pragma: "):
continue
lines.append(line)
if not saw_first_define and line.startswith("#define "):
saw_first_define = True
lines.append("")
for pragma in pragmas:
lines.append("// IWYU pragma: %s" % pragma)
lines.append("")
open(filename, "w").write("\n".join(lines) + "\n")
def set_exports(pub, cg):
"""In file pub, mark the include for cg with IWYU pragma: export"""
lines = []
for line in open(pub).read().splitlines():
if line.startswith("#include %s" % to_inc(cg)):
lines.append("#include %s // IWYU pragma: export" % to_inc(cg))
else:
lines.append(line)
open(pub, "w").write("\n".join(lines) + "\n")
CG_ROOTS_GRPC = (
(r"sync", "grpc/support/sync.h", False),
(r"atm", "grpc/support/atm.h", False),
(r"grpc_types", "grpc/grpc.h", True),
(r"gpr_types", "grpc/grpc.h", True),
(r"compression_types", "grpc/compression.h", True),
(r"connectivity_state", "grpc/grpc.h", True),
)
CG_ROOTS_GRPCPP = [
(r"status_code_enum", "grpcpp/support/status.h", False),
]
def fix_tree(tree, cg_roots):
"""Fix one include tree"""
# Map of filename --> paths including that filename
reverse_map = collections.defaultdict(list)
# The same, but for things with '/impl/codegen' in their names
cg_reverse_map = collections.defaultdict(list)
for root, dirs, files in os.walk(tree):
root_map = cg_reverse_map if "/impl/codegen" in root else reverse_map
for filename in files:
root_map[filename].append(root)
# For each thing in '/impl/codegen' figure out what exports it
for filename, paths in cg_reverse_map.items():
print("****", filename)
# Exclude non-headers
if not filename.endswith(".h"):
continue
pragmas = []
# Check for our 'special' headers: if we see one of these, we just
        # hardcode where they go, because there are some complicated rules.
for root, target, friend in cg_roots:
print(root, target, friend)
if filename.startswith(root):
pragmas = ["private, include <%s>" % target]
if friend:
pragmas.append('friend "src/.*"')
if len(paths) == 1:
path = paths[0]
if filename.startswith(root + "."):
set_exports("include/" + target, path + "/" + filename)
if filename.startswith(root + "_"):
set_exports(
path + "/" + root + ".h", path + "/" + filename
)
# If the path for a file in /impl/codegen is ambiguous, just don't bother
if not pragmas and len(paths) == 1:
path = paths[0]
# Check if we have an exporting candidate
if filename in reverse_map:
proper = reverse_map[filename]
# And that it too is unambiguous
if len(proper) == 1:
# Build the two relevant pathnames
cg = path + "/" + filename
pub = proper[0] + "/" + filename
# And see if the public file actually includes the /impl/codegen file
if ("#include %s" % to_inc(cg)) in open(pub).read():
# Finally, if it does, we'll set that pragma
pragmas = ["private, include %s" % to_inc(pub)]
# And mark the export
set_exports(pub, cg)
# If we can't find a good alternative include to point people to,
# mark things private anyway... we don't want to recommend people include
# from impl/codegen
if not pragmas:
pragmas = ["private"]
for path in paths:
set_pragmas(path + "/" + filename, pragmas)
fix_tree("include/grpc", CG_ROOTS_GRPC)
fix_tree("include/grpcpp", CG_ROOTS_GRPCPP)
| 5,144
| 38.274809
| 92
|
py
|
grpc
|
grpc-master/tools/distrib/fix_build_deps.py
|
#!/usr/bin/env python3
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import multiprocessing
import os
import re
import sys
import run_buildozer
# find our home
ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../.."))
os.chdir(ROOT)
vendors = collections.defaultdict(list)
scores = collections.defaultdict(int)
avoidness = collections.defaultdict(int)
consumes = {}
no_update = set()
buildozer_commands = []
original_deps = {}
original_external_deps = {}
skip_headers = collections.defaultdict(set)
# TODO(ctiller): ideally we wouldn't hardcode a bunch of paths here.
# We can likely parse out BUILD files from dependencies to generate this index.
EXTERNAL_DEPS = {
"absl/algorithm/container.h": "absl/algorithm:container",
"absl/base/attributes.h": "absl/base:core_headers",
"absl/base/call_once.h": "absl/base",
# TODO(ctiller) remove this
"absl/base/internal/endian.h": "absl/base",
"absl/base/thread_annotations.h": "absl/base:core_headers",
"absl/container/flat_hash_map.h": "absl/container:flat_hash_map",
"absl/container/flat_hash_set.h": "absl/container:flat_hash_set",
"absl/container/inlined_vector.h": "absl/container:inlined_vector",
"absl/cleanup/cleanup.h": "absl/cleanup",
"absl/debugging/failure_signal_handler.h": (
"absl/debugging:failure_signal_handler"
),
"absl/debugging/stacktrace.h": "absl/debugging:stacktrace",
"absl/debugging/symbolize.h": "absl/debugging:symbolize",
"absl/flags/flag.h": "absl/flags:flag",
"absl/flags/marshalling.h": "absl/flags:marshalling",
"absl/flags/parse.h": "absl/flags:parse",
"absl/functional/any_invocable.h": "absl/functional:any_invocable",
"absl/functional/bind_front.h": "absl/functional:bind_front",
"absl/functional/function_ref.h": "absl/functional:function_ref",
"absl/hash/hash.h": "absl/hash",
"absl/memory/memory.h": "absl/memory",
"absl/meta/type_traits.h": "absl/meta:type_traits",
"absl/numeric/int128.h": "absl/numeric:int128",
"absl/random/random.h": "absl/random",
"absl/random/distributions.h": "absl/random:distributions",
"absl/random/uniform_int_distribution.h": "absl/random:distributions",
"absl/status/status.h": "absl/status",
"absl/status/statusor.h": "absl/status:statusor",
"absl/strings/ascii.h": "absl/strings",
"absl/strings/cord.h": "absl/strings:cord",
"absl/strings/escaping.h": "absl/strings",
"absl/strings/match.h": "absl/strings",
"absl/strings/numbers.h": "absl/strings",
"absl/strings/str_cat.h": "absl/strings",
"absl/strings/str_format.h": "absl/strings:str_format",
"absl/strings/str_join.h": "absl/strings",
"absl/strings/str_replace.h": "absl/strings",
"absl/strings/str_split.h": "absl/strings",
"absl/strings/string_view.h": "absl/strings",
"absl/strings/strip.h": "absl/strings",
"absl/strings/substitute.h": "absl/strings",
"absl/synchronization/mutex.h": "absl/synchronization",
"absl/synchronization/notification.h": "absl/synchronization",
"absl/time/clock.h": "absl/time",
"absl/time/time.h": "absl/time",
"absl/types/optional.h": "absl/types:optional",
"absl/types/span.h": "absl/types:span",
"absl/types/variant.h": "absl/types:variant",
"absl/utility/utility.h": "absl/utility",
"address_sorting/address_sorting.h": "address_sorting",
"opentelemetry/context/context.h": "otel/api",
"opentelemetry/metrics/meter.h": "otel/api",
"opentelemetry/metrics/meter_provider.h": "otel/api",
"opentelemetry/metrics/provider.h": "otel/api",
"opentelemetry/metrics/sync_instruments.h": "otel/api",
"opentelemetry/nostd/shared_ptr.h": "otel/api",
"opentelemetry/nostd/unique_ptr.h": "otel/api",
"sdk/include/opentelemetry/sdk/metrics/meter_provider.h": "otel/sdk/src/metrics",
"ares.h": "cares",
"fuzztest/fuzztest.h": ["fuzztest", "fuzztest_main"],
"google/api/monitored_resource.pb.h": (
"google/api:monitored_resource_cc_proto"
),
"google/devtools/cloudtrace/v2/tracing.grpc.pb.h": (
"googleapis_trace_grpc_service"
),
"google/logging/v2/logging.grpc.pb.h": "googleapis_logging_grpc_service",
"google/logging/v2/logging.pb.h": "googleapis_logging_cc_proto",
"google/logging/v2/log_entry.pb.h": "googleapis_logging_cc_proto",
"google/monitoring/v3/metric_service.grpc.pb.h": (
"googleapis_monitoring_grpc_service"
),
"gmock/gmock.h": "gtest",
"gtest/gtest.h": "gtest",
"opencensus/exporters/stats/stackdriver/stackdriver_exporter.h": (
"opencensus-stats-stackdriver_exporter"
),
"opencensus/exporters/trace/stackdriver/stackdriver_exporter.h": (
"opencensus-trace-stackdriver_exporter"
),
"opencensus/trace/context_util.h": "opencensus-trace-context_util",
"opencensus/trace/propagation/grpc_trace_bin.h": (
"opencensus-trace-propagation"
),
"opencensus/tags/context_util.h": "opencensus-tags-context_util",
"opencensus/trace/span_context.h": "opencensus-trace-span_context",
"openssl/base.h": "libssl",
"openssl/bio.h": "libssl",
"openssl/bn.h": "libcrypto",
"openssl/buffer.h": "libcrypto",
"openssl/crypto.h": "libcrypto",
"openssl/digest.h": "libssl",
"openssl/engine.h": "libcrypto",
"openssl/err.h": "libcrypto",
"openssl/evp.h": "libcrypto",
"openssl/hmac.h": "libcrypto",
"openssl/pem.h": "libcrypto",
"openssl/rsa.h": "libcrypto",
"openssl/sha.h": "libcrypto",
"openssl/ssl.h": "libssl",
"openssl/tls1.h": "libssl",
"openssl/x509.h": "libcrypto",
"openssl/x509v3.h": "libcrypto",
"re2/re2.h": "re2",
"upb/arena.h": "upb_lib",
"upb/base/string_view.h": "upb_lib",
"upb/collections/map.h": "upb_collections_lib",
"upb/def.h": "upb_lib",
"upb/json_encode.h": "upb_json_lib",
"upb/mem/arena.h": "upb_lib",
"upb/text_encode.h": "upb_textformat_lib",
"upb/def.hpp": "upb_reflection",
"upb/upb.h": "upb_lib",
"upb/upb.hpp": "upb_lib",
"xxhash.h": "xxhash",
"zlib.h": "madler_zlib",
}
INTERNAL_DEPS = {
"test/core/event_engine/fuzzing_event_engine/fuzzing_event_engine.h": (
"//test/core/event_engine/fuzzing_event_engine"
),
"test/core/event_engine/fuzzing_event_engine/fuzzing_event_engine.pb.h": "//test/core/event_engine/fuzzing_event_engine:fuzzing_event_engine_proto",
"test/core/experiments/test_experiments.h": "//test/core/experiments:test_experiments_lib",
"google/api/expr/v1alpha1/syntax.upb.h": "google_type_expr_upb",
"google/rpc/status.upb.h": "google_rpc_status_upb",
"google/protobuf/any.upb.h": "protobuf_any_upb",
"google/protobuf/duration.upb.h": "protobuf_duration_upb",
"google/protobuf/struct.upb.h": "protobuf_struct_upb",
"google/protobuf/timestamp.upb.h": "protobuf_timestamp_upb",
"google/protobuf/wrappers.upb.h": "protobuf_wrappers_upb",
"grpc/status.h": "grpc_public_hdrs",
"src/proto/grpc/channelz/channelz.grpc.pb.h": (
"//src/proto/grpc/channelz:channelz_proto"
),
"src/proto/grpc/core/stats.pb.h": "//src/proto/grpc/core:stats_proto",
"src/proto/grpc/health/v1/health.upb.h": "grpc_health_upb",
"src/proto/grpc/lb/v1/load_reporter.grpc.pb.h": (
"//src/proto/grpc/lb/v1:load_reporter_proto"
),
"src/proto/grpc/lb/v1/load_balancer.upb.h": "grpc_lb_upb",
"src/proto/grpc/reflection/v1alpha/reflection.grpc.pb.h": (
"//src/proto/grpc/reflection/v1alpha:reflection_proto"
),
"src/proto/grpc/gcp/transport_security_common.upb.h": "alts_upb",
"src/proto/grpc/gcp/handshaker.upb.h": "alts_upb",
"src/proto/grpc/gcp/altscontext.upb.h": "alts_upb",
"src/proto/grpc/lookup/v1/rls.upb.h": "rls_upb",
"src/proto/grpc/lookup/v1/rls_config.upb.h": "rls_config_upb",
"src/proto/grpc/lookup/v1/rls_config.upbdefs.h": "rls_config_upbdefs",
"src/proto/grpc/testing/xds/v3/csds.grpc.pb.h": (
"//src/proto/grpc/testing/xds/v3:csds_proto"
),
"xds/data/orca/v3/orca_load_report.upb.h": "xds_orca_upb",
"xds/service/orca/v3/orca.upb.h": "xds_orca_service_upb",
"xds/type/v3/typed_struct.upb.h": "xds_type_upb",
}
class FakeSelects:
def config_setting_group(self, **kwargs):
pass
num_cc_libraries = 0
num_opted_out_cc_libraries = 0
parsing_path = None
# Convert the source or header target to a relative path.
def _get_filename(name, parsing_path):
filename = "%s%s" % (
(
parsing_path + "/"
if (parsing_path and not name.startswith("//"))
else ""
),
name,
)
filename = filename.replace("//:", "")
filename = filename.replace("//src/core:", "src/core/")
filename = filename.replace(
"//src/cpp/ext/filters/census:", "src/cpp/ext/filters/census/"
)
return filename
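# Illustrative behaviour (target names are hypothetical): with
# parsing_path="test/core/slice", _get_filename("slice_test.cc", parsing_path)
# gives "test/core/slice/slice_test.cc", while a repo-rooted label such as
# _get_filename("//src/core:lib/slice/slice.h", parsing_path) gives
# "src/core/lib/slice/slice.h".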
def grpc_cc_library(
name,
hdrs=[],
public_hdrs=[],
srcs=[],
select_deps=None,
tags=[],
deps=[],
external_deps=[],
proto=None,
**kwargs,
):
global args
global num_cc_libraries
global num_opted_out_cc_libraries
global parsing_path
assert parsing_path is not None
name = "//%s:%s" % (parsing_path, name)
num_cc_libraries += 1
if select_deps or "nofixdeps" in tags:
if args.whats_left and not select_deps and "nofixdeps" not in tags:
num_opted_out_cc_libraries += 1
print("Not opted in: {}".format(name))
no_update.add(name)
scores[name] = len(public_hdrs + hdrs)
# avoid_dep is the internal way of saying prefer something else
# we add grpc_avoid_dep to allow internal grpc-only stuff to avoid each
# other, whilst not biasing dependent projects
if "avoid_dep" in tags or "grpc_avoid_dep" in tags:
avoidness[name] += 10
if proto:
proto_hdr = "%s%s" % (
(parsing_path + "/" if parsing_path else ""),
proto.replace(".proto", ".pb.h"),
)
skip_headers[name].add(proto_hdr)
for hdr in hdrs + public_hdrs:
vendors[_get_filename(hdr, parsing_path)].append(name)
inc = set()
original_deps[name] = frozenset(deps)
original_external_deps[name] = frozenset(external_deps)
for src in hdrs + public_hdrs + srcs:
for line in open(_get_filename(src, parsing_path)):
m = re.search(r"^#include <(.*)>", line)
if m:
inc.add(m.group(1))
m = re.search(r'^#include "(.*)"', line)
if m:
inc.add(m.group(1))
consumes[name] = list(inc)
def grpc_proto_library(name, srcs, **kwargs):
global parsing_path
assert parsing_path is not None
name = "//%s:%s" % (parsing_path, name)
for src in srcs:
proto_hdr = src.replace(".proto", ".pb.h")
vendors[_get_filename(proto_hdr, parsing_path)].append(name)
def buildozer(cmd, target):
buildozer_commands.append("%s|%s" % (cmd, target))
def buildozer_set_list(name, values, target, via=""):
if not values:
buildozer("remove %s" % name, target)
return
adjust = via if via else name
buildozer(
"set %s %s" % (adjust, " ".join('"%s"' % s for s in values)), target
)
if via:
buildozer("remove %s" % name, target)
buildozer("rename %s %s" % (via, name), target)
def score_edit_distance(proposed, existing):
"""Score a proposed change primarily by edit distance"""
sum = 0
for p in proposed:
if p not in existing:
sum += 1
for e in existing:
if e not in proposed:
sum += 1
return sum
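# Worked example (hypothetical dependency lists): proposed
# ["//:gpr", "//:ref_counted"] against existing ["//:gpr", "//:slice"]
# scores 2, one point for the missing "//:slice" and one for the newly
# added "//:ref_counted".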
def total_score(proposal):
return sum(scores[dep] for dep in proposal)
def total_avoidness(proposal):
return sum(avoidness[dep] for dep in proposal)
def score_list_size(proposed, existing):
"""Score a proposed change primarily by number of dependencies"""
return len(proposed)
def score_best(proposed, existing):
"""Score a proposed change primarily by dependency score"""
return 0
SCORERS = {
"edit_distance": score_edit_distance,
"list_size": score_list_size,
"best": score_best,
}
parser = argparse.ArgumentParser(description="Fix build dependencies")
parser.add_argument(
"targets", nargs="*", default=[], help="targets to fix (empty => all)"
)
parser.add_argument(
"--score",
type=str,
default="edit_distance",
help="scoring function to use: one of " + ", ".join(SCORERS.keys()),
)
parser.add_argument(
"--whats_left",
action="store_true",
default=False,
help="show what is left to opt in",
)
parser.add_argument(
"--explain",
action="store_true",
default=False,
help="try to explain some decisions",
)
parser.add_argument(
"--why",
type=str,
default=None,
help="with --explain, target why a given dependency is needed",
)
args = parser.parse_args()
for dirname in [
"",
"src/core",
"src/cpp/ext/gcp",
"src/cpp/ext/otel",
"test/core/backoff",
"test/core/uri",
"test/core/util",
"test/core/end2end",
"test/core/event_engine",
"test/core/filters",
"test/core/promise",
"test/core/resource_quota",
"test/core/transport/chaotic_good",
"fuzztest",
"fuzztest/core/channel",
]:
parsing_path = dirname
exec(
open("%sBUILD" % (dirname + "/" if dirname else ""), "r").read(),
{
"load": lambda filename, *args: None,
"licenses": lambda licenses: None,
"package": lambda **kwargs: None,
"exports_files": lambda files, visibility=None: None,
"bool_flag": lambda **kwargs: None,
"config_setting": lambda **kwargs: None,
"selects": FakeSelects(),
"python_config_settings": lambda **kwargs: None,
"grpc_cc_binary": grpc_cc_library,
"grpc_cc_library": grpc_cc_library,
"grpc_cc_test": grpc_cc_library,
"grpc_core_end2end_test": lambda **kwargs: None,
"grpc_fuzzer": grpc_cc_library,
"grpc_fuzz_test": grpc_cc_library,
"grpc_proto_fuzzer": grpc_cc_library,
"grpc_proto_library": grpc_proto_library,
"select": lambda d: d["//conditions:default"],
"glob": lambda files: None,
"grpc_end2end_tests": lambda: None,
"grpc_upb_proto_library": lambda name, **kwargs: None,
"grpc_upb_proto_reflection_library": lambda name, **kwargs: None,
"grpc_generate_one_off_targets": lambda: None,
"grpc_generate_one_off_internal_targets": lambda: None,
"grpc_package": lambda **kwargs: None,
"filegroup": lambda name, **kwargs: None,
"sh_library": lambda name, **kwargs: None,
},
{},
)
parsing_path = None
if args.whats_left:
print(
"{}/{} libraries are opted in".format(
num_cc_libraries - num_opted_out_cc_libraries, num_cc_libraries
)
)
def make_relative_path(dep, lib):
if lib is None:
return dep
lib_path = lib[: lib.rfind(":") + 1]
if dep.startswith(lib_path):
return dep[len(lib_path) :]
return dep
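# Illustrative call (labels are hypothetical):
# make_relative_path("//src/core:slice", "//src/core:channel_args") returns
# "slice" because both targets share the "//src/core:" package prefix; with
# lib=None the dep is returned unchanged.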
# Keeps track of all possible sets of dependencies that could satisfy the
# problem. (models the list monad in Haskell!)
class Choices:
def __init__(self, library, substitutions):
self.library = library
self.to_add = []
self.to_remove = []
self.substitutions = substitutions
def add_one_of(self, choices, trigger):
if not choices:
return
choices = sum(
[self.apply_substitutions(choice) for choice in choices], []
)
if args.explain and (args.why is None or args.why in choices):
print(
"{}: Adding one of {} for {}".format(
self.library, choices, trigger
)
)
self.to_add.append(
tuple(
make_relative_path(choice, self.library) for choice in choices
)
)
def add(self, choice, trigger):
self.add_one_of([choice], trigger)
def remove(self, remove):
for remove in self.apply_substitutions(remove):
self.to_remove.append(make_relative_path(remove, self.library))
def apply_substitutions(self, dep):
if dep in self.substitutions:
return self.substitutions[dep]
return [dep]
def best(self, scorer):
choices = set()
choices.add(frozenset())
for add in sorted(set(self.to_add), key=lambda x: (len(x), x)):
new_choices = set()
for append_choice in add:
for choice in choices:
new_choices.add(choice.union([append_choice]))
choices = new_choices
for remove in sorted(set(self.to_remove)):
new_choices = set()
for choice in choices:
new_choices.add(choice.difference([remove]))
choices = new_choices
best = None
def final_scorer(x):
return (total_avoidness(x), scorer(x), total_score(x))
for choice in choices:
if best is None or final_scorer(choice) < final_scorer(best):
best = choice
return best
def make_library(library):
error = False
hdrs = sorted(consumes[library])
# we need a little trickery here since grpc_base has channel.cc, which calls grpc_init
# which is in grpc, which is illegal but hard to change
# once EventEngine lands we can clean this up
deps = Choices(
library,
{"//:grpc_base": ["//:grpc", "//:grpc_unsecure"]}
if library.startswith("//test/")
else {},
)
external_deps = Choices(None, {})
for hdr in hdrs:
if hdr in skip_headers[library]:
continue
if hdr == "systemd/sd-daemon.h":
continue
if hdr == "src/core/lib/profiling/stap_probes.h":
continue
if hdr.startswith("src/libfuzzer/"):
continue
if hdr == "grpc/grpc.h" and library.startswith("//test:"):
# not the root build including grpc.h ==> //:grpc
deps.add_one_of(["//:grpc", "//:grpc_unsecure"], hdr)
continue
if hdr in INTERNAL_DEPS:
dep = INTERNAL_DEPS[hdr]
if isinstance(dep, list):
for d in dep:
deps.add(d, hdr)
else:
if not ("//" in dep):
dep = "//:" + dep
deps.add(dep, hdr)
continue
if hdr in vendors:
deps.add_one_of(vendors[hdr], hdr)
continue
if "include/" + hdr in vendors:
deps.add_one_of(vendors["include/" + hdr], hdr)
continue
if "." not in hdr:
# assume a c++ system include
continue
if hdr in EXTERNAL_DEPS:
if isinstance(EXTERNAL_DEPS[hdr], list):
for dep in EXTERNAL_DEPS[hdr]:
external_deps.add(dep, hdr)
else:
external_deps.add(EXTERNAL_DEPS[hdr], hdr)
continue
if hdr.startswith("opencensus/"):
trail = hdr[len("opencensus/") :]
trail = trail[: trail.find("/")]
external_deps.add("opencensus-" + trail, hdr)
continue
if hdr.startswith("envoy/"):
path, file = os.path.split(hdr)
file = file.split(".")
path = path.split("/")
dep = "_".join(path[:-1] + [file[1]])
deps.add(dep, hdr)
continue
if hdr.startswith("google/protobuf/") and not hdr.endswith(".upb.h"):
external_deps.add("protobuf_headers", hdr)
continue
if "/" not in hdr:
# assume a system include
continue
is_sys_include = False
for sys_path in [
"sys",
"arpa",
"gperftools",
"netinet",
"linux",
"android",
"mach",
"net",
"CoreFoundation",
]:
if hdr.startswith(sys_path + "/"):
is_sys_include = True
break
if is_sys_include:
# assume a system include
continue
print(
"# ERROR: can't categorize header: %s used by %s" % (hdr, library)
)
error = True
deps.remove(library)
deps = sorted(
deps.best(lambda x: SCORERS[args.score](x, original_deps[library]))
)
external_deps = sorted(
external_deps.best(
lambda x: SCORERS[args.score](x, original_external_deps[library])
)
)
return (library, error, deps, external_deps)
def main() -> None:
update_libraries = []
for library in sorted(consumes.keys()):
if library in no_update:
continue
if args.targets and library not in args.targets:
continue
update_libraries.append(library)
with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as p:
updated_libraries = p.map(make_library, update_libraries, 1)
error = False
for library, lib_error, deps, external_deps in updated_libraries:
if lib_error:
error = True
continue
buildozer_set_list("external_deps", external_deps, library, via="deps")
buildozer_set_list("deps", deps, library)
run_buildozer.run_buildozer(buildozer_commands)
if error:
sys.exit(1)
if __name__ == "__main__":
main()
| 22,474
| 32.544776
| 152
|
py
|
grpc
|
grpc-master/tools/distrib/check_path_length.py
|
#!/usr/bin/env python3
# Copyright 2023 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
# Maximum path length for a path in the repository before we start seeing
# problems with Windows cloning the repository. (kind of arbitrary, less than
# Windows' actual limit, but enough that we avoid problems).
maxlen = 150
errors = 0
for path in subprocess.check_output(["git", "ls-files"]).decode().splitlines():
if len(path) > maxlen:
print(f"Path too long: {path}")
errors += 1
if errors:
print(f"Found {errors} files with paths longer than {maxlen} characters")
exit(1)
| 1,137
| 32.470588
| 79
|
py
|
grpc
|
grpc-master/tools/distrib/run_clang_tidy.py
|
#!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import multiprocessing
import os
import subprocess
import sys
sys.path.append(
os.path.join(
os.path.dirname(sys.argv[0]), "..", "run_tests", "python_utils"
)
)
import jobset
clang_tidy = os.environ.get("CLANG_TIDY", "clang-tidy")
argp = argparse.ArgumentParser(description="Run clang-tidy against core")
argp.add_argument("files", nargs="+", help="Files to tidy")
argp.add_argument("--fix", dest="fix", action="store_true")
argp.add_argument(
"-j",
"--jobs",
type=int,
default=multiprocessing.cpu_count(),
help="Number of CPUs to use",
)
argp.add_argument("--only-changed", dest="only_changed", action="store_true")
argp.set_defaults(fix=False, only_changed=False)
args = argp.parse_args()
# Explicitly passing the .clang-tidy config by reading it.
# This is required because source files in the compilation database are
# in a different source tree so clang-tidy cannot find the right config file
# by seeking their parent directories.
with open(".clang-tidy") as f:
config = f.read()
cmdline = [
clang_tidy,
"--config=" + config,
]
if args.fix:
cmdline.append("--fix-errors")
if args.only_changed:
orig_files = set(args.files)
actual_files = []
output = subprocess.check_output(
["git", "diff", "upstream/master", "HEAD", "--name-only"]
)
for line in output.decode("ascii").splitlines(False):
if line in orig_files:
print(("check: %s" % line))
actual_files.append(line)
else:
print(("skip: %s - not in the build" % line))
args.files = actual_files
jobs = []
for filename in args.files:
jobs.append(
jobset.JobSpec(
cmdline + [filename],
shortname=filename,
timeout_seconds=15 * 60,
)
)
num_fails, res_set = jobset.run(jobs, maxjobs=args.jobs, quiet_success=True)
sys.exit(num_fails)
| 2,507
| 28.505882
| 77
|
py
|
grpc
|
grpc-master/tools/distrib/check_copyright.py
|
#!/usr/bin/env python3
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import os
import re
import subprocess
import sys
# find our home
ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../.."))
os.chdir(ROOT)
# parse command line
argp = argparse.ArgumentParser(description="copyright checker")
argp.add_argument(
"-o", "--output", default="details", choices=["list", "details"]
)
argp.add_argument("-s", "--skips", default=0, action="store_const", const=1)
argp.add_argument("-a", "--ancient", default=0, action="store_const", const=1)
argp.add_argument("--precommit", action="store_true")
argp.add_argument("--fix", action="store_true")
args = argp.parse_args()
# open the license text
with open("NOTICE.txt") as f:
LICENSE_NOTICE = f.read().splitlines()
# license format by file extension
# key is the file extension, value is a format string
# that given a line of license text, returns what should
# be in the file
LICENSE_PREFIX_RE = {
".bat": r"@rem\s*",
".c": r"\s*(?://|\*)\s*",
".cc": r"\s*(?://|\*)\s*",
".h": r"\s*(?://|\*)\s*",
".m": r"\s*\*\s*",
".mm": r"\s*\*\s*",
".php": r"\s*\*\s*",
".js": r"\s*\*\s*",
".py": r"#\s*",
".pyx": r"#\s*",
".pxd": r"#\s*",
".pxi": r"#\s*",
".rb": r"#\s*",
".sh": r"#\s*",
".proto": r"//\s*",
".cs": r"//\s*",
".mak": r"#\s*",
".bazel": r"#\s*",
".bzl": r"#\s*",
"Makefile": r"#\s*",
"Dockerfile": r"#\s*",
"BUILD": r"#\s*",
}
# The key is the file extension, while the value is a tuple of fields
# (header, prefix, footer).
# For example, for javascript multi-line comments, the header will be '/*', the
# prefix will be '*' and the footer will be '*/'.
# If header and footer are irrelevant for a specific file extension, they are
# set to None.
LICENSE_PREFIX_TEXT = {
".bat": (None, "@rem", None),
".c": (None, "//", None),
".cc": (None, "//", None),
".h": (None, "//", None),
".m": ("/**", " *", " */"),
".mm": ("/**", " *", " */"),
".php": ("/**", " *", " */"),
".js": ("/**", " *", " */"),
".py": (None, "#", None),
".pyx": (None, "#", None),
".pxd": (None, "#", None),
".pxi": (None, "#", None),
".rb": (None, "#", None),
".sh": (None, "#", None),
".proto": (None, "//", None),
".cs": (None, "//", None),
".mak": (None, "#", None),
".bazel": (None, "#", None),
".bzl": (None, "#", None),
"Makefile": (None, "#", None),
"Dockerfile": (None, "#", None),
"BUILD": (None, "#", None),
}
_EXEMPT = frozenset(
(
# Generated protocol compiler output.
"examples/python/helloworld/helloworld_pb2.py",
"examples/python/helloworld/helloworld_pb2_grpc.py",
"examples/python/multiplex/helloworld_pb2.py",
"examples/python/multiplex/helloworld_pb2_grpc.py",
"examples/python/multiplex/route_guide_pb2.py",
"examples/python/multiplex/route_guide_pb2_grpc.py",
"examples/python/route_guide/route_guide_pb2.py",
"examples/python/route_guide/route_guide_pb2_grpc.py",
# Generated doxygen config file
"tools/doxygen/Doxyfile.php",
# An older file originally from outside gRPC.
"src/php/tests/bootstrap.php",
# census.proto copied from github
"tools/grpcz/census.proto",
# status.proto copied from googleapis
"src/proto/grpc/status/status.proto",
# Gradle wrappers used to build for Android
"examples/android/helloworld/gradlew.bat",
"src/android/test/interop/gradlew.bat",
# Designer-generated source
"examples/csharp/HelloworldXamarin/Droid/Resources/Resource.designer.cs",
"examples/csharp/HelloworldXamarin/iOS/ViewController.designer.cs",
# BoringSSL generated header. It has commit version information at the head
# of the file so we cannot check the license info.
"src/boringssl/boringssl_prefix_symbols.h",
)
)
_ENFORCE_CPP_STYLE_COMMENT_PATH_PREFIX = tuple(
[
"include/grpc++/",
"include/grpcpp/",
"src/core/",
"src/cpp/",
"test/core/",
"test/cpp/",
"fuzztest/",
]
)
RE_YEAR = (
r"Copyright (?P<first_year>[0-9]+\-)?(?P<last_year>[0-9]+) ([Tt]he )?gRPC"
r" [Aa]uthors(\.|)"
)
RE_LICENSE = dict(
(
k,
r"\n".join(
LICENSE_PREFIX_RE[k]
+ (RE_YEAR if re.search(RE_YEAR, line) else re.escape(line))
for line in LICENSE_NOTICE
),
)
for k, v in list(LICENSE_PREFIX_RE.items())
)
RE_C_STYLE_COMMENT_START = r"^/\*\s*\n"
RE_C_STYLE_COMMENT_OPTIONAL_LINE = r"(?:\s*\*\s*\n)*"
RE_C_STYLE_COMMENT_END = r"\s*\*/"
RE_C_STYLE_COMMENT_LICENSE = (
RE_C_STYLE_COMMENT_START
+ RE_C_STYLE_COMMENT_OPTIONAL_LINE
+ r"\n".join(
r"\s*(?:\*)\s*"
+ (RE_YEAR if re.search(RE_YEAR, line) else re.escape(line))
for line in LICENSE_NOTICE
)
+ r"\n"
+ RE_C_STYLE_COMMENT_OPTIONAL_LINE
+ RE_C_STYLE_COMMENT_END
)
RE_CPP_STYLE_COMMENT_LICENSE = r"\n".join(
r"\s*(?://)\s*" + (RE_YEAR if re.search(RE_YEAR, line) else re.escape(line))
for line in LICENSE_NOTICE
)
YEAR = datetime.datetime.now().year
LICENSE_YEAR = f"Copyright {YEAR} gRPC authors."
def join_license_text(header, prefix, footer, notice):
text = (header + "\n") if header else ""
def add_prefix(prefix, line):
# Don't put whitespace between prefix and empty line to avoid having
# trailing whitespaces.
return prefix + ("" if len(line) == 0 else " ") + line
text += "\n".join(
add_prefix(prefix, (LICENSE_YEAR if re.search(RE_YEAR, line) else line))
for line in LICENSE_NOTICE
)
text += "\n"
if footer:
text += footer + "\n"
return text
LICENSE_TEXT = dict(
(
k,
join_license_text(
LICENSE_PREFIX_TEXT[k][0],
LICENSE_PREFIX_TEXT[k][1],
LICENSE_PREFIX_TEXT[k][2],
LICENSE_NOTICE,
),
)
for k, v in list(LICENSE_PREFIX_TEXT.items())
)
if args.precommit:
FILE_LIST_COMMAND = (
"git status -z | grep -Poz '(?<=^[MARC][MARCD ] )[^\s]+'"
)
else:
FILE_LIST_COMMAND = (
"git ls-tree -r --name-only -r HEAD | "
"grep -v ^third_party/ |"
'grep -v "\(ares_config.h\|ares_build.h\)"'
)
def load(name):
with open(name) as f:
return f.read()
def save(name, text):
with open(name, "w") as f:
f.write(text)
assert re.search(RE_LICENSE["Makefile"], load("Makefile"))
def log(cond, why, filename):
if not cond:
return
if args.output == "details":
print(("%s: %s" % (why, filename)))
else:
print(filename)
def write_copyright(license_text, file_text, filename):
shebang = ""
lines = file_text.split("\n")
if lines and lines[0].startswith("#!"):
shebang = lines[0] + "\n"
file_text = file_text[len(shebang) :]
rewritten_text = shebang + license_text + "\n" + file_text
with open(filename, "w") as f:
f.write(rewritten_text)
def replace_copyright(license_text, file_text, filename):
    m = re.search(RE_C_STYLE_COMMENT_LICENSE, file_text)
if m:
rewritten_text = license_text + file_text[m.end() :]
with open(filename, "w") as f:
f.write(rewritten_text)
return True
return False
# scan files, validate the text
ok = True
filename_list = []
try:
filename_list = (
subprocess.check_output(FILE_LIST_COMMAND, shell=True)
.decode()
.splitlines()
)
except subprocess.CalledProcessError:
sys.exit(0)
for filename in filename_list:
enforce_cpp_style_comment = False
if filename in _EXEMPT:
continue
# Skip check for upb generated code.
if (
filename.endswith(".upb.h")
or filename.endswith(".upb.c")
or filename.endswith(".upbdefs.h")
or filename.endswith(".upbdefs.c")
):
continue
ext = os.path.splitext(filename)[1]
base = os.path.basename(filename)
if filename.startswith(_ENFORCE_CPP_STYLE_COMMENT_PATH_PREFIX) and ext in [
".cc",
".h",
]:
enforce_cpp_style_comment = True
re_license = RE_CPP_STYLE_COMMENT_LICENSE
license_text = LICENSE_TEXT[ext]
elif ext in RE_LICENSE:
re_license = RE_LICENSE[ext]
license_text = LICENSE_TEXT[ext]
elif base in RE_LICENSE:
re_license = RE_LICENSE[base]
license_text = LICENSE_TEXT[base]
else:
log(args.skips, "skip", filename)
continue
try:
text = load(filename)
except:
continue
m = re.search(re_license, text)
if m:
pass
elif enforce_cpp_style_comment:
log(
1,
"copyright missing or does not use cpp-style copyright header",
filename,
)
if args.fix:
# Attempt fix: search for c-style copyright header and replace it
# with cpp-style copyright header. If that doesn't work
# (e.g. missing copyright header), write cpp-style copyright header.
if not replace_copyright(license_text, text, filename):
write_copyright(license_text, text, filename)
ok = False
elif "DO NOT EDIT" not in text:
if args.fix:
write_copyright(license_text, text, filename)
log(1, "copyright missing (fixed)", filename)
else:
log(1, "copyright missing", filename)
ok = False
if not ok and not args.fix:
print(
"You may use following command to automatically fix copyright headers:"
)
print(" tools/distrib/check_copyright.py --fix")
sys.exit(0 if ok else 1)
| 10,371
| 28.549858
| 83
|
py
|
grpc
|
grpc-master/tools/distrib/update_flakes_query.py
|
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
QUERY = """
#standardSQL
-- See https://console.cloud.google.com/bigquery?sq=830293263384:5a8549832dfb48d9b2c04312a4ae3181 for the original query
WITH
runs AS (
SELECT
RTRIM(LTRIM(REGEXP_REPLACE(test_target, r'(@poller=.+)', ''))) AS test_binary,
REGEXP_EXTRACT(test_target, r'poller=(\w+)') AS poll_strategy,
job_name,
test_target,
test_class_name,
CASE
# in case of timeout / retry / segfault the "test_case" fields will contain weird stuff
# e.g. "test_shard2_run0_attempt0" or "shard_2/20"
# when aggregating, we want to display all of these as a single category of problems
WHEN test_case like 'test_shard_%_run%_attempt%' THEN 'CANNOT_DETERMINE'
WHEN test_case like '%shard_%/%' THEN 'CANNOT_DETERMINE'
# when test_case looks suspiciously like test_target
# its value is probably meaningless and it means that the entire target has failed
# e.g. test_target="//test/cpp/client:destroy_grpclb_channel_with_active_connect_stress_test" and test_case="test/cpp/client/destroy_grpclb_channel_with_active_connect_stress_test.exe"
WHEN STRPOS(test_case, REPLACE(SUBSTR(test_target, 3), ":", "/")) != 0 THEN 'CANNOT_DETERMINE'
ELSE test_case
END AS test_case,
result,
build_id,
timestamp
FROM
`grpc-testing.jenkins_test_results.rbe_test_results`
WHERE
DATETIME_DIFF(CURRENT_DATETIME(),
dateTIME(timestamp),
HOUR) < {lookback_hours}
),
results_counts_per_build AS (
SELECT
test_binary,
#test_target, # aggregate data over all pollers
test_class_name,
test_case,
SUM(SAFE_CAST(result != 'PASSED'
AND result != 'SKIPPED' AS INT64)) AS runs_failed,
SUM(SAFE_CAST(result != 'SKIPPED' AS INT64)) AS runs_total,
job_name,
build_id
FROM
runs
GROUP BY
test_binary,
test_class_name,
test_case,
job_name,
build_id),
builds_with_missing_cannot_determine_testcase_entry AS (
SELECT
test_binary,
job_name,
build_id,
FROM
results_counts_per_build
GROUP BY
test_binary,
job_name,
build_id
HAVING COUNTIF(test_case = 'CANNOT_DETERMINE') = 0
),
# for each test target and build, generate a fake entry with "CANNOT_DETERMINE" test_case
# if not already present.
# this is because in many builds, there will be no "CANNOT_DETERMINE" entry
# and we want to avoid skewing the statistics
results_counts_per_build_with_fake_cannot_determine_test_case_entries AS (
(SELECT * FROM results_counts_per_build)
UNION ALL
(SELECT
test_binary,
'' AS test_class_name, # when test_case is 'CANNOT_DETERMINE', test class is empty string
'CANNOT_DETERMINE' AS test_case, # see table "runs"
0 AS runs_failed,
1 AS runs_total,
job_name,
build_id
FROM
builds_with_missing_cannot_determine_testcase_entry)
),
results_counts AS (
SELECT
test_binary,
test_class_name,
test_case,
job_name,
SUM(runs_failed) AS runs_failed,
SUM(runs_total) AS runs_total,
SUM(SAFE_CAST(runs_failed > 0 AS INT64)) AS builds_failed,
COUNT(build_id) AS builds_total,
STRING_AGG(CASE
WHEN runs_failed > 0 THEN 'X'
ELSE '_' END, ''
ORDER BY
build_id ASC) AS build_failure_pattern,
FORMAT("%T", ARRAY_AGG(build_id
ORDER BY
build_id ASC)) AS builds
FROM
#results_counts_per_build
results_counts_per_build_with_fake_cannot_determine_test_case_entries
GROUP BY
test_binary,
test_class_name,
test_case,
job_name
HAVING
runs_failed > 0)
SELECT
ROUND(100*builds_failed / builds_total, 2) AS pct_builds_failed,
ROUND(100*runs_failed / runs_total, 2) AS pct_runs_failed,
test_binary,
test_class_name,
test_case,
job_name,
build_failure_pattern
FROM
results_counts
ORDER BY
pct_builds_failed DESC
"""
| 4,537
| 29.662162
| 215
|
py
|
grpc
|
grpc-master/tools/distrib/check_namespace_qualification.py
|
#!/usr/bin/env python3
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import os.path
import re
import subprocess
import sys
# TODO(hork): dedupe args/load/validate/save code with other check scripts.
def load(fpath):
with open(fpath, "r") as f:
return f.readlines()
def save(fpath, contents):
with open(fpath, "w") as f:
f.write(contents)
class QualificationValidator(object):
def __init__(self):
self.fully_qualified_re = re.compile(r"([ (<])::(grpc[A-Za-z_:])")
self.using_re = re.compile(
r"(using +|using +[A-Za-z_]+ *= *|namespace [A-Za-z_]+ *= *)::"
)
self.define_re = re.compile(r"^#define")
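    # A minimal sketch of what the pattern catches (the C++ snippet is a
    # hypothetical example): fully_qualified_re matches the " ::grpc_" in
    # " ::grpc_core::Mutex mu;", and sub(r"\1\2", ...) rewrites it to
    # " grpc_core::Mutex mu;", while lines matched by using_re or define_re
    # are skipped in check() below.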
def check(self, fpath, fix):
fcontents = load(fpath)
failed = False
for i, line in enumerate(fcontents):
if not self.fully_qualified_re.search(line):
continue
# skip `using` statements
if self.using_re.search(line):
continue
# skip `#define` statements
if self.define_re.search(line):
continue
# fully-qualified namespace found, which may be unnecessary
if fix:
fcontents[i] = self.fully_qualified_re.sub(r"\1\2", line)
else:
print("Found in %s:%d - %s" % (fpath, i, line.strip()))
failed = True
if fix:
save(fpath, "".join(fcontents))
return not failed
IGNORED_FILES = [
# TODO(hork): rename symbols to avoid the need for fully-qualified names
"src/cpp/common/core_codegen.cc",
# TODO(hork): This could be a breaking change for users that define their
# own (possibly nested) `grpc.*` namespaces that contain conflicting
# symbols. It may be worth trying to land this change at some point, as
# users would be better off using unique namespaces.
"src/compiler/cpp_generator.cc",
# multi-line #define statements are not handled
"src/core/lib/gprpp/global_config_env.h",
"src/core/lib/profiling/timers.h",
"src/core/lib/gprpp/crash.h",
]
# find our home
ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../.."))
os.chdir(ROOT)
# parse command line
argp = argparse.ArgumentParser(
description="c++ namespace full qualification checker"
)
argp.add_argument("-f", "--fix", default=False, action="store_true")
argp.add_argument("--precommit", default=False, action="store_true")
args = argp.parse_args()
grep_filter = r"grep -E '^(include|src|test).*\.(h|cc)$'"
if args.precommit:
git_command = "git diff --name-only HEAD"
else:
git_command = "git ls-tree -r --name-only -r HEAD"
FILE_LIST_COMMAND = " | ".join((git_command, grep_filter))
# scan files
ok = True
filename_list = []
try:
filename_list = (
subprocess.check_output(FILE_LIST_COMMAND, shell=True)
.decode()
.splitlines()
)
    # Filter out non-existent files (i.e., files that were removed or renamed)
filename_list = (f for f in filename_list if os.path.isfile(f))
except subprocess.CalledProcessError:
sys.exit(0)
validator = QualificationValidator()
for filename in filename_list:
# Skip check for upb generated code and ignored files.
if (
filename.endswith(".upb.h")
or filename.endswith(".upb.c")
or filename.endswith(".upbdefs.h")
or filename.endswith(".upbdefs.c")
or filename in IGNORED_FILES
):
continue
ok = validator.check(filename, args.fix) and ok
sys.exit(0 if ok else 1)
| 4,104
| 30.335878
| 77
|
py
|
grpc
|
grpc-master/tools/distrib/check_naked_includes.py
|
#!/usr/bin/env python3
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Check for includes of the form `#include "bar.h"` - i.e. not including the subdirectory. We require instead `#include "foo/bar.h"`.
import argparse
import os
import re
import sys
# find our home
ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../.."))
os.chdir(ROOT)
# parse command line
argp = argparse.ArgumentParser(description="naked include checker")
argp.add_argument("-f", "--fix", default=False, action="store_true")
args = argp.parse_args()
# error count
errors = 0
CHECK_SUBDIRS = [
"src/core",
"src/cpp",
"test/core",
"test/cpp",
"fuzztest",
]
for subdir in CHECK_SUBDIRS:
for root, dirs, files in os.walk(subdir):
for f in files:
if f.endswith(".h") or f.endswith(".cc"):
fpath = os.path.join(root, f)
output = open(fpath, "r").readlines()
changed = False
for i, line in enumerate(output):
m = re.match(r'^#include "([^"]*)"(.*)', line)
if not m:
continue
include = m.group(1)
if "/" in include:
continue
expect_path = os.path.join(root, include)
trailing = m.group(2)
if not os.path.exists(expect_path):
continue
changed = True
errors += 1
output[i] = '#include "{0}"{1}\n'.format(
expect_path, trailing
)
print(
"Found naked include '{0}' in {1}".format(
include, fpath
)
)
if changed and args.fix:
open(fpath, "w").writelines(output)
if errors > 0:
print("{} errors found.".format(errors))
sys.exit(1)
| 2,531
| 31.461538
| 133
|
py
|
grpc
|
grpc-master/tools/distrib/gen_compilation_database.py
|
#!/usr/bin/env python3
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is based on the script on the Envoy project
# https://github.com/envoyproxy/envoy/blob/master/tools/gen_compilation_database.py
import argparse
import glob
import json
import logging
import os
from pathlib import Path
import re
import shlex
import subprocess
RE_INCLUDE_SYSTEM = re.compile(r"\s*-I\s+/usr/[^ ]+")
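# Illustrative effect (assumed flags): with --ignore_system_headers, an options
# string such as "-I /usr/include -iquote foo" has its "-I /usr/include" part
# stripped by the regex above.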
# This method is equivalent to https://github.com/grailbio/bazel-compilation-database/blob/master/generate.sh
def generateCompilationDatabase(args):
# We need to download all remote outputs for generated source code.
# This option lives here to override those specified in bazelrc.
bazel_options = shlex.split(os.environ.get("BAZEL_BUILD_OPTIONS", "")) + [
"--config=compdb",
"--remote_download_outputs=all",
]
subprocess.check_call(
["bazel", "build"]
+ bazel_options
+ [
"--aspects=@bazel_compdb//:aspects.bzl%compilation_database_aspect",
"--output_groups=compdb_files,header_files",
]
+ args.bazel_targets
)
execroot = (
subprocess.check_output(
["bazel", "info", "execution_root"] + bazel_options
)
.decode()
.strip()
)
compdb = []
for compdb_file in Path(execroot).glob("**/*.compile_commands.json"):
compdb.extend(
json.loads(
"["
+ compdb_file.read_text().replace("__EXEC_ROOT__", execroot)
+ "]"
)
)
if args.dedup_targets:
compdb_map = {target["file"]: target for target in compdb}
compdb = list(compdb_map.values())
return compdb
def isHeader(filename):
for ext in (".h", ".hh", ".hpp", ".hxx"):
if filename.endswith(ext):
return True
return False
def isCompileTarget(target, args):
filename = target["file"]
if not args.include_headers and isHeader(filename):
return False
if not args.include_genfiles:
if filename.startswith("bazel-out/"):
return False
if not args.include_external:
if filename.startswith("external/"):
return False
return True
def modifyCompileCommand(target, args):
cc, options = target["command"].split(" ", 1)
    # Workaround for bazel-added C++14 options; these don't affect the build
    # itself, but clang-tidy will misinterpret them.
options = options.replace("-std=c++0x ", "")
options = options.replace("-std=c++14 ", "")
# Add -DNDEBUG so that editors show the correct size information for structs.
options += " -DNDEBUG"
if args.vscode:
# Visual Studio Code doesn't seem to like "-iquote". Replace it with
# old-style "-I".
options = options.replace("-iquote ", "-I ")
if args.ignore_system_headers:
# Remove all include options for /usr/* directories
options = RE_INCLUDE_SYSTEM.sub("", options)
if isHeader(target["file"]):
options += " -Wno-pragma-once-outside-header -Wno-unused-const-variable"
options += " -Wno-unused-function"
if not target["file"].startswith("external/"):
            # *.h files are treated as C headers by default, while our header files are all C++14.
options = "-x c++ -std=c++14 -fexceptions " + options
target["command"] = " ".join([cc, options])
return target
def fixCompilationDatabase(args, db):
db = [
modifyCompileCommand(target, args)
for target in db
if isCompileTarget(target, args)
]
with open("compile_commands.json", "w") as db_file:
json.dump(db, db_file, indent=2)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate JSON compilation database"
)
parser.add_argument("--include_external", action="store_true")
parser.add_argument("--include_genfiles", action="store_true")
parser.add_argument("--include_headers", action="store_true")
parser.add_argument("--vscode", action="store_true")
parser.add_argument("--ignore_system_headers", action="store_true")
parser.add_argument("--dedup_targets", action="store_true")
parser.add_argument("bazel_targets", nargs="*", default=["//..."])
args = parser.parse_args()
fixCompilationDatabase(args, generateCompilationDatabase(args))
| 4,916
| 31.348684
| 109
|
py
|
grpc
|
grpc-master/tools/distrib/python/check_grpcio_tools.py
|
#!/usr/bin/env python3
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import make_grpcio_tools as _make
OUT_OF_DATE_MESSAGE = """file {} is out of date
Have you called tools/distrib/python/make_grpcio_tools.py since upgrading protobuf?"""
submodule_commit_hash = _make.protobuf_submodule_commit_hash()
with open(_make.GRPC_PYTHON_PROTOC_LIB_DEPS, "r") as _protoc_lib_deps_file:
content = _protoc_lib_deps_file.read().splitlines()
testString = (
_make.COMMIT_HASH_PREFIX + submodule_commit_hash + _make.COMMIT_HASH_SUFFIX
)
if testString not in content:
print(OUT_OF_DATE_MESSAGE.format(_make.GRPC_PYTHON_PROTOC_LIB_DEPS))
raise SystemExit(1)
| 1,196
| 33.2
| 86
|
py
|
grpc
|
grpc-master/tools/distrib/python/grpc_version.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template`!!!
VERSION = '1.57.0.dev0'
PROTOBUF_VERSION = '3.23.4'
| 738
| 37.894737
| 106
|
py
|
grpc
|
grpc-master/tools/distrib/python/docgen.py
|
#!/usr/bin/env python3
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import os.path
import shutil
import subprocess
import sys
import tempfile
import grpc_version
parser = argparse.ArgumentParser()
parser.add_argument(
"--repo-owner", type=str, help="Owner of the GitHub repository to be pushed"
)
parser.add_argument(
"--doc-branch", type=str, default="python-doc-%s" % grpc_version.VERSION
)
args = parser.parse_args()
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PROJECT_ROOT = os.path.abspath(os.path.join(SCRIPT_DIR, "..", "..", ".."))
SETUP_PATH = os.path.join(PROJECT_ROOT, "setup.py")
REQUIREMENTS_PATH = os.path.join(PROJECT_ROOT, "requirements.bazel.txt")
DOC_PATH = os.path.join(PROJECT_ROOT, "doc/build")
if "VIRTUAL_ENV" in os.environ:
VIRTUALENV_DIR = os.environ["VIRTUAL_ENV"]
PYTHON_PATH = os.path.join(VIRTUALENV_DIR, "bin", "python")
subprocess_arguments_list = []
else:
VIRTUALENV_DIR = os.path.join(SCRIPT_DIR, "distrib_virtualenv")
PYTHON_PATH = os.path.join(VIRTUALENV_DIR, "bin", "python")
subprocess_arguments_list = [
["python3", "-m", "virtualenv", VIRTUALENV_DIR],
]
subprocess_arguments_list += [
[PYTHON_PATH, "-m", "pip", "install", "--upgrade", "pip==19.3.1"],
[PYTHON_PATH, "-m", "pip", "install", "-r", REQUIREMENTS_PATH],
[PYTHON_PATH, "-m", "pip", "install", "--upgrade", "Sphinx"],
[PYTHON_PATH, SETUP_PATH, "doc"],
]
for subprocess_arguments in subprocess_arguments_list:
print("Running command: {}".format(subprocess_arguments))
subprocess.check_call(args=subprocess_arguments)
if not args.repo_owner or not args.doc_branch:
tty_width = int(os.popen("stty size", "r").read().split()[1])
print("-" * tty_width)
print("Please check generated Python doc inside doc/build")
print(
"To push to a GitHub repo, please provide repo owner and doc branch"
" name"
)
else:
# Create a temporary directory out of tree, checkout gh-pages from the
# specified repository, edit it, and push it. It's up to the user to then go
# onto GitHub and make a PR against grpc/grpc:gh-pages.
repo_parent_dir = tempfile.mkdtemp()
print("Documentation parent directory: {}".format(repo_parent_dir))
repo_dir = os.path.join(repo_parent_dir, "grpc")
python_doc_dir = os.path.join(repo_dir, "python")
doc_branch = args.doc_branch
print("Cloning your repository...")
subprocess.check_call(
[
"git",
"clone",
"--branch",
"gh-pages",
"https://github.com/grpc/grpc",
],
cwd=repo_parent_dir,
)
subprocess.check_call(["git", "checkout", "-b", doc_branch], cwd=repo_dir)
subprocess.check_call(
[
"git",
"remote",
"add",
"ssh-origin",
"git@github.com:%s/grpc.git" % args.repo_owner,
],
cwd=repo_dir,
)
print("Updating documentation...")
shutil.rmtree(python_doc_dir, ignore_errors=True)
shutil.copytree(DOC_PATH, python_doc_dir)
print(
"Attempting to push documentation to %s/%s..."
% (args.repo_owner, doc_branch)
)
try:
subprocess.check_call(["git", "add", "--all"], cwd=repo_dir)
subprocess.check_call(
["git", "commit", "-m", "Auto-update Python documentation"],
cwd=repo_dir,
)
subprocess.check_call(
["git", "push", "--set-upstream", "ssh-origin", doc_branch],
cwd=repo_dir,
)
except subprocess.CalledProcessError:
print(
"Failed to push documentation. Examine this directory and push "
"manually: {}".format(repo_parent_dir)
)
sys.exit(1)
shutil.rmtree(repo_parent_dir)
| 4,405
| 32.892308
| 80
|
py
|
grpc
|
grpc-master/tools/distrib/python/make_grpcio_tools.py
|
#!/usr/bin/env python3
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import errno
import filecmp
import glob
import os
import os.path
import pprint
import shutil
import subprocess
import sys
import traceback
import uuid
# the template for the content of protoc_lib_deps.py
DEPS_FILE_CONTENT = """
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# AUTO-GENERATED BY make_grpcio_tools.py!
CC_FILES={cc_files}
PROTO_FILES={proto_files}
CC_INCLUDES={cc_includes}
PROTO_INCLUDE={proto_include}
{commit_hash_expr}
"""
# expose commit hash suffix and prefix for check_grpcio_tools.py
COMMIT_HASH_PREFIX = 'PROTOBUF_SUBMODULE_VERSION="'
COMMIT_HASH_SUFFIX = '"'
EXTERNAL_LINKS = [
("@com_google_absl//", "third_party/abseil-cpp/"),
("@com_google_protobuf//", "third_party/protobuf/"),
("@utf8_range//:", "third_party/utf8_range/"),
]
PROTOBUF_PROTO_PREFIX = "@com_google_protobuf//src/"
# will be added to include path when building grpcio_tools
CC_INCLUDES = [
os.path.join("third_party", "abseil-cpp"),
os.path.join("third_party", "protobuf", "src"),
os.path.join("third_party", "utf8_range"),
]
# include path for .proto files
PROTO_INCLUDE = os.path.join("third_party", "protobuf", "src")
# the target directory is relative to the grpcio_tools package root.
GRPCIO_TOOLS_ROOT_PREFIX = "tools/distrib/python/grpcio_tools/"
# Pairs of (source, target) directories to copy
# from the grpc repo root to the grpcio_tools build root.
COPY_FILES_SOURCE_TARGET_PAIRS = [
("include", "grpc_root/include"),
("src/compiler", "grpc_root/src/compiler"),
("third_party/abseil-cpp/absl", "third_party/abseil-cpp/absl"),
("third_party/protobuf/src", "third_party/protobuf/src"),
("third_party/utf8_range", "third_party/utf8_range"),
]
DELETE_TARGETS_ON_CLEANUP = ["third_party"]
# grpc repo root
GRPC_ROOT = os.path.abspath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "..")
)
# the directory under which to probe for the current protobuf commit SHA
GRPC_PROTOBUF_SUBMODULE_ROOT = os.path.join(
GRPC_ROOT, "third_party", "protobuf"
)
# the file to generate
GRPC_PYTHON_PROTOC_LIB_DEPS = os.path.join(
GRPC_ROOT,
"tools",
"distrib",
"python",
"grpcio_tools",
"protoc_lib_deps.py",
)
# the script to run for getting dependencies
BAZEL_DEPS = os.path.join(
GRPC_ROOT, "tools", "distrib", "python", "bazel_deps.sh"
)
# the bazel target to scrape to get list of sources for the build
BAZEL_DEPS_PROTOC_LIB_QUERY = "@com_google_protobuf//:protoc_lib"
BAZEL_DEPS_COMMON_PROTOS_QUERIES = [
"@com_google_protobuf//:well_known_type_protos",
# has both plugin.proto and descriptor.proto
"@com_google_protobuf//:compiler_plugin_proto",
]
def protobuf_submodule_commit_hash():
"""Gets the commit hash for the HEAD of the protobuf submodule currently
checked out."""
cwd = os.getcwd()
os.chdir(GRPC_PROTOBUF_SUBMODULE_ROOT)
output = subprocess.check_output(["git", "rev-parse", "HEAD"])
os.chdir(cwd)
return output.decode("ascii").splitlines()[0].strip()
def _bazel_query(query):
"""Runs 'bazel query' to collect source file info."""
print('Running "bazel query %s"' % query)
output = subprocess.check_output([BAZEL_DEPS, query])
return output.decode("ascii").splitlines()
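# Illustrative output (assumed label): _bazel_query("@com_google_protobuf//:protoc_lib")
# returns one bazel label per line, e.g. "@com_google_protobuf//src/google/protobuf:arena.cc".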
def _pretty_print_list(items):
"""Pretty print python list"""
formatted = pprint.pformat(items, indent=4)
# add newline after opening bracket (and fix indent of the next line)
if formatted.startswith("["):
formatted = formatted[0] + "\n " + formatted[1:]
# add newline before closing bracket
if formatted.endswith("]"):
formatted = formatted[:-1] + "\n" + formatted[-1]
return formatted
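# Illustrative effect (assumed input): for a long list of paths, pprint already
# places one item per line; the two tweaks above merely move the opening "["
# and the closing "]" onto their own lines so the generated file stays readable.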
def _bazel_name_to_file_path(name):
"""Transform bazel reference to source file name."""
for link in EXTERNAL_LINKS:
if name.startswith(link[0]):
filepath = link[1] + name[len(link[0]) :].replace(":", "/")
# For some reason, the WKT sources (such as wrappers.pb.cc)
# end up being reported by bazel as having an extra 'wkt/google/protobuf'
            # in the path. Removing it makes the compilation pass.
            # TODO(jtattermusch): Get rid of this hack.
return filepath.replace("wkt/google/protobuf/", "")
return None
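# Illustrative mapping (assumed label): "@com_google_protobuf//src/google/protobuf:arena.cc"
# becomes "third_party/protobuf/src/google/protobuf/arena.cc" via EXTERNAL_LINKS;
# labels with no known prefix map to None and are skipped by the caller.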
def _generate_deps_file_content():
"""Returns the data structure with dependencies of protoc as python code."""
cc_files_output = _bazel_query(BAZEL_DEPS_PROTOC_LIB_QUERY)
# Collect .cc files (that will be later included in the native extension build)
cc_files = []
for name in cc_files_output:
if name.endswith(".cc"):
filepath = _bazel_name_to_file_path(name)
if filepath:
cc_files.append(filepath)
# Collect list of .proto files that will be bundled in the grpcio_tools package.
raw_proto_files = []
for target in BAZEL_DEPS_COMMON_PROTOS_QUERIES:
raw_proto_files += _bazel_query(target)
proto_files = [
name[len(PROTOBUF_PROTO_PREFIX) :].replace(":", "/")
for name in raw_proto_files
if name.endswith(".proto") and name.startswith(PROTOBUF_PROTO_PREFIX)
]
commit_hash = protobuf_submodule_commit_hash()
commit_hash_expr = COMMIT_HASH_PREFIX + commit_hash + COMMIT_HASH_SUFFIX
deps_file_content = DEPS_FILE_CONTENT.format(
cc_files=_pretty_print_list(sorted(cc_files)),
proto_files=_pretty_print_list(sorted(set(proto_files))),
cc_includes=_pretty_print_list(CC_INCLUDES),
proto_include=repr(PROTO_INCLUDE),
commit_hash_expr=commit_hash_expr,
)
return deps_file_content
def _copy_source_tree(source, target):
"""Copies source directory to a given target directory."""
print("Copying contents of %s to %s" % (source, target))
# TODO(jtattermusch): It is unclear why this legacy code needs to copy
# the source directory to the target via the following boilerplate.
# Should this code be simplified?
for source_dir, _, files in os.walk(source):
target_dir = os.path.abspath(
os.path.join(target, os.path.relpath(source_dir, source))
)
try:
os.makedirs(target_dir)
except OSError as error:
if error.errno != errno.EEXIST:
raise
for relative_file in files:
source_file = os.path.abspath(
os.path.join(source_dir, relative_file)
)
target_file = os.path.abspath(
os.path.join(target_dir, relative_file)
)
shutil.copyfile(source_file, target_file)
def _delete_source_tree(target):
"""Deletes the copied target directory."""
target = GRPCIO_TOOLS_ROOT_PREFIX + target
target_abs = os.path.join(*target.split("/"))
print("Deleting copied folder %s" % (target_abs))
shutil.rmtree(target_abs, ignore_errors=True)
def main():
parser = argparse.ArgumentParser()
# In Step 1 below, the third_party folder is copied to a location required
# by the build scripts. This folder does not need to be committed to the
# repo, so you can pass `--cleanup_third_party` in automated scripts to
# ensure that the temporary folders are deleted after the script runs.
# See Jan's TODO in _copy_source_tree above.
parser.add_argument(
"--cleanup_third_party",
default=False,
action="store_true",
help="Delete the temporary third_party folder",
)
args = parser.parse_args()
os.chdir(GRPC_ROOT)
# Step 1:
# In order to be able to build the grpcio_tools package, we need the source code for the codegen plugins
# and its dependencies to be available under the build root of the grpcio_tools package.
# So we simply copy all the necessary files where the build will expect them to be.
for source, target in COPY_FILES_SOURCE_TARGET_PAIRS:
# convert the slashes in the relative path to platform-specific path dividers.
# All paths are relative to GRPC_ROOT
source_abs = os.path.join(GRPC_ROOT, os.path.join(*source.split("/")))
# for targets, add grpcio_tools root prefix
target = GRPCIO_TOOLS_ROOT_PREFIX + target
target_abs = os.path.join(GRPC_ROOT, os.path.join(*target.split("/")))
_copy_source_tree(source_abs, target_abs)
print(
"The necessary source files were copied under the grpcio_tools package"
" root."
)
print()
# Step 2:
# Extract build metadata from bazel build (by running "bazel query")
# and populate the protoc_lib_deps.py file with python-readable data structure
# that will be used by grpcio_tools's setup.py (so it knows how to configure
# the native build for the codegen plugin)
try:
print('Invoking "bazel query" to gather the protobuf dependencies.')
protoc_lib_deps_content = _generate_deps_file_content()
except Exception as error:
# We allow this script to succeed even if we couldn't get the dependencies,
# as then we can assume that even without a successful bazel run the
# dependencies currently in source control are 'good enough'.
sys.stderr.write("Got non-fatal error:\n")
traceback.print_exc(file=sys.stderr)
return
# If we successfully got the dependencies, truncate and rewrite the deps file.
with open(GRPC_PYTHON_PROTOC_LIB_DEPS, "w") as deps_file:
deps_file.write(protoc_lib_deps_content)
print('File "%s" updated.' % GRPC_PYTHON_PROTOC_LIB_DEPS)
if args.cleanup_third_party:
for target in DELETE_TARGETS_ON_CLEANUP:
_delete_source_tree(target)
print("Done.")
if __name__ == "__main__":
main()
| 10,918
| 34.917763
| 108
|
py
|
grpc
|
grpc-master/tools/distrib/python/grpc_prefixed/generate.py
|
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates grpc-prefixed packages using template renderer.
To use this script, please use a Python 3.7+ interpreter. This script is work-directory
agnostic. A quick executable command:
python3 tools/distrib/python/grpc_prefixed/generate.py
"""
import dataclasses
import datetime
import logging
import os
import shutil
import subprocess
import sys
import jinja2
WORK_PATH = os.path.realpath(os.path.dirname(__file__))
LICENSE = os.path.join(WORK_PATH, "../../../../LICENSE")
BUILD_PATH = os.path.join(WORK_PATH, "build")
DIST_PATH = os.path.join(WORK_PATH, "dist")
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(WORK_PATH, "templates"))
)
LOGGER = logging.getLogger(__name__)
POPEN_TIMEOUT_S = datetime.timedelta(minutes=1).total_seconds()
@dataclasses.dataclass
class PackageMeta:
"""Meta-info of a PyPI package."""
name: str
name_long: str
destination_package: str
version: str = "1.0.0"
def clean() -> None:
try:
shutil.rmtree(BUILD_PATH)
except FileNotFoundError:
pass
try:
shutil.rmtree(DIST_PATH)
except FileNotFoundError:
pass
def generate_package(meta: PackageMeta) -> None:
# Makes package directory
package_path = os.path.join(BUILD_PATH, meta.name)
os.makedirs(package_path, exist_ok=True)
# Copy license
shutil.copyfile(LICENSE, os.path.join(package_path, "LICENSE"))
# Generates source code
for template_name in env.list_templates():
template = env.get_template(template_name)
with open(
os.path.join(package_path, template_name.replace(".template", "")),
"w",
) as f:
f.write(template.render(dataclasses.asdict(meta)))
    # Creates the source distribution (sdist)
job = subprocess.Popen(
[
sys.executable,
os.path.join(package_path, "setup.py"),
"sdist",
"--dist-dir",
DIST_PATH,
],
cwd=package_path,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
outs, _ = job.communicate(timeout=POPEN_TIMEOUT_S)
# Logs result
if job.returncode != 0:
LOGGER.error("Wheel creation failed with %d", job.returncode)
LOGGER.error(outs)
else:
LOGGER.info("Package <%s> generated", meta.name)
def main():
clean()
generate_package(
PackageMeta(
name="grpc", name_long="gRPC Python", destination_package="grpcio"
)
)
generate_package(
PackageMeta(
name="grpc-status",
name_long="gRPC Rich Error Status",
destination_package="grpcio-status",
)
)
generate_package(
PackageMeta(
name="grpc-channelz",
name_long="gRPC Channel Tracing",
destination_package="grpcio-channelz",
)
)
generate_package(
PackageMeta(
name="grpc-tools",
name_long="ProtoBuf Code Generator",
destination_package="grpcio-tools",
)
)
generate_package(
PackageMeta(
name="grpc-reflection",
name_long="gRPC Reflection",
destination_package="grpcio-reflection",
)
)
generate_package(
PackageMeta(
name="grpc-testing",
name_long="gRPC Testing Utility",
destination_package="grpcio-testing",
)
)
generate_package(
PackageMeta(
name="grpc-health-checking",
name_long="gRPC Health Checking",
destination_package="grpcio-health-checking",
)
)
generate_package(
PackageMeta(
name="grpc-csds",
name_long="gRPC Client Status Discovery Service",
destination_package="grpcio-csds",
)
)
generate_package(
PackageMeta(
name="grpc-admin",
name_long="gRPC Admin Interface",
destination_package="grpcio-admin",
)
)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()
| 4,678
| 24.429348
| 79
|
py
|
grpc
|
grpc-master/tools/distrib/python/xds_protos/setup.py
|
#! /usr/bin/env python3
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A PyPI package for xDS protos generated Python code."""
import os
import setuptools
WORK_DIR = os.path.dirname(os.path.abspath(__file__))
EXCLUDE_PYTHON_FILES = ["generated_file_import_test.py", "build.py"]
# Use setuptools to build Python package
with open(os.path.join(WORK_DIR, "README.rst"), "r") as f:
LONG_DESCRIPTION = f.read()
PACKAGES = setuptools.find_packages(where=".", exclude=EXCLUDE_PYTHON_FILES)
CLASSIFIERS = [
"Development Status :: 3 - Alpha",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
]
INSTALL_REQUIRES = [
"grpcio>=1.49.0",
"protobuf>=4.21.6,<5.0dev",
]
SETUP_REQUIRES = INSTALL_REQUIRES + ["grpcio-tools"]
setuptools.setup(
name="xds-protos",
version="0.0.12",
packages=PACKAGES,
description="Generated Python code from envoyproxy/data-plane-api",
long_description_content_type="text/x-rst",
long_description=LONG_DESCRIPTION,
author="The gRPC Authors",
author_email="grpc-io@googlegroups.com",
url="https://grpc.io",
license="Apache License 2.0",
python_requires=">=3.7",
install_requires=INSTALL_REQUIRES,
setup_requires=SETUP_REQUIRES,
classifiers=CLASSIFIERS,
)
| 1,869
| 33
| 76
|
py
|
grpc
|
grpc-master/tools/distrib/python/xds_protos/build.py
|
#! /usr/bin/env python3
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builds the content of xds-protos package"""
import os
from grpc_tools import protoc
import pkg_resources
# We might not want to compile all the protos
EXCLUDE_PROTO_PACKAGES_LIST = [
# Requires extra dependency to Prometheus protos
"envoy/service/metrics/v2",
"envoy/service/metrics/v3",
"envoy/service/metrics/v4alpha",
]
# Compute the paths
WORK_DIR = os.path.dirname(os.path.abspath(__file__))
GRPC_ROOT = os.path.abspath(os.path.join(WORK_DIR, "..", "..", "..", ".."))
XDS_PROTO_ROOT = os.path.join(GRPC_ROOT, "third_party", "envoy-api")
UDPA_PROTO_ROOT = os.path.join(GRPC_ROOT, "third_party", "udpa")
GOOGLEAPIS_ROOT = os.path.join(GRPC_ROOT, "third_party", "googleapis")
VALIDATE_ROOT = os.path.join(GRPC_ROOT, "third_party", "protoc-gen-validate")
OPENCENSUS_PROTO_ROOT = os.path.join(
GRPC_ROOT, "third_party", "opencensus-proto", "src"
)
OPENTELEMETRY_PROTO_ROOT = os.path.join(
GRPC_ROOT, "third_party", "opentelemetry"
)
WELL_KNOWN_PROTOS_INCLUDE = pkg_resources.resource_filename(
"grpc_tools", "_proto"
)
OUTPUT_PATH = WORK_DIR
# Prepare the test file generation
TEST_FILE_NAME = "generated_file_import_test.py"
TEST_IMPORTS = []
# The pkgutil-style namespace packaging __init__.py
PKGUTIL_STYLE_INIT = (
"__path__ = __import__('pkgutil').extend_path(__path__, __name__)\n"
)
NAMESPACE_PACKAGES = ["google"]
def add_test_import(
proto_package_path: str, file_name: str, service: bool = False
):
TEST_IMPORTS.append(
"from %s import %s\n"
% (
proto_package_path.replace("/", "."),
file_name.replace(".proto", "_pb2"),
)
)
if service:
TEST_IMPORTS.append(
"from %s import %s\n"
% (
proto_package_path.replace("/", "."),
file_name.replace(".proto", "_pb2_grpc"),
)
)
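# Illustrative call (assumed proto): add_test_import("envoy/type", "percent.proto")
# records "from envoy.type import percent_pb2\n" for the generated import test;
# passing service=True also records the matching "_pb2_grpc" import.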
# Prepare Protoc command
COMPILE_PROTO_ONLY = [
"grpc_tools.protoc",
"--proto_path={}".format(XDS_PROTO_ROOT),
"--proto_path={}".format(UDPA_PROTO_ROOT),
"--proto_path={}".format(GOOGLEAPIS_ROOT),
"--proto_path={}".format(VALIDATE_ROOT),
"--proto_path={}".format(WELL_KNOWN_PROTOS_INCLUDE),
"--proto_path={}".format(OPENCENSUS_PROTO_ROOT),
"--proto_path={}".format(OPENTELEMETRY_PROTO_ROOT),
"--python_out={}".format(OUTPUT_PATH),
]
COMPILE_BOTH = COMPILE_PROTO_ONLY + ["--grpc_python_out={}".format(OUTPUT_PATH)]
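# Illustrative invocation (assumed file): each proto is compiled roughly as if
# running `python -m grpc_tools.protoc <flags above> path/to/foo.proto`, with
# --grpc_python_out added only for packages that contain gRPC services.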
def has_grpc_service(proto_package_path: str) -> bool:
return proto_package_path.startswith("envoy/service")
def compile_protos(proto_root: str, sub_dir: str = ".") -> None:
for root, _, files in os.walk(os.path.join(proto_root, sub_dir)):
proto_package_path = os.path.relpath(root, proto_root)
if proto_package_path in EXCLUDE_PROTO_PACKAGES_LIST:
print(f"Skipping package {proto_package_path}")
continue
for file_name in files:
if file_name.endswith(".proto"):
# Compile proto
if has_grpc_service(proto_package_path):
return_code = protoc.main(
COMPILE_BOTH + [os.path.join(root, file_name)]
)
add_test_import(proto_package_path, file_name, service=True)
else:
return_code = protoc.main(
COMPILE_PROTO_ONLY + [os.path.join(root, file_name)]
)
add_test_import(
proto_package_path, file_name, service=False
)
if return_code != 0:
raise Exception("error: {} failed".format(COMPILE_BOTH))
def create_init_file(path: str, package_path: str = "") -> None:
with open(os.path.join(path, "__init__.py"), "w") as f:
        # Apply the pkgutil-style namespace packaging, which is compatible with
        # both Python 2 and 3. Here is the full table of namespace compatibility:
# https://github.com/pypa/sample-namespace-packages/blob/master/table.md
if package_path in NAMESPACE_PACKAGES:
f.write(PKGUTIL_STYLE_INIT)
def main():
# Compile xDS protos
compile_protos(XDS_PROTO_ROOT)
compile_protos(UDPA_PROTO_ROOT)
    # We don't want to compile the entire GCP surface API, just the essential parts
compile_protos(GOOGLEAPIS_ROOT, os.path.join("google", "api"))
compile_protos(GOOGLEAPIS_ROOT, os.path.join("google", "rpc"))
compile_protos(GOOGLEAPIS_ROOT, os.path.join("google", "longrunning"))
compile_protos(GOOGLEAPIS_ROOT, os.path.join("google", "logging"))
compile_protos(GOOGLEAPIS_ROOT, os.path.join("google", "type"))
compile_protos(VALIDATE_ROOT, "validate")
compile_protos(OPENCENSUS_PROTO_ROOT)
compile_protos(OPENTELEMETRY_PROTO_ROOT)
# Generate __init__.py files for all modules
create_init_file(WORK_DIR)
for proto_root_module in [
"envoy",
"google",
"opencensus",
"udpa",
"validate",
"xds",
"opentelemetry",
]:
for root, _, _ in os.walk(os.path.join(WORK_DIR, proto_root_module)):
package_path = os.path.relpath(root, WORK_DIR)
create_init_file(root, package_path)
# Generate test file
with open(os.path.join(WORK_DIR, TEST_FILE_NAME), "w") as f:
f.writelines(TEST_IMPORTS)
if __name__ == "__main__":
main()
| 6,003
| 34.738095
| 82
|
py
|
grpc
|
grpc-master/tools/distrib/python/grpcio_tools/setup.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from distutils import cygwinccompiler
from distutils import extension
from distutils import util
import errno
import os
import os.path
import platform
import re
import shlex
import shutil
import subprocess
from subprocess import PIPE
import sys
import sysconfig
import pkg_resources
import setuptools
from setuptools.command import build_ext
# TODO(atash) add flag to disable Cython use
_PACKAGE_PATH = os.path.realpath(os.path.dirname(__file__))
_README_PATH = os.path.join(_PACKAGE_PATH, "README.rst")
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.abspath("."))
import _parallel_compile_patch
import protoc_lib_deps
import grpc_version
_EXT_INIT_SYMBOL = None
if sys.version_info[0] == 2:
_EXT_INIT_SYMBOL = "init_protoc_compiler"
else:
_EXT_INIT_SYMBOL = "PyInit__protoc_compiler"
_parallel_compile_patch.monkeypatch_compile_maybe()
CLASSIFIERS = [
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
]
PY3 = sys.version_info.major == 3
def _env_bool_value(env_name, default):
"""Parses a bool option from an environment variable"""
return os.environ.get(env_name, default).upper() not in ["FALSE", "0", ""]
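# Illustrative behaviour (assumed values): setting GRPC_PYTHON_BUILD_WITH_CYTHON=1
# yields True, while "0", "", "false" or leaving it unset (with the "False"
# defaults used below) all yield False.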
# Environment variable to determine whether or not the Cython extension should
# *use* Cython or use the generated C files. Note that this requires the C files
# to have been generated by building first *with* Cython support.
BUILD_WITH_CYTHON = _env_bool_value("GRPC_PYTHON_BUILD_WITH_CYTHON", "False")
# Export this variable to force building the python extension with a statically linked libstdc++.
# At least on linux, this is normally not needed as we can build manylinux-compatible wheels on linux just fine
# without statically linking libstdc++ (which leads to a slight increase in the wheel size).
# This option is useful when crosscompiling wheels for aarch64 where
# it's difficult to ensure that the crosscompilation toolchain has a high-enough version
# of GCC (we require >=5.1) but still uses old-enough libstdc++ symbols.
# TODO(jtattermusch): remove this workaround once issues with crosscompiler version are resolved.
BUILD_WITH_STATIC_LIBSTDCXX = _env_bool_value(
"GRPC_PYTHON_BUILD_WITH_STATIC_LIBSTDCXX", "False"
)
def check_linker_need_libatomic():
"""Test if linker on system needs libatomic."""
code_test = (
b"#include <atomic>\n"
+ b"int main() { return std::atomic<int64_t>{}; }"
)
cxx = os.environ.get("CXX", "c++")
cpp_test = subprocess.Popen(
[cxx, "-x", "c++", "-std=c++14", "-"],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
)
cpp_test.communicate(input=code_test)
if cpp_test.returncode == 0:
return False
# Double-check to see if -latomic actually can solve the problem.
# https://github.com/grpc/grpc/issues/22491
cpp_test = subprocess.Popen(
[cxx, "-x", "c++", "-std=c++14", "-", "-latomic"],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
)
cpp_test.communicate(input=code_test)
return cpp_test.returncode == 0
class BuildExt(build_ext.build_ext):
"""Custom build_ext command."""
def get_ext_filename(self, ext_name):
# since python3.5, python extensions' shared libraries use a suffix that corresponds to the value
# of sysconfig.get_config_var('EXT_SUFFIX') and contains info about the architecture the library targets.
# E.g. on x64 linux the suffix is ".cpython-XYZ-x86_64-linux-gnu.so"
# When crosscompiling python wheels, we need to be able to override this suffix
# so that the resulting file name matches the target architecture and we end up with a well-formed
# wheel.
filename = build_ext.build_ext.get_ext_filename(self, ext_name)
orig_ext_suffix = sysconfig.get_config_var("EXT_SUFFIX")
new_ext_suffix = os.getenv("GRPC_PYTHON_OVERRIDE_EXT_SUFFIX")
if new_ext_suffix and filename.endswith(orig_ext_suffix):
filename = filename[: -len(orig_ext_suffix)] + new_ext_suffix
return filename
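        # Illustrative override (assumed values): with
        # GRPC_PYTHON_OVERRIDE_EXT_SUFFIX=".so", a filename such as
        # "_protoc_compiler.cpython-311-x86_64-linux-gnu.so" is rewritten to
        # "_protoc_compiler.so", giving crosscompiled wheels a well-formed name.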
# There are some situations (like on Windows) where CC, CFLAGS, and LDFLAGS are
# entirely ignored/dropped/forgotten by distutils and its Cygwin/MinGW support.
# We use these environment variables to thus get around that without locking
# ourselves in w.r.t. the multitude of operating systems this ought to build on.
# We can also use these variables as a way to inject environment-specific
# compiler/linker flags. We assume GCC-like compilers and/or MinGW as a
# reasonable default.
EXTRA_ENV_COMPILE_ARGS = os.environ.get("GRPC_PYTHON_CFLAGS", None)
EXTRA_ENV_LINK_ARGS = os.environ.get("GRPC_PYTHON_LDFLAGS", None)
if EXTRA_ENV_COMPILE_ARGS is None:
EXTRA_ENV_COMPILE_ARGS = "-std=c++14"
if "win32" in sys.platform:
if sys.version_info < (3, 5):
# We use define flags here and don't directly add to DEFINE_MACROS below to
# ensure that the expert user/builder has a way of turning it off (via the
# envvars) without adding yet more GRPC-specific envvars.
# See https://sourceforge.net/p/mingw-w64/bugs/363/
if "32" in platform.architecture()[0]:
EXTRA_ENV_COMPILE_ARGS += (
" -D_ftime=_ftime32 -D_timeb=__timeb32"
" -D_ftime_s=_ftime32_s -D_hypot=hypot"
)
else:
EXTRA_ENV_COMPILE_ARGS += (
" -D_ftime=_ftime64 -D_timeb=__timeb64 -D_hypot=hypot"
)
else:
        # We need to statically link the C++ runtime; only the C runtime is
        # available dynamically
EXTRA_ENV_COMPILE_ARGS += " /MT"
elif "linux" in sys.platform or "darwin" in sys.platform:
EXTRA_ENV_COMPILE_ARGS += " -fno-wrapv -frtti"
if EXTRA_ENV_LINK_ARGS is None:
EXTRA_ENV_LINK_ARGS = ""
# NOTE(rbellevi): Clang on Mac OS will make all static symbols (both
# variables and objects) global weak symbols. When a process loads the
# protobuf wheel's shared object library before loading *this* C extension,
# the runtime linker will prefer the protobuf module's version of symbols.
# This results in the process using a mixture of symbols from the protobuf
# wheel and this wheel, which may be using different versions of
# libprotobuf. In the case that they *are* using different versions of
# libprotobuf *and* there has been a change in data layout (or in other
# invariants) segfaults, data corruption, or "bad things" may happen.
#
# This flag ensures that on Mac, the only global symbol is the one loaded by
# the Python interpreter. The problematic global weak symbols become local
# weak symbols. This is not required on Linux since the compiler does not
# produce global weak symbols. This is not required on Windows as our ".pyd"
# file does not contain any symbols.
#
# Finally, the leading underscore here is part of the Mach-O ABI. Unlike
# more modern ABIs (ELF et al.), Mach-O prepends an underscore to the names
# of C functions.
if "darwin" in sys.platform:
EXTRA_ENV_LINK_ARGS += " -Wl,-exported_symbol,_{}".format(
_EXT_INIT_SYMBOL
)
if "linux" in sys.platform or "darwin" in sys.platform:
EXTRA_ENV_LINK_ARGS += " -lpthread"
if check_linker_need_libatomic():
EXTRA_ENV_LINK_ARGS += " -latomic"
elif "win32" in sys.platform and sys.version_info < (3, 5):
msvcr = cygwinccompiler.get_msvcr()[0]
EXTRA_ENV_LINK_ARGS += (
" -static-libgcc -static-libstdc++ -mcrtdll={msvcr}"
" -static -lshlwapi".format(msvcr=msvcr)
)
EXTRA_COMPILE_ARGS = shlex.split(EXTRA_ENV_COMPILE_ARGS)
EXTRA_LINK_ARGS = shlex.split(EXTRA_ENV_LINK_ARGS)
if BUILD_WITH_STATIC_LIBSTDCXX:
EXTRA_LINK_ARGS.append("-static-libstdc++")
CC_FILES = [os.path.normpath(cc_file) for cc_file in protoc_lib_deps.CC_FILES]
PROTO_FILES = [
os.path.normpath(proto_file) for proto_file in protoc_lib_deps.PROTO_FILES
]
CC_INCLUDES = [
os.path.normpath(include_dir) for include_dir in protoc_lib_deps.CC_INCLUDES
]
PROTO_INCLUDE = os.path.normpath(protoc_lib_deps.PROTO_INCLUDE)
GRPC_PYTHON_TOOLS_PACKAGE = "grpc_tools"
GRPC_PYTHON_PROTO_RESOURCES_NAME = "_proto"
DEFINE_MACROS = ()
if "win32" in sys.platform:
DEFINE_MACROS += (
("WIN32_LEAN_AND_MEAN", 1),
# avoid https://github.com/abseil/abseil-cpp/issues/1425
("NOMINMAX", 1),
)
if "64bit" in platform.architecture()[0]:
DEFINE_MACROS += (("MS_WIN64", 1),)
elif "linux" in sys.platform or "darwin" in sys.platform:
DEFINE_MACROS += (("HAVE_PTHREAD", 1),)
# By default, Python3 distutils enforces compatibility of
# C plugins (.so files) with the OSX version Python was built with.
# We need OSX 10.10, the oldest version that supports C++ thread_local.
if "darwin" in sys.platform:
mac_target = sysconfig.get_config_var("MACOSX_DEPLOYMENT_TARGET")
if mac_target and (
pkg_resources.parse_version(mac_target)
< pkg_resources.parse_version("10.10.0")
):
os.environ["MACOSX_DEPLOYMENT_TARGET"] = "10.10"
os.environ["_PYTHON_HOST_PLATFORM"] = re.sub(
r"macosx-[0-9]+\.[0-9]+-(.+)",
r"macosx-10.10-\1",
util.get_platform(),
)
def package_data():
tools_path = GRPC_PYTHON_TOOLS_PACKAGE.replace(".", os.path.sep)
proto_resources_path = os.path.join(
tools_path, GRPC_PYTHON_PROTO_RESOURCES_NAME
)
proto_files = []
for proto_file in PROTO_FILES:
source = os.path.join(PROTO_INCLUDE, proto_file)
target = os.path.join(proto_resources_path, proto_file)
relative_target = os.path.join(
GRPC_PYTHON_PROTO_RESOURCES_NAME, proto_file
)
try:
os.makedirs(os.path.dirname(target))
except OSError as error:
if error.errno == errno.EEXIST:
pass
else:
raise
shutil.copy(source, target)
proto_files.append(relative_target)
return {GRPC_PYTHON_TOOLS_PACKAGE: proto_files}
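# Illustrative outcome (assumed proto): "google/protobuf/any.proto" is copied to
# "grpc_tools/_proto/google/protobuf/any.proto" and reported to setuptools as
# "_proto/google/protobuf/any.proto" under the "grpc_tools" package.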
def extension_modules():
if BUILD_WITH_CYTHON:
plugin_sources = [os.path.join("grpc_tools", "_protoc_compiler.pyx")]
else:
plugin_sources = [os.path.join("grpc_tools", "_protoc_compiler.cpp")]
plugin_sources += [
os.path.join("grpc_tools", "main.cc"),
os.path.join("grpc_root", "src", "compiler", "python_generator.cc"),
os.path.join("grpc_root", "src", "compiler", "proto_parser_helper.cc"),
] + CC_FILES
plugin_ext = extension.Extension(
name="grpc_tools._protoc_compiler",
sources=plugin_sources,
include_dirs=[
".",
"grpc_root",
os.path.join("grpc_root", "include"),
]
+ CC_INCLUDES,
language="c++",
define_macros=list(DEFINE_MACROS),
extra_compile_args=list(EXTRA_COMPILE_ARGS),
extra_link_args=list(EXTRA_LINK_ARGS),
)
extensions = [plugin_ext]
if BUILD_WITH_CYTHON:
from Cython import Build
return Build.cythonize(extensions)
else:
return extensions
setuptools.setup(
name="grpcio-tools",
version=grpc_version.VERSION,
description="Protobuf code generator for gRPC",
long_description_content_type="text/x-rst",
long_description=open(_README_PATH, "r").read(),
author="The gRPC Authors",
author_email="grpc-io@googlegroups.com",
url="https://grpc.io",
project_urls={
"Source Code": "https://github.com/grpc/grpc/tree/master/tools/distrib/python/grpcio_tools",
"Bug Tracker": "https://github.com/grpc/grpc/issues",
},
license="Apache License 2.0",
classifiers=CLASSIFIERS,
ext_modules=extension_modules(),
packages=setuptools.find_packages("."),
python_requires=">=3.7",
install_requires=[
"protobuf>=4.21.6,<5.0dev",
"grpcio>={version}".format(version=grpc_version.VERSION),
"setuptools",
],
package_data=package_data(),
cmdclass={
"build_ext": BuildExt,
},
)
| 12,902
| 37.516418
| 113
|
py
|
grpc
|
grpc-master/tools/distrib/python/grpcio_tools/protoc_lib_deps.py
|
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# AUTO-GENERATED BY make_grpcio_tools.py!
CC_FILES=[
'third_party/abseil-cpp/absl/base/internal/cycleclock.cc',
'third_party/abseil-cpp/absl/base/internal/low_level_alloc.cc',
'third_party/abseil-cpp/absl/base/internal/raw_logging.cc',
'third_party/abseil-cpp/absl/base/internal/spinlock.cc',
'third_party/abseil-cpp/absl/base/internal/spinlock_wait.cc',
'third_party/abseil-cpp/absl/base/internal/strerror.cc',
'third_party/abseil-cpp/absl/base/internal/sysinfo.cc',
'third_party/abseil-cpp/absl/base/internal/thread_identity.cc',
'third_party/abseil-cpp/absl/base/internal/throw_delegate.cc',
'third_party/abseil-cpp/absl/base/internal/unscaledcycleclock.cc',
'third_party/abseil-cpp/absl/base/log_severity.cc',
'third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.cc',
'third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc',
'third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc',
'third_party/abseil-cpp/absl/crc/crc32c.cc',
'third_party/abseil-cpp/absl/crc/internal/cpu_detect.cc',
'third_party/abseil-cpp/absl/crc/internal/crc.cc',
'third_party/abseil-cpp/absl/crc/internal/crc_cord_state.cc',
'third_party/abseil-cpp/absl/crc/internal/crc_memcpy_fallback.cc',
'third_party/abseil-cpp/absl/crc/internal/crc_memcpy_x86_64.cc',
'third_party/abseil-cpp/absl/crc/internal/crc_non_temporal_memcpy.cc',
'third_party/abseil-cpp/absl/crc/internal/crc_x86_arm_combined.cc',
'third_party/abseil-cpp/absl/debugging/internal/address_is_readable.cc',
'third_party/abseil-cpp/absl/debugging/internal/demangle.cc',
'third_party/abseil-cpp/absl/debugging/internal/elf_mem_image.cc',
'third_party/abseil-cpp/absl/debugging/internal/examine_stack.cc',
'third_party/abseil-cpp/absl/debugging/internal/vdso_support.cc',
'third_party/abseil-cpp/absl/debugging/stacktrace.cc',
'third_party/abseil-cpp/absl/debugging/symbolize.cc',
'third_party/abseil-cpp/absl/hash/internal/city.cc',
'third_party/abseil-cpp/absl/hash/internal/hash.cc',
'third_party/abseil-cpp/absl/hash/internal/low_level_hash.cc',
'third_party/abseil-cpp/absl/log/globals.cc',
'third_party/abseil-cpp/absl/log/initialize.cc',
'third_party/abseil-cpp/absl/log/internal/check_op.cc',
'third_party/abseil-cpp/absl/log/internal/conditions.cc',
'third_party/abseil-cpp/absl/log/internal/globals.cc',
'third_party/abseil-cpp/absl/log/internal/log_format.cc',
'third_party/abseil-cpp/absl/log/internal/log_message.cc',
'third_party/abseil-cpp/absl/log/internal/log_sink_set.cc',
'third_party/abseil-cpp/absl/log/internal/nullguard.cc',
'third_party/abseil-cpp/absl/log/internal/proto.cc',
'third_party/abseil-cpp/absl/log/log_entry.cc',
'third_party/abseil-cpp/absl/log/log_sink.cc',
'third_party/abseil-cpp/absl/numeric/int128.cc',
'third_party/abseil-cpp/absl/profiling/internal/exponential_biased.cc',
'third_party/abseil-cpp/absl/status/status.cc',
'third_party/abseil-cpp/absl/status/status_payload_printer.cc',
'third_party/abseil-cpp/absl/status/statusor.cc',
'third_party/abseil-cpp/absl/strings/ascii.cc',
'third_party/abseil-cpp/absl/strings/charconv.cc',
'third_party/abseil-cpp/absl/strings/cord.cc',
'third_party/abseil-cpp/absl/strings/cord_analysis.cc',
'third_party/abseil-cpp/absl/strings/cord_buffer.cc',
'third_party/abseil-cpp/absl/strings/escaping.cc',
'third_party/abseil-cpp/absl/strings/internal/charconv_bigint.cc',
'third_party/abseil-cpp/absl/strings/internal/charconv_parse.cc',
'third_party/abseil-cpp/absl/strings/internal/cord_internal.cc',
'third_party/abseil-cpp/absl/strings/internal/cord_rep_btree.cc',
'third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator.cc',
'third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_reader.cc',
'third_party/abseil-cpp/absl/strings/internal/cord_rep_consume.cc',
'third_party/abseil-cpp/absl/strings/internal/cord_rep_crc.cc',
'third_party/abseil-cpp/absl/strings/internal/cord_rep_ring.cc',
'third_party/abseil-cpp/absl/strings/internal/cordz_functions.cc',
'third_party/abseil-cpp/absl/strings/internal/cordz_handle.cc',
'third_party/abseil-cpp/absl/strings/internal/cordz_info.cc',
'third_party/abseil-cpp/absl/strings/internal/damerau_levenshtein_distance.cc',
'third_party/abseil-cpp/absl/strings/internal/escaping.cc',
'third_party/abseil-cpp/absl/strings/internal/memutil.cc',
'third_party/abseil-cpp/absl/strings/internal/ostringstream.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/arg.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/bind.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/extension.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/output.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/parser.cc',
'third_party/abseil-cpp/absl/strings/internal/stringify_sink.cc',
'third_party/abseil-cpp/absl/strings/internal/utf8.cc',
'third_party/abseil-cpp/absl/strings/match.cc',
'third_party/abseil-cpp/absl/strings/numbers.cc',
'third_party/abseil-cpp/absl/strings/str_cat.cc',
'third_party/abseil-cpp/absl/strings/str_replace.cc',
'third_party/abseil-cpp/absl/strings/str_split.cc',
'third_party/abseil-cpp/absl/strings/string_view.cc',
'third_party/abseil-cpp/absl/strings/substitute.cc',
'third_party/abseil-cpp/absl/synchronization/barrier.cc',
'third_party/abseil-cpp/absl/synchronization/blocking_counter.cc',
'third_party/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc',
'third_party/abseil-cpp/absl/synchronization/internal/graphcycles.cc',
'third_party/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc',
'third_party/abseil-cpp/absl/synchronization/internal/waiter.cc',
'third_party/abseil-cpp/absl/synchronization/mutex.cc',
'third_party/abseil-cpp/absl/synchronization/notification.cc',
'third_party/abseil-cpp/absl/time/civil_time.cc',
'third_party/abseil-cpp/absl/time/clock.cc',
'third_party/abseil-cpp/absl/time/duration.cc',
'third_party/abseil-cpp/absl/time/format.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/civil_time_detail.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_fixed.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_format.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_impl.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_posix.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/zone_info_source.cc',
'third_party/abseil-cpp/absl/time/time.cc',
'third_party/abseil-cpp/absl/types/bad_optional_access.cc',
'third_party/abseil-cpp/absl/types/bad_variant_access.cc',
'third_party/protobuf/src/google/protobuf/any.cc',
'third_party/protobuf/src/google/protobuf/any_lite.cc',
'third_party/protobuf/src/google/protobuf/arena.cc',
'third_party/protobuf/src/google/protobuf/arena_align.cc',
'third_party/protobuf/src/google/protobuf/arena_config.cc',
'third_party/protobuf/src/google/protobuf/arenastring.cc',
'third_party/protobuf/src/google/protobuf/arenaz_sampler.cc',
'third_party/protobuf/src/google/protobuf/compiler/allowlists/empty_package.cc',
'third_party/protobuf/src/google/protobuf/compiler/allowlists/weak_imports.cc',
'third_party/protobuf/src/google/protobuf/compiler/code_generator.cc',
'third_party/protobuf/src/google/protobuf/compiler/command_line_interface.cc',
'third_party/protobuf/src/google/protobuf/compiler/cpp/enum.cc',
'third_party/protobuf/src/google/protobuf/compiler/cpp/extension.cc',
'third_party/protobuf/src/google/protobuf/compiler/cpp/field.cc',
'third_party/protobuf/src/google/protobuf/compiler/cpp/field_generators/cord_field.cc',
'third_party/protobuf/src/google/protobuf/compiler/cpp/field_generators/enum_field.cc',
'third_party/protobuf/src/google/protobuf/compiler/cpp/field_generators/map_field.cc',
'third_party/protobuf/src/google/protobuf/compiler/cpp/field_generators/message_field.cc',
'third_party/protobuf/src/google/protobuf/compiler/cpp/field_generators/primitive_field.cc',
'third_party/protobuf/src/google/protobuf/compiler/cpp/field_generators/string_field.cc',
'third_party/protobuf/src/google/protobuf/compiler/cpp/file.cc',
'third_party/protobuf/src/google/protobuf/compiler/cpp/generator.cc',
'third_party/protobuf/src/google/protobuf/compiler/cpp/helpers.cc',
'third_party/protobuf/src/google/protobuf/compiler/cpp/message.cc',
'third_party/protobuf/src/google/protobuf/compiler/cpp/padding_optimizer.cc',
'third_party/protobuf/src/google/protobuf/compiler/cpp/parse_function_generator.cc',
'third_party/protobuf/src/google/protobuf/compiler/cpp/service.cc',
'third_party/protobuf/src/google/protobuf/compiler/cpp/tracker.cc',
'third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_doc_comment.cc',
'third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_enum.cc',
'third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_enum_field.cc',
'third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_field_base.cc',
'third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_generator.cc',
'third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_helpers.cc',
'third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_map_field.cc',
'third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_message.cc',
'third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_message_field.cc',
'third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_primitive_field.cc',
'third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_reflection_class.cc',
'third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_repeated_enum_field.cc',
'third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_repeated_message_field.cc',
'third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_repeated_primitive_field.cc',
'third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_source_generator_base.cc',
'third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_wrapper_field.cc',
'third_party/protobuf/src/google/protobuf/compiler/csharp/names.cc',
'third_party/protobuf/src/google/protobuf/compiler/importer.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/context.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/doc_comment.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/enum.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/enum_field.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/enum_field_lite.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/enum_lite.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/extension.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/extension_lite.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/field.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/file.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/generator.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/generator_factory.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/helpers.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/kotlin_generator.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/map_field.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/map_field_lite.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/message.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/message_builder.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/message_builder_lite.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/message_field.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/message_field_lite.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/message_lite.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/message_serialization.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/name_resolver.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/names.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/primitive_field.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/primitive_field_lite.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/service.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/shared_code_generator.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/string_field.cc',
'third_party/protobuf/src/google/protobuf/compiler/java/string_field_lite.cc',
'third_party/protobuf/src/google/protobuf/compiler/main.cc',
'third_party/protobuf/src/google/protobuf/compiler/objectivec/enum.cc',
'third_party/protobuf/src/google/protobuf/compiler/objectivec/enum_field.cc',
'third_party/protobuf/src/google/protobuf/compiler/objectivec/extension.cc',
'third_party/protobuf/src/google/protobuf/compiler/objectivec/field.cc',
'third_party/protobuf/src/google/protobuf/compiler/objectivec/file.cc',
'third_party/protobuf/src/google/protobuf/compiler/objectivec/generator.cc',
'third_party/protobuf/src/google/protobuf/compiler/objectivec/helpers.cc',
'third_party/protobuf/src/google/protobuf/compiler/objectivec/import_writer.cc',
'third_party/protobuf/src/google/protobuf/compiler/objectivec/line_consumer.cc',
'third_party/protobuf/src/google/protobuf/compiler/objectivec/map_field.cc',
'third_party/protobuf/src/google/protobuf/compiler/objectivec/message.cc',
'third_party/protobuf/src/google/protobuf/compiler/objectivec/message_field.cc',
'third_party/protobuf/src/google/protobuf/compiler/objectivec/names.cc',
'third_party/protobuf/src/google/protobuf/compiler/objectivec/oneof.cc',
'third_party/protobuf/src/google/protobuf/compiler/objectivec/primitive_field.cc',
'third_party/protobuf/src/google/protobuf/compiler/objectivec/text_format_decode_data.cc',
'third_party/protobuf/src/google/protobuf/compiler/parser.cc',
'third_party/protobuf/src/google/protobuf/compiler/php/names.cc',
'third_party/protobuf/src/google/protobuf/compiler/php/php_generator.cc',
'third_party/protobuf/src/google/protobuf/compiler/plugin.cc',
'third_party/protobuf/src/google/protobuf/compiler/plugin.pb.cc',
'third_party/protobuf/src/google/protobuf/compiler/python/generator.cc',
'third_party/protobuf/src/google/protobuf/compiler/python/helpers.cc',
'third_party/protobuf/src/google/protobuf/compiler/python/pyi_generator.cc',
'third_party/protobuf/src/google/protobuf/compiler/retention.cc',
'third_party/protobuf/src/google/protobuf/compiler/ruby/ruby_generator.cc',
'third_party/protobuf/src/google/protobuf/compiler/rust/accessors/accessors.cc',
'third_party/protobuf/src/google/protobuf/compiler/rust/accessors/singular_bytes.cc',
'third_party/protobuf/src/google/protobuf/compiler/rust/accessors/singular_scalar.cc',
'third_party/protobuf/src/google/protobuf/compiler/rust/context.cc',
'third_party/protobuf/src/google/protobuf/compiler/rust/generator.cc',
'third_party/protobuf/src/google/protobuf/compiler/rust/message.cc',
'third_party/protobuf/src/google/protobuf/compiler/rust/naming.cc',
'third_party/protobuf/src/google/protobuf/compiler/subprocess.cc',
'third_party/protobuf/src/google/protobuf/compiler/zip_writer.cc',
'third_party/protobuf/src/google/protobuf/descriptor.cc',
'third_party/protobuf/src/google/protobuf/descriptor.pb.cc',
'third_party/protobuf/src/google/protobuf/descriptor_database.cc',
'third_party/protobuf/src/google/protobuf/dynamic_message.cc',
'third_party/protobuf/src/google/protobuf/extension_set.cc',
'third_party/protobuf/src/google/protobuf/extension_set_heavy.cc',
'third_party/protobuf/src/google/protobuf/generated_enum_util.cc',
'third_party/protobuf/src/google/protobuf/generated_message_bases.cc',
'third_party/protobuf/src/google/protobuf/generated_message_reflection.cc',
'third_party/protobuf/src/google/protobuf/generated_message_tctable_full.cc',
'third_party/protobuf/src/google/protobuf/generated_message_tctable_gen.cc',
'third_party/protobuf/src/google/protobuf/generated_message_tctable_lite.cc',
'third_party/protobuf/src/google/protobuf/generated_message_util.cc',
'third_party/protobuf/src/google/protobuf/implicit_weak_message.cc',
'third_party/protobuf/src/google/protobuf/inlined_string_field.cc',
'third_party/protobuf/src/google/protobuf/io/coded_stream.cc',
'third_party/protobuf/src/google/protobuf/io/gzip_stream.cc',
'third_party/protobuf/src/google/protobuf/io/io_win32.cc',
'third_party/protobuf/src/google/protobuf/io/printer.cc',
'third_party/protobuf/src/google/protobuf/io/strtod.cc',
'third_party/protobuf/src/google/protobuf/io/tokenizer.cc',
'third_party/protobuf/src/google/protobuf/io/zero_copy_sink.cc',
'third_party/protobuf/src/google/protobuf/io/zero_copy_stream.cc',
'third_party/protobuf/src/google/protobuf/io/zero_copy_stream_impl.cc',
'third_party/protobuf/src/google/protobuf/io/zero_copy_stream_impl_lite.cc',
'third_party/protobuf/src/google/protobuf/map.cc',
'third_party/protobuf/src/google/protobuf/map_field.cc',
'third_party/protobuf/src/google/protobuf/message.cc',
'third_party/protobuf/src/google/protobuf/message_lite.cc',
'third_party/protobuf/src/google/protobuf/parse_context.cc',
'third_party/protobuf/src/google/protobuf/port.cc',
'third_party/protobuf/src/google/protobuf/reflection_mode.cc',
'third_party/protobuf/src/google/protobuf/reflection_ops.cc',
'third_party/protobuf/src/google/protobuf/repeated_field.cc',
'third_party/protobuf/src/google/protobuf/repeated_ptr_field.cc',
'third_party/protobuf/src/google/protobuf/service.cc',
'third_party/protobuf/src/google/protobuf/stubs/common.cc',
'third_party/protobuf/src/google/protobuf/text_format.cc',
'third_party/protobuf/src/google/protobuf/unknown_field_set.cc',
'third_party/protobuf/src/google/protobuf/wire_format.cc',
'third_party/protobuf/src/google/protobuf/wire_format_lite.cc',
'third_party/utf8_range/utf8_validity.cc'
]
PROTO_FILES=[
'google/protobuf/any.proto',
'google/protobuf/api.proto',
'google/protobuf/compiler/plugin.proto',
'google/protobuf/descriptor.proto',
'google/protobuf/duration.proto',
'google/protobuf/empty.proto',
'google/protobuf/field_mask.proto',
'google/protobuf/source_context.proto',
'google/protobuf/struct.proto',
'google/protobuf/timestamp.proto',
'google/protobuf/type.proto',
'google/protobuf/wrappers.proto'
]
CC_INCLUDES=[
'third_party/abseil-cpp', 'third_party/protobuf/src', 'third_party/utf8_range'
]
PROTO_INCLUDE='third_party/protobuf/src'
PROTOBUF_SUBMODULE_VERSION="2c5fa078d8e86e5f4bd34e6f4c9ea9e8d7d4d44a"
| 20,318
| 65.185668
| 98
|
py
|
grpc
|
grpc-master/tools/distrib/python/grpcio_tools/grpc_version.py
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template`!!!
VERSION = '1.57.0.dev0'
PROTOBUF_VERSION = '3.23.4'
| 738
| 37.894737
| 106
|
py
|
grpc
|
grpc-master/tools/distrib/python/grpcio_tools/_parallel_compile_patch.py
|
# Copyright 2018 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Patches the compile() to allow enable parallel compilation of C/C++.
build_ext has lots of C/C++ files and normally them one by one.
Enabling parallel build helps a lot.
"""
import distutils.ccompiler
import os
try:
BUILD_EXT_COMPILER_JOBS = int(
os.environ["GRPC_PYTHON_BUILD_EXT_COMPILER_JOBS"]
)
except KeyError:
import multiprocessing
BUILD_EXT_COMPILER_JOBS = multiprocessing.cpu_count()
# monkey-patch for parallel compilation
def _parallel_compile(
self,
sources,
output_dir=None,
macros=None,
include_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
depends=None,
):
# setup the same way as distutils.ccompiler.CCompiler
# https://github.com/python/cpython/blob/31368a4f0e531c19affe2a1becd25fc316bc7501/Lib/distutils/ccompiler.py#L564
macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
output_dir, macros, include_dirs, sources, depends, extra_postargs
)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
def _compile_single_file(obj):
try:
src, ext = build[obj]
except KeyError:
return
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
# run compilation of individual files in parallel
import multiprocessing.pool
multiprocessing.pool.ThreadPool(BUILD_EXT_COMPILER_JOBS).map(
_compile_single_file, objects
)
return objects
def monkeypatch_compile_maybe():
"""Monkeypatching is dumb, but the build speed gain is worth it."""
if BUILD_EXT_COMPILER_JOBS > 1:
distutils.ccompiler.CCompiler.compile = _parallel_compile
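# Usage sketch (hypothetical wiring, not part of this module): a setup.py that
# wants parallel C/C++ builds applies the patch before calling setup(), e.g.
#
#   import _parallel_compile_patch
#   _parallel_compile_patch.monkeypatch_compile_maybe()
#   setuptools.setup(...)  # subsequent build_ext runs now compile in parallel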
| 2,255
| 30.333333
| 117
|
py
|
grpc
|
grpc-master/tools/distrib/python/grpcio_tools/grpc_tools/protoc.py
|
#!/usr/bin/env python3
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from grpc_tools import _protoc_compiler
import pkg_resources
_PROTO_MODULE_SUFFIX = "_pb2"
_SERVICE_MODULE_SUFFIX = "_pb2_grpc"
_DISABLE_DYNAMIC_STUBS = "GRPC_PYTHON_DISABLE_DYNAMIC_STUBS"
def main(command_arguments):
"""Run the protocol buffer compiler with the given command-line arguments.
Args:
command_arguments: a list of strings representing command line arguments to
`protoc`.
"""
command_arguments = [argument.encode() for argument in command_arguments]
return _protoc_compiler.run_main(command_arguments)
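# Example invocation of main() (a sketch; the proto path and output
# directories below are hypothetical):
#
#   exit_code = main([
#       "grpc_tools.protoc",
#       "--proto_path=protos",
#       "--python_out=gen",
#       "--grpc_python_out=gen",
#       "protos/helloworld.proto",
#   ])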
# NOTE(rbellevi): importlib.abc is not supported on 3.4.
if sys.version_info >= (3, 5, 0):
import contextlib
import importlib
import importlib.abc
import importlib.machinery
import threading
_FINDERS_INSTALLED = False
_FINDERS_INSTALLED_LOCK = threading.Lock()
def _maybe_install_proto_finders():
global _FINDERS_INSTALLED
with _FINDERS_INSTALLED_LOCK:
if not _FINDERS_INSTALLED:
sys.meta_path.extend(
[
ProtoFinder(
_PROTO_MODULE_SUFFIX, _protoc_compiler.get_protos
),
ProtoFinder(
_SERVICE_MODULE_SUFFIX,
_protoc_compiler.get_services,
),
]
)
sys.path.append(
pkg_resources.resource_filename("grpc_tools", "_proto")
)
_FINDERS_INSTALLED = True
def _module_name_to_proto_file(suffix, module_name):
components = module_name.split(".")
proto_name = components[-1][: -1 * len(suffix)]
# NOTE(rbellevi): The Protobuf library expects this path to use
# forward slashes on every platform.
return "/".join(components[:-1] + [proto_name + ".proto"])
def _proto_file_to_module_name(suffix, proto_file):
components = proto_file.split(os.path.sep)
proto_base_name = os.path.splitext(components[-1])[0]
return ".".join(components[:-1] + [proto_base_name + suffix])
def _protos(protobuf_path):
"""Returns a gRPC module generated from the indicated proto file."""
_maybe_install_proto_finders()
module_name = _proto_file_to_module_name(
_PROTO_MODULE_SUFFIX, protobuf_path
)
module = importlib.import_module(module_name)
return module
def _services(protobuf_path):
"""Returns a module generated from the indicated proto file."""
_maybe_install_proto_finders()
_protos(protobuf_path)
module_name = _proto_file_to_module_name(
_SERVICE_MODULE_SUFFIX, protobuf_path
)
module = importlib.import_module(module_name)
return module
def _protos_and_services(protobuf_path):
"""Returns two modules, corresponding to _pb2.py and _pb2_grpc.py files."""
return (_protos(protobuf_path), _services(protobuf_path))
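    # Example (sketch): dynamically import generated code without running
    # protoc ahead of time, assuming "helloworld.proto" is reachable from
    # sys.path and defines a HelloRequest message and a Greeter service
    # (hypothetical names):
    #
    #   protos, services = _protos_and_services("helloworld.proto")
    #   request = protos.HelloRequest(name="world")
    #   stub_class = services.GreeterStub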
_proto_code_cache = {}
_proto_code_cache_lock = threading.RLock()
class ProtoLoader(importlib.abc.Loader):
def __init__(
self, suffix, codegen_fn, module_name, protobuf_path, proto_root
):
self._suffix = suffix
self._codegen_fn = codegen_fn
self._module_name = module_name
self._protobuf_path = protobuf_path
self._proto_root = proto_root
def create_module(self, spec):
return None
def _generated_file_to_module_name(self, filepath):
components = filepath.split(os.path.sep)
return ".".join(
components[:-1] + [os.path.splitext(components[-1])[0]]
)
def exec_module(self, module):
assert module.__name__ == self._module_name
code = None
with _proto_code_cache_lock:
if self._module_name in _proto_code_cache:
code = _proto_code_cache[self._module_name]
exec(code, module.__dict__)
else:
files = self._codegen_fn(
self._protobuf_path.encode("ascii"),
[path.encode("ascii") for path in sys.path],
)
# NOTE: The files are returned in topological order of dependencies. Each
# entry is guaranteed to depend only on the modules preceding it in the
# list and the last entry is guaranteed to be our requested module. We
# cache the code from the first invocation at module-scope so that we
# don't have to regenerate code that has already been generated by protoc.
for f in files[:-1]:
module_name = self._generated_file_to_module_name(
f[0].decode("ascii")
)
if module_name not in sys.modules:
if module_name not in _proto_code_cache:
_proto_code_cache[module_name] = f[1]
importlib.import_module(module_name)
exec(files[-1][1], module.__dict__)
class ProtoFinder(importlib.abc.MetaPathFinder):
def __init__(self, suffix, codegen_fn):
self._suffix = suffix
self._codegen_fn = codegen_fn
def find_spec(self, fullname, path, target=None):
if not fullname.endswith(self._suffix):
return None
filepath = _module_name_to_proto_file(self._suffix, fullname)
for search_path in sys.path:
try:
prospective_path = os.path.join(search_path, filepath)
os.stat(prospective_path)
except (FileNotFoundError, NotADirectoryError, OSError):
continue
else:
return importlib.machinery.ModuleSpec(
fullname,
ProtoLoader(
self._suffix,
self._codegen_fn,
fullname,
filepath,
search_path,
),
)
# NOTE(rbellevi): We provide an environment variable that enables users to completely
# disable this behavior if it is not desired, e.g. for performance reasons.
if not os.getenv(_DISABLE_DYNAMIC_STUBS):
_maybe_install_proto_finders()
if __name__ == "__main__":
proto_include = pkg_resources.resource_filename("grpc_tools", "_proto")
sys.exit(main(sys.argv + ["-I{}".format(proto_include)]))
| 7,446
| 38.194737
| 94
|
py
|
grpc
|
grpc-master/tools/distrib/python/grpcio_tools/grpc_tools/command.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from grpc_tools import protoc
import pkg_resources
import setuptools
def build_package_protos(package_root, strict_mode=False):
proto_files = []
inclusion_root = os.path.abspath(package_root)
for root, _, files in os.walk(inclusion_root):
for filename in files:
if filename.endswith(".proto"):
proto_files.append(
os.path.abspath(os.path.join(root, filename))
)
well_known_protos_include = pkg_resources.resource_filename(
"grpc_tools", "_proto"
)
for proto_file in proto_files:
command = [
"grpc_tools.protoc",
"--proto_path={}".format(inclusion_root),
"--proto_path={}".format(well_known_protos_include),
"--python_out={}".format(inclusion_root),
"--pyi_out={}".format(inclusion_root),
"--grpc_python_out={}".format(inclusion_root),
] + [proto_file]
if protoc.main(command) != 0:
if strict_mode:
raise Exception("error: {} failed".format(command))
else:
sys.stderr.write("warning: {} failed".format(command))
class BuildPackageProtos(setuptools.Command):
"""Command to generate project *_pb2.py modules from proto files."""
description = "build grpc protobuf modules"
user_options = [
(
"strict-mode",
"s",
"exit with non-zero value if the proto compiling fails.",
)
]
def initialize_options(self):
self.strict_mode = False
def finalize_options(self):
pass
def run(self):
# due to limitations of the proto generator, we require that only *one*
# directory is provided as an 'include' directory. We assume it's the '' key
# to `self.distribution.package_dir` (and get a key error if it's not
# there).
build_package_protos(
self.distribution.package_dir[""], self.strict_mode
)
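# Usage sketch (hypothetical setup.py): registering the command makes
# `python setup.py build_package_protos` regenerate the *_pb2 modules;
# the package_dir mapping below is an assumption required by run() above.
#
#   setuptools.setup(
#       ...,
#       package_dir={"": "."},
#       cmdclass={"build_package_protos": BuildPackageProtos},
#   )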
| 2,601
| 31.936709
| 84
|
py
|
grpc
|
grpc-master/tools/distrib/python/grpcio_tools/grpc_tools/__init__.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 577
| 40.285714
| 74
|
py
|
grpc
|
grpc-master/tools/distrib/python/grpcio_tools/grpc_tools/test/protoc_test.py
|
# Copyright 2020 The gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for protoc."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import functools
import multiprocessing
import sys
import unittest
# TODO(https://github.com/grpc/grpc/issues/23847): Deduplicate this mechanism with
# the grpcio_tests module.
def _wrap_in_subprocess(error_queue, fn):
@functools.wraps(fn)
def _wrapped():
try:
fn()
except Exception as e:
error_queue.put(e)
raise
return _wrapped
def _run_in_subprocess(test_case):
error_queue = multiprocessing.Queue()
wrapped_case = _wrap_in_subprocess(error_queue, test_case)
proc = multiprocessing.Process(target=wrapped_case)
proc.start()
proc.join()
if not error_queue.empty():
raise error_queue.get()
assert proc.exitcode == 0, "Process exited with code {}".format(
proc.exitcode
)
@contextlib.contextmanager
def _augmented_syspath(new_paths):
original_sys_path = sys.path
if new_paths is not None:
sys.path = list(new_paths) + sys.path
try:
yield
finally:
sys.path = original_sys_path
def _test_import_protos():
from grpc_tools import protoc
with _augmented_syspath(
("tools/distrib/python/grpcio_tools/grpc_tools/test/",)
):
protos = protoc._protos("simple.proto")
assert protos.SimpleMessage is not None
def _test_import_services():
from grpc_tools import protoc
with _augmented_syspath(
("tools/distrib/python/grpcio_tools/grpc_tools/test/",)
):
protos = protoc._protos("simple.proto")
services = protoc._services("simple.proto")
assert services.SimpleMessageServiceStub is not None
def _test_import_services_without_protos():
from grpc_tools import protoc
with _augmented_syspath(
("tools/distrib/python/grpcio_tools/grpc_tools/test/",)
):
services = protoc._services("simple.proto")
assert services.SimpleMessageServiceStub is not None
def _test_proto_module_imported_once():
from grpc_tools import protoc
with _augmented_syspath(
("tools/distrib/python/grpcio_tools/grpc_tools/test/",)
):
protos = protoc._protos("simple.proto")
services = protoc._services("simple.proto")
complicated_protos = protoc._protos("complicated.proto")
simple_message = protos.SimpleMessage()
complicated_message = complicated_protos.ComplicatedMessage()
assert (
simple_message.simpler_message.simplest_message.__class__
is complicated_message.simplest_message.__class__
)
def _test_static_dynamic_combo():
with _augmented_syspath(
("tools/distrib/python/grpcio_tools/grpc_tools/test/",)
):
from grpc_tools import protoc # isort:skip
import complicated_pb2
protos = protoc._protos("simple.proto")
static_message = complicated_pb2.ComplicatedMessage()
dynamic_message = protos.SimpleMessage()
assert (
dynamic_message.simpler_message.simplest_message.__class__
is static_message.simplest_message.__class__
)
def _test_combined_import():
from grpc_tools import protoc
protos, services = protoc._protos_and_services("simple.proto")
assert protos.SimpleMessage is not None
assert services.SimpleMessageServiceStub is not None
def _test_syntax_errors():
from grpc_tools import protoc
try:
protos = protoc._protos("flawed.proto")
except Exception as e:
error_str = str(e)
assert "flawed.proto" in error_str
assert "17:23" in error_str
assert "21:23" in error_str
else:
assert False, "Compile error expected. None occurred."
class ProtocTest(unittest.TestCase):
def test_import_protos(self):
_run_in_subprocess(_test_import_protos)
def test_import_services(self):
_run_in_subprocess(_test_import_services)
def test_import_services_without_protos(self):
_run_in_subprocess(_test_import_services_without_protos)
def test_proto_module_imported_once(self):
_run_in_subprocess(_test_proto_module_imported_once)
def test_static_dynamic_combo(self):
_run_in_subprocess(_test_static_dynamic_combo)
def test_combined_import(self):
_run_in_subprocess(_test_combined_import)
def test_syntax_errors(self):
_run_in_subprocess(_test_syntax_errors)
if __name__ == "__main__":
unittest.main()
| 5,150
| 28.267045
| 82
|
py
|
grpc
|
grpc-master/tools/distrib/c-ish/check_documentation.py
|
#!/usr/bin/env python3
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# check for directory level 'README.md' files
# check that all implementation and interface files have a \file doxygen comment
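# For reference, a conforming C/C++ file carries a Doxygen \file comment,
# e.g. (illustrative sketch, not a real file header):
#   /** \file grpc/grpc.h
#    *  \brief ...
#    */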
import os
import sys
# where do we run
_TARGET_DIRS = [
"include/grpc",
"include/grpc++",
"src/core",
"src/cpp",
"test/core",
"test/cpp",
]
# which file extensions do we care about
_INTERESTING_EXTENSIONS = [".c", ".h", ".cc"]
# find our home
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../../.."))
os.chdir(_ROOT)
errors = 0
# walk directories, find things
printed_banner = False
for target_dir in _TARGET_DIRS:
for root, dirs, filenames in os.walk(target_dir):
if "README.md" not in filenames:
if not printed_banner:
print("Missing README.md")
print("=================")
printed_banner = True
print(root)
errors += 1
if printed_banner:
print()
printed_banner = False
for target_dir in _TARGET_DIRS:
for root, dirs, filenames in os.walk(target_dir):
for filename in filenames:
if os.path.splitext(filename)[1] not in _INTERESTING_EXTENSIONS:
continue
path = os.path.join(root, filename)
with open(path) as f:
contents = f.read()
if "\\file" not in contents:
if not printed_banner:
print("Missing \\file comment")
print("======================")
printed_banner = True
print(path)
errors += 1
assert errors == 0, "error count = %d" % errors
| 2,224
| 29.479452
| 80
|
py
|
grpc
|
grpc-master/tools/debug/core/error_ref_leak.py
|
#!/usr/bin/env python3
#
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Reads stdin to find error_refcount log lines, and prints reference leaks
# to stdout
# usage: python error_ref_leak.py < logfile.txt
import re
import sys
data = sys.stdin.readlines()
errs = []
for line in data:
# if we care about the line
if re.search(r"error.cc", line):
# str manip to cut off left part of log line
line = line.partition("error.cc:")[-1]
line = re.sub(r"\d+] ", r"", line)
line = line.strip().split()
err = line[0].strip(":")
if line[1] == "create":
assert err not in errs
errs.append(err)
elif line[0] == "realloc":
errs.remove(line[1])
errs.append(line[3])
# explicitly look for the last dereference
elif line[1] == "1" and line[3] == "0":
assert err in errs
errs.remove(err)
print(("leaked:", errs))
| 1,480
| 29.854167
| 74
|
py
|
grpc
|
grpc-master/tools/debug/core/chttp2_ref_leak.py
|
#!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Reads stdin to find chttp2_refcount log lines, and prints reference leaks
# to stdout
import collections
import re
import sys
def new_obj():
return ["destroy"]
outstanding = collections.defaultdict(new_obj)
# Sample log line:
# chttp2:unref:0x629000005200 2->1 destroy [src/core/ext/transport/chttp2/transport/chttp2_transport.c:599]
for line in sys.stdin:
m = re.search(
r"chttp2:( ref|unref):0x([a-fA-F0-9]+) [^ ]+ ([^[]+) \[(.*)\]", line
)
if m:
if m.group(1) == " ref":
outstanding[m.group(2)].append(m.group(3))
else:
outstanding[m.group(2)].remove(m.group(3))
for obj, remaining in list(outstanding.items()):
if remaining:
print(("LEAKED: %s %r" % (obj, remaining)))
| 1,362
| 28.630435
| 107
|
py
|
grpc
|
grpc-master/tools/profiling/bloat/bloat_diff.py
|
#!/usr/bin/env python3
#
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import csv
import glob
import math
import multiprocessing
import os
import pathlib
import shutil
import subprocess
import sys
sys.path.append(
os.path.join(
os.path.dirname(sys.argv[0]), "..", "..", "run_tests", "python_utils"
)
)
import check_on_pr
argp = argparse.ArgumentParser(description="Perform diff on microbenchmarks")
argp.add_argument(
"-d",
"--diff_base",
type=str,
help="Commit or branch to compare the current one to",
)
argp.add_argument("-j", "--jobs", type=int, default=multiprocessing.cpu_count())
args = argp.parse_args()
# the libraries for which the bloat difference is calculated
LIBS = [
"libgrpc.so",
"libgrpc++.so",
]
def _build(output_dir):
"""Perform the cmake build under the output_dir."""
shutil.rmtree(output_dir, ignore_errors=True)
subprocess.check_call("mkdir -p %s" % output_dir, shell=True, cwd=".")
subprocess.check_call(
[
"cmake",
"-DgRPC_BUILD_TESTS=OFF",
"-DBUILD_SHARED_LIBS=ON",
"-DCMAKE_BUILD_TYPE=RelWithDebInfo",
'-DCMAKE_C_FLAGS="-gsplit-dwarf"',
'-DCMAKE_CXX_FLAGS="-gsplit-dwarf"',
"..",
],
cwd=output_dir,
)
subprocess.check_call("make -j%d" % args.jobs, shell=True, cwd=output_dir)
def _rank_diff_bytes(diff_bytes):
"""Determine how significant diff_bytes is, and return a simple integer representing that"""
mul = 1
if diff_bytes < 0:
mul = -1
diff_bytes = -diff_bytes
if diff_bytes < 2 * 1024:
return 0
if diff_bytes < 16 * 1024:
return 1 * mul
if diff_bytes < 128 * 1024:
return 2 * mul
return 3 * mul
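# Worked examples of the ranking above (sketch):
#   _rank_diff_bytes(1000)    -> 0   (< 2 KiB: insignificant)
#   _rank_diff_bytes(20000)   -> 2   (between 16 KiB and 128 KiB)
#   _rank_diff_bytes(-200000) -> -3  (large decrease, sign preserved)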
_build("bloat_diff_new")
if args.diff_base:
where_am_i = (
subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"])
.decode()
.strip()
)
# checkout the diff base (="old")
subprocess.check_call(["git", "checkout", args.diff_base])
subprocess.check_call(["git", "submodule", "update"])
try:
_build("bloat_diff_old")
finally:
# restore the original revision (="new")
subprocess.check_call(["git", "checkout", where_am_i])
subprocess.check_call(["git", "submodule", "update"])
pathlib.Path("bloaty-build").mkdir(exist_ok=True)
subprocess.check_call(
["cmake", "-G", "Unix Makefiles", "../third_party/bloaty"],
cwd="bloaty-build",
)
subprocess.check_call("make -j%d" % args.jobs, shell=True, cwd="bloaty-build")
text = ""
diff_size = 0
for lib in LIBS:
text += (
"****************************************************************\n\n"
)
text += lib + "\n\n"
old_version = glob.glob("bloat_diff_old/%s" % lib)
new_version = glob.glob("bloat_diff_new/%s" % lib)
for filename in [old_version, new_version]:
if filename:
subprocess.check_call(
"strip %s -o %s.stripped" % (filename[0], filename[0]),
shell=True,
)
assert len(new_version) == 1
cmd = "bloaty-build/bloaty -d compileunits,symbols"
if old_version:
assert len(old_version) == 1
text += subprocess.check_output(
"%s -n 0 --debug-file=%s --debug-file=%s %s.stripped -- %s.stripped"
% (
cmd,
new_version[0],
old_version[0],
new_version[0],
old_version[0],
),
shell=True,
).decode()
sections = [
x
for x in csv.reader(
subprocess.check_output(
"bloaty-build/bloaty -n 0 --csv %s -- %s"
% (new_version[0], old_version[0]),
shell=True,
)
.decode()
.splitlines()
)
]
print(sections)
for section in sections[1:]:
# skip debug sections for bloat severity calculation
if section[0].startswith(".debug"):
continue
# skip dynamic loader sections too
if section[0].startswith(".dyn"):
continue
diff_size += int(section[2])
else:
text += subprocess.check_output(
"%s %s.stripped -n 0 --debug-file=%s"
% (cmd, new_version[0], new_version[0]),
shell=True,
).decode()
text += "\n\n"
severity = _rank_diff_bytes(diff_size)
print("SEVERITY: %d" % severity)
print(text)
check_on_pr.check_on_pr("Bloat Difference", "```\n%s\n```" % text)
check_on_pr.label_significance_on_pr("bloat", severity)
| 5,268
| 28.435754
| 96
|
py
|
grpc
|
grpc-master/tools/profiling/memory/memory_diff.py
|
#!/usr/bin/env python3
#
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import csv
import glob
import math
import multiprocessing
import os
import pathlib
import re
import shutil
import subprocess
import sys
sys.path.append(
os.path.join(
os.path.dirname(sys.argv[0]), "..", "..", "run_tests", "python_utils"
)
)
import check_on_pr
argp = argparse.ArgumentParser(description="Perform diff on memory benchmarks")
argp.add_argument(
"-d",
"--diff_base",
type=str,
help="Commit or branch to compare the current one to",
)
argp.add_argument("-j", "--jobs", type=int, default=multiprocessing.cpu_count())
args = argp.parse_args()
_INTERESTING = {
"call/client": (
rb"client call memory usage: ([0-9\.]+) bytes per call",
float,
),
"call/server": (
rb"server call memory usage: ([0-9\.]+) bytes per call",
float,
),
"channel/client": (
rb"client channel memory usage: ([0-9\.]+) bytes per channel",
float,
),
"channel/server": (
rb"server channel memory usage: ([0-9\.]+) bytes per channel",
float,
),
}
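# The patterns above match benchmark output lines of the form (values are
# illustrative and derived from the regexes, not from a captured run):
#   client call memory usage: 123.4 bytes per call
#   server channel memory usage: 4567.8 bytes per channel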
_SCENARIOS = {
"default": [],
"minstack": ["--scenario_config=minstack"],
}
_BENCHMARKS = {
"call": ["--benchmark_names=call", "--size=50000"],
"channel": ["--benchmark_names=channel", "--size=10000"],
}
def _run():
"""Build with Bazel, then run, and extract interesting lines from the output."""
subprocess.check_call(
[
"tools/bazel",
"build",
"-c",
"opt",
"test/core/memory_usage/memory_usage_test",
]
)
ret = {}
for name, benchmark_args in _BENCHMARKS.items():
for scenario, extra_args in _SCENARIOS.items():
# TODO(chenancy) Remove when minstack is implemented for channel
if name == "channel" and scenario == "minstack":
continue
try:
output = subprocess.check_output(
[
"bazel-bin/test/core/memory_usage/memory_usage_test",
]
+ benchmark_args
+ extra_args
)
except subprocess.CalledProcessError as e:
print("Error running benchmark:", e)
continue
for line in output.splitlines():
for key, (pattern, conversion) in _INTERESTING.items():
m = re.match(pattern, line)
if m:
ret[scenario + ": " + key] = conversion(m.group(1))
return ret
cur = _run()
old = None
if args.diff_base:
where_am_i = (
subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"])
.decode()
.strip()
)
# checkout the diff base (="old")
subprocess.check_call(["git", "checkout", args.diff_base])
try:
old = _run()
finally:
# restore the original revision (="cur")
subprocess.check_call(["git", "checkout", where_am_i])
text = ""
if old is None:
print(cur)
for key, value in sorted(cur.items()):
text += "{}: {}\n".format(key, value)
else:
print(cur, old)
call_diff_size = 0
channel_diff_size = 0
for scenario in _SCENARIOS.keys():
for key, value in sorted(_INTERESTING.items()):
key = scenario + ": " + key
if key in cur:
if key not in old:
text += "{}: {}\n".format(key, cur[key])
else:
text += "{}: {} -> {}\n".format(key, old[key], cur[key])
if "call" in key:
call_diff_size += cur[key] - old[key]
else:
channel_diff_size += cur[key] - old[key]
print("CALL_DIFF_SIZE: %f" % call_diff_size)
print("CHANNEL_DIFF_SIZE: %f" % channel_diff_size)
check_on_pr.label_increase_decrease_on_pr(
"per-call-memory", call_diff_size, 64
)
check_on_pr.label_increase_decrease_on_pr(
"per-channel-memory", channel_diff_size, 1000
)
# TODO(chennancy): Change significant value when minstack also runs for channel
print(text)
check_on_pr.check_on_pr("Memory Difference", "```\n%s\n```" % text)
| 4,839
| 28.156627
| 84
|
py
|
grpc
|
grpc-master/tools/profiling/ios_bin/binary_size.py
|
#!/usr/bin/env python3
#
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import multiprocessing
import os
import shutil
import subprocess
import sys
from parse_link_map import parse_link_map
sys.path.append(
os.path.join(
os.path.dirname(sys.argv[0]), "..", "..", "run_tests", "python_utils"
)
)
import check_on_pr
# Only show diffs of 1KB or greater
_DIFF_THRESHOLD = 1000
_SIZE_LABELS = ("Core", "ObjC", "BoringSSL", "Protobuf", "Total")
argp = argparse.ArgumentParser(
description="Binary size diff of gRPC Objective-C sample"
)
argp.add_argument(
"-d",
"--diff_base",
type=str,
help="Commit or branch to compare the current one to",
)
args = argp.parse_args()
def dir_size(dir):
total = 0
for dirpath, dirnames, filenames in os.walk(dir):
for f in filenames:
fp = os.path.join(dirpath, f)
total += os.stat(fp).st_size
return total
def get_size(where):
build_dir = "src/objective-c/examples/Sample/Build/Build-%s/" % where
link_map_filename = "Build/Intermediates.noindex/Sample.build/Release-iphoneos/Sample.build/Sample-LinkMap-normal-arm64.txt"
# IMPORTANT: order needs to match labels in _SIZE_LABELS
return parse_link_map(build_dir + link_map_filename)
def build(where):
subprocess.check_call(["make", "clean"])
shutil.rmtree(
"src/objective-c/examples/Sample/Build/Build-%s" % where,
ignore_errors=True,
)
subprocess.check_call(
(
"CONFIG=opt EXAMPLE_PATH=src/objective-c/examples/Sample"
" SCHEME=Sample ./build_one_example.sh"
),
shell=True,
cwd="src/objective-c/tests",
)
os.rename(
"src/objective-c/examples/Sample/Build/Build",
"src/objective-c/examples/Sample/Build/Build-%s" % where,
)
def _render_row(new, label, old):
"""Render row in 3-column output format."""
try:
formatted_new = "{:,}".format(int(new))
    except (TypeError, ValueError):
formatted_new = new
try:
formatted_old = "{:,}".format(int(old))
    except (TypeError, ValueError):
formatted_old = old
return "{:>15}{:>15}{:>15}\n".format(formatted_new, label, formatted_old)
def _diff_sign(new, old, diff_threshold=None):
"""Generate diff sign based on values"""
diff_sign = " "
if (
diff_threshold is not None
        and abs(new - old) >= diff_threshold
):
diff_sign += "!"
if new > old:
diff_sign += "(>)"
elif new < old:
diff_sign += "(<)"
else:
diff_sign += "(=)"
return diff_sign
text = "Objective-C binary sizes\n"
build("new")
new_size = get_size("new")
old_size = None
if args.diff_base:
old = "old"
where_am_i = (
subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"])
.decode()
.strip()
)
subprocess.check_call(["git", "checkout", "--", "."])
subprocess.check_call(["git", "checkout", args.diff_base])
subprocess.check_call(["git", "submodule", "update", "--force"])
try:
build("old")
old_size = get_size("old")
finally:
subprocess.check_call(["git", "checkout", "--", "."])
subprocess.check_call(["git", "checkout", where_am_i])
subprocess.check_call(["git", "submodule", "update", "--force"])
text += "**********************STATIC******************\n"
text += _render_row("New size", "", "Old size")
if old_size is None:
for i in range(0, len(_SIZE_LABELS)):
if i == len(_SIZE_LABELS) - 1:
# skip line before rendering "Total"
text += "\n"
text += _render_row(new_size[i], _SIZE_LABELS[i], "")
else:
has_diff = False
# go through all labels but "Total"
for i in range(0, len(_SIZE_LABELS) - 1):
if abs(new_size[i] - old_size[i]) >= _DIFF_THRESHOLD:
has_diff = True
diff_sign = _diff_sign(
new_size[i], old_size[i], diff_threshold=_DIFF_THRESHOLD
)
text += _render_row(
new_size[i], _SIZE_LABELS[i] + diff_sign, old_size[i]
)
# render the "Total"
i = len(_SIZE_LABELS) - 1
diff_sign = _diff_sign(new_size[i], old_size[i])
# skip line before rendering "Total"
text += "\n"
text += _render_row(new_size[i], _SIZE_LABELS[i] + diff_sign, old_size[i])
if not has_diff:
text += "\n No significant differences in binary sizes\n"
text += "\n"
print(text)
check_on_pr.check_on_pr("ObjC Binary Size", "```\n%s\n```" % text)
| 5,065
| 27.621469
| 128
|
py
|
grpc
|
grpc-master/tools/profiling/ios_bin/parse_link_map.py
|
#!/usr/bin/python
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script analyzes the link map file generated by Xcode. It calculates and
# prints out the sizes of each dependent library and the total sizes of the
# symbols.
# The script takes one parameter, which is the path to the link map file.
import re
import sys
def parse_link_map(filename):
table_tag = {}
state = "start"
table_stats_symbol = {}
table_stats_dead = {}
section_total_size = 0
symbol_total_size = 0
boringssl_size = 0
core_size = 0
objc_size = 0
protobuf_size = 0
lines = open(filename, encoding="utf-8", errors="ignore").readlines()
for line in lines:
line_stripped = line[:-1]
if "# Object files:" == line_stripped:
state = "object"
continue
elif "# Sections:" == line_stripped:
state = "section"
continue
elif "# Symbols:" == line_stripped:
state = "symbol"
continue
elif "# Dead Stripped Symbols:" == line_stripped:
state = "dead"
continue
if state == "object":
segs = re.search("(\[ *[0-9]*\]) (.*)", line_stripped)
table_tag[segs.group(1)] = segs.group(2)
if state == "section":
if len(line_stripped) == 0 or line_stripped[0] == "#":
continue
segs = re.search("^(.+?)\s+(.+?)\s+.*", line_stripped)
section_total_size += int(segs.group(2), 16)
if state == "symbol":
if len(line_stripped) == 0 or line_stripped[0] == "#":
continue
segs = re.search("^.+?\s+(.+?)\s+(\[.+?\]).*", line_stripped)
if not segs:
continue
target = table_tag[segs.group(2)]
            target_stripped = re.search(r"^(.*?)(\(.+?\))?$", target).group(1)
size = int(segs.group(1), 16)
            if target_stripped not in table_stats_symbol:
table_stats_symbol[target_stripped] = 0
table_stats_symbol[target_stripped] += size
if "BoringSSL" in target_stripped:
boringssl_size += size
elif "libgRPC-Core" in target_stripped:
core_size += size
elif (
"libgRPC-RxLibrary" in target_stripped
or "libgRPC" in target_stripped
or "libgRPC-ProtoLibrary" in target_stripped
):
objc_size += size
elif "libProtobuf" in target_stripped:
protobuf_size += size
for target in table_stats_symbol:
symbol_total_size += table_stats_symbol[target]
return (
core_size,
objc_size,
boringssl_size,
protobuf_size,
symbol_total_size,
)
def main():
filename = sys.argv[1]
(
core_size,
objc_size,
boringssl_size,
protobuf_size,
total_size,
) = parse_link_map(filename)
print("Core size:{:,}".format(core_size))
print("ObjC size:{:,}".format(objc_size))
print("BoringSSL size:{:,}".format(boringssl_size))
print("Protobuf size:{:,}\n".format(protobuf_size))
print("Total size:{:,}".format(total_size))
if __name__ == "__main__":
main()
| 3,819
| 30.833333
| 77
|
py
|
grpc
|
grpc-master/tools/profiling/microbenchmarks/bm_json.py
|
#!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Utilities for manipulating JSON data that represents microbenchmark results.
import os
# template arguments and dynamic arguments of individual benchmark types
# Example benchmark name: "BM_UnaryPingPong<TCP, NoOpMutator, NoOpMutator>/0/0"
_BM_SPECS = {
"BM_UnaryPingPong": {
"tpl": ["fixture", "client_mutator", "server_mutator"],
"dyn": ["request_size", "response_size"],
},
"BM_PumpStreamClientToServer": {
"tpl": ["fixture"],
"dyn": ["request_size"],
},
"BM_PumpStreamServerToClient": {
"tpl": ["fixture"],
"dyn": ["request_size"],
},
"BM_StreamingPingPong": {
"tpl": ["fixture", "client_mutator", "server_mutator"],
"dyn": ["request_size", "request_count"],
},
"BM_StreamingPingPongMsgs": {
"tpl": ["fixture", "client_mutator", "server_mutator"],
"dyn": ["request_size"],
},
"BM_PumpStreamServerToClient_Trickle": {
"tpl": [],
"dyn": ["request_size", "bandwidth_kilobits"],
},
"BM_PumpUnbalancedUnary_Trickle": {
"tpl": [],
"dyn": ["cli_req_size", "svr_req_size", "bandwidth_kilobits"],
},
"BM_ErrorStringOnNewError": {
"tpl": ["fixture"],
"dyn": [],
},
"BM_ErrorStringRepeatedly": {
"tpl": ["fixture"],
"dyn": [],
},
"BM_ErrorGetStatus": {
"tpl": ["fixture"],
"dyn": [],
},
"BM_ErrorGetStatusCode": {
"tpl": ["fixture"],
"dyn": [],
},
"BM_ErrorHttpError": {
"tpl": ["fixture"],
"dyn": [],
},
"BM_HasClearGrpcStatus": {
"tpl": ["fixture"],
"dyn": [],
},
"BM_IsolatedFilter": {
"tpl": ["fixture", "client_mutator"],
"dyn": [],
},
"BM_HpackEncoderEncodeHeader": {
"tpl": ["fixture"],
"dyn": ["end_of_stream", "request_size"],
},
"BM_HpackParserParseHeader": {
"tpl": ["fixture"],
"dyn": [],
},
"BM_CallCreateDestroy": {
"tpl": ["fixture"],
"dyn": [],
},
"BM_Zalloc": {
"tpl": [],
"dyn": ["request_size"],
},
"BM_PollEmptyPollset_SpeedOfLight": {
"tpl": [],
"dyn": ["request_size", "request_count"],
},
"BM_StreamCreateSendInitialMetadataDestroy": {
"tpl": ["fixture"],
"dyn": [],
},
"BM_TransportStreamSend": {
"tpl": [],
"dyn": ["request_size"],
},
"BM_TransportStreamRecv": {
"tpl": [],
"dyn": ["request_size"],
},
"BM_StreamingPingPongWithCoalescingApi": {
"tpl": ["fixture", "client_mutator", "server_mutator"],
"dyn": ["request_size", "request_count", "end_of_stream"],
},
"BM_Base16SomeStuff": {
"tpl": [],
"dyn": ["request_size"],
},
}
def numericalize(s):
"""Convert abbreviations like '100M' or '10k' to a number."""
if not s:
return ""
if s[-1] == "k":
return float(s[:-1]) * 1024
if s[-1] == "M":
return float(s[:-1]) * 1024 * 1024
if 0 <= (ord(s[-1]) - ord("0")) <= 9:
return float(s)
assert "not a number: %s" % s
def parse_name(name):
cpp_name = name
if "<" not in name and "/" not in name and name not in _BM_SPECS:
return {"name": name, "cpp_name": name}
rest = name
out = {}
tpl_args = []
dyn_args = []
if "<" in rest:
tpl_bit = rest[rest.find("<") + 1 : rest.rfind(">")]
arg = ""
nesting = 0
for c in tpl_bit:
if c == "<":
nesting += 1
arg += c
elif c == ">":
nesting -= 1
arg += c
elif c == ",":
if nesting == 0:
tpl_args.append(arg.strip())
arg = ""
else:
arg += c
else:
arg += c
tpl_args.append(arg.strip())
rest = rest[: rest.find("<")] + rest[rest.rfind(">") + 1 :]
if "/" in rest:
s = rest.split("/")
rest = s[0]
dyn_args = s[1:]
name = rest
assert name in _BM_SPECS, "_BM_SPECS needs to be expanded for %s" % name
assert len(dyn_args) == len(_BM_SPECS[name]["dyn"])
assert len(tpl_args) == len(_BM_SPECS[name]["tpl"])
out["name"] = name
out["cpp_name"] = cpp_name
out.update(
dict(
(k, numericalize(v))
for k, v in zip(_BM_SPECS[name]["dyn"], dyn_args)
)
)
out.update(dict(zip(_BM_SPECS[name]["tpl"], tpl_args)))
return out
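# Worked example of the parsing above (sketch):
#   parse_name("BM_UnaryPingPong<TCP, NoOpMutator, NoOpMutator>/0/0")
# yields a dict along the lines of
#   {"name": "BM_UnaryPingPong",
#    "cpp_name": "BM_UnaryPingPong<TCP, NoOpMutator, NoOpMutator>/0/0",
#    "fixture": "TCP", "client_mutator": "NoOpMutator",
#    "server_mutator": "NoOpMutator",
#    "request_size": 0.0, "response_size": 0.0}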
def expand_json(js):
if not js:
        return
for bm in js["benchmarks"]:
if bm["name"].endswith("_stddev") or bm["name"].endswith("_mean"):
continue
context = js["context"]
if "label" in bm:
labels_list = [
s.split(":")
for s in bm["label"].strip().split(" ")
if len(s) and s[0] != "#"
]
for el in labels_list:
el[0] = el[0].replace("/iter", "_per_iteration")
labels = dict(labels_list)
else:
labels = {}
# TODO(jtattermusch): grabbing kokoro env values shouldn't be buried
# deep in the JSON conversion logic.
# Link the data to a kokoro job run by adding
# well known kokoro env variables as metadata for each row
row = {
"jenkins_build": os.environ.get("KOKORO_BUILD_NUMBER", ""),
"jenkins_job": os.environ.get("KOKORO_JOB_NAME", ""),
}
row.update(context)
row.update(bm)
row.update(parse_name(row["name"]))
row.update(labels)
yield row
| 6,352
| 28.548837
| 79
|
py
|
grpc
|
grpc-master/tools/profiling/microbenchmarks/bm2bq.py
|
#!/usr/bin/env python3
#
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Convert google-benchmark json output to something that can be uploaded to
# BigQuery
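# Usage sketch (inferred from the argv handling below):
#   bm2bq.py --schema         # print the BigQuery schema and exit
#   bm2bq.py benchmark.json   # convert benchmark JSON to CSV rows on stdout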
import csv
import json
import subprocess
import sys
import bm_json
columns = []
for row in json.loads(
# TODO(jtattermusch): make sure the dataset name is not hardcoded
subprocess.check_output(
["bq", "--format=json", "show", "microbenchmarks.microbenchmarks"]
)
)["schema"]["fields"]:
columns.append((row["name"], row["type"].lower()))
SANITIZE = {
"integer": int,
"float": float,
"boolean": bool,
"string": str,
"timestamp": str,
}
# TODO(jtattermusch): add proper argparse argument, rather than trying
# to emulate with manual argv inspection.
if sys.argv[1] == "--schema":
print(",\n".join("%s:%s" % (k, t.upper()) for k, t in columns))
sys.exit(0)
with open(sys.argv[1]) as f:
js = json.loads(f.read())
if len(sys.argv) > 2:
with open(sys.argv[2]) as f:
js2 = json.loads(f.read())
else:
js2 = None
# TODO(jtattermusch): write directly to a file instead of stdout
writer = csv.DictWriter(sys.stdout, [c for c, t in columns])
for row in bm_json.expand_json(js):
sane_row = {}
for name, sql_type in columns:
if name in row:
if row[name] == "":
continue
sane_row[name] = SANITIZE[sql_type](row[name])
writer.writerow(sane_row)
| 1,965
| 26.690141
| 75
|
py
|
grpc
|
grpc-master/tools/profiling/microbenchmarks/bm_diff/bm_run.py
|
#!/usr/bin/env python3
#
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Python utility to run opt and counters benchmarks and save json output """
import argparse
import itertools
import multiprocessing
import os
import random
import subprocess
import sys
import bm_constants
import jobset
sys.path.append(
os.path.join(
os.path.dirname(sys.argv[0]),
"..",
"..",
"..",
"run_tests",
"python_utils",
)
)
def _args():
argp = argparse.ArgumentParser(description="Runs microbenchmarks")
argp.add_argument(
"-b",
"--benchmarks",
nargs="+",
choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
help="Benchmarks to run",
)
argp.add_argument(
"-j",
"--jobs",
type=int,
default=multiprocessing.cpu_count(),
help="Number of CPUs to use",
)
argp.add_argument(
"-n",
"--name",
type=str,
help=(
"Unique name of the build to run. Needs to match the handle passed"
" to bm_build.py"
),
)
argp.add_argument(
"-r",
"--regex",
type=str,
default="",
help="Regex to filter benchmarks run",
)
argp.add_argument(
"-l",
"--loops",
type=int,
default=20,
help=(
"Number of times to loops the benchmarks. More loops cuts down on"
" noise"
),
)
argp.add_argument("--counters", dest="counters", action="store_true")
argp.add_argument("--no-counters", dest="counters", action="store_false")
argp.set_defaults(counters=True)
args = argp.parse_args()
assert args.name
if args.loops < 3:
print(
"WARNING: This run will likely be noisy. Increase loops to at "
"least 3."
)
return args
def _collect_bm_data(bm, cfg, name, regex, idx, loops):
jobs_list = []
for line in subprocess.check_output(
[
"bm_diff_%s/%s/%s" % (name, cfg, bm),
"--benchmark_list_tests",
"--benchmark_filter=%s" % regex,
]
).splitlines():
line = line.decode("UTF-8")
stripped_line = (
line.strip()
.replace("/", "_")
.replace("<", "_")
.replace(">", "_")
.replace(", ", "_")
)
cmd = [
"bm_diff_%s/%s/%s" % (name, cfg, bm),
"--benchmark_filter=^%s$" % line,
"--benchmark_out=%s.%s.%s.%s.%d.json"
% (bm, stripped_line, cfg, name, idx),
"--benchmark_out_format=json",
]
jobs_list.append(
jobset.JobSpec(
cmd,
shortname="%s %s %s %s %d/%d"
% (bm, line, cfg, name, idx + 1, loops),
verbose_success=True,
cpu_cost=2,
timeout_seconds=60 * 60,
)
) # one hour
return jobs_list
def create_jobs(name, benchmarks, loops, regex):
jobs_list = []
for loop in range(0, loops):
for bm in benchmarks:
jobs_list += _collect_bm_data(bm, "opt", name, regex, loop, loops)
random.shuffle(jobs_list, random.SystemRandom().random)
return jobs_list
if __name__ == "__main__":
args = _args()
jobs_list = create_jobs(
        args.name, args.benchmarks, args.loops, args.regex
)
jobset.run(jobs_list, maxjobs=args.jobs)
| 4,083
| 26.409396
| 79
|
py
|
grpc
|
grpc-master/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
|
#!/usr/bin/env python3
#
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Configurable constants for the bm_*.py family """
_AVAILABLE_BENCHMARK_TESTS = [
"bm_fullstack_unary_ping_pong",
"bm_fullstack_streaming_ping_pong",
"bm_fullstack_streaming_pump",
"bm_closure",
"bm_cq",
"bm_call_create",
"bm_chttp2_hpack",
"bm_chttp2_transport",
"bm_pollset",
]
_INTERESTING = (
"cpu_time",
"real_time",
"locks_per_iteration",
"allocs_per_iteration",
"writes_per_iteration",
"atm_cas_per_iteration",
"atm_add_per_iteration",
"nows_per_iteration",
"cli_transport_stalls_per_iteration",
"cli_stream_stalls_per_iteration",
"svr_transport_stalls_per_iteration",
"svr_stream_stalls_per_iteration",
"http2_pings_sent_per_iteration",
)
| 1,342
| 28.844444
| 74
|
py
|
grpc
|
grpc-master/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
|
#!/usr/bin/env python3
#
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Computes the diff between two bm runs and outputs significant results """
import argparse
import collections
import json
import os
import subprocess
import sys
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), ".."))
import bm_constants
import bm_json
import bm_speedup
import tabulate
verbose = False
def _median(ary):
assert len(ary)
ary = sorted(ary)
n = len(ary)
if n % 2 == 0:
return (ary[(n - 1) // 2] + ary[(n - 1) // 2 + 1]) / 2.0
else:
return ary[n // 2]
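# Illustrative check of the even/odd handling above: _median([1.0, 2.0, 3.0, 4.0])
# averages the two middle samples and returns 2.5, while _median([1.0, 2.0, 3.0])
# returns the middle sample, 2.0.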
def _args():
argp = argparse.ArgumentParser(
description="Perform diff on microbenchmarks"
)
argp.add_argument(
"-t",
"--track",
choices=sorted(bm_constants._INTERESTING),
nargs="+",
default=sorted(bm_constants._INTERESTING),
help="Which metrics to track",
)
argp.add_argument(
"-b",
"--benchmarks",
nargs="+",
choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
help="Which benchmarks to run",
)
argp.add_argument(
"-l",
"--loops",
type=int,
default=20,
help=(
"Number of times to loops the benchmarks. Must match what was"
" passed to bm_run.py"
),
)
argp.add_argument(
"-r",
"--regex",
type=str,
default="",
help="Regex to filter benchmarks run",
)
argp.add_argument("-n", "--new", type=str, help="New benchmark name")
argp.add_argument("-o", "--old", type=str, help="Old benchmark name")
argp.add_argument(
"-v", "--verbose", type=bool, help="Print details of before/after"
)
args = argp.parse_args()
global verbose
if args.verbose:
verbose = True
assert args.new
assert args.old
return args
def _maybe_print(str):
if verbose:
print(str)
class Benchmark:
def __init__(self):
self.samples = {
True: collections.defaultdict(list),
False: collections.defaultdict(list),
}
self.final = {}
self.speedup = {}
def add_sample(self, track, data, new):
for f in track:
if f in data:
self.samples[new][f].append(float(data[f]))
def process(self, track, new_name, old_name):
for f in sorted(track):
new = self.samples[True][f]
old = self.samples[False][f]
if not new or not old:
continue
mdn_diff = abs(_median(new) - _median(old))
_maybe_print(
"%s: %s=%r %s=%r mdn_diff=%r"
% (f, new_name, new, old_name, old, mdn_diff)
)
s = bm_speedup.speedup(new, old, 1e-5)
self.speedup[f] = s
if abs(s) > 3:
if mdn_diff > 0.5:
self.final[f] = "%+d%%" % s
return self.final.keys()
def skip(self):
return not self.final
def row(self, flds):
return [self.final[f] if f in self.final else "" for f in flds]
def speedup(self, name):
if name in self.speedup:
return self.speedup[name]
return None
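# The Benchmark class above accumulates per-metric samples for the new and old
# runs via add_sample(); process() then keeps only metrics whose speedup
# estimate exceeds 3% in magnitude and whose median shift exceeds 0.5 (in the
# metric's own units), and formats them as signed percentages for the final
# table.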
def _read_json(filename, badjson_files, nonexistant_files):
stripped = ".".join(filename.split(".")[:-2])
try:
with open(filename) as f:
r = f.read()
return json.loads(r)
except IOError as e:
if stripped in nonexistant_files:
nonexistant_files[stripped] += 1
else:
nonexistant_files[stripped] = 1
return None
except ValueError as e:
print(r)
if stripped in badjson_files:
badjson_files[stripped] += 1
else:
badjson_files[stripped] = 1
return None
def fmt_dict(d):
return "".join([" " + k + ": " + str(d[k]) + "\n" for k in d])
def diff(bms, loops, regex, track, old, new):
benchmarks = collections.defaultdict(Benchmark)
badjson_files = {}
nonexistant_files = {}
for bm in bms:
for loop in range(0, loops):
for line in subprocess.check_output(
[
"bm_diff_%s/opt/%s" % (old, bm),
"--benchmark_list_tests",
"--benchmark_filter=%s" % regex,
]
).splitlines():
line = line.decode("UTF-8")
stripped_line = (
line.strip()
.replace("/", "_")
.replace("<", "_")
.replace(">", "_")
.replace(", ", "_")
)
js_new_opt = _read_json(
"%s.%s.opt.%s.%d.json" % (bm, stripped_line, new, loop),
badjson_files,
nonexistant_files,
)
js_old_opt = _read_json(
"%s.%s.opt.%s.%d.json" % (bm, stripped_line, old, loop),
badjson_files,
nonexistant_files,
)
if js_new_opt:
for row in bm_json.expand_json(js_new_opt):
name = row["cpp_name"]
if name.endswith("_mean") or name.endswith("_stddev"):
continue
benchmarks[name].add_sample(track, row, True)
if js_old_opt:
for row in bm_json.expand_json(js_old_opt):
name = row["cpp_name"]
if name.endswith("_mean") or name.endswith("_stddev"):
continue
benchmarks[name].add_sample(track, row, False)
really_interesting = set()
for name, bm in benchmarks.items():
_maybe_print(name)
really_interesting.update(bm.process(track, new, old))
fields = [f for f in track if f in really_interesting]
# figure out the significance of the changes... right now we take the 95%-ile
# benchmark delta %-age, and then apply some hand chosen thresholds
histogram = []
_NOISY = ["BM_WellFlushed"]
for name, bm in benchmarks.items():
if name in _NOISY:
            print(
                "skipping noisy benchmark '%s' for labelling evaluation" % name
            )
            continue
if bm.skip():
continue
d = bm.speedup["cpu_time"]
if d is None:
continue
histogram.append(d)
histogram.sort()
print("histogram of speedups: ", histogram)
if len(histogram) == 0:
significance = 0
else:
delta = histogram[int(len(histogram) * 0.95)]
mul = 1
if delta < 0:
delta = -delta
mul = -1
if delta < 2:
significance = 0
elif delta < 5:
significance = 1
elif delta < 10:
significance = 2
else:
significance = 3
significance *= mul
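    # Worked example of the hand-chosen thresholds above: a 95th-percentile
    # delta of 1% maps to significance 0, 7% to 2, and 12% to 3; the sign of
    # the original delta is then restored via `mul`.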
headers = ["Benchmark"] + fields
rows = []
for name in sorted(benchmarks.keys()):
if benchmarks[name].skip():
continue
rows.append([name] + benchmarks[name].row(fields))
note = None
if len(badjson_files):
note = (
"Corrupt JSON data (indicates timeout or crash): \n%s"
% fmt_dict(badjson_files)
)
if len(nonexistant_files):
if note:
note += (
"\n\nMissing files (indicates new benchmark): \n%s"
% fmt_dict(nonexistant_files)
)
else:
note = (
"\n\nMissing files (indicates new benchmark): \n%s"
% fmt_dict(nonexistant_files)
)
if rows:
return (
tabulate.tabulate(rows, headers=headers, floatfmt="+.2f"),
note,
significance,
)
else:
return None, note, 0
if __name__ == "__main__":
args = _args()
    diff, note, _ = diff(
        args.benchmarks,
        args.loops,
        args.regex,
        args.track,
        args.old,
        args.new,
    )
print("%s\n%s" % (note, diff if diff else "No performance differences"))
| 8,864
| 28.451827
| 81
|
py
|
grpc
|
grpc-master/tools/profiling/microbenchmarks/bm_diff/bm_main.py
|
#!/usr/bin/env python3
#
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Runs the entire bm_*.py pipeline, and possible comments on the PR """
import argparse
import errno
import multiprocessing
import os
import random
import subprocess
import sys
sys.path.append(
os.path.join(
os.path.dirname(sys.argv[0]), "..", "..", "run_tests", "python_utils"
)
)
sys.path.append(
os.path.join(
os.path.dirname(sys.argv[0]),
"..",
"..",
"..",
"run_tests",
"python_utils",
)
)
import bm_build
import bm_constants
import bm_diff
import bm_run
import check_on_pr
import jobset
def _args():
argp = argparse.ArgumentParser(
description="Perform diff on microbenchmarks"
)
argp.add_argument(
"-t",
"--track",
choices=sorted(bm_constants._INTERESTING),
nargs="+",
default=sorted(bm_constants._INTERESTING),
help="Which metrics to track",
)
argp.add_argument(
"-b",
"--benchmarks",
nargs="+",
choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
help="Which benchmarks to run",
)
argp.add_argument(
"-d",
"--diff_base",
type=str,
help="Commit or branch to compare the current one to",
)
argp.add_argument(
"-o",
"--old",
default="old",
type=str,
help='Name of baseline run to compare to. Usually just called "old"',
)
argp.add_argument(
"-r",
"--regex",
type=str,
default="",
help="Regex to filter benchmarks run",
)
argp.add_argument(
"-l",
"--loops",
type=int,
default=10,
help=(
"Number of times to loops the benchmarks. More loops cuts down on"
" noise"
),
)
argp.add_argument(
"-j",
"--jobs",
type=int,
default=multiprocessing.cpu_count(),
help="Number of CPUs to use",
)
argp.add_argument(
"--pr_comment_name",
type=str,
default="microbenchmarks",
help="Name that Jenkins will use to comment on the PR",
)
args = argp.parse_args()
assert args.diff_base or args.old, "One of diff_base or old must be set!"
if args.loops < 3:
print("WARNING: This run will likely be noisy. Increase loops.")
return args
def eintr_be_gone(fn):
"""Run fn until it doesn't stop because of EINTR"""
def inner(*args):
while True:
try:
return fn(*args)
except IOError as e:
if e.errno != errno.EINTR:
raise
return inner
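# Note: eintr_be_gone is a small retry decorator; wrapping an entry point as,
# say, eintr_be_gone(main)(args) would retry it whenever a blocking call is
# interrupted with errno.EINTR. It is defined here but not applied below.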
def main(args):
bm_build.build("new", args.benchmarks, args.jobs)
old = args.old
if args.diff_base:
old = "old"
        where_am_i = (
            subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"])
            .decode()
            .strip()
        )
subprocess.check_call(["git", "checkout", args.diff_base])
try:
bm_build.build(old, args.benchmarks, args.jobs)
finally:
subprocess.check_call(["git", "checkout", where_am_i])
subprocess.check_call(["git", "submodule", "update"])
jobs_list = []
jobs_list += bm_run.create_jobs(
"new", args.benchmarks, args.loops, args.regex
)
jobs_list += bm_run.create_jobs(
old, args.benchmarks, args.loops, args.regex
)
# shuffle all jobs to eliminate noise from GCE CPU drift
    random.SystemRandom().shuffle(jobs_list)
jobset.run(jobs_list, maxjobs=args.jobs)
diff, note, significance = bm_diff.diff(
args.benchmarks, args.loops, args.regex, args.track, old, "new"
)
if diff:
text = "[%s] Performance differences noted:\n%s" % (
args.pr_comment_name,
diff,
)
else:
text = (
"[%s] No significant performance differences" % args.pr_comment_name
)
if note:
text = note + "\n\n" + text
print("%s" % text)
check_on_pr.check_on_pr("Benchmark", "```\n%s\n```" % text)
if __name__ == "__main__":
args = _args()
main(args)
| 4,785
| 25.153005
| 80
|
py
|
grpc
|
grpc-master/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py
|
#!/usr/bin/env python3
#
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from scipy import stats
_DEFAULT_THRESHOLD = 1e-10
def scale(a, mul):
return [x * mul for x in a]
def cmp(a, b):
return stats.ttest_ind(a, b)
def speedup(new, old, threshold=_DEFAULT_THRESHOLD):
if (len(set(new))) == 1 and new == old:
return 0
s0, p0 = cmp(new, old)
if math.isnan(p0):
return 0
if s0 == 0:
return 0
if p0 > threshold:
return 0
if s0 < 0:
pct = 1
while pct < 100:
sp, pp = cmp(new, scale(old, 1 - pct / 100.0))
if sp > 0:
break
if pp > threshold:
break
pct += 1
return -(pct - 1)
else:
pct = 1
while pct < 10000:
sp, pp = cmp(new, scale(old, 1 + pct / 100.0))
if sp < 0:
break
if pp > threshold:
break
pct += 1
return pct - 1
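# Rough intuition for the search above: starting from a statistically
# significant t-test between `new` and `old`, the old samples are rescaled in
# 1% steps until the difference stops being significant; the last step that was
# still significant is returned as a percentage, negated when `new` is the
# smaller side.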
if __name__ == "__main__":
new = [0.0, 0.0, 0.0, 0.0]
old = [2.96608e-06, 3.35076e-06, 3.45384e-06, 3.34407e-06]
print(speedup(new, old, 1e-5))
print(speedup(old, new, 1e-5))
| 1,734
| 24.144928
| 74
|
py
|
grpc
|
grpc-master/tools/profiling/microbenchmarks/bm_diff/bm_build.py
|
#!/usr/bin/env python3
#
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Python utility to build opt and counters benchmarks """
import argparse
import multiprocessing
import os
import shutil
import subprocess
import bm_constants
def _args():
argp = argparse.ArgumentParser(description="Builds microbenchmarks")
argp.add_argument(
"-b",
"--benchmarks",
nargs="+",
choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
help="Which benchmarks to build",
)
argp.add_argument(
"-j",
"--jobs",
type=int,
default=multiprocessing.cpu_count(),
help=(
"Deprecated. Bazel chooses number of CPUs to build with"
" automatically."
),
)
argp.add_argument(
"-n",
"--name",
type=str,
help=(
"Unique name of this build. To be used as a handle to pass to the"
" other bm* scripts"
),
)
args = argp.parse_args()
assert args.name
return args
def _build_cmd(cfg, benchmarks):
bazel_targets = [
"//test/cpp/microbenchmarks:%s" % benchmark for benchmark in benchmarks
]
# --dynamic_mode=off makes sure that we get a monolithic binary that can be safely
# moved outside of the bazel-bin directory
return [
"tools/bazel",
"build",
"--config=%s" % cfg,
"--dynamic_mode=off",
] + bazel_targets
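# For illustration, _build_cmd("opt", ["bm_closure"]) yields roughly:
#   tools/bazel build --config=opt --dynamic_mode=off //test/cpp/microbenchmarks:bm_closure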
def _build_config_and_copy(cfg, benchmarks, dest_dir):
"""Build given config and copy resulting binaries to dest_dir/CONFIG"""
subprocess.check_call(_build_cmd(cfg, benchmarks))
cfg_dir = dest_dir + "/%s" % cfg
os.makedirs(cfg_dir)
subprocess.check_call(
["cp"]
+ [
"bazel-bin/test/cpp/microbenchmarks/%s" % benchmark
for benchmark in benchmarks
]
+ [cfg_dir]
)
def build(name, benchmarks, jobs):
dest_dir = "bm_diff_%s" % name
shutil.rmtree(dest_dir, ignore_errors=True)
_build_config_and_copy("opt", benchmarks, dest_dir)
if __name__ == "__main__":
args = _args()
build(args.name, args.benchmarks, args.jobs)
| 2,755
| 26.838384
| 86
|
py
|
grpc
|
grpc-master/tools/profiling/qps/qps_diff.py
|
#!/usr/bin/env python3
#
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Computes the diff between two qps runs and outputs significant results """
import argparse
import json
import multiprocessing
import os
import shutil
import subprocess
import sys
import qps_scenarios
import tabulate
sys.path.append(
os.path.join(
os.path.dirname(sys.argv[0]), "..", "microbenchmarks", "bm_diff"
)
)
import bm_speedup
sys.path.append(
os.path.join(
os.path.dirname(sys.argv[0]), "..", "..", "run_tests", "python_utils"
)
)
import check_on_pr
def _args():
argp = argparse.ArgumentParser(description="Perform diff on QPS Driver")
argp.add_argument(
"-d",
"--diff_base",
type=str,
help="Commit or branch to compare the current one to",
)
argp.add_argument(
"-l",
"--loops",
type=int,
default=4,
help=(
"Number of loops for each benchmark. More loops cuts down on noise"
),
)
argp.add_argument(
"-j",
"--jobs",
type=int,
default=multiprocessing.cpu_count(),
help="Number of CPUs to use",
)
args = argp.parse_args()
assert args.diff_base, "diff_base must be set"
return args
def _make_cmd(jobs):
return ["make", "-j", "%d" % jobs, "qps_json_driver", "qps_worker"]
def build(name, jobs):
shutil.rmtree("qps_diff_%s" % name, ignore_errors=True)
subprocess.check_call(["git", "submodule", "update"])
try:
subprocess.check_call(_make_cmd(jobs))
except subprocess.CalledProcessError as e:
subprocess.check_call(["make", "clean"])
subprocess.check_call(_make_cmd(jobs))
os.rename("bins", "qps_diff_%s" % name)
def _run_cmd(name, scenario, fname):
return [
"qps_diff_%s/opt/qps_json_driver" % name,
"--scenarios_json",
scenario,
"--json_file_out",
fname,
]
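# For illustration, _run_cmd("new", scenario_json, "large-message-throughput.new.0.json")
# yields roughly:
#   qps_diff_new/opt/qps_json_driver --scenarios_json <scenario_json> \
#       --json_file_out large-message-throughput.new.0.json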
def run(name, scenarios, loops):
for sn in scenarios:
for i in range(0, loops):
fname = "%s.%s.%d.json" % (sn, name, i)
subprocess.check_call(_run_cmd(name, scenarios[sn], fname))
def _load_qps(fname):
try:
with open(fname) as f:
return json.loads(f.read())["qps"]
except IOError as e:
print(("IOError occurred reading file: %s" % fname))
return None
except ValueError as e:
print(("ValueError occurred reading file: %s" % fname))
return None
def _median(ary):
assert len(ary)
ary = sorted(ary)
n = len(ary)
if n % 2 == 0:
        return (ary[(n - 1) // 2] + ary[(n - 1) // 2 + 1]) / 2.0
    else:
        return ary[n // 2]
def diff(scenarios, loops, old, new):
old_data = {}
new_data = {}
# collect data
for sn in scenarios:
old_data[sn] = []
new_data[sn] = []
for i in range(loops):
old_data[sn].append(_load_qps("%s.%s.%d.json" % (sn, old, i)))
new_data[sn].append(_load_qps("%s.%s.%d.json" % (sn, new, i)))
# crunch data
headers = ["Benchmark", "qps"]
rows = []
for sn in scenarios:
mdn_diff = abs(_median(new_data[sn]) - _median(old_data[sn]))
print(
"%s: %s=%r %s=%r mdn_diff=%r"
% (sn, new, new_data[sn], old, old_data[sn], mdn_diff)
)
s = bm_speedup.speedup(new_data[sn], old_data[sn], 10e-5)
if abs(s) > 3 and mdn_diff > 0.5:
rows.append([sn, "%+d%%" % s])
if rows:
return tabulate.tabulate(rows, headers=headers, floatfmt="+.2f")
else:
return None
def main(args):
build("new", args.jobs)
if args.diff_base:
where_am_i = (
subprocess.check_output(
["git", "rev-parse", "--abbrev-ref", "HEAD"]
)
.decode()
.strip()
)
subprocess.check_call(["git", "checkout", args.diff_base])
try:
build("old", args.jobs)
finally:
subprocess.check_call(["git", "checkout", where_am_i])
subprocess.check_call(["git", "submodule", "update"])
run("new", qps_scenarios._SCENARIOS, args.loops)
run("old", qps_scenarios._SCENARIOS, args.loops)
diff_output = diff(qps_scenarios._SCENARIOS, args.loops, "old", "new")
if diff_output:
text = "[qps] Performance differences noted:\n%s" % diff_output
else:
text = "[qps] No significant performance differences"
print(("%s" % text))
check_on_pr.check_on_pr("QPS", "```\n%s\n```" % text)
if __name__ == "__main__":
args = _args()
main(args)
| 5,160
| 25.880208
| 79
|
py
|
grpc
|
grpc-master/tools/profiling/qps/qps_scenarios.py
|
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" QPS Scenarios to run """
_SCENARIOS = {
"large-message-throughput": (
'{"scenarios":[{"name":"large-message-throughput",'
' "spawn_local_worker_count": -2, "warmup_seconds": 30,'
' "benchmark_seconds": 270, "num_servers": 1, "server_config":'
' {"async_server_threads": 1, "security_params": null, "server_type":'
' "ASYNC_SERVER"}, "num_clients": 1, "client_config": {"client_type":'
' "ASYNC_CLIENT", "security_params": null, "payload_config":'
' {"simple_params": {"resp_size": 1048576, "req_size": 1048576}},'
' "client_channels": 1, "async_client_threads": 1,'
' "outstanding_rpcs_per_channel": 1, "rpc_type": "UNARY",'
' "load_params": {"closed_loop": {}}, "histogram_params":'
' {"max_possible": 60000000000.0, "resolution": 0.01}}}]}'
),
"multi-channel-64-KiB": (
'{"scenarios":[{"name":"multi-channel-64-KiB",'
' "spawn_local_worker_count": -3, "warmup_seconds": 30,'
' "benchmark_seconds": 270, "num_servers": 1, "server_config":'
' {"async_server_threads": 31, "security_params": null, "server_type":'
' "ASYNC_SERVER"}, "num_clients": 2, "client_config": {"client_type":'
' "ASYNC_CLIENT", "security_params": null, "payload_config":'
' {"simple_params": {"resp_size": 65536, "req_size": 65536}},'
' "client_channels": 32, "async_client_threads": 31,'
' "outstanding_rpcs_per_channel": 100, "rpc_type": "UNARY",'
' "load_params": {"closed_loop": {}}, "histogram_params":'
' {"max_possible": 60000000000.0, "resolution": 0.01}}}]}'
),
}
| 2,221
| 49.5
| 79
|
py
|
grpc
|
grpc-master/tools/mkowners/mkowners.py
|
#!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import operator
import os
import re
import subprocess
#
# Find the root of the git tree
#
git_root = (
subprocess.check_output(["git", "rev-parse", "--show-toplevel"])
.decode("utf-8")
.strip()
)
#
# Parse command line arguments
#
default_out = os.path.join(git_root, ".github", "CODEOWNERS")
argp = argparse.ArgumentParser("Generate .github/CODEOWNERS file")
argp.add_argument(
"--out",
"-o",
type=str,
default=default_out,
help="Output file (default %s)" % default_out,
)
args = argp.parse_args()
#
# Walk git tree to locate all OWNERS files
#
owners_files = [
os.path.join(root, "OWNERS")
for root, dirs, files in os.walk(git_root)
if "OWNERS" in files
]
#
# Parse owners files
#
Owners = collections.namedtuple("Owners", "parent directives dir")
Directive = collections.namedtuple("Directive", "who globs")
def parse_owners(filename):
with open(filename) as f:
src = f.read().splitlines()
parent = True
directives = []
for line in src:
line = line.strip()
# line := directive | comment
if not line:
continue
if line[0] == "#":
continue
# it's a directive
directive = None
if line == "set noparent":
parent = False
elif line == "*":
directive = Directive(who="*", globs=[])
elif " " in line:
(who, globs) = line.split(" ", 1)
globs_list = [glob for glob in globs.split(" ") if glob]
directive = Directive(who=who, globs=globs_list)
else:
directive = Directive(who=line, globs=[])
if directive:
directives.append(directive)
return Owners(
parent=parent,
directives=directives,
dir=os.path.relpath(os.path.dirname(filename), git_root),
)
owners_data = sorted(
[parse_owners(filename) for filename in owners_files],
key=operator.attrgetter("dir"),
)
#
# Modify owners so that parented OWNERS files point to the actual
# Owners tuple with their parent field
#
new_owners_data = []
for owners in owners_data:
if owners.parent == True:
best_parent = None
best_parent_score = None
for possible_parent in owners_data:
if possible_parent is owners:
continue
rel = os.path.relpath(owners.dir, possible_parent.dir)
# '..' ==> we had to walk up from possible_parent to get to owners
# ==> not a parent
if ".." in rel:
continue
depth = len(rel.split(os.sep))
if not best_parent or depth < best_parent_score:
best_parent = possible_parent
best_parent_score = depth
if best_parent:
owners = owners._replace(parent=best_parent.dir)
else:
owners = owners._replace(parent=None)
new_owners_data.append(owners)
owners_data = new_owners_data
#
# In bottom to top order, process owners data structures to build up
# a CODEOWNERS file for GitHub
#
def full_dir(rules_dir, sub_path):
return os.path.join(rules_dir, sub_path) if rules_dir != "." else sub_path
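# For example, full_dir("src/core", "*.h") gives "src/core/*.h", while globs
# belonging to the repository root (rules_dir ".") are returned unchanged.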
# glob using git
gg_cache = {}
def git_glob(glob):
global gg_cache
if glob in gg_cache:
return gg_cache[glob]
r = set(
subprocess.check_output(
["git", "ls-files", os.path.join(git_root, glob)]
)
.decode("utf-8")
.strip()
.splitlines()
)
gg_cache[glob] = r
return r
def expand_directives(root, directives):
globs = collections.OrderedDict()
# build a table of glob --> owners
for directive in directives:
for glob in directive.globs or ["**"]:
if glob not in globs:
globs[glob] = []
if directive.who not in globs[glob]:
globs[glob].append(directive.who)
# expand owners for intersecting globs
sorted_globs = sorted(
list(globs.keys()),
key=lambda g: len(git_glob(full_dir(root, g))),
reverse=True,
)
out_globs = collections.OrderedDict()
for glob_add in sorted_globs:
who_add = globs[glob_add]
pre_items = [i for i in list(out_globs.items())]
out_globs[glob_add] = who_add.copy()
for glob_have, who_have in pre_items:
files_add = git_glob(full_dir(root, glob_add))
files_have = git_glob(full_dir(root, glob_have))
intersect = files_have.intersection(files_add)
if intersect:
for f in sorted(files_add): # sorted to ensure merge stability
if f not in intersect:
out_globs[os.path.relpath(f, start=root)] = who_add
for who in who_have:
if who not in out_globs[glob_add]:
out_globs[glob_add].append(who)
return out_globs
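# Rough sketch of the expansion above: globs are emitted broadest-first (by
# number of matching files); when a later glob overlaps an earlier one, the
# earlier owners are merged into the later entry and the later glob's
# non-overlapping files get explicit per-file entries, so ownership stays
# additive even though CODEOWNERS is order dependent.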
def add_parent_to_globs(parent, globs, globs_dir):
if not parent:
return
for owners in owners_data:
if owners.dir == parent:
owners_globs = expand_directives(owners.dir, owners.directives)
for oglob, oglob_who in list(owners_globs.items()):
for gglob, gglob_who in list(globs.items()):
files_parent = git_glob(full_dir(owners.dir, oglob))
files_child = git_glob(full_dir(globs_dir, gglob))
intersect = files_parent.intersection(files_child)
gglob_who_orig = gglob_who.copy()
if intersect:
for f in sorted(
files_child
): # sorted to ensure merge stability
if f not in intersect:
who = gglob_who_orig.copy()
globs[os.path.relpath(f, start=globs_dir)] = who
for who in oglob_who:
if who not in gglob_who:
gglob_who.append(who)
add_parent_to_globs(owners.parent, globs, globs_dir)
return
assert False
todo = owners_data.copy()
done = set()
with open(args.out, "w") as out:
out.write("# Auto-generated by the tools/mkowners/mkowners.py tool\n")
out.write("# Uses OWNERS files in different modules throughout the\n")
out.write("# repository as the source of truth for module ownership.\n")
written_globs = []
while todo:
head, *todo = todo
if head.parent and not head.parent in done:
todo.append(head)
continue
globs = expand_directives(head.dir, head.directives)
add_parent_to_globs(head.parent, globs, head.dir)
for glob, owners in list(globs.items()):
skip = False
for glob1, owners1, dir1 in reversed(written_globs):
files = git_glob(full_dir(head.dir, glob))
files1 = git_glob(full_dir(dir1, glob1))
intersect = files.intersection(files1)
if files == intersect:
if sorted(owners) == sorted(owners1):
skip = True # nothing new in this rule
break
elif intersect:
# continuing would cause a semantic change since some files are
# affected differently by this rule and CODEOWNERS is order dependent
break
if not skip:
out.write(
"/%s %s\n" % (full_dir(head.dir, glob), " ".join(owners))
)
written_globs.append((glob, owners, head.dir))
done.add(head.dir)
| 8,363
| 31.293436
| 89
|
py
|
grpc
|
grpc-master/tools/interop_matrix/client_matrix.py
|
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Defines languages, runtimes and releases for backward compatibility testing
from collections import OrderedDict
def get_github_repo(lang):
return {
"dart": "https://github.com/grpc/grpc-dart.git",
"go": "https://github.com/grpc/grpc-go.git",
"java": "https://github.com/grpc/grpc-java.git",
"node": "https://github.com/grpc/grpc-node.git",
# all other languages use the grpc.git repo.
}.get(lang, "https://github.com/grpc/grpc.git")
def get_release_tags(lang):
"""Returns list of known releases for given language."""
return list(LANG_RELEASE_MATRIX[lang].keys())
def get_runtimes_for_lang_release(lang, release):
"""Get list of valid runtimes for given release of lang."""
runtimes = list(LANG_RUNTIME_MATRIX[lang])
release_info = LANG_RELEASE_MATRIX[lang].get(release)
if release_info and release_info.runtimes:
runtimes = list(release_info.runtimes)
return runtimes
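# For example, given the matrix below, get_runtimes_for_lang_release("go", "v1.38.1")
# returns ["go1.16"]; releases without an explicit runtimes override fall back to
# the full LANG_RUNTIME_MATRIX entry for the language.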
def should_build_docker_interop_image_from_release_tag(lang):
# All dockerfile definitions live in grpc/grpc repository.
# For language that have a separate repo, we need to use
# dockerfile definitions from head of grpc/grpc.
if lang in ["go", "java", "node"]:
return False
return True
# Dictionary of default runtimes per language
LANG_RUNTIME_MATRIX = {
"cxx": ["cxx"], # This is actually debian8.
"go": ["go1.8", "go1.11", "go1.16", "go1.19"],
"java": ["java"],
"python": ["python", "pythonasyncio"],
"node": ["node"],
"ruby": ["ruby"],
"php": ["php7"],
"csharp": ["csharp", "csharpcoreclr"],
}
class ReleaseInfo:
"""Info about a single release of a language"""
def __init__(self, patch=[], runtimes=[], testcases_file=None):
self.patch = patch
self.runtimes = runtimes
self.testcases_file = testcases_file
# Dictionary of known releases for given language.
LANG_RELEASE_MATRIX = {
"cxx": OrderedDict(
[
("v1.0.1", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.1.4", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.2.5", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.3.9", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.4.2", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.6.6", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.7.2", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.8.0", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.9.1", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.10.1", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.11.1", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.12.0", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.13.0", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.14.1", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.15.0", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.16.0", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.17.1", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.18.0", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.19.0", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.20.0", ReleaseInfo(testcases_file="cxx__v1.31.1")),
("v1.21.4", ReleaseInfo(testcases_file="cxx__v1.31.1")),
("v1.22.0", ReleaseInfo(testcases_file="cxx__v1.31.1")),
("v1.22.1", ReleaseInfo(testcases_file="cxx__v1.31.1")),
("v1.23.0", ReleaseInfo(testcases_file="cxx__v1.31.1")),
("v1.24.0", ReleaseInfo(testcases_file="cxx__v1.31.1")),
("v1.25.0", ReleaseInfo(testcases_file="cxx__v1.31.1")),
("v1.26.0", ReleaseInfo(testcases_file="cxx__v1.31.1")),
("v1.27.3", ReleaseInfo(testcases_file="cxx__v1.31.1")),
("v1.30.0", ReleaseInfo(testcases_file="cxx__v1.31.1")),
("v1.31.1", ReleaseInfo(testcases_file="cxx__v1.31.1")),
("v1.32.0", ReleaseInfo()),
("v1.33.2", ReleaseInfo()),
("v1.34.0", ReleaseInfo()),
("v1.35.0", ReleaseInfo()),
("v1.36.3", ReleaseInfo()),
("v1.37.0", ReleaseInfo()),
("v1.38.0", ReleaseInfo()),
("v1.39.0", ReleaseInfo()),
("v1.41.1", ReleaseInfo()),
("v1.42.0", ReleaseInfo()),
("v1.43.0", ReleaseInfo()),
("v1.44.0", ReleaseInfo()),
("v1.46.2", ReleaseInfo()),
("v1.47.1", ReleaseInfo()),
("v1.48.3", ReleaseInfo()),
("v1.49.1", ReleaseInfo()),
("v1.52.0", ReleaseInfo()),
("v1.53.0", ReleaseInfo()),
("v1.54.0", ReleaseInfo()),
("v1.55.0", ReleaseInfo()),
("v1.56.0", ReleaseInfo()),
]
),
"go": OrderedDict(
[
(
"v1.0.5",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.2.1",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.3.0",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.4.2",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.5.2",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.6.0",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.7.4",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.8.2",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.9.2",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.10.1",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.11.3",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.12.2",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.13.0",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.14.0",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.15.0",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.16.0",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.17.0",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.0.5"),
),
(
"v1.18.0",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.0.5"),
),
(
"v1.19.0",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.0.5"),
),
(
"v1.20.0",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.21.3",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.22.3",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.23.1",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.24.0",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.25.0",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.26.0",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.27.1",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.28.0",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.29.0",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.30.0",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.31.1",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.32.0",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.33.1",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
("v1.34.0", ReleaseInfo(runtimes=["go1.11"])),
("v1.35.0", ReleaseInfo(runtimes=["go1.11"])),
("v1.36.0", ReleaseInfo(runtimes=["go1.11"])),
("v1.37.0", ReleaseInfo(runtimes=["go1.11"])),
# NOTE: starting from release v1.38.0, use runtimes=['go1.16']
("v1.38.1", ReleaseInfo(runtimes=["go1.16"])),
("v1.39.1", ReleaseInfo(runtimes=["go1.16"])),
("v1.40.0", ReleaseInfo(runtimes=["go1.16"])),
("v1.41.0", ReleaseInfo(runtimes=["go1.16"])),
("v1.42.0", ReleaseInfo(runtimes=["go1.16"])),
("v1.43.0", ReleaseInfo(runtimes=["go1.16"])),
("v1.44.0", ReleaseInfo(runtimes=["go1.16"])),
("v1.45.0", ReleaseInfo(runtimes=["go1.16"])),
("v1.46.0", ReleaseInfo(runtimes=["go1.16"])),
("v1.47.0", ReleaseInfo(runtimes=["go1.16"])),
("v1.48.0", ReleaseInfo(runtimes=["go1.16"])),
("v1.49.0", ReleaseInfo(runtimes=["go1.16"])),
("v1.50.1", ReleaseInfo(runtimes=["go1.16"])),
("v1.51.0", ReleaseInfo(runtimes=["go1.16"])),
("v1.52.3", ReleaseInfo(runtimes=["go1.19"])),
("v1.53.0", ReleaseInfo(runtimes=["go1.19"])),
("v1.54.1", ReleaseInfo(runtimes=["go1.19"])),
("v1.55.0", ReleaseInfo(runtimes=["go1.19"])),
]
),
"java": OrderedDict(
[
(
"v1.0.3",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
(
"v1.1.2",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
(
"v1.2.0",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
(
"v1.3.1",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
(
"v1.4.0",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
(
"v1.5.0",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
(
"v1.6.1",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
("v1.7.1", ReleaseInfo(testcases_file="java__v1.0.3")),
(
"v1.8.0",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
(
"v1.9.1",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
(
"v1.10.1",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
(
"v1.11.0",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
("v1.12.1", ReleaseInfo(testcases_file="java__v1.0.3")),
("v1.13.2", ReleaseInfo(testcases_file="java__v1.0.3")),
(
"v1.14.0",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
("v1.15.1", ReleaseInfo(testcases_file="java__v1.0.3")),
(
"v1.16.1",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
("v1.17.2", ReleaseInfo(testcases_file="java__v1.0.3")),
(
"v1.18.0",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
(
"v1.19.0",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
("v1.20.0", ReleaseInfo(runtimes=["java_oracle8"])),
("v1.21.1", ReleaseInfo()),
("v1.22.2", ReleaseInfo()),
("v1.23.0", ReleaseInfo()),
("v1.24.0", ReleaseInfo()),
("v1.25.0", ReleaseInfo()),
("v1.26.1", ReleaseInfo()),
("v1.27.2", ReleaseInfo()),
("v1.28.1", ReleaseInfo()),
("v1.29.0", ReleaseInfo()),
("v1.30.2", ReleaseInfo()),
("v1.31.2", ReleaseInfo()),
("v1.32.3", ReleaseInfo()),
("v1.33.1", ReleaseInfo()),
("v1.34.1", ReleaseInfo()),
("v1.35.1", ReleaseInfo()),
("v1.36.3", ReleaseInfo()),
("v1.37.1", ReleaseInfo()),
("v1.38.1", ReleaseInfo()),
("v1.39.0", ReleaseInfo()),
("v1.40.2", ReleaseInfo()),
("v1.41.3", ReleaseInfo()),
("v1.42.3", ReleaseInfo()),
("v1.43.3", ReleaseInfo()),
("v1.44.2", ReleaseInfo()),
("v1.45.3", ReleaseInfo()),
("v1.46.1", ReleaseInfo()),
("v1.47.1", ReleaseInfo()),
("v1.48.2", ReleaseInfo()),
("v1.49.2", ReleaseInfo()),
("v1.50.3", ReleaseInfo()),
("v1.51.1", ReleaseInfo()),
("v1.52.0", ReleaseInfo()),
("v1.54.0", ReleaseInfo()),
("v1.55.1", ReleaseInfo()),
("v1.56.0", ReleaseInfo()),
]
),
"python": OrderedDict(
[
(
"v1.0.x",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.0.x"
),
),
(
"v1.1.4",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.0.x"
),
),
(
"v1.2.5",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.0.x"
),
),
(
"v1.3.9",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.0.x"
),
),
(
"v1.4.2",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.0.x"
),
),
(
"v1.6.6",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.0.x"
),
),
(
"v1.7.2",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.0.x"
),
),
(
"v1.8.1",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.0.x"
),
),
(
"v1.9.1",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.0.x"
),
),
(
"v1.10.1",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.0.x"
),
),
(
"v1.11.1",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.11.1"
),
),
(
"v1.12.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.11.1"
),
),
(
"v1.13.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.11.1"
),
),
(
"v1.14.1",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.11.1"
),
),
(
"v1.15.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.11.1"
),
),
(
"v1.16.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.11.1"
),
),
(
"v1.17.1",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.11.1"
),
),
(
"v1.18.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.19.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.20.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.21.4",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.22.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.22.1",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.23.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.24.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.25.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.26.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.27.3",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.30.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.31.1",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.32.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.33.2",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.34.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.35.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.36.3",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.37.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.38.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.39.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.41.1",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.41.1"
),
),
(
"v1.42.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.41.1"
),
),
(
"v1.43.2",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.41.1"
),
),
(
"v1.44.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__master"
),
),
(
"v1.46.2",
ReleaseInfo(
runtimes=["python"], testcases_file="python__master"
),
),
(
"v1.47.1",
ReleaseInfo(
runtimes=["python"], testcases_file="python__master"
),
),
(
"v1.48.3",
ReleaseInfo(
runtimes=["python"], testcases_file="python__master"
),
),
(
"v1.49.1",
ReleaseInfo(
runtimes=["python"], testcases_file="python__master"
),
),
(
"v1.52.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__master"
),
),
(
"v1.53.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__master"
),
),
(
"v1.54.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__master"
),
),
(
"v1.55.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__master"
),
),
(
"v1.56.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__master"
),
),
]
),
"node": OrderedDict(
[
("v1.0.1", ReleaseInfo(testcases_file="node__v1.0.1")),
("v1.1.4", ReleaseInfo(testcases_file="node__v1.1.4")),
("v1.2.5", ReleaseInfo(testcases_file="node__v1.1.4")),
("v1.3.9", ReleaseInfo(testcases_file="node__v1.1.4")),
("v1.4.2", ReleaseInfo(testcases_file="node__v1.1.4")),
("v1.6.6", ReleaseInfo(testcases_file="node__v1.1.4")),
# TODO: https://github.com/grpc/grpc-node/issues/235.
# ('v1.7.2', ReleaseInfo()),
("v1.8.4", ReleaseInfo()),
("v1.9.1", ReleaseInfo()),
("v1.10.0", ReleaseInfo()),
("v1.11.3", ReleaseInfo()),
("v1.12.4", ReleaseInfo()),
]
),
"ruby": OrderedDict(
[
(
"v1.0.1",
ReleaseInfo(
patch=[
"tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile",
"tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh",
],
testcases_file="ruby__v1.0.1",
),
),
("v1.1.4", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.2.5", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.3.9", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.4.2", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.6.6", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.7.2", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.8.0", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.9.1", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.10.1", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.11.1", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.12.0", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.13.0", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.14.1", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.15.0", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.16.0", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.17.1", ReleaseInfo(testcases_file="ruby__v1.1.4")),
(
"v1.18.0",
ReleaseInfo(
patch=[
"tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh",
]
),
),
("v1.19.0", ReleaseInfo()),
("v1.20.0", ReleaseInfo()),
("v1.21.4", ReleaseInfo()),
("v1.22.0", ReleaseInfo()),
("v1.22.1", ReleaseInfo()),
("v1.23.0", ReleaseInfo()),
("v1.24.0", ReleaseInfo()),
("v1.25.0", ReleaseInfo()),
# TODO: https://github.com/grpc/grpc/issues/18262.
# If you are not encountering the error in above issue
# go ahead and upload the docker image for new releases.
("v1.26.0", ReleaseInfo()),
("v1.27.3", ReleaseInfo()),
("v1.30.0", ReleaseInfo()),
("v1.31.1", ReleaseInfo()),
("v1.32.0", ReleaseInfo()),
("v1.33.2", ReleaseInfo()),
("v1.34.0", ReleaseInfo()),
("v1.35.0", ReleaseInfo()),
("v1.36.3", ReleaseInfo()),
("v1.37.0", ReleaseInfo()),
("v1.38.0", ReleaseInfo()),
("v1.39.0", ReleaseInfo()),
("v1.41.1", ReleaseInfo()),
("v1.42.0", ReleaseInfo()),
("v1.43.0", ReleaseInfo()),
("v1.44.0", ReleaseInfo()),
("v1.46.2", ReleaseInfo()),
("v1.47.1", ReleaseInfo()),
("v1.48.3", ReleaseInfo()),
("v1.49.1", ReleaseInfo()),
("v1.52.0", ReleaseInfo()),
("v1.53.0", ReleaseInfo()),
("v1.54.0", ReleaseInfo()),
("v1.55.0", ReleaseInfo()),
("v1.56.0", ReleaseInfo()),
]
),
"php": OrderedDict(
[
("v1.0.1", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.1.4", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.2.5", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.3.9", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.4.2", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.6.6", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.7.2", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.8.0", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.9.1", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.10.1", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.11.1", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.12.0", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.13.0", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.14.1", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.15.0", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.16.0", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.17.1", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.18.0", ReleaseInfo()),
# v1.19 and v1.20 were deliberately omitted here because of an issue.
# See https://github.com/grpc/grpc/issues/18264
("v1.21.4", ReleaseInfo()),
("v1.22.0", ReleaseInfo()),
("v1.22.1", ReleaseInfo()),
("v1.23.0", ReleaseInfo()),
("v1.24.0", ReleaseInfo()),
("v1.25.0", ReleaseInfo()),
("v1.26.0", ReleaseInfo()),
("v1.27.3", ReleaseInfo()),
("v1.30.0", ReleaseInfo()),
("v1.31.1", ReleaseInfo()),
("v1.32.0", ReleaseInfo()),
("v1.33.2", ReleaseInfo()),
("v1.34.0", ReleaseInfo()),
("v1.35.0", ReleaseInfo()),
("v1.36.3", ReleaseInfo()),
("v1.37.0", ReleaseInfo()),
("v1.38.0", ReleaseInfo()),
("v1.39.0", ReleaseInfo()),
("v1.41.1", ReleaseInfo()),
("v1.42.0", ReleaseInfo()),
("v1.43.0", ReleaseInfo()),
("v1.44.0", ReleaseInfo()),
("v1.46.2", ReleaseInfo()),
("v1.47.1", ReleaseInfo()),
("v1.48.3", ReleaseInfo()),
("v1.49.1", ReleaseInfo()),
("v1.52.0", ReleaseInfo()),
("v1.53.0", ReleaseInfo()),
("v1.54.0", ReleaseInfo()),
("v1.55.0", ReleaseInfo()),
("v1.56.0", ReleaseInfo()),
]
),
"csharp": OrderedDict(
[
(
"v1.0.1",
ReleaseInfo(
patch=[
"tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile",
"tools/dockerfile/interoptest/grpc_interop_csharpcoreclr/Dockerfile",
],
testcases_file="csharp__v1.1.4",
),
),
("v1.1.4", ReleaseInfo(testcases_file="csharp__v1.1.4")),
("v1.2.5", ReleaseInfo(testcases_file="csharp__v1.1.4")),
("v1.3.9", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.4.2", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.6.6", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.7.2", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.8.0", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.9.1", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.10.1", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.11.1", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.12.0", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.13.0", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.14.1", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.15.0", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.16.0", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.17.1", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.18.0", ReleaseInfo(testcases_file="csharp__v1.18.0")),
("v1.19.0", ReleaseInfo(testcases_file="csharp__v1.18.0")),
("v1.20.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.20.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.21.4", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.22.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.22.1", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.23.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.24.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.25.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.26.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.27.3", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.30.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.31.1", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.32.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.33.2", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.34.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.35.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.36.3", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.37.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.38.1", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.39.1", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.41.1", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.42.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.43.0", ReleaseInfo()),
("v1.44.0", ReleaseInfo()),
("v1.46.2", ReleaseInfo()),
]
),
}
| 35,096
| 36.337234
| 93
|
py
|
grpc
|
grpc-master/tools/interop_matrix/create_matrix_images.py
|
#!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build and upload docker images to Google Container Registry per matrix."""
from __future__ import print_function
import argparse
import atexit
import multiprocessing
import os
import shutil
import subprocess
import sys
import tempfile
# Language Runtime Matrix
import client_matrix
python_util_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../run_tests/python_utils")
)
sys.path.append(python_util_dir)
import dockerjob
import jobset
_IMAGE_BUILDER = "tools/run_tests/dockerize/build_interop_image.sh"
_LANGUAGES = list(client_matrix.LANG_RUNTIME_MATRIX.keys())
# All gRPC release tags, flattened, deduped and sorted.
_RELEASES = sorted(
list(
set(
release
for release_dict in list(client_matrix.LANG_RELEASE_MATRIX.values())
for release in list(release_dict.keys())
)
)
)
# Destination directory inside docker image to keep extra info from build time.
_BUILD_INFO = "/var/local/build_info"
argp = argparse.ArgumentParser(description="Run interop tests.")
argp.add_argument(
"--gcr_path",
default="gcr.io/grpc-testing",
help="Path of docker images in Google Container Registry",
)
argp.add_argument(
"--release",
default="master",
choices=["all", "master"] + _RELEASES,
help=(
"github commit tag to checkout. When building all "
'releases defined in client_matrix.py, use "all". Valid only '
"with --git_checkout."
),
)
argp.add_argument(
"-l",
"--language",
choices=["all"] + sorted(_LANGUAGES),
nargs="+",
default=["all"],
help="Test languages to build docker images for.",
)
argp.add_argument(
"--git_checkout",
action="store_true",
help=(
"Use a separate git clone tree for building grpc stack. "
"Required when using --release flag. By default, current"
"tree and the sibling will be used for building grpc stack."
),
)
argp.add_argument(
"--git_checkout_root",
default="/export/hda3/tmp/grpc_matrix",
help=(
"Directory under which grpc-go/java/main repo will be "
"cloned. Valid only with --git_checkout."
),
)
argp.add_argument(
"--keep",
action="store_true",
help="keep the created local images after uploading to GCR",
)
argp.add_argument(
"--reuse_git_root",
default=False,
action="store_const",
const=True,
help=(
"reuse the repo dir. If False, the existing git root "
"directory will removed before a clean checkout, because "
"reusing the repo can cause git checkout error if you switch "
"between releases."
),
)
argp.add_argument(
"--upload_images",
action="store_true",
help=(
"If set, images will be uploaded to container registry after building."
),
)
args = argp.parse_args()
def add_files_to_image(image, with_files, label=None):
"""Add files to a docker image.
    image: docker image name, e.g. grpc_interop_java:26328ad8
with_files: additional files to include in the docker image.
label: label string to attach to the image.
"""
tag_idx = image.find(":")
if tag_idx == -1:
jobset.message(
"FAILED", "invalid docker image %s" % image, do_newline=True
)
sys.exit(1)
orig_tag = "%s_" % image
subprocess.check_output(["docker", "tag", image, orig_tag])
lines = ["FROM " + orig_tag]
if label:
lines.append("LABEL %s" % label)
temp_dir = tempfile.mkdtemp()
atexit.register(lambda: subprocess.call(["rm", "-rf", temp_dir]))
# Copy with_files inside the tmp directory, which will be the docker build
# context.
for f in with_files:
shutil.copy(f, temp_dir)
lines.append("COPY %s %s/" % (os.path.basename(f), _BUILD_INFO))
# Create a Dockerfile.
with open(os.path.join(temp_dir, "Dockerfile"), "w") as f:
f.write("\n".join(lines))
jobset.message("START", "Repackaging %s" % image, do_newline=True)
build_cmd = ["docker", "build", "--rm", "--tag", image, temp_dir]
subprocess.check_output(build_cmd)
dockerjob.remove_image(orig_tag, skip_nonexistent=True)
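# For illustration, repackaging image "gcr.io/grpc-testing/grpc_interop_go1.16:v1.38.1"
# with with_files=["commit_log"] and label="release=v1.38.1" builds from a
# generated Dockerfile roughly like:
#   FROM gcr.io/grpc-testing/grpc_interop_go1.16:v1.38.1_
#   LABEL release=v1.38.1
#   COPY commit_log /var/local/build_info/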
def build_image_jobspec(runtime, env, gcr_tag, stack_base):
"""Build interop docker image for a language with runtime.
runtime: a <lang><version> string, for example go1.8.
env: dictionary of env to passed to the build script.
    gcr_tag: the tag for the docker image (e.g. v1.3.0).
stack_base: the local gRPC repo path.
"""
basename = "grpc_interop_%s" % runtime
tag = "%s/%s:%s" % (args.gcr_path, basename, gcr_tag)
build_env = {"INTEROP_IMAGE": tag, "BASE_NAME": basename}
build_env.update(env)
image_builder_path = _IMAGE_BUILDER
if client_matrix.should_build_docker_interop_image_from_release_tag(lang):
image_builder_path = os.path.join(stack_base, _IMAGE_BUILDER)
build_job = jobset.JobSpec(
cmdline=[image_builder_path],
environ=build_env,
shortname="build_docker_%s" % runtime,
timeout_seconds=30 * 60,
)
build_job.tag = tag
return build_job
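# For illustration, runtime="go1.16" with the default --gcr_path and
# gcr_tag="v1.38.1" yields an image tag of roughly
# "gcr.io/grpc-testing/grpc_interop_go1.16:v1.38.1".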
def build_all_images_for_lang(lang):
"""Build all docker images for a language across releases and runtimes."""
if not args.git_checkout:
if args.release != "master":
print(
"Cannot use --release without also enabling --git_checkout.\n"
)
sys.exit(1)
releases = [args.release]
else:
if args.release == "all":
releases = client_matrix.get_release_tags(lang)
else:
# Build a particular release.
if args.release not in ["master"] + client_matrix.get_release_tags(
lang
):
jobset.message(
"SKIPPED",
"%s for %s is not defined" % (args.release, lang),
do_newline=True,
)
return []
releases = [args.release]
images = []
for release in releases:
images += build_all_images_for_release(lang, release)
jobset.message(
"SUCCESS",
"All docker images built for %s at %s." % (lang, releases),
do_newline=True,
)
return images
def build_all_images_for_release(lang, release):
"""Build all docker images for a release across all runtimes."""
docker_images = []
build_jobs = []
env = {}
    # If we are not using the current tree or the sibling for the grpc stack, do a checkout.
stack_base = ""
if args.git_checkout:
stack_base = checkout_grpc_stack(lang, release)
var = {
"go": "GRPC_GO_ROOT",
"java": "GRPC_JAVA_ROOT",
"node": "GRPC_NODE_ROOT",
}.get(lang, "GRPC_ROOT")
env[var] = stack_base
for runtime in client_matrix.get_runtimes_for_lang_release(lang, release):
job = build_image_jobspec(runtime, env, release, stack_base)
docker_images.append(job.tag)
build_jobs.append(job)
jobset.message("START", "Building interop docker images.", do_newline=True)
print("Jobs to run: \n%s\n" % "\n".join(str(j) for j in build_jobs))
num_failures, _ = jobset.run(
build_jobs, newline_on_success=True, maxjobs=multiprocessing.cpu_count()
)
if num_failures:
jobset.message(
"FAILED", "Failed to build interop docker images.", do_newline=True
)
docker_images_cleanup.extend(docker_images)
sys.exit(1)
jobset.message(
"SUCCESS",
"All docker images built for %s at %s." % (lang, release),
do_newline=True,
)
if release != "master":
commit_log = os.path.join(stack_base, "commit_log")
if os.path.exists(commit_log):
for image in docker_images:
add_files_to_image(image, [commit_log], "release=%s" % release)
return docker_images
def cleanup():
if not args.keep:
for image in docker_images_cleanup:
dockerjob.remove_image(image, skip_nonexistent=True)
docker_images_cleanup = []
atexit.register(cleanup)
def maybe_apply_patches_on_git_tag(stack_base, lang, release):
files_to_patch = []
release_info = client_matrix.LANG_RELEASE_MATRIX[lang].get(release)
if release_info:
files_to_patch = release_info.patch
if not files_to_patch:
return
patch_file_relative_path = "patches/%s_%s/git_repo.patch" % (lang, release)
patch_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), patch_file_relative_path)
)
if not os.path.exists(patch_file):
jobset.message(
"FAILED", "expected patch file |%s| to exist" % patch_file
)
sys.exit(1)
subprocess.check_output(
["git", "apply", patch_file], cwd=stack_base, stderr=subprocess.STDOUT
)
# TODO(jtattermusch): this really would need simplification and refactoring
# - "git add" and "git commit" can easily be done in a single command
# - it looks like the only reason for the existence of the "files_to_patch"
# entry is to perform "git add" - which is clumsy and fragile.
# - we only allow a single patch with name "git_repo.patch". A better design
# would be to allow multiple patches that can have more descriptive names.
for repo_relative_path in files_to_patch:
subprocess.check_output(
["git", "add", repo_relative_path],
cwd=stack_base,
stderr=subprocess.STDOUT,
)
subprocess.check_output(
[
"git",
"commit",
"-m",
"Hack performed on top of %s git "
"tag in order to build and run the %s "
"interop tests on that tag." % (lang, release),
],
cwd=stack_base,
stderr=subprocess.STDOUT,
)
def checkout_grpc_stack(lang, release):
"""Invokes 'git check' for the lang/release and returns directory created."""
assert args.git_checkout and args.git_checkout_root
if not os.path.exists(args.git_checkout_root):
os.makedirs(args.git_checkout_root)
repo = client_matrix.get_github_repo(lang)
# Get the subdir name part of repo
# For example, 'git@github.com:grpc/grpc-go.git' should use 'grpc-go'.
repo_dir = os.path.splitext(os.path.basename(repo))[0]
stack_base = os.path.join(args.git_checkout_root, repo_dir)
# Clean up leftover repo dir if necessary.
if not args.reuse_git_root and os.path.exists(stack_base):
jobset.message("START", "Removing git checkout root.", do_newline=True)
shutil.rmtree(stack_base)
if not os.path.exists(stack_base):
subprocess.check_call(
["git", "clone", "--recursive", repo],
cwd=os.path.dirname(stack_base),
)
# git checkout.
jobset.message(
"START",
"git checkout %s from %s" % (release, stack_base),
do_newline=True,
)
# We should NEVER do checkout on current tree !!!
assert not os.path.dirname(__file__).startswith(stack_base)
output = subprocess.check_output(
["git", "checkout", release], cwd=stack_base, stderr=subprocess.STDOUT
)
maybe_apply_patches_on_git_tag(stack_base, lang, release)
commit_log = subprocess.check_output(["git", "log", "-1"], cwd=stack_base)
jobset.message(
"SUCCESS",
"git checkout",
"%s: %s" % (str(output), commit_log),
do_newline=True,
)
# git submodule update
jobset.message(
"START",
"git submodule update --init at %s from %s" % (release, stack_base),
do_newline=True,
)
subprocess.check_call(
["git", "submodule", "update", "--init"],
cwd=stack_base,
stderr=subprocess.STDOUT,
)
jobset.message(
"SUCCESS",
"git submodule update --init",
"%s: %s" % (str(output), commit_log),
do_newline=True,
)
# Write git log to commit_log so it can be packaged with the docker image.
with open(os.path.join(stack_base, "commit_log"), "wb") as f:
f.write(commit_log)
return stack_base
languages = args.language if args.language != ["all"] else _LANGUAGES
for lang in languages:
docker_images = build_all_images_for_lang(lang)
for image in docker_images:
if args.upload_images:
jobset.message("START", "Uploading %s" % image, do_newline=True)
# docker image name must be in the format <gcr_path>/<image>:<gcr_tag>
assert image.startswith(args.gcr_path) and image.find(":") != -1
subprocess.call(["gcloud", "docker", "--", "push", image])
else:
# Uploading (and overwriting images) by default can easily break things.
print(
"Not uploading image %s, run with --upload_images to upload."
% image
)
| 13,537
| 31.310263
| 84
|
py
|
grpc
|
grpc-master/tools/interop_matrix/run_interop_matrix_tests.py
|
#!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run tests using docker images in Google Container Registry per matrix."""
from __future__ import print_function
import argparse
import atexit
import json
import multiprocessing
import os
import re
import subprocess
import sys
import uuid
# Language Runtime Matrix
import client_matrix
python_util_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../run_tests/python_utils")
)
sys.path.append(python_util_dir)
import dockerjob
import jobset
import report_utils
import upload_test_results
_TEST_TIMEOUT_SECONDS = 60
_PULL_IMAGE_TIMEOUT_SECONDS = 15 * 60
_MAX_PARALLEL_DOWNLOADS = 6
_LANGUAGES = list(client_matrix.LANG_RUNTIME_MATRIX.keys())
# All gRPC release tags, flattened, deduped and sorted.
_RELEASES = sorted(
list(
set(
release
for release_dict in list(client_matrix.LANG_RELEASE_MATRIX.values())
for release in list(release_dict.keys())
)
)
)
argp = argparse.ArgumentParser(description="Run interop tests.")
argp.add_argument("-j", "--jobs", default=multiprocessing.cpu_count(), type=int)
argp.add_argument(
"--gcr_path",
default="gcr.io/grpc-testing",
help="Path of docker images in Google Container Registry",
)
argp.add_argument(
"--release",
default="all",
choices=["all"] + _RELEASES,
help=(
"Release tags to test. When testing all "
'releases defined in client_matrix.py, use "all".'
),
)
argp.add_argument(
"-l",
"--language",
choices=["all"] + sorted(_LANGUAGES),
nargs="+",
default=["all"],
help="Languages to test",
)
argp.add_argument(
"--keep",
action="store_true",
help="keep the created local images after finishing the tests.",
)
argp.add_argument(
"--report_file", default="report.xml", help="The result file to create."
)
argp.add_argument(
"--allow_flakes",
default=False,
action="store_const",
const=True,
help=(
"Allow flaky tests to show as passing (re-runs failed "
"tests up to five times)"
),
)
argp.add_argument(
"--bq_result_table",
default="",
type=str,
nargs="?",
help="Upload test results to a specified BQ table.",
)
# Requests will be routed through specified VIP by default.
# See go/grpc-interop-tests (internal-only) for details.
argp.add_argument(
"--server_host",
default="74.125.206.210",
type=str,
nargs="?",
help="The gateway to backend services.",
)
def _get_test_images_for_lang(lang, release_arg, image_path_prefix):
"""Find docker images for a language across releases and runtimes.
    Returns a dictionary of lists of (<tag>, <image-full-path>) tuples, keyed by runtime.
"""
if release_arg == "all":
# Use all defined releases for given language
releases = client_matrix.get_release_tags(lang)
else:
# Look for a particular release.
if release_arg not in client_matrix.get_release_tags(lang):
jobset.message(
"SKIPPED",
"release %s for %s is not defined" % (release_arg, lang),
do_newline=True,
)
return {}
releases = [release_arg]
# Image tuples keyed by runtime.
images = {}
for tag in releases:
for runtime in client_matrix.get_runtimes_for_lang_release(lang, tag):
image_name = "%s/grpc_interop_%s:%s" % (
image_path_prefix,
runtime,
tag,
)
image_tuple = (tag, image_name)
if runtime not in images:
images[runtime] = []
images[runtime].append(image_tuple)
return images
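# Illustrative example of the returned structure (hypothetical values): for
# lang="go" with a single release this could be
#   {"go1.8": [("v1.3.0", "gcr.io/grpc-testing/grpc_interop_go1.8:v1.3.0")]}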
def _read_test_cases_file(lang, runtime, release):
"""Read test cases from a bash-like file and return a list of commands"""
# Check to see if we need to use a particular version of test cases.
    testcases_file = None  # default, so the lookup below is safe if the release is not in the matrix
    release_info = client_matrix.LANG_RELEASE_MATRIX[lang].get(release)
if release_info:
testcases_file = release_info.testcases_file
if not testcases_file:
# TODO(jtattermusch): remove the double-underscore, it is pointless
testcases_file = "%s__master" % lang
# For csharp, the testcases file used depends on the runtime
# TODO(jtattermusch): remove this odd specialcase
if lang == "csharp" and runtime == "csharpcoreclr":
testcases_file = testcases_file.replace("csharp_", "csharpcoreclr_")
testcases_filepath = os.path.join(
os.path.dirname(__file__), "testcases", testcases_file
)
lines = []
with open(testcases_filepath) as f:
for line in f.readlines():
line = re.sub("\\#.*$", "", line) # remove hash comments
line = line.strip()
if line and not line.startswith("echo"):
                # Each non-empty line is treated as a test case command
lines.append(line)
return lines
def _cleanup_docker_image(image):
jobset.message("START", "Cleanup docker image %s" % image, do_newline=True)
dockerjob.remove_image(image, skip_nonexistent=True)
args = argp.parse_args()
# caches test cases (list of JobSpec) loaded from file. Keyed by lang and runtime.
def _generate_test_case_jobspecs(lang, runtime, release, suite_name):
"""Returns the list of test cases from testcase files per lang/release."""
testcase_lines = _read_test_cases_file(lang, runtime, release)
job_spec_list = []
for line in testcase_lines:
print("Creating jobspec with cmdline '{}'".format(line))
# TODO(jtattermusch): revisit the logic for updating test case commands
        # what is currently being done seems fragile.
# Extract test case name from the command line
m = re.search(r"--test_case=(\w+)", line)
testcase_name = m.group(1) if m else "unknown_test"
# Extract the server name from the command line
if "--server_host_override=" in line:
m = re.search(
r"--server_host_override=((.*).sandbox.googleapis.com)", line
)
else:
m = re.search(r"--server_host=((.*).sandbox.googleapis.com)", line)
server = m.group(1) if m else "unknown_server"
server_short = m.group(2) if m else "unknown_server"
# replace original server_host argument
assert "--server_host=" in line
line = re.sub(
r"--server_host=[^ ]*", r"--server_host=%s" % args.server_host, line
)
# some interop tests don't set server_host_override (see #17407),
# but we need to use it if different host is set via cmdline args.
if args.server_host != server and not "--server_host_override=" in line:
line = re.sub(
r"(--server_host=[^ ]*)",
r"\1 --server_host_override=%s" % server,
line,
)
spec = jobset.JobSpec(
cmdline=line,
shortname="%s:%s:%s:%s"
% (suite_name, lang, server_short, testcase_name),
timeout_seconds=_TEST_TIMEOUT_SECONDS,
shell=True,
flake_retries=5 if args.allow_flakes else 0,
)
job_spec_list.append(spec)
return job_spec_list
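# Illustrative example of the rewriting above (hypothetical hosts): a testcase line
#   ... --server_host=grpc-test.sandbox.googleapis.com --test_case=empty_unary
# becomes
#   ... --server_host=74.125.206.210 --server_host_override=grpc-test.sandbox.googleapis.com --test_case=empty_unary
# so traffic is sent to args.server_host while TLS/authority checks still see
# the original sandbox hostname.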
def _pull_image_for_lang(lang, image, release):
"""Pull an image for a given language form the image registry."""
cmdline = [
"time gcloud docker -- pull %s && time docker run --rm=true %s"
" /bin/true" % (image, image)
]
return jobset.JobSpec(
cmdline=cmdline,
shortname="pull_image_{}".format(image),
timeout_seconds=_PULL_IMAGE_TIMEOUT_SECONDS,
shell=True,
flake_retries=2,
)
def _test_release(lang, runtime, release, image, xml_report_tree, skip_tests):
total_num_failures = 0
suite_name = "%s__%s_%s" % (lang, runtime, release)
job_spec_list = _generate_test_case_jobspecs(
lang, runtime, release, suite_name
)
if not job_spec_list:
jobset.message("FAILED", "No test cases were found.", do_newline=True)
total_num_failures += 1
else:
num_failures, resultset = jobset.run(
job_spec_list,
newline_on_success=True,
add_env={"docker_image": image},
maxjobs=args.jobs,
skip_jobs=skip_tests,
)
if args.bq_result_table and resultset:
upload_test_results.upload_interop_results_to_bq(
resultset, args.bq_result_table
)
if skip_tests:
jobset.message("FAILED", "Tests were skipped", do_newline=True)
total_num_failures += 1
if num_failures:
total_num_failures += num_failures
report_utils.append_junit_xml_results(
xml_report_tree,
resultset,
"grpc_interop_matrix",
suite_name,
str(uuid.uuid4()),
)
return total_num_failures
def _run_tests_for_lang(lang, runtime, images, xml_report_tree):
"""Find and run all test cases for a language.
    images is a list of (<release-tag>, <image-full-path>) tuples.
"""
skip_tests = False
total_num_failures = 0
max_pull_jobs = min(args.jobs, _MAX_PARALLEL_DOWNLOADS)
max_chunk_size = max_pull_jobs
chunk_count = (len(images) + max_chunk_size) // max_chunk_size
for chunk_index in range(chunk_count):
chunk_start = chunk_index * max_chunk_size
chunk_size = min(max_chunk_size, len(images) - chunk_start)
chunk_end = chunk_start + chunk_size
pull_specs = []
if not skip_tests:
for release, image in images[chunk_start:chunk_end]:
pull_specs.append(_pull_image_for_lang(lang, image, release))
# NOTE(rbellevi): We batch docker pull operations to maximize
# parallelism, without letting the disk usage grow unbounded.
pull_failures, _ = jobset.run(
pull_specs, newline_on_success=True, maxjobs=max_pull_jobs
)
if pull_failures:
jobset.message(
"FAILED",
'Image download failed. Skipping tests for language "%s"'
% lang,
do_newline=True,
)
skip_tests = True
for release, image in images[chunk_start:chunk_end]:
total_num_failures += _test_release(
lang, runtime, release, image, xml_report_tree, skip_tests
)
if not args.keep:
for _, image in images[chunk_start:chunk_end]:
_cleanup_docker_image(image)
if not total_num_failures:
jobset.message(
"SUCCESS", "All {} tests passed".format(lang), do_newline=True
)
else:
jobset.message(
"FAILED", "Some {} tests failed".format(lang), do_newline=True
)
return total_num_failures
languages = args.language if args.language != ["all"] else _LANGUAGES
total_num_failures = 0
_xml_report_tree = report_utils.new_junit_xml_tree()
for lang in languages:
docker_images = _get_test_images_for_lang(lang, args.release, args.gcr_path)
for runtime in sorted(docker_images.keys()):
total_num_failures += _run_tests_for_lang(
lang, runtime, docker_images[runtime], _xml_report_tree
)
report_utils.create_xml_report_file(_xml_report_tree, args.report_file)
if total_num_failures:
sys.exit(1)
sys.exit(0)
| 12,012
| 32.185083
| 83
|
py
|
grpc
|
grpc-master/tools/codegen/core/gen_grpc_tls_credentials_options.py
|
#!/usr/bin/env python3
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generator script for src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h and test/core/security/grpc_tls_credentials_options_comparator_test.cc
# Should be executed from grpc's root directory.
from __future__ import print_function
import collections
from dataclasses import dataclass
import difflib
import filecmp
import os
import sys
import tempfile
@dataclass
class DataMember:
name: str # name of the data member without the trailing '_'
type: str # Type (eg. std::string, bool)
test_name: str # The name to use for the associated test
test_value_1: str # Test-specific value to use for comparison
test_value_2: str # Test-specific value (different from test_value_1)
default_initializer: str = ( # If non-empty, this will be used as the default initialization of this field
""
)
getter_comment: str = "" # Comment to add before the getter for this field
special_getter_return_type: str = ( # Override for the return type of getter (eg. const std::string&)
""
)
override_getter: str = ( # Override for the entire getter method. Relevant for certificate_verifier and certificate_provider
""
)
    setter_comment: str = ""  # Comment to add before the setter for this field
setter_move_semantics: bool = False # Should the setter use move-semantics
special_comparator: str = ( # If non-empty, this will be used in `operator==`
""
)
_DATA_MEMBERS = [
DataMember(
name="cert_request_type",
type="grpc_ssl_client_certificate_request_type",
default_initializer="GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE",
test_name="DifferentCertRequestType",
test_value_1="GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE",
test_value_2="GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY",
),
DataMember(
name="verify_server_cert",
type="bool",
default_initializer="true",
test_name="DifferentVerifyServerCert",
test_value_1="false",
test_value_2="true",
),
DataMember(
name="min_tls_version",
type="grpc_tls_version",
default_initializer="grpc_tls_version::TLS1_2",
test_name="DifferentMinTlsVersion",
test_value_1="grpc_tls_version::TLS1_2",
test_value_2="grpc_tls_version::TLS1_3",
),
DataMember(
name="max_tls_version",
type="grpc_tls_version",
default_initializer="grpc_tls_version::TLS1_3",
test_name="DifferentMaxTlsVersion",
test_value_1="grpc_tls_version::TLS1_2",
test_value_2="grpc_tls_version::TLS1_3",
),
DataMember(
name="certificate_verifier",
type="grpc_core::RefCountedPtr<grpc_tls_certificate_verifier>",
override_getter="""grpc_tls_certificate_verifier* certificate_verifier() {
return certificate_verifier_.get();
}""",
setter_move_semantics=True,
special_comparator=(
"(certificate_verifier_ == other.certificate_verifier_ ||"
" (certificate_verifier_ != nullptr && other.certificate_verifier_"
" != nullptr &&"
" certificate_verifier_->Compare(other.certificate_verifier_.get())"
" == 0))"
),
test_name="DifferentCertificateVerifier",
test_value_1="MakeRefCounted<HostNameCertificateVerifier>()",
test_value_2='MakeRefCounted<XdsCertificateVerifier>(nullptr, "")',
),
DataMember(
name="check_call_host",
type="bool",
default_initializer="true",
test_name="DifferentCheckCallHost",
test_value_1="false",
test_value_2="true",
),
DataMember(
name="certificate_provider",
type="grpc_core::RefCountedPtr<grpc_tls_certificate_provider>",
getter_comment=(
"Returns the distributor from certificate_provider_ if it is set,"
" nullptr otherwise."
),
override_getter="""grpc_tls_certificate_distributor* certificate_distributor() {
if (certificate_provider_ != nullptr) { return certificate_provider_->distributor().get(); }
return nullptr;
}""",
setter_move_semantics=True,
special_comparator=(
"(certificate_provider_ == other.certificate_provider_ ||"
" (certificate_provider_ != nullptr && other.certificate_provider_"
" != nullptr &&"
" certificate_provider_->Compare(other.certificate_provider_.get())"
" == 0))"
),
test_name="DifferentCertificateProvider",
test_value_1=(
'MakeRefCounted<StaticDataCertificateProvider>("root_cert_1",'
" PemKeyCertPairList())"
),
test_value_2=(
'MakeRefCounted<StaticDataCertificateProvider>("root_cert_2",'
" PemKeyCertPairList())"
),
),
DataMember(
name="watch_root_cert",
type="bool",
default_initializer="false",
setter_comment=(
"If need to watch the updates of root certificates with name"
" |root_cert_name|. The default value is false. If used in"
" tls_credentials, it should always be set to true unless the root"
" certificates are not needed."
),
test_name="DifferentWatchRootCert",
test_value_1="false",
test_value_2="true",
),
DataMember(
name="root_cert_name",
type="std::string",
special_getter_return_type="const std::string&",
setter_comment=(
"Sets the name of root certificates being watched, if"
" |set_watch_root_cert| is called. If not set, an empty string will"
" be used as the name."
),
setter_move_semantics=True,
test_name="DifferentRootCertName",
test_value_1='"root_cert_name_1"',
test_value_2='"root_cert_name_2"',
),
DataMember(
name="watch_identity_pair",
type="bool",
default_initializer="false",
setter_comment=(
"If need to watch the updates of identity certificates with name"
" |identity_cert_name|. The default value is false. If used in"
" tls_credentials, it should always be set to true unless the"
" identity key-cert pairs are not needed."
),
test_name="DifferentWatchIdentityPair",
test_value_1="false",
test_value_2="true",
),
DataMember(
name="identity_cert_name",
type="std::string",
special_getter_return_type="const std::string&",
setter_comment=(
"Sets the name of identity key-cert pairs being watched, if"
" |set_watch_identity_pair| is called. If not set, an empty string"
" will be used as the name."
),
setter_move_semantics=True,
test_name="DifferentIdentityCertName",
test_value_1='"identity_cert_name_1"',
test_value_2='"identity_cert_name_2"',
),
DataMember(
name="tls_session_key_log_file_path",
type="std::string",
special_getter_return_type="const std::string&",
setter_move_semantics=True,
test_name="DifferentTlsSessionKeyLogFilePath",
test_value_1='"file_path_1"',
test_value_2='"file_path_2"',
),
DataMember(
name="crl_directory",
type="std::string",
special_getter_return_type="const std::string&",
setter_comment=(
" gRPC will enforce CRLs on all handshakes from all hashed CRL"
" files inside of the crl_directory. If not set, an empty string"
" will be used, which will not enable CRL checking. Only supported"
" for OpenSSL version > 1.1."
),
setter_move_semantics=True,
test_name="DifferentCrlDirectory",
test_value_1='"crl_directory_1"',
test_value_2='"crl_directory_2"',
),
DataMember(
name="send_client_ca_list",
type="bool",
default_initializer="false",
test_name="DifferentSendClientCaListValues",
test_value_1="false",
test_value_2="true",
),
]
# Print a copyright notice with the given year
def put_copyright(f, year):
print(
"""//
//
// Copyright %s gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//
"""
% (year),
file=f,
)
# Prints differences between two files
def get_file_differences(file1, file2):
with open(file1) as f1:
file1_text = f1.readlines()
with open(file2) as f2:
file2_text = f2.readlines()
return difflib.unified_diff(
file1_text, file2_text, fromfile=file1, tofile=file2
)
# Is this script executed in test mode?
test_mode = False
if len(sys.argv) > 1 and sys.argv[1] == "--test":
test_mode = True
HEADER_FILE_NAME = (
"src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h"
)
# Generate src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h
header_file_name = HEADER_FILE_NAME
if test_mode:
header_file_name = tempfile.NamedTemporaryFile(delete=False).name
H = open(header_file_name, "w")
put_copyright(H, "2018")
print(
"// Generated by tools/codegen/core/gen_grpc_tls_credentials_options.py\n",
file=H,
)
print(
"""#ifndef GRPC_SRC_CORE_LIB_SECURITY_CREDENTIALS_TLS_GRPC_TLS_CREDENTIALS_OPTIONS_H
#define GRPC_SRC_CORE_LIB_SECURITY_CREDENTIALS_TLS_GRPC_TLS_CREDENTIALS_OPTIONS_H
#include <grpc/support/port_platform.h>
#include "absl/container/inlined_vector.h"
#include <grpc/grpc_security.h>
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/security/credentials/tls/grpc_tls_certificate_distributor.h"
#include "src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.h"
#include "src/core/lib/security/credentials/tls/grpc_tls_certificate_verifier.h"
#include "src/core/lib/security/security_connector/ssl_utils.h"
// Contains configurable options specified by callers to configure their certain
// security features supported in TLS.
// TODO(ZhenLian): consider making this not ref-counted.
struct grpc_tls_credentials_options
: public grpc_core::RefCounted<grpc_tls_credentials_options> {
public:
~grpc_tls_credentials_options() override = default;
""",
file=H,
)
# Print out getters for all data members
print(" // Getters for member fields.", file=H)
for data_member in _DATA_MEMBERS:
if data_member.getter_comment != "":
print(" // " + data_member.getter_comment, file=H)
if data_member.override_getter:
print(" " + data_member.override_getter, file=H)
else:
print(
" %s %s() const { return %s; }"
% (
data_member.special_getter_return_type
if data_member.special_getter_return_type != ""
else data_member.type,
data_member.name,
data_member.name + "_",
),
file=H,
)
# Print out setters for all data members
print("", file=H)
print(" // Setters for member fields.", file=H)
for data_member in _DATA_MEMBERS:
if data_member.setter_comment != "":
print(" // " + data_member.setter_comment, file=H)
if data_member.setter_move_semantics:
print(
" void set_%s(%s %s) { %s_ = std::move(%s); }"
% (
data_member.name,
data_member.type,
data_member.name,
data_member.name,
data_member.name,
),
file=H,
)
else:
print(
" void set_%s(%s %s) { %s_ = %s; }"
% (
data_member.name,
data_member.type,
data_member.name,
data_member.name,
data_member.name,
),
file=H,
)
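# Illustrative example of a generated setter: for the move-semantics field
# root_cert_name the loop above emits
#   void set_root_cert_name(std::string root_cert_name) { root_cert_name_ = std::move(root_cert_name); }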
# Write out operator==
print(
"\n bool operator==(const grpc_tls_credentials_options& other) const {",
file=H,
)
operator_equal_content = " return "
for i in range(len(_DATA_MEMBERS)):
if i != 0:
operator_equal_content += " "
if _DATA_MEMBERS[i].special_comparator != "":
operator_equal_content += _DATA_MEMBERS[i].special_comparator
else:
operator_equal_content += (
_DATA_MEMBERS[i].name + "_ == other." + _DATA_MEMBERS[i].name + "_"
)
if i != len(_DATA_MEMBERS) - 1:
operator_equal_content += " &&\n"
print(operator_equal_content + ";\n }", file=H)
# Print out data member declarations
print("\n private:", file=H)
for data_member in _DATA_MEMBERS:
if data_member.default_initializer == "":
print(
" %s %s_;"
% (
data_member.type,
data_member.name,
),
file=H,
)
else:
print(
" %s %s_ = %s;"
% (
data_member.type,
data_member.name,
data_member.default_initializer,
),
file=H,
)
# Print out file ending
print(
"""};
#endif // GRPC_SRC_CORE_LIB_SECURITY_CREDENTIALS_TLS_GRPC_TLS_CREDENTIALS_OPTIONS_H""",
file=H,
)
H.close()
# Generate test/core/security/grpc_tls_credentials_options_comparator_test.cc
TEST_FILE_NAME = (
"test/core/security/grpc_tls_credentials_options_comparator_test.cc"
)
test_file_name = TEST_FILE_NAME
if test_mode:
test_file_name = tempfile.NamedTemporaryFile(delete=False).name
T = open(test_file_name, "w")
put_copyright(T, "2022")
print(
"// Generated by tools/codegen/core/gen_grpc_tls_credentials_options.py",
file=T,
)
print(
"""
#include <grpc/support/port_platform.h>
#include <string>
#include <gmock/gmock.h>
#include "src/core/lib/security/credentials/xds/xds_credentials.h"
#include "src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h"
#include "test/core/util/test_config.h"
namespace grpc_core {
namespace {
""",
file=T,
)
# Generate negative test for each negative member
for data_member in _DATA_MEMBERS:
print(
"""TEST(TlsCredentialsOptionsComparatorTest, %s) {
auto* options_1 = grpc_tls_credentials_options_create();
auto* options_2 = grpc_tls_credentials_options_create();
options_1->set_%s(%s);
options_2->set_%s(%s);
EXPECT_FALSE(*options_1 == *options_2);
EXPECT_FALSE(*options_2 == *options_1);
delete options_1;
delete options_2;
}"""
% (
data_member.test_name,
data_member.name,
data_member.test_value_1,
data_member.name,
data_member.test_value_2,
),
file=T,
)
# Print out file ending
print(
"""
} // namespace
} // namespace grpc_core
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
grpc::testing::TestEnvironment env(&argc, argv);
grpc_init();
auto result = RUN_ALL_TESTS();
grpc_shutdown();
return result;
}""",
file=T,
)
T.close()
if test_mode:
header_diff = get_file_differences(header_file_name, HEADER_FILE_NAME)
test_diff = get_file_differences(test_file_name, TEST_FILE_NAME)
os.unlink(header_file_name)
os.unlink(test_file_name)
header_error = False
for line in header_diff:
print(line)
header_error = True
if header_error:
print(
HEADER_FILE_NAME
+ " should not be manually modified. Please make changes to"
" tools/distrib/gen_grpc_tls_credentials_options.py instead."
)
test_error = False
for line in test_diff:
print(line)
test_error = True
if test_error:
print(
TEST_FILE_NAME
+ " should not be manually modified. Please make changes to"
" tools/distrib/gen_grpc_tls_credentials_options.py instead."
)
if header_error or test_error:
sys.exit(1)
| 17,033
| 31.757692
| 162
|
py
|
grpc
|
grpc-master/tools/codegen/core/gen_server_registered_method_bad_client_test_body.py
|
#!/usr/bin/env python3
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
hex_bytes = [ord(c) for c in "abcdefABCDEF0123456789"]  # assumed: byte values of ASCII hex digits, needed by esc_c below
def esc_c(line):
out = '"'
last_was_hex = False
for c in line:
if 32 <= c < 127:
if c in hex_bytes and last_was_hex:
out += '""'
if c != ord('"'):
out += chr(c)
else:
out += '\\"'
last_was_hex = False
else:
out += "\\x%02x" % c
last_was_hex = True
return out + '"'
done = set()
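# Sketch of what the loops below construct: each payload is a gRPC
# length-prefixed message header (1 compression byte + 4 big-endian length
# bytes) followed by send_message_length zero bytes of body, and each frame is
# a 9-byte HTTP/2 frame header (24-bit length, type DATA, END_STREAM set only
# when the frame carries the complete message, stream id 1) followed by a
# prefix of that payload.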
for message_length in range(0, 3):
for send_message_length in range(0, message_length + 1):
payload = [
0,
(message_length >> 24) & 0xFF,
(message_length >> 16) & 0xFF,
(message_length >> 8) & 0xFF,
(message_length) & 0xFF,
] + send_message_length * [0]
for frame_length in range(0, len(payload) + 1):
is_end = (
frame_length == len(payload)
and send_message_length == message_length
)
frame = [
(frame_length >> 16) & 0xFF,
(frame_length >> 8) & 0xFF,
(frame_length) & 0xFF,
0,
1 if is_end else 0,
0,
0,
0,
1,
] + payload[0:frame_length]
text = esc_c(frame)
if text not in done:
print(
"GRPC_RUN_BAD_CLIENT_TEST(verifier_%s, PFX_STR %s, %s);"
% (
"succeeds" if is_end else "fails",
text,
"0" if is_end else "GRPC_BAD_CLIENT_DISCONNECT",
)
)
done.add(text)
| 2,309
| 30.216216
| 76
|
py
|
grpc
|
grpc-master/tools/codegen/core/optimize_arena_pool_sizes.py
|
#!/usr/bin/env python3
# Copyright 2023 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# USAGE:
# Run some tests with the GRPC_ARENA_TRACE_POOLED_ALLOCATIONS #define turned on.
# Capture the output to a text file.
# Invoke this program with that as an argument, and let it work its magic.
import collections
import heapq
import random
import re
import sys
# A single allocation, negative size => free
Allocation = collections.namedtuple("Allocation", "size ptr")
Active = collections.namedtuple("Active", "id size")
# Read through all the captures, and build up scrubbed traces
arenas = []
building = collections.defaultdict(list)
active = {}
biggest = 0
smallest = 1024
sizes = set()
for filename in sys.argv[1:]:
for line in open(filename):
m = re.search(
r"ARENA 0x([0-9a-f]+) ALLOC ([0-9]+) @ 0x([0-9a-f]+)", line
)
if m:
size = int(m.group(2))
if size > biggest:
biggest = size
if size < smallest:
smallest = size
active[m.group(3)] = Active(m.group(1), size)
building[m.group(1)].append(size)
sizes.add(size)
m = re.search(r"FREE 0x([0-9a-f]+)", line)
if m:
# We may have spurious frees, so make sure there's an outstanding allocation
last = active.pop(m.group(1), None)
if last is not None:
building[last.id].append(-last.size)
m = re.search(r"DESTRUCT_ARENA 0x([0-9a-f]+)", line)
if m:
trace = building.pop(m.group(1), None)
if trace:
arenas.append(trace)
# Given a list of pool sizes, return which bucket an allocation should go into
def bucket(pool_sizes, size):
for bucket in sorted(pool_sizes):
if abs(size) <= bucket:
return bucket
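# Illustrative example (hypothetical pool sizes): bucket([64, 256, 1024], 100)
# and bucket([64, 256, 1024], -100) both return 256, since frees are recorded
# with a negative size and matched to a pool by absolute value.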
# Given a list of pool sizes, determine the total outstanding bytes in the arena for one trace
def outstanding_bytes(pool_sizes, trace):
free_list = collections.defaultdict(int)
allocated = 0
for size in trace:
b = bucket(pool_sizes, size)
if size < 0:
free_list[b] += 1
else:
if free_list[b] > 0:
free_list[b] -= 1
else:
allocated += b
return allocated + len(pool_sizes) * 8
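# Note: the trailing "+ len(pool_sizes) * 8" presumably charges 8 bytes of
# per-pool bookkeeping overhead, so adding more pool sizes is not free even
# when it reduces internal fragmentation.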
# Given a list of pool sizes, determine the maximum outstanding bytes for any seen trace
def measure(pool_sizes):
max_outstanding = 0
for trace in arenas:
max_outstanding = max(
max_outstanding, outstanding_bytes(pool_sizes, trace)
)
return max_outstanding
ALWAYS_INCLUDE = 1024
best = [ALWAYS_INCLUDE, biggest]
best_measure = measure(best)
testq = []
step = 0
def add(l):
global testq, best_measure, best
m = measure(l)
if m < best_measure:
best_measure = m
best = l
if l[-1] == smallest:
return
heapq.heappush(testq, (m, l))
add(best)
while testq:
top = heapq.heappop(testq)[1]
m = measure(top)
step += 1
if step % 1000 == 0:
print(
"iter %d; pending=%d; top=%r/%d"
% (step, len(testq), top, measure(top))
)
for i in sizes:
if i >= top[-1]:
continue
add(top + [i])
print("SAW SIZES: %r" % sorted(list(sizes)))
print("BEST: %r" % list(reversed(best)))
print("BEST MEASURE: %d" % best_measure)
| 3,921
| 27.627737
| 95
|
py
|
grpc
|
grpc-master/tools/codegen/core/gen_settings_ids.py
|
#!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import collections
import sys
import perfection
_MAX_HEADER_LIST_SIZE = 16 * 1024 * 1024
Setting = collections.namedtuple("Setting", "id default min max on_error")
OnError = collections.namedtuple("OnError", "behavior code")
clamp_invalid_value = OnError("CLAMP_INVALID_VALUE", "PROTOCOL_ERROR")
disconnect_on_invalid_value = lambda e: OnError(
"DISCONNECT_ON_INVALID_VALUE", e
)
DecoratedSetting = collections.namedtuple(
"DecoratedSetting", "enum name setting"
)
_SETTINGS = {
"HEADER_TABLE_SIZE": Setting(1, 4096, 0, 0xFFFFFFFF, clamp_invalid_value),
"ENABLE_PUSH": Setting(
2, 1, 0, 1, disconnect_on_invalid_value("PROTOCOL_ERROR")
),
"MAX_CONCURRENT_STREAMS": Setting(
3,
0xFFFFFFFF,
0,
0xFFFFFFFF,
disconnect_on_invalid_value("PROTOCOL_ERROR"),
),
"INITIAL_WINDOW_SIZE": Setting(
4,
65535,
0,
0x7FFFFFFF,
disconnect_on_invalid_value("FLOW_CONTROL_ERROR"),
),
"MAX_FRAME_SIZE": Setting(
5, 16384, 16384, 16777215, disconnect_on_invalid_value("PROTOCOL_ERROR")
),
"MAX_HEADER_LIST_SIZE": Setting(
6, _MAX_HEADER_LIST_SIZE, 0, _MAX_HEADER_LIST_SIZE, clamp_invalid_value
),
"GRPC_ALLOW_TRUE_BINARY_METADATA": Setting(
0xFE03, 0, 0, 1, clamp_invalid_value
),
"GRPC_PREFERRED_RECEIVE_CRYPTO_FRAME_SIZE": Setting(
0xFE04, 0, 16384, 0x7FFFFFFF, clamp_invalid_value
),
}
H = open("src/core/ext/transport/chttp2/transport/http2_settings.h", "w")
C = open("src/core/ext/transport/chttp2/transport/http2_settings.cc", "w")
# utility: print a big comment block into a set of files
def put_banner(files, banner):
for f in files:
print("/*", file=f)
for line in banner:
print(" * %s" % line, file=f)
print(" */", file=f)
print(file=f)
# copy-paste copyright notice from this file
with open(sys.argv[0]) as my_source:
copyright = []
for line in my_source:
if line[0] != "#":
break
for line in my_source:
if line[0] == "#":
copyright.append(line)
break
for line in my_source:
if line[0] != "#":
break
copyright.append(line)
put_banner([H, C], [line[2:].rstrip() for line in copyright])
put_banner(
[H, C],
["Automatically generated by tools/codegen/core/gen_settings_ids.py"],
)
print(
"#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H", file=H
)
print(
"#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H", file=H
)
print(file=H)
print("#include <grpc/support/port_platform.h>", file=H)
print("#include <stdint.h>", file=H)
print(file=H)
print("#include <grpc/support/port_platform.h>", file=C)
print(
'#include "src/core/ext/transport/chttp2/transport/http2_settings.h"',
file=C,
)
print(file=C)
print('#include "src/core/lib/gpr/useful.h"', file=C)
print('#include "src/core/lib/transport/http2_errors.h"', file=C)
print(file=C)
p = perfection.hash_parameters(sorted(x.id for x in list(_SETTINGS.values())))
print(p)
def hash(i):
i += p.offset
x = i % p.t
y = i // p.t
return x + p.r[y]
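# Sketch of the scheme: `perfection` computes perfect-hash parameters over the
# wire-format setting ids, so hash(id) maps every known setting to a distinct
# small slot that becomes its enum value. The generated
# grpc_wire_id_to_setting_id() repeats the same arithmetic at run time and
# rejects unknown wire ids by checking grpc_setting_id_to_wire_id[h] == wire_id.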
decorated_settings = [
DecoratedSetting(hash(setting.id), name, setting)
for name, setting in _SETTINGS.items()
]
print("typedef enum {", file=H)
for decorated_setting in sorted(decorated_settings):
print(
" GRPC_CHTTP2_SETTINGS_%s = %d, /* wire id %d */"
% (
decorated_setting.name,
decorated_setting.enum,
decorated_setting.setting.id,
),
file=H,
)
print("} grpc_chttp2_setting_id;", file=H)
print(file=H)
print(
"#define GRPC_CHTTP2_NUM_SETTINGS %d"
% (max(x.enum for x in decorated_settings) + 1),
file=H,
)
print("extern const uint16_t grpc_setting_id_to_wire_id[];", file=H)
print(
"const uint16_t grpc_setting_id_to_wire_id[] = {%s};"
% ",".join("%d" % s for s in p.slots),
file=C,
)
print(file=H)
print(
(
"bool grpc_wire_id_to_setting_id(uint32_t wire_id,"
" grpc_chttp2_setting_id *out);"
),
file=H,
)
cgargs = {
"r": ",".join("%d" % (r if r is not None else 0) for r in p.r),
"t": p.t,
"offset": abs(p.offset),
"offset_sign": "+" if p.offset > 0 else "-",
}
print(
"""
bool grpc_wire_id_to_setting_id(uint32_t wire_id, grpc_chttp2_setting_id *out) {
uint32_t i = wire_id %(offset_sign)s %(offset)d;
uint32_t x = i %% %(t)d;
uint32_t y = i / %(t)d;
uint32_t h = x;
switch (y) {
"""
% cgargs,
file=C,
)
for i, r in enumerate(p.r):
if not r:
continue
if r < 0:
print("case %d: h -= %d; break;" % (i, -r), file=C)
else:
print("case %d: h += %d; break;" % (i, r), file=C)
print(
"""
}
*out = static_cast<grpc_chttp2_setting_id>(h);
return h < GPR_ARRAY_SIZE(grpc_setting_id_to_wire_id) && grpc_setting_id_to_wire_id[h] == wire_id;
}
"""
% cgargs,
file=C,
)
print(
"""
typedef enum {
GRPC_CHTTP2_CLAMP_INVALID_VALUE,
GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE
} grpc_chttp2_invalid_value_behavior;
typedef struct {
const char *name;
uint32_t default_value;
uint32_t min_value;
uint32_t max_value;
grpc_chttp2_invalid_value_behavior invalid_value_behavior;
uint32_t error_value;
} grpc_chttp2_setting_parameters;
extern const grpc_chttp2_setting_parameters grpc_chttp2_settings_parameters[GRPC_CHTTP2_NUM_SETTINGS];
""",
file=H,
)
print(
(
"const grpc_chttp2_setting_parameters"
" grpc_chttp2_settings_parameters[GRPC_CHTTP2_NUM_SETTINGS] = {"
),
file=C,
)
i = 0
for decorated_setting in sorted(decorated_settings):
while i < decorated_setting.enum:
print(
(
"{NULL, 0, 0, 0, GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE,"
" GRPC_HTTP2_PROTOCOL_ERROR},"
),
file=C,
)
i += 1
print(
'{"%s", %du, %du, %du, GRPC_CHTTP2_%s, GRPC_HTTP2_%s},'
% (
decorated_setting.name,
decorated_setting.setting.default,
decorated_setting.setting.min,
decorated_setting.setting.max,
decorated_setting.setting.on_error.behavior,
decorated_setting.setting.on_error.code,
),
file=C,
)
i += 1
print("};", file=C)
print(file=H)
print(
"#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H */",
file=H,
)
H.close()
C.close()
| 7,162
| 25.431734
| 102
|
py
|
grpc
|
grpc-master/tools/codegen/core/gen_stats_data.py
|
#!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import collections
import ctypes
import json
import math
import sys
import yaml
with open("src/core/lib/debug/stats_data.yaml") as f:
    attrs = yaml.safe_load(f.read())
REQUIRED_FIELDS = ["name", "doc"]
def make_type(name, fields):
return (
collections.namedtuple(
name, " ".join(list(set(REQUIRED_FIELDS + fields)))
),
[],
)
def c_str(s, encoding="ascii"):
if isinstance(s, str):
s = s.encode(encoding)
result = ""
for c in s:
c = chr(c) if isinstance(c, int) else c
if not (32 <= ord(c) < 127) or c in ("\\", '"'):
result += "\\%03o" % ord(c)
else:
result += c
return '"' + result + '"'
types = (
make_type("Counter", []),
make_type("Histogram", ["max", "buckets"]),
)
Shape = collections.namedtuple("Shape", "max buckets")
inst_map = dict((t[0].__name__, t[1]) for t in types)
stats = []
for attr in attrs:
found = False
for t, lst in types:
t_name = t.__name__.lower()
if t_name in attr:
name = attr[t_name]
del attr[t_name]
lst.append(t(name=name, **attr))
found = True
break
assert found, "Bad decl: %s" % attr
def dbl2u64(d):
return ctypes.c_ulonglong.from_buffer(ctypes.c_double(d)).value
def u642dbl(d):
return ctypes.c_double.from_buffer(ctypes.c_ulonglong(d)).value
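# Note: dbl2u64()/u642dbl() reinterpret the raw IEEE-754 bits of a double as a
# uint64 and back. For non-negative doubles that bit pattern increases
# monotonically with the value, which is what lets the generated BucketFor()
# use a shift and table lookup on the bit pattern instead of comparisons.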
def shift_works_until(mapped_bounds, shift_bits):
for i, ab in enumerate(zip(mapped_bounds, mapped_bounds[1:])):
a, b = ab
if (a >> shift_bits) == (b >> shift_bits):
return i
return len(mapped_bounds)
def find_ideal_shift(mapped_bounds, max_size):
best = None
for shift_bits in reversed(list(range(0, 64))):
n = shift_works_until(mapped_bounds, shift_bits)
if n == 0:
continue
table_size = mapped_bounds[n - 1] >> shift_bits
if table_size > max_size:
continue
if best is None:
best = (shift_bits, n, table_size)
elif best[1] < n:
best = (shift_bits, n, table_size)
return best
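# Sketch: find_ideal_shift() looks for a right shift that keeps as many
# consecutive mapped bucket bounds distinguishable as possible while the
# resulting lookup table stays within max_size entries; a larger shift means a
# smaller kStatsTable in the generated code.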
def gen_map_table(mapped_bounds, shift_data):
# print("gen_map_table(%s, %s)" % (mapped_bounds, shift_data))
tbl = []
cur = 0
mapped_bounds = [x >> shift_data[0] for x in mapped_bounds]
for i in range(0, mapped_bounds[shift_data[1] - 1]):
while i > mapped_bounds[cur]:
cur += 1
tbl.append(cur)
return tbl
static_tables = []
def decl_static_table(values, type):
global static_tables
v = (type, values)
for i, vp in enumerate(static_tables):
if v == vp:
return i
r = len(static_tables)
static_tables.append(v)
return r
def type_for_uint_table(table):
mv = max(table)
if mv < 2**8:
return "uint8_t"
elif mv < 2**16:
return "uint16_t"
elif mv < 2**32:
return "uint32_t"
else:
return "uint64_t"
def merge_cases(cases):
l = len(cases)
if l == 1:
return cases[0][1]
left_len = l // 2
left = cases[0:left_len]
right = cases[left_len:]
return "if (value < %d) {\n%s\n} else {\n%s\n}" % (
left[-1][0],
merge_cases(left),
merge_cases(right),
)
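# Sketch: merge_cases() takes (upper_bound, code) pairs sorted by bound and
# emits nested if/else C++ that branches on the last bound of the left half,
# so the generated BucketFor() effectively binary-searches the thresholds.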
def gen_bucket_code(shape):
bounds = [0, 1]
done_trivial = False
done_unmapped = False
first_nontrivial = None
first_unmapped = None
while len(bounds) < shape.buckets + 1:
if len(bounds) == shape.buckets:
nextb = int(shape.max)
else:
mul = math.pow(
float(shape.max) / bounds[-1],
1.0 / (shape.buckets + 1 - len(bounds)),
)
nextb = int(math.ceil(bounds[-1] * mul))
if nextb <= bounds[-1] + 1:
nextb = bounds[-1] + 1
elif not done_trivial:
done_trivial = True
first_nontrivial = len(bounds)
bounds.append(nextb)
bounds_idx = decl_static_table(bounds, "int")
# print first_nontrivial, shift_data, bounds
# if shift_data is not None: print [hex(x >> shift_data[0]) for x in code_bounds[first_nontrivial:]]
if first_nontrivial is None:
return (
"return grpc_core::Clamp(value, 0, %d);\n" % shape.max,
bounds_idx,
)
cases = [(0, "return 0;"), (first_nontrivial, "return value;")]
if done_trivial:
first_nontrivial_code = dbl2u64(first_nontrivial)
last_code = first_nontrivial_code
while True:
code = ""
first_nontrivial = u642dbl(first_nontrivial_code)
code_bounds_index = None
for i, b in enumerate(bounds):
if b > first_nontrivial:
code_bounds_index = i
break
code_bounds = [dbl2u64(x) - first_nontrivial_code for x in bounds]
shift_data = find_ideal_shift(
code_bounds[code_bounds_index:], 65536
)
if not shift_data:
break
map_table = gen_map_table(
code_bounds[code_bounds_index:], shift_data
)
if not map_table:
break
if map_table[-1] < 5:
break
map_table_idx = decl_static_table(
[x + code_bounds_index for x in map_table],
type_for_uint_table(map_table),
)
last_code = (
(len(map_table) - 1) << shift_data[0]
) + first_nontrivial_code
code += "DblUint val;\n"
code += "val.dbl = value;\n"
code += "const int bucket = "
code += "kStatsTable%d[((val.uint - %dull) >> %d)];\n" % (
map_table_idx,
first_nontrivial_code,
shift_data[0],
)
code += (
"return bucket - (value < kStatsTable%d[bucket]);" % bounds_idx
)
cases.append((int(u642dbl(last_code)) + 1, code))
first_nontrivial_code = last_code
last = u642dbl(last_code) + 1
for i, b in enumerate(bounds[:-2]):
if bounds[i + 1] < last:
continue
cases.append((bounds[i + 1], "return %d;" % i))
cases.append((None, "return %d;" % (len(bounds) - 2)))
return (merge_cases(cases), bounds_idx)
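# Roughly, the generated BucketFor() returns 0 for negative values, maps the
# small "trivial" range where bucket == value directly, uses the bit-pattern
# table lookup for the exponentially spaced middle range, and falls back to
# explicit bound comparisons for any remaining buckets.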
# utility: print a big comment block into a set of files
def put_banner(files, banner):
for f in files:
for line in banner:
print("// %s" % line, file=f)
print(file=f)
shapes = set()
for histogram in inst_map["Histogram"]:
shapes.add(Shape(max=histogram.max, buckets=histogram.buckets))
def snake_to_pascal(name):
return "".join([x.capitalize() for x in name.split("_")])
with open("src/core/lib/debug/stats_data.h", "w") as H:
# copy-paste copyright notice from this file
with open(sys.argv[0]) as my_source:
copyright = []
for line in my_source:
if line[0] != "#":
break
for line in my_source:
if line[0] == "#":
copyright.append(line)
break
for line in my_source:
if line[0] != "#":
break
copyright.append(line)
put_banner([H], [line[2:].rstrip() for line in copyright])
put_banner(
[H], ["Automatically generated by tools/codegen/core/gen_stats_data.py"]
)
print("#ifndef GRPC_SRC_CORE_LIB_DEBUG_STATS_DATA_H", file=H)
print("#define GRPC_SRC_CORE_LIB_DEBUG_STATS_DATA_H", file=H)
print(file=H)
print("#include <grpc/support/port_platform.h>", file=H)
print("#include <atomic>", file=H)
print("#include <memory>", file=H)
print("#include <stdint.h>", file=H)
print('#include "src/core/lib/debug/histogram_view.h"', file=H)
print('#include "absl/strings/string_view.h"', file=H)
print('#include "src/core/lib/gprpp/per_cpu.h"', file=H)
print(file=H)
print("namespace grpc_core {", file=H)
for shape in shapes:
print(
"class HistogramCollector_%d_%d;" % (shape.max, shape.buckets),
file=H,
)
print("class Histogram_%d_%d {" % (shape.max, shape.buckets), file=H)
print(" public:", file=H)
print(" static int BucketFor(int value);", file=H)
print(" const uint64_t* buckets() const { return buckets_; }", file=H)
print(
" friend Histogram_%d_%d operator-(const Histogram_%d_%d& left,"
" const Histogram_%d_%d& right);"
% (
shape.max,
shape.buckets,
shape.max,
shape.buckets,
shape.max,
shape.buckets,
),
file=H,
)
print(" private:", file=H)
print(
" friend class HistogramCollector_%d_%d;"
% (shape.max, shape.buckets),
file=H,
)
print(" uint64_t buckets_[%d]{};" % shape.buckets, file=H)
print("};", file=H)
print(
"class HistogramCollector_%d_%d {" % (shape.max, shape.buckets),
file=H,
)
print(" public:", file=H)
print(" void Increment(int value) {", file=H)
print(
" buckets_[Histogram_%d_%d::BucketFor(value)]"
% (shape.max, shape.buckets),
file=H,
)
print(" .fetch_add(1, std::memory_order_relaxed);", file=H)
print(" }", file=H)
print(
" void Collect(Histogram_%d_%d* result) const;"
% (shape.max, shape.buckets),
file=H,
)
print(" private:", file=H)
print(" std::atomic<uint64_t> buckets_[%d]{};" % shape.buckets, file=H)
print("};", file=H)
print("struct GlobalStats {", file=H)
print(" enum class Counter {", file=H)
for ctr in inst_map["Counter"]:
print(" k%s," % snake_to_pascal(ctr.name), file=H)
print(" COUNT", file=H)
print(" };", file=H)
print(" enum class Histogram {", file=H)
for ctr in inst_map["Histogram"]:
print(" k%s," % snake_to_pascal(ctr.name), file=H)
print(" COUNT", file=H)
print(" };", file=H)
print(" GlobalStats();", file=H)
print(
(
" static const absl::string_view"
" counter_name[static_cast<int>(Counter::COUNT)];"
),
file=H,
)
print(
(
" static const absl::string_view"
" histogram_name[static_cast<int>(Histogram::COUNT)];"
),
file=H,
)
print(
(
" static const absl::string_view"
" counter_doc[static_cast<int>(Counter::COUNT)];"
),
file=H,
)
print(
(
" static const absl::string_view"
" histogram_doc[static_cast<int>(Histogram::COUNT)];"
),
file=H,
)
print(" union {", file=H)
print(" struct {", file=H)
for ctr in inst_map["Counter"]:
print(" uint64_t %s;" % ctr.name, file=H)
print(" };", file=H)
print(" uint64_t counters[static_cast<int>(Counter::COUNT)];", file=H)
print(" };", file=H)
for ctr in inst_map["Histogram"]:
print(
" Histogram_%d_%d %s;" % (ctr.max, ctr.buckets, ctr.name), file=H
)
print(" HistogramView histogram(Histogram which) const;", file=H)
print(
" std::unique_ptr<GlobalStats> Diff(const GlobalStats& other) const;",
file=H,
)
print("};", file=H)
print("class GlobalStatsCollector {", file=H)
print(" public:", file=H)
print(" std::unique_ptr<GlobalStats> Collect() const;", file=H)
for ctr in inst_map["Counter"]:
print(
" void Increment%s() { data_.this_cpu().%s.fetch_add(1,"
" std::memory_order_relaxed); }"
% (snake_to_pascal(ctr.name), ctr.name),
file=H,
)
for ctr in inst_map["Histogram"]:
print(
" void Increment%s(int value) {"
" data_.this_cpu().%s.Increment(value); }"
% (snake_to_pascal(ctr.name), ctr.name),
file=H,
)
print(" private:", file=H)
print(" struct Data {", file=H)
for ctr in inst_map["Counter"]:
print(" std::atomic<uint64_t> %s{0};" % ctr.name, file=H)
for ctr in inst_map["Histogram"]:
print(
" HistogramCollector_%d_%d %s;"
% (ctr.max, ctr.buckets, ctr.name),
file=H,
)
print(" };", file=H)
print(
(
" PerCpu<Data>"
" data_{PerCpuOptions().SetCpusPerShard(4).SetMaxShards(32)};"
),
file=H,
)
print("};", file=H)
print("}", file=H)
print(file=H)
print("#endif // GRPC_SRC_CORE_LIB_DEBUG_STATS_DATA_H", file=H)
with open("src/core/lib/debug/stats_data.cc", "w") as C:
# copy-paste copyright notice from this file
with open(sys.argv[0]) as my_source:
copyright = []
for line in my_source:
if line[0] != "#":
break
for line in my_source:
if line[0] == "#":
copyright.append(line)
break
for line in my_source:
if line[0] != "#":
break
copyright.append(line)
put_banner([C], [line[2:].rstrip() for line in copyright])
put_banner(
[C], ["Automatically generated by tools/codegen/core/gen_stats_data.py"]
)
print("#include <grpc/support/port_platform.h>", file=C)
print(file=C)
print('#include "src/core/lib/debug/stats_data.h"', file=C)
print("#include <stdint.h>", file=C)
print(file=C)
histo_code = []
histo_bucket_boundaries = {}
for shape in shapes:
code, bounds_idx = gen_bucket_code(shape)
histo_bucket_boundaries[shape] = bounds_idx
histo_code.append(code)
print("namespace grpc_core {", file=C)
print("namespace { union DblUint { double dbl; uint64_t uint; }; }", file=C)
for shape in shapes:
print(
"void HistogramCollector_%d_%d::Collect(Histogram_%d_%d* result)"
" const {" % (shape.max, shape.buckets, shape.max, shape.buckets),
file=C,
)
print(" for (int i=0; i<%d; i++) {" % shape.buckets, file=C)
print(
(
" result->buckets_[i] +="
" buckets_[i].load(std::memory_order_relaxed);"
),
file=C,
)
print(" }", file=C)
print("}", file=C)
print(
"Histogram_%d_%d operator-(const Histogram_%d_%d& left, const"
" Histogram_%d_%d& right) {"
% (
shape.max,
shape.buckets,
shape.max,
shape.buckets,
shape.max,
shape.buckets,
),
file=C,
)
print(" Histogram_%d_%d result;" % (shape.max, shape.buckets), file=C)
print(" for (int i=0; i<%d; i++) {" % shape.buckets, file=C)
print(
" result.buckets_[i] = left.buckets_[i] - right.buckets_[i];",
file=C,
)
print(" }", file=C)
print(" return result;", file=C)
print("}", file=C)
for typename, instances in sorted(inst_map.items()):
print(
"const absl::string_view"
" GlobalStats::%s_name[static_cast<int>(%s::COUNT)] = {"
% (typename.lower(), typename),
file=C,
)
for inst in instances:
print(" %s," % c_str(inst.name), file=C)
print("};", file=C)
print(
"const absl::string_view"
" GlobalStats::%s_doc[static_cast<int>(%s::COUNT)] = {"
% (typename.lower(), typename),
file=C,
)
for inst in instances:
print(" %s," % c_str(inst.doc), file=C)
print("};", file=C)
print("namespace {", file=C)
for i, tbl in enumerate(static_tables):
print(
"const %s kStatsTable%d[%d] = {%s};"
% (tbl[0], i, len(tbl[1]), ",".join("%s" % x for x in tbl[1])),
file=C,
)
print("} // namespace", file=C)
for shape, code in zip(shapes, histo_code):
print(
"int Histogram_%d_%d::BucketFor(int value) {%s}"
% (shape.max, shape.buckets, code),
file=C,
)
print(
"GlobalStats::GlobalStats() : %s {}"
% ",".join("%s{0}" % ctr.name for ctr in inst_map["Counter"]),
file=C,
)
print(
"HistogramView GlobalStats::histogram(Histogram which) const {", file=C
)
print(" switch (which) {", file=C)
print(" default: GPR_UNREACHABLE_CODE(return HistogramView());", file=C)
for inst in inst_map["Histogram"]:
print(" case Histogram::k%s:" % snake_to_pascal(inst.name), file=C)
print(
" return HistogramView{&Histogram_%d_%d::BucketFor,"
" kStatsTable%d, %d, %s.buckets()};"
% (
inst.max,
inst.buckets,
histo_bucket_boundaries[Shape(inst.max, inst.buckets)],
inst.buckets,
inst.name,
),
file=C,
)
print(" }", file=C)
print("}", file=C)
print(
"std::unique_ptr<GlobalStats> GlobalStatsCollector::Collect() const {",
file=C,
)
print(" auto result = std::make_unique<GlobalStats>();", file=C)
print(" for (const auto& data : data_) {", file=C)
for ctr in inst_map["Counter"]:
print(
" result->%s += data.%s.load(std::memory_order_relaxed);"
% (ctr.name, ctr.name),
file=C,
)
for h in inst_map["Histogram"]:
print(" data.%s.Collect(&result->%s);" % (h.name, h.name), file=C)
print(" }", file=C)
print(" return result;", file=C)
print("}", file=C)
print(
(
"std::unique_ptr<GlobalStats> GlobalStats::Diff(const GlobalStats&"
" other) const {"
),
file=C,
)
print(" auto result = std::make_unique<GlobalStats>();", file=C)
for ctr in inst_map["Counter"]:
print(
" result->%s = %s - other.%s;" % (ctr.name, ctr.name, ctr.name),
file=C,
)
for h in inst_map["Histogram"]:
print(
" result->%s = %s - other.%s;" % (h.name, h.name, h.name), file=C
)
print(" return result;", file=C)
print("}", file=C)
print("}", file=C)
| 19,433
| 30.0944
| 104
|
py
|
grpc
|
grpc-master/tools/codegen/core/gen_upb_api_from_bazel_xml.py
|
#!/usr/bin/env python3
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script generates upb source files (e.g. *.upb.c) from all upb targets
# in the Bazel BUILD file. These generated upb files are for non-Bazel builds,
# such as the makefile and python builds, which cannot generate them at build
# time.
#
# As an example, for the following upb target
#
# grpc_upb_proto_library(
# name = "grpc_health_upb",
# deps = ["//src/proto/grpc/health/v1:health_proto_descriptor"],
# )
#
# this will generate these upb source files at src/core/ext/upb-generated.
#
# src/proto/grpc/health/v1/health.upb.c
# src/proto/grpc/health/v1/health.upb.h
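#
# A typical invocation (illustrative; both output flags are optional and default
# to the paths declared below) is run from the repository root so that the
# tools/bazel wrapper resolves:
#   tools/codegen/core/gen_upb_api_from_bazel_xml.py --verbose \
#       --upb_out=src/core/ext/upb-generated \
#       --upbdefs_out=src/core/ext/upbdefs-generated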
import argparse
import collections
import os
import shutil
import subprocess
import xml.etree.ElementTree
# Rule object representing the UPB rule of Bazel BUILD.
Rule = collections.namedtuple("Rule", "name type srcs deps proto_files")
BAZEL_BIN = "tools/bazel"
def parse_bazel_rule(elem):
"""Returns a rule from bazel XML rule."""
srcs = []
deps = []
for child in elem:
if child.tag == "list" and child.attrib["name"] == "srcs":
for tag in child:
if tag.tag == "label":
srcs.append(tag.attrib["value"])
if child.tag == "list" and child.attrib["name"] == "deps":
for tag in child:
if tag.tag == "label":
deps.append(tag.attrib["value"])
if child.tag == "label":
# extract actual name for alias rules
label_name = child.attrib["name"]
if label_name in ["actual"]:
actual_name = child.attrib.get("value", None)
if actual_name:
# HACK: since we do a lot of transitive dependency scanning,
# make it seem that the actual name is a dependency of the alias rule
# (aliases don't have dependencies themselves)
deps.append(actual_name)
return Rule(elem.attrib["name"], elem.attrib["class"], srcs, deps, [])
def get_transitive_protos(rules, t):
que = [
t,
]
visited = set()
ret = []
while que:
name = que.pop(0)
rule = rules.get(name, None)
if rule:
for dep in rule.deps:
if dep not in visited:
visited.add(dep)
que.append(dep)
for src in rule.srcs:
if src.endswith(".proto"):
ret.append(src)
return list(set(ret))
def read_upb_bazel_rules():
"""Runs bazel query on given package file and returns all upb rules."""
    # Use gRPC's wrapper version of bazel rather than the system-wide bazel
    # to avoid version conflicts when running on Kokoro.
result = subprocess.check_output(
[BAZEL_BIN, "query", "--output", "xml", "--noimplicit_deps", "//:all"]
)
root = xml.etree.ElementTree.fromstring(result)
rules = [
parse_bazel_rule(elem)
for elem in root
if elem.tag == "rule"
and elem.attrib["class"]
in [
"upb_proto_library",
"upb_proto_reflection_library",
]
]
# query all dependencies of upb rules to get a list of proto files
all_deps = [dep for rule in rules for dep in rule.deps]
result = subprocess.check_output(
[
BAZEL_BIN,
"query",
"--output",
"xml",
"--noimplicit_deps",
" union ".join("deps({0})".format(d) for d in all_deps),
]
)
root = xml.etree.ElementTree.fromstring(result)
dep_rules = {}
for dep_rule in (
parse_bazel_rule(elem) for elem in root if elem.tag == "rule"
):
dep_rules[dep_rule.name] = dep_rule
# add proto files to upb rules transitively
for rule in rules:
if not rule.type.startswith("upb_proto_"):
continue
if len(rule.deps) == 1:
rule.proto_files.extend(
get_transitive_protos(dep_rules, rule.deps[0])
)
return rules
def build_upb_bazel_rules(rules):
result = subprocess.check_output(
[BAZEL_BIN, "build"] + [rule.name for rule in rules]
)
def get_upb_path(proto_path, ext):
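    # For example (illustrative): get_upb_path("src/proto/grpc/health/v1:health.proto",
    # ".upb.c") returns "src/proto/grpc/health/v1/health.upb.c".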
return proto_path.replace(":", "/").replace(".proto", ext)
def get_bazel_bin_root_path(elink):
BAZEL_BIN_ROOT = "bazel-bin/"
if elink[0].startswith("@"):
# external
result = os.path.join(
BAZEL_BIN_ROOT,
"external",
elink[0].replace("@", "").replace("//", ""),
)
if elink[1]:
result = os.path.join(result, elink[1])
return result
else:
# internal
return BAZEL_BIN_ROOT
def get_external_link(file):
EXTERNAL_LINKS = [
("@com_google_protobuf//", "src/"),
("@com_google_googleapis//", ""),
("@com_github_cncf_udpa//", ""),
("@com_envoyproxy_protoc_gen_validate//", ""),
("@envoy_api//", ""),
("@opencensus_proto//", ""),
]
for external_link in EXTERNAL_LINKS:
if file.startswith(external_link[0]):
return external_link
return ("//", "")
def copy_upb_generated_files(rules, args):
files = {}
for rule in rules:
if rule.type == "upb_proto_library":
frag = ".upb"
output_dir = args.upb_out
else:
frag = ".upbdefs"
output_dir = args.upbdefs_out
for proto_file in rule.proto_files:
elink = get_external_link(proto_file)
prefix_to_strip = elink[0] + elink[1]
if not proto_file.startswith(prefix_to_strip):
raise Exception(
'Source file "{0}" in does not have the expected prefix'
' "{1}"'.format(proto_file, prefix_to_strip)
)
proto_file = proto_file[len(prefix_to_strip) :]
for ext in (".h", ".c"):
file = get_upb_path(proto_file, frag + ext)
src = os.path.join(get_bazel_bin_root_path(elink), file)
dst = os.path.join(output_dir, file)
files[src] = dst
for src, dst in files.items():
if args.verbose:
print("Copy:")
print(" {0}".format(src))
print(" -> {0}".format(dst))
os.makedirs(os.path.split(dst)[0], exist_ok=True)
shutil.copyfile(src, dst)
parser = argparse.ArgumentParser(description="UPB code-gen from bazel")
parser.add_argument("--verbose", default=False, action="store_true")
parser.add_argument(
"--upb_out",
default="src/core/ext/upb-generated",
help="Output directory for upb targets",
)
parser.add_argument(
"--upbdefs_out",
default="src/core/ext/upbdefs-generated",
help="Output directory for upbdefs targets",
)
def main():
args = parser.parse_args()
rules = read_upb_bazel_rules()
if args.verbose:
print("Rules:")
for rule in rules:
print(
" name={0} type={1} proto_files={2}".format(
rule.name, rule.type, rule.proto_files
)
)
if rules:
build_upb_bazel_rules(rules)
copy_upb_generated_files(rules, args)
if __name__ == "__main__":
main()
| 7,846
| 31.028571
| 89
|
py
|
grpc
|
grpc-master/tools/codegen/core/gen_experiments.py
|
#!/usr/bin/env python3
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generate experiment related code artifacts.
Invoke as: tools/codegen/core/gen_experiments.py
Experiment definitions are in src/core/lib/experiments/experiments.yaml
"""
from __future__ import print_function
import argparse
import sys
import experiments_compiler as exp
import yaml
DEFAULTS = {
"broken": "false",
False: "false",
True: "true",
"debug": "kDefaultForDebugOnly",
}
PLATFORMS_DEFINE = {
"windows": "GPR_WINDOWS",
"ios": "GRPC_CFSTREAM",
"posix": "",
}
FINAL_RETURN = {
"broken": "return false;",
False: "return false;",
True: "return true;",
"debug": "\n#ifdef NDEBUG\nreturn false;\n#else\nreturn true;\n#endif\n",
}
FINAL_DEFINE = {
"broken": None,
False: None,
True: "#define %s",
"debug": "#ifndef NDEBUG\n#define %s\n#endif",
}
BZL_LIST_FOR_DEFAULTS = {
"broken": None,
False: "off",
True: "on",
"debug": "dbg",
}
def ParseCommandLineArguments(args):
"""Wrapper for argparse command line arguments handling.
Args:
args: List of command line arguments.
Returns:
Command line arguments namespace built by argparse.ArgumentParser().
"""
# formatter_class=argparse.ArgumentDefaultsHelpFormatter is not used here
    # intentionally; we want more formatting than this class can provide.
flag_parser = argparse.ArgumentParser()
flag_parser.add_argument(
"--check",
action="store_false",
help="If specified, disables checking experiment expiry dates",
)
return flag_parser.parse_args(args)
args = ParseCommandLineArguments(sys.argv[1:])
def _GenerateExperimentFiles(args, mode):
if mode == "test":
_EXPERIMENTS_DEFS = (
"test/core/experiments/fixtures/test_experiments.yaml"
)
_EXPERIMENTS_ROLLOUTS = (
"test/core/experiments/fixtures/test_experiments_rollout.yaml"
)
_EXPERIMENTS_HDR_FILE = "test/core/experiments/fixtures/experiments.h"
_EXPERIMENTS_SRC_FILE = "test/core/experiments/fixtures/experiments.cc"
else:
_EXPERIMENTS_DEFS = "src/core/lib/experiments/experiments.yaml"
_EXPERIMENTS_ROLLOUTS = "src/core/lib/experiments/rollouts.yaml"
_EXPERIMENTS_HDR_FILE = "src/core/lib/experiments/experiments.h"
_EXPERIMENTS_SRC_FILE = "src/core/lib/experiments/experiments.cc"
with open(_EXPERIMENTS_DEFS) as f:
attrs = yaml.safe_load(f.read())
with open(_EXPERIMENTS_ROLLOUTS) as f:
rollouts = yaml.safe_load(f.read())
compiler = exp.ExperimentsCompiler(
DEFAULTS,
FINAL_RETURN,
FINAL_DEFINE,
PLATFORMS_DEFINE,
BZL_LIST_FOR_DEFAULTS,
)
experiment_annotation = "gRPC Experiments: "
for attr in attrs:
exp_definition = exp.ExperimentDefinition(attr)
if not exp_definition.IsValid(args.check):
sys.exit(1)
experiment_annotation += exp_definition.name + ":0,"
if not compiler.AddExperimentDefinition(exp_definition):
print("Experiment = %s ERROR adding" % exp_definition.name)
sys.exit(1)
if len(experiment_annotation) > 2000:
print("comma-delimited string of experiments is too long")
sys.exit(1)
for rollout_attr in rollouts:
if not compiler.AddRolloutSpecification(rollout_attr):
print("ERROR adding rollout spec")
sys.exit(1)
print(f"Mode = {mode} Generating experiments headers")
compiler.GenerateExperimentsHdr(_EXPERIMENTS_HDR_FILE, mode)
print(f"Mode = {mode} Generating experiments srcs")
compiler.GenerateExperimentsSrc(
_EXPERIMENTS_SRC_FILE, _EXPERIMENTS_HDR_FILE, mode
)
if mode == "test":
print("Generating experiments tests")
compiler.GenTest("test/core/experiments/experiments_test.cc")
else:
print("Generating experiments.bzl")
compiler.GenExperimentsBzl("bazel/experiments.bzl")
_GenerateExperimentFiles(args, "production")
_GenerateExperimentFiles(args, "test")
| 4,649
| 28.807692
| 79
|
py
|
grpc
|
grpc-master/tools/codegen/core/gen_header_frame.py
|
#!/usr/bin/env python3
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Read from stdin a set of colon separated http headers:
:path: /foo/bar
content-type: application/grpc
Write a set of strings containing a hpack encoded http2 frame that
represents said headers."""
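# Illustrative invocation (the flags are the ones defined below; headers.txt is
# just a placeholder for any file of colon-separated headers fed via stdin):
#   tools/codegen/core/gen_header_frame.py --compression inc --output hex < headers.txt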
import argparse
import json
import sys
def append_never_indexed(payload_line, n, count, key, value, value_is_huff):
payload_line.append(0x10)
assert len(key) <= 126
payload_line.append(len(key))
payload_line.extend(ord(c) for c in key)
assert len(value) <= 126
payload_line.append(len(value) + (0x80 if value_is_huff else 0))
payload_line.extend(value)
def append_inc_indexed(payload_line, n, count, key, value, value_is_huff):
payload_line.append(0x40)
assert len(key) <= 126
payload_line.append(len(key))
payload_line.extend(ord(c) for c in key)
assert len(value) <= 126
payload_line.append(len(value) + (0x80 if value_is_huff else 0))
payload_line.extend(value)
def append_pre_indexed(payload_line, n, count, key, value, value_is_huff):
assert not value_is_huff
payload_line.append(0x80 + 61 + count - n)
def esc_c(line):
out = '"'
last_was_hex = False
for c in line:
if 32 <= c < 127:
if c in hex_bytes and last_was_hex:
out += '""'
if c != ord('"'):
out += chr(c)
else:
out += '\\"'
last_was_hex = False
else:
out += "\\x%02x" % c
last_was_hex = True
return out + '"'
def output_c(payload_bytes):
for line in payload_bytes:
print((esc_c(line)))
def output_hex(payload_bytes):
all_bytes = []
for line in payload_bytes:
all_bytes.extend(line)
print(("{%s}" % ", ".join("0x%02x" % c for c in all_bytes)))
def output_hexstr(payload_bytes):
all_bytes = []
for line in payload_bytes:
all_bytes.extend(line)
print(("%s" % "".join("%02x" % c for c in all_bytes)))
_COMPRESSORS = {
"never": append_never_indexed,
"inc": append_inc_indexed,
"pre": append_pre_indexed,
}
_OUTPUTS = {
"c": output_c,
"hex": output_hex,
"hexstr": output_hexstr,
}
argp = argparse.ArgumentParser("Generate header frames")
argp.add_argument(
"--set_end_stream", default=False, action="store_const", const=True
)
argp.add_argument(
"--no_framing", default=False, action="store_const", const=True
)
argp.add_argument(
"--compression", choices=sorted(_COMPRESSORS.keys()), default="never"
)
argp.add_argument("--huff", default=False, action="store_const", const=True)
argp.add_argument("--output", default="c", choices=sorted(_OUTPUTS.keys()))
args = argp.parse_args()
# parse input, fill in vals
vals = []
for line in sys.stdin:
line = line.strip()
if line == "":
continue
if line[0] == "#":
continue
key_tail, value = line[1:].split(":")
key = (line[0] + key_tail).strip()
value = value.strip().encode("ascii")
if args.huff:
from hpack.huffman import HuffmanEncoder
from hpack.huffman_constants import REQUEST_CODES
from hpack.huffman_constants import REQUEST_CODES_LENGTH
value = HuffmanEncoder(REQUEST_CODES, REQUEST_CODES_LENGTH).encode(
value
)
vals.append((key, value))
# generate frame payload binary data
payload_bytes = []
if not args.no_framing:
payload_bytes.append([]) # reserve space for header
payload_len = 0
n = 0
for key, value in vals:
payload_line = []
_COMPRESSORS[args.compression](
payload_line, n, len(vals), key, value, args.huff
)
n += 1
payload_len += len(payload_line)
payload_bytes.append(payload_line)
# fill in header
if not args.no_framing:
flags = 0x04 # END_HEADERS
if args.set_end_stream:
flags |= 0x01 # END_STREAM
payload_bytes[0].extend(
[
(payload_len >> 16) & 0xFF,
(payload_len >> 8) & 0xFF,
(payload_len) & 0xFF,
# header frame
0x01,
# flags
flags,
# stream id
0x00,
0x00,
0x00,
0x01,
]
)
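    # Together these nine bytes form the HTTP/2 frame header: a 24-bit payload
    # length, the frame type (0x01 = HEADERS), the flags byte, and a 31-bit
    # stream id fixed at 1.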
hex_bytes = [ord(c) for c in "abcdefABCDEF0123456789"]
# dump bytes
_OUTPUTS[args.output](payload_bytes)
| 4,871
| 26.525424
| 76
|
py
|
grpc
|
grpc-master/tools/codegen/core/gen_if_list.py
|
#!/usr/bin/env python3
# Copyright 2023 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# utility: print a big comment block into a set of files
def put_banner(files, banner):
for f in files:
for line in banner:
print("// %s" % line, file=f)
print("", file=f)
with open("src/core/lib/gprpp/if_list.h", "w") as H:
# copy-paste copyright notice from this file
with open(sys.argv[0]) as my_source:
copyright = []
for line in my_source:
if line[0] != "#":
break
for line in my_source:
if line[0] == "#":
copyright.append(line)
break
for line in my_source:
if line[0] != "#":
break
copyright.append(line)
put_banner([H], [line[2:].rstrip() for line in copyright])
put_banner([H], ["", "Automatically generated by %s" % sys.argv[0], ""])
print("#ifndef GRPC_CORE_LIB_GPRPP_IF_LIST_H", file=H)
print("#define GRPC_CORE_LIB_GPRPP_IF_LIST_H", file=H)
print("", file=H)
print("#include <grpc/support/port_platform.h>", file=H)
print("", file=H)
print("#include <stdlib.h>", file=H)
print("", file=H)
print("namespace grpc_core {", file=H)
for n in range(1, 64):
print("", file=H)
print(
"template <typename CheckArg, typename ActionArg, typename"
" ActionFail, %s, %s> auto IfList(CheckArg input, ActionArg"
" action_arg, ActionFail action_fail, %s, %s) {"
% (
", ".join("typename Check%d" % (i) for i in range(0, n)),
", ".join("typename Action%d" % (i) for i in range(0, n)),
", ".join("Check%d check%d" % (i, i) for i in range(0, n)),
", ".join("Action%d action%d" % (i, i) for i in range(0, n)),
),
file=H,
)
for i in range(0, n):
print(
" if (check%d(input)) return action%d(action_arg);" % (i, i),
file=H,
)
print(" return action_fail(action_arg);", file=H)
print("}", file=H)
print("", file=H)
print("}", file=H)
print("", file=H)
print("#endif // GRPC_CORE_LIB_GPRPP_IF_LIST_H", file=H)
| 2,808
| 33.256098
| 78
|
py
|
grpc
|
grpc-master/tools/codegen/core/experiments_compiler.py
|
#!/usr/bin/env python3
# Copyright 2023 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A module to assist in generating experiment related code and artifacts.
"""
from __future__ import print_function
import collections
import ctypes
import datetime
import json
import math
import os
import re
import sys
import yaml
_CODEGEN_PLACEHOLDER_TEXT = """
This file contains the autogenerated parts of the experiments API.
It generates two symbols for each experiment.
For the experiment named new_car_project, it generates:
- a function IsNewCarProjectEnabled() that returns true if the experiment
should be enabled at runtime.
- a macro GRPC_EXPERIMENT_IS_INCLUDED_NEW_CAR_PROJECT that is defined if the
experiment *could* be enabled at runtime.
The function is used to determine whether to run the experiment or
non-experiment code path.
If the experiment brings significant bloat, the macro can be used to avoid
including the experiment code path in the binary for binaries that are size
sensitive.
By default that includes our iOS and Android builds.
Finally, a small array is included that contains the metadata for each
experiment.
A macro, GRPC_EXPERIMENTS_ARE_FINAL, controls whether we fix experiment
configuration at build time (if it's defined) or allow it to be tuned at
runtime (if it's disabled).
If you are using the Bazel build system, that macro can be configured with
--define=grpc_experiments_are_final=true
"""
def _EXPERIMENTS_TEST_SKELETON(defs, test_body):
return f"""
#include <grpc/support/port_platform.h>
#include "test/core/experiments/fixtures/experiments.h"
#include "gtest/gtest.h"
#include "src/core/lib/experiments/config.h"
#ifndef GRPC_EXPERIMENTS_ARE_FINAL
{defs}
TEST(ExperimentsTest, CheckExperimentValuesTest) {{
{test_body}
}}
#endif // GRPC_EXPERIMENTS_ARE_FINAL
int main(int argc, char** argv) {{
testing::InitGoogleTest(&argc, argv);
grpc_core::LoadTestOnlyExperimentsFromMetadata(
grpc_core::g_test_experiment_metadata, grpc_core::kNumTestExperiments);
return RUN_ALL_TESTS();
}}
"""
def _EXPERIMENTS_EXPECTED_VALUE(name, expected_value):
return f"""
bool GetExperiment{name}ExpectedValue() {{
{expected_value}
}}
"""
def _EXPERIMENT_CHECK_TEXT(name):
return f"""
ASSERT_EQ(grpc_core::Is{name}Enabled(),
GetExperiment{name}ExpectedValue());
"""
def ToCStr(s, encoding="ascii"):
if isinstance(s, str):
s = s.encode(encoding)
result = ""
for c in s:
c = chr(c) if isinstance(c, int) else c
if not (32 <= ord(c) < 127) or c in ("\\", '"'):
result += "\\%03o" % ord(c)
else:
result += c
return '"' + result + '"'
def SnakeToPascal(s):
return "".join(x.capitalize() for x in s.split("_"))
def PutBanner(files, banner, prefix):
# Print a big comment block into a set of files
for f in files:
for line in banner:
if not line:
print(prefix, file=f)
else:
print("%s %s" % (prefix, line), file=f)
print(file=f)
def PutCopyright(file, prefix):
# copy-paste copyright notice from this file
with open(__file__) as my_source:
copyright = []
for line in my_source:
if line[0] != "#":
break
for line in my_source:
if line[0] == "#":
copyright.append(line)
break
for line in my_source:
if line[0] != "#":
break
copyright.append(line)
PutBanner([file], [line[2:].rstrip() for line in copyright], prefix)
class ExperimentDefinition(object):
def __init__(self, attributes):
self._error = False
if "name" not in attributes:
print("ERROR: experiment with no name: %r" % attributes)
self._error = True
if "description" not in attributes:
print(
"ERROR: no description for experiment %s" % attributes["name"]
)
self._error = True
if "owner" not in attributes:
print("ERROR: no owner for experiment %s" % attributes["name"])
self._error = True
if "expiry" not in attributes:
print("ERROR: no expiry for experiment %s" % attributes["name"])
self._error = True
if attributes["name"] == "monitoring_experiment":
if attributes["expiry"] != "never-ever":
print("ERROR: monitoring_experiment should never expire")
self._error = True
if self._error:
print("Failed to create experiment definition")
return
self._allow_in_fuzzing_config = True
self._name = attributes["name"]
self._description = attributes["description"]
self._expiry = attributes["expiry"]
self._default = {}
self._additional_constraints = {}
self._test_tags = []
if "allow_in_fuzzing_config" in attributes:
self._allow_in_fuzzing_config = attributes[
"allow_in_fuzzing_config"
]
if "test_tags" in attributes:
self._test_tags = attributes["test_tags"]
def IsValid(self, check_expiry=False):
if self._error:
return False
if not check_expiry:
return True
if (
self._name == "monitoring_experiment"
and self._expiry == "never-ever"
):
return True
today = datetime.date.today()
two_quarters_from_now = today + datetime.timedelta(days=180)
expiry = datetime.datetime.strptime(self._expiry, "%Y/%m/%d").date()
if expiry < today:
print(
"WARNING: experiment %s expired on %s"
% (self._name, self._expiry)
)
if expiry > two_quarters_from_now:
print(
"WARNING: experiment %s expires far in the future on %s"
% (self._name, self._expiry)
)
print("expiry should be no more than two quarters from now")
return not self._error
def AddRolloutSpecification(
self, allowed_defaults, allowed_platforms, rollout_attributes
):
if self._error:
return False
if rollout_attributes["name"] != self._name:
print(
"ERROR: Rollout specification does not apply to this"
" experiment: %s" % self._name
)
return False
if "default" not in rollout_attributes:
print(
"ERROR: no default for experiment %s"
% rollout_attributes["name"]
)
self._error = True
return False
is_dict = isinstance(rollout_attributes["default"], dict)
for platform in allowed_platforms:
if is_dict:
value = rollout_attributes["default"].get(platform, False)
else:
value = rollout_attributes["default"]
if isinstance(value, dict):
self._default[platform] = "debug"
self._additional_constraints[platform] = value
elif value not in allowed_defaults:
print(
"ERROR: default for experiment %s on platform %s "
"is of incorrect format"
% (rollout_attributes["name"], platform)
)
self._error = True
return False
else:
self._default[platform] = value
self._additional_constraints[platform] = {}
return True
@property
def name(self):
return self._name
@property
def description(self):
return self._description
def default(self, platform):
return self._default.get(platform, False)
@property
def test_tags(self):
return self._test_tags
@property
def allow_in_fuzzing_config(self):
return self._allow_in_fuzzing_config
def additional_constraints(self, platform):
return self._additional_constraints.get(platform, {})
class ExperimentsCompiler(object):
def __init__(
self,
defaults,
final_return,
final_define,
platforms_define,
bzl_list_for_defaults=None,
):
self._defaults = defaults
self._final_return = final_return
self._final_define = final_define
self._platforms_define = platforms_define
self._bzl_list_for_defaults = bzl_list_for_defaults
self._experiment_definitions = {}
self._experiment_rollouts = {}
def AddExperimentDefinition(self, experiment_definition):
if experiment_definition.name in self._experiment_definitions:
print(
"ERROR: Duplicate experiment definition: %s"
% experiment_definition.name
)
return False
self._experiment_definitions[
experiment_definition.name
] = experiment_definition
return True
def AddRolloutSpecification(self, rollout_attributes):
if "name" not in rollout_attributes:
print(
"ERROR: experiment with no name: %r in rollout_attribute"
% rollout_attributes
)
return False
if rollout_attributes["name"] not in self._experiment_definitions:
print(
"WARNING: rollout for an undefined experiment: %s ignored"
% rollout_attributes["name"]
)
return True
return self._experiment_definitions[
rollout_attributes["name"]
].AddRolloutSpecification(
self._defaults, self._platforms_define, rollout_attributes
)
def _GenerateExperimentsHdrForPlatform(self, platform, file_desc):
for _, exp in self._experiment_definitions.items():
define_fmt = self._final_define[exp.default(platform)]
if define_fmt:
print(
define_fmt
% ("GRPC_EXPERIMENT_IS_INCLUDED_%s" % exp.name.upper()),
file=file_desc,
)
print(
"inline bool Is%sEnabled() { %s }"
% (
SnakeToPascal(exp.name),
self._final_return[exp.default(platform)],
),
file=file_desc,
)
def GenerateExperimentsHdr(self, output_file, mode):
with open(output_file, "w") as H:
PutCopyright(H, "//")
PutBanner(
[H],
["Auto generated by tools/codegen/core/gen_experiments.py"]
+ _CODEGEN_PLACEHOLDER_TEXT.splitlines(),
"//",
)
if mode != "test":
include_guard = "GRPC_SRC_CORE_LIB_EXPERIMENTS_EXPERIMENTS_H"
else:
file_path_list = output_file.split("/")[0:-1]
file_name = output_file.split("/")[-1].split(".")[0]
include_guard = f"GRPC_{'_'.join(path.upper() for path in file_path_list)}_{file_name.upper()}_H"
print(f"#ifndef {include_guard}", file=H)
print(f"#define {include_guard}", file=H)
print(file=H)
print("#include <grpc/support/port_platform.h>", file=H)
print(file=H)
print("#include <stddef.h>", file=H)
print('#include "src/core/lib/experiments/config.h"', file=H)
print(file=H)
print("namespace grpc_core {", file=H)
print(file=H)
print("#ifdef GRPC_EXPERIMENTS_ARE_FINAL", file=H)
idx = 0
for platform in sorted(self._platforms_define.keys()):
if platform == "posix":
continue
print(
f"\n#{'if' if idx ==0 else 'elif'} "
f"defined({self._platforms_define[platform]})",
file=H,
)
self._GenerateExperimentsHdrForPlatform(platform, H)
idx += 1
print("\n#else", file=H)
self._GenerateExperimentsHdrForPlatform("posix", H)
print("#endif", file=H)
print("\n#else", file=H)
for i, (_, exp) in enumerate(self._experiment_definitions.items()):
print(
"#define GRPC_EXPERIMENT_IS_INCLUDED_%s" % exp.name.upper(),
file=H,
)
print(
"inline bool Is%sEnabled() { return"
" Is%sExperimentEnabled(%d); }"
% (
SnakeToPascal(exp.name),
"Test" if mode == "test" else "",
i,
),
file=H,
)
print(file=H)
if mode == "test":
num_experiments_var_name = "kNumTestExperiments"
experiments_metadata_var_name = "g_test_experiment_metadata"
else:
num_experiments_var_name = "kNumExperiments"
experiments_metadata_var_name = "g_experiment_metadata"
print(
f"constexpr const size_t {num_experiments_var_name} = "
f"{len(self._experiment_definitions.keys())};",
file=H,
)
print(
(
"extern const ExperimentMetadata"
f" {experiments_metadata_var_name}[{num_experiments_var_name}];"
),
file=H,
)
print(file=H)
print("#endif", file=H)
print("} // namespace grpc_core", file=H)
print(file=H)
print(f"#endif // {include_guard}", file=H)
def _GenerateExperimentsSrcForPlatform(self, platform, mode, file_desc):
print("namespace {", file=file_desc)
have_defaults = set()
for _, exp in self._experiment_definitions.items():
print(
"const char* const description_%s = %s;"
% (exp.name, ToCStr(exp.description)),
file=file_desc,
)
print(
"const char* const additional_constraints_%s = %s;"
% (
exp.name,
ToCStr(json.dumps(exp.additional_constraints(platform))),
),
file=file_desc,
)
have_defaults.add(self._defaults[exp.default(platform)])
if "kDefaultForDebugOnly" in have_defaults:
print("#ifdef NDEBUG", file=file_desc)
if "kDefaultForDebugOnly" in have_defaults:
print(
"const bool kDefaultForDebugOnly = false;", file=file_desc
)
print("#else", file=file_desc)
if "kDefaultForDebugOnly" in have_defaults:
print("const bool kDefaultForDebugOnly = true;", file=file_desc)
print("#endif", file=file_desc)
print("}", file=file_desc)
print(file=file_desc)
print("namespace grpc_core {", file=file_desc)
print(file=file_desc)
if mode == "test":
experiments_metadata_var_name = "g_test_experiment_metadata"
else:
experiments_metadata_var_name = "g_experiment_metadata"
print(
f"const ExperimentMetadata {experiments_metadata_var_name}[] = {{",
file=file_desc,
)
for _, exp in self._experiment_definitions.items():
print(
" {%s, description_%s, additional_constraints_%s, %s, %s},"
% (
ToCStr(exp.name),
exp.name,
exp.name,
self._defaults[exp.default(platform)],
"true" if exp.allow_in_fuzzing_config else "false",
),
file=file_desc,
)
print("};", file=file_desc)
print(file=file_desc)
print("} // namespace grpc_core", file=file_desc)
def GenerateExperimentsSrc(self, output_file, header_file_path, mode):
with open(output_file, "w") as C:
PutCopyright(C, "//")
PutBanner(
[C],
["Auto generated by tools/codegen/core/gen_experiments.py"],
"//",
)
print("#include <grpc/support/port_platform.h>", file=C)
print(f'#include "{header_file_path}"', file=C)
print(file=C)
print("#ifndef GRPC_EXPERIMENTS_ARE_FINAL", file=C)
idx = 0
for platform in sorted(self._platforms_define.keys()):
if platform == "posix":
continue
print(
f"\n#{'if' if idx ==0 else 'elif'} "
f"defined({self._platforms_define[platform]})",
file=C,
)
self._GenerateExperimentsSrcForPlatform(platform, mode, C)
idx += 1
print("\n#else", file=C)
self._GenerateExperimentsSrcForPlatform("posix", mode, C)
print("#endif", file=C)
print("#endif", file=C)
def _GenTestExperimentsExpectedValues(self, platform):
defs = ""
for _, exp in self._experiment_definitions.items():
defs += _EXPERIMENTS_EXPECTED_VALUE(
SnakeToPascal(exp.name),
self._final_return[exp.default(platform)],
)
return defs
def GenTest(self, output_file):
with open(output_file, "w") as C:
PutCopyright(C, "//")
PutBanner(
[C],
["Auto generated by tools/codegen/core/gen_experiments.py"],
"//",
)
defs = ""
test_body = ""
idx = 0
for platform in sorted(self._platforms_define.keys()):
if platform == "posix":
continue
defs += (
f"\n#{'if' if idx ==0 else 'elif'} "
f"defined({self._platforms_define[platform]})"
)
defs += self._GenTestExperimentsExpectedValues(platform)
idx += 1
defs += "\n#else"
defs += self._GenTestExperimentsExpectedValues("posix")
defs += "#endif\n"
for _, exp in self._experiment_definitions.items():
test_body += _EXPERIMENT_CHECK_TEXT(SnakeToPascal(exp.name))
print(_EXPERIMENTS_TEST_SKELETON(defs, test_body), file=C)
def GenExperimentsBzl(self, output_file):
if self._bzl_list_for_defaults is None:
return
bzl_to_tags_to_experiments = dict(
(key, collections.defaultdict(list))
for key in self._bzl_list_for_defaults.keys()
if key is not None
)
for _, exp in self._experiment_definitions.items():
for tag in exp.test_tags:
default = False
# Search through default values for all platforms.
for platform in self._platforms_define.keys():
platform_default = exp.default(platform)
# if the experiment is disabled on any platform, only
# add it to the "off" list.
if not platform_default or platform_default == "broken":
default = platform_default
break
elif platform_default == "debug":
# Only add the experiment to the "dbg" list if it is
                        # debug on at least one platform and true on every other
# platform.
default = "debug"
elif platform_default and default != "debug":
# Only add the experiment to the "on" list if it is
# enabled in every platform.
default = True
bzl_to_tags_to_experiments[default][tag].append(exp.name)
with open(output_file, "w") as B:
PutCopyright(B, "#")
PutBanner(
[B],
["Auto generated by tools/codegen/core/gen_experiments.py"],
"#",
)
print(
(
'"""Dictionary of tags to experiments so we know when to'
' test different experiments."""'
),
file=B,
)
bzl_to_tags_to_experiments = sorted(
(self._bzl_list_for_defaults[default], tags_to_experiments)
for default, tags_to_experiments in bzl_to_tags_to_experiments.items()
if self._bzl_list_for_defaults[default] is not None
)
print(file=B)
print("EXPERIMENTS = {", file=B)
for key, tags_to_experiments in bzl_to_tags_to_experiments:
print(' "%s": {' % key, file=B)
for tag, experiments in sorted(tags_to_experiments.items()):
print(' "%s": [' % tag, file=B)
for experiment in sorted(experiments):
print(' "%s",' % experiment, file=B)
print(" ],", file=B)
print(" },", file=B)
print("}", file=B)
| 22,142
| 34.714516
| 113
|
py
|
grpc
|
grpc-master/tools/codegen/core/gen_switch.py
|
#!/usr/bin/env python3
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# utility: print a big comment block into a set of files
def put_banner(files, banner):
for f in files:
print("/*", file=f)
for line in banner:
print(" * %s" % line, file=f)
print(" */", file=f)
print("", file=f)
with open("src/core/lib/promise/detail/switch.h", "w") as H:
# copy-paste copyright notice from this file
with open(sys.argv[0]) as my_source:
copyright = []
for line in my_source:
if line[0] != "#":
break
for line in my_source:
if line[0] == "#":
copyright.append(line)
break
for line in my_source:
if line[0] != "#":
break
copyright.append(line)
put_banner([H], [line[2:].rstrip() for line in copyright])
put_banner([H], ["Automatically generated by %s" % sys.argv[0]])
print("#ifndef GRPC_CORE_LIB_PROMISE_DETAIL_SWITCH_H", file=H)
print("#define GRPC_CORE_LIB_PROMISE_DETAIL_SWITCH_H", file=H)
print("", file=H)
print("#include <grpc/support/port_platform.h>", file=H)
print("", file=H)
print("#include <stdlib.h>", file=H)
print("", file=H)
print("namespace grpc_core {", file=H)
for n in range(1, 33):
print("", file=H)
print(
"template <typename R, %s> R Switch(char idx, %s) {"
% (
", ".join("typename F%d" % i for i in range(0, n)),
", ".join("F%d f%d" % (i, i) for i in range(0, n)),
),
file=H,
)
print(" switch (idx) {", file=H)
for i in range(0, n):
print(" case %d: return f%d();" % (i, i), file=H)
print(" }", file=H)
print(" abort();", file=H)
print("}", file=H)
print("", file=H)
print("}", file=H)
print("", file=H)
print("#endif // GRPC_CORE_LIB_PROMISE_DETAIL_SWITCH_H", file=H)
| 2,557
| 31.379747
| 74
|
py
|
grpc
|
grpc-master/tools/codegen/core/gen_config_vars.py
|
#!/usr/bin/env python3
# Copyright 2023 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import binascii
import collections
import ctypes
import datetime
import json
import math
import os
import re
import sys
import yaml
with open("src/core/lib/config/config_vars.yaml") as f:
    attrs = yaml.safe_load(f.read())
error = False
today = datetime.date.today()
two_quarters_from_now = today + datetime.timedelta(days=180)
for attr in attrs:
if "name" not in attr:
print("config has no name: %r" % attr)
error = True
continue
if "experiment" in attr["name"] and attr["name"] != "experiments":
print("use experiment system for experiments")
error = True
if "description" not in attr:
print("no description for %s" % attr["name"])
error = True
if "default" not in attr:
print("no default for %s" % attr["name"])
error = True
if error:
sys.exit(1)
def c_str(s, encoding="ascii"):
if s is None:
return '""'
if isinstance(s, str):
s = s.encode(encoding)
result = ""
for c in s:
c = chr(c) if isinstance(c, int) else c
if not (32 <= ord(c) < 127) or c in ("\\", '"'):
result += "\\%03o" % ord(c)
else:
result += c
return '"' + result + '"'
def snake_to_pascal(s):
return "".join(x.capitalize() for x in s.split("_"))
# utility: print a big comment block into a set of files
def put_banner(files, banner):
for f in files:
for line in banner:
print("// %s" % line, file=f)
print(file=f)
def put_copyright(file):
# copy-paste copyright notice from this file
with open(sys.argv[0]) as my_source:
copyright = []
for line in my_source:
if line[0] != "#":
break
for line in my_source:
if line[0] == "#":
copyright.append(line)
break
for line in my_source:
if line[0] != "#":
break
copyright.append(line)
put_banner([file], [line[2:].rstrip() for line in copyright])
RETURN_TYPE = {
"int": "int32_t",
"string": "absl::string_view",
"comma_separated_string": "absl::string_view",
"bool": "bool",
}
MEMBER_TYPE = {
"int": "int32_t",
"string": "std::string",
"comma_separated_string": "std::string",
"bool": "bool",
}
FLAG_TYPE = {
"int": "absl::optional<int32_t>",
"string": "absl::optional<std::string>",
"comma_separated_string": "std::vector<std::string>",
"bool": "absl::optional<bool>",
}
PROTO_TYPE = {
"int": "int32",
"string": "string",
"comma_separated_string": "string",
"bool": "bool",
}
SORT_ORDER_FOR_PACKING = {
"int": 0,
"bool": 1,
"string": 2,
"comma_separated_string": 3,
}
def bool_default_value(x, name):
if x == True:
return "true"
if x == False:
return "false"
if isinstance(x, str) and x.startswith("$"):
return x[1:]
return x
def int_default_value(x, name):
if isinstance(x, str) and x.startswith("$"):
return x[1:]
return x
def string_default_value(x, name):
if x is None:
return '""'
if x.startswith("$"):
return x[1:]
return c_str(x)
DEFAULT_VALUE = {
"int": int_default_value,
"bool": bool_default_value,
"string": string_default_value,
"comma_separated_string": string_default_value,
}
TO_STRING = {
"int": "$",
"bool": '$?"true":"false"',
"string": '"\\"", absl::CEscape($), "\\""',
"comma_separated_string": '"\\"", absl::CEscape($), "\\""',
}
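# In TO_STRING, "$" is a placeholder that the ToString() generation below
# replaces with the accessor call for the attribute (e.g., for an illustrative
# attribute named "verbosity" the "$" becomes Verbosity()). Separately, the
# DEFAULT_VALUE helpers treat a leading "$" in a yaml default as a raw C++
# expression to emit verbatim rather than as a quoted string.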
attrs_in_packing_order = sorted(
attrs, key=lambda a: SORT_ORDER_FOR_PACKING[a["type"]]
)
with open("test/core/util/fuzz_config_vars.proto", "w") as P:
put_copyright(P)
put_banner(
[P],
[
"",
"Automatically generated by tools/codegen/core/gen_config_vars.py",
"",
],
)
print('syntax = "proto3";', file=P)
print(file=P)
print("package grpc.testing;", file=P)
print(file=P)
print("message FuzzConfigVars {", file=P)
for attr in attrs_in_packing_order:
if attr.get("fuzz", False) == False:
continue
print(
" optional %s %s = %d;"
% (
PROTO_TYPE[attr["type"]],
attr["name"],
binascii.crc32(attr["name"].encode("ascii")) & 0x1FFFFFFF,
),
file=P,
)
print("};", file=P)
with open("test/core/util/fuzz_config_vars.h", "w") as H:
put_copyright(H)
put_banner(
[H],
[
"",
"Automatically generated by tools/codegen/core/gen_config_vars.py",
"",
],
)
print("#ifndef GRPC_TEST_CORE_UTIL_FUZZ_CONFIG_VARS_H", file=H)
print("#define GRPC_TEST_CORE_UTIL_FUZZ_CONFIG_VARS_H", file=H)
print(file=H)
print("#include <grpc/support/port_platform.h>", file=H)
print(file=H)
print('#include "test/core/util/fuzz_config_vars.pb.h"', file=H)
print('#include "src/core/lib/config/config_vars.h"', file=H)
print(file=H)
print("namespace grpc_core {", file=H)
print(file=H)
print(
(
"ConfigVars::Overrides OverridesFromFuzzConfigVars(const"
" grpc::testing::FuzzConfigVars& vars);"
),
file=H,
)
print(
"void ApplyFuzzConfigVars(const grpc::testing::FuzzConfigVars& vars);",
file=H,
)
print(file=H)
print("} // namespace grpc_core", file=H)
print(file=H)
print("#endif // GRPC_TEST_CORE_UTIL_FUZZ_CONFIG_VARS_H", file=H)
with open("test/core/util/fuzz_config_vars.cc", "w") as C:
put_copyright(C)
put_banner(
[C],
[
"",
"Automatically generated by tools/codegen/core/gen_config_vars.py",
"",
],
)
print('#include "test/core/util/fuzz_config_vars.h"', file=C)
print('#include "test/core/util/fuzz_config_vars_helpers.h"', file=C)
print(file=C)
print("namespace grpc_core {", file=C)
print(file=C)
print(
(
"ConfigVars::Overrides OverridesFromFuzzConfigVars(const"
" grpc::testing::FuzzConfigVars& vars) {"
),
file=C,
)
print(" ConfigVars::Overrides overrides;", file=C)
for attr in attrs_in_packing_order:
fuzz = attr.get("fuzz", False)
if not fuzz:
continue
print(" if (vars.has_%s()) {" % attr["name"], file=C)
if isinstance(fuzz, str):
print(
" overrides.%s = %s(vars.%s());"
% (attr["name"], fuzz, attr["name"]),
file=C,
)
else:
print(
" overrides.%s = vars.%s();" % (attr["name"], attr["name"]),
file=C,
)
print(" }", file=C)
print(" return overrides;", file=C)
print("}", file=C)
print(
"void ApplyFuzzConfigVars(const grpc::testing::FuzzConfigVars& vars) {",
file=C,
)
print(
" ConfigVars::SetOverrides(OverridesFromFuzzConfigVars(vars));", file=C
)
print("}", file=C)
print(file=C)
print("} // namespace grpc_core", file=C)
with open("src/core/lib/config/config_vars.h", "w") as H:
put_copyright(H)
put_banner(
[H],
[
"",
"Automatically generated by tools/codegen/core/gen_config_vars.py",
"",
],
)
print("#ifndef GRPC_SRC_CORE_LIB_CONFIG_CONFIG_VARS_H", file=H)
print("#define GRPC_SRC_CORE_LIB_CONFIG_CONFIG_VARS_H", file=H)
print(file=H)
print("#include <grpc/support/port_platform.h>", file=H)
print(file=H)
print("#include <string>", file=H)
print("#include <atomic>", file=H)
print("#include <stdint.h>", file=H)
print('#include "absl/strings/string_view.h"', file=H)
print('#include "absl/types/optional.h"', file=H)
print(file=H)
print("namespace grpc_core {", file=H)
print(file=H)
print("class ConfigVars {", file=H)
print(" public:", file=H)
print(" struct Overrides {", file=H)
for attr in attrs_in_packing_order:
print(
" absl::optional<%s> %s;"
% (MEMBER_TYPE[attr["type"]], attr["name"]),
file=H,
)
print(" };", file=H)
print(" ConfigVars(const ConfigVars&) = delete;", file=H)
print(" ConfigVars& operator=(const ConfigVars&) = delete;", file=H)
print(
" // Get the core configuration; if it does not exist, create it.",
file=H,
)
print(" static const ConfigVars& Get() {", file=H)
print(" auto* p = config_vars_.load(std::memory_order_acquire);", file=H)
print(" if (p != nullptr) return *p;", file=H)
print(" return Load();", file=H)
print(" }", file=H)
print(" static void SetOverrides(const Overrides& overrides);", file=H)
print(
" // Drop the config vars. Users must ensure no other threads are",
file=H,
)
print(" // accessing the configuration.", file=H)
print(" static void Reset();", file=H)
print(" std::string ToString() const;", file=H)
for attr in attrs:
for line in attr["description"].splitlines():
print(" // %s" % line, file=H)
if attr.get("force-load-on-access", False):
print(
" %s %s() const;"
% (MEMBER_TYPE[attr["type"]], snake_to_pascal(attr["name"])),
file=H,
)
else:
print(
" %s %s() const { return %s_; }"
% (
RETURN_TYPE[attr["type"]],
snake_to_pascal(attr["name"]),
attr["name"],
),
file=H,
)
print(" private:", file=H)
print(" explicit ConfigVars(const Overrides& overrides);", file=H)
print(" static const ConfigVars& Load();", file=H)
print(" static std::atomic<ConfigVars*> config_vars_;", file=H)
for attr in attrs_in_packing_order:
if attr.get("force-load-on-access", False):
continue
print(" %s %s_;" % (MEMBER_TYPE[attr["type"]], attr["name"]), file=H)
for attr in attrs_in_packing_order:
if attr.get("force-load-on-access", False) == False:
continue
print(
" absl::optional<%s> override_%s_;"
% (MEMBER_TYPE[attr["type"]], attr["name"]),
file=H,
)
print("};", file=H)
print(file=H)
print("} // namespace grpc_core", file=H)
print(file=H)
print("#endif // GRPC_SRC_CORE_LIB_CONFIG_CONFIG_VARS_H", file=H)
with open("src/core/lib/config/config_vars.cc", "w") as C:
put_copyright(C)
put_banner(
[C],
[
"",
"Automatically generated by tools/codegen/core/gen_config_vars.py",
"",
],
)
print("#include <grpc/support/port_platform.h>", file=C)
print('#include "src/core/lib/config/config_vars.h"', file=C)
print('#include "src/core/lib/config/load_config.h"', file=C)
print('#include "absl/strings/escaping.h"', file=C)
print('#include "absl/flags/flag.h"', file=C)
print(file=C)
for attr in attrs:
if "prelude" in attr:
print(attr["prelude"], file=C)
for attr in attrs:
print(
"ABSL_FLAG(%s, %s, {}, %s);"
% (
FLAG_TYPE[attr["type"]],
"grpc_" + attr["name"],
c_str(attr["description"]),
),
file=C,
)
print(file=C)
print("namespace grpc_core {", file=C)
print(file=C)
print("ConfigVars::ConfigVars(const Overrides& overrides) :", file=C)
initializers = [
'%s_(LoadConfig(FLAGS_grpc_%s, "GRPC_%s", overrides.%s, %s))'
% (
attr["name"],
attr["name"],
attr["name"].upper(),
attr["name"],
DEFAULT_VALUE[attr["type"]](attr["default"], attr["name"]),
)
for attr in attrs_in_packing_order
if attr.get("force-load-on-access", False) == False
]
initializers += [
"override_%s_(overrides.%s)" % (attr["name"], attr["name"])
for attr in attrs_in_packing_order
if attr.get("force-load-on-access", False)
]
print(",".join(initializers), file=C)
print("{}", file=C)
print(file=C)
for attr in attrs:
if attr.get("force-load-on-access", False):
print(
"%s ConfigVars::%s() const { return LoadConfig(FLAGS_grpc_%s,"
' "GRPC_%s", override_%s_, %s); }'
% (
MEMBER_TYPE[attr["type"]],
snake_to_pascal(attr["name"]),
attr["name"],
attr["name"].upper(),
attr["name"],
DEFAULT_VALUE[attr["type"]](attr["default"], attr["name"]),
),
file=C,
)
print(file=C)
print("std::string ConfigVars::ToString() const {", file=C)
print(" return absl::StrCat(", file=C)
for i, attr in enumerate(attrs):
if i:
print(",", file=C)
print(c_str(", " + attr["name"] + ": "), file=C)
else:
print(c_str(attr["name"] + ": "), file=C)
print(
",",
TO_STRING[attr["type"]].replace(
"$", snake_to_pascal(attr["name"]) + "()"
),
file=C,
)
print(");}", file=C)
print(file=C)
print("}", file=C)
| 14,306
| 28.257669
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/run_xds_tests.py
|
#!/usr/bin/env python3
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run xDS integration tests on GCP using Traffic Director."""
import argparse
import datetime
import json
import logging
import os
import random
import re
import shlex
import socket
import subprocess
import sys
import tempfile
import time
import uuid
from google.protobuf import json_format
import googleapiclient.discovery
import grpc
from oauth2client.client import GoogleCredentials
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
from src.proto.grpc.health.v1 import health_pb2
from src.proto.grpc.health.v1 import health_pb2_grpc
from src.proto.grpc.testing import empty_pb2
from src.proto.grpc.testing import messages_pb2
from src.proto.grpc.testing import test_pb2_grpc
# Envoy protos provided by PyPI package xds-protos
# Needs to import the generated Python file to load descriptors
try:
from envoy.extensions.filters.common.fault.v3 import fault_pb2
from envoy.extensions.filters.http.fault.v3 import fault_pb2
from envoy.extensions.filters.http.router.v3 import router_pb2
from envoy.extensions.filters.network.http_connection_manager.v3 import (
http_connection_manager_pb2,
)
from envoy.service.status.v3 import csds_pb2
from envoy.service.status.v3 import csds_pb2_grpc
except ImportError:
# These protos are required by CSDS test. We should not fail the entire
# script for one test case.
pass
logger = logging.getLogger()
console_handler = logging.StreamHandler()
formatter = logging.Formatter(fmt="%(asctime)s: %(levelname)-8s %(message)s")
console_handler.setFormatter(formatter)
logger.handlers = []
logger.addHandler(console_handler)
logger.setLevel(logging.WARNING)
# Suppress excessive logs for gRPC Python
original_grpc_trace = os.environ.pop("GRPC_TRACE", None)
original_grpc_verbosity = os.environ.pop("GRPC_VERBOSITY", None)
# Suppress not-essential logs for GCP clients
logging.getLogger("google_auth_httplib2").setLevel(logging.WARNING)
logging.getLogger("googleapiclient.discovery").setLevel(logging.WARNING)
_TEST_CASES = [
"backends_restart",
"change_backend_service",
"gentle_failover",
"load_report_based_failover",
"ping_pong",
"remove_instance_group",
"round_robin",
"secondary_locality_gets_no_requests_on_partial_primary_failure",
"secondary_locality_gets_requests_on_primary_failure",
"traffic_splitting",
"path_matching",
"header_matching",
"api_listener",
"forwarding_rule_port_match",
"forwarding_rule_default_port",
"metadata_filter",
]
# Valid test cases that are not included in "all". These tests can only be run
# manually, and aren't enabled automatically for all languages.
#
# TODO: Move them into _TEST_CASES when support is ready in all languages.
_ADDITIONAL_TEST_CASES = [
"circuit_breaking",
"timeout",
"fault_injection",
"csds",
]
# Test cases that require the V3 API. Skipped in older runs.
_V3_TEST_CASES = frozenset(["timeout", "fault_injection", "csds"])
# Test cases that require the alpha API. Skipped for stable API runs.
_ALPHA_TEST_CASES = frozenset(["timeout"])
def parse_test_cases(arg):
if arg == "":
return []
arg_split = arg.split(",")
test_cases = set()
all_test_cases = _TEST_CASES + _ADDITIONAL_TEST_CASES
for arg in arg_split:
if arg == "all":
test_cases = test_cases.union(_TEST_CASES)
else:
test_cases = test_cases.union([arg])
if not all([test_case in all_test_cases for test_case in test_cases]):
raise Exception("Failed to parse test cases %s" % arg)
    # Preserve order.
return [x for x in all_test_cases if x in test_cases]
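# Illustrative examples: parse_test_cases("ping_pong,round_robin") yields
# ["ping_pong", "round_robin"] (order follows _TEST_CASES, not the argument),
# and parse_test_cases("all") expands to every entry in _TEST_CASES.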
def parse_port_range(port_arg):
try:
port = int(port_arg)
return list(range(port, port + 1))
except:
port_min, port_max = port_arg.split(":")
return list(range(int(port_min), int(port_max) + 1))
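# Illustrative examples: parse_port_range("8080") returns [8080];
# parse_port_range("8080:8082") returns [8080, 8081, 8082].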
argp = argparse.ArgumentParser(description="Run xDS interop tests on GCP")
# TODO(zdapeng): remove default value of project_id and project_num
argp.add_argument("--project_id", default="grpc-testing", help="GCP project id")
argp.add_argument(
"--project_num", default="830293263384", help="GCP project number"
)
argp.add_argument(
"--gcp_suffix",
default="",
help=(
"Optional suffix for all generated GCP resource names. Useful to "
"ensure distinct names across test runs."
),
)
argp.add_argument(
"--test_case",
default="ping_pong",
type=parse_test_cases,
help=(
"Comma-separated list of test cases to run. Available tests: %s, "
"(or 'all' to run every test). "
"Alternative tests not included in 'all': %s"
)
% (",".join(_TEST_CASES), ",".join(_ADDITIONAL_TEST_CASES)),
)
argp.add_argument(
"--bootstrap_file",
default="",
help=(
"File to reference via GRPC_XDS_BOOTSTRAP. Disables built-in "
"bootstrap generation"
),
)
argp.add_argument(
"--xds_v3_support",
default=False,
action="store_true",
help=(
"Support xDS v3 via GRPC_XDS_EXPERIMENTAL_V3_SUPPORT. "
"If a pre-created bootstrap file is provided via the --bootstrap_file "
"parameter, it should include xds_v3 in its server_features field."
),
)
argp.add_argument(
"--client_cmd",
default=None,
help=(
"Command to launch xDS test client. {server_uri}, {stats_port} and"
" {qps} references will be replaced using str.format()."
" GRPC_XDS_BOOTSTRAP will be set for the command"
),
)
argp.add_argument(
"--client_hosts",
default=None,
help=(
"Comma-separated list of hosts running client processes. If set,"
" --client_cmd is ignored and client processes are assumed to be"
" running on the specified hosts."
),
)
argp.add_argument("--zone", default="us-central1-a")
argp.add_argument(
"--secondary_zone",
default="us-west1-b",
help="Zone to use for secondary TD locality tests",
)
argp.add_argument("--qps", default=100, type=int, help="Client QPS")
argp.add_argument(
"--wait_for_backend_sec",
default=1200,
type=int,
help=(
"Time limit for waiting for created backend services to report "
"healthy when launching or updated GCP resources"
),
)
argp.add_argument(
"--use_existing_gcp_resources",
default=False,
action="store_true",
help=(
"If set, find and use already created GCP resources instead of creating"
" new ones."
),
)
argp.add_argument(
"--keep_gcp_resources",
default=False,
action="store_true",
help=(
"Leave GCP VMs and configuration running after test. Default behavior"
" is to delete when tests complete."
),
)
argp.add_argument(
"--halt_after_fail",
action="store_true",
help="Halt and save the resources when test failed.",
)
argp.add_argument(
"--compute_discovery_document",
default=None,
type=str,
help=(
"If provided, uses this file instead of retrieving via the GCP"
" discovery API"
),
)
argp.add_argument(
"--alpha_compute_discovery_document",
default=None,
type=str,
help=(
"If provided, uses this file instead of retrieving via the alpha GCP "
"discovery API"
),
)
argp.add_argument(
"--network", default="global/networks/default", help="GCP network to use"
)
_DEFAULT_PORT_RANGE = "8080:8280"
argp.add_argument(
"--service_port_range",
default=_DEFAULT_PORT_RANGE,
type=parse_port_range,
help=(
"Listening port for created gRPC backends. Specified as "
"either a single int or as a range in the format min:max, in "
"which case an available port p will be chosen s.t. min <= p "
"<= max"
),
)
argp.add_argument(
"--stats_port",
default=8079,
type=int,
help="Local port for the client process to expose the LB stats service",
)
argp.add_argument(
"--xds_server",
default="trafficdirector.googleapis.com:443",
help="xDS server",
)
argp.add_argument(
"--source_image",
default="projects/debian-cloud/global/images/family/debian-9",
help="Source image for VMs created during the test",
)
argp.add_argument(
"--path_to_server_binary",
default=None,
type=str,
help=(
"If set, the server binary must already be pre-built on "
"the specified source image"
),
)
argp.add_argument(
"--machine_type",
default="e2-standard-2",
help="Machine type for VMs created during the test",
)
argp.add_argument(
"--instance_group_size",
default=2,
type=int,
help=(
"Number of VMs to create per instance group. Certain test cases (e.g.,"
" round_robin) may not give meaningful results if this is set to a"
" value less than 2."
),
)
argp.add_argument(
"--verbose", help="verbose log output", default=False, action="store_true"
)
# TODO(ericgribkoff) Remove this param once the sponge-formatted log files are
# visible in all test environments.
argp.add_argument(
"--log_client_output",
help="Log captured client output",
default=False,
action="store_true",
)
# TODO(ericgribkoff) Remove this flag once all test environments are verified to
# have access to the alpha compute APIs.
argp.add_argument(
"--only_stable_gcp_apis",
help=(
"Do not use alpha compute APIs. Some tests may be "
"incompatible with this option (gRPC health checks are "
"currently alpha and required for simulating server failure"
),
default=False,
action="store_true",
)
args = argp.parse_args()
if args.verbose:
logger.setLevel(logging.DEBUG)
CLIENT_HOSTS = []
if args.client_hosts:
CLIENT_HOSTS = args.client_hosts.split(",")
# Each config propagation in the control plane should finish within 600s.
# Otherwise, it indicates a bug in the control plane. The config propagation
# includes all kinds of traffic config update, like updating urlMap, creating
# the resources for the first time, updating BackendService, and changing the
# status of endpoints in BackendService.
_WAIT_FOR_URL_MAP_PATCH_SEC = 600
# In general, fetching load balancing stats only takes ~10s. However, slow
# config update could lead to empty EDS or similar symptoms causing the
# connection to hang for a long period of time. So, we want to extend the stats
# wait time to be the same as urlMap patch time.
_WAIT_FOR_STATS_SEC = _WAIT_FOR_URL_MAP_PATCH_SEC
_DEFAULT_SERVICE_PORT = 80
_WAIT_FOR_BACKEND_SEC = args.wait_for_backend_sec
_WAIT_FOR_OPERATION_SEC = 1200
_INSTANCE_GROUP_SIZE = args.instance_group_size
_NUM_TEST_RPCS = 10 * args.qps
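# Note: _NUM_TEST_RPCS amounts to roughly 10 seconds' worth of traffic at the
# configured QPS, which is why comments below describe one stats-collection
# attempt as taking about 10 seconds.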
_CONNECTION_TIMEOUT_SEC = 60
_GCP_API_RETRIES = 5
_BOOTSTRAP_TEMPLATE = """
{{
"node": {{
"id": "{node_id}",
"metadata": {{
"TRAFFICDIRECTOR_NETWORK_NAME": "%s",
"com.googleapis.trafficdirector.config_time_trace": "TRUE"
}},
"locality": {{
"zone": "%s"
}}
}},
"xds_servers": [{{
"server_uri": "%s",
"channel_creds": [
{{
"type": "google_default",
"config": {{}}
}}
],
"server_features": {server_features}
}}]
}}""" % (
args.network.split("/")[-1],
args.zone,
args.xds_server,
)
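# The bootstrap template above is rendered in two stages: the %-substitution
# here bakes in the network name, zone, and xDS server address, while the
# doubled braces leave {node_id} and {server_features} to be filled in later
# (presumably via str.format() when the bootstrap file is written). The result
# is the JSON bootstrap file consumed by the xDS-enabled test client/server.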
# TODO(ericgribkoff) Add change_backend_service to this list once TD no longer
# sends an update with no localities when adding the MIG to the backend
# service, which can race with the URL map patch.
_TESTS_TO_FAIL_ON_RPC_FAILURE = ["ping_pong", "round_robin"]
# Tests that run UnaryCall and EmptyCall.
_TESTS_TO_RUN_MULTIPLE_RPCS = ["path_matching", "header_matching"]
# Tests that make UnaryCall with test metadata.
_TESTS_TO_SEND_METADATA = ["header_matching"]
_TEST_METADATA_KEY = "xds_md"
_TEST_METADATA_VALUE_UNARY = "unary_yranu"
_TEST_METADATA_VALUE_EMPTY = "empty_ytpme"
# Extra RPC metadata whose value is a number, sent with UnaryCall only.
_TEST_METADATA_NUMERIC_KEY = "xds_md_numeric"
_TEST_METADATA_NUMERIC_VALUE = "159"
_PATH_MATCHER_NAME = "path-matcher"
_BASE_TEMPLATE_NAME = "test-template"
_BASE_INSTANCE_GROUP_NAME = "test-ig"
_BASE_HEALTH_CHECK_NAME = "test-hc"
_BASE_FIREWALL_RULE_NAME = "test-fw-rule"
_BASE_BACKEND_SERVICE_NAME = "test-backend-service"
_BASE_URL_MAP_NAME = "test-map"
_BASE_SERVICE_HOST = "grpc-test"
_BASE_TARGET_PROXY_NAME = "test-target-proxy"
_BASE_FORWARDING_RULE_NAME = "test-forwarding-rule"
_TEST_LOG_BASE_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "../../reports"
)
_SPONGE_LOG_NAME = "sponge_log.log"
_SPONGE_XML_NAME = "sponge_log.xml"
def get_client_stats(num_rpcs, timeout_sec):
if CLIENT_HOSTS:
hosts = CLIENT_HOSTS
else:
hosts = ["localhost"]
for host in hosts:
with grpc.insecure_channel(
"%s:%d" % (host, args.stats_port)
) as channel:
stub = test_pb2_grpc.LoadBalancerStatsServiceStub(channel)
request = messages_pb2.LoadBalancerStatsRequest()
request.num_rpcs = num_rpcs
request.timeout_sec = timeout_sec
rpc_timeout = timeout_sec + _CONNECTION_TIMEOUT_SEC
logger.debug(
"Invoking GetClientStats RPC to %s:%d:", host, args.stats_port
)
response = stub.GetClientStats(
request, wait_for_ready=True, timeout=rpc_timeout
)
logger.debug(
"Invoked GetClientStats RPC to %s: %s",
host,
json_format.MessageToJson(response),
)
return response
def get_client_accumulated_stats():
if CLIENT_HOSTS:
hosts = CLIENT_HOSTS
else:
hosts = ["localhost"]
for host in hosts:
with grpc.insecure_channel(
"%s:%d" % (host, args.stats_port)
) as channel:
stub = test_pb2_grpc.LoadBalancerStatsServiceStub(channel)
request = messages_pb2.LoadBalancerAccumulatedStatsRequest()
logger.debug(
"Invoking GetClientAccumulatedStats RPC to %s:%d:",
host,
args.stats_port,
)
response = stub.GetClientAccumulatedStats(
request, wait_for_ready=True, timeout=_CONNECTION_TIMEOUT_SEC
)
logger.debug(
"Invoked GetClientAccumulatedStats RPC to %s: %s",
host,
response,
)
return response
def get_client_xds_config_dump():
if CLIENT_HOSTS:
hosts = CLIENT_HOSTS
else:
hosts = ["localhost"]
for host in hosts:
server_address = "%s:%d" % (host, args.stats_port)
with grpc.insecure_channel(server_address) as channel:
stub = csds_pb2_grpc.ClientStatusDiscoveryServiceStub(channel)
logger.debug("Fetching xDS config dump from %s", server_address)
response = stub.FetchClientStatus(
csds_pb2.ClientStatusRequest(),
wait_for_ready=True,
timeout=_CONNECTION_TIMEOUT_SEC,
)
logger.debug("Fetched xDS config dump from %s", server_address)
if len(response.config) != 1:
logger.error(
"Unexpected number of ClientConfigs %d: %s",
len(response.config),
response,
)
return None
else:
                # Convert the ClientStatusResponse into JSON, because many
                # fields are packed in google.protobuf.Any. Unpacking the proto
                # messages directly would require a lot of duplicated code just
                # to inspect the values.
return json_format.MessageToDict(
response.config[0], preserving_proto_field_name=True
)
def configure_client(rpc_types, metadata=[], timeout_sec=None):
if CLIENT_HOSTS:
hosts = CLIENT_HOSTS
else:
hosts = ["localhost"]
for host in hosts:
with grpc.insecure_channel(
"%s:%d" % (host, args.stats_port)
) as channel:
stub = test_pb2_grpc.XdsUpdateClientConfigureServiceStub(channel)
request = messages_pb2.ClientConfigureRequest()
request.types.extend(rpc_types)
for rpc_type, md_key, md_value in metadata:
md = request.metadata.add()
md.type = rpc_type
md.key = md_key
md.value = md_value
if timeout_sec:
request.timeout_sec = timeout_sec
logger.debug(
"Invoking XdsUpdateClientConfigureService RPC to %s:%d: %s",
host,
args.stats_port,
request,
)
stub.Configure(
request, wait_for_ready=True, timeout=_CONNECTION_TIMEOUT_SEC
)
logger.debug(
"Invoked XdsUpdateClientConfigureService RPC to %s", host
)
class RpcDistributionError(Exception):
pass
def _verify_rpcs_to_given_backends(
backends, timeout_sec, num_rpcs, allow_failures
):
start_time = time.time()
error_msg = None
logger.debug(
"Waiting for %d sec until backends %s receive load"
% (timeout_sec, backends)
)
while time.time() - start_time <= timeout_sec:
error_msg = None
stats = get_client_stats(num_rpcs, timeout_sec)
rpcs_by_peer = stats.rpcs_by_peer
for backend in backends:
if backend not in rpcs_by_peer:
error_msg = "Backend %s did not receive load" % backend
break
if not error_msg and len(rpcs_by_peer) > len(backends):
error_msg = "Unexpected backend received load: %s" % rpcs_by_peer
if not allow_failures and stats.num_failures > 0:
error_msg = "%d RPCs failed" % stats.num_failures
if not error_msg:
return
raise RpcDistributionError(error_msg)
def wait_until_all_rpcs_go_to_given_backends_or_fail(
backends, timeout_sec, num_rpcs=_NUM_TEST_RPCS
):
_verify_rpcs_to_given_backends(
backends, timeout_sec, num_rpcs, allow_failures=True
)
def wait_until_all_rpcs_go_to_given_backends(
backends, timeout_sec, num_rpcs=_NUM_TEST_RPCS
):
_verify_rpcs_to_given_backends(
backends, timeout_sec, num_rpcs, allow_failures=False
)
def wait_until_no_rpcs_go_to_given_backends(backends, timeout_sec):
start_time = time.time()
while time.time() - start_time <= timeout_sec:
stats = get_client_stats(_NUM_TEST_RPCS, timeout_sec)
error_msg = None
rpcs_by_peer = stats.rpcs_by_peer
for backend in backends:
if backend in rpcs_by_peer:
error_msg = "Unexpected backend %s receives load" % backend
break
if not error_msg:
return
raise Exception("Unexpected RPCs going to given backends")
def wait_until_rpcs_in_flight(rpc_type, timeout_sec, num_rpcs, threshold):
"""Block until the test client reaches the state with the given number
of RPCs being outstanding stably.
Args:
rpc_type: A string indicating the RPC method to check for. Either
'UnaryCall' or 'EmptyCall'.
timeout_sec: Maximum number of seconds to wait until the desired state
is reached.
num_rpcs: Expected number of RPCs to be in-flight.
threshold: Number within [0,100], the tolerable percentage by which
the actual number of RPCs in-flight can differ from the expected number.
"""
if threshold < 0 or threshold > 100:
raise ValueError("Value error: Threshold should be between 0 to 100")
threshold_fraction = threshold / 100.0
start_time = time.time()
error_msg = None
logger.debug(
"Waiting for %d sec until %d %s RPCs (with %d%% tolerance) in-flight"
% (timeout_sec, num_rpcs, rpc_type, threshold)
)
while time.time() - start_time <= timeout_sec:
error_msg = _check_rpcs_in_flight(
rpc_type, num_rpcs, threshold, threshold_fraction
)
if error_msg:
logger.debug("Progress: %s", error_msg)
time.sleep(2)
else:
break
# Ensure the number of outstanding RPCs is stable.
if not error_msg:
time.sleep(5)
error_msg = _check_rpcs_in_flight(
rpc_type, num_rpcs, threshold, threshold_fraction
)
if error_msg:
raise Exception(
"Wrong number of %s RPCs in-flight: %s" % (rpc_type, error_msg)
)
def _check_rpcs_in_flight(rpc_type, num_rpcs, threshold, threshold_fraction):
error_msg = None
stats = get_client_accumulated_stats()
rpcs_started = stats.num_rpcs_started_by_method[rpc_type]
rpcs_succeeded = stats.num_rpcs_succeeded_by_method[rpc_type]
rpcs_failed = stats.num_rpcs_failed_by_method[rpc_type]
rpcs_in_flight = rpcs_started - rpcs_succeeded - rpcs_failed
if rpcs_in_flight < (num_rpcs * (1 - threshold_fraction)):
error_msg = "actual(%d) < expected(%d - %d%%)" % (
rpcs_in_flight,
num_rpcs,
threshold,
)
elif rpcs_in_flight > (num_rpcs * (1 + threshold_fraction)):
error_msg = "actual(%d) > expected(%d + %d%%)" % (
rpcs_in_flight,
num_rpcs,
threshold,
)
return error_msg
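# For illustration (hypothetical numbers): with num_rpcs=500 and threshold=1,
# _check_rpcs_in_flight() accepts any in-flight count in [495, 505]; a value
# outside that band yields an error message and the caller keeps polling until
# its timeout expires.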
def compare_distributions(
actual_distribution, expected_distribution, threshold
):
"""Compare if two distributions are similar.
Args:
actual_distribution: A list of floats, contains the actual distribution.
expected_distribution: A list of floats, contains the expected distribution.
threshold: Number within [0,100], the threshold percentage by which the
actual distribution can differ from the expected distribution.
Returns:
The similarity between the distributions as a boolean. Returns true if the
actual distribution lies within the threshold of the expected
distribution, false otherwise.
Raises:
      ValueError: if threshold is not within [0,100].
Exception: containing detailed error messages.
"""
if len(expected_distribution) != len(actual_distribution):
raise Exception(
"Error: expected and actual distributions have different size (%d"
" vs %d)" % (len(expected_distribution), len(actual_distribution))
)
if threshold < 0 or threshold > 100:
raise ValueError("Value error: Threshold should be between 0 to 100")
threshold_fraction = threshold / 100.0
for expected, actual in zip(expected_distribution, actual_distribution):
if actual < (expected * (1 - threshold_fraction)):
raise Exception(
"actual(%f) < expected(%f-%d%%)" % (actual, expected, threshold)
)
if actual > (expected * (1 + threshold_fraction)):
raise Exception(
"actual(%f) > expected(%f+%d%%)" % (actual, expected, threshold)
)
return True
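# For illustration (hypothetical numbers): with threshold=5, an expected
# distribution of [20.0, 80.0] tolerates actual values of 19.0-21.0 and
# 76.0-84.0 respectively; anything outside those bands raises an Exception.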
def compare_expected_instances(stats, expected_instances):
"""Compare if stats have expected instances for each type of RPC.
Args:
stats: LoadBalancerStatsResponse reported by interop client.
expected_instances: a dict with key as the RPC type (string), value as
the expected backend instances (list of strings).
Returns:
Returns true if the instances are expected. False if not.
"""
for rpc_type, expected_peers in list(expected_instances.items()):
rpcs_by_peer_for_type = stats.rpcs_by_method[rpc_type]
rpcs_by_peer = (
rpcs_by_peer_for_type.rpcs_by_peer
if rpcs_by_peer_for_type
else None
)
logger.debug("rpc: %s, by_peer: %s", rpc_type, rpcs_by_peer)
peers = list(rpcs_by_peer.keys())
if set(peers) != set(expected_peers):
logger.info(
"unexpected peers for %s, got %s, want %s",
rpc_type,
peers,
expected_peers,
)
return False
return True
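# For illustration, an expected_instances argument might look like the
# following (instance names are hypothetical):
#   {"UnaryCall": ["test-ig-0001", "test-ig-0002"],
#    "EmptyCall": ["test-ig-same-zone-0001"]}
# The check passes only when the set of peers observed for each RPC type
# exactly matches the corresponding expected list.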
def test_backends_restart(gcp, backend_service, instance_group):
logger.info("Running test_backends_restart")
instance_names = get_instance_names(gcp, instance_group)
num_instances = len(instance_names)
start_time = time.time()
wait_until_all_rpcs_go_to_given_backends(
instance_names, _WAIT_FOR_STATS_SEC
)
try:
resize_instance_group(gcp, instance_group, 0)
wait_until_all_rpcs_go_to_given_backends_or_fail(
[], _WAIT_FOR_BACKEND_SEC
)
finally:
resize_instance_group(gcp, instance_group, num_instances)
wait_for_healthy_backends(gcp, backend_service, instance_group)
new_instance_names = get_instance_names(gcp, instance_group)
wait_until_all_rpcs_go_to_given_backends(
new_instance_names, _WAIT_FOR_BACKEND_SEC
)
def test_change_backend_service(
gcp,
original_backend_service,
instance_group,
alternate_backend_service,
same_zone_instance_group,
):
logger.info("Running test_change_backend_service")
original_backend_instances = get_instance_names(gcp, instance_group)
alternate_backend_instances = get_instance_names(
gcp, same_zone_instance_group
)
patch_backend_service(
gcp, alternate_backend_service, [same_zone_instance_group]
)
wait_for_healthy_backends(gcp, original_backend_service, instance_group)
wait_for_healthy_backends(
gcp, alternate_backend_service, same_zone_instance_group
)
wait_until_all_rpcs_go_to_given_backends(
original_backend_instances, _WAIT_FOR_STATS_SEC
)
passed = True
try:
patch_url_map_backend_service(gcp, alternate_backend_service)
wait_until_all_rpcs_go_to_given_backends(
alternate_backend_instances, _WAIT_FOR_URL_MAP_PATCH_SEC
)
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
patch_url_map_backend_service(gcp, original_backend_service)
patch_backend_service(gcp, alternate_backend_service, [])
def test_gentle_failover(
gcp,
backend_service,
primary_instance_group,
secondary_instance_group,
swapped_primary_and_secondary=False,
):
logger.info("Running test_gentle_failover")
num_primary_instances = len(get_instance_names(gcp, primary_instance_group))
min_instances_for_gentle_failover = 3 # Need >50% failure to start failover
passed = True
try:
if num_primary_instances < min_instances_for_gentle_failover:
resize_instance_group(
gcp, primary_instance_group, min_instances_for_gentle_failover
)
patch_backend_service(
gcp,
backend_service,
[primary_instance_group, secondary_instance_group],
)
primary_instance_names = get_instance_names(gcp, primary_instance_group)
secondary_instance_names = get_instance_names(
gcp, secondary_instance_group
)
wait_for_healthy_backends(gcp, backend_service, primary_instance_group)
wait_for_healthy_backends(
gcp, backend_service, secondary_instance_group
)
wait_until_all_rpcs_go_to_given_backends(
primary_instance_names, _WAIT_FOR_STATS_SEC
)
instances_to_stop = primary_instance_names[:-1]
remaining_instances = primary_instance_names[-1:]
try:
set_serving_status(
instances_to_stop, gcp.service_port, serving=False
)
wait_until_all_rpcs_go_to_given_backends(
remaining_instances + secondary_instance_names,
_WAIT_FOR_BACKEND_SEC,
)
finally:
set_serving_status(
primary_instance_names, gcp.service_port, serving=True
)
except RpcDistributionError as e:
if not swapped_primary_and_secondary and is_primary_instance_group(
gcp, secondary_instance_group
):
# Swap expectation of primary and secondary instance groups.
test_gentle_failover(
gcp,
backend_service,
secondary_instance_group,
primary_instance_group,
swapped_primary_and_secondary=True,
)
else:
passed = False
raise e
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
patch_backend_service(
gcp, backend_service, [primary_instance_group]
)
resize_instance_group(
gcp, primary_instance_group, num_primary_instances
)
instance_names = get_instance_names(gcp, primary_instance_group)
wait_until_all_rpcs_go_to_given_backends(
instance_names, _WAIT_FOR_BACKEND_SEC
)
def test_load_report_based_failover(
gcp, backend_service, primary_instance_group, secondary_instance_group
):
logger.info("Running test_load_report_based_failover")
passed = True
try:
patch_backend_service(
gcp,
backend_service,
[primary_instance_group, secondary_instance_group],
)
primary_instance_names = get_instance_names(gcp, primary_instance_group)
secondary_instance_names = get_instance_names(
gcp, secondary_instance_group
)
wait_for_healthy_backends(gcp, backend_service, primary_instance_group)
wait_for_healthy_backends(
gcp, backend_service, secondary_instance_group
)
wait_until_all_rpcs_go_to_given_backends(
primary_instance_names, _WAIT_FOR_STATS_SEC
)
# Set primary locality's balance mode to RATE, and RPS to 20% of the
# client's QPS. The secondary locality will be used.
max_rate = int(args.qps * 1 / 5)
logger.info(
"Patching backend service to RATE with %d max_rate", max_rate
)
patch_backend_service(
gcp,
backend_service,
[primary_instance_group, secondary_instance_group],
balancing_mode="RATE",
max_rate=max_rate,
)
wait_until_all_rpcs_go_to_given_backends(
primary_instance_names + secondary_instance_names,
_WAIT_FOR_BACKEND_SEC,
)
# Set primary locality's balance mode to RATE, and RPS to 120% of the
# client's QPS. Only the primary locality will be used.
max_rate = int(args.qps * 6 / 5)
logger.info(
"Patching backend service to RATE with %d max_rate", max_rate
)
patch_backend_service(
gcp,
backend_service,
[primary_instance_group, secondary_instance_group],
balancing_mode="RATE",
max_rate=max_rate,
)
wait_until_all_rpcs_go_to_given_backends(
primary_instance_names, _WAIT_FOR_BACKEND_SEC
)
logger.info("success")
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
patch_backend_service(
gcp, backend_service, [primary_instance_group]
)
instance_names = get_instance_names(gcp, primary_instance_group)
wait_until_all_rpcs_go_to_given_backends(
instance_names, _WAIT_FOR_BACKEND_SEC
)
def test_ping_pong(gcp, backend_service, instance_group):
logger.info("Running test_ping_pong")
wait_for_healthy_backends(gcp, backend_service, instance_group)
instance_names = get_instance_names(gcp, instance_group)
wait_until_all_rpcs_go_to_given_backends(
instance_names, _WAIT_FOR_STATS_SEC
)
def test_remove_instance_group(
gcp, backend_service, instance_group, same_zone_instance_group
):
logger.info("Running test_remove_instance_group")
passed = True
try:
patch_backend_service(
gcp,
backend_service,
[instance_group, same_zone_instance_group],
balancing_mode="RATE",
)
wait_for_healthy_backends(gcp, backend_service, instance_group)
wait_for_healthy_backends(
gcp, backend_service, same_zone_instance_group
)
instance_names = get_instance_names(gcp, instance_group)
same_zone_instance_names = get_instance_names(
gcp, same_zone_instance_group
)
try:
wait_until_all_rpcs_go_to_given_backends(
instance_names + same_zone_instance_names,
_WAIT_FOR_OPERATION_SEC,
)
remaining_instance_group = same_zone_instance_group
remaining_instance_names = same_zone_instance_names
except RpcDistributionError as e:
# If connected to TD in a different zone, we may route traffic to
# only one instance group. Determine which group that is to continue
# with the remainder of the test case.
try:
wait_until_all_rpcs_go_to_given_backends(
instance_names, _WAIT_FOR_STATS_SEC
)
remaining_instance_group = same_zone_instance_group
remaining_instance_names = same_zone_instance_names
except RpcDistributionError as e:
wait_until_all_rpcs_go_to_given_backends(
same_zone_instance_names, _WAIT_FOR_STATS_SEC
)
remaining_instance_group = instance_group
remaining_instance_names = instance_names
patch_backend_service(
gcp,
backend_service,
[remaining_instance_group],
balancing_mode="RATE",
)
wait_until_all_rpcs_go_to_given_backends(
remaining_instance_names, _WAIT_FOR_BACKEND_SEC
)
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
patch_backend_service(gcp, backend_service, [instance_group])
wait_until_all_rpcs_go_to_given_backends(
instance_names, _WAIT_FOR_BACKEND_SEC
)
def test_round_robin(gcp, backend_service, instance_group):
logger.info("Running test_round_robin")
wait_for_healthy_backends(gcp, backend_service, instance_group)
instance_names = get_instance_names(gcp, instance_group)
threshold = 1
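    # With threshold=1, each backend's request count may deviate from the even
    # share (total / len(instance_names)) by at most one RPC; hypothetically,
    # 100 RPCs over 2 instances must land as 49-51 requests each.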
wait_until_all_rpcs_go_to_given_backends(
instance_names, _WAIT_FOR_STATS_SEC
)
# TODO(ericgribkoff) Delayed config propagation from earlier tests
# may result in briefly receiving an empty EDS update, resulting in failed
# RPCs. Retry distribution validation if this occurs; long-term fix is
# creating new backend resources for each individual test case.
# Each attempt takes 10 seconds. Config propagation can take several
# minutes.
max_attempts = 40
for i in range(max_attempts):
stats = get_client_stats(_NUM_TEST_RPCS, _WAIT_FOR_STATS_SEC)
requests_received = [stats.rpcs_by_peer[x] for x in stats.rpcs_by_peer]
total_requests_received = sum(requests_received)
if total_requests_received != _NUM_TEST_RPCS:
logger.info("Unexpected RPC failures, retrying: %s", stats)
continue
expected_requests = total_requests_received / len(instance_names)
for instance in instance_names:
if (
abs(stats.rpcs_by_peer[instance] - expected_requests)
> threshold
):
raise Exception(
"RPC peer distribution differs from expected by more than"
" %d for instance %s (%s)" % (threshold, instance, stats)
)
return
raise Exception("RPC failures persisted through %d retries" % max_attempts)
def test_secondary_locality_gets_no_requests_on_partial_primary_failure(
gcp,
backend_service,
primary_instance_group,
secondary_instance_group,
swapped_primary_and_secondary=False,
):
logger.info(
"Running secondary_locality_gets_no_requests_on_partial_primary_failure"
)
passed = True
try:
patch_backend_service(
gcp,
backend_service,
[primary_instance_group, secondary_instance_group],
)
wait_for_healthy_backends(gcp, backend_service, primary_instance_group)
wait_for_healthy_backends(
gcp, backend_service, secondary_instance_group
)
primary_instance_names = get_instance_names(gcp, primary_instance_group)
wait_until_all_rpcs_go_to_given_backends(
primary_instance_names, _WAIT_FOR_STATS_SEC
)
instances_to_stop = primary_instance_names[:1]
remaining_instances = primary_instance_names[1:]
try:
set_serving_status(
instances_to_stop, gcp.service_port, serving=False
)
wait_until_all_rpcs_go_to_given_backends(
remaining_instances, _WAIT_FOR_BACKEND_SEC
)
finally:
set_serving_status(
primary_instance_names, gcp.service_port, serving=True
)
except RpcDistributionError as e:
if not swapped_primary_and_secondary and is_primary_instance_group(
gcp, secondary_instance_group
):
# Swap expectation of primary and secondary instance groups.
test_secondary_locality_gets_no_requests_on_partial_primary_failure(
gcp,
backend_service,
secondary_instance_group,
primary_instance_group,
swapped_primary_and_secondary=True,
)
else:
passed = False
raise e
finally:
if passed or not args.halt_after_fail:
patch_backend_service(
gcp, backend_service, [primary_instance_group]
)
def test_secondary_locality_gets_requests_on_primary_failure(
gcp,
backend_service,
primary_instance_group,
secondary_instance_group,
swapped_primary_and_secondary=False,
):
logger.info("Running secondary_locality_gets_requests_on_primary_failure")
passed = True
try:
patch_backend_service(
gcp,
backend_service,
[primary_instance_group, secondary_instance_group],
)
wait_for_healthy_backends(gcp, backend_service, primary_instance_group)
wait_for_healthy_backends(
gcp, backend_service, secondary_instance_group
)
primary_instance_names = get_instance_names(gcp, primary_instance_group)
secondary_instance_names = get_instance_names(
gcp, secondary_instance_group
)
wait_until_all_rpcs_go_to_given_backends(
primary_instance_names, _WAIT_FOR_STATS_SEC
)
try:
set_serving_status(
primary_instance_names, gcp.service_port, serving=False
)
wait_until_all_rpcs_go_to_given_backends(
secondary_instance_names, _WAIT_FOR_BACKEND_SEC
)
finally:
set_serving_status(
primary_instance_names, gcp.service_port, serving=True
)
except RpcDistributionError as e:
if not swapped_primary_and_secondary and is_primary_instance_group(
gcp, secondary_instance_group
):
# Swap expectation of primary and secondary instance groups.
test_secondary_locality_gets_requests_on_primary_failure(
gcp,
backend_service,
secondary_instance_group,
primary_instance_group,
swapped_primary_and_secondary=True,
)
else:
passed = False
raise e
finally:
if passed or not args.halt_after_fail:
patch_backend_service(
gcp, backend_service, [primary_instance_group]
)
def prepare_services_for_urlmap_tests(
gcp,
original_backend_service,
instance_group,
alternate_backend_service,
same_zone_instance_group,
):
"""
    This function prepares the services for tests that modify URL maps.
Returns:
Returns original and alternate backend names as lists of strings.
"""
logger.info("waiting for original backends to become healthy")
wait_for_healthy_backends(gcp, original_backend_service, instance_group)
patch_backend_service(
gcp, alternate_backend_service, [same_zone_instance_group]
)
logger.info("waiting for alternate to become healthy")
wait_for_healthy_backends(
gcp, alternate_backend_service, same_zone_instance_group
)
original_backend_instances = get_instance_names(gcp, instance_group)
logger.info("original backends instances: %s", original_backend_instances)
alternate_backend_instances = get_instance_names(
gcp, same_zone_instance_group
)
logger.info("alternate backends instances: %s", alternate_backend_instances)
# Start with all traffic going to original_backend_service.
logger.info("waiting for traffic to all go to original backends")
wait_until_all_rpcs_go_to_given_backends(
original_backend_instances, _WAIT_FOR_STATS_SEC
)
return original_backend_instances, alternate_backend_instances
def test_metadata_filter(
gcp,
original_backend_service,
instance_group,
alternate_backend_service,
same_zone_instance_group,
):
logger.info("Running test_metadata_filter")
wait_for_healthy_backends(gcp, original_backend_service, instance_group)
original_backend_instances = get_instance_names(gcp, instance_group)
alternate_backend_instances = get_instance_names(
gcp, same_zone_instance_group
)
patch_backend_service(
gcp, alternate_backend_service, [same_zone_instance_group]
)
wait_for_healthy_backends(
gcp, alternate_backend_service, same_zone_instance_group
)
passed = True
try:
with open(bootstrap_path) as f:
md = json.load(f)["node"]["metadata"]
match_labels = []
for k, v in list(md.items()):
match_labels.append({"name": k, "value": v})
not_match_labels = [{"name": "fake", "value": "fail"}]
test_route_rules = [
# test MATCH_ALL
[
{
"priority": 0,
"matchRules": [
{
"prefixMatch": "/",
"metadataFilters": [
{
"filterMatchCriteria": "MATCH_ALL",
"filterLabels": not_match_labels,
}
],
}
],
"service": original_backend_service.url,
},
{
"priority": 1,
"matchRules": [
{
"prefixMatch": "/",
"metadataFilters": [
{
"filterMatchCriteria": "MATCH_ALL",
"filterLabels": match_labels,
}
],
}
],
"service": alternate_backend_service.url,
},
],
# test mixing MATCH_ALL and MATCH_ANY
            # test MATCH_ALL: superset labels won't match
[
{
"priority": 0,
"matchRules": [
{
"prefixMatch": "/",
"metadataFilters": [
{
"filterMatchCriteria": "MATCH_ALL",
"filterLabels": not_match_labels
+ match_labels,
}
],
}
],
"service": original_backend_service.url,
},
{
"priority": 1,
"matchRules": [
{
"prefixMatch": "/",
"metadataFilters": [
{
"filterMatchCriteria": "MATCH_ANY",
"filterLabels": not_match_labels
+ match_labels,
}
],
}
],
"service": alternate_backend_service.url,
},
],
# test MATCH_ANY
[
{
"priority": 0,
"matchRules": [
{
"prefixMatch": "/",
"metadataFilters": [
{
"filterMatchCriteria": "MATCH_ANY",
"filterLabels": not_match_labels,
}
],
}
],
"service": original_backend_service.url,
},
{
"priority": 1,
"matchRules": [
{
"prefixMatch": "/",
"metadataFilters": [
{
"filterMatchCriteria": "MATCH_ANY",
"filterLabels": not_match_labels
+ match_labels,
}
],
}
],
"service": alternate_backend_service.url,
},
],
# test match multiple route rules
[
{
"priority": 0,
"matchRules": [
{
"prefixMatch": "/",
"metadataFilters": [
{
"filterMatchCriteria": "MATCH_ANY",
"filterLabels": match_labels,
}
],
}
],
"service": alternate_backend_service.url,
},
{
"priority": 1,
"matchRules": [
{
"prefixMatch": "/",
"metadataFilters": [
{
"filterMatchCriteria": "MATCH_ALL",
"filterLabels": match_labels,
}
],
}
],
"service": original_backend_service.url,
},
],
]
for route_rules in test_route_rules:
wait_until_all_rpcs_go_to_given_backends(
original_backend_instances, _WAIT_FOR_STATS_SEC
)
patch_url_map_backend_service(
gcp, original_backend_service, route_rules=route_rules
)
wait_until_no_rpcs_go_to_given_backends(
original_backend_instances, _WAIT_FOR_STATS_SEC
)
wait_until_all_rpcs_go_to_given_backends(
alternate_backend_instances, _WAIT_FOR_STATS_SEC
)
patch_url_map_backend_service(gcp, original_backend_service)
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
patch_backend_service(gcp, alternate_backend_service, [])
def test_api_listener(
gcp, backend_service, instance_group, alternate_backend_service
):
logger.info("Running api_listener")
passed = True
try:
wait_for_healthy_backends(gcp, backend_service, instance_group)
backend_instances = get_instance_names(gcp, instance_group)
wait_until_all_rpcs_go_to_given_backends(
backend_instances, _WAIT_FOR_STATS_SEC
)
        # Create a second suite of url-map + target-proxy + forwarding-rule
        # with the same host name in the host rule. Proxyless validation must
        # be disabled because proxyless requires a `0.0.0.0` ip address in the
        # forwarding rule, and this test also violates ip:port uniqueness on
        # purpose. See https://github.com/grpc/grpc-java/issues/8009
new_config_suffix = "2"
url_map_2 = create_url_map(
gcp,
url_map_name + new_config_suffix,
backend_service,
service_host_name,
)
target_proxy_2 = create_target_proxy(
gcp, target_proxy_name + new_config_suffix, False, url_map_2
)
if not gcp.service_port:
raise Exception(
"Faied to find a valid port for the forwarding rule"
)
potential_ip_addresses = []
max_attempts = 10
for i in range(max_attempts):
potential_ip_addresses.append(
"10.10.10.%d" % (random.randint(0, 255))
)
create_global_forwarding_rule(
gcp,
forwarding_rule_name + new_config_suffix,
[gcp.service_port],
potential_ip_addresses,
target_proxy_2,
)
if gcp.service_port != _DEFAULT_SERVICE_PORT:
patch_url_map_host_rule_with_port(
gcp,
url_map_name + new_config_suffix,
backend_service,
service_host_name,
)
wait_until_all_rpcs_go_to_given_backends(
backend_instances, _WAIT_FOR_STATS_SEC
)
delete_global_forwarding_rule(gcp, gcp.global_forwarding_rules[0])
delete_target_proxy(gcp, gcp.target_proxies[0])
delete_url_map(gcp, gcp.url_maps[0])
verify_attempts = int(
_WAIT_FOR_URL_MAP_PATCH_SEC / _NUM_TEST_RPCS * args.qps
)
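        # Note: _WAIT_FOR_URL_MAP_PATCH_SEC / _NUM_TEST_RPCS * args.qps reduces
        # to 600 / 10 = 60 attempts, and each wait below collects roughly 10
        # seconds of traffic, so the loop spans the full config-propagation
        # window.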
for i in range(verify_attempts):
wait_until_all_rpcs_go_to_given_backends(
backend_instances, _WAIT_FOR_STATS_SEC
)
# delete host rule for the original host name
patch_url_map_backend_service(gcp, alternate_backend_service)
wait_until_no_rpcs_go_to_given_backends(
backend_instances, _WAIT_FOR_STATS_SEC
)
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
delete_global_forwarding_rules(gcp)
delete_target_proxies(gcp)
delete_url_maps(gcp)
create_url_map(
gcp, url_map_name, backend_service, service_host_name
)
create_target_proxy(gcp, target_proxy_name)
create_global_forwarding_rule(
gcp, forwarding_rule_name, potential_service_ports
)
if gcp.service_port != _DEFAULT_SERVICE_PORT:
patch_url_map_host_rule_with_port(
gcp, url_map_name, backend_service, service_host_name
)
server_uri = service_host_name + ":" + str(gcp.service_port)
else:
server_uri = service_host_name
return server_uri
def test_forwarding_rule_port_match(gcp, backend_service, instance_group):
logger.info("Running test_forwarding_rule_port_match")
passed = True
try:
wait_for_healthy_backends(gcp, backend_service, instance_group)
backend_instances = get_instance_names(gcp, instance_group)
wait_until_all_rpcs_go_to_given_backends(
backend_instances, _WAIT_FOR_STATS_SEC
)
delete_global_forwarding_rules(gcp)
create_global_forwarding_rule(
gcp,
forwarding_rule_name,
[
x
for x in parse_port_range(_DEFAULT_PORT_RANGE)
if x != gcp.service_port
],
)
wait_until_no_rpcs_go_to_given_backends(
backend_instances, _WAIT_FOR_STATS_SEC
)
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
delete_global_forwarding_rules(gcp)
create_global_forwarding_rule(
gcp, forwarding_rule_name, potential_service_ports
)
if gcp.service_port != _DEFAULT_SERVICE_PORT:
patch_url_map_host_rule_with_port(
gcp, url_map_name, backend_service, service_host_name
)
server_uri = service_host_name + ":" + str(gcp.service_port)
else:
server_uri = service_host_name
return server_uri
def test_forwarding_rule_default_port(gcp, backend_service, instance_group):
logger.info("Running test_forwarding_rule_default_port")
passed = True
try:
wait_for_healthy_backends(gcp, backend_service, instance_group)
backend_instances = get_instance_names(gcp, instance_group)
if gcp.service_port == _DEFAULT_SERVICE_PORT:
wait_until_all_rpcs_go_to_given_backends(
backend_instances, _WAIT_FOR_STATS_SEC
)
delete_global_forwarding_rules(gcp)
create_global_forwarding_rule(
gcp, forwarding_rule_name, parse_port_range(_DEFAULT_PORT_RANGE)
)
patch_url_map_host_rule_with_port(
gcp, url_map_name, backend_service, service_host_name
)
wait_until_no_rpcs_go_to_given_backends(
backend_instances, _WAIT_FOR_STATS_SEC
)
# expect success when no port in client request service uri, and no port in url-map
delete_global_forwarding_rule(gcp, gcp.global_forwarding_rules[0])
delete_target_proxy(gcp, gcp.target_proxies[0])
delete_url_map(gcp, gcp.url_maps[0])
create_url_map(gcp, url_map_name, backend_service, service_host_name)
create_target_proxy(gcp, target_proxy_name, False)
potential_ip_addresses = []
max_attempts = 10
for i in range(max_attempts):
potential_ip_addresses.append(
"10.10.10.%d" % (random.randint(0, 255))
)
create_global_forwarding_rule(
gcp, forwarding_rule_name, [80], potential_ip_addresses
)
wait_until_all_rpcs_go_to_given_backends(
backend_instances, _WAIT_FOR_STATS_SEC
)
# expect failure when no port in client request uri, but specify port in url-map
patch_url_map_host_rule_with_port(
gcp, url_map_name, backend_service, service_host_name
)
wait_until_no_rpcs_go_to_given_backends(
backend_instances, _WAIT_FOR_STATS_SEC
)
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
delete_global_forwarding_rules(gcp)
delete_target_proxies(gcp)
delete_url_maps(gcp)
create_url_map(
gcp, url_map_name, backend_service, service_host_name
)
create_target_proxy(gcp, target_proxy_name)
create_global_forwarding_rule(
gcp, forwarding_rule_name, potential_service_ports
)
if gcp.service_port != _DEFAULT_SERVICE_PORT:
patch_url_map_host_rule_with_port(
gcp, url_map_name, backend_service, service_host_name
)
server_uri = service_host_name + ":" + str(gcp.service_port)
else:
server_uri = service_host_name
return server_uri
def test_traffic_splitting(
gcp,
original_backend_service,
instance_group,
alternate_backend_service,
same_zone_instance_group,
):
    # This test starts with all traffic going to original_backend_service. Then
# it updates URL-map to set default action to traffic splitting between
# original and alternate. It waits for all backends in both services to
# receive traffic, then verifies that weights are expected.
logger.info("Running test_traffic_splitting")
(
original_backend_instances,
alternate_backend_instances,
) = prepare_services_for_urlmap_tests(
gcp,
original_backend_service,
instance_group,
alternate_backend_service,
same_zone_instance_group,
)
passed = True
try:
# Patch urlmap, change route action to traffic splitting between
# original and alternate.
logger.info("patching url map with traffic splitting")
original_service_percentage, alternate_service_percentage = 20, 80
patch_url_map_backend_service(
gcp,
services_with_weights={
original_backend_service: original_service_percentage,
alternate_backend_service: alternate_service_percentage,
},
)
# Split percentage between instances: [20,80] -> [10,10,40,40].
expected_instance_percentage = [
original_service_percentage * 1.0 / len(original_backend_instances)
] * len(original_backend_instances) + [
alternate_service_percentage
* 1.0
/ len(alternate_backend_instances)
] * len(
alternate_backend_instances
)
# Wait for traffic to go to both services.
logger.info(
"waiting for traffic to go to all backends (including alternate)"
)
wait_until_all_rpcs_go_to_given_backends(
original_backend_instances + alternate_backend_instances,
_WAIT_FOR_STATS_SEC,
)
# Verify that weights between two services are expected.
retry_count = 10
# Each attempt takes about 10 seconds, 10 retries is equivalent to 100
# seconds timeout.
for i in range(retry_count):
stats = get_client_stats(_NUM_TEST_RPCS, _WAIT_FOR_STATS_SEC)
got_instance_count = [
stats.rpcs_by_peer[i] for i in original_backend_instances
] + [stats.rpcs_by_peer[i] for i in alternate_backend_instances]
total_count = sum(got_instance_count)
got_instance_percentage = [
x * 100.0 / total_count for x in got_instance_count
]
try:
compare_distributions(
got_instance_percentage, expected_instance_percentage, 5
)
except Exception as e:
logger.info("attempt %d", i)
logger.info("got percentage: %s", got_instance_percentage)
logger.info(
"expected percentage: %s", expected_instance_percentage
)
logger.info(e)
if i == retry_count - 1:
raise Exception(
"RPC distribution (%s) differs from expected (%s)"
% (
got_instance_percentage,
expected_instance_percentage,
)
)
else:
logger.info("success")
break
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
patch_url_map_backend_service(gcp, original_backend_service)
patch_backend_service(gcp, alternate_backend_service, [])
def test_path_matching(
gcp,
original_backend_service,
instance_group,
alternate_backend_service,
same_zone_instance_group,
):
    # This test starts with all traffic (UnaryCall and EmptyCall) going to
# original_backend_service.
#
# Then it updates URL-map to add routes, to make UnaryCall and EmptyCall to
# go different backends. It waits for all backends in both services to
# receive traffic, then verifies that traffic goes to the expected
# backends.
logger.info("Running test_path_matching")
(
original_backend_instances,
alternate_backend_instances,
) = prepare_services_for_urlmap_tests(
gcp,
original_backend_service,
instance_group,
alternate_backend_service,
same_zone_instance_group,
)
passed = True
try:
# A list of tuples (route_rules, expected_instances).
test_cases = [
(
[
{
"priority": 0,
# FullPath EmptyCall -> alternate_backend_service.
"matchRules": [
{
"fullPathMatch": (
"/grpc.testing.TestService/EmptyCall"
)
}
],
"service": alternate_backend_service.url,
}
],
{
"EmptyCall": alternate_backend_instances,
"UnaryCall": original_backend_instances,
},
),
(
[
{
"priority": 0,
# Prefix UnaryCall -> alternate_backend_service.
"matchRules": [
{"prefixMatch": "/grpc.testing.TestService/Unary"}
],
"service": alternate_backend_service.url,
}
],
{
"UnaryCall": alternate_backend_instances,
"EmptyCall": original_backend_instances,
},
),
(
# This test case is similar to the one above (but with route
# services swapped). This test has two routes (full_path and
# the default) to match EmptyCall, and both routes set
# alternative_backend_service as the action. This forces the
# client to handle duplicate Clusters in the RDS response.
[
{
"priority": 0,
# Prefix UnaryCall -> original_backend_service.
"matchRules": [
{"prefixMatch": "/grpc.testing.TestService/Unary"}
],
"service": original_backend_service.url,
},
{
"priority": 1,
# FullPath EmptyCall -> alternate_backend_service.
"matchRules": [
{
"fullPathMatch": (
"/grpc.testing.TestService/EmptyCall"
)
}
],
"service": alternate_backend_service.url,
},
],
{
"UnaryCall": original_backend_instances,
"EmptyCall": alternate_backend_instances,
},
),
(
[
{
"priority": 0,
# Regex UnaryCall -> alternate_backend_service.
"matchRules": [
{
"regexMatch": ( # Unary methods with any services.
"^\/.*\/UnaryCall$"
)
}
],
"service": alternate_backend_service.url,
}
],
{
"UnaryCall": alternate_backend_instances,
"EmptyCall": original_backend_instances,
},
),
(
[
{
"priority": 0,
# ignoreCase EmptyCall -> alternate_backend_service.
"matchRules": [
{
# Case insensitive matching.
"fullPathMatch": (
"/gRpC.tEsTinG.tEstseRvice/empTycaLl"
),
"ignoreCase": True,
}
],
"service": alternate_backend_service.url,
}
],
{
"UnaryCall": original_backend_instances,
"EmptyCall": alternate_backend_instances,
},
),
]
for route_rules, expected_instances in test_cases:
logger.info("patching url map with %s", route_rules)
patch_url_map_backend_service(
gcp, original_backend_service, route_rules=route_rules
)
# Wait for traffic to go to both services.
logger.info(
"waiting for traffic to go to all backends (including"
" alternate)"
)
wait_until_all_rpcs_go_to_given_backends(
original_backend_instances + alternate_backend_instances,
_WAIT_FOR_STATS_SEC,
)
retry_count = 80
# Each attempt takes about 5 seconds, 80 retries is equivalent to 400
# seconds timeout.
for i in range(retry_count):
stats = get_client_stats(_NUM_TEST_RPCS, _WAIT_FOR_STATS_SEC)
if not stats.rpcs_by_method:
raise ValueError(
"stats.rpcs_by_method is None, the interop client stats"
" service does not support this test case"
)
logger.info("attempt %d", i)
if compare_expected_instances(stats, expected_instances):
logger.info("success")
break
elif i == retry_count - 1:
raise Exception(
"timeout waiting for RPCs to the expected instances: %s"
% expected_instances
)
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
patch_url_map_backend_service(gcp, original_backend_service)
patch_backend_service(gcp, alternate_backend_service, [])
def test_header_matching(
gcp,
original_backend_service,
instance_group,
alternate_backend_service,
same_zone_instance_group,
):
    # This test starts with all traffic (UnaryCall and EmptyCall) going to
# original_backend_service.
#
# Then it updates URL-map to add routes, to make RPCs with test headers to
# go to different backends. It waits for all backends in both services to
# receive traffic, then verifies that traffic goes to the expected
# backends.
logger.info("Running test_header_matching")
(
original_backend_instances,
alternate_backend_instances,
) = prepare_services_for_urlmap_tests(
gcp,
original_backend_service,
instance_group,
alternate_backend_service,
same_zone_instance_group,
)
passed = True
try:
# A list of tuples (route_rules, expected_instances).
test_cases = [
(
[
{
"priority": 0,
# Header ExactMatch -> alternate_backend_service.
# EmptyCall is sent with the metadata.
"matchRules": [
{
"prefixMatch": "/",
"headerMatches": [
{
"headerName": _TEST_METADATA_KEY,
"exactMatch": _TEST_METADATA_VALUE_EMPTY,
}
],
}
],
"service": alternate_backend_service.url,
}
],
{
"EmptyCall": alternate_backend_instances,
"UnaryCall": original_backend_instances,
},
),
(
[
{
"priority": 0,
# Header PrefixMatch -> alternate_backend_service.
# UnaryCall is sent with the metadata.
"matchRules": [
{
"prefixMatch": "/",
"headerMatches": [
{
"headerName": _TEST_METADATA_KEY,
"prefixMatch": _TEST_METADATA_VALUE_UNARY[
:2
],
}
],
}
],
"service": alternate_backend_service.url,
}
],
{
"EmptyCall": original_backend_instances,
"UnaryCall": alternate_backend_instances,
},
),
(
[
{
"priority": 0,
# Header SuffixMatch -> alternate_backend_service.
# EmptyCall is sent with the metadata.
"matchRules": [
{
"prefixMatch": "/",
"headerMatches": [
{
"headerName": _TEST_METADATA_KEY,
"suffixMatch": _TEST_METADATA_VALUE_EMPTY[
-2:
],
}
],
}
],
"service": alternate_backend_service.url,
}
],
{
"EmptyCall": alternate_backend_instances,
"UnaryCall": original_backend_instances,
},
),
(
[
{
"priority": 0,
# Header 'xds_md_numeric' present -> alternate_backend_service.
# UnaryCall is sent with the metadata, so will be sent to alternative.
"matchRules": [
{
"prefixMatch": "/",
"headerMatches": [
{
"headerName": _TEST_METADATA_NUMERIC_KEY,
"presentMatch": True,
}
],
}
],
"service": alternate_backend_service.url,
}
],
{
"EmptyCall": original_backend_instances,
"UnaryCall": alternate_backend_instances,
},
),
(
[
{
"priority": 0,
# Header invert ExactMatch -> alternate_backend_service.
# UnaryCall is sent with the metadata, so will be sent to
# original. EmptyCall will be sent to alternative.
"matchRules": [
{
"prefixMatch": "/",
"headerMatches": [
{
"headerName": _TEST_METADATA_KEY,
"exactMatch": _TEST_METADATA_VALUE_UNARY,
"invertMatch": True,
}
],
}
],
"service": alternate_backend_service.url,
}
],
{
"EmptyCall": alternate_backend_instances,
"UnaryCall": original_backend_instances,
},
),
(
[
{
"priority": 0,
# Header 'xds_md_numeric' range [100,200] -> alternate_backend_service.
# UnaryCall is sent with the metadata in range.
"matchRules": [
{
"prefixMatch": "/",
"headerMatches": [
{
"headerName": _TEST_METADATA_NUMERIC_KEY,
"rangeMatch": {
"rangeStart": "100",
"rangeEnd": "200",
},
}
],
}
],
"service": alternate_backend_service.url,
}
],
{
"EmptyCall": original_backend_instances,
"UnaryCall": alternate_backend_instances,
},
),
(
[
{
"priority": 0,
# Header RegexMatch -> alternate_backend_service.
# EmptyCall is sent with the metadata.
"matchRules": [
{
"prefixMatch": "/",
"headerMatches": [
{
"headerName": _TEST_METADATA_KEY,
"regexMatch": "^%s.*%s$"
% (
_TEST_METADATA_VALUE_EMPTY[:2],
_TEST_METADATA_VALUE_EMPTY[-2:],
),
}
],
}
],
"service": alternate_backend_service.url,
}
],
{
"EmptyCall": alternate_backend_instances,
"UnaryCall": original_backend_instances,
},
),
]
for route_rules, expected_instances in test_cases:
logger.info(
"patching url map with %s -> alternative",
route_rules[0]["matchRules"],
)
patch_url_map_backend_service(
gcp, original_backend_service, route_rules=route_rules
)
# Wait for traffic to go to both services.
logger.info(
"waiting for traffic to go to all backends (including"
" alternate)"
)
wait_until_all_rpcs_go_to_given_backends(
original_backend_instances + alternate_backend_instances,
_WAIT_FOR_STATS_SEC,
)
retry_count = 80
# Each attempt takes about 5 seconds, 80 retries is equivalent to 400
# seconds timeout.
for i in range(retry_count):
stats = get_client_stats(_NUM_TEST_RPCS, _WAIT_FOR_STATS_SEC)
if not stats.rpcs_by_method:
raise ValueError(
"stats.rpcs_by_method is None, the interop client stats"
" service does not support this test case"
)
logger.info("attempt %d", i)
if compare_expected_instances(stats, expected_instances):
logger.info("success")
break
elif i == retry_count - 1:
raise Exception(
"timeout waiting for RPCs to the expected instances: %s"
% expected_instances
)
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
patch_url_map_backend_service(gcp, original_backend_service)
patch_backend_service(gcp, alternate_backend_service, [])
def test_circuit_breaking(
gcp, original_backend_service, instance_group, same_zone_instance_group
):
"""
    The backend service circuit_breakers configuration cannot be unset, which
    makes it hard to restore the validate_for_proxyless flag on the target
    proxy/global forwarding rule. This test therefore uses dedicated backend
    services. The url_map and backend services undergo the following state
    changes:
Before test:
original_backend_service -> [instance_group]
extra_backend_service -> []
more_extra_backend_service -> []
url_map -> [original_backend_service]
In test:
extra_backend_service (with circuit_breakers) -> [instance_group]
more_extra_backend_service (with circuit_breakers) -> [same_zone_instance_group]
url_map -> [extra_backend_service, more_extra_backend_service]
After test:
original_backend_service -> [instance_group]
extra_backend_service (with circuit_breakers) -> []
more_extra_backend_service (with circuit_breakers) -> []
url_map -> [original_backend_service]
"""
logger.info("Running test_circuit_breaking")
additional_backend_services = []
passed = True
try:
# TODO(chengyuanzhang): Dedicated backend services created for circuit
# breaking test. Once the issue for unsetting backend service circuit
# breakers is resolved or configuring backend service circuit breakers is
# enabled for config validation, these dedicated backend services can be
# eliminated.
extra_backend_service_name = (
_BASE_BACKEND_SERVICE_NAME + "-extra" + gcp_suffix
)
more_extra_backend_service_name = (
_BASE_BACKEND_SERVICE_NAME + "-more-extra" + gcp_suffix
)
extra_backend_service = add_backend_service(
gcp, extra_backend_service_name
)
additional_backend_services.append(extra_backend_service)
more_extra_backend_service = add_backend_service(
gcp, more_extra_backend_service_name
)
additional_backend_services.append(more_extra_backend_service)
# The config validation for proxyless doesn't allow setting
        # circuit_breakers. Disable validate_for_proxyless for this test.
        # This can be removed when validation accepts circuit_breakers.
logger.info("disabling validate_for_proxyless in target proxy")
set_validate_for_proxyless(gcp, False)
extra_backend_service_max_requests = 500
more_extra_backend_service_max_requests = 1000
patch_backend_service(
gcp,
extra_backend_service,
[instance_group],
circuit_breakers={
"maxRequests": extra_backend_service_max_requests
},
)
logger.info("Waiting for extra backends to become healthy")
wait_for_healthy_backends(gcp, extra_backend_service, instance_group)
patch_backend_service(
gcp,
more_extra_backend_service,
[same_zone_instance_group],
circuit_breakers={
"maxRequests": more_extra_backend_service_max_requests
},
)
logger.info("Waiting for more extra backend to become healthy")
wait_for_healthy_backends(
gcp, more_extra_backend_service, same_zone_instance_group
)
extra_backend_instances = get_instance_names(gcp, instance_group)
more_extra_backend_instances = get_instance_names(
gcp, same_zone_instance_group
)
route_rules = [
{
"priority": 0,
# UnaryCall -> extra_backend_service
"matchRules": [
{"fullPathMatch": "/grpc.testing.TestService/UnaryCall"}
],
"service": extra_backend_service.url,
},
{
"priority": 1,
# EmptyCall -> more_extra_backend_service
"matchRules": [
{"fullPathMatch": "/grpc.testing.TestService/EmptyCall"}
],
"service": more_extra_backend_service.url,
},
]
# Make client send UNARY_CALL and EMPTY_CALL.
configure_client(
[
messages_pb2.ClientConfigureRequest.RpcType.UNARY_CALL,
messages_pb2.ClientConfigureRequest.RpcType.EMPTY_CALL,
]
)
logger.info("Patching url map with %s", route_rules)
patch_url_map_backend_service(
gcp, extra_backend_service, route_rules=route_rules
)
logger.info("Waiting for traffic to go to all backends")
wait_until_all_rpcs_go_to_given_backends(
extra_backend_instances + more_extra_backend_instances,
_WAIT_FOR_STATS_SEC,
)
# Make all calls keep-open.
configure_client(
[
messages_pb2.ClientConfigureRequest.RpcType.UNARY_CALL,
messages_pb2.ClientConfigureRequest.RpcType.EMPTY_CALL,
],
[
(
messages_pb2.ClientConfigureRequest.RpcType.UNARY_CALL,
"rpc-behavior",
"keep-open",
),
(
messages_pb2.ClientConfigureRequest.RpcType.EMPTY_CALL,
"rpc-behavior",
"keep-open",
),
],
)
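        # With every call kept open, in-flight RPCs accumulate at roughly
        # args.qps per second until the circuit breaker's maxRequests cap is
        # reached, so the waits below are extended by max_requests / qps
        # seconds on top of the usual backend wait.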
wait_until_rpcs_in_flight(
"UNARY_CALL",
(
_WAIT_FOR_BACKEND_SEC
+ int(extra_backend_service_max_requests / args.qps)
),
extra_backend_service_max_requests,
1,
)
logger.info(
"UNARY_CALL reached stable state (%d)",
extra_backend_service_max_requests,
)
wait_until_rpcs_in_flight(
"EMPTY_CALL",
(
_WAIT_FOR_BACKEND_SEC
+ int(more_extra_backend_service_max_requests / args.qps)
),
more_extra_backend_service_max_requests,
1,
)
logger.info(
"EMPTY_CALL reached stable state (%d)",
more_extra_backend_service_max_requests,
)
# Increment circuit breakers max_requests threshold.
extra_backend_service_max_requests = 800
patch_backend_service(
gcp,
extra_backend_service,
[instance_group],
circuit_breakers={
"maxRequests": extra_backend_service_max_requests
},
)
wait_until_rpcs_in_flight(
"UNARY_CALL",
(
_WAIT_FOR_BACKEND_SEC
+ int(extra_backend_service_max_requests / args.qps)
),
extra_backend_service_max_requests,
1,
)
logger.info(
"UNARY_CALL reached stable state after increase (%d)",
extra_backend_service_max_requests,
)
logger.info("success")
# Avoid new RPCs being outstanding (some test clients create threads
# for sending RPCs) after restoring backend services.
configure_client(
[messages_pb2.ClientConfigureRequest.RpcType.UNARY_CALL]
)
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
patch_url_map_backend_service(gcp, original_backend_service)
patch_backend_service(
gcp, original_backend_service, [instance_group]
)
for backend_service in additional_backend_services:
delete_backend_service(gcp, backend_service)
set_validate_for_proxyless(gcp, True)
def test_timeout(gcp, original_backend_service, instance_group):
logger.info("Running test_timeout")
logger.info("waiting for original backends to become healthy")
wait_for_healthy_backends(gcp, original_backend_service, instance_group)
# UnaryCall -> maxStreamDuration:3s
route_rules = [
{
"priority": 0,
"matchRules": [
{"fullPathMatch": "/grpc.testing.TestService/UnaryCall"}
],
"service": original_backend_service.url,
"routeAction": {
"maxStreamDuration": {
"seconds": 3,
},
},
}
]
patch_url_map_backend_service(
gcp, original_backend_service, route_rules=route_rules
)
# A list of tuples (testcase_name, {client_config}, {expected_results})
test_cases = [
(
(
"timeout_exceeded (UNARY_CALL), timeout_different_route"
" (EMPTY_CALL)"
),
# UnaryCall and EmptyCall both sleep-4.
            # UnaryCall times out, EmptyCall succeeds.
{
"rpc_types": [
messages_pb2.ClientConfigureRequest.RpcType.UNARY_CALL,
messages_pb2.ClientConfigureRequest.RpcType.EMPTY_CALL,
],
"metadata": [
(
messages_pb2.ClientConfigureRequest.RpcType.UNARY_CALL,
"rpc-behavior",
"sleep-4",
),
(
messages_pb2.ClientConfigureRequest.RpcType.EMPTY_CALL,
"rpc-behavior",
"sleep-4",
),
],
},
{
"UNARY_CALL": 4, # DEADLINE_EXCEEDED
"EMPTY_CALL": 0,
},
),
(
"app_timeout_exceeded",
            # UnaryCall only with sleep-2; timeout=1s; calls time out.
{
"rpc_types": [
messages_pb2.ClientConfigureRequest.RpcType.UNARY_CALL,
],
"metadata": [
(
messages_pb2.ClientConfigureRequest.RpcType.UNARY_CALL,
"rpc-behavior",
"sleep-2",
),
],
"timeout_sec": 1,
},
{
"UNARY_CALL": 4, # DEADLINE_EXCEEDED
},
),
(
"timeout_not_exceeded",
# UnaryCall only with no sleep; calls succeed.
{
"rpc_types": [
messages_pb2.ClientConfigureRequest.RpcType.UNARY_CALL,
],
},
{
"UNARY_CALL": 0,
},
),
]
passed = True
try:
first_case = True
for testcase_name, client_config, expected_results in test_cases:
logger.info("starting case %s", testcase_name)
configure_client(**client_config)
            # Wait a second to help ensure the client stops sending RPCs with
            # the old config. We will retry if the check fails, but this
            # improves confidence that the test is valid even if the previous
            # client_config would lead to the same results.
time.sleep(1)
# Each attempt takes 10 seconds; 20 attempts is equivalent to 200
# second timeout.
attempt_count = 20
if first_case:
attempt_count = 120
first_case = False
before_stats = get_client_accumulated_stats()
if not before_stats.stats_per_method:
raise ValueError(
"stats.stats_per_method is None, the interop client stats"
" service does not support this test case"
)
for i in range(attempt_count):
logger.info("%s: attempt %d", testcase_name, i)
test_runtime_secs = 10
time.sleep(test_runtime_secs)
after_stats = get_client_accumulated_stats()
success = True
for rpc, status in list(expected_results.items()):
qty = (
after_stats.stats_per_method[rpc].result[status]
- before_stats.stats_per_method[rpc].result[status]
)
want = test_runtime_secs * args.qps
# Allow 10% deviation from expectation to reduce flakiness
if qty < (want * 0.9) or qty > (want * 1.1):
logger.info(
"%s: failed due to %s[%s]: got %d want ~%d",
testcase_name,
rpc,
status,
qty,
want,
)
success = False
if success:
logger.info("success")
break
logger.info("%s attempt %d failed", testcase_name, i)
before_stats = after_stats
else:
raise Exception(
"%s: timeout waiting for expected results: %s; got %s"
% (
testcase_name,
expected_results,
after_stats.stats_per_method,
)
)
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
patch_url_map_backend_service(gcp, original_backend_service)
def test_fault_injection(gcp, original_backend_service, instance_group):
logger.info("Running test_fault_injection")
logger.info("waiting for original backends to become healthy")
wait_for_healthy_backends(gcp, original_backend_service, instance_group)
testcase_header = "fi_testcase"
def _route(pri, name, fi_policy):
return {
"priority": pri,
"matchRules": [
{
"prefixMatch": "/",
"headerMatches": [
{
"headerName": testcase_header,
"exactMatch": name,
}
],
}
],
"service": original_backend_service.url,
"routeAction": {"faultInjectionPolicy": fi_policy},
}
def _abort(pct):
return {
"abort": {
"httpStatus": 401,
"percentage": pct,
}
}
def _delay(pct):
return {
"delay": {
"fixedDelay": {"seconds": "20"},
"percentage": pct,
}
}
zero_route = _abort(0)
zero_route.update(_delay(0))
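    # zero_route now combines both fault types at 0 percent, roughly:
    #   {"abort": {"httpStatus": 401, "percentage": 0},
    #    "delay": {"fixedDelay": {"seconds": "20"}, "percentage": 0}}
    # so the "zero_percent_fault_injection" route should behave as if no fault
    # injection were configured.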
route_rules = [
_route(0, "zero_percent_fault_injection", zero_route),
_route(1, "always_delay", _delay(100)),
_route(2, "always_abort", _abort(100)),
_route(3, "delay_half", _delay(50)),
_route(4, "abort_half", _abort(50)),
{
"priority": 5,
"matchRules": [{"prefixMatch": "/"}],
"service": original_backend_service.url,
},
]
set_validate_for_proxyless(gcp, False)
patch_url_map_backend_service(
gcp, original_backend_service, route_rules=route_rules
)
# A list of tuples (testcase_name, {client_config}, {code: percent}). Each
# test case will set the testcase_header with the testcase_name for routing
# to the appropriate config for the case, defined above.
test_cases = [
(
"always_delay",
{"timeout_sec": 2},
{4: 1}, # DEADLINE_EXCEEDED
),
(
"always_abort",
{},
{16: 1}, # UNAUTHENTICATED
),
(
"delay_half",
{"timeout_sec": 2},
{4: 0.5, 0: 0.5}, # DEADLINE_EXCEEDED / OK: 50% / 50%
),
(
"abort_half",
{},
{16: 0.5, 0: 0.5}, # UNAUTHENTICATED / OK: 50% / 50%
),
(
"zero_percent_fault_injection",
{},
{0: 1}, # OK
),
(
"non_matching_fault_injection", # Not in route_rules, above.
{},
{0: 1}, # OK
),
]
passed = True
try:
first_case = True
for testcase_name, client_config, expected_results in test_cases:
logger.info("starting case %s", testcase_name)
client_config["metadata"] = [
(
messages_pb2.ClientConfigureRequest.RpcType.UNARY_CALL,
testcase_header,
testcase_name,
)
]
client_config["rpc_types"] = [
messages_pb2.ClientConfigureRequest.RpcType.UNARY_CALL,
]
configure_client(**client_config)
# Wait a second to help ensure the client stops sending RPCs with
# the old config. We will make multiple attempts if the check is
# failing, but this pause improves confidence that the test is valid
# even when the previous client_config would lead to the same results.
time.sleep(1)
# Each attempt takes 10 seconds
if first_case:
# Give the first test case 600s for xDS config propagation.
attempt_count = 60
first_case = False
else:
# The accumulated stats might include the previous sub-test, so run
# the test multiple times to deflake.
attempt_count = 10
before_stats = get_client_accumulated_stats()
if not before_stats.stats_per_method:
raise ValueError(
"stats.stats_per_method is None, the interop client stats"
" service does not support this test case"
)
for i in range(attempt_count):
logger.info("%s: attempt %d", testcase_name, i)
test_runtime_secs = 10
time.sleep(test_runtime_secs)
after_stats = get_client_accumulated_stats()
success = True
for status, pct in list(expected_results.items()):
rpc = "UNARY_CALL"
qty = (
after_stats.stats_per_method[rpc].result[status]
- before_stats.stats_per_method[rpc].result[status]
)
want = pct * args.qps * test_runtime_secs
# Allow 10% deviation from expectation to reduce flakiness
VARIANCE_ALLOWED = 0.1
if abs(qty - want) > want * VARIANCE_ALLOWED:
logger.info(
"%s: failed due to %s[%s]: got %d want ~%d",
testcase_name,
rpc,
status,
qty,
want,
)
success = False
if success:
logger.info("success")
break
logger.info("%s attempt %d failed", testcase_name, i)
before_stats = after_stats
else:
raise Exception(
"%s: timeout waiting for expected results: %s; got %s"
% (
testcase_name,
expected_results,
after_stats.stats_per_method,
)
)
except Exception:
passed = False
raise
finally:
if passed or not args.halt_after_fail:
patch_url_map_backend_service(gcp, original_backend_service)
set_validate_for_proxyless(gcp, True)
def test_csds(gcp, original_backend_service, instance_group, server_uri):
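"""Fetch the client's xDS config dump via CSDS and validate the LDS, RDS, CDS, and EDS resources it contains."""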
test_csds_timeout_s = datetime.timedelta(minutes=5).total_seconds()
sleep_interval_between_attempts_s = datetime.timedelta(
seconds=2
).total_seconds()
logger.info("Running test_csds")
logger.info("waiting for original backends to become healthy")
wait_for_healthy_backends(gcp, original_backend_service, instance_group)
# Test case timeout: 5 minutes
deadline = time.time() + test_csds_timeout_s
cnt = 0
while time.time() <= deadline:
client_config = get_client_xds_config_dump()
logger.info(
"test_csds attempt %d: received xDS config %s",
cnt,
json.dumps(client_config, indent=2),
)
if client_config is not None:
# Got the xDS config dump, now validate it
ok = True
try:
if client_config["node"]["locality"]["zone"] != args.zone:
logger.info(
"Invalid zone %s != %s",
client_config["node"]["locality"]["zone"],
args.zone,
)
ok = False
seen = set()
for xds_config in client_config.get("xds_config", []):
if "listener_config" in xds_config:
listener_name = xds_config["listener_config"][
"dynamic_listeners"
][0]["active_state"]["listener"]["name"]
if listener_name != server_uri:
logger.info(
"Invalid Listener name %s != %s",
listener_name,
server_uri,
)
ok = False
else:
seen.add("lds")
elif "route_config" in xds_config:
num_vh = len(
xds_config["route_config"]["dynamic_route_configs"][
0
]["route_config"]["virtual_hosts"]
)
if num_vh <= 0:
logger.info(
"Invalid number of VirtualHosts %s", num_vh
)
ok = False
else:
seen.add("rds")
elif "cluster_config" in xds_config:
cluster_type = xds_config["cluster_config"][
"dynamic_active_clusters"
][0]["cluster"]["type"]
if cluster_type != "EDS":
logger.info(
"Invalid cluster type %s != EDS", cluster_type
)
ok = False
else:
seen.add("cds")
elif "endpoint_config" in xds_config:
sub_zone = xds_config["endpoint_config"][
"dynamic_endpoint_configs"
][0]["endpoint_config"]["endpoints"][0]["locality"][
"sub_zone"
]
if args.zone not in sub_zone:
logger.info(
"Invalid endpoint sub_zone %s", sub_zone
)
ok = False
else:
seen.add("eds")
for generic_xds_config in client_config.get(
"generic_xds_configs", []
):
if re.search(
r"\.Listener$", generic_xds_config["type_url"]
):
seen.add("lds")
listener = generic_xds_config["xds_config"]
if listener["name"] != server_uri:
logger.info(
"Invalid Listener name %s != %s",
listener["name"],
server_uri,
)
ok = False
elif re.search(
r"\.RouteConfiguration$", generic_xds_config["type_url"]
):
seen.add("rds")
route_config = generic_xds_config["xds_config"]
if not len(route_config["virtual_hosts"]):
logger.info(
"Invalid number of VirtualHosts %s", num_vh
)
ok = False
elif re.search(
r"\.Cluster$", generic_xds_config["type_url"]
):
seen.add("cds")
cluster = generic_xds_config["xds_config"]
if cluster["type"] != "EDS":
logger.info(
"Invalid cluster type %s != EDS", cluster_type
)
ok = False
elif re.search(
r"\.ClusterLoadAssignment$",
generic_xds_config["type_url"],
):
seen.add("eds")
endpoint = generic_xds_config["xds_config"]
if (
args.zone
not in endpoint["endpoints"][0]["locality"][
"sub_zone"
]
):
logger.info(
"Invalid endpoint sub_zone %s", sub_zone
)
ok = False
want = {"lds", "rds", "cds", "eds"}
if seen != want:
logger.info("Incomplete xDS config dump, seen=%s", seen)
ok = False
except:
logger.exception("Error in xDS config dump:")
ok = False
finally:
if ok:
# Successfully fetched the xDS config, and it looks good.
logger.info("success")
return
logger.info("test_csds attempt %d failed", cnt)
# Give the client some time to fetch xDS resources
time.sleep(sleep_interval_between_attempts_s)
cnt += 1
raise RuntimeError(
"failed to receive a valid xDS config in %s seconds"
% test_csds_timeout_s
)
def set_validate_for_proxyless(gcp, validate_for_proxyless):
if not gcp.alpha_compute:
logger.debug(
"Not setting validateForProxy because alpha is not enabled"
)
return
if (
len(gcp.global_forwarding_rules) != 1
or len(gcp.target_proxies) != 1
or len(gcp.url_maps) != 1
):
logger.debug(
"Global forwarding rule, target proxy or url map not found."
)
return
# This function deletes the global forwarding rule and target proxy, then
# recreates the target proxy with the requested validateForProxyless value.
# This is necessary because patching target_grpc_proxy isn't supported.
delete_global_forwarding_rule(gcp, gcp.global_forwarding_rules[0])
delete_target_proxy(gcp, gcp.target_proxies[0])
create_target_proxy(gcp, target_proxy_name, validate_for_proxyless)
create_global_forwarding_rule(gcp, forwarding_rule_name, [gcp.service_port])
def get_serving_status(instance, service_port):
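"""Query the standard gRPC health service on a backend instance."""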
with grpc.insecure_channel("%s:%d" % (instance, service_port)) as channel:
health_stub = health_pb2_grpc.HealthStub(channel)
return health_stub.Check(health_pb2.HealthCheckRequest())
def set_serving_status(instances, service_port, serving):
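"""Set each instance's serving status via XdsUpdateHealthService and verify the change through the health service, retrying up to 5 times."""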
logger.info("setting %s serving status to %s", instances, serving)
for instance in instances:
with grpc.insecure_channel(
"%s:%d" % (instance, service_port)
) as channel:
logger.info("setting %s serving status to %s", instance, serving)
stub = test_pb2_grpc.XdsUpdateHealthServiceStub(channel)
retry_count = 5
for i in range(retry_count):
if serving:
stub.SetServing(empty_pb2.Empty())
else:
stub.SetNotServing(empty_pb2.Empty())
serving_status = get_serving_status(instance, service_port)
logger.info("got instance service status %s", serving_status)
want_status = (
health_pb2.HealthCheckResponse.SERVING
if serving
else health_pb2.HealthCheckResponse.NOT_SERVING
)
if serving_status.status == want_status:
break
if i == retry_count - 1:
raise Exception(
"failed to set instance service status after %d retries"
% retry_count
)
def is_primary_instance_group(gcp, instance_group):
# Clients may connect to a TD instance in a different region than the
# client, in which case primary/secondary assignments may not be based on
# the client's actual locality.
instance_names = get_instance_names(gcp, instance_group)
stats = get_client_stats(_NUM_TEST_RPCS, _WAIT_FOR_STATS_SEC)
return all(
peer in instance_names for peer in list(stats.rpcs_by_peer.keys())
)
def get_startup_script(path_to_server_binary, service_port):
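"""Return the VM startup script: run the given server binary if provided, otherwise build and run the Java interop xDS test server."""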
if path_to_server_binary:
return "nohup %s --port=%d 1>/dev/null &" % (
path_to_server_binary,
service_port,
)
else:
return (
"""#!/bin/bash
sudo apt update
sudo apt install -y git default-jdk
mkdir java_server
pushd java_server
git clone https://github.com/grpc/grpc-java.git
pushd grpc-java
pushd interop-testing
../gradlew installDist -x test -PskipCodegen=true -PskipAndroid=true
nohup build/install/grpc-interop-testing/bin/xds-test-server \
--port=%d 1>/dev/null &"""
% service_port
)
def create_instance_template(
gcp, name, network, source_image, machine_type, startup_script
):
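"""Create a GCE instance template, tagged to allow health checks, that runs the given startup script."""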
config = {
"name": name,
"properties": {
"tags": {"items": ["allow-health-checks"]},
"machineType": machine_type,
"serviceAccounts": [
{
"email": "default",
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
],
}
],
"networkInterfaces": [
{
"accessConfigs": [{"type": "ONE_TO_ONE_NAT"}],
"network": network,
}
],
"disks": [
{
"boot": True,
"initializeParams": {"sourceImage": source_image},
"autoDelete": True,
}
],
"metadata": {
"items": [{"key": "startup-script", "value": startup_script}]
},
},
}
logger.debug("Sending GCP request with body=%s", config)
result = (
gcp.compute.instanceTemplates()
.insert(project=gcp.project, body=config)
.execute(num_retries=_GCP_API_RETRIES)
)
wait_for_global_operation(gcp, result["name"])
gcp.instance_template = GcpResource(config["name"], result["targetLink"])
def add_instance_group(gcp, zone, name, size):
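"""Create a managed instance group from gcp.instance_template and wait until it reaches the requested size."""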
config = {
"name": name,
"instanceTemplate": gcp.instance_template.url,
"targetSize": size,
"namedPorts": [{"name": "grpc", "port": gcp.service_port}],
}
logger.debug("Sending GCP request with body=%s", config)
result = (
gcp.compute.instanceGroupManagers()
.insert(project=gcp.project, zone=zone, body=config)
.execute(num_retries=_GCP_API_RETRIES)
)
wait_for_zone_operation(gcp, zone, result["name"])
result = (
gcp.compute.instanceGroupManagers()
.get(
project=gcp.project, zone=zone, instanceGroupManager=config["name"]
)
.execute(num_retries=_GCP_API_RETRIES)
)
instance_group = InstanceGroup(
config["name"], result["instanceGroup"], zone
)
gcp.instance_groups.append(instance_group)
wait_for_instance_group_to_reach_expected_size(
gcp, instance_group, size, _WAIT_FOR_OPERATION_SEC
)
return instance_group
def create_health_check(gcp, name):
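"""Create a GRPC health check when alpha compute is enabled, otherwise a TCP health check on the 'grpc' named port."""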
if gcp.alpha_compute:
config = {
"name": name,
"type": "GRPC",
"grpcHealthCheck": {"portSpecification": "USE_SERVING_PORT"},
}
compute_to_use = gcp.alpha_compute
else:
config = {
"name": name,
"type": "TCP",
"tcpHealthCheck": {"portName": "grpc"},
}
compute_to_use = gcp.compute
logger.debug("Sending GCP request with body=%s", config)
result = (
compute_to_use.healthChecks()
.insert(project=gcp.project, body=config)
.execute(num_retries=_GCP_API_RETRIES)
)
wait_for_global_operation(gcp, result["name"])
gcp.health_check = GcpResource(config["name"], result["targetLink"])
def create_health_check_firewall_rule(gcp, name):
config = {
"name": name,
"direction": "INGRESS",
"allowed": [{"IPProtocol": "tcp"}],
"sourceRanges": ["35.191.0.0/16", "130.211.0.0/22"],
"targetTags": ["allow-health-checks"],
}
logger.debug("Sending GCP request with body=%s", config)
result = (
gcp.compute.firewalls()
.insert(project=gcp.project, body=config)
.execute(num_retries=_GCP_API_RETRIES)
)
wait_for_global_operation(gcp, result["name"])
gcp.health_check_firewall_rule = GcpResource(
config["name"], result["targetLink"]
)
def add_backend_service(gcp, name):
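"""Create an INTERNAL_SELF_MANAGED backend service (GRPC protocol with alpha compute, HTTP2 otherwise)."""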
if gcp.alpha_compute:
protocol = "GRPC"
compute_to_use = gcp.alpha_compute
else:
protocol = "HTTP2"
compute_to_use = gcp.compute
config = {
"name": name,
"loadBalancingScheme": "INTERNAL_SELF_MANAGED",
"healthChecks": [gcp.health_check.url],
"portName": "grpc",
"protocol": protocol,
}
logger.debug("Sending GCP request with body=%s", config)
result = (
compute_to_use.backendServices()
.insert(project=gcp.project, body=config)
.execute(num_retries=_GCP_API_RETRIES)
)
wait_for_global_operation(gcp, result["name"])
backend_service = GcpResource(config["name"], result["targetLink"])
gcp.backend_services.append(backend_service)
return backend_service
def create_url_map(gcp, name, backend_service, host_name):
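"""Create a url map with a single path matcher and host rule pointing at the given backend service."""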
config = {
"name": name,
"defaultService": backend_service.url,
"pathMatchers": [
{
"name": _PATH_MATCHER_NAME,
"defaultService": backend_service.url,
}
],
"hostRules": [
{"hosts": [host_name], "pathMatcher": _PATH_MATCHER_NAME}
],
}
logger.debug("Sending GCP request with body=%s", config)
result = (
gcp.compute.urlMaps()
.insert(project=gcp.project, body=config)
.execute(num_retries=_GCP_API_RETRIES)
)
wait_for_global_operation(gcp, result["name"])
url_map = GcpResource(config["name"], result["targetLink"])
gcp.url_maps.append(url_map)
return url_map
def patch_url_map_host_rule_with_port(gcp, name, backend_service, host_name):
config = {
"hostRules": [
{
"hosts": ["%s:%d" % (host_name, gcp.service_port)],
"pathMatcher": _PATH_MATCHER_NAME,
}
]
}
logger.debug("Sending GCP request with body=%s", config)
result = (
gcp.compute.urlMaps()
.patch(project=gcp.project, urlMap=name, body=config)
.execute(num_retries=_GCP_API_RETRIES)
)
wait_for_global_operation(gcp, result["name"])
def create_target_proxy(gcp, name, validate_for_proxyless=True, url_map=None):
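"""Create a target gRPC proxy (alpha compute) or target HTTP proxy pointing at the url map."""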
if url_map:
arg_url_map_url = url_map.url
else:
arg_url_map_url = gcp.url_maps[0].url
if gcp.alpha_compute:
config = {
"name": name,
"url_map": arg_url_map_url,
"validate_for_proxyless": validate_for_proxyless,
}
logger.debug("Sending GCP request with body=%s", config)
result = (
gcp.alpha_compute.targetGrpcProxies()
.insert(project=gcp.project, body=config)
.execute(num_retries=_GCP_API_RETRIES)
)
else:
config = {
"name": name,
"url_map": arg_url_map_url,
}
logger.debug("Sending GCP request with body=%s", config)
result = (
gcp.compute.targetHttpProxies()
.insert(project=gcp.project, body=config)
.execute(num_retries=_GCP_API_RETRIES)
)
wait_for_global_operation(gcp, result["name"])
target_proxy = GcpResource(config["name"], result["targetLink"])
gcp.target_proxies.append(target_proxy)
return target_proxy
def create_global_forwarding_rule(
gcp,
name,
potential_ports,
potential_ip_addresses=["0.0.0.0"],
target_proxy=None,
):
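"""Try each (port, IP) combination until a forwarding rule is created, recording the chosen port in gcp.service_port."""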
if target_proxy:
arg_target_proxy_url = target_proxy.url
else:
arg_target_proxy_url = gcp.target_proxies[0].url
if gcp.alpha_compute:
compute_to_use = gcp.alpha_compute
else:
compute_to_use = gcp.compute
for port in potential_ports:
for ip_address in potential_ip_addresses:
try:
config = {
"name": name,
"loadBalancingScheme": "INTERNAL_SELF_MANAGED",
"portRange": str(port),
"IPAddress": ip_address,
"network": args.network,
"target": arg_target_proxy_url,
}
logger.debug("Sending GCP request with body=%s", config)
result = (
compute_to_use.globalForwardingRules()
.insert(project=gcp.project, body=config)
.execute(num_retries=_GCP_API_RETRIES)
)
wait_for_global_operation(gcp, result["name"])
global_forwarding_rule = GcpResource(
config["name"], result["targetLink"]
)
gcp.global_forwarding_rules.append(global_forwarding_rule)
gcp.service_port = port
return
except googleapiclient.errors.HttpError as http_error:
logger.warning(
"Got error %s when attempting to create forwarding rule to "
"%s:%d. Retrying with another port."
% (http_error, ip_address, port)
)
def get_health_check(gcp, health_check_name):
try:
result = (
gcp.compute.healthChecks()
.get(project=gcp.project, healthCheck=health_check_name)
.execute()
)
gcp.health_check = GcpResource(health_check_name, result["selfLink"])
except Exception as e:
gcp.errors.append(e)
gcp.health_check = GcpResource(health_check_name, None)
def get_health_check_firewall_rule(gcp, firewall_name):
try:
result = (
gcp.compute.firewalls()
.get(project=gcp.project, firewall=firewall_name)
.execute()
)
gcp.health_check_firewall_rule = GcpResource(
firewall_name, result["selfLink"]
)
except Exception as e:
gcp.errors.append(e)
gcp.health_check_firewall_rule = GcpResource(firewall_name, None)
def get_backend_service(gcp, backend_service_name, record_error=True):
try:
result = (
gcp.compute.backendServices()
.get(project=gcp.project, backendService=backend_service_name)
.execute()
)
backend_service = GcpResource(backend_service_name, result["selfLink"])
except Exception as e:
if record_error:
gcp.errors.append(e)
backend_service = GcpResource(backend_service_name, None)
gcp.backend_services.append(backend_service)
return backend_service
def get_url_map(gcp, url_map_name, record_error=True):
try:
result = (
gcp.compute.urlMaps()
.get(project=gcp.project, urlMap=url_map_name)
.execute()
)
url_map = GcpResource(url_map_name, result["selfLink"])
gcp.url_maps.append(url_map)
except Exception as e:
if record_error:
gcp.errors.append(e)
def get_target_proxy(gcp, target_proxy_name, record_error=True):
try:
if gcp.alpha_compute:
result = (
gcp.alpha_compute.targetGrpcProxies()
.get(project=gcp.project, targetGrpcProxy=target_proxy_name)
.execute()
)
else:
result = (
gcp.compute.targetHttpProxies()
.get(project=gcp.project, targetHttpProxy=target_proxy_name)
.execute()
)
target_proxy = GcpResource(target_proxy_name, result["selfLink"])
gcp.target_proxies.append(target_proxy)
except Exception as e:
if record_error:
gcp.errors.append(e)
def get_global_forwarding_rule(gcp, forwarding_rule_name, record_error=True):
try:
result = (
gcp.compute.globalForwardingRules()
.get(project=gcp.project, forwardingRule=forwarding_rule_name)
.execute()
)
global_forwarding_rule = GcpResource(
forwarding_rule_name, result["selfLink"]
)
gcp.global_forwarding_rules.append(global_forwarding_rule)
except Exception as e:
if record_error:
gcp.errors.append(e)
def get_instance_template(gcp, template_name):
try:
result = (
gcp.compute.instanceTemplates()
.get(project=gcp.project, instanceTemplate=template_name)
.execute()
)
gcp.instance_template = GcpResource(template_name, result["selfLink"])
except Exception as e:
gcp.errors.append(e)
gcp.instance_template = GcpResource(template_name, None)
def get_instance_group(gcp, zone, instance_group_name):
try:
result = (
gcp.compute.instanceGroups()
.get(
project=gcp.project,
zone=zone,
instanceGroup=instance_group_name,
)
.execute()
)
gcp.service_port = result["namedPorts"][0]["port"]
instance_group = InstanceGroup(
instance_group_name, result["selfLink"], zone
)
except Exception as e:
gcp.errors.append(e)
instance_group = InstanceGroup(instance_group_name, None, zone)
gcp.instance_groups.append(instance_group)
return instance_group
def delete_global_forwarding_rule(gcp, forwarding_rule_to_delete=None):
if not forwarding_rule_to_delete:
return
try:
logger.debug(
"Deleting forwarding rule %s", forwarding_rule_to_delete.name
)
result = (
gcp.compute.globalForwardingRules()
.delete(
project=gcp.project,
forwardingRule=forwarding_rule_to_delete.name,
)
.execute(num_retries=_GCP_API_RETRIES)
)
wait_for_global_operation(gcp, result["name"])
if forwarding_rule_to_delete in gcp.global_forwarding_rules:
gcp.global_forwarding_rules.remove(forwarding_rule_to_delete)
else:
logger.debug(
(
"Forwarding rule %s does not exist in"
" gcp.global_forwarding_rules"
),
forwarding_rule_to_delete.name,
)
except googleapiclient.errors.HttpError as http_error:
logger.info("Delete failed: %s", http_error)
def delete_global_forwarding_rules(gcp):
forwarding_rules_to_delete = gcp.global_forwarding_rules.copy()
for forwarding_rule in forwarding_rules_to_delete:
delete_global_forwarding_rule(gcp, forwarding_rule)
def delete_target_proxy(gcp, proxy_to_delete=None):
if not proxy_to_delete:
return
try:
if gcp.alpha_compute:
logger.debug("Deleting grpc proxy %s", proxy_to_delete.name)
result = (
gcp.alpha_compute.targetGrpcProxies()
.delete(
project=gcp.project, targetGrpcProxy=proxy_to_delete.name
)
.execute(num_retries=_GCP_API_RETRIES)
)
else:
logger.debug("Deleting http proxy %s", proxy_to_delete.name)
result = (
gcp.compute.targetHttpProxies()
.delete(
project=gcp.project, targetHttpProxy=proxy_to_delete.name
)
.execute(num_retries=_GCP_API_RETRIES)
)
wait_for_global_operation(gcp, result["name"])
if proxy_to_delete in gcp.target_proxies:
gcp.target_proxies.remove(proxy_to_delete)
else:
logger.debug(
"Gcp proxy %s does not exist in gcp.target_proxies",
proxy_to_delete.name,
)
except googleapiclient.errors.HttpError as http_error:
logger.info("Delete failed: %s", http_error)
def delete_target_proxies(gcp):
target_proxies_to_delete = gcp.target_proxies.copy()
for target_proxy in target_proxies_to_delete:
delete_target_proxy(gcp, target_proxy)
def delete_url_map(gcp, url_map_to_delete=None):
if not url_map_to_delete:
return
try:
logger.debug("Deleting url map %s", url_map_to_delete.name)
result = (
gcp.compute.urlMaps()
.delete(project=gcp.project, urlMap=url_map_to_delete.name)
.execute(num_retries=_GCP_API_RETRIES)
)
wait_for_global_operation(gcp, result["name"])
if url_map_to_delete in gcp.url_maps:
gcp.url_maps.remove(url_map_to_delete)
else:
logger.debug(
"Url map %s does not exist in gcp.url_maps",
url_map_to_delete.name,
)
except googleapiclient.errors.HttpError as http_error:
logger.info("Delete failed: %s", http_error)
def delete_url_maps(gcp):
url_maps_to_delete = gcp.url_maps.copy()
for url_map in url_maps_to_delete:
delete_url_map(gcp, url_map)
def delete_backend_service(gcp, backend_service):
try:
logger.debug("Deleting backend service %s", backend_service.name)
result = (
gcp.compute.backendServices()
.delete(project=gcp.project, backendService=backend_service.name)
.execute(num_retries=_GCP_API_RETRIES)
)
wait_for_global_operation(gcp, result["name"])
except googleapiclient.errors.HttpError as http_error:
logger.info("Delete failed: %s", http_error)
def delete_backend_services(gcp):
for backend_service in gcp.backend_services:
delete_backend_service(gcp, backend_service)
def delete_firewall(gcp):
try:
logger.debug(
"Deleting firewall %s", gcp.health_check_firewall_rule.name
)
result = (
gcp.compute.firewalls()
.delete(
project=gcp.project,
firewall=gcp.health_check_firewall_rule.name,
)
.execute(num_retries=_GCP_API_RETRIES)
)
wait_for_global_operation(gcp, result["name"])
except googleapiclient.errors.HttpError as http_error:
logger.info("Delete failed: %s", http_error)
def delete_health_check(gcp):
try:
logger.debug("Deleting health check %s", gcp.health_check.name)
result = (
gcp.compute.healthChecks()
.delete(project=gcp.project, healthCheck=gcp.health_check.name)
.execute(num_retries=_GCP_API_RETRIES)
)
wait_for_global_operation(gcp, result["name"])
except googleapiclient.errors.HttpError as http_error:
logger.info("Delete failed: %s", http_error)
def delete_instance_groups(gcp):
for instance_group in gcp.instance_groups:
try:
logger.debug(
"Deleting instance group %s %s",
instance_group.name,
instance_group.zone,
)
result = (
gcp.compute.instanceGroupManagers()
.delete(
project=gcp.project,
zone=instance_group.zone,
instanceGroupManager=instance_group.name,
)
.execute(num_retries=_GCP_API_RETRIES)
)
wait_for_zone_operation(
gcp,
instance_group.zone,
result["name"],
timeout_sec=_WAIT_FOR_BACKEND_SEC,
)
except googleapiclient.errors.HttpError as http_error:
logger.info("Delete failed: %s", http_error)
def delete_instance_template(gcp):
try:
logger.debug(
"Deleting instance template %s", gcp.instance_template.name
)
result = (
gcp.compute.instanceTemplates()
.delete(
project=gcp.project, instanceTemplate=gcp.instance_template.name
)
.execute(num_retries=_GCP_API_RETRIES)
)
wait_for_global_operation(gcp, result["name"])
except googleapiclient.errors.HttpError as http_error:
logger.info("Delete failed: %s", http_error)
def patch_backend_service(
gcp,
backend_service,
instance_groups,
balancing_mode="UTILIZATION",
max_rate=1,
circuit_breakers=None,
):
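"""Point the backend service at the given instance groups and optionally configure circuit breakers."""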
if gcp.alpha_compute:
compute_to_use = gcp.alpha_compute
else:
compute_to_use = gcp.compute
config = {
"backends": [
{
"group": instance_group.url,
"balancingMode": balancing_mode,
"maxRate": max_rate if balancing_mode == "RATE" else None,
}
for instance_group in instance_groups
],
"circuitBreakers": circuit_breakers,
}
logger.debug("Sending GCP request with body=%s", config)
result = (
compute_to_use.backendServices()
.patch(
project=gcp.project,
backendService=backend_service.name,
body=config,
)
.execute(num_retries=_GCP_API_RETRIES)
)
wait_for_global_operation(
gcp, result["name"], timeout_sec=_WAIT_FOR_BACKEND_SEC
)
def resize_instance_group(
gcp, instance_group, new_size, timeout_sec=_WAIT_FOR_OPERATION_SEC
):
result = (
gcp.compute.instanceGroupManagers()
.resize(
project=gcp.project,
zone=instance_group.zone,
instanceGroupManager=instance_group.name,
size=new_size,
)
.execute(num_retries=_GCP_API_RETRIES)
)
wait_for_zone_operation(
gcp, instance_group.zone, result["name"], timeout_sec=360
)
wait_for_instance_group_to_reach_expected_size(
gcp, instance_group, new_size, timeout_sec
)
def patch_url_map_backend_service(
gcp,
backend_service=None,
services_with_weights=None,
route_rules=None,
url_map=None,
):
"""Change the url map's backend service.
Only one of backend_service and services_with_weights can be not None.
"""
if url_map:
url_map_name = url_map.name
else:
url_map_name = gcp.url_maps[0].name
if gcp.alpha_compute:
compute_to_use = gcp.alpha_compute
else:
compute_to_use = gcp.compute
if backend_service and services_with_weights:
raise ValueError(
"both backend_service and service_with_weights are not None."
)
default_service = backend_service.url if backend_service else None
default_route_action = (
{
"weightedBackendServices": [
{
"backendService": service.url,
"weight": w,
}
for service, w in list(services_with_weights.items())
]
}
if services_with_weights
else None
)
config = {
"pathMatchers": [
{
"name": _PATH_MATCHER_NAME,
"defaultService": default_service,
"defaultRouteAction": default_route_action,
"routeRules": route_rules,
}
]
}
logger.debug("Sending GCP request with body=%s", config)
result = (
compute_to_use.urlMaps()
.patch(project=gcp.project, urlMap=url_map_name, body=config)
.execute(num_retries=_GCP_API_RETRIES)
)
wait_for_global_operation(gcp, result["name"])
def wait_for_instance_group_to_reach_expected_size(
gcp, instance_group, expected_size, timeout_sec
):
start_time = time.time()
while True:
current_size = len(get_instance_names(gcp, instance_group))
if current_size == expected_size:
break
if time.time() - start_time > timeout_sec:
raise Exception(
"Instance group had expected size %d but actual size %d"
% (expected_size, current_size)
)
time.sleep(2)
def wait_for_global_operation(
gcp, operation, timeout_sec=_WAIT_FOR_OPERATION_SEC
):
start_time = time.time()
while time.time() - start_time <= timeout_sec:
result = (
gcp.compute.globalOperations()
.get(project=gcp.project, operation=operation)
.execute(num_retries=_GCP_API_RETRIES)
)
if result["status"] == "DONE":
if "error" in result:
raise Exception(result["error"])
return
time.sleep(2)
raise Exception(
"Operation %s did not complete within %d" % (operation, timeout_sec)
)
def wait_for_zone_operation(
gcp, zone, operation, timeout_sec=_WAIT_FOR_OPERATION_SEC
):
start_time = time.time()
while time.time() - start_time <= timeout_sec:
result = (
gcp.compute.zoneOperations()
.get(project=gcp.project, zone=zone, operation=operation)
.execute(num_retries=_GCP_API_RETRIES)
)
if result["status"] == "DONE":
if "error" in result:
raise Exception(result["error"])
return
time.sleep(2)
raise Exception(
"Operation %s did not complete within %d" % (operation, timeout_sec)
)
def wait_for_healthy_backends(
gcp, backend_service, instance_group, timeout_sec=_WAIT_FOR_BACKEND_SEC
):
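"""Wait until every instance in the group reports HEALTHY via the backend service's getHealth API."""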
start_time = time.time()
config = {"group": instance_group.url}
instance_names = get_instance_names(gcp, instance_group)
expected_size = len(instance_names)
while time.time() - start_time <= timeout_sec:
for instance_name in instance_names:
try:
status = get_serving_status(instance_name, gcp.service_port)
logger.info(
"serving status response from %s: %s", instance_name, status
)
except grpc.RpcError as rpc_error:
logger.info(
"checking serving status of %s failed: %s",
instance_name,
rpc_error,
)
result = (
gcp.compute.backendServices()
.getHealth(
project=gcp.project,
backendService=backend_service.name,
body=config,
)
.execute(num_retries=_GCP_API_RETRIES)
)
if "healthStatus" in result:
logger.info("received GCP healthStatus: %s", result["healthStatus"])
healthy = True
for instance in result["healthStatus"]:
if instance["healthState"] != "HEALTHY":
healthy = False
break
if healthy and expected_size == len(result["healthStatus"]):
return
else:
logger.info("no healthStatus received from GCP")
time.sleep(5)
raise Exception(
"Not all backends became healthy within %d seconds: %s"
% (timeout_sec, result)
)
def get_instance_names(gcp, instance_group):
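"""Return the instance names (not full URLs) currently in the instance group."""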
instance_names = []
result = (
gcp.compute.instanceGroups()
.listInstances(
project=gcp.project,
zone=instance_group.zone,
instanceGroup=instance_group.name,
body={"instanceState": "ALL"},
)
.execute(num_retries=_GCP_API_RETRIES)
)
if "items" not in result:
return []
for item in result["items"]:
# listInstances() returns the full URL of the instance, which ends with
# the instance name. compute.instances().get() requires using the
# instance name (not the full URL) to look up instance details, so we
# just extract the name manually.
instance_name = item["instance"].split("/")[-1]
instance_names.append(instance_name)
logger.info("retrieved instance names: %s", instance_names)
return instance_names
def clean_up(gcp):
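"""Delete all GCP resources created or looked up by this run."""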
delete_global_forwarding_rules(gcp)
delete_target_proxies(gcp)
delete_url_maps(gcp)
delete_backend_services(gcp)
if gcp.health_check_firewall_rule:
delete_firewall(gcp)
if gcp.health_check:
delete_health_check(gcp)
delete_instance_groups(gcp)
if gcp.instance_template:
delete_instance_template(gcp)
class InstanceGroup(object):
def __init__(self, name, url, zone):
self.name = name
self.url = url
self.zone = zone
class GcpResource(object):
def __init__(self, name, url):
self.name = name
self.url = url
class GcpState(object):
def __init__(self, compute, alpha_compute, project, project_num):
self.compute = compute
self.alpha_compute = alpha_compute
self.project = project
self.project_num = project_num
self.health_check = None
self.health_check_firewall_rule = None
self.backend_services = []
self.url_maps = []
self.target_proxies = []
self.global_forwarding_rules = []
self.service_port = None
self.instance_template = None
self.instance_groups = []
self.errors = []
logging.debug(
"script start time: %s",
datetime.datetime.now(datetime.timezone.utc)
.astimezone()
.strftime("%Y-%m-%dT%H:%M:%S %Z"),
)
logging.debug(
"logging local timezone: %s",
datetime.datetime.now(datetime.timezone.utc).astimezone().tzinfo,
)
alpha_compute = None
if args.compute_discovery_document:
with open(args.compute_discovery_document, "r") as discovery_doc:
compute = googleapiclient.discovery.build_from_document(
discovery_doc.read()
)
if not args.only_stable_gcp_apis and args.alpha_compute_discovery_document:
with open(args.alpha_compute_discovery_document, "r") as discovery_doc:
alpha_compute = googleapiclient.discovery.build_from_document(
discovery_doc.read()
)
else:
compute = googleapiclient.discovery.build("compute", "v1")
if not args.only_stable_gcp_apis:
alpha_compute = googleapiclient.discovery.build("compute", "alpha")
test_results = {}
failed_tests = []
try:
gcp = GcpState(compute, alpha_compute, args.project_id, args.project_num)
gcp_suffix = args.gcp_suffix
health_check_name = _BASE_HEALTH_CHECK_NAME + gcp_suffix
if not args.use_existing_gcp_resources:
if args.keep_gcp_resources:
# Auto-generating a unique suffix in case of conflict should not be
# combined with --keep_gcp_resources, as the suffix actually used
# for GCP resources will not match the provided --gcp_suffix value.
num_attempts = 1
else:
num_attempts = 5
for i in range(num_attempts):
try:
logger.info("Using GCP suffix %s", gcp_suffix)
create_health_check(gcp, health_check_name)
break
except googleapiclient.errors.HttpError as http_error:
gcp_suffix = "%s-%04d" % (gcp_suffix, random.randint(0, 9999))
health_check_name = _BASE_HEALTH_CHECK_NAME + gcp_suffix
logger.exception("HttpError when creating health check")
if gcp.health_check is None:
raise Exception(
"Failed to create health check name after %d attempts"
% num_attempts
)
firewall_name = _BASE_FIREWALL_RULE_NAME + gcp_suffix
backend_service_name = _BASE_BACKEND_SERVICE_NAME + gcp_suffix
alternate_backend_service_name = (
_BASE_BACKEND_SERVICE_NAME + "-alternate" + gcp_suffix
)
extra_backend_service_name = (
_BASE_BACKEND_SERVICE_NAME + "-extra" + gcp_suffix
)
more_extra_backend_service_name = (
_BASE_BACKEND_SERVICE_NAME + "-more-extra" + gcp_suffix
)
url_map_name = _BASE_URL_MAP_NAME + gcp_suffix
url_map_name_2 = url_map_name + "2"
service_host_name = _BASE_SERVICE_HOST + gcp_suffix
target_proxy_name = _BASE_TARGET_PROXY_NAME + gcp_suffix
target_proxy_name_2 = target_proxy_name + "2"
forwarding_rule_name = _BASE_FORWARDING_RULE_NAME + gcp_suffix
forwarding_rule_name_2 = forwarding_rule_name + "2"
template_name = _BASE_TEMPLATE_NAME + gcp_suffix
instance_group_name = _BASE_INSTANCE_GROUP_NAME + gcp_suffix
same_zone_instance_group_name = (
_BASE_INSTANCE_GROUP_NAME + "-same-zone" + gcp_suffix
)
secondary_zone_instance_group_name = (
_BASE_INSTANCE_GROUP_NAME + "-secondary-zone" + gcp_suffix
)
potential_service_ports = list(args.service_port_range)
random.shuffle(potential_service_ports)
if args.use_existing_gcp_resources:
logger.info("Reusing existing GCP resources")
get_health_check(gcp, health_check_name)
get_health_check_firewall_rule(gcp, firewall_name)
backend_service = get_backend_service(gcp, backend_service_name)
alternate_backend_service = get_backend_service(
gcp, alternate_backend_service_name
)
extra_backend_service = get_backend_service(
gcp, extra_backend_service_name, record_error=False
)
more_extra_backend_service = get_backend_service(
gcp, more_extra_backend_service_name, record_error=False
)
get_url_map(gcp, url_map_name)
get_target_proxy(gcp, target_proxy_name)
get_global_forwarding_rule(gcp, forwarding_rule_name)
get_url_map(gcp, url_map_name_2, record_error=False)
get_target_proxy(gcp, target_proxy_name_2, record_error=False)
get_global_forwarding_rule(
gcp, forwarding_rule_name_2, record_error=False
)
get_instance_template(gcp, template_name)
instance_group = get_instance_group(gcp, args.zone, instance_group_name)
same_zone_instance_group = get_instance_group(
gcp, args.zone, same_zone_instance_group_name
)
secondary_zone_instance_group = get_instance_group(
gcp, args.secondary_zone, secondary_zone_instance_group_name
)
if gcp.errors:
raise Exception(gcp.errors)
else:
create_health_check_firewall_rule(gcp, firewall_name)
backend_service = add_backend_service(gcp, backend_service_name)
alternate_backend_service = add_backend_service(
gcp, alternate_backend_service_name
)
create_url_map(gcp, url_map_name, backend_service, service_host_name)
create_target_proxy(gcp, target_proxy_name)
create_global_forwarding_rule(
gcp, forwarding_rule_name, potential_service_ports
)
if not gcp.service_port:
raise Exception(
"Failed to find a valid ip:port for the forwarding rule"
)
if gcp.service_port != _DEFAULT_SERVICE_PORT:
patch_url_map_host_rule_with_port(
gcp, url_map_name, backend_service, service_host_name
)
startup_script = get_startup_script(
args.path_to_server_binary, gcp.service_port
)
create_instance_template(
gcp,
template_name,
args.network,
args.source_image,
args.machine_type,
startup_script,
)
instance_group = add_instance_group(
gcp, args.zone, instance_group_name, _INSTANCE_GROUP_SIZE
)
patch_backend_service(gcp, backend_service, [instance_group])
same_zone_instance_group = add_instance_group(
gcp, args.zone, same_zone_instance_group_name, _INSTANCE_GROUP_SIZE
)
secondary_zone_instance_group = add_instance_group(
gcp,
args.secondary_zone,
secondary_zone_instance_group_name,
_INSTANCE_GROUP_SIZE,
)
wait_for_healthy_backends(gcp, backend_service, instance_group)
if args.test_case:
client_env = dict(os.environ)
if original_grpc_trace:
client_env["GRPC_TRACE"] = original_grpc_trace
if original_grpc_verbosity:
client_env["GRPC_VERBOSITY"] = original_grpc_verbosity
bootstrap_server_features = []
if gcp.service_port == _DEFAULT_SERVICE_PORT:
server_uri = service_host_name
else:
server_uri = service_host_name + ":" + str(gcp.service_port)
if args.xds_v3_support:
client_env["GRPC_XDS_EXPERIMENTAL_V3_SUPPORT"] = "true"
bootstrap_server_features.append("xds_v3")
if args.bootstrap_file:
bootstrap_path = os.path.abspath(args.bootstrap_file)
else:
with tempfile.NamedTemporaryFile(delete=False) as bootstrap_file:
bootstrap_file.write(
_BOOTSTRAP_TEMPLATE.format(
node_id="projects/%s/networks/%s/nodes/%s"
% (
gcp.project_num,
args.network.split("/")[-1],
uuid.uuid1(),
),
server_features=json.dumps(bootstrap_server_features),
).encode("utf-8")
)
bootstrap_path = bootstrap_file.name
client_env["GRPC_XDS_BOOTSTRAP"] = bootstrap_path
client_env["GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING"] = "true"
client_env["GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT"] = "true"
client_env["GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION"] = "true"
for test_case in args.test_case:
if test_case in _V3_TEST_CASES and not args.xds_v3_support:
logger.info(
"skipping test %s due to missing v3 support", test_case
)
continue
if test_case in _ALPHA_TEST_CASES and not gcp.alpha_compute:
logger.info(
"skipping test %s due to missing alpha support", test_case
)
continue
if (
test_case
in [
"api_listener",
"forwarding_rule_port_match",
"forwarding_rule_default_port",
]
and CLIENT_HOSTS
):
logger.info(
(
"skipping test %s because test configuration is"
"not compatible with client processes on existing"
"client hosts"
),
test_case,
)
continue
if test_case == "forwarding_rule_default_port":
server_uri = service_host_name
result = jobset.JobResult()
log_dir = os.path.join(_TEST_LOG_BASE_DIR, test_case)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
test_log_filename = os.path.join(log_dir, _SPONGE_LOG_NAME)
test_log_file = open(test_log_filename, "w+")
client_process = None
if test_case in _TESTS_TO_RUN_MULTIPLE_RPCS:
rpcs_to_send = '--rpc="UnaryCall,EmptyCall"'
else:
rpcs_to_send = '--rpc="UnaryCall"'
if test_case in _TESTS_TO_SEND_METADATA:
metadata_to_send = '--metadata="EmptyCall:{keyE}:{valueE},UnaryCall:{keyU}:{valueU},UnaryCall:{keyNU}:{valueNU}"'.format(
keyE=_TEST_METADATA_KEY,
valueE=_TEST_METADATA_VALUE_EMPTY,
keyU=_TEST_METADATA_KEY,
valueU=_TEST_METADATA_VALUE_UNARY,
keyNU=_TEST_METADATA_NUMERIC_KEY,
valueNU=_TEST_METADATA_NUMERIC_VALUE,
)
else:
# Setting the arg explicitly to empty with '--metadata=""'
# makes C# client fail
# (see https://github.com/commandlineparser/commandline/issues/412),
# so instead we just rely on clients using the default when
# metadata arg is not specified.
metadata_to_send = ""
# TODO(ericgribkoff) Temporarily disable fail_on_failed_rpc checks
# in the client. This means we will ignore intermittent RPC
# failures (but this framework still checks that the final result
# is as expected).
#
# Reason for disabling this is, the resources are shared by
# multiple tests, and a change in previous test could be delayed
# until the second test starts. The second test may see
# intermittent failures because of that.
#
# A fix is to not share resources between tests (though that does
# mean the tests will be significantly slower due to creating new
# resources).
fail_on_failed_rpc = ""
try:
if not CLIENT_HOSTS:
client_cmd_formatted = args.client_cmd.format(
server_uri=server_uri,
stats_port=args.stats_port,
qps=args.qps,
fail_on_failed_rpc=fail_on_failed_rpc,
rpcs_to_send=rpcs_to_send,
metadata_to_send=metadata_to_send,
)
logger.debug("running client: %s", client_cmd_formatted)
client_cmd = shlex.split(client_cmd_formatted)
client_process = subprocess.Popen(
client_cmd,
env=client_env,
stderr=subprocess.STDOUT,
stdout=test_log_file,
)
if test_case == "backends_restart":
test_backends_restart(gcp, backend_service, instance_group)
elif test_case == "change_backend_service":
test_change_backend_service(
gcp,
backend_service,
instance_group,
alternate_backend_service,
same_zone_instance_group,
)
elif test_case == "gentle_failover":
test_gentle_failover(
gcp,
backend_service,
instance_group,
secondary_zone_instance_group,
)
elif test_case == "load_report_based_failover":
test_load_report_based_failover(
gcp,
backend_service,
instance_group,
secondary_zone_instance_group,
)
elif test_case == "ping_pong":
test_ping_pong(gcp, backend_service, instance_group)
elif test_case == "remove_instance_group":
test_remove_instance_group(
gcp,
backend_service,
instance_group,
same_zone_instance_group,
)
elif test_case == "round_robin":
test_round_robin(gcp, backend_service, instance_group)
elif (
test_case
== "secondary_locality_gets_no_requests_on_partial_primary_failure"
):
test_secondary_locality_gets_no_requests_on_partial_primary_failure(
gcp,
backend_service,
instance_group,
secondary_zone_instance_group,
)
elif (
test_case
== "secondary_locality_gets_requests_on_primary_failure"
):
test_secondary_locality_gets_requests_on_primary_failure(
gcp,
backend_service,
instance_group,
secondary_zone_instance_group,
)
elif test_case == "traffic_splitting":
test_traffic_splitting(
gcp,
backend_service,
instance_group,
alternate_backend_service,
same_zone_instance_group,
)
elif test_case == "path_matching":
test_path_matching(
gcp,
backend_service,
instance_group,
alternate_backend_service,
same_zone_instance_group,
)
elif test_case == "header_matching":
test_header_matching(
gcp,
backend_service,
instance_group,
alternate_backend_service,
same_zone_instance_group,
)
elif test_case == "circuit_breaking":
test_circuit_breaking(
gcp,
backend_service,
instance_group,
same_zone_instance_group,
)
elif test_case == "timeout":
test_timeout(gcp, backend_service, instance_group)
elif test_case == "fault_injection":
test_fault_injection(gcp, backend_service, instance_group)
elif test_case == "api_listener":
server_uri = test_api_listener(
gcp,
backend_service,
instance_group,
alternate_backend_service,
)
elif test_case == "forwarding_rule_port_match":
server_uri = test_forwarding_rule_port_match(
gcp, backend_service, instance_group
)
elif test_case == "forwarding_rule_default_port":
server_uri = test_forwarding_rule_default_port(
gcp, backend_service, instance_group
)
elif test_case == "metadata_filter":
test_metadata_filter(
gcp,
backend_service,
instance_group,
alternate_backend_service,
same_zone_instance_group,
)
elif test_case == "csds":
test_csds(gcp, backend_service, instance_group, server_uri)
else:
logger.error("Unknown test case: %s", test_case)
sys.exit(1)
if client_process and client_process.poll() is not None:
raise Exception(
"Client process exited prematurely with exit code %d"
% client_process.returncode
)
result.state = "PASSED"
result.returncode = 0
except Exception as e:
logger.exception("Test case %s failed", test_case)
failed_tests.append(test_case)
result.state = "FAILED"
result.message = str(e)
if args.halt_after_fail:
# Stop the test suite if one case failed.
raise
finally:
if client_process:
if client_process.returncode:
logger.info(
"Client exited with code %d"
% client_process.returncode
)
else:
client_process.terminate()
test_log_file.close()
# Workaround for Python 3, as report_utils will invoke decode() on
# result.message, which has a default value of ''.
result.message = result.message.encode("UTF-8")
test_results[test_case] = [result]
if args.log_client_output:
logger.info("Client output:")
with open(test_log_filename, "r") as client_output:
logger.info(client_output.read())
if not os.path.exists(_TEST_LOG_BASE_DIR):
os.makedirs(_TEST_LOG_BASE_DIR)
report_utils.render_junit_xml_report(
test_results,
os.path.join(_TEST_LOG_BASE_DIR, _SPONGE_XML_NAME),
suite_name="xds_tests",
multi_target=True,
)
if failed_tests:
logger.error("Test case(s) %s failed", failed_tests)
sys.exit(1)
finally:
keep_resources = args.keep_gcp_resources
if args.halt_after_fail and failed_tests:
logger.info(
"Halt after fail triggered, exiting without cleaning up resources"
)
keep_resources = True
if not keep_resources:
logger.info("Cleaning up GCP resources. This may take some time.")
clean_up(gcp)
| 158,921
| 35.736477
| 137
|
py
|
grpc
|
grpc-master/tools/run_tests/task_runner.py
|
#!/usr/bin/env python3
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs selected gRPC test/build tasks."""
from __future__ import print_function
import argparse
import multiprocessing
import sys
import artifacts.artifact_targets as artifact_targets
import artifacts.distribtest_targets as distribtest_targets
import artifacts.package_targets as package_targets
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
_TARGETS = []
_TARGETS += artifact_targets.targets()
_TARGETS += distribtest_targets.targets()
_TARGETS += package_targets.targets()
def _create_build_map():
"""Maps task names and labels to list of tasks to be built."""
target_build_map = dict([(target.name, [target]) for target in _TARGETS])
if len(_TARGETS) > len(list(target_build_map.keys())):
raise Exception("Target names need to be unique")
label_build_map = {}
label_build_map["all"] = [t for t in _TARGETS] # to build all targets
for target in _TARGETS:
for label in target.labels:
if label in label_build_map:
label_build_map[label].append(target)
else:
label_build_map[label] = [target]
if set(target_build_map.keys()).intersection(list(label_build_map.keys())):
raise Exception("Target names need to be distinct from label names")
return dict(list(target_build_map.items()) + list(label_build_map.items()))
_BUILD_MAP = _create_build_map()
argp = argparse.ArgumentParser(description="Runs build/test targets.")
argp.add_argument(
"-b",
"--build",
choices=sorted(_BUILD_MAP.keys()),
nargs="+",
default=["all"],
help="Target name or target label to build.",
)
argp.add_argument(
"-f",
"--filter",
choices=sorted(_BUILD_MAP.keys()),
nargs="+",
default=[],
help="Filter targets to build with AND semantics.",
)
argp.add_argument("-j", "--jobs", default=multiprocessing.cpu_count(), type=int)
argp.add_argument(
"-x",
"--xml_report",
default="report_taskrunner_sponge_log.xml",
type=str,
help="Filename for the JUnit-compatible XML report",
)
argp.add_argument(
"--dry_run",
default=False,
action="store_const",
const=True,
help="Only print what would be run.",
)
argp.add_argument(
"--inner_jobs",
default=None,
type=int,
help=(
"Number of parallel jobs to use by each target. Passed as"
" build_jobspec(inner_jobs=N) to each target."
),
)
args = argp.parse_args()
# Figure out which targets to build
targets = []
for label in args.build:
targets += _BUILD_MAP[label]
# Among targets selected by -b, filter out those that don't match the filter
targets = [t for t in targets if all(f in t.labels for f in args.filter)]
print("Will build %d targets:" % len(targets))
for target in targets:
print(" %s, labels %s" % (target.name, target.labels))
print()
if args.dry_run:
print("--dry_run was used, exiting")
sys.exit(1)
# Execute pre-build phase
prebuild_jobs = []
for target in targets:
prebuild_jobs += target.pre_build_jobspecs()
if prebuild_jobs:
num_failures, _ = jobset.run(
prebuild_jobs, newline_on_success=True, maxjobs=args.jobs
)
if num_failures != 0:
jobset.message("FAILED", "Pre-build phase failed.", do_newline=True)
sys.exit(1)
build_jobs = []
for target in targets:
build_jobs.append(target.build_jobspec(inner_jobs=args.inner_jobs))
if not build_jobs:
print("Nothing to build.")
sys.exit(1)
jobset.message("START", "Building targets.", do_newline=True)
num_failures, resultset = jobset.run(
build_jobs, newline_on_success=True, maxjobs=args.jobs
)
report_utils.render_junit_xml_report(
resultset, args.xml_report, suite_name="tasks"
)
if num_failures == 0:
jobset.message(
"SUCCESS", "All targets built successfully.", do_newline=True
)
else:
jobset.message("FAILED", "Failed to build targets.", do_newline=True)
sys.exit(1)
| 4,541
| 29.07947
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/run_tests_matrix.py
|
#!/usr/bin/env python3
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run test matrix."""
from __future__ import print_function
import argparse
import multiprocessing
import os
import sys
from python_utils.filter_pull_request_tests import filter_tests
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../.."))
os.chdir(_ROOT)
_DEFAULT_RUNTESTS_TIMEOUT = 1 * 60 * 60
# C/C++ tests can take long time
_CPP_RUNTESTS_TIMEOUT = 4 * 60 * 60
# Set timeout high for ObjC for Cocoapods to install pods
_OBJC_RUNTESTS_TIMEOUT = 2 * 60 * 60
# Number of jobs assigned to each run_tests.py instance
_DEFAULT_INNER_JOBS = 2
# Name of the top-level umbrella report that includes all the run_tests.py invocations
# Note that the starting letter 't' matters so that the targets are listed AFTER
# the per-test breakdown items that start with 'run_tests/' (it is more readable that way)
_MATRIX_REPORT_NAME = "toplevel_run_tests_invocations"
def _safe_report_name(name):
"""Reports with '+' in target name won't show correctly in ResultStore"""
return name.replace("+", "p")
def _report_filename(name):
"""Generates report file name with directory structure that leads to better presentation by internal CI"""
# 'sponge_log.xml' suffix must be there for results to get recognized by kokoro.
return "%s/%s" % (_safe_report_name(name), "sponge_log.xml")
def _matrix_job_logfilename(shortname_for_multi_target):
"""Generate location for log file that will match the sponge_log.xml from the top-level matrix report."""
# 'sponge_log.log' suffix must be there for log to get recognized as "target log"
# for the corresponding 'sponge_log.xml' report.
# the shortname_for_multi_target component must be set to match the sponge_log.xml location
# because the top-level render_junit_xml_report is called with multi_target=True
sponge_log_name = "%s/%s/%s" % (
_MATRIX_REPORT_NAME,
shortname_for_multi_target,
"sponge_log.log",
)
# env variable can be used to override the base location for the reports
# so we need to match that behavior here too
base_dir = os.getenv("GRPC_TEST_REPORT_BASE_DIR", None)
if base_dir:
sponge_log_name = os.path.join(base_dir, sponge_log_name)
return sponge_log_name
def _docker_jobspec(
name,
runtests_args=[],
runtests_envs={},
inner_jobs=_DEFAULT_INNER_JOBS,
timeout_seconds=None,
):
"""Run a single instance of run_tests.py in a docker container"""
if not timeout_seconds:
timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
shortname = "run_tests_%s" % name
test_job = jobset.JobSpec(
cmdline=[
"python3",
"tools/run_tests/run_tests.py",
"--use_docker",
"-t",
"-j",
str(inner_jobs),
"-x",
"run_tests/%s" % _report_filename(name),
"--report_suite_name",
"%s" % _safe_report_name(name),
]
+ runtests_args,
environ=runtests_envs,
shortname=shortname,
timeout_seconds=timeout_seconds,
logfilename=_matrix_job_logfilename(shortname),
)
return test_job
def _workspace_jobspec(
name,
runtests_args=[],
workspace_name=None,
runtests_envs={},
inner_jobs=_DEFAULT_INNER_JOBS,
timeout_seconds=None,
):
"""Run a single instance of run_tests.py in a separate workspace"""
if not workspace_name:
workspace_name = "workspace_%s" % name
if not timeout_seconds:
timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
shortname = "run_tests_%s" % name
env = {"WORKSPACE_NAME": workspace_name}
env.update(runtests_envs)
# if report base dir is set, we don't need to ".." to come out of the workspace dir
report_dir_prefix = (
"" if os.getenv("GRPC_TEST_REPORT_BASE_DIR", None) else "../"
)
test_job = jobset.JobSpec(
cmdline=[
"bash",
"tools/run_tests/helper_scripts/run_tests_in_workspace.sh",
"-t",
"-j",
str(inner_jobs),
"-x",
"%srun_tests/%s" % (report_dir_prefix, _report_filename(name)),
"--report_suite_name",
"%s" % _safe_report_name(name),
]
+ runtests_args,
environ=env,
shortname=shortname,
timeout_seconds=timeout_seconds,
logfilename=_matrix_job_logfilename(shortname),
)
return test_job
def _generate_jobs(
languages,
configs,
platforms,
iomgr_platforms=["native"],
arch=None,
compiler=None,
labels=[],
extra_args=[],
extra_envs={},
inner_jobs=_DEFAULT_INNER_JOBS,
timeout_seconds=None,
):
result = []
for language in languages:
for platform in platforms:
for iomgr_platform in iomgr_platforms:
for config in configs:
name = "%s_%s_%s_%s" % (
language,
platform,
config,
iomgr_platform,
)
runtests_args = [
"-l",
language,
"-c",
config,
"--iomgr_platform",
iomgr_platform,
]
if arch or compiler:
name += "_%s_%s" % (arch, compiler)
runtests_args += [
"--arch",
arch,
"--compiler",
compiler,
]
if "--build_only" in extra_args:
name += "_buildonly"
for extra_env in extra_envs:
name += "_%s_%s" % (extra_env, extra_envs[extra_env])
runtests_args += extra_args
if platform == "linux":
job = _docker_jobspec(
name=name,
runtests_args=runtests_args,
runtests_envs=extra_envs,
inner_jobs=inner_jobs,
timeout_seconds=timeout_seconds,
)
else:
job = _workspace_jobspec(
name=name,
runtests_args=runtests_args,
runtests_envs=extra_envs,
inner_jobs=inner_jobs,
timeout_seconds=timeout_seconds,
)
job.labels = [
platform,
config,
language,
iomgr_platform,
] + labels
result.append(job)
return result
def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
test_jobs = []
# sanity tests
test_jobs += _generate_jobs(
languages=["sanity", "clang-tidy", "iwyu"],
configs=["dbg"],
platforms=["linux"],
labels=["basictests"],
extra_args=extra_args + ["--report_multi_target"],
inner_jobs=inner_jobs,
)
# supported on all platforms.
test_jobs += _generate_jobs(
languages=["c"],
configs=["dbg", "opt"],
platforms=["linux", "macos", "windows"],
labels=["basictests", "corelang"],
extra_args=extra_args, # don't use multi_target report because C has too many test cases
inner_jobs=inner_jobs,
timeout_seconds=_CPP_RUNTESTS_TIMEOUT,
)
# C# tests (both on .NET desktop/mono and .NET core)
test_jobs += _generate_jobs(
languages=["csharp"],
configs=["dbg", "opt"],
platforms=["linux", "macos", "windows"],
labels=["basictests", "multilang"],
extra_args=extra_args + ["--report_multi_target"],
inner_jobs=inner_jobs,
)
# ARM64 Linux C# tests
test_jobs += _generate_jobs(
languages=["csharp"],
configs=["dbg", "opt"],
platforms=["linux"],
arch="arm64",
compiler="default",
labels=["basictests_arm64"],
extra_args=extra_args + ["--report_multi_target"],
inner_jobs=inner_jobs,
)
test_jobs += _generate_jobs(
languages=["python"],
configs=["opt"],
platforms=["linux", "macos", "windows"],
iomgr_platforms=["native"],
labels=["basictests", "multilang"],
extra_args=extra_args + ["--report_multi_target"],
inner_jobs=inner_jobs,
)
# ARM64 Linux Python tests
test_jobs += _generate_jobs(
languages=["python"],
configs=["opt"],
platforms=["linux"],
arch="arm64",
compiler="default",
iomgr_platforms=["native"],
labels=["basictests_arm64"],
extra_args=extra_args + ["--report_multi_target"],
inner_jobs=inner_jobs,
)
# supported on linux and mac.
test_jobs += _generate_jobs(
languages=["c++"],
configs=["dbg", "opt"],
platforms=["linux", "macos"],
labels=["basictests", "corelang"],
extra_args=extra_args, # don't use multi_target report because C++ has too many test cases
inner_jobs=inner_jobs,
timeout_seconds=_CPP_RUNTESTS_TIMEOUT,
)
test_jobs += _generate_jobs(
languages=["ruby", "php7"],
configs=["dbg", "opt"],
platforms=["linux", "macos"],
labels=["basictests", "multilang"],
extra_args=extra_args + ["--report_multi_target"],
inner_jobs=inner_jobs,
)
# ARM64 Linux Ruby and PHP tests
test_jobs += _generate_jobs(
languages=["ruby", "php7"],
configs=["dbg", "opt"],
platforms=["linux"],
arch="arm64",
compiler="default",
labels=["basictests_arm64"],
extra_args=extra_args + ["--report_multi_target"],
inner_jobs=inner_jobs,
)
# supported on mac only.
test_jobs += _generate_jobs(
languages=["objc"],
configs=["opt"],
platforms=["macos"],
labels=["basictests", "multilang"],
extra_args=extra_args + ["--report_multi_target"],
inner_jobs=inner_jobs,
timeout_seconds=_OBJC_RUNTESTS_TIMEOUT,
)
return test_jobs
def _create_portability_test_jobs(
extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS
):
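    """Creates jobspecs for the portability matrix (non-default archs, compilers and build generators)."""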
test_jobs = []
# portability C x86
test_jobs += _generate_jobs(
languages=["c"],
configs=["dbg"],
platforms=["linux"],
arch="x86",
compiler="default",
labels=["portability", "corelang"],
extra_args=extra_args,
inner_jobs=inner_jobs,
)
# portability C and C++ on x64
for compiler in [
"gcc7",
# 'gcc10.2_openssl102', // TODO(b/283304471): Enable this later
"gcc12",
"gcc_musl",
"clang6",
"clang15",
]:
test_jobs += _generate_jobs(
languages=["c", "c++"],
configs=["dbg"],
platforms=["linux"],
arch="x64",
compiler=compiler,
labels=["portability", "corelang"],
extra_args=extra_args,
inner_jobs=inner_jobs,
timeout_seconds=_CPP_RUNTESTS_TIMEOUT,
)
# portability C on Windows 64-bit (x86 is the default)
test_jobs += _generate_jobs(
languages=["c"],
configs=["dbg"],
platforms=["windows"],
arch="x64",
compiler="default",
labels=["portability", "corelang"],
extra_args=extra_args,
inner_jobs=inner_jobs,
)
# portability C on Windows with the "Visual Studio" cmake
# generator, i.e. not using Ninja (to verify that we can still build with msbuild)
test_jobs += _generate_jobs(
languages=["c"],
configs=["dbg"],
platforms=["windows"],
arch="default",
compiler="cmake_vs2019",
labels=["portability", "corelang"],
extra_args=extra_args,
inner_jobs=inner_jobs,
)
# portability C++ on Windows
# TODO(jtattermusch): some of the tests are failing, so we force --build_only
test_jobs += _generate_jobs(
languages=["c++"],
configs=["dbg"],
platforms=["windows"],
arch="default",
compiler="default",
labels=["portability", "corelang"],
extra_args=extra_args + ["--build_only"],
inner_jobs=inner_jobs,
timeout_seconds=_CPP_RUNTESTS_TIMEOUT,
)
# portability C and C++ on Windows using VS2019 (build only)
# TODO(jtattermusch): The C tests with exactly the same config are already running as part of the
# basictests_c suite (so we force --build_only to avoid running them twice).
# The C++ tests aren't all passing, so also force --build_only.
    # NOTE(veblush): This is not needed as default=cmake_ninja_vs2019
# test_jobs += _generate_jobs(
# languages=["c", "c++"],
# configs=["dbg"],
# platforms=["windows"],
# arch="x64",
# compiler="cmake_ninja_vs2019",
# labels=["portability", "corelang"],
# extra_args=extra_args + ["--build_only"],
# inner_jobs=inner_jobs,
# timeout_seconds=_CPP_RUNTESTS_TIMEOUT,
# )
# C and C++ with no-exceptions on Linux
test_jobs += _generate_jobs(
languages=["c", "c++"],
configs=["noexcept"],
platforms=["linux"],
labels=["portability", "corelang"],
extra_args=extra_args,
inner_jobs=inner_jobs,
timeout_seconds=_CPP_RUNTESTS_TIMEOUT,
)
test_jobs += _generate_jobs(
languages=["python"],
configs=["dbg"],
platforms=["linux"],
arch="default",
compiler="python_alpine",
labels=["portability", "multilang"],
extra_args=extra_args + ["--report_multi_target"],
inner_jobs=inner_jobs,
)
return test_jobs
def _allowed_labels():
"""Returns a list of existing job labels."""
all_labels = set()
for job in _create_test_jobs() + _create_portability_test_jobs():
for label in job.labels:
all_labels.add(label)
return sorted(all_labels)
def _runs_per_test_type(arg_str):
"""Auxiliary function to parse the "runs_per_test" flag."""
try:
n = int(arg_str)
if n <= 0:
raise ValueError
return n
except:
msg = "'{}' is not a positive integer".format(arg_str)
raise argparse.ArgumentTypeError(msg)
if __name__ == "__main__":
argp = argparse.ArgumentParser(
description="Run a matrix of run_tests.py tests."
)
argp.add_argument(
"-j",
"--jobs",
        default=multiprocessing.cpu_count() // _DEFAULT_INNER_JOBS,
type=int,
help="Number of concurrent run_tests.py instances.",
)
argp.add_argument(
"-f",
"--filter",
choices=_allowed_labels(),
nargs="+",
default=[],
help="Filter targets to run by label with AND semantics.",
)
argp.add_argument(
"--exclude",
choices=_allowed_labels(),
nargs="+",
default=[],
help="Exclude targets with any of given labels.",
)
argp.add_argument(
"--build_only",
default=False,
action="store_const",
const=True,
help="Pass --build_only flag to run_tests.py instances.",
)
argp.add_argument(
"--force_default_poller",
default=False,
action="store_const",
const=True,
help="Pass --force_default_poller to run_tests.py instances.",
)
argp.add_argument(
"--dry_run",
default=False,
action="store_const",
const=True,
help="Only print what would be run.",
)
argp.add_argument(
"--filter_pr_tests",
default=False,
action="store_const",
const=True,
help="Filters out tests irrelevant to pull request changes.",
)
argp.add_argument(
"--base_branch",
default="origin/master",
type=str,
help="Branch that pull request is requesting to merge into",
)
argp.add_argument(
"--inner_jobs",
default=_DEFAULT_INNER_JOBS,
type=int,
help="Number of jobs in each run_tests.py instance",
)
argp.add_argument(
"-n",
"--runs_per_test",
default=1,
type=_runs_per_test_type,
help="How many times to run each tests. >1 runs implies "
+ "omitting passing test from the output & reports.",
)
argp.add_argument(
"--max_time",
default=-1,
type=int,
help="Maximum amount of time to run tests for"
+ "(other tests will be skipped)",
)
argp.add_argument(
"--internal_ci",
default=False,
action="store_const",
const=True,
help=(
"(Deprecated, has no effect) Put reports into subdirectories to"
" improve presentation of results by Kokoro."
),
)
argp.add_argument(
"--bq_result_table",
default="",
type=str,
nargs="?",
help="Upload test results to a specified BQ table.",
)
argp.add_argument(
"--extra_args",
default="",
type=str,
nargs=argparse.REMAINDER,
help="Extra test args passed to each sub-script.",
)
args = argp.parse_args()
extra_args = []
if args.build_only:
extra_args.append("--build_only")
if args.force_default_poller:
extra_args.append("--force_default_poller")
if args.runs_per_test > 1:
extra_args.append("-n")
extra_args.append("%s" % args.runs_per_test)
extra_args.append("--quiet_success")
if args.max_time > 0:
extra_args.extend(("--max_time", "%d" % args.max_time))
if args.bq_result_table:
extra_args.append("--bq_result_table")
extra_args.append("%s" % args.bq_result_table)
extra_args.append("--measure_cpu_costs")
if args.extra_args:
extra_args.extend(args.extra_args)
all_jobs = _create_test_jobs(
extra_args=extra_args, inner_jobs=args.inner_jobs
) + _create_portability_test_jobs(
extra_args=extra_args, inner_jobs=args.inner_jobs
)
jobs = []
for job in all_jobs:
if not args.filter or all(
filter in job.labels for filter in args.filter
):
if not any(
exclude_label in job.labels for exclude_label in args.exclude
):
jobs.append(job)
if not jobs:
jobset.message(
"FAILED", "No test suites match given criteria.", do_newline=True
)
sys.exit(1)
print("IMPORTANT: The changes you are testing need to be locally committed")
print("because only the committed changes in the current branch will be")
print("copied to the docker environment or into subworkspaces.")
skipped_jobs = []
if args.filter_pr_tests:
print("Looking for irrelevant tests to skip...")
relevant_jobs = filter_tests(jobs, args.base_branch)
if len(relevant_jobs) == len(jobs):
print("No tests will be skipped.")
else:
print("These tests will be skipped:")
skipped_jobs = list(set(jobs) - set(relevant_jobs))
# Sort by shortnames to make printing of skipped tests consistent
skipped_jobs.sort(key=lambda job: job.shortname)
for job in list(skipped_jobs):
print(" %s" % job.shortname)
jobs = relevant_jobs
print("Will run these tests:")
for job in jobs:
print(' %s: "%s"' % (job.shortname, " ".join(job.cmdline)))
print("")
if args.dry_run:
print("--dry_run was used, exiting")
sys.exit(1)
jobset.message("START", "Running test matrix.", do_newline=True)
num_failures, resultset = jobset.run(
jobs, newline_on_success=True, travis=True, maxjobs=args.jobs
)
# Merge skipped tests into results to show skipped tests on report.xml
if skipped_jobs:
ignored_num_skipped_failures, skipped_results = jobset.run(
skipped_jobs, skip_jobs=True
)
resultset.update(skipped_results)
report_utils.render_junit_xml_report(
resultset,
_report_filename(_MATRIX_REPORT_NAME),
suite_name=_MATRIX_REPORT_NAME,
multi_target=True,
)
if num_failures == 0:
jobset.message(
"SUCCESS",
"All run_tests.py instances finished successfully.",
do_newline=True,
)
else:
jobset.message(
"FAILED",
"Some run_tests.py instances have failed.",
do_newline=True,
)
sys.exit(1)
| 21,716
| 30.75
| 110
|
py
|
grpc
|
grpc-master/tools/run_tests/start_port_server.py
|
#!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Wrapper around port server starting code.
Used by developers who wish to run individual C/C++ tests outside of the
run_tests.py infrastructure.
The path to this file is called out in test/core/util/port.c, and printed as
an error message to users.
"""
import python_utils.start_port_server as start_port_server
start_port_server.start_port_server()
print("Port server started successfully")
| 1,000
| 32.366667
| 76
|
py
|
grpc
|
grpc-master/tools/run_tests/run_performance_tests.py
|
#!/usr/bin/env python3
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run performance tests locally or remotely."""
from __future__ import print_function
import argparse
import collections
import itertools
import json
import multiprocessing
import os
import pipes
import re
import subprocess
import sys
import tempfile
import time
import traceback
import uuid
import six
import performance.scenario_config as scenario_config
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../.."))
os.chdir(_ROOT)
_REMOTE_HOST_USERNAME = "jenkins"
_SCENARIO_TIMEOUT = 3 * 60
_WORKER_TIMEOUT = 3 * 60
_NETPERF_TIMEOUT = 60
_QUIT_WORKER_TIMEOUT = 2 * 60
class QpsWorkerJob:
"""Encapsulates a qps worker server job."""
def __init__(self, spec, language, host_and_port, perf_file_base_name=None):
self._spec = spec
self.language = language
self.host_and_port = host_and_port
self._job = None
self.perf_file_base_name = perf_file_base_name
def start(self):
self._job = jobset.Job(
self._spec, newline_on_success=True, travis=True, add_env={}
)
def is_running(self):
"""Polls a job and returns True if given job is still running."""
return self._job and self._job.state() == jobset._RUNNING
def kill(self):
if self._job:
self._job.kill()
self._job = None
def create_qpsworker_job(
language, shortname=None, port=10000, remote_host=None, perf_cmd=None
):
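    """Creates a QpsWorkerJob for the given language; the worker command is optionally wrapped in perf and, for remote hosts, run over SSH."""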
cmdline = language.worker_cmdline() + ["--driver_port=%s" % port]
if remote_host:
host_and_port = "%s:%s" % (remote_host, port)
else:
host_and_port = "localhost:%s" % port
perf_file_base_name = None
if perf_cmd:
perf_file_base_name = "%s-%s" % (host_and_port, shortname)
        # specify -o output file so perf.data gets collected when the worker is stopped
cmdline = (
perf_cmd + ["-o", "%s-perf.data" % perf_file_base_name] + cmdline
)
worker_timeout = _WORKER_TIMEOUT
if remote_host:
user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, remote_host)
ssh_cmd = ["ssh"]
cmdline = ["timeout", "%s" % (worker_timeout + 30)] + cmdline
ssh_cmd.extend(
[
str(user_at_host),
"cd ~/performance_workspace/grpc/ && %s" % " ".join(cmdline),
]
)
cmdline = ssh_cmd
jobspec = jobset.JobSpec(
cmdline=cmdline,
shortname=shortname,
timeout_seconds=worker_timeout, # workers get restarted after each scenario
verbose_success=True,
)
return QpsWorkerJob(jobspec, language, host_and_port, perf_file_base_name)
def create_scenario_jobspec(
scenario_json,
workers,
remote_host=None,
bq_result_table=None,
server_cpu_load=0,
):
"""Runs one scenario using QPS driver."""
# setting QPS_WORKERS env variable here makes sure it works with SSH too.
cmd = 'QPS_WORKERS="%s" ' % ",".join(workers)
if bq_result_table:
cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
cmd += "tools/run_tests/performance/run_qps_driver.sh "
cmd += "--scenarios_json=%s " % pipes.quote(
json.dumps({"scenarios": [scenario_json]})
)
cmd += "--scenario_result_file=scenario_result.json "
if server_cpu_load != 0:
cmd += (
"--search_param=offered_load --initial_search_value=1000"
" --targeted_cpu_load=%d --stride=500 --error_tolerance=0.01"
% server_cpu_load
)
if remote_host:
user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, remote_host)
cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
user_at_host,
pipes.quote(cmd),
)
return jobset.JobSpec(
cmdline=[cmd],
shortname="%s" % scenario_json["name"],
timeout_seconds=_SCENARIO_TIMEOUT,
shell=True,
verbose_success=True,
)
def create_quit_jobspec(workers, remote_host=None):
"""Runs quit using QPS driver."""
# setting QPS_WORKERS env variable here makes sure it works with SSH too.
cmd = 'QPS_WORKERS="%s" cmake/build/qps_json_driver --quit' % ",".join(
w.host_and_port for w in workers
)
if remote_host:
user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, remote_host)
cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
user_at_host,
pipes.quote(cmd),
)
return jobset.JobSpec(
cmdline=[cmd],
shortname="shutdown_workers",
timeout_seconds=_QUIT_WORKER_TIMEOUT,
shell=True,
verbose_success=True,
)
def create_netperf_jobspec(
server_host="localhost", client_host=None, bq_result_table=None
):
"""Runs netperf benchmark."""
cmd = 'NETPERF_SERVER_HOST="%s" ' % server_host
if bq_result_table:
cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
if client_host:
# If netperf is running remotely, the env variables populated by Jenkins
# won't be available on the client, but we need them for uploading results
# to BigQuery.
jenkins_job_name = os.getenv("KOKORO_JOB_NAME")
if jenkins_job_name:
cmd += 'KOKORO_JOB_NAME="%s" ' % jenkins_job_name
jenkins_build_number = os.getenv("KOKORO_BUILD_NUMBER")
if jenkins_build_number:
cmd += 'KOKORO_BUILD_NUMBER="%s" ' % jenkins_build_number
cmd += "tools/run_tests/performance/run_netperf.sh"
if client_host:
user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, client_host)
cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
user_at_host,
pipes.quote(cmd),
)
return jobset.JobSpec(
cmdline=[cmd],
shortname="netperf",
timeout_seconds=_NETPERF_TIMEOUT,
shell=True,
verbose_success=True,
)
def archive_repo(languages):
"""Archives local version of repo including submodules."""
cmdline = ["tar", "-cf", "../grpc.tar", "../grpc/"]
if "java" in languages:
cmdline.append("../grpc-java")
if "go" in languages:
cmdline.append("../grpc-go")
if "node" in languages or "node_purejs" in languages:
cmdline.append("../grpc-node")
archive_job = jobset.JobSpec(
cmdline=cmdline, shortname="archive_repo", timeout_seconds=3 * 60
)
jobset.message("START", "Archiving local repository.", do_newline=True)
num_failures, _ = jobset.run(
[archive_job], newline_on_success=True, maxjobs=1
)
if num_failures == 0:
jobset.message(
"SUCCESS",
"Archive with local repository created successfully.",
do_newline=True,
)
else:
jobset.message(
"FAILED", "Failed to archive local repository.", do_newline=True
)
sys.exit(1)
def prepare_remote_hosts(hosts, prepare_local=False):
"""Prepares remote hosts (and maybe prepare localhost as well)."""
prepare_timeout = 10 * 60
prepare_jobs = []
for host in hosts:
user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, host)
prepare_jobs.append(
jobset.JobSpec(
cmdline=["tools/run_tests/performance/remote_host_prepare.sh"],
shortname="remote_host_prepare.%s" % host,
environ={"USER_AT_HOST": user_at_host},
timeout_seconds=prepare_timeout,
)
)
if prepare_local:
# Prepare localhost as well
prepare_jobs.append(
jobset.JobSpec(
cmdline=["tools/run_tests/performance/kill_workers.sh"],
shortname="local_prepare",
timeout_seconds=prepare_timeout,
)
)
jobset.message("START", "Preparing hosts.", do_newline=True)
num_failures, _ = jobset.run(
prepare_jobs, newline_on_success=True, maxjobs=10
)
if num_failures == 0:
jobset.message(
"SUCCESS", "Prepare step completed successfully.", do_newline=True
)
else:
jobset.message(
"FAILED", "Failed to prepare remote hosts.", do_newline=True
)
sys.exit(1)
def build_on_remote_hosts(
hosts, languages=list(scenario_config.LANGUAGES.keys()), build_local=False
):
"""Builds performance worker on remote hosts (and maybe also locally)."""
build_timeout = 45 * 60
# Kokoro VMs (which are local only) do not have caching, so they need more time to build
local_build_timeout = 60 * 60
build_jobs = []
for host in hosts:
user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, host)
build_jobs.append(
jobset.JobSpec(
cmdline=["tools/run_tests/performance/remote_host_build.sh"]
+ languages,
shortname="remote_host_build.%s" % host,
environ={"USER_AT_HOST": user_at_host, "CONFIG": "opt"},
timeout_seconds=build_timeout,
)
)
if build_local:
# start port server locally
build_jobs.append(
jobset.JobSpec(
cmdline=["python", "tools/run_tests/start_port_server.py"],
shortname="local_start_port_server",
timeout_seconds=2 * 60,
)
)
# Build locally as well
build_jobs.append(
jobset.JobSpec(
cmdline=["tools/run_tests/performance/build_performance.sh"]
+ languages,
shortname="local_build",
environ={"CONFIG": "opt"},
timeout_seconds=local_build_timeout,
)
)
jobset.message("START", "Building.", do_newline=True)
num_failures, _ = jobset.run(
build_jobs, newline_on_success=True, maxjobs=10
)
if num_failures == 0:
jobset.message("SUCCESS", "Built successfully.", do_newline=True)
else:
jobset.message("FAILED", "Build failed.", do_newline=True)
sys.exit(1)
def create_qpsworkers(languages, worker_hosts, perf_cmd=None):
"""Creates QPS workers (but does not start them)."""
if not worker_hosts:
# run two workers locally (for each language)
workers = [(None, 10000), (None, 10010)]
elif len(worker_hosts) == 1:
# run two workers on the remote host (for each language)
workers = [(worker_hosts[0], 10000), (worker_hosts[0], 10010)]
else:
        # run one worker per remote host (for each language)
workers = [(worker_host, 10000) for worker_host in worker_hosts]
return [
create_qpsworker_job(
language,
shortname="qps_worker_%s_%s" % (language, worker_idx),
port=worker[1] + language.worker_port_offset(),
remote_host=worker[0],
perf_cmd=perf_cmd,
)
for language in languages
for worker_idx, worker in enumerate(workers)
]
def perf_report_processor_job(
worker_host, perf_base_name, output_filename, flame_graph_reports
):
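    """Creates a jobspec that fetches perf data from the given host and processes it into flamegraph reports."""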
print("Creating perf report collection job for %s" % worker_host)
cmd = ""
if worker_host != "localhost":
user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, worker_host)
cmd = (
"USER_AT_HOST=%s OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s"
" tools/run_tests/performance/process_remote_perf_flamegraphs.sh"
% (
user_at_host,
output_filename,
flame_graph_reports,
perf_base_name,
)
)
else:
cmd = (
"OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s"
" tools/run_tests/performance/process_local_perf_flamegraphs.sh"
% (output_filename, flame_graph_reports, perf_base_name)
)
return jobset.JobSpec(
cmdline=cmd,
timeout_seconds=3 * 60,
shell=True,
verbose_success=True,
shortname="process perf report",
)
Scenario = collections.namedtuple("Scenario", "jobspec workers name")
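# Fields: jobspec (driver job to run), workers (QpsWorkerJob list it needs), name (scenario display name).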
def create_scenarios(
languages,
workers_by_lang,
remote_host=None,
regex=".*",
category="all",
bq_result_table=None,
netperf=False,
netperf_hosts=[],
server_cpu_load=0,
):
"""Create jobspecs for scenarios to run."""
all_workers = [
worker
for workers in list(workers_by_lang.values())
for worker in workers
]
scenarios = []
_NO_WORKERS = []
if netperf:
if not netperf_hosts:
netperf_server = "localhost"
netperf_client = None
elif len(netperf_hosts) == 1:
netperf_server = netperf_hosts[0]
netperf_client = netperf_hosts[0]
else:
netperf_server = netperf_hosts[0]
netperf_client = netperf_hosts[1]
scenarios.append(
Scenario(
create_netperf_jobspec(
server_host=netperf_server,
client_host=netperf_client,
bq_result_table=bq_result_table,
),
_NO_WORKERS,
"netperf",
)
)
for language in languages:
for scenario_json in language.scenarios():
if re.search(regex, scenario_json["name"]):
categories = scenario_json.get(
"CATEGORIES", ["scalable", "smoketest"]
)
if category in categories or category == "all":
workers = workers_by_lang[str(language)][:]
# 'SERVER_LANGUAGE' is an indicator for this script to pick
                    # a server in a different language.
custom_server_lang = scenario_json.get(
"SERVER_LANGUAGE", None
)
custom_client_lang = scenario_json.get(
"CLIENT_LANGUAGE", None
)
scenario_json = scenario_config.remove_nonproto_fields(
scenario_json
)
if custom_server_lang and custom_client_lang:
raise Exception(
"Cannot set both custom CLIENT_LANGUAGE and"
" SERVER_LANGUAGEin the same scenario"
)
if custom_server_lang:
if not workers_by_lang.get(custom_server_lang, []):
print(
"Warning: Skipping scenario %s as"
% scenario_json["name"]
)
print(
"SERVER_LANGUAGE is set to %s yet the language"
" has not been selected with -l"
% custom_server_lang
)
continue
for idx in range(0, scenario_json["num_servers"]):
                            # replace the first num_servers workers with workers of a different language
workers[idx] = workers_by_lang[custom_server_lang][
idx
]
if custom_client_lang:
if not workers_by_lang.get(custom_client_lang, []):
print(
"Warning: Skipping scenario %s as"
% scenario_json["name"]
)
print(
"CLIENT_LANGUAGE is set to %s yet the language"
" has not been selected with -l"
% custom_client_lang
)
continue
for idx in range(
scenario_json["num_servers"], len(workers)
):
                            # replace all client workers with workers of a different language;
                            # leave the first num_servers workers as-is, since they are server workers.
workers[idx] = workers_by_lang[custom_client_lang][
idx
]
scenario = Scenario(
create_scenario_jobspec(
scenario_json,
[w.host_and_port for w in workers],
remote_host=remote_host,
bq_result_table=bq_result_table,
server_cpu_load=server_cpu_load,
),
workers,
scenario_json["name"],
)
scenarios.append(scenario)
return scenarios
def finish_qps_workers(jobs, qpsworker_jobs):
"""Waits for given jobs to finish and eventually kills them."""
retries = 0
num_killed = 0
while any(job.is_running() for job in jobs):
for job in qpsworker_jobs:
if job.is_running():
print('QPS worker "%s" is still running.' % job.host_and_port)
if retries > 10:
print("Killing all QPS workers.")
for job in jobs:
job.kill()
num_killed += 1
retries += 1
time.sleep(3)
print("All QPS workers finished.")
return num_killed
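# Names of the generated flamegraph .svg files, collected across scenarios and
# later used to render the index.html report.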
profile_output_files = []
# Collect perf text reports and flamegraphs if perf_cmd was used
# Note the base names of perf text reports are used when creating and processing
# perf data. The scenario name makes the output name unique in the final
# perf reports directory.
# Also, the perf profiles need to be fetched and processed after each scenario
# in order to avoid clobbering the output files.
def run_collect_perf_profile_jobs(
hosts_and_base_names, scenario_name, flame_graph_reports
):
perf_report_jobs = []
global profile_output_files
for host_and_port in hosts_and_base_names:
perf_base_name = hosts_and_base_names[host_and_port]
output_filename = "%s-%s" % (scenario_name, perf_base_name)
# from the base filename, create .svg output filename
host = host_and_port.split(":")[0]
profile_output_files.append("%s.svg" % output_filename)
perf_report_jobs.append(
perf_report_processor_job(
host, perf_base_name, output_filename, flame_graph_reports
)
)
jobset.message(
"START", "Collecting perf reports from qps workers", do_newline=True
)
failures, _ = jobset.run(
perf_report_jobs, newline_on_success=True, maxjobs=1
)
jobset.message(
"SUCCESS", "Collecting perf reports from qps workers", do_newline=True
)
return failures
def main():
argp = argparse.ArgumentParser(description="Run performance tests.")
argp.add_argument(
"-l",
"--language",
choices=["all"] + sorted(scenario_config.LANGUAGES.keys()),
nargs="+",
required=True,
help="Languages to benchmark.",
)
argp.add_argument(
"--remote_driver_host",
default=None,
help=(
"Run QPS driver on given host. By default, QPS driver is run"
" locally."
),
)
argp.add_argument(
"--remote_worker_host",
nargs="+",
default=[],
help="Worker hosts where to start QPS workers.",
)
argp.add_argument(
"--dry_run",
default=False,
action="store_const",
const=True,
help="Just list scenarios to be run, but don't run them.",
)
argp.add_argument(
"-r",
"--regex",
default=".*",
type=str,
help="Regex to select scenarios to run.",
)
argp.add_argument(
"--bq_result_table",
default=None,
type=str,
help='Bigquery "dataset.table" to upload results to.',
)
argp.add_argument(
"--category",
choices=["smoketest", "all", "scalable", "sweep"],
default="all",
help="Select a category of tests to run.",
)
argp.add_argument(
"--netperf",
default=False,
action="store_const",
const=True,
help="Run netperf benchmark as one of the scenarios.",
)
argp.add_argument(
"--server_cpu_load",
default=0,
type=int,
help=(
"Select a targeted server cpu load to run. 0 means ignore this flag"
),
)
argp.add_argument(
"-x",
"--xml_report",
default="report.xml",
type=str,
help="Name of XML report file to generate.",
)
argp.add_argument(
"--perf_args",
help=(
'Example usage: "--perf_args=record -F 99 -g". '
"Wrap QPS workers in a perf command "
"with the arguments to perf specified here. "
'".svg" flame graph profiles will be '
"created for each Qps Worker on each scenario. "
'Files will output to "<repo_root>/<args.flame_graph_reports>" '
"directory. Output files from running the worker "
"under perf are saved in the repo root where its ran. "
'Note that the perf "-g" flag is necessary for '
"flame graphs generation to work (assuming the binary "
"being profiled uses frame pointers, check out "
'"--call-graph dwarf" option using libunwind otherwise.) '
'Also note that the entire "--perf_args=<arg(s)>" must '
"be wrapped in quotes as in the example usage. "
'If the "--perg_args" is unspecified, "perf" will '
"not be used at all. "
"See http://www.brendangregg.com/perf.html "
"for more general perf examples."
),
)
argp.add_argument(
"--skip_generate_flamegraphs",
default=False,
action="store_const",
const=True,
help=(
"Turn flame graph generation off. "
'May be useful if "perf_args" arguments do not make sense for '
'generating flamegraphs (e.g., "--perf_args=stat ...")'
),
)
argp.add_argument(
"-f",
"--flame_graph_reports",
default="perf_reports",
type=str,
help=(
"Name of directory to output flame graph profiles to, if any are"
" created."
),
)
argp.add_argument(
"-u",
"--remote_host_username",
default="",
type=str,
help='Use a username that isn\'t "Jenkins" to SSH into remote workers.',
)
args = argp.parse_args()
global _REMOTE_HOST_USERNAME
if args.remote_host_username:
_REMOTE_HOST_USERNAME = args.remote_host_username
languages = set(
scenario_config.LANGUAGES[l]
for l in itertools.chain.from_iterable(
six.iterkeys(scenario_config.LANGUAGES) if x == "all" else [x]
for x in args.language
)
)
# Put together set of remote hosts where to run and build
remote_hosts = set()
if args.remote_worker_host:
for host in args.remote_worker_host:
remote_hosts.add(host)
if args.remote_driver_host:
remote_hosts.add(args.remote_driver_host)
if not args.dry_run:
if remote_hosts:
archive_repo(languages=[str(l) for l in languages])
prepare_remote_hosts(remote_hosts, prepare_local=True)
else:
prepare_remote_hosts([], prepare_local=True)
build_local = False
if not args.remote_driver_host:
build_local = True
if not args.dry_run:
build_on_remote_hosts(
remote_hosts,
languages=[str(l) for l in languages],
build_local=build_local,
)
perf_cmd = None
if args.perf_args:
print("Running workers under perf profiler")
# Expect /usr/bin/perf to be installed here, as is usual
perf_cmd = ["/usr/bin/perf"]
        perf_cmd.extend(re.split(r"\s+", args.perf_args))
qpsworker_jobs = create_qpsworkers(
languages, args.remote_worker_host, perf_cmd=perf_cmd
)
# get list of worker addresses for each language.
workers_by_lang = dict([(str(language), []) for language in languages])
for job in qpsworker_jobs:
workers_by_lang[str(job.language)].append(job)
scenarios = create_scenarios(
languages,
workers_by_lang=workers_by_lang,
remote_host=args.remote_driver_host,
regex=args.regex,
category=args.category,
bq_result_table=args.bq_result_table,
netperf=args.netperf,
netperf_hosts=args.remote_worker_host,
server_cpu_load=args.server_cpu_load,
)
if not scenarios:
raise Exception("No scenarios to run")
total_scenario_failures = 0
qps_workers_killed = 0
merged_resultset = {}
perf_report_failures = 0
for scenario in scenarios:
if args.dry_run:
print(scenario.name)
else:
scenario_failures = 0
try:
for worker in scenario.workers:
worker.start()
jobs = [scenario.jobspec]
if scenario.workers:
# TODO(jtattermusch): ideally the "quit" job won't show up
# in the report
jobs.append(
create_quit_jobspec(
scenario.workers,
remote_host=args.remote_driver_host,
)
)
scenario_failures, resultset = jobset.run(
jobs, newline_on_success=True, maxjobs=1
)
total_scenario_failures += scenario_failures
merged_resultset = dict(
itertools.chain(
six.iteritems(merged_resultset),
six.iteritems(resultset),
)
)
finally:
# Consider qps workers that need to be killed as failures
qps_workers_killed += finish_qps_workers(
scenario.workers, qpsworker_jobs
)
if (
perf_cmd
and scenario_failures == 0
and not args.skip_generate_flamegraphs
):
workers_and_base_names = {}
for worker in scenario.workers:
if not worker.perf_file_base_name:
raise Exception(
"using perf buf perf report filename is unspecified"
)
workers_and_base_names[
worker.host_and_port
] = worker.perf_file_base_name
perf_report_failures += run_collect_perf_profile_jobs(
workers_and_base_names,
scenario.name,
args.flame_graph_reports,
)
# Still write the index.html even if some scenarios failed.
# 'profile_output_files' will only have names for scenarios that passed
if perf_cmd and not args.skip_generate_flamegraphs:
        # write the index file to the output dir, with all profiles from all scenarios/workers
report_utils.render_perf_profiling_results(
"%s/index.html" % args.flame_graph_reports, profile_output_files
)
report_utils.render_junit_xml_report(
merged_resultset,
args.xml_report,
suite_name="benchmarks",
multi_target=True,
)
if total_scenario_failures > 0 or qps_workers_killed > 0:
print(
"%s scenarios failed and %s qps worker jobs killed"
% (total_scenario_failures, qps_workers_killed)
)
sys.exit(1)
if perf_report_failures > 0:
print("%s perf profile collection jobs failed" % perf_report_failures)
sys.exit(1)
if __name__ == "__main__":
main()
| 28,981
| 33.136631
| 93
|
py
|
grpc
|
grpc-master/tools/run_tests/run_interop_tests.py
|
#!/usr/bin/env python3
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run interop (cross-language) tests in parallel."""
from __future__ import print_function
import argparse
import atexit
import itertools
import json
import multiprocessing
import os
import re
import subprocess
import sys
import tempfile
import time
import traceback
import uuid
import six
import python_utils.dockerjob as dockerjob
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
# It's OK if this import fails; it is only needed for uploading results to BQ.
try:
from python_utils.upload_test_results import upload_interop_results_to_bq
except ImportError as e:
print(e)
# Docker can leave the terminal in a bad state (e.g. with echo disabled), so restore it on exit.
atexit.register(lambda: subprocess.call(["stty", "echo"]))
ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../.."))
os.chdir(ROOT)
_DEFAULT_SERVER_PORT = 8080
_SKIP_CLIENT_COMPRESSION = [
"client_compressed_unary",
"client_compressed_streaming",
]
_SKIP_SERVER_COMPRESSION = [
"server_compressed_unary",
"server_compressed_streaming",
]
_SKIP_COMPRESSION = _SKIP_CLIENT_COMPRESSION + _SKIP_SERVER_COMPRESSION
_SKIP_ADVANCED = [
"status_code_and_message",
"custom_metadata",
"unimplemented_method",
"unimplemented_service",
]
_SKIP_SPECIAL_STATUS_MESSAGE = ["special_status_message"]
_ORCA_TEST_CASES = ["orca_per_rpc", "orca_oob"]
_GOOGLE_DEFAULT_CREDS_TEST_CASE = "google_default_credentials"
_SKIP_GOOGLE_DEFAULT_CREDS = [
_GOOGLE_DEFAULT_CREDS_TEST_CASE,
]
_COMPUTE_ENGINE_CHANNEL_CREDS_TEST_CASE = "compute_engine_channel_credentials"
_SKIP_COMPUTE_ENGINE_CHANNEL_CREDS = [
_COMPUTE_ENGINE_CHANNEL_CREDS_TEST_CASE,
]
_TEST_TIMEOUT = 3 * 60
# disable this test on core-based languages,
# see https://github.com/grpc/grpc/issues/9779
_SKIP_DATA_FRAME_PADDING = ["data_frame_padding"]
# report suffix "sponge_log.xml" is important for reports to get picked up by internal CI
_DOCKER_BUILD_XML_REPORT = "interop_docker_build/sponge_log.xml"
_TESTS_XML_REPORT = "interop_test/sponge_log.xml"
class CXXLanguage:
def __init__(self):
self.client_cwd = None
self.server_cwd = None
self.http2_cwd = None
self.safename = "cxx"
def client_cmd(self, args):
return ["cmake/build/interop_client"] + args
def client_cmd_http2interop(self, args):
return ["cmake/build/http2_client"] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ["cmake/build/interop_server"] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return (
_SKIP_DATA_FRAME_PADDING
+ _SKIP_SPECIAL_STATUS_MESSAGE
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
)
def unimplemented_test_cases_server(self):
return []
def __str__(self):
return "c++"
class AspNetCoreLanguage:
def __init__(self):
self.client_cwd = "../grpc-dotnet/output/InteropTestsClient"
self.server_cwd = "../grpc-dotnet/output/InteropTestsWebsite"
self.safename = str(self)
def cloud_to_prod_env(self):
return {}
def client_cmd(self, args):
return ["dotnet", "exec", "InteropTestsClient.dll"] + args
def server_cmd(self, args):
return ["dotnet", "exec", "InteropTestsWebsite.dll"] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return (
_SKIP_GOOGLE_DEFAULT_CREDS
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
+ _ORCA_TEST_CASES
)
def unimplemented_test_cases_server(self):
return _ORCA_TEST_CASES
def __str__(self):
return "aspnetcore"
class DartLanguage:
def __init__(self):
self.client_cwd = "../grpc-dart/interop"
self.server_cwd = "../grpc-dart/interop"
self.http2_cwd = "../grpc-dart/interop"
self.safename = str(self)
def client_cmd(self, args):
return ["dart", "bin/client.dart"] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ["dart", "bin/server.dart"] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return (
_SKIP_COMPRESSION
+ _SKIP_SPECIAL_STATUS_MESSAGE
+ _SKIP_GOOGLE_DEFAULT_CREDS
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
+ _ORCA_TEST_CASES
)
def unimplemented_test_cases_server(self):
return (
_SKIP_COMPRESSION + _SKIP_SPECIAL_STATUS_MESSAGE + _ORCA_TEST_CASES
)
def __str__(self):
return "dart"
class JavaLanguage:
def __init__(self):
self.client_cwd = "../grpc-java"
self.server_cwd = "../grpc-java"
self.http2_cwd = "../grpc-java"
self.safename = str(self)
def client_cmd(self, args):
return ["./run-test-client.sh"] + args
def client_cmd_http2interop(self, args):
return [
"./interop-testing/build/install/grpc-interop-testing/bin/http2-client"
] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ["./run-test-server.sh"] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return []
def unimplemented_test_cases_server(self):
# Does not support CompressedRequest feature.
# Only supports CompressedResponse feature for unary.
return _SKIP_CLIENT_COMPRESSION + ["server_compressed_streaming"]
def __str__(self):
return "java"
class JavaOkHttpClient:
def __init__(self):
self.client_cwd = "../grpc-java"
self.safename = "java"
def client_cmd(self, args):
return ["./run-test-client.sh", "--use_okhttp=true"] + args
def cloud_to_prod_env(self):
return {}
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_DATA_FRAME_PADDING
def __str__(self):
return "javaokhttp"
class GoLanguage:
def __init__(self):
# TODO: this relies on running inside docker
self.client_cwd = "/go/src/google.golang.org/grpc/interop/client"
self.server_cwd = "/go/src/google.golang.org/grpc/interop/server"
self.http2_cwd = "/go/src/google.golang.org/grpc/interop/http2"
self.safename = str(self)
def client_cmd(self, args):
return ["go", "run", "client.go"] + args
def client_cmd_http2interop(self, args):
return ["go", "run", "negative_http2_client.go"] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ["go", "run", "server.go"] + args
def global_env(self):
return {"GO111MODULE": "on"}
def unimplemented_test_cases(self):
return _SKIP_COMPRESSION
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
def __str__(self):
return "go"
class Http2Server:
"""Represents the HTTP/2 Interop Test server
This pretends to be a language in order to be built and run, but really it
isn't.
"""
def __init__(self):
self.server_cwd = None
self.safename = str(self)
def server_cmd(self, args):
return ["python test/http2_test/http2_test_server.py"]
def cloud_to_prod_env(self):
return {}
def global_env(self):
return {}
def unimplemented_test_cases(self):
return (
_TEST_CASES
+ _SKIP_DATA_FRAME_PADDING
+ _SKIP_SPECIAL_STATUS_MESSAGE
+ _SKIP_GOOGLE_DEFAULT_CREDS
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
)
def unimplemented_test_cases_server(self):
return _TEST_CASES
def __str__(self):
return "http2"
class Http2Client:
"""Represents the HTTP/2 Interop Test
This pretends to be a language in order to be built and run, but really it
isn't.
"""
def __init__(self):
self.client_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return ["tools/http2_interop/http2_interop.test", "-test.v"] + args
def cloud_to_prod_env(self):
return {}
def global_env(self):
return {}
def unimplemented_test_cases(self):
return (
_TEST_CASES
+ _SKIP_SPECIAL_STATUS_MESSAGE
+ _SKIP_GOOGLE_DEFAULT_CREDS
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
)
def unimplemented_test_cases_server(self):
return _TEST_CASES
def __str__(self):
return "http2"
class NodeLanguage:
def __init__(self):
self.client_cwd = "../../../../home/appuser/grpc-node"
self.server_cwd = "../../../../home/appuser/grpc-node"
self.safename = str(self)
def client_cmd(self, args):
return [
"packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh",
"node",
"--require",
"./test/fixtures/native_native",
"test/interop/interop_client.js",
] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return [
"packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh",
"node",
"--require",
"./test/fixtures/native_native",
"test/interop/interop_server.js",
] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return (
_SKIP_COMPRESSION
+ _SKIP_DATA_FRAME_PADDING
+ _SKIP_GOOGLE_DEFAULT_CREDS
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
+ _ORCA_TEST_CASES
)
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION + _ORCA_TEST_CASES
def __str__(self):
return "node"
class NodePureJSLanguage:
def __init__(self):
self.client_cwd = "../../../../home/appuser/grpc-node"
self.server_cwd = "../../../../home/appuser/grpc-node"
self.safename = str(self)
def client_cmd(self, args):
return [
"packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh",
"node",
"--require",
"./test/fixtures/js_js",
"test/interop/interop_client.js",
] + args
def cloud_to_prod_env(self):
return {}
def global_env(self):
return {}
def unimplemented_test_cases(self):
return (
_SKIP_COMPRESSION
+ _SKIP_DATA_FRAME_PADDING
+ _SKIP_GOOGLE_DEFAULT_CREDS
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
+ _ORCA_TEST_CASES
)
def unimplemented_test_cases_server(self):
return _ORCA_TEST_CASES
def __str__(self):
return "nodepurejs"
class PHP7Language:
def __init__(self):
self.client_cwd = None
self.server_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return ["src/php/bin/interop_client.sh"] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ["src/php/bin/interop_server.sh"] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return (
_SKIP_SERVER_COMPRESSION
+ _SKIP_DATA_FRAME_PADDING
+ _SKIP_GOOGLE_DEFAULT_CREDS
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
+ _ORCA_TEST_CASES
)
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION + _ORCA_TEST_CASES
def __str__(self):
return "php7"
class ObjcLanguage:
def __init__(self):
self.client_cwd = "src/objective-c/tests"
self.safename = str(self)
def client_cmd(self, args):
# from args, extract the server port and craft xcodebuild command out of it
for arg in args:
port = re.search("--server_port=(\d+)", arg)
if port:
portnum = port.group(1)
cmdline = (
"pod install && xcodebuild -workspace Tests.xcworkspace"
' -scheme InteropTestsLocalSSL -destination name="iPhone 6"'
" HOST_PORT_LOCALSSL=localhost:%s test" % portnum
)
return [cmdline]
def cloud_to_prod_env(self):
return {}
def global_env(self):
return {}
def unimplemented_test_cases(self):
# ObjC test runs all cases with the same command. It ignores the testcase
        # cmdline argument. Here we return all but one test case as unimplemented,
# and depend upon ObjC test's behavior that it runs all cases even when
# we tell it to run just one.
return (
_TEST_CASES[1:]
+ _SKIP_COMPRESSION
+ _SKIP_DATA_FRAME_PADDING
+ _SKIP_SPECIAL_STATUS_MESSAGE
+ _SKIP_GOOGLE_DEFAULT_CREDS
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
+ _ORCA_TEST_CASES
)
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION + _ORCA_TEST_CASES
def __str__(self):
return "objc"
class RubyLanguage:
def __init__(self):
self.client_cwd = None
self.server_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return [
"tools/run_tests/interop/with_rvm.sh",
"ruby",
"src/ruby/pb/test/client.rb",
] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return [
"tools/run_tests/interop/with_rvm.sh",
"ruby",
"src/ruby/pb/test/server.rb",
] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return (
_SKIP_SERVER_COMPRESSION
+ _SKIP_DATA_FRAME_PADDING
+ _SKIP_SPECIAL_STATUS_MESSAGE
+ _SKIP_GOOGLE_DEFAULT_CREDS
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
+ _ORCA_TEST_CASES
)
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION + _ORCA_TEST_CASES
def __str__(self):
return "ruby"
_PYTHON_BINARY = "py39/bin/python"
class PythonLanguage:
def __init__(self):
self.client_cwd = None
self.server_cwd = None
self.http2_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return [
_PYTHON_BINARY,
"src/python/grpcio_tests/setup.py",
"run_interop",
"--client",
'--args="{}"'.format(" ".join(args)),
]
def client_cmd_http2interop(self, args):
return [
_PYTHON_BINARY,
"src/python/grpcio_tests/tests/http2/negative_http2_client.py",
] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return [
_PYTHON_BINARY,
"src/python/grpcio_tests/setup.py",
"run_interop",
"--server",
'--args="{}"'.format(" ".join(args)),
]
def global_env(self):
return {
"LD_LIBRARY_PATH": "{}/libs/opt".format(DOCKER_WORKDIR_ROOT),
"PYTHONPATH": "{}/src/python/gens".format(DOCKER_WORKDIR_ROOT),
}
def unimplemented_test_cases(self):
return (
_SKIP_COMPRESSION
+ _SKIP_DATA_FRAME_PADDING
+ _SKIP_GOOGLE_DEFAULT_CREDS
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
+ _ORCA_TEST_CASES
)
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION + _ORCA_TEST_CASES
def __str__(self):
return "python"
class PythonAsyncIOLanguage:
def __init__(self):
self.client_cwd = None
self.server_cwd = None
self.http2_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return [
_PYTHON_BINARY,
"src/python/grpcio_tests/setup.py",
"run_interop",
"--use-asyncio",
"--client",
'--args="{}"'.format(" ".join(args)),
]
def client_cmd_http2interop(self, args):
return [
_PYTHON_BINARY,
"src/python/grpcio_tests/tests/http2/negative_http2_client.py",
] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return [
_PYTHON_BINARY,
"src/python/grpcio_tests/setup.py",
"run_interop",
"--use-asyncio",
"--server",
'--args="{}"'.format(" ".join(args)),
]
def global_env(self):
return {
"LD_LIBRARY_PATH": "{}/libs/opt".format(DOCKER_WORKDIR_ROOT),
"PYTHONPATH": "{}/src/python/gens".format(DOCKER_WORKDIR_ROOT),
}
def unimplemented_test_cases(self):
# TODO(https://github.com/grpc/grpc/issues/21707)
return (
_SKIP_COMPRESSION
+ _SKIP_DATA_FRAME_PADDING
+ _AUTH_TEST_CASES
+ ["timeout_on_sleeping_server"]
+ _ORCA_TEST_CASES
)
def unimplemented_test_cases_server(self):
# TODO(https://github.com/grpc/grpc/issues/21749)
return (
_TEST_CASES
+ _AUTH_TEST_CASES
+ _HTTP2_TEST_CASES
+ _HTTP2_SERVER_TEST_CASES
)
def __str__(self):
return "pythonasyncio"
_LANGUAGES = {
"c++": CXXLanguage(),
"aspnetcore": AspNetCoreLanguage(),
"dart": DartLanguage(),
"go": GoLanguage(),
"java": JavaLanguage(),
"javaokhttp": JavaOkHttpClient(),
"node": NodeLanguage(),
"nodepurejs": NodePureJSLanguage(),
"php7": PHP7Language(),
"objc": ObjcLanguage(),
"ruby": RubyLanguage(),
"python": PythonLanguage(),
"pythonasyncio": PythonAsyncIOLanguage(),
}
# languages supported as cloud_to_cloud servers
_SERVERS = [
"c++",
"node",
"aspnetcore",
"java",
"go",
"ruby",
"python",
"dart",
"pythonasyncio",
"php7",
]
_TEST_CASES = [
"large_unary",
"empty_unary",
"ping_pong",
"empty_stream",
"client_streaming",
"server_streaming",
"cancel_after_begin",
"cancel_after_first_response",
"timeout_on_sleeping_server",
"custom_metadata",
"status_code_and_message",
"unimplemented_method",
"client_compressed_unary",
"server_compressed_unary",
"client_compressed_streaming",
"server_compressed_streaming",
"unimplemented_service",
"special_status_message",
"orca_per_rpc",
"orca_oob",
]
_AUTH_TEST_CASES = [
"compute_engine_creds",
"jwt_token_creds",
"oauth2_auth_token",
"per_rpc_creds",
_GOOGLE_DEFAULT_CREDS_TEST_CASE,
_COMPUTE_ENGINE_CHANNEL_CREDS_TEST_CASE,
]
_HTTP2_TEST_CASES = ["tls", "framing"]
_HTTP2_SERVER_TEST_CASES = [
"rst_after_header",
"rst_after_data",
"rst_during_data",
"goaway",
"ping",
"max_streams",
"data_frame_padding",
"no_df_padding_sanity_test",
]
_GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES = {
"data_frame_padding": "large_unary",
"no_df_padding_sanity_test": "large_unary",
}
_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS = list(
_GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES.keys()
)
_LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES = [
"java",
"go",
"python",
"c++",
]
_LANGUAGES_FOR_ALTS_TEST_CASES = ["java", "go", "c++", "python"]
_SERVERS_FOR_ALTS_TEST_CASES = ["java", "go", "c++", "python"]
_TRANSPORT_SECURITY_OPTIONS = ["tls", "alts", "insecure"]
_CUSTOM_CREDENTIALS_TYPE_OPTIONS = [
"tls",
"google_default_credentials",
"compute_engine_channel_creds",
]
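# Working directory inside the interop docker containers, where the grpc repo is expected to live.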
DOCKER_WORKDIR_ROOT = "/var/local/git/grpc"
def docker_run_cmdline(cmdline, image, docker_args=[], cwd=None, environ=None):
"""Wraps given cmdline array to create 'docker run' cmdline from it."""
# don't use '-t' even when TTY is available, since that would break
# the testcases generated by tools/interop_matrix/create_testcases.sh
docker_cmdline = ["docker", "run", "-i", "--rm=true"]
# turn environ into -e docker args
if environ:
for k, v in list(environ.items()):
docker_cmdline += ["-e", "%s=%s" % (k, v)]
# set working directory
workdir = DOCKER_WORKDIR_ROOT
if cwd:
workdir = os.path.join(workdir, cwd)
docker_cmdline += ["-w", workdir]
docker_cmdline += docker_args + [image] + cmdline
return docker_cmdline
def manual_cmdline(docker_cmdline, docker_image):
"""Returns docker cmdline adjusted for manual invocation."""
print_cmdline = []
for item in docker_cmdline:
if item.startswith("--name="):
continue
if item == docker_image:
item = "$docker_image"
item = item.replace('"', '\\"')
# add quotes when necessary
if any(character.isspace() for character in item):
item = '"%s"' % item
print_cmdline.append(item)
return " ".join(print_cmdline)
def write_cmdlog_maybe(cmdlog, filename):
"""Returns docker cmdline adjusted for manual invocation."""
if cmdlog:
with open(filename, "w") as logfile:
logfile.write("#!/bin/bash\n")
logfile.write("# DO NOT MODIFY\n")
logfile.write(
"# This file is generated by"
" run_interop_tests.py/create_testcases.sh\n"
)
logfile.writelines("%s\n" % line for line in cmdlog)
print("Command log written to file %s" % filename)
def bash_cmdline(cmdline):
"""Creates bash -c cmdline from args list."""
# Use login shell:
# * makes error messages clearer if executables are missing
return ["bash", "-c", " ".join(cmdline)]
def compute_engine_creds_required(language, test_case):
"""Returns True if given test requires access to compute engine creds."""
language = str(language)
if test_case == "compute_engine_creds":
return True
if test_case == "oauth2_auth_token" and language == "c++":
# C++ oauth2 test uses GCE creds because C++ only supports JWT
return True
return False
def auth_options(
language,
test_case,
google_default_creds_use_key_file,
service_account_key_file,
default_service_account,
):
"""Returns (cmdline, env) tuple with cloud_to_prod_auth test options."""
language = str(language)
cmdargs = []
env = {}
oauth_scope_arg = "--oauth_scope=https://www.googleapis.com/auth/xapi.zoo"
key_file_arg = "--service_account_key_file=%s" % service_account_key_file
default_account_arg = (
"--default_service_account=%s" % default_service_account
)
if test_case in ["jwt_token_creds", "per_rpc_creds", "oauth2_auth_token"]:
if language in [
"aspnetcore",
"node",
"php7",
"python",
"ruby",
"nodepurejs",
]:
env["GOOGLE_APPLICATION_CREDENTIALS"] = service_account_key_file
else:
cmdargs += [key_file_arg]
if test_case in ["per_rpc_creds", "oauth2_auth_token"]:
cmdargs += [oauth_scope_arg]
if test_case == "oauth2_auth_token" and language == "c++":
# C++ oauth2 test uses GCE creds and thus needs to know the default account
cmdargs += [default_account_arg]
if test_case == "compute_engine_creds":
cmdargs += [oauth_scope_arg, default_account_arg]
if test_case == _GOOGLE_DEFAULT_CREDS_TEST_CASE:
if google_default_creds_use_key_file:
env["GOOGLE_APPLICATION_CREDENTIALS"] = service_account_key_file
cmdargs += [default_account_arg]
if test_case == _COMPUTE_ENGINE_CHANNEL_CREDS_TEST_CASE:
cmdargs += [default_account_arg]
return (cmdargs, env)
def _job_kill_handler(job):
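    """Kill handler that also removes the docker container backing the job."""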
if job._spec.container_name:
dockerjob.docker_kill(job._spec.container_name)
# When the job times out and we decide to kill it,
        # we need to wait a bit before restarting the job
        # to prevent "container name already in use" error.
        # TODO(jtattermusch): figure out a cleaner way to do this.
time.sleep(2)
def cloud_to_prod_jobspec(
language,
test_case,
server_host_nickname,
server_host,
google_default_creds_use_key_file,
docker_image=None,
auth=False,
manual_cmd_log=None,
service_account_key_file=None,
default_service_account=None,
transport_security="tls",
):
"""Creates jobspec for cloud-to-prod interop test"""
container_name = None
cmdargs = [
"--server_host=%s" % server_host,
"--server_port=443",
"--test_case=%s" % test_case,
]
if transport_security == "tls":
transport_security_options = ["--use_tls=true"]
elif transport_security == "google_default_credentials" and str(
language
) in ["c++", "go", "java", "javaokhttp"]:
transport_security_options = [
"--custom_credentials_type=google_default_credentials"
]
elif transport_security == "compute_engine_channel_creds" and str(
language
) in ["go", "java", "javaokhttp"]:
transport_security_options = [
"--custom_credentials_type=compute_engine_channel_creds"
]
else:
print(
"Invalid transport security option %s in cloud_to_prod_jobspec."
" Lang: %s" % (str(language), transport_security)
)
sys.exit(1)
cmdargs = cmdargs + transport_security_options
environ = dict(language.cloud_to_prod_env(), **language.global_env())
if auth:
auth_cmdargs, auth_env = auth_options(
language,
test_case,
google_default_creds_use_key_file,
service_account_key_file,
default_service_account,
)
cmdargs += auth_cmdargs
environ.update(auth_env)
cmdline = bash_cmdline(language.client_cmd(cmdargs))
cwd = language.client_cwd
if docker_image:
container_name = dockerjob.random_name(
"interop_client_%s" % language.safename
)
cmdline = docker_run_cmdline(
cmdline,
image=docker_image,
cwd=cwd,
environ=environ,
docker_args=["--net=host", "--name=%s" % container_name],
)
if manual_cmd_log is not None:
if manual_cmd_log == []:
manual_cmd_log.append(
'echo "Testing ${docker_image:=%s}"' % docker_image
)
manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
cwd = None
environ = None
suite_name = "cloud_to_prod_auth" if auth else "cloud_to_prod"
test_job = jobset.JobSpec(
cmdline=cmdline,
cwd=cwd,
environ=environ,
shortname="%s:%s:%s:%s:%s"
% (
suite_name,
language,
server_host_nickname,
test_case,
transport_security,
),
timeout_seconds=_TEST_TIMEOUT,
flake_retries=4 if args.allow_flakes else 0,
timeout_retries=2 if args.allow_flakes else 0,
kill_handler=_job_kill_handler,
)
if docker_image:
test_job.container_name = container_name
return test_job
def cloud_to_cloud_jobspec(
language,
test_case,
server_name,
server_host,
server_port,
docker_image=None,
transport_security="tls",
manual_cmd_log=None,
):
"""Creates jobspec for cloud-to-cloud interop test"""
interop_only_options = [
"--server_host_override=foo.test.google.fr",
"--use_test_ca=true",
]
if transport_security == "tls":
interop_only_options += ["--use_tls=true"]
elif transport_security == "alts":
interop_only_options += ["--use_tls=false", "--use_alts=true"]
elif transport_security == "insecure":
interop_only_options += ["--use_tls=false"]
else:
print(
"Invalid transport security option %s in cloud_to_cloud_jobspec."
% transport_security
)
sys.exit(1)
client_test_case = test_case
if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
client_test_case = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES[
test_case
]
if client_test_case in language.unimplemented_test_cases():
print(
"asking client %s to run unimplemented test case %s"
% (repr(language), client_test_case)
)
sys.exit(1)
if test_case in _ORCA_TEST_CASES:
interop_only_options += [
'--service_config_json=\'{"loadBalancingConfig":[{"test_backend_metrics_load_balancer":{}}]}\''
]
common_options = [
"--test_case=%s" % client_test_case,
"--server_host=%s" % server_host,
"--server_port=%s" % server_port,
]
if test_case in _HTTP2_SERVER_TEST_CASES:
if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
client_options = interop_only_options + common_options
cmdline = bash_cmdline(language.client_cmd(client_options))
cwd = language.client_cwd
else:
cmdline = bash_cmdline(
language.client_cmd_http2interop(common_options)
)
cwd = language.http2_cwd
else:
cmdline = bash_cmdline(
language.client_cmd(common_options + interop_only_options)
)
cwd = language.client_cwd
environ = language.global_env()
if docker_image and language.safename != "objc":
# we can't run client in docker for objc.
container_name = dockerjob.random_name(
"interop_client_%s" % language.safename
)
cmdline = docker_run_cmdline(
cmdline,
image=docker_image,
environ=environ,
cwd=cwd,
docker_args=["--net=host", "--name=%s" % container_name],
)
if manual_cmd_log is not None:
if manual_cmd_log == []:
manual_cmd_log.append(
'echo "Testing ${docker_image:=%s}"' % docker_image
)
manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
cwd = None
test_job = jobset.JobSpec(
cmdline=cmdline,
cwd=cwd,
environ=environ,
shortname="cloud_to_cloud:%s:%s_server:%s:%s"
% (language, server_name, test_case, transport_security),
timeout_seconds=_TEST_TIMEOUT,
flake_retries=4 if args.allow_flakes else 0,
timeout_retries=2 if args.allow_flakes else 0,
kill_handler=_job_kill_handler,
)
if docker_image:
test_job.container_name = container_name
return test_job
def server_jobspec(
language, docker_image, transport_security="tls", manual_cmd_log=None
):
"""Create jobspec for running a server"""
container_name = dockerjob.random_name(
"interop_server_%s" % language.safename
)
server_cmd = ["--port=%s" % _DEFAULT_SERVER_PORT]
if transport_security == "tls":
server_cmd += ["--use_tls=true"]
elif transport_security == "alts":
server_cmd += ["--use_tls=false", "--use_alts=true"]
elif transport_security == "insecure":
server_cmd += ["--use_tls=false"]
else:
print(
"Invalid transport security option %s in server_jobspec."
% transport_security
)
sys.exit(1)
cmdline = bash_cmdline(language.server_cmd(server_cmd))
environ = language.global_env()
docker_args = ["--name=%s" % container_name]
if language.safename == "http2":
# we are running the http2 interop server. Open next N ports beginning
# with the server port. These ports are used for http2 interop test
# (one test case per port).
docker_args += list(
itertools.chain.from_iterable(
("-p", str(_DEFAULT_SERVER_PORT + i))
for i in range(len(_HTTP2_SERVER_TEST_CASES))
)
)
# Enable docker's healthcheck mechanism.
# This runs a Python script inside the container every second. The script
# pings the http2 server to verify it is ready. The 'health-retries' flag
# specifies the number of consecutive failures before docker will report
# the container's status as 'unhealthy'. Prior to the first 'health_retries'
# failures or the first success, the status will be 'starting'. 'docker ps'
# or 'docker inspect' can be used to see the health of the container on the
# command line.
docker_args += [
"--health-cmd=python test/http2_test/http2_server_health_check.py "
"--server_host=%s --server_port=%d"
% ("localhost", _DEFAULT_SERVER_PORT),
"--health-interval=1s",
"--health-retries=5",
"--health-timeout=10s",
]
else:
docker_args += ["-p", str(_DEFAULT_SERVER_PORT)]
docker_cmdline = docker_run_cmdline(
cmdline,
image=docker_image,
cwd=language.server_cwd,
environ=environ,
docker_args=docker_args,
)
if manual_cmd_log is not None:
if manual_cmd_log == []:
manual_cmd_log.append(
'echo "Testing ${docker_image:=%s}"' % docker_image
)
manual_cmd_log.append(manual_cmdline(docker_cmdline, docker_image))
server_job = jobset.JobSpec(
cmdline=docker_cmdline,
environ=environ,
shortname="interop_server_%s" % language,
timeout_seconds=30 * 60,
)
server_job.container_name = container_name
return server_job
def build_interop_image_jobspec(language, tag=None):
"""Creates jobspec for building interop docker image for a language"""
if not tag:
tag = "grpc_interop_%s:%s" % (language.safename, uuid.uuid4())
env = {
"INTEROP_IMAGE": tag,
"BASE_NAME": "grpc_interop_%s" % language.safename,
}
build_job = jobset.JobSpec(
cmdline=["tools/run_tests/dockerize/build_interop_image.sh"],
environ=env,
shortname="build_docker_%s" % (language),
timeout_seconds=30 * 60,
)
build_job.tag = tag
return build_job
def aggregate_http2_results(stdout):
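    """Parse the http2 interop client's stdout into a pass/fail summary.

    The client prints a JSON blob whose shape (illustrative only, inferred
    from the parsing below) looks like:
        {"cases": [{"name": "...", "passed": true, "skipped": false}, ...]}
    Returns a dict of passed/failed/skipped counts and the failed case names,
    or None if no such blob is found in the output.
    """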
match = re.search(r'\{"cases[^\]]*\]\}', stdout)
if not match:
return None
results = json.loads(match.group(0))
skipped = 0
passed = 0
failed = 0
failed_cases = []
for case in results["cases"]:
if case.get("skipped", False):
skipped += 1
else:
if case.get("passed", False):
passed += 1
else:
failed += 1
failed_cases.append(case.get("name", "NONAME"))
return {
"passed": passed,
"failed": failed,
"skipped": skipped,
"failed_cases": ", ".join(failed_cases),
"percent": 1.0 * passed / (passed + failed),
}
# A dictionary of prod servers to test against.
# See go/grpc-interop-tests (internal-only) for details.
prod_servers = {
"default": "grpc-test.sandbox.googleapis.com",
"gateway_v4": "grpc-test4.sandbox.googleapis.com",
}
argp = argparse.ArgumentParser(description="Run interop tests.")
argp.add_argument(
"-l",
"--language",
choices=["all"] + sorted(_LANGUAGES),
nargs="+",
default=["all"],
help="Clients to run. Objc client can be only run on OSX.",
)
argp.add_argument("-j", "--jobs", default=multiprocessing.cpu_count(), type=int)
argp.add_argument(
"--cloud_to_prod",
default=False,
action="store_const",
const=True,
help="Run cloud_to_prod tests.",
)
argp.add_argument(
"--cloud_to_prod_auth",
default=False,
action="store_const",
const=True,
help="Run cloud_to_prod_auth tests.",
)
argp.add_argument(
"--google_default_creds_use_key_file",
default=False,
action="store_const",
const=True,
help=(
"Whether or not we should use a key file for the "
"google_default_credentials test case, e.g. by "
"setting env var GOOGLE_APPLICATION_CREDENTIALS."
),
)
argp.add_argument(
"--prod_servers",
choices=list(prod_servers.keys()),
default=["default"],
nargs="+",
help=(
"The servers to run cloud_to_prod and cloud_to_prod_auth tests against."
),
)
argp.add_argument(
"-s",
"--server",
choices=["all"] + sorted(_SERVERS),
nargs="+",
help="Run cloud_to_cloud servers in a separate docker "
+ "image. Servers can only be started automatically if "
+ "--use_docker option is enabled.",
default=[],
)
argp.add_argument(
"--override_server",
action="append",
type=lambda kv: kv.split("="),
help=(
"Use servername=HOST:PORT to explicitly specify a server. E.g."
" csharp=localhost:50000"
),
default=[],
)
# TODO(jtattermusch): the default service_account_key_file only works when --use_docker is used.
argp.add_argument(
"--service_account_key_file",
type=str,
help="The service account key file to use for some auth interop tests.",
default="/root/service_account/grpc-testing-ebe7c1ac7381.json",
)
argp.add_argument(
"--default_service_account",
type=str,
help=(
"Default GCE service account email to use for some auth interop tests."
),
default="830293263384-compute@developer.gserviceaccount.com",
)
argp.add_argument(
"-t",
"--travis",
default=False,
action="store_const",
const=True,
help=(
"When set, indicates that the script is running on CI (= not locally)."
),
)
argp.add_argument(
"-v", "--verbose", default=False, action="store_const", const=True
)
argp.add_argument(
"--use_docker",
default=False,
action="store_const",
const=True,
help="Run all the interop tests under docker. That provides "
+ "additional isolation and prevents the need to install "
+ "language specific prerequisites. Only available on Linux.",
)
argp.add_argument(
"--allow_flakes",
default=False,
action="store_const",
const=True,
help=(
"Allow flaky tests to show as passing (re-runs failed tests up to five"
" times)"
),
)
argp.add_argument(
"--manual_run",
default=False,
action="store_const",
const=True,
help="Prepare things for running interop tests manually. "
+ "Preserve docker images after building them and skip "
"actually running the tests. Only print commands to run by " + "hand.",
)
argp.add_argument(
"--http2_interop",
default=False,
action="store_const",
const=True,
help="Enable HTTP/2 client edge case testing. (Bad client, good server)",
)
argp.add_argument(
"--http2_server_interop",
default=False,
action="store_const",
const=True,
help=(
"Enable HTTP/2 server edge case testing. (Includes positive and"
" negative tests"
),
)
argp.add_argument(
"--transport_security",
choices=_TRANSPORT_SECURITY_OPTIONS,
default="tls",
type=str,
nargs="?",
const=True,
help="Which transport security mechanism to use.",
)
argp.add_argument(
"--custom_credentials_type",
choices=_CUSTOM_CREDENTIALS_TYPE_OPTIONS,
default=_CUSTOM_CREDENTIALS_TYPE_OPTIONS,
nargs="+",
help=(
"Credential types to test in the cloud_to_prod setup. Default is to"
" test with all creds types possible."
),
)
argp.add_argument(
"--skip_compute_engine_creds",
default=False,
action="store_const",
const=True,
help="Skip auth tests requiring access to compute engine credentials.",
)
argp.add_argument(
"--internal_ci",
default=False,
action="store_const",
const=True,
help=(
"(Deprecated, has no effect) Put reports into subdirectories to improve"
" presentation of results by Internal CI."
),
)
argp.add_argument(
"--bq_result_table",
default="",
type=str,
nargs="?",
help="Upload test results to a specified BQ table.",
)
args = argp.parse_args()
servers = set(
s
for s in itertools.chain.from_iterable(
_SERVERS if x == "all" else [x] for x in args.server
)
)
# ALTS servers are only available for certain languages.
if args.transport_security == "alts":
servers = servers.intersection(_SERVERS_FOR_ALTS_TEST_CASES)
if args.use_docker:
if not args.travis:
print("Seen --use_docker flag, will run interop tests under docker.")
print("")
print(
"IMPORTANT: The changes you are testing need to be locally"
" committed"
)
print(
"because only the committed changes in the current branch will be"
)
print("copied to the docker environment.")
time.sleep(5)
if args.manual_run and not args.use_docker:
print("--manual_run is only supported with --use_docker option enabled.")
sys.exit(1)
if not args.use_docker and servers:
print(
"Running interop servers is only supported with --use_docker option"
" enabled."
)
sys.exit(1)
# we want to include everything but objc in 'all'
# because objc won't run on non-mac platforms
all_but_objc = set(six.iterkeys(_LANGUAGES)) - set(["objc"])
languages = set(
_LANGUAGES[l]
for l in itertools.chain.from_iterable(
all_but_objc if x == "all" else [x] for x in args.language
)
)
# ALTS interop clients are only available for certain languages.
if args.transport_security == "alts":
alts_languages = set(_LANGUAGES[l] for l in _LANGUAGES_FOR_ALTS_TEST_CASES)
languages = languages.intersection(alts_languages)
languages_http2_clients_for_http2_server_interop = set()
if args.http2_server_interop:
languages_http2_clients_for_http2_server_interop = set(
_LANGUAGES[l]
for l in _LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES
if "all" in args.language or l in args.language
)
http2Interop = Http2Client() if args.http2_interop else None
http2InteropServer = Http2Server() if args.http2_server_interop else None
docker_images = {}
if args.use_docker:
# languages for which to build docker images
languages_to_build = set(
_LANGUAGES[k]
for k in set([str(l) for l in languages] + [s for s in servers])
)
languages_to_build = (
languages_to_build | languages_http2_clients_for_http2_server_interop
)
if args.http2_interop:
languages_to_build.add(http2Interop)
if args.http2_server_interop:
languages_to_build.add(http2InteropServer)
build_jobs = []
for l in languages_to_build:
if str(l) == "objc":
# we don't need to build a docker image for objc
continue
job = build_interop_image_jobspec(l)
docker_images[str(l)] = job.tag
build_jobs.append(job)
if build_jobs:
jobset.message(
"START", "Building interop docker images.", do_newline=True
)
if args.verbose:
print("Jobs to run: \n%s\n" % "\n".join(str(j) for j in build_jobs))
num_failures, build_resultset = jobset.run(
build_jobs, newline_on_success=True, maxjobs=args.jobs
)
report_utils.render_junit_xml_report(
build_resultset, _DOCKER_BUILD_XML_REPORT
)
if num_failures == 0:
jobset.message(
"SUCCESS",
"All docker images built successfully.",
do_newline=True,
)
else:
jobset.message(
"FAILED",
"Failed to build interop docker images.",
do_newline=True,
)
for image in six.itervalues(docker_images):
dockerjob.remove_image(image, skip_nonexistent=True)
sys.exit(1)
server_manual_cmd_log = [] if args.manual_run else None
client_manual_cmd_log = [] if args.manual_run else None
# Start interop servers.
server_jobs = {}
server_addresses = {}
try:
for s in servers:
lang = str(s)
spec = server_jobspec(
_LANGUAGES[lang],
docker_images.get(lang),
args.transport_security,
manual_cmd_log=server_manual_cmd_log,
)
if not args.manual_run:
job = dockerjob.DockerJob(spec)
server_jobs[lang] = job
server_addresses[lang] = (
"localhost",
job.mapped_port(_DEFAULT_SERVER_PORT),
)
else:
# don't run the server, set server port to a placeholder value
server_addresses[lang] = ("localhost", "${SERVER_PORT}")
http2_server_job = None
if args.http2_server_interop:
# launch a HTTP2 server emulator that creates edge cases
lang = str(http2InteropServer)
spec = server_jobspec(
http2InteropServer,
docker_images.get(lang),
manual_cmd_log=server_manual_cmd_log,
)
if not args.manual_run:
http2_server_job = dockerjob.DockerJob(spec)
server_jobs[lang] = http2_server_job
else:
# don't run the server, set server port to a placeholder value
server_addresses[lang] = ("localhost", "${SERVER_PORT}")
jobs = []
if args.cloud_to_prod:
if args.transport_security not in ["tls"]:
print("TLS is always enabled for cloud_to_prod scenarios.")
for server_host_nickname in args.prod_servers:
for language in languages:
for test_case in _TEST_CASES:
if not test_case in language.unimplemented_test_cases():
if (
not test_case
in _SKIP_ADVANCED
+ _SKIP_COMPRESSION
+ _SKIP_SPECIAL_STATUS_MESSAGE
+ _ORCA_TEST_CASES
):
for (
transport_security
) in args.custom_credentials_type:
# google_default_credentials not yet supported by all languages
if (
transport_security
== "google_default_credentials"
and str(language)
not in ["c++", "go", "java", "javaokhttp"]
):
continue
# compute_engine_channel_creds not yet supported by all languages
if (
transport_security
== "compute_engine_channel_creds"
and str(language)
not in ["go", "java", "javaokhttp"]
):
continue
test_job = cloud_to_prod_jobspec(
language,
test_case,
server_host_nickname,
prod_servers[server_host_nickname],
google_default_creds_use_key_file=args.google_default_creds_use_key_file,
docker_image=docker_images.get(
str(language)
),
manual_cmd_log=client_manual_cmd_log,
service_account_key_file=args.service_account_key_file,
default_service_account=args.default_service_account,
transport_security=transport_security,
)
jobs.append(test_job)
if args.http2_interop:
for test_case in _HTTP2_TEST_CASES:
test_job = cloud_to_prod_jobspec(
http2Interop,
test_case,
server_host_nickname,
prod_servers[server_host_nickname],
google_default_creds_use_key_file=args.google_default_creds_use_key_file,
docker_image=docker_images.get(str(http2Interop)),
manual_cmd_log=client_manual_cmd_log,
service_account_key_file=args.service_account_key_file,
default_service_account=args.default_service_account,
transport_security=args.transport_security,
)
jobs.append(test_job)
if args.cloud_to_prod_auth:
if args.transport_security not in ["tls"]:
print("TLS is always enabled for cloud_to_prod scenarios.")
for server_host_nickname in args.prod_servers:
for language in languages:
for test_case in _AUTH_TEST_CASES:
if (
not args.skip_compute_engine_creds
or not compute_engine_creds_required(
language, test_case
)
):
if not test_case in language.unimplemented_test_cases():
if test_case == _GOOGLE_DEFAULT_CREDS_TEST_CASE:
transport_security = (
"google_default_credentials"
)
elif (
test_case
== _COMPUTE_ENGINE_CHANNEL_CREDS_TEST_CASE
):
transport_security = (
"compute_engine_channel_creds"
)
else:
transport_security = "tls"
if (
transport_security
not in args.custom_credentials_type
):
continue
test_job = cloud_to_prod_jobspec(
language,
test_case,
server_host_nickname,
prod_servers[server_host_nickname],
google_default_creds_use_key_file=args.google_default_creds_use_key_file,
docker_image=docker_images.get(str(language)),
auth=True,
manual_cmd_log=client_manual_cmd_log,
service_account_key_file=args.service_account_key_file,
default_service_account=args.default_service_account,
transport_security=transport_security,
)
jobs.append(test_job)
for server in args.override_server:
server_name = server[0]
(server_host, server_port) = server[1].split(":")
server_addresses[server_name] = (server_host, server_port)
for server_name, server_address in list(server_addresses.items()):
(server_host, server_port) = server_address
server_language = _LANGUAGES.get(server_name, None)
skip_server = [] # test cases unimplemented by server
if server_language:
skip_server = server_language.unimplemented_test_cases_server()
for language in languages:
for test_case in _TEST_CASES:
if not test_case in language.unimplemented_test_cases():
if not test_case in skip_server:
test_job = cloud_to_cloud_jobspec(
language,
test_case,
server_name,
server_host,
server_port,
docker_image=docker_images.get(str(language)),
transport_security=args.transport_security,
manual_cmd_log=client_manual_cmd_log,
)
jobs.append(test_job)
if args.http2_interop:
for test_case in _HTTP2_TEST_CASES:
if server_name == "go":
# TODO(carl-mastrangelo): Reenable after https://github.com/grpc/grpc-go/issues/434
continue
test_job = cloud_to_cloud_jobspec(
http2Interop,
test_case,
server_name,
server_host,
server_port,
docker_image=docker_images.get(str(http2Interop)),
transport_security=args.transport_security,
manual_cmd_log=client_manual_cmd_log,
)
jobs.append(test_job)
if args.http2_server_interop:
if not args.manual_run:
http2_server_job.wait_for_healthy(timeout_seconds=600)
for language in languages_http2_clients_for_http2_server_interop:
for test_case in set(_HTTP2_SERVER_TEST_CASES) - set(
_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS
):
offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
server_port = _DEFAULT_SERVER_PORT + offset
if not args.manual_run:
server_port = http2_server_job.mapped_port(server_port)
test_job = cloud_to_cloud_jobspec(
language,
test_case,
str(http2InteropServer),
"localhost",
server_port,
docker_image=docker_images.get(str(language)),
manual_cmd_log=client_manual_cmd_log,
)
jobs.append(test_job)
for language in languages:
# HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS is a subset of
            # HTTP2_SERVER_TEST_CASES, in which clients use their gRPC interop clients rather
# than specialized http2 clients, reusing existing test implementations.
# For example, in the "data_frame_padding" test, use language's gRPC
# interop clients and make them think that they're running "large_unary"
# test case. This avoids implementing a new test case in each language.
for test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
if test_case not in language.unimplemented_test_cases():
offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
server_port = _DEFAULT_SERVER_PORT + offset
if not args.manual_run:
server_port = http2_server_job.mapped_port(server_port)
if args.transport_security != "insecure":
print(
"Creating grpc client to http2 server test case "
"with insecure connection, even though "
"args.transport_security is not insecure. Http2 "
"test server only supports insecure connections."
)
test_job = cloud_to_cloud_jobspec(
language,
test_case,
str(http2InteropServer),
"localhost",
server_port,
docker_image=docker_images.get(str(language)),
transport_security="insecure",
manual_cmd_log=client_manual_cmd_log,
)
jobs.append(test_job)
if not jobs:
print("No jobs to run.")
for image in six.itervalues(docker_images):
dockerjob.remove_image(image, skip_nonexistent=True)
sys.exit(1)
if args.manual_run:
print("All tests will skipped --manual_run option is active.")
if args.verbose:
print("Jobs to run: \n%s\n" % "\n".join(str(job) for job in jobs))
num_failures, resultset = jobset.run(
jobs,
newline_on_success=True,
maxjobs=args.jobs,
skip_jobs=args.manual_run,
)
if args.bq_result_table and resultset:
upload_interop_results_to_bq(resultset, args.bq_result_table)
if num_failures:
jobset.message("FAILED", "Some tests failed", do_newline=True)
else:
jobset.message("SUCCESS", "All tests passed", do_newline=True)
write_cmdlog_maybe(server_manual_cmd_log, "interop_server_cmds.sh")
write_cmdlog_maybe(client_manual_cmd_log, "interop_client_cmds.sh")
report_utils.render_junit_xml_report(resultset, _TESTS_XML_REPORT)
for name, job in list(resultset.items()):
if "http2" in name:
job[0].http2results = aggregate_http2_results(job[0].message)
http2_server_test_cases = (
_HTTP2_SERVER_TEST_CASES if args.http2_server_interop else []
)
if num_failures:
sys.exit(1)
else:
sys.exit(0)
finally:
# Check if servers are still running.
for server, job in list(server_jobs.items()):
if not job.is_running():
print('Server "%s" has exited prematurely.' % server)
dockerjob.finish_jobs([j for j in six.itervalues(server_jobs)])
for image in six.itervalues(docker_images):
if not args.manual_run:
print("Removing docker image %s" % image)
dockerjob.remove_image(image)
else:
print("Preserving docker image: %s" % image)
| 59,174
| 30.661316
| 109
|
py
|
grpc
|
grpc-master/tools/run_tests/run_microbenchmark.py
|
#!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import html
import multiprocessing
import os
import subprocess
import sys
import python_utils.jobset as jobset
import python_utils.start_port_server as start_port_server
sys.path.append(
os.path.join(
os.path.dirname(sys.argv[0]),
"..",
"profiling",
"microbenchmarks",
"bm_diff",
)
)
import bm_constants
flamegraph_dir = os.path.join(os.path.expanduser("~"), "FlameGraph")
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), "../.."))
if not os.path.exists("reports"):
os.makedirs("reports")
start_port_server.start_port_server()
def fnize(s):
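    """Make a benchmark name safe to use in a file name.

    Runs of '<', '>', ',', ' ' and '/' are collapsed into a single underscore.
    """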
out = ""
for c in s:
if c in "<>, /":
if len(out) and out[-1] == "_":
continue
out += "_"
else:
out += c
return out
# index html
index_html = """
<html>
<head>
<title>Microbenchmark Results</title>
</head>
<body>
"""
def heading(name):
global index_html
index_html += "<h1>%s</h1>\n" % name
def link(txt, tgt):
global index_html
index_html += '<p><a href="%s">%s</a></p>\n' % (
html.escape(tgt, quote=True),
html.escape(txt),
)
def text(txt):
global index_html
index_html += "<p><pre>%s</pre></p>\n" % html.escape(txt)
def _bazel_build_benchmark(bm_name, cfg):
"""Build given benchmark with bazel"""
subprocess.check_call(
[
"tools/bazel",
"build",
"--config=%s" % cfg,
"//test/cpp/microbenchmarks:%s" % bm_name,
]
)
def run_summary(bm_name, cfg, base_json_name):
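    """Build the benchmark with bazel, run it writing JSON results to
    <base_json_name>.<cfg>.json, and return the benchmark's stdout."""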
_bazel_build_benchmark(bm_name, cfg)
cmd = [
"bazel-bin/test/cpp/microbenchmarks/%s" % bm_name,
"--benchmark_out=%s.%s.json" % (base_json_name, cfg),
"--benchmark_out_format=json",
]
if args.summary_time is not None:
cmd += ["--benchmark_min_time=%d" % args.summary_time]
return subprocess.check_output(cmd).decode("UTF-8")
def collect_summary(bm_name, args):
# no counters, run microbenchmark and add summary
# both to HTML report and to console.
nocounters_heading = "Summary: %s" % bm_name
nocounters_summary = run_summary(bm_name, "opt", bm_name)
heading(nocounters_heading)
text(nocounters_summary)
print(nocounters_heading)
print(nocounters_summary)
collectors = {
"summary": collect_summary,
}
argp = argparse.ArgumentParser(description="Collect data from microbenchmarks")
argp.add_argument(
"-c",
"--collect",
choices=sorted(collectors.keys()),
nargs="*",
default=sorted(collectors.keys()),
help="Which collectors should be run against each benchmark",
)
argp.add_argument(
"-b",
"--benchmarks",
choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
nargs="+",
type=str,
help="Which microbenchmarks should be run",
)
argp.add_argument(
"--bq_result_table",
default="",
type=str,
help=(
"Upload results from summary collection to a specified bigquery table."
),
)
argp.add_argument(
"--summary_time",
default=None,
type=int,
help="Minimum time to run benchmarks for the summary collection",
)
args = argp.parse_args()
try:
for collect in args.collect:
for bm_name in args.benchmarks:
collectors[collect](bm_name, args)
finally:
if not os.path.exists("reports"):
os.makedirs("reports")
index_html += "</body>\n</html>\n"
with open("reports/index.html", "w") as f:
f.write(index_html)
| 4,173
| 23.698225
| 79
|
py
|
grpc
|
grpc-master/tools/run_tests/run_grpclb_interop_tests.py
|
#!/usr/bin/env python3
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run interop (cross-language) tests in parallel."""
from __future__ import print_function
import argparse
import atexit
import itertools
import json
import multiprocessing
import os
import re
import subprocess
import sys
import tempfile
import time
import traceback
import uuid
import six
import python_utils.dockerjob as dockerjob
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
# Docker doesn't clean up after itself, so we do it on exit.
atexit.register(lambda: subprocess.call(["stty", "echo"]))
ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../.."))
os.chdir(ROOT)
_FALLBACK_SERVER_PORT = 443
_BALANCER_SERVER_PORT = 12000
_BACKEND_SERVER_PORT = 8080
_TEST_TIMEOUT = 30
_FAKE_SERVERS_SAFENAME = "fake_servers"
# Use a name that's verified by the test certs
_SERVICE_NAME = "server.test.google.fr"
class CXXLanguage:
def __init__(self):
self.client_cwd = "/var/local/git/grpc"
self.safename = "cxx"
def client_cmd(self, args):
return ["bins/opt/interop_client"] + args
def global_env(self):
# 1) Set c-ares as the resolver, to
# enable grpclb.
# 2) Turn on verbose logging.
# 3) Set the ROOTS_PATH env variable
# to the test CA in order for
# GoogleDefaultCredentials to be
# able to use the test CA.
return {
"GRPC_DNS_RESOLVER": "ares",
"GRPC_VERBOSITY": "DEBUG",
"GRPC_TRACE": "client_channel,glb",
"GRPC_DEFAULT_SSL_ROOTS_FILE_PATH": (
"/var/local/git/grpc/src/core/tsi/test_creds/ca.pem"
),
}
def __str__(self):
return "c++"
class JavaLanguage:
def __init__(self):
self.client_cwd = "/var/local/git/grpc-java"
self.safename = str(self)
def client_cmd(self, args):
# Take necessary steps to import our test CA into
# the set of test CA's that the Java runtime of the
# docker container will pick up, so that
# Java GoogleDefaultCreds can use it.
pem_to_der_cmd = (
"openssl x509 -outform der "
"-in /external_mount/src/core/tsi/test_creds/ca.pem "
"-out /tmp/test_ca.der"
)
keystore_import_cmd = (
"keytool -import "
"-keystore /usr/lib/jvm/java-8-oracle/jre/lib/security/cacerts "
"-file /tmp/test_ca.der "
"-deststorepass changeit "
"-noprompt"
)
return [
"bash",
"-c",
(
"{pem_to_der_cmd} && "
"{keystore_import_cmd} && "
"./run-test-client.sh {java_client_args}"
).format(
pem_to_der_cmd=pem_to_der_cmd,
keystore_import_cmd=keystore_import_cmd,
java_client_args=" ".join(args),
),
]
def global_env(self):
# 1) Enable grpclb
# 2) Enable verbose logging
return {
"JAVA_OPTS": (
"-Dio.grpc.internal.DnsNameResolverProvider.enable_grpclb=true "
"-Djava.util.logging.config.file=/var/local/grpc_java_logging/logconf.txt"
)
}
def __str__(self):
return "java"
class GoLanguage:
def __init__(self):
self.client_cwd = "/go/src/google.golang.org/grpc/interop/client"
self.safename = str(self)
def client_cmd(self, args):
# Copy the test CA file into the path that
# the Go runtime in the docker container will use, so
# that Go's GoogleDefaultCredentials can use it.
# See https://golang.org/src/crypto/x509/root_linux.go.
return [
"bash",
"-c",
(
"cp /external_mount/src/core/tsi/test_creds/ca.pem "
"/etc/ssl/certs/ca-certificates.crt && "
"/go/bin/client {go_client_args}"
).format(go_client_args=" ".join(args)),
]
def global_env(self):
return {
"GRPC_GO_LOG_VERBOSITY_LEVEL": "3",
"GRPC_GO_LOG_SEVERITY_LEVEL": "INFO",
}
def __str__(self):
return "go"
_LANGUAGES = {
"c++": CXXLanguage(),
"go": GoLanguage(),
"java": JavaLanguage(),
}
def docker_run_cmdline(cmdline, image, docker_args, cwd, environ=None):
"""Wraps given cmdline array to create 'docker run' cmdline from it."""
# turn environ into -e docker args
docker_cmdline = "docker run -i --rm=true".split()
if environ:
for k, v in list(environ.items()):
docker_cmdline += ["-e", "%s=%s" % (k, v)]
return docker_cmdline + ["-w", cwd] + docker_args + [image] + cmdline
def _job_kill_handler(job):
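    """Kill handler: stop the docker container backing the job."""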
assert job._spec.container_name
dockerjob.docker_kill(job._spec.container_name)
def transport_security_to_args(transport_security):
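    """Map a transport security mode to interop client/server flags."""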
args = []
if transport_security == "tls":
args += ["--use_tls=true"]
elif transport_security == "alts":
args += ["--use_tls=false", "--use_alts=true"]
elif transport_security == "insecure":
args += ["--use_tls=false"]
elif transport_security == "google_default_credentials":
args += ["--custom_credentials_type=google_default_credentials"]
else:
print("Invalid transport security option.")
sys.exit(1)
return args
def lb_client_interop_jobspec(
language, dns_server_ip, docker_image, transport_security="tls"
):
"""Runs a gRPC client under test in a docker container"""
interop_only_options = [
"--server_host=%s" % _SERVICE_NAME,
"--server_port=%d" % _FALLBACK_SERVER_PORT,
] + transport_security_to_args(transport_security)
# Don't set the server host override in any client;
# Go and Java default to no override.
# We're using a DNS server so there's no need.
if language.safename == "c++":
interop_only_options += ['--server_host_override=""']
# Don't set --use_test_ca; we're configuring
# clients to use test CA's via alternate means.
interop_only_options += ["--use_test_ca=false"]
client_args = language.client_cmd(interop_only_options)
container_name = dockerjob.random_name(
"lb_interop_client_%s" % language.safename
)
docker_cmdline = docker_run_cmdline(
client_args,
environ=language.global_env(),
image=docker_image,
cwd=language.client_cwd,
docker_args=[
"--dns=%s" % dns_server_ip,
"--net=host",
"--name=%s" % container_name,
"-v",
"{grpc_grpc_root_dir}:/external_mount:ro".format(
grpc_grpc_root_dir=ROOT
),
],
)
jobset.message(
"IDLE",
"docker_cmdline:\b|%s|" % " ".join(docker_cmdline),
do_newline=True,
)
test_job = jobset.JobSpec(
cmdline=docker_cmdline,
shortname="lb_interop_client:%s" % language,
timeout_seconds=_TEST_TIMEOUT,
kill_handler=_job_kill_handler,
)
test_job.container_name = container_name
return test_job
def fallback_server_jobspec(transport_security, shortname):
"""Create jobspec for running a fallback server"""
cmdline = [
"bin/server",
"--port=%d" % _FALLBACK_SERVER_PORT,
] + transport_security_to_args(transport_security)
return grpc_server_in_docker_jobspec(
server_cmdline=cmdline, shortname=shortname
)
def backend_server_jobspec(transport_security, shortname):
"""Create jobspec for running a backend server"""
cmdline = [
"bin/server",
"--port=%d" % _BACKEND_SERVER_PORT,
] + transport_security_to_args(transport_security)
return grpc_server_in_docker_jobspec(
server_cmdline=cmdline, shortname=shortname
)
def grpclb_jobspec(transport_security, short_stream, backend_addrs, shortname):
"""Create jobspec for running a balancer server"""
cmdline = [
"bin/fake_grpclb",
"--backend_addrs=%s" % ",".join(backend_addrs),
"--port=%d" % _BALANCER_SERVER_PORT,
"--short_stream=%s" % short_stream,
"--service_name=%s" % _SERVICE_NAME,
] + transport_security_to_args(transport_security)
return grpc_server_in_docker_jobspec(
server_cmdline=cmdline, shortname=shortname
)
def grpc_server_in_docker_jobspec(server_cmdline, shortname):
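    """Wrap a fake-server command line into a 'docker run' jobspec
    that uses the pre-built fake servers image."""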
container_name = dockerjob.random_name(shortname)
environ = {
"GRPC_GO_LOG_VERBOSITY_LEVEL": "3",
"GRPC_GO_LOG_SEVERITY_LEVEL": "INFO ",
}
docker_cmdline = docker_run_cmdline(
server_cmdline,
cwd="/go",
image=docker_images.get(_FAKE_SERVERS_SAFENAME),
environ=environ,
docker_args=["--name=%s" % container_name],
)
jobset.message(
"IDLE",
"docker_cmdline:\b|%s|" % " ".join(docker_cmdline),
do_newline=True,
)
server_job = jobset.JobSpec(
cmdline=docker_cmdline, shortname=shortname, timeout_seconds=30 * 60
)
server_job.container_name = container_name
return server_job
def dns_server_in_docker_jobspec(
grpclb_ips,
fallback_ips,
shortname,
cause_no_error_no_data_for_balancer_a_record,
):
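    """Create a jobspec that runs the test DNS server in docker, serving
    records that point at the given balancer and fallback IPs."""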
container_name = dockerjob.random_name(shortname)
run_dns_server_cmdline = [
"python",
"test/cpp/naming/utils/run_dns_server_for_lb_interop_tests.py",
"--grpclb_ips=%s" % ",".join(grpclb_ips),
"--fallback_ips=%s" % ",".join(fallback_ips),
]
if cause_no_error_no_data_for_balancer_a_record:
run_dns_server_cmdline.append(
"--cause_no_error_no_data_for_balancer_a_record"
)
docker_cmdline = docker_run_cmdline(
run_dns_server_cmdline,
cwd="/var/local/git/grpc",
image=docker_images.get(_FAKE_SERVERS_SAFENAME),
docker_args=["--name=%s" % container_name],
)
jobset.message(
"IDLE",
"docker_cmdline:\b|%s|" % " ".join(docker_cmdline),
do_newline=True,
)
server_job = jobset.JobSpec(
cmdline=docker_cmdline, shortname=shortname, timeout_seconds=30 * 60
)
server_job.container_name = container_name
return server_job
def build_interop_image_jobspec(lang_safename, basename_prefix="grpc_interop"):
"""Creates jobspec for building interop docker image for a language"""
tag = "%s_%s:%s" % (basename_prefix, lang_safename, uuid.uuid4())
env = {
"INTEROP_IMAGE": tag,
"BASE_NAME": "%s_%s" % (basename_prefix, lang_safename),
}
build_job = jobset.JobSpec(
cmdline=["tools/run_tests/dockerize/build_interop_image.sh"],
environ=env,
shortname="build_docker_%s" % lang_safename,
timeout_seconds=30 * 60,
)
build_job.tag = tag
return build_job
argp = argparse.ArgumentParser(description="Run interop tests.")
argp.add_argument(
"-l",
"--language",
choices=["all"] + sorted(_LANGUAGES),
nargs="+",
default=["all"],
help="Clients to run.",
)
argp.add_argument("-j", "--jobs", default=multiprocessing.cpu_count(), type=int)
argp.add_argument(
"-s",
"--scenarios_file",
default=None,
type=str,
help="File containing test scenarios as JSON configs.",
)
argp.add_argument(
"-n",
"--scenario_name",
default=None,
type=str,
help=(
"Useful for manual runs: specify the name of "
"the scenario to run from scenarios_file. Run all scenarios if unset."
),
)
argp.add_argument(
"--cxx_image_tag",
default=None,
type=str,
help=(
"Setting this skips the clients docker image "
"build step and runs the client from the named "
"image. Only supports running a one client language."
),
)
argp.add_argument(
"--go_image_tag",
default=None,
type=str,
help=(
"Setting this skips the clients docker image build "
"step and runs the client from the named image. Only "
"supports running a one client language."
),
)
argp.add_argument(
"--java_image_tag",
default=None,
type=str,
help=(
"Setting this skips the clients docker image build "
"step and runs the client from the named image. Only "
"supports running a one client language."
),
)
argp.add_argument(
"--servers_image_tag",
default=None,
type=str,
help=(
"Setting this skips the fake servers docker image "
"build step and runs the servers from the named image."
),
)
argp.add_argument(
"--no_skips",
default=False,
type=bool,
nargs="?",
const=True,
help=(
"Useful for manual runs. Setting this overrides test "
'"skips" configured in test scenarios.'
),
)
argp.add_argument(
"--verbose",
default=False,
type=bool,
nargs="?",
const=True,
help="Increase logging.",
)
args = argp.parse_args()
docker_images = {}
build_jobs = []
if len(args.language) and args.language[0] == "all":
languages = list(_LANGUAGES.keys())
else:
languages = args.language
for lang_name in languages:
l = _LANGUAGES[lang_name]
# First check if a pre-built image was supplied, and avoid
# rebuilding the particular docker image if so.
if lang_name == "c++" and args.cxx_image_tag:
docker_images[str(l.safename)] = args.cxx_image_tag
elif lang_name == "go" and args.go_image_tag:
docker_images[str(l.safename)] = args.go_image_tag
elif lang_name == "java" and args.java_image_tag:
docker_images[str(l.safename)] = args.java_image_tag
else:
# Build the test client in docker and save the fully
# built image.
job = build_interop_image_jobspec(l.safename)
build_jobs.append(job)
docker_images[str(l.safename)] = job.tag
# First check if a pre-built image was supplied.
if args.servers_image_tag:
docker_images[_FAKE_SERVERS_SAFENAME] = args.servers_image_tag
else:
# Build the test servers in docker and save the fully
# built image.
job = build_interop_image_jobspec(
_FAKE_SERVERS_SAFENAME, basename_prefix="lb_interop"
)
build_jobs.append(job)
docker_images[_FAKE_SERVERS_SAFENAME] = job.tag
if build_jobs:
jobset.message("START", "Building interop docker images.", do_newline=True)
print("Jobs to run: \n%s\n" % "\n".join(str(j) for j in build_jobs))
num_failures, _ = jobset.run(
build_jobs, newline_on_success=True, maxjobs=args.jobs
)
if num_failures == 0:
jobset.message(
"SUCCESS", "All docker images built successfully.", do_newline=True
)
else:
jobset.message(
"FAILED", "Failed to build interop docker images.", do_newline=True
)
sys.exit(1)
def wait_until_dns_server_is_up(dns_server_ip):
"""Probes the DNS server until it's running and safe for tests."""
for i in range(0, 30):
print("Health check: attempt to connect to DNS server over TCP.")
tcp_connect_subprocess = subprocess.Popen(
[
os.path.join(
os.getcwd(), "test/cpp/naming/utils/tcp_connect.py"
),
"--server_host",
dns_server_ip,
"--server_port",
str(53),
"--timeout",
str(1),
]
)
tcp_connect_subprocess.communicate()
if tcp_connect_subprocess.returncode == 0:
print(
"Health check: attempt to make an A-record query to DNS server."
)
dns_resolver_subprocess = subprocess.Popen(
[
os.path.join(
os.getcwd(), "test/cpp/naming/utils/dns_resolver.py"
),
"--qname",
(
"health-check-local-dns-server-is-alive."
"resolver-tests.grpctestingexp"
),
"--server_host",
dns_server_ip,
"--server_port",
str(53),
],
stdout=subprocess.PIPE,
)
dns_resolver_stdout, _ = dns_resolver_subprocess.communicate()
if dns_resolver_subprocess.returncode == 0:
if "123.123.123.123" in dns_resolver_stdout:
print(
"DNS server is up! "
"Successfully reached it over UDP and TCP."
)
return
time.sleep(0.1)
raise Exception(
"Failed to reach DNS server over TCP and/or UDP. "
"Exitting without running tests."
)
def shortname(shortname_prefix, shortname, index):
return "%s_%s_%d" % (shortname_prefix, shortname, index)
def run_one_scenario(scenario_config):
jobset.message("START", "Run scenario: %s" % scenario_config["name"])
server_jobs = {}
server_addresses = {}
suppress_server_logs = True
try:
backend_addrs = []
fallback_ips = []
grpclb_ips = []
shortname_prefix = scenario_config["name"]
# Start backends
for i in range(len(scenario_config["backend_configs"])):
backend_config = scenario_config["backend_configs"][i]
backend_shortname = shortname(shortname_prefix, "backend_server", i)
backend_spec = backend_server_jobspec(
backend_config["transport_sec"], backend_shortname
)
backend_job = dockerjob.DockerJob(backend_spec)
server_jobs[backend_shortname] = backend_job
backend_addrs.append(
"%s:%d" % (backend_job.ip_address(), _BACKEND_SERVER_PORT)
)
# Start fallbacks
for i in range(len(scenario_config["fallback_configs"])):
fallback_config = scenario_config["fallback_configs"][i]
fallback_shortname = shortname(
shortname_prefix, "fallback_server", i
)
fallback_spec = fallback_server_jobspec(
fallback_config["transport_sec"], fallback_shortname
)
fallback_job = dockerjob.DockerJob(fallback_spec)
server_jobs[fallback_shortname] = fallback_job
fallback_ips.append(fallback_job.ip_address())
# Start balancers
for i in range(len(scenario_config["balancer_configs"])):
balancer_config = scenario_config["balancer_configs"][i]
grpclb_shortname = shortname(shortname_prefix, "grpclb_server", i)
grpclb_spec = grpclb_jobspec(
balancer_config["transport_sec"],
balancer_config["short_stream"],
backend_addrs,
grpclb_shortname,
)
grpclb_job = dockerjob.DockerJob(grpclb_spec)
server_jobs[grpclb_shortname] = grpclb_job
grpclb_ips.append(grpclb_job.ip_address())
# Start DNS server
dns_server_shortname = shortname(shortname_prefix, "dns_server", 0)
dns_server_spec = dns_server_in_docker_jobspec(
grpclb_ips,
fallback_ips,
dns_server_shortname,
scenario_config["cause_no_error_no_data_for_balancer_a_record"],
)
dns_server_job = dockerjob.DockerJob(dns_server_spec)
server_jobs[dns_server_shortname] = dns_server_job
# Get the IP address of the docker container running the DNS server.
# The DNS server is running on port 53 of that IP address. Note we will
# point the DNS resolvers of grpc clients under test to our controlled
        # DNS server by effectively modifying the /etc/resolv.conf "nameserver"
# lists of their docker containers.
dns_server_ip = dns_server_job.ip_address()
wait_until_dns_server_is_up(dns_server_ip)
# Run clients
jobs = []
for lang_name in languages:
# Skip languages that are known to not currently
# work for this test.
if not args.no_skips and lang_name in scenario_config.get(
"skip_langs", []
):
jobset.message(
"IDLE",
"Skipping scenario: %s for language: %s\n"
% (scenario_config["name"], lang_name),
)
continue
lang = _LANGUAGES[lang_name]
test_job = lb_client_interop_jobspec(
lang,
dns_server_ip,
docker_image=docker_images.get(lang.safename),
transport_security=scenario_config["transport_sec"],
)
jobs.append(test_job)
jobset.message(
"IDLE", "Jobs to run: \n%s\n" % "\n".join(str(job) for job in jobs)
)
num_failures, resultset = jobset.run(
jobs, newline_on_success=True, maxjobs=args.jobs
)
report_utils.render_junit_xml_report(resultset, "sponge_log.xml")
if num_failures:
suppress_server_logs = False
jobset.message(
"FAILED",
"Scenario: %s. Some tests failed" % scenario_config["name"],
do_newline=True,
)
else:
jobset.message(
"SUCCESS",
"Scenario: %s. All tests passed" % scenario_config["name"],
do_newline=True,
)
return num_failures
finally:
# Check if servers are still running.
for server, job in list(server_jobs.items()):
if not job.is_running():
print('Server "%s" has exited prematurely.' % server)
suppress_failure = suppress_server_logs and not args.verbose
dockerjob.finish_jobs(
[j for j in six.itervalues(server_jobs)],
suppress_failure=suppress_failure,
)
num_failures = 0
with open(args.scenarios_file, "r") as scenarios_input:
all_scenarios = json.loads(scenarios_input.read())
for scenario in all_scenarios:
if args.scenario_name:
if args.scenario_name != scenario["name"]:
jobset.message(
"IDLE", "Skipping scenario: %s" % scenario["name"]
)
continue
num_failures += run_one_scenario(scenario)
if num_failures == 0:
sys.exit(0)
else:
sys.exit(1)
| 23,168
| 32.145923
| 90
|
py
|
grpc
|
grpc-master/tools/run_tests/run_tests.py
|
#!/usr/bin/env python3
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run tests in parallel."""
from __future__ import print_function
import argparse
import ast
import collections
import glob
import itertools
import json
import logging
import multiprocessing
import os
import os.path
import pipes
import platform
import random
import re
import socket
import subprocess
import sys
import tempfile
import time
import traceback
import uuid
import six
from six.moves import urllib
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
import python_utils.start_port_server as start_port_server
import python_utils.watch_dirs as watch_dirs
try:
from python_utils.upload_test_results import upload_results_to_bq
except ImportError:
pass # It's ok to not import because this is only necessary to upload results to BQ.
gcp_utils_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../gcp/utils")
)
sys.path.append(gcp_utils_dir)
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../.."))
os.chdir(_ROOT)
_FORCE_ENVIRON_FOR_WRAPPERS = {
"GRPC_VERBOSITY": "DEBUG",
}
_POLLING_STRATEGIES = {
"linux": ["epoll1", "poll"],
"mac": ["poll"],
}
def platform_string():
return jobset.platform_string()
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
_PRE_BUILD_STEP_TIMEOUT_SECONDS = 10 * 60
def run_shell_command(cmd, env=None, cwd=None):
try:
subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
except subprocess.CalledProcessError as e:
logging.exception(
"Error while running command '%s'. Exit status %d. Output:\n%s",
e.cmd,
e.returncode,
e.output,
)
raise
def max_parallel_tests_for_current_platform():
# Too much test parallelization has only been seen to be a problem
# so far on windows.
if jobset.platform_string() == "windows":
return 64
return 1024
def _print_debug_info_epilogue(dockerfile_dir=None):
"""Use to print useful info for debug/repro just before exiting."""
print("")
print("=== run_tests.py DEBUG INFO ===")
print('command: "%s"' % " ".join(sys.argv))
if dockerfile_dir:
print("dockerfile: %s" % dockerfile_dir)
kokoro_job_name = os.getenv("KOKORO_JOB_NAME")
if kokoro_job_name:
print("kokoro job name: %s" % kokoro_job_name)
print("===============================")
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
def __init__(
self,
config,
environ=None,
timeout_multiplier=1,
tool_prefix=[],
iomgr_platform="native",
):
if environ is None:
environ = {}
self.build_config = config
self.environ = environ
self.environ["CONFIG"] = config
self.tool_prefix = tool_prefix
self.timeout_multiplier = timeout_multiplier
self.iomgr_platform = iomgr_platform
def job_spec(
self,
cmdline,
timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
shortname=None,
environ={},
cpu_cost=1.0,
flaky=False,
):
"""Construct a jobset.JobSpec for a test under this config
Args:
cmdline: a list of strings specifying the command line the test
would like to run
"""
actual_environ = self.environ.copy()
for k, v in environ.items():
actual_environ[k] = v
if not flaky and shortname and shortname in flaky_tests:
flaky = True
if shortname in shortname_to_cpu:
cpu_cost = shortname_to_cpu[shortname]
return jobset.JobSpec(
cmdline=self.tool_prefix + cmdline,
shortname=shortname,
environ=actual_environ,
cpu_cost=cpu_cost,
timeout_seconds=(
self.timeout_multiplier * timeout_seconds
if timeout_seconds
else None
),
flake_retries=4 if flaky or args.allow_flakes else 0,
timeout_retries=1 if flaky or args.allow_flakes else 0,
)
def get_c_tests(travis, test_lang):
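    """Load C/C++ test targets from the generated tests.json manifest,
    keeping only targets supported on the current platform (and skipping
    flaky targets when running on CI)."""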
out = []
platforms_str = "ci_platforms" if travis else "platforms"
with open("tools/run_tests/generated/tests.json") as f:
js = json.load(f)
return [
tgt
for tgt in js
if tgt["language"] == test_lang
and platform_string() in tgt[platforms_str]
and not (travis and tgt["flaky"])
]
def _check_compiler(compiler, supported_compilers):
if compiler not in supported_compilers:
raise Exception(
"Compiler %s not supported (on this platform)." % compiler
)
def _check_arch(arch, supported_archs):
if arch not in supported_archs:
raise Exception("Architecture %s not supported." % arch)
def _is_use_docker_child():
"""Returns True if running running as a --use_docker child."""
return True if os.getenv("DOCKER_RUN_SCRIPT_COMMAND") else False
_PythonConfigVars = collections.namedtuple(
"_ConfigVars",
[
"shell",
"builder",
"builder_prefix_arguments",
"venv_relative_python",
"toolchain",
"runner",
],
)
def _python_config_generator(name, major, minor, bits, config_vars):
build = (
config_vars.shell
+ config_vars.builder
+ config_vars.builder_prefix_arguments
+ [_python_pattern_function(major=major, minor=minor, bits=bits)]
+ [name]
+ config_vars.venv_relative_python
+ config_vars.toolchain
)
run = (
config_vars.shell
+ config_vars.runner
+ [
os.path.join(name, config_vars.venv_relative_python[0]),
]
)
return PythonConfig(name, build, run)
def _pypy_config_generator(name, major, config_vars):
return PythonConfig(
name,
config_vars.shell
+ config_vars.builder
+ config_vars.builder_prefix_arguments
+ [_pypy_pattern_function(major=major)]
+ [name]
+ config_vars.venv_relative_python
+ config_vars.toolchain,
config_vars.shell
+ config_vars.runner
+ [os.path.join(name, config_vars.venv_relative_python[0])],
)
def _python_pattern_function(major, minor, bits):
# Bit-ness is handled by the test machine's environment
if os.name == "nt":
if bits == "64":
return "/c/Python{major}{minor}/python.exe".format(
major=major, minor=minor, bits=bits
)
else:
return "/c/Python{major}{minor}_{bits}bits/python.exe".format(
major=major, minor=minor, bits=bits
)
else:
return "python{major}.{minor}".format(major=major, minor=minor)
def _pypy_pattern_function(major):
if major == "2":
return "pypy"
elif major == "3":
return "pypy3"
else:
raise ValueError("Unknown PyPy major version")
class CLanguage(object):
def __init__(self, lang_suffix, test_lang):
self.lang_suffix = lang_suffix
self.platform = platform_string()
self.test_lang = test_lang
def configure(self, config, args):
self.config = config
self.args = args
if self.platform == "windows":
_check_compiler(
self.args.compiler,
[
"default",
"cmake",
"cmake_ninja_vs2019",
"cmake_vs2019",
],
)
_check_arch(self.args.arch, ["default", "x64", "x86"])
activate_vs_tools = ""
if (
self.args.compiler == "cmake_ninja_vs2019"
or self.args.compiler == "cmake"
or self.args.compiler == "default"
):
# cmake + ninja build is the default because it is faster and supports boringssl assembly optimizations
# the compiler used is exactly the same as for cmake_vs2017
cmake_generator = "Ninja"
activate_vs_tools = "2019"
elif self.args.compiler == "cmake_vs2019":
cmake_generator = "Visual Studio 16 2019"
else:
print("should never reach here.")
sys.exit(1)
self._cmake_configure_extra_args = []
self._cmake_generator_windows = cmake_generator
# required to pass as cmake "-A" configuration for VS builds (but not for Ninja)
self._cmake_architecture_windows = (
"x64" if self.args.arch == "x64" else "Win32"
)
            # when building with Ninja, the VS common tools need to be activated first
self._activate_vs_tools_windows = activate_vs_tools
# "x64_x86" means create 32bit binaries, but use 64bit toolkit to secure more memory for the build
self._vs_tools_architecture_windows = (
"x64" if self.args.arch == "x64" else "x64_x86"
)
else:
if self.platform == "linux":
# Allow all the known architectures. _check_arch_option has already checked that we're not doing
# something illegal when not running under docker.
_check_arch(self.args.arch, ["default", "x64", "x86", "arm64"])
else:
_check_arch(self.args.arch, ["default"])
(
self._docker_distro,
self._cmake_configure_extra_args,
) = self._compiler_options(self.args.use_docker, self.args.compiler)
if self.args.arch == "x86":
# disable boringssl asm optimizations when on x86
# see https://github.com/grpc/grpc/blob/b5b8578b3f8b4a9ce61ed6677e19d546e43c5c68/tools/run_tests/artifacts/artifact_targets.py#L253
self._cmake_configure_extra_args.append("-DOPENSSL_NO_ASM=ON")
def test_specs(self):
out = []
binaries = get_c_tests(self.args.travis, self.test_lang)
for target in binaries:
if target.get("boringssl", False):
# cmake doesn't build boringssl tests
continue
auto_timeout_scaling = target.get("auto_timeout_scaling", True)
polling_strategies = (
_POLLING_STRATEGIES.get(self.platform, ["all"])
if target.get("uses_polling", True)
else ["none"]
)
for polling_strategy in polling_strategies:
env = {
"GRPC_DEFAULT_SSL_ROOTS_FILE_PATH": _ROOT
+ "/src/core/tsi/test_creds/ca.pem",
"GRPC_POLL_STRATEGY": polling_strategy,
"GRPC_VERBOSITY": "DEBUG",
}
resolver = os.environ.get("GRPC_DNS_RESOLVER", None)
if resolver:
env["GRPC_DNS_RESOLVER"] = resolver
shortname_ext = (
""
if polling_strategy == "all"
else " GRPC_POLL_STRATEGY=%s" % polling_strategy
)
if polling_strategy in target.get("excluded_poll_engines", []):
continue
timeout_scaling = 1
if auto_timeout_scaling:
config = self.args.config
if (
"asan" in config
or config == "msan"
or config == "tsan"
or config == "ubsan"
or config == "helgrind"
or config == "memcheck"
):
# Scale overall test timeout if running under various sanitizers.
# scaling value is based on historical data analysis
timeout_scaling *= 3
if self.config.build_config in target["exclude_configs"]:
continue
if self.args.iomgr_platform in target.get("exclude_iomgrs", []):
continue
if self.platform == "windows":
if self._cmake_generator_windows == "Ninja":
binary = "cmake/build/%s.exe" % target["name"]
else:
binary = "cmake/build/%s/%s.exe" % (
_MSBUILD_CONFIG[self.config.build_config],
target["name"],
)
else:
binary = "cmake/build/%s" % target["name"]
cpu_cost = target["cpu_cost"]
if cpu_cost == "capacity":
cpu_cost = multiprocessing.cpu_count()
if os.path.isfile(binary):
list_test_command = None
filter_test_command = None
                    # these are the flags defined by gtest and benchmark framework to list
# and filter test runs. We use them to split each individual test
# into its own JobSpec, and thus into its own process.
if "benchmark" in target and target["benchmark"]:
with open(os.devnull, "w") as fnull:
tests = subprocess.check_output(
[binary, "--benchmark_list_tests"], stderr=fnull
)
for line in tests.decode().split("\n"):
test = line.strip()
if not test:
continue
cmdline = [
binary,
"--benchmark_filter=%s$" % test,
] + target["args"]
out.append(
self.config.job_spec(
cmdline,
shortname="%s %s"
% (" ".join(cmdline), shortname_ext),
cpu_cost=cpu_cost,
timeout_seconds=target.get(
"timeout_seconds",
_DEFAULT_TIMEOUT_SECONDS,
)
* timeout_scaling,
environ=env,
)
)
elif "gtest" in target and target["gtest"]:
# here we parse the output of --gtest_list_tests to build up a complete
# list of the tests contained in a binary for each test, we then
# add a job to run, filtering for just that test.
with open(os.devnull, "w") as fnull:
tests = subprocess.check_output(
[binary, "--gtest_list_tests"], stderr=fnull
)
base = None
for line in tests.decode().split("\n"):
i = line.find("#")
if i >= 0:
line = line[:i]
if not line:
continue
if line[0] != " ":
base = line.strip()
else:
assert base is not None
assert line[1] == " "
test = base + line.strip()
cmdline = [
binary,
"--gtest_filter=%s" % test,
] + target["args"]
out.append(
self.config.job_spec(
cmdline,
shortname="%s %s"
% (" ".join(cmdline), shortname_ext),
cpu_cost=cpu_cost,
timeout_seconds=target.get(
"timeout_seconds",
_DEFAULT_TIMEOUT_SECONDS,
)
* timeout_scaling,
environ=env,
)
)
else:
cmdline = [binary] + target["args"]
shortname = target.get(
"shortname",
" ".join(pipes.quote(arg) for arg in cmdline),
)
shortname += shortname_ext
out.append(
self.config.job_spec(
cmdline,
shortname=shortname,
cpu_cost=cpu_cost,
flaky=target.get("flaky", False),
timeout_seconds=target.get(
"timeout_seconds", _DEFAULT_TIMEOUT_SECONDS
)
* timeout_scaling,
environ=env,
)
)
elif self.args.regex == ".*" or self.platform == "windows":
print("\nWARNING: binary not found, skipping", binary)
return sorted(out)
def pre_build_steps(self):
return []
def build_steps(self):
if self.platform == "windows":
return [
[
"tools\\run_tests\\helper_scripts\\build_cxx.bat",
"-DgRPC_BUILD_MSVC_MP_COUNT=%d" % self.args.jobs,
]
+ self._cmake_configure_extra_args
]
else:
return [
["tools/run_tests/helper_scripts/build_cxx.sh"]
+ self._cmake_configure_extra_args
]
def build_steps_environ(self):
"""Extra environment variables set for pre_build_steps and build_steps jobs."""
environ = {"GRPC_RUN_TESTS_CXX_LANGUAGE_SUFFIX": self.lang_suffix}
if self.platform == "windows":
environ["GRPC_CMAKE_GENERATOR"] = self._cmake_generator_windows
environ[
"GRPC_CMAKE_ARCHITECTURE"
] = self._cmake_architecture_windows
environ[
"GRPC_BUILD_ACTIVATE_VS_TOOLS"
] = self._activate_vs_tools_windows
environ[
"GRPC_BUILD_VS_TOOLS_ARCHITECTURE"
] = self._vs_tools_architecture_windows
return environ
def post_tests_steps(self):
if self.platform == "windows":
return []
else:
return [["tools/run_tests/helper_scripts/post_tests_c.sh"]]
def _clang_cmake_configure_extra_args(self, version_suffix=""):
return [
"-DCMAKE_C_COMPILER=clang%s" % version_suffix,
"-DCMAKE_CXX_COMPILER=clang++%s" % version_suffix,
]
def _compiler_options(self, use_docker, compiler):
"""Returns docker distro and cmake configure args to use for given compiler."""
if not use_docker and not _is_use_docker_child():
# if not running under docker, we cannot ensure the right compiler version will be used,
# so we only allow the non-specific choices.
_check_compiler(compiler, ["default", "cmake"])
if compiler == "default" or compiler == "cmake":
return ("debian11", [])
elif compiler == "gcc7":
return ("gcc_7", [])
elif compiler == "gcc10.2":
return ("debian11", [])
elif compiler == "gcc10.2_openssl102":
return (
"debian11_openssl102",
[
"-DgRPC_SSL_PROVIDER=package",
],
)
elif compiler == "gcc12":
return ("gcc_12", ["-DCMAKE_CXX_STANDARD=20"])
elif compiler == "gcc_musl":
return ("alpine", [])
elif compiler == "clang6":
return ("clang_6", self._clang_cmake_configure_extra_args())
elif compiler == "clang15":
return ("clang_15", self._clang_cmake_configure_extra_args())
else:
raise Exception("Compiler %s not supported." % compiler)
def dockerfile_dir(self):
return "tools/dockerfile/test/cxx_%s_%s" % (
self._docker_distro,
_docker_arch_suffix(self.args.arch),
)
def __str__(self):
return self.lang_suffix
# This tests Node on grpc/grpc-node and will become the standard for Node testing
class RemoteNodeLanguage(object):
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
# Note: electron ABI only depends on major and minor version, so that's all
# we should specify in the compiler argument
_check_compiler(
self.args.compiler,
[
"default",
"node0.12",
"node4",
"node5",
"node6",
"node7",
"node8",
"electron1.3",
"electron1.6",
],
)
if self.args.compiler == "default":
self.runtime = "node"
self.node_version = "8"
else:
if self.args.compiler.startswith("electron"):
self.runtime = "electron"
self.node_version = self.args.compiler[8:]
else:
self.runtime = "node"
# Take off the word "node"
self.node_version = self.args.compiler[4:]
# TODO: update with Windows/electron scripts when available for grpc/grpc-node
def test_specs(self):
if self.platform == "windows":
return [
self.config.job_spec(
["tools\\run_tests\\helper_scripts\\run_node.bat"]
)
]
else:
return [
self.config.job_spec(
["tools/run_tests/helper_scripts/run_grpc-node.sh"],
None,
environ=_FORCE_ENVIRON_FOR_WRAPPERS,
)
]
def pre_build_steps(self):
return []
def build_steps(self):
return []
def build_steps_environ(self):
"""Extra environment variables set for pre_build_steps and build_steps jobs."""
return {}
def post_tests_steps(self):
return []
def dockerfile_dir(self):
return "tools/dockerfile/test/node_jessie_%s" % _docker_arch_suffix(
self.args.arch
)
def __str__(self):
return "grpc-node"
class Php7Language(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ["default"])
def test_specs(self):
return [
self.config.job_spec(
["src/php/bin/run_tests.sh"],
environ=_FORCE_ENVIRON_FOR_WRAPPERS,
)
]
def pre_build_steps(self):
return []
def build_steps(self):
return [["tools/run_tests/helper_scripts/build_php.sh"]]
def build_steps_environ(self):
"""Extra environment variables set for pre_build_steps and build_steps jobs."""
return {}
def post_tests_steps(self):
return [["tools/run_tests/helper_scripts/post_tests_php.sh"]]
def dockerfile_dir(self):
return "tools/dockerfile/test/php7_debian11_%s" % _docker_arch_suffix(
self.args.arch
)
def __str__(self):
return "php7"
class PythonConfig(
collections.namedtuple("PythonConfig", ["name", "build", "run"])
):
"""Tuple of commands (named s.t. 'what it says on the tin' applies)"""
class PythonLanguage(object):
_TEST_SPECS_FILE = {
"native": ["src/python/grpcio_tests/tests/tests.json"],
"gevent": [
"src/python/grpcio_tests/tests/tests.json",
"src/python/grpcio_tests/tests_gevent/tests.json",
],
"asyncio": ["src/python/grpcio_tests/tests_aio/tests.json"],
}
_TEST_COMMAND = {
"native": "test_lite",
"gevent": "test_gevent",
"asyncio": "test_aio",
}
def configure(self, config, args):
self.config = config
self.args = args
self.pythons = self._get_pythons(self.args)
def test_specs(self):
# load list of known test suites
jobs = []
for io_platform in self._TEST_SPECS_FILE:
test_cases = []
for tests_json_file_name in self._TEST_SPECS_FILE[io_platform]:
with open(tests_json_file_name) as tests_json_file:
test_cases.extend(json.load(tests_json_file))
environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
# TODO(https://github.com/grpc/grpc/issues/21401) Fork handlers is not
# designed for non-native IO manager. It has a side-effect that
# overrides threading settings in C-Core.
if io_platform != "native":
environment["GRPC_ENABLE_FORK_SUPPORT"] = "0"
for python_config in self.pythons:
jobs.extend(
[
self.config.job_spec(
python_config.run
+ [self._TEST_COMMAND[io_platform]],
timeout_seconds=8 * 60,
environ=dict(
GRPC_PYTHON_TESTRUNNER_FILTER=str(test_case),
**environment,
),
shortname="%s.%s.%s"
% (python_config.name, io_platform, test_case),
)
for test_case in test_cases
]
)
return jobs
def pre_build_steps(self):
return []
def build_steps(self):
return [config.build for config in self.pythons]
def build_steps_environ(self):
"""Extra environment variables set for pre_build_steps and build_steps jobs."""
return {}
def post_tests_steps(self):
if self.config.build_config != "gcov":
return []
else:
return [["tools/run_tests/helper_scripts/post_tests_python.sh"]]
def dockerfile_dir(self):
return "tools/dockerfile/test/python_%s_%s" % (
self._python_docker_distro_name(),
_docker_arch_suffix(self.args.arch),
)
def _python_docker_distro_name(self):
"""Choose the docker image to use based on python version."""
if self.args.compiler == "python_alpine":
return "alpine"
else:
return "debian11_default"
def _get_pythons(self, args):
"""Get python runtimes to test with, based on current platform, architecture, compiler etc."""
if args.iomgr_platform != "native":
raise ValueError(
"Python builds no longer differentiate IO Manager platforms,"
' please use "native"'
)
if args.arch == "x86":
bits = "32"
else:
bits = "64"
if os.name == "nt":
shell = ["bash"]
builder = [
os.path.abspath(
"tools/run_tests/helper_scripts/build_python_msys2.sh"
)
]
builder_prefix_arguments = ["MINGW{}".format(bits)]
venv_relative_python = ["Scripts/python.exe"]
toolchain = ["mingw32"]
else:
shell = []
builder = [
os.path.abspath(
"tools/run_tests/helper_scripts/build_python.sh"
)
]
builder_prefix_arguments = []
venv_relative_python = ["bin/python"]
toolchain = ["unix"]
runner = [
os.path.abspath("tools/run_tests/helper_scripts/run_python.sh")
]
config_vars = _PythonConfigVars(
shell,
builder,
builder_prefix_arguments,
venv_relative_python,
toolchain,
runner,
)
python37_config = _python_config_generator(
name="py37",
major="3",
minor="7",
bits=bits,
config_vars=config_vars,
)
python38_config = _python_config_generator(
name="py38",
major="3",
minor="8",
bits=bits,
config_vars=config_vars,
)
python39_config = _python_config_generator(
name="py39",
major="3",
minor="9",
bits=bits,
config_vars=config_vars,
)
python310_config = _python_config_generator(
name="py310",
major="3",
minor="10",
bits=bits,
config_vars=config_vars,
)
pypy27_config = _pypy_config_generator(
name="pypy", major="2", config_vars=config_vars
)
pypy32_config = _pypy_config_generator(
name="pypy3", major="3", config_vars=config_vars
)
if args.compiler == "default":
if os.name == "nt":
return (python38_config,)
elif os.uname()[0] == "Darwin":
# NOTE(rbellevi): Testing takes significantly longer on
# MacOS, so we restrict the number of interpreter versions
# tested.
return (python38_config,)
elif platform.machine() == "aarch64":
# Currently the python_debian11_default_arm64 docker image
# only has python3.9 installed (and that seems sufficient
# for arm64 testing)
return (python39_config,)
else:
return (
python37_config,
python38_config,
)
elif args.compiler == "python3.7":
return (python37_config,)
elif args.compiler == "python3.8":
return (python38_config,)
elif args.compiler == "python3.9":
return (python39_config,)
elif args.compiler == "python3.10":
return (python310_config,)
elif args.compiler == "pypy":
return (pypy27_config,)
elif args.compiler == "pypy3":
return (pypy32_config,)
elif args.compiler == "python_alpine":
return (python39_config,)
elif args.compiler == "all_the_cpythons":
return (
python37_config,
python38_config,
python39_config,
python310_config,
)
else:
raise Exception("Compiler %s not supported." % args.compiler)
def __str__(self):
return "python"
class RubyLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ["default"])
def test_specs(self):
tests = [
self.config.job_spec(
["tools/run_tests/helper_scripts/run_ruby.sh"],
timeout_seconds=10 * 60,
environ=_FORCE_ENVIRON_FOR_WRAPPERS,
)
]
# TODO(apolcyn): re-enable the following tests after
# https://bugs.ruby-lang.org/issues/15499 is fixed:
# They previously worked on ruby 2.5 but needed to be disabled
# after dropping support for ruby 2.5:
# - src/ruby/end2end/channel_state_test.rb
# - src/ruby/end2end/sig_int_during_channel_watch_test.rb
# TODO(apolcyn): the following test is skipped because it sometimes
# hits "Bus Error" crashes while requiring the grpc/ruby C-extension.
        # These crashes have been unreproducible outside of CI. Also see
# b/266212253.
# - src/ruby/end2end/grpc_class_init_test.rb
for test in [
"src/ruby/end2end/fork_test.rb",
"src/ruby/end2end/simple_fork_test.rb",
"src/ruby/end2end/secure_fork_test.rb",
"src/ruby/end2end/bad_usage_fork_test.rb",
"src/ruby/end2end/sig_handling_test.rb",
"src/ruby/end2end/channel_closing_test.rb",
"src/ruby/end2end/killed_client_thread_test.rb",
"src/ruby/end2end/forking_client_test.rb",
"src/ruby/end2end/multiple_killed_watching_threads_test.rb",
"src/ruby/end2end/load_grpc_with_gc_stress_test.rb",
"src/ruby/end2end/client_memory_usage_test.rb",
"src/ruby/end2end/package_with_underscore_test.rb",
"src/ruby/end2end/graceful_sig_handling_test.rb",
"src/ruby/end2end/graceful_sig_stop_test.rb",
"src/ruby/end2end/errors_load_before_grpc_lib_test.rb",
"src/ruby/end2end/logger_load_before_grpc_lib_test.rb",
"src/ruby/end2end/status_codes_load_before_grpc_lib_test.rb",
"src/ruby/end2end/call_credentials_timeout_test.rb",
"src/ruby/end2end/call_credentials_returning_bad_metadata_doesnt_kill_background_thread_test.rb",
]:
if test in [
"src/ruby/end2end/fork_test.rb",
"src/ruby/end2end/simple_fork_test.rb",
"src/ruby/end2end/secure_fork_test.rb",
"src/ruby/end2end/bad_usage_fork_test.rb",
]:
if platform_string() == "mac":
                    # Skip fork tests on mac; fork is only supported on linux.
continue
if self.config.build_config == "dbg":
# There's a known issue with dbg builds that breaks fork
# support: https://github.com/grpc/grpc/issues/31885.
# TODO(apolcyn): unskip these tests on dbg builds after we
# migrate to event engine and hence fix that issue.
continue
tests.append(
self.config.job_spec(
["ruby", test],
shortname=test,
timeout_seconds=20 * 60,
environ=_FORCE_ENVIRON_FOR_WRAPPERS,
)
)
return tests
def pre_build_steps(self):
return [["tools/run_tests/helper_scripts/pre_build_ruby.sh"]]
def build_steps(self):
return [["tools/run_tests/helper_scripts/build_ruby.sh"]]
def build_steps_environ(self):
"""Extra environment variables set for pre_build_steps and build_steps jobs."""
return {}
def post_tests_steps(self):
return [["tools/run_tests/helper_scripts/post_tests_ruby.sh"]]
def dockerfile_dir(self):
return "tools/dockerfile/test/ruby_debian11_%s" % _docker_arch_suffix(
self.args.arch
)
def __str__(self):
return "ruby"
class CSharpLanguage(object):
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ["default", "coreclr", "mono"])
if self.args.compiler == "default":
# test both runtimes by default
self.test_runtimes = ["coreclr", "mono"]
else:
# only test the specified runtime
self.test_runtimes = [self.args.compiler]
if self.platform == "windows":
_check_arch(self.args.arch, ["default"])
self._cmake_arch_option = "x64"
else:
self._docker_distro = "debian11"
def test_specs(self):
with open("src/csharp/tests.json") as f:
tests_by_assembly = json.load(f)
msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
nunit_args = ["--labels=All", "--noresult", "--workers=1"]
specs = []
for test_runtime in self.test_runtimes:
if test_runtime == "coreclr":
assembly_extension = ".dll"
assembly_subdir = "bin/%s/netcoreapp3.1" % msbuild_config
runtime_cmd = ["dotnet", "exec"]
elif test_runtime == "mono":
assembly_extension = ".exe"
assembly_subdir = "bin/%s/net45" % msbuild_config
if self.platform == "windows":
runtime_cmd = []
elif self.platform == "mac":
# mono before version 5.2 on MacOS defaults to 32bit runtime
runtime_cmd = ["mono", "--arch=64"]
else:
runtime_cmd = ["mono"]
else:
                raise Exception(
                    'Illegal runtime "%s" was specified.' % test_runtime
                )
for assembly in six.iterkeys(tests_by_assembly):
assembly_file = "src/csharp/%s/%s/%s%s" % (
assembly,
assembly_subdir,
assembly,
assembly_extension,
)
# normally, run each test as a separate process
for test in tests_by_assembly[assembly]:
cmdline = (
runtime_cmd
+ [assembly_file, "--test=%s" % test]
+ nunit_args
)
specs.append(
self.config.job_spec(
cmdline,
shortname="csharp.%s.%s" % (test_runtime, test),
environ=_FORCE_ENVIRON_FOR_WRAPPERS,
)
)
return specs
def pre_build_steps(self):
if self.platform == "windows":
return [["tools\\run_tests\\helper_scripts\\pre_build_csharp.bat"]]
else:
return [["tools/run_tests/helper_scripts/pre_build_csharp.sh"]]
def build_steps(self):
if self.platform == "windows":
return [["tools\\run_tests\\helper_scripts\\build_csharp.bat"]]
else:
return [["tools/run_tests/helper_scripts/build_csharp.sh"]]
def build_steps_environ(self):
"""Extra environment variables set for pre_build_steps and build_steps jobs."""
if self.platform == "windows":
return {"ARCHITECTURE": self._cmake_arch_option}
else:
return {}
def post_tests_steps(self):
if self.platform == "windows":
return [["tools\\run_tests\\helper_scripts\\post_tests_csharp.bat"]]
else:
return [["tools/run_tests/helper_scripts/post_tests_csharp.sh"]]
def dockerfile_dir(self):
return "tools/dockerfile/test/csharp_%s_%s" % (
self._docker_distro,
_docker_arch_suffix(self.args.arch),
)
def __str__(self):
return "csharp"
class ObjCLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ["default"])
def test_specs(self):
out = []
out.append(
self.config.job_spec(
["src/objective-c/tests/build_one_example.sh"],
timeout_seconds=20 * 60,
shortname="ios-buildtest-example-sample",
cpu_cost=1e6,
environ={
"SCHEME": "Sample",
"EXAMPLE_PATH": "src/objective-c/examples/Sample",
},
)
)
# TODO(jtattermusch): Create bazel target for the sample and remove the test task from here.
out.append(
self.config.job_spec(
["src/objective-c/tests/build_one_example.sh"],
timeout_seconds=20 * 60,
shortname="ios-buildtest-example-switftsample",
cpu_cost=1e6,
environ={
"SCHEME": "SwiftSample",
"EXAMPLE_PATH": "src/objective-c/examples/SwiftSample",
},
)
)
# Disabled due to #20258
# TODO (mxyan): Reenable this test when #20258 is resolved.
# out.append(
# self.config.job_spec(
# ['src/objective-c/tests/build_one_example_bazel.sh'],
# timeout_seconds=20 * 60,
# shortname='ios-buildtest-example-watchOS-sample',
# cpu_cost=1e6,
# environ={
# 'SCHEME': 'watchOS-sample-WatchKit-App',
# 'EXAMPLE_PATH': 'src/objective-c/examples/watchOS-sample',
# 'FRAMEWORKS': 'NO'
# }))
# TODO(jtattermusch): move the test out of the test/core/iomgr/CFStreamTests directory?
# How does one add the cfstream dependency in bazel?
out.append(
self.config.job_spec(
["test/core/iomgr/ios/CFStreamTests/build_and_run_tests.sh"],
timeout_seconds=60 * 60,
shortname="ios-test-cfstream-tests",
cpu_cost=1e6,
environ=_FORCE_ENVIRON_FOR_WRAPPERS,
)
)
return sorted(out)
def pre_build_steps(self):
return []
def build_steps(self):
return []
def build_steps_environ(self):
"""Extra environment variables set for pre_build_steps and build_steps jobs."""
return {}
def post_tests_steps(self):
return []
def dockerfile_dir(self):
return None
def __str__(self):
return "objc"
class Sanity(object):
def __init__(self, config_file):
self.config_file = config_file
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ["default"])
def test_specs(self):
import yaml
with open("tools/run_tests/sanity/%s" % self.config_file, "r") as f:
environ = {"TEST": "true"}
if _is_use_docker_child():
environ["CLANG_FORMAT_SKIP_DOCKER"] = "true"
environ["CLANG_TIDY_SKIP_DOCKER"] = "true"
environ["IWYU_SKIP_DOCKER"] = "true"
                # Sanity tests run the tools/bazel wrapper concurrently,
                # which can result in a download/run race in the wrapper.
                # Under docker we already have the right version of bazel,
                # so we can just disable the wrapper.
environ["DISABLE_BAZEL_WRAPPER"] = "true"
return [
self.config.job_spec(
cmd["script"].split(),
timeout_seconds=45 * 60,
environ=environ,
cpu_cost=cmd.get("cpu_cost", 1),
)
for cmd in yaml.safe_load(f)
]
def pre_build_steps(self):
return []
def build_steps(self):
return []
def build_steps_environ(self):
"""Extra environment variables set for pre_build_steps and build_steps jobs."""
return {}
def post_tests_steps(self):
return []
def dockerfile_dir(self):
return "tools/dockerfile/test/sanity"
def __str__(self):
return "sanity"
# different configurations we can run under
with open("tools/run_tests/generated/configs.json") as f:
_CONFIGS = dict(
(cfg["config"], Config(**cfg)) for cfg in ast.literal_eval(f.read())
)
_LANGUAGES = {
"c++": CLanguage("cxx", "c++"),
"c": CLanguage("c", "c"),
"grpc-node": RemoteNodeLanguage(),
"php7": Php7Language(),
"python": PythonLanguage(),
"ruby": RubyLanguage(),
"csharp": CSharpLanguage(),
"objc": ObjCLanguage(),
"sanity": Sanity("sanity_tests.yaml"),
"clang-tidy": Sanity("clang_tidy_tests.yaml"),
"iwyu": Sanity("iwyu_tests.yaml"),
}
_MSBUILD_CONFIG = {
"dbg": "Debug",
"opt": "Release",
"gcov": "Debug",
}
def _build_step_environ(cfg, extra_env={}):
"""Environment variables set for each build step."""
environ = {"CONFIG": cfg, "GRPC_RUN_TESTS_JOBS": str(args.jobs)}
msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
if msbuild_cfg:
environ["MSBUILD_CONFIG"] = msbuild_cfg
environ.update(extra_env)
return environ
def _windows_arch_option(arch):
"""Returns msbuild cmdline option for selected architecture."""
if arch == "default" or arch == "x86":
return "/p:Platform=Win32"
elif arch == "x64":
return "/p:Platform=x64"
else:
print("Architecture %s not supported." % arch)
sys.exit(1)
def _check_arch_option(arch):
"""Checks that architecture option is valid."""
if platform_string() == "windows":
_windows_arch_option(arch)
elif platform_string() == "linux":
# On linux, we need to be running under docker with the right architecture.
runtime_machine = platform.machine()
runtime_arch = platform.architecture()[0]
if arch == "default":
return
elif (
runtime_machine == "x86_64"
and runtime_arch == "64bit"
and arch == "x64"
):
return
elif (
runtime_machine == "x86_64"
and runtime_arch == "32bit"
and arch == "x86"
):
return
elif (
runtime_machine == "aarch64"
and runtime_arch == "64bit"
and arch == "arm64"
):
return
else:
print(
"Architecture %s does not match current runtime architecture."
% arch
)
sys.exit(1)
else:
if args.arch != "default":
print(
"Architecture %s not supported on current platform." % args.arch
)
sys.exit(1)
def _docker_arch_suffix(arch):
"""Returns suffix to dockerfile dir to use."""
if arch == "default" or arch == "x64":
return "x64"
elif arch == "x86":
return "x86"
elif arch == "arm64":
return "arm64"
else:
print("Architecture %s not supported with current settings." % arch)
sys.exit(1)
def runs_per_test_type(arg_str):
"""Auxiliary function to parse the "runs_per_test" flag.
Returns:
A positive integer or 0, the latter indicating an infinite number of
runs.
Raises:
argparse.ArgumentTypeError: Upon invalid input.
"""
if arg_str == "inf":
return 0
try:
n = int(arg_str)
if n <= 0:
raise ValueError
return n
except:
msg = "'{}' is not a positive integer or 'inf'".format(arg_str)
raise argparse.ArgumentTypeError(msg)
def percent_type(arg_str):
pct = float(arg_str)
if pct > 100 or pct < 0:
raise argparse.ArgumentTypeError(
"'%f' is not a valid percentage in the [0, 100] range" % pct
)
return pct
# This is math.isclose in python >= 3.5
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def _shut_down_legacy_server(legacy_server_port):
"""Shut down legacy version of port server."""
try:
version = int(
urllib.request.urlopen(
"http://localhost:%d/version_number" % legacy_server_port,
timeout=10,
).read()
)
except:
pass
else:
urllib.request.urlopen(
"http://localhost:%d/quitquitquit" % legacy_server_port
).read()
def _calculate_num_runs_failures(list_of_results):
"""Calculate number of runs and failures for a particular test.
Args:
list_of_results: (List) of JobResult object.
Returns:
A tuple of total number of runs and failures.
"""
num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
num_failures = 0
for jobresult in list_of_results:
if jobresult.retries > 0:
num_runs += jobresult.retries
if jobresult.num_failures > 0:
num_failures += jobresult.num_failures
return num_runs, num_failures
class BuildAndRunError(object):
"""Represents error type in _build_and_run."""
BUILD = object()
TEST = object()
POST_TEST = object()
# returns a list of things that failed (or an empty list on success)
def _build_and_run(
check_cancelled, newline_on_success, xml_report=None, build_only=False
):
"""Do one pass of building & running tests."""
# build latest sequentially
num_failures, resultset = jobset.run(
build_steps,
maxjobs=1,
stop_on_failure=True,
newline_on_success=newline_on_success,
travis=args.travis,
)
if num_failures:
return [BuildAndRunError.BUILD]
if build_only:
if xml_report:
report_utils.render_junit_xml_report(
resultset, xml_report, suite_name=args.report_suite_name
)
return []
# start antagonists
antagonists = [
subprocess.Popen(["tools/run_tests/python_utils/antagonist.py"])
for _ in range(0, args.antagonists)
]
start_port_server.start_port_server()
resultset = None
num_test_failures = 0
try:
infinite_runs = runs_per_test == 0
one_run = set(
spec
for language in languages
for spec in language.test_specs()
if (
re.search(args.regex, spec.shortname)
and (
args.regex_exclude == ""
or not re.search(args.regex_exclude, spec.shortname)
)
)
)
        # When running on travis, we want our test runs to be as similar as
        # possible for reproducibility purposes.
if args.travis and args.max_time <= 0:
massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
else:
# whereas otherwise, we want to shuffle things up to give all tests a
# chance to run.
massaged_one_run = list(
one_run
) # random.sample needs an indexable seq.
num_jobs = len(massaged_one_run)
# for a random sample, get as many as indicated by the 'sample_percent'
# argument. By default this arg is 100, resulting in a shuffle of all
# jobs.
sample_size = int(num_jobs * args.sample_percent / 100.0)
massaged_one_run = random.sample(massaged_one_run, sample_size)
if not isclose(args.sample_percent, 100.0):
assert (
args.runs_per_test == 1
), "Can't do sampling (-p) over multiple runs (-n)."
print(
"Running %d tests out of %d (~%d%%)"
% (sample_size, num_jobs, args.sample_percent)
)
if infinite_runs:
assert (
len(massaged_one_run) > 0
), "Must have at least one test for a -n inf run"
runs_sequence = (
itertools.repeat(massaged_one_run)
if infinite_runs
else itertools.repeat(massaged_one_run, runs_per_test)
)
all_runs = itertools.chain.from_iterable(runs_sequence)
if args.quiet_success:
jobset.message(
"START",
"Running tests quietly, only failing tests will be reported",
do_newline=True,
)
num_test_failures, resultset = jobset.run(
all_runs,
check_cancelled,
newline_on_success=newline_on_success,
travis=args.travis,
maxjobs=args.jobs,
maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
stop_on_failure=args.stop_on_failure,
quiet_success=args.quiet_success,
max_time=args.max_time,
)
if resultset:
for k, v in sorted(resultset.items()):
num_runs, num_failures = _calculate_num_runs_failures(v)
if num_failures > 0:
if num_failures == num_runs: # what about infinite_runs???
jobset.message("FAILED", k, do_newline=True)
else:
jobset.message(
"FLAKE",
"%s [%d/%d runs flaked]"
% (k, num_failures, num_runs),
do_newline=True,
)
finally:
for antagonist in antagonists:
antagonist.kill()
if args.bq_result_table and resultset:
upload_extra_fields = {
"compiler": args.compiler,
"config": args.config,
"iomgr_platform": args.iomgr_platform,
"language": args.language[
0
], # args.language is a list but will always have one element when uploading to BQ is enabled.
"platform": platform_string(),
}
try:
upload_results_to_bq(
resultset, args.bq_result_table, upload_extra_fields
)
except NameError as e:
logging.warning(
e
) # It's fine to ignore since this is not critical
if xml_report and resultset:
report_utils.render_junit_xml_report(
resultset,
xml_report,
suite_name=args.report_suite_name,
multi_target=args.report_multi_target,
)
number_failures, _ = jobset.run(
post_tests_steps,
maxjobs=1,
stop_on_failure=False,
newline_on_success=newline_on_success,
travis=args.travis,
)
out = []
if number_failures:
out.append(BuildAndRunError.POST_TEST)
if num_test_failures:
out.append(BuildAndRunError.TEST)
return out
# parse command line
argp = argparse.ArgumentParser(description="Run grpc tests.")
argp.add_argument(
"-c", "--config", choices=sorted(_CONFIGS.keys()), default="opt"
)
argp.add_argument(
"-n",
"--runs_per_test",
default=1,
type=runs_per_test_type,
help=(
'A positive integer or "inf". If "inf", all tests will run in an '
'infinite loop. Especially useful in combination with "-f"'
),
)
argp.add_argument("-r", "--regex", default=".*", type=str)
argp.add_argument("--regex_exclude", default="", type=str)
argp.add_argument("-j", "--jobs", default=multiprocessing.cpu_count(), type=int)
argp.add_argument("-s", "--slowdown", default=1.0, type=float)
argp.add_argument(
"-p",
"--sample_percent",
default=100.0,
type=percent_type,
help="Run a random sample with that percentage of tests",
)
argp.add_argument(
"-t",
"--travis",
default=False,
action="store_const",
const=True,
help=(
"When set, indicates that the script is running on CI (= not locally)."
),
)
argp.add_argument(
"--newline_on_success", default=False, action="store_const", const=True
)
argp.add_argument(
"-l",
"--language",
choices=sorted(_LANGUAGES.keys()),
nargs="+",
required=True,
)
argp.add_argument(
"-S", "--stop_on_failure", default=False, action="store_const", const=True
)
argp.add_argument(
"--use_docker",
default=False,
action="store_const",
const=True,
help="Run all the tests under docker. That provides "
+ "additional isolation and prevents the need to install "
+ "language specific prerequisites. Only available on Linux.",
)
argp.add_argument(
"--allow_flakes",
default=False,
action="store_const",
const=True,
help=(
"Allow flaky tests to show as passing (re-runs failed tests up to five"
" times)"
),
)
argp.add_argument(
"--arch",
choices=["default", "x86", "x64", "arm64"],
default="default",
help=(
'Selects architecture to target. For some platforms "default" is the'
" only supported choice."
),
)
argp.add_argument(
"--compiler",
choices=[
"default",
"gcc7",
"gcc10.2",
"gcc10.2_openssl102",
"gcc12",
"gcc_musl",
"clang6",
"clang15",
"python2.7",
"python3.5",
"python3.7",
"python3.8",
"python3.9",
"pypy",
"pypy3",
"python_alpine",
"all_the_cpythons",
"electron1.3",
"electron1.6",
"coreclr",
"cmake",
"cmake_ninja_vs2019",
"cmake_vs2019",
"mono",
],
default="default",
help=(
"Selects compiler to use. Allowed values depend on the platform and"
" language."
),
)
argp.add_argument(
"--iomgr_platform",
choices=["native", "gevent", "asyncio"],
default="native",
help="Selects iomgr platform to build on",
)
argp.add_argument(
"--build_only",
default=False,
action="store_const",
const=True,
help="Perform all the build steps but don't run any tests.",
)
argp.add_argument(
"--measure_cpu_costs",
default=False,
action="store_const",
const=True,
help="Measure the cpu costs of tests",
)
argp.add_argument("-a", "--antagonists", default=0, type=int)
argp.add_argument(
"-x",
"--xml_report",
default=None,
type=str,
help="Generates a JUnit-compatible XML report",
)
argp.add_argument(
"--report_suite_name",
default="tests",
type=str,
help="Test suite name to use in generated JUnit XML report",
)
argp.add_argument(
"--report_multi_target",
default=False,
const=True,
action="store_const",
help=(
"Generate separate XML report for each test job (Looks better in UIs)."
),
)
argp.add_argument(
"--quiet_success",
default=False,
action="store_const",
const=True,
help=(
"Don't print anything when a test passes. Passing tests also will not"
" be reported in XML report. "
)
+ "Useful when running many iterations of each test (argument -n).",
)
argp.add_argument(
"--force_default_poller",
default=False,
action="store_const",
const=True,
help="Don't try to iterate over many polling strategies when they exist",
)
argp.add_argument(
"--force_use_pollers",
default=None,
type=str,
help=(
"Only use the specified comma-delimited list of polling engines. "
"Example: --force_use_pollers epoll1,poll "
" (This flag has no effect if --force_default_poller flag is also used)"
),
)
argp.add_argument(
"--max_time", default=-1, type=int, help="Maximum test runtime in seconds"
)
argp.add_argument(
"--bq_result_table",
default="",
type=str,
nargs="?",
help="Upload test results to a specified BQ table.",
)
args = argp.parse_args()
flaky_tests = set()
shortname_to_cpu = {}
if args.force_default_poller:
_POLLING_STRATEGIES = {}
elif args.force_use_pollers:
_POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(",")
jobset.measure_cpu_costs = args.measure_cpu_costs
# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config
# TODO(jtattermusch): is this setting applied/being used?
if args.travis:
_FORCE_ENVIRON_FOR_WRAPPERS = {"GRPC_TRACE": "api"}
languages = set(_LANGUAGES[l] for l in args.language)
for l in languages:
l.configure(run_config, args)
if len(languages) != 1:
print("Building multiple languages simultaneously is not supported!")
sys.exit(1)
# If --use_docker was used, respawn the run_tests.py script under a docker container
# instead of continuing.
if args.use_docker:
if not args.travis:
print("Seen --use_docker flag, will run tests under docker.")
print("")
print(
"IMPORTANT: The changes you are testing need to be locally"
" committed"
)
print(
"because only the committed changes in the current branch will be"
)
print("copied to the docker environment.")
time.sleep(5)
dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
if len(dockerfile_dirs) > 1:
print(
"Languages to be tested require running under different docker "
"images."
)
sys.exit(1)
else:
dockerfile_dir = next(iter(dockerfile_dirs))
child_argv = [arg for arg in sys.argv if not arg == "--use_docker"]
run_tests_cmd = "python3 tools/run_tests/run_tests.py %s" % " ".join(
child_argv[1:]
)
env = os.environ.copy()
env["DOCKERFILE_DIR"] = dockerfile_dir
env["DOCKER_RUN_SCRIPT"] = "tools/run_tests/dockerize/docker_run.sh"
env["DOCKER_RUN_SCRIPT_COMMAND"] = run_tests_cmd
retcode = subprocess.call(
"tools/run_tests/dockerize/build_and_run_docker.sh", shell=True, env=env
)
_print_debug_info_epilogue(dockerfile_dir=dockerfile_dir)
sys.exit(retcode)
_check_arch_option(args.arch)
# collect pre-build steps (which get retried if they fail, e.g. to avoid
# flakes on downloading dependencies etc.)
build_steps = list(
set(
jobset.JobSpec(
cmdline,
environ=_build_step_environ(
build_config, extra_env=l.build_steps_environ()
),
timeout_seconds=_PRE_BUILD_STEP_TIMEOUT_SECONDS,
flake_retries=2,
)
for l in languages
for cmdline in l.pre_build_steps()
)
)
# collect build steps
build_steps.extend(
set(
jobset.JobSpec(
cmdline,
environ=_build_step_environ(
build_config, extra_env=l.build_steps_environ()
),
timeout_seconds=None,
)
for l in languages
for cmdline in l.build_steps()
)
)
# collect post test steps
post_tests_steps = list(
set(
jobset.JobSpec(
cmdline,
environ=_build_step_environ(
build_config, extra_env=l.build_steps_environ()
),
)
for l in languages
for cmdline in l.post_tests_steps()
)
)
runs_per_test = args.runs_per_test
errors = _build_and_run(
check_cancelled=lambda: False,
newline_on_success=args.newline_on_success,
xml_report=args.xml_report,
build_only=args.build_only,
)
if not errors:
jobset.message("SUCCESS", "All tests passed", do_newline=True)
else:
jobset.message("FAILED", "Some tests failed", do_newline=True)
if not _is_use_docker_child():
# if --use_docker was used, the outer invocation of run_tests.py will
# print the debug info instead.
_print_debug_info_epilogue()
exit_code = 0
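# encode the error types as a bitmask: 1=build, 2=test, 4=post-test failures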
if BuildAndRunError.BUILD in errors:
exit_code |= 1
if BuildAndRunError.TEST in errors:
exit_code |= 2
if BuildAndRunError.POST_TEST in errors:
exit_code |= 4
sys.exit(exit_code)
| 64,499
| 32.523909
| 147
|
py
|
grpc
|
grpc-master/tools/run_tests/lb_interop_tests/gen_build_yaml.py
|
#!/usr/bin/env python3
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate JSON data for LB interop test scenarios."""
import json
import os
import yaml
all_scenarios = []
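# Each generate_* function below returns a list of scenario dicts; they are
# appended to all_scenarios and dumped as YAML at the end of this script.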
# TODO(https://github.com/grpc/grpc-go/issues/2347): enable
# client_falls_back_because_no_backends_* scenarios for Java/Go.
# TODO(https://github.com/grpc/grpc-java/issues/4887): enable
# *short_stream* scenarios for Java.
# TODO(https://github.com/grpc/grpc-java/issues/4912): enable
# Java TLS tests involving TLS to the balancer.
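# Maps the client-side transport security to the (balancer, backend, fallback)
# server security settings; google_default_credentials implies ALTS towards the
# balancer/backends and TLS towards the fallback.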
def server_sec(transport_sec):
if transport_sec == "google_default_credentials":
return "alts", "alts", "tls"
return transport_sec, transport_sec, transport_sec
def generate_no_balancer_because_lb_a_record_returns_nx_domain():
all_configs = []
for transport_sec in [
"insecure",
"alts",
"tls",
"google_default_credentials",
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
config = {
"name": "no_balancer_because_lb_a_record_returns_nx_domain_%s"
% transport_sec,
"skip_langs": [],
"transport_sec": transport_sec,
"balancer_configs": [],
"backend_configs": [],
"fallback_configs": [
{
"transport_sec": fallback_sec,
}
],
"cause_no_error_no_data_for_balancer_a_record": False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_no_balancer_because_lb_a_record_returns_nx_domain()
def generate_no_balancer_because_lb_a_record_returns_no_data():
all_configs = []
for transport_sec in [
"insecure",
"alts",
"tls",
"google_default_credentials",
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
config = {
"name": "no_balancer_because_lb_a_record_returns_no_data_%s"
% transport_sec,
"skip_langs": [],
"transport_sec": transport_sec,
"balancer_configs": [],
"backend_configs": [],
"fallback_configs": [
{
"transport_sec": fallback_sec,
}
],
"cause_no_error_no_data_for_balancer_a_record": True,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_no_balancer_because_lb_a_record_returns_no_data()
def generate_client_referred_to_backend():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in [
"insecure",
"alts",
"tls",
"google_default_credentials",
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == "tls":
skip_langs += ["java"]
if balancer_short_stream:
skip_langs += ["java"]
config = {
"name": "client_referred_to_backend_%s_short_stream_%s"
% (transport_sec, balancer_short_stream),
"skip_langs": skip_langs,
"transport_sec": transport_sec,
"balancer_configs": [
{
"transport_sec": balancer_sec,
"short_stream": balancer_short_stream,
}
],
"backend_configs": [
{
"transport_sec": backend_sec,
}
],
"fallback_configs": [],
"cause_no_error_no_data_for_balancer_a_record": False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_referred_to_backend()
def generate_client_referred_to_backend_fallback_broken():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in ["alts", "tls", "google_default_credentials"]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == "tls":
skip_langs += ["java"]
if balancer_short_stream:
skip_langs += ["java"]
config = {
"name": "client_referred_to_backend_fallback_broken_%s_short_stream_%s"
% (transport_sec, balancer_short_stream),
"skip_langs": skip_langs,
"transport_sec": transport_sec,
"balancer_configs": [
{
"transport_sec": balancer_sec,
"short_stream": balancer_short_stream,
}
],
"backend_configs": [
{
"transport_sec": backend_sec,
}
],
"fallback_configs": [
{
"transport_sec": "insecure",
}
],
"cause_no_error_no_data_for_balancer_a_record": False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_referred_to_backend_fallback_broken()
def generate_client_referred_to_backend_multiple_backends():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in [
"insecure",
"alts",
"tls",
"google_default_credentials",
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == "tls":
skip_langs += ["java"]
if balancer_short_stream:
skip_langs += ["java"]
config = {
"name": "client_referred_to_backend_multiple_backends_%s_short_stream_%s"
% (transport_sec, balancer_short_stream),
"skip_langs": skip_langs,
"transport_sec": transport_sec,
"balancer_configs": [
{
"transport_sec": balancer_sec,
"short_stream": balancer_short_stream,
}
],
"backend_configs": [
{
"transport_sec": backend_sec,
},
{
"transport_sec": backend_sec,
},
{
"transport_sec": backend_sec,
},
{
"transport_sec": backend_sec,
},
{
"transport_sec": backend_sec,
},
],
"fallback_configs": [],
"cause_no_error_no_data_for_balancer_a_record": False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_referred_to_backend_multiple_backends()
def generate_client_falls_back_because_no_backends():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in [
"insecure",
"alts",
"tls",
"google_default_credentials",
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = ["go", "java"]
if transport_sec == "tls":
skip_langs += ["java"]
if balancer_short_stream:
skip_langs += ["java"]
config = {
"name": (
"client_falls_back_because_no_backends_%s_short_stream_%s"
)
% (transport_sec, balancer_short_stream),
"skip_langs": skip_langs,
"transport_sec": transport_sec,
"balancer_configs": [
{
"transport_sec": balancer_sec,
"short_stream": balancer_short_stream,
}
],
"backend_configs": [],
"fallback_configs": [
{
"transport_sec": fallback_sec,
}
],
"cause_no_error_no_data_for_balancer_a_record": False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_falls_back_because_no_backends()
def generate_client_falls_back_because_balancer_connection_broken():
all_configs = []
for transport_sec in ["alts", "tls", "google_default_credentials"]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == "tls":
skip_langs = ["java"]
config = {
"name": "client_falls_back_because_balancer_connection_broken_%s"
% transport_sec,
"skip_langs": skip_langs,
"transport_sec": transport_sec,
"balancer_configs": [
{
"transport_sec": "insecure",
"short_stream": False,
}
],
"backend_configs": [],
"fallback_configs": [
{
"transport_sec": fallback_sec,
}
],
"cause_no_error_no_data_for_balancer_a_record": False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_falls_back_because_balancer_connection_broken()
def generate_client_referred_to_backend_multiple_balancers():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in [
"insecure",
"alts",
"tls",
"google_default_credentials",
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == "tls":
skip_langs += ["java"]
if balancer_short_stream:
skip_langs += ["java"]
config = {
"name": "client_referred_to_backend_multiple_balancers_%s_short_stream_%s"
% (transport_sec, balancer_short_stream),
"skip_langs": skip_langs,
"transport_sec": transport_sec,
"balancer_configs": [
{
"transport_sec": balancer_sec,
"short_stream": balancer_short_stream,
},
{
"transport_sec": balancer_sec,
"short_stream": balancer_short_stream,
},
{
"transport_sec": balancer_sec,
"short_stream": balancer_short_stream,
},
{
"transport_sec": balancer_sec,
"short_stream": balancer_short_stream,
},
{
"transport_sec": balancer_sec,
"short_stream": balancer_short_stream,
},
],
"backend_configs": [
{
"transport_sec": backend_sec,
},
],
"fallback_configs": [],
"cause_no_error_no_data_for_balancer_a_record": False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_referred_to_backend_multiple_balancers()
print(
(
yaml.dump(
{
"lb_interop_test_scenarios": all_scenarios,
}
)
)
)
| 12,557
| 32.488
| 90
|
py
|
grpc
|
grpc-master/tools/run_tests/sanity/check_version.py
|
#!/usr/bin/env python3
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import subprocess
import sys
import yaml
errors = 0
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), "../../.."))
# hack import paths to pick up extra code
sys.path.insert(0, os.path.abspath("tools/buildgen/plugins"))
from expand_version import Version
try:
branch_name = subprocess.check_output(
"git rev-parse --abbrev-ref HEAD", shell=True
).decode()
except:
print("WARNING: not a git repository")
branch_name = None
if branch_name is not None:
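    # release branches are named "release-MAJOR_MINOR"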
m = re.match(r"^release-([0-9]+)_([0-9]+)$", branch_name)
if m:
print("RELEASE branch")
# version number should align with the branched version
check_version = lambda version: (
version.major == int(m.group(1))
and version.minor == int(m.group(2))
)
warning = (
'Version key "%%s" value "%%s" should have a major version %s and'
" minor version %s" % (m.group(1), m.group(2))
)
elif re.match(r"^debian/.*$", branch_name):
# no additional version checks for debian branches
check_version = lambda version: True
else:
# all other branches should have a -dev tag
check_version = lambda version: version.tag == "dev"
warning = 'Version key "%s" value "%s" should have a -dev tag'
else:
check_version = lambda version: True
with open("build_handwritten.yaml", "r") as f:
build_yaml = yaml.safe_load(f.read())
settings = build_yaml["settings"]
top_version = Version(settings["version"])
if not check_version(top_version):
errors += 1
print((warning % ("version", top_version)))
for tag, value in list(settings.items()):
if re.match(r"^[a-z]+_version$", tag):
value = Version(value)
if tag != "core_version":
if value.major != top_version.major:
errors += 1
print(
"major version mismatch on %s: %d vs %d"
% (tag, value.major, top_version.major)
)
if value.minor != top_version.minor:
errors += 1
print(
"minor version mismatch on %s: %d vs %d"
% (tag, value.minor, top_version.minor)
)
if not check_version(value):
errors += 1
print((warning % (tag, value)))
sys.exit(errors)
| 2,998
| 30.904255
| 78
|
py
|
grpc
|
grpc-master/tools/run_tests/sanity/check_qps_scenario_changes.py
|
#!/usr/bin/env python3
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), "../../../test/cpp/qps"))
subprocess.check_call(["./json_run_localhost_scenario_gen.py"])
subprocess.check_call(["./qps_json_driver_scenario_gen.py"])
subprocess.check_call(["buildifier", "-v", "-r", "."])
output = subprocess.check_output(["git", "status", "--porcelain"]).decode()
qps_json_driver_bzl = "test/cpp/qps/qps_json_driver_scenarios.bzl"
json_run_localhost_bzl = "test/cpp/qps/json_run_localhost_scenarios.bzl"
if qps_json_driver_bzl in output or json_run_localhost_bzl in output:
print(
"qps benchmark scenarios have been updated, please commit "
"test/cpp/qps/qps_json_driver_scenarios.bzl and/or "
"test/cpp/qps/json_run_localhost_scenarios.bzl"
)
sys.exit(1)
| 1,406
| 37.027027
| 77
|
py
|
grpc
|
grpc-master/tools/run_tests/sanity/core_banned_functions.py
|
#!/usr/bin/env python3
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Explicitly ban select functions from being used in src/core/**.
Most of these functions have internal versions that should be used instead."""
import os
import sys
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), "../../.."))
# map of banned function signature to allowlist
BANNED_EXCEPT = {
"grpc_slice_from_static_buffer(": ["src/core/lib/slice/slice.cc"],
"grpc_resource_quota_ref(": ["src/core/lib/resource_quota/api.cc"],
"grpc_resource_quota_unref(": [
"src/core/lib/resource_quota/api.cc",
"src/core/lib/surface/server.cc",
],
"grpc_error_create(": [
"src/core/lib/iomgr/error.cc",
"src/core/lib/iomgr/error_cfstream.cc",
],
"grpc_error_ref(": ["src/core/lib/iomgr/error.cc"],
"grpc_error_unref(": ["src/core/lib/iomgr/error.cc"],
"grpc_os_error(": [
"src/core/lib/iomgr/error.cc",
"src/core/lib/iomgr/error.h",
],
"grpc_wsa_error(": [
"src/core/lib/iomgr/error.cc",
"src/core/lib/iomgr/error.h",
],
"grpc_log_if_error(": [
"src/core/lib/iomgr/error.cc",
"src/core/lib/iomgr/error.h",
],
"grpc_slice_malloc(": [
"src/core/lib/slice/slice.cc",
"src/core/lib/slice/slice.h",
],
"grpc_call_cancel(": ["src/core/lib/surface/call.cc"],
"grpc_channel_destroy(": [
"src/core/lib/surface/channel.cc",
"src/core/tsi/alts/handshaker/alts_shared_resource.cc",
],
"grpc_closure_create(": [
"src/core/lib/iomgr/closure.cc",
"src/core/lib/iomgr/closure.h",
],
"grpc_closure_init(": [
"src/core/lib/iomgr/closure.cc",
"src/core/lib/iomgr/closure.h",
],
"grpc_closure_sched(": ["src/core/lib/iomgr/closure.cc"],
"grpc_closure_run(": ["src/core/lib/iomgr/closure.cc"],
"grpc_closure_list_sched(": ["src/core/lib/iomgr/closure.cc"],
"grpc_error*": ["src/core/lib/iomgr/error.cc"],
"grpc_error_string": ["src/core/lib/iomgr/error.cc"],
# use grpc_core::CSlice{Ref,Unref} instead inside core
# (or prefer grpc_core::Slice!)
"grpc_slice_ref(": ["src/core/lib/slice/slice.cc"],
"grpc_slice_unref(": ["src/core/lib/slice/slice.cc"],
# std::random_device needs /dev/random which is not available on all linuxes that we support.
# Any usage must be optional and opt-in, so that those platforms can use gRPC without problem.
"std::random_device": [
"src/core/ext/filters/client_channel/lb_policy/rls/rls.cc",
"src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc",
],
# use 'grpc_core::Crash' instead
"GPR_ASSERT(false": [],
# Use `std::exchange()` instead.
"absl::exchange": [],
# Use `std::make_unique()` instead.
"absl::make_unique": [],
}
errors = 0
num_files = 0
for root, dirs, files in os.walk("src/core"):
if root.startswith("src/core/tsi"):
continue
for filename in files:
num_files += 1
path = os.path.join(root, filename)
if os.path.splitext(path)[1] not in (".h", ".cc"):
continue
with open(path) as f:
text = f.read()
for banned, exceptions in list(BANNED_EXCEPT.items()):
if path in exceptions:
continue
if banned in text:
print(('Illegal use of "%s" in %s' % (banned, path)))
errors += 1
assert errors == 0
# This check comes about from this issue:
# https://github.com/grpc/grpc/issues/15381
# Basically, a change rendered this script useless and we did not realize it.
# This dumb check ensures that this type of issue doesn't occur again.
assert num_files > 300 # we definitely have more than 300 files
| 4,322
| 36.267241
| 98
|
py
|
grpc
|
grpc-master/tools/run_tests/sanity/check_include_style.py
|
#!/usr/bin/env python3
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), "../../.."))
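# Quoted includes of public gRPC headers should use angle brackets instead,
# e.g. '#include "grpc/grpc.h"' -> '#include <grpc/grpc.h>'.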
BAD_REGEXES = [
(r'\n#include "include/(.*)"', r"\n#include <\1>"),
(r'\n#include "grpc(.*)"', r"\n#include <grpc\1>"),
]
fix = sys.argv[1:] == ["--fix"]
if fix:
print("FIXING!")
def check_include_style(directory_root):
bad_files = []
for root, dirs, files in os.walk(directory_root):
for filename in files:
path = os.path.join(root, filename)
if os.path.splitext(path)[1] not in [".c", ".cc", ".h"]:
continue
if filename.endswith(".pb.h") or filename.endswith(".pb.c"):
continue
# Skip check for upb generated code.
if (
filename.endswith(".upb.h")
or filename.endswith(".upb.c")
or filename.endswith(".upbdefs.h")
or filename.endswith(".upbdefs.c")
):
continue
with open(path) as f:
text = f.read()
original = text
for regex, replace in BAD_REGEXES:
text = re.sub(regex, replace, text)
if text != original:
bad_files.append(path)
if fix:
with open(path, "w") as f:
f.write(text)
return bad_files
all_bad_files = []
all_bad_files += check_include_style(os.path.join("src", "core"))
all_bad_files += check_include_style(os.path.join("src", "cpp"))
all_bad_files += check_include_style(os.path.join("test", "core"))
all_bad_files += check_include_style(os.path.join("test", "cpp"))
all_bad_files += check_include_style(os.path.join("include", "grpc"))
all_bad_files += check_include_style(os.path.join("include", "grpcpp"))
if all_bad_files:
for f in all_bad_files:
print("%s has badly formed grpc system header files" % f)
sys.exit(1)
| 2,529
| 32.733333
| 74
|
py
|
grpc
|
grpc-master/tools/run_tests/sanity/check_port_platform.py
|
#!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), "../../.."))
def check_port_platform_inclusion(directory_root, legal_list):
bad_files = []
for root, dirs, files in os.walk(directory_root):
for filename in files:
path = os.path.join(root, filename)
if os.path.splitext(path)[1] not in [".c", ".cc", ".h"]:
continue
if path in [
os.path.join("include", "grpc", "support", "port_platform.h"),
os.path.join(
"include", "grpc", "impl", "codegen", "port_platform.h"
),
]:
continue
if filename.endswith(".pb.h") or filename.endswith(".pb.c"):
continue
# Skip check for upb generated code.
if (
filename.endswith(".upb.h")
or filename.endswith(".upb.c")
or filename.endswith(".upbdefs.h")
or filename.endswith(".upbdefs.c")
):
continue
with open(path) as f:
all_lines_in_file = f.readlines()
for index, l in enumerate(all_lines_in_file):
if "#include" in l:
if l not in legal_list:
bad_files.append(path)
elif all_lines_in_file[index + 1] != "\n":
                            # Require a blank line after including
                            # port_platform.h in order to prevent the formatter
                            # from reordering its inclusion upon future changes.
bad_files.append(path)
break
return bad_files
all_bad_files = []
all_bad_files += check_port_platform_inclusion(
os.path.join("src", "core"),
[
"#include <grpc/support/port_platform.h>\n",
],
)
all_bad_files += check_port_platform_inclusion(
os.path.join("include", "grpc"),
[
"#include <grpc/support/port_platform.h>\n",
"#include <grpc/impl/codegen/port_platform.h>\n",
],
)
if sys.argv[1:] == ["--fix"]:
for path in all_bad_files:
text = ""
found = False
with open(path) as f:
for l in f.readlines():
if not found and "#include" in l:
text += "#include <grpc/support/port_platform.h>\n\n"
found = True
text += l
with open(path, "w") as f:
f.write(text)
else:
if len(all_bad_files) > 0:
for f in all_bad_files:
print(
"port_platform.h is not the first included header or there "
"is not a blank line following its inclusion in %s" % f
)
sys.exit(1)
| 3,417
| 34.237113
| 85
|
py
|
grpc
|
grpc-master/tools/run_tests/sanity/check_test_filtering.py
|
#!/usr/bin/env python3
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
import unittest
# hack import paths to pick up extra code
sys.path.insert(0, os.path.abspath("tools/run_tests/"))
import python_utils.filter_pull_request_tests as filter_pull_request_tests
from run_tests_matrix import _create_portability_test_jobs
from run_tests_matrix import _create_test_jobs
_LIST_OF_LANGUAGE_LABELS = [
"c",
"c++",
"csharp",
"grpc-node",
"objc",
"php",
"php7",
"python",
"ruby",
]
_LIST_OF_PLATFORM_LABELS = ["linux", "macos", "windows"]
_LIST_OF_SANITY_TESTS = ["sanity", "clang-tidy", "iwyu"]
def has_sanity_tests(job):
for test in _LIST_OF_SANITY_TESTS:
if test in job.labels:
return True
return False
class TestFilteringTest(unittest.TestCase):
def generate_all_tests(self):
all_jobs = _create_test_jobs() + _create_portability_test_jobs()
self.assertIsNotNone(all_jobs)
return all_jobs
def test_filtering(self, changed_files=[], labels=_LIST_OF_LANGUAGE_LABELS):
"""
Default args should filter no tests because changed_files is empty and
default labels should be able to match all jobs
:param changed_files: mock list of changed_files from pull request
:param labels: list of job labels that should be skipped
"""
all_jobs = self.generate_all_tests()
# Replacing _get_changed_files function to allow specifying changed files in filter_tests function
def _get_changed_files(foo):
return changed_files
filter_pull_request_tests._get_changed_files = _get_changed_files
print()
filtered_jobs = filter_pull_request_tests.filter_tests(all_jobs, "test")
# Make sure sanity tests aren't being filtered out
sanity_tests_in_all_jobs = 0
sanity_tests_in_filtered_jobs = 0
for job in all_jobs:
if has_sanity_tests(job):
sanity_tests_in_all_jobs += 1
all_jobs = [job for job in all_jobs if has_sanity_tests(job)]
for job in filtered_jobs:
if has_sanity_tests(job):
sanity_tests_in_filtered_jobs += 1
filtered_jobs = [job for job in filtered_jobs if has_sanity_tests(job)]
self.assertEqual(
sanity_tests_in_all_jobs, sanity_tests_in_filtered_jobs
)
for label in labels:
for job in filtered_jobs:
if has_sanity_tests(job):
continue
self.assertNotIn(label, job.labels)
jobs_matching_labels = 0
for label in labels:
for job in all_jobs:
if has_sanity_tests(job):
continue
if label in job.labels:
jobs_matching_labels += 1
self.assertEqual(
len(filtered_jobs), len(all_jobs) - jobs_matching_labels
)
def test_individual_language_filters(self):
# Changing unlisted file should trigger all languages
self.test_filtering(["ffffoo/bar.baz"], [_LIST_OF_LANGUAGE_LABELS])
# Changing core should trigger all tests
self.test_filtering(["src/core/foo.bar"], [_LIST_OF_LANGUAGE_LABELS])
# Testing individual languages
self.test_filtering(
["test/core/foo.bar"],
[
label
for label in _LIST_OF_LANGUAGE_LABELS
if label
not in filter_pull_request_tests._CORE_TEST_SUITE.labels
+ filter_pull_request_tests._CPP_TEST_SUITE.labels
],
)
self.test_filtering(
["src/cpp/foo.bar"],
[
label
for label in _LIST_OF_LANGUAGE_LABELS
if label not in filter_pull_request_tests._CPP_TEST_SUITE.labels
],
)
self.test_filtering(
["src/csharp/foo.bar"],
[
label
for label in _LIST_OF_LANGUAGE_LABELS
if label
not in filter_pull_request_tests._CSHARP_TEST_SUITE.labels
],
)
self.test_filtering(
["src/objective-c/foo.bar"],
[
label
for label in _LIST_OF_LANGUAGE_LABELS
if label
not in filter_pull_request_tests._OBJC_TEST_SUITE.labels
],
)
self.test_filtering(
["src/php/foo.bar"],
[
label
for label in _LIST_OF_LANGUAGE_LABELS
if label not in filter_pull_request_tests._PHP_TEST_SUITE.labels
],
)
self.test_filtering(
["src/python/foo.bar"],
[
label
for label in _LIST_OF_LANGUAGE_LABELS
if label
not in filter_pull_request_tests._PYTHON_TEST_SUITE.labels
],
)
self.test_filtering(
["src/ruby/foo.bar"],
[
label
for label in _LIST_OF_LANGUAGE_LABELS
if label
not in filter_pull_request_tests._RUBY_TEST_SUITE.labels
],
)
def test_combined_language_filters(self):
self.test_filtering(
["src/cpp/foo.bar", "test/core/foo.bar"],
[
label
for label in _LIST_OF_LANGUAGE_LABELS
if label not in filter_pull_request_tests._CPP_TEST_SUITE.labels
and label
not in filter_pull_request_tests._CORE_TEST_SUITE.labels
],
)
self.test_filtering(
["src/cpp/foo.bar", "src/csharp/foo.bar"],
[
label
for label in _LIST_OF_LANGUAGE_LABELS
if label not in filter_pull_request_tests._CPP_TEST_SUITE.labels
and label
not in filter_pull_request_tests._CSHARP_TEST_SUITE.labels
],
)
self.test_filtering(
[
"src/objective-c/foo.bar",
"src/php/foo.bar",
"src/python/foo.bar",
"src/ruby/foo.bar",
],
[
label
for label in _LIST_OF_LANGUAGE_LABELS
if label
not in filter_pull_request_tests._OBJC_TEST_SUITE.labels
and label
not in filter_pull_request_tests._PHP_TEST_SUITE.labels
and label
not in filter_pull_request_tests._PYTHON_TEST_SUITE.labels
and label
not in filter_pull_request_tests._RUBY_TEST_SUITE.labels
],
)
def test_platform_filter(self):
self.test_filtering(
["vsprojects/foo.bar"],
[
label
for label in _LIST_OF_PLATFORM_LABELS
if label
not in filter_pull_request_tests._WINDOWS_TEST_SUITE.labels
],
)
def test_allowlist(self):
allowlist = filter_pull_request_tests._ALLOWLIST_DICT
files_that_should_trigger_all_tests = [
"src/core/foo.bar",
"some_file_not_on_the_white_list",
"BUILD",
"etc/roots.pem",
"Makefile",
"tools/foo",
]
for key in list(allowlist.keys()):
for file_name in files_that_should_trigger_all_tests:
self.assertFalse(re.match(key, file_name))
if __name__ == "__main__":
unittest.main(verbosity=2)
| 8,231
| 32.737705
| 106
|
py
|
grpc
|
grpc-master/tools/run_tests/sanity/check_bazel_workspace.py
|
#!/usr/bin/env python3
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import os
import re
import subprocess
import sys
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), "../../.."))
git_hash_pattern = re.compile("[0-9a-f]{40}")
# Parse git hashes from submodules
git_submodules = (
subprocess.check_output("git submodule", shell=True)
.decode()
.strip()
.split("\n")
)
git_submodule_hashes = {
re.search(git_hash_pattern, s).group() for s in git_submodules
}
_BAZEL_SKYLIB_DEP_NAME = "bazel_skylib"
_BAZEL_TOOLCHAINS_DEP_NAME = "bazel_toolchains"
_BAZEL_COMPDB_DEP_NAME = "bazel_compdb"
_TWISTED_TWISTED_DEP_NAME = "com_github_twisted_twisted"
_YAML_PYYAML_DEP_NAME = "com_github_yaml_pyyaml"
_TWISTED_INCREMENTAL_DEP_NAME = "com_github_twisted_incremental"
_ZOPEFOUNDATION_ZOPE_INTERFACE_DEP_NAME = (
"com_github_zopefoundation_zope_interface"
)
_TWISTED_CONSTANTLY_DEP_NAME = "com_github_twisted_constantly"
_GRPC_DEP_NAMES = [
"upb",
"boringssl",
"zlib",
"com_google_protobuf",
"com_google_googletest",
"rules_cc",
"com_github_google_benchmark",
"com_github_cares_cares",
"com_google_absl",
"com_google_fuzztest",
"io_opencensus_cpp",
"io_opentelemetry_cpp",
"envoy_api",
_BAZEL_SKYLIB_DEP_NAME,
_BAZEL_TOOLCHAINS_DEP_NAME,
_BAZEL_COMPDB_DEP_NAME,
_TWISTED_TWISTED_DEP_NAME,
_YAML_PYYAML_DEP_NAME,
_TWISTED_INCREMENTAL_DEP_NAME,
_ZOPEFOUNDATION_ZOPE_INTERFACE_DEP_NAME,
_TWISTED_CONSTANTLY_DEP_NAME,
"io_bazel_rules_go",
"build_bazel_rules_apple",
"build_bazel_apple_support",
"com_github_libuv_libuv",
"com_googlesource_code_re2",
"bazel_gazelle",
"opencensus_proto",
"com_envoyproxy_protoc_gen_validate",
"com_google_googleapis",
"com_google_libprotobuf_mutator",
"com_github_cncf_udpa",
]
_GRPC_BAZEL_ONLY_DEPS = [
"upb", # third_party/upb is checked in locally
"rules_cc",
"com_google_absl",
"com_google_fuzztest",
"io_opencensus_cpp",
"io_opentelemetry_cpp",
_BAZEL_SKYLIB_DEP_NAME,
_BAZEL_TOOLCHAINS_DEP_NAME,
_BAZEL_COMPDB_DEP_NAME,
_TWISTED_TWISTED_DEP_NAME,
_YAML_PYYAML_DEP_NAME,
_TWISTED_INCREMENTAL_DEP_NAME,
_ZOPEFOUNDATION_ZOPE_INTERFACE_DEP_NAME,
_TWISTED_CONSTANTLY_DEP_NAME,
"io_bazel_rules_go",
"build_bazel_rules_apple",
"build_bazel_apple_support",
"com_googlesource_code_re2",
"bazel_gazelle",
"opencensus_proto",
"com_envoyproxy_protoc_gen_validate",
"com_google_googleapis",
"com_google_libprotobuf_mutator",
]
class BazelEvalState(object):
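    """Records the names and URLs of Bazel workspace dependencies.

    Used as the evaluation environment when exec'ing bazel/grpc_deps.bzl below,
    providing the Starlark rule callables (http_archive, git_repository, etc.)
    so the dependency list can be inspected without invoking Bazel.
    """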
def __init__(self, names_and_urls, overridden_name=None):
self.names_and_urls = names_and_urls
self.overridden_name = overridden_name
def http_archive(self, **args):
self.archive(**args)
def new_http_archive(self, **args):
self.archive(**args)
def bind(self, **args):
pass
def existing_rules(self):
if self.overridden_name:
return [self.overridden_name]
return []
def archive(self, **args):
assert self.names_and_urls.get(args["name"]) is None
if args["name"] in _GRPC_BAZEL_ONLY_DEPS:
self.names_and_urls[args["name"]] = "dont care"
return
url = args.get("url", None)
if not url:
# we will only be looking for git commit hashes, so concatenating
# the urls is fine.
url = " ".join(args["urls"])
self.names_and_urls[args["name"]] = url
def git_repository(self, **args):
assert self.names_and_urls.get(args["name"]) is None
if args["name"] in _GRPC_BAZEL_ONLY_DEPS:
self.names_and_urls[args["name"]] = "dont care"
return
self.names_and_urls[args["name"]] = args["remote"]
def grpc_python_deps(self):
pass
# Parse git hashes from bazel/grpc_deps.bzl {new_}http_archive rules
with open(os.path.join("bazel", "grpc_deps.bzl"), "r") as f:
names_and_urls = {}
eval_state = BazelEvalState(names_and_urls)
bazel_file = f.read()
# grpc_deps.bzl only defines 'grpc_deps' and 'grpc_test_only_deps', add these
# lines to call them.
bazel_file += "\ngrpc_deps()\n"
bazel_file += "\ngrpc_test_only_deps()\n"
build_rules = {
"native": eval_state,
"http_archive": lambda **args: eval_state.http_archive(**args),
"load": lambda a, b: None,
"git_repository": lambda **args: eval_state.git_repository(**args),
"grpc_python_deps": lambda: None,
}
exec((bazel_file), build_rules)
for name in _GRPC_DEP_NAMES:
assert name in list(names_and_urls.keys())
assert len(_GRPC_DEP_NAMES) == len(list(names_and_urls.keys()))
# There are some "bazel-only" deps that are exceptions to this sanity check,
# we don't require that there is a corresponding git module for these.
names_without_bazel_only_deps = list(names_and_urls.keys())
for dep_name in _GRPC_BAZEL_ONLY_DEPS:
names_without_bazel_only_deps.remove(dep_name)
archive_urls = [names_and_urls[name] for name in names_without_bazel_only_deps]
workspace_git_hashes = {
re.search(git_hash_pattern, url).group() for url in archive_urls
}
if len(workspace_git_hashes) == 0:
print("(Likely) parse error, did not find any bazel git dependencies.")
sys.exit(1)
# Validate the equivalence of the git submodules and Bazel git dependencies. The
# condition we impose is that there is a git submodule for every dependency in
# the workspace, but not necessarily conversely. E.g. Bloaty is a dependency
# not used by any of the targets built by Bazel.
if len(workspace_git_hashes - git_submodule_hashes) > 0:
print(
"Found discrepancies between git submodules and Bazel WORKSPACE"
" dependencies"
)
print(("workspace_git_hashes: %s" % workspace_git_hashes))
print(("git_submodule_hashes: %s" % git_submodule_hashes))
print(
"workspace_git_hashes - git_submodule_hashes: %s"
% (workspace_git_hashes - git_submodule_hashes)
)
sys.exit(1)
# Also check that we can override each dependency
for name in _GRPC_DEP_NAMES:
names_and_urls_with_overridden_name = {}
state = BazelEvalState(
names_and_urls_with_overridden_name, overridden_name=name
)
rules = {
"native": state,
"http_archive": lambda **args: state.http_archive(**args),
"load": lambda a, b: None,
"git_repository": lambda **args: state.git_repository(**args),
"grpc_python_deps": lambda *args, **kwargs: None,
}
exec((bazel_file), rules)
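    # With the dependency already reported by existing_rules(), grpc_deps()
    # is expected to skip re-defining it, so it must not be collected again.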
assert name not in list(names_and_urls_with_overridden_name.keys())
sys.exit(0)
| 7,211
| 31.486486
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/sanity/check_banned_filenames.py
|
#!/usr/bin/env python3
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
BANNED_FILENAMES = [
"BUILD.gn",
]
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), "../../.."))
bad = []
for filename in BANNED_FILENAMES:
if os.path.exists(filename):
bad.append(filename)
if bad:
for file in bad:
print("%s should not exist" % file)
sys.exit(1)
| 925
| 25.457143
| 74
|
py
|
grpc
|
grpc-master/tools/run_tests/sanity/check_tracer_sanity.py
|
#!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), "../../.."))
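# Collect every tracer name registered via GRPC_TRACER_INITIALIZER under
# src/core and check that each is documented in doc/environment_variables.md.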
errors = 0
tracers = []
pattern = re.compile(r'GRPC_TRACER_INITIALIZER\((true|false), "(.*)"\)')
for root, dirs, files in os.walk("src/core"):
for filename in files:
path = os.path.join(root, filename)
if os.path.splitext(path)[1] != ".c":
continue
with open(path) as f:
text = f.read()
for o in pattern.findall(text):
tracers.append(o[1])
with open("doc/environment_variables.md") as f:
text = f.read()
for t in tracers:
if t not in text:
print(
'ERROR: tracer "%s" is not mentioned in'
" doc/environment_variables.md" % t
)
errors += 1
assert errors == 0
| 1,387
| 27.916667
| 74
|
py
|
grpc
|
grpc-master/tools/run_tests/sanity/check_package_name.py
|
#!/usr/bin/env python3
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), "../../.."))
# Allowance for overrides for specific files
EXPECTED_NAMES = {
"src/proto/grpc/channelz": "channelz",
"src/proto/grpc/status": "status",
"src/proto/grpc/testing": "testing",
"src/proto/grpc/testing/duplicate": "duplicate",
"src/proto/grpc/lb/v1": "lb",
"src/proto/grpc/testing/xds": "xds",
"src/proto/grpc/testing/xds/v3": "xds_v3",
"src/proto/grpc/core": "core",
"src/proto/grpc/health/v1": "health",
"src/proto/grpc/reflection/v1alpha": "reflection",
"src/proto/grpc/reflection/v1": "reflection_v1",
}
errors = 0
for root, dirs, files in os.walk("."):
if root.startswith("./"):
root = root[len("./") :]
# don't check third party
if root.startswith("third_party/"):
continue
# only check BUILD files
if "BUILD" not in files:
continue
text = open("%s/BUILD" % root).read()
# find a grpc_package clause
pkg_start = text.find("grpc_package(")
if pkg_start == -1:
continue
# parse it, taking into account nested parens
pkg_end = pkg_start + len("grpc_package(")
level = 1
    while level > 0:
if text[pkg_end] == ")":
level -= 1
elif text[pkg_end] == "(":
level += 1
pkg_end += 1
# it's a python statement, so evaluate it to pull out the name of the package
name = eval(
text[pkg_start:pkg_end], {"grpc_package": lambda name, **kwargs: name}
)
# the name should be the path within the source tree, excepting some special
# BUILD files (really we should normalize them too at some point)
# TODO(ctiller): normalize all package names
expected_name = EXPECTED_NAMES.get(root, root)
if name != expected_name:
print(
"%s/BUILD should define a grpc_package with name=%r, not %r"
% (root, expected_name, name)
)
errors += 1
if errors != 0:
sys.exit(1)
| 2,600
| 32.346154
| 81
|
py
|
grpc
|
grpc-master/tools/run_tests/sanity/check_deprecated_grpc++.py
|
#!/usr/bin/env python3
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), "../../.."))
expected_files = [
"include/grpc++/create_channel_posix.h",
"include/grpc++/server_builder.h",
"include/grpc++/resource_quota.h",
"include/grpc++/create_channel.h",
"include/grpc++/alarm.h",
"include/grpc++/server.h",
"include/grpc++/server_context.h",
"include/grpc++/client_context.h",
"include/grpc++/server_posix.h",
"include/grpc++/grpc++.h",
"include/grpc++/health_check_service_interface.h",
"include/grpc++/completion_queue.h",
"include/grpc++/channel.h",
"include/grpc++/support/sync_stream.h",
"include/grpc++/support/status.h",
"include/grpc++/support/config.h",
"include/grpc++/support/status_code_enum.h",
"include/grpc++/support/byte_buffer.h",
"include/grpc++/support/error_details.h",
"include/grpc++/support/async_unary_call.h",
"include/grpc++/support/channel_arguments.h",
"include/grpc++/support/async_stream.h",
"include/grpc++/support/slice.h",
"include/grpc++/support/stub_options.h",
"include/grpc++/support/string_ref.h",
"include/grpc++/support/time.h",
"include/grpc++/security/auth_metadata_processor.h",
"include/grpc++/security/credentials.h",
"include/grpc++/security/server_credentials.h",
"include/grpc++/security/auth_context.h",
"include/grpc++/impl/rpc_method.h",
"include/grpc++/impl/server_builder_option.h",
"include/grpc++/impl/call.h",
"include/grpc++/impl/service_type.h",
"include/grpc++/impl/grpc_library.h",
"include/grpc++/impl/client_unary_call.h",
"include/grpc++/impl/channel_argument_option.h",
"include/grpc++/impl/rpc_service_method.h",
"include/grpc++/impl/method_handler_impl.h",
"include/grpc++/impl/server_builder_plugin.h",
"include/grpc++/impl/server_initializer.h",
"include/grpc++/impl/serialization_traits.h",
"include/grpc++/impl/codegen/sync_stream.h",
"include/grpc++/impl/codegen/channel_interface.h",
"include/grpc++/impl/codegen/config_protobuf.h",
"include/grpc++/impl/codegen/status.h",
"include/grpc++/impl/codegen/config.h",
"include/grpc++/impl/codegen/status_code_enum.h",
"include/grpc++/impl/codegen/metadata_map.h",
"include/grpc++/impl/codegen/rpc_method.h",
"include/grpc++/impl/codegen/server_context.h",
"include/grpc++/impl/codegen/byte_buffer.h",
"include/grpc++/impl/codegen/async_unary_call.h",
"include/grpc++/impl/codegen/server_interface.h",
"include/grpc++/impl/codegen/call.h",
"include/grpc++/impl/codegen/client_context.h",
"include/grpc++/impl/codegen/service_type.h",
"include/grpc++/impl/codegen/async_stream.h",
"include/grpc++/impl/codegen/slice.h",
"include/grpc++/impl/codegen/client_unary_call.h",
"include/grpc++/impl/codegen/proto_utils.h",
"include/grpc++/impl/codegen/stub_options.h",
"include/grpc++/impl/codegen/rpc_service_method.h",
"include/grpc++/impl/codegen/method_handler_impl.h",
"include/grpc++/impl/codegen/string_ref.h",
"include/grpc++/impl/codegen/completion_queue_tag.h",
"include/grpc++/impl/codegen/call_hook.h",
"include/grpc++/impl/codegen/completion_queue.h",
"include/grpc++/impl/codegen/serialization_traits.h",
"include/grpc++/impl/codegen/create_auth_context.h",
"include/grpc++/impl/codegen/time.h",
"include/grpc++/impl/codegen/security/auth_context.h",
"include/grpc++/ext/health_check_service_server_builder_option.h",
"include/grpc++/ext/proto_server_reflection_plugin.h",
"include/grpc++/generic/async_generic_service.h",
"include/grpc++/generic/generic_stub.h",
"include/grpc++/test/mock_stream.h",
"include/grpc++/test/server_context_test_spouse.h",
]
file_template = """//
//
// Copyright 2018 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//
// DEPRECATED: The headers in include/grpc++ are deprecated. Please include the
// headers in include/grpcpp instead. This header exists only for backwards
// compatibility.
#ifndef GRPCXX_FILE_PATH_NAME_UPPER
#define GRPCXX_FILE_PATH_NAME_UPPER
#include <grpcpp/FILE_PATH_NAME_LOWER>
#endif // GRPCXX_FILE_PATH_NAME_UPPER
"""
errors = 0
path_files = []
for root, dirs, files in os.walk("include/grpc++"):
for filename in files:
path_file = os.path.join(root, filename)
path_files.append(path_file)
if sorted(path_files) != sorted(expected_files):
diff_plus = [file for file in path_files if file not in expected_files]
diff_minus = [file for file in expected_files if file not in path_files]
for file in diff_minus:
print(("- ", file))
for file in diff_plus:
print(("+ ", file))
errors += 1
if errors > 0:
sys.exit(errors)
for path_file in expected_files:
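    # Derive the replacement include path and guard macro from the legacy path,
    # e.g. include/grpc++/support/status.h maps to <grpcpp/support/status.h>
    # guarded by GRPCXX_SUPPORT_STATUS_H.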
relative_path_file = path_file.split("/", 2)[2]
replace_lower = relative_path_file.replace("+", "p")
replace_upper = relative_path_file.replace("/", "_")
replace_upper = replace_upper.replace(".", "_")
replace_upper = replace_upper.upper().replace("+", "X")
expected_content = file_template.replace(
"FILE_PATH_NAME_LOWER", replace_lower
)
expected_content = expected_content.replace(
"FILE_PATH_NAME_UPPER", replace_upper
)
path_file_expected = path_file + ".expected"
with open(path_file_expected, "w") as fo:
fo.write(expected_content)
if 0 != os.system("diff %s %s" % (path_file_expected, path_file)):
print(("Difference found in file:", path_file))
errors += 1
os.remove(path_file_expected)
check_extensions = [".h", ".cc", ".c", ".m"]
for root, dirs, files in os.walk("src"):
for filename in files:
path_file = os.path.join(root, filename)
for ext in check_extensions:
if path_file.endswith(ext):
try:
with open(path_file, "r") as fi:
content = fi.read()
if "#include <grpc++/" in content:
print(
"Failed: invalid include of deprecated headers"
" in include/grpc++ in %s" % path_file
)
errors += 1
except IOError:
pass
sys.exit(errors)
| 7,480
| 36.592965
| 79
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/bin/run_test_client.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import signal
from absl import app
from absl import flags
from bin.lib import common
from framework import xds_flags
from framework import xds_k8s_flags
from framework.infrastructure import gcp
from framework.infrastructure import k8s
logger = logging.getLogger(__name__)
# Flags
_CMD = flags.DEFINE_enum(
"cmd", default="run", enum_values=["run", "cleanup"], help="Command"
)
_SECURE = flags.DEFINE_bool(
"secure", default=False, help="Run client in the secure mode"
)
_QPS = flags.DEFINE_integer("qps", default=25, help="Queries per second")
_PRINT_RESPONSE = flags.DEFINE_bool(
"print_response", default=False, help="Client prints responses"
)
_FOLLOW = flags.DEFINE_bool(
"follow",
default=False,
help=(
"Follow pod logs. Requires --collect_app_logs or"
" --debug_use_port_forwarding"
),
)
_CONFIG_MESH = flags.DEFINE_bool(
"config_mesh",
default=None,
help="Optional. Supplied to bootstrap generator to indicate AppNet mesh.",
)
_REUSE_NAMESPACE = flags.DEFINE_bool(
"reuse_namespace", default=True, help="Use existing namespace if exists"
)
_CLEANUP_NAMESPACE = flags.DEFINE_bool(
"cleanup_namespace",
default=False,
help="Delete namespace during resource cleanup",
)
flags.adopt_module_key_flags(xds_flags)
flags.adopt_module_key_flags(xds_k8s_flags)
# Running outside of a test suite, so require explicit resource_suffix.
flags.mark_flag_as_required(xds_flags.RESOURCE_SUFFIX.name)
@flags.multi_flags_validator(
(xds_flags.SERVER_XDS_PORT.name, _CMD.name),
message=(
"Run outside of a test suite, must provide"
" the exact port value (must be greater than 0)."
),
)
def _check_server_xds_port_flag(flags_dict):
if flags_dict[_CMD.name] == "cleanup":
return True
return flags_dict[xds_flags.SERVER_XDS_PORT.name] > 0
def _make_sigint_handler(client_runner: common.KubernetesClientRunner):
def sigint_handler(sig, frame):
del sig, frame
print("Caught Ctrl+C. Shutting down the logs")
client_runner.stop_pod_dependencies(log_drain_sec=3)
return sigint_handler
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
# Must be called before KubernetesApiManager or GcpApiManager init.
xds_flags.set_socket_default_timeout_from_flag()
# Log following and port forwarding.
should_follow_logs = _FOLLOW.value and xds_flags.COLLECT_APP_LOGS.value
should_port_forward = (
should_follow_logs and xds_k8s_flags.DEBUG_USE_PORT_FORWARDING.value
)
# Setup.
gcp_api_manager = gcp.api.GcpApiManager()
k8s_api_manager = k8s.KubernetesApiManager(xds_k8s_flags.KUBE_CONTEXT.value)
client_namespace = common.make_client_namespace(k8s_api_manager)
client_runner = common.make_client_runner(
client_namespace,
gcp_api_manager,
reuse_namespace=_REUSE_NAMESPACE.value,
secure=_SECURE.value,
port_forwarding=should_port_forward,
)
# Server target
server_xds_host = xds_flags.SERVER_XDS_HOST.value
server_xds_port = xds_flags.SERVER_XDS_PORT.value
if _CMD.value == "run":
logger.info("Run client, secure_mode=%s", _SECURE.value)
client_runner.run(
server_target=f"xds:///{server_xds_host}:{server_xds_port}",
qps=_QPS.value,
print_response=_PRINT_RESPONSE.value,
secure_mode=_SECURE.value,
config_mesh=_CONFIG_MESH.value,
log_to_stdout=_FOLLOW.value,
)
if should_follow_logs:
print("Following pod logs. Press Ctrl+C top stop")
signal.signal(signal.SIGINT, _make_sigint_handler(client_runner))
signal.pause()
elif _CMD.value == "cleanup":
logger.info("Cleanup client")
client_runner.cleanup(
force=True, force_namespace=_CLEANUP_NAMESPACE.value
)
if __name__ == "__main__":
app.run(main)
| 4,570
| 31.65
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/bin/run_ping_pong.py
|
# Copyright 2023 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import app
from absl import flags
from absl import logging
from bin.lib import common
from framework import xds_flags
from framework import xds_k8s_flags
from framework.helpers import grpc as helpers_grpc
import framework.helpers.highlighter
from framework.infrastructure import gcp
from framework.infrastructure import k8s
from framework.rpc import grpc_channelz
from framework.rpc import grpc_testing
from framework.test_app import client_app
from framework.test_app import server_app
# Flags
_SECURE = flags.DEFINE_bool(
"secure",
default=False,
help=(
"Set to True if the the client/server were started "
"with the PSM security enabled."
),
)
_NUM_RPCS = flags.DEFINE_integer(
"num_rpcs",
default=100,
lower_bound=1,
upper_bound=10_000,
help="The number of RPCs to check.",
)
flags.adopt_module_key_flags(xds_flags)
flags.adopt_module_key_flags(xds_k8s_flags)
# Running outside of a test suite, so require explicit resource_suffix.
flags.mark_flag_as_required(xds_flags.RESOURCE_SUFFIX.name)
flags.register_validator(
xds_flags.SERVER_XDS_PORT.name,
lambda val: val > 0,
message=(
"Run outside of a test suite, must provide"
" the exact port value (must be greater than 0)."
),
)
logger = logging.get_absl_logger()
# Type aliases
_Channel = grpc_channelz.Channel
_Socket = grpc_channelz.Socket
_ChannelState = grpc_channelz.ChannelState
_XdsTestServer = server_app.XdsTestServer
_XdsTestClient = client_app.XdsTestClient
LoadBalancerStatsResponse = grpc_testing.LoadBalancerStatsResponse
def get_client_rpc_stats(
test_client: _XdsTestClient, num_rpcs: int
) -> LoadBalancerStatsResponse:
lb_stats = test_client.get_load_balancer_stats(num_rpcs=num_rpcs)
hl = framework.helpers.highlighter.HighlighterYaml()
logger.info(
"[%s] Received LoadBalancerStatsResponse:\n%s",
test_client.hostname,
hl.highlight(helpers_grpc.lb_stats_pretty(lb_stats)),
)
return lb_stats
def run_ping_pong(test_client: _XdsTestClient, num_rpcs: int):
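    """Sends num_rpcs RPCs through the test client and asserts that every
    backend received at least one RPC and that none of the RPCs failed."""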
test_client.wait_for_active_server_channel()
lb_stats = get_client_rpc_stats(test_client, num_rpcs)
for backend, rpcs_count in lb_stats.rpcs_by_peer.items():
if int(rpcs_count) < 1:
raise AssertionError(
f"Backend {backend} did not receive a single RPC"
)
failed = int(lb_stats.num_failures)
if int(lb_stats.num_failures) > 0:
raise AssertionError(
f"Expected all RPCs to succeed: {failed} of {num_rpcs} failed"
)
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
# Must be called before KubernetesApiManager or GcpApiManager init.
xds_flags.set_socket_default_timeout_from_flag()
# Flags.
should_port_forward: bool = xds_k8s_flags.DEBUG_USE_PORT_FORWARDING.value
is_secure: bool = _SECURE.value
# Setup.
gcp_api_manager = gcp.api.GcpApiManager()
k8s_api_manager = k8s.KubernetesApiManager(xds_k8s_flags.KUBE_CONTEXT.value)
# Server.
server_namespace = common.make_server_namespace(k8s_api_manager)
server_runner = common.make_server_runner(
server_namespace,
gcp_api_manager,
port_forwarding=should_port_forward,
secure=is_secure,
)
# Find server pod.
server_pod: k8s.V1Pod = common.get_server_pod(
server_runner, xds_flags.SERVER_NAME.value
)
# Client
client_namespace = common.make_client_namespace(k8s_api_manager)
client_runner = common.make_client_runner(
client_namespace,
gcp_api_manager,
port_forwarding=should_port_forward,
secure=is_secure,
)
# Find client pod.
client_pod: k8s.V1Pod = common.get_client_pod(
client_runner, xds_flags.CLIENT_NAME.value
)
# Ensure port forwarding stopped.
common.register_graceful_exit(server_runner, client_runner)
# Create server app for the server pod.
test_server: _XdsTestServer = common.get_test_server_for_pod(
server_runner,
server_pod,
test_port=xds_flags.SERVER_PORT.value,
secure_mode=is_secure,
)
test_server.set_xds_address(
xds_flags.SERVER_XDS_HOST.value, xds_flags.SERVER_XDS_PORT.value
)
# Create client app for the client pod.
test_client: _XdsTestClient = common.get_test_client_for_pod(
client_runner, client_pod, server_target=test_server.xds_uri
)
with test_client, test_server:
run_ping_pong(test_client, _NUM_RPCS.value)
logger.info("SUCCESS!")
if __name__ == "__main__":
app.run(main)
| 5,250
| 30.443114
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/bin/run_test_server.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import signal
from absl import app
from absl import flags
from bin.lib import common
from framework import xds_flags
from framework import xds_k8s_flags
from framework.infrastructure import gcp
from framework.infrastructure import k8s
logger = logging.getLogger(__name__)
# Flags
_CMD = flags.DEFINE_enum(
"cmd", default="run", enum_values=["run", "cleanup"], help="Command"
)
_SECURE = flags.DEFINE_bool(
"secure", default=False, help="Run server in the secure mode"
)
_REUSE_NAMESPACE = flags.DEFINE_bool(
"reuse_namespace", default=True, help="Use existing namespace if exists"
)
_REUSE_SERVICE = flags.DEFINE_bool(
"reuse_service", default=False, help="Use existing service if exists"
)
_FOLLOW = flags.DEFINE_bool(
"follow", default=False, help="Follow pod logs. Requires --collect_app_logs"
)
_CLEANUP_NAMESPACE = flags.DEFINE_bool(
"cleanup_namespace",
default=False,
help="Delete namespace during resource cleanup",
)
flags.adopt_module_key_flags(xds_flags)
flags.adopt_module_key_flags(xds_k8s_flags)
# Running outside of a test suite, so require explicit resource_suffix.
flags.mark_flag_as_required("resource_suffix")
def _make_sigint_handler(server_runner: common.KubernetesServerRunner):
def sigint_handler(sig, frame):
del sig, frame
print("Caught Ctrl+C. Shutting down the logs")
server_runner.stop_pod_dependencies(log_drain_sec=3)
return sigint_handler
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
# Must be called before KubernetesApiManager or GcpApiManager init.
xds_flags.set_socket_default_timeout_from_flag()
should_follow_logs = _FOLLOW.value and xds_flags.COLLECT_APP_LOGS.value
should_port_forward = (
should_follow_logs and xds_k8s_flags.DEBUG_USE_PORT_FORWARDING.value
)
# Setup.
gcp_api_manager = gcp.api.GcpApiManager()
k8s_api_manager = k8s.KubernetesApiManager(xds_k8s_flags.KUBE_CONTEXT.value)
server_namespace = common.make_server_namespace(k8s_api_manager)
server_runner = common.make_server_runner(
server_namespace,
gcp_api_manager,
reuse_namespace=_REUSE_NAMESPACE.value,
reuse_service=_REUSE_SERVICE.value,
secure=_SECURE.value,
port_forwarding=should_port_forward,
)
if _CMD.value == "run":
logger.info("Run server, secure_mode=%s", _SECURE.value)
server_runner.run(
test_port=xds_flags.SERVER_PORT.value,
maintenance_port=xds_flags.SERVER_MAINTENANCE_PORT.value,
secure_mode=_SECURE.value,
log_to_stdout=_FOLLOW.value,
)
if should_follow_logs:
print("Following pod logs. Press Ctrl+C top stop")
signal.signal(signal.SIGINT, _make_sigint_handler(server_runner))
signal.pause()
elif _CMD.value == "cleanup":
logger.info("Cleanup server")
server_runner.cleanup(
force=True, force_namespace=_CLEANUP_NAMESPACE.value
)
if __name__ == "__main__":
app.run(main)
| 3,689
| 32.545455
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/bin/run_channelz.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Channelz debugging tool for xDS test client/server.
This is intended as a debugging / local development helper and not executed
as a part of interop test suites.
Typical usage examples:
# Show channel and server socket pair
python -m bin.run_channelz --flagfile=config/local-dev.cfg
# Evaluate setup for different security configurations
python -m bin.run_channelz --flagfile=config/local-dev.cfg --security=tls
python -m bin.run_channelz --flagfile=config/local-dev.cfg --security=mtls_error
# More information and usage options
python -m bin.run_channelz --helpfull
"""
import hashlib
from absl import app
from absl import flags
from absl import logging
from bin.lib import common
from framework import xds_flags
from framework import xds_k8s_flags
from framework.infrastructure import gcp
from framework.infrastructure import k8s
from framework.rpc import grpc_channelz
from framework.test_app import client_app
from framework.test_app import server_app
# Flags
_SECURITY = flags.DEFINE_enum(
"security",
default=None,
enum_values=[
"mtls",
"tls",
"plaintext",
"mtls_error",
"server_authz_error",
],
help="Show info for a security setup",
)
flags.adopt_module_key_flags(xds_flags)
flags.adopt_module_key_flags(xds_k8s_flags)
# Running outside of a test suite, so require explicit resource_suffix.
flags.mark_flag_as_required(xds_flags.RESOURCE_SUFFIX.name)
flags.register_validator(
xds_flags.SERVER_XDS_PORT.name,
lambda val: val > 0,
message=(
"Run outside of a test suite, must provide"
" the exact port value (must be greater than 0)."
),
)
logger = logging.get_absl_logger()
# Type aliases
_Channel = grpc_channelz.Channel
_Socket = grpc_channelz.Socket
_ChannelState = grpc_channelz.ChannelState
_XdsTestServer = server_app.XdsTestServer
_XdsTestClient = client_app.XdsTestClient
def debug_cert(cert):
if not cert:
return "<missing>"
sha1 = hashlib.sha1(cert)
return f"sha1={sha1.hexdigest()}, len={len(cert)}"
def debug_sock_tls(tls):
return (
f"local: {debug_cert(tls.local_certificate)}\n"
f"remote: {debug_cert(tls.remote_certificate)}"
)
def get_deployment_pods(k8s_ns, deployment_name):
deployment = k8s_ns.get_deployment(deployment_name)
return k8s_ns.list_deployment_pods(deployment)
def debug_security_setup_negative(test_client):
"""Debug negative cases: mTLS Error, Server AuthZ error
1) mTLS Error: Server expects client mTLS cert,
but client configured only for TLS.
2) AuthZ error: Client does not authorize server because of mismatched
SAN name.
"""
# Client side.
client_correct_setup = True
channel: _Channel = test_client.wait_for_server_channel_state(
state=_ChannelState.TRANSIENT_FAILURE
)
try:
subchannel, *subchannels = list(
test_client.channelz.list_channel_subchannels(channel)
)
except ValueError:
print(
"Client setup fail: subchannel not found. "
"Common causes: test client didn't connect to TD; "
"test client exhausted retries, and closed all subchannels."
)
return
# Client must have exactly one subchannel.
logger.debug("Found subchannel, %s", subchannel)
if subchannels:
client_correct_setup = False
print(f"Unexpected subchannels {subchannels}")
subchannel_state: _ChannelState = subchannel.data.state.state
if subchannel_state is not _ChannelState.TRANSIENT_FAILURE:
client_correct_setup = False
print(
"Subchannel expected to be in "
"TRANSIENT_FAILURE, same as its channel"
)
# Client subchannel must have no sockets.
sockets = list(test_client.channelz.list_subchannels_sockets(subchannel))
if sockets:
client_correct_setup = False
print(f"Unexpected subchannel sockets {sockets}")
# Results.
if client_correct_setup:
print(
"Client setup pass: the channel "
"to the server has exactly one subchannel "
"in TRANSIENT_FAILURE, and no sockets"
)
def debug_security_setup_positive(test_client, test_server):
"""Debug positive cases: mTLS, TLS, Plaintext."""
test_client.wait_for_active_server_channel()
client_sock: _Socket = test_client.get_active_server_channel_socket()
server_sock: _Socket = test_server.get_server_socket_matching_client(
client_sock
)
server_tls = server_sock.security.tls
client_tls = client_sock.security.tls
print(f"\nServer certs:\n{debug_sock_tls(server_tls)}")
print(f"\nClient certs:\n{debug_sock_tls(client_tls)}")
print()
if server_tls.local_certificate:
eq = server_tls.local_certificate == client_tls.remote_certificate
print(f"(TLS) Server local matches client remote: {eq}")
else:
print("(TLS) Not detected")
if server_tls.remote_certificate:
eq = server_tls.remote_certificate == client_tls.local_certificate
print(f"(mTLS) Server remote matches client local: {eq}")
else:
print("(mTLS) Not detected")
def debug_basic_setup(test_client, test_server):
"""Show channel and server socket pair"""
test_client.wait_for_active_server_channel()
client_sock: _Socket = test_client.get_active_server_channel_socket()
server_sock: _Socket = test_server.get_server_socket_matching_client(
client_sock
)
logger.debug("Client socket: %s\n", client_sock)
logger.debug("Matching server socket: %s\n", server_sock)
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
# Must be called before KubernetesApiManager or GcpApiManager init.
xds_flags.set_socket_default_timeout_from_flag()
# Flags.
should_port_forward: bool = xds_k8s_flags.DEBUG_USE_PORT_FORWARDING.value
is_secure: bool = bool(_SECURITY.value)
# Setup.
gcp_api_manager = gcp.api.GcpApiManager()
k8s_api_manager = k8s.KubernetesApiManager(xds_k8s_flags.KUBE_CONTEXT.value)
# Server.
server_namespace = common.make_server_namespace(k8s_api_manager)
server_runner = common.make_server_runner(
server_namespace,
gcp_api_manager,
port_forwarding=should_port_forward,
secure=is_secure,
)
# Find server pod.
server_pod: k8s.V1Pod = common.get_server_pod(
server_runner, xds_flags.SERVER_NAME.value
)
# Client
client_namespace = common.make_client_namespace(k8s_api_manager)
client_runner = common.make_client_runner(
client_namespace,
gcp_api_manager,
port_forwarding=should_port_forward,
secure=is_secure,
)
# Find client pod.
client_pod: k8s.V1Pod = common.get_client_pod(
client_runner, xds_flags.CLIENT_NAME.value
)
# Ensure port forwarding stopped.
common.register_graceful_exit(server_runner, client_runner)
# Create server app for the server pod.
test_server: _XdsTestServer = common.get_test_server_for_pod(
server_runner,
server_pod,
test_port=xds_flags.SERVER_PORT.value,
secure_mode=is_secure,
)
test_server.set_xds_address(
xds_flags.SERVER_XDS_HOST.value, xds_flags.SERVER_XDS_PORT.value
)
# Create client app for the client pod.
test_client: _XdsTestClient = common.get_test_client_for_pod(
client_runner, client_pod, server_target=test_server.xds_uri
)
with test_client, test_server:
if _SECURITY.value in ("mtls", "tls", "plaintext"):
debug_security_setup_positive(test_client, test_server)
elif _SECURITY.value in ("mtls_error", "server_authz_error"):
debug_security_setup_negative(test_client)
else:
debug_basic_setup(test_client, test_server)
logger.info("SUCCESS!")
if __name__ == "__main__":
app.run(main)
| 8,598
| 31.205993
| 84
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/bin/__init__.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 577
| 40.285714
| 74
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/bin/run_td_setup.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configure Traffic Director for different GRPC Proxyless.
This is intended as a debugging / local development helper and not executed
as a part of interop test suites.
Typical usage examples:
# Regular proxyless setup
python -m bin.run_td_setup --flagfile=config/local-dev.cfg
# Additional commands: cleanup, backend management, etc.
python -m bin.run_td_setup --flagfile=config/local-dev.cfg --cmd=cleanup
# PSM security setup options: mtls, tls, etc.
python -m bin.run_td_setup --flagfile=config/local-dev.cfg --security=mtls
# More information and usage options
python -m bin.run_td_setup --helpfull
"""
import logging
from absl import app
from absl import flags
from framework import xds_flags
from framework import xds_k8s_flags
from framework.helpers import rand
from framework.infrastructure import gcp
from framework.infrastructure import k8s
from framework.infrastructure import traffic_director
from framework.test_app.runners.k8s import k8s_xds_server_runner
logger = logging.getLogger(__name__)
# Flags
_CMD = flags.DEFINE_enum(
"cmd",
default="create",
enum_values=[
"cycle",
"create",
"cleanup",
"backends-add",
"backends-cleanup",
"unused-xds-port",
],
help="Command",
)
_SECURITY = flags.DEFINE_enum(
"security",
default=None,
enum_values=[
"mtls",
"tls",
"plaintext",
"mtls_error",
"server_authz_error",
],
help="Configure TD with security",
)
flags.adopt_module_key_flags(xds_flags)
flags.adopt_module_key_flags(xds_k8s_flags)
# Running outside of a test suite, so require explicit resource_suffix.
flags.mark_flag_as_required(xds_flags.RESOURCE_SUFFIX.name)
@flags.multi_flags_validator(
(xds_flags.SERVER_XDS_PORT.name, _CMD.name),
message=(
"Run outside of a test suite, must provide"
" the exact port value (must be greater than 0)."
),
)
def _check_server_xds_port_flag(flags_dict):
if flags_dict[_CMD.name] not in ("create", "cycle"):
return True
return flags_dict[xds_flags.SERVER_XDS_PORT.name] > 0
# Type aliases
_KubernetesServerRunner = k8s_xds_server_runner.KubernetesServerRunner
def main(
argv,
): # pylint: disable=too-many-locals,too-many-branches,too-many-statements
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
# Must be called before KubernetesApiManager or GcpApiManager init.
xds_flags.set_socket_default_timeout_from_flag()
command = _CMD.value
security_mode = _SECURITY.value
project: str = xds_flags.PROJECT.value
network: str = xds_flags.NETWORK.value
# Resource names.
resource_prefix: str = xds_flags.RESOURCE_PREFIX.value
resource_suffix: str = xds_flags.RESOURCE_SUFFIX.value
# Test server
server_name = xds_flags.SERVER_NAME.value
server_port = xds_flags.SERVER_PORT.value
server_maintenance_port = xds_flags.SERVER_MAINTENANCE_PORT.value
server_xds_host = xds_flags.SERVER_XDS_HOST.value
server_xds_port = xds_flags.SERVER_XDS_PORT.value
server_namespace = _KubernetesServerRunner.make_namespace_name(
resource_prefix, resource_suffix
)
gcp_api_manager = gcp.api.GcpApiManager()
if security_mode is None:
td = traffic_director.TrafficDirectorManager(
gcp_api_manager,
project=project,
network=network,
resource_prefix=resource_prefix,
resource_suffix=resource_suffix,
)
else:
td = traffic_director.TrafficDirectorSecureManager(
gcp_api_manager,
project=project,
network=network,
resource_prefix=resource_prefix,
resource_suffix=resource_suffix,
)
if server_maintenance_port is None:
server_maintenance_port = (
_KubernetesServerRunner.DEFAULT_SECURE_MODE_MAINTENANCE_PORT
)
try:
if command in ("create", "cycle"):
logger.info("Create mode")
if security_mode is None:
logger.info("No security")
td.setup_for_grpc(
server_xds_host,
server_xds_port,
health_check_port=server_maintenance_port,
)
elif security_mode == "mtls":
logger.info("Setting up mtls")
td.setup_for_grpc(
server_xds_host,
server_xds_port,
health_check_port=server_maintenance_port,
)
td.setup_server_security(
server_namespace=server_namespace,
server_name=server_name,
server_port=server_port,
tls=True,
mtls=True,
)
td.setup_client_security(
server_namespace=server_namespace,
server_name=server_name,
tls=True,
mtls=True,
)
elif security_mode == "tls":
logger.info("Setting up tls")
td.setup_for_grpc(
server_xds_host,
server_xds_port,
health_check_port=server_maintenance_port,
)
td.setup_server_security(
server_namespace=server_namespace,
server_name=server_name,
server_port=server_port,
tls=True,
mtls=False,
)
td.setup_client_security(
server_namespace=server_namespace,
server_name=server_name,
tls=True,
mtls=False,
)
elif security_mode == "plaintext":
logger.info("Setting up plaintext")
td.setup_for_grpc(
server_xds_host,
server_xds_port,
health_check_port=server_maintenance_port,
)
td.setup_server_security(
server_namespace=server_namespace,
server_name=server_name,
server_port=server_port,
tls=False,
mtls=False,
)
td.setup_client_security(
server_namespace=server_namespace,
server_name=server_name,
tls=False,
mtls=False,
)
elif security_mode == "mtls_error":
# Error case: server expects client mTLS cert,
# but client configured only for TLS
logger.info("Setting up mtls_error")
td.setup_for_grpc(
server_xds_host,
server_xds_port,
health_check_port=server_maintenance_port,
)
td.setup_server_security(
server_namespace=server_namespace,
server_name=server_name,
server_port=server_port,
tls=True,
mtls=True,
)
td.setup_client_security(
server_namespace=server_namespace,
server_name=server_name,
tls=True,
mtls=False,
)
elif security_mode == "server_authz_error":
# Error case: client does not authorize server
# because of mismatched SAN name.
logger.info("Setting up mtls_error")
td.setup_for_grpc(
server_xds_host,
server_xds_port,
health_check_port=server_maintenance_port,
)
                # Regular TLS setup, but with the client policy configured
                # using an intentionally incorrect server_namespace.
td.setup_server_security(
server_namespace=server_namespace,
server_name=server_name,
server_port=server_port,
tls=True,
mtls=False,
)
td.setup_client_security(
server_namespace=(
f"incorrect-namespace-{rand.rand_string()}"
),
server_name=server_name,
tls=True,
mtls=False,
)
logger.info("Works!")
except Exception: # noqa pylint: disable=broad-except
logger.exception("Got error during creation")
if command in ("cleanup", "cycle"):
logger.info("Cleaning up")
td.cleanup(force=True)
if command == "backends-add":
logger.info("Adding backends")
k8s_api_manager = k8s.KubernetesApiManager(
xds_k8s_flags.KUBE_CONTEXT.value
)
k8s_namespace = k8s.KubernetesNamespace(
k8s_api_manager, server_namespace
)
neg_name, neg_zones = k8s_namespace.get_service_neg(
server_name, server_port
)
td.load_backend_service()
td.backend_service_add_neg_backends(neg_name, neg_zones)
td.wait_for_backends_healthy_status()
elif command == "backends-cleanup":
td.load_backend_service()
td.backend_service_remove_all_backends()
elif command == "unused-xds-port":
try:
unused_xds_port = td.find_unused_forwarding_rule_port()
logger.info(
"Found unused forwarding rule port: %s", unused_xds_port
)
except Exception: # noqa pylint: disable=broad-except
logger.exception("Couldn't find unused forwarding rule port")
if __name__ == "__main__":
app.run(main)
| 10,587
| 33.045016
| 78
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/bin/lib/common.py
|
# Copyright 2023 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common functionality for bin/ python helpers."""
import atexit
import signal
import sys
from absl import logging
from framework import xds_flags
from framework import xds_k8s_flags
from framework.infrastructure import gcp
from framework.infrastructure import k8s
from framework.test_app import client_app
from framework.test_app import server_app
from framework.test_app.runners.k8s import k8s_xds_client_runner
from framework.test_app.runners.k8s import k8s_xds_server_runner
logger = logging.get_absl_logger()
# Type aliases
KubernetesClientRunner = k8s_xds_client_runner.KubernetesClientRunner
KubernetesServerRunner = k8s_xds_server_runner.KubernetesServerRunner
_XdsTestServer = server_app.XdsTestServer
_XdsTestClient = client_app.XdsTestClient
def make_client_namespace(
k8s_api_manager: k8s.KubernetesApiManager,
) -> k8s.KubernetesNamespace:
namespace_name: str = KubernetesClientRunner.make_namespace_name(
xds_flags.RESOURCE_PREFIX.value, xds_flags.RESOURCE_SUFFIX.value
)
return k8s.KubernetesNamespace(k8s_api_manager, namespace_name)
def make_client_runner(
namespace: k8s.KubernetesNamespace,
gcp_api_manager: gcp.api.GcpApiManager,
port_forwarding: bool = False,
reuse_namespace: bool = True,
secure: bool = False,
) -> KubernetesClientRunner:
# KubernetesClientRunner arguments.
runner_kwargs = dict(
deployment_name=xds_flags.CLIENT_NAME.value,
image_name=xds_k8s_flags.CLIENT_IMAGE.value,
td_bootstrap_image=xds_k8s_flags.TD_BOOTSTRAP_IMAGE.value,
gcp_project=xds_flags.PROJECT.value,
gcp_api_manager=gcp_api_manager,
gcp_service_account=xds_k8s_flags.GCP_SERVICE_ACCOUNT.value,
xds_server_uri=xds_flags.XDS_SERVER_URI.value,
network=xds_flags.NETWORK.value,
stats_port=xds_flags.CLIENT_PORT.value,
reuse_namespace=reuse_namespace,
debug_use_port_forwarding=port_forwarding,
)
if secure:
runner_kwargs.update(
deployment_template="client-secure.deployment.yaml"
)
return KubernetesClientRunner(namespace, **runner_kwargs)
def make_server_namespace(
k8s_api_manager: k8s.KubernetesApiManager,
) -> k8s.KubernetesNamespace:
namespace_name: str = KubernetesServerRunner.make_namespace_name(
xds_flags.RESOURCE_PREFIX.value, xds_flags.RESOURCE_SUFFIX.value
)
return k8s.KubernetesNamespace(k8s_api_manager, namespace_name)
def make_server_runner(
namespace: k8s.KubernetesNamespace,
gcp_api_manager: gcp.api.GcpApiManager,
port_forwarding: bool = False,
reuse_namespace: bool = True,
reuse_service: bool = False,
secure: bool = False,
) -> KubernetesServerRunner:
# KubernetesServerRunner arguments.
runner_kwargs = dict(
deployment_name=xds_flags.SERVER_NAME.value,
image_name=xds_k8s_flags.SERVER_IMAGE.value,
td_bootstrap_image=xds_k8s_flags.TD_BOOTSTRAP_IMAGE.value,
xds_server_uri=xds_flags.XDS_SERVER_URI.value,
gcp_project=xds_flags.PROJECT.value,
gcp_api_manager=gcp_api_manager,
gcp_service_account=xds_k8s_flags.GCP_SERVICE_ACCOUNT.value,
network=xds_flags.NETWORK.value,
reuse_namespace=reuse_namespace,
reuse_service=reuse_service,
debug_use_port_forwarding=port_forwarding,
)
if secure:
runner_kwargs["deployment_template"] = "server-secure.deployment.yaml"
return KubernetesServerRunner(namespace, **runner_kwargs)
def _ensure_atexit(signum, frame):
"""Needed to handle signals or atexit handler won't be called."""
del frame
# Pylint is wrong about "Module 'signal' has no 'Signals' member":
# https://docs.python.org/3/library/signal.html#signal.Signals
sig = signal.Signals(signum) # pylint: disable=no-member
logger.warning("Caught %r, initiating graceful shutdown...\n", sig)
sys.exit(1)
def _graceful_exit(
server_runner: KubernetesServerRunner, client_runner: KubernetesClientRunner
):
"""Stop port forwarding processes."""
client_runner.stop_pod_dependencies()
server_runner.stop_pod_dependencies()
def register_graceful_exit(
server_runner: KubernetesServerRunner, client_runner: KubernetesClientRunner
):
atexit.register(_graceful_exit, server_runner, client_runner)
for signum in (signal.SIGTERM, signal.SIGHUP, signal.SIGINT):
signal.signal(signum, _ensure_atexit)
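# The pod helpers below reuse the runners' internal _wait_* helpers to locate
# the deployment's pod and block until it has started.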
def get_client_pod(
client_runner: KubernetesClientRunner, deployment_name: str
) -> k8s.V1Pod:
client_deployment: k8s.V1Deployment
client_deployment = client_runner.k8s_namespace.get_deployment(
deployment_name
)
client_pod_name: str = client_runner._wait_deployment_pod_count(
client_deployment
)[0]
return client_runner._wait_pod_started(client_pod_name)
def get_server_pod(
server_runner: KubernetesServerRunner, deployment_name: str
) -> k8s.V1Pod:
server_deployment: k8s.V1Deployment
server_deployment = server_runner.k8s_namespace.get_deployment(
deployment_name
)
server_pod_name: str = server_runner._wait_deployment_pod_count(
server_deployment
)[0]
return server_runner._wait_pod_started(server_pod_name)
def get_test_server_for_pod(
server_runner: KubernetesServerRunner, server_pod: k8s.V1Pod, **kwargs
) -> _XdsTestServer:
return server_runner._xds_test_server_for_pod(server_pod, **kwargs)
def get_test_client_for_pod(
client_runner: KubernetesClientRunner, client_pod: k8s.V1Pod, **kwargs
) -> _XdsTestClient:
return client_runner._xds_test_client_for_pod(client_pod, **kwargs)
| 6,223
| 33.966292
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/bin/lib/__init__.py
|
# Copyright 2023 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 577
| 40.285714
| 74
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/bin/cleanup/namespace.py
|
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Clean up GKE namespaces leaked by the tests."""
from absl import app
from bin.cleanup import cleanup
from framework import xds_flags
from framework import xds_k8s_flags
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
cleanup.load_keep_config()
# Must be called before KubernetesApiManager or GcpApiManager init.
xds_flags.set_socket_default_timeout_from_flag()
project: str = xds_flags.PROJECT.value
network: str = xds_flags.NETWORK.value
gcp_service_account: str = xds_k8s_flags.GCP_SERVICE_ACCOUNT.value
dry_run: bool = cleanup.DRY_RUN.value
cleanup.find_and_remove_leaked_k8s_resources(
dry_run, project, network, gcp_service_account
)
if __name__ == "__main__":
app.run(main)
| 1,372
| 30.930233
| 74
|
py
|