index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
44,073,506
|
kent1201/Master-thesis
|
refs/heads/master
|
/Loss/joint_Dloss.py
|
import math
import torch
import torch.nn as nn
# from Loss.soft_dtw import SoftDTW
class JointDloss(nn.Module):
def __init__(self, uloss_func):
super(JointDloss, self).__init__()
self.gamma = 1
self.uloss_func = uloss_func
def forward(self, Y_real, Y_fake, Y_fake_e):
real_loss, fake_loss, fake_loss_e = 0.0, 0.0, 0.0
lossD = 0.0
if self.uloss_func == 'wgan':
real_loss = Y_real.mean()
fake_loss = Y_fake.mean()
fake_loss_e = Y_fake_e.mean()
lossD = 0.5 * (fake_loss + fake_loss_e) - real_loss
elif self.uloss_func == 'hinge':
# label smoothing : 1.0 -> 0.9 (to avoid discriminator become overconfident)
d_loss_real = torch.nn.ReLU()(1.0 - Y_real).mean()
d_loss_fake = torch.nn.ReLU()(1.0 + Y_fake).mean()
d_loss_fake_e = torch.nn.ReLU()(1.0 + Y_fake_e).mean()
lossD = d_loss_real + d_loss_fake + 0.1 * d_loss_fake_e
else:
d_loss_real = torch.nn.functional.binary_cross_entropy_with_logits(Y_real, torch.ones_like(Y_real))
d_loss_fake = torch.nn.functional.binary_cross_entropy_with_logits(Y_fake, torch.zeros_like(Y_fake))
d_loss_fake_e = torch.nn.functional.binary_cross_entropy_with_logits(Y_fake_e, torch.zeros_like(Y_fake_e))
lossD = d_loss_real + d_loss_fake + 0.1 * d_loss_fake_e
return lossD
if __name__ == '__main__':
    # Smoke test for the loss module.
    # Bug fix: the original called JointDloss() with no argument, but
    # uloss_func is a required constructor parameter, so the script
    # raised TypeError before computing anything.
    Y_real = torch.randn(32, 82, 1)
    Y_fake = torch.randn(32, 82, 1)
    Y_fake_e = torch.randn(32, 82, 1)
    criterion = JointDloss(uloss_func='hinge')
    loss = criterion(Y_real, Y_fake, Y_fake_e)
    print(loss)
|
{"/train.py": ["/Network/supervisor.py", "/Network/generator.py", "/Timedataset.py", "/Loss/embedder_loss.py", "/Loss/supervised_loss.py", "/Loss/joint_Gloss.py", "/Loss/joint_Dloss.py", "/utils.py"], "/Time_generate_data.py": ["/Timedataset.py", "/utils.py", "/dataset_preprocess.py"], "/dataset.py": ["/dataset_preprocess.py"], "/Timedataset.py": ["/dataset_preprocess.py"], "/utils.py": ["/dataset.py"], "/Timetest-tf.py": ["/dataset_preprocess.py"], "/Timetest.py": ["/Timedataset.py", "/Network/simple_predictor.py", "/utils.py"], "/train_c_rnn_gan.py": ["/Timedataset.py", "/utils.py"], "/c_rnn_gan_generate_data.py": ["/Timedataset.py", "/utils.py", "/dataset_preprocess.py"], "/Network/generator.py": ["/Network/Self_Attention/utils.py"]}
|
44,073,507
|
kent1201/Master-thesis
|
refs/heads/master
|
/train.py
|
import math
import configparser
import os
from datetime import date
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import MultiStepLR
import numpy as np
import itertools
import random
from tqdm import tqdm, trange
import matplotlib.pyplot as plt
from Network.embedder import Embedder
from Network.recovery import Recovery
from Network.supervisor import Supervisor
from Network.generator import Generator
from Network.discriminator import Discriminator
from Timedataset import TimeSeriesDataset
from Loss.supervised_loss import SupervisedLoss
from Loss.joint_Gloss import JointGloss
from Loss.joint_Dloss import JointDloss
from utils import random_generator, _gradient_penalty, add_noise
# Load the configuration file
config = configparser.ConfigParser()
config.read('Configure.ini', encoding="utf-8")
# torch.backends.cudnn.deterministic = True
# Pin cuDNN algorithms for speed
# torch.backends.cudnn.benchmark = True
# Prevent "specified launch error": force all work onto the same GPU
# os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
# Read training hyper-parameters from the config file
# gpu-used
CUDA_DEVICES = torch.device("cuda:"+config.get('default',
                            'cuda_device_number') if torch.cuda.is_available() else "cpu")
dataset_dir = config.get('train', 'Dataset_path')
stage1_epochs = config.getint('train', 'stage1_epochs')
stage2_epochs = config.getint('train', 'stage2_epochs')
stage3_epochs = config.getint('train', 'stage3_epochs')
batch_size = config.getint('train', 'batch_size')
seq_len = config.getint('train', 'seq_len')
n_features = config.getint('train', 'n_features')
hidden_size = config.getint('train', 'hidden_size')
num_layers = config.getint('train', 'num_layers')
learning_rate1 = config.getfloat('train', 'learning_rate1')
learning_rate2 = config.getfloat('train', 'learning_rate2')
learning_rate3 = config.getfloat('train', 'learning_rate3')
dis_func = config.get('train', 'dis_func')
uloss_func = config.get('train', 'uloss_func')
embedder_name = config.get('default', 'embedder_name')
recovery_name = config.get('default', 'recovery_name')
generator_name = config.get('default', 'generator_name')
supervisor_name = config.get('default', 'supervisor_name')
discriminator_name = config.get('default', 'discriminator_name')
module_name = config.get('default', 'module_name')
PADDING_VALUE = config.getfloat('default', 'padding_value')
# save model path: <model_path>/<dd_mm_yyyy>/<classification_dir>/
today = date.today()
save_time = today.strftime("%d_%m_%Y")
output_dir = config.get('train', 'model_path') + '/' + save_time + \
    '/' + config.get('train', 'classification_dir') + '/'
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
# 1. Embedding network training: pre-train the data autoencoder
def train_stage1(data_loader, embedder, recovery):
    """Stage 1: train embedder + recovery as a plain autoencoder on real data.

    Uses the module-level hyper-parameters (learning_rate1, stage1_epochs,
    CUDA_DEVICES). Updates the two models in place; returns nothing.
    """
    # Loss
    criterion = SupervisedLoss(dis_func="MSE")
    # model
    embedder.train()
    recovery.train()
    # Optimizer: one Adam over both halves of the autoencoder
    optimizer = torch.optim.Adam(
        [{'params': embedder.parameters()},
         {'params': recovery.parameters()}],
        lr=learning_rate1
    )
    # Decay the learning rate at ~evenly spaced epoch milestones
    idx = np.round(np.linspace(0, stage1_epochs, 10)).astype(int)
    idx = idx[1:-1]
    # idx = np.insert(idx, 0, 15)
    scheduler = MultiStepLR(optimizer, milestones=idx, gamma=0.9)
    print('Start Embedding Network Training')
    # trange: progress bar over epochs
    logger = trange(stage1_epochs, desc=f"Epoch: 0, Loss: 0")
    for epoch in logger:
        training_loss = 0.0
        for _, (X_mb, T_mb) in enumerate(data_loader):
            X = X_mb.to(CUDA_DEVICES)
            T = T_mb.to(CUDA_DEVICES)
            optimizer.zero_grad()
            H = embedder(X, T)
            outputs = recovery(H, T)
            E_loss_T0, E_loss0 = criterion(outputs, X)
            E_loss0.backward()
            optimizer.step()
            # NOTE(review): overwritten every batch — the bar shows the
            # last batch's loss for the epoch, not an average.
            training_loss = E_loss_T0.item()
        logger.set_description(f"Epoch: {epoch}, Loss: {training_loss:.4f}")
        scheduler.step()
    print('Finish Embedding Network Training')
# 2. Training with supervised loss only
def train_stage2(data_loader, embedder, supervisor, z_recovery):
    """Stage 2: teach the supervisor one-step-ahead prediction in latent space.

    Only supervisor and z_recovery parameters are optimized; the embedder is
    used to produce latents but is not in the optimizer's parameter groups.
    """
    # Loss
    criterion = SupervisedLoss(dis_func=dis_func)
    # model
    embedder.train()
    supervisor.train()
    z_recovery.train()
    # Optimizer
    optimizer = torch.optim.Adam(
        [{'params': supervisor.parameters()},
         {'params': z_recovery.parameters()}],
        lr=learning_rate2
    )
    # Decay the learning rate at ~evenly spaced epoch milestones
    idx = np.round(np.linspace(0, stage2_epochs-1, 5)).astype(int)
    idx = idx[1:-1]
    scheduler = MultiStepLR(optimizer, milestones=idx, gamma=0.8)
    print('Start Training with Supervised Loss Only')
    # trange: progress bar over epochs
    logger = trange(stage2_epochs, desc=f"Epoch: 0, Loss: 0")
    for epoch in logger:
        training_loss = 0.0
        for _, (X_mb, T_mb) in enumerate(data_loader):
            X = X_mb.to(CUDA_DEVICES)
            T = T_mb.to(CUDA_DEVICES)
            optimizer.zero_grad()
            H = embedder(X, T)
            H_hat_supervise = supervisor(H, T)
            # Teacher forcing next output
            loss, _ = criterion(H_hat_supervise[:, :-1, :], H[:, 1:, :])
            loss.backward()
            optimizer.step()
            # sqrt to report an RMSE-style number; keeps only the last batch
            training_loss = np.sqrt(loss.item())
        logger.set_description(f"Epoch: {epoch}, Loss: {training_loss:.4f}")
        scheduler.step()
    print('Finish Training with Supervised Loss Only')
# 3. Joint Training
def train_stage3(data_loader, embedder, recovery, z_embedder, z_recovery, supervisor, discriminator, z_discriminator):
    """Stage 3: jointly train the data autoencoder, the noise AAE, the
    supervisor, and both adversarial pairs (noise GAN and data GAN).

    Per batch the update order is: data autoencoder -> noise autoencoder ->
    noise discriminator -> noise generator -> supervisor -> data generator ->
    data discriminator. Uses AMP (autocast + GradScaler) throughout.
    Checkpoints every 100 epochs and writes a loss-curve PNG at the end.
    """
    print('Start Joint Training')
    # loss
    # data autoencoder loss
    E_loss_criterion = SupervisedLoss(dis_func=dis_func)
    # noise autoencoder loss
    GZ_loss_crition = SupervisedLoss(dis_func=dis_func)
    # Supervised loss
    GS_loss_criterion = SupervisedLoss(dis_func=dis_func)
    # data discriminator loss
    DD_loss_criterion = JointDloss(uloss_func=uloss_func)
    # data generator loss
    DG_loss_criterion = JointGloss(uloss_func=uloss_func)
    # model
    embedder.train()
    recovery.train()
    z_embedder.train()
    z_recovery.train()
    supervisor.train()
    discriminator.train()
    z_discriminator.train()
    # Optimizers: one per sub-objective; discriminators run at 3x the base LR
    optimizerE = torch.optim.Adam(
        [{'params': embedder.parameters()},
         {'params': recovery.parameters()}],
        lr=learning_rate3
    )
    optimizerZE = torch.optim.Adam(
        [{'params': z_embedder.parameters()},
         {'params': z_recovery.parameters()}],
        lr=learning_rate3
    )
    optimizerZG = torch.optim.Adam(
        params=z_embedder.parameters(),
        lr=learning_rate3
    )
    optimizerZD = torch.optim.Adam(
        params=z_discriminator.parameters(),
        lr=(learning_rate3*3.0)
    )
    optimizerGS = torch.optim.Adam(
        [{'params': supervisor.parameters()},
         {'params': z_recovery.parameters()}],
        lr=learning_rate3
    )
    optimizerG = torch.optim.Adam(
        [{'params': supervisor.parameters()},
         {'params': z_recovery.parameters()},
         {'params': recovery.parameters()}],
        lr=learning_rate3
    )
    optimizerD = torch.optim.Adam(
        params=discriminator.parameters(), lr=(learning_rate3*3.0))
    # Decay the learning rates at ~evenly spaced epoch milestones
    # learning rate scheduler
    idx = np.round(np.linspace(0, stage3_epochs-1, 10)).astype(int)
    idx = idx[1:-1]
    schedulerE = MultiStepLR(optimizerE, milestones=idx, gamma=0.8)
    schedulerZE = MultiStepLR(optimizerZE, milestones=idx, gamma=0.8)
    schedulerZG = MultiStepLR(optimizerZG, milestones=idx, gamma=0.6)
    schedulerZD = MultiStepLR(optimizerZD, milestones=idx, gamma=0.6)
    schedulerGS = MultiStepLR(optimizerGS, milestones=idx, gamma=0.6)
    schedulerD = MultiStepLR(optimizerD, milestones=idx, gamma=0.6)
    schedulerG = MultiStepLR(optimizerG, milestones=idx, gamma=0.6)
    # automatic mixed precision (AMP) to save memory; one scaler per optimizer
    scalerE = torch.cuda.amp.GradScaler()
    scalerZE = torch.cuda.amp.GradScaler()
    scalerZG = torch.cuda.amp.GradScaler()
    scalerZD = torch.cuda.amp.GradScaler()
    scalerGS = torch.cuda.amp.GradScaler()
    scalerD = torch.cuda.amp.GradScaler()
    scalerG = torch.cuda.amp.GradScaler()
    # per-epoch curves for the final plot
    Noise_wasserstein_dis = []
    Data_wasserstein_dis = []
    training_loss_D = []
    training_loss_G = []
    training_loss_GZ = []
    training_loss_DZ = []
    # trange: progress bar over epochs
    if uloss_func == 'wgan':
        logger = trange(
            stage3_epochs, desc=f"Epoch: 0, Noise wasserstein dis: 0, Data wasserstein dis: 0")
    else:
        logger = trange(
            stage3_epochs, desc=f"Epoch: 0, Noise G: 0, Noise D: 0, Data G: 0, Data D: 0")
    for epoch in logger:
        for X_mb, T_mb in data_loader:
            # Get data x
            X = X_mb.to(CUDA_DEVICES)
            T = T_mb.to(CUDA_DEVICES)
            # Generate noise z shaped like the data batch
            z_batch_size, z_seq_len, z_dim = X.shape
            Z = random_generator(z_batch_size, z_seq_len, z_dim, T_mb)
            Z = Z.to(CUDA_DEVICES)
            ## Train data autoencoder
            optimizerE.zero_grad()
            with torch.cuda.amp.autocast():
                H = embedder(X, T)
                X_tilde = recovery(H, T)
                E_loss_T0, E_loss0 = E_loss_criterion(X_tilde, X)
            scalerE.scale(E_loss0).backward()  # E_loss0.backward()
            scalerE.step(optimizerE)  # optimizerE.step()
            scalerE.update()
            ## Train AAE: freeze the noise discriminator for the encoder update
            for p in z_discriminator.parameters():
                p.requires_grad = False
            optimizerZE.zero_grad()
            ## Train noise autoencoder
            with torch.cuda.amp.autocast():
                H = embedder(X, T)
                Z_hat = z_embedder(H, T)
                H_hat = z_recovery(Z_hat, T)
                _, ze_loss = GZ_loss_crition(H_hat, H)
            scalerZE.scale(ze_loss).backward()  # ze_loss.backward()
            scalerZE.step(optimizerZE)  # optimizerZE.step()
            scalerZE.update()
            ## Train noise discriminator
            optimizerZD.zero_grad()
            for p in z_discriminator.parameters():  # reset requires_grad
                p.requires_grad = True
            with torch.cuda.amp.autocast():
                d_out_real = z_discriminator(Z, T)
                # detach H: only the discriminator learns from this pass
                Z_hat = z_embedder(H.detach(), T)
                d_out_fake = z_discriminator(Z_hat, T)
                if uloss_func == 'wgan':
                    real_loss = torch.mean(d_out_real)
                    fake_loss = torch.mean(d_out_fake)
                    noise_wasserstein_dis = real_loss - fake_loss
                    # cuDNN cannot double-backprop through RNNs, so disable
                    # it for the gradient-penalty pass
                    with torch.backends.cudnn.flags(enabled=False):
                        lossD_gp = _gradient_penalty(CUDA_DEVICES, z_discriminator, Z, Z_hat, T)
                    d_loss = fake_loss - real_loss + 2 * lossD_gp
                elif uloss_func == 'hinge':
                    # label smoothing : 1.0 -> 0.9 (to avoid discriminator become overconfident)
                    d_loss_real = torch.nn.ReLU()(1.0 - d_out_real).mean()
                    d_loss_fake = torch.nn.ReLU()(1.0 + d_out_fake).mean()
                    d_loss = d_loss_real + d_loss_fake
                else:
                    d_loss_real = torch.nn.functional.binary_cross_entropy_with_logits(d_out_real, torch.ones_like(d_out_real))
                    d_loss_fake = torch.nn.functional.binary_cross_entropy_with_logits(d_out_fake, torch.zeros_like(d_out_fake))
                    d_loss = d_loss_real + d_loss_fake
            # skip the update when the discriminator is already strong enough
            if d_loss > 0.15:
                scalerZD.scale(d_loss).backward()  # d_loss.backward()
                scalerZD.step(optimizerZD)  # optimizerZD.step()
                scalerZD.update()
            ## Train noise generator
            optimizerZG.zero_grad()
            for p in z_discriminator.parameters():  # reset requires_grad
                p.requires_grad = False
            with torch.cuda.amp.autocast():
                Z_hat = z_embedder(H.detach(), T)
                d_out_fake = z_discriminator(Z_hat, T)
                if uloss_func == 'wgan' or uloss_func == 'hinge':
                    g_loss = -torch.mean(d_out_fake)
                else:
                    g_loss = torch.nn.functional.binary_cross_entropy_with_logits(d_out_fake, torch.ones_like(d_out_fake))
            scalerZG.scale(g_loss).backward()  # g_loss.backward()
            scalerZG.step(optimizerZG)  # optimizerZG.step()
            scalerZG.update()
            ## Train supervised loss
            optimizerGS.zero_grad()
            with torch.cuda.amp.autocast():
                H = embedder(X, T)
                H_hat_supervise = supervisor(H, T)
                # Teacher forcing next output
                _, GS_loss = GS_loss_criterion(H_hat_supervise[:, :-1, :], H[:, 1:, :])
            # may update now, or defer the update to the next phase
            scalerGS.scale(GS_loss).backward()  # GS_loss.backward()
            scalerGS.step(optimizerGS)  # optimizerGS.step()
            scalerGS.update()
            ## Train data generator
            optimizerG.zero_grad()
            for p in discriminator.parameters():  # reset requires_grad
                p.requires_grad = False
            with torch.cuda.amp.autocast():
                H = embedder(X, T)
                H_hat_supervise = supervisor(H, T)
                _, GS_loss = GS_loss_criterion(H_hat_supervise[:, :-1, :], H[:, 1:, :])
                E_hat = z_recovery(Z, T)
                H_hat = supervisor(E_hat, T)
                # Synthetic data generated
                X_hat = recovery(H_hat, T)
                X_hat_e = recovery(E_hat, T)
                # Adversarial loss
                Y_fake = discriminator(X_hat, T)
                Y_fake_e = discriminator(X_hat_e, T)
                lossG = DG_loss_criterion(Y_fake, Y_fake_e)
                # Add supervised loss
                lossG = lossG + GS_loss
            scalerG.scale(lossG).backward()  # lossG.backward()
            scalerG.step(optimizerG)  # optimizerG.step()
            scalerG.update()
            ## Data Discriminator training
            for p in discriminator.parameters():  # reset requires_grad
                p.requires_grad = True  # they are set to False below in netG update
            optimizerD.zero_grad()
            with torch.cuda.amp.autocast():
                H = embedder(X, T)
                H_hat_supervise = supervisor(H, T)
                E_hat = z_recovery(Z, T)
                H_hat = supervisor(E_hat, T)
                X_hat = recovery(H_hat, T)
                X_hat_e = recovery(E_hat, T)
                Y_real = discriminator(X, T)
                Y_fake = discriminator(X_hat, T)  # Output of supervisor
                Y_fake_e = discriminator(X_hat_e, T)  # Output of generator
                lossD = 0.0
                _, GS_loss = GS_loss_criterion(H_hat_supervise[:, :-1, :], H[:, 1:, :])
                lossD = DD_loss_criterion(Y_real, Y_fake, Y_fake_e)
                if uloss_func == 'wgan':
                    data_wasserstein_dis = Y_real.mean() - 0.5 * (Y_fake.mean() + Y_fake_e.mean())
                    # see note above: cuDNN off for the double-backward GP pass
                    with torch.backends.cudnn.flags(enabled=False):
                        lossD_gp = _gradient_penalty(CUDA_DEVICES, discriminator, X, X_hat, T)
                    lossD = lossD + 10 * lossD_gp
                # Add supervised loss
                lossD = lossD + GS_loss
            # only when the generator does not work well
            if lossD > 0.15:
                scalerD.scale(lossD).backward()  # lossD.backward()
                scalerD.step(optimizerD)  # optimizerD.step()
                scalerD.update()
        # Record/display per-epoch curves (values come from the last batch)
        if uloss_func == 'wgan':
            Noise_wasserstein_dis.append(noise_wasserstein_dis.item())
            Data_wasserstein_dis.append(data_wasserstein_dis.item())
            logger.set_description(
                f"Epoch: {epoch}, Noise wasserstein dis: {noise_wasserstein_dis.item():.4f}, Data wasserstein dis: {data_wasserstein_dis.item():.4f}"
            )
        else:
            training_loss_D.append(lossD.item())
            training_loss_G.append(lossG.item())
            training_loss_GZ.append(g_loss.item())
            training_loss_DZ.append(d_loss.item())
            logger.set_description(
                f"Epoch: {epoch}, Noise G: {g_loss.item():.4f}, Noise D: {d_loss.item():.4f}, Data G: {lossG.item():.4f}, Data D: {lossD.item():.4f}"
            )
        schedulerE.step()
        schedulerZE.step()
        schedulerZG.step()
        schedulerZD.step()
        schedulerGS.step()
        schedulerD.step()
        schedulerG.step()
        # Save multiple checkpoints
        if epoch % 100 == 0:
            torch.save(recovery.state_dict(), f'{output_dir+str(epoch)+"_"+recovery_name}')
            torch.save(z_recovery.state_dict(), f'{output_dir+str(epoch)+"_"+generator_name}')
            torch.save(supervisor.state_dict(), f'{output_dir+str(epoch)+"_"+supervisor_name}')
            torch.save(discriminator.state_dict(), f'{output_dir+str(epoch)+"_discriminator.pth"}')
            torch.save(z_discriminator.state_dict(), f'{output_dir+str(epoch)+"_Zdiscriminator.pth"}')
            torch.save(embedder.state_dict(), f'{output_dir+str(epoch)+"_embedder.pth"}')
            torch.save(z_embedder.state_dict(), f'{output_dir+str(epoch)+"_Zembedder.pth"}')
    # Plot the loss curves as a visual sanity check
    if uloss_func == 'wgan':
        Noise_wasserstein_dis  # NOTE(review): no-op expression statement; safe to delete
        plt.plot(Noise_wasserstein_dis, color='blue', label="Noise W dis")
        plt.plot(Data_wasserstein_dis, color='green', label="Data W dis")
        plt.title("WGAN Training loss")
        plt.xlabel('Epoch')
        plt.legend()
        plt.savefig('./Loss_curve/training_loss_curve.png', bbox_inches='tight')
        plt.close()
    else:
        plt.plot(training_loss_D, color='red', label="Data D")
        plt.plot(training_loss_G, color='green', label="Data G")
        plt.plot(training_loss_GZ, color='blue', label="Noise G")
        plt.plot(training_loss_DZ, color='orange', label="Noise D")
        plt.title("Hinge Training loss")
        plt.xlabel('Epoch')
        plt.legend()
        plt.savefig('./Loss_curve/training_loss_curve.png', bbox_inches='tight')
        plt.close()
    print('Finish Joint Training')
if __name__ == '__main__':
    # Parameters: echo the resolved configuration before training
    print("CUDA DEVICE: {}".format(CUDA_DEVICES))
    print("[train] module: {}".format(module_name))
    print("[train] action: {}".format(config.get(
        'train', 'classification_dir').split('_')[0]))
    print("[train] seq_len: {}".format(seq_len))
    print("[train] n_features: {}".format(n_features))
    print("[train] hidden size: {}".format(hidden_size))
    print("[train] num_layers: {}".format(num_layers))
    print("[train] num_epochs: {}".format(stage3_epochs))
    print("[train] batch_size: {}".format(batch_size))
    print("[train] distance function: {}".format(dis_func))
    print("[train] adversarial loss function: {}".format(uloss_func))
    # Dataset
    Data_set = TimeSeriesDataset(
        root_dir=dataset_dir, seq_len=seq_len, transform=None)
    Data_loader = DataLoader(
        dataset=Data_set, batch_size=batch_size, shuffle=False, num_workers=0)
    Max_Seq_len = Data_set.max_seq_len
    # models
    # embedder/recovery: data-space autoencoder (features <-> latent)
    embedder = Embedder(
        module=module_name,
        mode='data',
        time_stamp=seq_len,
        input_size=n_features,
        hidden_dim=hidden_size,
        output_dim=hidden_size,
        num_layers=num_layers,
        activate_function=nn.Tanh(),
        padding_value=PADDING_VALUE,
        max_seq_len=Max_Seq_len
    )
    recovery = Recovery(
        module=module_name,
        mode='data',
        time_stamp=seq_len,
        input_size=hidden_size,
        hidden_dim=hidden_size,
        output_dim=n_features,
        num_layers=num_layers,
        activate_function=nn.Tanh(),
        padding_value=PADDING_VALUE,
        max_seq_len=Max_Seq_len
    )
    # z_embedder/z_recovery: noise-space autoencoder (latent <-> noise)
    z_embedder = Embedder(
        module=module_name,
        mode='noise',
        time_stamp=seq_len,
        input_size=hidden_size,
        hidden_dim=hidden_size,
        output_dim=n_features,
        num_layers=num_layers,
        activate_function=nn.Tanh(),
        padding_value=PADDING_VALUE,
        max_seq_len=Max_Seq_len
    )
    z_recovery = Recovery(
        module=module_name,
        mode='noise',
        time_stamp=seq_len,
        input_size=n_features,
        hidden_dim=hidden_size,
        output_dim=hidden_size,
        num_layers=num_layers,
        activate_function=nn.Tanh(),
        padding_value=PADDING_VALUE,
        max_seq_len=Max_Seq_len
    )
    supervisor = Supervisor(
        module=module_name,
        time_stamp=seq_len,
        input_size=hidden_size,
        hidden_dim=hidden_size,
        output_dim=hidden_size,
        # [Supervisor] num_layers must less(-1) than other component, embedder
        num_layers=num_layers - 1,
        activate_function=nn.Tanh(),
        padding_value=PADDING_VALUE,
        max_seq_len=Max_Seq_len
    )
    # discriminator scores data sequences; z_discriminator scores noise
    discriminator = Discriminator(
        module=module_name,
        time_stamp=seq_len,
        input_size=n_features,
        hidden_dim=hidden_size,
        output_dim=1,
        num_layers=num_layers,
        activate_function=nn.Tanh(),
        padding_value=PADDING_VALUE,
        max_seq_len=Max_Seq_len
    )
    z_discriminator = Discriminator(
        module=module_name,
        time_stamp=seq_len,
        input_size=n_features,
        hidden_dim=hidden_size,
        output_dim=1,
        num_layers=num_layers,
        activate_function=nn.Tanh(),
        padding_value=PADDING_VALUE,
        max_seq_len=Max_Seq_len
    )
    # Optional warm start from an earlier checkpoint:
    # embedder.load_state_dict(torch.load('/home/kent1201/Documents/Master-thesis/models/26_05_2021/action1_gru_MSE_hinge_6000_64_82_27_108_5/1000_embedder.pth'))
    # recovery.load_state_dict(torch.load('/home/kent1201/Documents/Master-thesis/models/26_05_2021/action1_gru_MSE_hinge_6000_64_82_27_108_5/1000_recovery.pth'))
    # z_embedder.load_state_dict(torch.load('/home/kent1201/Documents/Master-thesis/models/26_05_2021/action1_gru_MSE_hinge_6000_64_82_27_108_5/1000_Zembedder.pth'))
    # z_recovery.load_state_dict(torch.load('/home/kent1201/Documents/Master-thesis/models/26_05_2021/action1_gru_MSE_hinge_6000_64_82_27_108_5/1000_generator.pth'))
    # supervisor.load_state_dict(torch.load('/home/kent1201/Documents/Master-thesis/models/26_05_2021/action1_gru_MSE_hinge_6000_64_82_27_108_5/1000_supervisor.pth'))
    # discriminator.load_state_dict(torch.load('/home/kent1201/Documents/Master-thesis/models/26_05_2021/action1_gru_MSE_hinge_6000_64_82_27_108_5/1000_discriminator.pth'))
    # z_discriminator.load_state_dict(torch.load('/home/kent1201/Documents/Master-thesis/models/26_05_2021/action1_gru_MSE_hinge_6000_64_82_27_108_5/1000_Zdiscriminator.pth'))
    embedder = embedder.to(CUDA_DEVICES)
    recovery = recovery.to(CUDA_DEVICES)
    z_embedder = z_embedder.to(CUDA_DEVICES)
    z_recovery = z_recovery.to(CUDA_DEVICES)
    supervisor = supervisor.to(CUDA_DEVICES)
    discriminator = discriminator.to(CUDA_DEVICES)
    z_discriminator = z_discriminator.to(CUDA_DEVICES)
    # Run the three training stages, then persist the generator-side models
    train_stage1(Data_loader, embedder, recovery)
    train_stage2(Data_loader, embedder, supervisor, z_recovery)
    train_stage3(Data_loader, embedder, recovery, z_embedder, z_recovery, supervisor, discriminator, z_discriminator)
    torch.save(recovery.state_dict(), f'{output_dir+recovery_name}')
    torch.save(z_recovery.state_dict(), f'{output_dir+generator_name}')
    torch.save(supervisor.state_dict(), f'{output_dir+supervisor_name}')
    print('Finish Saving Models.')
|
{"/train.py": ["/Network/supervisor.py", "/Network/generator.py", "/Timedataset.py", "/Loss/embedder_loss.py", "/Loss/supervised_loss.py", "/Loss/joint_Gloss.py", "/Loss/joint_Dloss.py", "/utils.py"], "/Time_generate_data.py": ["/Timedataset.py", "/utils.py", "/dataset_preprocess.py"], "/dataset.py": ["/dataset_preprocess.py"], "/Timedataset.py": ["/dataset_preprocess.py"], "/utils.py": ["/dataset.py"], "/Timetest-tf.py": ["/dataset_preprocess.py"], "/Timetest.py": ["/Timedataset.py", "/Network/simple_predictor.py", "/utils.py"], "/train_c_rnn_gan.py": ["/Timedataset.py", "/utils.py"], "/c_rnn_gan_generate_data.py": ["/Timedataset.py", "/utils.py", "/dataset_preprocess.py"], "/Network/generator.py": ["/Network/Self_Attention/utils.py"]}
|
44,073,508
|
kent1201/Master-thesis
|
refs/heads/master
|
/train_c_rnn_gan.py
|
import math
import configparser
import os
from datetime import date
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import MultiStepLR
import numpy as np
import itertools
import random
from tqdm import tqdm, trange
import matplotlib.pyplot as plt
from Network.c_rnn_gan import Generator
from Network.c_rnn_gan import Discriminator
# from dataset import WaferDataset
from Timedataset import TimeSeriesDataset
from utils import random_generator
# Load the configuration file
config = configparser.ConfigParser()
config.read('Configure.ini', encoding="utf-8")
# torch.backends.cudnn.deterministic = True
# Pin cuDNN algorithms for speed
# torch.backends.cudnn.benchmark = True
# Prevent "specified launch error": force all work onto the same GPU
# os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
# gpu-used
CUDA_DEVICES = torch.device("cuda:"+config.get('default',
                            'cuda_device_number') if torch.cuda.is_available() else "cpu")
# Training hyper-parameters from Configure.ini
dataset_dir = config.get('train', 'Dataset_path')
stage1_epochs = config.getint('train', 'stage1_epochs')
stage2_epochs = config.getint('train', 'stage2_epochs')
stage3_epochs = config.getint('train', 'stage3_epochs')
stage4_epochs = config.getint('train', 'stage4_epochs')
stage5_epochs = config.getint('train', 'stage5_epochs')
batch_size = config.getint('train', 'batch_size')
seq_len = config.getint('train', 'seq_len')
n_features = config.getint('train', 'n_features')
hidden_size = config.getint('train', 'hidden_size')
num_layers = config.getint('train', 'num_layers')
learning_rate1 = config.getfloat('train', 'learning_rate1')
learning_rate2 = config.getfloat('train', 'learning_rate2')
learning_rate3 = config.getfloat('train', 'learning_rate3')
learning_rate4 = config.getfloat('train', 'learning_rate4')
learning_rate5 = config.getfloat('train', 'learning_rate5')
dis_func = config.get('train', 'dis_func')
uloss_func = config.get('train', 'uloss_func')
generator_name = config.get('default', 'generator_name')
discriminator_name = config.get('default', 'discriminator_name')
module_name = config.get('default', 'module_name')
PADDING_VALUE = config.getfloat('default', 'padding_value')
# save model path: <model_path>/<dd_mm_yyyy>/<classification_dir>/
today = date.today()
save_time = today.strftime("%d_%m_%Y")
output_dir = config.get('train', 'model_path') + '/' + save_time + \
    '/' + config.get('train', 'classification_dir') + '/'
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
class GLoss(nn.Module):
    """C-RNN-GAN generator loss.

    Given the discriminator's probabilities for generated samples,
    returns mean(-log(p)) — the generator wants the discriminator to
    output values close to 1 on its samples.
    """

    def __init__(self):
        super(GLoss, self).__init__()

    def forward(self, logits_gen):
        # Clamp away from zero so the log stays finite.
        clamped = torch.clamp(logits_gen, 1e-40, 1.0)
        return torch.mean(-torch.log(clamped))
class DLoss(nn.Module):
    """C-RNN-GAN discriminator loss.

    Binary cross-entropy written out by hand:
    loss = -(y*log(p) + (1-y)*log(1-p)), averaged over the batch.
    With ``label_smoothing`` the real-sample target is softened to 0.9.
    """

    def __init__(self, label_smoothing=False):
        super(DLoss, self).__init__()
        self.label_smoothing = label_smoothing

    def forward(self, logits_real, logits_gen):
        """Compute the loss from D's outputs on real and generated input.

        logits_real: probabilities from D when the input is real.
        logits_gen: probabilities from D when the input comes from G.
        """
        p_real = torch.clamp(logits_real, 1e-40, 1.0)
        loss_real = -torch.log(p_real)
        if self.label_smoothing:
            # Mix in 10% of the opposite target so D never sees a hard 1.0.
            p_opposite = torch.clamp((1 - p_real), 1e-40, 1.0)
            loss_real = 0.9 * loss_real + 0.1 * (-torch.log(p_opposite))
        p_not_gen = torch.clamp((1 - logits_gen), 1e-40, 1.0)
        loss_gen = -torch.log(p_not_gen)
        return torch.mean(loss_real + loss_gen)
# Start Training
def train_stage(data_loader, generator, discriminator):
    """Adversarially train the C-RNN-GAN generator/discriminator pair.

    Uses AMP; the discriminator runs at 3x the generator's learning rate
    and is only updated while its loss exceeds 0.15. Checkpoints every
    100 epochs and writes a loss-curve PNG at the end.
    """
    print('Start Training')
    # loss
    Gloss_criterion = GLoss()
    Dloss_criterion = DLoss(label_smoothing=True)
    # model
    discriminator.train()
    generator.train()
    # Optimizer
    optimizerG = torch.optim.Adam(
        generator.parameters(),
        lr=learning_rate5
    )
    optimizerD = torch.optim.Adam(
        params=discriminator.parameters(), lr=(learning_rate5*3.0))
    # learning rate scheduler: halve at ~evenly spaced epoch milestones
    idx = np.round(np.linspace(0, stage5_epochs-1, 10)).astype(int)
    idx = idx[1:-1]
    schedulerD = MultiStepLR(optimizerD, milestones=idx, gamma=0.5)
    schedulerG = MultiStepLR(optimizerG, milestones=idx, gamma=0.5)
    # automatic mixed precision (AMP) to save memory
    scalerD = torch.cuda.amp.GradScaler()
    scalerG = torch.cuda.amp.GradScaler()
    training_loss_D = []
    training_loss_G = []
    logger = trange(
        stage5_epochs, desc=f"Data G: 0, Data D: 0")
    for epoch in logger:
        for X_mb, T_mb in data_loader:
            X = X_mb.to(CUDA_DEVICES)
            T = T_mb.to(CUDA_DEVICES)
            # Noise batch shaped like the data batch
            z_batch_size, z_seq_len, z_dim = X.shape
            Z = random_generator(z_batch_size, z_seq_len, z_dim, T_mb)
            Z = Z.to(CUDA_DEVICES)
            # inital state
            d_state = discriminator.init_hidden(z_batch_size)
            g_state = generator.init_hidden(z_batch_size)
            ## Train generator
            optimizerG.zero_grad()
            for p in discriminator.parameters():  # reset requires_grad
                p.requires_grad = False
            with torch.cuda.amp.autocast():
                X_hat, _ = generator(Z, g_state)
                Y_fake, _, _ = discriminator(X_hat, d_state)
                lossG = Gloss_criterion(Y_fake)
            scalerG.scale(lossG).backward()  # lossG.backward()
            scalerG.step(optimizerG)  # optimizerG.step()
            scalerG.update()
            ## Discriminator training
            for p in discriminator.parameters():  # reset requires_grad
                p.requires_grad = True  # they are set to False below in netG update
            optimizerD.zero_grad()
            with torch.cuda.amp.autocast():
                Y_real, _, _ = discriminator(X, d_state)
                # detach X_hat: only D learns from this pass
                Y_fake, _, _ = discriminator(X_hat.detach(), d_state)  # Output of supervisor
                lossD = Dloss_criterion(Y_real, Y_fake)
            # Train discriminator (only when the discriminator does not work well)
            if lossD > 0.15:
                scalerD.scale(lossD).backward()  # lossD.backward()
                scalerD.step(optimizerD)  # optimizerD.step()
                scalerD.update()
        # Record the last batch's losses for this epoch
        training_loss_D.append(lossD.item())
        training_loss_G.append(lossG.item())
        logger.set_description(
            f"Epoch: {epoch}, Data G: {lossG.item():.4f}, Data D: {lossD.item():.4f}"
        )
        schedulerD.step()
        schedulerG.step()
        # Save multiple checkpoints
        if epoch % 100 == 0:
            torch.save(generator.state_dict(), f'{output_dir+str(epoch)+"_"+generator_name}')
            torch.save(discriminator.state_dict(), f'{output_dir+str(epoch)+"_"+discriminator_name}')
    # Plot the training curves
    plt.plot(training_loss_D, color='red', label="Data D")
    plt.plot(training_loss_G, color='green', label="Data G")
    plt.title("C_RNN_GAN Training loss")
    plt.xlabel('Epoch')
    plt.legend()
    plt.savefig('./Loss_curve/training_loss_curve.png', bbox_inches='tight')
    plt.close()
    print('Finish Joint Training')
if __name__ == '__main__':
    # # save model path
    # today = date.today()
    # save_time = today.strftime("%d_%m_%Y")
    # output_dir = config.get('train', 'model_path') + '/' + save_time + \
    #     '/' + config.get('train', 'classification_dir') + '/'
    # if not os.path.exists(output_dir):
    #     os.makedirs(output_dir)
    # Parameters: echo the resolved configuration before training
    print("CUDA DEVICE: {}".format(CUDA_DEVICES))
    print("[train] module: {}".format(module_name))
    print("[train] action: {}".format(config.get(
        'train', 'classification_dir').split('_')[0]))
    print("[train] seq_len: {}".format(seq_len))
    print("[train] n_features: {}".format(n_features))
    print("[train] hidden size: {}".format(hidden_size))
    print("[train] num_layers: {}".format(num_layers))
    print("[train] num_epochs: {}".format(stage5_epochs))
    print("[train] batch_size: {}".format(batch_size))
    print("[train] distance function: {}".format(dis_func))
    print("[train] adversarial loss function: {}".format(uloss_func))
    # Dataset
    Data_set = TimeSeriesDataset(
        root_dir=dataset_dir, seq_len=seq_len, transform=None)
    Data_loader = DataLoader(
        dataset=Data_set, batch_size=batch_size, shuffle=False, num_workers=0)
    Max_Seq_len = Data_set.max_seq_len
    # models
    generator = Generator(n_features, CUDA_DEVICES, hidden_units=hidden_size)
    discriminator = Discriminator(n_features, CUDA_DEVICES, hidden_units=hidden_size)
    # NOTE(review): the commented generator warm-start below points at the
    # *discriminator* checkpoint file — verify the path before re-enabling.
    # discriminator.load_state_dict(torch.load('/home/kent1201/Documents/Master-thesis/models/26_05_2021/action1_gru_MSE_hinge_6000_64_82_27_108_5/1000_discriminator.pth'))
    # generator.load_state_dict(torch.load('/home/kent1201/Documents/Master-thesis/models/26_05_2021/action1_gru_MSE_hinge_6000_64_82_27_108_5/1000_discriminator.pth'))
    discriminator = discriminator.to(CUDA_DEVICES)
    generator = generator.to(CUDA_DEVICES)
    train_stage(Data_loader, generator, discriminator)
    # Persist the final weights
    torch.save(discriminator.state_dict(), f'{output_dir+discriminator_name}')
    torch.save(generator.state_dict(), f'{output_dir+generator_name}')
    print('Finish Saving Models.')
|
{"/train.py": ["/Network/supervisor.py", "/Network/generator.py", "/Timedataset.py", "/Loss/embedder_loss.py", "/Loss/supervised_loss.py", "/Loss/joint_Gloss.py", "/Loss/joint_Dloss.py", "/utils.py"], "/Time_generate_data.py": ["/Timedataset.py", "/utils.py", "/dataset_preprocess.py"], "/dataset.py": ["/dataset_preprocess.py"], "/Timedataset.py": ["/dataset_preprocess.py"], "/utils.py": ["/dataset.py"], "/Timetest-tf.py": ["/dataset_preprocess.py"], "/Timetest.py": ["/Timedataset.py", "/Network/simple_predictor.py", "/utils.py"], "/train_c_rnn_gan.py": ["/Timedataset.py", "/utils.py"], "/c_rnn_gan_generate_data.py": ["/Timedataset.py", "/utils.py", "/dataset_preprocess.py"], "/Network/generator.py": ["/Network/Self_Attention/utils.py"]}
|
44,073,509
|
kent1201/Master-thesis
|
refs/heads/master
|
/c_rnn_gan_generate_data.py
|
import math
import torch
import torch.nn as nn
import configparser
import os
import gc
import pandas as pd
from datetime import date
from torch.utils.data import DataLoader
from torchvision import transforms
import numpy as np
from Timedataset import TimeSeriesDataset
from Network.c_rnn_gan import Generator
from utils import random_generator
from dataset_preprocess import MinMaxScaler1, batch_generation, extract_time, MinMaxScaler2, ReMinMaxScaler2, data_postprocess
from tqdm import tqdm, trange
# --- runtime configuration, read from Configure.ini ---
config = configparser.ConfigParser()
config.read('Configure.ini', encoding="utf-8")
# device: the configured CUDA index when a GPU is available, else CPU
CUDA_DEVICES = torch.device("cuda:"+config.get('default',
    'cuda_device_number') if torch.cuda.is_available() else "cpu")
# directory with the trained model weights
PATH_TO_WEIGHTS = config.get('GenTstVis', 'model_path')
# where the synthetic dataset is written
OUTPUT_DIR = config.get('GenTstVis', 'syntheticDataset_path')
dataset_dir = config.get('GenTstVis', 'Dataset_path')
classification_dir = config.get('GenTstVis', 'classification_dir')
date_dir = config.get('GenTstVis', 'date_dir')
# sequence geometry used at generation time
seq_len = config.getint('GenTstVis', 'seq_len')
n_features = config.getint('GenTstVis', 'n_features')
hidden_size = config.getint('GenTstVis', 'hidden_size')
num_layers = config.getint('GenTstVis', 'num_layers')
PADDING_VALUE = config.getfloat('default', 'padding_value')
# checkpoint file names are prefixed by the chosen epoch
generator_name = config.get('GenTstVis', 'model_epoch') + config.get('generate_data', 'generator_name')
supervisor_name = config.get('GenTstVis', 'model_epoch') + config.get('generate_data', 'supervisor_name')
recovery_name = config.get('GenTstVis', 'model_epoch') + config.get('generate_data', 'recovery_name')
syntheitc_data_name = config.get('GenTstVis', 'synthetic_data_name')
module_name = config.get('default', 'module_name')
# how many multiples of the real dataset size to generate
times_iteration = config.getint('generate_data', 'iteration')
def concat_data(data, data_list):
    """Concatenate every batch in ``data`` onto ``data_list`` along axis 0.

    Args:
        data: sequence of numpy arrays with identical trailing dimensions.
        data_list: an existing array to prepend, or an empty list/array.

    Returns:
        A single numpy array containing all rows, or ``data_list``
        unchanged when ``data`` is empty.
    """
    # Fix: the original called np.concatenate once per batch inside the
    # loop, copying the accumulated result every time (O(n^2) bytes moved).
    # A single concatenate over all parts is linear; the tqdm progress bar
    # is dropped since there is no longer a per-batch loop.
    if not len(data):
        return data_list
    parts = list(data)
    if len(data_list):
        parts.insert(0, data_list)
    return np.concatenate(parts, axis=0)
def Save_Data(data, save_path, data_names):
    """Write ``data`` to ``save_path/data_names`` as a bare CSV.

    The file is written without a header row and without an index column.
    Returns the file name that was used.
    """
    # create the target directory (and parents) on demand
    os.makedirs(save_path, exist_ok=True)
    destination = os.path.join(save_path, data_names)
    pd.DataFrame(data).to_csv(destination, index=False, header=False)
    return data_names
def Generate_data(model_path, no, data_seq_len, dim, ori_time):
    """Load the trained C-RNN-GAN generator and sample synthetic sequences.

    Args:
        model_path: directory holding the saved generator state dict.
        no: number of sequences sampled per iteration.
        data_seq_len: flat row count of the real data (unused here).
        dim: feature dimension of the generated noise/data.
        ori_time: valid length per sequence; sample i is cut to ori_time[i].

    Returns:
        list of ``no * times_iteration`` numpy arrays, each (ori_time[i], dim).
    """
    # load model
    generator = Generator(n_features, CUDA_DEVICES, hidden_units=hidden_size)
    generator.load_state_dict(torch.load(model_path + '/' + generator_name))
    # generator = torch.load(model_path + '/' + generator_name)
    # supervisor = torch.load(model_path + '/' + supervisor_name)
    # recovery = torch.load(model_path + '/' + recovery_name)
    generator.eval()
    # move to GPU
    generator = generator.to(CUDA_DEVICES)
    generated_data = list()
    # times_iteration: how many multiples of the real dataset to generate
    for iteration in range(0, times_iteration):
        print("iteration: {}".format(iteration))
        with torch.no_grad():
            with torch.cuda.amp.autocast():
                # generate noise
                Z = random_generator(no, seq_len, dim, ori_time)
                Z = Z.to(CUDA_DEVICES)
                g_state = generator.init_hidden(no)
                # generate synthetic data
                X_hat, _ = generator(Z, g_state)
                # X_hat = X_hat.cpu().detach().numpy()
                # Even after the tensors below are released, nvidia-smi still
                # reports the memory as occupied (it sits in torch's cache).
                X_hat = X_hat.cpu().numpy()
                # print("X_hat: {}".format(X_hat.shape))
                for i in range(no):
                    # keep only the valid (unpadded) prefix of each sequence
                    temp = X_hat[i, :ori_time[i], :]
                    generated_data.append(temp)
    # Only after this call does nvidia-smi actually show the memory as freed.
    torch.cuda.empty_cache()
    return generated_data
if __name__ == '__main__':
    # synthetic-data output directory
    save_dir_path = OUTPUT_DIR + '/' + date_dir + '/' + classification_dir
    # trained-model directory
    model_path = PATH_TO_WEIGHTS + '/' + date_dir + '/' + classification_dir
    # load real data
    real_data = np.loadtxt(dataset_dir, delimiter=",", skiprows=0)
    # first-stage normalization; keep min/max for later de-normalization
    real_data, min_val1, max_val1 = MinMaxScaler1(real_data)
    batch_real_data = batch_generation(real_data, seq_len, 1)
    ori_time, _ = extract_time(batch_real_data)
    # second-stage normalization statistics only (scaled data is discarded)
    _, min_val2, max_val2 = MinMaxScaler2(batch_real_data)
    # To get same amount of data
    data_seq_len, dim = np.asarray(real_data).shape
    no = len(batch_real_data)
    # release variable memory
    del _
    del real_data
    del batch_real_data
    gc.collect()
    # _, real_data, batch_real_data = None, None, None
    print("Start generate data.")
    generated_data = Generate_data(model_path, no, data_seq_len, dim, ori_time)
    print("Start renormalize 2.")
    # undo second-stage scaling.
    # NOTE(review): generated_data is a Python list here; these expressions
    # rely on numpy scalar broadcasting — verify all sequences share a shape.
    generated_data = generated_data * max_val2
    generated_data = generated_data + min_val2
    print("Start concating data")
    # Make all batch data into a list
    generated_data_list = list()
    generated_data_list = concat_data(generated_data, generated_data_list)
    # release variable memory
    del generated_data
    gc.collect()
    print("Start renormalize 1.")
    # Renormalize the synthetic data back to the original value range
    generated_data_list = data_postprocess(
        generated_data_list, min_val1, max_val1)
    print("Start saving data.")
    # Save the data
    data_names = 1
    data_names = Save_Data(
        generated_data_list, save_dir_path, syntheitc_data_name)
|
{"/train.py": ["/Network/supervisor.py", "/Network/generator.py", "/Timedataset.py", "/Loss/embedder_loss.py", "/Loss/supervised_loss.py", "/Loss/joint_Gloss.py", "/Loss/joint_Dloss.py", "/utils.py"], "/Time_generate_data.py": ["/Timedataset.py", "/utils.py", "/dataset_preprocess.py"], "/dataset.py": ["/dataset_preprocess.py"], "/Timedataset.py": ["/dataset_preprocess.py"], "/utils.py": ["/dataset.py"], "/Timetest-tf.py": ["/dataset_preprocess.py"], "/Timetest.py": ["/Timedataset.py", "/Network/simple_predictor.py", "/utils.py"], "/train_c_rnn_gan.py": ["/Timedataset.py", "/utils.py"], "/c_rnn_gan_generate_data.py": ["/Timedataset.py", "/utils.py", "/dataset_preprocess.py"], "/Network/generator.py": ["/Network/Self_Attention/utils.py"]}
|
44,073,510
|
kent1201/Master-thesis
|
refs/heads/master
|
/Network/generator.py
|
import torch.nn as nn
import torch
import torch.nn.functional as F
# for others
from Network.tcn import TemporalConvNet
from Network.Self_Attention.layers import EncoderLayer
from Network.Self_Attention.utils import PositionalEncoding
from Network.Self_Attention.sublayers import MultiHeadAttention
# for inner testing
# from tcn import TemporalConvNet
# from Attention.layers import EncoderLayer
class Generator(nn.Module):
    """Sequence generator: (B, time_stamp, input_size) -> (B, T, output_dim).

    Input is first projected to ``hidden_dim`` (spectral-norm Linear +
    LayerNorm + dropout + activation), passed through a selectable
    recurrent core ('gru', 'tcn', or 'self-attn'), projected to
    ``output_dim`` and squashed with ``activate_function``.
    """

    def __init__(self, module='gru', time_stamp=82, input_size=27, hidden_dim=100,
                 output_dim=100, num_layers=10, activate_function=nn.Tanh(), padding_value=-999.0, max_seq_len = 100):
        super(Generator, self).__init__()
        self.module = module
        self.input_size = input_size
        self.time_stamp = time_stamp
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.hidden_dim_layers = []
        self.output_dim = output_dim
        self.padding_value = padding_value
        self.max_seq_len = max_seq_len
        self.dropout = nn.Dropout(p=0.1)
        if self.module == 'gru':
            self.r_cell = nn.GRU(
                input_size=self.hidden_dim,
                hidden_size=self.hidden_dim,
                num_layers=self.num_layers,
                batch_first=True
            )
        elif self.module == 'tcn':
            # one channel spec per temporal-conv layer
            for i in range(num_layers):
                self.hidden_dim_layers.append(self.hidden_dim)
            self.r_cell = TemporalConvNet(
                num_inputs=self.hidden_dim,
                num_channels=self.hidden_dim_layers,
                kernel_size=4,
                dropout=0.2
            )
        elif self.module == 'self-attn':
            self.position = PositionalEncoding(self.hidden_dim, dropout=0.1, max_len=self.max_seq_len)
            # single multi-head self-attention block; head count chosen so
            # that d_k = d_v = d_model / n_head stays an integer-ish split
            self.r_cell = MultiHeadAttention(
                n_head=(self.hidden_dim // 3),
                d_model=self.hidden_dim,
                d_k=(self.hidden_dim // (self.hidden_dim // 3)),
                d_v=(self.hidden_dim // (self.hidden_dim // 3))
            )
        self.activate = activate_function
        self.fc1 = nn.Sequential(
            nn.utils.spectral_norm(nn.Linear(self.input_size, self.hidden_dim)),
            nn.LayerNorm([self.time_stamp, self.hidden_dim]),
            self.dropout,
            self.activate
        )
        self.fc2 = nn.Sequential(
            nn.utils.spectral_norm(nn.Linear(self.hidden_dim, self.output_dim)),
        )
        # Init weights.
        # Default weights of TensorFlow is Xavier Uniform for W and 1 or 0 for b
        # Reference:
        # - https://www.tensorflow.org/api_docs/python/tf/compat/v1/get_variable
        # - https://github.com/tensorflow/tensorflow/blob/v2.3.1/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py#L484-L614
        with torch.no_grad():
            for name, param in self.r_cell.named_parameters():
                if 'weight_ih' in name:
                    torch.nn.init.xavier_uniform_(param.data)
                elif 'weight_hh' in name:
                    torch.nn.init.xavier_uniform_(param.data)
                elif 'bias_ih' in name:
                    param.data.fill_(1)
                elif 'bias_hh' in name:
                    param.data.fill_(0)
            for name, param in self.fc1.named_parameters():
                if 'weight' in name:
                    torch.nn.init.xavier_uniform_(param)
                elif 'bias' in name:
                    param.data.fill_(0)
            for name, param in self.fc2.named_parameters():
                if 'weight' in name:
                    torch.nn.init.xavier_uniform_(param)
                elif 'bias' in name:
                    param.data.fill_(0)

    def forward(self, X, T):
        """Generate a sequence batch.

        Args:
            X: noise/input tensor, (batch, time_stamp, input_size).
            T: per-sample valid lengths; used (as a tensor) only by the
               'gru' path for packing.

        Returns:
            activation(fc2(core(fc1(X)))), batch-first.
        """
        fc1_out = self.fc1(X)
        if self.module == 'tcn':
            # TemporalConvNet expects channels-first (B, C, T)
            fc1_out = torch.transpose(fc1_out, 1, 2)
            output = self.r_cell(fc1_out)
            output = torch.transpose(output, 1, 2)
        elif self.module == 'self-attn':
            fc1_out = self.position(fc1_out)
            # BUG FIX: the original called self.fc1_1/fc1_2/fc1_3 here, but
            # those layers were commented out of __init__, so this branch
            # always raised AttributeError. The undefined projections are
            # removed; the attention core is applied directly.
            enc_output = torch.transpose(fc1_out, 0, 1)
            enc_output, enc_slf_attn = self.r_cell(enc_output, enc_output, enc_output)
            output = torch.transpose(enc_output, 0, 1)
        elif self.module == 'gru':
            # pack so padded timesteps do not pollute the recurrent state
            X_packed = torch.nn.utils.rnn.pack_padded_sequence(
                input=fc1_out,
                lengths=T.cpu(),
                batch_first=True,
                enforce_sorted=False
            )
            H_o, H_t = self.r_cell(X_packed)
            # Pad RNN output back to sequence length
            output, T = torch.nn.utils.rnn.pad_packed_sequence(
                sequence=H_o,
                batch_first=True,
                padding_value=self.padding_value,
                total_length=self.max_seq_len
            )
        H = self.fc2(output)
        return self.activate(H)
# Device used by the standalone smoke test below (GPU when available).
CUDA_DEVICES = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def test():
    """Smoke test: push random data through a TCN generator and print shapes."""
    inputs = torch.randn(64, 24, 6)
    # one valid length per sample (all 24 here, since the data is unpadded)
    lengths = [sample.shape[0] for sample in inputs]
    model = Generator(
        module='tcn',
        time_stamp=24,
        input_size=6,
        hidden_dim=24,
        output_dim=24,
        num_layers=18,
        activate_function=nn.Sigmoid(),
        max_seq_len=max(lengths)
    )
    model.train()
    outputs = model(inputs, lengths)
    print("[generator.py] model: {}".format(model))
    print("[generator.py] inputs: {}".format(inputs.shape))
    print("[generator.py] outputs: {}".format(outputs.shape))
if __name__ == '__main__':
    # run the smoke test when this module is executed directly
    test()
|
{"/train.py": ["/Network/supervisor.py", "/Network/generator.py", "/Timedataset.py", "/Loss/embedder_loss.py", "/Loss/supervised_loss.py", "/Loss/joint_Gloss.py", "/Loss/joint_Dloss.py", "/utils.py"], "/Time_generate_data.py": ["/Timedataset.py", "/utils.py", "/dataset_preprocess.py"], "/dataset.py": ["/dataset_preprocess.py"], "/Timedataset.py": ["/dataset_preprocess.py"], "/utils.py": ["/dataset.py"], "/Timetest-tf.py": ["/dataset_preprocess.py"], "/Timetest.py": ["/Timedataset.py", "/Network/simple_predictor.py", "/utils.py"], "/train_c_rnn_gan.py": ["/Timedataset.py", "/utils.py"], "/c_rnn_gan_generate_data.py": ["/Timedataset.py", "/utils.py", "/dataset_preprocess.py"], "/Network/generator.py": ["/Network/Self_Attention/utils.py"]}
|
44,073,511
|
kent1201/Master-thesis
|
refs/heads/master
|
/Loss/joint_Gloss.py
|
import math
import torch
import torch.nn as nn
from Loss.soft_dtw import SoftDTW
class JointGloss(nn.Module):
    """Unsupervised (adversarial) generator loss.

    The discriminator score on fully generated data (``Y_fake``) carries
    full weight; the score from the supervised path (``Y_fake_e``) is
    down-weighted by 0.1.
    """

    def __init__(self, uloss_func):
        super(JointGloss, self).__init__()
        # 'wgan' / 'hinge' share the critic objective; anything else
        # falls back to the standard sigmoid cross-entropy GAN loss
        self.uloss_func = uloss_func
        self.gamma = 1

    def forward(self, Y_fake, Y_fake_e):
        """Y_fake, Y_fake_e: discriminator outputs, [batch, seq_len, 1]."""
        if self.uloss_func in ('wgan', 'hinge'):
            # maximize the critic score on generated samples
            return -Y_fake.mean() + 0.1 * (-Y_fake_e.mean())
        bce = torch.nn.functional.binary_cross_entropy_with_logits
        loss_g = bce(Y_fake, torch.ones_like(Y_fake))
        loss_g_e = bce(Y_fake_e, torch.ones_like(Y_fake_e))
        return loss_g + 0.1 * loss_g_e
if __name__ == '__main__':
    # Smoke test. Fixed: the original constructed JointGloss() without the
    # required uloss_func argument and called the criterion with six tensors
    # (a signature from an older TimeGAN loss) — both raised TypeError.
    # forward() takes exactly (Y_fake, Y_fake_e).
    Y_fake = torch.randn(32, 82, 1)
    Y_fake_e = torch.randn(32, 82, 1)
    criterion = JointGloss('hinge')
    loss = criterion(Y_fake, Y_fake_e)
    print(loss)
|
{"/train.py": ["/Network/supervisor.py", "/Network/generator.py", "/Timedataset.py", "/Loss/embedder_loss.py", "/Loss/supervised_loss.py", "/Loss/joint_Gloss.py", "/Loss/joint_Dloss.py", "/utils.py"], "/Time_generate_data.py": ["/Timedataset.py", "/utils.py", "/dataset_preprocess.py"], "/dataset.py": ["/dataset_preprocess.py"], "/Timedataset.py": ["/dataset_preprocess.py"], "/utils.py": ["/dataset.py"], "/Timetest-tf.py": ["/dataset_preprocess.py"], "/Timetest.py": ["/Timedataset.py", "/Network/simple_predictor.py", "/utils.py"], "/train_c_rnn_gan.py": ["/Timedataset.py", "/utils.py"], "/c_rnn_gan_generate_data.py": ["/Timedataset.py", "/utils.py", "/dataset_preprocess.py"], "/Network/generator.py": ["/Network/Self_Attention/utils.py"]}
|
44,101,248
|
Nishant-Narsale/Book-World
|
refs/heads/master
|
/api/serializers.py
|
from django.db.models import fields
from rest_framework import serializers
from main.models import *
class ProductsSerializer(serializers.ModelSerializer):
    """DRF serializer for Products; the binary ``image`` field is excluded."""
    class Meta():
        model = Products
        exclude = ['image']
|
{"/api/serializers.py": ["/main/models.py"], "/api/views.py": ["/api/serializers.py", "/main/models.py"], "/main/views.py": ["/main/models.py"], "/accounts/views.py": ["/main/models.py"]}
|
44,101,249
|
Nishant-Narsale/Book-World
|
refs/heads/master
|
/api/views.py
|
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .serializers import ProductsSerializer
from main.models import Products
@api_view(['GET'])
def ApiOverview(request):
    """API root: map of human-readable operation names to routes."""
    return Response({
        'View Products' : 'api/products/',
        'Create New Product' : 'api/create-product/',
        'Update Existing Product' : 'api/update-product/product_id(e.g. 2)/',
        'Delete Existing Product' : 'api/delete-product/product_id(e.g. 2)/'
    })
@api_view(['GET'])
def ProductsView(request):
    """Return every Product serialized as JSON."""
    products = Products.objects.all()
    serializer = ProductsSerializer(products, many=True)
    return Response(serializer.data)
@api_view(['POST'])
def CreateProduct(request):
    """Create a Product from the posted payload.

    NOTE(review): invalid payloads return the plain string 'Invalid data'
    with HTTP 200 — consider serializer.errors with status 400.
    """
    serializer = ProductsSerializer(data=request.data)
    if not serializer.is_valid():
        return Response('Invalid data')
    serializer.save()
    return Response(serializer.data)
@api_view(['POST'])
def UpdateProduct(request,id):
    """Overwrite the Product with the given id from the posted payload.

    NOTE(review): Products.objects.get raises DoesNotExist (HTTP 500) for
    unknown ids — consider get_object_or_404.
    """
    product = Products.objects.get(id=id)
    serializer = ProductsSerializer(instance=product, data=request.data)
    if not serializer.is_valid():
        return Response('Invalid data')
    serializer.save()
    return Response(serializer.data)
@api_view(['DELETE'])
def DeleteProduct(request,id):
    """Delete the Product with the given id.

    NOTE(review): .get raises DoesNotExist (HTTP 500) for unknown ids —
    consider get_object_or_404.
    """
    product = Products.objects.get(id=id)
    product.delete()
    return Response(f'{product} Deleted successfully')
|
{"/api/serializers.py": ["/main/models.py"], "/api/views.py": ["/api/serializers.py", "/main/models.py"], "/main/views.py": ["/main/models.py"], "/accounts/views.py": ["/main/models.py"]}
|
44,101,250
|
Nishant-Narsale/Book-World
|
refs/heads/master
|
/main/views.py
|
from django.shortcuts import render
from django.contrib import messages
from django.http import JsonResponse
import json
from .models import *
# Create your views here.
def index(request):
    """Home page: all books plus the signed-in user's order (cart badge).

    NOTE(review): Order.objects.get(customer=customer) raises DoesNotExist /
    MultipleObjectsReturned for zero or several orders; the cart view uses
    get_or_create(complete=False) — consider doing the same here.
    """
    # Fix: the original ran Products.objects.all() a second time in the
    # anonymous branch; one query serves both branches.
    books = Products.objects.all()
    if request.user.is_authenticated:
        user_is_authenticated = True
        customer = request.user.customer
        order = Order.objects.get(customer=customer)
    else:
        user_is_authenticated = False
        order = []
    context = {
        'books':books,
        'user_is_authenticated':user_is_authenticated,
        'order':order
    }
    return render(request, 'index.html', context)
def cart(request):
    """Shopping-cart page: items of the user's open (incomplete) order."""
    if request.user.is_authenticated :
        user_is_authenticated = True
        customer = request.user.customer
        # fetch-or-create the single open order for this customer
        order, created = Order.objects.get_or_create(customer=customer,complete=False)
        order_items = order.orderitem_set.all()
    else:
        # anonymous visitors see an empty cart
        user_is_authenticated = False
        order = []
        order_items = []
    context = {
        "order_items":order_items,
        'user_is_authenticated':user_is_authenticated,
        "order":order
    }
    return render(request, 'cart.html', context)
def checkout(request):
    """Checkout page listing the items of the user's order.

    NOTE(review): Order.objects.get crashes when the customer has zero or
    multiple orders — the cart view's get_or_create pattern is safer.
    """
    if request.user.is_authenticated:
        user_is_authenticated = True
        customer = request.user.customer
        order = Order.objects.get(customer=customer)
        order_items = order.orderitem_set.all()
    else:
        user_is_authenticated = False
        order = []
        order_items = []
    context = {
        'user_is_authenticated':user_is_authenticated,
        'order_items':order_items,
        'order':order
    }
    return render(request, 'checkout.html', context)
def profile(request):
    """Profile page; anonymous visitors get a stub guest user.

    NOTE(review): Order.objects.get crashes when the customer has zero or
    multiple orders — see the cart view's get_or_create pattern.
    """
    if request.user.is_authenticated:
        user_is_authenticated = True
        user = request.user
        customer = request.user.customer
        order = Order.objects.get(customer=customer)
    else:
        # placeholder identity shown to anonymous visitors
        user = {'username':'Guest User','email':'guest@gmail.com',}
        user_is_authenticated = False
        order = []
    context = {
        'user_is_authenticated':user_is_authenticated,
        'user':user,
        'order':order
    }
    return render(request, 'profile.html', context)
def about(request):
    """Static about page; only needs the auth flag for the navbar."""
    about_page = True
    if request.user.is_authenticated:
        user_is_authenticated = True
        customer = request.user.customer
    else:
        user_is_authenticated = False
    context = {
        'user_is_authenticated':user_is_authenticated,
        'about_page':about_page,
    }
    return render(request, 'about.html', context)
def category(request, category_name):
    """List the books belonging to one category.

    NOTE(review): Order.objects.get crashes when the customer has zero or
    multiple orders — see the cart view's get_or_create pattern.
    """
    category_page = True
    books = Products.objects.filter(category=category_name)
    if request.user.is_authenticated:
        user_is_authenticated = True
        customer = request.user.customer
        order = Order.objects.get(customer=customer)
    else:
        user_is_authenticated = False
        order = []
    context = {
        'user_is_authenticated':user_is_authenticated,
        'category_page':category_page,
        'category_name':category_name,
        'books':books,
        'order':order
    }
    return render(request, 'layout_categories.html' ,context)
def updateItem(request):
    """AJAX endpoint: adjust a product's quantity in the user's open order.

    Expects a JSON body {'productId': <id>, 'action': 'add'|'remove'|'removeItem'}.
    Requires an authenticated user with a related Customer.
    """
    data = json.loads(request.body)
    productId = data['productId']
    action = data['action']
    print('Product Id:',productId)
    print('Action:',action)
    product = Products.objects.get(id=productId)
    customer = request.user.customer
    # Consistency fix: target the *open* order exactly like the cart view;
    # the original get_or_create without complete=False could pick up a
    # completed order or raise MultipleObjectsReturned.
    order, created = Order.objects.get_or_create(customer=customer, complete=False)
    # get_or_create replaces the original bare try/except, which silently
    # swallowed every error (not just a missing OrderItem).
    orderItem, created = OrderItem.objects.get_or_create(
        order=order, product=product, defaults={'quantity': 0})
    if action=='add':
        orderItem.quantity = (orderItem.quantity + 1)
    elif action=='remove':
        orderItem.quantity = (orderItem.quantity - 1)
    orderItem.save()
    if action=='removeItem':
        orderItem.delete()
    # drop the line entirely once the quantity reaches zero
    if orderItem.quantity <= 0:
        orderItem.delete()
    return JsonResponse("Item is added", safe=False)
def sellItem(request):
    """Render the 'sell a book' page."""
    return render(request, 'sell.html', {'sell_page': True})
|
{"/api/serializers.py": ["/main/models.py"], "/api/views.py": ["/api/serializers.py", "/main/models.py"], "/main/views.py": ["/main/models.py"], "/accounts/views.py": ["/main/models.py"]}
|
44,101,251
|
Nishant-Narsale/Book-World
|
refs/heads/master
|
/main/templatetags/to_url.py
|
from django import template
register = template.Library()
@register.filter
def to_url(value):
    """Template filter: drop the leading character (e.g. '/path' -> 'path')."""
    return value[1:]
|
{"/api/serializers.py": ["/main/models.py"], "/api/views.py": ["/api/serializers.py", "/main/models.py"], "/main/views.py": ["/main/models.py"], "/accounts/views.py": ["/main/models.py"]}
|
44,101,252
|
Nishant-Narsale/Book-World
|
refs/heads/master
|
/main/urls.py
|
from django.urls import path, include
from . import views
# Namespace: routes are reversed as 'main:<name>' in views and templates.
app_name = 'main'
urlpatterns = [
    path('', views.index , name="index"),
    path('cart/', views.cart , name="cart"),
    path('checkout/', views.checkout, name="checkout"),
    path('profile/', views.profile, name="profile"),
    path('category/<str:category_name>', views.category, name="category"),
    path('about/',views.about, name="about"),
    # JSON endpoint used by the cart front-end
    path('updateItem/', views.updateItem, name="updateItem"),
    path('sell_book/', views.sellItem, name="sellItem")
]
|
{"/api/serializers.py": ["/main/models.py"], "/api/views.py": ["/api/serializers.py", "/main/models.py"], "/main/views.py": ["/main/models.py"], "/accounts/views.py": ["/main/models.py"]}
|
44,101,253
|
Nishant-Narsale/Book-World
|
refs/heads/master
|
/api/urls.py
|
from django.urls import path
from . import views
# Namespace: routes are reversed as 'api:<name>'.
app_name = 'api'
urlpatterns = [
    path('',views.ApiOverview,name='apioverview'),
    path('products/',views.ProductsView,name='products'),
    path('create-product/',views.CreateProduct,name='create-product'),
    path('update-product/<str:id>/',views.UpdateProduct,name='update-product'),
    path('delete-product/<str:id>/',views.DeleteProduct,name='delete-product'),
]
|
{"/api/serializers.py": ["/main/models.py"], "/api/views.py": ["/api/serializers.py", "/main/models.py"], "/main/views.py": ["/main/models.py"], "/accounts/views.py": ["/main/models.py"]}
|
44,101,254
|
Nishant-Narsale/Book-World
|
refs/heads/master
|
/main/migrations/0005_auto_20210424_1726.py
|
# Generated by Django 3.1.6 on 2021-04-24 11:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated by Django — change the model and run makemigrations
    rather than editing this by hand."""
    dependencies = [
        ('main', '0004_products_category'),
    ]
    operations = [
        migrations.AlterField(
            model_name='products',
            name='category',
            field=models.CharField(choices=[('S.Y.Engineering', 'S.Y.Engineering'), ('F.Y.Engineering', 'F.Y.Engineering'), ('other', 'other')], default='other', max_length=120),
        ),
    ]
|
{"/api/serializers.py": ["/main/models.py"], "/api/views.py": ["/api/serializers.py", "/main/models.py"], "/main/views.py": ["/main/models.py"], "/accounts/views.py": ["/main/models.py"]}
|
44,101,255
|
Nishant-Narsale/Book-World
|
refs/heads/master
|
/mpbi/email_info.py
|
# SMTP settings imported by the Django settings module.
# SECURITY(review): a real Gmail address and app password are hard-coded and
# committed to the repository — rotate these credentials and load them from
# environment variables or a secrets manager instead.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = 'nishantnarsale33@gmail.com'
EMAIL_HOST_PASSWORD = 'qexzmiminsnlvovr'
ACCOUNT_EMAIL_VERIFICATION = 'none'
|
{"/api/serializers.py": ["/main/models.py"], "/api/views.py": ["/api/serializers.py", "/main/models.py"], "/main/views.py": ["/main/models.py"], "/accounts/views.py": ["/main/models.py"]}
|
44,101,256
|
Nishant-Narsale/Book-World
|
refs/heads/master
|
/main/models.py
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
# Closed set of (value, label) pairs for Products.category.
# Fixed: this was a *set* literal, so the order of choices rendered in forms
# and the admin was nondeterministic between processes; a list keeps a
# stable, intentional order.
category_choice = [
    ('F.Y.Engineering','F.Y.Engineering'),
    ('S.Y.Engineering','S.Y.Engineering'),
    ('Horror','Horror'),
    ('Comic','Comic'),
    ('Science Fiction','Science Fiction'),
    ('other','other'),
]
class Products(models.Model):
    """A book listed for sale."""
    name = models.CharField(max_length=100,null=True,blank=True)
    price = models.FloatField(blank=True, null=True)
    image = models.ImageField(null=True,blank=True)
    category = models.CharField(max_length=120, default='other', choices=category_choice)

    def __str__(self):
        return self.name

    @property
    def imageURL(self):
        """URL of the cover image, or '' when no file is attached."""
        try:
            return self.image.url
        except ValueError:
            # FieldFile.url raises ValueError when no file is associated;
            # the original bare `except:` also hid unrelated errors.
            return ''
class Customer(models.Model):
    """Shop profile linked one-to-one to the auth User.

    NOTE(review): __str__ assumes self.user is set, but the relation is
    nullable — a customer without a user would raise AttributeError.
    """
    user = models.OneToOneField(User, on_delete=models.CASCADE, null=True, blank=True)
    name = models.CharField(max_length=120, null=True, blank=True)
    email = models.CharField(max_length=200, null=True, blank=True)
    def __str__(self):
        return self.user.username
class Order(models.Model):
    """A customer's cart/order; OrderItem rows point back via FK."""
    customer = models.ForeignKey(Customer,on_delete=models.CASCADE, null=True, blank=True)
    date_ordered = models.DateField(auto_now_add=True)
    complete = models.BooleanField(default=False)
    transaction_id = models.IntegerField(null=True)

    @property
    def total_cart_items(self):
        """Total quantity across every item in this order."""
        return sum(item.quantity for item in self.orderitem_set.all())

    @property
    def total_cart_price(self):
        """Total price across every item in this order."""
        return sum(item.total_price for item in self.orderitem_set.all())
class OrderItem(models.Model):
    """Line item: a Product in an Order with a quantity."""
    product = models.ForeignKey(Products, on_delete=models.SET_NULL , null=True, blank=True)
    order = models.ForeignKey(Order, on_delete=models.SET_NULL, null=True, blank=True)
    quantity = models.IntegerField(null=True, blank=True)
    date_added = models.DateField(auto_now_add=True)
    @property
    def total_price(self):
        # NOTE(review): product is SET_NULL on delete, so self.product can be
        # None here and this would raise AttributeError — confirm handling.
        return self.product.price * self.quantity
class ShippingAddress(models.Model):
    """Delivery address captured at checkout for a customer's order."""
    customer = models.ForeignKey(Customer, on_delete=models.SET_NULL, null=True, blank=True)
    order = models.ForeignKey(Order, on_delete=models.SET_NULL , null=True, blank=True)
    address = models.CharField(max_length=200, null=False)
    city = models.CharField(max_length=200, null=False)
    state = models.CharField(max_length=200, null=False)
    zipcode = models.CharField(max_length=200, null=False)
    date_added = models.DateField(auto_now_add=True)
    def __str__(self):
        return self.address
|
{"/api/serializers.py": ["/main/models.py"], "/api/views.py": ["/api/serializers.py", "/main/models.py"], "/main/views.py": ["/main/models.py"], "/accounts/views.py": ["/main/models.py"]}
|
44,101,257
|
Nishant-Narsale/Book-World
|
refs/heads/master
|
/accounts/views.py
|
from django.shortcuts import render,redirect
from django.contrib import messages
from django.contrib.auth.models import User, auth
import django.contrib.auth as account
from django.core.mail import EmailMessage
from django.conf import settings
from django.template.loader import render_to_string
from main.models import Customer,Order
# Create your views here.
def login(request):
    """Log a user in; already-authenticated visitors go straight home.

    NOTE(review): on a failed login the user is told they are "not
    registered" and sent to the register page even when only the password
    was wrong — confirm this is the intended UX.
    """
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        user = auth.authenticate(username=username, password=password)
        if user is not None:
            auth.login(request, user)
            return redirect('main:index')
        else:
            messages.error(request,'You are not registered. Please register.')
            return redirect('accounts:register')
    else:
        if request.user.is_authenticated:
            return redirect('main:index')
        return render(request, 'account/login.html')
def register(request):
    """Sign-up view: validate the form, create User + Customer + Order,
    log the new user in, and send a welcome email (best-effort)."""
    if request.method == "POST":
        username = request.POST['username']
        password1 = request.POST['password1']
        password2 = request.POST['password2']
        email = request.POST['email']
        if password1 == password2:
            if User.objects.filter(email = email).exists():
                messages.error(request,'Email Already Exists')
                return redirect('accounts:register')
            elif User.objects.filter(username = username).exists():
                messages.error(request,'Username Already Exists')
                return redirect('accounts:register')
            else:
                # Fix: create the account *before* emailing. The original
                # sent the welcome mail first, so an SMTP outage aborted
                # registration entirely, and a later create_user failure
                # would have left a stray "success" email already sent.
                user = User.objects.create_user(username = username, password = password1, email = email)
                user.save()
                print('user created')
                # log the newly created user in
                user = auth.authenticate(username=username, password=password1)
                auth.login(request, user)
                print('user logged in')
                # create the Customer profile and an open Order for the cart
                customer = Customer.objects.create(user=user,email=email)
                Order.objects.create(customer=customer)
                # best-effort welcome email: a mail failure must not undo signup
                try:
                    stringed_template = render_to_string('email_template.html',{'name':username})
                    mail = EmailMessage(
                        subject='Your Registration for Book World is successful',
                        body=stringed_template,
                        from_email=settings.EMAIL_HOST_USER,
                        to=[email],
                    )
                    mail.send(fail_silently=True)
                except Exception as exc:
                    print('welcome email failed:', exc)
                return redirect('main:index')
        else:
            messages.error(request,'Password not matching...')
            return redirect('accounts:register')
    else:
        if request.user.is_authenticated:
            return redirect('main:index')
        return render(request, 'account/signup.html')
def logout(request):
    """End the current session and show the landing page."""
    print(request.user,"logged out")
    account.logout(request=request)
    return render(request, 'index.html')
|
{"/api/serializers.py": ["/main/models.py"], "/api/views.py": ["/api/serializers.py", "/main/models.py"], "/main/views.py": ["/main/models.py"], "/accounts/views.py": ["/main/models.py"]}
|
44,120,142
|
jmvduenas/network_tracker
|
refs/heads/main
|
/ip_tracker/admin.py
|
from django.contrib import admin
from .models import Network
class NetworkAdmin(admin.ModelAdmin):
    """Admin list-view columns for Network entries."""
    list_display = ('office', 'ip_address','device','configured','date')

# Fix: the model was registered *before* (and without) NetworkAdmin, so the
# custom list_display was never applied in the admin.
admin.site.register(Network, NetworkAdmin)
{"/ip_tracker/views.py": ["/ip_tracker/models.py", "/ip_tracker/forms.py", "/ip_tracker/filters.py"], "/ip_tracker/filters.py": ["/ip_tracker/models.py"], "/ip_tracker/admin.py": ["/ip_tracker/models.py"], "/ip_tracker/forms.py": ["/ip_tracker/models.py"], "/cssp_ip_tracker/urls.py": ["/ip_tracker/views.py"]}
|
44,120,143
|
jmvduenas/network_tracker
|
refs/heads/main
|
/ip_tracker/migrations/0003_remove_network_remarks.py
|
# Generated by Django 2.2 on 2021-07-22 02:31
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated by Django — drops the Network.remarks field."""
    dependencies = [
        ('ip_tracker', '0002_network_remarks'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='network',
            name='remarks',
        ),
    ]
|
{"/ip_tracker/views.py": ["/ip_tracker/models.py", "/ip_tracker/forms.py", "/ip_tracker/filters.py"], "/ip_tracker/filters.py": ["/ip_tracker/models.py"], "/ip_tracker/admin.py": ["/ip_tracker/models.py"], "/ip_tracker/forms.py": ["/ip_tracker/models.py"], "/cssp_ip_tracker/urls.py": ["/ip_tracker/views.py"]}
|
44,120,144
|
jmvduenas/network_tracker
|
refs/heads/main
|
/ip_tracker/migrations/0012_auto_20210802_1452.py
|
# Generated by Django 2.2 on 2021-08-02 06:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated by Django — relabels Network.office as 'Department'."""
    dependencies = [
        ('ip_tracker', '0011_auto_20210802_1022'),
    ]
    operations = [
        migrations.AlterField(
            model_name='network',
            name='office',
            field=models.CharField(max_length=200, verbose_name='Department'),
        ),
    ]
|
{"/ip_tracker/views.py": ["/ip_tracker/models.py", "/ip_tracker/forms.py", "/ip_tracker/filters.py"], "/ip_tracker/filters.py": ["/ip_tracker/models.py"], "/ip_tracker/admin.py": ["/ip_tracker/models.py"], "/ip_tracker/forms.py": ["/ip_tracker/models.py"], "/cssp_ip_tracker/urls.py": ["/ip_tracker/views.py"]}
|
44,120,145
|
jmvduenas/network_tracker
|
refs/heads/main
|
/ip_tracker/filters.py
|
import django_filters
from django_filters import DateFilter, CharFilter
from django.forms.widgets import DateInput
from .models import Network
class NetworkFilter(django_filters.FilterSet):
    """List filter: case-insensitive substring match on office/device/serial."""
    office = CharFilter(field_name='office', lookup_expr='icontains')
    device = CharFilter(field_name='device', lookup_expr='icontains')
    serial_number = CharFilter(field_name='serial_number', lookup_expr='icontains')

    class Meta:
        model = Network
        fields = ['office','device', 'serial_number']
|
{"/ip_tracker/views.py": ["/ip_tracker/models.py", "/ip_tracker/forms.py", "/ip_tracker/filters.py"], "/ip_tracker/filters.py": ["/ip_tracker/models.py"], "/ip_tracker/admin.py": ["/ip_tracker/models.py"], "/ip_tracker/forms.py": ["/ip_tracker/models.py"], "/cssp_ip_tracker/urls.py": ["/ip_tracker/views.py"]}
|
44,120,146
|
jmvduenas/network_tracker
|
refs/heads/main
|
/ip_tracker/views.py
|
from django.shortcuts import get_object_or_404, redirect, render
from django.http import HttpResponse, HttpResponseRedirect, response
from ip_tracker.models import Network
from django.urls import reverse, reverse_lazy
from ip_tracker.forms import NetworkForm
from .filters import NetworkFilter
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.contrib.auth.decorators import login_required
import csv
#CSV button
def view_csv(request):
    """Export every Network row as a downloadable CSV attachment.

    NOTE(review): migration 0003 removed the ``remarks`` field — confirm it
    still exists on the model, otherwise the row write raises AttributeError.
    """
    # Fix: 'csv' is not a valid MIME type; browsers expect 'text/csv'.
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=views.csv'
    writer = csv.writer(response)
    writer.writerow(['Department', 'Device', 'IP Address', 'Gateway', 'Serial Number', 'Configured by', 'Date Configured', 'Comments'])
    # renamed loop variable: `object` shadowed the builtin
    for entry in Network.objects.all():
        writer.writerow([ entry.office, entry.device, entry.ip_address, entry.gateway, entry.serial_number, entry.configured, entry.date, entry.remarks])
    return response
def login_page(request):
    """Render the login form; on POST, authenticate and redirect home.

    A failed login falls through to re-render the form with a message.
    """
    if request.method == 'POST':
        user = authenticate(
            request,
            username=request.POST.get('username'),
            password=request.POST.get('password'),
        )
        if user is None:
            messages.info(request, 'Username OR password is incorrect')
        else:
            login(request, user)
            return redirect('home')
    return render(request, 'login.html', {})
def logout_user(request):
    """End the current session and send the user back to the login page."""
    logout(request)
    return redirect('login')
@login_required(login_url='login')
def home_view(request):
    """List all Network records, narrowed by any GET filter parameters."""
    network_filter = NetworkFilter(request.GET, queryset=Network.objects.all())
    return render(request, "list.html", {
        'object': network_filter.qs,
        'myFilter': network_filter,
    })
@login_required(login_url='login')
def view_network(request):
    """Render the static network topology page."""
    return render(request, 'topology.html', {})
@login_required(login_url='login')
def add_ip_address(request):
    """Create a new Network record.

    Fix: only bind the form to request data on POST. The original bound
    NetworkForm(request.POST, request.FILES) on every request, so a plain
    GET rendered the empty form already covered in "field required"
    validation errors.
    """
    if request.method == 'POST':
        form = NetworkForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('home'))
    else:
        form = NetworkForm()
    context = {
        'form': form
    }
    return render(request, 'add_ip.html', context)
@login_required(login_url='login')
def view_details(request, pk):
    """Show a single Network record.

    Fix: use get_object_or_404 (already imported at the top of this file)
    so a bad pk returns a 404 page instead of an unhandled
    Network.DoesNotExist (server error).
    """
    obj = get_object_or_404(Network, id=pk)
    context = {
        'object': obj
    }
    return render(request, "view.html", context)
@login_required(login_url='login')
def update_details(request, pk):
    """Edit an existing Network record (GET shows the form, POST saves).

    Fix: use get_object_or_404 so a bad pk yields a 404 instead of an
    unhandled Network.DoesNotExist.
    """
    template = 'update_details.html'
    obj = get_object_or_404(Network, id=pk)
    form = NetworkForm(instance=obj)
    if request.method == "POST":
        # Rebind the form with the submitted data on top of the instance.
        form = NetworkForm(request.POST, request.FILES, instance=obj)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('home'))
    context = {
        'form': form
    }
    return render(request, template, context)
@login_required(login_url='login')
def delete_ip_address(request, pk):
    """Confirm (GET) and perform (POST) deletion of a Network record.

    Fix: use get_object_or_404 so a bad pk yields a 404 instead of an
    unhandled Network.DoesNotExist; the unused `context` local is dropped.
    """
    obj = get_object_or_404(Network, id=pk)
    if request.method == "POST":
        obj.delete()
        return HttpResponseRedirect("/")
    return render(request, 'delete.html', {})
|
{"/ip_tracker/views.py": ["/ip_tracker/models.py", "/ip_tracker/forms.py", "/ip_tracker/filters.py"], "/ip_tracker/filters.py": ["/ip_tracker/models.py"], "/ip_tracker/admin.py": ["/ip_tracker/models.py"], "/ip_tracker/forms.py": ["/ip_tracker/models.py"], "/cssp_ip_tracker/urls.py": ["/ip_tracker/views.py"]}
|
44,120,147
|
jmvduenas/network_tracker
|
refs/heads/main
|
/ip_tracker/migrations/0010_auto_20210729_1501.py
|
# Generated by Django 2.2 on 2021-07-29 07:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Network.images optional (blank/null) with an
    empty upload_to."""
    dependencies = [
        ('ip_tracker', '0009_network_images'),
    ]
    operations = [
        migrations.AlterField(
            model_name='network',
            name='images',
            field=models.ImageField(blank=True, null=True, upload_to=''),
        ),
    ]
|
{"/ip_tracker/views.py": ["/ip_tracker/models.py", "/ip_tracker/forms.py", "/ip_tracker/filters.py"], "/ip_tracker/filters.py": ["/ip_tracker/models.py"], "/ip_tracker/admin.py": ["/ip_tracker/models.py"], "/ip_tracker/forms.py": ["/ip_tracker/models.py"], "/cssp_ip_tracker/urls.py": ["/ip_tracker/views.py"]}
|
44,120,148
|
jmvduenas/network_tracker
|
refs/heads/main
|
/ip_tracker/forms.py
|
from django import forms
from .models import Network
from django.forms.widgets import DateInput
class NetworkForm(forms.ModelForm):
    """ModelForm for creating/editing Network records.

    Overrides two widgets: an HTML5 date picker for `date` and a
    placeholder hint on `remarks`.
    """
    class Meta:
        model = Network
        fields = ['office', 'device', 'ip_address', 'gateway', 'serial_number', 'configured', 'date', 'remarks', 'images']
        widgets = {
            'date': DateInput(attrs={'type': 'date'}),
            'remarks': forms.TextInput(attrs={'placeholder': 'Enter your comments'}),
        }
|
{"/ip_tracker/views.py": ["/ip_tracker/models.py", "/ip_tracker/forms.py", "/ip_tracker/filters.py"], "/ip_tracker/filters.py": ["/ip_tracker/models.py"], "/ip_tracker/admin.py": ["/ip_tracker/models.py"], "/ip_tracker/forms.py": ["/ip_tracker/models.py"], "/cssp_ip_tracker/urls.py": ["/ip_tracker/views.py"]}
|
44,120,149
|
jmvduenas/network_tracker
|
refs/heads/main
|
/ip_tracker/migrations/0008_auto_20210728_1318.py
|
# Generated by Django 2.2 on 2021-07-28 05:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter Network.remarks to
    CharField(max_length=500, verbose_name='Comments')."""
    dependencies = [
        ('ip_tracker', '0007_auto_20210722_1054'),
    ]
    operations = [
        migrations.AlterField(
            model_name='network',
            name='remarks',
            field=models.CharField(max_length=500, verbose_name='Comments'),
        ),
    ]
|
{"/ip_tracker/views.py": ["/ip_tracker/models.py", "/ip_tracker/forms.py", "/ip_tracker/filters.py"], "/ip_tracker/filters.py": ["/ip_tracker/models.py"], "/ip_tracker/admin.py": ["/ip_tracker/models.py"], "/ip_tracker/forms.py": ["/ip_tracker/models.py"], "/cssp_ip_tracker/urls.py": ["/ip_tracker/views.py"]}
|
44,120,150
|
jmvduenas/network_tracker
|
refs/heads/main
|
/ip_tracker/migrations/0013_auto_20210802_1504.py
|
# Generated by Django 2.2 on 2021-08-02 07:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add the optional Network.serial_number field and
    update the verbose names of `date` and `device`."""
    dependencies = [
        ('ip_tracker', '0012_auto_20210802_1452'),
    ]
    operations = [
        migrations.AddField(
            model_name='network',
            name='serial_number',
            field=models.CharField(blank=True, max_length=200, null=True, verbose_name='Serial Number'),
        ),
        migrations.AlterField(
            model_name='network',
            name='date',
            field=models.DateField(verbose_name='Date'),
        ),
        migrations.AlterField(
            model_name='network',
            name='device',
            field=models.CharField(max_length=200, verbose_name='Device'),
        ),
    ]
|
{"/ip_tracker/views.py": ["/ip_tracker/models.py", "/ip_tracker/forms.py", "/ip_tracker/filters.py"], "/ip_tracker/filters.py": ["/ip_tracker/models.py"], "/ip_tracker/admin.py": ["/ip_tracker/models.py"], "/ip_tracker/forms.py": ["/ip_tracker/models.py"], "/cssp_ip_tracker/urls.py": ["/ip_tracker/views.py"]}
|
44,120,151
|
jmvduenas/network_tracker
|
refs/heads/main
|
/ip_tracker/migrations/0009_network_images.py
|
# Generated by Django 2.2 on 2021-07-28 08:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add a (then required) Network.images ImageField
    uploading to 'images/'."""
    dependencies = [
        ('ip_tracker', '0008_auto_20210728_1318'),
    ]
    operations = [
        migrations.AddField(
            model_name='network',
            name='images',
            field=models.ImageField(default=False, upload_to='images/'),
            preserve_default=False,
        ),
    ]
|
{"/ip_tracker/views.py": ["/ip_tracker/models.py", "/ip_tracker/forms.py", "/ip_tracker/filters.py"], "/ip_tracker/filters.py": ["/ip_tracker/models.py"], "/ip_tracker/admin.py": ["/ip_tracker/models.py"], "/ip_tracker/forms.py": ["/ip_tracker/models.py"], "/cssp_ip_tracker/urls.py": ["/ip_tracker/views.py"]}
|
44,120,152
|
jmvduenas/network_tracker
|
refs/heads/main
|
/cssp_ip_tracker/urls.py
|
"""cssp_ip_tracker URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import include, url
from django.urls import path
from ip_tracker.views import home_view, add_ip_address, delete_ip_address, view_details, update_details, view_network, login_page, logout_user, view_csv
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = [
    path('admin/', admin.site.urls),
    # Authentication: the site root is the login page.
    path('', login_page, name = 'login'),
    path('logout/', logout_user, name = 'logout'),
    # Main record listing and static topology page.
    path('home/', home_view, name = 'home'),
    path('topology/', view_network, name = 'topology'),
    # CRUD on Network records (pk-based detail routes).
    path('add/', add_ip_address, name = 'add'),
    path('view/<int:pk>/', view_details, name = 'view'),
    path('update/<int:pk>/', update_details, name = 'update'),
    path('delete/<int:pk>/', delete_ip_address, name = 'delete'),
    # CSV export of all records.
    path('view_csv', view_csv, name = 'csv'),
]
# Serve uploaded media (Network.images) via MEDIA_URL.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
{"/ip_tracker/views.py": ["/ip_tracker/models.py", "/ip_tracker/forms.py", "/ip_tracker/filters.py"], "/ip_tracker/filters.py": ["/ip_tracker/models.py"], "/ip_tracker/admin.py": ["/ip_tracker/models.py"], "/ip_tracker/forms.py": ["/ip_tracker/models.py"], "/cssp_ip_tracker/urls.py": ["/ip_tracker/views.py"]}
|
44,120,153
|
jmvduenas/network_tracker
|
refs/heads/main
|
/ip_tracker/migrations/0001_initial.py
|
# Generated by Django 2.2 on 2021-07-19 09:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: create the Network table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Network',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('office', models.CharField(max_length=200, verbose_name='Department/Office')),
                ('ip_address', models.CharField(max_length=200, verbose_name='IP Address')),
                ('gateway', models.CharField(max_length=200, verbose_name='Gateway Address')),
                ('device', models.CharField(max_length=200, verbose_name='Device Assigned')),
                ('configured', models.CharField(max_length=200, verbose_name='Configured by')),
                ('date', models.DateField(verbose_name='Date Configured')),
            ],
        ),
    ]
|
{"/ip_tracker/views.py": ["/ip_tracker/models.py", "/ip_tracker/forms.py", "/ip_tracker/filters.py"], "/ip_tracker/filters.py": ["/ip_tracker/models.py"], "/ip_tracker/admin.py": ["/ip_tracker/models.py"], "/ip_tracker/forms.py": ["/ip_tracker/models.py"], "/cssp_ip_tracker/urls.py": ["/ip_tracker/views.py"]}
|
44,120,154
|
jmvduenas/network_tracker
|
refs/heads/main
|
/ip_tracker/migrations/0011_auto_20210802_1022.py
|
# Generated by Django 2.2 on 2021-08-02 02:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter Network.images to an optional ImageField
    uploading to 'images/'."""
    dependencies = [
        ('ip_tracker', '0010_auto_20210729_1501'),
    ]
    operations = [
        migrations.AlterField(
            model_name='network',
            name='images',
            field=models.ImageField(blank=True, null=True, upload_to='images/'),
        ),
    ]
|
{"/ip_tracker/views.py": ["/ip_tracker/models.py", "/ip_tracker/forms.py", "/ip_tracker/filters.py"], "/ip_tracker/filters.py": ["/ip_tracker/models.py"], "/ip_tracker/admin.py": ["/ip_tracker/models.py"], "/ip_tracker/forms.py": ["/ip_tracker/models.py"], "/cssp_ip_tracker/urls.py": ["/ip_tracker/views.py"]}
|
44,120,155
|
jmvduenas/network_tracker
|
refs/heads/main
|
/ip_tracker/models.py
|
from django.db import models
class Network(models.Model):
    """One tracked network assignment: which office uses which device/IP,
    who configured it and when, plus free-form comments and an optional
    photo."""
    office = models.CharField(verbose_name="Department", max_length=200)
    # Stored as free text, not a validated IP field.
    ip_address = models.CharField(max_length=200, verbose_name="IP Address")
    gateway = models.CharField(max_length=200, verbose_name="Gateway Address")
    device = models.CharField(max_length=200, verbose_name="Device")
    serial_number = models.CharField(max_length=200, verbose_name="Serial Number", null=True, blank=True)
    # Name of the person who configured the device.
    configured = models.CharField(max_length=200, verbose_name="Configured by")
    date = models.DateField(verbose_name="Date")
    remarks = models.CharField(max_length=500, verbose_name="Comments")
    # Optional photo; served from MEDIA_URL/images/.
    images = models.ImageField(null=True, blank=True, upload_to="images/")
|
{"/ip_tracker/views.py": ["/ip_tracker/models.py", "/ip_tracker/forms.py", "/ip_tracker/filters.py"], "/ip_tracker/filters.py": ["/ip_tracker/models.py"], "/ip_tracker/admin.py": ["/ip_tracker/models.py"], "/ip_tracker/forms.py": ["/ip_tracker/models.py"], "/cssp_ip_tracker/urls.py": ["/ip_tracker/views.py"]}
|
44,153,452
|
lazka/msys2-web
|
refs/heads/main
|
/tests/test_exttarfile.py
|
import io
from app.exttarfile import ExtTarFile
def test_zst() -> None:
    """ExtTarFile transparently opens a zstd-compressed tar stream.

    DATA is a tiny .tar.zst archive containing a single empty member
    named 'test.txt'; the bytes must be preserved exactly.
    """
    DATA = (
        b'(\xb5/\xfd\x00X\xd5\x02\x00\xd4\x03test.txt\x00000664 '
        b'\x00001750 14150172601 013031\x00 0\x00ustar\x0000lazka'
        b'\x00 \n\x00\x8b\xc0\x0fLX\xb0*\xe74C\x0c\x85\x03\xc0V'
        b'\x01H\r4`\x85S8\x81#')
    with io.BytesIO(DATA) as fobj:
        with ExtTarFile.open(fileobj=fobj, mode="r") as tar:
            members = tar.getmembers()
            assert len(members) == 1
            info = members[0]
            assert info.name == 'test.txt'
            infofile = tar.extractfile(info)
            assert infofile is not None
            # The archived file itself is empty.
            assert infofile.read() == b''
|
{"/tests/test_exttarfile.py": ["/app/exttarfile.py"], "/app/fetch.py": ["/app/appconfig.py", "/app/__init__.py", "/app/exttarfile.py"], "/app/__init__.py": ["/app/api.py", "/app/fetch.py"], "/app/api.py": ["/app/fetch.py"], "/tests/test_main.py": ["/app/__init__.py", "/app/fetch.py"]}
|
44,153,453
|
lazka/msys2-web
|
refs/heads/main
|
/app/exttarfile.py
|
import io
import tarfile
import zstandard
class ExtTarFile(tarfile.TarFile):
    """Extends TarFile to support zstandard"""
    @classmethod
    def zstdopen(cls, name, mode="r", fileobj=None, cctx=None, dctx=None, **kwargs):  # type: ignore
        """Open zstd compressed tar archive name for reading.
        Appending and writing are not allowed.

        Fix: the original guard was ``if mode not in ("r")`` — ``("r")`` is
        just the string "r" (missing tuple comma), so the check was a
        substring test that let an empty mode string slip through. Compare
        against "r" directly.
        """
        if mode != "r":
            raise ValueError("mode must be 'r'")
        try:
            # zstd streams are not seekable, but TarFile needs random
            # access, so decompress the whole archive into memory first.
            zobj = zstandard.open(fileobj or name, mode + "b", cctx=cctx, dctx=dctx)
            with zobj:
                data = zobj.read()
        except (zstandard.ZstdError, EOFError) as e:
            raise tarfile.ReadError("not a zstd file") from e
        fileobj = io.BytesIO(data)
        t = cls.taropen(name, mode, fileobj, **kwargs)
        t._extfileobj = False
        return t
    # Register the handler so TarFile.open(mode="r") auto-detects zstd.
    OPEN_METH = {"zstd": "zstdopen", **tarfile.TarFile.OPEN_METH}
|
{"/tests/test_exttarfile.py": ["/app/exttarfile.py"], "/app/fetch.py": ["/app/appconfig.py", "/app/__init__.py", "/app/exttarfile.py"], "/app/__init__.py": ["/app/api.py", "/app/fetch.py"], "/app/api.py": ["/app/fetch.py"], "/tests/test_main.py": ["/app/__init__.py", "/app/fetch.py"]}
|
44,153,454
|
lazka/msys2-web
|
refs/heads/main
|
/app/appconfig.py
|
# Copyright 2016-2020 Christoph Reiter
# SPDX-License-Identifier: MIT
from typing import Optional
REPO_URL = "https://repo.msys2.org"
DOWNLOAD_URL = "https://mirror.msys2.org"
# Tuple layout: (name, arch, package prefix, source prefix,
#                repo URL, download URL, source-repo URL)
REPOSITORIES = [
    ("mingw32", "", "mingw-w64-i686-", "mingw-w64-", REPO_URL + "/mingw/mingw32", DOWNLOAD_URL + "/mingw/mingw32", "https://github.com/msys2/MINGW-packages"),
    ("mingw64", "", "mingw-w64-x86_64-", "mingw-w64-", REPO_URL + "/mingw/mingw64", DOWNLOAD_URL + "/mingw/mingw64", "https://github.com/msys2/MINGW-packages"),
    ("ucrt64", "", "mingw-w64-ucrt-x86_64-", "mingw-w64-", REPO_URL + "/mingw/ucrt64", DOWNLOAD_URL + "/mingw/ucrt64", "https://github.com/msys2/MINGW-packages"),
    ("clang64", "", "mingw-w64-clang-x86_64-", "mingw-w64-", REPO_URL + "/mingw/clang64", DOWNLOAD_URL + "/mingw/clang64", "https://github.com/msys2/MINGW-packages"),
    ("clang32", "", "mingw-w64-clang-i686-", "mingw-w64-", REPO_URL + "/mingw/clang32", DOWNLOAD_URL + "/mingw/clang32", "https://github.com/msys2/MINGW-packages"),
    ("clangarm64", "", "mingw-w64-clang-aarch64-", "mingw-w64-", REPO_URL + "/mingw/clangarm64", DOWNLOAD_URL + "/mingw/clangarm64", "https://github.com/msys2/MINGW-packages"),
    ("msys", "x86_64", "", "", REPO_URL + "/msys/x86_64", DOWNLOAD_URL + "/msys/x86_64", "https://github.com/msys2/MSYS2-packages"),
]
DEFAULT_REPO = "mingw64"
ARCH_REPO_URL = "https://mirror.f4st.host/archlinux"
# Build (db URL, repo name) pairs for every Arch repo we compare against.
ARCH_REPO_CONFIG = []
for repo in ["core", "extra", "community", "testing", "community-testing",
             "multilib"]:
    ARCH_REPO_CONFIG.append(
        (ARCH_REPO_URL + "/{0}/os/x86_64/{0}.db".format(repo), repo)
    )
AUR_METADATA_URL = "https://aur.archlinux.org/packages-meta-v1.json.gz"
# Pre-built .SRCINFO caches published by the packaging repos.
SRCINFO_URLS = [
    "https://github.com/msys2/MINGW-packages/releases/download/srcinfo-cache/srcinfo.json.gz",
    "https://github.com/msys2/MSYS2-packages/releases/download/srcinfo-cache/srcinfo.json.gz",
]
EXTERNAL_MAPPING_URL = "https://raw.githubusercontent.com/msys2/msys2-web/main/arch-mapping.json"
CYGWIN_METADATA_URL = "https://ftp.acc.umu.se/mirror/cygwin/x86_64/setup.zst"
BUILD_STATUS_URL = "https://github.com/msys2/msys2-autobuild/releases/download/status/status.json"
# Update every 30 minutes by default, at max 2 times every 5 minutes if triggered
UPDATE_INTERVAL = 60 * 30
UPDATE_MIN_INTERVAL = 60 * 5
UPDATE_MIN_RATE = 2
# Per-request timeout in seconds for all HTTP calls.
REQUEST_TIMEOUT = 60
# When set, HTTP responses are cached on disk (used for offline development).
CACHE_DIR: Optional[str] = None
|
{"/tests/test_exttarfile.py": ["/app/exttarfile.py"], "/app/fetch.py": ["/app/appconfig.py", "/app/__init__.py", "/app/exttarfile.py"], "/app/__init__.py": ["/app/api.py", "/app/fetch.py"], "/app/api.py": ["/app/fetch.py"], "/tests/test_main.py": ["/app/__init__.py", "/app/fetch.py"]}
|
44,153,455
|
lazka/msys2-web
|
refs/heads/main
|
/app/fetch.py
|
# Copyright 2016-2020 Christoph Reiter
# SPDX-License-Identifier: MIT
import os
import sys
import io
import json
import asyncio
import traceback
import hashlib
import functools
import gzip
from asyncio import Event
from urllib.parse import urlparse, quote_plus
from typing import Any, Dict, Tuple, List, Set
import httpx
from aiolimiter import AsyncLimiter
import zstandard
from .appstate import state, Source, CygwinVersions, ExternalMapping, get_repositories, SrcInfoPackage, Package, DepType, Repository
from .appconfig import CYGWIN_METADATA_URL, REQUEST_TIMEOUT, AUR_METADATA_URL, ARCH_REPO_CONFIG, EXTERNAL_MAPPING_URL, \
SRCINFO_URLS, UPDATE_INTERVAL, BUILD_STATUS_URL, UPDATE_MIN_RATE, UPDATE_MIN_INTERVAL
from .utils import version_is_newer_than, arch_version_to_msys, extract_upstream_version
from . import appconfig
from .exttarfile import ExtTarFile
async def get_content_cached(url: str, *args: Any, **kwargs: Any) -> bytes:
    """Fetch *url* and return the response body, optionally cached on disk.

    When appconfig.CACHE_DIR is unset this is a plain HTTP GET; extra
    args/kwargs are forwarded to httpx's get(). With a cache dir, the body
    is stored under a key built from the hostname plus a truncated SHA-256
    of the full URL, and subsequent calls read the file instead.
    """
    cache_dir = appconfig.CACHE_DIR
    if cache_dir is None:
        async with httpx.AsyncClient(follow_redirects=True) as client:
            r = await client.get(url, *args, **kwargs)
            return r.content
    os.makedirs(cache_dir, exist_ok=True)
    cache_fn = quote_plus(
        (urlparse(url).hostname or "") +
        "." + hashlib.sha256(url.encode()).hexdigest()[:16] +
        ".cache")
    fn = os.path.join(cache_dir, cache_fn)
    if not os.path.exists(fn):
        # Cache miss: download once and persist.
        async with httpx.AsyncClient(follow_redirects=True) as client:
            r = await client.get(url, *args, **kwargs)
            with open(fn, "wb") as h:
                h.write(r.content)
    with open(fn, "rb") as h:
        data = h.read()
    return data
def parse_cygwin_versions(base_url: str, data: bytes) -> CygwinVersions:
    """Parse Cygwin setup metadata into {source package: (version,
    summary URL, source tarball URL)}.

    Hacky by design: the listing interleaves "version:" and "source:"
    lines, so each source tarball is paired with the most recent version
    line seen before it; only the first occurrence of a package wins.
    """
    root_url = base_url.rsplit("/", 2)[0]
    versions: CygwinVersions = {}
    current_version = None
    for line in data.decode("utf-8").splitlines():
        if line.startswith("version:"):
            raw = line.split(":")[-1].strip()
            # Strip the release suffix ("-1") and any "+" build metadata.
            current_version = raw.split("-", 1)[0].split("+", 1)[0]
        elif line.startswith("source:"):
            tarball = line.split(":", 1)[-1].strip().rsplit(None, 2)[0]
            pkg = tarball.rsplit("/")[-1].rsplit("-", 3)[0]
            assert current_version is not None
            if pkg not in versions:
                versions[pkg] = (
                    current_version,
                    "https://cygwin.com/packages/summary/%s-src.html" % pkg,
                    root_url + "/" + tarball)
    return versions
async def update_cygwin_versions() -> None:
    """Refresh state.cygwin_versions from the Cygwin setup metadata."""
    url = CYGWIN_METADATA_URL
    if not await check_needs_update([url]):
        return
    print("update cygwin info")
    print("Loading %r" % url)
    raw = await get_content_cached(url, timeout=REQUEST_TIMEOUT)
    decompressed = zstandard.ZstdDecompressor().decompress(raw)
    state.cygwin_versions = parse_cygwin_versions(url, decompressed)
async def update_build_status() -> None:
    """Refresh state.build_status from the autobuild status JSON."""
    url = BUILD_STATUS_URL
    if not await check_needs_update([url]):
        return
    print("update build status")
    print("Loading %r" % url)
    payload = await get_content_cached(url, timeout=REQUEST_TIMEOUT)
    state.build_status = json.loads(payload)
def parse_desc(t: str) -> Dict[str, List[str]]:
    """Parse a pacman desc-style blob into {category: [values, ...]}.

    The format is a category line followed by its value lines, terminated
    by a blank line; a trailing category without a blank line still counts.
    """
    parsed: Dict[str, List[str]] = {}
    category = None
    pending: List[str] = []
    for raw in t.splitlines():
        stripped = raw.strip()
        if category is None:
            # NOTE: a blank line here becomes the (empty) category name,
            # matching the original parser's branch order.
            category = stripped
        elif stripped:
            pending.append(stripped)
        else:
            # Blank line closes the current category.
            parsed[category] = pending
            category = None
            pending = []
    if category is not None:
        parsed[category] = pending
    return parsed
async def parse_repo(repo: Repository) -> Dict[str, Source]:
    """Download a pacman repo's files database and parse it into Source
    objects keyed by source-package name."""
    sources: Dict[str, Source] = {}
    print("Loading %r" % repo.files_url)
    def add_desc(d: Any) -> None:
        # Merge the parsed desc into an existing Source, or register a new one.
        source = Source.from_desc(d, repo.name)
        if source.name not in sources:
            sources[source.name] = source
        else:
            source = sources[source.name]
        source.add_desc(d, repo)
    data = await get_content_cached(repo.files_url, timeout=REQUEST_TIMEOUT)
    with io.BytesIO(data) as f:
        with ExtTarFile.open(fileobj=f, mode="r") as tar:
            # Group archive members by their package directory
            # (each package dir holds desc/depends/files entries).
            packages: Dict[str, list] = {}
            for info in tar:
                package_name = info.name.split("/", 1)[0]
                infofile = tar.extractfile(info)
                if infofile is None:
                    continue
                with infofile:
                    packages.setdefault(package_name, []).append(
                        (info.name, infofile.read()))
            for package_name, infos in sorted(packages.items()):
                # Concatenate the package's metadata files, then parse once.
                t = ""
                for name, data in sorted(infos):
                    if name.endswith("/desc"):
                        t += data.decode("utf-8")
                    elif name.endswith("/depends"):
                        t += data.decode("utf-8")
                    elif name.endswith("/files"):
                        t += data.decode("utf-8")
                desc = parse_desc(t)
                add_desc(desc)
    return sources
async def update_arch_versions() -> None:
    """Refresh state.arch_versions: the newest known (version, url,
    builddate) per package name from the Arch Linux repos, with the AUR
    as a fallback for names not found there."""
    urls = [i[0] for i in ARCH_REPO_CONFIG]
    if not await check_needs_update(urls):
        return
    print("update versions")
    arch_versions: Dict[str, Tuple[str, str, int]] = {}
    # Fetch and parse all configured Arch repos concurrently.
    awaitables = []
    for (url, repo) in ARCH_REPO_CONFIG:
        download_url = url.rsplit("/", 1)[0]
        awaitables.append(parse_repo(Repository(repo, "", "", "", download_url, download_url, "")))
    for sources in (await asyncio.gather(*awaitables)):
        for source in sources.values():
            version = extract_upstream_version(arch_version_to_msys(source.version))
            for p in source.packages.values():
                url = "https://www.archlinux.org/packages/%s/%s/%s/" % (
                    p.repo, p.arch, p.name)
                # Keep only the newest version seen per binary package name.
                if p.name in arch_versions:
                    old_ver = arch_versions[p.name][0]
                    if version_is_newer_than(version, old_ver):
                        arch_versions[p.name] = (version, url, p.builddate)
                else:
                    arch_versions[p.name] = (version, url, p.builddate)
            url = "https://www.archlinux.org/packages/%s/%s/%s/" % (
                source.repos[0], source.arches[0], source.name)
            # Also record the source package under its own name.
            if source.name in arch_versions:
                old_ver = arch_versions[source.name][0]
                if version_is_newer_than(version, old_ver):
                    arch_versions[source.name] = (version, url, source.date)
            else:
                arch_versions[source.name] = (version, url, source.date)
    print("done")
    print("update versions from AUR")
    r = await get_content_cached(AUR_METADATA_URL,
                                 timeout=REQUEST_TIMEOUT)
    for item in json.loads(r):
        name = item["Name"]
        # We use AUR as a fallback only, since it might contain development builds
        if name in arch_versions:
            continue
        version = item["Version"]
        msys_ver = extract_upstream_version(arch_version_to_msys(version))
        last_modified = item["LastModified"]
        url = "https://aur.archlinux.org/packages/%s" % name
        arch_versions[name] = (msys_ver, url, last_modified)
    print("done")
    state.arch_versions = arch_versions
async def check_needs_update(urls: List[str], _cache: Dict[str, str] = {}) -> bool:
    """Raises RequestException

    HEAD-request every URL and return True when any Last-Modified/ETag
    value changed since the previous call. `_cache` is a deliberate
    mutable default: it persists across calls and stores the last seen
    header values per URL.
    """
    if appconfig.CACHE_DIR:
        # With an on-disk cache configured, always (re)process.
        return True
    async def get_headers(client: httpx.AsyncClient, url: str, *args: Any, **kwargs: Any) -> Tuple[str, httpx.Headers]:
        r = await client.head(url, *args, **kwargs)
        r.raise_for_status()
        return (url, r.headers)
    needs_update = False
    async with httpx.AsyncClient(follow_redirects=True) as client:
        # HEAD all URLs concurrently.
        awaitables = []
        for url in urls:
            awaitables.append(get_headers(client, url, timeout=REQUEST_TIMEOUT))
        for url, headers in (await asyncio.gather(*awaitables)):
            old = _cache.get(url)
            # Concatenate both validators; a change in either triggers an update.
            new = headers.get("last-modified", "")
            new += headers.get("etag", "")
            if old != new:
                needs_update = True
            _cache[url] = new
    return needs_update
async def update_source() -> None:
    """Raises RequestException

    Re-download every configured repository and rebuild state.sources,
    merging packages from different repos that share a source name, then
    recompute the reverse-dependency maps.
    """
    urls = [repo.files_url for repo in get_repositories()]
    if not await check_needs_update(urls):
        return
    print("update source")
    final: Dict[str, Source] = {}
    # Parse all repositories concurrently.
    awaitables = []
    for repo in get_repositories():
        awaitables.append(parse_repo(repo))
    for sources in await asyncio.gather(*awaitables):
        for name, source in sources.items():
            if name in final:
                final[name].packages.update(source.packages)
            else:
                final[name] = source
    fill_rdepends(final)
    state.sources = final
async def update_sourceinfos() -> None:
    """Rebuild state.sourceinfos from the gzipped srcinfo cache files,
    warning (and keeping the last seen entry) when two PKGBUILDs provide
    the same pkgname."""
    urls = SRCINFO_URLS
    if not await check_needs_update(urls):
        return
    print("update sourceinfos")
    result: Dict[str, SrcInfoPackage] = {}
    for url in urls:
        print("Loading %r" % url)
        data = await get_content_cached(url, timeout=REQUEST_TIMEOUT)
        json_obj = json.loads(gzip.decompress(data).decode("utf-8"))
        for hash_, m in json_obj.items():
            for repo, srcinfo in m["srcinfo"].items():
                for pkg in SrcInfoPackage.for_srcinfo(srcinfo, repo, m["repo"], m["path"], m["date"]):
                    if pkg.pkgname in result:
                        print(f"WARN: duplicate: {pkg.pkgname} provided by "
                              f"{pkg.pkgbase} and {result[pkg.pkgname].pkgbase}")
                    result[pkg.pkgname] = pkg
    state.sourceinfos = result
def fill_rdepends(sources: Dict[str, Source]) -> None:
    """Populate Package.rdepends for every package: which packages depend
    on it — directly or via one of its provide names — and through which
    dependency types."""
    # First pass: map each depended-on name to the packages that want it.
    wanted_by: Dict[str, Dict[Package, Set[DepType]]] = {}
    for src in sources.values():
        for pkg in src.packages.values():
            dep_groups = (
                (pkg.depends, DepType.NORMAL),
                (pkg.makedepends, DepType.MAKE),
                (pkg.optdepends, DepType.OPTIONAL),
                (pkg.checkdepends, DepType.CHECK),
            )
            for dep_map, dep_type in dep_groups:
                for dep_name in dep_map:
                    wanted_by.setdefault(dep_name, dict()).setdefault(pkg, set()).add(dep_type)
    # Second pass: attach reverse deps, including matches on provides.
    for src in sources.values():
        for pkg in src.packages.values():
            candidates = [wanted_by.get(pkg.name, dict())]
            for prov in pkg.provides:
                candidates.append(wanted_by.get(prov, dict()))
            merged: Dict[Package, Set[DepType]] = {}
            for cand in candidates:
                for rev_pkg, dep_types in cand.items():
                    merged.setdefault(rev_pkg, set()).update(dep_types)
            pkg.rdepends = merged
async def update_external_mapping() -> None:
    """Refresh state.external_mapping from the arch-mapping JSON file."""
    url = EXTERNAL_MAPPING_URL
    if not await check_needs_update([url]):
        return
    print("update external mapping")
    print("Loading %r" % url)
    payload = await get_content_cached(url, timeout=REQUEST_TIMEOUT)
    state.external_mapping = ExternalMapping(json.loads(payload))
_rate_limit = AsyncLimiter(UPDATE_MIN_RATE, UPDATE_MIN_INTERVAL)
@functools.lru_cache(maxsize=None)
def _get_update_event() -> Event:
    # lru_cache turns this into a lazily created process-wide singleton,
    # so the Event is built inside the running event loop on first use.
    return Event()
async def wait_for_update() -> None:
    """Block until an update has been queued, then reset the flag."""
    event = _get_update_event()
    await event.wait()
    event.clear()
def queue_update() -> None:
    """Signal the update loop to run another update pass."""
    _get_update_event().set()
async def trigger_loop() -> None:
    """Queue an update pass every UPDATE_INTERVAL seconds, forever."""
    while True:
        print("Sleeping for %d" % UPDATE_INTERVAL)
        await asyncio.sleep(UPDATE_INTERVAL)
        queue_update()
async def update_loop() -> None:
    """Main background task: run all updaters, then sleep until the next
    trigger (periodic via trigger_loop, or on demand via queue_update),
    rate-limited by _rate_limit."""
    asyncio.create_task(trigger_loop())
    while True:
        async with _rate_limit:
            print("check for updates")
            try:
                awaitables = [
                    update_external_mapping(),
                    update_cygwin_versions(),
                    update_arch_versions(),
                    update_source(),
                    update_sourceinfos(),
                    update_build_status(),
                ]
                await asyncio.gather(*awaitables)
                state.ready = True
                print("done")
            except Exception:
                # Keep the loop alive on any updater failure; just log it.
                traceback.print_exc(file=sys.stdout)
        print("Waiting for next update")
        await wait_for_update()
        # XXX: it seems some updates don't propagate right away, so wait a bit
        await asyncio.sleep(5)
|
{"/tests/test_exttarfile.py": ["/app/exttarfile.py"], "/app/fetch.py": ["/app/appconfig.py", "/app/__init__.py", "/app/exttarfile.py"], "/app/__init__.py": ["/app/api.py", "/app/fetch.py"], "/app/api.py": ["/app/fetch.py"], "/tests/test_main.py": ["/app/__init__.py", "/app/fetch.py"]}
|
44,153,456
|
lazka/msys2-web
|
refs/heads/main
|
/app/__init__.py
|
# Copyright 2016-2020 Christoph Reiter
# SPDX-License-Identifier: MIT
import os
import asyncio
from fastapi import FastAPI, Request
from .web import webapp, check_is_ready
from .api import api
from .fetch import update_loop
# Outer app wraps the web app; the JSON API is mounted under /api.
app = FastAPI(openapi_url=None)
webapp.mount("/api", api, name="api")
app.mount("/", webapp)
# https://github.com/tiangolo/fastapi/issues/1472
# NO_MIDDLEWARE lets tests bypass the readiness check.
if not os.environ.get("NO_MIDDLEWARE"):
    app.middleware("http")(check_is_ready)
# https://github.com/tiangolo/fastapi/issues/1480
@app.on_event("startup")
async def startup_event() -> None:
    """Kick off the background update loop unless NO_UPDATE_THREAD is set
    (e.g. for tests)."""
    if not os.environ.get("NO_UPDATE_THREAD"):
        asyncio.create_task(update_loop())
@webapp.exception_handler(Exception)
async def webapp_exception_handler(request: Request, exc: Exception) -> None:
    """Print the traceback of any unhandled web-app error, then re-raise."""
    import traceback
    print(''.join(traceback.format_tb(exc.__traceback__)))
    raise exc
@api.exception_handler(Exception)
async def api_exception_handler(request: Request, exc: Exception) -> None:
    """Print the traceback of any unhandled API error, then re-raise."""
    import traceback
    print(''.join(traceback.format_tb(exc.__traceback__)))
    raise exc
|
{"/tests/test_exttarfile.py": ["/app/exttarfile.py"], "/app/fetch.py": ["/app/appconfig.py", "/app/__init__.py", "/app/exttarfile.py"], "/app/__init__.py": ["/app/api.py", "/app/fetch.py"], "/app/api.py": ["/app/fetch.py"], "/tests/test_main.py": ["/app/__init__.py", "/app/fetch.py"]}
|
44,153,457
|
lazka/msys2-web
|
refs/heads/main
|
/app/api.py
|
from fastapi import FastAPI, APIRouter, Request, Response
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from typing import Tuple, Dict, List, Set, Iterable, Union, Optional
from .appstate import state, SrcInfoPackage
from .utils import version_is_newer_than
from .fetch import queue_update
class QueueBuild(BaseModel):
    """One pending build within a queue entry."""
    # Package names to build.
    packages: List[str]
    # Build dependencies; presumably grouped by repo name — see
    # group_by_repo in buildqueue2.
    depends: Dict[str, List[str]]
    # Whether this build is considered new (no prior version in the repo).
    new: bool
class QueueEntry(BaseModel):
    """Response model for one /buildqueue2 entry (one PKGBUILD checkout)."""
    name: str
    # Version to build.
    version: str
    # Version currently in the repo, if any.
    version_repo: Optional[str]
    repo_url: str
    repo_path: str
    # True when a source package still needs to be produced.
    source: bool
    # Builds keyed by repo name.
    builds: Dict[str, QueueBuild]
router = APIRouter()
def get_srcinfos_to_build() -> Tuple[List[SrcInfoPackage], Set[str]]:
    """Collect the srcinfos needing a build.

    Returns (srcinfos, marked_new): srcinfos whose build version is newer
    than the repo version, plus srcinfos for packages not in the repo at
    all; marked_new holds the pkgnames considered brand new (everything
    they replace is also absent from the repo).
    """
    srcinfos = []
    # packages that should be updated
    for s in state.sources.values():
        for k, p in sorted(s.packages.items()):
            if p.name in state.sourceinfos:
                srcinfo = state.sourceinfos[p.name]
                if not version_is_newer_than(srcinfo.build_version, p.version):
                    continue
                srcinfos.append(srcinfo)
    # packages that are new
    not_in_repo: Dict[str, List[SrcInfoPackage]] = {}
    replaces_not_in_repo: Set[str] = set()
    for srcinfo in state.sourceinfos.values():
        not_in_repo.setdefault(srcinfo.pkgname, []).append(srcinfo)
        replaces_not_in_repo.update(srcinfo.replaces)
    # Remove everything that already exists in the repos.
    for s in state.sources.values():
        for p in s.packages.values():
            not_in_repo.pop(p.name, None)
            replaces_not_in_repo.discard(p.name)
    marked_new: Set[str] = set()
    for sis in not_in_repo.values():
        srcinfos.extend(sis)
        # packages that are considered new, that don't exist in the repo, or
        # don't replace packages already in the repo. We mark them as "new" so
        # we can be more lax with them when they fail to build, since there is
        # no regression.
        for si in sis:
            all_replaces_new = all(p in replaces_not_in_repo for p in si.replaces)
            if all_replaces_new:
                marked_new.add(si.pkgname)
    return srcinfos, marked_new
@router.get('/buildqueue2', response_model=List[QueueEntry])
async def buildqueue2(request: Request, response: Response) -> List[QueueEntry]:
    """Return the current build queue: one QueueEntry per PKGBUILD checkout.

    Packages sharing (repo_url, repo_path) are grouped into one entry, with
    per-repo build information and (make)depends narrowed down to packages
    that are themselves in the queue.
    """
    srcinfos, marked_new = get_srcinfos_to_build()

    # Reverse maps: provided/replaced name -> pkgname that provides/replaces it.
    # NOTE(review): a name provided/replaced by multiple packages keeps only
    # the last one seen — confirm that is acceptable here.
    srcinfo_provides = {}
    srcinfo_replaces = {}
    for srcinfo in state.sourceinfos.values():
        for prov in srcinfo.provides.keys():
            srcinfo_provides[prov] = srcinfo.pkgname
        for repl in srcinfo.replaces:
            srcinfo_replaces[repl] = srcinfo.pkgname

    def resolve_package(pkgname: str) -> str:
        # if another package provides and replaces it, prefer that one
        if pkgname in srcinfo_replaces and pkgname in srcinfo_provides \
                and srcinfo_provides[pkgname] == srcinfo_replaces[pkgname]:
            return srcinfo_provides[pkgname]
        # otherwise prefer the real one
        if pkgname in state.sourceinfos:
            return pkgname
        # if there is no real one, try to find a provider
        return srcinfo_provides.get(pkgname, pkgname)

    def get_transitive_depends(packages: Iterable[str]) -> Set[str]:
        # Worklist walk over runtime depends, resolving provides/replaces
        # at each step; returns every package reachable from `packages`.
        todo = set(packages)
        done = set()
        while todo:
            name = resolve_package(todo.pop())
            if name in done:
                continue
            done.add(name)
            if name in state.sourceinfos:
                si = state.sourceinfos[name]
                todo.update(si.depends.keys())
        return done

    def get_transitive_makedepends(packages: Iterable[str]) -> Set[str]:
        # Seed with direct depends + makedepends, then close over runtime
        # depends of the seeds.
        todo: Set[str] = set()
        for name in packages:
            name = resolve_package(name)
            if name in state.sourceinfos:
                si = state.sourceinfos[name]
                todo.update(si.depends.keys())
                todo.update(si.makedepends.keys())
        return get_transitive_depends(todo)

    def srcinfo_get_repo_version(si: SrcInfoPackage) -> Optional[str]:
        # Version of the same pkgbase currently in the binary repo, if any.
        if si.pkgbase in state.sources:
            return state.sources[si.pkgbase].version
        return None

    def srcinfo_has_src(si: SrcInfoPackage) -> bool:
        """If there already is a package with the same base/version in the repo
        we can assume that there exists a source package already
        """
        version = srcinfo_get_repo_version(si)
        return version is not None and version == si.build_version

    def srcinfo_is_new(si: SrcInfoPackage) -> bool:
        # Marked new by get_srcinfos_to_build().
        return si.pkgname in marked_new

    def build_key(srcinfo: SrcInfoPackage) -> Tuple[str, str]:
        # Packages from the same repo checkout get built together.
        return (srcinfo.repo_url, srcinfo.repo_path)

    to_build: Dict[Tuple, List[SrcInfoPackage]] = {}
    for srcinfo in srcinfos:
        key = build_key(srcinfo)
        to_build.setdefault(key, []).append(srcinfo)

    entries = []
    repo_mapping = {}  # pkgname -> repo, filled while building entries
    all_packages: Set[str] = set()
    # NOTE: this loop variable shadows the outer "srcinfos" list; the outer
    # value is not needed past this point.
    for srcinfos in to_build.values():
        packages = set()
        needs_src = False
        new_all: Dict[str, List[bool]] = {}
        version_repo = None
        for si in srcinfos:
            if not srcinfo_has_src(si):
                needs_src = True
            version_repo = version_repo or srcinfo_get_repo_version(si)
            new_all.setdefault(si.repo, []).append(srcinfo_is_new(si))
            packages.add(si.pkgname)
            repo_mapping[si.pkgname] = si.repo
        # if all packages to build are new, we consider the build as new
        new = [k for k, v in new_all.items() if all(v)]
        all_packages.update(packages)
        entries.append({
            "repo_url": srcinfos[0].repo_url,
            "repo_path": srcinfos[0].repo_path,
            "version": srcinfos[0].build_version,
            "version_repo": version_repo,
            "name": srcinfos[0].pkgbase,
            "source": needs_src,
            "packages": packages,
            "new": new,
            "makedepends": get_transitive_makedepends(packages) | get_transitive_depends(['base-devel', 'base']),
        })

    # limit the deps to all packages in the queue overall, minus itself
    for e in entries:
        assert isinstance(e["makedepends"], set)
        assert isinstance(e["packages"], set)
        e["makedepends"] &= all_packages
        e["makedepends"] -= e["packages"]

    def group_by_repo(sequence: Iterable[str]) -> Dict[str, List]:
        # Bucket names by repo; each bucket sorted and de-duplicated.
        grouped: Dict[str, List] = {}
        for name in sequence:
            grouped.setdefault(repo_mapping[name], []).append(name)
        for key, values in grouped.items():
            grouped[key] = sorted(set(values))
        return grouped

    results = []
    for e in entries:
        assert isinstance(e["makedepends"], set)
        assert isinstance(e["packages"], set)
        assert isinstance(e["new"], list)
        makedepends = e["makedepends"]
        builds: Dict[str, QueueBuild] = {}
        deps_grouped = group_by_repo(makedepends)
        for repo, build_packages in group_by_repo(e["packages"]).items():
            build_depends = {}
            for deprepo, depends in deps_grouped.items():
                # a build only sees deps from its own repo, plus "msys" ones
                if deprepo == repo or deprepo == "msys":
                    build_depends[deprepo] = depends
            builds[repo] = QueueBuild(
                packages=build_packages,
                depends=build_depends,
                new=(repo in e["new"])
            )
        results.append(QueueEntry(
            name=e["name"],
            version=e["version"],
            version_repo=e["version_repo"],
            repo_url=e["repo_url"],
            repo_path=e["repo_path"],
            source=e["source"],
            builds=builds,
        ))
    return results
@router.get('/removals')
async def removals(request: Request, response: Response) -> Response:
    """List binary packages present in the pacman repo but absent from GIT."""
    # A package is a removal candidate when no source info exists for it and
    # nothing in the repo still depends on it.
    # FIXME: can also break things if it's the only provides and removed,
    # and also is ok to remove if there is a replacement
    candidates = [
        {"repo": pkg.repo, "name": pkg.name}
        for source in state.sources.values()
        for pkg in source.packages.values()
        if pkg.name not in state.sourceinfos and not pkg.rdepends
    ]
    return JSONResponse(candidates)
@router.get('/search')
async def search(request: Request, response: Response, query: str = "", qtype: str = "") -> Response:
    """Search package sources ("pkg") or binary packages ("binpkg").

    An exact, case-insensitive name match is reported separately under
    'exact'; every source whose name contains all whitespace-separated
    query terms is listed under 'other'. Unknown qtype falls back to "pkg".
    """
    if qtype not in ["pkg", "binpkg"]:
        qtype = "pkg"
    # The substring search below lowercases both sides, so the exact-match
    # comparison must too; previously a query containing uppercase letters
    # could never match exactly.
    query_lower = query.lower()
    parts = query.split()
    res_pkg: List[Dict[str, Union[str, List[str], int]]] = []
    exact = {}
    if not query:
        pass
    elif qtype == "pkg":
        for s in state.sources.values():
            if s.name.lower() == query_lower or s.realname.lower() == query_lower:
                exact = s.get_info()
                continue
            # every query term must appear as a substring of the name
            if [p for p in parts if p.lower() in s.name.lower()] == parts:
                res_pkg.append(s.get_info())
    elif qtype == "binpkg":
        for s in state.sources.values():
            for sub in s.packages.values():
                if sub.name.lower() == query_lower or sub.realname.lower() == query_lower:
                    exact = s.get_info()
                    continue
                if [p for p in parts if p.lower() in sub.name.lower()] == parts:
                    res_pkg.append(s.get_info())
    return JSONResponse(
        {
            'query': query,
            'qtype': qtype,
            'results': {
                'exact': exact,
                'other': res_pkg
            }
        }
    )
@router.post("/trigger_update", response_class=JSONResponse)
async def do_trigger_update(request: Request) -> Response:
    """Trigger a data refresh via queue_update() and return an empty JSON
    object immediately (the handler does not wait for the update)."""
    queue_update()
    return JSONResponse({})
# Public FastAPI application for the JSON API; the interactive docs are
# served at the mount root ("/").
api = FastAPI(title="MSYS2 Packages API", docs_url="/")
api.include_router(router)
|
{"/tests/test_exttarfile.py": ["/app/exttarfile.py"], "/app/fetch.py": ["/app/appconfig.py", "/app/__init__.py", "/app/exttarfile.py"], "/app/__init__.py": ["/app/api.py", "/app/fetch.py"], "/app/api.py": ["/app/fetch.py"], "/tests/test_main.py": ["/app/__init__.py", "/app/fetch.py"]}
|
44,153,458
|
lazka/msys2-web
|
refs/heads/main
|
/tests/test_main.py
|
# type: ignore
import os
import base64
import datetime
os.environ["NO_MIDDLEWARE"] = "1"
import pytest
from app import app
from app.appstate import SrcInfoPackage, parse_packager
from app.fetch import parse_cygwin_versions
from app.pgp import parse_signature, SigError, Signature
from app.utils import split_optdepends, strip_vcs, vercmp
from fastapi.testclient import TestClient
@pytest.fixture
def client():
    """Yield a TestClient for the app with the update thread disabled."""
    # must be set before the app starts up inside the TestClient context
    os.environ["NO_UPDATE_THREAD"] = "1"
    with TestClient(app) as client:
        yield client
@pytest.mark.parametrize("endpoint", [
    '', 'repos', 'base', 'group', 'groups', 'updates', 'outofdate', 'queue', 'new',
    'search', 'base/foo', 'group/foo', 'groups/foo', 'package/foo',
    'package', 'stats', 'mirrors', 'basegroups', 'basegroups/foo',
])
def test_main_endpoints(client, endpoint):
    """Every page renders, carries an ETag, and honours If-None-Match."""
    url = '/' + endpoint
    first = client.get(url)
    assert first.status_code == 200
    assert "etag" in first.headers
    # a matching ETag must short-circuit to 304 Not Modified
    cached = client.get(url, headers={"if-none-match": first.headers["etag"]})
    assert cached.status_code == 304
    # a stale ETag must serve the full response again
    stale = client.get(url, headers={"if-none-match": "nope"})
    assert stale.status_code == 200
def test_parse_cygwin_versions():
    """parse_cygwin_versions extracts the version, summary URL and source
    tarball URL from a Cygwin setup.ini fragment."""
    data = b"""\
@ python36
category: Python Interpreters
requires: binutils cygwin libbz2_1 libcrypt0 libcrypt2 libexpat1 libffi6
version: 1:3.6.9-1
install: x86_64/release/python36/python36-3.6.9-1.tar.xz 5750152 96dd43cf9
source: x86_64/release/python36/python36-3.6.9-1-src.tar.xz 17223444 ef39d9419"""
    setup_ini_url = "https://mirrors.kernel.org/sourceware/cygwin/x86_64/setup.ini"
    versions = parse_cygwin_versions(setup_ini_url, data)
    assert "python36" in versions
    # the epoch ("1:") and pkgrel ("-1") are stripped from "1:3.6.9-1"
    assert versions["python36"][0] == "3.6.9"
    assert versions["python36"][1] == "https://cygwin.com/packages/summary/python36-src.html"
    assert versions["python36"][2] == "https://mirrors.kernel.org/sourceware/cygwin/x86_64/release/python36/python36-3.6.9-1-src.tar.xz"
# Base64-encoded binary OpenPGP signature, used as a fixture by test_pgp().
EXAMPLE_SIG = (
    "iHUEABEIAB0WIQStNRxQrghXdetZMztfku/BpH1FoQUCXlOY5wAKCRBfku"
    "/BpH1FodQoAP4nQnPNLnx5MVIJgZgCwW/hplW7Ai9MqkmFBqD8/+EXfAD/"
    "Rgxtz2XH7RZ1JKh7PN5NsVz9UlBM7977PjFg9WptNGU=")
def test_pgp():
    """parse_signature rejects garbage input and extracts key id, date,
    signer name and keyserver URL from a valid signature."""
    with pytest.raises(SigError):
        parse_signature(b"")
    with pytest.raises(SigError):
        parse_signature(b"foobar")
    data = base64.b64decode(EXAMPLE_SIG)
    sig = parse_signature(data)
    assert isinstance(sig, Signature)
    assert sig.keyid == "5f92efc1a47d45a1"
    assert sig.date == datetime.datetime(2020, 2, 24, 9, 35, 35)
    assert sig.name == "Alexey Pavlov"
    assert sig.url == "https://keyserver.ubuntu.com/pks/lookup?op=vindex&fingerprint=on&search=0x5f92efc1a47d45a1"
def test_parse_packager():
    """parse_packager splits 'Name <email>' strings; email is optional."""
    info = parse_packager("foobar")
    assert info.name == "foobar"
    assert info.email is None
    info = parse_packager("foobar <foobar@msys2.org>")
    assert info.name == "foobar"
    assert info.email == "foobar@msys2.org"
def test_split_optdepends():
    """split_optdepends maps 'pkg: reason' entries to {pkg: {reasons}};
    entries without a reason get an empty set."""
    assert split_optdepends(["foo: bar"]) == {'foo': {'bar'}}
    assert split_optdepends(["foo: bar", "foo: quux"]) == {'foo': {'bar', 'quux'}}
    assert split_optdepends(["foobar"]) == {'foobar': set()}
    assert split_optdepends(["foobar:"]) == {'foobar': set()}
def test_strip_vcs():
    """strip_vcs drops a trailing VCS suffix such as '-git'."""
    assert strip_vcs("foo") == "foo"
    assert strip_vcs("foo-git") == "foo"
def test_for_srcinfo():
    """SrcInfoPackage.for_srcinfo parses a multi-pkgname .SRCINFO:
    pkgbase-level fields are inherited, per-package fields override them."""
    info = """
pkgbase = libarchive
\tpkgver = 3.5.1
\tdepends = gcc-libs
pkgname = libarchive
pkgname = libarchive-devel
\tdepends = libxml2-devel
\treplaces = libarchive-devel-git
pkgname = something
\tdepends = \n"""
    packages = SrcInfoPackage.for_srcinfo(
        info, "repo", "https://foo.bar", "/", "2021-01-15")
    libarchive = [p for p in packages if p.pkgname == "libarchive"][0]
    # pkgbase-level depends is inherited when the package doesn't override it
    assert list(libarchive.depends) == ["gcc-libs"]
    assert libarchive.pkgver == "3.5.1"
    devel = [p for p in packages if p.pkgname == "libarchive-devel"][0]
    assert list(devel.depends) == ["libxml2-devel"]
    assert list(devel.replaces) == ["libarchive-devel-git"]
    assert devel.pkgver == "3.5.1"
    something = [p for p in packages if p.pkgname == "something"][0]
    # an empty "depends = " value yields no dependencies
    assert list(something.depends) == []
def test_vercmp():
    """Exercise vercmp against version pairs with a known ordering."""

    def check(first, second, expected):
        # vercmp must be antisymmetric: swapping args flips the sign
        assert vercmp(first, second) == expected
        assert vercmp(second, first) == (expected * -1)

    cases = [
        ("1.0.0", "2.0.0", -1),
        ("1.0.0", "1.0.0.r101", -1),
        ("1.0.0", "1.0.0", 0),
        ("2019.10.06", "2020.12.07", -1),
        ("1.3_20200327", "1.3_20210319", -1),
        ("r2991.1771b556", "0.161.r3039.544c61f", -1),
        ("6.8", "6.8.3", -1),
        ("6.8", "6.8.", -1),
        ("2.5.9.27149.9f6840e90c", "3.0.7.33374", -1),
        (".", "", 1),
        ("0", "", 1),
        ("0", "00", 0),
        (".", "..0", -1),
        (".0", "..0", -1),
        ("1r", "1", -1),
        ("r1", "r", 1),
        ("1.1.0", "1.1.0a", 1),
        ("1.1.0.", "1.1.0a", 1),
        ("a", "1", -1),
        (".", "1", -1),
        (".", "a", 1),
        ("a1", "1", -1),
    ]
    for first, second, expected in cases:
        check(first, second, expected)
    # FIXME:
    # check(".0", "0", 1)
|
{"/tests/test_exttarfile.py": ["/app/exttarfile.py"], "/app/fetch.py": ["/app/appconfig.py", "/app/__init__.py", "/app/exttarfile.py"], "/app/__init__.py": ["/app/api.py", "/app/fetch.py"], "/app/api.py": ["/app/fetch.py"], "/tests/test_main.py": ["/app/__init__.py", "/app/fetch.py"]}
|
44,153,459
|
lazka/msys2-web
|
refs/heads/main
|
/tests/test_web.py
|
# SPDX-License-Identifier: MIT
from fastapi import Request
from app.web import licenses_to_html
def test_licenses_to_html() -> None:
    """licenses_to_html escapes plain license strings, joins alternatives
    with ' OR ', and turns spdx:-prefixed expressions into links to
    spdx.org license pages."""
    r = Request({"type": "http"})
    assert licenses_to_html(r, []) == ""
    assert licenses_to_html(r, ["FOO"]) == "FOO"
    # multiple entries are rendered sorted and joined with OR
    assert licenses_to_html(r, ["FOO", "BAR"]) == "BAR OR FOO"
    assert licenses_to_html(r, ["FOO", "&", "<", ">"]) == \
        "&amp; OR &lt; OR &gt; OR FOO"
    assert licenses_to_html(r, ["spdx:FOO-BAR.OK"]) == (
        '<a href="https://spdx.org/licenses/FOO-BAR.OK.html">FOO-BAR.OK</a>')
    # invalid spdx expressions fall back to escaped text
    assert licenses_to_html(r, ["spdx:< > &"]) == '&lt; &gt; &amp;'
    assert licenses_to_html(r, ["spdx:(FOO)"]) == \
        '(<a href="https://spdx.org/licenses/FOO.html">FOO</a>)'
    assert licenses_to_html(r, ["spdx:FOO", "spdx:BAR"]) == (
        '<a href="https://spdx.org/licenses/BAR.html">BAR</a> OR '
        '<a href="https://spdx.org/licenses/FOO.html">FOO</a>')
    assert licenses_to_html(r, ["custom:BLA", "GPL"]) == "GPL OR custom:BLA"
    assert licenses_to_html(r, ["spdx:BLA", "GPL"]) == \
        'GPL OR <a href="https://spdx.org/licenses/BLA.html">BLA</a>'
    # composite spdx expressions are parenthesized when mixed with others
    assert licenses_to_html(r, ["spdx:MIT OR BSD-3-Clause", "GPL"]) == (
        'GPL OR (<a href="https://spdx.org/licenses/MIT.html">MIT</a> OR '
        '<a href="https://spdx.org/licenses/BSD-3-Clause.html">BSD-3-Clause</a>)')
    assert licenses_to_html(r, ["&<>"]) == "&amp;&lt;&gt;"
    assert licenses_to_html(r, ["spdx:GPL-2.0-or-later WITH Autoconf-exception-2.0"]) == (
        '<a href="https://spdx.org/licenses/GPL-2.0-or-later.html">GPL-2.0-or-later</a> WITH '
        '<a href="https://spdx.org/licenses/Autoconf-exception-2.0.html">Autoconf-exception-2.0</a>'
    )
    # "+" in a license id is percent-encoded in the link target
    assert licenses_to_html(r, ["spdx:GPL-2.0+"]) == (
        '<a href="https://spdx.org/licenses/GPL-2.0%2B.html">GPL-2.0+</a>'
    )
    assert licenses_to_html(r, ["spdx:StandardML-NJ"]) == (
        '<a href="https://spdx.org/licenses/StandardML-NJ.html">StandardML-NJ</a>'
    )
    # LicenseRef- entries render as plain text without the prefix
    assert licenses_to_html(r, ["spdx:LicenseRef-foobar"]) == 'foobar'
|
{"/tests/test_exttarfile.py": ["/app/exttarfile.py"], "/app/fetch.py": ["/app/appconfig.py", "/app/__init__.py", "/app/exttarfile.py"], "/app/__init__.py": ["/app/api.py", "/app/fetch.py"], "/app/api.py": ["/app/fetch.py"], "/tests/test_main.py": ["/app/__init__.py", "/app/fetch.py"]}
|
44,191,844
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/performance/test_drive_rate_limiter.py
|
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests for checking the rate limiter on /drives resources."""
import os
import host_tools.drive as drive_tools
MB = 2**20
def check_iops_limit(ssh_connection, block_size, count, min_time, max_time):
    """Verify if the rate limiter throttles block iops using dd.

    Writes `count` blocks of `block_size` bytes to /dev/vdb with O_DIRECT
    and asserts the duration reported by dd lies in (min_time, max_time)
    seconds.
    """
    obs = block_size
    byte_count = block_size * count
    dd = "dd if=/dev/zero of=/dev/vdb ibs={} obs={} count={} oflag=direct".format(
        block_size, obs, count
    )
    print("Running cmd {}".format(dd))
    # Check write iops (writing with oflag=direct is more reliable).
    exit_code, _, stderr = ssh_connection.run(dd)
    assert exit_code == 0
    # "dd" writes to stderr by design. We drop first lines
    lines = stderr.split("\n")
    dd_result = lines[2].strip()
    # Interesting output looks like this:
    # 4194304 bytes (4.2 MB, 4.0 MiB) copied, 0.0528524 s, 79.4 MB/s
    tokens = dd_result.split()
    # Check total bytes transferred (token 0 of the summary line).
    assert int(tokens[0]) == byte_count
    # Check duration: tokens[7] is the elapsed-seconds field.
    # NOTE(review): this parsing assumes GNU dd's English summary format —
    # confirm the guest image pins locale/coreutils.
    assert float(tokens[7]) > min_time
    assert float(tokens[7]) < max_time
def test_patch_drive_limiter(test_microvm_with_api):
    """
    Test replacing the drive rate-limiter after guest boot works.

    Boots with a low ops limit, verifies throughput, then PATCHes the
    limiter twice and verifies the new limits take effect each time.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    # Set up the microVM with 2 vCPUs, 512 MiB of RAM, 1 network iface, a root
    # file system, and a scratch drive.
    test_microvm.basic_config(vcpu_count=2, mem_size_mib=512)
    test_microvm.add_net_iface()
    fs1 = drive_tools.FilesystemFile(
        os.path.join(test_microvm.fsfiles, "scratch"), size=512
    )
    # Attach the scratch drive limited to 100 ops / 100 ms (~1000 IOPS)
    # and 10 MB / 100 ms of bandwidth.
    test_microvm.api.drive.put(
        drive_id="scratch",
        path_on_host=test_microvm.create_jailed_resource(fs1.path),
        is_root_device=False,
        is_read_only=False,
        rate_limiter={
            "bandwidth": {"size": 10 * MB, "refill_time": 100},
            "ops": {"size": 100, "refill_time": 100},
        },
    )
    test_microvm.start()
    # Validate IOPS stays within above configured limits.
    # For example, the below call will validate that reading 1000 blocks
    # of 512b will complete in at 0.8-1.2 seconds ('dd' is not very accurate,
    # so we target to stay within 30% error).
    check_iops_limit(test_microvm.ssh, 512, 1000, 0.7, 1.3)
    check_iops_limit(test_microvm.ssh, 4096, 1000, 0.7, 1.3)
    # Patch ratelimiter: double the ops budget, so the same wall time now
    # allows 2000 blocks.
    test_microvm.api.drive.patch(
        drive_id="scratch",
        rate_limiter={
            "bandwidth": {"size": 100 * MB, "refill_time": 100},
            "ops": {"size": 200, "refill_time": 100},
        },
    )
    check_iops_limit(test_microvm.ssh, 512, 2000, 0.7, 1.3)
    check_iops_limit(test_microvm.ssh, 4096, 2000, 0.7, 1.3)
    # Patch ratelimiter again: ops only (bandwidth limit removed).
    test_microvm.api.drive.patch(
        drive_id="scratch", rate_limiter={"ops": {"size": 1000, "refill_time": 100}}
    )
    check_iops_limit(test_microvm.ssh, 512, 10000, 0.7, 1.3)
    check_iops_limit(test_microvm.ssh, 4096, 10000, 0.7, 1.3)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,845
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/performance/test_network_tcp_throughput.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests the network throughput of Firecracker uVMs."""
import json
import pytest
from framework.stats import consumer, producer
from framework.stats.baseline import Provider as BaselineProvider
from framework.stats.metadata import DictProvider as DictMetadataProvider
from framework.utils import CpuMap, get_kernel_version
from framework.utils_iperf import IPerf3Test, consume_iperf3_output
from integration_tests.performance.configs import defs
TEST_ID = "network_tcp_throughput"
kernel_version = get_kernel_version(level=1)
CONFIG_NAME_REL = "test_{}_config_{}.json".format(TEST_ID, kernel_version)
CONFIG_NAME_ABS = defs.CFG_LOCATION / CONFIG_NAME_REL
BASE_PORT = 5000
# How many clients/servers should be spawned per vcpu
LOAD_FACTOR = 1
# Time (in seconds) for which iperf "warms up"
WARMUP_SEC = 5
# Time (in seconds) for which iperf runs after warmup is done
RUNTIME_SEC = 20
# pylint: disable=R0903
class NetTCPThroughputBaselineProvider(BaselineProvider):
    """Baseline provider for the network TCP throughput performance test.

    Looks up per-metric/per-statistic targets from the raw baselines dict,
    keyed by environment id and iperf configuration id.
    """

    def __init__(self, env_id, iperf_id, raw_baselines):
        """Initialize the provider for one env/iperf combination."""
        super().__init__(raw_baselines)
        self._tag = "baselines/{}/" + env_id + "/{}/" + iperf_id

    def get(self, metric_name: str, statistic_name: str) -> dict:
        """Return the baseline value corresponding to the key."""
        lookup_key = self._tag.format(metric_name, statistic_name)
        baseline = self._baselines.get(lookup_key)
        if not baseline:
            # no baseline recorded for this metric/statistic pair
            return None
        target = baseline.get("target")
        delta_percentage = baseline.get("delta_percentage")
        # the allowed delta is expressed as a percentage of the target
        return {
            "target": target,
            "delta": delta_percentage * target / 100,
        }
class TCPIPerf3Test(IPerf3Test):
    """IPerf3 runner for the TCP throughput performance test"""

    def __init__(self, microvm, mode, host_ip, payload_length):
        """Configure the base iperf3 test for `microvm`.

        Spawns LOAD_FACTOR clients/servers per vCPU with the module-level
        warmup/runtime durations and base port.
        """
        self._host_ip = host_ip
        super().__init__(
            microvm,
            BASE_PORT,
            RUNTIME_SEC,
            WARMUP_SEC,
            mode,
            LOAD_FACTOR * microvm.vcpus_count,
            host_ip,
            payload_length=payload_length,
        )
def pipe(basevm, mode, payload_length, current_avail_cpu, host_ip, env_id):
    """Create producer/consumer pipes.

    Returns (consumer, producer, tag); the tag uniquely identifies the
    environment/iperf configuration combination.
    """
    test = TCPIPerf3Test(basevm, mode, host_ip, payload_length)
    iperf3_id = f"tcp-p{payload_length}-wsDEFAULT-{mode}"
    # baselines come from the per-host-kernel JSON config resolved at import
    raw_baselines = json.loads(CONFIG_NAME_ABS.read_text("utf-8"))
    cons = consumer.LambdaConsumer(
        metadata_provider=DictMetadataProvider(
            measurements=raw_baselines["measurements"],
            baseline_provider=NetTCPThroughputBaselineProvider(
                env_id, iperf3_id, raw_baselines
            ),
        ),
        func=consume_iperf3_output,
    )
    prod = producer.LambdaProducer(
        test.run_test, func_kwargs={"first_free_cpu": current_avail_cpu}
    )
    return cons, prod, f"{env_id}/{iperf3_id}"
@pytest.mark.nonci
@pytest.mark.timeout(3600)
@pytest.mark.parametrize("vcpus", [1, 2])
@pytest.mark.parametrize("payload_length", ["128K", "1024K"], ids=["p128K", "p1024K"])
@pytest.mark.parametrize("mode", ["g2h", "h2g", "bd"])
def test_network_tcp_throughput(
    microvm_factory,
    guest_kernel,
    rootfs,
    vcpus,
    payload_length,
    mode,
    st_core,
):
    """
    Iperf between guest and host in both directions for TCP workload.
    """
    # We run bi-directional tests only on uVM with more than 2 vCPus
    # because we need to pin one iperf3/direction per vCPU, and since we
    # have two directions, we need at least two vCPUs.
    if mode == "bd" and vcpus < 2:
        pytest.skip("bidrectional test only done with at least 2 vcpus")
    guest_mem_mib = 1024
    vm = microvm_factory.build(guest_kernel, rootfs, monitor_memory=False)
    vm.spawn(log_level="Info")
    vm.basic_config(vcpu_count=vcpus, mem_size_mib=guest_mem_mib)
    iface = vm.add_net_iface()
    vm.start()
    microvm_cfg = f"{vcpus}vcpu_{guest_mem_mib}mb.json"
    st_core.name = TEST_ID
    # we will use this also as metrics dimensions
    st_core.custom["guest_config"] = microvm_cfg.removesuffix(".json")
    # Check if the needed CPU cores are available. We have the API thread, VMM
    # thread and then one thread for each configured vCPU.
    assert CpuMap.len() >= 2 + vm.vcpus_count
    # Pin uVM threads to physical cores.
    current_avail_cpu = 0
    assert vm.pin_vmm(current_avail_cpu), "Failed to pin firecracker thread."
    current_avail_cpu += 1
    assert vm.pin_api(current_avail_cpu), "Failed to pin fc_api thread."
    for i in range(vm.vcpus_count):
        current_avail_cpu += 1
        assert vm.pin_vcpu(i, current_avail_cpu), f"Failed to pin fc_vcpu {i} thread."
    # iperf processes get the first core after the pinned uVM threads
    cons, prod, tag = pipe(
        vm,
        mode,
        payload_length,
        current_avail_cpu + 1,
        iface.host_ip,
        f"{st_core.env_id_prefix}/{microvm_cfg}",
    )
    st_core.add_pipe(prod, cons, tag)
    # Start running the commands on guest, gather results and verify pass
    # criteria.
    st_core.run_exercise()
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,846
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/stats/core.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Core module for statistics component management."""
import types
from collections import defaultdict, namedtuple
from datetime import datetime
from typing_extensions import TypedDict
from framework.utils import ExceptionAggregator
from .consumer import Consumer, ProcessingException
from .producer import Producer
class CoreException(ExceptionAggregator):
    """Exception used to return core messages.

    The caller should handle the exception accordingly.
    """

    def __init__(self, result=None):
        """Initialize the exception.

        `result` optionally carries the aggregated statistics gathered up
        to the point of failure.
        """
        super().__init__()
        self.result = result
class Result(TypedDict):
    """Data class for aggregated statistic results."""

    name: str         # statistics/test name
    iterations: int   # number of producer iterations per pipe
    results: dict     # per-tag processed statistics
    custom: dict      # per-tag custom data extracted during processing
Pipe = namedtuple("Pipe", "producer consumer")
class Core:
    """Base class for statistics core driver.

    Drives registered producer/consumer pipes for a number of iterations,
    aggregates per-pipe statistics into a `Result`, and raises a
    `CoreException` when a pipe reported a failure or regression.
    """

    def __init__(self, name="<PLACEHOLDER>", iterations=1, custom=None):
        """Core constructor.

        `custom` defaults to a fresh dict per instance. The previous
        `custom={}` mutable default was shared across all Core instances
        and was mutated via `self._result["custom"][tag] = ...` in
        run_exercise, leaking data between instances.
        """
        self._pipes = defaultdict(Pipe)
        self._result = Result(
            name=name,
            iterations=iterations,
            results={},
            custom={} if custom is None else custom,
        )
        self._failure_aggregator = CoreException()
        self.metrics_test = None
        self.metrics = None
        self.check_baseline = True

    def add_pipe(self, producer: Producer, consumer: Consumer, tag=None):
        """Add a new producer-consumer pipe."""
        if tag is None:
            # fall back to a unique-ish tag derived from the current time
            tag = self._result["name"] + "_" + str(datetime.timestamp(datetime.now()))
        self._pipes[tag] = Pipe(producer, consumer)

    def run_exercise(self, fail_fast=False) -> Result:
        """Drive the statistics producers until completion.

        For each pipe: run the producer for the configured number of
        iterations, feed produced data to the consumer, emit raw metrics
        (when the consumer returns any), then process the consumed data.
        Failures are aggregated; `fail_fast` raises on the first one.
        """
        iterations = self._result["iterations"]
        for tag, pipe in self._pipes.items():
            for iteration in range(iterations):
                raw_data = pipe.producer.produce()
                # producers may return one result or a generator of results
                if not isinstance(raw_data, types.GeneratorType):
                    raw_data = [raw_data]
                for data in raw_data:
                    raws = pipe.consumer.ingest(iteration, data)
                    if raws is not None:
                        dimensions = self.custom.copy()
                        # the last tag component identifies the sub-test
                        test = tag.split("/")[-1]
                        dimensions["test"] = test
                        dimensions["performance_test"] = self.name
                        self.metrics.set_dimensions(dimensions)
                        for name, val, unit in raws:
                            self.metrics.put_metric(name, val, unit)
                        self.metrics.set_property("iteration", iteration)
                        self.metrics.flush()
            try:
                stats, custom = pipe.consumer.process(check=self.check_baseline)
            except (ProcessingException, AssertionError) as err:
                self._failure_aggregator.add_row(f"Failed on '{tag}':")
                self._failure_aggregator.add_row(err)
                # NOTE(review): assumes the raised error carries partial
                # .stats/.custom (true for ProcessingException) — confirm
                # for the AssertionError path.
                stats = err.stats
                custom = err.custom
                if fail_fast:
                    raise self._failure_aggregator
            self._result["results"][tag] = stats
            # Custom information extracted from all the iterations.
            if len(custom) > 0:
                self._result["custom"][tag] = custom
        self.raise_if_regression()
        return self._result

    def raise_if_regression(self):
        """Raise an exception if there was an issue or a regression was
        detected.
        """
        if self._failure_aggregator.has_any():
            self._failure_aggregator.result = self._result
            # If we had Python 3.11 we could use ExceptionGroup
            raise self._failure_aggregator

    @property
    def name(self):
        """Return statistics name."""
        return self._result["name"]

    @name.setter
    def name(self, name):
        """Set statistics name."""
        self._result["name"] = name

    @property
    def iterations(self):
        """Return statistics iterations count."""
        return self._result["iterations"]

    @iterations.setter
    def iterations(self, iterations):
        """Set statistics iterations count."""
        self._result["iterations"] = iterations

    @property
    def custom(self):
        """Return statistics custom information."""
        return self._result["custom"]

    @custom.setter
    def custom(self, custom):
        """Set statistics custom information."""
        self._result["custom"] = custom

    @property
    def statistics(self):
        """Return statistics gathered so far."""
        return self._result
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,847
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_max_vcpus.py
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests scenario for microvms with max vcpus(32)."""
# Number of vCPUs to configure; per the module docstring this is the
# supported maximum (32).
MAX_VCPUS = 32


def test_max_vcpus(test_microvm_with_api):
    """
    Test if all configured guest vcpus are online.
    """
    microvm = test_microvm_with_api
    microvm.spawn()
    # Configure a microVM with 32 vCPUs.
    microvm.basic_config(vcpu_count=MAX_VCPUS)
    microvm.add_net_iface()
    microvm.start()
    # `nproc` reports the number of online CPUs inside the guest.
    cmd = "nproc"
    _, stdout, stderr = microvm.ssh.run(cmd)
    assert stderr == ""
    assert int(stdout) == MAX_VCPUS
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,848
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tools/test_bindings.py
|
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Script used to check if bindgen-generated code creates structs that differ from previously created
onces.
The script uses `pahole` (man 1 pahole) to gather debug information from two firecracker binaries
(script's arguments). It parses pahole output and gathers struct information in a dictionary of the
form:
```
{
"struct_name": {"size": size_in_bytes, "alignment": alignment_in_bytes},
...
}
```
It also, filters structure names using the "bindings" filter for keeping only bindgen related
structs.
*NOTE*: this assumes that all bindgen-related structs live under a crate or module name with
"bindings" in it. At the moment, this is true.
It then iterates through the structs of the firecracker binary built from the older version and
checks if there are mismatches with the struct info from the second binary (newer version)
### Usage
1. Create the two binaries
```
# First create the binary with existing bindings
$ git checkout main
$ ./tools/devtool build
$ cp ./build/cargo_target/x86_64-unknown-linux-musl/debug/firecracker firecracker_old
# Second create the binary with new bindings
$ git checkout new_bindings
$ ./tools/devtool build
$ cp ./build/cargo_target/x86_64-unknown-linux-musl/debug/firecracker firecracker_new
# Run the script
$ python3 ./tools/test_bindings.py firecracker_old firecracker_new
```
"""
import argparse
import logging
import re
import subprocess
import sys
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
def parse_pahole(pahole_output):
    """Gather bindings related structs from pahole output

    Parse pahole output and gather struct information filtering for the 'bindings' keyword.
    The information gathered is the struct size and its alignment.

    @param pahole_output: raw pahole output as bytes
    @return: A dictionary where keys are struct names and values struct size and alignment
    """
    ret = {}
    # regular expression matches the name of the struct, its size and alignment
    structs = re.findall(
        rb"struct (.*?)\{.*?/\* size: (\d+).*?\*/.*?\n\} "
        rb"__attribute__\(\(__aligned__\((\d+)\)\)\)\;",
        pahole_output,
        flags=re.DOTALL,
    )
    for name, size, alignment in structs:
        # Decode the bytes capture and strip the trailing space before "{".
        # Previously str(bytes) was used, which produced repr keys such as
        # "b'foo '" — still internally consistent, but wrong names in logs.
        struct_name = name.decode().strip()
        if "bindings" in struct_name:
            ret[struct_name] = {"size": int(size), "alignment": int(alignment)}
    return ret
def pahole(binary: str) -> bytes:
    """Runs pahole on a binary and returns its raw stdout

    If pahole fails this will raise a `CalledProcessError`

    @param binary: binary to run pahole on
    @return: On success, the stdout of the pahole process as bytes
        (no text decoding is requested, so this is bytes, not str —
        parse_pahole's regex expects bytes)
    """
    result = subprocess.run(
        ["pahole", binary], stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True
    )
    return result.stdout
def check_pahole_mismatches(old: str, new: str) -> bool:
    """Checks for pahole mismatches in pahole information between two binaries

    @param old: old Firecracker binary
    @param new: new Firecracker binary
    @return: false if no mismatches found, true otherwise
    """
    old_structs = parse_pahole(pahole(old))
    new_structs = parse_pahole(pahole(new))
    # We go through all the structs existing in the old firecracker binary and check for mismatches
    # in the new one.
    for name, old_props in old_structs.items():
        # Note that the reverse, i.e. a name existing in the new binary but not in the old binary,
        # is not a problem. That would mean we are making use of some new struct from
        # bindgen-generated code. That does not break ABI compatibility.
        new_props = new_structs.get(name)
        if new_props is None:
            log.warning("struct '%s' does not exist in new binary", name)
            continue
        # Size mismatches are hard errors
        if old_props["size"] != new_props["size"]:
            log.error("size of '%s' does not match in two binaries", name)
            log.error("old: %s", old_props["size"])
            log.error("new: %s", new_props["size"])
            return True
        # Alignment mismatches just cause warnings
        if old_props["alignment"] != new_props["alignment"]:
            log.warning("alignment of '%s' does not match in two binaries", name)
            log.warning("old: %s", old_props["alignment"])
            log.warning("new: %s", new_props["alignment"])
        else:
            log.info("struct '%s' matches", name)
    return False
if __name__ == "__main__":
    # CLI entry point: compare the two binaries given on the command line
    # and exit non-zero when a struct-size mismatch is detected.
    parser = argparse.ArgumentParser(
        description="Check bindings ABI compatibility for Firecracker"
    )
    parser.add_argument(
        "firecracker_old",
        type=str,
        metavar="old-firecracker-binary",
        help="Firecracker binary with old bindings",
    )
    parser.add_argument(
        "firecracker_new",
        type=str,
        metavar="new-firecracker-binary",
        help="Firecracker binary with new bindings",
    )
    args = parser.parse_args()
    if check_pahole_mismatches(args.firecracker_old, args.firecracker_new):
        log.error("Structure layout mismatch")
        sys.exit(1)
    else:
        log.info("Structure layout matches")
        sys.exit(0)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,849
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_cpu_template_helper.py
|
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests that verify the cpu-template-helper's behavior."""
import json
import platform
from pathlib import Path
import pytest
from framework import utils
from framework.defs import SUPPORTED_HOST_KERNELS
from framework.properties import global_props
from framework.utils_cpu_templates import nonci_on_arm
from framework.utils_cpuid import get_guest_cpuid
from host_tools import cargo_build
PLATFORM = platform.machine()
TEST_RESOURCES_DIR = Path("./data/cpu_template_helper")
class CpuTemplateHelper:
    """
    Thin wrapper around the cpu-template-helper CLI tool.

    Each method assembles the corresponding subcommand line and runs it.
    """

    # Name of the binary produced by the cargo build.
    BINARY_NAME = "cpu-template-helper"

    def __init__(self):
        """Build CPU template helper tool binary"""
        self.binary = cargo_build.get_binary(self.BINARY_NAME)

    def _run(self, *tokens):
        """Join command tokens with spaces and execute the resulting command."""
        utils.run_cmd(" ".join(str(token) for token in tokens))

    def template_dump(self, vm_config_path, output_path):
        """Dump guest CPU config in the JSON custom CPU template format"""
        self._run(
            self.binary, "template", "dump",
            "--config", vm_config_path, "--output", output_path,
        )

    def template_strip(self, paths, suffix=""):
        """Strip entries shared between multiple CPU template files"""
        self._run(
            self.binary, "template", "strip",
            "--paths", *paths, "--suffix", f"'{suffix}'",
        )

    def template_verify(self, vm_config_path):
        """Verify the specified CPU template"""
        self._run(self.binary, "template", "verify", "--config", vm_config_path)

    def fingerprint_dump(self, vm_config_path, output_path):
        """Dump a fingerprint"""
        self._run(
            self.binary, "fingerprint", "dump",
            "--config", vm_config_path, "--output", output_path,
        )

    def fingerprint_compare(
        self,
        prev_path,
        curr_path,
        filters,
    ):
        """Compare two fingerprint files"""
        tokens = [
            self.binary, "fingerprint", "compare",
            "--prev", prev_path, "--curr", curr_path,
        ]
        if filters:
            tokens += ["--filters", " ".join(filters)]
        self._run(*tokens)
@pytest.fixture(scope="session", name="cpu_template_helper")
def cpu_template_helper_fixture():
    """Session-scoped fixture providing the CPU template helper tool wrapper."""
    helper = CpuTemplateHelper()
    return helper
def save_vm_config(microvm, tmp_path, custom_cpu_template=None):
    """
    Serialize the microVM configuration into `tmp_path/vm_config.json`.

    If `custom_cpu_template` is given, it is written next to the config and
    referenced from it. Returns the path to the written VM config file.
    """
    config = microvm.api.vm_config.get().json()
    # The API returns guest-relative paths; replace them with host paths.
    config["boot-source"]["kernel_image_path"] = str(microvm.kernel_file)
    config["drives"][0]["path_on_host"] = str(microvm.rootfs_file)
    if custom_cpu_template is not None:
        template_path = tmp_path / "template.json"
        template_path.write_text(json.dumps(custom_cpu_template), encoding="utf-8")
        config["cpu-config"] = str(template_path)
    config_path = tmp_path / "vm_config.json"
    config_path.write_text(json.dumps(config), encoding="utf-8")
    return config_path
def build_cpu_config_dict(cpu_config_path):
    """
    Parse a JSON custom CPU template file into a lookup-friendly dict:
    {"cpuid": {(leaf, subleaf, register): bitmap}, "msrs": {addr: bitmap}}.
    """
    raw = json.loads(cpu_config_path.read_text(encoding="utf-8"))

    # CPUID modifiers: leaf/subleaf are hex strings, bitmaps are binary strings.
    cpuid = {}
    for leaf_mod in raw["cpuid_modifiers"]:
        leaf = int(leaf_mod["leaf"], 16)
        subleaf = int(leaf_mod["subleaf"], 16)
        for reg_mod in leaf_mod["modifiers"]:
            cpuid[(leaf, subleaf, reg_mod["register"])] = int(reg_mod["bitmap"], 2)

    # MSR modifiers: addresses are hex strings, bitmaps are binary strings.
    msrs = {
        int(msr_mod["addr"], 16): int(msr_mod["bitmap"], 2)
        for msr_mod in raw["msr_modifiers"]
    }

    return {"cpuid": cpuid, "msrs": msrs}
# List of CPUID leaves / subleaves that are not enumerated in
# KVM_GET_SUPPORTED_CPUID on Intel and AMD.
UNAVAILABLE_CPUID_ON_DUMP_LIST = [
    # KVM changed to not return the host's processor topology information on
    # CPUID.Bh in the following commit (backported into kernel 5.10 and 6.1,
    # but not into kernel 4.14 due to merge conflict), since it's confusing
    # and the userspace VMM has to populate it with meaningful values.
    # https://github.com/torvalds/linux/commit/45e966fcca03ecdcccac7cb236e16eea38cc18af
    # Since Firecracker only populates subleaves 0 and 1 (thread level and core
    # level) in the normalization process and the subleaf 2 is left empty or
    # not listed, the subleaf 2 should be skipped when the userspace cpuid
    # enumerates it.
    (0xB, 0x2),
    # On CPUID.12h, the subleaves 0 and 1 enumerate Intel SGX capability and
    # attributes respectively, and subleaves 2 or higher enumerate Intel SGX
    # EPC that is listed only when CPUID.07h:EBX[2] is 1, meaning that SGX is
    # supported. However, as seen in CPU config baseline files, CPUID.07h:EBX[2]
    # is 0 on all tested platforms. On the other hand, the userspace cpuid
    # command enumerates subleaves up to 2 regardless of CPUID.07h:EBX[2].
    # KVM_GET_SUPPORTED_CPUID returns 0 in CPUID.12h.0 and firecracker passes
    # it as it is, so here we ignore subleaves 1 and 2.
    (0x12, 0x1),
    (0x12, 0x2),
    # CPUID.18h enumerates deterministic address translation parameters and the
    # subleaf 0 reports the maximum supported subleaf in EAX, and all the tested
    # platforms reports 0 in EAX. However, the userspace cpuid command in ubuntu
    # 22 also lists the subleaf 1.
    (0x18, 0x1),
    # CPUID.1Bh enumerates PCONFIG information. The availability of PCONFIG is
    # enumerated in CPUID.7h.0:EDX[18]. While all the supported platforms don't
    # support it, the userspace cpuid command in ubuntu 22 reports not only
    # the subleaf 0 but also the subleaf 1.
    (0x1B, 0x1),
    # CPUID.20000000h is not documented in Intel SDM and AMD APM. KVM doesn't
    # report it, but the userspace cpuid command in ubuntu 22 does.
    (0x20000000, 0x0),
    # CPUID.40000100h is Xen-specific leaf.
    # https://xenbits.xen.org/docs/4.6-testing/hypercall/x86_64/include,public,arch-x86,cpuid.h.html
    (0x40000100, 0x0),
    # CPUID.8000001Bh or later are not supported on kernel 4.14 with an
    # exception CPUID.8000001Dh and CPUID.8000001Eh normalized by firecracker.
    # https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/tree/arch/x86/kvm/cpuid.c?h=v4.14.313#n637
    # On kernel 4.16 or later, these leaves are supported.
    # https://github.com/torvalds/linux/commit/8765d75329a386dd7742f94a1ea5fdcdea8d93d0
    (0x8000001B, 0x0),
    (0x8000001C, 0x0),
    (0x8000001F, 0x0),
    # CPUID.80860000h is a Transmeta-specific leaf.
    (0x80860000, 0x0),
    # CPUID.C0000000h is a Centaur-specific leaf.
    (0xC0000000, 0x0),
]

# Dictionary of CPUID bitmasks that should not be tested due to its mutability.
# Keyed by (leaf, subleaf, register); the value is a mask of bits to ignore
# when comparing dumped vs. actual CPUID values.
CPUID_EXCEPTION_LIST = {
    # CPUID.01h:ECX[OSXSAVE (bit 27)] is linked to CR4[OSXSAVE (bit 18)] that
    # can be updated by guest OS.
    # https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/tree/arch/x86/kvm/x86.c?h=v5.10.176#n9872
    (0x1, 0x0, "ecx"): 1 << 27,
    # CPUID.07h:ECX[OSPKE (bit 4)] is linked to CR4[PKE (bit 22)] that can be
    # updated by guest OS.
    # https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/tree/arch/x86/kvm/x86.c?h=v5.10.176#n9872
    (0x7, 0x0, "ecx"): 1 << 4,
    # CPUID.0Dh:EBX is variable depending on XCR0 that can be updated by guest
    # OS with XSETBV instruction.
    # https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/tree/arch/x86/kvm/x86.c?h=v5.10.176#n973
    (0xD, 0x0, "ebx"): 0xFFFF_FFFF,
    (0xD, 0x1, "ebx"): 0xFFFF_FFFF,
}

# List of MSR indices that should not be tested due to its mutability.
MSR_EXCEPTION_LIST = [
    # MSR_KVM_WALL_CLOCK and MSR_KVM_SYSTEM_TIME depend on the elapsed time.
    0x11,
    0x12,
    # MSR_IA32_FEAT_CTL and MSR_IA32_SPEC_CTRL are R/W MSRs that can be
    # modified by OS to control features.
    0x3A,
    0x48,
    # MSR_IA32_SMBASE is not accessible outside of System Management Mode.
    0x9E,
    # MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP and MSR_IA32_SYSENTER_EIP are
    # R/W MSRs that will be set up by OS to call fast system calls with
    # SYSENTER.
    0x174,
    0x175,
    0x176,
    # MSR_IA32_TSC_DEADLINE specifies the time at which a timer interrupt
    # should occur and depends on the elapsed time.
    0x6E0,
    # MSR_KVM_SYSTEM_TIME_NEW and MSR_KVM_WALL_CLOCK_NEW depend on the elapsed
    # time.
    0x4B564D00,
    0x4B564D01,
    # MSR_KVM_ASYNC_PF_EN is an asynchronous page fault (APF) control MSR and
    # is intialized in VM setup process.
    0x4B564D02,
    # MSR_KVM_STEAL_TIME indicates CPU steal time filled in by the hypervisor
    # periodically.
    0x4B564D03,
    # MSR_KVM_PV_EOI_EN is PV End Of Interrupt (EOI) MSR and is initialized in
    # VM setup process.
    0x4B564D04,
    # MSR_KVM_ASYNC_PF_INT is an interrupt vector for delivery of 'page ready'
    # APF events and is initialized just before MSR_KVM_ASYNC_PF_EN.
    0x4B564D06,
    # MSR_STAR, MSR_LSTAR, MSR_CSTAR and MSR_SYSCALL_MASK are R/W MSRs that
    # will be set up by OS to call fast system calls with SYSCALL.
    0xC0000081,
    0xC0000082,
    0xC0000083,
    0xC0000084,
    # MSR_AMD64_VIRT_SPEC_CTRL is R/W and can be modified by OS to control
    # security features for speculative attacks.
    0xC001011F,
]
def get_guest_msrs(microvm, msr_index_list):
    """
    Read MSRs from inside the guest via the `rdmsr` command.

    Returns a dict mapping MSR index -> register value, skipping indices in
    MSR_EXCEPTION_LIST (registers whose values are mutable at runtime).
    """
    msrs_dict = {}
    # NOTE: the local names `index` and `code` appear in the f-string debug
    # specifiers below, so they are deliberately kept stable.
    for index in (i for i in msr_index_list if i not in MSR_EXCEPTION_LIST):
        code, stdout, stderr = microvm.ssh.run(f"rdmsr -0 {index}")
        assert stderr == "", f"Failed to get MSR for {index=:#x}: {code=}"
        msrs_dict[index] = int(stdout, 16)
    return msrs_dict
@pytest.mark.skipif(
    PLATFORM != "x86_64",
    reason=(
        "`cpuid` and `rdmsr` commands are only available on x86_64. "
        "System registers are not accessible on aarch64."
    ),
)
def test_cpu_config_dump_vs_actual(
    microvm_factory,
    guest_kernel,
    rootfs,
    cpu_template_helper,
    tmp_path,
):
    """
    Verify that the dumped CPU config matches the actual CPU config inside
    guest.
    """
    microvm = microvm_factory.build(guest_kernel, rootfs)
    microvm.spawn()
    microvm.basic_config()
    microvm.add_net_iface()
    vm_config_path = save_vm_config(microvm, tmp_path)

    # Dump CPU config with the helper tool.
    cpu_config_path = tmp_path / "cpu_config.json"
    cpu_template_helper.template_dump(vm_config_path, cpu_config_path)
    dump_cpu_config = build_cpu_config_dict(cpu_config_path)

    # Retrieve actual CPU config from guest
    microvm.start()
    actual_cpu_config = {
        "cpuid": get_guest_cpuid(microvm),
        "msrs": get_guest_msrs(microvm, dump_cpu_config["msrs"].keys()),
    }

    # Compare CPUID between actual and dumped CPU config.
    # Verify all the actual CPUIDs are covered and match with the dumped one.
    keys_not_in_dump = {}
    for key, actual in actual_cpu_config["cpuid"].items():
        if (key[0], key[1]) in UNAVAILABLE_CPUID_ON_DUMP_LIST:
            continue
        if key not in dump_cpu_config["cpuid"]:
            keys_not_in_dump[key] = actual_cpu_config["cpuid"][key]
            continue
        dump = dump_cpu_config["cpuid"][key]
        if key in CPUID_EXCEPTION_LIST:
            # Mask out mutable bits before comparing.
            actual &= ~CPUID_EXCEPTION_LIST[key]
            dump &= ~CPUID_EXCEPTION_LIST[key]
        assert actual == dump, (
            f"Mismatched CPUID for leaf={key[0]:#x} subleaf={key[1]:#x} reg={key[2]}:"
            f"{actual=:#034b} vs. {dump=:#034b}"
        )
    assert len(keys_not_in_dump) == 0

    # Verify all CPUID on the dumped CPU config are covered in actual one.
    for key, dump in dump_cpu_config["cpuid"].items():
        actual = actual_cpu_config["cpuid"].get(key)
        # `cpuid -r` command does not list up invalid leaves / subleaves
        # without specifying them.
        if actual is None:
            actual = get_guest_cpuid(microvm, key[0], key[1])[key]
        if key in CPUID_EXCEPTION_LIST:
            # Mask out mutable bits before comparing.
            actual &= ~CPUID_EXCEPTION_LIST[key]
            dump &= ~CPUID_EXCEPTION_LIST[key]
        assert actual == dump, (
            f"Mismatched CPUID for leaf={key[0]:#x} subleaf={key[1]:#x} reg={key[2]}:"
            f"{actual=:#034b} vs. {dump=:#034b}"
        )

    # Compare MSR between actual and dumped CPU config.
    for key in dump_cpu_config["msrs"]:
        if key in MSR_EXCEPTION_LIST:
            continue
        actual = actual_cpu_config["msrs"][key]
        dump = dump_cpu_config["msrs"][key]
        assert (
            actual == dump
        ), f"Mismatched MSR for {key:#010x}: {actual=:#066b} vs. {dump=:#066b}"
def detect_fingerprint_change(microvm, tmp_path, cpu_template_helper, filters=None):
    """
    Dump a fresh fingerprint for the given microVM and compare it (optionally
    restricted by `filters`) against the checked-in baseline for this CPU
    codename / host kernel combination.
    """
    # Boot configuration for the VM whose fingerprint we take.
    microvm.spawn()
    microvm.basic_config()
    vm_config_path = save_vm_config(microvm, tmp_path)

    # Take the current fingerprint.
    fingerprint_path = tmp_path / "fingerprint.json"
    cpu_template_helper.fingerprint_dump(vm_config_path, fingerprint_path)

    # Locate the baseline captured for this host configuration.
    baseline_name = (
        f"fingerprint_{global_props.cpu_codename}_{global_props.host_linux_version}host.json"
    )
    baseline_path = TEST_RESOURCES_DIR / baseline_name
    # To (re)generate the baseline, dump directly to `baseline_path` instead:
    # cpu_template_helper.fingerprint_dump(vm_config_path, baseline_path)

    # Compare with baseline
    cpu_template_helper.fingerprint_compare(baseline_path, fingerprint_path, filters)
@pytest.mark.no_block_pr
@pytest.mark.skipif(
    global_props.host_linux_version not in SUPPORTED_HOST_KERNELS,
    reason=f"Supported kernels are {SUPPORTED_HOST_KERNELS}",
)
def test_guest_cpu_config_change(test_microvm_with_api, tmp_path, cpu_template_helper):
    """
    Verify that the guest CPU config has not changed since the baseline
    fingerprint was gathered.
    """
    old_non_sve_kernel = (
        global_props.host_linux_version == "4.14"
        and global_props.instance == "c7g.metal"
    )
    if old_non_sve_kernel:
        # The non-SVE kernel has a different value in 0x6030000000100040 because
        # it's an old kernel.
        pytest.skip("old kernel has different fingerprint")
    detect_fingerprint_change(
        test_microvm_with_api, tmp_path, cpu_template_helper, ["guest_cpu_config"]
    )
@pytest.mark.nonci
def test_fingerprint_change(test_microvm_with_api, tmp_path, cpu_template_helper):
    """
    Verify that no field of the fingerprint has changed since the baseline
    fingerprint was gathered.
    """
    # No filters: compare every field of the fingerprint.
    detect_fingerprint_change(test_microvm_with_api, tmp_path, cpu_template_helper)
@nonci_on_arm
def test_json_static_templates(
    test_microvm_with_api, cpu_template_helper, tmp_path, custom_cpu_template
):
    """
    Verify that JSON static CPU templates are applied as intended.
    """
    vm = test_microvm_with_api
    vm.spawn()
    vm.basic_config()
    # Serialize a VM config that references the JSON static CPU template.
    config_path = save_vm_config(vm, tmp_path, custom_cpu_template["template"])
    # Let the helper tool validate the template against this config.
    cpu_template_helper.template_verify(config_path)
def test_consecutive_cpu_config_consistency(
    test_microvm_with_api, cpu_template_helper, tmp_path
):
    """
    Verify that two dumped guest CPU configs obtained consecutively are
    consistent. The dumped guest CPU config should not change without
    any environmental changes (firecracker, kernel, microcode updates).
    """
    microvm = test_microvm_with_api
    microvm.spawn()
    microvm.basic_config()
    vm_config_path = save_vm_config(microvm, tmp_path)

    # Dump the same guest CPU config twice with the helper tool.
    cpu_config_1 = tmp_path / "cpu_config_1.json"
    cpu_template_helper.template_dump(vm_config_path, cpu_config_1)
    cpu_config_2 = tmp_path / "cpu_config_2.json"
    cpu_template_helper.template_dump(vm_config_path, cpu_config_2)

    # Strip entries common to both dumps; identical dumps leave both empty.
    cpu_template_helper.template_strip([cpu_config_1, cpu_config_2])

    # The expected empty-template shape differs per architecture.
    if PLATFORM == "x86_64":
        empty_cpu_config = {
            "cpuid_modifiers": [],
            "kvm_capabilities": [],
            "msr_modifiers": [],
        }
    elif PLATFORM == "aarch64":
        empty_cpu_config = {
            "kvm_capabilities": [],
            "reg_modifiers": [],
            "vcpu_features": [],
        }
    else:
        # Previously an unsupported platform fell through and raised a
        # confusing NameError on `empty_cpu_config`; fail explicitly instead.
        raise RuntimeError(f"Unsupported platform: {PLATFORM}")
    assert json.loads(cpu_config_1.read_text(encoding="utf-8")) == empty_cpu_config
    assert json.loads(cpu_config_2.read_text(encoding="utf-8")) == empty_cpu_config
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,850
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/utils_cpu_templates.py
|
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Utilities for CPU template related functionality."""
import json
from pathlib import Path
import pytest
import framework.utils_cpuid as cpuid_utils
from framework.properties import global_props
# All existing CPU templates available on Intel
INTEL_TEMPLATES = ["C3", "T2", "T2CL", "T2S"]
# All existing CPU templates available on AMD
AMD_TEMPLATES = ["T2A"]
# All existing CPU templates available on ARM
ARM_TEMPLATES = ["V1N1"]
def get_supported_cpu_templates():
    """
    Return the list of CPU templates supported by the platform.
    """
    vendor = cpuid_utils.get_cpu_vendor()
    if vendor == cpuid_utils.CpuVendor.INTEL:
        # T2CL template is only supported on Cascade Lake and newer CPUs.
        skylake_model = "Intel(R) Xeon(R) Platinum 8175M CPU @ 2.50GHz"
        if global_props.cpu_model == skylake_model:
            return sorted(set(INTEL_TEMPLATES) - set(["T2CL"]))
        return INTEL_TEMPLATES
    if vendor == cpuid_utils.CpuVendor.AMD:
        return AMD_TEMPLATES
    if vendor == cpuid_utils.CpuVendor.ARM and global_props.instance == "c7g.metal":
        return ARM_TEMPLATES
    # No templates on m6g.metal or any unrecognized vendor/instance.
    return []
# Resolved once at import time; the supported set only depends on the host the
# tests run on.
SUPPORTED_CPU_TEMPLATES = get_supported_cpu_templates()

# Custom CPU templates for Aarch64 for testing
AARCH64_CUSTOM_CPU_TEMPLATES_G2 = ["aarch64_remove_ssbs", "aarch64_v1n1"]
AARCH64_CUSTOM_CPU_TEMPLATES_G3 = [
    "aarch64_remove_ssbs",
    "aarch64_with_sve_and_pac",
    "aarch64_v1n1",
]
def get_supported_custom_cpu_templates():
"""
Return the list of custom CPU templates supported by the platform.
"""
match cpuid_utils.get_cpu_vendor():
case cpuid_utils.CpuVendor.INTEL:
# T2CL template is only supported on Cascade Lake and newer CPUs.
skylake_model = "Intel(R) Xeon(R) Platinum 8175M CPU @ 2.50GHz"
if global_props.cpu_model == skylake_model:
return set(INTEL_TEMPLATES) - {"T2CL"}
return INTEL_TEMPLATES
case cpuid_utils.CpuVendor.AMD:
return AMD_TEMPLATES
case cpuid_utils.CpuVendor.ARM:
match global_props.instance:
case "m6g.metal":
return AARCH64_CUSTOM_CPU_TEMPLATES_G2
case "c7g.metal":
return AARCH64_CUSTOM_CPU_TEMPLATES_G3
def custom_cpu_templates_params():
    """Yield each supported custom CPU template as a pytest parameter."""
    for name in sorted(get_supported_custom_cpu_templates()):
        template_file = Path("./data/static_cpu_templates") / f"{name.lower()}.json"
        template = json.loads(template_file.read_text("utf-8"))
        yield pytest.param({"name": name, "template": template}, id="custom_" + name)
def static_cpu_templates_params():
    """Yield each supported static CPU template name as a pytest parameter."""
    yield from (
        pytest.param(name, id="static_" + name)
        for name in sorted(get_supported_cpu_templates())
    )
def nonci_on_arm(func):
    """Temporary decorator used to mark specific cpu template related tests as nonci on ARM platforms"""
    running_on_arm = cpuid_utils.get_cpu_vendor() == cpuid_utils.CpuVendor.ARM
    return pytest.mark.nonci(func) if running_on_arm else func
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,851
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tools/compare_baselines/interactive.py
|
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Compare gathered baselines interactively."""
import enum
import json
import sys
import questionary
from utils.comparator import BaseComparator
from utils.defs import DEFAULT_BASELINE_DIRECTORY
from utils.fetcher import BaselineDirectoryFetcher
class Command(enum.Enum):
    """List of commands"""

    # The value strings double as the menu labels presented to the user, and
    # the declaration order is the display order.
    LOAD = "Load baseline JSON files"
    COMPARE = "Compare baseline values"
    QUIT = "Quit"
class InteractiveComparator(BaseComparator):
    """Class for comparing baselines interactively"""

    def __init__(self):
        super().__init__()
        # Maps baseline JSON file path -> fetcher object for that file.
        self._fetchers = {}

    @property
    def fetchers(self):
        """Return fetchers"""
        return self._fetchers

    # Backward-compatible alias for the original misspelled property name.
    fethcers = fetchers

    def cmd_loop(self):
        """Main loop to receive command"""
        while True:
            cmd = questionary.select(
                "Select command:",
                choices=[c.value for c in Command],
            ).ask()
            if cmd == Command.LOAD.value:
                self.cmd_load()
            elif cmd == Command.COMPARE.value:
                self.cmd_compare()
            elif cmd == Command.QUIT.value:
                print("Bye.")
                break

    def cmd_load(self):
        """Load command: pull all baseline JSON files from a directory."""
        dpath = questionary.path(
            "Enter directory path to load JSON:",
            default=DEFAULT_BASELINE_DIRECTORY,
        ).ask()
        if not dpath:
            return
        dfetcher = BaselineDirectoryFetcher(dpath)
        self._fetchers.update(dfetcher.fetchers)

    def cmd_compare(self):
        """Compare command: diff two selected baselines and report stats."""
        # select source and target baselines (target restricted to same test)
        path1, instance1, model1 = self._select("source")
        if path1 is None:
            return
        test = self._fetchers[path1].test
        path2, instance2, model2 = self._select("target", test)
        if path2 is None:
            return
        # calculate diff
        diff = self.calc_diff(
            self._fetchers[path1].get_baseline(instance1, model1),
            self._fetchers[path2].get_baseline(instance2, model2),
        )
        # calculate stats
        stats = self.calc_stats(diff)
        # print to stdout
        print(
            f"Test: {test}\n"
            f"Source:\n"
            f" Instance type: {instance1}\n"
            f" CPU model: {model1}\n"
            f" JSON path: {path1}\n"
            f"Target:\n"
            f" Instance type: {instance2}\n"
            f" CPU model: {model2}\n"
            f" JSON path: {path2}\n"
            f"Stats:\n"
            f"{json.dumps(stats, indent=4)}"
        )
        # dump results
        data = {
            "test": test,
            "source": {
                "path": path1,
                "instance": instance1,
                "model": model1,
            },
            # BUG FIX: was `path1`; the target entry must record path2.
            "target": {"path": path2, "instance": instance2, "model": model2},
            "diff": diff,
            "stats": stats,
        }
        self._dump(data)

    def _select(self, sample, test=None):
        """Select a baseline.

        Returns a (path, instance, model) triple, or (None, None, None) when
        the user aborts or no data is loaded.
        """
        # select file
        if test:
            choices = [f.fpath for f in self._fetchers.values() if f.test == test]
        else:
            choices = self._fetchers.keys()
        if len(choices) == 0:
            print(
                "No available data. Please import JSON files.",
                file=sys.stderr,
            )
            return None, None, None
        path = questionary.select(
            f"Select path for {sample} sample:",
            choices=sorted(choices),
        ).ask()
        if not path:
            return None, None, None
        # select instance type
        instance = questionary.select(
            f"Select instance type for {sample} sample:",
            choices=self._fetchers[path].get_instances(),
        ).ask()
        if not instance:
            return None, None, None
        # select CPU model (skip the prompt when there is only one)
        models = self._fetchers[path].get_models(instance)
        if len(models) == 1:
            model = models[0]
        else:
            model = questionary.select(
                f"Select CPU for {sample} sample:",
                choices=models,
            ).ask()
            if not model:
                return None, None, None
        return path, instance, model

    def _dump(self, data):
        """Dump results to a user-chosen JSON file (no-op if left empty)."""
        ofile = questionary.text(
            "Enter file path to dump (Keep empty not to dump):"
        ).ask()
        if not ofile:
            return
        dumped = json.dumps(data, indent=4)
        with open(ofile, "w", encoding="utf-8") as file:
            file.write(dumped)
def main():
    """Entry point: start the interactive comparison loop."""
    InteractiveComparator().cmd_loop()


if __name__ == "__main__":
    main()
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,852
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tools/parse_baselines/main.py
|
#!/bin/env python3
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Script used to calculate baselines from raw performance test output.
The script expects to find at least 2 files containing test results in
the provided data folder
(e.g. test_results/test_vsock_throughput_results_m5d.metal_5.10.json).
"""
import argparse
import json
import re
from pathlib import Path
from providers.block import BlockDataParser
from providers.iperf3 import Iperf3DataParser
from providers.latency import LatencyDataParser
from providers.snapshot_restore import SnapshotRestoreDataParser
# Maps a test name (as embedded in result file names) to the parser class that
# understands that test's raw output format.
DATA_PARSERS = {
    "vsock_throughput": Iperf3DataParser,
    "network_tcp_throughput": Iperf3DataParser,
    "block_performance": BlockDataParser,
    "snapshot_restore_performance": SnapshotRestoreDataParser,
    "network_latency": LatencyDataParser,
}
def read_data_files(data_dir):
"""Return all JSON objects contained in the files of this dir, organized per test/instance/kv."""
data_dir = Path(data_dir)
assert data_dir.is_dir()
data = {}
# Get all files in the dir tree that match a test.
for file in data_dir.rglob("*.ndjson"):
match = re.search(
"test_(?P<test>.+)_results_(?P<instance>.+)_(?P<kv>.+).ndjson",
str(file.name),
)
test, instance, kv = match.groups()
for line in file.open(encoding="utf-8"):
data.setdefault((test, instance, kv), []).append(json.loads(line))
return data
def overlay(dict_old, dict_new):
    """
    Overlay one dictionary on top of another

    >>> a = {'a': {'b': 1, 'c': 1}}
    >>> b = {'a': {'b': 2, 'd': 2}}
    >>> overlay(a, b)
    {'a': {'b': 2, 'c': 1, 'd': 2}}
    """
    merged = dict_old.copy()
    for key, new_val in dict_new.items():
        # Recurse only when both sides have the key and the new value is a dict.
        if key in dict_old and isinstance(new_val, dict):
            merged[key] = overlay(dict_old[key], new_val)
        else:
            merged[key] = new_val
    return merged
def update_baseline(test, instance, kernel, test_data):
    """Parse and update the baselines"""
    config_path = Path(
        f"./tests/integration_tests/performance/configs/test_{test}_config_{kernel}.json"
    )
    baselines = json.loads(config_path.read_text("utf-8"))
    existing_cpus = baselines["hosts"]["instances"][instance]["cpus"]

    # Instantiate the right data parser and extract per-CPU baselines.
    parsed_cpus = DATA_PARSERS[test](test_data).parse()

    # Overlay freshly parsed baselines onto matching existing CPU entries.
    for parsed in parsed_cpus:
        for existing in existing_cpus:
            if existing["model"] == parsed["model"]:
                existing["baselines"] = overlay(
                    existing["baselines"], parsed["baselines"]
                )
    config_path.write_text(
        json.dumps(baselines, indent=4, sort_keys=True), encoding="utf-8"
    )
    # Warn against the fact that not all CPUs pertaining to
    # some arch were updated.
    assert len(parsed_cpus) == len(existing_cpus), (
        "It may be that only a subset of CPU types were updated! "
        "Need to run again! Nevertheless we updated the baselines..."
    )
def main():
    """Run the main logic"""
    arg_parser = argparse.ArgumentParser(description=__doc__)
    arg_parser.add_argument(
        "-d",
        "--data-folder",
        help="Path to folder containing raw test data.",
        required=True,
    )
    cli_args = arg_parser.parse_args()
    # Each key identifies one (test, instance, kernel-version) result set.
    for (test, instance, kv), test_data in read_data_files(cli_args.data_folder).items():
        update_baseline(test, instance, kv, test_data)


if __name__ == "__main__":
    main()
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,853
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/utils.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Generic utility functions that are used in the framework."""
import functools
import glob
import json
import logging
import os
import platform
import re
import signal
import subprocess
import time
import typing
from collections import defaultdict, namedtuple
from pathlib import Path
from typing import Dict
import packaging.version
import psutil
from retry import retry
from retry.api import retry_call
from framework.defs import MIN_KERNEL_VERSION_FOR_IO_URING
# Screen command forcing immediate flushing of a session's log file.
FLUSH_CMD = 'screen -S {session} -X colon "logfile flush 0^M"'
# Uniform triple returned by command-running helpers.
CommandReturn = namedtuple("CommandReturn", "returncode stdout stderr")
# Logger dedicated to executed shell commands.
CMDLOG = logging.getLogger("commands")
# `top` invocation printing one snapshot of per-thread CPU usage for a PID.
GET_CPU_LOAD = "top -bn1 -H -p {} -w512 | tail -n+8"
class ProcessManager:
    """Host process manager.

    TODO: Extend the management to guest processes.
    TODO: Extend with automated process/cpu_id pinning accountability.
    """

    @staticmethod
    def get_threads(pid: int) -> dict:
        """Return dict consisting of child threads."""
        by_name = defaultdict(list)
        for thread in psutil.Process(pid).threads():
            by_name[psutil.Process(thread.id).name()].append(thread.id)
        return by_name

    @staticmethod
    def get_cpu_affinity(pid: int) -> list:
        """Get CPU affinity for a thread."""
        return psutil.Process(pid).cpu_affinity()

    @staticmethod
    def set_cpu_affinity(pid: int, cpulist: list) -> list:
        """Set CPU affinity for a thread."""
        # Translate container-visible cpu ids to real host cpu ids.
        real_cpulist = [CpuMap(cpu) for cpu in cpulist]
        return psutil.Process(pid).cpu_affinity(real_cpulist)

    @staticmethod
    def get_cpu_percent(pid: int) -> Dict[str, Dict[str, float]]:
        """Return the instant process CPU utilization percent."""
        _, stdout, _ = run_cmd(GET_CPU_LOAD.format(pid))
        cpu_percentages = {}
        for line in stdout.strip().split(sep="\n"):
            # sometimes the firecracker process will have gone away, in which case top does not return anything
            if not line:
                continue
            fields = line.strip().split()
            # We need at least CPU utilization and threads names cols (which
            # might be two cols e.g `fc_vcpu 0`).
            assert len(fields) > 11, line
            # Thread name may span two columns (e.g. `fc_vcpu 0`).
            thread_name = fields[11] + (" " + fields[12] if len(fields) > 12 else "")
            cpu_percentages.setdefault(thread_name, {})[fields[0]] = float(fields[8])
        return cpu_percentages
class UffdHandler:
    """Describe the UFFD page fault handler process."""

    def __init__(self, name, args):
        """Instantiate the handler process with arguments.

        :param name: handler binary name, resolved as `/<name>`.
        :param args: extra command-line arguments for the handler.
        """
        # Not spawned yet; `spawn()` must be called explicitly.
        self._proc = None
        self._args = [f"/{name}"]
        self._args.extend(args)

    def spawn(self):
        """Spawn handler process using arguments provided."""
        self._proc = subprocess.Popen(
            self._args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )

    def proc(self):
        """Return UFFD handler process (None if never spawned)."""
        return self._proc

    def __del__(self):
        """Tear down the UFFD handler process.

        Guarded so that destroying a handler that was never spawned does not
        raise AttributeError from the finalizer.
        """
        if self._proc is not None:
            self._proc.kill()
# pylint: disable=too-few-public-methods
class CpuMap:
    """Cpu map from real cpu cores to containers visible cores.

    When a docker container is restricted in terms of assigned cpu cores,
    the information from `/proc/cpuinfo` will present all the cpu cores
    of the machine instead of showing only the container assigned cores.
    This class maps the real assigned host cpu cores to virtual cpu cores,
    starting from 0.
    """

    # Cached host cpu list, populated lazily on first use and shared by every
    # lookup.
    arr = []

    def __new__(cls, cpu):
        """Instantiate the class field."""
        # NOTE: CpuMap.len() populates `arr` as a side effect, so by the time
        # the emptiness check below runs the cache is already filled.
        assert CpuMap.len() > cpu
        if not CpuMap.arr:
            CpuMap.arr = CpuMap._cpus()
        return CpuMap.arr[cpu]

    @staticmethod
    def len():
        """Get the host cpus count."""
        if not CpuMap.arr:
            CpuMap.arr = CpuMap._cpus()
        return len(CpuMap.arr)

    @classmethod
    def _cpus(cls):
        """Obtain the real processor map.

        See this issue for details:
        https://github.com/moby/moby/issues/20770.
        """
        # The real processor map is found at different paths based on cgroups version:
        #  - cgroupsv1: /cpuset.cpus
        #  - cgroupsv2: /cpuset.cpus.effective
        # For more details, see https://docs.kernel.org/admin-guide/cgroup-v2.html#cpuset-interface-files
        cpulist = None
        for path in [
            Path("/sys/fs/cgroup/cpuset/cpuset.cpus"),
            Path("/sys/fs/cgroup/cpuset.cpus.effective"),
        ]:
            if path.exists():
                cpulist = path.read_text("ascii").strip()
                break
        else:
            raise RuntimeError("Could not find cgroups cpuset")
        return ListFormatParser(cpulist).parse()
class ListFormatParser:
    """Parser class for LIST FORMAT strings."""

    def __init__(self, content):
        """Initialize the parser with the content."""
        self._content = content.strip()

    @classmethod
    def _is_range(cls, rng):
        """Return true if the parser content is a range.

        E.g ranges: 0-10.
        """
        # The previous pattern `[0-9][1-9]*` could not match a trailing 0 in a
        # number and only worked by accident of substring search; `[0-9]+`
        # expresses the intent directly.
        return re.search(r"([0-9]+)-([0-9]+)", rng) is not None

    @classmethod
    def _range_to_list(cls, rng):
        """Return a range of integers based on the content.

        The content respects the LIST FORMAT defined in the
        cpuset documentation.
        See: https://man7.org/linux/man-pages/man7/cpuset.7.html.
        """
        ends = rng.split("-")
        if len(ends) != 2:
            return []
        return list(range(int(ends[0]), int(ends[1]) + 1))

    def parse(self):
        """Parse list formats for cpuset and mems.

        Returns the cpu ids in ascending order (previously the order depended
        on set iteration order, making index-based mappings nondeterministic).

        See LIST FORMAT here:
        https://man7.org/linux/man-pages/man7/cpuset.7.html.
        """
        if len(self._content) == 0:
            return []
        cpus = set()
        for group in self._content.split(","):
            if ListFormatParser._is_range(group):
                cpus.update(ListFormatParser._range_to_list(group))
            else:
                cpus.add(int(group))
        # Sort so index-based consumers (e.g. CpuMap) get a stable order.
        return sorted(cpus)
class CmdBuilder:
    """Fluent builder for shell command strings."""

    def __init__(self, bin_path):
        """Start a command for the binary at `bin_path`."""
        self._bin_path = bin_path
        self._args = {}

    def with_arg(self, flag, value=""):
        """Record `flag` (and optional `value`); returns self for chaining."""
        self._args[flag] = value
        return self

    def build(self):
        """Return the assembled command string (note: trailing space kept)."""
        tokens = [self._bin_path]
        tokens.extend(f"{flag} {value}" for flag, value in self._args.items())
        return " ".join(tokens) + " "
# pylint: disable=R0903
class DictQuery:
    """Utility class to query python dicts key paths.

    The keys from the path must be `str`s.
    Example:
    > d = {
        "a": {
            "b": {
                "c": 0
            }
        },
        "d": 1
      }
    > dq = DictQuery(d)
    > print(dq.get("a/b/c"))
    0
    > print(dq.get("d"))
    1
    """

    def __init__(self, inner: dict):
        """Initialize the dict query."""
        self._inner = inner

    def get(self, keys_path: str, default=None):
        """Retrieve the value at `keys_path`, or `default` when absent.

        BUG FIXES over the previous version:
        - a missing *final* key now returns `default` instead of `None`;
        - traversing through a non-dict value (e.g. `get("d/x")` in the
          example above) returns `default` instead of raising
          `AttributeError`.
        """
        result = self._inner
        for key in keys_path.strip().split("/"):
            if not isinstance(result, dict) or key not in result:
                return default
            result = result[key]
        return result

    def __str__(self):
        """Representation as a string."""
        return str(self._inner)
class ExceptionAggregator(Exception):
    """Abstraction over an exception with message formatter."""

    def __init__(self, add_newline=False):
        """Initialize the exception aggregator.

        When `add_newline` is True, an empty first entry makes the
        rendered failures start on a fresh line in the logs.
        """
        super().__init__()
        self.failures = [""] if add_newline else []

    def add_row(self, failure: str):
        """Add a failure entry."""
        self.failures.append(f"{failure}")

    def has_any(self) -> bool:
        """Return whether there are failures or not."""
        # A lone empty string is only the formatting placeholder.
        return len(self.failures) > 1 or (
            len(self.failures) == 1 and self.failures[0] != ""
        )

    def __str__(self):
        """Return custom as string implementation."""
        return "\n\n".join(self.failures)
def search_output_from_cmd(cmd: str, find_regex: typing.Pattern) -> typing.Match:
    """
    Run a shell command and search a given regex object in stdout.

    If the regex object is not found, a RuntimeError exception is raised.

    :param cmd: command to run
    :param find_regex: regular expression object to search for
    :return: result of re.search()
    """
    _, stdout, _ = run_cmd(cmd)
    match = re.search(find_regex, stdout)
    if match is None:
        raise RuntimeError(
            "Could not find '%s' in output for '%s'" % (find_regex.pattern, cmd)
        )
    return match
def get_files_from(
    find_path: str, pattern: str, exclude_names: list = None, recursive: bool = True
):
    """
    Return a list of files from a given path, recursively.

    :param find_path: path where to look for files
    :param pattern: what pattern to apply to file names
    :param exclude_names: folder names to exclude (None means exclude none)
    :param recursive: do a recursive search for the given pattern
    :return: list of found files
    """
    # BUG FIX: the previous version crashed with a TypeError
    # (`name in None`) when `exclude_names` was left at its default.
    skip = set(exclude_names or [])
    found = []
    for entry in os.scandir(find_path):
        # Skip excluded folder names and plain files; glob below handles
        # files inside the remaining directories.
        if entry.name in skip or entry.is_file():
            continue
        found.extend(
            glob.glob(f"{find_path}/{entry.name}/**/{pattern}", recursive=recursive)
        )
    # scandir will not look at the files matching the pattern in the
    # current directory.
    found.extend(glob.glob(f"{find_path}/./{pattern}"))
    return found
def get_free_mem_ssh(ssh_connection):
    """
    Get how much free memory in kB a guest sees, over ssh.

    :param ssh_connection: connection to the guest
    :return: available mem column output of 'free'
    """
    _, stdout, stderr = ssh_connection.run("cat /proc/meminfo | grep MemAvailable")
    assert stderr == ""
    # Expect exactly "MemAvailable:   <kB> kB"; the middle field is the value.
    fields = stdout.split()
    if len(fields) != 3:
        raise Exception("Available memory not found in `/proc/meminfo")
    return int(fields[1])
def run_cmd_sync(cmd, ignore_return_code=False, no_shell=False, cwd=None):
    """
    Execute a given command.

    :param cmd: command to execute
    :param ignore_return_code: whether a non-zero return code should be ignored
    :param no_shell: don't run the command in a sub-shell
    :param cwd: sets the current directory before the child is executed
    :return: return code, stdout, stderr
    """
    if isinstance(cmd, list) or no_shell:
        # Create the async process
        proc = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd
        )
    else:
        proc = subprocess.Popen(
            cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd
        )
    # Capture stdout/stderr
    stdout, stderr = proc.communicate()
    output_message = f"\n[{proc.pid}] Command:\n{cmd}"
    # BUG FIX: stdout/stderr are bytes, so the previous `!= ""` comparisons
    # were always true and empty streams were logged too. Use truthiness.
    if stdout:
        output_message += f"\n[{proc.pid}] stdout:\n{stdout.decode()}"
    if stderr:
        output_message += f"\n[{proc.pid}] stderr:\n{stderr.decode()}"
    # If a non-zero return code was thrown, raise an exception
    if not ignore_return_code and proc.returncode != 0:
        output_message += f"\nReturned error code: {proc.returncode}"
        if stderr:
            output_message += f"\nstderr:\n{stderr.decode()}"
        raise ChildProcessError(output_message)
    # Log the message with one call so that multiple statuses
    # don't get mixed up
    CMDLOG.debug(output_message)
    return CommandReturn(proc.returncode, stdout.decode(), stderr.decode())
def run_cmd(cmd, ignore_return_code=False, no_shell=False, cwd=None):
    """
    Run a command using the sync function that logs the output.

    :param cmd: command to run
    :param ignore_return_code: whether a non-zero return code should be ignored
    :param no_shell: don't run the command in a sub-shell
    :param cwd: sets the current directory before the child is executed
    :returns: tuple of (return code, stdout, stderr)
    """
    return run_cmd_sync(
        cmd,
        ignore_return_code=ignore_return_code,
        no_shell=no_shell,
        cwd=cwd,
    )
def eager_map(func, iterable):
    """Map version for Python 3.x which is eager and returns nothing."""
    for item in iterable:
        func(item)
def assert_seccomp_level(pid, seccomp_level):
    """Test that seccomp_level applies to all threads of a process."""
    # Collect the thread ids of the target process.
    threads = run_cmd(
        "ps -T --no-headers -p {} | awk '{{print $2}}'".format(pid)
    ).stdout.splitlines()
    for tid in threads:
        # Verify each thread's Seccomp status.
        status = run_cmd("cat /proc/{}/status | grep Seccomp:".format(tid))
        assert "".join(status.stdout.split()) == "Seccomp:" + seccomp_level
def get_cpu_percent(pid: int, iterations: int, omit: int) -> dict:
    """Get total PID CPU percentage, as in system time plus user time.

    If the PID has corresponding threads, creates a dictionary with the
    lists of instant loads for each thread. The first `omit` seconds are
    skipped before sampling starts.
    """
    assert iterations > 0
    time.sleep(omit)
    samples = {}
    for _ in range(iterations):
        snapshot = ProcessManager.get_cpu_percent(pid)
        assert len(snapshot) > 0
        for thread_name, task_ids in snapshot.items():
            per_thread = samples.setdefault(thread_name, {})
            for task_id in task_ids:
                per_thread.setdefault(task_id, []).append(task_ids[task_id])
        time.sleep(1)  # 1 second granularity.
    return samples
def summarize_cpu_percent(cpu_percentages: dict):
    """
    Aggregates the results of `get_cpu_percent` into average utilization for
    the vmm thread, and total average utilization of all vcpu threads.

    :param cpu_percentages: mapping {thread_name: {thread_id -> [cpu samples]}}.
    :return: A tuple (vmm utilization, total vcpu utilization)
    """

    def _mean_of(thread_name):
        assert thread_name in cpu_percentages and cpu_percentages[thread_name]
        # Generally there is just one thread per name; use the first one
        # (occasionally two 'firecracker' threads exist).
        samples = next(iter(cpu_percentages[thread_name].values()))
        return sum(samples) / len(samples)

    total_vcpu = 0
    idx = 0
    while f"fc_vcpu {idx}" in cpu_percentages:
        total_vcpu += _mean_of(f"fc_vcpu {idx}")
        idx += 1
    return _mean_of("firecracker"), total_vcpu
def run_guest_cmd(ssh_connection, cmd, expected, use_json=False):
    """Runs a shell command at the remote accessible via SSH"""
    _, stdout, stderr = ssh_connection.run(cmd)
    assert stderr == ""
    actual = json.loads(stdout) if use_json else stdout
    assert actual == expected
@retry(delay=0.5, tries=5)
def wait_process_termination(p_pid):
    """Wait for a process to terminate.

    Polls `ps` for the pid; retried up to 5 times, 0.5s apart, by the
    `retry` decorator. Returns successfully once `ps` fails (the process
    is gone), otherwise raises an exception naming the still-alive
    process.
    """
    try:
        _, stdout, _ = run_cmd("ps --pid {} -o comm=".format(p_pid))
    except ChildProcessError:
        # `ps` exits non-zero when the pid no longer exists.
        return
    raise Exception("{} process is still alive: ".format(stdout.strip()))
def get_firecracker_version_from_toml():
    """
    Return the version of the firecracker crate, from Cargo.toml.

    Should be the same as the output of `./firecracker --version`, if
    the code has not been released.
    """
    rc, stdout, stderr = run_cmd(
        "cd ../src/firecracker && cargo pkgid | cut -d# -f2 | cut -d: -f2"
    )
    assert rc == 0, stderr
    return packaging.version.parse(stdout)
def compare_versions(first, second):
    """
    Compare two versions with format `X.Y.Z`.

    :param first: first version string
    :param second: second version string
    :returns: 0 if equal, <0 if first < second, >0 if second < first
    """
    lhs = [int(part) for part in first.split(".")]
    rhs = [int(part) for part in second.split(".")]
    for idx in range(3):
        delta = lhs[idx] - rhs[idx]
        if delta:
            return delta
    return 0
def sanitize_version(version):
    """
    Get rid of dirty version information.

    Transform version from format `vX.Y.Z-W` to `X.Y.Z`.
    """
    # Drop a leading letter prefix (e.g. 'v'), then any '-suffix'.
    stripped = version[1:] if version[0].isalpha() else version
    return stripped.partition("-")[0]
def get_kernel_version(level=2):
    """Return the current kernel version in format `major.minor.patch`.

    `level` is the number of dots to keep: the release string is cut at
    the (level+1)-th dot or at the first character that is neither a
    digit nor a dot, whichever comes first.
    """
    release = platform.release()
    dots_seen = 0
    for pos, char in enumerate(release):
        if char == ".":
            dots_seen += 1
        if dots_seen > level or (not char.isdigit() and char != "."):
            return release[:pos]
    return release
def is_io_uring_supported():
    """
    Return whether Firecracker supports io_uring for the running kernel
    version.
    """
    current = get_kernel_version()
    return compare_versions(current, MIN_KERNEL_VERSION_FOR_IO_URING) >= 0
def generate_mmds_session_token(ssh_connection, ipv4_address, token_ttl):
    """Generate session token used for MMDS V2 requests."""
    request = (
        "curl -m 2 -s"
        " -X PUT"
        ' -H "X-metadata-token-ttl-seconds: {}"'
        " http://{}/latest/api/token"
    ).format(token_ttl, ipv4_address)
    _, stdout, _ = ssh_connection.run(request)
    return stdout
def generate_mmds_get_request(ipv4_address, token=None, app_json=True):
    """Build `GET` request to fetch metadata from MMDS."""
    parts = ["curl -m 2 -s"]
    if token is not None:
        # MMDS V2 requires the session token header.
        parts.append(" -X GET")
        parts.append(' -H "X-metadata-token: {}"'.format(token))
    if app_json:
        parts.append(' -H "Accept: application/json"')
    parts.append(" http://{}/".format(ipv4_address))
    return "".join(parts)
def configure_mmds(
    test_microvm, iface_ids, version=None, ipv4_address=None, fc_version=None
):
    """Configure mmds service."""
    mmds_config = {"network_interfaces": iface_ids}
    if version is not None:
        mmds_config["version"] = version
    # For versions prior to v1.0.0, the mmds config only contains
    # the ipv4_address.
    if fc_version is not None and compare_versions(fc_version, "1.0.0") < 0:
        mmds_config = {}
    if ipv4_address:
        mmds_config["ipv4_address"] = ipv4_address
    return test_microvm.api.mmds_config.put(**mmds_config)
def populate_data_store(test_microvm, data_store):
    """Populate the MMDS data store of the microvm with the provided data"""
    # The store must start empty, and must echo the data back after the PUT.
    assert test_microvm.api.mmds.get().json() == {}
    test_microvm.api.mmds.put(**data_store)
    assert test_microvm.api.mmds.get().json() == data_store
def start_screen_process(screen_log, session_name, binary_path, binary_params):
    """Start binary process into a screen session."""
    run_cmd(
        "screen -L -Logfile {logfile} -dmS {session} {binary} {params}".format(
            logfile=screen_log,
            session=session_name,
            binary=binary_path,
            params=" ".join(binary_params),
        )
    )
    # `screen -ls` prints sessions as `(number).session_name`.
    session_regex = re.compile(r"([0-9]+)\.{}".format(session_name))
    # Poll 'screen -ls' (up to 30 tries, 1s apart) until the session shows
    # up; search_output_from_cmd raises RuntimeError while it is absent.
    screen_pid = retry_call(
        search_output_from_cmd,
        fkwargs={"cmd": "screen -ls", "find_regex": session_regex},
        exceptions=RuntimeError,
        tries=30,
        delay=1,
    ).group(1)
    # Make sure the screen process launched successfully,
    # as the parent process for the binary.
    screen_ps = psutil.Process(int(screen_pid))
    wait_process_running(screen_ps)
    # Configure screen to flush stdout to file.
    run_cmd(FLUSH_CMD.format(session=session_name))
    children = screen_ps.children()
    if len(children) != 1:
        raise RuntimeError(
            f"Failed to retrieve child process id for binary {binary_path}. "
            f"screen session process had [{len(children)}]"
        )
    return screen_pid, children[0].pid
def guest_run_fio_iteration(ssh_connection, iteration):
    """Start FIO workload into a microVM."""
    # Backgrounded random-read workload; its output lands in /tmp/fio<i>.
    fio = """fio --filename=/dev/vda --direct=1 --rw=randread --bs=4k \
    --ioengine=libaio --iodepth=16 --runtime=10 --numjobs=4 --time_based \
    --group_reporting --name=iops-test-job --eta-newline=1 --readonly \
    --output /tmp/fio{} > /dev/null &""".format(
        iteration
    )
    exit_code, _, stderr = ssh_connection.run(fio)
    assert exit_code == 0, stderr
def check_filesystem(ssh_connection, disk_fmt, disk):
    """Check for filesystem corruption inside a microVM."""
    # squashfs is read-only; there is nothing fsck can verify.
    if disk_fmt == "squashfs":
        return
    exit_code, _, stderr = ssh_connection.run("fsck.{} -n {}".format(disk_fmt, disk))
    assert exit_code == 0, stderr
def check_entropy(ssh_connection):
    """Check that we can get random numbers from /dev/hwrng"""
    exit_code, _, stderr = ssh_connection.run(
        "dd if=/dev/hwrng of=/dev/null bs=4096 count=1"
    )
    assert exit_code == 0, stderr
@retry(delay=0.5, tries=5)
def wait_process_running(process):
    """Wait for a process to run.

    Retried by the `retry` decorator (5 tries, 0.5s apart); returns once
    `process.is_running()` is true, otherwise the final AssertionError
    propagates to the caller.
    """
    assert process.is_running()
class Timeout:
    """
    A Context Manager to timeout sections of code.

    >>> with Timeout(30):
    >>>     time.sleep(35)
    """

    def __init__(self, seconds, msg="Timed out"):
        """Arm a timeout of `seconds`; `msg` becomes the error text."""
        self.seconds = seconds
        self.msg = msg

    def handle_timeout(self, signum, frame):
        """Handle SIGALRM signal"""
        # BUG FIX: `msg` was accepted but never used; attach it to the error.
        raise TimeoutError(self.msg)

    def __enter__(self):
        signal.signal(signal.SIGALRM, self.handle_timeout)
        signal.alarm(self.seconds)

    def __exit__(self, _type, _value, _traceback):
        # Cancel any pending alarm, whether the block succeeded or raised.
        signal.alarm(0)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,854
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/utils_vsock.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Helper functions for testing vsock device."""
import hashlib
import os.path
import re
import time
from pathlib import Path
from socket import AF_UNIX, SOCK_STREAM, socket
from subprocess import Popen
from threading import Thread
# Guest vsock port the echo server listens on.
ECHO_SERVER_PORT = 5252
# Accept backlog size (presumably for the echo server; not referenced in
# this module — TODO confirm against the vsock_helper binary).
SERVER_ACCEPT_BACKLOG = 128
# Number of concurrent echo workers spawned per direction.
TEST_CONNECTION_COUNT = 50
# Default size of the random test blob (1 MiB).
BLOB_SIZE = 1 * 1024 * 1024
# Chunk size used when streaming the blob over the socket.
BUF_SIZE = 64 * 1024
# Host-side Unix socket backing the vsock device.
VSOCK_UDS_PATH = "v.sock"
class HostEchoWorker(Thread):
    """A vsock echo worker, connecting to a guest echo server.

    This will initiate a connection to a guest echo server, then start
    sending it the contents of the file at `blob_path`. The echo server
    should send the exact same data back, so a hash is performed on
    everything received from the server. This hash will later be checked
    against the hashed contents of `blob_path`.
    """

    def __init__(self, uds_path, blob_path):
        """Connect to the guest echo server and prepare the worker state."""
        super().__init__()
        self.uds_path = uds_path
        self.blob_path = blob_path
        # md5 hex digest of the echoed-back data; set by _run() on success.
        self.hash = None
        # Exception raised inside run(), if any; inspect after join().
        self.error = None
        # Connect in __init__ (i.e. in the spawning thread), not in run().
        self.sock = _vsock_connect_to_guest(self.uds_path, ECHO_SERVER_PORT)

    def run(self):
        """Thread code payload.

        Wrap up the real "run" into a catch-all block, because Python cannot
        into threads - if this thread were to raise an unhandled exception,
        the whole process would lock.
        """
        try:
            self._run()
        # pylint: disable=broad-except
        except Exception as err:
            self.error = err

    def close_uds(self):
        """Close vsock UDS connection."""
        self.sock.close()

    def _run(self):
        # Stream the blob in BUF_SIZE chunks; after each chunk is fully
        # sent, read back exactly as many bytes as were sent before moving
        # on, folding them into the running md5.
        with open(self.blob_path, "rb") as blob_file:
            hash_obj = hashlib.md5()
            while True:
                buf = blob_file.read(BUF_SIZE)
                if not buf:
                    break
                sent = self.sock.send(buf)
                # send() may accept only part of the buffer; loop until done.
                while sent < len(buf):
                    sent += self.sock.send(buf[sent:])
                buf = self.sock.recv(sent)
                # recv() may likewise return partial data; loop until the
                # whole echoed chunk is back.
                while len(buf) < sent:
                    buf += self.sock.recv(sent - len(buf))
                hash_obj.update(buf)
            self.hash = hash_obj.hexdigest()
def make_blob(dst_dir, size=BLOB_SIZE):
    """Generate a random data file.

    Returns the path of the file and the md5 hex digest of its contents.
    """
    blob_path = os.path.join(dst_dir, "vsock-test.blob")
    digest = hashlib.md5()
    with open(blob_path, "wb") as blob_file:
        remaining = size
        # Write 4 KiB of fresh random bytes at a time.
        while remaining > 0:
            chunk = os.urandom(min(remaining, 4096))
            digest.update(chunk)
            blob_file.write(chunk)
            remaining -= len(chunk)
    return blob_path, digest.hexdigest()
def check_host_connections(vm, uds_path, blob_path, blob_hash):
    """Test host-initiated connections.

    This will start a daemonized echo server on the guest VM, and then spawn
    `TEST_CONNECTION_COUNT` `HostEchoWorker` threads.
    After the workers are done transferring the data read from `blob_path`,
    the hashes they computed for the data echoed back by the server are
    checked against `blob_hash`.
    """
    ecode, _, _ = vm.ssh.run(f"/tmp/vsock_helper echosrv -d {ECHO_SERVER_PORT}")
    assert ecode == 0
    workers = []
    for _ in range(TEST_CONNECTION_COUNT):
        worker = HostEchoWorker(uds_path, blob_path)
        workers.append(worker)
        worker.start()
    # Wait for every worker before verifying any hash.
    for worker in workers:
        worker.join()
    for worker in workers:
        assert worker.hash == blob_hash
def check_guest_connections(vm, server_port_path, blob_path, blob_hash):
    """Test guest-initiated connections.

    This will start an echo server on the host (in its own thread), then
    start `TEST_CONNECTION_COUNT` workers inside the guest VM, all
    communicating with the echo server.
    """
    # Host-side echo server: socat forks a `cat` per accepted connection.
    echo_server = Popen(
        ["socat", f"UNIX-LISTEN:{server_port_path},fork,backlog=5", "exec:'/bin/cat'"]
    )
    # Link the listening Unix socket into the VM's jail, so that
    # Firecracker can connect to it.
    attempt = 0
    # But 1st, give socat a bit of time to create the socket (poll up to
    # 3 times, 0.2s apart).
    while not Path(server_port_path).exists() and attempt < 3:
        time.sleep(0.2)
        attempt += 1
    vm.create_jailed_resource(server_port_path)
    # Increase maximum process count for the ssh service.
    # Avoids: "bash: fork: retry: Resource temporarily unavailable"
    # Needed to execute the bash script that tests for concurrent
    # vsock guest initiated connections.
    pids_max_file = "/sys/fs/cgroup/system.slice/ssh.service/pids.max"
    ecode, _, _ = vm.ssh.run(f"echo 1024 > {pids_max_file}")
    assert ecode == 0, "Unable to set max process count for guest ssh service."
    # Build the guest worker sub-command.
    # `vsock_helper` will read the blob file from STDIN and send the echo
    # server response to STDOUT. This response is then hashed, and the
    # hash is compared against `blob_hash` (computed on the host). This
    # comparison sets the exit status of the worker command.
    worker_cmd = "hash=$("
    worker_cmd += "cat {}".format(blob_path)
    worker_cmd += " | /tmp/vsock_helper echo 2 {}".format(ECHO_SERVER_PORT)
    worker_cmd += " | md5sum | cut -f1 -d\\ "
    worker_cmd += ")"
    worker_cmd += ' && [[ "$hash" = "{}" ]]'.format(blob_hash)
    # Run `TEST_CONNECTION_COUNT` concurrent workers, using the above
    # worker sub-command.
    # If any worker fails, this command will fail. If all worker sub-commands
    # succeed, this will also succeed.
    cmd = 'workers="";'
    cmd += "for i in $(seq 1 {}); do".format(TEST_CONNECTION_COUNT)
    cmd += " ({})& ".format(worker_cmd)
    cmd += ' workers="$workers $!";'
    cmd += "done;"
    cmd += "for w in $workers; do wait $w || exit -1; done"
    ecode, _, stderr = vm.ssh.run(cmd)
    echo_server.terminate()
    rc = echo_server.wait()
    # socat exits with 128 + 15 (SIGTERM)
    assert rc == 143
    assert ecode == 0, stderr
def make_host_port_path(uds_path, port):
    """Build the path for a Unix socket, mapped to host vsock port `port`."""
    return f"{uds_path}_{port}"
def _vsock_connect_to_guest(uds_path, port):
    """Return a Unix socket, connected to the guest vsock port `port`."""
    sock = socket(AF_UNIX, SOCK_STREAM)
    sock.connect(uds_path)
    # Firecracker's hybrid-vsock handshake: request the port, expect an ack.
    sock.send(bytearray(f"CONNECT {port}\n".encode("utf-8")))
    ack = sock.recv(32).decode("utf-8")
    assert re.match("^OK [0-9]+\n$", ack) is not None
    return sock
def _copy_vsock_data_to_guest(ssh_connection, blob_path, vm_blob_path, vsock_helper):
# Copy the data file and a vsock helper to the guest.
cmd = "mkdir -p /tmp/vsock"
ecode, _, _ = ssh_connection.run(cmd)
assert ecode == 0, "Failed to set up tmpfs drive on the guest."
ssh_connection.scp_put(vsock_helper, "/tmp/vsock_helper")
ssh_connection.scp_put(blob_path, vm_blob_path)
def check_vsock_device(vm, bin_vsock_path, test_fc_session_root_path, ssh_connection):
    """Create a blob and test guest and host initiated connections on vsock."""
    vm_blob_path = "/tmp/vsock/test.blob"
    # Random payload plus its md5, used to validate both directions.
    blob_path, blob_hash = make_blob(test_fc_session_root_path)
    _copy_vsock_data_to_guest(ssh_connection, blob_path, vm_blob_path, bin_vsock_path)
    # Guest-initiated: guest workers dial a host-side echo server.
    guest_path = os.path.join(
        vm.path, make_host_port_path(VSOCK_UDS_PATH, ECHO_SERVER_PORT)
    )
    check_guest_connections(vm, guest_path, vm_blob_path, blob_hash)
    # Host-initiated: host workers dial the guest echo server.
    host_path = os.path.join(vm.jailer.chroot_path(), VSOCK_UDS_PATH)
    check_host_connections(vm, host_path, blob_path, blob_hash)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,855
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tools/compare_baselines/utils/fetcher.py
|
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Utility classes to fetch baseline data"""
import glob
import json
import os
import re
from utils.defs import BASELINE_FILENAME_PATTERN
class InvalidFilenameError(Exception):
    """Error for invalid file name"""

    def __init__(self, fname):
        """Build the error message from the offending file name."""
        self._message = (
            f"{fname} does not match the pattern `{BASELINE_FILENAME_PATTERN}`."
        )
        super().__init__(self._message)

    def __str__(self):
        """Return the formatted message."""
        return self._message
class BaselineFileFetcher:
    """Class for fetching baselines from file."""

    def __init__(self, fpath):
        """Parse the file name for test/kernel info and load the JSON body.

        Raises InvalidFilenameError when the name does not match
        BASELINE_FILENAME_PATTERN.
        """
        fname = os.path.basename(fpath)
        match = re.match(BASELINE_FILENAME_PATTERN, fname)
        if match is None:
            raise InvalidFilenameError(fname)
        self._fpath = fpath
        self._fname = fname
        # Groups 1 and 2 of the pattern encode test type and kernel version.
        self._test, self._kernel = match.group(1, 2)
        with open(fpath, "r", encoding="utf-8") as file:
            self._raw = json.load(file)

    @property
    def fpath(self):
        """Return path of baseline file"""
        return self._fpath

    @property
    def fname(self):
        """Return file name of baseline file"""
        return self._fname

    @property
    def test(self):
        """Return test type"""
        return self._test

    @property
    def kernel(self):
        """Return kernel version"""
        return self._kernel

    def get_baseline(self, instance, model):
        """Get baseline values by instance type and CPU model"""
        instances = self._raw["hosts"]["instances"]
        if instance not in instances:
            return None
        for cpu_baseline in instances[instance]["cpus"]:
            if cpu_baseline["model"] == model:
                return cpu_baseline["baselines"]
        return None

    def get_instances(self):
        """Get list of instances"""
        return list(self._raw["hosts"]["instances"])

    def get_models(self, instance):
        """Get list of CPU models"""
        cpus = self._raw["hosts"]["instances"][instance]["cpus"]
        return [cpu["model"] for cpu in cpus]

    def get_cpus(self):
        """Get list of CPUs"""
        return [
            {"instance": instance, "model": cpu["model"]}
            for instance, value in self._raw["hosts"]["instances"].items()
            for cpu in value["cpus"]
        ]
class BaselineDirectoryFetcher:
    """Class for fetching baselines from directory."""

    def __init__(self, dpath):
        """Discover and load every matching baseline JSON file under `dpath`."""
        pattern = re.compile(BASELINE_FILENAME_PATTERN)
        candidates = sorted(glob.glob(os.path.join(dpath, "*.json")))
        self._dpath = dpath
        self._fetchers = {
            path: BaselineFileFetcher(path)
            for path in candidates
            if pattern.match(os.path.basename(path))
        }

    @property
    def dpath(self):
        """Return path of directory"""
        return self._dpath

    @property
    def fetchers(self):
        """Return lists of fetchers"""
        return self._fetchers

    def get_fetcher(self, test, kernel):
        """Get fetcher with test type and kernel version"""
        for fetcher in self._fetchers.values():
            if fetcher.test == test and fetcher.kernel == kernel:
                return fetcher
        return None
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,856
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_net_config_space.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests on devices config space."""
import platform
import random
import re
import string
import subprocess
from threading import Thread
import host_tools.network as net_tools # pylint: disable=import-error
# pylint: disable=global-statement
PAYLOAD_DATA_SIZE = 20
def test_net_change_mac_address(test_microvm_with_api, change_net_config_space_bin):
    """
    Test changing the MAC address of the network device.

    Verifies that `tx_spoofed_mac_count` increments after the guest changes
    the MAC only in its kernel struct, and stops incrementing once the MAC
    in the device's config space is updated to match.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    test_microvm.basic_config(boot_args="ipv6.disable=1")
    # Data exchange interface ('eth0' in guest).
    test_microvm.add_net_iface()
    # Control interface ('eth1' in guest).
    test_microvm.add_net_iface()
    test_microvm.start()
    # Create the control ssh connection.
    ssh_conn = test_microvm.ssh_iface(1)
    host_ip0 = test_microvm.iface["eth0"]["iface"].host_ip
    guest_ip0 = test_microvm.iface["eth0"]["iface"].guest_ip
    # Start a server(host) - client(guest) communication with the following
    # parameters.
    host_port = 4444
    iterations = 1
    _exchange_data(test_microvm.jailer, ssh_conn, host_ip0, host_port, iterations)
    # Baseline: no spoofed MACs before any change.
    fc_metrics = test_microvm.flush_metrics()
    assert fc_metrics["net"]["tx_spoofed_mac_count"] == 0
    # Change the MAC address of the network data interface.
    # This change will be propagated only inside the net device kernel struct
    # and will be used for ethernet frames formation when data is exchanged
    # on the network interface.
    mac = "06:05:04:03:02:01"
    mac_hex = "0x060504030201"
    guest_if1_name = net_tools.get_guest_net_if_name(ssh_conn, guest_ip0)
    assert guest_if1_name is not None
    _change_guest_if_mac(ssh_conn, mac, guest_if1_name)
    _exchange_data(test_microvm.jailer, ssh_conn, host_ip0, host_port, iterations)
    # `tx_spoofed_mac_count` metric was incremented due to the MAC address
    # change.
    fc_metrics = test_microvm.flush_metrics()
    assert fc_metrics["net"]["tx_spoofed_mac_count"] > 0
    net_addr_base = _get_net_mem_addr_base(ssh_conn, guest_if1_name)
    assert net_addr_base is not None
    # Write into '/dev/mem' the same mac address, byte by byte.
    # This changes the MAC address physically, in the network device registers.
    # After this step, the net device kernel struct MAC address will be the
    # same with the MAC address stored in the network device registers. The
    # `tx_spoofed_mac_count` metric shouldn't be incremented later on.
    rmt_path = "/tmp/change_net_config_space"
    test_microvm.ssh.scp_put(change_net_config_space_bin, rmt_path)
    cmd = f"chmod u+x {rmt_path} && {rmt_path} {net_addr_base} {mac_hex}"
    # This should be executed successfully.
    exit_code, stdout, stderr = ssh_conn.run(cmd)
    assert exit_code == 0, stderr
    assert stdout == mac
    # Discard any parasite data exchange which might've been
    # happened on the emulation thread while the config space
    # was changed on the vCPU thread.
    test_microvm.flush_metrics()
    _exchange_data(test_microvm.jailer, ssh_conn, host_ip0, host_port, iterations)
    fc_metrics = test_microvm.flush_metrics()
    assert fc_metrics["net"]["tx_spoofed_mac_count"] == 0
    # Try again, just to be extra sure.
    _exchange_data(test_microvm.jailer, ssh_conn, host_ip0, host_port, iterations)
    fc_metrics = test_microvm.flush_metrics()
    assert fc_metrics["net"]["tx_spoofed_mac_count"] == 0
def _create_server(jailer, host_ip, port, iterations):
    # Wait for `iterations` TCP segments, on one connection.
    # This server has to run under the network namespace, initialized
    # by the integration test microvm jailer.
    # pylint: disable=global-statement
    # Host-side TCP server, inlined as a python3 one-liner so it can be
    # executed under the jailer's netns via a shell prefix.
    script = (
        "import socket\n"
        "s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n"
        "s.setsockopt(\n"
        "    socket.SOL_SOCKET, socket.SO_REUSEADDR,\n"
        "    s.getsockopt(socket.SOL_SOCKET,\n"
        "                 socket.SO_REUSEADDR) | 1\n"
        ")\n"
        "s.bind(('{}', {}))\n"
        "s.listen(1)\n"
        "conn, addr = s.accept()\n"
        "recv_iterations = {}\n"
        "while recv_iterations > 0:\n"
        "    data = conn.recv({})\n"
        "    recv_iterations -= 1\n"
        "conn.close()\n"
        "s.close()"
    )
    # The host uses Python3
    cmd = 'python3 -c "{}"'.format(
        script.format(host_ip, port, iterations, PAYLOAD_DATA_SIZE)
    )
    # Prefix with the jailer's netns command so the server binds inside
    # the microvm's network namespace.
    netns_cmd = jailer.netns_cmd_prefix() + cmd
    exit_code = subprocess.call(netns_cmd, shell=True)
    assert exit_code == 0
def _send_data_g2h(ssh_connection, host_ip, host_port, iterations, data, retries):
    # Guest-side TCP client, inlined as a python3 one-liner: connect to the
    # host server (retrying once per second up to `retries` times, since the
    # server may not be listening yet), then send `data` `iterations` times.
    script = (
        "import socket\n"
        "import time\n"
        "s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n"
        "retries={}\n"
        "while retries > 0:\n"
        "    try:\n"
        "        s.connect(('{}',{}))\n"
        "        retries = 0\n"
        "    except Exception as e:\n"
        "        retries -= 1\n"
        "        time.sleep(1)\n"
        "        if retries == 0:\n"
        "            exit(1)\n"
        "send_iterations={}\n"
        "while send_iterations > 0:\n"
        "    s.sendall(b'{}')\n"
        "    send_iterations -= 1\n"
        "s.close()"
    )
    # The guest has Python3
    cmd = 'python3 -c "{}"'.format(
        script.format(retries, host_ip, str(host_port), iterations, data)
    )
    # Wait server to initialize.
    exit_code, _, stderr = ssh_connection.run(cmd)
    # If this assert fails, a connection refused happened.
    assert exit_code == 0, stderr
    assert stderr == ""
def _start_host_server_thread(jailer, host_ip, host_port, iterations):
    """Spawn the netns TCP server in a background thread and return the thread.

    The caller is responsible for join()-ing the returned thread once the
    guest client has finished sending.
    """
    server = Thread(
        target=_create_server,
        args=(jailer, host_ip, host_port, iterations),
    )
    server.start()
    return server
def _exchange_data(jailer, ssh_control_connection, host_ip, host_port, iterations):
    """Send random data guest->host over TCP and wait for the host to drain it."""
    listener = _start_host_server_thread(jailer, host_ip, host_port, iterations)

    # Random lowercase payload of PAYLOAD_DATA_SIZE characters.
    payload = "".join(
        random.choice(string.ascii_lowercase) for _ in range(PAYLOAD_DATA_SIZE)
    )

    # We need to synchronize host server with guest client. Server thread has
    # to start listening for incoming connections before the client tries to
    # connect. To synchronize, the guest client polls: it retries the
    # connect, in case the server has not started yet.
    _send_data_g2h(
        ssh_control_connection, host_ip, host_port, iterations, payload, retries=5
    )

    # Wait for host server to receive the data sent by the guest client.
    listener.join()
def _change_guest_if_mac(ssh_connection, guest_if_mac, guest_if_name):
cmd = "ip link set dev {} address ".format(guest_if_name) + guest_if_mac
# The connection will be down, because changing the mac will issue down/up
# on the interface.
ssh_connection.run(cmd)
def _get_net_mem_addr_base(ssh_connection, if_name):
    """Get the net device memory start address.

    Finds the virtio-mmio device backing guest interface `if_name` and
    returns its MMIO window start address as a hex string (with the "0x"
    prefix), or None if the interface is not found. Lookup strategy differs
    by architecture: x86_64 parses /proc/cmdline, aarch64 reads sysfs
    platform device names.
    """
    if platform.machine() == "x86_64":
        sys_virtio_mmio_cmdline = "/sys/devices/virtio-mmio-cmdline/"
        # List the virtio-mmio device indices present in sysfs.
        cmd = "ls {} | grep virtio-mmio. | sed 's/virtio-mmio.//'"
        exit_code, stdout, _ = ssh_connection.run(cmd.format(sys_virtio_mmio_cmdline))
        assert exit_code == 0
        virtio_devs_idx = stdout.split()

        # Device addresses are passed on the kernel command line as
        # virtio_mmio.device=4K@<addr>:<irq> entries.
        cmd = "cat /proc/cmdline"
        exit_code, cmd_line, _ = ssh_connection.run(cmd)
        assert exit_code == 0
        pattern_dev = re.compile("(virtio_mmio.device=4K@0x[0-9a-f]+:[0-9]+)+")
        pattern_addr = re.compile("virtio_mmio.device=4K@(0x[0-9a-f]+):[0-9]+")
        devs_addr = []
        for dev in re.findall(pattern_dev, cmd_line):
            matched_addr = pattern_addr.search(dev)
            # The 1st group which matches this pattern
            # is the device start address. `0` group is
            # full match.
            addr = matched_addr.group(1)
            devs_addr.append(addr)

        # NOTE(review): indexing devs_addr by the sysfs device index assumes
        # command-line order matches sysfs numbering — confirm against the
        # kernel's virtio-mmio cmdline device registration.
        cmd = "ls {}/virtio-mmio.{}/virtio{}/net"
        for idx in virtio_devs_idx:
            _, guest_if_name, _ = ssh_connection.run(
                cmd.format(sys_virtio_mmio_cmdline, idx, idx)
            )
            if guest_if_name.strip() == if_name:
                return devs_addr[int(idx)]
    elif platform.machine() == "aarch64":
        sys_virtio_mmio_cmdline = "/sys/devices/platform"
        cmd = "ls {} | grep .virtio_mmio".format(sys_virtio_mmio_cmdline)
        rc, stdout, _ = ssh_connection.run(cmd)
        assert rc == 0
        virtio_devs = stdout.split()
        # Platform device names look like "<addr>.virtio_mmio"; keep the
        # address portion before the first dot.
        devs_addr = list(map(lambda dev: dev.split(".")[0], virtio_devs))

        cmd = "ls {}/{}/virtio{}/net"
        # Device start addresses lack the hex prefix and are not interpreted
        # accordingly when parsed inside `change_config_space.c`.
        hex_prefix = "0x"
        for idx, dev in enumerate(virtio_devs):
            _, guest_if_name, _ = ssh_connection.run(
                cmd.format(sys_virtio_mmio_cmdline, dev, idx)
            )
            if guest_if_name.strip() == if_name:
                return hex_prefix + devs_addr[int(idx)]

    # Interface not found (or unsupported architecture).
    return None
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,857
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_rtc.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Check the well functioning af the RTC device on aarch64 platforms."""
import platform
import re
import pytest
from framework import utils
DMESG_LOG_REGEX = r"rtc-pl031\s+(\d+).rtc: setting system clock to"
@pytest.mark.skipif(
    platform.machine() != "aarch64", reason="RTC exists only on aarch64."
)
def test_rtc(test_microvm_with_api):
    """
    Test RTC functionality on aarch64.

    Checks that the guest kernel registers a pl031 RTC, that /dev/rtc0 is a
    character device, and that guest wall-clock time tracks the host's.
    """
    vm = test_microvm_with_api
    vm.spawn()
    vm.memory_monitor = None
    vm.basic_config()
    vm.add_net_iface()
    vm.start()
    # check that the kernel creates an rtcpl031 base device.
    _, stdout, _ = vm.ssh.run("dmesg")
    rtc_log = re.findall(DMESG_LOG_REGEX, stdout)
    # re.findall() returns a (possibly empty) list, never None, so the old
    # `is not None` check could never fail; assert there is at least one match.
    assert rtc_log
    _, stdout, _ = vm.ssh.run("stat /dev/rtc0")
    assert "character special file" in stdout
    # Guest and host clocks must agree to within 5 seconds.
    _, host_stdout, _ = utils.run_cmd("date +%s")
    _, guest_stdout, _ = vm.ssh.run("date +%s")
    assert abs(int(guest_stdout) - int(host_stdout)) < 5
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,858
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_feat_parity.py
|
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests for the verifying features exposed by CPUID and MSRs by various CPU templates."""
import pytest
import framework.utils_cpuid as cpuid_utils
from framework.properties import global_props
from framework.utils_cpu_templates import SUPPORTED_CPU_TEMPLATES
pytestmark = pytest.mark.skipif(
global_props.cpu_architecture != "x86_64", reason="x86_64 specific tests"
)
# CPU templates designed to provide instruction set feature parity
INST_SET_TEMPLATES = ["T2A", "T2CL"]
@pytest.fixture(
    name="inst_set_cpu_template",
    params=sorted(set(SUPPORTED_CPU_TEMPLATES).intersection(INST_SET_TEMPLATES)),
)
def inst_set_cpu_template_fxt(request):
    """CPU template fixture for instruction set feature parity templates.

    Parametrized over the supported subset of INST_SET_TEMPLATES, so each
    test using the fixture runs once per available parity template.
    """
    return request.param
@pytest.fixture(name="vm")
def vm_fxt(
    microvm_factory,
    inst_set_cpu_template,
    guest_kernel,
    rootfs,
):
    """
    Create a VM, using the normal CPU templates.

    Boots a 1-vCPU / 1024 MiB guest with one network interface and the
    parametrized instruction-set-parity CPU template applied.
    """
    vm = microvm_factory.build(guest_kernel, rootfs)
    vm.spawn()
    vm.basic_config(vcpu_count=1, mem_size_mib=1024, cpu_template=inst_set_cpu_template)
    vm.add_net_iface()
    vm.start()
    return vm
def test_feat_parity_cpuid_mpx(vm):
    """
    Verify that MPX (Memory Protection Extensions) is not enabled in any of the supported CPU templates.
    """
    # MPX is bit 14 of CPUID.(EAX=0x7,ECX=0x0):EBX; no flags need to be set.
    # fmt: off
    mpx_bit = [
        (0x7, 0x0, "ebx",
            (1 << 14)  # MPX
         ),
    ]
    # fmt: on
    cpuid_utils.check_cpuid_feat_flags(vm, [], mpx_bit)
@pytest.mark.parametrize(
    "inst_set_cpu_template",
    sorted(set(SUPPORTED_CPU_TEMPLATES).intersection(INST_SET_TEMPLATES + ["T2"])),
    indirect=True,
)
def test_feat_parity_cpuid_inst_set(vm):
    """
    Verify that CPUID feature flags related to instruction sets are properly set
    for T2, T2CL and T2A CPU templates.

    Each entry is (leaf, subleaf, register, feature-bit mask); flags in
    `must_be_set` must all read 1, flags in `must_be_unset` must all read 0.
    """
    # fmt: off
    must_be_set = [
        (0x7, 0x0, "ebx",
            (1 << 5) |  # AVX2
            (1 << 9)    # REP MOVSB/STOSB
         ),
    ]
    must_be_unset = [
        (0x1, 0x0, "ecx",
            (1 << 15)   # PDCM
         ),
        (0x7, 0x0, "ebx",
            (1 << 16) |  # AVX512F
            (1 << 17) |  # AVX512DQ
            (1 << 18) |  # RDSEED
            (1 << 19) |  # ADX
            (1 << 23) |  # CLFLUSHOPT
            (1 << 24) |  # CLWB
            (1 << 29) |  # SHA
            (1 << 30) |  # AVX512BW
            (1 << 31)    # AVX512VL
         ),
        (0x7, 0x0, "ecx",
            (1 << 1) |   # AVX512_VBMI
            (1 << 6) |   # AVX512_VBMI2
            (1 << 8) |   # GFNI
            (1 << 9) |   # VAES
            (1 << 10) |  # VPCLMULQDQ
            (1 << 11) |  # AVX512_VNNI
            (1 << 12) |  # AVX512_BITALG
            (1 << 14) |  # AVX512_VPOPCNTDQ
            (1 << 22)    # RDPID/IA32_TSC_AUX
         ),
        (0x7, 0x0, "edx",
            (1 << 2) |  # AVX512_4VNNIW
            (1 << 3) |  # AVX512_4FMAPS
            (1 << 4) |  # Fast Short REP MOV
            (1 << 8)    # AVX512_VP2INTERSECT
         ),
        (0x80000001, 0x0, "ecx",
            (1 << 6) |   # SSE4A
            (1 << 7) |   # MisAlignSee
            (1 << 8) |   # PREFETCHW
            (1 << 29)    # MwaitExtended
         ),
        (0x80000001, 0x0, "edx",
            (1 << 22) |  # MmxExt
            (1 << 25)    # FFXSR
         ),
        (0x80000008, 0x0, "ebx",
            (1 << 0) |   # CLZERO
            (1 << 4) |   # RDPRU
            (1 << 8) |   # MCOMMIT
            (1 << 9) |   # WBNOINVD
            (1 << 13)    # INT_WBINVD
         ),
    ]
    # fmt: on

    cpuid_utils.check_cpuid_feat_flags(
        vm,
        must_be_set,
        must_be_unset,
    )
def test_feat_parity_cpuid_sec(vm):
    """
    Verify that security-related CPUID feature flags are properly set
    for T2CL and T2A CPU templates.

    Vendor-specific lists are merged with the common ones depending on
    whether the host CPU is Intel or AMD.
    """
    # fmt: off
    must_be_set_common = [
        (0x7, 0x0, "edx",
            (1 << 26) |  # IBRS/IBPB
            (1 << 27) |  # STIBP
            (1 << 31)    # SSBD
         )
        # Security feature bits in 0x80000008 EBX are set differently by
        # 4.14 and 5.10 KVMs.
        # 4.14 populates them from host's AMD flags (0x80000008 EBX), while
        # 5.10 takes them from host's common flags (0x7 EDX).
        # There is no great value in checking that this actually happens, as
        # we cannot really control it.
        # When we drop 4.14 support, we may consider enabling this check.
        # (0x80000008, 0x0, "ebx",
        #     (1 << 12) |  # IBPB
        #     (1 << 14) |  # IBRS
        #     (1 << 15) |  # STIBP
        #     (1 << 24)    # SSBD
        #  )
    ]

    must_be_set_intel_only = [
        (0x7, 0x0, "edx",
            (1 << 10) |  # MD_CLEAR
            (1 << 29)    # IA32_ARCH_CAPABILITIES
         )
    ]

    must_be_set_amd_only = [
        (0x80000008, 0x0, "ebx",
            (1 << 18) |  # IbrsPreferred
            (1 << 19)    # IbrsProvidesSameModeProtection
         )
    ]

    must_be_unset_common = [
        (0x7, 0x0, "edx",
            (1 << 28)    # L1D_FLUSH
         )
    ]

    # Intel-only "set" flags must be absent on AMD, and vice versa.
    must_be_unset_intel_only = [
        (0x80000008, 0x0, "ebx",
            (1 << 18) |  # IbrsPreferred
            (1 << 19)    # IbrsProvidesSameModeProtection
         )
    ]

    must_be_unset_amd_only = [
        (0x7, 0x0, "edx",
            (1 << 10) |  # MD_CLEAR
            (1 << 29)    # IA32_ARCH_CAPABILITIES
         )
    ]
    # fmt: on

    vendor = cpuid_utils.get_cpu_vendor()
    if vendor == cpuid_utils.CpuVendor.INTEL:
        must_be_set = must_be_set_common + must_be_set_intel_only
        must_be_unset = must_be_unset_common + must_be_unset_intel_only
    elif vendor == cpuid_utils.CpuVendor.AMD:
        must_be_set = must_be_set_common + must_be_set_amd_only
        must_be_unset = must_be_unset_common + must_be_unset_amd_only
    else:
        raise Exception("Unsupported CPU vendor.")

    cpuid_utils.check_cpuid_feat_flags(
        vm,
        must_be_set,
        must_be_unset,
    )
def test_feat_parity_msr_arch_cap(vm):
    """
    Verify availability and value of the IA32_ARCH_CAPABILITIES MSR for T2CL and T2A CPU templates.

    On T2CL (Intel) the MSR must be readable and carry an exact bit pattern;
    on T2A (AMD) reading the MSR must fail.
    """
    # IA32_ARCH_CAPABILITIES lives at MSR address 0x10a.
    arch_capabilities_addr = "0x10a"
    rdmsr_cmd = f"rdmsr {arch_capabilities_addr}"
    _, stdout, stderr = vm.ssh.run(rdmsr_cmd)

    cpu_template = vm.api.vm_config.get().json()["machine-config"]["cpu_template"]
    if cpu_template == "T2CL":
        assert stderr == ""
        actual = int(stdout.strip(), 16)
        # fmt: off
        expected = (
            (1 << 0) |  # RDCL_NO
            (1 << 1) |  # IBRS_ALL
            (1 << 3) |  # SKIP_L1DFL_VMENTRY
            (1 << 5) |  # MDS_NO
            (1 << 6) |  # IF_PSCHANGE_MC_NO
            (1 << 7)    # TSX_CTRL
        )
        if global_props.cpu_codename == "INTEL_CASCADELAKE":
            expected |= (1 << 19)  # RRSBA
        # fmt: on
        assert actual == expected, f"{actual=:#x} != {expected=:#x}"
    elif cpu_template == "T2A":
        # IA32_ARCH_CAPABILITIES shall not be available
        assert stderr != ""
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,859
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/.buildkite/pipeline_pr_no_block.py
|
#!/usr/bin/env python3
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Generate Buildkite pipelines dynamically"""

from common import (
    COMMON_PARSER,
    get_changed_files,
    group,
    overlay_dict,
    pipeline_to_json,
    run_all_tests,
)

# Buildkite default job priority is 0. Setting this to 1 prioritizes PRs over
# scheduled jobs and other batch jobs.
DEFAULT_PRIORITY = 1

args = COMMON_PARSER.parse_args()

# Baseline step parameters; CLI-supplied --step-param values override these.
defaults = {
    "instances": args.instances,
    "platforms": args.platforms,
    # buildkite step parameters
    "timeout_in_minutes": 45,
    # some non-blocking tests are performance, so make sure they get ag=1 instances
    "priority": DEFAULT_PRIORITY + 1,
    "agents": {"ag": 1},
}
defaults = overlay_dict(defaults, args.step_param)

optional_grp = group(
    "❓ Optional",
    "./tools/devtool -y test -c 1-10 -m 0 -- ../tests/integration_tests/ -m 'no_block_pr and not nonci' --log-cli-level=INFO",
    **defaults,
)

# Emit the optional group only when the diff against `main` warrants a full
# test run; otherwise produce an empty pipeline.
changed_files = get_changed_files("main")
pipeline = {"steps": [optional_grp]} if run_all_tests(changed_files) else {"steps": []}
print(pipeline_to_json(pipeline))
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,860
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/host_tools/cargo_build.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Functionality for a shared binary build and release path for all tests."""
import os
import platform
from pathlib import Path
from framework import defs, utils
from framework.defs import FC_WORKSPACE_DIR, FC_WORKSPACE_TARGET_DIR
from framework.with_filelock import with_filelock
CARGO_BUILD_REL_PATH = "firecracker_binaries"
"""Keep a single build path across all build tests."""
CARGO_RELEASE_REL_PATH = os.path.join(CARGO_BUILD_REL_PATH, "release")
"""Keep a single Firecracker release binary path across all test types."""
DEFAULT_BUILD_TARGET = "{}-unknown-linux-musl".format(platform.machine())
RELEASE_BINARIES_REL_PATH = "{}/release/".format(DEFAULT_BUILD_TARGET)
CARGO_UNITTEST_REL_PATH = os.path.join(CARGO_BUILD_REL_PATH, "test")
def cargo(
    subcommand,
    cargo_args: str = "",
    subcommand_args: str = "",
    *,
    env: dict = None,
    cwd: str = None,
):
    """Execute `cargo <subcommand>` through the shell and return the result.

    `env` entries are prepended to the command line as KEY="VALUE" shell
    assignments; `subcommand_args` is passed after the `--` separator.
    """
    assignments = [f'{key}="{str(value)}"' for key, value in (env or {}).items()]
    env_string = " ".join(assignments)
    cmd = f"{env_string} cargo {subcommand} {cargo_args} -- {subcommand_args}"
    return utils.run_cmd(cmd, cwd=cwd)
def get_rustflags():
    """Return the RUSTFLAGS used for building/unit testing."""
    # aarch64 musl builds additionally need libgcc and libfdt at link time.
    if platform.machine() == "aarch64":
        return "-D warnings -C link-arg=-lgcc -C link-arg=-lfdt "
    return "-D warnings"
@with_filelock
def cargo_build(path, extra_args="", src_dir=None):
    """Trigger build depending on flags provided.

    :param path: directory used as CARGO_TARGET_DIR for the artifacts
    :param extra_args: extra arguments appended to `cargo build`
    :param src_dir: directory to run cargo in (current dir when None)
    """
    cargo("build", extra_args, env={"CARGO_TARGET_DIR": path}, cwd=src_dir)
def cargo_test(path, extra_args=""):
    """Trigger unit tests depending on flags provided.

    Runs `cargo test` for the whole workspace with backtraces enabled and a
    single test thread, writing artifacts under the dedicated unit-test
    target directory.
    """
    env = {
        "CARGO_TARGET_DIR": os.path.join(path, CARGO_UNITTEST_REL_PATH),
        "RUST_TEST_THREADS": 1,
        "RUST_BACKTRACE": 1,
        "RUSTFLAGS": get_rustflags(),
    }
    cargo("test", extra_args + " --all --no-fail-fast", env=env)
@with_filelock
def get_binary(name):
    """Return the path to release binary `name`, building it if missing.

    The binary is built for the default musl target and stripped of debug
    symbols on first use; subsequent calls reuse the existing artifact.
    """
    target = DEFAULT_BUILD_TARGET
    target_dir = FC_WORKSPACE_TARGET_DIR
    out_dir = Path(f"{target_dir}/{target}/release")
    bin_path = out_dir / name
    if not bin_path.exists():
        env = {"RUSTFLAGS": get_rustflags()}
        cargo(
            "build",
            f"-p {name} --release --target {target}",
            env=env,
            cwd=FC_WORKSPACE_DIR,
        )
        # Strip debug info to keep the artifact small.
        utils.run_cmd(f"strip --strip-debug {bin_path}")
    return bin_path
@with_filelock
def get_firecracker_binaries():
    """Build the Firecracker and Jailer binaries if they don't exist.

    Returns the location of the firecracker related binaries eventually after
    building them in case they do not exist at the specified root_path.
    """
    return get_binary("firecracker"), get_binary("jailer")
@with_filelock
def run_seccompiler_bin(bpf_path, json_path=defs.SECCOMP_JSON_DIR, basic=False):
    """
    Run seccompiler-bin.

    :param bpf_path: path to the output file
    :param json_path: optional path to json file
    :param basic: when True, pass `--basic` to generate the simplified filter
    """
    cargo_target = "{}-unknown-linux-musl".format(platform.machine())

    # If no custom json filter, use the default one for the current target.
    if json_path == defs.SECCOMP_JSON_DIR:
        json_path = json_path / "{}.json".format(cargo_target)

    seccompiler_args = f"--input-file {json_path} --target-arch {platform.machine()} --output-file {bpf_path}"

    if basic:
        seccompiler_args += " --basic"

    rc, _, _ = cargo(
        "run",
        f"-p seccompiler --target-dir {defs.SECCOMPILER_TARGET_DIR} --target {cargo_target}",
        seccompiler_args,
    )
    assert rc == 0
@with_filelock
def run_rebase_snap_bin(base_snap, diff_snap):
    """
    Run the `rebase-snap` tool to merge a diff snapshot into a base one.

    :param base_snap: path to the base snapshot mem file
    :param diff_snap: path to diff snapshot mem file
    """
    cargo_target = "{}-unknown-linux-musl".format(platform.machine())

    rc, _, _ = cargo(
        "run",
        f"-p rebase-snap --target {cargo_target}",
        f"--base-file {base_snap} --diff-file {diff_snap}",
    )
    assert rc == 0
@with_filelock
def gcc_compile(src_file, output_file, extra_flags="-static -O3"):
    """Build a source file with gcc, skipping recompilation if the output exists."""
    artifact = Path(output_file)
    if artifact.exists():
        return
    utils.run_cmd(f"gcc {src_file} -o {artifact} {extra_flags}")
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,861
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/host_tools/network.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Utilities for test host microVM network setup."""
import contextlib
import random
import string
from pathlib import Path
from nsenter import Namespace
from retry import retry
from framework import utils
class SSHConnection:
    """
    SSHConnection encapsulates functionality for microVM SSH interaction.

    This class should be instantiated as part of the ssh fixture with the
    hostname obtained from the MAC address, the username for logging into
    the image and the path of the ssh key.

    This translates into an SSH connection as follows:
    ssh -i ssh_key_path username@hostname
    """

    def __init__(self, netns_path, ssh_key: Path, host, user):
        """Instantiate a SSH client and connect to a microVM."""
        self.netns_file_path = netns_path
        self.ssh_key = ssh_key
        # check that the key exists and the permissions are 0o400
        # This saves a lot of debugging time.
        assert ssh_key.exists()
        ssh_key.chmod(0o400)
        assert (ssh_key.stat().st_mode & 0o777) == 0o400
        self.host = host
        self.user = user

        # Non-interactive options: quiet, fail fast, skip host-key checks
        # (guest host keys are throwaway), force publickey auth with our key.
        self.options = [
            "-q",
            "-o",
            "ConnectTimeout=1",
            "-o",
            "StrictHostKeyChecking=no",
            "-o",
            "UserKnownHostsFile=/dev/null",
            "-o",
            "PreferredAuthentications=publickey",
            "-i",
            str(self.ssh_key),
        ]

        self._init_connection()

    def remote_path(self, path):
        """Convert a path to remote"""
        return f"{self.user}@{self.host}:{path}"

    def _scp(self, path1, path2, options):
        """Copy files to/from the VM using scp."""
        ecode, _, stderr = self._exec(["scp", *options, path1, path2])
        assert ecode == 0, stderr

    def scp_put(self, local_path, remote_path, recursive=False):
        """Copy files to the VM using scp."""
        opts = self.options.copy()
        if recursive:
            opts.append("-r")
        self._scp(local_path, self.remote_path(remote_path), opts)

    def scp_get(self, remote_path, local_path, recursive=False):
        """Copy files from the VM using scp."""
        opts = self.options.copy()
        if recursive:
            opts.append("-r")
        self._scp(self.remote_path(remote_path), local_path, opts)

    @retry(ConnectionError, delay=0.15, tries=20)
    def _init_connection(self):
        """Create an initial SSH client connection (retry until it works).

        Since we're connecting to a microVM we just started, we'll probably
        have to wait for it to boot up and start the SSH server.
        We'll keep trying to execute a remote command that can't fail
        (`/bin/true`), until we get a successful (0) exit code.
        """
        ecode, _, _ = self.run("true")
        if ecode != 0:
            raise ConnectionError

    def run(self, cmd_string):
        """Execute the command passed as a string in the ssh context.

        Returns whatever the command runner returns for the assembled
        `ssh` invocation (exit code, stdout, stderr).
        """
        return self._exec(
            [
                "ssh",
                *self.options,
                f"{self.user}@{self.host}",
                cmd_string,
            ]
        )

    def _exec(self, cmd):
        """Private function that handles the ssh client invocation."""
        # TODO: If a microvm runs in a particular network namespace, we have to
        # temporarily switch to that namespace when doing something that routes
        # packets over the network, otherwise the destination will not be
        # reachable. Use a better setup/solution at some point!
        ctx = contextlib.nullcontext()
        if self.netns_file_path is not None:
            ctx = Namespace(self.netns_file_path, "net")
        with ctx:
            return utils.run_cmd(cmd, ignore_return_code=True)
def mac_from_ip(ip_address):
    """Create a MAC address based on the provided IP.

    The first two bytes are fixed at 06:00 and the remaining four are the
    IPv4 octets rendered as lowercase two-digit hex, e.g.
    mac_from_ip("192.168.241.2") -> "06:00:c0:a8:f1:02".

    :param ip_address: IP address as string
    :return: MAC address derived from the IP
    """
    octets = (int(part) for part in ip_address.split("."))
    return "06:00:" + ":".join(format(octet, "02x") for octet in octets)
def get_guest_net_if_name(ssh_connection, guest_ip):
    """Get network interface name based on its IPv4 address.

    Returns None when no interface carries `guest_ip`.
    """
    # `ip a s` lists addresses; grep the IP, squeeze spaces, cut field 6
    # (the interface name).
    cmd = f"ip a s | grep '{guest_ip}' | tr -s ' ' | cut -d' ' -f6"
    _, stdout, _ = ssh_connection.run(cmd)
    name = stdout.strip()
    return name or None
def random_str(k):
    """Create a random string of length `k` from lowercase letters and digits."""
    alphabet = string.ascii_lowercase + string.digits
    return "".join(random.choices(alphabet, k=k))
class Tap:
    """Functionality for creating a tap and cleaning up after it."""

    def __init__(self, name, netns, ip=None):
        """Set up the name and network namespace for this tap interface.

        It also creates a new tap device, brings it up and moves the interface
        to the specified namespace. When `ip` is given, the interface is
        configured with that address inside the namespace.
        """
        # Avoid a conflict if two tests want to create the same tap device tap0
        # in the host before moving it into its own netns
        temp_name = "tap" + random_str(k=8)
        utils.run_cmd(f"ip tuntap add mode tap name {temp_name}")
        utils.run_cmd(f"ip link set {temp_name} name {name} netns {netns}")
        if ip:
            utils.run_cmd(f"ip netns exec {netns} ifconfig {name} {ip} up")
        self._name = name
        self._netns = netns

    @property
    def name(self):
        """Return the name of this tap interface."""
        return self._name

    @property
    def netns(self):
        """Return the network namespace of this tap."""
        return self._netns

    def set_tx_queue_len(self, tx_queue_len):
        """Set the length of the tap's TX queue."""
        utils.run_cmd(
            "ip netns exec {} ip link set {} txqueuelen {}".format(
                self.netns, self.name, tx_queue_len
            )
        )

    def __repr__(self):
        """Debug representation showing the tap name and its namespace."""
        return f"<Tap name={self.name} netns={self.netns}>"
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,862
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/utils_cpuid.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Helper functions for testing CPU identification functionality."""
import platform
import re
import subprocess
from enum import Enum, auto
from framework.utils import run_cmd
from framework.utils_imdsv2 import imdsv2_get
class CpuVendor(Enum):
    """CPU vendors enum."""

    # Values are auto-generated; only identity comparisons are used.
    AMD = auto()
    INTEL = auto()
    ARM = auto()
CPU_DICT = {
CpuVendor.INTEL: {
"Intel(R) Xeon(R) Platinum 8175M CPU": "INTEL_SKYLAKE",
"Intel(R) Xeon(R) Platinum 8259CL CPU": "INTEL_CASCADELAKE",
"Intel(R) Xeon(R) Platinum 8375C CPU": "INTEL_ICELAKE",
},
CpuVendor.AMD: {
"AMD EPYC 7R13": "AMD_MILAN",
},
CpuVendor.ARM: {"0xd0c": "ARM_NEOVERSE_N1", "0xd40": "ARM_NEOVERSE_V1"},
}
def get_cpu_vendor():
    """Return the CPU vendor.

    AMD is detected from the `lscpu` vendor string, ARM from the machine
    architecture; anything else is treated as Intel.
    """
    lscpu_output = subprocess.check_output("lscpu", shell=True).strip().decode()
    if "AuthenticAMD" in lscpu_output:
        return CpuVendor.AMD
    if "aarch64" in platform.machine():
        return CpuVendor.ARM
    return CpuVendor.INTEL
def get_cpu_model_name():
    """Return the CPU model name.

    On x86_64 this is the raw `model name` field from /proc/cpuinfo; on
    aarch64 the `CPU part` code is mapped through CPU_DICT to a symbolic
    name (falling back to "Unknown").
    """
    if platform.machine() == "aarch64":
        _, stdout, _ = run_cmd("cat /proc/cpuinfo | grep 'CPU part' | uniq")
    else:
        _, stdout, _ = run_cmd("cat /proc/cpuinfo | grep 'model name' | uniq")
    # Expect exactly one "key : value" line after `uniq`.
    info = stdout.strip().split(sep=":")
    assert len(info) == 2
    raw_cpu_model = info[1].strip()
    if platform.machine() == "x86_64":
        return raw_cpu_model
    return CPU_DICT[CpuVendor.ARM].get(raw_cpu_model, "Unknown")
def get_cpu_codename(default="Unknown"):
    """Return the CPU codename, or `default` when the model is unrecognized."""
    model = get_cpu_model_name()
    vendor = get_cpu_vendor()
    if vendor == CpuVendor.ARM:
        # On ARM the model name already is the codename.
        return model
    if vendor == CpuVendor.INTEL:
        # Strip the trailing " @ <freq>" suffix before the lookup.
        match = re.match(r"^(.*) @.*$", model)
        if match:
            return CPU_DICT[CpuVendor.INTEL].get(match.group(1), default)
    elif vendor == CpuVendor.AMD:
        # Strip the trailing "<N>-Core Processor" suffix before the lookup.
        match = re.match(r"^(.*) [0-9]*-Core Processor$", model)
        if match:
            return CPU_DICT[CpuVendor.AMD].get(match.group(1), default)
    return default
def get_instance_type():
    """Get the instance type through IMDSv2.

    Queries the EC2 instance metadata service; only meaningful when running
    on an EC2 instance.
    """
    return imdsv2_get("/meta-data/instance-type")
def check_guest_cpuid_output(
    vm, guest_cmd, expected_header, expected_separator, expected_key_value_store
):
    """Parse cpuid output inside guest and match with expected one.

    Runs `guest_cmd` in the guest; when `expected_header` is non-empty,
    lines are skipped until the header is seen. Each subsequent line is
    split on `expected_separator` into a key/value pair, and keys present
    in `expected_key_value_store` are asserted against their expected
    values (matched keys are removed from the dict, which is mutated).
    Parsing stops at the first empty line; the check fails if any expected
    key was never seen.
    """
    _, stdout, stderr = vm.ssh.run(guest_cmd)

    assert stderr == ""

    for line in stdout.split("\n"):
        if line != "":
            # All the keys have been matched. Stop.
            if not expected_key_value_store:
                break

            # Try to match the header if needed.
            if expected_header not in (None, ""):
                if line.strip() == expected_header:
                    # Header found; start matching keys from the next line.
                    expected_header = None
                continue

            # See if any key matches.
            # We Use a try-catch block here since line.split() may fail.
            try:
                [key, value] = list(
                    map(lambda x: x.strip(), line.split(expected_separator))
                )
            except ValueError:
                continue

            if key in expected_key_value_store.keys():
                assert value == expected_key_value_store[key], (
                    "%s does not have the expected value" % key
                )
                del expected_key_value_store[key]

        else:
            # First empty line terminates the section of interest.
            break

    assert not expected_key_value_store, (
        "some keys in dictionary have not been found in the output: %s"
        % expected_key_value_store
    )
def build_cpuid_dict(raw_cpuid_output):
    """Build CPUID dict based on raw cpuid output.

    Each input line must look like
    ``0x<leaf> 0x<subleaf>: eax=0x.. ebx=0x.. ecx=0x.. edx=0x..``
    and yields four entries keyed by (leaf, subleaf, register).
    """
    pattern = re.compile("^ *(.*) (.*): eax=(.*) ebx=(.*) ecx=(.*) edx=(.*)$")
    result = {}
    for line in raw_cpuid_output.strip().split("\n"):
        match = pattern.match(line)
        assert match, f"`{line}` does not match the regex pattern."
        leaf, subleaf, *reg_values = (int(group, 16) for group in match.groups())
        for reg_name, reg_value in zip(("eax", "ebx", "ecx", "edx"), reg_values):
            result[(leaf, subleaf, reg_name)] = reg_value
    return result
def get_guest_cpuid(vm, leaf=None, subleaf=None):
    """
    Return the guest CPUID of CPU 0 in the form of a dictionary where the key
    is a tuple:
    - leaf (integer)
    - subleaf (integer)
    - register ("eax", "ebx", "ecx" or "edx")
    and the value is the register value (integer).

    When both `leaf` and `subleaf` are given, only that entry is queried;
    otherwise the full CPUID of CPU 0 is dumped.
    """
    if leaf is not None and subleaf is not None:
        read_cpuid_cmd = f"cpuid -r -l {leaf} -s {subleaf} | head -n 2 | grep -v CPU"
    else:
        # `sed '/CPU 1/q'` truncates the dump at the second CPU's section.
        read_cpuid_cmd = "cpuid -r | sed '/CPU 1/q' | grep -v CPU"
    _, stdout, stderr = vm.ssh.run(read_cpuid_cmd)
    assert stderr == ""
    return build_cpuid_dict(stdout)
def check_cpuid_feat_flags(vm, must_be_set, must_be_unset):
    """
    Check that CPUID feature flags are set and unset as expected.

    :param vm: microVM whose guest CPUID is inspected
    :param must_be_set: iterable of (leaf, subleaf, register, mask) tuples
        whose masked bits must all read 1
    :param must_be_unset: same shape, but the masked bits must all read 0
    """
    cpuid = get_guest_cpuid(vm)
    allowed_regs = ["eax", "ebx", "ecx", "edx"]

    def _assert_flags(specs, expect_set):
        # Shared assertion loop: for "set" checks the masked value must
        # equal the full mask, for "unset" checks it must be zero.
        for leaf, subleaf, reg, flags in specs:
            assert reg in allowed_regs
            actual = cpuid[(leaf, subleaf, reg)] & flags
            expected = flags if expect_set else 0
            assert (
                actual == expected
            ), f"{leaf=:#x} {subleaf=:#x} {reg=} {actual=:#x}, {expected=:#x}"

    _assert_flags(must_be_set, True)
    _assert_flags(must_be_unset, False)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,863
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tools/compare_baselines/utils/defs.py
|
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Some common definitions used in different modules"""
# fmt: off
CODENAME2DICT = {
"skylake": {
"instance": "m5d.metal",
"model": "Intel(R) Xeon(R) Platinum 8175M CPU @ 2.50GHz",
},
"cascadelake": {
"instance": "m5d.metal",
"model": "Intel(R) Xeon(R) Platinum 8259CL CPU @ 2.50GHz",
},
"icelake": {
"instance": "m6i.metal",
"model": "Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz",
},
"milan": {
"instance": "m6a.metal",
"model": "AMD EPYC 7R13 48-Core Processor"
},
"graviton2": {
"instance": "m6g.metal",
"model": "ARM_NEOVERSE_N1"
},
"graviton3": {
"instance": "c7g.metal",
"model": "ARM_NEOVERSE_V1"
},
}
# fmt: on
MODEL2SHORT = {
"Intel(R) Xeon(R) Platinum 8175M CPU @ 2.50GHz": "m5d/SL",
"Intel(R) Xeon(R) Platinum 8259CL CPU @ 2.50GHz": "m5d/CL",
"Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz": "m6i",
"AMD EPYC 7R13 48-Core Processor": "m6a",
"ARM_NEOVERSE_N1": "m6g",
"ARM_NEOVERSE_V1": "c7g",
}
DEFAULT_BASELINE_DIRECTORY = "tests/integration_tests/performance/configs/"
BASELINE_FILENAME_PATTERN = r"^test_(.+)_config_(.+).json"
BASELINE_FILENAME_FORMAT = "test_{test}_config_{kernel}.json"
DEFAULT_RESULT_FILEPATH = "comparison_result.json"
TESTS = [
"block_performance",
"network_latency",
"network_tcp_throughput",
"snapshot_restore_performance",
"vsock_throughput",
]
KERNELS = [
"4.14",
"5.10",
"6.1",
]
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,864
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_snapshot_basic.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Basic tests scenarios for snapshot save/restore."""
import filecmp
import logging
import os
import re
from pathlib import Path
import pytest
import host_tools.drive as drive_tools
from framework.microvm import SnapshotType
from framework.utils import check_filesystem, wait_process_termination
from framework.utils_vsock import (
ECHO_SERVER_PORT,
VSOCK_UDS_PATH,
_copy_vsock_data_to_guest,
check_guest_connections,
check_host_connections,
make_blob,
make_host_port_path,
)
def _get_guest_drive_size(ssh_connection, guest_dev_name="/dev/vdb"):
# `lsblk` command outputs 2 lines to STDOUT:
# "SIZE" and the size of the device, in bytes.
blksize_cmd = "lsblk -b {} --output SIZE".format(guest_dev_name)
_, stdout, stderr = ssh_connection.run(blksize_cmd)
assert stderr == ""
lines = stdout.split("\n")
return lines[1].strip()
# Testing matrix:
# - Guest kernel: All supported ones
# - Rootfs: Ubuntu 18.04
# - Microvm: 2vCPU with 512 MB RAM
# TODO: Multiple microvm sizes must be tested in the async pipeline.
@pytest.mark.parametrize("snapshot_type", [SnapshotType.DIFF, SnapshotType.FULL])
def test_5_snapshots(
    bin_vsock_path,
    tmp_path,
    microvm_factory,
    guest_kernel,
    rootfs,
    snapshot_type,
):
    """
    Create and load 5 snapshots.

    Boots a microVM with a vsock device, then snapshots/restores it five
    times in a chain.  After each restore it verifies vsock connections in
    both directions and that the root filesystem is intact.  For DIFF
    snapshots each new layer is rebased onto the previous base.
    """
    logger = logging.getLogger("snapshot_sequence")
    seq_len = 5
    diff_snapshots = snapshot_type == SnapshotType.DIFF
    vm = microvm_factory.build(guest_kernel, rootfs)
    vm.spawn()
    # Dirty-page tracking is only needed when DIFF snapshots will be taken.
    vm.basic_config(
        vcpu_count=2,
        mem_size_mib=512,
        track_dirty_pages=diff_snapshots,
    )
    vm.add_net_iface()
    vm.api.vsock.put(vsock_id="vsock0", guest_cid=3, uds_path=VSOCK_UDS_PATH)
    vm.start()
    # Verify if guest can run commands.
    exit_code, _, _ = vm.ssh.run("sync")
    assert exit_code == 0
    vm_blob_path = "/tmp/vsock/test.blob"
    # Generate a random data file for vsock.
    blob_path, blob_hash = make_blob(tmp_path)
    # Copy the data file and a vsock helper to the guest.
    _copy_vsock_data_to_guest(vm.ssh, blob_path, vm_blob_path, bin_vsock_path)
    logger.info("Create %s #0.", snapshot_type)
    # Create a snapshot from a microvm.
    snapshot = vm.make_snapshot(snapshot_type)
    base_snapshot = snapshot
    for i in range(seq_len):
        logger.info("Load snapshot #%s, mem %s", i, snapshot.mem)
        microvm = microvm_factory.build()
        microvm.spawn()
        microvm.restore_from_snapshot(snapshot, resume=True)
        # Test vsock guest-initiated connections.
        path = os.path.join(
            microvm.path, make_host_port_path(VSOCK_UDS_PATH, ECHO_SERVER_PORT)
        )
        check_guest_connections(microvm, path, vm_blob_path, blob_hash)
        # Test vsock host-initiated connections.
        path = os.path.join(microvm.jailer.chroot_path(), VSOCK_UDS_PATH)
        check_host_connections(microvm, path, blob_path, blob_hash)
        # Check that the root device is not corrupted.
        check_filesystem(microvm.ssh, "squashfs", "/dev/vda")
        logger.info("Create snapshot %s #%d.", snapshot_type, i + 1)
        snapshot = microvm.make_snapshot(snapshot_type)
        # If we are testing incremental snapshots we must merge the base with
        # current layer.
        if snapshot.is_diff:
            logger.info("Base: %s, Layer: %s", base_snapshot.mem, snapshot.mem)
            snapshot = snapshot.rebase_snapshot(base_snapshot)
            # Update the base for next iteration.
            base_snapshot = snapshot
def test_patch_drive_snapshot(uvm_nano, microvm_factory):
    """
    Test that a patched drive is correctly used by guests loaded from snapshot.

    Attaches a scratch drive, patches it to a larger backing file, snapshots
    the VM, restores it, and verifies the restored guest sees the size of
    the patched (second) backing file.
    """
    logger = logging.getLogger("snapshot_sequence")
    # Use a predefined vm instance.
    basevm = uvm_nano
    basevm.add_net_iface()
    # Add a scratch 128MB RW non-root block device.
    root = Path(basevm.path)
    scratch_path1 = str(root / "scratch1")
    scratch_disk1 = drive_tools.FilesystemFile(scratch_path1, size=128)
    basevm.add_drive("scratch", scratch_disk1.path)
    basevm.start()
    # Verify if guest can run commands.
    exit_code, _, _ = basevm.ssh.run("sync")
    assert exit_code == 0
    # Update drive to have another backing file, double in size.
    new_file_size_mb = 2 * int(scratch_disk1.size() / (1024 * 1024))
    logger.info("Patch drive, new file: size %sMB.", new_file_size_mb)
    scratch_path2 = str(root / "scratch2")
    scratch_disk2 = drive_tools.FilesystemFile(scratch_path2, new_file_size_mb)
    basevm.patch_drive("scratch", scratch_disk2)
    # Create base snapshot.
    logger.info("Create FULL snapshot #0.")
    snapshot = basevm.snapshot_full()
    # Load snapshot in a new Firecracker microVM.
    logger.info("Load snapshot, mem %s", snapshot.mem)
    vm = microvm_factory.build()
    vm.spawn()
    vm.restore_from_snapshot(snapshot, resume=True)
    # Attempt to connect to resumed microvm and verify the new microVM has the
    # right scratch drive.
    guest_drive_size = _get_guest_drive_size(vm.ssh)
    assert guest_drive_size == str(scratch_disk2.size())
def test_load_snapshot_failure_handling(test_microvm_with_api):
    """
    Test error case of loading empty snapshot files.

    Restoring from zero-length vmstate/memory files must fail with a clear
    error, and the Firecracker process must terminate.
    """
    vm = test_microvm_with_api
    vm.spawn(log_level="Info")
    # Create two empty files for snapshot state and snapshot memory.
    chroot_path = vm.jailer.chroot_path()
    snapshot_dir = os.path.join(chroot_path, "snapshot")
    Path(snapshot_dir).mkdir(parents=True, exist_ok=True)
    # Path.touch() creates the empty files without leaving open handles
    # (the original `open(...).close()` idiom).
    snapshot_mem = os.path.join(snapshot_dir, "snapshot_mem")
    Path(snapshot_mem).touch()
    snapshot_vmstate = os.path.join(snapshot_dir, "snapshot_vmstate")
    Path(snapshot_vmstate).touch()
    # Hardlink the snapshot files into the microvm jail.
    jailed_mem = vm.create_jailed_resource(snapshot_mem)
    jailed_vmstate = vm.create_jailed_resource(snapshot_vmstate)
    # Load the snapshot; an empty vmstate file cannot even hold the CRC.
    expected_msg = (
        "Load microVM snapshot error: Failed to restore from snapshot: Failed to get snapshot "
        "state from file: Failed to load snapshot state from file: Snapshot file is smaller "
        "than CRC length."
    )
    with pytest.raises(RuntimeError, match=expected_msg):
        vm.api.snapshot_load.put(mem_file_path=jailed_mem, snapshot_path=jailed_vmstate)
    # Check if FC process is closed
    wait_process_termination(vm.jailer_clone_pid)
def test_cmp_full_and_first_diff_mem(microvm_factory, guest_kernel, rootfs):
    """
    Compare memory of 2 consecutive full and diff snapshots.

    With no guest activity between the two snapshots, the first diff memory
    file must be byte-identical to the full one.

    Testing matrix:
    - Guest kernel: All supported ones
    - Rootfs: Ubuntu 18.04
    - Microvm: 2vCPU with 512 MB RAM
    """
    logger = logging.getLogger("snapshot_sequence")
    vm = microvm_factory.build(guest_kernel, rootfs)
    vm.spawn()
    # Dirty-page tracking must be on to allow diff snapshots.
    vm.basic_config(
        vcpu_count=2,
        mem_size_mib=512,
        track_dirty_pages=True,
    )
    vm.add_net_iface()
    vm.start()
    # Verify if guest can run commands.
    exit_code, _, _ = vm.ssh.run("sync")
    assert exit_code == 0
    logger.info("Create full snapshot.")
    # Create full snapshot.
    full_snapshot = vm.snapshot_full()
    logger.info("Create diff snapshot.")
    # Create diff snapshot.
    diff_snapshot = vm.snapshot_diff()
    assert filecmp.cmp(full_snapshot.mem, diff_snapshot.mem)
def test_negative_postload_api(test_microvm_with_api, microvm_factory):
    """
    Test APIs fail after loading from snapshot.

    Boot-time-only operations (InstanceStart, machine config) must be
    rejected on a microVM restored from a snapshot.
    """
    basevm = test_microvm_with_api
    basevm.spawn()
    basevm.basic_config(track_dirty_pages=True)
    basevm.add_net_iface()
    basevm.start()
    # Verify if guest can run commands.
    exit_code, _, _ = basevm.ssh.run("sync")
    assert exit_code == 0
    # Create base snapshot.
    snapshot = basevm.snapshot_diff()
    basevm.kill()
    # Do not resume, just load, so we can still call APIs that work.
    # NOTE(review): `resume=True` below contradicts the comment above —
    # confirm whether this was meant to be `resume=False`.
    microvm = microvm_factory.build()
    microvm.spawn()
    microvm.restore_from_snapshot(snapshot, resume=True)
    fail_msg = "The requested operation is not supported after starting the microVM"
    with pytest.raises(RuntimeError, match=fail_msg):
        microvm.api.actions.put(action_type="InstanceStart")
    with pytest.raises(RuntimeError, match=fail_msg):
        microvm.basic_config()
def test_negative_snapshot_permissions(uvm_plain_rw, microvm_factory):
    """
    Test missing permission error scenarios.

    Covers snapshot creation into an unwritable jail, and restore attempts
    when the memory file, vmstate file, or backing block file is unreadable.
    """
    basevm = uvm_plain_rw
    basevm.spawn()
    basevm.basic_config()
    basevm.add_net_iface()
    basevm.start()
    # Remove write permissions.
    os.chmod(basevm.jailer.chroot_path(), 0o444)
    with pytest.raises(RuntimeError, match="Permission denied"):
        basevm.snapshot_full()
    # Restore proper permissions.
    os.chmod(basevm.jailer.chroot_path(), 0o744)
    # Create base snapshot.
    snapshot = basevm.snapshot_full()
    basevm.kill()
    # Remove permissions for mem file.
    os.chmod(snapshot.mem, 0o000)
    microvm = microvm_factory.build()
    microvm.spawn()
    expected_err = re.escape(
        "Load microVM snapshot error: Failed to restore from snapshot: Failed to load guest "
        "memory: Error creating guest memory from file: Failed to load guest memory: "
        "Permission denied (os error 13)"
    )
    with pytest.raises(RuntimeError, match=expected_err):
        microvm.restore_from_snapshot(snapshot, resume=True)
    # Remove permissions for state file.
    os.chmod(snapshot.vmstate, 0o000)
    microvm = microvm_factory.build()
    microvm.spawn()
    expected_err = re.escape(
        "Load microVM snapshot error: Failed to restore from snapshot: Failed to get snapshot "
        "state from file: Failed to open snapshot file: Permission denied (os error 13)"
    )
    with pytest.raises(RuntimeError, match=expected_err):
        microvm.restore_from_snapshot(snapshot, resume=True)
    # Restore permissions for state file.
    os.chmod(snapshot.vmstate, 0o744)
    os.chmod(snapshot.mem, 0o744)
    # Remove permissions for block file.
    os.chmod(snapshot.disks["rootfs"], 0o000)
    microvm = microvm_factory.build()
    microvm.spawn()
    expected_err = "Block(BackingFile(Os { code: 13, kind: PermissionDenied"
    with pytest.raises(RuntimeError, match=re.escape(expected_err)):
        microvm.restore_from_snapshot(snapshot, resume=True)
def test_negative_snapshot_create(uvm_nano):
    """
    Test create snapshot before pause.

    Snapshot creation must be rejected while the VM is running, and diff
    snapshots must be rejected when dirty-page tracking is disabled; no
    partial snapshot files may be left behind.
    """
    vm = uvm_nano
    vm.start()
    with pytest.raises(RuntimeError, match="save/restore unavailable while running"):
        vm.api.snapshot_create.put(
            mem_file_path="memfile", snapshot_path="statefile", snapshot_type="Full"
        )
    vm.api.vm.patch(state="Paused")
    # Try diff with dirty pages tracking disabled.
    expected_msg = (
        "Diff snapshots are not allowed on uVMs with dirty page tracking disabled"
    )
    with pytest.raises(RuntimeError, match=expected_msg):
        vm.api.snapshot_create.put(
            mem_file_path="memfile", snapshot_path="statefile", snapshot_type="Diff"
        )
    # Neither failed attempt may leave files behind.
    assert not os.path.exists("statefile")
    assert not os.path.exists("memfile")
    vm.kill()
def test_create_large_diff_snapshot(test_microvm_with_api):
    """
    Create large diff snapshot seccomp regression test.

    When creating a diff snapshot of a microVM with a large memory size, a
    mmap(MAP_PRIVATE|MAP_ANONYMOUS) is issued. Test that the default seccomp
    filter allows it.

    @issue: https://github.com/firecracker-microvm/firecracker/discussions/2811
    """
    vm = test_microvm_with_api
    vm.spawn()
    # 16 GiB of guest memory forces the large anonymous mmap in the VMM.
    vm.basic_config(mem_size_mib=16 * 1024, track_dirty_pages=True)
    vm.start()
    vm.api.vm.patch(state="Paused")
    vm.api.snapshot_create.put(
        mem_file_path="memfile", snapshot_path="statefile", snapshot_type="Diff"
    )
    # If the regression was not fixed, this would have failed. The Firecracker
    # process would have been taken down.
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,865
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/security/test_custom_seccomp.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests that the --seccomp-filter parameter works as expected."""
import os
import platform
import tempfile
import time
import psutil
import pytest
import requests
from framework import utils
from host_tools.cargo_build import run_seccompiler_bin
def _custom_filter_setup(test_microvm, json_filter):
    """Compile a JSON seccomp filter and install it in the microVM's jail.

    :param test_microvm: microVM under test; its jailer gets a
        ``--seccomp-filter bpf.out`` extra argument.
    :param json_filter: seccomp filter as JSON ``bytes``.
    """
    # Fix: use a context manager so the temp-file handle is closed (the
    # original leaked the open handle). delete=False keeps the file on disk
    # for seccompiler; we unlink it ourselves afterwards.
    with tempfile.NamedTemporaryFile(delete=False) as json_temp:
        json_temp.write(json_filter)
        json_temp.flush()
    bpf_path = os.path.join(test_microvm.path, "bpf.out")
    run_seccompiler_bin(bpf_path=bpf_path, json_path=json_temp.name)
    os.unlink(json_temp.name)
    test_microvm.create_jailed_resource(bpf_path)
    test_microvm.jailer.extra_args.update({"seccomp-filter": "bpf.out"})
def _config_file_setup(test_microvm, vm_config_file):
test_microvm.create_jailed_resource(test_microvm.kernel_file)
test_microvm.create_jailed_resource(test_microvm.rootfs_file)
vm_config_path = os.path.join(test_microvm.path, os.path.basename(vm_config_file))
with open(vm_config_file, encoding="utf-8") as f1:
with open(vm_config_path, "w", encoding="utf-8") as f2:
for line in f1:
f2.write(line)
test_microvm.create_jailed_resource(vm_config_path)
test_microvm.jailer.extra_args = {"config-file": os.path.basename(vm_config_file)}
test_microvm.jailer.extra_args.update({"no-api": None})
def test_allow_all(test_microvm_with_api):
    """
    Test --seccomp-filter, allowing all syscalls.

    An allow-everything filter must still leave the process under seccomp
    (level 2) and the VM must boot normally.
    """
    test_microvm = test_microvm_with_api
    _custom_filter_setup(
        test_microvm,
        """{
        "Vmm": {
            "default_action": "allow",
            "filter_action": "trap",
            "filter": []
        },
        "Api": {
            "default_action": "allow",
            "filter_action": "trap",
            "filter": []
        },
        "Vcpu": {
            "default_action": "allow",
            "filter_action": "trap",
            "filter": []
        }
    }""".encode(
            "utf-8"
        ),
    )
    test_microvm.spawn()
    test_microvm.basic_config()
    test_microvm.start()
    # Seccomp level 2 means a filter is installed.
    utils.assert_seccomp_level(test_microvm.jailer_clone_pid, "2")
def test_working_filter(test_microvm_with_api):
    """
    Test --seccomp-filter, rejecting some dangerous syscalls.

    Denies clone/execve (never issued by a running Firecracker); the VM
    must boot and run normally under the filter.
    """
    test_microvm = test_microvm_with_api
    _custom_filter_setup(
        test_microvm,
        """{
        "Vmm": {
            "default_action": "allow",
            "filter_action": "kill_process",
            "filter": [
                {
                    "syscall": "clone"
                },
                {
                    "syscall": "execve"
                }
            ]
        },
        "Api": {
            "default_action": "allow",
            "filter_action": "kill_process",
            "filter": [
                {
                    "syscall": "clone"
                },
                {
                    "syscall": "execve"
                }
            ]
        },
        "Vcpu": {
            "default_action": "allow",
            "filter_action": "kill_process",
            "filter": [
                {
                    "syscall": "clone"
                },
                {
                    "syscall": "execve",
                    "comment": "sample comment"
                }
            ]
        }
    }""".encode(
            "utf-8"
        ),
    )
    test_microvm.spawn()
    test_microvm.basic_config()
    test_microvm.start()
    # level should be 2, with no additional errors
    utils.assert_seccomp_level(test_microvm.jailer_clone_pid, "2")
def test_failing_filter(test_microvm_with_api):
    """
    Test --seccomp-filter, denying some needed syscalls.

    Trapping ioctl on the vCPU thread must kill the VM; verify the log
    message, the seccomp fault metric, and that the process is gone.
    """
    test_microvm = test_microvm_with_api
    _custom_filter_setup(
        test_microvm,
        """{
        "Vmm": {
            "default_action": "allow",
            "filter_action": "trap",
            "filter": []
        },
        "Api": {
            "default_action": "allow",
            "filter_action": "trap",
            "filter": []
        },
        "Vcpu": {
            "default_action": "allow",
            "filter_action": "trap",
            "filter": [
                {
                    "syscall": "ioctl"
                }
            ]
        }
    }""".encode(
            "utf-8"
        ),
    )
    test_microvm.spawn()
    test_microvm.basic_config(vcpu_count=1)
    # Try to start the VM with error checking off, because it will fail.
    try:
        test_microvm.start()
    except requests.exceptions.ConnectionError:
        pass
    # Give time for the process to get killed
    time.sleep(1)
    test_microvm.expect_kill_by_signal = True
    # Check the logger output
    # ioctl syscall number differs per architecture (16 on x86_64, 29 on aarch64).
    ioctl_num = 16 if platform.machine() == "x86_64" else 29
    test_microvm.check_log_message(
        "Shutting down VM after intercepting a bad"
        " syscall ({})".format(str(ioctl_num))
    )
    # Check the metrics
    datapoints = test_microvm.get_all_metrics()
    num_faults = 0
    for datapoint in datapoints:
        num_faults += datapoint["seccomp"]["num_faults"]
    assert num_faults >= 1
    # assert that the process was killed
    assert not psutil.pid_exists(test_microvm.jailer_clone_pid)
@pytest.mark.parametrize("vm_config_file", ["framework/vm_config.json"])
def test_invalid_bpf(test_microvm_with_api, vm_config_file):
    """
    Test that FC does not start, given an invalid binary filter.

    Writes garbage where the BPF program is expected; the jailer must kill
    the Firecracker process on startup.
    """
    test_microvm = test_microvm_with_api
    # Configure VM from JSON. Otherwise, the test will error because
    # the process will be killed before configuring the API socket.
    _config_file_setup(test_microvm_with_api, vm_config_file)
    bpf_path = os.path.join(test_microvm.path, "bpf.out")
    # Fix: use a context manager instead of manual open/close so the handle
    # cannot leak if the write raises.
    with open(bpf_path, "w", encoding="utf-8") as file:
        file.write("Invalid BPF!")
    test_microvm.create_jailed_resource(bpf_path)
    test_microvm.jailer.extra_args.update({"seccomp-filter": "bpf.out"})
    test_microvm.spawn()
    # give time for the process to get killed
    time.sleep(1)
    # assert that the process was killed
    assert not psutil.pid_exists(test_microvm.jailer_clone_pid)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,866
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/style/test_repo.py
|
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests enforcing git repository structure"""
import subprocess
from pathlib import Path
import yaml
def test_repo_no_spaces_in_paths():
    """
    Ensure there are no spaces in paths.
    """
    # grep exits 1 when it matches nothing; exit 0 means some tracked path
    # contains whitespace.
    # pylint: disable-next=subprocess-run-check
    completed = subprocess.run(
        "git ls-files | grep '[[:space:]]'",
        cwd="..",
        capture_output=True,
        shell=True,
    )
    offenders = completed.stdout.decode()
    assert completed.returncode == 1, "Some files have spaces:\n" + offenders
def test_repo_validate_yaml():
    """
    Ensure all YAML files are valid.

    safe_load raises on malformed YAML, which fails the test.
    """
    repo_root = Path("..")
    for path in repo_root.rglob("*.y*ml"):
        # Fix: close each file handle via a context manager; the original
        # passed an open handle to safe_load and never closed it.
        with path.open(encoding="utf-8") as yaml_file:
            yaml.safe_load(yaml_file)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,867
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_net.py
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests for the net device."""
import re
import time
import pytest
from framework import utils
# The iperf version to run this tests with
IPERF_BINARY = "iperf3"
def test_high_ingress_traffic(test_microvm_with_api):
    """
    Run iperf rx with high UDP traffic.

    Shrinks the tap's tx queue so it overflows under 1 Gbps UDP ingress,
    then verifies the guest network device still works afterwards.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    test_microvm.basic_config()
    # Create tap before configuring interface.
    test_microvm.add_net_iface()
    tap = test_microvm.iface["eth0"]["tap"]
    guest_ip = test_microvm.iface["eth0"]["iface"].guest_ip
    # Set the tap's tx queue len to 5. This increases the probability
    # of filling the tap under high ingress traffic.
    tap.set_tx_queue_len(5)
    # Start the microvm.
    test_microvm.start()
    # Start iperf3 server on the guest.
    test_microvm.ssh.run("{} -sD\n".format(IPERF_BINARY))
    time.sleep(1)
    # Start iperf3 client on the host. Send 1Gbps UDP traffic.
    # If the net device breaks, iperf will freeze. We have to use a timeout.
    utils.run_cmd(
        "timeout 30 {} {} -c {} -u -V -b 1000000000 -t 30".format(
            test_microvm.jailer.netns_cmd_prefix(),
            IPERF_BINARY,
            guest_ip,
        ),
        ignore_return_code=True,
    )
    # Check if the high ingress traffic broke the net interface.
    # If the net interface still works we should be able to execute
    # ssh commands.
    exit_code, _, _ = test_microvm.ssh.run("echo success\n")
    assert exit_code == 0
def test_multi_queue_unsupported(test_microvm_with_api):
    """
    Creates multi-queue tap device and tries to add it to firecracker.

    Firecracker does not support multi-queue taps; the network PUT must be
    rejected with a descriptive error.
    """
    microvm = test_microvm_with_api
    microvm.spawn()
    microvm.basic_config()
    # Name derived from the VM id to avoid clashes between parallel tests.
    tapname = microvm.id[:8] + "tap1"
    utils.run_cmd(f"ip tuntap add name {tapname} mode tap multi_queue")
    utils.run_cmd(f"ip link set {tapname} netns {microvm.jailer.netns}")
    expected_msg = re.escape(
        "Could not create the network device: Open tap device failed:"
        " Error while creating ifreq structure: Invalid argument (os error 22)."
        f" Invalid TUN/TAP Backend provided by {tapname}. Check our documentation on setting"
        " up the network devices."
    )
    with pytest.raises(RuntimeError, match=expected_msg):
        microvm.api.network.put(
            iface_id="eth0",
            host_dev_name=tapname,
            guest_mac="AA:FC:00:00:00:01",
        )
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,868
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tools/compare_baselines/main.py
|
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Compare gathered baselines"""
import argparse
import os
import subprocess
from utils.comparator import CpuComparator, DirectoryComparator
from utils.defs import (
CODENAME2DICT,
DEFAULT_BASELINE_DIRECTORY,
DEFAULT_RESULT_FILEPATH,
KERNELS,
TESTS,
)
def cmd_cpu(args):
    """Compare baselines between CPUs"""
    comparator = CpuComparator(
        args.directory,
        args.tests,
        args.kernels,
        args.codenames,
    )
    comparator.compare()
    # Write the comparison result to the requested output path.
    comparator.dump_json(args.output)
def cmd_directory(args):
    """Compare baselines between two directories."""
    # Fix: corrected docstring typo ("Comparre").
    comp = DirectoryComparator(
        args.source,
        args.target,
        args.tests,
        args.kernels,
        args.codenames,
    )
    comp.compare(args.auxiliary)
    # Write the comparison result to the requested output path.
    comp.dump_json(args.output)
def cmd_commit(args):
    """Compare baselines between two commit hashes.

    Checks out the two commits into temporary git worktrees named after
    their hashes and compares the baseline directories within them.
    """
    # Default target: the currently checked-out commit hash.
    if args.target is None:
        args.target = (
            subprocess.check_output(["git", "show", "--format='%H'", "--no-patch"])[:-1]
            .decode()
            .strip("'")
        )
    subprocess.run(["git", "worktree", "add", args.source, args.source], check=True)
    subprocess.run(["git", "worktree", "add", args.target, args.target], check=True)
    comp = DirectoryComparator(
        os.path.join(args.source, args.directory),
        os.path.join(args.target, args.directory),
        args.tests,
        args.kernels,
        args.codenames,
    )
    # NOTE(review): worktrees are removed before compare() runs — this only
    # works if DirectoryComparator reads the files in its constructor;
    # confirm against utils.comparator.
    subprocess.run(["git", "worktree", "remove", args.source], check=True)
    subprocess.run(["git", "worktree", "remove", args.target], check=True)
    comp.compare(args.auxiliary)
    comp.dump_json(args.output)
def cmd_latest(args):
    """Compare baselines with the latest commit.

    Checks out the current HEAD into a temporary git worktree and compares
    its baseline directory with the working tree's.
    """
    latest_hash = (
        subprocess.check_output(["git", "show", "--format='%H'", "--no-patch"])[:-1]
        .decode()
        .strip("'")
    )
    subprocess.run(["git", "worktree", "add", latest_hash, latest_hash], check=True)
    comp = DirectoryComparator(
        os.path.join(latest_hash, args.directory),
        os.path.join(args.directory),
        args.tests,
        args.kernels,
        args.codenames,
    )
    # NOTE(review): the worktree is removed before compare() runs — this only
    # works if DirectoryComparator reads the files in its constructor;
    # confirm against utils.comparator.
    subprocess.run(["git", "worktree", "remove", latest_hash], check=True)
    comp.compare(args.auxiliary)
    comp.dump_json(args.output)
def main():
    """Parse command-line arguments and dispatch to the selected subcommand.

    Subcommands: cpu, directory, commit, latest. Shared options (--tests,
    --kernels, --codenames, --output, --auxiliary) are attached to each
    subcommand via a common parent parser.
    """
    parser = argparse.ArgumentParser(description="Compare gathered baselines")
    # Shared options for all subcommands
    shared_parser = argparse.ArgumentParser(add_help=False)
    shared_parser.add_argument(
        "--tests",
        help="List of test types",
        nargs="+",
        action="store",
        choices=TESTS,
        default=TESTS,
    )
    shared_parser.add_argument(
        "--kernels",
        help="List of host kernel versions",
        nargs="+",
        action="store",
        choices=KERNELS,
        default=KERNELS,
    )
    shared_parser.add_argument(
        "--codenames",
        help="List of CPU codenames. The first one is used as basis.",
        action="store",
        nargs="+",
        choices=list(CODENAME2DICT.keys()),
        default=list(CODENAME2DICT.keys()),
    )
    shared_parser.add_argument(
        "-o",
        "--output",
        help="Path of output file.",
        action="store",
        default=DEFAULT_RESULT_FILEPATH,
    )
    shared_parser.add_argument(
        "-a",
        "--auxiliary",
        help="Include auxiliary information",
        action="store_true",
    )
    subparsers = parser.add_subparsers(title="modes")
    # Subcommand options for comparing baselines between CPUs
    parser_cpu = subparsers.add_parser(
        "cpu", parents=[shared_parser], help="Compare between CPUs."
    )
    parser_cpu.set_defaults(handler=cmd_cpu)
    parser_cpu.add_argument(
        "-d",
        "--directory",
        help="Path of directory containing JSON files of baselines.",
        action="store",
        default=DEFAULT_BASELINE_DIRECTORY,
    )
    # Subcommand options for comparing baselines between directories
    parser_dir = subparsers.add_parser(
        "directory", parents=[shared_parser], help="Compare between two directories."
    )
    parser_dir.set_defaults(handler=cmd_directory)
    parser_dir.add_argument(
        "-s",
        "--source",
        help="Path of source directory containing JSON files of baselines.",
        action="store",
        required=True,
    )
    parser_dir.add_argument(
        "-t",
        "--target",
        help="Path of target directory containing JSON files of baselines.",
        action="store",
        required=True,
    )
    # Subcommand options for comparing baselines between commit hashes
    parser_commit = subparsers.add_parser(
        "commit", parents=[shared_parser], help="Compare between two commit hashes."
    )
    parser_commit.set_defaults(handler=cmd_commit)
    parser_commit.add_argument(
        "-d",
        "--directory",
        help="Path of directory containing JSON files of baselines.",
        action="store",
        default=DEFAULT_BASELINE_DIRECTORY,
    )
    parser_commit.add_argument(
        "-s",
        "--source",
        help="Source commit hash.",
        action="store",
        required=True,
    )
    # Target is optional; cmd_commit defaults it to the current HEAD.
    parser_commit.add_argument(
        "-t",
        "--target",
        help="Target commit hash.",
        action="store",
    )
    # Subcommand options for comparing baselines with the latest commit
    parser_latest = subparsers.add_parser(
        "latest", parents=[shared_parser], help="Compare with the latest commit."
    )
    parser_latest.set_defaults(handler=cmd_latest)
    parser_latest.add_argument(
        "-d",
        "--directory",
        help="Path of directory containing JSON files of baselines.",
        action="store",
        default=DEFAULT_BASELINE_DIRECTORY,
    )
    # Parse arguments
    args = parser.parse_args()
    # Dispatch to the subcommand handler; print usage when none was given.
    if hasattr(args, "handler"):
        args.handler(args)
    else:
        parser.print_help()
if __name__ == "__main__":
    main()
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,869
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/build/test_coverage.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests enforcing code coverage for production code."""
import os
import warnings
import pytest
from framework import utils
from framework.properties import global_props
from host_tools import proc
from host_tools.cargo_build import cargo
# CPU model string of the host, used to select the toolchain target.
PROC_MODEL = proc.proc_type()
# Toolchain target architecture.
if "Intel" in PROC_MODEL:
    VENDOR = "Intel"
    ARCH = "x86_64"
elif "AMD" in PROC_MODEL:
    VENDOR = "AMD"
    ARCH = "x86_64"
elif "ARM" in PROC_MODEL:
    VENDOR = "ARM"
    ARCH = "aarch64"
else:
    # Coverage can only be measured on the processor families above.
    raise Exception(f"Unsupported processor model ({PROC_MODEL})")
# Toolchain target.
# Currently profiling with `aarch64-unknown-linux-musl` is unsupported (see
# https://github.com/rust-lang/rustup/issues/3095#issuecomment-1280705619) therefore we profile and
# run coverage with the `gnu` toolchains and run unit tests with the `musl` toolchains.
TARGET = f"{ARCH}-unknown-linux-gnu"
@pytest.mark.timeout(600)
def test_coverage(monkeypatch):
    """Test code coverage.

    Runs the unit tests with LLVM instrumentation, renders an lcov report
    with grcov (excluding code irrelevant for the host CPU vendor), and
    uploads it to Codecov when a token is available and we run in EC2.
    """
    # Re-direct to repository root.
    monkeypatch.chdir("..")
    # Generate test profiles.
    cargo(
        "test",
        f"--all --target {TARGET}",
        "--test-threads=1",
        env={
            "RUSTFLAGS": "-Cinstrument-coverage",
            "LLVM_PROFILE_FILE": "coverage-%p-%m.profraw",
        },
    )
    lcov_file = "./build/cargo_target/coverage.lcov"
    # Generate coverage report.
    cmd = f"""
        grcov . \
            -s . \
            --binary-path ./build/cargo_target/{TARGET}/debug/ \
            --excl-start "mod tests" \
            --ignore "build/*" \
            --ignore "**/tests/*" \
            --ignore "**/test_utils*" \
            --ignore "**/mock_*" \
            --ignore "src/firecracker/examples/*" \
            -t lcov \
            --ignore-not-existing \
            -o {lcov_file}"""
    # Ignore code not relevant for the intended platform
    # - CPUID and CPU template
    # - Static CPU templates intended for specific CPU vendors
    if "AMD" == VENDOR:
        cmd += " \
            --ignore **/intel* \
            --ignore *t2* \
            --ignore *t2s* \
            --ignore *t2cl* \
            --ignore *c3* \
        "
    elif "Intel" == VENDOR:
        cmd += " \
            --ignore **/amd* \
            --ignore *t2a* \
        "
    utils.run_cmd(cmd)
    # Only upload if token is present and we're in EC2
    if "CODECOV_TOKEN" in os.environ and global_props.is_ec2:
        pr_number = os.environ.get("BUILDKITE_PULL_REQUEST")
        branch = os.environ.get("BUILDKITE_BRANCH")
        if not branch:
            # Fall back to the locally checked-out branch name.
            branch = utils.run_cmd("git rev-parse --abbrev-ref HEAD").stdout
        codecov_cmd = f"codecov -f {lcov_file} -F {global_props.host_linux_version}-{global_props.instance}"
        # Buildkite sets BUILDKITE_PULL_REQUEST to the literal "false"
        # outside of PR builds.
        if pr_number and pr_number != "false":
            codecov_cmd += f" -P {pr_number}"
        else:
            codecov_cmd += f" -B {branch}"
        utils.run_cmd(codecov_cmd)
    else:
        warnings.warn(
            "Not uploading coverage report due to missing CODECOV_TOKEN environment variable"
        )
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,870
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_snapshot_restore_cross_kernel.py
|
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Test to restore snapshots across kernel versions."""
import json
import logging
from pathlib import Path
import pytest
from framework.defs import FC_WORKSPACE_DIR
from framework.utils import (
generate_mmds_get_request,
generate_mmds_session_token,
guest_run_fio_iteration,
populate_data_store,
)
from framework.utils_cpuid import CpuVendor, get_cpu_vendor
from framework.utils_vsock import check_vsock_device
from integration_tests.functional.test_balloon import (
get_stable_rss_mem_by_pid,
make_guest_dirty_memory,
)
def _test_balloon(microvm):
    """Check the balloon device reclaims memory on the restored microVM."""
    # Get the firecracker pid.
    firecracker_pid = microvm.jailer_clone_pid
    # Check memory usage.
    first_reading = get_stable_rss_mem_by_pid(firecracker_pid)
    # Dirty 300MB of pages.
    make_guest_dirty_memory(microvm.ssh, amount_mib=300)
    # Check memory usage again.
    second_reading = get_stable_rss_mem_by_pid(firecracker_pid)
    assert second_reading > first_reading
    # Inflate the balloon. Get back 200MB.
    microvm.api.balloon.patch(amount_mib=200)
    third_reading = get_stable_rss_mem_by_pid(firecracker_pid)
    # Ensure that there is a reduction in RSS.
    assert second_reading > third_reading
def _test_mmds(vm, mmds_net_iface):
    """Check MMDS works on the restored microVM via *mmds_net_iface*."""
    # Populate MMDS.
    data_store = {"latest": {"meta-data": {"ami-id": "ami-12345678"}}}
    populate_data_store(vm, data_store)
    mmds_ipv4_address = "169.254.169.254"
    vm.guest_ip = mmds_net_iface.guest_ip
    # Insert new rule into the routing table of the guest.
    cmd = "ip route add {} dev {}".format(
        mmds_net_iface.guest_ip, mmds_net_iface.dev_name
    )
    code, _, _ = vm.ssh.run(cmd)
    assert code == 0
    # The base microVM had MMDS version 2 configured, which was persisted
    # across the snapshot-restore.
    token = generate_mmds_session_token(vm.ssh, mmds_ipv4_address, token_ttl=60)
    cmd = generate_mmds_get_request(mmds_ipv4_address, token=token)
    _, stdout, _ = vm.ssh.run(cmd)
    # Fix: ssh.run returns stdout as a string, not a file-like object, so
    # parse it with json.loads (json.load would raise AttributeError).
    assert json.loads(stdout) == data_store
@pytest.mark.timeout(600)
@pytest.mark.nonci
@pytest.mark.parametrize(
    "cpu_template",
    ["C3", "T2", "T2S", "None"] if get_cpu_vendor() == CpuVendor.INTEL else ["None"],
)
def test_snap_restore_from_artifacts(
    microvm_factory, bin_vsock_path, test_fc_session_root_path, cpu_template
):
    """
    Restore from snapshots obtained with all supported guest kernel versions.

    The snapshot artifacts have been generated through the
    `create_snapshot_artifacts` devtool command. The base microVM snapshotted
    has been built from the config file at
    ~/firecracker/tools/create_snapshot_artifact/complex_vm_config.json.
    """
    logger = logging.getLogger("cross_kernel_snapshot_restore")
    snapshot_root_name = "snapshot_artifacts"
    snapshot_root_dir = Path(FC_WORKSPACE_DIR) / snapshot_root_name
    # Iterate through all subdirectories based on CPU template
    # in the snapshot root dir.
    # FIX: the pattern previously started with a literal "." which only
    # matches hidden directories; artifact dirs are named
    # "<kernel>_<template>_guest_snapshot".
    snap_subdirs = snapshot_root_dir.glob(f"*_{cpu_template}_guest_snapshot")
    for snapshot_dir in snap_subdirs:
        assert snapshot_dir.is_dir()
        logger.info("Working with snapshot artifacts in %s.", snapshot_dir)
        vm = microvm_factory.build()
        vm.spawn()
        logger.info("Loading microVM from snapshot...")
        vm.restore_from_path(snapshot_dir)
        vm.resume()
        # Ensure microVM is running.
        assert vm.state == "Running"
        # Test that net devices have connectivity after restore.
        # FIX: `vm.iface.values()["iface"]` raised TypeError (dict_values is
        # not subscriptable); iterate the per-interface entries and take each
        # entry's "iface" config instead.
        for idx, iface_entry in enumerate(vm.iface.values()):
            iface = iface_entry["iface"]
            logger.info("Testing net device %s...", iface.dev_name)
            exit_code, _, _ = vm.ssh_iface(idx).run("sync")
            assert exit_code == 0
        logger.info("Testing data store behavior...")
        _test_mmds(vm, vm.iface["eth3"]["iface"])
        logger.info("Testing balloon device...")
        _test_balloon(vm)
        logger.info("Testing vsock device...")
        check_vsock_device(vm, bin_vsock_path, test_fc_session_root_path, vm.ssh)
        # Run fio on the guest.
        # TODO: check the result of FIO or use fsck to check that the root device is
        # not corrupted. No obvious errors will be returned here.
        guest_run_fio_iteration(vm.ssh, 0)
        vm.kill()
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,871
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/utils_iperf.py
|
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""File containing utility methods for iperf-based performance tests"""
import concurrent.futures
import json
import time
from framework import utils
from framework.utils import CmdBuilder, CpuMap, get_cpu_percent, summarize_cpu_percent
# Names under which measurements are reported to the stats consumer.
DURATION = "duration"
# Key of the summary section in iperf3's JSON output.
IPERF3_END_RESULTS_TAG = "end"
THROUGHPUT = "throughput"
CPU_UTILIZATION_VMM = "cpu_utilization_vmm"
CPU_UTILIZATION_VCPUS_TOTAL = "cpu_utilization_vcpus_total"
# Dictionary mapping modes (guest-to-host, host-to-guest, bidirectional) to arguments passed to the iperf3 clients spawned
MODE_MAP = {"g2h": [""], "h2g": ["-R"], "bd": ["", "-R"]}
# Dictionary doing the reverse of the above, for pretty-printing
REV_MODE_MAP = {"": "g2h", "-R": "h2g"}
# Number of seconds to wait for the iperf3 server to start
SERVER_STARTUP_TIME_SEC = 2
class IPerf3Test:
    """Class abstracting away the setup and execution of an iperf3-based performance test"""

    def __init__(
        self,
        microvm,
        base_port,
        runtime,
        omit,
        mode,
        num_clients,
        connect_to,
        *,
        iperf="iperf3",
        payload_length="DEFAULT",
    ):
        """Store the test configuration.

        :param microvm: microVM under test; iperf3 clients run inside it
        :param base_port: first port; client/server pair i uses base_port + i
        :param runtime: seconds each client transmits for (iperf3 --time)
        :param omit: warm-up seconds excluded from results (iperf3 --omit)
        :param mode: key into MODE_MAP ("g2h", "h2g" or "bd")
        :param num_clients: number of concurrent client/server pairs
        :param connect_to: host address passed to iperf3's "--client"
        :param iperf: iperf3 binary name, used for both host and guest sides
        :param payload_length: value for "--len", or "DEFAULT" to omit it
        """
        self._microvm = microvm
        self._base_port = base_port
        self._runtime = runtime
        self._omit = omit
        self._mode = mode  # entry into mode-map
        self._num_clients = num_clients
        self._connect_to = connect_to  # the "host" value to pass to "--client"
        self._payload_length = payload_length  # the value to pass to "--len"
        self._iperf = iperf
        self._guest_iperf = iperf

    def run_test(self, first_free_cpu):
        """Runs the performance test, using pinning the iperf3 servers to CPUs starting from `first_free_cpu`"""
        # Make sure enough host CPUs remain for the servers after the vCPUs
        # and VMM threads take theirs.
        assert self._num_clients < CpuMap.len() - self._microvm.vcpus_count - 2
        for server_idx in range(self._num_clients):
            cmd = self.host_command(server_idx).build()
            assigned_cpu = CpuMap(first_free_cpu)
            # Pin each server to its own host CPU and run it inside the
            # microVM's network namespace.
            utils.run_cmd(
                f"taskset --cpu-list {assigned_cpu} {self._microvm.jailer.netns_cmd_prefix()} {cmd}"
            )
            first_free_cpu += 1
        # Give the daemonized iperf3 servers time to bind their ports.
        time.sleep(SERVER_STARTUP_TIME_SEC)
        with concurrent.futures.ThreadPoolExecutor() as executor:
            futures = []
            # Sample CPU utilization of the Firecracker process concurrently
            # with the iperf3 clients.
            cpu_load_future = executor.submit(
                get_cpu_percent,
                self._microvm.jailer_clone_pid,
                # Ignore the final two data points as they are impacted by test teardown
                self._runtime - 2,
                self._omit,
            )
            for client_idx in range(self._num_clients):
                futures.append(executor.submit(self.spawn_iperf3_client, client_idx))
            data = {"cpu_load_raw": cpu_load_future.result(), "g2h": [], "h2g": []}
            for i, future in enumerate(futures):
                # Modes were assigned round-robin at spawn time; recover each
                # client's direction the same way to bucket its results.
                key = REV_MODE_MAP[MODE_MAP[self._mode][i % len(MODE_MAP[self._mode])]]
                data[key].append(json.loads(future.result()))
            return data

    def host_command(self, port_offset):
        """Builds the command used for spawning an iperf3 server on the host"""
        # -sD: run as a daemonized server; -1: handle a single client then
        # exit (one server per client pair).
        return (
            CmdBuilder(self._iperf)
            .with_arg("-sD")
            .with_arg("-p", self._base_port + port_offset)
            .with_arg("-1")
        )

    def spawn_iperf3_client(self, client_idx):
        """
        Spawns an iperf3 client within the guest. The `client_idx` determines what direction data should flow
        for this particular client (e.g. client-to-server or server-to-client)

        Returns the client's JSON output as a string.
        """
        # Distribute modes evenly
        mode = MODE_MAP[self._mode][client_idx % len(MODE_MAP[self._mode])]
        # Add the port where the iperf3 client is going to send/receive.
        cmd = self.guest_command(client_idx).with_arg(mode).build()
        # Pin clients to guest vCPUs round-robin.
        pinned_cmd = (
            f"taskset --cpu-list {client_idx % self._microvm.vcpus_count} {cmd}"
        )
        rc, stdout, stderr = self._microvm.ssh.run(pinned_cmd)
        assert rc == 0, stderr
        return stdout

    def guest_command(self, port_offset):
        """Builds the command used for spawning an iperf3 client in the guest"""
        cmd = (
            CmdBuilder(self._guest_iperf)
            .with_arg("--time", self._runtime)
            .with_arg("--json")
            .with_arg("--omit", self._omit)
            .with_arg("-p", self._base_port + port_offset)
            .with_arg("--client", self._connect_to)
        )
        # "DEFAULT" means: let iperf3 pick its own payload length.
        if self._payload_length != "DEFAULT":
            return cmd.with_arg("--len", self._payload_length)
        return cmd
def consume_iperf3_output(stats_consumer, iperf_result):
    """Consume the iperf3 data produced by the tcp/vsock throughput performance tests

    Feeds per-connection duration/throughput plus average CPU utilization into
    `stats_consumer`, and yields raw time-series datapoints as
    (name, value, unit) tuples.
    """
    for iperf3_raw in iperf_result["g2h"] + iperf_result["h2g"]:
        total_received = iperf3_raw[IPERF3_END_RESULTS_TAG]["sum_received"]
        duration = float(total_received["seconds"])
        stats_consumer.consume_data(DURATION, duration)
        # Computed at the receiving end.
        total_recv_bytes = int(total_received["bytes"])
        # bytes -> Megabits over the measured duration, rounded to 2 decimals.
        tput = round((total_recv_bytes * 8) / (1024 * 1024 * duration), 2)
        stats_consumer.consume_data(THROUGHPUT, tput)
    vmm_util, vcpu_util = summarize_cpu_percent(iperf_result["cpu_load_raw"])
    stats_consumer.consume_stat("Avg", CPU_UTILIZATION_VMM, vmm_util)
    stats_consumer.consume_stat("Avg", CPU_UTILIZATION_VCPUS_TOTAL, vcpu_util)
    # Per-interval throughput time series for each guest-to-host connection.
    for idx, time_series in enumerate(iperf_result["g2h"]):
        yield from [
            (f"{THROUGHPUT}_g2h_{idx}", x["sum"]["bits_per_second"], "Megabits/Second")
            for x in time_series["intervals"]
        ]
    # Per-interval throughput time series for each host-to-guest connection.
    for idx, time_series in enumerate(iperf_result["h2g"]):
        yield from [
            (f"{THROUGHPUT}_h2g_{idx}", x["sum"]["bits_per_second"], "Megabits/Second")
            for x in time_series["intervals"]
        ]
    # Raw per-thread CPU-load samples collected while the test ran.
    for thread_name, data in iperf_result["cpu_load_raw"].items():
        yield from [
            (f"cpu_utilization_{thread_name}", x, "Percent")
            for x in list(data.values())[0]
        ]
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,872
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_shut_down.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests scenarios for shutting down Firecracker/VM."""
import os
import platform
import time
from framework import utils
def test_reboot(test_microvm_with_api):
    """
    Test reboot from guest.
    """
    vm = test_microvm_with_api
    # Do not daemonize the jailer for this test.
    vm.jailer.daemonize = False
    vm.spawn()
    # We don't need to monitor the memory for this test because we are
    # just rebooting and the process dies before pmap gets the RSS.
    vm.memory_monitor = None
    # Set up the microVM with 4 vCPUs, 256 MiB of RAM, 0 network ifaces, and
    # a root file system with the rw permission. The network interfaces is
    # added after we get a unique MAC and IP.
    vm.basic_config(vcpu_count=4)
    vm.add_net_iface()
    vm.start()
    # Get Firecracker PID so we can count the number of threads.
    firecracker_pid = vm.jailer_clone_pid
    # Get number of threads in Firecracker
    cmd = "ps -o nlwp {} | tail -1 | awk '{{print $1}}'".format(firecracker_pid)
    _, stdout, _ = utils.run_cmd(cmd)
    nr_of_threads = stdout.rstrip()
    # 6 threads expected with 4 vCPUs -- presumably the 4 vCPU threads plus
    # VMM/API threads; confirm against Firecracker's threading model.
    assert int(nr_of_threads) == 6
    # Consume existing metrics
    lines = vm.get_all_metrics()
    assert len(lines) == 1
    # Rebooting Firecracker sends an exit event and should gracefully kill.
    # the instance.
    vm.ssh.run("reboot")
    # Poll with signal 0 (liveness check only) until the process exits.
    while True:
        # Pytest's timeout will kill the test even if the loop doesn't exit.
        try:
            os.kill(firecracker_pid, 0)
            time.sleep(0.01)
        except OSError:
            break
    # Consume existing metrics
    datapoints = vm.get_all_metrics()
    assert len(datapoints) == 2
    if platform.machine() != "x86_64":
        vm.check_log_message("Received KVM_SYSTEM_EVENT: type: 2, event: 0")
        vm.check_log_message("Vmm is stopping.")
    # Make sure that the FC process was not killed by a seccomp fault
    assert datapoints[-1]["seccomp"]["num_faults"] == 0
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,873
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tools/compare_baselines/utils/comparator.py
|
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Utility classes to compare baseline data"""
import json
import math
import sys
from utils.defs import BASELINE_FILENAME_FORMAT, CODENAME2DICT
from utils.fetcher import BaselineDirectoryFetcher
# Smallest representable float increment; added to numerator and denominator
# in relative-difference calculations to avoid division by zero.
EPS = sys.float_info.epsilon
class BaseComparator:
    """Base class for comparing baselines"""

    def __init__(self):
        # Populated by subclasses' compare() implementations.
        self._result = {}

    @property
    def result(self):
        """Return the accumulated comparison result."""
        return self._result

    def calc_diff(self, bl1, bl2):
        """Calculate difference between two baselines.

        Returns a nested dict mirroring the baselines' structure, with
        "target_diff_percentage" / "delta_percentage_diff" entries at the
        leaves.
        """
        diff = {}
        self._calc_diff(bl1, bl2, diff)
        return diff

    def _calc_diff(self, bl1, bl2, diff):
        """Go down nested structure and populate difference.

        Only keys present in both baselines are compared.
        """
        for key in bl1.keys() & bl2.keys():
            if key == "target":
                # EPS guards against division by zero for zero-valued targets.
                diff["target_diff_percentage"] = (
                    (bl2[key] + EPS) / (bl1[key] + EPS) - 1.0
                ) * 100.0
            elif key == "delta_percentage":
                diff["delta_percentage_diff"] = (
                    bl2["delta_percentage"] - bl1["delta_percentage"]
                )
            else:
                diff.setdefault(key, {})
                self._calc_diff(bl1[key], bl2[key], diff[key])

    def calc_stats(self, diff):
        """Calculate mean and unbiased standard deviation for each metric."""
        stats = {}
        for metric in diff.keys():
            stats[metric] = {}
            for key in ["target_diff_percentage", "delta_percentage_diff"]:
                aggregated = []
                self._aggregate_data(diff[metric], key, aggregated)
                mean = self._calc_mean(aggregated)
                stdev = self._calc_stdev(aggregated, mean)
                stats[metric][key] = {
                    "mean": mean,
                    "stdev": stdev,
                }
        return stats

    def _aggregate_data(self, data, key, result):
        """Aggregate all values stored under `key` in the nested dict into `result`."""
        for value in data.values():
            if key in value:
                result.append(value[key])
            else:
                self._aggregate_data(value, key, result)

    def _calc_mean(self, data):
        """Calculate mean for given list; None if the list is empty."""
        if not data:
            return None
        return sum(data) / len(data)

    def _calc_stdev(self, data, mean):
        """Calculate unbiased standard deviation for given list.

        Returns None if fewer than 2 samples are available (the unbiased
        estimator divides by n - 1; the previous implementation raised
        ZeroDivisionError for exactly one sample) or if mean is None.
        """
        if len(data) < 2 or mean is None:
            return None
        var = sum((value - mean) ** 2 for value in data)
        return math.sqrt(var / (len(data) - 1))

    def dump_json(self, fpath):
        """Dump results to JSON file"""
        dumped = json.dumps(self._result, indent=4)
        with open(fpath, "w", encoding="utf-8") as file:
            file.write(dumped)
class DirectoryComparator(BaseComparator):
    """Class for comparing baselines between directories"""

    def __init__(self, dpath1, dpath2, tests, kernels, codenames):
        """Initialize 2 BaselineDirectoryFetcher

        :param dpath1: source baseline directory
        :param dpath2: target baseline directory
        :param tests: test names to compare
        :param kernels: kernel versions to compare
        :param codenames: CPU codenames (keys into CODENAME2DICT) to compare
        """
        super().__init__()
        self._dfetcher1 = BaselineDirectoryFetcher(dpath1)
        self._dfetcher2 = BaselineDirectoryFetcher(dpath2)
        self._tests = tests
        self._kernels = kernels
        self._codenames = codenames

    def compare(self, auxiliary=False):
        """Compare data between directories

        Populates self._result. Missing files/baselines are reported to
        stderr and skipped. When `auxiliary` is True, the raw per-CPU diff is
        included alongside the aggregated stats.
        """
        result = {
            "source": self._dfetcher1.dpath,
            "target": self._dfetcher2.dpath,
        }
        # One result entry per (test, kernel) baseline file present in both
        # directories.
        for test in self._tests:
            for kernel in self._kernels:
                fname = BASELINE_FILENAME_FORMAT.format(test=test, kernel=kernel)
                fetcher1 = self._dfetcher1.get_fetcher(test, kernel)
                if fetcher1 is None:
                    print(
                        f"{fname} not found in {self._dfetcher1.dpath}",
                        file=sys.stderr,
                    )
                    continue
                fetcher2 = self._dfetcher2.get_fetcher(test, kernel)
                if fetcher2 is None:
                    print(
                        f"{fname} not found in {self._dfetcher2.dpath}",
                        file=sys.stderr,
                    )
                    continue
                result[fname] = {
                    "test": test,
                    "kernel": kernel,
                    "cpus": [],
                }
                # Compare baselines CPU by CPU.
                for codename in self._codenames:
                    cpu = CODENAME2DICT[codename]
                    baseline1 = fetcher1.get_baseline(cpu["instance"], cpu["model"])
                    if baseline1 is None:
                        print(
                            f"Baseline for {cpu['instance']} / {cpu['model']} not found"
                            f" in {fetcher1.fpath}.",
                            file=sys.stderr,
                        )
                        continue
                    baseline2 = fetcher2.get_baseline(cpu["instance"], cpu["model"])
                    if baseline2 is None:
                        print(
                            f"Baseline for {cpu['instance']} / {cpu['model']} not found"
                            f" in {fetcher2.fpath}.",
                            file=sys.stderr,
                        )
                        continue
                    diff = self.calc_diff(baseline1, baseline2)
                    stats = self.calc_stats(diff)
                    cpu_result = {
                        "instance": cpu["instance"],
                        "model": cpu["model"],
                        "stats": stats,
                    }
                    if auxiliary:
                        cpu_result["diff"] = diff
                    result[fname]["cpus"].append(cpu_result)
        self._result = result
class CpuComparator(BaseComparator):
    """Class for comparing baselines between CPUs"""

    def __init__(self, dpath, tests, kernels, codenames):
        """Initialize CPU comparator

        :param dpath: baseline directory
        :param tests: test names to compare
        :param kernels: kernel versions to compare
        :param codenames: CPU codenames; the FIRST one is the comparison base
        """
        super().__init__()
        self._dpath = dpath
        self._tests = tests
        self._kernels = kernels
        self._codenames = codenames
        self._dfetcher = BaselineDirectoryFetcher(dpath)

    def compare(self, auxiliary=False):
        """Calculate differences and statistics based on the first CPU.

        Populates self._result, keyed by baseline file path. Missing
        files/baselines are reported to stderr and skipped. When `auxiliary`
        is True, raw per-CPU diffs are included alongside the stats.
        """
        result = {}
        for test in self._tests:
            for kernel in self._kernels:
                fname = BASELINE_FILENAME_FORMAT.format(test=test, kernel=kernel)
                fetcher = self._dfetcher.get_fetcher(test, kernel)
                if fetcher is None:
                    print(
                        f"{fname} not found in {self._dfetcher.dpath}",
                        file=sys.stderr,
                    )
                    continue
                # The first codename serves as the base all others are
                # compared against.
                base_cpu = CODENAME2DICT[self._codenames[0]]
                base_instance = base_cpu["instance"]
                base_model = base_cpu["model"]
                base_baseline = fetcher.get_baseline(base_instance, base_model)
                if base_baseline is None:
                    print(
                        f"Baseline for {base_instance} / {base_model} not found"
                        f" in {fetcher.fpath}.",
                        file=sys.stderr,
                    )
                    continue
                # fmt: off
                result[fetcher.fpath] = {
                    "test": fetcher.test,
                    "kernel": fetcher.kernel,
                    "base": {
                        "instance": base_instance,
                        "model": base_model,
                    },
                    "stats": {},
                }
                if auxiliary:
                    result[fetcher.fpath]["diff"] = []
                # fmt: on
                # Note: the base codename is also in self._codenames, so the
                # base is compared to itself as well (yielding zero diffs).
                for codename in self._codenames:
                    target_cpu = CODENAME2DICT[codename]
                    target_instance = target_cpu["instance"]
                    target_model = target_cpu["model"]
                    target_baseline = fetcher.get_baseline(
                        target_instance, target_model
                    )
                    if target_baseline is None:
                        print(
                            f"Baseline for {target_instance} / {target_model} not found"
                            f" in {fetcher.fpath}.",
                            file=sys.stderr,
                        )
                        continue
                    diff = self.calc_diff(base_baseline, target_baseline)
                    if auxiliary:
                        result[fetcher.fpath]["diff"].append(
                            {
                                "instance": target_instance,
                                "model": target_model,
                                "value": diff,
                            }
                        )
                    stats = self.calc_stats(diff)
                    # Group the per-target stats per metric.
                    for metric, data in stats.items():
                        result[fetcher.fpath]["stats"].setdefault(
                            metric,
                            {
                                "target_diff_percentage": [],
                                "delta_percentage_diff": [],
                            },
                        )
                        for key in ["target_diff_percentage", "delta_percentage_diff"]:
                            result[fetcher.fpath]["stats"][metric][key].append(
                                {
                                    "instance": target_instance,
                                    "model": target_model,
                                    "value": data[key],
                                }
                            )
        self._result = result
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,874
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/http_api.py
|
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""A simple HTTP client for the Firecracker API"""
# pylint:disable=too-few-public-methods
import urllib
from http import HTTPStatus
import requests
from requests_unixsocket import DEFAULT_SCHEME, UnixAdapter
class Session(requests.Session):
    """An HTTP over UNIX sockets Session

    Wrapper over requests_unixsocket.Session that bounds the connection pool
    to stay below the server's connection limit.
    """

    def __init__(self):
        """Create a Session object."""
        super().__init__()
        # 'UnixAdapter` saves in the pool at most 'pool_connections'
        # connections. When a new request is made, the adapter tries to match
        # that request with an already existing connection from the pool, by
        # comparing their url.
        # If there's a match, then the adapter uses the connection from the
        # pool to make the new request.
        # Otherwise, a new connection is created and saved in the pool. If
        # there is no space in the pool, the new connection will replace the
        # least recently used one in the pool. The evicted connection will be
        # closed.
        #
        # The `pool_connections` argument indicates the maximum number of
        # connection saved in the pool, not the maximum number of open
        # connections allowed at the same time
        # (see https://urllib3.readthedocs.io/en/stable/advanced-usage.html).
        #
        # We set this value to be equal to micro-http's `MAX_CONNECTIONS` - 1.
        # This is because when reaching the `pool_connection` limit, it is not
        # guaranteed that the event to close the connection will be received
        # before the event that results in creating a new connection (this
        # depends on the kernel). In case the two events are not received in
        # the same order, or are received together, the server might try to add
        # a new connection before removing the old one, resulting in a
        # `SERVER_FULL_ERROR`.
        self.mount(DEFAULT_SCHEME, UnixAdapter(pool_connections=9))
class Resource:
    """An abstraction over a REST path"""

    def __init__(self, api, resource, id_field=None):
        """
        :param api: the Api object providing the endpoint and session
        :param resource: the resource path, e.g. "/drives"
        :param id_field: keyword whose value is appended to the path for
            per-instance resources (e.g. "drive_id" -> PUT /drives/<id>)
        """
        self._api = api
        self.resource = resource
        self.id_field = id_field

    def get(self):
        """Make a GET request"""
        url = self._api.endpoint + self.resource
        res = self._api.session.get(url)
        assert res.status_code == HTTPStatus.OK, res.json()
        return res

    def request(self, method, path, **kwargs):
        """Make an HTTP request

        Raises RuntimeError carrying the server's error message for any
        response other than 204 No Content (presumably the only success
        status for mutations here -- confirm against the server's API).
        """
        # Drop None-valued keys so optional arguments are simply omitted from
        # the JSON body.
        kwargs = {key: val for key, val in kwargs.items() if val is not None}
        url = self._api.endpoint + path
        res = self._api.session.request(method, url, json=kwargs)
        if res.status_code != HTTPStatus.NO_CONTENT:
            json = res.json()
            msg = res.content
            # Some endpoints report failures under "fault_message", others
            # under "error"; fall back to the raw body otherwise.
            if "fault_message" in json:
                msg = json["fault_message"]
            elif "error" in json:
                msg = json["error"]
            raise RuntimeError(msg, json, res)
        return res

    def put(self, **kwargs):
        """Make a PUT request"""
        path = self.resource
        if self.id_field is not None:
            path += "/" + kwargs[self.id_field]
        return self.request("PUT", path, **kwargs)

    def patch(self, **kwargs):
        """Make a PATCH request"""
        path = self.resource
        if self.id_field is not None:
            path += "/" + kwargs[self.id_field]
        return self.request("PATCH", path, **kwargs)
class Api:
    """A simple HTTP client for the Firecracker API"""

    def __init__(self, api_usocket_full_name):
        """
        :param api_usocket_full_name: filesystem path of the API UNIX socket
        """
        self.socket = api_usocket_full_name
        # The socket path is percent-encoded into the http+unix:// URL.
        url_encoded_path = urllib.parse.quote_plus(api_usocket_full_name)
        self.endpoint = DEFAULT_SCHEME + url_encoded_path
        self.session = Session()
        # One Resource per API endpoint; resources constructed with an
        # id_field embed that id in the request path.
        self.describe = Resource(self, "/")
        self.vm = Resource(self, "/vm")
        self.vm_config = Resource(self, "/vm/config")
        self.actions = Resource(self, "/actions")
        self.boot = Resource(self, "/boot-source")
        self.drive = Resource(self, "/drives", "drive_id")
        self.version = Resource(self, "/version")
        self.logger = Resource(self, "/logger")
        self.machine_config = Resource(self, "/machine-config")
        self.metrics = Resource(self, "/metrics")
        self.network = Resource(self, "/network-interfaces", "iface_id")
        self.mmds = Resource(self, "/mmds")
        self.mmds_config = Resource(self, "/mmds/config")
        self.balloon = Resource(self, "/balloon")
        self.balloon_stats = Resource(self, "/balloon/statistics")
        self.vsock = Resource(self, "/vsock")
        self.snapshot_create = Resource(self, "/snapshot/create")
        self.snapshot_load = Resource(self, "/snapshot/load")
        self.cpu_config = Resource(self, "/cpu-config")
        self.entropy = Resource(self, "/entropy")
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,875
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/host_tools/__init__.py
|
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Are you happy pylint?"""
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,876
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tools/parse_baselines/providers/block.py
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Implement the DataParser for block device performance tests."""
import math
import statistics
from collections.abc import Iterator
from typing import List
from providers.types import DataParser
# We add a small extra percentage margin, to account for small variations
# that were not caught while gathering baselines. This provides
# slightly better reliability, while not affecting regression
# detection.
DELTA_EXTRA_MARGIN = 4
# pylint: disable=R0903
class BlockDataParser(DataParser):
    """Parse the data provided by the block performance tests."""

    # pylint: disable=W0102
    def __init__(self, data_provider: Iterator):
        """Initialize the data parser with the block-device metrics of interest."""
        tracked_metrics = [
            "iops_read/Avg",
            "iops_write/Avg",
            "bw_read/Avg",
            "bw_write/Avg",
            "cpu_utilization_vcpus_total/Avg",
            "cpu_utilization_vmm/Avg",
        ]
        super().__init__(data_provider, tracked_metrics)

    def calculate_baseline(self, data: List[float]) -> dict:
        """Return the target and delta values, given a list of data points."""
        avg = statistics.mean(data)
        # Relative 3-sigma noise band, expressed in percent of the mean.
        noise_pct = 3 * statistics.stdev(data) / avg * 100
        return {
            "target": math.ceil(round(avg, 2)),
            # Pad the band with a small fixed margin to absorb run-to-run
            # variation not captured while gathering baselines.
            "delta_percentage": math.ceil(noise_pct) + DELTA_EXTRA_MARGIN,
        }
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,877
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/performance/test_process_startup_time.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Test that the process startup time up to socket bind is within spec."""
# pylint: disable=redefined-outer-name
import os
import time
import pytest
from framework.properties import global_props
from host_tools.cargo_build import run_seccompiler_bin
@pytest.fixture
def startup_time(metrics, record_property):
    """Fixture to capture the startup time

    Returns a callable that records a startup-time measurement both as a
    metric datapoint and as a pytest report property.
    """
    # Tag every emitted metric with the host environment.
    metrics.set_dimensions(
        {
            "instance": global_props.instance,
            "cpu_model": global_props.cpu_model,
            "host_kernel": "linux-" + global_props.host_linux_version,
        }
    )

    def record_startup_time(startup_time):
        metrics.put_metric("startup_time", startup_time, unit="Microseconds")
        record_property("startup_time_μs", startup_time)

    return record_startup_time
def test_startup_time_new_pid_ns(test_microvm_with_api, startup_time):
    """
    Check startup time when jailer is spawned in a new PID namespace.
    """
    microvm = test_microvm_with_api
    # NOTE(review): clearing bin_cloner_path presumably makes the jailer
    # spawn Firecracker without the clone wrapper -- confirm in the framework.
    microvm.bin_cloner_path = None
    microvm.jailer.new_pid_ns = True
    startup_time(_test_startup_time(microvm))
def test_startup_time_daemonize(test_microvm_with_api, startup_time):
    """
    Check startup time when jailer detaches Firecracker from the controlling terminal.
    """
    # Uses the fixture's default jailer configuration unchanged.
    microvm = test_microvm_with_api
    startup_time(_test_startup_time(microvm))
def test_startup_time_custom_seccomp(test_microvm_with_api, startup_time):
    """
    Check the startup time when using custom seccomp filters.
    """
    microvm = test_microvm_with_api
    # Compile and install a custom BPF seccomp filter before booting.
    _custom_filter_setup(microvm)
    startup_time(_test_startup_time(microvm))
def _test_startup_time(microvm):
    """Boot the microVM and return its CPU startup time in microseconds."""
    microvm.spawn()
    microvm.basic_config(vcpu_count=2, mem_size_mib=1024)
    microvm.start()
    # Give the metrics a moment to be emitted.
    time.sleep(0.4)
    # The metrics should be at index 1.
    # Since metrics are flushed at InstanceStart, the first line will suffice.
    api_metrics = microvm.get_all_metrics()[0]["api_server"]
    wall_us = api_metrics["process_startup_time_us"]
    cpu_us = api_metrics["process_startup_time_cpu_us"]
    print("Process startup time is: {} us ({} CPU us)".format(wall_us, cpu_us))
    assert cpu_us > 0
    return cpu_us
def _custom_filter_setup(test_microvm):
    """Compile a custom seccomp filter and make it available inside the jail."""
    compiled_filter = os.path.join(test_microvm.path, "bpf.out")
    # Build the BPF program, copy it into the jail and point the jailer at it.
    run_seccompiler_bin(compiled_filter)
    test_microvm.create_jailed_resource(compiled_filter)
    test_microvm.jailer.extra_args.update({"seccomp-filter": "bpf.out"})
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,878
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/host_tools/proc.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Utility functions for interacting with the processor."""
import re
from framework import utils
def proc_type():
    """Obtain the model of the processor on a Linux system.

    Returns the "model name" field from /proc/cpuinfo when present, "ARM"
    on aarch64 hosts (whose /proc/cpuinfo has no "model name" field), and
    "" otherwise.
    """
    cmd = "cat /proc/cpuinfo"
    result = utils.run_cmd(cmd)
    lines = result.stdout.strip().splitlines()
    for line in lines:
        if "model name" in line:
            # FIX: pass `count` as a keyword -- the positional form is
            # deprecated since Python 3.13.
            # NOTE(review): the returned value keeps any whitespace after the
            # ':' separator -- presumably callers tolerate that.
            return re.sub(".*model name.*:", "", line, count=1)
    cmd = "uname -m"
    result = utils.run_cmd(cmd).stdout.strip()
    if "aarch64" in result:
        return "ARM"
    return ""
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,879
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/host_tools/cpu_load.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Utilities for measuring cpu utilisation for a process."""
import time
from threading import Thread
from framework import utils
class CpuLoadExceededException(Exception):
    """A custom exception containing details on excessive cpu load."""

    def __init__(self, cpu_load_samples, threshold):
        """Compose the error message containing the cpu load details.

        :param cpu_load_samples: the samples that exceeded the threshold
        :param threshold: the cpu load threshold that was exceeded
        """
        # FIX: the two implicitly-concatenated fragments previously joined
        # without a separating space, rendering "maximumthreshold".
        super().__init__(
            f"Cpu load samples {cpu_load_samples} exceeded maximum "
            f"threshold {threshold}.\n"
        )
class CpuLoadMonitor(Thread):
    """Class to represent a cpu load monitor for a thread.

    Samples the per-thread cpu load of a Firecracker process in the
    background and records every sample that exceeds the threshold.
    """

    # Timeout used when collecting cpu load samples.
    CPU_LOAD_SAMPLES_TIMEOUT_S = 1

    def __init__(self, process_pid, thread_pid, threshold):
        """Set up monitor attributes.

        :param process_pid: pid of the process whose threads are sampled
        :param thread_pid: pid of the thread of interest
        :param threshold: cpu load (percent) above which a sample is recorded
        """
        Thread.__init__(self)
        self._process_pid = process_pid
        self._thread_pid = thread_pid
        self._cpu_load_samples = []
        self._threshold = threshold
        self._should_stop = False

    @property
    def process_pid(self):
        """Get the process pid."""
        return self._process_pid

    @property
    def thread_pid(self):
        """Get the thread pid."""
        return self._thread_pid

    @property
    def threshold(self):
        """Get the cpu load threshold."""
        return self._threshold

    @property
    def cpu_load_samples(self):
        """Get the cpu load samples."""
        return self._cpu_load_samples

    def signal_stop(self):
        """Signal that the thread should stop."""
        self._should_stop = True

    def run(self):
        """Thread for monitoring cpu load of some pid.

        It is up to the caller to check the queue.
        """
        while not self._should_stop:
            cpus = utils.ProcessManager.get_cpu_percent(self._process_pid)
            try:
                fc_threads = cpus["firecracker"]
                # There can be multiple "firecracker" threads sometimes, see #3429
                assert len(fc_threads) > 0
                for _, cpu_load in fc_threads.items():
                    # Only record offending samples.
                    if cpu_load > self._threshold:
                        self._cpu_load_samples.append(cpu_load)
            except KeyError:
                pass  # no firecracker process
            time.sleep(0.05)  # 50 milliseconds granularity.

    def check_samples(self):
        """Check that there are no samples above the threshold."""
        if len(self.cpu_load_samples) > 0:
            raise CpuLoadExceededException(self._cpu_load_samples, self._threshold)

    def __enter__(self):
        """Functions to use this CPU Load class as a Context Manager

        >>> clm = CpuLoadMonitor(1000, 1000, 45)
        >>> with clm:
        >>>     # do stuff
        """
        self.start()
        # FIX: return self so `with CpuLoadMonitor(...) as mon:` binds the
        # monitor instead of None (plain `with clm:` keeps working).
        return self

    def __exit__(self, _type, _value, _traceback):
        """Exit context: stop sampling and raise on any offending samples."""
        self.check_samples()
        self.signal_stop()
        self.join()
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,880
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_rng.py
|
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests for the virtio-rng device"""
# pylint:disable=redefined-outer-name
import pytest
from framework.properties import global_props
from framework.utils import check_entropy
# Skip the whole module on c7g.metal hosts running a 4.14 kernel: per the skip
# reason, such hosts require the dedicated no-SVE 5.10 guest kernel.
if global_props.instance == "c7g.metal" and global_props.host_linux_version == "4.14":
    pytestmark = pytest.mark.skip(reason="c7g requires no SVE 5.10 kernel")
@pytest.fixture(params=[None])
def uvm_with_rng(uvm_nano, request):
    """Fixture of a microvm with virtio-rng configured.

    Indirectly parametrizable: `request.param` is the rate-limiter
    configuration dict passed to the entropy API (None = no rate limiting).
    """
    rate_limiter = request.param
    uvm_nano.add_net_iface()
    uvm_nano.api.entropy.put(rate_limiter=rate_limiter)
    uvm_nano.start()
    # Just stuff it in the microvm so we can look at it later
    uvm_nano.rng_rate_limiter = rate_limiter
    return uvm_nano
def test_rng_not_present(uvm_nano):
    """
    Test a guest microVM *without* an entropy device and ensure that
    we cannot get data from /dev/hwrng
    """
    vm = uvm_nano
    vm.add_net_iface()
    vm.start()
    # If the guest kernel has been built with the virtio-rng module
    # the device should exist in the guest filesystem but we should
    # not be able to get random numbers out of it.
    cmd = "test -e /dev/hwrng"
    ecode, _, _ = vm.ssh.run(cmd)
    assert ecode == 0
    # Reading from the device must fail because no entropy device is attached.
    cmd = "dd if=/dev/hwrng of=/dev/null bs=10 count=1"
    ecode, _, _ = vm.ssh.run(cmd)
    assert ecode == 1
def test_rng_present(uvm_with_rng):
    """
    Test a guest microVM with an entropy defined configured and ensure
    that we can access `/dev/hwrng`
    """
    vm = uvm_with_rng
    # check_entropy asserts the guest can read random data from /dev/hwrng.
    check_entropy(vm.ssh)
def test_rng_snapshot(uvm_with_rng, microvm_factory):
    """
    Test that a virtio-rng device is functional after resuming from
    a snapshot
    """
    vm = uvm_with_rng
    # Sanity-check entropy before taking the snapshot.
    check_entropy(vm.ssh)
    snapshot = vm.snapshot_full()
    new_vm = microvm_factory.build()
    new_vm.spawn()
    new_vm.restore_from_snapshot(snapshot, resume=True)
    # The restored VM must still serve entropy through /dev/hwrng.
    check_entropy(new_vm.ssh)
def _get_percentage_difference(measured, base):
"""Return the percentage delta between the arguments."""
if measured == base:
return 0
try:
return (abs(measured - base) / base) * 100.0
except ZeroDivisionError:
# It means base and only base is 0.
return 100.0
def _throughput_units_multiplier(units):
"""
Parse the throughput units and return the multiplier that would
translate the corresponding value to Bytes/sec
"""
if units == "kB/s":
return 1000
if units == "MB/s":
return 1000 * 1000
if units == "GB/s":
return 1000 * 1000 * 1000
raise Exception("Unknown units")
def _process_dd_output(out):
    """
    Parse the output of `dd` and return the achieved throughput in
    KB/sec.
    """
    # `dd` ends its report with a line like:
    #
    #   100 bytes (100 B) copied, 0.000749912 s, 133 kB/s
    #
    # so only the last line matters, and the value plus its units are the
    # last two whitespace-separated tokens on it.
    tokens = out.splitlines()[-1].split(" ")
    value = float(tokens[-2])
    units = tokens[-1]
    return value * _throughput_units_multiplier(units) / 1000
def _get_throughput(ssh, random_bytes):
    """
    Request `random_bytes` from `/dev/hwrng` and return the achieved
    throughput in KB/sec
    """
    # Read `random_bytes` 100 times; repeating the read gives enough
    # confidence in the achieved throughput.
    exit_code, _, stderr = ssh.run(
        "dd if=/dev/hwrng of=/dev/null bs={} count=100".format(random_bytes)
    )
    assert exit_code == 0, stderr
    # dd gives its output on stderr
    return _process_dd_output(stderr)
def _check_entropy_rate_limited(ssh, random_bytes, expected_kbps):
    """
    Ask for `random_bytes` from `/dev/hwrng` in the guest and check
    that achieved throughput is within a 10% of the expected throughput.

    NOTE: 10% is an arbitrarily selected limit which should be safe enough,
    so that we don't run into many intermittent CI failures.
    """
    measured_kbps = _get_throughput(ssh, random_bytes)
    diff = _get_percentage_difference(measured_kbps, expected_kbps)
    assert diff <= 10, "Expected {} KB/s, measured {} KB/s".format(
        expected_kbps, measured_kbps
    )
def _rate_limiter_id(rate_limiter):
"""
Helper function to return a name for the rate_limiter to be
used as an id for parametrized tests.
"""
size = rate_limiter["bandwidth"]["size"]
refill_time = rate_limiter["bandwidth"]["refill_time"]
return "{} KB/sec".format(float(size) / float(refill_time))
# parametrize the RNG rate limiter
@pytest.mark.parametrize(
    "uvm_with_rng",
    [
        {"bandwidth": {"size": 1000, "refill_time": 100}},
        {"bandwidth": {"size": 10000, "refill_time": 100}},
        {"bandwidth": {"size": 100000, "refill_time": 100}},
    ],
    indirect=True,
    ids=_rate_limiter_id,
)
def test_rng_bw_rate_limiter(uvm_with_rng):
    """
    Test that rate limiter without initial burst budget works
    """
    vm = uvm_with_rng
    # _start_vm_with_rng(vm, rate_limiter)
    size = vm.rng_rate_limiter["bandwidth"]["size"]
    refill_time = vm.rng_rate_limiter["bandwidth"]["refill_time"]
    # Expected throughput in KB/s: bucket size (bytes) over refill time (ms).
    expected_kbps = size / refill_time
    # Check the rate limiter using a request size equal to the size
    # of the token bucket.
    _check_entropy_rate_limited(vm.ssh, size, expected_kbps)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,881
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_api_server.py
|
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests scenario exercising api server functionality."""
import socket
from framework.utils import run_cmd
def test_api_socket_in_use(test_microvm_with_api):
    """
    Test error message when api socket is already in use.

    This is a very frequent scenario when Firecracker cannot
    start due to the socket being left open from previous runs.
    Check that the error message is a fixed one and that it also
    contains the name of the path.
    """
    microvm = test_microvm_with_api
    cmd = "mkdir {}/run".format(microvm.chroot())
    run_cmd(cmd)
    # Occupy the API socket path before Firecracker gets a chance to bind it.
    sock = socket.socket(socket.AF_UNIX)
    sock.bind(microvm.jailer.api_socket_path())
    microvm.spawn()
    msg = "Failed to open the API socket at: /run/firecracker.socket. Check that it is not already used."
    microvm.check_log_message(msg)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,882
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_concurrency.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Ensure multiple microVMs work correctly when spawned simultaneously."""
from framework.utils import configure_mmds, populate_data_store
NO_OF_MICROVMS = 20
def test_run_concurrency_with_mmds(microvm_factory, guest_kernel, rootfs):
    """
    Spawn multiple firecracker processes to run concurrently with MMDS
    """
    # Initial MMDS contents shared by all guests.
    data_store = {
        "latest": {
            "meta-data": {
                "ami-id": "ami-12345678",
                "reservation-id": "r-fea54097",
                "local-hostname": "ip-10-251-50-12.ec2.internal",
                "public-hostname": "ec2-203-0-113-25.compute-1.amazonaws.com",
                "dummy_res": ["res1", "res2"],
            },
            "Limits": {"CPU": 512, "Memory": 512},
            "Usage": {"CPU": 12.12},
        }
    }
    microvms = []
    # Launch guests with initially populated data stores
    for index in range(NO_OF_MICROVMS):
        microvm = microvm_factory.build(guest_kernel, rootfs)
        microvm.spawn()
        microvm.add_net_iface()
        # Configure MMDS before population
        configure_mmds(microvm, iface_ids=["eth0"], version="V2")
        # Populate data store with some data prior to starting the guest
        populate_data_store(microvm, data_store)
        microvm.basic_config(vcpu_count=1, mem_size_mib=128)
        microvm.start()
        # We check that the vm is running by testing that the ssh does
        # not time out.
        microvm.ssh.run("true")
        microvms.append(microvm)
    # With all guests launched and running send a batch of
    # MMDS patch requests to all running microvms.
    for index in range(NO_OF_MICROVMS):
        test_microvm = microvms[index]
        # Patch payload: replaces the ami-id and adds many extra keys.
        dummy_json = {
            "latest": {
                "meta-data": {
                    "ami-id": "another_dummy",
                    "secret_key10": "eaasda48141411aeaeae10",
                    "secret_key11": "eaasda48141411aeaeae11",
                    "secret_key12": "eaasda48141411aeaeae12",
                    "secret_key13": "eaasda48141411aeaeae13",
                    "secret_key14": "eaasda48141411aeaeae14",
                    "secret_key15": "eaasda48141411aeaeae15",
                    "secret_key16": "eaasda48141411aeaeae16",
                    "secret_key17": "eaasda48141411aeaeae17",
                    "secret_key18": "eaasda48141411aeaeae18",
                    "secret_key19": "eaasda48141411aeaeae19",
                    "secret_key20": "eaasda48141411aeaeae20",
                }
            }
        }
        test_microvm.api.mmds.patch(json=dummy_json)
def test_run_concurrency(microvm_factory, guest_kernel, rootfs):
    """
    Check we can spawn multiple microvms.
    """
    # Each iteration keeps its VM alive (the factory owns cleanup), so all
    # NO_OF_MICROVMS guests end up running concurrently.
    for _ in range(NO_OF_MICROVMS):
        microvm = microvm_factory.build(guest_kernel, rootfs)
        microvm.spawn()
        microvm.basic_config(vcpu_count=1, mem_size_mib=128)
        microvm.add_net_iface()
        microvm.start()
        # We check that the vm is running by testing that the ssh does
        # not time out.
        microvm.ssh.run("true")
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,883
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_cmd_line_start.py
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests microvm start with configuration file as command line parameter."""
import json
import os
import platform
import re
import shutil
from pathlib import Path
import pytest
from retry.api import retry_call
from framework import utils, utils_cpuid
from framework.utils import generate_mmds_get_request, generate_mmds_session_token
# Directory with metadata JSON files
DIR = Path("./data")
def _configure_vm_from_json(test_microvm, vm_config_file):
    """
    Configure a microvm using a file sent as command line parameter.

    Create resources needed for the configuration of the microvm and
    set as configuration file a copy of the file that was passed as
    parameter to this helper function.

    :return: the configuration dict that was written into the jailed file
    """
    # since we don't use basic-config, we do it by hand
    test_microvm.create_jailed_resource(test_microvm.kernel_file)
    test_microvm.create_jailed_resource(test_microvm.rootfs_file)
    vm_config_file = Path(vm_config_file)
    obj = json.load(vm_config_file.open(encoding="UTF-8"))
    # Rewrite host paths to the jailed (chroot-relative) file names.
    obj["boot-source"]["kernel_image_path"] = str(test_microvm.kernel_file.name)
    obj["drives"][0]["path_on_host"] = str(test_microvm.rootfs_file.name)
    obj["drives"][0]["is_read_only"] = True
    vm_config = Path(test_microvm.chroot()) / vm_config_file.name
    vm_config.write_text(json.dumps(obj))
    # Tell the jailer to boot Firecracker from this config file.
    test_microvm.jailer.extra_args = {"config-file": vm_config.name}
    return obj
def _add_metadata_file(test_microvm, metadata_file):
"""
Configure the microvm using a metadata file.
Given a test metadata file this creates a copy of the file and
uses the copy to configure the microvm.
"""
vm_metadata_path = os.path.join(test_microvm.path, os.path.basename(metadata_file))
shutil.copyfile(metadata_file, vm_metadata_path)
test_microvm.metadata_file = vm_metadata_path
def _configure_network_interface(test_microvm):
    """
    Create tap interface before spawning the microVM.

    The network namespace and tap interface have to be created
    beforehand when starting the microVM from a config file.

    :param test_microvm: microvm under configuration (not yet spawned)
    """
    # Create network namespace.
    utils.run_cmd(f"ip netns add {test_microvm.jailer.netns}")
    # Create tap device, and avoid creating it in the guest since it is already
    # specified in the JSON
    test_microvm.add_net_iface(api=False)
def _build_cmd_to_fetch_metadata(ssh_connection, version, ipv4_address):
    """
    Build command to fetch metadata from the guest's side.

    The request is built based on the MMDS version configured.
    If MMDSv2 is used, a session token must be created before
    the `GET` request.
    """
    # MMDSv2 requires a session token for the GET request; v1 is token-less.
    token = None
    if version == "V2":
        token = generate_mmds_session_token(ssh_connection, ipv4_address, token_ttl=60)
    return generate_mmds_get_request(ipv4_address, token)
def _get_optional_fields_from_file(vm_config_file):
"""
Retrieve optional `version` and `ipv4_address` fields from MMDS config.
Parse the vm config json file and retrieves optional fields from MMDS
config. Default values are used for the fields that are not specified.
:return: a pair of (version, ipv4_address) fields from mmds config.
"""
# Get MMDS version and IPv4 address configured from the file.
with open(vm_config_file, encoding="utf-8") as json_file:
mmds_config = json.load(json_file)["mmds-config"]
# Default to V1 if version is not specified.
version = mmds_config.get("version", "V1")
# Set to default if IPv4 is not specified .
ipv4_address = mmds_config.get("ipv4_address", "169.254.169.254")
return version, ipv4_address
@pytest.mark.parametrize("vm_config_file", ["framework/vm_config.json"])
def test_config_start_with_api(uvm_plain, vm_config_file):
    """
    Test if a microvm configured from file boots successfully.
    """
    test_microvm = uvm_plain
    vm_config = _configure_vm_from_json(test_microvm, vm_config_file)
    test_microvm.spawn()
    assert test_microvm.state == "Running"
    # Validate full vm configuration.
    response = test_microvm.api.vm_config.get()
    assert response.json() == vm_config
@pytest.mark.parametrize("vm_config_file", ["framework/vm_config.json"])
def test_config_start_no_api(uvm_plain, vm_config_file):
    """
    Test microvm start when API server thread is disabled.
    """
    test_microvm = uvm_plain
    _configure_vm_from_json(test_microvm, vm_config_file)
    test_microvm.jailer.extra_args.update({"no-api": None})
    test_microvm.spawn()
    # Get Firecracker PID so we can check the names of threads.
    firecracker_pid = test_microvm.jailer_clone_pid
    # Get names of threads in Firecracker.
    cmd = "ps -T --no-headers -p {} | awk '{{print $5}}'".format(firecracker_pid)
    # Retry running 'ps' in case it failed to list the firecracker process
    # The regex matches any expression that contains 'firecracker' and does
    # not contain 'fc_api'
    retry_call(
        utils.search_output_from_cmd,
        fkwargs={
            "cmd": cmd,
            "find_regex": re.compile("^(?!.*fc_api)(?:.*)?firecracker", re.DOTALL),
        },
        exceptions=RuntimeError,
        tries=10,
        delay=1,
    )
@pytest.mark.parametrize(
    "vm_config_file",
    [
        "framework/vm_config_missing_vcpu_count.json",
        "framework/vm_config_missing_mem_size_mib.json",
    ],
)
def test_config_bad_machine_config(uvm_plain, vm_config_file):
    """
    Test microvm start when the `machine_config` is invalid.
    """
    test_microvm = uvm_plain
    _configure_vm_from_json(test_microvm, vm_config_file)
    test_microvm.jailer.extra_args.update({"no-api": None})
    test_microvm.spawn()
    # Each parametrized config is missing a mandatory field, so boot must fail.
    test_microvm.check_log_message("Configuration for VMM from one single json failed")
@pytest.mark.parametrize(
    "test_config",
    [
        # (config file, fail on Intel, fail on AMD, fail on aarch64)
        ("framework/vm_config_cpu_template_C3.json", False, True, True),
        ("framework/vm_config_smt_true.json", False, False, True),
    ],
)
def test_config_machine_config_params(uvm_plain, test_config):
    """
    Test microvm start with optional `machine_config` parameters.
    """
    test_microvm = uvm_plain
    # Test configuration determines if the file is a valid config or not
    # based on the CPU
    (vm_config_file, fail_intel, fail_amd, fail_aarch64) = test_config
    _configure_vm_from_json(test_microvm, vm_config_file)
    test_microvm.jailer.extra_args.update({"no-api": None})
    test_microvm.spawn()
    cpu_vendor = utils_cpuid.get_cpu_vendor()
    # Expected outcome depends on the CPU vendor / architecture we run on.
    check_for_failed_start = (
        (cpu_vendor == utils_cpuid.CpuVendor.AMD and fail_amd)
        or (cpu_vendor == utils_cpuid.CpuVendor.INTEL and fail_intel)
        or (platform.machine() == "aarch64" and fail_aarch64)
    )
    if check_for_failed_start:
        test_microvm.check_any_log_message(
            [
                "Failed to build MicroVM from Json",
                "Could not Start MicroVM from one single json",
            ]
        )
    else:
        test_microvm.check_log_message(
            "Successfully started microvm that was configured " "from one single json"
        )
@pytest.mark.parametrize("vm_config_file", ["framework/vm_config.json"])
def test_config_start_with_limit(test_microvm_with_api, vm_config_file):
    """
    Negative test for customised request payload limit.
    """
    test_microvm = test_microvm_with_api
    _configure_vm_from_json(test_microvm, vm_config_file)
    # Lower the API payload limit to 250 bytes so a 260-byte body is rejected.
    test_microvm.jailer.extra_args.update({"http-api-max-payload-size": "250"})
    test_microvm.spawn()
    assert test_microvm.state == "Running"
    cmd = "curl --unix-socket {} -i".format(test_microvm.api.socket)
    cmd += ' -X PUT "http://localhost/mmds/config"'
    cmd += ' -H "Content-Length: 260"'
    cmd += ' -H "Accept: application/json"'
    cmd += ' -d "some body"'
    # The full expected HTTP response, byte for byte.
    response = "HTTP/1.1 400 \r\n"
    response += "Server: Firecracker API\r\n"
    response += "Connection: keep-alive\r\n"
    response += "Content-Type: application/json\r\n"
    response += "Content-Length: 145\r\n\r\n"
    response += '{ "error": "Request payload with size 260 is larger than '
    response += "the limit of 250 allowed by server.\n"
    response += 'All previous unanswered requests will be dropped." }'
    _, stdout, _stderr = utils.run_cmd(cmd)
    assert stdout.encode("utf-8") == response.encode("utf-8")
@pytest.mark.parametrize("vm_config_file", ["framework/vm_config.json"])
def test_config_with_default_limit(test_microvm_with_api, vm_config_file):
    """
    Test for request payload limit.
    """
    test_microvm = test_microvm_with_api
    _configure_vm_from_json(test_microvm, vm_config_file)
    test_microvm.spawn()
    assert test_microvm.state == "Running"
    data_store = {"latest": {"meta-data": {}}}
    data_store["latest"]["meta-data"]["ami-id"] = "abc"
    test_microvm.api.mmds.put(json=data_store)
    # A 51201-byte body exceeds the default 51200-byte payload limit.
    cmd_err = "curl --unix-socket {} -i".format(test_microvm.api.socket)
    cmd_err += ' -X PUT "http://localhost/mmds/config"'
    cmd_err += ' -H "Content-Length: 51201"'
    cmd_err += ' -H "Accept: application/json"'
    cmd_err += ' -d "some body"'
    # The full expected HTTP error response, byte for byte.
    response_err = "HTTP/1.1 400 \r\n"
    response_err += "Server: Firecracker API\r\n"
    response_err += "Connection: keep-alive\r\n"
    response_err += "Content-Type: application/json\r\n"
    response_err += "Content-Length: 149\r\n\r\n"
    response_err += '{ "error": "Request payload with size 51201 is larger '
    response_err += "than the limit of 51200 allowed by server.\n"
    response_err += 'All previous unanswered requests will be dropped." }'
    _, stdout, _stderr = utils.run_cmd(cmd_err)
    assert stdout.encode("utf-8") == response_err.encode("utf-8")
def test_start_with_metadata(test_microvm_with_api):
    """
    Test if metadata from file is available via MMDS.
    """
    test_microvm = test_microvm_with_api
    metadata_file = DIR / "metadata.json"
    _add_metadata_file(test_microvm, metadata_file)
    test_microvm.spawn()
    test_microvm.check_log_message("Successfully added metadata to mmds from file")
    assert test_microvm.state == "Not started"
    # MMDS must already contain the file contents before the VM is started.
    response = test_microvm.api.mmds.get()
    with open(metadata_file, encoding="utf-8") as json_file:
        assert response.json() == json.load(json_file)
def test_start_with_metadata_limit(test_microvm_with_api):
    """
    Test that the metadata size limit is enforced when populating from a file.
    """
    test_microvm = test_microvm_with_api
    # 30 bytes is far smaller than the metadata file, so population must fail.
    test_microvm.jailer.extra_args.update({"mmds-size-limit": "30"})
    metadata_file = DIR / "metadata.json"
    _add_metadata_file(test_microvm, metadata_file)
    test_microvm.spawn()
    test_microvm.check_log_message(
        "Populating MMDS from file failed: DataStoreLimitExceeded"
    )
def test_start_with_metadata_default_limit(test_microvm_with_api):
    """
    Test that the metadata size limit defaults to the api payload limit.
    """
    test_microvm = test_microvm_with_api
    # Only the HTTP payload limit is set; MMDS must inherit it.
    test_microvm.jailer.extra_args.update({"http-api-max-payload-size": "30"})
    metadata_file = DIR / "metadata.json"
    _add_metadata_file(test_microvm, metadata_file)
    test_microvm.spawn()
    test_microvm.check_log_message(
        "Populating MMDS from file failed: DataStoreLimitExceeded"
    )
def test_start_with_missing_metadata(test_microvm_with_api):
    """
    Test if a microvm is configured with a missing metadata file.
    """
    test_microvm = test_microvm_with_api
    metadata_file = "../resources/tests/metadata_nonexisting.json"
    # Point the VM at a metadata path that was never created.
    vm_metadata_path = os.path.join(test_microvm.path, os.path.basename(metadata_file))
    test_microvm.metadata_file = vm_metadata_path
    try:
        test_microvm.spawn()
    except FileNotFoundError:
        # spawn() may raise locally; the log checks below still apply.
        pass
    finally:
        test_microvm.check_log_message(
            "Unable to open or read from the mmds content file"
        )
        test_microvm.check_log_message("No such file or directory")
def test_start_with_invalid_metadata(test_microvm_with_api):
    """
    Test if a microvm is configured with a invalid metadata file.
    """
    test_microvm = test_microvm_with_api
    metadata_file = DIR / "metadata_invalid.json"
    vm_metadata_path = os.path.join(test_microvm.path, os.path.basename(metadata_file))
    shutil.copy(metadata_file, vm_metadata_path)
    test_microvm.metadata_file = vm_metadata_path
    try:
        test_microvm.spawn()
    except FileNotFoundError:
        # spawn() may raise locally; the log checks below still apply.
        pass
    finally:
        test_microvm.check_log_message("MMDS error: metadata provided not valid json")
        test_microvm.check_log_message("EOF while parsing an object")
@pytest.mark.parametrize(
    "vm_config_file",
    ["framework/vm_config_with_mmdsv1.json", "framework/vm_config_with_mmdsv2.json"],
)
def test_config_start_and_mmds_with_api(uvm_plain, vm_config_file):
    """
    Test MMDS behavior when the microvm is configured from file.
    """
    test_microvm = uvm_plain
    _configure_vm_from_json(test_microvm, vm_config_file)
    _configure_network_interface(test_microvm)
    # Network namespace has already been created.
    test_microvm.spawn()
    assert test_microvm.state == "Running"
    data_store = {
        "latest": {
            "meta-data": {"ami-id": "ami-12345678", "reservation-id": "r-fea54097"}
        }
    }
    # MMDS should be empty by default.
    response = test_microvm.api.mmds.get()
    assert response.json() == {}
    # Populate MMDS with data.
    response = test_microvm.api.mmds.put(**data_store)
    # Ensure the MMDS contents have been successfully updated.
    response = test_microvm.api.mmds.get()
    assert response.json() == data_store
    # Get MMDS version and IPv4 address configured from the file.
    version, ipv4_address = _get_optional_fields_from_file(vm_config_file)
    # Route the MMDS address through the guest's eth0.
    cmd = "ip route add {} dev eth0".format(ipv4_address)
    _, stdout, stderr = test_microvm.ssh.run(cmd)
    assert stderr == stdout == ""
    # Fetch data from MMDS from the guest's side.
    cmd = _build_cmd_to_fetch_metadata(test_microvm.ssh, version, ipv4_address)
    cmd += "/latest/meta-data/"
    _, stdout, _ = test_microvm.ssh.run(cmd)
    assert json.loads(stdout) == data_store["latest"]["meta-data"]
    # Validate MMDS configuration.
    response = test_microvm.api.vm_config.get()
    assert response.json()["mmds-config"] == {
        "network_interfaces": ["1"],
        "ipv4_address": ipv4_address,
        "version": version,
    }
@pytest.mark.parametrize(
    "vm_config_file",
    ["framework/vm_config_with_mmdsv1.json", "framework/vm_config_with_mmdsv2.json"],
)
@pytest.mark.parametrize("metadata_file", [DIR / "metadata.json"])
def test_with_config_and_metadata_no_api(
    test_microvm_with_api, vm_config_file, metadata_file
):
    """
    Test microvm start when config/mmds and API server thread is disabled.

    Ensures the metadata is stored successfully inside the MMDS and
    is available to reach from the guest's side.
    """
    test_microvm = test_microvm_with_api
    _configure_vm_from_json(test_microvm, vm_config_file)
    _add_metadata_file(test_microvm, metadata_file)
    _configure_network_interface(test_microvm)
    # Boot without the API server: everything must come from the files.
    test_microvm.jailer.extra_args.update({"no-api": None})
    test_microvm.spawn()
    # Get MMDS version and IPv4 address configured from the file.
    version, ipv4_address = _get_optional_fields_from_file(vm_config_file)
    cmd = "ip route add {} dev eth0".format(ipv4_address)
    _, stdout, stderr = test_microvm.ssh.run(cmd)
    assert stderr == stdout == ""
    # Fetch data from MMDS from the guest's side.
    cmd = _build_cmd_to_fetch_metadata(test_microvm.ssh, version, ipv4_address)
    _, stdout, _ = test_microvm.ssh.run(cmd)
    # Compare response against the expected MMDS contents.
    assert json.loads(stdout) == json.load(Path(metadata_file).open(encoding="UTF-8"))
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,884
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_kernel_cmdline.py
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Test kernel commandline behavior."""
from framework.microvm import Serial
def test_init_params(test_microvm_with_api):
    """Correct propagation of boot args to the kernel's command line.

    Test that init's parameters (the ones present after "--") do not get
    altered or misplaced.
    """
    vm = test_microvm_with_api
    # Keep the process in the foreground so its serial console is attached.
    vm.jailer.daemonize = False
    vm.spawn()
    vm.memory_monitor = None
    # We will override the init with /bin/cat so that we try to read the
    # Ubuntu version from the /etc/issue file.
    vm.basic_config(
        vcpu_count=1,
        boot_args="console=ttyS0 reboot=k panic=1 pci=off"
        " init=/bin/cat -- /etc/issue",
    )
    vm.start()
    serial = Serial(vm)
    serial.open()
    # If the string does not show up, the test will fail.
    serial.rx(token="Ubuntu 22.04.2 LTS")
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,885
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/with_filelock.py
|
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Simple decorator so that only one process is running the decorated function
at any one time.
Caveat: two functions sharing the same name and using this decorator will use
the same lock, which may be unintended, but safe.
TBD disambiguate with the module name in that case.
"""
import functools
import tempfile
from pathlib import Path
from filelock import FileLock
def with_filelock(func):
    """Decorator so that only one process is running the decorated function at
    any one time.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # The lock file lives in the system temp dir and is keyed on the
        # function name, so cooperating processes agree on the same path.
        tmp_dir = Path(tempfile.gettempdir())
        lock_path = (tmp_dir / func.__name__).with_suffix(".lock")
        with FileLock(lock_path):
            return func(*args, **kwargs)

    return wrapper
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,886
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/artifacts.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Define classes for interacting with CI artifacts"""
import re
from dataclasses import dataclass
from pathlib import Path
from typing import Iterator
import pytest
import host_tools.network as net_tools
from framework.defs import ARTIFACT_DIR
from framework.properties import global_props
from framework.utils import get_firecracker_version_from_toml
def select_supported_kernels():
    """Select guest kernels supported by the current combination of kernel and
    instance type.
    """
    # c7g.metal hosts on a 4.14 kernel need the dedicated no-SVE 5.10 build;
    # every other combination uses the regular 5.10 kernels.
    needs_no_sve = (
        global_props.instance == "c7g.metal"
        and global_props.host_linux_version == "4.14"
    )
    five_ten = r"vmlinux-5.10-no-sve.bin" if needs_no_sve else r"vmlinux-5.10.\d+"
    return [r"vmlinux-4.14.\d+", five_ten]
def kernels(glob) -> Iterator:
    """Return supported kernels as kernels supported by the current combination of kernel and
    instance type.
    """
    patterns = select_supported_kernels()
    for kernel in sorted(ARTIFACT_DIR.rglob(glob)):
        # Yield each artifact at most once: on the first pattern it matches.
        if any(re.fullmatch(pattern, kernel.name) for pattern in patterns):
            yield kernel
def disks(glob) -> Iterator:
    """Return supported rootfs"""
    # Lazily yield matching rootfs artifacts in sorted order.
    for rootfs in sorted(ARTIFACT_DIR.glob(glob)):
        yield rootfs
def kernel_params(glob="vmlinux-*") -> Iterator:
    """Return supported kernels as pytest parameters (id = kernel file name)."""
    for kernel in kernels(glob):
        yield pytest.param(kernel, id=kernel.name)
def rootfs_params(glob="ubuntu-*.squashfs") -> Iterator:
    """Return supported rootfs as pytest parameters (id = rootfs file name)."""
    for rootfs in disks(glob=glob):
        yield pytest.param(rootfs, id=rootfs.name)
@dataclass(frozen=True, repr=True)
class FirecrackerArtifact:
    """Utility class for Firecracker binary artifacts."""

    # Path to the firecracker binary, e.g. ".../firecracker-v1.2.0".
    path: Path

    @property
    def name(self):
        """Get the Firecracker name."""
        return self.path.name

    @property
    def jailer(self):
        """Get the jailer with the same version."""
        return self.path.with_name(f"jailer-v{self.version}")

    @property
    def version(self):
        """Return Firecracker's version: `X.Y.Z`."""
        # Filenames look like "firecracker-v1.2.0": take the token after the
        # first '-' and drop the leading 'v'.
        tagged = self.path.name.split("-")[1]
        return tagged[1:]

    @property
    def version_tuple(self):
        """Return the artifact's version as a tuple `(X, Y, Z)`."""
        return tuple(map(int, self.version.split(".")))

    @property
    def snapshot_version_tuple(self):
        """Return the artifact's snapshot version as a tuple: `X.Y.0`."""
        # Snapshot versions keep major.minor and zero out the patch level.
        return self.version_tuple[:2] + (0,)

    @property
    def snapshot_version(self):
        """Return the artifact's snapshot version: `X.Y.0`.

        Due to how Firecracker maps release versions to snapshot versions, we
        have to request the minor version instead of the actual version.
        """
        return ".".join(map(str, self.snapshot_version_tuple))
def firecracker_artifacts():
    """Yield pytest params for each supported Firecracker release binary.

    Only file names shaped like `firecracker-vX.Y.Z` with a version in
    [min supported, next minor after the workspace version) are yielded.
    """
    cargo_version = get_firecracker_version_from_toml()
    min_version = (1, 3, 0)
    # Everything up to, but *not* including, the next minor release.
    max_version = (cargo_version.major, cargo_version.minor + 1, 0)
    for path in sorted(ARTIFACT_DIR.glob("firecracker/firecracker-*")):
        if re.match(r"firecracker-v(\d+)\.(\d+)\.(\d+)", path.name) is None:
            continue
        artifact = FirecrackerArtifact(path)
        if min_version <= artifact.version_tuple < max_version:
            yield pytest.param(artifact, id=artifact.name)
@dataclass(frozen=True, repr=True)
class NetIfaceConfig:
    """Network interface configuration: host tap + guest device and addresses."""

    host_ip: str = "192.168.0.1"
    guest_ip: str = "192.168.0.2"
    tap_name: str = "tap0"
    dev_name: str = "eth0"
    netmask: int = 30

    @property
    def guest_mac(self):
        """The guest MAC address, derived from the guest IP."""
        return net_tools.mac_from_ip(self.guest_ip)

    @staticmethod
    def with_id(i):
        """Build the interface config for slot *i* (subnet 192.168.i.0)."""
        subnet = f"192.168.{i}"
        return NetIfaceConfig(
            host_ip=subnet + ".1",
            guest_ip=subnet + ".2",
            tap_name=f"tap{i}",
            dev_name=f"eth{i}",
        )
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,887
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/conftest.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Pytest fixtures and redefined-outer-name don't mix well. Disable it.
# pylint:disable=redefined-outer-name
# We import some fixtures that are unused. Disable that too.
# pylint:disable=unused-import
"""Imported by pytest at the start of every test session.
# Fixture Goals
Fixtures herein are made available to every test collected by pytest. They are
designed with the following goals in mind:
- Running a test on a microvm is as easy as importing a microvm fixture.
- Adding a new microvm image (kernel, rootfs) for tests to run on is as easy as
creating a fixture that references some local paths
# Notes
- Reading up on pytest fixtures is probably needed when editing this file.
https://docs.pytest.org/en/7.2.x/explanation/fixtures.html
"""
import inspect
import os
import platform
import re
import shutil
import sys
import tempfile
from pathlib import Path
import pytest
import host_tools.cargo_build as build_tools
from framework import defs, utils
from framework.artifacts import firecracker_artifacts, kernel_params, rootfs_params
from framework.microvm import MicroVMFactory
from framework.properties import global_props
from framework.utils_cpu_templates import (
custom_cpu_templates_params,
static_cpu_templates_params,
)
from host_tools.ip_generator import network_config, subnet_generator
from host_tools.metrics import get_metrics_logger
# This codebase uses Python features available in Python 3.10 or above
if sys.version_info < (3, 10):
    raise SystemError("This codebase requires Python 3.10 or above.")
# Some tests create system-level resources; ensure we run as root.
if os.geteuid() != 0:
    raise PermissionError("Test session needs to be run as root.")
# Session-wide metrics logger used by the pytest reporting hooks below.
METRICS = get_metrics_logger()
def pytest_addoption(parser):
    """Pytest hook. Register Firecracker-specific command line options.

    `--perf-fail` turns performance-baseline mismatches into test failures.
    """
    parser.addoption(
        "--perf-fail",
        action="store_true",
        help="fail the test if the baseline does not match",
    )
@pytest.fixture(scope="function", autouse=True)
def record_props(request, record_property):
    """Attach global host properties and docstring tags to each test result.

    Runs automatically; tests never need to request it explicitly.
    """
    # Propagate every global property into the per-test report.
    # record_testsuite_property would record them once per report, but it
    # does not work with xdist, so each test carries its own copy instead
    # (just makes report files larger).
    for prop_name, prop_val in global_props.__dict__.items():
        record_property(prop_name, prop_val)
    # Split the test docstring into free-text description lines and
    # `@attr: value` tags (e.g. @type, @issue).
    docstring = inspect.getdoc(request.function)
    description = []
    tags = {}
    for line in docstring.split("\n"):
        found = re.match(r"\s*@(?P<attr>\w+):\s*(?P<value>\w+)", line)
        if found:
            tags[found["attr"]] = found["value"]
        else:
            description.append(line)
    for attr_name, attr_value in tags.items():
        record_property(attr_name, attr_value)
    record_property("description", "".join(description))
def pytest_runtest_logreport(report):
    """Pytest hook: push per-test outcome and duration metrics to CloudWatch."""
    if report.when != "call":
        return
    METRICS.set_property("result", report.outcome)
    METRICS.set_property("location", report.location)
    for prop_name, prop_val in report.user_properties:
        METRICS.set_property(prop_name, prop_val)
    METRICS.set_dimensions(
        {
            "test": report.nodeid,
            "instance": global_props.instance,
            "cpu_model": global_props.cpu_model,
            "host_kernel": "linux-" + global_props.host_linux_version,
        }
    )
    METRICS.put_metric("duration", report.duration, unit="Seconds")
    METRICS.put_metric("failed", 1 if report.outcome == "failed" else 0, unit="Count")
    METRICS.flush()
@pytest.fixture()
def metrics(request):
    """Per-test metrics logger, flushed when the test finishes.

    A fixture is used instead of the @metrics_scope decorator since the
    decorator conflicts with tests; due to how aws-embedded-metrics works
    this is per-test rather than per-session.
    Ref: https://github.com/awslabs/aws-embedded-metrics-python
    """
    logger = get_metrics_logger()
    for prop_name, prop_val in request.node.user_properties:
        logger.set_property(prop_name, prop_val)
    yield logger
    logger.flush()
@pytest.fixture
def record_property(record_property, metrics):
    """Wrap pytest's record_property so values also land in the metrics context."""

    def recorder(key, value):
        record_property(key, value)
        metrics.set_property(key, value)

    return recorder
@pytest.fixture(autouse=True, scope="session")
def test_fc_session_root_path():
    """Create and yield a unique session root directory.

    Uniqueness matters because the scheduler runs multiple pytest sessions
    concurrently on the same host.
    """
    os.makedirs(defs.DEFAULT_TEST_SESSION_ROOT_PATH, exist_ok=True)
    yield tempfile.mkdtemp(prefix="fctest-", dir=defs.DEFAULT_TEST_SESSION_ROOT_PATH)
@pytest.fixture(scope="session")
def bin_cloner_path(test_fc_session_root_path):
    """Compile and yield the `newpid_cloner` helper binary.

    Python does not interface well with the clone() syscall directly, so a
    small C helper performs it.
    """
    target = os.path.join(test_fc_session_root_path, "newpid_cloner")
    build_tools.gcc_compile("host_tools/newpid_cloner.c", target)
    yield target
@pytest.fixture(scope="session")
def bin_vsock_path(test_fc_session_root_path):
    """Compile and yield the simple vsock client/server helper binary."""
    target = os.path.join(test_fc_session_root_path, "vsock_helper")
    build_tools.gcc_compile("host_tools/vsock_helper.c", target)
    yield target
@pytest.fixture(scope="session")
def change_net_config_space_bin(test_fc_session_root_path):
    """Compile and yield a static binary that changes the MMIO config space."""
    target = os.path.join(test_fc_session_root_path, "change_net_config_space")
    build_tools.gcc_compile(
        "host_tools/change_net_config_space.c",
        target,
        extra_flags="-static",
    )
    yield target
@pytest.fixture(scope="session")
def bin_seccomp_paths(test_fc_session_root_path):
    """Build and yield the seccomp demo binaries, keyed by `demo_<name>`.

    The set consists of:
    * a jailer that receives a filter generated using seccompiler-bin;
    * a jailed binary that follows the seccomp rules;
    * jailed binaries that break the rules / panic.
    """
    build_path = Path(test_fc_session_root_path) / build_tools.CARGO_RELEASE_REL_PATH
    bin_dir = build_path / build_tools.RELEASE_BINARIES_REL_PATH
    target_triple = f"{platform.machine()}-unknown-linux-musl"
    demos = {}
    for name in ("jailer", "harmless", "malicious", "panic"):
        build_tools.cargo_build(
            build_path,
            f"--release --target {target_triple} --example seccomp_{name}",
        )
        demos[f"demo_{name}"] = bin_dir / f"examples/seccomp_{name}"
    yield demos
@pytest.fixture(scope="session")
def uffd_handler_paths(test_fc_session_root_path):
    """Build and yield the UFFD handler binaries, keyed by `<name>_handler`."""
    build_path = Path(test_fc_session_root_path) / build_tools.CARGO_RELEASE_REL_PATH
    bin_dir = build_path / build_tools.RELEASE_BINARIES_REL_PATH
    target_triple = f"{platform.machine()}-unknown-linux-musl"
    handlers = {}
    for name in ("malicious", "valid"):
        build_tools.cargo_build(
            build_path,
            f"--release --target {target_triple} --example uffd_{name}_handler",
        )
        handlers[f"{name}_handler"] = bin_dir / f"examples/uffd_{name}_handler"
    yield handlers
@pytest.fixture()
def fc_tmp_path(test_fc_session_root_path):
    """Short per-test temporary directory.

    pytest's tmp_path can produce very long paths that exceed the
    108-character UDS limit, so allocate directly under the session root.
    """
    return Path(tempfile.mkdtemp(dir=test_fc_session_root_path))
@pytest.fixture()
def microvm_factory(fc_tmp_path, bin_cloner_path):
    """Yield a MicroVMFactory and tear everything down after the test.

    The factory directory is removed after each test so that instantiating
    many microvms does not run out of space. Comment out the rmtree line
    if it helps with debugging.
    """
    factory = MicroVMFactory(fc_tmp_path, bin_cloner_path)
    yield factory
    factory.kill()
    shutil.rmtree(fc_tmp_path)
@pytest.fixture(params=firecracker_artifacts())
def firecracker_release(request, record_property):
    """Parametrize over every supported Firecracker release binary."""
    release = request.param
    record_property("firecracker_release", release.name)
    return release
@pytest.fixture(params=static_cpu_templates_params())
def cpu_template(request, record_property):
    """Parametrize over the static CPU templates supported by the vendor."""
    record_property("static_cpu_template", request.param)
    return request.param
@pytest.fixture(params=custom_cpu_templates_params())
def custom_cpu_template(request, record_property):
    """Parametrize over the dummy custom CPU templates supported by the vendor."""
    record_property("custom_cpu_template", request.param["name"])
    return request.param
@pytest.fixture(params=["Sync", "Async"])
def io_engine(request):
    """Parametrize over the supported block device io_engines."""
    # Async needs io_uring support in the host kernel.
    if request.param == "Async" and not utils.is_io_uring_supported():
        pytest.skip("io_uring not supported in this kernel")
    return request.param
@pytest.fixture
def results_dir(request):
    """
    Per-test directory into which the test can dump its results.

    Named after the test and placed inside defs.TEST_RESULTS_DIR; everything
    the test puts there will be uploaded to S3. For example

    ```py
    def test_my_file(results_dir):
        (results_dir / "output.txt").write_text("Hello World")
    ```

    results in `defs.TEST_RESULTS_DIR`/test_my_file/output.txt.
    """
    path = defs.TEST_RESULTS_DIR / request.node.originalname
    path.mkdir(parents=True, exist_ok=True)
    return path
def guest_kernel_fxt(request, record_property):
    """Shared body for the guest-kernel fixtures defined below."""
    kernel = request.param
    # Record e.g. vmlinux-5.10.167 as linux-5.10.
    record_property("guest_kernel", kernel.stem[2:])
    return kernel
def rootfs_fxt(request, record_property):
    """Shared body for the rootfs fixtures defined below."""
    rootfs = request.param
    record_property("rootfs", rootfs.name)
    return rootfs
# Fixtures for all guest kernels, and specific versions.
# They share guest_kernel_fxt as their body and differ only in the glob used
# to enumerate kernel artifacts.
guest_kernel = pytest.fixture(guest_kernel_fxt, params=kernel_params("vmlinux-*"))
guest_kernel_linux_4_14 = pytest.fixture(
    guest_kernel_fxt, params=kernel_params("vmlinux-4.14*")
)
guest_kernel_linux_5_10 = pytest.fixture(
    guest_kernel_fxt, params=kernel_params("vmlinux-5.10*")
)
# Fixtures for all Ubuntu rootfs, and specific versions.
# rootfs_rw enumerates the writable ext4 images; the others are squashfs.
rootfs = pytest.fixture(rootfs_fxt, params=rootfs_params("*.squashfs"))
rootfs_ubuntu_22 = pytest.fixture(
    rootfs_fxt, params=rootfs_params("ubuntu-22*.squashfs")
)
rootfs_rw = pytest.fixture(rootfs_fxt, params=rootfs_params("*.ext4"))
@pytest.fixture
def uvm_plain(microvm_factory, guest_kernel_linux_5_10, rootfs_ubuntu_22):
    """Non-parametrized vanilla VM: 5.10 kernel, Ubuntu 22.04 squashfs rootfs."""
    return microvm_factory.build(guest_kernel_linux_5_10, rootfs_ubuntu_22)
@pytest.fixture
def uvm_plain_rw(microvm_factory, guest_kernel_linux_5_10, rootfs_rw):
    """Non-parametrized vanilla VM: 5.10 kernel, read-write (ext4) rootfs."""
    return microvm_factory.build(guest_kernel_linux_5_10, rootfs_rw)
@pytest.fixture
def uvm_nano(uvm_plain):
    """uvm_plain preconfigured with 2 vCPUs / 256 MiB, ready to .start()."""
    uvm_plain.spawn()
    uvm_plain.basic_config(vcpu_count=2, mem_size_mib=256)
    return uvm_plain
@pytest.fixture()
def artifact_dir():
    """Location of the prebuilt CI artifacts (kernels, rootfs, binaries)."""
    return defs.ARTIFACT_DIR
@pytest.fixture
def uvm_with_initrd(
    microvm_factory, guest_kernel_linux_5_10, record_property, artifact_dir
):
    """VM booted with an initramfs instead of a rootfs.

    See file:../docs/initrd.md
    """
    initrd = artifact_dir / "initramfs.cpio"
    record_property("rootfs", initrd.name)
    uvm = microvm_factory.build(guest_kernel_linux_5_10)
    uvm.initrd_file = initrd
    yield uvm
# Backwards-compatibility alias: older tests request `test_microvm_with_api`,
# which is now simply the plain 5.10 / Ubuntu-22.04 VM fixture.
test_microvm_with_api = uvm_plain
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,888
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/.buildkite/pipeline_security.py
|
#!/usr/bin/env python3
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Generate Buildkite nightly security pipelines dynamically"""
from common import COMMON_PARSER, group, overlay_dict, pipeline_to_json

args = COMMON_PARSER.parse_args()

# Per-step defaults come from the shared CLI, optionally overridden with
# --step-param entries.
defaults = overlay_dict(
    {"instances": args.instances, "platforms": args.platforms},
    args.step_param,
)

vulnerabilities_grp = group(
    "🛡️ Vulnerabilities",
    "./tools/devtool -y test -- ../tests/integration_tests/security/test_vulnerabilities.py -m 'no_block_pr and not nonci'",
    **defaults,
)

fingerprint_grp = group(
    "🖐️ Fingerprint",
    "./tools/devtool -y test -- ../tests/integration_tests/functional/test_cpu_template_helper.py -m nonci -k test_fingerprint_change",
    **defaults,
)

# Emit the generated pipeline for Buildkite to consume.
print(pipeline_to_json({"steps": [vulnerabilities_grp, fingerprint_grp]}))
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,889
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/performance/test_network_latency.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests the network latency of a Firecracker guest."""
import json
import re
import pytest
from framework.stats import consumer, producer
from framework.stats.baseline import Provider as BaselineProvider
from framework.stats.metadata import DictProvider as DictMetadataProvider
from framework.utils import CpuMap, get_kernel_version
from integration_tests.performance.configs import defs
TEST_ID = "network_latency"
# Baseline config files are keyed by the host kernel's major.minor version.
kernel_version = get_kernel_version(level=1)
CONFIG_NAME_REL = "test_{}_config_{}.json".format(TEST_ID, kernel_version)
CONFIG_NAME_ABS = defs.CFG_LOCATION / CONFIG_NAME_REL
# ping command template: request count, interval (seconds), target address.
PING = "ping -c {} -i {} {}"
# Measurement name used when feeding stats to the consumer.
LATENCY = "latency"
# pylint: disable=R0903
class NetLatencyBaselineProvider(BaselineProvider):
    """Baseline provider for the network latency performance test."""

    def __init__(self, env_id, raw_baseline):
        """Store the raw baselines and build the lookup-tag template."""
        super().__init__(raw_baseline)
        self._tag = "baselines/{}/" + env_id + "/{}/ping"

    def get(self, metric_name: str, statistic_name: str) -> dict:
        """Return the target/delta baseline for a metric/statistic pair.

        Returns None when no baseline is defined for the key.
        """
        baseline = self._baselines.get(self._tag.format(metric_name, statistic_name))
        if not baseline:
            return None
        target = baseline.get("target")
        return {
            "target": target,
            # delta_percentage is stored as a percent of the target.
            "delta": baseline.get("delta_percentage") * target / 100,
        }
def consume_ping_output(cons, raw_data, requests):
    """Feed ping results into the stats consumer.

    Consumes the average RTT from the summary line and returns the
    per-request latency samples for the stats engine.

    Expected input shape:

    PING 8.8.8.8 (8.8.8.8) 56(84) bytes of data.
    64 bytes from 8.8.8.8: icmp_seq=1 ttl=118 time=17.7 ms
    64 bytes from 8.8.8.8: icmp_seq=2 ttl=118 time=17.7 ms

    --- 8.8.8.8 ping statistics ---
    2 packets transmitted, 2 received, 0% packet loss, time 3005ms
    rtt min/avg/max/mdev = 17.478/17.705/17.808/0.210 ms
    """
    lines = raw_data.strip().split("\n")
    assert len(lines) > 2
    # Last line, e.g.: round-trip min/avg/max/stddev = 17.478/17.705/17.808/0.210 ms
    summary = re.findall("min/avg/max/[a-z]+dev = (.+)/(.+)/(.+)/(.+) ms", lines[-1])[0]
    assert len(summary) == 4
    cons.consume_stat(st_name="Avg", ms_name=LATENCY, value=float(summary[1]))
    # One `time=... ms` sample per echo request; replies are lines 1..requests.
    samples = []
    for reply in lines[1 : requests + 1]:
        found = re.findall(".+ bytes from .+: icmp_seq=.+ ttl=.+ time=(.+) ms", reply)
        assert len(found) == 1
        samples.append(found[0])
    return [("ping_latency", float(x), "Milliseconds") for x in samples]
@pytest.mark.nonci
@pytest.mark.timeout(3600)
def test_network_latency(microvm_factory, guest_kernel, rootfs, st_core):
    """
    Test network latency for multiple vm configurations.

    Send a ping from the guest to the host.
    """
    # 30 requests at a 0.2s interval -> each producer iteration is ~6 seconds.
    requests = 30
    interval = 0.2  # Seconds

    # Create a microvm from artifacts.
    guest_mem_mib = 1024
    guest_vcpus = 1
    vm = microvm_factory.build(guest_kernel, rootfs, monitor_memory=False)
    vm.spawn(log_level="Info")
    vm.basic_config(vcpu_count=guest_vcpus, mem_size_mib=guest_mem_mib)
    iface = vm.add_net_iface()
    vm.start()

    # Check if the needed CPU cores are available. We have the API thread, VMM
    # thread and then one thread for each configured vCPU.
    assert CpuMap.len() >= 2 + vm.vcpus_count

    # Pin uVM threads to physical cores: VMM on core 0, API on core 1,
    # vCPU i on core 2 + i.
    current_cpu_id = 0
    assert vm.pin_vmm(current_cpu_id), "Failed to pin firecracker thread."
    current_cpu_id += 1
    assert vm.pin_api(current_cpu_id), "Failed to pin fc_api thread."
    current_cpu_id += 1
    # Fix: the previous loop advanced `current_cpu_id` *and* added `i`,
    # double-counting and skipping cores for vCPU 1 and up; with the single
    # vCPU used here the pinning is unchanged.
    for i in range(vm.vcpus_count):
        assert vm.pin_vcpu(i, current_cpu_id + i), f"Failed to pin fc_vcpu {i} thread."

    # is this actually needed, beyond baselines?
    guest_config = f"{guest_vcpus}vcpu_{guest_mem_mib}mb.json"
    st_core.name = TEST_ID
    st_core.iterations = 30
    st_core.custom["guest_config"] = guest_config.removesuffix(".json")

    # Wire the ping producer to the latency consumer, with baselines loaded
    # from the per-host-kernel config file.
    raw_baselines = json.loads(CONFIG_NAME_ABS.read_text("utf-8"))
    env_id = f"{st_core.env_id_prefix}/{guest_config}"
    cons = consumer.LambdaConsumer(
        metadata_provider=DictMetadataProvider(
            measurements=raw_baselines["measurements"],
            baseline_provider=NetLatencyBaselineProvider(env_id, raw_baselines),
        ),
        func=consume_ping_output,
        func_kwargs={"requests": requests},
    )
    cmd = PING.format(requests, interval, iface.host_ip)
    prod = producer.SSHCommand(cmd, vm.ssh)
    st_core.add_pipe(producer=prod, consumer=cons, tag=f"{env_id}/ping")

    # Gather results and verify pass criteria.
    st_core.run_exercise()
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,890
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_logging.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests the format of human readable logs.
It checks the response of the API configuration calls and the logs that show
up in the configured logging FIFO.
"""
import re
from pathlib import Path
from time import strptime
import pytest
# Array of supported log levels of the current logging system, ordered from
# least to most verbose. Do not change order of values inside this array:
# severity comparisons and to_formal_log_level() index into it by position.
LOG_LEVELS = ["ERROR", "WARN", "INFO", "DEBUG"]
def to_formal_log_level(log_level):
    """Convert a pretty-print log level into the related log level code.

    Maps e.g. "Warning" to the code actually emitted in log lines ("WARN").

    :param log_level: pretty formatted log level
    :return: actual level being logged, or "" for unknown input
    """
    mapping = {
        "Error": LOG_LEVELS[0],
        "Warning": LOG_LEVELS[1],
        "Info": LOG_LEVELS[2],
        "Debug": LOG_LEVELS[3],
    }
    return mapping.get(log_level, "")
def check_log_message_format(log_str, instance_id, level, show_level, show_origin):
    """Assert that a single log line matches the configured format.

    Expected shape:
        YYYY-MM-DDTHH:MM:SS.NNNNNNNNN [ID:THREAD:LEVEL:FILE:LINE] MESSAGE
    where LEVEL and FILE:LINE appear only when enabled, e.g. with thread
    name TN:
        `2018-09-09T12:52:00.123456789 [MYID:TN:WARN:/path/to/file.rs:52] warning`
    """
    timestamp, tag_and_msg = log_str.split(" ", maxsplit=1)
    # Drop the 9 nanosecond digits and the dot before parsing the timestamp.
    strptime(timestamp[:-10], "%Y-%m-%dT%H:%M:%S")
    pattern = "\\[(" + instance_id + "):(.*)"
    if show_level:
        pattern += ":(" + "|".join(LOG_LEVELS) + ")"
    if show_origin:
        pattern += ":([^:]+/[^:]+):([0-9]+)"
    pattern += "\\].*"
    matched = re.match(pattern, tag_and_msg)
    assert (
        matched is not None
    ), f"Log message ({tag_and_msg}) does not match pattern ({pattern})."
    if show_level:
        # The line's level must be at most as verbose as the configured one.
        tag_level_no = LOG_LEVELS.index(matched.group(3))
        configured_level_no = LOG_LEVELS.index(to_formal_log_level(level))
        assert tag_level_no <= configured_level_no
def test_no_origin_logs(test_microvm_with_api):
    """
    Check that logs do not contain the origin (i.e file and line number).
    """
    _test_log_config(
        microvm=test_microvm_with_api, show_level=True, show_origin=False
    )
def test_no_level_logs(test_microvm_with_api):
    """
    Check that logs do not contain the level.
    """
    _test_log_config(
        microvm=test_microvm_with_api, show_level=False, show_origin=True
    )
def test_no_nada_logs(test_microvm_with_api):
    """
    Check that logs contain neither the level nor the origin.
    """
    _test_log_config(
        microvm=test_microvm_with_api, show_level=False, show_origin=False
    )
def test_info_logs(test_microvm_with_api):
    """
    Check log output with the default minimum displayed level (Info).
    """
    _test_log_config(microvm=test_microvm_with_api)
def test_warn_logs(test_microvm_with_api):
    """
    Check log output when the minimum displayed level is Warning.
    """
    _test_log_config(microvm=test_microvm_with_api, log_level="Warning")
def test_error_logs(test_microvm_with_api):
    """
    Check log output when the minimum displayed level is Error.
    """
    _test_log_config(microvm=test_microvm_with_api, log_level="Error")
def test_log_config_failure(test_microvm_with_api):
    """
    Check passing invalid FIFOs is detected and reported as an error.
    """
    microvm = test_microvm_with_api
    microvm.spawn(log_file=None)
    microvm.basic_config()
    # Request timing needs log level Debug, which is not used here.
    microvm.time_api_requests = False
    with pytest.raises(
        RuntimeError, match=re.escape("No such file or directory (os error 2)")
    ):
        microvm.api.logger.put(
            log_path="invalid log file",
            level="Info",
            show_level=True,
            show_log_origin=True,
        )
def test_api_requests_logs(test_microvm_with_api):
    """
    Test that API requests are logged.

    Exercises Patch/Put/Get on /machine-config and /mmds, plus a failing
    boot-source Put, and checks the log file records each of them.
    """
    microvm = test_microvm_with_api
    microvm.spawn(log_file=None)
    microvm.basic_config()

    # Configure logging.
    log_path = Path(microvm.path) / "log"
    log_path.touch()
    microvm.api.logger.put(
        log_path=microvm.create_jailed_resource(log_path),
        level="Info",
        show_level=True,
        show_log_origin=True,
    )
    microvm.log_file = log_path
    # Request timing only works if log level is Debug; keep it off here.
    microvm.time_api_requests = False

    # Check that a Patch request on /machine-config is logged.
    microvm.api.machine_config.patch(vcpu_count=4)
    # We are not interested in the actual body. Just check that the log
    # message also has the string "body" in it.
    microvm.check_log_message(
        "The API server received a Patch request " 'on "/machine-config" with body'
    )

    # Check that a Put request on /machine-config is logged.
    microvm.api.machine_config.put(vcpu_count=4, mem_size_mib=128)
    microvm.check_log_message(
        "The API server received a Put request " 'on "/machine-config" with body'
    )

    # Check that a Get request on /machine-config is logged without the
    # body.
    microvm.api.machine_config.get()
    microvm.check_log_message(
        "The API server received a Get request " 'on "/machine-config".'
    )

    # Check that all requests on /mmds are logged without the body.
    dummy_json = {"latest": {"meta-data": {"ami-id": "dummy"}}}
    microvm.api.mmds.put(json=dummy_json)
    microvm.check_log_message('The API server received a Put request on "/mmds".')
    microvm.api.mmds.patch(json=dummy_json)
    microvm.check_log_message('The API server received a Patch request on "/mmds".')
    microvm.api.mmds.get()
    microvm.check_log_message('The API server received a Get request on "/mmds".')

    # Check that the fault message return by the client is also logged in the
    # FIFO.
    fault_msg = (
        "The kernel file cannot be opened: No such file or directory (os error 2)"
    )
    with pytest.raises(RuntimeError, match=re.escape(fault_msg)):
        microvm.api.boot.put(kernel_image_path="inexistent_path")
    microvm.check_log_message(
        "Received Error. "
        "Status code: 400 Bad Request. "
        "Message: {}".format(fault_msg)
    )
# pylint: disable=W0102
def _test_log_config(microvm, log_level="Info", show_level=True, show_origin=True):
    """Spawn a microvm with the given logger config and validate its log output."""
    microvm.spawn(log_file=None)
    # Request timing is only available at log level Debug.
    microvm.time_api_requests = False

    # Configure logging.
    log_path = Path(microvm.path) / "log"
    log_path.touch()
    microvm.api.logger.put(
        log_path=microvm.create_jailed_resource(log_path),
        level=log_level,
        show_level=show_level,
        show_log_origin=show_origin,
    )
    microvm.log_file = log_path
    microvm.basic_config()
    microvm.start()

    lines = microvm.log_data.splitlines()
    # First line is the startup banner; every other line must match the
    # configured format.
    if lines:
        assert lines[0].startswith("Running Firecracker")
    for line in lines[1:]:
        check_log_message_format(line, microvm.id, log_level, show_level, show_origin)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,891
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/performance/test_vsock_throughput.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests the VSOCK throughput of Firecracker uVMs."""
import json
import os
import pytest
from framework.stats import consumer, producer
from framework.stats.baseline import Provider as BaselineProvider
from framework.stats.metadata import DictProvider as DictMetadataProvider
from framework.utils import CpuMap, get_kernel_version
from framework.utils_iperf import IPerf3Test, consume_iperf3_output
from framework.utils_vsock import VSOCK_UDS_PATH, make_host_port_path
from integration_tests.performance.configs import defs
TEST_ID = "vsock_throughput"
# Baseline config files are keyed by the host kernel's major.minor version.
kernel_version = get_kernel_version(level=1)
CONFIG_NAME_REL = "test_{}_config_{}.json".format(TEST_ID, kernel_version)
CONFIG_NAME_ABS = defs.CFG_LOCATION / CONFIG_NAME_REL
# First vsock port used by iperf3; clients use consecutive offsets from it.
BASE_PORT = 5201
# How many clients/servers should be spawned per vcpu
LOAD_FACTOR = 1
# Time (in seconds) for which iperf "warms up"
WARMUP_SEC = 3
# Time (in seconds) for which iperf runs after warmup is done
RUNTIME_SEC = 20
# pylint: disable=R0903
class VsockThroughputBaselineProvider(BaselineProvider):
    """Baseline provider for the vsock throughput performance test."""

    def __init__(self, env_id, iperf_id, raw_baselines):
        """Store the raw baselines and build the lookup-tag template."""
        super().__init__(raw_baselines)
        self._tag = "baselines/{}/" + env_id + "/{}/" + iperf_id

    def get(self, metric_name: str, statistic_name: str) -> dict:
        """Return the target/delta baseline for a metric/statistic pair.

        Returns None when no baseline is defined for the key.
        """
        baseline = self._baselines.get(self._tag.format(metric_name, statistic_name))
        if not baseline:
            return None
        target = baseline.get("target")
        return {
            "target": target,
            # delta_percentage is stored as a percent of the target.
            "delta": baseline.get("delta_percentage") * target / 100,
        }
class VsockIPerf3Test(IPerf3Test):
    """IPerf3 runner for the vsock throughput performance test."""

    def __init__(self, microvm, mode, payload_length):
        # NOTE(review): the bare positional `2` forwarded to IPerf3Test is
        # opaque from here — confirm its meaning against the IPerf3Test
        # constructor signature.
        super().__init__(
            microvm,
            BASE_PORT,
            RUNTIME_SEC,
            WARMUP_SEC,
            mode,
            LOAD_FACTOR * microvm.vcpus_count,
            2,
            iperf="/usr/local/bin/iperf3-vsock",
            payload_length=payload_length,
        )

    def host_command(self, port_offset):
        """Host-side iperf3 invocation, bound to the VM's vsock UDS."""
        return (
            super()
            .host_command(port_offset)
            .with_arg("--vsock")
            .with_arg("-B", os.path.join(self._microvm.path, VSOCK_UDS_PATH))
        )

    def spawn_iperf3_client(self, client_idx):
        """Start one guest-side iperf3 client.

        Exposes the per-port UDS inside the jail and copies the vsock-enabled
        iperf3 binary into the guest before delegating to the parent.
        """
        # Bind the UDS in the jailer's root.
        self._microvm.create_jailed_resource(
            os.path.join(
                self._microvm.path,
                make_host_port_path(VSOCK_UDS_PATH, self._base_port + client_idx),
            )
        )
        # The rootfs does not have iperf3-vsock
        iperf3_guest = "/tmp/iperf3-vsock"
        self._microvm.ssh.scp_put(self._iperf, iperf3_guest)
        self._guest_iperf = iperf3_guest
        return super().spawn_iperf3_client(client_idx)

    def guest_command(self, port_offset):
        """Guest-side iperf3 invocation, switched to vsock transport."""
        return super().guest_command(port_offset).with_arg("--vsock")
def pipe(basevm, current_avail_cpu, env_id, mode, payload_length):
    """Producer/Consumer pipes generator."""
    iperf3_id = f"vsock-p{payload_length}-{mode}"
    runner = VsockIPerf3Test(basevm, mode, payload_length)

    # Baselines and measurement definitions come from the JSON config file.
    baselines = json.loads(CONFIG_NAME_ABS.read_text("utf-8"))
    metadata = DictMetadataProvider(
        baselines["measurements"],
        VsockThroughputBaselineProvider(env_id, iperf3_id, baselines),
    )

    cons = consumer.LambdaConsumer(
        metadata_provider=metadata,
        func=consume_iperf3_output,
    )
    prod = producer.LambdaProducer(
        runner.run_test, func_kwargs={"first_free_cpu": current_avail_cpu}
    )
    return cons, prod, f"{env_id}/{iperf3_id}"
@pytest.mark.nonci
@pytest.mark.timeout(1200)
@pytest.mark.parametrize("vcpus", [1, 2], ids=["1vcpu", "2vcpu"])
@pytest.mark.parametrize("payload_length", ["64K", "1024K"], ids=["p64K", "p1024K"])
@pytest.mark.parametrize("mode", ["g2h", "h2g", "bd"])
def test_vsock_throughput(
    microvm_factory,
    guest_kernel,
    rootfs,
    vcpus,
    payload_length,
    mode,
    st_core,
):
    """
    Test vsock throughput for multiple vm configurations.
    """
    # We run bi-directional tests only on uVM with more than 2 vCPus
    # because we need to pin one iperf3/direction per vCPU, and since we
    # have two directions, we need at least two vCPUs.
    if mode == "bd" and vcpus < 2:
        pytest.skip("bidrectional test only done with at least 2 vcpus")
    mem_size_mib = 1024
    vm = microvm_factory.build(guest_kernel, rootfs, monitor_memory=False)
    vm.spawn(log_level="Info")
    vm.basic_config(vcpu_count=vcpus, mem_size_mib=mem_size_mib)
    vm.add_net_iface()
    # Create a vsock device
    vm.api.vsock.put(vsock_id="vsock0", guest_cid=3, uds_path="/" + VSOCK_UDS_PATH)
    vm.start()
    guest_config = f"{vcpus}vcpu_{mem_size_mib}mb.json"
    st_core.name = TEST_ID
    st_core.custom["guest_config"] = guest_config.removesuffix(".json")
    # Check if the needed CPU cores are available. We have the API thread, VMM
    # thread and then one thread for each configured vCPU.
    assert CpuMap.len() >= 2 + vm.vcpus_count
    # Pin uVM threads to physical cores.
    current_avail_cpu = 0
    assert vm.pin_vmm(current_avail_cpu), "Failed to pin firecracker thread."
    current_avail_cpu += 1
    assert vm.pin_api(current_avail_cpu), "Failed to pin fc_api thread."
    for i in range(vm.vcpus_count):
        current_avail_cpu += 1
        assert vm.pin_vcpu(i, current_avail_cpu), f"Failed to pin fc_vcpu {i} thread."
    # iperf3 workers get pinned starting at the first CPU after the vCPUs.
    cons, prod, tag = pipe(
        vm,
        current_avail_cpu + 1,
        f"{st_core.env_id_prefix}/{guest_config}",
        mode,
        payload_length,
    )
    st_core.add_pipe(prod, cons, tag)
    # Start running the commands on guest, gather results and verify pass
    # criteria.
    st_core.run_exercise()
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,892
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tools/create_snapshot_artifact/main.py
|
#!/usr/bin/env python3
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Script used to generate snapshots of microVMs."""
import json
import os
import re
import shutil
import sys
import tempfile
from pathlib import Path
# Hack to be able to import testing framework functions.
sys.path.append(os.path.join(os.getcwd(), "tests")) # noqa: E402
# pylint: disable=wrong-import-position
# The test infra assumes it is running from the `tests` directory.
os.chdir("tests")
from framework.artifacts import disks, kernels
from framework.defs import DEFAULT_TEST_SESSION_ROOT_PATH
from framework.microvm import MicroVMFactory
from framework.utils import (
generate_mmds_get_request,
generate_mmds_session_token,
run_cmd,
)
from framework.utils_cpuid import CpuVendor, get_cpu_vendor
# restore directory
os.chdir("..")
# Default IPv4 address to route MMDS requests.
IPV4_ADDRESS = "169.254.169.254"
# Guest interface used for the MMDS route (presumably the 4th iface added
# below — verify against create_snapshots).
NET_IFACE_FOR_MMDS = "eth3"
# Path to the VM configuration file.
VM_CONFIG_FILE = "tools/create_snapshot_artifact/complex_vm_config.json"
# Root directory for the snapshot artifacts.
SNAPSHOT_ARTIFACTS_ROOT_DIR = "snapshot_artifacts"
def populate_mmds(microvm, data_store):
    """Upload *data_store* into the microVM's MMDS and verify it persisted."""
    # The data store must start out empty.
    assert microvm.api.mmds.get().json() == {}

    # Upload the payload.
    microvm.api.mmds.put(**data_store)

    # Read it back to confirm the contents are persistent.
    assert microvm.api.mmds.get().json() == data_store
def validate_mmds(ssh_connection, data_store):
    """Verify the guest can fetch *data_store* from MMDS over the network."""
    # Route MMDS requests through the dedicated interface.
    route_cmd = "ip route add {} dev {}".format(IPV4_ADDRESS, NET_IFACE_FOR_MMDS)
    _, stdout, stderr = ssh_connection.run(route_cmd)
    assert stdout == "" and stderr == ""

    # Grab a session token, then fetch the metadata and compare.
    token = generate_mmds_session_token(ssh_connection, IPV4_ADDRESS, token_ttl=60)
    fetch_cmd = generate_mmds_get_request(IPV4_ADDRESS, token=token)
    _, stdout, _ = ssh_connection.run(fetch_cmd)
    assert json.loads(stdout) == data_store
def main():
    """
    Run the main logic.
    Create snapshot artifacts from complex microVMs with all Firecracker's
    functionality enabled. The kernels are parametrized to include all guest
    supported versions.
    Artifacts are saved in the following format:
    snapshot_artifacts
    |
    -> <guest_kernel_supported_0>_<cpu_template>_guest_snapshot
    |
    -> vm.mem
    -> vm.vmstate
    -> ubuntu-18.04.id_rsa
    -> ubuntu-18.04.ext4
    -> <guest_kernel_supported_1>_<cpu_template>_guest_snapshot
    |
    ...
    """
    # Create directory dedicated to store snapshot artifacts for
    # each guest kernel version.
    print("Cleanup")
    shutil.rmtree(SNAPSHOT_ARTIFACTS_ROOT_DIR, ignore_errors=True)
    root_path = tempfile.mkdtemp(dir=DEFAULT_TEST_SESSION_ROOT_PATH)
    vm_factory = MicroVMFactory(root_path, None)
    # C3/T2/T2S CPU templates are only exercised on Intel hosts.
    cpu_templates = ["None"]
    if get_cpu_vendor() == CpuVendor.INTEL:
        cpu_templates.extend(["C3", "T2", "T2S"])
    # Snapshot every (cpu_template, guest kernel, rootfs) combination.
    for cpu_template in cpu_templates:
        for kernel in kernels(glob="vmlinux-*"):
            for rootfs in disks(glob="ubuntu-*.squashfs"):
                print(kernel, rootfs, cpu_template)
                vm = vm_factory.build()
                create_snapshots(vm, rootfs, kernel, cpu_template)
def create_snapshots(vm, rootfs, kernel, cpu_template):
    """Snapshot microVM built from vm configuration file.

    Boots *vm* from the adapted JSON config, populates and validates MMDS,
    checks connectivity on all ifaces, takes a diff snapshot and stores the
    artifacts under SNAPSHOT_ARTIFACTS_ROOT_DIR.
    """
    # Get ssh key from read-only artifact.
    vm.ssh_key = rootfs.with_suffix(".id_rsa")
    vm.rootfs_file = rootfs
    vm.kernel_file = kernel
    # adapt the JSON file: point it at this kernel/rootfs pair and the
    # requested CPU template.
    vm_config_file = Path(VM_CONFIG_FILE)
    obj = json.load(vm_config_file.open(encoding="UTF-8"))
    obj["boot-source"]["kernel_image_path"] = kernel.name
    obj["drives"][0]["path_on_host"] = rootfs.name
    obj["drives"][0]["is_read_only"] = True
    obj["machine-config"]["cpu_template"] = cpu_template
    vm.create_jailed_resource(vm_config_file)
    # Write the adapted config into the chroot so the jailed Firecracker
    # sees the modified copy.
    vm_config = Path(vm.chroot()) / vm_config_file.name
    vm_config.write_text(json.dumps(obj))
    vm.jailer.extra_args = {"config-file": vm_config_file.name}
    # since we are using a JSON file, we need to do this manually
    vm.create_jailed_resource(rootfs)
    vm.create_jailed_resource(kernel)
    # Create network namespace.
    run_cmd(f"ip netns add {vm.jailer.netns}")
    for i in range(4):
        vm.add_net_iface(api=False)
    vm.spawn(log_level="Info")
    # Ensure the microVM has started.
    assert vm.state == "Running"
    # Populate MMDS.
    data_store = {
        "latest": {
            "meta-data": {
                "ami-id": "ami-12345678",
                "reservation-id": "r-fea54097",
                "local-hostname": "ip-10-251-50-12.ec2.internal",
                "public-hostname": "ec2-203-0-113-25.compute-1.amazonaws.com",
            }
        }
    }
    populate_mmds(vm, data_store)
    # Iterate and validate connectivity on all ifaces after boot.
    for i in range(4):
        exit_code, _, _ = vm.ssh_iface(i).run("sync")
        assert exit_code == 0
    # Validate MMDS.
    validate_mmds(vm.ssh, data_store)
    # Snapshot the microVM.
    snapshot = vm.snapshot_diff()
    # Create snapshot artifacts directory specific for the kernel version used.
    guest_kernel_version = re.search("vmlinux-(.*)", kernel.name)
    snapshot_artifacts_dir = (
        Path(SNAPSHOT_ARTIFACTS_ROOT_DIR)
        / f"{guest_kernel_version.group(1)}_{cpu_template}_guest_snapshot"
    )
    snapshot_artifacts_dir.mkdir(parents=True)
    snapshot.save_to(snapshot_artifacts_dir)
    print(f"Copied snapshot to: {snapshot_artifacts_dir}.")
    vm.kill()
# Script entry point.
if __name__ == "__main__":
    main()
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,893
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/performance/test_boottime.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests that ensure the boot time to init process is within spec."""
# pylint:disable=redefined-outer-name
import re
import time
import pytest
from framework.properties import global_props
# The maximum acceptable boot time in us.
MAX_BOOT_TIME_US = 150000
# Regex for obtaining boot time from some string.
TIMESTAMP_LOG_REGEX = r"Guest-boot-time\s+\=\s+(\d+)\s+us"
# Kernel boot arguments used for the boottime SLA measurement.
DEFAULT_BOOT_ARGS = (
    "reboot=k panic=1 pci=off nomodules 8250.nr_uarts=0"
    " i8042.noaux i8042.nomux i8042.nopnp i8042.dumbkbd"
)
# Metric dimensions identifying the host environment under test.
DIMENSIONS = {
    "instance": global_props.instance,
    "cpu_model": global_props.cpu_model,
    "host_kernel": "linux-" + global_props.host_linux_version,
}
@pytest.fixture
def fast_microvm(microvm_factory, guest_kernel_linux_4_14, rootfs_rw):
    """Microvm used for the boottime SLA.

    Guest kernel: 4.14. Rootfs: Ubuntu 22.04 ext4. Using ext4 seems to
    result in a faster boot than with squashfs, probably because squashfs
    costs CPU time for decompression and extraction into memory.
    """
    vm = microvm_factory.build(kernel=guest_kernel_linux_4_14, rootfs=rootfs_rw)
    return vm
def test_no_boottime(test_microvm_with_api):
    """
    Check that boot timer device is not present by default.
    """
    vm = test_microvm_with_api
    _configure_and_run_vm(vm)
    # microvm.start() ensures that the vm is in Running mode, so there is no
    # need to sleep and wait for a log message before inspecting the log.
    assert not re.findall(TIMESTAMP_LOG_REGEX, vm.log_data)
# temporarily disable this test in 6.1
@pytest.mark.xfail(
    global_props.host_linux_version == "6.1",
    reason="perf regression under investigation",
)
@pytest.mark.skipif(
    global_props.cpu_codename == "INTEL_SKYLAKE"
    and global_props.host_linux_version == "5.10",
    reason="perf regression under investigation",
)
def test_boottime_no_network(fast_microvm, record_property, metrics):
    """
    Check boot time of microVM without a network device.
    """
    vm = fast_microvm
    # Enable the boot timer device before configuring/booting.
    vm.jailer.extra_args.update({"boot-timer": None})
    _configure_and_run_vm(vm)

    boottime_us = _get_microvm_boottime(vm)
    print(f"Boot time with no network is: {boottime_us} us")

    # Record the measurement for reporting, then enforce the SLA.
    record_property("boottime_no_network", f"{boottime_us} us < {MAX_BOOT_TIME_US} us")
    metrics.set_dimensions(DIMENSIONS)
    metrics.put_metric("boot_time", boottime_us, unit="Microseconds")
    failure_msg = f"boot time {boottime_us} cannot be greater than: {MAX_BOOT_TIME_US} us"
    assert boottime_us < MAX_BOOT_TIME_US, failure_msg
# temporarily disable this test in 6.1
@pytest.mark.xfail(
    global_props.host_linux_version == "6.1",
    reason="perf regression under investigation",
)
@pytest.mark.skipif(
    global_props.cpu_codename == "INTEL_SKYLAKE"
    and global_props.host_linux_version == "5.10",
    reason="perf regression under investigation",
)
def test_boottime_with_network(fast_microvm, record_property, metrics):
    """Check boot time of microVM with a network device."""
    vm = fast_microvm
    # Enable the boot timer device before configuring/booting.
    vm.jailer.extra_args.update({"boot-timer": None})
    _configure_and_run_vm(vm, network=True)

    boottime_us = _get_microvm_boottime(vm)
    print(f"Boot time with network configured is: {boottime_us} us")

    # Record the measurement for reporting, then enforce the SLA.
    record_property(
        "boottime_with_network", f"{boottime_us} us < {MAX_BOOT_TIME_US} us"
    )
    metrics.set_dimensions(DIMENSIONS)
    metrics.put_metric("boot_time_with_net", boottime_us, unit="Microseconds")
    failure_msg = f"boot time {boottime_us} cannot be greater than: {MAX_BOOT_TIME_US} us"
    assert boottime_us < MAX_BOOT_TIME_US, failure_msg
def test_initrd_boottime(uvm_with_initrd, record_property, metrics):
    """
    Check boot time of microVM when using an initrd.
    """
    vm = uvm_with_initrd
    # Enable the boot timer device, then boot with an initrd (no root drive).
    vm.jailer.extra_args.update({"boot-timer": None})
    _configure_and_run_vm(vm, initrd=True)

    boottime_us = _get_microvm_boottime(vm)
    print(f"Boot time with initrd is: {boottime_us} us")

    # Record the measurement; there is no SLA threshold for the initrd case.
    record_property("boottime_initrd", f"{boottime_us} us")
    metrics.set_dimensions(DIMENSIONS)
    metrics.put_metric("boot_time_with_initrd", boottime_us, unit="Microseconds")
def _get_microvm_boottime(vm):
"""Auxiliary function for asserting the expected boot time."""
boot_time_us = 0
timestamps = []
for _ in range(10):
timestamps = re.findall(TIMESTAMP_LOG_REGEX, vm.log_data)
if timestamps:
break
time.sleep(0.1)
if timestamps:
boot_time_us = int(timestamps[0])
assert boot_time_us > 0
return boot_time_us
def _configure_and_run_vm(microvm, network=False, initrd=False):
    """Configure the SLA machine profile on *microvm* and boot it."""
    microvm.spawn()

    # Machine configuration specified in the SLA: 1 vCPU, 128 MiB.
    kwargs = {
        "vcpu_count": 1,
        "mem_size_mib": 128,
        "boot_args": DEFAULT_BOOT_ARGS + " init=/usr/local/bin/init",
    }
    if initrd:
        kwargs.update(add_root_device=False, use_initrd=True)
    microvm.basic_config(**kwargs)

    if network:
        microvm.add_net_iface()
    microvm.start()
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,894
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/style/test_licenses.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests checking against the existence of licenses in each file."""
import datetime
from framework import utils
# Accept any Amazon copyright year from 2018 through the current year.
AMAZON_COPYRIGHT_YEARS = range(2018, datetime.datetime.now().year + 1)
AMAZON_COPYRIGHT = (
    "Copyright {} Amazon.com, Inc. or its affiliates. All Rights Reserved."
)
AMAZON_LICENSE = "SPDX-License-Identifier: Apache-2.0"
CHROMIUM_COPYRIGHT = "Copyright 2017 The Chromium OS Authors. All rights reserved."
CHROMIUM_LICENSE = (
    "Use of this source code is governed by a BSD-style license that can be"
)
TUNTAP_COPYRIGHT = (
    "Copyright TUNTAP, 2017 The Chromium OS Authors. All rights reserved."
)
TUNTAP_LICENSE = (
    "Use of this source code is governed by a BSD-style license that can be"
)
ALIBABA_COPYRIGHT = "Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved."
ALIBABA_LICENSE = "SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause"
# Directory names skipped when collecting files to check.
EXCLUDE = ["build", ".kernel", ".git"]
def _has_amazon_copyright(string):
    """Return True if *string* contains an Amazon copyright for any accepted year."""
    return any(
        AMAZON_COPYRIGHT.format(year) in string for year in AMAZON_COPYRIGHT_YEARS
    )
def _look_for_license(file, license_msg):
line = file.readline()
while line.startswith("//") or line.startswith("#"):
if license_msg in line:
return True
line = file.readline()
return False
def _validate_license(filename):
    """
    Validate license of a .rs/.py or .sh file.

    Scans the file's leading lines for a recognized copyright notice, then
    checks that the matching license line follows it in the same comment
    block. Python and Rust files should have the licenses on the first 2
    lines; shell files' license is located on lines 3-4 to account for the
    shebang.

    Returns True when a known copyright/license pair is found, False
    otherwise (including files with no copyright line at all).
    """
    with open(filename, "r", encoding="utf-8") as file:
        # Find the copyright line; readline() returns "" at EOF.
        while True:
            line = file.readline()
            if line.startswith(("// Copyright", "# Copyright")):
                copyright_info = line
                break
            if line == "":
                return False
        has_amazon_copyright = _has_amazon_copyright(
            copyright_info
        ) and _look_for_license(file, AMAZON_LICENSE)
        has_chromium_copyright = (
            CHROMIUM_COPYRIGHT in copyright_info
            and _look_for_license(file, CHROMIUM_LICENSE)
        )
        has_tuntap_copyright = TUNTAP_COPYRIGHT in copyright_info and _look_for_license(
            file, CHROMIUM_LICENSE
        )
        has_alibaba_copyright = (
            ALIBABA_COPYRIGHT in copyright_info
            and _look_for_license(file, ALIBABA_LICENSE)
        )
        return (
            has_amazon_copyright
            or has_chromium_copyright
            or has_tuntap_copyright
            or has_alibaba_copyright
        )
    # NOTE: the original had an unreachable `return True` here — every path
    # through the `with` block already returns, so it was dead code and has
    # been removed.
def test_for_valid_licenses():
    """
    Test that all *.py, *.rs and *.sh files contain a valid license.
    """
    # Same collection order as before: rust, then python, then bash.
    bad_files = []
    for pattern in ("*.rs", "*.py", "*.sh"):
        for file in utils.get_files_from(
            find_path="..", pattern=pattern, exclude_names=EXCLUDE
        ):
            if _validate_license(file) is False:
                bad_files.append(file)
    assert not bad_files, f"Files {bad_files} have invalid licenses"
# Allow running the license check directly, outside of pytest.
if __name__ == "__main__":
    test_for_valid_licenses()
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,895
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/stats/__init__.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Single threaded producer/consumer for statistics gathering.
The purpose of this module is to provide primitives for statistics exercises
which need a common framework that sets expectations in terms of tests
design and results.
The main components of the module consist of: `Core`, `Producer` and
`Consumer`, `ComparisonCriteria`, metadata providers and baselines providers.
The `Core` is the component which drives the interaction between `Producer`
and `Consumer`. The `Producer` goal is to pass raw data to the `Consumer`,
while the `Consumer` is responsible for raw data processing and transformation.
Metadata and baselines providers are independently used by the `Consumer` to
get measurements and statistics definitions relevant in the processing and
transformation step. In the end, the processing and transformation step
makes use of comparison criteria, present in statistics definitions,
which will assert expectations in terms of exercise end result.
# Test design
Lets create a test using the above components. The test will answer to two
questions:
1. What is the sum of 10 randomly generated integers, between 0 and 100,
fetched with `randint` module?
2. What is the 10th randomly generated integer, between 0 and 100, fetched with
`randint` module?
We can define two exercises from the above questions, so lets call them
`10RandomIntsSumExercise` and `10thRandomIntExercise`. The test logic starts
with defining raw data producers for both exercises. The producer definition
depends on the chosen implementation. We're going to use the `LambdaProducer`.
This producer needs a function which produces the raw data.
```
from random import randint
from framework.stats.producer import LambdaProducer
st_prod_func = lambda llimit, ulimit: randint(llimit, ulimit)
st_prod = LambdaProducer(
func=st_prod_func,
func_kwargs={"llimit": 0, "ulimit": 99}
)
```
Next up, we need to define consumers for the `st_prod`. For the
`10RandomIntsSumExercise`, the consumer must process 10 random integers and
sum them up, while for the `10thRandomIntExercise`, the consumer must process
the 10th random generated integer and return it. `Consumer`s definitions
depend largely on the chosen consumer implementations. We're going to use the
`LambdaConsumer`. To define a `LambdaConsumer` we need the following resources:
1. Measurements definitions: provided through metadata and baselines providers
or through the `Consumer`s `set_measurement_def` interface. They can be
hardcoded in the test logic or programmatically generated. We're going to
use here the programmatic alternative, where measurements definitions
will be found in a global config dictionary, processed through programmatic
means.
2. A function that processes and transforms the data coming from the
`st_prod`.
Let's lay down our measurements definitions first inside the test global
configuration dictionary. The dictionary consists from measurements
definitions and from baselines, which are going to be used for setting up
pass criteria for measurements statistics.
```
CONFIG = {
"measurements": {
# This is a map where keys represent the exercise id while the
# values represent a map from measurements name to measurements
# definition. The values follow the expected `DictProvider` schema.
"10RandomIntsSumExercise": {
"ints": { # Measurement name.
"unit": "none", # We do not have a specific measurement unit.
"statistics": [
{
# By default, the statistic definition name is the
# function name.
"function": "Sum",
"criteria": "LowerThan"
}
]
}
},
"10thRandomIntExercise": {
"int": {
"unit": "none", # We do not have a specific measurement unit.
"statistics": [
{
# The function below simply acts like a no-op on top of
# the result provided by the `Producer`. It is mainly
# useful when consuming statistics results (which do
# not need further processing).
"function": "ValuePlaceholder",
"criteria": "GreaterThan",
}
]
}
}
},
"baselines": {
"10RandomIntsSumExercise": {
"ints": {
# Info about the environment that generated the data.
"randint": {
"Sum": {
"target": 600,
}
}
}
},
"10thRandomIntExercise": {
"int": {
"randint": {
"value": {
"target": 50,
}
}
}
}
}
}
```
We'll continue by implementing the metadata and baseline providers. The
measurements definitions from the global configuration dictionary
will be processed by the `DictProvider` metadata provider. The measurements
definitions schema can be found in the `DictProvider` documentation.
```
from framework.stats.metadata import DictProvider as DictMetadataProvider
from framework.stats.baseline import Provider as BaselineProvider
from framework.utils import DictQuery
# The baseline provider is a requirement for the `DictProvider`.
class RandintBaselineProvider(BaselineProvider):
def __init__(self, exercise_id, env_id):
super().__init__(DictQuery({}))
if "baselines" in CONFIG:
super().__init__(DictQuery(CONFIG["baselines"][exercise_id]))
self._tag = "{}/" + env_id + "/{}"
def get(self, ms_name: str, st_name: str) -> dict:
key = self._tag.format(ms_name, st_name)
baseline = self._baselines.get(key)
if baseline:
target = baseline.get("target")
return {
"target": target,
}
return None
baseline_provider_sum = RandintBaselineProvider(
"10RandomIntsSumExercise",
"randint")
baseline_provider_10th = RandintBaselineProvider(
"10thRandomIntExercise",
"randint")
metadata_provider_sum = DictMetadataProvider(
CONFIG["measurements"]["10RandomIntsSumExercise"],
baseline_provider_sum)
metadata_provider_10th = DictMetadataProvider(
CONFIG["measurements"]["10thRandomIntExercise"],
baseline_provider_10th)
```
The alternative here would be to manually define our measurements and pass
them to the `LambdaConsumer` at a later step. Depending on the magnitude of
the exercise, this alternative might be preferred over the other. Here's how
it can be done:
```
from framework.utils import DictQuery
from framework.stats.function import FunctionFactory
from framework.stats.criteria import CriteriaFactory
from framework.stats.types import MeasurementDef, StatisticDef
def baseline(ms_name: str, st_name: str, exercise_id: str):
baselines = DictQuery(CONFIG["baselines"][exercise_id])
target = baselines.get(f"{ms_name}/randint/{st_name}/target")
return {
"target": target
}
def measurements(exercise_id: str):
ms_list = []
for ms_name in CONFIG["measurements"][exercise_id]:
st_list = []
unit = CONFIG["measurements"][exercise_id][ms_name]["unit"]
st_defs = CONFIG["measurements"][exercise_id][ms_name]["statistics"]
for st_def in st_defs:
func_cls_id = st_def["function"]
func_cls = FunctionFactory.get(func_cls_id)
criteria_cls_id = st_def["criteria"]
criteria_cls = CriteriaFactory.get(criteria_cls_id)
bs = baseline(ms_name, func_cls.__name__, exercise_id)
st_list.append(StatisticDef(func_cls(), criteria_cls(bs)))
ms_list.append(MeasurementDef(ms_name, unit, st_list))
return ms_list
```
Next, having our measurements definitions laid out, we can start defining
`LambdaConsumer`s functions. The functions are strictly related to
`LambdaProducer` function, so in our case we need to process an integer
coming from the producer.
```
# The following function is consuming data points, pertaining to measurements
# defined above.
st_cons_sum_func = lambda cons, res: cons.consume_data("ints", res)
# Here we consume a statistic value directly. Statistics can be both consumed
# or computed based on their measurement data points, consumed via the
# `Consumer`s `consume_data` interface.
st_cons_10th_func = lambda cons, res: cons.consume_stat("value", "int", res)
```
We can now define our `LambdaConsumer`s for both exercises:
1. Through the metadata and baseline providers.
```
from framework.stats.consumer import LambdaConsumer
st_cons_sum = LambdaConsumer(
st_cons_sum_func,
metadata_provider=metadata_provider_sum)
st_cons_10th = LambdaConsumer(
st_cons_10th_func,
metadata_provider=metadata_provider_10th)
```
2. By setting the measurements definitions separately:
```
from framework.stats.consumer import LambdaConsumer
from framework.utils import eager_map
st_cons_sum = LambdaConsumer(st_cons_sum_func)
id_sum = "10RandomIntsSumExercise"
id_10th = "10thRandomIntExercise"
eager_map(st_cons_sum.set_measurement_def, measurements(id_sum))
st_cons_10th = LambdaConsumer(st_cons_10th_func)
eager_map(st_cons_10th.set_measurement_def, measurements(id_10th))
```
Once we have defined our producers and consumers, we will continue by
defining the statistics `Core`.
```
from framework.stats.core import Core
# Both exercises require the core to drive both producers and consumers for
# 10 iterations to achieve the wanted result.
st_core = Core(name="randint_observation", iterations=10)
st_core.add_pipe(st_prod, st_cons_sum, tag="10RandomIntsSumExercise")
st_core.add_pipe(st_prod, st_cons_10th, tag="10thRandomIntExercise")
```
Let's start the exercise without verifying the criteria:
```
# Start the exercise without checking the criteria.
st_core.run_exercise(check_criteria=False)
```
Output:
```
{
'name': 'randint_observation',
'iterations': 10,
'results': {
'10RandomIntsSumExercise': {
'ints': {
'_unit': 'none',
'Sum': 454
}
},
'10thRandomIntExercise': {
'10thRandomIntExercise': {
'_unit': 'none',
'value': 12
}
}
},
'custom': {}
}
```
Now, verifying the criteria:
```
# Start the exercise without checking the criteria.
st_core.run_exercise()
```
Output for failure on `10RandomIntsSumExercise`:
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/iul/iul_fc/tests/framework/statistics/core.py", line 63,
in run_exercise
assert False, f"Failed on '{tag}': {err.msg}"
AssertionError: Failed on '10RandomIntsSumExercise': 'ints/Sum':
LowerThan failed. Target: '600 vs Actual: '892'.
```
Output for failure on `10thRandomIntExercise`:
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/iul/iul_fc/tests/framework/statistics/core.py", line 63,
in run_exercise
assert False, f"Failed on '{tag}': {err.msg}"
AssertionError: Failed on '10thRandomIntExercise': 'int/value': GreaterThan
failed. Target: '50 vs Actual: '42'.
```
# Custom producer information
Important mentions which were not caught in the test design above is the
`consume_custom` interface offered by the `Consumer`. Sometimes we
need to store per iteration custom information, which might be relevant for
analyzing the `Producer` raw data (e.g while debugging). In the above case we
might want to produce as well information specific to the PRNG state. Let's
modify the producer to do this as well:
```
import random
from framework.stats.producer import LambdaProducer
def st_prod_func(llimit, ulimit):
return {
"randint": random.randint(llimit, ulimit),
"state": random.getstate()
}
st_prod = LambdaProducer(
func=st_prod_func,
func_kwargs={"llimit": 0, "ulimit": 99}
)
```
Next, let's redefine the consumer to consume the state as custom data. We
start again with the `LambdaConsumer` function:
```
def st_cons_sum_func(cons, res):
cons.consume_data("ints", res["randint"])
cons.consume_custom("PNGR_state", hash(res["state"]))
def st_cons_10th_func(cons, res):
cons.consume_stat("value", "int", res["randint"])
cons.consume_custom("PNGR_state", hash(res["state"]))
```
Next, let's define our consumers, based on metadata providers:
```
from framework.stats.consumer import LambdaConsumer
st_cons_sum = LambdaConsumer(
st_cons_sum_func,
metadata_provider=metadata_provider_sum)
st_cons_10th = LambdaConsumer(
st_cons_10th_func,
metadata_provider=metadata_provider_10th)
```
In the end, we redefine the statistics core:
```
from framework.stats.core import Core
# Both exercises require the core to drive both producers and consumers for
# 10 iterations to achieve the wanted result.
st_core = Core(name="randint_observation", iterations=10)
st_core.add_pipe(st_prod, st_cons_sum, tag="10RandomIntsSumExercise")
st_core.add_pipe(st_prod, st_cons_10th, tag="10thRandomIntExercise")
```
And run again the exercise:
```
# Start the exercise without checking the criteria.
st_core.run_exercise(check_criteria=False)
```
Output:
```
{'name': 'randint_observation', 'iterations': 10, 'results': {
'10RandomIntsSumExercise': {'ints': {'_unit': 'none', 'Sum': 502}},
'10thRandomIntExercise': {'int': {'_unit': 'none', 'value': 93}}},
'custom': {
'10RandomIntsSumExercise': {0: {'PNGR_state': [-7761051367110439654]},
1: {'PNGR_state': [4797715617643311001]},
2: {'PNGR_state': [-3343211298676199688]},
3: {'PNGR_state': [-1351346424793161009]},
4: {'PNGR_state': [-1505689957772366290]},
5: {'PNGR_state': [3810535014128659389]},
6: {'PNGR_state': [8691056006996621084]},
7: {'PNGR_state': [-8394051250601789870]},
8: {'PNGR_state': [-3480127558785488400]},
9: {'PNGR_state': [-1363822145985393657]}},
'10thRandomIntExercise': {0: {'PNGR_state': [1074948021089717094]},
1: {'PNGR_state': [-3949202314244540587]},
2: {'PNGR_state': [9001501428032987604]},
3: {'PNGR_state': [480646194341861131]},
4: {'PNGR_state': [8214022971886477930]},
5: {'PNGR_state': [-5298632435091237207]},
6: {'PNGR_state': [-3177751479450511864]},
7: {'PNGR_state': [8940293789185365310]},
8: {'PNGR_state': [1072449063189689805]},
9: {'PNGR_state': [-6391784864046788756]}}}}
```
"""
from . import baseline, consumer, core, criteria, function, metadata, producer, types
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,896
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_snapshot_version.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Basic tests scenarios for snapshot save/restore."""
import json
import platform
import pytest
from framework.utils import get_firecracker_version_from_toml, run_cmd
from host_tools.cargo_build import get_firecracker_binaries
# Firecracker v0.23 used 16 IRQ lines. For virtio devices,
# IRQs are available from 5 to 23, so the maximum number
# of devices allowed at the same time was 11.
FC_V0_23_MAX_DEVICES_ATTACHED = 11
def _create_and_start_microvm_with_net_devices(test_microvm, devices_no=0):
    """Spawn, configure and boot `test_microvm` with `devices_no` net devices.

    When at least one network device is attached, also verify the guest
    responds to an ssh command.
    """
    test_microvm.spawn()
    # Set up a basic microVM: configure the boot source and
    # add a root device.
    test_microvm.basic_config(track_dirty_pages=True)
    # Add network devices on top of the already configured rootfs for a
    # total of (`devices_no` + 1) devices.
    for _ in range(devices_no):
        # Create tap before configuring interface.
        test_microvm.add_net_iface()
    test_microvm.start()
    if devices_no > 0:
        # Verify if guest can run commands.
        exit_code, _, _ = test_microvm.ssh.run("sync")
        assert exit_code == 0
@pytest.mark.skipif(
    platform.machine() != "x86_64", reason="Exercises specific x86_64 functionality."
)
def test_create_with_too_many_devices(test_microvm_with_api):
    """
    Create snapshot with unexpected device count for previous versions.
    """
    test_microvm = test_microvm_with_api
    # Create and start a microVM with `FC_V0_23_MAX_DEVICES_ATTACHED`
    # network devices.
    devices_no = FC_V0_23_MAX_DEVICES_ATTACHED
    _create_and_start_microvm_with_net_devices(test_microvm, devices_no)
    # Pause microVM for snapshot.
    test_microvm.pause()
    # Attempt to create a snapshot with version: `0.23.0`. Firecracker
    # v0.23 allowed a maximum of `FC_V0_23_MAX_DEVICES_ATTACHED` virtio
    # devices at a time. This microVM has `FC_V0_23_MAX_DEVICES_ATTACHED`
    # network devices on top of the rootfs, so the limit is exceeded.
    # NOTE(review): the mem/vmstate paths are presumably relative to the
    # jail root — confirm against the snapshot_create API.
    with pytest.raises(RuntimeError, match="Too many devices attached"):
        test_microvm.api.snapshot_create.put(
            mem_file_path="/vm.mem",
            snapshot_path="/vm.vmstate",
            snapshot_type="Diff",
            version="0.23.0",
        )
def test_create_invalid_version(uvm_nano):
    """
    Test scenario: create snapshot targeting invalid version.

    Two failure modes are exercised: a version string that does not parse at
    all, and a version that parses but predates snapshot support.
    """
    # Use a predefined vm instance.
    test_microvm = uvm_nano
    test_microvm.start()
    # Target an invalid Firecracker version string.
    with pytest.raises(RuntimeError, match="unexpected character 'i'"):
        test_microvm.api.snapshot_create.put(
            mem_file_path="/vm.mem",
            snapshot_path="/vm.vmstate",
            snapshot_type="Full",
            version="invalid",
        )
    # Target a valid version string but with no snapshot support.
    with pytest.raises(
        RuntimeError, match="Cannot translate microVM version to snapshot data version"
    ):
        test_microvm.api.snapshot_create.put(
            mem_file_path="/vm.mem",
            snapshot_path="/vm.vmstate",
            snapshot_type="Full",
            version="0.22.0",
        )
def test_snapshot_current_version(uvm_nano):
    """Tests taking a snapshot at the version specified in Cargo.toml

    Check that it is possible to take a snapshot at the version of the upcoming
    release (during the release process this ensures that if we release version
    x.y, then taking a snapshot at version x.y works - something we'd otherwise
    only be able to test once the x.y binary has been uploaded to S3, at which
    point it is too late, see also the 1.3 release).
    """
    vm = uvm_nano
    vm.start()
    version = get_firecracker_version_from_toml()
    # normalize to a snapshot version: only major.minor matter, patch is 0
    target_version = f"{version.major}.{version.minor}.0"
    snapshot = vm.snapshot_full(target_version=target_version)
    # Fetch Firecracker binary for the latest version
    fc_binary, _ = get_firecracker_binaries()
    # Verify the output of `--describe-snapshot` command line parameter
    cmd = [str(fc_binary)] + ["--describe-snapshot", str(snapshot.vmstate)]
    code, stdout, stderr = run_cmd(cmd)
    # The command must succeed, print nothing on stderr, and report the
    # exact version the snapshot was taken at.
    assert code == 0, stderr
    assert stderr == ""
    assert target_version in stdout
def test_create_with_newer_virtio_features(uvm_nano):
    """
    Attempt to create a snapshot with newer virtio features.

    Snapshots targeting Firecracker versions that lack virtio notification
    suppression must be rejected; targeting >= 1.1.0 must succeed.
    """
    test_microvm = uvm_nano
    test_microvm.add_net_iface()
    test_microvm.start()
    # Init a ssh connection in order to wait for the VM to boot. This way
    # we can be sure that the block device was activated.
    test_microvm.ssh.run("true")
    # Pause microVM for snapshot.
    test_microvm.pause()
    # We try to create a snapshot to a target version < 1.0.0.
    # This should fail because Fc versions < 1.0.0 don't support
    # virtio notification suppression.
    target_fc_versions = ["0.24.0", "0.25.0"]
    # v0.23 only existed for x86_64, so it is only checked there.
    if platform.machine() == "x86_64":
        target_fc_versions.insert(0, "0.23.0")
    expected_msg = (
        "The virtio devices use a features that is incompatible "
        "with older versions of Firecracker: notification suppression"
    )
    for target_fc_version in target_fc_versions:
        with pytest.raises(RuntimeError, match=expected_msg):
            test_microvm.api.snapshot_create.put(
                mem_file_path="/vm.mem",
                snapshot_path="/vm.vmstate",
                version=target_fc_version,
            )
    # We try to create a snapshot for target version 1.0.0. This should
    # fail because in 1.0.0 we do not support notification suppression for Net.
    with pytest.raises(RuntimeError, match=expected_msg):
        test_microvm.api.snapshot_create.put(
            mem_file_path="/vm.mem",
            snapshot_path="/vm.vmstate",
            version="1.0.0",
        )
    # It should work when we target a version >= 1.1.0
    test_microvm.api.snapshot_create.put(
        mem_file_path="/vm.mem",
        snapshot_path="/vm.vmstate",
        version="1.1.0",
    )
def test_create_with_1_5_cpu_template(uvm_plain):
    """
    Verifies that we can't create a snapshot with target version
    less than 1.5 if cpu template with additional vcpu features or
    kvm capabilities is in use.
    """
    # We remove KVM_CAP_IOEVENTFD (capability index 36) from the KVM checks
    # just for testing purposes. A plain dict literal is enough here; the
    # previous json.loads round-trip of a constant string was a no-op.
    custom_cpu_template = {"kvm_capabilities": ["!36"]}
    test_microvm = uvm_plain
    test_microvm.spawn()
    test_microvm.basic_config(vcpu_count=2, mem_size_mib=256)
    test_microvm.api.cpu_config.put(**custom_cpu_template)
    test_microvm.start()
    # Should fail because target version is less than 1.5
    with pytest.raises(
        RuntimeError, match="Cannot translate microVM version to snapshot data version"
    ):
        test_microvm.snapshot_full(target_version="1.4.0")
    # Should pass because target version is >=1.5
    test_microvm.snapshot_full()
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,897
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/stats/criteria.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Module for comparison criteria."""
from abc import ABC, abstractmethod
from pydoc import locate
class CriteriaException(Exception):
    """Raised when an actual measurement fails its comparison criteria."""
# pylint: disable=R0903
class CriteriaFactory:
    """Factory resolving comparison-criteria classes by their class name."""

    @classmethod
    def get(cls, criteria_cls_name) -> "ComparisonCriteria":
        """Resolve `criteria_cls_name` to a class in this module (or None)."""
        return locate("framework.stats.criteria." + criteria_cls_name)
# pylint: disable=R0903
class ComparisonCriteria(ABC):
    """Comparison criteria between results and targets.

    Subclasses implement `check(actual)` and raise `CriteriaException` on
    failure.
    """

    def __init__(self, name: str, baseline: dict):
        """Initialize the comparison criteria.

        Baseline expected schema:
        ```
        {
            "type": "object",
            "properties": {
                "target": number,
            },
            "required": ["target"]
        }
        ```
        """
        self._baseline = baseline
        self._name = name

    @abstractmethod
    def check(self, actual):
        """Compare the target and the actual."""

    @property
    def name(self):
        """Return criteria name."""
        return self._name

    @property
    def target(self):
        """Return criteria target, raising if the baseline is missing it."""
        if self._baseline is None:
            raise CriteriaException("Baseline data not defined.")
        target = self._baseline.get("target")
        if target is None:
            raise CriteriaException("Baseline target not defined.")
        return target

    @property
    def baseline(self) -> dict:
        """Return the baseline."""
        return self._baseline

    def fail_msg(self, actual):
        """Return the default fail message.

        Fixed: the previous message had unbalanced quotes
        ("Target: '5 vs Actual: '3'.").
        """
        return f"{self.name} failed. Target: '{self.target}' vs Actual: '{actual}'."
# pylint: disable=R0903
class GreaterThan(ComparisonCriteria):
    """Criteria that fails whenever the actual value falls below the target."""

    def __init__(self, baseline: dict):
        """Set up the criteria with its baseline dictionary."""
        super().__init__("GreaterThan", baseline)

    def check(self, actual):
        """Raise `CriteriaException` when `actual` is below the target."""
        if actual < self.target:
            raise CriteriaException(self.fail_msg(actual))
# pylint: disable=R0903
class LowerThan(ComparisonCriteria):
    """Criteria that fails whenever the actual value exceeds the target."""

    def __init__(self, baseline: dict):
        """Set up the criteria with its baseline dictionary."""
        super().__init__("LowerThan", baseline)

    def check(self, actual):
        """Raise `CriteriaException` when `actual` is above the target."""
        if actual > self.target:
            raise CriteriaException(self.fail_msg(actual))
# pylint: disable=R0903
class EqualWith(ComparisonCriteria):
    """Criteria that passes while the actual value stays within target ± delta.

    Baseline expected schema:
    ```
    {
        "type": "object",
        "properties": {
            "target": number,
            "delta": number
        },
        "required": ["target", "delta"]
    }
    ```
    """

    def __init__(self, baseline: dict):
        """Set up the criteria with its baseline dictionary."""
        super().__init__("EqualWith", baseline)

    @property
    def delta(self):
        """Return the `delta` field of the baseline, raising when absent."""
        baseline = self._baseline
        if baseline is None:
            raise CriteriaException("Baseline data not defined.")
        value = baseline.get("delta")
        if value is None:
            raise CriteriaException("Baseline delta not defined.")
        return value

    def fail_msg(self, actual):
        """Return the `EqualWith` failure message."""
        return f"{self.name} failed. Target: '{self.target} +- {self.delta}' vs Actual: '{actual}'."

    def check(self, actual):
        """Raise `CriteriaException` when `actual` deviates beyond delta."""
        deviation = abs(self.target - actual)
        if deviation > self.delta:
            raise CriteriaException(self.fail_msg(actual))
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,898
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/.buildkite/common.py
|
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Common helpers to create Buildkite pipelines
"""
import argparse
import json
import subprocess
from pathlib import Path
# Instance types the pipelines target by default (used as Buildkite
# `agents` selectors in the generated steps).
DEFAULT_INSTANCES = [
    "m5d.metal",
    "m6i.metal",
    "m6a.metal",
    "m6g.metal",
    "c7g.metal",
]
# Default (OS, guest-kernel) combinations used to parametrize pipelines.
DEFAULT_PLATFORMS = [
    ("al2", "linux_4.14"),
    ("al2", "linux_5.10"),
    ("al2023", "linux_6.1"),
]
def overlay_dict(base: dict, update: dict):
    """Return a copy of `base` with `update` overlaid, merging nested dicts."""
    result = base.copy()
    for key, val in update.items():
        # Recurse only when both sides hold a dict for this key; otherwise
        # the update value simply replaces (or introduces) the entry.
        nested = key in result and isinstance(val, dict)
        result[key] = overlay_dict(result.get(key, {}), val) if nested else val
    return result
def field_fmt(field, args):
    """Interpolate `args` into `field` when it is a string; pass through otherwise."""
    if isinstance(field, str):
        return field.format(**args)
    return field
def dict_fmt(dict_tmpl, args):
    """Recursively apply `field_fmt` over a whole dict template."""
    return {
        key: dict_fmt(val, args) if isinstance(val, dict) else field_fmt(val, args)
        for key, val in dict_tmpl.items()
    }
def group(label, command, instances, platforms, **kwargs):
    """
    Build a Buildkite group step containing one step per instance/platform
    combination.
    https://buildkite.com/docs/pipelines/group-step
    """
    # Use the 1st character of the group name (should be an emoji)
    emoji = label[0]
    commands = [command] if isinstance(command, str) else command
    steps = []
    for instance in instances:
        for os_name, kernel in platforms:
            # fill any templated variables
            args = {"instance": instance, "os": os_name, "kv": kernel}
            step = {
                "command": [cmd.format(**args) for cmd in commands],
                "label": f"{emoji} {instance} {os_name} {kernel}",
                "agents": args,
            }
            # Extra kwargs act as defaults; explicit step keys win.
            steps.append(overlay_dict(dict_fmt(kwargs, args), step))
    return {"group": label, "steps": steps}
def pipeline_to_json(pipeline):
    """Serialize a pipeline dictionary to pretty-printed, key-sorted JSON."""
    return json.dumps(
        pipeline,
        indent=4,
        sort_keys=True,
        ensure_ascii=False,
    )
def get_changed_files(branch):
    """Return the paths of all files changed relative to `branch`."""
    diff = subprocess.check_output(["git", "diff", "--name-only", branch])
    return [Path(name) for name in diff.decode().splitlines()]
def run_all_tests(changed_files):
    """
    Decide whether the full test suite should run, given the changed files.

    Runs everything when nothing changed, or when any changed file is neither
    documentation (.md) nor a GitHub Actions config (.github/*.yml).
    """
    if not changed_files:
        return True

    def _ignorable(path):
        # docs and GitHub Actions config never require a full test run
        is_doc = path.suffix == ".md"
        is_gh_action = path.parts[0] == ".github" and path.suffix == ".yml"
        return is_doc or is_gh_action

    return not all(_ignorable(path) for path in changed_files)
class DictAction(argparse.Action):
    """An argparse action that accumulates `a/b/c=value` pairs as nested dicts

    Examples:
        --step-param a/b/c=3
        {"a": {"b": {"c": 3}}}
    """

    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        if nargs is not None:
            raise ValueError("nargs not allowed")
        super().__init__(option_strings, dest, **kwargs)

    def __call__(self, parser, namespace, value, option_string=None):
        existing = getattr(namespace, self.dest, {})
        key_str, val = value.split("=", maxsplit=1)
        keys = key_str.split("/")
        # Wrap the value from the innermost key outwards.
        nested = {keys[-1]: val}
        for key in keys[-2::-1]:
            nested = {key: nested}
        setattr(namespace, self.dest, overlay_dict(existing, nested))
# Shared CLI for the pipeline generators: lets callers override the default
# instance/platform matrix and inject extra per-step parameters.
COMMON_PARSER = argparse.ArgumentParser()
COMMON_PARSER.add_argument(
    "--instances",
    required=False,
    nargs="+",
    default=DEFAULT_INSTANCES,
)
COMMON_PARSER.add_argument(
    "--platforms",
    metavar="OS-KV",
    required=False,
    nargs="+",
    default=DEFAULT_PLATFORMS,
    # split "os-kernel" arguments such as "al2-linux_5.10" into a tuple
    type=lambda arg: tuple(arg.split("-", maxsplit=1)),
)
COMMON_PARSER.add_argument(
    "--step-param",
    metavar="PARAM=VALUE",
    help="parameters to add to each step",
    required=False,
    action=DictAction,
    default={},
    type=str,
)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,899
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/host_tools/drive.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Utilities for creating filesystems on the host."""
import os
from framework import utils
class FilesystemFile:
    """Facility for creating and working with filesystems backed by a file."""

    KNOWN_FILEFS_FORMATS = {"ext4"}
    # Backing-file path; doubles as a "created" flag for __del__.
    path = None

    def __init__(self, path: str, size: int = 256, fs_format: str = "ext4"):
        """Create a new `size`-MiB file system in a file.

        Raises if the file system format is not supported or if the backing
        file already exists.
        """
        if fs_format not in self.KNOWN_FILEFS_FORMATS:
            # Fixed: the message previously contained a stray "+ " literal.
            raise ValueError("Format not in: " + str(self.KNOWN_FILEFS_FORMATS))
        # Append the format as an extension to the requested path.
        # (The previous single-argument os.path.join call was a no-op.)
        path = path + "." + fs_format
        if os.path.isfile(path):
            raise FileExistsError("File already exists: " + path)
        # Allocate a zero-filled backing file, then format it as ext4.
        utils.run_cmd(
            "dd status=none if=/dev/zero" " of=" + path + " bs=1M count=" + str(size)
        )
        utils.run_cmd("mkfs.ext4 -qF " + path)
        self.path = path

    def __repr__(self):
        return f"<FilesystemFile path={self.path} size={self.size()}>"

    def resize(self, new_size):
        """Resize the filesystem to `new_size` MiB."""
        utils.run_cmd("truncate --size " + str(new_size) + "M " + self.path)
        utils.run_cmd("resize2fs " + self.path)

    def size(self):
        """Return the size of the backing file in bytes."""
        return os.stat(self.path).st_size

    def __del__(self):
        """Destructor cleaning up filesystem from where it was created."""
        if self.path:
            try:
                os.remove(self.path)
            except OSError:
                # Best effort: the file may already be gone.
                pass
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,900
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/.buildkite/pipeline_cpu_template.py
|
#!/usr/bin/env python3
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Generate Buildkite CPU Template pipelines dynamically"""
import argparse
from enum import Enum
from common import DEFAULT_PLATFORMS, group, pipeline_to_json
class BkStep(str, Enum):
    """
    Commonly used BuildKite step keywords

    The str mixin lets members be used directly where plain strings
    (e.g. dict keys) are expected.
    """

    LABEL = "label"
    TIMEOUT = "timeout"
    COMMAND = "commands"
    ARTIFACTS = "artifact_paths"
# Test matrix for the CPU-template pipelines, keyed by test name.
cpu_template_test = {
    "rdmsr": {
        BkStep.COMMAND: [
            "tools/devtool -y test -- -s -ra -m nonci --log-cli-level=INFO integration_tests/functional/test_cpu_features.py -k 'test_cpu_rdmsr' "
        ],
        BkStep.LABEL: "📖 rdmsr",
        "instances": ["m5d.metal", "m6a.metal", "m6i.metal"],
        "platforms": DEFAULT_PLATFORMS,
    },
    # Two-phase test: snapshot on one instance, restore on (possibly) another.
    "cpuid_wrmsr": {
        "snapshot": {
            BkStep.COMMAND: [
                "tools/devtool -y test -- -s -ra -m nonci --log-cli-level=INFO integration_tests/functional/test_cpu_features.py -k 'test_cpu_wrmsr_snapshot or test_cpu_cpuid_snapshot'",
                "mkdir -pv tests/snapshot_artifacts_upload/{instance}_{os}_{kv}",
                "sudo mv tests/snapshot_artifacts/* tests/snapshot_artifacts_upload/{instance}_{os}_{kv}",
            ],
            BkStep.LABEL: "📸 create snapshots",
            BkStep.ARTIFACTS: "tests/snapshot_artifacts_upload/**/*",
            BkStep.TIMEOUT: 30,
        },
        "restore": {
            BkStep.COMMAND: [
                "buildkite-agent artifact download tests/snapshot_artifacts_upload/{instance}_{os}_{kv}/**/* .",
                "mv tests/snapshot_artifacts_upload/{instance}_{os}_{kv} tests/snapshot_artifacts",
                "tools/devtool -y test -- -s -ra -m nonci --log-cli-level=INFO integration_tests/functional/test_cpu_features.py -k 'test_cpu_wrmsr_restore or test_cpu_cpuid_restore'",
            ],
            BkStep.LABEL: "📸 load snapshot artifacts created on {instance} {snapshot_os} {snapshot_kv} to {restore_instance} {restore_os} {restore_kv}",
            BkStep.TIMEOUT: 30,
        },
        # Extra cross-instance restore pairings (source -> destinations).
        "cross_instances": {
            "m5d.metal": ["m6i.metal"],
            "m6i.metal": ["m5d.metal"],
        },
        "instances": ["m5d.metal", "m6i.metal", "m6a.metal"],
    },
    "aarch64_cpu_templates": {
        BkStep.COMMAND: [
            "tools/devtool -y test -- -s -ra -m nonci --log-cli-level=INFO integration_tests/functional/test_cpu_features_aarch64.py"
        ],
        BkStep.LABEL: "📖 cpu templates",
        "instances": ["m6g.metal", "c7g.metal"],
        "platforms": [("al2_armpatch", "linux_5.10")],
    },
}
def group_single(tests):
    """
    Wrap a single test definition into a one-element list of group steps.
    https://buildkite.com/docs/pipelines/group-step
    """
    return [
        group(
            label=tests[BkStep.LABEL],
            command=tests[BkStep.COMMAND],
            instances=tests["instances"],
            platforms=tests["platforms"],
            artifacts=["./test_results/**/*"],
        )
    ]
def group_snapshot_restore(test_step):
    """
    Generate a group step with specified parameters for each instance
    and kernel combination and handle "wait" command between steps
    https://buildkite.com/docs/pipelines/group-step
    """
    groups = []
    groups.append(
        group(
            label=test_step["snapshot"][BkStep.LABEL],
            command=test_step["snapshot"][BkStep.COMMAND],
            instances=test_step["instances"],
            platforms=DEFAULT_PLATFORMS,
            timeout=test_step["snapshot"][BkStep.TIMEOUT],
            artifacts=test_step["snapshot"][BkStep.ARTIFACTS],
        )
    )
    # "wait" makes Buildkite finish all snapshot steps (and upload their
    # artifacts) before any restore step starts.
    groups.append("wait")
    # Build every (source, destination) pairing: each instance restores its
    # own snapshot plus the snapshots of its `cross_instances` peers, for
    # every default platform.
    snapshot_restore_combinations = []
    for dp in DEFAULT_PLATFORMS:
        for src_instance in test_step["instances"]:
            for dst_instance in [src_instance] + test_step["cross_instances"].get(
                src_instance, []
            ):
                snapshot_restore_combinations.append(
                    ((dp, src_instance), (dp, dst_instance))
                )
    steps = []
    for combination in snapshot_restore_combinations:
        (snapshot_os, snapshot_kv), snapshot_instance = combination[0]
        (restore_os, restore_kv), restore_instance = combination[1]
        # Restore commands download the artifacts produced by the matching
        # snapshot step, keyed by (instance, os, kv) of the snapshot source.
        restore_commands = [
            command.format(instance=snapshot_instance, os=snapshot_os, kv=snapshot_kv)
            for command in test_step["restore"][BkStep.COMMAND]
        ]
        restore_label = test_step["restore"][BkStep.LABEL].format(
            instance=snapshot_instance,
            snapshot_os=snapshot_os,
            snapshot_kv=snapshot_kv,
            restore_instance=restore_instance,
            restore_os=restore_os,
            restore_kv=restore_kv,
        )
        steps.append(
            {
                BkStep.COMMAND: restore_commands,
                BkStep.LABEL: restore_label,
                BkStep.TIMEOUT: test_step["restore"][BkStep.TIMEOUT],
                # Pin the restore step to the destination instance/kernel/OS.
                "agents": [
                    f"instance={restore_instance}",
                    f"kv={restore_kv}",
                    f"os={restore_os}",
                ],
            }
        )
    groups.append({"group": "📸 restores snapshots", "steps": steps})
    return groups
def main():
    """
    Generate group template required to trigger pipelines for
    the requested CPU template test.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--test",
        required=True,
        choices=list(cpu_template_test),
        help="CPU template test",
    )
    test_args = parser.parse_args()
    # Only cpuid_wrmsr needs the two-phase snapshot/restore layout; every
    # other valid choice (argparse enforces the choices) is a single group.
    if test_args.test == "cpuid_wrmsr":
        test_group = group_snapshot_restore(cpu_template_test[test_args.test])
    else:
        test_group = group_single(cpu_template_test[test_args.test])
    print(pipeline_to_json({"steps": test_group}))
if __name__ == "__main__":
    main()
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,901
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/performance/test_snapshot_restore_performance.py
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Performance benchmark for snapshot restore."""
import json
import tempfile
from functools import lru_cache
import pytest
import framework.stats as st
import host_tools.drive as drive_tools
from framework.stats.baseline import Provider as BaselineProvider
from framework.stats.metadata import DictProvider as DictMetadataProvider
from framework.utils import get_kernel_version
from integration_tests.performance.configs import defs
TEST_ID = "snapshot_restore_performance"
WORKLOAD = "restore"
# The baseline config file is selected by the host kernel major version.
CONFIG_NAME_REL = "test_{}_config_{}.json".format(TEST_ID, get_kernel_version(level=1))
CONFIG_NAME_ABS = defs.CFG_LOCATION / CONFIG_NAME_REL
# Minimal guest configuration used by the all-devices test.
BASE_VCPU_COUNT = 1
BASE_MEM_SIZE_MIB = 128
BASE_NET_COUNT = 1
BASE_BLOCK_COUNT = 1
# Conversion factor from microseconds to milliseconds.
USEC_IN_MSEC = 1000
# Measurements tags.
RESTORE_LATENCY = "latency"
# pylint: disable=R0903
class SnapRestoreBaselinesProvider(BaselineProvider):
    """Baselines provider for snapshot restore latency."""

    def __init__(self, env_id, workload, raw_baselines):
        """Snapshot baseline provider initialization."""
        super().__init__(raw_baselines)
        self._tag = "baselines/{}/" + env_id + "/{}/" + workload

    def get(self, metric_name: str, statistic_name: str) -> dict:
        """Return the baseline value corresponding to the key."""
        baseline = self._baselines.get(self._tag.format(metric_name, statistic_name))
        if not baseline:
            return None
        target = baseline.get("target")
        # delta is expressed in the config as a percentage of the target
        return {
            "target": target,
            "delta": baseline.get("delta_percentage") * target / 100,
        }
@lru_cache
def get_scratch_drives():
    """Create an array of scratch disks.

    Cached so repeated calls within a session reuse the same backing files.
    """
    scratchdisks = ["vdb", "vdc", "vdd", "vde"]
    # NOTE(review): tempfile.mktemp only reserves a name (deprecated and
    # race-prone); FilesystemFile requires a non-existing path, so moving
    # to mkstemp would also require changes there.
    return [
        (drive, drive_tools.FilesystemFile(tempfile.mktemp(), size=64))
        for drive in scratchdisks
    ]
def default_lambda_consumer(env_id, workload):
    """Create a default lambda consumer for the snapshot restore test."""
    raw_baselines = json.loads(CONFIG_NAME_ABS.read_text("utf-8"))
    baselines = SnapRestoreBaselinesProvider(env_id, workload, raw_baselines)
    metadata = DictMetadataProvider(raw_baselines["measurements"], baselines)
    return st.consumer.LambdaConsumer(metadata_provider=metadata, func=consume_output)
def get_snap_restore_latency(
    microvm_factory,
    guest_kernel,
    rootfs,
    vcpus,
    mem_size,
    nets=3,
    blocks=3,
    all_devices=False,
    iterations=30,
):
    """Restore snapshots with various configs to measure latency.

    Boots one VM with the requested device configuration, snapshots it, then
    restores the snapshot `iterations` times, collecting the reported
    `load_snapshot` latency (converted to milliseconds) for each restore.
    """
    scratch_drives = get_scratch_drives()
    vm = microvm_factory.build(guest_kernel, rootfs, monitor_memory=False)
    vm.spawn(log_level="Info")
    vm.basic_config(
        vcpu_count=vcpus,
        mem_size_mib=mem_size,
        rootfs_io_engine="Sync",
    )
    for _ in range(nets):
        vm.add_net_iface()
    # The rootfs already counts as one block device, so only (blocks - 1)
    # scratch drives are attached on top of it.
    if blocks > 1:
        for name, diskfile in scratch_drives[: (blocks - 1)]:
            vm.add_drive(name, diskfile.path, io_engine="Sync")
    if all_devices:
        vm.api.balloon.put(
            amount_mib=0, deflate_on_oom=True, stats_polling_interval_s=1
        )
        vm.api.vsock.put(vsock_id="vsock0", guest_cid=3, uds_path="/v.sock")
    vm.start()
    snapshot = vm.snapshot_full()
    vm.kill()
    values = []
    for _ in range(iterations):
        microvm = microvm_factory.build()
        microvm.spawn()
        microvm.restore_from_snapshot(snapshot, resume=True)
        # Check if guest still runs commands.
        exit_code, _, _ = microvm.ssh.run("dmesg")
        assert exit_code == 0
        value = 0
        # Parse all metric data points in search of load_snapshot time.
        microvm.flush_metrics()
        metrics = microvm.get_all_metrics()
        for data_point in metrics:
            cur_value = data_point["latencies_us"]["load_snapshot"]
            if cur_value > 0:
                # Metrics report microseconds; convert to milliseconds.
                value = cur_value / USEC_IN_MSEC
                break
        assert value > 0
        values.append(value)
        microvm.kill()
    snapshot.delete()
    return values
def consume_output(cons, latencies):
    """Consumer function.

    Yields each latency measurement (milliseconds) for the statistics
    framework and feeds it into the consumer's data stream.
    """
    for value in latencies:
        yield RESTORE_LATENCY, value, "Milliseconds"
        cons.consume_data(RESTORE_LATENCY, value)
@pytest.mark.nonci
@pytest.mark.parametrize(
    "mem, vcpus",
    [
        (128, 1),
        (1024, 1),
        (2048, 2),
        (4096, 3),
        (6144, 4),
        (8192, 5),
        (10240, 6),
        (12288, 7),
    ],
)
def test_snapshot_scaling(microvm_factory, rootfs, guest_kernel, st_core, mem, vcpus):
    """
    Restores snapshots with vcpu/memory configuration, roughly scaling according to mem = (vcpus - 1) * 2048MB,
    which resembles firecracker production setups.
    """
    # The guest kernel does not "participate" in snapshot restore, so just pick some
    # arbitrary one
    if "4.14" not in guest_kernel.name:
        pytest.skip()
    guest_config = f"{vcpus}vcpu_{mem}mb"
    env_id = f"{st_core.env_id_prefix}/{guest_config}"
    # Producer runs the restore loop; the consumer checks the collected
    # latencies against the baselines for this environment.
    st_prod = st.producer.LambdaProducer(
        func=get_snap_restore_latency,
        func_kwargs={
            "microvm_factory": microvm_factory,
            "guest_kernel": guest_kernel,
            "rootfs": rootfs,
            "vcpus": vcpus,
            "mem_size": mem,
        },
    )
    st_cons = default_lambda_consumer(env_id, WORKLOAD)
    st_core.add_pipe(st_prod, st_cons, f"{env_id}/{WORKLOAD}")
    st_core.name = TEST_ID
    st_core.custom["guest_config"] = guest_config
    st_core.run_exercise()
@pytest.mark.nonci
def test_snapshot_all_devices(microvm_factory, rootfs, guest_kernel, st_core):
    """Restore snapshots with one of each devices."""
    # The guest kernel does not "participate" in snapshot restore, so just pick some
    # arbitrary one
    if "4.14" not in guest_kernel.name:
        pytest.skip()
    guest_config = "all_dev"
    env_id = f"{st_core.env_id_prefix}/{guest_config}"
    # `all_devices=True` adds balloon and vsock on top of the minimal
    # net + block configuration.
    st_prod = st.producer.LambdaProducer(
        func=get_snap_restore_latency,
        func_kwargs={
            "microvm_factory": microvm_factory,
            "guest_kernel": guest_kernel,
            "rootfs": rootfs,
            "nets": 1,
            "blocks": 1,
            "vcpus": BASE_VCPU_COUNT,
            "mem_size": BASE_MEM_SIZE_MIB,
            "all_devices": True,
        },
    )
    st_cons = default_lambda_consumer(env_id, WORKLOAD)
    st_core.add_pipe(st_prod, st_cons, f"{env_id}/{WORKLOAD}")
    st_core.name = TEST_ID
    st_core.custom["guest_config"] = guest_config
    st_core.run_exercise()
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,902
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/stats/function.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Module for statistical functions."""
from abc import ABC, abstractmethod
from pydoc import locate
# pylint: disable=E0611
from statistics import mean, stdev
from typing import Any, List
# pylint: disable=R0903
class FunctionFactory:
    """Factory resolving statistic function classes by their class name."""

    @classmethod
    def get(cls, func_cls_name) -> "Function":
        """Resolve `func_cls_name` to a class in this module (or None)."""
        return locate("framework.stats.function." + func_cls_name)
# pylint: disable=R0903
class Function(ABC):
    """Abstract base class for statistic functions."""

    def __init__(self, name):
        """Store the identifier under which results are reported."""
        self._name = name

    @abstractmethod
    def __call__(self, result: Any) -> Any:
        """Builtin function needs to be implemented."""

    @property
    def name(self) -> str:
        """Return the name identifier for the class."""
        return self._name
# pylint: disable=R0903
class ValuePlaceholder(Function):
    """No-op pass-through used as a placeholder for results consumption.

    The data is forwarded unchanged to the final statistics view. When applied
    on every iteration of a statistical exercise, only the last iteration's
    result is retained.
    """

    def __init__(self, name="result"):
        """Initialize the statistic function."""
        super().__init__(name)

    def __call__(self, result: Any) -> Any:
        """Forward the result unchanged."""
        return result
# pylint: disable=R0903
class Min(Function):
    """Computes the minimum of a list of observations."""

    def __init__(self, name="Min"):
        """Initialize the statistic function."""
        super().__init__(name)

    def __call__(self, result: Any) -> Any:
        """Return the smallest observation in the list."""
        assert isinstance(result, list)
        return min(result)
# pylint: disable=R0903
class Max(Function):
    """Statistic returning the largest of a list of observations."""

    def __init__(self, name="Max"):
        """Initialize with the report name for this statistic."""
        super().__init__(name)

    def __call__(self, result: Any) -> Any:
        """Return the maximum observation in `result`."""
        assert isinstance(result, list)
        return max(result)
# pylint: disable=R0903
class Avg(Function):
    """Statistic returning the arithmetic mean of a list of observations."""

    def __init__(self, name="Avg"):
        """Initialize with the report name for this statistic."""
        super().__init__(name)

    def __call__(self, result: Any) -> Any:
        """Return the mean of the observations in `result`."""
        assert isinstance(result, list)
        return mean(result)
# pylint: disable=R0903
class Sum(Function):
    """Statistic returning the sum of a list of observations."""

    def __init__(self, name="Sum"):
        """Initialize with the report name for this statistic."""
        super().__init__(name)

    def __call__(self, result: Any) -> Any:
        """Return the sum of the observations in `result`."""
        assert isinstance(result, list)
        return sum(result)
# pylint: disable=R0903
class Stddev(Function):
    """Statistic returning the sample standard deviation of observations."""

    def __init__(self, name="Stddev"):
        """Initialize with the report name for this statistic."""
        super().__init__(name)

    def __call__(self, result: Any) -> Any:
        """Return the stddev of `result` (0 for a single observation).

        `statistics.stdev` requires at least two data points, hence the
        explicit single-element short-circuit.
        """
        assert isinstance(result, list)
        assert len(result) > 0
        return 0 if len(result) == 1 else stdev(result)
# pylint: disable=R0903
class Percentile(Function, ABC):
    """Statistic returning the kth percentile of a list of observations."""

    def __init__(self, k: int, name: str):
        """Initialize the function.

        :param k: percentile rank, expected in the range (0, 100).
        :param name: display name used in reports (e.g. "P50").
        """
        super().__init__(name)
        self.k = k

    def __call__(self, result: List) -> Any:
        """Return the kth percentile of the observations in `result`.

        Bug fix: the previous implementation called `result.sort()`, which
        mutated the caller's observation list as a side effect. A sorted
        copy is used instead, so `result` is left untouched.
        """
        assert isinstance(result, list)
        if len(result) == 1:
            return result[0]
        ordered = sorted(result)
        length = len(ordered)
        idx = length * self.k / 100
        if not idx.is_integer():
            # Index falls between two observations: average them, clamping
            # the upper index to the last element of the list.
            upper = min(int(idx) + 1, length - 1)
            return (ordered[int(idx)] + ordered[upper]) / 2
        return ordered[int(idx)]
class Percentile50(Percentile):
    """The 50th-percentile (median-like) statistic over observations."""

    def __init__(self, name="P50"):
        """Initialize with k=50 and the report name."""
        super().__init__(50, name)
class Percentile90(Percentile):
    """The 90th-percentile statistic over observations."""

    def __init__(self, name="P90"):
        """Initialize with k=90 and the report name."""
        super().__init__(90, name)
class Percentile99(Percentile):
    """The 99th-percentile statistic over observations."""

    def __init__(self, name="P99"):
        """Initialize with k=99 and the report name."""
        super().__init__(99, name)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,903
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/test_kani.py
|
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Proofs ensuring memory safety properties, user-defined assertions,
absence of panics and some types of unexpected behavior (e.g., arithmetic overflows).
"""
import platform
import pytest
from framework import utils
# Kani proofs are only run on x86_64 hosts (see skipif below).
PLATFORM = platform.machine()
# Crates that currently contain Kani proof harnesses.
CRATES_WITH_PROOFS = ["dumbo", "vmm"]
@pytest.mark.timeout(1800)
@pytest.mark.skipif(PLATFORM != "x86_64", reason="Kani proofs run only on x86_64.")
@pytest.mark.parametrize("crate", CRATES_WITH_PROOFS)
def test_kani(results_dir, crate):
    """
    Test all Kani proof harnesses.
    """
    # Flag rationale:
    #   --enable-stubbing       enables the stubbing feature
    #   --restrict-vtable       required for some virtio queue proofs, which
    #                           go out of memory otherwise
    #   -j                      verifies harnesses in parallel (keeps CI fast)
    #   --output-format terse   required by -j
    #   --enable-unstable       needed for each of the above
    cmd = (
        f"cargo kani -p {crate} --enable-unstable --enable-stubbing "
        "--restrict-vtable -j --output-format terse"
    )
    rc, stdout, stderr = utils.run_cmd(cmd)
    assert rc == 0, stderr
    log_file = results_dir / f"kani_log_{crate}"
    log_file.write_text(stdout, encoding="utf-8")
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,904
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_drives.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests for guest-side operations on /drives resources."""
# pylint:disable=redefined-outer-name
import os
from subprocess import check_output
import pytest
import host_tools.drive as drive_tools
from framework import utils
MB = 1024 * 1024
@pytest.fixture
def uvm_with_partuuid(uvm_plain, record_property, rootfs_ubuntu_22, tmp_path):
    """uvm_plain with a partuuid rootfs

    We build the disk image here so we don't need a separate artifact for it.

    Steps: create an image 50 MB larger than the rootfs, give it a single
    Linux (type 83) partition, copy the rootfs into that partition through a
    loop device, and record the partition's PARTUUID for later boot config.
    """
    disk_img = tmp_path / "disk.img"
    initial_size = rootfs_ubuntu_22.stat().st_size + 50 * MB
    disk_img.touch()
    os.truncate(disk_img, initial_size)
    # Create one primary partition of type 83 (Linux) spanning the image.
    check_output(f"echo type=83 | sfdisk {str(disk_img)}", shell=True)
    # --partscan makes the kernel expose the partition as {loop_dev}p1.
    stdout = check_output(
        f"losetup --find --partscan --show {str(disk_img)}", shell=True
    )
    loop_dev = stdout.decode("ascii").strip()
    check_output(f"dd if={str(rootfs_ubuntu_22)} of={loop_dev}p1", shell=True)
    # UUID=$(sudo blkid -s UUID -o value "${loop_dev}p1")
    stdout = check_output(f"blkid -s PARTUUID -o value {loop_dev}p1", shell=True)
    partuuid = stdout.decode("ascii").strip()
    # cleanup: release loop device
    check_output(f"losetup -d {loop_dev}", shell=True)
    record_property("rootfs", rootfs_ubuntu_22.name)
    uvm_plain.spawn()
    uvm_plain.rootfs_file = disk_img
    uvm_plain.ssh_key = rootfs_ubuntu_22.with_suffix(".id_rsa")
    uvm_plain.partuuid = partuuid
    # Root device is added by the test itself via PARTUUID.
    uvm_plain.basic_config(add_root_device=False)
    uvm_plain.add_net_iface()
    yield uvm_plain
    # Teardown: remove the scratch disk image after the test completes.
    disk_img.unlink()
def test_rescan_file(test_microvm_with_api):
    """
    Verify that rescan works with a file-backed virtio device.

    Truncates the backing file on the host, checks the guest hits I/O errors
    past the truncation point, then re-PATCHes the drive (triggering a
    rescan) and checks the full size is visible again.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    # Set up the microVM with 1 vCPUs, 256 MiB of RAM and a root file system
    test_microvm.basic_config()
    test_microvm.add_net_iface()
    # Scratch drive size; presumably in MiB (drive_tools size unit) — TODO confirm.
    block_size = 2
    # Add a scratch block device.
    fs = drive_tools.FilesystemFile(
        os.path.join(test_microvm.fsfiles, "scratch"), size=block_size
    )
    test_microvm.add_drive("scratch", fs.path)
    test_microvm.start()
    _check_block_size(test_microvm.ssh, "/dev/vdb", fs.size())
    # Check if reading from the entire disk results in a file of the same size
    # or errors out, after a truncate on the host.
    truncated_size = block_size // 2
    utils.run_cmd(f"truncate --size {truncated_size}M {fs.path}")
    block_copy_name = "/tmp/dev_vdb_copy"
    _, _, stderr = test_microvm.ssh.run(
        f"dd if=/dev/vdb of={block_copy_name} bs=1M count={block_size}"
    )
    # Reads past the truncated end must fail inside the guest.
    assert "dd: error reading '/dev/vdb': Input/output error" in stderr
    _check_file_size(test_microvm.ssh, f"{block_copy_name}", truncated_size * MB)
    # PATCHing the drive triggers a rescan; the full size is visible again.
    test_microvm.api.drive.patch(
        drive_id="scratch",
        path_on_host=test_microvm.create_jailed_resource(fs.path),
    )
    _check_block_size(test_microvm.ssh, "/dev/vdb", fs.size())
def test_device_ordering(test_microvm_with_api):
    """
    Verify device ordering.

    The root device should correspond to /dev/vda in the guest and
    the order of the other devices should match their configuration order.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    # Add first scratch block device.
    fs1 = drive_tools.FilesystemFile(
        os.path.join(test_microvm.fsfiles, "scratch1"), size=128
    )
    test_microvm.add_drive("scratch1", fs1.path)
    # Set up the microVM with 1 vCPUs, 256 MiB of RAM and a root file system
    # (this is the second block device added).
    test_microvm.basic_config()
    test_microvm.add_net_iface()
    # Add the third block device.
    fs2 = drive_tools.FilesystemFile(
        os.path.join(test_microvm.fsfiles, "scratch2"), size=512
    )
    test_microvm.add_drive("scratch2", fs2.path)
    test_microvm.start()
    # Determine the size of the microVM rootfs in bytes.
    rc, stdout, stderr = utils.run_cmd(
        "du --apparent-size --block-size=1 {}".format(test_microvm.rootfs_file),
    )
    assert rc == 0, f"Failed to get microVM rootfs size: {stderr}"
    # `du` prints "<size>\t<path>", i.e. exactly two fields.
    assert len(stdout.split()) == 2
    rootfs_size = stdout.split("\t")[0]
    # The devices were added in this order: fs1, rootfs, fs2.
    # However, the rootfs is the root device and goes first,
    # so we expect to see this order: rootfs, fs1, fs2.
    # The devices are identified by their size.
    ssh_connection = test_microvm.ssh
    _check_block_size(ssh_connection, "/dev/vda", rootfs_size)
    _check_block_size(ssh_connection, "/dev/vdb", fs1.size())
    _check_block_size(ssh_connection, "/dev/vdc", fs2.size())
def test_rescan_dev(test_microvm_with_api):
    """
    Verify that rescan works with a device-backed virtio device.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    # Set up the microVM with 1 vCPUs, 256 MiB of RAM and a root file system
    test_microvm.basic_config()
    test_microvm.add_net_iface()
    # Add a scratch block device.
    fs1 = drive_tools.FilesystemFile(os.path.join(test_microvm.fsfiles, "fs1"))
    test_microvm.add_drive("scratch", fs1.path)
    test_microvm.start()
    _check_block_size(test_microvm.ssh, "/dev/vdb", fs1.size())
    # Back the replacement filesystem with a loop device so the drive is
    # device-backed rather than file-backed.
    fs2 = drive_tools.FilesystemFile(
        os.path.join(test_microvm.fsfiles, "fs2"), size=512
    )
    losetup = ["losetup", "--find", "--show", fs2.path]
    rc, stdout, _ = utils.run_cmd(losetup)
    assert rc == 0
    loopback_device = stdout.rstrip()
    try:
        # PATCHing the drive to the loop device triggers the rescan.
        test_microvm.api.drive.patch(
            drive_id="scratch",
            path_on_host=test_microvm.create_jailed_resource(loopback_device),
        )
        _check_block_size(test_microvm.ssh, "/dev/vdb", fs2.size())
    finally:
        # Always release the host loop device, even when assertions fail.
        if loopback_device:
            utils.run_cmd(["losetup", "--detach", loopback_device])
def test_non_partuuid_boot(test_microvm_with_api):
    """
    Test the output reported by blockdev when booting from /dev/vda.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    # Sets up the microVM with 1 vCPUs, 256 MiB of RAM and a root file system
    test_microvm.basic_config(vcpu_count=1)
    test_microvm.add_net_iface()
    # Add another read-only block device.
    fs = drive_tools.FilesystemFile(os.path.join(test_microvm.fsfiles, "readonly"))
    test_microvm.add_drive("scratch", fs.path, is_read_only=True)
    test_microvm.start()
    # Expected `blockdev --report` fields, keyed "<line>-<column>": the root
    # device on line 1 is /dev/vda mounted "ro", and the scratch device on
    # line 2 is also "ro".
    assert_dict = {
        "1-0": "ro",
        "1-6": "/dev/vda",
        "2-0": "ro",
    }
    _check_drives(test_microvm, assert_dict, list(assert_dict))
def test_partuuid_boot(uvm_with_partuuid):
    """
    Test the output reported by blockdev when booting with PARTUUID.
    """
    test_microvm = uvm_with_partuuid
    # Add the root block device specified through PARTUUID.
    test_microvm.add_drive(
        "rootfs",
        test_microvm.rootfs_file,
        is_root_device=True,
        partuuid=test_microvm.partuuid,
    )
    test_microvm.start()
    # Expected `blockdev --report` fields, keyed "<line>-<column>": both the
    # whole disk (/dev/vda) and the boot partition (/dev/vda1) are "rw".
    assert_dict = {
        "1-0": "rw",
        "1-6": "/dev/vda",
        "2-0": "rw",
        "2-6": "/dev/vda1",
    }
    _check_drives(test_microvm, assert_dict, list(assert_dict))
def test_partuuid_update(test_microvm_with_api):
    """
    Test successful switching from PARTUUID boot to /dev/vda boot.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    # Set up the microVM with 1 vCPUs, 256 MiB of RAM
    test_microvm.basic_config(vcpu_count=1, add_root_device=False)
    test_microvm.add_net_iface()
    # Add the root block device specified through PARTUUID.
    test_microvm.add_drive(
        "rootfs", test_microvm.rootfs_file, is_root_device=True, partuuid="0eaa91a0-01"
    )
    # Update the root block device to boot from /dev/vda.
    test_microvm.add_drive(
        "rootfs",
        test_microvm.rootfs_file,
        is_root_device=True,
    )
    test_microvm.start()
    # The final boot method must be /dev/vda: `blockdev --report` line 1
    # shows the root device read-write at /dev/vda.
    assert_dict = {
        "1-0": "rw",
        "1-6": "/dev/vda",
    }
    _check_drives(test_microvm, assert_dict, list(assert_dict))
def test_patch_drive(test_microvm_with_api):
    """
    Test replacing the backing filesystem after guest boot works.

    PATCHes the scratch drive to a larger backing file, then verifies the
    guest observes the new size via `lsblk`.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    # Set up the microVM with 1 vCPUs, 256 MiB of RAM and a root file system
    test_microvm.basic_config()
    test_microvm.add_net_iface()
    fs1 = drive_tools.FilesystemFile(os.path.join(test_microvm.fsfiles, "scratch"))
    test_microvm.add_drive("scratch", fs1.path)
    test_microvm.start()
    # Updates to `path_on_host` with a valid path are allowed.
    fs2 = drive_tools.FilesystemFile(
        os.path.join(test_microvm.fsfiles, "otherscratch"), size=512
    )
    test_microvm.api.drive.patch(
        drive_id="scratch", path_on_host=test_microvm.create_jailed_resource(fs2.path)
    )
    # The `lsblk` command should output 2 lines to STDOUT: "SIZE" and the size
    # of the device, in bytes.
    blksize_cmd = "lsblk -b /dev/vdb --output SIZE"
    size_bytes_str = "536870912"  # = 512 MiB
    _, stdout, stderr = test_microvm.ssh.run(blksize_cmd)
    assert stderr == ""
    lines = stdout.split("\n")
    # skip "SIZE"
    assert lines[1].strip() == size_bytes_str
def test_no_flush(test_microvm_with_api):
    """
    Verify default block ignores flush.

    With the default cache type, guest flush requests must not reach the
    backing file, so the `flush_count` metric stays at zero.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    test_microvm.basic_config(vcpu_count=1, add_root_device=False)
    test_microvm.add_net_iface()
    # Add the block device
    test_microvm.add_drive(
        "rootfs",
        test_microvm.rootfs_file,
        is_root_device=True,
    )
    test_microvm.start()
    # Verify all flush commands were ignored during boot.
    fc_metrics = test_microvm.flush_metrics()
    assert fc_metrics["block"]["flush_count"] == 0
    # Have the guest drop the caches to generate flush requests.
    cmd = "sync; echo 1 > /proc/sys/vm/drop_caches"
    _, _, stderr = test_microvm.ssh.run(cmd)
    assert stderr == ""
    # Verify all flush commands were ignored even after
    # dropping the caches.
    fc_metrics = test_microvm.flush_metrics()
    assert fc_metrics["block"]["flush_count"] == 0
def test_flush(uvm_plain_rw):
    """
    Verify block with flush actually flushes.

    With cache_type="Writeback", guest flush requests must be honored, so
    the `flush_count` metric becomes non-zero after a cache drop.
    """
    test_microvm = uvm_plain_rw
    test_microvm.spawn()
    test_microvm.basic_config(vcpu_count=1, add_root_device=False)
    test_microvm.add_net_iface()
    # Add the block device with explicitly enabling flush.
    test_microvm.add_drive(
        "rootfs",
        test_microvm.rootfs_file,
        is_root_device=True,
        cache_type="Writeback",
    )
    test_microvm.start()
    # Have the guest drop the caches to generate flush requests.
    cmd = "sync; echo 1 > /proc/sys/vm/drop_caches"
    _, _, stderr = test_microvm.ssh.run(cmd)
    assert stderr == ""
    # On average, dropping the caches right after boot generates
    # about 6 block flush requests.
    fc_metrics = test_microvm.flush_metrics()
    assert fc_metrics["block"]["flush_count"] > 0
def test_block_default_cache_old_version(test_microvm_with_api):
    """
    Verify that saving a snapshot for old versions works correctly.

    Snapshotting to a target version that predates block cache types must
    log a warning and fall back to "unsafe" mode.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    test_microvm.basic_config(vcpu_count=1, add_root_device=False)
    # Add the block device with explicitly enabling flush.
    test_microvm.add_drive(
        "rootfs",
        test_microvm.rootfs_file,
        is_root_device=True,
        cache_type="Writeback",
    )
    test_microvm.start()
    # Pause the VM to create the snapshot.
    test_microvm.pause()
    # Create the snapshot for a version without block cache type.
    test_microvm.api.snapshot_create.put(
        mem_file_path="memfile",
        snapshot_path="snapsfile",
        snapshot_type="Full",
        version="0.24.0",
    )
    # We should find a warning in the logs for this case as this
    # cache type was not supported in 0.24.0 and we should default
    # to "Unsafe" mode.
    test_microvm.check_log_message(
        "Target version does not implement the"
        " current cache type. "
        'Defaulting to "unsafe" mode.'
    )
def _check_block_size(ssh_connection, dev_path, size):
_, stdout, stderr = ssh_connection.run("blockdev --getsize64 {}".format(dev_path))
assert stderr == ""
assert stdout.strip() == str(size)
def _check_file_size(ssh_connection, dev_path, size):
_, stdout, stderr = ssh_connection.run("stat --format=%s {}".format(dev_path))
assert stderr == ""
assert stdout.strip() == str(size)
def _process_blockdev_output(blockdev_out, assert_dict, keys_array):
blockdev_out_lines = blockdev_out.splitlines()
for key in keys_array:
line = int(key.split("-")[0])
col = int(key.split("-")[1])
blockdev_out_line_cols = blockdev_out_lines[line].split()
assert blockdev_out_line_cols[col] == assert_dict[key]
def _check_drives(test_microvm, assert_dict, keys_array):
    """Run `blockdev --report` in the guest and verify selected fields."""
    _, stdout, stderr = test_microvm.ssh.run("blockdev --report")
    assert stderr == ""
    _process_blockdev_output(stdout, assert_dict, keys_array)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,905
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/stats/metadata.py
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Module for common statistic tests metadata providers."""
from abc import ABC, abstractmethod
from typing import Dict
from .baseline import Provider as BaselineProvider
from .criteria import CriteriaFactory
from .function import FunctionFactory
from .types import MeasurementDef, StatisticDef
# pylint: disable=R0903
class Provider(ABC):
    """Backend for test metadata retrieval.

    Metadata consists of measurements and statistics definitions.
    """
    def __init__(self, baseline_provider: BaselineProvider):
        """Initialize the metadata provider.

        :param baseline_provider: backend used to look up the baseline for
            each (measurement, statistic) pair.
        """
        self._baseline_provider = baseline_provider
    @property
    @abstractmethod
    def measurements(self) -> Dict[str, MeasurementDef]:
        """Return measurement dictionary, keyed by measurement name."""
    @property
    def baseline_provider(self) -> BaselineProvider:
        """Return the baseline provider."""
        return self._baseline_provider
# pylint: disable=R0903
class DictProvider(Provider):
    """Backend for test metadata retrieval, backed by a plain dictionary."""

    UNIT_KEY = "unit"
    STATISTICS_KEY = "statistics"

    def __init__(self, measurements: dict, baseline_provider: BaselineProvider):
        """
        Initialize metadata provider.

        :param measurements: measurement definitions following the schema
            below.
        :param baseline_provider: used to fetch the baseline for each
            (measurement, statistic) pair when a criteria is defined.
        :raises AssertionError: when a definition is missing a required
            field or names an unknown function/criteria class.

        The provider expects to receive measurements following the below
        schema:
        ```
        "measurements": {
            "$id": "MEASUREMENTS_SCHEMA"
            "type": "object",
            "definitions": {
                "Criteria": {
                    "type": "string",
                    "description": "Comparison criteria class name. They are
                    implemented in the `statistics.criteria` module."
                }
                "Function": {
                    "type": "string",
                    "description": "Statistic functions class name. They are
                    implemented in the `statistics.function` module."
                }
                "StatisticDef": {
                    {
                        "type": "object",
                        "description": "Exhaustive statistic definition."
                        "properties": {
                            "name": { "type": "string" },
                            "function": {
                                "type": "string"
                                "$ref": "#/definitions/Function"
                            },
                            "criteria": {
                                "type": "string"
                                "$ref" "#/definitions/Criteria"
                            }
                        },
                        "required": ["function"]
                    }
                }
            },
            "properties": {
                "key": {
                    "type": "string",
                    "description": "Measurement name."
                },
                "value": {
                    "type": "object",
                    "properties": {
                        "unit": "string",
                        "statistics": {
                            "type": "object",
                            "properties": {
                                "key": {
                                    "type": "string",
                                    "description": "Statistic name."
                                },
                                "value": {
                                    "type": "object",
                                    "$ref": "#/definitions/StatisticDef"
                                }
                            }
                        }
                    },
                    "required": ["unit"]
                }
            }
        }
        ```
        """
        super().__init__(baseline_provider)
        self._measurements = {}
        for ms_name, ms_def in measurements.items():
            assert DictProvider.UNIT_KEY in ms_def, (
                f"'{DictProvider.UNIT_KEY}' field is required for '"
                f"{ms_name}' measurement definition."
            )
            assert DictProvider.STATISTICS_KEY in ms_def, (
                f"'{DictProvider.STATISTICS_KEY}' field is required for '"
                f"{ms_name}' measurement definition."
            )
            unit = ms_def[DictProvider.UNIT_KEY]
            st_defs = ms_def[DictProvider.STATISTICS_KEY]
            st_list = []
            for st_def in st_defs:
                # 'function' is mandatory for every statistic definition.
                func_cls_name = st_def.get("function")
                assert func_cls_name, (
                    f"Error in '{ms_name}' "
                    "measurement definition: "
                    "'function' field is required for "
                    "measurement statistics definitions."
                )
                func_cls = FunctionFactory.get(func_cls_name)
                # Bug fix: this previously asserted `func_cls_name` (always
                # truthy at this point), so an unknown function name slipped
                # through and crashed below as a `NoneType` call instead of
                # failing with this message.
                assert func_cls, (
                    f"Error in '{ms_name}' "
                    "measurement definition: "
                    f"'{func_cls_name}' is not a valid "
                    f"statistic function."
                )
                # 'name' is optional; fall back to the function's default.
                name = st_def.get("name")
                func = func_cls(name) if name else func_cls()
                # When the statistic definition does not define a criteria,
                # `criteria` is `None`. On the other hand, when the statistic
                # definition defines a criteria but does not have the
                # corresponding baseline, `criteria` is initialized with an
                # empty baseline.
                criteria = None
                criteria_cls_name = st_def.get("criteria")
                if criteria_cls_name:
                    criteria_cls = CriteriaFactory.get(criteria_cls_name)
                    assert criteria_cls, (
                        f"{criteria_cls_name} is not a " f"valid criteria."
                    )
                    baseline = baseline_provider.get(ms_name, func.name)
                    criteria = criteria_cls(baseline if baseline else {})
                st_list.append(StatisticDef(func, criteria))
            self._measurements[ms_name] = MeasurementDef(ms_name, unit, st_list)

    @property
    def measurements(self):
        """Return the measurement definitions dictionary."""
        return self._measurements
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.